From df474653733c51ed91d60cf3efee69f7bf3199bd Mon Sep 17 00:00:00 2001
From: Pavel Kalinnikov
Date: Tue, 13 Jul 2021 12:44:48 +0100
Subject: [PATCH] merkle: Use only complete subtree nodes for proofs (#2572)

This change makes the proof construction functions fetch only
non-ephemeral nodes. Previously they could fetch ephemeral nodes when the
requested tree size was equal to the stored tree size. Since ephemeral
nodes are no longer fetched, they no longer need to be stored, which
enables storage savings and the removal of the revisions concept.
---
 CHANGELOG.md                    |   3 +
 docs/merkletree/treetex/main.go |   3 +-
 merkle/log_proofs.go            |  61 ++++++-------------
 merkle/log_proofs_test.go       | 104 ++++++++++++++------------
 server/log_rpc_server.go        |  18 +++---
 server/log_rpc_server_test.go   |  72 +++++++++++++++-------
 server/proof_fetcher_test.go    |   8 +--
 7 files changed, 133 insertions(+), 136 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index f167250e36..3f71217283 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -35,6 +35,9 @@
 * Removed the `ReadOnlyLogTX` interface, and put its only used `GetActiveLogIDs`
   method to `LogStorage`.
 * Inlined the `LogMetadata` interface to `ReadOnlyLogStorage`.
+* Removed the need for the storage layer to return ephemeral node hashes. The
+  application layer now requests complete subtree nodes comprising the compact
+  ranges that correspond to the requested proofs.
 * TODO(pavelkalinnikov): More changes are coming, and will be added here.
 
 ## v1.3.13
diff --git a/docs/merkletree/treetex/main.go b/docs/merkletree/treetex/main.go
index df5ff553e2..f3ba96c98c 100644
--- a/docs/merkletree/treetex/main.go
+++ b/docs/merkletree/treetex/main.go
@@ -379,7 +379,8 @@ func main() {
 	if *inclusion > 0 {
 		leafID := compact.NewNodeID(0, uint64(*inclusion))
 		modifyNodeInfo(leafID, func(n *nodeInfo) { n.incPath = true })
-		nf, err := merkle.CalcInclusionProofNodeAddresses(int64(*treeSize), *inclusion, int64(*treeSize))
+		// TODO(pavelkalinnikov): Highlight the "ephemeral" node too.
+		nf, err := merkle.CalcInclusionProofNodeAddresses(int64(*treeSize), *inclusion)
 		if err != nil {
 			log.Fatalf("Failed to calculate inclusion proof addresses: %s", err)
 		}
diff --git a/merkle/log_proofs.go b/merkle/log_proofs.go
index 863c9752ba..af2ab97f61 100644
--- a/merkle/log_proofs.go
+++ b/merkle/log_proofs.go
@@ -16,7 +16,6 @@ package merkle
 
 import (
 	"errors"
-	"fmt"
 	"math/bits"
 
 	"github.com/google/trillian/merkle/compact"
@@ -31,27 +30,14 @@ type NodeFetch struct {
 	Rehash bool
 }
 
-// checkSize performs a couple of simple sanity checks on size and storedSize
-// and returns an error if there's a problem.
-func checkSize(desc string, size, storedSize int64) error {
-	if size < 1 {
-		return fmt.Errorf("%s %d < 1", desc, size)
-	}
-	if size > storedSize {
-		return fmt.Errorf("%s %d > storedSize %d", desc, size, storedSize)
-	}
-	return nil
-}
-
 // CalcInclusionProofNodeAddresses returns the tree node IDs needed to build an
-// inclusion proof for a specified tree size and leaf index. The size parameter
-// is the tree size being queried for, storedSize is the actual size of the
-// tree at the revision we are using to fetch nodes (this can be > size).
+// inclusion proof for a specified tree size and leaf index. All the returned
+// nodes represent complete subtrees in the tree of this size or above.
 //
 // Use Rehash function to compose the proof after the node hashes are fetched.
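To make the Rehash step referred to above concrete, here is a minimal, self-contained sketch of the fold a caller performs after fetching the node hashes. The `fetch` type and `hashChildren` below are stand-ins for `merkle.NodeFetch` and the log hasher, not the exact Trillian API.

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

// fetch mirrors the shape of merkle.NodeFetch for this sketch: the fetched
// hash of one node, plus a flag saying whether it belongs to a run that must
// be folded into a single "ephemeral" hash.
type fetch struct {
	rehash bool
	hash   []byte
}

// hashChildren stands in for the log hasher's interior-node hash
// (RFC 6962 uses SHA-256 over 0x01 || left || right).
func hashChildren(left, right []byte) []byte {
	h := sha256.New()
	h.Write([]byte{1})
	h.Write(left)
	h.Write(right)
	return h.Sum(nil)
}

// rehash collapses every run of consecutive rehash-flagged entries into one
// proof hash and keeps the remaining hashes as they are. Within a run the
// first entry is the rightmost node of the compact range, so each following
// entry becomes the left child of the accumulated hash.
func rehash(fetches []fetch) [][]byte {
	var proof [][]byte
	for i := 0; i < len(fetches); {
		if !fetches[i].rehash {
			proof = append(proof, fetches[i].hash)
			i++
			continue
		}
		j := i + 1
		for j < len(fetches) && fetches[j].rehash {
			j++
		}
		acc := fetches[i].hash
		for k := i + 1; k < j; k++ {
			acc = hashChildren(fetches[k].hash, acc)
		}
		proof = append(proof, acc)
		i = j
	}
	return proof
}

func main() {
	// Inclusion proof for leaf 2 in a tree of size 7, as in the updated
	// tests: d and g are used verbatim, while j = node (0,6) and
	// i = node (1,2) are folded into l = hash(i, j).
	fetches := []fetch{
		{hash: []byte("d")},               // node (0,3)
		{hash: []byte("g")},               // node (1,0)
		{rehash: true, hash: []byte("j")}, // node (0,6)
		{rehash: true, hash: []byte("i")}, // node (1,2)
	}
	for _, h := range rehash(fetches) {
		fmt.Printf("%x\n", h)
	}
}
```

The same fold reproduces the hash that storage previously returned for the ephemeral node, which is why verifiers continue to see byte-for-byte identical proofs.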
-func CalcInclusionProofNodeAddresses(size, index, storedSize int64) ([]NodeFetch, error) {
-	if err := checkSize("size", size, storedSize); err != nil {
-		return nil, status.Errorf(codes.InvalidArgument, "invalid parameter for inclusion proof: %v", err)
+func CalcInclusionProofNodeAddresses(size, index int64) ([]NodeFetch, error) {
+	if size < 1 {
+		return nil, status.Errorf(codes.InvalidArgument, "invalid parameter for inclusion proof: size %d < 1", size)
 	}
 	if index >= size {
 		return nil, status.Errorf(codes.InvalidArgument, "invalid parameter for inclusion proof: index %d is >= size %d", index, size)
@@ -59,40 +45,31 @@ func CalcInclusionProofNodeAddresses(size, index, storedSize int64) ([]NodeFetch
 	if index < 0 {
 		return nil, status.Errorf(codes.InvalidArgument, "invalid parameter for inclusion proof: index %d is < 0", index)
 	}
-	// Note: If size < storedSize, the storage might not contain the
-	// "ephemeral" node of this proof, so rehashing is needed.
-	return proofNodes(uint64(index), 0, uint64(size), size < storedSize), nil
+	return proofNodes(uint64(index), 0, uint64(size), true), nil
 }
 
 // CalcConsistencyProofNodeAddresses returns the tree node IDs needed to build
-// a consistency proof between two specified tree sizes. size1 and size2
-// represent the two tree sizes for which consistency should be proved,
-// storedSize is the actual size of the tree at the revision we are using to
-// fetch nodes (this can be > size2).
-//
-// The caller is responsible for checking that the input tree sizes correspond
-// to valid tree heads. All returned NodeIDs are tree coordinates within the
-// new tree. It is assumed that they will be fetched from storage at a revision
-// corresponding to the STH associated with the storedSize parameter.
+// a consistency proof between two specified tree sizes. All the returned nodes
+// represent complete subtrees in the tree of size2 or above.
 //
 // Use Rehash function to compose the proof after the node hashes are fetched.
-func CalcConsistencyProofNodeAddresses(size1, size2, storedSize int64) ([]NodeFetch, error) {
-	if err := checkSize("size1", size1, storedSize); err != nil {
-		return nil, status.Errorf(codes.InvalidArgument, "invalid parameter for consistency proof: %v", err)
+func CalcConsistencyProofNodeAddresses(size1, size2 int64) ([]NodeFetch, error) {
+	if size1 < 1 {
+		return nil, status.Errorf(codes.InvalidArgument, "invalid parameter for consistency proof: size1 %d < 1", size1)
 	}
-	if err := checkSize("size2", size2, storedSize); err != nil {
-		return nil, status.Errorf(codes.InvalidArgument, "invalid parameter for consistency proof: %v", err)
+	if size2 < 1 {
+		return nil, status.Errorf(codes.InvalidArgument, "invalid parameter for consistency proof: size2 %d < 1", size2)
 	}
 	if size1 > size2 {
 		return nil, status.Errorf(codes.InvalidArgument, "invalid parameter for consistency proof: size1 %d > size2 %d", size1, size2)
 	}
-	return consistencyNodes(size1, size2, storedSize)
+	return consistencyNodes(size1, size2)
 }
 
-// consistencyNodes does the calculation of consistency proof node addresses
-// between two tree sizes in a bigger tree of the given storedSize.
-func consistencyNodes(size1, size2, storedSize int64) ([]NodeFetch, error) {
+// consistencyNodes returns node addresses for the consistency proof between
+// the given tree sizes.
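As a small usage sketch (assuming the github.com/google/trillian module at this revision), the new consistency-proof entry point can be called directly. For the 4 to 7 case from the tests it now returns the two complete subtree nodes (0,6) and (1,2), both flagged for rehashing, where the old code returned the single node (2,1):

```go
package main

import (
	"fmt"
	"log"

	"github.com/google/trillian/merkle"
)

func main() {
	// Consistency proof from size 4 to size 7. The returned NodeFetch
	// entries identify complete subtree nodes only; the Rehash flag tells
	// the caller which of them must be folded into one proof hash.
	fetches, err := merkle.CalcConsistencyProofNodeAddresses(4, 7)
	if err != nil {
		log.Fatal(err)
	}
	for _, f := range fetches {
		fmt.Printf("%+v\n", f)
	}
}
```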
+func consistencyNodes(size1, size2 int64) ([]NodeFetch, error) { if size1 == size2 { return []NodeFetch{}, nil } @@ -110,7 +87,7 @@ func consistencyNodes(size1, size2, storedSize int64) ([]NodeFetch, error) { } // Now append the path from this node to the root of size2. - p := proofNodes(index, level, uint64(size2), size2 < storedSize) + p := proofNodes(index, level, uint64(size2), true) return append(proof, p...), nil } @@ -134,6 +111,8 @@ func proofNodes(index uint64, level uint, size uint64, rehash bool) []NodeFetch // are special, because their hashes are collapsed into a single "ephemeral" // hash. This hash is already known if rehash==false, otherwise the caller // needs to compute it based on the hashes of compact range [end+l, size). + // + // TODO(pavelkalinnikov): Always assume rehash = true. var right []compact.NodeID if r != 0 { if rehash { diff --git a/merkle/log_proofs_test.go b/merkle/log_proofs_test.go index bbceb82cb2..ca721616a3 100644 --- a/merkle/log_proofs_test.go +++ b/merkle/log_proofs_test.go @@ -55,20 +55,17 @@ func TestCalcInclusionProofNodeAddresses(t *testing.T) { for _, tc := range []struct { size int64 // The requested past tree size. index int64 // Leaf index in the requested tree. - bigSize int64 // The current tree size. want []NodeFetch wantErr bool }{ // Errors. - {size: 0, index: 0, bigSize: 0, wantErr: true}, - {size: 0, index: 1, bigSize: 0, wantErr: true}, - {size: 1, index: 0, bigSize: 0, wantErr: true}, - {size: 1, index: 2, bigSize: 1, wantErr: true}, - {size: 0, index: 3, bigSize: 0, wantErr: true}, - {size: -1, index: 3, bigSize: -1, wantErr: true}, - {size: 7, index: -1, bigSize: 7, wantErr: true}, - {size: 7, index: 8, bigSize: 7, wantErr: true}, - {size: 7, index: 3, bigSize: -7, wantErr: true}, + {size: 0, index: 0, wantErr: true}, + {size: 0, index: 1, wantErr: true}, + {size: 1, index: 2, wantErr: true}, + {size: 0, index: 3, wantErr: true}, + {size: -1, index: 3, wantErr: true}, + {size: 7, index: -1, wantErr: true}, + {size: 7, index: 8, wantErr: true}, // Small trees. {size: 1, index: 0, want: []NodeFetch{}}, @@ -78,17 +75,17 @@ func TestCalcInclusionProofNodeAddresses(t *testing.T) { // Tree of size 7. {size: 7, index: 0, want: []NodeFetch{ - node(0, 1), node(1, 1), node(2, 1), - }}, // b h l + node(0, 1), node(1, 1), rehash(0, 6), rehash(1, 2), + }}, // b h l=hash(i,j) {size: 7, index: 1, want: []NodeFetch{ - node(0, 0), node(1, 1), node(2, 1), - }}, // a h l + node(0, 0), node(1, 1), rehash(0, 6), rehash(1, 2), + }}, // a h l=hash(i,j) {size: 7, index: 2, want: []NodeFetch{ - node(0, 3), node(1, 0), node(2, 1), - }}, // d g l + node(0, 3), node(1, 0), rehash(0, 6), rehash(1, 2), + }}, // d g l=hash(i,j) {size: 7, index: 3, want: []NodeFetch{ - node(0, 2), node(1, 0), node(2, 1), - }}, // c g l + node(0, 2), node(1, 0), rehash(0, 6), rehash(1, 2), + }}, // c g l=hash(i,j) {size: 7, index: 4, want: []NodeFetch{ node(0, 5), node(0, 6), node(2, 0), }}, // f j k @@ -100,47 +97,42 @@ func TestCalcInclusionProofNodeAddresses(t *testing.T) { }}, // i k // Smaller trees within a bigger stored tree. 
- {size: 4, index: 2, bigSize: 7, want: []NodeFetch{ + {size: 4, index: 2, want: []NodeFetch{ node(0, 3), node(1, 0), }}, // d g - {size: 5, index: 3, bigSize: 7, want: []NodeFetch{ + {size: 5, index: 3, want: []NodeFetch{ node(0, 2), node(1, 0), node(0, 4), }}, // c g e - {size: 6, index: 3, bigSize: 7, want: []NodeFetch{ + {size: 6, index: 3, want: []NodeFetch{ node(0, 2), node(1, 0), node(1, 2), }}, // c g i - {size: 6, index: 4, bigSize: 8, want: []NodeFetch{ + {size: 6, index: 4, want: []NodeFetch{ node(0, 5), node(2, 0), }}, // f k - {size: 7, index: 1, bigSize: 8, want: []NodeFetch{ + {size: 7, index: 1, want: []NodeFetch{ node(0, 0), node(1, 1), rehash(0, 6), rehash(1, 2), }}, // a h l=hash(i,j) - {size: 7, index: 3, bigSize: 8, want: []NodeFetch{ + {size: 7, index: 3, want: []NodeFetch{ node(0, 2), node(1, 0), rehash(0, 6), rehash(1, 2), }}, // c g l=hash(i,j) // Some rehashes in the middle of the returned list. - {size: 15, index: 10, bigSize: 21, want: []NodeFetch{ + {size: 15, index: 10, want: []NodeFetch{ node(0, 11), node(1, 4), rehash(0, 14), rehash(1, 6), node(3, 0), }}, - {size: 31, index: 24, bigSize: 41, want: []NodeFetch{ + {size: 31, index: 24, want: []NodeFetch{ node(0, 25), node(1, 13), rehash(0, 30), rehash(1, 14), node(3, 2), node(4, 0), }}, - {size: 95, index: 81, bigSize: 111, want: []NodeFetch{ + {size: 95, index: 81, want: []NodeFetch{ node(0, 80), node(1, 41), node(2, 21), rehash(0, 94), rehash(1, 46), rehash(2, 22), node(4, 4), node(6, 0), }}, } { - bigSize := tc.bigSize - // Use the same tree size by default. - if bigSize == 0 && !tc.wantErr { - bigSize = tc.size - } - t.Run(fmt.Sprintf("%d:%d:%d", tc.size, tc.index, bigSize), func(t *testing.T) { - proof, err := CalcInclusionProofNodeAddresses(tc.size, tc.index, bigSize) + t.Run(fmt.Sprintf("%d:%d", tc.size, tc.index), func(t *testing.T) { + proof, err := CalcInclusionProofNodeAddresses(tc.size, tc.index) if tc.wantErr { if err == nil { t.Fatal("accepted bad params") @@ -187,7 +179,6 @@ func TestCalcConsistencyProofNodeAddresses(t *testing.T) { for _, tc := range []struct { size1 int64 // The smaller of the two tree sizes. size2 int64 // The bigger of the two tree sizes. - bigSize int64 // The current tree size. want []NodeFetch wantErr bool }{ @@ -196,9 +187,7 @@ func TestCalcConsistencyProofNodeAddresses(t *testing.T) { {size1: -10, size2: 0, wantErr: true}, {size1: -1, size2: -1, wantErr: true}, {size1: 0, size2: 0, wantErr: true}, - {size1: 5, size2: 9, bigSize: 7, wantErr: true}, {size1: 9, size2: 8, wantErr: true}, - {size1: 9, size2: 8, bigSize: 20, wantErr: true}, {size1: 1, size2: 2, want: []NodeFetch{node(0, 1)}}, // b {size1: 1, size2: 4, want: []NodeFetch{node(0, 1), node(1, 1)}}, // b h @@ -210,12 +199,12 @@ func TestCalcConsistencyProofNodeAddresses(t *testing.T) { {size1: 2, size2: 3, want: []NodeFetch{node(0, 2)}}, // c {size1: 2, size2: 8, want: []NodeFetch{node(1, 1), node(2, 1)}}, // h l {size1: 3, size2: 7, want: []NodeFetch{ - node(0, 2), // c - node(0, 3), // d - node(1, 0), // g - node(2, 1), // l + node(0, 2), // c + node(0, 3), // d + node(1, 0), // g + rehash(0, 6), rehash(1, 2), // l=hash(i,j) }}, - {size1: 4, size2: 7, want: []NodeFetch{node(2, 1)}}, // l + {size1: 4, size2: 7, want: []NodeFetch{rehash(0, 6), rehash(1, 2)}}, // l=hash(i,j) {size1: 5, size2: 7, want: []NodeFetch{ node(0, 4), // e node(0, 5), // f @@ -244,43 +233,38 @@ func TestCalcConsistencyProofNodeAddresses(t *testing.T) { {size1: 8, size2: 8, want: []NodeFetch{}}, // Smaller trees within a bigger stored tree. 
- {size1: 2, size2: 4, bigSize: 7, want: []NodeFetch{node(1, 1)}}, // h - {size1: 3, size2: 5, bigSize: 7, want: []NodeFetch{ + {size1: 2, size2: 4, want: []NodeFetch{node(1, 1)}}, // h + {size1: 3, size2: 5, want: []NodeFetch{ node(0, 2), node(0, 3), node(1, 0), node(0, 4), }}, // c d g e - {size1: 3, size2: 6, bigSize: 7, want: []NodeFetch{ + {size1: 3, size2: 6, want: []NodeFetch{ node(0, 2), node(0, 3), node(1, 0), node(1, 2), }}, // c d g i - {size1: 4, size2: 6, bigSize: 8, want: []NodeFetch{node(1, 2)}}, // i - {size1: 1, size2: 7, bigSize: 8, want: []NodeFetch{ + {size1: 4, size2: 6, want: []NodeFetch{node(1, 2)}}, // i + {size1: 1, size2: 7, want: []NodeFetch{ node(0, 1), node(1, 1), rehash(0, 6), rehash(1, 2), }}, // b h l=hash(i,j) - {size1: 3, size2: 7, bigSize: 8, want: []NodeFetch{ + {size1: 3, size2: 7, want: []NodeFetch{ node(0, 2), node(0, 3), node(1, 0), rehash(0, 6), rehash(1, 2), }}, // c d g l=hash(i,j) // Some rehashes in the middle of the returned list. - {size1: 10, size2: 15, bigSize: 21, want: []NodeFetch{ + {size1: 10, size2: 15, want: []NodeFetch{ node(1, 4), node(1, 5), rehash(0, 14), rehash(1, 6), node(3, 0), }}, - {size1: 24, size2: 31, bigSize: 41, want: []NodeFetch{ + {size1: 24, size2: 31, want: []NodeFetch{ node(3, 2), rehash(0, 30), rehash(1, 14), rehash(2, 6), node(4, 0), }}, - {size1: 81, size2: 95, bigSize: 111, want: []NodeFetch{ + {size1: 81, size2: 95, want: []NodeFetch{ node(0, 80), node(0, 81), node(1, 41), node(2, 21), rehash(0, 94), rehash(1, 46), rehash(2, 22), node(4, 4), node(6, 0), }}, } { - bigSize := tc.bigSize - // Use the same tree size by default. - if bigSize == 0 && !tc.wantErr { - bigSize = tc.size2 - } - t.Run(fmt.Sprintf("%d:%d:%d", tc.size1, tc.size2, bigSize), func(t *testing.T) { - proof, err := CalcConsistencyProofNodeAddresses(tc.size1, tc.size2, bigSize) + t.Run(fmt.Sprintf("%d:%d", tc.size1, tc.size2), func(t *testing.T) { + proof, err := CalcConsistencyProofNodeAddresses(tc.size1, tc.size2) if tc.wantErr { if err == nil { t.Fatal("accepted bad params") @@ -300,7 +284,7 @@ func TestInclusionSucceedsUpToTreeSize(t *testing.T) { const maxSize = 555 for ts := 1; ts <= maxSize; ts++ { for i := ts; i < ts; i++ { - if _, err := CalcInclusionProofNodeAddresses(int64(ts), int64(i), int64(ts)); err != nil { + if _, err := CalcInclusionProofNodeAddresses(int64(ts), int64(i)); err != nil { t.Errorf("CalcInclusionProofNodeAddresses(ts:%d, i:%d) = %v", ts, i, err) } } @@ -311,7 +295,7 @@ func TestConsistencySucceedsUpToTreeSize(t *testing.T) { const maxSize = 100 for s1 := 1; s1 < maxSize; s1++ { for s2 := s1 + 1; s2 <= maxSize; s2++ { - if _, err := CalcConsistencyProofNodeAddresses(int64(s1), int64(s2), int64(s2)); err != nil { + if _, err := CalcConsistencyProofNodeAddresses(int64(s1), int64(s2)); err != nil { t.Errorf("CalcConsistencyProofNodeAddresses(%d, %d) = %v", s1, s2, err) } } diff --git a/server/log_rpc_server.go b/server/log_rpc_server.go index c6dad9ad26..a473e50033 100644 --- a/server/log_rpc_server.go +++ b/server/log_rpc_server.go @@ -205,7 +205,7 @@ func (t *TrillianLogRPCServer) GetInclusionProof(ctx context.Context, req *trill return r, nil } - proof, err := getInclusionProofForLeafIndex(ctx, tx, hasher, req.TreeSize, req.LeafIndex, int64(root.TreeSize)) + proof, err := getInclusionProofForLeafIndex(ctx, tx, hasher, req.TreeSize, req.LeafIndex) if err != nil { return nil, err } @@ -267,7 +267,7 @@ func (t *TrillianLogRPCServer) GetInclusionProofByHash(ctx context.Context, req if leaf.LeafIndex >= req.TreeSize { 
continue } - proof, err := getInclusionProofForLeafIndex(ctx, tx, hasher, req.TreeSize, leaf.LeafIndex, int64(root.TreeSize)) + proof, err := getInclusionProofForLeafIndex(ctx, tx, hasher, req.TreeSize, leaf.LeafIndex) if err != nil { return nil, err } @@ -327,7 +327,7 @@ func (t *TrillianLogRPCServer) GetConsistencyProof(ctx context.Context, req *tri return r, nil } // Try to get consistency proof - proof, err := tryGetConsistencyProof(ctx, req.FirstTreeSize, req.SecondTreeSize, int64(root.TreeSize), tx, hasher) + proof, err := tryGetConsistencyProof(ctx, req.FirstTreeSize, req.SecondTreeSize, tx, hasher) if err != nil { return nil, err } @@ -386,7 +386,7 @@ func (t *TrillianLogRPCServer) GetLatestSignedLogRoot(ctx context.Context, req * return nil, err } // Try to get consistency proof - proof, err := tryGetConsistencyProof(ctx, reqProof.FirstTreeSize, reqProof.SecondTreeSize, int64(root.TreeSize), tx, hasher) + proof, err := tryGetConsistencyProof(ctx, reqProof.FirstTreeSize, reqProof.SecondTreeSize, tx, hasher) if err != nil { return nil, err } @@ -398,8 +398,8 @@ func (t *TrillianLogRPCServer) GetLatestSignedLogRoot(ctx context.Context, req * return r, nil } -func tryGetConsistencyProof(ctx context.Context, firstTreeSize, secondTreeSize, rootTreeSize int64, tx storage.ReadOnlyLogTreeTX, hasher hashers.LogHasher) (*trillian.Proof, error) { - nodeFetches, err := merkle.CalcConsistencyProofNodeAddresses(firstTreeSize, secondTreeSize, rootTreeSize) +func tryGetConsistencyProof(ctx context.Context, firstTreeSize, secondTreeSize int64, tx storage.ReadOnlyLogTreeTX, hasher hashers.LogHasher) (*trillian.Proof, error) { + nodeFetches, err := merkle.CalcConsistencyProofNodeAddresses(firstTreeSize, secondTreeSize) if err != nil { return nil, err } @@ -498,7 +498,7 @@ func (t *TrillianLogRPCServer) GetEntryAndProof(ctx context.Context, req *trilli } if req.TreeSize <= int64(root.TreeSize) { - proof, err := getInclusionProofForLeafIndex(ctx, tx, hasher, req.TreeSize, req.LeafIndex, int64(root.TreeSize)) + proof, err := getInclusionProofForLeafIndex(ctx, tx, hasher, req.TreeSize, req.LeafIndex) if err != nil { return nil, err } @@ -545,9 +545,9 @@ func (t *TrillianLogRPCServer) closeAndLog(ctx context.Context, logID int64, tx // getInclusionProofForLeafIndex is used by multiple handlers. It does the storage fetching // and makes additional checks on the returned proof. 
Returns a Proof suitable for inclusion in // an RPC response -func getInclusionProofForLeafIndex(ctx context.Context, tx storage.ReadOnlyLogTreeTX, hasher hashers.LogHasher, snapshot, leafIndex, treeSize int64) (*trillian.Proof, error) { +func getInclusionProofForLeafIndex(ctx context.Context, tx storage.ReadOnlyLogTreeTX, hasher hashers.LogHasher, size, leafIndex int64) (*trillian.Proof, error) { // We have the tree size and leaf index so we know the nodes that we need to serve the proof - proofNodeIDs, err := merkle.CalcInclusionProofNodeAddresses(snapshot, leafIndex, treeSize) + proofNodeIDs, err := merkle.CalcInclusionProofNodeAddresses(size, leafIndex) if err != nil { return nil, err } diff --git a/server/log_rpc_server_test.go b/server/log_rpc_server_test.go index da0905a377..9aae43e359 100644 --- a/server/log_rpc_server_test.go +++ b/server/log_rpc_server_test.go @@ -107,10 +107,11 @@ var ( nodeIdsInclusionSize7Index2 = []compact.NodeID{ compact.NewNodeID(0, 3), compact.NewNodeID(1, 0), - compact.NewNodeID(2, 1), + compact.NewNodeID(0, 6), + compact.NewNodeID(1, 2), } - nodeIdsConsistencySize4ToSize7 = []compact.NodeID{compact.NewNodeID(2, 1)} + nodeIdsConsistencySize4ToSize7 = []compact.NodeID{compact.NewNodeID(0, 6), compact.NewNodeID(1, 2)} corruptLogRoot = &trillian.SignedLogRoot{LogRoot: []byte("this is not tls encoded data")} ) @@ -598,7 +599,7 @@ func TestGetProofByHashErrors(t *testing.T) { tx.EXPECT().Close().Return(nil) }, req: &getInclusionProofByHashRequest7, - errStr: "expected 3 nodes", + errStr: "expected 4 nodes", }, { name: "wrong node", @@ -608,7 +609,10 @@ func TestGetProofByHashErrors(t *testing.T) { tx.EXPECT().LatestSignedLogRoot(gomock.Any()).Return(signedRoot1, nil) tx.EXPECT().GetLeavesByHash(gomock.Any(), [][]byte{leafHash1}, false).Return([]*trillian.LogLeaf{{LeafIndex: 2}}, nil) // We set this up so one of the returned nodes has the wrong ID - tx.EXPECT().GetMerkleNodes(gomock.Any(), nodeIdsInclusionSize7Index2).Return([]tree.Node{{ID: nodeIdsInclusionSize7Index2[0]}, {ID: compact.NewNodeID(4, 5)}, {ID: nodeIdsInclusionSize7Index2[2]}}, nil) + tx.EXPECT().GetMerkleNodes(gomock.Any(), nodeIdsInclusionSize7Index2).Return([]tree.Node{ + {ID: nodeIdsInclusionSize7Index2[0]}, {ID: compact.NewNodeID(4, 5)}, + {ID: nodeIdsInclusionSize7Index2[2]}, {ID: nodeIdsInclusionSize7Index2[3]}, + }, nil) tx.EXPECT().Close().Return(nil) }, req: &getInclusionProofByHashRequest7, @@ -712,6 +716,7 @@ func TestGetProofByHash(t *testing.T) { {ID: nodeIdsInclusionSize7Index2[0], Hash: []byte("nodehash0")}, {ID: nodeIdsInclusionSize7Index2[1], Hash: []byte("nodehash1")}, {ID: nodeIdsInclusionSize7Index2[2], Hash: []byte("nodehash2")}, + {ID: nodeIdsInclusionSize7Index2[3], Hash: []byte("nodehash3")}, }, nil).AnyTimes() mockTX.EXPECT().Commit(gomock.Any()).Return(nil) mockTX.EXPECT().Close().Return(nil) @@ -747,7 +752,7 @@ func TestGetProofByHash(t *testing.T) { Hashes: [][]byte{ []byte("nodehash0"), []byte("nodehash1"), - []byte("nodehash2"), + th.HashChildren([]byte("nodehash3"), []byte("nodehash2")), }, } @@ -825,7 +830,7 @@ func TestGetProofByIndex(t *testing.T) { tx.EXPECT().Close().Return(nil) }, req: &getInclusionProofByIndexRequest7, - errStr: "expected 3 nodes", + errStr: "expected 4 nodes", }, { name: "wrong node", @@ -834,7 +839,10 @@ func TestGetProofByIndex(t *testing.T) { s.EXPECT().SnapshotForTree(gomock.Any(), cmpMatcher{tree1}).Return(tx, nil) // We set this up so one of the returned nodes has the wrong ID 
tx.EXPECT().LatestSignedLogRoot(gomock.Any()).Return(signedRoot1, nil) - tx.EXPECT().GetMerkleNodes(gomock.Any(), nodeIdsInclusionSize7Index2).Return([]tree.Node{{ID: nodeIdsInclusionSize7Index2[0]}, {ID: compact.NewNodeID(4, 5)}, {ID: nodeIdsInclusionSize7Index2[2]}}, nil) + tx.EXPECT().GetMerkleNodes(gomock.Any(), nodeIdsInclusionSize7Index2).Return([]tree.Node{ + {ID: nodeIdsInclusionSize7Index2[0]}, {ID: compact.NewNodeID(4, 5)}, + {ID: nodeIdsInclusionSize7Index2[2]}, {ID: nodeIdsInclusionSize7Index2[3]}, + }, nil) tx.EXPECT().Close().Return(nil) }, req: &getInclusionProofByIndexRequest7, @@ -846,7 +854,10 @@ func TestGetProofByIndex(t *testing.T) { tx := storage.NewMockLogTreeTX(c) s.EXPECT().SnapshotForTree(gomock.Any(), cmpMatcher{tree1}).Return(tx, nil) tx.EXPECT().LatestSignedLogRoot(gomock.Any()).Return(signedRoot1, nil) - tx.EXPECT().GetMerkleNodes(gomock.Any(), nodeIdsInclusionSize7Index2).Return([]tree.Node{{ID: nodeIdsInclusionSize7Index2[0]}, {ID: nodeIdsInclusionSize7Index2[1]}, {ID: nodeIdsInclusionSize7Index2[2]}}, nil) + tx.EXPECT().GetMerkleNodes(gomock.Any(), nodeIdsInclusionSize7Index2).Return([]tree.Node{ + {ID: nodeIdsInclusionSize7Index2[0]}, {ID: nodeIdsInclusionSize7Index2[1]}, + {ID: nodeIdsInclusionSize7Index2[2]}, {ID: nodeIdsInclusionSize7Index2[3]}, + }, nil) tx.EXPECT().Commit(gomock.Any()).Return(errors.New("COMMIT")) tx.EXPECT().Close().Return(nil) }, @@ -885,6 +896,7 @@ func TestGetProofByIndex(t *testing.T) { {ID: nodeIdsInclusionSize7Index2[0], Hash: []byte("nodehash0")}, {ID: nodeIdsInclusionSize7Index2[1], Hash: []byte("nodehash1")}, {ID: nodeIdsInclusionSize7Index2[2], Hash: []byte("nodehash2")}, + {ID: nodeIdsInclusionSize7Index2[3], Hash: []byte("nodehash3")}, }, nil) tx.EXPECT().Commit(gomock.Any()).Return(nil) tx.EXPECT().Close().Return(nil) @@ -897,7 +909,7 @@ func TestGetProofByIndex(t *testing.T) { Hashes: [][]byte{ []byte("nodehash0"), []byte("nodehash1"), - []byte("nodehash2"), + th.HashChildren([]byte("nodehash3"), []byte("nodehash2")), }, }, }, @@ -995,6 +1007,7 @@ func TestGetEntryAndProof(t *testing.T) { {ID: nodeIdsInclusionSize7Index2[0], Hash: []byte("nodehash0")}, {ID: nodeIdsInclusionSize7Index2[1], Hash: []byte("nodehash1")}, {ID: nodeIdsInclusionSize7Index2[2], Hash: []byte("nodehash2")}, + {ID: nodeIdsInclusionSize7Index2[3], Hash: []byte("nodehash3")}, }, nil) tx.EXPECT().GetLeavesByRange(gomock.Any(), int64(2), int64(1)).Return(nil, errors.New("STORAGE")) tx.EXPECT().Close().Return(nil) @@ -1012,6 +1025,7 @@ func TestGetEntryAndProof(t *testing.T) { {ID: nodeIdsInclusionSize7Index2[0], Hash: []byte("nodehash0")}, {ID: nodeIdsInclusionSize7Index2[1], Hash: []byte("nodehash1")}, {ID: nodeIdsInclusionSize7Index2[2], Hash: []byte("nodehash2")}, + {ID: nodeIdsInclusionSize7Index2[3], Hash: []byte("nodehash3")}, }, nil) tx.EXPECT().GetLeavesByRange(gomock.Any(), int64(2), int64(1)).Return([]*trillian.LogLeaf{leaf1}, nil) tx.EXPECT().Commit(gomock.Any()).Return(errors.New("COMMIT")) @@ -1052,6 +1066,7 @@ func TestGetEntryAndProof(t *testing.T) { {ID: nodeIdsInclusionSize7Index2[0], Hash: []byte("nodehash0")}, {ID: nodeIdsInclusionSize7Index2[1], Hash: []byte("nodehash1")}, {ID: nodeIdsInclusionSize7Index2[2], Hash: []byte("nodehash2")}, + {ID: nodeIdsInclusionSize7Index2[3], Hash: []byte("nodehash3")}, }, nil) // Code passed one leaf index so expects one result, but we return more tx.EXPECT().GetLeavesByRange(gomock.Any(), int64(2), int64(1)).Return([]*trillian.LogLeaf{leaf1, leaf3}, nil) @@ -1070,6 +1085,7 @@ func 
TestGetEntryAndProof(t *testing.T) { {ID: nodeIdsInclusionSize7Index2[0], Hash: []byte("nodehash0")}, {ID: nodeIdsInclusionSize7Index2[1], Hash: []byte("nodehash1")}, {ID: nodeIdsInclusionSize7Index2[2], Hash: []byte("nodehash2")}, + {ID: nodeIdsInclusionSize7Index2[3], Hash: []byte("nodehash3")}, }, nil) tx.EXPECT().GetLeavesByRange(gomock.Any(), int64(2), int64(1)).Return([]*trillian.LogLeaf{leaf1}, nil) tx.EXPECT().Commit(gomock.Any()).Return(nil) @@ -1083,7 +1099,7 @@ func TestGetEntryAndProof(t *testing.T) { Hashes: [][]byte{ []byte("nodehash0"), []byte("nodehash1"), - []byte("nodehash2"), + th.HashChildren([]byte("nodehash3"), []byte("nodehash2")), }, }, Leaf: leaf1, @@ -1113,6 +1129,7 @@ func TestGetEntryAndProof(t *testing.T) { {ID: nodeIdsInclusionSize7Index2[0], Hash: []byte("nodehash0")}, {ID: nodeIdsInclusionSize7Index2[1], Hash: []byte("nodehash1")}, {ID: nodeIdsInclusionSize7Index2[2], Hash: []byte("nodehash2")}, + {ID: nodeIdsInclusionSize7Index2[3], Hash: []byte("nodehash3")}, }, nil) tx.EXPECT().GetLeavesByRange(gomock.Any(), int64(2), int64(1)).Return([]*trillian.LogLeaf{leaf1}, nil) tx.EXPECT().Commit(gomock.Any()).Return(nil) @@ -1126,7 +1143,7 @@ func TestGetEntryAndProof(t *testing.T) { Hashes: [][]byte{ []byte("nodehash0"), []byte("nodehash1"), - []byte("nodehash2"), + th.HashChildren([]byte("nodehash3"), []byte("nodehash2")), }, }, Leaf: leaf1, @@ -1215,26 +1232,36 @@ func TestGetConsistencyProof(t *testing.T) { errStr: "commit", wantHashes: [][]byte{[]byte("nodehash")}, nodeIDs: nodeIdsConsistencySize4ToSize7, - nodes: []tree.Node{{ID: compact.NewNodeID(2, 1), Hash: []byte("nodehash")}}, - commitErr: errors.New("commit() failed"), + nodes: []tree.Node{ + {ID: compact.NewNodeID(0, 6), Hash: []byte("nodehash1")}, + {ID: compact.NewNodeID(1, 2), Hash: []byte("nodehash2")}, + }, + commitErr: errors.New("commit() failed"), }, { // Storage doesn't return the requested node, should result in an error. req: &getConsistencyProofRequest7, - errStr: "expected node {2 1} at", + errStr: "expected node {0 6} at", wantHashes: [][]byte{[]byte("nodehash")}, nodeIDs: nodeIdsConsistencySize4ToSize7, - nodes: []tree.Node{{ID: compact.NewNodeID(3, 1), Hash: []byte("nodehash")}}, - noCommit: true, + nodes: []tree.Node{ + {ID: compact.NewNodeID(3, 1), Hash: []byte("nodehash1")}, + {ID: compact.NewNodeID(1, 2), Hash: []byte("nodehash2")}, + }, + noCommit: true, }, { // Storage returns an unexpected extra node, should result in an error. req: &getConsistencyProofRequest7, - errStr: "expected 1 nodes", + errStr: "expected 2 nodes", wantHashes: [][]byte{[]byte("nodehash")}, nodeIDs: nodeIdsConsistencySize4ToSize7, - nodes: []tree.Node{{ID: compact.NewNodeID(2, 1), Hash: []byte("nodehash")}, {ID: compact.NewNodeID(3, 10), Hash: []byte("nodehash2")}}, - noCommit: true, + nodes: []tree.Node{ + {ID: compact.NewNodeID(0, 6), Hash: []byte("nodehash1")}, + {ID: compact.NewNodeID(1, 2), Hash: []byte("nodehash2")}, + {ID: compact.NewNodeID(3, 10), Hash: []byte("nodehash3")}, + }, + noCommit: true, }, { // Ask for a proof from size 4 to 8 but the tree is only size 7. This should succeed but with no proof. @@ -1246,9 +1273,12 @@ func TestGetConsistencyProof(t *testing.T) { { // A normal request which should succeed. 
req: &getConsistencyProofRequest7, - wantHashes: [][]byte{[]byte("nodehash")}, + wantHashes: [][]byte{th.HashChildren([]byte("nodehash2"), []byte("nodehash1"))}, nodeIDs: nodeIdsConsistencySize4ToSize7, - nodes: []tree.Node{{ID: compact.NewNodeID(2, 1), Hash: []byte("nodehash")}}, + nodes: []tree.Node{ + {ID: compact.NewNodeID(0, 6), Hash: []byte("nodehash1")}, + {ID: compact.NewNodeID(1, 2), Hash: []byte("nodehash2")}, + }, }, { // Tests first==second edge case, which should succeed but is an empty proof. diff --git a/server/proof_fetcher_test.go b/server/proof_fetcher_test.go index c9c9a9663a..60d376db3d 100644 --- a/server/proof_fetcher_test.go +++ b/server/proof_fetcher_test.go @@ -40,7 +40,7 @@ func TestTree813FetchAll(t *testing.T) { }) for l := int64(271); l < ts; l++ { - fetches, err := merkle.CalcInclusionProofNodeAddresses(ts, l, ts) + fetches, err := merkle.CalcInclusionProofNodeAddresses(ts, l) if err != nil { t.Fatal(err) } @@ -82,7 +82,7 @@ func TestTree32InclusionProofFetchAll(t *testing.T) { for s := int64(2); s <= int64(ts); s++ { for l := int64(0); l < s; l++ { - fetches, err := merkle.CalcInclusionProofNodeAddresses(s, l, int64(ts)) + fetches, err := merkle.CalcInclusionProofNodeAddresses(s, l) if err != nil { t.Fatal(err) } @@ -127,7 +127,7 @@ func TestTree32InclusionProofFetchMultiBatch(t *testing.T) { for s := int64(2); s <= 32; s++ { for l := int64(0); l < s; l++ { - fetches, err := merkle.CalcInclusionProofNodeAddresses(s, l, 32) + fetches, err := merkle.CalcInclusionProofNodeAddresses(s, l) if err != nil { t.Fatal(err) } @@ -165,7 +165,7 @@ func TestTree32ConsistencyProofFetchAll(t *testing.T) { for s1 := int64(2); s1 < int64(ts); s1++ { for s2 := int64(s1 + 1); s2 < int64(ts); s2++ { - fetches, err := merkle.CalcConsistencyProofNodeAddresses(s1, s2, int64(ts)) + fetches, err := merkle.CalcConsistencyProofNodeAddresses(s1, s2) if err != nil { t.Fatal(err) }
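Finally, a self-contained sanity check (not part of the patch) of why this is safe for verifiers: the hash that storage used to hand back as the ephemeral node (2,1) of a 7-leaf tree is, by definition, the fold of the complete subtree nodes (1,2) and (0,6), so a proof composed by rehashing verifies against the same root. The sketch below uses RFC 6962 hashing directly; the verification routine follows the RFC 6962/9162 algorithm and is illustrative, not production code.

```go
package main

import (
	"bytes"
	"crypto/sha256"
	"fmt"
)

// RFC 6962 leaf hash: SHA-256 over 0x00 || leaf data.
func hashLeaf(data []byte) []byte {
	h := sha256.Sum256(append([]byte{0}, data...))
	return h[:]
}

// RFC 6962 interior-node hash: SHA-256 over 0x01 || left || right.
func hashChildren(left, right []byte) []byte {
	h := sha256.Sum256(append(append([]byte{1}, left...), right...))
	return h[:]
}

// subtreeHash computes the Merkle hash over leaves[begin:end) using the
// RFC 6962 split point (the largest power of two smaller than the width).
func subtreeHash(leaves [][]byte, begin, end int) []byte {
	if end-begin == 1 {
		return hashLeaf(leaves[begin])
	}
	split := 1
	for split*2 < end-begin {
		split *= 2
	}
	return hashChildren(
		subtreeHash(leaves, begin, begin+split),
		subtreeHash(leaves, begin+split, end))
}

// verifyInclusion recomputes the root from a leaf hash and an inclusion
// proof, following the RFC 6962/9162 verification algorithm. A complete
// verifier would also check the index bounds and that sn reaches zero.
func verifyInclusion(index, size uint64, leafHash []byte, proof [][]byte) []byte {
	fn, sn := index, size-1
	r := leafHash
	for _, p := range proof {
		if fn%2 == 1 || fn == sn {
			r = hashChildren(p, r)
			for fn%2 == 0 && fn != 0 {
				fn >>= 1
				sn >>= 1
			}
		} else {
			r = hashChildren(r, p)
		}
		fn >>= 1
		sn >>= 1
	}
	return r
}

func main() {
	// Seven arbitrary leaves, i.e. the size-7 tree used throughout the tests.
	leaves := make([][]byte, 7)
	for i := range leaves {
		leaves[i] = []byte{byte(i)}
	}
	root := subtreeHash(leaves, 0, 7)

	// Inclusion proof for leaf 2: d, g, and the former ephemeral node l,
	// now composed by the caller as hash(i, j) from the complete subtree
	// nodes (1,2) over leaves 4..5 and (0,6) over leaf 6.
	proof := [][]byte{
		hashLeaf(leaves[3]),       // d = node (0,3)
		subtreeHash(leaves, 0, 2), // g = node (1,0)
		hashChildren(subtreeHash(leaves, 4, 6), hashLeaf(leaves[6])), // l = hash(i, j)
	}
	got := verifyInclusion(2, 7, hashLeaf(leaves[2]), proof)
	fmt.Println("proof verifies:", bytes.Equal(got, root)) // true
}
```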