Move and unexport layout and Suffix code (#2583)
The layout.go and suffix.go files are only consumed by the cache package.
This change moves both files into the cache package and unexports their types and functions.
pav-kv authored Jul 20, 2021
1 parent 39c005d commit 0654e78
Showing 7 changed files with 68 additions and 70 deletions.
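For orientation, here is a comment-only sketch of the rename mapping that the diff below introduces; the left-hand names are the previously exported identifiers in storage/tree, the right-hand names are their unexported counterparts in storage/cache.

// Exported identifiers formerly in storage/tree, and their unexported
// counterparts now private to storage/cache (per the diff below):
//
//   tree.GetTileID(id)          -> getTileID(id)
//   tree.Split(id)              -> splitID(id)
//   tree.Suffix                 -> suffix
//   tree.NewSuffix(bits, path)  -> newSuffix(bits, path)
//   tree.EmptySuffix            -> emptySuffix
//   tree.ParseSuffix(s)         -> parseSuffix(s)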
16 changes: 8 additions & 8 deletions storage/tree/layout.go → storage/cache/layout.go
@@ -12,21 +12,21 @@
// See the License for the specific language governing permissions and
// limitations under the License.

package tree
package cache

import (
"encoding/binary"

"github.com/google/trillian/merkle/compact"
)

// GetTileID returns the path from the "virtual" root at level 64 to the root
// getTileID returns the path from the "virtual" root at level 64 to the root
// of the tile that the given node belongs to. All the bits of the returned
// slice are significant because all tile heights are 8.
//
// Note that a root of a tile belongs to a tile above it (as its leaf node).
// The exception is the "virtual" root which belongs to its own "pseudo" tile.
func GetTileID(id compact.NodeID) []byte {
func getTileID(id compact.NodeID) []byte {
if id.Level >= 64 {
return []byte{} // Note: Not nil, so that storage/SQL doesn't use NULL.
}
@@ -38,19 +38,19 @@ func GetTileID(id compact.NodeID) []byte {
return bytes[8-bytesCount:]
}

// Split returns the path from the "virtual" root at level 64 to the root of
// splitID returns the path from the "virtual" root at level 64 to the root of
// the tile that the given node belongs to, and the corresponding local address
// of this node within this tile.
func Split(id compact.NodeID) ([]byte, *Suffix) {
func splitID(id compact.NodeID) ([]byte, *suffix) {
if id.Level >= 64 {
return []byte{}, EmptySuffix
return []byte{}, emptySuffix
}
tileID := GetTileID(id)
tileID := getTileID(id)

var bytes [8]byte
bits := 64 - id.Level - uint(len(tileID)*8)
binary.BigEndian.PutUint64(bytes[:], id.Index<<(64-bits))
suffix := NewSuffix(uint8(bits), bytes[:])
suffix := newSuffix(uint8(bits), bytes[:])

return tileID, suffix
}
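As a worked example of the layout arithmetic above, the standalone sketch below reproduces one of the vectors from layout_test.go (a node at level 49, index 1): the tile ID is the single byte 0x00 and the in-tile suffix has 7 significant bits with path byte 0x02. The tile ID is taken from the test vector rather than recomputed, since the full getTileID body is elided in this hunk.

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// Node at level 49, index 1, as in TestSplitID.
	level, index := uint(49), uint64(1)
	tileID := []byte{0x00} // getTileID result for this node, per the test vector

	// Same arithmetic as splitID: bits left over after the tile path.
	bits := 64 - level - uint(len(tileID)*8) // 64 - 49 - 8 = 7 significant bits
	var b [8]byte
	binary.BigEndian.PutUint64(b[:], index<<(64-bits)) // 1<<57 => b[0] == 0x02

	fmt.Printf("tileID=%x suffixBits=%d suffixPath=%x\n", tileID, bits, b[:1])
	// Output: tileID=00 suffixBits=7 suffixPath=02
}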
10 changes: 5 additions & 5 deletions storage/tree/layout_test.go → storage/cache/layout_test.go
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

package tree
package cache

import (
"bytes"
@@ -45,14 +45,14 @@ func TestGetTileID(t *testing.T) {
{id: nID(64, 0), want: []byte{}},
} {
t.Run(fmt.Sprintf("%d:%d", tc.id.Level, tc.id.Index), func(t *testing.T) {
if got, want := GetTileID(tc.id), tc.want; !bytes.Equal(got, want) {
t.Errorf("GetTileID: got %x, want %x", got, want)
if got, want := getTileID(tc.id), tc.want; !bytes.Equal(got, want) {
t.Errorf("getTileID: got %x, want %x", got, want)
}
})
}
}

func TestSplitNodeID(t *testing.T) {
func TestSplitID(t *testing.T) {
for _, tc := range []struct {
id compact.NodeID
outPrefix []byte
@@ -75,7 +75,7 @@ func TestSplitNodeID(t *testing.T) {
{nID(49, 0x0003>>1), []byte{0x00}, 7, []byte{0x02}},
} {
t.Run(fmt.Sprintf("%v", tc.id), func(t *testing.T) {
p, s := Split(tc.id)
p, s := splitID(tc.id)
if got, want := p, tc.outPrefix; !bytes.Equal(got, want) {
t.Errorf("prefix %x, want %x", got, want)
}
3 changes: 1 addition & 2 deletions storage/cache/log_tile.go
@@ -21,7 +21,6 @@ import (
"github.com/google/trillian/merkle/compact"
"github.com/google/trillian/merkle/hashers"
"github.com/google/trillian/storage/storagepb"
"github.com/google/trillian/storage/tree"
)

const (
@@ -129,7 +128,7 @@ func toSuffix(id compact.NodeID) string {
depth := logStrataDepth - int(id.Level)
var index [8]byte
binary.BigEndian.PutUint64(index[:], id.Index<<(maxLogDepth-depth))
return tree.NewSuffix(uint8(depth), index[:]).String()
return newSuffix(uint8(depth), index[:]).String()
}

// newEmptyTile creates an empty log tile for the passed-in ID.
6 changes: 3 additions & 3 deletions storage/cache/subtree_cache.go
@@ -84,7 +84,7 @@ func (s *SubtreeCache) preload(ids []compact.NodeID, getSubtrees GetSubtreesFunc
// Figure out the set of subtrees we need.
want := make(map[string]bool)
for _, id := range ids {
subID := string(tree.GetTileID(id))
subID := string(getTileID(id))
if _, ok := want[subID]; ok {
// No need to check s.subtrees map twice.
continue
@@ -221,7 +221,7 @@ func (s *SubtreeCache) prefixIsDirty(prefixKey string) bool {

// getNodeHash returns a single node hash from the cache.
func (s *SubtreeCache) getNodeHash(id compact.NodeID, getSubtree GetSubtreeFunc) ([]byte, error) {
subID, sx := tree.Split(id)
subID, sx := splitID(id)
c := s.getCachedSubtree(subID)
if c == nil {
glog.V(2).Infof("Cache miss for %x so we'll try to fetch from storage", subID)
@@ -268,7 +268,7 @@ func (s *SubtreeCache) getNodeHash(id compact.NodeID, getSubtree GetSubtreeFunc)

// SetNodeHash sets a node hash in the cache.
func (s *SubtreeCache) SetNodeHash(id compact.NodeID, h []byte, getSubtree GetSubtreeFunc) error {
subID, sx := tree.Split(id)
subID, sx := splitID(id)
c := s.getCachedSubtree(subID)
if c == nil {
// TODO(al): This is ok, IFF *all* leaves in the subtree are being set,
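The cache call sites above all follow the same shape: splitID turns a node ID into the tile's storage key plus an in-tile suffix, and the suffix's String() value is then used as the per-node key inside the fetched tile. Below is a minimal sketch of that lookup shape, written as if inside storage/cache, with a plain nested map standing in for the cached SubtreeProto; the helper name and map layout are assumptions, not the real SubtreeCache internals.

// lookupHash is a hypothetical helper illustrating the splitID usage pattern.
// "tiles" maps a tile storage key to the node hashes stored in that tile.
func lookupHash(tiles map[string]map[string][]byte, id compact.NodeID) ([]byte, bool) {
	tileKey, sfx := splitID(id)         // tile storage key + in-tile suffix
	nodes, ok := tiles[string(tileKey)] // locate the cached tile
	if !ok {
		return nil, false
	}
	h, ok := nodes[sfx.String()] // the suffix string keys the node within the tile
	return h, ok
}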
3 changes: 1 addition & 2 deletions storage/cache/subtree_cache_test.go
@@ -25,7 +25,6 @@ import (
"github.com/google/trillian/merkle/compact"
"github.com/google/trillian/merkle/rfc6962"
"github.com/google/trillian/storage/storagepb"
"github.com/google/trillian/storage/tree"

"github.com/golang/mock/gomock"
)
@@ -214,7 +213,7 @@ func TestRepopulateLogSubtree(t *testing.T) {
store := func(id compact.NodeID, hash []byte) {
// Don't store leaves or the subtree root in InternalNodes
if id.Level > 0 && id.Level < 8 {
_, sfx := tree.Split(id)
_, sfx := splitID(id)
cmtStorage.InternalNodes[sfx.String()] = hash
}
}
50 changes: 25 additions & 25 deletions storage/tree/suffix.go → storage/cache/suffix.go
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

package tree
package cache

import (
"encoding/base64"
@@ -29,24 +29,24 @@ type key struct {
}

var (
// EmptySuffix is a reusable suffix of zero bits. To avoid special cases
// emptySuffix is a reusable suffix of zero bits. To avoid special cases
// there is a single byte path attached to it and there is no way to create
// a Suffix with a nil or empty path.
EmptySuffix = NewSuffix(0, []byte{0})
// fromRaw maps a bit length and single byte path to a Suffix.
fromRaw = make(map[key]*Suffix)
// fromString maps a base64 encoded string representation to a Suffix.
fromString = make(map[string]*Suffix)
// a suffix with a nil or empty path.
emptySuffix = newSuffix(0, []byte{0})
// fromRaw maps a bit length and single byte path to a suffix.
fromRaw = make(map[key]*suffix)
// fromString maps a base64 encoded string representation to a suffix.
fromString = make(map[string]*suffix)
)

// Suffix represents the tail of a NodeID. It is the path within the subtree.
// suffix represents the tail of a NodeID. It is the path within the subtree.
// The portion of the path that extends beyond the subtree is not part of this suffix.
// We keep a cache of the Suffix values use by log trees, which will have a
// We keep a cache of the suffix values use by log trees, which will have a
// depth between 1 and 8 bits. These are reused to avoid constant reallocation
// and base64 conversion overhead.
//
// TODO(pavelkalinnikov, v2): This type is specific to SubtreeProto. Move it.
type Suffix struct {
type suffix struct {
// bits is the number of bits in the node ID suffix.
bits uint8
// path is the suffix itself.
@@ -55,11 +55,11 @@ type Suffix struct {
asString string
}

// NewSuffix creates a new Suffix. The primary use for them is to get their
// newSuffix creates a new suffix. The primary use for them is to get their
// String value to use as a key so we compute that once up front.
//
// TODO(pavelkalinnikov): Mask the last byte of path.
func NewSuffix(bits uint8, path []byte) *Suffix {
func newSuffix(bits uint8, path []byte) *suffix {
// Use a shared value for a short suffix if we have one, they're immutable.
if bits <= 8 {
if sfx, ok := fromRaw[key{depth: bits, value: path[0]}]; ok {
@@ -72,28 +72,28 @@ func NewSuffix(bits uint8, path []byte) *Suffix {
r = append(r, path...)
s := base64.StdEncoding.EncodeToString(r)

return &Suffix{bits: bits, path: r[1:], asString: s}
return &suffix{bits: bits, path: r[1:], asString: s}
}

// Bits returns the number of significant bits in the Suffix path.
func (s Suffix) Bits() uint8 {
// Bits returns the number of significant bits in the suffix path.
func (s suffix) Bits() uint8 {
return s.bits
}

// Path returns a copy of the Suffix path.
func (s Suffix) Path() []byte {
// Path returns a copy of the suffix path.
func (s suffix) Path() []byte {
return append(make([]byte, 0, len(s.path)), s.path...)
}

// String returns a string that represents Suffix.
// String returns a string that represents suffix.
// This is a base64 encoding of the following format:
// [ 1 byte for depth || path bytes ]
func (s Suffix) String() string {
func (s suffix) String() string {
return s.asString
}

// ParseSuffix converts a suffix string back into a Suffix.
func ParseSuffix(s string) (*Suffix, error) {
// parseSuffix converts a suffix string back into a suffix.
func parseSuffix(s string) (*suffix, error) {
if sfx, ok := fromString[s]; ok {
// Matches a precalculated value, use that.
return sfx, nil
@@ -111,7 +111,7 @@
return nil, fmt.Errorf("unexpected length %d, need %d", got, want)
}

return NewSuffix(bits, b), nil
return newSuffix(bits, b), nil
}

// bytesForBits returns the number of bytes required to store numBits bits.
@@ -131,8 +131,8 @@ func init() {
// Don't need to mask off lower bits outside the valid ones because we
// know they're already zero.
path[0] = byte(i << uint(8-d))
sfx := NewSuffix(byte(d), path)
// As an extra check there should be no collisions in the Suffix values
sfx := newSuffix(byte(d), path)
// As an extra check there should be no collisions in the suffix values
// that we build so map entries should not be overwritten.
k := key{depth: uint8(d), value: path[0]}
if _, ok := fromRaw[k]; ok {
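The String() encoding documented above ([ 1 byte for depth || path bytes ], then base64) can be reproduced without calling the package; the small standalone sketch below rebuilds the "Ba4=" vector used in suffix_test.go.

package main

import (
	"encoding/base64"
	"fmt"
)

func main() {
	// [ 1 byte for depth || path bytes ]: depth of 5 bits, single path byte 0xae.
	raw := []byte{5, 0xae}
	fmt.Println(base64.StdEncoding.EncodeToString(raw))
	// Output: Ba4=
}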
50 changes: 25 additions & 25 deletions storage/tree/suffix_test.go → storage/cache/suffix_test.go
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

package tree
package cache

import (
"bytes"
@@ -46,7 +46,7 @@ func TestParseSuffix(t *testing.T) {
wantErr bool
}{
{str: h2b6(""), wantErr: true},
// TODO(pavelkalinnikov): Parse "00" without a segfault in NewSuffix.
// TODO(pavelkalinnikov): Parse "00" without a segfault in newSuffix.
{str: h2b6("0100"), bits: 1, path: h2b("00")},
// TODO(pavelkalinnikov): The last byte must be masked.
{str: h2b6("01FC"), bits: 1, path: h2b("FC")},
@@ -60,17 +60,17 @@
{str: "----", wantErr: true},
} {
t.Run("", func(t *testing.T) {
sfx, err := ParseSuffix(tc.str)
sfx, err := parseSuffix(tc.str)
if got, want := err != nil, tc.wantErr; got != want {
t.Fatalf("ParseSuffix: %v, wantErr: %v", err, want)
t.Fatalf("parseSuffix: %v, wantErr: %v", err, want)
} else if err != nil {
return
}
if got, want := sfx.Bits(), tc.bits; got != want {
t.Errorf("ParseSuffix: got %d bits, want %d", got, want)
t.Errorf("parseSuffix: got %d bits, want %d", got, want)
}
if got, want := sfx.Path(), tc.path; !bytes.Equal(got, want) {
t.Errorf("ParseSuffix: got path %x, want %x", got, want)
t.Errorf("parseSuffix: got path %x, want %x", got, want)
}
})
}
@@ -120,7 +120,7 @@ func TestSuffixKey(t *testing.T) {
}

// makeSuffixKey creates a suffix key for indexing into the subtree's Leaves and InternalNodes maps.
// This function documents existing log storage behavior. Any new code that emits Suffix objects must
// This function documents existing log storage behavior. Any new code that emits suffix objects must
// produce the exact same outputs as this function would for Logs.
func makeSuffixKey(depth int, index int64) (string, error) {
if depth < 0 {
@@ -129,22 +129,22 @@ func makeSuffixKey(depth int, index int64) (string, error) {
if index < 0 {
return "", fmt.Errorf("invalid negative index %d", index)
}
sfx := NewSuffix(byte(depth), []byte{byte(index)})
sfx := newSuffix(byte(depth), []byte{byte(index)})
return sfx.String(), nil
}

func TestSuffixSerialize(t *testing.T) {
for _, tc := range []struct {
s *Suffix
s *suffix
want string
}{
// Pre-existing format. This test vector must NOT change or existing data will be inaccessible.
{s: NewSuffix(1, []byte{0xae}), want: "Aa4="},
{s: NewSuffix(5, []byte{0xae}), want: "Ba4="},
{s: NewSuffix(8, []byte{0xae}), want: "CK4="},
{s: NewSuffix(15, []byte{0xae, 0x27}), want: "D64n"},
{s: NewSuffix(16, []byte{0xae, 0x27}), want: "EK4n"},
{s: NewSuffix(23, []byte{0xae, 0x27, 0x49}), want: "F64nSQ=="},
{s: newSuffix(1, []byte{0xae}), want: "Aa4="},
{s: newSuffix(5, []byte{0xae}), want: "Ba4="},
{s: newSuffix(8, []byte{0xae}), want: "CK4="},
{s: newSuffix(15, []byte{0xae, 0x27}), want: "D64n"},
{s: newSuffix(16, []byte{0xae, 0x27}), want: "EK4n"},
{s: newSuffix(23, []byte{0xae, 0x27, 0x49}), want: "F64nSQ=="},
} {
if got, want := tc.s.String(), tc.want; got != want {
t.Errorf("%v.serialize(): %v, want %v", tc.s, got, want)
@@ -153,8 +153,8 @@ }
}

func TestSuffixPathImmutable(t *testing.T) {
s1 := NewSuffix(8, []byte{0x97})
s2 := NewSuffix(8, []byte{0x97})
s1 := newSuffix(8, []byte{0x97})
s2 := newSuffix(8, []byte{0x97})

p1 := s1.Path()
p2 := s2.Path()
@@ -194,34 +194,34 @@ func Test8BitSuffixCache(t *testing.T) {
{b: 24, path: []byte{0x40, 0xf0, 0xaa}, wantCache: false},
{b: 32, path: []byte{0x40, 0xf0, 0xaa, 0xed}, wantCache: false},
} {
s1 := NewSuffix(tc.b, tc.path)
s2 := NewSuffix(tc.b, tc.path)
s1 := newSuffix(tc.b, tc.path)
s2 := newSuffix(tc.b, tc.path)

if s1 == s2 != tc.wantCache {
t.Errorf("NewSuffix(): %v: cache / non cache mismatch: %v", tc, s1 == s2)
t.Errorf("newSuffix(): %v: cache / non cache mismatch: %v", tc, s1 == s2)
}

// Test the other direction as well by parsing it and we should get the
// same instance again.
s3, err := ParseSuffix(s1.String())
s3, err := parseSuffix(s1.String())
if err != nil {
t.Fatalf("failed to parse our own suffix: %v", err)
}
if s1 == s3 != tc.wantCache {
t.Errorf("ParseSuffix(): %v: cache / non cache mismatch: %v", tc, s1 == s3)
t.Errorf("parseSuffix(): %v: cache / non cache mismatch: %v", tc, s1 == s3)
}
}
}

// TestCacheIsolation ensures that users can't corrupt the cache by modifying
// values.
func TestCacheIsolation(t *testing.T) {
s1 := NewSuffix(8, []byte{0x80})
s1 := newSuffix(8, []byte{0x80})
s1.Path()[0] ^= 0xff
s2 := NewSuffix(8, []byte{0x80})
s2 := newSuffix(8, []byte{0x80})

if s1 != s2 {
t.Fatalf("did not get same instance back from NewSuffix(8, ...)")
t.Fatalf("did not get same instance back from newSuffix(8, ...)")
}
if s2.Path()[0] != 0x80 {
t.Fatalf("cache instances are not immutable")
