Fix a regression introduced with volume improvements
PR puppetlabs-toy-chest#330 introduced a regression where, when the cache expired, listing
a volume could repopulate the cache with different data and break
navigation. Fix so that VolumeList calls for a particular cache key
always use the same path.

Signed-off-by: Michael Smith <[email protected]>
MikaelSmith committed Jun 17, 2019
1 parent 6b18bad commit 32b1d17
Showing 9 changed files with 107 additions and 38 deletions.
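
For context on the regression itself: if a cached listing can be refilled from whichever path happens to be listed after the cache entry expires, the refill can drop siblings of that path. The sketch below is a standalone illustration with made-up helpers (listing, volumeList, list), not Wash's plugin.CachedOp API; it shows how repopulating the same cache key from a deeper path loses entries at the root.

    package main

    import "fmt"

    // listing maps a directory path to the names of its children (a stand-in for volume.DirMap).
    type listing map[string][]string

    // cache holds one listing per key, like a cached VolumeList result with a TTL.
    var cache = map[string]listing{}

    // volumeList pretends to stat a subtree starting at queryPath.
    func volumeList(queryPath string) listing {
        if queryPath == "" { // searching from the root sees both /foo and /bar
            return listing{"": {"foo", "bar"}, "/foo": {"baz"}}
        }
        // searching from /foo only sees that subtree
        return listing{"": {"foo"}, "/foo": {"baz"}}
    }

    // list returns the children of path, filling the cache entry for key from queryPath if needed.
    func list(key, queryPath, path string) []string {
        l, ok := cache[key]
        if !ok { // cache miss, e.g. after the 30-second TTL expired
            l = volumeList(queryPath)
            cache[key] = l
        }
        return l[path]
    }

    func main() {
        fmt.Println(list("vol", "", ""))     // [foo bar]: cache filled from the root
        delete(cache, "vol")                 // the cache entry expires
        fmt.Println(list("vol", "/foo", "")) // [foo]: refilled from /foo, so /bar vanished
    }

After this commit, the path used to repopulate a given cache key never changes (it is always the subtree root's own path), so an expired entry is rebuilt with the same shape it had before.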
10 changes: 3 additions & 7 deletions plugin/execCommandImpl.go
@@ -47,10 +47,10 @@ func NewExecCommand(ctx context.Context) *ExecCommandImpl {
    if ctx == nil {
        panic("plugin.NewExecCommand called with a nil context")
    }

    cmd := &ExecCommandImpl{
        ctx: ctx,
        exitCodeCh: make(chan int, 1),
        ctx: ctx,
        exitCodeCh: make(chan int, 1),
        exitCodeErrCh: make(chan error, 1),
    }

@@ -148,7 +148,3 @@ func (cmd *ExecCommandImpl) ExitCode() (int, error) {
        return 0, err
    }
}

// sealed implements ExecCommand#sealed
func (cmd *ExecCommandImpl) sealed() {
}
1 change: 0 additions & 1 deletion plugin/types.go
@@ -120,7 +120,6 @@ type ExecOutputChunk struct {
type ExecCommand interface {
    OutputCh() <-chan ExecOutputChunk
    ExitCode() (int, error)
    sealed()
}

// Execable is an entry that can have a command run on it.
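
Dropping sealed() from ExecCommand is what makes the new mockExecCmd test double in volume/fs_test.go (further down) possible: in Go, an interface that includes an unexported method can only be satisfied by types declared in the interface's own package. A minimal sketch of that rule, using hypothetical Command/stubCmd names rather than the real plugin API:

    package main

    import "fmt"

    // Before this commit the interface looked (conceptually) like:
    //
    //     type ExecCommand interface {
    //         OutputCh() <-chan ExecOutputChunk
    //         ExitCode() (int, error)
    //         sealed() // unexported: only the plugin package could implement it
    //     }
    //
    // With sealed() gone, any package can supply a stub.

    type Command interface {
        ExitCode() (int, error)
    }

    type stubCmd struct{ code int }

    func (s stubCmd) ExitCode() (int, error) { return s.code, nil }

    func main() {
        var cmd Command = stubCmd{} // fine now that the interface is "unsealed"
        fmt.Println(cmd.ExitCode())
    }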
5 changes: 4 additions & 1 deletion volume/core.go
@@ -43,10 +43,13 @@ func ChildSchemas() []plugin.EntrySchema {
    return plugin.ChildSchemas(dirBase(), fileBase())
}

// RootPath is the root of the filesystem described by a DirMap returned from VolumeList.
const RootPath = ""

// List constructs an array of entries for the given path from a DirMap.
// If a directory that hasn't been explored yet is listed it will conduct further exploration.
// Requests are cached against the supplied Interface using the VolumeListCB op.
func List(ctx context.Context, impl Interface) ([]plugin.Entry, error) {
    // Start with the implementation as the cache key so we re-use data we get from it for subdirectory queries.
    return newDir("placeholder", plugin.EntryAttributes{}, impl, impl, "").List(ctx)
    return newDir("dummy", plugin.EntryAttributes{}, impl, nil, RootPath).List(ctx)
}
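
volume.List drives everything off a DirMap. As orientation for the changes below, a hedged sketch of the shapes the tests imply (assumed definitions, not copied from the package): DirMap maps a directory path to its children's attributes, RootPath ("") names the mount root, and a nil child map marks a directory that hasn't been explored yet.

    package volume // hedged sketch of assumed shapes, not the actual source file

    import "github.com/puppetlabs/wash/plugin"

    // Dir maps a child's name to its attributes.
    type Dir map[string]plugin.EntryAttributes

    // DirMap maps a directory path to its children; a nil Dir means the directory
    // exists but has not been explored yet (it was beyond maxdepth).
    type DirMap map[string]Dir

    // A listing rooted at RootPath might look like:
    //
    //     DirMap{
    //         RootPath:   Dir{"var": varAttr},  // children of the mount root
    //         "/var":     Dir{"log": logAttr},
    //         "/var/log": nil,                  // unexplored: listing it triggers a new VolumeList
    //     }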
31 changes: 23 additions & 8 deletions volume/dir.go
@@ -11,8 +11,14 @@ import (
type dir struct {
    plugin.EntryBase
    impl Interface
    key plugin.Entry
    path string
    // subtreeRoot represents the location we started searching for this subtree, which is used both as
    // a cache key for that search and to be able to repeat the search. We must always query the same
    // path when running VolumeList for its related key. If we didn't, we might start with a cache at
    // '/', then later refill the same cache entry with a hierarchy starting at '/foo'. If we used that
    // new cache data to try and list '/', we'd only get back a directory containing 'foo' and omit any
    // other files in '/' because they wouldn't be in the cache at the time.
    subtreeRoot *dir
    path string
}

func dirBase() *dir {
@@ -24,10 +30,13 @@ func dirBase() *dir {
}

// newDir creates a dir populated from dirs.
func newDir(name string, attr plugin.EntryAttributes, impl Interface, key plugin.Entry, path string) *dir {
func newDir(name string, attr plugin.EntryAttributes, impl Interface, subtreeRoot *dir, path string) *dir {
    vd := dirBase()
    vd.impl = impl
    vd.key = key
    vd.subtreeRoot = subtreeRoot
    if vd.subtreeRoot == nil {
        vd.subtreeRoot = vd
    }
    vd.path = path
    vd.SetName(name)
    vd.SetAttributes(attr)
@@ -44,8 +53,14 @@ func (v *dir) ChildSchemas() []plugin.EntrySchema {

// List lists the children of the directory.
func (v *dir) List(ctx context.Context) ([]plugin.Entry, error) {
    result, err := plugin.CachedOp(ctx, "VolumeListCB", v.key, 30*time.Second, func() (interface{}, error) {
        return v.impl.VolumeList(ctx, v.path)
    // Use subtree root if specified. If it's the base path, then use impl instead because we started
    // from a dummy root that doesn't have an ID, so can't be used for caching.
    var subtreeRoot plugin.Entry = v.subtreeRoot
    if v.subtreeRoot.path == RootPath {
        subtreeRoot = v.impl
    }
    result, err := plugin.CachedOp(ctx, "VolumeListCB", subtreeRoot, 30*time.Second, func() (interface{}, error) {
        return v.impl.VolumeList(ctx, v.subtreeRoot.path)
    })
    if err != nil {
        return nil, err
@@ -56,10 +71,10 @@ func (v *dir) List(ctx context.Context) ([]plugin.Entry, error) {
    for name, attr := range root {
        if attr.Mode().IsDir() {
            subpath := v.path + "/" + name
            newEntry := newDir(name, attr, v.impl, v.key, subpath)
            newEntry := newDir(name, attr, v.impl, v.subtreeRoot, subpath)
            if d, ok := result.(DirMap)[subpath]; !ok || d == nil {
                // Update key so we trigger new exploration with a new cache key at this subpath.
                newEntry.key = newEntry
                newEntry.subtreeRoot = newEntry
            }
            entries = append(entries, newEntry)
        } else {
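
The net effect of the subtreeRoot field: every dir under a given subtree root caches and re-queries VolumeList against that root's path, and a directory only becomes its own subtree root when its contents were missing (nil) from the current DirMap. A small runnable sketch of that rule with simplified types (not the real volume.dir type):

    package main

    import "fmt"

    // dir mirrors just the two fields that matter here.
    type dir struct {
        path        string
        subtreeRoot *dir
    }

    // queryPath is the path a (re)fill of this dir's cache entry would pass to VolumeList:
    // always the subtree root's own path, never the dir's path.
    func queryPath(d *dir) string {
        return d.subtreeRoot.path
    }

    func main() {
        root := &dir{path: ""} // RootPath
        root.subtreeRoot = root
        varDir := &dir{path: "/var", subtreeRoot: root}     // present in the root's DirMap
        logDir := &dir{path: "/var/log", subtreeRoot: root} // starts out inheriting the parent's root
        logDir.subtreeRoot = logDir                         // its contents were nil, so it roots a new subtree
        appDir := &dir{path: "/var/log/app", subtreeRoot: logDir}

        for _, d := range []*dir{root, varDir, logDir, appDir} {
            fmt.Printf("list %-15q -> VolumeList(%q)\n", d.path, queryPath(d))
        }
    }

So listing /var/log/app after the cache expires re-runs VolumeList("/var/log"), never VolumeList("/var/log/app"), which keeps the refilled cache entry shaped like the one it replaces.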
13 changes: 7 additions & 6 deletions volume/dir_test.go
@@ -37,14 +37,15 @@ func TestVolumeDir(t *testing.T) {
    entry := mockDirEntry{EntryBase: plugin.NewEntryBase(), dmap: dmap}
    entry.SetName("mine")
    entry.SetTestID("/mine")
    entryDir := newDir("dummy", plugin.EntryAttributes{}, &entry, nil, RootPath)

    assert.NotNil(t, dmap[""]["path"])
    vd := newDir("path", dmap[""]["path"], &entry, &entry, "/path")
    assert.NotNil(t, dmap[RootPath]["path"])
    vd := newDir("path", dmap[RootPath]["path"], &entry, entryDir, "/path")
    attr := plugin.Attributes(vd)
    assert.Equal(t, 0755|os.ModeDir, attr.Mode())

    assert.NotNil(t, dmap[""]["path1"])
    vd = newDir("path", dmap[""]["path1"], &entry, &entry, "/path1")
    assert.NotNil(t, dmap[RootPath]["path1"])
    vd = newDir("path", dmap[RootPath]["path1"], &entry, entryDir, "/path1")
    entries, err := vd.List(context.Background())
    assert.Nil(t, err)
    assert.Equal(t, 1, len(entries))
@@ -53,8 +54,8 @@ func TestVolumeDir(t *testing.T) {
        assert.Equal(t, "/path1/a file", entry.path)
    }

    assert.NotNil(t, dmap[""]["path2"])
    vd = newDir("path", dmap[""]["path2"], &entry, &entry, "/path2")
    assert.NotNil(t, dmap[RootPath]["path2"])
    vd = newDir("path", dmap[RootPath]["path2"], &entry, entryDir, "/path2")
    entries, err = vd.List(context.Background())
    assert.Nil(t, err)
    assert.Equal(t, 1, len(entries))
2 changes: 1 addition & 1 deletion volume/fs.go
@@ -98,7 +98,7 @@ func (d *FS) VolumeList(ctx context.Context, path string) (DirMap, error) {
    }
    activity.Record(ctx, "VolumeList complete")
    // Always returns results normalized to the base.
    return StatParseAll(buf, "", path, d.maxdepth)
    return StatParseAll(buf, RootPath, path, d.maxdepth)
}

// VolumeOpen satisfies the Interface required by List to read file contents.
55 changes: 55 additions & 0 deletions volume/fs_test.go
@@ -6,6 +6,7 @@ import (
    "sort"
    "strings"
    "testing"
    "time"

    "github.com/puppetlabs/wash/datastore"
    "github.com/puppetlabs/wash/plugin"
@@ -173,6 +174,40 @@ func (suite *fsTestSuite) TestFSListTwice() {
    }
}

func (suite *fsTestSuite) TestFSListExpiredCache() {
    shortFixture := `
96 1550611510 1550611448 1550611448 41ed /var
96 1550611510 1550611448 1550611448 41ed /var/log
96 1550611510 1550611448 1550611448 41ed /var/log/path
`
    depth := 3
    exec := &mockExecutor{EntryBase: plugin.NewEntryBase()}
    exec.SetName("instance")
    exec.SetTestID("/instance")
    cmd := StatCmd("/", depth)
    exec.On("Exec", mock.Anything, cmd[0], cmd[1:], plugin.ExecOptions{Elevate: true}).Return(mockExecCmd{shortFixture}, nil)

    fs := NewFS("fs", exec, depth)
    // ID would normally be set when listing FS within the parent instance.
    fs.SetTestID("/instance/fs")

    entry := suite.find(fs, "var/log").(plugin.Parent)
    entries, err := plugin.CachedList(suite.ctx, entry)
    if !suite.NoError(err) {
        suite.FailNow("Listing entries failed")
    }
    suite.Equal(1, len(entries))
    suite.Contains(entries, "path")

    _, err = plugin.ClearCacheFor("/instance/fs")
    suite.NoError(err)
    entries, err = plugin.CachedList(suite.ctx, entry)
    if suite.NoError(err) {
        suite.Equal(1, len(entries))
    }
    suite.Implements((*plugin.Parent)(nil), suite.find(fs, "var/log"))
}

func (suite *fsTestSuite) TestFSRead() {
    exec := suite.createExec(varLogFixture, varLogDepth)
    fs := NewFS("fs", exec, varLogDepth)
@@ -207,3 +242,23 @@ func (m *mockExecutor) Exec(ctx context.Context, cmd string, args []string, opts
    arger := m.Called(ctx, cmd, args, opts)
    return arger.Get(0).(plugin.ExecCommand), arger.Error(1)
}

// Mock ExecCommand that can be used repeatedly when mocking a repeated call.
type mockExecCmd struct {
    data string
}

func (cmd mockExecCmd) OutputCh() <-chan plugin.ExecOutputChunk {
    ch := make(chan plugin.ExecOutputChunk, 1)
    ch <- plugin.ExecOutputChunk{
        StreamID: plugin.Stdout,
        Timestamp: time.Now(),
        Data: cmd.data,
    }
    close(ch)
    return ch
}

func (cmd mockExecCmd) ExitCode() (int, error) {
    return 0, nil
}
8 changes: 4 additions & 4 deletions volume/stat.go
@@ -17,7 +17,7 @@ import (
// StatCmd returns the command required to stat all the files in a directory up to maxdepth.
func StatCmd(path string, maxdepth int) []string {
    // List uses "" to mean root. Translate for executing on the target.
    if path == "" {
    if path == RootPath {
        path = "/"
    }
    // size, atime, mtime, ctime, mode, name
@@ -85,8 +85,8 @@ func StatParse(line string) (plugin.EntryAttributes, string, error) {
// directory. The directory may not exist if we're parsing stat output for a basepath that's closer
// to the root directory than where we searched because we want to preserve some of the hierarchy.
func makedirs(dirmap DirMap, newpath string) Dir {
    // If it exists, return the children map. Base case would be newpath == "", which we create at
    // the start of StatParseAll.
    // If it exists, return the children map. Base case would be newpath == RootPath, which we create
    // at the start of StatParseAll.
    if newchildren, ok := dirmap[newpath]; ok {
        return newchildren
    }
@@ -117,7 +117,7 @@ func StatParseAll(output io.Reader, base string, start string, maxdepth int) (Di
    scanner := bufio.NewScanner(output)
    // Create lookup table for directories to contents, and prepopulate the root entry because
    // the mount point won't be included in the stat output.
    dirmap := DirMap{"": make(Dir)}
    dirmap := DirMap{RootPath: make(Dir)}
    for scanner.Scan() {
        text := strings.TrimSpace(scanner.Text())
        if text != "" {
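
makedirs backfills missing ancestors so that every directory mentioned by the stat output has an entry in the DirMap, terminating at the pre-created RootPath entry. A simplified, runnable sketch of that recursion (attributes elided, helper names assumed; not the package's actual code):

    package main

    import (
        "fmt"
        "path"
        "strings"
    )

    // Simplified stand-ins for volume.Dir / volume.DirMap; attributes are elided.
    type Dir map[string]struct{}
    type DirMap map[string]Dir

    const RootPath = ""

    // makedirs ensures newpath and all of its ancestors have entries in dirmap and
    // returns newpath's children map. The recursion bottoms out at RootPath, which
    // the caller pre-creates (as StatParseAll does).
    func makedirs(dirmap DirMap, newpath string) Dir {
        if children, ok := dirmap[newpath]; ok {
            return children
        }
        parent, name := path.Split(newpath)      // "/var/log" -> "/var/", "log"
        parent = strings.TrimSuffix(parent, "/") // parent of "/var" becomes "", i.e. RootPath
        parentChildren := makedirs(dirmap, parent)
        parentChildren[name] = struct{}{}
        children := make(Dir)
        dirmap[newpath] = children
        return children
    }

    func main() {
        dirmap := DirMap{RootPath: make(Dir)}
        makedirs(dirmap, "/var/log/app")
        for _, p := range []string{RootPath, "/var", "/var/log", "/var/log/app"} {
            fmt.Printf("%-14q %v\n", p, dirmap[p])
        }
    }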
20 changes: 10 additions & 10 deletions volume/stat_test.go
@@ -92,15 +92,15 @@ func TestStatParseAll(t *testing.T) {
    assert.Nil(t, err)
    assert.NotNil(t, dmap)
    assert.Equal(t, 8, len(dmap))
    for _, dir := range []string{"", "/path", "/path/has", "/path/has/got", "/path/has/got/some", "/path1", "/path2", "/path2/dir"} {
    for _, dir := range []string{RootPath, "/path", "/path/has", "/path/has/got", "/path/has/got/some", "/path1", "/path2", "/path2/dir"} {
        assert.Contains(t, dmap, dir)
    }
    for _, file := range []string{"/path/has/got/some/legs", "/path1/a file"} {
        assert.NotContains(t, dmap, file)
    }

    for _, node := range []string{"path", "path1", "path2"} {
        assert.Contains(t, dmap[""], node)
        assert.Contains(t, dmap[RootPath], node)
    }

    expectedAttr := plugin.EntryAttributes{}
@@ -140,8 +140,8 @@ func TestStatParseAllUnfinished(t *testing.T) {
    assert.Nil(t, err)
    assert.NotNil(t, dmap)
    assert.Equal(t, 3, len(dmap))
    assert.Contains(t, dmap, "")
    assert.Contains(t, dmap[""], "path")
    assert.Contains(t, dmap, RootPath)
    assert.Contains(t, dmap[RootPath], "path")
    assert.Contains(t, dmap, "/path")
    assert.Contains(t, dmap["/path"], "has")
    // Depth two paths will be nil to signify we don't know their children.
@@ -154,12 +154,12 @@ func TestStatParseAllDeep(t *testing.T) {
96 1550611510 1550611448 1550611448 41ed mnt/path
96 1550611510 1550611448 1550611448 41ed mnt/path/has
`
    dmap, err := StatParseAll(strings.NewReader(shortFixture), "", mountpoint, 2)
    dmap, err := StatParseAll(strings.NewReader(shortFixture), RootPath, mountpoint, 2)
    assert.Nil(t, err)
    assert.NotNil(t, dmap)
    assert.Equal(t, 4, len(dmap))
    assert.Contains(t, dmap, "")
    assert.Contains(t, dmap[""], "mnt")
    assert.Contains(t, dmap, RootPath)
    assert.Contains(t, dmap[RootPath], "mnt")
    assert.Contains(t, dmap, "mnt")
    assert.Contains(t, dmap["mnt"], "path")
    assert.Contains(t, dmap, "mnt/path")
@@ -170,11 +170,11 @@ }
}

func TestStatParseAllRoot(t *testing.T) {
    dmap, err := StatParseAll(strings.NewReader(fixture), "", "", mountDepth+1)
    dmap, err := StatParseAll(strings.NewReader(fixture), RootPath, RootPath, mountDepth+1)
    assert.Nil(t, err)
    assert.NotNil(t, dmap)
    assert.Equal(t, 9, len(dmap))
    for _, dir := range []string{"", "mnt", "mnt/path", "mnt/path/has", "mnt/path/has/got", "mnt/path/has/got/some", "mnt/path1", "mnt/path2", "mnt/path2/dir"} {
    for _, dir := range []string{RootPath, "mnt", "mnt/path", "mnt/path/has", "mnt/path/has/got", "mnt/path/has/got/some", "mnt/path1", "mnt/path2", "mnt/path2/dir"} {
        assert.Contains(t, dmap, dir)
    }
    for _, file := range []string{"mnt/path/has/got/some/legs", "mnt/path1/a file"} {
@@ -214,7 +214,7 @@ func TestStatParseAllRoot(t *testing.T) {

    expectedAttr = plugin.EntryAttributes{}
    expectedAttr.SetMode(0550 | os.ModeDir)
    assert.Equal(t, expectedAttr, dmap[""]["mnt"])
    assert.Equal(t, expectedAttr, dmap[RootPath]["mnt"])
}

func TestNumPathSegments(t *testing.T) {
