From 32b1d1794d155611adb4e578350bd7a77e244344 Mon Sep 17 00:00:00 2001 From: Michael Smith Date: Fri, 14 Jun 2019 17:15:26 -0700 Subject: [PATCH] Fix a regression introduced with volume improvements PR #330 introduced a regression where, when the cache expired, listing a volume could repopulate the cache with different data and break navigation. Fix so that VolumeList calls for a particular cache key always use the same path. Signed-off-by: Michael Smith --- plugin/execCommandImpl.go | 10 +++---- plugin/types.go | 1 - volume/core.go | 5 +++- volume/dir.go | 31 ++++++++++++++++------ volume/dir_test.go | 13 ++++----- volume/fs.go | 2 +- volume/fs_test.go | 55 +++++++++++++++++++++++++++++++++++++++ volume/stat.go | 8 +++--- volume/stat_test.go | 20 +++++++------- 9 files changed, 107 insertions(+), 38 deletions(-) diff --git a/plugin/execCommandImpl.go b/plugin/execCommandImpl.go index 619decca3..21190f193 100644 --- a/plugin/execCommandImpl.go +++ b/plugin/execCommandImpl.go @@ -47,10 +47,10 @@ func NewExecCommand(ctx context.Context) *ExecCommandImpl { if ctx == nil { panic("plugin.NewExecCommand called with a nil context") } - + cmd := &ExecCommandImpl{ - ctx: ctx, - exitCodeCh: make(chan int, 1), + ctx: ctx, + exitCodeCh: make(chan int, 1), exitCodeErrCh: make(chan error, 1), } @@ -148,7 +148,3 @@ func (cmd *ExecCommandImpl) ExitCode() (int, error) { return 0, err } } - -// sealed implements ExecCommand#sealed -func (cmd *ExecCommandImpl) sealed() { -} \ No newline at end of file diff --git a/plugin/types.go b/plugin/types.go index d3bfd6ba0..11484a02b 100644 --- a/plugin/types.go +++ b/plugin/types.go @@ -120,7 +120,6 @@ type ExecOutputChunk struct { type ExecCommand interface { OutputCh() <-chan ExecOutputChunk ExitCode() (int, error) - sealed() } // Execable is an entry that can have a command run on it. 
diff --git a/volume/core.go b/volume/core.go index 1a7dc6479..5c953219d 100644 --- a/volume/core.go +++ b/volume/core.go @@ -43,10 +43,13 @@ func ChildSchemas() []plugin.EntrySchema { return plugin.ChildSchemas(dirBase(), fileBase()) } +// RootPath is the root of the filesystem described by a DirMap returned from VolumeList. +const RootPath = "" + // List constructs an array of entries for the given path from a DirMap. // If a directory that hasn't been explored yet is listed it will conduct further exploration. // Requests are cached against the supplied Interface using the VolumeListCB op. func List(ctx context.Context, impl Interface) ([]plugin.Entry, error) { // Start with the implementation as the cache key so we re-use data we get from it for subdirectory queries. - return newDir("placeholder", plugin.EntryAttributes{}, impl, impl, "").List(ctx) + return newDir("dummy", plugin.EntryAttributes{}, impl, nil, RootPath).List(ctx) } diff --git a/volume/dir.go b/volume/dir.go index d4507c0b9..dd2c30200 100644 --- a/volume/dir.go +++ b/volume/dir.go @@ -11,8 +11,14 @@ import ( type dir struct { plugin.EntryBase impl Interface - key plugin.Entry - path string + // subtreeRoot represents the location we started searching for this subtree, which is used both as + // a cache key for that search and to be able to repeat the search. We must always query the same + // path when running VolumeList for its related key. If we didn't, we might start with a cache at + // '/', then later refill the same cache entry with a hierarchy starting at '/foo'. If we used that + // new cache data to try and list '/', we'd only get back a directory containing 'foo' and omit any + // other files in '/' because they wouldn't be in the cache at the time. + subtreeRoot *dir + path string } func dirBase() *dir { @@ -24,10 +30,13 @@ func dirBase() *dir { } // newDir creates a dir populated from dirs. 
-func newDir(name string, attr plugin.EntryAttributes, impl Interface, key plugin.Entry, path string) *dir { +func newDir(name string, attr plugin.EntryAttributes, impl Interface, subtreeRoot *dir, path string) *dir { vd := dirBase() vd.impl = impl - vd.key = key + vd.subtreeRoot = subtreeRoot + if vd.subtreeRoot == nil { + vd.subtreeRoot = vd + } vd.path = path vd.SetName(name) vd.SetAttributes(attr) @@ -44,8 +53,14 @@ func (v *dir) ChildSchemas() []plugin.EntrySchema { // List lists the children of the directory. func (v *dir) List(ctx context.Context) ([]plugin.Entry, error) { - result, err := plugin.CachedOp(ctx, "VolumeListCB", v.key, 30*time.Second, func() (interface{}, error) { - return v.impl.VolumeList(ctx, v.path) + // Use subtree root if specified. If it's the base path, then use impl instead because we started + // from a dummy root that doesn't have an ID, so can't be used for caching. + var subtreeRoot plugin.Entry = v.subtreeRoot + if v.subtreeRoot.path == RootPath { + subtreeRoot = v.impl + } + result, err := plugin.CachedOp(ctx, "VolumeListCB", subtreeRoot, 30*time.Second, func() (interface{}, error) { + return v.impl.VolumeList(ctx, v.subtreeRoot.path) }) if err != nil { return nil, err @@ -56,10 +71,10 @@ func (v *dir) List(ctx context.Context) ([]plugin.Entry, error) { for name, attr := range root { if attr.Mode().IsDir() { subpath := v.path + "/" + name - newEntry := newDir(name, attr, v.impl, v.key, subpath) + newEntry := newDir(name, attr, v.impl, v.subtreeRoot, subpath) if d, ok := result.(DirMap)[subpath]; !ok || d == nil { // Update key so we trigger new exploration with a new cache key at this subpath. 
- newEntry.key = newEntry + newEntry.subtreeRoot = newEntry } entries = append(entries, newEntry) } else { diff --git a/volume/dir_test.go b/volume/dir_test.go index 9b4785e59..161377c61 100644 --- a/volume/dir_test.go +++ b/volume/dir_test.go @@ -37,14 +37,15 @@ func TestVolumeDir(t *testing.T) { entry := mockDirEntry{EntryBase: plugin.NewEntryBase(), dmap: dmap} entry.SetName("mine") entry.SetTestID("/mine") + entryDir := newDir("dummy", plugin.EntryAttributes{}, &entry, nil, RootPath) - assert.NotNil(t, dmap[""]["path"]) - vd := newDir("path", dmap[""]["path"], &entry, &entry, "/path") + assert.NotNil(t, dmap[RootPath]["path"]) + vd := newDir("path", dmap[RootPath]["path"], &entry, entryDir, "/path") attr := plugin.Attributes(vd) assert.Equal(t, 0755|os.ModeDir, attr.Mode()) - assert.NotNil(t, dmap[""]["path1"]) - vd = newDir("path", dmap[""]["path1"], &entry, &entry, "/path1") + assert.NotNil(t, dmap[RootPath]["path1"]) + vd = newDir("path", dmap[RootPath]["path1"], &entry, entryDir, "/path1") entries, err := vd.List(context.Background()) assert.Nil(t, err) assert.Equal(t, 1, len(entries)) @@ -53,8 +54,8 @@ func TestVolumeDir(t *testing.T) { assert.Equal(t, "/path1/a file", entry.path) } - assert.NotNil(t, dmap[""]["path2"]) - vd = newDir("path", dmap[""]["path2"], &entry, &entry, "/path2") + assert.NotNil(t, dmap[RootPath]["path2"]) + vd = newDir("path", dmap[RootPath]["path2"], &entry, entryDir, "/path2") entries, err = vd.List(context.Background()) assert.Nil(t, err) assert.Equal(t, 1, len(entries)) diff --git a/volume/fs.go b/volume/fs.go index fde87a264..84c8c9d47 100644 --- a/volume/fs.go +++ b/volume/fs.go @@ -98,7 +98,7 @@ func (d *FS) VolumeList(ctx context.Context, path string) (DirMap, error) { } activity.Record(ctx, "VolumeList complete") // Always returns results normalized to the base. 
- return StatParseAll(buf, "", path, d.maxdepth) + return StatParseAll(buf, RootPath, path, d.maxdepth) } // VolumeOpen satisfies the Interface required by List to read file contents. diff --git a/volume/fs_test.go b/volume/fs_test.go index 272b0ab0f..0a2884457 100644 --- a/volume/fs_test.go +++ b/volume/fs_test.go @@ -6,6 +6,7 @@ import ( "sort" "strings" "testing" + "time" "github.com/puppetlabs/wash/datastore" "github.com/puppetlabs/wash/plugin" @@ -173,6 +174,40 @@ func (suite *fsTestSuite) TestFSListTwice() { } } +func (suite *fsTestSuite) TestFSListExpiredCache() { + shortFixture := ` +96 1550611510 1550611448 1550611448 41ed /var +96 1550611510 1550611448 1550611448 41ed /var/log +96 1550611510 1550611448 1550611448 41ed /var/log/path +` + depth := 3 + exec := &mockExecutor{EntryBase: plugin.NewEntryBase()} + exec.SetName("instance") + exec.SetTestID("/instance") + cmd := StatCmd("/", depth) + exec.On("Exec", mock.Anything, cmd[0], cmd[1:], plugin.ExecOptions{Elevate: true}).Return(mockExecCmd{shortFixture}, nil) + + fs := NewFS("fs", exec, depth) + // ID would normally be set when listing FS within the parent instance. 
+ fs.SetTestID("/instance/fs") + + entry := suite.find(fs, "var/log").(plugin.Parent) + entries, err := plugin.CachedList(suite.ctx, entry) + if !suite.NoError(err) { + suite.FailNow("Listing entries failed") + } + suite.Equal(1, len(entries)) + suite.Contains(entries, "path") + + _, err = plugin.ClearCacheFor("/instance/fs") + suite.NoError(err) + entries, err = plugin.CachedList(suite.ctx, entry) + if suite.NoError(err) { + suite.Equal(1, len(entries)) + } + suite.Implements((*plugin.Parent)(nil), suite.find(fs, "var/log")) +} + func (suite *fsTestSuite) TestFSRead() { exec := suite.createExec(varLogFixture, varLogDepth) fs := NewFS("fs", exec, varLogDepth) @@ -207,3 +242,23 @@ func (m *mockExecutor) Exec(ctx context.Context, cmd string, args []string, opts arger := m.Called(ctx, cmd, args, opts) return arger.Get(0).(plugin.ExecCommand), arger.Error(1) } + +// Mock ExecCommand that can be used repeatedly when mocking a repeated call. +type mockExecCmd struct { + data string +} + +func (cmd mockExecCmd) OutputCh() <-chan plugin.ExecOutputChunk { + ch := make(chan plugin.ExecOutputChunk, 1) + ch <- plugin.ExecOutputChunk{ + StreamID: plugin.Stdout, + Timestamp: time.Now(), + Data: cmd.data, + } + close(ch) + return ch +} + +func (cmd mockExecCmd) ExitCode() (int, error) { + return 0, nil +} diff --git a/volume/stat.go b/volume/stat.go index 7f5f1ab23..662b7f74e 100644 --- a/volume/stat.go +++ b/volume/stat.go @@ -17,7 +17,7 @@ import ( // StatCmd returns the command required to stat all the files in a directory up to maxdepth. func StatCmd(path string, maxdepth int) []string { // List uses "" to mean root. Translate for executing on the target. - if path == "" { + if path == RootPath { path = "/" } // size, atime, mtime, ctime, mode, name @@ -85,8 +85,8 @@ func StatParse(line string) (plugin.EntryAttributes, string, error) { // directory. 
The directory may not exist if we're parsing stat output for a basepath that's closer // to the root directory than where we searched because we want to preserve some of the hierarchy. func makedirs(dirmap DirMap, newpath string) Dir { - // If it exists, return the children map. Base case would be newpath == "", which we create at - // the start of StatParseAll. + // If it exists, return the children map. Base case would be newpath == RootPath, which we create + // at the start of StatParseAll. if newchildren, ok := dirmap[newpath]; ok { return newchildren } @@ -117,7 +117,7 @@ func StatParseAll(output io.Reader, base string, start string, maxdepth int) (Di scanner := bufio.NewScanner(output) // Create lookup table for directories to contents, and prepopulate the root entry because // the mount point won't be included in the stat output. - dirmap := DirMap{"": make(Dir)} + dirmap := DirMap{RootPath: make(Dir)} for scanner.Scan() { text := strings.TrimSpace(scanner.Text()) if text != "" { diff --git a/volume/stat_test.go b/volume/stat_test.go index 5be32e2d8..1385c3c35 100644 --- a/volume/stat_test.go +++ b/volume/stat_test.go @@ -92,7 +92,7 @@ func TestStatParseAll(t *testing.T) { assert.Nil(t, err) assert.NotNil(t, dmap) assert.Equal(t, 8, len(dmap)) - for _, dir := range []string{"", "/path", "/path/has", "/path/has/got", "/path/has/got/some", "/path1", "/path2", "/path2/dir"} { + for _, dir := range []string{RootPath, "/path", "/path/has", "/path/has/got", "/path/has/got/some", "/path1", "/path2", "/path2/dir"} { assert.Contains(t, dmap, dir) } for _, file := range []string{"/path/has/got/some/legs", "/path1/a file"} { @@ -100,7 +100,7 @@ func TestStatParseAll(t *testing.T) { } for _, node := range []string{"path", "path1", "path2"} { - assert.Contains(t, dmap[""], node) + assert.Contains(t, dmap[RootPath], node) } expectedAttr := plugin.EntryAttributes{} @@ -140,8 +140,8 @@ func TestStatParseAllUnfinished(t *testing.T) { assert.Nil(t, err) assert.NotNil(t, 
dmap) assert.Equal(t, 3, len(dmap)) - assert.Contains(t, dmap, "") - assert.Contains(t, dmap[""], "path") + assert.Contains(t, dmap, RootPath) + assert.Contains(t, dmap[RootPath], "path") assert.Contains(t, dmap, "/path") assert.Contains(t, dmap["/path"], "has") // Depth two paths will be nil to signify we don't know their children. @@ -154,12 +154,12 @@ func TestStatParseAllDeep(t *testing.T) { 96 1550611510 1550611448 1550611448 41ed mnt/path 96 1550611510 1550611448 1550611448 41ed mnt/path/has ` - dmap, err := StatParseAll(strings.NewReader(shortFixture), "", mountpoint, 2) + dmap, err := StatParseAll(strings.NewReader(shortFixture), RootPath, mountpoint, 2) assert.Nil(t, err) assert.NotNil(t, dmap) assert.Equal(t, 4, len(dmap)) - assert.Contains(t, dmap, "") - assert.Contains(t, dmap[""], "mnt") + assert.Contains(t, dmap, RootPath) + assert.Contains(t, dmap[RootPath], "mnt") assert.Contains(t, dmap, "mnt") assert.Contains(t, dmap["mnt"], "path") assert.Contains(t, dmap, "mnt/path") @@ -170,11 +170,11 @@ func TestStatParseAllDeep(t *testing.T) { } func TestStatParseAllRoot(t *testing.T) { - dmap, err := StatParseAll(strings.NewReader(fixture), "", "", mountDepth+1) + dmap, err := StatParseAll(strings.NewReader(fixture), RootPath, RootPath, mountDepth+1) assert.Nil(t, err) assert.NotNil(t, dmap) assert.Equal(t, 9, len(dmap)) - for _, dir := range []string{"", "mnt", "mnt/path", "mnt/path/has", "mnt/path/has/got", "mnt/path/has/got/some", "mnt/path1", "mnt/path2", "mnt/path2/dir"} { + for _, dir := range []string{RootPath, "mnt", "mnt/path", "mnt/path/has", "mnt/path/has/got", "mnt/path/has/got/some", "mnt/path1", "mnt/path2", "mnt/path2/dir"} { assert.Contains(t, dmap, dir) } for _, file := range []string{"mnt/path/has/got/some/legs", "mnt/path1/a file"} { @@ -214,7 +214,7 @@ func TestStatParseAllRoot(t *testing.T) { expectedAttr = plugin.EntryAttributes{} expectedAttr.SetMode(0550 | os.ModeDir) - assert.Equal(t, expectedAttr, dmap[""]["mnt"]) + assert.Equal(t, 
expectedAttr, dmap[RootPath]["mnt"]) } func TestNumPathSegments(t *testing.T) {