Skip to content

Commit

Permalink
gopls/internal/lsp/cache: clean up public API
Browse files Browse the repository at this point in the history
This change unexports various parts of the API, including
DiskFile, Overlay, BundleQuickFixes, XrefIndex, and
the methods of fileMap.

It also moves some declarations for clarity and updates
doc comments. This is a preparatory step for moving
this package out of lsp/.

Change-Id: I1ff745b612315f9b9c0cd188c67bdfa695f9708f
Reviewed-on: https://go-review.googlesource.com/c/tools/+/557498
Reviewed-by: Robert Findley <[email protected]>
LUCI-TryBot-Result: Go LUCI <[email protected]>
  • Loading branch information
adonovan committed Jan 23, 2024
1 parent 4c53267 commit bd547e5
Show file tree
Hide file tree
Showing 15 changed files with 148 additions and 147 deletions.
26 changes: 15 additions & 11 deletions gopls/internal/debug/template_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,7 @@ import (
"github.com/jba/templatecheck"
"golang.org/x/tools/go/packages"
"golang.org/x/tools/gopls/internal/debug"
"golang.org/x/tools/gopls/internal/file"
"golang.org/x/tools/gopls/internal/lsp/cache"
"golang.org/x/tools/internal/testenv"
)
Expand All @@ -30,15 +31,18 @@ var templates = map[string]struct {
tmpl *template.Template
data interface{} // a value of the needed type
}{
"MainTmpl": {debug.MainTmpl, &debug.Instance{}},
"DebugTmpl": {debug.DebugTmpl, nil},
"RPCTmpl": {debug.RPCTmpl, &debug.Rpcs{}},
"TraceTmpl": {debug.TraceTmpl, debug.TraceResults{}},
"CacheTmpl": {debug.CacheTmpl, &cache.Cache{}},
"SessionTmpl": {debug.SessionTmpl, &cache.Session{}},
"ClientTmpl": {debug.ClientTmpl, &debug.Client{}},
"ServerTmpl": {debug.ServerTmpl, &debug.Server{}},
"FileTmpl": {debug.FileTmpl, &cache.Overlay{}},
"MainTmpl": {debug.MainTmpl, &debug.Instance{}},
"DebugTmpl": {debug.DebugTmpl, nil},
"RPCTmpl": {debug.RPCTmpl, &debug.Rpcs{}},
"TraceTmpl": {debug.TraceTmpl, debug.TraceResults{}},
"CacheTmpl": {debug.CacheTmpl, &cache.Cache{}},
"SessionTmpl": {debug.SessionTmpl, &cache.Session{}},
"ClientTmpl": {debug.ClientTmpl, &debug.Client{}},
"ServerTmpl": {debug.ServerTmpl, &debug.Server{}},
"FileTmpl": {debug.FileTmpl, *new(interface {
file.Handle
Kind() file.Kind // (overlay files only)
})},
"InfoTmpl": {debug.InfoTmpl, "something"},
"MemoryTmpl": {debug.MemoryTmpl, runtime.MemStats{}},
"AnalysisTmpl": {debug.AnalysisTmpl, new(debug.State).Analysis()},
Expand Down Expand Up @@ -74,7 +78,7 @@ func TestTemplates(t *testing.T) {
if tree == nil {
t.Fatalf("found no syntax tree for %s", "serve.go")
}
renders := callsOf(p, tree, "render")
renders := callsOf(tree, "render")
if len(renders) == 0 {
t.Fatalf("found no calls to render")
}
Expand Down Expand Up @@ -122,7 +126,7 @@ func TestTemplates(t *testing.T) {
}
}

func callsOf(p *packages.Package, tree *ast.File, name string) []*ast.CallExpr {
func callsOf(tree *ast.File, name string) []*ast.CallExpr {
var ans []*ast.CallExpr
f := func(n ast.Node) bool {
x, ok := n.(*ast.CallExpr)
Expand Down
24 changes: 0 additions & 24 deletions gopls/internal/lsp/cache/cache.go
Original file line number Diff line number Diff line change
Expand Up @@ -5,16 +5,11 @@
package cache

import (
"context"
"reflect"
"strconv"
"sync/atomic"
"time"

"golang.org/x/tools/gopls/internal/lsp/command"
"golang.org/x/tools/gopls/internal/lsp/protocol"
"golang.org/x/tools/internal/event"
"golang.org/x/tools/internal/gocommand"
"golang.org/x/tools/internal/memoize"
)

Expand Down Expand Up @@ -51,25 +46,6 @@ type Cache struct {
*memoizedFS // implements source.FileSource
}

// NewSession creates a new gopls session with the given cache and options overrides.
//
// The provided optionsOverrides may be nil.
//
// TODO(rfindley): move this to session.go.
func NewSession(ctx context.Context, c *Cache) *Session {
index := atomic.AddInt64(&sessionIndex, 1)
s := &Session{
id: strconv.FormatInt(index, 10),
cache: c,
gocmdRunner: &gocommand.Runner{},
overlayFS: newOverlayFS(c),
parseCache: newParseCache(1 * time.Minute), // keep recently parsed files for a minute, to optimize typing CPU
viewMap: make(map[protocol.DocumentURI]*View),
}
event.Log(ctx, "New session", KeyCreateSession.Of(s))
return s
}

var cacheIndex, sessionIndex, viewIndex int64

func (c *Cache) ID() string { return c.id }
Expand Down
4 changes: 2 additions & 2 deletions gopls/internal/lsp/cache/check.go
Original file line number Diff line number Diff line change
Expand Up @@ -1693,7 +1693,7 @@ func depsErrors(ctx context.Context, snapshot *Snapshot, mp *metadata.Package) (
Message: fmt.Sprintf("error while importing %v: %v", item, depErr.Err),
SuggestedFixes: goGetQuickFixes(mp.Module != nil, imp.cgf.URI, item),
}
if !BundleQuickFixes(diag) {
if !bundleQuickFixes(diag) {
bug.Reportf("failed to bundle fixes for diagnostic %q", diag.Message)
}
errors = append(errors, diag)
Expand Down Expand Up @@ -1736,7 +1736,7 @@ func depsErrors(ctx context.Context, snapshot *Snapshot, mp *metadata.Package) (
Message: fmt.Sprintf("error while importing %v: %v", item, depErr.Err),
SuggestedFixes: goGetQuickFixes(true, pm.URI, item),
}
if !BundleQuickFixes(diag) {
if !bundleQuickFixes(diag) {
bug.Reportf("failed to bundle fixes for diagnostic %q", diag.Message)
}
errors = append(errors, diag)
Expand Down
4 changes: 2 additions & 2 deletions gopls/internal/lsp/cache/diagnostics.go
Original file line number Diff line number Diff line change
Expand Up @@ -119,10 +119,10 @@ type quickFixesJSON struct {
Fixes []protocol.CodeAction
}

// BundleQuickFixes attempts to bundle sd.SuggestedFixes into the
// bundleQuickFixes attempts to bundle sd.SuggestedFixes into the
// sd.BundledFixes field, so that it can be round-tripped through the client.
// It returns false if the quick-fixes cannot be bundled.
func BundleQuickFixes(sd *Diagnostic) bool {
func bundleQuickFixes(sd *Diagnostic) bool {
if len(sd.SuggestedFixes) == 0 {
return true
}
Expand Down
44 changes: 22 additions & 22 deletions gopls/internal/lsp/cache/filemap.go
Original file line number Diff line number Diff line change
Expand Up @@ -17,21 +17,21 @@ import (
// file.
type fileMap struct {
files *persistent.Map[protocol.DocumentURI, file.Handle]
overlays *persistent.Map[protocol.DocumentURI, *Overlay] // the subset of files that are overlays
overlays *persistent.Map[protocol.DocumentURI, *overlay] // the subset of files that are overlays
dirs *persistent.Set[string] // all dirs containing files; if nil, dirs have not been initialized
}

func newFileMap() *fileMap {
return &fileMap{
files: new(persistent.Map[protocol.DocumentURI, file.Handle]),
overlays: new(persistent.Map[protocol.DocumentURI, *Overlay]),
overlays: new(persistent.Map[protocol.DocumentURI, *overlay]),
dirs: new(persistent.Set[string]),
}
}

// Clone creates a copy of the fileMap, incorporating the changes specified by
// clone creates a copy of the fileMap, incorporating the changes specified by
// the changes map.
func (m *fileMap) Clone(changes map[protocol.DocumentURI]file.Handle) *fileMap {
func (m *fileMap) clone(changes map[protocol.DocumentURI]file.Handle) *fileMap {
m2 := &fileMap{
files: m.files.Clone(),
overlays: m.overlays.Clone(),
Expand All @@ -52,43 +52,43 @@ func (m *fileMap) Clone(changes map[protocol.DocumentURI]file.Handle) *fileMap {
// first, as a set before a deletion would result in pointless work.
for uri, fh := range changes {
if !fileExists(fh) {
m2.Delete(uri)
m2.delete(uri)
}
}
for uri, fh := range changes {
if fileExists(fh) {
m2.Set(uri, fh)
m2.set(uri, fh)
}
}
return m2
}

func (m *fileMap) Destroy() {
func (m *fileMap) destroy() {
m.files.Destroy()
m.overlays.Destroy()
if m.dirs != nil {
m.dirs.Destroy()
}
}

// Get returns the file handle mapped by the given key, or (nil, false) if the
// get returns the file handle mapped by the given key, or (nil, false) if the
// key is not present.
func (m *fileMap) Get(key protocol.DocumentURI) (file.Handle, bool) {
func (m *fileMap) get(key protocol.DocumentURI) (file.Handle, bool) {
return m.files.Get(key)
}

// Range calls f for each (uri, fh) in the map.
func (m *fileMap) Range(f func(uri protocol.DocumentURI, fh file.Handle)) {
// foreach calls f for each (uri, fh) in the map.
func (m *fileMap) foreach(f func(uri protocol.DocumentURI, fh file.Handle)) {
m.files.Range(f)
}

// Set stores the given file handle for key, updating overlays and directories
// set stores the given file handle for key, updating overlays and directories
// accordingly.
func (m *fileMap) Set(key protocol.DocumentURI, fh file.Handle) {
func (m *fileMap) set(key protocol.DocumentURI, fh file.Handle) {
m.files.Set(key, fh, nil)

// update overlays
if o, ok := fh.(*Overlay); ok {
if o, ok := fh.(*overlay); ok {
m.overlays.Set(key, o, nil)
} else {
// Setting a non-overlay must delete the corresponding overlay, to preserve
Expand All @@ -111,9 +111,9 @@ func (m *fileMap) addDirs(u protocol.DocumentURI) {
}
}

// Delete removes a file from the map, and updates overlays and dirs
// delete removes a file from the map, and updates overlays and dirs
// accordingly.
func (m *fileMap) Delete(key protocol.DocumentURI) {
func (m *fileMap) delete(key protocol.DocumentURI) {
m.files.Delete(key)
m.overlays.Delete(key)

Expand All @@ -127,20 +127,20 @@ func (m *fileMap) Delete(key protocol.DocumentURI) {
}
}

// Overlays returns a new unordered array of overlay files.
func (m *fileMap) Overlays() []*Overlay {
var overlays []*Overlay
m.overlays.Range(func(_ protocol.DocumentURI, o *Overlay) {
// getOverlays returns a new unordered array of overlay files.
func (m *fileMap) getOverlays() []*overlay {
var overlays []*overlay
m.overlays.Range(func(_ protocol.DocumentURI, o *overlay) {
overlays = append(overlays, o)
})
return overlays
}

// Dirs reports returns the set of dirs observed by the fileMap.
// getDirs returns the set of dirs observed by the fileMap.
//
// This operation mutates the fileMap.
// The result must not be mutated by the caller.
func (m *fileMap) Dirs() *persistent.Set[string] {
func (m *fileMap) getDirs() *persistent.Set[string] {
if m.dirs == nil {
m.dirs = new(persistent.Set[string])
m.files.Range(func(u protocol.DocumentURI, _ file.Handle) {
Expand Down
14 changes: 7 additions & 7 deletions gopls/internal/lsp/cache/filemap_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -72,18 +72,18 @@ func TestFileMap(t *testing.T) {
case set:
var fh file.Handle
if op.overlay {
fh = &Overlay{uri: uri}
fh = &overlay{uri: uri}
} else {
fh = &DiskFile{uri: uri}
fh = &diskFile{uri: uri}
}
m.Set(uri, fh)
m.set(uri, fh)
case del:
m.Delete(uri)
m.delete(uri)
}
}

var gotFiles []string
m.Range(func(uri protocol.DocumentURI, _ file.Handle) {
m.foreach(func(uri protocol.DocumentURI, _ file.Handle) {
gotFiles = append(gotFiles, normalize(uri.Path()))
})
sort.Strings(gotFiles)
Expand All @@ -92,15 +92,15 @@ func TestFileMap(t *testing.T) {
}

var gotOverlays []string
for _, o := range m.Overlays() {
for _, o := range m.getOverlays() {
gotOverlays = append(gotOverlays, normalize(o.URI().Path()))
}
if diff := cmp.Diff(test.wantOverlays, gotOverlays); diff != "" {
t.Errorf("Overlays mismatch (-want +got):\n%s", diff)
}

var gotDirs []string
m.Dirs().Range(func(dir string) {
m.getDirs().Range(func(dir string) {
gotDirs = append(gotDirs, normalize(dir))
})
sort.Strings(gotDirs)
Expand Down
28 changes: 14 additions & 14 deletions gopls/internal/lsp/cache/fs_memoized.go
Original file line number Diff line number Diff line change
Expand Up @@ -24,42 +24,42 @@ type memoizedFS struct {
// filesByID maps existing file inodes to the result of a read.
// (The read may have failed, e.g. due to EACCES or a delete between stat+read.)
// Each slice is a non-empty list of aliases: different URIs.
filesByID map[robustio.FileID][]*DiskFile
filesByID map[robustio.FileID][]*diskFile
}

func newMemoizedFS() *memoizedFS {
return &memoizedFS{filesByID: make(map[robustio.FileID][]*DiskFile)}
return &memoizedFS{filesByID: make(map[robustio.FileID][]*diskFile)}
}

// A DiskFile is a file on the filesystem, or a failure to read one.
// A diskFile is a file in the filesystem, or a failure to read one.
// It implements the source.FileHandle interface.
type DiskFile struct {
type diskFile struct {
uri protocol.DocumentURI
modTime time.Time
content []byte
hash file.Hash
err error
}

func (h *DiskFile) URI() protocol.DocumentURI { return h.uri }
func (h *diskFile) URI() protocol.DocumentURI { return h.uri }

func (h *DiskFile) Identity() file.Identity {
func (h *diskFile) Identity() file.Identity {
return file.Identity{
URI: h.uri,
Hash: h.hash,
}
}

func (h *DiskFile) SameContentsOnDisk() bool { return true }
func (h *DiskFile) Version() int32 { return 0 }
func (h *DiskFile) Content() ([]byte, error) { return h.content, h.err }
func (h *diskFile) SameContentsOnDisk() bool { return true }
func (h *diskFile) Version() int32 { return 0 }
func (h *diskFile) Content() ([]byte, error) { return h.content, h.err }

// ReadFile stats and (maybe) reads the file, updates the cache, and returns it.
func (fs *memoizedFS) ReadFile(ctx context.Context, uri protocol.DocumentURI) (file.Handle, error) {
id, mtime, err := robustio.GetFileID(uri.Path())
if err != nil {
// file does not exist
return &DiskFile{
return &diskFile{
err: err,
uri: uri,
}, nil
Expand All @@ -79,7 +79,7 @@ func (fs *memoizedFS) ReadFile(ctx context.Context, uri protocol.DocumentURI) (f
fs.mu.Lock()
fhs, ok := fs.filesByID[id]
if ok && fhs[0].modTime.Equal(mtime) {
var fh *DiskFile
var fh *diskFile
// We have already seen this file and it has not changed.
for _, h := range fhs {
if h.uri == uri {
Expand Down Expand Up @@ -108,7 +108,7 @@ func (fs *memoizedFS) ReadFile(ctx context.Context, uri protocol.DocumentURI) (f

fs.mu.Lock()
if !recentlyModified {
fs.filesByID[id] = []*DiskFile{fh}
fs.filesByID[id] = []*diskFile{fh}
} else {
delete(fs.filesByID, id)
}
Expand Down Expand Up @@ -141,7 +141,7 @@ func (fs *memoizedFS) fileStats() (files, largest, errs int) {
// ioLimit limits the number of parallel file reads per process.
var ioLimit = make(chan struct{}, 128)

func readFile(ctx context.Context, uri protocol.DocumentURI, mtime time.Time) (*DiskFile, error) {
func readFile(ctx context.Context, uri protocol.DocumentURI, mtime time.Time) (*diskFile, error) {
select {
case ioLimit <- struct{}{}:
case <-ctx.Done():
Expand All @@ -161,7 +161,7 @@ func readFile(ctx context.Context, uri protocol.DocumentURI, mtime time.Time) (*
if err != nil {
content = nil // just in case
}
return &DiskFile{
return &diskFile{
modTime: mtime,
uri: uri,
content: content,
Expand Down
Loading

0 comments on commit bd547e5

Please sign in to comment.