// cachedFetch will open a file from the local cache with key. If missing,
// fetcher will fill the cache first. cachedFetch also performs
// single-flighting.
func cachedFetch(ctx context.Context, key string, s *diskcache.Store, fetcher func(context.Context) (io.ReadCloser, error)) (ff *cachedFile, err error) {
f, err := s.Open(ctx, key, fetcher)
if err != nil {
return nil, err
}
return &cachedFile{
File: f.File,
path: f.Path,
}, nil
}
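// Illustrative sketch, not part of the original source: using cachedFetch to
// memoize an HTTP download on disk. The key and URL are hypothetical, and we
// assume cachedFile embeds the returned *os.File (so Close is promoted).
// Concurrent callers passing the same key share a single fetch.
func exampleCachedFetch(ctx context.Context, s *diskcache.Store) error {
f, err := cachedFetch(ctx, "example-key", s, func(ctx context.Context) (io.ReadCloser, error) {
resp, err := http.Get("https://example.com/archive.zip") // hypothetical URL
if err != nil {
return nil, err
}
return resp.Body, nil
})
if err != nil {
return err
}
return f.Close()
}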
// determineEnvironment will set up the language server InitializeParams based
// on what it can detect from the filesystem and what it received from the client's
// InitializeParams.
//
// It is expected that fs will be mounted at InitializeParams.RootURI.
func determineEnvironment(ctx context.Context, fs ctxvfs.FileSystem, params lspext.InitializeParams) (*langserver.InitializeParams, error) {
rootImportPath, err := determineRootImportPath(ctx, params.OriginalRootURI, fs)
if err != nil {
return nil, fmt.Errorf("unable to determine workspace's root Go import path: %s (original rootPath is %q)", err, params.OriginalRootURI)
}
// Sanity-check the import path.
if rootImportPath == "" || rootImportPath != path.Clean(rootImportPath) || strings.Contains(rootImportPath, "..") || strings.HasPrefix(rootImportPath, string(os.PathSeparator)) || strings.HasPrefix(rootImportPath, "/") || strings.HasPrefix(rootImportPath, ".") {
return nil, fmt.Errorf("empty or suspicious import path: %q", rootImportPath)
}
// Put all files in the workspace under a /src/IMPORTPATH
// directory, such as /src/github.com/foo/bar, so that Go can
// build it in GOPATH=/.
var rootPath string
if rootImportPath == "github.com/golang/go" {
// stdlib means our rootpath is the GOPATH
rootPath = goroot
rootImportPath = ""
} else {
rootPath = "/src/" + rootImportPath
}
GOPATH := gopath
if customGOPATH := detectCustomGOPATH(ctx, fs); len(customGOPATH) > 0 {
// Convert list of relative GOPATHs into absolute. We can have
// more than one so we root ourselves at /workspace. We still
// append the default GOPATH of `/` at the end. Fetched
// dependencies will be mounted at that location.
rootPath = "/workspace"
rootImportPath = ""
for i := range customGOPATH {
customGOPATH[i] = rootPath + customGOPATH[i]
}
customGOPATH = append(customGOPATH, gopath)
GOPATH = strings.Join(customGOPATH, ":")
}
// Send "initialize" to the wrapped lang server.
langInitParams := &langserver.InitializeParams{
InitializeParams: params.InitializeParams,
NoOSFileSystemAccess: true,
BuildContext: &langserver.InitializeBuildContextParams{
GOOS: goos,
GOARCH: goarch,
GOPATH: GOPATH,
GOROOT: goroot,
CgoEnabled: false,
Compiler: gocompiler,
// TODO(sqs): We'd like to set this to true only for
// the package we're analyzing (or for the whole
// repo), but go/loader is insufficiently
// configurable, so it applies it to the entire
// program, which takes a lot longer and causes weird
// error messages in the runtime package, etc. Disable
// it for now.
UseAllFiles: false,
},
}
langInitParams.RootURI = lsp.DocumentURI("file://" + rootPath)
langInitParams.RootImportPath = rootImportPath
return langInitParams, nil
}
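// Illustrative sketch, not part of the original source: for a repository
// cloned from git://github.com/foo/bar (a hypothetical URL), the values
// computed above would be
//
//	rootImportPath == "github.com/foo/bar"
//	rootPath       == "/src/github.com/foo/bar"
//	RootURI        == "file:///src/github.com/foo/bar"
//
// so the wrapped language server can build the repository with GOPATH=/.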
// detectCustomGOPATH tries to detect monorepos which require their own custom
// GOPATH.
//
// This is best-effort. If any errors occur or we do not detect a custom
// gopath, an empty result is returned.
func detectCustomGOPATH(ctx context.Context, fs ctxvfs.FileSystem) (gopaths []string) {
// If we detect any .sourcegraph/config.json GOPATHs then they take
// absolute precedence and override all others.
if paths := detectSourcegraphGOPATH(ctx, fs); len(paths) > 0 {
return paths
}
// Check .vscode/settings.json and .envrc files, giving them equal precedence.
if paths := detectVSCodeGOPATH(ctx, fs); len(paths) > 0 {
gopaths = append(gopaths, paths...)
}
if paths := detectEnvRCGOPATH(ctx, fs); len(paths) > 0 {
gopaths = append(gopaths, paths...)
}
return
}
// detectVSCodeGOPATH tries to detect monorepos which require their own custom
// GOPATH. We want to support monorepos as described in
// https://blog.gopheracademy.com/advent-2015/go-in-a-monorepo/
// We use .vscode/settings.json to be informed of the custom GOPATH.
//
// This is best-effort. If any errors occur or we do not detect a custom
// gopath, an empty result is returned.
func detectVSCodeGOPATH(ctx context.Context, fs ctxvfs.FileSystem) []string {
const settingsPath = ".vscode/settings.json"
b, err := ctxvfs.ReadFile(ctx, fs, "/"+settingsPath)
if err != nil {
return nil
}
settings := struct {
GOPATH string `json:"go.gopath"`
}{}
if err := unmarshalJSONC(string(b), &settings); err != nil {
log15.Warn("Failed to parse JSON in "+settingsPath+" file. Treating as empty.", "err", err)
}
var paths []string
for _, p := range filepath.SplitList(settings.GOPATH) {
// We only care about relative gopaths
if !strings.HasPrefix(p, "${workspaceRoot}") {
continue
}
paths = append(paths, p[len("${workspaceRoot}"):])
}
return paths
}
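// Illustrative sketch, not part of the original source: a .vscode/settings.json
// such as
//
//	{
//	    // comments are fine here, see unmarshalJSONC below
//	    "go.gopath": "${workspaceRoot}/golib:/home/me/go",
//	}
//
// would yield []string{"/golib"}; the absolute /home/me/go entry is skipped
// because only ${workspaceRoot}-relative entries are kept.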
// unmarshalJSONC unmarshals the JSON using a fault-tolerant parser that allows comments
// and trailing commas. If any unrecoverable faults are found, an error is returned.
func unmarshalJSONC(text string, v interface{}) error {
data, errs := jsonx.Parse(text, jsonx.ParseOptions{Comments: true, TrailingCommas: true})
if len(errs) > 0 {
return fmt.Errorf("failed to parse JSON: %v", errs)
}
return json.Unmarshal(data, v)
}
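// Illustrative sketch, not part of the original source: unmarshalJSONC accepts
// input that encoding/json alone would reject, such as comments and trailing
// commas (as found in .vscode/settings.json).
func exampleUnmarshalJSONC() {
var v struct {
GOPATH string `json:"go.gopath"`
}
input := `{
// a comment
"go.gopath": "${workspaceRoot}/go",
}`
if err := unmarshalJSONC(input, &v); err != nil {
log.Println(err)
}
// v.GOPATH is now "${workspaceRoot}/go"
}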
// detectEnvRCGOPATH tries to detect monorepos which require their own custom
// GOPATH. We want to support monorepos such as the ones described in
// http://tammersaleh.com/posts/manage-your-gopath-with-direnv/
// We use $REPO_ROOT/.envrc to be informed of the custom GOPATH. We support
// any line matching one of the two formats below (because we do not want to
// actually execute .envrc):
//
// export GOPATH=VALUE
// GOPATH_add VALUE
//
// Where "VALUE" may be any of:
//
// some/relative/path
// one/:two:three/
// ${PWD}/path
// $(PWD)/path
// `pwd`/path
//
// Or any of the above with double or single quotes wrapped around them. We
// will ignore any absolute path values.
func detectEnvRCGOPATH(ctx context.Context, fs ctxvfs.FileSystem) (gopaths []string) {
b, err := ctxvfs.ReadFile(ctx, fs, "/.envrc")
if err != nil {
return nil
}
scanner := bufio.NewScanner(bytes.NewReader(b))
for scanner.Scan() {
value := ""
line := scanner.Text()
if prefixStr := "export GOPATH="; strings.HasPrefix(line, prefixStr) {
value = strings.TrimSpace(strings.TrimPrefix(line, prefixStr))
} else if prefixStr := "GOPATH_add "; strings.HasPrefix(line, prefixStr) {
value = strings.TrimSpace(strings.TrimPrefix(line, prefixStr))
} else {
continue // no value
}
value = unquote(value, `"`) // remove double quotes
value = unquote(value, `'`) // remove single quotes
for _, value := range strings.Split(value, ":") {
if strings.HasPrefix(value, "/") {
// Not interested in absolute paths.
continue
}
// Replace any form of PWD with an empty string (so we get a path
// relative to repo root).
value = strings.Replace(value, "${PWD}", "", -1)
value = strings.Replace(value, "$(PWD)", "", -1)
value = strings.Replace(value, "`pwd`", "", -1)
if !strings.HasPrefix(value, "/") {
value = "/" + value
}
gopaths = append(gopaths, value)
}
}
_ = scanner.Err() // discarded intentionally
return
}
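// Illustrative sketch, not part of the original source: given an .envrc
// containing the two supported line formats, e.g.
//
//	export GOPATH="${PWD}/go:vendor/go"
//	GOPATH_add tools
//
// detectEnvRCGOPATH would return []string{"/go", "/vendor/go", "/tools"},
// i.e. each relative entry rooted at the repository root.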
// unquote removes the given quote string (either `'` or `"`) from the given
// string if it is wrapped in them.
func unquote(s, quote string) string {
// Require both the leading and trailing quote so that only values actually
// wrapped in the quote are altered.
if !strings.HasPrefix(s, quote) || !strings.HasSuffix(s, quote) {
return s
}
s = strings.TrimPrefix(s, quote)
s = strings.TrimSuffix(s, quote)
return s
}
// detectSourcegraphGOPATH tries to detect monorepos which require their own custom
// GOPATH. We detect a .sourcegraph/config.json file with the following
// contents:
//
// {
// "go": {
// "GOPATH": ["gopathdir", "gopathdir2"]
// }
// }
//
// See the sourcegraphConfig struct documentation for more info.
//
// This is best-effort. If any errors occur or we do not detect a custom
// gopath, an empty result is returned.
func detectSourcegraphGOPATH(ctx context.Context, fs ctxvfs.FileSystem) (gopaths []string) {
cfg := readSourcegraphConfig(ctx, fs)
for _, p := range cfg.Go.GOPATH {
if !strings.HasPrefix(p, "/") {
// Assume all paths are relative to repo root.
p = "/" + p
}
gopaths = append(gopaths, p)
}
return
}
// readSourcegraphConfig reads the .sourcegraph/config.json file from the
// repository root if it exists; otherwise it returns an empty struct value (not nil).
func readSourcegraphConfig(ctx context.Context, fs ctxvfs.FileSystem) *sourcegraphConfig {
config := sourcegraphConfig{}
b, err := ctxvfs.ReadFile(ctx, fs, "/.sourcegraph/config.json")
if err != nil {
return &config
}
_ = json.Unmarshal(b, &config)
return &config
}
// determineRootImportPath determines the root import path for the Go
// workspace. It looks at canonical import path comments and the
// repo's original clone URL to infer it.
//
// It's intended to handle cases like
// github.com/kubernetes/kubernetes, which has doc.go files that
// indicate its root import path is k8s.io/kubernetes.
func determineRootImportPath(ctx context.Context, originalRootURI lsp.DocumentURI, fs ctxvfs.FileSystem) (rootImportPath string, err error) {
if originalRootURI == "" {
return "", errors.New("unable to determine Go workspace root import path without due to empty root path")
}
u, err := gituri.Parse(string(originalRootURI))
if err != nil {
return "", err
}
if path.Join(u.Host, u.Path) == "github.com/golang/go" {
return "github.com/golang/go", nil
}
switch u.Scheme {
case "git":
rootImportPath = path.Join(u.Host, strings.TrimSuffix(u.Path, ".git"), u.FilePath())
default:
return "", fmt.Errorf("unrecognized originalRootPath: %q", u)
}
// If.sourcegraph/config.json specifies a root import path to use, then
// use that one above all else.
cfg := readSourcegraphConfig(ctx, fs)
if v := cfg.Go.RootImportPath; v != "" {
return v, nil
}
// Glide provides a canonical import path for us, try that first if it
// exists.
yml, err := ctxvfs.ReadFile(ctx, fs, "/glide.yaml")
if err == nil && len(yml) > 0 {
glide := struct {
Package string `yaml:"package"`
// There are other fields, but we don't use them
}{}
// best effort, so ignore error if we have a badly formatted
// yml file
_ = yaml.Unmarshal(yml, &glide)
if glide.Package != "" {
return glide.Package, nil
}
}
// Now scan for canonical import path comments. This is a
// heuristic; it is not guaranteed to produce the right result
// (e.g., you could have multiple files with different canonical
// import path comments that don't share a prefix, which is weird
// and would break this).
//
// Since we have not yet set h.FS, we need to use the passed in fs.
w := ctxvfs.Walk(ctx, "/", fs)
const maxSlashes = 4 // heuristic, shouldn't need to traverse too deep to find this out
const maxFiles = 25 // heuristic, shouldn't need to read too many files to find this out
numFiles := 0
for w.Step() {
if err := w.Err(); err != nil {
return "", err
}
fi := w.Stat()
if fi.Mode().IsDir() && ((fi.Name() != "." && strings.HasPrefix(fi.Name(), ".")) || fi.Name() == "examples" || fi.Name() == "Godeps" || fi.Name() == "vendor" || fi.Name() == "third_party" || strings.HasPrefix(fi.Name(), "_") || strings.Count(w.Path(), "/") >= maxSlashes) {
w.SkipDir()
continue
}
if strings.HasSuffix(fi.Name(), ".go") {
if numFiles >= maxFiles {
// Instead of breaking, we SkipDir here so that we
// ensure we always read all files in the root dir (to
// improve the heuristic hit rate). We will not read
// any more subdir files after calling SkipDir, which
// is what we want.
w.SkipDir()
}
numFiles++
// For perf, read for the canonical import path 1 file at
// a time instead of using build.Import, which always
// reads all the files.
contents, err := ctxvfs.ReadFile(ctx, fs, w.Path())
if err != nil {
return "", err
}
canonImportPath, err := readCanonicalImportPath(contents)
if err == nil && canonImportPath != "" {
// Chop off the subpackage path.
parts := strings.Split(canonImportPath, "/")
popComponents := strings.Count(w.Path(), "/") - 1
if len(parts) <= popComponents {
return "", fmt.Errorf("invalid canonical import path %q in file at path %q", canonImportPath, w.Path())
}
return strings.Join(parts[:len(parts)-popComponents], "/"), nil
}
}
}
// No canonical import path found, using our heuristics. Use the
// root import path derived from the repo's clone URL.
return rootImportPath, nil
}
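// Illustrative sketch, not part of the original source: a canonical import
// path comment is a clause on the package statement, e.g. in /pkg/foo/doc.go:
//
//	package foo // import "k8s.io/kubernetes/pkg/foo"
//
// There w.Path() == "/pkg/foo/doc.go" contains 3 slashes, so popComponents is
// 2 and the root import path returned above is "k8s.io/kubernetes".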
// deref returns a pointer's element type; otherwise it returns typ.
func deref(typ types.Type) types.Type {
if p, ok := typ.Underlying().(*types.Pointer); ok {
return p.Elem()
}
return typ
}
// get returns a mutex unique to the given key.
func (k *keyMutex) get(key string) *sync.Mutex {
k.mu.Lock()
mu, ok := k.mus[key]
if !ok {
mu = &sync.Mutex{}
k.mus[key] = mu
}
k.mu.Unlock()
return mu
}
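// Illustrative sketch, not part of the original source: keyMutex serializes
// work per key while letting distinct keys proceed concurrently. fill is a
// hypothetical callback that populates the resource identified by key.
func exampleKeyMutex(km *keyMutex, key string, fill func()) {
mu := km.get(key)
mu.Lock()
defer mu.Unlock()
fill() // at most one goroutine runs fill for a given key at a time
}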
// fetchTransitiveDepsOfFile fetches the transitive dependencies of
// the named Go file. A Go file's dependencies are the imports of its
// own package, plus all of its imports' imports, and so on.
//
// It adds fetched dependencies to its own file system overlay, and
// the returned depFiles should be passed onto the language server to
// add to its overlay.
func (h *BuildHandler) fetchTransitiveDepsOfFile(ctx context.Context, fileURI lsp.DocumentURI, dc *depCache) (err error) {
parentSpan := opentracing.SpanFromContext(ctx)
span := parentSpan.Tracer().StartSpan("go-langserver-go: fetch transitive dependencies",
opentracing.Tags{"fileURI": fileURI},
opentracing.ChildOf(parentSpan.Context()),
)
ctx = opentracing.ContextWithSpan(ctx, span)
defer func() {
if err != nil {
ext.Error.Set(span, true)
span.LogFields(otlog.Error(err))
}
span.Finish()
}()
bctx := h.lang.BuildContext(ctx)
filename := h.FilePath(fileURI)
bpkg, err := langserver.ContainingPackage(bctx, filename, h.RootFSPath)
if err != nil && !isMultiplePackageError(err) {
return err
}
err = doDeps(bpkg, 0, dc, func(path, srcDir string, mode build.ImportMode) (*build.Package, error) {
return h.doFindPackage(ctx, bctx, path, srcDir, mode, dc)
})
return err
}
// findPackage is a langserver.FindPackageFunc which integrates with the build
// server. It will fetch dependencies just in time.
func (h *BuildHandler) findPackage(ctx context.Context, bctx *build.Context, path, srcDir string, mode build.ImportMode) (*build.Package, error) {
return h.doFindPackage(ctx, bctx, path, srcDir, mode, newDepCache())
}
// isUnderRootImportPath tells if the given path is under the given root import path.
func isUnderRootImportPath(rootImportPath, path string) bool {
return rootImportPath != "" && util.PathHasPrefix(path, rootImportPath)
}
// FetchCommonDeps will fetch our commonly used dependencies. This is to avoid
// impacting the first ever typecheck we do in a repo since it will have to
// fetch the dependency from the internet.
func FetchCommonDeps() {
// github.com/golang/go
d, _ := gosrc.ResolveImportPath(http.DefaultClient, "time")
u, _ := url.Parse(d.CloneURL)
_, _ = NewDepRepoVFS(context.Background(), u, d.Rev, nil)
}
// namedOf returns the named type T when given T or *T.
// Otherwise, it returns nil.
func namedOf(typ types.Type) *types.Named {
if ptr, isPtr := typ.(*types.Pointer); isPtr {
typ = ptr.Elem()
}
res, _ := typ.(*types.Named)
return res
}
// NewZipVFS downloads a zip archive from a URL (or fetches from the local cache
// on disk) and returns a new VFS backed by that zip archive.
func NewZipVFS(ctx context.Context, url string, onFetchStart, onFetchFailed func(), evictOnClose bool) (*ArchiveFS, error) {
request, err := http.NewRequest("HEAD", url, nil)
if err != nil {
return nil, errors.Wrapf(err, "failed to construct a new request with URL %s", url)
}
setAuthFromNetrc(request)
response, err := ctxhttp.Do(ctx, nil, request)
if err != nil {
return nil, err
}
defer response.Body.Close()
if response.StatusCode != http.StatusOK {
return nil, fmt.Errorf("unable to fetch zip from %s (expected HTTP response code 200, but got %d)", url, response.StatusCode)
}
fetch := func(ctx context.Context) (ar *archiveReader, err error) {
span, ctx := opentracing.StartSpanFromContext(ctx, "zip Fetch")
ext.Component.Set(span, "zipvfs")
span.SetTag("url", url)
defer func() {
if err != nil {
ext.Error.Set(span, true)
span.SetTag("err", err)
}
span.Finish()
}()
store := &diskcache.Store{
Dir: filepath.Join(ArchiveCacheDir, "zipvfs"),
Component: "zipvfs",
MaxCacheSizeBytes: MaxCacheSizeBytes,
}
ff, err := cachedFetch(ctx, withoutAuth(url), store, func(ctx context.Context) (io.ReadCloser, error) {
onFetchStart()
request, err := http.NewRequest("GET", url, nil)
if err != nil {
return nil, errors.Wrapf(err, "failed to construct a new request with URL %s", url)
}
request.Header.Add("Accept", "application/zip")
setAuthFromNetrc(request)
resp, err := ctxhttp.Do(ctx, nil, request)
if err != nil {
return nil, errors.Wrapf(err, "failed to fetch zip archive from %s", url)
}
if resp.StatusCode != http.StatusOK {
resp.Body.Close()
return nil, errors.Errorf("zip URL %s returned HTTP %d", url, resp.StatusCode)
}
return resp.Body, nil
})
if err != nil {
onFetchFailed()
return nil, errors.Wrapf(err, "failed to fetch/write/open zip archive from %s", url)
}
f := ff.File
zr, err := zipNewFileReader(f)
if err != nil {
f.Close()
return nil, errors.Wrapf(err, "failed to read zip archive from %s", url)
}
if len(zr.File) == 0 {
f.Close()
return nil, errors.Errorf("zip archive from %s is empty", url)
}
return &archiveReader{
Reader: zr,
Closer: f,
StripTopLevelDir: true,
Evicter: store,
}, nil
}
return &ArchiveFS{fetch: fetch, EvictOnClose: evictOnClose}, nil
}
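// Illustrative sketch, not part of the original source: fetching a codeload
// archive through NewZipVFS. The URL is hypothetical, and the no-op callbacks
// stand in for the metrics hooks used by NewGitHubRepoVFS below.
func exampleNewZipVFS(ctx context.Context) (*ArchiveFS, error) {
noop := func() {}
return NewZipVFS(ctx, "https://codeload.github.com/golang/tools/zip/master", noop, noop, false)
}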
// Create a new URL that doesn't include the user:password (the access
// token) so that the same repository at a revision for a different user
// results in a cache hit.
func withoutAuth(urlString string) string {
u, err := url.Parse(urlString)
if err != nil {
return urlString
}
u.User = nil
return u.String()
}
// packageStatementName returns the package name ((*ast.Ident).Name)
// of node iff node is the package statement of a file ("package p").
func packageStatementName(fset *token.FileSet, files []*ast.File, node *ast.Ident) string {
for _, f := range files {
if f.Name == node {
return node.Name
}
}
return ""
}
// maybeAddComments appends the specified comments converted to Markdown godoc
// form to the specified contents slice, if the comments string is not empty.
func maybeAddComments(comments string, contents []lsp.MarkedString) []lsp.MarkedString {
if comments == "" {
return contents
}
var b bytes.Buffer
doc.ToMarkdown(&b, comments, nil)
return append(contents, lsp.RawMarkedString(b.String()))
}
// joinCommentGroups joins the resultant non-empty comment text from two
// CommentGroups with a newline.
func joinCommentGroups(a, b *ast.CommentGroup) string {
aText := a.Text()
bText := b.Text()
if aText == "" {
return bText
} else if bText == "" {
return aText
} else {
return aText + "\n" + bText
}
}
// packageDoc finds the documentation for the named package from its files or
// additional files.
func packageDoc(files []*ast.File, pkgName string) string {
for _, f := range files {
if f.Name.Name == pkgName {
txt := f.Doc.Text()
if strings.TrimSpace(txt) != "" {
return txt
}
}
}
return ""
}
// builtinDoc finds the documentation for a builtin node.
func builtinDoc(ident string) []lsp.MarkedString {
// Grab files from builtin package
pkgs, err := packages.Load(
&packages.Config{
Mode: packages.LoadFiles,
},
"builtin",
)
if err != nil {
return nil
}
// Parse the files into ASTs
if len(pkgs) == 0 {
return nil
}
pkg := pkgs[0]
fs := token.NewFileSet()
asts := &ast.Package{
Name: "builtin",
Files: make(map[string]*ast.File),
}
for _, filename := range pkg.GoFiles {
file, err := parser.ParseFile(fs, filename, nil, parser.ParseComments)
if err != nil {
fmt.Println(err.Error())
}
asts.Files[filename] = file
}
// Extract documentation and declaration from the ASTs
docs := doc.New(asts, "builtin", doc.AllDecls)
node, pos := findDocIdent(docs, ident)
contents, _ := fmtDocObject(fs, node, fs.Position(pos))
return contents
}
// findDocIdent walks an input *doc.Package and locates the *doc.Value,
// *doc.Type, or *doc.Func with the given identifier.
func findDocIdent(docs *doc.Package, ident string) (node interface{}, pos token.Pos) {
searchFuncs := func(funcs []*doc.Func) bool {
for _, f := range funcs {
if f.Name == ident {
node = f
pos = f.Decl.Pos()
return true
}
}
return false
}
searchVars := func(vars []*doc.Value) bool {
for _, v := range vars {
for _, spec := range v.Decl.Specs {
switch t := spec.(type) {
case *ast.ValueSpec:
for _, name := range t.Names {
if name.Name == ident {
node = v
pos = name.Pos()
return true
}
}
}
}
}
return false
}
if searchFuncs(docs.Funcs) {
return
}
if searchVars(docs.Consts) {
return
}
if searchVars(docs.Vars) {
return
}
for _, t := range docs.Types {
if t.Name == ident {
node = t
pos = t.Decl.Pos()
return
}
if searchFuncs(t.Funcs) {
return
}
if searchVars(t.Consts) {
return
}
if searchVars(t.Vars) {
return
}
}
return
}
// commentsToText converts a slice of []*ast.CommentGroup to a flat string,
// ensuring whitespace-only comment groups are dropped.
func commentsToText(cgroups []*ast.CommentGroup) (text string) {
for _, c := range cgroups {
if strings.TrimSpace(c.Text()) != "" {
text += c.Text()
}
}
return text
}
// prettyPrintTypesString is pretty printing specific to the output of
// types.*String. Instead of re-implementing the printer, we can just
// transform its output.
func prettyPrintTypesString(s string) string {
// Don't bother including the fields if it is empty
if strings.HasSuffix(s, "{}") {
return ""
}
var b bytes.Buffer
b.Grow(len(s))
depth := 0
for i := 0; i < len(s); i++ {
c := s[i]
switch c {
case ';':
b.WriteByte('\n')
for j := 0; j < depth; j++ {
b.WriteString(" ")
}
// Skip following space
i++
case '{':
if i == len(s)-1 {
// This should never happen, but in case it
// does give up
return s
}
n := s[i+1]
if n == '}' {
// Do not modify {}
b.WriteString("{}")
// We have already written }, so skip
i++
} else {
// We expect fields to follow, insert a newline and space
depth++
b.WriteString(" {\n")
for j := 0; j < depth; j++ {
b.WriteString(" ")
}
}
case '}':
depth--
if depth < 0 {
return s
}
b.WriteString("\n}")
default:
b.WriteByte(c)
}
}
return b.String()
}
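// Illustrative sketch, not part of the original source: the transformation
// expands the single-line output of the types.*String helpers, e.g.
//
//	prettyPrintTypesString("struct{Host string; Port int}")
//
// produces (indentation width depends on the indent string written above):
//
//	struct {
//	    Host string
//	    Port int
//	}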
// packageForFile returns the import path and pkg from pkgs that contains the
// named file.
func packageForFile(pkgs map[string]*ast.Package, filename string) (string, *ast.Package, error) {
for path, pkg := range pkgs {
for pkgFile := range pkg.Files {
if pkgFile == filename {
return path, pkg, nil
}
}
}
return "", nil, fmt.Errorf("failed to find %q in packages %q", filename, pkgs)
}
// inRange tells if x is in the range of a-b inclusive.
func inRange(x, a, b token.Position) bool {
if !util.PathEqual(x.Filename, a.Filename) || !util.PathEqual(x.Filename, b.Filename) {
return false
}
return x.Offset >= a.Offset && x.Offset <= b.Offset
}
// findDocTarget walks an input *doc.Package and locates the *doc.Value,
// *doc.Type, or *doc.Func for the given target position.
func findDocTarget(fset *token.FileSet, target token.Position, in interface{}) interface{} {
switch v := in.(type) {
case *doc.Package:
for _, x := range v.Consts {
if r := findDocTarget(fset, target, x); r != nil {
return r
}
}
for _, x := range v.Types {
if r := findDocTarget(fset, target, x); r != nil {
return r
}
}
for _, x := range v.Vars {
if r := findDocTarget(fset, target, x); r != nil {
return r
}
}
for _, x := range v.Funcs {
if r := findDocTarget(fset, target, x); r != nil {
return r
}
}
return nil
case *doc.Value:
if inRange(target, fset.Position(v.Decl.Pos()), fset.Position(v.Decl.End())) {
return v
}
return nil
case *doc.Type:
if inRange(target, fset.Position(v.Decl.Pos()), fset.Position(v.Decl.End())) {
return v
}
for _, x := range v.Consts {
if r := findDocTarget(fset, target, x); r != nil {
return r
}
}
for _, x := range v.Vars {
if r := findDocTarget(fset, target, x); r != nil {
return r
}
}
for _, x := range v.Funcs {
if r := findDocTarget(fset, target, x); r != nil {
return r
}
}
for _, x := range v.Methods {
if r := findDocTarget(fset, target, x); r != nil {
return r
}
}
return nil
case *doc.Func:
if inRange(target, fset.Position(v.Decl.Pos()), fset.Position(v.Decl.End())) {
return v
}
return nil
default:
panic("unreachable")
}
}
// fmtDocObject formats one of:
//
// *doc.Value
// *doc.Type
// *doc.Func
//
func fmtDocObject(fset *token.FileSet, x interface{}, target token.Position) ([]lsp.MarkedString, ast.Node) {
switch v := x.(type) {
case *doc.Value: // Vars and Consts
// Sort the specs by distance to find the one nearest to target.
sort.Sort(byDistance{v.Decl.Specs, fset, target})
spec := v.Decl.Specs[0].(*ast.ValueSpec)
// Use the doc directly above the var inside a var() block, or if there
// is none, fall back to the doc directly above the var() block.
doc := spec.Doc.Text()
if doc == "" {
doc = v.Doc
}
// Create a copy of the spec with no doc for formatting separately.
cpy := *spec
cpy.Doc = nil
value := v.Decl.Tok.String() + " " + fmtNode(fset, &cpy)
return maybeAddComments(doc, []lsp.MarkedString{{Language: "go", Value: value}}), spec
case *doc.Type: // Type declarations
spec := v.Decl.Specs[0].(*ast.TypeSpec)
// Handle interfaces methods and struct fields separately now.
switch s := spec.Type.(type) {
case *ast.InterfaceType:
// Find the method that is an exact match for our target position.
for _, field := range s.Methods.List {
if fset.Position(field.Pos()).Offset == target.Offset {
// An exact match.
value := fmt.Sprintf("func (%s).%s%s", spec.Name.Name, field.Names[0].Name, strings.TrimPrefix(fmtNode(fset, field.Type), "func"))
return maybeAddComments(field.Doc.Text(), []lsp.MarkedString{{Language: "go", Value: value}}), field
}
}
case *ast.StructType:
// Find the field that is an exact match for our target position.
for _, field := range s.Fields.List {
if fset.Position(field.Pos()).Offset == target.Offset {
// An exact match.
value := fmt.Sprintf("struct field %s %s", field.Names[0], fmtNode(fset, field.Type))
// Concat associated documentation with any inline comments
comments := joinCommentGroups(field.Doc, field.Comment)
return maybeAddComments(comments, []lsp.MarkedString{{Language: "go", Value: value}}), field
}
}
}
// Formatting of all type declarations: structs, interfaces, integers, etc.
name := v.Decl.Tok.String() + " " + spec.Name.Name + " " + typeName(fset, spec.Type)
res := []lsp.MarkedString{{Language: "go", Value: name}}
doc := spec.Doc.Text()
if doc == "" {
doc = v.Doc
}
res = maybeAddComments(doc, res)
if n := typeName(fset, spec.Type); n == "interface" || n == "struct" {
res = append(res, lsp.MarkedString{Language: "go", Value: fmtNode(fset, spec.Type)})
}
return res, spec
case *doc.Func: // Functions
return maybeAddComments(v.Doc, []lsp.MarkedString{{Language: "go", Value: fmtNode(fset, v.Decl)}}), v.Decl
default:
panic("unreachable")
}
}
// typeName returns the name of typ, shortening interface and struct types to
// just "interface" and "struct" rather than their full contents (incl. methods
// and fields).
func typeName(fset *token.FileSet, typ ast.Expr) string {
switch typ.(type) {
case *ast.InterfaceType:
return "interface"
case *ast.StructType:
return "struct"
default:
return fmtNode(fset, typ)
}
}
// fmtNode formats the given node as a string.
func fmtNode(fset *token.FileSet, n ast.Node) string {
var buf bytes.Buffer
err := format.Node(&buf, fset, n)
if err != nil {
panic("unreachable")
}
return buf.String()
}
// deepRecvType gets the embedded struct's name that the method or
// field is actually defined on, not just the original/outer recv
// type.
func deepRecvType(sel *types.Selection) types.Type {
offset := 1
if sel.Kind() == types.MethodVal || sel.Kind() == types.MethodExpr {
offset = 0
}
typ := sel.Recv()
idx := sel.Index()
for k, i := range idx[:len(idx)-offset] {
final := k == len(idx)-offset-1
t := getMethod(typ, i, final, sel.Kind() != types.FieldVal)
if t == nil {
log.Printf("failed to get method/field at index %v on recv %s", idx, typ)
return nil
}
typ = t.Type()
}
return typ
}
// dereferenceType finds the "root" type of a thing, meaning
// the type pointed-to by a pointer, the element type of
// a slice or array, or the object type of a chan. The special
// case for Map is because a Map would also have a key, and
// you might be interested in either of those.
func dereferenceType(otyp types.Type) types.Type {
for {
switch typ := otyp.(type) {
case *types.Map:
return otyp
case dereferencable:
otyp = typ.Elem()
default:
return otyp
}
}
}
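// dereferencable is used above and in typeLookup but is not shown in this
// excerpt; presumably it is an interface over element-carrying types, along
// the lines of:
//
//	type dereferencable interface {
//		Elem() types.Type
//	}
//
// which pointers, slices, arrays, and channels all satisfy.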
// buildPackageForNamedFileInMultiPackageDir returns a package that
// refers to the package named by filename. If there are multiple
// (e.g.) main packages in a dir in separate files, this lets you
// synthesize a *build.Package that just refers to one. It's necessary
// to handle that case.
func buildPackageForNamedFileInMultiPackageDir(bpkg *build.Package, m *build.MultiplePackageError, filename string) (*build.Package, error) {
copy := *bpkg
bpkg = &copy
// First, find which package name each filename is in.
fileToPkgName := make(map[string]string, len(m.Files))
for i, f := range m.Files {
fileToPkgName[f] = m.Packages[i]
}
pkgName := fileToPkgName[filename]
if pkgName == "" {
return nil, fmt.Errorf("package %q in %s has no file %q", bpkg.ImportPath, bpkg.Dir, filename)
}
filterToFilesInPackage := func(files []string, pkgName string) []string {
var keep []string
for _, f := range files {
if fileToPkgName[f] == pkgName {
keep = append(keep, f)
}
}
return keep
}
// Trim the *GoFiles fields to only those files in the same
// package.
bpkg.Name = pkgName
if pkgName == "main" {
// TODO(sqs): If the package name is "main", and there are
// multiple main packages that are separate programs (and,
// e.g., expected to be run directly run `go run main1.go
// main2.go`), then this will break because it will try to
// compile them all together. There's no good way to handle
// that case that I can think of, other than with heuristics.
}
var nonXTestPkgName, xtestPkgName string
if strings.HasSuffix(pkgName, "_test") {
nonXTestPkgName = strings.TrimSuffix(pkgName, "_test")
xtestPkgName = pkgName
} else {
nonXTestPkgName = pkgName
xtestPkgName = pkgName + "_test"
}
bpkg.GoFiles = filterToFilesInPackage(bpkg.GoFiles, nonXTestPkgName)
bpkg.TestGoFiles = filterToFilesInPackage(bpkg.TestGoFiles, nonXTestPkgName)
bpkg.XTestGoFiles = filterToFilesInPackage(bpkg.XTestGoFiles, xtestPkgName)
return bpkg, nil
}
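// Illustrative sketch, not part of the original source: for a directory
// holding gen.go (package main, under "// +build ignore") and lib.go
// (package lib), bctx.ImportDir reports a *build.MultiplePackageError m.
// Calling buildPackageForNamedFileInMultiPackageDir(bpkg, m, "lib.go") then
// returns a copy of bpkg whose Name is "lib" and whose GoFiles lists only
// lib.go.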
// TODO(sqs): allow typechecking just a specific file not in a package, too
func typecheck(ctx context.Context, fset *token.FileSet, bctx *build.Context, bpkg *build.Package, findPackage FindPackageFunc, rootPath string) (*loader.Program, diagnostics, error) {
var typeErrs []error
conf := loader.Config{
Fset: fset,
TypeChecker: types.Config{
DisableUnusedImportCheck: true,
FakeImportC: true,
Error: func(err error) {
typeErrs = append(typeErrs, err)
},
},
Build: bctx,
Cwd: bpkg.Dir,
AllowErrors: true,
TypeCheckFuncBodies: func(p string) bool {
return bpkg.ImportPath == p
},
ParserMode: parser.AllErrors | parser.ParseComments, // prevent parser from bailing out
FindPackage: func(bctx *build.Context, importPath, fromDir string, mode build.ImportMode) (*build.Package, error) {
// When importing a package, ignore any
// MultiplePackageError. This occurs, e.g., when you have a
// main.go with "// +build ignore" that imports the
// non-main package in the same dir.
bpkg, err := findPackage(ctx, bctx, importPath, fromDir, rootPath, mode)
if err != nil && !isMultiplePackageError(err) {
return bpkg, err
}
return bpkg, nil
},
}
// Hover needs this info, otherwise we could zero out the unnecessary
// results to save memory.
//
// TODO(sqs): investigate other ways to speed this up using
// AfterTypeCheck; see
// https://sourcegraph.com/github.com/golang/tools@5ffc3249d341c947aa65178abbf2253ed49c9e03/-/blob/cmd/guru/referrers.go#L148.
//
// conf.AfterTypeCheck = func(info *loader.PackageInfo, files []*ast.File) {
// if !conf.TypeCheckFuncBodies(info.Pkg.Path()) {
// clearInfoFields(info)
// }
// }
//
var goFiles []string
goFiles = append(goFiles, bpkg.GoFiles...)
goFiles = append(goFiles, bpkg.TestGoFiles...)
if strings.HasSuffix(bpkg.Name, "_test") {
goFiles = append(goFiles, bpkg.XTestGoFiles...)
}
for i, filename := range goFiles {
goFiles[i] = buildutil.JoinPath(bctx, bpkg.Dir, filename)
}
conf.CreateFromFilenames(bpkg.ImportPath, goFiles...)
prog, err := conf.Load()
if err != nil && prog == nil {
return nil, nil, err
}
diags, err := errsToDiagnostics(typeErrs, prog)
if err != nil {
return nil, nil, err
}
return prog, diags, nil
}
// workspaceRefsFromPkg collects all the references made to dependencies from
// the specified package and returns the results.
func (h *LangHandler) workspaceRefsFromPkg(ctx context.Context, bctx *build.Context, conn jsonrpc2.JSONRPC2, params lspext.WorkspaceReferencesParams, fs *token.FileSet, pkg *loader.PackageInfo, files []*ast.File, rootPath string, results *refResult) (err error) {
if err := ctx.Err(); err != nil {
return err
}
span, ctx := opentracing.StartSpanFromContext(ctx, "workspaceRefsFromPkg")
defer func() {
if err != nil {
ext.Error.Set(span, true)
span.SetTag("err", err.Error())
}
span.Finish()
}()
span.SetTag("pkg", pkg)
// Compute workspace references.
findPackage := h.getFindPackageFunc()
cfg := &refs.Config{
FileSet: fs,
Pkg: pkg.Pkg,
PkgFiles: files,
Info: &pkg.Info,
}
refsErr := cfg.Refs(func(r *refs.Ref) {
symDesc, err := defSymbolDescriptor(ctx, bctx, rootPath, r.Def, findPackage)
if err != nil {
// Log the error, and flag it as one in the trace -- but do not
// halt execution (hopefully, it is limited to a small subset of
// the data).
ext.Error.Set(span, true)
err := fmt.Errorf("workspaceRefsFromPkg: failed to import %v: %v", r.Def.ImportPath, err)
log.Println(err)
span.SetTag("error", err.Error())
return
}
if !symDesc.Contains(params.Query) {
return
}
results.resultsMu.Lock()
results.results = append(results.results, referenceInformation{
Reference: goRangeToLSPLocation(fs, r.Start, r.End),
Symbol: symDesc,
})
results.resultsMu.Unlock()
})
if refsErr != nil {
// Trace the error, but do not consider it a true error. In many cases
// it is a problem with the user's code, not our workspace reference
// finding code.
span.SetTag("err", fmt.Sprintf("workspaceRefsFromPkg: workspace refs failed: %v: %v", pkg, refsErr))
}
return nil
}
// findIdentifier looks for an identifier at byte-offset searchpos
// inside the parsed source represented by node.
// If it is part of a selector expression, it returns
// that expression rather than the identifier itself.
//
// As a special case, if it finds an import
// spec, it returns ImportSpec.
//
func findIdentifier(fset *token.FileSet, f *ast.File, searchpos int) ast.Node {
ec := make(chan ast.Node)
found := func(startPos, endPos token.Pos) bool {
start := fset.Position(startPos).Offset
end := start + int(endPos-startPos)
return start <= searchpos && searchpos <= end
}
go func() {
var visit func(ast.Node) bool
visit = func(n ast.Node) bool {
var startPos token.Pos
switch n := n.(type) {
default:
return true
case *ast.Ident:
startPos = n.NamePos
case *ast.SelectorExpr:
startPos = n.Sel.NamePos
case *ast.ImportSpec:
startPos = n.Pos()
case *ast.StructType:
// If we find an anonymous bare field in a
// struct type, its definition points to itself,
// but we actually want to go elsewhere,
// so assume (dubiously) that the expression
// works globally and return a new node for it.
for _, field := range n.Fields.List {
if field.Names != nil {
continue
}
t := field.Type
if pt, ok := field.Type.(*ast.StarExpr); ok {
t = pt.X
}
if id, ok := t.(*ast.Ident); ok {
if found(id.NamePos, id.End()) {
e, err := parseExpr(fset, f.Scope, id.Name)
if err != nil {
log.Println(err) // TODO(slimsag): return to caller
}
ec <- e
runtime.Goexit()
}
}
}
return true
}
if found(startPos, n.End()) {
ec <- n
runtime.Goexit()
}
return true
}
ast.Walk(visitorFunc(visit), f)
ec <- nil
}()
return <-ec
}
// parseLocalPackage reads and parses all go files from the
// current directory that implement the same package name as
// the principal source file, except the original source file
// itself, which will already have been parsed.
//
func parseLocalPackage(fset *token.FileSet, filename string, src *ast.File, pkgScope *ast.Scope, pathToName parser.ImportPathToName) (*ast.Package, error) {
pkg := &ast.Package{src.Name.Name, pkgScope, nil, map[string]*ast.File{filename: src}}
d, f := filepath.Split(filename)
if d == "" {
d = "./"
}
fd, err := os.Open(d)
if err != nil {
return nil, err
}
defer fd.Close()
list, err := fd.Readdirnames(-1)
if err != nil {
return nil, errNoPkgFiles
}
for _, pf := range list {
file := filepath.Join(d, pf)
if !strings.HasSuffix(pf, ".go") ||
pf == f ||
pkgName(fset, file) != pkg.Name {
continue
}
src, err := parser.ParseFile(fset, file, nil, 0, pkg.Scope, types.DefaultImportPathToName)
if err == nil {
pkg.Files[file] = src
}
}
if len(pkg.Files) == 1 {
return nil, errNoPkgFiles
}
return pkg, nil
}
// pkgName returns the package name implemented by the
// go source filename.
//
func pkgName(fset *token.FileSet, filename string) string {
prog, _ := parser.ParseFile(fset, filename, nil, parser.PackageClauseOnly, nil, types.DefaultImportPathToName)
if prog != nil {
return prog.Name.Name
}
return ""
}
// If src != nil, readSource converts src to a []byte if possible;
// otherwise it returns an error. If src == nil, readSource returns
// the result of reading the file specified by filename.
//
func readSource(filename string, src interface{}) ([]byte, error) {
if src != nil {
switch s := src.(type) {
case string:
return []byte(s), nil
case []byte:
return s, nil
case *bytes.Buffer:
// is io.Reader, but src is already available in []byte form
if s != nil {
return s.Bytes(), nil
}
case io.Reader:
var buf bytes.Buffer
_, err := io.Copy(&buf, s)
if err != nil {
return nil, err
}
return buf.Bytes(), nil
default:
return nil, errors.New("invalid source")
}
}
return ioutil.ReadFile(filename)
}
// ParseExpr parses a Go expression and returns the corresponding
// AST node. The fset, filename, and src arguments have the same interpretation
// as for ParseFile. If there is an error, the result expression
// may be nil or contain a partial AST.
//
// If scope is non-nil, it will be used as the scope for the expression.
//
func ParseExpr(fset *token.FileSet, filename string, src interface{}, scope *ast.Scope, pathToName ImportPathToName) (ast.Expr, error) {
data, err := readSource(filename, src)
if err != nil {
return nil, err
}
var p parser
p.init(fset, filename, data, 0, scope, pathToName)
x := p.parseExpr()
if p.tok == token.SEMICOLON {
p.next() // consume automatically inserted semicolon, if any
}
return x, p.parseEOF()
}
// ParseFile parses the source code of a single Go source file and returns
// the corresponding ast.File node. The source code may be provided via
// the filename of the source file, or via the src parameter.
//
// If src != nil, ParseFile parses the source from src and the filename is
// only used when recording position information. The type of the argument
// for the src parameter must be string, []byte, or io.Reader.
//
// If src == nil, ParseFile parses the file specified by filename.
//
// The mode parameter controls the amount of source text parsed and other
// optional parser functionality. Position information is recorded in the
// file set fset.
//
// If the source couldn't be read, the returned AST is nil and the error
// indicates the specific failure. If the source was read but syntax
// errors were found, the result is a partial AST (with ast.BadX nodes
// representing the fragments of erroneous source code). Multiple errors
// are returned via a scanner.ErrorList which is sorted by file position.
//
func ParseFile(fset *token.FileSet, filename string, src interface{}, mode uint, pkgScope *ast.Scope, pathToName ImportPathToName) (*ast.File, error) {
data, err := readSource(filename, src)
if err != nil {
return nil, err
}
var p parser
p.init(fset, filename, data, mode, pkgScope, pathToName)
p.pkgScope = p.topScope
p.openScope()
p.fileScope = p.topScope
p.ErrorList.RemoveMultiples()
return p.parseFile(), p.ErrorList.Err() // parseFile() reads to EOF
}
// ParseFiles calls ParseFile for each file in the filenames list and returns
// a map of package name -> package AST with all the packages found. The mode
// bits are passed to ParseFile unchanged. Position information is recorded
// in the file set fset.
//
// Files with parse errors are ignored. In this case the map of packages may
// be incomplete (missing packages and/or incomplete packages) and the first
// error encountered is returned.
//
func ParseFiles(fset *token.FileSet, filenames []string, mode uint, pathToName ImportPathToName) (pkgs map[string]*ast.Package, first error) {
pkgs = make(map[string]*ast.Package)
for _, filename := range filenames {
if err := parseFileInPkg(fset, pkgs, filename, mode, pathToName); err != nil && first == nil {
first = err
}
}
return
}
// ParseDir calls ParseFile for the files in the directory specified by path and
// returns a map of package name -> package AST with all the packages found. If
// filter != nil, only the files with os.FileInfo entries passing through the filter
// are considered. The mode bits are passed to ParseFile unchanged. Position
// information is recorded in the file set fset.
//
// If the directory couldn't be read, a nil map and the respective error are
// returned. If a parse error occurred, a non-nil but incomplete map and the
// error are returned.
//
func ParseDir(fset *token.FileSet, path string, filter func(os.FileInfo) bool, mode uint, pathToName ImportPathToName) (map[string]*ast.Package, error) {
fd, err := os.Open(path)
if err != nil {
return nil, err
}
defer fd.Close()
list, err := fd.Readdir(-1)
if err != nil {
return nil, err
}
filenames := make([]string, len(list))
n := 0
for i := 0; i < len(list); i++ {
d := list[i]
if filter == nil || filter(d) {
filenames[n] = filepath.Join(path, d.Name())
n++
}
}
filenames = filenames[0:n]
return ParseFiles(fset, filenames, mode, pathToName)
}
// NewGitHubRepoVFS creates a new VFS backed by a GitHub downloadable
// repository archive.
func NewGitHubRepoVFS(ctx context.Context, repo, rev string) (*ArchiveFS, error) {
if !githubRepoRx.MatchString(repo) {
return nil, fmt.Errorf(`invalid GitHub repo %q: must be "github.com/user/repo"`, repo)
}
url := fmt.Sprintf("https://codeload.%s/zip/%s", repo, rev)
return NewZipVFS(ctx, url, ghFetch.Inc, ghFetchFailed.Inc, false)
}
// BuildContext creates a build.Context which uses the overlay FS and the InitializeParams.BuildContext overrides.
func (h *LangHandler) BuildContext(ctx context.Context) *build.Context {
var bctx *build.Context
if override := h.init.BuildContext; override != nil {
bctx = &build.Context{
GOOS: override.GOOS,
GOARCH: override.GOARCH,
GOPATH: override.GOPATH,
GOROOT: override.GOROOT,
CgoEnabled: override.CgoEnabled,
UseAllFiles: override.UseAllFiles,
Compiler: override.Compiler,
BuildTags: override.BuildTags,
// Enable analysis of all go version build tags that
// our compiler should understand.
ReleaseTags: build.Default.ReleaseTags,
}
} else {
// make a copy since we will mutate it
copy := build.Default
bctx = &copy
}
h.Mu.Lock()
fs := h.FS
h.Mu.Unlock()
util.PrepareContext(bctx, ctx, fs)
return bctx
}
// ContainingPackage returns the package that contains the given
// filename. It is like buildutil.ContainingPackage, except that:
//
// * it returns the whole package (i.e., it doesn't use build.FindOnly)
// * it does not perform FS calls that are unnecessary for us (such
// as searching the GOROOT; this is only called on the main
// workspace's code, not its deps).
// * if the file is in the xtest package (package p_test not package p),
// it returns build.Package only representing that xtest package
func ContainingPackage(bctx *build.Context, filename, rootPath string) (*build.Package, error) {
gopaths := buildutil.SplitPathList(bctx, bctx.GOPATH) // list will be empty with no GOPATH
for _, gopath := range gopaths {
if !buildutil.IsAbsPath(bctx, gopath) {
return nil, fmt.Errorf("build context GOPATH must be an absolute path (GOPATH=%q)", gopath)
}
}
pkgDir := filename
if !bctx.IsDir(filename) {
pkgDir = path.Dir(filename)
}
var srcDir string
if util.PathHasPrefix(filename, bctx.GOROOT) {
srcDir = bctx.GOROOT // if workspace is Go stdlib
} else {
for _, gopath := range gopaths {
if util.PathHasPrefix(pkgDir, gopath) {
srcDir = gopath
break
}
}
}
var (
pkg *build.Package
err error
xtest bool
)
if srcDir == "" {
// workspace is out of GOPATH
pkg, err = bctx.ImportDir(pkgDir, 0)
if pkg != nil {
parts := strings.Split(util.PathTrimPrefix(pkgDir, filepath.Dir(rootPath)), "vendor/")
pkg.ImportPath = parts[len(parts)-1]
}
} else {
srcDir = path.Join(filepath.ToSlash(srcDir), "src")
importPath := util.PathTrimPrefix(pkgDir, srcDir)
pkg, err = bctx.Import(importPath, pkgDir, 0)
}
if pkg != nil {
base := path.Base(filename)
for _, f := range pkg.XTestGoFiles {
if f == base {
xtest = true
break
}
}
}
// If the filename we want refers to a file in an xtest package
// (package p_test not package p), then munge the package so that
// it only refers to that xtest package.
if pkg != nil && xtest && !strings.HasSuffix(pkg.Name, "_test") {
pkg.Name += "_test"
pkg.GoFiles = nil
pkg.CgoFiles = nil
pkg.TestGoFiles = nil
}
return pkg, err
}
// DeclPos computes the source position of the declaration of an object name.
// The result may be an invalid position if it cannot be computed
// (obj.Decl may be nil or not correct).
// Ideally this would be provided as a method, ast.Object.Pos.
func DeclPos(obj *ast.Object) token.Pos {
decl, _ := obj.Decl.(ast.Node)
if decl == nil {
return token.NoPos
}
pos := declPos(obj.Name, decl)
if !pos.IsValid() {
pos = decl.Pos()
}
return pos
}
// typeLookup looks for a named type, but will search through
// any number of type qualifiers (chan/array/slice/pointer)
// which have an unambiguous base type. If no named type is
// found, we are not interested, because this is only used
// for finding a type's definition.
func typeLookup(prog *loader.Program, typ types.Type) *types.TypeName {
if typ == nil {
return nil
}
for {
switch t := typ.(type) {
case *types.Named:
return t.Obj()
case *types.Map:
return nil
case dereferencable:
typ = t.Elem()
default:
return nil
}
}
}
// references calls emitRef on each transitive package that has been seen by
// the dependency cache. The parameters say that the Go package directory `path`
// has imported the Go package described by r.
func (d *depCache) references(emitRef func(path string, r goDependencyReference), depthLimit int) {
// Example import graph with edge cases:
//
// '/' (root)
// |
// a
// |\
// b c
// \|
// .>. d <<<<<<.
// | \ / \ | |
// .<< e f >>^ |
// | |
// f >>>>>>>>^
//
// Although Go does not allow such cyclic import graphs, we must handle
// them here due to the fact that we aggregate imports for all packages in
// a directory (e.g. including xtest files, which can import the package
// path itself).
// orderedEmit emits the dependency references found in m as being
// referenced by the given path. The only difference from emitRef is that
// the emissions are in a sorted order rather than in random map order.
orderedEmit := func(path string, m map[string]goDependencyReference) {
keys := make([]string, 0, len(m))
for k := range m {
keys = append(keys, k)
}
sort.Strings(keys)
for _, k := range keys {
emitRef(path, m[k])
}
}
// Prepare a function to walk every package node in the above example graph.
beganWalk := map[string]struct{}{}
var walk func(rootDir, pkgDir string, parentDirs []string, emissions map[string]goDependencyReference, depth int)
walk = func(rootDir, pkgDir string, parentDirs []string, emissions map[string]goDependencyReference, depth int) {
if depth >= depthLimit {
return
}
// The imports are recorded in parallel by goroutines in doDeps, so we
// must sort them in order to get a stable output order.
imports := d.seen[pkgDir]
sort.Sort(sortedImportRecord(imports))
for _, imp := range imports {
// At this point we know that `imp.pkg.ImportPath` has imported
// `imp.imports.ImportPath`.
// If the package being referenced is the package itself, i.e. the
// package tried to import itself, do not walk any further.
if imp.pkg.Dir == imp.imports.Dir {
continue
}
// If the package being referenced is itself one of our parent
// packages, then we have hit a cyclic dependency and should not
// walk any further.
cyclic := false
for _, parentDir := range parentDirs {
if parentDir == imp.imports.Dir {
cyclic = true
break
}
}
if cyclic {
continue
}
// Walk the referenced dependency so that we emit transitive
// dependencies.
walk(rootDir, imp.imports.Dir, append(parentDirs, pkgDir), emissions, depth+1)
// If the dependency being referenced has not already been walked
// individually / on its own, do so now.
_, began := beganWalk[imp.imports.Dir]
if !began {
beganWalk[imp.imports.Dir] = struct{}{}
childEmissions := map[string]goDependencyReference{}
walk(imp.imports.Dir, imp.imports.Dir, append(parentDirs, pkgDir), childEmissions, 0)
orderedEmit(imp.imports.Dir, childEmissions)
}
// If the new emissions for the import path would have a greater
// depth, then do not overwrite the old emission. This ensures that
// for a single package which is referenced we always get the
// closest (smallest) depth value.
if existing, ok := emissions[imp.imports.ImportPath]; ok {
if existing.depth < depth {
return
}
}
emissions[imp.imports.ImportPath] = goDependencyReference{
pkg: unvendoredPath(imp.imports.ImportPath),
absolute: imp.imports.ImportPath,
vendor: util.IsVendorDir(imp.imports.Dir),
depth: depth,
}
}
}
sort.Strings(d.entryPackageDirs)
for _, entryDir := range d.entryPackageDirs {
emissions := map[string]goDependencyReference{}
walk(entryDir, entryDir, nil, emissions, 0)
orderedEmit(entryDir, emissions)
}
}
// scannerMode returns the scanner mode bits given the parser's mode bits.
func scannerMode(mode uint) scanner.Mode {
var m scanner.Mode
if mode&ParseComments != 0 {
m |= scanner.ScanComments
}
return m
}
// resolve attaches an Object to the identifier. If no Object
// currently exists for the identifier, one is created in
// package scope.
func (p *parser) resolve(ident *ast.Ident) {
if ident.Name == "_" {
return
}
// try to resolve the identifier
for s := p.topScope; s != nil; s = s.Outer {
if obj := s.Lookup(ident.Name); obj != nil {
ident.Obj = obj
return
}
}
if p.pkgScope == nil {
return
}
ident.Obj = ast.NewObj(ast.Bad, ident.Name)
p.pkgScope.Insert(ident.Obj)
}
// ----------------------------------------------------------------------------
// Parsing support
func (p *parser) printTrace(a ...interface{}) {
const dots = "................................ " +
"................................ "
const n = uint(len(dots))
pos := p.file.Position(p.pos)
fmt.Printf("%5d:%3d: ", pos.Line, pos.Column)
i := 2 * p.indent
for ; i > n; i -= n {
fmt.Print(dots)
}
fmt.Print(dots[0:i])
fmt.Println(a...)
}
// Advance to the next token.
func (p *parser) next0() {
// Because of one-token look-ahead, print the previous token
// when tracing as it provides a more readable output. The
// very first token (!p.pos.IsValid()) is not initialized
// (it is token.ILLEGAL), so don't print it.
if p.trace && p.pos.IsValid() {
s := p.tok.String()
switch {
case p.tok.IsLiteral():
p.printTrace(s, string(p.lit))
case p.tok.IsOperator(), p.tok.IsKeyword():
p.printTrace("\"" + s + "\"")
default:
p.printTrace(s)
}
}
p.pos, p.tok, p.lit = p.scanner.Scan()
}
// Consume a comment and return it and the line on which it ends.
func (p *parser) consumeComment() (comment *ast.Comment, endline int) {
// /*-style comments may end on a different line than where they start.
// Scan the comment for '\n' chars and adjust endline accordingly.
endline = p.file.Line(p.pos)
if p.lit[1] == '*' {
for _, b := range p.lit {
if b == '\n' {
endline++
}
}
}
comment = &ast.Comment{p.pos, p.lit}
p.next0()
return
}
// Consume a group of adjacent comments, add it to the parser's
// comments list, and return it together with the line at which
// the last comment in the group ends. An empty line or non-comment
// token terminates a comment group.
//
func (p *parser) consumeCommentGroup() (comments *ast.CommentGroup, endline int) {
var list []*ast.Comment
endline = p.file.Line(p.pos)
for p.tok == token.COMMENT && endline+1 >= p.file.Line(p.pos) {
var comment *ast.Comment
comment, endline = p.consumeComment()
list = append(list, comment)
}
// add comment group to the comments list
comments = &ast.CommentGroup{list}
p.comments = append(p.comments, comments)
return
}
// Advance to the next non-comment token. In the process, collect
// any comment groups encountered, and remember the last lead
// and line comments.
//
// A lead comment is a comment group that starts and ends in a
// line without any other tokens and that is followed by a non-comment
// token on the line immediately after the comment group.
//
// A line comment is a comment group that follows a non-comment
// token on the same line, and that has no tokens after it on the line
// where it ends.
//
// Lead and line comments may be considered documentation that is
// stored in the AST.
//
func (p *parser) next() {
p.leadComment = nil
p.lineComment = nil
line := p.file.Line(p.pos) // current line
p.next0()
if p.tok == token.COMMENT {
var comment *ast.CommentGroup
var endline int
if p.file.Line(p.pos) == line {
// The comment is on same line as previous token; it
// cannot be a lead comment but may be a line comment.
comment, endline = p.consumeCommentGroup()
if p.file.Line(p.pos) != endline {
// The next token is on a different line, thus
// the last comment group is a line comment.
p.lineComment = comment
}
}
// consume successor comments, if any
endline = -1
for p.tok == token.COMMENT {
comment, endline = p.consumeCommentGroup()
}
if endline+1 == p.file.Line(p.pos) {
// The next token is following on the line immediately after the
// comment group, thus the last comment group is a lead comment.
p.leadComment = comment
}
}
}
// ----------------------------------------------------------------------------
// Identifiers
func (p *parser) parseIdent() *ast.Ident {
pos := p.pos
name := "_"
if p.tok == token.IDENT {
name = string(p.lit)
p.next()
} else {
p.expect(token.IDENT) // use expect() error handling
}
return &ast.Ident{pos, name, nil}
}
// ----------------------------------------------------------------------------
// Common productions
func (p *parser) parseExprList() (list []ast.Expr) {
if p.trace {
defer un(trace(p, "ExpressionList"))
}
list = append(list, p.parseExpr())
for p.tok == token.COMMA {
p.next()
list = append(list, p.parseExpr())
}
return
}
// ----------------------------------------------------------------------------
// Types
func (p *parser) parseType() ast.Expr {
if p.trace {
defer un(trace(p, "Type"))
}
typ := p.tryType()
if typ == nil {
pos := p.pos
p.errorExpected(pos, "type")
p.next() // make progress
return &ast.BadExpr{pos, p.pos}
}
return typ
}
// The object for the identifier in an anonymous
// field must point to the original type because
// the object has its own identity as a field member.
//
func makeAnonField(t, declType ast.Expr) ast.Expr {
switch t := t.(type) {
case *ast.Ident:
id := new(ast.Ident)
*id = *t
id.Obj = ast.NewObj(ast.Var, id.Name)
id.Obj.Decl = &ast.Field{nil, []*ast.Ident{id}, declType, nil, nil}
return id
case *ast.SelectorExpr:
return &ast.SelectorExpr{t.X, makeAnonField(t.Sel, declType).(*ast.Ident)}
case *ast.StarExpr:
return &ast.StarExpr{t.Star, makeAnonField(t.X, declType)}
}
return t
}
// ----------------------------------------------------------------------------
// Blocks
func (p *parser) parseStmtList() (list []ast.Stmt) {
if p.trace {
defer un(trace(p, "StatementList"))
}
for p.tok != token.CASE && p.tok != token.DEFAULT && p.tok != token.RBRACE && p.tok != token.EOF {
list = append(list, p.parseStmt())
}
return
}
// ----------------------------------------------------------------------------
// Expressions
func (p *parser) parseFuncTypeOrLit() ast.Expr {
if p.trace {
defer un(trace(p, "FuncTypeOrLit"))
}
typ, scope := p.parseFuncType()
if p.tok != token.LBRACE {
// function type only
return typ
}
p.exprLev++
body := p.parseBody(scope)
p.exprLev--
return &ast.FuncLit{typ, body}
}
// parseOperand may return an expression or a raw type (incl. array
// types of the form [...]T). Callers must verify the result.
//
func (p *parser) parseOperand() ast.Expr {
if p.trace {
defer un(trace(p, "Operand"))
}
switch p.tok {
case token.IDENT:
ident := p.parseIdent()
p.resolve(ident)
return ident
case token.INT, token.FLOAT, token.IMAG, token.CHAR, token.STRING:
x := &ast.BasicLit{p.pos, p.tok, p.lit}
p.next()
return x
case token.LPAREN:
lparen := p.pos
p.next()
p.exprLev++
x := p.parseExpr()
p.exprLev--
rparen := p.expect(token.RPAREN)
return &ast.ParenExpr{lparen, x, rparen}
case token.FUNC:
return p.parseFuncTypeOrLit()
default:
t := p.tryRawType(true) // could be type for composite literal or conversion
if t != nil {
return t
}
}
pos := p.pos
p.errorExpected(pos, "operand")
p.next() // make progress
return &ast.BadExpr{pos, p.pos}
} |
// checkExpr checks that x is an expression (and not a type).
func (p *parser) checkExpr(x ast.Expr) ast.Expr {
switch t := unparen(x).(type) {
case *ast.BadExpr:
case *ast.Ident:
case *ast.BasicLit:
case *ast.FuncLit:
case *ast.CompositeLit:
case *ast.ParenExpr:
panic("unreachable")
case *ast.SelectorExpr:
case *ast.IndexExpr:
case *ast.SliceExpr:
case *ast.TypeAssertExpr:
if t.Type == nil {
// the form X.(type) is only allowed in type switch expressions
p.errorExpected(x.Pos(), "expression")
x = &ast.BadExpr{x.Pos(), x.End()}
}
case *ast.CallExpr:
case *ast.StarExpr:
case *ast.UnaryExpr:
if t.Op == token.RANGE {
// the range operator is only allowed at the top of a for statement
p.errorExpected(x.Pos(), "expression")
x = &ast.BadExpr{x.Pos(), x.End()}
}
case *ast.BinaryExpr:
default:
// all other nodes are not proper expressions
p.errorExpected(x.Pos(), "expression")
x = &ast.BadExpr{x.Pos(), x.End()}
}
return x
} |
// isTypeName returns true iff x is a (qualified) TypeName.
func isTypeName(x ast.Expr) bool {
switch t := x.(type) {
case *ast.BadExpr:
case *ast.Ident:
case *ast.SelectorExpr:
_, isIdent := t.X.(*ast.Ident)
return isIdent
default:
return false // all other nodes are not type names
}
return true
} |
// isLiteralType returns true iff x is a legal composite literal type.
func isLiteralType(x ast.Expr) bool {
switch t := x.(type) {
case *ast.BadExpr:
case *ast.Ident:
case *ast.SelectorExpr:
_, isIdent := t.X.(*ast.Ident)
return isIdent
case *ast.ArrayType:
case *ast.StructType:
case *ast.MapType:
default:
return false // all other nodes are not legal composite literal types
}
return true
} |
// If x is of the form *T, deref returns T, otherwise it returns x.
func deref(x ast.Expr) ast.Expr {
if p, isPtr := x.(*ast.StarExpr); isPtr {
x = p.X
}
return x
} |
// If x is of the form (T), unparen returns unparen(T), otherwise it returns x.
func unparen(x ast.Expr) ast.Expr {
if p, isParen := x.(*ast.ParenExpr); isParen {
x = unparen(p.X)
}
return x
} |
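// A minimal usage sketch (not part of the original source) showing what
// deref and unparen return for hand-built AST nodes; the identifiers are
// illustrative only.
func derefUnparenExample() {
	star := &ast.StarExpr{X: ast.NewIdent("T")}
	_ = deref(star) // yields the *ast.Ident for "T"

	wrapped := &ast.ParenExpr{X: &ast.ParenExpr{X: ast.NewIdent("x")}}
	_ = unparen(wrapped) // recursion strips both parens, yielding the "x" ident
} |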
// checkExprOrType checks that x is an expression or a type
// (and not a raw type such as [...]T).
//
func (p *parser) checkExprOrType(x ast.Expr) ast.Expr {
switch t := unparen(x).(type) {
case *ast.ParenExpr:
panic("unreachable")
case *ast.UnaryExpr:
if t.Op == token.RANGE {
// the range operator is only allowed at the top of a for statement
p.errorExpected(x.Pos(), "expression")
x = &ast.BadExpr{x.Pos(), x.End()}
}
case *ast.ArrayType:
if len, isEllipsis := t.Len.(*ast.Ellipsis); isEllipsis {
p.error(len.Pos(), "expected array length, found '...'")
x = &ast.BadExpr{x.Pos(), x.End()}
}
}
// all other nodes are expressions or types
return x
} |
// TODO(gri): parseExpr may return a type or even a raw type ([...]int) -
// should reject when a type/raw type is obviously not allowed
func (p *parser) parseExpr() ast.Expr {
if p.trace {
defer un(trace(p, "Expression"))
}
return p.parseBinaryExpr(token.LowestPrec + 1)
} |
// ----------------------------------------------------------------------------
// Statements
func (p *parser) parseSimpleStmt(labelOk bool) ast.Stmt {
if p.trace {
defer un(trace(p, "SimpleStmt"))
}
x := p.parseExprList()
switch p.tok {
case
token.DEFINE, token.ASSIGN, token.ADD_ASSIGN,
token.SUB_ASSIGN, token.MUL_ASSIGN, token.QUO_ASSIGN,
token.REM_ASSIGN, token.AND_ASSIGN, token.OR_ASSIGN,
token.XOR_ASSIGN, token.SHL_ASSIGN, token.SHR_ASSIGN, token.AND_NOT_ASSIGN:
// assignment statement
pos, tok := p.pos, p.tok
p.next()
y := p.parseExprList()
stmt := &ast.AssignStmt{x, pos, tok, y}
if tok == token.DEFINE {
p.shortVarDecl(p.makeIdentList(x), stmt)
}
return stmt
}
if len(x) > 1 {
p.errorExpected(x[0].Pos(), "1 expression")
// continue with first expression
}
switch p.tok {
case token.COLON:
// labeled statement
colon := p.pos
p.next()
if label, isIdent := x[0].(*ast.Ident); labelOk && isIdent {
// Go spec: The scope of a label is the body of the function
// in which it is declared and excludes the body of any nested
// function.
stmt := &ast.LabeledStmt{label, colon, p.parseStmt()}
p.declare(stmt, p.labelScope, ast.Lbl, label)
return stmt
}
p.error(x[0].Pos(), "illegal label declaration")
return &ast.BadStmt{x[0].Pos(), colon + 1}
case token.ARROW:
// send statement
arrow := p.pos
p.next() // consume "<-"
y := p.parseExpr()
return &ast.SendStmt{x[0], arrow, y}
case token.INC, token.DEC:
// increment or decrement
s := &ast.IncDecStmt{x[0], p.pos, p.tok}
p.next() // consume "++" or "--"
return s
}
// expression
return &ast.ExprStmt{x[0]}
} |
// litToString converts a string literal to a regular string.
func litToString(lit *ast.BasicLit) (v string) {
if lit.Kind != token.STRING {
panic("expected string")
}
if lit.Value[0] == '`' {
return string(lit.Value[1 : len(lit.Value)-1])
}
v, _ = strconv.Unquote(string(lit.Value))
return
} |
// newScope creates a new scope only if we're using scopes.
func (p *parser) newScope(outer *ast.Scope) *ast.Scope {
if p.topScope == nil {
return nil
}
return ast.NewScope(outer)
} |
// Suggest returns a list of suggestion candidates and the length of
// the text that should be replaced, if any.
func (c *Config) Suggest(filename string, data []byte, cursor int) ([]Candidate, int, error) {
if cursor < 0 {
return nil, 0, nil
}
a, err := c.analyzePackage(filename, data, cursor)
if err != nil {
return nil, 0, err
}
fset := a.fset
pos := a.pos
pkg := a.pkg
if pkg == nil {
return nil, 0, nil
}
scope := pkg.Scope().Innermost(pos)
ctx, expr, partial := deduceCursorContext(data, cursor)
b := candidateCollector{
localpkg: pkg,
partial: partial,
filter: objectFilters[partial],
builtin: ctx != selectContext && c.Builtin,
}
switch ctx {
case selectContext:
tv, _ := types.Eval(fset, pkg, pos, expr)
if lookdot.Walk(&tv, b.appendObject) {
break
}
_, obj := scope.LookupParent(expr, pos)
if pkgName, isPkg := obj.(*types.PkgName); isPkg {
c.packageCandidates(pkgName.Imported(), &b)
break
}
return nil, 0, nil
case compositeLiteralContext:
tv, _ := types.Eval(fset, pkg, pos, expr)
if tv.IsType() {
if _, isStruct := tv.Type.Underlying().(*types.Struct); isStruct {
c.fieldNameCandidates(tv.Type, &b)
break
}
}
fallthrough
default:
c.scopeCandidates(scope, pos, &b)
}
res := b.getCandidates()
if len(res) == 0 {
return nil, 0, nil
}
return res, len(partial), nil
} |
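// A hedged usage sketch (not in the original source): the file name and
// cursor offset are illustrative. Suggest is given the byte offset of the
// cursor within data and returns completion candidates plus the length of
// the partial identifier they would replace.
func suggestExample(cfg *Config, data []byte, cursor int) {
	candidates, replaceLen, err := cfg.Suggest("example.go", data, cursor)
	if err != nil {
		log.Printf("suggest failed: %s", err)
		return
	}
	for _, c := range candidates {
		_ = c // render each candidate in the editor UI
	}
	_ = replaceLen // the editor replaces the last replaceLen bytes before the cursor
} |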
// Open will open a file from the local cache with key. If missing, fetcher
// will fill the cache first. Open also performs single-flighting for fetcher.
func (s *Store) Open(ctx context.Context, key string, fetcher Fetcher) (file *File, err error) {
span, ctx := opentracing.StartSpanFromContext(ctx, "Cached Fetch")
if s.Component!= "" {
ext.Component.Set(span, s.Component)
}
defer func() {
if err!= nil {
ext.Error.Set(span, true)
span.SetTag("err", err.Error())
}
if file!= nil {
// Update modified time. Modified time is used to decide which
// files to evict from the cache.
touch(file.Path)
}
span.Finish()
}()
if s.Dir == "" {
return nil, errors.New("diskcache.Store.Dir must be set")
}
// The on-disk file name is a sha256 hash of the key, since the key itself
// may not be a valid file name.
h := sha256.Sum256([]byte(key))
path := filepath.Join(s.Dir, hex.EncodeToString(h[:])) + ".zip"
span.LogKV("key", key, "path", path)
// First do a fast-path, assume already on disk
f, err := os.Open(path)
if err == nil {
span.SetTag("source", "fast")
return &File{File: f, Path: path}, nil
}
// We (probably) have to fetch
span.SetTag("source", "fetch")
// Do the fetch in another goroutine so we can respect ctx cancellation.
type result struct {
f *File
err error
}
ch := make(chan result, 1)
go func(ctx context.Context) {
if s.BackgroundTimeout != 0 {
var cancel context.CancelFunc
ctx, cancel = context.WithTimeout(context.Background(), s.BackgroundTimeout)
defer cancel()
}
f, err := doFetch(ctx, path, fetcher)
ch <- result{f, err}
}(ctx)
select {
case <-ctx.Done():
// *os.File sets a finalizer to close the file when no longer used, so
// we don't need to worry about closing the file in the case of context
// cancellation.
return nil, ctx.Err()
case r := <-ch:
return r.f, r.err
}
} |
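// A hedged usage sketch (not in the original source): the directory, key,
// and URL below are illustrative. The fetcher runs only on a cache miss,
// and concurrent Opens for the same key share a single fetch.
func openExample(ctx context.Context) {
	s := &Store{Dir: "/tmp/archive-cache", Component: "example"}
	f, err := s.Open(ctx, "github.com/foo/bar@v1", func(ctx context.Context) (io.ReadCloser, error) {
		// Fetch the zip from its origin; any io.ReadCloser source works.
		resp, err := http.Get("https://example.com/bar.zip")
		if err != nil {
			return nil, err
		}
		return resp.Body, nil
	})
	if err != nil {
		log.Printf("open failed: %s", err)
		return
	}
	defer f.Close()
	// f.File is the cached zip on disk, f.Path its location.
} |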
// EvictMaxSize will remove files from Store.Dir until it is smaller than
// maxCacheSizeBytes. It evicts files with the oldest modification time first.
func (s *Store) EvictMaxSize(maxCacheSizeBytes int64) (stats EvictStats, err error) {
isZip := func(fi os.FileInfo) bool {
return strings.HasSuffix(fi.Name(), ".zip")
}
list, err := ioutil.ReadDir(s.Dir)
if err != nil {
if os.IsNotExist(err) {
return EvictStats{
CacheSize: 0,
Evicted: 0,
}, nil
}
return stats, errors.Wrapf(err, "failed to ReadDir %s", s.Dir)
}
// Sum up the total size of all zips
var size int64
for _, fi := range list {
if isZip(fi) {
size += fi.Size()
}
}
stats.CacheSize = size
// Nothing to evict
if size <= maxCacheSizeBytes {
return stats, nil
}
// Keep removing files until we are under the cache size. Remove the
// oldest first.
sort.Slice(list, func(i, j int) bool {
return list[i].ModTime().Before(list[j].ModTime())
})
for _, fi := range list {
if size <= maxCacheSizeBytes {
break
}
if !isZip(fi) {
continue
}
path := filepath.Join(s.Dir, fi.Name())
if s.BeforeEvict != nil {
s.BeforeEvict(path)
}
err = os.Remove(path)
if err != nil {
log.Printf("failed to remove %s: %s", path, err)
continue
}
stats.Evicted++
size -= fi.Size()
}
return stats, nil
} |
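// A hedged usage sketch (not in the original source): the 10 GiB budget and
// five-minute interval are illustrative. Eviction is driven by the caller,
// so a periodic loop like this keeps the cache bounded.
func evictLoopExample(s *Store) {
	const maxBytes = 10 << 30 // 10 GiB
	for range time.Tick(5 * time.Minute) {
		stats, err := s.EvictMaxSize(maxBytes)
		if err != nil {
			log.Printf("evict failed: %s", err)
			continue
		}
		log.Printf("cache: %d bytes before eviction, %d files evicted", stats.CacheSize, stats.Evicted)
	}
} |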
// touch updates the modified time to time.Now(). It is best-effort, and will
// log if it fails.
func touch(path string) {
t := time.Now()
if err := os.Chtimes(path, t, t); err != nil {
log.Printf("failed to touch %s: %s", path, err)
}
} |
// PathHasPrefix returns true if s starts with the given prefix
func PathHasPrefix(s, prefix string) bool {
s = normalizePath(s)
prefix = normalizePath(prefix)
if s == prefix {
return true
}
if !strings.HasSuffix(prefix, "/") {
prefix += "/"
}
return s == prefix || strings.HasPrefix(s, prefix)
} |
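// A hedged sketch (not in the original source) of why PathHasPrefix appends
// a trailing slash before comparing: a plain strings.HasPrefix would treat
// sibling directories as matches.
func pathHasPrefixExample() {
	_ = PathHasPrefix("/src/foo/bar", "/src/foo") // true: a proper subpath
	_ = PathHasPrefix("/src/foobar", "/src/foo")  // false: a sibling, not a child
	_ = PathHasPrefix("/src/foo", "/src/foo")     // true: exact match
} |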
// PathTrimPrefix removes the prefix from s
func PathTrimPrefix(s, prefix string) string {
s = normalizePath(s)
prefix = normalizePath(prefix)
if s == prefix {
return ""
}
if !strings.HasSuffix(prefix, "/") {
prefix += "/"
}
return strings.TrimPrefix(s, prefix)
} |
// IsVendorDir tells if the specified directory is a vendor directory.
func IsVendorDir(dir string) bool {
return strings.HasPrefix(dir, "vendor/") || strings.Contains(dir, "/vendor/")
} |
// PathToURI converts the given absolute path to a file URI
func PathToURI(path string) lsp.DocumentURI {
path = filepath.ToSlash(path)
parts := strings.SplitN(path, "/", 2)
// If the first segment is a Windows drive letter, prefix with a slash and skip encoding
head := parts[0]
if head != "" {
head = "/" + head
}
rest := ""
if len(parts) > 1 {
rest = "/" + parts[1]
}
return lsp.DocumentURI("file://" + head + rest)
} |
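// A hedged sketch (not in the original source) of the expected conversions,
// including the Windows drive-letter case handled above.
func pathToURIExample() {
	_ = PathToURI("/src/github.com/foo/bar") // "file:///src/github.com/foo/bar"
	_ = PathToURI(`C:\work\repo`)            // "file:///C:/work/repo"
} |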
// UriToPath converts the given file URI to a path
func UriToPath(uri lsp.DocumentURI) string {
u, err := url.Parse(string(uri))
if err != nil {
return trimFilePrefix(string(uri))
}
return u.Path
} |
// UriToRealPath converts the given file URI to the platform-specific path
func UriToRealPath(uri lsp.DocumentURI) string {
path := UriToPath(uri)
if regDriveLetter.MatchString(path) {
// remove the leading slash if it starts with a drive letter
// and convert to back slashes
path = filepath.FromSlash(path[1:])
}
return path
} |
// IsAbs returns true if the given path is absolute
func IsAbs(path string) bool {
// Windows implementation accepts path-like and filepath-like arguments
return strings.HasPrefix(path, "/") || filepath.IsAbs(path)
} |
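// A hedged sketch (not in the original source): IsAbs accepts both
// URI-style slash paths and native Windows paths.
func isAbsExample() {
	_ = IsAbs("/src/github.com/foo/bar") // true everywhere: leading slash
	_ = IsAbs(`C:\Users\foo`)            // true on Windows via filepath.IsAbs
	_ = IsAbs("relative/path")           // false
} |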
// Panicf takes the return value of recover() and outputs data to the log with
// the stack trace appended. Arguments are handled in the manner of
// fmt.Printf. Arguments should format to a string which identifies what the
// panic code was doing. Returns a non-nil error if it recovered from a panic.
func Panicf(r interface{}, format string, v ...interface{}) error {
if r != nil {
// Same as net/http
const size = 64 << 10
buf := make([]byte, size)
buf = buf[:runtime.Stack(buf, false)]
id := fmt.Sprintf(format, v...)
log.Printf("panic serving %s: %v\n%s", id, r, string(buf))
return fmt.Errorf("unexpected panic: %v", r)
}
return nil
} |
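// A hedged sketch (not in the original source) of the intended call pattern:
// pass the result of recover() to Panicf inside a deferred function; the
// request identifier is illustrative.
func panicfExample(req string) {
	defer func() {
		if err := Panicf(recover(), "request %q", req); err != nil {
			_ = err // the panic and stack trace were logged; report err upstream
		}
	}()
	// ... handler body that may panic ...
} |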
// When the cursor is at ')', ']' or '}', skipToBalancedPair moves it to the
// matching opening bracket, taking nested bracket pairs into account.
func (ti *tokenIterator) skipToBalancedPair() bool {
right := ti.token().tok
left := bracket_pairs_map[right]
return ti.skipToLeft(left, right)
} |
// Move the cursor to the open brace of the current block, taking nested blocks
// into account.
func (ti *tokenIterator) skipToLeftCurly() bool {
return ti.skipToLeft(token.LBRACE, token.RBRACE)
} |
// Extract the type expression right before the enclosing curly bracket block.
// Examples (# - the cursor):
// &lib.Struct{Whatever: 1, Hel#} // returns "lib.Struct"
// X{#} // returns X
// The idea is that we check whether this expression is actually a type, and
// if it is, we can apply special filtering to the autocompletion results.
func (ti *tokenIterator) extractLiteralType() (res string) {
if !ti.skipToLeftCurly() {
return ""
}
origPos := ti.pos
if !ti.prev() {
return ""
}
// A composite literal type must end with either "ident",
// "ident.ident", or "struct { ... }".
switch ti.token().tok {
case token.IDENT:
if !ti.prev() {
return ""
}
if ti.token().tok == token.PERIOD {
if !ti.prev() {
return ""
}
if ti.token().tok != token.IDENT {
return ""
}
if !ti.prev() {
return ""
}
}
case token.RBRACE:
ti.skipToBalancedPair()
if !ti.prev() {
return ""
}
if ti.token().tok != token.STRUCT {
return ""
}
if !ti.prev() {
return ""
}
}
// Continuing backwards, we might see "[]", "[...]", "[expr]",
// or "map[T]".
for ti.token().tok == token.RBRACK {
ti.skipToBalancedPair()
if !ti.prev() {
return ""
}
if ti.token().tok == token.MAP {
if !ti.prev() {
return ""
}
}
}
}
return joinTokens(ti.tokens[ti.pos+1 : origPos])
} |
// Starting from the token under the cursor move back and extract something
// that resembles a valid Go primary expression. Examples of primary expressions
// from Go spec:
// x
// 2
// (s + ".txt")
// f(3.1415, true)
// Point{1, 2}
// m["foo"]
// s[i : j + 1]
// obj.color
// f.p[i].x()
//
// As you can see, we can move through all of them using balanced bracket
// matching and applying simple rules. E.g.
// Point{1, 2}.m["foo"].s[i : j + 1].MethodCall(a, func(a, b int) int { return a + b }).
// can be seen as:
// Point{ }.m[ ].s[ ].MethodCall( ).
// which boils the rules down to these forms, connected via dots:
// ident
// ident[]
// ident{}
// ident()
// Of course there are also slightly more complicated rules for brackets:
// ident{}.ident()[5][4](), etc.
func (ti *tokenIterator) extractExpr() string {
orig := ti.pos
// Contains the type of the previously scanned token (initialized with
// the token right under the cursor). This is the token to the *right* of
// the current one.
prev := ti.token().tok
loop:
for {
if !ti.prev() {
return joinTokens(ti.tokens[:orig])
}
switch ti.token().tok {
case token.PERIOD:
// If the '.' is not followed by IDENT, it's invalid.
if prev != token.IDENT {
break loop
}
case token.IDENT:
// Valid tokens after IDENT are '.', '[', '{' and '('.
switch prev {
case token.PERIOD, token.LBRACK, token.LBRACE, token.LPAREN:
// all ok
default:
break loop
}
case token.RBRACE:
// This one can only be a part of type initialization, like:
// Dummy{}.Hello()
// It is valid Go if the Hello method is defined on a non-pointer receiver.
if prev != token.PERIOD {
break loop
}
ti.skipToBalancedPair()
case token.RPAREN, token.RBRACK:
// Valid tokens after ']' or ')' are their opening counterparts
// '[' and '(', as well as the dot.
switch prev {
case token.PERIOD, token.LBRACK, token.LPAREN:
// all ok
default:
break loop
}
ti.skipToBalancedPair()
default:
break loop
}
prev = ti.token().tok
}
return joinTokens(ti.tokens[ti.pos+1 : orig])
} |
// joinTokens reassembles a slice of tokenItem values into the original
// literal expression.
func joinTokens(tokens []tokenItem) string {
var buf bytes.Buffer
for i, tok := range tokens {
if i > 0 {
buf.WriteByte(' ')
}
buf.WriteString(tok.String())
}
return buf.String()
} |
// InitTracer initializes the tracer for the connection if it has not
// already been initialized.
//
// It assumes that h is only ever called for this conn.
func (h *HandlerCommon) InitTracer(conn *jsonrpc2.Conn) {
h.mu.Lock()
defer h.mu.Unlock()
if h.tracer != nil {
return
}
if _, isNoopTracer := opentracing.GlobalTracer().(opentracing.NoopTracer); !isNoopTracer {
// We have configured a tracer, use that instead of telemetry/event
h.tracer = opentracing.GlobalTracer()
return
}
t := tracer{conn: conn}
opt := basictracer.DefaultOptions()
opt.Recorder = &t
h.tracer = basictracer.NewWithOptions(opt)
go func() {
<-conn.DisconnectNotify()
t.mu.Lock()
t.conn = nil
t.mu.Unlock()
}()
} |
// FollowsFrom means the parent span does not depend on the child span, but
// caused it to start.
func startSpanFollowsFromContext(ctx context.Context, operationName string, opts ...opentracing.StartSpanOption) opentracing.Span {
if parentSpan := opentracing.SpanFromContext(ctx); parentSpan != nil {
opts = append(opts, opentracing.FollowsFrom(parentSpan.Context()))
return parentSpan.Tracer().StartSpan(operationName, opts...)
}
return opentracing.GlobalTracer().StartSpan(operationName, opts...)
} |
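// A hedged usage sketch (not in the original source): a background job
// spawned by a request uses FollowsFrom, so the parent span can finish
// without waiting for the child; the operation name is illustrative.
func followsFromExample(ctx context.Context) {
	span := startSpanFollowsFromContext(ctx, "background-reindex")
	go func() {
		defer span.Finish()
		// ... long-running work that outlives the request ...
	}()
} |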
// String returns the string version of the Op constant
func (e Op) String() string {
if op, found := ops[e]; found {
return op
}
return "???"
} |
// String returns a string depending on what type of event occurred and the
// file name associated with the event.
func (e Event) String() string {
if e.FileInfo == nil {
return "???"
}
pathType := "FILE"
if e.IsDir() {
pathType = "DIRECTORY"
}
return fmt.Sprintf("%s %q %s [%s]", pathType, e.Name(), e.Op, e.Path)
} |
// RegexFilterHook is a function that accepts or rejects a file
// for listing based on whether its filename or full path matches
// a regular expression.
func RegexFilterHook(r *regexp.Regexp, useFullPath bool) FilterFileHookFunc {
return func(info os.FileInfo, fullPath string) error {
str := info.Name()
if useFullPath {
str = fullPath
}
// Match
if r.MatchString(str) {
return nil
}
// No match.
return ErrSkip
}
} |
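// A hedged usage sketch (not in the original source), assuming the watcher's
// AddFilterHook method: only files whose names end in ".go" pass the filter.
func regexFilterExample(w *Watcher) {
	w.AddFilterHook(RegexFilterHook(regexp.MustCompile(`\.go$`), false))
} |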
// New creates a new Watcher.
func New() *Watcher {
// Set up the WaitGroup for w.Wait().
var wg sync.WaitGroup
wg.Add(1)
return &Watcher{
Event: make(chan Event),
Error: make(chan error),
Closed: make(chan struct{}),
close: make(chan struct{}),
mu: new(sync.Mutex),
wg: &wg,
files: make(map[string]os.FileInfo),
ignored: make(map[string]struct{}),
names: make(map[string]bool),
}
} |
// SetMaxEvents controls the maximum number of events that are sent on
// the Event channel per watching cycle. If the limit is less than 1, there is
// no limit, which is the default.
func (w *Watcher) SetMaxEvents(delta int) {
w.mu.Lock()
w.maxEvents = delta
w.mu.Unlock()
} |
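// A hedged sketch (not in the original source) wiring the watcher pieces
// together, assuming the conventional Add and Start methods; the path and
// polling interval are illustrative.
func watcherExample() {
	w := New()
	w.SetMaxEvents(10) // cap bursts at 10 events per cycle
	go func() {
		for {
			select {
			case e := <-w.Event:
				log.Println(e) // formatted by Event.String above
			case err := <-w.Error:
				log.Println(err)
			case <-w.Closed:
				return
			}
		}
	}()
	if err := w.Add("/tmp/watched"); err != nil {
		log.Fatalln(err)
	}
	if err := w.Start(100 * time.Millisecond); err != nil {
		log.Fatalln(err)
	}
}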