Mirror of https://github.com/cloudflare/cloudflared.git (synced 2025-07-28 16:01:05 +00:00)

TUN-8456: Update quic-go to 0.45 and collect mtu and congestion control metrics

Committed by Chung-Ting Huang
Parent: cb6e5999e1
Commit: 0b62d45738

vendor/golang.org/x/tools/internal/imports/fix.go (334 changed lines, generated, vendored)
@@ -13,6 +13,8 @@ import (
	"go/build"
	"go/parser"
	"go/token"
	"go/types"
	"io/fs"
	"io/ioutil"
	"os"
	"path"
@@ -26,8 +28,10 @@ import (
	"unicode/utf8"

	"golang.org/x/tools/go/ast/astutil"
	"golang.org/x/tools/internal/event"
	"golang.org/x/tools/internal/gocommand"
	"golang.org/x/tools/internal/gopathwalk"
	"golang.org/x/tools/internal/stdlib"
)

// importToGroup is a list of functions which map from an import path to
@@ -106,7 +110,7 @@ func parseOtherFiles(fset *token.FileSet, srcDir, filename string) []*ast.File {
	considerTests := strings.HasSuffix(filename, "_test.go")

	fileBase := filepath.Base(filename)
	packageFileInfos, err := ioutil.ReadDir(srcDir)
	packageFileInfos, err := os.ReadDir(srcDir)
	if err != nil {
		return nil
	}
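The hunk above is part of a file-wide migration from the deprecated ioutil.ReadDir to os.ReadDir. A small, self-contained sketch of the new pattern (hypothetical helper, not the vendored code): os.ReadDir returns lightweight fs.DirEntry values, and the fuller fs.FileInfo is only fetched via Info() when it is actually needed.

```go
package main

import (
	"fmt"
	"io/fs"
	"os"
	"strings"
)

// listGoFiles collects buildable-looking .go files in dir using os.ReadDir.
func listGoFiles(dir string) ([]fs.DirEntry, error) {
	all, err := os.ReadDir(dir)
	if err != nil {
		return nil, err
	}
	var files []fs.DirEntry
	for _, entry := range all {
		name := entry.Name()
		if entry.IsDir() || !strings.HasSuffix(name, ".go") || strings.HasSuffix(name, "_test.go") {
			continue
		}
		files = append(files, entry)
	}
	return files, nil
}

func main() {
	files, err := listGoFiles(".")
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	for _, f := range files {
		info, err := f.Info() // lazily fetch the metadata os.FileInfo used to carry
		if err != nil {
			continue
		}
		fmt.Printf("%s\t%d bytes\n", f.Name(), info.Size())
	}
}
```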
@@ -252,7 +256,7 @@ type pass struct {
	otherFiles []*ast.File // sibling files.

	// Intermediate state, generated by load.
	existingImports map[string]*ImportInfo
	existingImports map[string][]*ImportInfo
	allRefs references
	missingRefs references
@@ -297,6 +301,20 @@ func (p *pass) loadPackageNames(imports []*ImportInfo) error {
	return nil
}

// if there is a trailing major version, remove it
func withoutVersion(nm string) string {
	if v := path.Base(nm); len(v) > 0 && v[0] == 'v' {
		if _, err := strconv.Atoi(v[1:]); err == nil {
			// this is, for instance, called with rand/v2 and returns rand
			if len(v) < len(nm) {
				xnm := nm[:len(nm)-len(v)-1]
				return path.Base(xnm)
			}
		}
	}
	return nm
}

// importIdentifier returns the identifier that imp will introduce. It will
// guess if the package name has not been loaded, e.g. because the source
// is not available.
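The new withoutVersion helper strips a trailing major-version path element so that, for example, a package imported from math/rand/v2 is referred to by the identifier rand. A runnable re-implementation of the same idea (illustrative sketch with a hypothetical name, not the vendored code):

```go
package main

import (
	"fmt"
	"path"
	"strconv"
)

// stripMajorVersion drops a trailing "vN" element ("v2", "v3", ...) and
// returns the path element before it; otherwise it returns the input.
func stripMajorVersion(name string) string {
	if v := path.Base(name); len(v) > 0 && v[0] == 'v' {
		if _, err := strconv.Atoi(v[1:]); err == nil && len(v) < len(name) {
			return path.Base(name[:len(name)-len(v)-1])
		}
	}
	return name
}

func main() {
	fmt.Println(stripMajorVersion("math/rand/v2")) // rand
	fmt.Println(stripMajorVersion("rand/v2"))      // rand
	fmt.Println(stripMajorVersion("sync"))         // sync
}
```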
@@ -306,7 +324,7 @@ func (p *pass) importIdentifier(imp *ImportInfo) string {
	}
	known := p.knownPackages[imp.ImportPath]
	if known != nil && known.name != "" {
		return known.name
		return withoutVersion(known.name)
	}
	return ImportPathToAssumedName(imp.ImportPath)
}
@@ -317,7 +335,7 @@ func (p *pass) importIdentifier(imp *ImportInfo) string {
func (p *pass) load() ([]*ImportFix, bool) {
	p.knownPackages = map[string]*packageInfo{}
	p.missingRefs = references{}
	p.existingImports = map[string]*ImportInfo{}
	p.existingImports = map[string][]*ImportInfo{}

	// Load basic information about the file in question.
	p.allRefs = collectReferences(p.f)
@@ -348,7 +366,7 @@ func (p *pass) load() ([]*ImportFix, bool) {
		}
	}
	for _, imp := range imports {
		p.existingImports[p.importIdentifier(imp)] = imp
		p.existingImports[p.importIdentifier(imp)] = append(p.existingImports[p.importIdentifier(imp)], imp)
	}

	// Find missing references.
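The hunks above change pass.existingImports from a single *ImportInfo per identifier to a slice, so every import that introduces the same identifier is kept instead of the last one silently winning. A small, hypothetical sketch of that grouping over a parsed file (the sample source and names are invented for illustration):

```go
package main

import (
	"fmt"
	"go/parser"
	"go/token"
	"path"
	"strings"
)

// A deliberately broken-but-parseable file: both imports introduce the
// identifier "template", which a map to a slice of imports can represent
// and a map to a single import cannot.
const src = `package demo

import (
	"html/template"
	"text/template"
)
`

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "demo.go", src, parser.ImportsOnly)
	if err != nil {
		panic(err)
	}
	byIdent := map[string][]string{} // identifier -> all import paths using it
	for _, imp := range f.Imports {
		p := strings.Trim(imp.Path.Value, `"`)
		ident := path.Base(p)
		if imp.Name != nil {
			ident = imp.Name.Name
		}
		byIdent[ident] = append(byIdent[ident], p)
	}
	fmt.Println(byIdent) // map[template:[html/template text/template]]
}
```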
@@ -387,31 +405,33 @@ func (p *pass) fix() ([]*ImportFix, bool) {
|
||||
|
||||
// Found everything, or giving up. Add the new imports and remove any unused.
|
||||
var fixes []*ImportFix
|
||||
for _, imp := range p.existingImports {
|
||||
// We deliberately ignore globals here, because we can't be sure
|
||||
// they're in the same package. People do things like put multiple
|
||||
// main packages in the same directory, and we don't want to
|
||||
// remove imports if they happen to have the same name as a var in
|
||||
// a different package.
|
||||
if _, ok := p.allRefs[p.importIdentifier(imp)]; !ok {
|
||||
fixes = append(fixes, &ImportFix{
|
||||
StmtInfo: *imp,
|
||||
IdentName: p.importIdentifier(imp),
|
||||
FixType: DeleteImport,
|
||||
})
|
||||
continue
|
||||
}
|
||||
for _, identifierImports := range p.existingImports {
|
||||
for _, imp := range identifierImports {
|
||||
// We deliberately ignore globals here, because we can't be sure
|
||||
// they're in the same package. People do things like put multiple
|
||||
// main packages in the same directory, and we don't want to
|
||||
// remove imports if they happen to have the same name as a var in
|
||||
// a different package.
|
||||
if _, ok := p.allRefs[p.importIdentifier(imp)]; !ok {
|
||||
fixes = append(fixes, &ImportFix{
|
||||
StmtInfo: *imp,
|
||||
IdentName: p.importIdentifier(imp),
|
||||
FixType: DeleteImport,
|
||||
})
|
||||
continue
|
||||
}
|
||||
|
||||
// An existing import may need to update its import name to be correct.
|
||||
if name := p.importSpecName(imp); name != imp.Name {
|
||||
fixes = append(fixes, &ImportFix{
|
||||
StmtInfo: ImportInfo{
|
||||
Name: name,
|
||||
ImportPath: imp.ImportPath,
|
||||
},
|
||||
IdentName: p.importIdentifier(imp),
|
||||
FixType: SetImportName,
|
||||
})
|
||||
// An existing import may need to update its import name to be correct.
|
||||
if name := p.importSpecName(imp); name != imp.Name {
|
||||
fixes = append(fixes, &ImportFix{
|
||||
StmtInfo: ImportInfo{
|
||||
Name: name,
|
||||
ImportPath: imp.ImportPath,
|
||||
},
|
||||
IdentName: p.importIdentifier(imp),
|
||||
FixType: SetImportName,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
// Collecting fixes involved map iteration, so sort for stability. See
|
||||
@@ -506,9 +526,9 @@ func (p *pass) assumeSiblingImportsValid() {
|
||||
}
|
||||
for left, rights := range refs {
|
||||
if imp, ok := importsByName[left]; ok {
|
||||
if m, ok := stdlib[imp.ImportPath]; ok {
|
||||
if m, ok := stdlib.PackageSymbols[imp.ImportPath]; ok {
|
||||
// We have the stdlib in memory; no need to guess.
|
||||
rights = copyExports(m)
|
||||
rights = symbolNameSet(m)
|
||||
}
|
||||
p.addCandidate(imp, &packageInfo{
|
||||
// no name; we already know it.
|
||||
@@ -543,7 +563,7 @@ func (p *pass) addCandidate(imp *ImportInfo, pkg *packageInfo) {
|
||||
var fixImports = fixImportsDefault
|
||||
|
||||
func fixImportsDefault(fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv) error {
|
||||
fixes, err := getFixes(fset, f, filename, env)
|
||||
fixes, err := getFixes(context.Background(), fset, f, filename, env)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -553,7 +573,7 @@ func fixImportsDefault(fset *token.FileSet, f *ast.File, filename string, env *P
|
||||
|
||||
// getFixes gets the import fixes that need to be made to f in order to fix the imports.
|
||||
// It does not modify the ast.
|
||||
func getFixes(fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv) ([]*ImportFix, error) {
|
||||
func getFixes(ctx context.Context, fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv) ([]*ImportFix, error) {
|
||||
abs, err := filepath.Abs(filename)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -607,7 +627,7 @@ func getFixes(fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv
|
||||
|
||||
// Go look for candidates in $GOPATH, etc. We don't necessarily load
|
||||
// the real exports of sibling imports, so keep assuming their contents.
|
||||
if err := addExternalCandidates(p, p.missingRefs, filename); err != nil {
|
||||
if err := addExternalCandidates(ctx, p, p.missingRefs, filename); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -636,7 +656,7 @@ func getCandidatePkgs(ctx context.Context, wrappedCallback *scanCallback, filena
|
||||
dupCheck := map[string]struct{}{}
|
||||
|
||||
// Start off with the standard library.
|
||||
for importPath, exports := range stdlib {
|
||||
for importPath, symbols := range stdlib.PackageSymbols {
|
||||
p := &pkg{
|
||||
dir: filepath.Join(goenv["GOROOT"], "src", importPath),
|
||||
importPathShort: importPath,
|
||||
@@ -645,6 +665,13 @@ func getCandidatePkgs(ctx context.Context, wrappedCallback *scanCallback, filena
|
||||
}
|
||||
dupCheck[importPath] = struct{}{}
|
||||
if notSelf(p) && wrappedCallback.dirFound(p) && wrappedCallback.packageNameLoaded(p) {
|
||||
var exports []stdlib.Symbol
|
||||
for _, sym := range symbols {
|
||||
switch sym.Kind {
|
||||
case stdlib.Func, stdlib.Type, stdlib.Var, stdlib.Const:
|
||||
exports = append(exports, sym)
|
||||
}
|
||||
}
|
||||
wrappedCallback.exportsLoaded(p, exports)
|
||||
}
|
||||
}
|
||||
@@ -665,7 +692,7 @@ func getCandidatePkgs(ctx context.Context, wrappedCallback *scanCallback, filena
|
||||
dupCheck[pkg.importPathShort] = struct{}{}
|
||||
return notSelf(pkg) && wrappedCallback.packageNameLoaded(pkg)
|
||||
},
|
||||
exportsLoaded: func(pkg *pkg, exports []string) {
|
||||
exportsLoaded: func(pkg *pkg, exports []stdlib.Symbol) {
|
||||
// If we're an x_test, load the package under test's test variant.
|
||||
if strings.HasSuffix(filePkg, "_test") && pkg.dir == filepath.Dir(filename) {
|
||||
var err error
|
||||
@@ -696,20 +723,21 @@ func ScoreImportPaths(ctx context.Context, env *ProcessEnv, paths []string) (map
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func PrimeCache(ctx context.Context, env *ProcessEnv) error {
|
||||
func PrimeCache(ctx context.Context, resolver Resolver) error {
|
||||
// Fully scan the disk for directories, but don't actually read any Go files.
|
||||
callback := &scanCallback{
|
||||
rootFound: func(gopathwalk.Root) bool {
|
||||
return true
|
||||
rootFound: func(root gopathwalk.Root) bool {
|
||||
// See getCandidatePkgs: walking GOROOT is apparently expensive and
|
||||
// unnecessary.
|
||||
return root.Type != gopathwalk.RootGOROOT
|
||||
},
|
||||
dirFound: func(pkg *pkg) bool {
|
||||
return false
|
||||
},
|
||||
packageNameLoaded: func(pkg *pkg) bool {
|
||||
return false
|
||||
},
|
||||
// packageNameLoaded and exportsLoaded must never be called.
|
||||
}
|
||||
return getCandidatePkgs(ctx, callback, "", "", env)
|
||||
|
||||
return resolver.scan(ctx, callback)
|
||||
}
|
||||
|
||||
func candidateImportName(pkg *pkg) string {
|
||||
@@ -789,7 +817,7 @@ func GetImportPaths(ctx context.Context, wrapped func(ImportFix), searchPrefix,
|
||||
// A PackageExport is a package and its exports.
|
||||
type PackageExport struct {
|
||||
Fix *ImportFix
|
||||
Exports []string
|
||||
Exports []stdlib.Symbol
|
||||
}
|
||||
|
||||
// GetPackageExports returns all known packages with name pkg and their exports.
|
||||
@@ -804,8 +832,8 @@ func GetPackageExports(ctx context.Context, wrapped func(PackageExport), searchP
|
||||
packageNameLoaded: func(pkg *pkg) bool {
|
||||
return pkg.packageName == searchPkg
|
||||
},
|
||||
exportsLoaded: func(pkg *pkg, exports []string) {
|
||||
sort.Strings(exports)
|
||||
exportsLoaded: func(pkg *pkg, exports []stdlib.Symbol) {
|
||||
sortSymbols(exports)
|
||||
wrapped(PackageExport{
|
||||
Fix: &ImportFix{
|
||||
StmtInfo: ImportInfo{
|
||||
@@ -823,16 +851,45 @@ func GetPackageExports(ctx context.Context, wrapped func(PackageExport), searchP
|
||||
return getCandidatePkgs(ctx, callback, filename, filePkg, env)
|
||||
}
|
||||
|
||||
var requiredGoEnvVars = []string{"GO111MODULE", "GOFLAGS", "GOINSECURE", "GOMOD", "GOMODCACHE", "GONOPROXY", "GONOSUMDB", "GOPATH", "GOPROXY", "GOROOT", "GOSUMDB", "GOWORK"}
|
||||
// TODO(rfindley): we should depend on GOOS and GOARCH, to provide accurate
|
||||
// imports when doing cross-platform development.
|
||||
var requiredGoEnvVars = []string{
|
||||
"GO111MODULE",
|
||||
"GOFLAGS",
|
||||
"GOINSECURE",
|
||||
"GOMOD",
|
||||
"GOMODCACHE",
|
||||
"GONOPROXY",
|
||||
"GONOSUMDB",
|
||||
"GOPATH",
|
||||
"GOPROXY",
|
||||
"GOROOT",
|
||||
"GOSUMDB",
|
||||
"GOWORK",
|
||||
}
|
||||
|
||||
// ProcessEnv contains environment variables and settings that affect the use of
|
||||
// the go command, the go/build package, etc.
|
||||
//
|
||||
// ...a ProcessEnv *also* overwrites its Env along with derived state in the
|
||||
// form of the resolver. And because it is lazily initialized, an env may just
|
||||
// be broken and unusable, but there is no way for the caller to detect that:
|
||||
// all queries will just fail.
|
||||
//
|
||||
// TODO(rfindley): refactor this package so that this type (perhaps renamed to
|
||||
// just Env or Config) is an immutable configuration struct, to be exchanged
|
||||
// for an initialized object via a constructor that returns an error. Perhaps
|
||||
// the signature should be `func NewResolver(*Env) (*Resolver, error)`, where
|
||||
// resolver is a concrete type used for resolving imports. Via this
|
||||
// refactoring, we can avoid the need to call ProcessEnv.init and
|
||||
// ProcessEnv.GoEnv everywhere, and implicitly fix all the places where this
|
||||
// these are misused. Also, we'd delegate the caller the decision of how to
|
||||
// handle a broken environment.
|
||||
type ProcessEnv struct {
|
||||
GocmdRunner *gocommand.Runner
|
||||
|
||||
BuildFlags []string
|
||||
ModFlag string
|
||||
ModFile string
|
||||
|
||||
// SkipPathInScan returns true if the path should be skipped from scans of
|
||||
// the RootCurrentModule root type. The function argument is a clean,
|
||||
@@ -842,7 +899,7 @@ type ProcessEnv struct {
|
||||
// Env overrides the OS environment, and can be used to specify
|
||||
// GOPROXY, GO111MODULE, etc. PATH cannot be set here, because
|
||||
// exec.Command will not honor it.
|
||||
// Specifying all of RequiredGoEnvVars avoids a call to `go env`.
|
||||
// Specifying all of requiredGoEnvVars avoids a call to `go env`.
|
||||
Env map[string]string
|
||||
|
||||
WorkingDir string
|
||||
@@ -850,9 +907,17 @@ type ProcessEnv struct {
|
||||
// If Logf is non-nil, debug logging is enabled through this function.
|
||||
Logf func(format string, args ...interface{})
|
||||
|
||||
initialized bool
|
||||
// If set, ModCache holds a shared cache of directory info to use across
|
||||
// multiple ProcessEnvs.
|
||||
ModCache *DirInfoCache
|
||||
|
||||
resolver Resolver
|
||||
initialized bool // see TODO above
|
||||
|
||||
// resolver and resolverErr are lazily evaluated (see GetResolver).
|
||||
// This is unclean, but see the big TODO in the docstring for ProcessEnv
|
||||
// above: for now, we can't be sure that the ProcessEnv is fully initialized.
|
||||
resolver Resolver
|
||||
resolverErr error
|
||||
}
|
||||
|
||||
func (e *ProcessEnv) goEnv() (map[string]string, error) {
|
||||
@@ -932,20 +997,33 @@ func (e *ProcessEnv) env() []string {
|
||||
}
|
||||
|
||||
func (e *ProcessEnv) GetResolver() (Resolver, error) {
|
||||
if e.resolver != nil {
|
||||
return e.resolver, nil
|
||||
}
|
||||
if err := e.init(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(e.Env["GOMOD"]) == 0 && len(e.Env["GOWORK"]) == 0 {
|
||||
e.resolver = newGopathResolver(e)
|
||||
return e.resolver, nil
|
||||
|
||||
if e.resolver == nil && e.resolverErr == nil {
|
||||
// TODO(rfindley): we should only use a gopathResolver here if the working
|
||||
// directory is actually *in* GOPATH. (I seem to recall an open gopls issue
|
||||
// for this behavior, but I can't find it).
|
||||
//
|
||||
// For gopls, we can optionally explicitly choose a resolver type, since we
|
||||
// already know the view type.
|
||||
if len(e.Env["GOMOD"]) == 0 && len(e.Env["GOWORK"]) == 0 {
|
||||
e.resolver = newGopathResolver(e)
|
||||
} else if r, err := newModuleResolver(e, e.ModCache); err != nil {
|
||||
e.resolverErr = err
|
||||
} else {
|
||||
e.resolver = Resolver(r)
|
||||
}
|
||||
}
|
||||
e.resolver = newModuleResolver(e)
|
||||
return e.resolver, nil
|
||||
|
||||
return e.resolver, e.resolverErr
|
||||
}
|
||||
|
||||
// buildContext returns the build.Context to use for matching files.
|
||||
//
|
||||
// TODO(rfindley): support dynamic GOOS, GOARCH here, when doing cross-platform
|
||||
// development.
|
||||
func (e *ProcessEnv) buildContext() (*build.Context, error) {
|
||||
ctx := build.Default
|
||||
goenv, err := e.goEnv()
|
||||
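The rewritten GetResolver above becomes lazy and memoizes both outcomes: the constructed resolver and any construction error, so a broken environment fails the same way on every call instead of being re-probed. A minimal sketch of that pattern with stand-in types (none of these names are the real goimports API, and like the original it is not concurrency-safe):

```go
package main

import "fmt"

type Resolver interface{ Kind() string }

type gopathResolver struct{}

func (gopathResolver) Kind() string { return "gopath" }

type moduleResolver struct{}

func (moduleResolver) Kind() string { return "module" }

// newModuleResolver stands in for the real constructor, which runs
// `go list -m -json ...` and can therefore fail.
func newModuleResolver() (Resolver, error) { return moduleResolver{}, nil }

// env mimics the relevant ProcessEnv fields: the resolver and its
// construction error are both cached after the first GetResolver call.
type env struct {
	gomod, gowork string
	resolver      Resolver
	resolverErr   error
}

func (e *env) GetResolver() (Resolver, error) {
	if e.resolver == nil && e.resolverErr == nil {
		if e.gomod == "" && e.gowork == "" {
			e.resolver = gopathResolver{} // no module or workspace in scope
		} else if r, err := newModuleResolver(); err != nil {
			e.resolverErr = err // remember the failure; don't retry every call
		} else {
			e.resolver = r
		}
	}
	return e.resolver, e.resolverErr
}

func main() {
	e := &env{gomod: "/src/app/go.mod"}
	r, err := e.GetResolver()
	fmt.Println(r.Kind(), err) // module <nil>
}
```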
@@ -995,24 +1073,40 @@ func addStdlibCandidates(pass *pass, refs references) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
localbase := func(nm string) string {
|
||||
ans := path.Base(nm)
|
||||
if ans[0] == 'v' {
|
||||
// this is called, for instance, with math/rand/v2 and returns rand/v2
|
||||
if _, err := strconv.Atoi(ans[1:]); err == nil {
|
||||
ix := strings.LastIndex(nm, ans)
|
||||
more := path.Base(nm[:ix])
|
||||
ans = path.Join(more, ans)
|
||||
}
|
||||
}
|
||||
return ans
|
||||
}
|
||||
add := func(pkg string) {
|
||||
// Prevent self-imports.
|
||||
if path.Base(pkg) == pass.f.Name.Name && filepath.Join(goenv["GOROOT"], "src", pkg) == pass.srcDir {
|
||||
return
|
||||
}
|
||||
exports := copyExports(stdlib[pkg])
|
||||
exports := symbolNameSet(stdlib.PackageSymbols[pkg])
|
||||
pass.addCandidate(
|
||||
&ImportInfo{ImportPath: pkg},
|
||||
&packageInfo{name: path.Base(pkg), exports: exports})
|
||||
&packageInfo{name: localbase(pkg), exports: exports})
|
||||
}
|
||||
for left := range refs {
|
||||
if left == "rand" {
|
||||
// Make sure we try crypto/rand before math/rand.
|
||||
// Make sure we try crypto/rand before any version of math/rand as both have Int()
|
||||
// and our policy is to recommend crypto
|
||||
add("crypto/rand")
|
||||
add("math/rand")
|
||||
// if the user's no later than go1.21, this should be "math/rand"
|
||||
// but we have no way of figuring out what the user is using
|
||||
// TODO: investigate using the toolchain version to disambiguate in the stdlib
|
||||
add("math/rand/v2")
|
||||
continue
|
||||
}
|
||||
for importPath := range stdlib {
|
||||
for importPath := range stdlib.PackageSymbols {
|
||||
if path.Base(importPath) == left {
|
||||
add(importPath)
|
||||
}
|
||||
@@ -1025,15 +1119,23 @@ func addStdlibCandidates(pass *pass, refs references) error {
|
||||
type Resolver interface {
|
||||
// loadPackageNames loads the package names in importPaths.
|
||||
loadPackageNames(importPaths []string, srcDir string) (map[string]string, error)
|
||||
|
||||
// scan works with callback to search for packages. See scanCallback for details.
|
||||
scan(ctx context.Context, callback *scanCallback) error
|
||||
|
||||
// loadExports returns the set of exported symbols in the package at dir.
|
||||
// loadExports may be called concurrently.
|
||||
loadExports(ctx context.Context, pkg *pkg, includeTest bool) (string, []string, error)
|
||||
loadExports(ctx context.Context, pkg *pkg, includeTest bool) (string, []stdlib.Symbol, error)
|
||||
|
||||
// scoreImportPath returns the relevance for an import path.
|
||||
scoreImportPath(ctx context.Context, path string) float64
|
||||
|
||||
ClearForNewScan()
|
||||
// ClearForNewScan returns a new Resolver based on the receiver that has
|
||||
// cleared its internal caches of directory contents.
|
||||
//
|
||||
// The new resolver should be primed and then set via
|
||||
// [ProcessEnv.UpdateResolver].
|
||||
ClearForNewScan() Resolver
|
||||
}
|
||||
|
||||
// A scanCallback controls a call to scan and receives its results.
|
||||
@@ -1052,10 +1154,13 @@ type scanCallback struct {
|
||||
// If it returns true, the package's exports will be loaded.
|
||||
packageNameLoaded func(pkg *pkg) bool
|
||||
// exportsLoaded is called when a package's exports have been loaded.
|
||||
exportsLoaded func(pkg *pkg, exports []string)
|
||||
exportsLoaded func(pkg *pkg, exports []stdlib.Symbol)
|
||||
}
|
||||
|
||||
func addExternalCandidates(pass *pass, refs references, filename string) error {
|
||||
func addExternalCandidates(ctx context.Context, pass *pass, refs references, filename string) error {
|
||||
ctx, done := event.Start(ctx, "imports.addExternalCandidates")
|
||||
defer done()
|
||||
|
||||
var mu sync.Mutex
|
||||
found := make(map[string][]pkgDistance)
|
||||
callback := &scanCallback{
|
||||
@@ -1113,7 +1218,7 @@ func addExternalCandidates(pass *pass, refs references, filename string) error {
|
||||
go func(pkgName string, symbols map[string]bool) {
|
||||
defer wg.Done()
|
||||
|
||||
found, err := findImport(ctx, pass, found[pkgName], pkgName, symbols, filename)
|
||||
found, err := findImport(ctx, pass, found[pkgName], pkgName, symbols)
|
||||
|
||||
if err != nil {
|
||||
firstErrOnce.Do(func() {
|
||||
@@ -1144,6 +1249,17 @@ func addExternalCandidates(pass *pass, refs references, filename string) error {
|
||||
}()
|
||||
|
||||
for result := range results {
|
||||
// Don't offer completions that would shadow predeclared
|
||||
// names, such as github.com/coreos/etcd/error.
|
||||
if types.Universe.Lookup(result.pkg.name) != nil { // predeclared
|
||||
// Ideally we would skip this candidate only
|
||||
// if the predeclared name is actually
|
||||
// referenced by the file, but that's a lot
|
||||
// trickier to compute and would still create
|
||||
// an import that is likely to surprise the
|
||||
// user before long.
|
||||
continue
|
||||
}
|
||||
pass.addCandidate(result.imp, result.pkg)
|
||||
}
|
||||
return firstErr
|
||||
@@ -1186,31 +1302,22 @@ func ImportPathToAssumedName(importPath string) string {
|
||||
type gopathResolver struct {
|
||||
env *ProcessEnv
|
||||
walked bool
|
||||
cache *dirInfoCache
|
||||
cache *DirInfoCache
|
||||
scanSema chan struct{} // scanSema prevents concurrent scans.
|
||||
}
|
||||
|
||||
func newGopathResolver(env *ProcessEnv) *gopathResolver {
|
||||
r := &gopathResolver{
|
||||
env: env,
|
||||
cache: &dirInfoCache{
|
||||
dirs: map[string]*directoryPackageInfo{},
|
||||
listeners: map[*int]cacheListener{},
|
||||
},
|
||||
env: env,
|
||||
cache: NewDirInfoCache(),
|
||||
scanSema: make(chan struct{}, 1),
|
||||
}
|
||||
r.scanSema <- struct{}{}
|
||||
return r
|
||||
}
|
||||
|
||||
func (r *gopathResolver) ClearForNewScan() {
|
||||
<-r.scanSema
|
||||
r.cache = &dirInfoCache{
|
||||
dirs: map[string]*directoryPackageInfo{},
|
||||
listeners: map[*int]cacheListener{},
|
||||
}
|
||||
r.walked = false
|
||||
r.scanSema <- struct{}{}
|
||||
func (r *gopathResolver) ClearForNewScan() Resolver {
|
||||
return newGopathResolver(r.env)
|
||||
}
|
||||
|
||||
func (r *gopathResolver) loadPackageNames(importPaths []string, srcDir string) (map[string]string, error) {
|
||||
@@ -1228,7 +1335,7 @@ func (r *gopathResolver) loadPackageNames(importPaths []string, srcDir string) (
|
||||
// importPathToName finds out the actual package name, as declared in its .go files.
|
||||
func importPathToName(bctx *build.Context, importPath, srcDir string) string {
|
||||
// Fast path for standard library without going to disk.
|
||||
if _, ok := stdlib[importPath]; ok {
|
||||
if stdlib.HasPackage(importPath) {
|
||||
return path.Base(importPath) // stdlib packages always match their paths.
|
||||
}
|
||||
|
||||
@@ -1426,7 +1533,7 @@ func (r *gopathResolver) scan(ctx context.Context, callback *scanCallback) error
|
||||
}
|
||||
|
||||
func (r *gopathResolver) scoreImportPath(ctx context.Context, path string) float64 {
|
||||
if _, ok := stdlib[path]; ok {
|
||||
if stdlib.HasPackage(path) {
|
||||
return MaxRelevance
|
||||
}
|
||||
return MaxRelevance - 1
|
||||
@@ -1443,7 +1550,7 @@ func filterRoots(roots []gopathwalk.Root, include func(gopathwalk.Root) bool) []
|
||||
return result
|
||||
}
|
||||
|
||||
func (r *gopathResolver) loadExports(ctx context.Context, pkg *pkg, includeTest bool) (string, []string, error) {
|
||||
func (r *gopathResolver) loadExports(ctx context.Context, pkg *pkg, includeTest bool) (string, []stdlib.Symbol, error) {
|
||||
if info, ok := r.cache.Load(pkg.dir); ok && !includeTest {
|
||||
return r.cache.CacheExports(ctx, r.env, info)
|
||||
}
|
||||
@@ -1463,13 +1570,13 @@ func VendorlessPath(ipath string) string {
|
||||
return ipath
|
||||
}
|
||||
|
||||
func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string, includeTest bool) (string, []string, error) {
|
||||
func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string, includeTest bool) (string, []stdlib.Symbol, error) {
|
||||
// Look for non-test, buildable .go files which could provide exports.
|
||||
all, err := ioutil.ReadDir(dir)
|
||||
all, err := os.ReadDir(dir)
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
var files []os.FileInfo
|
||||
var files []fs.DirEntry
|
||||
for _, fi := range all {
|
||||
name := fi.Name()
|
||||
if !strings.HasSuffix(name, ".go") || (!includeTest && strings.HasSuffix(name, "_test.go")) {
|
||||
@@ -1487,7 +1594,7 @@ func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string, incl
|
||||
}
|
||||
|
||||
var pkgName string
|
||||
var exports []string
|
||||
var exports []stdlib.Symbol
|
||||
fset := token.NewFileSet()
|
||||
for _, fi := range files {
|
||||
select {
|
||||
@@ -1514,24 +1621,44 @@ func loadExportsFromFiles(ctx context.Context, env *ProcessEnv, dir string, incl
|
||||
continue
|
||||
}
|
||||
pkgName = f.Name.Name
|
||||
for name := range f.Scope.Objects {
|
||||
for name, obj := range f.Scope.Objects {
|
||||
if ast.IsExported(name) {
|
||||
exports = append(exports, name)
|
||||
var kind stdlib.Kind
|
||||
switch obj.Kind {
|
||||
case ast.Con:
|
||||
kind = stdlib.Const
|
||||
case ast.Typ:
|
||||
kind = stdlib.Type
|
||||
case ast.Var:
|
||||
kind = stdlib.Var
|
||||
case ast.Fun:
|
||||
kind = stdlib.Func
|
||||
}
|
||||
exports = append(exports, stdlib.Symbol{
|
||||
Name: name,
|
||||
Kind: kind,
|
||||
Version: 0, // unknown; be permissive
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
sortSymbols(exports)
|
||||
|
||||
if env.Logf != nil {
|
||||
sortedExports := append([]string(nil), exports...)
|
||||
sort.Strings(sortedExports)
|
||||
env.Logf("loaded exports in dir %v (package %v): %v", dir, pkgName, strings.Join(sortedExports, ", "))
|
||||
env.Logf("loaded exports in dir %v (package %v): %v", dir, pkgName, exports)
|
||||
}
|
||||
return pkgName, exports, nil
|
||||
}
|
||||
|
||||
func sortSymbols(syms []stdlib.Symbol) {
|
||||
sort.Slice(syms, func(i, j int) bool {
|
||||
return syms[i].Name < syms[j].Name
|
||||
})
|
||||
}
|
||||
|
||||
// findImport searches for a package with the given symbols.
|
||||
// If no package is found, findImport returns ("", false, nil)
|
||||
func findImport(ctx context.Context, pass *pass, candidates []pkgDistance, pkgName string, symbols map[string]bool, filename string) (*pkg, error) {
|
||||
func findImport(ctx context.Context, pass *pass, candidates []pkgDistance, pkgName string, symbols map[string]bool) (*pkg, error) {
|
||||
// Sort the candidates by their import package length,
|
||||
// assuming that shorter package names are better than long
|
||||
// ones. Note that this sorts by the de-vendored name, so
|
||||
@@ -1595,7 +1722,7 @@ func findImport(ctx context.Context, pass *pass, candidates []pkgDistance, pkgNa
|
||||
|
||||
exportsMap := make(map[string]bool, len(exports))
|
||||
for _, sym := range exports {
|
||||
exportsMap[sym] = true
|
||||
exportsMap[sym.Name] = true
|
||||
}
|
||||
|
||||
// If it doesn't have the right
|
||||
@@ -1753,10 +1880,13 @@ func (fn visitFn) Visit(node ast.Node) ast.Visitor {
|
||||
return fn(node)
|
||||
}
|
||||
|
||||
func copyExports(pkg []string) map[string]bool {
|
||||
m := make(map[string]bool, len(pkg))
|
||||
for _, v := range pkg {
|
||||
m[v] = true
|
||||
func symbolNameSet(symbols []stdlib.Symbol) map[string]bool {
|
||||
names := make(map[string]bool)
|
||||
for _, sym := range symbols {
|
||||
switch sym.Kind {
|
||||
case stdlib.Const, stdlib.Var, stdlib.Type, stdlib.Func:
|
||||
names[sym.Name] = true
|
||||
}
|
||||
}
|
||||
return m
|
||||
return names
|
||||
}
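Throughout fix.go, exports move from plain []string names to []stdlib.Symbol values, and helpers like the new symbolNameSet keep only consts, vars, types and funcs. A self-contained sketch with local stand-in types (the real ones live in the internal golang.org/x/tools/internal/stdlib package and cannot be imported from outside x/tools):

```go
package main

import "fmt"

// Stand-ins for stdlib.Kind and stdlib.Symbol; definitions are local to this
// sketch and only approximate the internal types.
type Kind int

const (
	Const Kind = iota
	Var
	Type
	Func
	Method // not a package-level symbol; used here to show filtering
)

type Symbol struct {
	Name string
	Kind Kind
}

// symbolNameSet mirrors the new helper in fix.go: keep only package-level
// consts, vars, types and funcs, which is all completion candidates need.
func symbolNameSet(symbols []Symbol) map[string]bool {
	names := make(map[string]bool)
	for _, sym := range symbols {
		switch sym.Kind {
		case Const, Var, Type, Func:
			names[sym.Name] = true
		}
	}
	return names
}

func main() {
	syms := []Symbol{{"Pi", Const}, {"MaxInt64", Const}, {"Sqrt", Func}, {"String", Method}}
	fmt.Println(symbolNameSet(syms)) // map[MaxInt64:true Pi:true Sqrt:true]
}
```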

vendor/golang.org/x/tools/internal/imports/imports.go (15 changed lines, generated, vendored)
@@ -2,8 +2,6 @@
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:generate go run mkstdlib.go
|
||||
|
||||
// Package imports implements a Go pretty-printer (like package "go/format")
|
||||
// that also adds or removes import statements as necessary.
|
||||
package imports
|
||||
@@ -11,6 +9,7 @@ package imports
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"go/format"
|
||||
@@ -23,6 +22,7 @@ import (
|
||||
"strings"
|
||||
|
||||
"golang.org/x/tools/go/ast/astutil"
|
||||
"golang.org/x/tools/internal/event"
|
||||
)
|
||||
|
||||
// Options is golang.org/x/tools/imports.Options with extra internal-only options.
|
||||
@@ -66,14 +66,17 @@ func Process(filename string, src []byte, opt *Options) (formatted []byte, err e
|
||||
//
|
||||
// Note that filename's directory influences which imports can be chosen,
|
||||
// so it is important that filename be accurate.
|
||||
func FixImports(filename string, src []byte, opt *Options) (fixes []*ImportFix, err error) {
|
||||
func FixImports(ctx context.Context, filename string, src []byte, opt *Options) (fixes []*ImportFix, err error) {
|
||||
ctx, done := event.Start(ctx, "imports.FixImports")
|
||||
defer done()
|
||||
|
||||
fileSet := token.NewFileSet()
|
||||
file, _, err := parse(fileSet, filename, src, opt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return getFixes(fileSet, file, filename, opt.Env)
|
||||
return getFixes(ctx, fileSet, file, filename, opt.Env)
|
||||
}
|
||||
|
||||
// ApplyFixes applies all of the fixes to the file and formats it. extraMode
|
||||
@@ -104,7 +107,7 @@ func ApplyFixes(fixes []*ImportFix, filename string, src []byte, opt *Options, e
|
||||
}
|
||||
|
||||
// formatFile formats the file syntax tree.
|
||||
// It may mutate the token.FileSet.
|
||||
// It may mutate the token.FileSet and the ast.File.
|
||||
//
|
||||
// If an adjust function is provided, it is called after formatting
|
||||
// with the original source (formatFile's src parameter) and the
|
||||
@@ -231,7 +234,7 @@ func parse(fset *token.FileSet, filename string, src []byte, opt *Options) (*ast
|
||||
src = src[:len(src)-len("}\n")]
|
||||
// Gofmt has also indented the function body one level.
|
||||
// Remove that indent.
|
||||
src = bytes.Replace(src, []byte("\n\t"), []byte("\n"), -1)
|
||||
src = bytes.ReplaceAll(src, []byte("\n\t"), []byte("\n"))
|
||||
return matchSpace(orig, src)
|
||||
}
|
||||
return file, adjust, nil

vendor/golang.org/x/tools/internal/imports/mod.go (353 changed lines, generated, vendored)
@@ -9,7 +9,6 @@ import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"path/filepath"
|
||||
@@ -19,80 +18,141 @@ import (
|
||||
"strings"
|
||||
|
||||
"golang.org/x/mod/module"
|
||||
"golang.org/x/tools/internal/event"
|
||||
"golang.org/x/tools/internal/gocommand"
|
||||
"golang.org/x/tools/internal/gopathwalk"
|
||||
"golang.org/x/tools/internal/stdlib"
|
||||
)
|
||||
|
||||
// ModuleResolver implements resolver for modules using the go command as little
|
||||
// as feasible.
|
||||
// Notes(rfindley): ModuleResolver appears to be heavily optimized for scanning
|
||||
// as fast as possible, which is desirable for a call to goimports from the
|
||||
// command line, but it doesn't work as well for gopls, where it suffers from
|
||||
// slow startup (golang/go#44863) and intermittent hanging (golang/go#59216),
|
||||
// both caused by populating the cache, albeit in slightly different ways.
|
||||
//
|
||||
// A high level list of TODOs:
|
||||
// - Optimize the scan itself, as there is some redundancy statting and
|
||||
// reading go.mod files.
|
||||
// - Invert the relationship between ProcessEnv and Resolver (see the
|
||||
// docstring of ProcessEnv).
|
||||
// - Make it easier to use an external resolver implementation.
|
||||
//
|
||||
// Smaller TODOs are annotated in the code below.
|
||||
|
||||
// ModuleResolver implements the Resolver interface for a workspace using
|
||||
// modules.
|
||||
//
|
||||
// A goal of the ModuleResolver is to invoke the Go command as little as
|
||||
// possible. To this end, it runs the Go command only for listing module
|
||||
// information (i.e. `go list -m -e -json ...`). Package scanning, the process
|
||||
// of loading package information for the modules, is implemented internally
|
||||
// via the scan method.
|
||||
//
|
||||
// It has two types of state: the state derived from the go command, which
|
||||
// is populated by init, and the state derived from scans, which is populated
|
||||
// via scan. A root is considered scanned if it has been walked to discover
|
||||
// directories. However, if the scan did not require additional information
|
||||
// from the directory (such as package name or exports), the directory
|
||||
// information itself may be partially populated. It will be lazily filled in
|
||||
// as needed by scans, using the scanCallback.
|
||||
type ModuleResolver struct {
|
||||
env *ProcessEnv
|
||||
moduleCacheDir string
|
||||
dummyVendorMod *gocommand.ModuleJSON // If vendoring is enabled, the pseudo-module that represents the /vendor directory.
|
||||
roots []gopathwalk.Root
|
||||
scanSema chan struct{} // scanSema prevents concurrent scans and guards scannedRoots.
|
||||
scannedRoots map[gopathwalk.Root]bool
|
||||
env *ProcessEnv
|
||||
|
||||
initialized bool
|
||||
mains []*gocommand.ModuleJSON
|
||||
mainByDir map[string]*gocommand.ModuleJSON
|
||||
modsByModPath []*gocommand.ModuleJSON // All modules, ordered by # of path components in module Path...
|
||||
modsByDir []*gocommand.ModuleJSON // ...or Dir.
|
||||
// Module state, populated during construction
|
||||
dummyVendorMod *gocommand.ModuleJSON // if vendoring is enabled, a pseudo-module to represent the /vendor directory
|
||||
moduleCacheDir string // GOMODCACHE, inferred from GOPATH if unset
|
||||
roots []gopathwalk.Root // roots to scan, in approximate order of importance
|
||||
mains []*gocommand.ModuleJSON // main modules
|
||||
mainByDir map[string]*gocommand.ModuleJSON // module information by dir, to join with roots
|
||||
modsByModPath []*gocommand.ModuleJSON // all modules, ordered by # of path components in their module path
|
||||
modsByDir []*gocommand.ModuleJSON // ...or by the number of path components in their Dir.
|
||||
|
||||
// moduleCacheCache stores information about the module cache.
|
||||
moduleCacheCache *dirInfoCache
|
||||
otherCache *dirInfoCache
|
||||
// Scanning state, populated by scan
|
||||
|
||||
// scanSema prevents concurrent scans, and guards scannedRoots and the cache
|
||||
// fields below (though the caches themselves are concurrency safe).
|
||||
// Receive to acquire, send to release.
|
||||
scanSema chan struct{}
|
||||
scannedRoots map[gopathwalk.Root]bool // if true, root has been walked
|
||||
|
||||
// Caches of directory info, populated by scans and scan callbacks
|
||||
//
|
||||
// moduleCacheCache stores cached information about roots in the module
|
||||
// cache, which are immutable and therefore do not need to be invalidated.
|
||||
//
|
||||
// otherCache stores information about all other roots (even GOROOT), which
|
||||
// may change.
|
||||
moduleCacheCache *DirInfoCache
|
||||
otherCache *DirInfoCache
|
||||
}
|
||||
|
||||
func newModuleResolver(e *ProcessEnv) *ModuleResolver {
|
||||
// newModuleResolver returns a new module-aware goimports resolver.
|
||||
//
|
||||
// Note: use caution when modifying this constructor: changes must also be
|
||||
// reflected in ModuleResolver.ClearForNewScan.
|
||||
func newModuleResolver(e *ProcessEnv, moduleCacheCache *DirInfoCache) (*ModuleResolver, error) {
|
||||
r := &ModuleResolver{
|
||||
env: e,
|
||||
scanSema: make(chan struct{}, 1),
|
||||
}
|
||||
r.scanSema <- struct{}{}
|
||||
return r
|
||||
}
|
||||
|
||||
func (r *ModuleResolver) init() error {
|
||||
if r.initialized {
|
||||
return nil
|
||||
}
|
||||
r.scanSema <- struct{}{} // release
|
||||
|
||||
goenv, err := r.env.goEnv()
|
||||
if err != nil {
|
||||
return err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// TODO(rfindley): can we refactor to share logic with r.env.invokeGo?
|
||||
inv := gocommand.Invocation{
|
||||
BuildFlags: r.env.BuildFlags,
|
||||
ModFlag: r.env.ModFlag,
|
||||
ModFile: r.env.ModFile,
|
||||
Env: r.env.env(),
|
||||
Logf: r.env.Logf,
|
||||
WorkingDir: r.env.WorkingDir,
|
||||
}
|
||||
|
||||
vendorEnabled := false
|
||||
var mainModVendor *gocommand.ModuleJSON
|
||||
var mainModVendor *gocommand.ModuleJSON // for module vendoring
|
||||
var mainModsVendor []*gocommand.ModuleJSON // for workspace vendoring
|
||||
|
||||
// Module vendor directories are ignored in workspace mode:
|
||||
// https://go.googlesource.com/proposal/+/master/design/45713-workspace.md
|
||||
if len(r.env.Env["GOWORK"]) == 0 {
|
||||
goWork := r.env.Env["GOWORK"]
|
||||
if len(goWork) == 0 {
|
||||
// TODO(rfindley): VendorEnabled runs the go command to get GOFLAGS, but
|
||||
// they should be available from the ProcessEnv. Can we avoid the redundant
|
||||
// invocation?
|
||||
vendorEnabled, mainModVendor, err = gocommand.VendorEnabled(context.TODO(), inv, r.env.GocmdRunner)
|
||||
if err != nil {
|
||||
return err
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
vendorEnabled, mainModsVendor, err = gocommand.WorkspaceVendorEnabled(context.Background(), inv, r.env.GocmdRunner)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if mainModVendor != nil && vendorEnabled {
|
||||
// Vendor mode is on, so all the non-Main modules are irrelevant,
|
||||
// and we need to search /vendor for everything.
|
||||
r.mains = []*gocommand.ModuleJSON{mainModVendor}
|
||||
r.dummyVendorMod = &gocommand.ModuleJSON{
|
||||
Path: "",
|
||||
Dir: filepath.Join(mainModVendor.Dir, "vendor"),
|
||||
if vendorEnabled {
|
||||
if mainModVendor != nil {
|
||||
// Module vendor mode is on, so all the non-Main modules are irrelevant,
|
||||
// and we need to search /vendor for everything.
|
||||
r.mains = []*gocommand.ModuleJSON{mainModVendor}
|
||||
r.dummyVendorMod = &gocommand.ModuleJSON{
|
||||
Path: "",
|
||||
Dir: filepath.Join(mainModVendor.Dir, "vendor"),
|
||||
}
|
||||
r.modsByModPath = []*gocommand.ModuleJSON{mainModVendor, r.dummyVendorMod}
|
||||
r.modsByDir = []*gocommand.ModuleJSON{mainModVendor, r.dummyVendorMod}
|
||||
} else {
|
||||
// Workspace vendor mode is on, so all the non-Main modules are irrelevant,
|
||||
// and we need to search /vendor for everything.
|
||||
r.mains = mainModsVendor
|
||||
r.dummyVendorMod = &gocommand.ModuleJSON{
|
||||
Path: "",
|
||||
Dir: filepath.Join(filepath.Dir(goWork), "vendor"),
|
||||
}
|
||||
r.modsByModPath = append(append([]*gocommand.ModuleJSON{}, mainModsVendor...), r.dummyVendorMod)
|
||||
r.modsByDir = append(append([]*gocommand.ModuleJSON{}, mainModsVendor...), r.dummyVendorMod)
|
||||
}
|
||||
r.modsByModPath = []*gocommand.ModuleJSON{mainModVendor, r.dummyVendorMod}
|
||||
r.modsByDir = []*gocommand.ModuleJSON{mainModVendor, r.dummyVendorMod}
|
||||
} else {
|
||||
// Vendor mode is off, so run go list -m ... to find everything.
|
||||
err := r.initAllMods()
|
||||
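The init changes above add workspace vendoring: with GOWORK unset, the pseudo vendor module points at the main module's vendor directory, while in a workspace it points at the vendor directory next to go.work. A tiny sketch of that path selection (hypothetical helper, not the vendored code):

```go
package main

import (
	"fmt"
	"path/filepath"
)

// vendorDir shows where the resolver's pseudo vendor module points:
// module vendoring uses <main module dir>/vendor, workspace vendoring
// (GOWORK set) uses <dir of go.work>/vendor.
func vendorDir(goWork, mainModDir string) string {
	if goWork != "" {
		return filepath.Join(filepath.Dir(goWork), "vendor")
	}
	return filepath.Join(mainModDir, "vendor")
}

func main() {
	fmt.Println(vendorDir("", "/src/app"))                   // /src/app/vendor
	fmt.Println(vendorDir("/src/ws/go.work", "/src/ws/app")) // /src/ws/vendor
}
```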
@@ -100,19 +160,14 @@ func (r *ModuleResolver) init() error {
|
||||
// GO111MODULE=on. Other errors are fatal.
|
||||
if err != nil {
|
||||
if errMsg := err.Error(); !strings.Contains(errMsg, "working directory is not part of a module") && !strings.Contains(errMsg, "go.mod file not found") {
|
||||
return err
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if gmc := r.env.Env["GOMODCACHE"]; gmc != "" {
|
||||
r.moduleCacheDir = gmc
|
||||
} else {
|
||||
gopaths := filepath.SplitList(goenv["GOPATH"])
|
||||
if len(gopaths) == 0 {
|
||||
return fmt.Errorf("empty GOPATH")
|
||||
}
|
||||
r.moduleCacheDir = filepath.Join(gopaths[0], "/pkg/mod")
|
||||
r.moduleCacheDir = gomodcacheForEnv(goenv)
|
||||
if r.moduleCacheDir == "" {
|
||||
return nil, fmt.Errorf("cannot resolve GOMODCACHE")
|
||||
}
|
||||
|
||||
sort.Slice(r.modsByModPath, func(i, j int) bool {
|
||||
@@ -123,13 +178,14 @@ func (r *ModuleResolver) init() error {
|
||||
})
|
||||
sort.Slice(r.modsByDir, func(i, j int) bool {
|
||||
count := func(x int) int {
|
||||
return strings.Count(r.modsByDir[x].Dir, "/")
|
||||
return strings.Count(r.modsByDir[x].Dir, string(filepath.Separator))
|
||||
}
|
||||
return count(j) < count(i) // descending order
|
||||
})
|
||||
|
||||
r.roots = []gopathwalk.Root{
|
||||
{Path: filepath.Join(goenv["GOROOT"], "/src"), Type: gopathwalk.RootGOROOT},
|
||||
r.roots = []gopathwalk.Root{}
|
||||
if goenv["GOROOT"] != "" { // "" happens in tests
|
||||
r.roots = append(r.roots, gopathwalk.Root{Path: filepath.Join(goenv["GOROOT"], "/src"), Type: gopathwalk.RootGOROOT})
|
||||
}
|
||||
r.mainByDir = make(map[string]*gocommand.ModuleJSON)
|
||||
for _, main := range r.mains {
|
||||
@@ -141,7 +197,11 @@ func (r *ModuleResolver) init() error {
|
||||
} else {
|
||||
addDep := func(mod *gocommand.ModuleJSON) {
|
||||
if mod.Replace == nil {
|
||||
// This is redundant with the cache, but we'll skip it cheaply enough.
|
||||
// This is redundant with the cache, but we'll skip it cheaply enough
|
||||
// when we encounter it in the module cache scan.
|
||||
//
|
||||
// Including it at a lower index in r.roots than the module cache dir
|
||||
// helps prioritize matches from within existing dependencies.
|
||||
r.roots = append(r.roots, gopathwalk.Root{Path: mod.Dir, Type: gopathwalk.RootModuleCache})
|
||||
} else {
|
||||
r.roots = append(r.roots, gopathwalk.Root{Path: mod.Dir, Type: gopathwalk.RootOther})
|
||||
@@ -158,24 +218,40 @@ func (r *ModuleResolver) init() error {
|
||||
addDep(mod)
|
||||
}
|
||||
}
|
||||
// If provided, share the moduleCacheCache.
|
||||
//
|
||||
// TODO(rfindley): The module cache is immutable. However, the loaded
|
||||
// exports do depend on GOOS and GOARCH. Fortunately, the
|
||||
// ProcessEnv.buildContext does not adjust these from build.DefaultContext
|
||||
// (even though it should). So for now, this is OK to share, but we need to
|
||||
// add logic for handling GOOS/GOARCH.
|
||||
r.moduleCacheCache = moduleCacheCache
|
||||
r.roots = append(r.roots, gopathwalk.Root{Path: r.moduleCacheDir, Type: gopathwalk.RootModuleCache})
|
||||
}
|
||||
|
||||
r.scannedRoots = map[gopathwalk.Root]bool{}
|
||||
if r.moduleCacheCache == nil {
|
||||
r.moduleCacheCache = &dirInfoCache{
|
||||
dirs: map[string]*directoryPackageInfo{},
|
||||
listeners: map[*int]cacheListener{},
|
||||
}
|
||||
r.moduleCacheCache = NewDirInfoCache()
|
||||
}
|
||||
if r.otherCache == nil {
|
||||
r.otherCache = &dirInfoCache{
|
||||
dirs: map[string]*directoryPackageInfo{},
|
||||
listeners: map[*int]cacheListener{},
|
||||
}
|
||||
r.otherCache = NewDirInfoCache()
|
||||
return r, nil
|
||||
}
|
||||
|
||||
// gomodcacheForEnv returns the GOMODCACHE value to use based on the given env
|
||||
// map, which must have GOMODCACHE and GOPATH populated.
|
||||
//
|
||||
// TODO(rfindley): this is defensive refactoring.
|
||||
// 1. Is this even relevant anymore? Can't we just read GOMODCACHE.
|
||||
// 2. Use this to separate module cache scanning from other scanning.
|
||||
func gomodcacheForEnv(goenv map[string]string) string {
|
||||
if gmc := goenv["GOMODCACHE"]; gmc != "" {
|
||||
return gmc
|
||||
}
|
||||
r.initialized = true
|
||||
return nil
|
||||
gopaths := filepath.SplitList(goenv["GOPATH"])
|
||||
if len(gopaths) == 0 {
|
||||
return ""
|
||||
}
|
||||
return filepath.Join(gopaths[0], "/pkg/mod")
|
||||
}
|
||||
|
||||
func (r *ModuleResolver) initAllMods() error {
|
||||
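gomodcacheForEnv centralizes the GOMODCACHE fallback that used to be inlined in init: use GOMODCACHE if set, otherwise the first GOPATH entry plus pkg/mod, or "" if neither is available. A sketch of the same logic over a plain map (illustrative, not the vendored implementation):

```go
package main

import (
	"fmt"
	"path/filepath"
)

// gomodcacheFor returns the module cache directory for a go-env snapshot.
func gomodcacheFor(goenv map[string]string) string {
	if gmc := goenv["GOMODCACHE"]; gmc != "" {
		return gmc
	}
	gopaths := filepath.SplitList(goenv["GOPATH"])
	if len(gopaths) == 0 {
		return ""
	}
	return filepath.Join(gopaths[0], "pkg", "mod")
}

func main() {
	fmt.Println(gomodcacheFor(map[string]string{"GOPATH": "/home/user/go"}))
	// /home/user/go/pkg/mod
}
```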
@@ -206,30 +282,86 @@ func (r *ModuleResolver) initAllMods() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *ModuleResolver) ClearForNewScan() {
|
||||
<-r.scanSema
|
||||
r.scannedRoots = map[gopathwalk.Root]bool{}
|
||||
r.otherCache = &dirInfoCache{
|
||||
dirs: map[string]*directoryPackageInfo{},
|
||||
listeners: map[*int]cacheListener{},
|
||||
}
|
||||
r.scanSema <- struct{}{}
|
||||
}
|
||||
// ClearForNewScan invalidates the last scan.
|
||||
//
|
||||
// It preserves the set of roots, but forgets about the set of directories.
|
||||
// Though it forgets the set of module cache directories, it remembers their
|
||||
// contents, since they are assumed to be immutable.
|
||||
func (r *ModuleResolver) ClearForNewScan() Resolver {
|
||||
<-r.scanSema // acquire r, to guard scannedRoots
|
||||
r2 := &ModuleResolver{
|
||||
env: r.env,
|
||||
dummyVendorMod: r.dummyVendorMod,
|
||||
moduleCacheDir: r.moduleCacheDir,
|
||||
roots: r.roots,
|
||||
mains: r.mains,
|
||||
mainByDir: r.mainByDir,
|
||||
modsByModPath: r.modsByModPath,
|
||||
|
||||
func (r *ModuleResolver) ClearForNewMod() {
|
||||
<-r.scanSema
|
||||
*r = ModuleResolver{
|
||||
env: r.env,
|
||||
scanSema: make(chan struct{}, 1),
|
||||
scannedRoots: make(map[gopathwalk.Root]bool),
|
||||
otherCache: NewDirInfoCache(),
|
||||
moduleCacheCache: r.moduleCacheCache,
|
||||
otherCache: r.otherCache,
|
||||
scanSema: r.scanSema,
|
||||
}
|
||||
r.init()
|
||||
r.scanSema <- struct{}{}
|
||||
r2.scanSema <- struct{}{} // r2 must start released
|
||||
// Invalidate root scans. We don't need to invalidate module cache roots,
|
||||
// because they are immutable.
|
||||
// (We don't support a use case where GOMODCACHE is cleaned in the middle of
|
||||
// e.g. a gopls session: the user must restart gopls to get accurate
|
||||
// imports.)
|
||||
//
|
||||
// Scanning for new directories in GOMODCACHE should be handled elsewhere,
|
||||
// via a call to ScanModuleCache.
|
||||
for _, root := range r.roots {
|
||||
if root.Type == gopathwalk.RootModuleCache && r.scannedRoots[root] {
|
||||
r2.scannedRoots[root] = true
|
||||
}
|
||||
}
|
||||
r.scanSema <- struct{}{} // release r
|
||||
return r2
|
||||
}
|
||||
|
||||
// findPackage returns the module and directory that contains the package at
|
||||
// the given import path, or returns nil, "" if no module is in scope.
|
||||
// ClearModuleInfo invalidates resolver state that depends on go.mod file
|
||||
// contents (essentially, the output of go list -m -json ...).
|
||||
//
|
||||
// Notably, it does not forget directory contents, which are reset
|
||||
// asynchronously via ClearForNewScan.
|
||||
//
|
||||
// If the ProcessEnv is a GOPATH environment, ClearModuleInfo is a no op.
|
||||
//
|
||||
// TODO(rfindley): move this to a new env.go, consolidating ProcessEnv methods.
|
||||
func (e *ProcessEnv) ClearModuleInfo() {
|
||||
if r, ok := e.resolver.(*ModuleResolver); ok {
|
||||
resolver, err := newModuleResolver(e, e.ModCache)
|
||||
if err != nil {
|
||||
e.resolver = nil
|
||||
e.resolverErr = err
|
||||
return
|
||||
}
|
||||
|
||||
<-r.scanSema // acquire (guards caches)
|
||||
resolver.moduleCacheCache = r.moduleCacheCache
|
||||
resolver.otherCache = r.otherCache
|
||||
r.scanSema <- struct{}{} // release
|
||||
|
||||
e.UpdateResolver(resolver)
|
||||
}
|
||||
}
|
||||
|
||||
// UpdateResolver sets the resolver for the ProcessEnv to use in imports
|
||||
// operations. Only for use with the result of [Resolver.ClearForNewScan].
|
||||
//
|
||||
// TODO(rfindley): this awkward API is a result of the (arguably) inverted
|
||||
// relationship between configuration and state described in the doc comment
|
||||
// for [ProcessEnv].
|
||||
func (e *ProcessEnv) UpdateResolver(r Resolver) {
|
||||
e.resolver = r
|
||||
e.resolverErr = nil
|
||||
}
|
||||
|
||||
// findPackage returns the module and directory from within the main modules
|
||||
// and their dependencies that contains the package at the given import path,
|
||||
// or returns nil, "" if no module is in scope.
|
||||
func (r *ModuleResolver) findPackage(importPath string) (*gocommand.ModuleJSON, string) {
|
||||
// This can't find packages in the stdlib, but that's harmless for all
|
||||
// the existing code paths.
|
||||
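ClearForNewScan now returns a fresh resolver rather than resetting in place: module-cache scan results are carried over because the module cache is treated as immutable, while all other roots must be re-walked, and the caller swaps the new resolver in via ProcessEnv.UpdateResolver. A simplified sketch of that copy-on-invalidate idea with invented types:

```go
package main

import "fmt"

// scanner sketches the state split the new ClearForNewScan relies on.
type scanner struct {
	moduleCache  map[string]string // immutable module-cache results, shareable
	otherDirs    map[string]string // results for mutable roots
	scannedRoots map[string]bool   // which roots have been walked
}

func newScanner() *scanner {
	return &scanner{
		moduleCache:  map[string]string{},
		otherDirs:    map[string]string{},
		scannedRoots: map[string]bool{},
	}
}

// ClearForNewScan builds a replacement scanner instead of mutating in place:
// module-cache contents and their "already scanned" status survive, the rest
// is forgotten so it will be re-walked.
func (s *scanner) ClearForNewScan() *scanner {
	next := newScanner()
	next.moduleCache = s.moduleCache
	for root, done := range s.scannedRoots {
		if done && isModuleCacheRoot(root) {
			next.scannedRoots[root] = true // no need to re-walk GOMODCACHE
		}
	}
	return next
}

func isModuleCacheRoot(root string) bool { return root == "/go/pkg/mod" }

func main() {
	s := newScanner()
	s.moduleCache["/go/pkg/mod/example.com/a@v1.0.0"] = "a"
	s.scannedRoots["/go/pkg/mod"] = true
	s.scannedRoots["/src/app"] = true

	next := s.ClearForNewScan()
	fmt.Println(len(next.moduleCache), next.scannedRoots)
	// 1 map[/go/pkg/mod:true]  -> module cache kept, /src/app forgotten
}
```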
@@ -264,7 +396,7 @@ func (r *ModuleResolver) findPackage(importPath string) (*gocommand.ModuleJSON,
|
||||
}
|
||||
|
||||
// Not cached. Read the filesystem.
|
||||
pkgFiles, err := ioutil.ReadDir(pkgDir)
|
||||
pkgFiles, err := os.ReadDir(pkgDir)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
@@ -295,10 +427,6 @@ func (r *ModuleResolver) cacheStore(info directoryPackageInfo) {
|
||||
}
|
||||
}
|
||||
|
||||
func (r *ModuleResolver) cacheKeys() []string {
|
||||
return append(r.moduleCacheCache.Keys(), r.otherCache.Keys()...)
|
||||
}
|
||||
|
||||
// cachePackageName caches the package name for a dir already in the cache.
|
||||
func (r *ModuleResolver) cachePackageName(info directoryPackageInfo) (string, error) {
|
||||
if info.rootType == gopathwalk.RootModuleCache {
|
||||
@@ -307,7 +435,7 @@ func (r *ModuleResolver) cachePackageName(info directoryPackageInfo) (string, er
|
||||
return r.otherCache.CachePackageName(info)
|
||||
}
|
||||
|
||||
func (r *ModuleResolver) cacheExports(ctx context.Context, env *ProcessEnv, info directoryPackageInfo) (string, []string, error) {
|
||||
func (r *ModuleResolver) cacheExports(ctx context.Context, env *ProcessEnv, info directoryPackageInfo) (string, []stdlib.Symbol, error) {
|
||||
if info.rootType == gopathwalk.RootModuleCache {
|
||||
return r.moduleCacheCache.CacheExports(ctx, env, info)
|
||||
}
|
||||
@@ -327,6 +455,10 @@ func (r *ModuleResolver) findModuleByDir(dir string) *gocommand.ModuleJSON {
|
||||
// - in /vendor/ in -mod=vendor mode.
|
||||
// - nested module? Dunno.
|
||||
// Rumor has it that replace targets cannot contain other replace targets.
|
||||
//
|
||||
// Note that it is critical here that modsByDir is sorted to have deeper dirs
|
||||
// first. This ensures that findModuleByDir finds the innermost module.
|
||||
// See also golang/go#56291.
|
||||
for _, m := range r.modsByDir {
|
||||
if !strings.HasPrefix(dir, m.Dir) {
|
||||
continue
|
||||
@@ -363,15 +495,15 @@ func (r *ModuleResolver) dirIsNestedModule(dir string, mod *gocommand.ModuleJSON
|
||||
return modDir != mod.Dir
|
||||
}
|
||||
|
||||
func (r *ModuleResolver) modInfo(dir string) (modDir string, modName string) {
|
||||
readModName := func(modFile string) string {
|
||||
modBytes, err := ioutil.ReadFile(modFile)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
return modulePath(modBytes)
|
||||
func readModName(modFile string) string {
|
||||
modBytes, err := os.ReadFile(modFile)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
return modulePath(modBytes)
|
||||
}
|
||||
|
||||
func (r *ModuleResolver) modInfo(dir string) (modDir, modName string) {
|
||||
if r.dirInModuleCache(dir) {
|
||||
if matches := modCacheRegexp.FindStringSubmatch(dir); len(matches) == 3 {
|
||||
index := strings.Index(dir, matches[1]+"@"+matches[2])
|
||||
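In the hunk above, readModName is hoisted to a package-level helper and switched from ioutil.ReadFile to os.ReadFile. A sketch of the same behavior using the public golang.org/x/mod/modfile API instead of the package's internal modulePath parser (an assumed equivalent for illustration, not the vendored code):

```go
package main

import (
	"fmt"
	"os"

	"golang.org/x/mod/modfile"
)

// readModName reads a go.mod file and returns its module path, or "" on any
// error, mirroring the helper's tolerant behavior.
func readModName(modFile string) string {
	data, err := os.ReadFile(modFile)
	if err != nil {
		return ""
	}
	return modfile.ModulePath(data)
}

func main() {
	fmt.Println(readModName("go.mod")) // module path of the current project, or ""
}
```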
@@ -405,11 +537,9 @@ func (r *ModuleResolver) dirInModuleCache(dir string) bool {
|
||||
}
|
||||
|
||||
func (r *ModuleResolver) loadPackageNames(importPaths []string, srcDir string) (map[string]string, error) {
|
||||
if err := r.init(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
names := map[string]string{}
|
||||
for _, path := range importPaths {
|
||||
// TODO(rfindley): shouldn't this use the dirInfoCache?
|
||||
_, packageDir := r.findPackage(path)
|
||||
if packageDir == "" {
|
||||
continue
|
||||
@@ -424,9 +554,8 @@ func (r *ModuleResolver) loadPackageNames(importPaths []string, srcDir string) (
|
||||
}
|
||||
|
||||
func (r *ModuleResolver) scan(ctx context.Context, callback *scanCallback) error {
|
||||
if err := r.init(); err != nil {
|
||||
return err
|
||||
}
|
||||
ctx, done := event.Start(ctx, "imports.ModuleResolver.scan")
|
||||
defer done()
|
||||
|
||||
processDir := func(info directoryPackageInfo) {
|
||||
// Skip this directory if we were not able to get the package information successfully.
|
||||
@@ -437,18 +566,18 @@ func (r *ModuleResolver) scan(ctx context.Context, callback *scanCallback) error
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if !callback.dirFound(pkg) {
|
||||
return
|
||||
}
|
||||
|
||||
pkg.packageName, err = r.cachePackageName(info)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if !callback.packageNameLoaded(pkg) {
|
||||
return
|
||||
}
|
||||
|
||||
_, exports, err := r.loadExports(ctx, pkg, false)
|
||||
if err != nil {
|
||||
return
|
||||
@@ -487,7 +616,6 @@ func (r *ModuleResolver) scan(ctx context.Context, callback *scanCallback) error
|
||||
return packageScanned
|
||||
}
|
||||
|
||||
// Add anything new to the cache, and process it if we're still listening.
|
||||
add := func(root gopathwalk.Root, dir string) {
|
||||
r.cacheStore(r.scanDirForPackage(root, dir))
|
||||
}
|
||||
@@ -502,9 +630,9 @@ func (r *ModuleResolver) scan(ctx context.Context, callback *scanCallback) error
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case <-r.scanSema:
|
||||
case <-r.scanSema: // acquire
|
||||
}
|
||||
defer func() { r.scanSema <- struct{}{} }()
|
||||
defer func() { r.scanSema <- struct{}{} }() // release
|
||||
// We have the lock on r.scannedRoots, and no other scans can run.
|
||||
for _, root := range roots {
|
||||
if ctx.Err() != nil {
|
||||
@@ -527,7 +655,7 @@ func (r *ModuleResolver) scan(ctx context.Context, callback *scanCallback) error
|
||||
}
|
||||
|
||||
func (r *ModuleResolver) scoreImportPath(ctx context.Context, path string) float64 {
|
||||
if _, ok := stdlib[path]; ok {
|
||||
if stdlib.HasPackage(path) {
|
||||
return MaxRelevance
|
||||
}
|
||||
mod, _ := r.findPackage(path)
|
||||
@@ -605,10 +733,7 @@ func (r *ModuleResolver) canonicalize(info directoryPackageInfo) (*pkg, error) {
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func (r *ModuleResolver) loadExports(ctx context.Context, pkg *pkg, includeTest bool) (string, []string, error) {
|
||||
if err := r.init(); err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
func (r *ModuleResolver) loadExports(ctx context.Context, pkg *pkg, includeTest bool) (string, []stdlib.Symbol, error) {
|
||||
if info, ok := r.cacheLoad(pkg.dir); ok && !includeTest {
|
||||
return r.cacheExports(ctx, r.env, info)
|
||||
}

vendor/golang.org/x/tools/internal/imports/mod_cache.go (121 changed lines, generated, vendored)
@@ -7,12 +7,17 @@ package imports
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"golang.org/x/mod/module"
|
||||
"golang.org/x/tools/internal/gopathwalk"
|
||||
"golang.org/x/tools/internal/stdlib"
|
||||
)
|
||||

// To find packages to import, the resolver needs to know about all of the
// To find packages to import, the resolver needs to know about all of
// the packages that could be imported. This includes packages that are
// already in modules that are in (1) the current module, (2) replace targets,
// and (3) packages in the module cache. Packages in (1) and (2) may change over
@@ -39,6 +44,8 @@ const (
exportsLoaded
)

// directoryPackageInfo holds (possibly incomplete) information about packages
// contained in a given directory.
type directoryPackageInfo struct {
// status indicates the extent to which this struct has been filled in.
status directoryPackageStatus
@@ -63,8 +70,11 @@ type directoryPackageInfo struct {
packageName string // the package name, as declared in the source.

// Set when status >= exportsLoaded.

exports []string
// TODO(rfindley): it's hard to see this, but exports depend implicitly on
// the default build context GOOS and GOARCH.
//
// We can make this explicit, and key exports by GOOS, GOARCH.
exports []stdlib.Symbol
}

// reachedStatus returns true when info has a status at least target and any error associated with
@@ -79,7 +89,7 @@ func (info *directoryPackageInfo) reachedStatus(target directoryPackageStatus) (
return true, nil
}

// dirInfoCache is a concurrency safe map for storing information about
// DirInfoCache is a concurrency-safe map for storing information about
// directories that may contain packages.
//
// The information in this cache is built incrementally. Entries are initialized in scan.
@@ -92,21 +102,26 @@ func (info *directoryPackageInfo) reachedStatus(target directoryPackageStatus) (
// The information in the cache is not expected to change for the cache's
// lifetime, so there is no protection against competing writes. Users should
// take care not to hold the cache across changes to the underlying files.
//
// TODO(suzmue): consider other concurrency strategies and data structures (RWLocks, sync.Map, etc)
type dirInfoCache struct {
type DirInfoCache struct {
mu sync.Mutex
// dirs stores information about packages in directories, keyed by absolute path.
dirs map[string]*directoryPackageInfo
listeners map[*int]cacheListener
}

func NewDirInfoCache() *DirInfoCache {
return &DirInfoCache{
dirs: make(map[string]*directoryPackageInfo),
listeners: make(map[*int]cacheListener),
}
}

type cacheListener func(directoryPackageInfo)

// ScanAndListen calls listener on all the items in the cache, and on anything
// newly added. The returned stop function waits for all in-flight callbacks to
// finish and blocks new ones.
func (d *dirInfoCache) ScanAndListen(ctx context.Context, listener cacheListener) func() {
func (d *DirInfoCache) ScanAndListen(ctx context.Context, listener cacheListener) func() {
ctx, cancel := context.WithCancel(ctx)

// Flushing out all the callbacks is tricky without knowing how many there
@@ -162,8 +177,10 @@ func (d *dirInfoCache) ScanAndListen(ctx context.Context, listener cacheListener
}
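ScanAndListen is the subscription point a resolver uses to observe cache entries while a scan is in flight. A rough sketch of the intended call pattern follows; it is illustrative only and would have to live inside golang.org/x/tools/internal/imports itself, since the listener's argument type directoryPackageInfo is unexported, and the scan that would populate the cache is elided.

// Sketch only: assumes this file sits inside the imports package.
package imports

import (
	"context"
	"fmt"
)

func exampleScanAndListen(ctx context.Context) {
	cache := NewDirInfoCache()

	// The listener fires for every entry already in the cache and then for
	// each entry Stored while the subscription is active.
	stop := cache.ScanAndListen(ctx, func(info directoryPackageInfo) {
		fmt.Println("saw package dir:", info.dir)
	})

	// ... a module-cache or workspace scan would Store entries here ...

	// stop waits for in-flight callbacks to finish and blocks new ones.
	stop()
}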
// Store stores the package info for dir.
func (d *dirInfoCache) Store(dir string, info directoryPackageInfo) {
func (d *DirInfoCache) Store(dir string, info directoryPackageInfo) {
d.mu.Lock()
// TODO(rfindley, golang/go#59216): should we overwrite an existing entry?
// That seems incorrect as the cache should be idempotent.
_, old := d.dirs[dir]
d.dirs[dir] = &info
var listeners []cacheListener
@@ -180,7 +197,7 @@ func (d *dirInfoCache) Store(dir string, info directoryPackageInfo) {
}

// Load returns a copy of the directoryPackageInfo for absolute directory dir.
func (d *dirInfoCache) Load(dir string) (directoryPackageInfo, bool) {
func (d *DirInfoCache) Load(dir string) (directoryPackageInfo, bool) {
d.mu.Lock()
defer d.mu.Unlock()
info, ok := d.dirs[dir]
@@ -191,7 +208,7 @@ func (d *dirInfoCache) Load(dir string) (directoryPackageInfo, bool) {
}

// Keys returns the keys currently present in d.
func (d *dirInfoCache) Keys() (keys []string) {
func (d *DirInfoCache) Keys() (keys []string) {
d.mu.Lock()
defer d.mu.Unlock()
for key := range d.dirs {
@@ -200,7 +217,7 @@ func (d *dirInfoCache) Keys() (keys []string) {
return keys
}

func (d *dirInfoCache) CachePackageName(info directoryPackageInfo) (string, error) {
func (d *DirInfoCache) CachePackageName(info directoryPackageInfo) (string, error) {
if loaded, err := info.reachedStatus(nameLoaded); loaded {
return info.packageName, err
}
@@ -213,7 +230,7 @@ func (d *dirInfoCache) CachePackageName(info directoryPackageInfo) (string, erro
return info.packageName, info.err
}

func (d *dirInfoCache) CacheExports(ctx context.Context, env *ProcessEnv, info directoryPackageInfo) (string, []string, error) {
func (d *DirInfoCache) CacheExports(ctx context.Context, env *ProcessEnv, info directoryPackageInfo) (string, []stdlib.Symbol, error) {
if reached, _ := info.reachedStatus(exportsLoaded); reached {
return info.packageName, info.exports, info.err
}
@@ -234,3 +251,81 @@ func (d *dirInfoCache) CacheExports(ctx context.Context, env *ProcessEnv, info d
d.Store(info.dir, info)
return info.packageName, info.exports, info.err
}

// ScanModuleCache walks the given directory, which must be a GOMODCACHE value,
// for directory package information, storing the results in cache.
func ScanModuleCache(dir string, cache *DirInfoCache, logf func(string, ...any)) {
// Note(rfindley): it's hard to see, but this function attempts to implement
// just the side effects on cache of calling PrimeCache with a ProcessEnv
// that has the given dir as its GOMODCACHE.
//
// Teasing out the control flow, we see that we can avoid any handling of
// vendor/ and can infer module info entirely from the path, simplifying the
// logic here.

root := gopathwalk.Root{
Path: filepath.Clean(dir),
Type: gopathwalk.RootModuleCache,
}

directoryInfo := func(root gopathwalk.Root, dir string) directoryPackageInfo {
// This is a copy of ModuleResolver.scanDirForPackage, trimmed down to
// logic that applies to a module cache directory.

subdir := ""
if dir != root.Path {
subdir = dir[len(root.Path)+len("/"):]
}

matches := modCacheRegexp.FindStringSubmatch(subdir)
if len(matches) == 0 {
return directoryPackageInfo{
status: directoryScanned,
err: fmt.Errorf("invalid module cache path: %v", subdir),
}
}
modPath, err := module.UnescapePath(filepath.ToSlash(matches[1]))
if err != nil {
if logf != nil {
logf("decoding module cache path %q: %v", subdir, err)
}
return directoryPackageInfo{
status: directoryScanned,
err: fmt.Errorf("decoding module cache path %q: %v", subdir, err),
}
}
importPath := path.Join(modPath, filepath.ToSlash(matches[3]))
index := strings.Index(dir, matches[1]+"@"+matches[2])
modDir := filepath.Join(dir[:index], matches[1]+"@"+matches[2])
modName := readModName(filepath.Join(modDir, "go.mod"))
return directoryPackageInfo{
status: directoryScanned,
dir: dir,
rootType: root.Type,
nonCanonicalImportPath: importPath,
moduleDir: modDir,
moduleName: modName,
}
}

add := func(root gopathwalk.Root, dir string) {
info := directoryInfo(root, dir)
cache.Store(info.dir, info)
}

skip := func(_ gopathwalk.Root, dir string) bool {
// Skip directories that have already been scanned.
//
// Note that gopathwalk only adds "package" directories, which must contain
// a .go file, and all such package directories in the module cache are
// immutable. So if we can load a dir, it can be skipped.
info, ok := cache.Load(dir)
if !ok {
return false
}
packageScanned, _ := info.reachedStatus(directoryScanned)
return packageScanned
}

gopathwalk.WalkSkip([]gopathwalk.Root{root}, add, skip, gopathwalk.Options{Logf: logf, ModulesEnabled: true})
}
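ScanModuleCache plus the exported DirInfoCache constructor form the surface that callers such as gopls use to prime the module-cache index added in this diff. A small, hedged sketch of that flow is below; it assumes in-package compilation (the imports package is internal to x/tools), and the GOMODCACHE path is a placeholder that real callers would take from `go env GOMODCACHE`.

// Sketch only: assumes this file sits inside the imports package;
// external modules cannot import golang.org/x/tools/internal/imports.
package imports

import (
	"log"
	"os"
	"path/filepath"
)

func exampleScanModuleCache() {
	// Placeholder: real callers resolve this from `go env GOMODCACHE`.
	gomodcache := filepath.Join(os.Getenv("GOPATH"), "pkg", "mod")

	cache := NewDirInfoCache()

	// Walk the module cache once, recording a directoryPackageInfo per package dir.
	ScanModuleCache(gomodcache, cache, log.Printf)

	// Keys reports every directory the walk recorded.
	for _, dir := range cache.Keys() {
		log.Println("indexed:", dir)
	}
}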
2
vendor/golang.org/x/tools/internal/imports/sortimports.go
generated
vendored
2
vendor/golang.org/x/tools/internal/imports/sortimports.go
generated
vendored
@@ -18,7 +18,7 @@ import (
// sortImports sorts runs of consecutive import lines in import blocks in f.
// It also removes duplicate imports when it is possible to do so without data loss.
//
// It may mutate the token.File.
// It may mutate the token.File and the ast.File.
func sortImports(localPrefix string, tokFile *token.File, f *ast.File) {
for i, d := range f.Decls {
d, ok := d.(*ast.GenDecl)
11115
vendor/golang.org/x/tools/internal/imports/zstdlib.go
generated
vendored
11115
vendor/golang.org/x/tools/internal/imports/zstdlib.go
generated
vendored
File diff suppressed because it is too large