// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package cache

import (
	"bytes"
	"context"
	"fmt"
	"go/ast"
	"go/types"
	"path"
	"path/filepath"
	"sort"
	"strings"
	"sync"

	"golang.org/x/mod/module"
	"golang.org/x/tools/go/ast/astutil"
	"golang.org/x/tools/go/packages"
	"golang.org/x/tools/internal/event"
	"golang.org/x/tools/internal/lsp/debug/tag"
	"golang.org/x/tools/internal/lsp/protocol"
	"golang.org/x/tools/internal/lsp/source"
	"golang.org/x/tools/internal/memoize"
	"golang.org/x/tools/internal/packagesinternal"
	"golang.org/x/tools/internal/span"
	"golang.org/x/tools/internal/typeparams"
	"golang.org/x/tools/internal/typesinternal"
	errors "golang.org/x/xerrors"
)

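// A packageHandleKey is a hash of the inputs to type-checking a package in a
// particular mode; see checkPackageKey.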
type packageHandleKey string

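// A packageHandle holds the inputs needed to type-check a single package in a
// given parse mode: its parse handles, metadata, and cache key. The
// type-check itself is memoized via the underlying memoize.Handle and shared
// through the snapshot.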
type packageHandle struct {
	handle *memoize.Handle

	goFiles, compiledGoFiles []*parseGoHandle

	// mode is the mode the files were parsed in.
	mode source.ParseMode

	// m is the metadata associated with the package.
	m *knownMetadata

	// key is the hashed key for the package.
	key packageHandleKey
}

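// packageKey returns the key that identifies this handle's package within a
// snapshot: its package ID combined with its parse mode.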
func (ph *packageHandle) packageKey() packageKey {
	return packageKey{
		id:   ph.m.id,
		mode: ph.mode,
	}
}

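// imports returns the import paths declared in ph's Go files, parsed in
// header mode. Paths are deduplicated within each file, files that fail to
// parse are skipped, and the combined result is sorted.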
func (ph *packageHandle) imports(ctx context.Context, s source.Snapshot) (result []string) {
	for _, pgh := range ph.goFiles {
		f, err := s.ParseGo(ctx, pgh.file, source.ParseHeader)
		if err != nil {
			continue
		}
		seen := map[string]struct{}{}
		for _, impSpec := range f.File.Imports {
			imp := strings.Trim(impSpec.Path.Value, `"`)
			if _, ok := seen[imp]; !ok {
				seen[imp] = struct{}{}
				result = append(result, imp)
			}
		}
	}

	sort.Strings(result)
	return result
}

// packageData contains the data produced by type-checking a package.
type packageData struct {
	pkg *pkg
	err error
}

// buildPackageHandle returns a packageHandle for a given package and mode.
// It assumes that the given ID already has metadata available, so it does not
// attempt to reload missing or invalid metadata. The caller must reload
// metadata if needed.
func (s *snapshot) buildPackageHandle(ctx context.Context, id packageID, mode source.ParseMode) (*packageHandle, error) {
	if ph := s.getPackage(id, mode); ph != nil {
		return ph, nil
	}

	// Build the packageHandle for this ID and its dependencies.
	ph, deps, err := s.buildKey(ctx, id, mode)
	if err != nil {
		return nil, err
	}

	// Do not close over the packageHandle or the snapshot in the Bind function.
	// This creates a cycle, which causes the finalizers to never run on the handles.
	// The possible cycles are:
	//
	//     packageHandle.handle.function -> packageHandle
	//     packageHandle.handle.function -> snapshot -> packageHandle
	//

	m := ph.m
	key := ph.key

	h := s.generation.Bind(key, func(ctx context.Context, arg memoize.Arg) interface{} {
		snapshot := arg.(*snapshot)

		// Begin loading the direct dependencies, in parallel.
		var wg sync.WaitGroup
		for _, dep := range deps {
			wg.Add(1)
			go func(dep *packageHandle) {
				dep.check(ctx, snapshot)
				wg.Done()
			}(dep)
		}

		data := &packageData{}
		data.pkg, data.err = typeCheck(ctx, snapshot, m.metadata, mode, deps)
		// Make sure that the workers above have finished before we return,
		// especially in case of cancellation.
		wg.Wait()

		return data
	}, nil)
	ph.handle = h

	// Cache the handle in the snapshot. If a package handle has already
	// been cached, addPackageHandle will return the cached value. This is
	// fine, since the original package handle above will have no references
	// and will be garbage collected.
	ph = s.addPackageHandle(ph)

	return ph, nil
}

// buildKey computes the key for a given packageHandle.
func (s *snapshot) buildKey(ctx context.Context, id packageID, mode source.ParseMode) (*packageHandle, map[packagePath]*packageHandle, error) {
	m := s.getMetadata(id)
	if m == nil {
		return nil, nil, errors.Errorf("no metadata for %s", id)
	}
	goFiles, err := s.parseGoHandles(ctx, m.goFiles, mode)
	if err != nil {
		return nil, nil, err
	}
	compiledGoFiles, err := s.parseGoHandles(ctx, m.compiledGoFiles, mode)
	if err != nil {
		return nil, nil, err
	}
	ph := &packageHandle{
		m:               m,
		goFiles:         goFiles,
		compiledGoFiles: compiledGoFiles,
		mode:            mode,
	}
	// Sort the dependencies so that the key computed below is deterministic.
	depList := append([]packageID{}, m.deps...)
	sort.Slice(depList, func(i, j int) bool {
		return depList[i] < depList[j]
	})

	deps := make(map[packagePath]*packageHandle)

	// Begin computing the key by getting the depKeys for all dependencies.
	var depKeys []packageHandleKey
	for _, depID := range depList {
		depHandle, err := s.buildPackageHandle(ctx, depID, s.workspaceParseMode(depID))
		// Don't use invalid metadata for dependencies if the top-level
		// metadata is valid. We only load top-level packages, so if the
		// top-level is valid, all of its dependencies should be as well.
		if err != nil || m.valid && !depHandle.m.valid {
			if err != nil {
				event.Error(ctx, fmt.Sprintf("%s: no dep handle for %s", id, depID), err, tag.Snapshot.Of(s.id))
			} else {
				event.Log(ctx, fmt.Sprintf("%s: invalid dep handle for %s", id, depID), tag.Snapshot.Of(s.id))
			}

			if ctx.Err() != nil {
				return nil, nil, ctx.Err()
			}
			// One bad dependency should not prevent us from checking the entire package.
			// Add a special key to mark a bad dependency.
			depKeys = append(depKeys, packageHandleKey(fmt.Sprintf("%s import not found", depID)))
			continue
		}
		deps[depHandle.m.pkgPath] = depHandle
		depKeys = append(depKeys, depHandle.key)
	}
	experimentalKey := s.View().Options().ExperimentalPackageCacheKey
	ph.key = checkPackageKey(ph.m.id, compiledGoFiles, m.config, depKeys, mode, experimentalKey)
	return ph, deps, nil
}

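// workspaceParseMode returns the parse mode to use for the given package:
// ParseFull for workspace packages and ParseExported for dependencies. In a
// degraded memory mode, only workspace packages with at least one open
// compiled Go file are parsed in full.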
func (s *snapshot) workspaceParseMode(id packageID) source.ParseMode {
	s.mu.Lock()
	defer s.mu.Unlock()
	_, ws := s.workspacePackages[id]
	if !ws {
		return source.ParseExported
	}
	if s.view.Options().MemoryMode == source.ModeNormal {
		return source.ParseFull
	}

	// Degraded mode. Check for open files.
	m, ok := s.metadata[id]
	if !ok {
		return source.ParseExported
	}
	for _, cgf := range m.compiledGoFiles {
		if s.isOpenLocked(cgf) {
			return source.ParseFull
		}
	}
	return source.ParseExported
}

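// checkPackageKey returns the cache key for a package's type-check result. It
// hashes the package ID, the go/packages config (unless the experimental key
// is enabled), the parse mode, the keys of all dependencies, and the file
// identities of the compiled Go files. Roughly (a sketch, not the exact byte
// layout):
//
//	key = hash(id + [config] + mode + depKeys... + fileIdentities...)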
func checkPackageKey(id packageID, pghs []*parseGoHandle, cfg *packages.Config, deps []packageHandleKey, mode source.ParseMode, experimentalKey bool) packageHandleKey {
	b := bytes.NewBuffer(nil)
	b.WriteString(string(id))
	if !experimentalKey {
		// cfg was used to produce the other hashed inputs (package ID, parsed Go
		// files, and deps). It should not otherwise affect the inputs to the type
		// checker, so this experiment omits it. This should increase cache hits on
		// the daemon as cfg contains the environment and working directory.
		b.WriteString(hashConfig(cfg))
	}
	b.WriteByte(byte(mode))
	for _, dep := range deps {
		b.WriteString(string(dep))
	}
	for _, cgf := range pghs {
		b.WriteString(cgf.file.FileIdentity().String())
	}
	return packageHandleKey(hashContents(b.Bytes()))
}

// hashEnv returns a hash of the snapshot's environment variables.
func hashEnv(s *snapshot) string {
	s.view.optionsMu.Lock()
	env := s.view.options.EnvSlice()
	s.view.optionsMu.Unlock()

	b := &bytes.Buffer{}
	for _, e := range env {
		b.WriteString(e)
	}
	return hashContents(b.Bytes())
}

// hashConfig returns the hash for the *packages.Config.
func hashConfig(config *packages.Config) string {
	b := bytes.NewBuffer(nil)

	// Dir, Mode, Env, BuildFlags are the parts of the config that can change.
	b.WriteString(config.Dir)
	b.WriteString(string(rune(config.Mode)))

	for _, e := range config.Env {
		b.WriteString(e)
	}
	for _, f := range config.BuildFlags {
		b.WriteString(f)
	}
	return hashContents(b.Bytes())
}

func (ph *packageHandle) Check(ctx context.Context, s source.Snapshot) (source.Package, error) {
	return ph.check(ctx, s.(*snapshot))
}

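// check returns the result of type-checking this package in the given
// snapshot, blocking until the result is available.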
func (ph *packageHandle) check(ctx context.Context, s *snapshot) (*pkg, error) {
	v, err := ph.handle.Get(ctx, s.generation, s)
	if err != nil {
		return nil, err
	}
	data := v.(*packageData)
	return data.pkg, data.err
}

func (ph *packageHandle) CompiledGoFiles() []span.URI {
	return ph.m.compiledGoFiles
}

func (ph *packageHandle) ID() string {
	return string(ph.m.id)
}

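// cached returns the type-check result if it is already present in generation
// g; it does not trigger type-checking.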
func (ph *packageHandle) cached(g *memoize.Generation) (*pkg, error) {
	v := ph.handle.Cached(g)
	if v == nil {
		return nil, errors.Errorf("no cached type information for %s", ph.m.pkgPath)
	}
	data := v.(*packageData)
	return data.pkg, data.err
}

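// parseGoHandles returns a parse handle for each of the given files, to be
// parsed in the given mode.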
func (s *snapshot) parseGoHandles(ctx context.Context, files []span.URI, mode source.ParseMode) ([]*parseGoHandle, error) {
	pghs := make([]*parseGoHandle, 0, len(files))
	for _, uri := range files {
		fh, err := s.GetFile(ctx, uri)
		if err != nil {
			return nil, err
		}
		pghs = append(pghs, s.parseGoHandle(ctx, fh, mode))
	}
	return pghs, nil
}

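// typeCheck type-checks the package described by m. In ParseExported mode it
// first checks an AST trimmed to exported declarations, retrying with less
// aggressive trimming if the filter removed identifiers that turned out to be
// needed. In ParseFull mode it also converts the package's list, parse, and
// type errors into diagnostics.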
func typeCheck(ctx context.Context, snapshot *snapshot, m *metadata, mode source.ParseMode, deps map[packagePath]*packageHandle) (*pkg, error) {
	var filter *unexportedFilter
	if mode == source.ParseExported {
		filter = &unexportedFilter{uses: map[string]bool{}}
	}
	pkg, err := doTypeCheck(ctx, snapshot, m, mode, deps, filter)
	if err != nil {
		return nil, err
	}

	if mode == source.ParseExported {
		// The AST filtering is a little buggy and may remove things it
		// shouldn't. If we only got undeclared name errors, try one more
		// time keeping those names.
		missing, unexpected := filter.ProcessErrors(pkg.typeErrors)
		if len(unexpected) == 0 && len(missing) != 0 {
			event.Log(ctx, fmt.Sprintf("discovered missing identifiers: %v", missing), tag.Package.Of(string(m.id)))
			pkg, err = doTypeCheck(ctx, snapshot, m, mode, deps, filter)
			if err != nil {
				return nil, err
			}
			missing, unexpected = filter.ProcessErrors(pkg.typeErrors)
		}
		if len(unexpected) != 0 || len(missing) != 0 {
			event.Log(ctx, fmt.Sprintf("falling back to safe trimming due to type errors: %v or still-missing identifiers: %v", unexpected, missing), tag.Package.Of(string(m.id)))
			pkg, err = doTypeCheck(ctx, snapshot, m, mode, deps, nil)
			if err != nil {
				return nil, err
			}
		}
	}
	// If this is a replaced module in the workspace, the version is
	// meaningless, and we don't want clients to access it.
	if m.module != nil {
		version := m.module.Version
		if source.IsWorkspaceModuleVersion(version) {
			version = ""
		}
		pkg.version = &module.Version{
			Path:    m.module.Path,
			Version: version,
		}
	}

	// We don't care about a package's errors unless we have parsed it in full.
	if mode != source.ParseFull {
		return pkg, nil
	}

	for _, e := range m.errors {
		diags, err := goPackagesErrorDiagnostics(snapshot, pkg, e)
		if err != nil {
			event.Error(ctx, "unable to compute positions for list errors", err, tag.Package.Of(pkg.ID()))
			continue
		}
		pkg.diagnostics = append(pkg.diagnostics, diags...)
	}

	// Our heuristic for whether to show type checking errors is:
	//  + If any file was 'fixed', don't show type checking errors as we
	//    can't guarantee that they reference accurate locations in the source.
	//  + If there is a parse error _in the current file_, suppress type
	//    errors in that file.
	//  + Otherwise, show type errors even in the presence of parse errors in
	//    other package files. go/types attempts to suppress follow-on errors
	//    due to bad syntax, so on balance type checking errors still provide
	//    a decent signal/noise ratio as long as the file in question parses.

	// Track URIs with parse errors so that we can suppress type errors for these
	// files.
	unparseable := map[span.URI]bool{}
	for _, e := range pkg.parseErrors {
		diags, err := parseErrorDiagnostics(snapshot, pkg, e)
		if err != nil {
			event.Error(ctx, "unable to compute positions for parse errors", err, tag.Package.Of(pkg.ID()))
			continue
		}
		for _, diag := range diags {
			unparseable[diag.URI] = true
			pkg.diagnostics = append(pkg.diagnostics, diag)
		}
	}

	if pkg.hasFixedFiles {
		return pkg, nil
	}

	unexpanded := pkg.typeErrors
	pkg.typeErrors = nil
	for _, e := range expandErrors(unexpanded, snapshot.View().Options().RelatedInformationSupported) {
		diags, err := typeErrorDiagnostics(snapshot, pkg, e)
		if err != nil {
			event.Error(ctx, "unable to compute positions for type errors", err, tag.Package.Of(pkg.ID()))
			continue
		}
		pkg.typeErrors = append(pkg.typeErrors, e.primary)
		for _, diag := range diags {
			// If the file didn't parse cleanly, it is highly likely that type
			// checking errors will be confusing or redundant. But otherwise, type
			// checking usually provides a good enough signal to include.
			if !unparseable[diag.URI] {
				pkg.diagnostics = append(pkg.diagnostics, diag)
			}
		}
	}

	depsErrors, err := snapshot.depsErrors(ctx, pkg)
	if err != nil {
		return nil, err
	}
	pkg.diagnostics = append(pkg.diagnostics, depsErrors...)

	return pkg, nil
}

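// doTypeCheck parses the package's Go files and runs the go/types checker
// over them, returning a pkg holding the resulting syntax trees, type
// information, and any parse and type errors. In ParseExported mode the ASTs
// are trimmed before type-checking, using astFilter if it is non-nil.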
func doTypeCheck(ctx context.Context, snapshot *snapshot, m *metadata, mode source.ParseMode, deps map[packagePath]*packageHandle, astFilter *unexportedFilter) (*pkg, error) {
	ctx, done := event.Start(ctx, "cache.typeCheck", tag.Package.Of(string(m.id)))
	defer done()

	pkg := &pkg{
		m:       m,
		mode:    mode,
		imports: make(map[packagePath]*pkg),
		types:   types.NewPackage(string(m.pkgPath), string(m.name)),
		typesInfo: &types.Info{
			Types:      make(map[ast.Expr]types.TypeAndValue),
			Defs:       make(map[*ast.Ident]types.Object),
			Uses:       make(map[*ast.Ident]types.Object),
			Implicits:  make(map[ast.Node]types.Object),
			Selections: make(map[*ast.SelectorExpr]*types.Selection),
			Scopes:     make(map[ast.Node]*types.Scope),
		},
		typesSizes: m.typesSizes,
	}
	typeparams.InitInferred(pkg.typesInfo)

	for _, gf := range pkg.m.goFiles {
		// In the presence of line directives, we may need to report errors in
		// non-compiled Go files, so we need to register them on the package.
		// However, we only need to really parse them in ParseFull mode, when
		// the user might actually be looking at the file.
		fh, err := snapshot.GetFile(ctx, gf)
		if err != nil {
			return nil, err
		}
		goMode := source.ParseFull
		if mode != source.ParseFull {
			goMode = source.ParseHeader
		}
		pgf, err := snapshot.ParseGo(ctx, fh, goMode)
		if err != nil {
			return nil, err
		}
		pkg.goFiles = append(pkg.goFiles, pgf)
	}

	if err := parseCompiledGoFiles(ctx, snapshot, mode, pkg, astFilter); err != nil {
		return nil, err
	}

	// Use the default type information for the unsafe package.
	if m.pkgPath == "unsafe" {
		// Don't type check Unsafe: it's unnecessary, and doing so exposes a data
		// race to Unsafe.completed.
		pkg.types = types.Unsafe
		return pkg, nil
	}

	if len(m.compiledGoFiles) == 0 {
		// Having no files most likely means that go/packages failed. Try to
		// attach the error messages to source positions where possible.
		var found bool
		for _, e := range m.errors {
			srcDiags, err := goPackagesErrorDiagnostics(snapshot, pkg, e)
			if err != nil {
				continue
			}
			found = true
			pkg.diagnostics = append(pkg.diagnostics, srcDiags...)
		}
		if found {
			return pkg, nil
		}
		return nil, errors.Errorf("no parsed files for package %s, expected: %v, errors: %v", pkg.m.pkgPath, pkg.compiledGoFiles, m.errors)
	}

	cfg := &types.Config{
		Error: func(e error) {
			pkg.typeErrors = append(pkg.typeErrors, e.(types.Error))
		},
		Importer: importerFunc(func(pkgPath string) (*types.Package, error) {
			// If the context was cancelled, we should abort.
			if ctx.Err() != nil {
				return nil, ctx.Err()
			}
			dep := resolveImportPath(pkgPath, pkg, deps)
			if dep == nil {
				return nil, snapshot.missingPkgError(ctx, pkgPath)
			}
			if !source.IsValidImport(string(m.pkgPath), string(dep.m.pkgPath)) {
				return nil, errors.Errorf("invalid use of internal package %s", pkgPath)
			}
			depPkg, err := dep.check(ctx, snapshot)
			if err != nil {
				return nil, err
			}
			pkg.imports[depPkg.m.pkgPath] = depPkg
			return depPkg.types, nil
		}),
	}

	if mode != source.ParseFull {
		cfg.DisableUnusedImportCheck = true
		cfg.IgnoreFuncBodies = true
	}

	// We want to type check cgo code if go/types supports it.
	// We passed typecheckCgo to go/packages when we Loaded.
	typesinternal.SetUsesCgo(cfg)

	check := types.NewChecker(cfg, snapshot.FileSet(), pkg.types, pkg.typesInfo)

	var files []*ast.File
	for _, cgf := range pkg.compiledGoFiles {
		files = append(files, cgf.File)
	}
	// Type checking errors are handled via the config, so ignore them here.
	_ = check.Files(files)
	// If the context was cancelled, we may have returned a ton of transient
	// errors to the type checker. Swallow them.
	if ctx.Err() != nil {
		return nil, ctx.Err()
	}
	return pkg, nil
}

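// parseCompiledGoFiles parses the package's compiled Go files in the given
// mode, recording any parse errors and whether a file's content had to be
// "fixed" to parse. In ParseExported mode the ASTs are then trimmed, either
// with astFilter or, when astFilter is nil, with the safer trimAST.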
func parseCompiledGoFiles(ctx context.Context, snapshot *snapshot, mode source.ParseMode, pkg *pkg, astFilter *unexportedFilter) error {
	for _, cgf := range pkg.m.compiledGoFiles {
		fh, err := snapshot.GetFile(ctx, cgf)
		if err != nil {
			return err
		}

		var pgf *source.ParsedGoFile
		var fixed bool
		// Only parse Full through the cache -- we need to own Exported ASTs
		// to prune them.
		if mode == source.ParseFull {
			pgh := snapshot.parseGoHandle(ctx, fh, mode)
			pgf, fixed, err = snapshot.parseGo(ctx, pgh)
		} else {
			d := parseGo(ctx, snapshot.FileSet(), fh, mode)
			pgf, fixed, err = d.parsed, d.fixed, d.err
		}
		if err != nil {
			return err
		}
		pkg.compiledGoFiles = append(pkg.compiledGoFiles, pgf)
		if pgf.ParseErr != nil {
			pkg.parseErrors = append(pkg.parseErrors, pgf.ParseErr)
		}
		// If we have fixed parse errors in any of the files, we should hide type
		// errors, as they may be completely nonsensical.
		pkg.hasFixedFiles = pkg.hasFixedFiles || fixed
	}
	if mode != source.ParseExported {
		return nil
	}
	if astFilter != nil {
		var files []*ast.File
		for _, cgf := range pkg.compiledGoFiles {
			files = append(files, cgf.File)
		}
		astFilter.Filter(files)
	} else {
		for _, cgf := range pkg.compiledGoFiles {
			trimAST(cgf.File)
		}
	}
	return nil
}

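// depsErrors returns diagnostics for the package's dependency errors: errors
// reported by the go command for packages in its import graph. Diagnostics
// are attached to the non-workspace imports involved in each error and, where
// possible, to the corresponding require statement in the package's go.mod
// file.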
func (s *snapshot) depsErrors(ctx context.Context, pkg *pkg) ([]*source.Diagnostic, error) {
	// Select packages that can't be found, and were imported in non-workspace packages.
	// Workspace packages already show their own errors.
	var relevantErrors []*packagesinternal.PackageError
	for _, depsError := range pkg.m.depsErrors {
		// Before Go 1.15, the missing package itself was included in the
		// import stack, which was presumably a bug. We want the next one up.
		directImporterIdx := len(depsError.ImportStack) - 1
		if s.view.goversion < 15 {
			directImporterIdx = len(depsError.ImportStack) - 2
		}
		if directImporterIdx < 0 {
			continue
		}

		directImporter := depsError.ImportStack[directImporterIdx]
		if s.isWorkspacePackage(packageID(directImporter)) {
			continue
		}
		relevantErrors = append(relevantErrors, depsError)
	}

	// Don't build the import index for nothing.
	if len(relevantErrors) == 0 {
		return nil, nil
	}

	// Build an index of all imports in the package.
	type fileImport struct {
		cgf *source.ParsedGoFile
		imp *ast.ImportSpec
	}
	allImports := map[string][]fileImport{}
	for _, cgf := range pkg.compiledGoFiles {
		for _, group := range astutil.Imports(s.FileSet(), cgf.File) {
			for _, imp := range group {
				if imp.Path == nil {
					continue
				}
				path := strings.Trim(imp.Path.Value, `"`)
				allImports[path] = append(allImports[path], fileImport{cgf, imp})
			}
		}
	}

	// Apply a diagnostic to any import involved in the error, stopping once
	// we reach the workspace.
	var errors []*source.Diagnostic
	for _, depErr := range relevantErrors {
		for i := len(depErr.ImportStack) - 1; i >= 0; i-- {
			item := depErr.ImportStack[i]
			if s.isWorkspacePackage(packageID(item)) {
				break
			}

			for _, imp := range allImports[item] {
				rng, err := source.NewMappedRange(s.FileSet(), imp.cgf.Mapper, imp.imp.Pos(), imp.imp.End()).Range()
				if err != nil {
					return nil, err
				}
				fixes, err := goGetQuickFixes(s, imp.cgf.URI, item)
				if err != nil {
					return nil, err
				}
				errors = append(errors, &source.Diagnostic{
					URI:            imp.cgf.URI,
					Range:          rng,
					Severity:       protocol.SeverityError,
					Source:         source.TypeError,
					Message:        fmt.Sprintf("error while importing %v: %v", item, depErr.Err),
					SuggestedFixes: fixes,
				})
			}
		}
	}

	if len(pkg.compiledGoFiles) == 0 {
		return errors, nil
	}
	mod := s.GoModForFile(pkg.compiledGoFiles[0].URI)
	if mod == "" {
		return errors, nil
	}
	fh, err := s.GetFile(ctx, mod)
	if err != nil {
		return nil, err
	}
	pm, err := s.ParseMod(ctx, fh)
	if err != nil {
		return nil, err
	}

	// Add a diagnostic to the module that contained the lowest-level import of
	// the missing package.
	for _, depErr := range relevantErrors {
		for i := len(depErr.ImportStack) - 1; i >= 0; i-- {
			item := depErr.ImportStack[i]
			m := s.getMetadata(packageID(item))
			if m == nil || m.module == nil {
				continue
			}
			modVer := module.Version{Path: m.module.Path, Version: m.module.Version}
			reference := findModuleReference(pm.File, modVer)
			if reference == nil {
				continue
			}
			rng, err := rangeFromPositions(pm.Mapper, reference.Start, reference.End)
			if err != nil {
				return nil, err
			}
			fixes, err := goGetQuickFixes(s, pm.URI, item)
			if err != nil {
				return nil, err
			}
			errors = append(errors, &source.Diagnostic{
				URI:            pm.URI,
				Range:          rng,
				Severity:       protocol.SeverityError,
				Source:         source.TypeError,
				Message:        fmt.Sprintf("error while importing %v: %v", item, depErr.Err),
				SuggestedFixes: fixes,
			})
			break
		}
	}
	return errors, nil
}

// missingPkgError returns an error message for a missing package that varies
// based on the user's workspace mode.
func (s *snapshot) missingPkgError(ctx context.Context, pkgPath string) error {
	var b strings.Builder
	if s.workspaceMode()&moduleMode == 0 {
		gorootSrcPkg := filepath.FromSlash(filepath.Join(s.view.goroot, "src", pkgPath))

		b.WriteString(fmt.Sprintf("cannot find package %q in any of \n\t%s (from $GOROOT)", pkgPath, gorootSrcPkg))

		// Use filepath.SplitList so that the platform's GOPATH list separator is respected.
		for _, gopath := range filepath.SplitList(s.view.gopath) {
			gopathSrcPkg := filepath.FromSlash(filepath.Join(gopath, "src", pkgPath))
			b.WriteString(fmt.Sprintf("\n\t%s (from $GOPATH)", gopathSrcPkg))
		}
	} else {
		b.WriteString(fmt.Sprintf("no required module provides package %q", pkgPath))
		if err := s.getInitializationError(ctx); err != nil {
			b.WriteString(fmt.Sprintf(" (workspace configuration error: %s)", err.MainError))
		}
	}
	return errors.New(b.String())
}

type extendedError struct {
	primary     types.Error
	secondaries []types.Error
}

func (e extendedError) Error() string {
	return e.primary.Error()
}

// expandErrors duplicates "secondary" errors by mapping them to their main
// error. Some errors returned by the type checker are followed by secondary
// errors which give more information about the error. These are errors in
// their own right, and they are marked by starting with \t. For instance, when
// there is a multiply-defined function, the secondary error points back to the
// definition first noticed.
//
// This function associates the secondary error with its primary error, which can
// then be used as RelatedInformation when the error becomes a diagnostic.
//
// If supportsRelatedInformation is false, the secondary is instead embedded as
// additional context in the primary error.
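//
// For example, a primary error "f redeclared in this block" with secondary
// "\tother declaration of f" yields the primary error (with the secondary
// attached) plus a clone located at the other declaration, whose message is
// "f redeclared in this block (see details)" when related information is
// supported, or "f redeclared in this block (this error: other declaration
// of f)" otherwise.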
func expandErrors(errs []types.Error, supportsRelatedInformation bool) []extendedError {
	var result []extendedError
	for i := 0; i < len(errs); {
		original := extendedError{
			primary: errs[i],
		}
		for i++; i < len(errs); i++ {
			spl := errs[i]
			if len(spl.Msg) == 0 || spl.Msg[0] != '\t' {
				break
			}
			spl.Msg = spl.Msg[1:]
			original.secondaries = append(original.secondaries, spl)
		}

		// Clone the error to all its related locations -- VS Code, at least,
		// doesn't do it for us.
		result = append(result, original)
		for i, mainSecondary := range original.secondaries {
			// Create the new primary error, with a tweaked message, in the
			// secondary's location. We need to start from the secondary to
			// capture its unexported location fields.
			relocatedSecondary := mainSecondary
			if supportsRelatedInformation {
				relocatedSecondary.Msg = fmt.Sprintf("%v (see details)", original.primary.Msg)
			} else {
				relocatedSecondary.Msg = fmt.Sprintf("%v (this error: %v)", original.primary.Msg, mainSecondary.Msg)
			}
			relocatedSecondary.Soft = original.primary.Soft

			// Copy over the secondary errors, noting the location of the
			// current error we're cloning.
			clonedError := extendedError{primary: relocatedSecondary, secondaries: []types.Error{original.primary}}
			for j, secondary := range original.secondaries {
				if i == j {
					secondary.Msg += " (this error)"
				}
				clonedError.secondaries = append(clonedError.secondaries, secondary)
			}
			result = append(result, clonedError)
		}

	}
	return result
}

// resolveImportPath resolves an import path in pkg to a package from deps.
// It should produce the same results as the go command's resolveImportPath:
// https://cs.opensource.google/go/go/+/master:src/cmd/go/internal/load/pkg.go;drc=641918ee09cb44d282a30ee8b66f99a0b63eaef9;l=990.
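//
// For example, in GOPATH mode a package a/b/c importing x/y may resolve to
// the vendored candidates a/b/vendor/x/y, a/vendor/x/y, or vendor/x/y, tried
// in that order.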
func resolveImportPath(importPath string, pkg *pkg, deps map[packagePath]*packageHandle) *packageHandle {
	if dep := deps[packagePath(importPath)]; dep != nil {
		return dep
	}
	// We may be in GOPATH mode, in which case we need to check vendor dirs.
	searchDir := path.Dir(pkg.PkgPath())
	for {
		vdir := packagePath(path.Join(searchDir, "vendor", importPath))
		if vdep := deps[vdir]; vdep != nil {
			return vdep
		}

		// Search until Dir doesn't take us anywhere new, e.g. "." or "/".
		next := path.Dir(searchDir)
		if searchDir == next {
			break
		}
		searchDir = next
	}

	// Vendor didn't work. Let's try minimal module compatibility mode.
	// In MMC, the packagePath is the canonical (.../vN/...) path, which
	// is hard to calculate. But the go command has already resolved the ID
	// to the non-versioned path, and we can take advantage of that.
	for _, dep := range deps {
		if dep.ID() == importPath {
			return dep
		}
	}
	return nil
}

// An importerFunc is an implementation of the single-method
// types.Importer interface based on a function value.
type importerFunc func(path string) (*types.Package, error)

func (f importerFunc) Import(path string) (*types.Package, error) { return f(path) }