// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package checker defines the implementation of the checker commands.
// The same code drives the multi-analysis driver, the single-analysis
// driver that is conventionally provided for convenience along with
// each analysis package, and the test driver.
package checker

import (
	"bytes"
	"encoding/gob"
	"flag"
	"fmt"
	"go/format"
	"go/parser"
	"go/token"
	"go/types"
	"io/ioutil"
	"log"
	"os"
	"reflect"
	"runtime"
	"runtime/pprof"
	"runtime/trace"
	"sort"
	"strings"
	"sync"
	"time"

	"golang.org/x/tools/go/analysis"
	"golang.org/x/tools/go/analysis/internal/analysisflags"
	"golang.org/x/tools/go/packages"
	"golang.org/x/tools/internal/analysisinternal"
	"golang.org/x/tools/internal/span"
)

var (
	// Debug is a set of single-letter flags:
	//
	//	f	show [f]acts as they are created
	//	p	disable [p]arallel execution of analyzers
	//	s	do additional [s]anity checks on fact types and serialization
	//	t	show [t]iming info (NB: use 'p' flag to avoid GC/scheduler noise)
	//	v	show [v]erbose logging
	//
	Debug = ""

	// Log files for optional performance tracing.
	CPUProfile, MemProfile, Trace string

	// Fix determines whether to apply all suggested fixes.
	Fix bool
)

// RegisterFlags registers command-line flags used by the analysis driver.
func RegisterFlags() {
	// When adding flags here, remember to update
	// the list of suppressed flags in analysisflags.

	flag.StringVar(&Debug, "debug", Debug, `debug flags, any subset of "fpstv"`)

	flag.StringVar(&CPUProfile, "cpuprofile", "", "write CPU profile to this file")
	flag.StringVar(&MemProfile, "memprofile", "", "write memory profile to this file")
	flag.StringVar(&Trace, "trace", "", "write trace log to this file")

	flag.BoolVar(&Fix, "fix", false, "apply all suggested fixes")
}
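
// A minimal custom driver built on this package might look roughly like the
// sketch below; "myanalyzer" is a hypothetical *analysis.Analyzer, and the
// real drivers (singlechecker, multichecker) additionally register and
// validate per-analyzer flags before calling Run:
//
//	func main() {
//		checker.RegisterFlags()
//		flag.Parse()
//		os.Exit(checker.Run(flag.Args(), []*analysis.Analyzer{myanalyzer}))
//	}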

// Run loads the packages specified by args using go/packages,
// then applies the specified analyzers to them.
// Analysis flags must already have been set.
// It provides most of the logic for the main functions of both the
// singlechecker and the multi-analysis commands.
// It returns the appropriate exit code.
func Run(args []string, analyzers []*analysis.Analyzer) (exitcode int) {
	if CPUProfile != "" {
		f, err := os.Create(CPUProfile)
		if err != nil {
			log.Fatal(err)
		}
		if err := pprof.StartCPUProfile(f); err != nil {
			log.Fatal(err)
		}
		// NB: profile won't be written in case of error.
		defer pprof.StopCPUProfile()
	}

	if Trace != "" {
		f, err := os.Create(Trace)
		if err != nil {
			log.Fatal(err)
		}
		if err := trace.Start(f); err != nil {
			log.Fatal(err)
		}
		// NB: trace log won't be written in case of error.
		defer func() {
			trace.Stop()
			log.Printf("To view the trace, run:\n$ go tool trace view %s", Trace)
		}()
	}

	if MemProfile != "" {
		f, err := os.Create(MemProfile)
		if err != nil {
			log.Fatal(err)
		}
		// NB: memprofile won't be written in case of error.
		defer func() {
			runtime.GC() // get up-to-date statistics
			if err := pprof.WriteHeapProfile(f); err != nil {
				log.Fatalf("Writing memory profile: %v", err)
			}
			f.Close()
		}()
	}

	// Load the packages.
	if dbg('v') {
		log.SetPrefix("")
		log.SetFlags(log.Lmicroseconds) // display timing
		log.Printf("load %s", args)
	}

	// Optimization: if the selected analyzers don't produce/consume
	// facts, we need source only for the initial packages.
	allSyntax := needFacts(analyzers)
	initial, err := load(args, allSyntax)
	if err != nil {
		log.Print(err)
		return 1 // load errors
	}

	// Print the results.
	roots := analyze(initial, analyzers)

	if Fix {
		applyFixes(roots)
	}

	return printDiagnostics(roots)
}

// load loads the initial packages.
func load(patterns []string, allSyntax bool) ([]*packages.Package, error) {
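	// If any analyzer produces or consumes facts, it must be run on the
	// dependencies too, so we need type-checked syntax for the whole import
	// graph (LoadAllSyntax); otherwise syntax for the initial packages
	// alone (LoadSyntax) suffices.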
	mode := packages.LoadSyntax
	if allSyntax {
		mode = packages.LoadAllSyntax
	}
	conf := packages.Config{
		Mode:  mode,
		Tests: true,
	}
	initial, err := packages.Load(&conf, patterns...)
	if err == nil {
		if n := packages.PrintErrors(initial); n > 1 {
			err = fmt.Errorf("%d errors during loading", n)
		} else if n == 1 {
			err = fmt.Errorf("error during loading")
		} else if len(initial) == 0 {
			err = fmt.Errorf("%s matched no packages", strings.Join(patterns, " "))
		}
	}

	return initial, err
}

// TestAnalyzer applies an analysis to a set of packages (and their
// dependencies if necessary) and returns the results.
//
// Facts about pkg are returned in a map keyed by object; package facts
// have a nil key.
//
// This entry point is used only by analysistest.
func TestAnalyzer(a *analysis.Analyzer, pkgs []*packages.Package) []*TestAnalyzerResult {
	var results []*TestAnalyzerResult
	for _, act := range analyze(pkgs, []*analysis.Analyzer{a}) {
		facts := make(map[types.Object][]analysis.Fact)
		for key, fact := range act.objectFacts {
			if key.obj.Pkg() == act.pass.Pkg {
				facts[key.obj] = append(facts[key.obj], fact)
			}
		}
		for key, fact := range act.packageFacts {
			if key.pkg == act.pass.Pkg {
				facts[nil] = append(facts[nil], fact)
			}
		}

		results = append(results, &TestAnalyzerResult{act.pass, act.diagnostics, facts, act.result, act.err})
	}
	return results
}

type TestAnalyzerResult struct {
	Pass        *analysis.Pass
	Diagnostics []analysis.Diagnostic
	Facts       map[types.Object][]analysis.Fact
	Result      interface{}
	Err         error
}

func analyze(pkgs []*packages.Package, analyzers []*analysis.Analyzer) []*action {
	// Construct the action graph.
	if dbg('v') {
		log.Printf("building graph of analysis passes")
	}

	// Each graph node (action) is one unit of analysis.
	// Edges express package-to-package (vertical) dependencies,
	// and analysis-to-analysis (horizontal) dependencies.
	type key struct {
		*analysis.Analyzer
		*packages.Package
	}
	actions := make(map[key]*action)

	var mkAction func(a *analysis.Analyzer, pkg *packages.Package) *action
	mkAction = func(a *analysis.Analyzer, pkg *packages.Package) *action {
		k := key{a, pkg}
		act, ok := actions[k]
		if !ok {
			act = &action{a: a, pkg: pkg}

			// Add a dependency on each required analyzer.
			for _, req := range a.Requires {
				act.deps = append(act.deps, mkAction(req, pkg))
			}

			// An analysis that consumes/produces facts
			// must run on the package's dependencies too.
			if len(a.FactTypes) > 0 {
				paths := make([]string, 0, len(pkg.Imports))
				for path := range pkg.Imports {
					paths = append(paths, path)
				}
				sort.Strings(paths) // for determinism
				for _, path := range paths {
					dep := mkAction(a, pkg.Imports[path])
					act.deps = append(act.deps, dep)
				}
			}

			actions[k] = act
		}
		return act
	}
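
	// For example, given an analyzer A that requires analyzer B and declares
	// fact types, and an initial package P that imports Q, mkAction(A, P)
	// creates edges A@P -> B@P (horizontal) and A@P -> A@Q (vertical), and
	// so on transitively through Q's imports.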

	// Build nodes for initial packages.
	var roots []*action
	for _, a := range analyzers {
		for _, pkg := range pkgs {
			root := mkAction(a, pkg)
			root.isroot = true
			roots = append(roots, root)
		}
	}

	// Execute the graph in parallel.
	execAll(roots)

	return roots
}

func applyFixes(roots []*action) {
	visited := make(map[*action]bool)
	var apply func(*action) error
	var visitAll func(actions []*action) error
	visitAll = func(actions []*action) error {
		for _, act := range actions {
			if !visited[act] {
				visited[act] = true
				visitAll(act.deps)
				if err := apply(act); err != nil {
					return err
				}
			}
		}
		return nil
	}

	// TODO(matloob): Is this tree business too complicated? (After all this is Go!)
	// Just create a set (map) of edits, sort by pos and call it a day?
	type offsetedit struct {
		start, end int
		newText    []byte
	} // TextEdit using byteOffsets instead of pos
	type node struct {
		edit        offsetedit
		left, right *node
	}

	var insert func(tree **node, edit offsetedit) error
	insert = func(treeptr **node, edit offsetedit) error {
		if *treeptr == nil {
			*treeptr = &node{edit, nil, nil}
			return nil
		}
		tree := *treeptr
		if edit.end <= tree.edit.start {
			return insert(&tree.left, edit)
		} else if edit.start >= tree.edit.end {
			return insert(&tree.right, edit)
		}

		// Overlapping text edit.
		return fmt.Errorf("analyses applying overlapping text edits affecting pos range (%v, %v) and (%v, %v)",
			edit.start, edit.end, tree.edit.start, tree.edit.end)

	}
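
	// An in-order walk of this tree visits edits in increasing start offset,
	// which is the order in which recurse (below) splices them into the file.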

	editsForFile := make(map[*token.File]*node)

	apply = func(act *action) error {
		for _, diag := range act.diagnostics {
			for _, sf := range diag.SuggestedFixes {
				for _, edit := range sf.TextEdits {
					// Validate the edit.
					if edit.Pos > edit.End {
						return fmt.Errorf(
							"diagnostic for analysis %v contains Suggested Fix with malformed edit: pos (%v) > end (%v)",
							act.a.Name, edit.Pos, edit.End)
					}
					file, endfile := act.pkg.Fset.File(edit.Pos), act.pkg.Fset.File(edit.End)
					if file == nil || endfile == nil || file != endfile {
						return fmt.Errorf(
							"diagnostic for analysis %v contains Suggested Fix with malformed edit spanning files %v and %v",
							act.a.Name, file.Name(), endfile.Name())
					}
					start, end := file.Offset(edit.Pos), file.Offset(edit.End)

					// TODO(matloob): Validate that edits do not affect other packages.
					root := editsForFile[file]
					if err := insert(&root, offsetedit{start, end, edit.NewText}); err != nil {
						return err
					}
					editsForFile[file] = root // In case the root changed
				}
			}
		}
		return nil
	}

	visitAll(roots)

	fset := token.NewFileSet() // Shared by parse calls below
	// Now we've got a set of valid edits for each file. Get the new file contents.
	for f, tree := range editsForFile {
		contents, err := ioutil.ReadFile(f.Name())
		if err != nil {
			log.Fatal(err)
		}

		cur := 0 // current position in the file

		var out bytes.Buffer

		var recurse func(*node)
		recurse = func(node *node) {
			if node.left != nil {
				recurse(node.left)
			}

			edit := node.edit
			if edit.start > cur {
				out.Write(contents[cur:edit.start])
			}
			out.Write(edit.newText)
			cur = edit.end

			if node.right != nil {
				recurse(node.right)
			}
		}
		recurse(tree)
		// Write out the rest of the file.
		if cur < len(contents) {
			out.Write(contents[cur:])
		}

		// Try to format the file.
		ff, err := parser.ParseFile(fset, f.Name(), out.Bytes(), parser.ParseComments)
		if err == nil {
			var buf bytes.Buffer
			if err = format.Node(&buf, fset, ff); err == nil {
				out = buf
			}
		}

		if err := ioutil.WriteFile(f.Name(), out.Bytes(), 0644); err != nil {
			log.Fatal(err)
		}
	}
}

// printDiagnostics prints the diagnostics for the root packages in either
// plain text or JSON format. JSON format also includes errors for any
// dependencies.
//
// It returns the exitcode: in plain mode, 0 for success, 1 for analysis
// errors, and 3 for diagnostics. We avoid 2 since the flag package uses
// it. JSON mode always succeeds at printing errors and diagnostics in a
// structured form to stdout.
func printDiagnostics(roots []*action) (exitcode int) {
	// Print the output.
	//
	// Print diagnostics only for root packages,
	// but errors for all packages.
	printed := make(map[*action]bool)
	var print func(*action)
	var visitAll func(actions []*action)
	visitAll = func(actions []*action) {
		for _, act := range actions {
			if !printed[act] {
				printed[act] = true
				visitAll(act.deps)
				print(act)
			}
		}
	}

	if analysisflags.JSON {
		// JSON output
		tree := make(analysisflags.JSONTree)
		print = func(act *action) {
			var diags []analysis.Diagnostic
			if act.isroot {
				diags = act.diagnostics
			}
			tree.Add(act.pkg.Fset, act.pkg.ID, act.a.Name, diags, act.err)
		}
		visitAll(roots)
		tree.Print()
	} else {
		// plain text output

		// De-duplicate diagnostics by position (not token.Pos) to
		// avoid double-reporting in source files that belong to
		// multiple packages, such as foo and foo.test.
		type key struct {
			pos token.Position
			end token.Position
			*analysis.Analyzer
			message string
		}
		seen := make(map[key]bool)

		print = func(act *action) {
			if act.err != nil {
				fmt.Fprintf(os.Stderr, "%s: %v\n", act.a.Name, act.err)
				exitcode = 1 // analysis failed, at least partially
				return
			}
			if act.isroot {
				for _, diag := range act.diagnostics {
					// We don't display a.Name/f.Category
					// as most users don't care.

					posn := act.pkg.Fset.Position(diag.Pos)
					end := act.pkg.Fset.Position(diag.End)
					k := key{posn, end, act.a, diag.Message}
					if seen[k] {
						continue // duplicate
					}
					seen[k] = true

					analysisflags.PrintPlain(act.pkg.Fset, diag)
				}
			}
		}
		visitAll(roots)

		if exitcode == 0 && len(seen) > 0 {
			exitcode = 3 // successfully produced diagnostics
		}
	}

	// Print timing info.
	if dbg('t') {
		if !dbg('p') {
			log.Println("Warning: times are mostly GC/scheduler noise; use -debug=tp to disable parallelism")
		}
		var all []*action
		var total time.Duration
		for act := range printed {
			all = append(all, act)
			total += act.duration
		}
		sort.Slice(all, func(i, j int) bool {
			return all[i].duration > all[j].duration
		})

		// Print actions accounting for 90% of the total.
		var sum time.Duration
		for _, act := range all {
			fmt.Fprintf(os.Stderr, "%s\t%s\n", act.duration, act)
			sum += act.duration
			if sum >= total*9/10 {
				break
			}
		}
	}

	return exitcode
}

// needFacts reports whether any analysis required by the specified set
// needs facts.  If so, we must load the entire program from source.
func needFacts(analyzers []*analysis.Analyzer) bool {
	seen := make(map[*analysis.Analyzer]bool)
	var q []*analysis.Analyzer // for BFS
	q = append(q, analyzers...)
	for len(q) > 0 {
		a := q[0]
		q = q[1:]
		if !seen[a] {
			seen[a] = true
			if len(a.FactTypes) > 0 {
				return true
			}
			q = append(q, a.Requires...)
		}
	}
	return false
}

// An action represents one unit of analysis work: the application of
// one analysis to one package. Actions form a DAG, both within a
// package (as different analyzers are applied, either in sequence or
// parallel), and across packages (as dependencies are analyzed).
type action struct {
	once         sync.Once
	a            *analysis.Analyzer
	pkg          *packages.Package
	pass         *analysis.Pass
	isroot       bool
	deps         []*action
	objectFacts  map[objectFactKey]analysis.Fact
	packageFacts map[packageFactKey]analysis.Fact
	inputs       map[*analysis.Analyzer]interface{}
	result       interface{}
	diagnostics  []analysis.Diagnostic
	err          error
	duration     time.Duration
}

type objectFactKey struct {
	obj types.Object
	typ reflect.Type
}

type packageFactKey struct {
	pkg *types.Package
	typ reflect.Type
}

func (act *action) String() string {
	return fmt.Sprintf("%s@%s", act.a, act.pkg)
}

func execAll(actions []*action) {
	sequential := dbg('p')
	var wg sync.WaitGroup
	for _, act := range actions {
		wg.Add(1)
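		// work takes act as a parameter so that each goroutine
		// operates on its own copy of the loop variable.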
		work := func(act *action) {
			act.exec()
			wg.Done()
		}
		if sequential {
			work(act)
		} else {
			go work(act)
		}
	}
	wg.Wait()
}

func (act *action) exec() { act.once.Do(act.execOnce) }

func (act *action) execOnce() {
	// Analyze dependencies.
	execAll(act.deps)

	// TODO(adonovan): uncomment this during profiling.
	// It won't build pre-go1.11 but conditional compilation
	// using build tags isn't warranted.
	//
	// ctx, task := trace.NewTask(context.Background(), "exec")
	// trace.Log(ctx, "pass", act.String())
	// defer task.End()

	// Record time spent in this node but not its dependencies.
	// In parallel mode, due to GC/scheduler contention, the
	// time is 5x higher than in sequential mode, even with a
	// semaphore limiting the number of threads here.
	// So use -debug=tp.
	if dbg('t') {
		t0 := time.Now()
		defer func() { act.duration = time.Since(t0) }()
	}

	// Report an error if any dependency failed.
	var failed []string
	for _, dep := range act.deps {
		if dep.err != nil {
			failed = append(failed, dep.String())
		}
	}
	if failed != nil {
		sort.Strings(failed)
		act.err = fmt.Errorf("failed prerequisites: %s", strings.Join(failed, ", "))
		return
	}

	// Plumb the output values of the dependencies
	// into the inputs of this action.  Also facts.
	inputs := make(map[*analysis.Analyzer]interface{})
	act.objectFacts = make(map[objectFactKey]analysis.Fact)
	act.packageFacts = make(map[packageFactKey]analysis.Fact)
	for _, dep := range act.deps {
		if dep.pkg == act.pkg {
			// Same package, different analysis (horizontal edge):
			// in-memory outputs of prerequisite analyzers
			// become inputs to this analysis pass.
			inputs[dep.a] = dep.result

		} else if dep.a == act.a { // (always true)
			// Same analysis, different package (vertical edge):
			// serialized facts produced by prerequisite analysis
			// become available to this analysis pass.
			inheritFacts(act, dep)
		}
	}

	// Run the analysis.
	pass := &analysis.Pass{
		Analyzer:          act.a,
		Fset:              act.pkg.Fset,
		Files:             act.pkg.Syntax,
		OtherFiles:        act.pkg.OtherFiles,
		IgnoredFiles:      act.pkg.IgnoredFiles,
		Pkg:               act.pkg.Types,
		TypesInfo:         act.pkg.TypesInfo,
		TypesSizes:        act.pkg.TypesSizes,
		ResultOf:          inputs,
		Report:            func(d analysis.Diagnostic) { act.diagnostics = append(act.diagnostics, d) },
		ImportObjectFact:  act.importObjectFact,
		ExportObjectFact:  act.exportObjectFact,
		ImportPackageFact: act.importPackageFact,
		ExportPackageFact: act.exportPackageFact,
		AllObjectFacts:    act.allObjectFacts,
		AllPackageFacts:   act.allPackageFacts,
	}
	act.pass = pass

	var errors []types.Error
	// Get any type errors that are attributed to the pkg.
	// This is necessary to test analyzers that provide
	// suggested fixes for compiler/type errors.
	for _, err := range act.pkg.Errors {
		if err.Kind != packages.TypeError {
			continue
		}
		// err.Pos is a string of form: "file:line:col" or "file:line" or "" or "-"
		spn := span.Parse(err.Pos)
		// Extract the token positions from the error string.
		line, col, offset := spn.Start().Line(), spn.Start().Column(), -1
		act.pkg.Fset.Iterate(func(f *token.File) bool {
			if f.Name() != spn.URI().Filename() {
				return true
			}
			offset = int(f.LineStart(line)) + col - 1
			return false
		})
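		// Note: f.LineStart returns a token.Pos, so offset holds a Pos value
		// here rather than a byte offset, and is used as such below.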
		if offset == -1 {
			continue
		}
		errors = append(errors, types.Error{
			Fset: act.pkg.Fset,
			Msg:  err.Msg,
			Pos:  token.Pos(offset),
		})
	}
	analysisinternal.SetTypeErrors(pass, errors)

	var err error
	if act.pkg.IllTyped && !pass.Analyzer.RunDespiteErrors {
		err = fmt.Errorf("analysis skipped due to errors in package")
	} else {
		act.result, err = pass.Analyzer.Run(pass)
		if err == nil {
			if got, want := reflect.TypeOf(act.result), pass.Analyzer.ResultType; got != want {
				err = fmt.Errorf(
					"internal error: on package %s, analyzer %s returned a result of type %v, but declared ResultType %v",
					pass.Pkg.Path(), pass.Analyzer, got, want)
			}
		}
	}
	act.err = err

	// disallow calls after Run
	pass.ExportObjectFact = nil
	pass.ExportPackageFact = nil
}

// inheritFacts populates act's fact maps with
// those it obtains from its dependency, dep.
func inheritFacts(act, dep *action) {
	serialize := dbg('s')

	for key, fact := range dep.objectFacts {
		// Filter out facts related to objects
		// that are irrelevant downstream
		// (equivalently: not in the compiler export data).
		if !exportedFrom(key.obj, dep.pkg.Types) {
			if false {
				log.Printf("%v: discarding %T fact from %s for %s: %s", act, fact, dep, key.obj, fact)
			}
			continue
		}

		// Optionally serialize/deserialize fact
		// to verify that it works across address spaces.
		if serialize {
			encodedFact, err := codeFact(fact)
			if err != nil {
				log.Panicf("internal error: encoding of %T fact failed in %v", fact, act)
			}
			fact = encodedFact
		}

		if false {
			log.Printf("%v: inherited %T fact for %s: %s", act, fact, key.obj, fact)
		}
		act.objectFacts[key] = fact
	}

	for key, fact := range dep.packageFacts {
		// TODO: filter out facts that belong to
		// packages not mentioned in the export data
		// to prevent side channels.

		// Optionally serialize/deserialize fact
		// to verify that it works across address spaces
		// and is deterministic.
		if serialize {
			encodedFact, err := codeFact(fact)
			if err != nil {
				log.Panicf("internal error: encoding of %T fact failed in %v", fact, act)
			}
			fact = encodedFact
		}

		if false {
			log.Printf("%v: inherited %T fact for %s: %s", act, fact, key.pkg.Path(), fact)
		}
		act.packageFacts[key] = fact
	}
}

// codeFact encodes then decodes a fact,
// just to exercise that logic.
func codeFact(fact analysis.Fact) (analysis.Fact, error) {
	// We encode facts one at a time.
	// A real modular driver would emit all facts
	// into one encoder to improve gob efficiency.
	var buf bytes.Buffer
	if err := gob.NewEncoder(&buf).Encode(fact); err != nil {
		return nil, err
	}

	// Encode it twice and assert that we get the same bits.
	// This helps detect nondeterministic Gob encoding (e.g. of maps).
	var buf2 bytes.Buffer
	if err := gob.NewEncoder(&buf2).Encode(fact); err != nil {
		return nil, err
	}
	if !bytes.Equal(buf.Bytes(), buf2.Bytes()) {
		return nil, fmt.Errorf("encoding of %T fact is nondeterministic", fact)
	}

	new := reflect.New(reflect.TypeOf(fact).Elem()).Interface().(analysis.Fact)
	if err := gob.NewDecoder(&buf).Decode(new); err != nil {
		return nil, err
	}
	return new, nil
}

// exportedFrom reports whether obj may be visible to a package that imports pkg.
// This includes not just the exported members of pkg, but also unexported
// constants, types, fields, and methods, perhaps belonging to other packages,
// that find their way into the API.
// This is an overapproximation of the more accurate approach used by
// gc export data, which walks the type graph, but it's much simpler.
//
// TODO(adonovan): do more accurate filtering by walking the type graph.
func exportedFrom(obj types.Object, pkg *types.Package) bool {
	switch obj := obj.(type) {
	case *types.Func:
		return obj.Exported() && obj.Pkg() == pkg ||
			obj.Type().(*types.Signature).Recv() != nil
	case *types.Var:
		if obj.IsField() {
			return true
		}
		// we can't filter more aggressively than this because we need
		// to consider function parameters exported, but have no way
		// of telling apart function parameters from local variables.
		return obj.Pkg() == pkg
	case *types.TypeName, *types.Const:
		return true
	}
	return false // Nil, Builtin, Label, or PkgName
}

// importObjectFact implements Pass.ImportObjectFact.
// Given a non-nil pointer ptr of type *T, where *T satisfies Fact,
// importObjectFact copies the fact value to *ptr.
func (act *action) importObjectFact(obj types.Object, ptr analysis.Fact) bool {
	if obj == nil {
		panic("nil object")
	}
	key := objectFactKey{obj, factType(ptr)}
	if v, ok := act.objectFacts[key]; ok {
		reflect.ValueOf(ptr).Elem().Set(reflect.ValueOf(v).Elem())
		return true
	}
	return false
}

// exportObjectFact implements Pass.ExportObjectFact.
func (act *action) exportObjectFact(obj types.Object, fact analysis.Fact) {
	if act.pass.ExportObjectFact == nil {
		log.Panicf("%s: Pass.ExportObjectFact(%s, %T) called after Run", act, obj, fact)
	}

	if obj.Pkg() != act.pkg.Types {
		log.Panicf("internal error: in analysis %s of package %s: Fact.Set(%s, %T): can't set facts on objects belonging to another package",
			act.a, act.pkg, obj, fact)
	}

	key := objectFactKey{obj, factType(fact)}
	act.objectFacts[key] = fact // clobber any existing entry
	if dbg('f') {
		objstr := types.ObjectString(obj, (*types.Package).Name)
		fmt.Fprintf(os.Stderr, "%s: object %s has fact %s\n",
			act.pkg.Fset.Position(obj.Pos()), objstr, fact)
	}
}

// allObjectFacts implements Pass.AllObjectFacts.
func (act *action) allObjectFacts() []analysis.ObjectFact {
	facts := make([]analysis.ObjectFact, 0, len(act.objectFacts))
	for k := range act.objectFacts {
		facts = append(facts, analysis.ObjectFact{k.obj, act.objectFacts[k]})
	}
	return facts
}

// importPackageFact implements Pass.ImportPackageFact.
// Given a non-nil pointer ptr of type *T, where *T satisfies Fact,
// importPackageFact copies the fact value to *ptr.
func (act *action) importPackageFact(pkg *types.Package, ptr analysis.Fact) bool {
	if pkg == nil {
		panic("nil package")
	}
	key := packageFactKey{pkg, factType(ptr)}
	if v, ok := act.packageFacts[key]; ok {
		reflect.ValueOf(ptr).Elem().Set(reflect.ValueOf(v).Elem())
		return true
	}
	return false
}

// exportPackageFact implements Pass.ExportPackageFact.
func (act *action) exportPackageFact(fact analysis.Fact) {
	if act.pass.ExportPackageFact == nil {
		log.Panicf("%s: Pass.ExportPackageFact(%T) called after Run", act, fact)
	}

	key := packageFactKey{act.pass.Pkg, factType(fact)}
	act.packageFacts[key] = fact // clobber any existing entry
	if dbg('f') {
		fmt.Fprintf(os.Stderr, "%s: package %s has fact %s\n",
			act.pkg.Fset.Position(act.pass.Files[0].Pos()), act.pass.Pkg.Path(), fact)
	}
}

func factType(fact analysis.Fact) reflect.Type {
	t := reflect.TypeOf(fact)
	if t.Kind() != reflect.Ptr {
		log.Fatalf("invalid Fact type: got %T, want pointer", fact)
	}
	return t
}

// allPackageFacts implements Pass.AllPackageFacts.
func (act *action) allPackageFacts() []analysis.PackageFact {
	facts := make([]analysis.PackageFact, 0, len(act.packageFacts))
	for k := range act.packageFacts {
		facts = append(facts, analysis.PackageFact{k.pkg, act.packageFacts[k]})
	}
	return facts
}

func dbg(b byte) bool { return strings.IndexByte(Debug, b) >= 0 }