1// Copyright 2009 The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5// Package parser implements a parser for Go source files. Input may be
6// provided in a variety of forms (see the various Parse* functions); the
7// output is an abstract syntax tree (AST) representing the Go source. The
8// parser is invoked through one of the Parse* functions.
9//
10// The parser accepts a larger language than is syntactically permitted by
11// the Go spec, for simplicity, and for improved robustness in the presence
12// of syntax errors. For instance, in method declarations, the receiver is
13// treated like an ordinary parameter list and thus may contain multiple
// entries where the spec permits exactly one. Consequently, the corresponding
// field in the AST (ast.FuncDecl.Recv) is not restricted to one entry.
16//
17package parser
18
19import (
20	"fmt"
21	"go/ast"
22	"go/internal/typeparams"
23	"go/scanner"
24	"go/token"
25	"strconv"
26	"strings"
27	"unicode"
28)
29
30// The parser structure holds the parser's internal state.
// The parser structure holds the parser's internal state.
type parser struct {
	file    *token.File       // token file of the source being parsed
	errors  scanner.ErrorList // parse/scan errors collected so far
	scanner scanner.Scanner   // underlying tokenizer

	// Tracing/debugging
	mode   Mode // parsing mode
	trace  bool // == (mode&Trace != 0)
	indent int  // indentation used for tracing output

	// Comments
	comments    []*ast.CommentGroup // all comment groups seen, in order
	leadComment *ast.CommentGroup   // last lead comment
	lineComment *ast.CommentGroup   // last line comment

	// Next token
	pos token.Pos   // token position
	tok token.Token // one token look-ahead
	lit string      // token literal

	// Error recovery
	// (used to limit the number of calls to parser.advance
	// w/o making scanning progress - avoids potential endless
	// loops across multiple parser functions during error recovery)
	syncPos token.Pos // last synchronization position
	syncCnt int       // number of parser.advance calls without progress

	// Non-syntactic parser control
	exprLev int  // < 0: in control clause, >= 0: in expression
	inRhs   bool // if set, the parser is parsing a rhs expression

	imports []*ast.ImportSpec // list of imports
}
64
65func (p *parser) init(fset *token.FileSet, filename string, src []byte, mode Mode) {
66	p.file = fset.AddFile(filename, -1, len(src))
67	var m scanner.Mode
68	if mode&ParseComments != 0 {
69		m = scanner.ScanComments
70	}
71	eh := func(pos token.Position, msg string) { p.errors.Add(pos, msg) }
72	p.scanner.Init(p.file, src, eh, m)
73
74	p.mode = mode
75	p.trace = mode&Trace != 0 // for convenience (p.trace is used frequently)
76	p.next()
77}
78
// parseTypeParams reports whether type parameter syntax should be
// recognized: typeparams support must be built in (typeparams.Enabled)
// and the DisallowParsing mode bit must not be set.
func (p *parser) parseTypeParams() bool {
	return typeparams.Enabled && p.mode&typeparams.DisallowParsing == 0
}
82
83// ----------------------------------------------------------------------------
84// Parsing support
85
86func (p *parser) printTrace(a ...interface{}) {
87	const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
88	const n = len(dots)
89	pos := p.file.Position(p.pos)
90	fmt.Printf("%5d:%3d: ", pos.Line, pos.Column)
91	i := 2 * p.indent
92	for i > n {
93		fmt.Print(dots)
94		i -= n
95	}
96	// i <= n
97	fmt.Print(dots[0:i])
98	fmt.Println(a...)
99}
100
// trace prints an opening trace message and increases the trace
// indentation; it returns p so it composes with un (see below).
func trace(p *parser, msg string) *parser {
	p.printTrace(msg, "(")
	p.indent++
	return p
}
106
// un is the counterpart of trace: it decreases the trace indentation
// and prints the closing marker.
// Usage pattern: defer un(trace(p, "..."))
func un(p *parser) {
	p.indent--
	p.printTrace(")")
}
112
113// Advance to the next token.
114func (p *parser) next0() {
115	// Because of one-token look-ahead, print the previous token
116	// when tracing as it provides a more readable output. The
117	// very first token (!p.pos.IsValid()) is not initialized
118	// (it is token.ILLEGAL), so don't print it.
119	if p.trace && p.pos.IsValid() {
120		s := p.tok.String()
121		switch {
122		case p.tok.IsLiteral():
123			p.printTrace(s, p.lit)
124		case p.tok.IsOperator(), p.tok.IsKeyword():
125			p.printTrace("\"" + s + "\"")
126		default:
127			p.printTrace(s)
128		}
129	}
130
131	p.pos, p.tok, p.lit = p.scanner.Scan()
132}
133
// consumeComment consumes a single comment and returns it together
// with the line on which it ends.
func (p *parser) consumeComment() (comment *ast.Comment, endline int) {
	// /*-style comments may end on a different line than where they start.
	// Scan the comment for '\n' chars and adjust endline accordingly.
	endline = p.file.Line(p.pos)
	if p.lit[1] == '*' {
		// don't use range here - no need to decode Unicode code points
		for i := 0; i < len(p.lit); i++ {
			if p.lit[i] == '\n' {
				endline++
			}
		}
	}

	comment = &ast.Comment{Slash: p.pos, Text: p.lit}
	p.next0()

	return
}
153
// consumeCommentGroup consumes a group of adjacent comments, adds it
// to the parser's comments list, and returns it together with the line
// at which the last comment in the group ends. A non-comment token or
// n empty lines terminate a comment group.
func (p *parser) consumeCommentGroup(n int) (comments *ast.CommentGroup, endline int) {
	var list []*ast.Comment
	endline = p.file.Line(p.pos)
	// Comments whose start line is within n lines of the previous
	// comment's end line belong to the same group.
	for p.tok == token.COMMENT && p.file.Line(p.pos) <= endline+n {
		var comment *ast.Comment
		comment, endline = p.consumeComment()
		list = append(list, comment)
	}

	// add comment group to the comments list
	comments = &ast.CommentGroup{List: list}
	p.comments = append(p.comments, comments)

	return
}
174
// Advance to the next non-comment token. In the process, collect
// any comment groups encountered, and remember the last lead and
// line comments.
//
// A lead comment is a comment group that starts and ends in a
// line without any other tokens and that is followed by a non-comment
// token on the line immediately after the comment group.
//
// A line comment is a comment group that follows a non-comment
// token on the same line, and that has no tokens after it on the line
// where it ends.
//
// Lead and line comments may be considered documentation that is
// stored in the AST.
//
func (p *parser) next() {
	p.leadComment = nil
	p.lineComment = nil
	prev := p.pos // position of the token being consumed
	p.next0()

	if p.tok == token.COMMENT {
		var comment *ast.CommentGroup
		var endline int

		if p.file.Line(p.pos) == p.file.Line(prev) {
			// The comment is on same line as the previous token; it
			// cannot be a lead comment but may be a line comment.
			comment, endline = p.consumeCommentGroup(0)
			if p.file.Line(p.pos) != endline || p.tok == token.EOF {
				// The next token is on a different line, thus
				// the last comment group is a line comment.
				p.lineComment = comment
			}
		}

		// consume successor comments, if any
		endline = -1
		for p.tok == token.COMMENT {
			comment, endline = p.consumeCommentGroup(1)
		}

		if endline+1 == p.file.Line(p.pos) {
			// The next token is following on the line immediately after the
			// comment group, thus the last comment group is a lead comment.
			p.leadComment = comment
		}
	}
}
224
// A bailout panic is raised to indicate early termination (too many
// errors); it is expected to be recovered higher up in the parse call
// stack (not visible in this chunk).
type bailout struct{}
227
// error records msg at pos. Unless the AllErrors mode bit is set,
// errors reported on the same line as the previously recorded error
// are discarded, and parsing is aborted via a bailout panic once more
// than 10 errors have been recorded.
func (p *parser) error(pos token.Pos, msg string) {
	if p.trace {
		defer un(trace(p, "error: "+msg))
	}

	epos := p.file.Position(pos)

	// If AllErrors is not set, discard errors reported on the same line
	// as the last recorded error and stop parsing if there are more than
	// 10 errors.
	if p.mode&AllErrors == 0 {
		n := len(p.errors)
		if n > 0 && p.errors[n-1].Pos.Line == epos.Line {
			return // discard - likely a spurious error
		}
		if n > 10 {
			panic(bailout{})
		}
	}

	p.errors.Add(epos, msg)
}
250
251func (p *parser) errorExpected(pos token.Pos, msg string) {
252	msg = "expected " + msg
253	if pos == p.pos {
254		// the error happened at the current position;
255		// make the error message more specific
256		switch {
257		case p.tok == token.SEMICOLON && p.lit == "\n":
258			msg += ", found newline"
259		case p.tok.IsLiteral():
260			// print 123 rather than 'INT', etc.
261			msg += ", found " + p.lit
262		default:
263			msg += ", found '" + p.tok.String() + "'"
264		}
265	}
266	p.error(pos, msg)
267}
268
// expect checks that the current token is tok, reports an error if it
// is not, and in either case consumes one token and returns the
// position where tok was expected.
func (p *parser) expect(tok token.Token) token.Pos {
	pos := p.pos
	if p.tok != tok {
		p.errorExpected(pos, "'"+tok.String()+"'")
	}
	p.next() // make progress
	return pos
}
277
278// expect2 is like expect, but it returns an invalid position
279// if the expected token is not found.
280func (p *parser) expect2(tok token.Token) (pos token.Pos) {
281	if p.tok == tok {
282		pos = p.pos
283	} else {
284		p.errorExpected(p.pos, "'"+tok.String()+"'")
285	}
286	p.next() // make progress
287	return
288}
289
// expectClosing is like expect but provides a better error message
// for the common case of a missing comma before a newline: the
// automatically inserted newline-semicolon is consumed so that the
// closing token can still be matched.
//
func (p *parser) expectClosing(tok token.Token, context string) token.Pos {
	if p.tok != tok && p.tok == token.SEMICOLON && p.lit == "\n" {
		p.error(p.pos, "missing ',' before newline in "+context)
		p.next()
	}
	return p.expect(tok)
}
300
// expectSemi consumes a terminating semicolon. A semicolon is optional
// before a closing ')' or '}'. A ',' is tolerated in place of ';'
// (with an error) so parsing can continue; any other token triggers
// error recovery via advance.
func (p *parser) expectSemi() {
	// semicolon is optional before a closing ')' or '}'
	if p.tok != token.RPAREN && p.tok != token.RBRACE {
		switch p.tok {
		case token.COMMA:
			// permit a ',' instead of a ';' but complain
			p.errorExpected(p.pos, "';'")
			fallthrough
		case token.SEMICOLON:
			p.next()
		default:
			p.errorExpected(p.pos, "';'")
			p.advance(stmtStart)
		}
	}
}
317
318func (p *parser) atComma(context string, follow token.Token) bool {
319	if p.tok == token.COMMA {
320		return true
321	}
322	if p.tok != follow {
323		msg := "missing ','"
324		if p.tok == token.SEMICOLON && p.lit == "\n" {
325			msg += " before newline"
326		}
327		p.error(p.pos, msg+" in "+context)
328		return true // "insert" comma and continue
329	}
330	return false
331}
332
// assert panics with an internal-error message if cond does not hold.
func assert(cond bool, msg string) {
	if cond {
		return
	}
	panic("go/parser internal error: " + msg)
}
338
// advance consumes tokens until the current token p.tok
// is in the 'to' set, or token.EOF. For error recovery.
//
// The syncPos/syncCnt bookkeeping guards against the case where
// multiple parser functions call advance at the same position: after
// 10 such calls without progress, the matching token is consumed
// anyway so the parser cannot loop forever.
func (p *parser) advance(to map[token.Token]bool) {
	for ; p.tok != token.EOF; p.next() {
		if to[p.tok] {
			// Return only if parser made some progress since last
			// sync or if it has not reached 10 advance calls without
			// progress. Otherwise consume at least one token to
			// avoid an endless parser loop (it is possible that
			// both parseOperand and parseStmt call advance and
			// correctly do not advance, thus the need for the
			// invocation limit p.syncCnt).
			if p.pos == p.syncPos && p.syncCnt < 10 {
				p.syncCnt++
				return
			}
			if p.pos > p.syncPos {
				p.syncPos = p.pos
				p.syncCnt = 0
				return
			}
			// Reaching here indicates a parser bug, likely an
			// incorrect token list in this function, but it only
			// leads to skipping of possibly correct code if a
			// previous error is present, and thus is preferred
			// over a non-terminating parse.
		}
	}
}
368
// stmtStart is the set of tokens that may begin a statement;
// used by advance for error recovery.
var stmtStart = map[token.Token]bool{
	token.BREAK:       true,
	token.CONST:       true,
	token.CONTINUE:    true,
	token.DEFER:       true,
	token.FALLTHROUGH: true,
	token.FOR:         true,
	token.GO:          true,
	token.GOTO:        true,
	token.IF:          true,
	token.RETURN:      true,
	token.SELECT:      true,
	token.SWITCH:      true,
	token.TYPE:        true,
	token.VAR:         true,
}

// declStart is the set of tokens that may begin a declaration;
// used by advance for error recovery.
var declStart = map[token.Token]bool{
	token.CONST: true,
	token.TYPE:  true,
	token.VAR:   true,
}

// exprEnd is the set of tokens that may follow an expression;
// used by advance for error recovery.
var exprEnd = map[token.Token]bool{
	token.COMMA:     true,
	token.COLON:     true,
	token.SEMICOLON: true,
	token.RPAREN:    true,
	token.RBRACK:    true,
	token.RBRACE:    true,
}
400
// safePos returns a valid file position for a given position: If pos
// is valid to begin with, safePos returns pos. If pos is out-of-range,
// safePos returns the EOF position.
//
// This is hack to work around "artificial" end positions in the AST which
// are computed by adding 1 to (presumably valid) token positions. If the
// token positions are invalid due to parse errors, the resulting end position
// may be past the file's EOF position, which would lead to panics if used
// later on.
//
func (p *parser) safePos(pos token.Pos) (res token.Pos) {
	// The deferred recover turns an out-of-range panic from Offset
	// into the EOF position.
	defer func() {
		if recover() != nil {
			res = token.Pos(p.file.Base() + p.file.Size()) // EOF position
		}
	}()
	_ = p.file.Offset(pos) // trigger a panic if position is out-of-range
	return pos
}
420
421// ----------------------------------------------------------------------------
422// Identifiers
423
424func (p *parser) parseIdent() *ast.Ident {
425	pos := p.pos
426	name := "_"
427	if p.tok == token.IDENT {
428		name = p.lit
429		p.next()
430	} else {
431		p.expect(token.IDENT) // use expect() error handling
432	}
433	return &ast.Ident{NamePos: pos, Name: name}
434}
435
436func (p *parser) parseIdentList() (list []*ast.Ident) {
437	if p.trace {
438		defer un(trace(p, "IdentList"))
439	}
440
441	list = append(list, p.parseIdent())
442	for p.tok == token.COMMA {
443		p.next()
444		list = append(list, p.parseIdent())
445	}
446
447	return
448}
449
450// ----------------------------------------------------------------------------
451// Common productions
452
453// If lhs is set, result list elements which are identifiers are not resolved.
454func (p *parser) parseExprList() (list []ast.Expr) {
455	if p.trace {
456		defer un(trace(p, "ExpressionList"))
457	}
458
459	list = append(list, p.checkExpr(p.parseExpr()))
460	for p.tok == token.COMMA {
461		p.next()
462		list = append(list, p.checkExpr(p.parseExpr()))
463	}
464
465	return
466}
467
468func (p *parser) parseList(inRhs bool) []ast.Expr {
469	old := p.inRhs
470	p.inRhs = inRhs
471	list := p.parseExprList()
472	p.inRhs = old
473	return list
474}
475
476// ----------------------------------------------------------------------------
477// Types
478
479func (p *parser) parseType() ast.Expr {
480	if p.trace {
481		defer un(trace(p, "Type"))
482	}
483
484	typ := p.tryIdentOrType()
485
486	if typ == nil {
487		pos := p.pos
488		p.errorExpected(pos, "type")
489		p.advance(exprEnd)
490		return &ast.BadExpr{From: pos, To: p.pos}
491	}
492
493	return typ
494}
495
496func (p *parser) parseQualifiedIdent(ident *ast.Ident) ast.Expr {
497	if p.trace {
498		defer un(trace(p, "QualifiedIdent"))
499	}
500
501	typ := p.parseTypeName(ident)
502	if p.tok == token.LBRACK && p.parseTypeParams() {
503		typ = p.parseTypeInstance(typ)
504	}
505
506	return typ
507}
508
509// If the result is an identifier, it is not resolved.
510func (p *parser) parseTypeName(ident *ast.Ident) ast.Expr {
511	if p.trace {
512		defer un(trace(p, "TypeName"))
513	}
514
515	if ident == nil {
516		ident = p.parseIdent()
517	}
518
519	if p.tok == token.PERIOD {
520		// ident is a package name
521		p.next()
522		sel := p.parseIdent()
523		return &ast.SelectorExpr{X: ident, Sel: sel}
524	}
525
526	return ident
527}
528
529func (p *parser) parseArrayLen() ast.Expr {
530	if p.trace {
531		defer un(trace(p, "ArrayLen"))
532	}
533
534	p.exprLev++
535	var len ast.Expr
536	// always permit ellipsis for more fault-tolerant parsing
537	if p.tok == token.ELLIPSIS {
538		len = &ast.Ellipsis{Ellipsis: p.pos}
539		p.next()
540	} else if p.tok != token.RBRACK {
541		len = p.parseRhs()
542	}
543	p.exprLev--
544
545	return len
546}
547
// parseArrayFieldOrTypeInstance is called with x already parsed and
// the parser positioned at '['. It disambiguates between a field of
// array type (x []E or x [P]E) and a generic type instantiation
// (x[P1, P2, ...]). A nil first result indicates x[...] is a type
// instantiation; the second result is the corresponding expression.
func (p *parser) parseArrayFieldOrTypeInstance(x *ast.Ident) (*ast.Ident, ast.Expr) {
	if p.trace {
		defer un(trace(p, "ArrayFieldOrTypeInstance"))
	}

	// TODO(gri) Should we allow a trailing comma in a type argument
	//           list such as T[P,]? (We do in parseTypeInstance).
	lbrack := p.expect(token.LBRACK)
	var args []ast.Expr
	var firstComma token.Pos
	// TODO(rfindley): consider changing parseRhsOrType so that this function variable
	// is not needed.
	argparser := p.parseRhsOrType
	if !p.parseTypeParams() {
		argparser = p.parseRhs
	}
	if p.tok != token.RBRACK {
		p.exprLev++
		args = append(args, argparser())
		for p.tok == token.COMMA {
			if !firstComma.IsValid() {
				firstComma = p.pos
			}
			p.next()
			args = append(args, argparser())
		}
		p.exprLev--
	}
	rbrack := p.expect(token.RBRACK)

	if len(args) == 0 {
		// x []E
		elt := p.parseType()
		return x, &ast.ArrayType{Lbrack: lbrack, Elt: elt}
	}

	// x [P]E or x[P]
	if len(args) == 1 {
		// If a type follows, this must be an array type x [P]E.
		elt := p.tryIdentOrType()
		if elt != nil {
			// x [P]E
			return x, &ast.ArrayType{Lbrack: lbrack, Len: args[0], Elt: elt}
		}
		if !p.parseTypeParams() {
			p.error(rbrack, "missing element type in array type expression")
			return nil, &ast.BadExpr{From: args[0].Pos(), To: args[0].End()}
		}
	}

	// Multiple arguments can only be a type instantiation; without
	// type parameter support that is an error at the first comma.
	if !p.parseTypeParams() {
		p.error(firstComma, "expected ']', found ','")
		return x, &ast.BadExpr{From: args[0].Pos(), To: args[len(args)-1].End()}
	}

	// x[P], x[P1, P2], ...
	return nil, &ast.IndexExpr{X: x, Lbrack: lbrack, Index: typeparams.PackExpr(args), Rbrack: rbrack}
}
605
// parseFieldDecl parses a single struct field declaration: either an
// embedded (possibly qualified or instantiated) type, or a list of
// names followed by a type, with an optional tag string.
func (p *parser) parseFieldDecl() *ast.Field {
	if p.trace {
		defer un(trace(p, "FieldDecl"))
	}

	doc := p.leadComment

	var names []*ast.Ident
	var typ ast.Expr
	if p.tok == token.IDENT {
		name := p.parseIdent()
		// A following tag, ';' or '}' means the identifier was the
		// whole declaration: an embedded type.
		if p.tok == token.PERIOD || p.tok == token.STRING || p.tok == token.SEMICOLON || p.tok == token.RBRACE {
			// embedded type
			typ = name
			if p.tok == token.PERIOD {
				typ = p.parseQualifiedIdent(name)
			}
		} else {
			// name1, name2, ... T
			names = []*ast.Ident{name}
			for p.tok == token.COMMA {
				p.next()
				names = append(names, p.parseIdent())
			}
			// Careful dance: We don't know if we have an embedded instantiated
			// type T[P1, P2, ...] or a field T of array type []E or [P]E.
			if len(names) == 1 && p.tok == token.LBRACK {
				name, typ = p.parseArrayFieldOrTypeInstance(name)
				if name == nil {
					names = nil
				}
			} else {
				// T P
				typ = p.parseType()
			}
		}
	} else {
		// embedded, possibly generic type
		// (using the enclosing parentheses to distinguish it from a named field declaration)
		// TODO(rFindley) confirm that this doesn't allow parenthesized embedded type
		typ = p.parseType()
	}

	var tag *ast.BasicLit
	if p.tok == token.STRING {
		tag = &ast.BasicLit{ValuePos: p.pos, Kind: p.tok, Value: p.lit}
		p.next()
	}

	p.expectSemi() // call before accessing p.linecomment

	field := &ast.Field{Doc: doc, Names: names, Type: typ, Tag: tag, Comment: p.lineComment}
	return field
}
660
661func (p *parser) parseStructType() *ast.StructType {
662	if p.trace {
663		defer un(trace(p, "StructType"))
664	}
665
666	pos := p.expect(token.STRUCT)
667	lbrace := p.expect(token.LBRACE)
668	var list []*ast.Field
669	for p.tok == token.IDENT || p.tok == token.MUL || p.tok == token.LPAREN {
670		// a field declaration cannot start with a '(' but we accept
671		// it here for more robust parsing and better error messages
672		// (parseFieldDecl will check and complain if necessary)
673		list = append(list, p.parseFieldDecl())
674	}
675	rbrace := p.expect(token.RBRACE)
676
677	return &ast.StructType{
678		Struct: pos,
679		Fields: &ast.FieldList{
680			Opening: lbrace,
681			List:    list,
682			Closing: rbrace,
683		},
684	}
685}
686
687func (p *parser) parsePointerType() *ast.StarExpr {
688	if p.trace {
689		defer un(trace(p, "PointerType"))
690	}
691
692	star := p.expect(token.MUL)
693	base := p.parseType()
694
695	return &ast.StarExpr{Star: star, X: base}
696}
697
698func (p *parser) parseDotsType() *ast.Ellipsis {
699	if p.trace {
700		defer un(trace(p, "DotsType"))
701	}
702
703	pos := p.expect(token.ELLIPSIS)
704	elt := p.parseType()
705
706	return &ast.Ellipsis{Ellipsis: pos, Elt: elt}
707}
708
// A field is a (name, type) pair collected while parsing a parameter
// or type parameter list, before types are distributed across names.
type field struct {
	name *ast.Ident
	typ  ast.Expr
}
713
// parseParamDecl parses a single parameter declaration. If name is not
// nil it is the already-consumed first identifier of the declaration;
// in that case p.tok is temporarily forced to token.IDENT so the
// switch below takes the identifier case, and restored afterwards.
func (p *parser) parseParamDecl(name *ast.Ident) (f field) {
	// TODO(rFindley) compare with parser.paramDeclOrNil in the syntax package
	if p.trace {
		defer un(trace(p, "ParamDeclOrNil"))
	}

	ptok := p.tok
	if name != nil {
		p.tok = token.IDENT // force token.IDENT case in switch below
	}

	switch p.tok {
	case token.IDENT:
		if name != nil {
			f.name = name
			p.tok = ptok // restore the real current token
		} else {
			f.name = p.parseIdent()
		}
		switch p.tok {
		case token.IDENT, token.MUL, token.ARROW, token.FUNC, token.CHAN, token.MAP, token.STRUCT, token.INTERFACE, token.LPAREN:
			// name type
			f.typ = p.parseType()

		case token.LBRACK:
			// name[type1, type2, ...] or name []type or name [len]type
			f.name, f.typ = p.parseArrayFieldOrTypeInstance(f.name)

		case token.ELLIPSIS:
			// name ...type
			f.typ = p.parseDotsType()

		case token.PERIOD:
			// qualified.typename
			f.typ = p.parseQualifiedIdent(f.name)
			f.name = nil
		}

	case token.MUL, token.ARROW, token.FUNC, token.LBRACK, token.CHAN, token.MAP, token.STRUCT, token.INTERFACE, token.LPAREN:
		// type
		f.typ = p.parseType()

	case token.ELLIPSIS:
		// ...type
		// (always accepted)
		f.typ = p.parseDotsType()

	default:
		p.errorExpected(p.pos, ")")
		p.advance(exprEnd)
	}

	return
}
768
// parseParameterList parses a comma-separated list of parameter (or,
// when tparams is set, type parameter) declarations terminated by
// closing. If name0 != nil it is the already-consumed first name of
// the first declaration. After collecting the raw (name, type) pairs,
// types are distributed across names ("a, b int") and the pairs are
// grouped into []*ast.Field.
func (p *parser) parseParameterList(name0 *ast.Ident, closing token.Token, parseParamDecl func(*ast.Ident) field, tparams bool) (params []*ast.Field) {
	if p.trace {
		defer un(trace(p, "ParameterList"))
	}

	// pos anchors errors that concern the list as a whole
	pos := p.pos
	if name0 != nil {
		pos = name0.Pos()
	}

	var list []field
	var named int // number of parameters that have an explicit name and type

	for name0 != nil || p.tok != closing && p.tok != token.EOF {
		par := parseParamDecl(name0)
		name0 = nil // 1st name was consumed if present
		if par.name != nil || par.typ != nil {
			list = append(list, par)
			if par.name != nil && par.typ != nil {
				named++
			}
		}
		if !p.atComma("parameter list", closing) {
			break
		}
		p.next()
	}

	if len(list) == 0 {
		return // not uncommon
	}

	// TODO(gri) parameter distribution and conversion to []*ast.Field
	//           can be combined and made more efficient

	// distribute parameter types
	if named == 0 {
		// all unnamed => found names are type names
		for i := 0; i < len(list); i++ {
			par := &list[i]
			if typ := par.name; typ != nil {
				par.typ = typ
				par.name = nil
			}
		}
		if tparams {
			p.error(pos, "all type parameters must be named")
		}
	} else if named != len(list) {
		// some named => all must be named
		ok := true
		var typ ast.Expr
		// Walk backwards so that a declared type propagates to the
		// preceding names that share it (e.g. "a, b int").
		for i := len(list) - 1; i >= 0; i-- {
			if par := &list[i]; par.typ != nil {
				typ = par.typ
				if par.name == nil {
					ok = false
					n := ast.NewIdent("_")
					n.NamePos = typ.Pos() // correct position
					par.name = n
				}
			} else if typ != nil {
				par.typ = typ
			} else {
				// par.typ == nil && typ == nil => we only have a par.name
				ok = false
				par.typ = &ast.BadExpr{From: par.name.Pos(), To: p.pos}
			}
		}
		if !ok {
			if tparams {
				p.error(pos, "all type parameters must be named")
			} else {
				p.error(pos, "mixed named and unnamed parameters")
			}
		}
	}

	// convert list to []*ast.Field
	if named == 0 {
		// parameter list consists of types only
		for _, par := range list {
			assert(par.typ != nil, "nil type in unnamed parameter list")
			params = append(params, &ast.Field{Type: par.typ})
		}
		return
	}

	// parameter list consists of named parameters with types;
	// consecutive parameters sharing the same type expression are
	// grouped into a single *ast.Field
	var names []*ast.Ident
	var typ ast.Expr
	addParams := func() {
		assert(typ != nil, "nil type in named parameter list")
		field := &ast.Field{Names: names, Type: typ}
		params = append(params, field)
		names = nil
	}
	for _, par := range list {
		if par.typ != typ {
			if len(names) > 0 {
				addParams()
			}
			typ = par.typ
		}
		names = append(names, par.name)
	}
	if len(names) > 0 {
		addParams()
	}
	return
}
880
// parseParameters parses an optional type parameter list in square
// brackets (only when acceptTParams is set and type parameter parsing
// is enabled) followed by an ordinary parameter list in parentheses.
// tparams is nil if no (valid) type parameter list was present.
func (p *parser) parseParameters(acceptTParams bool) (tparams, params *ast.FieldList) {
	if p.trace {
		defer un(trace(p, "Parameters"))
	}

	if p.parseTypeParams() && acceptTParams && p.tok == token.LBRACK {
		opening := p.pos
		p.next()
		// [T any](params) syntax
		list := p.parseParameterList(nil, token.RBRACK, p.parseParamDecl, true)
		rbrack := p.expect(token.RBRACK)
		tparams = &ast.FieldList{Opening: opening, List: list, Closing: rbrack}
		// Type parameter lists must not be empty.
		if tparams.NumFields() == 0 {
			p.error(tparams.Closing, "empty type parameter list")
			tparams = nil // avoid follow-on errors
		}
	}

	opening := p.expect(token.LPAREN)

	var fields []*ast.Field
	if p.tok != token.RPAREN {
		fields = p.parseParameterList(nil, token.RPAREN, p.parseParamDecl, false)
	}

	rparen := p.expect(token.RPAREN)
	params = &ast.FieldList{Opening: opening, List: fields, Closing: rparen}

	return
}
912
913func (p *parser) parseResult() *ast.FieldList {
914	if p.trace {
915		defer un(trace(p, "Result"))
916	}
917
918	if p.tok == token.LPAREN {
919		_, results := p.parseParameters(false)
920		return results
921	}
922
923	typ := p.tryIdentOrType()
924	if typ != nil {
925		list := make([]*ast.Field, 1)
926		list[0] = &ast.Field{Type: typ}
927		return &ast.FieldList{List: list}
928	}
929
930	return nil
931}
932
933func (p *parser) parseFuncType() *ast.FuncType {
934	if p.trace {
935		defer un(trace(p, "FuncType"))
936	}
937
938	pos := p.expect(token.FUNC)
939	tparams, params := p.parseParameters(true)
940	if tparams != nil {
941		p.error(tparams.Pos(), "function type cannot have type parameters")
942	}
943	results := p.parseResult()
944
945	return &ast.FuncType{Func: pos, Params: params, Results: results}
946}
947
// parseMethodSpec parses one entry of an interface body: an ordinary
// method signature, a generic method (when type parameters are
// enabled), or an embedded (possibly instantiated) type.
func (p *parser) parseMethodSpec() *ast.Field {
	if p.trace {
		defer un(trace(p, "MethodSpec"))
	}

	doc := p.leadComment
	var idents []*ast.Ident
	var typ ast.Expr
	x := p.parseTypeName(nil)
	if ident, _ := x.(*ast.Ident); ident != nil {
		switch {
		case p.tok == token.LBRACK && p.parseTypeParams():
			// generic method or embedded instantiated type
			lbrack := p.pos
			p.next()
			p.exprLev++
			x := p.parseExpr()
			p.exprLev--
			// A single identifier not followed by ',' or ']' starts a
			// type parameter list; otherwise x is a type argument.
			if name0, _ := x.(*ast.Ident); name0 != nil && p.tok != token.COMMA && p.tok != token.RBRACK {
				// generic method m[T any]
				list := p.parseParameterList(name0, token.RBRACK, p.parseParamDecl, true)
				rbrack := p.expect(token.RBRACK)
				tparams := &ast.FieldList{Opening: lbrack, List: list, Closing: rbrack}
				// TODO(rfindley) refactor to share code with parseFuncType.
				_, params := p.parseParameters(false)
				results := p.parseResult()
				idents = []*ast.Ident{ident}
				typ = &ast.FuncType{Func: token.NoPos, Params: params, Results: results}
				typeparams.Set(typ, tparams)
			} else {
				// embedded instantiated type
				// TODO(rfindley) should resolve all identifiers in x.
				list := []ast.Expr{x}
				if p.atComma("type argument list", token.RBRACK) {
					p.exprLev++
					for p.tok != token.RBRACK && p.tok != token.EOF {
						list = append(list, p.parseType())
						if !p.atComma("type argument list", token.RBRACK) {
							break
						}
						p.next()
					}
					p.exprLev--
				}
				rbrack := p.expectClosing(token.RBRACK, "type argument list")
				typ = &ast.IndexExpr{X: ident, Lbrack: lbrack, Index: typeparams.PackExpr(list), Rbrack: rbrack}
			}
		case p.tok == token.LPAREN:
			// ordinary method
			// TODO(rfindley) refactor to share code with parseFuncType.
			_, params := p.parseParameters(false)
			results := p.parseResult()
			idents = []*ast.Ident{ident}
			typ = &ast.FuncType{Func: token.NoPos, Params: params, Results: results}
		default:
			// embedded type
			typ = x
		}
	} else {
		// embedded, possibly instantiated type
		typ = x
		if p.tok == token.LBRACK && p.parseTypeParams() {
			// embedded instantiated interface
			typ = p.parseTypeInstance(typ)
		}
	}
	p.expectSemi() // call before accessing p.linecomment

	spec := &ast.Field{Doc: doc, Names: idents, Type: typ, Comment: p.lineComment}

	return spec
}
1020
// parseInterfaceType parses an interface type: the interface keyword
// followed by a brace-delimited list of method specs and (when type
// parameters are enabled) "type T1, T2, ..." type lists.
func (p *parser) parseInterfaceType() *ast.InterfaceType {
	if p.trace {
		defer un(trace(p, "InterfaceType"))
	}

	pos := p.expect(token.INTERFACE)
	lbrace := p.expect(token.LBRACE)
	var list []*ast.Field
	for p.tok == token.IDENT || p.parseTypeParams() && p.tok == token.TYPE {
		if p.tok == token.IDENT {
			list = append(list, p.parseMethodSpec())
		} else {
			// all types in a type list share the same field name "type"
			// (since type is a keyword, a Go program cannot have that field name)
			name := []*ast.Ident{{NamePos: p.pos, Name: "type"}}
			p.next()
			// add each type as a field named "type"
			for _, typ := range p.parseTypeList() {
				list = append(list, &ast.Field{Names: name, Type: typ})
			}
			p.expectSemi()
		}
	}
	// TODO(rfindley): the error produced here could be improved, since we could
	// accept a identifier, 'type', or a '}' at this point.
	rbrace := p.expect(token.RBRACE)

	return &ast.InterfaceType{
		Interface: pos,
		Methods: &ast.FieldList{
			Opening: lbrace,
			List:    list,
			Closing: rbrace,
		},
	}
}
1057
1058func (p *parser) parseMapType() *ast.MapType {
1059	if p.trace {
1060		defer un(trace(p, "MapType"))
1061	}
1062
1063	pos := p.expect(token.MAP)
1064	p.expect(token.LBRACK)
1065	key := p.parseType()
1066	p.expect(token.RBRACK)
1067	value := p.parseType()
1068
1069	return &ast.MapType{Map: pos, Key: key, Value: value}
1070}
1071
// parseChanType parses a channel type in one of its three forms:
// "chan T" (bidirectional), "chan<- T" (send-only), or
// "<-chan T" (receive-only).
func (p *parser) parseChanType() *ast.ChanType {
	if p.trace {
		defer un(trace(p, "ChanType"))
	}

	pos := p.pos
	dir := ast.SEND | ast.RECV
	var arrow token.Pos
	if p.tok == token.CHAN {
		// "chan T" or "chan<- T"
		p.next()
		if p.tok == token.ARROW {
			arrow = p.pos
			p.next()
			dir = ast.SEND
		}
	} else {
		// "<-chan T"
		arrow = p.expect(token.ARROW)
		p.expect(token.CHAN)
		dir = ast.RECV
	}
	value := p.parseType()

	return &ast.ChanType{Begin: pos, Arrow: arrow, Dir: dir, Value: value}
}
1096
// parseTypeInstance parses a type instantiation typ[T1, T2, ...].
// It must only be called while type parameter parsing is enabled;
// the parser must be positioned at '['.
func (p *parser) parseTypeInstance(typ ast.Expr) ast.Expr {
	assert(p.parseTypeParams(), "parseTypeInstance while not parsing type params")
	if p.trace {
		defer un(trace(p, "TypeInstance"))
	}

	opening := p.expect(token.LBRACK)

	p.exprLev++
	var list []ast.Expr
	for p.tok != token.RBRACK && p.tok != token.EOF {
		list = append(list, p.parseType())
		if !p.atComma("type argument list", token.RBRACK) {
			break
		}
		p.next()
	}
	p.exprLev--

	closing := p.expectClosing(token.RBRACK, "type argument list")

	return &ast.IndexExpr{X: typ, Lbrack: opening, Index: typeparams.PackExpr(list), Rbrack: closing}
}
1120
// tryIdentOrType parses a type if one starts at the current token and
// returns it; it returns nil, without consuming any input, if no type
// is present.
func (p *parser) tryIdentOrType() ast.Expr {
	switch p.tok {
	case token.IDENT:
		typ := p.parseTypeName(nil)
		if p.tok == token.LBRACK && p.parseTypeParams() {
			// generic type instantiation: T[...]
			typ = p.parseTypeInstance(typ)
		}
		return typ
	case token.LBRACK:
		// array or slice type
		lbrack := p.expect(token.LBRACK)
		alen := p.parseArrayLen()
		p.expect(token.RBRACK)
		elt := p.parseType()
		return &ast.ArrayType{Lbrack: lbrack, Len: alen, Elt: elt}
	case token.STRUCT:
		return p.parseStructType()
	case token.MUL:
		return p.parsePointerType()
	case token.FUNC:
		typ := p.parseFuncType()
		return typ
	case token.INTERFACE:
		return p.parseInterfaceType()
	case token.MAP:
		return p.parseMapType()
	case token.CHAN, token.ARROW:
		return p.parseChanType()
	case token.LPAREN:
		// parenthesized type
		lparen := p.pos
		p.next()
		typ := p.parseType()
		rparen := p.expect(token.RPAREN)
		return &ast.ParenExpr{Lparen: lparen, X: typ, Rparen: rparen}
	}

	// no type found
	return nil
}
1159
1160// ----------------------------------------------------------------------------
1161// Blocks
1162
1163func (p *parser) parseStmtList() (list []ast.Stmt) {
1164	if p.trace {
1165		defer un(trace(p, "StatementList"))
1166	}
1167
1168	for p.tok != token.CASE && p.tok != token.DEFAULT && p.tok != token.RBRACE && p.tok != token.EOF {
1169		list = append(list, p.parseStmt())
1170	}
1171
1172	return
1173}
1174
1175func (p *parser) parseBody() *ast.BlockStmt {
1176	if p.trace {
1177		defer un(trace(p, "Body"))
1178	}
1179
1180	lbrace := p.expect(token.LBRACE)
1181	list := p.parseStmtList()
1182	rbrace := p.expect2(token.RBRACE)
1183
1184	return &ast.BlockStmt{Lbrace: lbrace, List: list, Rbrace: rbrace}
1185}
1186
1187func (p *parser) parseBlockStmt() *ast.BlockStmt {
1188	if p.trace {
1189		defer un(trace(p, "BlockStmt"))
1190	}
1191
1192	lbrace := p.expect(token.LBRACE)
1193	list := p.parseStmtList()
1194	rbrace := p.expect2(token.RBRACE)
1195
1196	return &ast.BlockStmt{Lbrace: lbrace, List: list, Rbrace: rbrace}
1197}
1198
1199// ----------------------------------------------------------------------------
1200// Expressions
1201
1202func (p *parser) parseFuncTypeOrLit() ast.Expr {
1203	if p.trace {
1204		defer un(trace(p, "FuncTypeOrLit"))
1205	}
1206
1207	typ := p.parseFuncType()
1208	if p.tok != token.LBRACE {
1209		// function type only
1210		return typ
1211	}
1212
1213	p.exprLev++
1214	body := p.parseBody()
1215	p.exprLev--
1216
1217	return &ast.FuncLit{Type: typ, Body: body}
1218}
1219
// parseOperand may return an expression or a raw type (incl. array
// types of the form [...]T). Callers must verify the result.
//
func (p *parser) parseOperand() ast.Expr {
	if p.trace {
		defer un(trace(p, "Operand"))
	}

	switch p.tok {
	case token.IDENT:
		x := p.parseIdent()
		return x

	case token.INT, token.FLOAT, token.IMAG, token.CHAR, token.STRING:
		// literal constant
		x := &ast.BasicLit{ValuePos: p.pos, Kind: p.tok, Value: p.lit}
		p.next()
		return x

	case token.LPAREN:
		lparen := p.pos
		p.next()
		p.exprLev++
		x := p.parseRhsOrType() // types may be parenthesized: (some type)
		p.exprLev--
		rparen := p.expect(token.RPAREN)
		return &ast.ParenExpr{Lparen: lparen, X: x, Rparen: rparen}

	case token.FUNC:
		return p.parseFuncTypeOrLit()
	}

	if typ := p.tryIdentOrType(); typ != nil { // do not consume trailing type parameters
		// could be type for composite literal or conversion
		_, isIdent := typ.(*ast.Ident)
		assert(!isIdent, "type cannot be identifier")
		return typ
	}

	// we have an error
	pos := p.pos
	p.errorExpected(pos, "operand")
	p.advance(stmtStart)
	return &ast.BadExpr{From: pos, To: p.pos}
}
1264
1265func (p *parser) parseSelector(x ast.Expr) ast.Expr {
1266	if p.trace {
1267		defer un(trace(p, "Selector"))
1268	}
1269
1270	sel := p.parseIdent()
1271
1272	return &ast.SelectorExpr{X: x, Sel: sel}
1273}
1274
1275func (p *parser) parseTypeAssertion(x ast.Expr) ast.Expr {
1276	if p.trace {
1277		defer un(trace(p, "TypeAssertion"))
1278	}
1279
1280	lparen := p.expect(token.LPAREN)
1281	var typ ast.Expr
1282	if p.tok == token.TYPE {
1283		// type switch: typ == nil
1284		p.next()
1285	} else {
1286		typ = p.parseType()
1287	}
1288	rparen := p.expect(token.RPAREN)
1289
1290	return &ast.TypeAssertExpr{X: x, Type: typ, Lparen: lparen, Rparen: rparen}
1291}
1292
// parseIndexOrSliceOrInstance parses the bracketed suffix of x, which
// may turn out to be an index expression (a[i]), a slice expression
// (a[lo:hi] or a[lo:hi:max]), or a generic instantiation (a[T1, T2]).
func (p *parser) parseIndexOrSliceOrInstance(x ast.Expr) ast.Expr {
	if p.trace {
		defer un(trace(p, "parseIndexOrSliceOrInstance"))
	}

	lbrack := p.expect(token.LBRACK)
	if p.tok == token.RBRACK {
		// empty index, slice or index expressions are not permitted;
		// accept them for parsing tolerance, but complain
		p.errorExpected(p.pos, "operand")
		rbrack := p.pos
		p.next()
		return &ast.IndexExpr{
			X:      x,
			Lbrack: lbrack,
			Index:  &ast.BadExpr{From: rbrack, To: rbrack},
			Rbrack: rbrack,
		}
	}
	p.exprLev++

	const N = 3 // change the 3 to 2 to disable 3-index slices
	var args []ast.Expr
	var index [N]ast.Expr
	var colons [N - 1]token.Pos
	var firstComma token.Pos
	if p.tok != token.COLON {
		// We can't know if we have an index expression or a type instantiation;
		// so even if we see a (named) type we are not going to be in type context.
		index[0] = p.parseRhsOrType()
	}
	ncolons := 0
	switch p.tok {
	case token.COLON:
		// slice expression
		for p.tok == token.COLON && ncolons < len(colons) {
			colons[ncolons] = p.pos
			ncolons++
			p.next()
			if p.tok != token.COLON && p.tok != token.RBRACK && p.tok != token.EOF {
				index[ncolons] = p.parseRhs()
			}
		}
	case token.COMMA:
		firstComma = p.pos
		// instance expression
		args = append(args, index[0])
		for p.tok == token.COMMA {
			p.next()
			if p.tok != token.RBRACK && p.tok != token.EOF {
				args = append(args, p.parseType())
			}
		}
	}

	p.exprLev--
	rbrack := p.expect(token.RBRACK)

	if ncolons > 0 {
		// slice expression
		slice3 := false
		if ncolons == 2 {
			slice3 = true
			// Check presence of 2nd and 3rd index here rather than during type-checking
			// to prevent erroneous programs from passing through gofmt (was issue 7305).
			if index[1] == nil {
				p.error(colons[0], "2nd index required in 3-index slice")
				index[1] = &ast.BadExpr{From: colons[0] + 1, To: colons[1]}
			}
			if index[2] == nil {
				p.error(colons[1], "3rd index required in 3-index slice")
				index[2] = &ast.BadExpr{From: colons[1] + 1, To: rbrack}
			}
		}
		return &ast.SliceExpr{X: x, Lbrack: lbrack, Low: index[0], High: index[1], Max: index[2], Slice3: slice3, Rbrack: rbrack}
	}

	if len(args) == 0 {
		// index expression
		return &ast.IndexExpr{X: x, Lbrack: lbrack, Index: index[0], Rbrack: rbrack}
	}

	// Multiple bracketed expressions: only valid as a type instantiation,
	// and only when type parameters are enabled.
	if !p.parseTypeParams() {
		p.error(firstComma, "expected ']' or ':', found ','")
		return &ast.BadExpr{From: args[0].Pos(), To: args[len(args)-1].End()}
	}

	// instance expression
	return &ast.IndexExpr{X: x, Lbrack: lbrack, Index: typeparams.PackExpr(args), Rbrack: rbrack}
}
1383
// parseCallOrConversion parses the argument list suffix "(...)" of a
// call or conversion with function (or type) operand fun. An ellipsis
// may only follow the final argument.
func (p *parser) parseCallOrConversion(fun ast.Expr) *ast.CallExpr {
	if p.trace {
		defer un(trace(p, "CallOrConversion"))
	}

	lparen := p.expect(token.LPAREN)
	p.exprLev++
	var list []ast.Expr
	var ellipsis token.Pos
	for p.tok != token.RPAREN && p.tok != token.EOF && !ellipsis.IsValid() {
		list = append(list, p.parseRhsOrType()) // builtins may expect a type: make(some type, ...)
		if p.tok == token.ELLIPSIS {
			ellipsis = p.pos
			p.next()
		}
		if !p.atComma("argument list", token.RPAREN) {
			break
		}
		p.next()
	}
	p.exprLev--
	rparen := p.expectClosing(token.RPAREN, "argument list")

	return &ast.CallExpr{Fun: fun, Lparen: lparen, Args: list, Ellipsis: ellipsis, Rparen: rparen}
}
1409
1410func (p *parser) parseValue() ast.Expr {
1411	if p.trace {
1412		defer un(trace(p, "Element"))
1413	}
1414
1415	if p.tok == token.LBRACE {
1416		return p.parseLiteralValue(nil)
1417	}
1418
1419	x := p.checkExpr(p.parseExpr())
1420
1421	return x
1422}
1423
1424func (p *parser) parseElement() ast.Expr {
1425	if p.trace {
1426		defer un(trace(p, "Element"))
1427	}
1428
1429	x := p.parseValue()
1430	if p.tok == token.COLON {
1431		colon := p.pos
1432		p.next()
1433		x = &ast.KeyValueExpr{Key: x, Colon: colon, Value: p.parseValue()}
1434	}
1435
1436	return x
1437}
1438
1439func (p *parser) parseElementList() (list []ast.Expr) {
1440	if p.trace {
1441		defer un(trace(p, "ElementList"))
1442	}
1443
1444	for p.tok != token.RBRACE && p.tok != token.EOF {
1445		list = append(list, p.parseElement())
1446		if !p.atComma("composite literal", token.RBRACE) {
1447			break
1448		}
1449		p.next()
1450	}
1451
1452	return
1453}
1454
// parseLiteralValue parses a composite literal body "{...}" for the
// given literal type typ (which may be nil for a nested, untyped
// literal value).
func (p *parser) parseLiteralValue(typ ast.Expr) ast.Expr {
	if p.trace {
		defer un(trace(p, "LiteralValue"))
	}

	lbrace := p.expect(token.LBRACE)
	var elts []ast.Expr
	p.exprLev++
	if p.tok != token.RBRACE {
		elts = p.parseElementList()
	}
	p.exprLev--
	rbrace := p.expectClosing(token.RBRACE, "composite literal")
	return &ast.CompositeLit{Type: typ, Lbrace: lbrace, Elts: elts, Rbrace: rbrace}
}
1470
// checkExpr checks that x is an expression (and not a type). If it is
// not, an error is reported and a BadExpr replaces x.
func (p *parser) checkExpr(x ast.Expr) ast.Expr {
	// The empty cases enumerate the node types that are acceptable as
	// expressions; they fall through and leave x unchanged.
	switch unparen(x).(type) {
	case *ast.BadExpr:
	case *ast.Ident:
	case *ast.BasicLit:
	case *ast.FuncLit:
	case *ast.CompositeLit:
	case *ast.ParenExpr:
		panic("unreachable")
	case *ast.SelectorExpr:
	case *ast.IndexExpr:
	case *ast.SliceExpr:
	case *ast.TypeAssertExpr:
		// If t.Type == nil we have a type assertion of the form
		// y.(type), which is only allowed in type switch expressions.
		// It's hard to exclude those but for the case where we are in
		// a type switch. Instead be lenient and test this in the type
		// checker.
	case *ast.CallExpr:
	case *ast.StarExpr:
	case *ast.UnaryExpr:
	case *ast.BinaryExpr:
	default:
		// all other nodes are not proper expressions
		p.errorExpected(x.Pos(), "expression")
		x = &ast.BadExpr{From: x.Pos(), To: p.safePos(x.End())}
	}
	return x
}
1501
// If x is of the form (T), unparen returns unparen(T), otherwise it returns x.
func unparen(x ast.Expr) ast.Expr {
	for {
		paren, isParen := x.(*ast.ParenExpr)
		if !isParen {
			return x
		}
		x = paren.X
	}
}
1509
1510// checkExprOrType checks that x is an expression or a type
1511// (and not a raw type such as [...]T).
1512//
1513func (p *parser) checkExprOrType(x ast.Expr) ast.Expr {
1514	switch t := unparen(x).(type) {
1515	case *ast.ParenExpr:
1516		panic("unreachable")
1517	case *ast.ArrayType:
1518		if len, isEllipsis := t.Len.(*ast.Ellipsis); isEllipsis {
1519			p.error(len.Pos(), "expected array length, found '...'")
1520			x = &ast.BadExpr{From: x.Pos(), To: p.safePos(x.End())}
1521		}
1522	}
1523
1524	// all other nodes are expressions or types
1525	return x
1526}
1527
// parsePrimaryExpr parses an operand followed by any number of
// suffixes: selectors, type assertions, index/slice/instantiation
// expressions, calls, and composite literal bodies.
func (p *parser) parsePrimaryExpr() (x ast.Expr) {
	if p.trace {
		defer un(trace(p, "PrimaryExpr"))
	}

	x = p.parseOperand()
	for {
		switch p.tok {
		case token.PERIOD:
			p.next()
			switch p.tok {
			case token.IDENT:
				x = p.parseSelector(p.checkExprOrType(x))
			case token.LPAREN:
				x = p.parseTypeAssertion(p.checkExpr(x))
			default:
				pos := p.pos
				p.errorExpected(pos, "selector or type assertion")
				// TODO(rFindley) The check for token.RBRACE below is a targeted fix
				//                to error recovery sufficient to make the x/tools tests to
				//                pass with the new parsing logic introduced for type
				//                parameters. Remove this once error recovery has been
				//                more generally reconsidered.
				if p.tok != token.RBRACE {
					p.next() // make progress
				}
				sel := &ast.Ident{NamePos: pos, Name: "_"}
				x = &ast.SelectorExpr{X: x, Sel: sel}
			}
		case token.LBRACK:
			x = p.parseIndexOrSliceOrInstance(p.checkExpr(x))
		case token.LPAREN:
			x = p.parseCallOrConversion(p.checkExprOrType(x))
		case token.LBRACE:
			// operand may have returned a parenthesized complit
			// type; accept it but complain if we have a complit
			t := unparen(x)
			// determine if '{' belongs to a composite literal or a block statement
			switch t.(type) {
			case *ast.BadExpr, *ast.Ident, *ast.SelectorExpr:
				if p.exprLev < 0 {
					return
				}
				// x is possibly a composite literal type
			case *ast.IndexExpr:
				if p.exprLev < 0 {
					return
				}
				// x is possibly a composite literal type
			case *ast.ArrayType, *ast.StructType, *ast.MapType:
				// x is a composite literal type
			default:
				return
			}
			if t != x {
				p.error(t.Pos(), "cannot parenthesize type in composite literal")
				// already progressed, no need to advance
			}
			x = p.parseLiteralValue(x)
		default:
			return
		}
	}
}
1592
// parseUnaryExpr parses a unary expression. The result may also be a
// channel type (for the "<-chan T" forms) or a pointer type; callers
// must check the result.
func (p *parser) parseUnaryExpr() ast.Expr {
	if p.trace {
		defer un(trace(p, "UnaryExpr"))
	}

	switch p.tok {
	case token.ADD, token.SUB, token.NOT, token.XOR, token.AND:
		pos, op := p.pos, p.tok
		p.next()
		x := p.parseUnaryExpr()
		return &ast.UnaryExpr{OpPos: pos, Op: op, X: p.checkExpr(x)}

	case token.ARROW:
		// channel type or receive expression
		arrow := p.pos
		p.next()

		// If the next token is token.CHAN we still don't know if it
		// is a channel type or a receive operation - we only know
		// once we have found the end of the unary expression. There
		// are two cases:
		//
		//   <- type  => (<-type) must be channel type
		//   <- expr  => <-(expr) is a receive from an expression
		//
		// In the first case, the arrow must be re-associated with
		// the channel type parsed already:
		//
		//   <- (chan type)    =>  (<-chan type)
		//   <- (chan<- type)  =>  (<-chan (<-type))

		x := p.parseUnaryExpr()

		// determine which case we have
		if typ, ok := x.(*ast.ChanType); ok {
			// (<-type)

			// re-associate position info and <-
			dir := ast.SEND
			for ok && dir == ast.SEND {
				if typ.Dir == ast.RECV {
					// error: (<-type) is (<-(<-chan T))
					p.errorExpected(typ.Arrow, "'chan'")
				}
				arrow, typ.Begin, typ.Arrow = typ.Arrow, arrow, arrow
				dir, typ.Dir = typ.Dir, ast.RECV
				typ, ok = typ.Value.(*ast.ChanType)
			}
			if dir == ast.SEND {
				p.errorExpected(arrow, "channel type")
			}

			return x
		}

		// <-(expr)
		return &ast.UnaryExpr{OpPos: arrow, Op: token.ARROW, X: p.checkExpr(x)}

	case token.MUL:
		// pointer type or unary "*" expression
		pos := p.pos
		p.next()
		x := p.parseUnaryExpr()
		return &ast.StarExpr{Star: pos, X: p.checkExprOrType(x)}
	}

	return p.parsePrimaryExpr()
}
1661
1662func (p *parser) tokPrec() (token.Token, int) {
1663	tok := p.tok
1664	if p.inRhs && tok == token.ASSIGN {
1665		tok = token.EQL
1666	}
1667	return tok, tok.Precedence()
1668}
1669
// parseBinaryExpr parses a binary expression containing only operators
// with precedence >= prec1, using precedence climbing.
func (p *parser) parseBinaryExpr(prec1 int) ast.Expr {
	if p.trace {
		defer un(trace(p, "BinaryExpr"))
	}

	x := p.parseUnaryExpr()
	for {
		op, oprec := p.tokPrec()
		if oprec < prec1 {
			// next operator binds less tightly; let the caller handle it
			return x
		}
		pos := p.expect(op)
		// oprec+1 makes same-precedence operators left-associative
		y := p.parseBinaryExpr(oprec + 1)
		x = &ast.BinaryExpr{X: p.checkExpr(x), OpPos: pos, Op: op, Y: p.checkExpr(y)}
	}
}
1686
1687// The result may be a type or even a raw type ([...]int). Callers must
1688// check the result (using checkExpr or checkExprOrType), depending on
1689// context.
1690func (p *parser) parseExpr() ast.Expr {
1691	if p.trace {
1692		defer un(trace(p, "Expression"))
1693	}
1694
1695	return p.parseBinaryExpr(token.LowestPrec + 1)
1696}
1697
1698func (p *parser) parseRhs() ast.Expr {
1699	old := p.inRhs
1700	p.inRhs = true
1701	x := p.checkExpr(p.parseExpr())
1702	p.inRhs = old
1703	return x
1704}
1705
1706func (p *parser) parseRhsOrType() ast.Expr {
1707	old := p.inRhs
1708	p.inRhs = true
1709	x := p.checkExprOrType(p.parseExpr())
1710	p.inRhs = old
1711	return x
1712}
1713
1714// ----------------------------------------------------------------------------
1715// Statements
1716
// Parsing modes for parseSimpleStmt.
const (
	basic = iota // plain simple statement; no label, no range clause
	labelOk      // labeled statements permitted
	rangeOk      // range clauses permitted (used by for statements)
)
1723
// parseSimpleStmt returns true as 2nd result if it parsed the assignment
// of a range clause (with mode == rangeOk). The returned statement is an
// assignment with a right-hand side that is a single unary expression of
// the form "range x". No guarantees are given for the left-hand side.
//
// mode must be one of basic, labelOk, or rangeOk and controls whether
// labeled statements and range clauses are accepted.
func (p *parser) parseSimpleStmt(mode int) (ast.Stmt, bool) {
	if p.trace {
		defer un(trace(p, "SimpleStmt"))
	}

	x := p.parseList(false)

	switch p.tok {
	case
		token.DEFINE, token.ASSIGN, token.ADD_ASSIGN,
		token.SUB_ASSIGN, token.MUL_ASSIGN, token.QUO_ASSIGN,
		token.REM_ASSIGN, token.AND_ASSIGN, token.OR_ASSIGN,
		token.XOR_ASSIGN, token.SHL_ASSIGN, token.SHR_ASSIGN, token.AND_NOT_ASSIGN:
		// assignment statement, possibly part of a range clause
		pos, tok := p.pos, p.tok
		p.next()
		var y []ast.Expr
		isRange := false
		if mode == rangeOk && p.tok == token.RANGE && (tok == token.DEFINE || tok == token.ASSIGN) {
			// "x := range y" / "x = range y": wrap the range clause in a
			// unary RANGE expression, as documented above.
			pos := p.pos
			p.next()
			y = []ast.Expr{&ast.UnaryExpr{OpPos: pos, Op: token.RANGE, X: p.parseRhs()}}
			isRange = true
		} else {
			y = p.parseList(true)
		}
		as := &ast.AssignStmt{Lhs: x, TokPos: pos, Tok: tok, Rhs: y}
		if tok == token.DEFINE {
			// ':=' requires plain identifiers on the left-hand side
			p.checkAssignStmt(as)
		}
		return as, isRange
	}

	if len(x) > 1 {
		p.errorExpected(x[0].Pos(), "1 expression")
		// continue with first expression
	}

	switch p.tok {
	case token.COLON:
		// labeled statement
		colon := p.pos
		p.next()
		if label, isIdent := x[0].(*ast.Ident); mode == labelOk && isIdent {
			// Go spec: The scope of a label is the body of the function
			// in which it is declared and excludes the body of any nested
			// function.
			stmt := &ast.LabeledStmt{Label: label, Colon: colon, Stmt: p.parseStmt()}
			return stmt, false
		}
		// The label declaration typically starts at x[0].Pos(), but the label
		// declaration may be erroneous due to a token after that position (and
		// before the ':'). If SpuriousErrors is not set, the (only) error
		// reported for the line is the illegal label error instead of the token
		// before the ':' that caused the problem. Thus, use the (latest) colon
		// position for error reporting.
		p.error(colon, "illegal label declaration")
		return &ast.BadStmt{From: x[0].Pos(), To: colon + 1}, false

	case token.ARROW:
		// send statement
		arrow := p.pos
		p.next()
		y := p.parseRhs()
		return &ast.SendStmt{Chan: x[0], Arrow: arrow, Value: y}, false

	case token.INC, token.DEC:
		// increment or decrement
		s := &ast.IncDecStmt{X: x[0], TokPos: p.pos, Tok: p.tok}
		p.next()
		return s, false
	}

	// expression
	return &ast.ExprStmt{X: x[0]}, false
}
1804
1805func (p *parser) checkAssignStmt(as *ast.AssignStmt) {
1806	for _, x := range as.Lhs {
1807		if _, isIdent := x.(*ast.Ident); !isIdent {
1808			p.errorExpected(x.Pos(), "identifier on left side of :=")
1809		}
1810	}
1811}
1812
1813func (p *parser) parseCallExpr(callType string) *ast.CallExpr {
1814	x := p.parseRhsOrType() // could be a conversion: (some type)(x)
1815	if call, isCall := x.(*ast.CallExpr); isCall {
1816		return call
1817	}
1818	if _, isBad := x.(*ast.BadExpr); !isBad {
1819		// only report error if it's a new one
1820		p.error(p.safePos(x.End()), fmt.Sprintf("function must be invoked in %s statement", callType))
1821	}
1822	return nil
1823}
1824
1825func (p *parser) parseGoStmt() ast.Stmt {
1826	if p.trace {
1827		defer un(trace(p, "GoStmt"))
1828	}
1829
1830	pos := p.expect(token.GO)
1831	call := p.parseCallExpr("go")
1832	p.expectSemi()
1833	if call == nil {
1834		return &ast.BadStmt{From: pos, To: pos + 2} // len("go")
1835	}
1836
1837	return &ast.GoStmt{Go: pos, Call: call}
1838}
1839
1840func (p *parser) parseDeferStmt() ast.Stmt {
1841	if p.trace {
1842		defer un(trace(p, "DeferStmt"))
1843	}
1844
1845	pos := p.expect(token.DEFER)
1846	call := p.parseCallExpr("defer")
1847	p.expectSemi()
1848	if call == nil {
1849		return &ast.BadStmt{From: pos, To: pos + 5} // len("defer")
1850	}
1851
1852	return &ast.DeferStmt{Defer: pos, Call: call}
1853}
1854
1855func (p *parser) parseReturnStmt() *ast.ReturnStmt {
1856	if p.trace {
1857		defer un(trace(p, "ReturnStmt"))
1858	}
1859
1860	pos := p.pos
1861	p.expect(token.RETURN)
1862	var x []ast.Expr
1863	if p.tok != token.SEMICOLON && p.tok != token.RBRACE {
1864		x = p.parseList(true)
1865	}
1866	p.expectSemi()
1867
1868	return &ast.ReturnStmt{Return: pos, Results: x}
1869}
1870
1871func (p *parser) parseBranchStmt(tok token.Token) *ast.BranchStmt {
1872	if p.trace {
1873		defer un(trace(p, "BranchStmt"))
1874	}
1875
1876	pos := p.expect(tok)
1877	var label *ast.Ident
1878	if tok != token.FALLTHROUGH && p.tok == token.IDENT {
1879		label = p.parseIdent()
1880	}
1881	p.expectSemi()
1882
1883	return &ast.BranchStmt{TokPos: pos, Tok: tok, Label: label}
1884}
1885
1886func (p *parser) makeExpr(s ast.Stmt, want string) ast.Expr {
1887	if s == nil {
1888		return nil
1889	}
1890	if es, isExpr := s.(*ast.ExprStmt); isExpr {
1891		return p.checkExpr(es.X)
1892	}
1893	found := "simple statement"
1894	if _, isAss := s.(*ast.AssignStmt); isAss {
1895		found = "assignment"
1896	}
1897	p.error(s.Pos(), fmt.Sprintf("expected %s, found %s (missing parentheses around composite literal?)", want, found))
1898	return &ast.BadExpr{From: s.Pos(), To: p.safePos(s.End())}
1899}
1900
1901// parseIfHeader is an adjusted version of parser.header
1902// in cmd/compile/internal/syntax/parser.go, which has
1903// been tuned for better error handling.
1904func (p *parser) parseIfHeader() (init ast.Stmt, cond ast.Expr) {
1905	if p.tok == token.LBRACE {
1906		p.error(p.pos, "missing condition in if statement")
1907		cond = &ast.BadExpr{From: p.pos, To: p.pos}
1908		return
1909	}
1910	// p.tok != token.LBRACE
1911
1912	prevLev := p.exprLev
1913	p.exprLev = -1
1914
1915	if p.tok != token.SEMICOLON {
1916		// accept potential variable declaration but complain
1917		if p.tok == token.VAR {
1918			p.next()
1919			p.error(p.pos, "var declaration not allowed in 'IF' initializer")
1920		}
1921		init, _ = p.parseSimpleStmt(basic)
1922	}
1923
1924	var condStmt ast.Stmt
1925	var semi struct {
1926		pos token.Pos
1927		lit string // ";" or "\n"; valid if pos.IsValid()
1928	}
1929	if p.tok != token.LBRACE {
1930		if p.tok == token.SEMICOLON {
1931			semi.pos = p.pos
1932			semi.lit = p.lit
1933			p.next()
1934		} else {
1935			p.expect(token.SEMICOLON)
1936		}
1937		if p.tok != token.LBRACE {
1938			condStmt, _ = p.parseSimpleStmt(basic)
1939		}
1940	} else {
1941		condStmt = init
1942		init = nil
1943	}
1944
1945	if condStmt != nil {
1946		cond = p.makeExpr(condStmt, "boolean expression")
1947	} else if semi.pos.IsValid() {
1948		if semi.lit == "\n" {
1949			p.error(semi.pos, "unexpected newline, expecting { after if clause")
1950		} else {
1951			p.error(semi.pos, "missing condition in if statement")
1952		}
1953	}
1954
1955	// make sure we have a valid AST
1956	if cond == nil {
1957		cond = &ast.BadExpr{From: p.pos, To: p.pos}
1958	}
1959
1960	p.exprLev = prevLev
1961	return
1962}
1963
// parseIfStmt parses an if statement: an optional init statement and
// condition (see parseIfHeader), the body, and an optional else branch,
// which must itself be an if statement or a block.
func (p *parser) parseIfStmt() *ast.IfStmt {
	if p.trace {
		defer un(trace(p, "IfStmt"))
	}

	pos := p.expect(token.IF)

	init, cond := p.parseIfHeader()
	body := p.parseBlockStmt()

	var else_ ast.Stmt
	if p.tok == token.ELSE {
		p.next()
		switch p.tok {
		case token.IF:
			// "else if ...": no semicolon expected afterwards
			else_ = p.parseIfStmt()
		case token.LBRACE:
			else_ = p.parseBlockStmt()
			p.expectSemi()
		default:
			p.errorExpected(p.pos, "if statement or block")
			else_ = &ast.BadStmt{From: p.pos, To: p.pos}
		}
	} else {
		p.expectSemi()
	}

	return &ast.IfStmt{If: pos, Init: init, Cond: cond, Body: body, Else: else_}
}
1993
1994func (p *parser) parseTypeList() (list []ast.Expr) {
1995	if p.trace {
1996		defer un(trace(p, "TypeList"))
1997	}
1998
1999	list = append(list, p.parseType())
2000	for p.tok == token.COMMA {
2001		p.next()
2002		list = append(list, p.parseType())
2003	}
2004
2005	return
2006}
2007
// parseCaseClause parses one "case ...:" or "default:" clause of a
// switch statement body; typeSwitch selects between parsing a type
// list and an expression list after "case".
func (p *parser) parseCaseClause(typeSwitch bool) *ast.CaseClause {
	if p.trace {
		defer un(trace(p, "CaseClause"))
	}

	pos := p.pos
	var list []ast.Expr
	if p.tok == token.CASE {
		p.next()
		if typeSwitch {
			list = p.parseTypeList()
		} else {
			list = p.parseList(true)
		}
	} else {
		// "default:" clause; List stays nil
		p.expect(token.DEFAULT)
	}

	colon := p.expect(token.COLON)
	body := p.parseStmtList()

	return &ast.CaseClause{Case: pos, List: list, Colon: colon, Body: body}
}
2031
// isTypeSwitchAssert reports whether x has the form "x.(type)", i.e. a
// type assertion whose asserted type is nil.
func isTypeSwitchAssert(x ast.Expr) bool {
	if a, ok := x.(*ast.TypeAssertExpr); ok {
		return a.Type == nil
	}
	return false
}
2036
// isTypeSwitchGuard reports whether s is a type switch guard: either an
// expression statement "x.(type)" or an assignment "v := x.(type)". As
// a side effect it reports an error (but still returns true) for the
// erroneous form "v = x.(type)".
func (p *parser) isTypeSwitchGuard(s ast.Stmt) bool {
	switch t := s.(type) {
	case *ast.ExprStmt:
		// x.(type)
		return isTypeSwitchAssert(t.X)
	case *ast.AssignStmt:
		// v := x.(type)
		if len(t.Lhs) == 1 && len(t.Rhs) == 1 && isTypeSwitchAssert(t.Rhs[0]) {
			switch t.Tok {
			case token.ASSIGN:
				// permit v = x.(type) but complain
				p.error(t.TokPos, "expected ':=', found '='")
				fallthrough
			case token.DEFINE:
				return true
			}
		}
	}
	return false
}
2057
// parseSwitchStmt parses an expression switch or a type switch; which
// one is decided by inspecting the parsed header (see isTypeSwitchGuard).
func (p *parser) parseSwitchStmt() ast.Stmt {
	if p.trace {
		defer un(trace(p, "SwitchStmt"))
	}

	pos := p.expect(token.SWITCH)

	var s1, s2 ast.Stmt
	if p.tok != token.LBRACE {
		prevLev := p.exprLev
		p.exprLev = -1 // disable composite literals inside the header
		if p.tok != token.SEMICOLON {
			s2, _ = p.parseSimpleStmt(basic)
		}
		if p.tok == token.SEMICOLON {
			// a semicolon follows: what we parsed so far was the init
			// statement, and the tag/guard (if any) comes next
			p.next()
			s1 = s2
			s2 = nil
			if p.tok != token.LBRACE {
				// A TypeSwitchGuard may declare a variable in addition
				// to the variable declared in the initial SimpleStmt.
				// Introduce extra scope to avoid redeclaration errors:
				//
				//	switch t := 0; t := x.(T) { ... }
				//
				// (this code is not valid Go because the first t
				// cannot be accessed and thus is never used, the extra
				// scope is needed for the correct error message).
				//
				// If we don't have a type switch, s2 must be an expression.
				// Having the extra nested but empty scope won't affect it.
				s2, _ = p.parseSimpleStmt(basic)
			}
		}
		p.exprLev = prevLev
	}

	typeSwitch := p.isTypeSwitchGuard(s2)
	lbrace := p.expect(token.LBRACE)
	var list []ast.Stmt
	for p.tok == token.CASE || p.tok == token.DEFAULT {
		list = append(list, p.parseCaseClause(typeSwitch))
	}
	rbrace := p.expect(token.RBRACE)
	p.expectSemi()
	body := &ast.BlockStmt{Lbrace: lbrace, List: list, Rbrace: rbrace}

	if typeSwitch {
		return &ast.TypeSwitchStmt{Switch: pos, Init: s1, Assign: s2, Body: body}
	}

	return &ast.SwitchStmt{Switch: pos, Init: s1, Tag: p.makeExpr(s2, "switch expression"), Body: body}
}
2111
// parseCommClause parses one communication clause of a select
// statement: "case" followed by a send or receive statement, or
// "default", then ":" and the clause body.
func (p *parser) parseCommClause() *ast.CommClause {
	if p.trace {
		defer un(trace(p, "CommClause"))
	}

	pos := p.pos
	var comm ast.Stmt
	if p.tok == token.CASE {
		p.next()
		lhs := p.parseList(false)
		if p.tok == token.ARROW {
			// SendStmt
			if len(lhs) > 1 {
				p.errorExpected(lhs[0].Pos(), "1 expression")
				// continue with first expression
			}
			arrow := p.pos
			p.next()
			rhs := p.parseRhs()
			comm = &ast.SendStmt{Chan: lhs[0], Arrow: arrow, Value: rhs}
		} else {
			// RecvStmt
			if tok := p.tok; tok == token.ASSIGN || tok == token.DEFINE {
				// RecvStmt with assignment
				if len(lhs) > 2 {
					p.errorExpected(lhs[0].Pos(), "1 or 2 expressions")
					// continue with first two expressions
					lhs = lhs[0:2]
				}
				pos := p.pos
				p.next()
				rhs := p.parseRhs()
				as := &ast.AssignStmt{Lhs: lhs, TokPos: pos, Tok: tok, Rhs: []ast.Expr{rhs}}
				if tok == token.DEFINE {
					// ':=' requires plain identifiers on the left
					p.checkAssignStmt(as)
				}
				comm = as
			} else {
				// lhs must be single receive operation
				if len(lhs) > 1 {
					p.errorExpected(lhs[0].Pos(), "1 expression")
					// continue with first expression
				}
				comm = &ast.ExprStmt{X: lhs[0]}
			}
		}
	} else {
		// "default:" clause; Comm stays nil
		p.expect(token.DEFAULT)
	}

	colon := p.expect(token.COLON)
	body := p.parseStmtList()

	return &ast.CommClause{Case: pos, Comm: comm, Colon: colon, Body: body}
}
2167
2168func (p *parser) parseSelectStmt() *ast.SelectStmt {
2169	if p.trace {
2170		defer un(trace(p, "SelectStmt"))
2171	}
2172
2173	pos := p.expect(token.SELECT)
2174	lbrace := p.expect(token.LBRACE)
2175	var list []ast.Stmt
2176	for p.tok == token.CASE || p.tok == token.DEFAULT {
2177		list = append(list, p.parseCommClause())
2178	}
2179	rbrace := p.expect(token.RBRACE)
2180	p.expectSemi()
2181	body := &ast.BlockStmt{Lbrace: lbrace, List: list, Rbrace: rbrace}
2182
2183	return &ast.SelectStmt{Select: pos, Body: body}
2184}
2185
// parseForStmt parses a for statement in any of its forms: a plain
// loop, a loop with condition, a 3-clause loop, or a range loop (for
// which a RangeStmt is returned).
func (p *parser) parseForStmt() ast.Stmt {
	if p.trace {
		defer un(trace(p, "ForStmt"))
	}

	pos := p.expect(token.FOR)

	var s1, s2, s3 ast.Stmt
	var isRange bool
	if p.tok != token.LBRACE {
		prevLev := p.exprLev
		p.exprLev = -1 // disable composite literals inside the header
		if p.tok != token.SEMICOLON {
			if p.tok == token.RANGE {
				// "for range x" (nil lhs in assignment)
				pos := p.pos
				p.next()
				y := []ast.Expr{&ast.UnaryExpr{OpPos: pos, Op: token.RANGE, X: p.parseRhs()}}
				s2 = &ast.AssignStmt{Rhs: y}
				isRange = true
			} else {
				s2, isRange = p.parseSimpleStmt(rangeOk)
			}
		}
		if !isRange && p.tok == token.SEMICOLON {
			// 3-clause form: what we parsed was the init statement
			p.next()
			s1 = s2
			s2 = nil
			if p.tok != token.SEMICOLON {
				s2, _ = p.parseSimpleStmt(basic)
			}
			p.expectSemi()
			if p.tok != token.LBRACE {
				s3, _ = p.parseSimpleStmt(basic)
			}
		}
		p.exprLev = prevLev
	}

	body := p.parseBlockStmt()
	p.expectSemi()

	if isRange {
		as := s2.(*ast.AssignStmt)
		// check lhs
		var key, value ast.Expr
		switch len(as.Lhs) {
		case 0:
			// nothing to do
		case 1:
			key = as.Lhs[0]
		case 2:
			key, value = as.Lhs[0], as.Lhs[1]
		default:
			p.errorExpected(as.Lhs[len(as.Lhs)-1].Pos(), "at most 2 expressions")
			return &ast.BadStmt{From: pos, To: p.safePos(body.End())}
		}
		// parseSimpleStmt returned a right-hand side that
		// is a single unary expression of the form "range x"
		x := as.Rhs[0].(*ast.UnaryExpr).X
		return &ast.RangeStmt{
			For:    pos,
			Key:    key,
			Value:  value,
			TokPos: as.TokPos,
			Tok:    as.Tok,
			X:      x,
			Body:   body,
		}
	}

	// regular for statement
	return &ast.ForStmt{
		For:  pos,
		Init: s1,
		Cond: p.makeExpr(s2, "boolean or range expression"),
		Post: s3,
		Body: body,
	}
}
2266
2267func (p *parser) parseStmt() (s ast.Stmt) {
2268	if p.trace {
2269		defer un(trace(p, "Statement"))
2270	}
2271
2272	switch p.tok {
2273	case token.CONST, token.TYPE, token.VAR:
2274		s = &ast.DeclStmt{Decl: p.parseDecl(stmtStart)}
2275	case
2276		// tokens that may start an expression
2277		token.IDENT, token.INT, token.FLOAT, token.IMAG, token.CHAR, token.STRING, token.FUNC, token.LPAREN, // operands
2278		token.LBRACK, token.STRUCT, token.MAP, token.CHAN, token.INTERFACE, // composite types
2279		token.ADD, token.SUB, token.MUL, token.AND, token.XOR, token.ARROW, token.NOT: // unary operators
2280		s, _ = p.parseSimpleStmt(labelOk)
2281		// because of the required look-ahead, labeled statements are
2282		// parsed by parseSimpleStmt - don't expect a semicolon after
2283		// them
2284		if _, isLabeledStmt := s.(*ast.LabeledStmt); !isLabeledStmt {
2285			p.expectSemi()
2286		}
2287	case token.GO:
2288		s = p.parseGoStmt()
2289	case token.DEFER:
2290		s = p.parseDeferStmt()
2291	case token.RETURN:
2292		s = p.parseReturnStmt()
2293	case token.BREAK, token.CONTINUE, token.GOTO, token.FALLTHROUGH:
2294		s = p.parseBranchStmt(p.tok)
2295	case token.LBRACE:
2296		s = p.parseBlockStmt()
2297		p.expectSemi()
2298	case token.IF:
2299		s = p.parseIfStmt()
2300	case token.SWITCH:
2301		s = p.parseSwitchStmt()
2302	case token.SELECT:
2303		s = p.parseSelectStmt()
2304	case token.FOR:
2305		s = p.parseForStmt()
2306	case token.SEMICOLON:
2307		// Is it ever possible to have an implicit semicolon
2308		// producing an empty statement in a valid program?
2309		// (handle correctly anyway)
2310		s = &ast.EmptyStmt{Semicolon: p.pos, Implicit: p.lit == "\n"}
2311		p.next()
2312	case token.RBRACE:
2313		// a semicolon may be omitted before a closing "}"
2314		s = &ast.EmptyStmt{Semicolon: p.pos, Implicit: true}
2315	default:
2316		// no statement found
2317		pos := p.pos
2318		p.errorExpected(pos, "statement")
2319		p.advance(stmtStart)
2320		s = &ast.BadStmt{From: pos, To: p.pos}
2321	}
2322
2323	return
2324}
2325
2326// ----------------------------------------------------------------------------
2327// Declarations
2328
// A parseSpecFunction parses a single declaration spec (import, const,
// type, or var). doc is the spec's leading comment group, pos the
// position of the declaration keyword, keyword the declaration token,
// and iota the index of the spec within its parenthesized group.
type parseSpecFunction func(doc *ast.CommentGroup, pos token.Pos, keyword token.Token, iota int) ast.Spec
2330
// isValidImport reports whether lit, a string literal as produced by
// go/scanner, denotes a valid import path: a non-empty string whose
// characters are all graphic, non-space, and not in the set of
// punctuation characters disallowed for import paths.
func isValidImport(lit string) bool {
	const reject = `!"#$%&'()*,:;<=>?[\]^{|}` + "`\uFFFD"
	path, _ := strconv.Unquote(lit) // go/scanner returns a legal string literal
	if path == "" {
		return false
	}
	for _, r := range path {
		if unicode.IsSpace(r) || !unicode.IsGraphic(r) || strings.ContainsRune(reject, r) {
			return false
		}
	}
	return true
}
2341
2342func (p *parser) parseImportSpec(doc *ast.CommentGroup, _ token.Pos, _ token.Token, _ int) ast.Spec {
2343	if p.trace {
2344		defer un(trace(p, "ImportSpec"))
2345	}
2346
2347	var ident *ast.Ident
2348	switch p.tok {
2349	case token.PERIOD:
2350		ident = &ast.Ident{NamePos: p.pos, Name: "."}
2351		p.next()
2352	case token.IDENT:
2353		ident = p.parseIdent()
2354	}
2355
2356	pos := p.pos
2357	var path string
2358	if p.tok == token.STRING {
2359		path = p.lit
2360		if !isValidImport(path) {
2361			p.error(pos, "invalid import path: "+path)
2362		}
2363		p.next()
2364	} else {
2365		p.expect(token.STRING) // use expect() error handling
2366	}
2367	p.expectSemi() // call before accessing p.linecomment
2368
2369	// collect imports
2370	spec := &ast.ImportSpec{
2371		Doc:     doc,
2372		Name:    ident,
2373		Path:    &ast.BasicLit{ValuePos: pos, Kind: token.STRING, Value: path},
2374		Comment: p.lineComment,
2375	}
2376	p.imports = append(p.imports, spec)
2377
2378	return spec
2379}
2380
2381func (p *parser) parseValueSpec(doc *ast.CommentGroup, _ token.Pos, keyword token.Token, iota int) ast.Spec {
2382	if p.trace {
2383		defer un(trace(p, keyword.String()+"Spec"))
2384	}
2385
2386	pos := p.pos
2387	idents := p.parseIdentList()
2388	typ := p.tryIdentOrType()
2389	var values []ast.Expr
2390	// always permit optional initialization for more tolerant parsing
2391	if p.tok == token.ASSIGN {
2392		p.next()
2393		values = p.parseList(true)
2394	}
2395	p.expectSemi() // call before accessing p.linecomment
2396
2397	switch keyword {
2398	case token.VAR:
2399		if typ == nil && values == nil {
2400			p.error(pos, "missing variable type or initialization")
2401		}
2402	case token.CONST:
2403		if values == nil && (iota == 0 || typ != nil) {
2404			p.error(pos, "missing constant value")
2405		}
2406	}
2407
2408	spec := &ast.ValueSpec{
2409		Doc:     doc,
2410		Names:   idents,
2411		Type:    typ,
2412		Values:  values,
2413		Comment: p.lineComment,
2414	}
2415	return spec
2416}
2417
2418func (p *parser) parseGenericType(spec *ast.TypeSpec, openPos token.Pos, name0 *ast.Ident, closeTok token.Token) {
2419	list := p.parseParameterList(name0, closeTok, p.parseParamDecl, true)
2420	closePos := p.expect(closeTok)
2421	typeparams.Set(spec, &ast.FieldList{Opening: openPos, List: list, Closing: closePos})
2422	// Type alias cannot have type parameters. Accept them for robustness but complain.
2423	if p.tok == token.ASSIGN {
2424		p.error(p.pos, "generic type cannot be alias")
2425		p.next()
2426	}
2427	spec.Type = p.parseType()
2428}
2429
// parseTypeSpec parses a single type spec: a name followed by either a
// type parameter list (generic declaration), an array type — whose
// leading "[" makes it ambiguous with type parameters — or an ordinary
// (possibly aliased) type.
func (p *parser) parseTypeSpec(doc *ast.CommentGroup, _ token.Pos, _ token.Token, _ int) ast.Spec {
	if p.trace {
		defer un(trace(p, "TypeSpec"))
	}

	ident := p.parseIdent()
	spec := &ast.TypeSpec{Doc: doc, Name: ident}

	switch p.tok {
	case token.LBRACK:
		// After "type Name [" the following tokens may belong either to
		// an array length expression or to a type parameter list.
		lbrack := p.pos
		p.next()
		if p.tok == token.IDENT {
			// array type or generic type [T any]
			p.exprLev++
			x := p.parseExpr()
			p.exprLev--
			// A lone identifier not immediately followed by "]" (and with
			// type parameters enabled) begins a type parameter list.
			if name0, _ := x.(*ast.Ident); p.parseTypeParams() && name0 != nil && p.tok != token.RBRACK {
				// generic type [T any];
				p.parseGenericType(spec, lbrack, name0, token.RBRACK)
			} else {
				// array type
				// TODO(rfindley) should resolve all identifiers in x.
				p.expect(token.RBRACK)
				elt := p.parseType()
				spec.Type = &ast.ArrayType{Lbrack: lbrack, Len: x, Elt: elt}
			}
		} else {
			// array type
			alen := p.parseArrayLen()
			p.expect(token.RBRACK)
			elt := p.parseType()
			spec.Type = &ast.ArrayType{Lbrack: lbrack, Len: alen, Elt: elt}
		}

	default:
		// no type parameters
		if p.tok == token.ASSIGN {
			// type alias
			spec.Assign = p.pos
			p.next()
		}
		spec.Type = p.parseType()
	}

	p.expectSemi() // call before accessing p.linecomment
	spec.Comment = p.lineComment

	return spec
}
2480
2481func (p *parser) parseGenDecl(keyword token.Token, f parseSpecFunction) *ast.GenDecl {
2482	if p.trace {
2483		defer un(trace(p, "GenDecl("+keyword.String()+")"))
2484	}
2485
2486	doc := p.leadComment
2487	pos := p.expect(keyword)
2488	var lparen, rparen token.Pos
2489	var list []ast.Spec
2490	if p.tok == token.LPAREN {
2491		lparen = p.pos
2492		p.next()
2493		for iota := 0; p.tok != token.RPAREN && p.tok != token.EOF; iota++ {
2494			list = append(list, f(p.leadComment, pos, keyword, iota))
2495		}
2496		rparen = p.expect(token.RPAREN)
2497		p.expectSemi()
2498	} else {
2499		list = append(list, f(nil, pos, keyword, 0))
2500	}
2501
2502	return &ast.GenDecl{
2503		Doc:    doc,
2504		TokPos: pos,
2505		Tok:    keyword,
2506		Lparen: lparen,
2507		Specs:  list,
2508		Rparen: rparen,
2509	}
2510}
2511
2512func (p *parser) parseFuncDecl() *ast.FuncDecl {
2513	if p.trace {
2514		defer un(trace(p, "FunctionDecl"))
2515	}
2516
2517	doc := p.leadComment
2518	pos := p.expect(token.FUNC)
2519
2520	var recv *ast.FieldList
2521	if p.tok == token.LPAREN {
2522		_, recv = p.parseParameters(false)
2523	}
2524
2525	ident := p.parseIdent()
2526
2527	tparams, params := p.parseParameters(true)
2528	results := p.parseResult()
2529
2530	var body *ast.BlockStmt
2531	if p.tok == token.LBRACE {
2532		body = p.parseBody()
2533		p.expectSemi()
2534	} else if p.tok == token.SEMICOLON {
2535		p.next()
2536		if p.tok == token.LBRACE {
2537			// opening { of function declaration on next line
2538			p.error(p.pos, "unexpected semicolon or newline before {")
2539			body = p.parseBody()
2540			p.expectSemi()
2541		}
2542	} else {
2543		p.expectSemi()
2544	}
2545
2546	decl := &ast.FuncDecl{
2547		Doc:  doc,
2548		Recv: recv,
2549		Name: ident,
2550		Type: &ast.FuncType{
2551			Func:    pos,
2552			Params:  params,
2553			Results: results,
2554		},
2555		Body: body,
2556	}
2557	typeparams.Set(decl.Type, tparams)
2558	return decl
2559}
2560
2561func (p *parser) parseDecl(sync map[token.Token]bool) ast.Decl {
2562	if p.trace {
2563		defer un(trace(p, "Declaration"))
2564	}
2565
2566	var f parseSpecFunction
2567	switch p.tok {
2568	case token.CONST, token.VAR:
2569		f = p.parseValueSpec
2570
2571	case token.TYPE:
2572		f = p.parseTypeSpec
2573
2574	case token.FUNC:
2575		return p.parseFuncDecl()
2576
2577	default:
2578		pos := p.pos
2579		p.errorExpected(pos, "declaration")
2580		p.advance(sync)
2581		return &ast.BadDecl{From: pos, To: p.pos}
2582	}
2583
2584	return p.parseGenDecl(p.tok, f)
2585}
2586
2587// ----------------------------------------------------------------------------
2588// Source files
2589
// parseFile parses an entire Go source file: the package clause,
// import declarations, and the remaining top-level declarations. It
// returns nil if scanning the first token or parsing the package
// clause already produced errors. How much is parsed is controlled by
// the PackageClauseOnly and ImportsOnly mode bits; object resolution
// is skipped when SkipObjectResolution is set.
func (p *parser) parseFile() *ast.File {
	if p.trace {
		defer un(trace(p, "File"))
	}

	// Don't bother parsing the rest if we had errors scanning the first token.
	// Likely not a Go source file at all.
	if p.errors.Len() != 0 {
		return nil
	}

	// package clause
	doc := p.leadComment
	pos := p.expect(token.PACKAGE)
	// Go spec: The package clause is not a declaration;
	// the package name does not appear in any scope.
	ident := p.parseIdent()
	if ident.Name == "_" && p.mode&DeclarationErrors != 0 {
		p.error(p.pos, "invalid package name _")
	}
	p.expectSemi()

	// Don't bother parsing the rest if we had errors parsing the package clause.
	// Likely not a Go source file at all.
	if p.errors.Len() != 0 {
		return nil
	}

	var decls []ast.Decl
	if p.mode&PackageClauseOnly == 0 {
		// import decls
		for p.tok == token.IMPORT {
			decls = append(decls, p.parseGenDecl(token.IMPORT, p.parseImportSpec))
		}

		if p.mode&ImportsOnly == 0 {
			// rest of package body
			for p.tok != token.EOF {
				decls = append(decls, p.parseDecl(declStart))
			}
		}
	}

	f := &ast.File{
		Doc:      doc,
		Package:  pos,
		Name:     ident,
		Decls:    decls,
		Imports:  p.imports,
		Comments: p.comments,
	}
	// Resolve identifiers unless the caller opted out; declaration
	// errors are only reported when the DeclarationErrors mode is set.
	var declErr func(token.Pos, string)
	if p.mode&DeclarationErrors != 0 {
		declErr = p.error
	}
	if p.mode&SkipObjectResolution == 0 {
		resolveFile(f, p.file, declErr)
	}

	return f
}
2651