1// Copyright 2015 The Go Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style
3// license that can be found in the LICENSE file.
4
5package gc
6
7import (
8	"bytes"
9	"encoding/binary"
10	"fmt"
11	"html"
12	"os"
13	"sort"
14
15	"cmd/compile/internal/ssa"
16	"cmd/compile/internal/types"
17	"cmd/internal/obj"
18	"cmd/internal/objabi"
19	"cmd/internal/src"
20	"cmd/internal/sys"
21)
22
23var ssaConfig *ssa.Config
24var ssaCaches []ssa.Cache
25
26func initssaconfig() {
27	types_ := ssa.Types{
28		Bool:       types.Types[TBOOL],
29		Int8:       types.Types[TINT8],
30		Int16:      types.Types[TINT16],
31		Int32:      types.Types[TINT32],
32		Int64:      types.Types[TINT64],
33		UInt8:      types.Types[TUINT8],
34		UInt16:     types.Types[TUINT16],
35		UInt32:     types.Types[TUINT32],
36		UInt64:     types.Types[TUINT64],
37		Float32:    types.Types[TFLOAT32],
38		Float64:    types.Types[TFLOAT64],
39		Int:        types.Types[TINT],
40		Uintptr:    types.Types[TUINTPTR],
41		String:     types.Types[TSTRING],
42		BytePtr:    types.NewPtr(types.Types[TUINT8]),
43		Int32Ptr:   types.NewPtr(types.Types[TINT32]),
44		UInt32Ptr:  types.NewPtr(types.Types[TUINT32]),
45		IntPtr:     types.NewPtr(types.Types[TINT]),
46		UintptrPtr: types.NewPtr(types.Types[TUINTPTR]),
47		Float32Ptr: types.NewPtr(types.Types[TFLOAT32]),
48		Float64Ptr: types.NewPtr(types.Types[TFLOAT64]),
49		BytePtrPtr: types.NewPtr(types.NewPtr(types.Types[TUINT8])),
50	}
51	// Generate a few pointer types that are uncommon in the frontend but common in the backend.
52	// Caching is disabled in the backend, so generating these here avoids allocations.
53	_ = types.NewPtr(types.Types[TINTER])                             // *interface{}
54	_ = types.NewPtr(types.NewPtr(types.Types[TSTRING]))              // **string
55	_ = types.NewPtr(types.NewPtr(types.Idealstring))                 // **string
56	_ = types.NewPtr(types.NewSlice(types.Types[TINTER]))             // *[]interface{}
57	_ = types.NewPtr(types.NewPtr(types.Bytetype))                    // **byte
58	_ = types.NewPtr(types.NewSlice(types.Bytetype))                  // *[]byte
59	_ = types.NewPtr(types.NewSlice(types.Types[TSTRING]))            // *[]string
60	_ = types.NewPtr(types.NewSlice(types.Idealstring))               // *[]string
61	_ = types.NewPtr(types.NewPtr(types.NewPtr(types.Types[TUINT8]))) // ***uint8
62	_ = types.NewPtr(types.Types[TINT16])                             // *int16
63	_ = types.NewPtr(types.Types[TINT64])                             // *int64
64	_ = types.NewPtr(types.Errortype)                                 // *error
65	types.NewPtrCacheEnabled = false
66	ssaConfig = ssa.NewConfig(thearch.LinkArch.Name, types_, Ctxt, Debug['N'] == 0)
67	if thearch.LinkArch.Name == "386" {
68		ssaConfig.Set387(thearch.Use387)
69	}
70	ssaCaches = make([]ssa.Cache, nBackendWorkers)
71
72	// Set up some runtime functions we'll need to call.
73	Newproc = Sysfunc("newproc")
74	Deferproc = Sysfunc("deferproc")
75	Deferreturn = Sysfunc("deferreturn")
76	Duffcopy = Sysfunc("duffcopy")
77	Duffzero = Sysfunc("duffzero")
78	panicindex = Sysfunc("panicindex")
79	panicslice = Sysfunc("panicslice")
80	panicdivide = Sysfunc("panicdivide")
81	growslice = Sysfunc("growslice")
82	panicdottypeE = Sysfunc("panicdottypeE")
83	panicdottypeI = Sysfunc("panicdottypeI")
84	panicnildottype = Sysfunc("panicnildottype")
85	assertE2I = Sysfunc("assertE2I")
86	assertE2I2 = Sysfunc("assertE2I2")
87	assertI2I = Sysfunc("assertI2I")
88	assertI2I2 = Sysfunc("assertI2I2")
89	goschedguarded = Sysfunc("goschedguarded")
90	writeBarrier = Sysfunc("writeBarrier")
91	writebarrierptr = Sysfunc("writebarrierptr")
92	typedmemmove = Sysfunc("typedmemmove")
93	typedmemclr = Sysfunc("typedmemclr")
94	Udiv = Sysfunc("udiv")
95
96	// GO386=387 runtime functions
97	ControlWord64trunc = Sysfunc("controlWord64trunc")
98	ControlWord32 = Sysfunc("controlWord32")
99}
100
101// buildssa builds an SSA function for fn.
102// worker indicates which of the backend workers is doing the processing.
103func buildssa(fn *Node, worker int) *ssa.Func {
104	name := fn.funcname()
105	printssa := name == os.Getenv("GOSSAFUNC")
106	if printssa {
107		fmt.Println("generating SSA for", name)
108		dumplist("buildssa-enter", fn.Func.Enter)
109		dumplist("buildssa-body", fn.Nbody)
110		dumplist("buildssa-exit", fn.Func.Exit)
111	}
112
113	var s state
114	s.pushLine(fn.Pos)
115	defer s.popLine()
116
117	s.hasdefer = fn.Func.HasDefer()
118	if fn.Func.Pragma&CgoUnsafeArgs != 0 {
119		s.cgoUnsafeArgs = true
120	}
121
122	fe := ssafn{
123		curfn: fn,
124		log:   printssa,
125	}
126	s.curfn = fn
127
128	s.f = ssa.NewFunc(&fe)
129	s.config = ssaConfig
130	s.f.Config = ssaConfig
131	s.f.Cache = &ssaCaches[worker]
132	s.f.Cache.Reset()
133	s.f.DebugTest = s.f.DebugHashMatch("GOSSAHASH", name)
134	s.f.Name = name
135	if fn.Func.Pragma&Nosplit != 0 {
136		s.f.NoSplit = true
137	}
138	defer func() {
139		if s.f.WBPos.IsKnown() {
140			fn.Func.WBPos = s.f.WBPos
141		}
142	}()
143	s.exitCode = fn.Func.Exit
144	s.panics = map[funcLine]*ssa.Block{}
145
146	if name == os.Getenv("GOSSAFUNC") {
147		s.f.HTMLWriter = ssa.NewHTMLWriter("ssa.html", s.f.Frontend(), name)
148		// TODO: generate and print a mapping from nodes to values and blocks
149	}
150
151	// Allocate starting block
152	s.f.Entry = s.f.NewBlock(ssa.BlockPlain)
153
154	// Allocate starting values
155	s.labels = map[string]*ssaLabel{}
156	s.labeledNodes = map[*Node]*ssaLabel{}
157	s.fwdVars = map[*Node]*ssa.Value{}
158	s.startmem = s.entryNewValue0(ssa.OpInitMem, types.TypeMem)
159	s.sp = s.entryNewValue0(ssa.OpSP, types.Types[TUINTPTR]) // TODO: use generic pointer type (unsafe.Pointer?) instead
160	s.sb = s.entryNewValue0(ssa.OpSB, types.Types[TUINTPTR])
161
162	s.startBlock(s.f.Entry)
163	s.vars[&memVar] = s.startmem
164
165	s.varsyms = map[*Node]interface{}{}
166
167	// Generate addresses of local declarations
168	s.decladdrs = map[*Node]*ssa.Value{}
169	for _, n := range fn.Func.Dcl {
170		switch n.Class() {
171		case PPARAM, PPARAMOUT:
172			aux := s.lookupSymbol(n, &ssa.ArgSymbol{Node: n})
173			s.decladdrs[n] = s.entryNewValue1A(ssa.OpAddr, types.NewPtr(n.Type), aux, s.sp)
174			if n.Class() == PPARAMOUT && s.canSSA(n) {
175				// Save ssa-able PPARAMOUT variables so we can
176				// store them back to the stack at the end of
177				// the function.
178				s.returns = append(s.returns, n)
179			}
180		case PAUTO:
181			// processed at each use, to prevent Addr coming
182			// before the decl.
183		case PAUTOHEAP:
184			// moved to heap - already handled by frontend
185		case PFUNC:
186			// local function - already handled by frontend
187		default:
188			s.Fatalf("local variable with class %s unimplemented", classnames[n.Class()])
189		}
190	}
191
192	// Populate SSAable arguments.
193	for _, n := range fn.Func.Dcl {
194		if n.Class() == PPARAM && s.canSSA(n) {
195			s.vars[n] = s.newValue0A(ssa.OpArg, n.Type, n)
196		}
197	}
198
199	// Convert the AST-based IR to the SSA-based IR
200	s.stmtList(fn.Func.Enter)
201	s.stmtList(fn.Nbody)
202
203	// fallthrough to exit
204	if s.curBlock != nil {
205		s.pushLine(fn.Func.Endlineno)
206		s.exit()
207		s.popLine()
208	}
209
210	s.insertPhis()
211
212	// Don't carry reference this around longer than necessary
213	s.exitCode = Nodes{}
214
215	// Main call to ssa package to compile function
216	ssa.Compile(s.f)
217	return s.f
218}
219
220type state struct {
221	// configuration (arch) information
222	config *ssa.Config
223
224	// function we're building
225	f *ssa.Func
226
227	// Node for function
228	curfn *Node
229
230	// labels and labeled control flow nodes (OFOR, OFORUNTIL, OSWITCH, OSELECT) in f
231	labels       map[string]*ssaLabel
232	labeledNodes map[*Node]*ssaLabel
233
234	// Code that must precede any return
235	// (e.g., copying value of heap-escaped paramout back to true paramout)
236	exitCode Nodes
237
238	// unlabeled break and continue statement tracking
239	breakTo    *ssa.Block // current target for plain break statement
240	continueTo *ssa.Block // current target for plain continue statement
241
242	// current location where we're interpreting the AST
243	curBlock *ssa.Block
244
245	// variable assignments in the current block (map from variable symbol to ssa value)
246	// *Node is the unique identifier (an ONAME Node) for the variable.
247	// TODO: keep a single varnum map, then make all of these maps slices instead?
248	vars map[*Node]*ssa.Value
249
250	// fwdVars are variables that are used before they are defined in the current block.
251	// This map exists just to coalesce multiple references into a single FwdRef op.
252	// *Node is the unique identifier (an ONAME Node) for the variable.
253	fwdVars map[*Node]*ssa.Value
254
255	// all defined variables at the end of each block. Indexed by block ID.
256	defvars []map[*Node]*ssa.Value
257
258	// addresses of PPARAM and PPARAMOUT variables.
259	decladdrs map[*Node]*ssa.Value
260
261	// symbols for PEXTERN, PAUTO and PPARAMOUT variables so they can be reused.
262	varsyms map[*Node]interface{}
263
264	// starting values. Memory, stack pointer, and globals pointer
265	startmem *ssa.Value
266	sp       *ssa.Value
267	sb       *ssa.Value
268
269	// line number stack. The current line number is top of stack
270	line []src.XPos
271
272	// list of panic calls by function name and line number.
273	// Used to deduplicate panic calls.
274	panics map[funcLine]*ssa.Block
275
276	// list of PPARAMOUT (return) variables.
277	returns []*Node
278
279	cgoUnsafeArgs bool
280	hasdefer      bool // whether the function contains a defer statement
281}
282
283type funcLine struct {
284	f    *obj.LSym
285	base *src.PosBase
286	line uint
287}
288
289type ssaLabel struct {
290	target         *ssa.Block // block identified by this label
291	breakTarget    *ssa.Block // block to break to in control flow node identified by this label
292	continueTarget *ssa.Block // block to continue to in control flow node identified by this label
293}
294
295// label returns the label associated with sym, creating it if necessary.
296func (s *state) label(sym *types.Sym) *ssaLabel {
297	lab := s.labels[sym.Name]
298	if lab == nil {
299		lab = new(ssaLabel)
300		s.labels[sym.Name] = lab
301	}
302	return lab
303}
304
305func (s *state) Logf(msg string, args ...interface{}) { s.f.Logf(msg, args...) }
306func (s *state) Log() bool                            { return s.f.Log() }
307func (s *state) Fatalf(msg string, args ...interface{}) {
308	s.f.Frontend().Fatalf(s.peekPos(), msg, args...)
309}
310func (s *state) Warnl(pos src.XPos, msg string, args ...interface{}) { s.f.Warnl(pos, msg, args...) }
311func (s *state) Debug_checknil() bool                                { return s.f.Frontend().Debug_checknil() }
312
313var (
314	// dummy node for the memory variable
315	memVar = Node{Op: ONAME, Sym: &types.Sym{Name: "mem"}}
316
317	// dummy nodes for temporary variables
318	ptrVar    = Node{Op: ONAME, Sym: &types.Sym{Name: "ptr"}}
319	lenVar    = Node{Op: ONAME, Sym: &types.Sym{Name: "len"}}
320	newlenVar = Node{Op: ONAME, Sym: &types.Sym{Name: "newlen"}}
321	capVar    = Node{Op: ONAME, Sym: &types.Sym{Name: "cap"}}
322	typVar    = Node{Op: ONAME, Sym: &types.Sym{Name: "typ"}}
323	okVar     = Node{Op: ONAME, Sym: &types.Sym{Name: "ok"}}
324)
325
326// startBlock sets the current block we're generating code in to b.
327func (s *state) startBlock(b *ssa.Block) {
328	if s.curBlock != nil {
329		s.Fatalf("starting block %v when block %v has not ended", b, s.curBlock)
330	}
331	s.curBlock = b
332	s.vars = map[*Node]*ssa.Value{}
333	for n := range s.fwdVars {
334		delete(s.fwdVars, n)
335	}
336}
337
338// endBlock marks the end of generating code for the current block.
339// Returns the (former) current block. Returns nil if there is no current
340// block, i.e. if no code flows to the current execution point.
341func (s *state) endBlock() *ssa.Block {
342	b := s.curBlock
343	if b == nil {
344		return nil
345	}
346	for len(s.defvars) <= int(b.ID) {
347		s.defvars = append(s.defvars, nil)
348	}
349	s.defvars[b.ID] = s.vars
350	s.curBlock = nil
351	s.vars = nil
352	b.Pos = s.peekPos()
353	return b
354}
355
356// pushLine pushes a line number on the line number stack.
357func (s *state) pushLine(line src.XPos) {
358	if !line.IsKnown() {
359		// the frontend may emit node with line number missing,
360		// use the parent line number in this case.
361		line = s.peekPos()
362		if Debug['K'] != 0 {
363			Warn("buildssa: unknown position (line 0)")
364		}
365	}
366	s.line = append(s.line, line)
367}
368
369// popLine pops the top of the line number stack.
370func (s *state) popLine() {
371	s.line = s.line[:len(s.line)-1]
372}
373
374// peekPos peeks the top of the line number stack.
375func (s *state) peekPos() src.XPos {
376	return s.line[len(s.line)-1]
377}
378
379// newValue0 adds a new value with no arguments to the current block.
380func (s *state) newValue0(op ssa.Op, t *types.Type) *ssa.Value {
381	return s.curBlock.NewValue0(s.peekPos(), op, t)
382}
383
384// newValue0A adds a new value with no arguments and an aux value to the current block.
385func (s *state) newValue0A(op ssa.Op, t *types.Type, aux interface{}) *ssa.Value {
386	return s.curBlock.NewValue0A(s.peekPos(), op, t, aux)
387}
388
389// newValue0I adds a new value with no arguments and an auxint value to the current block.
390func (s *state) newValue0I(op ssa.Op, t *types.Type, auxint int64) *ssa.Value {
391	return s.curBlock.NewValue0I(s.peekPos(), op, t, auxint)
392}
393
394// newValue1 adds a new value with one argument to the current block.
395func (s *state) newValue1(op ssa.Op, t *types.Type, arg *ssa.Value) *ssa.Value {
396	return s.curBlock.NewValue1(s.peekPos(), op, t, arg)
397}
398
399// newValue1A adds a new value with one argument and an aux value to the current block.
400func (s *state) newValue1A(op ssa.Op, t *types.Type, aux interface{}, arg *ssa.Value) *ssa.Value {
401	return s.curBlock.NewValue1A(s.peekPos(), op, t, aux, arg)
402}
403
404// newValue1I adds a new value with one argument and an auxint value to the current block.
405func (s *state) newValue1I(op ssa.Op, t *types.Type, aux int64, arg *ssa.Value) *ssa.Value {
406	return s.curBlock.NewValue1I(s.peekPos(), op, t, aux, arg)
407}
408
409// newValue2 adds a new value with two arguments to the current block.
410func (s *state) newValue2(op ssa.Op, t *types.Type, arg0, arg1 *ssa.Value) *ssa.Value {
411	return s.curBlock.NewValue2(s.peekPos(), op, t, arg0, arg1)
412}
413
414// newValue2I adds a new value with two arguments and an auxint value to the current block.
415func (s *state) newValue2I(op ssa.Op, t *types.Type, aux int64, arg0, arg1 *ssa.Value) *ssa.Value {
416	return s.curBlock.NewValue2I(s.peekPos(), op, t, aux, arg0, arg1)
417}
418
419// newValue3 adds a new value with three arguments to the current block.
420func (s *state) newValue3(op ssa.Op, t *types.Type, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
421	return s.curBlock.NewValue3(s.peekPos(), op, t, arg0, arg1, arg2)
422}
423
424// newValue3I adds a new value with three arguments and an auxint value to the current block.
425func (s *state) newValue3I(op ssa.Op, t *types.Type, aux int64, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
426	return s.curBlock.NewValue3I(s.peekPos(), op, t, aux, arg0, arg1, arg2)
427}
428
429// newValue3A adds a new value with three arguments and an aux value to the current block.
430func (s *state) newValue3A(op ssa.Op, t *types.Type, aux interface{}, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
431	return s.curBlock.NewValue3A(s.peekPos(), op, t, aux, arg0, arg1, arg2)
432}
433
434// newValue4 adds a new value with four arguments to the current block.
435func (s *state) newValue4(op ssa.Op, t *types.Type, arg0, arg1, arg2, arg3 *ssa.Value) *ssa.Value {
436	return s.curBlock.NewValue4(s.peekPos(), op, t, arg0, arg1, arg2, arg3)
437}
438
439// entryNewValue0 adds a new value with no arguments to the entry block.
440func (s *state) entryNewValue0(op ssa.Op, t *types.Type) *ssa.Value {
441	return s.f.Entry.NewValue0(src.NoXPos, op, t)
442}
443
444// entryNewValue0A adds a new value with no arguments and an aux value to the entry block.
445func (s *state) entryNewValue0A(op ssa.Op, t *types.Type, aux interface{}) *ssa.Value {
446	return s.f.Entry.NewValue0A(s.peekPos(), op, t, aux)
447}
448
449// entryNewValue1 adds a new value with one argument to the entry block.
450func (s *state) entryNewValue1(op ssa.Op, t *types.Type, arg *ssa.Value) *ssa.Value {
451	return s.f.Entry.NewValue1(s.peekPos(), op, t, arg)
452}
453
454// entryNewValue1 adds a new value with one argument and an auxint value to the entry block.
455func (s *state) entryNewValue1I(op ssa.Op, t *types.Type, auxint int64, arg *ssa.Value) *ssa.Value {
456	return s.f.Entry.NewValue1I(s.peekPos(), op, t, auxint, arg)
457}
458
459// entryNewValue1A adds a new value with one argument and an aux value to the entry block.
460func (s *state) entryNewValue1A(op ssa.Op, t *types.Type, aux interface{}, arg *ssa.Value) *ssa.Value {
461	return s.f.Entry.NewValue1A(s.peekPos(), op, t, aux, arg)
462}
463
464// entryNewValue2 adds a new value with two arguments to the entry block.
465func (s *state) entryNewValue2(op ssa.Op, t *types.Type, arg0, arg1 *ssa.Value) *ssa.Value {
466	return s.f.Entry.NewValue2(s.peekPos(), op, t, arg0, arg1)
467}
468
469// const* routines add a new const value to the entry block.
470func (s *state) constSlice(t *types.Type) *ssa.Value {
471	return s.f.ConstSlice(s.peekPos(), t)
472}
473func (s *state) constInterface(t *types.Type) *ssa.Value {
474	return s.f.ConstInterface(s.peekPos(), t)
475}
476func (s *state) constNil(t *types.Type) *ssa.Value { return s.f.ConstNil(s.peekPos(), t) }
477func (s *state) constEmptyString(t *types.Type) *ssa.Value {
478	return s.f.ConstEmptyString(s.peekPos(), t)
479}
480func (s *state) constBool(c bool) *ssa.Value {
481	return s.f.ConstBool(s.peekPos(), types.Types[TBOOL], c)
482}
483func (s *state) constInt8(t *types.Type, c int8) *ssa.Value {
484	return s.f.ConstInt8(s.peekPos(), t, c)
485}
486func (s *state) constInt16(t *types.Type, c int16) *ssa.Value {
487	return s.f.ConstInt16(s.peekPos(), t, c)
488}
489func (s *state) constInt32(t *types.Type, c int32) *ssa.Value {
490	return s.f.ConstInt32(s.peekPos(), t, c)
491}
492func (s *state) constInt64(t *types.Type, c int64) *ssa.Value {
493	return s.f.ConstInt64(s.peekPos(), t, c)
494}
495func (s *state) constFloat32(t *types.Type, c float64) *ssa.Value {
496	return s.f.ConstFloat32(s.peekPos(), t, c)
497}
498func (s *state) constFloat64(t *types.Type, c float64) *ssa.Value {
499	return s.f.ConstFloat64(s.peekPos(), t, c)
500}
501func (s *state) constInt(t *types.Type, c int64) *ssa.Value {
502	if s.config.PtrSize == 8 {
503		return s.constInt64(t, c)
504	}
505	if int64(int32(c)) != c {
506		s.Fatalf("integer constant too big %d", c)
507	}
508	return s.constInt32(t, int32(c))
509}
510func (s *state) constOffPtrSP(t *types.Type, c int64) *ssa.Value {
511	return s.f.ConstOffPtrSP(s.peekPos(), t, c, s.sp)
512}
513
514// stmtList converts the statement list n to SSA and adds it to s.
515func (s *state) stmtList(l Nodes) {
516	for _, n := range l.Slice() {
517		s.stmt(n)
518	}
519}
520
521// stmt converts the statement n to SSA and adds it to s.
522func (s *state) stmt(n *Node) {
523	s.pushLine(n.Pos)
524	defer s.popLine()
525
526	// If s.curBlock is nil, and n isn't a label (which might have an associated goto somewhere),
527	// then this code is dead. Stop here.
528	if s.curBlock == nil && n.Op != OLABEL {
529		return
530	}
531
532	s.stmtList(n.Ninit)
533	switch n.Op {
534
535	case OBLOCK:
536		s.stmtList(n.List)
537
538	// No-ops
539	case OEMPTY, ODCLCONST, ODCLTYPE, OFALL:
540
541	// Expression statements
542	case OCALLFUNC:
543		if isIntrinsicCall(n) {
544			s.intrinsicCall(n)
545			return
546		}
547		fallthrough
548
549	case OCALLMETH, OCALLINTER:
550		s.call(n, callNormal)
551		if n.Op == OCALLFUNC && n.Left.Op == ONAME && n.Left.Class() == PFUNC {
552			if fn := n.Left.Sym.Name; compiling_runtime && fn == "throw" ||
553				n.Left.Sym.Pkg == Runtimepkg && (fn == "throwinit" || fn == "gopanic" || fn == "panicwrap" || fn == "block") {
554				m := s.mem()
555				b := s.endBlock()
556				b.Kind = ssa.BlockExit
557				b.SetControl(m)
558				// TODO: never rewrite OPANIC to OCALLFUNC in the
559				// first place. Need to wait until all backends
560				// go through SSA.
561			}
562		}
563	case ODEFER:
564		s.call(n.Left, callDefer)
565	case OPROC:
566		s.call(n.Left, callGo)
567
568	case OAS2DOTTYPE:
569		res, resok := s.dottype(n.Rlist.First(), true)
570		deref := false
571		if !canSSAType(n.Rlist.First().Type) {
572			if res.Op != ssa.OpLoad {
573				s.Fatalf("dottype of non-load")
574			}
575			mem := s.mem()
576			if mem.Op == ssa.OpVarKill {
577				mem = mem.Args[0]
578			}
579			if res.Args[1] != mem {
580				s.Fatalf("memory no longer live from 2-result dottype load")
581			}
582			deref = true
583			res = res.Args[0]
584		}
585		s.assign(n.List.First(), res, deref, 0)
586		s.assign(n.List.Second(), resok, false, 0)
587		return
588
589	case OAS2FUNC:
590		// We come here only when it is an intrinsic call returning two values.
591		if !isIntrinsicCall(n.Rlist.First()) {
592			s.Fatalf("non-intrinsic AS2FUNC not expanded %v", n.Rlist.First())
593		}
594		v := s.intrinsicCall(n.Rlist.First())
595		v1 := s.newValue1(ssa.OpSelect0, n.List.First().Type, v)
596		v2 := s.newValue1(ssa.OpSelect1, n.List.Second().Type, v)
597		s.assign(n.List.First(), v1, false, 0)
598		s.assign(n.List.Second(), v2, false, 0)
599		return
600
601	case ODCL:
602		if n.Left.Class() == PAUTOHEAP {
603			Fatalf("DCL %v", n)
604		}
605
606	case OLABEL:
607		sym := n.Left.Sym
608		lab := s.label(sym)
609
610		// Associate label with its control flow node, if any
611		if ctl := n.labeledControl(); ctl != nil {
612			s.labeledNodes[ctl] = lab
613		}
614
615		// The label might already have a target block via a goto.
616		if lab.target == nil {
617			lab.target = s.f.NewBlock(ssa.BlockPlain)
618		}
619
620		// Go to that label.
621		// (We pretend "label:" is preceded by "goto label", unless the predecessor is unreachable.)
622		if s.curBlock != nil {
623			b := s.endBlock()
624			b.AddEdgeTo(lab.target)
625		}
626		s.startBlock(lab.target)
627
628	case OGOTO:
629		sym := n.Left.Sym
630
631		lab := s.label(sym)
632		if lab.target == nil {
633			lab.target = s.f.NewBlock(ssa.BlockPlain)
634		}
635
636		b := s.endBlock()
637		b.AddEdgeTo(lab.target)
638
639	case OAS:
640		if n.Left == n.Right && n.Left.Op == ONAME {
641			// An x=x assignment. No point in doing anything
642			// here. In addition, skipping this assignment
643			// prevents generating:
644			//   VARDEF x
645			//   COPY x -> x
646			// which is bad because x is incorrectly considered
647			// dead before the vardef. See issue #14904.
648			return
649		}
650
651		// Evaluate RHS.
652		rhs := n.Right
653		if rhs != nil {
654			switch rhs.Op {
655			case OSTRUCTLIT, OARRAYLIT, OSLICELIT:
656				// All literals with nonzero fields have already been
657				// rewritten during walk. Any that remain are just T{}
658				// or equivalents. Use the zero value.
659				if !iszero(rhs) {
660					Fatalf("literal with nonzero value in SSA: %v", rhs)
661				}
662				rhs = nil
663			case OAPPEND:
664				// If we're writing the result of an append back to the same slice,
665				// handle it specially to avoid write barriers on the fast (non-growth) path.
666				// If the slice can be SSA'd, it'll be on the stack,
667				// so there will be no write barriers,
668				// so there's no need to attempt to prevent them.
669				if samesafeexpr(n.Left, rhs.List.First()) {
670					if !s.canSSA(n.Left) {
671						if Debug_append > 0 {
672							Warnl(n.Pos, "append: len-only update")
673						}
674						s.append(rhs, true)
675						return
676					} else {
677						if Debug_append > 0 { // replicating old diagnostic message
678							Warnl(n.Pos, "append: len-only update (in local slice)")
679						}
680					}
681				}
682			}
683		}
684
685		if isblank(n.Left) {
686			// _ = rhs
687			// Just evaluate rhs for side-effects.
688			if rhs != nil {
689				s.expr(rhs)
690			}
691			return
692		}
693
694		var t *types.Type
695		if n.Right != nil {
696			t = n.Right.Type
697		} else {
698			t = n.Left.Type
699		}
700
701		var r *ssa.Value
702		deref := !canSSAType(t)
703		if deref {
704			if rhs == nil {
705				r = nil // Signal assign to use OpZero.
706			} else {
707				r = s.addr(rhs, false)
708			}
709		} else {
710			if rhs == nil {
711				r = s.zeroVal(t)
712			} else {
713				r = s.expr(rhs)
714			}
715		}
716
717		var skip skipMask
718		if rhs != nil && (rhs.Op == OSLICE || rhs.Op == OSLICE3 || rhs.Op == OSLICESTR) && samesafeexpr(rhs.Left, n.Left) {
719			// We're assigning a slicing operation back to its source.
720			// Don't write back fields we aren't changing. See issue #14855.
721			i, j, k := rhs.SliceBounds()
722			if i != nil && (i.Op == OLITERAL && i.Val().Ctype() == CTINT && i.Int64() == 0) {
723				// [0:...] is the same as [:...]
724				i = nil
725			}
726			// TODO: detect defaults for len/cap also.
727			// Currently doesn't really work because (*p)[:len(*p)] appears here as:
728			//    tmp = len(*p)
729			//    (*p)[:tmp]
730			//if j != nil && (j.Op == OLEN && samesafeexpr(j.Left, n.Left)) {
731			//      j = nil
732			//}
733			//if k != nil && (k.Op == OCAP && samesafeexpr(k.Left, n.Left)) {
734			//      k = nil
735			//}
736			if i == nil {
737				skip |= skipPtr
738				if j == nil {
739					skip |= skipLen
740				}
741				if k == nil {
742					skip |= skipCap
743				}
744			}
745		}
746
747		s.assign(n.Left, r, deref, skip)
748
749	case OIF:
750		bThen := s.f.NewBlock(ssa.BlockPlain)
751		bEnd := s.f.NewBlock(ssa.BlockPlain)
752		var bElse *ssa.Block
753		var likely int8
754		if n.Likely() {
755			likely = 1
756		}
757		if n.Rlist.Len() != 0 {
758			bElse = s.f.NewBlock(ssa.BlockPlain)
759			s.condBranch(n.Left, bThen, bElse, likely)
760		} else {
761			s.condBranch(n.Left, bThen, bEnd, likely)
762		}
763
764		s.startBlock(bThen)
765		s.stmtList(n.Nbody)
766		if b := s.endBlock(); b != nil {
767			b.AddEdgeTo(bEnd)
768		}
769
770		if n.Rlist.Len() != 0 {
771			s.startBlock(bElse)
772			s.stmtList(n.Rlist)
773			if b := s.endBlock(); b != nil {
774				b.AddEdgeTo(bEnd)
775			}
776		}
777		s.startBlock(bEnd)
778
779	case ORETURN:
780		s.stmtList(n.List)
781		s.exit()
782	case ORETJMP:
783		s.stmtList(n.List)
784		b := s.exit()
785		b.Kind = ssa.BlockRetJmp // override BlockRet
786		b.Aux = n.Left.Sym.Linksym()
787
788	case OCONTINUE, OBREAK:
789		var to *ssa.Block
790		if n.Left == nil {
791			// plain break/continue
792			switch n.Op {
793			case OCONTINUE:
794				to = s.continueTo
795			case OBREAK:
796				to = s.breakTo
797			}
798		} else {
799			// labeled break/continue; look up the target
800			sym := n.Left.Sym
801			lab := s.label(sym)
802			switch n.Op {
803			case OCONTINUE:
804				to = lab.continueTarget
805			case OBREAK:
806				to = lab.breakTarget
807			}
808		}
809
810		b := s.endBlock()
811		b.AddEdgeTo(to)
812
813	case OFOR, OFORUNTIL:
814		// OFOR: for Ninit; Left; Right { Nbody }
815		// For      = cond; body; incr
816		// Foruntil = body; incr; cond
817		bCond := s.f.NewBlock(ssa.BlockPlain)
818		bBody := s.f.NewBlock(ssa.BlockPlain)
819		bIncr := s.f.NewBlock(ssa.BlockPlain)
820		bEnd := s.f.NewBlock(ssa.BlockPlain)
821
822		// first, jump to condition test (OFOR) or body (OFORUNTIL)
823		b := s.endBlock()
824		if n.Op == OFOR {
825			b.AddEdgeTo(bCond)
826			// generate code to test condition
827			s.startBlock(bCond)
828			if n.Left != nil {
829				s.condBranch(n.Left, bBody, bEnd, 1)
830			} else {
831				b := s.endBlock()
832				b.Kind = ssa.BlockPlain
833				b.AddEdgeTo(bBody)
834			}
835
836		} else {
837			b.AddEdgeTo(bBody)
838		}
839
840		// set up for continue/break in body
841		prevContinue := s.continueTo
842		prevBreak := s.breakTo
843		s.continueTo = bIncr
844		s.breakTo = bEnd
845		lab := s.labeledNodes[n]
846		if lab != nil {
847			// labeled for loop
848			lab.continueTarget = bIncr
849			lab.breakTarget = bEnd
850		}
851
852		// generate body
853		s.startBlock(bBody)
854		s.stmtList(n.Nbody)
855
856		// tear down continue/break
857		s.continueTo = prevContinue
858		s.breakTo = prevBreak
859		if lab != nil {
860			lab.continueTarget = nil
861			lab.breakTarget = nil
862		}
863
864		// done with body, goto incr
865		if b := s.endBlock(); b != nil {
866			b.AddEdgeTo(bIncr)
867		}
868
869		// generate incr
870		s.startBlock(bIncr)
871		if n.Right != nil {
872			s.stmt(n.Right)
873		}
874		if b := s.endBlock(); b != nil {
875			b.AddEdgeTo(bCond)
876		}
877
878		if n.Op == OFORUNTIL {
879			// generate code to test condition
880			s.startBlock(bCond)
881			if n.Left != nil {
882				s.condBranch(n.Left, bBody, bEnd, 1)
883			} else {
884				b := s.endBlock()
885				b.Kind = ssa.BlockPlain
886				b.AddEdgeTo(bBody)
887			}
888		}
889
890		s.startBlock(bEnd)
891
892	case OSWITCH, OSELECT:
893		// These have been mostly rewritten by the front end into their Nbody fields.
894		// Our main task is to correctly hook up any break statements.
895		bEnd := s.f.NewBlock(ssa.BlockPlain)
896
897		prevBreak := s.breakTo
898		s.breakTo = bEnd
899		lab := s.labeledNodes[n]
900		if lab != nil {
901			// labeled
902			lab.breakTarget = bEnd
903		}
904
905		// generate body code
906		s.stmtList(n.Nbody)
907
908		s.breakTo = prevBreak
909		if lab != nil {
910			lab.breakTarget = nil
911		}
912
913		// walk adds explicit OBREAK nodes to the end of all reachable code paths.
914		// If we still have a current block here, then mark it unreachable.
915		if s.curBlock != nil {
916			m := s.mem()
917			b := s.endBlock()
918			b.Kind = ssa.BlockExit
919			b.SetControl(m)
920		}
921		s.startBlock(bEnd)
922
923	case OVARKILL:
924		// Insert a varkill op to record that a variable is no longer live.
925		// We only care about liveness info at call sites, so putting the
926		// varkill in the store chain is enough to keep it correctly ordered
927		// with respect to call ops.
928		if !s.canSSA(n.Left) {
929			s.vars[&memVar] = s.newValue1A(ssa.OpVarKill, types.TypeMem, n.Left, s.mem())
930		}
931
932	case OVARLIVE:
933		// Insert a varlive op to record that a variable is still live.
934		if !n.Left.Addrtaken() {
935			s.Fatalf("VARLIVE variable %v must have Addrtaken set", n.Left)
936		}
937		s.vars[&memVar] = s.newValue1A(ssa.OpVarLive, types.TypeMem, n.Left, s.mem())
938
939	case OCHECKNIL:
940		p := s.expr(n.Left)
941		s.nilCheck(p)
942
943	default:
944		s.Fatalf("unhandled stmt %v", n.Op)
945	}
946}
947
948// exit processes any code that needs to be generated just before returning.
949// It returns a BlockRet block that ends the control flow. Its control value
950// will be set to the final memory state.
951func (s *state) exit() *ssa.Block {
952	if s.hasdefer {
953		s.rtcall(Deferreturn, true, nil)
954	}
955
956	// Run exit code. Typically, this code copies heap-allocated PPARAMOUT
957	// variables back to the stack.
958	s.stmtList(s.exitCode)
959
960	// Store SSAable PPARAMOUT variables back to stack locations.
961	for _, n := range s.returns {
962		addr := s.decladdrs[n]
963		val := s.variable(n, n.Type)
964		s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, n, s.mem())
965		s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, n.Type, addr, val, s.mem())
966		// TODO: if val is ever spilled, we'd like to use the
967		// PPARAMOUT slot for spilling it. That won't happen
968		// currently.
969	}
970
971	// Do actual return.
972	m := s.mem()
973	b := s.endBlock()
974	b.Kind = ssa.BlockRet
975	b.SetControl(m)
976	return b
977}
978
979type opAndType struct {
980	op    Op
981	etype types.EType
982}
983
984var opToSSA = map[opAndType]ssa.Op{
985	opAndType{OADD, TINT8}:    ssa.OpAdd8,
986	opAndType{OADD, TUINT8}:   ssa.OpAdd8,
987	opAndType{OADD, TINT16}:   ssa.OpAdd16,
988	opAndType{OADD, TUINT16}:  ssa.OpAdd16,
989	opAndType{OADD, TINT32}:   ssa.OpAdd32,
990	opAndType{OADD, TUINT32}:  ssa.OpAdd32,
991	opAndType{OADD, TPTR32}:   ssa.OpAdd32,
992	opAndType{OADD, TINT64}:   ssa.OpAdd64,
993	opAndType{OADD, TUINT64}:  ssa.OpAdd64,
994	opAndType{OADD, TPTR64}:   ssa.OpAdd64,
995	opAndType{OADD, TFLOAT32}: ssa.OpAdd32F,
996	opAndType{OADD, TFLOAT64}: ssa.OpAdd64F,
997
998	opAndType{OSUB, TINT8}:    ssa.OpSub8,
999	opAndType{OSUB, TUINT8}:   ssa.OpSub8,
1000	opAndType{OSUB, TINT16}:   ssa.OpSub16,
1001	opAndType{OSUB, TUINT16}:  ssa.OpSub16,
1002	opAndType{OSUB, TINT32}:   ssa.OpSub32,
1003	opAndType{OSUB, TUINT32}:  ssa.OpSub32,
1004	opAndType{OSUB, TINT64}:   ssa.OpSub64,
1005	opAndType{OSUB, TUINT64}:  ssa.OpSub64,
1006	opAndType{OSUB, TFLOAT32}: ssa.OpSub32F,
1007	opAndType{OSUB, TFLOAT64}: ssa.OpSub64F,
1008
1009	opAndType{ONOT, TBOOL}: ssa.OpNot,
1010
1011	opAndType{OMINUS, TINT8}:    ssa.OpNeg8,
1012	opAndType{OMINUS, TUINT8}:   ssa.OpNeg8,
1013	opAndType{OMINUS, TINT16}:   ssa.OpNeg16,
1014	opAndType{OMINUS, TUINT16}:  ssa.OpNeg16,
1015	opAndType{OMINUS, TINT32}:   ssa.OpNeg32,
1016	opAndType{OMINUS, TUINT32}:  ssa.OpNeg32,
1017	opAndType{OMINUS, TINT64}:   ssa.OpNeg64,
1018	opAndType{OMINUS, TUINT64}:  ssa.OpNeg64,
1019	opAndType{OMINUS, TFLOAT32}: ssa.OpNeg32F,
1020	opAndType{OMINUS, TFLOAT64}: ssa.OpNeg64F,
1021
1022	opAndType{OCOM, TINT8}:   ssa.OpCom8,
1023	opAndType{OCOM, TUINT8}:  ssa.OpCom8,
1024	opAndType{OCOM, TINT16}:  ssa.OpCom16,
1025	opAndType{OCOM, TUINT16}: ssa.OpCom16,
1026	opAndType{OCOM, TINT32}:  ssa.OpCom32,
1027	opAndType{OCOM, TUINT32}: ssa.OpCom32,
1028	opAndType{OCOM, TINT64}:  ssa.OpCom64,
1029	opAndType{OCOM, TUINT64}: ssa.OpCom64,
1030
1031	opAndType{OIMAG, TCOMPLEX64}:  ssa.OpComplexImag,
1032	opAndType{OIMAG, TCOMPLEX128}: ssa.OpComplexImag,
1033	opAndType{OREAL, TCOMPLEX64}:  ssa.OpComplexReal,
1034	opAndType{OREAL, TCOMPLEX128}: ssa.OpComplexReal,
1035
1036	opAndType{OMUL, TINT8}:    ssa.OpMul8,
1037	opAndType{OMUL, TUINT8}:   ssa.OpMul8,
1038	opAndType{OMUL, TINT16}:   ssa.OpMul16,
1039	opAndType{OMUL, TUINT16}:  ssa.OpMul16,
1040	opAndType{OMUL, TINT32}:   ssa.OpMul32,
1041	opAndType{OMUL, TUINT32}:  ssa.OpMul32,
1042	opAndType{OMUL, TINT64}:   ssa.OpMul64,
1043	opAndType{OMUL, TUINT64}:  ssa.OpMul64,
1044	opAndType{OMUL, TFLOAT32}: ssa.OpMul32F,
1045	opAndType{OMUL, TFLOAT64}: ssa.OpMul64F,
1046
1047	opAndType{ODIV, TFLOAT32}: ssa.OpDiv32F,
1048	opAndType{ODIV, TFLOAT64}: ssa.OpDiv64F,
1049
1050	opAndType{ODIV, TINT8}:   ssa.OpDiv8,
1051	opAndType{ODIV, TUINT8}:  ssa.OpDiv8u,
1052	opAndType{ODIV, TINT16}:  ssa.OpDiv16,
1053	opAndType{ODIV, TUINT16}: ssa.OpDiv16u,
1054	opAndType{ODIV, TINT32}:  ssa.OpDiv32,
1055	opAndType{ODIV, TUINT32}: ssa.OpDiv32u,
1056	opAndType{ODIV, TINT64}:  ssa.OpDiv64,
1057	opAndType{ODIV, TUINT64}: ssa.OpDiv64u,
1058
1059	opAndType{OMOD, TINT8}:   ssa.OpMod8,
1060	opAndType{OMOD, TUINT8}:  ssa.OpMod8u,
1061	opAndType{OMOD, TINT16}:  ssa.OpMod16,
1062	opAndType{OMOD, TUINT16}: ssa.OpMod16u,
1063	opAndType{OMOD, TINT32}:  ssa.OpMod32,
1064	opAndType{OMOD, TUINT32}: ssa.OpMod32u,
1065	opAndType{OMOD, TINT64}:  ssa.OpMod64,
1066	opAndType{OMOD, TUINT64}: ssa.OpMod64u,
1067
1068	opAndType{OAND, TINT8}:   ssa.OpAnd8,
1069	opAndType{OAND, TUINT8}:  ssa.OpAnd8,
1070	opAndType{OAND, TINT16}:  ssa.OpAnd16,
1071	opAndType{OAND, TUINT16}: ssa.OpAnd16,
1072	opAndType{OAND, TINT32}:  ssa.OpAnd32,
1073	opAndType{OAND, TUINT32}: ssa.OpAnd32,
1074	opAndType{OAND, TINT64}:  ssa.OpAnd64,
1075	opAndType{OAND, TUINT64}: ssa.OpAnd64,
1076
1077	opAndType{OOR, TINT8}:   ssa.OpOr8,
1078	opAndType{OOR, TUINT8}:  ssa.OpOr8,
1079	opAndType{OOR, TINT16}:  ssa.OpOr16,
1080	opAndType{OOR, TUINT16}: ssa.OpOr16,
1081	opAndType{OOR, TINT32}:  ssa.OpOr32,
1082	opAndType{OOR, TUINT32}: ssa.OpOr32,
1083	opAndType{OOR, TINT64}:  ssa.OpOr64,
1084	opAndType{OOR, TUINT64}: ssa.OpOr64,
1085
1086	opAndType{OXOR, TINT8}:   ssa.OpXor8,
1087	opAndType{OXOR, TUINT8}:  ssa.OpXor8,
1088	opAndType{OXOR, TINT16}:  ssa.OpXor16,
1089	opAndType{OXOR, TUINT16}: ssa.OpXor16,
1090	opAndType{OXOR, TINT32}:  ssa.OpXor32,
1091	opAndType{OXOR, TUINT32}: ssa.OpXor32,
1092	opAndType{OXOR, TINT64}:  ssa.OpXor64,
1093	opAndType{OXOR, TUINT64}: ssa.OpXor64,
1094
1095	opAndType{OEQ, TBOOL}:      ssa.OpEqB,
1096	opAndType{OEQ, TINT8}:      ssa.OpEq8,
1097	opAndType{OEQ, TUINT8}:     ssa.OpEq8,
1098	opAndType{OEQ, TINT16}:     ssa.OpEq16,
1099	opAndType{OEQ, TUINT16}:    ssa.OpEq16,
1100	opAndType{OEQ, TINT32}:     ssa.OpEq32,
1101	opAndType{OEQ, TUINT32}:    ssa.OpEq32,
1102	opAndType{OEQ, TINT64}:     ssa.OpEq64,
1103	opAndType{OEQ, TUINT64}:    ssa.OpEq64,
1104	opAndType{OEQ, TINTER}:     ssa.OpEqInter,
1105	opAndType{OEQ, TSLICE}:     ssa.OpEqSlice,
1106	opAndType{OEQ, TFUNC}:      ssa.OpEqPtr,
1107	opAndType{OEQ, TMAP}:       ssa.OpEqPtr,
1108	opAndType{OEQ, TCHAN}:      ssa.OpEqPtr,
1109	opAndType{OEQ, TPTR32}:     ssa.OpEqPtr,
1110	opAndType{OEQ, TPTR64}:     ssa.OpEqPtr,
1111	opAndType{OEQ, TUINTPTR}:   ssa.OpEqPtr,
1112	opAndType{OEQ, TUNSAFEPTR}: ssa.OpEqPtr,
1113	opAndType{OEQ, TFLOAT64}:   ssa.OpEq64F,
1114	opAndType{OEQ, TFLOAT32}:   ssa.OpEq32F,
1115
1116	opAndType{ONE, TBOOL}:      ssa.OpNeqB,
1117	opAndType{ONE, TINT8}:      ssa.OpNeq8,
1118	opAndType{ONE, TUINT8}:     ssa.OpNeq8,
1119	opAndType{ONE, TINT16}:     ssa.OpNeq16,
1120	opAndType{ONE, TUINT16}:    ssa.OpNeq16,
1121	opAndType{ONE, TINT32}:     ssa.OpNeq32,
1122	opAndType{ONE, TUINT32}:    ssa.OpNeq32,
1123	opAndType{ONE, TINT64}:     ssa.OpNeq64,
1124	opAndType{ONE, TUINT64}:    ssa.OpNeq64,
1125	opAndType{ONE, TINTER}:     ssa.OpNeqInter,
1126	opAndType{ONE, TSLICE}:     ssa.OpNeqSlice,
1127	opAndType{ONE, TFUNC}:      ssa.OpNeqPtr,
1128	opAndType{ONE, TMAP}:       ssa.OpNeqPtr,
1129	opAndType{ONE, TCHAN}:      ssa.OpNeqPtr,
1130	opAndType{ONE, TPTR32}:     ssa.OpNeqPtr,
1131	opAndType{ONE, TPTR64}:     ssa.OpNeqPtr,
1132	opAndType{ONE, TUINTPTR}:   ssa.OpNeqPtr,
1133	opAndType{ONE, TUNSAFEPTR}: ssa.OpNeqPtr,
1134	opAndType{ONE, TFLOAT64}:   ssa.OpNeq64F,
1135	opAndType{ONE, TFLOAT32}:   ssa.OpNeq32F,
1136
1137	opAndType{OLT, TINT8}:    ssa.OpLess8,
1138	opAndType{OLT, TUINT8}:   ssa.OpLess8U,
1139	opAndType{OLT, TINT16}:   ssa.OpLess16,
1140	opAndType{OLT, TUINT16}:  ssa.OpLess16U,
1141	opAndType{OLT, TINT32}:   ssa.OpLess32,
1142	opAndType{OLT, TUINT32}:  ssa.OpLess32U,
1143	opAndType{OLT, TINT64}:   ssa.OpLess64,
1144	opAndType{OLT, TUINT64}:  ssa.OpLess64U,
1145	opAndType{OLT, TFLOAT64}: ssa.OpLess64F,
1146	opAndType{OLT, TFLOAT32}: ssa.OpLess32F,
1147
1148	opAndType{OGT, TINT8}:    ssa.OpGreater8,
1149	opAndType{OGT, TUINT8}:   ssa.OpGreater8U,
1150	opAndType{OGT, TINT16}:   ssa.OpGreater16,
1151	opAndType{OGT, TUINT16}:  ssa.OpGreater16U,
1152	opAndType{OGT, TINT32}:   ssa.OpGreater32,
1153	opAndType{OGT, TUINT32}:  ssa.OpGreater32U,
1154	opAndType{OGT, TINT64}:   ssa.OpGreater64,
1155	opAndType{OGT, TUINT64}:  ssa.OpGreater64U,
1156	opAndType{OGT, TFLOAT64}: ssa.OpGreater64F,
1157	opAndType{OGT, TFLOAT32}: ssa.OpGreater32F,
1158
1159	opAndType{OLE, TINT8}:    ssa.OpLeq8,
1160	opAndType{OLE, TUINT8}:   ssa.OpLeq8U,
1161	opAndType{OLE, TINT16}:   ssa.OpLeq16,
1162	opAndType{OLE, TUINT16}:  ssa.OpLeq16U,
1163	opAndType{OLE, TINT32}:   ssa.OpLeq32,
1164	opAndType{OLE, TUINT32}:  ssa.OpLeq32U,
1165	opAndType{OLE, TINT64}:   ssa.OpLeq64,
1166	opAndType{OLE, TUINT64}:  ssa.OpLeq64U,
1167	opAndType{OLE, TFLOAT64}: ssa.OpLeq64F,
1168	opAndType{OLE, TFLOAT32}: ssa.OpLeq32F,
1169
1170	opAndType{OGE, TINT8}:    ssa.OpGeq8,
1171	opAndType{OGE, TUINT8}:   ssa.OpGeq8U,
1172	opAndType{OGE, TINT16}:   ssa.OpGeq16,
1173	opAndType{OGE, TUINT16}:  ssa.OpGeq16U,
1174	opAndType{OGE, TINT32}:   ssa.OpGeq32,
1175	opAndType{OGE, TUINT32}:  ssa.OpGeq32U,
1176	opAndType{OGE, TINT64}:   ssa.OpGeq64,
1177	opAndType{OGE, TUINT64}:  ssa.OpGeq64U,
1178	opAndType{OGE, TFLOAT64}: ssa.OpGeq64F,
1179	opAndType{OGE, TFLOAT32}: ssa.OpGeq32F,
1180}
1181
1182func (s *state) concreteEtype(t *types.Type) types.EType {
1183	e := t.Etype
1184	switch e {
1185	default:
1186		return e
1187	case TINT:
1188		if s.config.PtrSize == 8 {
1189			return TINT64
1190		}
1191		return TINT32
1192	case TUINT:
1193		if s.config.PtrSize == 8 {
1194			return TUINT64
1195		}
1196		return TUINT32
1197	case TUINTPTR:
1198		if s.config.PtrSize == 8 {
1199			return TUINT64
1200		}
1201		return TUINT32
1202	}
1203}
1204
1205func (s *state) ssaOp(op Op, t *types.Type) ssa.Op {
1206	etype := s.concreteEtype(t)
1207	x, ok := opToSSA[opAndType{op, etype}]
1208	if !ok {
1209		s.Fatalf("unhandled binary op %v %s", op, etype)
1210	}
1211	return x
1212}
1213
1214func floatForComplex(t *types.Type) *types.Type {
1215	if t.Size() == 8 {
1216		return types.Types[TFLOAT32]
1217	} else {
1218		return types.Types[TFLOAT64]
1219	}
1220}
1221
1222type opAndTwoTypes struct {
1223	op     Op
1224	etype1 types.EType
1225	etype2 types.EType
1226}
1227
1228type twoTypes struct {
1229	etype1 types.EType
1230	etype2 types.EType
1231}
1232
1233type twoOpsAndType struct {
1234	op1              ssa.Op
1235	op2              ssa.Op
1236	intermediateType types.EType
1237}
1238
1239var fpConvOpToSSA = map[twoTypes]twoOpsAndType{
1240
1241	twoTypes{TINT8, TFLOAT32}:  twoOpsAndType{ssa.OpSignExt8to32, ssa.OpCvt32to32F, TINT32},
1242	twoTypes{TINT16, TFLOAT32}: twoOpsAndType{ssa.OpSignExt16to32, ssa.OpCvt32to32F, TINT32},
1243	twoTypes{TINT32, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32to32F, TINT32},
1244	twoTypes{TINT64, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64to32F, TINT64},
1245
1246	twoTypes{TINT8, TFLOAT64}:  twoOpsAndType{ssa.OpSignExt8to32, ssa.OpCvt32to64F, TINT32},
1247	twoTypes{TINT16, TFLOAT64}: twoOpsAndType{ssa.OpSignExt16to32, ssa.OpCvt32to64F, TINT32},
1248	twoTypes{TINT32, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32to64F, TINT32},
1249	twoTypes{TINT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64to64F, TINT64},
1250
1251	twoTypes{TFLOAT32, TINT8}:  twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to8, TINT32},
1252	twoTypes{TFLOAT32, TINT16}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to16, TINT32},
1253	twoTypes{TFLOAT32, TINT32}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpCopy, TINT32},
1254	twoTypes{TFLOAT32, TINT64}: twoOpsAndType{ssa.OpCvt32Fto64, ssa.OpCopy, TINT64},
1255
1256	twoTypes{TFLOAT64, TINT8}:  twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to8, TINT32},
1257	twoTypes{TFLOAT64, TINT16}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to16, TINT32},
1258	twoTypes{TFLOAT64, TINT32}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpCopy, TINT32},
1259	twoTypes{TFLOAT64, TINT64}: twoOpsAndType{ssa.OpCvt64Fto64, ssa.OpCopy, TINT64},
1260	// unsigned
1261	twoTypes{TUINT8, TFLOAT32}:  twoOpsAndType{ssa.OpZeroExt8to32, ssa.OpCvt32to32F, TINT32},
1262	twoTypes{TUINT16, TFLOAT32}: twoOpsAndType{ssa.OpZeroExt16to32, ssa.OpCvt32to32F, TINT32},
1263	twoTypes{TUINT32, TFLOAT32}: twoOpsAndType{ssa.OpZeroExt32to64, ssa.OpCvt64to32F, TINT64}, // go wide to dodge unsigned
1264	twoTypes{TUINT64, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpInvalid, TUINT64},            // Cvt64Uto32F, branchy code expansion instead
1265
1266	twoTypes{TUINT8, TFLOAT64}:  twoOpsAndType{ssa.OpZeroExt8to32, ssa.OpCvt32to64F, TINT32},
1267	twoTypes{TUINT16, TFLOAT64}: twoOpsAndType{ssa.OpZeroExt16to32, ssa.OpCvt32to64F, TINT32},
1268	twoTypes{TUINT32, TFLOAT64}: twoOpsAndType{ssa.OpZeroExt32to64, ssa.OpCvt64to64F, TINT64}, // go wide to dodge unsigned
1269	twoTypes{TUINT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpInvalid, TUINT64},            // Cvt64Uto64F, branchy code expansion instead
1270
1271	twoTypes{TFLOAT32, TUINT8}:  twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to8, TINT32},
1272	twoTypes{TFLOAT32, TUINT16}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to16, TINT32},
1273	twoTypes{TFLOAT32, TUINT32}: twoOpsAndType{ssa.OpCvt32Fto64, ssa.OpTrunc64to32, TINT64}, // go wide to dodge unsigned
1274	twoTypes{TFLOAT32, TUINT64}: twoOpsAndType{ssa.OpInvalid, ssa.OpCopy, TUINT64},          // Cvt32Fto64U, branchy code expansion instead
1275
1276	twoTypes{TFLOAT64, TUINT8}:  twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to8, TINT32},
1277	twoTypes{TFLOAT64, TUINT16}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to16, TINT32},
1278	twoTypes{TFLOAT64, TUINT32}: twoOpsAndType{ssa.OpCvt64Fto64, ssa.OpTrunc64to32, TINT64}, // go wide to dodge unsigned
1279	twoTypes{TFLOAT64, TUINT64}: twoOpsAndType{ssa.OpInvalid, ssa.OpCopy, TUINT64},          // Cvt64Fto64U, branchy code expansion instead
1280
1281	// float
1282	twoTypes{TFLOAT64, TFLOAT32}: twoOpsAndType{ssa.OpCvt64Fto32F, ssa.OpCopy, TFLOAT32},
1283	twoTypes{TFLOAT64, TFLOAT64}: twoOpsAndType{ssa.OpRound64F, ssa.OpCopy, TFLOAT64},
1284	twoTypes{TFLOAT32, TFLOAT32}: twoOpsAndType{ssa.OpRound32F, ssa.OpCopy, TFLOAT32},
1285	twoTypes{TFLOAT32, TFLOAT64}: twoOpsAndType{ssa.OpCvt32Fto64F, ssa.OpCopy, TFLOAT64},
1286}
1287
1288// this map is used only for 32-bit arch, and only includes the difference
1289// on 32-bit arch, don't use int64<->float conversion for uint32
1290var fpConvOpToSSA32 = map[twoTypes]twoOpsAndType{
1291	twoTypes{TUINT32, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32Uto32F, TUINT32},
1292	twoTypes{TUINT32, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32Uto64F, TUINT32},
1293	twoTypes{TFLOAT32, TUINT32}: twoOpsAndType{ssa.OpCvt32Fto32U, ssa.OpCopy, TUINT32},
1294	twoTypes{TFLOAT64, TUINT32}: twoOpsAndType{ssa.OpCvt64Fto32U, ssa.OpCopy, TUINT32},
1295}
1296
1297// uint64<->float conversions, only on machines that have intructions for that
1298var uint64fpConvOpToSSA = map[twoTypes]twoOpsAndType{
1299	twoTypes{TUINT64, TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64Uto32F, TUINT64},
1300	twoTypes{TUINT64, TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64Uto64F, TUINT64},
1301	twoTypes{TFLOAT32, TUINT64}: twoOpsAndType{ssa.OpCvt32Fto64U, ssa.OpCopy, TUINT64},
1302	twoTypes{TFLOAT64, TUINT64}: twoOpsAndType{ssa.OpCvt64Fto64U, ssa.OpCopy, TUINT64},
1303}
1304
1305var shiftOpToSSA = map[opAndTwoTypes]ssa.Op{
1306	opAndTwoTypes{OLSH, TINT8, TUINT8}:   ssa.OpLsh8x8,
1307	opAndTwoTypes{OLSH, TUINT8, TUINT8}:  ssa.OpLsh8x8,
1308	opAndTwoTypes{OLSH, TINT8, TUINT16}:  ssa.OpLsh8x16,
1309	opAndTwoTypes{OLSH, TUINT8, TUINT16}: ssa.OpLsh8x16,
1310	opAndTwoTypes{OLSH, TINT8, TUINT32}:  ssa.OpLsh8x32,
1311	opAndTwoTypes{OLSH, TUINT8, TUINT32}: ssa.OpLsh8x32,
1312	opAndTwoTypes{OLSH, TINT8, TUINT64}:  ssa.OpLsh8x64,
1313	opAndTwoTypes{OLSH, TUINT8, TUINT64}: ssa.OpLsh8x64,
1314
1315	opAndTwoTypes{OLSH, TINT16, TUINT8}:   ssa.OpLsh16x8,
1316	opAndTwoTypes{OLSH, TUINT16, TUINT8}:  ssa.OpLsh16x8,
1317	opAndTwoTypes{OLSH, TINT16, TUINT16}:  ssa.OpLsh16x16,
1318	opAndTwoTypes{OLSH, TUINT16, TUINT16}: ssa.OpLsh16x16,
1319	opAndTwoTypes{OLSH, TINT16, TUINT32}:  ssa.OpLsh16x32,
1320	opAndTwoTypes{OLSH, TUINT16, TUINT32}: ssa.OpLsh16x32,
1321	opAndTwoTypes{OLSH, TINT16, TUINT64}:  ssa.OpLsh16x64,
1322	opAndTwoTypes{OLSH, TUINT16, TUINT64}: ssa.OpLsh16x64,
1323
1324	opAndTwoTypes{OLSH, TINT32, TUINT8}:   ssa.OpLsh32x8,
1325	opAndTwoTypes{OLSH, TUINT32, TUINT8}:  ssa.OpLsh32x8,
1326	opAndTwoTypes{OLSH, TINT32, TUINT16}:  ssa.OpLsh32x16,
1327	opAndTwoTypes{OLSH, TUINT32, TUINT16}: ssa.OpLsh32x16,
1328	opAndTwoTypes{OLSH, TINT32, TUINT32}:  ssa.OpLsh32x32,
1329	opAndTwoTypes{OLSH, TUINT32, TUINT32}: ssa.OpLsh32x32,
1330	opAndTwoTypes{OLSH, TINT32, TUINT64}:  ssa.OpLsh32x64,
1331	opAndTwoTypes{OLSH, TUINT32, TUINT64}: ssa.OpLsh32x64,
1332
1333	opAndTwoTypes{OLSH, TINT64, TUINT8}:   ssa.OpLsh64x8,
1334	opAndTwoTypes{OLSH, TUINT64, TUINT8}:  ssa.OpLsh64x8,
1335	opAndTwoTypes{OLSH, TINT64, TUINT16}:  ssa.OpLsh64x16,
1336	opAndTwoTypes{OLSH, TUINT64, TUINT16}: ssa.OpLsh64x16,
1337	opAndTwoTypes{OLSH, TINT64, TUINT32}:  ssa.OpLsh64x32,
1338	opAndTwoTypes{OLSH, TUINT64, TUINT32}: ssa.OpLsh64x32,
1339	opAndTwoTypes{OLSH, TINT64, TUINT64}:  ssa.OpLsh64x64,
1340	opAndTwoTypes{OLSH, TUINT64, TUINT64}: ssa.OpLsh64x64,
1341
1342	opAndTwoTypes{ORSH, TINT8, TUINT8}:   ssa.OpRsh8x8,
1343	opAndTwoTypes{ORSH, TUINT8, TUINT8}:  ssa.OpRsh8Ux8,
1344	opAndTwoTypes{ORSH, TINT8, TUINT16}:  ssa.OpRsh8x16,
1345	opAndTwoTypes{ORSH, TUINT8, TUINT16}: ssa.OpRsh8Ux16,
1346	opAndTwoTypes{ORSH, TINT8, TUINT32}:  ssa.OpRsh8x32,
1347	opAndTwoTypes{ORSH, TUINT8, TUINT32}: ssa.OpRsh8Ux32,
1348	opAndTwoTypes{ORSH, TINT8, TUINT64}:  ssa.OpRsh8x64,
1349	opAndTwoTypes{ORSH, TUINT8, TUINT64}: ssa.OpRsh8Ux64,
1350
1351	opAndTwoTypes{ORSH, TINT16, TUINT8}:   ssa.OpRsh16x8,
1352	opAndTwoTypes{ORSH, TUINT16, TUINT8}:  ssa.OpRsh16Ux8,
1353	opAndTwoTypes{ORSH, TINT16, TUINT16}:  ssa.OpRsh16x16,
1354	opAndTwoTypes{ORSH, TUINT16, TUINT16}: ssa.OpRsh16Ux16,
1355	opAndTwoTypes{ORSH, TINT16, TUINT32}:  ssa.OpRsh16x32,
1356	opAndTwoTypes{ORSH, TUINT16, TUINT32}: ssa.OpRsh16Ux32,
1357	opAndTwoTypes{ORSH, TINT16, TUINT64}:  ssa.OpRsh16x64,
1358	opAndTwoTypes{ORSH, TUINT16, TUINT64}: ssa.OpRsh16Ux64,
1359
1360	opAndTwoTypes{ORSH, TINT32, TUINT8}:   ssa.OpRsh32x8,
1361	opAndTwoTypes{ORSH, TUINT32, TUINT8}:  ssa.OpRsh32Ux8,
1362	opAndTwoTypes{ORSH, TINT32, TUINT16}:  ssa.OpRsh32x16,
1363	opAndTwoTypes{ORSH, TUINT32, TUINT16}: ssa.OpRsh32Ux16,
1364	opAndTwoTypes{ORSH, TINT32, TUINT32}:  ssa.OpRsh32x32,
1365	opAndTwoTypes{ORSH, TUINT32, TUINT32}: ssa.OpRsh32Ux32,
1366	opAndTwoTypes{ORSH, TINT32, TUINT64}:  ssa.OpRsh32x64,
1367	opAndTwoTypes{ORSH, TUINT32, TUINT64}: ssa.OpRsh32Ux64,
1368
1369	opAndTwoTypes{ORSH, TINT64, TUINT8}:   ssa.OpRsh64x8,
1370	opAndTwoTypes{ORSH, TUINT64, TUINT8}:  ssa.OpRsh64Ux8,
1371	opAndTwoTypes{ORSH, TINT64, TUINT16}:  ssa.OpRsh64x16,
1372	opAndTwoTypes{ORSH, TUINT64, TUINT16}: ssa.OpRsh64Ux16,
1373	opAndTwoTypes{ORSH, TINT64, TUINT32}:  ssa.OpRsh64x32,
1374	opAndTwoTypes{ORSH, TUINT64, TUINT32}: ssa.OpRsh64Ux32,
1375	opAndTwoTypes{ORSH, TINT64, TUINT64}:  ssa.OpRsh64x64,
1376	opAndTwoTypes{ORSH, TUINT64, TUINT64}: ssa.OpRsh64Ux64,
1377}
1378
1379func (s *state) ssaShiftOp(op Op, t *types.Type, u *types.Type) ssa.Op {
1380	etype1 := s.concreteEtype(t)
1381	etype2 := s.concreteEtype(u)
1382	x, ok := shiftOpToSSA[opAndTwoTypes{op, etype1, etype2}]
1383	if !ok {
1384		s.Fatalf("unhandled shift op %v etype=%s/%s", op, etype1, etype2)
1385	}
1386	return x
1387}
1388
1389// expr converts the expression n to ssa, adds it to s and returns the ssa result.
1390func (s *state) expr(n *Node) *ssa.Value {
1391	if !(n.Op == ONAME || n.Op == OLITERAL && n.Sym != nil) {
1392		// ONAMEs and named OLITERALs have the line number
1393		// of the decl, not the use. See issue 14742.
1394		s.pushLine(n.Pos)
1395		defer s.popLine()
1396	}
1397
1398	s.stmtList(n.Ninit)
1399	switch n.Op {
1400	case OARRAYBYTESTRTMP:
1401		slice := s.expr(n.Left)
1402		ptr := s.newValue1(ssa.OpSlicePtr, s.f.Config.Types.BytePtr, slice)
1403		len := s.newValue1(ssa.OpSliceLen, types.Types[TINT], slice)
1404		return s.newValue2(ssa.OpStringMake, n.Type, ptr, len)
1405	case OSTRARRAYBYTETMP:
1406		str := s.expr(n.Left)
1407		ptr := s.newValue1(ssa.OpStringPtr, s.f.Config.Types.BytePtr, str)
1408		len := s.newValue1(ssa.OpStringLen, types.Types[TINT], str)
1409		return s.newValue3(ssa.OpSliceMake, n.Type, ptr, len, len)
1410	case OCFUNC:
1411		aux := s.lookupSymbol(n, &ssa.ExternSymbol{Sym: n.Left.Sym.Linksym()})
1412		return s.entryNewValue1A(ssa.OpAddr, n.Type, aux, s.sb)
1413	case ONAME:
1414		if n.Class() == PFUNC {
1415			// "value" of a function is the address of the function's closure
1416			sym := funcsym(n.Sym).Linksym()
1417			aux := s.lookupSymbol(n, &ssa.ExternSymbol{Sym: sym})
1418			return s.entryNewValue1A(ssa.OpAddr, types.NewPtr(n.Type), aux, s.sb)
1419		}
1420		if s.canSSA(n) {
1421			return s.variable(n, n.Type)
1422		}
1423		addr := s.addr(n, false)
1424		return s.newValue2(ssa.OpLoad, n.Type, addr, s.mem())
1425	case OCLOSUREVAR:
1426		addr := s.addr(n, false)
1427		return s.newValue2(ssa.OpLoad, n.Type, addr, s.mem())
1428	case OLITERAL:
1429		switch u := n.Val().U.(type) {
1430		case *Mpint:
1431			i := u.Int64()
1432			switch n.Type.Size() {
1433			case 1:
1434				return s.constInt8(n.Type, int8(i))
1435			case 2:
1436				return s.constInt16(n.Type, int16(i))
1437			case 4:
1438				return s.constInt32(n.Type, int32(i))
1439			case 8:
1440				return s.constInt64(n.Type, i)
1441			default:
1442				s.Fatalf("bad integer size %d", n.Type.Size())
1443				return nil
1444			}
1445		case string:
1446			if u == "" {
1447				return s.constEmptyString(n.Type)
1448			}
1449			return s.entryNewValue0A(ssa.OpConstString, n.Type, u)
1450		case bool:
1451			return s.constBool(u)
1452		case *NilVal:
1453			t := n.Type
1454			switch {
1455			case t.IsSlice():
1456				return s.constSlice(t)
1457			case t.IsInterface():
1458				return s.constInterface(t)
1459			default:
1460				return s.constNil(t)
1461			}
1462		case *Mpflt:
1463			switch n.Type.Size() {
1464			case 4:
1465				return s.constFloat32(n.Type, u.Float32())
1466			case 8:
1467				return s.constFloat64(n.Type, u.Float64())
1468			default:
1469				s.Fatalf("bad float size %d", n.Type.Size())
1470				return nil
1471			}
1472		case *Mpcplx:
1473			r := &u.Real
1474			i := &u.Imag
1475			switch n.Type.Size() {
1476			case 8:
1477				pt := types.Types[TFLOAT32]
1478				return s.newValue2(ssa.OpComplexMake, n.Type,
1479					s.constFloat32(pt, r.Float32()),
1480					s.constFloat32(pt, i.Float32()))
1481			case 16:
1482				pt := types.Types[TFLOAT64]
1483				return s.newValue2(ssa.OpComplexMake, n.Type,
1484					s.constFloat64(pt, r.Float64()),
1485					s.constFloat64(pt, i.Float64()))
1486			default:
1487				s.Fatalf("bad float size %d", n.Type.Size())
1488				return nil
1489			}
1490
1491		default:
1492			s.Fatalf("unhandled OLITERAL %v", n.Val().Ctype())
1493			return nil
1494		}
1495	case OCONVNOP:
1496		to := n.Type
1497		from := n.Left.Type
1498
1499		// Assume everything will work out, so set up our return value.
1500		// Anything interesting that happens from here is a fatal.
1501		x := s.expr(n.Left)
1502
1503		// Special case for not confusing GC and liveness.
1504		// We don't want pointers accidentally classified
1505		// as not-pointers or vice-versa because of copy
1506		// elision.
1507		if to.IsPtrShaped() != from.IsPtrShaped() {
1508			return s.newValue2(ssa.OpConvert, to, x, s.mem())
1509		}
1510
1511		v := s.newValue1(ssa.OpCopy, to, x) // ensure that v has the right type
1512
1513		// CONVNOP closure
1514		if to.Etype == TFUNC && from.IsPtrShaped() {
1515			return v
1516		}
1517
1518		// named <--> unnamed type or typed <--> untyped const
1519		if from.Etype == to.Etype {
1520			return v
1521		}
1522
1523		// unsafe.Pointer <--> *T
1524		if to.Etype == TUNSAFEPTR && from.IsPtr() || from.Etype == TUNSAFEPTR && to.IsPtr() {
1525			return v
1526		}
1527
1528		dowidth(from)
1529		dowidth(to)
1530		if from.Width != to.Width {
1531			s.Fatalf("CONVNOP width mismatch %v (%d) -> %v (%d)\n", from, from.Width, to, to.Width)
1532			return nil
1533		}
1534		if etypesign(from.Etype) != etypesign(to.Etype) {
1535			s.Fatalf("CONVNOP sign mismatch %v (%s) -> %v (%s)\n", from, from.Etype, to, to.Etype)
1536			return nil
1537		}
1538
1539		if instrumenting {
1540			// These appear to be fine, but they fail the
1541			// integer constraint below, so okay them here.
1542			// Sample non-integer conversion: map[string]string -> *uint8
1543			return v
1544		}
1545
1546		if etypesign(from.Etype) == 0 {
1547			s.Fatalf("CONVNOP unrecognized non-integer %v -> %v\n", from, to)
1548			return nil
1549		}
1550
1551		// integer, same width, same sign
1552		return v
1553
1554	case OCONV:
1555		x := s.expr(n.Left)
1556		ft := n.Left.Type // from type
1557		tt := n.Type      // to type
1558		if ft.IsBoolean() && tt.IsKind(TUINT8) {
1559			// Bool -> uint8 is generated internally when indexing into runtime.staticbyte.
1560			return s.newValue1(ssa.OpCopy, n.Type, x)
1561		}
1562		if ft.IsInteger() && tt.IsInteger() {
1563			var op ssa.Op
1564			if tt.Size() == ft.Size() {
1565				op = ssa.OpCopy
1566			} else if tt.Size() < ft.Size() {
1567				// truncation
1568				switch 10*ft.Size() + tt.Size() {
1569				case 21:
1570					op = ssa.OpTrunc16to8
1571				case 41:
1572					op = ssa.OpTrunc32to8
1573				case 42:
1574					op = ssa.OpTrunc32to16
1575				case 81:
1576					op = ssa.OpTrunc64to8
1577				case 82:
1578					op = ssa.OpTrunc64to16
1579				case 84:
1580					op = ssa.OpTrunc64to32
1581				default:
1582					s.Fatalf("weird integer truncation %v -> %v", ft, tt)
1583				}
1584			} else if ft.IsSigned() {
1585				// sign extension
1586				switch 10*ft.Size() + tt.Size() {
1587				case 12:
1588					op = ssa.OpSignExt8to16
1589				case 14:
1590					op = ssa.OpSignExt8to32
1591				case 18:
1592					op = ssa.OpSignExt8to64
1593				case 24:
1594					op = ssa.OpSignExt16to32
1595				case 28:
1596					op = ssa.OpSignExt16to64
1597				case 48:
1598					op = ssa.OpSignExt32to64
1599				default:
1600					s.Fatalf("bad integer sign extension %v -> %v", ft, tt)
1601				}
1602			} else {
1603				// zero extension
1604				switch 10*ft.Size() + tt.Size() {
1605				case 12:
1606					op = ssa.OpZeroExt8to16
1607				case 14:
1608					op = ssa.OpZeroExt8to32
1609				case 18:
1610					op = ssa.OpZeroExt8to64
1611				case 24:
1612					op = ssa.OpZeroExt16to32
1613				case 28:
1614					op = ssa.OpZeroExt16to64
1615				case 48:
1616					op = ssa.OpZeroExt32to64
1617				default:
1618					s.Fatalf("weird integer sign extension %v -> %v", ft, tt)
1619				}
1620			}
1621			return s.newValue1(op, n.Type, x)
1622		}
1623
1624		if ft.IsFloat() || tt.IsFloat() {
1625			conv, ok := fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]
1626			if s.config.RegSize == 4 && thearch.LinkArch.Family != sys.MIPS {
1627				if conv1, ok1 := fpConvOpToSSA32[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 {
1628					conv = conv1
1629				}
1630			}
1631			if thearch.LinkArch.Family == sys.ARM64 {
1632				if conv1, ok1 := uint64fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 {
1633					conv = conv1
1634				}
1635			}
1636
1637			if thearch.LinkArch.Family == sys.MIPS {
1638				if ft.Size() == 4 && ft.IsInteger() && !ft.IsSigned() {
1639					// tt is float32 or float64, and ft is also unsigned
1640					if tt.Size() == 4 {
1641						return s.uint32Tofloat32(n, x, ft, tt)
1642					}
1643					if tt.Size() == 8 {
1644						return s.uint32Tofloat64(n, x, ft, tt)
1645					}
1646				} else if tt.Size() == 4 && tt.IsInteger() && !tt.IsSigned() {
1647					// ft is float32 or float64, and tt is unsigned integer
1648					if ft.Size() == 4 {
1649						return s.float32ToUint32(n, x, ft, tt)
1650					}
1651					if ft.Size() == 8 {
1652						return s.float64ToUint32(n, x, ft, tt)
1653					}
1654				}
1655			}
1656
1657			if !ok {
1658				s.Fatalf("weird float conversion %v -> %v", ft, tt)
1659			}
1660			op1, op2, it := conv.op1, conv.op2, conv.intermediateType
1661
1662			if op1 != ssa.OpInvalid && op2 != ssa.OpInvalid {
1663				// normal case, not tripping over unsigned 64
1664				if op1 == ssa.OpCopy {
1665					if op2 == ssa.OpCopy {
1666						return x
1667					}
1668					return s.newValue1(op2, n.Type, x)
1669				}
1670				if op2 == ssa.OpCopy {
1671					return s.newValue1(op1, n.Type, x)
1672				}
1673				return s.newValue1(op2, n.Type, s.newValue1(op1, types.Types[it], x))
1674			}
1675			// Tricky 64-bit unsigned cases.
1676			if ft.IsInteger() {
1677				// tt is float32 or float64, and ft is also unsigned
1678				if tt.Size() == 4 {
1679					return s.uint64Tofloat32(n, x, ft, tt)
1680				}
1681				if tt.Size() == 8 {
1682					return s.uint64Tofloat64(n, x, ft, tt)
1683				}
1684				s.Fatalf("weird unsigned integer to float conversion %v -> %v", ft, tt)
1685			}
1686			// ft is float32 or float64, and tt is unsigned integer
1687			if ft.Size() == 4 {
1688				return s.float32ToUint64(n, x, ft, tt)
1689			}
1690			if ft.Size() == 8 {
1691				return s.float64ToUint64(n, x, ft, tt)
1692			}
1693			s.Fatalf("weird float to unsigned integer conversion %v -> %v", ft, tt)
1694			return nil
1695		}
1696
1697		if ft.IsComplex() && tt.IsComplex() {
1698			var op ssa.Op
1699			if ft.Size() == tt.Size() {
1700				switch ft.Size() {
1701				case 8:
1702					op = ssa.OpRound32F
1703				case 16:
1704					op = ssa.OpRound64F
1705				default:
1706					s.Fatalf("weird complex conversion %v -> %v", ft, tt)
1707				}
1708			} else if ft.Size() == 8 && tt.Size() == 16 {
1709				op = ssa.OpCvt32Fto64F
1710			} else if ft.Size() == 16 && tt.Size() == 8 {
1711				op = ssa.OpCvt64Fto32F
1712			} else {
1713				s.Fatalf("weird complex conversion %v -> %v", ft, tt)
1714			}
1715			ftp := floatForComplex(ft)
1716			ttp := floatForComplex(tt)
1717			return s.newValue2(ssa.OpComplexMake, tt,
1718				s.newValue1(op, ttp, s.newValue1(ssa.OpComplexReal, ftp, x)),
1719				s.newValue1(op, ttp, s.newValue1(ssa.OpComplexImag, ftp, x)))
1720		}
1721
1722		s.Fatalf("unhandled OCONV %s -> %s", n.Left.Type.Etype, n.Type.Etype)
1723		return nil
1724
1725	case ODOTTYPE:
1726		res, _ := s.dottype(n, false)
1727		return res
1728
1729	// binary ops
1730	case OLT, OEQ, ONE, OLE, OGE, OGT:
1731		a := s.expr(n.Left)
1732		b := s.expr(n.Right)
1733		if n.Left.Type.IsComplex() {
1734			pt := floatForComplex(n.Left.Type)
1735			op := s.ssaOp(OEQ, pt)
1736			r := s.newValue2(op, types.Types[TBOOL], s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b))
1737			i := s.newValue2(op, types.Types[TBOOL], s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b))
1738			c := s.newValue2(ssa.OpAndB, types.Types[TBOOL], r, i)
1739			switch n.Op {
1740			case OEQ:
1741				return c
1742			case ONE:
1743				return s.newValue1(ssa.OpNot, types.Types[TBOOL], c)
1744			default:
1745				s.Fatalf("ordered complex compare %v", n.Op)
1746			}
1747		}
1748		return s.newValue2(s.ssaOp(n.Op, n.Left.Type), types.Types[TBOOL], a, b)
1749	case OMUL:
1750		a := s.expr(n.Left)
1751		b := s.expr(n.Right)
1752		if n.Type.IsComplex() {
1753			mulop := ssa.OpMul64F
1754			addop := ssa.OpAdd64F
1755			subop := ssa.OpSub64F
1756			pt := floatForComplex(n.Type) // Could be Float32 or Float64
1757			wt := types.Types[TFLOAT64]   // Compute in Float64 to minimize cancelation error
1758
1759			areal := s.newValue1(ssa.OpComplexReal, pt, a)
1760			breal := s.newValue1(ssa.OpComplexReal, pt, b)
1761			aimag := s.newValue1(ssa.OpComplexImag, pt, a)
1762			bimag := s.newValue1(ssa.OpComplexImag, pt, b)
1763
1764			if pt != wt { // Widen for calculation
1765				areal = s.newValue1(ssa.OpCvt32Fto64F, wt, areal)
1766				breal = s.newValue1(ssa.OpCvt32Fto64F, wt, breal)
1767				aimag = s.newValue1(ssa.OpCvt32Fto64F, wt, aimag)
1768				bimag = s.newValue1(ssa.OpCvt32Fto64F, wt, bimag)
1769			}
1770
1771			xreal := s.newValue2(subop, wt, s.newValue2(mulop, wt, areal, breal), s.newValue2(mulop, wt, aimag, bimag))
1772			ximag := s.newValue2(addop, wt, s.newValue2(mulop, wt, areal, bimag), s.newValue2(mulop, wt, aimag, breal))
1773
1774			if pt != wt { // Narrow to store back
1775				xreal = s.newValue1(ssa.OpCvt64Fto32F, pt, xreal)
1776				ximag = s.newValue1(ssa.OpCvt64Fto32F, pt, ximag)
1777			}
1778
1779			return s.newValue2(ssa.OpComplexMake, n.Type, xreal, ximag)
1780		}
1781		return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
1782
1783	case ODIV:
1784		a := s.expr(n.Left)
1785		b := s.expr(n.Right)
1786		if n.Type.IsComplex() {
1787			// TODO this is not executed because the front-end substitutes a runtime call.
1788			// That probably ought to change; with modest optimization the widen/narrow
1789			// conversions could all be elided in larger expression trees.
1790			mulop := ssa.OpMul64F
1791			addop := ssa.OpAdd64F
1792			subop := ssa.OpSub64F
1793			divop := ssa.OpDiv64F
1794			pt := floatForComplex(n.Type) // Could be Float32 or Float64
1795			wt := types.Types[TFLOAT64]   // Compute in Float64 to minimize cancelation error
1796
1797			areal := s.newValue1(ssa.OpComplexReal, pt, a)
1798			breal := s.newValue1(ssa.OpComplexReal, pt, b)
1799			aimag := s.newValue1(ssa.OpComplexImag, pt, a)
1800			bimag := s.newValue1(ssa.OpComplexImag, pt, b)
1801
1802			if pt != wt { // Widen for calculation
1803				areal = s.newValue1(ssa.OpCvt32Fto64F, wt, areal)
1804				breal = s.newValue1(ssa.OpCvt32Fto64F, wt, breal)
1805				aimag = s.newValue1(ssa.OpCvt32Fto64F, wt, aimag)
1806				bimag = s.newValue1(ssa.OpCvt32Fto64F, wt, bimag)
1807			}
1808
1809			denom := s.newValue2(addop, wt, s.newValue2(mulop, wt, breal, breal), s.newValue2(mulop, wt, bimag, bimag))
1810			xreal := s.newValue2(addop, wt, s.newValue2(mulop, wt, areal, breal), s.newValue2(mulop, wt, aimag, bimag))
1811			ximag := s.newValue2(subop, wt, s.newValue2(mulop, wt, aimag, breal), s.newValue2(mulop, wt, areal, bimag))
1812
1813			// TODO not sure if this is best done in wide precision or narrow
1814			// Double-rounding might be an issue.
1815			// Note that the pre-SSA implementation does the entire calculation
1816			// in wide format, so wide is compatible.
1817			xreal = s.newValue2(divop, wt, xreal, denom)
1818			ximag = s.newValue2(divop, wt, ximag, denom)
1819
1820			if pt != wt { // Narrow to store back
1821				xreal = s.newValue1(ssa.OpCvt64Fto32F, pt, xreal)
1822				ximag = s.newValue1(ssa.OpCvt64Fto32F, pt, ximag)
1823			}
1824			return s.newValue2(ssa.OpComplexMake, n.Type, xreal, ximag)
1825		}
1826		if n.Type.IsFloat() {
1827			return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
1828		}
1829		return s.intDivide(n, a, b)
1830	case OMOD:
1831		a := s.expr(n.Left)
1832		b := s.expr(n.Right)
1833		return s.intDivide(n, a, b)
1834	case OADD, OSUB:
1835		a := s.expr(n.Left)
1836		b := s.expr(n.Right)
1837		if n.Type.IsComplex() {
1838			pt := floatForComplex(n.Type)
1839			op := s.ssaOp(n.Op, pt)
1840			return s.newValue2(ssa.OpComplexMake, n.Type,
1841				s.newValue2(op, pt, s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b)),
1842				s.newValue2(op, pt, s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b)))
1843		}
1844		return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
1845	case OAND, OOR, OXOR:
1846		a := s.expr(n.Left)
1847		b := s.expr(n.Right)
1848		return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
1849	case OLSH, ORSH:
1850		a := s.expr(n.Left)
1851		b := s.expr(n.Right)
1852		return s.newValue2(s.ssaShiftOp(n.Op, n.Type, n.Right.Type), a.Type, a, b)
1853	case OANDAND, OOROR:
1854		// To implement OANDAND (and OOROR), we introduce a
1855		// new temporary variable to hold the result. The
1856		// variable is associated with the OANDAND node in the
1857		// s.vars table (normally variables are only
1858		// associated with ONAME nodes). We convert
1859		//     A && B
1860		// to
1861		//     var = A
1862		//     if var {
1863		//         var = B
1864		//     }
1865		// Using var in the subsequent block introduces the
1866		// necessary phi variable.
1867		el := s.expr(n.Left)
1868		s.vars[n] = el
1869
1870		b := s.endBlock()
1871		b.Kind = ssa.BlockIf
1872		b.SetControl(el)
1873		// In theory, we should set b.Likely here based on context.
1874		// However, gc only gives us likeliness hints
1875		// in a single place, for plain OIF statements,
1876		// and passing around context is finnicky, so don't bother for now.
1877
1878		bRight := s.f.NewBlock(ssa.BlockPlain)
1879		bResult := s.f.NewBlock(ssa.BlockPlain)
1880		if n.Op == OANDAND {
1881			b.AddEdgeTo(bRight)
1882			b.AddEdgeTo(bResult)
1883		} else if n.Op == OOROR {
1884			b.AddEdgeTo(bResult)
1885			b.AddEdgeTo(bRight)
1886		}
1887
1888		s.startBlock(bRight)
1889		er := s.expr(n.Right)
1890		s.vars[n] = er
1891
1892		b = s.endBlock()
1893		b.AddEdgeTo(bResult)
1894
1895		s.startBlock(bResult)
1896		return s.variable(n, types.Types[TBOOL])
1897	case OCOMPLEX:
1898		r := s.expr(n.Left)
1899		i := s.expr(n.Right)
1900		return s.newValue2(ssa.OpComplexMake, n.Type, r, i)
1901
1902	// unary ops
1903	case OMINUS:
1904		a := s.expr(n.Left)
1905		if n.Type.IsComplex() {
1906			tp := floatForComplex(n.Type)
1907			negop := s.ssaOp(n.Op, tp)
1908			return s.newValue2(ssa.OpComplexMake, n.Type,
1909				s.newValue1(negop, tp, s.newValue1(ssa.OpComplexReal, tp, a)),
1910				s.newValue1(negop, tp, s.newValue1(ssa.OpComplexImag, tp, a)))
1911		}
1912		return s.newValue1(s.ssaOp(n.Op, n.Type), a.Type, a)
1913	case ONOT, OCOM:
1914		a := s.expr(n.Left)
1915		return s.newValue1(s.ssaOp(n.Op, n.Type), a.Type, a)
1916	case OIMAG, OREAL:
1917		a := s.expr(n.Left)
1918		return s.newValue1(s.ssaOp(n.Op, n.Left.Type), n.Type, a)
1919	case OPLUS:
1920		return s.expr(n.Left)
1921
1922	case OADDR:
1923		return s.addr(n.Left, n.Bounded())
1924
1925	case OINDREGSP:
1926		addr := s.constOffPtrSP(types.NewPtr(n.Type), n.Xoffset)
1927		return s.newValue2(ssa.OpLoad, n.Type, addr, s.mem())
1928
1929	case OIND:
1930		p := s.exprPtr(n.Left, false, n.Pos)
1931		return s.newValue2(ssa.OpLoad, n.Type, p, s.mem())
1932
1933	case ODOT:
1934		t := n.Left.Type
1935		if canSSAType(t) {
1936			v := s.expr(n.Left)
1937			return s.newValue1I(ssa.OpStructSelect, n.Type, int64(fieldIdx(n)), v)
1938		}
1939		if n.Left.Op == OSTRUCTLIT {
1940			// All literals with nonzero fields have already been
1941			// rewritten during walk. Any that remain are just T{}
1942			// or equivalents. Use the zero value.
1943			if !iszero(n.Left) {
1944				Fatalf("literal with nonzero value in SSA: %v", n.Left)
1945			}
1946			return s.zeroVal(n.Type)
1947		}
1948		p := s.addr(n, false)
1949		return s.newValue2(ssa.OpLoad, n.Type, p, s.mem())
1950
1951	case ODOTPTR:
1952		p := s.exprPtr(n.Left, false, n.Pos)
1953		p = s.newValue1I(ssa.OpOffPtr, types.NewPtr(n.Type), n.Xoffset, p)
1954		return s.newValue2(ssa.OpLoad, n.Type, p, s.mem())
1955
1956	case OINDEX:
1957		switch {
1958		case n.Left.Type.IsString():
1959			if n.Bounded() && Isconst(n.Left, CTSTR) && Isconst(n.Right, CTINT) {
1960				// Replace "abc"[1] with 'b'.
1961				// Delayed until now because "abc"[1] is not an ideal constant.
1962				// See test/fixedbugs/issue11370.go.
1963				return s.newValue0I(ssa.OpConst8, types.Types[TUINT8], int64(int8(n.Left.Val().U.(string)[n.Right.Int64()])))
1964			}
1965			a := s.expr(n.Left)
1966			i := s.expr(n.Right)
1967			i = s.extendIndex(i, panicindex)
1968			if !n.Bounded() {
1969				len := s.newValue1(ssa.OpStringLen, types.Types[TINT], a)
1970				s.boundsCheck(i, len)
1971			}
1972			ptrtyp := s.f.Config.Types.BytePtr
1973			ptr := s.newValue1(ssa.OpStringPtr, ptrtyp, a)
1974			if Isconst(n.Right, CTINT) {
1975				ptr = s.newValue1I(ssa.OpOffPtr, ptrtyp, n.Right.Int64(), ptr)
1976			} else {
1977				ptr = s.newValue2(ssa.OpAddPtr, ptrtyp, ptr, i)
1978			}
1979			return s.newValue2(ssa.OpLoad, types.Types[TUINT8], ptr, s.mem())
1980		case n.Left.Type.IsSlice():
1981			p := s.addr(n, false)
1982			return s.newValue2(ssa.OpLoad, n.Left.Type.Elem(), p, s.mem())
1983		case n.Left.Type.IsArray():
1984			if bound := n.Left.Type.NumElem(); bound <= 1 {
1985				// SSA can handle arrays of length at most 1.
1986				a := s.expr(n.Left)
1987				i := s.expr(n.Right)
1988				if bound == 0 {
1989					// Bounds check will never succeed.  Might as well
1990					// use constants for the bounds check.
1991					z := s.constInt(types.Types[TINT], 0)
1992					s.boundsCheck(z, z)
1993					// The return value won't be live, return junk.
1994					return s.newValue0(ssa.OpUnknown, n.Type)
1995				}
1996				i = s.extendIndex(i, panicindex)
1997				if !n.Bounded() {
1998					s.boundsCheck(i, s.constInt(types.Types[TINT], bound))
1999				}
2000				return s.newValue1I(ssa.OpArraySelect, n.Type, 0, a)
2001			}
2002			p := s.addr(n, false)
2003			return s.newValue2(ssa.OpLoad, n.Left.Type.Elem(), p, s.mem())
2004		default:
2005			s.Fatalf("bad type for index %v", n.Left.Type)
2006			return nil
2007		}
2008
2009	case OLEN, OCAP:
2010		switch {
2011		case n.Left.Type.IsSlice():
2012			op := ssa.OpSliceLen
2013			if n.Op == OCAP {
2014				op = ssa.OpSliceCap
2015			}
2016			return s.newValue1(op, types.Types[TINT], s.expr(n.Left))
2017		case n.Left.Type.IsString(): // string; not reachable for OCAP
2018			return s.newValue1(ssa.OpStringLen, types.Types[TINT], s.expr(n.Left))
2019		case n.Left.Type.IsMap(), n.Left.Type.IsChan():
2020			return s.referenceTypeBuiltin(n, s.expr(n.Left))
2021		default: // array
2022			return s.constInt(types.Types[TINT], n.Left.Type.NumElem())
2023		}
2024
2025	case OSPTR:
2026		a := s.expr(n.Left)
2027		if n.Left.Type.IsSlice() {
2028			return s.newValue1(ssa.OpSlicePtr, n.Type, a)
2029		} else {
2030			return s.newValue1(ssa.OpStringPtr, n.Type, a)
2031		}
2032
2033	case OITAB:
2034		a := s.expr(n.Left)
2035		return s.newValue1(ssa.OpITab, n.Type, a)
2036
2037	case OIDATA:
2038		a := s.expr(n.Left)
2039		return s.newValue1(ssa.OpIData, n.Type, a)
2040
2041	case OEFACE:
2042		tab := s.expr(n.Left)
2043		data := s.expr(n.Right)
2044		return s.newValue2(ssa.OpIMake, n.Type, tab, data)
2045
2046	case OSLICE, OSLICEARR, OSLICE3, OSLICE3ARR:
2047		v := s.expr(n.Left)
2048		var i, j, k *ssa.Value
2049		low, high, max := n.SliceBounds()
2050		if low != nil {
2051			i = s.extendIndex(s.expr(low), panicslice)
2052		}
2053		if high != nil {
2054			j = s.extendIndex(s.expr(high), panicslice)
2055		}
2056		if max != nil {
2057			k = s.extendIndex(s.expr(max), panicslice)
2058		}
2059		p, l, c := s.slice(n.Left.Type, v, i, j, k)
2060		return s.newValue3(ssa.OpSliceMake, n.Type, p, l, c)
2061
2062	case OSLICESTR:
2063		v := s.expr(n.Left)
2064		var i, j *ssa.Value
2065		low, high, _ := n.SliceBounds()
2066		if low != nil {
2067			i = s.extendIndex(s.expr(low), panicslice)
2068		}
2069		if high != nil {
2070			j = s.extendIndex(s.expr(high), panicslice)
2071		}
2072		p, l, _ := s.slice(n.Left.Type, v, i, j, nil)
2073		return s.newValue2(ssa.OpStringMake, n.Type, p, l)
2074
2075	case OCALLFUNC:
2076		if isIntrinsicCall(n) {
2077			return s.intrinsicCall(n)
2078		}
2079		fallthrough
2080
2081	case OCALLINTER, OCALLMETH:
2082		a := s.call(n, callNormal)
2083		return s.newValue2(ssa.OpLoad, n.Type, a, s.mem())
2084
2085	case OGETG:
2086		return s.newValue1(ssa.OpGetG, n.Type, s.mem())
2087
2088	case OAPPEND:
2089		return s.append(n, false)
2090
2091	case OSTRUCTLIT, OARRAYLIT:
2092		// All literals with nonzero fields have already been
2093		// rewritten during walk. Any that remain are just T{}
2094		// or equivalents. Use the zero value.
2095		if !iszero(n) {
2096			Fatalf("literal with nonzero value in SSA: %v", n)
2097		}
2098		return s.zeroVal(n.Type)
2099
2100	default:
2101		s.Fatalf("unhandled expr %v", n.Op)
2102		return nil
2103	}
2104}
2105
2106// append converts an OAPPEND node to SSA.
2107// If inplace is false, it converts the OAPPEND expression n to an ssa.Value,
2108// adds it to s, and returns the Value.
2109// If inplace is true, it writes the result of the OAPPEND expression n
2110// back to the slice being appended to, and returns nil.
2111// inplace MUST be set to false if the slice can be SSA'd.
2112func (s *state) append(n *Node, inplace bool) *ssa.Value {
2113	// If inplace is false, process as expression "append(s, e1, e2, e3)":
2114	//
2115	// ptr, len, cap := s
2116	// newlen := len + 3
2117	// if newlen > cap {
2118	//     ptr, len, cap = growslice(s, newlen)
2119	//     newlen = len + 3 // recalculate to avoid a spill
2120	// }
2121	// // with write barriers, if needed:
2122	// *(ptr+len) = e1
2123	// *(ptr+len+1) = e2
2124	// *(ptr+len+2) = e3
2125	// return makeslice(ptr, newlen, cap)
2126	//
2127	//
2128	// If inplace is true, process as statement "s = append(s, e1, e2, e3)":
2129	//
2130	// a := &s
2131	// ptr, len, cap := s
2132	// newlen := len + 3
2133	// if newlen > cap {
2134	//    newptr, len, newcap = growslice(ptr, len, cap, newlen)
2135	//    vardef(a)       // if necessary, advise liveness we are writing a new a
2136	//    *a.cap = newcap // write before ptr to avoid a spill
2137	//    *a.ptr = newptr // with write barrier
2138	// }
2139	// newlen = len + 3 // recalculate to avoid a spill
2140	// *a.len = newlen
2141	// // with write barriers, if needed:
2142	// *(ptr+len) = e1
2143	// *(ptr+len+1) = e2
2144	// *(ptr+len+2) = e3
2145
2146	et := n.Type.Elem()
2147	pt := types.NewPtr(et)
2148
2149	// Evaluate slice
2150	sn := n.List.First() // the slice node is the first in the list
2151
2152	var slice, addr *ssa.Value
2153	if inplace {
2154		addr = s.addr(sn, false)
2155		slice = s.newValue2(ssa.OpLoad, n.Type, addr, s.mem())
2156	} else {
2157		slice = s.expr(sn)
2158	}
2159
2160	// Allocate new blocks
2161	grow := s.f.NewBlock(ssa.BlockPlain)
2162	assign := s.f.NewBlock(ssa.BlockPlain)
2163
2164	// Decide if we need to grow
2165	nargs := int64(n.List.Len() - 1)
2166	p := s.newValue1(ssa.OpSlicePtr, pt, slice)
2167	l := s.newValue1(ssa.OpSliceLen, types.Types[TINT], slice)
2168	c := s.newValue1(ssa.OpSliceCap, types.Types[TINT], slice)
2169	nl := s.newValue2(s.ssaOp(OADD, types.Types[TINT]), types.Types[TINT], l, s.constInt(types.Types[TINT], nargs))
2170
2171	cmp := s.newValue2(s.ssaOp(OGT, types.Types[TINT]), types.Types[TBOOL], nl, c)
2172	s.vars[&ptrVar] = p
2173
2174	if !inplace {
2175		s.vars[&newlenVar] = nl
2176		s.vars[&capVar] = c
2177	} else {
2178		s.vars[&lenVar] = l
2179	}
2180
2181	b := s.endBlock()
2182	b.Kind = ssa.BlockIf
2183	b.Likely = ssa.BranchUnlikely
2184	b.SetControl(cmp)
2185	b.AddEdgeTo(grow)
2186	b.AddEdgeTo(assign)
2187
2188	// Call growslice
2189	s.startBlock(grow)
2190	taddr := s.expr(n.Left)
2191	r := s.rtcall(growslice, true, []*types.Type{pt, types.Types[TINT], types.Types[TINT]}, taddr, p, l, c, nl)
2192
2193	if inplace {
2194		if sn.Op == ONAME {
2195			// Tell liveness we're about to build a new slice
2196			s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, sn, s.mem())
2197		}
2198		capaddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, int64(array_cap), addr)
2199		s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, types.Types[TINT], capaddr, r[2], s.mem())
2200		s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, pt, addr, r[0], s.mem())
2201		// load the value we just stored to avoid having to spill it
2202		s.vars[&ptrVar] = s.newValue2(ssa.OpLoad, pt, addr, s.mem())
2203		s.vars[&lenVar] = r[1] // avoid a spill in the fast path
2204	} else {
2205		s.vars[&ptrVar] = r[0]
2206		s.vars[&newlenVar] = s.newValue2(s.ssaOp(OADD, types.Types[TINT]), types.Types[TINT], r[1], s.constInt(types.Types[TINT], nargs))
2207		s.vars[&capVar] = r[2]
2208	}
2209
2210	b = s.endBlock()
2211	b.AddEdgeTo(assign)
2212
2213	// assign new elements to slots
2214	s.startBlock(assign)
2215
2216	if inplace {
2217		l = s.variable(&lenVar, types.Types[TINT]) // generates phi for len
2218		nl = s.newValue2(s.ssaOp(OADD, types.Types[TINT]), types.Types[TINT], l, s.constInt(types.Types[TINT], nargs))
2219		lenaddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, int64(array_nel), addr)
2220		s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, types.Types[TINT], lenaddr, nl, s.mem())
2221	}
2222
2223	// Evaluate args
2224	type argRec struct {
2225		// if store is true, we're appending the value v.  If false, we're appending the
2226		// value at *v.
2227		v     *ssa.Value
2228		store bool
2229	}
2230	args := make([]argRec, 0, nargs)
2231	for _, n := range n.List.Slice()[1:] {
2232		if canSSAType(n.Type) {
2233			args = append(args, argRec{v: s.expr(n), store: true})
2234		} else {
2235			v := s.addr(n, false)
2236			args = append(args, argRec{v: v})
2237		}
2238	}
2239
2240	p = s.variable(&ptrVar, pt) // generates phi for ptr
2241	if !inplace {
2242		nl = s.variable(&newlenVar, types.Types[TINT]) // generates phi for nl
2243		c = s.variable(&capVar, types.Types[TINT])     // generates phi for cap
2244	}
2245	p2 := s.newValue2(ssa.OpPtrIndex, pt, p, l)
2246	for i, arg := range args {
2247		addr := s.newValue2(ssa.OpPtrIndex, pt, p2, s.constInt(types.Types[TINT], int64(i)))
2248		if arg.store {
2249			s.storeType(et, addr, arg.v, 0)
2250		} else {
2251			store := s.newValue3I(ssa.OpMove, types.TypeMem, et.Size(), addr, arg.v, s.mem())
2252			store.Aux = et
2253			s.vars[&memVar] = store
2254		}
2255	}
2256
2257	delete(s.vars, &ptrVar)
2258	if inplace {
2259		delete(s.vars, &lenVar)
2260		return nil
2261	}
2262	delete(s.vars, &newlenVar)
2263	delete(s.vars, &capVar)
2264	// make result
2265	return s.newValue3(ssa.OpSliceMake, n.Type, p, nl, c)
2266}
2267
2268// condBranch evaluates the boolean expression cond and branches to yes
2269// if cond is true and no if cond is false.
2270// This function is intended to handle && and || better than just calling
2271// s.expr(cond) and branching on the result.
2272func (s *state) condBranch(cond *Node, yes, no *ssa.Block, likely int8) {
2273	if cond.Op == OANDAND {
2274		mid := s.f.NewBlock(ssa.BlockPlain)
2275		s.stmtList(cond.Ninit)
2276		s.condBranch(cond.Left, mid, no, max8(likely, 0))
2277		s.startBlock(mid)
2278		s.condBranch(cond.Right, yes, no, likely)
2279		return
2280		// Note: if likely==1, then both recursive calls pass 1.
2281		// If likely==-1, then we don't have enough information to decide
2282		// whether the first branch is likely or not. So we pass 0 for
2283		// the likeliness of the first branch.
2284		// TODO: have the frontend give us branch prediction hints for
2285		// OANDAND and OOROR nodes (if it ever has such info).
2286	}
2287	if cond.Op == OOROR {
2288		mid := s.f.NewBlock(ssa.BlockPlain)
2289		s.stmtList(cond.Ninit)
2290		s.condBranch(cond.Left, yes, mid, min8(likely, 0))
2291		s.startBlock(mid)
2292		s.condBranch(cond.Right, yes, no, likely)
2293		return
2294		// Note: if likely==-1, then both recursive calls pass -1.
2295		// If likely==1, then we don't have enough info to decide
2296		// the likelihood of the first branch.
2297	}
2298	if cond.Op == ONOT {
2299		s.stmtList(cond.Ninit)
2300		s.condBranch(cond.Left, no, yes, -likely)
2301		return
2302	}
2303	c := s.expr(cond)
2304	b := s.endBlock()
2305	b.Kind = ssa.BlockIf
2306	b.SetControl(c)
2307	b.Likely = ssa.BranchPrediction(likely) // gc and ssa both use -1/0/+1 for likeliness
2308	b.AddEdgeTo(yes)
2309	b.AddEdgeTo(no)
2310}
2311
2312type skipMask uint8
2313
2314const (
2315	skipPtr skipMask = 1 << iota
2316	skipLen
2317	skipCap
2318)
2319
2320// assign does left = right.
2321// Right has already been evaluated to ssa, left has not.
2322// If deref is true, then we do left = *right instead (and right has already been nil-checked).
2323// If deref is true and right == nil, just do left = 0.
2324// skip indicates assignments (at the top level) that can be avoided.
2325func (s *state) assign(left *Node, right *ssa.Value, deref bool, skip skipMask) {
2326	if left.Op == ONAME && isblank(left) {
2327		return
2328	}
2329	t := left.Type
2330	dowidth(t)
2331	if s.canSSA(left) {
2332		if deref {
2333			s.Fatalf("can SSA LHS %v but not RHS %s", left, right)
2334		}
2335		if left.Op == ODOT {
2336			// We're assigning to a field of an ssa-able value.
2337			// We need to build a new structure with the new value for the
2338			// field we're assigning and the old values for the other fields.
2339			// For instance:
2340			//   type T struct {a, b, c int}
2341			//   var T x
2342			//   x.b = 5
2343			// For the x.b = 5 assignment we want to generate x = T{x.a, 5, x.c}
2344
2345			// Grab information about the structure type.
2346			t := left.Left.Type
2347			nf := t.NumFields()
2348			idx := fieldIdx(left)
2349
2350			// Grab old value of structure.
2351			old := s.expr(left.Left)
2352
2353			// Make new structure.
2354			new := s.newValue0(ssa.StructMakeOp(t.NumFields()), t)
2355
2356			// Add fields as args.
2357			for i := 0; i < nf; i++ {
2358				if i == idx {
2359					new.AddArg(right)
2360				} else {
2361					new.AddArg(s.newValue1I(ssa.OpStructSelect, t.FieldType(i), int64(i), old))
2362				}
2363			}
2364
2365			// Recursively assign the new value we've made to the base of the dot op.
2366			s.assign(left.Left, new, false, 0)
2367			// TODO: do we need to update named values here?
2368			return
2369		}
2370		if left.Op == OINDEX && left.Left.Type.IsArray() {
2371			// We're assigning to an element of an ssa-able array.
2372			// a[i] = v
2373			t := left.Left.Type
2374			n := t.NumElem()
2375
2376			i := s.expr(left.Right) // index
2377			if n == 0 {
2378				// The bounds check must fail.  Might as well
2379				// ignore the actual index and just use zeros.
2380				z := s.constInt(types.Types[TINT], 0)
2381				s.boundsCheck(z, z)
2382				return
2383			}
2384			if n != 1 {
2385				s.Fatalf("assigning to non-1-length array")
2386			}
2387			// Rewrite to a = [1]{v}
2388			i = s.extendIndex(i, panicindex)
2389			s.boundsCheck(i, s.constInt(types.Types[TINT], 1))
2390			v := s.newValue1(ssa.OpArrayMake1, t, right)
2391			s.assign(left.Left, v, false, 0)
2392			return
2393		}
2394		// Update variable assignment.
2395		s.vars[left] = right
2396		s.addNamedValue(left, right)
2397		return
2398	}
2399	// Left is not ssa-able. Compute its address.
2400	addr := s.addr(left, false)
2401	if left.Op == ONAME && skip == 0 {
2402		s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, left, s.mem())
2403	}
2404	if isReflectHeaderDataField(left) {
2405		// Package unsafe's documentation says storing pointers into
2406		// reflect.SliceHeader and reflect.StringHeader's Data fields
2407		// is valid, even though they have type uintptr (#19168).
2408		// Mark it pointer type to signal the writebarrier pass to
2409		// insert a write barrier.
2410		t = types.Types[TUNSAFEPTR]
2411	}
2412	if deref {
2413		// Treat as a mem->mem move.
2414		var store *ssa.Value
2415		if right == nil {
2416			store = s.newValue2I(ssa.OpZero, types.TypeMem, t.Size(), addr, s.mem())
2417		} else {
2418			store = s.newValue3I(ssa.OpMove, types.TypeMem, t.Size(), addr, right, s.mem())
2419		}
2420		store.Aux = t
2421		s.vars[&memVar] = store
2422		return
2423	}
2424	// Treat as a store.
2425	s.storeType(t, addr, right, skip)
2426}
2427
2428// zeroVal returns the zero value for type t.
2429func (s *state) zeroVal(t *types.Type) *ssa.Value {
2430	switch {
2431	case t.IsInteger():
2432		switch t.Size() {
2433		case 1:
2434			return s.constInt8(t, 0)
2435		case 2:
2436			return s.constInt16(t, 0)
2437		case 4:
2438			return s.constInt32(t, 0)
2439		case 8:
2440			return s.constInt64(t, 0)
2441		default:
2442			s.Fatalf("bad sized integer type %v", t)
2443		}
2444	case t.IsFloat():
2445		switch t.Size() {
2446		case 4:
2447			return s.constFloat32(t, 0)
2448		case 8:
2449			return s.constFloat64(t, 0)
2450		default:
2451			s.Fatalf("bad sized float type %v", t)
2452		}
2453	case t.IsComplex():
2454		switch t.Size() {
2455		case 8:
2456			z := s.constFloat32(types.Types[TFLOAT32], 0)
2457			return s.entryNewValue2(ssa.OpComplexMake, t, z, z)
2458		case 16:
2459			z := s.constFloat64(types.Types[TFLOAT64], 0)
2460			return s.entryNewValue2(ssa.OpComplexMake, t, z, z)
2461		default:
2462			s.Fatalf("bad sized complex type %v", t)
2463		}
2464
2465	case t.IsString():
2466		return s.constEmptyString(t)
2467	case t.IsPtrShaped():
2468		return s.constNil(t)
2469	case t.IsBoolean():
2470		return s.constBool(false)
2471	case t.IsInterface():
2472		return s.constInterface(t)
2473	case t.IsSlice():
2474		return s.constSlice(t)
2475	case t.IsStruct():
2476		n := t.NumFields()
2477		v := s.entryNewValue0(ssa.StructMakeOp(t.NumFields()), t)
2478		for i := 0; i < n; i++ {
2479			v.AddArg(s.zeroVal(t.FieldType(i)))
2480		}
2481		return v
2482	case t.IsArray():
2483		switch t.NumElem() {
2484		case 0:
2485			return s.entryNewValue0(ssa.OpArrayMake0, t)
2486		case 1:
2487			return s.entryNewValue1(ssa.OpArrayMake1, t, s.zeroVal(t.Elem()))
2488		}
2489	}
2490	s.Fatalf("zero for type %v not implemented", t)
2491	return nil
2492}
2493
2494type callKind int8
2495
2496const (
2497	callNormal callKind = iota
2498	callDefer
2499	callGo
2500)
2501
2502var intrinsics map[intrinsicKey]intrinsicBuilder
2503
2504// An intrinsicBuilder converts a call node n into an ssa value that
2505// implements that call as an intrinsic. args is a list of arguments to the func.
2506type intrinsicBuilder func(s *state, n *Node, args []*ssa.Value) *ssa.Value
2507
2508type intrinsicKey struct {
2509	arch *sys.Arch
2510	pkg  string
2511	fn   string
2512}
2513
2514func init() {
2515	intrinsics = map[intrinsicKey]intrinsicBuilder{}
2516
2517	var all []*sys.Arch
2518	var p4 []*sys.Arch
2519	var p8 []*sys.Arch
2520	for _, a := range sys.Archs {
2521		all = append(all, a)
2522		if a.PtrSize == 4 {
2523			p4 = append(p4, a)
2524		} else {
2525			p8 = append(p8, a)
2526		}
2527	}
2528
2529	// add adds the intrinsic b for pkg.fn for the given list of architectures.
2530	add := func(pkg, fn string, b intrinsicBuilder, archs ...*sys.Arch) {
2531		for _, a := range archs {
2532			intrinsics[intrinsicKey{a, pkg, fn}] = b
2533		}
2534	}
2535	// addF does the same as add but operates on architecture families.
2536	addF := func(pkg, fn string, b intrinsicBuilder, archFamilies ...sys.ArchFamily) {
2537		m := 0
2538		for _, f := range archFamilies {
2539			if f >= 32 {
2540				panic("too many architecture families")
2541			}
2542			m |= 1 << uint(f)
2543		}
2544		for _, a := range all {
2545			if m>>uint(a.Family)&1 != 0 {
2546				intrinsics[intrinsicKey{a, pkg, fn}] = b
2547			}
2548		}
2549	}
2550	// alias defines pkg.fn = pkg2.fn2 for all architectures in archs for which pkg2.fn2 exists.
2551	alias := func(pkg, fn, pkg2, fn2 string, archs ...*sys.Arch) {
2552		for _, a := range archs {
2553			if b, ok := intrinsics[intrinsicKey{a, pkg2, fn2}]; ok {
2554				intrinsics[intrinsicKey{a, pkg, fn}] = b
2555			}
2556		}
2557	}
2558
2559	/******** runtime ********/
2560	if !instrumenting {
2561		add("runtime", "slicebytetostringtmp",
2562			func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
2563				// Compiler frontend optimizations emit OARRAYBYTESTRTMP nodes
2564				// for the backend instead of slicebytetostringtmp calls
2565				// when not instrumenting.
2566				slice := args[0]
2567				ptr := s.newValue1(ssa.OpSlicePtr, s.f.Config.Types.BytePtr, slice)
2568				len := s.newValue1(ssa.OpSliceLen, types.Types[TINT], slice)
2569				return s.newValue2(ssa.OpStringMake, n.Type, ptr, len)
2570			},
2571			all...)
2572	}
2573	add("runtime", "KeepAlive",
2574		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
2575			data := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, args[0])
2576			s.vars[&memVar] = s.newValue2(ssa.OpKeepAlive, types.TypeMem, data, s.mem())
2577			return nil
2578		},
2579		all...)
2580
2581	/******** runtime/internal/sys ********/
2582	addF("runtime/internal/sys", "Ctz32",
2583		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
2584			return s.newValue1(ssa.OpCtz32, types.Types[TINT], args[0])
2585		},
2586		sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS)
2587	addF("runtime/internal/sys", "Ctz64",
2588		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
2589			return s.newValue1(ssa.OpCtz64, types.Types[TINT], args[0])
2590		},
2591		sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS)
2592	addF("runtime/internal/sys", "Bswap32",
2593		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
2594			return s.newValue1(ssa.OpBswap32, types.Types[TUINT32], args[0])
2595		},
2596		sys.AMD64, sys.ARM64, sys.ARM, sys.S390X)
2597	addF("runtime/internal/sys", "Bswap64",
2598		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
2599			return s.newValue1(ssa.OpBswap64, types.Types[TUINT64], args[0])
2600		},
2601		sys.AMD64, sys.ARM64, sys.ARM, sys.S390X)
2602
2603	/******** runtime/internal/atomic ********/
2604	addF("runtime/internal/atomic", "Load",
2605		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
2606			v := s.newValue2(ssa.OpAtomicLoad32, types.NewTuple(types.Types[TUINT32], types.TypeMem), args[0], s.mem())
2607			s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
2608			return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v)
2609		},
2610		sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.PPC64)
2611
2612	addF("runtime/internal/atomic", "Load64",
2613		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
2614			v := s.newValue2(ssa.OpAtomicLoad64, types.NewTuple(types.Types[TUINT64], types.TypeMem), args[0], s.mem())
2615			s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
2616			return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v)
2617		},
2618		sys.AMD64, sys.ARM64, sys.S390X, sys.PPC64)
2619	addF("runtime/internal/atomic", "Loadp",
2620		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
2621			v := s.newValue2(ssa.OpAtomicLoadPtr, types.NewTuple(s.f.Config.Types.BytePtr, types.TypeMem), args[0], s.mem())
2622			s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
2623			return s.newValue1(ssa.OpSelect0, s.f.Config.Types.BytePtr, v)
2624		},
2625		sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.PPC64)
2626
2627	addF("runtime/internal/atomic", "Store",
2628		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
2629			s.vars[&memVar] = s.newValue3(ssa.OpAtomicStore32, types.TypeMem, args[0], args[1], s.mem())
2630			return nil
2631		},
2632		sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.PPC64)
2633	addF("runtime/internal/atomic", "Store64",
2634		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
2635			s.vars[&memVar] = s.newValue3(ssa.OpAtomicStore64, types.TypeMem, args[0], args[1], s.mem())
2636			return nil
2637		},
2638		sys.AMD64, sys.ARM64, sys.S390X, sys.PPC64)
2639	addF("runtime/internal/atomic", "StorepNoWB",
2640		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
2641			s.vars[&memVar] = s.newValue3(ssa.OpAtomicStorePtrNoWB, types.TypeMem, args[0], args[1], s.mem())
2642			return nil
2643		},
2644		sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS)
2645
2646	addF("runtime/internal/atomic", "Xchg",
2647		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
2648			v := s.newValue3(ssa.OpAtomicExchange32, types.NewTuple(types.Types[TUINT32], types.TypeMem), args[0], args[1], s.mem())
2649			s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
2650			return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v)
2651		},
2652		sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.PPC64)
2653	addF("runtime/internal/atomic", "Xchg64",
2654		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
2655			v := s.newValue3(ssa.OpAtomicExchange64, types.NewTuple(types.Types[TUINT64], types.TypeMem), args[0], args[1], s.mem())
2656			s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
2657			return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v)
2658		},
2659		sys.AMD64, sys.ARM64, sys.S390X, sys.PPC64)
2660
2661	addF("runtime/internal/atomic", "Xadd",
2662		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
2663			v := s.newValue3(ssa.OpAtomicAdd32, types.NewTuple(types.Types[TUINT32], types.TypeMem), args[0], args[1], s.mem())
2664			s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
2665			return s.newValue1(ssa.OpSelect0, types.Types[TUINT32], v)
2666		},
2667		sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.PPC64)
2668	addF("runtime/internal/atomic", "Xadd64",
2669		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
2670			v := s.newValue3(ssa.OpAtomicAdd64, types.NewTuple(types.Types[TUINT64], types.TypeMem), args[0], args[1], s.mem())
2671			s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
2672			return s.newValue1(ssa.OpSelect0, types.Types[TUINT64], v)
2673		},
2674		sys.AMD64, sys.ARM64, sys.S390X, sys.PPC64)
2675
2676	addF("runtime/internal/atomic", "Cas",
2677		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
2678			v := s.newValue4(ssa.OpAtomicCompareAndSwap32, types.NewTuple(types.Types[TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
2679			s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
2680			return s.newValue1(ssa.OpSelect0, types.Types[TBOOL], v)
2681		},
2682		sys.AMD64, sys.ARM64, sys.S390X, sys.MIPS, sys.PPC64)
2683	addF("runtime/internal/atomic", "Cas64",
2684		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
2685			v := s.newValue4(ssa.OpAtomicCompareAndSwap64, types.NewTuple(types.Types[TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
2686			s.vars[&memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
2687			return s.newValue1(ssa.OpSelect0, types.Types[TBOOL], v)
2688		},
2689		sys.AMD64, sys.ARM64, sys.S390X, sys.PPC64)
2690
2691	addF("runtime/internal/atomic", "And8",
2692		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
2693			s.vars[&memVar] = s.newValue3(ssa.OpAtomicAnd8, types.TypeMem, args[0], args[1], s.mem())
2694			return nil
2695		},
2696		sys.AMD64, sys.ARM64, sys.MIPS, sys.PPC64)
2697	addF("runtime/internal/atomic", "Or8",
2698		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
2699			s.vars[&memVar] = s.newValue3(ssa.OpAtomicOr8, types.TypeMem, args[0], args[1], s.mem())
2700			return nil
2701		},
2702		sys.AMD64, sys.ARM64, sys.MIPS, sys.PPC64)
2703
2704	alias("runtime/internal/atomic", "Loadint64", "runtime/internal/atomic", "Load64", all...)
2705	alias("runtime/internal/atomic", "Xaddint64", "runtime/internal/atomic", "Xadd64", all...)
2706	alias("runtime/internal/atomic", "Loaduint", "runtime/internal/atomic", "Load", p4...)
2707	alias("runtime/internal/atomic", "Loaduint", "runtime/internal/atomic", "Load64", p8...)
2708	alias("runtime/internal/atomic", "Loaduintptr", "runtime/internal/atomic", "Load", p4...)
2709	alias("runtime/internal/atomic", "Loaduintptr", "runtime/internal/atomic", "Load64", p8...)
2710	alias("runtime/internal/atomic", "Storeuintptr", "runtime/internal/atomic", "Store", p4...)
2711	alias("runtime/internal/atomic", "Storeuintptr", "runtime/internal/atomic", "Store64", p8...)
2712	alias("runtime/internal/atomic", "Xchguintptr", "runtime/internal/atomic", "Xchg", p4...)
2713	alias("runtime/internal/atomic", "Xchguintptr", "runtime/internal/atomic", "Xchg64", p8...)
2714	alias("runtime/internal/atomic", "Xadduintptr", "runtime/internal/atomic", "Xadd", p4...)
2715	alias("runtime/internal/atomic", "Xadduintptr", "runtime/internal/atomic", "Xadd64", p8...)
2716	alias("runtime/internal/atomic", "Casuintptr", "runtime/internal/atomic", "Cas", p4...)
2717	alias("runtime/internal/atomic", "Casuintptr", "runtime/internal/atomic", "Cas64", p8...)
2718	alias("runtime/internal/atomic", "Casp1", "runtime/internal/atomic", "Cas", p4...)
2719	alias("runtime/internal/atomic", "Casp1", "runtime/internal/atomic", "Cas64", p8...)
2720
2721	/******** math ********/
2722	addF("math", "Sqrt",
2723		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
2724			return s.newValue1(ssa.OpSqrt, types.Types[TFLOAT64], args[0])
2725		},
2726		sys.AMD64, sys.ARM, sys.ARM64, sys.MIPS, sys.PPC64, sys.S390X)
2727
2728	/******** math/bits ********/
2729	addF("math/bits", "TrailingZeros64",
2730		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
2731			return s.newValue1(ssa.OpCtz64, types.Types[TINT], args[0])
2732		},
2733		sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64)
2734	addF("math/bits", "TrailingZeros32",
2735		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
2736			return s.newValue1(ssa.OpCtz32, types.Types[TINT], args[0])
2737		},
2738		sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64)
2739	addF("math/bits", "TrailingZeros16",
2740		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
2741			x := s.newValue1(ssa.OpZeroExt16to32, types.Types[TUINT32], args[0])
2742			c := s.constInt32(types.Types[TUINT32], 1<<16)
2743			y := s.newValue2(ssa.OpOr32, types.Types[TUINT32], x, c)
2744			return s.newValue1(ssa.OpCtz32, types.Types[TINT], y)
2745		},
2746		sys.ARM, sys.MIPS)
2747	addF("math/bits", "TrailingZeros16",
2748		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
2749			x := s.newValue1(ssa.OpZeroExt16to64, types.Types[TUINT64], args[0])
2750			c := s.constInt64(types.Types[TUINT64], 1<<16)
2751			y := s.newValue2(ssa.OpOr64, types.Types[TUINT64], x, c)
2752			return s.newValue1(ssa.OpCtz64, types.Types[TINT], y)
2753		},
2754		sys.AMD64, sys.ARM64, sys.S390X)
2755	addF("math/bits", "TrailingZeros8",
2756		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
2757			x := s.newValue1(ssa.OpZeroExt8to32, types.Types[TUINT32], args[0])
2758			c := s.constInt32(types.Types[TUINT32], 1<<8)
2759			y := s.newValue2(ssa.OpOr32, types.Types[TUINT32], x, c)
2760			return s.newValue1(ssa.OpCtz32, types.Types[TINT], y)
2761		},
2762		sys.ARM, sys.MIPS)
2763	addF("math/bits", "TrailingZeros8",
2764		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
2765			x := s.newValue1(ssa.OpZeroExt8to64, types.Types[TUINT64], args[0])
2766			c := s.constInt64(types.Types[TUINT64], 1<<8)
2767			y := s.newValue2(ssa.OpOr64, types.Types[TUINT64], x, c)
2768			return s.newValue1(ssa.OpCtz64, types.Types[TINT], y)
2769		},
2770		sys.AMD64, sys.ARM64, sys.S390X)
2771	alias("math/bits", "ReverseBytes64", "runtime/internal/sys", "Bswap64", all...)
2772	alias("math/bits", "ReverseBytes32", "runtime/internal/sys", "Bswap32", all...)
2773	// ReverseBytes inlines correctly, no need to intrinsify it.
2774	// ReverseBytes16 lowers to a rotate, no need for anything special here.
2775	addF("math/bits", "Len64",
2776		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
2777			return s.newValue1(ssa.OpBitLen64, types.Types[TINT], args[0])
2778		},
2779		sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64)
2780	addF("math/bits", "Len32",
2781		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
2782			if s.config.PtrSize == 4 {
2783				return s.newValue1(ssa.OpBitLen32, types.Types[TINT], args[0])
2784			}
2785			x := s.newValue1(ssa.OpZeroExt32to64, types.Types[TUINT64], args[0])
2786			return s.newValue1(ssa.OpBitLen64, types.Types[TINT], x)
2787		},
2788		sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64)
2789	addF("math/bits", "Len16",
2790		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
2791			if s.config.PtrSize == 4 {
2792				x := s.newValue1(ssa.OpZeroExt16to32, types.Types[TUINT32], args[0])
2793				return s.newValue1(ssa.OpBitLen32, types.Types[TINT], x)
2794			}
2795			x := s.newValue1(ssa.OpZeroExt16to64, types.Types[TUINT64], args[0])
2796			return s.newValue1(ssa.OpBitLen64, types.Types[TINT], x)
2797		},
2798		sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64)
2799	// Note: disabled on AMD64 because the Go code is faster!
2800	addF("math/bits", "Len8",
2801		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
2802			if s.config.PtrSize == 4 {
2803				x := s.newValue1(ssa.OpZeroExt8to32, types.Types[TUINT32], args[0])
2804				return s.newValue1(ssa.OpBitLen32, types.Types[TINT], x)
2805			}
2806			x := s.newValue1(ssa.OpZeroExt8to64, types.Types[TUINT64], args[0])
2807			return s.newValue1(ssa.OpBitLen64, types.Types[TINT], x)
2808		},
2809		sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64)
2810
2811	addF("math/bits", "Len",
2812		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
2813			if s.config.PtrSize == 4 {
2814				return s.newValue1(ssa.OpBitLen32, types.Types[TINT], args[0])
2815			}
2816			return s.newValue1(ssa.OpBitLen64, types.Types[TINT], args[0])
2817		},
2818		sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64)
2819	// LeadingZeros is handled because it trivially calls Len.
2820	addF("math/bits", "Reverse64",
2821		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
2822			return s.newValue1(ssa.OpBitRev64, types.Types[TINT], args[0])
2823		},
2824		sys.ARM64)
2825	addF("math/bits", "Reverse32",
2826		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
2827			return s.newValue1(ssa.OpBitRev32, types.Types[TINT], args[0])
2828		},
2829		sys.ARM64)
2830	addF("math/bits", "Reverse16",
2831		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
2832			return s.newValue1(ssa.OpBitRev16, types.Types[TINT], args[0])
2833		},
2834		sys.ARM64)
2835	addF("math/bits", "Reverse8",
2836		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
2837			return s.newValue1(ssa.OpBitRev8, types.Types[TINT], args[0])
2838		},
2839		sys.ARM64)
2840	addF("math/bits", "Reverse",
2841		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
2842			if s.config.PtrSize == 4 {
2843				return s.newValue1(ssa.OpBitRev32, types.Types[TINT], args[0])
2844			}
2845			return s.newValue1(ssa.OpBitRev64, types.Types[TINT], args[0])
2846		},
2847		sys.ARM64)
2848	makeOnesCountAMD64 := func(op64 ssa.Op, op32 ssa.Op) func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
2849		return func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
2850			aux := s.lookupSymbol(n, &ssa.ExternSymbol{Sym: syslook("support_popcnt").Sym.Linksym()})
2851			addr := s.entryNewValue1A(ssa.OpAddr, types.Types[TBOOL].PtrTo(), aux, s.sb)
2852			v := s.newValue2(ssa.OpLoad, types.Types[TBOOL], addr, s.mem())
2853			b := s.endBlock()
2854			b.Kind = ssa.BlockIf
2855			b.SetControl(v)
2856			bTrue := s.f.NewBlock(ssa.BlockPlain)
2857			bFalse := s.f.NewBlock(ssa.BlockPlain)
2858			bEnd := s.f.NewBlock(ssa.BlockPlain)
2859			b.AddEdgeTo(bTrue)
2860			b.AddEdgeTo(bFalse)
2861			b.Likely = ssa.BranchLikely // most machines have popcnt nowadays
2862
2863			// We have the intrinsic - use it directly.
2864			s.startBlock(bTrue)
2865			op := op64
2866			if s.config.PtrSize == 4 {
2867				op = op32
2868			}
2869			s.vars[n] = s.newValue1(op, types.Types[TINT], args[0])
2870			s.endBlock().AddEdgeTo(bEnd)
2871
2872			// Call the pure Go version.
2873			s.startBlock(bFalse)
2874			a := s.call(n, callNormal)
2875			s.vars[n] = s.newValue2(ssa.OpLoad, types.Types[TINT], a, s.mem())
2876			s.endBlock().AddEdgeTo(bEnd)
2877
2878			// Merge results.
2879			s.startBlock(bEnd)
2880			return s.variable(n, types.Types[TINT])
2881		}
2882	}
2883	addF("math/bits", "OnesCount64",
2884		makeOnesCountAMD64(ssa.OpPopCount64, ssa.OpPopCount64),
2885		sys.AMD64)
2886	addF("math/bits", "OnesCount64",
2887		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
2888			return s.newValue1(ssa.OpPopCount64, types.Types[TINT], args[0])
2889		},
2890		sys.PPC64)
2891	addF("math/bits", "OnesCount32",
2892		makeOnesCountAMD64(ssa.OpPopCount32, ssa.OpPopCount32),
2893		sys.AMD64)
2894	addF("math/bits", "OnesCount32",
2895		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
2896			return s.newValue1(ssa.OpPopCount32, types.Types[TINT], args[0])
2897		},
2898		sys.PPC64)
2899	addF("math/bits", "OnesCount16",
2900		makeOnesCountAMD64(ssa.OpPopCount16, ssa.OpPopCount16),
2901		sys.AMD64)
2902	// Note: no OnesCount8, the Go implementation is faster - just a table load.
2903	addF("math/bits", "OnesCount",
2904		makeOnesCountAMD64(ssa.OpPopCount64, ssa.OpPopCount32),
2905		sys.AMD64)
2906
2907	/******** sync/atomic ********/
2908
2909	// Note: these are disabled by flag_race in findIntrinsic below.
2910	alias("sync/atomic", "LoadInt32", "runtime/internal/atomic", "Load", all...)
2911	alias("sync/atomic", "LoadInt64", "runtime/internal/atomic", "Load64", all...)
2912	alias("sync/atomic", "LoadPointer", "runtime/internal/atomic", "Loadp", all...)
2913	alias("sync/atomic", "LoadUint32", "runtime/internal/atomic", "Load", all...)
2914	alias("sync/atomic", "LoadUint64", "runtime/internal/atomic", "Load64", all...)
2915	alias("sync/atomic", "LoadUintptr", "runtime/internal/atomic", "Load", p4...)
2916	alias("sync/atomic", "LoadUintptr", "runtime/internal/atomic", "Load64", p8...)
2917
2918	alias("sync/atomic", "StoreInt32", "runtime/internal/atomic", "Store", all...)
2919	alias("sync/atomic", "StoreInt64", "runtime/internal/atomic", "Store64", all...)
2920	// Note: not StorePointer, that needs a write barrier.  Same below for {CompareAnd}Swap.
2921	alias("sync/atomic", "StoreUint32", "runtime/internal/atomic", "Store", all...)
2922	alias("sync/atomic", "StoreUint64", "runtime/internal/atomic", "Store64", all...)
2923	alias("sync/atomic", "StoreUintptr", "runtime/internal/atomic", "Store", p4...)
2924	alias("sync/atomic", "StoreUintptr", "runtime/internal/atomic", "Store64", p8...)
2925
2926	alias("sync/atomic", "SwapInt32", "runtime/internal/atomic", "Xchg", all...)
2927	alias("sync/atomic", "SwapInt64", "runtime/internal/atomic", "Xchg64", all...)
2928	alias("sync/atomic", "SwapUint32", "runtime/internal/atomic", "Xchg", all...)
2929	alias("sync/atomic", "SwapUint64", "runtime/internal/atomic", "Xchg64", all...)
2930	alias("sync/atomic", "SwapUintptr", "runtime/internal/atomic", "Xchg", p4...)
2931	alias("sync/atomic", "SwapUintptr", "runtime/internal/atomic", "Xchg64", p8...)
2932
2933	alias("sync/atomic", "CompareAndSwapInt32", "runtime/internal/atomic", "Cas", all...)
2934	alias("sync/atomic", "CompareAndSwapInt64", "runtime/internal/atomic", "Cas64", all...)
2935	alias("sync/atomic", "CompareAndSwapUint32", "runtime/internal/atomic", "Cas", all...)
2936	alias("sync/atomic", "CompareAndSwapUint64", "runtime/internal/atomic", "Cas64", all...)
2937	alias("sync/atomic", "CompareAndSwapUintptr", "runtime/internal/atomic", "Cas", p4...)
2938	alias("sync/atomic", "CompareAndSwapUintptr", "runtime/internal/atomic", "Cas64", p8...)
2939
2940	alias("sync/atomic", "AddInt32", "runtime/internal/atomic", "Xadd", all...)
2941	alias("sync/atomic", "AddInt64", "runtime/internal/atomic", "Xadd64", all...)
2942	alias("sync/atomic", "AddUint32", "runtime/internal/atomic", "Xadd", all...)
2943	alias("sync/atomic", "AddUint64", "runtime/internal/atomic", "Xadd64", all...)
2944	alias("sync/atomic", "AddUintptr", "runtime/internal/atomic", "Xadd", p4...)
2945	alias("sync/atomic", "AddUintptr", "runtime/internal/atomic", "Xadd64", p8...)
2946
2947	/******** math/big ********/
2948	add("math/big", "mulWW",
2949		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
2950			return s.newValue2(ssa.OpMul64uhilo, types.NewTuple(types.Types[TUINT64], types.Types[TUINT64]), args[0], args[1])
2951		},
2952		sys.ArchAMD64)
2953	add("math/big", "divWW",
2954		func(s *state, n *Node, args []*ssa.Value) *ssa.Value {
2955			return s.newValue3(ssa.OpDiv128u, types.NewTuple(types.Types[TUINT64], types.Types[TUINT64]), args[0], args[1], args[2])
2956		},
2957		sys.ArchAMD64)
2958}
2959
2960// findIntrinsic returns a function which builds the SSA equivalent of the
2961// function identified by the symbol sym.  If sym is not an intrinsic call, returns nil.
2962func findIntrinsic(sym *types.Sym) intrinsicBuilder {
2963	if ssa.IntrinsicsDisable {
2964		return nil
2965	}
2966	if sym == nil || sym.Pkg == nil {
2967		return nil
2968	}
2969	pkg := sym.Pkg.Path
2970	if sym.Pkg == localpkg {
2971		pkg = myimportpath
2972	}
2973	if flag_race && pkg == "sync/atomic" {
2974		// The race detector needs to be able to intercept these calls.
2975		// We can't intrinsify them.
2976		return nil
2977	}
2978	fn := sym.Name
2979	return intrinsics[intrinsicKey{thearch.LinkArch.Arch, pkg, fn}]
2980}
2981
2982func isIntrinsicCall(n *Node) bool {
2983	if n == nil || n.Left == nil {
2984		return false
2985	}
2986	return findIntrinsic(n.Left.Sym) != nil
2987}
2988
2989// intrinsicCall converts a call to a recognized intrinsic function into the intrinsic SSA operation.
2990func (s *state) intrinsicCall(n *Node) *ssa.Value {
2991	v := findIntrinsic(n.Left.Sym)(s, n, s.intrinsicArgs(n))
2992	if ssa.IntrinsicsDebug > 0 {
2993		x := v
2994		if x == nil {
2995			x = s.mem()
2996		}
2997		if x.Op == ssa.OpSelect0 || x.Op == ssa.OpSelect1 {
2998			x = x.Args[0]
2999		}
3000		Warnl(n.Pos, "intrinsic substitution for %v with %s", n.Left.Sym.Name, x.LongString())
3001	}
3002	return v
3003}
3004
3005type callArg struct {
3006	offset int64
3007	v      *ssa.Value
3008}
3009type byOffset []callArg
3010
3011func (x byOffset) Len() int      { return len(x) }
3012func (x byOffset) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
3013func (x byOffset) Less(i, j int) bool {
3014	return x[i].offset < x[j].offset
3015}
3016
3017// intrinsicArgs extracts args from n, evaluates them to SSA values, and returns them.
3018func (s *state) intrinsicArgs(n *Node) []*ssa.Value {
3019	// This code is complicated because of how walk transforms calls. For a call node,
3020	// each entry in n.List is either an assignment to OINDREGSP which actually
3021	// stores an arg, or an assignment to a temporary which computes an arg
3022	// which is later assigned.
3023	// The args can also be out of order.
3024	// TODO: when walk goes away someday, this code can go away also.
3025	var args []callArg
3026	temps := map[*Node]*ssa.Value{}
3027	for _, a := range n.List.Slice() {
3028		if a.Op != OAS {
3029			s.Fatalf("non-assignment as a function argument %s", opnames[a.Op])
3030		}
3031		l, r := a.Left, a.Right
3032		switch l.Op {
3033		case ONAME:
3034			// Evaluate and store to "temporary".
3035			// Walk ensures these temporaries are dead outside of n.
3036			temps[l] = s.expr(r)
3037		case OINDREGSP:
3038			// Store a value to an argument slot.
3039			var v *ssa.Value
3040			if x, ok := temps[r]; ok {
3041				// This is a previously computed temporary.
3042				v = x
3043			} else {
3044				// This is an explicit value; evaluate it.
3045				v = s.expr(r)
3046			}
3047			args = append(args, callArg{l.Xoffset, v})
3048		default:
3049			s.Fatalf("function argument assignment target not allowed: %s", opnames[l.Op])
3050		}
3051	}
3052	sort.Sort(byOffset(args))
3053	res := make([]*ssa.Value, len(args))
3054	for i, a := range args {
3055		res[i] = a.v
3056	}
3057	return res
3058}
3059
3060// Calls the function n using the specified call type.
3061// Returns the address of the return value (or nil if none).
3062func (s *state) call(n *Node, k callKind) *ssa.Value {
3063	var sym *types.Sym     // target symbol (if static)
3064	var closure *ssa.Value // ptr to closure to run (if dynamic)
3065	var codeptr *ssa.Value // ptr to target code (if dynamic)
3066	var rcvr *ssa.Value    // receiver to set
3067	fn := n.Left
3068	switch n.Op {
3069	case OCALLFUNC:
3070		if k == callNormal && fn.Op == ONAME && fn.Class() == PFUNC {
3071			sym = fn.Sym
3072			break
3073		}
3074		closure = s.expr(fn)
3075	case OCALLMETH:
3076		if fn.Op != ODOTMETH {
3077			Fatalf("OCALLMETH: n.Left not an ODOTMETH: %v", fn)
3078		}
3079		if k == callNormal {
3080			sym = fn.Sym
3081			break
3082		}
3083		// Make a name n2 for the function.
3084		// fn.Sym might be sync.(*Mutex).Unlock.
3085		// Make a PFUNC node out of that, then evaluate it.
3086		// We get back an SSA value representing &sync.(*Mutex).Unlock·f.
3087		// We can then pass that to defer or go.
3088		n2 := newnamel(fn.Pos, fn.Sym)
3089		n2.Name.Curfn = s.curfn
3090		n2.SetClass(PFUNC)
3091		n2.Pos = fn.Pos
3092		n2.Type = types.Types[TUINT8] // dummy type for a static closure. Could use runtime.funcval if we had it.
3093		closure = s.expr(n2)
3094		// Note: receiver is already assigned in n.List, so we don't
3095		// want to set it here.
3096	case OCALLINTER:
3097		if fn.Op != ODOTINTER {
3098			Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", fn.Op)
3099		}
3100		i := s.expr(fn.Left)
3101		itab := s.newValue1(ssa.OpITab, types.Types[TUINTPTR], i)
3102		if k != callNormal {
3103			s.nilCheck(itab)
3104		}
3105		itabidx := fn.Xoffset + 3*int64(Widthptr) + 8 // offset of fun field in runtime.itab
3106		itab = s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.UintptrPtr, itabidx, itab)
3107		if k == callNormal {
3108			codeptr = s.newValue2(ssa.OpLoad, types.Types[TUINTPTR], itab, s.mem())
3109		} else {
3110			closure = itab
3111		}
3112		rcvr = s.newValue1(ssa.OpIData, types.Types[TUINTPTR], i)
3113	}
3114	dowidth(fn.Type)
3115	stksize := fn.Type.ArgWidth() // includes receiver
3116
3117	// Run all argument assignments. The arg slots have already
3118	// been offset by the appropriate amount (+2*widthptr for go/defer,
3119	// +widthptr for interface calls).
3120	// For OCALLMETH, the receiver is set in these statements.
3121	s.stmtList(n.List)
3122
3123	// Set receiver (for interface calls)
3124	if rcvr != nil {
3125		argStart := Ctxt.FixedFrameSize()
3126		if k != callNormal {
3127			argStart += int64(2 * Widthptr)
3128		}
3129		addr := s.constOffPtrSP(s.f.Config.Types.UintptrPtr, argStart)
3130		s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, types.Types[TUINTPTR], addr, rcvr, s.mem())
3131	}
3132
3133	// Defer/go args
3134	if k != callNormal {
3135		// Write argsize and closure (args to Newproc/Deferproc).
3136		argStart := Ctxt.FixedFrameSize()
3137		argsize := s.constInt32(types.Types[TUINT32], int32(stksize))
3138		addr := s.constOffPtrSP(s.f.Config.Types.UInt32Ptr, argStart)
3139		s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, types.Types[TUINT32], addr, argsize, s.mem())
3140		addr = s.constOffPtrSP(s.f.Config.Types.UintptrPtr, argStart+int64(Widthptr))
3141		s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, types.Types[TUINTPTR], addr, closure, s.mem())
3142		stksize += 2 * int64(Widthptr)
3143	}
3144
3145	// call target
3146	var call *ssa.Value
3147	switch {
3148	case k == callDefer:
3149		call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, Deferproc, s.mem())
3150	case k == callGo:
3151		call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, Newproc, s.mem())
3152	case closure != nil:
3153		codeptr = s.newValue2(ssa.OpLoad, types.Types[TUINTPTR], closure, s.mem())
3154		call = s.newValue3(ssa.OpClosureCall, types.TypeMem, codeptr, closure, s.mem())
3155	case codeptr != nil:
3156		call = s.newValue2(ssa.OpInterCall, types.TypeMem, codeptr, s.mem())
3157	case sym != nil:
3158		call = s.newValue1A(ssa.OpStaticCall, types.TypeMem, sym.Linksym(), s.mem())
3159	default:
3160		Fatalf("bad call type %v %v", n.Op, n)
3161	}
3162	call.AuxInt = stksize // Call operations carry the argsize of the callee along with them
3163	s.vars[&memVar] = call
3164
3165	// Finish block for defers
3166	if k == callDefer {
3167		b := s.endBlock()
3168		b.Kind = ssa.BlockDefer
3169		b.SetControl(call)
3170		bNext := s.f.NewBlock(ssa.BlockPlain)
3171		b.AddEdgeTo(bNext)
3172		// Add recover edge to exit code.
3173		r := s.f.NewBlock(ssa.BlockPlain)
3174		s.startBlock(r)
3175		s.exit()
3176		b.AddEdgeTo(r)
3177		b.Likely = ssa.BranchLikely
3178		s.startBlock(bNext)
3179	}
3180
3181	res := n.Left.Type.Results()
3182	if res.NumFields() == 0 || k != callNormal {
3183		// call has no return value. Continue with the next statement.
3184		return nil
3185	}
3186	fp := res.Field(0)
3187	return s.constOffPtrSP(types.NewPtr(fp.Type), fp.Offset+Ctxt.FixedFrameSize())
3188}
3189
3190// etypesign returns the signed-ness of e, for integer/pointer etypes.
3191// -1 means signed, +1 means unsigned, 0 means non-integer/non-pointer.
3192func etypesign(e types.EType) int8 {
3193	switch e {
3194	case TINT8, TINT16, TINT32, TINT64, TINT:
3195		return -1
3196	case TUINT8, TUINT16, TUINT32, TUINT64, TUINT, TUINTPTR, TUNSAFEPTR:
3197		return +1
3198	}
3199	return 0
3200}
3201
3202// lookupSymbol is used to retrieve the symbol (Extern, Arg or Auto) used for a particular node.
3203// This improves the effectiveness of cse by using the same Aux values for the
3204// same symbols.
3205func (s *state) lookupSymbol(n *Node, sym interface{}) interface{} {
3206	switch sym.(type) {
3207	default:
3208		s.Fatalf("sym %v is of unknown type %T", sym, sym)
3209	case *ssa.ExternSymbol, *ssa.ArgSymbol, *ssa.AutoSymbol:
3210		// these are the only valid types
3211	}
3212
3213	if lsym, ok := s.varsyms[n]; ok {
3214		return lsym
3215	}
3216	s.varsyms[n] = sym
3217	return sym
3218}
3219
3220// addr converts the address of the expression n to SSA, adds it to s and returns the SSA result.
3221// The value that the returned Value represents is guaranteed to be non-nil.
3222// If bounded is true then this address does not require a nil check for its operand
3223// even if that would otherwise be implied.
3224func (s *state) addr(n *Node, bounded bool) *ssa.Value {
3225	t := types.NewPtr(n.Type)
3226	switch n.Op {
3227	case ONAME:
3228		switch n.Class() {
3229		case PEXTERN:
3230			// global variable
3231			aux := s.lookupSymbol(n, &ssa.ExternSymbol{Sym: n.Sym.Linksym()})
3232			v := s.entryNewValue1A(ssa.OpAddr, t, aux, s.sb)
3233			// TODO: Make OpAddr use AuxInt as well as Aux.
3234			if n.Xoffset != 0 {
3235				v = s.entryNewValue1I(ssa.OpOffPtr, v.Type, n.Xoffset, v)
3236			}
3237			return v
3238		case PPARAM:
3239			// parameter slot
3240			v := s.decladdrs[n]
3241			if v != nil {
3242				return v
3243			}
3244			if n == nodfp {
3245				// Special arg that points to the frame pointer (Used by ORECOVER).
3246				aux := s.lookupSymbol(n, &ssa.ArgSymbol{Node: n})
3247				return s.entryNewValue1A(ssa.OpAddr, t, aux, s.sp)
3248			}
3249			s.Fatalf("addr of undeclared ONAME %v. declared: %v", n, s.decladdrs)
3250			return nil
3251		case PAUTO:
3252			aux := s.lookupSymbol(n, &ssa.AutoSymbol{Node: n})
3253			return s.newValue1A(ssa.OpAddr, t, aux, s.sp)
3254		case PPARAMOUT: // Same as PAUTO -- cannot generate LEA early.
3255			// ensure that we reuse symbols for out parameters so
3256			// that cse works on their addresses
3257			aux := s.lookupSymbol(n, &ssa.ArgSymbol{Node: n})
3258			return s.newValue1A(ssa.OpAddr, t, aux, s.sp)
3259		default:
3260			s.Fatalf("variable address class %v not implemented", classnames[n.Class()])
3261			return nil
3262		}
3263	case OINDREGSP:
3264		// indirect off REGSP
3265		// used for storing/loading arguments/returns to/from callees
3266		return s.constOffPtrSP(t, n.Xoffset)
3267	case OINDEX:
3268		if n.Left.Type.IsSlice() {
3269			a := s.expr(n.Left)
3270			i := s.expr(n.Right)
3271			i = s.extendIndex(i, panicindex)
3272			len := s.newValue1(ssa.OpSliceLen, types.Types[TINT], a)
3273			if !n.Bounded() {
3274				s.boundsCheck(i, len)
3275			}
3276			p := s.newValue1(ssa.OpSlicePtr, t, a)
3277			return s.newValue2(ssa.OpPtrIndex, t, p, i)
3278		} else { // array
3279			a := s.addr(n.Left, bounded)
3280			i := s.expr(n.Right)
3281			i = s.extendIndex(i, panicindex)
3282			len := s.constInt(types.Types[TINT], n.Left.Type.NumElem())
3283			if !n.Bounded() {
3284				s.boundsCheck(i, len)
3285			}
3286			return s.newValue2(ssa.OpPtrIndex, types.NewPtr(n.Left.Type.Elem()), a, i)
3287		}
3288	case OIND:
3289		return s.exprPtr(n.Left, bounded, n.Pos)
3290	case ODOT:
3291		p := s.addr(n.Left, bounded)
3292		return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset, p)
3293	case ODOTPTR:
3294		p := s.exprPtr(n.Left, bounded, n.Pos)
3295		return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset, p)
3296	case OCLOSUREVAR:
3297		return s.newValue1I(ssa.OpOffPtr, t, n.Xoffset,
3298			s.entryNewValue0(ssa.OpGetClosurePtr, s.f.Config.Types.BytePtr))
3299	case OCONVNOP:
3300		addr := s.addr(n.Left, bounded)
3301		return s.newValue1(ssa.OpCopy, t, addr) // ensure that addr has the right type
3302	case OCALLFUNC, OCALLINTER, OCALLMETH:
3303		return s.call(n, callNormal)
3304	case ODOTTYPE:
3305		v, _ := s.dottype(n, false)
3306		if v.Op != ssa.OpLoad {
3307			s.Fatalf("dottype of non-load")
3308		}
3309		if v.Args[1] != s.mem() {
3310			s.Fatalf("memory no longer live from dottype load")
3311		}
3312		return v.Args[0]
3313	default:
3314		s.Fatalf("unhandled addr %v", n.Op)
3315		return nil
3316	}
3317}
3318
3319// canSSA reports whether n is SSA-able.
3320// n must be an ONAME (or an ODOT sequence with an ONAME base).
3321func (s *state) canSSA(n *Node) bool {
3322	if Debug['N'] != 0 {
3323		return false
3324	}
3325	for n.Op == ODOT || (n.Op == OINDEX && n.Left.Type.IsArray()) {
3326		n = n.Left
3327	}
3328	if n.Op != ONAME {
3329		return false
3330	}
3331	if n.Addrtaken() {
3332		return false
3333	}
3334	if n.isParamHeapCopy() {
3335		return false
3336	}
3337	if n.Class() == PAUTOHEAP {
3338		Fatalf("canSSA of PAUTOHEAP %v", n)
3339	}
3340	switch n.Class() {
3341	case PEXTERN:
3342		return false
3343	case PPARAMOUT:
3344		if s.hasdefer {
3345			// TODO: handle this case?  Named return values must be
3346			// in memory so that the deferred function can see them.
3347			// Maybe do: if !strings.HasPrefix(n.String(), "~") { return false }
3348			// Or maybe not, see issue 18860.  Even unnamed return values
3349			// must be written back so if a defer recovers, the caller can see them.
3350			return false
3351		}
3352		if s.cgoUnsafeArgs {
3353			// Cgo effectively takes the address of all result args,
3354			// but the compiler can't see that.
3355			return false
3356		}
3357	}
3358	if n.Class() == PPARAM && n.Sym != nil && n.Sym.Name == ".this" {
3359		// wrappers generated by genwrapper need to update
3360		// the .this pointer in place.
3361		// TODO: treat as a PPARMOUT?
3362		return false
3363	}
3364	return canSSAType(n.Type)
3365	// TODO: try to make more variables SSAable?
3366}
3367
3368// canSSA reports whether variables of type t are SSA-able.
3369func canSSAType(t *types.Type) bool {
3370	dowidth(t)
3371	if t.Width > int64(4*Widthptr) {
3372		// 4*Widthptr is an arbitrary constant. We want it
3373		// to be at least 3*Widthptr so slices can be registerized.
3374		// Too big and we'll introduce too much register pressure.
3375		return false
3376	}
3377	switch t.Etype {
3378	case TARRAY:
3379		// We can't do larger arrays because dynamic indexing is
3380		// not supported on SSA variables.
3381		// TODO: allow if all indexes are constant.
3382		if t.NumElem() <= 1 {
3383			return canSSAType(t.Elem())
3384		}
3385		return false
3386	case TSTRUCT:
3387		if t.NumFields() > ssa.MaxStruct {
3388			return false
3389		}
3390		for _, t1 := range t.Fields().Slice() {
3391			if !canSSAType(t1.Type) {
3392				return false
3393			}
3394		}
3395		return true
3396	default:
3397		return true
3398	}
3399}
3400
3401// exprPtr evaluates n to a pointer and nil-checks it.
3402func (s *state) exprPtr(n *Node, bounded bool, lineno src.XPos) *ssa.Value {
3403	p := s.expr(n)
3404	if bounded || n.NonNil() {
3405		if s.f.Frontend().Debug_checknil() && lineno.Line() > 1 {
3406			s.f.Warnl(lineno, "removed nil check")
3407		}
3408		return p
3409	}
3410	s.nilCheck(p)
3411	return p
3412}
3413
3414// nilCheck generates nil pointer checking code.
3415// Used only for automatically inserted nil checks,
3416// not for user code like 'x != nil'.
3417func (s *state) nilCheck(ptr *ssa.Value) {
3418	if disable_checknil != 0 || s.curfn.Func.NilCheckDisabled() {
3419		return
3420	}
3421	s.newValue2(ssa.OpNilCheck, types.TypeVoid, ptr, s.mem())
3422}
3423
3424// boundsCheck generates bounds checking code. Checks if 0 <= idx < len, branches to exit if not.
3425// Starts a new block on return.
3426// idx is already converted to full int width.
3427func (s *state) boundsCheck(idx, len *ssa.Value) {
3428	if Debug['B'] != 0 {
3429		return
3430	}
3431
3432	// bounds check
3433	cmp := s.newValue2(ssa.OpIsInBounds, types.Types[TBOOL], idx, len)
3434	s.check(cmp, panicindex)
3435}
3436
3437// sliceBoundsCheck generates slice bounds checking code. Checks if 0 <= idx <= len, branches to exit if not.
3438// Starts a new block on return.
3439// idx and len are already converted to full int width.
3440func (s *state) sliceBoundsCheck(idx, len *ssa.Value) {
3441	if Debug['B'] != 0 {
3442		return
3443	}
3444
3445	// bounds check
3446	cmp := s.newValue2(ssa.OpIsSliceInBounds, types.Types[TBOOL], idx, len)
3447	s.check(cmp, panicslice)
3448}
3449
3450// If cmp (a bool) is false, panic using the given function.
3451func (s *state) check(cmp *ssa.Value, fn *obj.LSym) {
3452	b := s.endBlock()
3453	b.Kind = ssa.BlockIf
3454	b.SetControl(cmp)
3455	b.Likely = ssa.BranchLikely
3456	bNext := s.f.NewBlock(ssa.BlockPlain)
3457	line := s.peekPos()
3458	pos := Ctxt.PosTable.Pos(line)
3459	fl := funcLine{f: fn, base: pos.Base(), line: pos.Line()}
3460	bPanic := s.panics[fl]
3461	if bPanic == nil {
3462		bPanic = s.f.NewBlock(ssa.BlockPlain)
3463		s.panics[fl] = bPanic
3464		s.startBlock(bPanic)
3465		// The panic call takes/returns memory to ensure that the right
3466		// memory state is observed if the panic happens.
3467		s.rtcall(fn, false, nil)
3468	}
3469	b.AddEdgeTo(bNext)
3470	b.AddEdgeTo(bPanic)
3471	s.startBlock(bNext)
3472}
3473
3474func (s *state) intDivide(n *Node, a, b *ssa.Value) *ssa.Value {
3475	needcheck := true
3476	switch b.Op {
3477	case ssa.OpConst8, ssa.OpConst16, ssa.OpConst32, ssa.OpConst64:
3478		if b.AuxInt != 0 {
3479			needcheck = false
3480		}
3481	}
3482	if needcheck {
3483		// do a size-appropriate check for zero
3484		cmp := s.newValue2(s.ssaOp(ONE, n.Type), types.Types[TBOOL], b, s.zeroVal(n.Type))
3485		s.check(cmp, panicdivide)
3486	}
3487	return s.newValue2(s.ssaOp(n.Op, n.Type), a.Type, a, b)
3488}
3489
3490// rtcall issues a call to the given runtime function fn with the listed args.
3491// Returns a slice of results of the given result types.
3492// The call is added to the end of the current block.
3493// If returns is false, the block is marked as an exit block.
3494func (s *state) rtcall(fn *obj.LSym, returns bool, results []*types.Type, args ...*ssa.Value) []*ssa.Value {
3495	// Write args to the stack
3496	off := Ctxt.FixedFrameSize()
3497	for _, arg := range args {
3498		t := arg.Type
3499		off = Rnd(off, t.Alignment())
3500		ptr := s.constOffPtrSP(t.PtrTo(), off)
3501		size := t.Size()
3502		s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, t, ptr, arg, s.mem())
3503		off += size
3504	}
3505	off = Rnd(off, int64(Widthreg))
3506
3507	// Issue call
3508	call := s.newValue1A(ssa.OpStaticCall, types.TypeMem, fn, s.mem())
3509	s.vars[&memVar] = call
3510
3511	if !returns {
3512		// Finish block
3513		b := s.endBlock()
3514		b.Kind = ssa.BlockExit
3515		b.SetControl(call)
3516		call.AuxInt = off - Ctxt.FixedFrameSize()
3517		if len(results) > 0 {
3518			Fatalf("panic call can't have results")
3519		}
3520		return nil
3521	}
3522
3523	// Load results
3524	res := make([]*ssa.Value, len(results))
3525	for i, t := range results {
3526		off = Rnd(off, t.Alignment())
3527		ptr := s.constOffPtrSP(types.NewPtr(t), off)
3528		res[i] = s.newValue2(ssa.OpLoad, t, ptr, s.mem())
3529		off += t.Size()
3530	}
3531	off = Rnd(off, int64(Widthptr))
3532
3533	// Remember how much callee stack space we needed.
3534	call.AuxInt = off
3535
3536	return res
3537}
3538
3539// do *left = right for type t.
3540func (s *state) storeType(t *types.Type, left, right *ssa.Value, skip skipMask) {
3541	if skip == 0 && (!types.Haspointers(t) || ssa.IsStackAddr(left)) {
3542		// Known to not have write barrier. Store the whole type.
3543		s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, t, left, right, s.mem())
3544		return
3545	}
3546
3547	// store scalar fields first, so write barrier stores for
3548	// pointer fields can be grouped together, and scalar values
3549	// don't need to be live across the write barrier call.
3550	// TODO: if the writebarrier pass knows how to reorder stores,
3551	// we can do a single store here as long as skip==0.
3552	s.storeTypeScalars(t, left, right, skip)
3553	if skip&skipPtr == 0 && types.Haspointers(t) {
3554		s.storeTypePtrs(t, left, right)
3555	}
3556}
3557
3558// do *left = right for all scalar (non-pointer) parts of t.
3559func (s *state) storeTypeScalars(t *types.Type, left, right *ssa.Value, skip skipMask) {
3560	switch {
3561	case t.IsBoolean() || t.IsInteger() || t.IsFloat() || t.IsComplex():
3562		s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, t, left, right, s.mem())
3563	case t.IsPtrShaped():
3564		// no scalar fields.
3565	case t.IsString():
3566		if skip&skipLen != 0 {
3567			return
3568		}
3569		len := s.newValue1(ssa.OpStringLen, types.Types[TINT], right)
3570		lenAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, s.config.PtrSize, left)
3571		s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, types.Types[TINT], lenAddr, len, s.mem())
3572	case t.IsSlice():
3573		if skip&skipLen == 0 {
3574			len := s.newValue1(ssa.OpSliceLen, types.Types[TINT], right)
3575			lenAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, s.config.PtrSize, left)
3576			s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, types.Types[TINT], lenAddr, len, s.mem())
3577		}
3578		if skip&skipCap == 0 {
3579			cap := s.newValue1(ssa.OpSliceCap, types.Types[TINT], right)
3580			capAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, 2*s.config.PtrSize, left)
3581			s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, types.Types[TINT], capAddr, cap, s.mem())
3582		}
3583	case t.IsInterface():
3584		// itab field doesn't need a write barrier (even though it is a pointer).
3585		itab := s.newValue1(ssa.OpITab, s.f.Config.Types.BytePtr, right)
3586		s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, types.Types[TUINTPTR], left, itab, s.mem())
3587	case t.IsStruct():
3588		n := t.NumFields()
3589		for i := 0; i < n; i++ {
3590			ft := t.FieldType(i)
3591			addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left)
3592			val := s.newValue1I(ssa.OpStructSelect, ft, int64(i), right)
3593			s.storeTypeScalars(ft, addr, val, 0)
3594		}
3595	case t.IsArray() && t.NumElem() == 0:
3596		// nothing
3597	case t.IsArray() && t.NumElem() == 1:
3598		s.storeTypeScalars(t.Elem(), left, s.newValue1I(ssa.OpArraySelect, t.Elem(), 0, right), 0)
3599	default:
3600		s.Fatalf("bad write barrier type %v", t)
3601	}
3602}
3603
3604// do *left = right for all pointer parts of t.
3605func (s *state) storeTypePtrs(t *types.Type, left, right *ssa.Value) {
3606	switch {
3607	case t.IsPtrShaped():
3608		s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, t, left, right, s.mem())
3609	case t.IsString():
3610		ptr := s.newValue1(ssa.OpStringPtr, s.f.Config.Types.BytePtr, right)
3611		s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, s.f.Config.Types.BytePtr, left, ptr, s.mem())
3612	case t.IsSlice():
3613		ptr := s.newValue1(ssa.OpSlicePtr, s.f.Config.Types.BytePtr, right)
3614		s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, s.f.Config.Types.BytePtr, left, ptr, s.mem())
3615	case t.IsInterface():
3616		// itab field is treated as a scalar.
3617		idata := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, right)
3618		idataAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.BytePtrPtr, s.config.PtrSize, left)
3619		s.vars[&memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, s.f.Config.Types.BytePtr, idataAddr, idata, s.mem())
3620	case t.IsStruct():
3621		n := t.NumFields()
3622		for i := 0; i < n; i++ {
3623			ft := t.FieldType(i)
3624			if !types.Haspointers(ft) {
3625				continue
3626			}
3627			addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left)
3628			val := s.newValue1I(ssa.OpStructSelect, ft, int64(i), right)
3629			s.storeTypePtrs(ft, addr, val)
3630		}
3631	case t.IsArray() && t.NumElem() == 0:
3632		// nothing
3633	case t.IsArray() && t.NumElem() == 1:
3634		s.storeTypePtrs(t.Elem(), left, s.newValue1I(ssa.OpArraySelect, t.Elem(), 0, right))
3635	default:
3636		s.Fatalf("bad write barrier type %v", t)
3637	}
3638}
3639
3640// slice computes the slice v[i:j:k] and returns ptr, len, and cap of result.
3641// i,j,k may be nil, in which case they are set to their default value.
3642// t is a slice, ptr to array, or string type.
3643func (s *state) slice(t *types.Type, v, i, j, k *ssa.Value) (p, l, c *ssa.Value) {
3644	var elemtype *types.Type
3645	var ptrtype *types.Type
3646	var ptr *ssa.Value
3647	var len *ssa.Value
3648	var cap *ssa.Value
3649	zero := s.constInt(types.Types[TINT], 0)
3650	switch {
3651	case t.IsSlice():
3652		elemtype = t.Elem()
3653		ptrtype = types.NewPtr(elemtype)
3654		ptr = s.newValue1(ssa.OpSlicePtr, ptrtype, v)
3655		len = s.newValue1(ssa.OpSliceLen, types.Types[TINT], v)
3656		cap = s.newValue1(ssa.OpSliceCap, types.Types[TINT], v)
3657	case t.IsString():
3658		elemtype = types.Types[TUINT8]
3659		ptrtype = types.NewPtr(elemtype)
3660		ptr = s.newValue1(ssa.OpStringPtr, ptrtype, v)
3661		len = s.newValue1(ssa.OpStringLen, types.Types[TINT], v)
3662		cap = len
3663	case t.IsPtr():
3664		if !t.Elem().IsArray() {
3665			s.Fatalf("bad ptr to array in slice %v\n", t)
3666		}
3667		elemtype = t.Elem().Elem()
3668		ptrtype = types.NewPtr(elemtype)
3669		s.nilCheck(v)
3670		ptr = v
3671		len = s.constInt(types.Types[TINT], t.Elem().NumElem())
3672		cap = len
3673	default:
3674		s.Fatalf("bad type in slice %v\n", t)
3675	}
3676
3677	// Set default values
3678	if i == nil {
3679		i = zero
3680	}
3681	if j == nil {
3682		j = len
3683	}
3684	if k == nil {
3685		k = cap
3686	}
3687
3688	// Panic if slice indices are not in bounds.
3689	s.sliceBoundsCheck(i, j)
3690	if j != k {
3691		s.sliceBoundsCheck(j, k)
3692	}
3693	if k != cap {
3694		s.sliceBoundsCheck(k, cap)
3695	}
3696
3697	// Generate the following code assuming that indexes are in bounds.
3698	// The masking is to make sure that we don't generate a slice
3699	// that points to the next object in memory.
3700	// rlen = j - i
3701	// rcap = k - i
3702	// delta = i * elemsize
3703	// rptr = p + delta&mask(rcap)
3704	// result = (SliceMake rptr rlen rcap)
3705	// where mask(x) is 0 if x==0 and -1 if x>0.
3706	subOp := s.ssaOp(OSUB, types.Types[TINT])
3707	mulOp := s.ssaOp(OMUL, types.Types[TINT])
3708	andOp := s.ssaOp(OAND, types.Types[TINT])
3709	rlen := s.newValue2(subOp, types.Types[TINT], j, i)
3710	var rcap *ssa.Value
3711	switch {
3712	case t.IsString():
3713		// Capacity of the result is unimportant. However, we use
3714		// rcap to test if we've generated a zero-length slice.
3715		// Use length of strings for that.
3716		rcap = rlen
3717	case j == k:
3718		rcap = rlen
3719	default:
3720		rcap = s.newValue2(subOp, types.Types[TINT], k, i)
3721	}
3722
3723	var rptr *ssa.Value
3724	if (i.Op == ssa.OpConst64 || i.Op == ssa.OpConst32) && i.AuxInt == 0 {
3725		// No pointer arithmetic necessary.
3726		rptr = ptr
3727	} else {
3728		// delta = # of bytes to offset pointer by.
3729		delta := s.newValue2(mulOp, types.Types[TINT], i, s.constInt(types.Types[TINT], elemtype.Width))
3730		// If we're slicing to the point where the capacity is zero,
3731		// zero out the delta.
3732		mask := s.newValue1(ssa.OpSlicemask, types.Types[TINT], rcap)
3733		delta = s.newValue2(andOp, types.Types[TINT], delta, mask)
3734		// Compute rptr = ptr + delta
3735		rptr = s.newValue2(ssa.OpAddPtr, ptrtype, ptr, delta)
3736	}
3737
3738	return rptr, rlen, rcap
3739}
3740
3741type u642fcvtTab struct {
3742	geq, cvt2F, and, rsh, or, add ssa.Op
3743	one                           func(*state, *types.Type, int64) *ssa.Value
3744}
3745
3746var u64_f64 u642fcvtTab = u642fcvtTab{
3747	geq:   ssa.OpGeq64,
3748	cvt2F: ssa.OpCvt64to64F,
3749	and:   ssa.OpAnd64,
3750	rsh:   ssa.OpRsh64Ux64,
3751	or:    ssa.OpOr64,
3752	add:   ssa.OpAdd64F,
3753	one:   (*state).constInt64,
3754}
3755
3756var u64_f32 u642fcvtTab = u642fcvtTab{
3757	geq:   ssa.OpGeq64,
3758	cvt2F: ssa.OpCvt64to32F,
3759	and:   ssa.OpAnd64,
3760	rsh:   ssa.OpRsh64Ux64,
3761	or:    ssa.OpOr64,
3762	add:   ssa.OpAdd32F,
3763	one:   (*state).constInt64,
3764}
3765
3766func (s *state) uint64Tofloat64(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
3767	return s.uint64Tofloat(&u64_f64, n, x, ft, tt)
3768}
3769
3770func (s *state) uint64Tofloat32(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
3771	return s.uint64Tofloat(&u64_f32, n, x, ft, tt)
3772}
3773
3774func (s *state) uint64Tofloat(cvttab *u642fcvtTab, n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
3775	// if x >= 0 {
3776	//    result = (floatY) x
3777	// } else {
3778	// 	  y = uintX(x) ; y = x & 1
3779	// 	  z = uintX(x) ; z = z >> 1
3780	// 	  z = z >> 1
3781	// 	  z = z | y
3782	// 	  result = floatY(z)
3783	// 	  result = result + result
3784	// }
3785	//
3786	// Code borrowed from old code generator.
3787	// What's going on: large 64-bit "unsigned" looks like
3788	// negative number to hardware's integer-to-float
3789	// conversion. However, because the mantissa is only
3790	// 63 bits, we don't need the LSB, so instead we do an
3791	// unsigned right shift (divide by two), convert, and
3792	// double. However, before we do that, we need to be
3793	// sure that we do not lose a "1" if that made the
3794	// difference in the resulting rounding. Therefore, we
3795	// preserve it, and OR (not ADD) it back in. The case
3796	// that matters is when the eleven discarded bits are
3797	// equal to 10000000001; that rounds up, and the 1 cannot
3798	// be lost else it would round down if the LSB of the
3799	// candidate mantissa is 0.
3800	cmp := s.newValue2(cvttab.geq, types.Types[TBOOL], x, s.zeroVal(ft))
3801	b := s.endBlock()
3802	b.Kind = ssa.BlockIf
3803	b.SetControl(cmp)
3804	b.Likely = ssa.BranchLikely
3805
3806	bThen := s.f.NewBlock(ssa.BlockPlain)
3807	bElse := s.f.NewBlock(ssa.BlockPlain)
3808	bAfter := s.f.NewBlock(ssa.BlockPlain)
3809
3810	b.AddEdgeTo(bThen)
3811	s.startBlock(bThen)
3812	a0 := s.newValue1(cvttab.cvt2F, tt, x)
3813	s.vars[n] = a0
3814	s.endBlock()
3815	bThen.AddEdgeTo(bAfter)
3816
3817	b.AddEdgeTo(bElse)
3818	s.startBlock(bElse)
3819	one := cvttab.one(s, ft, 1)
3820	y := s.newValue2(cvttab.and, ft, x, one)
3821	z := s.newValue2(cvttab.rsh, ft, x, one)
3822	z = s.newValue2(cvttab.or, ft, z, y)
3823	a := s.newValue1(cvttab.cvt2F, tt, z)
3824	a1 := s.newValue2(cvttab.add, tt, a, a)
3825	s.vars[n] = a1
3826	s.endBlock()
3827	bElse.AddEdgeTo(bAfter)
3828
3829	s.startBlock(bAfter)
3830	return s.variable(n, n.Type)
3831}
3832
3833type u322fcvtTab struct {
3834	cvtI2F, cvtF2F ssa.Op
3835}
3836
3837var u32_f64 u322fcvtTab = u322fcvtTab{
3838	cvtI2F: ssa.OpCvt32to64F,
3839	cvtF2F: ssa.OpCopy,
3840}
3841
3842var u32_f32 u322fcvtTab = u322fcvtTab{
3843	cvtI2F: ssa.OpCvt32to32F,
3844	cvtF2F: ssa.OpCvt64Fto32F,
3845}
3846
3847func (s *state) uint32Tofloat64(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
3848	return s.uint32Tofloat(&u32_f64, n, x, ft, tt)
3849}
3850
3851func (s *state) uint32Tofloat32(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
3852	return s.uint32Tofloat(&u32_f32, n, x, ft, tt)
3853}
3854
3855func (s *state) uint32Tofloat(cvttab *u322fcvtTab, n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
3856	// if x >= 0 {
3857	// 	result = floatY(x)
3858	// } else {
3859	// 	result = floatY(float64(x) + (1<<32))
3860	// }
3861	cmp := s.newValue2(ssa.OpGeq32, types.Types[TBOOL], x, s.zeroVal(ft))
3862	b := s.endBlock()
3863	b.Kind = ssa.BlockIf
3864	b.SetControl(cmp)
3865	b.Likely = ssa.BranchLikely
3866
3867	bThen := s.f.NewBlock(ssa.BlockPlain)
3868	bElse := s.f.NewBlock(ssa.BlockPlain)
3869	bAfter := s.f.NewBlock(ssa.BlockPlain)
3870
3871	b.AddEdgeTo(bThen)
3872	s.startBlock(bThen)
3873	a0 := s.newValue1(cvttab.cvtI2F, tt, x)
3874	s.vars[n] = a0
3875	s.endBlock()
3876	bThen.AddEdgeTo(bAfter)
3877
3878	b.AddEdgeTo(bElse)
3879	s.startBlock(bElse)
3880	a1 := s.newValue1(ssa.OpCvt32to64F, types.Types[TFLOAT64], x)
3881	twoToThe32 := s.constFloat64(types.Types[TFLOAT64], float64(1<<32))
3882	a2 := s.newValue2(ssa.OpAdd64F, types.Types[TFLOAT64], a1, twoToThe32)
3883	a3 := s.newValue1(cvttab.cvtF2F, tt, a2)
3884
3885	s.vars[n] = a3
3886	s.endBlock()
3887	bElse.AddEdgeTo(bAfter)
3888
3889	s.startBlock(bAfter)
3890	return s.variable(n, n.Type)
3891}
3892
3893// referenceTypeBuiltin generates code for the len/cap builtins for maps and channels.
3894func (s *state) referenceTypeBuiltin(n *Node, x *ssa.Value) *ssa.Value {
3895	if !n.Left.Type.IsMap() && !n.Left.Type.IsChan() {
3896		s.Fatalf("node must be a map or a channel")
3897	}
3898	// if n == nil {
3899	//   return 0
3900	// } else {
3901	//   // len
3902	//   return *((*int)n)
3903	//   // cap
3904	//   return *(((*int)n)+1)
3905	// }
3906	lenType := n.Type
3907	nilValue := s.constNil(types.Types[TUINTPTR])
3908	cmp := s.newValue2(ssa.OpEqPtr, types.Types[TBOOL], x, nilValue)
3909	b := s.endBlock()
3910	b.Kind = ssa.BlockIf
3911	b.SetControl(cmp)
3912	b.Likely = ssa.BranchUnlikely
3913
3914	bThen := s.f.NewBlock(ssa.BlockPlain)
3915	bElse := s.f.NewBlock(ssa.BlockPlain)
3916	bAfter := s.f.NewBlock(ssa.BlockPlain)
3917
3918	// length/capacity of a nil map/chan is zero
3919	b.AddEdgeTo(bThen)
3920	s.startBlock(bThen)
3921	s.vars[n] = s.zeroVal(lenType)
3922	s.endBlock()
3923	bThen.AddEdgeTo(bAfter)
3924
3925	b.AddEdgeTo(bElse)
3926	s.startBlock(bElse)
3927	if n.Op == OLEN {
3928		// length is stored in the first word for map/chan
3929		s.vars[n] = s.newValue2(ssa.OpLoad, lenType, x, s.mem())
3930	} else if n.Op == OCAP {
3931		// capacity is stored in the second word for chan
3932		sw := s.newValue1I(ssa.OpOffPtr, lenType.PtrTo(), lenType.Width, x)
3933		s.vars[n] = s.newValue2(ssa.OpLoad, lenType, sw, s.mem())
3934	} else {
3935		s.Fatalf("op must be OLEN or OCAP")
3936	}
3937	s.endBlock()
3938	bElse.AddEdgeTo(bAfter)
3939
3940	s.startBlock(bAfter)
3941	return s.variable(n, lenType)
3942}
3943
3944type f2uCvtTab struct {
3945	ltf, cvt2U, subf, or ssa.Op
3946	floatValue           func(*state, *types.Type, float64) *ssa.Value
3947	intValue             func(*state, *types.Type, int64) *ssa.Value
3948	cutoff               uint64
3949}
3950
3951var f32_u64 f2uCvtTab = f2uCvtTab{
3952	ltf:        ssa.OpLess32F,
3953	cvt2U:      ssa.OpCvt32Fto64,
3954	subf:       ssa.OpSub32F,
3955	or:         ssa.OpOr64,
3956	floatValue: (*state).constFloat32,
3957	intValue:   (*state).constInt64,
3958	cutoff:     9223372036854775808,
3959}
3960
3961var f64_u64 f2uCvtTab = f2uCvtTab{
3962	ltf:        ssa.OpLess64F,
3963	cvt2U:      ssa.OpCvt64Fto64,
3964	subf:       ssa.OpSub64F,
3965	or:         ssa.OpOr64,
3966	floatValue: (*state).constFloat64,
3967	intValue:   (*state).constInt64,
3968	cutoff:     9223372036854775808,
3969}
3970
3971var f32_u32 f2uCvtTab = f2uCvtTab{
3972	ltf:        ssa.OpLess32F,
3973	cvt2U:      ssa.OpCvt32Fto32,
3974	subf:       ssa.OpSub32F,
3975	or:         ssa.OpOr32,
3976	floatValue: (*state).constFloat32,
3977	intValue:   func(s *state, t *types.Type, v int64) *ssa.Value { return s.constInt32(t, int32(v)) },
3978	cutoff:     2147483648,
3979}
3980
3981var f64_u32 f2uCvtTab = f2uCvtTab{
3982	ltf:        ssa.OpLess64F,
3983	cvt2U:      ssa.OpCvt64Fto32,
3984	subf:       ssa.OpSub64F,
3985	or:         ssa.OpOr32,
3986	floatValue: (*state).constFloat64,
3987	intValue:   func(s *state, t *types.Type, v int64) *ssa.Value { return s.constInt32(t, int32(v)) },
3988	cutoff:     2147483648,
3989}
3990
3991func (s *state) float32ToUint64(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
3992	return s.floatToUint(&f32_u64, n, x, ft, tt)
3993}
3994func (s *state) float64ToUint64(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
3995	return s.floatToUint(&f64_u64, n, x, ft, tt)
3996}
3997
3998func (s *state) float32ToUint32(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
3999	return s.floatToUint(&f32_u32, n, x, ft, tt)
4000}
4001
4002func (s *state) float64ToUint32(n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
4003	return s.floatToUint(&f64_u32, n, x, ft, tt)
4004}
4005
4006func (s *state) floatToUint(cvttab *f2uCvtTab, n *Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
4007	// cutoff:=1<<(intY_Size-1)
4008	// if x < floatX(cutoff) {
4009	// 	result = uintY(x)
4010	// } else {
4011	// 	y = x - floatX(cutoff)
4012	// 	z = uintY(y)
4013	// 	result = z | -(cutoff)
4014	// }
4015	cutoff := cvttab.floatValue(s, ft, float64(cvttab.cutoff))
4016	cmp := s.newValue2(cvttab.ltf, types.Types[TBOOL], x, cutoff)
4017	b := s.endBlock()
4018	b.Kind = ssa.BlockIf
4019	b.SetControl(cmp)
4020	b.Likely = ssa.BranchLikely
4021
4022	bThen := s.f.NewBlock(ssa.BlockPlain)
4023	bElse := s.f.NewBlock(ssa.BlockPlain)
4024	bAfter := s.f.NewBlock(ssa.BlockPlain)
4025
4026	b.AddEdgeTo(bThen)
4027	s.startBlock(bThen)
4028	a0 := s.newValue1(cvttab.cvt2U, tt, x)
4029	s.vars[n] = a0
4030	s.endBlock()
4031	bThen.AddEdgeTo(bAfter)
4032
4033	b.AddEdgeTo(bElse)
4034	s.startBlock(bElse)
4035	y := s.newValue2(cvttab.subf, ft, x, cutoff)
4036	y = s.newValue1(cvttab.cvt2U, tt, y)
4037	z := cvttab.intValue(s, tt, int64(-cvttab.cutoff))
4038	a1 := s.newValue2(cvttab.or, tt, y, z)
4039	s.vars[n] = a1
4040	s.endBlock()
4041	bElse.AddEdgeTo(bAfter)
4042
4043	s.startBlock(bAfter)
4044	return s.variable(n, n.Type)
4045}
4046
4047// dottype generates SSA for a type assertion node.
4048// commaok indicates whether to panic or return a bool.
4049// If commaok is false, resok will be nil.
4050func (s *state) dottype(n *Node, commaok bool) (res, resok *ssa.Value) {
4051	iface := s.expr(n.Left)   // input interface
4052	target := s.expr(n.Right) // target type
4053	byteptr := s.f.Config.Types.BytePtr
4054
4055	if n.Type.IsInterface() {
4056		if n.Type.IsEmptyInterface() {
4057			// Converting to an empty interface.
4058			// Input could be an empty or nonempty interface.
4059			if Debug_typeassert > 0 {
4060				Warnl(n.Pos, "type assertion inlined")
4061			}
4062
4063			// Get itab/type field from input.
4064			itab := s.newValue1(ssa.OpITab, byteptr, iface)
4065			// Conversion succeeds iff that field is not nil.
4066			cond := s.newValue2(ssa.OpNeqPtr, types.Types[TBOOL], itab, s.constNil(byteptr))
4067
4068			if n.Left.Type.IsEmptyInterface() && commaok {
4069				// Converting empty interface to empty interface with ,ok is just a nil check.
4070				return iface, cond
4071			}
4072
4073			// Branch on nilness.
4074			b := s.endBlock()
4075			b.Kind = ssa.BlockIf
4076			b.SetControl(cond)
4077			b.Likely = ssa.BranchLikely
4078			bOk := s.f.NewBlock(ssa.BlockPlain)
4079			bFail := s.f.NewBlock(ssa.BlockPlain)
4080			b.AddEdgeTo(bOk)
4081			b.AddEdgeTo(bFail)
4082
4083			if !commaok {
4084				// On failure, panic by calling panicnildottype.
4085				s.startBlock(bFail)
4086				s.rtcall(panicnildottype, false, nil, target)
4087
4088				// On success, return (perhaps modified) input interface.
4089				s.startBlock(bOk)
4090				if n.Left.Type.IsEmptyInterface() {
4091					res = iface // Use input interface unchanged.
4092					return
4093				}
4094				// Load type out of itab, build interface with existing idata.
4095				off := s.newValue1I(ssa.OpOffPtr, byteptr, int64(Widthptr), itab)
4096				typ := s.newValue2(ssa.OpLoad, byteptr, off, s.mem())
4097				idata := s.newValue1(ssa.OpIData, n.Type, iface)
4098				res = s.newValue2(ssa.OpIMake, n.Type, typ, idata)
4099				return
4100			}
4101
4102			s.startBlock(bOk)
4103			// nonempty -> empty
4104			// Need to load type from itab
4105			off := s.newValue1I(ssa.OpOffPtr, byteptr, int64(Widthptr), itab)
4106			s.vars[&typVar] = s.newValue2(ssa.OpLoad, byteptr, off, s.mem())
4107			s.endBlock()
4108
4109			// itab is nil, might as well use that as the nil result.
4110			s.startBlock(bFail)
4111			s.vars[&typVar] = itab
4112			s.endBlock()
4113
4114			// Merge point.
4115			bEnd := s.f.NewBlock(ssa.BlockPlain)
4116			bOk.AddEdgeTo(bEnd)
4117			bFail.AddEdgeTo(bEnd)
4118			s.startBlock(bEnd)
4119			idata := s.newValue1(ssa.OpIData, n.Type, iface)
4120			res = s.newValue2(ssa.OpIMake, n.Type, s.variable(&typVar, byteptr), idata)
4121			resok = cond
4122			delete(s.vars, &typVar)
4123			return
4124		}
4125		// converting to a nonempty interface needs a runtime call.
4126		if Debug_typeassert > 0 {
4127			Warnl(n.Pos, "type assertion not inlined")
4128		}
4129		if n.Left.Type.IsEmptyInterface() {
4130			if commaok {
4131				call := s.rtcall(assertE2I2, true, []*types.Type{n.Type, types.Types[TBOOL]}, target, iface)
4132				return call[0], call[1]
4133			}
4134			return s.rtcall(assertE2I, true, []*types.Type{n.Type}, target, iface)[0], nil
4135		}
4136		if commaok {
4137			call := s.rtcall(assertI2I2, true, []*types.Type{n.Type, types.Types[TBOOL]}, target, iface)
4138			return call[0], call[1]
4139		}
4140		return s.rtcall(assertI2I, true, []*types.Type{n.Type}, target, iface)[0], nil
4141	}
4142
4143	if Debug_typeassert > 0 {
4144		Warnl(n.Pos, "type assertion inlined")
4145	}
4146
4147	// Converting to a concrete type.
4148	direct := isdirectiface(n.Type)
4149	itab := s.newValue1(ssa.OpITab, byteptr, iface) // type word of interface
4150	if Debug_typeassert > 0 {
4151		Warnl(n.Pos, "type assertion inlined")
4152	}
4153	var targetITab *ssa.Value
4154	if n.Left.Type.IsEmptyInterface() {
4155		// Looking for pointer to target type.
4156		targetITab = target
4157	} else {
4158		// Looking for pointer to itab for target type and source interface.
4159		targetITab = s.expr(n.List.First())
4160	}
4161
4162	var tmp *Node       // temporary for use with large types
4163	var addr *ssa.Value // address of tmp
4164	if commaok && !canSSAType(n.Type) {
4165		// unSSAable type, use temporary.
4166		// TODO: get rid of some of these temporaries.
4167		tmp = tempAt(n.Pos, s.curfn, n.Type)
4168		addr = s.addr(tmp, false)
4169		s.vars[&memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, tmp, s.mem())
4170	}
4171
4172	cond := s.newValue2(ssa.OpEqPtr, types.Types[TBOOL], itab, targetITab)
4173	b := s.endBlock()
4174	b.Kind = ssa.BlockIf
4175	b.SetControl(cond)
4176	b.Likely = ssa.BranchLikely
4177
4178	bOk := s.f.NewBlock(ssa.BlockPlain)
4179	bFail := s.f.NewBlock(ssa.BlockPlain)
4180	b.AddEdgeTo(bOk)
4181	b.AddEdgeTo(bFail)
4182
4183	if !commaok {
4184		// on failure, panic by calling panicdottype
4185		s.startBlock(bFail)
4186		taddr := s.expr(n.Right.Right)
4187		if n.Left.Type.IsEmptyInterface() {
4188			s.rtcall(panicdottypeE, false, nil, itab, target, taddr)
4189		} else {
4190			s.rtcall(panicdottypeI, false, nil, itab, target, taddr)
4191		}
4192
4193		// on success, return data from interface
4194		s.startBlock(bOk)
4195		if direct {
4196			return s.newValue1(ssa.OpIData, n.Type, iface), nil
4197		}
4198		p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type), iface)
4199		return s.newValue2(ssa.OpLoad, n.Type, p, s.mem()), nil
4200	}
4201
4202	// commaok is the more complicated case because we have
4203	// a control flow merge point.
4204	bEnd := s.f.NewBlock(ssa.BlockPlain)
4205	// Note that we need a new valVar each time (unlike okVar where we can
4206	// reuse the variable) because it might have a different type every time.
4207	valVar := &Node{Op: ONAME, Sym: &types.Sym{Name: "val"}}
4208
4209	// type assertion succeeded
4210	s.startBlock(bOk)
4211	if tmp == nil {
4212		if direct {
4213			s.vars[valVar] = s.newValue1(ssa.OpIData, n.Type, iface)
4214		} else {
4215			p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type), iface)
4216			s.vars[valVar] = s.newValue2(ssa.OpLoad, n.Type, p, s.mem())
4217		}
4218	} else {
4219		p := s.newValue1(ssa.OpIData, types.NewPtr(n.Type), iface)
4220		store := s.newValue3I(ssa.OpMove, types.TypeMem, n.Type.Size(), addr, p, s.mem())
4221		store.Aux = n.Type
4222		s.vars[&memVar] = store
4223	}
4224	s.vars[&okVar] = s.constBool(true)
4225	s.endBlock()
4226	bOk.AddEdgeTo(bEnd)
4227
4228	// type assertion failed
4229	s.startBlock(bFail)
4230	if tmp == nil {
4231		s.vars[valVar] = s.zeroVal(n.Type)
4232	} else {
4233		store := s.newValue2I(ssa.OpZero, types.TypeMem, n.Type.Size(), addr, s.mem())
4234		store.Aux = n.Type
4235		s.vars[&memVar] = store
4236	}
4237	s.vars[&okVar] = s.constBool(false)
4238	s.endBlock()
4239	bFail.AddEdgeTo(bEnd)
4240
4241	// merge point
4242	s.startBlock(bEnd)
4243	if tmp == nil {
4244		res = s.variable(valVar, n.Type)
4245		delete(s.vars, valVar)
4246	} else {
4247		res = s.newValue2(ssa.OpLoad, n.Type, addr, s.mem())
4248		s.vars[&memVar] = s.newValue1A(ssa.OpVarKill, types.TypeMem, tmp, s.mem())
4249	}
4250	resok = s.variable(&okVar, types.Types[TBOOL])
4251	delete(s.vars, &okVar)
4252	return res, resok
4253}
4254
4255// variable returns the value of a variable at the current location.
4256func (s *state) variable(name *Node, t *types.Type) *ssa.Value {
4257	v := s.vars[name]
4258	if v != nil {
4259		return v
4260	}
4261	v = s.fwdVars[name]
4262	if v != nil {
4263		return v
4264	}
4265
4266	if s.curBlock == s.f.Entry {
4267		// No variable should be live at entry.
4268		s.Fatalf("Value live at entry. It shouldn't be. func %s, node %v, value %v", s.f.Name, name, v)
4269	}
4270	// Make a FwdRef, which records a value that's live on block input.
4271	// We'll find the matching definition as part of insertPhis.
4272	v = s.newValue0A(ssa.OpFwdRef, t, name)
4273	s.fwdVars[name] = v
4274	s.addNamedValue(name, v)
4275	return v
4276}
4277
4278func (s *state) mem() *ssa.Value {
4279	return s.variable(&memVar, types.TypeMem)
4280}
4281
4282func (s *state) addNamedValue(n *Node, v *ssa.Value) {
4283	if n.Class() == Pxxx {
4284		// Don't track our dummy nodes (&memVar etc.).
4285		return
4286	}
4287	if n.IsAutoTmp() {
4288		// Don't track temporary variables.
4289		return
4290	}
4291	if n.Class() == PPARAMOUT {
4292		// Don't track named output values.  This prevents return values
4293		// from being assigned too early. See #14591 and #14762. TODO: allow this.
4294		return
4295	}
4296	if n.Class() == PAUTO && n.Xoffset != 0 {
4297		s.Fatalf("AUTO var with offset %v %d", n, n.Xoffset)
4298	}
4299	loc := ssa.LocalSlot{N: n, Type: n.Type, Off: 0}
4300	values, ok := s.f.NamedValues[loc]
4301	if !ok {
4302		s.f.Names = append(s.f.Names, loc)
4303	}
4304	s.f.NamedValues[loc] = append(values, v)
4305}
4306
4307// Branch is an unresolved branch.
4308type Branch struct {
4309	P *obj.Prog  // branch instruction
4310	B *ssa.Block // target
4311}
4312
4313// SSAGenState contains state needed during Prog generation.
4314type SSAGenState struct {
4315	pp *Progs
4316
4317	// Branches remembers all the branch instructions we've seen
4318	// and where they would like to go.
4319	Branches []Branch
4320
4321	// bstart remembers where each block starts (indexed by block ID)
4322	bstart []*obj.Prog
4323
4324	// 387 port: maps from SSE registers (REG_X?) to 387 registers (REG_F?)
4325	SSEto387 map[int16]int16
4326	// Some architectures require a 64-bit temporary for FP-related register shuffling. Examples include x86-387, PPC, and Sparc V8.
4327	ScratchFpMem *Node
4328
4329	maxarg int64 // largest frame size for arguments to calls made by the function
4330
4331	// Map from GC safe points to stack map index, generated by
4332	// liveness analysis.
4333	stackMapIndex map[*ssa.Value]int
4334}
4335
4336// Prog appends a new Prog.
4337func (s *SSAGenState) Prog(as obj.As) *obj.Prog {
4338	return s.pp.Prog(as)
4339}
4340
4341// Pc returns the current Prog.
4342func (s *SSAGenState) Pc() *obj.Prog {
4343	return s.pp.next
4344}
4345
4346// SetPos sets the current source position.
4347func (s *SSAGenState) SetPos(pos src.XPos) {
4348	s.pp.pos = pos
4349}
4350
4351// DebugFriendlySetPos sets the position subject to heuristics
4352// that reduce "jumpy" line number churn when debugging.
4353// Spill/fill/copy instructions from the register allocator,
4354// phi functions, and instructions with a no-pos position
4355// are examples of instructions that can cause churn.
4356func (s *SSAGenState) DebugFriendlySetPosFrom(v *ssa.Value) {
4357	// The two choices here are either to leave lineno unchanged,
4358	// or to explicitly set it to src.NoXPos.  Leaving it unchanged
4359	// (reusing the preceding line number) produces slightly better-
4360	// looking assembly language output from the compiler, and is
4361	// expected by some already-existing tests.
4362	// The debug information appears to be the same in either case
4363	switch v.Op {
4364	case ssa.OpPhi, ssa.OpCopy, ssa.OpLoadReg, ssa.OpStoreReg:
4365		// leave the position unchanged from beginning of block
4366		// or previous line number.
4367	default:
4368		if v.Pos != src.NoXPos {
4369			s.SetPos(v.Pos)
4370		}
4371	}
4372}
4373
4374// genssa appends entries to pp for each instruction in f.
4375func genssa(f *ssa.Func, pp *Progs) {
4376	var s SSAGenState
4377
4378	e := f.Frontend().(*ssafn)
4379
4380	// Generate GC bitmaps, except if the stack is too large,
4381	// in which compilation will fail later anyway (issue 20529).
4382	if e.stksize < maxStackSize {
4383		s.stackMapIndex = liveness(e, f)
4384	}
4385
4386	// Remember where each block starts.
4387	s.bstart = make([]*obj.Prog, f.NumBlocks())
4388	s.pp = pp
4389	var valueProgs map[*obj.Prog]*ssa.Value
4390	var blockProgs map[*obj.Prog]*ssa.Block
4391	var logProgs = e.log
4392	if logProgs {
4393		valueProgs = make(map[*obj.Prog]*ssa.Value, f.NumValues())
4394		blockProgs = make(map[*obj.Prog]*ssa.Block, f.NumBlocks())
4395		f.Logf("genssa %s\n", f.Name)
4396		blockProgs[s.pp.next] = f.Blocks[0]
4397	}
4398
4399	if thearch.Use387 {
4400		s.SSEto387 = map[int16]int16{}
4401	}
4402
4403	s.ScratchFpMem = e.scratchFpMem
4404
4405	// Emit basic blocks
4406	for i, b := range f.Blocks {
4407		s.bstart[b.ID] = s.pp.next
4408		// Emit values in block
4409		thearch.SSAMarkMoves(&s, b)
4410		for _, v := range b.Values {
4411			x := s.pp.next
4412			s.DebugFriendlySetPosFrom(v)
4413			switch v.Op {
4414			case ssa.OpInitMem:
4415				// memory arg needs no code
4416			case ssa.OpArg:
4417				// input args need no code
4418			case ssa.OpSP, ssa.OpSB:
4419				// nothing to do
4420			case ssa.OpSelect0, ssa.OpSelect1:
4421				// nothing to do
4422			case ssa.OpGetG:
4423				// nothing to do when there's a g register,
4424				// and checkLower complains if there's not
4425			case ssa.OpVarDef, ssa.OpVarLive, ssa.OpKeepAlive:
4426				// nothing to do; already used by liveness
4427			case ssa.OpVarKill:
4428				// Zero variable if it is ambiguously live.
4429				// After the VARKILL anything this variable references
4430				// might be collected. If it were to become live again later,
4431				// the GC will see references to already-collected objects.
4432				// See issue 20029.
4433				n := v.Aux.(*Node)
4434				if n.Name.Needzero() {
4435					if n.Class() != PAUTO {
4436						v.Fatalf("zero of variable which isn't PAUTO %v", n)
4437					}
4438					if n.Type.Size()%int64(Widthptr) != 0 {
4439						v.Fatalf("zero of variable not a multiple of ptr size %v", n)
4440					}
4441					thearch.ZeroAuto(s.pp, n)
4442				}
4443			case ssa.OpPhi:
4444				CheckLoweredPhi(v)
4445
4446			default:
4447				// let the backend handle it
4448				thearch.SSAGenValue(&s, v)
4449			}
4450
4451			if logProgs {
4452				for ; x != s.pp.next; x = x.Link {
4453					valueProgs[x] = v
4454				}
4455			}
4456		}
4457		// Emit control flow instructions for block
4458		var next *ssa.Block
4459		if i < len(f.Blocks)-1 && Debug['N'] == 0 {
4460			// If -N, leave next==nil so every block with successors
4461			// ends in a JMP (except call blocks - plive doesn't like
4462			// select{send,recv} followed by a JMP call).  Helps keep
4463			// line numbers for otherwise empty blocks.
4464			next = f.Blocks[i+1]
4465		}
4466		x := s.pp.next
4467		s.SetPos(b.Pos)
4468		thearch.SSAGenBlock(&s, b, next)
4469		if logProgs {
4470			for ; x != s.pp.next; x = x.Link {
4471				blockProgs[x] = b
4472			}
4473		}
4474	}
4475
4476	// Resolve branches
4477	for _, br := range s.Branches {
4478		br.P.To.Val = s.bstart[br.B.ID]
4479	}
4480
4481	if logProgs {
4482		for p := pp.Text; p != nil; p = p.Link {
4483			var s string
4484			if v, ok := valueProgs[p]; ok {
4485				s = v.String()
4486			} else if b, ok := blockProgs[p]; ok {
4487				s = b.String()
4488			} else {
4489				s = "   " // most value and branch strings are 2-3 characters long
4490			}
4491			f.Logf("%s\t%s\n", s, p)
4492		}
4493		if f.HTMLWriter != nil {
4494			// LineHist is defunct now - this code won't do
4495			// anything.
4496			// TODO: fix this (ideally without a global variable)
4497			// saved := pp.Text.Ctxt.LineHist.PrintFilenameOnly
4498			// pp.Text.Ctxt.LineHist.PrintFilenameOnly = true
4499			var buf bytes.Buffer
4500			buf.WriteString("<code>")
4501			buf.WriteString("<dl class=\"ssa-gen\">")
4502			for p := pp.Text; p != nil; p = p.Link {
4503				buf.WriteString("<dt class=\"ssa-prog-src\">")
4504				if v, ok := valueProgs[p]; ok {
4505					buf.WriteString(v.HTML())
4506				} else if b, ok := blockProgs[p]; ok {
4507					buf.WriteString(b.HTML())
4508				}
4509				buf.WriteString("</dt>")
4510				buf.WriteString("<dd class=\"ssa-prog\">")
4511				buf.WriteString(html.EscapeString(p.String()))
4512				buf.WriteString("</dd>")
4513				buf.WriteString("</li>")
4514			}
4515			buf.WriteString("</dl>")
4516			buf.WriteString("</code>")
4517			f.HTMLWriter.WriteColumn("genssa", buf.String())
4518			// pp.Text.Ctxt.LineHist.PrintFilenameOnly = saved
4519		}
4520	}
4521
4522	defframe(&s, e)
4523	if Debug['f'] != 0 {
4524		frame(0)
4525	}
4526
4527	f.HTMLWriter.Close()
4528	f.HTMLWriter = nil
4529}
4530
4531func defframe(s *SSAGenState, e *ssafn) {
4532	pp := s.pp
4533
4534	frame := Rnd(s.maxarg+e.stksize, int64(Widthreg))
4535	if thearch.PadFrame != nil {
4536		frame = thearch.PadFrame(frame)
4537	}
4538
4539	// Fill in argument and frame size.
4540	pp.Text.To.Type = obj.TYPE_TEXTSIZE
4541	pp.Text.To.Val = int32(Rnd(e.curfn.Type.ArgWidth(), int64(Widthreg)))
4542	pp.Text.To.Offset = frame
4543
4544	// Insert code to zero ambiguously live variables so that the
4545	// garbage collector only sees initialized values when it
4546	// looks for pointers.
4547	p := pp.Text
4548	var lo, hi int64
4549
4550	// Opaque state for backend to use. Current backends use it to
4551	// keep track of which helper registers have been zeroed.
4552	var state uint32
4553
4554	// Iterate through declarations. They are sorted in decreasing Xoffset order.
4555	for _, n := range e.curfn.Func.Dcl {
4556		if !n.Name.Needzero() {
4557			continue
4558		}
4559		if n.Class() != PAUTO {
4560			Fatalf("needzero class %d", n.Class())
4561		}
4562		if n.Type.Size()%int64(Widthptr) != 0 || n.Xoffset%int64(Widthptr) != 0 || n.Type.Size() == 0 {
4563			Fatalf("var %L has size %d offset %d", n, n.Type.Size(), n.Xoffset)
4564		}
4565
4566		if lo != hi && n.Xoffset+n.Type.Size() >= lo-int64(2*Widthreg) {
4567			// Merge with range we already have.
4568			lo = n.Xoffset
4569			continue
4570		}
4571
4572		// Zero old range
4573		p = thearch.ZeroRange(pp, p, frame+lo, hi-lo, &state)
4574
4575		// Set new range.
4576		lo = n.Xoffset
4577		hi = lo + n.Type.Size()
4578	}
4579
4580	// Zero final range.
4581	thearch.ZeroRange(pp, p, frame+lo, hi-lo, &state)
4582}
4583
4584type FloatingEQNEJump struct {
4585	Jump  obj.As
4586	Index int
4587}
4588
4589func (s *SSAGenState) oneFPJump(b *ssa.Block, jumps *FloatingEQNEJump) {
4590	p := s.Prog(jumps.Jump)
4591	p.To.Type = obj.TYPE_BRANCH
4592	to := jumps.Index
4593	s.Branches = append(s.Branches, Branch{p, b.Succs[to].Block()})
4594}
4595
4596func (s *SSAGenState) FPJump(b, next *ssa.Block, jumps *[2][2]FloatingEQNEJump) {
4597	switch next {
4598	case b.Succs[0].Block():
4599		s.oneFPJump(b, &jumps[0][0])
4600		s.oneFPJump(b, &jumps[0][1])
4601	case b.Succs[1].Block():
4602		s.oneFPJump(b, &jumps[1][0])
4603		s.oneFPJump(b, &jumps[1][1])
4604	default:
4605		s.oneFPJump(b, &jumps[1][0])
4606		s.oneFPJump(b, &jumps[1][1])
4607		q := s.Prog(obj.AJMP)
4608		q.To.Type = obj.TYPE_BRANCH
4609		s.Branches = append(s.Branches, Branch{q, b.Succs[1].Block()})
4610	}
4611}
4612
4613func AuxOffset(v *ssa.Value) (offset int64) {
4614	if v.Aux == nil {
4615		return 0
4616	}
4617	switch sym := v.Aux.(type) {
4618
4619	case *ssa.AutoSymbol:
4620		n := sym.Node.(*Node)
4621		return n.Xoffset
4622	}
4623	return 0
4624}
4625
4626// AddAux adds the offset in the aux fields (AuxInt and Aux) of v to a.
4627func AddAux(a *obj.Addr, v *ssa.Value) {
4628	AddAux2(a, v, v.AuxInt)
4629}
4630func AddAux2(a *obj.Addr, v *ssa.Value, offset int64) {
4631	if a.Type != obj.TYPE_MEM && a.Type != obj.TYPE_ADDR {
4632		v.Fatalf("bad AddAux addr %v", a)
4633	}
4634	// add integer offset
4635	a.Offset += offset
4636
4637	// If no additional symbol offset, we're done.
4638	if v.Aux == nil {
4639		return
4640	}
4641	// Add symbol's offset from its base register.
4642	switch sym := v.Aux.(type) {
4643	case *ssa.ExternSymbol:
4644		a.Name = obj.NAME_EXTERN
4645		a.Sym = sym.Sym
4646	case *ssa.ArgSymbol:
4647		n := sym.Node.(*Node)
4648		a.Name = obj.NAME_PARAM
4649		a.Sym = n.Orig.Sym.Linksym()
4650		a.Offset += n.Xoffset
4651	case *ssa.AutoSymbol:
4652		n := sym.Node.(*Node)
4653		a.Name = obj.NAME_AUTO
4654		a.Sym = n.Sym.Linksym()
4655		a.Offset += n.Xoffset
4656	default:
4657		v.Fatalf("aux in %s not implemented %#v", v, v.Aux)
4658	}
4659}
4660
4661// extendIndex extends v to a full int width.
4662// panic using the given function if v does not fit in an int (only on 32-bit archs).
4663func (s *state) extendIndex(v *ssa.Value, panicfn *obj.LSym) *ssa.Value {
4664	size := v.Type.Size()
4665	if size == s.config.PtrSize {
4666		return v
4667	}
4668	if size > s.config.PtrSize {
4669		// truncate 64-bit indexes on 32-bit pointer archs. Test the
4670		// high word and branch to out-of-bounds failure if it is not 0.
4671		if Debug['B'] == 0 {
4672			hi := s.newValue1(ssa.OpInt64Hi, types.Types[TUINT32], v)
4673			cmp := s.newValue2(ssa.OpEq32, types.Types[TBOOL], hi, s.constInt32(types.Types[TUINT32], 0))
4674			s.check(cmp, panicfn)
4675		}
4676		return s.newValue1(ssa.OpTrunc64to32, types.Types[TINT], v)
4677	}
4678
4679	// Extend value to the required size
4680	var op ssa.Op
4681	if v.Type.IsSigned() {
4682		switch 10*size + s.config.PtrSize {
4683		case 14:
4684			op = ssa.OpSignExt8to32
4685		case 18:
4686			op = ssa.OpSignExt8to64
4687		case 24:
4688			op = ssa.OpSignExt16to32
4689		case 28:
4690			op = ssa.OpSignExt16to64
4691		case 48:
4692			op = ssa.OpSignExt32to64
4693		default:
4694			s.Fatalf("bad signed index extension %s", v.Type)
4695		}
4696	} else {
4697		switch 10*size + s.config.PtrSize {
4698		case 14:
4699			op = ssa.OpZeroExt8to32
4700		case 18:
4701			op = ssa.OpZeroExt8to64
4702		case 24:
4703			op = ssa.OpZeroExt16to32
4704		case 28:
4705			op = ssa.OpZeroExt16to64
4706		case 48:
4707			op = ssa.OpZeroExt32to64
4708		default:
4709			s.Fatalf("bad unsigned index extension %s", v.Type)
4710		}
4711	}
4712	return s.newValue1(op, types.Types[TINT], v)
4713}
4714
4715// CheckLoweredPhi checks that regalloc and stackalloc correctly handled phi values.
4716// Called during ssaGenValue.
4717func CheckLoweredPhi(v *ssa.Value) {
4718	if v.Op != ssa.OpPhi {
4719		v.Fatalf("CheckLoweredPhi called with non-phi value: %v", v.LongString())
4720	}
4721	if v.Type.IsMemory() {
4722		return
4723	}
4724	f := v.Block.Func
4725	loc := f.RegAlloc[v.ID]
4726	for _, a := range v.Args {
4727		if aloc := f.RegAlloc[a.ID]; aloc != loc { // TODO: .Equal() instead?
4728			v.Fatalf("phi arg at different location than phi: %v @ %v, but arg %v @ %v\n%s\n", v, loc, a, aloc, v.Block.Func)
4729		}
4730	}
4731}
4732
4733// CheckLoweredGetClosurePtr checks that v is the first instruction in the function's entry block.
4734// The output of LoweredGetClosurePtr is generally hardwired to the correct register.
4735// That register contains the closure pointer on closure entry.
4736func CheckLoweredGetClosurePtr(v *ssa.Value) {
4737	entry := v.Block.Func.Entry
4738	if entry != v.Block || entry.Values[0] != v {
4739		Fatalf("in %s, badly placed LoweredGetClosurePtr: %v %v", v.Block.Func.Name, v.Block, v)
4740	}
4741}
4742
4743// AutoVar returns a *Node and int64 representing the auto variable and offset within it
4744// where v should be spilled.
4745func AutoVar(v *ssa.Value) (*Node, int64) {
4746	loc := v.Block.Func.RegAlloc[v.ID].(ssa.LocalSlot)
4747	if v.Type.Size() > loc.Type.Size() {
4748		v.Fatalf("spill/restore type %s doesn't fit in slot type %s", v.Type, loc.Type)
4749	}
4750	return loc.N.(*Node), loc.Off
4751}
4752
4753func AddrAuto(a *obj.Addr, v *ssa.Value) {
4754	n, off := AutoVar(v)
4755	a.Type = obj.TYPE_MEM
4756	a.Sym = n.Sym.Linksym()
4757	a.Reg = int16(thearch.REGSP)
4758	a.Offset = n.Xoffset + off
4759	if n.Class() == PPARAM || n.Class() == PPARAMOUT {
4760		a.Name = obj.NAME_PARAM
4761	} else {
4762		a.Name = obj.NAME_AUTO
4763	}
4764}
4765
4766func (s *SSAGenState) AddrScratch(a *obj.Addr) {
4767	if s.ScratchFpMem == nil {
4768		panic("no scratch memory available; forgot to declare usesScratch for Op?")
4769	}
4770	a.Type = obj.TYPE_MEM
4771	a.Name = obj.NAME_AUTO
4772	a.Sym = s.ScratchFpMem.Sym.Linksym()
4773	a.Reg = int16(thearch.REGSP)
4774	a.Offset = s.ScratchFpMem.Xoffset
4775}
4776
4777func (s *SSAGenState) Call(v *ssa.Value) *obj.Prog {
4778	idx, ok := s.stackMapIndex[v]
4779	if !ok {
4780		Fatalf("missing stack map index for %v", v.LongString())
4781	}
4782	p := s.Prog(obj.APCDATA)
4783	Addrconst(&p.From, objabi.PCDATA_StackMapIndex)
4784	Addrconst(&p.To, int64(idx))
4785
4786	if sym, _ := v.Aux.(*obj.LSym); sym == Deferreturn {
4787		// Deferred calls will appear to be returning to
4788		// the CALL deferreturn(SB) that we are about to emit.
4789		// However, the stack trace code will show the line
4790		// of the instruction byte before the return PC.
4791		// To avoid that being an unrelated instruction,
4792		// insert an actual hardware NOP that will have the right line number.
4793		// This is different from obj.ANOP, which is a virtual no-op
4794		// that doesn't make it into the instruction stream.
4795		thearch.Ginsnop(s.pp)
4796	}
4797
4798	p = s.Prog(obj.ACALL)
4799	if sym, ok := v.Aux.(*obj.LSym); ok {
4800		p.To.Type = obj.TYPE_MEM
4801		p.To.Name = obj.NAME_EXTERN
4802		p.To.Sym = sym
4803	} else {
4804		// TODO(mdempsky): Can these differences be eliminated?
4805		switch thearch.LinkArch.Family {
4806		case sys.AMD64, sys.I386, sys.PPC64, sys.S390X:
4807			p.To.Type = obj.TYPE_REG
4808		case sys.ARM, sys.ARM64, sys.MIPS, sys.MIPS64:
4809			p.To.Type = obj.TYPE_MEM
4810		default:
4811			Fatalf("unknown indirect call family")
4812		}
4813		p.To.Reg = v.Args[0].Reg()
4814	}
4815	if s.maxarg < v.AuxInt {
4816		s.maxarg = v.AuxInt
4817	}
4818	return p
4819}
4820
4821// fieldIdx finds the index of the field referred to by the ODOT node n.
4822func fieldIdx(n *Node) int {
4823	t := n.Left.Type
4824	f := n.Sym
4825	if !t.IsStruct() {
4826		panic("ODOT's LHS is not a struct")
4827	}
4828
4829	var i int
4830	for _, t1 := range t.Fields().Slice() {
4831		if t1.Sym != f {
4832			i++
4833			continue
4834		}
4835		if t1.Offset != n.Xoffset {
4836			panic("field offset doesn't match")
4837		}
4838		return i
4839	}
4840	panic(fmt.Sprintf("can't find field in expr %v\n", n))
4841
4842	// TODO: keep the result of this function somewhere in the ODOT Node
4843	// so we don't have to recompute it each time we need it.
4844}
4845
4846// ssafn holds frontend information about a function that the backend is processing.
4847// It also exports a bunch of compiler services for the ssa backend.
4848type ssafn struct {
4849	curfn        *Node
4850	strings      map[string]interface{} // map from constant string to data symbols
4851	scratchFpMem *Node                  // temp for floating point register / memory moves on some architectures
4852	stksize      int64                  // stack size for current frame
4853	stkptrsize   int64                  // prefix of stack containing pointers
4854	log          bool
4855}
4856
4857// StringData returns a symbol (a *types.Sym wrapped in an interface) which
4858// is the data component of a global string constant containing s.
4859func (e *ssafn) StringData(s string) interface{} {
4860	if aux, ok := e.strings[s]; ok {
4861		return aux
4862	}
4863	if e.strings == nil {
4864		e.strings = make(map[string]interface{})
4865	}
4866	data := stringsym(s)
4867	aux := &ssa.ExternSymbol{Sym: data}
4868	e.strings[s] = aux
4869	return aux
4870}
4871
4872func (e *ssafn) Auto(pos src.XPos, t *types.Type) ssa.GCNode {
4873	n := tempAt(pos, e.curfn, t) // Note: adds new auto to e.curfn.Func.Dcl list
4874	return n
4875}
4876
4877func (e *ssafn) SplitString(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
4878	n := name.N.(*Node)
4879	ptrType := types.NewPtr(types.Types[TUINT8])
4880	lenType := types.Types[TINT]
4881	if n.Class() == PAUTO && !n.Addrtaken() {
4882		// Split this string up into two separate variables.
4883		p := e.namedAuto(n.Sym.Name+".ptr", ptrType, n.Pos)
4884		l := e.namedAuto(n.Sym.Name+".len", lenType, n.Pos)
4885		return ssa.LocalSlot{N: p, Type: ptrType, Off: 0}, ssa.LocalSlot{N: l, Type: lenType, Off: 0}
4886	}
4887	// Return the two parts of the larger variable.
4888	return ssa.LocalSlot{N: n, Type: ptrType, Off: name.Off}, ssa.LocalSlot{N: n, Type: lenType, Off: name.Off + int64(Widthptr)}
4889}
4890
4891func (e *ssafn) SplitInterface(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
4892	n := name.N.(*Node)
4893	t := types.NewPtr(types.Types[TUINT8])
4894	if n.Class() == PAUTO && !n.Addrtaken() {
4895		// Split this interface up into two separate variables.
4896		f := ".itab"
4897		if n.Type.IsEmptyInterface() {
4898			f = ".type"
4899		}
4900		c := e.namedAuto(n.Sym.Name+f, t, n.Pos)
4901		d := e.namedAuto(n.Sym.Name+".data", t, n.Pos)
4902		return ssa.LocalSlot{N: c, Type: t, Off: 0}, ssa.LocalSlot{N: d, Type: t, Off: 0}
4903	}
4904	// Return the two parts of the larger variable.
4905	return ssa.LocalSlot{N: n, Type: t, Off: name.Off}, ssa.LocalSlot{N: n, Type: t, Off: name.Off + int64(Widthptr)}
4906}
4907
4908func (e *ssafn) SplitSlice(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot, ssa.LocalSlot) {
4909	n := name.N.(*Node)
4910	ptrType := types.NewPtr(name.Type.ElemType())
4911	lenType := types.Types[TINT]
4912	if n.Class() == PAUTO && !n.Addrtaken() {
4913		// Split this slice up into three separate variables.
4914		p := e.namedAuto(n.Sym.Name+".ptr", ptrType, n.Pos)
4915		l := e.namedAuto(n.Sym.Name+".len", lenType, n.Pos)
4916		c := e.namedAuto(n.Sym.Name+".cap", lenType, n.Pos)
4917		return ssa.LocalSlot{N: p, Type: ptrType, Off: 0}, ssa.LocalSlot{N: l, Type: lenType, Off: 0}, ssa.LocalSlot{N: c, Type: lenType, Off: 0}
4918	}
4919	// Return the three parts of the larger variable.
4920	return ssa.LocalSlot{N: n, Type: ptrType, Off: name.Off},
4921		ssa.LocalSlot{N: n, Type: lenType, Off: name.Off + int64(Widthptr)},
4922		ssa.LocalSlot{N: n, Type: lenType, Off: name.Off + int64(2*Widthptr)}
4923}
4924
4925func (e *ssafn) SplitComplex(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
4926	n := name.N.(*Node)
4927	s := name.Type.Size() / 2
4928	var t *types.Type
4929	if s == 8 {
4930		t = types.Types[TFLOAT64]
4931	} else {
4932		t = types.Types[TFLOAT32]
4933	}
4934	if n.Class() == PAUTO && !n.Addrtaken() {
4935		// Split this complex up into two separate variables.
4936		c := e.namedAuto(n.Sym.Name+".real", t, n.Pos)
4937		d := e.namedAuto(n.Sym.Name+".imag", t, n.Pos)
4938		return ssa.LocalSlot{N: c, Type: t, Off: 0}, ssa.LocalSlot{N: d, Type: t, Off: 0}
4939	}
4940	// Return the two parts of the larger variable.
4941	return ssa.LocalSlot{N: n, Type: t, Off: name.Off}, ssa.LocalSlot{N: n, Type: t, Off: name.Off + s}
4942}
4943
4944func (e *ssafn) SplitInt64(name ssa.LocalSlot) (ssa.LocalSlot, ssa.LocalSlot) {
4945	n := name.N.(*Node)
4946	var t *types.Type
4947	if name.Type.IsSigned() {
4948		t = types.Types[TINT32]
4949	} else {
4950		t = types.Types[TUINT32]
4951	}
4952	if n.Class() == PAUTO && !n.Addrtaken() {
4953		// Split this int64 up into two separate variables.
4954		h := e.namedAuto(n.Sym.Name+".hi", t, n.Pos)
4955		l := e.namedAuto(n.Sym.Name+".lo", types.Types[TUINT32], n.Pos)
4956		return ssa.LocalSlot{N: h, Type: t, Off: 0}, ssa.LocalSlot{N: l, Type: types.Types[TUINT32], Off: 0}
4957	}
4958	// Return the two parts of the larger variable.
4959	if thearch.LinkArch.ByteOrder == binary.BigEndian {
4960		return ssa.LocalSlot{N: n, Type: t, Off: name.Off}, ssa.LocalSlot{N: n, Type: types.Types[TUINT32], Off: name.Off + 4}
4961	}
4962	return ssa.LocalSlot{N: n, Type: t, Off: name.Off + 4}, ssa.LocalSlot{N: n, Type: types.Types[TUINT32], Off: name.Off}
4963}
4964
4965func (e *ssafn) SplitStruct(name ssa.LocalSlot, i int) ssa.LocalSlot {
4966	n := name.N.(*Node)
4967	st := name.Type
4968	ft := st.FieldType(i)
4969	if n.Class() == PAUTO && !n.Addrtaken() {
4970		// Note: the _ field may appear several times.  But
4971		// have no fear, identically-named but distinct Autos are
4972		// ok, albeit maybe confusing for a debugger.
4973		x := e.namedAuto(n.Sym.Name+"."+st.FieldName(i), ft, n.Pos)
4974		return ssa.LocalSlot{N: x, Type: ft, Off: 0}
4975	}
4976	return ssa.LocalSlot{N: n, Type: ft, Off: name.Off + st.FieldOff(i)}
4977}
4978
4979func (e *ssafn) SplitArray(name ssa.LocalSlot) ssa.LocalSlot {
4980	n := name.N.(*Node)
4981	at := name.Type
4982	if at.NumElem() != 1 {
4983		Fatalf("bad array size")
4984	}
4985	et := at.ElemType()
4986	if n.Class() == PAUTO && !n.Addrtaken() {
4987		x := e.namedAuto(n.Sym.Name+"[0]", et, n.Pos)
4988		return ssa.LocalSlot{N: x, Type: et, Off: 0}
4989	}
4990	return ssa.LocalSlot{N: n, Type: et, Off: name.Off}
4991}
4992
4993func (e *ssafn) DerefItab(it *obj.LSym, offset int64) *obj.LSym {
4994	return itabsym(it, offset)
4995}
4996
4997// namedAuto returns a new AUTO variable with the given name and type.
4998// These are exposed to the debugger.
4999func (e *ssafn) namedAuto(name string, typ *types.Type, pos src.XPos) ssa.GCNode {
5000	t := typ
5001	s := &types.Sym{Name: name, Pkg: localpkg}
5002
5003	n := new(Node)
5004	n.Name = new(Name)
5005	n.Op = ONAME
5006	n.Pos = pos
5007	n.Orig = n
5008
5009	s.Def = asTypesNode(n)
5010	asNode(s.Def).Name.SetUsed(true)
5011	n.Sym = s
5012	n.Type = t
5013	n.SetClass(PAUTO)
5014	n.SetAddable(true)
5015	n.Esc = EscNever
5016	n.Name.Curfn = e.curfn
5017	e.curfn.Func.Dcl = append(e.curfn.Func.Dcl, n)
5018	dowidth(t)
5019	return n
5020}
5021
5022func (e *ssafn) CanSSA(t *types.Type) bool {
5023	return canSSAType(t)
5024}
5025
5026func (e *ssafn) Line(pos src.XPos) string {
5027	return linestr(pos)
5028}
5029
5030// Log logs a message from the compiler.
5031func (e *ssafn) Logf(msg string, args ...interface{}) {
5032	if e.log {
5033		fmt.Printf(msg, args...)
5034	}
5035}
5036
5037func (e *ssafn) Log() bool {
5038	return e.log
5039}
5040
5041// Fatal reports a compiler error and exits.
5042func (e *ssafn) Fatalf(pos src.XPos, msg string, args ...interface{}) {
5043	lineno = pos
5044	Fatalf(msg, args...)
5045}
5046
5047// Warnl reports a "warning", which is usually flag-triggered
5048// logging output for the benefit of tests.
5049func (e *ssafn) Warnl(pos src.XPos, fmt_ string, args ...interface{}) {
5050	Warnl(pos, fmt_, args...)
5051}
5052
5053func (e *ssafn) Debug_checknil() bool {
5054	return Debug_checknil != 0
5055}
5056
5057func (e *ssafn) Debug_wb() bool {
5058	return Debug_wb != 0
5059}
5060
5061func (e *ssafn) UseWriteBarrier() bool {
5062	return use_writebarrier
5063}
5064
5065func (e *ssafn) Syslook(name string) *obj.LSym {
5066	switch name {
5067	case "goschedguarded":
5068		return goschedguarded
5069	case "writeBarrier":
5070		return writeBarrier
5071	case "writebarrierptr":
5072		return writebarrierptr
5073	case "typedmemmove":
5074		return typedmemmove
5075	case "typedmemclr":
5076		return typedmemclr
5077	}
5078	Fatalf("unknown Syslook func %v", name)
5079	return nil
5080}
5081
5082func (n *Node) Typ() *types.Type {
5083	return n.Type
5084}
5085