// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// TODO: live at start of block instead?

package ssa

import (
	"cmd/compile/internal/types"
	"cmd/internal/src"
	"fmt"
)

type stackAllocState struct {
	f *Func

	// live is the output of stackalloc.
	// live[b.id] = live values at the end of block b.
	live [][]ID

	// The following slices are reused across multiple users
	// of stackAllocState.
	values    []stackValState
	interfere [][]ID // interfere[v.id] = values that interfere with v.
	names     []LocalSlot
	slots     []int
	used      []bool

	nArgSlot, // Number of Values sourced to arg slot
	nNotNeed, // Number of Values not needing a stack slot
	nNamedSlot, // Number of Values using a named stack slot
	nReuse, // Number of values reusing a stack slot
	nAuto, // Number of autos allocated for stack slots
	nSelfInterfere int32 // Number of self-interferences
}

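// newStackAllocState returns a stackAllocState, reusing the one
// cached on f, if any, so that its slices can be recycled.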
func newStackAllocState(f *Func) *stackAllocState {
	s := f.Cache.stackAllocState
	if s == nil {
		return new(stackAllocState)
	}
	if s.f != nil {
		f.fe.Fatalf(src.NoXPos, "newStackAllocState called without previous free")
	}
	return s
}

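// putStackAllocState zeroes s's reusable slices and returns it
// to the Func's cache for a later newStackAllocState call.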
func putStackAllocState(s *stackAllocState) {
	for i := range s.values {
		s.values[i] = stackValState{}
	}
	for i := range s.interfere {
		s.interfere[i] = nil
	}
	for i := range s.names {
		s.names[i] = LocalSlot{}
	}
	for i := range s.slots {
		s.slots[i] = 0
	}
	for i := range s.used {
		s.used[i] = false
	}
	s.f.Cache.stackAllocState = s
	s.f = nil
	s.live = nil
	s.nArgSlot, s.nNotNeed, s.nNamedSlot, s.nReuse, s.nAuto, s.nSelfInterfere = 0, 0, 0, 0, 0, 0
}

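// stackValState records what stackalloc needs to know about a Value:
// its type, the OpStoreReg that spills it (if any), and whether it
// needs a stack slot at all.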
type stackValState struct {
	typ      *types.Type
	spill    *Value
	needSlot bool
}

// stackalloc allocates storage in the stack frame for
// all Values that did not get a register.
// Returns a map from block ID to the stack values live at the end of that block.
func stackalloc(f *Func, spillLive [][]ID) [][]ID {
	if f.pass.debug > stackDebug {
		fmt.Println("before stackalloc")
		fmt.Println(f.String())
	}
	s := newStackAllocState(f)
	s.init(f, spillLive)
	defer putStackAllocState(s)

	s.stackalloc()
	if f.pass.stats > 0 {
		f.LogStat("stack_alloc_stats",
			s.nArgSlot, "arg_slots", s.nNotNeed, "slot_not_needed",
			s.nNamedSlot, "named_slots", s.nAuto, "auto_slots",
			s.nReuse, "reused_slots", s.nSelfInterfere, "self_interfering")
	}

	return s.live
}

func (s *stackAllocState) init(f *Func, spillLive [][]ID) {
	s.f = f

	// Initialize value information.
	if n := f.NumValues(); cap(s.values) >= n {
		s.values = s.values[:n]
	} else {
		s.values = make([]stackValState, n)
	}
	for _, b := range f.Blocks {
		for _, v := range b.Values {
			s.values[v.ID].typ = v.Type
			s.values[v.ID].needSlot = !v.Type.IsMemory() && !v.Type.IsVoid() && !v.Type.IsFlags() && f.getHome(v.ID) == nil && !v.rematerializeable()
			if f.pass.debug > stackDebug && s.values[v.ID].needSlot {
				fmt.Printf("%s needs a stack slot\n", v)
			}
			if v.Op == OpStoreReg {
				s.values[v.Args[0].ID].spill = v
			}
		}
	}

	// Compute liveness info for values needing a slot.
	s.computeLive(spillLive)

	// Build interference graph among values needing a slot.
	s.buildInterferenceGraph()
}

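// stackalloc assigns a stack location to every Value that needs one.
// It prefers the named slot of the variable a value spills to, then an
// existing slot of the same type not used by any interfering value,
// and only then allocates a fresh auto.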
func (s *stackAllocState) stackalloc() {
	f := s.f

	// Build map from values to their names, if any.
	// A value may be associated with more than one name (e.g. after
	// the assignment i=j). This step picks one name per value arbitrarily.
	if n := f.NumValues(); cap(s.names) >= n {
		s.names = s.names[:n]
	} else {
		s.names = make([]LocalSlot, n)
	}
	names := s.names
	for _, name := range f.Names {
		// Note: not "range f.NamedValues" above, because
		// that would be nondeterministic.
		for _, v := range f.NamedValues[name] {
			names[v.ID] = name
		}
	}

	// Allocate args to their assigned locations.
	for _, v := range f.Entry.Values {
		if v.Op != OpArg {
			continue
		}
		loc := LocalSlot{v.Aux.(GCNode), v.Type, v.AuxInt}
		if f.pass.debug > stackDebug {
			fmt.Printf("stackalloc %s to %s\n", v, loc.Name())
		}
		f.setHome(v, loc)
	}

	// For each type, we keep track of all the stack slots we
	// have allocated for that type.
	// TODO: share slots among equivalent types. We would need to
	// only share among types with the same GC signature. See the
	// Type.Compare calls below for where this matters.
	locations := map[*types.Type][]LocalSlot{}

	// Each time we assign a stack slot to a value v, we remember
	// the slot we used via an index into locations[v.Type].
	slots := s.slots
	if n := f.NumValues(); cap(slots) >= n {
		slots = slots[:n]
	} else {
		slots = make([]int, n)
		s.slots = slots
	}
	for i := range slots {
		slots[i] = -1
	}

	// Pick a stack slot for each value needing one.
	var used []bool
	if n := f.NumValues(); cap(s.used) >= n {
		used = s.used[:n]
	} else {
		used = make([]bool, n)
		s.used = used
	}
	for _, b := range f.Blocks {
		for _, v := range b.Values {
			if !s.values[v.ID].needSlot {
				s.nNotNeed++
				continue
			}
			if v.Op == OpArg {
				s.nArgSlot++
				continue // already picked
			}

			// If this is a named value, try to use the name as
			// the spill location.
			var name LocalSlot
			if v.Op == OpStoreReg {
				name = names[v.Args[0].ID]
			} else {
				name = names[v.ID]
			}
			if name.N != nil && v.Type.Compare(name.Type) == types.CMPeq {
				for _, id := range s.interfere[v.ID] {
					h := f.getHome(id)
					if h != nil && h.(LocalSlot).N == name.N && h.(LocalSlot).Off == name.Off {
						// A variable can interfere with itself.
						// It is rare, but it can happen.
						s.nSelfInterfere++
						goto noname
					}
				}
				if f.pass.debug > stackDebug {
					fmt.Printf("stackalloc %s to %s\n", v, name.Name())
				}
				s.nNamedSlot++
				f.setHome(v, name)
				continue
			}

		noname:
			// Set of stack slots we could reuse.
			locs := locations[v.Type]
			// Mark all positions in locs used by interfering values.
			for i := 0; i < len(locs); i++ {
				used[i] = false
			}
			for _, xid := range s.interfere[v.ID] {
				slot := slots[xid]
				if slot >= 0 {
					used[slot] = true
				}
			}
			// Find an unused stack slot.
			var i int
			for i = 0; i < len(locs); i++ {
				if !used[i] {
					s.nReuse++
					break
				}
			}
			// If there is no unused stack slot, allocate a new one.
			if i == len(locs) {
				s.nAuto++
				locs = append(locs, LocalSlot{N: f.fe.Auto(v.Pos, v.Type), Type: v.Type, Off: 0})
				locations[v.Type] = locs
			}
			// Use the stack variable at that index for v.
			loc := locs[i]
			if f.pass.debug > stackDebug {
				fmt.Printf("stackalloc %s to %s\n", v, loc.Name())
			}
			f.setHome(v, loc)
			slots[v.ID] = i
		}
	}
}

// computeLive computes a map from block ID to a list of
// stack-slot-needing value IDs live at the end of that block.
// TODO: this could be quadratic if lots of variables are live across lots of
// basic blocks. Figure out a way to make this function (or, more precisely, the user
// of this function) require only linear size & time.
func (s *stackAllocState) computeLive(spillLive [][]ID) {
	s.live = make([][]ID, s.f.NumBlocks())
	var phis []*Value
	live := s.f.newSparseSet(s.f.NumValues())
	defer s.f.retSparseSet(live)
	t := s.f.newSparseSet(s.f.NumValues())
	defer s.f.retSparseSet(t)

	// Instead of iterating over f.Blocks, iterate over their postordering.
	// Liveness information flows backward, so starting at the end
	// increases the probability that we will stabilize quickly.
	po := s.f.postorder()
	for {
		changed := false
		for _, b := range po {
			// Start with known live values at the end of the block
			live.clear()
			live.addAll(s.live[b.ID])

			// Propagate backwards to the start of the block
			phis = phis[:0]
			for i := len(b.Values) - 1; i >= 0; i-- {
				v := b.Values[i]
				live.remove(v.ID)
				if v.Op == OpPhi {
					// Save phi for later.
					// Note: its args might need a stack slot even though
					// the phi itself doesn't. So don't use needSlot.
					if !v.Type.IsMemory() && !v.Type.IsVoid() {
						phis = append(phis, v)
					}
					continue
				}
				for _, a := range v.Args {
					if s.values[a.ID].needSlot {
						live.add(a.ID)
					}
				}
			}

			// for each predecessor of b, expand its list of live-at-end values
			// invariant: live contains the values live at the start of b (excluding phi inputs)
			for i, e := range b.Preds {
				p := e.b
				t.clear()
				t.addAll(s.live[p.ID])
				t.addAll(live.contents())
				t.addAll(spillLive[p.ID])
				for _, v := range phis {
					a := v.Args[i]
					if s.values[a.ID].needSlot {
						t.add(a.ID)
					}
					if spill := s.values[a.ID].spill; spill != nil {
						//TODO: remove?  Subsumed by SpillUse?
						t.add(spill.ID)
					}
				}
				if t.size() == len(s.live[p.ID]) {
					continue
				}
				// grow p's live set
				s.live[p.ID] = append(s.live[p.ID][:0], t.contents()...)
				changed = true
			}
		}

		if !changed {
			break
		}
	}
	if s.f.pass.debug > stackDebug {
		for _, b := range s.f.Blocks {
			fmt.Printf("stacklive %s %v\n", b, s.live[b.ID])
		}
	}
}

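// getHome returns the location assigned to value vid, or nil if no
// location has been recorded for it.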
func (f *Func) getHome(vid ID) Location {
	if int(vid) >= len(f.RegAlloc) {
		return nil
	}
	return f.RegAlloc[vid]
}

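// setHome records loc as v's location, growing f.RegAlloc as needed.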
func (f *Func) setHome(v *Value, loc Location) {
	for v.ID >= ID(len(f.RegAlloc)) {
		f.RegAlloc = append(f.RegAlloc, nil)
	}
	f.RegAlloc[v.ID] = loc
}

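// buildInterferenceGraph populates s.interfere. Two slot-needing values
// of the same type interfere if one is defined while the other is live;
// interfering values must not share a stack slot. For example, in the
// hypothetical snippet
//
//	v1 = ...
//	v2 = ...       // v1 is still live here
//	v3 = Add v1 v2
//
// v2 is defined while v1 is live, so v1 and v2 (if both need slots and
// have equal types) interfere and get distinct slots.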
func (s *stackAllocState) buildInterferenceGraph() {
	f := s.f
	if n := f.NumValues(); cap(s.interfere) >= n {
		s.interfere = s.interfere[:n]
	} else {
		s.interfere = make([][]ID, n)
	}
	live := f.newSparseSet(f.NumValues())
	defer f.retSparseSet(live)
	for _, b := range f.Blocks {
		// Propagate liveness backwards to the start of the block.
		// Two values interfere if one is defined while the other is live.
		live.clear()
		live.addAll(s.live[b.ID])
		for i := len(b.Values) - 1; i >= 0; i-- {
			v := b.Values[i]
			if s.values[v.ID].needSlot {
				live.remove(v.ID)
				for _, id := range live.contents() {
					if s.values[v.ID].typ.Compare(s.values[id].typ) == types.CMPeq {
						s.interfere[v.ID] = append(s.interfere[v.ID], id)
						s.interfere[id] = append(s.interfere[id], v.ID)
					}
				}
			}
			for _, a := range v.Args {
				if s.values[a.ID].needSlot {
					live.add(a.ID)
				}
			}
			if v.Op == OpArg && s.values[v.ID].needSlot {
				// OpArg is an input argument which is pre-spilled.
				// We add back v.ID here because we want this value
				// to appear live even before this point. Being live
				// all the way to the start of the entry block prevents other
				// values from being allocated to the same slot and clobbering
				// the input value before we have a chance to load it.
				live.add(v.ID)
			}
		}
	}
	if f.pass.debug > stackDebug {
		for vid, i := range s.interfere {
			if len(i) > 0 {
				fmt.Printf("v%d interferes with", vid)
				for _, x := range i {
					fmt.Printf(" v%d", x)
				}
				fmt.Println()
			}
		}
	}
}