package ssa

import (
	"cmd/compile/internal/ir"
	"cmd/compile/internal/types"
	"cmd/internal/src"
	"fmt"
)

// stackAllocState holds the state of the stack allocation pass for one Func.
// It is cached on the Func's Cache and reused across functions; see
// newStackAllocState and putStackAllocState.
type stackAllocState struct {
	f *Func

	// live[b.ID] = IDs of stack-slot-needing values live at the end of
	// block b. This is the result returned by stackalloc.
	live [][]ID

	// The slices below are reused across functions via the Func cache.
	values    []stackValState
	interfere [][]ID // interfere[v.ID] = IDs of values that interfere with v
	names     []LocalSlot

	// Statistics, logged via f.LogStat when pass stats are enabled.
	nArgSlot, // values living in argument slots
	nNotNeed, // values not needing a stack slot
	nNamedSlot, // values assigned a named stack slot
	nReuse, // values reusing an existing slot
	nAuto, // new autos allocated for slots
	nSelfInterfere int32 // named values rejected due to self-interference
}
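
// newStackAllocState returns a stackAllocState for f, reusing the one
// cached on f.Cache when available.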
func newStackAllocState(f *Func) *stackAllocState {
	s := f.Cache.stackAllocState
	if s == nil {
		return new(stackAllocState)
	}
	if s.f != nil {
		f.fe.Fatalf(src.NoXPos, "newStackAllocState called without previous free")
	}
	return s
}
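
// putStackAllocState resets s and returns it to the Func's cache so that a
// later compilation can reuse its slices.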
func putStackAllocState(s *stackAllocState) {
	clear(s.values)
	clear(s.interfere)
	clear(s.names)
	s.f.Cache.stackAllocState = s
	s.f = nil
	s.live = nil
	s.nArgSlot, s.nNotNeed, s.nNamedSlot, s.nReuse, s.nAuto, s.nSelfInterfere = 0, 0, 0, 0, 0, 0
}
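
// stackValState records the per-value information used by stack allocation.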
type stackValState struct {
	typ      *types.Type
	spill    *Value // the OpStoreReg that spills this value, if any
	needSlot bool   // value needs a stack slot
	isArg    bool   // value is an OpArg, OpArgIntReg, or OpArgFloatReg
}

// stackalloc allocates storage in the stack frame for all Values that did
// not get a register. spillLive[b.ID] is the set of spill value IDs live at
// the end of block b. It returns, per block ID, the IDs of the
// stack-slot-needing values live at the end of that block.
func stackalloc(f *Func, spillLive [][]ID) [][]ID {
	if f.pass.debug > stackDebug {
		fmt.Println("before stackalloc")
		fmt.Println(f.String())
	}
	s := newStackAllocState(f)
	s.init(f, spillLive)
	defer putStackAllocState(s)

	s.stackalloc()
	if f.pass.stats > 0 {
		f.LogStat("stack_alloc_stats",
			s.nArgSlot, "arg_slots", s.nNotNeed, "slot_not_needed",
			s.nNamedSlot, "named_slots", s.nAuto, "auto_slots",
			s.nReuse, "reused_slots", s.nSelfInterfere, "self_interfering")
	}

	return s.live
}
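
// init records, for each value in f, its type, whether it needs a stack
// slot, and the OpStoreReg that spills it (if any), then computes liveness
// and the interference graph used to assign slots.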
func (s *stackAllocState) init(f *Func, spillLive [][]ID) {
	s.f = f

	// Initialize value information.
	if n := f.NumValues(); cap(s.values) >= n {
		s.values = s.values[:n]
	} else {
		s.values = make([]stackValState, n)
	}
	for _, b := range f.Blocks {
		for _, v := range b.Values {
			s.values[v.ID].typ = v.Type
			s.values[v.ID].needSlot = !v.Type.IsMemory() && !v.Type.IsVoid() && !v.Type.IsFlags() && f.getHome(v.ID) == nil && !v.rematerializeable() && !v.OnWasmStack
			s.values[v.ID].isArg = hasAnyArgOp(v)
			if f.pass.debug > stackDebug && s.values[v.ID].needSlot {
				fmt.Printf("%s needs a stack slot\n", v)
			}
			if v.Op == OpStoreReg {
				s.values[v.Args[0].ID].spill = v
			}
		}
	}

	// Compute liveness info for values needing a slot.
	s.computeLive(spillLive)

	// Build the interference graph among values needing a slot.
	s.buildInterferenceGraph()
}
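
// stackalloc assigns a stack location to every value that needs one: a named
// slot when the value is associated with a variable of the same type, or an
// auto slot otherwise, reusing slots between values that do not interfere.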
func (s *stackAllocState) stackalloc() {
	f := s.f

	// Build a map from values to their names, if any.
	// A value may be associated with more than one name; if so,
	// the first name encountered in f.Names wins.
	if n := f.NumValues(); cap(s.names) >= n {
		s.names = s.names[:n]
	} else {
		s.names = make([]LocalSlot, n)
	}
	names := s.names
	empty := LocalSlot{}
	for _, name := range f.Names {
		// Note: iterate over f.Names rather than ranging over
		// f.NamedValues, which would be nondeterministic.
		for _, v := range f.NamedValues[*name] {
			if v.Op == OpArgIntReg || v.Op == OpArgFloatReg {
				aux := v.Aux.(*AuxNameOffset)
				// Never let an arg be bound to a differently named thing.
				if name.N != aux.Name || name.Off != aux.Offset {
					if f.pass.debug > stackDebug {
						fmt.Printf("stackalloc register arg %s skipping name %s\n", v, name)
					}
					continue
				}
			} else if name.N.Class == ir.PPARAM && v.Op != OpArg {
				// PPARAMs only bind to OpArg values.
				if f.pass.debug > stackDebug {
					fmt.Printf("stackalloc PPARAM name %s skipping non-Arg %s\n", name, v)
				}
				continue
			}

			if names[v.ID] == empty {
				if f.pass.debug > stackDebug {
					fmt.Printf("stackalloc value %s to name %s\n", v, *name)
				}
				names[v.ID] = *name
			}
		}
	}

	// Allocate args to their assigned locations.
	for _, v := range f.Entry.Values {
		if !hasAnyArgOp(v) {
			continue
		}
		if v.Aux == nil {
			f.Fatalf("%s has nil Aux\n", v.LongString())
		}
		if v.Op == OpArg {
			loc := LocalSlot{N: v.Aux.(*ir.Name), Type: v.Type, Off: v.AuxInt}
			if f.pass.debug > stackDebug {
				fmt.Printf("stackalloc OpArg %s to %s\n", v, loc)
			}
			f.setHome(v, loc)
			continue
		}
		// Note: register arguments (OpArgIntReg/OpArgFloatReg) are
		// intentionally not assigned a stack location here.
	}

	// For each type, keep track of all the stack slots we have allocated
	// for that type, keyed by the type's LinkString.
	locations := map[string][]LocalSlot{}

	// Each time we assign a stack slot to a value v, remember the index
	// of that slot in slots[v.ID].
	slots := f.Cache.allocIntSlice(f.NumValues())
	defer f.Cache.freeIntSlice(slots)
	for i := range slots {
		slots[i] = -1
	}

	// Pick a stack slot for each value needing one.
	used := f.Cache.allocBoolSlice(f.NumValues())
	defer f.Cache.freeBoolSlice(used)
	for _, b := range f.Blocks {
		for _, v := range b.Values {
			if !s.values[v.ID].needSlot {
				s.nNotNeed++
				continue
			}
			if hasAnyArgOp(v) {
				s.nArgSlot++
				continue // already picked
			}

			// If this is a named value, try to use the name as
			// the stack slot.
			var name LocalSlot
			if v.Op == OpStoreReg {
				name = names[v.Args[0].ID]
			} else {
				name = names[v.ID]
			}
			if name.N != nil && v.Type.Compare(name.Type) == types.CMPeq {
				for _, id := range s.interfere[v.ID] {
					h := f.getHome(id)
					if h != nil && h.(LocalSlot).N == name.N && h.(LocalSlot).Off == name.Off {
						// A variable can interfere with itself.
						// It is rare, but it can happen.
						s.nSelfInterfere++
						goto noname
					}
				}
				if f.pass.debug > stackDebug {
					fmt.Printf("stackalloc %s to %s\n", v, name)
				}
				s.nNamedSlot++
				f.setHome(v, name)
				continue
			}

		noname:
			// Look for an existing slot of this type that v can reuse.
			typeKey := v.Type.LinkString()
			locs := locations[typeKey]
			// Mark all positions in locs used by interfering values.
			for i := 0; i < len(locs); i++ {
				used[i] = false
			}
			for _, xid := range s.interfere[v.ID] {
				slot := slots[xid]
				if slot >= 0 {
					used[slot] = true
				}
			}
			// Find an unused stack slot.
			var i int
			for i = 0; i < len(locs); i++ {
				if !used[i] {
					s.nReuse++
					break
				}
			}
			// If there is no unused stack slot, allocate a new one.
			if i == len(locs) {
				s.nAuto++
				locs = append(locs, LocalSlot{N: f.NewLocal(v.Pos, v.Type), Type: v.Type, Off: 0})
				locations[typeKey] = locs
			}
			// Use the stack slot at that index for v.
			loc := locs[i]
			if f.pass.debug > stackDebug {
				fmt.Printf("stackalloc %s to %s\n", v, loc)
			}
			f.setHome(v, loc)
			slots[v.ID] = i
		}
	}
}

// computeLive computes, for each block, the set of stack-slot-needing value
// IDs live at the end of that block; the result is stored in s.live.
// spillLive[b.ID] is an additional set of spill value IDs that must be
// treated as live at the end of block b.
func (s *stackAllocState) computeLive(spillLive [][]ID) {
	s.live = make([][]ID, s.f.NumBlocks())
	var phis []*Value
	live := s.f.newSparseSet(s.f.NumValues())
	defer s.f.retSparseSet(live)
	t := s.f.newSparseSet(s.f.NumValues())
	defer s.f.retSparseSet(t)

	// Instead of iterating over f.Blocks, iterate over their postordering.
	// Liveness information flows backward, so starting at the end
	// increases the probability that we will stabilize quickly.
	po := s.f.postorder()
	for {
		changed := false
		for _, b := range po {
			// Start with the known live values at the end of the block.
			live.clear()
			live.addAll(s.live[b.ID])

			// Propagate backwards to the start of the block.
			phis = phis[:0]
			for i := len(b.Values) - 1; i >= 0; i-- {
				v := b.Values[i]
				live.remove(v.ID)
				if v.Op == OpPhi {
					// Save phi for later.
					// Note: its args might need a stack slot even though
					// the phi itself doesn't, so don't use needSlot here.
					if !v.Type.IsMemory() && !v.Type.IsVoid() {
						phis = append(phis, v)
					}
					continue
				}
				for _, a := range v.Args {
					if s.values[a.ID].needSlot {
						live.add(a.ID)
					}
				}
			}

			// For each predecessor of b, expand its list of live-at-end values.
			// Invariant: live contains the values live at the start of b
			// (excluding phi inputs).
			for i, e := range b.Preds {
				p := e.b
				t.clear()
				t.addAll(s.live[p.ID])
				t.addAll(live.contents())
				t.addAll(spillLive[p.ID])
				for _, v := range phis {
					a := v.Args[i]
					if s.values[a.ID].needSlot {
						t.add(a.ID)
					}
					if spill := s.values[a.ID].spill; spill != nil {
						t.add(spill.ID)
					}
				}
				if t.size() == len(s.live[p.ID]) {
					continue
				}
				// Grow p's live set.
				s.live[p.ID] = append(s.live[p.ID][:0], t.contents()...)
				changed = true
			}
		}

		if !changed {
			break
		}
	}
	if s.f.pass.debug > stackDebug {
		for _, b := range s.f.Blocks {
			fmt.Printf("stacklive %s %v\n", b, s.live[b.ID])
		}
	}
}
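
// getHome returns the location assigned to value vid, or nil if none has
// been assigned yet.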
func (f *Func) getHome(vid ID) Location {
	if int(vid) >= len(f.RegAlloc) {
		return nil
	}
	return f.RegAlloc[vid]
}
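
// setHome records loc as the location of v, growing f.RegAlloc as needed.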
func (f *Func) setHome(v *Value, loc Location) {
	for v.ID >= ID(len(f.RegAlloc)) {
		f.RegAlloc = append(f.RegAlloc, nil)
	}
	f.RegAlloc[v.ID] = loc
}
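
// buildInterferenceGraph populates s.interfere. Two stack-slot-needing values
// interfere if one is defined while the other is live; such values must not
// share a stack slot. Interference is recorded only between values of equal
// type or when an argument is involved.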
func (s *stackAllocState) buildInterferenceGraph() {
	f := s.f
	if n := f.NumValues(); cap(s.interfere) >= n {
		s.interfere = s.interfere[:n]
	} else {
		s.interfere = make([][]ID, n)
	}
	live := f.newSparseSet(f.NumValues())
	defer f.retSparseSet(live)
	for _, b := range f.Blocks {
		// Propagate liveness backwards to the start of the block.
		// Two values interfere if one is defined while the other is live.
		live.clear()
		live.addAll(s.live[b.ID])
		for i := len(b.Values) - 1; i >= 0; i-- {
			v := b.Values[i]
			if s.values[v.ID].needSlot {
				live.remove(v.ID)
				for _, id := range live.contents() {
					// Note: args can have different types and still
					// interfere, with each other or with other values.
					if s.values[v.ID].typ.Compare(s.values[id].typ) == types.CMPeq || hasAnyArgOp(v) || s.values[id].isArg {
						s.interfere[v.ID] = append(s.interfere[v.ID], id)
						s.interfere[id] = append(s.interfere[id], v.ID)
					}
				}
			}
			for _, a := range v.Args {
				if s.values[a.ID].needSlot {
					live.add(a.ID)
				}
			}
			if hasAnyArgOp(v) && s.values[v.ID].needSlot {
				// Args are pre-spilled to fixed stack slots. Keep them live
				// all the way to the start of the entry block so that no
				// other value gets allocated to the same slot and clobbers
				// the incoming argument before it is loaded.
				live.add(v.ID)
			}
		}
	}
	if f.pass.debug > stackDebug {
		for vid, i := range s.interfere {
			if len(i) > 0 {
				fmt.Printf("v%d interferes with", vid)
				for _, x := range i {
					fmt.Printf(" v%d", x)
				}
				fmt.Println()
			}
		}
	}
}
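
// hasAnyArgOp reports whether v is one of the argument ops
// (OpArg, OpArgIntReg, or OpArgFloatReg).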
func hasAnyArgOp(v *Value) bool {
	return v.Op == OpArg || v.Op == OpArgIntReg || v.Op == OpArgFloatReg
}