1
2
3
4
5 package escape
6
7 import (
8 "cmd/compile/internal/base"
9 "cmd/compile/internal/ir"
10 "cmd/compile/internal/logopt"
11 "cmd/internal/src"
12 "fmt"
13 "math/bits"
14 "strings"
15 )
16
17
18
19 func (b *batch) walkAll() {
20
21
22
23
24
25
26
27
28
29
30 todo := newQueue(len(b.allLocs) + 3)
31
32 enqueue := func(loc *location) {
33 if !loc.queuedWalkAll {
34 loc.queuedWalkAll = true
35 if loc.hasAttr(attrEscapes) {
36
37
38
39 todo.pushFront(loc)
40 } else {
41 todo.pushBack(loc)
42 }
43 }
44 }
45
46 for _, loc := range b.allLocs {
47 todo.pushFront(loc)
48
49 loc.queuedWalkAll = true
50 }
51 todo.pushFront(&b.mutatorLoc)
52 todo.pushFront(&b.calleeLoc)
53 todo.pushFront(&b.heapLoc)
54
55 b.mutatorLoc.queuedWalkAll = true
56 b.calleeLoc.queuedWalkAll = true
57 b.heapLoc.queuedWalkAll = true
58
59 var walkgen uint32
60 for todo.len() > 0 {
61 root := todo.popFront()
62 root.queuedWalkAll = false
63 walkgen++
64 b.walkOne(root, walkgen, enqueue)
65 }
66 }
67
68
69
// walkOne computes, for every location that flows into root, the minimal
// number of dereferences along that flow (address-of steps count as -1),
// and applies the consequences: forcing heap allocation when an address
// flows somewhere that outlives its location, tagging leaking parameters,
// and propagating the mutates/calls attributes. enqueue is invoked for any
// location whose attributes change, so walkAll revisits it.
func (b *batch) walkOne(root *location, walkgen uint32, enqueue func(*location)) {
	// Edge weights can be negative (address-of), so this is a
	// Bellman-Ford-style relaxation rather than Dijkstra: a location may be
	// re-queued below whenever a smaller derefs count is found for it.
	root.walkgen = walkgen
	root.derefs = 0
	root.dst = nil

	// If a closure expression's value is called, its results flow to an
	// unknown caller: mark the results as lost and re-walk from each result
	// location so that loss is propagated.
	if root.hasAttr(attrCalls) {
		if clo, ok := root.n.(*ir.ClosureExpr); ok {
			if fn := clo.Func; b.inMutualBatch(fn.Nname) && !fn.ClosureResultsLost() {
				fn.SetClosureResultsLost(true)
				for _, result := range fn.Type().Results() {
					enqueue(b.oldLoc(result.Nname.(*ir.Name)))
				}
			}
		}
	}

	todo := newQueue(1)
	todo.pushFront(root)

	for todo.len() > 0 {
		l := todo.popFront()
		l.queuedWalkOne = 0 // no longer queued for walkOne

		derefs := l.derefs
		var newAttrs locAttr

		// derefs < 0 means l's address reaches root.
		addressOf := derefs < 0
		if addressOf {
			// For attribute propagation purposes, treat the address-of as
			// zero dereferences of l itself.
			derefs = 0

			// If root may outlive l, then taking l's address forces l to
			// escape to the heap.
			if b.outlives(root, l) {
				if !l.hasAttr(attrEscapes) && (logopt.Enabled() || base.Flag.LowerM >= 2) {
					if base.Flag.LowerM >= 2 {
						fmt.Printf("%s: %v escapes to heap in %v:\n", base.FmtPos(l.n.Pos()), l.n, ir.FuncName(l.curfn))
					}
					explanation := b.explainPath(root, l)
					if logopt.Enabled() {
						var e_curfn *ir.Func
						logopt.LogOpt(l.n.Pos(), "escape", "escape", ir.FuncName(e_curfn), fmt.Sprintf("%v escapes to heap", l.n), explanation)
					}
				}
				newAttrs |= attrEscapes | attrPersists | attrMutates | attrCalls
			} else
			// l's address flows to root but l does not escape; it must
			// still persist as long as root does.
			if root.hasAttr(attrPersists) {
				newAttrs |= attrPersists
			}
		}

		// Mutate/call attributes only transfer when root is reached with no
		// remaining dereferences.
		if derefs == 0 {
			newAttrs |= root.attrs & (attrMutates | attrCalls)
		}

		// Record leaks of function parameters into locations that outlive
		// them, plus mutator/callee summaries for the parameter tags.
		if l.param {
			if b.outlives(root, l) {
				if !l.hasAttr(attrEscapes) && (logopt.Enabled() || base.Flag.LowerM >= 2) {
					if base.Flag.LowerM >= 2 {
						fmt.Printf("%s: parameter %v leaks to %s for %v with derefs=%d:\n", base.FmtPos(l.n.Pos()), l.n, b.explainLoc(root), ir.FuncName(l.curfn), derefs)
					}
					explanation := b.explainPath(root, l)
					if logopt.Enabled() {
						var e_curfn *ir.Func
						logopt.LogOpt(l.n.Pos(), "leak", "escape", ir.FuncName(e_curfn),
							fmt.Sprintf("parameter %v leaks to %s with derefs=%d", l.n, b.explainLoc(root), derefs), explanation)
					}
				}
				l.leakTo(root, derefs)
			}
			if root.hasAttr(attrMutates) {
				l.paramEsc.AddMutator(derefs)
			}
			if root.hasAttr(attrCalls) {
				l.paramEsc.AddCallee(derefs)
			}
		}

		if newAttrs&^l.attrs != 0 {
			l.attrs |= newAttrs
			// Let walkAll revisit l with its new attributes.
			enqueue(l)
			// Once l escapes there is nothing more to learn from flows into
			// it during this walk (edges from escaped sources are skipped
			// below anyway).
			if l.attrs&attrEscapes != 0 {
				continue
			}
		}

		// Relax every incoming edge: if we found a strictly smaller derefs
		// count for edge.src (or haven't seen it this walk), update its
		// shortest-path state and (re-)queue it.
		for i, edge := range l.edges {
			if edge.src.hasAttr(attrEscapes) {
				continue
			}
			d := derefs + edge.derefs
			if edge.src.walkgen != walkgen || edge.src.derefs > d {
				edge.src.walkgen = walkgen
				edge.src.derefs = d
				// dst/dstEdgeIdx record the path for explainPath.
				edge.src.dst = l
				edge.src.dstEdgeIdx = i

				if edge.src.queuedWalkOne != walkgen {
					edge.src.queuedWalkOne = walkgen
					todo.pushBack(edge.src)
				}
			}
		}
	}
}
198
199
200 func (b *batch) explainPath(root, src *location) []*logopt.LoggedOpt {
201 visited := make(map[*location]bool)
202 pos := base.FmtPos(src.n.Pos())
203 var explanation []*logopt.LoggedOpt
204 for {
205
206 if visited[src] {
207 if base.Flag.LowerM >= 2 {
208 fmt.Printf("%s: warning: truncated explanation due to assignment cycle; see golang.org/issue/35518\n", pos)
209 }
210 break
211 }
212 visited[src] = true
213 dst := src.dst
214 edge := &dst.edges[src.dstEdgeIdx]
215 if edge.src != src {
216 base.Fatalf("path inconsistency: %v != %v", edge.src, src)
217 }
218
219 explanation = b.explainFlow(pos, dst, src, edge.derefs, edge.notes, explanation)
220
221 if dst == root {
222 break
223 }
224 src = dst
225 }
226
227 return explanation
228 }
229
230 func (b *batch) explainFlow(pos string, dst, srcloc *location, derefs int, notes *note, explanation []*logopt.LoggedOpt) []*logopt.LoggedOpt {
231 ops := "&"
232 if derefs >= 0 {
233 ops = strings.Repeat("*", derefs)
234 }
235 print := base.Flag.LowerM >= 2
236
237 flow := fmt.Sprintf(" flow: %s ← %s%v:", b.explainLoc(dst), ops, b.explainLoc(srcloc))
238 if print {
239 fmt.Printf("%s:%s\n", pos, flow)
240 }
241 if logopt.Enabled() {
242 var epos src.XPos
243 if notes != nil {
244 epos = notes.where.Pos()
245 } else if srcloc != nil && srcloc.n != nil {
246 epos = srcloc.n.Pos()
247 }
248 var e_curfn *ir.Func
249 explanation = append(explanation, logopt.NewLoggedOpt(epos, epos, "escflow", "escape", ir.FuncName(e_curfn), flow))
250 }
251
252 for note := notes; note != nil; note = note.next {
253 if print {
254 fmt.Printf("%s: from %v (%v) at %s\n", pos, note.where, note.why, base.FmtPos(note.where.Pos()))
255 }
256 if logopt.Enabled() {
257 var e_curfn *ir.Func
258 notePos := note.where.Pos()
259 explanation = append(explanation, logopt.NewLoggedOpt(notePos, notePos, "escflow", "escape", ir.FuncName(e_curfn),
260 fmt.Sprintf(" from %v (%v)", note.where, note.why)))
261 }
262 }
263 return explanation
264 }
265
266 func (b *batch) explainLoc(l *location) string {
267 if l == &b.heapLoc {
268 return "{heap}"
269 }
270 if l.n == nil {
271
272 return "{temp}"
273 }
274 if l.n.Op() == ir.ONAME {
275 return fmt.Sprintf("%v", l.n)
276 }
277 return fmt.Sprintf("{storage for %v}", l.n)
278 }
279
280
281
// outlives reports whether values stored in l may survive beyond other's
// lifetime; if so, an address of other flowing into l forces other to
// escape to the heap (see walkOne).
func (b *batch) outlives(l, other *location) bool {
	// A location that escapes lives on the heap, which outlives everything.
	if l.hasAttr(attrEscapes) {
		return true
	}

	// mutatorLoc and calleeLoc are pseudo-locations used only to propagate
	// the attrMutates/attrCalls attributes; they do not represent real
	// storage, so flowing into them never extends a lifetime.
	if l == &b.mutatorLoc || l == &b.calleeLoc {
		return false
	}

	// A result parameter is returned to the caller, so it generally
	// outlives anything local to this call...
	if l.paramOut {
		// ...except when l is a result of a closure nested within other's
		// function and that closure's results are known not to be lost: the
		// result is then consumed within other's lifetime, so it does not
		// force other onto the heap.
		if ir.ContainsClosure(other.curfn, l.curfn) && !l.curfn.ClosureResultsLost() {
			return false
		}

		return true
	}

	// Within a single function, a location declared at a shallower loop
	// depth outlives the per-iteration lifetime of locations declared in
	// deeper loops.
	if l.curfn == other.curfn && l.loopDepth < other.loopDepth {
		return true
	}

	// A location in an enclosing function outlives locations belonging to
	// closures nested within that function.
	if ir.ContainsClosure(l.curfn, other.curfn) {
		return true
	}

	return false
}
336
337
338
339
// queue is a double-ended queue of locations implemented as a growable
// ring buffer. The backing slice's length is always a power of two so
// that wrap can reduce indices with a bit mask.
type queue struct {
	locs  []*location // ring buffer; len(locs) is a power of two
	head  int         // index of the front element
	tail  int         // index one past the back element
	elems int         // number of elements currently stored
}
346
347 func newQueue(capacity int) *queue {
348 capacity = max(capacity, 2)
349 capacity = 1 << bits.Len64(uint64(capacity-1))
350 return &queue{locs: make([]*location, capacity)}
351 }
352
353
354 func (q *queue) pushFront(loc *location) {
355 if q.elems == len(q.locs) {
356 q.grow()
357 }
358 q.head = q.wrap(q.head - 1)
359 q.locs[q.head] = loc
360 q.elems++
361 }
362
363
364 func (q *queue) pushBack(loc *location) {
365 if q.elems == len(q.locs) {
366 q.grow()
367 }
368 q.locs[q.tail] = loc
369 q.tail = q.wrap(q.tail + 1)
370 q.elems++
371 }
372
373
374 func (q *queue) popFront() *location {
375 if q.elems == 0 {
376 return nil
377 }
378 loc := q.locs[q.head]
379 q.head = q.wrap(q.head + 1)
380 q.elems--
381 return loc
382 }
383
384
385 func (q *queue) grow() {
386 newLocs := make([]*location, len(q.locs)*2)
387 for i := range q.elems {
388
389 newLocs[i] = q.locs[q.wrap(q.head+i)]
390 }
391 q.locs = newLocs
392 q.head = 0
393 q.tail = q.elems
394 }
395
// len returns the number of elements currently in the queue.
func (q *queue) len() int { return q.elems }

// wrap reduces index i into the ring; len(q.locs) is a power of two, so a
// mask suffices (and correctly handles the i == -1 case from pushFront).
func (q *queue) wrap(i int) int { return i & (len(q.locs) - 1) }
398
View as plain text