Source file src/cmd/compile/internal/escape/solve.go

// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package escape

import (
	"cmd/compile/internal/base"
	"cmd/compile/internal/ir"
	"cmd/compile/internal/logopt"
	"cmd/internal/src"
	"fmt"
	"math/bits"
	"strings"
)

// walkAll computes the minimal dereferences between all pairs of
// locations.
func (b *batch) walkAll() {
	// We use a work queue to keep track of locations that we need
	// to visit, and repeatedly walk until we reach a fixed point.
	//
	// We walk once from each location (including the heap), and
	// then re-enqueue each location on its transition from
	// !persists->persists and !escapes->escapes, which can each
	// happen at most once. So we take Θ(len(b.allLocs)) walks.

	// Queue of locations to walk. Has enough room for b.allLocs
	// plus b.heapLoc, b.mutatorLoc, b.calleeLoc.
	todo := newQueue(len(b.allLocs) + 3)

	enqueue := func(loc *location) {
		if !loc.queuedWalkAll {
			loc.queuedWalkAll = true
			if loc.hasAttr(attrEscapes) {
				// Favor locations that escape to the heap,
				// which in some cases allows attrEscapes to
				// propagate faster.
				todo.pushFront(loc)
			} else {
				todo.pushBack(loc)
			}
		}
	}

	for _, loc := range b.allLocs {
		todo.pushFront(loc)
		// TODO(thepudds): clean up setting queuedWalkAll.
		loc.queuedWalkAll = true
	}
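	// Also walk from the pseudo-locations, so that escape, mutation,
	// and call facts propagate from them; the mutator and callee
	// locations feed the parameter tags that walkOne records via
	// AddMutator and AddCallee.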
	todo.pushFront(&b.mutatorLoc)
	todo.pushFront(&b.calleeLoc)
	todo.pushFront(&b.heapLoc)

	b.mutatorLoc.queuedWalkAll = true
	b.calleeLoc.queuedWalkAll = true
	b.heapLoc.queuedWalkAll = true

	var walkgen uint32
	for todo.len() > 0 {
		root := todo.popFront()
		root.queuedWalkAll = false
		walkgen++
		b.walkOne(root, walkgen, enqueue)
	}
}

// walkOne computes the minimal number of dereferences from root to
// all other locations.
func (b *batch) walkOne(root *location, walkgen uint32, enqueue func(*location)) {
	// The data flow graph has negative edges (from addressing
	// operations), so we use the Bellman-Ford algorithm. However,
	// we don't have to worry about infinite negative cycles since
	// we bound intermediate dereference counts to 0.
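	//
	// Edge weights ("derefs") follow the flow-graph convention used
	// elsewhere in this package: an assignment "p = q" contributes 0,
	// "p = *q" contributes +1, and "p = &q" contributes -1. For
	// example, given "p = &q; r = *p", walking from root r reaches q
	// with (+1) + (-1) = 0 derefs, meaning q's value (not its address)
	// flows to r.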

	root.walkgen = walkgen
	root.derefs = 0
	root.dst = nil

	if root.hasAttr(attrCalls) {
		if clo, ok := root.n.(*ir.ClosureExpr); ok {
			if fn := clo.Func; b.inMutualBatch(fn.Nname) && !fn.ClosureResultsLost() {
				fn.SetClosureResultsLost(true)

				// Re-flow from the closure's results, now that we're aware
				// we lost track of them.
				for _, result := range fn.Type().Results() {
					enqueue(b.oldLoc(result.Nname.(*ir.Name)))
				}
			}
		}
	}

	todo := newQueue(1)
	todo.pushFront(root)

	for todo.len() > 0 {
		l := todo.popFront()
		l.queuedWalkOne = 0 // no longer queued for walkOne

		derefs := l.derefs
		var newAttrs locAttr

		// If l.derefs < 0, then l's address flows to root.
		addressOf := derefs < 0
		if addressOf {
			// For a flow path like "root = &l; l = x",
			// l's address flows to root, but x's does
			// not. We recognize this by lower bounding
			// derefs at 0.
			derefs = 0

			// If l's address flows somewhere that
			// outlives it, then l needs to be heap
			// allocated.
			if b.outlives(root, l) {
				if !l.hasAttr(attrEscapes) && (logopt.Enabled() || base.Flag.LowerM >= 2) {
					if base.Flag.LowerM >= 2 {
						fmt.Printf("%s: %v escapes to heap in %v:\n", base.FmtPos(l.n.Pos()), l.n, ir.FuncName(l.curfn))
					}
					explanation := b.explainPath(root, l)
					if logopt.Enabled() {
						var e_curfn *ir.Func // TODO(mdempsky): Fix.
						logopt.LogOpt(l.n.Pos(), "escape", "escape", ir.FuncName(e_curfn), fmt.Sprintf("%v escapes to heap", l.n), explanation)
					}
				}
				newAttrs |= attrEscapes | attrPersists | attrMutates | attrCalls
			} else if root.hasAttr(attrPersists) {
				// If l's address flows to a persistent location, then l needs
				// to persist too.
				newAttrs |= attrPersists
			}
		}
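		// If l's value reaches root without any dereferences, then any
		// mutation or call performed through root may equally be
		// performed through l's value, so those attributes carry over.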
		if derefs == 0 {
			newAttrs |= root.attrs & (attrMutates | attrCalls)
		}

		// l's value flows to root. If l is a function
		// parameter and root is the heap or a
		// corresponding result parameter, then record
		// that value flow for tagging the function
		// later.
		if l.param {
			if b.outlives(root, l) {
				if !l.hasAttr(attrEscapes) && (logopt.Enabled() || base.Flag.LowerM >= 2) {
					if base.Flag.LowerM >= 2 {
						fmt.Printf("%s: parameter %v leaks to %s for %v with derefs=%d:\n", base.FmtPos(l.n.Pos()), l.n, b.explainLoc(root), ir.FuncName(l.curfn), derefs)
					}
					explanation := b.explainPath(root, l)
					if logopt.Enabled() {
						var e_curfn *ir.Func // TODO(mdempsky): Fix.
						logopt.LogOpt(l.n.Pos(), "leak", "escape", ir.FuncName(e_curfn),
							fmt.Sprintf("parameter %v leaks to %s with derefs=%d", l.n, b.explainLoc(root), derefs), explanation)
					}
				}
				l.leakTo(root, derefs)
			}
			if root.hasAttr(attrMutates) {
				l.paramEsc.AddMutator(derefs)
			}
			if root.hasAttr(attrCalls) {
				l.paramEsc.AddCallee(derefs)
			}
		}

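		// Commit any newly discovered attributes and re-enqueue l so
		// that walkAll propagates them with l as a root. Once l is
		// known to escape, stop walking through it from this root: the
		// walk rooted at l will mark anything whose address flows to l
		// as escaping, because an escaping location outlives everything.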
		if newAttrs&^l.attrs != 0 {
			l.attrs |= newAttrs
			enqueue(l)
			if l.attrs&attrEscapes != 0 {
				continue
			}
		}

		for i, edge := range l.edges {
			if edge.src.hasAttr(attrEscapes) {
				continue
			}
			d := derefs + edge.derefs
			if edge.src.walkgen != walkgen || edge.src.derefs > d {
				edge.src.walkgen = walkgen
				edge.src.derefs = d
				edge.src.dst = l
				edge.src.dstEdgeIdx = i
				// Check if already queued in todo.
				if edge.src.queuedWalkOne != walkgen {
					edge.src.queuedWalkOne = walkgen // Mark queued for this walkgen.

					// Place at the back of the queue to give other possible
					// attribute changes to src time to happen first.
					todo.pushBack(edge.src)
				}
			}
		}
	}
}

// explainPath prints an explanation of how src flows to the walk root.
func (b *batch) explainPath(root, src *location) []*logopt.LoggedOpt {
	visited := make(map[*location]bool)
	pos := base.FmtPos(src.n.Pos())
	var explanation []*logopt.LoggedOpt
	for {
		// Prevent infinite loop.
		if visited[src] {
			if base.Flag.LowerM >= 2 {
				fmt.Printf("%s:   warning: truncated explanation due to assignment cycle; see golang.org/issue/35518\n", pos)
			}
			break
		}
		visited[src] = true
		dst := src.dst
		edge := &dst.edges[src.dstEdgeIdx]
		if edge.src != src {
			base.Fatalf("path inconsistency: %v != %v", edge.src, src)
		}

		explanation = b.explainFlow(pos, dst, src, edge.derefs, edge.notes, explanation)

		if dst == root {
			break
		}
		src = dst
	}

	return explanation
}

func (b *batch) explainFlow(pos string, dst, srcloc *location, derefs int, notes *note, explanation []*logopt.LoggedOpt) []*logopt.LoggedOpt {
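	// Negative derefs means srcloc's address flows to dst; render that
	// as "&". Otherwise render one "*" per dereference.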
	ops := "&"
	if derefs >= 0 {
		ops = strings.Repeat("*", derefs)
	}
	print := base.Flag.LowerM >= 2

	flow := fmt.Sprintf("   flow: %s ← %s%v:", b.explainLoc(dst), ops, b.explainLoc(srcloc))
	if print {
		fmt.Printf("%s:%s\n", pos, flow)
	}
	if logopt.Enabled() {
		var epos src.XPos
		if notes != nil {
			epos = notes.where.Pos()
		} else if srcloc != nil && srcloc.n != nil {
			epos = srcloc.n.Pos()
		}
		var e_curfn *ir.Func // TODO(mdempsky): Fix.
		explanation = append(explanation, logopt.NewLoggedOpt(epos, epos, "escflow", "escape", ir.FuncName(e_curfn), flow))
	}

	for note := notes; note != nil; note = note.next {
		if print {
			fmt.Printf("%s:     from %v (%v) at %s\n", pos, note.where, note.why, base.FmtPos(note.where.Pos()))
		}
		if logopt.Enabled() {
			var e_curfn *ir.Func // TODO(mdempsky): Fix.
			notePos := note.where.Pos()
			explanation = append(explanation, logopt.NewLoggedOpt(notePos, notePos, "escflow", "escape", ir.FuncName(e_curfn),
				fmt.Sprintf("     from %v (%v)", note.where, note.why)))
		}
	}
	return explanation
}

func (b *batch) explainLoc(l *location) string {
	if l == &b.heapLoc {
		return "{heap}"
	}
	if l.n == nil {
		// TODO(mdempsky): Omit entirely.
		return "{temp}"
	}
	if l.n.Op() == ir.ONAME {
		return fmt.Sprintf("%v", l.n)
	}
	return fmt.Sprintf("{storage for %v}", l.n)
}

// outlives reports whether values stored in l may survive beyond
// other's lifetime if stack allocated.
func (b *batch) outlives(l, other *location) bool {
	// The heap outlives everything.
	if l.hasAttr(attrEscapes) {
		return true
	}

	// Pseudo-locations that don't really exist.
	if l == &b.mutatorLoc || l == &b.calleeLoc {
		return false
	}

	// We don't know what callers do with returned values, so
	// pessimistically we need to assume they flow to the heap and
	// outlive everything too.
	if l.paramOut {
		// Exception: Closures can return locations allocated outside of
		// them without forcing them to the heap, if we can statically
		// identify all call sites. For example:
		//
		//	var u int  // okay to stack allocate
		//	fn := func() *int { return &u }
		//	*fn() = 42
		if ir.ContainsClosure(other.curfn, l.curfn) && !l.curfn.ClosureResultsLost() {
			return false
		}

		return true
	}

	// If l and other are within the same function, then l
	// outlives other if it was declared outside other's loop
	// scope. For example:
	//
	//	var l *int
	//	for {
	//		l = new(int) // must heap allocate: outlives for loop
	//	}
	if l.curfn == other.curfn && l.loopDepth < other.loopDepth {
		return true
	}

	// If other is declared within a child closure of where l is
	// declared, then l outlives it. For example:
	//
	//	var l *int
	//	func() {
	//		l = new(int) // must heap allocate: outlives call frame (if not inlined)
	//	}()
	if ir.ContainsClosure(l.curfn, other.curfn) {
		return true
	}

	return false
}

// queue implements a queue of locations for use in walkAll and walkOne.
// It supports pushing to front & back, and popping from front.
// TODO(thepudds): does cmd/compile have a deque or similar somewhere?
type queue struct {
	locs  []*location
	head  int // index of front element
	tail  int // next back element
	elems int // number of elements currently in the queue
}

func newQueue(capacity int) *queue {
	capacity = max(capacity, 2)
	capacity = 1 << bits.Len64(uint64(capacity-1)) // round up to a power of 2
	return &queue{locs: make([]*location, capacity)}
}

// pushFront adds an element to the front of the queue.
func (q *queue) pushFront(loc *location) {
	if q.elems == len(q.locs) {
		q.grow()
	}
	q.head = q.wrap(q.head - 1)
	q.locs[q.head] = loc
	q.elems++
}

// pushBack adds an element to the back of the queue.
func (q *queue) pushBack(loc *location) {
	if q.elems == len(q.locs) {
		q.grow()
	}
	q.locs[q.tail] = loc
	q.tail = q.wrap(q.tail + 1)
	q.elems++
}

// popFront removes and returns the front element of the queue,
// or nil if the queue is empty.
func (q *queue) popFront() *location {
	if q.elems == 0 {
		return nil
	}
	loc := q.locs[q.head]
	q.head = q.wrap(q.head + 1)
	q.elems--
	return loc
}

// grow doubles the capacity.
func (q *queue) grow() {
	newLocs := make([]*location, len(q.locs)*2)
	for i := range q.elems {
		// Copy over our elements in order.
		newLocs[i] = q.locs[q.wrap(q.head+i)]
	}
	q.locs = newLocs
	q.head = 0
	q.tail = q.elems
}

func (q *queue) len() int       { return q.elems }
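// wrap reduces an index into the range [0, len(q.locs)). It relies on
// len(q.locs) being a power of two, which newQueue and grow maintain.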
func (q *queue) wrap(i int) int { return i & (len(q.locs) - 1) }