Source file src/cmd/compile/internal/escape/escape.go

     1  // Copyright 2018 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package escape
     6  
     7  import (
     8  	"fmt"
     9  	"go/constant"
    10  	"go/token"
    11  
    12  	"cmd/compile/internal/base"
    13  	"cmd/compile/internal/ir"
    14  	"cmd/compile/internal/logopt"
    15  	"cmd/compile/internal/typecheck"
    16  	"cmd/compile/internal/types"
    17  	"cmd/internal/src"
    18  )
    19  
    20  // Escape analysis.
    21  //
    22  // Here we analyze functions to determine which Go variables
    23  // (including implicit allocations such as calls to "new" or "make",
    24  // composite literals, etc.) can be allocated on the stack. The two
    25  // key invariants we have to ensure are: (1) pointers to stack objects
    26  // cannot be stored in the heap, and (2) pointers to a stack object
    27  // cannot outlive that object (e.g., because the declaring function
    28  // returned and destroyed the object's stack frame, or its space is
    29  // reused across loop iterations for logically distinct variables).
    30  //
    31  // We implement this with a static data-flow analysis of the AST.
    32  // First, we construct a directed weighted graph where vertices
    33  // (termed "locations") represent variables allocated by statements
    34  // and expressions, and edges represent assignments between variables
    35  // (with weights representing addressing/dereference counts).
    36  //
    37  // Next we walk the graph looking for assignment paths that might
    38  // violate the invariants stated above. If a variable v's address is
    39  // stored in the heap or elsewhere that may outlive it, then v is
    40  // marked as requiring heap allocation.
    41  //
    42  // To support interprocedural analysis, we also record data-flow from
    43  // each function's parameters to the heap and to its result
    44  // parameters. This information is summarized as "parameter tags",
    45  // which are used at static call sites to improve escape analysis of
    46  // function arguments.
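         //
         // As a small illustration (a sketch; the function names are
         // invented, not taken from the compiler or its tests):
         //
         //	func leak() *int {
         //		x := new(int)
         //		return x // the allocation must outlive leak's frame
         //	}
         //
         //	func noLeak() int {
         //		y := new(int)
         //		*y = 1
         //		return *y // only the value is copied out
         //	}
         //
         // The allocation in leak is reported as escaping to the heap,
         // while the allocation in noLeak can stay on the stack, because
         // no pointer to it survives the call.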
    47  
    48  // Constructing the location graph.
    49  //
    50  // Every allocating statement (e.g., variable declaration) or
    51  // expression (e.g., "new" or "make") is first mapped to a unique
    52  // "location."
    53  //
     54  // We also model every Go assignment as a directed edge between
    55  // locations. The number of dereference operations minus the number of
    56  // addressing operations is recorded as the edge's weight (termed
    57  // "derefs"). For example:
    58  //
    59  //     p = &q    // -1
    60  //     p = q     //  0
    61  //     p = *q    //  1
    62  //     p = **q   //  2
    63  //
    64  //     p = **&**&q  // 2
    65  //
    66  // Note that the & operator can only be applied to addressable
    67  // expressions, and the expression &x itself is not addressable, so
    68  // derefs cannot go below -1.
    69  //
    70  // Every Go language construct is lowered into this representation,
    71  // generally without sensitivity to flow, path, or context; and
    72  // without distinguishing elements within a compound variable. For
    73  // example:
    74  //
    75  //     var x struct { f, g *int }
    76  //     var u []*int
    77  //
    78  //     x.f = u[0]
    79  //
    80  // is modeled simply as
    81  //
    82  //     x = *u
    83  //
    84  // That is, we don't distinguish x.f from x.g, or u[0] from u[1],
    85  // u[2], etc. However, we do record the implicit dereference involved
    86  // in indexing a slice.
    87  
    88  // A batch holds escape analysis state that's shared across an entire
    89  // batch of functions being analyzed at once.
    90  type batch struct {
    91  	allLocs         []*location
    92  	closures        []closure
    93  	reassignOracles map[*ir.Func]*ir.ReassignOracle
    94  
    95  	heapLoc    location
    96  	mutatorLoc location
    97  	calleeLoc  location
    98  	blankLoc   location
    99  }
   100  
    101  // A closure holds a closure expression and its spill hole (i.e., the
    102  // hole representing where the closure's record is stored).
   103  type closure struct {
   104  	k   hole
   105  	clo *ir.ClosureExpr
   106  }
   107  
   108  // An escape holds state specific to a single function being analyzed
   109  // within a batch.
   110  type escape struct {
   111  	*batch
   112  
   113  	curfn *ir.Func // function being analyzed
   114  
   115  	labels map[*types.Sym]labelState // known labels
   116  
   117  	// loopDepth counts the current loop nesting depth within
   118  	// curfn. It increments within each "for" loop and at each
   119  	// label with a corresponding backwards "goto" (i.e.,
   120  	// unstructured loop).
   121  	loopDepth int
   122  }
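
         // To illustrate loopDepth (a sketch; the function and label names
         // are invented): statements that follow a label targeted by a
         // backwards goto are treated as being inside a loop.
         //
         //	func f() {
         //		x := 0 // loopDepth 1
         //	again:
         //		y := 0 // loopDepth 2: "again" has a backwards goto
         //		_, _ = x, y
         //		goto again
         //	}
         //
         // Addresses taken at the deeper loopDepth may refer to storage that
         // is reused on the next iteration, which is one of the lifetime
         // hazards described at the top of this file.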
   123  
   124  func Funcs(all []*ir.Func) {
   125  	ir.VisitFuncsBottomUp(all, Batch)
   126  }
   127  
   128  // Batch performs escape analysis on a minimal batch of
   129  // functions.
   130  func Batch(fns []*ir.Func, recursive bool) {
   131  	var b batch
   132  	b.heapLoc.attrs = attrEscapes | attrPersists | attrMutates | attrCalls
   133  	b.mutatorLoc.attrs = attrMutates
   134  	b.calleeLoc.attrs = attrCalls
   135  	b.reassignOracles = make(map[*ir.Func]*ir.ReassignOracle)
   136  
   137  	// Construct data-flow graph from syntax trees.
   138  	for _, fn := range fns {
   139  		if base.Flag.W > 1 {
   140  			s := fmt.Sprintf("\nbefore escape %v", fn)
   141  			ir.Dump(s, fn)
   142  		}
   143  		b.initFunc(fn)
   144  	}
   145  	for _, fn := range fns {
   146  		if !fn.IsClosure() {
   147  			b.walkFunc(fn)
   148  		}
   149  	}
   150  
   151  	// We've walked the function bodies, so we've seen everywhere a
   152  	// variable might be reassigned or have its address taken. Now we
   153  	// can decide whether closures should capture their free variables
   154  	// by value or reference.
   155  	for _, closure := range b.closures {
   156  		b.flowClosure(closure.k, closure.clo)
   157  	}
   158  	b.closures = nil
   159  
   160  	for _, loc := range b.allLocs {
   161  		// Try to replace some non-constant expressions with literals.
   162  		b.rewriteWithLiterals(loc.n, loc.curfn)
   163  
   164  		// Check if the node must be heap allocated for certain reasons
   165  		// such as OMAKESLICE for a large slice.
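         		// For example (a sketch, assuming the default 64KiB limit on
         		// implicit stack allocations), make([]byte, 1<<17) must be
         		// heap allocated no matter how the result is used.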
   166  		if why := HeapAllocReason(loc.n); why != "" {
   167  			b.flow(b.heapHole().addr(loc.n, why), loc)
   168  		}
   169  	}
   170  
   171  	b.walkAll()
   172  	b.finish(fns)
   173  }
   174  
   175  func (b *batch) with(fn *ir.Func) *escape {
   176  	return &escape{
   177  		batch:     b,
   178  		curfn:     fn,
   179  		loopDepth: 1,
   180  	}
   181  }
   182  
   183  func (b *batch) initFunc(fn *ir.Func) {
   184  	e := b.with(fn)
   185  	if fn.Esc() != escFuncUnknown {
   186  		base.Fatalf("unexpected node: %v", fn)
   187  	}
   188  	fn.SetEsc(escFuncPlanned)
   189  	if base.Flag.LowerM > 3 {
   190  		ir.Dump("escAnalyze", fn)
   191  	}
   192  
   193  	// Allocate locations for local variables.
   194  	for _, n := range fn.Dcl {
   195  		e.newLoc(n, true)
   196  	}
   197  
   198  	// Also for hidden parameters (e.g., the ".this" parameter to a
   199  	// method value wrapper).
   200  	if fn.OClosure == nil {
   201  		for _, n := range fn.ClosureVars {
   202  			e.newLoc(n.Canonical(), true)
   203  		}
   204  	}
   205  
   206  	// Initialize resultIndex for result parameters.
   207  	for i, f := range fn.Type().Results() {
   208  		e.oldLoc(f.Nname.(*ir.Name)).resultIndex = 1 + i
   209  	}
   210  }
   211  
   212  func (b *batch) walkFunc(fn *ir.Func) {
   213  	e := b.with(fn)
   214  	fn.SetEsc(escFuncStarted)
   215  
   216  	// Identify labels that mark the head of an unstructured loop.
   217  	ir.Visit(fn, func(n ir.Node) {
   218  		switch n.Op() {
   219  		case ir.OLABEL:
   220  			n := n.(*ir.LabelStmt)
   221  			if n.Label.IsBlank() {
   222  				break
   223  			}
   224  			if e.labels == nil {
   225  				e.labels = make(map[*types.Sym]labelState)
   226  			}
   227  			e.labels[n.Label] = nonlooping
   228  
   229  		case ir.OGOTO:
   230  			// If we visited the label before the goto,
   231  			// then this is a looping label.
   232  			n := n.(*ir.BranchStmt)
   233  			if e.labels[n.Label] == nonlooping {
   234  				e.labels[n.Label] = looping
   235  			}
   236  		}
   237  	})
   238  
   239  	e.block(fn.Body)
   240  
   241  	if len(e.labels) != 0 {
   242  		base.FatalfAt(fn.Pos(), "leftover labels after walkFunc")
   243  	}
   244  }
   245  
   246  func (b *batch) flowClosure(k hole, clo *ir.ClosureExpr) {
   247  	for _, cv := range clo.Func.ClosureVars {
   248  		n := cv.Canonical()
   249  		loc := b.oldLoc(cv)
   250  		if !loc.captured {
   251  			base.FatalfAt(cv.Pos(), "closure variable never captured: %v", cv)
   252  		}
   253  
   254  		// Capture by value for variables <= 128 bytes that are never reassigned.
   255  		n.SetByval(!loc.addrtaken && !loc.reassigned && n.Type().Size() <= 128)
   256  		if !n.Byval() {
   257  			n.SetAddrtaken(true)
   258  			if n.Sym().Name == typecheck.LocalDictName {
   259  				base.FatalfAt(n.Pos(), "dictionary variable not captured by value")
   260  			}
   261  		}
   262  
   263  		if base.Flag.LowerM > 1 {
   264  			how := "ref"
   265  			if n.Byval() {
   266  				how = "value"
   267  			}
   268  			base.WarnfAt(n.Pos(), "%v capturing by %s: %v (addr=%v assign=%v width=%d)", n.Curfn, how, n, loc.addrtaken, loc.reassigned, n.Type().Size())
   269  		}
   270  
   271  		// Flow captured variables to closure.
   272  		k := k
   273  		if !cv.Byval() {
   274  			k = k.addr(cv, "reference")
   275  		}
   276  		b.flow(k.note(cv, "captured by a closure"), loc)
   277  	}
   278  }
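
         // As an illustration of the capture decision above (a sketch; the
         // names are invented):
         //
         //	func counter() func() int {
         //		n := 0
         //		big := [32]int64{}
         //		return func() int {
         //			n++ // n is reassigned, so it is captured by reference
         //			return n + int(big[0]) // big exceeds 128 bytes, so it too is captured by reference
         //		}
         //	}
         //
         // A small variable that the closure only reads would instead be
         // captured by value.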
   279  
   280  func (b *batch) finish(fns []*ir.Func) {
   281  	// Record parameter tags for package export data.
   282  	for _, fn := range fns {
   283  		fn.SetEsc(escFuncTagged)
   284  
   285  		for i, param := range fn.Type().RecvParams() {
   286  			param.Note = b.paramTag(fn, 1+i, param)
   287  		}
   288  	}
   289  
   290  	for _, loc := range b.allLocs {
   291  		n := loc.n
   292  		if n == nil {
   293  			continue
   294  		}
   295  
   296  		if n.Op() == ir.ONAME {
   297  			n := n.(*ir.Name)
   298  			n.Opt = nil
   299  		}
   300  
   301  		// Update n.Esc based on escape analysis results.
   302  
   303  		// Omit escape diagnostics for go/defer wrappers, at least for now.
   304  		// Historically, we haven't printed them, and test cases don't expect them.
   305  		// TODO(mdempsky): Update tests to expect this.
   306  		goDeferWrapper := n.Op() == ir.OCLOSURE && n.(*ir.ClosureExpr).Func.Wrapper()
   307  
   308  		if loc.hasAttr(attrEscapes) {
   309  			if n.Op() == ir.ONAME {
   310  				if base.Flag.CompilingRuntime {
   311  					base.ErrorfAt(n.Pos(), 0, "%v escapes to heap, not allowed in runtime", n)
   312  				}
   313  				if base.Flag.LowerM != 0 {
   314  					base.WarnfAt(n.Pos(), "moved to heap: %v", n)
   315  				}
   316  			} else {
   317  				if base.Flag.LowerM != 0 && !goDeferWrapper {
   318  					if n.Op() == ir.OAPPEND {
   319  						base.WarnfAt(n.Pos(), "append escapes to heap")
   320  					} else {
   321  						base.WarnfAt(n.Pos(), "%v escapes to heap", n)
   322  					}
   323  				}
   324  				if logopt.Enabled() {
   325  					var e_curfn *ir.Func // TODO(mdempsky): Fix.
   326  					logopt.LogOpt(n.Pos(), "escape", "escape", ir.FuncName(e_curfn))
   327  				}
   328  			}
   329  			n.SetEsc(ir.EscHeap)
   330  		} else {
   331  			if base.Flag.LowerM != 0 && n.Op() != ir.ONAME && !goDeferWrapper {
   332  				if n.Op() == ir.OAPPEND {
   333  					base.WarnfAt(n.Pos(), "append does not escape")
   334  				} else {
   335  					base.WarnfAt(n.Pos(), "%v does not escape", n)
   336  				}
   337  			}
   338  			n.SetEsc(ir.EscNone)
   339  			if !loc.hasAttr(attrPersists) {
   340  				switch n.Op() {
   341  				case ir.OCLOSURE:
   342  					n := n.(*ir.ClosureExpr)
   343  					n.SetTransient(true)
   344  				case ir.OMETHVALUE:
   345  					n := n.(*ir.SelectorExpr)
   346  					n.SetTransient(true)
   347  				case ir.OSLICELIT:
   348  					n := n.(*ir.CompLitExpr)
   349  					n.SetTransient(true)
   350  				}
   351  			}
   352  		}
   353  
   354  		// If the result of a string->[]byte conversion is never mutated,
   355  		// then it can simply reuse the string's memory directly.
   356  		if base.Debug.ZeroCopy != 0 {
   357  			if n, ok := n.(*ir.ConvExpr); ok && n.Op() == ir.OSTR2BYTES && !loc.hasAttr(attrMutates) {
   358  				if base.Flag.LowerM >= 1 {
   359  					base.WarnfAt(n.Pos(), "zero-copy string->[]byte conversion")
   360  				}
   361  				n.SetOp(ir.OSTR2BYTESTMP)
   362  			}
   363  		}
   364  	}
   365  }
   366  
   367  // inMutualBatch reports whether function fn is in the batch of
   368  // mutually recursive functions being analyzed. When this is true,
   369  // fn has not yet been analyzed, so its parameters and results
   370  // should be incorporated directly into the flow graph instead of
   371  // relying on its escape analysis tagging.
   372  func (b *batch) inMutualBatch(fn *ir.Name) bool {
   373  	if fn.Defn != nil && fn.Defn.Esc() < escFuncTagged {
   374  		if fn.Defn.Esc() == escFuncUnknown {
   375  			base.FatalfAt(fn.Pos(), "graph inconsistency: %v", fn)
   376  		}
   377  		return true
   378  	}
   379  	return false
   380  }
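
         // For example (a sketch; names invented), if f and g call each other,
         // they are placed in the same batch:
         //
         //	func f(p *int) { g(p) }
         //	func g(q *int) { f(q) }
         //
         // While analyzing f, the call to g cannot rely on g's parameter tags
         // (g has not been tagged yet), so f's argument is connected directly
         // to g's parameter locations in the shared graph.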
   381  
   382  const (
   383  	escFuncUnknown = 0 + iota
   384  	escFuncPlanned
   385  	escFuncStarted
   386  	escFuncTagged
   387  )
   388  
    389  // Mark labels that have no backjumps to them as not increasing e.loopDepth.
   390  type labelState int
   391  
   392  const (
   393  	looping labelState = 1 + iota
   394  	nonlooping
   395  )
   396  
   397  func (b *batch) paramTag(fn *ir.Func, narg int, f *types.Field) string {
   398  	name := func() string {
   399  		if f.Nname != nil {
   400  			return f.Nname.Sym().Name
   401  		}
   402  		return fmt.Sprintf("arg#%d", narg)
   403  	}
   404  
   405  	// Only report diagnostics for user code;
   406  	// not for wrappers generated around them.
   407  	// TODO(mdempsky): Generalize this.
   408  	diagnose := base.Flag.LowerM != 0 && !(fn.Wrapper() || fn.Dupok())
   409  
   410  	if len(fn.Body) == 0 {
   411  		// Assume that uintptr arguments must be held live across the call.
   412  		// This is most important for syscall.Syscall.
   413  		// See golang.org/issue/13372.
   414  		// This really doesn't have much to do with escape analysis per se,
   415  		// but we are reusing the ability to annotate an individual function
   416  		// argument and pass those annotations along to importing code.
   417  		fn.Pragma |= ir.UintptrKeepAlive
   418  
   419  		if f.Type.IsUintptr() {
   420  			if diagnose {
   421  				base.WarnfAt(f.Pos, "assuming %v is unsafe uintptr", name())
   422  			}
   423  			return ""
   424  		}
   425  
   426  		if !f.Type.HasPointers() { // don't bother tagging for scalars
   427  			return ""
   428  		}
   429  
   430  		var esc leaks
   431  
   432  		// External functions are assumed unsafe, unless
   433  		// //go:noescape is given before the declaration.
   434  		if fn.Pragma&ir.Noescape != 0 {
   435  			if diagnose && f.Sym != nil {
   436  				base.WarnfAt(f.Pos, "%v does not escape", name())
   437  			}
   438  			esc.AddMutator(0)
   439  			esc.AddCallee(0)
   440  		} else {
   441  			if diagnose && f.Sym != nil {
   442  				base.WarnfAt(f.Pos, "leaking param: %v", name())
   443  			}
   444  			esc.AddHeap(0)
   445  		}
   446  
   447  		return esc.Encode()
   448  	}
   449  
   450  	if fn.Pragma&ir.UintptrEscapes != 0 {
   451  		if f.Type.IsUintptr() {
   452  			if diagnose {
   453  				base.WarnfAt(f.Pos, "marking %v as escaping uintptr", name())
   454  			}
   455  			return ""
   456  		}
   457  		if f.IsDDD() && f.Type.Elem().IsUintptr() {
   458  			// final argument is ...uintptr.
   459  			if diagnose {
   460  				base.WarnfAt(f.Pos, "marking %v as escaping ...uintptr", name())
   461  			}
   462  			return ""
   463  		}
   464  	}
   465  
   466  	if !f.Type.HasPointers() { // don't bother tagging for scalars
   467  		return ""
   468  	}
   469  
   470  	// Unnamed parameters are unused and therefore do not escape.
   471  	if f.Sym == nil || f.Sym.IsBlank() {
   472  		var esc leaks
   473  		return esc.Encode()
   474  	}
   475  
   476  	n := f.Nname.(*ir.Name)
   477  	loc := b.oldLoc(n)
   478  	esc := loc.paramEsc
   479  	esc.Optimize()
   480  
   481  	if diagnose && !loc.hasAttr(attrEscapes) {
   482  		b.reportLeaks(f.Pos, name(), esc, fn.Type())
   483  	}
   484  
   485  	return esc.Encode()
   486  }
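
         // For example (a sketch; names invented), given
         //
         //	var sink *int
         //
         //	func f(p, q *int) *int {
         //		sink = p // p leaks to the heap
         //		return q // q leaks to f's first result
         //	}
         //
         // p is tagged as leaking to the heap and q as leaking to result 0;
         // with -m the diagnostics read along the lines of "leaking param: p"
         // and "leaking param: q to result ~r0 level=0".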
   487  
   488  func (b *batch) reportLeaks(pos src.XPos, name string, esc leaks, sig *types.Type) {
   489  	warned := false
   490  	if x := esc.Heap(); x >= 0 {
   491  		if x == 0 {
   492  			base.WarnfAt(pos, "leaking param: %v", name)
   493  		} else {
   494  			// TODO(mdempsky): Mention level=x like below?
   495  			base.WarnfAt(pos, "leaking param content: %v", name)
   496  		}
   497  		warned = true
   498  	}
   499  	for i := 0; i < numEscResults; i++ {
   500  		if x := esc.Result(i); x >= 0 {
   501  			res := sig.Result(i).Nname.Sym().Name
   502  			base.WarnfAt(pos, "leaking param: %v to result %v level=%d", name, res, x)
   503  			warned = true
   504  		}
   505  	}
   506  
   507  	if base.Debug.EscapeMutationsCalls <= 0 {
   508  		if !warned {
   509  			base.WarnfAt(pos, "%v does not escape", name)
   510  		}
   511  		return
   512  	}
   513  
   514  	if x := esc.Mutator(); x >= 0 {
   515  		base.WarnfAt(pos, "mutates param: %v derefs=%v", name, x)
   516  		warned = true
   517  	}
   518  	if x := esc.Callee(); x >= 0 {
   519  		base.WarnfAt(pos, "calls param: %v derefs=%v", name, x)
   520  		warned = true
   521  	}
   522  
   523  	if !warned {
   524  		base.WarnfAt(pos, "%v does not escape, mutate, or call", name)
   525  	}
   526  }
   527  
   528  // rewriteWithLiterals attempts to replace certain non-constant expressions
   529  // within n with a literal if possible.
   530  func (b *batch) rewriteWithLiterals(n ir.Node, fn *ir.Func) {
   531  	if n == nil || fn == nil {
   532  		return
   533  	}
   534  	if n.Op() != ir.OMAKESLICE && n.Op() != ir.OCONVIFACE {
   535  		return
   536  	}
   537  	if base.Flag.Cfg.CoverageInfo != nil {
   538  		// Avoid altering coverage results.
   539  		return
   540  	}
   541  
   542  	// Look up a cached ReassignOracle for the function, lazily computing one if needed.
   543  	ro := b.reassignOracle(fn)
   544  	if ro == nil {
   545  		base.Fatalf("no ReassignOracle for function %v with closure parent %v", fn, fn.ClosureParent)
   546  	}
   547  
   548  	assignTemp := func(n ir.Node, init *ir.Nodes) {
   549  		// Preserve any side effects of n by assigning it to an otherwise unused temp.
   550  		pos := n.Pos()
   551  		tmp := typecheck.TempAt(pos, fn, n.Type())
   552  		init.Append(typecheck.Stmt(ir.NewDecl(pos, ir.ODCL, tmp)))
   553  		init.Append(typecheck.Stmt(ir.NewAssignStmt(pos, tmp, n)))
   554  	}
   555  
   556  	switch n.Op() {
   557  	case ir.OMAKESLICE:
    558  		// Check if we can replace a non-constant argument to make with
    559  		// a literal so that the slice can be stack allocated if it is otherwise eligible.
   560  		n := n.(*ir.MakeExpr)
   561  
   562  		r := &n.Cap
   563  		if n.Cap == nil {
   564  			r = &n.Len
   565  		}
   566  
   567  		if (*r).Op() != ir.OLITERAL {
   568  			if s := ro.StaticValue(*r); s.Op() == ir.OLITERAL {
   569  				lit, ok := s.(*ir.BasicLit)
   570  				if !ok || lit.Val().Kind() != constant.Int {
   571  					base.Fatalf("unexpected BasicLit Kind")
   572  				}
   573  				if constant.Compare(lit.Val(), token.GEQ, constant.MakeInt64(0)) {
   574  					// Preserve any side effects of the original expression, then replace it.
   575  					assignTemp(*r, n.PtrInit())
   576  					*r = lit
   577  				}
   578  			}
   579  		}
   580  	case ir.OCONVIFACE:
   581  		// Check if we can replace a non-constant expression in an interface conversion with
   582  		// a literal to avoid heap allocating the underlying interface value.
   583  		conv := n.(*ir.ConvExpr)
   584  		if conv.X.Op() != ir.OLITERAL && !conv.X.Type().IsInterface() {
   585  			v := ro.StaticValue(conv.X)
   586  			if v != nil && v.Op() == ir.OLITERAL && ir.ValidTypeForConst(conv.X.Type(), v.Val()) {
   587  				if base.Debug.EscapeDebug >= 3 {
   588  					base.WarnfAt(n.Pos(), "rewriting OCONVIFACE value from %v (%v) to %v (%v)", conv.X, conv.X.Type(), v, v.Type())
   589  				}
   590  				// Preserve any side effects of the original expression, then replace it.
   591  				assignTemp(conv.X, conv.PtrInit())
   592  				v := v.(*ir.BasicLit)
   593  				conv.X = ir.NewBasicLit(conv.X.Pos(), conv.X.Type(), v.Val())
   594  				typecheck.Expr(conv)
   595  			}
   596  		}
   597  	}
   598  }
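
         // For example (a sketch), in
         //
         //	n := 8
         //	s := make([]byte, n)
         //
         // n is never reassigned, so the reassignment oracle resolves it to
         // the literal 8 and the call is rewritten as make([]byte, 8), which
         // may then be stack allocated if s does not otherwise escape. The
         // OCONVIFACE case is analogous: a non-constant operand that is
         // provably constant is replaced by its literal so the conversion
         // need not allocate.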
   599  
   600  // reassignOracle returns an initialized *ir.ReassignOracle for fn.
   601  // If fn is a closure, it returns the ReassignOracle for the ultimate parent.
   602  //
   603  // A new ReassignOracle is initialized lazily if needed, and the result
   604  // is cached to reduce duplicative work of preparing a ReassignOracle.
   605  func (b *batch) reassignOracle(fn *ir.Func) *ir.ReassignOracle {
   606  	if ro, ok := b.reassignOracles[fn]; ok {
   607  		return ro // Hit.
   608  	}
   609  
   610  	// For closures, we want the ultimate parent's ReassignOracle,
   611  	// so walk up the parent chain, if any.
   612  	f := fn
   613  	for f.ClosureParent != nil && !f.ClosureParent.IsPackageInit() {
   614  		f = f.ClosureParent
   615  	}
   616  
   617  	if f != fn {
   618  		// We found a parent.
   619  		ro := b.reassignOracles[f]
   620  		if ro != nil {
   621  			// Hit, via a parent. Before returning, store this ro for the original fn as well.
   622  			b.reassignOracles[fn] = ro
   623  			return ro
   624  		}
   625  	}
   626  
   627  	// Miss. We did not find a ReassignOracle for fn or a parent, so lazily create one.
   628  	ro := &ir.ReassignOracle{}
   629  	ro.Init(f)
   630  
   631  	// Cache the answer for the original fn.
   632  	b.reassignOracles[fn] = ro
   633  	if f != fn {
   634  		// Cache for the parent as well.
   635  		b.reassignOracles[f] = ro
   636  	}
   637  	return ro
   638  }
   639  
