Source file src/cmd/compile/internal/slice/slice.go

     1  // Copyright 2025 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package slice
     6  
     7  // This file implements a stack-allocation optimization
     8  // for the backing store of slices.
     9  //
    10  // Consider the code:
    11  //
    12  //     var s []int
    13  //     for i := range ... {
    14  //        s = append(s, i)
    15  //     }
    16  //     return s
    17  //
    18  // Some of the append operations will need to do an allocation
    19  // by calling growslice. This will happen on the 1st, 2nd, 4th,
    20  // 8th, etc. append calls. The allocations done by all but the
    21  // last growslice call will then immediately be garbage.
    22  //
    23  // We'd like to avoid doing some of those intermediate
    24  // allocations if possible.
    25  //
    26  // If we can determine that the "return s" statement is the
    27  // *only* way that the backing store for s escapes, then we
    28  // can rewrite the code to something like:
    29  //
    30  //     var s []int
    31  //     for i := range N {
    32  //        s = append(s, i)
    33  //     }
    34  //     s = move2heap(s)
    35  //     return s
    36  //
    37  // Using the move2heap runtime function, which does:
    38  //
    39  //     move2heap(s):
    40  //         If s is not backed by a stackframe-allocated
    41  //         backing store, return s. Otherwise, copy s
    42  //         to the heap and return the copy.
    43  //
    44  // Now we can treat the backing store of s allocated at the
    45  // append site as not escaping. Previous stack allocation
    46  // optimizations now apply, which can use a fixed-size
    47  // stack-allocated backing store for s when appending.
    48  // (See ../ssagen/ssa.go:(*state).append)
    49  //
    50  // It is tricky to do this optimization safely. To describe
    51  // our analysis, we first define what an "exclusive" slice
    52  // variable is.
    53  //
    54  // A slice variable (a variable of slice type) is called
    55  // "exclusive" if, when it has a reference to a
    56  // stackframe-allocated backing store, it is the only
    57  // variable with such a reference.
    58  //
    59  // In other words, a slice variable is exclusive if
    60  // any of the following holds:
    61  //  1) It points to a heap-allocated backing store
    62  //  2) It points to a stack-allocated backing store
    63  //     for any parent frame.
    64  //  3) It is the only variable that references its
    65  //     backing store.
    66  //  4) It is nil.
    67  //
    68  // The nice thing about exclusive slice variables is that
    69  // it is always safe to do
    70  //    s = move2heap(s)
    71  // whenever s is an exclusive slice variable. Because no
    72  // one else has a reference to the backing store, no one
    73  // else can tell that we moved the backing store from one
    74  // location to another.
    75  //
    76  // Note that exclusiveness is a dynamic property. A slice
    77  // variable may be exclusive during some parts of execution
    78  // and not exclusive during others.
    79  //
    80  // The following operations set or preserve the exclusivity
    81  // of a slice variable s:
    82  //     s = nil
    83  //     s = append(s, ...)
    84  //     s = s[i:j]
    85  //     ... = s[i]
    86  //     s[i] = ...
    87  //     f(s) where f does not escape its argument
    88  // Other operations destroy exclusivity. A non-exhaustive list includes:
    89  //     x = s
    90  //     *p = s
    91  //     f(s) where f escapes its argument
    92  //     return s
// To err on the safe side, we whitelist exclusivity-preserving
// operations and we assume that any other operations that mention s
    95  // destroy its exclusivity.
    96  //
    97  // Our strategy is to move the backing store of s to the heap before
    98  // any exclusive->nonexclusive transition. That way, s will only ever
    99  // have a reference to a stack backing store while it is exclusive.
   100  //
   101  // move2heap for a variable s is implemented with:
   102  //     if s points to within the stack frame {
   103  //         s2 := make([]T, s.len, s.cap)
   104  //         copy(s2[:s.cap], s[:s.cap])
   105  //         s = s2
   106  //     }
   107  // Note that in general we need to copy all of s[:cap(s)] elements when
   108  // moving to the heap. As an optimization, we keep track of slice variables
   109  // whose capacity, and the elements in s[len(s):cap(s)], are never accessed.
   110  // For those slice variables, we can allocate to the next size class above
   111  // the length, which saves memory and copying cost.
   112  
   113  import (
   114  	"cmd/compile/internal/base"
   115  	"cmd/compile/internal/escape"
   116  	"cmd/compile/internal/ir"
   117  	"cmd/compile/internal/reflectdata"
   118  )
   119  
   120  func Funcs(all []*ir.Func) {
   121  	if base.Flag.N != 0 {
   122  		return
   123  	}
   124  	for _, fn := range all {
   125  		analyze(fn)
   126  	}
   127  }
   128  
// analyze examines a single function for local slice variables whose
// append backing stores can be stack allocated, and rewrites the
// function to insert a move2heap operation before each
// exclusive->nonexclusive transition it finds.
// See the long comment at the top of this file for the overall scheme.
func analyze(fn *ir.Func) {
	// sliceInfo accumulates everything we learn about one candidate
	// slice variable during the IR walk below.
	type sliceInfo struct {
		// Slice variable.
		s *ir.Name

		// Count of uses that this pass understands.
		okUses int32
		// Count of all uses found.
		allUses int32
		// The optimization is only applied when okUses == allUses,
		// i.e. every use of s was classified as exclusivity-preserving.

		// A place where the slice variable transitions from
		// exclusive to nonexclusive.
		// We could keep track of more than one, but one is enough for now.
		// Currently, this can be either a return statement or
		// an assignment.
		// TODO: other possible transitions?
		transition ir.Stmt

		// Each s = append(s, ...) instance we found.
		appends []*ir.CallExpr

		// Weight of the number of s = append(s, ...) instances we found.
		// The optimizations we do are only really useful if there are at
		// least weight 2. (Note: appends in loops have weight >= 2.)
		appendWeight int

		// Loop depth at declaration point.
		// Use for heuristics only, it is not guaranteed to be correct
		// in the presence of gotos.
		declDepth int

		// Whether we ever do cap(s), or other operations that use cap(s)
		// (possibly implicitly), like s[i:j].
		capUsed bool
	}

	// Select the candidate slice variables.
	// Every variable (*ir.Name) that we are tracking will have
	// a non-nil *sliceInfo in its Opt field.
	haveLocalSlice := false
	maxStackSize := int64(base.Debug.VariableMakeThreshold)
	var namedRets []*ir.Name
	for _, s := range fn.Dcl {
		if !s.Type().IsSlice() {
			continue
		}
		if s.Type().Elem().Size() > maxStackSize {
			// Element type too big to be worth a stack buffer.
			continue
		}
		if !base.VariableMakeHash.MatchPos(s.Pos(), nil) {
			// Excluded by the debug hash (used for bisecting).
			continue
		}
		s.Opt = &sliceInfo{s: s} // start tracking s
		haveLocalSlice = true
		if s.Class == ir.PPARAMOUT {
			// Named results are used implicitly by bare returns;
			// remember them so those implicit uses are accounted for.
			namedRets = append(namedRets, s)
		}
	}
	if !haveLocalSlice {
		// No candidates; skip the walk entirely.
		return
	}

	// Keep track of loop depth while walking.
	loopDepth := 0

	// tracking returns the info for the slice variable if n is a slice
	// variable that we're still considering, or nil otherwise.
	tracking := func(n ir.Node) *sliceInfo {
		if n == nil || n.Op() != ir.ONAME {
			return nil
		}
		s := n.(*ir.Name)
		if s.Opt == nil {
			// Never a candidate, or we already gave up on it.
			return nil
		}
		return s.Opt.(*sliceInfo)
	}

	// addTransition(n, loc) records that s experiences an exclusive->nonexclusive
	// transition somewhere within loc.
	addTransition := func(i *sliceInfo, loc ir.Stmt) {
		if i.transition != nil {
			// We only keep track of a single exclusive->nonexclusive transition
			// for a slice variable. If we find more than one, give up.
			// (More than one transition location would be fine, but we would
			// start to get worried about introducing too much additional code.)
			i.s.Opt = nil
			return
		}
		if loopDepth > i.declDepth {
			// Conservatively, we disable this optimization when the
			// transition is inside a loop. This can result in adding
			// overhead unnecessarily in cases like:
			// func f(n int, p *[]byte) {
			//     var s []byte
			//     for i := range n {
			//         *p = s
			//         s = append(s, 0)
			//     }
			// }
			i.s.Opt = nil
			return
		}
		i.transition = loc
	}

	// Examine an x = y assignment that occurs somewhere within statement stmt.
	assign := func(x, y ir.Node, stmt ir.Stmt) {
		if i := tracking(x); i != nil {
			// s = y. Check for understood patterns for y.
			if y == nil || y.Op() == ir.ONIL {
				// s = nil is ok.
				i.okUses++
			} else if y.Op() == ir.OSLICELIT {
				// s = []{...} is ok.
				// Note: this reveals capacity. Should it?
				i.okUses++
				i.capUsed = true
			} else if y.Op() == ir.OSLICE {
				y := y.(*ir.SliceExpr)
				if y.X == i.s {
					// s = s[...:...] is ok
					i.okUses += 2 // s appears on both sides: two ONAME uses
					i.capUsed = true
				}
			} else if y.Op() == ir.OAPPEND {
				y := y.(*ir.CallExpr)
				if y.Args[0] == i.s {
					// s = append(s, ...) is ok
					i.okUses += 2 // s appears on both sides: two ONAME uses
					i.appends = append(i.appends, y)
					i.appendWeight += 1 + (loopDepth - i.declDepth)
				}
				// TODO: s = append(nil, ...)?
			}
			// Note that technically s = make([]T, ...) preserves exclusivity, but
			// we don't track that because we assume users who wrote that know
			// better than the compiler does.

			// TODO: figure out how to handle s = fn(..., s, ...)
			// It would be nice to maintain exclusivity of s in this situation.
			// But unfortunately, fn can return one of its other arguments, which
			// may be a slice with a stack-allocated backing store other than s.
			// (which may have preexisting references to its backing store).
			//
			// Maybe we could do it if s is the only argument?
		}

		if i := tracking(y); i != nil {
			// ... = s
			// Treat this as an exclusive->nonexclusive transition.
			i.okUses++
			addTransition(i, stmt)
		}
	}

	// do walks the IR, classifying every use of each tracked slice
	// variable. Uses that it understands bump okUses; all ONAME uses
	// bump allUses; anything not understood leaves allUses > okUses,
	// which disqualifies the variable in the final loop below.
	var do func(ir.Node) bool
	do = func(n ir.Node) bool {
		if n == nil {
			return false
		}
		switch n.Op() {
		case ir.ONAME:
			if i := tracking(n); i != nil {
				// A use of a slice variable. Count it.
				i.allUses++
			}
		case ir.ODCL:
			n := n.(*ir.Decl)
			if i := tracking(n.X); i != nil {
				i.okUses++
				i.declDepth = loopDepth
			}
		case ir.OINDEX:
			n := n.(*ir.IndexExpr)
			if i := tracking(n.X); i != nil {
				// s[i] is ok.
				i.okUses++
			}
		case ir.OLEN:
			n := n.(*ir.UnaryExpr)
			if i := tracking(n.X); i != nil {
				// len(s) is ok
				i.okUses++
			}
		case ir.OCAP:
			n := n.(*ir.UnaryExpr)
			if i := tracking(n.X); i != nil {
				// cap(s) is ok
				i.okUses++
				i.capUsed = true
			}
		case ir.OADDR:
			n := n.(*ir.AddrExpr)
			if n.X.Op() == ir.OINDEX {
				n := n.X.(*ir.IndexExpr)
				if i := tracking(n.X); i != nil {
					// &s[i] is definitely a nonexclusive transition.
					// (We need this case because s[i] is ok, but &s[i] is not.)
					i.s.Opt = nil
				}
			}
		case ir.ORETURN:
			n := n.(*ir.ReturnStmt)
			for _, x := range n.Results {
				if i := tracking(x); i != nil {
					i.okUses++
					// We go exclusive->nonexclusive here
					addTransition(i, n)
				}
			}
			if len(n.Results) == 0 {
				// Uses of named result variables are implicit here.
				for _, x := range namedRets {
					if i := tracking(x); i != nil {
						addTransition(i, n)
					}
				}
			}
		case ir.OCALLFUNC:
			n := n.(*ir.CallExpr)
			for idx, arg := range n.Args {
				if i := tracking(arg); i != nil {
					if !argLeak(n, idx) {
						// Passing s to a nonescaping arg is ok.
						i.okUses++
						i.capUsed = true
					}
				}
			}
		case ir.ORANGE:
			// Range over slice is ok.
			n := n.(*ir.RangeStmt)
			if i := tracking(n.X); i != nil {
				i.okUses++
			}
		case ir.OAS:
			n := n.(*ir.AssignStmt)
			assign(n.X, n.Y, n)
		case ir.OAS2:
			n := n.(*ir.AssignListStmt)
			for i := range len(n.Lhs) {
				assign(n.Lhs[i], n.Rhs[i], n)
			}
		case ir.OCLOSURE:
			n := n.(*ir.ClosureExpr)
			for _, v := range n.Func.ClosureVars {
				// A closure capture is a use of the outer variable.
				do(v.Outer)
			}
		}
		if n.Op() == ir.OFOR || n.Op() == ir.ORANGE {
			// Note: loopDepth isn't really right for init portion
			// of the for statement, but that's ok. Correctness
			// does not depend on depth info.
			loopDepth++
			defer func() { loopDepth-- }()
		}
		// Check all the children.
		ir.DoChildren(n, do)
		return false
	}

	// Run the analysis over the whole body.
	for _, stmt := range fn.Body {
		do(stmt)
	}

	// Process accumulated info to find slice variables
	// that we can allocate on the stack.
	for _, s := range fn.Dcl {
		if s.Opt == nil {
			continue
		}
		i := s.Opt.(*sliceInfo)
		s.Opt = nil // done tracking; clear for later passes
		if i.okUses != i.allUses {
			// Some use of i.s that we don't understand lurks. Give up.
			continue
		}

		// At this point, we've decided that we *can* do
		// the optimization.

		if i.transition == nil {
			// Exclusive for its whole lifetime. That means it
			// didn't escape. We can already handle nonescaping
			// slices without this pass.
			continue
		}
		if i.appendWeight < 2 {
			// This optimization only really helps if there is
			// (dynamically) more than one append.
			continue
		}

		// Commit point - at this point we've decided we *should*
		// do the optimization.

		// Insert a move2heap operation before the exclusive->nonexclusive
		// transition.
		move := ir.NewMoveToHeapExpr(i.transition.Pos(), i.s)
		if i.capUsed {
			// Capacity is observable, so the heap copy must keep
			// cap(s) intact (copy all of s[:cap(s)]).
			move.PreserveCapacity = true
		}
		move.RType = reflectdata.AppendElemRType(i.transition.Pos(), i.appends[0])
		move.SetType(i.s.Type())
		move.SetTypecheck(1)
		as := ir.NewAssignStmt(i.transition.Pos(), i.s, move)
		as.SetTypecheck(1)
		i.transition.PtrInit().Prepend(as)
		// Note: we prepend because we need to put the move2heap
		// operation first, before any other init work, as the transition
		// might occur in the init work.

		// Now that we've inserted a move2heap operation before every
		// exclusive -> nonexclusive transition, appends can now use
		// stack backing stores.
		// (This is the whole point of this pass, to enable stack
		// allocation of append backing stores.)
		for _, a := range i.appends {
			a.SetEsc(ir.EscNone)
			if i.capUsed {
				a.UseBuf = true
			}
		}
	}
}
   455  
   456  // argLeak reports if the idx'th argument to the call n escapes anywhere
   457  // (to the heap, another argument, return value, etc.)
   458  // If unknown returns true.
   459  func argLeak(n *ir.CallExpr, idx int) bool {
   460  	if n.Op() != ir.OCALLFUNC {
   461  		return true
   462  	}
   463  	fn := ir.StaticCalleeName(ir.StaticValue(n.Fun))
   464  	if fn == nil {
   465  		return true
   466  	}
   467  	fntype := fn.Type()
   468  	if recv := fntype.Recv(); recv != nil {
   469  		if idx == 0 {
   470  			return escape.ParseLeaks(recv.Note).Any()
   471  		}
   472  		idx--
   473  	}
   474  	return escape.ParseLeaks(fntype.Params()[idx].Note).Any()
   475  }
   476  

View as plain text