Source file src/cmd/compile/internal/walk/range.go

     1  // Copyright 2009 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package walk
     6  
     7  import (
     8  	"go/constant"
     9  	"internal/buildcfg"
    10  	"unicode/utf8"
    11  
    12  	"cmd/compile/internal/base"
    13  	"cmd/compile/internal/ir"
    14  	"cmd/compile/internal/reflectdata"
    15  	"cmd/compile/internal/ssagen"
    16  	"cmd/compile/internal/typecheck"
    17  	"cmd/compile/internal/types"
    18  	"cmd/internal/src"
    19  	"cmd/internal/sys"
    20  )
    21  
    22  func cheapComputableIndex(width int64) bool {
    23  	switch ssagen.Arch.LinkArch.Family {
    24  	// MIPS does not have R+R addressing
    25  	// Arm64 may lack ability to generate this code in our assembler,
    26  	// but the architecture supports it.
    27  	case sys.PPC64, sys.S390X:
    28  		return width == 1
    29  	case sys.AMD64, sys.I386, sys.ARM64, sys.ARM:
    30  		switch width {
    31  		case 1, 2, 4, 8:
    32  			return true
    33  		}
    34  	}
    35  	return false
    36  }
    37  
// walkRange transforms various forms of ORANGE into
// simpler forms.  The result must be assigned back to n.
// Node n may also be modified in place, and may also be
// the returned node.
//
// Each supported range kind (integer, array/slice/pointer-to-array,
// map, channel, string) is lowered into an ordinary for loop whose
// init, cond, post, and body are constructed below.
func walkRange(nrange *ir.RangeStmt) ir.Node {
	base.Assert(!nrange.DistinctVars) // Should all be rewritten before escape analysis

	// "for k := range m { delete(m, k) }" lowers to one mapclear call.
	if isMapClear(nrange) {
		return mapRangeClear(nrange)
	}

	// nfor is the plain for loop that replaces the range statement.
	nfor := ir.NewForStmt(nrange.Pos(), nil, nil, nil, nil, nrange.DistinctVars)
	nfor.SetInit(nrange.Init())
	nfor.Label = nrange.Label

	// variable name conventions:
	//	ohv1, hv1, hv2: hidden (old) val 1, 2
	//	ha, hit: hidden aggregate, iterator
	//	hn, hp: hidden len, pointer
	//	hb: hidden bool
	//	a, v1, v2: not hidden aggregate, val 1, 2

	a := nrange.X
	t := a.Type()
	lno := ir.SetPos(a)

	v1, v2 := nrange.Key, nrange.Value

	// Normalize blank iteration variables:
	// "for v1, _ =" behaves like "for v1 =", and "for _ =" like "for".
	if ir.IsBlank(v2) {
		v2 = nil
	}

	if ir.IsBlank(v1) && v2 == nil {
		v1 = nil
	}

	if v1 == nil && v2 != nil {
		base.Fatalf("walkRange: v2 != nil while v1 == nil")
	}

	var body []ir.Node
	var init []ir.Node
	switch k := t.Kind(); {
	default:
		base.Fatalf("walkRange")

	case types.IsInt[k]:
		// Range over an integer: for hv1 := 0; hv1 < hn; hv1++ { v1 = hv1; ... }
		if nn := arrayRangeClear(nrange, v1, v2, a); nn != nil {
			base.Pos = lno
			return nn
		}
		hv1 := typecheck.TempAt(base.Pos, ir.CurFunc, t)
		hn := typecheck.TempAt(base.Pos, ir.CurFunc, t)

		// hv1 = 0 (nil RHS zeroes the temp); hn = a
		init = append(init, ir.NewAssignStmt(base.Pos, hv1, nil))
		init = append(init, ir.NewAssignStmt(base.Pos, hn, a))

		nfor.Cond = ir.NewBinaryExpr(base.Pos, ir.OLT, hv1, hn)
		nfor.Post = ir.NewAssignStmt(base.Pos, hv1, ir.NewBinaryExpr(base.Pos, ir.OADD, hv1, ir.NewInt(base.Pos, 1)))

		if v1 != nil {
			body = []ir.Node{rangeAssign(nrange, hv1)}
		}

	case k == types.TARRAY, k == types.TSLICE, k == types.TPTR: // TPTR is pointer-to-array
		// "for i := range a { a[i] = zero }" lowers to a memclr call.
		if nn := arrayRangeClear(nrange, v1, v2, a); nn != nil {
			base.Pos = lno
			return nn
		}

		// Element type of the iteration
		var elem *types.Type
		switch t.Kind() {
		case types.TSLICE, types.TARRAY:
			elem = t.Elem()
		case types.TPTR:
			elem = t.Elem().Elem()
		}

		// order.stmt arranged for a copy of the array/slice variable if needed.
		ha := a

		hv1 := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TINT])
		hn := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TINT])

		// hv1 = 0; hn = len(ha)
		init = append(init, ir.NewAssignStmt(base.Pos, hv1, nil))
		init = append(init, ir.NewAssignStmt(base.Pos, hn, ir.NewUnaryExpr(base.Pos, ir.OLEN, ha)))

		nfor.Cond = ir.NewBinaryExpr(base.Pos, ir.OLT, hv1, hn)
		nfor.Post = ir.NewAssignStmt(base.Pos, hv1, ir.NewBinaryExpr(base.Pos, ir.OADD, hv1, ir.NewInt(base.Pos, 1)))

		// for range ha { body }
		if v1 == nil {
			break
		}

		// for v1 := range ha { body }
		if v2 == nil {
			body = []ir.Node{rangeAssign(nrange, hv1)}
			break
		}

		// for v1, v2 := range ha { body }
		if cheapComputableIndex(elem.Size()) {
			// v1, v2 = hv1, ha[hv1]
			tmp := ir.NewIndexExpr(base.Pos, ha, hv1)
			tmp.SetBounded(true) // hv1 < len(ha) is guaranteed by the loop condition
			body = []ir.Node{rangeAssign2(nrange, hv1, tmp)}
			break
		}

		// Slice to iterate over
		var hs ir.Node
		if t.IsSlice() {
			hs = ha
		} else {
			var arr ir.Node
			if t.IsPtr() {
				arr = ha
			} else {
				arr = typecheck.NodAddr(ha)
				arr.SetType(t.PtrTo())
				arr.SetTypecheck(1)
			}
			hs = ir.NewSliceExpr(base.Pos, ir.OSLICEARR, arr, nil, nil, nil)
			// old typechecker doesn't know OSLICEARR, so we set types explicitly
			hs.SetType(types.NewSlice(elem))
			hs.SetTypecheck(1)
		}

		// We use a "pointer" to keep track of where we are in the backing array
		// of the slice hs. This pointer starts at hs.ptr and gets incremented
		// by the element size each time through the loop.
		//
		// It's tricky, though, as on the last iteration this pointer gets
		// incremented to point past the end of the backing array. We can't
		// let the garbage collector see that final out-of-bounds pointer.
		//
		// To avoid this, we keep the "pointer" alternately in 2 variables, one
		// pointer typed and one uintptr typed. Most of the time it lives in the
		// regular pointer variable, but when it might be out of bounds (after it
		// has been incremented, but before the loop condition has been checked)
		// it lives briefly in the uintptr variable.
		//
		// hp contains the pointer version (of type *T, where T is the element type).
		// It is guaranteed to always be in range, keeps the backing store alive,
		// and is updated on stack copies. If a GC occurs when this function is
		// suspended at any safepoint, this variable ensures correct operation.
		//
		// hu contains the equivalent uintptr version. It may point past the
		// end, but doesn't keep the backing store alive and doesn't get updated
		// on a stack copy. If a GC occurs while this function is on the top of
		// the stack, then the last frame is scanned conservatively and hu will
		// act as a reference to the backing array to ensure it is not collected.
		//
		// The "pointer" we're moving across the backing array lives in one
		// or the other of hp and hu as the loop proceeds.
		//
		// hp is live during most of the body of the loop. But it isn't live
		// at the very top of the loop, when we haven't checked i<n yet, and
		// it could point off the end of the backing store.
		// hu is live only at the very top and very bottom of the loop.
		// In particular, only when it cannot possibly be live across a call.
		//
		// So we do
		//   hu = uintptr(unsafe.Pointer(hs.ptr))
		//   for i := 0; i < hs.len; i++ {
		//     hp = (*T)(unsafe.Pointer(hu))
		//     v1, v2 = i, *hp
		//     ... body of loop ...
		//     hu = uintptr(unsafe.Pointer(hp)) + elemsize
		//   }
		//
		// Between the assignments to hu and the assignment back to hp, there
		// must not be any calls.

		// Pointer to current iteration position. Start on entry to the loop
		// with the pointer in hu.
		ptr := ir.NewUnaryExpr(base.Pos, ir.OSPTR, hs)
		ptr.SetBounded(true)
		huVal := ir.NewConvExpr(base.Pos, ir.OCONVNOP, types.Types[types.TUNSAFEPTR], ptr)
		huVal = ir.NewConvExpr(base.Pos, ir.OCONVNOP, types.Types[types.TUINTPTR], huVal)
		hu := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TUINTPTR])
		init = append(init, ir.NewAssignStmt(base.Pos, hu, huVal))

		// Convert hu to hp at the top of the loop (after the condition has been checked).
		hpVal := ir.NewConvExpr(base.Pos, ir.OCONVNOP, types.Types[types.TUNSAFEPTR], hu)
		hpVal.SetCheckPtr(true) // disable checkptr on this conversion
		hpVal = ir.NewConvExpr(base.Pos, ir.OCONVNOP, elem.PtrTo(), hpVal)
		hp := typecheck.TempAt(base.Pos, ir.CurFunc, elem.PtrTo())
		body = append(body, ir.NewAssignStmt(base.Pos, hp, hpVal))

		// Assign variables on the LHS of the range statement. Use *hp to get the element.
		e := ir.NewStarExpr(base.Pos, hp)
		e.SetBounded(true)
		a := rangeAssign2(nrange, hv1, e)
		body = append(body, a)

		// Advance pointer for next iteration of the loop.
		// This reads from hp and writes to hu.
		huVal = ir.NewConvExpr(base.Pos, ir.OCONVNOP, types.Types[types.TUNSAFEPTR], hp)
		huVal = ir.NewConvExpr(base.Pos, ir.OCONVNOP, types.Types[types.TUINTPTR], huVal)
		as := ir.NewAssignStmt(base.Pos, hu, ir.NewBinaryExpr(base.Pos, ir.OADD, huVal, ir.NewInt(base.Pos, elem.Size())))
		nfor.Post = ir.NewBlockStmt(base.Pos, []ir.Node{nfor.Post, as})

	case k == types.TMAP:
		// order.stmt allocated the iterator for us.
		// we only use a once, so no copy needed.
		ha := a

		hit := nrange.Prealloc
		th := hit.Type()
		// depends on layout of iterator struct.
		// See cmd/compile/internal/reflectdata/reflect.go:MapIterType
		var keysym, elemsym *types.Sym
		var iterInit, iterNext string
		if buildcfg.Experiment.SwissMap {
			keysym = th.Field(0).Sym
			elemsym = th.Field(1).Sym // ditto
			iterInit = "mapIterStart"
			iterNext = "mapIterNext"
		} else {
			keysym = th.Field(0).Sym
			elemsym = th.Field(1).Sym // ditto
			iterInit = "mapiterinit"
			iterNext = "mapiternext"
		}

		// init: iterInit(maptype, ha, &hit)
		fn := typecheck.LookupRuntime(iterInit, t.Key(), t.Elem(), th)
		init = append(init, mkcallstmt1(fn, reflectdata.RangeMapRType(base.Pos, nrange), ha, typecheck.NodAddr(hit)))
		// cond: loop while the iterator's key field is non-nil.
		nfor.Cond = ir.NewBinaryExpr(base.Pos, ir.ONE, ir.NewSelectorExpr(base.Pos, ir.ODOT, hit, keysym), typecheck.NodNil())

		// post: iterNext(&hit)
		fn = typecheck.LookupRuntime(iterNext, th)
		nfor.Post = mkcallstmt1(fn, typecheck.NodAddr(hit))

		// key/elem are loaded through the iterator's key and elem pointers.
		key := ir.NewStarExpr(base.Pos, typecheck.ConvNop(ir.NewSelectorExpr(base.Pos, ir.ODOT, hit, keysym), types.NewPtr(t.Key())))
		if v1 == nil {
			body = nil
		} else if v2 == nil {
			body = []ir.Node{rangeAssign(nrange, key)}
		} else {
			elem := ir.NewStarExpr(base.Pos, typecheck.ConvNop(ir.NewSelectorExpr(base.Pos, ir.ODOT, hit, elemsym), types.NewPtr(t.Elem())))
			body = []ir.Node{rangeAssign2(nrange, key, elem)}
		}

	case k == types.TCHAN:
		// order.stmt arranged for a copy of the channel variable.
		ha := a

		hv1 := typecheck.TempAt(base.Pos, ir.CurFunc, t.Elem())
		hv1.SetTypecheck(1)
		if t.Elem().HasPointers() {
			init = append(init, ir.NewAssignStmt(base.Pos, hv1, nil))
		}
		hb := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TBOOL])

		// cond: hv1, hb = <-ha; hb != false
		nfor.Cond = ir.NewBinaryExpr(base.Pos, ir.ONE, hb, ir.NewBool(base.Pos, false))
		lhs := []ir.Node{hv1, hb}
		rhs := []ir.Node{ir.NewUnaryExpr(base.Pos, ir.ORECV, ha)}
		a := ir.NewAssignListStmt(base.Pos, ir.OAS2RECV, lhs, rhs)
		a.SetTypecheck(1)
		nfor.Cond = ir.InitExpr([]ir.Node{a}, nfor.Cond)
		if v1 == nil {
			body = nil
		} else {
			body = []ir.Node{rangeAssign(nrange, hv1)}
		}
		// Zero hv1. This prevents hv1 from being the sole, inaccessible
		// reference to an otherwise GC-able value during the next channel receive.
		// See issue 15281.
		body = append(body, ir.NewAssignStmt(base.Pos, hv1, nil))

	case k == types.TSTRING:
		// Transform string range statements like "for v1, v2 = range a" into
		//
		// ha := a
		// for hv1 := 0; hv1 < len(ha); {
		//   hv1t := hv1
		//   hv2 := rune(ha[hv1])
		//   if hv2 < utf8.RuneSelf {
		//      hv1++
		//   } else {
		//      hv2, hv1 = decoderune(ha, hv1)
		//   }
		//   v1, v2 = hv1t, hv2
		//   // original body
		// }

		// order.stmt arranged for a copy of the string variable.
		ha := a

		hv1 := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TINT])
		hv1t := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TINT])
		hv2 := typecheck.TempAt(base.Pos, ir.CurFunc, types.RuneType)

		// hv1 := 0
		init = append(init, ir.NewAssignStmt(base.Pos, hv1, nil))

		// hv1 < len(ha)
		nfor.Cond = ir.NewBinaryExpr(base.Pos, ir.OLT, hv1, ir.NewUnaryExpr(base.Pos, ir.OLEN, ha))

		if v1 != nil {
			// hv1t = hv1
			body = append(body, ir.NewAssignStmt(base.Pos, hv1t, hv1))
		}

		// hv2 := rune(ha[hv1])
		nind := ir.NewIndexExpr(base.Pos, ha, hv1)
		nind.SetBounded(true)
		body = append(body, ir.NewAssignStmt(base.Pos, hv2, typecheck.Conv(nind, types.RuneType)))

		// if hv2 < utf8.RuneSelf
		nif := ir.NewIfStmt(base.Pos, nil, nil, nil)

		// On x86, hv2 <= 127 is shorter to encode than hv2 < 128
		// Doesn't hurt other archs.
		nif.Cond = ir.NewBinaryExpr(base.Pos, ir.OLE, hv2, ir.NewInt(base.Pos, utf8.RuneSelf-1))

		// hv1++
		nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, hv1, ir.NewBinaryExpr(base.Pos, ir.OADD, hv1, ir.NewInt(base.Pos, 1)))}

		// } else {
		// hv2, hv1 = decoderune(ha, hv1)
		fn := typecheck.LookupRuntime("decoderune")
		call := mkcall1(fn, fn.Type().ResultsTuple(), &nif.Else, ha, hv1)
		a := ir.NewAssignListStmt(base.Pos, ir.OAS2, []ir.Node{hv2, hv1}, []ir.Node{call})
		nif.Else.Append(a)

		body = append(body, nif)

		if v1 != nil {
			if v2 != nil {
				// v1, v2 = hv1t, hv2
				body = append(body, rangeAssign2(nrange, hv1t, hv2))
			} else {
				// v1 = hv1t
				body = append(body, rangeAssign(nrange, hv1t))
			}
		}
	}

	// Typecheck everything we constructed, then splice the original
	// range body after the per-iteration assignments.
	typecheck.Stmts(init)

	nfor.PtrInit().Append(init...)

	typecheck.Stmts(nfor.Cond.Init())

	nfor.Cond = typecheck.Expr(nfor.Cond)
	nfor.Cond = typecheck.DefaultLit(nfor.Cond, nil)
	nfor.Post = typecheck.Stmt(nfor.Post)
	typecheck.Stmts(body)
	nfor.Body.Append(body...)
	nfor.Body.Append(nrange.Body...)

	var n ir.Node = nfor

	// Walk the freshly built loop so it gets the normal lowering passes.
	n = walkStmt(n)

	base.Pos = lno
	return n
}
   398  
   399  // rangeAssign returns "n.Key = key".
   400  func rangeAssign(n *ir.RangeStmt, key ir.Node) ir.Node {
   401  	key = rangeConvert(n, n.Key.Type(), key, n.KeyTypeWord, n.KeySrcRType)
   402  	return ir.NewAssignStmt(n.Pos(), n.Key, key)
   403  }
   404  
   405  // rangeAssign2 returns "n.Key, n.Value = key, value".
   406  func rangeAssign2(n *ir.RangeStmt, key, value ir.Node) ir.Node {
   407  	// Use OAS2 to correctly handle assignments
   408  	// of the form "v1, a[v1] = range".
   409  	key = rangeConvert(n, n.Key.Type(), key, n.KeyTypeWord, n.KeySrcRType)
   410  	value = rangeConvert(n, n.Value.Type(), value, n.ValueTypeWord, n.ValueSrcRType)
   411  	return ir.NewAssignListStmt(n.Pos(), ir.OAS2, []ir.Node{n.Key, n.Value}, []ir.Node{key, value})
   412  }
   413  
   414  // rangeConvert returns src, converted to dst if necessary. If a
   415  // conversion is necessary, then typeWord and srcRType are copied to
   416  // their respective ConvExpr fields.
   417  func rangeConvert(nrange *ir.RangeStmt, dst *types.Type, src, typeWord, srcRType ir.Node) ir.Node {
   418  	src = typecheck.Expr(src)
   419  	if dst.Kind() == types.TBLANK || types.Identical(dst, src.Type()) {
   420  		return src
   421  	}
   422  
   423  	n := ir.NewConvExpr(nrange.Pos(), ir.OCONV, dst, src)
   424  	n.TypeWord = typeWord
   425  	n.SrcRType = srcRType
   426  	return typecheck.Expr(n)
   427  }
   428  
   429  // isMapClear checks if n is of the form:
   430  //
   431  //	for k := range m {
   432  //		delete(m, k)
   433  //	}
   434  //
   435  // where == for keys of map m is reflexive.
   436  func isMapClear(n *ir.RangeStmt) bool {
   437  	if base.Flag.N != 0 || base.Flag.Cfg.Instrumenting {
   438  		return false
   439  	}
   440  
   441  	t := n.X.Type()
   442  	if n.Op() != ir.ORANGE || t.Kind() != types.TMAP || n.Key == nil || n.Value != nil {
   443  		return false
   444  	}
   445  
   446  	k := n.Key
   447  	// Require k to be a new variable name.
   448  	if !ir.DeclaredBy(k, n) {
   449  		return false
   450  	}
   451  
   452  	if len(n.Body) != 1 {
   453  		return false
   454  	}
   455  
   456  	stmt := n.Body[0] // only stmt in body
   457  	if stmt == nil || stmt.Op() != ir.ODELETE {
   458  		return false
   459  	}
   460  
   461  	m := n.X
   462  	if delete := stmt.(*ir.CallExpr); !ir.SameSafeExpr(delete.Args[0], m) || !ir.SameSafeExpr(delete.Args[1], k) {
   463  		return false
   464  	}
   465  
   466  	// Keys where equality is not reflexive can not be deleted from maps.
   467  	if !types.IsReflexive(t.Key()) {
   468  		return false
   469  	}
   470  
   471  	return true
   472  }
   473  
   474  // mapRangeClear constructs a call to runtime.mapclear for the map range idiom.
   475  func mapRangeClear(nrange *ir.RangeStmt) ir.Node {
   476  	m := nrange.X
   477  	origPos := ir.SetPos(m)
   478  	defer func() { base.Pos = origPos }()
   479  
   480  	return mapClear(m, reflectdata.RangeMapRType(base.Pos, nrange))
   481  }
   482  
   483  // mapClear constructs a call to runtime.mapclear for the map m.
   484  func mapClear(m, rtyp ir.Node) ir.Node {
   485  	t := m.Type()
   486  
   487  	// instantiate mapclear(typ *type, hmap map[any]any)
   488  	fn := typecheck.LookupRuntime("mapclear", t.Key(), t.Elem())
   489  	n := mkcallstmt1(fn, rtyp, m)
   490  	return walkStmt(typecheck.Stmt(n))
   491  }
   492  
   493  // Lower n into runtime·memclr if possible, for
   494  // fast zeroing of slices and arrays (issue 5373).
   495  // Look for instances of
   496  //
   497  //	for i := range a {
   498  //		a[i] = zero
   499  //	}
   500  //
   501  // in which the evaluation of a is side-effect-free.
   502  //
   503  // Parameters are as in walkRange: "for v1, v2 = range a".
   504  func arrayRangeClear(loop *ir.RangeStmt, v1, v2, a ir.Node) ir.Node {
   505  	if base.Flag.N != 0 || base.Flag.Cfg.Instrumenting {
   506  		return nil
   507  	}
   508  
   509  	if v1 == nil || v2 != nil {
   510  		return nil
   511  	}
   512  
   513  	if len(loop.Body) != 1 || loop.Body[0] == nil {
   514  		return nil
   515  	}
   516  
   517  	stmt1 := loop.Body[0] // only stmt in body
   518  	if stmt1.Op() != ir.OAS {
   519  		return nil
   520  	}
   521  	stmt := stmt1.(*ir.AssignStmt)
   522  	if stmt.X.Op() != ir.OINDEX {
   523  		return nil
   524  	}
   525  	lhs := stmt.X.(*ir.IndexExpr)
   526  	x := lhs.X
   527  
   528  	// Get constant number of iterations for int and array cases.
   529  	n := int64(-1)
   530  	if ir.IsConst(a, constant.Int) {
   531  		n = ir.Int64Val(a)
   532  	} else if a.Type().IsArray() {
   533  		n = a.Type().NumElem()
   534  	} else if a.Type().IsPtr() && a.Type().Elem().IsArray() {
   535  		n = a.Type().Elem().NumElem()
   536  	}
   537  
   538  	if n >= 0 {
   539  		// Int/Array case.
   540  		if !x.Type().IsArray() {
   541  			return nil
   542  		}
   543  		if x.Type().NumElem() != n {
   544  			return nil
   545  		}
   546  	} else {
   547  		// Slice case.
   548  		if !ir.SameSafeExpr(x, a) {
   549  			return nil
   550  		}
   551  	}
   552  
   553  	if !ir.SameSafeExpr(lhs.Index, v1) {
   554  		return nil
   555  	}
   556  
   557  	if !ir.IsZero(stmt.Y) {
   558  		return nil
   559  	}
   560  
   561  	return arrayClear(stmt.Pos(), x, loop)
   562  }
   563  
   564  // arrayClear constructs a call to runtime.memclr for fast zeroing of slices and arrays.
   565  func arrayClear(wbPos src.XPos, a ir.Node, nrange *ir.RangeStmt) ir.Node {
   566  	elemsize := typecheck.RangeExprType(a.Type()).Elem().Size()
   567  	if elemsize <= 0 {
   568  		return nil
   569  	}
   570  
   571  	// Convert to
   572  	// if len(a) != 0 {
   573  	// 	hp = &a[0]
   574  	// 	hn = len(a)*sizeof(elem(a))
   575  	// 	memclr{NoHeap,Has}Pointers(hp, hn)
   576  	// 	i = len(a) - 1
   577  	// }
   578  	n := ir.NewIfStmt(base.Pos, nil, nil, nil)
   579  	n.Cond = ir.NewBinaryExpr(base.Pos, ir.ONE, ir.NewUnaryExpr(base.Pos, ir.OLEN, a), ir.NewInt(base.Pos, 0))
   580  
   581  	// hp = &a[0]
   582  	hp := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TUNSAFEPTR])
   583  
   584  	ix := ir.NewIndexExpr(base.Pos, a, ir.NewInt(base.Pos, 0))
   585  	ix.SetBounded(true)
   586  	addr := typecheck.ConvNop(typecheck.NodAddr(ix), types.Types[types.TUNSAFEPTR])
   587  	n.Body.Append(ir.NewAssignStmt(base.Pos, hp, addr))
   588  
   589  	// hn = len(a) * sizeof(elem(a))
   590  	hn := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TUINTPTR])
   591  	mul := typecheck.Conv(ir.NewBinaryExpr(base.Pos, ir.OMUL, ir.NewUnaryExpr(base.Pos, ir.OLEN, a), ir.NewInt(base.Pos, elemsize)), types.Types[types.TUINTPTR])
   592  	n.Body.Append(ir.NewAssignStmt(base.Pos, hn, mul))
   593  
   594  	var fn ir.Node
   595  	if a.Type().Elem().HasPointers() {
   596  		// memclrHasPointers(hp, hn)
   597  		ir.CurFunc.SetWBPos(wbPos)
   598  		fn = mkcallstmt("memclrHasPointers", hp, hn)
   599  	} else {
   600  		// memclrNoHeapPointers(hp, hn)
   601  		fn = mkcallstmt("memclrNoHeapPointers", hp, hn)
   602  	}
   603  
   604  	n.Body.Append(fn)
   605  
   606  	// For array range clear, also set "i = len(a) - 1"
   607  	if nrange != nil {
   608  		idx := ir.NewAssignStmt(base.Pos, nrange.Key, typecheck.Conv(ir.NewBinaryExpr(base.Pos, ir.OSUB, ir.NewUnaryExpr(base.Pos, ir.OLEN, a), ir.NewInt(base.Pos, 1)), nrange.Key.Type()))
   609  		n.Body.Append(idx)
   610  	}
   611  
   612  	n.Cond = typecheck.Expr(n.Cond)
   613  	n.Cond = typecheck.DefaultLit(n.Cond, nil)
   614  	typecheck.Stmts(n.Body)
   615  	return walkStmt(n)
   616  }
   617  

View as plain text