Source file src/cmd/compile/internal/walk/builtin.go

// Copyright 2009 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package walk
     6  
     7  import (
     8  	"fmt"
     9  	"go/constant"
    10  	"go/token"
    11  	"internal/abi"
    12  	"internal/buildcfg"
    13  	"strings"
    14  
    15  	"cmd/compile/internal/base"
    16  	"cmd/compile/internal/escape"
    17  	"cmd/compile/internal/ir"
    18  	"cmd/compile/internal/reflectdata"
    19  	"cmd/compile/internal/typecheck"
    20  	"cmd/compile/internal/types"
    21  )
    22  
// Rewrite append(src, x, y, z) so that any side effects in
// x, y, z (including runtime panics) are evaluated in
// initialization statements before the append.
// For normal code generation, stop there and leave the
// rest to ssagen.
//
// For race detector, expand append(src, a [, b]* ) to
//
//	init {
//	  s := src
//	  const argc = len(args) - 1
//	  newLen := s.len + argc
//	  if uint(newLen) <= uint(s.cap) {
//	    s = s[:newLen]
//	  } else {
//	    s = growslice(s.ptr, newLen, s.cap, argc, elemType)
//	  }
//	  s[s.len - argc] = a
//	  s[s.len - argc + 1] = b
//	  ...
//	}
//	s
func walkAppend(n *ir.CallExpr, init *ir.Nodes, dst ir.Node) ir.Node {
	// If the destination of the assignment is not the same expression
	// as the slice being appended to, make sure the source slice is
	// evaluated (with its side effects) before the new elements.
	if !ir.SameSafeExpr(dst, n.Args[0]) {
		n.Args[0] = safeExpr(n.Args[0], init)
		n.Args[0] = walkExpr(n.Args[0], init)
	}
	walkExprListSafe(n.Args[1:], init)

	nsrc := n.Args[0]

	// walkExprListSafe will leave OINDEX (s[n]) alone if both s
	// and n are name or literal, but those may index the slice we're
	// modifying here. Fix explicitly.
	// Using cheapExpr also makes sure that the evaluation
	// of all arguments (and especially any panics) happen
	// before we begin to modify the slice in a visible way.
	ls := n.Args[1:]
	for i, n := range ls {
		n = cheapExpr(n, init)
		if !types.Identical(n.Type(), nsrc.Type().Elem()) {
			// Convert the argument to the slice's element type
			// (e.g. untyped constants, assignable named types).
			n = typecheck.AssignConv(n, nsrc.Type().Elem(), "append")
			n = walkExpr(n, init)
		}
		ls[i] = n
	}

	argc := len(n.Args) - 1
	if argc < 1 {
		// append(s) with no extra arguments: the result is just s.
		return nsrc
	}

	// General case, with no function calls left as arguments.
	// Leave for ssagen, except that instrumentation requires the old form.
	if !base.Flag.Cfg.Instrumenting || base.Flag.CompilingRuntime {
		return n
	}

	// Instrumented build: expand the append inline as described in the
	// function comment so the race detector sees the loads and stores.
	var l []ir.Node

	// s = slice to append to
	s := typecheck.TempAt(base.Pos, ir.CurFunc, nsrc.Type())
	l = append(l, ir.NewAssignStmt(base.Pos, s, nsrc))

	// num = number of things to append
	num := ir.NewInt(base.Pos, int64(argc))

	// newLen := s.len + num
	newLen := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TINT])
	l = append(l, ir.NewAssignStmt(base.Pos, newLen, ir.NewBinaryExpr(base.Pos, ir.OADD, ir.NewUnaryExpr(base.Pos, ir.OLEN, s), num)))

	// if uint(newLen) <= uint(s.cap)
	nif := ir.NewIfStmt(base.Pos, nil, nil, nil)
	nif.Cond = ir.NewBinaryExpr(base.Pos, ir.OLE, typecheck.Conv(newLen, types.Types[types.TUINT]), typecheck.Conv(ir.NewUnaryExpr(base.Pos, ir.OCAP, s), types.Types[types.TUINT]))
	nif.Likely = true

	// then { s = s[:n] }
	slice := ir.NewSliceExpr(base.Pos, ir.OSLICE, s, nil, newLen, nil)
	slice.SetBounded(true) // bounds already established by the cap check above
	nif.Body = []ir.Node{
		ir.NewAssignStmt(base.Pos, s, slice),
	}

	// else { s = growslice(s.ptr, n, s.cap, a, T) }
	nif.Else = []ir.Node{
		ir.NewAssignStmt(base.Pos, s, walkGrowslice(s, nif.PtrInit(),
			ir.NewUnaryExpr(base.Pos, ir.OSPTR, s),
			newLen,
			ir.NewUnaryExpr(base.Pos, ir.OCAP, s),
			num)),
	}

	l = append(l, nif)

	ls = n.Args[1:]
	for i, n := range ls {
		// s[s.len-argc+i] = arg
		ix := ir.NewIndexExpr(base.Pos, s, ir.NewBinaryExpr(base.Pos, ir.OSUB, newLen, ir.NewInt(base.Pos, int64(argc-i))))
		ix.SetBounded(true)
		l = append(l, ir.NewAssignStmt(base.Pos, ix, n))
	}

	typecheck.Stmts(l)
	walkStmtList(l)
	init.Append(l...)
	return s
}
   130  
   131  // growslice(ptr *T, newLen, oldCap, num int, <type>) (ret []T)
   132  func walkGrowslice(slice *ir.Name, init *ir.Nodes, oldPtr, newLen, oldCap, num ir.Node) *ir.CallExpr {
   133  	elemtype := slice.Type().Elem()
   134  	fn := typecheck.LookupRuntime("growslice", elemtype, elemtype)
   135  	elemtypeptr := reflectdata.TypePtrAt(base.Pos, elemtype)
   136  	return mkcall1(fn, slice.Type(), init, oldPtr, newLen, oldCap, num, elemtypeptr)
   137  }
   138  
   139  // walkClear walks an OCLEAR node.
   140  func walkClear(n *ir.UnaryExpr) ir.Node {
   141  	typ := n.X.Type()
   142  	switch {
   143  	case typ.IsSlice():
   144  		if n := arrayClear(n.X.Pos(), n.X, nil); n != nil {
   145  			return n
   146  		}
   147  		// If n == nil, we are clearing an array which takes zero memory, do nothing.
   148  		return ir.NewBlockStmt(n.Pos(), nil)
   149  	case typ.IsMap():
   150  		return mapClear(n.X, reflectdata.TypePtrAt(n.X.Pos(), n.X.Type()))
   151  	}
   152  	panic("unreachable")
   153  }
   154  
   155  // walkClose walks an OCLOSE node.
   156  func walkClose(n *ir.UnaryExpr, init *ir.Nodes) ir.Node {
   157  	return mkcall1(chanfn("closechan", 1, n.X.Type()), nil, init, n.X)
   158  }
   159  
// Lower copy(a, b) to a memmove call or a runtime call.
//
//	init {
//	  n := len(a)
//	  if n > len(b) { n = len(b) }
//	  if a.ptr != b.ptr { memmove(a.ptr, b.ptr, n*sizeof(elem(a))) }
//	}
//	n;
//
// Also works if b is a string.
func walkCopy(n *ir.BinaryExpr, init *ir.Nodes, runtimecall bool) ir.Node {
	if n.X.Type().Elem().HasPointers() {
		// Pointer-bearing elements need write barriers; delegate the
		// whole copy to the runtime's typedslicecopy.
		ir.CurFunc.SetWBPos(n.Pos())
		fn := writebarrierfn("typedslicecopy", n.X.Type().Elem(), n.Y.Type().Elem())
		n.X = cheapExpr(n.X, init)
		ptrL, lenL := backingArrayPtrLen(n.X)
		n.Y = cheapExpr(n.Y, init)
		ptrR, lenR := backingArrayPtrLen(n.Y)
		return mkcall1(fn, n.Type(), init, reflectdata.CopyElemRType(base.Pos, n), ptrL, lenL, ptrR, lenR)
	}

	if runtimecall {
		// rely on runtime to instrument:
		//  copy(n.Left, n.Right)
		// n.Right can be a slice or string.

		n.X = cheapExpr(n.X, init)
		ptrL, lenL := backingArrayPtrLen(n.X)
		n.Y = cheapExpr(n.Y, init)
		ptrR, lenR := backingArrayPtrLen(n.Y)

		fn := typecheck.LookupRuntime("slicecopy", ptrL.Type().Elem(), ptrR.Type().Elem())

		return mkcall1(fn, n.Type(), init, ptrL, lenL, ptrR, lenR, ir.NewInt(base.Pos, n.X.Type().Elem().Size()))
	}

	// Inline expansion: compute the copy length explicitly, then emit a
	// guarded memmove, as sketched in the function comment above.
	n.X = walkExpr(n.X, init)
	n.Y = walkExpr(n.Y, init)
	nl := typecheck.TempAt(base.Pos, ir.CurFunc, n.X.Type())
	nr := typecheck.TempAt(base.Pos, ir.CurFunc, n.Y.Type())
	var l []ir.Node
	l = append(l, ir.NewAssignStmt(base.Pos, nl, n.X))
	l = append(l, ir.NewAssignStmt(base.Pos, nr, n.Y))

	nfrm := ir.NewUnaryExpr(base.Pos, ir.OSPTR, nr)
	nto := ir.NewUnaryExpr(base.Pos, ir.OSPTR, nl)

	nlen := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TINT])

	// n = len(to)
	l = append(l, ir.NewAssignStmt(base.Pos, nlen, ir.NewUnaryExpr(base.Pos, ir.OLEN, nl)))

	// if n > len(frm) { n = len(frm) }
	nif := ir.NewIfStmt(base.Pos, nil, nil, nil)

	nif.Cond = ir.NewBinaryExpr(base.Pos, ir.OGT, nlen, ir.NewUnaryExpr(base.Pos, ir.OLEN, nr))
	nif.Body.Append(ir.NewAssignStmt(base.Pos, nlen, ir.NewUnaryExpr(base.Pos, ir.OLEN, nr)))
	l = append(l, nif)

	// if to.ptr != frm.ptr { memmove( ... ) }
	// Skipping the memmove when source and destination share a pointer
	// avoids touching memory when copy is a no-op.
	ne := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.ONE, nto, nfrm), nil, nil)
	ne.Likely = true
	l = append(l, ne)

	fn := typecheck.LookupRuntime("memmove", nl.Type().Elem(), nl.Type().Elem())
	nwid := ir.Node(typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TUINTPTR]))
	setwid := ir.NewAssignStmt(base.Pos, nwid, typecheck.Conv(nlen, types.Types[types.TUINTPTR]))
	ne.Body.Append(setwid)
	// byte count = element count * element size
	nwid = ir.NewBinaryExpr(base.Pos, ir.OMUL, nwid, ir.NewInt(base.Pos, nl.Type().Elem().Size()))
	call := mkcall1(fn, nil, init, nto, nfrm, nwid)
	ne.Body.Append(call)

	typecheck.Stmts(l)
	walkStmtList(l)
	init.Append(l...)
	return nlen
}
   237  
   238  // walkDelete walks an ODELETE node.
   239  func walkDelete(init *ir.Nodes, n *ir.CallExpr) ir.Node {
   240  	init.Append(ir.TakeInit(n)...)
   241  	map_ := n.Args[0]
   242  	key := n.Args[1]
   243  	map_ = walkExpr(map_, init)
   244  	key = walkExpr(key, init)
   245  
   246  	t := map_.Type()
   247  	fast := mapfast(t)
   248  	key = mapKeyArg(fast, n, key, false)
   249  	return mkcall1(mapfndel(mapdelete[fast], t), nil, init, reflectdata.DeleteMapRType(base.Pos, n), map_, key)
   250  }
   251  
// walkLenCap walks an OLEN or OCAP node, applying several special-case
// rewrites before falling back to the generic form.
func walkLenCap(n *ir.UnaryExpr, init *ir.Nodes) ir.Node {
	if isRuneCount(n) {
		// Replace len([]rune(string)) with runtime.countrunes(string).
		return mkcall("countrunes", n.Type(), init, typecheck.Conv(n.X.(*ir.ConvExpr).X, types.Types[types.TSTRING]))
	}
	if isByteCount(n) {
		// Replace len([]byte(x)) with the length of the conversion's
		// operand directly, eliding the conversion's copy.
		conv := n.X.(*ir.ConvExpr)
		walkStmtList(conv.Init())
		init.Append(ir.TakeInit(conv)...)
		_, len := backingArrayPtrLen(cheapExpr(conv.X, init))
		return len
	}
	if isChanLenCap(n) {
		name := "chanlen"
		if n.Op() == ir.OCAP {
			name = "chancap"
		}
		// cannot use chanfn - chanlen/chancap take any, not chan any,
		// because they accept both send-only and recv-only channels.
		fn := typecheck.LookupRuntime(name, n.X.Type())
		return mkcall1(fn, n.Type(), init, n.X)
	}

	n.X = walkExpr(n.X, init)

	// replace len(*[10]int) with 10.
	// delayed until now to preserve side effects.
	t := n.X.Type()
	if t.IsPtr() {
		t = t.Elem()
	}
	if t.IsArray() {
		// evaluate any side effects in n.X. See issue 72844.
		appendWalkStmt(init, ir.NewAssignStmt(base.Pos, ir.BlankNode, n.X))

		con := ir.NewConstExpr(constant.MakeInt64(t.NumElem()), n)
		con.SetTypecheck(1)
		return con
	}
	return n
}
   294  
   295  // walkMakeChan walks an OMAKECHAN node.
   296  func walkMakeChan(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
   297  	// When size fits into int, use makechan instead of
   298  	// makechan64, which is faster and shorter on 32 bit platforms.
   299  	size := n.Len
   300  	fnname := "makechan64"
   301  	argtype := types.Types[types.TINT64]
   302  
   303  	// Type checking guarantees that TIDEAL size is positive and fits in an int.
   304  	// The case of size overflow when converting TUINT or TUINTPTR to TINT
   305  	// will be handled by the negative range checks in makechan during runtime.
   306  	if size.Type().IsKind(types.TIDEAL) || size.Type().Size() <= types.Types[types.TUINT].Size() {
   307  		fnname = "makechan"
   308  		argtype = types.Types[types.TINT]
   309  	}
   310  
   311  	return mkcall1(chanfn(fnname, 1, n.Type()), n.Type(), init, reflectdata.MakeChanRType(base.Pos, n), typecheck.Conv(size, argtype))
   312  }
   313  
   314  // walkMakeMap walks an OMAKEMAP node.
   315  func walkMakeMap(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
   316  	if buildcfg.Experiment.SwissMap {
   317  		return walkMakeSwissMap(n, init)
   318  	}
   319  	return walkMakeOldMap(n, init)
   320  }
   321  
// walkMakeSwissMap lowers make(map[K]V, hint) for the SwissMap
// implementation. Non-escaping maps (and, for small hints, their first
// group) are allocated on the stack; otherwise runtime.makemap or
// makemap_small is called.
func walkMakeSwissMap(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
	t := n.Type()
	mapType := reflectdata.SwissMapType()
	hint := n.Len

	// var m *Map
	var m ir.Node
	if n.Esc() == ir.EscNone {
		// Allocate hmap on stack.

		// var mv Map
		// m = &mv
		m = stackTempAddr(init, mapType)

		// Allocate one group pointed to by m.dirPtr on stack if hint
		// is not larger than SwissMapGroupSlots. In case hint is
		// larger, runtime.makemap will allocate on the heap.
		// Maximum key and elem size is 128 bytes, larger objects
		// are stored with an indirection. So max bucket size is 2048+eps.
		if !ir.IsConst(hint, constant.Int) ||
			constant.Compare(hint.Val(), token.LEQ, constant.MakeInt64(abi.SwissMapGroupSlots)) {

			// In case hint is larger than SwissMapGroupSlots
			// runtime.makemap will allocate on the heap, see
			// #20184
			//
			// if hint <= abi.SwissMapGroupSlots {
			//     var gv group
			//     g = &gv
			//     g.ctrl = abi.SwissMapCtrlEmpty
			//     m.dirPtr = g
			// }

			nif := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OLE, hint, ir.NewInt(base.Pos, abi.SwissMapGroupSlots)), nil, nil)
			nif.Likely = true

			groupType := reflectdata.SwissMapGroupType(t)

			// var gv group
			// g = &gv
			g := stackTempAddr(&nif.Body, groupType)

			// Can't use ir.NewInt because bit 63 is set, which
			// makes conversion to uint64 upset.
			empty := ir.NewBasicLit(base.Pos, types.UntypedInt, constant.MakeUint64(abi.SwissMapCtrlEmpty))

			// g.ctrl = abi.SwissMapCtrlEmpty
			csym := groupType.Field(0).Sym // g.ctrl see reflectdata/map_swiss.go
			ca := ir.NewAssignStmt(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, g, csym), empty)
			nif.Body.Append(ca)

			// m.dirPtr = g
			dsym := mapType.Field(2).Sym // m.dirPtr see reflectdata/map_swiss.go
			na := ir.NewAssignStmt(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, m, dsym), typecheck.ConvNop(g, types.Types[types.TUNSAFEPTR]))
			nif.Body.Append(na)
			appendWalkStmt(init, nif)
		}
	}

	if ir.IsConst(hint, constant.Int) && constant.Compare(hint.Val(), token.LEQ, constant.MakeInt64(abi.SwissMapGroupSlots)) {
		// Handling make(map[any]any) and
		// make(map[any]any, hint) where hint <= abi.SwissMapGroupSlots
		// specially allows for faster map initialization and
		// improves binary size by using calls with fewer arguments.
		// For hint <= abi.SwissMapGroupSlots no groups will be
		// allocated by makemap. Therefore, no groups need to be
		// allocated in this code path.
		if n.Esc() == ir.EscNone {
			// Only need to initialize m.seed since
			// m map has been allocated on the stack already.
			// m.seed = uintptr(rand())
			rand := mkcall("rand", types.Types[types.TUINT64], init)
			seedSym := mapType.Field(1).Sym // m.seed see reflectdata/map_swiss.go
			appendWalkStmt(init, ir.NewAssignStmt(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, m, seedSym), typecheck.Conv(rand, types.Types[types.TUINTPTR])))
			return typecheck.ConvNop(m, t)
		}
		// Call runtime.makemap_small to allocate a
		// map on the heap and initialize the map's seed field.
		fn := typecheck.LookupRuntime("makemap_small", t.Key(), t.Elem())
		return mkcall1(fn, n.Type(), init)
	}

	if n.Esc() != ir.EscNone {
		// Escaping map: pass nil so makemap allocates the Map itself.
		m = typecheck.NodNil()
	}

	// Map initialization with a variable or large hint is
	// more complicated. We therefore generate a call to
	// runtime.makemap to initialize hmap and allocate the
	// map buckets.

	// When hint fits into int, use makemap instead of
	// makemap64, which is faster and shorter on 32 bit platforms.
	fnname := "makemap64"
	argtype := types.Types[types.TINT64]

	// Type checking guarantees that TIDEAL hint is positive and fits in an int.
	// See checkmake call in TMAP case of OMAKE case in OpSwitch in typecheck1 function.
	// The case of hint overflow when converting TUINT or TUINTPTR to TINT
	// will be handled by the negative range checks in makemap during runtime.
	if hint.Type().IsKind(types.TIDEAL) || hint.Type().Size() <= types.Types[types.TUINT].Size() {
		fnname = "makemap"
		argtype = types.Types[types.TINT]
	}

	fn := typecheck.LookupRuntime(fnname, mapType, t.Key(), t.Elem())
	return mkcall1(fn, n.Type(), init, reflectdata.MakeMapRType(base.Pos, n), typecheck.Conv(hint, argtype), m)
}
   430  
// walkMakeOldMap lowers make(map[K]V, hint) for the pre-SwissMap hmap
// implementation. Non-escaping maps (and, for small hints, their first
// bucket) are allocated on the stack; otherwise runtime.makemap or
// makemap_small is called.
func walkMakeOldMap(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
	t := n.Type()
	hmapType := reflectdata.OldMapType()
	hint := n.Len

	// var h *hmap
	var h ir.Node
	if n.Esc() == ir.EscNone {
		// Allocate hmap on stack.

		// var hv hmap
		// h = &hv
		h = stackTempAddr(init, hmapType)

		// Allocate one bucket pointed to by hmap.buckets on stack if hint
		// is not larger than BUCKETSIZE. In case hint is larger than
		// BUCKETSIZE runtime.makemap will allocate the buckets on the heap.
		// Maximum key and elem size is 128 bytes, larger objects
		// are stored with an indirection. So max bucket size is 2048+eps.
		if !ir.IsConst(hint, constant.Int) ||
			constant.Compare(hint.Val(), token.LEQ, constant.MakeInt64(abi.OldMapBucketCount)) {

			// In case hint is larger than BUCKETSIZE runtime.makemap
			// will allocate the buckets on the heap, see #20184
			//
			// if hint <= BUCKETSIZE {
			//     var bv bmap
			//     b = &bv
			//     h.buckets = b
			// }

			nif := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OLE, hint, ir.NewInt(base.Pos, abi.OldMapBucketCount)), nil, nil)
			nif.Likely = true

			// var bv bmap
			// b = &bv
			b := stackTempAddr(&nif.Body, reflectdata.OldMapBucketType(t))

			// h.buckets = b
			bsym := hmapType.Field(5).Sym // hmap.buckets see reflect.go:hmap
			na := ir.NewAssignStmt(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, h, bsym), typecheck.ConvNop(b, types.Types[types.TUNSAFEPTR]))
			nif.Body.Append(na)
			appendWalkStmt(init, nif)
		}
	}

	if ir.IsConst(hint, constant.Int) && constant.Compare(hint.Val(), token.LEQ, constant.MakeInt64(abi.OldMapBucketCount)) {
		// Handling make(map[any]any) and
		// make(map[any]any, hint) where hint <= BUCKETSIZE
		// specially allows for faster map initialization and
		// improves binary size by using calls with fewer arguments.
		// For hint <= BUCKETSIZE overLoadFactor(hint, 0) is false
		// and no buckets will be allocated by makemap. Therefore,
		// no buckets need to be allocated in this code path.
		if n.Esc() == ir.EscNone {
			// Only need to initialize h.hash0 since
			// hmap h has been allocated on the stack already.
			// h.hash0 = rand32()
			rand := mkcall("rand32", types.Types[types.TUINT32], init)
			hashsym := hmapType.Field(4).Sym // hmap.hash0 see reflect.go:hmap
			appendWalkStmt(init, ir.NewAssignStmt(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, h, hashsym), rand))
			return typecheck.ConvNop(h, t)
		}
		// Call runtime.makemap_small to allocate an
		// hmap on the heap and initialize hmap's hash0 field.
		fn := typecheck.LookupRuntime("makemap_small", t.Key(), t.Elem())
		return mkcall1(fn, n.Type(), init)
	}

	if n.Esc() != ir.EscNone {
		// Escaping map: pass nil so makemap allocates the hmap itself.
		h = typecheck.NodNil()
	}
	// Map initialization with a variable or large hint is
	// more complicated. We therefore generate a call to
	// runtime.makemap to initialize hmap and allocate the
	// map buckets.

	// When hint fits into int, use makemap instead of
	// makemap64, which is faster and shorter on 32 bit platforms.
	fnname := "makemap64"
	argtype := types.Types[types.TINT64]

	// Type checking guarantees that TIDEAL hint is positive and fits in an int.
	// See checkmake call in TMAP case of OMAKE case in OpSwitch in typecheck1 function.
	// The case of hint overflow when converting TUINT or TUINTPTR to TINT
	// will be handled by the negative range checks in makemap during runtime.
	if hint.Type().IsKind(types.TIDEAL) || hint.Type().Size() <= types.Types[types.TUINT].Size() {
		fnname = "makemap"
		argtype = types.Types[types.TINT]
	}

	fn := typecheck.LookupRuntime(fnname, hmapType, t.Key(), t.Elem())
	return mkcall1(fn, n.Type(), init, reflectdata.MakeMapRType(base.Pos, n), typecheck.Conv(hint, argtype), h)
}
   525  
// walkMakeSlice walks an OMAKESLICE node. Non-escaping slices with a
// small constant capacity are backed by a stack array; non-escaping
// slices with variable capacity may get a stack fast path guarded by a
// runtime size check; everything else becomes a runtime makeslice call.
func walkMakeSlice(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
	len := n.Len
	cap := n.Cap
	len = safeExpr(len, init)
	if cap != nil {
		cap = safeExpr(cap, init)
	} else {
		// make([]T, n) — cap defaults to len.
		cap = len
	}
	t := n.Type()
	if t.Elem().NotInHeap() {
		base.Errorf("%v can't be allocated in Go; it is incomplete (or unallocatable)", t.Elem())
	}

	tryStack := false
	if n.Esc() == ir.EscNone {
		if why := escape.HeapAllocReason(n); why != "" {
			base.Fatalf("%v has EscNone, but %v", n, why)
		}
		if ir.IsSmallIntConst(cap) {
			// Constant backing array - allocate it and slice it.
			cap := typecheck.IndexConst(cap)
			// Note that len might not be constant. If it isn't, check for panics.
			// cap is constrained to [0,2^31) or [0,2^63) depending on whether
			// we're in 32-bit or 64-bit systems. So it's safe to do:
			//
			// if uint64(len) > cap {
			//     if len < 0 { panicmakeslicelen() }
			//     panicmakeslicecap()
			// }
			nif := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OGT, typecheck.Conv(len, types.Types[types.TUINT64]), ir.NewInt(base.Pos, cap)), nil, nil)
			niflen := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OLT, len, ir.NewInt(base.Pos, 0)), nil, nil)
			niflen.Body = []ir.Node{mkcall("panicmakeslicelen", nil, init)}
			nif.Body.Append(niflen, mkcall("panicmakeslicecap", nil, init))
			init.Append(typecheck.Stmt(nif))

			// var arr [cap]E
			// s = arr[:len]
			t := types.NewArray(t.Elem(), cap) // [cap]E
			arr := typecheck.TempAt(base.Pos, ir.CurFunc, t)
			appendWalkStmt(init, ir.NewAssignStmt(base.Pos, arr, nil))    // zero temp
			s := ir.NewSliceExpr(base.Pos, ir.OSLICE, arr, nil, len, nil) // arr[:len]
			// The conv is necessary in case n.Type is named.
			return walkExpr(typecheck.Expr(typecheck.Conv(s, n.Type())), init)
		}
		// Check that this optimization is enabled in general and for this node.
		tryStack = base.Flag.N == 0 && base.VariableMakeHash.MatchPos(n.Pos(), nil)
	}

	// The final result is assigned to this variable.
	slice := typecheck.TempAt(base.Pos, ir.CurFunc, n.Type()) // []E result (possibly named)

	if tryStack {
		// K := maxStackSize/sizeof(E)
		// if cap <= K {
		//     var arr [K]E
		//     slice = arr[:len:cap]
		// } else {
		//     slice = makeslice(elemType, len, cap)
		// }
		maxStackSize := int64(base.Debug.VariableMakeThreshold)
		K := maxStackSize / t.Elem().Size() // rounds down
		if K > 0 {                          // skip if elem size is too big.
			nif := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OLE, typecheck.Conv(cap, types.Types[types.TUINT64]), ir.NewInt(base.Pos, K)), nil, nil)

			// cap is in bounds after the K check, but len might not be.
			// (Note that the slicing below would generate a panic for
			// the same bad cases, but we want makeslice panics, not
			// regular slicing panics.)
			lenCap := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OGT, typecheck.Conv(len, types.Types[types.TUINT64]), typecheck.Conv(cap, types.Types[types.TUINT64])), nil, nil)
			lenZero := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OLT, len, ir.NewInt(base.Pos, 0)), nil, nil)
			lenZero.Body.Append(mkcall("panicmakeslicelen", nil, &lenZero.Body))
			lenCap.Body.Append(lenZero)
			lenCap.Body.Append(mkcall("panicmakeslicecap", nil, &lenCap.Body))
			nif.Body.Append(lenCap)

			t := types.NewArray(t.Elem(), K)                              // [K]E
			arr := typecheck.TempAt(base.Pos, ir.CurFunc, t)              // var arr [K]E
			nif.Body.Append(ir.NewAssignStmt(base.Pos, arr, nil))         // arr = {} (zero it)
			s := ir.NewSliceExpr(base.Pos, ir.OSLICE, arr, nil, len, cap) // arr[:len:cap]
			nif.Body.Append(ir.NewAssignStmt(base.Pos, slice, s))         // slice = arr[:len:cap]

			appendWalkStmt(init, typecheck.Stmt(nif))

			// Put makeslice call below in the else branch.
			init = &nif.Else
		}
	}

	// Set up a call to makeslice.
	// When len and cap can fit into int, use makeslice instead of
	// makeslice64, which is faster and shorter on 32 bit platforms.
	fnname := "makeslice64"
	argtype := types.Types[types.TINT64]

	// Type checking guarantees that TIDEAL len/cap are positive and fit in an int.
	// The case of len or cap overflow when converting TUINT or TUINTPTR to TINT
	// will be handled by the negative range checks in makeslice during runtime.
	if (len.Type().IsKind(types.TIDEAL) || len.Type().Size() <= types.Types[types.TUINT].Size()) &&
		(cap.Type().IsKind(types.TIDEAL) || cap.Type().Size() <= types.Types[types.TUINT].Size()) {
		fnname = "makeslice"
		argtype = types.Types[types.TINT]
	}
	fn := typecheck.LookupRuntime(fnname)
	ptr := mkcall1(fn, types.Types[types.TUNSAFEPTR], init, reflectdata.MakeSliceElemRType(base.Pos, n), typecheck.Conv(len, argtype), typecheck.Conv(cap, argtype))
	ptr.MarkNonNil()
	len = typecheck.Conv(len, types.Types[types.TINT])
	cap = typecheck.Conv(cap, types.Types[types.TINT])
	s := ir.NewSliceHeaderExpr(base.Pos, t, ptr, len, cap)
	appendWalkStmt(init, ir.NewAssignStmt(base.Pos, slice, s))

	return slice
}
   640  
// walkMakeSliceCopy walks an OMAKESLICECOPY node, lowering the fused
// make+copy pattern either to mallocgc+memmove (pointer-free elements
// with equal lengths) or to a single runtime.makeslicecopy call.
func walkMakeSliceCopy(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
	if n.Esc() == ir.EscNone {
		base.Fatalf("OMAKESLICECOPY with EscNone: %v", n)
	}

	t := n.Type()
	if t.Elem().NotInHeap() {
		base.Errorf("%v can't be allocated in Go; it is incomplete (or unallocatable)", t.Elem())
	}

	// For OMAKESLICECOPY, n.Len is the target length and n.Cap holds
	// the source slice/string being copied from.
	length := typecheck.Conv(n.Len, types.Types[types.TINT])
	copylen := ir.NewUnaryExpr(base.Pos, ir.OLEN, n.Cap)
	copyptr := ir.NewUnaryExpr(base.Pos, ir.OSPTR, n.Cap)

	if !t.Elem().HasPointers() && n.Bounded() {
		// When len(to)==len(from) and elements have no pointers:
		// replace make+copy with runtime.mallocgc+runtime.memmove.

		// We do not check for overflow of len(to)*elem.Width here
		// since len(from) is an existing checked slice capacity
		// with same elem.Width for the from slice.
		size := ir.NewBinaryExpr(base.Pos, ir.OMUL, typecheck.Conv(length, types.Types[types.TUINTPTR]), typecheck.Conv(ir.NewInt(base.Pos, t.Elem().Size()), types.Types[types.TUINTPTR]))

		// instantiate mallocgc(size uintptr, typ *byte, needszero bool) unsafe.Pointer
		fn := typecheck.LookupRuntime("mallocgc")
		ptr := mkcall1(fn, types.Types[types.TUNSAFEPTR], init, size, typecheck.NodNil(), ir.NewBool(base.Pos, false))
		ptr.MarkNonNil()
		sh := ir.NewSliceHeaderExpr(base.Pos, t, ptr, length, length)

		s := typecheck.TempAt(base.Pos, ir.CurFunc, t)
		r := typecheck.Stmt(ir.NewAssignStmt(base.Pos, s, sh))
		r = walkExpr(r, init)
		init.Append(r)

		// instantiate memmove(to *any, frm *any, size uintptr)
		fn = typecheck.LookupRuntime("memmove", t.Elem(), t.Elem())
		ncopy := mkcall1(fn, nil, init, ir.NewUnaryExpr(base.Pos, ir.OSPTR, s), copyptr, size)
		init.Append(walkExpr(typecheck.Stmt(ncopy), init))

		return s
	}
	// Replace make+copy with runtime.makeslicecopy.
	// instantiate makeslicecopy(typ *byte, tolen int, fromlen int, from unsafe.Pointer) unsafe.Pointer
	fn := typecheck.LookupRuntime("makeslicecopy")
	ptr := mkcall1(fn, types.Types[types.TUNSAFEPTR], init, reflectdata.MakeSliceElemRType(base.Pos, n), length, copylen, typecheck.Conv(copyptr, types.Types[types.TUNSAFEPTR]))
	ptr.MarkNonNil()
	sh := ir.NewSliceHeaderExpr(base.Pos, t, ptr, length, length)
	return walkExpr(typecheck.Expr(sh), init)
}
   691  
   692  // walkNew walks an ONEW node.
   693  func walkNew(n *ir.UnaryExpr, init *ir.Nodes) ir.Node {
   694  	t := n.Type().Elem()
   695  	if t.NotInHeap() {
   696  		base.Errorf("%v can't be allocated in Go; it is incomplete (or unallocatable)", n.Type().Elem())
   697  	}
   698  	if n.Esc() == ir.EscNone {
   699  		if t.Size() > ir.MaxImplicitStackVarSize {
   700  			base.Fatalf("large ONEW with EscNone: %v", n)
   701  		}
   702  		return stackTempAddr(init, t)
   703  	}
   704  	types.CalcSize(t)
   705  	n.MarkNonNil()
   706  	return n
   707  }
   708  
// walkMinMax walks a min/max builtin call: it hoists the call's init
// list and walks the arguments, leaving the node itself for ssagen.
func walkMinMax(n *ir.CallExpr, init *ir.Nodes) ir.Node {
	init.Append(ir.TakeInit(n)...)
	walkExprList(n.Args, init)
	return n
}
   714  
// walkPrint lowers an OPRINT or OPRINTLN call into a block of calls to the
// runtime's type-specific print helpers (printint, printstring, ...),
// bracketed by printlock/printunlock so the output is not interleaved with
// other goroutines' prints.
func walkPrint(nn *ir.CallExpr, init *ir.Nodes) ir.Node {
	// Hoist all the argument evaluation up before the lock.
	walkExprListCheap(nn.Args, init)

	// For println, add " " between elements and "\n" at the end.
	if nn.Op() == ir.OPRINTLN {
		s := nn.Args
		t := make([]ir.Node, 0, len(s)*2)
		for i, n := range s {
			if i != 0 {
				t = append(t, ir.NewString(base.Pos, " "))
			}
			t = append(t, n)
		}
		t = append(t, ir.NewString(base.Pos, "\n"))
		nn.Args = t
	}

	// Collapse runs of constant strings, so adjacent literals (including the
	// separators added above) become a single printstring call.
	s := nn.Args
	t := make([]ir.Node, 0, len(s))
	for i := 0; i < len(s); {
		var strs []string
		for i < len(s) && ir.IsConst(s[i], constant.String) {
			strs = append(strs, ir.StringVal(s[i]))
			i++
		}
		if len(strs) > 0 {
			t = append(t, ir.NewString(base.Pos, strings.Join(strs, "")))
		}
		if i < len(s) {
			t = append(t, s[i])
			i++
		}
	}
	nn.Args = t

	// Build the call sequence, starting with the lock.
	calls := []ir.Node{mkcall("printlock", nil, init)}
	for i, n := range nn.Args {
		// Give untyped constants a concrete default type so a print helper
		// can be chosen: untyped rune -> rune, ints -> int64, floats -> float64.
		if n.Op() == ir.OLITERAL {
			if n.Type() == types.UntypedRune {
				n = typecheck.DefaultLit(n, types.RuneType)
			}

			switch n.Val().Kind() {
			case constant.Int:
				n = typecheck.DefaultLit(n, types.Types[types.TINT64])

			case constant.Float:
				n = typecheck.DefaultLit(n, types.Types[types.TFLOAT64])
			}
		}

		if n.Op() != ir.OLITERAL && n.Type() != nil && n.Type().Kind() == types.TIDEAL {
			n = typecheck.DefaultLit(n, types.Types[types.TINT64])
		}
		n = typecheck.DefaultLit(n, nil)
		nn.Args[i] = n
		// Skip arguments with no usable type (error already reported elsewhere).
		if n.Type() == nil || n.Type().Kind() == types.TFORW {
			continue
		}

		// Select the runtime helper matching the argument's type kind.
		var on *ir.Name
		switch n.Type().Kind() {
		case types.TINTER:
			if n.Type().IsEmptyInterface() {
				on = typecheck.LookupRuntime("printeface", n.Type())
			} else {
				on = typecheck.LookupRuntime("printiface", n.Type())
			}
		case types.TPTR:
			if n.Type().Elem().NotInHeap() {
				// Pointers to not-in-heap types are printed as raw uintptr
				// values, converted via unsafe.Pointer first.
				on = typecheck.LookupRuntime("printuintptr")
				n = ir.NewConvExpr(base.Pos, ir.OCONV, nil, n)
				n.SetType(types.Types[types.TUNSAFEPTR])
				n = ir.NewConvExpr(base.Pos, ir.OCONV, nil, n)
				n.SetType(types.Types[types.TUINTPTR])
				break
			}
			fallthrough
		case types.TCHAN, types.TMAP, types.TFUNC, types.TUNSAFEPTR:
			on = typecheck.LookupRuntime("printpointer", n.Type())
		case types.TSLICE:
			on = typecheck.LookupRuntime("printslice", n.Type())
		case types.TUINT, types.TUINT8, types.TUINT16, types.TUINT32, types.TUINT64, types.TUINTPTR:
			// The runtime's named type "hex" requests hexadecimal formatting.
			if types.RuntimeSymName(n.Type().Sym()) == "hex" {
				on = typecheck.LookupRuntime("printhex")
			} else {
				on = typecheck.LookupRuntime("printuint")
			}
		case types.TINT, types.TINT8, types.TINT16, types.TINT32, types.TINT64:
			on = typecheck.LookupRuntime("printint")
		case types.TFLOAT32, types.TFLOAT64:
			on = typecheck.LookupRuntime("printfloat")
		case types.TCOMPLEX64, types.TCOMPLEX128:
			on = typecheck.LookupRuntime("printcomplex")
		case types.TBOOL:
			on = typecheck.LookupRuntime("printbool")
		case types.TSTRING:
			// Constant " " and "\n" get dedicated no-argument helpers.
			cs := ""
			if ir.IsConst(n, constant.String) {
				cs = ir.StringVal(n)
			}
			switch cs {
			case " ":
				on = typecheck.LookupRuntime("printsp")
			case "\n":
				on = typecheck.LookupRuntime("printnl")
			default:
				on = typecheck.LookupRuntime("printstring")
			}
		default:
			badtype(ir.OPRINT, n.Type(), nil)
			continue
		}

		// Build the helper call, converting the argument to the helper's
		// parameter type when the helper takes one (printsp/printnl do not).
		r := ir.NewCallExpr(base.Pos, ir.OCALL, on, nil)
		if params := on.Type().Params(); len(params) > 0 {
			t := params[0].Type
			n = typecheck.Conv(n, t)
			r.Args.Append(n)
		}
		calls = append(calls, r)
	}

	calls = append(calls, mkcall("printunlock", nil, init))

	typecheck.Stmts(calls)
	walkExprList(calls, init)

	// Wrap the whole sequence in a single block statement.
	r := ir.NewBlockStmt(base.Pos, nil)
	r.List = calls
	return walkStmt(typecheck.Stmt(r))
}
   850  
   851  // walkRecoverFP walks an ORECOVERFP node.
   852  func walkRecoverFP(nn *ir.CallExpr, init *ir.Nodes) ir.Node {
   853  	return mkcall("gorecover", nn.Type(), init, walkExpr(nn.Args[0], init))
   854  }
   855  
   856  // walkUnsafeData walks an OUNSAFESLICEDATA or OUNSAFESTRINGDATA expression.
   857  func walkUnsafeData(n *ir.UnaryExpr, init *ir.Nodes) ir.Node {
   858  	slice := walkExpr(n.X, init)
   859  	res := typecheck.Expr(ir.NewUnaryExpr(n.Pos(), ir.OSPTR, slice))
   860  	res.SetType(n.Type())
   861  	return walkExpr(res, init)
   862  }
   863  
// walkUnsafeSlice walks an OUNSAFESLICE node unsafe.Slice(ptr, len):
// under -d=checkptr it calls the runtime's check function, otherwise it
// open-codes the length, nil-pointer, and size-overflow checks and builds
// the slice header directly.
func walkUnsafeSlice(n *ir.BinaryExpr, init *ir.Nodes) ir.Node {
	ptr := safeExpr(n.X, init)
	len := safeExpr(n.Y, init)
	sliceType := n.Type()

	lenType := types.Types[types.TINT64]
	unsafePtr := typecheck.Conv(ptr, types.Types[types.TUNSAFEPTR])

	// If checkptr enabled, call runtime.unsafeslicecheckptr to check ptr and len.
	// for simplicity, unsafeslicecheckptr always uses int64.
	// Type checking guarantees that TIDEAL len/cap are positive and fit in an int.
	// The case of len or cap overflow when converting TUINT or TUINTPTR to TINT
	// will be handled by the negative range checks in unsafeslice during runtime.
	if ir.ShouldCheckPtr(ir.CurFunc, 1) {
		fnname := "unsafeslicecheckptr"
		fn := typecheck.LookupRuntime(fnname)
		init.Append(mkcall1(fn, nil, init, reflectdata.UnsafeSliceElemRType(base.Pos, n), unsafePtr, typecheck.Conv(len, lenType)))
	} else {
		// Otherwise, open code unsafe.Slice to prevent runtime call overhead.
		// Keep this code in sync with runtime.unsafeslice{,64}
		if len.Type().IsKind(types.TIDEAL) || len.Type().Size() <= types.Types[types.TUINT].Size() {
			// len fits in an int; do all checks at int width.
			lenType = types.Types[types.TINT]
		} else {
			// len is wider than int; first verify it round-trips through int:
			// len64 := int64(len)
			// if int64(int(len64)) != len64 {
			//     panicunsafeslicelen()
			// }
			len64 := typecheck.Conv(len, lenType)
			nif := ir.NewIfStmt(base.Pos, nil, nil, nil)
			nif.Cond = ir.NewBinaryExpr(base.Pos, ir.ONE, typecheck.Conv(typecheck.Conv(len64, types.Types[types.TINT]), lenType), len64)
			nif.Body.Append(mkcall("panicunsafeslicelen", nil, &nif.Body))
			appendWalkStmt(init, nif)
		}

		// if len < 0 { panicunsafeslicelen() }
		nif := ir.NewIfStmt(base.Pos, nil, nil, nil)
		nif.Cond = ir.NewBinaryExpr(base.Pos, ir.OLT, typecheck.Conv(len, lenType), ir.NewInt(base.Pos, 0))
		nif.Body.Append(mkcall("panicunsafeslicelen", nil, &nif.Body))
		appendWalkStmt(init, nif)

		if sliceType.Elem().Size() == 0 {
			// Zero-size elements: no memory-size check needed, only a nil
			// pointer with positive length is rejected.
			// if ptr == nil && len > 0  {
			//      panicunsafesliceptrnil()
			// }
			nifPtr := ir.NewIfStmt(base.Pos, nil, nil, nil)
			isNil := ir.NewBinaryExpr(base.Pos, ir.OEQ, unsafePtr, typecheck.NodNil())
			gtZero := ir.NewBinaryExpr(base.Pos, ir.OGT, typecheck.Conv(len, lenType), ir.NewInt(base.Pos, 0))
			nifPtr.Cond =
				ir.NewLogicalExpr(base.Pos, ir.OANDAND, isNil, gtZero)
			nifPtr.Body.Append(mkcall("panicunsafeslicenilptr", nil, &nifPtr.Body))
			appendWalkStmt(init, nifPtr)

			h := ir.NewSliceHeaderExpr(n.Pos(), sliceType,
				typecheck.Conv(ptr, types.Types[types.TUNSAFEPTR]),
				typecheck.Conv(len, types.Types[types.TINT]),
				typecheck.Conv(len, types.Types[types.TINT]))
			return walkExpr(typecheck.Expr(h), init)
		}

		// mem, overflow := math.mulUintptr(et.size, len)
		mem := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TUINTPTR])
		overflow := typecheck.TempAt(base.Pos, ir.CurFunc, types.Types[types.TBOOL])

		// Hand-build the signature for internal/runtime/math.MulUintptr so it
		// can be called without importing the package.
		decl := types.NewSignature(nil,
			[]*types.Field{
				types.NewField(base.Pos, nil, types.Types[types.TUINTPTR]),
				types.NewField(base.Pos, nil, types.Types[types.TUINTPTR]),
			},
			[]*types.Field{
				types.NewField(base.Pos, nil, types.Types[types.TUINTPTR]),
				types.NewField(base.Pos, nil, types.Types[types.TBOOL]),
			})

		fn := ir.NewFunc(n.Pos(), n.Pos(), math_MulUintptr, decl)

		call := mkcall1(fn.Nname, fn.Type().ResultsTuple(), init, ir.NewInt(base.Pos, sliceType.Elem().Size()), typecheck.Conv(typecheck.Conv(len, lenType), types.Types[types.TUINTPTR]))
		appendWalkStmt(init, ir.NewAssignListStmt(base.Pos, ir.OAS2, []ir.Node{mem, overflow}, []ir.Node{call}))

		// -uintptr(ptr) is the number of bytes from ptr to the top of the
		// address space, so mem > -uintptr(ptr) means ptr+mem would wrap.
		// if overflow || mem > -uintptr(ptr) {
		//     if ptr == nil {
		//         panicunsafesliceptrnil()
		//     }
		//     panicunsafeslicelen()
		// }
		nif = ir.NewIfStmt(base.Pos, nil, nil, nil)
		memCond := ir.NewBinaryExpr(base.Pos, ir.OGT, mem, ir.NewUnaryExpr(base.Pos, ir.ONEG, typecheck.Conv(unsafePtr, types.Types[types.TUINTPTR])))
		nif.Cond = ir.NewLogicalExpr(base.Pos, ir.OOROR, overflow, memCond)
		nifPtr := ir.NewIfStmt(base.Pos, nil, nil, nil)
		nifPtr.Cond = ir.NewBinaryExpr(base.Pos, ir.OEQ, unsafePtr, typecheck.NodNil())
		nifPtr.Body.Append(mkcall("panicunsafeslicenilptr", nil, &nifPtr.Body))
		nif.Body.Append(nifPtr, mkcall("panicunsafeslicelen", nil, &nif.Body))
		appendWalkStmt(init, nif)
	}

	// Assemble the final slice header (ptr, len, cap=len).
	h := ir.NewSliceHeaderExpr(n.Pos(), sliceType,
		typecheck.Conv(ptr, types.Types[types.TUNSAFEPTR]),
		typecheck.Conv(len, types.Types[types.TINT]),
		typecheck.Conv(len, types.Types[types.TINT]))
	return walkExpr(typecheck.Expr(h), init)
}
   964  
   965  var math_MulUintptr = &types.Sym{Pkg: types.NewPkg("internal/runtime/math", "math"), Name: "MulUintptr"}
   966  
// walkUnsafeString walks an OUNSAFESTRING node unsafe.String(ptr, len):
// under -d=checkptr it calls the runtime's check function, otherwise it
// open-codes the length, nil-pointer, and address-wrap checks and builds
// the string header directly.
func walkUnsafeString(n *ir.BinaryExpr, init *ir.Nodes) ir.Node {
	ptr := safeExpr(n.X, init)
	len := safeExpr(n.Y, init)

	lenType := types.Types[types.TINT64]
	unsafePtr := typecheck.Conv(ptr, types.Types[types.TUNSAFEPTR])

	// If checkptr enabled, call runtime.unsafestringcheckptr to check ptr and len.
	// for simplicity, unsafestringcheckptr always uses int64.
	// Type checking guarantees that TIDEAL len are positive and fit in an int.
	if ir.ShouldCheckPtr(ir.CurFunc, 1) {
		fnname := "unsafestringcheckptr"
		fn := typecheck.LookupRuntime(fnname)
		init.Append(mkcall1(fn, nil, init, unsafePtr, typecheck.Conv(len, lenType)))
	} else {
		// Otherwise, open code unsafe.String to prevent runtime call overhead.
		// Keep this code in sync with runtime.unsafestring{,64}
		if len.Type().IsKind(types.TIDEAL) || len.Type().Size() <= types.Types[types.TUINT].Size() {
			// len fits in an int; do all checks at int width.
			lenType = types.Types[types.TINT]
		} else {
			// len is wider than int; first verify it round-trips through int:
			// len64 := int64(len)
			// if int64(int(len64)) != len64 {
			//     panicunsafestringlen()
			// }
			len64 := typecheck.Conv(len, lenType)
			nif := ir.NewIfStmt(base.Pos, nil, nil, nil)
			nif.Cond = ir.NewBinaryExpr(base.Pos, ir.ONE, typecheck.Conv(typecheck.Conv(len64, types.Types[types.TINT]), lenType), len64)
			nif.Body.Append(mkcall("panicunsafestringlen", nil, &nif.Body))
			appendWalkStmt(init, nif)
		}

		// if len < 0 { panicunsafestringlen() }
		nif := ir.NewIfStmt(base.Pos, nil, nil, nil)
		nif.Cond = ir.NewBinaryExpr(base.Pos, ir.OLT, typecheck.Conv(len, lenType), ir.NewInt(base.Pos, 0))
		nif.Body.Append(mkcall("panicunsafestringlen", nil, &nif.Body))
		appendWalkStmt(init, nif)

		// -uintptr(ptr) is the number of bytes from ptr to the top of the
		// address space, so the condition rejects strings that would wrap.
		// if uintptr(len) > -uintptr(ptr) {
		//    if ptr == nil {
		//       panicunsafestringnilptr()
		//    }
		//    panicunsafestringlen()
		// }
		nifLen := ir.NewIfStmt(base.Pos, nil, nil, nil)
		nifLen.Cond = ir.NewBinaryExpr(base.Pos, ir.OGT, typecheck.Conv(len, types.Types[types.TUINTPTR]), ir.NewUnaryExpr(base.Pos, ir.ONEG, typecheck.Conv(unsafePtr, types.Types[types.TUINTPTR])))
		nifPtr := ir.NewIfStmt(base.Pos, nil, nil, nil)
		nifPtr.Cond = ir.NewBinaryExpr(base.Pos, ir.OEQ, unsafePtr, typecheck.NodNil())
		nifPtr.Body.Append(mkcall("panicunsafestringnilptr", nil, &nifPtr.Body))
		nifLen.Body.Append(nifPtr, mkcall("panicunsafestringlen", nil, &nifLen.Body))
		appendWalkStmt(init, nifLen)
	}
	// Assemble the final string header (ptr, len).
	h := ir.NewStringHeaderExpr(n.Pos(),
		typecheck.Conv(ptr, types.Types[types.TUNSAFEPTR]),
		typecheck.Conv(len, types.Types[types.TINT]),
	)
	return walkExpr(typecheck.Expr(h), init)
}
  1024  
  1025  func badtype(op ir.Op, tl, tr *types.Type) {
  1026  	var s string
  1027  	if tl != nil {
  1028  		s += fmt.Sprintf("\n\t%v", tl)
  1029  	}
  1030  	if tr != nil {
  1031  		s += fmt.Sprintf("\n\t%v", tr)
  1032  	}
  1033  
  1034  	// common mistake: *struct and *interface.
  1035  	if tl != nil && tr != nil && tl.IsPtr() && tr.IsPtr() {
  1036  		if tl.Elem().IsStruct() && tr.Elem().IsInterface() {
  1037  			s += "\n\t(*struct vs *interface)"
  1038  		} else if tl.Elem().IsInterface() && tr.Elem().IsStruct() {
  1039  			s += "\n\t(*interface vs *struct)"
  1040  		}
  1041  	}
  1042  
  1043  	base.Errorf("illegal types for operand: %v%s", op, s)
  1044  }
  1045  
// writebarrierfn returns the runtime function with the given name,
// instantiated for the type arguments l and r.
func writebarrierfn(name string, l *types.Type, r *types.Type) ir.Node {
	return typecheck.LookupRuntime(name, l, r)
}
  1049  
  1050  // isRuneCount reports whether n is of the form len([]rune(string)).
  1051  // These are optimized into a call to runtime.countrunes.
  1052  func isRuneCount(n ir.Node) bool {
  1053  	return base.Flag.N == 0 && !base.Flag.Cfg.Instrumenting && n.Op() == ir.OLEN && n.(*ir.UnaryExpr).X.Op() == ir.OSTR2RUNES
  1054  }
  1055  
  1056  // isByteCount reports whether n is of the form len(string([]byte)).
  1057  func isByteCount(n ir.Node) bool {
  1058  	return base.Flag.N == 0 && !base.Flag.Cfg.Instrumenting && n.Op() == ir.OLEN &&
  1059  		(n.(*ir.UnaryExpr).X.Op() == ir.OBYTES2STR || n.(*ir.UnaryExpr).X.Op() == ir.OBYTES2STRTMP)
  1060  }
  1061  
  1062  // isChanLenCap reports whether n is of the form len(c) or cap(c) for a channel c.
  1063  // Note that this does not check for -n or instrumenting because this
  1064  // is a correctness rewrite, not an optimization.
  1065  func isChanLenCap(n ir.Node) bool {
  1066  	return (n.Op() == ir.OLEN || n.Op() == ir.OCAP) && n.(*ir.UnaryExpr).X.Type().IsChan()
  1067  }
  1068  