src/cmd/compile/internal/ssa/_gen/ARM64.rules

     1  // Copyright 2016 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  (Add(Ptr|64|32|16|8) ...) => (ADD ...)
     6  (Add(32|64)F ...) => (FADD(S|D) ...)
     7  
     8  (Sub(Ptr|64|32|16|8) ...) => (SUB ...)
     9  (Sub(32|64)F ...) => (FSUB(S|D) ...)
    10  
    11  (Mul64 ...) => (MUL ...)
    12  (Mul(32|16|8) ...) => (MULW ...)
    13  (Mul(32|64)F  ...) => (FMUL(S|D) ...)
    14  
    15  (Hmul64  ...) => (MULH ...)
    16  (Hmul64u ...) => (UMULH ...)
    17  (Hmul32  x y) => (SRAconst (MULL <typ.Int64> x y) [32])
    18  (Hmul32u x y) => (SRAconst (UMULL <typ.UInt64> x y) [32])
    19  (Select0 (Mul64uhilo x y)) => (UMULH x y)
    20  (Select1 (Mul64uhilo x y)) => (MUL x y)
    21  
    22  (Div64 [false] x y) => (DIV  x y)
    23  (Div32 [false] x y) => (DIVW x y)
    24  (Div16 [false] x y) => (DIVW (SignExt16to32 x) (SignExt16to32 y))
    25  (Div16u x y) => (UDIVW (ZeroExt16to32 x) (ZeroExt16to32 y))
    26  (Div8   x y) => (DIVW  (SignExt8to32  x) (SignExt8to32  y))
    27  (Div8u  x y) => (UDIVW (ZeroExt8to32  x) (ZeroExt8to32  y))
    28  (Div64u ...) => (UDIV  ...)
    29  (Div32u ...) => (UDIVW ...)
    30  (Div32F ...) => (FDIVS ...)
    31  (Div64F ...) => (FDIVD ...)
    32  
    33  (Mod64 x y) => (MOD x y)
    34  (Mod32 x y) => (MODW x y)
    35  (Mod64u ...) => (UMOD ...)
    36  (Mod32u ...) => (UMODW ...)
    37  (Mod(16|8)  x y) => (MODW  (SignExt(16|8)to32 x) (SignExt(16|8)to32 y))
    38  (Mod(16|8)u x y) => (UMODW (ZeroExt(16|8)to32 x) (ZeroExt(16|8)to32 y))
    39  
    40  // (x + y) / 2 with x>=y    =>    (x - y) / 2 + y
    41  (Avg64u <t> x y) => (ADD (SRLconst <t> (SUB <t> x y) [1]) y)
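        // Illustrative Go sketch (not one of the rules; avg64u is a hypothetical helper):
        // the same identity computes an unsigned average without overflowing the
        // intermediate sum.
        //
        //	func avg64u(x, y uint64) uint64 { // assumes x >= y, as the rule does
        //		return (x-y)/2 + y
        //	}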
    42  
    43  (And(64|32|16|8) ...) => (AND ...)
    44  (Or(64|32|16|8)  ...) => (OR ...)
    45  (Xor(64|32|16|8) ...) => (XOR ...)
    46  
    47  // unary ops
    48  (Neg(64|32|16|8) ...) => (NEG ...)
    49  (Neg(32|64)F     ...) => (FNEG(S|D) ...)
    50  (Com(64|32|16|8) ...) => (MVN ...)
    51  
    52  // math package intrinsics
    53  (Abs         ...) => (FABSD   ...)
    54  (Sqrt        ...) => (FSQRTD  ...)
    55  (Ceil        ...) => (FRINTPD ...)
    56  (Floor       ...) => (FRINTMD ...)
    57  (Round       ...) => (FRINTAD ...)
    58  (RoundToEven ...) => (FRINTND ...)
    59  (Trunc       ...) => (FRINTZD ...)
    60  (FMA       x y z) => (FMADDD z x y)
    61  
    62  (Sqrt32 ...) => (FSQRTS ...)
    63  
    64  (Min(64|32)F ...) => (FMIN(D|S) ...)
    65  (Max(64|32)F ...) => (FMAX(D|S) ...)
    66  
    67  // lowering rotates
    68  // Rotate detection is done in the generic rules; if the following rules need to be changed, check the generic rules first.
    69  (RotateLeft8  <t> x (MOVDconst [c])) => (Or8 (Lsh8x64 <t> x (MOVDconst [c&7])) (Rsh8Ux64 <t> x (MOVDconst [-c&7])))
    70  (RotateLeft8  <t> x y) => (OR <t> (SLL <t> x (ANDconst <typ.Int64> [7] y)) (SRL <t> (ZeroExt8to64 x) (ANDconst <typ.Int64> [7] (NEG <typ.Int64> y))))
    71  (RotateLeft16 <t> x (MOVDconst [c])) => (Or16 (Lsh16x64 <t> x (MOVDconst [c&15])) (Rsh16Ux64 <t> x (MOVDconst [-c&15])))
    72  (RotateLeft16 <t> x y) => (RORW <t> (ORshiftLL <typ.UInt32> (ZeroExt16to32 x) (ZeroExt16to32 x) [16]) (NEG <typ.Int64> y))
    73  (RotateLeft32 x y) => (RORW x (NEG <y.Type> y))
    74  (RotateLeft64 x y) => (ROR x (NEG <y.Type> y))
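        // Go-level sketch of the identity used above, via math/bits (rotl32 is an
        // illustrative helper): rotating left by y equals rotating right by -y modulo
        // the register width, which is what ROR/RORW with a negated amount implements.
        //
        //	func rotl32(x uint32, y int) uint32 {
        //		return bits.RotateLeft32(x, y) // same result as a right rotate by (-y) & 31
        //	}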
    75  
    76  (Ctz(64|32|16|8)NonZero ...) => (Ctz(64|32|32|32) ...)
    77  
    78  (Ctz64 <t> x) => (CLZ  (RBIT  <t> x))
    79  (Ctz32 <t> x) => (CLZW (RBITW <t> x))
    80  (Ctz16 <t> x) => (CLZW <t> (RBITW <typ.UInt32> (ORconst <typ.UInt32> [0x10000] x)))
    81  (Ctz8  <t> x) => (CLZW <t> (RBITW <typ.UInt32> (ORconst <typ.UInt32> [0x100] x)))
    82  
    83  (PopCount64 <t> x) => (FMOVDfpgp <t> (VUADDLV <typ.Float64> (VCNT <typ.Float64> (FMOVDgpfp <typ.Float64> x))))
    84  (PopCount32 <t> x) => (FMOVDfpgp <t> (VUADDLV <typ.Float64> (VCNT <typ.Float64> (FMOVDgpfp <typ.Float64> (ZeroExt32to64 x)))))
    85  (PopCount16 <t> x) => (FMOVDfpgp <t> (VUADDLV <typ.Float64> (VCNT <typ.Float64> (FMOVDgpfp <typ.Float64> (ZeroExt16to64 x)))))
    86  
    87  // Load args directly into the register class where they will be used.
    88  (FMOVDgpfp <t> (Arg [off] {sym})) => @b.Func.Entry (Arg <t> [off] {sym})
    89  (FMOVDfpgp <t> (Arg [off] {sym})) => @b.Func.Entry (Arg <t> [off] {sym})
    90  
    91  // Similarly for stores: if we see a store after an FPR <=> GPR move, redirect the store to use the other register set.
    92  (MOVDstore  [off] {sym} ptr (FMOVDfpgp val) mem) => (FMOVDstore [off] {sym} ptr val mem)
    93  (FMOVDstore [off] {sym} ptr (FMOVDgpfp val) mem) => (MOVDstore [off] {sym} ptr val mem)
    94  (MOVWstore  [off] {sym} ptr (FMOVSfpgp val) mem) => (FMOVSstore [off] {sym} ptr val mem)
    95  (FMOVSstore [off] {sym} ptr (FMOVSgpfp val) mem) => (MOVWstore [off] {sym} ptr val mem)
    96  
    97  // float <=> int register moves, with no conversion.
    98  // These come up when compiling math.{Float64bits, Float64frombits, Float32bits, Float32frombits}.
    99  (MOVDload  [off] {sym} ptr (FMOVDstore [off] {sym} ptr val _)) => (FMOVDfpgp val)
   100  (FMOVDload [off] {sym} ptr (MOVDstore  [off] {sym} ptr val _)) => (FMOVDgpfp val)
   101  (MOVWUload [off] {sym} ptr (FMOVSstore [off] {sym} ptr val _)) => (FMOVSfpgp val)
   102  (FMOVSload [off] {sym} ptr (MOVWstore  [off] {sym} ptr val _)) => (FMOVSgpfp val)
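        // For example (illustrative), a pattern like
        //
        //	u := math.Float64bits(f) // store f, reload the same slot as uint64
        //
        // collapses via the rules above into a single FMOVDfpgp register move.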
   103  
   104  (BitLen64 x) => (SUB (MOVDconst [64]) (CLZ <typ.Int> x))
   105  (BitLen32 x) => (SUB (MOVDconst [32]) (CLZW <typ.Int> x))
   106  (BitLen(16|8) x) => (BitLen64 (ZeroExt(16|8)to64 x))
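        // Illustrative Go equivalent of the BitLen lowering above, using math/bits
        // (bitLen64 is a hypothetical helper):
        //
        //	func bitLen64(x uint64) int {
        //		return 64 - bits.LeadingZeros64(x) // matches SUB(64, CLZ x)
        //	}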
   107  
   108  (Bswap64 ...) => (REV ...)
   109  (Bswap32 ...) => (REVW ...)
   110  (Bswap16 ...) => (REV16W ...)
   111  
   112  (BitRev64 ...) => (RBIT ...)
   113  (BitRev32 ...) => (RBITW ...)
   114  (BitRev16   x) => (SRLconst [48] (RBIT <typ.UInt64> x))
   115  (BitRev8    x) => (SRLconst [56] (RBIT <typ.UInt64> x))
   116  
   117  // UMOD is assembled as a UREM instruction, and UREM is in turn expanded into
   118  // UDIV and MSUB instructions. If an identical UDIV instruction already exists just before or
   119  // after the UREM (as in quo, rem := z/y, z%y), that second UDIV is redundant.
   120  // The purpose of these rules is to expose the UDIV so that the CSE pass can remove it.
   121  (UMOD  <typ.UInt64> x y) => (MSUB <typ.UInt64> x y (UDIV <typ.UInt64> x y))
   122  (UMODW <typ.UInt32> x y) => (MSUBW <typ.UInt32> x y (UDIVW <typ.UInt32> x y))
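        // Example of the quo/rem pattern this targets (illustrative):
        //
        //	quo, rem := z/y, z%y // unsigned operands
        //
        // rem becomes z - y*(z/y), i.e. an MSUB fed by a UDIV; that UDIV is identical to
        // the one computing quo, so CSE leaves only a single divide.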
   123  
   124  // 64-bit addition with carry.
   125  (Select0 (Add64carry x y c)) => (Select0 <typ.UInt64> (ADCSflags x y (Select1 <types.TypeFlags> (ADDSconstflags [-1] c))))
   126  (Select1 (Add64carry x y c)) => (ADCzerocarry <typ.UInt64> (Select1 <types.TypeFlags> (ADCSflags x y (Select1 <types.TypeFlags> (ADDSconstflags [-1] c)))))
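        // Add64carry is the generic op for math/bits.Add64 (illustrative usage):
        //
        //	sum, carryOut := bits.Add64(x, y, carryIn) // carryIn must be 0 or 1
        //
        // ADDSconstflags [-1] turns the 0/1 carry-in into the C flag, ADCSflags adds with
        // that carry, and ADCzerocarry rematerializes the carry-out as 0 or 1.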
   127  
   128  // 64-bit subtraction with borrowing.
   129  (Select0 (Sub64borrow x y bo)) => (Select0 <typ.UInt64> (SBCSflags x y (Select1 <types.TypeFlags> (NEGSflags bo))))
   130  (Select1 (Sub64borrow x y bo)) => (NEG <typ.UInt64> (NGCzerocarry <typ.UInt64> (Select1 <types.TypeFlags> (SBCSflags x y (Select1 <types.TypeFlags> (NEGSflags bo))))))
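        // Likewise, Sub64borrow is the generic op for math/bits.Sub64:
        //
        //	diff, borrowOut := bits.Sub64(x, y, borrowIn) // borrowIn must be 0 or 1
        //
        // NEGSflags converts the 0/1 borrow-in into the carry flag, SBCSflags subtracts
        // with borrow, and the final NEG of NGCzerocarry yields the borrow-out as 0 or 1.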
   131  
   132  // boolean ops -- booleans are represented with 0=false, 1=true
   133  (AndB ...) => (AND ...)
   134  (OrB  ...) => (OR ...)
   135  (EqB  x y) => (XOR (MOVDconst [1]) (XOR <typ.Bool> x y))
   136  (NeqB ...) => (XOR ...)
   137  (Not    x) => (XOR (MOVDconst [1]) x)
   138  
   139  // shifts
   140  // The hardware instruction uses only the low 6 bits of the shift amount;
   141  // we compare against 64 to preserve Go semantics for large shifts.
   142  // The rules for rotates with a non-constant shift are based on the following rules;
   143  // if the following rules change, please also modify the rotate rules derived from them.
   144  
   145  // Check shiftIsBounded first: if the shift amount is proved to be in range, we
   146  // can do the shift directly.
   147  // left shift
   148  (Lsh(64|32|16|8)x64 <t> x y) && shiftIsBounded(v) => (SLL <t> x y)
   149  (Lsh(64|32|16|8)x32 <t> x y) && shiftIsBounded(v) => (SLL <t> x y)
   150  (Lsh(64|32|16|8)x16 <t> x y) && shiftIsBounded(v) => (SLL <t> x y)
   151  (Lsh(64|32|16|8)x8  <t> x y) && shiftIsBounded(v) => (SLL <t> x y)
   152  
   153  // signed right shift
   154  (Rsh64x(64|32|16|8) <t> x y) && shiftIsBounded(v) => (SRA <t> x y)
   155  (Rsh32x(64|32|16|8) <t> x y) && shiftIsBounded(v) => (SRA <t> (SignExt32to64 x) y)
   156  (Rsh16x(64|32|16|8) <t> x y) && shiftIsBounded(v) => (SRA <t> (SignExt16to64 x) y)
   157  (Rsh8x(64|32|16|8)  <t> x y) && shiftIsBounded(v) => (SRA <t> (SignExt8to64 x) y)
   158  
   159  // unsigned right shift
   160  (Rsh64Ux(64|32|16|8) <t> x y) && shiftIsBounded(v) => (SRL <t> x y)
   161  (Rsh32Ux(64|32|16|8) <t> x y) && shiftIsBounded(v) => (SRL <t> (ZeroExt32to64 x) y)
   162  (Rsh16Ux(64|32|16|8) <t> x y) && shiftIsBounded(v) => (SRL <t> (ZeroExt16to64 x) y)
   163  (Rsh8Ux(64|32|16|8)  <t> x y) && shiftIsBounded(v) => (SRL <t> (ZeroExt8to64 x) y)
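        // Go-level sketch of the semantics preserved by the CMP + CSEL rules below
        // (lsh64 is an illustrative helper):
        //
        //	func lsh64(x, s uint64) uint64 {
        //		if s >= 64 {
        //			return 0 // Go: shifting by >= the width yields 0
        //		}
        //		return x << s // hardware SLL looks only at the low 6 bits of s
        //	}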
   164  
   165  // The shift amount may be out of range; use CMP + CSEL instead.
   166  (Lsh64x64 <t> x y) && !shiftIsBounded(v)        => (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
   167  (Lsh64x(32|16|8) <t> x y) && !shiftIsBounded(v) => (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] ((ZeroExt32to64|ZeroExt16to64|ZeroExt8to64) y)))
   168  
   169  (Lsh32x64 <t> x y) && !shiftIsBounded(v)        => (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
   170  (Lsh32x(32|16|8) <t> x y) && !shiftIsBounded(v) => (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] ((ZeroExt32to64|ZeroExt16to64|ZeroExt8to64) y)))
   171  
   172  (Lsh16x64 <t> x y) && !shiftIsBounded(v)        => (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
   173  (Lsh16x(32|16|8) <t> x y) && !shiftIsBounded(v) => (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] ((ZeroExt32to64|ZeroExt16to64|ZeroExt8to64) y)))
   174  
   175  (Lsh8x64 <t> x y) && !shiftIsBounded(v)        => (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
   176  (Lsh8x(32|16|8) <t> x y) && !shiftIsBounded(v) => (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] ((ZeroExt32to64|ZeroExt16to64|ZeroExt8to64) y)))
   177  
   178  (Rsh64Ux64 <t> x y) && !shiftIsBounded(v)        => (CSEL [OpARM64LessThanU] (SRL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
   179  (Rsh64Ux(32|16|8) <t> x y) && !shiftIsBounded(v) => (CSEL [OpARM64LessThanU] (SRL <t> x y) (Const64 <t> [0]) (CMPconst [64] ((ZeroExt32to64|ZeroExt16to64|ZeroExt8to64) y)))
   180  
   181  (Rsh32Ux64 <t> x y) && !shiftIsBounded(v)        => (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt32to64 x) y) (Const64 <t> [0]) (CMPconst [64] y))
   182  (Rsh32Ux(32|16|8) <t> x y) && !shiftIsBounded(v) => (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt32to64 x) y) (Const64 <t> [0]) (CMPconst [64] ((ZeroExt32to64|ZeroExt16to64|ZeroExt8to64) y)))
   183  
   184  (Rsh16Ux64 <t> x y) && !shiftIsBounded(v)        => (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt16to64 x) y) (Const64 <t> [0]) (CMPconst [64] y))
   185  (Rsh16Ux(32|16|8) <t> x y) && !shiftIsBounded(v) => (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt16to64 x) y) (Const64 <t> [0]) (CMPconst [64] ((ZeroExt32to64|ZeroExt16to64|ZeroExt8to64) y)))
   186  
   187  (Rsh8Ux64 <t> x y) && !shiftIsBounded(v)        => (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt8to64 x) y) (Const64 <t> [0]) (CMPconst [64] y))
   188  (Rsh8Ux(32|16|8) <t> x y) && !shiftIsBounded(v) => (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt8to64 x) y) (Const64 <t> [0]) (CMPconst [64] ((ZeroExt32to64|ZeroExt16to64|ZeroExt8to64) y)))
   189  
   190  (Rsh64x64 x y) && !shiftIsBounded(v)        => (SRA x (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] y)))
   191  (Rsh64x(32|16|8) x y) && !shiftIsBounded(v) => (SRA x (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] ((ZeroExt32to64|ZeroExt16to64|ZeroExt8to64) y))))
   192  
   193  (Rsh32x64 x y) && !shiftIsBounded(v)        => (SRA (SignExt32to64 x) (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] y)))
   194  (Rsh32x(32|16|8) x y) && !shiftIsBounded(v) => (SRA (SignExt32to64 x) (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] ((ZeroExt32to64|ZeroExt16to64|ZeroExt8to64) y))))
   195  
   196  (Rsh16x64 x y) && !shiftIsBounded(v)        => (SRA (SignExt16to64 x) (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] y)))
   197  (Rsh16x(32|16|8) x y) && !shiftIsBounded(v) => (SRA (SignExt16to64 x) (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] ((ZeroExt32to64|ZeroExt16to64|ZeroExt8to64) y))))
   198  
   199  (Rsh8x64 x y) && !shiftIsBounded(v)        => (SRA (SignExt8to64 x) (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] y)))
   200  (Rsh8x(32|16|8) x y) && !shiftIsBounded(v) => (SRA (SignExt8to64 x) (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] ((ZeroExt32to64|ZeroExt16to64|ZeroExt8to64) y))))
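        // Signed right shifts clamp the amount to 63 instead, because shifting right by
        // the width or more must keep replicating the sign bit. Sketch:
        //
        //	func rsh64(x int64, s uint64) int64 {
        //		if s > 63 {
        //			s = 63 // Go: a huge shift count gives 0 or -1, depending on the sign of x
        //		}
        //		return x >> s
        //	}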
   201  
   202  // constants
   203  (Const(64|32|16|8) [val]) => (MOVDconst [int64(val)])
   204  (Const(32|64)F    [val]) => (FMOV(S|D)const [float64(val)])
   205  (ConstNil) => (MOVDconst [0])
   206  (ConstBool [t]) => (MOVDconst [b2i(t)])
   207  
   208  (Slicemask <t> x) => (SRAconst (NEG <t> x) [63])
   209  
   210  // truncations
   211  // Because we ignore high parts of registers, truncates are just copies.
   212  (Trunc16to8  ...) => (Copy ...)
   213  (Trunc32to8  ...) => (Copy ...)
   214  (Trunc32to16 ...) => (Copy ...)
   215  (Trunc64to8  ...) => (Copy ...)
   216  (Trunc64to16 ...) => (Copy ...)
   217  (Trunc64to32 ...) => (Copy ...)
   218  
   219  // Zero-/Sign-extensions
   220  (ZeroExt8to16  ...) => (MOVBUreg ...)
   221  (ZeroExt8to32  ...) => (MOVBUreg ...)
   222  (ZeroExt16to32 ...) => (MOVHUreg ...)
   223  (ZeroExt8to64  ...) => (MOVBUreg ...)
   224  (ZeroExt16to64 ...) => (MOVHUreg ...)
   225  (ZeroExt32to64 ...) => (MOVWUreg ...)
   226  
   227  (SignExt8to16  ...) => (MOVBreg ...)
   228  (SignExt8to32  ...) => (MOVBreg ...)
   229  (SignExt16to32 ...) => (MOVHreg ...)
   230  (SignExt8to64  ...) => (MOVBreg ...)
   231  (SignExt16to64 ...) => (MOVHreg ...)
   232  (SignExt32to64 ...) => (MOVWreg ...)
   233  
   234  // float <=> int conversion
   235  (Cvt32to32F  ...) => (SCVTFWS ...)
   236  (Cvt32to64F  ...) => (SCVTFWD ...)
   237  (Cvt64to32F  ...) => (SCVTFS ...)
   238  (Cvt64to64F  ...) => (SCVTFD ...)
   239  (Cvt32Uto32F ...) => (UCVTFWS ...)
   240  (Cvt32Uto64F ...) => (UCVTFWD ...)
   241  (Cvt64Uto32F ...) => (UCVTFS ...)
   242  (Cvt64Uto64F ...) => (UCVTFD ...)
   243  (Cvt32Fto32  ...) => (FCVTZSSW ...)
   244  (Cvt64Fto32  ...) => (FCVTZSDW ...)
   245  (Cvt32Fto64  ...) => (FCVTZSS ...)
   246  (Cvt64Fto64  ...) => (FCVTZSD ...)
   247  (Cvt32Fto32U ...) => (FCVTZUSW ...)
   248  (Cvt64Fto32U ...) => (FCVTZUDW ...)
   249  (Cvt32Fto64U ...) => (FCVTZUS ...)
   250  (Cvt64Fto64U ...) => (FCVTZUD ...)
   251  (Cvt32Fto64F ...) => (FCVTSD ...)
   252  (Cvt64Fto32F ...) => (FCVTDS ...)
   253  
   254  (CvtBoolToUint8 ...) => (Copy ...)
   255  
   256  (Round32F ...) => (LoweredRound32F ...)
   257  (Round64F ...) => (LoweredRound64F ...)
   258  
   259  // comparisons
   260  (Eq8  x y)  => (Equal (CMPW (ZeroExt8to32  x) (ZeroExt8to32  y)))
   261  (Eq16  x y) => (Equal (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
   262  (Eq32  x y) => (Equal (CMPW  x y))
   263  (Eq64  x y) => (Equal (CMP   x y))
   264  (EqPtr x y) => (Equal (CMP   x y))
   265  (Eq32F x y) => (Equal (FCMPS x y))
   266  (Eq64F x y) => (Equal (FCMPD x y))
   267  
   268  (Neq8   x y) => (NotEqual (CMPW (ZeroExt8to32  x) (ZeroExt8to32  y)))
   269  (Neq16  x y) => (NotEqual (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
   270  (Neq32  x y) => (NotEqual (CMPW  x y))
   271  (Neq64  x y) => (NotEqual (CMP   x y))
   272  (NeqPtr x y) => (NotEqual (CMP   x y))
   273  (Neq(32|64)F x y) => (NotEqual (FCMP(S|D) x y))
   274  
   275  (Less(8|16) x y) => (LessThan (CMPW (SignExt(8|16)to32 x) (SignExt(8|16)to32 y)))
   276  (Less32 x y) => (LessThan (CMPW x y))
   277  (Less64 x y) => (LessThan (CMP  x y))
   278  
   279  // Set condition flags for floating-point comparisons "x < y"
   280  // and "x <= y". If either or both of the operands are NaN,
   281  // all three of (x < y), (x == y) and (x > y) are false, and
   282  // the Arm manual says that the FCMP instruction sets PSTATE.<N,Z,C,V>
   283  // to (0, 0, 1, 1) in that case.
   284  (Less32F x y) => (LessThanF (FCMPS x y))
   285  (Less64F x y) => (LessThanF (FCMPD x y))
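        // Sketch of why dedicated float conditions are needed: with a NaN operand, an
        // ordering and its complement are both false, so LessThanF cannot be obtained by
        // inverting GreaterEqualF.
        //
        //	x := math.NaN()
        //	_ = x < 1.0  // false
        //	_ = x >= 1.0 // also false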
   286  
   287  // For an unsigned integer x, the following identities are useful when combining branches:
   288  // 0 <  x  =>  x != 0
   289  // x <= 0  =>  x == 0
   290  // x <  1  =>  x == 0
   291  // 1 <= x  =>  x != 0
   292  (Less(8U|16U|32U|64U) zero:(MOVDconst [0]) x) => (Neq(8|16|32|64) zero x)
   293  (Leq(8U|16U|32U|64U)  x zero:(MOVDconst [0])) => (Eq(8|16|32|64)  x zero)
   294  (Less(8U|16U|32U|64U) x (MOVDconst [1])) => (Eq(8|16|32|64)  x (MOVDconst [0]))
   295  (Leq(8U|16U|32U|64U)  (MOVDconst [1]) x) => (Neq(8|16|32|64) (MOVDconst [0]) x)
   296  
   297  (Less8U  x y) => (LessThanU (CMPW (ZeroExt8to32  x) (ZeroExt8to32  y)))
   298  (Less16U x y) => (LessThanU (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
   299  (Less32U x y) => (LessThanU (CMPW x y))
   300  (Less64U x y) => (LessThanU (CMP x y))
   301  
   302  (Leq8  x y) => (LessEqual (CMPW (SignExt8to32  x) (SignExt8to32  y)))
   303  (Leq16 x y) => (LessEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
   304  (Leq32 x y) => (LessEqual (CMPW x y))
   305  (Leq64 x y) => (LessEqual (CMP x y))
   306  
   307  // Refer to the comments for op Less64F above.
   308  (Leq32F x y) => (LessEqualF (FCMPS x y))
   309  (Leq64F x y) => (LessEqualF (FCMPD x y))
   310  
   311  (Leq8U  x y) => (LessEqualU (CMPW (ZeroExt8to32  x) (ZeroExt8to32  y)))
   312  (Leq16U x y) => (LessEqualU (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
   313  (Leq32U x y) => (LessEqualU (CMPW x y))
   314  (Leq64U x y) => (LessEqualU (CMP x y))
   315  
   316  // Optimize comparison between a floating-point value and 0.0 with "FCMP $(0.0), Fn"
   317  (FCMPS x (FMOVSconst [0])) => (FCMPS0 x)
   318  (FCMPS (FMOVSconst [0]) x) => (InvertFlags (FCMPS0 x))
   319  (FCMPD x (FMOVDconst [0])) => (FCMPD0 x)
   320  (FCMPD (FMOVDconst [0]) x) => (InvertFlags (FCMPD0 x))
   321  
   322  // CSEL needs a flag-generating argument. Synthesize a TSTW if necessary.
   323  (CondSelect x y boolval) && flagArg(boolval) != nil => (CSEL [boolval.Op] x y flagArg(boolval))
   324  (CondSelect x y boolval) && flagArg(boolval) == nil => (CSEL [OpARM64NotEqual] x y (TSTWconst [1] boolval))
   325  
   326  (OffPtr [off] ptr:(SP)) && is32Bit(off) => (MOVDaddr [int32(off)] ptr)
   327  (OffPtr [off] ptr) => (ADDconst [off] ptr)
   328  
   329  (Addr {sym} base) => (MOVDaddr {sym} base)
   330  (LocalAddr <t> {sym} base mem) && t.Elem().HasPointers() => (MOVDaddr {sym} (SPanchored base mem))
   331  (LocalAddr <t> {sym} base _)  && !t.Elem().HasPointers() => (MOVDaddr {sym} base)
   332  
   333  // loads
   334  (Load <t> ptr mem) && t.IsBoolean() => (MOVBUload ptr mem)
   335  (Load <t> ptr mem) && (is8BitInt(t)  &&  t.IsSigned()) => (MOVBload ptr mem)
   336  (Load <t> ptr mem) && (is8BitInt(t)  && !t.IsSigned()) => (MOVBUload ptr mem)
   337  (Load <t> ptr mem) && (is16BitInt(t) &&  t.IsSigned()) => (MOVHload ptr mem)
   338  (Load <t> ptr mem) && (is16BitInt(t) && !t.IsSigned()) => (MOVHUload ptr mem)
   339  (Load <t> ptr mem) && (is32BitInt(t) &&  t.IsSigned()) => (MOVWload ptr mem)
   340  (Load <t> ptr mem) && (is32BitInt(t) && !t.IsSigned()) => (MOVWUload ptr mem)
   341  (Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) => (MOVDload ptr mem)
   342  (Load <t> ptr mem) && is32BitFloat(t) => (FMOVSload ptr mem)
   343  (Load <t> ptr mem) && is64BitFloat(t) => (FMOVDload ptr mem)
   344  
   345  // stores
   346  (Store {t} ptr val mem) && t.Size() == 1 => (MOVBstore ptr val mem)
   347  (Store {t} ptr val mem) && t.Size() == 2 => (MOVHstore ptr val mem)
   348  (Store {t} ptr val mem) && t.Size() == 4 && !t.IsFloat() => (MOVWstore ptr val mem)
   349  (Store {t} ptr val mem) && t.Size() == 8 && !t.IsFloat() => (MOVDstore ptr val mem)
   350  (Store {t} ptr val mem) && t.Size() == 4 &&  t.IsFloat() => (FMOVSstore ptr val mem)
   351  (Store {t} ptr val mem) && t.Size() == 8 &&  t.IsFloat() => (FMOVDstore ptr val mem)
   352  
   353  // zeroing
   354  (Zero [0] _   mem) => mem
   355  (Zero [1] ptr mem) => (MOVBstore ptr (MOVDconst [0]) mem)
   356  (Zero [2] ptr mem) => (MOVHstore ptr (MOVDconst [0]) mem)
   357  (Zero [4] ptr mem) => (MOVWstore ptr (MOVDconst [0]) mem)
   358  (Zero [3] ptr mem) =>
   359  	(MOVBstore [2] ptr (MOVDconst [0])
   360  		(MOVHstore ptr (MOVDconst [0]) mem))
   361  (Zero [5] ptr mem) =>
   362  	(MOVBstore [4] ptr (MOVDconst [0])
   363  		(MOVWstore ptr (MOVDconst [0]) mem))
   364  (Zero [6] ptr mem) =>
   365  	(MOVHstore [4] ptr (MOVDconst [0])
   366  		(MOVWstore ptr (MOVDconst [0]) mem))
   367  (Zero [7] ptr mem) =>
   368  	(MOVWstore [3] ptr (MOVDconst [0])
   369  		(MOVWstore ptr (MOVDconst [0]) mem))
   370  (Zero [8] ptr mem) => (MOVDstore ptr (MOVDconst [0]) mem)
   371  (Zero [9] ptr mem) =>
   372  	(MOVBstore [8] ptr (MOVDconst [0])
   373  		(MOVDstore ptr (MOVDconst [0]) mem))
   374  (Zero [10] ptr mem) =>
   375  	(MOVHstore [8] ptr (MOVDconst [0])
   376  		(MOVDstore ptr (MOVDconst [0]) mem))
   377  (Zero [11] ptr mem) =>
   378  	(MOVDstore [3] ptr (MOVDconst [0])
   379  		(MOVDstore ptr (MOVDconst [0]) mem))
   380  (Zero [12] ptr mem) =>
   381  	(MOVWstore [8] ptr (MOVDconst [0])
   382  		(MOVDstore ptr (MOVDconst [0]) mem))
   383  (Zero [13] ptr mem) =>
   384  	(MOVDstore [5] ptr (MOVDconst [0])
   385  		(MOVDstore ptr (MOVDconst [0]) mem))
   386  (Zero [14] ptr mem) =>
   387  	(MOVDstore [6] ptr (MOVDconst [0])
   388  		(MOVDstore ptr (MOVDconst [0]) mem))
   389  (Zero [15] ptr mem) =>
   390  	(MOVDstore [7] ptr (MOVDconst [0])
   391  		(MOVDstore ptr (MOVDconst [0]) mem))
   392  (Zero [16] ptr mem) =>
   393  	(STP [0] ptr (MOVDconst [0]) (MOVDconst [0]) mem)
   394  
   395  (Zero [32] ptr mem) =>
   396  	(STP [16] ptr (MOVDconst [0]) (MOVDconst [0])
   397  		(STP [0] ptr (MOVDconst [0]) (MOVDconst [0]) mem))
   398  
   399  (Zero [48] ptr mem) =>
   400  	(STP [32] ptr (MOVDconst [0]) (MOVDconst [0])
   401  		(STP [16] ptr (MOVDconst [0]) (MOVDconst [0])
   402  			(STP [0] ptr (MOVDconst [0]) (MOVDconst [0]) mem)))
   403  
   404  (Zero [64] ptr mem) =>
   405  	(STP [48] ptr (MOVDconst [0]) (MOVDconst [0])
   406  		(STP [32] ptr (MOVDconst [0]) (MOVDconst [0])
   407  			(STP [16] ptr (MOVDconst [0]) (MOVDconst [0])
   408  				(STP [0] ptr (MOVDconst [0]) (MOVDconst [0]) mem))))
   409  
   410  // strip off fractional word zeroing
   411  (Zero [s] ptr mem) && s%16 != 0 && s%16 <= 8 && s > 16 =>
   412  	(Zero [8]
   413  		(OffPtr <ptr.Type> ptr [s-8])
   414  		(Zero [s-s%16] ptr mem))
   415  (Zero [s] ptr mem) && s%16 != 0 && s%16 > 8 && s > 16 =>
   416  	(Zero [16]
   417  		(OffPtr <ptr.Type> ptr [s-16])
   418  		(Zero [s-s%16] ptr mem))
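        // Worked example: Zero [23] becomes an 8-byte zero at offset 15 on top of a
        // 16-byte zero at offset 0; the one overlapping byte is harmless because both
        // stores write zeros.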
   419  
   420  // medium-sized zeroing uses a Duff's device
   421  // 4, 16, and 64 are magic constants; see runtime/mkduff.go
   422  (Zero [s] ptr mem)
   423  	&& s%16 == 0 && s > 64 && s <= 16*64 =>
   424  	(DUFFZERO [4 * (64 - s/16)] ptr mem)
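        // Worked example of the offset: for s == 256, s/16 == 16 blocks are needed, so
        // the call enters duffzero at byte offset 4*(64-16) == 192, skipping the first
        // 48 of its 64 STP blocks.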
   425  
   426  // large zeroing uses a loop
   427  (Zero [s] ptr mem)
   428  	&& s%16 == 0 && s > 16*64 =>
   429  	(LoweredZero
   430  		ptr
   431  		(ADDconst <ptr.Type> [s-16] ptr)
   432  		mem)
   433  
   434  // moves
   435  (Move [0] _   _   mem) => mem
   436  (Move [1] dst src mem) => (MOVBstore dst (MOVBUload src mem) mem)
   437  (Move [2] dst src mem) => (MOVHstore dst (MOVHUload src mem) mem)
   438  (Move [3] dst src mem) =>
   439  	(MOVBstore [2] dst (MOVBUload [2] src mem)
   440  		(MOVHstore dst (MOVHUload src mem) mem))
   441  (Move [4] dst src mem) => (MOVWstore dst (MOVWUload src mem) mem)
   442  (Move [5] dst src mem) =>
   443  	(MOVBstore [4] dst (MOVBUload [4] src mem)
   444  		(MOVWstore dst (MOVWUload src mem) mem))
   445  (Move [6] dst src mem) =>
   446  	(MOVHstore [4] dst (MOVHUload [4] src mem)
   447  		(MOVWstore dst (MOVWUload src mem) mem))
   448  (Move [7] dst src mem) =>
   449  	(MOVWstore [3] dst (MOVWUload [3] src mem)
   450  		(MOVWstore dst (MOVWUload src mem) mem))
   451  (Move [8] dst src mem) => (MOVDstore dst (MOVDload src mem) mem)
   452  (Move [9] dst src mem) =>
   453  	(MOVBstore [8] dst (MOVBUload [8] src mem)
   454  		(MOVDstore dst (MOVDload src mem) mem))
   455  (Move [10] dst src mem) =>
   456  	(MOVHstore [8] dst (MOVHUload [8] src mem)
   457  		(MOVDstore dst (MOVDload src mem) mem))
   458  (Move [11] dst src mem) =>
   459  	(MOVDstore [3] dst (MOVDload [3] src mem)
   460  		(MOVDstore dst (MOVDload src mem) mem))
   461  (Move [12] dst src mem) =>
   462  	(MOVWstore [8] dst (MOVWUload [8] src mem)
   463  		(MOVDstore dst (MOVDload src mem) mem))
   464  (Move [13] dst src mem) =>
   465  	(MOVDstore [5] dst (MOVDload [5] src mem)
   466  		(MOVDstore dst (MOVDload src mem) mem))
   467  (Move [14] dst src mem) =>
   468  	(MOVDstore [6] dst (MOVDload [6] src mem)
   469  		(MOVDstore dst (MOVDload src mem) mem))
   470  (Move [15] dst src mem) =>
   471  	(MOVDstore [7] dst (MOVDload [7] src mem)
   472  		(MOVDstore dst (MOVDload src mem) mem))
   473  (Move [16] dst src mem) =>
   474  	(STP dst (Select0 <typ.UInt64> (LDP src mem)) (Select1 <typ.UInt64> (LDP src mem)) mem)
   475  (Move [32] dst src mem) =>
   476  	(STP [16] dst (Select0 <typ.UInt64> (LDP [16] src mem)) (Select1 <typ.UInt64> (LDP [16] src mem))
   477  		(STP dst (Select0 <typ.UInt64> (LDP src mem)) (Select1 <typ.UInt64> (LDP src mem)) mem))
   478  (Move [48] dst src mem) =>
   479  	(STP [32] dst (Select0 <typ.UInt64> (LDP [32] src mem)) (Select1 <typ.UInt64> (LDP [32] src mem))
   480  		(STP [16] dst (Select0 <typ.UInt64> (LDP [16] src mem)) (Select1 <typ.UInt64> (LDP [16] src mem))
   481  			(STP dst (Select0 <typ.UInt64> (LDP src mem)) (Select1 <typ.UInt64> (LDP src mem)) mem)))
   482  (Move [64] dst src mem) =>
   483  	(STP [48] dst (Select0 <typ.UInt64> (LDP [48] src mem)) (Select1 <typ.UInt64> (LDP [48] src mem))
   484  		(STP [32] dst (Select0 <typ.UInt64> (LDP [32] src mem)) (Select1 <typ.UInt64> (LDP [32] src mem))
   485  			(STP [16] dst (Select0 <typ.UInt64> (LDP [16] src mem)) (Select1 <typ.UInt64> (LDP [16] src mem))
   486  				(STP dst (Select0 <typ.UInt64> (LDP src mem)) (Select1 <typ.UInt64> (LDP src mem)) mem))))
   487  
   488  // strip off fractional word move
   489  (Move [s] dst src mem) && s%16 != 0 && s%16 <= 8 && s > 16 =>
   490  	(Move [8]
   491  		(OffPtr <dst.Type> dst [s-8])
   492  		(OffPtr <src.Type> src [s-8])
   493  		(Move [s-s%16] dst src mem))
   494  (Move [s] dst src mem) && s%16 != 0 && s%16 > 8 && s > 16 =>
   495  	(Move [16]
   496  		(OffPtr <dst.Type> dst [s-16])
   497  		(OffPtr <src.Type> src [s-16])
   498  		(Move [s-s%16] dst src mem))
   499  
   500  // medium-sized moves use a Duff's device
   501  (Move [s] dst src mem)
   502  	&& s > 64 && s <= 16*64 && s%16 == 0
   503  	&& logLargeCopy(v, s) =>
   504  	(DUFFCOPY [8 * (64 - s/16)] dst src mem)
   505  // 8 is the number of bytes to encode:
   506  //
   507  // LDP.P   16(R16), (R26, R27)
   508  // STP.P   (R26, R27), 16(R17)
   509  //
   510  // 64 is the number of these blocks. See runtime/duff_arm64.s:duffcopy.
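        // Worked example: for s == 128, s/16 == 8 LDP/STP pairs are needed, so the call
        // enters duffcopy at byte offset 8*(64-8) == 448, running only its last 8 blocks.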
   511  
   512  // large move uses a loop
   513  (Move [s] dst src mem)
   514  	&& s%16 == 0 && s > 16*64
   515  	&& logLargeCopy(v, s) =>
   516  	(LoweredMove
   517  		dst
   518  		src
   519  		(ADDconst <src.Type> src [s-16])
   520  		mem)
   521  
   522  // calls
   523  (StaticCall  ...) => (CALLstatic  ...)
   524  (ClosureCall ...) => (CALLclosure ...)
   525  (InterCall   ...) => (CALLinter   ...)
   526  (TailCall    ...) => (CALLtail    ...)
   527  
   528  // checks
   529  (NilCheck ...) => (LoweredNilCheck ...)
   530  (IsNonNil ptr) => (NotEqual (CMPconst [0] ptr))
   531  (IsInBounds      idx len) => (LessThanU  (CMP idx len))
   532  (IsSliceInBounds idx len) => (LessEqualU (CMP idx len))
   533  
   534  // pseudo-ops
   535  (GetClosurePtr ...) => (LoweredGetClosurePtr ...)
   536  (GetCallerSP   ...) => (LoweredGetCallerSP   ...)
   537  (GetCallerPC   ...) => (LoweredGetCallerPC   ...)
   538  
   539  // Absorb pseudo-ops into blocks.
   540  (If (Equal         cc) yes no) => (EQ cc yes no)
   541  (If (NotEqual      cc) yes no) => (NE cc yes no)
   542  (If (LessThan      cc) yes no) => (LT cc yes no)
   543  (If (LessThanU     cc) yes no) => (ULT cc yes no)
   544  (If (LessEqual     cc) yes no) => (LE cc yes no)
   545  (If (LessEqualU    cc) yes no) => (ULE cc yes no)
   546  (If (GreaterThan   cc) yes no) => (GT cc yes no)
   547  (If (GreaterThanU  cc) yes no) => (UGT cc yes no)
   548  (If (GreaterEqual  cc) yes no) => (GE cc yes no)
   549  (If (GreaterEqualU cc) yes no) => (UGE cc yes no)
   550  (If (LessThanF     cc) yes no) => (FLT cc yes no)
   551  (If (LessEqualF    cc) yes no) => (FLE cc yes no)
   552  (If (GreaterThanF  cc) yes no) => (FGT cc yes no)
   553  (If (GreaterEqualF cc) yes no) => (FGE cc yes no)
   554  
   555  (If cond yes no) => (TBNZ [0] cond yes no)
   556  
   557  (JumpTable idx) => (JUMPTABLE {makeJumpTableSym(b)} idx (MOVDaddr <typ.Uintptr> {makeJumpTableSym(b)} (SB)))
   558  
   559  // atomic intrinsics
   560  // Note: these ops do not accept an offset.
   561  (AtomicLoad8   ...) => (LDARB ...)
   562  (AtomicLoad32  ...) => (LDARW ...)
   563  (AtomicLoad64  ...) => (LDAR  ...)
   564  (AtomicLoadPtr ...) => (LDAR  ...)
   565  
   566  (AtomicStore8       ...) => (STLRB ...)
   567  (AtomicStore32      ...) => (STLRW ...)
   568  (AtomicStore64      ...) => (STLR  ...)
   569  (AtomicStorePtrNoWB ...) => (STLR  ...)
   570  
   571  (AtomicExchange(8|32|64)       ...) => (LoweredAtomicExchange(8|32|64) ...)
   572  (AtomicAdd(32|64)            ...) => (LoweredAtomicAdd(32|64)      ...)
   573  (AtomicCompareAndSwap(32|64) ...) => (LoweredAtomicCas(32|64)      ...)
   574  
   575  (AtomicAdd(32|64)Variant            ...) => (LoweredAtomicAdd(32|64)Variant      ...)
   576  (AtomicExchange(8|32|64)Variant       ...) => (LoweredAtomicExchange(8|32|64)Variant ...)
   577  (AtomicCompareAndSwap(32|64)Variant ...) => (LoweredAtomicCas(32|64)Variant      ...)
   578  
   579  // Return old contents.
   580  (AtomicAnd(64|32|8)value            ...) => (LoweredAtomicAnd(64|32|8)            ...)
   581  (AtomicOr(64|32|8)value             ...) => (LoweredAtomicOr(64|32|8)             ...)
   582  (AtomicAnd(64|32|8)valueVariant     ...) => (LoweredAtomicAnd(64|32|8)Variant     ...)
   583  (AtomicOr(64|32|8)valueVariant      ...) => (LoweredAtomicOr(64|32|8)Variant      ...)
   584  
   585  // Write barrier.
   586  (WB ...) => (LoweredWB ...)
   587  
   588  // Publication barrier (0xe is ST option)
   589  (PubBarrier mem) => (DMB [0xe] mem)
   590  
   591  (PanicBounds [kind] x y mem) && boundsABI(kind) == 0 => (LoweredPanicBoundsA [kind] x y mem)
   592  (PanicBounds [kind] x y mem) && boundsABI(kind) == 1 => (LoweredPanicBoundsB [kind] x y mem)
   593  (PanicBounds [kind] x y mem) && boundsABI(kind) == 2 => (LoweredPanicBoundsC [kind] x y mem)
   594  
   595  // Optimizations
   596  
   597  // Absorb boolean tests into blocks
   598  (NZ (Equal         cc) yes no) => (EQ  cc yes no)
   599  (NZ (NotEqual      cc) yes no) => (NE  cc yes no)
   600  (NZ (LessThan      cc) yes no) => (LT  cc yes no)
   601  (NZ (LessThanU     cc) yes no) => (ULT cc yes no)
   602  (NZ (LessEqual     cc) yes no) => (LE  cc yes no)
   603  (NZ (LessEqualU    cc) yes no) => (ULE cc yes no)
   604  (NZ (GreaterThan   cc) yes no) => (GT  cc yes no)
   605  (NZ (GreaterThanU  cc) yes no) => (UGT cc yes no)
   606  (NZ (GreaterEqual  cc) yes no) => (GE  cc yes no)
   607  (NZ (GreaterEqualU cc) yes no) => (UGE cc yes no)
   608  (NZ (LessThanF     cc) yes no) => (FLT cc yes no)
   609  (NZ (LessEqualF    cc) yes no) => (FLE cc yes no)
   610  (NZ (GreaterThanF  cc) yes no) => (FGT cc yes no)
   611  (NZ (GreaterEqualF cc) yes no) => (FGE cc yes no)
   612  
   613  (TBNZ [0] (Equal         cc) yes no) => (EQ  cc yes no)
   614  (TBNZ [0] (NotEqual      cc) yes no) => (NE  cc yes no)
   615  (TBNZ [0] (LessThan      cc) yes no) => (LT  cc yes no)
   616  (TBNZ [0] (LessThanU     cc) yes no) => (ULT cc yes no)
   617  (TBNZ [0] (LessEqual     cc) yes no) => (LE  cc yes no)
   618  (TBNZ [0] (LessEqualU    cc) yes no) => (ULE cc yes no)
   619  (TBNZ [0] (GreaterThan   cc) yes no) => (GT  cc yes no)
   620  (TBNZ [0] (GreaterThanU  cc) yes no) => (UGT cc yes no)
   621  (TBNZ [0] (GreaterEqual  cc) yes no) => (GE  cc yes no)
   622  (TBNZ [0] (GreaterEqualU cc) yes no) => (UGE cc yes no)
   623  (TBNZ [0] (LessThanF     cc) yes no) => (FLT cc yes no)
   624  (TBNZ [0] (LessEqualF    cc) yes no) => (FLE cc yes no)
   625  (TBNZ [0] (GreaterThanF  cc) yes no) => (FGT cc yes no)
   626  (TBNZ [0] (GreaterEqualF cc) yes no) => (FGE cc yes no)
   627  
   628  ((EQ|NE|LT|LE|GT|GE) (CMPconst  [0] z:(AND        x y)) yes no) && z.Uses == 1 => ((EQ|NE|LT|LE|GT|GE) (TST                x y) yes no)
   629  ((EQ|NE|LT|LE|GT|GE) (CMPconst  [0] x:(ANDconst [c] y)) yes no) && x.Uses == 1 => ((EQ|NE|LT|LE|GT|GE) (TSTconst         [c] y) yes no)
   630  ((EQ|NE|LT|LE|GT|GE) (CMPWconst [0] z:(AND        x y)) yes no) && z.Uses == 1 => ((EQ|NE|LT|LE|GT|GE) (TSTW               x y) yes no)
   631  ((EQ|NE|LT|LE|GT|GE) (CMPWconst [0] x:(ANDconst [c] y)) yes no) && x.Uses == 1 => ((EQ|NE|LT|LE|GT|GE) (TSTWconst [int32(c)] y) yes no)
   632  
   633  // For conditional instructions such as CSET, CSEL.
   634  ((Equal|NotEqual|LessThan|LessEqual|GreaterThan|GreaterEqual) (CMPconst [0]  z:(AND        x y))) && z.Uses == 1 =>
   635  	((Equal|NotEqual|LessThan|LessEqual|GreaterThan|GreaterEqual) (TST x y))
   636  ((Equal|NotEqual|LessThan|LessEqual|GreaterThan|GreaterEqual) (CMPWconst [0] x:(ANDconst [c] y))) && x.Uses == 1 =>
   637  	((Equal|NotEqual|LessThan|LessEqual|GreaterThan|GreaterEqual) (TSTWconst [int32(c)] y))
   638  ((Equal|NotEqual|LessThan|LessEqual|GreaterThan|GreaterEqual) (CMPWconst [0] z:(AND        x y))) && z.Uses == 1 =>
   639  	((Equal|NotEqual|LessThan|LessEqual|GreaterThan|GreaterEqual) (TSTW x y))
   640  ((Equal|NotEqual|LessThan|LessEqual|GreaterThan|GreaterEqual) (CMPconst [0]  x:(ANDconst [c] y))) && x.Uses == 1 =>
   641  	((Equal|NotEqual|LessThan|LessEqual|GreaterThan|GreaterEqual) (TSTconst [c] y))
   642  
   643  ((EQ|NE|LT|LE|GT|GE) (CMPconst  [0] x:(ADDconst [c] y)) yes no) && x.Uses == 1 => ((EQ|NE|LTnoov|LEnoov|GTnoov|GEnoov) (CMNconst         [c] y) yes no)
   644  ((EQ|NE|LT|LE|GT|GE) (CMPWconst [0] x:(ADDconst [c] y)) yes no) && x.Uses == 1 => ((EQ|NE|LTnoov|LEnoov|GTnoov|GEnoov) (CMNWconst [int32(c)] y) yes no)
   645  ((EQ|NE|LT|LE|GT|GE) (CMPconst  [0] z:(ADD        x y)) yes no) && z.Uses == 1 => ((EQ|NE|LTnoov|LEnoov|GTnoov|GEnoov) (CMN                x y) yes no)
   646  ((EQ|NE|LT|LE|GT|GE) (CMPWconst [0] z:(ADD        x y)) yes no) && z.Uses == 1 => ((EQ|NE|LTnoov|LEnoov|GTnoov|GEnoov) (CMNW               x y) yes no)
   647  
   648  // CMP(x,-y) -> CMN(x,y) is only valid for ==/!= comparisons, because y may be -1<<63 and negating it overflows.
   649  ((EQ|NE) (CMP x z:(NEG y)) yes no)   && z.Uses == 1 => ((EQ|NE) (CMN x y) yes no)
   650  ((Equal|NotEqual) (CMP x z:(NEG y))) && z.Uses == 1 => ((Equal|NotEqual) (CMN x y))
   651  
   652  // CMPW(x,-y) -> CMNW(x,y) is only valid for ==/!= comparisons, because y may be -1<<31 and negating it overflows.
   653  ((EQ|NE) (CMPW x z:(NEG y)) yes no)   && z.Uses == 1 => ((EQ|NE) (CMNW x y) yes no)
   654  ((Equal|NotEqual) (CMPW x z:(NEG y))) && z.Uses == 1 => ((Equal|NotEqual) (CMNW x y))
   655  
   656  // For conditional instructions such as CSET, CSEL.
   657  // TODO: add support for LE and GT; overflow needs to be considered.
   658  ((Equal|NotEqual|LessThan|GreaterEqual) (CMPconst  [0] x:(ADDconst [c] y))) && x.Uses == 1 => ((Equal|NotEqual|LessThanNoov|GreaterEqualNoov) (CMNconst [c] y))
   659  ((Equal|NotEqual|LessThan|GreaterEqual) (CMPWconst [0] x:(ADDconst [c] y))) && x.Uses == 1 => ((Equal|NotEqual|LessThanNoov|GreaterEqualNoov) (CMNWconst [int32(c)] y))
   660  ((Equal|NotEqual|LessThan|GreaterEqual) (CMPconst  [0] z:(ADD        x y))) && z.Uses == 1 => ((Equal|NotEqual|LessThanNoov|GreaterEqualNoov) (CMN  x y))
   661  ((Equal|NotEqual|LessThan|GreaterEqual) (CMPWconst [0] z:(ADD        x y))) && z.Uses == 1 => ((Equal|NotEqual|LessThanNoov|GreaterEqualNoov) (CMNW x y))
   662  ((Equal|NotEqual|LessThan|GreaterEqual) (CMPconst  [0] z:(MADD     a x y))) && z.Uses == 1 => ((Equal|NotEqual|LessThanNoov|GreaterEqualNoov) (CMN  a (MUL  <x.Type> x y)))
   663  ((Equal|NotEqual|LessThan|GreaterEqual) (CMPconst  [0] z:(MSUB     a x y))) && z.Uses == 1 => ((Equal|NotEqual|LessThanNoov|GreaterEqualNoov) (CMP  a (MUL  <x.Type> x y)))
   664  ((Equal|NotEqual|LessThan|GreaterEqual) (CMPWconst [0] z:(MADDW    a x y))) && z.Uses == 1 => ((Equal|NotEqual|LessThanNoov|GreaterEqualNoov) (CMNW a (MULW <x.Type> x y)))
   665  ((Equal|NotEqual|LessThan|GreaterEqual) (CMPWconst [0] z:(MSUBW    a x y))) && z.Uses == 1 => ((Equal|NotEqual|LessThanNoov|GreaterEqualNoov) (CMPW a (MULW <x.Type> x y)))
   666  
   667  ((CMPconst|CMNconst)   [c] y) && c < 0 && c != -1<<63 => ((CMNconst|CMPconst)   [-c] y)
   668  ((CMPWconst|CMNWconst) [c] y) && c < 0 && c != -1<<31 => ((CMNWconst|CMPWconst) [-c] y)
   669  
   670  ((EQ|NE) (CMPconst  [0] x) yes no) => ((Z|NZ)   x yes no)
   671  ((EQ|NE) (CMPWconst [0] x) yes no) => ((ZW|NZW) x yes no)
   672  
   673  ((EQ|NE|LT|LE|GT|GE) (CMPconst  [0] z:(MADD a x y))  yes no) && z.Uses==1 => ((EQ|NE|LTnoov|LEnoov|GTnoov|GEnoov) (CMN  a (MUL  <x.Type> x y)) yes no)
   674  ((EQ|NE|LT|LE|GT|GE) (CMPconst  [0] z:(MSUB a x y))  yes no) && z.Uses==1 => ((EQ|NE|LTnoov|LEnoov|GTnoov|GEnoov) (CMP  a (MUL  <x.Type> x y)) yes no)
   675  ((EQ|NE|LT|LE|GT|GE) (CMPWconst [0] z:(MADDW a x y)) yes no) && z.Uses==1 => ((EQ|NE|LTnoov|LEnoov|GTnoov|GEnoov) (CMNW a (MULW <x.Type> x y)) yes no)
   676  ((EQ|NE|LT|LE|GT|GE) (CMPWconst [0] z:(MSUBW a x y)) yes no) && z.Uses==1 => ((EQ|NE|LTnoov|LEnoov|GTnoov|GEnoov) (CMPW a (MULW <x.Type> x y)) yes no)
   677  
   678  // Absorb bit-tests into blocks
   679  (Z   (ANDconst  [c] x) yes no) && oneBit(c) => (TBZ  [int64(ntz64(c))] x yes no)
   680  (NZ  (ANDconst  [c] x) yes no) && oneBit(c) => (TBNZ [int64(ntz64(c))] x yes no)
   681  (ZW  (ANDconst  [c] x) yes no) && oneBit(int64(uint32(c))) => (TBZ  [int64(ntz64(int64(uint32(c))))] x yes no)
   682  (NZW (ANDconst  [c] x) yes no) && oneBit(int64(uint32(c))) => (TBNZ [int64(ntz64(int64(uint32(c))))] x yes no)
   683  (EQ  (TSTconst  [c] x) yes no) && oneBit(c) => (TBZ  [int64(ntz64(c))] x yes no)
   684  (NE  (TSTconst  [c] x) yes no) && oneBit(c) => (TBNZ [int64(ntz64(c))] x yes no)
   685  (EQ  (TSTWconst [c] x) yes no) && oneBit(int64(uint32(c))) => (TBZ  [int64(ntz64(int64(uint32(c))))] x yes no)
   686  (NE  (TSTWconst [c] x) yes no) && oneBit(int64(uint32(c))) => (TBNZ [int64(ntz64(int64(uint32(c))))] x yes no)
   687  
   688  // Test sign-bit for signed comparisons against zero
   689  (GE (CMPWconst [0] x) yes no) => (TBZ  [31] x yes no)
   690  (GE (CMPconst [0] x)  yes no) => (TBZ  [63] x yes no)
   691  (LT (CMPWconst [0] x) yes no) => (TBNZ [31] x yes no)
   692  (LT (CMPconst [0] x)  yes no) => (TBNZ [63] x yes no)
   693  
   694  // fold offset into address
   695  (ADDconst [off1] (MOVDaddr [off2] {sym} ptr)) && is32Bit(off1+int64(off2)) =>
   696  	 (MOVDaddr [int32(off1)+off2] {sym} ptr)
   697  
   698  // fold address into load/store.
   699  // Do not fold global variable access in -dynlink mode, where it will
   700  // be rewritten to use the GOT via REGTMP, which currently cannot handle
   701  // large offsets.
   702  (MOVBload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
   703  	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
   704  	(MOVBload [off1+int32(off2)] {sym} ptr mem)
   705  (MOVBUload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
   706  	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
   707  	(MOVBUload [off1+int32(off2)] {sym} ptr mem)
   708  (MOVHload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
   709  	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
   710  	(MOVHload [off1+int32(off2)] {sym} ptr mem)
   711  (MOVHUload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
   712  	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
   713  	(MOVHUload [off1+int32(off2)] {sym} ptr mem)
   714  (MOVWload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
   715  	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
   716  	(MOVWload [off1+int32(off2)] {sym} ptr mem)
   717  (MOVWUload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
   718  	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
   719  	(MOVWUload [off1+int32(off2)] {sym} ptr mem)
   720  (MOVDload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
   721  	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
   722  	(MOVDload [off1+int32(off2)] {sym} ptr mem)
   723  (LDP [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
   724  	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
   725  	(LDP [off1+int32(off2)] {sym} ptr mem)
   726  (FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
   727  	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
   728  	(FMOVSload [off1+int32(off2)] {sym} ptr mem)
   729  (FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
   730  	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
   731  	(FMOVDload [off1+int32(off2)] {sym} ptr mem)
   732  
   733  // register indexed load
   734  (MOVDload  [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (MOVDloadidx ptr idx mem)
   735  (MOVWUload [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (MOVWUloadidx ptr idx mem)
   736  (MOVWload  [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (MOVWloadidx ptr idx mem)
   737  (MOVHUload [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (MOVHUloadidx ptr idx mem)
   738  (MOVHload  [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (MOVHloadidx ptr idx mem)
   739  (MOVBUload [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (MOVBUloadidx ptr idx mem)
   740  (MOVBload  [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (MOVBloadidx ptr idx mem)
   741  (FMOVSload [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (FMOVSloadidx ptr idx mem)
   742  (FMOVDload [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (FMOVDloadidx ptr idx mem)
   743  
   744  (MOVDloadidx  ptr (MOVDconst [c]) mem) && is32Bit(c) => (MOVDload  [int32(c)] ptr mem)
   745  (MOVDloadidx  (MOVDconst [c]) ptr mem) && is32Bit(c) => (MOVDload  [int32(c)] ptr mem)
   746  (MOVWUloadidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (MOVWUload [int32(c)] ptr mem)
   747  (MOVWUloadidx (MOVDconst [c]) ptr mem) && is32Bit(c) => (MOVWUload [int32(c)] ptr mem)
   748  (MOVWloadidx  ptr (MOVDconst [c]) mem) && is32Bit(c) => (MOVWload  [int32(c)] ptr mem)
   749  (MOVWloadidx  (MOVDconst [c]) ptr mem) && is32Bit(c) => (MOVWload  [int32(c)] ptr mem)
   750  (MOVHUloadidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (MOVHUload [int32(c)] ptr mem)
   751  (MOVHUloadidx (MOVDconst [c]) ptr mem) && is32Bit(c) => (MOVHUload [int32(c)] ptr mem)
   752  (MOVHloadidx  ptr (MOVDconst [c]) mem) && is32Bit(c) => (MOVHload  [int32(c)] ptr mem)
   753  (MOVHloadidx  (MOVDconst [c]) ptr mem) && is32Bit(c) => (MOVHload  [int32(c)] ptr mem)
   754  (MOVBUloadidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (MOVBUload [int32(c)] ptr mem)
   755  (MOVBUloadidx (MOVDconst [c]) ptr mem) && is32Bit(c) => (MOVBUload [int32(c)] ptr mem)
   756  (MOVBloadidx  ptr (MOVDconst [c]) mem) && is32Bit(c) => (MOVBload  [int32(c)] ptr mem)
   757  (MOVBloadidx  (MOVDconst [c]) ptr mem) && is32Bit(c) => (MOVBload  [int32(c)] ptr mem)
   758  (FMOVSloadidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (FMOVSload [int32(c)] ptr mem)
   759  (FMOVSloadidx (MOVDconst [c]) ptr mem) && is32Bit(c) => (FMOVSload [int32(c)] ptr mem)
   760  (FMOVDloadidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (FMOVDload [int32(c)] ptr mem)
   761  (FMOVDloadidx (MOVDconst [c]) ptr mem) && is32Bit(c) => (FMOVDload [int32(c)] ptr mem)
   762  
   763  // shifted register indexed load
   764  (MOVDload  [off] {sym} (ADDshiftLL [3] ptr idx) mem) && off == 0 && sym == nil => (MOVDloadidx8 ptr idx mem)
   765  (MOVWUload [off] {sym} (ADDshiftLL [2] ptr idx) mem) && off == 0 && sym == nil => (MOVWUloadidx4 ptr idx mem)
   766  (MOVWload  [off] {sym} (ADDshiftLL [2] ptr idx) mem) && off == 0 && sym == nil => (MOVWloadidx4 ptr idx mem)
   767  (MOVHUload [off] {sym} (ADDshiftLL [1] ptr idx) mem) && off == 0 && sym == nil => (MOVHUloadidx2 ptr idx mem)
   768  (MOVHload  [off] {sym} (ADDshiftLL [1] ptr idx) mem) && off == 0 && sym == nil => (MOVHloadidx2 ptr idx mem)
   769  (MOVDloadidx  ptr (SLLconst [3] idx) mem) => (MOVDloadidx8 ptr idx mem)
   770  (MOVWloadidx  ptr (SLLconst [2] idx) mem) => (MOVWloadidx4 ptr idx mem)
   771  (MOVWUloadidx ptr (SLLconst [2] idx) mem) => (MOVWUloadidx4 ptr idx mem)
   772  (MOVHloadidx  ptr (SLLconst [1] idx) mem) => (MOVHloadidx2 ptr idx mem)
   773  (MOVHUloadidx ptr (SLLconst [1] idx) mem) => (MOVHUloadidx2 ptr idx mem)
   774  (MOVHloadidx  ptr (ADD idx idx) mem) => (MOVHloadidx2 ptr idx mem)
   775  (MOVHUloadidx ptr (ADD idx idx) mem) => (MOVHUloadidx2 ptr idx mem)
   776  (MOVDloadidx  (SLLconst [3] idx) ptr mem) => (MOVDloadidx8 ptr idx mem)
   777  (MOVWloadidx  (SLLconst [2] idx) ptr mem) => (MOVWloadidx4 ptr idx mem)
   778  (MOVWUloadidx (SLLconst [2] idx) ptr mem) => (MOVWUloadidx4 ptr idx mem)
   779  (MOVHloadidx  (ADD idx idx) ptr mem) => (MOVHloadidx2 ptr idx mem)
   780  (MOVHUloadidx (ADD idx idx) ptr mem) => (MOVHUloadidx2 ptr idx mem)
   781  (MOVDloadidx8  ptr (MOVDconst [c]) mem) && is32Bit(c<<3) => (MOVDload  [int32(c)<<3] ptr mem)
   782  (MOVWUloadidx4 ptr (MOVDconst [c]) mem) && is32Bit(c<<2) => (MOVWUload [int32(c)<<2] ptr mem)
   783  (MOVWloadidx4  ptr (MOVDconst [c]) mem) && is32Bit(c<<2) => (MOVWload  [int32(c)<<2] ptr mem)
   784  (MOVHUloadidx2 ptr (MOVDconst [c]) mem) && is32Bit(c<<1) => (MOVHUload [int32(c)<<1] ptr mem)
   785  (MOVHloadidx2  ptr (MOVDconst [c]) mem) && is32Bit(c<<1) => (MOVHload  [int32(c)<<1] ptr mem)
   786  
   787  (FMOVDload [off] {sym} (ADDshiftLL [3] ptr idx) mem) && off == 0 && sym == nil => (FMOVDloadidx8 ptr idx mem)
   788  (FMOVSload [off] {sym} (ADDshiftLL [2] ptr idx) mem) && off == 0 && sym == nil => (FMOVSloadidx4 ptr idx mem)
   789  (FMOVDloadidx ptr (SLLconst [3] idx) mem) => (FMOVDloadidx8 ptr idx mem)
   790  (FMOVSloadidx ptr (SLLconst [2] idx) mem) => (FMOVSloadidx4 ptr idx mem)
   791  (FMOVDloadidx (SLLconst [3] idx) ptr mem) => (FMOVDloadidx8 ptr idx mem)
   792  (FMOVSloadidx (SLLconst [2] idx) ptr mem) => (FMOVSloadidx4 ptr idx mem)
   793  (FMOVDloadidx8 ptr (MOVDconst [c]) mem) && is32Bit(c<<3) => (FMOVDload ptr [int32(c)<<3] mem)
   794  (FMOVSloadidx4 ptr (MOVDconst [c]) mem) && is32Bit(c<<2) => (FMOVSload ptr [int32(c)<<2] mem)
   795  
   796  (MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2)
   797  	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
   798  	(MOVBstore [off1+int32(off2)] {sym} ptr val mem)
   799  (MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2)
   800  	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
   801  	(MOVHstore [off1+int32(off2)] {sym} ptr val mem)
   802  (MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2)
   803  	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
   804  	(MOVWstore [off1+int32(off2)] {sym} ptr val mem)
   805  (MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2)
   806  	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
   807  	(MOVDstore [off1+int32(off2)] {sym} ptr val mem)
   808  (STP [off1] {sym} (ADDconst [off2] ptr) val1 val2 mem) && is32Bit(int64(off1)+off2)
   809  	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
   810  	(STP [off1+int32(off2)] {sym} ptr val1 val2 mem)
   811  (FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2)
   812  	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
   813  	(FMOVSstore [off1+int32(off2)] {sym} ptr val mem)
   814  (FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2)
   815  	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
   816  	(FMOVDstore [off1+int32(off2)] {sym} ptr val mem)
   817  
   818  // register indexed store
   819  (MOVDstore  [off] {sym} (ADD ptr idx) val mem) && off == 0 && sym == nil => (MOVDstoreidx ptr idx val mem)
   820  (MOVWstore  [off] {sym} (ADD ptr idx) val mem) && off == 0 && sym == nil => (MOVWstoreidx ptr idx val mem)
   821  (MOVHstore  [off] {sym} (ADD ptr idx) val mem) && off == 0 && sym == nil => (MOVHstoreidx ptr idx val mem)
   822  (MOVBstore  [off] {sym} (ADD ptr idx) val mem) && off == 0 && sym == nil => (MOVBstoreidx ptr idx val mem)
   823  (FMOVDstore [off] {sym} (ADD ptr idx) val mem) && off == 0 && sym == nil => (FMOVDstoreidx ptr idx val mem)
   824  (FMOVSstore [off] {sym} (ADD ptr idx) val mem) && off == 0 && sym == nil => (FMOVSstoreidx ptr idx val mem)
   825  (MOVDstoreidx  ptr (MOVDconst [c]) val mem) && is32Bit(c) => (MOVDstore  [int32(c)] ptr val mem)
   826  (MOVDstoreidx  (MOVDconst [c]) idx val mem) && is32Bit(c) => (MOVDstore  [int32(c)] idx val mem)
   827  (MOVWstoreidx  ptr (MOVDconst [c]) val mem) && is32Bit(c) => (MOVWstore  [int32(c)] ptr val mem)
   828  (MOVWstoreidx  (MOVDconst [c]) idx val mem) && is32Bit(c) => (MOVWstore  [int32(c)] idx val mem)
   829  (MOVHstoreidx  ptr (MOVDconst [c]) val mem) && is32Bit(c) => (MOVHstore  [int32(c)] ptr val mem)
   830  (MOVHstoreidx  (MOVDconst [c]) idx val mem) && is32Bit(c) => (MOVHstore  [int32(c)] idx val mem)
   831  (MOVBstoreidx  ptr (MOVDconst [c]) val mem) && is32Bit(c) => (MOVBstore  [int32(c)] ptr val mem)
   832  (MOVBstoreidx  (MOVDconst [c]) idx val mem) && is32Bit(c) => (MOVBstore  [int32(c)] idx val mem)
   833  (FMOVDstoreidx ptr (MOVDconst [c]) val mem) && is32Bit(c) => (FMOVDstore [int32(c)] ptr val mem)
   834  (FMOVDstoreidx (MOVDconst [c]) idx val mem) && is32Bit(c) => (FMOVDstore [int32(c)] idx val mem)
   835  (FMOVSstoreidx ptr (MOVDconst [c]) val mem) && is32Bit(c) => (FMOVSstore [int32(c)] ptr val mem)
   836  (FMOVSstoreidx (MOVDconst [c]) idx val mem) && is32Bit(c) => (FMOVSstore [int32(c)] idx val mem)
   837  
   838  // shifted register indexed store
   839  (MOVDstore [off] {sym} (ADDshiftLL [3] ptr idx) val mem) && off == 0 && sym == nil => (MOVDstoreidx8 ptr idx val mem)
   840  (MOVWstore [off] {sym} (ADDshiftLL [2] ptr idx) val mem) && off == 0 && sym == nil => (MOVWstoreidx4 ptr idx val mem)
   841  (MOVHstore [off] {sym} (ADDshiftLL [1] ptr idx) val mem) && off == 0 && sym == nil => (MOVHstoreidx2 ptr idx val mem)
   842  (MOVDstoreidx  ptr (SLLconst [3] idx) val mem) => (MOVDstoreidx8 ptr idx val mem)
   843  (MOVWstoreidx  ptr (SLLconst [2] idx) val mem) => (MOVWstoreidx4 ptr idx val mem)
   844  (MOVHstoreidx  ptr (SLLconst [1] idx) val mem) => (MOVHstoreidx2 ptr idx val mem)
   845  (MOVHstoreidx  ptr (ADD      idx idx) val mem) => (MOVHstoreidx2 ptr idx val mem)
   846  (MOVDstoreidx  (SLLconst [3] idx) ptr val mem) => (MOVDstoreidx8 ptr idx val mem)
   847  (MOVWstoreidx  (SLLconst [2] idx) ptr val mem) => (MOVWstoreidx4 ptr idx val mem)
   848  (MOVHstoreidx  (SLLconst [1] idx) ptr val mem) => (MOVHstoreidx2 ptr idx val mem)
   849  (MOVHstoreidx  (ADD      idx idx) ptr val mem) => (MOVHstoreidx2 ptr idx val mem)
   850  (MOVDstoreidx8 ptr (MOVDconst [c]) val mem) && is32Bit(c<<3) => (MOVDstore [int32(c)<<3] ptr val mem)
   851  (MOVWstoreidx4 ptr (MOVDconst [c]) val mem) && is32Bit(c<<2) => (MOVWstore [int32(c)<<2] ptr val mem)
   852  (MOVHstoreidx2 ptr (MOVDconst [c]) val mem) && is32Bit(c<<1) => (MOVHstore [int32(c)<<1] ptr val mem)
   853  
   854  (FMOVDstore [off] {sym} (ADDshiftLL [3] ptr idx) val mem) && off == 0 && sym == nil => (FMOVDstoreidx8 ptr idx val mem)
   855  (FMOVSstore [off] {sym} (ADDshiftLL [2] ptr idx) val mem) && off == 0 && sym == nil => (FMOVSstoreidx4 ptr idx val mem)
   856  (FMOVDstoreidx ptr (SLLconst [3] idx) val mem) => (FMOVDstoreidx8 ptr idx val mem)
   857  (FMOVSstoreidx ptr (SLLconst [2] idx) val mem) => (FMOVSstoreidx4 ptr idx val mem)
   858  (FMOVDstoreidx (SLLconst [3] idx) ptr val mem) => (FMOVDstoreidx8 ptr idx val mem)
   859  (FMOVSstoreidx (SLLconst [2] idx) ptr val mem) => (FMOVSstoreidx4 ptr idx val mem)
   860  (FMOVDstoreidx8 ptr (MOVDconst [c]) val mem) && is32Bit(c<<3) => (FMOVDstore [int32(c)<<3] ptr val mem)
   861  (FMOVSstoreidx4 ptr (MOVDconst [c]) val mem) && is32Bit(c<<2) => (FMOVSstore [int32(c)<<2] ptr val mem)
   862  
   863  (MOVBload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
   864  	&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
   865  	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
   866  	(MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
   867  (MOVBUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
   868  	&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
   869  	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
   870  	(MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
   871  (MOVHload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
   872  	&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
   873  	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
   874  	(MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
   875  (MOVHUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
   876  	&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
   877  	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
   878  	(MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
   879  (MOVWload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
   880  	&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
   881  	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
   882  	(MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
   883  (MOVWUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
   884  	&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
   885  	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
   886  	(MOVWUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
   887  (MOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
   888  	&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
   889  	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
   890  	(MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
   891  (LDP [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
   892  	&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
   893  	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
   894  	(LDP [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
   895  (FMOVSload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
   896  	&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
   897  	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
   898  	(FMOVSload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
   899  (FMOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
   900  	&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
   901  	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
   902  	(FMOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
   903  
   904  (MOVBstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
   905  	&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
   906  	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
   907  	(MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
   908  (MOVHstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
   909  	&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
   910  	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
   911  	(MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
   912  (MOVWstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
   913  	&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
   914  	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
   915  	(MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
   916  (MOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
   917  	&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
   918  	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
   919  	(MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
   920  (STP [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val1 val2 mem)
   921  	&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
   922  	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
   923  	(STP [off1+off2] {mergeSym(sym1,sym2)} ptr val1 val2 mem)
   924  (FMOVSstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
   925  	&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
   926  	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
   927  	(FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
   928  (FMOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
   929  	&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
   930  	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
   931  	(FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
   932  
   933  // replace load from same location as preceding store with zero/sign extension (or copy in case of full width)
    934  // these rules seem to interact badly with other rules, resulting in slower code, so they are left disabled
   935  //(MOVBload  [off] {sym} ptr (MOVBstore  [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVBreg x)
   936  //(MOVBUload [off] {sym} ptr (MOVBstore  [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVBUreg x)
   937  //(MOVHload  [off] {sym} ptr (MOVHstore  [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVHreg x)
   938  //(MOVHUload [off] {sym} ptr (MOVHstore  [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVHUreg x)
   939  //(MOVWload  [off] {sym} ptr (MOVWstore  [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVWreg x)
   940  //(MOVWUload [off] {sym} ptr (MOVWstore  [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVWUreg x)
   941  //(MOVDload  [off] {sym} ptr (MOVDstore  [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
   942  //(FMOVSload [off] {sym} ptr (FMOVSstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
   943  //(FMOVDload [off] {sym} ptr (FMOVDstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
   944  //(LDP       [off] {sym} ptr (STP      [off2] {sym2} ptr2 x y _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x y
   945  
   946  // don't extend before store
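        // For example, (MOVBstore [off] {sym} ptr (MOVWreg x) mem) writes only the low 8
        // bits of x, and those bits are unchanged by the sign extension, so the MOVWreg can
        // be dropped and x stored directly.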
   947  (MOVBstore [off] {sym} ptr (MOVBreg  x) mem) => (MOVBstore [off] {sym} ptr x mem)
   948  (MOVBstore [off] {sym} ptr (MOVBUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
   949  (MOVBstore [off] {sym} ptr (MOVHreg  x) mem) => (MOVBstore [off] {sym} ptr x mem)
   950  (MOVBstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
   951  (MOVBstore [off] {sym} ptr (MOVWreg  x) mem) => (MOVBstore [off] {sym} ptr x mem)
   952  (MOVBstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
   953  (MOVHstore [off] {sym} ptr (MOVHreg  x) mem) => (MOVHstore [off] {sym} ptr x mem)
   954  (MOVHstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
   955  (MOVHstore [off] {sym} ptr (MOVWreg  x) mem) => (MOVHstore [off] {sym} ptr x mem)
   956  (MOVHstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
   957  (MOVWstore [off] {sym} ptr (MOVWreg  x) mem) => (MOVWstore [off] {sym} ptr x mem)
   958  (MOVWstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVWstore [off] {sym} ptr x mem)
   959  (MOVBstoreidx  ptr idx (MOVBreg  x) mem) => (MOVBstoreidx  ptr idx x mem)
   960  (MOVBstoreidx  ptr idx (MOVBUreg x) mem) => (MOVBstoreidx  ptr idx x mem)
   961  (MOVBstoreidx  ptr idx (MOVHreg  x) mem) => (MOVBstoreidx  ptr idx x mem)
   962  (MOVBstoreidx  ptr idx (MOVHUreg x) mem) => (MOVBstoreidx  ptr idx x mem)
   963  (MOVBstoreidx  ptr idx (MOVWreg  x) mem) => (MOVBstoreidx  ptr idx x mem)
   964  (MOVBstoreidx  ptr idx (MOVWUreg x) mem) => (MOVBstoreidx  ptr idx x mem)
   965  (MOVHstoreidx  ptr idx (MOVHreg  x) mem) => (MOVHstoreidx  ptr idx x mem)
   966  (MOVHstoreidx  ptr idx (MOVHUreg x) mem) => (MOVHstoreidx  ptr idx x mem)
   967  (MOVHstoreidx  ptr idx (MOVWreg  x) mem) => (MOVHstoreidx  ptr idx x mem)
   968  (MOVHstoreidx  ptr idx (MOVWUreg x) mem) => (MOVHstoreidx  ptr idx x mem)
   969  (MOVWstoreidx  ptr idx (MOVWreg  x) mem) => (MOVWstoreidx  ptr idx x mem)
   970  (MOVWstoreidx  ptr idx (MOVWUreg x) mem) => (MOVWstoreidx  ptr idx x mem)
   971  (MOVHstoreidx2 ptr idx (MOVHreg  x) mem) => (MOVHstoreidx2 ptr idx x mem)
   972  (MOVHstoreidx2 ptr idx (MOVHUreg x) mem) => (MOVHstoreidx2 ptr idx x mem)
   973  (MOVHstoreidx2 ptr idx (MOVWreg  x) mem) => (MOVHstoreidx2 ptr idx x mem)
   974  (MOVHstoreidx2 ptr idx (MOVWUreg x) mem) => (MOVHstoreidx2 ptr idx x mem)
   975  (MOVWstoreidx4 ptr idx (MOVWreg  x) mem) => (MOVWstoreidx4 ptr idx x mem)
   976  (MOVWstoreidx4 ptr idx (MOVWUreg x) mem) => (MOVWstoreidx4 ptr idx x mem)
   977  
    978  // if a register move has only 1 use, just use the same register without emitting an instruction
    979  // MOVDnop doesn't emit an instruction; it exists only to ensure the type.
   980  (MOVDreg x) && x.Uses == 1 => (MOVDnop x)
   981  
    982  // TODO: we should be able to get rid of MOVDnop altogether.
   983  // But for now, this is enough to get rid of lots of them.
   984  (MOVDnop (MOVDconst [c])) => (MOVDconst [c])
   985  
   986  // fold constant into arithmetic ops
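        // For example, (ADD x (MOVDconst [5])) becomes (ADDconst [5] x); the intent (a sketch,
        // not a guarantee for every constant) is that the constant ends up in the instruction's
        // immediate field instead of being materialized into a register first. Whether that
        // holds for a particular constant is decided later, when the instruction is assembled.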
   987  (ADD  x (MOVDconst <t> [c])) && !t.IsPtr() => (ADDconst [c] x)
   988  (SUB  x (MOVDconst [c])) => (SUBconst [c] x)
   989  (AND  x (MOVDconst [c])) => (ANDconst [c] x)
   990  (OR   x (MOVDconst [c])) => (ORconst  [c] x)
   991  (XOR  x (MOVDconst [c])) => (XORconst [c] x)
   992  (TST  x (MOVDconst [c])) => (TSTconst [c] x)
   993  (TSTW x (MOVDconst [c])) => (TSTWconst [int32(c)] x)
   994  (CMN  x (MOVDconst [c])) => (CMNconst [c] x)
   995  (CMNW x (MOVDconst [c])) => (CMNWconst [int32(c)] x)
   996  (BIC  x (MOVDconst [c])) => (ANDconst [^c] x)
   997  (EON  x (MOVDconst [c])) => (XORconst [^c] x)
   998  (ORN  x (MOVDconst [c])) => (ORconst  [^c] x)
   999  
  1000  (SLL x (MOVDconst [c])) => (SLLconst x [c&63])
  1001  (SRL x (MOVDconst [c])) => (SRLconst x [c&63])
  1002  (SRA x (MOVDconst [c])) => (SRAconst x [c&63])
  1003  (SLL x (ANDconst [63] y)) => (SLL x y)
  1004  (SRL x (ANDconst [63] y)) => (SRL x y)
  1005  (SRA x (ANDconst [63] y)) => (SRA x y)
  1006  
  1007  (CMP  x (MOVDconst [c])) => (CMPconst [c] x)
  1008  (CMP  (MOVDconst [c]) x) => (InvertFlags (CMPconst [c] x))
  1009  (CMPW x (MOVDconst [c])) => (CMPWconst [int32(c)] x)
  1010  (CMPW (MOVDconst [c]) x) => (InvertFlags (CMPWconst [int32(c)] x))
  1011  
  1012  (ROR  x (MOVDconst [c])) => (RORconst x [c&63])
  1013  (RORW x (MOVDconst [c])) => (RORWconst x [c&31])
  1014  
  1015  (ADDSflags x (MOVDconst [c]))  => (ADDSconstflags [c] x)
  1016  
  1017  (ADDconst [c] y) && c < 0 => (SUBconst [-c] y)
  1018  
  1019  // Canonicalize the order of arguments to comparisons - helps with CSE.
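        // For example, if both (CMP x y) and (CMP y x) appear, whichever one is in the
        // non-canonical argument order is rewritten as InvertFlags of the canonical form,
        // so CSE can merge the two comparisons into one.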
  1020  ((CMP|CMPW) x y) && canonLessThan(x,y) => (InvertFlags ((CMP|CMPW) y x))
  1021  
  1022  // mul-neg => mneg
  1023  (NEG  (MUL  x y)) => (MNEG  x y)
  1024  (NEG  (MULW x y)) && v.Type.Size() <= 4 => (MNEGW x y)
  1025  (MUL  (NEG  x) y) => (MNEG  x y)
  1026  (MULW (NEG  x) y) => (MNEGW x y)
  1027  
  1028  // madd/msub
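        // a + x*y, where the multiply has no other use, fuses into a single MADD instruction,
        // and a - x*y into MSUB; the MNEG forms fold the extra negation into the choice of
        // MADD vs. MSUB.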
  1029  (ADD a l:(MUL  x y)) && l.Uses==1 && clobber(l) => (MADD a x y)
  1030  (SUB a l:(MUL  x y)) && l.Uses==1 && clobber(l) => (MSUB a x y)
  1031  (ADD a l:(MNEG x y)) && l.Uses==1 && clobber(l) => (MSUB a x y)
  1032  (SUB a l:(MNEG x y)) && l.Uses==1 && clobber(l) => (MADD a x y)
  1033  
  1034  (ADD a l:(MULW  x y)) && v.Type.Size() <= 4 && l.Uses==1 && clobber(l) => (MADDW a x y)
  1035  (SUB a l:(MULW  x y)) && v.Type.Size() <= 4 && l.Uses==1 && clobber(l) => (MSUBW a x y)
  1036  (ADD a l:(MNEGW x y)) && v.Type.Size() <= 4 && l.Uses==1 && clobber(l) => (MSUBW a x y)
  1037  (SUB a l:(MNEGW x y)) && v.Type.Size() <= 4 && l.Uses==1 && clobber(l) => (MADDW a x y)
  1038  
  1039  // madd/msub can't take constant arguments, so do a bit of reordering if a non-constant is available.
  1040  // Note: don't reorder arithmetic concerning pointers, as we must ensure that
  1041  // no intermediate computations are invalid pointers.
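        // For example, a + (x*y + 8) is rewritten as (a + x*y) + 8, so the multiply-add can
        // still fuse into a MADD while the +8 is folded into an ADDconst afterwards.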
  1042  (ADD <t> a p:(ADDconst [c] m:((MUL|MULW|MNEG|MNEGW) _ _))) && p.Uses==1 && m.Uses==1 && !t.IsPtrShaped() => (ADDconst [c] (ADD <v.Type> a m))
  1043  (ADD <t> a p:(SUBconst [c] m:((MUL|MULW|MNEG|MNEGW) _ _))) && p.Uses==1 && m.Uses==1 && !t.IsPtrShaped() => (SUBconst [c] (ADD <v.Type> a m))
  1044  (SUB <t> a p:(ADDconst [c] m:((MUL|MULW|MNEG|MNEGW) _ _))) && p.Uses==1 && m.Uses==1 && !t.IsPtrShaped() => (SUBconst [c] (SUB <v.Type> a m))
  1045  (SUB <t> a p:(SUBconst [c] m:((MUL|MULW|MNEG|MNEGW) _ _))) && p.Uses==1 && m.Uses==1 && !t.IsPtrShaped() => (ADDconst [c] (SUB <v.Type> a m))
  1046  
  1047  // optimize ADCSflags, SBCSflags and friends
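        // These flag values typically come from carry chains (e.g. math/bits.Add64): the
        // pattern ADDSconstflags [-1] (ADCzerocarry c) merely re-derives the carry flag from
        // a 0/1 carry value, so the original flags c can be used directly.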
  1048  (ADCSflags x y (Select1 <types.TypeFlags> (ADDSconstflags [-1] (ADCzerocarry <typ.UInt64> c)))) => (ADCSflags x y c)
  1049  (ADCSflags x y (Select1 <types.TypeFlags> (ADDSconstflags [-1] (MOVDconst [0])))) => (ADDSflags x y)
  1050  (SBCSflags x y (Select1 <types.TypeFlags> (NEGSflags (NEG <typ.UInt64> (NGCzerocarry <typ.UInt64> bo))))) => (SBCSflags x y bo)
  1051  (SBCSflags x y (Select1 <types.TypeFlags> (NEGSflags (MOVDconst [0])))) => (SUBSflags x y)
  1052  
  1053  // mul by constant
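        // x*0 folds to 0 and x*1 to x (with a zero-extension in the 32-bit MULW case); other
        // constants are handled by the generic strength-reduction helpers below, which emit
        // shift/add/sub sequences when those are judged cheaper than a MUL.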
  1054  (MUL _ (MOVDconst [0])) => (MOVDconst [0])
  1055  (MUL x (MOVDconst [1])) => x
  1056  
  1057  (MULW _ (MOVDconst [c])) && int32(c)==0 => (MOVDconst [0])
  1058  (MULW x (MOVDconst [c])) && int32(c)==1 => (MOVWUreg x)
  1059  
  1060  (MUL  x (MOVDconst [c])) && canMulStrengthReduce(config, c) => {mulStrengthReduce(v, x, c)}
  1061  (MULW x (MOVDconst [c])) && v.Type.Size() <= 4 && canMulStrengthReduce32(config, int32(c)) => {mulStrengthReduce32(v, x, int32(c))}
  1062  
  1063  // mneg by constant
  1064  (MNEG x (MOVDconst [-1])) => x
  1065  (MNEG _ (MOVDconst [0])) => (MOVDconst [0])
  1066  (MNEG x (MOVDconst [1])) => (NEG x)
  1067  (MNEG x (MOVDconst [c])) && isPowerOfTwo(c) => (NEG (SLLconst <x.Type> [log64(c)] x))
  1068  (MNEG x (MOVDconst [c])) && isPowerOfTwo(c-1) && c >= 3 => (NEG (ADDshiftLL <x.Type> x x [log64(c-1)]))
  1069  (MNEG x (MOVDconst [c])) && isPowerOfTwo(c+1) && c >= 7 => (NEG (ADDshiftLL <x.Type> (NEG <x.Type> x) x [log64(c+1)]))
  1070  (MNEG x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo(c/3) => (SLLconst <x.Type> [log64(c/3)] (SUBshiftLL <x.Type> x x [2]))
  1071  (MNEG x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo(c/5) => (NEG (SLLconst <x.Type> [log64(c/5)] (ADDshiftLL <x.Type> x x [2])))
  1072  (MNEG x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo(c/7) => (SLLconst <x.Type> [log64(c/7)] (SUBshiftLL <x.Type> x x [3]))
  1073  (MNEG x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo(c/9) => (NEG (SLLconst <x.Type> [log64(c/9)] (ADDshiftLL <x.Type> x x [3])))
  1074  
  1075  
  1076  (MNEGW x (MOVDconst [c])) && int32(c)==-1 => (MOVWUreg x)
  1077  (MNEGW _ (MOVDconst [c])) && int32(c)==0 => (MOVDconst [0])
  1078  (MNEGW x (MOVDconst [c])) && int32(c)==1 => (MOVWUreg (NEG <x.Type> x))
  1079  (MNEGW x (MOVDconst [c])) && isPowerOfTwo(c) => (NEG (SLLconst <x.Type> [log64(c)] x))
  1080  (MNEGW x (MOVDconst [c])) && isPowerOfTwo(c-1) && int32(c) >= 3 => (MOVWUreg (NEG <x.Type> (ADDshiftLL <x.Type> x x [log64(c-1)])))
  1081  (MNEGW x (MOVDconst [c])) && isPowerOfTwo(c+1) && int32(c) >= 7 => (MOVWUreg (NEG <x.Type> (ADDshiftLL <x.Type> (NEG <x.Type> x) x [log64(c+1)])))
  1082  (MNEGW x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) => (MOVWUreg (SLLconst <x.Type> [log64(c/3)] (SUBshiftLL <x.Type> x x [2])))
  1083  (MNEGW x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) => (MOVWUreg (NEG <x.Type> (SLLconst <x.Type> [log64(c/5)] (ADDshiftLL <x.Type> x x [2]))))
  1084  (MNEGW x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) => (MOVWUreg (SLLconst <x.Type> [log64(c/7)] (SUBshiftLL <x.Type> x x [3])))
  1085  (MNEGW x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) => (MOVWUreg (NEG <x.Type> (SLLconst <x.Type> [log64(c/9)] (ADDshiftLL <x.Type> x x [3]))))
  1086  
  1087  
  1088  (MADD a x (MOVDconst [-1])) => (SUB a x)
  1089  (MADD a _ (MOVDconst [0])) => a
  1090  (MADD a x (MOVDconst [1])) => (ADD a x)
  1091  (MADD a x (MOVDconst [c])) && isPowerOfTwo(c) => (ADDshiftLL a x [log64(c)])
  1092  (MADD a x (MOVDconst [c])) && isPowerOfTwo(c-1) && c>=3 => (ADD a (ADDshiftLL <x.Type> x x [log64(c-1)]))
  1093  (MADD a x (MOVDconst [c])) && isPowerOfTwo(c+1) && c>=7 => (SUB a (SUBshiftLL <x.Type> x x [log64(c+1)]))
  1094  (MADD a x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo(c/3) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
  1095  (MADD a x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo(c/5) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
  1096  (MADD a x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo(c/7) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
  1097  (MADD a x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo(c/9) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
  1098  
  1099  (MADD a (MOVDconst [-1]) x) => (SUB a x)
  1100  (MADD a (MOVDconst [0]) _) => a
  1101  (MADD a (MOVDconst [1]) x) => (ADD a x)
  1102  (MADD a (MOVDconst [c]) x) && isPowerOfTwo(c) => (ADDshiftLL a x [log64(c)])
  1103  (MADD a (MOVDconst [c]) x) && isPowerOfTwo(c-1) && c>=3 => (ADD a (ADDshiftLL <x.Type> x x [log64(c-1)]))
  1104  (MADD a (MOVDconst [c]) x) && isPowerOfTwo(c+1) && c>=7 => (SUB a (SUBshiftLL <x.Type> x x [log64(c+1)]))
  1105  (MADD a (MOVDconst [c]) x) && c%3 == 0 && isPowerOfTwo(c/3) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
  1106  (MADD a (MOVDconst [c]) x) && c%5 == 0 && isPowerOfTwo(c/5) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
  1107  (MADD a (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo(c/7) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
  1108  (MADD a (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo(c/9) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
  1109  
  1110  (MADDW a x (MOVDconst [c])) && int32(c)==-1 => (MOVWUreg (SUB <a.Type> a x))
  1111  (MADDW a _ (MOVDconst [c])) && int32(c)==0 => (MOVWUreg a)
  1112  (MADDW a x (MOVDconst [c])) && int32(c)==1 => (MOVWUreg (ADD <a.Type> a x))
  1113  (MADDW a x (MOVDconst [c])) && isPowerOfTwo(c) => (MOVWUreg (ADDshiftLL <a.Type> a x [log64(c)]))
  1114  (MADDW a x (MOVDconst [c])) && isPowerOfTwo(c-1) && int32(c)>=3 => (MOVWUreg (ADD <a.Type> a (ADDshiftLL <x.Type> x x [log64(c-1)])))
  1115  (MADDW a x (MOVDconst [c])) && isPowerOfTwo(c+1) && int32(c)>=7 => (MOVWUreg (SUB <a.Type> a (SUBshiftLL <x.Type> x x [log64(c+1)])))
  1116  (MADDW a x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) => (MOVWUreg (SUBshiftLL <a.Type> a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)]))
  1117  (MADDW a x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) => (MOVWUreg (ADDshiftLL <a.Type> a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)]))
  1118  (MADDW a x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) => (MOVWUreg (SUBshiftLL <a.Type> a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)]))
  1119  (MADDW a x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) => (MOVWUreg (ADDshiftLL <a.Type> a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)]))
  1120  
  1121  (MADDW a (MOVDconst [c]) x) && int32(c)==-1 => (MOVWUreg (SUB <a.Type> a x))
  1122  (MADDW a (MOVDconst [c]) _) && int32(c)==0 => (MOVWUreg a)
  1123  (MADDW a (MOVDconst [c]) x) && int32(c)==1 => (MOVWUreg (ADD <a.Type> a x))
  1124  (MADDW a (MOVDconst [c]) x) && isPowerOfTwo(c) => (MOVWUreg (ADDshiftLL <a.Type> a x [log64(c)]))
  1125  (MADDW a (MOVDconst [c]) x) && isPowerOfTwo(c-1) && int32(c)>=3 => (MOVWUreg (ADD <a.Type> a (ADDshiftLL <x.Type> x x [log64(c-1)])))
  1126  (MADDW a (MOVDconst [c]) x) && isPowerOfTwo(c+1) && int32(c)>=7 => (MOVWUreg (SUB <a.Type> a (SUBshiftLL <x.Type> x x [log64(c+1)])))
  1127  (MADDW a (MOVDconst [c]) x) && c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) => (MOVWUreg (SUBshiftLL <a.Type> a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)]))
  1128  (MADDW a (MOVDconst [c]) x) && c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) => (MOVWUreg (ADDshiftLL <a.Type> a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)]))
  1129  (MADDW a (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) => (MOVWUreg (SUBshiftLL <a.Type> a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)]))
  1130  (MADDW a (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) => (MOVWUreg (ADDshiftLL <a.Type> a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)]))
  1131  
  1132  (MSUB a x (MOVDconst [-1])) => (ADD a x)
  1133  (MSUB a _ (MOVDconst [0])) => a
  1134  (MSUB a x (MOVDconst [1])) => (SUB a x)
  1135  (MSUB a x (MOVDconst [c])) && isPowerOfTwo(c) => (SUBshiftLL a x [log64(c)])
  1136  (MSUB a x (MOVDconst [c])) && isPowerOfTwo(c-1) && c>=3 => (SUB a (ADDshiftLL <x.Type> x x [log64(c-1)]))
  1137  (MSUB a x (MOVDconst [c])) && isPowerOfTwo(c+1) && c>=7 => (ADD a (SUBshiftLL <x.Type> x x [log64(c+1)]))
  1138  (MSUB a x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo(c/3) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
  1139  (MSUB a x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo(c/5) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
  1140  (MSUB a x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo(c/7) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
  1141  (MSUB a x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo(c/9) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
  1142  
  1143  (MSUB a (MOVDconst [-1]) x) => (ADD a x)
  1144  (MSUB a (MOVDconst [0]) _) => a
  1145  (MSUB a (MOVDconst [1]) x) => (SUB a x)
  1146  (MSUB a (MOVDconst [c]) x) && isPowerOfTwo(c) => (SUBshiftLL a x [log64(c)])
  1147  (MSUB a (MOVDconst [c]) x) && isPowerOfTwo(c-1) && c>=3 => (SUB a (ADDshiftLL <x.Type> x x [log64(c-1)]))
  1148  (MSUB a (MOVDconst [c]) x) && isPowerOfTwo(c+1) && c>=7 => (ADD a (SUBshiftLL <x.Type> x x [log64(c+1)]))
  1149  (MSUB a (MOVDconst [c]) x) && c%3 == 0 && isPowerOfTwo(c/3) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
  1150  (MSUB a (MOVDconst [c]) x) && c%5 == 0 && isPowerOfTwo(c/5) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
  1151  (MSUB a (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo(c/7) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
  1152  (MSUB a (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo(c/9) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
  1153  
  1154  (MSUBW a x (MOVDconst [c])) && int32(c)==-1 => (MOVWUreg (ADD <a.Type> a x))
  1155  (MSUBW a _ (MOVDconst [c])) && int32(c)==0 => (MOVWUreg a)
  1156  (MSUBW a x (MOVDconst [c])) && int32(c)==1 => (MOVWUreg (SUB <a.Type> a x))
  1157  (MSUBW a x (MOVDconst [c])) && isPowerOfTwo(c) => (MOVWUreg (SUBshiftLL <a.Type> a x [log64(c)]))
  1158  (MSUBW a x (MOVDconst [c])) && isPowerOfTwo(c-1) && int32(c)>=3 => (MOVWUreg (SUB <a.Type> a (ADDshiftLL <x.Type> x x [log64(c-1)])))
  1159  (MSUBW a x (MOVDconst [c])) && isPowerOfTwo(c+1) && int32(c)>=7 => (MOVWUreg (ADD <a.Type> a (SUBshiftLL <x.Type> x x [log64(c+1)])))
  1160  (MSUBW a x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) => (MOVWUreg (ADDshiftLL <a.Type> a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)]))
  1161  (MSUBW a x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) => (MOVWUreg (SUBshiftLL <a.Type> a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)]))
  1162  (MSUBW a x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) => (MOVWUreg (ADDshiftLL <a.Type> a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)]))
  1163  (MSUBW a x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) => (MOVWUreg (SUBshiftLL <a.Type> a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)]))
  1164  
  1165  (MSUBW a (MOVDconst [c]) x) && int32(c)==-1 => (MOVWUreg (ADD <a.Type> a x))
  1166  (MSUBW a (MOVDconst [c]) _) && int32(c)==0 => (MOVWUreg a)
  1167  (MSUBW a (MOVDconst [c]) x) && int32(c)==1 => (MOVWUreg (SUB <a.Type> a x))
  1168  (MSUBW a (MOVDconst [c]) x) && isPowerOfTwo(c) => (MOVWUreg (SUBshiftLL <a.Type> a x [log64(c)]))
  1169  (MSUBW a (MOVDconst [c]) x) && isPowerOfTwo(c-1) && int32(c)>=3 => (MOVWUreg (SUB <a.Type> a (ADDshiftLL <x.Type> x x [log64(c-1)])))
  1170  (MSUBW a (MOVDconst [c]) x) && isPowerOfTwo(c+1) && int32(c)>=7 => (MOVWUreg (ADD <a.Type> a (SUBshiftLL <x.Type> x x [log64(c+1)])))
  1171  (MSUBW a (MOVDconst [c]) x) && c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) => (MOVWUreg (ADDshiftLL <a.Type> a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)]))
  1172  (MSUBW a (MOVDconst [c]) x) && c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) => (MOVWUreg (SUBshiftLL <a.Type> a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)]))
  1173  (MSUBW a (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) => (MOVWUreg (ADDshiftLL <a.Type> a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)]))
  1174  (MSUBW a (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) => (MOVWUreg (SUBshiftLL <a.Type> a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)]))
  1175  
  1176  // div by constant
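        // For example, an unsigned divide by 8 becomes a logical right shift by 3, and an
        // unsigned remainder by 8 becomes x & 7.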
  1177  (UDIV  x (MOVDconst [1])) => x
  1178  (UDIV  x (MOVDconst [c])) && isPowerOfTwo(c) => (SRLconst [log64(c)] x)
  1179  (UDIVW x (MOVDconst [c])) && uint32(c)==1 => (MOVWUreg x)
  1180  (UDIVW x (MOVDconst [c])) && isPowerOfTwo(c) && is32Bit(c) => (SRLconst [log64(c)] (MOVWUreg <v.Type> x))
  1181  (UMOD  _ (MOVDconst [1])) => (MOVDconst [0])
  1182  (UMOD  x (MOVDconst [c])) && isPowerOfTwo(c) => (ANDconst [c-1] x)
  1183  (UMODW _ (MOVDconst [c])) && uint32(c)==1 => (MOVDconst [0])
  1184  (UMODW x (MOVDconst [c])) && isPowerOfTwo(c) && is32Bit(c) => (ANDconst [c-1] x)
  1185  
  1186  // generic simplifications
  1187  (ADD x (NEG y)) => (SUB x y)
  1188  (SUB x x) => (MOVDconst [0])
  1189  (AND x x) => x
  1190  (OR  x x) => x
  1191  (XOR x x) => (MOVDconst [0])
  1192  (BIC x x) => (MOVDconst [0])
  1193  (EON x x) => (MOVDconst [-1])
  1194  (ORN x x) => (MOVDconst [-1])
  1195  (AND x (MVN y)) => (BIC x y)
  1196  (XOR x (MVN y)) => (EON x y)
  1197  (OR  x (MVN y)) => (ORN x y)
  1198  (MVN (XOR x y)) => (EON x y)
  1199  (NEG (NEG x)) => x
  1200  
  1201  (CSEL [cc] (MOVDconst [-1]) (MOVDconst [0]) flag) => (CSETM [cc] flag)
  1202  (CSEL [cc] (MOVDconst [0]) (MOVDconst [-1]) flag) => (CSETM [arm64Negate(cc)] flag)
  1203  (CSEL [cc] x (MOVDconst [0]) flag) => (CSEL0 [cc] x flag)
  1204  (CSEL [cc] (MOVDconst [0]) y flag) => (CSEL0 [arm64Negate(cc)] y flag)
  1205  (CSEL [cc] x (ADDconst [1] a) flag) => (CSINC [cc] x a flag)
  1206  (CSEL [cc] (ADDconst [1] a) x flag) => (CSINC [arm64Negate(cc)] x a flag)
  1207  (CSEL [cc] x (MVN a) flag) => (CSINV [cc] x a flag)
  1208  (CSEL [cc] (MVN a) x flag) => (CSINV [arm64Negate(cc)] x a flag)
  1209  (CSEL [cc] x (NEG a) flag) => (CSNEG [cc] x a flag)
  1210  (CSEL [cc] (NEG a) x flag) => (CSNEG [arm64Negate(cc)] x a flag)
  1211  
  1212  (SUB x (SUB y z)) => (SUB (ADD <v.Type> x z) y)
  1213  (SUB (SUB x y) z) => (SUB x (ADD <y.Type> y z))
  1214  
  1215  // remove redundant *const ops
  1216  (ADDconst [0]  x) => x
  1217  (SUBconst [0]  x) => x
  1218  (ANDconst [0]  _) => (MOVDconst [0])
  1219  (ANDconst [-1] x) => x
  1220  (ORconst  [0]  x) => x
  1221  (ORconst  [-1] _) => (MOVDconst [-1])
  1222  (XORconst [0]  x) => x
  1223  (XORconst [-1] x) => (MVN x)
  1224  
  1225  // generic constant folding
  1226  (ADDconst [c] (MOVDconst [d]))  => (MOVDconst [c+d])
  1227  (ADDconst [c] (ADDconst [d] x)) => (ADDconst [c+d] x)
  1228  (ADDconst [c] (SUBconst [d] x)) => (ADDconst [c-d] x)
  1229  (SUBconst [c] (MOVDconst [d]))  => (MOVDconst [d-c])
  1230  (SUBconst [c] (SUBconst [d] x)) => (ADDconst [-c-d] x)
  1231  (SUBconst [c] (ADDconst [d] x)) => (ADDconst [-c+d] x)
  1232  (SLLconst [c] (MOVDconst [d]))  => (MOVDconst [d<<uint64(c)])
  1233  (SRLconst [c] (MOVDconst [d]))  => (MOVDconst [int64(uint64(d)>>uint64(c))])
  1234  (SRAconst [c] (MOVDconst [d]))  => (MOVDconst [d>>uint64(c)])
  1235  (MUL   (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [c*d])
  1236  (MNEG  (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [-c*d])
  1237  (MULW  (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [int64(uint32(c*d))])
  1238  (MNEGW (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [int64(uint32(-c*d))])
  1239  (MADD  (MOVDconst [c]) x y) => (ADDconst [c] (MUL  <x.Type> x y))
  1240  (MSUB  (MOVDconst [c]) x y) => (ADDconst [c] (MNEG <x.Type> x y))
  1241  (MADD  a (MOVDconst [c]) (MOVDconst [d])) => (ADDconst [c*d] a)
  1242  (MSUB  a (MOVDconst [c]) (MOVDconst [d])) => (SUBconst [c*d] a)
  1243  (MADDW (MOVDconst [c]) x y) => (MOVWUreg (ADDconst <x.Type> [c] (MULW  <x.Type> x y)))
  1244  (MSUBW (MOVDconst [c]) x y) => (MOVWUreg (ADDconst <x.Type> [c] (MNEGW <x.Type> x y)))
  1245  (MADDW a (MOVDconst [c]) (MOVDconst [d])) => (MOVWUreg (ADDconst <a.Type> [c*d] a))
  1246  (MSUBW a (MOVDconst [c]) (MOVDconst [d])) => (MOVWUreg (SUBconst <a.Type> [c*d] a))
  1247  (DIV   (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [c/d])
  1248  (UDIV  (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [int64(uint64(c)/uint64(d))])
  1249  (DIVW  (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [int64(uint32(int32(c)/int32(d)))])
  1250  (UDIVW (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [int64(uint32(c)/uint32(d))])
  1251  (MOD   (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [c%d])
  1252  (UMOD  (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [int64(uint64(c)%uint64(d))])
  1253  (MODW  (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [int64(uint32(int32(c)%int32(d)))])
  1254  (UMODW (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [int64(uint32(c)%uint32(d))])
  1255  (ANDconst [c] (MOVDconst [d]))  => (MOVDconst [c&d])
  1256  (ANDconst [c] (ANDconst [d] x)) => (ANDconst [c&d] x)
  1257  (ANDconst [c] (MOVWUreg x)) => (ANDconst [c&(1<<32-1)] x)
  1258  (ANDconst [c] (MOVHUreg x)) => (ANDconst [c&(1<<16-1)] x)
  1259  (ANDconst [c] (MOVBUreg x)) => (ANDconst [c&(1<<8-1)] x)
  1260  (MOVWUreg (ANDconst [c] x)) => (ANDconst [c&(1<<32-1)] x)
  1261  (MOVHUreg (ANDconst [c] x)) => (ANDconst [c&(1<<16-1)] x)
  1262  (MOVBUreg (ANDconst [c] x)) => (ANDconst [c&(1<<8-1)] x)
  1263  (ORconst  [c] (MOVDconst [d]))  => (MOVDconst [c|d])
  1264  (ORconst  [c] (ORconst [d] x))  => (ORconst [c|d] x)
  1265  (XORconst [c] (MOVDconst [d]))  => (MOVDconst [c^d])
  1266  (XORconst [c] (XORconst [d] x)) => (XORconst [c^d] x)
  1267  (MVN (MOVDconst [c])) => (MOVDconst [^c])
  1268  (NEG (MOVDconst [c])) => (MOVDconst [-c])
  1269  (MOVBreg  (MOVDconst [c])) => (MOVDconst [int64(int8(c))])
  1270  (MOVBUreg (MOVDconst [c])) => (MOVDconst [int64(uint8(c))])
  1271  (MOVHreg  (MOVDconst [c])) => (MOVDconst [int64(int16(c))])
  1272  (MOVHUreg (MOVDconst [c])) => (MOVDconst [int64(uint16(c))])
  1273  (MOVWreg  (MOVDconst [c])) => (MOVDconst [int64(int32(c))])
  1274  (MOVWUreg (MOVDconst [c])) => (MOVDconst [int64(uint32(c))])
  1275  (MOVDreg  (MOVDconst [c])) => (MOVDconst [c])
  1276  
  1277  // constant comparisons
  1278  (CMPconst  (MOVDconst [x]) [y]) => (FlagConstant [subFlags64(x,y)])
  1279  (CMPWconst (MOVDconst [x]) [y]) => (FlagConstant [subFlags32(int32(x),y)])
  1280  (TSTconst  (MOVDconst [x]) [y]) => (FlagConstant [logicFlags64(x&y)])
  1281  (TSTWconst (MOVDconst [x]) [y]) => (FlagConstant [logicFlags32(int32(x)&y)])
  1282  (CMNconst  (MOVDconst [x]) [y]) => (FlagConstant [addFlags64(x,y)])
  1283  (CMNWconst (MOVDconst [x]) [y]) => (FlagConstant [addFlags32(int32(x),y)])
  1284  
  1285  // other known comparisons
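        // For example, a zero-extended byte is at most 0xff, so comparing it against a
        // constant greater than 0xff always yields the "less than" flag state, which is
        // encoded directly as a FlagConstant.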
  1286  (CMPconst  (MOVBUreg _) [c]) && 0xff < c       => (FlagConstant [subFlags64(0,1)])
  1287  (CMPconst  (MOVHUreg _) [c]) && 0xffff < c     => (FlagConstant [subFlags64(0,1)])
  1288  (CMPconst  (MOVWUreg _) [c]) && 0xffffffff < c => (FlagConstant [subFlags64(0,1)])
  1289  (CMPconst  (ANDconst _ [m]) [n]) && 0 <= m && m < n => (FlagConstant [subFlags64(0,1)])
  1290  (CMPconst  (SRLconst _ [c]) [n]) && 0 <= n && 0 < c && c <= 63 && (1<<uint64(64-c)) <= uint64(n) => (FlagConstant [subFlags64(0,1)])
  1291  (CMPWconst (MOVBUreg _) [c]) && 0xff   < c => (FlagConstant [subFlags64(0,1)])
  1292  (CMPWconst (MOVHUreg _) [c]) && 0xffff < c => (FlagConstant [subFlags64(0,1)])
  1293  
  1294  // absorb flag constants into branches
  1295  (EQ (FlagConstant [fc]) yes no) &&  fc.eq() => (First yes no)
  1296  (EQ (FlagConstant [fc]) yes no) && !fc.eq() => (First no yes)
  1297  
  1298  (NE (FlagConstant [fc]) yes no) &&  fc.ne() => (First yes no)
  1299  (NE (FlagConstant [fc]) yes no) && !fc.ne() => (First no yes)
  1300  
  1301  (LT (FlagConstant [fc]) yes no) &&  fc.lt() => (First yes no)
  1302  (LT (FlagConstant [fc]) yes no) && !fc.lt() => (First no yes)
  1303  
  1304  (LE (FlagConstant [fc]) yes no) &&  fc.le() => (First yes no)
  1305  (LE (FlagConstant [fc]) yes no) && !fc.le() => (First no yes)
  1306  
  1307  (GT (FlagConstant [fc]) yes no) &&  fc.gt() => (First yes no)
  1308  (GT (FlagConstant [fc]) yes no) && !fc.gt() => (First no yes)
  1309  
  1310  (GE (FlagConstant [fc]) yes no) &&  fc.ge() => (First yes no)
  1311  (GE (FlagConstant [fc]) yes no) && !fc.ge() => (First no yes)
  1312  
  1313  (ULT (FlagConstant [fc]) yes no) &&  fc.ult() => (First yes no)
  1314  (ULT (FlagConstant [fc]) yes no) && !fc.ult() => (First no yes)
  1315  
  1316  (ULE (FlagConstant [fc]) yes no) &&  fc.ule() => (First yes no)
  1317  (ULE (FlagConstant [fc]) yes no) && !fc.ule() => (First no yes)
  1318  
  1319  (UGT (FlagConstant [fc]) yes no) &&  fc.ugt() => (First yes no)
  1320  (UGT (FlagConstant [fc]) yes no) && !fc.ugt() => (First no yes)
  1321  
  1322  (UGE (FlagConstant [fc]) yes no) &&  fc.uge() => (First yes no)
  1323  (UGE (FlagConstant [fc]) yes no) && !fc.uge() => (First no yes)
  1324  
  1325  (LTnoov (FlagConstant [fc]) yes no) &&  fc.ltNoov() => (First yes no)
  1326  (LTnoov (FlagConstant [fc]) yes no) && !fc.ltNoov() => (First no yes)
  1327  
  1328  (LEnoov (FlagConstant [fc]) yes no) &&  fc.leNoov() => (First yes no)
  1329  (LEnoov (FlagConstant [fc]) yes no) && !fc.leNoov() => (First no yes)
  1330  
  1331  (GTnoov (FlagConstant [fc]) yes no) &&  fc.gtNoov() => (First yes no)
  1332  (GTnoov (FlagConstant [fc]) yes no) && !fc.gtNoov() => (First no yes)
  1333  
  1334  (GEnoov (FlagConstant [fc]) yes no) &&  fc.geNoov() => (First yes no)
  1335  (GEnoov (FlagConstant [fc]) yes no) && !fc.geNoov() => (First no yes)
  1336  
  1337  (Z   (MOVDconst [0]) yes no)                  => (First yes no)
  1338  (Z   (MOVDconst [c]) yes no) && c != 0        => (First no yes)
  1339  (NZ  (MOVDconst [0]) yes no)                  => (First no yes)
  1340  (NZ  (MOVDconst [c]) yes no) && c != 0        => (First yes no)
  1341  (ZW  (MOVDconst [c]) yes no) && int32(c) == 0 => (First yes no)
  1342  (ZW  (MOVDconst [c]) yes no) && int32(c) != 0 => (First no yes)
  1343  (NZW (MOVDconst [c]) yes no) && int32(c) == 0 => (First no yes)
  1344  (NZW (MOVDconst [c]) yes no) && int32(c) != 0 => (First yes no)
  1345  
  1346  // absorb InvertFlags into branches
  1347  (LT  (InvertFlags cmp) yes no) => (GT cmp yes no)
  1348  (GT  (InvertFlags cmp) yes no) => (LT cmp yes no)
  1349  (LE  (InvertFlags cmp) yes no) => (GE cmp yes no)
  1350  (GE  (InvertFlags cmp) yes no) => (LE cmp yes no)
  1351  (ULT (InvertFlags cmp) yes no) => (UGT cmp yes no)
  1352  (UGT (InvertFlags cmp) yes no) => (ULT cmp yes no)
  1353  (ULE (InvertFlags cmp) yes no) => (UGE cmp yes no)
  1354  (UGE (InvertFlags cmp) yes no) => (ULE cmp yes no)
  1355  (EQ  (InvertFlags cmp) yes no) => (EQ cmp yes no)
  1356  (NE  (InvertFlags cmp) yes no) => (NE cmp yes no)
  1357  (FLT (InvertFlags cmp) yes no) => (FGT cmp yes no)
  1358  (FGT (InvertFlags cmp) yes no) => (FLT cmp yes no)
  1359  (FLE (InvertFlags cmp) yes no) => (FGE cmp yes no)
  1360  (FGE (InvertFlags cmp) yes no) => (FLE cmp yes no)
  1361  (LTnoov (InvertFlags cmp) yes no) => (GTnoov cmp yes no)
  1362  (GEnoov (InvertFlags cmp) yes no) => (LEnoov cmp yes no)
  1363  (LEnoov (InvertFlags cmp) yes no) => (GEnoov cmp yes no)
  1364  (GTnoov (InvertFlags cmp) yes no) => (LTnoov cmp yes no)
  1365  
  1366  // absorb InvertFlags into conditional instructions
  1367  (CSEL  [cc] x y (InvertFlags cmp)) => (CSEL  [arm64Invert(cc)] x y cmp)
  1368  (CSEL0 [cc] x   (InvertFlags cmp)) => (CSEL0 [arm64Invert(cc)] x   cmp)
  1369  (CSETM [cc]     (InvertFlags cmp)) => (CSETM [arm64Invert(cc)]     cmp)
  1370  (CSINC [cc] x y (InvertFlags cmp)) => (CSINC [arm64Invert(cc)] x y cmp)
  1371  (CSINV [cc] x y (InvertFlags cmp)) => (CSINV [arm64Invert(cc)] x y cmp)
  1372  (CSNEG [cc] x y (InvertFlags cmp)) => (CSNEG [arm64Invert(cc)] x y cmp)
  1373  
  1374  // absorb flag constants into boolean values
  1375  (Equal             (FlagConstant [fc])) => (MOVDconst [b2i(fc.eq())])
  1376  (NotEqual          (FlagConstant [fc])) => (MOVDconst [b2i(fc.ne())])
  1377  (LessThan          (FlagConstant [fc])) => (MOVDconst [b2i(fc.lt())])
  1378  (LessThanU         (FlagConstant [fc])) => (MOVDconst [b2i(fc.ult())])
  1379  (LessEqual         (FlagConstant [fc])) => (MOVDconst [b2i(fc.le())])
  1380  (LessEqualU        (FlagConstant [fc])) => (MOVDconst [b2i(fc.ule())])
  1381  (GreaterThan       (FlagConstant [fc])) => (MOVDconst [b2i(fc.gt())])
  1382  (GreaterThanU      (FlagConstant [fc])) => (MOVDconst [b2i(fc.ugt())])
  1383  (GreaterEqual      (FlagConstant [fc])) => (MOVDconst [b2i(fc.ge())])
  1384  (GreaterEqualU     (FlagConstant [fc])) => (MOVDconst [b2i(fc.uge())])
  1385  (LessThanNoov      (FlagConstant [fc])) => (MOVDconst [b2i(fc.ltNoov())])
  1386  (GreaterEqualNoov  (FlagConstant [fc])) => (MOVDconst [b2i(fc.geNoov())])
  1387  
  1388  // absorb InvertFlags into boolean values
  1389  (Equal            (InvertFlags x)) => (Equal x)
  1390  (NotEqual         (InvertFlags x)) => (NotEqual x)
  1391  (LessThan         (InvertFlags x)) => (GreaterThan x)
  1392  (LessThanU        (InvertFlags x)) => (GreaterThanU x)
  1393  (GreaterThan      (InvertFlags x)) => (LessThan x)
  1394  (GreaterThanU     (InvertFlags x)) => (LessThanU x)
  1395  (LessEqual        (InvertFlags x)) => (GreaterEqual x)
  1396  (LessEqualU       (InvertFlags x)) => (GreaterEqualU x)
  1397  (GreaterEqual     (InvertFlags x)) => (LessEqual x)
  1398  (GreaterEqualU    (InvertFlags x)) => (LessEqualU x)
  1399  (LessThanF        (InvertFlags x)) => (GreaterThanF x)
  1400  (LessEqualF       (InvertFlags x)) => (GreaterEqualF x)
  1401  (GreaterThanF     (InvertFlags x)) => (LessThanF x)
  1402  (GreaterEqualF    (InvertFlags x)) => (LessEqualF x)
  1403  (LessThanNoov     (InvertFlags x)) => (CSEL0 [OpARM64NotEqual] (GreaterEqualNoov <typ.Bool> x) x)
  1404  (GreaterEqualNoov (InvertFlags x)) => (CSINC [OpARM64NotEqual] (LessThanNoov <typ.Bool> x) (MOVDconst [0]) x)
  1405  
  1406  // Don't bother extending if we're not using the higher bits.
  1407  (MOV(B|BU)reg x) && v.Type.Size() <= 1 => x
  1408  (MOV(H|HU)reg x) && v.Type.Size() <= 2 => x
  1409  (MOV(W|WU)reg x) && v.Type.Size() <= 4 => x
  1410  
  1411  // omit sign extension
  1412  (MOVWreg <t> (ANDconst x [c])) && uint64(c) & uint64(0xffffffff80000000) == 0 => (ANDconst <t> x [c])
  1413  (MOVHreg <t> (ANDconst x [c])) && uint64(c) & uint64(0xffffffffffff8000) == 0 => (ANDconst <t> x [c])
  1414  (MOVBreg <t> (ANDconst x [c])) && uint64(c) & uint64(0xffffffffffffff80) == 0 => (ANDconst <t> x [c])
  1415  
  1416  // absorb flag constants into conditional instructions
  1417  (CSEL  [cc] x _ flag) && ccARM64Eval(cc, flag) > 0 => x
  1418  (CSEL  [cc] _ y flag) && ccARM64Eval(cc, flag) < 0 => y
  1419  (CSEL0 [cc] x   flag) && ccARM64Eval(cc, flag) > 0 => x
  1420  (CSEL0 [cc] _   flag) && ccARM64Eval(cc, flag) < 0 => (MOVDconst [0])
  1421  (CSNEG [cc] x _ flag) && ccARM64Eval(cc, flag) > 0 => x
  1422  (CSNEG [cc] _ y flag) && ccARM64Eval(cc, flag) < 0 => (NEG y)
  1423  (CSINV [cc] x _ flag) && ccARM64Eval(cc, flag) > 0 => x
  1424  (CSINV [cc] _ y flag) && ccARM64Eval(cc, flag) < 0 => (Not y)
  1425  (CSINC [cc] x _ flag) && ccARM64Eval(cc, flag) > 0 => x
  1426  (CSINC [cc] _ y flag) && ccARM64Eval(cc, flag) < 0 => (ADDconst [1] y)
  1427  (CSETM [cc]     flag) && ccARM64Eval(cc, flag) > 0 => (MOVDconst [-1])
  1428  (CSETM [cc]     flag) && ccARM64Eval(cc, flag) < 0 => (MOVDconst [0])
  1429  
  1430  // absorb flags back into boolean CSEL
  1431  (CSEL [cc] x y (CMPWconst [0] boolval)) && cc == OpARM64NotEqual && flagArg(boolval) != nil =>
  1432        (CSEL [boolval.Op] x y flagArg(boolval))
  1433  (CSEL [cc] x y (CMPWconst [0] boolval)) && cc == OpARM64Equal && flagArg(boolval) != nil =>
  1434        (CSEL [arm64Negate(boolval.Op)] x y flagArg(boolval))
  1435  (CSEL0 [cc] x (CMPWconst [0] boolval)) && cc == OpARM64NotEqual && flagArg(boolval) != nil =>
  1436        (CSEL0 [boolval.Op] x flagArg(boolval))
  1437  (CSEL0 [cc] x (CMPWconst [0] boolval)) && cc == OpARM64Equal && flagArg(boolval) != nil =>
  1438        (CSEL0 [arm64Negate(boolval.Op)] x flagArg(boolval))
  1439  
  1440  // absorb shifts into ops
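        // For example, (ADD x0 (SLLconst [3] y)) with the shift otherwise unused becomes
        // (ADDshiftLL x0 y [3]), matching the shifted-register operand form of the ARM64
        // ADD instruction.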
  1441  (NEG x:(SLLconst [c] y)) && clobberIfDead(x) => (NEGshiftLL [c] y)
  1442  (NEG x:(SRLconst [c] y)) && clobberIfDead(x) => (NEGshiftRL [c] y)
  1443  (NEG x:(SRAconst [c] y)) && clobberIfDead(x) => (NEGshiftRA [c] y)
  1444  (MVN x:(SLLconst [c] y)) && clobberIfDead(x) => (MVNshiftLL [c] y)
  1445  (MVN x:(SRLconst [c] y)) && clobberIfDead(x) => (MVNshiftRL [c] y)
  1446  (MVN x:(SRAconst [c] y)) && clobberIfDead(x) => (MVNshiftRA [c] y)
  1447  (MVN x:(RORconst [c] y)) && clobberIfDead(x) => (MVNshiftRO [c] y)
  1448  (ADD x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (ADDshiftLL x0 y [c])
  1449  (ADD x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (ADDshiftRL x0 y [c])
  1450  (ADD x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (ADDshiftRA x0 y [c])
  1451  (SUB x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (SUBshiftLL x0 y [c])
  1452  (SUB x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (SUBshiftRL x0 y [c])
  1453  (SUB x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (SUBshiftRA x0 y [c])
  1454  (AND x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (ANDshiftLL x0 y [c])
  1455  (AND x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (ANDshiftRL x0 y [c])
  1456  (AND x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (ANDshiftRA x0 y [c])
  1457  (AND x0 x1:(RORconst [c] y)) && clobberIfDead(x1) => (ANDshiftRO x0 y [c])
  1458  (OR  x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (ORshiftLL  x0 y [c]) // useful for combined load
  1459  (OR  x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (ORshiftRL  x0 y [c])
  1460  (OR  x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (ORshiftRA  x0 y [c])
  1461  (OR  x0 x1:(RORconst [c] y)) && clobberIfDead(x1) => (ORshiftRO  x0 y [c])
  1462  (XOR x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (XORshiftLL x0 y [c])
  1463  (XOR x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (XORshiftRL x0 y [c])
  1464  (XOR x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (XORshiftRA x0 y [c])
  1465  (XOR x0 x1:(RORconst [c] y)) && clobberIfDead(x1) => (XORshiftRO x0 y [c])
  1466  (BIC x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (BICshiftLL x0 y [c])
  1467  (BIC x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (BICshiftRL x0 y [c])
  1468  (BIC x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (BICshiftRA x0 y [c])
  1469  (BIC x0 x1:(RORconst [c] y)) && clobberIfDead(x1) => (BICshiftRO x0 y [c])
  1470  (ORN x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (ORNshiftLL x0 y [c])
  1471  (ORN x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (ORNshiftRL x0 y [c])
  1472  (ORN x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (ORNshiftRA x0 y [c])
  1473  (ORN x0 x1:(RORconst [c] y)) && clobberIfDead(x1) => (ORNshiftRO x0 y [c])
  1474  (EON x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (EONshiftLL x0 y [c])
  1475  (EON x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (EONshiftRL x0 y [c])
  1476  (EON x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (EONshiftRA x0 y [c])
  1477  (EON x0 x1:(RORconst [c] y)) && clobberIfDead(x1) => (EONshiftRO x0 y [c])
  1478  (CMP x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (CMPshiftLL x0 y [c])
  1479  (CMP x0:(SLLconst [c] y) x1) && clobberIfDead(x0) => (InvertFlags (CMPshiftLL x1 y [c]))
  1480  (CMP x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (CMPshiftRL x0 y [c])
  1481  (CMP x0:(SRLconst [c] y) x1) && clobberIfDead(x0) => (InvertFlags (CMPshiftRL x1 y [c]))
  1482  (CMP x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (CMPshiftRA x0 y [c])
  1483  (CMP x0:(SRAconst [c] y) x1) && clobberIfDead(x0) => (InvertFlags (CMPshiftRA x1 y [c]))
  1484  (CMN x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (CMNshiftLL x0 y [c])
  1485  (CMN x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (CMNshiftRL x0 y [c])
  1486  (CMN x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (CMNshiftRA x0 y [c])
  1487  (TST x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (TSTshiftLL x0 y [c])
  1488  (TST x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (TSTshiftRL x0 y [c])
  1489  (TST x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (TSTshiftRA x0 y [c])
  1490  (TST x0 x1:(RORconst [c] y)) && clobberIfDead(x1) => (TSTshiftRO x0 y [c])
  1491  
  1492  // prefer *const ops to *shift ops
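        // For example, (ADDshiftLL (MOVDconst [c]) x [d]) computes c + (x<<d); re-expressing
        // it as (ADDconst [c] (SLLconst x [d])) exposes the constant to the *const folding
        // rules above.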
  1493  (ADDshiftLL (MOVDconst [c]) x [d]) => (ADDconst [c] (SLLconst <x.Type> x [d]))
  1494  (ADDshiftRL (MOVDconst [c]) x [d]) => (ADDconst [c] (SRLconst <x.Type> x [d]))
  1495  (ADDshiftRA (MOVDconst [c]) x [d]) => (ADDconst [c] (SRAconst <x.Type> x [d]))
  1496  (ANDshiftLL (MOVDconst [c]) x [d]) => (ANDconst [c] (SLLconst <x.Type> x [d]))
  1497  (ANDshiftRL (MOVDconst [c]) x [d]) => (ANDconst [c] (SRLconst <x.Type> x [d]))
  1498  (ANDshiftRA (MOVDconst [c]) x [d]) => (ANDconst [c] (SRAconst <x.Type> x [d]))
  1499  (ANDshiftRO (MOVDconst [c]) x [d]) => (ANDconst [c] (RORconst <x.Type> x [d]))
  1500  (ORshiftLL  (MOVDconst [c]) x [d]) => (ORconst  [c] (SLLconst <x.Type> x [d]))
  1501  (ORshiftRL  (MOVDconst [c]) x [d]) => (ORconst  [c] (SRLconst <x.Type> x [d]))
  1502  (ORshiftRA  (MOVDconst [c]) x [d]) => (ORconst  [c] (SRAconst <x.Type> x [d]))
  1503  (ORshiftRO  (MOVDconst [c]) x [d]) => (ORconst  [c] (RORconst <x.Type> x [d]))
  1504  (XORshiftLL (MOVDconst [c]) x [d]) => (XORconst [c] (SLLconst <x.Type> x [d]))
  1505  (XORshiftRL (MOVDconst [c]) x [d]) => (XORconst [c] (SRLconst <x.Type> x [d]))
  1506  (XORshiftRA (MOVDconst [c]) x [d]) => (XORconst [c] (SRAconst <x.Type> x [d]))
  1507  (XORshiftRO (MOVDconst [c]) x [d]) => (XORconst [c] (RORconst <x.Type> x [d]))
  1508  (CMPshiftLL (MOVDconst [c]) x [d]) => (InvertFlags (CMPconst [c] (SLLconst <x.Type> x [d])))
  1509  (CMPshiftRL (MOVDconst [c]) x [d]) => (InvertFlags (CMPconst [c] (SRLconst <x.Type> x [d])))
  1510  (CMPshiftRA (MOVDconst [c]) x [d]) => (InvertFlags (CMPconst [c] (SRAconst <x.Type> x [d])))
  1511  (CMNshiftLL (MOVDconst [c]) x [d]) => (CMNconst [c] (SLLconst <x.Type> x [d]))
  1512  (CMNshiftRL (MOVDconst [c]) x [d]) => (CMNconst [c] (SRLconst <x.Type> x [d]))
  1513  (CMNshiftRA (MOVDconst [c]) x [d]) => (CMNconst [c] (SRAconst <x.Type> x [d]))
  1514  (TSTshiftLL (MOVDconst [c]) x [d]) => (TSTconst [c] (SLLconst <x.Type> x [d]))
  1515  (TSTshiftRL (MOVDconst [c]) x [d]) => (TSTconst [c] (SRLconst <x.Type> x [d]))
  1516  (TSTshiftRA (MOVDconst [c]) x [d]) => (TSTconst [c] (SRAconst <x.Type> x [d]))
  1517  (TSTshiftRO (MOVDconst [c]) x [d]) => (TSTconst [c] (RORconst <x.Type> x [d]))
  1518  
  1519  // constant folding in *shift ops
  1520  (MVNshiftLL (MOVDconst [c]) [d]) => (MOVDconst [^int64(uint64(c)<<uint64(d))])
  1521  (MVNshiftRL (MOVDconst [c]) [d]) => (MOVDconst [^int64(uint64(c)>>uint64(d))])
  1522  (MVNshiftRA (MOVDconst [c]) [d]) => (MOVDconst [^(c>>uint64(d))])
  1523  (MVNshiftRO (MOVDconst [c]) [d]) => (MOVDconst [^rotateRight64(c, d)])
  1524  (NEGshiftLL (MOVDconst [c]) [d]) => (MOVDconst [-int64(uint64(c)<<uint64(d))])
  1525  (NEGshiftRL (MOVDconst [c]) [d]) => (MOVDconst [-int64(uint64(c)>>uint64(d))])
  1526  (NEGshiftRA (MOVDconst [c]) [d]) => (MOVDconst [-(c>>uint64(d))])
  1527  (ADDshiftLL x (MOVDconst [c]) [d]) => (ADDconst x [int64(uint64(c)<<uint64(d))])
  1528  (ADDshiftRL x (MOVDconst [c]) [d]) => (ADDconst x [int64(uint64(c)>>uint64(d))])
  1529  (ADDshiftRA x (MOVDconst [c]) [d]) => (ADDconst x [c>>uint64(d)])
  1530  (SUBshiftLL x (MOVDconst [c]) [d]) => (SUBconst x [int64(uint64(c)<<uint64(d))])
  1531  (SUBshiftRL x (MOVDconst [c]) [d]) => (SUBconst x [int64(uint64(c)>>uint64(d))])
  1532  (SUBshiftRA x (MOVDconst [c]) [d]) => (SUBconst x [c>>uint64(d)])
  1533  (ANDshiftLL x (MOVDconst [c]) [d]) => (ANDconst x [int64(uint64(c)<<uint64(d))])
  1534  (ANDshiftRL x (MOVDconst [c]) [d]) => (ANDconst x [int64(uint64(c)>>uint64(d))])
  1535  (ANDshiftRA x (MOVDconst [c]) [d]) => (ANDconst x [c>>uint64(d)])
  1536  (ANDshiftRO x (MOVDconst [c]) [d]) => (ANDconst x [rotateRight64(c, d)])
  1537  (ORshiftLL  x (MOVDconst [c]) [d]) => (ORconst  x [int64(uint64(c)<<uint64(d))])
  1538  (ORshiftRL  x (MOVDconst [c]) [d]) => (ORconst  x [int64(uint64(c)>>uint64(d))])
  1539  (ORshiftRA  x (MOVDconst [c]) [d]) => (ORconst  x [c>>uint64(d)])
  1540  (ORshiftRO  x (MOVDconst [c]) [d]) => (ORconst  x [rotateRight64(c, d)])
  1541  (XORshiftLL x (MOVDconst [c]) [d]) => (XORconst x [int64(uint64(c)<<uint64(d))])
  1542  (XORshiftRL x (MOVDconst [c]) [d]) => (XORconst x [int64(uint64(c)>>uint64(d))])
  1543  (XORshiftRA x (MOVDconst [c]) [d]) => (XORconst x [c>>uint64(d)])
  1544  (XORshiftRO x (MOVDconst [c]) [d]) => (XORconst x [rotateRight64(c, d)])
  1545  (BICshiftLL x (MOVDconst [c]) [d]) => (ANDconst x [^int64(uint64(c)<<uint64(d))])
  1546  (BICshiftRL x (MOVDconst [c]) [d]) => (ANDconst x [^int64(uint64(c)>>uint64(d))])
  1547  (BICshiftRA x (MOVDconst [c]) [d]) => (ANDconst x [^(c>>uint64(d))])
  1548  (BICshiftRO x (MOVDconst [c]) [d]) => (ANDconst x [^rotateRight64(c, d)])
  1549  (ORNshiftLL x (MOVDconst [c]) [d]) => (ORconst  x [^int64(uint64(c)<<uint64(d))])
  1550  (ORNshiftRL x (MOVDconst [c]) [d]) => (ORconst  x [^int64(uint64(c)>>uint64(d))])
  1551  (ORNshiftRA x (MOVDconst [c]) [d]) => (ORconst  x [^(c>>uint64(d))])
  1552  (ORNshiftRO x (MOVDconst [c]) [d]) => (ORconst  x [^rotateRight64(c, d)])
  1553  (EONshiftLL x (MOVDconst [c]) [d]) => (XORconst x [^int64(uint64(c)<<uint64(d))])
  1554  (EONshiftRL x (MOVDconst [c]) [d]) => (XORconst x [^int64(uint64(c)>>uint64(d))])
  1555  (EONshiftRA x (MOVDconst [c]) [d]) => (XORconst x [^(c>>uint64(d))])
  1556  (EONshiftRO x (MOVDconst [c]) [d]) => (XORconst x [^rotateRight64(c, d)])
  1557  (CMPshiftLL x (MOVDconst [c]) [d]) => (CMPconst x [int64(uint64(c)<<uint64(d))])
  1558  (CMPshiftRL x (MOVDconst [c]) [d]) => (CMPconst x [int64(uint64(c)>>uint64(d))])
  1559  (CMPshiftRA x (MOVDconst [c]) [d]) => (CMPconst x [c>>uint64(d)])
  1560  (CMNshiftLL x (MOVDconst [c]) [d]) => (CMNconst x [int64(uint64(c)<<uint64(d))])
  1561  (CMNshiftRL x (MOVDconst [c]) [d]) => (CMNconst x [int64(uint64(c)>>uint64(d))])
  1562  (CMNshiftRA x (MOVDconst [c]) [d]) => (CMNconst x [c>>uint64(d)])
  1563  (TSTshiftLL x (MOVDconst [c]) [d]) => (TSTconst x [int64(uint64(c)<<uint64(d))])
  1564  (TSTshiftRL x (MOVDconst [c]) [d]) => (TSTconst x [int64(uint64(c)>>uint64(d))])
  1565  (TSTshiftRA x (MOVDconst [c]) [d]) => (TSTconst x [c>>uint64(d)])
  1566  (TSTshiftRO x (MOVDconst [c]) [d]) => (TSTconst x [rotateRight64(c, d)])
  1567  
  1568  // simplification with *shift ops
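        // For example, (SUBshiftLL (SLLconst x [c]) x [c]) computes (x<<c) - (x<<c), which is
        // always 0; the AND/OR/XOR forms collapse similarly when both operands are the same
        // shifted value.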
  1569  (SUBshiftLL (SLLconst x [c]) x [c]) => (MOVDconst [0])
  1570  (SUBshiftRL (SRLconst x [c]) x [c]) => (MOVDconst [0])
  1571  (SUBshiftRA (SRAconst x [c]) x [c]) => (MOVDconst [0])
  1572  (ANDshiftLL y:(SLLconst x [c]) x [c]) => y
  1573  (ANDshiftRL y:(SRLconst x [c]) x [c]) => y
  1574  (ANDshiftRA y:(SRAconst x [c]) x [c]) => y
  1575  (ANDshiftRO y:(RORconst x [c]) x [c]) => y
  1576  (ORshiftLL  y:(SLLconst x [c]) x [c]) => y
  1577  (ORshiftRL  y:(SRLconst x [c]) x [c]) => y
  1578  (ORshiftRA  y:(SRAconst x [c]) x [c]) => y
  1579  (ORshiftRO  y:(RORconst x [c]) x [c]) => y
  1580  (XORshiftLL (SLLconst x [c]) x [c]) => (MOVDconst [0])
  1581  (XORshiftRL (SRLconst x [c]) x [c]) => (MOVDconst [0])
  1582  (XORshiftRA (SRAconst x [c]) x [c]) => (MOVDconst [0])
  1583  (XORshiftRO (RORconst x [c]) x [c]) => (MOVDconst [0])
  1584  (BICshiftLL (SLLconst x [c]) x [c]) => (MOVDconst [0])
  1585  (BICshiftRL (SRLconst x [c]) x [c]) => (MOVDconst [0])
  1586  (BICshiftRA (SRAconst x [c]) x [c]) => (MOVDconst [0])
  1587  (BICshiftRO (RORconst x [c]) x [c]) => (MOVDconst [0])
  1588  (EONshiftLL (SLLconst x [c]) x [c]) => (MOVDconst [-1])
  1589  (EONshiftRL (SRLconst x [c]) x [c]) => (MOVDconst [-1])
  1590  (EONshiftRA (SRAconst x [c]) x [c]) => (MOVDconst [-1])
  1591  (EONshiftRO (RORconst x [c]) x [c]) => (MOVDconst [-1])
  1592  (ORNshiftLL (SLLconst x [c]) x [c]) => (MOVDconst [-1])
  1593  (ORNshiftRL (SRLconst x [c]) x [c]) => (MOVDconst [-1])
  1594  (ORNshiftRA (SRAconst x [c]) x [c]) => (MOVDconst [-1])
  1595  (ORNshiftRO (RORconst x [c]) x [c]) => (MOVDconst [-1])
  1596  
  1597  // rev16w | rev16
   1598  // ((x>>8) | (x<<8)) => (REV16W x), where x has type uint16; "|" can also be "^" or "+".
  1599  ((ADDshiftLL|ORshiftLL|XORshiftLL) <typ.UInt16> [8] (UBFX <typ.UInt16> [armBFAuxInt(8, 8)] x) x) => (REV16W x)
  1600  
  1601  // ((x & 0xff00ff00)>>8) | ((x & 0x00ff00ff)<<8), "|" can also be "^" or "+".
  1602  ((ADDshiftLL|ORshiftLL|XORshiftLL) [8] (UBFX [armBFAuxInt(8, 24)] (ANDconst [c1] x)) (ANDconst [c2] x))
  1603  	&& uint32(c1) == 0xff00ff00 && uint32(c2) == 0x00ff00ff
  1604  	=> (REV16W x)
  1605  
  1606  // ((x & 0xff00ff00ff00ff00)>>8) | ((x & 0x00ff00ff00ff00ff)<<8), "|" can also be "^" or "+".
  1607  ((ADDshiftLL|ORshiftLL|XORshiftLL) [8] (SRLconst [8] (ANDconst [c1] x)) (ANDconst [c2] x))
  1608  	&& (uint64(c1) == 0xff00ff00ff00ff00 && uint64(c2) == 0x00ff00ff00ff00ff)
  1609  	=> (REV16 x)
  1610  
  1611  // ((x & 0xff00ff00)>>8) | ((x & 0x00ff00ff)<<8), "|" can also be "^" or "+".
  1612  ((ADDshiftLL|ORshiftLL|XORshiftLL) [8] (SRLconst [8] (ANDconst [c1] x)) (ANDconst [c2] x))
  1613  	&& (uint64(c1) == 0xff00ff00 && uint64(c2) == 0x00ff00ff)
  1614  	=> (REV16 (ANDconst <x.Type> [0xffffffff] x))
  1615  
  1616  // Extract from reg pair
  1617  (ADDshiftLL [c] (SRLconst x [64-c]) x2) => (EXTRconst [64-c] x2 x)
  1618  ( ORshiftLL [c] (SRLconst x [64-c]) x2) => (EXTRconst [64-c] x2 x)
  1619  (XORshiftLL [c] (SRLconst x [64-c]) x2) => (EXTRconst [64-c] x2 x)
  1620  
  1621  (ADDshiftLL <t> [c] (UBFX [bfc] x) x2) && c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)
  1622  	=> (EXTRWconst [32-c] x2 x)
  1623  ( ORshiftLL <t> [c] (UBFX [bfc] x) x2) && c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)
  1624  	=> (EXTRWconst [32-c] x2 x)
  1625  (XORshiftLL <t> [c] (UBFX [bfc] x) x2) && c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)
  1626  	=> (EXTRWconst [32-c] x2 x)
  1627  
  1628  // Rewrite special pairs of shifts to AND.
  1629  // On ARM64 the bitmask can fit into an instruction.
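        // For example, (SRLconst [c] (SLLconst [c] x)) computes (x<<c)>>c, which keeps only
        // the low 64-c bits, i.e. x & (1<<(64-c) - 1); a contiguous mask like this is
        // encodable as an ARM64 logical immediate, so a single AND suffices.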
  1630  (SRLconst [c] (SLLconst [c] x)) && 0 < c && c < 64 => (ANDconst [1<<uint(64-c)-1] x) // mask out high bits
  1631  (SLLconst [c] (SRLconst [c] x)) && 0 < c && c < 64 => (ANDconst [^(1<<uint(c)-1)] x) // mask out low bits
  1632  
   1633  // Special case: when c1|c2 == ^0, the OR sets every bit that the AND could clear, so the AND is redundant. An example is math.Copysign(c,-1).
  1634  (ORconst [c1] (ANDconst [c2] x)) && c2|c1 == ^0  => (ORconst [c1] x)
  1635  
   1636  // If the shift amount is at least the data size (32, 16, 8), we can optimize to constant 0.
  1637  (MOVWUreg (SLLconst [lc] x)) && lc >= 32 => (MOVDconst [0])
  1638  (MOVHUreg (SLLconst [lc] x)) && lc >= 16 => (MOVDconst [0])
  1639  (MOVBUreg (SLLconst [lc] x)) && lc >= 8 => (MOVDconst [0])
  1640  
   1641  // After zero extension, the upper (64-datasize(32|16|8)) bits are zero, so we can optimize to constant 0.
  1642  (SRLconst [rc] (MOVWUreg x)) && rc >= 32 => (MOVDconst [0])
  1643  (SRLconst [rc] (MOVHUreg x)) && rc >= 16 => (MOVDconst [0])
  1644  (SRLconst [rc] (MOVBUreg x)) && rc >= 8 => (MOVDconst [0])
  1645  
  1646  // bitfield ops
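        // In the rules below, SBFIZ/UBFIZ [armBFAuxInt(lsb, width)] take the low 'width' bits
        // of the source and place them at bit 'lsb' (sign- or zero-extending the result), and
        // SBFX/UBFX extract 'width' bits starting at bit 'lsb' into the low bits of the result
        // (a summary of the auxint convention; see the op definitions for the exact semantics).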
  1647  
  1648  // sbfiz
  1649  // (x << lc) >> rc
  1650  (SRAconst [rc] (SLLconst [lc] x)) && lc > rc => (SBFIZ [armBFAuxInt(lc-rc, 64-lc)] x)
  1651  // int64(x << lc)
  1652  (MOVWreg (SLLconst [lc] x)) && lc < 32 => (SBFIZ [armBFAuxInt(lc, 32-lc)] x)
  1653  (MOVHreg (SLLconst [lc] x)) && lc < 16 => (SBFIZ [armBFAuxInt(lc, 16-lc)] x)
  1654  (MOVBreg (SLLconst [lc] x)) && lc < 8  => (SBFIZ [armBFAuxInt(lc,  8-lc)] x)
  1655  // int64(x) << lc
  1656  (SLLconst [lc] (MOVWreg x))  => (SBFIZ [armBFAuxInt(lc, min(32, 64-lc))] x)
  1657  (SLLconst [lc] (MOVHreg x))  => (SBFIZ [armBFAuxInt(lc, min(16, 64-lc))] x)
  1658  (SLLconst [lc] (MOVBreg x))  => (SBFIZ [armBFAuxInt(lc, min(8,  64-lc))] x)
  1659  
  1660  // sbfx
  1661  // (x << lc) >> rc
  1662  (SRAconst [rc] (SLLconst [lc] x)) && lc <= rc => (SBFX [armBFAuxInt(rc-lc, 64-rc)] x)
  1663  // int64(x) >> rc
  1664  (SRAconst [rc] (MOVWreg x)) && rc < 32 => (SBFX [armBFAuxInt(rc, 32-rc)] x)
  1665  (SRAconst [rc] (MOVHreg x)) && rc < 16 => (SBFX [armBFAuxInt(rc, 16-rc)] x)
  1666  (SRAconst [rc] (MOVBreg x)) && rc < 8  => (SBFX [armBFAuxInt(rc,  8-rc)] x)
  1667  // merge sbfx and sign-extension into sbfx
  1668  (MOVWreg (SBFX [bfc] x)) && bfc.width() <= 32 => (SBFX [bfc] x)
  1669  (MOVHreg (SBFX [bfc] x)) && bfc.width() <= 16 => (SBFX [bfc] x)
  1670  (MOVBreg (SBFX [bfc] x)) && bfc.width() <=  8 => (SBFX [bfc] x)
  1671  
  1672  // sbfiz/sbfx combinations: merge shifts into bitfield ops
  1673  (SRAconst [sc] (SBFIZ [bfc] x)) && sc < bfc.lsb()
  1674  	=> (SBFIZ [armBFAuxInt(bfc.lsb()-sc, bfc.width())] x)
  1675  (SRAconst [sc] (SBFIZ [bfc] x)) && sc >= bfc.lsb()
  1676  	&& sc < bfc.lsb()+bfc.width()
  1677  	=> (SBFX [armBFAuxInt(sc-bfc.lsb(), bfc.lsb()+bfc.width()-sc)] x)
  1678  (SBFX [bfc] s:(SLLconst [sc] x))
  1679  	&& s.Uses == 1
  1680  	&& sc <= bfc.lsb()
  1681  	=> (SBFX [armBFAuxInt(bfc.lsb() - sc, bfc.width())] x)
  1682  (SBFX [bfc] s:(SLLconst [sc] x))
  1683  	&& s.Uses == 1
  1684  	&& sc > bfc.lsb()
  1685  	=> (SBFIZ [armBFAuxInt(sc - bfc.lsb(), bfc.width() - (sc-bfc.lsb()))] x)
  1686  
  1687  // ubfiz
  1688  // (x << lc) >> rc
  1689  (SRLconst [rc] (SLLconst [lc] x)) && lc > rc => (UBFIZ [armBFAuxInt(lc-rc, 64-lc)] x)
  1690  // uint64(x) << lc
  1691  (SLLconst [lc] (MOVWUreg x))  => (UBFIZ [armBFAuxInt(lc, min(32, 64-lc))] x)
  1692  (SLLconst [lc] (MOVHUreg x))  => (UBFIZ [armBFAuxInt(lc, min(16, 64-lc))] x)
  1693  (SLLconst [lc] (MOVBUreg x))  => (UBFIZ [armBFAuxInt(lc, min(8,  64-lc))] x)
  1694  // uint64(x << lc)
  1695  (MOVWUreg (SLLconst [lc] x)) && lc < 32 => (UBFIZ [armBFAuxInt(lc, 32-lc)] x)
  1696  (MOVHUreg (SLLconst [lc] x)) && lc < 16 => (UBFIZ [armBFAuxInt(lc, 16-lc)] x)
  1697  (MOVBUreg (SLLconst [lc] x)) && lc < 8  => (UBFIZ [armBFAuxInt(lc,  8-lc)] x)
  1698  
  1699  // merge ANDconst into ubfiz
  1700  // (x & ac) << sc
  1701  (SLLconst [sc] (ANDconst [ac] x)) && isARM64BFMask(sc, ac, 0)
  1702  	=> (UBFIZ [armBFAuxInt(sc, arm64BFWidth(ac, 0))] x)
  1703  // (x << sc) & ac
  1704  (ANDconst [ac] (SLLconst [sc] x)) && isARM64BFMask(sc, ac, sc)
  1705  	=> (UBFIZ [armBFAuxInt(sc, arm64BFWidth(ac, sc))] x)

// ubfx
// (x << lc) >> rc
(SRLconst [rc] (SLLconst [lc] x)) && lc < rc => (UBFX [armBFAuxInt(rc-lc, 64-rc)] x)
// uint64(x) >> rc
(SRLconst [rc] (MOVWUreg x)) && rc < 32 => (UBFX [armBFAuxInt(rc, 32-rc)] x)
(SRLconst [rc] (MOVHUreg x)) && rc < 16 => (UBFX [armBFAuxInt(rc, 16-rc)] x)
(SRLconst [rc] (MOVBUreg x)) && rc < 8  => (UBFX [armBFAuxInt(rc,  8-rc)] x)
// uint64(x >> rc)
(MOVWUreg (SRLconst [rc] x)) && rc < 32 => (UBFX [armBFAuxInt(rc, 32)] x)
(MOVHUreg (SRLconst [rc] x)) && rc < 16 => (UBFX [armBFAuxInt(rc, 16)] x)
(MOVBUreg (SRLconst [rc] x)) && rc < 8  => (UBFX [armBFAuxInt(rc,  8)] x)
// merge ANDconst into ubfx
// (x >> sc) & ac
(ANDconst [ac] (SRLconst [sc] x)) && isARM64BFMask(sc, ac, 0)
	=> (UBFX [armBFAuxInt(sc, arm64BFWidth(ac, 0))] x)
// (x & ac) >> sc
(SRLconst [sc] (ANDconst [ac] x)) && isARM64BFMask(sc, ac, sc)
	=> (UBFX [armBFAuxInt(sc, arm64BFWidth(ac, sc))] x)
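// For example, (ANDconst [0xff] (SRLconst [8] x)) becomes
// (UBFX [armBFAuxInt(8, 8)] x): extract the second-lowest byte of x.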
// merge ANDconst and ubfx into ubfx
(ANDconst [c] (UBFX [bfc] x)) && isARM64BFMask(0, c, 0) =>
	(UBFX [armBFAuxInt(bfc.lsb(), min(bfc.width(), arm64BFWidth(c, 0)))] x)
(UBFX [bfc] (ANDconst [c] x)) && isARM64BFMask(0, c, 0) && bfc.lsb() + bfc.width() <= arm64BFWidth(c, 0) =>
	(UBFX [bfc] x)
// merge ubfx and zero-extension into ubfx
(MOVWUreg (UBFX [bfc] x)) && bfc.width() <= 32 => (UBFX [bfc] x)
(MOVHUreg (UBFX [bfc] x)) && bfc.width() <= 16 => (UBFX [bfc] x)
(MOVBUreg (UBFX [bfc] x)) && bfc.width() <=  8 => (UBFX [bfc] x)

// Extracting bits from across a zero-extension boundary.
(UBFX [bfc] e:(MOVWUreg x))
	&& e.Uses == 1
	&& bfc.lsb() < 32
	=> (UBFX [armBFAuxInt(bfc.lsb(), min(bfc.width(), 32-bfc.lsb()))] x)
(UBFX [bfc] e:(MOVHUreg x))
	&& e.Uses == 1
	&& bfc.lsb() < 16
	=> (UBFX [armBFAuxInt(bfc.lsb(), min(bfc.width(), 16-bfc.lsb()))] x)
(UBFX [bfc] e:(MOVBUreg x))
	&& e.Uses == 1
	&& bfc.lsb() < 8
	=> (UBFX [armBFAuxInt(bfc.lsb(), min(bfc.width(), 8-bfc.lsb()))] x)
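// The zero-extension clears every bit at and above the extension boundary,
// so the extract can read x directly as long as its width is clamped to
// stop at that boundary.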

// ubfiz/ubfx combinations: merge shifts into bitfield ops
(SRLconst [sc] (UBFX [bfc] x)) && sc < bfc.width()
	=> (UBFX [armBFAuxInt(bfc.lsb()+sc, bfc.width()-sc)] x)
(UBFX [bfc] (SRLconst [sc] x)) && sc+bfc.width()+bfc.lsb() < 64
	=> (UBFX [armBFAuxInt(bfc.lsb()+sc, bfc.width())] x)
(SLLconst [sc] (UBFIZ [bfc] x)) && sc+bfc.width()+bfc.lsb() < 64
	=> (UBFIZ [armBFAuxInt(bfc.lsb()+sc, bfc.width())] x)
(UBFIZ [bfc] (SLLconst [sc] x)) && sc < bfc.width()
	=> (UBFIZ [armBFAuxInt(bfc.lsb()+sc, bfc.width()-sc)] x)
// ((x << c1) >> c2) >> c3
(SRLconst [sc] (UBFIZ [bfc] x)) && sc == bfc.lsb()
	=> (ANDconst [1<<uint(bfc.width())-1] x)
(SRLconst [sc] (UBFIZ [bfc] x)) && sc < bfc.lsb()
	=> (UBFIZ [armBFAuxInt(bfc.lsb()-sc, bfc.width())] x)
(SRLconst [sc] (UBFIZ [bfc] x)) && sc > bfc.lsb()
	&& sc < bfc.lsb()+bfc.width()
	=> (UBFX [armBFAuxInt(sc-bfc.lsb(), bfc.lsb()+bfc.width()-sc)] x)
// ((x << c1) << c2) >> c3
(UBFX [bfc] (SLLconst [sc] x)) && sc == bfc.lsb()
	=> (ANDconst [1<<uint(bfc.width())-1] x)
(UBFX [bfc] (SLLconst [sc] x)) && sc < bfc.lsb()
	=> (UBFX [armBFAuxInt(bfc.lsb()-sc, bfc.width())] x)
(UBFX [bfc] (SLLconst [sc] x)) && sc > bfc.lsb()
	&& sc < bfc.lsb()+bfc.width()
	=> (UBFIZ [armBFAuxInt(sc-bfc.lsb(), bfc.lsb()+bfc.width()-sc)] x)
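// For example, (UBFX [armBFAuxInt(8, 8)] (SLLconst [8] x)) becomes
// (ANDconst [0xff] x): the left shift and the extract's right shift
// cancel, leaving only the mask.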

// bfi
(OR (UBFIZ [bfc] x) (ANDconst [ac] y))
	&& ac == ^((1<<uint(bfc.width())-1) << uint(bfc.lsb()))
	=> (BFI [bfc] y x)
(ORshiftLL [s] (ANDconst [xc] x) (ANDconst [yc] y))
	&& xc == ^(yc << s)    // opposite masks
	&& yc & (yc+1) == 0    // power of 2 minus 1
	&& yc > 0              // not 0, not all 64 bits (there are better rewrites in that case)
	&& s+log64(yc+1) <= 64 // shifted mask doesn't overflow
	=> (BFI [armBFAuxInt(s, log64(yc+1))] x y)
(ORshiftRL [rc] (ANDconst [ac] x) (SLLconst [lc] y))
	&& lc > rc && ac == ^((1<<uint(64-lc)-1) << uint64(lc-rc))
	=> (BFI [armBFAuxInt(lc-rc, 64-lc)] x y)
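// BFI [armBFAuxInt(lsb, width)] y x replaces bits lsb..lsb+width-1 of y
// with the low width bits of x. For example, with bfc = armBFAuxInt(8, 8)
// the first rule requires ac == ^0xff00 and produces (BFI [bfc] y x).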
// bfxil
(OR (UBFX [bfc] x) (ANDconst [ac] y)) && ac == ^(1<<uint(bfc.width())-1)
	=> (BFXIL [bfc] y x)
(ORshiftLL [sc] (UBFX [bfc] x) (SRLconst [sc] y)) && sc == bfc.width()
	=> (BFXIL [bfc] y x)
(ORshiftRL [rc] (ANDconst [ac] y) (SLLconst [lc] x)) && lc < rc && ac == ^((1<<uint(64-rc)-1))
	=> (BFXIL [armBFAuxInt(rc-lc, 64-rc)] y x)
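// BFXIL [armBFAuxInt(lsb, width)] y x replaces the low width bits of y
// with bits lsb..lsb+width-1 of x, leaving y's remaining bits untouched.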

// FP simplification
(FNEGS  (FMULS  x y)) => (FNMULS x y)
(FNEGD  (FMULD  x y)) => (FNMULD x y)
(FMULS  (FNEGS  x) y) => (FNMULS x y)
(FMULD  (FNEGD  x) y) => (FNMULD x y)
(FNEGS  (FNMULS x y)) => (FMULS  x y)
(FNEGD  (FNMULD x y)) => (FMULD  x y)
(FNMULS (FNEGS  x) y) => (FMULS  x y)
(FNMULD (FNEGD  x) y) => (FMULD  x y)
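// FNMUL computes -(x*y), so a negation of the product or of one operand
// folds into the multiply instead of costing a separate FNEG.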
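// The fused ops compute, each with a single rounding:
//   FMADD  a x y = a + x*y     FMSUB  a x y = a - x*y
//   FNMADD a x y = -(a + x*y)  FNMSUB a x y = x*y - a
// useFMA reports whether fusing is permitted at this value; it is normally
// true on arm64 and exists so that individual fusion sites can be switched
// off when debugging.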
(FADDS a (FMULS  x y)) && a.Block.Func.useFMA(v) => (FMADDS  a x y)
(FADDD a (FMULD  x y)) && a.Block.Func.useFMA(v) => (FMADDD  a x y)
(FSUBS a (FMULS  x y)) && a.Block.Func.useFMA(v) => (FMSUBS  a x y)
(FSUBD a (FMULD  x y)) && a.Block.Func.useFMA(v) => (FMSUBD  a x y)
(FSUBS (FMULS  x y) a) && a.Block.Func.useFMA(v) => (FNMSUBS a x y)
(FSUBD (FMULD  x y) a) && a.Block.Func.useFMA(v) => (FNMSUBD a x y)
(FADDS a (FNMULS x y)) && a.Block.Func.useFMA(v) => (FMSUBS  a x y)
(FADDD a (FNMULD x y)) && a.Block.Func.useFMA(v) => (FMSUBD  a x y)
(FSUBS a (FNMULS x y)) && a.Block.Func.useFMA(v) => (FMADDS  a x y)
(FSUBD a (FNMULD x y)) && a.Block.Func.useFMA(v) => (FMADDD  a x y)
(FSUBS (FNMULS x y) a) && a.Block.Func.useFMA(v) => (FNMADDS a x y)
(FSUBD (FNMULD x y) a) && a.Block.Func.useFMA(v) => (FNMADDD a x y)

(MOVBUload [off] {sym} (SB) _) && symIsRO(sym) => (MOVDconst [int64(read8(sym, int64(off)))])
(MOVHUload [off] {sym} (SB) _) && symIsRO(sym) => (MOVDconst [int64(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))])
(MOVWUload [off] {sym} (SB) _) && symIsRO(sym) => (MOVDconst [int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))])
(MOVDload  [off] {sym} (SB) _) && symIsRO(sym) => (MOVDconst [int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder))])
(MOVBload  [off] {sym} (SB) _) && symIsRO(sym) => (MOVDconst [int64(int8(read8(sym, int64(off))))])
(MOVHload  [off] {sym} (SB) _) && symIsRO(sym) => (MOVDconst [int64(int16(read16(sym, int64(off), config.ctxt.Arch.ByteOrder)))])
(MOVWload  [off] {sym} (SB) _) && symIsRO(sym) => (MOVDconst [int64(int32(read32(sym, int64(off), config.ctxt.Arch.ByteOrder)))])
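// These fold loads from read-only symbols into constants: the bytes are
// read at compile time in the target's byte order, and the signed variants
// sign-extend the loaded value.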

// Prefetch instructions (aux selects the variant: 0 = PLDL1KEEP; 1 = PLDL1STRM)
(PrefetchCache addr mem)         => (PRFM [0] addr mem)
(PrefetchCacheStreamed addr mem) => (PRFM [1] addr mem)

// Arch-specific inlining for small or disjoint runtime.memmove
(SelectN [0] call:(CALLstatic {sym} s1:(MOVDstore _ (MOVDconst [sz]) s2:(MOVDstore  _ src s3:(MOVDstore {t} _ dst mem)))))
	&& sz >= 0
	&& isSameCall(sym, "runtime.memmove")
	&& s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1
	&& isInlinableMemmove(dst, src, sz, config)
	&& clobber(s1, s2, s3, call)
	=> (Move [sz] dst src mem)
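// The three nested stores spill the size, src and dst arguments to the
// stack for the call (the pre-register-ABI form); once the call is known
// to be an inlinable memmove, the stores and the call are all clobbered
// and replaced by a single Move.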

// Match post-lowering calls, register version.
(SelectN [0] call:(CALLstatic {sym} dst src (MOVDconst [sz]) mem))
	&& sz >= 0
	&& isSameCall(sym, "runtime.memmove")
	&& call.Uses == 1
	&& isInlinableMemmove(dst, src, sz, config)
	&& clobber(call)
	=> (Move [sz] dst src mem)

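// Reversing the bytes twice is the identity.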
((REV|REVW) ((REV|REVW) p)) => p

// internal/runtime/math.MulUintptr intrinsics

(Select0 (Mul64uover x y)) => (MUL x y)
(Select1 (Mul64uover x y)) => (NotEqual (CMPconst (UMULH <typ.UInt64> x y) [0]))
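// The low 64 bits of the full product are the multiply result; overflow
// occurred exactly when the high 64 bits (UMULH) are nonzero.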