// Code generated from _gen/AMD64.rules using 'go generate'; DO NOT EDIT.

package ssa

import "internal/buildcfg"
import "math"
import "cmd/internal/obj"
import "cmd/compile/internal/types"

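// rewriteValueAMD64 dispatches v to the rewrite rules for its opcode and
// reports whether v was rewritten. Opcodes with a single unconditional
// lowering are retargeted inline by resetting v.Op; opcodes with conditions
// or multiple rules are handled by the per-opcode helpers generated below.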
func rewriteValueAMD64(v *Value) bool {
	switch v.Op {
	case OpAMD64ADCQ:
		return rewriteValueAMD64_OpAMD64ADCQ(v)
	case OpAMD64ADCQconst:
		return rewriteValueAMD64_OpAMD64ADCQconst(v)
	case OpAMD64ADDL:
		return rewriteValueAMD64_OpAMD64ADDL(v)
	case OpAMD64ADDLconst:
		return rewriteValueAMD64_OpAMD64ADDLconst(v)
	case OpAMD64ADDLconstmodify:
		return rewriteValueAMD64_OpAMD64ADDLconstmodify(v)
	case OpAMD64ADDLload:
		return rewriteValueAMD64_OpAMD64ADDLload(v)
	case OpAMD64ADDLmodify:
		return rewriteValueAMD64_OpAMD64ADDLmodify(v)
	case OpAMD64ADDQ:
		return rewriteValueAMD64_OpAMD64ADDQ(v)
	case OpAMD64ADDQcarry:
		return rewriteValueAMD64_OpAMD64ADDQcarry(v)
	case OpAMD64ADDQconst:
		return rewriteValueAMD64_OpAMD64ADDQconst(v)
	case OpAMD64ADDQconstmodify:
		return rewriteValueAMD64_OpAMD64ADDQconstmodify(v)
	case OpAMD64ADDQload:
		return rewriteValueAMD64_OpAMD64ADDQload(v)
	case OpAMD64ADDQmodify:
		return rewriteValueAMD64_OpAMD64ADDQmodify(v)
	case OpAMD64ADDSD:
		return rewriteValueAMD64_OpAMD64ADDSD(v)
	case OpAMD64ADDSDload:
		return rewriteValueAMD64_OpAMD64ADDSDload(v)
	case OpAMD64ADDSS:
		return rewriteValueAMD64_OpAMD64ADDSS(v)
	case OpAMD64ADDSSload:
		return rewriteValueAMD64_OpAMD64ADDSSload(v)
	case OpAMD64ANDL:
		return rewriteValueAMD64_OpAMD64ANDL(v)
	case OpAMD64ANDLconst:
		return rewriteValueAMD64_OpAMD64ANDLconst(v)
	case OpAMD64ANDLconstmodify:
		return rewriteValueAMD64_OpAMD64ANDLconstmodify(v)
	case OpAMD64ANDLload:
		return rewriteValueAMD64_OpAMD64ANDLload(v)
	case OpAMD64ANDLmodify:
		return rewriteValueAMD64_OpAMD64ANDLmodify(v)
	case OpAMD64ANDNL:
		return rewriteValueAMD64_OpAMD64ANDNL(v)
	case OpAMD64ANDNQ:
		return rewriteValueAMD64_OpAMD64ANDNQ(v)
	case OpAMD64ANDQ:
		return rewriteValueAMD64_OpAMD64ANDQ(v)
	case OpAMD64ANDQconst:
		return rewriteValueAMD64_OpAMD64ANDQconst(v)
	case OpAMD64ANDQconstmodify:
		return rewriteValueAMD64_OpAMD64ANDQconstmodify(v)
	case OpAMD64ANDQload:
		return rewriteValueAMD64_OpAMD64ANDQload(v)
	case OpAMD64ANDQmodify:
		return rewriteValueAMD64_OpAMD64ANDQmodify(v)
	case OpAMD64BSFQ:
		return rewriteValueAMD64_OpAMD64BSFQ(v)
	case OpAMD64BSWAPL:
		return rewriteValueAMD64_OpAMD64BSWAPL(v)
	case OpAMD64BSWAPQ:
		return rewriteValueAMD64_OpAMD64BSWAPQ(v)
	case OpAMD64BTCQconst:
		return rewriteValueAMD64_OpAMD64BTCQconst(v)
	case OpAMD64BTLconst:
		return rewriteValueAMD64_OpAMD64BTLconst(v)
	case OpAMD64BTQconst:
		return rewriteValueAMD64_OpAMD64BTQconst(v)
	case OpAMD64BTRQconst:
		return rewriteValueAMD64_OpAMD64BTRQconst(v)
	case OpAMD64BTSQconst:
		return rewriteValueAMD64_OpAMD64BTSQconst(v)
	case OpAMD64CMOVLCC:
		return rewriteValueAMD64_OpAMD64CMOVLCC(v)
	case OpAMD64CMOVLCS:
		return rewriteValueAMD64_OpAMD64CMOVLCS(v)
	case OpAMD64CMOVLEQ:
		return rewriteValueAMD64_OpAMD64CMOVLEQ(v)
	case OpAMD64CMOVLGE:
		return rewriteValueAMD64_OpAMD64CMOVLGE(v)
	case OpAMD64CMOVLGT:
		return rewriteValueAMD64_OpAMD64CMOVLGT(v)
	case OpAMD64CMOVLHI:
		return rewriteValueAMD64_OpAMD64CMOVLHI(v)
	case OpAMD64CMOVLLE:
		return rewriteValueAMD64_OpAMD64CMOVLLE(v)
	case OpAMD64CMOVLLS:
		return rewriteValueAMD64_OpAMD64CMOVLLS(v)
	case OpAMD64CMOVLLT:
		return rewriteValueAMD64_OpAMD64CMOVLLT(v)
	case OpAMD64CMOVLNE:
		return rewriteValueAMD64_OpAMD64CMOVLNE(v)
	case OpAMD64CMOVQCC:
		return rewriteValueAMD64_OpAMD64CMOVQCC(v)
	case OpAMD64CMOVQCS:
		return rewriteValueAMD64_OpAMD64CMOVQCS(v)
	case OpAMD64CMOVQEQ:
		return rewriteValueAMD64_OpAMD64CMOVQEQ(v)
	case OpAMD64CMOVQGE:
		return rewriteValueAMD64_OpAMD64CMOVQGE(v)
	case OpAMD64CMOVQGT:
		return rewriteValueAMD64_OpAMD64CMOVQGT(v)
	case OpAMD64CMOVQHI:
		return rewriteValueAMD64_OpAMD64CMOVQHI(v)
	case OpAMD64CMOVQLE:
		return rewriteValueAMD64_OpAMD64CMOVQLE(v)
	case OpAMD64CMOVQLS:
		return rewriteValueAMD64_OpAMD64CMOVQLS(v)
	case OpAMD64CMOVQLT:
		return rewriteValueAMD64_OpAMD64CMOVQLT(v)
	case OpAMD64CMOVQNE:
		return rewriteValueAMD64_OpAMD64CMOVQNE(v)
	case OpAMD64CMOVWCC:
		return rewriteValueAMD64_OpAMD64CMOVWCC(v)
	case OpAMD64CMOVWCS:
		return rewriteValueAMD64_OpAMD64CMOVWCS(v)
	case OpAMD64CMOVWEQ:
		return rewriteValueAMD64_OpAMD64CMOVWEQ(v)
	case OpAMD64CMOVWGE:
		return rewriteValueAMD64_OpAMD64CMOVWGE(v)
	case OpAMD64CMOVWGT:
		return rewriteValueAMD64_OpAMD64CMOVWGT(v)
	case OpAMD64CMOVWHI:
		return rewriteValueAMD64_OpAMD64CMOVWHI(v)
	case OpAMD64CMOVWLE:
		return rewriteValueAMD64_OpAMD64CMOVWLE(v)
	case OpAMD64CMOVWLS:
		return rewriteValueAMD64_OpAMD64CMOVWLS(v)
	case OpAMD64CMOVWLT:
		return rewriteValueAMD64_OpAMD64CMOVWLT(v)
	case OpAMD64CMOVWNE:
		return rewriteValueAMD64_OpAMD64CMOVWNE(v)
	case OpAMD64CMPB:
		return rewriteValueAMD64_OpAMD64CMPB(v)
	case OpAMD64CMPBconst:
		return rewriteValueAMD64_OpAMD64CMPBconst(v)
	case OpAMD64CMPBconstload:
		return rewriteValueAMD64_OpAMD64CMPBconstload(v)
	case OpAMD64CMPBload:
		return rewriteValueAMD64_OpAMD64CMPBload(v)
	case OpAMD64CMPL:
		return rewriteValueAMD64_OpAMD64CMPL(v)
	case OpAMD64CMPLconst:
		return rewriteValueAMD64_OpAMD64CMPLconst(v)
	case OpAMD64CMPLconstload:
		return rewriteValueAMD64_OpAMD64CMPLconstload(v)
	case OpAMD64CMPLload:
		return rewriteValueAMD64_OpAMD64CMPLload(v)
	case OpAMD64CMPQ:
		return rewriteValueAMD64_OpAMD64CMPQ(v)
	case OpAMD64CMPQconst:
		return rewriteValueAMD64_OpAMD64CMPQconst(v)
	case OpAMD64CMPQconstload:
		return rewriteValueAMD64_OpAMD64CMPQconstload(v)
	case OpAMD64CMPQload:
		return rewriteValueAMD64_OpAMD64CMPQload(v)
	case OpAMD64CMPW:
		return rewriteValueAMD64_OpAMD64CMPW(v)
	case OpAMD64CMPWconst:
		return rewriteValueAMD64_OpAMD64CMPWconst(v)
	case OpAMD64CMPWconstload:
		return rewriteValueAMD64_OpAMD64CMPWconstload(v)
	case OpAMD64CMPWload:
		return rewriteValueAMD64_OpAMD64CMPWload(v)
	case OpAMD64CMPXCHGLlock:
		return rewriteValueAMD64_OpAMD64CMPXCHGLlock(v)
	case OpAMD64CMPXCHGQlock:
		return rewriteValueAMD64_OpAMD64CMPXCHGQlock(v)
	case OpAMD64DIVSD:
		return rewriteValueAMD64_OpAMD64DIVSD(v)
	case OpAMD64DIVSDload:
		return rewriteValueAMD64_OpAMD64DIVSDload(v)
	case OpAMD64DIVSS:
		return rewriteValueAMD64_OpAMD64DIVSS(v)
	case OpAMD64DIVSSload:
		return rewriteValueAMD64_OpAMD64DIVSSload(v)
	case OpAMD64HMULL:
		return rewriteValueAMD64_OpAMD64HMULL(v)
	case OpAMD64HMULLU:
		return rewriteValueAMD64_OpAMD64HMULLU(v)
	case OpAMD64HMULQ:
		return rewriteValueAMD64_OpAMD64HMULQ(v)
	case OpAMD64HMULQU:
		return rewriteValueAMD64_OpAMD64HMULQU(v)
	case OpAMD64LEAL:
		return rewriteValueAMD64_OpAMD64LEAL(v)
	case OpAMD64LEAL1:
		return rewriteValueAMD64_OpAMD64LEAL1(v)
	case OpAMD64LEAL2:
		return rewriteValueAMD64_OpAMD64LEAL2(v)
	case OpAMD64LEAL4:
		return rewriteValueAMD64_OpAMD64LEAL4(v)
	case OpAMD64LEAL8:
		return rewriteValueAMD64_OpAMD64LEAL8(v)
	case OpAMD64LEAQ:
		return rewriteValueAMD64_OpAMD64LEAQ(v)
	case OpAMD64LEAQ1:
		return rewriteValueAMD64_OpAMD64LEAQ1(v)
	case OpAMD64LEAQ2:
		return rewriteValueAMD64_OpAMD64LEAQ2(v)
	case OpAMD64LEAQ4:
		return rewriteValueAMD64_OpAMD64LEAQ4(v)
	case OpAMD64LEAQ8:
		return rewriteValueAMD64_OpAMD64LEAQ8(v)
	case OpAMD64MOVBELstore:
		return rewriteValueAMD64_OpAMD64MOVBELstore(v)
	case OpAMD64MOVBEQstore:
		return rewriteValueAMD64_OpAMD64MOVBEQstore(v)
	case OpAMD64MOVBEWstore:
		return rewriteValueAMD64_OpAMD64MOVBEWstore(v)
	case OpAMD64MOVBQSX:
		return rewriteValueAMD64_OpAMD64MOVBQSX(v)
	case OpAMD64MOVBQSXload:
		return rewriteValueAMD64_OpAMD64MOVBQSXload(v)
	case OpAMD64MOVBQZX:
		return rewriteValueAMD64_OpAMD64MOVBQZX(v)
	case OpAMD64MOVBatomicload:
		return rewriteValueAMD64_OpAMD64MOVBatomicload(v)
	case OpAMD64MOVBload:
		return rewriteValueAMD64_OpAMD64MOVBload(v)
	case OpAMD64MOVBstore:
		return rewriteValueAMD64_OpAMD64MOVBstore(v)
	case OpAMD64MOVBstoreconst:
		return rewriteValueAMD64_OpAMD64MOVBstoreconst(v)
	case OpAMD64MOVLQSX:
		return rewriteValueAMD64_OpAMD64MOVLQSX(v)
	case OpAMD64MOVLQSXload:
		return rewriteValueAMD64_OpAMD64MOVLQSXload(v)
	case OpAMD64MOVLQZX:
		return rewriteValueAMD64_OpAMD64MOVLQZX(v)
	case OpAMD64MOVLatomicload:
		return rewriteValueAMD64_OpAMD64MOVLatomicload(v)
	case OpAMD64MOVLf2i:
		return rewriteValueAMD64_OpAMD64MOVLf2i(v)
	case OpAMD64MOVLi2f:
		return rewriteValueAMD64_OpAMD64MOVLi2f(v)
	case OpAMD64MOVLload:
		return rewriteValueAMD64_OpAMD64MOVLload(v)
	case OpAMD64MOVLstore:
		return rewriteValueAMD64_OpAMD64MOVLstore(v)
	case OpAMD64MOVLstoreconst:
		return rewriteValueAMD64_OpAMD64MOVLstoreconst(v)
	case OpAMD64MOVOload:
		return rewriteValueAMD64_OpAMD64MOVOload(v)
	case OpAMD64MOVOstore:
		return rewriteValueAMD64_OpAMD64MOVOstore(v)
	case OpAMD64MOVOstoreconst:
		return rewriteValueAMD64_OpAMD64MOVOstoreconst(v)
	case OpAMD64MOVQatomicload:
		return rewriteValueAMD64_OpAMD64MOVQatomicload(v)
	case OpAMD64MOVQf2i:
		return rewriteValueAMD64_OpAMD64MOVQf2i(v)
	case OpAMD64MOVQi2f:
		return rewriteValueAMD64_OpAMD64MOVQi2f(v)
	case OpAMD64MOVQload:
		return rewriteValueAMD64_OpAMD64MOVQload(v)
	case OpAMD64MOVQstore:
		return rewriteValueAMD64_OpAMD64MOVQstore(v)
	case OpAMD64MOVQstoreconst:
		return rewriteValueAMD64_OpAMD64MOVQstoreconst(v)
	case OpAMD64MOVSDload:
		return rewriteValueAMD64_OpAMD64MOVSDload(v)
	case OpAMD64MOVSDstore:
		return rewriteValueAMD64_OpAMD64MOVSDstore(v)
	case OpAMD64MOVSSload:
		return rewriteValueAMD64_OpAMD64MOVSSload(v)
	case OpAMD64MOVSSstore:
		return rewriteValueAMD64_OpAMD64MOVSSstore(v)
	case OpAMD64MOVWQSX:
		return rewriteValueAMD64_OpAMD64MOVWQSX(v)
	case OpAMD64MOVWQSXload:
		return rewriteValueAMD64_OpAMD64MOVWQSXload(v)
	case OpAMD64MOVWQZX:
		return rewriteValueAMD64_OpAMD64MOVWQZX(v)
	case OpAMD64MOVWload:
		return rewriteValueAMD64_OpAMD64MOVWload(v)
	case OpAMD64MOVWstore:
		return rewriteValueAMD64_OpAMD64MOVWstore(v)
	case OpAMD64MOVWstoreconst:
		return rewriteValueAMD64_OpAMD64MOVWstoreconst(v)
	case OpAMD64MULL:
		return rewriteValueAMD64_OpAMD64MULL(v)
	case OpAMD64MULLconst:
		return rewriteValueAMD64_OpAMD64MULLconst(v)
	case OpAMD64MULQ:
		return rewriteValueAMD64_OpAMD64MULQ(v)
	case OpAMD64MULQconst:
		return rewriteValueAMD64_OpAMD64MULQconst(v)
	case OpAMD64MULSD:
		return rewriteValueAMD64_OpAMD64MULSD(v)
	case OpAMD64MULSDload:
		return rewriteValueAMD64_OpAMD64MULSDload(v)
	case OpAMD64MULSS:
		return rewriteValueAMD64_OpAMD64MULSS(v)
	case OpAMD64MULSSload:
		return rewriteValueAMD64_OpAMD64MULSSload(v)
	case OpAMD64NEGL:
		return rewriteValueAMD64_OpAMD64NEGL(v)
	case OpAMD64NEGQ:
		return rewriteValueAMD64_OpAMD64NEGQ(v)
	case OpAMD64NOTL:
		return rewriteValueAMD64_OpAMD64NOTL(v)
	case OpAMD64NOTQ:
		return rewriteValueAMD64_OpAMD64NOTQ(v)
	case OpAMD64ORL:
		return rewriteValueAMD64_OpAMD64ORL(v)
	case OpAMD64ORLconst:
		return rewriteValueAMD64_OpAMD64ORLconst(v)
	case OpAMD64ORLconstmodify:
		return rewriteValueAMD64_OpAMD64ORLconstmodify(v)
	case OpAMD64ORLload:
		return rewriteValueAMD64_OpAMD64ORLload(v)
	case OpAMD64ORLmodify:
		return rewriteValueAMD64_OpAMD64ORLmodify(v)
	case OpAMD64ORQ:
		return rewriteValueAMD64_OpAMD64ORQ(v)
	case OpAMD64ORQconst:
		return rewriteValueAMD64_OpAMD64ORQconst(v)
	case OpAMD64ORQconstmodify:
		return rewriteValueAMD64_OpAMD64ORQconstmodify(v)
	case OpAMD64ORQload:
		return rewriteValueAMD64_OpAMD64ORQload(v)
	case OpAMD64ORQmodify:
		return rewriteValueAMD64_OpAMD64ORQmodify(v)
	case OpAMD64ROLB:
		return rewriteValueAMD64_OpAMD64ROLB(v)
	case OpAMD64ROLBconst:
		return rewriteValueAMD64_OpAMD64ROLBconst(v)
	case OpAMD64ROLL:
		return rewriteValueAMD64_OpAMD64ROLL(v)
	case OpAMD64ROLLconst:
		return rewriteValueAMD64_OpAMD64ROLLconst(v)
	case OpAMD64ROLQ:
		return rewriteValueAMD64_OpAMD64ROLQ(v)
	case OpAMD64ROLQconst:
		return rewriteValueAMD64_OpAMD64ROLQconst(v)
	case OpAMD64ROLW:
		return rewriteValueAMD64_OpAMD64ROLW(v)
	case OpAMD64ROLWconst:
		return rewriteValueAMD64_OpAMD64ROLWconst(v)
	case OpAMD64RORB:
		return rewriteValueAMD64_OpAMD64RORB(v)
	case OpAMD64RORL:
		return rewriteValueAMD64_OpAMD64RORL(v)
	case OpAMD64RORQ:
		return rewriteValueAMD64_OpAMD64RORQ(v)
	case OpAMD64RORW:
		return rewriteValueAMD64_OpAMD64RORW(v)
	case OpAMD64SARB:
		return rewriteValueAMD64_OpAMD64SARB(v)
	case OpAMD64SARBconst:
		return rewriteValueAMD64_OpAMD64SARBconst(v)
	case OpAMD64SARL:
		return rewriteValueAMD64_OpAMD64SARL(v)
	case OpAMD64SARLconst:
		return rewriteValueAMD64_OpAMD64SARLconst(v)
	case OpAMD64SARQ:
		return rewriteValueAMD64_OpAMD64SARQ(v)
	case OpAMD64SARQconst:
		return rewriteValueAMD64_OpAMD64SARQconst(v)
	case OpAMD64SARW:
		return rewriteValueAMD64_OpAMD64SARW(v)
	case OpAMD64SARWconst:
		return rewriteValueAMD64_OpAMD64SARWconst(v)
	case OpAMD64SARXLload:
		return rewriteValueAMD64_OpAMD64SARXLload(v)
	case OpAMD64SARXQload:
		return rewriteValueAMD64_OpAMD64SARXQload(v)
	case OpAMD64SBBLcarrymask:
		return rewriteValueAMD64_OpAMD64SBBLcarrymask(v)
	case OpAMD64SBBQ:
		return rewriteValueAMD64_OpAMD64SBBQ(v)
	case OpAMD64SBBQcarrymask:
		return rewriteValueAMD64_OpAMD64SBBQcarrymask(v)
	case OpAMD64SBBQconst:
		return rewriteValueAMD64_OpAMD64SBBQconst(v)
	case OpAMD64SETA:
		return rewriteValueAMD64_OpAMD64SETA(v)
	case OpAMD64SETAE:
		return rewriteValueAMD64_OpAMD64SETAE(v)
	case OpAMD64SETAEstore:
		return rewriteValueAMD64_OpAMD64SETAEstore(v)
	case OpAMD64SETAstore:
		return rewriteValueAMD64_OpAMD64SETAstore(v)
	case OpAMD64SETB:
		return rewriteValueAMD64_OpAMD64SETB(v)
	case OpAMD64SETBE:
		return rewriteValueAMD64_OpAMD64SETBE(v)
	case OpAMD64SETBEstore:
		return rewriteValueAMD64_OpAMD64SETBEstore(v)
	case OpAMD64SETBstore:
		return rewriteValueAMD64_OpAMD64SETBstore(v)
	case OpAMD64SETEQ:
		return rewriteValueAMD64_OpAMD64SETEQ(v)
	case OpAMD64SETEQstore:
		return rewriteValueAMD64_OpAMD64SETEQstore(v)
	case OpAMD64SETG:
		return rewriteValueAMD64_OpAMD64SETG(v)
	case OpAMD64SETGE:
		return rewriteValueAMD64_OpAMD64SETGE(v)
	case OpAMD64SETGEstore:
		return rewriteValueAMD64_OpAMD64SETGEstore(v)
	case OpAMD64SETGstore:
		return rewriteValueAMD64_OpAMD64SETGstore(v)
	case OpAMD64SETL:
		return rewriteValueAMD64_OpAMD64SETL(v)
	case OpAMD64SETLE:
		return rewriteValueAMD64_OpAMD64SETLE(v)
	case OpAMD64SETLEstore:
		return rewriteValueAMD64_OpAMD64SETLEstore(v)
	case OpAMD64SETLstore:
		return rewriteValueAMD64_OpAMD64SETLstore(v)
	case OpAMD64SETNE:
		return rewriteValueAMD64_OpAMD64SETNE(v)
	case OpAMD64SETNEstore:
		return rewriteValueAMD64_OpAMD64SETNEstore(v)
	case OpAMD64SHLL:
		return rewriteValueAMD64_OpAMD64SHLL(v)
	case OpAMD64SHLLconst:
		return rewriteValueAMD64_OpAMD64SHLLconst(v)
	case OpAMD64SHLQ:
		return rewriteValueAMD64_OpAMD64SHLQ(v)
	case OpAMD64SHLQconst:
		return rewriteValueAMD64_OpAMD64SHLQconst(v)
	case OpAMD64SHLXLload:
		return rewriteValueAMD64_OpAMD64SHLXLload(v)
	case OpAMD64SHLXQload:
		return rewriteValueAMD64_OpAMD64SHLXQload(v)
	case OpAMD64SHRB:
		return rewriteValueAMD64_OpAMD64SHRB(v)
	case OpAMD64SHRBconst:
		return rewriteValueAMD64_OpAMD64SHRBconst(v)
	case OpAMD64SHRL:
		return rewriteValueAMD64_OpAMD64SHRL(v)
	case OpAMD64SHRLconst:
		return rewriteValueAMD64_OpAMD64SHRLconst(v)
	case OpAMD64SHRQ:
		return rewriteValueAMD64_OpAMD64SHRQ(v)
	case OpAMD64SHRQconst:
		return rewriteValueAMD64_OpAMD64SHRQconst(v)
	case OpAMD64SHRW:
		return rewriteValueAMD64_OpAMD64SHRW(v)
	case OpAMD64SHRWconst:
		return rewriteValueAMD64_OpAMD64SHRWconst(v)
	case OpAMD64SHRXLload:
		return rewriteValueAMD64_OpAMD64SHRXLload(v)
	case OpAMD64SHRXQload:
		return rewriteValueAMD64_OpAMD64SHRXQload(v)
	case OpAMD64SUBL:
		return rewriteValueAMD64_OpAMD64SUBL(v)
	case OpAMD64SUBLconst:
		return rewriteValueAMD64_OpAMD64SUBLconst(v)
	case OpAMD64SUBLload:
		return rewriteValueAMD64_OpAMD64SUBLload(v)
	case OpAMD64SUBLmodify:
		return rewriteValueAMD64_OpAMD64SUBLmodify(v)
	case OpAMD64SUBQ:
		return rewriteValueAMD64_OpAMD64SUBQ(v)
	case OpAMD64SUBQborrow:
		return rewriteValueAMD64_OpAMD64SUBQborrow(v)
	case OpAMD64SUBQconst:
		return rewriteValueAMD64_OpAMD64SUBQconst(v)
	case OpAMD64SUBQload:
		return rewriteValueAMD64_OpAMD64SUBQload(v)
	case OpAMD64SUBQmodify:
		return rewriteValueAMD64_OpAMD64SUBQmodify(v)
	case OpAMD64SUBSD:
		return rewriteValueAMD64_OpAMD64SUBSD(v)
	case OpAMD64SUBSDload:
		return rewriteValueAMD64_OpAMD64SUBSDload(v)
	case OpAMD64SUBSS:
		return rewriteValueAMD64_OpAMD64SUBSS(v)
	case OpAMD64SUBSSload:
		return rewriteValueAMD64_OpAMD64SUBSSload(v)
	case OpAMD64TESTB:
		return rewriteValueAMD64_OpAMD64TESTB(v)
	case OpAMD64TESTBconst:
		return rewriteValueAMD64_OpAMD64TESTBconst(v)
	case OpAMD64TESTL:
		return rewriteValueAMD64_OpAMD64TESTL(v)
	case OpAMD64TESTLconst:
		return rewriteValueAMD64_OpAMD64TESTLconst(v)
	case OpAMD64TESTQ:
		return rewriteValueAMD64_OpAMD64TESTQ(v)
	case OpAMD64TESTQconst:
		return rewriteValueAMD64_OpAMD64TESTQconst(v)
	case OpAMD64TESTW:
		return rewriteValueAMD64_OpAMD64TESTW(v)
	case OpAMD64TESTWconst:
		return rewriteValueAMD64_OpAMD64TESTWconst(v)
	case OpAMD64XADDLlock:
		return rewriteValueAMD64_OpAMD64XADDLlock(v)
	case OpAMD64XADDQlock:
		return rewriteValueAMD64_OpAMD64XADDQlock(v)
	case OpAMD64XCHGL:
		return rewriteValueAMD64_OpAMD64XCHGL(v)
	case OpAMD64XCHGQ:
		return rewriteValueAMD64_OpAMD64XCHGQ(v)
	case OpAMD64XORL:
		return rewriteValueAMD64_OpAMD64XORL(v)
	case OpAMD64XORLconst:
		return rewriteValueAMD64_OpAMD64XORLconst(v)
	case OpAMD64XORLconstmodify:
		return rewriteValueAMD64_OpAMD64XORLconstmodify(v)
	case OpAMD64XORLload:
		return rewriteValueAMD64_OpAMD64XORLload(v)
	case OpAMD64XORLmodify:
		return rewriteValueAMD64_OpAMD64XORLmodify(v)
	case OpAMD64XORQ:
		return rewriteValueAMD64_OpAMD64XORQ(v)
	case OpAMD64XORQconst:
		return rewriteValueAMD64_OpAMD64XORQconst(v)
	case OpAMD64XORQconstmodify:
		return rewriteValueAMD64_OpAMD64XORQconstmodify(v)
	case OpAMD64XORQload:
		return rewriteValueAMD64_OpAMD64XORQload(v)
	case OpAMD64XORQmodify:
		return rewriteValueAMD64_OpAMD64XORQmodify(v)
	case OpAdd16:
		v.Op = OpAMD64ADDL
		return true
	case OpAdd32:
		v.Op = OpAMD64ADDL
		return true
	case OpAdd32F:
		v.Op = OpAMD64ADDSS
		return true
	case OpAdd64:
		v.Op = OpAMD64ADDQ
		return true
	case OpAdd64F:
		v.Op = OpAMD64ADDSD
		return true
	case OpAdd8:
		v.Op = OpAMD64ADDL
		return true
	case OpAddPtr:
		v.Op = OpAMD64ADDQ
		return true
	case OpAddr:
		return rewriteValueAMD64_OpAddr(v)
	case OpAnd16:
		v.Op = OpAMD64ANDL
		return true
	case OpAnd32:
		v.Op = OpAMD64ANDL
		return true
	case OpAnd64:
		v.Op = OpAMD64ANDQ
		return true
	case OpAnd8:
		v.Op = OpAMD64ANDL
		return true
	case OpAndB:
		v.Op = OpAMD64ANDL
		return true
	case OpAtomicAdd32:
		return rewriteValueAMD64_OpAtomicAdd32(v)
	case OpAtomicAdd64:
		return rewriteValueAMD64_OpAtomicAdd64(v)
	case OpAtomicAnd32:
		return rewriteValueAMD64_OpAtomicAnd32(v)
	case OpAtomicAnd32value:
		return rewriteValueAMD64_OpAtomicAnd32value(v)
	case OpAtomicAnd64value:
		return rewriteValueAMD64_OpAtomicAnd64value(v)
	case OpAtomicAnd8:
		return rewriteValueAMD64_OpAtomicAnd8(v)
	case OpAtomicCompareAndSwap32:
		return rewriteValueAMD64_OpAtomicCompareAndSwap32(v)
	case OpAtomicCompareAndSwap64:
		return rewriteValueAMD64_OpAtomicCompareAndSwap64(v)
	case OpAtomicExchange32:
		return rewriteValueAMD64_OpAtomicExchange32(v)
	case OpAtomicExchange64:
		return rewriteValueAMD64_OpAtomicExchange64(v)
	case OpAtomicExchange8:
		return rewriteValueAMD64_OpAtomicExchange8(v)
	case OpAtomicLoad32:
		return rewriteValueAMD64_OpAtomicLoad32(v)
	case OpAtomicLoad64:
		return rewriteValueAMD64_OpAtomicLoad64(v)
	case OpAtomicLoad8:
		return rewriteValueAMD64_OpAtomicLoad8(v)
	case OpAtomicLoadPtr:
		return rewriteValueAMD64_OpAtomicLoadPtr(v)
	case OpAtomicOr32:
		return rewriteValueAMD64_OpAtomicOr32(v)
	case OpAtomicOr32value:
		return rewriteValueAMD64_OpAtomicOr32value(v)
	case OpAtomicOr64value:
		return rewriteValueAMD64_OpAtomicOr64value(v)
	case OpAtomicOr8:
		return rewriteValueAMD64_OpAtomicOr8(v)
	case OpAtomicStore32:
		return rewriteValueAMD64_OpAtomicStore32(v)
	case OpAtomicStore64:
		return rewriteValueAMD64_OpAtomicStore64(v)
	case OpAtomicStore8:
		return rewriteValueAMD64_OpAtomicStore8(v)
	case OpAtomicStorePtrNoWB:
		return rewriteValueAMD64_OpAtomicStorePtrNoWB(v)
	case OpAvg64u:
		v.Op = OpAMD64AVGQU
		return true
	case OpBitLen16:
		return rewriteValueAMD64_OpBitLen16(v)
	case OpBitLen32:
		return rewriteValueAMD64_OpBitLen32(v)
	case OpBitLen64:
		return rewriteValueAMD64_OpBitLen64(v)
	case OpBitLen8:
		return rewriteValueAMD64_OpBitLen8(v)
	case OpBswap16:
		return rewriteValueAMD64_OpBswap16(v)
	case OpBswap32:
		v.Op = OpAMD64BSWAPL
		return true
	case OpBswap64:
		v.Op = OpAMD64BSWAPQ
		return true
	case OpCeil:
		return rewriteValueAMD64_OpCeil(v)
	case OpClosureCall:
		v.Op = OpAMD64CALLclosure
		return true
	case OpCom16:
		v.Op = OpAMD64NOTL
		return true
	case OpCom32:
		v.Op = OpAMD64NOTL
		return true
	case OpCom64:
		v.Op = OpAMD64NOTQ
		return true
	case OpCom8:
		v.Op = OpAMD64NOTL
		return true
	case OpCondSelect:
		return rewriteValueAMD64_OpCondSelect(v)
	case OpConst16:
		return rewriteValueAMD64_OpConst16(v)
	case OpConst32:
		v.Op = OpAMD64MOVLconst
		return true
	case OpConst32F:
		v.Op = OpAMD64MOVSSconst
		return true
	case OpConst64:
		v.Op = OpAMD64MOVQconst
		return true
	case OpConst64F:
		v.Op = OpAMD64MOVSDconst
		return true
	case OpConst8:
		return rewriteValueAMD64_OpConst8(v)
	case OpConstBool:
		return rewriteValueAMD64_OpConstBool(v)
	case OpConstNil:
		return rewriteValueAMD64_OpConstNil(v)
	case OpCtz16:
		return rewriteValueAMD64_OpCtz16(v)
	case OpCtz16NonZero:
		return rewriteValueAMD64_OpCtz16NonZero(v)
	case OpCtz32:
		return rewriteValueAMD64_OpCtz32(v)
	case OpCtz32NonZero:
		return rewriteValueAMD64_OpCtz32NonZero(v)
	case OpCtz64:
		return rewriteValueAMD64_OpCtz64(v)
	case OpCtz64NonZero:
		return rewriteValueAMD64_OpCtz64NonZero(v)
	case OpCtz8:
		return rewriteValueAMD64_OpCtz8(v)
	case OpCtz8NonZero:
		return rewriteValueAMD64_OpCtz8NonZero(v)
	case OpCvt32Fto32:
		v.Op = OpAMD64CVTTSS2SL
		return true
	case OpCvt32Fto64:
		v.Op = OpAMD64CVTTSS2SQ
		return true
	case OpCvt32Fto64F:
		v.Op = OpAMD64CVTSS2SD
		return true
	case OpCvt32to32F:
		v.Op = OpAMD64CVTSL2SS
		return true
	case OpCvt32to64F:
		v.Op = OpAMD64CVTSL2SD
		return true
	case OpCvt64Fto32:
		v.Op = OpAMD64CVTTSD2SL
		return true
	case OpCvt64Fto32F:
		v.Op = OpAMD64CVTSD2SS
		return true
	case OpCvt64Fto64:
		v.Op = OpAMD64CVTTSD2SQ
		return true
	case OpCvt64to32F:
		v.Op = OpAMD64CVTSQ2SS
		return true
	case OpCvt64to64F:
		v.Op = OpAMD64CVTSQ2SD
		return true
	case OpCvtBoolToUint8:
		v.Op = OpCopy
		return true
	case OpDiv128u:
		v.Op = OpAMD64DIVQU2
		return true
	case OpDiv16:
		return rewriteValueAMD64_OpDiv16(v)
	case OpDiv16u:
		return rewriteValueAMD64_OpDiv16u(v)
	case OpDiv32:
		return rewriteValueAMD64_OpDiv32(v)
	case OpDiv32F:
		v.Op = OpAMD64DIVSS
		return true
	case OpDiv32u:
		return rewriteValueAMD64_OpDiv32u(v)
	case OpDiv64:
		return rewriteValueAMD64_OpDiv64(v)
	case OpDiv64F:
		v.Op = OpAMD64DIVSD
		return true
	case OpDiv64u:
		return rewriteValueAMD64_OpDiv64u(v)
	case OpDiv8:
		return rewriteValueAMD64_OpDiv8(v)
	case OpDiv8u:
		return rewriteValueAMD64_OpDiv8u(v)
	case OpEq16:
		return rewriteValueAMD64_OpEq16(v)
	case OpEq32:
		return rewriteValueAMD64_OpEq32(v)
	case OpEq32F:
		return rewriteValueAMD64_OpEq32F(v)
	case OpEq64:
		return rewriteValueAMD64_OpEq64(v)
	case OpEq64F:
		return rewriteValueAMD64_OpEq64F(v)
	case OpEq8:
		return rewriteValueAMD64_OpEq8(v)
	case OpEqB:
		return rewriteValueAMD64_OpEqB(v)
	case OpEqPtr:
		return rewriteValueAMD64_OpEqPtr(v)
	case OpFMA:
		return rewriteValueAMD64_OpFMA(v)
	case OpFloor:
		return rewriteValueAMD64_OpFloor(v)
	case OpGetCallerPC:
		v.Op = OpAMD64LoweredGetCallerPC
		return true
	case OpGetCallerSP:
		v.Op = OpAMD64LoweredGetCallerSP
		return true
	case OpGetClosurePtr:
		v.Op = OpAMD64LoweredGetClosurePtr
		return true
	case OpGetG:
		return rewriteValueAMD64_OpGetG(v)
	case OpHasCPUFeature:
		return rewriteValueAMD64_OpHasCPUFeature(v)
	case OpHmul32:
		v.Op = OpAMD64HMULL
		return true
	case OpHmul32u:
		v.Op = OpAMD64HMULLU
		return true
	case OpHmul64:
		v.Op = OpAMD64HMULQ
		return true
	case OpHmul64u:
		v.Op = OpAMD64HMULQU
		return true
	case OpInterCall:
		v.Op = OpAMD64CALLinter
		return true
	case OpIsInBounds:
		return rewriteValueAMD64_OpIsInBounds(v)
	case OpIsNonNil:
		return rewriteValueAMD64_OpIsNonNil(v)
	case OpIsSliceInBounds:
		return rewriteValueAMD64_OpIsSliceInBounds(v)
	case OpLeq16:
		return rewriteValueAMD64_OpLeq16(v)
	case OpLeq16U:
		return rewriteValueAMD64_OpLeq16U(v)
	case OpLeq32:
		return rewriteValueAMD64_OpLeq32(v)
	case OpLeq32F:
		return rewriteValueAMD64_OpLeq32F(v)
	case OpLeq32U:
		return rewriteValueAMD64_OpLeq32U(v)
	case OpLeq64:
		return rewriteValueAMD64_OpLeq64(v)
	case OpLeq64F:
		return rewriteValueAMD64_OpLeq64F(v)
	case OpLeq64U:
		return rewriteValueAMD64_OpLeq64U(v)
	case OpLeq8:
		return rewriteValueAMD64_OpLeq8(v)
	case OpLeq8U:
		return rewriteValueAMD64_OpLeq8U(v)
	case OpLess16:
		return rewriteValueAMD64_OpLess16(v)
	case OpLess16U:
		return rewriteValueAMD64_OpLess16U(v)
	case OpLess32:
		return rewriteValueAMD64_OpLess32(v)
	case OpLess32F:
		return rewriteValueAMD64_OpLess32F(v)
	case OpLess32U:
		return rewriteValueAMD64_OpLess32U(v)
	case OpLess64:
		return rewriteValueAMD64_OpLess64(v)
	case OpLess64F:
		return rewriteValueAMD64_OpLess64F(v)
	case OpLess64U:
		return rewriteValueAMD64_OpLess64U(v)
	case OpLess8:
		return rewriteValueAMD64_OpLess8(v)
	case OpLess8U:
		return rewriteValueAMD64_OpLess8U(v)
	case OpLoad:
		return rewriteValueAMD64_OpLoad(v)
	case OpLocalAddr:
		return rewriteValueAMD64_OpLocalAddr(v)
	case OpLsh16x16:
		return rewriteValueAMD64_OpLsh16x16(v)
	case OpLsh16x32:
		return rewriteValueAMD64_OpLsh16x32(v)
	case OpLsh16x64:
		return rewriteValueAMD64_OpLsh16x64(v)
	case OpLsh16x8:
		return rewriteValueAMD64_OpLsh16x8(v)
	case OpLsh32x16:
		return rewriteValueAMD64_OpLsh32x16(v)
	case OpLsh32x32:
		return rewriteValueAMD64_OpLsh32x32(v)
	case OpLsh32x64:
		return rewriteValueAMD64_OpLsh32x64(v)
	case OpLsh32x8:
		return rewriteValueAMD64_OpLsh32x8(v)
	case OpLsh64x16:
		return rewriteValueAMD64_OpLsh64x16(v)
	case OpLsh64x32:
		return rewriteValueAMD64_OpLsh64x32(v)
	case OpLsh64x64:
		return rewriteValueAMD64_OpLsh64x64(v)
	case OpLsh64x8:
		return rewriteValueAMD64_OpLsh64x8(v)
	case OpLsh8x16:
		return rewriteValueAMD64_OpLsh8x16(v)
	case OpLsh8x32:
		return rewriteValueAMD64_OpLsh8x32(v)
	case OpLsh8x64:
		return rewriteValueAMD64_OpLsh8x64(v)
	case OpLsh8x8:
		return rewriteValueAMD64_OpLsh8x8(v)
	case OpMax32F:
		return rewriteValueAMD64_OpMax32F(v)
	case OpMax64F:
		return rewriteValueAMD64_OpMax64F(v)
	case OpMin32F:
		return rewriteValueAMD64_OpMin32F(v)
	case OpMin64F:
		return rewriteValueAMD64_OpMin64F(v)
	case OpMod16:
		return rewriteValueAMD64_OpMod16(v)
	case OpMod16u:
		return rewriteValueAMD64_OpMod16u(v)
	case OpMod32:
		return rewriteValueAMD64_OpMod32(v)
	case OpMod32u:
		return rewriteValueAMD64_OpMod32u(v)
	case OpMod64:
		return rewriteValueAMD64_OpMod64(v)
	case OpMod64u:
		return rewriteValueAMD64_OpMod64u(v)
	case OpMod8:
		return rewriteValueAMD64_OpMod8(v)
	case OpMod8u:
		return rewriteValueAMD64_OpMod8u(v)
	case OpMove:
		return rewriteValueAMD64_OpMove(v)
	case OpMul16:
		v.Op = OpAMD64MULL
		return true
	case OpMul32:
		v.Op = OpAMD64MULL
		return true
	case OpMul32F:
		v.Op = OpAMD64MULSS
		return true
	case OpMul64:
		v.Op = OpAMD64MULQ
		return true
	case OpMul64F:
		v.Op = OpAMD64MULSD
		return true
	case OpMul64uhilo:
		v.Op = OpAMD64MULQU2
		return true
	case OpMul8:
		v.Op = OpAMD64MULL
		return true
	case OpNeg16:
		v.Op = OpAMD64NEGL
		return true
	case OpNeg32:
		v.Op = OpAMD64NEGL
		return true
	case OpNeg32F:
		return rewriteValueAMD64_OpNeg32F(v)
	case OpNeg64:
		v.Op = OpAMD64NEGQ
		return true
	case OpNeg64F:
		return rewriteValueAMD64_OpNeg64F(v)
	case OpNeg8:
		v.Op = OpAMD64NEGL
		return true
	case OpNeq16:
		return rewriteValueAMD64_OpNeq16(v)
	case OpNeq32:
		return rewriteValueAMD64_OpNeq32(v)
	case OpNeq32F:
		return rewriteValueAMD64_OpNeq32F(v)
	case OpNeq64:
		return rewriteValueAMD64_OpNeq64(v)
	case OpNeq64F:
		return rewriteValueAMD64_OpNeq64F(v)
	case OpNeq8:
		return rewriteValueAMD64_OpNeq8(v)
	case OpNeqB:
		return rewriteValueAMD64_OpNeqB(v)
	case OpNeqPtr:
		return rewriteValueAMD64_OpNeqPtr(v)
	case OpNilCheck:
		v.Op = OpAMD64LoweredNilCheck
		return true
	case OpNot:
		return rewriteValueAMD64_OpNot(v)
	case OpOffPtr:
		return rewriteValueAMD64_OpOffPtr(v)
	case OpOr16:
		v.Op = OpAMD64ORL
		return true
	case OpOr32:
		v.Op = OpAMD64ORL
		return true
	case OpOr64:
		v.Op = OpAMD64ORQ
		return true
	case OpOr8:
		v.Op = OpAMD64ORL
		return true
	case OpOrB:
		v.Op = OpAMD64ORL
		return true
	case OpPanicBounds:
		return rewriteValueAMD64_OpPanicBounds(v)
	case OpPopCount16:
		return rewriteValueAMD64_OpPopCount16(v)
	case OpPopCount32:
		v.Op = OpAMD64POPCNTL
		return true
	case OpPopCount64:
		v.Op = OpAMD64POPCNTQ
		return true
	case OpPopCount8:
		return rewriteValueAMD64_OpPopCount8(v)
	case OpPrefetchCache:
		v.Op = OpAMD64PrefetchT0
		return true
	case OpPrefetchCacheStreamed:
		v.Op = OpAMD64PrefetchNTA
		return true
	case OpRotateLeft16:
		v.Op = OpAMD64ROLW
		return true
	case OpRotateLeft32:
		v.Op = OpAMD64ROLL
		return true
	case OpRotateLeft64:
		v.Op = OpAMD64ROLQ
		return true
	case OpRotateLeft8:
		v.Op = OpAMD64ROLB
		return true
	case OpRound32F:
		v.Op = OpAMD64LoweredRound32F
		return true
	case OpRound64F:
		v.Op = OpAMD64LoweredRound64F
		return true
	case OpRoundToEven:
		return rewriteValueAMD64_OpRoundToEven(v)
	case OpRsh16Ux16:
		return rewriteValueAMD64_OpRsh16Ux16(v)
	case OpRsh16Ux32:
		return rewriteValueAMD64_OpRsh16Ux32(v)
	case OpRsh16Ux64:
		return rewriteValueAMD64_OpRsh16Ux64(v)
	case OpRsh16Ux8:
		return rewriteValueAMD64_OpRsh16Ux8(v)
	case OpRsh16x16:
		return rewriteValueAMD64_OpRsh16x16(v)
	case OpRsh16x32:
		return rewriteValueAMD64_OpRsh16x32(v)
	case OpRsh16x64:
		return rewriteValueAMD64_OpRsh16x64(v)
	case OpRsh16x8:
		return rewriteValueAMD64_OpRsh16x8(v)
	case OpRsh32Ux16:
		return rewriteValueAMD64_OpRsh32Ux16(v)
	case OpRsh32Ux32:
		return rewriteValueAMD64_OpRsh32Ux32(v)
	case OpRsh32Ux64:
		return rewriteValueAMD64_OpRsh32Ux64(v)
	case OpRsh32Ux8:
		return rewriteValueAMD64_OpRsh32Ux8(v)
	case OpRsh32x16:
		return rewriteValueAMD64_OpRsh32x16(v)
	case OpRsh32x32:
		return rewriteValueAMD64_OpRsh32x32(v)
	case OpRsh32x64:
		return rewriteValueAMD64_OpRsh32x64(v)
	case OpRsh32x8:
		return rewriteValueAMD64_OpRsh32x8(v)
	case OpRsh64Ux16:
		return rewriteValueAMD64_OpRsh64Ux16(v)
	case OpRsh64Ux32:
		return rewriteValueAMD64_OpRsh64Ux32(v)
	case OpRsh64Ux64:
		return rewriteValueAMD64_OpRsh64Ux64(v)
	case OpRsh64Ux8:
		return rewriteValueAMD64_OpRsh64Ux8(v)
	case OpRsh64x16:
		return rewriteValueAMD64_OpRsh64x16(v)
	case OpRsh64x32:
		return rewriteValueAMD64_OpRsh64x32(v)
	case OpRsh64x64:
		return rewriteValueAMD64_OpRsh64x64(v)
	case OpRsh64x8:
		return rewriteValueAMD64_OpRsh64x8(v)
	case OpRsh8Ux16:
		return rewriteValueAMD64_OpRsh8Ux16(v)
	case OpRsh8Ux32:
		return rewriteValueAMD64_OpRsh8Ux32(v)
	case OpRsh8Ux64:
		return rewriteValueAMD64_OpRsh8Ux64(v)
	case OpRsh8Ux8:
		return rewriteValueAMD64_OpRsh8Ux8(v)
	case OpRsh8x16:
		return rewriteValueAMD64_OpRsh8x16(v)
	case OpRsh8x32:
		return rewriteValueAMD64_OpRsh8x32(v)
	case OpRsh8x64:
		return rewriteValueAMD64_OpRsh8x64(v)
	case OpRsh8x8:
		return rewriteValueAMD64_OpRsh8x8(v)
	case OpSelect0:
		return rewriteValueAMD64_OpSelect0(v)
	case OpSelect1:
		return rewriteValueAMD64_OpSelect1(v)
	case OpSelectN:
		return rewriteValueAMD64_OpSelectN(v)
	case OpSignExt16to32:
		v.Op = OpAMD64MOVWQSX
		return true
	case OpSignExt16to64:
		v.Op = OpAMD64MOVWQSX
		return true
	case OpSignExt32to64:
		v.Op = OpAMD64MOVLQSX
		return true
	case OpSignExt8to16:
		v.Op = OpAMD64MOVBQSX
		return true
	case OpSignExt8to32:
		v.Op = OpAMD64MOVBQSX
		return true
	case OpSignExt8to64:
		v.Op = OpAMD64MOVBQSX
		return true
	case OpSlicemask:
		return rewriteValueAMD64_OpSlicemask(v)
	case OpSpectreIndex:
		return rewriteValueAMD64_OpSpectreIndex(v)
	case OpSpectreSliceIndex:
		return rewriteValueAMD64_OpSpectreSliceIndex(v)
	case OpSqrt:
		v.Op = OpAMD64SQRTSD
		return true
	case OpSqrt32:
		v.Op = OpAMD64SQRTSS
		return true
	case OpStaticCall:
		v.Op = OpAMD64CALLstatic
		return true
	case OpStore:
		return rewriteValueAMD64_OpStore(v)
	case OpSub16:
		v.Op = OpAMD64SUBL
		return true
	case OpSub32:
		v.Op = OpAMD64SUBL
		return true
	case OpSub32F:
		v.Op = OpAMD64SUBSS
		return true
	case OpSub64:
		v.Op = OpAMD64SUBQ
		return true
	case OpSub64F:
		v.Op = OpAMD64SUBSD
		return true
	case OpSub8:
		v.Op = OpAMD64SUBL
		return true
	case OpSubPtr:
		v.Op = OpAMD64SUBQ
		return true
	case OpTailCall:
		v.Op = OpAMD64CALLtail
		return true
	case OpTrunc:
		return rewriteValueAMD64_OpTrunc(v)
	case OpTrunc16to8:
		v.Op = OpCopy
		return true
	case OpTrunc32to16:
		v.Op = OpCopy
		return true
	case OpTrunc32to8:
		v.Op = OpCopy
		return true
	case OpTrunc64to16:
		v.Op = OpCopy
		return true
	case OpTrunc64to32:
		v.Op = OpCopy
		return true
	case OpTrunc64to8:
		v.Op = OpCopy
		return true
	case OpWB:
		v.Op = OpAMD64LoweredWB
		return true
	case OpXor16:
		v.Op = OpAMD64XORL
		return true
	case OpXor32:
		v.Op = OpAMD64XORL
		return true
	case OpXor64:
		v.Op = OpAMD64XORQ
		return true
	case OpXor8:
		v.Op = OpAMD64XORL
		return true
	case OpZero:
		return rewriteValueAMD64_OpZero(v)
	case OpZeroExt16to32:
		v.Op = OpAMD64MOVWQZX
		return true
	case OpZeroExt16to64:
		v.Op = OpAMD64MOVWQZX
		return true
	case OpZeroExt32to64:
		v.Op = OpAMD64MOVLQZX
		return true
	case OpZeroExt8to16:
		v.Op = OpAMD64MOVBQZX
		return true
	case OpZeroExt8to32:
		v.Op = OpAMD64MOVBQZX
		return true
	case OpZeroExt8to64:
		v.Op = OpAMD64MOVBQZX
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADCQ(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADCQ x (MOVQconst [c]) carry)
	// cond: is32Bit(c)
	// result: (ADCQconst x [int32(c)] carry)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_1.AuxInt)
			carry := v_2
			if !(is32Bit(c)) {
				continue
			}
			v.reset(OpAMD64ADCQconst)
			v.AuxInt = int32ToAuxInt(int32(c))
			v.AddArg2(x, carry)
			return true
		}
		break
	}
	// match: (ADCQ x y (FlagEQ))
	// result: (ADDQcarry x y)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64ADDQcarry)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADCQconst(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADCQconst x [c] (FlagEQ))
	// result: (ADDQconstcarry x [c])
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if v_1.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64ADDQconstcarry)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADDL (SHRLconst [1] x) (SHRLconst [1] x))
	// result: (ANDLconst [-2] x)
	for {
		if v_0.Op != OpAMD64SHRLconst || auxIntToInt8(v_0.AuxInt) != 1 {
			break
		}
		x := v_0.Args[0]
		if v_1.Op != OpAMD64SHRLconst || auxIntToInt8(v_1.AuxInt) != 1 || x != v_1.Args[0] {
			break
		}
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = int32ToAuxInt(-2)
		v.AddArg(x)
		return true
	}
	// match: (ADDL x (MOVLconst [c]))
	// result: (ADDLconst [c] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVLconst {
				continue
			}
			c := auxIntToInt32(v_1.AuxInt)
			v.reset(OpAMD64ADDLconst)
			v.AuxInt = int32ToAuxInt(c)
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ADDL x (SHLLconst [3] y))
	// result: (LEAL8 x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 3 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAL8)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDL x (SHLLconst [2] y))
	// result: (LEAL4 x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 2 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAL4)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDL x (ADDL y y))
	// result: (LEAL2 x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64ADDL {
				continue
			}
			y := v_1.Args[1]
			if y != v_1.Args[0] {
				continue
			}
			v.reset(OpAMD64LEAL2)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDL x (ADDL x y))
	// result: (LEAL2 y x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64ADDL {
				continue
			}
			_ = v_1.Args[1]
			v_1_0 := v_1.Args[0]
			v_1_1 := v_1.Args[1]
			for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
				if x != v_1_0 {
					continue
				}
				y := v_1_1
				v.reset(OpAMD64LEAL2)
				v.AddArg2(y, x)
				return true
			}
		}
		break
	}
	// match: (ADDL (ADDLconst [c] x) y)
	// result: (LEAL1 [c] x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64ADDLconst {
				continue
			}
			c := auxIntToInt32(v_0.AuxInt)
			x := v_0.Args[0]
			y := v_1
			v.reset(OpAMD64LEAL1)
			v.AuxInt = int32ToAuxInt(c)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDL x (LEAL [c] {s} y))
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAL1 [c] {s} x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64LEAL {
				continue
			}
			c := auxIntToInt32(v_1.AuxInt)
			s := auxToSym(v_1.Aux)
			y := v_1.Args[0]
			if !(x.Op != OpSB && y.Op != OpSB) {
				continue
			}
			v.reset(OpAMD64LEAL1)
			v.AuxInt = int32ToAuxInt(c)
			v.Aux = symToAux(s)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDL x (NEGL y))
	// result: (SUBL x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64NEGL {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64SUBL)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDL x l:(MOVLload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ADDLload x [off] {sym} ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			l := v_1
			if l.Op != OpAMD64MOVLload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
				continue
			}
			v.reset(OpAMD64ADDLload)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(x, ptr, mem)
			return true
		}
		break
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDLconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (ADDLconst [c] (ADDL x y))
	// result: (LEAL1 [c] x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64ADDL {
			break
		}
		y := v_0.Args[1]
		x := v_0.Args[0]
		v.reset(OpAMD64LEAL1)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDLconst [c] (ADDL x x))
	// result: (LEAL1 [c] x x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64ADDL {
			break
		}
		x := v_0.Args[1]
		if x != v_0.Args[0] {
			break
		}
		v.reset(OpAMD64LEAL1)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg2(x, x)
		return true
	}
	// match: (ADDLconst [c] (LEAL [d] {s} x))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAL [c+d] {s} x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAL {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAL)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg(x)
		return true
	}
	// match: (ADDLconst [c] (LEAL1 [d] {s} x y))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAL1 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAL1 {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAL1)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDLconst [c] (LEAL2 [d] {s} x y))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAL2 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAL2 {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAL2)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDLconst [c] (LEAL4 [d] {s} x y))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAL4 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAL4 {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAL4)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDLconst [c] (LEAL8 [d] {s} x y))
	// cond: is32Bit(int64(c)+int64(d))
	// result: (LEAL8 [c+d] {s} x y)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64LEAL8 {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		s := auxToSym(v_0.Aux)
		y := v_0.Args[1]
		x := v_0.Args[0]
		if !(is32Bit(int64(c) + int64(d))) {
			break
		}
		v.reset(OpAMD64LEAL8)
		v.AuxInt = int32ToAuxInt(c + d)
		v.Aux = symToAux(s)
		v.AddArg2(x, y)
		return true
	}
	// match: (ADDLconst [c] x)
	// cond: c==0
	// result: x
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(c == 0) {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (ADDLconst [c] (MOVLconst [d]))
	// result: (MOVLconst [c+d])
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(c + d)
		return true
	}
	// match: (ADDLconst [c] (ADDLconst [d] x))
	// result: (ADDLconst [c+d] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64ADDLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64ADDLconst)
		v.AuxInt = int32ToAuxInt(c + d)
		v.AddArg(x)
		return true
	}
	// match: (ADDLconst [off] x:(SP))
	// result: (LEAL [off] x)
	for {
		off := auxIntToInt32(v.AuxInt)
		x := v_0
		if x.Op != OpSP {
			break
		}
		v.reset(OpAMD64LEAL)
		v.AuxInt = int32ToAuxInt(off)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDLconstmodify(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADDLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2)
	// result: (ADDLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2)) {
			break
		}
		v.reset(OpAMD64ADDLconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(sym)
		v.AddArg2(base, mem)
		return true
	}
	// match: (ADDLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
	// result: (ADDLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDLconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDLload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ADDLload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ADDLload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ADDLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ADDLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ADDLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ADDLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
	// result: (ADDL x (MOVLf2i y))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64ADDL)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDLmodify(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADDLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ADDLmodify [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ADDLmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (ADDLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ADDLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ADDLmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDQ(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADDQ (SHRQconst [1] x) (SHRQconst [1] x))
	// result: (ANDQconst [-2] x)
	for {
		if v_0.Op != OpAMD64SHRQconst || auxIntToInt8(v_0.AuxInt) != 1 {
			break
		}
		x := v_0.Args[0]
		if v_1.Op != OpAMD64SHRQconst || auxIntToInt8(v_1.AuxInt) != 1 || x != v_1.Args[0] {
			break
		}
		v.reset(OpAMD64ANDQconst)
		v.AuxInt = int32ToAuxInt(-2)
		v.AddArg(x)
		return true
	}
	// match: (ADDQ x (MOVQconst <t> [c]))
	// cond: is32Bit(c) && !t.IsPtr()
	// result: (ADDQconst [int32(c)] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVQconst {
				continue
			}
			t := v_1.Type
			c := auxIntToInt64(v_1.AuxInt)
			if !(is32Bit(c) && !t.IsPtr()) {
				continue
			}
			v.reset(OpAMD64ADDQconst)
			v.AuxInt = int32ToAuxInt(int32(c))
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ADDQ x (MOVLconst [c]))
	// result: (ADDQconst [c] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVLconst {
				continue
			}
			c := auxIntToInt32(v_1.AuxInt)
			v.reset(OpAMD64ADDQconst)
			v.AuxInt = int32ToAuxInt(c)
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ADDQ x (SHLQconst [3] y))
	// result: (LEAQ8 x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 3 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAQ8)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDQ x (SHLQconst [2] y))
	// result: (LEAQ4 x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 2 {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64LEAQ4)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDQ x (ADDQ y y))
	// result: (LEAQ2 x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64ADDQ {
				continue
			}
			y := v_1.Args[1]
			if y != v_1.Args[0] {
				continue
			}
			v.reset(OpAMD64LEAQ2)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDQ x (ADDQ x y))
	// result: (LEAQ2 y x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64ADDQ {
				continue
			}
			_ = v_1.Args[1]
			v_1_0 := v_1.Args[0]
			v_1_1 := v_1.Args[1]
			for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
				if x != v_1_0 {
					continue
				}
				y := v_1_1
				v.reset(OpAMD64LEAQ2)
				v.AddArg2(y, x)
				return true
			}
		}
		break
	}
	// match: (ADDQ (ADDQconst [c] x) y)
	// result: (LEAQ1 [c] x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64ADDQconst {
				continue
			}
			c := auxIntToInt32(v_0.AuxInt)
			x := v_0.Args[0]
			y := v_1
			v.reset(OpAMD64LEAQ1)
			v.AuxInt = int32ToAuxInt(c)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDQ x (LEAQ [c] {s} y))
	// cond: x.Op != OpSB && y.Op != OpSB
	// result: (LEAQ1 [c] {s} x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64LEAQ {
				continue
			}
			c := auxIntToInt32(v_1.AuxInt)
			s := auxToSym(v_1.Aux)
			y := v_1.Args[0]
			if !(x.Op != OpSB && y.Op != OpSB) {
				continue
			}
			v.reset(OpAMD64LEAQ1)
			v.AuxInt = int32ToAuxInt(c)
			v.Aux = symToAux(s)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDQ x (NEGQ y))
	// result: (SUBQ x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64NEGQ {
				continue
			}
			y := v_1.Args[0]
			v.reset(OpAMD64SUBQ)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ADDQ x l:(MOVQload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ADDQload x [off] {sym} ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			l := v_1
			if l.Op != OpAMD64MOVQload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
				continue
			}
			v.reset(OpAMD64ADDQload)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(x, ptr, mem)
			return true
		}
		break
	}
	return false
}
func rewriteValueAMD64_OpAMD64ADDQcarry(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ADDQcarry x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (ADDQconstcarry x [int32(c)])
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_1.AuxInt)
			if !(is32Bit(c)) {
				continue
			}
			v.reset(OpAMD64ADDQconstcarry)
			v.AuxInt = int32ToAuxInt(int32(c))
			v.AddArg(x)
			return true
		}
		break
	}
	return false
}
2042 func rewriteValueAMD64_OpAMD64ADDQconst(v *Value) bool {
2043 v_0 := v.Args[0]
2044
2045
2046 for {
2047 c := auxIntToInt32(v.AuxInt)
2048 if v_0.Op != OpAMD64ADDQ {
2049 break
2050 }
2051 y := v_0.Args[1]
2052 x := v_0.Args[0]
2053 v.reset(OpAMD64LEAQ1)
2054 v.AuxInt = int32ToAuxInt(c)
2055 v.AddArg2(x, y)
2056 return true
2057 }
2058
2059
2060 for {
2061 c := auxIntToInt32(v.AuxInt)
2062 if v_0.Op != OpAMD64ADDQ {
2063 break
2064 }
2065 x := v_0.Args[1]
2066 if x != v_0.Args[0] {
2067 break
2068 }
2069 v.reset(OpAMD64LEAQ1)
2070 v.AuxInt = int32ToAuxInt(c)
2071 v.AddArg2(x, x)
2072 return true
2073 }
2074
2075
2076
2077 for {
2078 c := auxIntToInt32(v.AuxInt)
2079 if v_0.Op != OpAMD64LEAQ {
2080 break
2081 }
2082 d := auxIntToInt32(v_0.AuxInt)
2083 s := auxToSym(v_0.Aux)
2084 x := v_0.Args[0]
2085 if !(is32Bit(int64(c) + int64(d))) {
2086 break
2087 }
2088 v.reset(OpAMD64LEAQ)
2089 v.AuxInt = int32ToAuxInt(c + d)
2090 v.Aux = symToAux(s)
2091 v.AddArg(x)
2092 return true
2093 }
2094
2095
2096
2097 for {
2098 c := auxIntToInt32(v.AuxInt)
2099 if v_0.Op != OpAMD64LEAQ1 {
2100 break
2101 }
2102 d := auxIntToInt32(v_0.AuxInt)
2103 s := auxToSym(v_0.Aux)
2104 y := v_0.Args[1]
2105 x := v_0.Args[0]
2106 if !(is32Bit(int64(c) + int64(d))) {
2107 break
2108 }
2109 v.reset(OpAMD64LEAQ1)
2110 v.AuxInt = int32ToAuxInt(c + d)
2111 v.Aux = symToAux(s)
2112 v.AddArg2(x, y)
2113 return true
2114 }
2115
2116
2117
2118 for {
2119 c := auxIntToInt32(v.AuxInt)
2120 if v_0.Op != OpAMD64LEAQ2 {
2121 break
2122 }
2123 d := auxIntToInt32(v_0.AuxInt)
2124 s := auxToSym(v_0.Aux)
2125 y := v_0.Args[1]
2126 x := v_0.Args[0]
2127 if !(is32Bit(int64(c) + int64(d))) {
2128 break
2129 }
2130 v.reset(OpAMD64LEAQ2)
2131 v.AuxInt = int32ToAuxInt(c + d)
2132 v.Aux = symToAux(s)
2133 v.AddArg2(x, y)
2134 return true
2135 }
2136
2137
2138
2139 for {
2140 c := auxIntToInt32(v.AuxInt)
2141 if v_0.Op != OpAMD64LEAQ4 {
2142 break
2143 }
2144 d := auxIntToInt32(v_0.AuxInt)
2145 s := auxToSym(v_0.Aux)
2146 y := v_0.Args[1]
2147 x := v_0.Args[0]
2148 if !(is32Bit(int64(c) + int64(d))) {
2149 break
2150 }
2151 v.reset(OpAMD64LEAQ4)
2152 v.AuxInt = int32ToAuxInt(c + d)
2153 v.Aux = symToAux(s)
2154 v.AddArg2(x, y)
2155 return true
2156 }
2157
2158
2159
2160 for {
2161 c := auxIntToInt32(v.AuxInt)
2162 if v_0.Op != OpAMD64LEAQ8 {
2163 break
2164 }
2165 d := auxIntToInt32(v_0.AuxInt)
2166 s := auxToSym(v_0.Aux)
2167 y := v_0.Args[1]
2168 x := v_0.Args[0]
2169 if !(is32Bit(int64(c) + int64(d))) {
2170 break
2171 }
2172 v.reset(OpAMD64LEAQ8)
2173 v.AuxInt = int32ToAuxInt(c + d)
2174 v.Aux = symToAux(s)
2175 v.AddArg2(x, y)
2176 return true
2177 }
2178
2179
2180 for {
2181 if auxIntToInt32(v.AuxInt) != 0 {
2182 break
2183 }
2184 x := v_0
2185 v.copyOf(x)
2186 return true
2187 }
// match: (ADDQconst [c] (MOVQconst [d]))
// result: (MOVQconst [int64(c)+d])
2190 for {
2191 c := auxIntToInt32(v.AuxInt)
2192 if v_0.Op != OpAMD64MOVQconst {
2193 break
2194 }
2195 d := auxIntToInt64(v_0.AuxInt)
2196 v.reset(OpAMD64MOVQconst)
2197 v.AuxInt = int64ToAuxInt(int64(c) + d)
2198 return true
2199 }
// match: (ADDQconst [c] (ADDQconst [d] x))
// cond: is32Bit(int64(c)+int64(d))
// result: (ADDQconst [c+d] x)
2203 for {
2204 c := auxIntToInt32(v.AuxInt)
2205 if v_0.Op != OpAMD64ADDQconst {
2206 break
2207 }
2208 d := auxIntToInt32(v_0.AuxInt)
2209 x := v_0.Args[0]
2210 if !(is32Bit(int64(c) + int64(d))) {
2211 break
2212 }
2213 v.reset(OpAMD64ADDQconst)
2214 v.AuxInt = int32ToAuxInt(c + d)
2215 v.AddArg(x)
2216 return true
2217 }
// match: (ADDQconst [off] x:(SP))
// result: (LEAQ [off] x)
2220 for {
2221 off := auxIntToInt32(v.AuxInt)
2222 x := v_0
2223 if x.Op != OpSP {
2224 break
2225 }
2226 v.reset(OpAMD64LEAQ)
2227 v.AuxInt = int32ToAuxInt(off)
2228 v.AddArg(x)
2229 return true
2230 }
2231 return false
2232 }
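// rewriteValueAMD64_OpAMD64ADDQconstmodify folds address arithmetic (ADDQconst, LEAQ) into the read-modify-write's offset and symbol.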
2233 func rewriteValueAMD64_OpAMD64ADDQconstmodify(v *Value) bool {
2234 v_1 := v.Args[1]
2235 v_0 := v.Args[0]
// match: (ADDQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
// cond: ValAndOff(valoff1).canAdd32(off2)
// result: (ADDQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
2239 for {
2240 valoff1 := auxIntToValAndOff(v.AuxInt)
2241 sym := auxToSym(v.Aux)
2242 if v_0.Op != OpAMD64ADDQconst {
2243 break
2244 }
2245 off2 := auxIntToInt32(v_0.AuxInt)
2246 base := v_0.Args[0]
2247 mem := v_1
2248 if !(ValAndOff(valoff1).canAdd32(off2)) {
2249 break
2250 }
2251 v.reset(OpAMD64ADDQconstmodify)
2252 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
2253 v.Aux = symToAux(sym)
2254 v.AddArg2(base, mem)
2255 return true
2256 }
// match: (ADDQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
// result: (ADDQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
2260 for {
2261 valoff1 := auxIntToValAndOff(v.AuxInt)
2262 sym1 := auxToSym(v.Aux)
2263 if v_0.Op != OpAMD64LEAQ {
2264 break
2265 }
2266 off2 := auxIntToInt32(v_0.AuxInt)
2267 sym2 := auxToSym(v_0.Aux)
2268 base := v_0.Args[0]
2269 mem := v_1
2270 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
2271 break
2272 }
2273 v.reset(OpAMD64ADDQconstmodify)
2274 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
2275 v.Aux = symToAux(mergeSym(sym1, sym2))
2276 v.AddArg2(base, mem)
2277 return true
2278 }
2279 return false
2280 }
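// rewriteValueAMD64_OpAMD64ADDQload folds address arithmetic into the load's offset/symbol and forwards a just-stored SSE value through MOVQf2i instead of reloading it.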
2281 func rewriteValueAMD64_OpAMD64ADDQload(v *Value) bool {
2282 v_2 := v.Args[2]
2283 v_1 := v.Args[1]
2284 v_0 := v.Args[0]
2285 b := v.Block
2286 typ := &b.Func.Config.Types
// match: (ADDQload [off1] {sym} val (ADDQconst [off2] base) mem)
// cond: is32Bit(int64(off1)+int64(off2))
// result: (ADDQload [off1+off2] {sym} val base mem)
2290 for {
2291 off1 := auxIntToInt32(v.AuxInt)
2292 sym := auxToSym(v.Aux)
2293 val := v_0
2294 if v_1.Op != OpAMD64ADDQconst {
2295 break
2296 }
2297 off2 := auxIntToInt32(v_1.AuxInt)
2298 base := v_1.Args[0]
2299 mem := v_2
2300 if !(is32Bit(int64(off1) + int64(off2))) {
2301 break
2302 }
2303 v.reset(OpAMD64ADDQload)
2304 v.AuxInt = int32ToAuxInt(off1 + off2)
2305 v.Aux = symToAux(sym)
2306 v.AddArg3(val, base, mem)
2307 return true
2308 }
// match: (ADDQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
// result: (ADDQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
2312 for {
2313 off1 := auxIntToInt32(v.AuxInt)
2314 sym1 := auxToSym(v.Aux)
2315 val := v_0
2316 if v_1.Op != OpAMD64LEAQ {
2317 break
2318 }
2319 off2 := auxIntToInt32(v_1.AuxInt)
2320 sym2 := auxToSym(v_1.Aux)
2321 base := v_1.Args[0]
2322 mem := v_2
2323 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
2324 break
2325 }
2326 v.reset(OpAMD64ADDQload)
2327 v.AuxInt = int32ToAuxInt(off1 + off2)
2328 v.Aux = symToAux(mergeSym(sym1, sym2))
2329 v.AddArg3(val, base, mem)
2330 return true
2331 }
// match: (ADDQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
// result: (ADDQ x (MOVQf2i y))
2334 for {
2335 off := auxIntToInt32(v.AuxInt)
2336 sym := auxToSym(v.Aux)
2337 x := v_0
2338 ptr := v_1
2339 if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
2340 break
2341 }
2342 y := v_2.Args[1]
2343 if ptr != v_2.Args[0] {
2344 break
2345 }
2346 v.reset(OpAMD64ADDQ)
2347 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
2348 v0.AddArg(y)
2349 v.AddArg2(x, v0)
2350 return true
2351 }
2352 return false
2353 }
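// rewriteValueAMD64_OpAMD64ADDQmodify folds address arithmetic into the read-modify-write's offset and symbol.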
2354 func rewriteValueAMD64_OpAMD64ADDQmodify(v *Value) bool {
2355 v_2 := v.Args[2]
2356 v_1 := v.Args[1]
2357 v_0 := v.Args[0]
// match: (ADDQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
// cond: is32Bit(int64(off1)+int64(off2))
// result: (ADDQmodify [off1+off2] {sym} base val mem)
2361 for {
2362 off1 := auxIntToInt32(v.AuxInt)
2363 sym := auxToSym(v.Aux)
2364 if v_0.Op != OpAMD64ADDQconst {
2365 break
2366 }
2367 off2 := auxIntToInt32(v_0.AuxInt)
2368 base := v_0.Args[0]
2369 val := v_1
2370 mem := v_2
2371 if !(is32Bit(int64(off1) + int64(off2))) {
2372 break
2373 }
2374 v.reset(OpAMD64ADDQmodify)
2375 v.AuxInt = int32ToAuxInt(off1 + off2)
2376 v.Aux = symToAux(sym)
2377 v.AddArg3(base, val, mem)
2378 return true
2379 }
// match: (ADDQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
// result: (ADDQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
2383 for {
2384 off1 := auxIntToInt32(v.AuxInt)
2385 sym1 := auxToSym(v.Aux)
2386 if v_0.Op != OpAMD64LEAQ {
2387 break
2388 }
2389 off2 := auxIntToInt32(v_0.AuxInt)
2390 sym2 := auxToSym(v_0.Aux)
2391 base := v_0.Args[0]
2392 val := v_1
2393 mem := v_2
2394 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
2395 break
2396 }
2397 v.reset(OpAMD64ADDQmodify)
2398 v.AuxInt = int32ToAuxInt(off1 + off2)
2399 v.Aux = symToAux(mergeSym(sym1, sym2))
2400 v.AddArg3(base, val, mem)
2401 return true
2402 }
2403 return false
2404 }
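// rewriteValueAMD64_OpAMD64ADDSD merges a mergeable MOVSDload operand into ADDSDload, and fuses ADDSD(MULSD x y, z) into VFMADD231SD when FMA is available.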
2405 func rewriteValueAMD64_OpAMD64ADDSD(v *Value) bool {
2406 v_1 := v.Args[1]
2407 v_0 := v.Args[0]
// match: (ADDSD x l:(MOVSDload [off] {sym} ptr mem))
// cond: canMergeLoadClobber(v, l, x) && clobber(l)
// result: (ADDSDload x [off] {sym} ptr mem)
2411 for {
2412 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
2413 x := v_0
2414 l := v_1
2415 if l.Op != OpAMD64MOVSDload {
2416 continue
2417 }
2418 off := auxIntToInt32(l.AuxInt)
2419 sym := auxToSym(l.Aux)
2420 mem := l.Args[1]
2421 ptr := l.Args[0]
2422 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
2423 continue
2424 }
2425 v.reset(OpAMD64ADDSDload)
2426 v.AuxInt = int32ToAuxInt(off)
2427 v.Aux = symToAux(sym)
2428 v.AddArg3(x, ptr, mem)
2429 return true
2430 }
2431 break
2432 }
// match: (ADDSD (MULSD x y) z)
// cond: buildcfg.GOAMD64 >= 3 && z.Block.Func.useFMA(v)
// result: (VFMADD231SD z x y)
2436 for {
2437 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
2438 if v_0.Op != OpAMD64MULSD {
2439 continue
2440 }
2441 y := v_0.Args[1]
2442 x := v_0.Args[0]
2443 z := v_1
2444 if !(buildcfg.GOAMD64 >= 3 && z.Block.Func.useFMA(v)) {
2445 continue
2446 }
2447 v.reset(OpAMD64VFMADD231SD)
2448 v.AddArg3(z, x, y)
2449 return true
2450 }
2451 break
2452 }
2453 return false
2454 }
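// rewriteValueAMD64_OpAMD64ADDSDload folds address arithmetic into the load and forwards a just-stored integer value through MOVQi2f.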
2455 func rewriteValueAMD64_OpAMD64ADDSDload(v *Value) bool {
2456 v_2 := v.Args[2]
2457 v_1 := v.Args[1]
2458 v_0 := v.Args[0]
2459 b := v.Block
2460 typ := &b.Func.Config.Types
// match: (ADDSDload [off1] {sym} val (ADDQconst [off2] base) mem)
// cond: is32Bit(int64(off1)+int64(off2))
// result: (ADDSDload [off1+off2] {sym} val base mem)
2464 for {
2465 off1 := auxIntToInt32(v.AuxInt)
2466 sym := auxToSym(v.Aux)
2467 val := v_0
2468 if v_1.Op != OpAMD64ADDQconst {
2469 break
2470 }
2471 off2 := auxIntToInt32(v_1.AuxInt)
2472 base := v_1.Args[0]
2473 mem := v_2
2474 if !(is32Bit(int64(off1) + int64(off2))) {
2475 break
2476 }
2477 v.reset(OpAMD64ADDSDload)
2478 v.AuxInt = int32ToAuxInt(off1 + off2)
2479 v.Aux = symToAux(sym)
2480 v.AddArg3(val, base, mem)
2481 return true
2482 }
// match: (ADDSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
// result: (ADDSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
2486 for {
2487 off1 := auxIntToInt32(v.AuxInt)
2488 sym1 := auxToSym(v.Aux)
2489 val := v_0
2490 if v_1.Op != OpAMD64LEAQ {
2491 break
2492 }
2493 off2 := auxIntToInt32(v_1.AuxInt)
2494 sym2 := auxToSym(v_1.Aux)
2495 base := v_1.Args[0]
2496 mem := v_2
2497 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
2498 break
2499 }
2500 v.reset(OpAMD64ADDSDload)
2501 v.AuxInt = int32ToAuxInt(off1 + off2)
2502 v.Aux = symToAux(mergeSym(sym1, sym2))
2503 v.AddArg3(val, base, mem)
2504 return true
2505 }
// match: (ADDSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _))
// result: (ADDSD x (MOVQi2f y))
2508 for {
2509 off := auxIntToInt32(v.AuxInt)
2510 sym := auxToSym(v.Aux)
2511 x := v_0
2512 ptr := v_1
2513 if v_2.Op != OpAMD64MOVQstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
2514 break
2515 }
2516 y := v_2.Args[1]
2517 if ptr != v_2.Args[0] {
2518 break
2519 }
2520 v.reset(OpAMD64ADDSD)
2521 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQi2f, typ.Float64)
2522 v0.AddArg(y)
2523 v.AddArg2(x, v0)
2524 return true
2525 }
2526 return false
2527 }
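// rewriteValueAMD64_OpAMD64ADDSS mirrors the ADDSD rules for float32: load folding into ADDSSload and FMA fusion into VFMADD231SS.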
2528 func rewriteValueAMD64_OpAMD64ADDSS(v *Value) bool {
2529 v_1 := v.Args[1]
2530 v_0 := v.Args[0]
// match: (ADDSS x l:(MOVSSload [off] {sym} ptr mem))
// cond: canMergeLoadClobber(v, l, x) && clobber(l)
// result: (ADDSSload x [off] {sym} ptr mem)
2534 for {
2535 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
2536 x := v_0
2537 l := v_1
2538 if l.Op != OpAMD64MOVSSload {
2539 continue
2540 }
2541 off := auxIntToInt32(l.AuxInt)
2542 sym := auxToSym(l.Aux)
2543 mem := l.Args[1]
2544 ptr := l.Args[0]
2545 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
2546 continue
2547 }
2548 v.reset(OpAMD64ADDSSload)
2549 v.AuxInt = int32ToAuxInt(off)
2550 v.Aux = symToAux(sym)
2551 v.AddArg3(x, ptr, mem)
2552 return true
2553 }
2554 break
2555 }
// match: (ADDSS (MULSS x y) z)
// cond: buildcfg.GOAMD64 >= 3 && z.Block.Func.useFMA(v)
// result: (VFMADD231SS z x y)
2559 for {
2560 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
2561 if v_0.Op != OpAMD64MULSS {
2562 continue
2563 }
2564 y := v_0.Args[1]
2565 x := v_0.Args[0]
2566 z := v_1
2567 if !(buildcfg.GOAMD64 >= 3 && z.Block.Func.useFMA(v)) {
2568 continue
2569 }
2570 v.reset(OpAMD64VFMADD231SS)
2571 v.AddArg3(z, x, y)
2572 return true
2573 }
2574 break
2575 }
2576 return false
2577 }
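// rewriteValueAMD64_OpAMD64ADDSSload folds address arithmetic into the load and forwards a just-stored integer value through MOVLi2f.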
2578 func rewriteValueAMD64_OpAMD64ADDSSload(v *Value) bool {
2579 v_2 := v.Args[2]
2580 v_1 := v.Args[1]
2581 v_0 := v.Args[0]
2582 b := v.Block
2583 typ := &b.Func.Config.Types
// match: (ADDSSload [off1] {sym} val (ADDQconst [off2] base) mem)
// cond: is32Bit(int64(off1)+int64(off2))
// result: (ADDSSload [off1+off2] {sym} val base mem)
2587 for {
2588 off1 := auxIntToInt32(v.AuxInt)
2589 sym := auxToSym(v.Aux)
2590 val := v_0
2591 if v_1.Op != OpAMD64ADDQconst {
2592 break
2593 }
2594 off2 := auxIntToInt32(v_1.AuxInt)
2595 base := v_1.Args[0]
2596 mem := v_2
2597 if !(is32Bit(int64(off1) + int64(off2))) {
2598 break
2599 }
2600 v.reset(OpAMD64ADDSSload)
2601 v.AuxInt = int32ToAuxInt(off1 + off2)
2602 v.Aux = symToAux(sym)
2603 v.AddArg3(val, base, mem)
2604 return true
2605 }
// match: (ADDSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
// result: (ADDSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
2609 for {
2610 off1 := auxIntToInt32(v.AuxInt)
2611 sym1 := auxToSym(v.Aux)
2612 val := v_0
2613 if v_1.Op != OpAMD64LEAQ {
2614 break
2615 }
2616 off2 := auxIntToInt32(v_1.AuxInt)
2617 sym2 := auxToSym(v_1.Aux)
2618 base := v_1.Args[0]
2619 mem := v_2
2620 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
2621 break
2622 }
2623 v.reset(OpAMD64ADDSSload)
2624 v.AuxInt = int32ToAuxInt(off1 + off2)
2625 v.Aux = symToAux(mergeSym(sym1, sym2))
2626 v.AddArg3(val, base, mem)
2627 return true
2628 }
// match: (ADDSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _))
// result: (ADDSS x (MOVLi2f y))
2631 for {
2632 off := auxIntToInt32(v.AuxInt)
2633 sym := auxToSym(v.Aux)
2634 x := v_0
2635 ptr := v_1
2636 if v_2.Op != OpAMD64MOVLstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
2637 break
2638 }
2639 y := v_2.Args[1]
2640 if ptr != v_2.Args[0] {
2641 break
2642 }
2643 v.reset(OpAMD64ADDSS)
2644 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLi2f, typ.Float32)
2645 v0.AddArg(y)
2646 v.AddArg2(x, v0)
2647 return true
2648 }
2649 return false
2650 }
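// rewriteValueAMD64_OpAMD64ANDL lowers ANDL to BTRL/ANDLconst/ANDNL/BLSIL/BLSRL forms where profitable and folds mergeable loads into ANDLload.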
2651 func rewriteValueAMD64_OpAMD64ANDL(v *Value) bool {
2652 v_1 := v.Args[1]
2653 v_0 := v.Args[0]
2654 b := v.Block
2655 typ := &b.Func.Config.Types
// match: (ANDL (NOTL (SHLL (MOVLconst [1]) y)) x)
// result: (BTRL x y)
2658 for {
2659 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
2660 if v_0.Op != OpAMD64NOTL {
2661 continue
2662 }
2663 v_0_0 := v_0.Args[0]
2664 if v_0_0.Op != OpAMD64SHLL {
2665 continue
2666 }
2667 y := v_0_0.Args[1]
2668 v_0_0_0 := v_0_0.Args[0]
2669 if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 {
2670 continue
2671 }
2672 x := v_1
2673 v.reset(OpAMD64BTRL)
2674 v.AddArg2(x, y)
2675 return true
2676 }
2677 break
2678 }
// match: (ANDL x (MOVLconst [c]))
// result: (ANDLconst [c] x)
2681 for {
2682 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
2683 x := v_0
2684 if v_1.Op != OpAMD64MOVLconst {
2685 continue
2686 }
2687 c := auxIntToInt32(v_1.AuxInt)
2688 v.reset(OpAMD64ANDLconst)
2689 v.AuxInt = int32ToAuxInt(c)
2690 v.AddArg(x)
2691 return true
2692 }
2693 break
2694 }
// match: (ANDL x x)
// result: x
2697 for {
2698 x := v_0
2699 if x != v_1 {
2700 break
2701 }
2702 v.copyOf(x)
2703 return true
2704 }
// match: (ANDL x l:(MOVLload [off] {sym} ptr mem))
// cond: canMergeLoadClobber(v, l, x) && clobber(l)
// result: (ANDLload x [off] {sym} ptr mem)
2708 for {
2709 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
2710 x := v_0
2711 l := v_1
2712 if l.Op != OpAMD64MOVLload {
2713 continue
2714 }
2715 off := auxIntToInt32(l.AuxInt)
2716 sym := auxToSym(l.Aux)
2717 mem := l.Args[1]
2718 ptr := l.Args[0]
2719 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
2720 continue
2721 }
2722 v.reset(OpAMD64ANDLload)
2723 v.AuxInt = int32ToAuxInt(off)
2724 v.Aux = symToAux(sym)
2725 v.AddArg3(x, ptr, mem)
2726 return true
2727 }
2728 break
2729 }
// match: (ANDL x (NOTL y))
// cond: buildcfg.GOAMD64 >= 3
// result: (ANDNL x y)
2733 for {
2734 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
2735 x := v_0
2736 if v_1.Op != OpAMD64NOTL {
2737 continue
2738 }
2739 y := v_1.Args[0]
2740 if !(buildcfg.GOAMD64 >= 3) {
2741 continue
2742 }
2743 v.reset(OpAMD64ANDNL)
2744 v.AddArg2(x, y)
2745 return true
2746 }
2747 break
2748 }
// match: (ANDL x (NEGL x))
// cond: buildcfg.GOAMD64 >= 3
// result: (BLSIL x)
2752 for {
2753 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
2754 x := v_0
2755 if v_1.Op != OpAMD64NEGL || x != v_1.Args[0] || !(buildcfg.GOAMD64 >= 3) {
2756 continue
2757 }
2758 v.reset(OpAMD64BLSIL)
2759 v.AddArg(x)
2760 return true
2761 }
2762 break
2763 }
// match: (ANDL <t> x (ADDLconst [-1] x))
// cond: buildcfg.GOAMD64 >= 3
// result: (Select0 <t> (BLSRL x))
2767 for {
2768 t := v.Type
2769 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
2770 x := v_0
2771 if v_1.Op != OpAMD64ADDLconst || auxIntToInt32(v_1.AuxInt) != -1 || x != v_1.Args[0] || !(buildcfg.GOAMD64 >= 3) {
2772 continue
2773 }
2774 v.reset(OpSelect0)
2775 v.Type = t
2776 v0 := b.NewValue0(v.Pos, OpAMD64BLSRL, types.NewTuple(typ.UInt32, types.TypeFlags))
2777 v0.AddArg(x)
2778 v.AddArg(v0)
2779 return true
2780 }
2781 break
2782 }
2783 return false
2784 }
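// rewriteValueAMD64_OpAMD64ANDLconst collapses stacked masks, recognizes the 0xFF/0xFFFF masks as zero-extensions, and evaluates constant operands.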
2785 func rewriteValueAMD64_OpAMD64ANDLconst(v *Value) bool {
2786 v_0 := v.Args[0]
// match: (ANDLconst [c] (ANDLconst [d] x))
// result: (ANDLconst [c & d] x)
2789 for {
2790 c := auxIntToInt32(v.AuxInt)
2791 if v_0.Op != OpAMD64ANDLconst {
2792 break
2793 }
2794 d := auxIntToInt32(v_0.AuxInt)
2795 x := v_0.Args[0]
2796 v.reset(OpAMD64ANDLconst)
2797 v.AuxInt = int32ToAuxInt(c & d)
2798 v.AddArg(x)
2799 return true
2800 }
// match: (ANDLconst [0xFF] x)
// result: (MOVBQZX x)
2803 for {
2804 if auxIntToInt32(v.AuxInt) != 0xFF {
2805 break
2806 }
2807 x := v_0
2808 v.reset(OpAMD64MOVBQZX)
2809 v.AddArg(x)
2810 return true
2811 }
// match: (ANDLconst [0xFFFF] x)
// result: (MOVWQZX x)
2814 for {
2815 if auxIntToInt32(v.AuxInt) != 0xFFFF {
2816 break
2817 }
2818 x := v_0
2819 v.reset(OpAMD64MOVWQZX)
2820 v.AddArg(x)
2821 return true
2822 }
// match: (ANDLconst [c] _)
// cond: c==0
// result: (MOVLconst [0])
2826 for {
2827 c := auxIntToInt32(v.AuxInt)
2828 if !(c == 0) {
2829 break
2830 }
2831 v.reset(OpAMD64MOVLconst)
2832 v.AuxInt = int32ToAuxInt(0)
2833 return true
2834 }
// match: (ANDLconst [c] x)
// cond: c==-1
// result: x
2838 for {
2839 c := auxIntToInt32(v.AuxInt)
2840 x := v_0
2841 if !(c == -1) {
2842 break
2843 }
2844 v.copyOf(x)
2845 return true
2846 }
// match: (ANDLconst [c] (MOVLconst [d]))
// result: (MOVLconst [c&d])
2849 for {
2850 c := auxIntToInt32(v.AuxInt)
2851 if v_0.Op != OpAMD64MOVLconst {
2852 break
2853 }
2854 d := auxIntToInt32(v_0.AuxInt)
2855 v.reset(OpAMD64MOVLconst)
2856 v.AuxInt = int32ToAuxInt(c & d)
2857 return true
2858 }
2859 return false
2860 }
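// rewriteValueAMD64_OpAMD64ANDLconstmodify folds address arithmetic into the read-modify-write's offset and symbol.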
2861 func rewriteValueAMD64_OpAMD64ANDLconstmodify(v *Value) bool {
2862 v_1 := v.Args[1]
2863 v_0 := v.Args[0]
// match: (ANDLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
// cond: ValAndOff(valoff1).canAdd32(off2)
// result: (ANDLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
2867 for {
2868 valoff1 := auxIntToValAndOff(v.AuxInt)
2869 sym := auxToSym(v.Aux)
2870 if v_0.Op != OpAMD64ADDQconst {
2871 break
2872 }
2873 off2 := auxIntToInt32(v_0.AuxInt)
2874 base := v_0.Args[0]
2875 mem := v_1
2876 if !(ValAndOff(valoff1).canAdd32(off2)) {
2877 break
2878 }
2879 v.reset(OpAMD64ANDLconstmodify)
2880 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
2881 v.Aux = symToAux(sym)
2882 v.AddArg2(base, mem)
2883 return true
2884 }
// match: (ANDLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
// result: (ANDLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
2888 for {
2889 valoff1 := auxIntToValAndOff(v.AuxInt)
2890 sym1 := auxToSym(v.Aux)
2891 if v_0.Op != OpAMD64LEAQ {
2892 break
2893 }
2894 off2 := auxIntToInt32(v_0.AuxInt)
2895 sym2 := auxToSym(v_0.Aux)
2896 base := v_0.Args[0]
2897 mem := v_1
2898 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
2899 break
2900 }
2901 v.reset(OpAMD64ANDLconstmodify)
2902 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
2903 v.Aux = symToAux(mergeSym(sym1, sym2))
2904 v.AddArg2(base, mem)
2905 return true
2906 }
2907 return false
2908 }
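// rewriteValueAMD64_OpAMD64ANDLload folds address arithmetic into the load and forwards a just-stored SSE value through MOVLf2i.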
2909 func rewriteValueAMD64_OpAMD64ANDLload(v *Value) bool {
2910 v_2 := v.Args[2]
2911 v_1 := v.Args[1]
2912 v_0 := v.Args[0]
2913 b := v.Block
2914 typ := &b.Func.Config.Types
// match: (ANDLload [off1] {sym} val (ADDQconst [off2] base) mem)
// cond: is32Bit(int64(off1)+int64(off2))
// result: (ANDLload [off1+off2] {sym} val base mem)
2918 for {
2919 off1 := auxIntToInt32(v.AuxInt)
2920 sym := auxToSym(v.Aux)
2921 val := v_0
2922 if v_1.Op != OpAMD64ADDQconst {
2923 break
2924 }
2925 off2 := auxIntToInt32(v_1.AuxInt)
2926 base := v_1.Args[0]
2927 mem := v_2
2928 if !(is32Bit(int64(off1) + int64(off2))) {
2929 break
2930 }
2931 v.reset(OpAMD64ANDLload)
2932 v.AuxInt = int32ToAuxInt(off1 + off2)
2933 v.Aux = symToAux(sym)
2934 v.AddArg3(val, base, mem)
2935 return true
2936 }
// match: (ANDLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
// result: (ANDLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
2940 for {
2941 off1 := auxIntToInt32(v.AuxInt)
2942 sym1 := auxToSym(v.Aux)
2943 val := v_0
2944 if v_1.Op != OpAMD64LEAQ {
2945 break
2946 }
2947 off2 := auxIntToInt32(v_1.AuxInt)
2948 sym2 := auxToSym(v_1.Aux)
2949 base := v_1.Args[0]
2950 mem := v_2
2951 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
2952 break
2953 }
2954 v.reset(OpAMD64ANDLload)
2955 v.AuxInt = int32ToAuxInt(off1 + off2)
2956 v.Aux = symToAux(mergeSym(sym1, sym2))
2957 v.AddArg3(val, base, mem)
2958 return true
2959 }
// match: (ANDLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
// result: (ANDL x (MOVLf2i y))
2962 for {
2963 off := auxIntToInt32(v.AuxInt)
2964 sym := auxToSym(v.Aux)
2965 x := v_0
2966 ptr := v_1
2967 if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
2968 break
2969 }
2970 y := v_2.Args[1]
2971 if ptr != v_2.Args[0] {
2972 break
2973 }
2974 v.reset(OpAMD64ANDL)
2975 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
2976 v0.AddArg(y)
2977 v.AddArg2(x, v0)
2978 return true
2979 }
2980 return false
2981 }
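// rewriteValueAMD64_OpAMD64ANDLmodify folds address arithmetic into the read-modify-write's offset and symbol.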
2982 func rewriteValueAMD64_OpAMD64ANDLmodify(v *Value) bool {
2983 v_2 := v.Args[2]
2984 v_1 := v.Args[1]
2985 v_0 := v.Args[0]
// match: (ANDLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
// cond: is32Bit(int64(off1)+int64(off2))
// result: (ANDLmodify [off1+off2] {sym} base val mem)
2989 for {
2990 off1 := auxIntToInt32(v.AuxInt)
2991 sym := auxToSym(v.Aux)
2992 if v_0.Op != OpAMD64ADDQconst {
2993 break
2994 }
2995 off2 := auxIntToInt32(v_0.AuxInt)
2996 base := v_0.Args[0]
2997 val := v_1
2998 mem := v_2
2999 if !(is32Bit(int64(off1) + int64(off2))) {
3000 break
3001 }
3002 v.reset(OpAMD64ANDLmodify)
3003 v.AuxInt = int32ToAuxInt(off1 + off2)
3004 v.Aux = symToAux(sym)
3005 v.AddArg3(base, val, mem)
3006 return true
3007 }
// match: (ANDLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
// result: (ANDLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
3011 for {
3012 off1 := auxIntToInt32(v.AuxInt)
3013 sym1 := auxToSym(v.Aux)
3014 if v_0.Op != OpAMD64LEAQ {
3015 break
3016 }
3017 off2 := auxIntToInt32(v_0.AuxInt)
3018 sym2 := auxToSym(v_0.Aux)
3019 base := v_0.Args[0]
3020 val := v_1
3021 mem := v_2
3022 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
3023 break
3024 }
3025 v.reset(OpAMD64ANDLmodify)
3026 v.AuxInt = int32ToAuxInt(off1 + off2)
3027 v.Aux = symToAux(mergeSym(sym1, sym2))
3028 v.AddArg3(base, val, mem)
3029 return true
3030 }
3031 return false
3032 }
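// rewriteValueAMD64_OpAMD64ANDNL recognizes and-not of a shifted 1 bit as the bit-reset instruction BTRL.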
3033 func rewriteValueAMD64_OpAMD64ANDNL(v *Value) bool {
3034 v_1 := v.Args[1]
3035 v_0 := v.Args[0]
// match: (ANDNL x (SHLL (MOVLconst [1]) y))
// result: (BTRL x y)
3038 for {
3039 x := v_0
3040 if v_1.Op != OpAMD64SHLL {
3041 break
3042 }
3043 y := v_1.Args[1]
3044 v_1_0 := v_1.Args[0]
3045 if v_1_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_1_0.AuxInt) != 1 {
3046 break
3047 }
3048 v.reset(OpAMD64BTRL)
3049 v.AddArg2(x, y)
3050 return true
3051 }
3052 return false
3053 }
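// rewriteValueAMD64_OpAMD64ANDNQ recognizes and-not of a shifted 1 bit as the bit-reset instruction BTRQ.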
3054 func rewriteValueAMD64_OpAMD64ANDNQ(v *Value) bool {
3055 v_1 := v.Args[1]
3056 v_0 := v.Args[0]
// match: (ANDNQ x (SHLQ (MOVQconst [1]) y))
// result: (BTRQ x y)
3059 for {
3060 x := v_0
3061 if v_1.Op != OpAMD64SHLQ {
3062 break
3063 }
3064 y := v_1.Args[1]
3065 v_1_0 := v_1.Args[0]
3066 if v_1_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_1_0.AuxInt) != 1 {
3067 break
3068 }
3069 v.reset(OpAMD64BTRQ)
3070 v.AddArg2(x, y)
3071 return true
3072 }
3073 return false
3074 }
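// rewriteValueAMD64_OpAMD64ANDQ lowers ANDQ to BTRQ/BTRQconst/ANDQconst/ANDNQ/BLSIQ/BLSRQ forms where profitable and folds mergeable loads into ANDQload.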
3075 func rewriteValueAMD64_OpAMD64ANDQ(v *Value) bool {
3076 v_1 := v.Args[1]
3077 v_0 := v.Args[0]
3078 b := v.Block
3079 typ := &b.Func.Config.Types
// match: (ANDQ (NOTQ (SHLQ (MOVQconst [1]) y)) x)
// result: (BTRQ x y)
3082 for {
3083 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
3084 if v_0.Op != OpAMD64NOTQ {
3085 continue
3086 }
3087 v_0_0 := v_0.Args[0]
3088 if v_0_0.Op != OpAMD64SHLQ {
3089 continue
3090 }
3091 y := v_0_0.Args[1]
3092 v_0_0_0 := v_0_0.Args[0]
3093 if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
3094 continue
3095 }
3096 x := v_1
3097 v.reset(OpAMD64BTRQ)
3098 v.AddArg2(x, y)
3099 return true
3100 }
3101 break
3102 }
// match: (ANDQ (MOVQconst [c]) x)
// cond: isUint64PowerOfTwo(^c) && uint64(^c) >= 1<<31
// result: (BTRQconst [int8(log64(^c))] x)
3106 for {
3107 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
3108 if v_0.Op != OpAMD64MOVQconst {
3109 continue
3110 }
3111 c := auxIntToInt64(v_0.AuxInt)
3112 x := v_1
3113 if !(isUint64PowerOfTwo(^c) && uint64(^c) >= 1<<31) {
3114 continue
3115 }
3116 v.reset(OpAMD64BTRQconst)
3117 v.AuxInt = int8ToAuxInt(int8(log64(^c)))
3118 v.AddArg(x)
3119 return true
3120 }
3121 break
3122 }
// match: (ANDQ x (MOVQconst [c]))
// cond: is32Bit(c)
// result: (ANDQconst [int32(c)] x)
3126 for {
3127 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
3128 x := v_0
3129 if v_1.Op != OpAMD64MOVQconst {
3130 continue
3131 }
3132 c := auxIntToInt64(v_1.AuxInt)
3133 if !(is32Bit(c)) {
3134 continue
3135 }
3136 v.reset(OpAMD64ANDQconst)
3137 v.AuxInt = int32ToAuxInt(int32(c))
3138 v.AddArg(x)
3139 return true
3140 }
3141 break
3142 }
// match: (ANDQ x x)
// result: x
3145 for {
3146 x := v_0
3147 if x != v_1 {
3148 break
3149 }
3150 v.copyOf(x)
3151 return true
3152 }
// match: (ANDQ x l:(MOVQload [off] {sym} ptr mem))
// cond: canMergeLoadClobber(v, l, x) && clobber(l)
// result: (ANDQload x [off] {sym} ptr mem)
3156 for {
3157 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
3158 x := v_0
3159 l := v_1
3160 if l.Op != OpAMD64MOVQload {
3161 continue
3162 }
3163 off := auxIntToInt32(l.AuxInt)
3164 sym := auxToSym(l.Aux)
3165 mem := l.Args[1]
3166 ptr := l.Args[0]
3167 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
3168 continue
3169 }
3170 v.reset(OpAMD64ANDQload)
3171 v.AuxInt = int32ToAuxInt(off)
3172 v.Aux = symToAux(sym)
3173 v.AddArg3(x, ptr, mem)
3174 return true
3175 }
3176 break
3177 }
// match: (ANDQ x (NOTQ y))
// cond: buildcfg.GOAMD64 >= 3
// result: (ANDNQ x y)
3181 for {
3182 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
3183 x := v_0
3184 if v_1.Op != OpAMD64NOTQ {
3185 continue
3186 }
3187 y := v_1.Args[0]
3188 if !(buildcfg.GOAMD64 >= 3) {
3189 continue
3190 }
3191 v.reset(OpAMD64ANDNQ)
3192 v.AddArg2(x, y)
3193 return true
3194 }
3195 break
3196 }
// match: (ANDQ x (NEGQ x))
// cond: buildcfg.GOAMD64 >= 3
// result: (BLSIQ x)
3200 for {
3201 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
3202 x := v_0
3203 if v_1.Op != OpAMD64NEGQ || x != v_1.Args[0] || !(buildcfg.GOAMD64 >= 3) {
3204 continue
3205 }
3206 v.reset(OpAMD64BLSIQ)
3207 v.AddArg(x)
3208 return true
3209 }
3210 break
3211 }
// match: (ANDQ <t> x (ADDQconst [-1] x))
// cond: buildcfg.GOAMD64 >= 3
// result: (Select0 <t> (BLSRQ x))
3215 for {
3216 t := v.Type
3217 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
3218 x := v_0
3219 if v_1.Op != OpAMD64ADDQconst || auxIntToInt32(v_1.AuxInt) != -1 || x != v_1.Args[0] || !(buildcfg.GOAMD64 >= 3) {
3220 continue
3221 }
3222 v.reset(OpSelect0)
3223 v.Type = t
3224 v0 := b.NewValue0(v.Pos, OpAMD64BLSRQ, types.NewTuple(typ.UInt64, types.TypeFlags))
3225 v0.AddArg(x)
3226 v.AddArg(v0)
3227 return true
3228 }
3229 break
3230 }
3231 return false
3232 }
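// rewriteValueAMD64_OpAMD64ANDQconst collapses stacked masks, recognizes the 0xFF/0xFFFF masks as zero-extensions, and evaluates constant operands.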
3233 func rewriteValueAMD64_OpAMD64ANDQconst(v *Value) bool {
3234 v_0 := v.Args[0]
// match: (ANDQconst [c] (ANDQconst [d] x))
// result: (ANDQconst [c & d] x)
3237 for {
3238 c := auxIntToInt32(v.AuxInt)
3239 if v_0.Op != OpAMD64ANDQconst {
3240 break
3241 }
3242 d := auxIntToInt32(v_0.AuxInt)
3243 x := v_0.Args[0]
3244 v.reset(OpAMD64ANDQconst)
3245 v.AuxInt = int32ToAuxInt(c & d)
3246 v.AddArg(x)
3247 return true
3248 }
// match: (ANDQconst [0xFF] x)
// result: (MOVBQZX x)
3251 for {
3252 if auxIntToInt32(v.AuxInt) != 0xFF {
3253 break
3254 }
3255 x := v_0
3256 v.reset(OpAMD64MOVBQZX)
3257 v.AddArg(x)
3258 return true
3259 }
// match: (ANDQconst [0xFFFF] x)
// result: (MOVWQZX x)
3262 for {
3263 if auxIntToInt32(v.AuxInt) != 0xFFFF {
3264 break
3265 }
3266 x := v_0
3267 v.reset(OpAMD64MOVWQZX)
3268 v.AddArg(x)
3269 return true
3270 }
// match: (ANDQconst [0] _)
// result: (MOVQconst [0])
3273 for {
3274 if auxIntToInt32(v.AuxInt) != 0 {
3275 break
3276 }
3277 v.reset(OpAMD64MOVQconst)
3278 v.AuxInt = int64ToAuxInt(0)
3279 return true
3280 }
// match: (ANDQconst [-1] x)
// result: x
3283 for {
3284 if auxIntToInt32(v.AuxInt) != -1 {
3285 break
3286 }
3287 x := v_0
3288 v.copyOf(x)
3289 return true
3290 }
// match: (ANDQconst [c] (MOVQconst [d]))
// result: (MOVQconst [int64(c)&d])
3293 for {
3294 c := auxIntToInt32(v.AuxInt)
3295 if v_0.Op != OpAMD64MOVQconst {
3296 break
3297 }
3298 d := auxIntToInt64(v_0.AuxInt)
3299 v.reset(OpAMD64MOVQconst)
3300 v.AuxInt = int64ToAuxInt(int64(c) & d)
3301 return true
3302 }
3303 return false
3304 }
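// rewriteValueAMD64_OpAMD64ANDQconstmodify folds address arithmetic into the read-modify-write's offset and symbol.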
3305 func rewriteValueAMD64_OpAMD64ANDQconstmodify(v *Value) bool {
3306 v_1 := v.Args[1]
3307 v_0 := v.Args[0]
// match: (ANDQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
// cond: ValAndOff(valoff1).canAdd32(off2)
// result: (ANDQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
3311 for {
3312 valoff1 := auxIntToValAndOff(v.AuxInt)
3313 sym := auxToSym(v.Aux)
3314 if v_0.Op != OpAMD64ADDQconst {
3315 break
3316 }
3317 off2 := auxIntToInt32(v_0.AuxInt)
3318 base := v_0.Args[0]
3319 mem := v_1
3320 if !(ValAndOff(valoff1).canAdd32(off2)) {
3321 break
3322 }
3323 v.reset(OpAMD64ANDQconstmodify)
3324 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
3325 v.Aux = symToAux(sym)
3326 v.AddArg2(base, mem)
3327 return true
3328 }
// match: (ANDQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
// result: (ANDQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
3332 for {
3333 valoff1 := auxIntToValAndOff(v.AuxInt)
3334 sym1 := auxToSym(v.Aux)
3335 if v_0.Op != OpAMD64LEAQ {
3336 break
3337 }
3338 off2 := auxIntToInt32(v_0.AuxInt)
3339 sym2 := auxToSym(v_0.Aux)
3340 base := v_0.Args[0]
3341 mem := v_1
3342 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
3343 break
3344 }
3345 v.reset(OpAMD64ANDQconstmodify)
3346 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
3347 v.Aux = symToAux(mergeSym(sym1, sym2))
3348 v.AddArg2(base, mem)
3349 return true
3350 }
3351 return false
3352 }
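// rewriteValueAMD64_OpAMD64ANDQload folds address arithmetic into the load and forwards a just-stored SSE value through MOVQf2i.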
3353 func rewriteValueAMD64_OpAMD64ANDQload(v *Value) bool {
3354 v_2 := v.Args[2]
3355 v_1 := v.Args[1]
3356 v_0 := v.Args[0]
3357 b := v.Block
3358 typ := &b.Func.Config.Types
// match: (ANDQload [off1] {sym} val (ADDQconst [off2] base) mem)
// cond: is32Bit(int64(off1)+int64(off2))
// result: (ANDQload [off1+off2] {sym} val base mem)
3362 for {
3363 off1 := auxIntToInt32(v.AuxInt)
3364 sym := auxToSym(v.Aux)
3365 val := v_0
3366 if v_1.Op != OpAMD64ADDQconst {
3367 break
3368 }
3369 off2 := auxIntToInt32(v_1.AuxInt)
3370 base := v_1.Args[0]
3371 mem := v_2
3372 if !(is32Bit(int64(off1) + int64(off2))) {
3373 break
3374 }
3375 v.reset(OpAMD64ANDQload)
3376 v.AuxInt = int32ToAuxInt(off1 + off2)
3377 v.Aux = symToAux(sym)
3378 v.AddArg3(val, base, mem)
3379 return true
3380 }
// match: (ANDQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
// result: (ANDQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
3384 for {
3385 off1 := auxIntToInt32(v.AuxInt)
3386 sym1 := auxToSym(v.Aux)
3387 val := v_0
3388 if v_1.Op != OpAMD64LEAQ {
3389 break
3390 }
3391 off2 := auxIntToInt32(v_1.AuxInt)
3392 sym2 := auxToSym(v_1.Aux)
3393 base := v_1.Args[0]
3394 mem := v_2
3395 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
3396 break
3397 }
3398 v.reset(OpAMD64ANDQload)
3399 v.AuxInt = int32ToAuxInt(off1 + off2)
3400 v.Aux = symToAux(mergeSym(sym1, sym2))
3401 v.AddArg3(val, base, mem)
3402 return true
3403 }
// match: (ANDQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
// result: (ANDQ x (MOVQf2i y))
3406 for {
3407 off := auxIntToInt32(v.AuxInt)
3408 sym := auxToSym(v.Aux)
3409 x := v_0
3410 ptr := v_1
3411 if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
3412 break
3413 }
3414 y := v_2.Args[1]
3415 if ptr != v_2.Args[0] {
3416 break
3417 }
3418 v.reset(OpAMD64ANDQ)
3419 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
3420 v0.AddArg(y)
3421 v.AddArg2(x, v0)
3422 return true
3423 }
3424 return false
3425 }
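// rewriteValueAMD64_OpAMD64ANDQmodify folds address arithmetic into the read-modify-write's offset and symbol.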
3426 func rewriteValueAMD64_OpAMD64ANDQmodify(v *Value) bool {
3427 v_2 := v.Args[2]
3428 v_1 := v.Args[1]
3429 v_0 := v.Args[0]
// match: (ANDQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
// cond: is32Bit(int64(off1)+int64(off2))
// result: (ANDQmodify [off1+off2] {sym} base val mem)
3433 for {
3434 off1 := auxIntToInt32(v.AuxInt)
3435 sym := auxToSym(v.Aux)
3436 if v_0.Op != OpAMD64ADDQconst {
3437 break
3438 }
3439 off2 := auxIntToInt32(v_0.AuxInt)
3440 base := v_0.Args[0]
3441 val := v_1
3442 mem := v_2
3443 if !(is32Bit(int64(off1) + int64(off2))) {
3444 break
3445 }
3446 v.reset(OpAMD64ANDQmodify)
3447 v.AuxInt = int32ToAuxInt(off1 + off2)
3448 v.Aux = symToAux(sym)
3449 v.AddArg3(base, val, mem)
3450 return true
3451 }
// match: (ANDQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
// result: (ANDQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
3455 for {
3456 off1 := auxIntToInt32(v.AuxInt)
3457 sym1 := auxToSym(v.Aux)
3458 if v_0.Op != OpAMD64LEAQ {
3459 break
3460 }
3461 off2 := auxIntToInt32(v_0.AuxInt)
3462 sym2 := auxToSym(v_0.Aux)
3463 base := v_0.Args[0]
3464 val := v_1
3465 mem := v_2
3466 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
3467 break
3468 }
3469 v.reset(OpAMD64ANDQmodify)
3470 v.AuxInt = int32ToAuxInt(off1 + off2)
3471 v.Aux = symToAux(mergeSym(sym1, sym2))
3472 v.AddArg3(base, val, mem)
3473 return true
3474 }
3475 return false
3476 }
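// rewriteValueAMD64_OpAMD64BSFQ drops a zero-extension under BSFQ when the ORQconst guard bit already bounds the scan result.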
3477 func rewriteValueAMD64_OpAMD64BSFQ(v *Value) bool {
3478 v_0 := v.Args[0]
3479 b := v.Block
// match: (BSFQ (ORQconst <t> [1<<8] (MOVBQZX x)))
// result: (BSFQ (ORQconst <t> [1<<8] x))
3482 for {
3483 if v_0.Op != OpAMD64ORQconst {
3484 break
3485 }
3486 t := v_0.Type
3487 if auxIntToInt32(v_0.AuxInt) != 1<<8 {
3488 break
3489 }
3490 v_0_0 := v_0.Args[0]
3491 if v_0_0.Op != OpAMD64MOVBQZX {
3492 break
3493 }
3494 x := v_0_0.Args[0]
3495 v.reset(OpAMD64BSFQ)
3496 v0 := b.NewValue0(v.Pos, OpAMD64ORQconst, t)
3497 v0.AuxInt = int32ToAuxInt(1 << 8)
3498 v0.AddArg(x)
3499 v.AddArg(v0)
3500 return true
3501 }
// match: (BSFQ (ORQconst <t> [1<<16] (MOVWQZX x)))
// result: (BSFQ (ORQconst <t> [1<<16] x))
3504 for {
3505 if v_0.Op != OpAMD64ORQconst {
3506 break
3507 }
3508 t := v_0.Type
3509 if auxIntToInt32(v_0.AuxInt) != 1<<16 {
3510 break
3511 }
3512 v_0_0 := v_0.Args[0]
3513 if v_0_0.Op != OpAMD64MOVWQZX {
3514 break
3515 }
3516 x := v_0_0.Args[0]
3517 v.reset(OpAMD64BSFQ)
3518 v0 := b.NewValue0(v.Pos, OpAMD64ORQconst, t)
3519 v0.AuxInt = int32ToAuxInt(1 << 16)
3520 v0.AddArg(x)
3521 v.AddArg(v0)
3522 return true
3523 }
3524 return false
3525 }
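// rewriteValueAMD64_OpAMD64BSWAPL cancels double byte-swaps and trades BSWAPL of a single-use load for a MOVBEL load (and back) when MOVBE is available.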
3526 func rewriteValueAMD64_OpAMD64BSWAPL(v *Value) bool {
3527 v_0 := v.Args[0]
3528 b := v.Block
3529 typ := &b.Func.Config.Types
// match: (BSWAPL (BSWAPL p))
// result: p
3532 for {
3533 if v_0.Op != OpAMD64BSWAPL {
3534 break
3535 }
3536 p := v_0.Args[0]
3537 v.copyOf(p)
3538 return true
3539 }
// match: (BSWAPL x:(MOVLload [i] {s} p mem))
// cond: x.Uses == 1 && buildcfg.GOAMD64 >= 3
// result: @x.Block (MOVBELload [i] {s} p mem)
3543 for {
3544 x := v_0
3545 if x.Op != OpAMD64MOVLload {
3546 break
3547 }
3548 i := auxIntToInt32(x.AuxInt)
3549 s := auxToSym(x.Aux)
3550 mem := x.Args[1]
3551 p := x.Args[0]
3552 if !(x.Uses == 1 && buildcfg.GOAMD64 >= 3) {
3553 break
3554 }
3555 b = x.Block
3556 v0 := b.NewValue0(x.Pos, OpAMD64MOVBELload, typ.UInt32)
3557 v.copyOf(v0)
3558 v0.AuxInt = int32ToAuxInt(i)
3559 v0.Aux = symToAux(s)
3560 v0.AddArg2(p, mem)
3561 return true
3562 }
// match: (BSWAPL x:(MOVBELload [i] {s} p mem))
// cond: x.Uses == 1
// result: @x.Block (MOVLload [i] {s} p mem)
3566 for {
3567 x := v_0
3568 if x.Op != OpAMD64MOVBELload {
3569 break
3570 }
3571 i := auxIntToInt32(x.AuxInt)
3572 s := auxToSym(x.Aux)
3573 mem := x.Args[1]
3574 p := x.Args[0]
3575 if !(x.Uses == 1) {
3576 break
3577 }
3578 b = x.Block
3579 v0 := b.NewValue0(x.Pos, OpAMD64MOVLload, typ.UInt32)
3580 v.copyOf(v0)
3581 v0.AuxInt = int32ToAuxInt(i)
3582 v0.Aux = symToAux(s)
3583 v0.AddArg2(p, mem)
3584 return true
3585 }
3586 return false
3587 }
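// rewriteValueAMD64_OpAMD64BSWAPQ cancels double byte-swaps and trades BSWAPQ of a single-use load for a MOVBEQ load (and back) when MOVBE is available.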
3588 func rewriteValueAMD64_OpAMD64BSWAPQ(v *Value) bool {
3589 v_0 := v.Args[0]
3590 b := v.Block
3591 typ := &b.Func.Config.Types
// match: (BSWAPQ (BSWAPQ p))
// result: p
3594 for {
3595 if v_0.Op != OpAMD64BSWAPQ {
3596 break
3597 }
3598 p := v_0.Args[0]
3599 v.copyOf(p)
3600 return true
3601 }
// match: (BSWAPQ x:(MOVQload [i] {s} p mem))
// cond: x.Uses == 1 && buildcfg.GOAMD64 >= 3
// result: @x.Block (MOVBEQload [i] {s} p mem)
3605 for {
3606 x := v_0
3607 if x.Op != OpAMD64MOVQload {
3608 break
3609 }
3610 i := auxIntToInt32(x.AuxInt)
3611 s := auxToSym(x.Aux)
3612 mem := x.Args[1]
3613 p := x.Args[0]
3614 if !(x.Uses == 1 && buildcfg.GOAMD64 >= 3) {
3615 break
3616 }
3617 b = x.Block
3618 v0 := b.NewValue0(x.Pos, OpAMD64MOVBEQload, typ.UInt64)
3619 v.copyOf(v0)
3620 v0.AuxInt = int32ToAuxInt(i)
3621 v0.Aux = symToAux(s)
3622 v0.AddArg2(p, mem)
3623 return true
3624 }
// match: (BSWAPQ x:(MOVBEQload [i] {s} p mem))
// cond: x.Uses == 1
// result: @x.Block (MOVQload [i] {s} p mem)
3628 for {
3629 x := v_0
3630 if x.Op != OpAMD64MOVBEQload {
3631 break
3632 }
3633 i := auxIntToInt32(x.AuxInt)
3634 s := auxToSym(x.Aux)
3635 mem := x.Args[1]
3636 p := x.Args[0]
3637 if !(x.Uses == 1) {
3638 break
3639 }
3640 b = x.Block
3641 v0 := b.NewValue0(x.Pos, OpAMD64MOVQload, typ.UInt64)
3642 v.copyOf(v0)
3643 v0.AuxInt = int32ToAuxInt(i)
3644 v0.Aux = symToAux(s)
3645 v0.AddArg2(p, mem)
3646 return true
3647 }
3648 return false
3649 }
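// rewriteValueAMD64_OpAMD64BTCQconst evaluates complementing bit c of a constant.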
3650 func rewriteValueAMD64_OpAMD64BTCQconst(v *Value) bool {
3651 v_0 := v.Args[0]
// match: (BTCQconst [c] (MOVQconst [d]))
// result: (MOVQconst [d^(1<<uint32(c))])
3654 for {
3655 c := auxIntToInt8(v.AuxInt)
3656 if v_0.Op != OpAMD64MOVQconst {
3657 break
3658 }
3659 d := auxIntToInt64(v_0.AuxInt)
3660 v.reset(OpAMD64MOVQconst)
3661 v.AuxInt = int64ToAuxInt(d ^ (1 << uint32(c)))
3662 return true
3663 }
3664 return false
3665 }
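// rewriteValueAMD64_OpAMD64BTLconst retargets the tested bit across constant shifts and self-adds, and rewrites a test of bit 0 of a variable right shift as a variable-index BT.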
3666 func rewriteValueAMD64_OpAMD64BTLconst(v *Value) bool {
3667 v_0 := v.Args[0]
// match: (BTLconst [c] (SHRQconst [d] x))
// cond: (c+d)<64
// result: (BTQconst [c+d] x)
3671 for {
3672 c := auxIntToInt8(v.AuxInt)
3673 if v_0.Op != OpAMD64SHRQconst {
3674 break
3675 }
3676 d := auxIntToInt8(v_0.AuxInt)
3677 x := v_0.Args[0]
3678 if !((c + d) < 64) {
3679 break
3680 }
3681 v.reset(OpAMD64BTQconst)
3682 v.AuxInt = int8ToAuxInt(c + d)
3683 v.AddArg(x)
3684 return true
3685 }
// match: (BTLconst [c] (ADDQ x x))
// cond: c>1
// result: (BTLconst [c-1] x)
3689 for {
3690 c := auxIntToInt8(v.AuxInt)
3691 if v_0.Op != OpAMD64ADDQ {
3692 break
3693 }
3694 x := v_0.Args[1]
3695 if x != v_0.Args[0] || !(c > 1) {
3696 break
3697 }
3698 v.reset(OpAMD64BTLconst)
3699 v.AuxInt = int8ToAuxInt(c - 1)
3700 v.AddArg(x)
3701 return true
3702 }
// match: (BTLconst [c] (SHLQconst [d] x))
// cond: c>d
// result: (BTLconst [c-d] x)
3706 for {
3707 c := auxIntToInt8(v.AuxInt)
3708 if v_0.Op != OpAMD64SHLQconst {
3709 break
3710 }
3711 d := auxIntToInt8(v_0.AuxInt)
3712 x := v_0.Args[0]
3713 if !(c > d) {
3714 break
3715 }
3716 v.reset(OpAMD64BTLconst)
3717 v.AuxInt = int8ToAuxInt(c - d)
3718 v.AddArg(x)
3719 return true
3720 }
// match: (BTLconst [0] s:(SHRQ x y))
// result: (BTQ y x)
3723 for {
3724 if auxIntToInt8(v.AuxInt) != 0 {
3725 break
3726 }
3727 s := v_0
3728 if s.Op != OpAMD64SHRQ {
3729 break
3730 }
3731 y := s.Args[1]
3732 x := s.Args[0]
3733 v.reset(OpAMD64BTQ)
3734 v.AddArg2(y, x)
3735 return true
3736 }
// match: (BTLconst [c] (SHRLconst [d] x))
// cond: (c+d)<32
// result: (BTLconst [c+d] x)
3740 for {
3741 c := auxIntToInt8(v.AuxInt)
3742 if v_0.Op != OpAMD64SHRLconst {
3743 break
3744 }
3745 d := auxIntToInt8(v_0.AuxInt)
3746 x := v_0.Args[0]
3747 if !((c + d) < 32) {
3748 break
3749 }
3750 v.reset(OpAMD64BTLconst)
3751 v.AuxInt = int8ToAuxInt(c + d)
3752 v.AddArg(x)
3753 return true
3754 }
// match: (BTLconst [c] (ADDL x x))
// cond: c>1
// result: (BTLconst [c-1] x)
3758 for {
3759 c := auxIntToInt8(v.AuxInt)
3760 if v_0.Op != OpAMD64ADDL {
3761 break
3762 }
3763 x := v_0.Args[1]
3764 if x != v_0.Args[0] || !(c > 1) {
3765 break
3766 }
3767 v.reset(OpAMD64BTLconst)
3768 v.AuxInt = int8ToAuxInt(c - 1)
3769 v.AddArg(x)
3770 return true
3771 }
// match: (BTLconst [c] (SHLLconst [d] x))
// cond: c>d
// result: (BTLconst [c-d] x)
3775 for {
3776 c := auxIntToInt8(v.AuxInt)
3777 if v_0.Op != OpAMD64SHLLconst {
3778 break
3779 }
3780 d := auxIntToInt8(v_0.AuxInt)
3781 x := v_0.Args[0]
3782 if !(c > d) {
3783 break
3784 }
3785 v.reset(OpAMD64BTLconst)
3786 v.AuxInt = int8ToAuxInt(c - d)
3787 v.AddArg(x)
3788 return true
3789 }
// match: (BTLconst [0] s:(SHRL x y))
// result: (BTL y x)
3792 for {
3793 if auxIntToInt8(v.AuxInt) != 0 {
3794 break
3795 }
3796 s := v_0
3797 if s.Op != OpAMD64SHRL {
3798 break
3799 }
3800 y := s.Args[1]
3801 x := s.Args[0]
3802 v.reset(OpAMD64BTL)
3803 v.AddArg2(y, x)
3804 return true
3805 }
// match: (BTLconst [0] s:(SHRXL x y))
// result: (BTL y x)
3808 for {
3809 if auxIntToInt8(v.AuxInt) != 0 {
3810 break
3811 }
3812 s := v_0
3813 if s.Op != OpAMD64SHRXL {
3814 break
3815 }
3816 y := s.Args[1]
3817 x := s.Args[0]
3818 v.reset(OpAMD64BTL)
3819 v.AddArg2(y, x)
3820 return true
3821 }
3822 return false
3823 }
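// rewriteValueAMD64_OpAMD64BTQconst retargets the tested bit across constant shifts and self-adds, and rewrites a test of bit 0 of a variable right shift as BTQ.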
3824 func rewriteValueAMD64_OpAMD64BTQconst(v *Value) bool {
3825 v_0 := v.Args[0]
// match: (BTQconst [c] (SHRQconst [d] x))
// cond: (c+d)<64
// result: (BTQconst [c+d] x)
3829 for {
3830 c := auxIntToInt8(v.AuxInt)
3831 if v_0.Op != OpAMD64SHRQconst {
3832 break
3833 }
3834 d := auxIntToInt8(v_0.AuxInt)
3835 x := v_0.Args[0]
3836 if !((c + d) < 64) {
3837 break
3838 }
3839 v.reset(OpAMD64BTQconst)
3840 v.AuxInt = int8ToAuxInt(c + d)
3841 v.AddArg(x)
3842 return true
3843 }
// match: (BTQconst [c] (ADDQ x x))
// cond: c>1
// result: (BTQconst [c-1] x)
3847 for {
3848 c := auxIntToInt8(v.AuxInt)
3849 if v_0.Op != OpAMD64ADDQ {
3850 break
3851 }
3852 x := v_0.Args[1]
3853 if x != v_0.Args[0] || !(c > 1) {
3854 break
3855 }
3856 v.reset(OpAMD64BTQconst)
3857 v.AuxInt = int8ToAuxInt(c - 1)
3858 v.AddArg(x)
3859 return true
3860 }
// match: (BTQconst [c] (SHLQconst [d] x))
// cond: c>d
// result: (BTQconst [c-d] x)
3864 for {
3865 c := auxIntToInt8(v.AuxInt)
3866 if v_0.Op != OpAMD64SHLQconst {
3867 break
3868 }
3869 d := auxIntToInt8(v_0.AuxInt)
3870 x := v_0.Args[0]
3871 if !(c > d) {
3872 break
3873 }
3874 v.reset(OpAMD64BTQconst)
3875 v.AuxInt = int8ToAuxInt(c - d)
3876 v.AddArg(x)
3877 return true
3878 }
// match: (BTQconst [0] s:(SHRQ x y))
// result: (BTQ y x)
3881 for {
3882 if auxIntToInt8(v.AuxInt) != 0 {
3883 break
3884 }
3885 s := v_0
3886 if s.Op != OpAMD64SHRQ {
3887 break
3888 }
3889 y := s.Args[1]
3890 x := s.Args[0]
3891 v.reset(OpAMD64BTQ)
3892 v.AddArg2(y, x)
3893 return true
3894 }
3895 return false
3896 }
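// rewriteValueAMD64_OpAMD64BTRQconst cancels a prior set/complement of the same bit and evaluates constant operands.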
3897 func rewriteValueAMD64_OpAMD64BTRQconst(v *Value) bool {
3898 v_0 := v.Args[0]
// match: (BTRQconst [c] (BTSQconst [c] x))
// result: (BTRQconst [c] x)
3901 for {
3902 c := auxIntToInt8(v.AuxInt)
3903 if v_0.Op != OpAMD64BTSQconst || auxIntToInt8(v_0.AuxInt) != c {
3904 break
3905 }
3906 x := v_0.Args[0]
3907 v.reset(OpAMD64BTRQconst)
3908 v.AuxInt = int8ToAuxInt(c)
3909 v.AddArg(x)
3910 return true
3911 }
// match: (BTRQconst [c] (BTCQconst [c] x))
// result: (BTRQconst [c] x)
3914 for {
3915 c := auxIntToInt8(v.AuxInt)
3916 if v_0.Op != OpAMD64BTCQconst || auxIntToInt8(v_0.AuxInt) != c {
3917 break
3918 }
3919 x := v_0.Args[0]
3920 v.reset(OpAMD64BTRQconst)
3921 v.AuxInt = int8ToAuxInt(c)
3922 v.AddArg(x)
3923 return true
3924 }
// match: (BTRQconst [c] (MOVQconst [d]))
// result: (MOVQconst [d&^(1<<uint32(c))])
3927 for {
3928 c := auxIntToInt8(v.AuxInt)
3929 if v_0.Op != OpAMD64MOVQconst {
3930 break
3931 }
3932 d := auxIntToInt64(v_0.AuxInt)
3933 v.reset(OpAMD64MOVQconst)
3934 v.AuxInt = int64ToAuxInt(d &^ (1 << uint32(c)))
3935 return true
3936 }
3937 return false
3938 }
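// rewriteValueAMD64_OpAMD64BTSQconst cancels a prior reset/complement of the same bit and evaluates constant operands.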
3939 func rewriteValueAMD64_OpAMD64BTSQconst(v *Value) bool {
3940 v_0 := v.Args[0]
// match: (BTSQconst [c] (BTRQconst [c] x))
// result: (BTSQconst [c] x)
3943 for {
3944 c := auxIntToInt8(v.AuxInt)
3945 if v_0.Op != OpAMD64BTRQconst || auxIntToInt8(v_0.AuxInt) != c {
3946 break
3947 }
3948 x := v_0.Args[0]
3949 v.reset(OpAMD64BTSQconst)
3950 v.AuxInt = int8ToAuxInt(c)
3951 v.AddArg(x)
3952 return true
3953 }
// match: (BTSQconst [c] (BTCQconst [c] x))
// result: (BTSQconst [c] x)
3956 for {
3957 c := auxIntToInt8(v.AuxInt)
3958 if v_0.Op != OpAMD64BTCQconst || auxIntToInt8(v_0.AuxInt) != c {
3959 break
3960 }
3961 x := v_0.Args[0]
3962 v.reset(OpAMD64BTSQconst)
3963 v.AuxInt = int8ToAuxInt(c)
3964 v.AddArg(x)
3965 return true
3966 }
// match: (BTSQconst [c] (MOVQconst [d]))
// result: (MOVQconst [d|(1<<uint32(c))])
3969 for {
3970 c := auxIntToInt8(v.AuxInt)
3971 if v_0.Op != OpAMD64MOVQconst {
3972 break
3973 }
3974 d := auxIntToInt64(v_0.AuxInt)
3975 v.reset(OpAMD64MOVQconst)
3976 v.AuxInt = int64ToAuxInt(d | (1 << uint32(c)))
3977 return true
3978 }
3979 return false
3980 }
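// rewriteValueAMD64_OpAMD64CMOVLCC commutes the condition under InvertFlags and resolves statically known flag values.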
3981 func rewriteValueAMD64_OpAMD64CMOVLCC(v *Value) bool {
3982 v_2 := v.Args[2]
3983 v_1 := v.Args[1]
3984 v_0 := v.Args[0]
// match: (CMOVLCC x y (InvertFlags cond))
// result: (CMOVLLS x y cond)
3987 for {
3988 x := v_0
3989 y := v_1
3990 if v_2.Op != OpAMD64InvertFlags {
3991 break
3992 }
3993 cond := v_2.Args[0]
3994 v.reset(OpAMD64CMOVLLS)
3995 v.AddArg3(x, y, cond)
3996 return true
3997 }
// match: (CMOVLCC _ x (FlagEQ))
// result: x
4000 for {
4001 x := v_1
4002 if v_2.Op != OpAMD64FlagEQ {
4003 break
4004 }
4005 v.copyOf(x)
4006 return true
4007 }
// match: (CMOVLCC _ x (FlagGT_UGT))
// result: x
4010 for {
4011 x := v_1
4012 if v_2.Op != OpAMD64FlagGT_UGT {
4013 break
4014 }
4015 v.copyOf(x)
4016 return true
4017 }
// match: (CMOVLCC y _ (FlagGT_ULT))
// result: y
4020 for {
4021 y := v_0
4022 if v_2.Op != OpAMD64FlagGT_ULT {
4023 break
4024 }
4025 v.copyOf(y)
4026 return true
4027 }
// match: (CMOVLCC y _ (FlagLT_ULT))
// result: y
4030 for {
4031 y := v_0
4032 if v_2.Op != OpAMD64FlagLT_ULT {
4033 break
4034 }
4035 v.copyOf(y)
4036 return true
4037 }
// match: (CMOVLCC _ x (FlagLT_UGT))
// result: x
4040 for {
4041 x := v_1
4042 if v_2.Op != OpAMD64FlagLT_UGT {
4043 break
4044 }
4045 v.copyOf(x)
4046 return true
4047 }
4048 return false
4049 }
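// rewriteValueAMD64_OpAMD64CMOVLCS commutes the condition under InvertFlags and resolves statically known flag values.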
4050 func rewriteValueAMD64_OpAMD64CMOVLCS(v *Value) bool {
4051 v_2 := v.Args[2]
4052 v_1 := v.Args[1]
4053 v_0 := v.Args[0]
// match: (CMOVLCS x y (InvertFlags cond))
// result: (CMOVLHI x y cond)
4056 for {
4057 x := v_0
4058 y := v_1
4059 if v_2.Op != OpAMD64InvertFlags {
4060 break
4061 }
4062 cond := v_2.Args[0]
4063 v.reset(OpAMD64CMOVLHI)
4064 v.AddArg3(x, y, cond)
4065 return true
4066 }
// match: (CMOVLCS y _ (FlagEQ))
// result: y
4069 for {
4070 y := v_0
4071 if v_2.Op != OpAMD64FlagEQ {
4072 break
4073 }
4074 v.copyOf(y)
4075 return true
4076 }
// match: (CMOVLCS y _ (FlagGT_UGT))
// result: y
4079 for {
4080 y := v_0
4081 if v_2.Op != OpAMD64FlagGT_UGT {
4082 break
4083 }
4084 v.copyOf(y)
4085 return true
4086 }
// match: (CMOVLCS _ x (FlagGT_ULT))
// result: x
4089 for {
4090 x := v_1
4091 if v_2.Op != OpAMD64FlagGT_ULT {
4092 break
4093 }
4094 v.copyOf(x)
4095 return true
4096 }
// match: (CMOVLCS _ x (FlagLT_ULT))
// result: x
4099 for {
4100 x := v_1
4101 if v_2.Op != OpAMD64FlagLT_ULT {
4102 break
4103 }
4104 v.copyOf(x)
4105 return true
4106 }
// match: (CMOVLCS y _ (FlagLT_UGT))
// result: y
4109 for {
4110 y := v_0
4111 if v_2.Op != OpAMD64FlagLT_UGT {
4112 break
4113 }
4114 v.copyOf(y)
4115 return true
4116 }
4117 return false
4118 }
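// rewriteValueAMD64_OpAMD64CMOVLEQ absorbs InvertFlags (equality is symmetric), resolves known flag values, and reuses the flag output of a BLSRQ/BLSRL instead of a separate TEST.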
4119 func rewriteValueAMD64_OpAMD64CMOVLEQ(v *Value) bool {
4120 v_2 := v.Args[2]
4121 v_1 := v.Args[1]
4122 v_0 := v.Args[0]
4123 b := v.Block
// match: (CMOVLEQ x y (InvertFlags cond))
// result: (CMOVLEQ x y cond)
4126 for {
4127 x := v_0
4128 y := v_1
4129 if v_2.Op != OpAMD64InvertFlags {
4130 break
4131 }
4132 cond := v_2.Args[0]
4133 v.reset(OpAMD64CMOVLEQ)
4134 v.AddArg3(x, y, cond)
4135 return true
4136 }
// match: (CMOVLEQ _ x (FlagEQ))
// result: x
4139 for {
4140 x := v_1
4141 if v_2.Op != OpAMD64FlagEQ {
4142 break
4143 }
4144 v.copyOf(x)
4145 return true
4146 }
// match: (CMOVLEQ y _ (FlagGT_UGT))
// result: y
4149 for {
4150 y := v_0
4151 if v_2.Op != OpAMD64FlagGT_UGT {
4152 break
4153 }
4154 v.copyOf(y)
4155 return true
4156 }
// match: (CMOVLEQ y _ (FlagGT_ULT))
// result: y
4159 for {
4160 y := v_0
4161 if v_2.Op != OpAMD64FlagGT_ULT {
4162 break
4163 }
4164 v.copyOf(y)
4165 return true
4166 }
// match: (CMOVLEQ y _ (FlagLT_ULT))
// result: y
4169 for {
4170 y := v_0
4171 if v_2.Op != OpAMD64FlagLT_ULT {
4172 break
4173 }
4174 v.copyOf(y)
4175 return true
4176 }
// match: (CMOVLEQ y _ (FlagLT_UGT))
// result: y
4179 for {
4180 y := v_0
4181 if v_2.Op != OpAMD64FlagLT_UGT {
4182 break
4183 }
4184 v.copyOf(y)
4185 return true
4186 }
// match: (CMOVLEQ x y (TESTQ s:(Select0 blsr:(BLSRQ _)) s))
// result: (CMOVLEQ x y (Select1 <types.TypeFlags> blsr))
4189 for {
4190 x := v_0
4191 y := v_1
4192 if v_2.Op != OpAMD64TESTQ {
4193 break
4194 }
4195 _ = v_2.Args[1]
4196 v_2_0 := v_2.Args[0]
4197 v_2_1 := v_2.Args[1]
4198 for _i0 := 0; _i0 <= 1; _i0, v_2_0, v_2_1 = _i0+1, v_2_1, v_2_0 {
4199 s := v_2_0
4200 if s.Op != OpSelect0 {
4201 continue
4202 }
4203 blsr := s.Args[0]
4204 if blsr.Op != OpAMD64BLSRQ || s != v_2_1 {
4205 continue
4206 }
4207 v.reset(OpAMD64CMOVLEQ)
4208 v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
4209 v0.AddArg(blsr)
4210 v.AddArg3(x, y, v0)
4211 return true
4212 }
4213 break
4214 }
// match: (CMOVLEQ x y (TESTL s:(Select0 blsr:(BLSRL _)) s))
// result: (CMOVLEQ x y (Select1 <types.TypeFlags> blsr))
4217 for {
4218 x := v_0
4219 y := v_1
4220 if v_2.Op != OpAMD64TESTL {
4221 break
4222 }
4223 _ = v_2.Args[1]
4224 v_2_0 := v_2.Args[0]
4225 v_2_1 := v_2.Args[1]
4226 for _i0 := 0; _i0 <= 1; _i0, v_2_0, v_2_1 = _i0+1, v_2_1, v_2_0 {
4227 s := v_2_0
4228 if s.Op != OpSelect0 {
4229 continue
4230 }
4231 blsr := s.Args[0]
4232 if blsr.Op != OpAMD64BLSRL || s != v_2_1 {
4233 continue
4234 }
4235 v.reset(OpAMD64CMOVLEQ)
4236 v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
4237 v0.AddArg(blsr)
4238 v.AddArg3(x, y, v0)
4239 return true
4240 }
4241 break
4242 }
4243 return false
4244 }
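// rewriteValueAMD64_OpAMD64CMOVLGE commutes the condition under InvertFlags, resolves known flag values, and tightens a single-use compare against 128 to a compare against 127 so the constant fits in a signed byte.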
4245 func rewriteValueAMD64_OpAMD64CMOVLGE(v *Value) bool {
4246 v_2 := v.Args[2]
4247 v_1 := v.Args[1]
4248 v_0 := v.Args[0]
4249 b := v.Block
// match: (CMOVLGE x y (InvertFlags cond))
// result: (CMOVLLE x y cond)
4252 for {
4253 x := v_0
4254 y := v_1
4255 if v_2.Op != OpAMD64InvertFlags {
4256 break
4257 }
4258 cond := v_2.Args[0]
4259 v.reset(OpAMD64CMOVLLE)
4260 v.AddArg3(x, y, cond)
4261 return true
4262 }
// match: (CMOVLGE _ x (FlagEQ))
// result: x
4265 for {
4266 x := v_1
4267 if v_2.Op != OpAMD64FlagEQ {
4268 break
4269 }
4270 v.copyOf(x)
4271 return true
4272 }
// match: (CMOVLGE _ x (FlagGT_UGT))
// result: x
4275 for {
4276 x := v_1
4277 if v_2.Op != OpAMD64FlagGT_UGT {
4278 break
4279 }
4280 v.copyOf(x)
4281 return true
4282 }
// match: (CMOVLGE _ x (FlagGT_ULT))
// result: x
4285 for {
4286 x := v_1
4287 if v_2.Op != OpAMD64FlagGT_ULT {
4288 break
4289 }
4290 v.copyOf(x)
4291 return true
4292 }
// match: (CMOVLGE y _ (FlagLT_ULT))
// result: y
4295 for {
4296 y := v_0
4297 if v_2.Op != OpAMD64FlagLT_ULT {
4298 break
4299 }
4300 v.copyOf(y)
4301 return true
4302 }
// match: (CMOVLGE y _ (FlagLT_UGT))
// result: y
4305 for {
4306 y := v_0
4307 if v_2.Op != OpAMD64FlagLT_UGT {
4308 break
4309 }
4310 v.copyOf(y)
4311 return true
4312 }
// match: (CMOVLGE x y c:(CMPQconst [128] z))
// cond: c.Uses == 1
// result: (CMOVLGT x y (CMPQconst [127] z))
4316 for {
4317 x := v_0
4318 y := v_1
4319 c := v_2
4320 if c.Op != OpAMD64CMPQconst || auxIntToInt32(c.AuxInt) != 128 {
4321 break
4322 }
4323 z := c.Args[0]
4324 if !(c.Uses == 1) {
4325 break
4326 }
4327 v.reset(OpAMD64CMOVLGT)
4328 v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
4329 v0.AuxInt = int32ToAuxInt(127)
4330 v0.AddArg(z)
4331 v.AddArg3(x, y, v0)
4332 return true
4333 }
// match: (CMOVLGE x y c:(CMPLconst [128] z))
// cond: c.Uses == 1
// result: (CMOVLGT x y (CMPLconst [127] z))
4337 for {
4338 x := v_0
4339 y := v_1
4340 c := v_2
4341 if c.Op != OpAMD64CMPLconst || auxIntToInt32(c.AuxInt) != 128 {
4342 break
4343 }
4344 z := c.Args[0]
4345 if !(c.Uses == 1) {
4346 break
4347 }
4348 v.reset(OpAMD64CMOVLGT)
4349 v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
4350 v0.AuxInt = int32ToAuxInt(127)
4351 v0.AddArg(z)
4352 v.AddArg3(x, y, v0)
4353 return true
4354 }
4355 return false
4356 }
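// rewriteValueAMD64_OpAMD64CMOVLGT commutes the condition under InvertFlags and resolves statically known flag values.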
4357 func rewriteValueAMD64_OpAMD64CMOVLGT(v *Value) bool {
4358 v_2 := v.Args[2]
4359 v_1 := v.Args[1]
4360 v_0 := v.Args[0]
// match: (CMOVLGT x y (InvertFlags cond))
// result: (CMOVLLT x y cond)
4363 for {
4364 x := v_0
4365 y := v_1
4366 if v_2.Op != OpAMD64InvertFlags {
4367 break
4368 }
4369 cond := v_2.Args[0]
4370 v.reset(OpAMD64CMOVLLT)
4371 v.AddArg3(x, y, cond)
4372 return true
4373 }
// match: (CMOVLGT y _ (FlagEQ))
// result: y
4376 for {
4377 y := v_0
4378 if v_2.Op != OpAMD64FlagEQ {
4379 break
4380 }
4381 v.copyOf(y)
4382 return true
4383 }
4384
// match: (CMOVLGT _ x (FlagGT_UGT))
// result: x
4387 x := v_1
4388 if v_2.Op != OpAMD64FlagGT_UGT {
4389 break
4390 }
4391 v.copyOf(x)
4392 return true
4393 }
4394
// match: (CMOVLGT _ x (FlagGT_ULT))
// result: x
4397 x := v_1
4398 if v_2.Op != OpAMD64FlagGT_ULT {
4399 break
4400 }
4401 v.copyOf(x)
4402 return true
4403 }
4404
// match: (CMOVLGT y _ (FlagLT_ULT))
// result: y
4407 y := v_0
4408 if v_2.Op != OpAMD64FlagLT_ULT {
4409 break
4410 }
4411 v.copyOf(y)
4412 return true
4413 }
4414
// match: (CMOVLGT y _ (FlagLT_UGT))
// result: y
4417 y := v_0
4418 if v_2.Op != OpAMD64FlagLT_UGT {
4419 break
4420 }
4421 v.copyOf(y)
4422 return true
4423 }
4424 return false
4425 }
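// rewriteValueAMD64_OpAMD64CMOVLHI commutes the condition under InvertFlags and resolves statically known flag values.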
4426 func rewriteValueAMD64_OpAMD64CMOVLHI(v *Value) bool {
4427 v_2 := v.Args[2]
4428 v_1 := v.Args[1]
4429 v_0 := v.Args[0]
// match: (CMOVLHI x y (InvertFlags cond))
// result: (CMOVLCS x y cond)
4432 for {
4433 x := v_0
4434 y := v_1
4435 if v_2.Op != OpAMD64InvertFlags {
4436 break
4437 }
4438 cond := v_2.Args[0]
4439 v.reset(OpAMD64CMOVLCS)
4440 v.AddArg3(x, y, cond)
4441 return true
4442 }
// match: (CMOVLHI y _ (FlagEQ))
// result: y
4445 for {
4446 y := v_0
4447 if v_2.Op != OpAMD64FlagEQ {
4448 break
4449 }
4450 v.copyOf(y)
4451 return true
4452 }
// match: (CMOVLHI _ x (FlagGT_UGT))
// result: x
4455 for {
4456 x := v_1
4457 if v_2.Op != OpAMD64FlagGT_UGT {
4458 break
4459 }
4460 v.copyOf(x)
4461 return true
4462 }
// match: (CMOVLHI y _ (FlagGT_ULT))
// result: y
4465 for {
4466 y := v_0
4467 if v_2.Op != OpAMD64FlagGT_ULT {
4468 break
4469 }
4470 v.copyOf(y)
4471 return true
4472 }
// match: (CMOVLHI y _ (FlagLT_ULT))
// result: y
4475 for {
4476 y := v_0
4477 if v_2.Op != OpAMD64FlagLT_ULT {
4478 break
4479 }
4480 v.copyOf(y)
4481 return true
4482 }
// match: (CMOVLHI _ x (FlagLT_UGT))
// result: x
4485 for {
4486 x := v_1
4487 if v_2.Op != OpAMD64FlagLT_UGT {
4488 break
4489 }
4490 v.copyOf(x)
4491 return true
4492 }
4493 return false
4494 }
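// rewriteValueAMD64_OpAMD64CMOVLLE commutes the condition under InvertFlags and resolves statically known flag values.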
4495 func rewriteValueAMD64_OpAMD64CMOVLLE(v *Value) bool {
4496 v_2 := v.Args[2]
4497 v_1 := v.Args[1]
4498 v_0 := v.Args[0]
// match: (CMOVLLE x y (InvertFlags cond))
// result: (CMOVLGE x y cond)
4501 for {
4502 x := v_0
4503 y := v_1
4504 if v_2.Op != OpAMD64InvertFlags {
4505 break
4506 }
4507 cond := v_2.Args[0]
4508 v.reset(OpAMD64CMOVLGE)
4509 v.AddArg3(x, y, cond)
4510 return true
4511 }
// match: (CMOVLLE _ x (FlagEQ))
// result: x
4514 for {
4515 x := v_1
4516 if v_2.Op != OpAMD64FlagEQ {
4517 break
4518 }
4519 v.copyOf(x)
4520 return true
4521 }
// match: (CMOVLLE y _ (FlagGT_UGT))
// result: y
4524 for {
4525 y := v_0
4526 if v_2.Op != OpAMD64FlagGT_UGT {
4527 break
4528 }
4529 v.copyOf(y)
4530 return true
4531 }
// match: (CMOVLLE y _ (FlagGT_ULT))
// result: y
4534 for {
4535 y := v_0
4536 if v_2.Op != OpAMD64FlagGT_ULT {
4537 break
4538 }
4539 v.copyOf(y)
4540 return true
4541 }
// match: (CMOVLLE _ x (FlagLT_ULT))
// result: x
4544 for {
4545 x := v_1
4546 if v_2.Op != OpAMD64FlagLT_ULT {
4547 break
4548 }
4549 v.copyOf(x)
4550 return true
4551 }
// match: (CMOVLLE _ x (FlagLT_UGT))
// result: x
4554 for {
4555 x := v_1
4556 if v_2.Op != OpAMD64FlagLT_UGT {
4557 break
4558 }
4559 v.copyOf(x)
4560 return true
4561 }
4562 return false
4563 }
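// rewriteValueAMD64_OpAMD64CMOVLLS commutes the condition under InvertFlags and resolves statically known flag values.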
4564 func rewriteValueAMD64_OpAMD64CMOVLLS(v *Value) bool {
4565 v_2 := v.Args[2]
4566 v_1 := v.Args[1]
4567 v_0 := v.Args[0]
// match: (CMOVLLS x y (InvertFlags cond))
// result: (CMOVLCC x y cond)
4570 for {
4571 x := v_0
4572 y := v_1
4573 if v_2.Op != OpAMD64InvertFlags {
4574 break
4575 }
4576 cond := v_2.Args[0]
4577 v.reset(OpAMD64CMOVLCC)
4578 v.AddArg3(x, y, cond)
4579 return true
4580 }
// match: (CMOVLLS _ x (FlagEQ))
// result: x
4583 for {
4584 x := v_1
4585 if v_2.Op != OpAMD64FlagEQ {
4586 break
4587 }
4588 v.copyOf(x)
4589 return true
4590 }
// match: (CMOVLLS y _ (FlagGT_UGT))
// result: y
4593 for {
4594 y := v_0
4595 if v_2.Op != OpAMD64FlagGT_UGT {
4596 break
4597 }
4598 v.copyOf(y)
4599 return true
4600 }
// match: (CMOVLLS _ x (FlagGT_ULT))
// result: x
4603 for {
4604 x := v_1
4605 if v_2.Op != OpAMD64FlagGT_ULT {
4606 break
4607 }
4608 v.copyOf(x)
4609 return true
4610 }
// match: (CMOVLLS _ x (FlagLT_ULT))
// result: x
4613 for {
4614 x := v_1
4615 if v_2.Op != OpAMD64FlagLT_ULT {
4616 break
4617 }
4618 v.copyOf(x)
4619 return true
4620 }
// match: (CMOVLLS y _ (FlagLT_UGT))
// result: y
4623 for {
4624 y := v_0
4625 if v_2.Op != OpAMD64FlagLT_UGT {
4626 break
4627 }
4628 v.copyOf(y)
4629 return true
4630 }
4631 return false
4632 }
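// rewriteValueAMD64_OpAMD64CMOVLLT commutes the condition under InvertFlags, resolves known flag values, and tightens a single-use compare against 128 to a compare against 127.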
4633 func rewriteValueAMD64_OpAMD64CMOVLLT(v *Value) bool {
4634 v_2 := v.Args[2]
4635 v_1 := v.Args[1]
4636 v_0 := v.Args[0]
4637 b := v.Block
// match: (CMOVLLT x y (InvertFlags cond))
// result: (CMOVLGT x y cond)
4640 for {
4641 x := v_0
4642 y := v_1
4643 if v_2.Op != OpAMD64InvertFlags {
4644 break
4645 }
4646 cond := v_2.Args[0]
4647 v.reset(OpAMD64CMOVLGT)
4648 v.AddArg3(x, y, cond)
4649 return true
4650 }
// match: (CMOVLLT y _ (FlagEQ))
// result: y
4653 for {
4654 y := v_0
4655 if v_2.Op != OpAMD64FlagEQ {
4656 break
4657 }
4658 v.copyOf(y)
4659 return true
4660 }
// match: (CMOVLLT y _ (FlagGT_UGT))
// result: y
4663 for {
4664 y := v_0
4665 if v_2.Op != OpAMD64FlagGT_UGT {
4666 break
4667 }
4668 v.copyOf(y)
4669 return true
4670 }
// match: (CMOVLLT y _ (FlagGT_ULT))
// result: y
4673 for {
4674 y := v_0
4675 if v_2.Op != OpAMD64FlagGT_ULT {
4676 break
4677 }
4678 v.copyOf(y)
4679 return true
4680 }
// match: (CMOVLLT _ x (FlagLT_ULT))
// result: x
4683 for {
4684 x := v_1
4685 if v_2.Op != OpAMD64FlagLT_ULT {
4686 break
4687 }
4688 v.copyOf(x)
4689 return true
4690 }
// match: (CMOVLLT _ x (FlagLT_UGT))
// result: x
4693 for {
4694 x := v_1
4695 if v_2.Op != OpAMD64FlagLT_UGT {
4696 break
4697 }
4698 v.copyOf(x)
4699 return true
4700 }
// match: (CMOVLLT x y c:(CMPQconst [128] z))
// cond: c.Uses == 1
// result: (CMOVLLE x y (CMPQconst [127] z))
4704 for {
4705 x := v_0
4706 y := v_1
4707 c := v_2
4708 if c.Op != OpAMD64CMPQconst || auxIntToInt32(c.AuxInt) != 128 {
4709 break
4710 }
4711 z := c.Args[0]
4712 if !(c.Uses == 1) {
4713 break
4714 }
4715 v.reset(OpAMD64CMOVLLE)
4716 v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
4717 v0.AuxInt = int32ToAuxInt(127)
4718 v0.AddArg(z)
4719 v.AddArg3(x, y, v0)
4720 return true
4721 }
// match: (CMOVLLT x y c:(CMPLconst [128] z))
// cond: c.Uses == 1
// result: (CMOVLLE x y (CMPLconst [127] z))
4725 for {
4726 x := v_0
4727 y := v_1
4728 c := v_2
4729 if c.Op != OpAMD64CMPLconst || auxIntToInt32(c.AuxInt) != 128 {
4730 break
4731 }
4732 z := c.Args[0]
4733 if !(c.Uses == 1) {
4734 break
4735 }
4736 v.reset(OpAMD64CMOVLLE)
4737 v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
4738 v0.AuxInt = int32ToAuxInt(127)
4739 v0.AddArg(z)
4740 v.AddArg3(x, y, v0)
4741 return true
4742 }
4743 return false
4744 }
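// rewriteValueAMD64_OpAMD64CMOVLNE absorbs InvertFlags (inequality is symmetric), resolves known flag values, and reuses the flag output of a BLSRQ/BLSRL instead of a separate TEST.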
4745 func rewriteValueAMD64_OpAMD64CMOVLNE(v *Value) bool {
4746 v_2 := v.Args[2]
4747 v_1 := v.Args[1]
4748 v_0 := v.Args[0]
4749 b := v.Block
// match: (CMOVLNE x y (InvertFlags cond))
// result: (CMOVLNE x y cond)
4752 for {
4753 x := v_0
4754 y := v_1
4755 if v_2.Op != OpAMD64InvertFlags {
4756 break
4757 }
4758 cond := v_2.Args[0]
4759 v.reset(OpAMD64CMOVLNE)
4760 v.AddArg3(x, y, cond)
4761 return true
4762 }
// match: (CMOVLNE y _ (FlagEQ))
// result: y
4765 for {
4766 y := v_0
4767 if v_2.Op != OpAMD64FlagEQ {
4768 break
4769 }
4770 v.copyOf(y)
4771 return true
4772 }
4773
// match: (CMOVLNE _ x (FlagGT_UGT))
// result: x
4776 x := v_1
4777 if v_2.Op != OpAMD64FlagGT_UGT {
4778 break
4779 }
4780 v.copyOf(x)
4781 return true
4782 }
4783
// match: (CMOVLNE _ x (FlagGT_ULT))
// result: x
4786 x := v_1
4787 if v_2.Op != OpAMD64FlagGT_ULT {
4788 break
4789 }
4790 v.copyOf(x)
4791 return true
4792 }
4793
// match: (CMOVLNE _ x (FlagLT_ULT))
// result: x
4796 x := v_1
4797 if v_2.Op != OpAMD64FlagLT_ULT {
4798 break
4799 }
4800 v.copyOf(x)
4801 return true
4802 }
4803
// match: (CMOVLNE _ x (FlagLT_UGT))
// result: x
4806 x := v_1
4807 if v_2.Op != OpAMD64FlagLT_UGT {
4808 break
4809 }
4810 v.copyOf(x)
4811 return true
4812 }
// match: (CMOVLNE x y (TESTQ s:(Select0 blsr:(BLSRQ _)) s))
// result: (CMOVLNE x y (Select1 <types.TypeFlags> blsr))
4815 for {
4816 x := v_0
4817 y := v_1
4818 if v_2.Op != OpAMD64TESTQ {
4819 break
4820 }
4821 _ = v_2.Args[1]
4822 v_2_0 := v_2.Args[0]
4823 v_2_1 := v_2.Args[1]
4824 for _i0 := 0; _i0 <= 1; _i0, v_2_0, v_2_1 = _i0+1, v_2_1, v_2_0 {
4825 s := v_2_0
4826 if s.Op != OpSelect0 {
4827 continue
4828 }
4829 blsr := s.Args[0]
4830 if blsr.Op != OpAMD64BLSRQ || s != v_2_1 {
4831 continue
4832 }
4833 v.reset(OpAMD64CMOVLNE)
4834 v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
4835 v0.AddArg(blsr)
4836 v.AddArg3(x, y, v0)
4837 return true
4838 }
4839 break
4840 }
// match: (CMOVLNE x y (TESTL s:(Select0 blsr:(BLSRL _)) s))
// result: (CMOVLNE x y (Select1 <types.TypeFlags> blsr))
4843 for {
4844 x := v_0
4845 y := v_1
4846 if v_2.Op != OpAMD64TESTL {
4847 break
4848 }
4849 _ = v_2.Args[1]
4850 v_2_0 := v_2.Args[0]
4851 v_2_1 := v_2.Args[1]
4852 for _i0 := 0; _i0 <= 1; _i0, v_2_0, v_2_1 = _i0+1, v_2_1, v_2_0 {
4853 s := v_2_0
4854 if s.Op != OpSelect0 {
4855 continue
4856 }
4857 blsr := s.Args[0]
4858 if blsr.Op != OpAMD64BLSRL || s != v_2_1 {
4859 continue
4860 }
4861 v.reset(OpAMD64CMOVLNE)
4862 v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
4863 v0.AddArg(blsr)
4864 v.AddArg3(x, y, v0)
4865 return true
4866 }
4867 break
4868 }
4869 return false
4870 }
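// rewriteValueAMD64_OpAMD64CMOVQCC commutes the condition under InvertFlags and resolves statically known flag values.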
4871 func rewriteValueAMD64_OpAMD64CMOVQCC(v *Value) bool {
4872 v_2 := v.Args[2]
4873 v_1 := v.Args[1]
4874 v_0 := v.Args[0]
// match: (CMOVQCC x y (InvertFlags cond))
// result: (CMOVQLS x y cond)
4877 for {
4878 x := v_0
4879 y := v_1
4880 if v_2.Op != OpAMD64InvertFlags {
4881 break
4882 }
4883 cond := v_2.Args[0]
4884 v.reset(OpAMD64CMOVQLS)
4885 v.AddArg3(x, y, cond)
4886 return true
4887 }
// match: (CMOVQCC _ x (FlagEQ))
// result: x
4890 for {
4891 x := v_1
4892 if v_2.Op != OpAMD64FlagEQ {
4893 break
4894 }
4895 v.copyOf(x)
4896 return true
4897 }
// match: (CMOVQCC _ x (FlagGT_UGT))
// result: x
4900 for {
4901 x := v_1
4902 if v_2.Op != OpAMD64FlagGT_UGT {
4903 break
4904 }
4905 v.copyOf(x)
4906 return true
4907 }
// match: (CMOVQCC y _ (FlagGT_ULT))
// result: y
4910 for {
4911 y := v_0
4912 if v_2.Op != OpAMD64FlagGT_ULT {
4913 break
4914 }
4915 v.copyOf(y)
4916 return true
4917 }
// match: (CMOVQCC y _ (FlagLT_ULT))
// result: y
4920 for {
4921 y := v_0
4922 if v_2.Op != OpAMD64FlagLT_ULT {
4923 break
4924 }
4925 v.copyOf(y)
4926 return true
4927 }
// match: (CMOVQCC _ x (FlagLT_UGT))
// result: x
4930 for {
4931 x := v_1
4932 if v_2.Op != OpAMD64FlagLT_UGT {
4933 break
4934 }
4935 v.copyOf(x)
4936 return true
4937 }
4938 return false
4939 }
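// rewriteValueAMD64_OpAMD64CMOVQCS commutes the condition under InvertFlags and resolves statically known flag values.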
4940 func rewriteValueAMD64_OpAMD64CMOVQCS(v *Value) bool {
4941 v_2 := v.Args[2]
4942 v_1 := v.Args[1]
4943 v_0 := v.Args[0]
// match: (CMOVQCS x y (InvertFlags cond))
// result: (CMOVQHI x y cond)
4946 for {
4947 x := v_0
4948 y := v_1
4949 if v_2.Op != OpAMD64InvertFlags {
4950 break
4951 }
4952 cond := v_2.Args[0]
4953 v.reset(OpAMD64CMOVQHI)
4954 v.AddArg3(x, y, cond)
4955 return true
4956 }
// match: (CMOVQCS y _ (FlagEQ))
// result: y
4959 for {
4960 y := v_0
4961 if v_2.Op != OpAMD64FlagEQ {
4962 break
4963 }
4964 v.copyOf(y)
4965 return true
4966 }
// match: (CMOVQCS y _ (FlagGT_UGT))
// result: y
4969 for {
4970 y := v_0
4971 if v_2.Op != OpAMD64FlagGT_UGT {
4972 break
4973 }
4974 v.copyOf(y)
4975 return true
4976 }
// match: (CMOVQCS _ x (FlagGT_ULT))
// result: x
4979 for {
4980 x := v_1
4981 if v_2.Op != OpAMD64FlagGT_ULT {
4982 break
4983 }
4984 v.copyOf(x)
4985 return true
4986 }
// match: (CMOVQCS _ x (FlagLT_ULT))
// result: x
4989 for {
4990 x := v_1
4991 if v_2.Op != OpAMD64FlagLT_ULT {
4992 break
4993 }
4994 v.copyOf(x)
4995 return true
4996 }
// match: (CMOVQCS y _ (FlagLT_UGT))
// result: y
4999 for {
5000 y := v_0
5001 if v_2.Op != OpAMD64FlagLT_UGT {
5002 break
5003 }
5004 v.copyOf(y)
5005 return true
5006 }
5007 return false
5008 }
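// rewriteValueAMD64_OpAMD64CMOVQEQ absorbs InvertFlags, resolves known flag values, drops the conditional move when a BSFQ/BSRQ input is provably nonzero, and reuses the flag output of a BLSRQ/BLSRL instead of a separate TEST.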
5009 func rewriteValueAMD64_OpAMD64CMOVQEQ(v *Value) bool {
5010 v_2 := v.Args[2]
5011 v_1 := v.Args[1]
5012 v_0 := v.Args[0]
5013 b := v.Block
// match: (CMOVQEQ x y (InvertFlags cond))
// result: (CMOVQEQ x y cond)
5016 for {
5017 x := v_0
5018 y := v_1
5019 if v_2.Op != OpAMD64InvertFlags {
5020 break
5021 }
5022 cond := v_2.Args[0]
5023 v.reset(OpAMD64CMOVQEQ)
5024 v.AddArg3(x, y, cond)
5025 return true
5026 }
// match: (CMOVQEQ _ x (FlagEQ))
// result: x
5029 for {
5030 x := v_1
5031 if v_2.Op != OpAMD64FlagEQ {
5032 break
5033 }
5034 v.copyOf(x)
5035 return true
5036 }
// match: (CMOVQEQ y _ (FlagGT_UGT))
// result: y
5039 for {
5040 y := v_0
5041 if v_2.Op != OpAMD64FlagGT_UGT {
5042 break
5043 }
5044 v.copyOf(y)
5045 return true
5046 }
// match: (CMOVQEQ y _ (FlagGT_ULT))
// result: y
5049 for {
5050 y := v_0
5051 if v_2.Op != OpAMD64FlagGT_ULT {
5052 break
5053 }
5054 v.copyOf(y)
5055 return true
5056 }
// match: (CMOVQEQ y _ (FlagLT_ULT))
// result: y
5059 for {
5060 y := v_0
5061 if v_2.Op != OpAMD64FlagLT_ULT {
5062 break
5063 }
5064 v.copyOf(y)
5065 return true
5066 }
// match: (CMOVQEQ y _ (FlagLT_UGT))
// result: y
5069 for {
5070 y := v_0
5071 if v_2.Op != OpAMD64FlagLT_UGT {
5072 break
5073 }
5074 v.copyOf(y)
5075 return true
5076 }
// match: (CMOVQEQ x _ (Select1 (BSFQ (ORQconst [c] _))))
// cond: c != 0
// result: x
5080 for {
5081 x := v_0
5082 if v_2.Op != OpSelect1 {
5083 break
5084 }
5085 v_2_0 := v_2.Args[0]
5086 if v_2_0.Op != OpAMD64BSFQ {
5087 break
5088 }
5089 v_2_0_0 := v_2_0.Args[0]
5090 if v_2_0_0.Op != OpAMD64ORQconst {
5091 break
5092 }
5093 c := auxIntToInt32(v_2_0_0.AuxInt)
5094 if !(c != 0) {
5095 break
5096 }
5097 v.copyOf(x)
5098 return true
5099 }
// match: (CMOVQEQ x _ (Select1 (BSRQ (ORQconst [c] _))))
// cond: c != 0
// result: x
5103 for {
5104 x := v_0
5105 if v_2.Op != OpSelect1 {
5106 break
5107 }
5108 v_2_0 := v_2.Args[0]
5109 if v_2_0.Op != OpAMD64BSRQ {
5110 break
5111 }
5112 v_2_0_0 := v_2_0.Args[0]
5113 if v_2_0_0.Op != OpAMD64ORQconst {
5114 break
5115 }
5116 c := auxIntToInt32(v_2_0_0.AuxInt)
5117 if !(c != 0) {
5118 break
5119 }
5120 v.copyOf(x)
5121 return true
5122 }
// match: (CMOVQEQ x y (TESTQ s:(Select0 blsr:(BLSRQ _)) s))
// result: (CMOVQEQ x y (Select1 <types.TypeFlags> blsr))
5125 for {
5126 x := v_0
5127 y := v_1
5128 if v_2.Op != OpAMD64TESTQ {
5129 break
5130 }
5131 _ = v_2.Args[1]
5132 v_2_0 := v_2.Args[0]
5133 v_2_1 := v_2.Args[1]
5134 for _i0 := 0; _i0 <= 1; _i0, v_2_0, v_2_1 = _i0+1, v_2_1, v_2_0 {
5135 s := v_2_0
5136 if s.Op != OpSelect0 {
5137 continue
5138 }
5139 blsr := s.Args[0]
5140 if blsr.Op != OpAMD64BLSRQ || s != v_2_1 {
5141 continue
5142 }
5143 v.reset(OpAMD64CMOVQEQ)
5144 v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
5145 v0.AddArg(blsr)
5146 v.AddArg3(x, y, v0)
5147 return true
5148 }
5149 break
5150 }
// match: (CMOVQEQ x y (TESTL s:(Select0 blsr:(BLSRL _)) s))
// result: (CMOVQEQ x y (Select1 <types.TypeFlags> blsr))
5153 for {
5154 x := v_0
5155 y := v_1
5156 if v_2.Op != OpAMD64TESTL {
5157 break
5158 }
5159 _ = v_2.Args[1]
5160 v_2_0 := v_2.Args[0]
5161 v_2_1 := v_2.Args[1]
5162 for _i0 := 0; _i0 <= 1; _i0, v_2_0, v_2_1 = _i0+1, v_2_1, v_2_0 {
5163 s := v_2_0
5164 if s.Op != OpSelect0 {
5165 continue
5166 }
5167 blsr := s.Args[0]
5168 if blsr.Op != OpAMD64BLSRL || s != v_2_1 {
5169 continue
5170 }
5171 v.reset(OpAMD64CMOVQEQ)
5172 v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
5173 v0.AddArg(blsr)
5174 v.AddArg3(x, y, v0)
5175 return true
5176 }
5177 break
5178 }
5179 return false
5180 }
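// rewriteValueAMD64_OpAMD64CMOVQGE commutes the condition under InvertFlags, resolves known flag values, and tightens a single-use compare against 128 to a compare against 127.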
5181 func rewriteValueAMD64_OpAMD64CMOVQGE(v *Value) bool {
5182 v_2 := v.Args[2]
5183 v_1 := v.Args[1]
5184 v_0 := v.Args[0]
5185 b := v.Block
// match: (CMOVQGE x y (InvertFlags cond))
// result: (CMOVQLE x y cond)
5188 for {
5189 x := v_0
5190 y := v_1
5191 if v_2.Op != OpAMD64InvertFlags {
5192 break
5193 }
5194 cond := v_2.Args[0]
5195 v.reset(OpAMD64CMOVQLE)
5196 v.AddArg3(x, y, cond)
5197 return true
5198 }
// match: (CMOVQGE _ x (FlagEQ))
// result: x
5201 for {
5202 x := v_1
5203 if v_2.Op != OpAMD64FlagEQ {
5204 break
5205 }
5206 v.copyOf(x)
5207 return true
5208 }
// match: (CMOVQGE _ x (FlagGT_UGT))
// result: x
5211 for {
5212 x := v_1
5213 if v_2.Op != OpAMD64FlagGT_UGT {
5214 break
5215 }
5216 v.copyOf(x)
5217 return true
5218 }
// match: (CMOVQGE _ x (FlagGT_ULT))
// result: x
5221 for {
5222 x := v_1
5223 if v_2.Op != OpAMD64FlagGT_ULT {
5224 break
5225 }
5226 v.copyOf(x)
5227 return true
5228 }
// match: (CMOVQGE y _ (FlagLT_ULT))
// result: y
5231 for {
5232 y := v_0
5233 if v_2.Op != OpAMD64FlagLT_ULT {
5234 break
5235 }
5236 v.copyOf(y)
5237 return true
5238 }
5239
5240
5241 for {
5242 y := v_0
5243 if v_2.Op != OpAMD64FlagLT_UGT {
5244 break
5245 }
5246 v.copyOf(y)
5247 return true
5248 }
5249
5250
5251
5252 for {
5253 x := v_0
5254 y := v_1
5255 c := v_2
5256 if c.Op != OpAMD64CMPQconst || auxIntToInt32(c.AuxInt) != 128 {
5257 break
5258 }
5259 z := c.Args[0]
5260 if !(c.Uses == 1) {
5261 break
5262 }
5263 v.reset(OpAMD64CMOVQGT)
5264 v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
5265 v0.AuxInt = int32ToAuxInt(127)
5266 v0.AddArg(z)
5267 v.AddArg3(x, y, v0)
5268 return true
5269 }
5270
5271
5272
5273 for {
5274 x := v_0
5275 y := v_1
5276 c := v_2
5277 if c.Op != OpAMD64CMPLconst || auxIntToInt32(c.AuxInt) != 128 {
5278 break
5279 }
5280 z := c.Args[0]
5281 if !(c.Uses == 1) {
5282 break
5283 }
5284 v.reset(OpAMD64CMOVQGT)
5285 v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
5286 v0.AuxInt = int32ToAuxInt(127)
5287 v0.AddArg(z)
5288 v.AddArg3(x, y, v0)
5289 return true
5290 }
5291 return false
5292 }
func rewriteValueAMD64_OpAMD64CMOVQGT(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVQGT x y (InvertFlags cond))
	// result: (CMOVQLT x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVQLT)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVQGT y _ (FlagEQ))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQGT _ x (FlagGT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQGT _ x (FlagGT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQGT y _ (FlagLT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQGT y _ (FlagLT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVQHI(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVQHI x y (InvertFlags cond))
	// result: (CMOVQCS x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVQCS)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVQHI y _ (FlagEQ))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQHI _ x (FlagGT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQHI y _ (FlagGT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQHI y _ (FlagLT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQHI _ x (FlagLT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVQLE(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVQLE x y (InvertFlags cond))
	// result: (CMOVQGE x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVQGE)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVQLE _ x (FlagEQ))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQLE y _ (FlagGT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQLE y _ (FlagGT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQLE _ x (FlagLT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQLE _ x (FlagLT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVQLS(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVQLS x y (InvertFlags cond))
	// result: (CMOVQCC x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVQCC)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVQLS _ x (FlagEQ))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQLS y _ (FlagGT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQLS _ x (FlagGT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQLS _ x (FlagLT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQLS y _ (FlagLT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVQLT(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (CMOVQLT x y (InvertFlags cond))
	// result: (CMOVQGT x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVQGT)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVQLT y _ (FlagEQ))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQLT y _ (FlagGT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQLT y _ (FlagGT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQLT _ x (FlagLT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQLT _ x (FlagLT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQLT x y c:(CMPQconst [128] z))
	// cond: c.Uses == 1
	// result: (CMOVQLE x y (CMPQconst [127] z))
	for {
		x := v_0
		y := v_1
		c := v_2
		if c.Op != OpAMD64CMPQconst || auxIntToInt32(c.AuxInt) != 128 {
			break
		}
		z := c.Args[0]
		if !(c.Uses == 1) {
			break
		}
		v.reset(OpAMD64CMOVQLE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(127)
		v0.AddArg(z)
		v.AddArg3(x, y, v0)
		return true
	}
	// match: (CMOVQLT x y c:(CMPLconst [128] z))
	// cond: c.Uses == 1
	// result: (CMOVQLE x y (CMPLconst [127] z))
	for {
		x := v_0
		y := v_1
		c := v_2
		if c.Op != OpAMD64CMPLconst || auxIntToInt32(c.AuxInt) != 128 {
			break
		}
		z := c.Args[0]
		if !(c.Uses == 1) {
			break
		}
		v.reset(OpAMD64CMOVQLE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(127)
		v0.AddArg(z)
		v.AddArg3(x, y, v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVQNE(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (CMOVQNE x y (InvertFlags cond))
	// result: (CMOVQNE x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVQNE)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVQNE y _ (FlagEQ))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVQNE _ x (FlagGT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQNE _ x (FlagGT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQNE _ x (FlagLT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQNE _ x (FlagLT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVQNE x y (TESTQ s:(Select0 blsr:(BLSRQ _)) s))
	// result: (CMOVQNE x y (Select1 <types.TypeFlags> blsr))
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64TESTQ {
			break
		}
		_ = v_2.Args[1]
		v_2_0 := v_2.Args[0]
		v_2_1 := v_2.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_2_0, v_2_1 = _i0+1, v_2_1, v_2_0 {
			s := v_2_0
			if s.Op != OpSelect0 {
				continue
			}
			blsr := s.Args[0]
			if blsr.Op != OpAMD64BLSRQ || s != v_2_1 {
				continue
			}
			v.reset(OpAMD64CMOVQNE)
			v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
			v0.AddArg(blsr)
			v.AddArg3(x, y, v0)
			return true
		}
		break
	}
	// match: (CMOVQNE x y (TESTL s:(Select0 blsr:(BLSRL _)) s))
	// result: (CMOVQNE x y (Select1 <types.TypeFlags> blsr))
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64TESTL {
			break
		}
		_ = v_2.Args[1]
		v_2_0 := v_2.Args[0]
		v_2_1 := v_2.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_2_0, v_2_1 = _i0+1, v_2_1, v_2_0 {
			s := v_2_0
			if s.Op != OpSelect0 {
				continue
			}
			blsr := s.Args[0]
			if blsr.Op != OpAMD64BLSRL || s != v_2_1 {
				continue
			}
			v.reset(OpAMD64CMOVQNE)
			v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
			v0.AddArg(blsr)
			v.AddArg3(x, y, v0)
			return true
		}
		break
	}
	return false
}
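// The CMOVW rewrites below mirror the CMOVQ rules above: an InvertFlags
// argument maps each condition to its operand-swapped counterpart
// (CC<->LS, CS<->HI, GE<->LE, GT<->LT, EQ and NE to themselves), and a
// constant flag argument selects one of the two data operands directly.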
func rewriteValueAMD64_OpAMD64CMOVWCC(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVWCC x y (InvertFlags cond))
	// result: (CMOVWLS x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVWLS)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVWCC _ x (FlagEQ))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWCC _ x (FlagGT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWCC y _ (FlagGT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWCC y _ (FlagLT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWCC _ x (FlagLT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVWCS(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVWCS x y (InvertFlags cond))
	// result: (CMOVWHI x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVWHI)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVWCS y _ (FlagEQ))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWCS y _ (FlagGT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWCS _ x (FlagGT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWCS _ x (FlagLT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWCS y _ (FlagLT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVWEQ(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVWEQ x y (InvertFlags cond))
	// result: (CMOVWEQ x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVWEQ)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVWEQ _ x (FlagEQ))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWEQ y _ (FlagGT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWEQ y _ (FlagGT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWEQ y _ (FlagLT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWEQ y _ (FlagLT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVWGE(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVWGE x y (InvertFlags cond))
	// result: (CMOVWLE x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVWLE)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVWGE _ x (FlagEQ))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWGE _ x (FlagGT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWGE _ x (FlagGT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWGE y _ (FlagLT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWGE y _ (FlagLT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVWGT(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVWGT x y (InvertFlags cond))
	// result: (CMOVWLT x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVWLT)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVWGT y _ (FlagEQ))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWGT _ x (FlagGT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWGT _ x (FlagGT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWGT y _ (FlagLT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWGT y _ (FlagLT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVWHI(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVWHI x y (InvertFlags cond))
	// result: (CMOVWCS x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVWCS)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVWHI y _ (FlagEQ))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWHI _ x (FlagGT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWHI y _ (FlagGT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWHI y _ (FlagLT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWHI _ x (FlagLT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVWLE(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVWLE x y (InvertFlags cond))
	// result: (CMOVWGE x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVWGE)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVWLE _ x (FlagEQ))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWLE y _ (FlagGT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWLE y _ (FlagGT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWLE _ x (FlagLT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWLE _ x (FlagLT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVWLS(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVWLS x y (InvertFlags cond))
	// result: (CMOVWCC x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVWCC)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVWLS _ x (FlagEQ))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWLS y _ (FlagGT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWLS _ x (FlagGT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWLS _ x (FlagLT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWLS y _ (FlagLT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVWLT(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVWLT x y (InvertFlags cond))
	// result: (CMOVWGT x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVWGT)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVWLT y _ (FlagEQ))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWLT y _ (FlagGT_UGT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWLT y _ (FlagGT_ULT))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWLT _ x (FlagLT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWLT _ x (FlagLT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMOVWNE(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMOVWNE x y (InvertFlags cond))
	// result: (CMOVWNE x y cond)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64InvertFlags {
			break
		}
		cond := v_2.Args[0]
		v.reset(OpAMD64CMOVWNE)
		v.AddArg3(x, y, cond)
		return true
	}
	// match: (CMOVWNE y _ (FlagEQ))
	// result: y
	for {
		y := v_0
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.copyOf(y)
		return true
	}
	// match: (CMOVWNE _ x (FlagGT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWNE _ x (FlagGT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWNE _ x (FlagLT_ULT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (CMOVWNE _ x (FlagLT_UGT))
	// result: x
	for {
		x := v_1
		if v_2.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.copyOf(x)
		return true
	}
	return false
}
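// The CMPx rewrites below share three shapes: fold a constant operand into
// CMPxconst (wrapping in InvertFlags when the constant is on the left),
// canonicalize operand order via canonLessThan under InvertFlags, and
// merge a single-use load operand into CMPxload.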
func rewriteValueAMD64_OpAMD64CMPB(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (CMPB x (MOVLconst [c]))
	// result: (CMPBconst x [int8(c)])
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64CMPBconst)
		v.AuxInt = int8ToAuxInt(int8(c))
		v.AddArg(x)
		return true
	}
	// match: (CMPB (MOVLconst [c]) x)
	// result: (InvertFlags (CMPBconst x [int8(c)]))
	for {
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_1
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v0.AuxInt = int8ToAuxInt(int8(c))
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (CMPB x y)
	// cond: canonLessThan(x,y)
	// result: (InvertFlags (CMPB y x))
	for {
		x := v_0
		y := v_1
		if !(canonLessThan(x, y)) {
			break
		}
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg2(y, x)
		v.AddArg(v0)
		return true
	}
	// match: (CMPB l:(MOVBload {sym} [off] ptr mem) x)
	// cond: canMergeLoad(v, l) && clobber(l)
	// result: (CMPBload {sym} [off] ptr x mem)
	for {
		l := v_0
		if l.Op != OpAMD64MOVBload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		x := v_1
		if !(canMergeLoad(v, l) && clobber(l)) {
			break
		}
		v.reset(OpAMD64CMPBload)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (CMPB x l:(MOVBload {sym} [off] ptr mem))
	// cond: canMergeLoad(v, l) && clobber(l)
	// result: (InvertFlags (CMPBload {sym} [off] ptr x mem))
	for {
		x := v_0
		l := v_1
		if l.Op != OpAMD64MOVBload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoad(v, l) && clobber(l)) {
			break
		}
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(l.Pos, OpAMD64CMPBload, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg3(ptr, x, mem)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPBconst(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (CMPBconst (MOVLconst [x]) [y])
	// cond: int8(x)==y
	// result: (FlagEQ)
	for {
		y := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := auxIntToInt32(v_0.AuxInt)
		if !(int8(x) == y) {
			break
		}
		v.reset(OpAMD64FlagEQ)
		return true
	}
	// match: (CMPBconst (MOVLconst [x]) [y])
	// cond: int8(x)<y && uint8(x)<uint8(y)
	// result: (FlagLT_ULT)
	for {
		y := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := auxIntToInt32(v_0.AuxInt)
		if !(int8(x) < y && uint8(x) < uint8(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPBconst (MOVLconst [x]) [y])
	// cond: int8(x)<y && uint8(x)>uint8(y)
	// result: (FlagLT_UGT)
	for {
		y := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := auxIntToInt32(v_0.AuxInt)
		if !(int8(x) < y && uint8(x) > uint8(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_UGT)
		return true
	}
	// match: (CMPBconst (MOVLconst [x]) [y])
	// cond: int8(x)>y && uint8(x)<uint8(y)
	// result: (FlagGT_ULT)
	for {
		y := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := auxIntToInt32(v_0.AuxInt)
		if !(int8(x) > y && uint8(x) < uint8(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_ULT)
		return true
	}
	// match: (CMPBconst (MOVLconst [x]) [y])
	// cond: int8(x)>y && uint8(x)>uint8(y)
	// result: (FlagGT_UGT)
	for {
		y := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := auxIntToInt32(v_0.AuxInt)
		if !(int8(x) > y && uint8(x) > uint8(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_UGT)
		return true
	}
	// match: (CMPBconst (ANDLconst _ [m]) [n])
	// cond: 0 <= int8(m) && int8(m) < n
	// result: (FlagLT_ULT)
	for {
		n := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		m := auxIntToInt32(v_0.AuxInt)
		if !(0 <= int8(m) && int8(m) < n) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPBconst a:(ANDL x y) [0])
	// cond: a.Uses == 1
	// result: (TESTB x y)
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		a := v_0
		if a.Op != OpAMD64ANDL {
			break
		}
		y := a.Args[1]
		x := a.Args[0]
		if !(a.Uses == 1) {
			break
		}
		v.reset(OpAMD64TESTB)
		v.AddArg2(x, y)
		return true
	}
	// match: (CMPBconst a:(ANDLconst [c] x) [0])
	// cond: a.Uses == 1
	// result: (TESTBconst [int8(c)] x)
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		a := v_0
		if a.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(a.AuxInt)
		x := a.Args[0]
		if !(a.Uses == 1) {
			break
		}
		v.reset(OpAMD64TESTBconst)
		v.AuxInt = int8ToAuxInt(int8(c))
		v.AddArg(x)
		return true
	}
	// match: (CMPBconst x [0])
	// result: (TESTB x x)
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.reset(OpAMD64TESTB)
		v.AddArg2(x, x)
		return true
	}
	// match: (CMPBconst l:(MOVBload {sym} [off] ptr mem) [c])
	// cond: l.Uses == 1 && clobber(l)
	// result: @l.Block (CMPBconstload {sym} [makeValAndOff(int32(c),off)] ptr mem)
	for {
		c := auxIntToInt8(v.AuxInt)
		l := v_0
		if l.Op != OpAMD64MOVBload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(l.Uses == 1 && clobber(l)) {
			break
		}
		b = l.Block
		v0 := b.NewValue0(l.Pos, OpAMD64CMPBconstload, types.TypeFlags)
		v.copyOf(v0)
		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	return false
}
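// CMPxconstload carries a ValAndOff (compare value plus offset) in AuxInt.
// The rules below fold an ADDQconst or LEAQ base into the offset, guarded
// by canAdd32 (and canMergeSym for LEAQ) so the combined offset still fits
// in 32 bits.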
func rewriteValueAMD64_OpAMD64CMPBconstload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMPBconstload [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2)
	// result: (CMPBconstload [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2)) {
			break
		}
		v.reset(OpAMD64CMPBconstload)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(sym)
		v.AddArg2(base, mem)
		return true
	}
	// match: (CMPBconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
	// result: (CMPBconstload [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64CMPBconstload)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPBload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMPBload [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (CMPBload [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64CMPBload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (CMPBload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (CMPBload [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64CMPBload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (CMPBload {sym} [off] ptr (MOVLconst [c]) mem)
	// result: (CMPBconstload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		mem := v_2
		v.reset(OpAMD64CMPBconstload)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off))
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (CMPL x (MOVLconst [c]))
	// result: (CMPLconst x [c])
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64CMPLconst)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	// match: (CMPL (MOVLconst [c]) x)
	// result: (InvertFlags (CMPLconst x [c]))
	for {
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_1
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(c)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (CMPL x y)
	// cond: canonLessThan(x,y)
	// result: (InvertFlags (CMPL y x))
	for {
		x := v_0
		y := v_1
		if !(canonLessThan(x, y)) {
			break
		}
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg2(y, x)
		v.AddArg(v0)
		return true
	}
	// match: (CMPL l:(MOVLload {sym} [off] ptr mem) x)
	// cond: canMergeLoad(v, l) && clobber(l)
	// result: (CMPLload {sym} [off] ptr x mem)
	for {
		l := v_0
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		x := v_1
		if !(canMergeLoad(v, l) && clobber(l)) {
			break
		}
		v.reset(OpAMD64CMPLload)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (CMPL x l:(MOVLload {sym} [off] ptr mem))
	// cond: canMergeLoad(v, l) && clobber(l)
	// result: (InvertFlags (CMPLload {sym} [off] ptr x mem))
	for {
		x := v_0
		l := v_1
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoad(v, l) && clobber(l)) {
			break
		}
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(l.Pos, OpAMD64CMPLload, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg3(ptr, x, mem)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPLconst(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (CMPLconst (MOVLconst [x]) [y])
	// cond: x==y
	// result: (FlagEQ)
	for {
		y := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := auxIntToInt32(v_0.AuxInt)
		if !(x == y) {
			break
		}
		v.reset(OpAMD64FlagEQ)
		return true
	}
	// match: (CMPLconst (MOVLconst [x]) [y])
	// cond: x<y && uint32(x)<uint32(y)
	// result: (FlagLT_ULT)
	for {
		y := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := auxIntToInt32(v_0.AuxInt)
		if !(x < y && uint32(x) < uint32(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPLconst (MOVLconst [x]) [y])
	// cond: x<y && uint32(x)>uint32(y)
	// result: (FlagLT_UGT)
	for {
		y := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := auxIntToInt32(v_0.AuxInt)
		if !(x < y && uint32(x) > uint32(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_UGT)
		return true
	}
	// match: (CMPLconst (MOVLconst [x]) [y])
	// cond: x>y && uint32(x)<uint32(y)
	// result: (FlagGT_ULT)
	for {
		y := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := auxIntToInt32(v_0.AuxInt)
		if !(x > y && uint32(x) < uint32(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_ULT)
		return true
	}
	// match: (CMPLconst (MOVLconst [x]) [y])
	// cond: x>y && uint32(x)>uint32(y)
	// result: (FlagGT_UGT)
	for {
		y := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := auxIntToInt32(v_0.AuxInt)
		if !(x > y && uint32(x) > uint32(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_UGT)
		return true
	}
	// match: (CMPLconst (SHRLconst _ [c]) [n])
	// cond: 0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)
	// result: (FlagLT_ULT)
	for {
		n := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64SHRLconst {
			break
		}
		c := auxIntToInt8(v_0.AuxInt)
		if !(0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPLconst (ANDLconst _ [m]) [n])
	// cond: 0 <= m && m < n
	// result: (FlagLT_ULT)
	for {
		n := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		m := auxIntToInt32(v_0.AuxInt)
		if !(0 <= m && m < n) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPLconst a:(ANDL x y) [0])
	// cond: a.Uses == 1
	// result: (TESTL x y)
	for {
		if auxIntToInt32(v.AuxInt) != 0 {
			break
		}
		a := v_0
		if a.Op != OpAMD64ANDL {
			break
		}
		y := a.Args[1]
		x := a.Args[0]
		if !(a.Uses == 1) {
			break
		}
		v.reset(OpAMD64TESTL)
		v.AddArg2(x, y)
		return true
	}
	// match: (CMPLconst a:(ANDLconst [c] x) [0])
	// cond: a.Uses == 1
	// result: (TESTLconst [c] x)
	for {
		if auxIntToInt32(v.AuxInt) != 0 {
			break
		}
		a := v_0
		if a.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(a.AuxInt)
		x := a.Args[0]
		if !(a.Uses == 1) {
			break
		}
		v.reset(OpAMD64TESTLconst)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	// match: (CMPLconst x [0])
	// result: (TESTL x x)
	for {
		if auxIntToInt32(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.reset(OpAMD64TESTL)
		v.AddArg2(x, x)
		return true
	}
	// match: (CMPLconst l:(MOVLload {sym} [off] ptr mem) [c])
	// cond: l.Uses == 1 && clobber(l)
	// result: @l.Block (CMPLconstload {sym} [makeValAndOff(c,off)] ptr mem)
	for {
		c := auxIntToInt32(v.AuxInt)
		l := v_0
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(l.Uses == 1 && clobber(l)) {
			break
		}
		b = l.Block
		v0 := b.NewValue0(l.Pos, OpAMD64CMPLconstload, types.TypeFlags)
		v.copyOf(v0)
		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(c, off))
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPLconstload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMPLconstload [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2)
	// result: (CMPLconstload [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2)) {
			break
		}
		v.reset(OpAMD64CMPLconstload)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(sym)
		v.AddArg2(base, mem)
		return true
	}
	// match: (CMPLconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
	// result: (CMPLconstload [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64CMPLconstload)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPLload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMPLload [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (CMPLload [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64CMPLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (CMPLload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (CMPLload [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64CMPLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (CMPLload {sym} [off] ptr (MOVLconst [c]) mem)
	// result: (CMPLconstload {sym} [makeValAndOff(c,off)] ptr mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		mem := v_2
		v.reset(OpAMD64CMPLconstload)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(c, off))
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	return false
}
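// CMPQ only folds a constant operand when it fits in a sign-extended
// 32-bit immediate (is32Bit); a wider constant stays in a register.
// Constant-constant compares fold directly to the matching flag value.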
func rewriteValueAMD64_OpAMD64CMPQ(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (CMPQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (CMPQconst x [int32(c)])
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64CMPQconst)
		v.AuxInt = int32ToAuxInt(int32(c))
		v.AddArg(x)
		return true
	}
	// match: (CMPQ (MOVQconst [c]) x)
	// cond: is32Bit(c)
	// result: (InvertFlags (CMPQconst x [int32(c)]))
	for {
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_0.AuxInt)
		x := v_1
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(int32(c))
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (CMPQ x y)
	// cond: canonLessThan(x,y)
	// result: (InvertFlags (CMPQ y x))
	for {
		x := v_0
		y := v_1
		if !(canonLessThan(x, y)) {
			break
		}
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg2(y, x)
		v.AddArg(v0)
		return true
	}
	// match: (CMPQ (MOVQconst [x]) (MOVQconst [y]))
	// cond: x==y
	// result: (FlagEQ)
	for {
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := auxIntToInt64(v_0.AuxInt)
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		y := auxIntToInt64(v_1.AuxInt)
		if !(x == y) {
			break
		}
		v.reset(OpAMD64FlagEQ)
		return true
	}
	// match: (CMPQ (MOVQconst [x]) (MOVQconst [y]))
	// cond: x<y && uint64(x)<uint64(y)
	// result: (FlagLT_ULT)
	for {
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := auxIntToInt64(v_0.AuxInt)
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		y := auxIntToInt64(v_1.AuxInt)
		if !(x < y && uint64(x) < uint64(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQ (MOVQconst [x]) (MOVQconst [y]))
	// cond: x<y && uint64(x)>uint64(y)
	// result: (FlagLT_UGT)
	for {
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := auxIntToInt64(v_0.AuxInt)
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		y := auxIntToInt64(v_1.AuxInt)
		if !(x < y && uint64(x) > uint64(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_UGT)
		return true
	}
	// match: (CMPQ (MOVQconst [x]) (MOVQconst [y]))
	// cond: x>y && uint64(x)<uint64(y)
	// result: (FlagGT_ULT)
	for {
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := auxIntToInt64(v_0.AuxInt)
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		y := auxIntToInt64(v_1.AuxInt)
		if !(x > y && uint64(x) < uint64(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_ULT)
		return true
	}
	// match: (CMPQ (MOVQconst [x]) (MOVQconst [y]))
	// cond: x>y && uint64(x)>uint64(y)
	// result: (FlagGT_UGT)
	for {
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := auxIntToInt64(v_0.AuxInt)
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		y := auxIntToInt64(v_1.AuxInt)
		if !(x > y && uint64(x) > uint64(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_UGT)
		return true
	}
	// match: (CMPQ l:(MOVQload {sym} [off] ptr mem) x)
	// cond: canMergeLoad(v, l) && clobber(l)
	// result: (CMPQload {sym} [off] ptr x mem)
	for {
		l := v_0
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		x := v_1
		if !(canMergeLoad(v, l) && clobber(l)) {
			break
		}
		v.reset(OpAMD64CMPQload)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (CMPQ x l:(MOVQload {sym} [off] ptr mem))
	// cond: canMergeLoad(v, l) && clobber(l)
	// result: (InvertFlags (CMPQload {sym} [off] ptr x mem))
	for {
		x := v_0
		l := v_1
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoad(v, l) && clobber(l)) {
			break
		}
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(l.Pos, OpAMD64CMPQload, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg3(ptr, x, mem)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPQconst(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (CMPQconst (MOVQconst [x]) [y])
	// cond: x==int64(y)
	// result: (FlagEQ)
	for {
		y := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := auxIntToInt64(v_0.AuxInt)
		if !(x == int64(y)) {
			break
		}
		v.reset(OpAMD64FlagEQ)
		return true
	}
	// match: (CMPQconst (MOVQconst [x]) [y])
	// cond: x<int64(y) && uint64(x)<uint64(int64(y))
	// result: (FlagLT_ULT)
	for {
		y := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := auxIntToInt64(v_0.AuxInt)
		if !(x < int64(y) && uint64(x) < uint64(int64(y))) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (MOVQconst [x]) [y])
	// cond: x<int64(y) && uint64(x)>uint64(int64(y))
	// result: (FlagLT_UGT)
	for {
		y := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := auxIntToInt64(v_0.AuxInt)
		if !(x < int64(y) && uint64(x) > uint64(int64(y))) {
			break
		}
		v.reset(OpAMD64FlagLT_UGT)
		return true
	}
	// match: (CMPQconst (MOVQconst [x]) [y])
	// cond: x>int64(y) && uint64(x)<uint64(int64(y))
	// result: (FlagGT_ULT)
	for {
		y := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := auxIntToInt64(v_0.AuxInt)
		if !(x > int64(y) && uint64(x) < uint64(int64(y))) {
			break
		}
		v.reset(OpAMD64FlagGT_ULT)
		return true
	}
	// match: (CMPQconst (MOVQconst [x]) [y])
	// cond: x>int64(y) && uint64(x)>uint64(int64(y))
	// result: (FlagGT_UGT)
	for {
		y := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		x := auxIntToInt64(v_0.AuxInt)
		if !(x > int64(y) && uint64(x) > uint64(int64(y))) {
			break
		}
		v.reset(OpAMD64FlagGT_UGT)
		return true
	}
	// match: (CMPQconst (MOVBQZX _) [c])
	// cond: 0xFF < c
	// result: (FlagLT_ULT)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVBQZX || !(0xFF < c) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (MOVWQZX _) [c])
	// cond: 0xFFFF < c
	// result: (FlagLT_ULT)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVWQZX || !(0xFFFF < c) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (SHRQconst _ [c]) [n])
	// cond: 0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)
	// result: (FlagLT_ULT)
	for {
		n := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64SHRQconst {
			break
		}
		c := auxIntToInt8(v_0.AuxInt)
		if !(0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (ANDQconst _ [m]) [n])
	// cond: 0 <= m && m < n
	// result: (FlagLT_ULT)
	for {
		n := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64ANDQconst {
			break
		}
		m := auxIntToInt32(v_0.AuxInt)
		if !(0 <= m && m < n) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst (ANDLconst _ [m]) [n])
	// cond: 0 <= m && m < n
	// result: (FlagLT_ULT)
	for {
		n := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		m := auxIntToInt32(v_0.AuxInt)
		if !(0 <= m && m < n) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPQconst a:(ANDQ x y) [0])
	// cond: a.Uses == 1
	// result: (TESTQ x y)
	for {
		if auxIntToInt32(v.AuxInt) != 0 {
			break
		}
		a := v_0
		if a.Op != OpAMD64ANDQ {
			break
		}
		y := a.Args[1]
		x := a.Args[0]
		if !(a.Uses == 1) {
			break
		}
		v.reset(OpAMD64TESTQ)
		v.AddArg2(x, y)
		return true
	}
	// match: (CMPQconst a:(ANDQconst [c] x) [0])
	// cond: a.Uses == 1
	// result: (TESTQconst [c] x)
	for {
		if auxIntToInt32(v.AuxInt) != 0 {
			break
		}
		a := v_0
		if a.Op != OpAMD64ANDQconst {
			break
		}
		c := auxIntToInt32(a.AuxInt)
		x := a.Args[0]
		if !(a.Uses == 1) {
			break
		}
		v.reset(OpAMD64TESTQconst)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	// match: (CMPQconst x [0])
	// result: (TESTQ x x)
	for {
		if auxIntToInt32(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.reset(OpAMD64TESTQ)
		v.AddArg2(x, x)
		return true
	}
	// match: (CMPQconst l:(MOVQload {sym} [off] ptr mem) [c])
	// cond: l.Uses == 1 && clobber(l)
	// result: @l.Block (CMPQconstload {sym} [makeValAndOff(c,off)] ptr mem)
	for {
		c := auxIntToInt32(v.AuxInt)
		l := v_0
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(l.Uses == 1 && clobber(l)) {
			break
		}
		b = l.Block
		v0 := b.NewValue0(l.Pos, OpAMD64CMPQconstload, types.TypeFlags)
		v.copyOf(v0)
		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(c, off))
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPQconstload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMPQconstload [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2)
	// result: (CMPQconstload [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2)) {
			break
		}
		v.reset(OpAMD64CMPQconstload)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(sym)
		v.AddArg2(base, mem)
		return true
	}
	// match: (CMPQconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
	// result: (CMPQconstload [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64CMPQconstload)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPQload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMPQload [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (CMPQload [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64CMPQload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (CMPQload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (CMPQload [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64CMPQload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (CMPQload {sym} [off] ptr (MOVQconst [c]) mem)
	// cond: validVal(c)
	// result: (CMPQconstload {sym} [makeValAndOff(int32(c),off)] ptr mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		mem := v_2
		if !(validVal(c)) {
			break
		}
		v.reset(OpAMD64CMPQconstload)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPW(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (CMPW x (MOVLconst [c]))
	// result: (CMPWconst x [int16(c)])
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64CMPWconst)
		v.AuxInt = int16ToAuxInt(int16(c))
		v.AddArg(x)
		return true
	}
	// match: (CMPW (MOVLconst [c]) x)
	// result: (InvertFlags (CMPWconst x [int16(c)]))
	for {
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_1
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v0.AuxInt = int16ToAuxInt(int16(c))
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (CMPW x y)
	// cond: canonLessThan(x,y)
	// result: (InvertFlags (CMPW y x))
	for {
		x := v_0
		y := v_1
		if !(canonLessThan(x, y)) {
			break
		}
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg2(y, x)
		v.AddArg(v0)
		return true
	}
	// match: (CMPW l:(MOVWload {sym} [off] ptr mem) x)
	// cond: canMergeLoad(v, l) && clobber(l)
	// result: (CMPWload {sym} [off] ptr x mem)
	for {
		l := v_0
		if l.Op != OpAMD64MOVWload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		x := v_1
		if !(canMergeLoad(v, l) && clobber(l)) {
			break
		}
		v.reset(OpAMD64CMPWload)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (CMPW x l:(MOVWload {sym} [off] ptr mem))
	// cond: canMergeLoad(v, l) && clobber(l)
	// result: (InvertFlags (CMPWload {sym} [off] ptr x mem))
	for {
		x := v_0
		l := v_1
		if l.Op != OpAMD64MOVWload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoad(v, l) && clobber(l)) {
			break
		}
		v.reset(OpAMD64InvertFlags)
		v0 := b.NewValue0(l.Pos, OpAMD64CMPWload, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg3(ptr, x, mem)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPWconst(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (CMPWconst (MOVLconst [x]) [y])
	// cond: int16(x)==y
	// result: (FlagEQ)
	for {
		y := auxIntToInt16(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := auxIntToInt32(v_0.AuxInt)
		if !(int16(x) == y) {
			break
		}
		v.reset(OpAMD64FlagEQ)
		return true
	}
	// match: (CMPWconst (MOVLconst [x]) [y])
	// cond: int16(x)<y && uint16(x)<uint16(y)
	// result: (FlagLT_ULT)
	for {
		y := auxIntToInt16(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := auxIntToInt32(v_0.AuxInt)
		if !(int16(x) < y && uint16(x) < uint16(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPWconst (MOVLconst [x]) [y])
	// cond: int16(x)<y && uint16(x)>uint16(y)
	// result: (FlagLT_UGT)
	for {
		y := auxIntToInt16(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := auxIntToInt32(v_0.AuxInt)
		if !(int16(x) < y && uint16(x) > uint16(y)) {
			break
		}
		v.reset(OpAMD64FlagLT_UGT)
		return true
	}
	// match: (CMPWconst (MOVLconst [x]) [y])
	// cond: int16(x)>y && uint16(x)<uint16(y)
	// result: (FlagGT_ULT)
	for {
		y := auxIntToInt16(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := auxIntToInt32(v_0.AuxInt)
		if !(int16(x) > y && uint16(x) < uint16(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_ULT)
		return true
	}
	// match: (CMPWconst (MOVLconst [x]) [y])
	// cond: int16(x)>y && uint16(x)>uint16(y)
	// result: (FlagGT_UGT)
	for {
		y := auxIntToInt16(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		x := auxIntToInt32(v_0.AuxInt)
		if !(int16(x) > y && uint16(x) > uint16(y)) {
			break
		}
		v.reset(OpAMD64FlagGT_UGT)
		return true
	}
	// match: (CMPWconst (ANDLconst _ [m]) [n])
	// cond: 0 <= int16(m) && int16(m) < n
	// result: (FlagLT_ULT)
	for {
		n := auxIntToInt16(v.AuxInt)
		if v_0.Op != OpAMD64ANDLconst {
			break
		}
		m := auxIntToInt32(v_0.AuxInt)
		if !(0 <= int16(m) && int16(m) < n) {
			break
		}
		v.reset(OpAMD64FlagLT_ULT)
		return true
	}
	// match: (CMPWconst a:(ANDL x y) [0])
	// cond: a.Uses == 1
	// result: (TESTW x y)
	for {
		if auxIntToInt16(v.AuxInt) != 0 {
			break
		}
		a := v_0
		if a.Op != OpAMD64ANDL {
			break
		}
		y := a.Args[1]
		x := a.Args[0]
		if !(a.Uses == 1) {
			break
		}
		v.reset(OpAMD64TESTW)
		v.AddArg2(x, y)
		return true
	}
	// match: (CMPWconst a:(ANDLconst [c] x) [0])
	// cond: a.Uses == 1
	// result: (TESTWconst [int16(c)] x)
	for {
		if auxIntToInt16(v.AuxInt) != 0 {
			break
		}
		a := v_0
		if a.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(a.AuxInt)
		x := a.Args[0]
		if !(a.Uses == 1) {
			break
		}
		v.reset(OpAMD64TESTWconst)
		v.AuxInt = int16ToAuxInt(int16(c))
		v.AddArg(x)
		return true
	}
	// match: (CMPWconst x [0])
	// result: (TESTW x x)
	for {
		if auxIntToInt16(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.reset(OpAMD64TESTW)
		v.AddArg2(x, x)
		return true
	}
	// match: (CMPWconst l:(MOVWload {sym} [off] ptr mem) [c])
	// cond: l.Uses == 1 && clobber(l)
	// result: @l.Block (CMPWconstload {sym} [makeValAndOff(int32(c),off)] ptr mem)
	for {
		c := auxIntToInt16(v.AuxInt)
		l := v_0
		if l.Op != OpAMD64MOVWload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(l.Uses == 1 && clobber(l)) {
			break
		}
		b = l.Block
		v0 := b.NewValue0(l.Pos, OpAMD64CMPWconstload, types.TypeFlags)
		v.copyOf(v0)
		v0.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPWconstload(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMPWconstload [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2)
	// result: (CMPWconstload [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2)) {
			break
		}
		v.reset(OpAMD64CMPWconstload)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(sym)
		v.AddArg2(base, mem)
		return true
	}
	// match: (CMPWconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
	// result: (CMPWconstload [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64CMPWconstload)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPWload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMPWload [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (CMPWload [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64CMPWload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (CMPWload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (CMPWload [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64CMPWload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (CMPWload {sym} [off] ptr (MOVLconst [c]) mem)
	// result: (CMPWconstload {sym} [makeValAndOff(int32(int16(c)),off)] ptr mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		mem := v_2
		v.reset(OpAMD64CMPWconstload)
		v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int16(c)), off))
		v.Aux = symToAux(sym)
		v.AddArg2(ptr, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPXCHGLlock(v *Value) bool {
	v_3 := v.Args[3]
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMPXCHGLlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (CMPXCHGLlock [off1+off2] {sym} ptr old new_ mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		old := v_1
		new_ := v_2
		mem := v_3
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64CMPXCHGLlock)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg4(ptr, old, new_, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64CMPXCHGQlock(v *Value) bool {
	v_3 := v.Args[3]
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (CMPXCHGQlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (CMPXCHGQlock [off1+off2] {sym} ptr old new_ mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		ptr := v_0.Args[0]
		old := v_1
		new_ := v_2
		mem := v_3
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64CMPXCHGQlock)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg4(ptr, old, new_, mem)
		return true
	}
	return false
}
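// DIVSD and DIVSS are not commutative, so only the divisor (the second
// operand) can be merged from memory, via canMergeLoadClobber.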
func rewriteValueAMD64_OpAMD64DIVSD(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (DIVSD x l:(MOVSDload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (DIVSDload x [off] {sym} ptr mem)
	for {
		x := v_0
		l := v_1
		if l.Op != OpAMD64MOVSDload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64DIVSDload)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(x, ptr, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64DIVSDload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (DIVSDload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (DIVSDload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64DIVSDload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (DIVSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (DIVSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64DIVSDload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64DIVSS(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (DIVSS x l:(MOVSSload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (DIVSSload x [off] {sym} ptr mem)
	for {
		x := v_0
		l := v_1
		if l.Op != OpAMD64MOVSSload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
			break
		}
		v.reset(OpAMD64DIVSSload)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(x, ptr, mem)
		return true
	}
	return false
}
8359 func rewriteValueAMD64_OpAMD64DIVSSload(v *Value) bool {
8360 v_2 := v.Args[2]
8361 v_1 := v.Args[1]
8362 v_0 := v.Args[0]
8363
8364
8365
8366 for {
8367 off1 := auxIntToInt32(v.AuxInt)
8368 sym := auxToSym(v.Aux)
8369 val := v_0
8370 if v_1.Op != OpAMD64ADDQconst {
8371 break
8372 }
8373 off2 := auxIntToInt32(v_1.AuxInt)
8374 base := v_1.Args[0]
8375 mem := v_2
8376 if !(is32Bit(int64(off1) + int64(off2))) {
8377 break
8378 }
8379 v.reset(OpAMD64DIVSSload)
8380 v.AuxInt = int32ToAuxInt(off1 + off2)
8381 v.Aux = symToAux(sym)
8382 v.AddArg3(val, base, mem)
8383 return true
8384 }
8385 // match: (DIVSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
8386 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
8387 // result: (DIVSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
8388 for {
8389 off1 := auxIntToInt32(v.AuxInt)
8390 sym1 := auxToSym(v.Aux)
8391 val := v_0
8392 if v_1.Op != OpAMD64LEAQ {
8393 break
8394 }
8395 off2 := auxIntToInt32(v_1.AuxInt)
8396 sym2 := auxToSym(v_1.Aux)
8397 base := v_1.Args[0]
8398 mem := v_2
8399 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
8400 break
8401 }
8402 v.reset(OpAMD64DIVSSload)
8403 v.AuxInt = int32ToAuxInt(off1 + off2)
8404 v.Aux = symToAux(mergeSym(sym1, sym2))
8405 v.AddArg3(val, base, mem)
8406 return true
8407 }
8408 return false
8409 }
8410 func rewriteValueAMD64_OpAMD64HMULL(v *Value) bool {
8411 v_1 := v.Args[1]
8412 v_0 := v.Args[0]
8413 // match: (HMULL x y)
8414 // cond: !x.rematerializeable() && y.rematerializeable()
8415 // result: (HMULL y x)
8416 for {
8417 x := v_0
8418 y := v_1
8419 if !(!x.rematerializeable() && y.rematerializeable()) {
8420 break
8421 }
8422 v.reset(OpAMD64HMULL)
8423 v.AddArg2(y, x)
8424 return true
8425 }
8426 return false
8427 }
8428 func rewriteValueAMD64_OpAMD64HMULLU(v *Value) bool {
8429 v_1 := v.Args[1]
8430 v_0 := v.Args[0]
8431 // match: (HMULLU x y)
8432 // cond: !x.rematerializeable() && y.rematerializeable()
8433 // result: (HMULLU y x)
8434 for {
8435 x := v_0
8436 y := v_1
8437 if !(!x.rematerializeable() && y.rematerializeable()) {
8438 break
8439 }
8440 v.reset(OpAMD64HMULLU)
8441 v.AddArg2(y, x)
8442 return true
8443 }
8444 return false
8445 }
8446 func rewriteValueAMD64_OpAMD64HMULQ(v *Value) bool {
8447 v_1 := v.Args[1]
8448 v_0 := v.Args[0]
8449 // match: (HMULQ x y)
8450 // cond: !x.rematerializeable() && y.rematerializeable()
8451 // result: (HMULQ y x)
8452 for {
8453 x := v_0
8454 y := v_1
8455 if !(!x.rematerializeable() && y.rematerializeable()) {
8456 break
8457 }
8458 v.reset(OpAMD64HMULQ)
8459 v.AddArg2(y, x)
8460 return true
8461 }
8462 return false
8463 }
8464 func rewriteValueAMD64_OpAMD64HMULQU(v *Value) bool {
8465 v_1 := v.Args[1]
8466 v_0 := v.Args[0]
8467 // match: (HMULQU x y)
8468 // cond: !x.rematerializeable() && y.rematerializeable()
8469 // result: (HMULQU y x)
8470 for {
8471 x := v_0
8472 y := v_1
8473 if !(!x.rematerializeable() && y.rematerializeable()) {
8474 break
8475 }
8476 v.reset(OpAMD64HMULQU)
8477 v.AddArg2(y, x)
8478 return true
8479 }
8480 return false
8481 }
8482 func rewriteValueAMD64_OpAMD64LEAL(v *Value) bool {
8483 v_0 := v.Args[0]
8484 // match: (LEAL [c] {s} (ADDLconst [d] x))
8485 // cond: is32Bit(int64(c)+int64(d))
8486 // result: (LEAL [c+d] {s} x)
8487 for {
8488 c := auxIntToInt32(v.AuxInt)
8489 s := auxToSym(v.Aux)
8490 if v_0.Op != OpAMD64ADDLconst {
8491 break
8492 }
8493 d := auxIntToInt32(v_0.AuxInt)
8494 x := v_0.Args[0]
8495 if !(is32Bit(int64(c) + int64(d))) {
8496 break
8497 }
8498 v.reset(OpAMD64LEAL)
8499 v.AuxInt = int32ToAuxInt(c + d)
8500 v.Aux = symToAux(s)
8501 v.AddArg(x)
8502 return true
8503 }
8504 // match: (LEAL [c] {s} (ADDL x y))
8505 // cond: x.Op != OpSB && y.Op != OpSB
8506 // result: (LEAL1 [c] {s} x y)
8507 for {
8508 c := auxIntToInt32(v.AuxInt)
8509 s := auxToSym(v.Aux)
8510 if v_0.Op != OpAMD64ADDL {
8511 break
8512 }
8513 _ = v_0.Args[1]
8514 v_0_0 := v_0.Args[0]
8515 v_0_1 := v_0.Args[1]
8516 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
8517 x := v_0_0
8518 y := v_0_1
8519 if !(x.Op != OpSB && y.Op != OpSB) {
8520 continue
8521 }
8522 v.reset(OpAMD64LEAL1)
8523 v.AuxInt = int32ToAuxInt(c)
8524 v.Aux = symToAux(s)
8525 v.AddArg2(x, y)
8526 return true
8527 }
8528 break
8529 }
8530 return false
8531 }
8532 func rewriteValueAMD64_OpAMD64LEAL1(v *Value) bool {
8533 v_1 := v.Args[1]
8534 v_0 := v.Args[0]
8535 // match: (LEAL1 [c] {s} (ADDLconst [d] x) y)
8536 // cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
8537 // result: (LEAL1 [c+d] {s} x y)
8538 for {
8539 c := auxIntToInt32(v.AuxInt)
8540 s := auxToSym(v.Aux)
8541 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
8542 if v_0.Op != OpAMD64ADDLconst {
8543 continue
8544 }
8545 d := auxIntToInt32(v_0.AuxInt)
8546 x := v_0.Args[0]
8547 y := v_1
8548 if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
8549 continue
8550 }
8551 v.reset(OpAMD64LEAL1)
8552 v.AuxInt = int32ToAuxInt(c + d)
8553 v.Aux = symToAux(s)
8554 v.AddArg2(x, y)
8555 return true
8556 }
8557 break
8558 }
8559 // match: (LEAL1 [c] {s} x z:(ADDL y y))
8560 // cond: x != z
8561 // result: (LEAL2 [c] {s} x y)
8562 for {
8563 c := auxIntToInt32(v.AuxInt)
8564 s := auxToSym(v.Aux)
8565 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
8566 x := v_0
8567 z := v_1
8568 if z.Op != OpAMD64ADDL {
8569 continue
8570 }
8571 y := z.Args[1]
8572 if y != z.Args[0] || !(x != z) {
8573 continue
8574 }
8575 v.reset(OpAMD64LEAL2)
8576 v.AuxInt = int32ToAuxInt(c)
8577 v.Aux = symToAux(s)
8578 v.AddArg2(x, y)
8579 return true
8580 }
8581 break
8582 }
8583 // match: (LEAL1 [c] {s} x (SHLLconst [2] y))
8584 // result: (LEAL4 [c] {s} x y)
8585 for {
8586 c := auxIntToInt32(v.AuxInt)
8587 s := auxToSym(v.Aux)
8588 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
8589 x := v_0
8590 if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 2 {
8591 continue
8592 }
8593 y := v_1.Args[0]
8594 v.reset(OpAMD64LEAL4)
8595 v.AuxInt = int32ToAuxInt(c)
8596 v.Aux = symToAux(s)
8597 v.AddArg2(x, y)
8598 return true
8599 }
8600 break
8601 }
8602 // match: (LEAL1 [c] {s} x (SHLLconst [3] y))
8603 // result: (LEAL8 [c] {s} x y)
8604 for {
8605 c := auxIntToInt32(v.AuxInt)
8606 s := auxToSym(v.Aux)
8607 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
8608 x := v_0
8609 if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 3 {
8610 continue
8611 }
8612 y := v_1.Args[0]
8613 v.reset(OpAMD64LEAL8)
8614 v.AuxInt = int32ToAuxInt(c)
8615 v.Aux = symToAux(s)
8616 v.AddArg2(x, y)
8617 return true
8618 }
8619 break
8620 }
8621 return false
8622 }
8623 func rewriteValueAMD64_OpAMD64LEAL2(v *Value) bool {
8624 v_1 := v.Args[1]
8625 v_0 := v.Args[0]
8626 // match: (LEAL2 [c] {s} (ADDLconst [d] x) y)
8627 // cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
8628 // result: (LEAL2 [c+d] {s} x y)
8629 for {
8630 c := auxIntToInt32(v.AuxInt)
8631 s := auxToSym(v.Aux)
8632 if v_0.Op != OpAMD64ADDLconst {
8633 break
8634 }
8635 d := auxIntToInt32(v_0.AuxInt)
8636 x := v_0.Args[0]
8637 y := v_1
8638 if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
8639 break
8640 }
8641 v.reset(OpAMD64LEAL2)
8642 v.AuxInt = int32ToAuxInt(c + d)
8643 v.Aux = symToAux(s)
8644 v.AddArg2(x, y)
8645 return true
8646 }
8647 // match: (LEAL2 [c] {s} x (ADDLconst [d] y))
8648 // cond: is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB
8649 // result: (LEAL2 [c+2*d] {s} x y)
8650 for {
8651 c := auxIntToInt32(v.AuxInt)
8652 s := auxToSym(v.Aux)
8653 x := v_0
8654 if v_1.Op != OpAMD64ADDLconst {
8655 break
8656 }
8657 d := auxIntToInt32(v_1.AuxInt)
8658 y := v_1.Args[0]
8659 if !(is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB) {
8660 break
8661 }
8662 v.reset(OpAMD64LEAL2)
8663 v.AuxInt = int32ToAuxInt(c + 2*d)
8664 v.Aux = symToAux(s)
8665 v.AddArg2(x, y)
8666 return true
8667 }
8668 // match: (LEAL2 [c] {s} x z:(ADDL y y))
8669 // cond: x != z
8670 // result: (LEAL4 [c] {s} x y)
8671 for {
8672 c := auxIntToInt32(v.AuxInt)
8673 s := auxToSym(v.Aux)
8674 x := v_0
8675 z := v_1
8676 if z.Op != OpAMD64ADDL {
8677 break
8678 }
8679 y := z.Args[1]
8680 if y != z.Args[0] || !(x != z) {
8681 break
8682 }
8683 v.reset(OpAMD64LEAL4)
8684 v.AuxInt = int32ToAuxInt(c)
8685 v.Aux = symToAux(s)
8686 v.AddArg2(x, y)
8687 return true
8688 }
8689 // match: (LEAL2 [c] {s} x (SHLLconst [2] y))
8690 // result: (LEAL8 [c] {s} x y)
8691 for {
8692 c := auxIntToInt32(v.AuxInt)
8693 s := auxToSym(v.Aux)
8694 x := v_0
8695 if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 2 {
8696 break
8697 }
8698 y := v_1.Args[0]
8699 v.reset(OpAMD64LEAL8)
8700 v.AuxInt = int32ToAuxInt(c)
8701 v.Aux = symToAux(s)
8702 v.AddArg2(x, y)
8703 return true
8704 }
8705 // match: (LEAL2 [0] {s} (ADDL x x) x)
8706 // cond: s == nil
8707 // result: (SHLLconst [2] x)
8708 for {
8709 if auxIntToInt32(v.AuxInt) != 0 {
8710 break
8711 }
8712 s := auxToSym(v.Aux)
8713 if v_0.Op != OpAMD64ADDL {
8714 break
8715 }
8716 x := v_0.Args[1]
8717 if x != v_0.Args[0] || x != v_1 || !(s == nil) {
8718 break
8719 }
8720 v.reset(OpAMD64SHLLconst)
8721 v.AuxInt = int8ToAuxInt(2)
8722 v.AddArg(x)
8723 return true
8724 }
8725 return false
8726 }
8727 func rewriteValueAMD64_OpAMD64LEAL4(v *Value) bool {
8728 v_1 := v.Args[1]
8729 v_0 := v.Args[0]
8730 // match: (LEAL4 [c] {s} (ADDLconst [d] x) y)
8731 // cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
8732 // result: (LEAL4 [c+d] {s} x y)
8733 for {
8734 c := auxIntToInt32(v.AuxInt)
8735 s := auxToSym(v.Aux)
8736 if v_0.Op != OpAMD64ADDLconst {
8737 break
8738 }
8739 d := auxIntToInt32(v_0.AuxInt)
8740 x := v_0.Args[0]
8741 y := v_1
8742 if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
8743 break
8744 }
8745 v.reset(OpAMD64LEAL4)
8746 v.AuxInt = int32ToAuxInt(c + d)
8747 v.Aux = symToAux(s)
8748 v.AddArg2(x, y)
8749 return true
8750 }
8751 // match: (LEAL4 [c] {s} x (ADDLconst [d] y))
8752 // cond: is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB
8753 // result: (LEAL4 [c+4*d] {s} x y)
8754 for {
8755 c := auxIntToInt32(v.AuxInt)
8756 s := auxToSym(v.Aux)
8757 x := v_0
8758 if v_1.Op != OpAMD64ADDLconst {
8759 break
8760 }
8761 d := auxIntToInt32(v_1.AuxInt)
8762 y := v_1.Args[0]
8763 if !(is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB) {
8764 break
8765 }
8766 v.reset(OpAMD64LEAL4)
8767 v.AuxInt = int32ToAuxInt(c + 4*d)
8768 v.Aux = symToAux(s)
8769 v.AddArg2(x, y)
8770 return true
8771 }
8772 // match: (LEAL4 [c] {s} x z:(ADDL y y))
8773 // cond: x != z
8774 // result: (LEAL8 [c] {s} x y)
8775 for {
8776 c := auxIntToInt32(v.AuxInt)
8777 s := auxToSym(v.Aux)
8778 x := v_0
8779 z := v_1
8780 if z.Op != OpAMD64ADDL {
8781 break
8782 }
8783 y := z.Args[1]
8784 if y != z.Args[0] || !(x != z) {
8785 break
8786 }
8787 v.reset(OpAMD64LEAL8)
8788 v.AuxInt = int32ToAuxInt(c)
8789 v.Aux = symToAux(s)
8790 v.AddArg2(x, y)
8791 return true
8792 }
8793 return false
8794 }
8795 func rewriteValueAMD64_OpAMD64LEAL8(v *Value) bool {
8796 v_1 := v.Args[1]
8797 v_0 := v.Args[0]
8798 // match: (LEAL8 [c] {s} (ADDLconst [d] x) y)
8799 // cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
8800 // result: (LEAL8 [c+d] {s} x y)
8801 for {
8802 c := auxIntToInt32(v.AuxInt)
8803 s := auxToSym(v.Aux)
8804 if v_0.Op != OpAMD64ADDLconst {
8805 break
8806 }
8807 d := auxIntToInt32(v_0.AuxInt)
8808 x := v_0.Args[0]
8809 y := v_1
8810 if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
8811 break
8812 }
8813 v.reset(OpAMD64LEAL8)
8814 v.AuxInt = int32ToAuxInt(c + d)
8815 v.Aux = symToAux(s)
8816 v.AddArg2(x, y)
8817 return true
8818 }
8819 // match: (LEAL8 [c] {s} x (ADDLconst [d] y))
8820 // cond: is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB
8821 // result: (LEAL8 [c+8*d] {s} x y)
8822 for {
8823 c := auxIntToInt32(v.AuxInt)
8824 s := auxToSym(v.Aux)
8825 x := v_0
8826 if v_1.Op != OpAMD64ADDLconst {
8827 break
8828 }
8829 d := auxIntToInt32(v_1.AuxInt)
8830 y := v_1.Args[0]
8831 if !(is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB) {
8832 break
8833 }
8834 v.reset(OpAMD64LEAL8)
8835 v.AuxInt = int32ToAuxInt(c + 8*d)
8836 v.Aux = symToAux(s)
8837 v.AddArg2(x, y)
8838 return true
8839 }
8840 return false
8841 }
8842 func rewriteValueAMD64_OpAMD64LEAQ(v *Value) bool {
8843 v_0 := v.Args[0]
8844 // match: (LEAQ [c] {s} (ADDQconst [d] x))
8845 // cond: is32Bit(int64(c)+int64(d))
8846 // result: (LEAQ [c+d] {s} x)
8847 for {
8848 c := auxIntToInt32(v.AuxInt)
8849 s := auxToSym(v.Aux)
8850 if v_0.Op != OpAMD64ADDQconst {
8851 break
8852 }
8853 d := auxIntToInt32(v_0.AuxInt)
8854 x := v_0.Args[0]
8855 if !(is32Bit(int64(c) + int64(d))) {
8856 break
8857 }
8858 v.reset(OpAMD64LEAQ)
8859 v.AuxInt = int32ToAuxInt(c + d)
8860 v.Aux = symToAux(s)
8861 v.AddArg(x)
8862 return true
8863 }
8864 // match: (LEAQ [c] {s} (ADDQ x y))
8865 // cond: x.Op != OpSB && y.Op != OpSB
8866 // result: (LEAQ1 [c] {s} x y)
8867 for {
8868 c := auxIntToInt32(v.AuxInt)
8869 s := auxToSym(v.Aux)
8870 if v_0.Op != OpAMD64ADDQ {
8871 break
8872 }
8873 _ = v_0.Args[1]
8874 v_0_0 := v_0.Args[0]
8875 v_0_1 := v_0.Args[1]
8876 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
8877 x := v_0_0
8878 y := v_0_1
8879 if !(x.Op != OpSB && y.Op != OpSB) {
8880 continue
8881 }
8882 v.reset(OpAMD64LEAQ1)
8883 v.AuxInt = int32ToAuxInt(c)
8884 v.Aux = symToAux(s)
8885 v.AddArg2(x, y)
8886 return true
8887 }
8888 break
8889 }
8890 // match: (LEAQ [off1] {sym1} (LEAQ [off2] {sym2} x))
8891 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
8892 // result: (LEAQ [off1+off2] {mergeSym(sym1,sym2)} x)
8893 for {
8894 off1 := auxIntToInt32(v.AuxInt)
8895 sym1 := auxToSym(v.Aux)
8896 if v_0.Op != OpAMD64LEAQ {
8897 break
8898 }
8899 off2 := auxIntToInt32(v_0.AuxInt)
8900 sym2 := auxToSym(v_0.Aux)
8901 x := v_0.Args[0]
8902 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
8903 break
8904 }
8905 v.reset(OpAMD64LEAQ)
8906 v.AuxInt = int32ToAuxInt(off1 + off2)
8907 v.Aux = symToAux(mergeSym(sym1, sym2))
8908 v.AddArg(x)
8909 return true
8910 }
8911 // match: (LEAQ [off1] {sym1} (LEAQ1 [off2] {sym2} x y))
8912 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
8913 // result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
8914 for {
8915 off1 := auxIntToInt32(v.AuxInt)
8916 sym1 := auxToSym(v.Aux)
8917 if v_0.Op != OpAMD64LEAQ1 {
8918 break
8919 }
8920 off2 := auxIntToInt32(v_0.AuxInt)
8921 sym2 := auxToSym(v_0.Aux)
8922 y := v_0.Args[1]
8923 x := v_0.Args[0]
8924 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
8925 break
8926 }
8927 v.reset(OpAMD64LEAQ1)
8928 v.AuxInt = int32ToAuxInt(off1 + off2)
8929 v.Aux = symToAux(mergeSym(sym1, sym2))
8930 v.AddArg2(x, y)
8931 return true
8932 }
8933 // match: (LEAQ [off1] {sym1} (LEAQ2 [off2] {sym2} x y))
8934 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
8935 // result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
8936 for {
8937 off1 := auxIntToInt32(v.AuxInt)
8938 sym1 := auxToSym(v.Aux)
8939 if v_0.Op != OpAMD64LEAQ2 {
8940 break
8941 }
8942 off2 := auxIntToInt32(v_0.AuxInt)
8943 sym2 := auxToSym(v_0.Aux)
8944 y := v_0.Args[1]
8945 x := v_0.Args[0]
8946 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
8947 break
8948 }
8949 v.reset(OpAMD64LEAQ2)
8950 v.AuxInt = int32ToAuxInt(off1 + off2)
8951 v.Aux = symToAux(mergeSym(sym1, sym2))
8952 v.AddArg2(x, y)
8953 return true
8954 }
8955 // match: (LEAQ [off1] {sym1} (LEAQ4 [off2] {sym2} x y))
8956 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
8957 // result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
8958 for {
8959 off1 := auxIntToInt32(v.AuxInt)
8960 sym1 := auxToSym(v.Aux)
8961 if v_0.Op != OpAMD64LEAQ4 {
8962 break
8963 }
8964 off2 := auxIntToInt32(v_0.AuxInt)
8965 sym2 := auxToSym(v_0.Aux)
8966 y := v_0.Args[1]
8967 x := v_0.Args[0]
8968 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
8969 break
8970 }
8971 v.reset(OpAMD64LEAQ4)
8972 v.AuxInt = int32ToAuxInt(off1 + off2)
8973 v.Aux = symToAux(mergeSym(sym1, sym2))
8974 v.AddArg2(x, y)
8975 return true
8976 }
8977 // match: (LEAQ [off1] {sym1} (LEAQ8 [off2] {sym2} x y))
8978 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
8979 // result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
8980 for {
8981 off1 := auxIntToInt32(v.AuxInt)
8982 sym1 := auxToSym(v.Aux)
8983 if v_0.Op != OpAMD64LEAQ8 {
8984 break
8985 }
8986 off2 := auxIntToInt32(v_0.AuxInt)
8987 sym2 := auxToSym(v_0.Aux)
8988 y := v_0.Args[1]
8989 x := v_0.Args[0]
8990 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
8991 break
8992 }
8993 v.reset(OpAMD64LEAQ8)
8994 v.AuxInt = int32ToAuxInt(off1 + off2)
8995 v.Aux = symToAux(mergeSym(sym1, sym2))
8996 v.AddArg2(x, y)
8997 return true
8998 }
8999 return false
9000 }
9001 func rewriteValueAMD64_OpAMD64LEAQ1(v *Value) bool {
9002 v_1 := v.Args[1]
9003 v_0 := v.Args[0]
9004 // match: (LEAQ1 [c] {s} (ADDQconst [d] x) y)
9005 // cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
9006 // result: (LEAQ1 [c+d] {s} x y)
9007 for {
9008 c := auxIntToInt32(v.AuxInt)
9009 s := auxToSym(v.Aux)
9010 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
9011 if v_0.Op != OpAMD64ADDQconst {
9012 continue
9013 }
9014 d := auxIntToInt32(v_0.AuxInt)
9015 x := v_0.Args[0]
9016 y := v_1
9017 if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
9018 continue
9019 }
9020 v.reset(OpAMD64LEAQ1)
9021 v.AuxInt = int32ToAuxInt(c + d)
9022 v.Aux = symToAux(s)
9023 v.AddArg2(x, y)
9024 return true
9025 }
9026 break
9027 }
9028 // match: (LEAQ1 [c] {s} x z:(ADDQ y y))
9029 // cond: x != z
9030 // result: (LEAQ2 [c] {s} x y)
9031 for {
9032 c := auxIntToInt32(v.AuxInt)
9033 s := auxToSym(v.Aux)
9034 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
9035 x := v_0
9036 z := v_1
9037 if z.Op != OpAMD64ADDQ {
9038 continue
9039 }
9040 y := z.Args[1]
9041 if y != z.Args[0] || !(x != z) {
9042 continue
9043 }
9044 v.reset(OpAMD64LEAQ2)
9045 v.AuxInt = int32ToAuxInt(c)
9046 v.Aux = symToAux(s)
9047 v.AddArg2(x, y)
9048 return true
9049 }
9050 break
9051 }
9052 // match: (LEAQ1 [c] {s} x (SHLQconst [2] y))
9053 // result: (LEAQ4 [c] {s} x y)
9054 for {
9055 c := auxIntToInt32(v.AuxInt)
9056 s := auxToSym(v.Aux)
9057 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
9058 x := v_0
9059 if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 2 {
9060 continue
9061 }
9062 y := v_1.Args[0]
9063 v.reset(OpAMD64LEAQ4)
9064 v.AuxInt = int32ToAuxInt(c)
9065 v.Aux = symToAux(s)
9066 v.AddArg2(x, y)
9067 return true
9068 }
9069 break
9070 }
9071 // match: (LEAQ1 [c] {s} x (SHLQconst [3] y))
9072 // result: (LEAQ8 [c] {s} x y)
9073 for {
9074 c := auxIntToInt32(v.AuxInt)
9075 s := auxToSym(v.Aux)
9076 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
9077 x := v_0
9078 if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 3 {
9079 continue
9080 }
9081 y := v_1.Args[0]
9082 v.reset(OpAMD64LEAQ8)
9083 v.AuxInt = int32ToAuxInt(c)
9084 v.Aux = symToAux(s)
9085 v.AddArg2(x, y)
9086 return true
9087 }
9088 break
9089 }
9090 // match: (LEAQ1 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
9091 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB
9092 // result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
9093 for {
9094 off1 := auxIntToInt32(v.AuxInt)
9095 sym1 := auxToSym(v.Aux)
9096 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
9097 if v_0.Op != OpAMD64LEAQ {
9098 continue
9099 }
9100 off2 := auxIntToInt32(v_0.AuxInt)
9101 sym2 := auxToSym(v_0.Aux)
9102 x := v_0.Args[0]
9103 y := v_1
9104 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
9105 continue
9106 }
9107 v.reset(OpAMD64LEAQ1)
9108 v.AuxInt = int32ToAuxInt(off1 + off2)
9109 v.Aux = symToAux(mergeSym(sym1, sym2))
9110 v.AddArg2(x, y)
9111 return true
9112 }
9113 break
9114 }
9115 // match: (LEAQ1 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y))
9116 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
9117 // result: (LEAQ2 [off1+off2] {mergeSym(sym1, sym2)} x y)
9118 for {
9119 off1 := auxIntToInt32(v.AuxInt)
9120 sym1 := auxToSym(v.Aux)
9121 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
9122 x := v_0
9123 if v_1.Op != OpAMD64LEAQ1 {
9124 continue
9125 }
9126 off2 := auxIntToInt32(v_1.AuxInt)
9127 sym2 := auxToSym(v_1.Aux)
9128 y := v_1.Args[1]
9129 if y != v_1.Args[0] || !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
9130 continue
9131 }
9132 v.reset(OpAMD64LEAQ2)
9133 v.AuxInt = int32ToAuxInt(off1 + off2)
9134 v.Aux = symToAux(mergeSym(sym1, sym2))
9135 v.AddArg2(x, y)
9136 return true
9137 }
9138 break
9139 }
9140 // match: (LEAQ1 [off1] {sym1} x (LEAQ1 [off2] {sym2} x y))
9141 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
9142 // result: (LEAQ2 [off1+off2] {mergeSym(sym1, sym2)} y x)
9143 for {
9144 off1 := auxIntToInt32(v.AuxInt)
9145 sym1 := auxToSym(v.Aux)
9146 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
9147 x := v_0
9148 if v_1.Op != OpAMD64LEAQ1 {
9149 continue
9150 }
9151 off2 := auxIntToInt32(v_1.AuxInt)
9152 sym2 := auxToSym(v_1.Aux)
9153 _ = v_1.Args[1]
9154 v_1_0 := v_1.Args[0]
9155 v_1_1 := v_1.Args[1]
9156 for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
9157 if x != v_1_0 {
9158 continue
9159 }
9160 y := v_1_1
9161 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
9162 continue
9163 }
9164 v.reset(OpAMD64LEAQ2)
9165 v.AuxInt = int32ToAuxInt(off1 + off2)
9166 v.Aux = symToAux(mergeSym(sym1, sym2))
9167 v.AddArg2(y, x)
9168 return true
9169 }
9170 }
9171 break
9172 }
9173 // match: (LEAQ1 [0] x y)
9174 // cond: v.Aux == nil
9175 // result: (ADDQ x y)
9176 for {
9177 if auxIntToInt32(v.AuxInt) != 0 {
9178 break
9179 }
9180 x := v_0
9181 y := v_1
9182 if !(v.Aux == nil) {
9183 break
9184 }
9185 v.reset(OpAMD64ADDQ)
9186 v.AddArg2(x, y)
9187 return true
9188 }
9189 return false
9190 }
9191 func rewriteValueAMD64_OpAMD64LEAQ2(v *Value) bool {
9192 v_1 := v.Args[1]
9193 v_0 := v.Args[0]
9194 // match: (LEAQ2 [c] {s} (ADDQconst [d] x) y)
9195 // cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
9196 // result: (LEAQ2 [c+d] {s} x y)
9197 for {
9198 c := auxIntToInt32(v.AuxInt)
9199 s := auxToSym(v.Aux)
9200 if v_0.Op != OpAMD64ADDQconst {
9201 break
9202 }
9203 d := auxIntToInt32(v_0.AuxInt)
9204 x := v_0.Args[0]
9205 y := v_1
9206 if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
9207 break
9208 }
9209 v.reset(OpAMD64LEAQ2)
9210 v.AuxInt = int32ToAuxInt(c + d)
9211 v.Aux = symToAux(s)
9212 v.AddArg2(x, y)
9213 return true
9214 }
9215 // match: (LEAQ2 [c] {s} x (ADDQconst [d] y))
9216 // cond: is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB
9217 // result: (LEAQ2 [c+2*d] {s} x y)
9218 for {
9219 c := auxIntToInt32(v.AuxInt)
9220 s := auxToSym(v.Aux)
9221 x := v_0
9222 if v_1.Op != OpAMD64ADDQconst {
9223 break
9224 }
9225 d := auxIntToInt32(v_1.AuxInt)
9226 y := v_1.Args[0]
9227 if !(is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB) {
9228 break
9229 }
9230 v.reset(OpAMD64LEAQ2)
9231 v.AuxInt = int32ToAuxInt(c + 2*d)
9232 v.Aux = symToAux(s)
9233 v.AddArg2(x, y)
9234 return true
9235 }
9236 // match: (LEAQ2 [c] {s} x z:(ADDQ y y))
9237 // cond: x != z
9238 // result: (LEAQ4 [c] {s} x y)
9239 for {
9240 c := auxIntToInt32(v.AuxInt)
9241 s := auxToSym(v.Aux)
9242 x := v_0
9243 z := v_1
9244 if z.Op != OpAMD64ADDQ {
9245 break
9246 }
9247 y := z.Args[1]
9248 if y != z.Args[0] || !(x != z) {
9249 break
9250 }
9251 v.reset(OpAMD64LEAQ4)
9252 v.AuxInt = int32ToAuxInt(c)
9253 v.Aux = symToAux(s)
9254 v.AddArg2(x, y)
9255 return true
9256 }
9257 // match: (LEAQ2 [c] {s} x (SHLQconst [2] y))
9258 // result: (LEAQ8 [c] {s} x y)
9259 for {
9260 c := auxIntToInt32(v.AuxInt)
9261 s := auxToSym(v.Aux)
9262 x := v_0
9263 if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 2 {
9264 break
9265 }
9266 y := v_1.Args[0]
9267 v.reset(OpAMD64LEAQ8)
9268 v.AuxInt = int32ToAuxInt(c)
9269 v.Aux = symToAux(s)
9270 v.AddArg2(x, y)
9271 return true
9272 }
9273 // match: (LEAQ2 [0] {s} (ADDQ x x) x)
9274 // cond: s == nil
9275 // result: (SHLQconst [2] x)
9276 for {
9277 if auxIntToInt32(v.AuxInt) != 0 {
9278 break
9279 }
9280 s := auxToSym(v.Aux)
9281 if v_0.Op != OpAMD64ADDQ {
9282 break
9283 }
9284 x := v_0.Args[1]
9285 if x != v_0.Args[0] || x != v_1 || !(s == nil) {
9286 break
9287 }
9288 v.reset(OpAMD64SHLQconst)
9289 v.AuxInt = int8ToAuxInt(2)
9290 v.AddArg(x)
9291 return true
9292 }
9293 // match: (LEAQ2 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
9294 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB
9295 // result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
9296 for {
9297 off1 := auxIntToInt32(v.AuxInt)
9298 sym1 := auxToSym(v.Aux)
9299 if v_0.Op != OpAMD64LEAQ {
9300 break
9301 }
9302 off2 := auxIntToInt32(v_0.AuxInt)
9303 sym2 := auxToSym(v_0.Aux)
9304 x := v_0.Args[0]
9305 y := v_1
9306 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
9307 break
9308 }
9309 v.reset(OpAMD64LEAQ2)
9310 v.AuxInt = int32ToAuxInt(off1 + off2)
9311 v.Aux = symToAux(mergeSym(sym1, sym2))
9312 v.AddArg2(x, y)
9313 return true
9314 }
9315 // match: (LEAQ2 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y))
9316 // cond: is32Bit(int64(off1)+2*int64(off2)) && sym2 == nil
9317 // result: (LEAQ4 [off1+2*off2] {sym1} x y)
9318 for {
9319 off1 := auxIntToInt32(v.AuxInt)
9320 sym1 := auxToSym(v.Aux)
9321 x := v_0
9322 if v_1.Op != OpAMD64LEAQ1 {
9323 break
9324 }
9325 off2 := auxIntToInt32(v_1.AuxInt)
9326 sym2 := auxToSym(v_1.Aux)
9327 y := v_1.Args[1]
9328 if y != v_1.Args[0] || !(is32Bit(int64(off1)+2*int64(off2)) && sym2 == nil) {
9329 break
9330 }
9331 v.reset(OpAMD64LEAQ4)
9332 v.AuxInt = int32ToAuxInt(off1 + 2*off2)
9333 v.Aux = symToAux(sym1)
9334 v.AddArg2(x, y)
9335 return true
9336 }
9337 // match: (LEAQ2 [off] {sym} x (MOVQconst [scale]))
9338 // cond: is32Bit(int64(off)+int64(scale)*2)
9339 // result: (LEAQ [off+int32(scale)*2] {sym} x)
9340 for {
9341 off := auxIntToInt32(v.AuxInt)
9342 sym := auxToSym(v.Aux)
9343 x := v_0
9344 if v_1.Op != OpAMD64MOVQconst {
9345 break
9346 }
9347 scale := auxIntToInt64(v_1.AuxInt)
9348 if !(is32Bit(int64(off) + int64(scale)*2)) {
9349 break
9350 }
9351 v.reset(OpAMD64LEAQ)
9352 v.AuxInt = int32ToAuxInt(off + int32(scale)*2)
9353 v.Aux = symToAux(sym)
9354 v.AddArg(x)
9355 return true
9356 }
9357 // match: (LEAQ2 [off] {sym} x (MOVLconst [scale]))
9358 // cond: is32Bit(int64(off)+int64(scale)*2)
9359 // result: (LEAQ [off+int32(scale)*2] {sym} x)
9360 for {
9361 off := auxIntToInt32(v.AuxInt)
9362 sym := auxToSym(v.Aux)
9363 x := v_0
9364 if v_1.Op != OpAMD64MOVLconst {
9365 break
9366 }
9367 scale := auxIntToInt32(v_1.AuxInt)
9368 if !(is32Bit(int64(off) + int64(scale)*2)) {
9369 break
9370 }
9371 v.reset(OpAMD64LEAQ)
9372 v.AuxInt = int32ToAuxInt(off + int32(scale)*2)
9373 v.Aux = symToAux(sym)
9374 v.AddArg(x)
9375 return true
9376 }
9377 return false
9378 }
9379 func rewriteValueAMD64_OpAMD64LEAQ4(v *Value) bool {
9380 v_1 := v.Args[1]
9381 v_0 := v.Args[0]
9382 // match: (LEAQ4 [c] {s} (ADDQconst [d] x) y)
9383 // cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
9384 // result: (LEAQ4 [c+d] {s} x y)
9385 for {
9386 c := auxIntToInt32(v.AuxInt)
9387 s := auxToSym(v.Aux)
9388 if v_0.Op != OpAMD64ADDQconst {
9389 break
9390 }
9391 d := auxIntToInt32(v_0.AuxInt)
9392 x := v_0.Args[0]
9393 y := v_1
9394 if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
9395 break
9396 }
9397 v.reset(OpAMD64LEAQ4)
9398 v.AuxInt = int32ToAuxInt(c + d)
9399 v.Aux = symToAux(s)
9400 v.AddArg2(x, y)
9401 return true
9402 }
9403 // match: (LEAQ4 [c] {s} x (ADDQconst [d] y))
9404 // cond: is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB
9405 // result: (LEAQ4 [c+4*d] {s} x y)
9406 for {
9407 c := auxIntToInt32(v.AuxInt)
9408 s := auxToSym(v.Aux)
9409 x := v_0
9410 if v_1.Op != OpAMD64ADDQconst {
9411 break
9412 }
9413 d := auxIntToInt32(v_1.AuxInt)
9414 y := v_1.Args[0]
9415 if !(is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB) {
9416 break
9417 }
9418 v.reset(OpAMD64LEAQ4)
9419 v.AuxInt = int32ToAuxInt(c + 4*d)
9420 v.Aux = symToAux(s)
9421 v.AddArg2(x, y)
9422 return true
9423 }
9424 // match: (LEAQ4 [c] {s} x z:(ADDQ y y))
9425 // cond: x != z
9426 // result: (LEAQ8 [c] {s} x y)
9427 for {
9428 c := auxIntToInt32(v.AuxInt)
9429 s := auxToSym(v.Aux)
9430 x := v_0
9431 z := v_1
9432 if z.Op != OpAMD64ADDQ {
9433 break
9434 }
9435 y := z.Args[1]
9436 if y != z.Args[0] || !(x != z) {
9437 break
9438 }
9439 v.reset(OpAMD64LEAQ8)
9440 v.AuxInt = int32ToAuxInt(c)
9441 v.Aux = symToAux(s)
9442 v.AddArg2(x, y)
9443 return true
9444 }
9445 // match: (LEAQ4 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
9446 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB
9447 // result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
9448 for {
9449 off1 := auxIntToInt32(v.AuxInt)
9450 sym1 := auxToSym(v.Aux)
9451 if v_0.Op != OpAMD64LEAQ {
9452 break
9453 }
9454 off2 := auxIntToInt32(v_0.AuxInt)
9455 sym2 := auxToSym(v_0.Aux)
9456 x := v_0.Args[0]
9457 y := v_1
9458 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
9459 break
9460 }
9461 v.reset(OpAMD64LEAQ4)
9462 v.AuxInt = int32ToAuxInt(off1 + off2)
9463 v.Aux = symToAux(mergeSym(sym1, sym2))
9464 v.AddArg2(x, y)
9465 return true
9466 }
9467 // match: (LEAQ4 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y))
9468 // cond: is32Bit(int64(off1)+4*int64(off2)) && sym2 == nil
9469 // result: (LEAQ8 [off1+4*off2] {sym1} x y)
9470 for {
9471 off1 := auxIntToInt32(v.AuxInt)
9472 sym1 := auxToSym(v.Aux)
9473 x := v_0
9474 if v_1.Op != OpAMD64LEAQ1 {
9475 break
9476 }
9477 off2 := auxIntToInt32(v_1.AuxInt)
9478 sym2 := auxToSym(v_1.Aux)
9479 y := v_1.Args[1]
9480 if y != v_1.Args[0] || !(is32Bit(int64(off1)+4*int64(off2)) && sym2 == nil) {
9481 break
9482 }
9483 v.reset(OpAMD64LEAQ8)
9484 v.AuxInt = int32ToAuxInt(off1 + 4*off2)
9485 v.Aux = symToAux(sym1)
9486 v.AddArg2(x, y)
9487 return true
9488 }
9489 // match: (LEAQ4 [off] {sym} x (MOVQconst [scale]))
9490 // cond: is32Bit(int64(off)+int64(scale)*4)
9491 // result: (LEAQ [off+int32(scale)*4] {sym} x)
9492 for {
9493 off := auxIntToInt32(v.AuxInt)
9494 sym := auxToSym(v.Aux)
9495 x := v_0
9496 if v_1.Op != OpAMD64MOVQconst {
9497 break
9498 }
9499 scale := auxIntToInt64(v_1.AuxInt)
9500 if !(is32Bit(int64(off) + int64(scale)*4)) {
9501 break
9502 }
9503 v.reset(OpAMD64LEAQ)
9504 v.AuxInt = int32ToAuxInt(off + int32(scale)*4)
9505 v.Aux = symToAux(sym)
9506 v.AddArg(x)
9507 return true
9508 }
9509 // match: (LEAQ4 [off] {sym} x (MOVLconst [scale]))
9510 // cond: is32Bit(int64(off)+int64(scale)*4)
9511 // result: (LEAQ [off+int32(scale)*4] {sym} x)
9512 for {
9513 off := auxIntToInt32(v.AuxInt)
9514 sym := auxToSym(v.Aux)
9515 x := v_0
9516 if v_1.Op != OpAMD64MOVLconst {
9517 break
9518 }
9519 scale := auxIntToInt32(v_1.AuxInt)
9520 if !(is32Bit(int64(off) + int64(scale)*4)) {
9521 break
9522 }
9523 v.reset(OpAMD64LEAQ)
9524 v.AuxInt = int32ToAuxInt(off + int32(scale)*4)
9525 v.Aux = symToAux(sym)
9526 v.AddArg(x)
9527 return true
9528 }
9529 return false
9530 }
9531 func rewriteValueAMD64_OpAMD64LEAQ8(v *Value) bool {
9532 v_1 := v.Args[1]
9533 v_0 := v.Args[0]
9534 // match: (LEAQ8 [c] {s} (ADDQconst [d] x) y)
9535 // cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
9536 // result: (LEAQ8 [c+d] {s} x y)
9537 for {
9538 c := auxIntToInt32(v.AuxInt)
9539 s := auxToSym(v.Aux)
9540 if v_0.Op != OpAMD64ADDQconst {
9541 break
9542 }
9543 d := auxIntToInt32(v_0.AuxInt)
9544 x := v_0.Args[0]
9545 y := v_1
9546 if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
9547 break
9548 }
9549 v.reset(OpAMD64LEAQ8)
9550 v.AuxInt = int32ToAuxInt(c + d)
9551 v.Aux = symToAux(s)
9552 v.AddArg2(x, y)
9553 return true
9554 }
9555 // match: (LEAQ8 [c] {s} x (ADDQconst [d] y))
9556 // cond: is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB
9557 // result: (LEAQ8 [c+8*d] {s} x y)
9558 for {
9559 c := auxIntToInt32(v.AuxInt)
9560 s := auxToSym(v.Aux)
9561 x := v_0
9562 if v_1.Op != OpAMD64ADDQconst {
9563 break
9564 }
9565 d := auxIntToInt32(v_1.AuxInt)
9566 y := v_1.Args[0]
9567 if !(is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB) {
9568 break
9569 }
9570 v.reset(OpAMD64LEAQ8)
9571 v.AuxInt = int32ToAuxInt(c + 8*d)
9572 v.Aux = symToAux(s)
9573 v.AddArg2(x, y)
9574 return true
9575 }
9576 // match: (LEAQ8 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
9577 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB
9578 // result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
9579 for {
9580 off1 := auxIntToInt32(v.AuxInt)
9581 sym1 := auxToSym(v.Aux)
9582 if v_0.Op != OpAMD64LEAQ {
9583 break
9584 }
9585 off2 := auxIntToInt32(v_0.AuxInt)
9586 sym2 := auxToSym(v_0.Aux)
9587 x := v_0.Args[0]
9588 y := v_1
9589 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
9590 break
9591 }
9592 v.reset(OpAMD64LEAQ8)
9593 v.AuxInt = int32ToAuxInt(off1 + off2)
9594 v.Aux = symToAux(mergeSym(sym1, sym2))
9595 v.AddArg2(x, y)
9596 return true
9597 }
9598 // match: (LEAQ8 [off] {sym} x (MOVQconst [scale]))
9599 // cond: is32Bit(int64(off)+int64(scale)*8)
9600 // result: (LEAQ [off+int32(scale)*8] {sym} x)
9601 for {
9602 off := auxIntToInt32(v.AuxInt)
9603 sym := auxToSym(v.Aux)
9604 x := v_0
9605 if v_1.Op != OpAMD64MOVQconst {
9606 break
9607 }
9608 scale := auxIntToInt64(v_1.AuxInt)
9609 if !(is32Bit(int64(off) + int64(scale)*8)) {
9610 break
9611 }
9612 v.reset(OpAMD64LEAQ)
9613 v.AuxInt = int32ToAuxInt(off + int32(scale)*8)
9614 v.Aux = symToAux(sym)
9615 v.AddArg(x)
9616 return true
9617 }
9618 // match: (LEAQ8 [off] {sym} x (MOVLconst [scale]))
9619 // cond: is32Bit(int64(off)+int64(scale)*8)
9620 // result: (LEAQ [off+int32(scale)*8] {sym} x)
9621 for {
9622 off := auxIntToInt32(v.AuxInt)
9623 sym := auxToSym(v.Aux)
9624 x := v_0
9625 if v_1.Op != OpAMD64MOVLconst {
9626 break
9627 }
9628 scale := auxIntToInt32(v_1.AuxInt)
9629 if !(is32Bit(int64(off) + int64(scale)*8)) {
9630 break
9631 }
9632 v.reset(OpAMD64LEAQ)
9633 v.AuxInt = int32ToAuxInt(off + int32(scale)*8)
9634 v.Aux = symToAux(sym)
9635 v.AddArg(x)
9636 return true
9637 }
9638 return false
9639 }
9640 func rewriteValueAMD64_OpAMD64MOVBELstore(v *Value) bool {
9641 v_2 := v.Args[2]
9642 v_1 := v.Args[1]
9643 v_0 := v.Args[0]
9644 // match: (MOVBELstore [i] {s} p x:(BSWAPL w) mem)
9645 // cond: x.Uses == 1
9646 // result: (MOVLstore [i] {s} p w mem)
9647 for {
9648 i := auxIntToInt32(v.AuxInt)
9649 s := auxToSym(v.Aux)
9650 p := v_0
9651 x := v_1
9652 if x.Op != OpAMD64BSWAPL {
9653 break
9654 }
9655 w := x.Args[0]
9656 mem := v_2
9657 if !(x.Uses == 1) {
9658 break
9659 }
9660 v.reset(OpAMD64MOVLstore)
9661 v.AuxInt = int32ToAuxInt(i)
9662 v.Aux = symToAux(s)
9663 v.AddArg3(p, w, mem)
9664 return true
9665 }
9666 return false
9667 }
9668 func rewriteValueAMD64_OpAMD64MOVBEQstore(v *Value) bool {
9669 v_2 := v.Args[2]
9670 v_1 := v.Args[1]
9671 v_0 := v.Args[0]
9672 // match: (MOVBEQstore [i] {s} p x:(BSWAPQ w) mem)
9673 // cond: x.Uses == 1
9674 // result: (MOVQstore [i] {s} p w mem)
9675 for {
9676 i := auxIntToInt32(v.AuxInt)
9677 s := auxToSym(v.Aux)
9678 p := v_0
9679 x := v_1
9680 if x.Op != OpAMD64BSWAPQ {
9681 break
9682 }
9683 w := x.Args[0]
9684 mem := v_2
9685 if !(x.Uses == 1) {
9686 break
9687 }
9688 v.reset(OpAMD64MOVQstore)
9689 v.AuxInt = int32ToAuxInt(i)
9690 v.Aux = symToAux(s)
9691 v.AddArg3(p, w, mem)
9692 return true
9693 }
9694 return false
9695 }
9696 func rewriteValueAMD64_OpAMD64MOVBEWstore(v *Value) bool {
9697 v_2 := v.Args[2]
9698 v_1 := v.Args[1]
9699 v_0 := v.Args[0]
9700 // match: (MOVBEWstore [i] {s} p x:(ROLWconst [8] w) mem)
9701 // cond: x.Uses == 1
9702 // result: (MOVWstore [i] {s} p w mem)
9703 for {
9704 i := auxIntToInt32(v.AuxInt)
9705 s := auxToSym(v.Aux)
9706 p := v_0
9707 x := v_1
9708 if x.Op != OpAMD64ROLWconst || auxIntToInt8(x.AuxInt) != 8 {
9709 break
9710 }
9711 w := x.Args[0]
9712 mem := v_2
9713 if !(x.Uses == 1) {
9714 break
9715 }
9716 v.reset(OpAMD64MOVWstore)
9717 v.AuxInt = int32ToAuxInt(i)
9718 v.Aux = symToAux(s)
9719 v.AddArg3(p, w, mem)
9720 return true
9721 }
9722 return false
9723 }
9724 func rewriteValueAMD64_OpAMD64MOVBQSX(v *Value) bool {
9725 v_0 := v.Args[0]
9726 b := v.Block
9727 // match: (MOVBQSX x:(MOVBload [off] {sym} ptr mem))
9728 // cond: x.Uses == 1 && clobber(x)
9729 // result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
9730 for {
9731 x := v_0
9732 if x.Op != OpAMD64MOVBload {
9733 break
9734 }
9735 off := auxIntToInt32(x.AuxInt)
9736 sym := auxToSym(x.Aux)
9737 mem := x.Args[1]
9738 ptr := x.Args[0]
9739 if !(x.Uses == 1 && clobber(x)) {
9740 break
9741 }
9742 b = x.Block
9743 v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
9744 v.copyOf(v0)
9745 v0.AuxInt = int32ToAuxInt(off)
9746 v0.Aux = symToAux(sym)
9747 v0.AddArg2(ptr, mem)
9748 return true
9749 }
9750 // match: (MOVBQSX x:(MOVWload [off] {sym} ptr mem))
9751 // cond: x.Uses == 1 && clobber(x)
9752 // result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
9753 for {
9754 x := v_0
9755 if x.Op != OpAMD64MOVWload {
9756 break
9757 }
9758 off := auxIntToInt32(x.AuxInt)
9759 sym := auxToSym(x.Aux)
9760 mem := x.Args[1]
9761 ptr := x.Args[0]
9762 if !(x.Uses == 1 && clobber(x)) {
9763 break
9764 }
9765 b = x.Block
9766 v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
9767 v.copyOf(v0)
9768 v0.AuxInt = int32ToAuxInt(off)
9769 v0.Aux = symToAux(sym)
9770 v0.AddArg2(ptr, mem)
9771 return true
9772 }
9773 // match: (MOVBQSX x:(MOVLload [off] {sym} ptr mem))
9774 // cond: x.Uses == 1 && clobber(x)
9775 // result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
9776 for {
9777 x := v_0
9778 if x.Op != OpAMD64MOVLload {
9779 break
9780 }
9781 off := auxIntToInt32(x.AuxInt)
9782 sym := auxToSym(x.Aux)
9783 mem := x.Args[1]
9784 ptr := x.Args[0]
9785 if !(x.Uses == 1 && clobber(x)) {
9786 break
9787 }
9788 b = x.Block
9789 v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
9790 v.copyOf(v0)
9791 v0.AuxInt = int32ToAuxInt(off)
9792 v0.Aux = symToAux(sym)
9793 v0.AddArg2(ptr, mem)
9794 return true
9795 }
9796 // match: (MOVBQSX x:(MOVQload [off] {sym} ptr mem))
9797 // cond: x.Uses == 1 && clobber(x)
9798 // result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
9799 for {
9800 x := v_0
9801 if x.Op != OpAMD64MOVQload {
9802 break
9803 }
9804 off := auxIntToInt32(x.AuxInt)
9805 sym := auxToSym(x.Aux)
9806 mem := x.Args[1]
9807 ptr := x.Args[0]
9808 if !(x.Uses == 1 && clobber(x)) {
9809 break
9810 }
9811 b = x.Block
9812 v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
9813 v.copyOf(v0)
9814 v0.AuxInt = int32ToAuxInt(off)
9815 v0.Aux = symToAux(sym)
9816 v0.AddArg2(ptr, mem)
9817 return true
9818 }
9819 // match: (MOVBQSX (ANDLconst [c] x))
9820 // cond: c & 0x80 == 0
9821 // result: (ANDLconst [c & 0x7f] x)
9822 for {
9823 if v_0.Op != OpAMD64ANDLconst {
9824 break
9825 }
9826 c := auxIntToInt32(v_0.AuxInt)
9827 x := v_0.Args[0]
9828 if !(c&0x80 == 0) {
9829 break
9830 }
9831 v.reset(OpAMD64ANDLconst)
9832 v.AuxInt = int32ToAuxInt(c & 0x7f)
9833 v.AddArg(x)
9834 return true
9835 }
9836 // match: (MOVBQSX (MOVBQSX x))
9837 // result: (MOVBQSX x)
9838 for {
9839 if v_0.Op != OpAMD64MOVBQSX {
9840 break
9841 }
9842 x := v_0.Args[0]
9843 v.reset(OpAMD64MOVBQSX)
9844 v.AddArg(x)
9845 return true
9846 }
9847 return false
9848 }
9849 func rewriteValueAMD64_OpAMD64MOVBQSXload(v *Value) bool {
9850 v_1 := v.Args[1]
9851 v_0 := v.Args[0]
9852 // match: (MOVBQSXload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
9853 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
9854 // result: (MOVBQSX x)
9855 for {
9856 off := auxIntToInt32(v.AuxInt)
9857 sym := auxToSym(v.Aux)
9858 ptr := v_0
9859 if v_1.Op != OpAMD64MOVBstore {
9860 break
9861 }
9862 off2 := auxIntToInt32(v_1.AuxInt)
9863 sym2 := auxToSym(v_1.Aux)
9864 x := v_1.Args[1]
9865 ptr2 := v_1.Args[0]
9866 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
9867 break
9868 }
9869 v.reset(OpAMD64MOVBQSX)
9870 v.AddArg(x)
9871 return true
9872 }
9873 // match: (MOVBQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
9874 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
9875 // result: (MOVBQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
9876 for {
9877 off1 := auxIntToInt32(v.AuxInt)
9878 sym1 := auxToSym(v.Aux)
9879 if v_0.Op != OpAMD64LEAQ {
9880 break
9881 }
9882 off2 := auxIntToInt32(v_0.AuxInt)
9883 sym2 := auxToSym(v_0.Aux)
9884 base := v_0.Args[0]
9885 mem := v_1
9886 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
9887 break
9888 }
9889 v.reset(OpAMD64MOVBQSXload)
9890 v.AuxInt = int32ToAuxInt(off1 + off2)
9891 v.Aux = symToAux(mergeSym(sym1, sym2))
9892 v.AddArg2(base, mem)
9893 return true
9894 }
9895 // match: (MOVBQSXload [off] {sym} (SB) _)
9896 // cond: symIsRO(sym)
9897 // result: (MOVQconst [int64(int8(read8(sym, int64(off))))])
9898 for {
9899 off := auxIntToInt32(v.AuxInt)
9900 sym := auxToSym(v.Aux)
9901 if v_0.Op != OpSB || !(symIsRO(sym)) {
9902 break
9903 }
9904 v.reset(OpAMD64MOVQconst)
9905 v.AuxInt = int64ToAuxInt(int64(int8(read8(sym, int64(off)))))
9906 return true
9907 }
9908 return false
9909 }
9910 func rewriteValueAMD64_OpAMD64MOVBQZX(v *Value) bool {
9911 v_0 := v.Args[0]
9912 b := v.Block
9913 // match: (MOVBQZX x:(MOVBload [off] {sym} ptr mem))
9914 // cond: x.Uses == 1 && clobber(x)
9915 // result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
9916 for {
9917 x := v_0
9918 if x.Op != OpAMD64MOVBload {
9919 break
9920 }
9921 off := auxIntToInt32(x.AuxInt)
9922 sym := auxToSym(x.Aux)
9923 mem := x.Args[1]
9924 ptr := x.Args[0]
9925 if !(x.Uses == 1 && clobber(x)) {
9926 break
9927 }
9928 b = x.Block
9929 v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
9930 v.copyOf(v0)
9931 v0.AuxInt = int32ToAuxInt(off)
9932 v0.Aux = symToAux(sym)
9933 v0.AddArg2(ptr, mem)
9934 return true
9935 }
9936 // match: (MOVBQZX x:(MOVWload [off] {sym} ptr mem))
9937 // cond: x.Uses == 1 && clobber(x)
9938 // result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
9939 for {
9940 x := v_0
9941 if x.Op != OpAMD64MOVWload {
9942 break
9943 }
9944 off := auxIntToInt32(x.AuxInt)
9945 sym := auxToSym(x.Aux)
9946 mem := x.Args[1]
9947 ptr := x.Args[0]
9948 if !(x.Uses == 1 && clobber(x)) {
9949 break
9950 }
9951 b = x.Block
9952 v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
9953 v.copyOf(v0)
9954 v0.AuxInt = int32ToAuxInt(off)
9955 v0.Aux = symToAux(sym)
9956 v0.AddArg2(ptr, mem)
9957 return true
9958 }
9959 // match: (MOVBQZX x:(MOVLload [off] {sym} ptr mem))
9960 // cond: x.Uses == 1 && clobber(x)
9961 // result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
9962 for {
9963 x := v_0
9964 if x.Op != OpAMD64MOVLload {
9965 break
9966 }
9967 off := auxIntToInt32(x.AuxInt)
9968 sym := auxToSym(x.Aux)
9969 mem := x.Args[1]
9970 ptr := x.Args[0]
9971 if !(x.Uses == 1 && clobber(x)) {
9972 break
9973 }
9974 b = x.Block
9975 v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
9976 v.copyOf(v0)
9977 v0.AuxInt = int32ToAuxInt(off)
9978 v0.Aux = symToAux(sym)
9979 v0.AddArg2(ptr, mem)
9980 return true
9981 }
9982 // match: (MOVBQZX x:(MOVQload [off] {sym} ptr mem))
9983 // cond: x.Uses == 1 && clobber(x)
9984 // result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
9985 for {
9986 x := v_0
9987 if x.Op != OpAMD64MOVQload {
9988 break
9989 }
9990 off := auxIntToInt32(x.AuxInt)
9991 sym := auxToSym(x.Aux)
9992 mem := x.Args[1]
9993 ptr := x.Args[0]
9994 if !(x.Uses == 1 && clobber(x)) {
9995 break
9996 }
9997 b = x.Block
9998 v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
9999 v.copyOf(v0)
10000 v0.AuxInt = int32ToAuxInt(off)
10001 v0.Aux = symToAux(sym)
10002 v0.AddArg2(ptr, mem)
10003 return true
10004 }
10005 // match: (MOVBQZX (ANDLconst [c] x))
10006 // result: (ANDLconst [c & 0xff] x)
10007 for {
10008 if v_0.Op != OpAMD64ANDLconst {
10009 break
10010 }
10011 c := auxIntToInt32(v_0.AuxInt)
10012 x := v_0.Args[0]
10013 v.reset(OpAMD64ANDLconst)
10014 v.AuxInt = int32ToAuxInt(c & 0xff)
10015 v.AddArg(x)
10016 return true
10017 }
10018 // match: (MOVBQZX (MOVBQZX x))
10019 // result: (MOVBQZX x)
10020 for {
10021 if v_0.Op != OpAMD64MOVBQZX {
10022 break
10023 }
10024 x := v_0.Args[0]
10025 v.reset(OpAMD64MOVBQZX)
10026 v.AddArg(x)
10027 return true
10028 }
10029 return false
10030 }
10031 func rewriteValueAMD64_OpAMD64MOVBatomicload(v *Value) bool {
10032 v_1 := v.Args[1]
10033 v_0 := v.Args[0]
10034 // match: (MOVBatomicload [off1] {sym} (ADDQconst [off2] ptr) mem)
10035 // cond: is32Bit(int64(off1)+int64(off2))
10036 // result: (MOVBatomicload [off1+off2] {sym} ptr mem)
10037 for {
10038 off1 := auxIntToInt32(v.AuxInt)
10039 sym := auxToSym(v.Aux)
10040 if v_0.Op != OpAMD64ADDQconst {
10041 break
10042 }
10043 off2 := auxIntToInt32(v_0.AuxInt)
10044 ptr := v_0.Args[0]
10045 mem := v_1
10046 if !(is32Bit(int64(off1) + int64(off2))) {
10047 break
10048 }
10049 v.reset(OpAMD64MOVBatomicload)
10050 v.AuxInt = int32ToAuxInt(off1 + off2)
10051 v.Aux = symToAux(sym)
10052 v.AddArg2(ptr, mem)
10053 return true
10054 }
10055 // match: (MOVBatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem)
10056 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
10057 // result: (MOVBatomicload [off1+off2] {mergeSym(sym1, sym2)} ptr mem)
10058 for {
10059 off1 := auxIntToInt32(v.AuxInt)
10060 sym1 := auxToSym(v.Aux)
10061 if v_0.Op != OpAMD64LEAQ {
10062 break
10063 }
10064 off2 := auxIntToInt32(v_0.AuxInt)
10065 sym2 := auxToSym(v_0.Aux)
10066 ptr := v_0.Args[0]
10067 mem := v_1
10068 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
10069 break
10070 }
10071 v.reset(OpAMD64MOVBatomicload)
10072 v.AuxInt = int32ToAuxInt(off1 + off2)
10073 v.Aux = symToAux(mergeSym(sym1, sym2))
10074 v.AddArg2(ptr, mem)
10075 return true
10076 }
10077 return false
10078 }
10079 func rewriteValueAMD64_OpAMD64MOVBload(v *Value) bool {
10080 v_1 := v.Args[1]
10081 v_0 := v.Args[0]
10082 // match: (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
10083 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
10084 // result: (MOVBQZX x)
10085 for {
10086 off := auxIntToInt32(v.AuxInt)
10087 sym := auxToSym(v.Aux)
10088 ptr := v_0
10089 if v_1.Op != OpAMD64MOVBstore {
10090 break
10091 }
10092 off2 := auxIntToInt32(v_1.AuxInt)
10093 sym2 := auxToSym(v_1.Aux)
10094 x := v_1.Args[1]
10095 ptr2 := v_1.Args[0]
10096 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
10097 break
10098 }
10099 v.reset(OpAMD64MOVBQZX)
10100 v.AddArg(x)
10101 return true
10102 }
10103 // match: (MOVBload [off1] {sym} (ADDQconst [off2] ptr) mem)
10104 // cond: is32Bit(int64(off1)+int64(off2))
10105 // result: (MOVBload [off1+off2] {sym} ptr mem)
10106 for {
10107 off1 := auxIntToInt32(v.AuxInt)
10108 sym := auxToSym(v.Aux)
10109 if v_0.Op != OpAMD64ADDQconst {
10110 break
10111 }
10112 off2 := auxIntToInt32(v_0.AuxInt)
10113 ptr := v_0.Args[0]
10114 mem := v_1
10115 if !(is32Bit(int64(off1) + int64(off2))) {
10116 break
10117 }
10118 v.reset(OpAMD64MOVBload)
10119 v.AuxInt = int32ToAuxInt(off1 + off2)
10120 v.Aux = symToAux(sym)
10121 v.AddArg2(ptr, mem)
10122 return true
10123 }
10124 // match: (MOVBload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
10125 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
10126 // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
10127 for {
10128 off1 := auxIntToInt32(v.AuxInt)
10129 sym1 := auxToSym(v.Aux)
10130 if v_0.Op != OpAMD64LEAQ {
10131 break
10132 }
10133 off2 := auxIntToInt32(v_0.AuxInt)
10134 sym2 := auxToSym(v_0.Aux)
10135 base := v_0.Args[0]
10136 mem := v_1
10137 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
10138 break
10139 }
10140 v.reset(OpAMD64MOVBload)
10141 v.AuxInt = int32ToAuxInt(off1 + off2)
10142 v.Aux = symToAux(mergeSym(sym1, sym2))
10143 v.AddArg2(base, mem)
10144 return true
10145 }
10146 // match: (MOVBload [off] {sym} (SB) _)
10147 // cond: symIsRO(sym)
10148 // result: (MOVLconst [int32(read8(sym, int64(off)))])
10149 for {
10150 off := auxIntToInt32(v.AuxInt)
10151 sym := auxToSym(v.Aux)
10152 if v_0.Op != OpSB || !(symIsRO(sym)) {
10153 break
10154 }
10155 v.reset(OpAMD64MOVLconst)
10156 v.AuxInt = int32ToAuxInt(int32(read8(sym, int64(off))))
10157 return true
10158 }
10159 return false
10160 }
10161 func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool {
10162 v_2 := v.Args[2]
10163 v_1 := v.Args[1]
10164 v_0 := v.Args[0]
10165 // match: (MOVBstore [off] {sym} ptr y:(SETL x) mem)
10166 // cond: y.Uses == 1
10167 // result: (SETLstore [off] {sym} ptr x mem)
10168 for {
10169 off := auxIntToInt32(v.AuxInt)
10170 sym := auxToSym(v.Aux)
10171 ptr := v_0
10172 y := v_1
10173 if y.Op != OpAMD64SETL {
10174 break
10175 }
10176 x := y.Args[0]
10177 mem := v_2
10178 if !(y.Uses == 1) {
10179 break
10180 }
10181 v.reset(OpAMD64SETLstore)
10182 v.AuxInt = int32ToAuxInt(off)
10183 v.Aux = symToAux(sym)
10184 v.AddArg3(ptr, x, mem)
10185 return true
10186 }
10187 // match: (MOVBstore [off] {sym} ptr y:(SETLE x) mem)
10188 // cond: y.Uses == 1
10189 // result: (SETLEstore [off] {sym} ptr x mem)
10190 for {
10191 off := auxIntToInt32(v.AuxInt)
10192 sym := auxToSym(v.Aux)
10193 ptr := v_0
10194 y := v_1
10195 if y.Op != OpAMD64SETLE {
10196 break
10197 }
10198 x := y.Args[0]
10199 mem := v_2
10200 if !(y.Uses == 1) {
10201 break
10202 }
10203 v.reset(OpAMD64SETLEstore)
10204 v.AuxInt = int32ToAuxInt(off)
10205 v.Aux = symToAux(sym)
10206 v.AddArg3(ptr, x, mem)
10207 return true
10208 }
10209 // match: (MOVBstore [off] {sym} ptr y:(SETG x) mem)
10210 // cond: y.Uses == 1
10211 // result: (SETGstore [off] {sym} ptr x mem)
10212 for {
10213 off := auxIntToInt32(v.AuxInt)
10214 sym := auxToSym(v.Aux)
10215 ptr := v_0
10216 y := v_1
10217 if y.Op != OpAMD64SETG {
10218 break
10219 }
10220 x := y.Args[0]
10221 mem := v_2
10222 if !(y.Uses == 1) {
10223 break
10224 }
10225 v.reset(OpAMD64SETGstore)
10226 v.AuxInt = int32ToAuxInt(off)
10227 v.Aux = symToAux(sym)
10228 v.AddArg3(ptr, x, mem)
10229 return true
10230 }
10231 // match: (MOVBstore [off] {sym} ptr y:(SETGE x) mem)
10232 // cond: y.Uses == 1
10233 // result: (SETGEstore [off] {sym} ptr x mem)
10234 for {
10235 off := auxIntToInt32(v.AuxInt)
10236 sym := auxToSym(v.Aux)
10237 ptr := v_0
10238 y := v_1
10239 if y.Op != OpAMD64SETGE {
10240 break
10241 }
10242 x := y.Args[0]
10243 mem := v_2
10244 if !(y.Uses == 1) {
10245 break
10246 }
10247 v.reset(OpAMD64SETGEstore)
10248 v.AuxInt = int32ToAuxInt(off)
10249 v.Aux = symToAux(sym)
10250 v.AddArg3(ptr, x, mem)
10251 return true
10252 }
10253 // match: (MOVBstore [off] {sym} ptr y:(SETEQ x) mem)
10254 // cond: y.Uses == 1
10255 // result: (SETEQstore [off] {sym} ptr x mem)
10256 for {
10257 off := auxIntToInt32(v.AuxInt)
10258 sym := auxToSym(v.Aux)
10259 ptr := v_0
10260 y := v_1
10261 if y.Op != OpAMD64SETEQ {
10262 break
10263 }
10264 x := y.Args[0]
10265 mem := v_2
10266 if !(y.Uses == 1) {
10267 break
10268 }
10269 v.reset(OpAMD64SETEQstore)
10270 v.AuxInt = int32ToAuxInt(off)
10271 v.Aux = symToAux(sym)
10272 v.AddArg3(ptr, x, mem)
10273 return true
10274 }
10275 // match: (MOVBstore [off] {sym} ptr y:(SETNE x) mem)
10276 // cond: y.Uses == 1
10277 // result: (SETNEstore [off] {sym} ptr x mem)
10278 for {
10279 off := auxIntToInt32(v.AuxInt)
10280 sym := auxToSym(v.Aux)
10281 ptr := v_0
10282 y := v_1
10283 if y.Op != OpAMD64SETNE {
10284 break
10285 }
10286 x := y.Args[0]
10287 mem := v_2
10288 if !(y.Uses == 1) {
10289 break
10290 }
10291 v.reset(OpAMD64SETNEstore)
10292 v.AuxInt = int32ToAuxInt(off)
10293 v.Aux = symToAux(sym)
10294 v.AddArg3(ptr, x, mem)
10295 return true
10296 }
10297 // match: (MOVBstore [off] {sym} ptr y:(SETB x) mem)
10298 // cond: y.Uses == 1
10299 // result: (SETBstore [off] {sym} ptr x mem)
10300 for {
10301 off := auxIntToInt32(v.AuxInt)
10302 sym := auxToSym(v.Aux)
10303 ptr := v_0
10304 y := v_1
10305 if y.Op != OpAMD64SETB {
10306 break
10307 }
10308 x := y.Args[0]
10309 mem := v_2
10310 if !(y.Uses == 1) {
10311 break
10312 }
10313 v.reset(OpAMD64SETBstore)
10314 v.AuxInt = int32ToAuxInt(off)
10315 v.Aux = symToAux(sym)
10316 v.AddArg3(ptr, x, mem)
10317 return true
10318 }
10319 // match: (MOVBstore [off] {sym} ptr y:(SETBE x) mem)
10320 // cond: y.Uses == 1
10321 // result: (SETBEstore [off] {sym} ptr x mem)
10322 for {
10323 off := auxIntToInt32(v.AuxInt)
10324 sym := auxToSym(v.Aux)
10325 ptr := v_0
10326 y := v_1
10327 if y.Op != OpAMD64SETBE {
10328 break
10329 }
10330 x := y.Args[0]
10331 mem := v_2
10332 if !(y.Uses == 1) {
10333 break
10334 }
10335 v.reset(OpAMD64SETBEstore)
10336 v.AuxInt = int32ToAuxInt(off)
10337 v.Aux = symToAux(sym)
10338 v.AddArg3(ptr, x, mem)
10339 return true
10340 }
10341 // match: (MOVBstore [off] {sym} ptr y:(SETA x) mem)
10342 // cond: y.Uses == 1
10343 // result: (SETAstore [off] {sym} ptr x mem)
10344 for {
10345 off := auxIntToInt32(v.AuxInt)
10346 sym := auxToSym(v.Aux)
10347 ptr := v_0
10348 y := v_1
10349 if y.Op != OpAMD64SETA {
10350 break
10351 }
10352 x := y.Args[0]
10353 mem := v_2
10354 if !(y.Uses == 1) {
10355 break
10356 }
10357 v.reset(OpAMD64SETAstore)
10358 v.AuxInt = int32ToAuxInt(off)
10359 v.Aux = symToAux(sym)
10360 v.AddArg3(ptr, x, mem)
10361 return true
10362 }
10363 // match: (MOVBstore [off] {sym} ptr y:(SETAE x) mem)
10364 // cond: y.Uses == 1
10365 // result: (SETAEstore [off] {sym} ptr x mem)
10366 for {
10367 off := auxIntToInt32(v.AuxInt)
10368 sym := auxToSym(v.Aux)
10369 ptr := v_0
10370 y := v_1
10371 if y.Op != OpAMD64SETAE {
10372 break
10373 }
10374 x := y.Args[0]
10375 mem := v_2
10376 if !(y.Uses == 1) {
10377 break
10378 }
10379 v.reset(OpAMD64SETAEstore)
10380 v.AuxInt = int32ToAuxInt(off)
10381 v.Aux = symToAux(sym)
10382 v.AddArg3(ptr, x, mem)
10383 return true
10384 }
10385 // match: (MOVBstore [off] {sym} ptr (MOVBQSX x) mem)
10386 // result: (MOVBstore [off] {sym} ptr x mem)
10387 for {
10388 off := auxIntToInt32(v.AuxInt)
10389 sym := auxToSym(v.Aux)
10390 ptr := v_0
10391 if v_1.Op != OpAMD64MOVBQSX {
10392 break
10393 }
10394 x := v_1.Args[0]
10395 mem := v_2
10396 v.reset(OpAMD64MOVBstore)
10397 v.AuxInt = int32ToAuxInt(off)
10398 v.Aux = symToAux(sym)
10399 v.AddArg3(ptr, x, mem)
10400 return true
10401 }
10402 // match: (MOVBstore [off] {sym} ptr (MOVBQZX x) mem)
10403 // result: (MOVBstore [off] {sym} ptr x mem)
10404 for {
10405 off := auxIntToInt32(v.AuxInt)
10406 sym := auxToSym(v.Aux)
10407 ptr := v_0
10408 if v_1.Op != OpAMD64MOVBQZX {
10409 break
10410 }
10411 x := v_1.Args[0]
10412 mem := v_2
10413 v.reset(OpAMD64MOVBstore)
10414 v.AuxInt = int32ToAuxInt(off)
10415 v.Aux = symToAux(sym)
10416 v.AddArg3(ptr, x, mem)
10417 return true
10418 }
10419 // match: (MOVBstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
10420 // cond: is32Bit(int64(off1)+int64(off2))
10421 // result: (MOVBstore [off1+off2] {sym} ptr val mem)
10422 for {
10423 off1 := auxIntToInt32(v.AuxInt)
10424 sym := auxToSym(v.Aux)
10425 if v_0.Op != OpAMD64ADDQconst {
10426 break
10427 }
10428 off2 := auxIntToInt32(v_0.AuxInt)
10429 ptr := v_0.Args[0]
10430 val := v_1
10431 mem := v_2
10432 if !(is32Bit(int64(off1) + int64(off2))) {
10433 break
10434 }
10435 v.reset(OpAMD64MOVBstore)
10436 v.AuxInt = int32ToAuxInt(off1 + off2)
10437 v.Aux = symToAux(sym)
10438 v.AddArg3(ptr, val, mem)
10439 return true
10440 }
10441 // match: (MOVBstore [off] {sym} ptr (MOVLconst [c]) mem)
10442 // result: (MOVBstoreconst [makeValAndOff(int32(int8(c)),off)] {sym} ptr mem)
10443 for {
10444 off := auxIntToInt32(v.AuxInt)
10445 sym := auxToSym(v.Aux)
10446 ptr := v_0
10447 if v_1.Op != OpAMD64MOVLconst {
10448 break
10449 }
10450 c := auxIntToInt32(v_1.AuxInt)
10451 mem := v_2
10452 v.reset(OpAMD64MOVBstoreconst)
10453 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off))
10454 v.Aux = symToAux(sym)
10455 v.AddArg2(ptr, mem)
10456 return true
10457 }
10458 // match: (MOVBstore [off] {sym} ptr (MOVQconst [c]) mem)
10459 // result: (MOVBstoreconst [makeValAndOff(int32(int8(c)),off)] {sym} ptr mem)
10460 for {
10461 off := auxIntToInt32(v.AuxInt)
10462 sym := auxToSym(v.Aux)
10463 ptr := v_0
10464 if v_1.Op != OpAMD64MOVQconst {
10465 break
10466 }
10467 c := auxIntToInt64(v_1.AuxInt)
10468 mem := v_2
10469 v.reset(OpAMD64MOVBstoreconst)
10470 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off))
10471 v.Aux = symToAux(sym)
10472 v.AddArg2(ptr, mem)
10473 return true
10474 }
10475 // match: (MOVBstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
10476 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
10477 // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
10478 for {
10479 off1 := auxIntToInt32(v.AuxInt)
10480 sym1 := auxToSym(v.Aux)
10481 if v_0.Op != OpAMD64LEAQ {
10482 break
10483 }
10484 off2 := auxIntToInt32(v_0.AuxInt)
10485 sym2 := auxToSym(v_0.Aux)
10486 base := v_0.Args[0]
10487 val := v_1
10488 mem := v_2
10489 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
10490 break
10491 }
10492 v.reset(OpAMD64MOVBstore)
10493 v.AuxInt = int32ToAuxInt(off1 + off2)
10494 v.Aux = symToAux(mergeSym(sym1, sym2))
10495 v.AddArg3(base, val, mem)
10496 return true
10497 }
10498 return false
10499 }
10500 func rewriteValueAMD64_OpAMD64MOVBstoreconst(v *Value) bool {
10501 v_1 := v.Args[1]
10502 v_0 := v.Args[0]
10503 // match: (MOVBstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
10504 // cond: ValAndOff(sc).canAdd32(off)
10505 // result: (MOVBstoreconst [ValAndOff(sc).addOffset32(off)] {s} ptr mem)
10506 for {
10507 sc := auxIntToValAndOff(v.AuxInt)
10508 s := auxToSym(v.Aux)
10509 if v_0.Op != OpAMD64ADDQconst {
10510 break
10511 }
10512 off := auxIntToInt32(v_0.AuxInt)
10513 ptr := v_0.Args[0]
10514 mem := v_1
10515 if !(ValAndOff(sc).canAdd32(off)) {
10516 break
10517 }
10518 v.reset(OpAMD64MOVBstoreconst)
10519 v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
10520 v.Aux = symToAux(s)
10521 v.AddArg2(ptr, mem)
10522 return true
10523 }
10524 // match: (MOVBstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
10525 // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)
10526 // result: (MOVBstoreconst [ValAndOff(sc).addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
10527 for {
10528 sc := auxIntToValAndOff(v.AuxInt)
10529 sym1 := auxToSym(v.Aux)
10530 if v_0.Op != OpAMD64LEAQ {
10531 break
10532 }
10533 off := auxIntToInt32(v_0.AuxInt)
10534 sym2 := auxToSym(v_0.Aux)
10535 ptr := v_0.Args[0]
10536 mem := v_1
10537 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)) {
10538 break
10539 }
10540 v.reset(OpAMD64MOVBstoreconst)
10541 v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
10542 v.Aux = symToAux(mergeSym(sym1, sym2))
10543 v.AddArg2(ptr, mem)
10544 return true
10545 }
10546 return false
10547 }
10548 func rewriteValueAMD64_OpAMD64MOVLQSX(v *Value) bool {
10549 v_0 := v.Args[0]
10550 b := v.Block
10551 // match: (MOVLQSX x:(MOVLload [off] {sym} ptr mem))
10552 // cond: x.Uses == 1 && clobber(x)
10553 // result: @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem)
10554 for {
10555 x := v_0
10556 if x.Op != OpAMD64MOVLload {
10557 break
10558 }
10559 off := auxIntToInt32(x.AuxInt)
10560 sym := auxToSym(x.Aux)
10561 mem := x.Args[1]
10562 ptr := x.Args[0]
10563 if !(x.Uses == 1 && clobber(x)) {
10564 break
10565 }
10566 b = x.Block
10567 v0 := b.NewValue0(x.Pos, OpAMD64MOVLQSXload, v.Type)
10568 v.copyOf(v0)
10569 v0.AuxInt = int32ToAuxInt(off)
10570 v0.Aux = symToAux(sym)
10571 v0.AddArg2(ptr, mem)
10572 return true
10573 }
10574 // match: (MOVLQSX x:(MOVQload [off] {sym} ptr mem))
10575 // cond: x.Uses == 1 && clobber(x)
10576 // result: @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem)
10577 for {
10578 x := v_0
10579 if x.Op != OpAMD64MOVQload {
10580 break
10581 }
10582 off := auxIntToInt32(x.AuxInt)
10583 sym := auxToSym(x.Aux)
10584 mem := x.Args[1]
10585 ptr := x.Args[0]
10586 if !(x.Uses == 1 && clobber(x)) {
10587 break
10588 }
10589 b = x.Block
10590 v0 := b.NewValue0(x.Pos, OpAMD64MOVLQSXload, v.Type)
10591 v.copyOf(v0)
10592 v0.AuxInt = int32ToAuxInt(off)
10593 v0.Aux = symToAux(sym)
10594 v0.AddArg2(ptr, mem)
10595 return true
10596 }
10597 // match: (MOVLQSX (ANDLconst [c] x))
10598 // cond: uint32(c) & 0x80000000 == 0
10599 // result: (ANDLconst [c & 0x7fffffff] x)
10600 for {
10601 if v_0.Op != OpAMD64ANDLconst {
10602 break
10603 }
10604 c := auxIntToInt32(v_0.AuxInt)
10605 x := v_0.Args[0]
10606 if !(uint32(c)&0x80000000 == 0) {
10607 break
10608 }
10609 v.reset(OpAMD64ANDLconst)
10610 v.AuxInt = int32ToAuxInt(c & 0x7fffffff)
10611 v.AddArg(x)
10612 return true
10613 }
10614 // match: (MOVLQSX (MOVLQSX x))
10615 // result: (MOVLQSX x)
10616 for {
10617 if v_0.Op != OpAMD64MOVLQSX {
10618 break
10619 }
10620 x := v_0.Args[0]
10621 v.reset(OpAMD64MOVLQSX)
10622 v.AddArg(x)
10623 return true
10624 }
10625 // match: (MOVLQSX (MOVWQSX x))
10626 // result: (MOVWQSX x)
10627 for {
10628 if v_0.Op != OpAMD64MOVWQSX {
10629 break
10630 }
10631 x := v_0.Args[0]
10632 v.reset(OpAMD64MOVWQSX)
10633 v.AddArg(x)
10634 return true
10635 }
10636 // match: (MOVLQSX (MOVBQSX x))
10637 // result: (MOVBQSX x)
10638 for {
10639 if v_0.Op != OpAMD64MOVBQSX {
10640 break
10641 }
10642 x := v_0.Args[0]
10643 v.reset(OpAMD64MOVBQSX)
10644 v.AddArg(x)
10645 return true
10646 }
10647 return false
10648 }
10649 func rewriteValueAMD64_OpAMD64MOVLQSXload(v *Value) bool {
10650 v_1 := v.Args[1]
10651 v_0 := v.Args[0]
10652 b := v.Block
10653 config := b.Func.Config
10654 // match: (MOVLQSXload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _))
10655 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
10656 // result: (MOVLQSX x)
10657 for {
10658 off := auxIntToInt32(v.AuxInt)
10659 sym := auxToSym(v.Aux)
10660 ptr := v_0
10661 if v_1.Op != OpAMD64MOVLstore {
10662 break
10663 }
10664 off2 := auxIntToInt32(v_1.AuxInt)
10665 sym2 := auxToSym(v_1.Aux)
10666 x := v_1.Args[1]
10667 ptr2 := v_1.Args[0]
10668 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
10669 break
10670 }
10671 v.reset(OpAMD64MOVLQSX)
10672 v.AddArg(x)
10673 return true
10674 }
10675 // match: (MOVLQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
10676 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
10677 // result: (MOVLQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
10678 for {
10679 off1 := auxIntToInt32(v.AuxInt)
10680 sym1 := auxToSym(v.Aux)
10681 if v_0.Op != OpAMD64LEAQ {
10682 break
10683 }
10684 off2 := auxIntToInt32(v_0.AuxInt)
10685 sym2 := auxToSym(v_0.Aux)
10686 base := v_0.Args[0]
10687 mem := v_1
10688 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
10689 break
10690 }
10691 v.reset(OpAMD64MOVLQSXload)
10692 v.AuxInt = int32ToAuxInt(off1 + off2)
10693 v.Aux = symToAux(mergeSym(sym1, sym2))
10694 v.AddArg2(base, mem)
10695 return true
10696 }
10697 // match: (MOVLQSXload [off] {sym} (SB) _)
10698 // cond: symIsRO(sym)
10699 // result: (MOVQconst [int64(int32(read32(sym, int64(off), config.ctxt.Arch.ByteOrder)))])
10700 for {
10701 off := auxIntToInt32(v.AuxInt)
10702 sym := auxToSym(v.Aux)
10703 if v_0.Op != OpSB || !(symIsRO(sym)) {
10704 break
10705 }
10706 v.reset(OpAMD64MOVQconst)
10707 v.AuxInt = int64ToAuxInt(int64(int32(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))))
10708 return true
10709 }
10710 return false
10711 }
10712 func rewriteValueAMD64_OpAMD64MOVLQZX(v *Value) bool {
10713 v_0 := v.Args[0]
10714 b := v.Block
10715 // match: (MOVLQZX x:(MOVLload [off] {sym} ptr mem))
10716 // cond: x.Uses == 1 && clobber(x)
10717 // result: @x.Block (MOVLload <v.Type> [off] {sym} ptr mem)
10718 for {
10719 x := v_0
10720 if x.Op != OpAMD64MOVLload {
10721 break
10722 }
10723 off := auxIntToInt32(x.AuxInt)
10724 sym := auxToSym(x.Aux)
10725 mem := x.Args[1]
10726 ptr := x.Args[0]
10727 if !(x.Uses == 1 && clobber(x)) {
10728 break
10729 }
10730 b = x.Block
10731 v0 := b.NewValue0(x.Pos, OpAMD64MOVLload, v.Type)
10732 v.copyOf(v0)
10733 v0.AuxInt = int32ToAuxInt(off)
10734 v0.Aux = symToAux(sym)
10735 v0.AddArg2(ptr, mem)
10736 return true
10737 }
10738 // match: (MOVLQZX x:(MOVQload [off] {sym} ptr mem))
10739 // cond: x.Uses == 1 && clobber(x)
10740 // result: @x.Block (MOVLload <v.Type> [off] {sym} ptr mem)
10741 for {
10742 x := v_0
10743 if x.Op != OpAMD64MOVQload {
10744 break
10745 }
10746 off := auxIntToInt32(x.AuxInt)
10747 sym := auxToSym(x.Aux)
10748 mem := x.Args[1]
10749 ptr := x.Args[0]
10750 if !(x.Uses == 1 && clobber(x)) {
10751 break
10752 }
10753 b = x.Block
10754 v0 := b.NewValue0(x.Pos, OpAMD64MOVLload, v.Type)
10755 v.copyOf(v0)
10756 v0.AuxInt = int32ToAuxInt(off)
10757 v0.Aux = symToAux(sym)
10758 v0.AddArg2(ptr, mem)
10759 return true
10760 }
10761 // match: (MOVLQZX (ANDLconst [c] x))
10762 // result: (ANDLconst [c] x)
10763 for {
10764 if v_0.Op != OpAMD64ANDLconst {
10765 break
10766 }
10767 c := auxIntToInt32(v_0.AuxInt)
10768 x := v_0.Args[0]
10769 v.reset(OpAMD64ANDLconst)
10770 v.AuxInt = int32ToAuxInt(c)
10771 v.AddArg(x)
10772 return true
10773 }
10774 // match: (MOVLQZX (MOVLQZX x))
10775 // result: (MOVLQZX x)
10776 for {
10777 if v_0.Op != OpAMD64MOVLQZX {
10778 break
10779 }
10780 x := v_0.Args[0]
10781 v.reset(OpAMD64MOVLQZX)
10782 v.AddArg(x)
10783 return true
10784 }
10785 // match: (MOVLQZX (MOVWQZX x))
10786 // result: (MOVWQZX x)
10787 for {
10788 if v_0.Op != OpAMD64MOVWQZX {
10789 break
10790 }
10791 x := v_0.Args[0]
10792 v.reset(OpAMD64MOVWQZX)
10793 v.AddArg(x)
10794 return true
10795 }
10796 // match: (MOVLQZX (MOVBQZX x))
10797 // result: (MOVBQZX x)
10798 for {
10799 if v_0.Op != OpAMD64MOVBQZX {
10800 break
10801 }
10802 x := v_0.Args[0]
10803 v.reset(OpAMD64MOVBQZX)
10804 v.AddArg(x)
10805 return true
10806 }
10807 return false
10808 }
10809 func rewriteValueAMD64_OpAMD64MOVLatomicload(v *Value) bool {
10810 v_1 := v.Args[1]
10811 v_0 := v.Args[0]
10812 // match: (MOVLatomicload [off1] {sym} (ADDQconst [off2] ptr) mem)
10813 // cond: is32Bit(int64(off1)+int64(off2))
10814 // result: (MOVLatomicload [off1+off2] {sym} ptr mem)
10815 for {
10816 off1 := auxIntToInt32(v.AuxInt)
10817 sym := auxToSym(v.Aux)
10818 if v_0.Op != OpAMD64ADDQconst {
10819 break
10820 }
10821 off2 := auxIntToInt32(v_0.AuxInt)
10822 ptr := v_0.Args[0]
10823 mem := v_1
10824 if !(is32Bit(int64(off1) + int64(off2))) {
10825 break
10826 }
10827 v.reset(OpAMD64MOVLatomicload)
10828 v.AuxInt = int32ToAuxInt(off1 + off2)
10829 v.Aux = symToAux(sym)
10830 v.AddArg2(ptr, mem)
10831 return true
10832 }
10833 // match: (MOVLatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem)
10834 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
10835 // result: (MOVLatomicload [off1+off2] {mergeSym(sym1, sym2)} ptr mem)
10836 for {
10837 off1 := auxIntToInt32(v.AuxInt)
10838 sym1 := auxToSym(v.Aux)
10839 if v_0.Op != OpAMD64LEAQ {
10840 break
10841 }
10842 off2 := auxIntToInt32(v_0.AuxInt)
10843 sym2 := auxToSym(v_0.Aux)
10844 ptr := v_0.Args[0]
10845 mem := v_1
10846 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
10847 break
10848 }
10849 v.reset(OpAMD64MOVLatomicload)
10850 v.AuxInt = int32ToAuxInt(off1 + off2)
10851 v.Aux = symToAux(mergeSym(sym1, sym2))
10852 v.AddArg2(ptr, mem)
10853 return true
10854 }
10855 return false
10856 }
10857 func rewriteValueAMD64_OpAMD64MOVLf2i(v *Value) bool {
10858 v_0 := v.Args[0]
10859 b := v.Block
10860 // match: (MOVLf2i <t> (Arg <u> [off] {sym}))
10861 // cond: t.Size() == u.Size()
10862 // result: @b.Func.Entry (Arg <t> [off] {sym})
10863 for {
10864 t := v.Type
10865 if v_0.Op != OpArg {
10866 break
10867 }
10868 u := v_0.Type
10869 off := auxIntToInt32(v_0.AuxInt)
10870 sym := auxToSym(v_0.Aux)
10871 if !(t.Size() == u.Size()) {
10872 break
10873 }
10874 b = b.Func.Entry
10875 v0 := b.NewValue0(v.Pos, OpArg, t)
10876 v.copyOf(v0)
10877 v0.AuxInt = int32ToAuxInt(off)
10878 v0.Aux = symToAux(sym)
10879 return true
10880 }
10881 return false
10882 }
10883 func rewriteValueAMD64_OpAMD64MOVLi2f(v *Value) bool {
10884 v_0 := v.Args[0]
10885 b := v.Block
10886 // match: (MOVLi2f <t> (Arg <u> [off] {sym}))
10887 // cond: t.Size() == u.Size()
10888 // result: @b.Func.Entry (Arg <t> [off] {sym})
10889 for {
10890 t := v.Type
10891 if v_0.Op != OpArg {
10892 break
10893 }
10894 u := v_0.Type
10895 off := auxIntToInt32(v_0.AuxInt)
10896 sym := auxToSym(v_0.Aux)
10897 if !(t.Size() == u.Size()) {
10898 break
10899 }
10900 b = b.Func.Entry
10901 v0 := b.NewValue0(v.Pos, OpArg, t)
10902 v.copyOf(v0)
10903 v0.AuxInt = int32ToAuxInt(off)
10904 v0.Aux = symToAux(sym)
10905 return true
10906 }
10907 return false
10908 }
10909 func rewriteValueAMD64_OpAMD64MOVLload(v *Value) bool {
10910 v_1 := v.Args[1]
10911 v_0 := v.Args[0]
10912 b := v.Block
10913 config := b.Func.Config
10914 // match: (MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _))
10915 // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
10916 // result: (MOVLQZX x)
10917 for {
10918 off := auxIntToInt32(v.AuxInt)
10919 sym := auxToSym(v.Aux)
10920 ptr := v_0
10921 if v_1.Op != OpAMD64MOVLstore {
10922 break
10923 }
10924 off2 := auxIntToInt32(v_1.AuxInt)
10925 sym2 := auxToSym(v_1.Aux)
10926 x := v_1.Args[1]
10927 ptr2 := v_1.Args[0]
10928 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
10929 break
10930 }
10931 v.reset(OpAMD64MOVLQZX)
10932 v.AddArg(x)
10933 return true
10934 }
10935 // match: (MOVLload [off1] {sym} (ADDQconst [off2] ptr) mem)
10936 // cond: is32Bit(int64(off1)+int64(off2))
10937 // result: (MOVLload [off1+off2] {sym} ptr mem)
10938 for {
10939 off1 := auxIntToInt32(v.AuxInt)
10940 sym := auxToSym(v.Aux)
10941 if v_0.Op != OpAMD64ADDQconst {
10942 break
10943 }
10944 off2 := auxIntToInt32(v_0.AuxInt)
10945 ptr := v_0.Args[0]
10946 mem := v_1
10947 if !(is32Bit(int64(off1) + int64(off2))) {
10948 break
10949 }
10950 v.reset(OpAMD64MOVLload)
10951 v.AuxInt = int32ToAuxInt(off1 + off2)
10952 v.Aux = symToAux(sym)
10953 v.AddArg2(ptr, mem)
10954 return true
10955 }
10956 // match: (MOVLload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
10957 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
10958 // result: (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem)
10959 for {
10960 off1 := auxIntToInt32(v.AuxInt)
10961 sym1 := auxToSym(v.Aux)
10962 if v_0.Op != OpAMD64LEAQ {
10963 break
10964 }
10965 off2 := auxIntToInt32(v_0.AuxInt)
10966 sym2 := auxToSym(v_0.Aux)
10967 base := v_0.Args[0]
10968 mem := v_1
10969 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
10970 break
10971 }
10972 v.reset(OpAMD64MOVLload)
10973 v.AuxInt = int32ToAuxInt(off1 + off2)
10974 v.Aux = symToAux(mergeSym(sym1, sym2))
10975 v.AddArg2(base, mem)
10976 return true
10977 }
10978 // match: (MOVLload [off] {sym} ptr (MOVSSstore [off] {sym} ptr val _))
10979 // result: (MOVLf2i val)
10980 for {
10981 off := auxIntToInt32(v.AuxInt)
10982 sym := auxToSym(v.Aux)
10983 ptr := v_0
10984 if v_1.Op != OpAMD64MOVSSstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
10985 break
10986 }
10987 val := v_1.Args[1]
10988 if ptr != v_1.Args[0] {
10989 break
10990 }
10991 v.reset(OpAMD64MOVLf2i)
10992 v.AddArg(val)
10993 return true
10994 }
10995 // match: (MOVLload [off] {sym} (SB) _)
10996 // cond: symIsRO(sym)
10997 // result: (MOVLconst [int32(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))])
10998 for {
10999 off := auxIntToInt32(v.AuxInt)
11000 sym := auxToSym(v.Aux)
11001 if v_0.Op != OpSB || !(symIsRO(sym)) {
11002 break
11003 }
11004 v.reset(OpAMD64MOVLconst)
11005 v.AuxInt = int32ToAuxInt(int32(read32(sym, int64(off), config.ctxt.Arch.ByteOrder)))
11006 return true
11007 }
11008 return false
11009 }
11010 func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool {
11011 v_2 := v.Args[2]
11012 v_1 := v.Args[1]
11013 v_0 := v.Args[0]
11014 // match: (MOVLstore [off] {sym} ptr (MOVLQSX x) mem)
11015 // result: (MOVLstore [off] {sym} ptr x mem)
11016 for {
11017 off := auxIntToInt32(v.AuxInt)
11018 sym := auxToSym(v.Aux)
11019 ptr := v_0
11020 if v_1.Op != OpAMD64MOVLQSX {
11021 break
11022 }
11023 x := v_1.Args[0]
11024 mem := v_2
11025 v.reset(OpAMD64MOVLstore)
11026 v.AuxInt = int32ToAuxInt(off)
11027 v.Aux = symToAux(sym)
11028 v.AddArg3(ptr, x, mem)
11029 return true
11030 }
11031 // match: (MOVLstore [off] {sym} ptr (MOVLQZX x) mem)
11032 // result: (MOVLstore [off] {sym} ptr x mem)
11033 for {
11034 off := auxIntToInt32(v.AuxInt)
11035 sym := auxToSym(v.Aux)
11036 ptr := v_0
11037 if v_1.Op != OpAMD64MOVLQZX {
11038 break
11039 }
11040 x := v_1.Args[0]
11041 mem := v_2
11042 v.reset(OpAMD64MOVLstore)
11043 v.AuxInt = int32ToAuxInt(off)
11044 v.Aux = symToAux(sym)
11045 v.AddArg3(ptr, x, mem)
11046 return true
11047 }
11048 // match: (MOVLstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
11049 // cond: is32Bit(int64(off1)+int64(off2))
11050 // result: (MOVLstore [off1+off2] {sym} ptr val mem)
11051 for {
11052 off1 := auxIntToInt32(v.AuxInt)
11053 sym := auxToSym(v.Aux)
11054 if v_0.Op != OpAMD64ADDQconst {
11055 break
11056 }
11057 off2 := auxIntToInt32(v_0.AuxInt)
11058 ptr := v_0.Args[0]
11059 val := v_1
11060 mem := v_2
11061 if !(is32Bit(int64(off1) + int64(off2))) {
11062 break
11063 }
11064 v.reset(OpAMD64MOVLstore)
11065 v.AuxInt = int32ToAuxInt(off1 + off2)
11066 v.Aux = symToAux(sym)
11067 v.AddArg3(ptr, val, mem)
11068 return true
11069 }
11070 // match: (MOVLstore [off] {sym} ptr (MOVLconst [c]) mem)
11071 // result: (MOVLstoreconst [makeValAndOff(int32(c),off)] {sym} ptr mem)
11072 for {
11073 off := auxIntToInt32(v.AuxInt)
11074 sym := auxToSym(v.Aux)
11075 ptr := v_0
11076 if v_1.Op != OpAMD64MOVLconst {
11077 break
11078 }
11079 c := auxIntToInt32(v_1.AuxInt)
11080 mem := v_2
11081 v.reset(OpAMD64MOVLstoreconst)
11082 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
11083 v.Aux = symToAux(sym)
11084 v.AddArg2(ptr, mem)
11085 return true
11086 }
11087
11088
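// match: (MOVLstore [off] {sym} ptr (MOVQconst [c]) mem)
// result: (MOVLstoreconst [makeValAndOff(int32(c),off)] {sym} ptr mem)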
11089 for {
11090 off := auxIntToInt32(v.AuxInt)
11091 sym := auxToSym(v.Aux)
11092 ptr := v_0
11093 if v_1.Op != OpAMD64MOVQconst {
11094 break
11095 }
11096 c := auxIntToInt64(v_1.AuxInt)
11097 mem := v_2
11098 v.reset(OpAMD64MOVLstoreconst)
11099 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
11100 v.Aux = symToAux(sym)
11101 v.AddArg2(ptr, mem)
11102 return true
11103 }
11104
11105
11106
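// match: (MOVLstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
// result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)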
11107 for {
11108 off1 := auxIntToInt32(v.AuxInt)
11109 sym1 := auxToSym(v.Aux)
11110 if v_0.Op != OpAMD64LEAQ {
11111 break
11112 }
11113 off2 := auxIntToInt32(v_0.AuxInt)
11114 sym2 := auxToSym(v_0.Aux)
11115 base := v_0.Args[0]
11116 val := v_1
11117 mem := v_2
11118 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
11119 break
11120 }
11121 v.reset(OpAMD64MOVLstore)
11122 v.AuxInt = int32ToAuxInt(off1 + off2)
11123 v.Aux = symToAux(mergeSym(sym1, sym2))
11124 v.AddArg3(base, val, mem)
11125 return true
11126 }
11127
11128
11129
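// match: (MOVLstore [off] {sym} ptr y:(ADDLload x [off] {sym} ptr mem) mem)
// cond: y.Uses == 1 && clobber(y)
// result: (ADDLmodify [off] {sym} ptr x mem)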
11130 for {
11131 off := auxIntToInt32(v.AuxInt)
11132 sym := auxToSym(v.Aux)
11133 ptr := v_0
11134 y := v_1
11135 if y.Op != OpAMD64ADDLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
11136 break
11137 }
11138 mem := y.Args[2]
11139 x := y.Args[0]
11140 if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
11141 break
11142 }
11143 v.reset(OpAMD64ADDLmodify)
11144 v.AuxInt = int32ToAuxInt(off)
11145 v.Aux = symToAux(sym)
11146 v.AddArg3(ptr, x, mem)
11147 return true
11148 }
11149
11150
11151
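// match: (MOVLstore [off] {sym} ptr y:(ANDLload x [off] {sym} ptr mem) mem)
// cond: y.Uses == 1 && clobber(y)
// result: (ANDLmodify [off] {sym} ptr x mem)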
11152 for {
11153 off := auxIntToInt32(v.AuxInt)
11154 sym := auxToSym(v.Aux)
11155 ptr := v_0
11156 y := v_1
11157 if y.Op != OpAMD64ANDLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
11158 break
11159 }
11160 mem := y.Args[2]
11161 x := y.Args[0]
11162 if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
11163 break
11164 }
11165 v.reset(OpAMD64ANDLmodify)
11166 v.AuxInt = int32ToAuxInt(off)
11167 v.Aux = symToAux(sym)
11168 v.AddArg3(ptr, x, mem)
11169 return true
11170 }
11171
11172
11173
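// match: (MOVLstore [off] {sym} ptr y:(ORLload x [off] {sym} ptr mem) mem)
// cond: y.Uses == 1 && clobber(y)
// result: (ORLmodify [off] {sym} ptr x mem)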
11174 for {
11175 off := auxIntToInt32(v.AuxInt)
11176 sym := auxToSym(v.Aux)
11177 ptr := v_0
11178 y := v_1
11179 if y.Op != OpAMD64ORLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
11180 break
11181 }
11182 mem := y.Args[2]
11183 x := y.Args[0]
11184 if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
11185 break
11186 }
11187 v.reset(OpAMD64ORLmodify)
11188 v.AuxInt = int32ToAuxInt(off)
11189 v.Aux = symToAux(sym)
11190 v.AddArg3(ptr, x, mem)
11191 return true
11192 }
11193
11194
11195
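// match: (MOVLstore [off] {sym} ptr y:(XORLload x [off] {sym} ptr mem) mem)
// cond: y.Uses == 1 && clobber(y)
// result: (XORLmodify [off] {sym} ptr x mem)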
11196 for {
11197 off := auxIntToInt32(v.AuxInt)
11198 sym := auxToSym(v.Aux)
11199 ptr := v_0
11200 y := v_1
11201 if y.Op != OpAMD64XORLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
11202 break
11203 }
11204 mem := y.Args[2]
11205 x := y.Args[0]
11206 if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
11207 break
11208 }
11209 v.reset(OpAMD64XORLmodify)
11210 v.AuxInt = int32ToAuxInt(off)
11211 v.Aux = symToAux(sym)
11212 v.AddArg3(ptr, x, mem)
11213 return true
11214 }
11215
11216
11217
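// match: (MOVLstore [off] {sym} ptr y:(ADDL l:(MOVLload [off] {sym} ptr mem) x) mem)
// cond: y.Uses == 1 && l.Uses == 1 && clobber(y, l)
// result: (ADDLmodify [off] {sym} ptr x mem)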
11218 for {
11219 off := auxIntToInt32(v.AuxInt)
11220 sym := auxToSym(v.Aux)
11221 ptr := v_0
11222 y := v_1
11223 if y.Op != OpAMD64ADDL {
11224 break
11225 }
11226 _ = y.Args[1]
11227 y_0 := y.Args[0]
11228 y_1 := y.Args[1]
11229 for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
11230 l := y_0
11231 if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
11232 continue
11233 }
11234 mem := l.Args[1]
11235 if ptr != l.Args[0] {
11236 continue
11237 }
11238 x := y_1
11239 if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
11240 continue
11241 }
11242 v.reset(OpAMD64ADDLmodify)
11243 v.AuxInt = int32ToAuxInt(off)
11244 v.Aux = symToAux(sym)
11245 v.AddArg3(ptr, x, mem)
11246 return true
11247 }
11248 break
11249 }
11250
11251
11252
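// match: (MOVLstore [off] {sym} ptr y:(SUBL l:(MOVLload [off] {sym} ptr mem) x) mem)
// cond: y.Uses == 1 && l.Uses == 1 && clobber(y, l)
// result: (SUBLmodify [off] {sym} ptr x mem)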
11253 for {
11254 off := auxIntToInt32(v.AuxInt)
11255 sym := auxToSym(v.Aux)
11256 ptr := v_0
11257 y := v_1
11258 if y.Op != OpAMD64SUBL {
11259 break
11260 }
11261 x := y.Args[1]
11262 l := y.Args[0]
11263 if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
11264 break
11265 }
11266 mem := l.Args[1]
11267 if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
11268 break
11269 }
11270 v.reset(OpAMD64SUBLmodify)
11271 v.AuxInt = int32ToAuxInt(off)
11272 v.Aux = symToAux(sym)
11273 v.AddArg3(ptr, x, mem)
11274 return true
11275 }
11276
11277
11278
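// match: (MOVLstore [off] {sym} ptr y:(ANDL l:(MOVLload [off] {sym} ptr mem) x) mem)
// cond: y.Uses == 1 && l.Uses == 1 && clobber(y, l)
// result: (ANDLmodify [off] {sym} ptr x mem)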
11279 for {
11280 off := auxIntToInt32(v.AuxInt)
11281 sym := auxToSym(v.Aux)
11282 ptr := v_0
11283 y := v_1
11284 if y.Op != OpAMD64ANDL {
11285 break
11286 }
11287 _ = y.Args[1]
11288 y_0 := y.Args[0]
11289 y_1 := y.Args[1]
11290 for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
11291 l := y_0
11292 if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
11293 continue
11294 }
11295 mem := l.Args[1]
11296 if ptr != l.Args[0] {
11297 continue
11298 }
11299 x := y_1
11300 if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
11301 continue
11302 }
11303 v.reset(OpAMD64ANDLmodify)
11304 v.AuxInt = int32ToAuxInt(off)
11305 v.Aux = symToAux(sym)
11306 v.AddArg3(ptr, x, mem)
11307 return true
11308 }
11309 break
11310 }
11311
11312
11313
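// match: (MOVLstore [off] {sym} ptr y:(ORL l:(MOVLload [off] {sym} ptr mem) x) mem)
// cond: y.Uses == 1 && l.Uses == 1 && clobber(y, l)
// result: (ORLmodify [off] {sym} ptr x mem)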
11314 for {
11315 off := auxIntToInt32(v.AuxInt)
11316 sym := auxToSym(v.Aux)
11317 ptr := v_0
11318 y := v_1
11319 if y.Op != OpAMD64ORL {
11320 break
11321 }
11322 _ = y.Args[1]
11323 y_0 := y.Args[0]
11324 y_1 := y.Args[1]
11325 for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
11326 l := y_0
11327 if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
11328 continue
11329 }
11330 mem := l.Args[1]
11331 if ptr != l.Args[0] {
11332 continue
11333 }
11334 x := y_1
11335 if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
11336 continue
11337 }
11338 v.reset(OpAMD64ORLmodify)
11339 v.AuxInt = int32ToAuxInt(off)
11340 v.Aux = symToAux(sym)
11341 v.AddArg3(ptr, x, mem)
11342 return true
11343 }
11344 break
11345 }
11346
11347
11348
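// match: (MOVLstore [off] {sym} ptr y:(XORL l:(MOVLload [off] {sym} ptr mem) x) mem)
// cond: y.Uses == 1 && l.Uses == 1 && clobber(y, l)
// result: (XORLmodify [off] {sym} ptr x mem)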
11349 for {
11350 off := auxIntToInt32(v.AuxInt)
11351 sym := auxToSym(v.Aux)
11352 ptr := v_0
11353 y := v_1
11354 if y.Op != OpAMD64XORL {
11355 break
11356 }
11357 _ = y.Args[1]
11358 y_0 := y.Args[0]
11359 y_1 := y.Args[1]
11360 for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
11361 l := y_0
11362 if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
11363 continue
11364 }
11365 mem := l.Args[1]
11366 if ptr != l.Args[0] {
11367 continue
11368 }
11369 x := y_1
11370 if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
11371 continue
11372 }
11373 v.reset(OpAMD64XORLmodify)
11374 v.AuxInt = int32ToAuxInt(off)
11375 v.Aux = symToAux(sym)
11376 v.AddArg3(ptr, x, mem)
11377 return true
11378 }
11379 break
11380 }
11381
11382
11383
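// match: (MOVLstore [off] {sym} ptr a:(ADDLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
// cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
// result: (ADDLconstmodify [makeValAndOff(int32(c),off)] {sym} ptr mem)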
11384 for {
11385 off := auxIntToInt32(v.AuxInt)
11386 sym := auxToSym(v.Aux)
11387 ptr := v_0
11388 a := v_1
11389 if a.Op != OpAMD64ADDLconst {
11390 break
11391 }
11392 c := auxIntToInt32(a.AuxInt)
11393 l := a.Args[0]
11394 if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
11395 break
11396 }
11397 mem := l.Args[1]
11398 ptr2 := l.Args[0]
11399 if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
11400 break
11401 }
11402 v.reset(OpAMD64ADDLconstmodify)
11403 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
11404 v.Aux = symToAux(sym)
11405 v.AddArg2(ptr, mem)
11406 return true
11407 }
11408
11409
11410
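// match: (MOVLstore [off] {sym} ptr a:(ANDLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
// cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
// result: (ANDLconstmodify [makeValAndOff(int32(c),off)] {sym} ptr mem)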
11411 for {
11412 off := auxIntToInt32(v.AuxInt)
11413 sym := auxToSym(v.Aux)
11414 ptr := v_0
11415 a := v_1
11416 if a.Op != OpAMD64ANDLconst {
11417 break
11418 }
11419 c := auxIntToInt32(a.AuxInt)
11420 l := a.Args[0]
11421 if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
11422 break
11423 }
11424 mem := l.Args[1]
11425 ptr2 := l.Args[0]
11426 if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
11427 break
11428 }
11429 v.reset(OpAMD64ANDLconstmodify)
11430 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
11431 v.Aux = symToAux(sym)
11432 v.AddArg2(ptr, mem)
11433 return true
11434 }
11435
11436
11437
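// match: (MOVLstore [off] {sym} ptr a:(ORLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
// cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
// result: (ORLconstmodify [makeValAndOff(int32(c),off)] {sym} ptr mem)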
11438 for {
11439 off := auxIntToInt32(v.AuxInt)
11440 sym := auxToSym(v.Aux)
11441 ptr := v_0
11442 a := v_1
11443 if a.Op != OpAMD64ORLconst {
11444 break
11445 }
11446 c := auxIntToInt32(a.AuxInt)
11447 l := a.Args[0]
11448 if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
11449 break
11450 }
11451 mem := l.Args[1]
11452 ptr2 := l.Args[0]
11453 if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
11454 break
11455 }
11456 v.reset(OpAMD64ORLconstmodify)
11457 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
11458 v.Aux = symToAux(sym)
11459 v.AddArg2(ptr, mem)
11460 return true
11461 }
11462
11463
11464
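// match: (MOVLstore [off] {sym} ptr a:(XORLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
// cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
// result: (XORLconstmodify [makeValAndOff(int32(c),off)] {sym} ptr mem)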
11465 for {
11466 off := auxIntToInt32(v.AuxInt)
11467 sym := auxToSym(v.Aux)
11468 ptr := v_0
11469 a := v_1
11470 if a.Op != OpAMD64XORLconst {
11471 break
11472 }
11473 c := auxIntToInt32(a.AuxInt)
11474 l := a.Args[0]
11475 if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
11476 break
11477 }
11478 mem := l.Args[1]
11479 ptr2 := l.Args[0]
11480 if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
11481 break
11482 }
11483 v.reset(OpAMD64XORLconstmodify)
11484 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
11485 v.Aux = symToAux(sym)
11486 v.AddArg2(ptr, mem)
11487 return true
11488 }
11489
11490
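// match: (MOVLstore [off] {sym} ptr (MOVLf2i val) mem)
// result: (MOVSSstore [off] {sym} ptr val mem)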
11491 for {
11492 off := auxIntToInt32(v.AuxInt)
11493 sym := auxToSym(v.Aux)
11494 ptr := v_0
11495 if v_1.Op != OpAMD64MOVLf2i {
11496 break
11497 }
11498 val := v_1.Args[0]
11499 mem := v_2
11500 v.reset(OpAMD64MOVSSstore)
11501 v.AuxInt = int32ToAuxInt(off)
11502 v.Aux = symToAux(sym)
11503 v.AddArg3(ptr, val, mem)
11504 return true
11505 }
11506
11507
11508
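// match: (MOVLstore [i] {s} p x:(BSWAPL w) mem)
// cond: x.Uses == 1 && buildcfg.GOAMD64 >= 3
// result: (MOVBELstore [i] {s} p w mem)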
11509 for {
11510 i := auxIntToInt32(v.AuxInt)
11511 s := auxToSym(v.Aux)
11512 p := v_0
11513 x := v_1
11514 if x.Op != OpAMD64BSWAPL {
11515 break
11516 }
11517 w := x.Args[0]
11518 mem := v_2
11519 if !(x.Uses == 1 && buildcfg.GOAMD64 >= 3) {
11520 break
11521 }
11522 v.reset(OpAMD64MOVBELstore)
11523 v.AuxInt = int32ToAuxInt(i)
11524 v.Aux = symToAux(s)
11525 v.AddArg3(p, w, mem)
11526 return true
11527 }
11528 return false
11529 }
11530 func rewriteValueAMD64_OpAMD64MOVLstoreconst(v *Value) bool {
11531 v_1 := v.Args[1]
11532 v_0 := v.Args[0]
11533
11534
11535
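// match: (MOVLstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
// cond: ValAndOff(sc).canAdd32(off)
// result: (MOVLstoreconst [ValAndOff(sc).addOffset32(off)] {s} ptr mem)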
11536 for {
11537 sc := auxIntToValAndOff(v.AuxInt)
11538 s := auxToSym(v.Aux)
11539 if v_0.Op != OpAMD64ADDQconst {
11540 break
11541 }
11542 off := auxIntToInt32(v_0.AuxInt)
11543 ptr := v_0.Args[0]
11544 mem := v_1
11545 if !(ValAndOff(sc).canAdd32(off)) {
11546 break
11547 }
11548 v.reset(OpAMD64MOVLstoreconst)
11549 v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
11550 v.Aux = symToAux(s)
11551 v.AddArg2(ptr, mem)
11552 return true
11553 }
11554
11555
11556
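// match: (MOVLstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)
// result: (MOVLstoreconst [ValAndOff(sc).addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)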
11557 for {
11558 sc := auxIntToValAndOff(v.AuxInt)
11559 sym1 := auxToSym(v.Aux)
11560 if v_0.Op != OpAMD64LEAQ {
11561 break
11562 }
11563 off := auxIntToInt32(v_0.AuxInt)
11564 sym2 := auxToSym(v_0.Aux)
11565 ptr := v_0.Args[0]
11566 mem := v_1
11567 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)) {
11568 break
11569 }
11570 v.reset(OpAMD64MOVLstoreconst)
11571 v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
11572 v.Aux = symToAux(mergeSym(sym1, sym2))
11573 v.AddArg2(ptr, mem)
11574 return true
11575 }
11576 return false
11577 }
11578 func rewriteValueAMD64_OpAMD64MOVOload(v *Value) bool {
11579 v_1 := v.Args[1]
11580 v_0 := v.Args[0]
11581
11582
11583
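// match: (MOVOload [off1] {sym} (ADDQconst [off2] ptr) mem)
// cond: is32Bit(int64(off1)+int64(off2))
// result: (MOVOload [off1+off2] {sym} ptr mem)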
11584 for {
11585 off1 := auxIntToInt32(v.AuxInt)
11586 sym := auxToSym(v.Aux)
11587 if v_0.Op != OpAMD64ADDQconst {
11588 break
11589 }
11590 off2 := auxIntToInt32(v_0.AuxInt)
11591 ptr := v_0.Args[0]
11592 mem := v_1
11593 if !(is32Bit(int64(off1) + int64(off2))) {
11594 break
11595 }
11596 v.reset(OpAMD64MOVOload)
11597 v.AuxInt = int32ToAuxInt(off1 + off2)
11598 v.Aux = symToAux(sym)
11599 v.AddArg2(ptr, mem)
11600 return true
11601 }
11602
11603
11604
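// match: (MOVOload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
// result: (MOVOload [off1+off2] {mergeSym(sym1,sym2)} base mem)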
11605 for {
11606 off1 := auxIntToInt32(v.AuxInt)
11607 sym1 := auxToSym(v.Aux)
11608 if v_0.Op != OpAMD64LEAQ {
11609 break
11610 }
11611 off2 := auxIntToInt32(v_0.AuxInt)
11612 sym2 := auxToSym(v_0.Aux)
11613 base := v_0.Args[0]
11614 mem := v_1
11615 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
11616 break
11617 }
11618 v.reset(OpAMD64MOVOload)
11619 v.AuxInt = int32ToAuxInt(off1 + off2)
11620 v.Aux = symToAux(mergeSym(sym1, sym2))
11621 v.AddArg2(base, mem)
11622 return true
11623 }
11624 return false
11625 }
11626 func rewriteValueAMD64_OpAMD64MOVOstore(v *Value) bool {
11627 v_2 := v.Args[2]
11628 v_1 := v.Args[1]
11629 v_0 := v.Args[0]
11630 b := v.Block
11631 config := b.Func.Config
11632 typ := &b.Func.Config.Types
11633
11634
11635
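// match: (MOVOstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
// cond: is32Bit(int64(off1)+int64(off2))
// result: (MOVOstore [off1+off2] {sym} ptr val mem)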
11636 for {
11637 off1 := auxIntToInt32(v.AuxInt)
11638 sym := auxToSym(v.Aux)
11639 if v_0.Op != OpAMD64ADDQconst {
11640 break
11641 }
11642 off2 := auxIntToInt32(v_0.AuxInt)
11643 ptr := v_0.Args[0]
11644 val := v_1
11645 mem := v_2
11646 if !(is32Bit(int64(off1) + int64(off2))) {
11647 break
11648 }
11649 v.reset(OpAMD64MOVOstore)
11650 v.AuxInt = int32ToAuxInt(off1 + off2)
11651 v.Aux = symToAux(sym)
11652 v.AddArg3(ptr, val, mem)
11653 return true
11654 }
11655
11656
11657
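// match: (MOVOstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
// result: (MOVOstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)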
11658 for {
11659 off1 := auxIntToInt32(v.AuxInt)
11660 sym1 := auxToSym(v.Aux)
11661 if v_0.Op != OpAMD64LEAQ {
11662 break
11663 }
11664 off2 := auxIntToInt32(v_0.AuxInt)
11665 sym2 := auxToSym(v_0.Aux)
11666 base := v_0.Args[0]
11667 val := v_1
11668 mem := v_2
11669 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
11670 break
11671 }
11672 v.reset(OpAMD64MOVOstore)
11673 v.AuxInt = int32ToAuxInt(off1 + off2)
11674 v.Aux = symToAux(mergeSym(sym1, sym2))
11675 v.AddArg3(base, val, mem)
11676 return true
11677 }
11678
11679
11680
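// match: (MOVOstore [dstOff] {dstSym} ptr (MOVOload [srcOff] {srcSym} (SB) _) mem)
// cond: symIsRO(srcSym)
// result: (MOVQstore [dstOff+8] {dstSym} ptr (MOVQconst [int64(read64(srcSym, int64(srcOff)+8, config.ctxt.Arch.ByteOrder))]) (MOVQstore [dstOff] {dstSym} ptr (MOVQconst [int64(read64(srcSym, int64(srcOff), config.ctxt.Arch.ByteOrder))]) mem))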
11681 for {
11682 dstOff := auxIntToInt32(v.AuxInt)
11683 dstSym := auxToSym(v.Aux)
11684 ptr := v_0
11685 if v_1.Op != OpAMD64MOVOload {
11686 break
11687 }
11688 srcOff := auxIntToInt32(v_1.AuxInt)
11689 srcSym := auxToSym(v_1.Aux)
11690 v_1_0 := v_1.Args[0]
11691 if v_1_0.Op != OpSB {
11692 break
11693 }
11694 mem := v_2
11695 if !(symIsRO(srcSym)) {
11696 break
11697 }
11698 v.reset(OpAMD64MOVQstore)
11699 v.AuxInt = int32ToAuxInt(dstOff + 8)
11700 v.Aux = symToAux(dstSym)
11701 v0 := b.NewValue0(v_1.Pos, OpAMD64MOVQconst, typ.UInt64)
11702 v0.AuxInt = int64ToAuxInt(int64(read64(srcSym, int64(srcOff)+8, config.ctxt.Arch.ByteOrder)))
11703 v1 := b.NewValue0(v_1.Pos, OpAMD64MOVQstore, types.TypeMem)
11704 v1.AuxInt = int32ToAuxInt(dstOff)
11705 v1.Aux = symToAux(dstSym)
11706 v2 := b.NewValue0(v_1.Pos, OpAMD64MOVQconst, typ.UInt64)
11707 v2.AuxInt = int64ToAuxInt(int64(read64(srcSym, int64(srcOff), config.ctxt.Arch.ByteOrder)))
11708 v1.AddArg3(ptr, v2, mem)
11709 v.AddArg3(ptr, v0, v1)
11710 return true
11711 }
11712 return false
11713 }
11714 func rewriteValueAMD64_OpAMD64MOVOstoreconst(v *Value) bool {
11715 v_1 := v.Args[1]
11716 v_0 := v.Args[0]
11717
11718
11719
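// match: (MOVOstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
// cond: ValAndOff(sc).canAdd32(off)
// result: (MOVOstoreconst [ValAndOff(sc).addOffset32(off)] {s} ptr mem)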
11720 for {
11721 sc := auxIntToValAndOff(v.AuxInt)
11722 s := auxToSym(v.Aux)
11723 if v_0.Op != OpAMD64ADDQconst {
11724 break
11725 }
11726 off := auxIntToInt32(v_0.AuxInt)
11727 ptr := v_0.Args[0]
11728 mem := v_1
11729 if !(ValAndOff(sc).canAdd32(off)) {
11730 break
11731 }
11732 v.reset(OpAMD64MOVOstoreconst)
11733 v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
11734 v.Aux = symToAux(s)
11735 v.AddArg2(ptr, mem)
11736 return true
11737 }
11738
11739
11740
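// match: (MOVOstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)
// result: (MOVOstoreconst [ValAndOff(sc).addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)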
11741 for {
11742 sc := auxIntToValAndOff(v.AuxInt)
11743 sym1 := auxToSym(v.Aux)
11744 if v_0.Op != OpAMD64LEAQ {
11745 break
11746 }
11747 off := auxIntToInt32(v_0.AuxInt)
11748 sym2 := auxToSym(v_0.Aux)
11749 ptr := v_0.Args[0]
11750 mem := v_1
11751 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)) {
11752 break
11753 }
11754 v.reset(OpAMD64MOVOstoreconst)
11755 v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
11756 v.Aux = symToAux(mergeSym(sym1, sym2))
11757 v.AddArg2(ptr, mem)
11758 return true
11759 }
11760 return false
11761 }
11762 func rewriteValueAMD64_OpAMD64MOVQatomicload(v *Value) bool {
11763 v_1 := v.Args[1]
11764 v_0 := v.Args[0]
11765
11766
11767
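// match: (MOVQatomicload [off1] {sym} (ADDQconst [off2] ptr) mem)
// cond: is32Bit(int64(off1)+int64(off2))
// result: (MOVQatomicload [off1+off2] {sym} ptr mem)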
11768 for {
11769 off1 := auxIntToInt32(v.AuxInt)
11770 sym := auxToSym(v.Aux)
11771 if v_0.Op != OpAMD64ADDQconst {
11772 break
11773 }
11774 off2 := auxIntToInt32(v_0.AuxInt)
11775 ptr := v_0.Args[0]
11776 mem := v_1
11777 if !(is32Bit(int64(off1) + int64(off2))) {
11778 break
11779 }
11780 v.reset(OpAMD64MOVQatomicload)
11781 v.AuxInt = int32ToAuxInt(off1 + off2)
11782 v.Aux = symToAux(sym)
11783 v.AddArg2(ptr, mem)
11784 return true
11785 }
11786
11787
11788
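// match: (MOVQatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
// result: (MOVQatomicload [off1+off2] {mergeSym(sym1, sym2)} ptr mem)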
11789 for {
11790 off1 := auxIntToInt32(v.AuxInt)
11791 sym1 := auxToSym(v.Aux)
11792 if v_0.Op != OpAMD64LEAQ {
11793 break
11794 }
11795 off2 := auxIntToInt32(v_0.AuxInt)
11796 sym2 := auxToSym(v_0.Aux)
11797 ptr := v_0.Args[0]
11798 mem := v_1
11799 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
11800 break
11801 }
11802 v.reset(OpAMD64MOVQatomicload)
11803 v.AuxInt = int32ToAuxInt(off1 + off2)
11804 v.Aux = symToAux(mergeSym(sym1, sym2))
11805 v.AddArg2(ptr, mem)
11806 return true
11807 }
11808 return false
11809 }
11810 func rewriteValueAMD64_OpAMD64MOVQf2i(v *Value) bool {
11811 v_0 := v.Args[0]
11812 b := v.Block
11813
11814
11815
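// match: (MOVQf2i <t> (Arg <u> [off] {sym}))
// cond: t.Size() == u.Size()
// result: @b.Func.Entry (Arg <t> [off] {sym})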
11816 for {
11817 t := v.Type
11818 if v_0.Op != OpArg {
11819 break
11820 }
11821 u := v_0.Type
11822 off := auxIntToInt32(v_0.AuxInt)
11823 sym := auxToSym(v_0.Aux)
11824 if !(t.Size() == u.Size()) {
11825 break
11826 }
11827 b = b.Func.Entry
11828 v0 := b.NewValue0(v.Pos, OpArg, t)
11829 v.copyOf(v0)
11830 v0.AuxInt = int32ToAuxInt(off)
11831 v0.Aux = symToAux(sym)
11832 return true
11833 }
11834 return false
11835 }
11836 func rewriteValueAMD64_OpAMD64MOVQi2f(v *Value) bool {
11837 v_0 := v.Args[0]
11838 b := v.Block
11839
11840
11841
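// match: (MOVQi2f <t> (Arg <u> [off] {sym}))
// cond: t.Size() == u.Size()
// result: @b.Func.Entry (Arg <t> [off] {sym})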
11842 for {
11843 t := v.Type
11844 if v_0.Op != OpArg {
11845 break
11846 }
11847 u := v_0.Type
11848 off := auxIntToInt32(v_0.AuxInt)
11849 sym := auxToSym(v_0.Aux)
11850 if !(t.Size() == u.Size()) {
11851 break
11852 }
11853 b = b.Func.Entry
11854 v0 := b.NewValue0(v.Pos, OpArg, t)
11855 v.copyOf(v0)
11856 v0.AuxInt = int32ToAuxInt(off)
11857 v0.Aux = symToAux(sym)
11858 return true
11859 }
11860 return false
11861 }
11862 func rewriteValueAMD64_OpAMD64MOVQload(v *Value) bool {
11863 v_1 := v.Args[1]
11864 v_0 := v.Args[0]
11865 b := v.Block
11866 config := b.Func.Config
11867
11868
11869
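// match: (MOVQload [off] {sym} ptr (MOVQstore [off2] {sym2} ptr2 x _))
// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
// result: x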
11870 for {
11871 off := auxIntToInt32(v.AuxInt)
11872 sym := auxToSym(v.Aux)
11873 ptr := v_0
11874 if v_1.Op != OpAMD64MOVQstore {
11875 break
11876 }
11877 off2 := auxIntToInt32(v_1.AuxInt)
11878 sym2 := auxToSym(v_1.Aux)
11879 x := v_1.Args[1]
11880 ptr2 := v_1.Args[0]
11881 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
11882 break
11883 }
11884 v.copyOf(x)
11885 return true
11886 }
11887
11888
11889
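// match: (MOVQload [off1] {sym} (ADDQconst [off2] ptr) mem)
// cond: is32Bit(int64(off1)+int64(off2))
// result: (MOVQload [off1+off2] {sym} ptr mem)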
11890 for {
11891 off1 := auxIntToInt32(v.AuxInt)
11892 sym := auxToSym(v.Aux)
11893 if v_0.Op != OpAMD64ADDQconst {
11894 break
11895 }
11896 off2 := auxIntToInt32(v_0.AuxInt)
11897 ptr := v_0.Args[0]
11898 mem := v_1
11899 if !(is32Bit(int64(off1) + int64(off2))) {
11900 break
11901 }
11902 v.reset(OpAMD64MOVQload)
11903 v.AuxInt = int32ToAuxInt(off1 + off2)
11904 v.Aux = symToAux(sym)
11905 v.AddArg2(ptr, mem)
11906 return true
11907 }
11908
11909
11910
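// match: (MOVQload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
// result: (MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem)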
11911 for {
11912 off1 := auxIntToInt32(v.AuxInt)
11913 sym1 := auxToSym(v.Aux)
11914 if v_0.Op != OpAMD64LEAQ {
11915 break
11916 }
11917 off2 := auxIntToInt32(v_0.AuxInt)
11918 sym2 := auxToSym(v_0.Aux)
11919 base := v_0.Args[0]
11920 mem := v_1
11921 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
11922 break
11923 }
11924 v.reset(OpAMD64MOVQload)
11925 v.AuxInt = int32ToAuxInt(off1 + off2)
11926 v.Aux = symToAux(mergeSym(sym1, sym2))
11927 v.AddArg2(base, mem)
11928 return true
11929 }
11930
11931
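// match: (MOVQload [off] {sym} ptr (MOVSDstore [off] {sym} ptr val _))
// result: (MOVQf2i val)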
11932 for {
11933 off := auxIntToInt32(v.AuxInt)
11934 sym := auxToSym(v.Aux)
11935 ptr := v_0
11936 if v_1.Op != OpAMD64MOVSDstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
11937 break
11938 }
11939 val := v_1.Args[1]
11940 if ptr != v_1.Args[0] {
11941 break
11942 }
11943 v.reset(OpAMD64MOVQf2i)
11944 v.AddArg(val)
11945 return true
11946 }
11947
11948
11949
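// match: (MOVQload [off] {sym} (SB) _)
// cond: symIsRO(sym)
// result: (MOVQconst [int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder))])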
11950 for {
11951 off := auxIntToInt32(v.AuxInt)
11952 sym := auxToSym(v.Aux)
11953 if v_0.Op != OpSB || !(symIsRO(sym)) {
11954 break
11955 }
11956 v.reset(OpAMD64MOVQconst)
11957 v.AuxInt = int64ToAuxInt(int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder)))
11958 return true
11959 }
11960 return false
11961 }
11962 func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool {
11963 v_2 := v.Args[2]
11964 v_1 := v.Args[1]
11965 v_0 := v.Args[0]
11966
11967
11968
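// match: (MOVQstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
// cond: is32Bit(int64(off1)+int64(off2))
// result: (MOVQstore [off1+off2] {sym} ptr val mem)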
11969 for {
11970 off1 := auxIntToInt32(v.AuxInt)
11971 sym := auxToSym(v.Aux)
11972 if v_0.Op != OpAMD64ADDQconst {
11973 break
11974 }
11975 off2 := auxIntToInt32(v_0.AuxInt)
11976 ptr := v_0.Args[0]
11977 val := v_1
11978 mem := v_2
11979 if !(is32Bit(int64(off1) + int64(off2))) {
11980 break
11981 }
11982 v.reset(OpAMD64MOVQstore)
11983 v.AuxInt = int32ToAuxInt(off1 + off2)
11984 v.Aux = symToAux(sym)
11985 v.AddArg3(ptr, val, mem)
11986 return true
11987 }
11988
11989
11990
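// match: (MOVQstore [off] {sym} ptr (MOVQconst [c]) mem)
// cond: validVal(c)
// result: (MOVQstoreconst [makeValAndOff(int32(c),off)] {sym} ptr mem)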
11991 for {
11992 off := auxIntToInt32(v.AuxInt)
11993 sym := auxToSym(v.Aux)
11994 ptr := v_0
11995 if v_1.Op != OpAMD64MOVQconst {
11996 break
11997 }
11998 c := auxIntToInt64(v_1.AuxInt)
11999 mem := v_2
12000 if !(validVal(c)) {
12001 break
12002 }
12003 v.reset(OpAMD64MOVQstoreconst)
12004 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
12005 v.Aux = symToAux(sym)
12006 v.AddArg2(ptr, mem)
12007 return true
12008 }
12009
12010
12011
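// match: (MOVQstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
// result: (MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)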
12012 for {
12013 off1 := auxIntToInt32(v.AuxInt)
12014 sym1 := auxToSym(v.Aux)
12015 if v_0.Op != OpAMD64LEAQ {
12016 break
12017 }
12018 off2 := auxIntToInt32(v_0.AuxInt)
12019 sym2 := auxToSym(v_0.Aux)
12020 base := v_0.Args[0]
12021 val := v_1
12022 mem := v_2
12023 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
12024 break
12025 }
12026 v.reset(OpAMD64MOVQstore)
12027 v.AuxInt = int32ToAuxInt(off1 + off2)
12028 v.Aux = symToAux(mergeSym(sym1, sym2))
12029 v.AddArg3(base, val, mem)
12030 return true
12031 }
12032
12033
12034
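// match: (MOVQstore [off] {sym} ptr y:(ADDQload x [off] {sym} ptr mem) mem)
// cond: y.Uses == 1 && clobber(y)
// result: (ADDQmodify [off] {sym} ptr x mem)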
12035 for {
12036 off := auxIntToInt32(v.AuxInt)
12037 sym := auxToSym(v.Aux)
12038 ptr := v_0
12039 y := v_1
12040 if y.Op != OpAMD64ADDQload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
12041 break
12042 }
12043 mem := y.Args[2]
12044 x := y.Args[0]
12045 if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
12046 break
12047 }
12048 v.reset(OpAMD64ADDQmodify)
12049 v.AuxInt = int32ToAuxInt(off)
12050 v.Aux = symToAux(sym)
12051 v.AddArg3(ptr, x, mem)
12052 return true
12053 }
12054
12055
12056
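// match: (MOVQstore [off] {sym} ptr y:(ANDQload x [off] {sym} ptr mem) mem)
// cond: y.Uses == 1 && clobber(y)
// result: (ANDQmodify [off] {sym} ptr x mem)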
12057 for {
12058 off := auxIntToInt32(v.AuxInt)
12059 sym := auxToSym(v.Aux)
12060 ptr := v_0
12061 y := v_1
12062 if y.Op != OpAMD64ANDQload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
12063 break
12064 }
12065 mem := y.Args[2]
12066 x := y.Args[0]
12067 if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
12068 break
12069 }
12070 v.reset(OpAMD64ANDQmodify)
12071 v.AuxInt = int32ToAuxInt(off)
12072 v.Aux = symToAux(sym)
12073 v.AddArg3(ptr, x, mem)
12074 return true
12075 }
12076
12077
12078
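// match: (MOVQstore [off] {sym} ptr y:(ORQload x [off] {sym} ptr mem) mem)
// cond: y.Uses == 1 && clobber(y)
// result: (ORQmodify [off] {sym} ptr x mem)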
12079 for {
12080 off := auxIntToInt32(v.AuxInt)
12081 sym := auxToSym(v.Aux)
12082 ptr := v_0
12083 y := v_1
12084 if y.Op != OpAMD64ORQload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
12085 break
12086 }
12087 mem := y.Args[2]
12088 x := y.Args[0]
12089 if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
12090 break
12091 }
12092 v.reset(OpAMD64ORQmodify)
12093 v.AuxInt = int32ToAuxInt(off)
12094 v.Aux = symToAux(sym)
12095 v.AddArg3(ptr, x, mem)
12096 return true
12097 }
12098
12099
12100
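// match: (MOVQstore [off] {sym} ptr y:(XORQload x [off] {sym} ptr mem) mem)
// cond: y.Uses == 1 && clobber(y)
// result: (XORQmodify [off] {sym} ptr x mem)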
12101 for {
12102 off := auxIntToInt32(v.AuxInt)
12103 sym := auxToSym(v.Aux)
12104 ptr := v_0
12105 y := v_1
12106 if y.Op != OpAMD64XORQload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
12107 break
12108 }
12109 mem := y.Args[2]
12110 x := y.Args[0]
12111 if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
12112 break
12113 }
12114 v.reset(OpAMD64XORQmodify)
12115 v.AuxInt = int32ToAuxInt(off)
12116 v.Aux = symToAux(sym)
12117 v.AddArg3(ptr, x, mem)
12118 return true
12119 }
12120
12121
12122
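// match: (MOVQstore [off] {sym} ptr y:(ADDQ l:(MOVQload [off] {sym} ptr mem) x) mem)
// cond: y.Uses == 1 && l.Uses == 1 && clobber(y, l)
// result: (ADDQmodify [off] {sym} ptr x mem)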
12123 for {
12124 off := auxIntToInt32(v.AuxInt)
12125 sym := auxToSym(v.Aux)
12126 ptr := v_0
12127 y := v_1
12128 if y.Op != OpAMD64ADDQ {
12129 break
12130 }
12131 _ = y.Args[1]
12132 y_0 := y.Args[0]
12133 y_1 := y.Args[1]
12134 for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
12135 l := y_0
12136 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
12137 continue
12138 }
12139 mem := l.Args[1]
12140 if ptr != l.Args[0] {
12141 continue
12142 }
12143 x := y_1
12144 if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
12145 continue
12146 }
12147 v.reset(OpAMD64ADDQmodify)
12148 v.AuxInt = int32ToAuxInt(off)
12149 v.Aux = symToAux(sym)
12150 v.AddArg3(ptr, x, mem)
12151 return true
12152 }
12153 break
12154 }
12155
12156
12157
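// match: (MOVQstore [off] {sym} ptr y:(SUBQ l:(MOVQload [off] {sym} ptr mem) x) mem)
// cond: y.Uses == 1 && l.Uses == 1 && clobber(y, l)
// result: (SUBQmodify [off] {sym} ptr x mem)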
12158 for {
12159 off := auxIntToInt32(v.AuxInt)
12160 sym := auxToSym(v.Aux)
12161 ptr := v_0
12162 y := v_1
12163 if y.Op != OpAMD64SUBQ {
12164 break
12165 }
12166 x := y.Args[1]
12167 l := y.Args[0]
12168 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
12169 break
12170 }
12171 mem := l.Args[1]
12172 if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
12173 break
12174 }
12175 v.reset(OpAMD64SUBQmodify)
12176 v.AuxInt = int32ToAuxInt(off)
12177 v.Aux = symToAux(sym)
12178 v.AddArg3(ptr, x, mem)
12179 return true
12180 }
12181
12182
12183
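// match: (MOVQstore [off] {sym} ptr y:(ANDQ l:(MOVQload [off] {sym} ptr mem) x) mem)
// cond: y.Uses == 1 && l.Uses == 1 && clobber(y, l)
// result: (ANDQmodify [off] {sym} ptr x mem)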
12184 for {
12185 off := auxIntToInt32(v.AuxInt)
12186 sym := auxToSym(v.Aux)
12187 ptr := v_0
12188 y := v_1
12189 if y.Op != OpAMD64ANDQ {
12190 break
12191 }
12192 _ = y.Args[1]
12193 y_0 := y.Args[0]
12194 y_1 := y.Args[1]
12195 for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
12196 l := y_0
12197 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
12198 continue
12199 }
12200 mem := l.Args[1]
12201 if ptr != l.Args[0] {
12202 continue
12203 }
12204 x := y_1
12205 if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
12206 continue
12207 }
12208 v.reset(OpAMD64ANDQmodify)
12209 v.AuxInt = int32ToAuxInt(off)
12210 v.Aux = symToAux(sym)
12211 v.AddArg3(ptr, x, mem)
12212 return true
12213 }
12214 break
12215 }
12216
12217
12218
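// match: (MOVQstore [off] {sym} ptr y:(ORQ l:(MOVQload [off] {sym} ptr mem) x) mem)
// cond: y.Uses == 1 && l.Uses == 1 && clobber(y, l)
// result: (ORQmodify [off] {sym} ptr x mem)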
12219 for {
12220 off := auxIntToInt32(v.AuxInt)
12221 sym := auxToSym(v.Aux)
12222 ptr := v_0
12223 y := v_1
12224 if y.Op != OpAMD64ORQ {
12225 break
12226 }
12227 _ = y.Args[1]
12228 y_0 := y.Args[0]
12229 y_1 := y.Args[1]
12230 for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
12231 l := y_0
12232 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
12233 continue
12234 }
12235 mem := l.Args[1]
12236 if ptr != l.Args[0] {
12237 continue
12238 }
12239 x := y_1
12240 if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
12241 continue
12242 }
12243 v.reset(OpAMD64ORQmodify)
12244 v.AuxInt = int32ToAuxInt(off)
12245 v.Aux = symToAux(sym)
12246 v.AddArg3(ptr, x, mem)
12247 return true
12248 }
12249 break
12250 }
12251
12252
12253
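// match: (MOVQstore [off] {sym} ptr y:(XORQ l:(MOVQload [off] {sym} ptr mem) x) mem)
// cond: y.Uses == 1 && l.Uses == 1 && clobber(y, l)
// result: (XORQmodify [off] {sym} ptr x mem)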
12254 for {
12255 off := auxIntToInt32(v.AuxInt)
12256 sym := auxToSym(v.Aux)
12257 ptr := v_0
12258 y := v_1
12259 if y.Op != OpAMD64XORQ {
12260 break
12261 }
12262 _ = y.Args[1]
12263 y_0 := y.Args[0]
12264 y_1 := y.Args[1]
12265 for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
12266 l := y_0
12267 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
12268 continue
12269 }
12270 mem := l.Args[1]
12271 if ptr != l.Args[0] {
12272 continue
12273 }
12274 x := y_1
12275 if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
12276 continue
12277 }
12278 v.reset(OpAMD64XORQmodify)
12279 v.AuxInt = int32ToAuxInt(off)
12280 v.Aux = symToAux(sym)
12281 v.AddArg3(ptr, x, mem)
12282 return true
12283 }
12284 break
12285 }
12286
12287
12288
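// match: (MOVQstore [off] {sym} ptr x:(BTSQconst [c] l:(MOVQload [off] {sym} ptr mem)) mem)
// cond: x.Uses == 1 && l.Uses == 1 && clobber(x, l)
// result: (BTSQconstmodify [makeValAndOff(int32(c),off)] {sym} ptr mem)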
12289 for {
12290 off := auxIntToInt32(v.AuxInt)
12291 sym := auxToSym(v.Aux)
12292 ptr := v_0
12293 x := v_1
12294 if x.Op != OpAMD64BTSQconst {
12295 break
12296 }
12297 c := auxIntToInt8(x.AuxInt)
12298 l := x.Args[0]
12299 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
12300 break
12301 }
12302 mem := l.Args[1]
12303 if ptr != l.Args[0] || mem != v_2 || !(x.Uses == 1 && l.Uses == 1 && clobber(x, l)) {
12304 break
12305 }
12306 v.reset(OpAMD64BTSQconstmodify)
12307 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
12308 v.Aux = symToAux(sym)
12309 v.AddArg2(ptr, mem)
12310 return true
12311 }
12312
12313
12314
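// match: (MOVQstore [off] {sym} ptr x:(BTRQconst [c] l:(MOVQload [off] {sym} ptr mem)) mem)
// cond: x.Uses == 1 && l.Uses == 1 && clobber(x, l)
// result: (BTRQconstmodify [makeValAndOff(int32(c),off)] {sym} ptr mem)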
12315 for {
12316 off := auxIntToInt32(v.AuxInt)
12317 sym := auxToSym(v.Aux)
12318 ptr := v_0
12319 x := v_1
12320 if x.Op != OpAMD64BTRQconst {
12321 break
12322 }
12323 c := auxIntToInt8(x.AuxInt)
12324 l := x.Args[0]
12325 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
12326 break
12327 }
12328 mem := l.Args[1]
12329 if ptr != l.Args[0] || mem != v_2 || !(x.Uses == 1 && l.Uses == 1 && clobber(x, l)) {
12330 break
12331 }
12332 v.reset(OpAMD64BTRQconstmodify)
12333 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
12334 v.Aux = symToAux(sym)
12335 v.AddArg2(ptr, mem)
12336 return true
12337 }
12338
12339
12340
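// match: (MOVQstore [off] {sym} ptr x:(BTCQconst [c] l:(MOVQload [off] {sym} ptr mem)) mem)
// cond: x.Uses == 1 && l.Uses == 1 && clobber(x, l)
// result: (BTCQconstmodify [makeValAndOff(int32(c),off)] {sym} ptr mem)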
12341 for {
12342 off := auxIntToInt32(v.AuxInt)
12343 sym := auxToSym(v.Aux)
12344 ptr := v_0
12345 x := v_1
12346 if x.Op != OpAMD64BTCQconst {
12347 break
12348 }
12349 c := auxIntToInt8(x.AuxInt)
12350 l := x.Args[0]
12351 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
12352 break
12353 }
12354 mem := l.Args[1]
12355 if ptr != l.Args[0] || mem != v_2 || !(x.Uses == 1 && l.Uses == 1 && clobber(x, l)) {
12356 break
12357 }
12358 v.reset(OpAMD64BTCQconstmodify)
12359 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
12360 v.Aux = symToAux(sym)
12361 v.AddArg2(ptr, mem)
12362 return true
12363 }
12364
12365
12366
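// match: (MOVQstore [off] {sym} ptr a:(ADDQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
// cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
// result: (ADDQconstmodify [makeValAndOff(int32(c),off)] {sym} ptr mem)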
12367 for {
12368 off := auxIntToInt32(v.AuxInt)
12369 sym := auxToSym(v.Aux)
12370 ptr := v_0
12371 a := v_1
12372 if a.Op != OpAMD64ADDQconst {
12373 break
12374 }
12375 c := auxIntToInt32(a.AuxInt)
12376 l := a.Args[0]
12377 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
12378 break
12379 }
12380 mem := l.Args[1]
12381 ptr2 := l.Args[0]
12382 if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
12383 break
12384 }
12385 v.reset(OpAMD64ADDQconstmodify)
12386 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
12387 v.Aux = symToAux(sym)
12388 v.AddArg2(ptr, mem)
12389 return true
12390 }
12391
12392
12393
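// match: (MOVQstore [off] {sym} ptr a:(ANDQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
// cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
// result: (ANDQconstmodify [makeValAndOff(int32(c),off)] {sym} ptr mem)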
12394 for {
12395 off := auxIntToInt32(v.AuxInt)
12396 sym := auxToSym(v.Aux)
12397 ptr := v_0
12398 a := v_1
12399 if a.Op != OpAMD64ANDQconst {
12400 break
12401 }
12402 c := auxIntToInt32(a.AuxInt)
12403 l := a.Args[0]
12404 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
12405 break
12406 }
12407 mem := l.Args[1]
12408 ptr2 := l.Args[0]
12409 if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
12410 break
12411 }
12412 v.reset(OpAMD64ANDQconstmodify)
12413 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
12414 v.Aux = symToAux(sym)
12415 v.AddArg2(ptr, mem)
12416 return true
12417 }
12418
12419
12420
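// match: (MOVQstore [off] {sym} ptr a:(ORQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
// cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
// result: (ORQconstmodify [makeValAndOff(int32(c),off)] {sym} ptr mem)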
12421 for {
12422 off := auxIntToInt32(v.AuxInt)
12423 sym := auxToSym(v.Aux)
12424 ptr := v_0
12425 a := v_1
12426 if a.Op != OpAMD64ORQconst {
12427 break
12428 }
12429 c := auxIntToInt32(a.AuxInt)
12430 l := a.Args[0]
12431 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
12432 break
12433 }
12434 mem := l.Args[1]
12435 ptr2 := l.Args[0]
12436 if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
12437 break
12438 }
12439 v.reset(OpAMD64ORQconstmodify)
12440 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
12441 v.Aux = symToAux(sym)
12442 v.AddArg2(ptr, mem)
12443 return true
12444 }
12445
12446
12447
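// match: (MOVQstore [off] {sym} ptr a:(XORQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
// cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
// result: (XORQconstmodify [makeValAndOff(int32(c),off)] {sym} ptr mem)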
12448 for {
12449 off := auxIntToInt32(v.AuxInt)
12450 sym := auxToSym(v.Aux)
12451 ptr := v_0
12452 a := v_1
12453 if a.Op != OpAMD64XORQconst {
12454 break
12455 }
12456 c := auxIntToInt32(a.AuxInt)
12457 l := a.Args[0]
12458 if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
12459 break
12460 }
12461 mem := l.Args[1]
12462 ptr2 := l.Args[0]
12463 if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
12464 break
12465 }
12466 v.reset(OpAMD64XORQconstmodify)
12467 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
12468 v.Aux = symToAux(sym)
12469 v.AddArg2(ptr, mem)
12470 return true
12471 }
12472
12473
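// match: (MOVQstore [off] {sym} ptr (MOVQf2i val) mem)
// result: (MOVSDstore [off] {sym} ptr val mem)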
12474 for {
12475 off := auxIntToInt32(v.AuxInt)
12476 sym := auxToSym(v.Aux)
12477 ptr := v_0
12478 if v_1.Op != OpAMD64MOVQf2i {
12479 break
12480 }
12481 val := v_1.Args[0]
12482 mem := v_2
12483 v.reset(OpAMD64MOVSDstore)
12484 v.AuxInt = int32ToAuxInt(off)
12485 v.Aux = symToAux(sym)
12486 v.AddArg3(ptr, val, mem)
12487 return true
12488 }
12489
12490
12491
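// match: (MOVQstore [i] {s} p x:(BSWAPQ w) mem)
// cond: x.Uses == 1 && buildcfg.GOAMD64 >= 3
// result: (MOVBEQstore [i] {s} p w mem)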
12492 for {
12493 i := auxIntToInt32(v.AuxInt)
12494 s := auxToSym(v.Aux)
12495 p := v_0
12496 x := v_1
12497 if x.Op != OpAMD64BSWAPQ {
12498 break
12499 }
12500 w := x.Args[0]
12501 mem := v_2
12502 if !(x.Uses == 1 && buildcfg.GOAMD64 >= 3) {
12503 break
12504 }
12505 v.reset(OpAMD64MOVBEQstore)
12506 v.AuxInt = int32ToAuxInt(i)
12507 v.Aux = symToAux(s)
12508 v.AddArg3(p, w, mem)
12509 return true
12510 }
12511 return false
12512 }
12513 func rewriteValueAMD64_OpAMD64MOVQstoreconst(v *Value) bool {
12514 v_1 := v.Args[1]
12515 v_0 := v.Args[0]
12516
12517
12518
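// match: (MOVQstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
// cond: ValAndOff(sc).canAdd32(off)
// result: (MOVQstoreconst [ValAndOff(sc).addOffset32(off)] {s} ptr mem)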
12519 for {
12520 sc := auxIntToValAndOff(v.AuxInt)
12521 s := auxToSym(v.Aux)
12522 if v_0.Op != OpAMD64ADDQconst {
12523 break
12524 }
12525 off := auxIntToInt32(v_0.AuxInt)
12526 ptr := v_0.Args[0]
12527 mem := v_1
12528 if !(ValAndOff(sc).canAdd32(off)) {
12529 break
12530 }
12531 v.reset(OpAMD64MOVQstoreconst)
12532 v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
12533 v.Aux = symToAux(s)
12534 v.AddArg2(ptr, mem)
12535 return true
12536 }
12537
12538
12539
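// match: (MOVQstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)
// result: (MOVQstoreconst [ValAndOff(sc).addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)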
12540 for {
12541 sc := auxIntToValAndOff(v.AuxInt)
12542 sym1 := auxToSym(v.Aux)
12543 if v_0.Op != OpAMD64LEAQ {
12544 break
12545 }
12546 off := auxIntToInt32(v_0.AuxInt)
12547 sym2 := auxToSym(v_0.Aux)
12548 ptr := v_0.Args[0]
12549 mem := v_1
12550 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)) {
12551 break
12552 }
12553 v.reset(OpAMD64MOVQstoreconst)
12554 v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
12555 v.Aux = symToAux(mergeSym(sym1, sym2))
12556 v.AddArg2(ptr, mem)
12557 return true
12558 }
12559
12560
12561
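// match: (MOVQstoreconst [c] {s} p1 x:(MOVQstoreconst [a] {s} p0 mem))
// cond: x.Uses == 1 && sequentialAddresses(p0, p1, int64(a.Off()+8-c.Off())) && a.Val() == 0 && c.Val() == 0 && setPos(v, x.Pos) && clobber(x)
// result: (MOVOstoreconst [makeValAndOff(0,a.Off())] {s} p0 mem)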
12562 for {
12563 c := auxIntToValAndOff(v.AuxInt)
12564 s := auxToSym(v.Aux)
12565 p1 := v_0
12566 x := v_1
12567 if x.Op != OpAMD64MOVQstoreconst {
12568 break
12569 }
12570 a := auxIntToValAndOff(x.AuxInt)
12571 if auxToSym(x.Aux) != s {
12572 break
12573 }
12574 mem := x.Args[1]
12575 p0 := x.Args[0]
12576 if !(x.Uses == 1 && sequentialAddresses(p0, p1, int64(a.Off()+8-c.Off())) && a.Val() == 0 && c.Val() == 0 && setPos(v, x.Pos) && clobber(x)) {
12577 break
12578 }
12579 v.reset(OpAMD64MOVOstoreconst)
12580 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, a.Off()))
12581 v.Aux = symToAux(s)
12582 v.AddArg2(p0, mem)
12583 return true
12584 }
12585
12586
12587
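// match: (MOVQstoreconst [a] {s} p0 x:(MOVQstoreconst [c] {s} p1 mem))
// cond: x.Uses == 1 && sequentialAddresses(p0, p1, int64(a.Off()+8-c.Off())) && a.Val() == 0 && c.Val() == 0 && setPos(v, x.Pos) && clobber(x)
// result: (MOVOstoreconst [makeValAndOff(0,a.Off())] {s} p0 mem)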
12588 for {
12589 a := auxIntToValAndOff(v.AuxInt)
12590 s := auxToSym(v.Aux)
12591 p0 := v_0
12592 x := v_1
12593 if x.Op != OpAMD64MOVQstoreconst {
12594 break
12595 }
12596 c := auxIntToValAndOff(x.AuxInt)
12597 if auxToSym(x.Aux) != s {
12598 break
12599 }
12600 mem := x.Args[1]
12601 p1 := x.Args[0]
12602 if !(x.Uses == 1 && sequentialAddresses(p0, p1, int64(a.Off()+8-c.Off())) && a.Val() == 0 && c.Val() == 0 && setPos(v, x.Pos) && clobber(x)) {
12603 break
12604 }
12605 v.reset(OpAMD64MOVOstoreconst)
12606 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, a.Off()))
12607 v.Aux = symToAux(s)
12608 v.AddArg2(p0, mem)
12609 return true
12610 }
12611 return false
12612 }
12613 func rewriteValueAMD64_OpAMD64MOVSDload(v *Value) bool {
12614 v_1 := v.Args[1]
12615 v_0 := v.Args[0]
12616
12617
12618
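// match: (MOVSDload [off1] {sym} (ADDQconst [off2] ptr) mem)
// cond: is32Bit(int64(off1)+int64(off2))
// result: (MOVSDload [off1+off2] {sym} ptr mem)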
12619 for {
12620 off1 := auxIntToInt32(v.AuxInt)
12621 sym := auxToSym(v.Aux)
12622 if v_0.Op != OpAMD64ADDQconst {
12623 break
12624 }
12625 off2 := auxIntToInt32(v_0.AuxInt)
12626 ptr := v_0.Args[0]
12627 mem := v_1
12628 if !(is32Bit(int64(off1) + int64(off2))) {
12629 break
12630 }
12631 v.reset(OpAMD64MOVSDload)
12632 v.AuxInt = int32ToAuxInt(off1 + off2)
12633 v.Aux = symToAux(sym)
12634 v.AddArg2(ptr, mem)
12635 return true
12636 }
12637
12638
12639
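// match: (MOVSDload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
// result: (MOVSDload [off1+off2] {mergeSym(sym1,sym2)} base mem)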
12640 for {
12641 off1 := auxIntToInt32(v.AuxInt)
12642 sym1 := auxToSym(v.Aux)
12643 if v_0.Op != OpAMD64LEAQ {
12644 break
12645 }
12646 off2 := auxIntToInt32(v_0.AuxInt)
12647 sym2 := auxToSym(v_0.Aux)
12648 base := v_0.Args[0]
12649 mem := v_1
12650 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
12651 break
12652 }
12653 v.reset(OpAMD64MOVSDload)
12654 v.AuxInt = int32ToAuxInt(off1 + off2)
12655 v.Aux = symToAux(mergeSym(sym1, sym2))
12656 v.AddArg2(base, mem)
12657 return true
12658 }
12659
12660
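// match: (MOVSDload [off] {sym} ptr (MOVQstore [off] {sym} ptr val _))
// result: (MOVQi2f val)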
12661 for {
12662 off := auxIntToInt32(v.AuxInt)
12663 sym := auxToSym(v.Aux)
12664 ptr := v_0
12665 if v_1.Op != OpAMD64MOVQstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
12666 break
12667 }
12668 val := v_1.Args[1]
12669 if ptr != v_1.Args[0] {
12670 break
12671 }
12672 v.reset(OpAMD64MOVQi2f)
12673 v.AddArg(val)
12674 return true
12675 }
12676 return false
12677 }
12678 func rewriteValueAMD64_OpAMD64MOVSDstore(v *Value) bool {
12679 v_2 := v.Args[2]
12680 v_1 := v.Args[1]
12681 v_0 := v.Args[0]
12682 b := v.Block
12683 typ := &b.Func.Config.Types
12684
12685
12686
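// match: (MOVSDstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
// cond: is32Bit(int64(off1)+int64(off2))
// result: (MOVSDstore [off1+off2] {sym} ptr val mem)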
12687 for {
12688 off1 := auxIntToInt32(v.AuxInt)
12689 sym := auxToSym(v.Aux)
12690 if v_0.Op != OpAMD64ADDQconst {
12691 break
12692 }
12693 off2 := auxIntToInt32(v_0.AuxInt)
12694 ptr := v_0.Args[0]
12695 val := v_1
12696 mem := v_2
12697 if !(is32Bit(int64(off1) + int64(off2))) {
12698 break
12699 }
12700 v.reset(OpAMD64MOVSDstore)
12701 v.AuxInt = int32ToAuxInt(off1 + off2)
12702 v.Aux = symToAux(sym)
12703 v.AddArg3(ptr, val, mem)
12704 return true
12705 }
12706
12707
12708
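// match: (MOVSDstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
// result: (MOVSDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)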
12709 for {
12710 off1 := auxIntToInt32(v.AuxInt)
12711 sym1 := auxToSym(v.Aux)
12712 if v_0.Op != OpAMD64LEAQ {
12713 break
12714 }
12715 off2 := auxIntToInt32(v_0.AuxInt)
12716 sym2 := auxToSym(v_0.Aux)
12717 base := v_0.Args[0]
12718 val := v_1
12719 mem := v_2
12720 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
12721 break
12722 }
12723 v.reset(OpAMD64MOVSDstore)
12724 v.AuxInt = int32ToAuxInt(off1 + off2)
12725 v.Aux = symToAux(mergeSym(sym1, sym2))
12726 v.AddArg3(base, val, mem)
12727 return true
12728 }
12729
12730
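// match: (MOVSDstore [off] {sym} ptr (MOVQi2f val) mem)
// result: (MOVQstore [off] {sym} ptr val mem)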
12731 for {
12732 off := auxIntToInt32(v.AuxInt)
12733 sym := auxToSym(v.Aux)
12734 ptr := v_0
12735 if v_1.Op != OpAMD64MOVQi2f {
12736 break
12737 }
12738 val := v_1.Args[0]
12739 mem := v_2
12740 v.reset(OpAMD64MOVQstore)
12741 v.AuxInt = int32ToAuxInt(off)
12742 v.Aux = symToAux(sym)
12743 v.AddArg3(ptr, val, mem)
12744 return true
12745 }
12746
12747
12748
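// match: (MOVSDstore [off] {sym} ptr (MOVSDconst [f]) mem)
// cond: f == f (i.e. f is not a NaN)
// result: (MOVQstore [off] {sym} ptr (MOVQconst [int64(math.Float64bits(f))]) mem)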
12749 for {
12750 off := auxIntToInt32(v.AuxInt)
12751 sym := auxToSym(v.Aux)
12752 ptr := v_0
12753 if v_1.Op != OpAMD64MOVSDconst {
12754 break
12755 }
12756 f := auxIntToFloat64(v_1.AuxInt)
12757 mem := v_2
12758 if !(f == f) {
12759 break
12760 }
12761 v.reset(OpAMD64MOVQstore)
12762 v.AuxInt = int32ToAuxInt(off)
12763 v.Aux = symToAux(sym)
12764 v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
12765 v0.AuxInt = int64ToAuxInt(int64(math.Float64bits(f)))
12766 v.AddArg3(ptr, v0, mem)
12767 return true
12768 }
12769 return false
12770 }
12771 func rewriteValueAMD64_OpAMD64MOVSSload(v *Value) bool {
12772 v_1 := v.Args[1]
12773 v_0 := v.Args[0]
12774
12775
12776
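// match: (MOVSSload [off1] {sym} (ADDQconst [off2] ptr) mem)
// cond: is32Bit(int64(off1)+int64(off2))
// result: (MOVSSload [off1+off2] {sym} ptr mem)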
12777 for {
12778 off1 := auxIntToInt32(v.AuxInt)
12779 sym := auxToSym(v.Aux)
12780 if v_0.Op != OpAMD64ADDQconst {
12781 break
12782 }
12783 off2 := auxIntToInt32(v_0.AuxInt)
12784 ptr := v_0.Args[0]
12785 mem := v_1
12786 if !(is32Bit(int64(off1) + int64(off2))) {
12787 break
12788 }
12789 v.reset(OpAMD64MOVSSload)
12790 v.AuxInt = int32ToAuxInt(off1 + off2)
12791 v.Aux = symToAux(sym)
12792 v.AddArg2(ptr, mem)
12793 return true
12794 }
12795
12796
12797
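// match: (MOVSSload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
// result: (MOVSSload [off1+off2] {mergeSym(sym1,sym2)} base mem)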
12798 for {
12799 off1 := auxIntToInt32(v.AuxInt)
12800 sym1 := auxToSym(v.Aux)
12801 if v_0.Op != OpAMD64LEAQ {
12802 break
12803 }
12804 off2 := auxIntToInt32(v_0.AuxInt)
12805 sym2 := auxToSym(v_0.Aux)
12806 base := v_0.Args[0]
12807 mem := v_1
12808 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
12809 break
12810 }
12811 v.reset(OpAMD64MOVSSload)
12812 v.AuxInt = int32ToAuxInt(off1 + off2)
12813 v.Aux = symToAux(mergeSym(sym1, sym2))
12814 v.AddArg2(base, mem)
12815 return true
12816 }
12817
12818
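// match: (MOVSSload [off] {sym} ptr (MOVLstore [off] {sym} ptr val _))
// result: (MOVLi2f val)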
12819 for {
12820 off := auxIntToInt32(v.AuxInt)
12821 sym := auxToSym(v.Aux)
12822 ptr := v_0
12823 if v_1.Op != OpAMD64MOVLstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
12824 break
12825 }
12826 val := v_1.Args[1]
12827 if ptr != v_1.Args[0] {
12828 break
12829 }
12830 v.reset(OpAMD64MOVLi2f)
12831 v.AddArg(val)
12832 return true
12833 }
12834 return false
12835 }
12836 func rewriteValueAMD64_OpAMD64MOVSSstore(v *Value) bool {
12837 v_2 := v.Args[2]
12838 v_1 := v.Args[1]
12839 v_0 := v.Args[0]
12840 b := v.Block
12841 typ := &b.Func.Config.Types
12842
12843
12844
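// match: (MOVSSstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
// cond: is32Bit(int64(off1)+int64(off2))
// result: (MOVSSstore [off1+off2] {sym} ptr val mem)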
12845 for {
12846 off1 := auxIntToInt32(v.AuxInt)
12847 sym := auxToSym(v.Aux)
12848 if v_0.Op != OpAMD64ADDQconst {
12849 break
12850 }
12851 off2 := auxIntToInt32(v_0.AuxInt)
12852 ptr := v_0.Args[0]
12853 val := v_1
12854 mem := v_2
12855 if !(is32Bit(int64(off1) + int64(off2))) {
12856 break
12857 }
12858 v.reset(OpAMD64MOVSSstore)
12859 v.AuxInt = int32ToAuxInt(off1 + off2)
12860 v.Aux = symToAux(sym)
12861 v.AddArg3(ptr, val, mem)
12862 return true
12863 }
12864
12865
12866
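// match: (MOVSSstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
// result: (MOVSSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)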
12867 for {
12868 off1 := auxIntToInt32(v.AuxInt)
12869 sym1 := auxToSym(v.Aux)
12870 if v_0.Op != OpAMD64LEAQ {
12871 break
12872 }
12873 off2 := auxIntToInt32(v_0.AuxInt)
12874 sym2 := auxToSym(v_0.Aux)
12875 base := v_0.Args[0]
12876 val := v_1
12877 mem := v_2
12878 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
12879 break
12880 }
12881 v.reset(OpAMD64MOVSSstore)
12882 v.AuxInt = int32ToAuxInt(off1 + off2)
12883 v.Aux = symToAux(mergeSym(sym1, sym2))
12884 v.AddArg3(base, val, mem)
12885 return true
12886 }
12887
12888
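// match: (MOVSSstore [off] {sym} ptr (MOVLi2f val) mem)
// result: (MOVLstore [off] {sym} ptr val mem)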
12889 for {
12890 off := auxIntToInt32(v.AuxInt)
12891 sym := auxToSym(v.Aux)
12892 ptr := v_0
12893 if v_1.Op != OpAMD64MOVLi2f {
12894 break
12895 }
12896 val := v_1.Args[0]
12897 mem := v_2
12898 v.reset(OpAMD64MOVLstore)
12899 v.AuxInt = int32ToAuxInt(off)
12900 v.Aux = symToAux(sym)
12901 v.AddArg3(ptr, val, mem)
12902 return true
12903 }
12904
12905
12906
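// match: (MOVSSstore [off] {sym} ptr (MOVSSconst [f]) mem)
// cond: f == f (i.e. f is not a NaN)
// result: (MOVLstore [off] {sym} ptr (MOVLconst [int32(math.Float32bits(f))]) mem)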
12907 for {
12908 off := auxIntToInt32(v.AuxInt)
12909 sym := auxToSym(v.Aux)
12910 ptr := v_0
12911 if v_1.Op != OpAMD64MOVSSconst {
12912 break
12913 }
12914 f := auxIntToFloat32(v_1.AuxInt)
12915 mem := v_2
12916 if !(f == f) {
12917 break
12918 }
12919 v.reset(OpAMD64MOVLstore)
12920 v.AuxInt = int32ToAuxInt(off)
12921 v.Aux = symToAux(sym)
12922 v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt32)
12923 v0.AuxInt = int32ToAuxInt(int32(math.Float32bits(f)))
12924 v.AddArg3(ptr, v0, mem)
12925 return true
12926 }
12927 return false
12928 }
12929 func rewriteValueAMD64_OpAMD64MOVWQSX(v *Value) bool {
12930 v_0 := v.Args[0]
12931 b := v.Block
12932
12933
12934
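// match: (MOVWQSX x:(MOVWload [off] {sym} ptr mem))
// cond: x.Uses == 1 && clobber(x)
// result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)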
12935 for {
12936 x := v_0
12937 if x.Op != OpAMD64MOVWload {
12938 break
12939 }
12940 off := auxIntToInt32(x.AuxInt)
12941 sym := auxToSym(x.Aux)
12942 mem := x.Args[1]
12943 ptr := x.Args[0]
12944 if !(x.Uses == 1 && clobber(x)) {
12945 break
12946 }
12947 b = x.Block
12948 v0 := b.NewValue0(x.Pos, OpAMD64MOVWQSXload, v.Type)
12949 v.copyOf(v0)
12950 v0.AuxInt = int32ToAuxInt(off)
12951 v0.Aux = symToAux(sym)
12952 v0.AddArg2(ptr, mem)
12953 return true
12954 }
12955
12956
12957
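// match: (MOVWQSX x:(MOVLload [off] {sym} ptr mem))
// cond: x.Uses == 1 && clobber(x)
// result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)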
12958 for {
12959 x := v_0
12960 if x.Op != OpAMD64MOVLload {
12961 break
12962 }
12963 off := auxIntToInt32(x.AuxInt)
12964 sym := auxToSym(x.Aux)
12965 mem := x.Args[1]
12966 ptr := x.Args[0]
12967 if !(x.Uses == 1 && clobber(x)) {
12968 break
12969 }
12970 b = x.Block
12971 v0 := b.NewValue0(x.Pos, OpAMD64MOVWQSXload, v.Type)
12972 v.copyOf(v0)
12973 v0.AuxInt = int32ToAuxInt(off)
12974 v0.Aux = symToAux(sym)
12975 v0.AddArg2(ptr, mem)
12976 return true
12977 }
12978
12979
12980
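// match: (MOVWQSX x:(MOVQload [off] {sym} ptr mem))
// cond: x.Uses == 1 && clobber(x)
// result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)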
12981 for {
12982 x := v_0
12983 if x.Op != OpAMD64MOVQload {
12984 break
12985 }
12986 off := auxIntToInt32(x.AuxInt)
12987 sym := auxToSym(x.Aux)
12988 mem := x.Args[1]
12989 ptr := x.Args[0]
12990 if !(x.Uses == 1 && clobber(x)) {
12991 break
12992 }
12993 b = x.Block
12994 v0 := b.NewValue0(x.Pos, OpAMD64MOVWQSXload, v.Type)
12995 v.copyOf(v0)
12996 v0.AuxInt = int32ToAuxInt(off)
12997 v0.Aux = symToAux(sym)
12998 v0.AddArg2(ptr, mem)
12999 return true
13000 }
13001
13002
13003
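// match: (MOVWQSX (ANDLconst [c] x))
// cond: c & 0x8000 == 0
// result: (ANDLconst [c & 0x7fff] x)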
13004 for {
13005 if v_0.Op != OpAMD64ANDLconst {
13006 break
13007 }
13008 c := auxIntToInt32(v_0.AuxInt)
13009 x := v_0.Args[0]
13010 if !(c&0x8000 == 0) {
13011 break
13012 }
13013 v.reset(OpAMD64ANDLconst)
13014 v.AuxInt = int32ToAuxInt(c & 0x7fff)
13015 v.AddArg(x)
13016 return true
13017 }
13018
13019
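// match: (MOVWQSX (MOVWQSX x))
// result: (MOVWQSX x)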
13020 for {
13021 if v_0.Op != OpAMD64MOVWQSX {
13022 break
13023 }
13024 x := v_0.Args[0]
13025 v.reset(OpAMD64MOVWQSX)
13026 v.AddArg(x)
13027 return true
13028 }
13029
13030
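// match: (MOVWQSX (MOVBQSX x))
// result: (MOVBQSX x)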
13031 for {
13032 if v_0.Op != OpAMD64MOVBQSX {
13033 break
13034 }
13035 x := v_0.Args[0]
13036 v.reset(OpAMD64MOVBQSX)
13037 v.AddArg(x)
13038 return true
13039 }
13040 return false
13041 }
13042 func rewriteValueAMD64_OpAMD64MOVWQSXload(v *Value) bool {
13043 v_1 := v.Args[1]
13044 v_0 := v.Args[0]
13045 b := v.Block
13046 config := b.Func.Config
13047
13048
13049
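// match: (MOVWQSXload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _))
// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
// result: (MOVWQSX x)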
13050 for {
13051 off := auxIntToInt32(v.AuxInt)
13052 sym := auxToSym(v.Aux)
13053 ptr := v_0
13054 if v_1.Op != OpAMD64MOVWstore {
13055 break
13056 }
13057 off2 := auxIntToInt32(v_1.AuxInt)
13058 sym2 := auxToSym(v_1.Aux)
13059 x := v_1.Args[1]
13060 ptr2 := v_1.Args[0]
13061 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
13062 break
13063 }
13064 v.reset(OpAMD64MOVWQSX)
13065 v.AddArg(x)
13066 return true
13067 }
13068
13069
13070
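// match: (MOVWQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
// result: (MOVWQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)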
13071 for {
13072 off1 := auxIntToInt32(v.AuxInt)
13073 sym1 := auxToSym(v.Aux)
13074 if v_0.Op != OpAMD64LEAQ {
13075 break
13076 }
13077 off2 := auxIntToInt32(v_0.AuxInt)
13078 sym2 := auxToSym(v_0.Aux)
13079 base := v_0.Args[0]
13080 mem := v_1
13081 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
13082 break
13083 }
13084 v.reset(OpAMD64MOVWQSXload)
13085 v.AuxInt = int32ToAuxInt(off1 + off2)
13086 v.Aux = symToAux(mergeSym(sym1, sym2))
13087 v.AddArg2(base, mem)
13088 return true
13089 }
13090
13091
13092
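// match: (MOVWQSXload [off] {sym} (SB) _)
// cond: symIsRO(sym)
// result: (MOVQconst [int64(int16(read16(sym, int64(off), config.ctxt.Arch.ByteOrder)))])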
13093 for {
13094 off := auxIntToInt32(v.AuxInt)
13095 sym := auxToSym(v.Aux)
13096 if v_0.Op != OpSB || !(symIsRO(sym)) {
13097 break
13098 }
13099 v.reset(OpAMD64MOVQconst)
13100 v.AuxInt = int64ToAuxInt(int64(int16(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))))
13101 return true
13102 }
13103 return false
13104 }
13105 func rewriteValueAMD64_OpAMD64MOVWQZX(v *Value) bool {
13106 v_0 := v.Args[0]
13107 b := v.Block
13108
13109
13110
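// match: (MOVWQZX x:(MOVWload [off] {sym} ptr mem))
// cond: x.Uses == 1 && clobber(x)
// result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)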
13111 for {
13112 x := v_0
13113 if x.Op != OpAMD64MOVWload {
13114 break
13115 }
13116 off := auxIntToInt32(x.AuxInt)
13117 sym := auxToSym(x.Aux)
13118 mem := x.Args[1]
13119 ptr := x.Args[0]
13120 if !(x.Uses == 1 && clobber(x)) {
13121 break
13122 }
13123 b = x.Block
13124 v0 := b.NewValue0(x.Pos, OpAMD64MOVWload, v.Type)
13125 v.copyOf(v0)
13126 v0.AuxInt = int32ToAuxInt(off)
13127 v0.Aux = symToAux(sym)
13128 v0.AddArg2(ptr, mem)
13129 return true
13130 }
13131
13132
13133
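// match: (MOVWQZX x:(MOVLload [off] {sym} ptr mem))
// cond: x.Uses == 1 && clobber(x)
// result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)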
13134 for {
13135 x := v_0
13136 if x.Op != OpAMD64MOVLload {
13137 break
13138 }
13139 off := auxIntToInt32(x.AuxInt)
13140 sym := auxToSym(x.Aux)
13141 mem := x.Args[1]
13142 ptr := x.Args[0]
13143 if !(x.Uses == 1 && clobber(x)) {
13144 break
13145 }
13146 b = x.Block
13147 v0 := b.NewValue0(x.Pos, OpAMD64MOVWload, v.Type)
13148 v.copyOf(v0)
13149 v0.AuxInt = int32ToAuxInt(off)
13150 v0.Aux = symToAux(sym)
13151 v0.AddArg2(ptr, mem)
13152 return true
13153 }
13154
13155
13156
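// match: (MOVWQZX x:(MOVQload [off] {sym} ptr mem))
// cond: x.Uses == 1 && clobber(x)
// result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)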
13157 for {
13158 x := v_0
13159 if x.Op != OpAMD64MOVQload {
13160 break
13161 }
13162 off := auxIntToInt32(x.AuxInt)
13163 sym := auxToSym(x.Aux)
13164 mem := x.Args[1]
13165 ptr := x.Args[0]
13166 if !(x.Uses == 1 && clobber(x)) {
13167 break
13168 }
13169 b = x.Block
13170 v0 := b.NewValue0(x.Pos, OpAMD64MOVWload, v.Type)
13171 v.copyOf(v0)
13172 v0.AuxInt = int32ToAuxInt(off)
13173 v0.Aux = symToAux(sym)
13174 v0.AddArg2(ptr, mem)
13175 return true
13176 }
13177
13178
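// match: (MOVWQZX (ANDLconst [c] x))
// result: (ANDLconst [c & 0xffff] x)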
13179 for {
13180 if v_0.Op != OpAMD64ANDLconst {
13181 break
13182 }
13183 c := auxIntToInt32(v_0.AuxInt)
13184 x := v_0.Args[0]
13185 v.reset(OpAMD64ANDLconst)
13186 v.AuxInt = int32ToAuxInt(c & 0xffff)
13187 v.AddArg(x)
13188 return true
13189 }
13190
13191
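// match: (MOVWQZX (MOVWQZX x))
// result: (MOVWQZX x)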
13192 for {
13193 if v_0.Op != OpAMD64MOVWQZX {
13194 break
13195 }
13196 x := v_0.Args[0]
13197 v.reset(OpAMD64MOVWQZX)
13198 v.AddArg(x)
13199 return true
13200 }
13201
13202
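// match: (MOVWQZX (MOVBQZX x))
// result: (MOVBQZX x)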
13203 for {
13204 if v_0.Op != OpAMD64MOVBQZX {
13205 break
13206 }
13207 x := v_0.Args[0]
13208 v.reset(OpAMD64MOVBQZX)
13209 v.AddArg(x)
13210 return true
13211 }
13212 return false
13213 }
13214 func rewriteValueAMD64_OpAMD64MOVWload(v *Value) bool {
13215 v_1 := v.Args[1]
13216 v_0 := v.Args[0]
13217 b := v.Block
13218 config := b.Func.Config
13219
13220
13221
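// match: (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _))
// cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
// result: (MOVWQZX x)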
13222 for {
13223 off := auxIntToInt32(v.AuxInt)
13224 sym := auxToSym(v.Aux)
13225 ptr := v_0
13226 if v_1.Op != OpAMD64MOVWstore {
13227 break
13228 }
13229 off2 := auxIntToInt32(v_1.AuxInt)
13230 sym2 := auxToSym(v_1.Aux)
13231 x := v_1.Args[1]
13232 ptr2 := v_1.Args[0]
13233 if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
13234 break
13235 }
13236 v.reset(OpAMD64MOVWQZX)
13237 v.AddArg(x)
13238 return true
13239 }
13240
13241
13242
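// match: (MOVWload [off1] {sym} (ADDQconst [off2] ptr) mem)
// cond: is32Bit(int64(off1)+int64(off2))
// result: (MOVWload [off1+off2] {sym} ptr mem)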
13243 for {
13244 off1 := auxIntToInt32(v.AuxInt)
13245 sym := auxToSym(v.Aux)
13246 if v_0.Op != OpAMD64ADDQconst {
13247 break
13248 }
13249 off2 := auxIntToInt32(v_0.AuxInt)
13250 ptr := v_0.Args[0]
13251 mem := v_1
13252 if !(is32Bit(int64(off1) + int64(off2))) {
13253 break
13254 }
13255 v.reset(OpAMD64MOVWload)
13256 v.AuxInt = int32ToAuxInt(off1 + off2)
13257 v.Aux = symToAux(sym)
13258 v.AddArg2(ptr, mem)
13259 return true
13260 }
13261
13262
13263
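// match: (MOVWload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
// result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)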
13264 for {
13265 off1 := auxIntToInt32(v.AuxInt)
13266 sym1 := auxToSym(v.Aux)
13267 if v_0.Op != OpAMD64LEAQ {
13268 break
13269 }
13270 off2 := auxIntToInt32(v_0.AuxInt)
13271 sym2 := auxToSym(v_0.Aux)
13272 base := v_0.Args[0]
13273 mem := v_1
13274 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
13275 break
13276 }
13277 v.reset(OpAMD64MOVWload)
13278 v.AuxInt = int32ToAuxInt(off1 + off2)
13279 v.Aux = symToAux(mergeSym(sym1, sym2))
13280 v.AddArg2(base, mem)
13281 return true
13282 }
13283
13284
13285
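// match: (MOVWload [off] {sym} (SB) _)
// cond: symIsRO(sym)
// result: (MOVLconst [int32(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))])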
13286 for {
13287 off := auxIntToInt32(v.AuxInt)
13288 sym := auxToSym(v.Aux)
13289 if v_0.Op != OpSB || !(symIsRO(sym)) {
13290 break
13291 }
13292 v.reset(OpAMD64MOVLconst)
13293 v.AuxInt = int32ToAuxInt(int32(read16(sym, int64(off), config.ctxt.Arch.ByteOrder)))
13294 return true
13295 }
13296 return false
13297 }
13298 func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool {
13299 v_2 := v.Args[2]
13300 v_1 := v.Args[1]
13301 v_0 := v.Args[0]
13302
13303
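// match: (MOVWstore [off] {sym} ptr (MOVWQSX x) mem)
// result: (MOVWstore [off] {sym} ptr x mem)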
13304 for {
13305 off := auxIntToInt32(v.AuxInt)
13306 sym := auxToSym(v.Aux)
13307 ptr := v_0
13308 if v_1.Op != OpAMD64MOVWQSX {
13309 break
13310 }
13311 x := v_1.Args[0]
13312 mem := v_2
13313 v.reset(OpAMD64MOVWstore)
13314 v.AuxInt = int32ToAuxInt(off)
13315 v.Aux = symToAux(sym)
13316 v.AddArg3(ptr, x, mem)
13317 return true
13318 }
13319
13320
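// match: (MOVWstore [off] {sym} ptr (MOVWQZX x) mem)
// result: (MOVWstore [off] {sym} ptr x mem)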
13321 for {
13322 off := auxIntToInt32(v.AuxInt)
13323 sym := auxToSym(v.Aux)
13324 ptr := v_0
13325 if v_1.Op != OpAMD64MOVWQZX {
13326 break
13327 }
13328 x := v_1.Args[0]
13329 mem := v_2
13330 v.reset(OpAMD64MOVWstore)
13331 v.AuxInt = int32ToAuxInt(off)
13332 v.Aux = symToAux(sym)
13333 v.AddArg3(ptr, x, mem)
13334 return true
13335 }
13336
13337
13338
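// match: (MOVWstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
// cond: is32Bit(int64(off1)+int64(off2))
// result: (MOVWstore [off1+off2] {sym} ptr val mem)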
13339 for {
13340 off1 := auxIntToInt32(v.AuxInt)
13341 sym := auxToSym(v.Aux)
13342 if v_0.Op != OpAMD64ADDQconst {
13343 break
13344 }
13345 off2 := auxIntToInt32(v_0.AuxInt)
13346 ptr := v_0.Args[0]
13347 val := v_1
13348 mem := v_2
13349 if !(is32Bit(int64(off1) + int64(off2))) {
13350 break
13351 }
13352 v.reset(OpAMD64MOVWstore)
13353 v.AuxInt = int32ToAuxInt(off1 + off2)
13354 v.Aux = symToAux(sym)
13355 v.AddArg3(ptr, val, mem)
13356 return true
13357 }
13358
13359
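// match: (MOVWstore [off] {sym} ptr (MOVLconst [c]) mem)
// result: (MOVWstoreconst [makeValAndOff(int32(int16(c)),off)] {sym} ptr mem)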
13360 for {
13361 off := auxIntToInt32(v.AuxInt)
13362 sym := auxToSym(v.Aux)
13363 ptr := v_0
13364 if v_1.Op != OpAMD64MOVLconst {
13365 break
13366 }
13367 c := auxIntToInt32(v_1.AuxInt)
13368 mem := v_2
13369 v.reset(OpAMD64MOVWstoreconst)
13370 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int16(c)), off))
13371 v.Aux = symToAux(sym)
13372 v.AddArg2(ptr, mem)
13373 return true
13374 }
13375
13376
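// match: (MOVWstore [off] {sym} ptr (MOVQconst [c]) mem)
// result: (MOVWstoreconst [makeValAndOff(int32(int16(c)),off)] {sym} ptr mem)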
13377 for {
13378 off := auxIntToInt32(v.AuxInt)
13379 sym := auxToSym(v.Aux)
13380 ptr := v_0
13381 if v_1.Op != OpAMD64MOVQconst {
13382 break
13383 }
13384 c := auxIntToInt64(v_1.AuxInt)
13385 mem := v_2
13386 v.reset(OpAMD64MOVWstoreconst)
13387 v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int16(c)), off))
13388 v.Aux = symToAux(sym)
13389 v.AddArg2(ptr, mem)
13390 return true
13391 }
13392
13393
13394
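// match: (MOVWstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
// result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)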
13395 for {
13396 off1 := auxIntToInt32(v.AuxInt)
13397 sym1 := auxToSym(v.Aux)
13398 if v_0.Op != OpAMD64LEAQ {
13399 break
13400 }
13401 off2 := auxIntToInt32(v_0.AuxInt)
13402 sym2 := auxToSym(v_0.Aux)
13403 base := v_0.Args[0]
13404 val := v_1
13405 mem := v_2
13406 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
13407 break
13408 }
13409 v.reset(OpAMD64MOVWstore)
13410 v.AuxInt = int32ToAuxInt(off1 + off2)
13411 v.Aux = symToAux(mergeSym(sym1, sym2))
13412 v.AddArg3(base, val, mem)
13413 return true
13414 }
13415
13416
13417
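// match: (MOVWstore [i] {s} p x:(ROLWconst [8] w) mem)
// cond: x.Uses == 1 && buildcfg.GOAMD64 >= 3
// result: (MOVBEWstore [i] {s} p w mem)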
13418 for {
13419 i := auxIntToInt32(v.AuxInt)
13420 s := auxToSym(v.Aux)
13421 p := v_0
13422 x := v_1
13423 if x.Op != OpAMD64ROLWconst || auxIntToInt8(x.AuxInt) != 8 {
13424 break
13425 }
13426 w := x.Args[0]
13427 mem := v_2
13428 if !(x.Uses == 1 && buildcfg.GOAMD64 >= 3) {
13429 break
13430 }
13431 v.reset(OpAMD64MOVBEWstore)
13432 v.AuxInt = int32ToAuxInt(i)
13433 v.Aux = symToAux(s)
13434 v.AddArg3(p, w, mem)
13435 return true
13436 }
13437 return false
13438 }
13439 func rewriteValueAMD64_OpAMD64MOVWstoreconst(v *Value) bool {
13440 v_1 := v.Args[1]
13441 v_0 := v.Args[0]
13442
13443
13444
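// match: (MOVWstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
// cond: ValAndOff(sc).canAdd32(off)
// result: (MOVWstoreconst [ValAndOff(sc).addOffset32(off)] {s} ptr mem)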
13445 for {
13446 sc := auxIntToValAndOff(v.AuxInt)
13447 s := auxToSym(v.Aux)
13448 if v_0.Op != OpAMD64ADDQconst {
13449 break
13450 }
13451 off := auxIntToInt32(v_0.AuxInt)
13452 ptr := v_0.Args[0]
13453 mem := v_1
13454 if !(ValAndOff(sc).canAdd32(off)) {
13455 break
13456 }
13457 v.reset(OpAMD64MOVWstoreconst)
13458 v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
13459 v.Aux = symToAux(s)
13460 v.AddArg2(ptr, mem)
13461 return true
13462 }
13463
13464
13465
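// match: (MOVWstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
// cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)
// result: (MOVWstoreconst [ValAndOff(sc).addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)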
13466 for {
13467 sc := auxIntToValAndOff(v.AuxInt)
13468 sym1 := auxToSym(v.Aux)
13469 if v_0.Op != OpAMD64LEAQ {
13470 break
13471 }
13472 off := auxIntToInt32(v_0.AuxInt)
13473 sym2 := auxToSym(v_0.Aux)
13474 ptr := v_0.Args[0]
13475 mem := v_1
13476 if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)) {
13477 break
13478 }
13479 v.reset(OpAMD64MOVWstoreconst)
13480 v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
13481 v.Aux = symToAux(mergeSym(sym1, sym2))
13482 v.AddArg2(ptr, mem)
13483 return true
13484 }
13485 return false
13486 }
13487 func rewriteValueAMD64_OpAMD64MULL(v *Value) bool {
13488 v_1 := v.Args[1]
13489 v_0 := v.Args[0]
13490
13491
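// match: (MULL x (MOVLconst [c]))
// result: (MULLconst [c] x)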
13492 for {
13493 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
13494 x := v_0
13495 if v_1.Op != OpAMD64MOVLconst {
13496 continue
13497 }
13498 c := auxIntToInt32(v_1.AuxInt)
13499 v.reset(OpAMD64MULLconst)
13500 v.AuxInt = int32ToAuxInt(c)
13501 v.AddArg(x)
13502 return true
13503 }
13504 break
13505 }
13506 return false
13507 }
func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	config := b.Func.Config
	// match: (MULLconst [c] (MULLconst [d] x))
	// result: (MULLconst [c * d] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MULLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64MULLconst)
		v.AuxInt = int32ToAuxInt(c * d)
		v.AddArg(x)
		return true
	}
	// match: (MULLconst [0] _)
	// result: (MOVLconst [0])
	for {
		if auxIntToInt32(v.AuxInt) != 0 {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (MULLconst [1] x)
	// result: x
	for {
		if auxIntToInt32(v.AuxInt) != 1 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	// match: (MULLconst [c] x)
	// cond: v.Type.Size() <= 4 && canMulStrengthReduce32(config, c)
	// result: {mulStrengthReduce32(v, x, c)}
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(v.Type.Size() <= 4 && canMulStrengthReduce32(config, c)) {
			break
		}
		v.copyOf(mulStrengthReduce32(v, x, c))
		return true
	}
	// match: (MULLconst [c] (MOVLconst [d]))
	// result: (MOVLconst [c*d])
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(c * d)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MULQ(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MULQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (MULQconst [int32(c)] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_1.AuxInt)
			if !(is32Bit(c)) {
				continue
			}
			v.reset(OpAMD64MULQconst)
			v.AuxInt = int32ToAuxInt(int32(c))
			v.AddArg(x)
			return true
		}
		break
	}
	return false
}
func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	config := b.Func.Config
	// match: (MULQconst [c] (MULQconst [d] x))
	// cond: is32Bit(int64(c)*int64(d))
	// result: (MULQconst [c * d] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MULQconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		if !(is32Bit(int64(c) * int64(d))) {
			break
		}
		v.reset(OpAMD64MULQconst)
		v.AuxInt = int32ToAuxInt(c * d)
		v.AddArg(x)
		return true
	}
	// match: (MULQconst [0] _)
	// result: (MOVQconst [0])
	for {
		if auxIntToInt32(v.AuxInt) != 0 {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(0)
		return true
	}
	// match: (MULQconst [1] x)
	// result: x
	for {
		if auxIntToInt32(v.AuxInt) != 1 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	// match: (MULQconst [c] x)
	// cond: canMulStrengthReduce(config, int64(c))
	// result: {mulStrengthReduce(v, x, int64(c))}
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(canMulStrengthReduce(config, int64(c))) {
			break
		}
		v.copyOf(mulStrengthReduce(v, x, int64(c)))
		return true
	}
	// match: (MULQconst [c] (MOVQconst [d]))
	// result: (MOVQconst [int64(c)*d])
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(int64(c) * d)
		return true
	}
	// match: (MULQconst [c] (NEGQ x))
	// cond: c != -(1<<31)
	// result: (MULQconst [-c] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64NEGQ {
			break
		}
		x := v_0.Args[0]
		if !(c != -(1 << 31)) {
			break
		}
		v.reset(OpAMD64MULQconst)
		v.AuxInt = int32ToAuxInt(-c)
		v.AddArg(x)
		return true
	}
	return false
}
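// rewriteValueAMD64_OpAMD64MULSD folds a floating-point load directly into
// the multiply (MULSD x (MOVSDload ...) becomes MULSDload) when
// canMergeLoadClobber reports that the load has no other uses and may be
// clobbered, saving a separate load instruction and a register.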
func rewriteValueAMD64_OpAMD64MULSD(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MULSD x l:(MOVSDload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (MULSDload x [off] {sym} ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			l := v_1
			if l.Op != OpAMD64MOVSDload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
				continue
			}
			v.reset(OpAMD64MULSDload)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(x, ptr, mem)
			return true
		}
		break
	}
	return false
}
func rewriteValueAMD64_OpAMD64MULSDload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (MULSDload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MULSDload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MULSDload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (MULSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MULSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MULSDload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (MULSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _))
	// result: (MULSD x (MOVQi2f y))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVQstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64MULSD)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQi2f, typ.Float64)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64MULSS(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (MULSS x l:(MOVSSload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (MULSSload x [off] {sym} ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			l := v_1
			if l.Op != OpAMD64MOVSSload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
				continue
			}
			v.reset(OpAMD64MULSSload)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(x, ptr, mem)
			return true
		}
		break
	}
	return false
}
func rewriteValueAMD64_OpAMD64MULSSload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (MULSSload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (MULSSload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64MULSSload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (MULSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (MULSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64MULSSload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (MULSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _))
	// result: (MULSS x (MOVLi2f y))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVLstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64MULSS)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLi2f, typ.Float32)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
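// rewriteValueAMD64_OpAMD64NEGL simplifies negation: double negation
// cancels, -(x-y) becomes y-x when the subtraction has a single use, and
// negation of a constant is folded at compile time. The NEGQ rules below
// follow the same pattern for 64-bit values.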
func rewriteValueAMD64_OpAMD64NEGL(v *Value) bool {
	v_0 := v.Args[0]
	// match: (NEGL (NEGL x))
	// result: x
	for {
		if v_0.Op != OpAMD64NEGL {
			break
		}
		x := v_0.Args[0]
		v.copyOf(x)
		return true
	}
	// match: (NEGL s:(SUBL x y))
	// cond: s.Uses == 1
	// result: (SUBL y x)
	for {
		s := v_0
		if s.Op != OpAMD64SUBL {
			break
		}
		y := s.Args[1]
		x := s.Args[0]
		if !(s.Uses == 1) {
			break
		}
		v.reset(OpAMD64SUBL)
		v.AddArg2(y, x)
		return true
	}
	// match: (NEGL (MOVLconst [c]))
	// result: (MOVLconst [-c])
	for {
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(-c)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64NEGQ(v *Value) bool {
	v_0 := v.Args[0]
	// match: (NEGQ (NEGQ x))
	// result: x
	for {
		if v_0.Op != OpAMD64NEGQ {
			break
		}
		x := v_0.Args[0]
		v.copyOf(x)
		return true
	}
	// match: (NEGQ s:(SUBQ x y))
	// cond: s.Uses == 1
	// result: (SUBQ y x)
	for {
		s := v_0
		if s.Op != OpAMD64SUBQ {
			break
		}
		y := s.Args[1]
		x := s.Args[0]
		if !(s.Uses == 1) {
			break
		}
		v.reset(OpAMD64SUBQ)
		v.AddArg2(y, x)
		return true
	}
	// match: (NEGQ (MOVQconst [c]))
	// result: (MOVQconst [-c])
	for {
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(-c)
		return true
	}
	// match: (NEGQ (ADDQconst [c] (NEGQ x)))
	// cond: c != -(1<<31)
	// result: (ADDQconst [-c] x)
	for {
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		v_0_0 := v_0.Args[0]
		if v_0_0.Op != OpAMD64NEGQ {
			break
		}
		x := v_0_0.Args[0]
		if !(c != -(1 << 31)) {
			break
		}
		v.reset(OpAMD64ADDQconst)
		v.AuxInt = int32ToAuxInt(-c)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64NOTL(v *Value) bool {
	v_0 := v.Args[0]
	// match: (NOTL (MOVLconst [c]))
	// result: (MOVLconst [^c])
	for {
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(^c)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64NOTQ(v *Value) bool {
	v_0 := v.Args[0]
	// match: (NOTQ (MOVQconst [c]))
	// result: (MOVQconst [^c])
	for {
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(^c)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ORL (SHLL (MOVLconst [1]) y) x)
	// result: (BTSL x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64SHLL {
				continue
			}
			y := v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0.AuxInt) != 1 {
				continue
			}
			x := v_1
			v.reset(OpAMD64BTSL)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ORL x (MOVLconst [c]))
	// result: (ORLconst [c] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVLconst {
				continue
			}
			c := auxIntToInt32(v_1.AuxInt)
			v.reset(OpAMD64ORLconst)
			v.AuxInt = int32ToAuxInt(c)
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ORL x x)
	// result: x
	for {
		x := v_0
		if x != v_1 {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (ORL x l:(MOVLload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ORLload x [off] {sym} ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			l := v_1
			if l.Op != OpAMD64MOVLload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
				continue
			}
			v.reset(OpAMD64ORLload)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(x, ptr, mem)
			return true
		}
		break
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORLconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (ORLconst [c] (ORLconst [d] x))
	// result: (ORLconst [c | d] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64ORLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64ORLconst)
		v.AuxInt = int32ToAuxInt(c | d)
		v.AddArg(x)
		return true
	}
	// match: (ORLconst [c] x)
	// cond: c==0
	// result: x
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if !(c == 0) {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (ORLconst [c] _)
	// cond: c==-1
	// result: (MOVLconst [-1])
	for {
		c := auxIntToInt32(v.AuxInt)
		if !(c == -1) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(-1)
		return true
	}
	// match: (ORLconst [c] (MOVLconst [d]))
	// result: (MOVLconst [c|d])
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(c | d)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORLconstmodify(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ORLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2)
	// result: (ORLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2)) {
			break
		}
		v.reset(OpAMD64ORLconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(sym)
		v.AddArg2(base, mem)
		return true
	}
	// match: (ORLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
	// result: (ORLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ORLconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORLload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ORLload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ORLload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ORLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ORLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ORLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ORLload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ORLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
	// result: (ORL x (MOVLf2i y))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64ORL)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORLmodify(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ORLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ORLmodify [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ORLmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (ORLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ORLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ORLmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	return false
}
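// rewriteValueAMD64_OpAMD64ORQ recognizes several idioms: OR with a shifted
// 1 becomes a bit-set (BTSQ), OR with a power-of-two constant too large for
// a 32-bit immediate becomes BTSQconst, and the pair
// (SHRQ lo bits) | (SHLQ hi (NEGQ bits)) is fused into a double shift
// (SHRDQ), the standard funnel-shift pattern used for 128-bit shifts.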
func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ORQ (SHLQ (MOVQconst [1]) y) x)
	// result: (BTSQ x y)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64SHLQ {
				continue
			}
			y := v_0.Args[1]
			v_0_0 := v_0.Args[0]
			if v_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0.AuxInt) != 1 {
				continue
			}
			x := v_1
			v.reset(OpAMD64BTSQ)
			v.AddArg2(x, y)
			return true
		}
		break
	}
	// match: (ORQ (MOVQconst [c]) x)
	// cond: isUint64PowerOfTwo(c) && uint64(c) >= 1<<31
	// result: (BTSQconst [int8(log64(c))] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_0.AuxInt)
			x := v_1
			if !(isUint64PowerOfTwo(c) && uint64(c) >= 1<<31) {
				continue
			}
			v.reset(OpAMD64BTSQconst)
			v.AuxInt = int8ToAuxInt(int8(log64(c)))
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ORQ x (MOVQconst [c]))
	// cond: is32Bit(c)
	// result: (ORQconst [int32(c)] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_1.AuxInt)
			if !(is32Bit(c)) {
				continue
			}
			v.reset(OpAMD64ORQconst)
			v.AuxInt = int32ToAuxInt(int32(c))
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ORQ x (MOVLconst [c]))
	// result: (ORQconst [c] x)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			if v_1.Op != OpAMD64MOVLconst {
				continue
			}
			c := auxIntToInt32(v_1.AuxInt)
			v.reset(OpAMD64ORQconst)
			v.AuxInt = int32ToAuxInt(c)
			v.AddArg(x)
			return true
		}
		break
	}
	// match: (ORQ (SHRQ lo bits) (SHLQ hi (NEGQ bits)))
	// result: (SHRDQ lo hi bits)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64SHRQ {
				continue
			}
			bits := v_0.Args[1]
			lo := v_0.Args[0]
			if v_1.Op != OpAMD64SHLQ {
				continue
			}
			_ = v_1.Args[1]
			hi := v_1.Args[0]
			v_1_1 := v_1.Args[1]
			if v_1_1.Op != OpAMD64NEGQ || bits != v_1_1.Args[0] {
				continue
			}
			v.reset(OpAMD64SHRDQ)
			v.AddArg3(lo, hi, bits)
			return true
		}
		break
	}
	// match: (ORQ (SHLQ lo bits) (SHRQ hi (NEGQ bits)))
	// result: (SHLDQ lo hi bits)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64SHLQ {
				continue
			}
			bits := v_0.Args[1]
			lo := v_0.Args[0]
			if v_1.Op != OpAMD64SHRQ {
				continue
			}
			_ = v_1.Args[1]
			hi := v_1.Args[0]
			v_1_1 := v_1.Args[1]
			if v_1_1.Op != OpAMD64NEGQ || bits != v_1_1.Args[0] {
				continue
			}
			v.reset(OpAMD64SHLDQ)
			v.AddArg3(lo, hi, bits)
			return true
		}
		break
	}
	// match: (ORQ (SHRXQ lo bits) (SHLXQ hi (NEGQ bits)))
	// result: (SHRDQ lo hi bits)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64SHRXQ {
				continue
			}
			bits := v_0.Args[1]
			lo := v_0.Args[0]
			if v_1.Op != OpAMD64SHLXQ {
				continue
			}
			_ = v_1.Args[1]
			hi := v_1.Args[0]
			v_1_1 := v_1.Args[1]
			if v_1_1.Op != OpAMD64NEGQ || bits != v_1_1.Args[0] {
				continue
			}
			v.reset(OpAMD64SHRDQ)
			v.AddArg3(lo, hi, bits)
			return true
		}
		break
	}
	// match: (ORQ (SHLXQ lo bits) (SHRXQ hi (NEGQ bits)))
	// result: (SHLDQ lo hi bits)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64SHLXQ {
				continue
			}
			bits := v_0.Args[1]
			lo := v_0.Args[0]
			if v_1.Op != OpAMD64SHRXQ {
				continue
			}
			_ = v_1.Args[1]
			hi := v_1.Args[0]
			v_1_1 := v_1.Args[1]
			if v_1_1.Op != OpAMD64NEGQ || bits != v_1_1.Args[0] {
				continue
			}
			v.reset(OpAMD64SHLDQ)
			v.AddArg3(lo, hi, bits)
			return true
		}
		break
	}
	// match: (ORQ (MOVQconst [c]) (MOVQconst [d]))
	// result: (MOVQconst [c|d])
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			if v_0.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_0.AuxInt)
			if v_1.Op != OpAMD64MOVQconst {
				continue
			}
			d := auxIntToInt64(v_1.AuxInt)
			v.reset(OpAMD64MOVQconst)
			v.AuxInt = int64ToAuxInt(c | d)
			return true
		}
		break
	}
	// match: (ORQ x x)
	// result: x
	for {
		x := v_0
		if x != v_1 {
			break
		}
		v.copyOf(x)
		return true
	}
	// match: (ORQ x l:(MOVQload [off] {sym} ptr mem))
	// cond: canMergeLoadClobber(v, l, x) && clobber(l)
	// result: (ORQload x [off] {sym} ptr mem)
	for {
		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
			x := v_0
			l := v_1
			if l.Op != OpAMD64MOVQload {
				continue
			}
			off := auxIntToInt32(l.AuxInt)
			sym := auxToSym(l.Aux)
			mem := l.Args[1]
			ptr := l.Args[0]
			if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
				continue
			}
			v.reset(OpAMD64ORQload)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v.AddArg3(x, ptr, mem)
			return true
		}
		break
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORQconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (ORQconst [c] (ORQconst [d] x))
	// result: (ORQconst [c | d] x)
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64ORQconst {
			break
		}
		d := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		v.reset(OpAMD64ORQconst)
		v.AuxInt = int32ToAuxInt(c | d)
		v.AddArg(x)
		return true
	}
	// match: (ORQconst [0] x)
	// result: x
	for {
		if auxIntToInt32(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	// match: (ORQconst [-1] _)
	// result: (MOVQconst [-1])
	for {
		if auxIntToInt32(v.AuxInt) != -1 {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(-1)
		return true
	}
	// match: (ORQconst [c] (MOVQconst [d]))
	// result: (MOVQconst [int64(c)|d])
	for {
		c := auxIntToInt32(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(int64(c) | d)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORQconstmodify(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ORQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2)
	// result: (ORQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2)) {
			break
		}
		v.reset(OpAMD64ORQconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(sym)
		v.AddArg2(base, mem)
		return true
	}
	// match: (ORQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
	// cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
	// result: (ORQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
	for {
		valoff1 := auxIntToValAndOff(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		mem := v_1
		if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ORQconstmodify)
		v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg2(base, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORQload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (ORQload [off1] {sym} val (ADDQconst [off2] base) mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ORQload [off1+off2] {sym} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ORQload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ORQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ORQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		val := v_0
		if v_1.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_1.AuxInt)
		sym2 := auxToSym(v_1.Aux)
		base := v_1.Args[0]
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ORQload)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(val, base, mem)
		return true
	}
	// match: (ORQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
	// result: (ORQ x (MOVQf2i y))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		x := v_0
		ptr := v_1
		if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
			break
		}
		y := v_2.Args[1]
		if ptr != v_2.Args[0] {
			break
		}
		v.reset(OpAMD64ORQ)
		v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ORQmodify(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ORQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (ORQmodify [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64ORQmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (ORQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (ORQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64ORQmodify)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	return false
}
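// The rotate rules below rely on the identity that rotating left by -c is
// the same as rotating right by c (and vice versa), so a rotate whose count
// is a negation is flipped to the opposite direction, and constant counts
// are reduced modulo the operand width (the &7, &15, &31, and &63 masks).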
func rewriteValueAMD64_OpAMD64ROLB(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ROLB x (NEGQ y))
	// result: (RORB x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORB)
		v.AddArg2(x, y)
		return true
	}
	// match: (ROLB x (NEGL y))
	// result: (RORB x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORB)
		v.AddArg2(x, y)
		return true
	}
	// match: (ROLB x (MOVQconst [c]))
	// result: (ROLBconst [int8(c&7)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = int8ToAuxInt(int8(c & 7))
		v.AddArg(x)
		return true
	}
	// match: (ROLB x (MOVLconst [c]))
	// result: (ROLBconst [int8(c&7)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = int8ToAuxInt(int8(c & 7))
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ROLBconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (ROLBconst x [0])
	// result: x
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ROLL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ROLL x (NEGQ y))
	// result: (RORL x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORL)
		v.AddArg2(x, y)
		return true
	}
	// match: (ROLL x (NEGL y))
	// result: (RORL x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORL)
		v.AddArg2(x, y)
		return true
	}
	// match: (ROLL x (MOVQconst [c]))
	// result: (ROLLconst [int8(c&31)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = int8ToAuxInt(int8(c & 31))
		v.AddArg(x)
		return true
	}
	// match: (ROLL x (MOVLconst [c]))
	// result: (ROLLconst [int8(c&31)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = int8ToAuxInt(int8(c & 31))
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ROLLconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (ROLLconst x [0])
	// result: x
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ROLQ(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ROLQ x (NEGQ y))
	// result: (RORQ x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORQ)
		v.AddArg2(x, y)
		return true
	}
	// match: (ROLQ x (NEGL y))
	// result: (RORQ x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORQ)
		v.AddArg2(x, y)
		return true
	}
	// match: (ROLQ x (MOVQconst [c]))
	// result: (ROLQconst [int8(c&63)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = int8ToAuxInt(int8(c & 63))
		v.AddArg(x)
		return true
	}
	// match: (ROLQ x (MOVLconst [c]))
	// result: (ROLQconst [int8(c&63)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = int8ToAuxInt(int8(c & 63))
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ROLQconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (ROLQconst x [0])
	// result: x
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ROLW(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (ROLW x (NEGQ y))
	// result: (RORW x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORW)
		v.AddArg2(x, y)
		return true
	}
	// match: (ROLW x (NEGL y))
	// result: (RORW x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64RORW)
		v.AddArg2(x, y)
		return true
	}
	// match: (ROLW x (MOVQconst [c]))
	// result: (ROLWconst [int8(c&15)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = int8ToAuxInt(int8(c & 15))
		v.AddArg(x)
		return true
	}
	// match: (ROLW x (MOVLconst [c]))
	// result: (ROLWconst [int8(c&15)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = int8ToAuxInt(int8(c & 15))
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64ROLWconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (ROLWconst x [0])
	// result: x
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64RORB(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (RORB x (NEGQ y))
	// result: (ROLB x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLB)
		v.AddArg2(x, y)
		return true
	}
	// match: (RORB x (NEGL y))
	// result: (ROLB x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLB)
		v.AddArg2(x, y)
		return true
	}
	// match: (RORB x (MOVQconst [c]))
	// result: (ROLBconst [int8((-c)&7)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = int8ToAuxInt(int8((-c) & 7))
		v.AddArg(x)
		return true
	}
	// match: (RORB x (MOVLconst [c]))
	// result: (ROLBconst [int8((-c)&7)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64ROLBconst)
		v.AuxInt = int8ToAuxInt(int8((-c) & 7))
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64RORL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (RORL x (NEGQ y))
	// result: (ROLL x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLL)
		v.AddArg2(x, y)
		return true
	}
	// match: (RORL x (NEGL y))
	// result: (ROLL x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLL)
		v.AddArg2(x, y)
		return true
	}
	// match: (RORL x (MOVQconst [c]))
	// result: (ROLLconst [int8((-c)&31)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = int8ToAuxInt(int8((-c) & 31))
		v.AddArg(x)
		return true
	}
	// match: (RORL x (MOVLconst [c]))
	// result: (ROLLconst [int8((-c)&31)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64ROLLconst)
		v.AuxInt = int8ToAuxInt(int8((-c) & 31))
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64RORQ(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (RORQ x (NEGQ y))
	// result: (ROLQ x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLQ)
		v.AddArg2(x, y)
		return true
	}
	// match: (RORQ x (NEGL y))
	// result: (ROLQ x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLQ)
		v.AddArg2(x, y)
		return true
	}
	// match: (RORQ x (MOVQconst [c]))
	// result: (ROLQconst [int8((-c)&63)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = int8ToAuxInt(int8((-c) & 63))
		v.AddArg(x)
		return true
	}
	// match: (RORQ x (MOVLconst [c]))
	// result: (ROLQconst [int8((-c)&63)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64ROLQconst)
		v.AuxInt = int8ToAuxInt(int8((-c) & 63))
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64RORW(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (RORW x (NEGQ y))
	// result: (ROLW x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLW)
		v.AddArg2(x, y)
		return true
	}
	// match: (RORW x (NEGL y))
	// result: (ROLW x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		y := v_1.Args[0]
		v.reset(OpAMD64ROLW)
		v.AddArg2(x, y)
		return true
	}
	// match: (RORW x (MOVQconst [c]))
	// result: (ROLWconst [int8((-c)&15)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = int8ToAuxInt(int8((-c) & 15))
		v.AddArg(x)
		return true
	}
	// match: (RORW x (MOVLconst [c]))
	// result: (ROLWconst [int8((-c)&15)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64ROLWconst)
		v.AuxInt = int8ToAuxInt(int8((-c) & 15))
		v.AddArg(x)
		return true
	}
	return false
}
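// For 8- and 16-bit arithmetic right shifts the constant count is first
// masked the way the hardware masks it (mod 32) and then clamped with
// min(..., 7) or min(..., 15): shifting an N-bit value arithmetically by
// N-1 or more always yields the sign fill, so larger counts can be safely
// reduced to N-1.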
func rewriteValueAMD64_OpAMD64SARB(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (SARB x (MOVQconst [c]))
	// result: (SARBconst [int8(min(int64(c)&31,7))] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpAMD64SARBconst)
		v.AuxInt = int8ToAuxInt(int8(min(int64(c)&31, 7)))
		v.AddArg(x)
		return true
	}
	// match: (SARB x (MOVLconst [c]))
	// result: (SARBconst [int8(min(int64(c)&31,7))] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64SARBconst)
		v.AuxInt = int8ToAuxInt(int8(min(int64(c)&31, 7)))
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SARBconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SARBconst x [0])
	// result: x
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	// match: (SARBconst [c] (MOVQconst [d]))
	// result: (MOVQconst [int64(int8(d))>>uint64(c)])
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(int64(int8(d)) >> uint64(c))
		return true
	}
	return false
}
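// rewriteValueAMD64_OpAMD64SARL simplifies variable shift counts: adding a
// multiple of 32 to the count is a no-op, and masking the count with a value
// whose low five bits are all set is redundant, because 32-bit shifts only
// consume those five bits of the count anyway. When GOAMD64 >= 3 a feeding
// load can also be merged into the BMI2 SARX form, which shifts straight
// from memory.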
func rewriteValueAMD64_OpAMD64SARL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (SARL x (MOVQconst [c]))
	// result: (SARLconst [int8(c&31)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpAMD64SARLconst)
		v.AuxInt = int8ToAuxInt(int8(c & 31))
		v.AddArg(x)
		return true
	}
	// match: (SARL x (MOVLconst [c]))
	// result: (SARLconst [int8(c&31)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64SARLconst)
		v.AuxInt = int8ToAuxInt(int8(c & 31))
		v.AddArg(x)
		return true
	}
	// match: (SARL x (ADDQconst [c] y))
	// cond: c & 31 == 0
	// result: (SARL x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg2(x, y)
		return true
	}
	// match: (SARL x (NEGQ <t> (ADDQconst [c] y)))
	// cond: c & 31 == 0
	// result: (SARL x (NEGQ <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDQconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SARL)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SARL x (ANDQconst [c] y))
	// cond: c & 31 == 31
	// result: (SARL x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ANDQconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg2(x, y)
		return true
	}
	// match: (SARL x (NEGQ <t> (ANDQconst [c] y)))
	// cond: c & 31 == 31
	// result: (SARL x (NEGQ <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDQconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SARL)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SARL x (ADDLconst [c] y))
	// cond: c & 31 == 0
	// result: (SARL x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg2(x, y)
		return true
	}
	// match: (SARL x (NEGL <t> (ADDLconst [c] y)))
	// cond: c & 31 == 0
	// result: (SARL x (NEGL <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDLconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SARL)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SARL x (ANDLconst [c] y))
	// cond: c & 31 == 31
	// result: (SARL x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SARL)
		v.AddArg2(x, y)
		return true
	}
	// match: (SARL x (NEGL <t> (ANDLconst [c] y)))
	// cond: c & 31 == 31
	// result: (SARL x (NEGL <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SARL)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SARL l:(MOVLload [off] {sym} ptr mem) x)
	// cond: buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)
	// result: (SARXLload [off] {sym} ptr x mem)
	for {
		l := v_0
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		x := v_1
		if !(buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)) {
			break
		}
		v.reset(OpAMD64SARXLload)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SARLconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SARLconst x [0])
	// result: x
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	// match: (SARLconst [c] (MOVQconst [d]))
	// result: (MOVQconst [int64(int32(d))>>uint64(c)])
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(int64(int32(d)) >> uint64(c))
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SARQ(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (SARQ x (MOVQconst [c]))
	// result: (SARQconst [int8(c&63)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpAMD64SARQconst)
		v.AuxInt = int8ToAuxInt(int8(c & 63))
		v.AddArg(x)
		return true
	}
	// match: (SARQ x (MOVLconst [c]))
	// result: (SARQconst [int8(c&63)] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64SARQconst)
		v.AuxInt = int8ToAuxInt(int8(c & 63))
		v.AddArg(x)
		return true
	}
	// match: (SARQ x (ADDQconst [c] y))
	// cond: c & 63 == 0
	// result: (SARQ x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg2(x, y)
		return true
	}
	// match: (SARQ x (NEGQ <t> (ADDQconst [c] y)))
	// cond: c & 63 == 0
	// result: (SARQ x (NEGQ <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDQconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SARQ)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SARQ x (ANDQconst [c] y))
	// cond: c & 63 == 63
	// result: (SARQ x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ANDQconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg2(x, y)
		return true
	}
	// match: (SARQ x (NEGQ <t> (ANDQconst [c] y)))
	// cond: c & 63 == 63
	// result: (SARQ x (NEGQ <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDQconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SARQ)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SARQ x (ADDLconst [c] y))
	// cond: c & 63 == 0
	// result: (SARQ x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg2(x, y)
		return true
	}
	// match: (SARQ x (NEGL <t> (ADDLconst [c] y)))
	// cond: c & 63 == 0
	// result: (SARQ x (NEGL <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDLconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SARQ)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SARQ x (ANDLconst [c] y))
	// cond: c & 63 == 63
	// result: (SARQ x y)
	for {
		x := v_0
		if v_1.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SARQ)
		v.AddArg2(x, y)
		return true
	}
	// match: (SARQ x (NEGL <t> (ANDLconst [c] y)))
	// cond: c & 63 == 63
	// result: (SARQ x (NEGL <t> y))
	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SARQ)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}
	// match: (SARQ l:(MOVQload [off] {sym} ptr mem) x)
	// cond: buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)
	// result: (SARXQload [off] {sym} ptr x mem)
	for {
		l := v_0
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		x := v_1
		if !(buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)) {
			break
		}
		v.reset(OpAMD64SARXQload)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SARQconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SARQconst x [0])
	// result: x
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	// match: (SARQconst [c] (MOVQconst [d]))
	// result: (MOVQconst [d>>uint64(c)])
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(d >> uint64(c))
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SARW(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (SARW x (MOVQconst [c]))
	// result: (SARWconst [int8(min(int64(c)&31,15))] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpAMD64SARWconst)
		v.AuxInt = int8ToAuxInt(int8(min(int64(c)&31, 15)))
		v.AddArg(x)
		return true
	}
	// match: (SARW x (MOVLconst [c]))
	// result: (SARWconst [int8(min(int64(c)&31,15))] x)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64SARWconst)
		v.AuxInt = int8ToAuxInt(int8(min(int64(c)&31, 15)))
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SARWconst(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SARWconst x [0])
	// result: x
	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	// match: (SARWconst [c] (MOVQconst [d]))
	// result: (MOVQconst [int64(int16(d))>>uint64(c)])
	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		d := auxIntToInt64(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(int64(int16(d)) >> uint64(c))
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SARXLload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SARXLload [off] {sym} ptr (MOVLconst [c]) mem)
	// result: (SARLconst [int8(c&31)] (MOVLload <typ.UInt32> [off] {sym} ptr mem))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		mem := v_2
		v.reset(OpAMD64SARLconst)
		v.AuxInt = int8ToAuxInt(int8(c & 31))
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SARXQload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SARXQload [off] {sym} ptr (MOVQconst [c]) mem)
	// result: (SARQconst [int8(c&63)] (MOVQload <typ.UInt64> [off] {sym} ptr mem))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		mem := v_2
		v.reset(OpAMD64SARQconst)
		v.AuxInt = int8ToAuxInt(int8(c & 63))
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		v.AddArg(v0)
		return true
	}
	// match: (SARXQload [off] {sym} ptr (MOVLconst [c]) mem)
	// result: (SARQconst [int8(c&63)] (MOVQload <typ.UInt64> [off] {sym} ptr mem))
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		mem := v_2
		v.reset(OpAMD64SARQconst)
		v.AuxInt = int8ToAuxInt(int8(c & 63))
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		v.AddArg(v0)
		return true
	}
	return false
}
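// SBBLcarrymask is "subtract with borrow from itself": it materializes -1
// when the carry flag is set and 0 when it is clear. With a known flags
// value the result is therefore a constant, which the rules below fold
// directly (unsigned-less flags produce -1, the others produce 0).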
func rewriteValueAMD64_OpAMD64SBBLcarrymask(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SBBLcarrymask (FlagEQ))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SBBLcarrymask (FlagLT_ULT))
	// result: (MOVLconst [-1])
	for {
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(-1)
		return true
	}
	// match: (SBBLcarrymask (FlagLT_UGT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SBBLcarrymask (FlagGT_ULT))
	// result: (MOVLconst [-1])
	for {
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(-1)
		return true
	}
	// match: (SBBLcarrymask (FlagGT_UGT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SBBQ(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (SBBQ x (MOVQconst [c]) borrow)
	// cond: is32Bit(c)
	// result: (SBBQconst x [int32(c)] borrow)
	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		borrow := v_2
		if !(is32Bit(c)) {
			break
		}
		v.reset(OpAMD64SBBQconst)
		v.AuxInt = int32ToAuxInt(int32(c))
		v.AddArg2(x, borrow)
		return true
	}
	// match: (SBBQ x y (FlagEQ))
	// result: (SUBQborrow x y)
	for {
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64SUBQborrow)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SBBQcarrymask(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SBBQcarrymask (FlagEQ))
	// result: (MOVQconst [0])
	for {
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(0)
		return true
	}
	// match: (SBBQcarrymask (FlagLT_ULT))
	// result: (MOVQconst [-1])
	for {
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(-1)
		return true
	}
	// match: (SBBQcarrymask (FlagLT_UGT))
	// result: (MOVQconst [0])
	for {
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(0)
		return true
	}
	// match: (SBBQcarrymask (FlagGT_ULT))
	// result: (MOVQconst [-1])
	for {
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(-1)
		return true
	}
	// match: (SBBQcarrymask (FlagGT_UGT))
	// result: (MOVQconst [0])
	for {
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SBBQconst(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (SBBQconst x [c] (FlagEQ))
	// result: (SUBQconstborrow x [c])
	for {
		c := auxIntToInt32(v.AuxInt)
		x := v_0
		if v_1.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64SUBQconstborrow)
		v.AuxInt = int32ToAuxInt(c)
		v.AddArg(x)
		return true
	}
	return false
}
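// The SETcc rules below fold known flag values into constant 0 or 1 and
// rewrite a SETcc of InvertFlags into the SETcc of the swapped condition
// (SETA of inverted flags is SETB, and so on), so comparisons whose
// operands were swapped earlier still produce the right boolean.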
func rewriteValueAMD64_OpAMD64SETA(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SETA (InvertFlags x))
	// result: (SETB x)
	for {
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETB)
		v.AddArg(x)
		return true
	}
	// match: (SETA (FlagEQ))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SETA (FlagLT_ULT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SETA (FlagLT_UGT))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	// match: (SETA (FlagGT_ULT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SETA (FlagGT_UGT))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	return false
}
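// rewriteValueAMD64_OpAMD64SETAE also converts an unsigned x >= 128 test
// into x > 127: the result is identical, but 127 fits in a sign-extended
// 8-bit immediate while 128 does not, so the comparison encodes shorter.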
func rewriteValueAMD64_OpAMD64SETAE(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SETAE (TESTQ x x))
	// result: (ConstBool [true])
	for {
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		x := v_0.Args[1]
		if x != v_0.Args[0] {
			break
		}
		v.reset(OpConstBool)
		v.AuxInt = boolToAuxInt(true)
		return true
	}
	// match: (SETAE (TESTL x x))
	// result: (ConstBool [true])
	for {
		if v_0.Op != OpAMD64TESTL {
			break
		}
		x := v_0.Args[1]
		if x != v_0.Args[0] {
			break
		}
		v.reset(OpConstBool)
		v.AuxInt = boolToAuxInt(true)
		return true
	}
	// match: (SETAE (TESTW x x))
	// result: (ConstBool [true])
	for {
		if v_0.Op != OpAMD64TESTW {
			break
		}
		x := v_0.Args[1]
		if x != v_0.Args[0] {
			break
		}
		v.reset(OpConstBool)
		v.AuxInt = boolToAuxInt(true)
		return true
	}
	// match: (SETAE (TESTB x x))
	// result: (ConstBool [true])
	for {
		if v_0.Op != OpAMD64TESTB {
			break
		}
		x := v_0.Args[1]
		if x != v_0.Args[0] {
			break
		}
		v.reset(OpConstBool)
		v.AuxInt = boolToAuxInt(true)
		return true
	}
	// match: (SETAE (BTLconst [0] x))
	// result: (XORLconst [1] (ANDLconst <typ.Bool> [1] x))
	for {
		if v_0.Op != OpAMD64BTLconst || auxIntToInt8(v_0.AuxInt) != 0 {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64XORLconst)
		v.AuxInt = int32ToAuxInt(1)
		v0 := b.NewValue0(v.Pos, OpAMD64ANDLconst, typ.Bool)
		v0.AuxInt = int32ToAuxInt(1)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETAE (BTQconst [0] x))
	// result: (XORLconst [1] (ANDLconst <typ.Bool> [1] x))
	for {
		if v_0.Op != OpAMD64BTQconst || auxIntToInt8(v_0.AuxInt) != 0 {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64XORLconst)
		v.AuxInt = int32ToAuxInt(1)
		v0 := b.NewValue0(v.Pos, OpAMD64ANDLconst, typ.Bool)
		v0.AuxInt = int32ToAuxInt(1)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETAE c:(CMPQconst [128] x))
	// cond: c.Uses == 1
	// result: (SETA (CMPQconst [127] x))
	for {
		c := v_0
		if c.Op != OpAMD64CMPQconst || auxIntToInt32(c.AuxInt) != 128 {
			break
		}
		x := c.Args[0]
		if !(c.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETA)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(127)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETAE c:(CMPLconst [128] x))
	// cond: c.Uses == 1
	// result: (SETA (CMPLconst [127] x))
	for {
		c := v_0
		if c.Op != OpAMD64CMPLconst || auxIntToInt32(c.AuxInt) != 128 {
			break
		}
		x := c.Args[0]
		if !(c.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETA)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(127)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETAE (InvertFlags x))
	// result: (SETBE x)
	for {
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETBE)
		v.AddArg(x)
		return true
	}
	// match: (SETAE (FlagEQ))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	// match: (SETAE (FlagLT_ULT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SETAE (FlagLT_UGT))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	// match: (SETAE (FlagGT_ULT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SETAE (FlagGT_UGT))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	return false
}
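// rewriteValueAMD64_OpAMD64SETAEstore applies the rewrite rules below to a
// SETAEstore value and reports whether v was rewritten.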
func rewriteValueAMD64_OpAMD64SETAEstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SETAEstore [off] {sym} ptr (InvertFlags x) mem)
	// result: (SETBEstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64SETBEstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (SETAEstore [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (SETAEstore [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SETAEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (SETAEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SETAEstore [off] {sym} ptr (FlagEQ) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagEQ {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETAEstore [off] {sym} ptr (FlagLT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETAEstore [off] {sym} ptr (FlagLT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETAEstore [off] {sym} ptr (FlagGT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETAEstore [off] {sym} ptr (FlagGT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	return false
}
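// rewriteValueAMD64_OpAMD64SETAstore applies the rewrite rules below to a
// SETAstore value and reports whether v was rewritten.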
func rewriteValueAMD64_OpAMD64SETAstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SETAstore [off] {sym} ptr (InvertFlags x) mem)
	// result: (SETBstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64SETBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (SETAstore [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (SETAstore [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64SETAstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SETAstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (SETAstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SETAstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SETAstore [off] {sym} ptr (FlagEQ) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagEQ {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETAstore [off] {sym} ptr (FlagLT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETAstore [off] {sym} ptr (FlagLT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETAstore [off] {sym} ptr (FlagGT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETAstore [off] {sym} ptr (FlagGT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	return false
}
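// rewriteValueAMD64_OpAMD64SETB applies the rewrite rules below to a SETB
// value and reports whether v was rewritten.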
func rewriteValueAMD64_OpAMD64SETB(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (SETB (TESTQ x x))
	// result: (ConstBool [false])
	for {
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		x := v_0.Args[1]
		if x != v_0.Args[0] {
			break
		}
		v.reset(OpConstBool)
		v.AuxInt = boolToAuxInt(false)
		return true
	}
	// match: (SETB (TESTL x x))
	// result: (ConstBool [false])
	for {
		if v_0.Op != OpAMD64TESTL {
			break
		}
		x := v_0.Args[1]
		if x != v_0.Args[0] {
			break
		}
		v.reset(OpConstBool)
		v.AuxInt = boolToAuxInt(false)
		return true
	}
	// match: (SETB (TESTW x x))
	// result: (ConstBool [false])
	for {
		if v_0.Op != OpAMD64TESTW {
			break
		}
		x := v_0.Args[1]
		if x != v_0.Args[0] {
			break
		}
		v.reset(OpConstBool)
		v.AuxInt = boolToAuxInt(false)
		return true
	}
	// match: (SETB (TESTB x x))
	// result: (ConstBool [false])
	for {
		if v_0.Op != OpAMD64TESTB {
			break
		}
		x := v_0.Args[1]
		if x != v_0.Args[0] {
			break
		}
		v.reset(OpConstBool)
		v.AuxInt = boolToAuxInt(false)
		return true
	}
	// match: (SETB (BTLconst [0] x))
	// result: (ANDLconst [1] x)
	for {
		if v_0.Op != OpAMD64BTLconst || auxIntToInt8(v_0.AuxInt) != 0 {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = int32ToAuxInt(1)
		v.AddArg(x)
		return true
	}
	// match: (SETB (BTQconst [0] x))
	// result: (ANDQconst [1] x)
	for {
		if v_0.Op != OpAMD64BTQconst || auxIntToInt8(v_0.AuxInt) != 0 {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64ANDQconst)
		v.AuxInt = int32ToAuxInt(1)
		v.AddArg(x)
		return true
	}
	// match: (SETB c:(CMPQconst [128] x))
	// cond: c.Uses == 1
	// result: (SETBE (CMPQconst [127] x))
	for {
		c := v_0
		if c.Op != OpAMD64CMPQconst || auxIntToInt32(c.AuxInt) != 128 {
			break
		}
		x := c.Args[0]
		if !(c.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(127)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETB c:(CMPLconst [128] x))
	// cond: c.Uses == 1
	// result: (SETBE (CMPLconst [127] x))
	for {
		c := v_0
		if c.Op != OpAMD64CMPLconst || auxIntToInt32(c.AuxInt) != 128 {
			break
		}
		x := c.Args[0]
		if !(c.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(127)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETB (InvertFlags x))
	// result: (SETA x)
	for {
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETA)
		v.AddArg(x)
		return true
	}
	// match: (SETB (FlagEQ))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SETB (FlagLT_ULT))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	// match: (SETB (FlagLT_UGT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SETB (FlagGT_ULT))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	// match: (SETB (FlagGT_UGT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	return false
}
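// rewriteValueAMD64_OpAMD64SETBE applies the rewrite rules below to a SETBE
// value and reports whether v was rewritten.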
func rewriteValueAMD64_OpAMD64SETBE(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SETBE (InvertFlags x))
	// result: (SETAE x)
	for {
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETAE)
		v.AddArg(x)
		return true
	}
	// match: (SETBE (FlagEQ))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	// match: (SETBE (FlagLT_ULT))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	// match: (SETBE (FlagLT_UGT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SETBE (FlagGT_ULT))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	// match: (SETBE (FlagGT_UGT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	return false
}
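// rewriteValueAMD64_OpAMD64SETBEstore applies the rewrite rules below to a
// SETBEstore value and reports whether v was rewritten.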
func rewriteValueAMD64_OpAMD64SETBEstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SETBEstore [off] {sym} ptr (InvertFlags x) mem)
	// result: (SETAEstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (SETBEstore [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (SETBEstore [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64SETBEstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SETBEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (SETBEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SETBEstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SETBEstore [off] {sym} ptr (FlagEQ) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagEQ {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETBEstore [off] {sym} ptr (FlagLT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETBEstore [off] {sym} ptr (FlagLT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETBEstore [off] {sym} ptr (FlagGT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETBEstore [off] {sym} ptr (FlagGT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	return false
}
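// rewriteValueAMD64_OpAMD64SETBstore applies the rewrite rules below to a
// SETBstore value and reports whether v was rewritten.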
func rewriteValueAMD64_OpAMD64SETBstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SETBstore [off] {sym} ptr (InvertFlags x) mem)
	// result: (SETAstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64SETAstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (SETBstore [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (SETBstore [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64SETBstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SETBstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (SETBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SETBstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SETBstore [off] {sym} ptr (FlagEQ) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagEQ {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETBstore [off] {sym} ptr (FlagLT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETBstore [off] {sym} ptr (FlagLT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETBstore [off] {sym} ptr (FlagGT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETBstore [off] {sym} ptr (FlagGT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	return false
}
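// rewriteValueAMD64_OpAMD64SETEQ applies the rewrite rules below to a SETEQ
// value and reports whether v was rewritten.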
func rewriteValueAMD64_OpAMD64SETEQ(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (SETEQ (TESTL (SHLL (MOVLconst [1]) x) y))
	// result: (SETAE (BTL x y))
	for {
		if v_0.Op != OpAMD64TESTL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			if v_0_0.Op != OpAMD64SHLL {
				continue
			}
			x := v_0_0.Args[1]
			v_0_0_0 := v_0_0.Args[0]
			if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 {
				continue
			}
			y := v_0_1
			v.reset(OpAMD64SETAE)
			v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
			v0.AddArg2(x, y)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (SETEQ (TESTQ (SHLQ (MOVQconst [1]) x) y))
	// result: (SETAE (BTQ x y))
	for {
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			if v_0_0.Op != OpAMD64SHLQ {
				continue
			}
			x := v_0_0.Args[1]
			v_0_0_0 := v_0_0.Args[0]
			if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
				continue
			}
			y := v_0_1
			v.reset(OpAMD64SETAE)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
			v0.AddArg2(x, y)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (SETEQ (TESTLconst [c] x))
	// cond: isUint32PowerOfTwo(int64(c))
	// result: (SETAE (BTLconst [int8(log32(c))] x))
	for {
		if v_0.Op != OpAMD64TESTLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		if !(isUint32PowerOfTwo(int64(c))) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
		v0.AuxInt = int8ToAuxInt(int8(log32(c)))
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTQconst [c] x))
	// cond: isUint64PowerOfTwo(int64(c))
	// result: (SETAE (BTQconst [int8(log32(c))] x))
	for {
		if v_0.Op != OpAMD64TESTQconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		if !(isUint64PowerOfTwo(int64(c))) {
			break
		}
		v.reset(OpAMD64SETAE)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = int8ToAuxInt(int8(log32(c)))
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTQ (MOVQconst [c]) x))
	// cond: isUint64PowerOfTwo(c)
	// result: (SETAE (BTQconst [int8(log64(c))] x))
	for {
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			if v_0_0.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_0_0.AuxInt)
			x := v_0_1
			if !(isUint64PowerOfTwo(c)) {
				continue
			}
			v.reset(OpAMD64SETAE)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(int8(log64(c)))
			v0.AddArg(x)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (SETEQ (CMPLconst [1] s:(ANDLconst [1] _)))
	// result: (SETNE (CMPLconst [0] s))
	for {
		if v_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_0.AuxInt) != 1 {
			break
		}
		s := v_0.Args[0]
		if s.Op != OpAMD64ANDLconst || auxIntToInt32(s.AuxInt) != 1 {
			break
		}
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(0)
		v0.AddArg(s)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (CMPQconst [1] s:(ANDQconst [1] _)))
	// result: (SETNE (CMPQconst [0] s))
	for {
		if v_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_0.AuxInt) != 1 {
			break
		}
		s := v_0.Args[0]
		if s.Op != OpAMD64ANDQconst || auxIntToInt32(s.AuxInt) != 1 {
			break
		}
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(0)
		v0.AddArg(s)
		v.AddArg(v0)
		return true
	}
	// match: (SETEQ (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2))
	// cond: z1==z2
	// result: (SETAE (BTQconst [63] x))
	for {
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			z1 := v_0_0
			if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 {
				continue
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
				continue
			}
			x := z1_0.Args[0]
			z2 := v_0_1
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETAE)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(63)
			v0.AddArg(x)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (SETEQ (TESTL z1:(SHLLconst [31] (SHRQconst [31] x)) z2))
	// cond: z1==z2
	// result: (SETAE (BTQconst [31] x))
	for {
		if v_0.Op != OpAMD64TESTL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			z1 := v_0_0
			if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 {
				continue
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 31 {
				continue
			}
			x := z1_0.Args[0]
			z2 := v_0_1
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETAE)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(31)
			v0.AddArg(x)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (SETEQ (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2))
	// cond: z1==z2
	// result: (SETAE (BTQconst [0] x))
	for {
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			z1 := v_0_0
			if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
				continue
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
				continue
			}
			x := z1_0.Args[0]
			z2 := v_0_1
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETAE)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(0)
			v0.AddArg(x)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (SETEQ (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2))
	// cond: z1==z2
	// result: (SETAE (BTLconst [0] x))
	for {
		if v_0.Op != OpAMD64TESTL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			z1 := v_0_0
			if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
				continue
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
				continue
			}
			x := z1_0.Args[0]
			z2 := v_0_1
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETAE)
			v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(0)
			v0.AddArg(x)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (SETEQ (TESTQ z1:(SHRQconst [63] x) z2))
	// cond: z1==z2
	// result: (SETAE (BTQconst [63] x))
	for {
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			z1 := v_0_0
			if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
				continue
			}
			x := z1.Args[0]
			z2 := v_0_1
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETAE)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(63)
			v0.AddArg(x)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (SETEQ (TESTL z1:(SHRLconst [31] x) z2))
	// cond: z1==z2
	// result: (SETAE (BTLconst [31] x))
	for {
		if v_0.Op != OpAMD64TESTL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			z1 := v_0_0
			if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
				continue
			}
			x := z1.Args[0]
			z2 := v_0_1
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETAE)
			v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(31)
			v0.AddArg(x)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (SETEQ (InvertFlags x))
	// result: (SETEQ x)
	for {
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETEQ)
		v.AddArg(x)
		return true
	}
	// match: (SETEQ (FlagEQ))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	// match: (SETEQ (FlagLT_ULT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SETEQ (FlagLT_UGT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SETEQ (FlagGT_ULT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SETEQ (FlagGT_UGT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SETEQ (TESTQ s:(Select0 blsr:(BLSRQ _)) s))
	// result: (SETEQ (Select1 <types.TypeFlags> blsr))
	for {
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			s := v_0_0
			if s.Op != OpSelect0 {
				continue
			}
			blsr := s.Args[0]
			if blsr.Op != OpAMD64BLSRQ || s != v_0_1 {
				continue
			}
			v.reset(OpAMD64SETEQ)
			v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
			v0.AddArg(blsr)
			v.AddArg(v0)
			return true
		}
		break
	}
	// match: (SETEQ (TESTL s:(Select0 blsr:(BLSRL _)) s))
	// result: (SETEQ (Select1 <types.TypeFlags> blsr))
	for {
		if v_0.Op != OpAMD64TESTL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			s := v_0_0
			if s.Op != OpSelect0 {
				continue
			}
			blsr := s.Args[0]
			if blsr.Op != OpAMD64BLSRL || s != v_0_1 {
				continue
			}
			v.reset(OpAMD64SETEQ)
			v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
			v0.AddArg(blsr)
			v.AddArg(v0)
			return true
		}
		break
	}
	return false
}
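// rewriteValueAMD64_OpAMD64SETEQstore applies the rewrite rules below to a
// SETEQstore value and reports whether v was rewritten.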
func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SETEQstore [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem)
	// result: (SETAEstore [off] {sym} ptr (BTL x y) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			if v_1_0.Op != OpAMD64SHLL {
				continue
			}
			x := v_1_0.Args[1]
			v_1_0_0 := v_1_0.Args[0]
			if v_1_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_1_0_0.AuxInt) != 1 {
				continue
			}
			y := v_1_1
			mem := v_2
			v.reset(OpAMD64SETAEstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
			v0.AddArg2(x, y)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}
	// match: (SETEQstore [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem)
	// result: (SETAEstore [off] {sym} ptr (BTQ x y) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			if v_1_0.Op != OpAMD64SHLQ {
				continue
			}
			x := v_1_0.Args[1]
			v_1_0_0 := v_1_0.Args[0]
			if v_1_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_1_0_0.AuxInt) != 1 {
				continue
			}
			y := v_1_1
			mem := v_2
			v.reset(OpAMD64SETAEstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
			v0.AddArg2(x, y)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}
	// match: (SETEQstore [off] {sym} ptr (TESTLconst [c] x) mem)
	// cond: isUint32PowerOfTwo(int64(c))
	// result: (SETAEstore [off] {sym} ptr (BTLconst [int8(log32(c))] x) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		x := v_1.Args[0]
		mem := v_2
		if !(isUint32PowerOfTwo(int64(c))) {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
		v0.AuxInt = int8ToAuxInt(int8(log32(c)))
		v0.AddArg(x)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (TESTQconst [c] x) mem)
	// cond: isUint64PowerOfTwo(int64(c))
	// result: (SETAEstore [off] {sym} ptr (BTQconst [int8(log32(c))] x) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTQconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		x := v_1.Args[0]
		mem := v_2
		if !(isUint64PowerOfTwo(int64(c))) {
			break
		}
		v.reset(OpAMD64SETAEstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = int8ToAuxInt(int8(log32(c)))
		v0.AddArg(x)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem)
	// cond: isUint64PowerOfTwo(c)
	// result: (SETAEstore [off] {sym} ptr (BTQconst [int8(log64(c))] x) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			if v_1_0.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_1_0.AuxInt)
			x := v_1_1
			mem := v_2
			if !(isUint64PowerOfTwo(c)) {
				continue
			}
			v.reset(OpAMD64SETAEstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(int8(log64(c)))
			v0.AddArg(x)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}
	// match: (SETEQstore [off] {sym} ptr (CMPLconst [1] s:(ANDLconst [1] _)) mem)
	// result: (SETNEstore [off] {sym} ptr (CMPLconst [0] s) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64CMPLconst || auxIntToInt32(v_1.AuxInt) != 1 {
			break
		}
		s := v_1.Args[0]
		if s.Op != OpAMD64ANDLconst || auxIntToInt32(s.AuxInt) != 1 {
			break
		}
		mem := v_2
		v.reset(OpAMD64SETNEstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(0)
		v0.AddArg(s)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (CMPQconst [1] s:(ANDQconst [1] _)) mem)
	// result: (SETNEstore [off] {sym} ptr (CMPQconst [0] s) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64CMPQconst || auxIntToInt32(v_1.AuxInt) != 1 {
			break
		}
		s := v_1.Args[0]
		if s.Op != OpAMD64ANDQconst || auxIntToInt32(s.AuxInt) != 1 {
			break
		}
		mem := v_2
		v.reset(OpAMD64SETNEstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(0)
		v0.AddArg(s)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2) mem)
	// cond: z1==z2
	// result: (SETAEstore [off] {sym} ptr (BTQconst [63] x) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			z1 := v_1_0
			if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 {
				continue
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
				continue
			}
			x := z1_0.Args[0]
			z2 := v_1_1
			mem := v_2
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETAEstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(63)
			v0.AddArg(x)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}
	// match: (SETEQstore [off] {sym} ptr (TESTL z1:(SHLLconst [31] (SHRLconst [31] x)) z2) mem)
	// cond: z1==z2
	// result: (SETAEstore [off] {sym} ptr (BTLconst [31] x) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			z1 := v_1_0
			if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 {
				continue
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHRLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
				continue
			}
			x := z1_0.Args[0]
			z2 := v_1_1
			mem := v_2
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETAEstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(31)
			v0.AddArg(x)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}
	// match: (SETEQstore [off] {sym} ptr (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2) mem)
	// cond: z1==z2
	// result: (SETAEstore [off] {sym} ptr (BTQconst [0] x) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			z1 := v_1_0
			if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
				continue
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
				continue
			}
			x := z1_0.Args[0]
			z2 := v_1_1
			mem := v_2
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETAEstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(0)
			v0.AddArg(x)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}
	// match: (SETEQstore [off] {sym} ptr (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2) mem)
	// cond: z1==z2
	// result: (SETAEstore [off] {sym} ptr (BTLconst [0] x) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			z1 := v_1_0
			if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
				continue
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
				continue
			}
			x := z1_0.Args[0]
			z2 := v_1_1
			mem := v_2
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETAEstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(0)
			v0.AddArg(x)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}
	// match: (SETEQstore [off] {sym} ptr (TESTQ z1:(SHRQconst [63] x) z2) mem)
	// cond: z1==z2
	// result: (SETAEstore [off] {sym} ptr (BTQconst [63] x) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			z1 := v_1_0
			if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
				continue
			}
			x := z1.Args[0]
			z2 := v_1_1
			mem := v_2
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETAEstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(63)
			v0.AddArg(x)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}
	// match: (SETEQstore [off] {sym} ptr (TESTL z1:(SHRLconst [31] x) z2) mem)
	// cond: z1==z2
	// result: (SETAEstore [off] {sym} ptr (BTLconst [31] x) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			z1 := v_1_0
			if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
				continue
			}
			x := z1.Args[0]
			z2 := v_1_1
			mem := v_2
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETAEstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(31)
			v0.AddArg(x)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}
	// match: (SETEQstore [off] {sym} ptr (InvertFlags x) mem)
	// result: (SETEQstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64SETEQstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (SETEQstore [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (SETEQstore [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64SETEQstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SETEQstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (SETEQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SETEQstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (FlagEQ) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagEQ {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (FlagLT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (FlagLT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (FlagGT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETEQstore [off] {sym} ptr (FlagGT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	return false
}
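// rewriteValueAMD64_OpAMD64SETG applies the rewrite rules below to a SETG
// value and reports whether v was rewritten.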
func rewriteValueAMD64_OpAMD64SETG(v *Value) bool {
	v_0 := v.Args[0]
	// match: (SETG (InvertFlags x))
	// result: (SETL x)
	for {
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETL)
		v.AddArg(x)
		return true
	}
	// match: (SETG (FlagEQ))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SETG (FlagLT_ULT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SETG (FlagLT_UGT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SETG (FlagGT_ULT))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	// match: (SETG (FlagGT_UGT))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	return false
}
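// rewriteValueAMD64_OpAMD64SETGE applies the rewrite rules below to a SETGE
// value and reports whether v was rewritten.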
func rewriteValueAMD64_OpAMD64SETGE(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (SETGE c:(CMPQconst [128] x))
	// cond: c.Uses == 1
	// result: (SETG (CMPQconst [127] x))
	for {
		c := v_0
		if c.Op != OpAMD64CMPQconst || auxIntToInt32(c.AuxInt) != 128 {
			break
		}
		x := c.Args[0]
		if !(c.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETG)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(127)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETGE c:(CMPLconst [128] x))
	// cond: c.Uses == 1
	// result: (SETG (CMPLconst [127] x))
	for {
		c := v_0
		if c.Op != OpAMD64CMPLconst || auxIntToInt32(c.AuxInt) != 128 {
			break
		}
		x := c.Args[0]
		if !(c.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETG)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(127)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	// match: (SETGE (InvertFlags x))
	// result: (SETLE x)
	for {
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETLE)
		v.AddArg(x)
		return true
	}
	// match: (SETGE (FlagEQ))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	// match: (SETGE (FlagLT_ULT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SETGE (FlagLT_UGT))
	// result: (MOVLconst [0])
	for {
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	// match: (SETGE (FlagGT_ULT))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	// match: (SETGE (FlagGT_UGT))
	// result: (MOVLconst [1])
	for {
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}
	return false
}
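// rewriteValueAMD64_OpAMD64SETGEstore applies the rewrite rules below to a
// SETGEstore value and reports whether v was rewritten.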
func rewriteValueAMD64_OpAMD64SETGEstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SETGEstore [off] {sym} ptr (InvertFlags x) mem)
	// result: (SETLEstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64SETLEstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (SETGEstore [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (SETGEstore [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64SETGEstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SETGEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (SETGEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SETGEstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SETGEstore [off] {sym} ptr (FlagEQ) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagEQ {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETGEstore [off] {sym} ptr (FlagLT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETGEstore [off] {sym} ptr (FlagLT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETGEstore [off] {sym} ptr (FlagGT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETGEstore [off] {sym} ptr (FlagGT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	return false
}
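// rewriteValueAMD64_OpAMD64SETGstore applies the rewrite rules below to a
// SETGstore value and reports whether v was rewritten.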
func rewriteValueAMD64_OpAMD64SETGstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (SETGstore [off] {sym} ptr (InvertFlags x) mem)
	// result: (SETLstore [off] {sym} ptr x mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64SETLstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	// match: (SETGstore [off1] {sym} (ADDQconst [off2] base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2))
	// result: (SETGstore [off1+off2] {sym} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64SETGstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SETGstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
	// cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
	// result: (SETGstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SETGstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}
	// match: (SETGstore [off] {sym} ptr (FlagEQ) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagEQ {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETGstore [off] {sym} ptr (FlagLT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETGstore [off] {sym} ptr (FlagLT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETGstore [off] {sym} ptr (FlagGT_ULT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	// match: (SETGstore [off] {sym} ptr (FlagGT_UGT) mem)
	// result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	return false
}
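// rewriteValueAMD64_OpAMD64SETL applies the rewrite rules below to a SETL
// value and reports whether v was rewritten.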
func rewriteValueAMD64_OpAMD64SETL(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block

	for {
		c := v_0
		if c.Op != OpAMD64CMPQconst || auxIntToInt32(c.AuxInt) != 128 {
			break
		}
		x := c.Args[0]
		if !(c.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETLE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(127)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}

	for {
		c := v_0
		if c.Op != OpAMD64CMPLconst || auxIntToInt32(c.AuxInt) != 128 {
			break
		}
		x := c.Args[0]
		if !(c.Uses == 1) {
			break
		}
		v.reset(OpAMD64SETLE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(127)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}

	for {
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETG)
		v.AddArg(x)
		return true
	}

	for {
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}

	for {
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}

	for {
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}

	for {
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}

	for {
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	return false
}
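// InvertFlags records that the comparison operands were swapped, so a signed
// "less or equal" of inverted flags reads back as "greater or equal", and the
// Flag* constants fold the SET to a known 0 or 1. Roughly:
//   (SETLE (InvertFlags x)) => (SETGE x)
//   (SETLE (FlagEQ)) => (MOVLconst [1])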
func rewriteValueAMD64_OpAMD64SETLE(v *Value) bool {
	v_0 := v.Args[0]

	for {
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETGE)
		v.AddArg(x)
		return true
	}

	for {
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}

	for {
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}

	for {
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}

	for {
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}

	for {
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	return false
}
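// The store form folds the same way, and additionally merges addressing
// arithmetic into its offset, e.g. (approximately):
//   (SETLEstore [off1] {sym} (ADDQconst [off2] base) val mem)
//     && is32Bit(int64(off1)+int64(off2))
//     => (SETLEstore [off1+off2] {sym} base val mem)
// The is32Bit guard keeps the combined displacement encodable as a signed
// 32-bit immediate.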
func rewriteValueAMD64_OpAMD64SETLEstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types

	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64SETGEstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}

	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64SETLEstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}

	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SETLEstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}

	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagEQ {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}

	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}

	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}

	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}

	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	return false
}
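// SETLstore mirrors SETL: inverted flags flip it to SETGstore, constant flag
// arguments become a MOVBstore of 0 or 1, and ADDQconst/LEAQ bases merge into
// the store's offset and symbol under the same is32Bit/canMergeSym guards.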
func rewriteValueAMD64_OpAMD64SETLstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types

	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64SETGstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}

	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64SETLstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}

	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SETLstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}

	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagEQ {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}

	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}

	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}

	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}

	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	return false
}
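// A TEST against a single-bit mask is a bit test; in rule notation
// (approximately):
//   (SETNE (TESTLconst [c] x)) && isUint32PowerOfTwo(int64(c))
//     => (SETB (BTLconst [int8(log32(c))] x))
// BT copies the selected bit into CF, so SETB reads it back directly.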
func rewriteValueAMD64_OpAMD64SETNE(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block

	for {
		if v_0.Op != OpAMD64TESTBconst || auxIntToInt8(v_0.AuxInt) != 1 {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = int32ToAuxInt(1)
		v.AddArg(x)
		return true
	}

	for {
		if v_0.Op != OpAMD64TESTWconst || auxIntToInt16(v_0.AuxInt) != 1 {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = int32ToAuxInt(1)
		v.AddArg(x)
		return true
	}

	for {
		if v_0.Op != OpAMD64TESTL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			if v_0_0.Op != OpAMD64SHLL {
				continue
			}
			x := v_0_0.Args[1]
			v_0_0_0 := v_0_0.Args[0]
			if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 {
				continue
			}
			y := v_0_1
			v.reset(OpAMD64SETB)
			v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
			v0.AddArg2(x, y)
			v.AddArg(v0)
			return true
		}
		break
	}

	for {
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			if v_0_0.Op != OpAMD64SHLQ {
				continue
			}
			x := v_0_0.Args[1]
			v_0_0_0 := v_0_0.Args[0]
			if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
				continue
			}
			y := v_0_1
			v.reset(OpAMD64SETB)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
			v0.AddArg2(x, y)
			v.AddArg(v0)
			return true
		}
		break
	}

	for {
		if v_0.Op != OpAMD64TESTLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		if !(isUint32PowerOfTwo(int64(c))) {
			break
		}
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
		v0.AuxInt = int8ToAuxInt(int8(log32(c)))
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}

	for {
		if v_0.Op != OpAMD64TESTQconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		x := v_0.Args[0]
		if !(isUint64PowerOfTwo(int64(c))) {
			break
		}
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = int8ToAuxInt(int8(log32(c)))
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}

	for {
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			if v_0_0.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_0_0.AuxInt)
			x := v_0_1
			if !(isUint64PowerOfTwo(c)) {
				continue
			}
			v.reset(OpAMD64SETB)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(int8(log64(c)))
			v0.AddArg(x)
			v.AddArg(v0)
			return true
		}
		break
	}

	for {
		if v_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_0.AuxInt) != 1 {
			break
		}
		s := v_0.Args[0]
		if s.Op != OpAMD64ANDLconst || auxIntToInt32(s.AuxInt) != 1 {
			break
		}
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(0)
		v0.AddArg(s)
		v.AddArg(v0)
		return true
	}

	for {
		if v_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_0.AuxInt) != 1 {
			break
		}
		s := v_0.Args[0]
		if s.Op != OpAMD64ANDQconst || auxIntToInt32(s.AuxInt) != 1 {
			break
		}
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(0)
		v0.AddArg(s)
		v.AddArg(v0)
		return true
	}

	for {
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			z1 := v_0_0
			if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 {
				continue
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
				continue
			}
			x := z1_0.Args[0]
			z2 := v_0_1
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETB)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(63)
			v0.AddArg(x)
			v.AddArg(v0)
			return true
		}
		break
	}

	for {
		if v_0.Op != OpAMD64TESTL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			z1 := v_0_0
			if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 {
				continue
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHRLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
				continue
			}
			x := z1_0.Args[0]
			z2 := v_0_1
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETB)
			v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(31)
			v0.AddArg(x)
			v.AddArg(v0)
			return true
		}
		break
	}

	for {
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			z1 := v_0_0
			if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
				continue
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
				continue
			}
			x := z1_0.Args[0]
			z2 := v_0_1
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETB)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(0)
			v0.AddArg(x)
			v.AddArg(v0)
			return true
		}
		break
	}

	for {
		if v_0.Op != OpAMD64TESTL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			z1 := v_0_0
			if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
				continue
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
				continue
			}
			x := z1_0.Args[0]
			z2 := v_0_1
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETB)
			v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(0)
			v0.AddArg(x)
			v.AddArg(v0)
			return true
		}
		break
	}

	for {
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			z1 := v_0_0
			if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
				continue
			}
			x := z1.Args[0]
			z2 := v_0_1
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETB)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(63)
			v0.AddArg(x)
			v.AddArg(v0)
			return true
		}
		break
	}

	for {
		if v_0.Op != OpAMD64TESTL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			z1 := v_0_0
			if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
				continue
			}
			x := z1.Args[0]
			z2 := v_0_1
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETB)
			v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(31)
			v0.AddArg(x)
			v.AddArg(v0)
			return true
		}
		break
	}

	for {
		if v_0.Op != OpAMD64InvertFlags {
			break
		}
		x := v_0.Args[0]
		v.reset(OpAMD64SETNE)
		v.AddArg(x)
		return true
	}

	for {
		if v_0.Op != OpAMD64FlagEQ {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}

	for {
		if v_0.Op != OpAMD64FlagLT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}

	for {
		if v_0.Op != OpAMD64FlagLT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}

	for {
		if v_0.Op != OpAMD64FlagGT_ULT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}

	for {
		if v_0.Op != OpAMD64FlagGT_UGT {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(1)
		return true
	}

	for {
		if v_0.Op != OpAMD64TESTQ {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			s := v_0_0
			if s.Op != OpSelect0 {
				continue
			}
			blsr := s.Args[0]
			if blsr.Op != OpAMD64BLSRQ || s != v_0_1 {
				continue
			}
			v.reset(OpAMD64SETNE)
			v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
			v0.AddArg(blsr)
			v.AddArg(v0)
			return true
		}
		break
	}

	for {
		if v_0.Op != OpAMD64TESTL {
			break
		}
		_ = v_0.Args[1]
		v_0_0 := v_0.Args[0]
		v_0_1 := v_0.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
			s := v_0_0
			if s.Op != OpSelect0 {
				continue
			}
			blsr := s.Args[0]
			if blsr.Op != OpAMD64BLSRL || s != v_0_1 {
				continue
			}
			v.reset(OpAMD64SETNE)
			v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
			v0.AddArg(blsr)
			v.AddArg(v0)
			return true
		}
		break
	}
	return false
}
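// SETNEstore repeats the SETNE bit-test folds in store form: the boolean is
// materialized with SETBstore off a BT/BTL/BTQ of the same operands, TESTing
// a BLSR (x & (x-1)) result re-reads the BLSR flags via Select1, and constant
// flag arguments collapse to a MOVBstore of 0 or 1.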
func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types

	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			if v_1_0.Op != OpAMD64SHLL {
				continue
			}
			x := v_1_0.Args[1]
			v_1_0_0 := v_1_0.Args[0]
			if v_1_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_1_0_0.AuxInt) != 1 {
				continue
			}
			y := v_1_1
			mem := v_2
			v.reset(OpAMD64SETBstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
			v0.AddArg2(x, y)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}

	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			if v_1_0.Op != OpAMD64SHLQ {
				continue
			}
			x := v_1_0.Args[1]
			v_1_0_0 := v_1_0.Args[0]
			if v_1_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_1_0_0.AuxInt) != 1 {
				continue
			}
			y := v_1_1
			mem := v_2
			v.reset(OpAMD64SETBstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
			v0.AddArg2(x, y)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}

	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		x := v_1.Args[0]
		mem := v_2
		if !(isUint32PowerOfTwo(int64(c))) {
			break
		}
		v.reset(OpAMD64SETBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
		v0.AuxInt = int8ToAuxInt(int8(log32(c)))
		v0.AddArg(x)
		v.AddArg3(ptr, v0, mem)
		return true
	}

	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTQconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		x := v_1.Args[0]
		mem := v_2
		if !(isUint64PowerOfTwo(int64(c))) {
			break
		}
		v.reset(OpAMD64SETBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
		v0.AuxInt = int8ToAuxInt(int8(log32(c)))
		v0.AddArg(x)
		v.AddArg3(ptr, v0, mem)
		return true
	}

	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			if v_1_0.Op != OpAMD64MOVQconst {
				continue
			}
			c := auxIntToInt64(v_1_0.AuxInt)
			x := v_1_1
			mem := v_2
			if !(isUint64PowerOfTwo(c)) {
				continue
			}
			v.reset(OpAMD64SETBstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(int8(log64(c)))
			v0.AddArg(x)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}

	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64CMPLconst || auxIntToInt32(v_1.AuxInt) != 1 {
			break
		}
		s := v_1.Args[0]
		if s.Op != OpAMD64ANDLconst || auxIntToInt32(s.AuxInt) != 1 {
			break
		}
		mem := v_2
		v.reset(OpAMD64SETEQstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(0)
		v0.AddArg(s)
		v.AddArg3(ptr, v0, mem)
		return true
	}

	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64CMPQconst || auxIntToInt32(v_1.AuxInt) != 1 {
			break
		}
		s := v_1.Args[0]
		if s.Op != OpAMD64ANDQconst || auxIntToInt32(s.AuxInt) != 1 {
			break
		}
		mem := v_2
		v.reset(OpAMD64SETEQstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(0)
		v0.AddArg(s)
		v.AddArg3(ptr, v0, mem)
		return true
	}

	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			z1 := v_1_0
			if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 {
				continue
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
				continue
			}
			x := z1_0.Args[0]
			z2 := v_1_1
			mem := v_2
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETBstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(63)
			v0.AddArg(x)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}

	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			z1 := v_1_0
			if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 {
				continue
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHRLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
				continue
			}
			x := z1_0.Args[0]
			z2 := v_1_1
			mem := v_2
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETBstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(31)
			v0.AddArg(x)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}

	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			z1 := v_1_0
			if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
				continue
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
				continue
			}
			x := z1_0.Args[0]
			z2 := v_1_1
			mem := v_2
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETBstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(0)
			v0.AddArg(x)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}

	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			z1 := v_1_0
			if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
				continue
			}
			z1_0 := z1.Args[0]
			if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
				continue
			}
			x := z1_0.Args[0]
			z2 := v_1_1
			mem := v_2
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETBstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(0)
			v0.AddArg(x)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}

	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTQ {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			z1 := v_1_0
			if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
				continue
			}
			x := z1.Args[0]
			z2 := v_1_1
			mem := v_2
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETBstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(63)
			v0.AddArg(x)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}

	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64TESTL {
			break
		}
		_ = v_1.Args[1]
		v_1_0 := v_1.Args[0]
		v_1_1 := v_1.Args[1]
		for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
			z1 := v_1_0
			if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
				continue
			}
			x := z1.Args[0]
			z2 := v_1_1
			mem := v_2
			if !(z1 == z2) {
				continue
			}
			v.reset(OpAMD64SETBstore)
			v.AuxInt = int32ToAuxInt(off)
			v.Aux = symToAux(sym)
			v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
			v0.AuxInt = int8ToAuxInt(31)
			v0.AddArg(x)
			v.AddArg3(ptr, v0, mem)
			return true
		}
		break
	}

	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64InvertFlags {
			break
		}
		x := v_1.Args[0]
		mem := v_2
		v.reset(OpAMD64SETNEstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}

	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		if v_0.Op != OpAMD64ADDQconst {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1) + int64(off2))) {
			break
		}
		v.reset(OpAMD64SETNEstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(sym)
		v.AddArg3(base, val, mem)
		return true
	}

	for {
		off1 := auxIntToInt32(v.AuxInt)
		sym1 := auxToSym(v.Aux)
		if v_0.Op != OpAMD64LEAQ {
			break
		}
		off2 := auxIntToInt32(v_0.AuxInt)
		sym2 := auxToSym(v_0.Aux)
		base := v_0.Args[0]
		val := v_1
		mem := v_2
		if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
			break
		}
		v.reset(OpAMD64SETNEstore)
		v.AuxInt = int32ToAuxInt(off1 + off2)
		v.Aux = symToAux(mergeSym(sym1, sym2))
		v.AddArg3(base, val, mem)
		return true
	}

	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagEQ {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(0)
		v.AddArg3(ptr, v0, mem)
		return true
	}

	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}

	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagLT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}

	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_ULT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}

	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64FlagGT_UGT {
			break
		}
		mem := v_2
		v.reset(OpAMD64MOVBstore)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
		v0.AuxInt = int32ToAuxInt(1)
		v.AddArg3(ptr, v0, mem)
		return true
	}
	return false
}
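// 32-bit shifts only use the low five bits of the count, so masking the count
// with 31 (or adding a multiple of 32 to it) is a no-op and is dropped, e.g.
// (approximately):
//   (SHLL x (ANDQconst [c] y)) && c&31 == 31 => (SHLL x y)
// The final rewrite merges a loaded operand into SHLXL when GOAMD64 >= 3,
// where the BMI2 SHLX form is available.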
func rewriteValueAMD64_OpAMD64SHLL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block

	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpAMD64SHLLconst)
		v.AuxInt = int8ToAuxInt(int8(c & 31))
		v.AddArg(x)
		return true
	}

	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64SHLLconst)
		v.AuxInt = int8ToAuxInt(int8(c & 31))
		v.AddArg(x)
		return true
	}

	for {
		x := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg2(x, y)
		return true
	}

	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDQconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHLL)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}

	for {
		x := v_0
		if v_1.Op != OpAMD64ANDQconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg2(x, y)
		return true
	}

	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDQconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHLL)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}

	for {
		x := v_0
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg2(x, y)
		return true
	}

	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDLconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHLL)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}

	for {
		x := v_0
		if v_1.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg2(x, y)
		return true
	}

	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHLL)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}

	for {
		l := v_0
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		x := v_1
		if !(buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)) {
			break
		}
		v.reset(OpAMD64SHLXLload)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	return false
}
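// Constant shifts simplify directly: a shift by 0 is the identity, a shift by
// 1 becomes an add of the value to itself, and shifting an (ADDL x x) just
// bumps the count, since x<<0 = x, x<<1 = x+x, and (x+x)<<c = x<<(c+1).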
func rewriteValueAMD64_OpAMD64SHLLconst(v *Value) bool {
	v_0 := v.Args[0]

	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}

	for {
		if auxIntToInt8(v.AuxInt) != 1 {
			break
		}
		x := v_0
		v.reset(OpAMD64ADDL)
		v.AddArg2(x, x)
		return true
	}

	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64ADDL {
			break
		}
		x := v_0.Args[1]
		if x != v_0.Args[0] {
			break
		}
		v.reset(OpAMD64SHLLconst)
		v.AuxInt = int8ToAuxInt(c + 1)
		v.AddArg(x)
		return true
	}

	for {
		d := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(c << uint64(d))
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHLQ(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block

	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpAMD64SHLQconst)
		v.AuxInt = int8ToAuxInt(int8(c & 63))
		v.AddArg(x)
		return true
	}

	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64SHLQconst)
		v.AuxInt = int8ToAuxInt(int8(c & 63))
		v.AddArg(x)
		return true
	}

	for {
		x := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg2(x, y)
		return true
	}

	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDQconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}

	for {
		x := v_0
		if v_1.Op != OpAMD64ANDQconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg2(x, y)
		return true
	}

	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDQconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}

	for {
		x := v_0
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg2(x, y)
		return true
	}

	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDLconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}

	for {
		x := v_0
		if v_1.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg2(x, y)
		return true
	}

	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}

	for {
		l := v_0
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		x := v_1
		if !(buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)) {
			break
		}
		v.reset(OpAMD64SHLXQload)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	return false
}
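// The 64-bit constant-shift folds mirror the 32-bit ones; shifting a constant
// is evaluated at compile time, e.g. (approximately):
//   (SHLQconst [d] (MOVQconst [c])) => (MOVQconst [c << uint64(d)])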
func rewriteValueAMD64_OpAMD64SHLQconst(v *Value) bool {
	v_0 := v.Args[0]

	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}

	for {
		if auxIntToInt8(v.AuxInt) != 1 {
			break
		}
		x := v_0
		v.reset(OpAMD64ADDQ)
		v.AddArg2(x, x)
		return true
	}

	for {
		c := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64ADDQ {
			break
		}
		x := v_0.Args[1]
		if x != v_0.Args[0] {
			break
		}
		v.reset(OpAMD64SHLQconst)
		v.AuxInt = int8ToAuxInt(c + 1)
		v.AddArg(x)
		return true
	}

	for {
		d := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(c << uint64(d))
		return true
	}

	for {
		d := auxIntToInt8(v.AuxInt)
		if v_0.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_0.AuxInt)
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(int64(c) << uint64(d))
		return true
	}
	return false
}
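// If the shift count of a merged load-shift turns out to be constant, the
// SHLX form has no advantage over a plain constant shift, so the rewrites
// below re-expand it into a load feeding SHLLconst/SHLQconst.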
func rewriteValueAMD64_OpAMD64SHLXLload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types

	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		mem := v_2
		v.reset(OpAMD64SHLLconst)
		v.AuxInt = int8ToAuxInt(int8(c & 31))
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHLXQload(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types

	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		mem := v_2
		v.reset(OpAMD64SHLQconst)
		v.AuxInt = int8ToAuxInt(int8(c & 63))
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		v.AddArg(v0)
		return true
	}

	for {
		off := auxIntToInt32(v.AuxInt)
		sym := auxToSym(v.Aux)
		ptr := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		mem := v_2
		v.reset(OpAMD64SHLQconst)
		v.AuxInt = int8ToAuxInt(int8(c & 63))
		v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
		v0.AuxInt = int32ToAuxInt(off)
		v0.Aux = symToAux(sym)
		v0.AddArg2(ptr, mem)
		v.AddArg(v0)
		return true
	}
	return false
}
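// Byte shifts still mask their count mod 32, but any count of 8 or more
// shifts out every bit of the byte, so the result is a constant 0; counts
// below 8 become SHRBconst.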
func rewriteValueAMD64_OpAMD64SHRB(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]

	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		if !(c&31 < 8) {
			break
		}
		v.reset(OpAMD64SHRBconst)
		v.AuxInt = int8ToAuxInt(int8(c & 31))
		v.AddArg(x)
		return true
	}

	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		if !(c&31 < 8) {
			break
		}
		v.reset(OpAMD64SHRBconst)
		v.AuxInt = int8ToAuxInt(int8(c & 31))
		v.AddArg(x)
		return true
	}

	for {
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		if !(c&31 >= 8) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}

	for {
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		if !(c&31 >= 8) {
			break
		}
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHRBconst(v *Value) bool {
	v_0 := v.Args[0]

	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHRL(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block

	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpAMD64SHRLconst)
		v.AuxInt = int8ToAuxInt(int8(c & 31))
		v.AddArg(x)
		return true
	}

	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64SHRLconst)
		v.AuxInt = int8ToAuxInt(int8(c & 31))
		v.AddArg(x)
		return true
	}

	for {
		x := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg2(x, y)
		return true
	}

	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDQconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHRL)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}

	for {
		x := v_0
		if v_1.Op != OpAMD64ANDQconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg2(x, y)
		return true
	}

	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDQconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHRL)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}

	for {
		x := v_0
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg2(x, y)
		return true
	}

	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDLconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&31 == 0) {
			break
		}
		v.reset(OpAMD64SHRL)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}

	for {
		x := v_0
		if v_1.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHRL)
		v.AddArg2(x, y)
		return true
	}

	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&31 == 31) {
			break
		}
		v.reset(OpAMD64SHRL)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}

	for {
		l := v_0
		if l.Op != OpAMD64MOVLload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		x := v_1
		if !(buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)) {
			break
		}
		v.reset(OpAMD64SHRXLload)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	return false
}
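// In the first rewrite below, (x+x)>>1 doubles x and then halves it
// logically, which just clears the sign bit: for 32 bits that is
// x & 0x7fffffff.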
func rewriteValueAMD64_OpAMD64SHRLconst(v *Value) bool {
	v_0 := v.Args[0]

	for {
		if auxIntToInt8(v.AuxInt) != 1 || v_0.Op != OpAMD64ADDL {
			break
		}
		x := v_0.Args[1]
		if x != v_0.Args[0] {
			break
		}
		v.reset(OpAMD64ANDLconst)
		v.AuxInt = int32ToAuxInt(0x7fffffff)
		v.AddArg(x)
		return true
	}

	for {
		if auxIntToInt8(v.AuxInt) != 0 {
			break
		}
		x := v_0
		v.copyOf(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpAMD64SHRQ(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block

	for {
		x := v_0
		if v_1.Op != OpAMD64MOVQconst {
			break
		}
		c := auxIntToInt64(v_1.AuxInt)
		v.reset(OpAMD64SHRQconst)
		v.AuxInt = int8ToAuxInt(int8(c & 63))
		v.AddArg(x)
		return true
	}

	for {
		x := v_0
		if v_1.Op != OpAMD64MOVLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		v.reset(OpAMD64SHRQconst)
		v.AuxInt = int8ToAuxInt(int8(c & 63))
		v.AddArg(x)
		return true
	}

	for {
		x := v_0
		if v_1.Op != OpAMD64ADDQconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg2(x, y)
		return true
	}

	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDQconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}

	for {
		x := v_0
		if v_1.Op != OpAMD64ANDQconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg2(x, y)
		return true
	}

	for {
		x := v_0
		if v_1.Op != OpAMD64NEGQ {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDQconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}

	for {
		x := v_0
		if v_1.Op != OpAMD64ADDLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg2(x, y)
		return true
	}

	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ADDLconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&63 == 0) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}

	for {
		x := v_0
		if v_1.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_1.AuxInt)
		y := v_1.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v.AddArg2(x, y)
		return true
	}

	for {
		x := v_0
		if v_1.Op != OpAMD64NEGL {
			break
		}
		t := v_1.Type
		v_1_0 := v_1.Args[0]
		if v_1_0.Op != OpAMD64ANDLconst {
			break
		}
		c := auxIntToInt32(v_1_0.AuxInt)
		y := v_1_0.Args[0]
		if !(c&63 == 63) {
			break
		}
		v.reset(OpAMD64SHRQ)
		v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
		v0.AddArg(y)
		v.AddArg2(x, v0)
		return true
	}

	for {
		l := v_0
		if l.Op != OpAMD64MOVQload {
			break
		}
		off := auxIntToInt32(l.AuxInt)
		sym := auxToSym(l.Aux)
		mem := l.Args[1]
		ptr := l.Args[0]
		x := v_1
		if !(buildcfg.GOAMD64 >= 3 && canMergeLoad(v, l) && clobber(l)) {
			break
		}
		v.reset(OpAMD64SHRXQload)
		v.AuxInt = int32ToAuxInt(off)
		v.Aux = symToAux(sym)
		v.AddArg3(ptr, x, mem)
		return true
	}
	return false
}
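// The 64-bit analogue of the (x+x)>>1 fold clears bit 63 instead, using
// BTRQconst [63], i.e. x & (1<<63 - 1).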
21524 func rewriteValueAMD64_OpAMD64SHRQconst(v *Value) bool {
21525 v_0 := v.Args[0]
21526
21527
21528 for {
21529 if auxIntToInt8(v.AuxInt) != 1 || v_0.Op != OpAMD64ADDQ {
21530 break
21531 }
21532 x := v_0.Args[1]
21533 if x != v_0.Args[0] {
21534 break
21535 }
21536 v.reset(OpAMD64BTRQconst)
21537 v.AuxInt = int8ToAuxInt(63)
21538 v.AddArg(x)
21539 return true
21540 }
21541
21542
21543 for {
21544 if auxIntToInt8(v.AuxInt) != 0 {
21545 break
21546 }
21547 x := v_0
21548 v.copyOf(x)
21549 return true
21550 }
21551 return false
21552 }
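// The SHRW rewrites below rely on the CPU masking 16-bit shift counts mod 32:
// a masked count below 16 becomes an immediate-count SHRWconst, while a
// masked count of 16 or more shifts out every bit of the 16-bit operand and
// folds to constant zero. Illustrative Go (not from this file):
//
//	var x uint16
//	_ = x >> 3  // SHRWconst [3] x
//	_ = x >> 17 // MOVLconst [0], since 17&31 >= 16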
21553 func rewriteValueAMD64_OpAMD64SHRW(v *Value) bool {
21554 v_1 := v.Args[1]
21555 v_0 := v.Args[0]
21556 // match: (SHRW x (MOVQconst [c]))
21557 // cond: c&31 < 16
21558 // result: (SHRWconst [int8(c&31)] x)
21559 for {
21560 x := v_0
21561 if v_1.Op != OpAMD64MOVQconst {
21562 break
21563 }
21564 c := auxIntToInt64(v_1.AuxInt)
21565 if !(c&31 < 16) {
21566 break
21567 }
21568 v.reset(OpAMD64SHRWconst)
21569 v.AuxInt = int8ToAuxInt(int8(c & 31))
21570 v.AddArg(x)
21571 return true
21572 }
21573 // match: (SHRW x (MOVLconst [c]))
21574 // cond: c&31 < 16
21575 // result: (SHRWconst [int8(c&31)] x)
21576 for {
21577 x := v_0
21578 if v_1.Op != OpAMD64MOVLconst {
21579 break
21580 }
21581 c := auxIntToInt32(v_1.AuxInt)
21582 if !(c&31 < 16) {
21583 break
21584 }
21585 v.reset(OpAMD64SHRWconst)
21586 v.AuxInt = int8ToAuxInt(int8(c & 31))
21587 v.AddArg(x)
21588 return true
21589 }
21590 // match: (SHRW _ (MOVQconst [c]))
21591 // cond: c&31 >= 16
21592 // result: (MOVLconst [0])
21593 for {
21594 if v_1.Op != OpAMD64MOVQconst {
21595 break
21596 }
21597 c := auxIntToInt64(v_1.AuxInt)
21598 if !(c&31 >= 16) {
21599 break
21600 }
21601 v.reset(OpAMD64MOVLconst)
21602 v.AuxInt = int32ToAuxInt(0)
21603 return true
21604 }
21605 // match: (SHRW _ (MOVLconst [c]))
21606 // cond: c&31 >= 16
21607 // result: (MOVLconst [0])
21608 for {
21609 if v_1.Op != OpAMD64MOVLconst {
21610 break
21611 }
21612 c := auxIntToInt32(v_1.AuxInt)
21613 if !(c&31 >= 16) {
21614 break
21615 }
21616 v.reset(OpAMD64MOVLconst)
21617 v.AuxInt = int32ToAuxInt(0)
21618 return true
21619 }
21620 return false
21621 }
21622 func rewriteValueAMD64_OpAMD64SHRWconst(v *Value) bool {
21623 v_0 := v.Args[0]
21624 // match: (SHRWconst x [0])
21625 // result: x
21626 for {
21627 if auxIntToInt8(v.AuxInt) != 0 {
21628 break
21629 }
21630 x := v_0
21631 v.copyOf(x)
21632 return true
21633 }
21634 return false
21635 }
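// SHRXLload is the BMI2 shift-from-memory form with a register-held count;
// when the count turns out to be a constant, the rule below trades it back
// for a plain load plus an immediate-count shift, which needs no extra count
// register (a reading of the transformation, inferred from the code below).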
21636 func rewriteValueAMD64_OpAMD64SHRXLload(v *Value) bool {
21637 v_2 := v.Args[2]
21638 v_1 := v.Args[1]
21639 v_0 := v.Args[0]
21640 b := v.Block
21641 typ := &b.Func.Config.Types
21642 // match: (SHRXLload [off] {sym} ptr (MOVLconst [c]) mem)
21643 // result: (SHRLconst [int8(c&31)] (MOVLload [off] {sym} ptr mem))
21644 for {
21645 off := auxIntToInt32(v.AuxInt)
21646 sym := auxToSym(v.Aux)
21647 ptr := v_0
21648 if v_1.Op != OpAMD64MOVLconst {
21649 break
21650 }
21651 c := auxIntToInt32(v_1.AuxInt)
21652 mem := v_2
21653 v.reset(OpAMD64SHRLconst)
21654 v.AuxInt = int8ToAuxInt(int8(c & 31))
21655 v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
21656 v0.AuxInt = int32ToAuxInt(off)
21657 v0.Aux = symToAux(sym)
21658 v0.AddArg2(ptr, mem)
21659 v.AddArg(v0)
21660 return true
21661 }
21662 return false
21663 }
21664 func rewriteValueAMD64_OpAMD64SHRXQload(v *Value) bool {
21665 v_2 := v.Args[2]
21666 v_1 := v.Args[1]
21667 v_0 := v.Args[0]
21668 b := v.Block
21669 typ := &b.Func.Config.Types
21670 // match: (SHRXQload [off] {sym} ptr (MOVQconst [c]) mem)
21671 // result: (SHRQconst [int8(c&63)] (MOVQload [off] {sym} ptr mem))
21672 for {
21673 off := auxIntToInt32(v.AuxInt)
21674 sym := auxToSym(v.Aux)
21675 ptr := v_0
21676 if v_1.Op != OpAMD64MOVQconst {
21677 break
21678 }
21679 c := auxIntToInt64(v_1.AuxInt)
21680 mem := v_2
21681 v.reset(OpAMD64SHRQconst)
21682 v.AuxInt = int8ToAuxInt(int8(c & 63))
21683 v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
21684 v0.AuxInt = int32ToAuxInt(off)
21685 v0.Aux = symToAux(sym)
21686 v0.AddArg2(ptr, mem)
21687 v.AddArg(v0)
21688 return true
21689 }
21690 // match: (SHRXQload [off] {sym} ptr (MOVLconst [c]) mem)
21691 // result: (SHRQconst [int8(c&63)] (MOVQload [off] {sym} ptr mem))
21692 for {
21693 off := auxIntToInt32(v.AuxInt)
21694 sym := auxToSym(v.Aux)
21695 ptr := v_0
21696 if v_1.Op != OpAMD64MOVLconst {
21697 break
21698 }
21699 c := auxIntToInt32(v_1.AuxInt)
21700 mem := v_2
21701 v.reset(OpAMD64SHRQconst)
21702 v.AuxInt = int8ToAuxInt(int8(c & 63))
21703 v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
21704 v0.AuxInt = int32ToAuxInt(off)
21705 v0.Aux = symToAux(sym)
21706 v0.AddArg2(ptr, mem)
21707 v.AddArg(v0)
21708 return true
21709 }
21710 return false
21711 }
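// SUBL folding, sketched in source terms (illustrative Go, not from this
// file):
//
//	var a, b int32
//	_ = a - 7 // SUBLconst [7] a
//	_ = 7 - b // NEGL (SUBLconst [7] b): only the right operand may be immediate
//	_ = a - a // MOVLconst [0]
//
// The final rule merges a load into SUBLload when the loaded value's only
// consumer is the subtraction.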
21712 func rewriteValueAMD64_OpAMD64SUBL(v *Value) bool {
21713 v_1 := v.Args[1]
21714 v_0 := v.Args[0]
21715 b := v.Block
21716 // match: (SUBL x (MOVLconst [c]))
21717 // result: (SUBLconst x [c])
21718 for {
21719 x := v_0
21720 if v_1.Op != OpAMD64MOVLconst {
21721 break
21722 }
21723 c := auxIntToInt32(v_1.AuxInt)
21724 v.reset(OpAMD64SUBLconst)
21725 v.AuxInt = int32ToAuxInt(c)
21726 v.AddArg(x)
21727 return true
21728 }
21729 // match: (SUBL (MOVLconst [c]) x)
21730 // result: (NEGL (SUBLconst <v.Type> x [c]))
21731 for {
21732 if v_0.Op != OpAMD64MOVLconst {
21733 break
21734 }
21735 c := auxIntToInt32(v_0.AuxInt)
21736 x := v_1
21737 v.reset(OpAMD64NEGL)
21738 v0 := b.NewValue0(v.Pos, OpAMD64SUBLconst, v.Type)
21739 v0.AuxInt = int32ToAuxInt(c)
21740 v0.AddArg(x)
21741 v.AddArg(v0)
21742 return true
21743 }
21744 // match: (SUBL x x)
21745 // result: (MOVLconst [0])
21746 for {
21747 x := v_0
21748 if x != v_1 {
21749 break
21750 }
21751 v.reset(OpAMD64MOVLconst)
21752 v.AuxInt = int32ToAuxInt(0)
21753 return true
21754 }
21755 // match: (SUBL x l:(MOVLload [off] {sym} ptr mem))
21756 // cond: canMergeLoadClobber(v, l, x) && clobber(l)
21757 // result: (SUBLload x [off] {sym} ptr mem)
21758 for {
21759 x := v_0
21760 l := v_1
21761 if l.Op != OpAMD64MOVLload {
21762 break
21763 }
21764 off := auxIntToInt32(l.AuxInt)
21765 sym := auxToSym(l.Aux)
21766 mem := l.Args[1]
21767 ptr := l.Args[0]
21768 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
21769 break
21770 }
21771 v.reset(OpAMD64SUBLload)
21772 v.AuxInt = int32ToAuxInt(off)
21773 v.Aux = symToAux(sym)
21774 v.AddArg3(x, ptr, mem)
21775 return true
21776 }
21777 return false
21778 }
21779 func rewriteValueAMD64_OpAMD64SUBLconst(v *Value) bool {
21780 v_0 := v.Args[0]
21781 // match: (SUBLconst [c] x)
21782 // cond: c==0
21783 // result: x
21784 for {
21785 c := auxIntToInt32(v.AuxInt)
21786 x := v_0
21787 if !(c == 0) {
21788 break
21789 }
21790 v.copyOf(x)
21791 return true
21792 }
21793 // match: (SUBLconst [c] x)
21794 // result: (ADDLconst [-c] x)
21795 for {
21796 c := auxIntToInt32(v.AuxInt)
21797 x := v_0
21798 v.reset(OpAMD64ADDLconst)
21799 v.AuxInt = int32ToAuxInt(-c)
21800 v.AddArg(x)
21801 return true
21802 }
21803 }
21804 func rewriteValueAMD64_OpAMD64SUBLload(v *Value) bool {
21805 v_2 := v.Args[2]
21806 v_1 := v.Args[1]
21807 v_0 := v.Args[0]
21808 b := v.Block
21809 typ := &b.Func.Config.Types
21810 // match: (SUBLload [off1] {sym} val (ADDQconst [off2] base) mem)
21811 // cond: is32Bit(int64(off1)+int64(off2))
21812 // result: (SUBLload [off1+off2] {sym} val base mem)
21813 for {
21814 off1 := auxIntToInt32(v.AuxInt)
21815 sym := auxToSym(v.Aux)
21816 val := v_0
21817 if v_1.Op != OpAMD64ADDQconst {
21818 break
21819 }
21820 off2 := auxIntToInt32(v_1.AuxInt)
21821 base := v_1.Args[0]
21822 mem := v_2
21823 if !(is32Bit(int64(off1) + int64(off2))) {
21824 break
21825 }
21826 v.reset(OpAMD64SUBLload)
21827 v.AuxInt = int32ToAuxInt(off1 + off2)
21828 v.Aux = symToAux(sym)
21829 v.AddArg3(val, base, mem)
21830 return true
21831 }
21832 // match: (SUBLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
21833 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
21834 // result: (SUBLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
21835 for {
21836 off1 := auxIntToInt32(v.AuxInt)
21837 sym1 := auxToSym(v.Aux)
21838 val := v_0
21839 if v_1.Op != OpAMD64LEAQ {
21840 break
21841 }
21842 off2 := auxIntToInt32(v_1.AuxInt)
21843 sym2 := auxToSym(v_1.Aux)
21844 base := v_1.Args[0]
21845 mem := v_2
21846 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
21847 break
21848 }
21849 v.reset(OpAMD64SUBLload)
21850 v.AuxInt = int32ToAuxInt(off1 + off2)
21851 v.Aux = symToAux(mergeSym(sym1, sym2))
21852 v.AddArg3(val, base, mem)
21853 return true
21854 }
21855 // match: (SUBLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
21856 // result: (SUBL x (MOVLf2i y))
21857 for {
21858 off := auxIntToInt32(v.AuxInt)
21859 sym := auxToSym(v.Aux)
21860 x := v_0
21861 ptr := v_1
21862 if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
21863 break
21864 }
21865 y := v_2.Args[1]
21866 if ptr != v_2.Args[0] {
21867 break
21868 }
21869 v.reset(OpAMD64SUBL)
21870 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
21871 v0.AddArg(y)
21872 v.AddArg2(x, v0)
21873 return true
21874 }
21875 return false
21876 }
21877 func rewriteValueAMD64_OpAMD64SUBLmodify(v *Value) bool {
21878 v_2 := v.Args[2]
21879 v_1 := v.Args[1]
21880 v_0 := v.Args[0]
21881 // match: (SUBLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
21882 // cond: is32Bit(int64(off1)+int64(off2))
21883 // result: (SUBLmodify [off1+off2] {sym} base val mem)
21884 for {
21885 off1 := auxIntToInt32(v.AuxInt)
21886 sym := auxToSym(v.Aux)
21887 if v_0.Op != OpAMD64ADDQconst {
21888 break
21889 }
21890 off2 := auxIntToInt32(v_0.AuxInt)
21891 base := v_0.Args[0]
21892 val := v_1
21893 mem := v_2
21894 if !(is32Bit(int64(off1) + int64(off2))) {
21895 break
21896 }
21897 v.reset(OpAMD64SUBLmodify)
21898 v.AuxInt = int32ToAuxInt(off1 + off2)
21899 v.Aux = symToAux(sym)
21900 v.AddArg3(base, val, mem)
21901 return true
21902 }
21903 // match: (SUBLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
21904 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
21905 // result: (SUBLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
21906 for {
21907 off1 := auxIntToInt32(v.AuxInt)
21908 sym1 := auxToSym(v.Aux)
21909 if v_0.Op != OpAMD64LEAQ {
21910 break
21911 }
21912 off2 := auxIntToInt32(v_0.AuxInt)
21913 sym2 := auxToSym(v_0.Aux)
21914 base := v_0.Args[0]
21915 val := v_1
21916 mem := v_2
21917 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
21918 break
21919 }
21920 v.reset(OpAMD64SUBLmodify)
21921 v.AuxInt = int32ToAuxInt(off1 + off2)
21922 v.Aux = symToAux(mergeSym(sym1, sym2))
21923 v.AddArg3(base, val, mem)
21924 return true
21925 }
21926 return false
21927 }
21928 func rewriteValueAMD64_OpAMD64SUBQ(v *Value) bool {
21929 v_1 := v.Args[1]
21930 v_0 := v.Args[0]
21931 b := v.Block
21932 // match: (SUBQ x (MOVQconst [c]))
21933 // cond: is32Bit(c)
21934 // result: (SUBQconst x [int32(c)])
21935 for {
21936 x := v_0
21937 if v_1.Op != OpAMD64MOVQconst {
21938 break
21939 }
21940 c := auxIntToInt64(v_1.AuxInt)
21941 if !(is32Bit(c)) {
21942 break
21943 }
21944 v.reset(OpAMD64SUBQconst)
21945 v.AuxInt = int32ToAuxInt(int32(c))
21946 v.AddArg(x)
21947 return true
21948 }
21949 // match: (SUBQ (MOVQconst [c]) x)
21950 // cond: is32Bit(c)
21951 // result: (NEGQ (SUBQconst <v.Type> x [int32(c)]))
21952 for {
21953 if v_0.Op != OpAMD64MOVQconst {
21954 break
21955 }
21956 c := auxIntToInt64(v_0.AuxInt)
21957 x := v_1
21958 if !(is32Bit(c)) {
21959 break
21960 }
21961 v.reset(OpAMD64NEGQ)
21962 v0 := b.NewValue0(v.Pos, OpAMD64SUBQconst, v.Type)
21963 v0.AuxInt = int32ToAuxInt(int32(c))
21964 v0.AddArg(x)
21965 v.AddArg(v0)
21966 return true
21967 }
21968 // match: (SUBQ x x)
21969 // result: (MOVQconst [0])
21970 for {
21971 x := v_0
21972 if x != v_1 {
21973 break
21974 }
21975 v.reset(OpAMD64MOVQconst)
21976 v.AuxInt = int64ToAuxInt(0)
21977 return true
21978 }
21979 // match: (SUBQ x l:(MOVQload [off] {sym} ptr mem))
21980 // cond: canMergeLoadClobber(v, l, x) && clobber(l)
21981 // result: (SUBQload x [off] {sym} ptr mem)
21982 for {
21983 x := v_0
21984 l := v_1
21985 if l.Op != OpAMD64MOVQload {
21986 break
21987 }
21988 off := auxIntToInt32(l.AuxInt)
21989 sym := auxToSym(l.Aux)
21990 mem := l.Args[1]
21991 ptr := l.Args[0]
21992 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
21993 break
21994 }
21995 v.reset(OpAMD64SUBQload)
21996 v.AuxInt = int32ToAuxInt(off)
21997 v.Aux = symToAux(sym)
21998 v.AddArg3(x, ptr, mem)
21999 return true
22000 }
22001 return false
22002 }
22003 func rewriteValueAMD64_OpAMD64SUBQborrow(v *Value) bool {
22004 v_1 := v.Args[1]
22005 v_0 := v.Args[0]
22006 // match: (SUBQborrow x (MOVQconst [c]))
22007 // cond: is32Bit(c)
22008 // result: (SUBQconstborrow x [int32(c)])
22009 for {
22010 x := v_0
22011 if v_1.Op != OpAMD64MOVQconst {
22012 break
22013 }
22014 c := auxIntToInt64(v_1.AuxInt)
22015 if !(is32Bit(c)) {
22016 break
22017 }
22018 v.reset(OpAMD64SUBQconstborrow)
22019 v.AuxInt = int32ToAuxInt(int32(c))
22020 v.AddArg(x)
22021 return true
22022 }
22023 return false
22024 }
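// SUBQconst prefers ADDQconst with a negated immediate so later passes can
// share the ADDQconst folding rules; c == -(1<<31) is excluded because its
// negation does not fit in int32. Illustrative arithmetic:
//
//	x - 5    // ADDQconst [-5] x
//	x - (-8) // ADDQconst [8] x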
22025 func rewriteValueAMD64_OpAMD64SUBQconst(v *Value) bool {
22026 v_0 := v.Args[0]
22027 // match: (SUBQconst [0] x)
22028 // result: x
22029 for {
22030 if auxIntToInt32(v.AuxInt) != 0 {
22031 break
22032 }
22033 x := v_0
22034 v.copyOf(x)
22035 return true
22036 }
22037 // match: (SUBQconst [c] x)
22038 // cond: c != -(1<<31)
22039 // result: (ADDQconst [-c] x)
22040 for {
22041 c := auxIntToInt32(v.AuxInt)
22042 x := v_0
22043 if !(c != -(1 << 31)) {
22044 break
22045 }
22046 v.reset(OpAMD64ADDQconst)
22047 v.AuxInt = int32ToAuxInt(-c)
22048 v.AddArg(x)
22049 return true
22050 }
22051 // match: (SUBQconst (MOVQconst [d]) [c])
22052 // result: (MOVQconst [d-int64(c)])
22053 for {
22054 c := auxIntToInt32(v.AuxInt)
22055 if v_0.Op != OpAMD64MOVQconst {
22056 break
22057 }
22058 d := auxIntToInt64(v_0.AuxInt)
22059 v.reset(OpAMD64MOVQconst)
22060 v.AuxInt = int64ToAuxInt(d - int64(c))
22061 return true
22062 }
22063 // match: (SUBQconst (SUBQconst x [d]) [c])
22064 // cond: is32Bit(int64(-c)-int64(d))
22065 // result: (ADDQconst [-c-d] x)
22066 for {
22067 c := auxIntToInt32(v.AuxInt)
22068 if v_0.Op != OpAMD64SUBQconst {
22069 break
22070 }
22071 d := auxIntToInt32(v_0.AuxInt)
22072 x := v_0.Args[0]
22073 if !(is32Bit(int64(-c) - int64(d))) {
22074 break
22075 }
22076 v.reset(OpAMD64ADDQconst)
22077 v.AuxInt = int32ToAuxInt(-c - d)
22078 v.AddArg(x)
22079 return true
22080 }
22081 return false
22082 }
22083 func rewriteValueAMD64_OpAMD64SUBQload(v *Value) bool {
22084 v_2 := v.Args[2]
22085 v_1 := v.Args[1]
22086 v_0 := v.Args[0]
22087 b := v.Block
22088 typ := &b.Func.Config.Types
22089 // match: (SUBQload [off1] {sym} val (ADDQconst [off2] base) mem)
22090 // cond: is32Bit(int64(off1)+int64(off2))
22091 // result: (SUBQload [off1+off2] {sym} val base mem)
22092 for {
22093 off1 := auxIntToInt32(v.AuxInt)
22094 sym := auxToSym(v.Aux)
22095 val := v_0
22096 if v_1.Op != OpAMD64ADDQconst {
22097 break
22098 }
22099 off2 := auxIntToInt32(v_1.AuxInt)
22100 base := v_1.Args[0]
22101 mem := v_2
22102 if !(is32Bit(int64(off1) + int64(off2))) {
22103 break
22104 }
22105 v.reset(OpAMD64SUBQload)
22106 v.AuxInt = int32ToAuxInt(off1 + off2)
22107 v.Aux = symToAux(sym)
22108 v.AddArg3(val, base, mem)
22109 return true
22110 }
22111 // match: (SUBQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
22112 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
22113 // result: (SUBQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
22114 for {
22115 off1 := auxIntToInt32(v.AuxInt)
22116 sym1 := auxToSym(v.Aux)
22117 val := v_0
22118 if v_1.Op != OpAMD64LEAQ {
22119 break
22120 }
22121 off2 := auxIntToInt32(v_1.AuxInt)
22122 sym2 := auxToSym(v_1.Aux)
22123 base := v_1.Args[0]
22124 mem := v_2
22125 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
22126 break
22127 }
22128 v.reset(OpAMD64SUBQload)
22129 v.AuxInt = int32ToAuxInt(off1 + off2)
22130 v.Aux = symToAux(mergeSym(sym1, sym2))
22131 v.AddArg3(val, base, mem)
22132 return true
22133 }
22134 // match: (SUBQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
22135 // result: (SUBQ x (MOVQf2i y))
22136 for {
22137 off := auxIntToInt32(v.AuxInt)
22138 sym := auxToSym(v.Aux)
22139 x := v_0
22140 ptr := v_1
22141 if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
22142 break
22143 }
22144 y := v_2.Args[1]
22145 if ptr != v_2.Args[0] {
22146 break
22147 }
22148 v.reset(OpAMD64SUBQ)
22149 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
22150 v0.AddArg(y)
22151 v.AddArg2(x, v0)
22152 return true
22153 }
22154 return false
22155 }
22156 func rewriteValueAMD64_OpAMD64SUBQmodify(v *Value) bool {
22157 v_2 := v.Args[2]
22158 v_1 := v.Args[1]
22159 v_0 := v.Args[0]
22160 // match: (SUBQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
22161 // cond: is32Bit(int64(off1)+int64(off2))
22162 // result: (SUBQmodify [off1+off2] {sym} base val mem)
22163 for {
22164 off1 := auxIntToInt32(v.AuxInt)
22165 sym := auxToSym(v.Aux)
22166 if v_0.Op != OpAMD64ADDQconst {
22167 break
22168 }
22169 off2 := auxIntToInt32(v_0.AuxInt)
22170 base := v_0.Args[0]
22171 val := v_1
22172 mem := v_2
22173 if !(is32Bit(int64(off1) + int64(off2))) {
22174 break
22175 }
22176 v.reset(OpAMD64SUBQmodify)
22177 v.AuxInt = int32ToAuxInt(off1 + off2)
22178 v.Aux = symToAux(sym)
22179 v.AddArg3(base, val, mem)
22180 return true
22181 }
22182 // match: (SUBQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
22183 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
22184 // result: (SUBQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
22185 for {
22186 off1 := auxIntToInt32(v.AuxInt)
22187 sym1 := auxToSym(v.Aux)
22188 if v_0.Op != OpAMD64LEAQ {
22189 break
22190 }
22191 off2 := auxIntToInt32(v_0.AuxInt)
22192 sym2 := auxToSym(v_0.Aux)
22193 base := v_0.Args[0]
22194 val := v_1
22195 mem := v_2
22196 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
22197 break
22198 }
22199 v.reset(OpAMD64SUBQmodify)
22200 v.AuxInt = int32ToAuxInt(off1 + off2)
22201 v.Aux = symToAux(mergeSym(sym1, sym2))
22202 v.AddArg3(base, val, mem)
22203 return true
22204 }
22205 return false
22206 }
22207 func rewriteValueAMD64_OpAMD64SUBSD(v *Value) bool {
22208 v_1 := v.Args[1]
22209 v_0 := v.Args[0]
22210 // match: (SUBSD x l:(MOVSDload [off] {sym} ptr mem))
22211 // cond: canMergeLoadClobber(v, l, x) && clobber(l)
22212 // result: (SUBSDload x [off] {sym} ptr mem)
22213 for {
22214 x := v_0
22215 l := v_1
22216 if l.Op != OpAMD64MOVSDload {
22217 break
22218 }
22219 off := auxIntToInt32(l.AuxInt)
22220 sym := auxToSym(l.Aux)
22221 mem := l.Args[1]
22222 ptr := l.Args[0]
22223 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
22224 break
22225 }
22226 v.reset(OpAMD64SUBSDload)
22227 v.AuxInt = int32ToAuxInt(off)
22228 v.Aux = symToAux(sym)
22229 v.AddArg3(x, ptr, mem)
22230 return true
22231 }
22232 return false
22233 }
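// Besides the usual offset folding, SUBSDload forwards a value that was just
// stored to the same address, moving it from the integer to the floating
// domain with MOVQi2f instead of reloading it. The shape it matches,
// reconstructed from the code below in rule notation:
//
//	(SUBSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _))
//	=> (SUBSD x (MOVQi2f y))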
22234 func rewriteValueAMD64_OpAMD64SUBSDload(v *Value) bool {
22235 v_2 := v.Args[2]
22236 v_1 := v.Args[1]
22237 v_0 := v.Args[0]
22238 b := v.Block
22239 typ := &b.Func.Config.Types
22240 // match: (SUBSDload [off1] {sym} val (ADDQconst [off2] base) mem)
22241 // cond: is32Bit(int64(off1)+int64(off2))
22242 // result: (SUBSDload [off1+off2] {sym} val base mem)
22243 for {
22244 off1 := auxIntToInt32(v.AuxInt)
22245 sym := auxToSym(v.Aux)
22246 val := v_0
22247 if v_1.Op != OpAMD64ADDQconst {
22248 break
22249 }
22250 off2 := auxIntToInt32(v_1.AuxInt)
22251 base := v_1.Args[0]
22252 mem := v_2
22253 if !(is32Bit(int64(off1) + int64(off2))) {
22254 break
22255 }
22256 v.reset(OpAMD64SUBSDload)
22257 v.AuxInt = int32ToAuxInt(off1 + off2)
22258 v.Aux = symToAux(sym)
22259 v.AddArg3(val, base, mem)
22260 return true
22261 }
22262 // match: (SUBSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
22263 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
22264 // result: (SUBSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
22265 for {
22266 off1 := auxIntToInt32(v.AuxInt)
22267 sym1 := auxToSym(v.Aux)
22268 val := v_0
22269 if v_1.Op != OpAMD64LEAQ {
22270 break
22271 }
22272 off2 := auxIntToInt32(v_1.AuxInt)
22273 sym2 := auxToSym(v_1.Aux)
22274 base := v_1.Args[0]
22275 mem := v_2
22276 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
22277 break
22278 }
22279 v.reset(OpAMD64SUBSDload)
22280 v.AuxInt = int32ToAuxInt(off1 + off2)
22281 v.Aux = symToAux(mergeSym(sym1, sym2))
22282 v.AddArg3(val, base, mem)
22283 return true
22284 }
22285 // match: (SUBSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _))
22286 // result: (SUBSD x (MOVQi2f y))
22287 for {
22288 off := auxIntToInt32(v.AuxInt)
22289 sym := auxToSym(v.Aux)
22290 x := v_0
22291 ptr := v_1
22292 if v_2.Op != OpAMD64MOVQstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
22293 break
22294 }
22295 y := v_2.Args[1]
22296 if ptr != v_2.Args[0] {
22297 break
22298 }
22299 v.reset(OpAMD64SUBSD)
22300 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQi2f, typ.Float64)
22301 v0.AddArg(y)
22302 v.AddArg2(x, v0)
22303 return true
22304 }
22305 return false
22306 }
22307 func rewriteValueAMD64_OpAMD64SUBSS(v *Value) bool {
22308 v_1 := v.Args[1]
22309 v_0 := v.Args[0]
22310 // match: (SUBSS x l:(MOVSSload [off] {sym} ptr mem))
22311 // cond: canMergeLoadClobber(v, l, x) && clobber(l)
22312 // result: (SUBSSload x [off] {sym} ptr mem)
22313 for {
22314 x := v_0
22315 l := v_1
22316 if l.Op != OpAMD64MOVSSload {
22317 break
22318 }
22319 off := auxIntToInt32(l.AuxInt)
22320 sym := auxToSym(l.Aux)
22321 mem := l.Args[1]
22322 ptr := l.Args[0]
22323 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
22324 break
22325 }
22326 v.reset(OpAMD64SUBSSload)
22327 v.AuxInt = int32ToAuxInt(off)
22328 v.Aux = symToAux(sym)
22329 v.AddArg3(x, ptr, mem)
22330 return true
22331 }
22332 return false
22333 }
22334 func rewriteValueAMD64_OpAMD64SUBSSload(v *Value) bool {
22335 v_2 := v.Args[2]
22336 v_1 := v.Args[1]
22337 v_0 := v.Args[0]
22338 b := v.Block
22339 typ := &b.Func.Config.Types
22340 // match: (SUBSSload [off1] {sym} val (ADDQconst [off2] base) mem)
22341 // cond: is32Bit(int64(off1)+int64(off2))
22342 // result: (SUBSSload [off1+off2] {sym} val base mem)
22343 for {
22344 off1 := auxIntToInt32(v.AuxInt)
22345 sym := auxToSym(v.Aux)
22346 val := v_0
22347 if v_1.Op != OpAMD64ADDQconst {
22348 break
22349 }
22350 off2 := auxIntToInt32(v_1.AuxInt)
22351 base := v_1.Args[0]
22352 mem := v_2
22353 if !(is32Bit(int64(off1) + int64(off2))) {
22354 break
22355 }
22356 v.reset(OpAMD64SUBSSload)
22357 v.AuxInt = int32ToAuxInt(off1 + off2)
22358 v.Aux = symToAux(sym)
22359 v.AddArg3(val, base, mem)
22360 return true
22361 }
22362 // match: (SUBSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
22363 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
22364 // result: (SUBSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
22365 for {
22366 off1 := auxIntToInt32(v.AuxInt)
22367 sym1 := auxToSym(v.Aux)
22368 val := v_0
22369 if v_1.Op != OpAMD64LEAQ {
22370 break
22371 }
22372 off2 := auxIntToInt32(v_1.AuxInt)
22373 sym2 := auxToSym(v_1.Aux)
22374 base := v_1.Args[0]
22375 mem := v_2
22376 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
22377 break
22378 }
22379 v.reset(OpAMD64SUBSSload)
22380 v.AuxInt = int32ToAuxInt(off1 + off2)
22381 v.Aux = symToAux(mergeSym(sym1, sym2))
22382 v.AddArg3(val, base, mem)
22383 return true
22384 }
22385 // match: (SUBSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _))
22386 // result: (SUBSS x (MOVLi2f y))
22387 for {
22388 off := auxIntToInt32(v.AuxInt)
22389 sym := auxToSym(v.Aux)
22390 x := v_0
22391 ptr := v_1
22392 if v_2.Op != OpAMD64MOVLstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
22393 break
22394 }
22395 y := v_2.Args[1]
22396 if ptr != v_2.Args[0] {
22397 break
22398 }
22399 v.reset(OpAMD64SUBSS)
22400 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLi2f, typ.Float32)
22401 v0.AddArg(y)
22402 v.AddArg2(x, v0)
22403 return true
22404 }
22405 return false
22406 }
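// TESTB and its wider siblings fold a constant operand into TESTBconst, and
// turn "load a byte, TEST it against itself" into a compare-with-zero that
// reads memory directly. The inner _i0 loop is how generated matchers try
// both operand orders of a commutative op. Illustrative Go (assuming the
// load feeds only this test):
//
//	if *p != 0 { ... } // MOVBload + TESTB l l becomes CMPBconstload [0] of p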
22407 func rewriteValueAMD64_OpAMD64TESTB(v *Value) bool {
22408 v_1 := v.Args[1]
22409 v_0 := v.Args[0]
22410 b := v.Block
22411 // match: (TESTB (MOVLconst [c]) x)
22412 // result: (TESTBconst [int8(c)] x)
22413 for {
22414 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
22415 if v_0.Op != OpAMD64MOVLconst {
22416 continue
22417 }
22418 c := auxIntToInt32(v_0.AuxInt)
22419 x := v_1
22420 v.reset(OpAMD64TESTBconst)
22421 v.AuxInt = int8ToAuxInt(int8(c))
22422 v.AddArg(x)
22423 return true
22424 }
22425 break
22426 }
22427 // match: (TESTB l:(MOVBload {sym} [off] ptr mem) l2)
22428 // cond: l == l2 && l.Uses == 2 && clobber(l)
22429 // result: @l.Block (CMPBconstload {sym} [makeValAndOff(0, off)] ptr mem)
22430 for {
22431 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
22432 l := v_0
22433 if l.Op != OpAMD64MOVBload {
22434 continue
22435 }
22436 off := auxIntToInt32(l.AuxInt)
22437 sym := auxToSym(l.Aux)
22438 mem := l.Args[1]
22439 ptr := l.Args[0]
22440 l2 := v_1
22441 if !(l == l2 && l.Uses == 2 && clobber(l)) {
22442 continue
22443 }
22444 b = l.Block
22445 v0 := b.NewValue0(l.Pos, OpAMD64CMPBconstload, types.TypeFlags)
22446 v.copyOf(v0)
22447 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, off))
22448 v0.Aux = symToAux(sym)
22449 v0.AddArg2(ptr, mem)
22450 return true
22451 }
22452 break
22453 }
22454 return false
22455 }
22456 func rewriteValueAMD64_OpAMD64TESTBconst(v *Value) bool {
22457 v_0 := v.Args[0]
22458 // match: (TESTBconst [-1] x)
22459 // cond: x.Op != OpAMD64MOVLconst
22460 // result: (TESTB x x)
22461 for {
22462 if auxIntToInt8(v.AuxInt) != -1 {
22463 break
22464 }
22465 x := v_0
22466 if !(x.Op != OpAMD64MOVLconst) {
22467 break
22468 }
22469 v.reset(OpAMD64TESTB)
22470 v.AddArg2(x, x)
22471 return true
22472 }
22473 return false
22474 }
22475 func rewriteValueAMD64_OpAMD64TESTL(v *Value) bool {
22476 v_1 := v.Args[1]
22477 v_0 := v.Args[0]
22478 b := v.Block
22479 // match: (TESTL (MOVLconst [c]) x)
22480 // result: (TESTLconst [c] x)
22481 for {
22482 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
22483 if v_0.Op != OpAMD64MOVLconst {
22484 continue
22485 }
22486 c := auxIntToInt32(v_0.AuxInt)
22487 x := v_1
22488 v.reset(OpAMD64TESTLconst)
22489 v.AuxInt = int32ToAuxInt(c)
22490 v.AddArg(x)
22491 return true
22492 }
22493 break
22494 }
22495 // match: (TESTL l:(MOVLload {sym} [off] ptr mem) l2)
22496 // cond: l == l2 && l.Uses == 2 && clobber(l)
22497 // result: @l.Block (CMPLconstload {sym} [makeValAndOff(0, off)] ptr mem)
22498 for {
22499 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
22500 l := v_0
22501 if l.Op != OpAMD64MOVLload {
22502 continue
22503 }
22504 off := auxIntToInt32(l.AuxInt)
22505 sym := auxToSym(l.Aux)
22506 mem := l.Args[1]
22507 ptr := l.Args[0]
22508 l2 := v_1
22509 if !(l == l2 && l.Uses == 2 && clobber(l)) {
22510 continue
22511 }
22512 b = l.Block
22513 v0 := b.NewValue0(l.Pos, OpAMD64CMPLconstload, types.TypeFlags)
22514 v.copyOf(v0)
22515 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, off))
22516 v0.Aux = symToAux(sym)
22517 v0.AddArg2(ptr, mem)
22518 return true
22519 }
22520 break
22521 }
22522 // match: (TESTL a:(ANDLload [off] {sym} x ptr mem) a)
22523 // cond: a.Uses == 2 && a.Block == v.Block && clobber(a)
22524 // result: (TESTL (MOVLload <a.Type> [off] {sym} ptr mem) x)
22525 for {
22526 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
22527 a := v_0
22528 if a.Op != OpAMD64ANDLload {
22529 continue
22530 }
22531 off := auxIntToInt32(a.AuxInt)
22532 sym := auxToSym(a.Aux)
22533 mem := a.Args[2]
22534 x := a.Args[0]
22535 ptr := a.Args[1]
22536 if a != v_1 || !(a.Uses == 2 && a.Block == v.Block && clobber(a)) {
22537 continue
22538 }
22539 v.reset(OpAMD64TESTL)
22540 v0 := b.NewValue0(a.Pos, OpAMD64MOVLload, a.Type)
22541 v0.AuxInt = int32ToAuxInt(off)
22542 v0.Aux = symToAux(sym)
22543 v0.AddArg2(ptr, mem)
22544 v.AddArg2(v0, x)
22545 return true
22546 }
22547 break
22548 }
22549 return false
22550 }
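// TESTLconst of a matching constant is decided at compile time: TEST derives
// its flags from the AND of its operands, and with both equal to c those
// flags depend only on c itself. c == 0 yields FlagEQ, c < 0 FlagLT_UGT
// (negative when read signed, large when read unsigned), c > 0 FlagGT_UGT.
// The [-1] rule restates "test all bits of x" as the canonical TESTL x x.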
22551 func rewriteValueAMD64_OpAMD64TESTLconst(v *Value) bool {
22552 v_0 := v.Args[0]
22553 // match: (TESTLconst [c] (MOVLconst [c]))
22554 // cond: c == 0
22555 // result: (FlagEQ)
22556 for {
22557 c := auxIntToInt32(v.AuxInt)
22558 if v_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0.AuxInt) != c || !(c == 0) {
22559 break
22560 }
22561 v.reset(OpAMD64FlagEQ)
22562 return true
22563 }
22564 // match: (TESTLconst [c] (MOVLconst [c]))
22565 // cond: c < 0
22566 // result: (FlagLT_UGT)
22567 for {
22568 c := auxIntToInt32(v.AuxInt)
22569 if v_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0.AuxInt) != c || !(c < 0) {
22570 break
22571 }
22572 v.reset(OpAMD64FlagLT_UGT)
22573 return true
22574 }
22575
22576 // match: (TESTLconst [c] (MOVLconst [c]))
22577 // cond: c > 0
22578 // result: (FlagGT_UGT)
22579 c := auxIntToInt32(v.AuxInt)
22580 if v_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0.AuxInt) != c || !(c > 0) {
22581 break
22582 }
22583 v.reset(OpAMD64FlagGT_UGT)
22584 return true
22585 }
22586
22587 // match: (TESTLconst [-1] x)
22588 // cond: x.Op != OpAMD64MOVLconst
22589 // result: (TESTL x x)
22590 if auxIntToInt32(v.AuxInt) != -1 {
22591 break
22592 }
22593 x := v_0
22594 if !(x.Op != OpAMD64MOVLconst) {
22595 break
22596 }
22597 v.reset(OpAMD64TESTL)
22598 v.AddArg2(x, x)
22599 return true
22600 }
22601 return false
22602 }
22603 func rewriteValueAMD64_OpAMD64TESTQ(v *Value) bool {
22604 v_1 := v.Args[1]
22605 v_0 := v.Args[0]
22606 b := v.Block
22607 // match: (TESTQ (MOVQconst [c]) x)
22608 // cond: is32Bit(c)
22609 // result: (TESTQconst [int32(c)] x)
22610 for {
22611 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
22612 if v_0.Op != OpAMD64MOVQconst {
22613 continue
22614 }
22615 c := auxIntToInt64(v_0.AuxInt)
22616 x := v_1
22617 if !(is32Bit(c)) {
22618 continue
22619 }
22620 v.reset(OpAMD64TESTQconst)
22621 v.AuxInt = int32ToAuxInt(int32(c))
22622 v.AddArg(x)
22623 return true
22624 }
22625 break
22626 }
22627 // match: (TESTQ l:(MOVQload {sym} [off] ptr mem) l2)
22628 // cond: l == l2 && l.Uses == 2 && clobber(l)
22629 // result: @l.Block (CMPQconstload {sym} [makeValAndOff(0, off)] ptr mem)
22630 for {
22631 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
22632 l := v_0
22633 if l.Op != OpAMD64MOVQload {
22634 continue
22635 }
22636 off := auxIntToInt32(l.AuxInt)
22637 sym := auxToSym(l.Aux)
22638 mem := l.Args[1]
22639 ptr := l.Args[0]
22640 l2 := v_1
22641 if !(l == l2 && l.Uses == 2 && clobber(l)) {
22642 continue
22643 }
22644 b = l.Block
22645 v0 := b.NewValue0(l.Pos, OpAMD64CMPQconstload, types.TypeFlags)
22646 v.copyOf(v0)
22647 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, off))
22648 v0.Aux = symToAux(sym)
22649 v0.AddArg2(ptr, mem)
22650 return true
22651 }
22652 break
22653 }
22654 // match: (TESTQ a:(ANDQload [off] {sym} x ptr mem) a)
22655 // cond: a.Uses == 2 && a.Block == v.Block && clobber(a)
22656 // result: (TESTQ (MOVQload <a.Type> [off] {sym} ptr mem) x)
22657 for {
22658 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
22659 a := v_0
22660 if a.Op != OpAMD64ANDQload {
22661 continue
22662 }
22663 off := auxIntToInt32(a.AuxInt)
22664 sym := auxToSym(a.Aux)
22665 mem := a.Args[2]
22666 x := a.Args[0]
22667 ptr := a.Args[1]
22668 if a != v_1 || !(a.Uses == 2 && a.Block == v.Block && clobber(a)) {
22669 continue
22670 }
22671 v.reset(OpAMD64TESTQ)
22672 v0 := b.NewValue0(a.Pos, OpAMD64MOVQload, a.Type)
22673 v0.AuxInt = int32ToAuxInt(off)
22674 v0.Aux = symToAux(sym)
22675 v0.AddArg2(ptr, mem)
22676 v.AddArg2(v0, x)
22677 return true
22678 }
22679 break
22680 }
22681 return false
22682 }
22683 func rewriteValueAMD64_OpAMD64TESTQconst(v *Value) bool {
22684 v_0 := v.Args[0]
22685 // match: (TESTQconst [c] (MOVQconst [d]))
22686 // cond: int64(c) == d && c == 0
22687 // result: (FlagEQ)
22688 for {
22689 c := auxIntToInt32(v.AuxInt)
22690 if v_0.Op != OpAMD64MOVQconst {
22691 break
22692 }
22693 d := auxIntToInt64(v_0.AuxInt)
22694 if !(int64(c) == d && c == 0) {
22695 break
22696 }
22697 v.reset(OpAMD64FlagEQ)
22698 return true
22699 }
22700 // match: (TESTQconst [c] (MOVQconst [d]))
22701 // cond: int64(c) == d && c < 0
22702 // result: (FlagLT_UGT)
22703 for {
22704 c := auxIntToInt32(v.AuxInt)
22705 if v_0.Op != OpAMD64MOVQconst {
22706 break
22707 }
22708 d := auxIntToInt64(v_0.AuxInt)
22709 if !(int64(c) == d && c < 0) {
22710 break
22711 }
22712 v.reset(OpAMD64FlagLT_UGT)
22713 return true
22714 }
22715 // match: (TESTQconst [c] (MOVQconst [d]))
22716 // cond: int64(c) == d && c > 0
22717 // result: (FlagGT_UGT)
22718 for {
22719 c := auxIntToInt32(v.AuxInt)
22720 if v_0.Op != OpAMD64MOVQconst {
22721 break
22722 }
22723 d := auxIntToInt64(v_0.AuxInt)
22724 if !(int64(c) == d && c > 0) {
22725 break
22726 }
22727 v.reset(OpAMD64FlagGT_UGT)
22728 return true
22729 }
22730 // match: (TESTQconst [-1] x)
22731 // cond: x.Op != OpAMD64MOVQconst
22732 // result: (TESTQ x x)
22733 for {
22734 if auxIntToInt32(v.AuxInt) != -1 {
22735 break
22736 }
22737 x := v_0
22738 if !(x.Op != OpAMD64MOVQconst) {
22739 break
22740 }
22741 v.reset(OpAMD64TESTQ)
22742 v.AddArg2(x, x)
22743 return true
22744 }
22745 return false
22746 }
22747 func rewriteValueAMD64_OpAMD64TESTW(v *Value) bool {
22748 v_1 := v.Args[1]
22749 v_0 := v.Args[0]
22750 b := v.Block
22751 // match: (TESTW (MOVLconst [c]) x)
22752 // result: (TESTWconst [int16(c)] x)
22753 for {
22754 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
22755 if v_0.Op != OpAMD64MOVLconst {
22756 continue
22757 }
22758 c := auxIntToInt32(v_0.AuxInt)
22759 x := v_1
22760 v.reset(OpAMD64TESTWconst)
22761 v.AuxInt = int16ToAuxInt(int16(c))
22762 v.AddArg(x)
22763 return true
22764 }
22765 break
22766 }
22767 // match: (TESTW l:(MOVWload {sym} [off] ptr mem) l2)
22768 // cond: l == l2 && l.Uses == 2 && clobber(l)
22769 // result: @l.Block (CMPWconstload {sym} [makeValAndOff(0, off)] ptr mem)
22770 for {
22771 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
22772 l := v_0
22773 if l.Op != OpAMD64MOVWload {
22774 continue
22775 }
22776 off := auxIntToInt32(l.AuxInt)
22777 sym := auxToSym(l.Aux)
22778 mem := l.Args[1]
22779 ptr := l.Args[0]
22780 l2 := v_1
22781 if !(l == l2 && l.Uses == 2 && clobber(l)) {
22782 continue
22783 }
22784 b = l.Block
22785 v0 := b.NewValue0(l.Pos, OpAMD64CMPWconstload, types.TypeFlags)
22786 v.copyOf(v0)
22787 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, off))
22788 v0.Aux = symToAux(sym)
22789 v0.AddArg2(ptr, mem)
22790 return true
22791 }
22792 break
22793 }
22794 return false
22795 }
22796 func rewriteValueAMD64_OpAMD64TESTWconst(v *Value) bool {
22797 v_0 := v.Args[0]
22798 // match: (TESTWconst [-1] x)
22799 // cond: x.Op != OpAMD64MOVLconst
22800 // result: (TESTW x x)
22801 for {
22802 if auxIntToInt16(v.AuxInt) != -1 {
22803 break
22804 }
22805 x := v_0
22806 if !(x.Op != OpAMD64MOVLconst) {
22807 break
22808 }
22809 v.reset(OpAMD64TESTW)
22810 v.AddArg2(x, x)
22811 return true
22812 }
22813 return false
22814 }
22815 func rewriteValueAMD64_OpAMD64XADDLlock(v *Value) bool {
22816 v_2 := v.Args[2]
22817 v_1 := v.Args[1]
22818 v_0 := v.Args[0]
22819 // match: (XADDLlock [off1] {sym} val (ADDQconst [off2] ptr) mem)
22820 // cond: is32Bit(int64(off1)+int64(off2))
22821 // result: (XADDLlock [off1+off2] {sym} val ptr mem)
22822 for {
22823 off1 := auxIntToInt32(v.AuxInt)
22824 sym := auxToSym(v.Aux)
22825 val := v_0
22826 if v_1.Op != OpAMD64ADDQconst {
22827 break
22828 }
22829 off2 := auxIntToInt32(v_1.AuxInt)
22830 ptr := v_1.Args[0]
22831 mem := v_2
22832 if !(is32Bit(int64(off1) + int64(off2))) {
22833 break
22834 }
22835 v.reset(OpAMD64XADDLlock)
22836 v.AuxInt = int32ToAuxInt(off1 + off2)
22837 v.Aux = symToAux(sym)
22838 v.AddArg3(val, ptr, mem)
22839 return true
22840 }
22841 return false
22842 }
22843 func rewriteValueAMD64_OpAMD64XADDQlock(v *Value) bool {
22844 v_2 := v.Args[2]
22845 v_1 := v.Args[1]
22846 v_0 := v.Args[0]
22847 // match: (XADDQlock [off1] {sym} val (ADDQconst [off2] ptr) mem)
22848 // cond: is32Bit(int64(off1)+int64(off2))
22849 // result: (XADDQlock [off1+off2] {sym} val ptr mem)
22850 for {
22851 off1 := auxIntToInt32(v.AuxInt)
22852 sym := auxToSym(v.Aux)
22853 val := v_0
22854 if v_1.Op != OpAMD64ADDQconst {
22855 break
22856 }
22857 off2 := auxIntToInt32(v_1.AuxInt)
22858 ptr := v_1.Args[0]
22859 mem := v_2
22860 if !(is32Bit(int64(off1) + int64(off2))) {
22861 break
22862 }
22863 v.reset(OpAMD64XADDQlock)
22864 v.AuxInt = int32ToAuxInt(off1 + off2)
22865 v.Aux = symToAux(sym)
22866 v.AddArg3(val, ptr, mem)
22867 return true
22868 }
22869 return false
22870 }
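// XCHGL and XCHGQ fold ADDQconst and LEAQ address arithmetic into their
// offsets, like the ordinary load/store rules above, but the LEAQ case also
// requires ptr.Op != OpSB, declining to fold a static symbol base into the
// implicitly locked exchange.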
22871 func rewriteValueAMD64_OpAMD64XCHGL(v *Value) bool {
22872 v_2 := v.Args[2]
22873 v_1 := v.Args[1]
22874 v_0 := v.Args[0]
22875 // match: (XCHGL [off1] {sym} val (ADDQconst [off2] ptr) mem)
22876 // cond: is32Bit(int64(off1)+int64(off2))
22877 // result: (XCHGL [off1+off2] {sym} val ptr mem)
22878 for {
22879 off1 := auxIntToInt32(v.AuxInt)
22880 sym := auxToSym(v.Aux)
22881 val := v_0
22882 if v_1.Op != OpAMD64ADDQconst {
22883 break
22884 }
22885 off2 := auxIntToInt32(v_1.AuxInt)
22886 ptr := v_1.Args[0]
22887 mem := v_2
22888 if !(is32Bit(int64(off1) + int64(off2))) {
22889 break
22890 }
22891 v.reset(OpAMD64XCHGL)
22892 v.AuxInt = int32ToAuxInt(off1 + off2)
22893 v.Aux = symToAux(sym)
22894 v.AddArg3(val, ptr, mem)
22895 return true
22896 }
22897 // match: (XCHGL [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem)
22898 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB
22899 // result: (XCHGL [off1+off2] {mergeSym(sym1,sym2)} val ptr mem)
22900 for {
22901 off1 := auxIntToInt32(v.AuxInt)
22902 sym1 := auxToSym(v.Aux)
22903 val := v_0
22904 if v_1.Op != OpAMD64LEAQ {
22905 break
22906 }
22907 off2 := auxIntToInt32(v_1.AuxInt)
22908 sym2 := auxToSym(v_1.Aux)
22909 ptr := v_1.Args[0]
22910 mem := v_2
22911 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) {
22912 break
22913 }
22914 v.reset(OpAMD64XCHGL)
22915 v.AuxInt = int32ToAuxInt(off1 + off2)
22916 v.Aux = symToAux(mergeSym(sym1, sym2))
22917 v.AddArg3(val, ptr, mem)
22918 return true
22919 }
22920 return false
22921 }
22922 func rewriteValueAMD64_OpAMD64XCHGQ(v *Value) bool {
22923 v_2 := v.Args[2]
22924 v_1 := v.Args[1]
22925 v_0 := v.Args[0]
22926 // match: (XCHGQ [off1] {sym} val (ADDQconst [off2] ptr) mem)
22927 // cond: is32Bit(int64(off1)+int64(off2))
22928 // result: (XCHGQ [off1+off2] {sym} val ptr mem)
22929 for {
22930 off1 := auxIntToInt32(v.AuxInt)
22931 sym := auxToSym(v.Aux)
22932 val := v_0
22933 if v_1.Op != OpAMD64ADDQconst {
22934 break
22935 }
22936 off2 := auxIntToInt32(v_1.AuxInt)
22937 ptr := v_1.Args[0]
22938 mem := v_2
22939 if !(is32Bit(int64(off1) + int64(off2))) {
22940 break
22941 }
22942 v.reset(OpAMD64XCHGQ)
22943 v.AuxInt = int32ToAuxInt(off1 + off2)
22944 v.Aux = symToAux(sym)
22945 v.AddArg3(val, ptr, mem)
22946 return true
22947 }
22948 // match: (XCHGQ [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem)
22949 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB
22950 // result: (XCHGQ [off1+off2] {mergeSym(sym1,sym2)} val ptr mem)
22951 for {
22952 off1 := auxIntToInt32(v.AuxInt)
22953 sym1 := auxToSym(v.Aux)
22954 val := v_0
22955 if v_1.Op != OpAMD64LEAQ {
22956 break
22957 }
22958 off2 := auxIntToInt32(v_1.AuxInt)
22959 sym2 := auxToSym(v_1.Aux)
22960 ptr := v_1.Args[0]
22961 mem := v_2
22962 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) {
22963 break
22964 }
22965 v.reset(OpAMD64XCHGQ)
22966 v.AuxInt = int32ToAuxInt(off1 + off2)
22967 v.Aux = symToAux(mergeSym(sym1, sym2))
22968 v.AddArg3(val, ptr, mem)
22969 return true
22970 }
22971 return false
22972 }
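// XORL recognizes bit-twiddling idioms. Illustrative Go (not from this
// file):
//
//	var x, y uint32
//	_ = x ^ (1 << y) // BTCL x y: complement bit y
//	_ = x ^ x        // MOVLconst [0]
//	_ = x ^ (x - 1)  // BLSMSKL x on GOAMD64 >= 3: mask up to and including
//	                 // the lowest set bit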
22973 func rewriteValueAMD64_OpAMD64XORL(v *Value) bool {
22974 v_1 := v.Args[1]
22975 v_0 := v.Args[0]
22976 // match: (XORL (SHLL (MOVLconst [1]) y) x)
22977 // result: (BTCL x y)
22978 for {
22979 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
22980 if v_0.Op != OpAMD64SHLL {
22981 continue
22982 }
22983 y := v_0.Args[1]
22984 v_0_0 := v_0.Args[0]
22985 if v_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0.AuxInt) != 1 {
22986 continue
22987 }
22988 x := v_1
22989 v.reset(OpAMD64BTCL)
22990 v.AddArg2(x, y)
22991 return true
22992 }
22993 break
22994 }
22995 // match: (XORL x (MOVLconst [c]))
22996 // result: (XORLconst [c] x)
22997 for {
22998 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
22999 x := v_0
23000 if v_1.Op != OpAMD64MOVLconst {
23001 continue
23002 }
23003 c := auxIntToInt32(v_1.AuxInt)
23004 v.reset(OpAMD64XORLconst)
23005 v.AuxInt = int32ToAuxInt(c)
23006 v.AddArg(x)
23007 return true
23008 }
23009 break
23010 }
23011 // match: (XORL x x)
23012 // result: (MOVLconst [0])
23013 for {
23014 x := v_0
23015 if x != v_1 {
23016 break
23017 }
23018 v.reset(OpAMD64MOVLconst)
23019 v.AuxInt = int32ToAuxInt(0)
23020 return true
23021 }
23022 // match: (XORL x l:(MOVLload [off] {sym} ptr mem))
23023 // cond: canMergeLoadClobber(v, l, x) && clobber(l)
23024 // result: (XORLload x [off] {sym} ptr mem)
23025 for {
23026 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
23027 x := v_0
23028 l := v_1
23029 if l.Op != OpAMD64MOVLload {
23030 continue
23031 }
23032 off := auxIntToInt32(l.AuxInt)
23033 sym := auxToSym(l.Aux)
23034 mem := l.Args[1]
23035 ptr := l.Args[0]
23036 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
23037 continue
23038 }
23039 v.reset(OpAMD64XORLload)
23040 v.AuxInt = int32ToAuxInt(off)
23041 v.Aux = symToAux(sym)
23042 v.AddArg3(x, ptr, mem)
23043 return true
23044 }
23045 break
23046 }
23047 // match: (XORL x (ADDLconst [-1] x))
23048 // cond: buildcfg.GOAMD64 >= 3
23049 // result: (BLSMSKL x)
23050 for {
23051 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
23052 x := v_0
23053 if v_1.Op != OpAMD64ADDLconst || auxIntToInt32(v_1.AuxInt) != -1 || x != v_1.Args[0] || !(buildcfg.GOAMD64 >= 3) {
23054 continue
23055 }
23056 v.reset(OpAMD64BLSMSKL)
23057 v.AddArg(x)
23058 return true
23059 }
23060 break
23061 }
23062 return false
23063 }
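// XORLconst [1] flips a boolean produced by SETcc, so each setter is
// replaced by its negation rather than materializing the XOR. Illustrative
// Go (not from this file):
//
//	b := x == y // SETEQ
//	_ = !b      // XORLconst [1] (SETEQ ...) rewrites to SETNE ...
//
// The remaining rules fold chained and constant xors arithmetically.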
23064 func rewriteValueAMD64_OpAMD64XORLconst(v *Value) bool {
23065 v_0 := v.Args[0]
23066 // match: (XORLconst [1] (SETNE x))
23067 // result: (SETEQ x)
23068 for {
23069 if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETNE {
23070 break
23071 }
23072 x := v_0.Args[0]
23073 v.reset(OpAMD64SETEQ)
23074 v.AddArg(x)
23075 return true
23076 }
23077 // match: (XORLconst [1] (SETEQ x))
23078 // result: (SETNE x)
23079 for {
23080 if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETEQ {
23081 break
23082 }
23083 x := v_0.Args[0]
23084 v.reset(OpAMD64SETNE)
23085 v.AddArg(x)
23086 return true
23087 }
23088 // match: (XORLconst [1] (SETL x))
23089 // result: (SETGE x)
23090 for {
23091 if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETL {
23092 break
23093 }
23094 x := v_0.Args[0]
23095 v.reset(OpAMD64SETGE)
23096 v.AddArg(x)
23097 return true
23098 }
23099 // match: (XORLconst [1] (SETGE x))
23100 // result: (SETL x)
23101 for {
23102 if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETGE {
23103 break
23104 }
23105 x := v_0.Args[0]
23106 v.reset(OpAMD64SETL)
23107 v.AddArg(x)
23108 return true
23109 }
23110 // match: (XORLconst [1] (SETLE x))
23111 // result: (SETG x)
23112 for {
23113 if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETLE {
23114 break
23115 }
23116 x := v_0.Args[0]
23117 v.reset(OpAMD64SETG)
23118 v.AddArg(x)
23119 return true
23120 }
23121 // match: (XORLconst [1] (SETG x))
23122 // result: (SETLE x)
23123 for {
23124 if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETG {
23125 break
23126 }
23127 x := v_0.Args[0]
23128 v.reset(OpAMD64SETLE)
23129 v.AddArg(x)
23130 return true
23131 }
23132 // match: (XORLconst [1] (SETB x))
23133 // result: (SETAE x)
23134 for {
23135 if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETB {
23136 break
23137 }
23138 x := v_0.Args[0]
23139 v.reset(OpAMD64SETAE)
23140 v.AddArg(x)
23141 return true
23142 }
23143 // match: (XORLconst [1] (SETAE x))
23144 // result: (SETB x)
23145 for {
23146 if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETAE {
23147 break
23148 }
23149 x := v_0.Args[0]
23150 v.reset(OpAMD64SETB)
23151 v.AddArg(x)
23152 return true
23153 }
23154 // match: (XORLconst [1] (SETBE x))
23155 // result: (SETA x)
23156 for {
23157 if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETBE {
23158 break
23159 }
23160 x := v_0.Args[0]
23161 v.reset(OpAMD64SETA)
23162 v.AddArg(x)
23163 return true
23164 }
23165 // match: (XORLconst [1] (SETA x))
23166 // result: (SETBE x)
23167 for {
23168 if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETA {
23169 break
23170 }
23171 x := v_0.Args[0]
23172 v.reset(OpAMD64SETBE)
23173 v.AddArg(x)
23174 return true
23175 }
23176 // match: (XORLconst [c] (XORLconst [d] x))
23177 // result: (XORLconst [c^d] x)
23178 for {
23179 c := auxIntToInt32(v.AuxInt)
23180 if v_0.Op != OpAMD64XORLconst {
23181 break
23182 }
23183 d := auxIntToInt32(v_0.AuxInt)
23184 x := v_0.Args[0]
23185 v.reset(OpAMD64XORLconst)
23186 v.AuxInt = int32ToAuxInt(c ^ d)
23187 v.AddArg(x)
23188 return true
23189 }
23190 // match: (XORLconst [c] x)
23191 // cond: c==0
23192 // result: x
23193 for {
23194 c := auxIntToInt32(v.AuxInt)
23195 x := v_0
23196 if !(c == 0) {
23197 break
23198 }
23199 v.copyOf(x)
23200 return true
23201 }
23202 // match: (XORLconst [c] (MOVLconst [d]))
23203 // result: (MOVLconst [c^d])
23204 for {
23205 c := auxIntToInt32(v.AuxInt)
23206 if v_0.Op != OpAMD64MOVLconst {
23207 break
23208 }
23209 d := auxIntToInt32(v_0.AuxInt)
23210 v.reset(OpAMD64MOVLconst)
23211 v.AuxInt = int32ToAuxInt(c ^ d)
23212 return true
23213 }
23214 return false
23215 }
23216 func rewriteValueAMD64_OpAMD64XORLconstmodify(v *Value) bool {
23217 v_1 := v.Args[1]
23218 v_0 := v.Args[0]
23219 // match: (XORLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
23220 // cond: ValAndOff(valoff1).canAdd32(off2)
23221 // result: (XORLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
23222 for {
23223 valoff1 := auxIntToValAndOff(v.AuxInt)
23224 sym := auxToSym(v.Aux)
23225 if v_0.Op != OpAMD64ADDQconst {
23226 break
23227 }
23228 off2 := auxIntToInt32(v_0.AuxInt)
23229 base := v_0.Args[0]
23230 mem := v_1
23231 if !(ValAndOff(valoff1).canAdd32(off2)) {
23232 break
23233 }
23234 v.reset(OpAMD64XORLconstmodify)
23235 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
23236 v.Aux = symToAux(sym)
23237 v.AddArg2(base, mem)
23238 return true
23239 }
23240 // match: (XORLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
23241 // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
23242 // result: (XORLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
23243 for {
23244 valoff1 := auxIntToValAndOff(v.AuxInt)
23245 sym1 := auxToSym(v.Aux)
23246 if v_0.Op != OpAMD64LEAQ {
23247 break
23248 }
23249 off2 := auxIntToInt32(v_0.AuxInt)
23250 sym2 := auxToSym(v_0.Aux)
23251 base := v_0.Args[0]
23252 mem := v_1
23253 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
23254 break
23255 }
23256 v.reset(OpAMD64XORLconstmodify)
23257 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
23258 v.Aux = symToAux(mergeSym(sym1, sym2))
23259 v.AddArg2(base, mem)
23260 return true
23261 }
23262 return false
23263 }
23264 func rewriteValueAMD64_OpAMD64XORLload(v *Value) bool {
23265 v_2 := v.Args[2]
23266 v_1 := v.Args[1]
23267 v_0 := v.Args[0]
23268 b := v.Block
23269 typ := &b.Func.Config.Types
23270 // match: (XORLload [off1] {sym} val (ADDQconst [off2] base) mem)
23271 // cond: is32Bit(int64(off1)+int64(off2))
23272 // result: (XORLload [off1+off2] {sym} val base mem)
23273 for {
23274 off1 := auxIntToInt32(v.AuxInt)
23275 sym := auxToSym(v.Aux)
23276 val := v_0
23277 if v_1.Op != OpAMD64ADDQconst {
23278 break
23279 }
23280 off2 := auxIntToInt32(v_1.AuxInt)
23281 base := v_1.Args[0]
23282 mem := v_2
23283 if !(is32Bit(int64(off1) + int64(off2))) {
23284 break
23285 }
23286 v.reset(OpAMD64XORLload)
23287 v.AuxInt = int32ToAuxInt(off1 + off2)
23288 v.Aux = symToAux(sym)
23289 v.AddArg3(val, base, mem)
23290 return true
23291 }
23292 // match: (XORLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
23293 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
23294 // result: (XORLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
23295 for {
23296 off1 := auxIntToInt32(v.AuxInt)
23297 sym1 := auxToSym(v.Aux)
23298 val := v_0
23299 if v_1.Op != OpAMD64LEAQ {
23300 break
23301 }
23302 off2 := auxIntToInt32(v_1.AuxInt)
23303 sym2 := auxToSym(v_1.Aux)
23304 base := v_1.Args[0]
23305 mem := v_2
23306 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
23307 break
23308 }
23309 v.reset(OpAMD64XORLload)
23310 v.AuxInt = int32ToAuxInt(off1 + off2)
23311 v.Aux = symToAux(mergeSym(sym1, sym2))
23312 v.AddArg3(val, base, mem)
23313 return true
23314 }
23315 // match: (XORLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
23316 // result: (XORL x (MOVLf2i y))
23317 for {
23318 off := auxIntToInt32(v.AuxInt)
23319 sym := auxToSym(v.Aux)
23320 x := v_0
23321 ptr := v_1
23322 if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
23323 break
23324 }
23325 y := v_2.Args[1]
23326 if ptr != v_2.Args[0] {
23327 break
23328 }
23329 v.reset(OpAMD64XORL)
23330 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
23331 v0.AddArg(y)
23332 v.AddArg2(x, v0)
23333 return true
23334 }
23335 return false
23336 }
23337 func rewriteValueAMD64_OpAMD64XORLmodify(v *Value) bool {
23338 v_2 := v.Args[2]
23339 v_1 := v.Args[1]
23340 v_0 := v.Args[0]
23341 // match: (XORLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
23342 // cond: is32Bit(int64(off1)+int64(off2))
23343 // result: (XORLmodify [off1+off2] {sym} base val mem)
23344 for {
23345 off1 := auxIntToInt32(v.AuxInt)
23346 sym := auxToSym(v.Aux)
23347 if v_0.Op != OpAMD64ADDQconst {
23348 break
23349 }
23350 off2 := auxIntToInt32(v_0.AuxInt)
23351 base := v_0.Args[0]
23352 val := v_1
23353 mem := v_2
23354 if !(is32Bit(int64(off1) + int64(off2))) {
23355 break
23356 }
23357 v.reset(OpAMD64XORLmodify)
23358 v.AuxInt = int32ToAuxInt(off1 + off2)
23359 v.Aux = symToAux(sym)
23360 v.AddArg3(base, val, mem)
23361 return true
23362 }
23363 // match: (XORLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
23364 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
23365 // result: (XORLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
23366 for {
23367 off1 := auxIntToInt32(v.AuxInt)
23368 sym1 := auxToSym(v.Aux)
23369 if v_0.Op != OpAMD64LEAQ {
23370 break
23371 }
23372 off2 := auxIntToInt32(v_0.AuxInt)
23373 sym2 := auxToSym(v_0.Aux)
23374 base := v_0.Args[0]
23375 val := v_1
23376 mem := v_2
23377 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
23378 break
23379 }
23380 v.reset(OpAMD64XORLmodify)
23381 v.AuxInt = int32ToAuxInt(off1 + off2)
23382 v.Aux = symToAux(mergeSym(sym1, sym2))
23383 v.AddArg3(base, val, mem)
23384 return true
23385 }
23386 return false
23387 }
23388 func rewriteValueAMD64_OpAMD64XORQ(v *Value) bool {
23389 v_1 := v.Args[1]
23390 v_0 := v.Args[0]
23391 // match: (XORQ (SHLQ (MOVQconst [1]) y) x)
23392 // result: (BTCQ x y)
23393 for {
23394 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
23395 if v_0.Op != OpAMD64SHLQ {
23396 continue
23397 }
23398 y := v_0.Args[1]
23399 v_0_0 := v_0.Args[0]
23400 if v_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0.AuxInt) != 1 {
23401 continue
23402 }
23403 x := v_1
23404 v.reset(OpAMD64BTCQ)
23405 v.AddArg2(x, y)
23406 return true
23407 }
23408 break
23409 }
23410 // match: (XORQ (MOVQconst [c]) x)
23411 // cond: isUint64PowerOfTwo(c) && uint64(c) >= 1<<31
23412 // result: (BTCQconst [int8(log64(c))] x)
23413 for {
23414 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
23415 if v_0.Op != OpAMD64MOVQconst {
23416 continue
23417 }
23418 c := auxIntToInt64(v_0.AuxInt)
23419 x := v_1
23420 if !(isUint64PowerOfTwo(c) && uint64(c) >= 1<<31) {
23421 continue
23422 }
23423 v.reset(OpAMD64BTCQconst)
23424 v.AuxInt = int8ToAuxInt(int8(log64(c)))
23425 v.AddArg(x)
23426 return true
23427 }
23428 break
23429 }
23430 // match: (XORQ x (MOVQconst [c]))
23431 // cond: is32Bit(c)
23432 // result: (XORQconst [int32(c)] x)
23433 for {
23434 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
23435 x := v_0
23436 if v_1.Op != OpAMD64MOVQconst {
23437 continue
23438 }
23439 c := auxIntToInt64(v_1.AuxInt)
23440 if !(is32Bit(c)) {
23441 continue
23442 }
23443 v.reset(OpAMD64XORQconst)
23444 v.AuxInt = int32ToAuxInt(int32(c))
23445 v.AddArg(x)
23446 return true
23447 }
23448 break
23449 }
23450 // match: (XORQ x x)
23451 // result: (MOVQconst [0])
23452 for {
23453 x := v_0
23454 if x != v_1 {
23455 break
23456 }
23457 v.reset(OpAMD64MOVQconst)
23458 v.AuxInt = int64ToAuxInt(0)
23459 return true
23460 }
23461 // match: (XORQ x l:(MOVQload [off] {sym} ptr mem))
23462 // cond: canMergeLoadClobber(v, l, x) && clobber(l)
23463 // result: (XORQload x [off] {sym} ptr mem)
23464 for {
23465 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
23466 x := v_0
23467 l := v_1
23468 if l.Op != OpAMD64MOVQload {
23469 continue
23470 }
23471 off := auxIntToInt32(l.AuxInt)
23472 sym := auxToSym(l.Aux)
23473 mem := l.Args[1]
23474 ptr := l.Args[0]
23475 if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
23476 continue
23477 }
23478 v.reset(OpAMD64XORQload)
23479 v.AuxInt = int32ToAuxInt(off)
23480 v.Aux = symToAux(sym)
23481 v.AddArg3(x, ptr, mem)
23482 return true
23483 }
23484 break
23485 }
23486 // match: (XORQ x (ADDQconst [-1] x))
23487 // cond: buildcfg.GOAMD64 >= 3
23488 // result: (BLSMSKQ x)
23489 for {
23490 for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
23491 x := v_0
23492 if v_1.Op != OpAMD64ADDQconst || auxIntToInt32(v_1.AuxInt) != -1 || x != v_1.Args[0] || !(buildcfg.GOAMD64 >= 3) {
23493 continue
23494 }
23495 v.reset(OpAMD64BLSMSKQ)
23496 v.AddArg(x)
23497 return true
23498 }
23499 break
23500 }
23501 return false
23502 }
23503 func rewriteValueAMD64_OpAMD64XORQconst(v *Value) bool {
23504 v_0 := v.Args[0]
23505 // match: (XORQconst [c] (XORQconst [d] x))
23506 // result: (XORQconst [c^d] x)
23507 for {
23508 c := auxIntToInt32(v.AuxInt)
23509 if v_0.Op != OpAMD64XORQconst {
23510 break
23511 }
23512 d := auxIntToInt32(v_0.AuxInt)
23513 x := v_0.Args[0]
23514 v.reset(OpAMD64XORQconst)
23515 v.AuxInt = int32ToAuxInt(c ^ d)
23516 v.AddArg(x)
23517 return true
23518 }
23519 // match: (XORQconst [0] x)
23520 // result: x
23521 for {
23522 if auxIntToInt32(v.AuxInt) != 0 {
23523 break
23524 }
23525 x := v_0
23526 v.copyOf(x)
23527 return true
23528 }
23529 // match: (XORQconst [c] (MOVQconst [d]))
23530 // result: (MOVQconst [int64(c)^d])
23531 for {
23532 c := auxIntToInt32(v.AuxInt)
23533 if v_0.Op != OpAMD64MOVQconst {
23534 break
23535 }
23536 d := auxIntToInt64(v_0.AuxInt)
23537 v.reset(OpAMD64MOVQconst)
23538 v.AuxInt = int64ToAuxInt(int64(c) ^ d)
23539 return true
23540 }
23541 return false
23542 }
23543 func rewriteValueAMD64_OpAMD64XORQconstmodify(v *Value) bool {
23544 v_1 := v.Args[1]
23545 v_0 := v.Args[0]
23546 // match: (XORQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
23547 // cond: ValAndOff(valoff1).canAdd32(off2)
23548 // result: (XORQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
23549 for {
23550 valoff1 := auxIntToValAndOff(v.AuxInt)
23551 sym := auxToSym(v.Aux)
23552 if v_0.Op != OpAMD64ADDQconst {
23553 break
23554 }
23555 off2 := auxIntToInt32(v_0.AuxInt)
23556 base := v_0.Args[0]
23557 mem := v_1
23558 if !(ValAndOff(valoff1).canAdd32(off2)) {
23559 break
23560 }
23561 v.reset(OpAMD64XORQconstmodify)
23562 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
23563 v.Aux = symToAux(sym)
23564 v.AddArg2(base, mem)
23565 return true
23566 }
23567 // match: (XORQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
23568 // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
23569 // result: (XORQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
23570 for {
23571 valoff1 := auxIntToValAndOff(v.AuxInt)
23572 sym1 := auxToSym(v.Aux)
23573 if v_0.Op != OpAMD64LEAQ {
23574 break
23575 }
23576 off2 := auxIntToInt32(v_0.AuxInt)
23577 sym2 := auxToSym(v_0.Aux)
23578 base := v_0.Args[0]
23579 mem := v_1
23580 if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
23581 break
23582 }
23583 v.reset(OpAMD64XORQconstmodify)
23584 v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
23585 v.Aux = symToAux(mergeSym(sym1, sym2))
23586 v.AddArg2(base, mem)
23587 return true
23588 }
23589 return false
23590 }
23591 func rewriteValueAMD64_OpAMD64XORQload(v *Value) bool {
23592 v_2 := v.Args[2]
23593 v_1 := v.Args[1]
23594 v_0 := v.Args[0]
23595 b := v.Block
23596 typ := &b.Func.Config.Types
23597 // match: (XORQload [off1] {sym} val (ADDQconst [off2] base) mem)
23598 // cond: is32Bit(int64(off1)+int64(off2))
23599 // result: (XORQload [off1+off2] {sym} val base mem)
23600 for {
23601 off1 := auxIntToInt32(v.AuxInt)
23602 sym := auxToSym(v.Aux)
23603 val := v_0
23604 if v_1.Op != OpAMD64ADDQconst {
23605 break
23606 }
23607 off2 := auxIntToInt32(v_1.AuxInt)
23608 base := v_1.Args[0]
23609 mem := v_2
23610 if !(is32Bit(int64(off1) + int64(off2))) {
23611 break
23612 }
23613 v.reset(OpAMD64XORQload)
23614 v.AuxInt = int32ToAuxInt(off1 + off2)
23615 v.Aux = symToAux(sym)
23616 v.AddArg3(val, base, mem)
23617 return true
23618 }
23619 // match: (XORQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
23620 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
23621 // result: (XORQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
23622 for {
23623 off1 := auxIntToInt32(v.AuxInt)
23624 sym1 := auxToSym(v.Aux)
23625 val := v_0
23626 if v_1.Op != OpAMD64LEAQ {
23627 break
23628 }
23629 off2 := auxIntToInt32(v_1.AuxInt)
23630 sym2 := auxToSym(v_1.Aux)
23631 base := v_1.Args[0]
23632 mem := v_2
23633 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
23634 break
23635 }
23636 v.reset(OpAMD64XORQload)
23637 v.AuxInt = int32ToAuxInt(off1 + off2)
23638 v.Aux = symToAux(mergeSym(sym1, sym2))
23639 v.AddArg3(val, base, mem)
23640 return true
23641 }
23642 // match: (XORQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
23643 // result: (XORQ x (MOVQf2i y))
23644 for {
23645 off := auxIntToInt32(v.AuxInt)
23646 sym := auxToSym(v.Aux)
23647 x := v_0
23648 ptr := v_1
23649 if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
23650 break
23651 }
23652 y := v_2.Args[1]
23653 if ptr != v_2.Args[0] {
23654 break
23655 }
23656 v.reset(OpAMD64XORQ)
23657 v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
23658 v0.AddArg(y)
23659 v.AddArg2(x, v0)
23660 return true
23661 }
23662 return false
23663 }
23664 func rewriteValueAMD64_OpAMD64XORQmodify(v *Value) bool {
23665 v_2 := v.Args[2]
23666 v_1 := v.Args[1]
23667 v_0 := v.Args[0]
23668 // match: (XORQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
23669 // cond: is32Bit(int64(off1)+int64(off2))
23670 // result: (XORQmodify [off1+off2] {sym} base val mem)
23671 for {
23672 off1 := auxIntToInt32(v.AuxInt)
23673 sym := auxToSym(v.Aux)
23674 if v_0.Op != OpAMD64ADDQconst {
23675 break
23676 }
23677 off2 := auxIntToInt32(v_0.AuxInt)
23678 base := v_0.Args[0]
23679 val := v_1
23680 mem := v_2
23681 if !(is32Bit(int64(off1) + int64(off2))) {
23682 break
23683 }
23684 v.reset(OpAMD64XORQmodify)
23685 v.AuxInt = int32ToAuxInt(off1 + off2)
23686 v.Aux = symToAux(sym)
23687 v.AddArg3(base, val, mem)
23688 return true
23689 }
23690 // match: (XORQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
23691 // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
23692 // result: (XORQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
23693 for {
23694 off1 := auxIntToInt32(v.AuxInt)
23695 sym1 := auxToSym(v.Aux)
23696 if v_0.Op != OpAMD64LEAQ {
23697 break
23698 }
23699 off2 := auxIntToInt32(v_0.AuxInt)
23700 sym2 := auxToSym(v_0.Aux)
23701 base := v_0.Args[0]
23702 val := v_1
23703 mem := v_2
23704 if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
23705 break
23706 }
23707 v.reset(OpAMD64XORQmodify)
23708 v.AuxInt = int32ToAuxInt(off1 + off2)
23709 v.Aux = symToAux(mergeSym(sym1, sym2))
23710 v.AddArg3(base, val, mem)
23711 return true
23712 }
23713 return false
23714 }
23715 func rewriteValueAMD64_OpAddr(v *Value) bool {
23716 v_0 := v.Args[0]
23717 // match: (Addr {sym} base)
23718 // result: (LEAQ {sym} base)
23719 for {
23720 sym := auxToSym(v.Aux)
23721 base := v_0
23722 v.reset(OpAMD64LEAQ)
23723 v.Aux = symToAux(sym)
23724 v.AddArg(base)
23725 return true
23726 }
23727 }
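// From here on, machine-independent ops are lowered to AMD64 ones. Atomic
// add uses LOCK XADD, which returns the memory location's old value, while
// Go's atomic add must return the new value; AddTupleFirst32 is the pseudo-op
// that re-adds the operand to the first (value) element of XADD's result
// tuple. In rule notation:
//
//	(AtomicAdd32 ptr val mem)
//	=> (AddTupleFirst32 val (XADDLlock val ptr mem))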
23728 func rewriteValueAMD64_OpAtomicAdd32(v *Value) bool {
23729 v_2 := v.Args[2]
23730 v_1 := v.Args[1]
23731 v_0 := v.Args[0]
23732 b := v.Block
23733 typ := &b.Func.Config.Types
23734 // match: (AtomicAdd32 ptr val mem)
23735 // result: (AddTupleFirst32 val (XADDLlock val ptr mem))
23736 for {
23737 ptr := v_0
23738 val := v_1
23739 mem := v_2
23740 v.reset(OpAMD64AddTupleFirst32)
23741 v0 := b.NewValue0(v.Pos, OpAMD64XADDLlock, types.NewTuple(typ.UInt32, types.TypeMem))
23742 v0.AddArg3(val, ptr, mem)
23743 v.AddArg2(val, v0)
23744 return true
23745 }
23746 }
23747 func rewriteValueAMD64_OpAtomicAdd64(v *Value) bool {
23748 v_2 := v.Args[2]
23749 v_1 := v.Args[1]
23750 v_0 := v.Args[0]
23751 b := v.Block
23752 typ := &b.Func.Config.Types
23753 // match: (AtomicAdd64 ptr val mem)
23754 // result: (AddTupleFirst64 val (XADDQlock val ptr mem))
23755 for {
23756 ptr := v_0
23757 val := v_1
23758 mem := v_2
23759 v.reset(OpAMD64AddTupleFirst64)
23760 v0 := b.NewValue0(v.Pos, OpAMD64XADDQlock, types.NewTuple(typ.UInt64, types.TypeMem))
23761 v0.AddArg3(val, ptr, mem)
23762 v.AddArg2(val, v0)
23763 return true
23764 }
23765 }
23766 func rewriteValueAMD64_OpAtomicAnd32(v *Value) bool {
23767 v_2 := v.Args[2]
23768 v_1 := v.Args[1]
23769 v_0 := v.Args[0]
23770 // match: (AtomicAnd32 ptr val mem)
23771 // result: (ANDLlock ptr val mem)
23772 for {
23773 ptr := v_0
23774 val := v_1
23775 mem := v_2
23776 v.reset(OpAMD64ANDLlock)
23777 v.AddArg3(ptr, val, mem)
23778 return true
23779 }
23780 }
23781 func rewriteValueAMD64_OpAtomicAnd32value(v *Value) bool {
23782 v_2 := v.Args[2]
23783 v_1 := v.Args[1]
23784 v_0 := v.Args[0]
23785 // match: (AtomicAnd32value ptr val mem)
23786 // result: (LoweredAtomicAnd32 ptr val mem)
23787 for {
23788 ptr := v_0
23789 val := v_1
23790 mem := v_2
23791 v.reset(OpAMD64LoweredAtomicAnd32)
23792 v.AddArg3(ptr, val, mem)
23793 return true
23794 }
23795 }
23796 func rewriteValueAMD64_OpAtomicAnd64value(v *Value) bool {
23797 v_2 := v.Args[2]
23798 v_1 := v.Args[1]
23799 v_0 := v.Args[0]
23800 // match: (AtomicAnd64value ptr val mem)
23801 // result: (LoweredAtomicAnd64 ptr val mem)
23802 for {
23803 ptr := v_0
23804 val := v_1
23805 mem := v_2
23806 v.reset(OpAMD64LoweredAtomicAnd64)
23807 v.AddArg3(ptr, val, mem)
23808 return true
23809 }
23810 }
23811 func rewriteValueAMD64_OpAtomicAnd8(v *Value) bool {
23812 v_2 := v.Args[2]
23813 v_1 := v.Args[1]
23814 v_0 := v.Args[0]
23815 // match: (AtomicAnd8 ptr val mem)
23816 // result: (ANDBlock ptr val mem)
23817 for {
23818 ptr := v_0
23819 val := v_1
23820 mem := v_2
23821 v.reset(OpAMD64ANDBlock)
23822 v.AddArg3(ptr, val, mem)
23823 return true
23824 }
23825 }
23826 func rewriteValueAMD64_OpAtomicCompareAndSwap32(v *Value) bool {
23827 v_3 := v.Args[3]
23828 v_2 := v.Args[2]
23829 v_1 := v.Args[1]
23830 v_0 := v.Args[0]
23831 // match: (AtomicCompareAndSwap32 ptr old new_ mem)
23832 // result: (CMPXCHGLlock ptr old new_ mem)
23833 for {
23834 ptr := v_0
23835 old := v_1
23836 new_ := v_2
23837 mem := v_3
23838 v.reset(OpAMD64CMPXCHGLlock)
23839 v.AddArg4(ptr, old, new_, mem)
23840 return true
23841 }
23842 }
23843 func rewriteValueAMD64_OpAtomicCompareAndSwap64(v *Value) bool {
23844 v_3 := v.Args[3]
23845 v_2 := v.Args[2]
23846 v_1 := v.Args[1]
23847 v_0 := v.Args[0]
23848 // match: (AtomicCompareAndSwap64 ptr old new_ mem)
23849 // result: (CMPXCHGQlock ptr old new_ mem)
23850 for {
23851 ptr := v_0
23852 old := v_1
23853 new_ := v_2
23854 mem := v_3
23855 v.reset(OpAMD64CMPXCHGQlock)
23856 v.AddArg4(ptr, old, new_, mem)
23857 return true
23858 }
23859 }
23860 func rewriteValueAMD64_OpAtomicExchange32(v *Value) bool {
23861 v_2 := v.Args[2]
23862 v_1 := v.Args[1]
23863 v_0 := v.Args[0]
23864 // match: (AtomicExchange32 ptr val mem)
23865 // result: (XCHGL val ptr mem)
23866 for {
23867 ptr := v_0
23868 val := v_1
23869 mem := v_2
23870 v.reset(OpAMD64XCHGL)
23871 v.AddArg3(val, ptr, mem)
23872 return true
23873 }
23874 }
23875 func rewriteValueAMD64_OpAtomicExchange64(v *Value) bool {
23876 v_2 := v.Args[2]
23877 v_1 := v.Args[1]
23878 v_0 := v.Args[0]
23879 // match: (AtomicExchange64 ptr val mem)
23880 // result: (XCHGQ val ptr mem)
23881 for {
23882 ptr := v_0
23883 val := v_1
23884 mem := v_2
23885 v.reset(OpAMD64XCHGQ)
23886 v.AddArg3(val, ptr, mem)
23887 return true
23888 }
23889 }
23890 func rewriteValueAMD64_OpAtomicExchange8(v *Value) bool {
23891 v_2 := v.Args[2]
23892 v_1 := v.Args[1]
23893 v_0 := v.Args[0]
23894 // match: (AtomicExchange8 ptr val mem)
23895 // result: (XCHGB val ptr mem)
23896 for {
23897 ptr := v_0
23898 val := v_1
23899 mem := v_2
23900 v.reset(OpAMD64XCHGB)
23901 v.AddArg3(val, ptr, mem)
23902 return true
23903 }
23904 }
23905 func rewriteValueAMD64_OpAtomicLoad32(v *Value) bool {
23906 v_1 := v.Args[1]
23907 v_0 := v.Args[0]
23908 // match: (AtomicLoad32 ptr mem)
23909 // result: (MOVLatomicload ptr mem)
23910 for {
23911 ptr := v_0
23912 mem := v_1
23913 v.reset(OpAMD64MOVLatomicload)
23914 v.AddArg2(ptr, mem)
23915 return true
23916 }
23917 }
23918 func rewriteValueAMD64_OpAtomicLoad64(v *Value) bool {
23919 v_1 := v.Args[1]
23920 v_0 := v.Args[0]
23921 // match: (AtomicLoad64 ptr mem)
23922 // result: (MOVQatomicload ptr mem)
23923 for {
23924 ptr := v_0
23925 mem := v_1
23926 v.reset(OpAMD64MOVQatomicload)
23927 v.AddArg2(ptr, mem)
23928 return true
23929 }
23930 }
23931 func rewriteValueAMD64_OpAtomicLoad8(v *Value) bool {
23932 v_1 := v.Args[1]
23933 v_0 := v.Args[0]
23934 // match: (AtomicLoad8 ptr mem)
23935 // result: (MOVBatomicload ptr mem)
23936 for {
23937 ptr := v_0
23938 mem := v_1
23939 v.reset(OpAMD64MOVBatomicload)
23940 v.AddArg2(ptr, mem)
23941 return true
23942 }
23943 }
23944 func rewriteValueAMD64_OpAtomicLoadPtr(v *Value) bool {
23945 v_1 := v.Args[1]
23946 v_0 := v.Args[0]
23947 // match: (AtomicLoadPtr ptr mem)
23948 // result: (MOVQatomicload ptr mem)
23949 for {
23950 ptr := v_0
23951 mem := v_1
23952 v.reset(OpAMD64MOVQatomicload)
23953 v.AddArg2(ptr, mem)
23954 return true
23955 }
23956 }
23957 func rewriteValueAMD64_OpAtomicOr32(v *Value) bool {
23958 v_2 := v.Args[2]
23959 v_1 := v.Args[1]
23960 v_0 := v.Args[0]
23961 // match: (AtomicOr32 ptr val mem)
23962 // result: (ORLlock ptr val mem)
23963 for {
23964 ptr := v_0
23965 val := v_1
23966 mem := v_2
23967 v.reset(OpAMD64ORLlock)
23968 v.AddArg3(ptr, val, mem)
23969 return true
23970 }
23971 }
23972 func rewriteValueAMD64_OpAtomicOr32value(v *Value) bool {
23973 v_2 := v.Args[2]
23974 v_1 := v.Args[1]
23975 v_0 := v.Args[0]
23976 // match: (AtomicOr32value ptr val mem)
23977 // result: (LoweredAtomicOr32 ptr val mem)
23978 for {
23979 ptr := v_0
23980 val := v_1
23981 mem := v_2
23982 v.reset(OpAMD64LoweredAtomicOr32)
23983 v.AddArg3(ptr, val, mem)
23984 return true
23985 }
23986 }
23987 func rewriteValueAMD64_OpAtomicOr64value(v *Value) bool {
23988 v_2 := v.Args[2]
23989 v_1 := v.Args[1]
23990 v_0 := v.Args[0]
23991 // match: (AtomicOr64value ptr val mem)
23992 // result: (LoweredAtomicOr64 ptr val mem)
23993 for {
23994 ptr := v_0
23995 val := v_1
23996 mem := v_2
23997 v.reset(OpAMD64LoweredAtomicOr64)
23998 v.AddArg3(ptr, val, mem)
23999 return true
24000 }
24001 }
24002 func rewriteValueAMD64_OpAtomicOr8(v *Value) bool {
24003 v_2 := v.Args[2]
24004 v_1 := v.Args[1]
24005 v_0 := v.Args[0]
24006 // match: (AtomicOr8 ptr val mem)
24007 // result: (ORBlock ptr val mem)
24008 for {
24009 ptr := v_0
24010 val := v_1
24011 mem := v_2
24012 v.reset(OpAMD64ORBlock)
24013 v.AddArg3(ptr, val, mem)
24014 return true
24015 }
24016 }
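// Atomic stores lower to XCHG rather than MOV: an x86 exchange with memory
// is implicitly locked, which provides the sequentially consistent ordering
// the generic op demands, and Select1 projects the memory state out of the
// tuple-typed exchange.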
24017 func rewriteValueAMD64_OpAtomicStore32(v *Value) bool {
24018 v_2 := v.Args[2]
24019 v_1 := v.Args[1]
24020 v_0 := v.Args[0]
24021 b := v.Block
24022 typ := &b.Func.Config.Types
24023 // match: (AtomicStore32 ptr val mem)
24024 // result: (Select1 (XCHGL <types.NewTuple(typ.UInt32,types.TypeMem)> val ptr mem))
24025 for {
24026 ptr := v_0
24027 val := v_1
24028 mem := v_2
24029 v.reset(OpSelect1)
24030 v0 := b.NewValue0(v.Pos, OpAMD64XCHGL, types.NewTuple(typ.UInt32, types.TypeMem))
24031 v0.AddArg3(val, ptr, mem)
24032 v.AddArg(v0)
24033 return true
24034 }
24035 }
24036 func rewriteValueAMD64_OpAtomicStore64(v *Value) bool {
24037 v_2 := v.Args[2]
24038 v_1 := v.Args[1]
24039 v_0 := v.Args[0]
24040 b := v.Block
24041 typ := &b.Func.Config.Types
24042 // match: (AtomicStore64 ptr val mem)
24043 // result: (Select1 (XCHGQ <types.NewTuple(typ.UInt64,types.TypeMem)> val ptr mem))
24044 for {
24045 ptr := v_0
24046 val := v_1
24047 mem := v_2
24048 v.reset(OpSelect1)
24049 v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.UInt64, types.TypeMem))
24050 v0.AddArg3(val, ptr, mem)
24051 v.AddArg(v0)
24052 return true
24053 }
24054 }
24055 func rewriteValueAMD64_OpAtomicStore8(v *Value) bool {
24056 v_2 := v.Args[2]
24057 v_1 := v.Args[1]
24058 v_0 := v.Args[0]
24059 b := v.Block
24060 typ := &b.Func.Config.Types
24061 // match: (AtomicStore8 ptr val mem)
24062 // result: (Select1 (XCHGB <types.NewTuple(typ.UInt8,types.TypeMem)> val ptr mem))
24063 for {
24064 ptr := v_0
24065 val := v_1
24066 mem := v_2
24067 v.reset(OpSelect1)
24068 v0 := b.NewValue0(v.Pos, OpAMD64XCHGB, types.NewTuple(typ.UInt8, types.TypeMem))
24069 v0.AddArg3(val, ptr, mem)
24070 v.AddArg(v0)
24071 return true
24072 }
24073 }
24074 func rewriteValueAMD64_OpAtomicStorePtrNoWB(v *Value) bool {
24075 v_2 := v.Args[2]
24076 v_1 := v.Args[1]
24077 v_0 := v.Args[0]
24078 b := v.Block
24079 typ := &b.Func.Config.Types
24080 // match: (AtomicStorePtrNoWB ptr val mem)
24081 // result: (Select1 (XCHGQ <types.NewTuple(typ.BytePtr,types.TypeMem)> val ptr mem))
24082 for {
24083 ptr := v_0
24084 val := v_1
24085 mem := v_2
24086 v.reset(OpSelect1)
24087 v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.BytePtr, types.TypeMem))
24088 v0.AddArg3(val, ptr, mem)
24089 v.AddArg(v0)
24090 return true
24091 }
24092 }
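// The BitLen lowerings choose by microarchitecture level. Below GOAMD64 v3
// they use BSR, whose result is undefined for a zero input, so the 8/16-bit
// forms bias the zero-extended input to 2*x+1 via LEA, keeping it nonzero
// while preserving the answer. At v3 and above, LZCNT is zero-safe and
// BitLen becomes "width minus leading zeros". Roughly:
//
//	bitlen16(x) = bsr(2*uint32(x) + 1)    // pre-v3; bsr(1) = 0 handles x == 0
//	bitlen16(x) = 32 - lzcnt32(uint32(x)) // v3+: NEGQ (ADDQconst [-32] (LZCNTL x))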
24093 func rewriteValueAMD64_OpBitLen16(v *Value) bool {
24094 v_0 := v.Args[0]
24095 b := v.Block
24096 typ := &b.Func.Config.Types
24097 // match: (BitLen16 x)
24098 // cond: buildcfg.GOAMD64 < 3
24099 // result: (BSRL (LEAL1 <typ.UInt32> [1] (MOVWQZX <typ.UInt32> x) (MOVWQZX <typ.UInt32> x)))
24100 for {
24101 x := v_0
24102 if !(buildcfg.GOAMD64 < 3) {
24103 break
24104 }
24105 v.reset(OpAMD64BSRL)
24106 v0 := b.NewValue0(v.Pos, OpAMD64LEAL1, typ.UInt32)
24107 v0.AuxInt = int32ToAuxInt(1)
24108 v1 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt32)
24109 v1.AddArg(x)
24110 v0.AddArg2(v1, v1)
24111 v.AddArg(v0)
24112 return true
24113 }
24114 // match: (BitLen16 <t> x)
24115 // cond: buildcfg.GOAMD64 >= 3
24116 // result: (NEGQ (ADDQconst <t> [-32] (LZCNTL (MOVWQZX <x.Type> x))))
24117 for {
24118 t := v.Type
24119 x := v_0
24120 if !(buildcfg.GOAMD64 >= 3) {
24121 break
24122 }
24123 v.reset(OpAMD64NEGQ)
24124 v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t)
24125 v0.AuxInt = int32ToAuxInt(-32)
24126 v1 := b.NewValue0(v.Pos, OpAMD64LZCNTL, typ.UInt32)
24127 v2 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, x.Type)
24128 v2.AddArg(x)
24129 v1.AddArg(v2)
24130 v0.AddArg(v1)
24131 v.AddArg(v0)
24132 return true
24133 }
24134 return false
24135 }
24136 func rewriteValueAMD64_OpBitLen32(v *Value) bool {
24137 v_0 := v.Args[0]
24138 b := v.Block
24139 typ := &b.Func.Config.Types
24140 // match: (BitLen32 x)
24141 // cond: buildcfg.GOAMD64 < 3
24142 // result: (Select0 (BSRQ (LEAQ1 <typ.UInt64> [1] (MOVLQZX <typ.UInt64> x) (MOVLQZX <typ.UInt64> x))))
24143 for {
24144 x := v_0
24145 if !(buildcfg.GOAMD64 < 3) {
24146 break
24147 }
24148 v.reset(OpSelect0)
24149 v0 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags))
24150 v1 := b.NewValue0(v.Pos, OpAMD64LEAQ1, typ.UInt64)
24151 v1.AuxInt = int32ToAuxInt(1)
24152 v2 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64)
24153 v2.AddArg(x)
24154 v1.AddArg2(v2, v2)
24155 v0.AddArg(v1)
24156 v.AddArg(v0)
24157 return true
24158 }
24159
24160
24161
24162 for {
24163 t := v.Type
24164 x := v_0
24165 if !(buildcfg.GOAMD64 >= 3) {
24166 break
24167 }
24168 v.reset(OpAMD64NEGQ)
24169 v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t)
24170 v0.AuxInt = int32ToAuxInt(-32)
24171 v1 := b.NewValue0(v.Pos, OpAMD64LZCNTL, typ.UInt32)
24172 v1.AddArg(x)
24173 v0.AddArg(v1)
24174 v.AddArg(v0)
24175 return true
24176 }
24177 return false
24178 }
24179 func rewriteValueAMD64_OpBitLen64(v *Value) bool {
24180 v_0 := v.Args[0]
24181 b := v.Block
24182 typ := &b.Func.Config.Types
24183
24184
24185
24186 for {
24187 t := v.Type
24188 x := v_0
24189 if !(buildcfg.GOAMD64 < 3) {
24190 break
24191 }
24192 v.reset(OpAMD64ADDQconst)
24193 v.AuxInt = int32ToAuxInt(1)
24194 v0 := b.NewValue0(v.Pos, OpAMD64CMOVQEQ, t)
24195 v1 := b.NewValue0(v.Pos, OpSelect0, t)
24196 v2 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags))
24197 v2.AddArg(x)
24198 v1.AddArg(v2)
24199 v3 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t)
24200 v3.AuxInt = int64ToAuxInt(-1)
24201 v4 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
24202 v4.AddArg(v2)
24203 v0.AddArg3(v1, v3, v4)
24204 v.AddArg(v0)
24205 return true
24206 }
24207
24208
24209
24210 for {
24211 t := v.Type
24212 x := v_0
24213 if !(buildcfg.GOAMD64 >= 3) {
24214 break
24215 }
24216 v.reset(OpAMD64NEGQ)
24217 v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t)
24218 v0.AuxInt = int32ToAuxInt(-64)
24219 v1 := b.NewValue0(v.Pos, OpAMD64LZCNTQ, typ.UInt64)
24220 v1.AddArg(x)
24221 v0.AddArg(v1)
24222 v.AddArg(v0)
24223 return true
24224 }
24225 return false
24226 }
24227 func rewriteValueAMD64_OpBitLen8(v *Value) bool {
24228 v_0 := v.Args[0]
24229 b := v.Block
24230 typ := &b.Func.Config.Types
24231
24232
24233
24234 for {
24235 x := v_0
24236 if !(buildcfg.GOAMD64 < 3) {
24237 break
24238 }
24239 v.reset(OpAMD64BSRL)
24240 v0 := b.NewValue0(v.Pos, OpAMD64LEAL1, typ.UInt32)
24241 v0.AuxInt = int32ToAuxInt(1)
24242 v1 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt32)
24243 v1.AddArg(x)
24244 v0.AddArg2(v1, v1)
24245 v.AddArg(v0)
24246 return true
24247 }
24248
24249
24250
24251 for {
24252 t := v.Type
24253 x := v_0
24254 if !(buildcfg.GOAMD64 >= 3) {
24255 break
24256 }
24257 v.reset(OpAMD64NEGQ)
24258 v0 := b.NewValue0(v.Pos, OpAMD64ADDQconst, t)
24259 v0.AuxInt = int32ToAuxInt(-32)
24260 v1 := b.NewValue0(v.Pos, OpAMD64LZCNTL, typ.UInt32)
24261 v2 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, x.Type)
24262 v2.AddArg(x)
24263 v1.AddArg(v2)
24264 v0.AddArg(v1)
24265 v.AddArg(v0)
24266 return true
24267 }
24268 return false
24269 }
24270 func rewriteValueAMD64_OpBswap16(v *Value) bool {
24271 v_0 := v.Args[0]
24272
24273
24274 for {
24275 x := v_0
24276 v.reset(OpAMD64ROLWconst)
24277 v.AuxInt = int8ToAuxInt(8)
24278 v.AddArg(x)
24279 return true
24280 }
24281 }
24282 func rewriteValueAMD64_OpCeil(v *Value) bool {
24283 v_0 := v.Args[0]
24284
24285
24286 for {
24287 x := v_0
24288 v.reset(OpAMD64ROUNDSD)
24289 v.AuxInt = int8ToAuxInt(2)
24290 v.AddArg(x)
24291 return true
24292 }
24293 }
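
// CondSelect picks x when its third argument is true. AMD64's CMOV moves its
// second operand into the result only when the condition holds (the first
// operand is the value already in the destination), which is why every rule
// below emits the arguments in (y, x, cond) order. Conditions that are not
// already flags are widened to 64 bits and reduced to a CMP*const against
// zero in the final rules of this function.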
func rewriteValueAMD64_OpCondSelect(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (CondSelect <t> x y (SETEQ cond))
	// cond: (is64BitInt(t) || isPtr(t))
	// result: (CMOVQEQ y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETEQ {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQEQ)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETNE cond))
	// cond: (is64BitInt(t) || isPtr(t))
	// result: (CMOVQNE y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETNE {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQNE)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETL cond))
	// cond: (is64BitInt(t) || isPtr(t))
	// result: (CMOVQLT y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETL {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQLT)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETG cond))
	// cond: (is64BitInt(t) || isPtr(t))
	// result: (CMOVQGT y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETG {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQGT)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETLE cond))
	// cond: (is64BitInt(t) || isPtr(t))
	// result: (CMOVQLE y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETLE {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQLE)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETGE cond))
	// cond: (is64BitInt(t) || isPtr(t))
	// result: (CMOVQGE y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETGE {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQGE)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETA cond))
	// cond: (is64BitInt(t) || isPtr(t))
	// result: (CMOVQHI y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETA {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQHI)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETB cond))
	// cond: (is64BitInt(t) || isPtr(t))
	// result: (CMOVQCS y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETB {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQCS)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETAE cond))
	// cond: (is64BitInt(t) || isPtr(t))
	// result: (CMOVQCC y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETAE {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQCC)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETBE cond))
	// cond: (is64BitInt(t) || isPtr(t))
	// result: (CMOVQLS y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETBE {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQLS)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETEQF cond))
	// cond: (is64BitInt(t) || isPtr(t))
	// result: (CMOVQEQF y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETEQF {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQEQF)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETNEF cond))
	// cond: (is64BitInt(t) || isPtr(t))
	// result: (CMOVQNEF y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETNEF {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQNEF)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETGF cond))
	// cond: (is64BitInt(t) || isPtr(t))
	// result: (CMOVQGTF y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETGF {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQGTF)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETGEF cond))
	// cond: (is64BitInt(t) || isPtr(t))
	// result: (CMOVQGEF y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETGEF {
			break
		}
		cond := v_2.Args[0]
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64CMOVQGEF)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETEQ cond))
	// cond: is32BitInt(t)
	// result: (CMOVLEQ y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETEQ {
			break
		}
		cond := v_2.Args[0]
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLEQ)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETNE cond))
	// cond: is32BitInt(t)
	// result: (CMOVLNE y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETNE {
			break
		}
		cond := v_2.Args[0]
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLNE)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETL cond))
	// cond: is32BitInt(t)
	// result: (CMOVLLT y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETL {
			break
		}
		cond := v_2.Args[0]
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLLT)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETG cond))
	// cond: is32BitInt(t)
	// result: (CMOVLGT y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETG {
			break
		}
		cond := v_2.Args[0]
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLGT)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETLE cond))
	// cond: is32BitInt(t)
	// result: (CMOVLLE y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETLE {
			break
		}
		cond := v_2.Args[0]
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLLE)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETGE cond))
	// cond: is32BitInt(t)
	// result: (CMOVLGE y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETGE {
			break
		}
		cond := v_2.Args[0]
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLGE)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETA cond))
	// cond: is32BitInt(t)
	// result: (CMOVLHI y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETA {
			break
		}
		cond := v_2.Args[0]
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLHI)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETB cond))
	// cond: is32BitInt(t)
	// result: (CMOVLCS y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETB {
			break
		}
		cond := v_2.Args[0]
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLCS)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETAE cond))
	// cond: is32BitInt(t)
	// result: (CMOVLCC y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETAE {
			break
		}
		cond := v_2.Args[0]
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLCC)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETBE cond))
	// cond: is32BitInt(t)
	// result: (CMOVLLS y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETBE {
			break
		}
		cond := v_2.Args[0]
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLLS)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETEQF cond))
	// cond: is32BitInt(t)
	// result: (CMOVLEQF y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETEQF {
			break
		}
		cond := v_2.Args[0]
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLEQF)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETNEF cond))
	// cond: is32BitInt(t)
	// result: (CMOVLNEF y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETNEF {
			break
		}
		cond := v_2.Args[0]
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLNEF)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETGF cond))
	// cond: is32BitInt(t)
	// result: (CMOVLGTF y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETGF {
			break
		}
		cond := v_2.Args[0]
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLGTF)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETGEF cond))
	// cond: is32BitInt(t)
	// result: (CMOVLGEF y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETGEF {
			break
		}
		cond := v_2.Args[0]
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLGEF)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETEQ cond))
	// cond: is16BitInt(t)
	// result: (CMOVWEQ y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETEQ {
			break
		}
		cond := v_2.Args[0]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVWEQ)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETNE cond))
	// cond: is16BitInt(t)
	// result: (CMOVWNE y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETNE {
			break
		}
		cond := v_2.Args[0]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVWNE)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETL cond))
	// cond: is16BitInt(t)
	// result: (CMOVWLT y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETL {
			break
		}
		cond := v_2.Args[0]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVWLT)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETG cond))
	// cond: is16BitInt(t)
	// result: (CMOVWGT y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETG {
			break
		}
		cond := v_2.Args[0]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVWGT)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETLE cond))
	// cond: is16BitInt(t)
	// result: (CMOVWLE y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETLE {
			break
		}
		cond := v_2.Args[0]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVWLE)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETGE cond))
	// cond: is16BitInt(t)
	// result: (CMOVWGE y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETGE {
			break
		}
		cond := v_2.Args[0]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVWGE)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETA cond))
	// cond: is16BitInt(t)
	// result: (CMOVWHI y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETA {
			break
		}
		cond := v_2.Args[0]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVWHI)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETB cond))
	// cond: is16BitInt(t)
	// result: (CMOVWCS y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETB {
			break
		}
		cond := v_2.Args[0]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVWCS)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETAE cond))
	// cond: is16BitInt(t)
	// result: (CMOVWCC y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETAE {
			break
		}
		cond := v_2.Args[0]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVWCC)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETBE cond))
	// cond: is16BitInt(t)
	// result: (CMOVWLS y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETBE {
			break
		}
		cond := v_2.Args[0]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVWLS)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETEQF cond))
	// cond: is16BitInt(t)
	// result: (CMOVWEQF y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETEQF {
			break
		}
		cond := v_2.Args[0]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVWEQF)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETNEF cond))
	// cond: is16BitInt(t)
	// result: (CMOVWNEF y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETNEF {
			break
		}
		cond := v_2.Args[0]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVWNEF)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETGF cond))
	// cond: is16BitInt(t)
	// result: (CMOVWGTF y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETGF {
			break
		}
		cond := v_2.Args[0]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVWGTF)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y (SETGEF cond))
	// cond: is16BitInt(t)
	// result: (CMOVWGEF y x cond)
	for {
		t := v.Type
		x := v_0
		y := v_1
		if v_2.Op != OpAMD64SETGEF {
			break
		}
		cond := v_2.Args[0]
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVWGEF)
		v.AddArg3(y, x, cond)
		return true
	}
	// match: (CondSelect <t> x y check)
	// cond: !check.Type.IsFlags() && check.Type.Size() == 1
	// result: (CondSelect <t> x y (MOVBQZX <typ.UInt64> check))
	for {
		t := v.Type
		x := v_0
		y := v_1
		check := v_2
		if !(!check.Type.IsFlags() && check.Type.Size() == 1) {
			break
		}
		v.reset(OpCondSelect)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt64)
		v0.AddArg(check)
		v.AddArg3(x, y, v0)
		return true
	}
	// match: (CondSelect <t> x y check)
	// cond: !check.Type.IsFlags() && check.Type.Size() == 2
	// result: (CondSelect <t> x y (MOVWQZX <typ.UInt64> check))
	for {
		t := v.Type
		x := v_0
		y := v_1
		check := v_2
		if !(!check.Type.IsFlags() && check.Type.Size() == 2) {
			break
		}
		v.reset(OpCondSelect)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt64)
		v0.AddArg(check)
		v.AddArg3(x, y, v0)
		return true
	}
	// match: (CondSelect <t> x y check)
	// cond: !check.Type.IsFlags() && check.Type.Size() == 4
	// result: (CondSelect <t> x y (MOVLQZX <typ.UInt64> check))
	for {
		t := v.Type
		x := v_0
		y := v_1
		check := v_2
		if !(!check.Type.IsFlags() && check.Type.Size() == 4) {
			break
		}
		v.reset(OpCondSelect)
		v.Type = t
		v0 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64)
		v0.AddArg(check)
		v.AddArg3(x, y, v0)
		return true
	}
	// match: (CondSelect <t> x y check)
	// cond: !check.Type.IsFlags() && check.Type.Size() == 8 && (is64BitInt(t) || isPtr(t))
	// result: (CMOVQNE y x (CMPQconst [0] check))
	for {
		t := v.Type
		x := v_0
		y := v_1
		check := v_2
		if !(!check.Type.IsFlags() && check.Type.Size() == 8 && (is64BitInt(t) || isPtr(t))) {
			break
		}
		v.reset(OpAMD64CMOVQNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(0)
		v0.AddArg(check)
		v.AddArg3(y, x, v0)
		return true
	}
	// match: (CondSelect <t> x y check)
	// cond: !check.Type.IsFlags() && check.Type.Size() == 8 && is32BitInt(t)
	// result: (CMOVLNE y x (CMPQconst [0] check))
	for {
		t := v.Type
		x := v_0
		y := v_1
		check := v_2
		if !(!check.Type.IsFlags() && check.Type.Size() == 8 && is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVLNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(0)
		v0.AddArg(check)
		v.AddArg3(y, x, v0)
		return true
	}
	// match: (CondSelect <t> x y check)
	// cond: !check.Type.IsFlags() && check.Type.Size() == 8 && is16BitInt(t)
	// result: (CMOVWNE y x (CMPQconst [0] check))
	for {
		t := v.Type
		x := v_0
		y := v_1
		check := v_2
		if !(!check.Type.IsFlags() && check.Type.Size() == 8 && is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64CMOVWNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(0)
		v0.AddArg(check)
		v.AddArg3(y, x, v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpConst16(v *Value) bool {
	// match: (Const16 [c])
	// result: (MOVLconst [int32(c)])
	for {
		c := auxIntToInt16(v.AuxInt)
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(int32(c))
		return true
	}
}
func rewriteValueAMD64_OpConst8(v *Value) bool {
	// match: (Const8 [c])
	// result: (MOVLconst [int32(c)])
	for {
		c := auxIntToInt8(v.AuxInt)
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(int32(c))
		return true
	}
}
func rewriteValueAMD64_OpConstBool(v *Value) bool {
	// match: (ConstBool [c])
	// result: (MOVLconst [b2i32(c)])
	for {
		c := auxIntToBool(v.AuxInt)
		v.reset(OpAMD64MOVLconst)
		v.AuxInt = int32ToAuxInt(b2i32(c))
		return true
	}
}
func rewriteValueAMD64_OpConstNil(v *Value) bool {
	// match: (ConstNil)
	// result: (MOVQconst [0])
	for {
		v.reset(OpAMD64MOVQconst)
		v.AuxInt = int64ToAuxInt(0)
		return true
	}
}
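
// Ctz16 and Ctz8 (below) sidestep BSF's undefined result on zero by OR-ing
// in a constant bit just above the operand width (1<<16 or 1<<8): BSFL then
// always finds a set bit, and Ctz of zero comes out as exactly the width.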
func rewriteValueAMD64_OpCtz16(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Ctz16 x)
	// result: (BSFL (ORLconst <typ.UInt32> [1<<16] x))
	for {
		x := v_0
		v.reset(OpAMD64BSFL)
		v0 := b.NewValue0(v.Pos, OpAMD64ORLconst, typ.UInt32)
		v0.AuxInt = int32ToAuxInt(1 << 16)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpCtz16NonZero(v *Value) bool {
	v_0 := v.Args[0]
	// match: (Ctz16NonZero x)
	// cond: buildcfg.GOAMD64 >= 3
	// result: (TZCNTL x)
	for {
		x := v_0
		if !(buildcfg.GOAMD64 >= 3) {
			break
		}
		v.reset(OpAMD64TZCNTL)
		v.AddArg(x)
		return true
	}
	// match: (Ctz16NonZero x)
	// cond: buildcfg.GOAMD64 < 3
	// result: (BSFL x)
	for {
		x := v_0
		if !(buildcfg.GOAMD64 < 3) {
			break
		}
		v.reset(OpAMD64BSFL)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpCtz32(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Ctz32 x)
	// cond: buildcfg.GOAMD64 >= 3
	// result: (TZCNTL x)
	for {
		x := v_0
		if !(buildcfg.GOAMD64 >= 3) {
			break
		}
		v.reset(OpAMD64TZCNTL)
		v.AddArg(x)
		return true
	}
	// match: (Ctz32 x)
	// cond: buildcfg.GOAMD64 < 3
	// result: (Select0 (BSFQ (BTSQconst <typ.UInt64> [32] x)))
	for {
		x := v_0
		if !(buildcfg.GOAMD64 < 3) {
			break
		}
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v1 := b.NewValue0(v.Pos, OpAMD64BTSQconst, typ.UInt64)
		v1.AuxInt = int8ToAuxInt(32)
		v1.AddArg(x)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpCtz32NonZero(v *Value) bool {
	v_0 := v.Args[0]
	// match: (Ctz32NonZero x)
	// cond: buildcfg.GOAMD64 >= 3
	// result: (TZCNTL x)
	for {
		x := v_0
		if !(buildcfg.GOAMD64 >= 3) {
			break
		}
		v.reset(OpAMD64TZCNTL)
		v.AddArg(x)
		return true
	}
	// match: (Ctz32NonZero x)
	// cond: buildcfg.GOAMD64 < 3
	// result: (BSFL x)
	for {
		x := v_0
		if !(buildcfg.GOAMD64 < 3) {
			break
		}
		v.reset(OpAMD64BSFL)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpCtz64(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Ctz64 x)
	// cond: buildcfg.GOAMD64 >= 3
	// result: (TZCNTQ x)
	for {
		x := v_0
		if !(buildcfg.GOAMD64 >= 3) {
			break
		}
		v.reset(OpAMD64TZCNTQ)
		v.AddArg(x)
		return true
	}
	// match: (Ctz64 <t> x)
	// cond: buildcfg.GOAMD64 < 3
	// result: (CMOVQEQ (Select0 <t> (BSFQ x)) (MOVQconst <t> [64]) (Select1 <types.TypeFlags> (BSFQ x)))
	for {
		t := v.Type
		x := v_0
		if !(buildcfg.GOAMD64 < 3) {
			break
		}
		v.reset(OpAMD64CMOVQEQ)
		v0 := b.NewValue0(v.Pos, OpSelect0, t)
		v1 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v1.AddArg(x)
		v0.AddArg(v1)
		v2 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t)
		v2.AuxInt = int64ToAuxInt(64)
		v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
		v3.AddArg(v1)
		v.AddArg3(v0, v2, v3)
		return true
	}
	return false
}
func rewriteValueAMD64_OpCtz64NonZero(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Ctz64NonZero x)
	// cond: buildcfg.GOAMD64 >= 3
	// result: (TZCNTQ x)
	for {
		x := v_0
		if !(buildcfg.GOAMD64 >= 3) {
			break
		}
		v.reset(OpAMD64TZCNTQ)
		v.AddArg(x)
		return true
	}
	// match: (Ctz64NonZero x)
	// cond: buildcfg.GOAMD64 < 3
	// result: (Select0 (BSFQ x))
	for {
		x := v_0
		if !(buildcfg.GOAMD64 < 3) {
			break
		}
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags))
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
	return false
}
func rewriteValueAMD64_OpCtz8(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Ctz8 x)
	// result: (BSFL (ORLconst <typ.UInt32> [1<<8] x))
	for {
		x := v_0
		v.reset(OpAMD64BSFL)
		v0 := b.NewValue0(v.Pos, OpAMD64ORLconst, typ.UInt32)
		v0.AuxInt = int32ToAuxInt(1 << 8)
		v0.AddArg(x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpCtz8NonZero(v *Value) bool {
	v_0 := v.Args[0]
	// match: (Ctz8NonZero x)
	// cond: buildcfg.GOAMD64 >= 3
	// result: (TZCNTL x)
	for {
		x := v_0
		if !(buildcfg.GOAMD64 >= 3) {
			break
		}
		v.reset(OpAMD64TZCNTL)
		v.AddArg(x)
		return true
	}
	// match: (Ctz8NonZero x)
	// cond: buildcfg.GOAMD64 < 3
	// result: (BSFL x)
	for {
		x := v_0
		if !(buildcfg.GOAMD64 < 3) {
			break
		}
		v.reset(OpAMD64BSFL)
		v.AddArg(x)
		return true
	}
	return false
}
func rewriteValueAMD64_OpDiv16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Div16 [a] x y)
	// result: (Select0 (DIVW [a] x y))
	for {
		a := auxIntToBool(v.AuxInt)
		x := v_0
		y := v_1
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
		v0.AuxInt = boolToAuxInt(a)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpDiv16u(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Div16u x y)
	// result: (Select0 (DIVWU x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpDiv32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Div32 [a] x y)
	// result: (Select0 (DIVL [a] x y))
	for {
		a := auxIntToBool(v.AuxInt)
		x := v_0
		y := v_1
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32))
		v0.AuxInt = boolToAuxInt(a)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpDiv32u(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Div32u x y)
	// result: (Select0 (DIVLU x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32))
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpDiv64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Div64 [a] x y)
	// result: (Select0 (DIVQ [a] x y))
	for {
		a := auxIntToBool(v.AuxInt)
		x := v_0
		y := v_1
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64))
		v0.AuxInt = boolToAuxInt(a)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpDiv64u(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Div64u x y)
	// result: (Select0 (DIVQU x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64))
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
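
// There is no 8-bit divide lowering here: Div8 and Div8u (and Mod8/Mod8u
// further below) sign- or zero-extend both operands to 16 bits and reuse
// DIVW/DIVWU. The quotient is Select0 of the resulting tuple; the remainder
// is Select1.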
func rewriteValueAMD64_OpDiv8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Div8 x y)
	// result: (Select0 (DIVW (SignExt8to16 x) (SignExt8to16 y)))
	for {
		x := v_0
		y := v_1
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
		v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
		v1.AddArg(x)
		v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
		v2.AddArg(y)
		v0.AddArg2(v1, v2)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpDiv8u(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (Div8u x y)
	// result: (Select0 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
	for {
		x := v_0
		y := v_1
		v.reset(OpSelect0)
		v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
		v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
		v1.AddArg(x)
		v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
		v2.AddArg(y)
		v0.AddArg2(v1, v2)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEq16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Eq16 x y)
	// result: (SETEQ (CMPW x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEq32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Eq32 x y)
	// result: (SETEQ (CMPL x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEq32F(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Eq32F x y)
	// result: (SETEQF (UCOMISS x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETEQF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEq64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Eq64 x y)
	// result: (SETEQ (CMPQ x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEq64F(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Eq64F x y)
	// result: (SETEQF (UCOMISD x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETEQF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEq8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Eq8 x y)
	// result: (SETEQ (CMPB x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEqB(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (EqB x y)
	// result: (SETEQ (CMPB x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpEqPtr(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (EqPtr x y)
	// result: (SETEQ (CMPQ x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETEQ)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
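
// VFMADD231SD computes arg0 + arg1*arg2 with the result landing in arg0, so
// (FMA x y z) = x*y + z is emitted with the arguments rotated to (z, x, y).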
func rewriteValueAMD64_OpFMA(v *Value) bool {
	v_2 := v.Args[2]
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (FMA x y z)
	// result: (VFMADD231SD z x y)
	for {
		x := v_0
		y := v_1
		z := v_2
		v.reset(OpAMD64VFMADD231SD)
		v.AddArg3(z, x, y)
		return true
	}
}
func rewriteValueAMD64_OpFloor(v *Value) bool {
	v_0 := v.Args[0]
	// match: (Floor x)
	// result: (ROUNDSD [1] x)
	for {
		x := v_0
		v.reset(OpAMD64ROUNDSD)
		v.AuxInt = int8ToAuxInt(1)
		v.AddArg(x)
		return true
	}
}
func rewriteValueAMD64_OpGetG(v *Value) bool {
	v_0 := v.Args[0]
	// match: (GetG mem)
	// cond: v.Block.Func.OwnAux.Fn.ABI() != obj.ABIInternal
	// result: (LoweredGetG mem)
	for {
		mem := v_0
		if !(v.Block.Func.OwnAux.Fn.ABI() != obj.ABIInternal) {
			break
		}
		v.reset(OpAMD64LoweredGetG)
		v.AddArg(mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpHasCPUFeature(v *Value) bool {
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (HasCPUFeature {s})
	// result: (SETNE (CMPLconst [0] (LoweredHasCPUFeature {s})))
	for {
		s := auxToSym(v.Aux)
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v0.AuxInt = int32ToAuxInt(0)
		v1 := b.NewValue0(v.Pos, OpAMD64LoweredHasCPUFeature, typ.UInt64)
		v1.Aux = symToAux(s)
		v0.AddArg(v1)
		v.AddArg(v0)
		return true
	}
}
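
// Bounds checks compare as unsigned: with SETB/SETBE on (CMPQ idx len), a
// negative index becomes a huge unsigned value, so 0 <= idx < len (or
// idx <= len for slices) reduces to a single comparison.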
func rewriteValueAMD64_OpIsInBounds(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (IsInBounds idx len)
	// result: (SETB (CMPQ idx len))
	for {
		idx := v_0
		len := v_1
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg2(idx, len)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpIsNonNil(v *Value) bool {
	v_0 := v.Args[0]
	b := v.Block
	// match: (IsNonNil p)
	// result: (SETNE (TESTQ p p))
	for {
		p := v_0
		v.reset(OpAMD64SETNE)
		v0 := b.NewValue0(v.Pos, OpAMD64TESTQ, types.TypeFlags)
		v0.AddArg2(p, p)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpIsSliceInBounds(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (IsSliceInBounds idx len)
	// result: (SETBE (CMPQ idx len))
	for {
		idx := v_0
		len := v_1
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg2(idx, len)
		v.AddArg(v0)
		return true
	}
}
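
// In the float orderings below the UCOMISS/UCOMISD operands are swapped and
// SETGEF/SETGF are used, so x <= y is evaluated as y >= x; the *F flavors of
// the setcc ops carry the NaN (unordered) handling these comparisons need.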
func rewriteValueAMD64_OpLeq16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Leq16 x y)
	// result: (SETLE (CMPW x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETLE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq16U(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Leq16U x y)
	// result: (SETBE (CMPW x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Leq32 x y)
	// result: (SETLE (CMPL x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETLE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq32F(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Leq32F x y)
	// result: (SETGEF (UCOMISS y x))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETGEF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
		v0.AddArg2(y, x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq32U(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Leq32U x y)
	// result: (SETBE (CMPL x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Leq64 x y)
	// result: (SETLE (CMPQ x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETLE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq64F(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Leq64F x y)
	// result: (SETGEF (UCOMISD y x))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETGEF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
		v0.AddArg2(y, x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq64U(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Leq64U x y)
	// result: (SETBE (CMPQ x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Leq8 x y)
	// result: (SETLE (CMPB x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETLE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLeq8U(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Leq8U x y)
	// result: (SETBE (CMPB x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETBE)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Less16 x y)
	// result: (SETL (CMPW x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETL)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess16U(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Less16U x y)
	// result: (SETB (CMPW x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Less32 x y)
	// result: (SETL (CMPL x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETL)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess32F(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Less32F x y)
	// result: (SETGF (UCOMISS y x))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETGF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
		v0.AddArg2(y, x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess32U(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Less32U x y)
	// result: (SETB (CMPL x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Less64 x y)
	// result: (SETL (CMPQ x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETL)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess64F(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Less64F x y)
	// result: (SETGF (UCOMISD y x))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETGF)
		v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
		v0.AddArg2(y, x)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess64U(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Less64U x y)
	// result: (SETB (CMPQ x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Less8 x y)
	// result: (SETL (CMPB x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETL)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLess8U(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Less8U x y)
	// result: (SETB (CMPB x y))
	for {
		x := v_0
		y := v_1
		v.reset(OpAMD64SETB)
		v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
		v0.AddArg2(x, y)
		v.AddArg(v0)
		return true
	}
}
func rewriteValueAMD64_OpLoad(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	// match: (Load <t> ptr mem)
	// cond: (is64BitInt(t) || isPtr(t))
	// result: (MOVQload ptr mem)
	for {
		t := v.Type
		ptr := v_0
		mem := v_1
		if !(is64BitInt(t) || isPtr(t)) {
			break
		}
		v.reset(OpAMD64MOVQload)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: is32BitInt(t)
	// result: (MOVLload ptr mem)
	for {
		t := v.Type
		ptr := v_0
		mem := v_1
		if !(is32BitInt(t)) {
			break
		}
		v.reset(OpAMD64MOVLload)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: is16BitInt(t)
	// result: (MOVWload ptr mem)
	for {
		t := v.Type
		ptr := v_0
		mem := v_1
		if !(is16BitInt(t)) {
			break
		}
		v.reset(OpAMD64MOVWload)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: (t.IsBoolean() || is8BitInt(t))
	// result: (MOVBload ptr mem)
	for {
		t := v.Type
		ptr := v_0
		mem := v_1
		if !(t.IsBoolean() || is8BitInt(t)) {
			break
		}
		v.reset(OpAMD64MOVBload)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: is32BitFloat(t)
	// result: (MOVSSload ptr mem)
	for {
		t := v.Type
		ptr := v_0
		mem := v_1
		if !(is32BitFloat(t)) {
			break
		}
		v.reset(OpAMD64MOVSSload)
		v.AddArg2(ptr, mem)
		return true
	}
	// match: (Load <t> ptr mem)
	// cond: is64BitFloat(t)
	// result: (MOVSDload ptr mem)
	for {
		t := v.Type
		ptr := v_0
		mem := v_1
		if !(is64BitFloat(t)) {
			break
		}
		v.reset(OpAMD64MOVSDload)
		v.AddArg2(ptr, mem)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLocalAddr(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	typ := &b.Func.Config.Types
	// match: (LocalAddr <t> {sym} base mem)
	// cond: t.Elem().HasPointers()
	// result: (LEAQ {sym} (SPanchored base mem))
	for {
		t := v.Type
		sym := auxToSym(v.Aux)
		base := v_0
		mem := v_1
		if !(t.Elem().HasPointers()) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.Aux = symToAux(sym)
		v0 := b.NewValue0(v.Pos, OpSPanchored, typ.Uintptr)
		v0.AddArg2(base, mem)
		v.AddArg(v0)
		return true
	}
	// match: (LocalAddr <t> {sym} base _)
	// cond: !t.Elem().HasPointers()
	// result: (LEAQ {sym} base)
	for {
		t := v.Type
		sym := auxToSym(v.Aux)
		base := v_0
		if !(!t.Elem().HasPointers()) {
			break
		}
		v.reset(OpAMD64LEAQ)
		v.Aux = symToAux(sym)
		v.AddArg(base)
		return true
	}
	return false
}
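
// Go requires a shift to produce 0 once the count reaches the operand width,
// while the hardware masks the count. When the count is not provably bounded,
// each rule below ANDs the raw shift with an SBB*carrymask built from a
// compare of the count against the width: all ones while the count is in
// range, zero afterwards. The 8- and 16-bit shifts run in 32-bit registers,
// hence their compares against 32.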
func rewriteValueAMD64_OpLsh16x16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Lsh16x16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = int16ToAuxInt(32)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Lsh16x16 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh16x32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Lsh16x32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = int32ToAuxInt(32)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Lsh16x32 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh16x64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Lsh16x64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = int32ToAuxInt(32)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Lsh16x64 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh16x8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Lsh16x8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = int8ToAuxInt(32)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Lsh16x8 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh32x16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Lsh32x16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = int16ToAuxInt(32)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Lsh32x16 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh32x32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Lsh32x32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = int32ToAuxInt(32)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Lsh32x32 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh32x64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Lsh32x64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = int32ToAuxInt(32)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Lsh32x64 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh32x8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Lsh32x8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = int8ToAuxInt(32)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Lsh32x8 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh64x16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Lsh64x16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = int16ToAuxInt(64)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Lsh64x16 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLQ x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh64x32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Lsh64x32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = int32ToAuxInt(64)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Lsh64x32 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLQ x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh64x64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Lsh64x64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = int32ToAuxInt(64)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Lsh64x64 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLQ x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh64x8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Lsh64x8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDQ)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = int8ToAuxInt(64)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Lsh64x8 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLQ x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLQ)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh8x16(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Lsh8x16 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
		v2.AuxInt = int16ToAuxInt(32)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Lsh8x16 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh8x32(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Lsh8x32 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
		v2.AuxInt = int32ToAuxInt(32)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Lsh8x32 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh8x64(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Lsh8x64 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
		v2.AuxInt = int32ToAuxInt(32)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Lsh8x64 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
func rewriteValueAMD64_OpLsh8x8(v *Value) bool {
	v_1 := v.Args[1]
	v_0 := v.Args[0]
	b := v.Block
	// match: (Lsh8x8 <t> x y)
	// cond: !shiftIsBounded(v)
	// result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
	for {
		t := v.Type
		x := v_0
		y := v_1
		if !(!shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64ANDL)
		v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
		v0.AddArg2(x, y)
		v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
		v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
		v2.AuxInt = int8ToAuxInt(32)
		v2.AddArg(y)
		v1.AddArg(v2)
		v.AddArg2(v0, v1)
		return true
	}
	// match: (Lsh8x8 x y)
	// cond: shiftIsBounded(v)
	// result: (SHLL x y)
	for {
		x := v_0
		y := v_1
		if !(shiftIsBounded(v)) {
			break
		}
		v.reset(OpAMD64SHLL)
		v.AddArg2(x, y)
		return true
	}
	return false
}
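
// Plain MINSS/MINSD return their second operand on ties and NaNs, which does
// not match Go's min/max builtins (NaN propagates, and -0 sorts before +0).
// Min is therefore built as POR(MINSS(MINSS(x,y),x), MINSS(x,y)), merging the
// sign and NaN bits of the two orderings, and Max reuses it through the
// identity max(x,y) = -min(-x,-y).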
26900 func rewriteValueAMD64_OpMax32F(v *Value) bool {
26901 v_1 := v.Args[1]
26902 v_0 := v.Args[0]
26903 b := v.Block
26904
26905
26906 for {
26907 t := v.Type
26908 x := v_0
26909 y := v_1
26910 v.reset(OpNeg32F)
26911 v.Type = t
26912 v0 := b.NewValue0(v.Pos, OpMin32F, t)
26913 v1 := b.NewValue0(v.Pos, OpNeg32F, t)
26914 v1.AddArg(x)
26915 v2 := b.NewValue0(v.Pos, OpNeg32F, t)
26916 v2.AddArg(y)
26917 v0.AddArg2(v1, v2)
26918 v.AddArg(v0)
26919 return true
26920 }
26921 }
26922 func rewriteValueAMD64_OpMax64F(v *Value) bool {
26923 v_1 := v.Args[1]
26924 v_0 := v.Args[0]
26925 b := v.Block
26926
26927
26928 for {
26929 t := v.Type
26930 x := v_0
26931 y := v_1
26932 v.reset(OpNeg64F)
26933 v.Type = t
26934 v0 := b.NewValue0(v.Pos, OpMin64F, t)
26935 v1 := b.NewValue0(v.Pos, OpNeg64F, t)
26936 v1.AddArg(x)
26937 v2 := b.NewValue0(v.Pos, OpNeg64F, t)
26938 v2.AddArg(y)
26939 v0.AddArg2(v1, v2)
26940 v.AddArg(v0)
26941 return true
26942 }
26943 }
26944 func rewriteValueAMD64_OpMin32F(v *Value) bool {
26945 v_1 := v.Args[1]
26946 v_0 := v.Args[0]
26947 b := v.Block
26948
26949
26950 for {
26951 t := v.Type
26952 x := v_0
26953 y := v_1
26954 v.reset(OpAMD64POR)
26955 v0 := b.NewValue0(v.Pos, OpAMD64MINSS, t)
26956 v1 := b.NewValue0(v.Pos, OpAMD64MINSS, t)
26957 v1.AddArg2(x, y)
26958 v0.AddArg2(v1, x)
26959 v.AddArg2(v0, v1)
26960 return true
26961 }
26962 }
26963 func rewriteValueAMD64_OpMin64F(v *Value) bool {
26964 v_1 := v.Args[1]
26965 v_0 := v.Args[0]
26966 b := v.Block
26967
26968
26969 for {
26970 t := v.Type
26971 x := v_0
26972 y := v_1
26973 v.reset(OpAMD64POR)
26974 v0 := b.NewValue0(v.Pos, OpAMD64MINSD, t)
26975 v1 := b.NewValue0(v.Pos, OpAMD64MINSD, t)
26976 v1.AddArg2(x, y)
26977 v0.AddArg2(v1, x)
26978 v.AddArg2(v0, v1)
26979 return true
26980 }
26981 }
26982 func rewriteValueAMD64_OpMod16(v *Value) bool {
26983 v_1 := v.Args[1]
26984 v_0 := v.Args[0]
26985 b := v.Block
26986 typ := &b.Func.Config.Types
26987
26988
26989 for {
26990 a := auxIntToBool(v.AuxInt)
26991 x := v_0
26992 y := v_1
26993 v.reset(OpSelect1)
26994 v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
26995 v0.AuxInt = boolToAuxInt(a)
26996 v0.AddArg2(x, y)
26997 v.AddArg(v0)
26998 return true
26999 }
27000 }
27001 func rewriteValueAMD64_OpMod16u(v *Value) bool {
27002 v_1 := v.Args[1]
27003 v_0 := v.Args[0]
27004 b := v.Block
27005 typ := &b.Func.Config.Types
27006
27007
27008 for {
27009 x := v_0
27010 y := v_1
27011 v.reset(OpSelect1)
27012 v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
27013 v0.AddArg2(x, y)
27014 v.AddArg(v0)
27015 return true
27016 }
27017 }
27018 func rewriteValueAMD64_OpMod32(v *Value) bool {
27019 v_1 := v.Args[1]
27020 v_0 := v.Args[0]
27021 b := v.Block
27022 typ := &b.Func.Config.Types
27023
27024
27025 for {
27026 a := auxIntToBool(v.AuxInt)
27027 x := v_0
27028 y := v_1
27029 v.reset(OpSelect1)
27030 v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32))
27031 v0.AuxInt = boolToAuxInt(a)
27032 v0.AddArg2(x, y)
27033 v.AddArg(v0)
27034 return true
27035 }
27036 }
27037 func rewriteValueAMD64_OpMod32u(v *Value) bool {
27038 v_1 := v.Args[1]
27039 v_0 := v.Args[0]
27040 b := v.Block
27041 typ := &b.Func.Config.Types
// match: (Mod32u x y)
// result: (Select1 (DIVLU x y))
27044 for {
27045 x := v_0
27046 y := v_1
27047 v.reset(OpSelect1)
27048 v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32))
27049 v0.AddArg2(x, y)
27050 v.AddArg(v0)
27051 return true
27052 }
27053 }
27054 func rewriteValueAMD64_OpMod64(v *Value) bool {
27055 v_1 := v.Args[1]
27056 v_0 := v.Args[0]
27057 b := v.Block
27058 typ := &b.Func.Config.Types
// match: (Mod64 [a] x y)
// result: (Select1 (DIVQ [a] x y))
27061 for {
27062 a := auxIntToBool(v.AuxInt)
27063 x := v_0
27064 y := v_1
27065 v.reset(OpSelect1)
27066 v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64))
27067 v0.AuxInt = boolToAuxInt(a)
27068 v0.AddArg2(x, y)
27069 v.AddArg(v0)
27070 return true
27071 }
27072 }
27073 func rewriteValueAMD64_OpMod64u(v *Value) bool {
27074 v_1 := v.Args[1]
27075 v_0 := v.Args[0]
27076 b := v.Block
27077 typ := &b.Func.Config.Types
// match: (Mod64u x y)
// result: (Select1 (DIVQU x y))
27080 for {
27081 x := v_0
27082 y := v_1
27083 v.reset(OpSelect1)
27084 v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64))
27085 v0.AddArg2(x, y)
27086 v.AddArg(v0)
27087 return true
27088 }
27089 }
27090 func rewriteValueAMD64_OpMod8(v *Value) bool {
27091 v_1 := v.Args[1]
27092 v_0 := v.Args[0]
27093 b := v.Block
27094 typ := &b.Func.Config.Types
// match: (Mod8 x y)
// result: (Select1 (DIVW (SignExt8to16 x) (SignExt8to16 y)))
27097 for {
27098 x := v_0
27099 y := v_1
27100 v.reset(OpSelect1)
27101 v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
27102 v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
27103 v1.AddArg(x)
27104 v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
27105 v2.AddArg(y)
27106 v0.AddArg2(v1, v2)
27107 v.AddArg(v0)
27108 return true
27109 }
27110 }
27111 func rewriteValueAMD64_OpMod8u(v *Value) bool {
27112 v_1 := v.Args[1]
27113 v_0 := v.Args[0]
27114 b := v.Block
27115 typ := &b.Func.Config.Types
// match: (Mod8u x y)
// result: (Select1 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
27118 for {
27119 x := v_0
27120 y := v_1
27121 v.reset(OpSelect1)
27122 v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
27123 v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
27124 v1.AddArg(x)
27125 v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
27126 v2.AddArg(y)
27127 v0.AddArg2(v1, v2)
27128 v.AddArg(v0)
27129 return true
27130 }
27131 }
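// All of the Mod* lowerings share one shape: the machine DIV* ops produce a
// (quotient, remainder) tuple and Select1 extracts the remainder (the Div*
// lowerings take Select0 of the same tuple). 8-bit operands are widened to
// 16 bits first so the DIVW/DIVWU ops can be reused instead of modeling the
// x86 byte-divide form, and the signed variants thread the divide fix-up
// flag [a] from the generic op through to the machine op unchanged.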
27132 func rewriteValueAMD64_OpMove(v *Value) bool {
27133 v_2 := v.Args[2]
27134 v_1 := v.Args[1]
27135 v_0 := v.Args[0]
27136 b := v.Block
27137 typ := &b.Func.Config.Types
// match: (Move [0] _ _ mem)
// result: mem
27140 for {
27141 if auxIntToInt64(v.AuxInt) != 0 {
27142 break
27143 }
27144 mem := v_2
27145 v.copyOf(mem)
27146 return true
27147 }
// match: (Move [1] dst src mem)
// result: (MOVBstore dst (MOVBload src mem) mem)
27150 for {
27151 if auxIntToInt64(v.AuxInt) != 1 {
27152 break
27153 }
27154 dst := v_0
27155 src := v_1
27156 mem := v_2
27157 v.reset(OpAMD64MOVBstore)
27158 v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
27159 v0.AddArg2(src, mem)
27160 v.AddArg3(dst, v0, mem)
27161 return true
27162 }
// match: (Move [2] dst src mem)
// result: (MOVWstore dst (MOVWload src mem) mem)
27165 for {
27166 if auxIntToInt64(v.AuxInt) != 2 {
27167 break
27168 }
27169 dst := v_0
27170 src := v_1
27171 mem := v_2
27172 v.reset(OpAMD64MOVWstore)
27173 v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
27174 v0.AddArg2(src, mem)
27175 v.AddArg3(dst, v0, mem)
27176 return true
27177 }
// match: (Move [4] dst src mem)
// result: (MOVLstore dst (MOVLload src mem) mem)
27180 for {
27181 if auxIntToInt64(v.AuxInt) != 4 {
27182 break
27183 }
27184 dst := v_0
27185 src := v_1
27186 mem := v_2
27187 v.reset(OpAMD64MOVLstore)
27188 v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
27189 v0.AddArg2(src, mem)
27190 v.AddArg3(dst, v0, mem)
27191 return true
27192 }
// match: (Move [8] dst src mem)
// result: (MOVQstore dst (MOVQload src mem) mem)
27195 for {
27196 if auxIntToInt64(v.AuxInt) != 8 {
27197 break
27198 }
27199 dst := v_0
27200 src := v_1
27201 mem := v_2
27202 v.reset(OpAMD64MOVQstore)
27203 v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
27204 v0.AddArg2(src, mem)
27205 v.AddArg3(dst, v0, mem)
27206 return true
27207 }
// match: (Move [16] dst src mem)
// result: (MOVOstore dst (MOVOload src mem) mem)
27210 for {
27211 if auxIntToInt64(v.AuxInt) != 16 {
27212 break
27213 }
27214 dst := v_0
27215 src := v_1
27216 mem := v_2
27217 v.reset(OpAMD64MOVOstore)
27218 v0 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128)
27219 v0.AddArg2(src, mem)
27220 v.AddArg3(dst, v0, mem)
27221 return true
27222 }
// match: (Move [32] dst src mem)
// result: (Move [16] (OffPtr <dst.Type> dst [16]) (OffPtr <src.Type> src [16]) (Move [16] dst src mem))
27225 for {
27226 if auxIntToInt64(v.AuxInt) != 32 {
27227 break
27228 }
27229 dst := v_0
27230 src := v_1
27231 mem := v_2
27232 v.reset(OpMove)
27233 v.AuxInt = int64ToAuxInt(16)
27234 v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
27235 v0.AuxInt = int64ToAuxInt(16)
27236 v0.AddArg(dst)
27237 v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
27238 v1.AuxInt = int64ToAuxInt(16)
27239 v1.AddArg(src)
27240 v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem)
27241 v2.AuxInt = int64ToAuxInt(16)
27242 v2.AddArg3(dst, src, mem)
27243 v.AddArg3(v0, v1, v2)
27244 return true
27245 }
// match: (Move [48] dst src mem)
// result: (Move [32] (OffPtr <dst.Type> dst [16]) (OffPtr <src.Type> src [16]) (Move [16] dst src mem))
27248 for {
27249 if auxIntToInt64(v.AuxInt) != 48 {
27250 break
27251 }
27252 dst := v_0
27253 src := v_1
27254 mem := v_2
27255 v.reset(OpMove)
27256 v.AuxInt = int64ToAuxInt(32)
27257 v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
27258 v0.AuxInt = int64ToAuxInt(16)
27259 v0.AddArg(dst)
27260 v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
27261 v1.AuxInt = int64ToAuxInt(16)
27262 v1.AddArg(src)
27263 v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem)
27264 v2.AuxInt = int64ToAuxInt(16)
27265 v2.AddArg3(dst, src, mem)
27266 v.AddArg3(v0, v1, v2)
27267 return true
27268 }
// match: (Move [64] dst src mem)
// result: (Move [32] (OffPtr <dst.Type> dst [32]) (OffPtr <src.Type> src [32]) (Move [32] dst src mem))
27271 for {
27272 if auxIntToInt64(v.AuxInt) != 64 {
27273 break
27274 }
27275 dst := v_0
27276 src := v_1
27277 mem := v_2
27278 v.reset(OpMove)
27279 v.AuxInt = int64ToAuxInt(32)
27280 v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
27281 v0.AuxInt = int64ToAuxInt(32)
27282 v0.AddArg(dst)
27283 v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
27284 v1.AuxInt = int64ToAuxInt(32)
27285 v1.AddArg(src)
27286 v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem)
27287 v2.AuxInt = int64ToAuxInt(32)
27288 v2.AddArg3(dst, src, mem)
27289 v.AddArg3(v0, v1, v2)
27290 return true
27291 }
// match: (Move [3] dst src mem)
// result: (MOVBstore [2] dst (MOVBload [2] src mem) (MOVWstore dst (MOVWload src mem) mem))
27294 for {
27295 if auxIntToInt64(v.AuxInt) != 3 {
27296 break
27297 }
27298 dst := v_0
27299 src := v_1
27300 mem := v_2
27301 v.reset(OpAMD64MOVBstore)
27302 v.AuxInt = int32ToAuxInt(2)
27303 v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
27304 v0.AuxInt = int32ToAuxInt(2)
27305 v0.AddArg2(src, mem)
27306 v1 := b.NewValue0(v.Pos, OpAMD64MOVWstore, types.TypeMem)
27307 v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
27308 v2.AddArg2(src, mem)
27309 v1.AddArg3(dst, v2, mem)
27310 v.AddArg3(dst, v0, v1)
27311 return true
27312 }
// match: (Move [5] dst src mem)
// result: (MOVBstore [4] dst (MOVBload [4] src mem) (MOVLstore dst (MOVLload src mem) mem))
27315 for {
27316 if auxIntToInt64(v.AuxInt) != 5 {
27317 break
27318 }
27319 dst := v_0
27320 src := v_1
27321 mem := v_2
27322 v.reset(OpAMD64MOVBstore)
27323 v.AuxInt = int32ToAuxInt(4)
27324 v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
27325 v0.AuxInt = int32ToAuxInt(4)
27326 v0.AddArg2(src, mem)
27327 v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem)
27328 v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
27329 v2.AddArg2(src, mem)
27330 v1.AddArg3(dst, v2, mem)
27331 v.AddArg3(dst, v0, v1)
27332 return true
27333 }
// match: (Move [6] dst src mem)
// result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVLstore dst (MOVLload src mem) mem))
27336 for {
27337 if auxIntToInt64(v.AuxInt) != 6 {
27338 break
27339 }
27340 dst := v_0
27341 src := v_1
27342 mem := v_2
27343 v.reset(OpAMD64MOVWstore)
27344 v.AuxInt = int32ToAuxInt(4)
27345 v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
27346 v0.AuxInt = int32ToAuxInt(4)
27347 v0.AddArg2(src, mem)
27348 v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem)
27349 v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
27350 v2.AddArg2(src, mem)
27351 v1.AddArg3(dst, v2, mem)
27352 v.AddArg3(dst, v0, v1)
27353 return true
27354 }
// match: (Move [7] dst src mem)
// result: (MOVLstore [3] dst (MOVLload [3] src mem) (MOVLstore dst (MOVLload src mem) mem))
27357 for {
27358 if auxIntToInt64(v.AuxInt) != 7 {
27359 break
27360 }
27361 dst := v_0
27362 src := v_1
27363 mem := v_2
27364 v.reset(OpAMD64MOVLstore)
27365 v.AuxInt = int32ToAuxInt(3)
27366 v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
27367 v0.AuxInt = int32ToAuxInt(3)
27368 v0.AddArg2(src, mem)
27369 v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem)
27370 v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
27371 v2.AddArg2(src, mem)
27372 v1.AddArg3(dst, v2, mem)
27373 v.AddArg3(dst, v0, v1)
27374 return true
27375 }
// match: (Move [9] dst src mem)
// result: (MOVBstore [8] dst (MOVBload [8] src mem) (MOVQstore dst (MOVQload src mem) mem))
27378 for {
27379 if auxIntToInt64(v.AuxInt) != 9 {
27380 break
27381 }
27382 dst := v_0
27383 src := v_1
27384 mem := v_2
27385 v.reset(OpAMD64MOVBstore)
27386 v.AuxInt = int32ToAuxInt(8)
27387 v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
27388 v0.AuxInt = int32ToAuxInt(8)
27389 v0.AddArg2(src, mem)
27390 v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
27391 v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
27392 v2.AddArg2(src, mem)
27393 v1.AddArg3(dst, v2, mem)
27394 v.AddArg3(dst, v0, v1)
27395 return true
27396 }
// match: (Move [10] dst src mem)
// result: (MOVWstore [8] dst (MOVWload [8] src mem) (MOVQstore dst (MOVQload src mem) mem))
27399 for {
27400 if auxIntToInt64(v.AuxInt) != 10 {
27401 break
27402 }
27403 dst := v_0
27404 src := v_1
27405 mem := v_2
27406 v.reset(OpAMD64MOVWstore)
27407 v.AuxInt = int32ToAuxInt(8)
27408 v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
27409 v0.AuxInt = int32ToAuxInt(8)
27410 v0.AddArg2(src, mem)
27411 v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
27412 v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
27413 v2.AddArg2(src, mem)
27414 v1.AddArg3(dst, v2, mem)
27415 v.AddArg3(dst, v0, v1)
27416 return true
27417 }
// match: (Move [11] dst src mem)
// result: (MOVLstore [7] dst (MOVLload [7] src mem) (MOVQstore dst (MOVQload src mem) mem))
27420 for {
27421 if auxIntToInt64(v.AuxInt) != 11 {
27422 break
27423 }
27424 dst := v_0
27425 src := v_1
27426 mem := v_2
27427 v.reset(OpAMD64MOVLstore)
27428 v.AuxInt = int32ToAuxInt(7)
27429 v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
27430 v0.AuxInt = int32ToAuxInt(7)
27431 v0.AddArg2(src, mem)
27432 v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
27433 v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
27434 v2.AddArg2(src, mem)
27435 v1.AddArg3(dst, v2, mem)
27436 v.AddArg3(dst, v0, v1)
27437 return true
27438 }
// match: (Move [12] dst src mem)
// result: (MOVLstore [8] dst (MOVLload [8] src mem) (MOVQstore dst (MOVQload src mem) mem))
27441 for {
27442 if auxIntToInt64(v.AuxInt) != 12 {
27443 break
27444 }
27445 dst := v_0
27446 src := v_1
27447 mem := v_2
27448 v.reset(OpAMD64MOVLstore)
27449 v.AuxInt = int32ToAuxInt(8)
27450 v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
27451 v0.AuxInt = int32ToAuxInt(8)
27452 v0.AddArg2(src, mem)
27453 v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
27454 v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
27455 v2.AddArg2(src, mem)
27456 v1.AddArg3(dst, v2, mem)
27457 v.AddArg3(dst, v0, v1)
27458 return true
27459 }
// match: (Move [s] dst src mem)
// cond: s >= 13 && s <= 15
// result: (MOVQstore [int32(s-8)] dst (MOVQload [int32(s-8)] src mem) (MOVQstore dst (MOVQload src mem) mem))
27463 for {
27464 s := auxIntToInt64(v.AuxInt)
27465 dst := v_0
27466 src := v_1
27467 mem := v_2
27468 if !(s >= 13 && s <= 15) {
27469 break
27470 }
27471 v.reset(OpAMD64MOVQstore)
27472 v.AuxInt = int32ToAuxInt(int32(s - 8))
27473 v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
27474 v0.AuxInt = int32ToAuxInt(int32(s - 8))
27475 v0.AddArg2(src, mem)
27476 v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
27477 v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
27478 v2.AddArg2(src, mem)
27479 v1.AddArg3(dst, v2, mem)
27480 v.AddArg3(dst, v0, v1)
27481 return true
27482 }
// match: (Move [s] dst src mem)
// cond: s > 16 && s%16 != 0 && s%16 <= 8
// result: (Move [s-s%16] (OffPtr <dst.Type> dst [s%16]) (OffPtr <src.Type> src [s%16]) (MOVQstore dst (MOVQload src mem) mem))
27486 for {
27487 s := auxIntToInt64(v.AuxInt)
27488 dst := v_0
27489 src := v_1
27490 mem := v_2
27491 if !(s > 16 && s%16 != 0 && s%16 <= 8) {
27492 break
27493 }
27494 v.reset(OpMove)
27495 v.AuxInt = int64ToAuxInt(s - s%16)
27496 v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
27497 v0.AuxInt = int64ToAuxInt(s % 16)
27498 v0.AddArg(dst)
27499 v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
27500 v1.AuxInt = int64ToAuxInt(s % 16)
27501 v1.AddArg(src)
27502 v2 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
27503 v3 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
27504 v3.AddArg2(src, mem)
27505 v2.AddArg3(dst, v3, mem)
27506 v.AddArg3(v0, v1, v2)
27507 return true
27508 }
// match: (Move [s] dst src mem)
// cond: s > 16 && s%16 != 0 && s%16 > 8
// result: (Move [s-s%16] (OffPtr <dst.Type> dst [s%16]) (OffPtr <src.Type> src [s%16]) (MOVOstore dst (MOVOload src mem) mem))
27512 for {
27513 s := auxIntToInt64(v.AuxInt)
27514 dst := v_0
27515 src := v_1
27516 mem := v_2
27517 if !(s > 16 && s%16 != 0 && s%16 > 8) {
27518 break
27519 }
27520 v.reset(OpMove)
27521 v.AuxInt = int64ToAuxInt(s - s%16)
27522 v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
27523 v0.AuxInt = int64ToAuxInt(s % 16)
27524 v0.AddArg(dst)
27525 v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
27526 v1.AuxInt = int64ToAuxInt(s % 16)
27527 v1.AddArg(src)
27528 v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
27529 v3 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128)
27530 v3.AddArg2(src, mem)
27531 v2.AddArg3(dst, v3, mem)
27532 v.AddArg3(v0, v1, v2)
27533 return true
27534 }
// match: (Move [s] dst src mem)
// cond: s > 64 && s <= 16*64 && s%16 == 0 && logLargeCopy(v, s)
// result: (DUFFCOPY [s] dst src mem)
27538 for {
27539 s := auxIntToInt64(v.AuxInt)
27540 dst := v_0
27541 src := v_1
27542 mem := v_2
27543 if !(s > 64 && s <= 16*64 && s%16 == 0 && logLargeCopy(v, s)) {
27544 break
27545 }
27546 v.reset(OpAMD64DUFFCOPY)
27547 v.AuxInt = int64ToAuxInt(s)
27548 v.AddArg3(dst, src, mem)
27549 return true
27550 }
// match: (Move [s] dst src mem)
// cond: s > 16*64 && s%8 == 0 && logLargeCopy(v, s)
// result: (REPMOVSQ dst src (MOVQconst [s/8]) mem)
27554 for {
27555 s := auxIntToInt64(v.AuxInt)
27556 dst := v_0
27557 src := v_1
27558 mem := v_2
27559 if !(s > 16*64 && s%8 == 0 && logLargeCopy(v, s)) {
27560 break
27561 }
27562 v.reset(OpAMD64REPMOVSQ)
27563 v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
27564 v0.AuxInt = int64ToAuxInt(s / 8)
27565 v.AddArg4(dst, src, v0, mem)
27566 return true
27567 }
27568 return false
27569 }
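// Taken together, the Move rules implement a size-tiered copy strategy:
// sizes 1, 2, 4, 8 and 16 become a single load/store pair (16 bytes via the
// SSE MOVO ops); awkward sizes such as 3, 5, 6, 7 and 9..15 use two load/
// store pairs whose ranges may overlap; 32, 48, 64 and other mid-range sizes
// peel 16-byte chunks recursively; lengths up to 16*64 that are multiples of
// 16 jump into Duff's-device copy code (DUFFCOPY); and anything larger falls
// back to REP MOVSQ with the quadword count preloaded by MOVQconst.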
27570 func rewriteValueAMD64_OpNeg32F(v *Value) bool {
27571 v_0 := v.Args[0]
27572 b := v.Block
27573 typ := &b.Func.Config.Types
// match: (Neg32F x)
// result: (PXOR x (MOVSSconst <typ.Float32> [float32(math.Copysign(0, -1))]))
27576 for {
27577 x := v_0
27578 v.reset(OpAMD64PXOR)
27579 v0 := b.NewValue0(v.Pos, OpAMD64MOVSSconst, typ.Float32)
27580 v0.AuxInt = float32ToAuxInt(float32(math.Copysign(0, -1)))
27581 v.AddArg2(x, v0)
27582 return true
27583 }
27584 }
27585 func rewriteValueAMD64_OpNeg64F(v *Value) bool {
27586 v_0 := v.Args[0]
27587 b := v.Block
27588 typ := &b.Func.Config.Types
// match: (Neg64F x)
// result: (PXOR x (MOVSDconst <typ.Float64> [math.Copysign(0, -1)]))
27591 for {
27592 x := v_0
27593 v.reset(OpAMD64PXOR)
27594 v0 := b.NewValue0(v.Pos, OpAMD64MOVSDconst, typ.Float64)
27595 v0.AuxInt = float64ToAuxInt(math.Copysign(0, -1))
27596 v.AddArg2(x, v0)
27597 return true
27598 }
27599 }
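// Float negation is a pure bit operation here: math.Copysign(0, -1) is -0.0,
// a constant with only the sign bit set, so PXOR with it flips the sign of x
// while leaving every other bit (including NaN payloads) untouched.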
27600 func rewriteValueAMD64_OpNeq16(v *Value) bool {
27601 v_1 := v.Args[1]
27602 v_0 := v.Args[0]
27603 b := v.Block
// match: (Neq16 x y)
// result: (SETNE (CMPW x y))
27606 for {
27607 x := v_0
27608 y := v_1
27609 v.reset(OpAMD64SETNE)
27610 v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
27611 v0.AddArg2(x, y)
27612 v.AddArg(v0)
27613 return true
27614 }
27615 }
27616 func rewriteValueAMD64_OpNeq32(v *Value) bool {
27617 v_1 := v.Args[1]
27618 v_0 := v.Args[0]
27619 b := v.Block
// match: (Neq32 x y)
// result: (SETNE (CMPL x y))
27622 for {
27623 x := v_0
27624 y := v_1
27625 v.reset(OpAMD64SETNE)
27626 v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
27627 v0.AddArg2(x, y)
27628 v.AddArg(v0)
27629 return true
27630 }
27631 }
27632 func rewriteValueAMD64_OpNeq32F(v *Value) bool {
27633 v_1 := v.Args[1]
27634 v_0 := v.Args[0]
27635 b := v.Block
// match: (Neq32F x y)
// result: (SETNEF (UCOMISS x y))
27638 for {
27639 x := v_0
27640 y := v_1
27641 v.reset(OpAMD64SETNEF)
27642 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
27643 v0.AddArg2(x, y)
27644 v.AddArg(v0)
27645 return true
27646 }
27647 }
27648 func rewriteValueAMD64_OpNeq64(v *Value) bool {
27649 v_1 := v.Args[1]
27650 v_0 := v.Args[0]
27651 b := v.Block
// match: (Neq64 x y)
// result: (SETNE (CMPQ x y))
27654 for {
27655 x := v_0
27656 y := v_1
27657 v.reset(OpAMD64SETNE)
27658 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
27659 v0.AddArg2(x, y)
27660 v.AddArg(v0)
27661 return true
27662 }
27663 }
27664 func rewriteValueAMD64_OpNeq64F(v *Value) bool {
27665 v_1 := v.Args[1]
27666 v_0 := v.Args[0]
27667 b := v.Block
// match: (Neq64F x y)
// result: (SETNEF (UCOMISD x y))
27670 for {
27671 x := v_0
27672 y := v_1
27673 v.reset(OpAMD64SETNEF)
27674 v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
27675 v0.AddArg2(x, y)
27676 v.AddArg(v0)
27677 return true
27678 }
27679 }
27680 func rewriteValueAMD64_OpNeq8(v *Value) bool {
27681 v_1 := v.Args[1]
27682 v_0 := v.Args[0]
27683 b := v.Block
// match: (Neq8 x y)
// result: (SETNE (CMPB x y))
27686 for {
27687 x := v_0
27688 y := v_1
27689 v.reset(OpAMD64SETNE)
27690 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
27691 v0.AddArg2(x, y)
27692 v.AddArg(v0)
27693 return true
27694 }
27695 }
27696 func rewriteValueAMD64_OpNeqB(v *Value) bool {
27697 v_1 := v.Args[1]
27698 v_0 := v.Args[0]
27699 b := v.Block
// match: (NeqB x y)
// result: (SETNE (CMPB x y))
27702 for {
27703 x := v_0
27704 y := v_1
27705 v.reset(OpAMD64SETNE)
27706 v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
27707 v0.AddArg2(x, y)
27708 v.AddArg(v0)
27709 return true
27710 }
27711 }
27712 func rewriteValueAMD64_OpNeqPtr(v *Value) bool {
27713 v_1 := v.Args[1]
27714 v_0 := v.Args[0]
27715 b := v.Block
// match: (NeqPtr x y)
// result: (SETNE (CMPQ x y))
27718 for {
27719 x := v_0
27720 y := v_1
27721 v.reset(OpAMD64SETNE)
27722 v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
27723 v0.AddArg2(x, y)
27724 v.AddArg(v0)
27725 return true
27726 }
27727 }
27728 func rewriteValueAMD64_OpNot(v *Value) bool {
27729 v_0 := v.Args[0]
// match: (Not x)
// result: (XORLconst [1] x)
27732 for {
27733 x := v_0
27734 v.reset(OpAMD64XORLconst)
27735 v.AuxInt = int32ToAuxInt(1)
27736 v.AddArg(x)
27737 return true
27738 }
27739 }
27740 func rewriteValueAMD64_OpOffPtr(v *Value) bool {
27741 v_0 := v.Args[0]
27742 b := v.Block
27743 typ := &b.Func.Config.Types
// match: (OffPtr [off] ptr)
// cond: is32Bit(off)
// result: (ADDQconst [int32(off)] ptr)
27747 for {
27748 off := auxIntToInt64(v.AuxInt)
27749 ptr := v_0
27750 if !(is32Bit(off)) {
27751 break
27752 }
27753 v.reset(OpAMD64ADDQconst)
27754 v.AuxInt = int32ToAuxInt(int32(off))
27755 v.AddArg(ptr)
27756 return true
27757 }
// match: (OffPtr [off] ptr)
// result: (ADDQ (MOVQconst [off]) ptr)
27760 for {
27761 off := auxIntToInt64(v.AuxInt)
27762 ptr := v_0
27763 v.reset(OpAMD64ADDQ)
27764 v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
27765 v0.AuxInt = int64ToAuxInt(off)
27766 v.AddArg2(v0, ptr)
27767 return true
27768 }
27769 }
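// OffPtr prefers ADDQconst, which carries the offset as a 32-bit immediate;
// only offsets rejected by is32Bit need the two-value MOVQconst+ADDQ form.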
27770 func rewriteValueAMD64_OpPanicBounds(v *Value) bool {
27771 v_2 := v.Args[2]
27772 v_1 := v.Args[1]
27773 v_0 := v.Args[0]
// match: (PanicBounds [kind] x y mem)
// cond: boundsABI(kind) == 0
// result: (LoweredPanicBoundsA [kind] x y mem)
27777 for {
27778 kind := auxIntToInt64(v.AuxInt)
27779 x := v_0
27780 y := v_1
27781 mem := v_2
27782 if !(boundsABI(kind) == 0) {
27783 break
27784 }
27785 v.reset(OpAMD64LoweredPanicBoundsA)
27786 v.AuxInt = int64ToAuxInt(kind)
27787 v.AddArg3(x, y, mem)
27788 return true
27789 }
// match: (PanicBounds [kind] x y mem)
// cond: boundsABI(kind) == 1
// result: (LoweredPanicBoundsB [kind] x y mem)
27793 for {
27794 kind := auxIntToInt64(v.AuxInt)
27795 x := v_0
27796 y := v_1
27797 mem := v_2
27798 if !(boundsABI(kind) == 1) {
27799 break
27800 }
27801 v.reset(OpAMD64LoweredPanicBoundsB)
27802 v.AuxInt = int64ToAuxInt(kind)
27803 v.AddArg3(x, y, mem)
27804 return true
27805 }
// match: (PanicBounds [kind] x y mem)
// cond: boundsABI(kind) == 2
// result: (LoweredPanicBoundsC [kind] x y mem)
27809 for {
27810 kind := auxIntToInt64(v.AuxInt)
27811 x := v_0
27812 y := v_1
27813 mem := v_2
27814 if !(boundsABI(kind) == 2) {
27815 break
27816 }
27817 v.reset(OpAMD64LoweredPanicBoundsC)
27818 v.AuxInt = int64ToAuxInt(kind)
27819 v.AddArg3(x, y, mem)
27820 return true
27821 }
27822 return false
27823 }
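// boundsABI buckets the panic kind so that each Lowered helper can assume a
// fixed register pair for the failing index and the length or capacity; the
// A/B/C variants exist only because different bounds-check shapes leave those
// two values in different registers (my reading of the runtime contract, not
// spelled out in this file).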
27824 func rewriteValueAMD64_OpPopCount16(v *Value) bool {
27825 v_0 := v.Args[0]
27826 b := v.Block
27827 typ := &b.Func.Config.Types
// match: (PopCount16 x)
// result: (POPCNTL (MOVWQZX <typ.UInt32> x))
27830 for {
27831 x := v_0
27832 v.reset(OpAMD64POPCNTL)
27833 v0 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt32)
27834 v0.AddArg(x)
27835 v.AddArg(v0)
27836 return true
27837 }
27838 }
27839 func rewriteValueAMD64_OpPopCount8(v *Value) bool {
27840 v_0 := v.Args[0]
27841 b := v.Block
27842 typ := &b.Func.Config.Types
// match: (PopCount8 x)
// result: (POPCNTL (MOVBQZX <typ.UInt32> x))
27845 for {
27846 x := v_0
27847 v.reset(OpAMD64POPCNTL)
27848 v0 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt32)
27849 v0.AddArg(x)
27850 v.AddArg(v0)
27851 return true
27852 }
27853 }
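// The 8- and 16-bit popcounts zero-extend into a 32-bit register first:
// POPCNT exists only in 32- and 64-bit forms, and MOVBQZX/MOVWQZX widening
// also avoids reading a stale partial register.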
27854 func rewriteValueAMD64_OpRoundToEven(v *Value) bool {
27855 v_0 := v.Args[0]
// match: (RoundToEven x)
// result: (ROUNDSD [0] x)
27858 for {
27859 x := v_0
27860 v.reset(OpAMD64ROUNDSD)
27861 v.AuxInt = int8ToAuxInt(0)
27862 v.AddArg(x)
27863 return true
27864 }
27865 }
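// ROUNDSD's AuxInt selects the SSE4.1 rounding mode: 0 rounds to nearest
// even (this rule), 1 is floor, 2 is ceil, and 3 truncates toward zero (used
// by the Trunc lowering further down).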
27866 func rewriteValueAMD64_OpRsh16Ux16(v *Value) bool {
27867 v_1 := v.Args[1]
27868 v_0 := v.Args[0]
27869 b := v.Block
// match: (Rsh16Ux16 <t> x y)
// cond: !shiftIsBounded(v)
// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPWconst y [16])))
27873 for {
27874 t := v.Type
27875 x := v_0
27876 y := v_1
27877 if !(!shiftIsBounded(v)) {
27878 break
27879 }
27880 v.reset(OpAMD64ANDL)
27881 v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
27882 v0.AddArg2(x, y)
27883 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
27884 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
27885 v2.AuxInt = int16ToAuxInt(16)
27886 v2.AddArg(y)
27887 v1.AddArg(v2)
27888 v.AddArg2(v0, v1)
27889 return true
27890 }
// match: (Rsh16Ux16 x y)
// cond: shiftIsBounded(v)
// result: (SHRW x y)
27894 for {
27895 x := v_0
27896 y := v_1
27897 if !(shiftIsBounded(v)) {
27898 break
27899 }
27900 v.reset(OpAMD64SHRW)
27901 v.AddArg2(x, y)
27902 return true
27903 }
27904 return false
27905 }
27906 func rewriteValueAMD64_OpRsh16Ux32(v *Value) bool {
27907 v_1 := v.Args[1]
27908 v_0 := v.Args[0]
27909 b := v.Block
// match: (Rsh16Ux32 <t> x y)
// cond: !shiftIsBounded(v)
// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPLconst y [16])))
27913 for {
27914 t := v.Type
27915 x := v_0
27916 y := v_1
27917 if !(!shiftIsBounded(v)) {
27918 break
27919 }
27920 v.reset(OpAMD64ANDL)
27921 v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
27922 v0.AddArg2(x, y)
27923 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
27924 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
27925 v2.AuxInt = int32ToAuxInt(16)
27926 v2.AddArg(y)
27927 v1.AddArg(v2)
27928 v.AddArg2(v0, v1)
27929 return true
27930 }
// match: (Rsh16Ux32 x y)
// cond: shiftIsBounded(v)
// result: (SHRW x y)
27934 for {
27935 x := v_0
27936 y := v_1
27937 if !(shiftIsBounded(v)) {
27938 break
27939 }
27940 v.reset(OpAMD64SHRW)
27941 v.AddArg2(x, y)
27942 return true
27943 }
27944 return false
27945 }
27946 func rewriteValueAMD64_OpRsh16Ux64(v *Value) bool {
27947 v_1 := v.Args[1]
27948 v_0 := v.Args[0]
27949 b := v.Block
// match: (Rsh16Ux64 <t> x y)
// cond: !shiftIsBounded(v)
// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPQconst y [16])))
27953 for {
27954 t := v.Type
27955 x := v_0
27956 y := v_1
27957 if !(!shiftIsBounded(v)) {
27958 break
27959 }
27960 v.reset(OpAMD64ANDL)
27961 v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
27962 v0.AddArg2(x, y)
27963 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
27964 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
27965 v2.AuxInt = int32ToAuxInt(16)
27966 v2.AddArg(y)
27967 v1.AddArg(v2)
27968 v.AddArg2(v0, v1)
27969 return true
27970 }
// match: (Rsh16Ux64 x y)
// cond: shiftIsBounded(v)
// result: (SHRW x y)
27974 for {
27975 x := v_0
27976 y := v_1
27977 if !(shiftIsBounded(v)) {
27978 break
27979 }
27980 v.reset(OpAMD64SHRW)
27981 v.AddArg2(x, y)
27982 return true
27983 }
27984 return false
27985 }
27986 func rewriteValueAMD64_OpRsh16Ux8(v *Value) bool {
27987 v_1 := v.Args[1]
27988 v_0 := v.Args[0]
27989 b := v.Block
// match: (Rsh16Ux8 <t> x y)
// cond: !shiftIsBounded(v)
// result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPBconst y [16])))
27993 for {
27994 t := v.Type
27995 x := v_0
27996 y := v_1
27997 if !(!shiftIsBounded(v)) {
27998 break
27999 }
28000 v.reset(OpAMD64ANDL)
28001 v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
28002 v0.AddArg2(x, y)
28003 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
28004 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
28005 v2.AuxInt = int8ToAuxInt(16)
28006 v2.AddArg(y)
28007 v1.AddArg(v2)
28008 v.AddArg2(v0, v1)
28009 return true
28010 }
// match: (Rsh16Ux8 x y)
// cond: shiftIsBounded(v)
// result: (SHRW x y)
28014 for {
28015 x := v_0
28016 y := v_1
28017 if !(shiftIsBounded(v)) {
28018 break
28019 }
28020 v.reset(OpAMD64SHRW)
28021 v.AddArg2(x, y)
28022 return true
28023 }
28024 return false
28025 }
28026 func rewriteValueAMD64_OpRsh16x16(v *Value) bool {
28027 v_1 := v.Args[1]
28028 v_0 := v.Args[0]
28029 b := v.Block
// match: (Rsh16x16 <t> x y)
// cond: !shiftIsBounded(v)
// result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [16])))))
28033 for {
28034 t := v.Type
28035 x := v_0
28036 y := v_1
28037 if !(!shiftIsBounded(v)) {
28038 break
28039 }
28040 v.reset(OpAMD64SARW)
28041 v.Type = t
28042 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
28043 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
28044 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
28045 v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
28046 v3.AuxInt = int16ToAuxInt(16)
28047 v3.AddArg(y)
28048 v2.AddArg(v3)
28049 v1.AddArg(v2)
28050 v0.AddArg2(y, v1)
28051 v.AddArg2(x, v0)
28052 return true
28053 }
// match: (Rsh16x16 x y)
// cond: shiftIsBounded(v)
// result: (SARW x y)
28057 for {
28058 x := v_0
28059 y := v_1
28060 if !(shiftIsBounded(v)) {
28061 break
28062 }
28063 v.reset(OpAMD64SARW)
28064 v.AddArg2(x, y)
28065 return true
28066 }
28067 return false
28068 }
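// The signed shifts cannot simply zero the result the way the unsigned rules
// do, because Go defines an oversized arithmetic shift as filling with the
// sign bit. So the count itself is saturated instead: SBBLcarrymask is
// all-ones while y is in range, hence NOTL/ORL leave y alone in range and
// force it to all-ones otherwise, and an arithmetic shift by that huge count
// yields pure sign fill. As a scalar sketch (illustrative only):
//
//	if y >= 16 {
//		y = ^uint32(0) // saturate; SARW by a huge count leaves only sign bits
//	}
//	result := x >> y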
28069 func rewriteValueAMD64_OpRsh16x32(v *Value) bool {
28070 v_1 := v.Args[1]
28071 v_0 := v.Args[0]
28072 b := v.Block
// match: (Rsh16x32 <t> x y)
// cond: !shiftIsBounded(v)
// result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [16])))))
28076 for {
28077 t := v.Type
28078 x := v_0
28079 y := v_1
28080 if !(!shiftIsBounded(v)) {
28081 break
28082 }
28083 v.reset(OpAMD64SARW)
28084 v.Type = t
28085 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
28086 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
28087 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
28088 v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
28089 v3.AuxInt = int32ToAuxInt(16)
28090 v3.AddArg(y)
28091 v2.AddArg(v3)
28092 v1.AddArg(v2)
28093 v0.AddArg2(y, v1)
28094 v.AddArg2(x, v0)
28095 return true
28096 }
// match: (Rsh16x32 x y)
// cond: shiftIsBounded(v)
// result: (SARW x y)
28100 for {
28101 x := v_0
28102 y := v_1
28103 if !(shiftIsBounded(v)) {
28104 break
28105 }
28106 v.reset(OpAMD64SARW)
28107 v.AddArg2(x, y)
28108 return true
28109 }
28110 return false
28111 }
28112 func rewriteValueAMD64_OpRsh16x64(v *Value) bool {
28113 v_1 := v.Args[1]
28114 v_0 := v.Args[0]
28115 b := v.Block
// match: (Rsh16x64 <t> x y)
// cond: !shiftIsBounded(v)
// result: (SARW <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [16])))))
28119 for {
28120 t := v.Type
28121 x := v_0
28122 y := v_1
28123 if !(!shiftIsBounded(v)) {
28124 break
28125 }
28126 v.reset(OpAMD64SARW)
28127 v.Type = t
28128 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
28129 v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
28130 v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
28131 v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
28132 v3.AuxInt = int32ToAuxInt(16)
28133 v3.AddArg(y)
28134 v2.AddArg(v3)
28135 v1.AddArg(v2)
28136 v0.AddArg2(y, v1)
28137 v.AddArg2(x, v0)
28138 return true
28139 }
// match: (Rsh16x64 x y)
// cond: shiftIsBounded(v)
// result: (SARW x y)
28143 for {
28144 x := v_0
28145 y := v_1
28146 if !(shiftIsBounded(v)) {
28147 break
28148 }
28149 v.reset(OpAMD64SARW)
28150 v.AddArg2(x, y)
28151 return true
28152 }
28153 return false
28154 }
28155 func rewriteValueAMD64_OpRsh16x8(v *Value) bool {
28156 v_1 := v.Args[1]
28157 v_0 := v.Args[0]
28158 b := v.Block
// match: (Rsh16x8 <t> x y)
// cond: !shiftIsBounded(v)
// result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [16])))))
28162 for {
28163 t := v.Type
28164 x := v_0
28165 y := v_1
28166 if !(!shiftIsBounded(v)) {
28167 break
28168 }
28169 v.reset(OpAMD64SARW)
28170 v.Type = t
28171 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
28172 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
28173 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
28174 v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
28175 v3.AuxInt = int8ToAuxInt(16)
28176 v3.AddArg(y)
28177 v2.AddArg(v3)
28178 v1.AddArg(v2)
28179 v0.AddArg2(y, v1)
28180 v.AddArg2(x, v0)
28181 return true
28182 }
// match: (Rsh16x8 x y)
// cond: shiftIsBounded(v)
// result: (SARW x y)
28186 for {
28187 x := v_0
28188 y := v_1
28189 if !(shiftIsBounded(v)) {
28190 break
28191 }
28192 v.reset(OpAMD64SARW)
28193 v.AddArg2(x, y)
28194 return true
28195 }
28196 return false
28197 }
28198 func rewriteValueAMD64_OpRsh32Ux16(v *Value) bool {
28199 v_1 := v.Args[1]
28200 v_0 := v.Args[0]
28201 b := v.Block
// match: (Rsh32Ux16 <t> x y)
// cond: !shiftIsBounded(v)
// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
28205 for {
28206 t := v.Type
28207 x := v_0
28208 y := v_1
28209 if !(!shiftIsBounded(v)) {
28210 break
28211 }
28212 v.reset(OpAMD64ANDL)
28213 v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
28214 v0.AddArg2(x, y)
28215 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
28216 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
28217 v2.AuxInt = int16ToAuxInt(32)
28218 v2.AddArg(y)
28219 v1.AddArg(v2)
28220 v.AddArg2(v0, v1)
28221 return true
28222 }
// match: (Rsh32Ux16 x y)
// cond: shiftIsBounded(v)
// result: (SHRL x y)
28226 for {
28227 x := v_0
28228 y := v_1
28229 if !(shiftIsBounded(v)) {
28230 break
28231 }
28232 v.reset(OpAMD64SHRL)
28233 v.AddArg2(x, y)
28234 return true
28235 }
28236 return false
28237 }
28238 func rewriteValueAMD64_OpRsh32Ux32(v *Value) bool {
28239 v_1 := v.Args[1]
28240 v_0 := v.Args[0]
28241 b := v.Block
// match: (Rsh32Ux32 <t> x y)
// cond: !shiftIsBounded(v)
// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
28245 for {
28246 t := v.Type
28247 x := v_0
28248 y := v_1
28249 if !(!shiftIsBounded(v)) {
28250 break
28251 }
28252 v.reset(OpAMD64ANDL)
28253 v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
28254 v0.AddArg2(x, y)
28255 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
28256 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
28257 v2.AuxInt = int32ToAuxInt(32)
28258 v2.AddArg(y)
28259 v1.AddArg(v2)
28260 v.AddArg2(v0, v1)
28261 return true
28262 }
// match: (Rsh32Ux32 x y)
// cond: shiftIsBounded(v)
// result: (SHRL x y)
28266 for {
28267 x := v_0
28268 y := v_1
28269 if !(shiftIsBounded(v)) {
28270 break
28271 }
28272 v.reset(OpAMD64SHRL)
28273 v.AddArg2(x, y)
28274 return true
28275 }
28276 return false
28277 }
28278 func rewriteValueAMD64_OpRsh32Ux64(v *Value) bool {
28279 v_1 := v.Args[1]
28280 v_0 := v.Args[0]
28281 b := v.Block
// match: (Rsh32Ux64 <t> x y)
// cond: !shiftIsBounded(v)
// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
28285 for {
28286 t := v.Type
28287 x := v_0
28288 y := v_1
28289 if !(!shiftIsBounded(v)) {
28290 break
28291 }
28292 v.reset(OpAMD64ANDL)
28293 v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
28294 v0.AddArg2(x, y)
28295 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
28296 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
28297 v2.AuxInt = int32ToAuxInt(32)
28298 v2.AddArg(y)
28299 v1.AddArg(v2)
28300 v.AddArg2(v0, v1)
28301 return true
28302 }
// match: (Rsh32Ux64 x y)
// cond: shiftIsBounded(v)
// result: (SHRL x y)
28306 for {
28307 x := v_0
28308 y := v_1
28309 if !(shiftIsBounded(v)) {
28310 break
28311 }
28312 v.reset(OpAMD64SHRL)
28313 v.AddArg2(x, y)
28314 return true
28315 }
28316 return false
28317 }
28318 func rewriteValueAMD64_OpRsh32Ux8(v *Value) bool {
28319 v_1 := v.Args[1]
28320 v_0 := v.Args[0]
28321 b := v.Block
// match: (Rsh32Ux8 <t> x y)
// cond: !shiftIsBounded(v)
// result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
28325 for {
28326 t := v.Type
28327 x := v_0
28328 y := v_1
28329 if !(!shiftIsBounded(v)) {
28330 break
28331 }
28332 v.reset(OpAMD64ANDL)
28333 v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
28334 v0.AddArg2(x, y)
28335 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
28336 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
28337 v2.AuxInt = int8ToAuxInt(32)
28338 v2.AddArg(y)
28339 v1.AddArg(v2)
28340 v.AddArg2(v0, v1)
28341 return true
28342 }
// match: (Rsh32Ux8 x y)
// cond: shiftIsBounded(v)
// result: (SHRL x y)
28346 for {
28347 x := v_0
28348 y := v_1
28349 if !(shiftIsBounded(v)) {
28350 break
28351 }
28352 v.reset(OpAMD64SHRL)
28353 v.AddArg2(x, y)
28354 return true
28355 }
28356 return false
28357 }
28358 func rewriteValueAMD64_OpRsh32x16(v *Value) bool {
28359 v_1 := v.Args[1]
28360 v_0 := v.Args[0]
28361 b := v.Block
// match: (Rsh32x16 <t> x y)
// cond: !shiftIsBounded(v)
// result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [32])))))
28365 for {
28366 t := v.Type
28367 x := v_0
28368 y := v_1
28369 if !(!shiftIsBounded(v)) {
28370 break
28371 }
28372 v.reset(OpAMD64SARL)
28373 v.Type = t
28374 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
28375 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
28376 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
28377 v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
28378 v3.AuxInt = int16ToAuxInt(32)
28379 v3.AddArg(y)
28380 v2.AddArg(v3)
28381 v1.AddArg(v2)
28382 v0.AddArg2(y, v1)
28383 v.AddArg2(x, v0)
28384 return true
28385 }
// match: (Rsh32x16 x y)
// cond: shiftIsBounded(v)
// result: (SARL x y)
28389 for {
28390 x := v_0
28391 y := v_1
28392 if !(shiftIsBounded(v)) {
28393 break
28394 }
28395 v.reset(OpAMD64SARL)
28396 v.AddArg2(x, y)
28397 return true
28398 }
28399 return false
28400 }
28401 func rewriteValueAMD64_OpRsh32x32(v *Value) bool {
28402 v_1 := v.Args[1]
28403 v_0 := v.Args[0]
28404 b := v.Block
// match: (Rsh32x32 <t> x y)
// cond: !shiftIsBounded(v)
// result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [32])))))
28408 for {
28409 t := v.Type
28410 x := v_0
28411 y := v_1
28412 if !(!shiftIsBounded(v)) {
28413 break
28414 }
28415 v.reset(OpAMD64SARL)
28416 v.Type = t
28417 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
28418 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
28419 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
28420 v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
28421 v3.AuxInt = int32ToAuxInt(32)
28422 v3.AddArg(y)
28423 v2.AddArg(v3)
28424 v1.AddArg(v2)
28425 v0.AddArg2(y, v1)
28426 v.AddArg2(x, v0)
28427 return true
28428 }
// match: (Rsh32x32 x y)
// cond: shiftIsBounded(v)
// result: (SARL x y)
28432 for {
28433 x := v_0
28434 y := v_1
28435 if !(shiftIsBounded(v)) {
28436 break
28437 }
28438 v.reset(OpAMD64SARL)
28439 v.AddArg2(x, y)
28440 return true
28441 }
28442 return false
28443 }
28444 func rewriteValueAMD64_OpRsh32x64(v *Value) bool {
28445 v_1 := v.Args[1]
28446 v_0 := v.Args[0]
28447 b := v.Block
// match: (Rsh32x64 <t> x y)
// cond: !shiftIsBounded(v)
// result: (SARL <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [32])))))
28451 for {
28452 t := v.Type
28453 x := v_0
28454 y := v_1
28455 if !(!shiftIsBounded(v)) {
28456 break
28457 }
28458 v.reset(OpAMD64SARL)
28459 v.Type = t
28460 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
28461 v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
28462 v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
28463 v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
28464 v3.AuxInt = int32ToAuxInt(32)
28465 v3.AddArg(y)
28466 v2.AddArg(v3)
28467 v1.AddArg(v2)
28468 v0.AddArg2(y, v1)
28469 v.AddArg2(x, v0)
28470 return true
28471 }
// match: (Rsh32x64 x y)
// cond: shiftIsBounded(v)
// result: (SARL x y)
28475 for {
28476 x := v_0
28477 y := v_1
28478 if !(shiftIsBounded(v)) {
28479 break
28480 }
28481 v.reset(OpAMD64SARL)
28482 v.AddArg2(x, y)
28483 return true
28484 }
28485 return false
28486 }
28487 func rewriteValueAMD64_OpRsh32x8(v *Value) bool {
28488 v_1 := v.Args[1]
28489 v_0 := v.Args[0]
28490 b := v.Block
// match: (Rsh32x8 <t> x y)
// cond: !shiftIsBounded(v)
// result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [32])))))
28494 for {
28495 t := v.Type
28496 x := v_0
28497 y := v_1
28498 if !(!shiftIsBounded(v)) {
28499 break
28500 }
28501 v.reset(OpAMD64SARL)
28502 v.Type = t
28503 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
28504 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
28505 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
28506 v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
28507 v3.AuxInt = int8ToAuxInt(32)
28508 v3.AddArg(y)
28509 v2.AddArg(v3)
28510 v1.AddArg(v2)
28511 v0.AddArg2(y, v1)
28512 v.AddArg2(x, v0)
28513 return true
28514 }
// match: (Rsh32x8 x y)
// cond: shiftIsBounded(v)
// result: (SARL x y)
28518 for {
28519 x := v_0
28520 y := v_1
28521 if !(shiftIsBounded(v)) {
28522 break
28523 }
28524 v.reset(OpAMD64SARL)
28525 v.AddArg2(x, y)
28526 return true
28527 }
28528 return false
28529 }
28530 func rewriteValueAMD64_OpRsh64Ux16(v *Value) bool {
28531 v_1 := v.Args[1]
28532 v_0 := v.Args[0]
28533 b := v.Block
// match: (Rsh64Ux16 <t> x y)
// cond: !shiftIsBounded(v)
// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64])))
28537 for {
28538 t := v.Type
28539 x := v_0
28540 y := v_1
28541 if !(!shiftIsBounded(v)) {
28542 break
28543 }
28544 v.reset(OpAMD64ANDQ)
28545 v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
28546 v0.AddArg2(x, y)
28547 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
28548 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
28549 v2.AuxInt = int16ToAuxInt(64)
28550 v2.AddArg(y)
28551 v1.AddArg(v2)
28552 v.AddArg2(v0, v1)
28553 return true
28554 }
// match: (Rsh64Ux16 x y)
// cond: shiftIsBounded(v)
// result: (SHRQ x y)
28558 for {
28559 x := v_0
28560 y := v_1
28561 if !(shiftIsBounded(v)) {
28562 break
28563 }
28564 v.reset(OpAMD64SHRQ)
28565 v.AddArg2(x, y)
28566 return true
28567 }
28568 return false
28569 }
28570 func rewriteValueAMD64_OpRsh64Ux32(v *Value) bool {
28571 v_1 := v.Args[1]
28572 v_0 := v.Args[0]
28573 b := v.Block
// match: (Rsh64Ux32 <t> x y)
// cond: !shiftIsBounded(v)
// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64])))
28577 for {
28578 t := v.Type
28579 x := v_0
28580 y := v_1
28581 if !(!shiftIsBounded(v)) {
28582 break
28583 }
28584 v.reset(OpAMD64ANDQ)
28585 v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
28586 v0.AddArg2(x, y)
28587 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
28588 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
28589 v2.AuxInt = int32ToAuxInt(64)
28590 v2.AddArg(y)
28591 v1.AddArg(v2)
28592 v.AddArg2(v0, v1)
28593 return true
28594 }
// match: (Rsh64Ux32 x y)
// cond: shiftIsBounded(v)
// result: (SHRQ x y)
28598 for {
28599 x := v_0
28600 y := v_1
28601 if !(shiftIsBounded(v)) {
28602 break
28603 }
28604 v.reset(OpAMD64SHRQ)
28605 v.AddArg2(x, y)
28606 return true
28607 }
28608 return false
28609 }
28610 func rewriteValueAMD64_OpRsh64Ux64(v *Value) bool {
28611 v_1 := v.Args[1]
28612 v_0 := v.Args[0]
28613 b := v.Block
// match: (Rsh64Ux64 <t> x y)
// cond: !shiftIsBounded(v)
// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64])))
28617 for {
28618 t := v.Type
28619 x := v_0
28620 y := v_1
28621 if !(!shiftIsBounded(v)) {
28622 break
28623 }
28624 v.reset(OpAMD64ANDQ)
28625 v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
28626 v0.AddArg2(x, y)
28627 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
28628 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
28629 v2.AuxInt = int32ToAuxInt(64)
28630 v2.AddArg(y)
28631 v1.AddArg(v2)
28632 v.AddArg2(v0, v1)
28633 return true
28634 }
// match: (Rsh64Ux64 x y)
// cond: shiftIsBounded(v)
// result: (SHRQ x y)
28638 for {
28639 x := v_0
28640 y := v_1
28641 if !(shiftIsBounded(v)) {
28642 break
28643 }
28644 v.reset(OpAMD64SHRQ)
28645 v.AddArg2(x, y)
28646 return true
28647 }
28648 return false
28649 }
28650 func rewriteValueAMD64_OpRsh64Ux8(v *Value) bool {
28651 v_1 := v.Args[1]
28652 v_0 := v.Args[0]
28653 b := v.Block
// match: (Rsh64Ux8 <t> x y)
// cond: !shiftIsBounded(v)
// result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64])))
28657 for {
28658 t := v.Type
28659 x := v_0
28660 y := v_1
28661 if !(!shiftIsBounded(v)) {
28662 break
28663 }
28664 v.reset(OpAMD64ANDQ)
28665 v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
28666 v0.AddArg2(x, y)
28667 v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
28668 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
28669 v2.AuxInt = int8ToAuxInt(64)
28670 v2.AddArg(y)
28671 v1.AddArg(v2)
28672 v.AddArg2(v0, v1)
28673 return true
28674 }
// match: (Rsh64Ux8 x y)
// cond: shiftIsBounded(v)
// result: (SHRQ x y)
28678 for {
28679 x := v_0
28680 y := v_1
28681 if !(shiftIsBounded(v)) {
28682 break
28683 }
28684 v.reset(OpAMD64SHRQ)
28685 v.AddArg2(x, y)
28686 return true
28687 }
28688 return false
28689 }
28690 func rewriteValueAMD64_OpRsh64x16(v *Value) bool {
28691 v_1 := v.Args[1]
28692 v_0 := v.Args[0]
28693 b := v.Block
// match: (Rsh64x16 <t> x y)
// cond: !shiftIsBounded(v)
// result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [64])))))
28697 for {
28698 t := v.Type
28699 x := v_0
28700 y := v_1
28701 if !(!shiftIsBounded(v)) {
28702 break
28703 }
28704 v.reset(OpAMD64SARQ)
28705 v.Type = t
28706 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
28707 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
28708 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
28709 v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
28710 v3.AuxInt = int16ToAuxInt(64)
28711 v3.AddArg(y)
28712 v2.AddArg(v3)
28713 v1.AddArg(v2)
28714 v0.AddArg2(y, v1)
28715 v.AddArg2(x, v0)
28716 return true
28717 }
// match: (Rsh64x16 x y)
// cond: shiftIsBounded(v)
// result: (SARQ x y)
28721 for {
28722 x := v_0
28723 y := v_1
28724 if !(shiftIsBounded(v)) {
28725 break
28726 }
28727 v.reset(OpAMD64SARQ)
28728 v.AddArg2(x, y)
28729 return true
28730 }
28731 return false
28732 }
28733 func rewriteValueAMD64_OpRsh64x32(v *Value) bool {
28734 v_1 := v.Args[1]
28735 v_0 := v.Args[0]
28736 b := v.Block
// match: (Rsh64x32 <t> x y)
// cond: !shiftIsBounded(v)
// result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [64])))))
28740 for {
28741 t := v.Type
28742 x := v_0
28743 y := v_1
28744 if !(!shiftIsBounded(v)) {
28745 break
28746 }
28747 v.reset(OpAMD64SARQ)
28748 v.Type = t
28749 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
28750 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
28751 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
28752 v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
28753 v3.AuxInt = int32ToAuxInt(64)
28754 v3.AddArg(y)
28755 v2.AddArg(v3)
28756 v1.AddArg(v2)
28757 v0.AddArg2(y, v1)
28758 v.AddArg2(x, v0)
28759 return true
28760 }
// match: (Rsh64x32 x y)
// cond: shiftIsBounded(v)
// result: (SARQ x y)
28764 for {
28765 x := v_0
28766 y := v_1
28767 if !(shiftIsBounded(v)) {
28768 break
28769 }
28770 v.reset(OpAMD64SARQ)
28771 v.AddArg2(x, y)
28772 return true
28773 }
28774 return false
28775 }
28776 func rewriteValueAMD64_OpRsh64x64(v *Value) bool {
28777 v_1 := v.Args[1]
28778 v_0 := v.Args[0]
28779 b := v.Block
// match: (Rsh64x64 <t> x y)
// cond: !shiftIsBounded(v)
// result: (SARQ <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [64])))))
28783 for {
28784 t := v.Type
28785 x := v_0
28786 y := v_1
28787 if !(!shiftIsBounded(v)) {
28788 break
28789 }
28790 v.reset(OpAMD64SARQ)
28791 v.Type = t
28792 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
28793 v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
28794 v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
28795 v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
28796 v3.AuxInt = int32ToAuxInt(64)
28797 v3.AddArg(y)
28798 v2.AddArg(v3)
28799 v1.AddArg(v2)
28800 v0.AddArg2(y, v1)
28801 v.AddArg2(x, v0)
28802 return true
28803 }
// match: (Rsh64x64 x y)
// cond: shiftIsBounded(v)
// result: (SARQ x y)
28807 for {
28808 x := v_0
28809 y := v_1
28810 if !(shiftIsBounded(v)) {
28811 break
28812 }
28813 v.reset(OpAMD64SARQ)
28814 v.AddArg2(x, y)
28815 return true
28816 }
28817 return false
28818 }
28819 func rewriteValueAMD64_OpRsh64x8(v *Value) bool {
28820 v_1 := v.Args[1]
28821 v_0 := v.Args[0]
28822 b := v.Block
// match: (Rsh64x8 <t> x y)
// cond: !shiftIsBounded(v)
// result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [64])))))
28826 for {
28827 t := v.Type
28828 x := v_0
28829 y := v_1
28830 if !(!shiftIsBounded(v)) {
28831 break
28832 }
28833 v.reset(OpAMD64SARQ)
28834 v.Type = t
28835 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
28836 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
28837 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
28838 v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
28839 v3.AuxInt = int8ToAuxInt(64)
28840 v3.AddArg(y)
28841 v2.AddArg(v3)
28842 v1.AddArg(v2)
28843 v0.AddArg2(y, v1)
28844 v.AddArg2(x, v0)
28845 return true
28846 }
// match: (Rsh64x8 x y)
// cond: shiftIsBounded(v)
// result: (SARQ x y)
28850 for {
28851 x := v_0
28852 y := v_1
28853 if !(shiftIsBounded(v)) {
28854 break
28855 }
28856 v.reset(OpAMD64SARQ)
28857 v.AddArg2(x, y)
28858 return true
28859 }
28860 return false
28861 }
28862 func rewriteValueAMD64_OpRsh8Ux16(v *Value) bool {
28863 v_1 := v.Args[1]
28864 v_0 := v.Args[0]
28865 b := v.Block
// match: (Rsh8Ux16 <t> x y)
// cond: !shiftIsBounded(v)
// result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPWconst y [8])))
28869 for {
28870 t := v.Type
28871 x := v_0
28872 y := v_1
28873 if !(!shiftIsBounded(v)) {
28874 break
28875 }
28876 v.reset(OpAMD64ANDL)
28877 v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
28878 v0.AddArg2(x, y)
28879 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
28880 v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
28881 v2.AuxInt = int16ToAuxInt(8)
28882 v2.AddArg(y)
28883 v1.AddArg(v2)
28884 v.AddArg2(v0, v1)
28885 return true
28886 }
// match: (Rsh8Ux16 x y)
// cond: shiftIsBounded(v)
// result: (SHRB x y)
28890 for {
28891 x := v_0
28892 y := v_1
28893 if !(shiftIsBounded(v)) {
28894 break
28895 }
28896 v.reset(OpAMD64SHRB)
28897 v.AddArg2(x, y)
28898 return true
28899 }
28900 return false
28901 }
28902 func rewriteValueAMD64_OpRsh8Ux32(v *Value) bool {
28903 v_1 := v.Args[1]
28904 v_0 := v.Args[0]
28905 b := v.Block
// match: (Rsh8Ux32 <t> x y)
// cond: !shiftIsBounded(v)
// result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPLconst y [8])))
28909 for {
28910 t := v.Type
28911 x := v_0
28912 y := v_1
28913 if !(!shiftIsBounded(v)) {
28914 break
28915 }
28916 v.reset(OpAMD64ANDL)
28917 v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
28918 v0.AddArg2(x, y)
28919 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
28920 v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
28921 v2.AuxInt = int32ToAuxInt(8)
28922 v2.AddArg(y)
28923 v1.AddArg(v2)
28924 v.AddArg2(v0, v1)
28925 return true
28926 }
// match: (Rsh8Ux32 x y)
// cond: shiftIsBounded(v)
// result: (SHRB x y)
28930 for {
28931 x := v_0
28932 y := v_1
28933 if !(shiftIsBounded(v)) {
28934 break
28935 }
28936 v.reset(OpAMD64SHRB)
28937 v.AddArg2(x, y)
28938 return true
28939 }
28940 return false
28941 }
28942 func rewriteValueAMD64_OpRsh8Ux64(v *Value) bool {
28943 v_1 := v.Args[1]
28944 v_0 := v.Args[0]
28945 b := v.Block
// match: (Rsh8Ux64 <t> x y)
// cond: !shiftIsBounded(v)
// result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPQconst y [8])))
28949 for {
28950 t := v.Type
28951 x := v_0
28952 y := v_1
28953 if !(!shiftIsBounded(v)) {
28954 break
28955 }
28956 v.reset(OpAMD64ANDL)
28957 v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
28958 v0.AddArg2(x, y)
28959 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
28960 v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
28961 v2.AuxInt = int32ToAuxInt(8)
28962 v2.AddArg(y)
28963 v1.AddArg(v2)
28964 v.AddArg2(v0, v1)
28965 return true
28966 }
// match: (Rsh8Ux64 x y)
// cond: shiftIsBounded(v)
// result: (SHRB x y)
28970 for {
28971 x := v_0
28972 y := v_1
28973 if !(shiftIsBounded(v)) {
28974 break
28975 }
28976 v.reset(OpAMD64SHRB)
28977 v.AddArg2(x, y)
28978 return true
28979 }
28980 return false
28981 }
28982 func rewriteValueAMD64_OpRsh8Ux8(v *Value) bool {
28983 v_1 := v.Args[1]
28984 v_0 := v.Args[0]
28985 b := v.Block
// match: (Rsh8Ux8 <t> x y)
// cond: !shiftIsBounded(v)
// result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPBconst y [8])))
28989 for {
28990 t := v.Type
28991 x := v_0
28992 y := v_1
28993 if !(!shiftIsBounded(v)) {
28994 break
28995 }
28996 v.reset(OpAMD64ANDL)
28997 v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
28998 v0.AddArg2(x, y)
28999 v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
29000 v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
29001 v2.AuxInt = int8ToAuxInt(8)
29002 v2.AddArg(y)
29003 v1.AddArg(v2)
29004 v.AddArg2(v0, v1)
29005 return true
29006 }
// match: (Rsh8Ux8 x y)
// cond: shiftIsBounded(v)
// result: (SHRB x y)
29010 for {
29011 x := v_0
29012 y := v_1
29013 if !(shiftIsBounded(v)) {
29014 break
29015 }
29016 v.reset(OpAMD64SHRB)
29017 v.AddArg2(x, y)
29018 return true
29019 }
29020 return false
29021 }
29022 func rewriteValueAMD64_OpRsh8x16(v *Value) bool {
29023 v_1 := v.Args[1]
29024 v_0 := v.Args[0]
29025 b := v.Block
// match: (Rsh8x16 <t> x y)
// cond: !shiftIsBounded(v)
// result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [8])))))
29029 for {
29030 t := v.Type
29031 x := v_0
29032 y := v_1
29033 if !(!shiftIsBounded(v)) {
29034 break
29035 }
29036 v.reset(OpAMD64SARB)
29037 v.Type = t
29038 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
29039 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
29040 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
29041 v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
29042 v3.AuxInt = int16ToAuxInt(8)
29043 v3.AddArg(y)
29044 v2.AddArg(v3)
29045 v1.AddArg(v2)
29046 v0.AddArg2(y, v1)
29047 v.AddArg2(x, v0)
29048 return true
29049 }
// match: (Rsh8x16 x y)
// cond: shiftIsBounded(v)
// result: (SARB x y)
29053 for {
29054 x := v_0
29055 y := v_1
29056 if !(shiftIsBounded(v)) {
29057 break
29058 }
29059 v.reset(OpAMD64SARB)
29060 v.AddArg2(x, y)
29061 return true
29062 }
29063 return false
29064 }
29065 func rewriteValueAMD64_OpRsh8x32(v *Value) bool {
29066 v_1 := v.Args[1]
29067 v_0 := v.Args[0]
29068 b := v.Block
// match: (Rsh8x32 <t> x y)
// cond: !shiftIsBounded(v)
// result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [8])))))
29072 for {
29073 t := v.Type
29074 x := v_0
29075 y := v_1
29076 if !(!shiftIsBounded(v)) {
29077 break
29078 }
29079 v.reset(OpAMD64SARB)
29080 v.Type = t
29081 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
29082 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
29083 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
29084 v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
29085 v3.AuxInt = int32ToAuxInt(8)
29086 v3.AddArg(y)
29087 v2.AddArg(v3)
29088 v1.AddArg(v2)
29089 v0.AddArg2(y, v1)
29090 v.AddArg2(x, v0)
29091 return true
29092 }
// match: (Rsh8x32 x y)
// cond: shiftIsBounded(v)
// result: (SARB x y)
29096 for {
29097 x := v_0
29098 y := v_1
29099 if !(shiftIsBounded(v)) {
29100 break
29101 }
29102 v.reset(OpAMD64SARB)
29103 v.AddArg2(x, y)
29104 return true
29105 }
29106 return false
29107 }
29108 func rewriteValueAMD64_OpRsh8x64(v *Value) bool {
29109 v_1 := v.Args[1]
29110 v_0 := v.Args[0]
29111 b := v.Block
// match: (Rsh8x64 <t> x y)
// cond: !shiftIsBounded(v)
// result: (SARB <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [8])))))
29115 for {
29116 t := v.Type
29117 x := v_0
29118 y := v_1
29119 if !(!shiftIsBounded(v)) {
29120 break
29121 }
29122 v.reset(OpAMD64SARB)
29123 v.Type = t
29124 v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
29125 v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
29126 v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
29127 v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
29128 v3.AuxInt = int32ToAuxInt(8)
29129 v3.AddArg(y)
29130 v2.AddArg(v3)
29131 v1.AddArg(v2)
29132 v0.AddArg2(y, v1)
29133 v.AddArg2(x, v0)
29134 return true
29135 }
// match: (Rsh8x64 x y)
// cond: shiftIsBounded(v)
// result: (SARB x y)
29139 for {
29140 x := v_0
29141 y := v_1
29142 if !(shiftIsBounded(v)) {
29143 break
29144 }
29145 v.reset(OpAMD64SARB)
29146 v.AddArg2(x, y)
29147 return true
29148 }
29149 return false
29150 }
29151 func rewriteValueAMD64_OpRsh8x8(v *Value) bool {
29152 v_1 := v.Args[1]
29153 v_0 := v.Args[0]
29154 b := v.Block
// match: (Rsh8x8 <t> x y)
// cond: !shiftIsBounded(v)
// result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [8])))))
29158 for {
29159 t := v.Type
29160 x := v_0
29161 y := v_1
29162 if !(!shiftIsBounded(v)) {
29163 break
29164 }
29165 v.reset(OpAMD64SARB)
29166 v.Type = t
29167 v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
29168 v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
29169 v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
29170 v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
29171 v3.AuxInt = int8ToAuxInt(8)
29172 v3.AddArg(y)
29173 v2.AddArg(v3)
29174 v1.AddArg(v2)
29175 v0.AddArg2(y, v1)
29176 v.AddArg2(x, v0)
29177 return true
29178 }
// match: (Rsh8x8 x y)
// cond: shiftIsBounded(v)
// result: (SARB x y)
29182 for {
29183 x := v_0
29184 y := v_1
29185 if !(shiftIsBounded(v)) {
29186 break
29187 }
29188 v.reset(OpAMD64SARB)
29189 v.AddArg2(x, y)
29190 return true
29191 }
29192 return false
29193 }
29194 func rewriteValueAMD64_OpSelect0(v *Value) bool {
29195 v_0 := v.Args[0]
29196 b := v.Block
29197 typ := &b.Func.Config.Types
// match: (Select0 (Mul64uover x y))
// result: (Select0 <typ.UInt64> (MULQU x y))
29200 for {
29201 if v_0.Op != OpMul64uover {
29202 break
29203 }
29204 y := v_0.Args[1]
29205 x := v_0.Args[0]
29206 v.reset(OpSelect0)
29207 v.Type = typ.UInt64
29208 v0 := b.NewValue0(v.Pos, OpAMD64MULQU, types.NewTuple(typ.UInt64, types.TypeFlags))
29209 v0.AddArg2(x, y)
29210 v.AddArg(v0)
29211 return true
29212 }
// match: (Select0 (Mul32uover x y))
// result: (Select0 <typ.UInt32> (MULLU x y))
29215 for {
29216 if v_0.Op != OpMul32uover {
29217 break
29218 }
29219 y := v_0.Args[1]
29220 x := v_0.Args[0]
29221 v.reset(OpSelect0)
29222 v.Type = typ.UInt32
29223 v0 := b.NewValue0(v.Pos, OpAMD64MULLU, types.NewTuple(typ.UInt32, types.TypeFlags))
29224 v0.AddArg2(x, y)
29225 v.AddArg(v0)
29226 return true
29227 }
// match: (Select0 (Add64carry x y c))
// result: (Select0 <typ.UInt64> (ADCQ x y (Select1 <types.TypeFlags> (NEGLflags c))))
29230 for {
29231 if v_0.Op != OpAdd64carry {
29232 break
29233 }
29234 c := v_0.Args[2]
29235 x := v_0.Args[0]
29236 y := v_0.Args[1]
29237 v.reset(OpSelect0)
29238 v.Type = typ.UInt64
29239 v0 := b.NewValue0(v.Pos, OpAMD64ADCQ, types.NewTuple(typ.UInt64, types.TypeFlags))
29240 v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
29241 v2 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags))
29242 v2.AddArg(c)
29243 v1.AddArg(v2)
29244 v0.AddArg3(x, y, v1)
29245 v.AddArg(v0)
29246 return true
29247 }
// match: (Select0 (Sub64borrow x y c))
// result: (Select0 <typ.UInt64> (SBBQ x y (Select1 <types.TypeFlags> (NEGLflags c))))
29250 for {
29251 if v_0.Op != OpSub64borrow {
29252 break
29253 }
29254 c := v_0.Args[2]
29255 x := v_0.Args[0]
29256 y := v_0.Args[1]
29257 v.reset(OpSelect0)
29258 v.Type = typ.UInt64
29259 v0 := b.NewValue0(v.Pos, OpAMD64SBBQ, types.NewTuple(typ.UInt64, types.TypeFlags))
29260 v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
29261 v2 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags))
29262 v2.AddArg(c)
29263 v1.AddArg(v2)
29264 v0.AddArg3(x, y, v1)
29265 v.AddArg(v0)
29266 return true
29267 }
// match: (Select0 <t> (AddTupleFirst32 val tuple))
// result: (ADDL val (Select0 <t> tuple))
29270 for {
29271 t := v.Type
29272 if v_0.Op != OpAMD64AddTupleFirst32 {
29273 break
29274 }
29275 tuple := v_0.Args[1]
29276 val := v_0.Args[0]
29277 v.reset(OpAMD64ADDL)
29278 v0 := b.NewValue0(v.Pos, OpSelect0, t)
29279 v0.AddArg(tuple)
29280 v.AddArg2(val, v0)
29281 return true
29282 }
// match: (Select0 <t> (AddTupleFirst64 val tuple))
// result: (ADDQ val (Select0 <t> tuple))
29285 for {
29286 t := v.Type
29287 if v_0.Op != OpAMD64AddTupleFirst64 {
29288 break
29289 }
29290 tuple := v_0.Args[1]
29291 val := v_0.Args[0]
29292 v.reset(OpAMD64ADDQ)
29293 v0 := b.NewValue0(v.Pos, OpSelect0, t)
29294 v0.AddArg(tuple)
29295 v.AddArg2(val, v0)
29296 return true
29297 }
// match: (Select0 a:(ADDQconstflags [c] x))
// cond: a.Uses == 1
// result: (ADDQconst [c] x)
29301 for {
29302 a := v_0
29303 if a.Op != OpAMD64ADDQconstflags {
29304 break
29305 }
29306 c := auxIntToInt32(a.AuxInt)
29307 x := a.Args[0]
29308 if !(a.Uses == 1) {
29309 break
29310 }
29311 v.reset(OpAMD64ADDQconst)
29312 v.AuxInt = int32ToAuxInt(c)
29313 v.AddArg(x)
29314 return true
29315 }
// match: (Select0 a:(ADDLconstflags [c] x))
// cond: a.Uses == 1
// result: (ADDLconst [c] x)
29319 for {
29320 a := v_0
29321 if a.Op != OpAMD64ADDLconstflags {
29322 break
29323 }
29324 c := auxIntToInt32(a.AuxInt)
29325 x := a.Args[0]
29326 if !(a.Uses == 1) {
29327 break
29328 }
29329 v.reset(OpAMD64ADDLconst)
29330 v.AuxInt = int32ToAuxInt(c)
29331 v.AddArg(x)
29332 return true
29333 }
29334 return false
29335 }
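// A recurring trick in the Select0/Select1 rules for Add64carry and
// Sub64borrow: between ops the carry lives as a 0/1 value, and NEGLflags
// converts it back into the CPU carry flag (negating a nonzero value sets
// carry), which ADCQ/SBBQ then consume. Select0 of the tuple is the 64-bit
// sum or difference; Select1 re-extracts the outgoing carry via
// SBBQcarrymask/NEGQ to normalize it back to 0 or 1.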
29336 func rewriteValueAMD64_OpSelect1(v *Value) bool {
29337 v_0 := v.Args[0]
29338 b := v.Block
29339 typ := &b.Func.Config.Types
// match: (Select1 (Mul64uover x y))
// result: (SETO (Select1 <types.TypeFlags> (MULQU x y)))
29342 for {
29343 if v_0.Op != OpMul64uover {
29344 break
29345 }
29346 y := v_0.Args[1]
29347 x := v_0.Args[0]
29348 v.reset(OpAMD64SETO)
29349 v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
29350 v1 := b.NewValue0(v.Pos, OpAMD64MULQU, types.NewTuple(typ.UInt64, types.TypeFlags))
29351 v1.AddArg2(x, y)
29352 v0.AddArg(v1)
29353 v.AddArg(v0)
29354 return true
29355 }
// match: (Select1 (Mul32uover x y))
// result: (SETO (Select1 <types.TypeFlags> (MULLU x y)))
29358 for {
29359 if v_0.Op != OpMul32uover {
29360 break
29361 }
29362 y := v_0.Args[1]
29363 x := v_0.Args[0]
29364 v.reset(OpAMD64SETO)
29365 v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
29366 v1 := b.NewValue0(v.Pos, OpAMD64MULLU, types.NewTuple(typ.UInt32, types.TypeFlags))
29367 v1.AddArg2(x, y)
29368 v0.AddArg(v1)
29369 v.AddArg(v0)
29370 return true
29371 }
// match: (Select1 (Add64carry x y c))
// result: (NEGQ <typ.UInt64> (SBBQcarrymask <typ.UInt64> (Select1 <types.TypeFlags> (ADCQ x y (Select1 <types.TypeFlags> (NEGLflags c))))))
29374 for {
29375 if v_0.Op != OpAdd64carry {
29376 break
29377 }
29378 c := v_0.Args[2]
29379 x := v_0.Args[0]
29380 y := v_0.Args[1]
29381 v.reset(OpAMD64NEGQ)
29382 v.Type = typ.UInt64
29383 v0 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, typ.UInt64)
29384 v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
29385 v2 := b.NewValue0(v.Pos, OpAMD64ADCQ, types.NewTuple(typ.UInt64, types.TypeFlags))
29386 v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
29387 v4 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags))
29388 v4.AddArg(c)
29389 v3.AddArg(v4)
29390 v2.AddArg3(x, y, v3)
29391 v1.AddArg(v2)
29392 v0.AddArg(v1)
29393 v.AddArg(v0)
29394 return true
29395 }
// match: (Select1 (Sub64borrow x y c))
// result: (NEGQ <typ.UInt64> (SBBQcarrymask <typ.UInt64> (Select1 <types.TypeFlags> (SBBQ x y (Select1 <types.TypeFlags> (NEGLflags c))))))
29398 for {
29399 if v_0.Op != OpSub64borrow {
29400 break
29401 }
29402 c := v_0.Args[2]
29403 x := v_0.Args[0]
29404 y := v_0.Args[1]
29405 v.reset(OpAMD64NEGQ)
29406 v.Type = typ.UInt64
29407 v0 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, typ.UInt64)
29408 v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
29409 v2 := b.NewValue0(v.Pos, OpAMD64SBBQ, types.NewTuple(typ.UInt64, types.TypeFlags))
29410 v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
29411 v4 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags))
29412 v4.AddArg(c)
29413 v3.AddArg(v4)
29414 v2.AddArg3(x, y, v3)
29415 v1.AddArg(v2)
29416 v0.AddArg(v1)
29417 v.AddArg(v0)
29418 return true
29419 }
// match: (Select1 (NEGLflags (MOVQconst [0])))
// result: (FlagEQ)
29422 for {
29423 if v_0.Op != OpAMD64NEGLflags {
29424 break
29425 }
29426 v_0_0 := v_0.Args[0]
29427 if v_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0.AuxInt) != 0 {
29428 break
29429 }
29430 v.reset(OpAMD64FlagEQ)
29431 return true
29432 }
// match: (Select1 (NEGLflags (NEGQ (SBBQcarrymask x))))
// result: x
29435 for {
29436 if v_0.Op != OpAMD64NEGLflags {
29437 break
29438 }
29439 v_0_0 := v_0.Args[0]
29440 if v_0_0.Op != OpAMD64NEGQ {
29441 break
29442 }
29443 v_0_0_0 := v_0_0.Args[0]
29444 if v_0_0_0.Op != OpAMD64SBBQcarrymask {
29445 break
29446 }
29447 x := v_0_0_0.Args[0]
29448 v.copyOf(x)
29449 return true
29450 }
// match: (Select1 (AddTupleFirst32 _ tuple))
// result: (Select1 tuple)
29453 for {
29454 if v_0.Op != OpAMD64AddTupleFirst32 {
29455 break
29456 }
29457 tuple := v_0.Args[1]
29458 v.reset(OpSelect1)
29459 v.AddArg(tuple)
29460 return true
29461 }
// match: (Select1 (AddTupleFirst64 _ tuple))
// result: (Select1 tuple)
29464 for {
29465 if v_0.Op != OpAMD64AddTupleFirst64 {
29466 break
29467 }
29468 tuple := v_0.Args[1]
29469 v.reset(OpSelect1)
29470 v.AddArg(tuple)
29471 return true
29472 }
// match: (Select1 a:(LoweredAtomicAnd64 ptr val mem))
// cond: a.Uses == 1 && clobber(a)
// result: (ANDQlock ptr val mem)
29476 for {
29477 a := v_0
29478 if a.Op != OpAMD64LoweredAtomicAnd64 {
29479 break
29480 }
29481 mem := a.Args[2]
29482 ptr := a.Args[0]
29483 val := a.Args[1]
29484 if !(a.Uses == 1 && clobber(a)) {
29485 break
29486 }
29487 v.reset(OpAMD64ANDQlock)
29488 v.AddArg3(ptr, val, mem)
29489 return true
29490 }
// match: (Select1 a:(LoweredAtomicAnd32 ptr val mem))
// cond: a.Uses == 1 && clobber(a)
// result: (ANDLlock ptr val mem)
29494 for {
29495 a := v_0
29496 if a.Op != OpAMD64LoweredAtomicAnd32 {
29497 break
29498 }
29499 mem := a.Args[2]
29500 ptr := a.Args[0]
29501 val := a.Args[1]
29502 if !(a.Uses == 1 && clobber(a)) {
29503 break
29504 }
29505 v.reset(OpAMD64ANDLlock)
29506 v.AddArg3(ptr, val, mem)
29507 return true
29508 }
// match: (Select1 a:(LoweredAtomicOr64 ptr val mem))
// cond: a.Uses == 1 && clobber(a)
// result: (ORQlock ptr val mem)
29512 for {
29513 a := v_0
29514 if a.Op != OpAMD64LoweredAtomicOr64 {
29515 break
29516 }
29517 mem := a.Args[2]
29518 ptr := a.Args[0]
29519 val := a.Args[1]
29520 if !(a.Uses == 1 && clobber(a)) {
29521 break
29522 }
29523 v.reset(OpAMD64ORQlock)
29524 v.AddArg3(ptr, val, mem)
29525 return true
29526 }
// match: (Select1 a:(LoweredAtomicOr32 ptr val mem))
// cond: a.Uses == 1 && clobber(a)
// result: (ORLlock ptr val mem)
29530 for {
29531 a := v_0
29532 if a.Op != OpAMD64LoweredAtomicOr32 {
29533 break
29534 }
29535 mem := a.Args[2]
29536 ptr := a.Args[0]
29537 val := a.Args[1]
29538 if !(a.Uses == 1 && clobber(a)) {
29539 break
29540 }
29541 v.reset(OpAMD64ORLlock)
29542 v.AddArg3(ptr, val, mem)
29543 return true
29544 }
29545 return false
29546 }
29547 func rewriteValueAMD64_OpSelectN(v *Value) bool {
29548 v_0 := v.Args[0]
29549 b := v.Block
29550 config := b.Func.Config
// match: (SelectN [0] call:(CALLstatic {sym} s1:(MOVQstoreconst [sc] _ s2:(MOVQstore _ src s3:(MOVQstore _ dst mem)))))
// cond: sc.Val64() >= 0 && isSameCall(sym, "runtime.memmove") && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, sc.Val64(), config) && clobber(s1, s2, s3, call)
// result: (Move [sc.Val64()] dst src mem)
29554 for {
29555 if auxIntToInt64(v.AuxInt) != 0 {
29556 break
29557 }
29558 call := v_0
29559 if call.Op != OpAMD64CALLstatic || len(call.Args) != 1 {
29560 break
29561 }
29562 sym := auxToCall(call.Aux)
29563 s1 := call.Args[0]
29564 if s1.Op != OpAMD64MOVQstoreconst {
29565 break
29566 }
29567 sc := auxIntToValAndOff(s1.AuxInt)
29568 _ = s1.Args[1]
29569 s2 := s1.Args[1]
29570 if s2.Op != OpAMD64MOVQstore {
29571 break
29572 }
29573 _ = s2.Args[2]
29574 src := s2.Args[1]
29575 s3 := s2.Args[2]
29576 if s3.Op != OpAMD64MOVQstore {
29577 break
29578 }
29579 mem := s3.Args[2]
29580 dst := s3.Args[1]
29581 if !(sc.Val64() >= 0 && isSameCall(sym, "runtime.memmove") && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, sc.Val64(), config) && clobber(s1, s2, s3, call)) {
29582 break
29583 }
29584 v.reset(OpMove)
29585 v.AuxInt = int64ToAuxInt(sc.Val64())
29586 v.AddArg3(dst, src, mem)
29587 return true
29588 }
// match: (SelectN [0] call:(CALLstatic {sym} dst src (MOVQconst [sz]) mem))
// cond: sz >= 0 && isSameCall(sym, "runtime.memmove") && call.Uses == 1 && isInlinableMemmove(dst, src, sz, config) && clobber(call)
// result: (Move [sz] dst src mem)
29592 for {
29593 if auxIntToInt64(v.AuxInt) != 0 {
29594 break
29595 }
29596 call := v_0
29597 if call.Op != OpAMD64CALLstatic || len(call.Args) != 4 {
29598 break
29599 }
29600 sym := auxToCall(call.Aux)
29601 mem := call.Args[3]
29602 dst := call.Args[0]
29603 src := call.Args[1]
29604 call_2 := call.Args[2]
29605 if call_2.Op != OpAMD64MOVQconst {
29606 break
29607 }
29608 sz := auxIntToInt64(call_2.AuxInt)
29609 if !(sz >= 0 && isSameCall(sym, "runtime.memmove") && call.Uses == 1 && isInlinableMemmove(dst, src, sz, config) && clobber(call)) {
29610 break
29611 }
29612 v.reset(OpMove)
29613 v.AuxInt = int64ToAuxInt(sz)
29614 v.AddArg3(dst, src, mem)
29615 return true
29616 }
29617 return false
29618 }
29619 func rewriteValueAMD64_OpSlicemask(v *Value) bool {
29620 v_0 := v.Args[0]
29621 b := v.Block
29622
29623
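// match: (Slicemask <t> x)
// result: (SARQconst (NEGQ <t> x) [63])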
29624 for {
29625 t := v.Type
29626 x := v_0
29627 v.reset(OpAMD64SARQconst)
29628 v.AuxInt = int8ToAuxInt(63)
29629 v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
29630 v0.AddArg(x)
29631 v.AddArg(v0)
29632 return true
29633 }
29634 }
29635 func rewriteValueAMD64_OpSpectreIndex(v *Value) bool {
29636 v_1 := v.Args[1]
29637 v_0 := v.Args[0]
29638 b := v.Block
29639 typ := &b.Func.Config.Types
29640
29641
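// match: (SpectreIndex <t> x y)
// result: (CMOVQCC x (MOVQconst [0]) (CMPQ x y))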
29642 for {
29643 x := v_0
29644 y := v_1
29645 v.reset(OpAMD64CMOVQCC)
29646 v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
29647 v0.AuxInt = int64ToAuxInt(0)
29648 v1 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
29649 v1.AddArg2(x, y)
29650 v.AddArg3(x, v0, v1)
29651 return true
29652 }
29653 }
29654 func rewriteValueAMD64_OpSpectreSliceIndex(v *Value) bool {
29655 v_1 := v.Args[1]
29656 v_0 := v.Args[0]
29657 b := v.Block
29658 typ := &b.Func.Config.Types
29659
29660
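// match: (SpectreSliceIndex <t> x y)
// result: (CMOVQHI x (MOVQconst [0]) (CMPQ x y))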
29661 for {
29662 x := v_0
29663 y := v_1
29664 v.reset(OpAMD64CMOVQHI)
29665 v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
29666 v0.AuxInt = int64ToAuxInt(0)
29667 v1 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
29668 v1.AddArg2(x, y)
29669 v.AddArg3(x, v0, v1)
29670 return true
29671 }
29672 }
29673 func rewriteValueAMD64_OpStore(v *Value) bool {
29674 v_2 := v.Args[2]
29675 v_1 := v.Args[1]
29676 v_0 := v.Args[0]
29677
29678
29679
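// match: (Store {t} ptr val mem)
// cond: t.Size() == 8 && t.IsFloat()
// result: (MOVSDstore ptr val mem)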
29680 for {
29681 t := auxToType(v.Aux)
29682 ptr := v_0
29683 val := v_1
29684 mem := v_2
29685 if !(t.Size() == 8 && t.IsFloat()) {
29686 break
29687 }
29688 v.reset(OpAMD64MOVSDstore)
29689 v.AddArg3(ptr, val, mem)
29690 return true
29691 }
29692
29693
29694
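// match: (Store {t} ptr val mem)
// cond: t.Size() == 4 && t.IsFloat()
// result: (MOVSSstore ptr val mem)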
29695 for {
29696 t := auxToType(v.Aux)
29697 ptr := v_0
29698 val := v_1
29699 mem := v_2
29700 if !(t.Size() == 4 && t.IsFloat()) {
29701 break
29702 }
29703 v.reset(OpAMD64MOVSSstore)
29704 v.AddArg3(ptr, val, mem)
29705 return true
29706 }
29707
29708
29709
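// match: (Store {t} ptr val mem)
// cond: t.Size() == 8 && !t.IsFloat()
// result: (MOVQstore ptr val mem)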
29710 for {
29711 t := auxToType(v.Aux)
29712 ptr := v_0
29713 val := v_1
29714 mem := v_2
29715 if !(t.Size() == 8 && !t.IsFloat()) {
29716 break
29717 }
29718 v.reset(OpAMD64MOVQstore)
29719 v.AddArg3(ptr, val, mem)
29720 return true
29721 }
29722
29723
29724
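// match: (Store {t} ptr val mem)
// cond: t.Size() == 4 && !t.IsFloat()
// result: (MOVLstore ptr val mem)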
29725 for {
29726 t := auxToType(v.Aux)
29727 ptr := v_0
29728 val := v_1
29729 mem := v_2
29730 if !(t.Size() == 4 && !t.IsFloat()) {
29731 break
29732 }
29733 v.reset(OpAMD64MOVLstore)
29734 v.AddArg3(ptr, val, mem)
29735 return true
29736 }
29737
29738
29739
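// match: (Store {t} ptr val mem)
// cond: t.Size() == 2
// result: (MOVWstore ptr val mem)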
29740 for {
29741 t := auxToType(v.Aux)
29742 ptr := v_0
29743 val := v_1
29744 mem := v_2
29745 if !(t.Size() == 2) {
29746 break
29747 }
29748 v.reset(OpAMD64MOVWstore)
29749 v.AddArg3(ptr, val, mem)
29750 return true
29751 }
29752
29753
29754
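// match: (Store {t} ptr val mem)
// cond: t.Size() == 1
// result: (MOVBstore ptr val mem)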
29755 for {
29756 t := auxToType(v.Aux)
29757 ptr := v_0
29758 val := v_1
29759 mem := v_2
29760 if !(t.Size() == 1) {
29761 break
29762 }
29763 v.reset(OpAMD64MOVBstore)
29764 v.AddArg3(ptr, val, mem)
29765 return true
29766 }
29767 return false
29768 }
29769 func rewriteValueAMD64_OpTrunc(v *Value) bool {
29770 v_0 := v.Args[0]
29771
29772
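// match: (Trunc x)
// result: (ROUNDSD [3] x)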
29773 for {
29774 x := v_0
29775 v.reset(OpAMD64ROUNDSD)
29776 v.AuxInt = int8ToAuxInt(3)
29777 v.AddArg(x)
29778 return true
29779 }
29780 }
29781 func rewriteValueAMD64_OpZero(v *Value) bool {
29782 v_1 := v.Args[1]
29783 v_0 := v.Args[0]
29784 b := v.Block
29785 typ := &b.Func.Config.Types
29786
29787
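// match: (Zero [0] _ mem)
// result: mem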
29788 for {
29789 if auxIntToInt64(v.AuxInt) != 0 {
29790 break
29791 }
29792 mem := v_1
29793 v.copyOf(mem)
29794 return true
29795 }
29796
29797
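// match: (Zero [1] destptr mem)
// result: (MOVBstoreconst [makeValAndOff(0,0)] destptr mem)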
29798 for {
29799 if auxIntToInt64(v.AuxInt) != 1 {
29800 break
29801 }
29802 destptr := v_0
29803 mem := v_1
29804 v.reset(OpAMD64MOVBstoreconst)
29805 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
29806 v.AddArg2(destptr, mem)
29807 return true
29808 }
29809
29810
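// match: (Zero [2] destptr mem)
// result: (MOVWstoreconst [makeValAndOff(0,0)] destptr mem)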
29811 for {
29812 if auxIntToInt64(v.AuxInt) != 2 {
29813 break
29814 }
29815 destptr := v_0
29816 mem := v_1
29817 v.reset(OpAMD64MOVWstoreconst)
29818 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
29819 v.AddArg2(destptr, mem)
29820 return true
29821 }
29822
29823
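// match: (Zero [4] destptr mem)
// result: (MOVLstoreconst [makeValAndOff(0,0)] destptr mem)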
29824 for {
29825 if auxIntToInt64(v.AuxInt) != 4 {
29826 break
29827 }
29828 destptr := v_0
29829 mem := v_1
29830 v.reset(OpAMD64MOVLstoreconst)
29831 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
29832 v.AddArg2(destptr, mem)
29833 return true
29834 }
29835
29836
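// match: (Zero [8] destptr mem)
// result: (MOVQstoreconst [makeValAndOff(0,0)] destptr mem)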
29837 for {
29838 if auxIntToInt64(v.AuxInt) != 8 {
29839 break
29840 }
29841 destptr := v_0
29842 mem := v_1
29843 v.reset(OpAMD64MOVQstoreconst)
29844 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
29845 v.AddArg2(destptr, mem)
29846 return true
29847 }
29848
29849
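// match: (Zero [3] destptr mem)
// result: (MOVBstoreconst [makeValAndOff(0,2)] destptr (MOVWstoreconst [makeValAndOff(0,0)] destptr mem))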
29850 for {
29851 if auxIntToInt64(v.AuxInt) != 3 {
29852 break
29853 }
29854 destptr := v_0
29855 mem := v_1
29856 v.reset(OpAMD64MOVBstoreconst)
29857 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 2))
29858 v0 := b.NewValue0(v.Pos, OpAMD64MOVWstoreconst, types.TypeMem)
29859 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
29860 v0.AddArg2(destptr, mem)
29861 v.AddArg2(destptr, v0)
29862 return true
29863 }
29864
29865
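// match: (Zero [5] destptr mem)
// result: (MOVBstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [makeValAndOff(0,0)] destptr mem))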
29866 for {
29867 if auxIntToInt64(v.AuxInt) != 5 {
29868 break
29869 }
29870 destptr := v_0
29871 mem := v_1
29872 v.reset(OpAMD64MOVBstoreconst)
29873 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 4))
29874 v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem)
29875 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
29876 v0.AddArg2(destptr, mem)
29877 v.AddArg2(destptr, v0)
29878 return true
29879 }
29880
29881
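// match: (Zero [6] destptr mem)
// result: (MOVWstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [makeValAndOff(0,0)] destptr mem))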
29882 for {
29883 if auxIntToInt64(v.AuxInt) != 6 {
29884 break
29885 }
29886 destptr := v_0
29887 mem := v_1
29888 v.reset(OpAMD64MOVWstoreconst)
29889 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 4))
29890 v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem)
29891 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
29892 v0.AddArg2(destptr, mem)
29893 v.AddArg2(destptr, v0)
29894 return true
29895 }
29896
29897
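// match: (Zero [7] destptr mem)
// result: (MOVLstoreconst [makeValAndOff(0,3)] destptr (MOVLstoreconst [makeValAndOff(0,0)] destptr mem))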
29898 for {
29899 if auxIntToInt64(v.AuxInt) != 7 {
29900 break
29901 }
29902 destptr := v_0
29903 mem := v_1
29904 v.reset(OpAMD64MOVLstoreconst)
29905 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 3))
29906 v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem)
29907 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
29908 v0.AddArg2(destptr, mem)
29909 v.AddArg2(destptr, v0)
29910 return true
29911 }
29912
29913
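// match: (Zero [9] destptr mem)
// result: (MOVBstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))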
29914 for {
29915 if auxIntToInt64(v.AuxInt) != 9 {
29916 break
29917 }
29918 destptr := v_0
29919 mem := v_1
29920 v.reset(OpAMD64MOVBstoreconst)
29921 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 8))
29922 v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
29923 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
29924 v0.AddArg2(destptr, mem)
29925 v.AddArg2(destptr, v0)
29926 return true
29927 }
29928
29929
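// match: (Zero [10] destptr mem)
// result: (MOVWstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))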
29930 for {
29931 if auxIntToInt64(v.AuxInt) != 10 {
29932 break
29933 }
29934 destptr := v_0
29935 mem := v_1
29936 v.reset(OpAMD64MOVWstoreconst)
29937 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 8))
29938 v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
29939 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
29940 v0.AddArg2(destptr, mem)
29941 v.AddArg2(destptr, v0)
29942 return true
29943 }
29944
29945
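// match: (Zero [11] destptr mem)
// result: (MOVLstoreconst [makeValAndOff(0,7)] destptr (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))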
29946 for {
29947 if auxIntToInt64(v.AuxInt) != 11 {
29948 break
29949 }
29950 destptr := v_0
29951 mem := v_1
29952 v.reset(OpAMD64MOVLstoreconst)
29953 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 7))
29954 v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
29955 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
29956 v0.AddArg2(destptr, mem)
29957 v.AddArg2(destptr, v0)
29958 return true
29959 }
29960
29961
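// match: (Zero [12] destptr mem)
// result: (MOVLstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))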
29962 for {
29963 if auxIntToInt64(v.AuxInt) != 12 {
29964 break
29965 }
29966 destptr := v_0
29967 mem := v_1
29968 v.reset(OpAMD64MOVLstoreconst)
29969 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 8))
29970 v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
29971 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
29972 v0.AddArg2(destptr, mem)
29973 v.AddArg2(destptr, v0)
29974 return true
29975 }
29976
29977
29978
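// match: (Zero [s] destptr mem)
// cond: s > 12 && s < 16
// result: (MOVQstoreconst [makeValAndOff(0,int32(s-8))] destptr (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))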
29979 for {
29980 s := auxIntToInt64(v.AuxInt)
29981 destptr := v_0
29982 mem := v_1
29983 if !(s > 12 && s < 16) {
29984 break
29985 }
29986 v.reset(OpAMD64MOVQstoreconst)
29987 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, int32(s-8)))
29988 v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
29989 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
29990 v0.AddArg2(destptr, mem)
29991 v.AddArg2(destptr, v0)
29992 return true
29993 }
29994
29995
29996
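// match: (Zero [s] destptr mem)
// cond: s%16 != 0 && s > 16
// result: (Zero [s-s%16] (OffPtr <destptr.Type> destptr [s%16]) (MOVOstoreconst [makeValAndOff(0,0)] destptr mem))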
29997 for {
29998 s := auxIntToInt64(v.AuxInt)
29999 destptr := v_0
30000 mem := v_1
30001 if !(s%16 != 0 && s > 16) {
30002 break
30003 }
30004 v.reset(OpZero)
30005 v.AuxInt = int64ToAuxInt(s - s%16)
30006 v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
30007 v0.AuxInt = int64ToAuxInt(s % 16)
30008 v0.AddArg(destptr)
30009 v1 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
30010 v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
30011 v1.AddArg2(destptr, mem)
30012 v.AddArg2(v0, v1)
30013 return true
30014 }
30015
30016
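// match: (Zero [16] destptr mem)
// result: (MOVOstoreconst [makeValAndOff(0,0)] destptr mem)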
30017 for {
30018 if auxIntToInt64(v.AuxInt) != 16 {
30019 break
30020 }
30021 destptr := v_0
30022 mem := v_1
30023 v.reset(OpAMD64MOVOstoreconst)
30024 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
30025 v.AddArg2(destptr, mem)
30026 return true
30027 }
30028
30029
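// match: (Zero [32] destptr mem)
// result: (MOVOstoreconst [makeValAndOff(0,16)] destptr (MOVOstoreconst [makeValAndOff(0,0)] destptr mem))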
30030 for {
30031 if auxIntToInt64(v.AuxInt) != 32 {
30032 break
30033 }
30034 destptr := v_0
30035 mem := v_1
30036 v.reset(OpAMD64MOVOstoreconst)
30037 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 16))
30038 v0 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
30039 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
30040 v0.AddArg2(destptr, mem)
30041 v.AddArg2(destptr, v0)
30042 return true
30043 }
30044
30045
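// match: (Zero [48] destptr mem)
// result: (MOVOstoreconst [makeValAndOff(0,32)] destptr (MOVOstoreconst [makeValAndOff(0,16)] destptr (MOVOstoreconst [makeValAndOff(0,0)] destptr mem)))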
30046 for {
30047 if auxIntToInt64(v.AuxInt) != 48 {
30048 break
30049 }
30050 destptr := v_0
30051 mem := v_1
30052 v.reset(OpAMD64MOVOstoreconst)
30053 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 32))
30054 v0 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
30055 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 16))
30056 v1 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
30057 v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
30058 v1.AddArg2(destptr, mem)
30059 v0.AddArg2(destptr, v1)
30060 v.AddArg2(destptr, v0)
30061 return true
30062 }
30063
30064
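// match: (Zero [64] destptr mem)
// result: (MOVOstoreconst [makeValAndOff(0,48)] destptr (MOVOstoreconst [makeValAndOff(0,32)] destptr (MOVOstoreconst [makeValAndOff(0,16)] destptr (MOVOstoreconst [makeValAndOff(0,0)] destptr mem))))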
30065 for {
30066 if auxIntToInt64(v.AuxInt) != 64 {
30067 break
30068 }
30069 destptr := v_0
30070 mem := v_1
30071 v.reset(OpAMD64MOVOstoreconst)
30072 v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 48))
30073 v0 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
30074 v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 32))
30075 v1 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
30076 v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 16))
30077 v2 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
30078 v2.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
30079 v2.AddArg2(destptr, mem)
30080 v1.AddArg2(destptr, v2)
30081 v0.AddArg2(destptr, v1)
30082 v.AddArg2(destptr, v0)
30083 return true
30084 }
30085
30086
30087
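// match: (Zero [s] destptr mem)
// cond: s > 64 && s <= 1024 && s%16 == 0
// result: (DUFFZERO [s] destptr mem)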
30088 for {
30089 s := auxIntToInt64(v.AuxInt)
30090 destptr := v_0
30091 mem := v_1
30092 if !(s > 64 && s <= 1024 && s%16 == 0) {
30093 break
30094 }
30095 v.reset(OpAMD64DUFFZERO)
30096 v.AuxInt = int64ToAuxInt(s)
30097 v.AddArg2(destptr, mem)
30098 return true
30099 }
30100
30101
30102
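// match: (Zero [s] destptr mem)
// cond: s > 1024 && s%8 == 0
// result: (REPSTOSQ destptr (MOVQconst [s/8]) (MOVQconst [0]) mem)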
30103 for {
30104 s := auxIntToInt64(v.AuxInt)
30105 destptr := v_0
30106 mem := v_1
30107 if !(s > 1024 && s%8 == 0) {
30108 break
30109 }
30110 v.reset(OpAMD64REPSTOSQ)
30111 v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
30112 v0.AuxInt = int64ToAuxInt(s / 8)
30113 v1 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
30114 v1.AuxInt = int64ToAuxInt(0)
30115 v.AddArg4(destptr, v0, v1, mem)
30116 return true
30117 }
30118 return false
30119 }
30120 func rewriteBlockAMD64(b *Block) bool {
30121 typ := &b.Func.Config.Types
30122 switch b.Kind {
30123 case BlockAMD64EQ:
30124
30125
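// match: (EQ (TESTL (SHLL (MOVLconst [1]) x) y) yes no)
// result: (UGE (BTL x y) yes no)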
30126 for b.Controls[0].Op == OpAMD64TESTL {
30127 v_0 := b.Controls[0]
30128 _ = v_0.Args[1]
30129 v_0_0 := v_0.Args[0]
30130 v_0_1 := v_0.Args[1]
30131 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
30132 if v_0_0.Op != OpAMD64SHLL {
30133 continue
30134 }
30135 x := v_0_0.Args[1]
30136 v_0_0_0 := v_0_0.Args[0]
30137 if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 {
30138 continue
30139 }
30140 y := v_0_1
30141 v0 := b.NewValue0(v_0.Pos, OpAMD64BTL, types.TypeFlags)
30142 v0.AddArg2(x, y)
30143 b.resetWithControl(BlockAMD64UGE, v0)
30144 return true
30145 }
30146 break
30147 }
30148
30149
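// match: (EQ (TESTQ (SHLQ (MOVQconst [1]) x) y) yes no)
// result: (UGE (BTQ x y) yes no)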
30150 for b.Controls[0].Op == OpAMD64TESTQ {
30151 v_0 := b.Controls[0]
30152 _ = v_0.Args[1]
30153 v_0_0 := v_0.Args[0]
30154 v_0_1 := v_0.Args[1]
30155 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
30156 if v_0_0.Op != OpAMD64SHLQ {
30157 continue
30158 }
30159 x := v_0_0.Args[1]
30160 v_0_0_0 := v_0_0.Args[0]
30161 if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
30162 continue
30163 }
30164 y := v_0_1
30165 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQ, types.TypeFlags)
30166 v0.AddArg2(x, y)
30167 b.resetWithControl(BlockAMD64UGE, v0)
30168 return true
30169 }
30170 break
30171 }
30172
30173
30174
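// match: (EQ (TESTLconst [c] x) yes no)
// cond: isUint32PowerOfTwo(int64(c))
// result: (UGE (BTLconst [int8(log32(c))] x) yes no)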
30175 for b.Controls[0].Op == OpAMD64TESTLconst {
30176 v_0 := b.Controls[0]
30177 c := auxIntToInt32(v_0.AuxInt)
30178 x := v_0.Args[0]
30179 if !(isUint32PowerOfTwo(int64(c))) {
30180 break
30181 }
30182 v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
30183 v0.AuxInt = int8ToAuxInt(int8(log32(c)))
30184 v0.AddArg(x)
30185 b.resetWithControl(BlockAMD64UGE, v0)
30186 return true
30187 }
30188
30189
30190
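// match: (EQ (TESTQconst [c] x) yes no)
// cond: isUint64PowerOfTwo(int64(c))
// result: (UGE (BTQconst [int8(log32(c))] x) yes no)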
30191 for b.Controls[0].Op == OpAMD64TESTQconst {
30192 v_0 := b.Controls[0]
30193 c := auxIntToInt32(v_0.AuxInt)
30194 x := v_0.Args[0]
30195 if !(isUint64PowerOfTwo(int64(c))) {
30196 break
30197 }
30198 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
30199 v0.AuxInt = int8ToAuxInt(int8(log32(c)))
30200 v0.AddArg(x)
30201 b.resetWithControl(BlockAMD64UGE, v0)
30202 return true
30203 }
30204
30205
30206
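// match: (EQ (TESTQ (MOVQconst [c]) x) yes no)
// cond: isUint64PowerOfTwo(c)
// result: (UGE (BTQconst [int8(log64(c))] x) yes no)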
30207 for b.Controls[0].Op == OpAMD64TESTQ {
30208 v_0 := b.Controls[0]
30209 _ = v_0.Args[1]
30210 v_0_0 := v_0.Args[0]
30211 v_0_1 := v_0.Args[1]
30212 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
30213 if v_0_0.Op != OpAMD64MOVQconst {
30214 continue
30215 }
30216 c := auxIntToInt64(v_0_0.AuxInt)
30217 x := v_0_1
30218 if !(isUint64PowerOfTwo(c)) {
30219 continue
30220 }
30221 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
30222 v0.AuxInt = int8ToAuxInt(int8(log64(c)))
30223 v0.AddArg(x)
30224 b.resetWithControl(BlockAMD64UGE, v0)
30225 return true
30226 }
30227 break
30228 }
30229
30230
30231
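// match: (EQ (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2) yes no)
// cond: z1 == z2
// result: (UGE (BTQconst [63] x) yes no)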
30232 for b.Controls[0].Op == OpAMD64TESTQ {
30233 v_0 := b.Controls[0]
30234 _ = v_0.Args[1]
30235 v_0_0 := v_0.Args[0]
30236 v_0_1 := v_0.Args[1]
30237 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
30238 z1 := v_0_0
30239 if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 {
30240 continue
30241 }
30242 z1_0 := z1.Args[0]
30243 if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
30244 continue
30245 }
30246 x := z1_0.Args[0]
30247 z2 := v_0_1
30248 if !(z1 == z2) {
30249 continue
30250 }
30251 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
30252 v0.AuxInt = int8ToAuxInt(63)
30253 v0.AddArg(x)
30254 b.resetWithControl(BlockAMD64UGE, v0)
30255 return true
30256 }
30257 break
30258 }
30259
30260
30261
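// match: (EQ (TESTL z1:(SHLLconst [31] (SHRQconst [31] x)) z2) yes no)
// cond: z1 == z2
// result: (UGE (BTQconst [31] x) yes no)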
30262 for b.Controls[0].Op == OpAMD64TESTL {
30263 v_0 := b.Controls[0]
30264 _ = v_0.Args[1]
30265 v_0_0 := v_0.Args[0]
30266 v_0_1 := v_0.Args[1]
30267 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
30268 z1 := v_0_0
30269 if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 {
30270 continue
30271 }
30272 z1_0 := z1.Args[0]
30273 if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 31 {
30274 continue
30275 }
30276 x := z1_0.Args[0]
30277 z2 := v_0_1
30278 if !(z1 == z2) {
30279 continue
30280 }
30281 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
30282 v0.AuxInt = int8ToAuxInt(31)
30283 v0.AddArg(x)
30284 b.resetWithControl(BlockAMD64UGE, v0)
30285 return true
30286 }
30287 break
30288 }
30289
30290
30291
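// match: (EQ (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2) yes no)
// cond: z1 == z2
// result: (UGE (BTQconst [0] x) yes no)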
30292 for b.Controls[0].Op == OpAMD64TESTQ {
30293 v_0 := b.Controls[0]
30294 _ = v_0.Args[1]
30295 v_0_0 := v_0.Args[0]
30296 v_0_1 := v_0.Args[1]
30297 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
30298 z1 := v_0_0
30299 if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
30300 continue
30301 }
30302 z1_0 := z1.Args[0]
30303 if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
30304 continue
30305 }
30306 x := z1_0.Args[0]
30307 z2 := v_0_1
30308 if !(z1 == z2) {
30309 continue
30310 }
30311 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
30312 v0.AuxInt = int8ToAuxInt(0)
30313 v0.AddArg(x)
30314 b.resetWithControl(BlockAMD64UGE, v0)
30315 return true
30316 }
30317 break
30318 }
30319
30320
30321
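// match: (EQ (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2) yes no)
// cond: z1 == z2
// result: (UGE (BTLconst [0] x) yes no)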
30322 for b.Controls[0].Op == OpAMD64TESTL {
30323 v_0 := b.Controls[0]
30324 _ = v_0.Args[1]
30325 v_0_0 := v_0.Args[0]
30326 v_0_1 := v_0.Args[1]
30327 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
30328 z1 := v_0_0
30329 if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
30330 continue
30331 }
30332 z1_0 := z1.Args[0]
30333 if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
30334 continue
30335 }
30336 x := z1_0.Args[0]
30337 z2 := v_0_1
30338 if !(z1 == z2) {
30339 continue
30340 }
30341 v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
30342 v0.AuxInt = int8ToAuxInt(0)
30343 v0.AddArg(x)
30344 b.resetWithControl(BlockAMD64UGE, v0)
30345 return true
30346 }
30347 break
30348 }
30349
30350
30351
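// match: (EQ (TESTQ z1:(SHRQconst [63] x) z2) yes no)
// cond: z1 == z2
// result: (UGE (BTQconst [63] x) yes no)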
30352 for b.Controls[0].Op == OpAMD64TESTQ {
30353 v_0 := b.Controls[0]
30354 _ = v_0.Args[1]
30355 v_0_0 := v_0.Args[0]
30356 v_0_1 := v_0.Args[1]
30357 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
30358 z1 := v_0_0
30359 if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
30360 continue
30361 }
30362 x := z1.Args[0]
30363 z2 := v_0_1
30364 if !(z1 == z2) {
30365 continue
30366 }
30367 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
30368 v0.AuxInt = int8ToAuxInt(63)
30369 v0.AddArg(x)
30370 b.resetWithControl(BlockAMD64UGE, v0)
30371 return true
30372 }
30373 break
30374 }
30375
30376
30377
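// match: (EQ (TESTL z1:(SHRLconst [31] x) z2) yes no)
// cond: z1 == z2
// result: (UGE (BTLconst [31] x) yes no)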
30378 for b.Controls[0].Op == OpAMD64TESTL {
30379 v_0 := b.Controls[0]
30380 _ = v_0.Args[1]
30381 v_0_0 := v_0.Args[0]
30382 v_0_1 := v_0.Args[1]
30383 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
30384 z1 := v_0_0
30385 if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
30386 continue
30387 }
30388 x := z1.Args[0]
30389 z2 := v_0_1
30390 if !(z1 == z2) {
30391 continue
30392 }
30393 v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
30394 v0.AuxInt = int8ToAuxInt(31)
30395 v0.AddArg(x)
30396 b.resetWithControl(BlockAMD64UGE, v0)
30397 return true
30398 }
30399 break
30400 }
30401
30402
30403 for b.Controls[0].Op == OpAMD64InvertFlags {
30404 v_0 := b.Controls[0]
30405 cmp := v_0.Args[0]
30406 b.resetWithControl(BlockAMD64EQ, cmp)
30407 return true
30408 }
30409
30410
30411 for b.Controls[0].Op == OpAMD64FlagEQ {
30412 b.Reset(BlockFirst)
30413 return true
30414 }
30415
30416
30417 for b.Controls[0].Op == OpAMD64FlagLT_ULT {
30418 b.Reset(BlockFirst)
30419 b.swapSuccessors()
30420 return true
30421 }
30422
30423
30424 for b.Controls[0].Op == OpAMD64FlagLT_UGT {
30425 b.Reset(BlockFirst)
30426 b.swapSuccessors()
30427 return true
30428 }
30429
30430
30431 for b.Controls[0].Op == OpAMD64FlagGT_ULT {
30432 b.Reset(BlockFirst)
30433 b.swapSuccessors()
30434 return true
30435 }
30436
30437
30438 for b.Controls[0].Op == OpAMD64FlagGT_UGT {
30439 b.Reset(BlockFirst)
30440 b.swapSuccessors()
30441 return true
30442 }
30443
30444
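// match: (EQ (TESTQ s:(Select0 blsr:(BLSRQ _)) s) yes no)
// result: (EQ (Select1 <types.TypeFlags> blsr) yes no)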
30445 for b.Controls[0].Op == OpAMD64TESTQ {
30446 v_0 := b.Controls[0]
30447 _ = v_0.Args[1]
30448 v_0_0 := v_0.Args[0]
30449 v_0_1 := v_0.Args[1]
30450 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
30451 s := v_0_0
30452 if s.Op != OpSelect0 {
30453 continue
30454 }
30455 blsr := s.Args[0]
30456 if blsr.Op != OpAMD64BLSRQ || s != v_0_1 {
30457 continue
30458 }
30459 v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
30460 v0.AddArg(blsr)
30461 b.resetWithControl(BlockAMD64EQ, v0)
30462 return true
30463 }
30464 break
30465 }
30466
30467
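// match: (EQ (TESTL s:(Select0 blsr:(BLSRL _)) s) yes no)
// result: (EQ (Select1 <types.TypeFlags> blsr) yes no)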
30468 for b.Controls[0].Op == OpAMD64TESTL {
30469 v_0 := b.Controls[0]
30470 _ = v_0.Args[1]
30471 v_0_0 := v_0.Args[0]
30472 v_0_1 := v_0.Args[1]
30473 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
30474 s := v_0_0
30475 if s.Op != OpSelect0 {
30476 continue
30477 }
30478 blsr := s.Args[0]
30479 if blsr.Op != OpAMD64BLSRL || s != v_0_1 {
30480 continue
30481 }
30482 v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
30483 v0.AddArg(blsr)
30484 b.resetWithControl(BlockAMD64EQ, v0)
30485 return true
30486 }
30487 break
30488 }
30489
30490
30491
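// match: (EQ t:(TESTQ a:(ADDQconst [c] x) a) yes no)
// cond: t.Uses == 1 && flagify(a)
// result: (EQ (Select1 <types.TypeFlags> a.Args[0]) yes no)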
30492 for b.Controls[0].Op == OpAMD64TESTQ {
30493 t := b.Controls[0]
30494 _ = t.Args[1]
30495 t_0 := t.Args[0]
30496 t_1 := t.Args[1]
30497 for _i0 := 0; _i0 <= 1; _i0, t_0, t_1 = _i0+1, t_1, t_0 {
30498 a := t_0
30499 if a.Op != OpAMD64ADDQconst {
30500 continue
30501 }
30502 if a != t_1 || !(t.Uses == 1 && flagify(a)) {
30503 continue
30504 }
30505 v0 := b.NewValue0(t.Pos, OpSelect1, types.TypeFlags)
30506 v0.AddArg(a.Args[0])
30507 b.resetWithControl(BlockAMD64EQ, v0)
30508 return true
30509 }
30510 break
30511 }
30512
30513
30514
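// match: (EQ t:(TESTL a:(ADDLconst [c] x) a) yes no)
// cond: t.Uses == 1 && flagify(a)
// result: (EQ (Select1 <types.TypeFlags> a.Args[0]) yes no)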
30515 for b.Controls[0].Op == OpAMD64TESTL {
30516 t := b.Controls[0]
30517 _ = t.Args[1]
30518 t_0 := t.Args[0]
30519 t_1 := t.Args[1]
30520 for _i0 := 0; _i0 <= 1; _i0, t_0, t_1 = _i0+1, t_1, t_0 {
30521 a := t_0
30522 if a.Op != OpAMD64ADDLconst {
30523 continue
30524 }
30525 if a != t_1 || !(t.Uses == 1 && flagify(a)) {
30526 continue
30527 }
30528 v0 := b.NewValue0(t.Pos, OpSelect1, types.TypeFlags)
30529 v0.AddArg(a.Args[0])
30530 b.resetWithControl(BlockAMD64EQ, v0)
30531 return true
30532 }
30533 break
30534 }
30535 case BlockAMD64GE:
30536
30537
30538
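// match: (GE c:(CMPQconst [128] z) yes no)
// cond: c.Uses == 1
// result: (GT (CMPQconst [127] z) yes no)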
30539 for b.Controls[0].Op == OpAMD64CMPQconst {
30540 c := b.Controls[0]
30541 if auxIntToInt32(c.AuxInt) != 128 {
30542 break
30543 }
30544 z := c.Args[0]
30545 if !(c.Uses == 1) {
30546 break
30547 }
30548 v0 := b.NewValue0(c.Pos, OpAMD64CMPQconst, types.TypeFlags)
30549 v0.AuxInt = int32ToAuxInt(127)
30550 v0.AddArg(z)
30551 b.resetWithControl(BlockAMD64GT, v0)
30552 return true
30553 }
30554
30555
30556
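// match: (GE c:(CMPLconst [128] z) yes no)
// cond: c.Uses == 1
// result: (GT (CMPLconst [127] z) yes no)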
30557 for b.Controls[0].Op == OpAMD64CMPLconst {
30558 c := b.Controls[0]
30559 if auxIntToInt32(c.AuxInt) != 128 {
30560 break
30561 }
30562 z := c.Args[0]
30563 if !(c.Uses == 1) {
30564 break
30565 }
30566 v0 := b.NewValue0(c.Pos, OpAMD64CMPLconst, types.TypeFlags)
30567 v0.AuxInt = int32ToAuxInt(127)
30568 v0.AddArg(z)
30569 b.resetWithControl(BlockAMD64GT, v0)
30570 return true
30571 }
30572
30573
30574 for b.Controls[0].Op == OpAMD64InvertFlags {
30575 v_0 := b.Controls[0]
30576 cmp := v_0.Args[0]
30577 b.resetWithControl(BlockAMD64LE, cmp)
30578 return true
30579 }
30580
30581
30582 for b.Controls[0].Op == OpAMD64FlagEQ {
30583 b.Reset(BlockFirst)
30584 return true
30585 }
30586
30587
30588 for b.Controls[0].Op == OpAMD64FlagLT_ULT {
30589 b.Reset(BlockFirst)
30590 b.swapSuccessors()
30591 return true
30592 }
30593
30594
30595 for b.Controls[0].Op == OpAMD64FlagLT_UGT {
30596 b.Reset(BlockFirst)
30597 b.swapSuccessors()
30598 return true
30599 }
30600
30601
30602 for b.Controls[0].Op == OpAMD64FlagGT_ULT {
30603 b.Reset(BlockFirst)
30604 return true
30605 }
30606
30607
30608 for b.Controls[0].Op == OpAMD64FlagGT_UGT {
30609 b.Reset(BlockFirst)
30610 return true
30611 }
30612 case BlockAMD64GT:
30613
30614
30615 for b.Controls[0].Op == OpAMD64InvertFlags {
30616 v_0 := b.Controls[0]
30617 cmp := v_0.Args[0]
30618 b.resetWithControl(BlockAMD64LT, cmp)
30619 return true
30620 }
30621
30622
30623 for b.Controls[0].Op == OpAMD64FlagEQ {
30624 b.Reset(BlockFirst)
30625 b.swapSuccessors()
30626 return true
30627 }
30628
30629
30630 for b.Controls[0].Op == OpAMD64FlagLT_ULT {
30631 b.Reset(BlockFirst)
30632 b.swapSuccessors()
30633 return true
30634 }
30635
30636
30637 for b.Controls[0].Op == OpAMD64FlagLT_UGT {
30638 b.Reset(BlockFirst)
30639 b.swapSuccessors()
30640 return true
30641 }
30642
30643
30644 for b.Controls[0].Op == OpAMD64FlagGT_ULT {
30645 b.Reset(BlockFirst)
30646 return true
30647 }
30648
30649
30650 for b.Controls[0].Op == OpAMD64FlagGT_UGT {
30651 b.Reset(BlockFirst)
30652 return true
30653 }
30654 case BlockIf:
30655
30656
30657 for b.Controls[0].Op == OpAMD64SETL {
30658 v_0 := b.Controls[0]
30659 cmp := v_0.Args[0]
30660 b.resetWithControl(BlockAMD64LT, cmp)
30661 return true
30662 }
30663
30664
30665 for b.Controls[0].Op == OpAMD64SETLE {
30666 v_0 := b.Controls[0]
30667 cmp := v_0.Args[0]
30668 b.resetWithControl(BlockAMD64LE, cmp)
30669 return true
30670 }
30671
30672
30673 for b.Controls[0].Op == OpAMD64SETG {
30674 v_0 := b.Controls[0]
30675 cmp := v_0.Args[0]
30676 b.resetWithControl(BlockAMD64GT, cmp)
30677 return true
30678 }
30679
30680
30681 for b.Controls[0].Op == OpAMD64SETGE {
30682 v_0 := b.Controls[0]
30683 cmp := v_0.Args[0]
30684 b.resetWithControl(BlockAMD64GE, cmp)
30685 return true
30686 }
30687
30688
30689 for b.Controls[0].Op == OpAMD64SETEQ {
30690 v_0 := b.Controls[0]
30691 cmp := v_0.Args[0]
30692 b.resetWithControl(BlockAMD64EQ, cmp)
30693 return true
30694 }
30695
30696
30697 for b.Controls[0].Op == OpAMD64SETNE {
30698 v_0 := b.Controls[0]
30699 cmp := v_0.Args[0]
30700 b.resetWithControl(BlockAMD64NE, cmp)
30701 return true
30702 }
30703
30704
30705 for b.Controls[0].Op == OpAMD64SETB {
30706 v_0 := b.Controls[0]
30707 cmp := v_0.Args[0]
30708 b.resetWithControl(BlockAMD64ULT, cmp)
30709 return true
30710 }
30711
30712
30713 for b.Controls[0].Op == OpAMD64SETBE {
30714 v_0 := b.Controls[0]
30715 cmp := v_0.Args[0]
30716 b.resetWithControl(BlockAMD64ULE, cmp)
30717 return true
30718 }
30719
30720
30721 for b.Controls[0].Op == OpAMD64SETA {
30722 v_0 := b.Controls[0]
30723 cmp := v_0.Args[0]
30724 b.resetWithControl(BlockAMD64UGT, cmp)
30725 return true
30726 }
30727
30728
30729 for b.Controls[0].Op == OpAMD64SETAE {
30730 v_0 := b.Controls[0]
30731 cmp := v_0.Args[0]
30732 b.resetWithControl(BlockAMD64UGE, cmp)
30733 return true
30734 }
30735
30736
30737 for b.Controls[0].Op == OpAMD64SETO {
30738 v_0 := b.Controls[0]
30739 cmp := v_0.Args[0]
30740 b.resetWithControl(BlockAMD64OS, cmp)
30741 return true
30742 }
30743
30744
30745 for b.Controls[0].Op == OpAMD64SETGF {
30746 v_0 := b.Controls[0]
30747 cmp := v_0.Args[0]
30748 b.resetWithControl(BlockAMD64UGT, cmp)
30749 return true
30750 }
30751
30752
30753 for b.Controls[0].Op == OpAMD64SETGEF {
30754 v_0 := b.Controls[0]
30755 cmp := v_0.Args[0]
30756 b.resetWithControl(BlockAMD64UGE, cmp)
30757 return true
30758 }
30759
30760
30761 for b.Controls[0].Op == OpAMD64SETEQF {
30762 v_0 := b.Controls[0]
30763 cmp := v_0.Args[0]
30764 b.resetWithControl(BlockAMD64EQF, cmp)
30765 return true
30766 }
30767
30768
30769 for b.Controls[0].Op == OpAMD64SETNEF {
30770 v_0 := b.Controls[0]
30771 cmp := v_0.Args[0]
30772 b.resetWithControl(BlockAMD64NEF, cmp)
30773 return true
30774 }
30775
30776
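// match: (If cond yes no)
// result: (NE (TESTB cond cond) yes no)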
30777 for {
30778 cond := b.Controls[0]
30779 v0 := b.NewValue0(cond.Pos, OpAMD64TESTB, types.TypeFlags)
30780 v0.AddArg2(cond, cond)
30781 b.resetWithControl(BlockAMD64NE, v0)
30782 return true
30783 }
30784 case BlockJumpTable:
30785
30786
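// match: (JumpTable idx)
// result: (JUMPTABLE {makeJumpTableSym(b)} idx (LEAQ <typ.Uintptr> {makeJumpTableSym(b)} (SB)))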
30787 for {
30788 idx := b.Controls[0]
30789 v0 := b.NewValue0(b.Pos, OpAMD64LEAQ, typ.Uintptr)
30790 v0.Aux = symToAux(makeJumpTableSym(b))
30791 v1 := b.NewValue0(b.Pos, OpSB, typ.Uintptr)
30792 v0.AddArg(v1)
30793 b.resetWithControl2(BlockAMD64JUMPTABLE, idx, v0)
30794 b.Aux = symToAux(makeJumpTableSym(b))
30795 return true
30796 }
30797 case BlockAMD64LE:
30798
30799
30800 for b.Controls[0].Op == OpAMD64InvertFlags {
30801 v_0 := b.Controls[0]
30802 cmp := v_0.Args[0]
30803 b.resetWithControl(BlockAMD64GE, cmp)
30804 return true
30805 }
30806
30807
30808 for b.Controls[0].Op == OpAMD64FlagEQ {
30809 b.Reset(BlockFirst)
30810 return true
30811 }
30812
30813
30814 for b.Controls[0].Op == OpAMD64FlagLT_ULT {
30815 b.Reset(BlockFirst)
30816 return true
30817 }
30818
30819
30820 for b.Controls[0].Op == OpAMD64FlagLT_UGT {
30821 b.Reset(BlockFirst)
30822 return true
30823 }
30824
30825
30826 for b.Controls[0].Op == OpAMD64FlagGT_ULT {
30827 b.Reset(BlockFirst)
30828 b.swapSuccessors()
30829 return true
30830 }
30831
30832
30833 for b.Controls[0].Op == OpAMD64FlagGT_UGT {
30834 b.Reset(BlockFirst)
30835 b.swapSuccessors()
30836 return true
30837 }
30838 case BlockAMD64LT:
30839
30840
30841
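// match: (LT c:(CMPQconst [128] z) yes no)
// cond: c.Uses == 1
// result: (LE (CMPQconst [127] z) yes no)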
30842 for b.Controls[0].Op == OpAMD64CMPQconst {
30843 c := b.Controls[0]
30844 if auxIntToInt32(c.AuxInt) != 128 {
30845 break
30846 }
30847 z := c.Args[0]
30848 if !(c.Uses == 1) {
30849 break
30850 }
30851 v0 := b.NewValue0(c.Pos, OpAMD64CMPQconst, types.TypeFlags)
30852 v0.AuxInt = int32ToAuxInt(127)
30853 v0.AddArg(z)
30854 b.resetWithControl(BlockAMD64LE, v0)
30855 return true
30856 }
30857
30858
30859
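// match: (LT c:(CMPLconst [128] z) yes no)
// cond: c.Uses == 1
// result: (LE (CMPLconst [127] z) yes no)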
30860 for b.Controls[0].Op == OpAMD64CMPLconst {
30861 c := b.Controls[0]
30862 if auxIntToInt32(c.AuxInt) != 128 {
30863 break
30864 }
30865 z := c.Args[0]
30866 if !(c.Uses == 1) {
30867 break
30868 }
30869 v0 := b.NewValue0(c.Pos, OpAMD64CMPLconst, types.TypeFlags)
30870 v0.AuxInt = int32ToAuxInt(127)
30871 v0.AddArg(z)
30872 b.resetWithControl(BlockAMD64LE, v0)
30873 return true
30874 }
30875
30876
30877 for b.Controls[0].Op == OpAMD64InvertFlags {
30878 v_0 := b.Controls[0]
30879 cmp := v_0.Args[0]
30880 b.resetWithControl(BlockAMD64GT, cmp)
30881 return true
30882 }
30883
30884
30885 for b.Controls[0].Op == OpAMD64FlagEQ {
30886 b.Reset(BlockFirst)
30887 b.swapSuccessors()
30888 return true
30889 }
30890
30891
30892 for b.Controls[0].Op == OpAMD64FlagLT_ULT {
30893 b.Reset(BlockFirst)
30894 return true
30895 }
30896
30897
30898 for b.Controls[0].Op == OpAMD64FlagLT_UGT {
30899 b.Reset(BlockFirst)
30900 return true
30901 }
30902
30903
30904 for b.Controls[0].Op == OpAMD64FlagGT_ULT {
30905 b.Reset(BlockFirst)
30906 b.swapSuccessors()
30907 return true
30908 }
30909
30910
30911 for b.Controls[0].Op == OpAMD64FlagGT_UGT {
30912 b.Reset(BlockFirst)
30913 b.swapSuccessors()
30914 return true
30915 }
30916 case BlockAMD64NE:
30917
30918
30919 for b.Controls[0].Op == OpAMD64TESTB {
30920 v_0 := b.Controls[0]
30921 _ = v_0.Args[1]
30922 v_0_0 := v_0.Args[0]
30923 if v_0_0.Op != OpAMD64SETL {
30924 break
30925 }
30926 cmp := v_0_0.Args[0]
30927 v_0_1 := v_0.Args[1]
30928 if v_0_1.Op != OpAMD64SETL || cmp != v_0_1.Args[0] {
30929 break
30930 }
30931 b.resetWithControl(BlockAMD64LT, cmp)
30932 return true
30933 }
30934
30935
30936 for b.Controls[0].Op == OpAMD64TESTB {
30937 v_0 := b.Controls[0]
30938 _ = v_0.Args[1]
30939 v_0_0 := v_0.Args[0]
30940 if v_0_0.Op != OpAMD64SETLE {
30941 break
30942 }
30943 cmp := v_0_0.Args[0]
30944 v_0_1 := v_0.Args[1]
30945 if v_0_1.Op != OpAMD64SETLE || cmp != v_0_1.Args[0] {
30946 break
30947 }
30948 b.resetWithControl(BlockAMD64LE, cmp)
30949 return true
30950 }
30951
30952
30953 for b.Controls[0].Op == OpAMD64TESTB {
30954 v_0 := b.Controls[0]
30955 _ = v_0.Args[1]
30956 v_0_0 := v_0.Args[0]
30957 if v_0_0.Op != OpAMD64SETG {
30958 break
30959 }
30960 cmp := v_0_0.Args[0]
30961 v_0_1 := v_0.Args[1]
30962 if v_0_1.Op != OpAMD64SETG || cmp != v_0_1.Args[0] {
30963 break
30964 }
30965 b.resetWithControl(BlockAMD64GT, cmp)
30966 return true
30967 }
30968
30969
30970 for b.Controls[0].Op == OpAMD64TESTB {
30971 v_0 := b.Controls[0]
30972 _ = v_0.Args[1]
30973 v_0_0 := v_0.Args[0]
30974 if v_0_0.Op != OpAMD64SETGE {
30975 break
30976 }
30977 cmp := v_0_0.Args[0]
30978 v_0_1 := v_0.Args[1]
30979 if v_0_1.Op != OpAMD64SETGE || cmp != v_0_1.Args[0] {
30980 break
30981 }
30982 b.resetWithControl(BlockAMD64GE, cmp)
30983 return true
30984 }
30985
30986
30987 for b.Controls[0].Op == OpAMD64TESTB {
30988 v_0 := b.Controls[0]
30989 _ = v_0.Args[1]
30990 v_0_0 := v_0.Args[0]
30991 if v_0_0.Op != OpAMD64SETEQ {
30992 break
30993 }
30994 cmp := v_0_0.Args[0]
30995 v_0_1 := v_0.Args[1]
30996 if v_0_1.Op != OpAMD64SETEQ || cmp != v_0_1.Args[0] {
30997 break
30998 }
30999 b.resetWithControl(BlockAMD64EQ, cmp)
31000 return true
31001 }
31002
31003
31004 for b.Controls[0].Op == OpAMD64TESTB {
31005 v_0 := b.Controls[0]
31006 _ = v_0.Args[1]
31007 v_0_0 := v_0.Args[0]
31008 if v_0_0.Op != OpAMD64SETNE {
31009 break
31010 }
31011 cmp := v_0_0.Args[0]
31012 v_0_1 := v_0.Args[1]
31013 if v_0_1.Op != OpAMD64SETNE || cmp != v_0_1.Args[0] {
31014 break
31015 }
31016 b.resetWithControl(BlockAMD64NE, cmp)
31017 return true
31018 }
31019
31020
31021 for b.Controls[0].Op == OpAMD64TESTB {
31022 v_0 := b.Controls[0]
31023 _ = v_0.Args[1]
31024 v_0_0 := v_0.Args[0]
31025 if v_0_0.Op != OpAMD64SETB {
31026 break
31027 }
31028 cmp := v_0_0.Args[0]
31029 v_0_1 := v_0.Args[1]
31030 if v_0_1.Op != OpAMD64SETB || cmp != v_0_1.Args[0] {
31031 break
31032 }
31033 b.resetWithControl(BlockAMD64ULT, cmp)
31034 return true
31035 }
31036
31037
31038 for b.Controls[0].Op == OpAMD64TESTB {
31039 v_0 := b.Controls[0]
31040 _ = v_0.Args[1]
31041 v_0_0 := v_0.Args[0]
31042 if v_0_0.Op != OpAMD64SETBE {
31043 break
31044 }
31045 cmp := v_0_0.Args[0]
31046 v_0_1 := v_0.Args[1]
31047 if v_0_1.Op != OpAMD64SETBE || cmp != v_0_1.Args[0] {
31048 break
31049 }
31050 b.resetWithControl(BlockAMD64ULE, cmp)
31051 return true
31052 }
31053
31054
31055 for b.Controls[0].Op == OpAMD64TESTB {
31056 v_0 := b.Controls[0]
31057 _ = v_0.Args[1]
31058 v_0_0 := v_0.Args[0]
31059 if v_0_0.Op != OpAMD64SETA {
31060 break
31061 }
31062 cmp := v_0_0.Args[0]
31063 v_0_1 := v_0.Args[1]
31064 if v_0_1.Op != OpAMD64SETA || cmp != v_0_1.Args[0] {
31065 break
31066 }
31067 b.resetWithControl(BlockAMD64UGT, cmp)
31068 return true
31069 }
31070
31071
31072 for b.Controls[0].Op == OpAMD64TESTB {
31073 v_0 := b.Controls[0]
31074 _ = v_0.Args[1]
31075 v_0_0 := v_0.Args[0]
31076 if v_0_0.Op != OpAMD64SETAE {
31077 break
31078 }
31079 cmp := v_0_0.Args[0]
31080 v_0_1 := v_0.Args[1]
31081 if v_0_1.Op != OpAMD64SETAE || cmp != v_0_1.Args[0] {
31082 break
31083 }
31084 b.resetWithControl(BlockAMD64UGE, cmp)
31085 return true
31086 }
31087
31088
31089 for b.Controls[0].Op == OpAMD64TESTB {
31090 v_0 := b.Controls[0]
31091 _ = v_0.Args[1]
31092 v_0_0 := v_0.Args[0]
31093 if v_0_0.Op != OpAMD64SETO {
31094 break
31095 }
31096 cmp := v_0_0.Args[0]
31097 v_0_1 := v_0.Args[1]
31098 if v_0_1.Op != OpAMD64SETO || cmp != v_0_1.Args[0] {
31099 break
31100 }
31101 b.resetWithControl(BlockAMD64OS, cmp)
31102 return true
31103 }
31104
31105
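// match: (NE (TESTL (SHLL (MOVLconst [1]) x) y) yes no)
// result: (ULT (BTL x y) yes no)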
31106 for b.Controls[0].Op == OpAMD64TESTL {
31107 v_0 := b.Controls[0]
31108 _ = v_0.Args[1]
31109 v_0_0 := v_0.Args[0]
31110 v_0_1 := v_0.Args[1]
31111 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
31112 if v_0_0.Op != OpAMD64SHLL {
31113 continue
31114 }
31115 x := v_0_0.Args[1]
31116 v_0_0_0 := v_0_0.Args[0]
31117 if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 {
31118 continue
31119 }
31120 y := v_0_1
31121 v0 := b.NewValue0(v_0.Pos, OpAMD64BTL, types.TypeFlags)
31122 v0.AddArg2(x, y)
31123 b.resetWithControl(BlockAMD64ULT, v0)
31124 return true
31125 }
31126 break
31127 }
31128
31129
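// match: (NE (TESTQ (SHLQ (MOVQconst [1]) x) y) yes no)
// result: (ULT (BTQ x y) yes no)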
31130 for b.Controls[0].Op == OpAMD64TESTQ {
31131 v_0 := b.Controls[0]
31132 _ = v_0.Args[1]
31133 v_0_0 := v_0.Args[0]
31134 v_0_1 := v_0.Args[1]
31135 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
31136 if v_0_0.Op != OpAMD64SHLQ {
31137 continue
31138 }
31139 x := v_0_0.Args[1]
31140 v_0_0_0 := v_0_0.Args[0]
31141 if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
31142 continue
31143 }
31144 y := v_0_1
31145 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQ, types.TypeFlags)
31146 v0.AddArg2(x, y)
31147 b.resetWithControl(BlockAMD64ULT, v0)
31148 return true
31149 }
31150 break
31151 }
31152
31153
31154
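// match: (NE (TESTLconst [c] x) yes no)
// cond: isUint32PowerOfTwo(int64(c))
// result: (ULT (BTLconst [int8(log32(c))] x) yes no)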
31155 for b.Controls[0].Op == OpAMD64TESTLconst {
31156 v_0 := b.Controls[0]
31157 c := auxIntToInt32(v_0.AuxInt)
31158 x := v_0.Args[0]
31159 if !(isUint32PowerOfTwo(int64(c))) {
31160 break
31161 }
31162 v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
31163 v0.AuxInt = int8ToAuxInt(int8(log32(c)))
31164 v0.AddArg(x)
31165 b.resetWithControl(BlockAMD64ULT, v0)
31166 return true
31167 }
31168
31169
31170
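// match: (NE (TESTQconst [c] x) yes no)
// cond: isUint64PowerOfTwo(int64(c))
// result: (ULT (BTQconst [int8(log32(c))] x) yes no)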
31171 for b.Controls[0].Op == OpAMD64TESTQconst {
31172 v_0 := b.Controls[0]
31173 c := auxIntToInt32(v_0.AuxInt)
31174 x := v_0.Args[0]
31175 if !(isUint64PowerOfTwo(int64(c))) {
31176 break
31177 }
31178 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
31179 v0.AuxInt = int8ToAuxInt(int8(log32(c)))
31180 v0.AddArg(x)
31181 b.resetWithControl(BlockAMD64ULT, v0)
31182 return true
31183 }
31184
31185
31186
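// match: (NE (TESTQ (MOVQconst [c]) x) yes no)
// cond: isUint64PowerOfTwo(c)
// result: (ULT (BTQconst [int8(log64(c))] x) yes no)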
31187 for b.Controls[0].Op == OpAMD64TESTQ {
31188 v_0 := b.Controls[0]
31189 _ = v_0.Args[1]
31190 v_0_0 := v_0.Args[0]
31191 v_0_1 := v_0.Args[1]
31192 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
31193 if v_0_0.Op != OpAMD64MOVQconst {
31194 continue
31195 }
31196 c := auxIntToInt64(v_0_0.AuxInt)
31197 x := v_0_1
31198 if !(isUint64PowerOfTwo(c)) {
31199 continue
31200 }
31201 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
31202 v0.AuxInt = int8ToAuxInt(int8(log64(c)))
31203 v0.AddArg(x)
31204 b.resetWithControl(BlockAMD64ULT, v0)
31205 return true
31206 }
31207 break
31208 }
31209
31210
31211
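// match: (NE (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2) yes no)
// cond: z1 == z2
// result: (ULT (BTQconst [63] x) yes no)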
31212 for b.Controls[0].Op == OpAMD64TESTQ {
31213 v_0 := b.Controls[0]
31214 _ = v_0.Args[1]
31215 v_0_0 := v_0.Args[0]
31216 v_0_1 := v_0.Args[1]
31217 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
31218 z1 := v_0_0
31219 if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 {
31220 continue
31221 }
31222 z1_0 := z1.Args[0]
31223 if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
31224 continue
31225 }
31226 x := z1_0.Args[0]
31227 z2 := v_0_1
31228 if !(z1 == z2) {
31229 continue
31230 }
31231 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
31232 v0.AuxInt = int8ToAuxInt(63)
31233 v0.AddArg(x)
31234 b.resetWithControl(BlockAMD64ULT, v0)
31235 return true
31236 }
31237 break
31238 }
31239
31240
31241
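// match: (NE (TESTL z1:(SHLLconst [31] (SHRQconst [31] x)) z2) yes no)
// cond: z1 == z2
// result: (ULT (BTQconst [31] x) yes no)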
31242 for b.Controls[0].Op == OpAMD64TESTL {
31243 v_0 := b.Controls[0]
31244 _ = v_0.Args[1]
31245 v_0_0 := v_0.Args[0]
31246 v_0_1 := v_0.Args[1]
31247 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
31248 z1 := v_0_0
31249 if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 {
31250 continue
31251 }
31252 z1_0 := z1.Args[0]
31253 if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 31 {
31254 continue
31255 }
31256 x := z1_0.Args[0]
31257 z2 := v_0_1
31258 if !(z1 == z2) {
31259 continue
31260 }
31261 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
31262 v0.AuxInt = int8ToAuxInt(31)
31263 v0.AddArg(x)
31264 b.resetWithControl(BlockAMD64ULT, v0)
31265 return true
31266 }
31267 break
31268 }
31269
31270
31271
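// match: (NE (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2) yes no)
// cond: z1 == z2
// result: (ULT (BTQconst [0] x) yes no)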
31272 for b.Controls[0].Op == OpAMD64TESTQ {
31273 v_0 := b.Controls[0]
31274 _ = v_0.Args[1]
31275 v_0_0 := v_0.Args[0]
31276 v_0_1 := v_0.Args[1]
31277 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
31278 z1 := v_0_0
31279 if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
31280 continue
31281 }
31282 z1_0 := z1.Args[0]
31283 if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
31284 continue
31285 }
31286 x := z1_0.Args[0]
31287 z2 := v_0_1
31288 if !(z1 == z2) {
31289 continue
31290 }
31291 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
31292 v0.AuxInt = int8ToAuxInt(0)
31293 v0.AddArg(x)
31294 b.resetWithControl(BlockAMD64ULT, v0)
31295 return true
31296 }
31297 break
31298 }
31299
31300
31301
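// match: (NE (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2) yes no)
// cond: z1 == z2
// result: (ULT (BTLconst [0] x) yes no)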
31302 for b.Controls[0].Op == OpAMD64TESTL {
31303 v_0 := b.Controls[0]
31304 _ = v_0.Args[1]
31305 v_0_0 := v_0.Args[0]
31306 v_0_1 := v_0.Args[1]
31307 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
31308 z1 := v_0_0
31309 if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
31310 continue
31311 }
31312 z1_0 := z1.Args[0]
31313 if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
31314 continue
31315 }
31316 x := z1_0.Args[0]
31317 z2 := v_0_1
31318 if !(z1 == z2) {
31319 continue
31320 }
31321 v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
31322 v0.AuxInt = int8ToAuxInt(0)
31323 v0.AddArg(x)
31324 b.resetWithControl(BlockAMD64ULT, v0)
31325 return true
31326 }
31327 break
31328 }
31329
31330
31331
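// match: (NE (TESTQ z1:(SHRQconst [63] x) z2) yes no)
// cond: z1 == z2
// result: (ULT (BTQconst [63] x) yes no)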
31332 for b.Controls[0].Op == OpAMD64TESTQ {
31333 v_0 := b.Controls[0]
31334 _ = v_0.Args[1]
31335 v_0_0 := v_0.Args[0]
31336 v_0_1 := v_0.Args[1]
31337 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
31338 z1 := v_0_0
31339 if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
31340 continue
31341 }
31342 x := z1.Args[0]
31343 z2 := v_0_1
31344 if !(z1 == z2) {
31345 continue
31346 }
31347 v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
31348 v0.AuxInt = int8ToAuxInt(63)
31349 v0.AddArg(x)
31350 b.resetWithControl(BlockAMD64ULT, v0)
31351 return true
31352 }
31353 break
31354 }
31355
31356
31357
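// match: (NE (TESTL z1:(SHRLconst [31] x) z2) yes no)
// cond: z1 == z2
// result: (ULT (BTLconst [31] x) yes no)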
31358 for b.Controls[0].Op == OpAMD64TESTL {
31359 v_0 := b.Controls[0]
31360 _ = v_0.Args[1]
31361 v_0_0 := v_0.Args[0]
31362 v_0_1 := v_0.Args[1]
31363 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
31364 z1 := v_0_0
31365 if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
31366 continue
31367 }
31368 x := z1.Args[0]
31369 z2 := v_0_1
31370 if !(z1 == z2) {
31371 continue
31372 }
31373 v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
31374 v0.AuxInt = int8ToAuxInt(31)
31375 v0.AddArg(x)
31376 b.resetWithControl(BlockAMD64ULT, v0)
31377 return true
31378 }
31379 break
31380 }
31381
31382
31383 for b.Controls[0].Op == OpAMD64TESTB {
31384 v_0 := b.Controls[0]
31385 _ = v_0.Args[1]
31386 v_0_0 := v_0.Args[0]
31387 if v_0_0.Op != OpAMD64SETGF {
31388 break
31389 }
31390 cmp := v_0_0.Args[0]
31391 v_0_1 := v_0.Args[1]
31392 if v_0_1.Op != OpAMD64SETGF || cmp != v_0_1.Args[0] {
31393 break
31394 }
31395 b.resetWithControl(BlockAMD64UGT, cmp)
31396 return true
31397 }
31398
31399
31400 for b.Controls[0].Op == OpAMD64TESTB {
31401 v_0 := b.Controls[0]
31402 _ = v_0.Args[1]
31403 v_0_0 := v_0.Args[0]
31404 if v_0_0.Op != OpAMD64SETGEF {
31405 break
31406 }
31407 cmp := v_0_0.Args[0]
31408 v_0_1 := v_0.Args[1]
31409 if v_0_1.Op != OpAMD64SETGEF || cmp != v_0_1.Args[0] {
31410 break
31411 }
31412 b.resetWithControl(BlockAMD64UGE, cmp)
31413 return true
31414 }
31415
31416
31417 for b.Controls[0].Op == OpAMD64TESTB {
31418 v_0 := b.Controls[0]
31419 _ = v_0.Args[1]
31420 v_0_0 := v_0.Args[0]
31421 if v_0_0.Op != OpAMD64SETEQF {
31422 break
31423 }
31424 cmp := v_0_0.Args[0]
31425 v_0_1 := v_0.Args[1]
31426 if v_0_1.Op != OpAMD64SETEQF || cmp != v_0_1.Args[0] {
31427 break
31428 }
31429 b.resetWithControl(BlockAMD64EQF, cmp)
31430 return true
31431 }
31432
31433
31434 for b.Controls[0].Op == OpAMD64TESTB {
31435 v_0 := b.Controls[0]
31436 _ = v_0.Args[1]
31437 v_0_0 := v_0.Args[0]
31438 if v_0_0.Op != OpAMD64SETNEF {
31439 break
31440 }
31441 cmp := v_0_0.Args[0]
31442 v_0_1 := v_0.Args[1]
31443 if v_0_1.Op != OpAMD64SETNEF || cmp != v_0_1.Args[0] {
31444 break
31445 }
31446 b.resetWithControl(BlockAMD64NEF, cmp)
31447 return true
31448 }
31449
31450
31451 for b.Controls[0].Op == OpAMD64InvertFlags {
31452 v_0 := b.Controls[0]
31453 cmp := v_0.Args[0]
31454 b.resetWithControl(BlockAMD64NE, cmp)
31455 return true
31456 }
31457
31458
31459 for b.Controls[0].Op == OpAMD64FlagEQ {
31460 b.Reset(BlockFirst)
31461 b.swapSuccessors()
31462 return true
31463 }
31464
31465
31466 for b.Controls[0].Op == OpAMD64FlagLT_ULT {
31467 b.Reset(BlockFirst)
31468 return true
31469 }
31470
31471
31472 for b.Controls[0].Op == OpAMD64FlagLT_UGT {
31473 b.Reset(BlockFirst)
31474 return true
31475 }
31476
31477
31478 for b.Controls[0].Op == OpAMD64FlagGT_ULT {
31479 b.Reset(BlockFirst)
31480 return true
31481 }
31482
31483
31484 for b.Controls[0].Op == OpAMD64FlagGT_UGT {
31485 b.Reset(BlockFirst)
31486 return true
31487 }
31488
31489
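// match: (NE (TESTQ s:(Select0 blsr:(BLSRQ _)) s) yes no)
// result: (NE (Select1 <types.TypeFlags> blsr) yes no)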
31490 for b.Controls[0].Op == OpAMD64TESTQ {
31491 v_0 := b.Controls[0]
31492 _ = v_0.Args[1]
31493 v_0_0 := v_0.Args[0]
31494 v_0_1 := v_0.Args[1]
31495 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
31496 s := v_0_0
31497 if s.Op != OpSelect0 {
31498 continue
31499 }
31500 blsr := s.Args[0]
31501 if blsr.Op != OpAMD64BLSRQ || s != v_0_1 {
31502 continue
31503 }
31504 v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
31505 v0.AddArg(blsr)
31506 b.resetWithControl(BlockAMD64NE, v0)
31507 return true
31508 }
31509 break
31510 }
31511
31512
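// match: (NE (TESTL s:(Select0 blsr:(BLSRL _)) s) yes no)
// result: (NE (Select1 <types.TypeFlags> blsr) yes no)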
31513 for b.Controls[0].Op == OpAMD64TESTL {
31514 v_0 := b.Controls[0]
31515 _ = v_0.Args[1]
31516 v_0_0 := v_0.Args[0]
31517 v_0_1 := v_0.Args[1]
31518 for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
31519 s := v_0_0
31520 if s.Op != OpSelect0 {
31521 continue
31522 }
31523 blsr := s.Args[0]
31524 if blsr.Op != OpAMD64BLSRL || s != v_0_1 {
31525 continue
31526 }
31527 v0 := b.NewValue0(v_0.Pos, OpSelect1, types.TypeFlags)
31528 v0.AddArg(blsr)
31529 b.resetWithControl(BlockAMD64NE, v0)
31530 return true
31531 }
31532 break
31533 }
31534
31535
31536
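// match: (NE t:(TESTQ a:(ADDQconst [c] x) a) yes no)
// cond: t.Uses == 1 && flagify(a)
// result: (NE (Select1 <types.TypeFlags> a.Args[0]) yes no)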
31537 for b.Controls[0].Op == OpAMD64TESTQ {
31538 t := b.Controls[0]
31539 _ = t.Args[1]
31540 t_0 := t.Args[0]
31541 t_1 := t.Args[1]
31542 for _i0 := 0; _i0 <= 1; _i0, t_0, t_1 = _i0+1, t_1, t_0 {
31543 a := t_0
31544 if a.Op != OpAMD64ADDQconst {
31545 continue
31546 }
31547 if a != t_1 || !(t.Uses == 1 && flagify(a)) {
31548 continue
31549 }
31550 v0 := b.NewValue0(t.Pos, OpSelect1, types.TypeFlags)
31551 v0.AddArg(a.Args[0])
31552 b.resetWithControl(BlockAMD64NE, v0)
31553 return true
31554 }
31555 break
31556 }
31557
31558
31559
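// match: (NE t:(TESTL a:(ADDLconst [c] x) a) yes no)
// cond: t.Uses == 1 && flagify(a)
// result: (NE (Select1 <types.TypeFlags> a.Args[0]) yes no)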
31560 for b.Controls[0].Op == OpAMD64TESTL {
31561 t := b.Controls[0]
31562 _ = t.Args[1]
31563 t_0 := t.Args[0]
31564 t_1 := t.Args[1]
31565 for _i0 := 0; _i0 <= 1; _i0, t_0, t_1 = _i0+1, t_1, t_0 {
31566 a := t_0
31567 if a.Op != OpAMD64ADDLconst {
31568 continue
31569 }
31570 if a != t_1 || !(t.Uses == 1 && flagify(a)) {
31571 continue
31572 }
31573 v0 := b.NewValue0(t.Pos, OpSelect1, types.TypeFlags)
31574 v0.AddArg(a.Args[0])
31575 b.resetWithControl(BlockAMD64NE, v0)
31576 return true
31577 }
31578 break
31579 }
31580 case BlockAMD64UGE:
31581
31582
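// match: (UGE (TESTQ x x) yes no)
// result: (First yes no)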
31583 for b.Controls[0].Op == OpAMD64TESTQ {
31584 v_0 := b.Controls[0]
31585 x := v_0.Args[1]
31586 if x != v_0.Args[0] {
31587 break
31588 }
31589 b.Reset(BlockFirst)
31590 return true
31591 }
31592
31593
31594 for b.Controls[0].Op == OpAMD64TESTL {
31595 v_0 := b.Controls[0]
31596 x := v_0.Args[1]
31597 if x != v_0.Args[0] {
31598 break
31599 }
31600 b.Reset(BlockFirst)
31601 return true
31602 }
31603
31604
31605 for b.Controls[0].Op == OpAMD64TESTW {
31606 v_0 := b.Controls[0]
31607 x := v_0.Args[1]
31608 if x != v_0.Args[0] {
31609 break
31610 }
31611 b.Reset(BlockFirst)
31612 return true
31613 }
31614
31615
31616 for b.Controls[0].Op == OpAMD64TESTB {
31617 v_0 := b.Controls[0]
31618 x := v_0.Args[1]
31619 if x != v_0.Args[0] {
31620 break
31621 }
31622 b.Reset(BlockFirst)
31623 return true
31624 }
31625
31626
31627 for b.Controls[0].Op == OpAMD64InvertFlags {
31628 v_0 := b.Controls[0]
31629 cmp := v_0.Args[0]
31630 b.resetWithControl(BlockAMD64ULE, cmp)
31631 return true
31632 }
31633
31634
31635 for b.Controls[0].Op == OpAMD64FlagEQ {
31636 b.Reset(BlockFirst)
31637 return true
31638 }
31639
31640
31641 for b.Controls[0].Op == OpAMD64FlagLT_ULT {
31642 b.Reset(BlockFirst)
31643 b.swapSuccessors()
31644 return true
31645 }
31646
31647
31648 for b.Controls[0].Op == OpAMD64FlagLT_UGT {
31649 b.Reset(BlockFirst)
31650 return true
31651 }
31652
31653
31654 for b.Controls[0].Op == OpAMD64FlagGT_ULT {
31655 b.Reset(BlockFirst)
31656 b.swapSuccessors()
31657 return true
31658 }
31659
31660
31661 for b.Controls[0].Op == OpAMD64FlagGT_UGT {
31662 b.Reset(BlockFirst)
31663 return true
31664 }
31665 case BlockAMD64UGT:
31666
31667
31668 for b.Controls[0].Op == OpAMD64InvertFlags {
31669 v_0 := b.Controls[0]
31670 cmp := v_0.Args[0]
31671 b.resetWithControl(BlockAMD64ULT, cmp)
31672 return true
31673 }
31674
31675
31676 for b.Controls[0].Op == OpAMD64FlagEQ {
31677 b.Reset(BlockFirst)
31678 b.swapSuccessors()
31679 return true
31680 }
31681
31682
31683 for b.Controls[0].Op == OpAMD64FlagLT_ULT {
31684 b.Reset(BlockFirst)
31685 b.swapSuccessors()
31686 return true
31687 }
31688
31689
31690 for b.Controls[0].Op == OpAMD64FlagLT_UGT {
31691 b.Reset(BlockFirst)
31692 return true
31693 }
31694
31695
31696 for b.Controls[0].Op == OpAMD64FlagGT_ULT {
31697 b.Reset(BlockFirst)
31698 b.swapSuccessors()
31699 return true
31700 }
31701
31702
31703 for b.Controls[0].Op == OpAMD64FlagGT_UGT {
31704 b.Reset(BlockFirst)
31705 return true
31706 }
31707 case BlockAMD64ULE:
31708
31709
31710 for b.Controls[0].Op == OpAMD64InvertFlags {
31711 v_0 := b.Controls[0]
31712 cmp := v_0.Args[0]
31713 b.resetWithControl(BlockAMD64UGE, cmp)
31714 return true
31715 }
31716
31717
31718 for b.Controls[0].Op == OpAMD64FlagEQ {
31719 b.Reset(BlockFirst)
31720 return true
31721 }
31722
31723
31724 for b.Controls[0].Op == OpAMD64FlagLT_ULT {
31725 b.Reset(BlockFirst)
31726 return true
31727 }
31728
31729
31730 for b.Controls[0].Op == OpAMD64FlagLT_UGT {
31731 b.Reset(BlockFirst)
31732 b.swapSuccessors()
31733 return true
31734 }
31735
31736
31737 for b.Controls[0].Op == OpAMD64FlagGT_ULT {
31738 b.Reset(BlockFirst)
31739 return true
31740 }
31741
31742
31743 for b.Controls[0].Op == OpAMD64FlagGT_UGT {
31744 b.Reset(BlockFirst)
31745 b.swapSuccessors()
31746 return true
31747 }
31748 case BlockAMD64ULT:
31749
31750
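// match: (ULT (TESTQ x x) yes no)
// result: (First no yes)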
31751 for b.Controls[0].Op == OpAMD64TESTQ {
31752 v_0 := b.Controls[0]
31753 x := v_0.Args[1]
31754 if x != v_0.Args[0] {
31755 break
31756 }
31757 b.Reset(BlockFirst)
31758 b.swapSuccessors()
31759 return true
31760 }
31761
31762
31763 for b.Controls[0].Op == OpAMD64TESTL {
31764 v_0 := b.Controls[0]
31765 x := v_0.Args[1]
31766 if x != v_0.Args[0] {
31767 break
31768 }
31769 b.Reset(BlockFirst)
31770 b.swapSuccessors()
31771 return true
31772 }
31773
31774
31775 for b.Controls[0].Op == OpAMD64TESTW {
31776 v_0 := b.Controls[0]
31777 x := v_0.Args[1]
31778 if x != v_0.Args[0] {
31779 break
31780 }
31781 b.Reset(BlockFirst)
31782 b.swapSuccessors()
31783 return true
31784 }
31785
31786
31787 for b.Controls[0].Op == OpAMD64TESTB {
31788 v_0 := b.Controls[0]
31789 x := v_0.Args[1]
31790 if x != v_0.Args[0] {
31791 break
31792 }
31793 b.Reset(BlockFirst)
31794 b.swapSuccessors()
31795 return true
31796 }
31797
31798
31799 for b.Controls[0].Op == OpAMD64InvertFlags {
31800 v_0 := b.Controls[0]
31801 cmp := v_0.Args[0]
31802 b.resetWithControl(BlockAMD64UGT, cmp)
31803 return true
31804 }
31805
31806
31807 for b.Controls[0].Op == OpAMD64FlagEQ {
31808 b.Reset(BlockFirst)
31809 b.swapSuccessors()
31810 return true
31811 }
31812
31813
31814 for b.Controls[0].Op == OpAMD64FlagLT_ULT {
31815 b.Reset(BlockFirst)
31816 return true
31817 }
31818
31819
31820 for b.Controls[0].Op == OpAMD64FlagLT_UGT {
31821 b.Reset(BlockFirst)
31822 b.swapSuccessors()
31823 return true
31824 }
31825
31826
31827 for b.Controls[0].Op == OpAMD64FlagGT_ULT {
31828 b.Reset(BlockFirst)
31829 return true
31830 }
31831
31832
31833 for b.Controls[0].Op == OpAMD64FlagGT_UGT {
31834 b.Reset(BlockFirst)
31835 b.swapSuccessors()
31836 return true
31837 }
31838 }
31839 return false
31840 }