# qhasm: int64 input_0

# qhasm: int64 input_1

# qhasm: int64 input_2

# qhasm: int64 input_3

# qhasm: int64 input_4

# qhasm: int64 input_5

# qhasm: stack64 input_6

# qhasm: stack64 input_7

# qhasm: int64 caller_r11

# qhasm: int64 caller_r12

# qhasm: int64 caller_r13

# qhasm: int64 caller_r14

# qhasm: int64 caller_r15

# qhasm: int64 caller_rbx

# qhasm: int64 caller_rbp

# qhasm: reg256 a0

# qhasm: reg256 a1

# qhasm: reg256 a2

# qhasm: reg256 a3

# qhasm: reg256 a4

# qhasm: reg256 a5

# qhasm: reg256 a6

# qhasm: reg256 a7

# qhasm: reg256 a8

# qhasm: reg256 a9

# qhasm: reg256 a10

# qhasm: reg256 a11

# qhasm: reg256 a12

# qhasm: reg256 b0

# qhasm: reg256 b1

# qhasm: reg256 r0

# qhasm: reg256 r1

# qhasm: reg256 r2

# qhasm: reg256 r3

# qhasm: reg256 r4

# qhasm: reg256 r5

# qhasm: reg256 r6

# qhasm: reg256 r7

# qhasm: reg256 r8

# qhasm: reg256 r9

# qhasm: reg256 r10

# qhasm: reg256 r11

# qhasm: reg256 r12

# qhasm: reg256 r13

# qhasm: reg256 r14

# qhasm: reg256 r15

# qhasm: reg256 r16

# qhasm: reg256 r17

# qhasm: reg256 r18

# qhasm: reg256 r19

# qhasm: reg256 r20

# qhasm: reg256 r21

# qhasm: reg256 r22

# qhasm: reg256 r23

# qhasm: reg256 r24

# qhasm: reg256 r

# qhasm: enter vec256_mul_asm
.p2align 5
.global _PQCLEAN_MCELIECE8192128F_AVX_vec256_mul_asm
.global PQCLEAN_MCELIECE8192128F_AVX_vec256_mul_asm
_PQCLEAN_MCELIECE8192128F_AVX_vec256_mul_asm:
PQCLEAN_MCELIECE8192128F_AVX_vec256_mul_asm:
mov %rsp,%r11
and $31,%r11
add $0,%r11
sub %r11,%rsp
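
# Note (added): this appears to be qhasm-generated code for a bitsliced
# carry-less multiplication. Thirteen 256-bit limbs of input_1 (%rsi) are
# multiplied by thirteen 256-bit limbs of input_2 (%rdx): AND gives the
# bit-products, XOR accumulates them, and the high limbs are then folded
# back down. A minimal reference sketch of the assumed semantics, in
# C-like pseudocode (names here are illustrative, not from the source):
#
#   vec256 r[25] = {0};
#   for (i = 0; i < 13; i++)
#       for (j = 0; j < 13; j++)
#           r[i + j] ^= a[i] & b[j];      /* schoolbook product over GF(2) */
#   for (i = 24; i >= 13; i--) {          /* fold the high limbs back down */
#       r[i - 13 + 4] ^= r[i];
#       r[i - 13 + 3] ^= r[i];
#       r[i - 13 + 1] ^= r[i];
#       r[i - 13]     ^= r[i];
#   }
#   /* r[0..12] presumably written to input_0 (%rdi); not shown in this excerpt */
#
# The assembly below interleaves the two phases: after each a-limb's partial
# products, the newly finalized top limb is folded immediately.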

# qhasm: b0 = mem256[ input_2 + 0 ]
# asm 1: vmovupd 0(<input_2=int64#3),>b0=reg256#1
# asm 2: vmovupd 0(<input_2=%rdx),>b0=%ymm0
vmovupd 0(%rdx),%ymm0

# qhasm: a12 = mem256[ input_1 + 384 ]
# asm 1: vmovupd 384(<input_1=int64#2),>a12=reg256#2
# asm 2: vmovupd 384(<input_1=%rsi),>a12=%ymm1
vmovupd 384(%rsi),%ymm1

# qhasm: r12 = a12 & b0
# asm 1: vpand <a12=reg256#2,<b0=reg256#1,>r12=reg256#3
# asm 2: vpand <a12=%ymm1,<b0=%ymm0,>r12=%ymm2
vpand %ymm1,%ymm0,%ymm2

# qhasm: r13 = a12 & mem256[input_2 + 32]
# asm 1: vpand 32(<input_2=int64#3),<a12=reg256#2,>r13=reg256#4
# asm 2: vpand 32(<input_2=%rdx),<a12=%ymm1,>r13=%ymm3
vpand 32(%rdx),%ymm1,%ymm3

# qhasm: r14 = a12 & mem256[input_2 + 64]
# asm 1: vpand 64(<input_2=int64#3),<a12=reg256#2,>r14=reg256#5
# asm 2: vpand 64(<input_2=%rdx),<a12=%ymm1,>r14=%ymm4
vpand 64(%rdx),%ymm1,%ymm4

# qhasm: r15 = a12 & mem256[input_2 + 96]
# asm 1: vpand 96(<input_2=int64#3),<a12=reg256#2,>r15=reg256#6
# asm 2: vpand 96(<input_2=%rdx),<a12=%ymm1,>r15=%ymm5
vpand 96(%rdx),%ymm1,%ymm5

# qhasm: r16 = a12 & mem256[input_2 + 128]
# asm 1: vpand 128(<input_2=int64#3),<a12=reg256#2,>r16=reg256#7
# asm 2: vpand 128(<input_2=%rdx),<a12=%ymm1,>r16=%ymm6
vpand 128(%rdx),%ymm1,%ymm6

# qhasm: r17 = a12 & mem256[input_2 + 160]
# asm 1: vpand 160(<input_2=int64#3),<a12=reg256#2,>r17=reg256#8
# asm 2: vpand 160(<input_2=%rdx),<a12=%ymm1,>r17=%ymm7
vpand 160(%rdx),%ymm1,%ymm7

# qhasm: r18 = a12 & mem256[input_2 + 192]
# asm 1: vpand 192(<input_2=int64#3),<a12=reg256#2,>r18=reg256#9
# asm 2: vpand 192(<input_2=%rdx),<a12=%ymm1,>r18=%ymm8
vpand 192(%rdx),%ymm1,%ymm8

# qhasm: r19 = a12 & mem256[input_2 + 224]
# asm 1: vpand 224(<input_2=int64#3),<a12=reg256#2,>r19=reg256#10
# asm 2: vpand 224(<input_2=%rdx),<a12=%ymm1,>r19=%ymm9
vpand 224(%rdx),%ymm1,%ymm9

# qhasm: r20 = a12 & mem256[input_2 + 256]
# asm 1: vpand 256(<input_2=int64#3),<a12=reg256#2,>r20=reg256#11
# asm 2: vpand 256(<input_2=%rdx),<a12=%ymm1,>r20=%ymm10
vpand 256(%rdx),%ymm1,%ymm10

# qhasm: r21 = a12 & mem256[input_2 + 288]
# asm 1: vpand 288(<input_2=int64#3),<a12=reg256#2,>r21=reg256#12
# asm 2: vpand 288(<input_2=%rdx),<a12=%ymm1,>r21=%ymm11
vpand 288(%rdx),%ymm1,%ymm11

# qhasm: r22 = a12 & mem256[input_2 + 320]
# asm 1: vpand 320(<input_2=int64#3),<a12=reg256#2,>r22=reg256#13
# asm 2: vpand 320(<input_2=%rdx),<a12=%ymm1,>r22=%ymm12
vpand 320(%rdx),%ymm1,%ymm12

# qhasm: r23 = a12 & mem256[input_2 + 352]
# asm 1: vpand 352(<input_2=int64#3),<a12=reg256#2,>r23=reg256#14
# asm 2: vpand 352(<input_2=%rdx),<a12=%ymm1,>r23=%ymm13
vpand 352(%rdx),%ymm1,%ymm13

# qhasm: r24 = a12 & mem256[input_2 + 384]
# asm 1: vpand 384(<input_2=int64#3),<a12=reg256#2,>r24=reg256#2
# asm 2: vpand 384(<input_2=%rdx),<a12=%ymm1,>r24=%ymm1
vpand 384(%rdx),%ymm1,%ymm1

# qhasm: r15 ^= r24
# asm 1: vpxor <r24=reg256#2,<r15=reg256#6,<r15=reg256#6
# asm 2: vpxor <r24=%ymm1,<r15=%ymm5,<r15=%ymm5
vpxor %ymm1,%ymm5,%ymm5

# qhasm: r14 ^= r24
# asm 1: vpxor <r24=reg256#2,<r14=reg256#5,<r14=reg256#5
# asm 2: vpxor <r24=%ymm1,<r14=%ymm4,<r14=%ymm4
vpxor %ymm1,%ymm4,%ymm4

# qhasm: r12 ^= r24
# asm 1: vpxor <r24=reg256#2,<r12=reg256#3,<r12=reg256#3
# asm 2: vpxor <r24=%ymm1,<r12=%ymm2,<r12=%ymm2
vpxor %ymm1,%ymm2,%ymm2

# qhasm: r11 = r24
# asm 1: vmovapd <r24=reg256#2,>r11=reg256#2
# asm 2: vmovapd <r24=%ymm1,>r11=%ymm1
vmovapd %ymm1,%ymm1
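
# Note (added): the fold above sends r24 into r15, r14, r12 and r11, i.e.
# limb 24 is reduced as x^24 = x^11 * x^13 == x^15 + x^14 + x^12 + x^11.
# This is consistent with reduction modulo the pentanomial
# x^13 + x^4 + x^3 + x + 1 (the GF(2^13) field polynomial of this
# parameter set), applied limb-wise: r[13+j] folds into r[4+j], r[3+j],
# r[1+j] and r[j]. The no-op "vmovapd %ymm1,%ymm1" is a register-renaming
# artifact of the qhasm translation, not a functional operation.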

# qhasm: a11 = mem256[ input_1 + 352 ]
# asm 1: vmovupd 352(<input_1=int64#2),>a11=reg256#15
# asm 2: vmovupd 352(<input_1=%rsi),>a11=%ymm14
vmovupd 352(%rsi),%ymm14

# qhasm: r = a11 & b0
# asm 1: vpand <a11=reg256#15,<b0=reg256#1,>r=reg256#16
# asm 2: vpand <a11=%ymm14,<b0=%ymm0,>r=%ymm15
vpand %ymm14,%ymm0,%ymm15

# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2
# asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1
vpxor %ymm15,%ymm1,%ymm1

# qhasm: r = a11 & mem256[input_2 + 32]
# asm 1: vpand 32(<input_2=int64#3),<a11=reg256#15,>r=reg256#16
# asm 2: vpand 32(<input_2=%rdx),<a11=%ymm14,>r=%ymm15
vpand 32(%rdx),%ymm14,%ymm15

# qhasm: r12 ^= r
# asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3
# asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2
vpxor %ymm15,%ymm2,%ymm2

# qhasm: r = a11 & mem256[input_2 + 64]
# asm 1: vpand 64(<input_2=int64#3),<a11=reg256#15,>r=reg256#16
# asm 2: vpand 64(<input_2=%rdx),<a11=%ymm14,>r=%ymm15
vpand 64(%rdx),%ymm14,%ymm15

# qhasm: r13 ^= r
# asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4
# asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3
vpxor %ymm15,%ymm3,%ymm3

# qhasm: r = a11 & mem256[input_2 + 96]
# asm 1: vpand 96(<input_2=int64#3),<a11=reg256#15,>r=reg256#16
# asm 2: vpand 96(<input_2=%rdx),<a11=%ymm14,>r=%ymm15
vpand 96(%rdx),%ymm14,%ymm15

# qhasm: r14 ^= r
# asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5
# asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4
vpxor %ymm15,%ymm4,%ymm4

# qhasm: r = a11 & mem256[input_2 + 128]
# asm 1: vpand 128(<input_2=int64#3),<a11=reg256#15,>r=reg256#16
# asm 2: vpand 128(<input_2=%rdx),<a11=%ymm14,>r=%ymm15
vpand 128(%rdx),%ymm14,%ymm15

# qhasm: r15 ^= r
# asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6
# asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5
vpxor %ymm15,%ymm5,%ymm5

# qhasm: r = a11 & mem256[input_2 + 160]
# asm 1: vpand 160(<input_2=int64#3),<a11=reg256#15,>r=reg256#16
# asm 2: vpand 160(<input_2=%rdx),<a11=%ymm14,>r=%ymm15
vpand 160(%rdx),%ymm14,%ymm15

# qhasm: r16 ^= r
# asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7
# asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6
vpxor %ymm15,%ymm6,%ymm6

# qhasm: r = a11 & mem256[input_2 + 192]
# asm 1: vpand 192(<input_2=int64#3),<a11=reg256#15,>r=reg256#16
# asm 2: vpand 192(<input_2=%rdx),<a11=%ymm14,>r=%ymm15
vpand 192(%rdx),%ymm14,%ymm15

# qhasm: r17 ^= r
# asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8
# asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7
vpxor %ymm15,%ymm7,%ymm7

# qhasm: r = a11 & mem256[input_2 + 224]
# asm 1: vpand 224(<input_2=int64#3),<a11=reg256#15,>r=reg256#16
# asm 2: vpand 224(<input_2=%rdx),<a11=%ymm14,>r=%ymm15
vpand 224(%rdx),%ymm14,%ymm15

# qhasm: r18 ^= r
# asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9
# asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8
vpxor %ymm15,%ymm8,%ymm8

# qhasm: r = a11 & mem256[input_2 + 256]
# asm 1: vpand 256(<input_2=int64#3),<a11=reg256#15,>r=reg256#16
# asm 2: vpand 256(<input_2=%rdx),<a11=%ymm14,>r=%ymm15
vpand 256(%rdx),%ymm14,%ymm15

# qhasm: r19 ^= r
# asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10
# asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9
vpxor %ymm15,%ymm9,%ymm9

# qhasm: r = a11 & mem256[input_2 + 288]
# asm 1: vpand 288(<input_2=int64#3),<a11=reg256#15,>r=reg256#16
# asm 2: vpand 288(<input_2=%rdx),<a11=%ymm14,>r=%ymm15
vpand 288(%rdx),%ymm14,%ymm15

# qhasm: r20 ^= r
# asm 1: vpxor <r=reg256#16,<r20=reg256#11,<r20=reg256#11
# asm 2: vpxor <r=%ymm15,<r20=%ymm10,<r20=%ymm10
vpxor %ymm15,%ymm10,%ymm10

# qhasm: r = a11 & mem256[input_2 + 320]
# asm 1: vpand 320(<input_2=int64#3),<a11=reg256#15,>r=reg256#16
# asm 2: vpand 320(<input_2=%rdx),<a11=%ymm14,>r=%ymm15
vpand 320(%rdx),%ymm14,%ymm15

# qhasm: r21 ^= r
# asm 1: vpxor <r=reg256#16,<r21=reg256#12,<r21=reg256#12
# asm 2: vpxor <r=%ymm15,<r21=%ymm11,<r21=%ymm11
vpxor %ymm15,%ymm11,%ymm11

# qhasm: r = a11 & mem256[input_2 + 352]
# asm 1: vpand 352(<input_2=int64#3),<a11=reg256#15,>r=reg256#16
# asm 2: vpand 352(<input_2=%rdx),<a11=%ymm14,>r=%ymm15
vpand 352(%rdx),%ymm14,%ymm15

# qhasm: r22 ^= r
# asm 1: vpxor <r=reg256#16,<r22=reg256#13,<r22=reg256#13
# asm 2: vpxor <r=%ymm15,<r22=%ymm12,<r22=%ymm12
vpxor %ymm15,%ymm12,%ymm12

# qhasm: r = a11 & mem256[input_2 + 384]
# asm 1: vpand 384(<input_2=int64#3),<a11=reg256#15,>r=reg256#15
# asm 2: vpand 384(<input_2=%rdx),<a11=%ymm14,>r=%ymm14
vpand 384(%rdx),%ymm14,%ymm14

# qhasm: r23 ^= r
# asm 1: vpxor <r=reg256#15,<r23=reg256#14,<r23=reg256#14
# asm 2: vpxor <r=%ymm14,<r23=%ymm13,<r23=%ymm13
vpxor %ymm14,%ymm13,%ymm13

# qhasm: r14 ^= r23
# asm 1: vpxor <r23=reg256#14,<r14=reg256#5,<r14=reg256#5
# asm 2: vpxor <r23=%ymm13,<r14=%ymm4,<r14=%ymm4
vpxor %ymm13,%ymm4,%ymm4

# qhasm: r13 ^= r23
# asm 1: vpxor <r23=reg256#14,<r13=reg256#4,<r13=reg256#4
# asm 2: vpxor <r23=%ymm13,<r13=%ymm3,<r13=%ymm3
vpxor %ymm13,%ymm3,%ymm3

# qhasm: r11 ^= r23
# asm 1: vpxor <r23=reg256#14,<r11=reg256#2,<r11=reg256#2
# asm 2: vpxor <r23=%ymm13,<r11=%ymm1,<r11=%ymm1
vpxor %ymm13,%ymm1,%ymm1

# qhasm: r10 = r23
# asm 1: vmovapd <r23=reg256#14,>r10=reg256#14
# asm 2: vmovapd <r23=%ymm13,>r10=%ymm13
vmovapd %ymm13,%ymm13
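
# Note (added): the a11 block above is the generic per-limb step: load
# a[k], AND it against all 13 limbs of b (accumulating into r[k]..r[k+12]
# with XOR), then fold the newly completed top limb r[k+12]. The same
# block repeats below for a10 down through a1, each shifted down one limb.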

# qhasm: a10 = mem256[ input_1 + 320 ]
# asm 1: vmovupd 320(<input_1=int64#2),>a10=reg256#15
# asm 2: vmovupd 320(<input_1=%rsi),>a10=%ymm14
vmovupd 320(%rsi),%ymm14

# qhasm: r = a10 & b0
# asm 1: vpand <a10=reg256#15,<b0=reg256#1,>r=reg256#16
# asm 2: vpand <a10=%ymm14,<b0=%ymm0,>r=%ymm15
vpand %ymm14,%ymm0,%ymm15

# qhasm: r10 ^= r
# asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14
# asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13
vpxor %ymm15,%ymm13,%ymm13

# qhasm: r = a10 & mem256[input_2 + 32]
# asm 1: vpand 32(<input_2=int64#3),<a10=reg256#15,>r=reg256#16
# asm 2: vpand 32(<input_2=%rdx),<a10=%ymm14,>r=%ymm15
vpand 32(%rdx),%ymm14,%ymm15

# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2
# asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1
vpxor %ymm15,%ymm1,%ymm1

# qhasm: r = a10 & mem256[input_2 + 64]
# asm 1: vpand 64(<input_2=int64#3),<a10=reg256#15,>r=reg256#16
# asm 2: vpand 64(<input_2=%rdx),<a10=%ymm14,>r=%ymm15
vpand 64(%rdx),%ymm14,%ymm15

# qhasm: r12 ^= r
# asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3
# asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2
vpxor %ymm15,%ymm2,%ymm2

# qhasm: r = a10 & mem256[input_2 + 96]
# asm 1: vpand 96(<input_2=int64#3),<a10=reg256#15,>r=reg256#16
# asm 2: vpand 96(<input_2=%rdx),<a10=%ymm14,>r=%ymm15
vpand 96(%rdx),%ymm14,%ymm15

# qhasm: r13 ^= r
# asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4
# asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3
vpxor %ymm15,%ymm3,%ymm3

# qhasm: r = a10 & mem256[input_2 + 128]
# asm 1: vpand 128(<input_2=int64#3),<a10=reg256#15,>r=reg256#16
# asm 2: vpand 128(<input_2=%rdx),<a10=%ymm14,>r=%ymm15
vpand 128(%rdx),%ymm14,%ymm15

# qhasm: r14 ^= r
# asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5
# asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4
vpxor %ymm15,%ymm4,%ymm4

# qhasm: r = a10 & mem256[input_2 + 160]
# asm 1: vpand 160(<input_2=int64#3),<a10=reg256#15,>r=reg256#16
# asm 2: vpand 160(<input_2=%rdx),<a10=%ymm14,>r=%ymm15
vpand 160(%rdx),%ymm14,%ymm15

# qhasm: r15 ^= r
# asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6
# asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5
vpxor %ymm15,%ymm5,%ymm5

# qhasm: r = a10 & mem256[input_2 + 192]
# asm 1: vpand 192(<input_2=int64#3),<a10=reg256#15,>r=reg256#16
# asm 2: vpand 192(<input_2=%rdx),<a10=%ymm14,>r=%ymm15
vpand 192(%rdx),%ymm14,%ymm15

# qhasm: r16 ^= r
# asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7
# asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6
vpxor %ymm15,%ymm6,%ymm6

# qhasm: r = a10 & mem256[input_2 + 224]
# asm 1: vpand 224(<input_2=int64#3),<a10=reg256#15,>r=reg256#16
# asm 2: vpand 224(<input_2=%rdx),<a10=%ymm14,>r=%ymm15
vpand 224(%rdx),%ymm14,%ymm15

# qhasm: r17 ^= r
# asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8
# asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7
vpxor %ymm15,%ymm7,%ymm7

# qhasm: r = a10 & mem256[input_2 + 256]
# asm 1: vpand 256(<input_2=int64#3),<a10=reg256#15,>r=reg256#16
# asm 2: vpand 256(<input_2=%rdx),<a10=%ymm14,>r=%ymm15
vpand 256(%rdx),%ymm14,%ymm15

# qhasm: r18 ^= r
# asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9
# asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8
vpxor %ymm15,%ymm8,%ymm8

# qhasm: r = a10 & mem256[input_2 + 288]
# asm 1: vpand 288(<input_2=int64#3),<a10=reg256#15,>r=reg256#16
# asm 2: vpand 288(<input_2=%rdx),<a10=%ymm14,>r=%ymm15
vpand 288(%rdx),%ymm14,%ymm15

# qhasm: r19 ^= r
# asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10
# asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9
vpxor %ymm15,%ymm9,%ymm9

# qhasm: r = a10 & mem256[input_2 + 320]
# asm 1: vpand 320(<input_2=int64#3),<a10=reg256#15,>r=reg256#16
# asm 2: vpand 320(<input_2=%rdx),<a10=%ymm14,>r=%ymm15
vpand 320(%rdx),%ymm14,%ymm15

# qhasm: r20 ^= r
# asm 1: vpxor <r=reg256#16,<r20=reg256#11,<r20=reg256#11
# asm 2: vpxor <r=%ymm15,<r20=%ymm10,<r20=%ymm10
vpxor %ymm15,%ymm10,%ymm10

# qhasm: r = a10 & mem256[input_2 + 352]
# asm 1: vpand 352(<input_2=int64#3),<a10=reg256#15,>r=reg256#16
# asm 2: vpand 352(<input_2=%rdx),<a10=%ymm14,>r=%ymm15
vpand 352(%rdx),%ymm14,%ymm15

# qhasm: r21 ^= r
# asm 1: vpxor <r=reg256#16,<r21=reg256#12,<r21=reg256#12
# asm 2: vpxor <r=%ymm15,<r21=%ymm11,<r21=%ymm11
vpxor %ymm15,%ymm11,%ymm11

# qhasm: r = a10 & mem256[input_2 + 384]
# asm 1: vpand 384(<input_2=int64#3),<a10=reg256#15,>r=reg256#15
# asm 2: vpand 384(<input_2=%rdx),<a10=%ymm14,>r=%ymm14
vpand 384(%rdx),%ymm14,%ymm14

# qhasm: r22 ^= r
# asm 1: vpxor <r=reg256#15,<r22=reg256#13,<r22=reg256#13
# asm 2: vpxor <r=%ymm14,<r22=%ymm12,<r22=%ymm12
vpxor %ymm14,%ymm12,%ymm12

# qhasm: r13 ^= r22
# asm 1: vpxor <r22=reg256#13,<r13=reg256#4,<r13=reg256#4
# asm 2: vpxor <r22=%ymm12,<r13=%ymm3,<r13=%ymm3
vpxor %ymm12,%ymm3,%ymm3

# qhasm: r12 ^= r22
# asm 1: vpxor <r22=reg256#13,<r12=reg256#3,<r12=reg256#3
# asm 2: vpxor <r22=%ymm12,<r12=%ymm2,<r12=%ymm2
vpxor %ymm12,%ymm2,%ymm2

# qhasm: r10 ^= r22
# asm 1: vpxor <r22=reg256#13,<r10=reg256#14,<r10=reg256#14
# asm 2: vpxor <r22=%ymm12,<r10=%ymm13,<r10=%ymm13
vpxor %ymm12,%ymm13,%ymm13

# qhasm: r9 = r22
# asm 1: vmovapd <r22=reg256#13,>r9=reg256#13
# asm 2: vmovapd <r22=%ymm12,>r9=%ymm12
vmovapd %ymm12,%ymm12

# qhasm: a9 = mem256[ input_1 + 288 ]
# asm 1: vmovupd 288(<input_1=int64#2),>a9=reg256#15
# asm 2: vmovupd 288(<input_1=%rsi),>a9=%ymm14
vmovupd 288(%rsi),%ymm14

# qhasm: r = a9 & b0
# asm 1: vpand <a9=reg256#15,<b0=reg256#1,>r=reg256#16
# asm 2: vpand <a9=%ymm14,<b0=%ymm0,>r=%ymm15
vpand %ymm14,%ymm0,%ymm15

# qhasm: r9 ^= r
# asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13
# asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12
vpxor %ymm15,%ymm12,%ymm12

# qhasm: r = a9 & mem256[input_2 + 32]
# asm 1: vpand 32(<input_2=int64#3),<a9=reg256#15,>r=reg256#16
# asm 2: vpand 32(<input_2=%rdx),<a9=%ymm14,>r=%ymm15
vpand 32(%rdx),%ymm14,%ymm15

# qhasm: r10 ^= r
# asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14
# asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13
vpxor %ymm15,%ymm13,%ymm13

# qhasm: r = a9 & mem256[input_2 + 64]
# asm 1: vpand 64(<input_2=int64#3),<a9=reg256#15,>r=reg256#16
# asm 2: vpand 64(<input_2=%rdx),<a9=%ymm14,>r=%ymm15
vpand 64(%rdx),%ymm14,%ymm15

# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2
# asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1
vpxor %ymm15,%ymm1,%ymm1

# qhasm: r = a9 & mem256[input_2 + 96]
# asm 1: vpand 96(<input_2=int64#3),<a9=reg256#15,>r=reg256#16
# asm 2: vpand 96(<input_2=%rdx),<a9=%ymm14,>r=%ymm15
vpand 96(%rdx),%ymm14,%ymm15

# qhasm: r12 ^= r
# asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3
# asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2
vpxor %ymm15,%ymm2,%ymm2

# qhasm: r = a9 & mem256[input_2 + 128]
# asm 1: vpand 128(<input_2=int64#3),<a9=reg256#15,>r=reg256#16
# asm 2: vpand 128(<input_2=%rdx),<a9=%ymm14,>r=%ymm15
vpand 128(%rdx),%ymm14,%ymm15

# qhasm: r13 ^= r
# asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4
# asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3
vpxor %ymm15,%ymm3,%ymm3

# qhasm: r = a9 & mem256[input_2 + 160]
# asm 1: vpand 160(<input_2=int64#3),<a9=reg256#15,>r=reg256#16
# asm 2: vpand 160(<input_2=%rdx),<a9=%ymm14,>r=%ymm15
vpand 160(%rdx),%ymm14,%ymm15

# qhasm: r14 ^= r
# asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5
# asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4
vpxor %ymm15,%ymm4,%ymm4

# qhasm: r = a9 & mem256[input_2 + 192]
# asm 1: vpand 192(<input_2=int64#3),<a9=reg256#15,>r=reg256#16
# asm 2: vpand 192(<input_2=%rdx),<a9=%ymm14,>r=%ymm15
vpand 192(%rdx),%ymm14,%ymm15

# qhasm: r15 ^= r
# asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6
# asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5
vpxor %ymm15,%ymm5,%ymm5

# qhasm: r = a9 & mem256[input_2 + 224]
# asm 1: vpand 224(<input_2=int64#3),<a9=reg256#15,>r=reg256#16
# asm 2: vpand 224(<input_2=%rdx),<a9=%ymm14,>r=%ymm15
vpand 224(%rdx),%ymm14,%ymm15

# qhasm: r16 ^= r
# asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7
# asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6
vpxor %ymm15,%ymm6,%ymm6

# qhasm: r = a9 & mem256[input_2 + 256]
# asm 1: vpand 256(<input_2=int64#3),<a9=reg256#15,>r=reg256#16
# asm 2: vpand 256(<input_2=%rdx),<a9=%ymm14,>r=%ymm15
vpand 256(%rdx),%ymm14,%ymm15

# qhasm: r17 ^= r
# asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8
# asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7
vpxor %ymm15,%ymm7,%ymm7

# qhasm: r = a9 & mem256[input_2 + 288]
# asm 1: vpand 288(<input_2=int64#3),<a9=reg256#15,>r=reg256#16
# asm 2: vpand 288(<input_2=%rdx),<a9=%ymm14,>r=%ymm15
vpand 288(%rdx),%ymm14,%ymm15

# qhasm: r18 ^= r
# asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9
# asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8
vpxor %ymm15,%ymm8,%ymm8

# qhasm: r = a9 & mem256[input_2 + 320]
# asm 1: vpand 320(<input_2=int64#3),<a9=reg256#15,>r=reg256#16
# asm 2: vpand 320(<input_2=%rdx),<a9=%ymm14,>r=%ymm15
vpand 320(%rdx),%ymm14,%ymm15

# qhasm: r19 ^= r
# asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10
# asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9
vpxor %ymm15,%ymm9,%ymm9

# qhasm: r = a9 & mem256[input_2 + 352]
# asm 1: vpand 352(<input_2=int64#3),<a9=reg256#15,>r=reg256#16
# asm 2: vpand 352(<input_2=%rdx),<a9=%ymm14,>r=%ymm15
vpand 352(%rdx),%ymm14,%ymm15

# qhasm: r20 ^= r
# asm 1: vpxor <r=reg256#16,<r20=reg256#11,<r20=reg256#11
# asm 2: vpxor <r=%ymm15,<r20=%ymm10,<r20=%ymm10
vpxor %ymm15,%ymm10,%ymm10

# qhasm: r = a9 & mem256[input_2 + 384]
# asm 1: vpand 384(<input_2=int64#3),<a9=reg256#15,>r=reg256#15
# asm 2: vpand 384(<input_2=%rdx),<a9=%ymm14,>r=%ymm14
vpand 384(%rdx),%ymm14,%ymm14

# qhasm: r21 ^= r
# asm 1: vpxor <r=reg256#15,<r21=reg256#12,<r21=reg256#12
# asm 2: vpxor <r=%ymm14,<r21=%ymm11,<r21=%ymm11
vpxor %ymm14,%ymm11,%ymm11

# qhasm: r12 ^= r21
# asm 1: vpxor <r21=reg256#12,<r12=reg256#3,<r12=reg256#3
# asm 2: vpxor <r21=%ymm11,<r12=%ymm2,<r12=%ymm2
vpxor %ymm11,%ymm2,%ymm2

# qhasm: r11 ^= r21
# asm 1: vpxor <r21=reg256#12,<r11=reg256#2,<r11=reg256#2
# asm 2: vpxor <r21=%ymm11,<r11=%ymm1,<r11=%ymm1
vpxor %ymm11,%ymm1,%ymm1

# qhasm: r9 ^= r21
# asm 1: vpxor <r21=reg256#12,<r9=reg256#13,<r9=reg256#13
# asm 2: vpxor <r21=%ymm11,<r9=%ymm12,<r9=%ymm12
vpxor %ymm11,%ymm12,%ymm12

# qhasm: r8 = r21
# asm 1: vmovapd <r21=reg256#12,>r8=reg256#12
# asm 2: vmovapd <r21=%ymm11,>r8=%ymm11
vmovapd %ymm11,%ymm11

# qhasm: a8 = mem256[ input_1 + 256 ]
# asm 1: vmovupd 256(<input_1=int64#2),>a8=reg256#15
# asm 2: vmovupd 256(<input_1=%rsi),>a8=%ymm14
vmovupd 256(%rsi),%ymm14

# qhasm: r = a8 & b0
# asm 1: vpand <a8=reg256#15,<b0=reg256#1,>r=reg256#16
# asm 2: vpand <a8=%ymm14,<b0=%ymm0,>r=%ymm15
vpand %ymm14,%ymm0,%ymm15

# qhasm: r8 ^= r
# asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12
# asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11
vpxor %ymm15,%ymm11,%ymm11

# qhasm: r = a8 & mem256[input_2 + 32]
# asm 1: vpand 32(<input_2=int64#3),<a8=reg256#15,>r=reg256#16
# asm 2: vpand 32(<input_2=%rdx),<a8=%ymm14,>r=%ymm15
vpand 32(%rdx),%ymm14,%ymm15

# qhasm: r9 ^= r
# asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13
# asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12
vpxor %ymm15,%ymm12,%ymm12

# qhasm: r = a8 & mem256[input_2 + 64]
# asm 1: vpand 64(<input_2=int64#3),<a8=reg256#15,>r=reg256#16
# asm 2: vpand 64(<input_2=%rdx),<a8=%ymm14,>r=%ymm15
vpand 64(%rdx),%ymm14,%ymm15

# qhasm: r10 ^= r
# asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14
# asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13
vpxor %ymm15,%ymm13,%ymm13

# qhasm: r = a8 & mem256[input_2 + 96]
# asm 1: vpand 96(<input_2=int64#3),<a8=reg256#15,>r=reg256#16
# asm 2: vpand 96(<input_2=%rdx),<a8=%ymm14,>r=%ymm15
vpand 96(%rdx),%ymm14,%ymm15

# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2
# asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1
vpxor %ymm15,%ymm1,%ymm1

# qhasm: r = a8 & mem256[input_2 + 128]
# asm 1: vpand 128(<input_2=int64#3),<a8=reg256#15,>r=reg256#16
# asm 2: vpand 128(<input_2=%rdx),<a8=%ymm14,>r=%ymm15
vpand 128(%rdx),%ymm14,%ymm15

# qhasm: r12 ^= r
# asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3
# asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2
vpxor %ymm15,%ymm2,%ymm2

# qhasm: r = a8 & mem256[input_2 + 160]
# asm 1: vpand 160(<input_2=int64#3),<a8=reg256#15,>r=reg256#16
# asm 2: vpand 160(<input_2=%rdx),<a8=%ymm14,>r=%ymm15
vpand 160(%rdx),%ymm14,%ymm15

# qhasm: r13 ^= r
# asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4
# asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3
vpxor %ymm15,%ymm3,%ymm3

# qhasm: r = a8 & mem256[input_2 + 192]
# asm 1: vpand 192(<input_2=int64#3),<a8=reg256#15,>r=reg256#16
# asm 2: vpand 192(<input_2=%rdx),<a8=%ymm14,>r=%ymm15
vpand 192(%rdx),%ymm14,%ymm15

# qhasm: r14 ^= r
# asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5
# asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4
vpxor %ymm15,%ymm4,%ymm4

# qhasm: r = a8 & mem256[input_2 + 224]
# asm 1: vpand 224(<input_2=int64#3),<a8=reg256#15,>r=reg256#16
# asm 2: vpand 224(<input_2=%rdx),<a8=%ymm14,>r=%ymm15
vpand 224(%rdx),%ymm14,%ymm15

# qhasm: r15 ^= r
# asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6
# asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5
vpxor %ymm15,%ymm5,%ymm5

# qhasm: r = a8 & mem256[input_2 + 256]
# asm 1: vpand 256(<input_2=int64#3),<a8=reg256#15,>r=reg256#16
# asm 2: vpand 256(<input_2=%rdx),<a8=%ymm14,>r=%ymm15
vpand 256(%rdx),%ymm14,%ymm15

# qhasm: r16 ^= r
# asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7
# asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6
vpxor %ymm15,%ymm6,%ymm6

# qhasm: r = a8 & mem256[input_2 + 288]
# asm 1: vpand 288(<input_2=int64#3),<a8=reg256#15,>r=reg256#16
# asm 2: vpand 288(<input_2=%rdx),<a8=%ymm14,>r=%ymm15
vpand 288(%rdx),%ymm14,%ymm15

# qhasm: r17 ^= r
# asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8
# asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7
vpxor %ymm15,%ymm7,%ymm7

# qhasm: r = a8 & mem256[input_2 + 320]
# asm 1: vpand 320(<input_2=int64#3),<a8=reg256#15,>r=reg256#16
# asm 2: vpand 320(<input_2=%rdx),<a8=%ymm14,>r=%ymm15
vpand 320(%rdx),%ymm14,%ymm15

# qhasm: r18 ^= r
# asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9
# asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8
vpxor %ymm15,%ymm8,%ymm8

# qhasm: r = a8 & mem256[input_2 + 352]
# asm 1: vpand 352(<input_2=int64#3),<a8=reg256#15,>r=reg256#16
# asm 2: vpand 352(<input_2=%rdx),<a8=%ymm14,>r=%ymm15
vpand 352(%rdx),%ymm14,%ymm15

# qhasm: r19 ^= r
# asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10
# asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9
vpxor %ymm15,%ymm9,%ymm9

# qhasm: r = a8 & mem256[input_2 + 384]
# asm 1: vpand 384(<input_2=int64#3),<a8=reg256#15,>r=reg256#15
# asm 2: vpand 384(<input_2=%rdx),<a8=%ymm14,>r=%ymm14
vpand 384(%rdx),%ymm14,%ymm14

# qhasm: r20 ^= r
# asm 1: vpxor <r=reg256#15,<r20=reg256#11,<r20=reg256#11
# asm 2: vpxor <r=%ymm14,<r20=%ymm10,<r20=%ymm10
vpxor %ymm14,%ymm10,%ymm10

# qhasm: r11 ^= r20
# asm 1: vpxor <r20=reg256#11,<r11=reg256#2,<r11=reg256#2
# asm 2: vpxor <r20=%ymm10,<r11=%ymm1,<r11=%ymm1
vpxor %ymm10,%ymm1,%ymm1

# qhasm: r10 ^= r20
# asm 1: vpxor <r20=reg256#11,<r10=reg256#14,<r10=reg256#14
# asm 2: vpxor <r20=%ymm10,<r10=%ymm13,<r10=%ymm13
vpxor %ymm10,%ymm13,%ymm13

# qhasm: r8 ^= r20
# asm 1: vpxor <r20=reg256#11,<r8=reg256#12,<r8=reg256#12
# asm 2: vpxor <r20=%ymm10,<r8=%ymm11,<r8=%ymm11
vpxor %ymm10,%ymm11,%ymm11

# qhasm: r7 = r20
# asm 1: vmovapd <r20=reg256#11,>r7=reg256#11
# asm 2: vmovapd <r20=%ymm10,>r7=%ymm10
vmovapd %ymm10,%ymm10

# qhasm: a7 = mem256[ input_1 + 224 ]
# asm 1: vmovupd 224(<input_1=int64#2),>a7=reg256#15
# asm 2: vmovupd 224(<input_1=%rsi),>a7=%ymm14
vmovupd 224(%rsi),%ymm14

# qhasm: r = a7 & b0
# asm 1: vpand <a7=reg256#15,<b0=reg256#1,>r=reg256#16
# asm 2: vpand <a7=%ymm14,<b0=%ymm0,>r=%ymm15
vpand %ymm14,%ymm0,%ymm15

# qhasm: r7 ^= r
# asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11
# asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10
vpxor %ymm15,%ymm10,%ymm10

# qhasm: r = a7 & mem256[input_2 + 32]
# asm 1: vpand 32(<input_2=int64#3),<a7=reg256#15,>r=reg256#16
# asm 2: vpand 32(<input_2=%rdx),<a7=%ymm14,>r=%ymm15
vpand 32(%rdx),%ymm14,%ymm15

# qhasm: r8 ^= r
# asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12
# asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11
vpxor %ymm15,%ymm11,%ymm11

# qhasm: r = a7 & mem256[input_2 + 64]
# asm 1: vpand 64(<input_2=int64#3),<a7=reg256#15,>r=reg256#16
# asm 2: vpand 64(<input_2=%rdx),<a7=%ymm14,>r=%ymm15
vpand 64(%rdx),%ymm14,%ymm15

# qhasm: r9 ^= r
# asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13
# asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12
vpxor %ymm15,%ymm12,%ymm12

# qhasm: r = a7 & mem256[input_2 + 96]
# asm 1: vpand 96(<input_2=int64#3),<a7=reg256#15,>r=reg256#16
# asm 2: vpand 96(<input_2=%rdx),<a7=%ymm14,>r=%ymm15
vpand 96(%rdx),%ymm14,%ymm15

# qhasm: r10 ^= r
# asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14
# asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13
vpxor %ymm15,%ymm13,%ymm13

# qhasm: r = a7 & mem256[input_2 + 128]
# asm 1: vpand 128(<input_2=int64#3),<a7=reg256#15,>r=reg256#16
# asm 2: vpand 128(<input_2=%rdx),<a7=%ymm14,>r=%ymm15
vpand 128(%rdx),%ymm14,%ymm15

# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2
# asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1
vpxor %ymm15,%ymm1,%ymm1

# qhasm: r = a7 & mem256[input_2 + 160]
# asm 1: vpand 160(<input_2=int64#3),<a7=reg256#15,>r=reg256#16
# asm 2: vpand 160(<input_2=%rdx),<a7=%ymm14,>r=%ymm15
vpand 160(%rdx),%ymm14,%ymm15

# qhasm: r12 ^= r
# asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3
# asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2
vpxor %ymm15,%ymm2,%ymm2

# qhasm: r = a7 & mem256[input_2 + 192]
# asm 1: vpand 192(<input_2=int64#3),<a7=reg256#15,>r=reg256#16
# asm 2: vpand 192(<input_2=%rdx),<a7=%ymm14,>r=%ymm15
vpand 192(%rdx),%ymm14,%ymm15

# qhasm: r13 ^= r
# asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4
# asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3
vpxor %ymm15,%ymm3,%ymm3

# qhasm: r = a7 & mem256[input_2 + 224]
# asm 1: vpand 224(<input_2=int64#3),<a7=reg256#15,>r=reg256#16
# asm 2: vpand 224(<input_2=%rdx),<a7=%ymm14,>r=%ymm15
vpand 224(%rdx),%ymm14,%ymm15

# qhasm: r14 ^= r
# asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5
# asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4
vpxor %ymm15,%ymm4,%ymm4

# qhasm: r = a7 & mem256[input_2 + 256]
# asm 1: vpand 256(<input_2=int64#3),<a7=reg256#15,>r=reg256#16
# asm 2: vpand 256(<input_2=%rdx),<a7=%ymm14,>r=%ymm15
vpand 256(%rdx),%ymm14,%ymm15

# qhasm: r15 ^= r
# asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6
# asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5
vpxor %ymm15,%ymm5,%ymm5

# qhasm: r = a7 & mem256[input_2 + 288]
# asm 1: vpand 288(<input_2=int64#3),<a7=reg256#15,>r=reg256#16
# asm 2: vpand 288(<input_2=%rdx),<a7=%ymm14,>r=%ymm15
vpand 288(%rdx),%ymm14,%ymm15

# qhasm: r16 ^= r
# asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7
# asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6
vpxor %ymm15,%ymm6,%ymm6

# qhasm: r = a7 & mem256[input_2 + 320]
# asm 1: vpand 320(<input_2=int64#3),<a7=reg256#15,>r=reg256#16
# asm 2: vpand 320(<input_2=%rdx),<a7=%ymm14,>r=%ymm15
vpand 320(%rdx),%ymm14,%ymm15

# qhasm: r17 ^= r
# asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8
# asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7
vpxor %ymm15,%ymm7,%ymm7

# qhasm: r = a7 & mem256[input_2 + 352]
# asm 1: vpand 352(<input_2=int64#3),<a7=reg256#15,>r=reg256#16
# asm 2: vpand 352(<input_2=%rdx),<a7=%ymm14,>r=%ymm15
vpand 352(%rdx),%ymm14,%ymm15

# qhasm: r18 ^= r
# asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9
# asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8
vpxor %ymm15,%ymm8,%ymm8

# qhasm: r = a7 & mem256[input_2 + 384]
# asm 1: vpand 384(<input_2=int64#3),<a7=reg256#15,>r=reg256#15
# asm 2: vpand 384(<input_2=%rdx),<a7=%ymm14,>r=%ymm14
vpand 384(%rdx),%ymm14,%ymm14

# qhasm: r19 ^= r
# asm 1: vpxor <r=reg256#15,<r19=reg256#10,<r19=reg256#10
# asm 2: vpxor <r=%ymm14,<r19=%ymm9,<r19=%ymm9
vpxor %ymm14,%ymm9,%ymm9

# qhasm: r10 ^= r19
# asm 1: vpxor <r19=reg256#10,<r10=reg256#14,<r10=reg256#14
# asm 2: vpxor <r19=%ymm9,<r10=%ymm13,<r10=%ymm13
vpxor %ymm9,%ymm13,%ymm13

# qhasm: r9 ^= r19
# asm 1: vpxor <r19=reg256#10,<r9=reg256#13,<r9=reg256#13
# asm 2: vpxor <r19=%ymm9,<r9=%ymm12,<r9=%ymm12
vpxor %ymm9,%ymm12,%ymm12

# qhasm: r7 ^= r19
# asm 1: vpxor <r19=reg256#10,<r7=reg256#11,<r7=reg256#11
# asm 2: vpxor <r19=%ymm9,<r7=%ymm10,<r7=%ymm10
vpxor %ymm9,%ymm10,%ymm10

# qhasm: r6 = r19
# asm 1: vmovapd <r19=reg256#10,>r6=reg256#10
# asm 2: vmovapd <r19=%ymm9,>r6=%ymm9
vmovapd %ymm9,%ymm9

# qhasm: a6 = mem256[ input_1 + 192 ]
# asm 1: vmovupd 192(<input_1=int64#2),>a6=reg256#15
# asm 2: vmovupd 192(<input_1=%rsi),>a6=%ymm14
vmovupd 192(%rsi),%ymm14

# qhasm: r = a6 & b0
# asm 1: vpand <a6=reg256#15,<b0=reg256#1,>r=reg256#16
# asm 2: vpand <a6=%ymm14,<b0=%ymm0,>r=%ymm15
vpand %ymm14,%ymm0,%ymm15

# qhasm: r6 ^= r
# asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10
# asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9
vpxor %ymm15,%ymm9,%ymm9

# qhasm: r = a6 & mem256[input_2 + 32]
# asm 1: vpand 32(<input_2=int64#3),<a6=reg256#15,>r=reg256#16
# asm 2: vpand 32(<input_2=%rdx),<a6=%ymm14,>r=%ymm15
vpand 32(%rdx),%ymm14,%ymm15

# qhasm: r7 ^= r
# asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11
# asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10
vpxor %ymm15,%ymm10,%ymm10

# qhasm: r = a6 & mem256[input_2 + 64]
# asm 1: vpand 64(<input_2=int64#3),<a6=reg256#15,>r=reg256#16
# asm 2: vpand 64(<input_2=%rdx),<a6=%ymm14,>r=%ymm15
vpand 64(%rdx),%ymm14,%ymm15

# qhasm: r8 ^= r
# asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12
# asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11
vpxor %ymm15,%ymm11,%ymm11

# qhasm: r = a6 & mem256[input_2 + 96]
# asm 1: vpand 96(<input_2=int64#3),<a6=reg256#15,>r=reg256#16
# asm 2: vpand 96(<input_2=%rdx),<a6=%ymm14,>r=%ymm15
vpand 96(%rdx),%ymm14,%ymm15

# qhasm: r9 ^= r
# asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13
# asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12
vpxor %ymm15,%ymm12,%ymm12

# qhasm: r = a6 & mem256[input_2 + 128]
# asm 1: vpand 128(<input_2=int64#3),<a6=reg256#15,>r=reg256#16
# asm 2: vpand 128(<input_2=%rdx),<a6=%ymm14,>r=%ymm15
vpand 128(%rdx),%ymm14,%ymm15

# qhasm: r10 ^= r
# asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14
# asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13
vpxor %ymm15,%ymm13,%ymm13

# qhasm: r = a6 & mem256[input_2 + 160]
# asm 1: vpand 160(<input_2=int64#3),<a6=reg256#15,>r=reg256#16
# asm 2: vpand 160(<input_2=%rdx),<a6=%ymm14,>r=%ymm15
vpand 160(%rdx),%ymm14,%ymm15

# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2
# asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1
vpxor %ymm15,%ymm1,%ymm1

# qhasm: r = a6 & mem256[input_2 + 192]
# asm 1: vpand 192(<input_2=int64#3),<a6=reg256#15,>r=reg256#16
# asm 2: vpand 192(<input_2=%rdx),<a6=%ymm14,>r=%ymm15
vpand 192(%rdx),%ymm14,%ymm15

# qhasm: r12 ^= r
# asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3
# asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2
vpxor %ymm15,%ymm2,%ymm2

# qhasm: r = a6 & mem256[input_2 + 224]
# asm 1: vpand 224(<input_2=int64#3),<a6=reg256#15,>r=reg256#16
# asm 2: vpand 224(<input_2=%rdx),<a6=%ymm14,>r=%ymm15
vpand 224(%rdx),%ymm14,%ymm15

# qhasm: r13 ^= r
# asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4
# asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3
vpxor %ymm15,%ymm3,%ymm3

# qhasm: r = a6 & mem256[input_2 + 256]
# asm 1: vpand 256(<input_2=int64#3),<a6=reg256#15,>r=reg256#16
# asm 2: vpand 256(<input_2=%rdx),<a6=%ymm14,>r=%ymm15
vpand 256(%rdx),%ymm14,%ymm15

# qhasm: r14 ^= r
# asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5
# asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4
vpxor %ymm15,%ymm4,%ymm4

# qhasm: r = a6 & mem256[input_2 + 288]
# asm 1: vpand 288(<input_2=int64#3),<a6=reg256#15,>r=reg256#16
# asm 2: vpand 288(<input_2=%rdx),<a6=%ymm14,>r=%ymm15
vpand 288(%rdx),%ymm14,%ymm15

# qhasm: r15 ^= r
# asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6
# asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5
vpxor %ymm15,%ymm5,%ymm5

# qhasm: r = a6 & mem256[input_2 + 320]
# asm 1: vpand 320(<input_2=int64#3),<a6=reg256#15,>r=reg256#16
# asm 2: vpand 320(<input_2=%rdx),<a6=%ymm14,>r=%ymm15
vpand 320(%rdx),%ymm14,%ymm15

# qhasm: r16 ^= r
# asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7
# asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6
vpxor %ymm15,%ymm6,%ymm6

# qhasm: r = a6 & mem256[input_2 + 352]
# asm 1: vpand 352(<input_2=int64#3),<a6=reg256#15,>r=reg256#16
# asm 2: vpand 352(<input_2=%rdx),<a6=%ymm14,>r=%ymm15
vpand 352(%rdx),%ymm14,%ymm15

# qhasm: r17 ^= r
# asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8
# asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7
vpxor %ymm15,%ymm7,%ymm7

# qhasm: r = a6 & mem256[input_2 + 384]
# asm 1: vpand 384(<input_2=int64#3),<a6=reg256#15,>r=reg256#15
# asm 2: vpand 384(<input_2=%rdx),<a6=%ymm14,>r=%ymm14
vpand 384(%rdx),%ymm14,%ymm14

# qhasm: r18 ^= r
# asm 1: vpxor <r=reg256#15,<r18=reg256#9,<r18=reg256#9
# asm 2: vpxor <r=%ymm14,<r18=%ymm8,<r18=%ymm8
vpxor %ymm14,%ymm8,%ymm8

# qhasm: r9 ^= r18
# asm 1: vpxor <r18=reg256#9,<r9=reg256#13,<r9=reg256#13
# asm 2: vpxor <r18=%ymm8,<r9=%ymm12,<r9=%ymm12
vpxor %ymm8,%ymm12,%ymm12

# qhasm: r8 ^= r18
# asm 1: vpxor <r18=reg256#9,<r8=reg256#12,<r8=reg256#12
# asm 2: vpxor <r18=%ymm8,<r8=%ymm11,<r8=%ymm11
vpxor %ymm8,%ymm11,%ymm11

# qhasm: r6 ^= r18
# asm 1: vpxor <r18=reg256#9,<r6=reg256#10,<r6=reg256#10
# asm 2: vpxor <r18=%ymm8,<r6=%ymm9,<r6=%ymm9
vpxor %ymm8,%ymm9,%ymm9

# qhasm: r5 = r18
# asm 1: vmovapd <r18=reg256#9,>r5=reg256#9
# asm 2: vmovapd <r18=%ymm8,>r5=%ymm8
vmovapd %ymm8,%ymm8

# qhasm: a5 = mem256[ input_1 + 160 ]
# asm 1: vmovupd 160(<input_1=int64#2),>a5=reg256#15
# asm 2: vmovupd 160(<input_1=%rsi),>a5=%ymm14
vmovupd 160(%rsi),%ymm14

# qhasm: r = a5 & b0
# asm 1: vpand <a5=reg256#15,<b0=reg256#1,>r=reg256#16
# asm 2: vpand <a5=%ymm14,<b0=%ymm0,>r=%ymm15
vpand %ymm14,%ymm0,%ymm15

# qhasm: r5 ^= r
# asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9
# asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8
vpxor %ymm15,%ymm8,%ymm8

# qhasm: r = a5 & mem256[input_2 + 32]
# asm 1: vpand 32(<input_2=int64#3),<a5=reg256#15,>r=reg256#16
# asm 2: vpand 32(<input_2=%rdx),<a5=%ymm14,>r=%ymm15
vpand 32(%rdx),%ymm14,%ymm15

# qhasm: r6 ^= r
# asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10
# asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9
vpxor %ymm15,%ymm9,%ymm9

# qhasm: r = a5 & mem256[input_2 + 64]
# asm 1: vpand 64(<input_2=int64#3),<a5=reg256#15,>r=reg256#16
# asm 2: vpand 64(<input_2=%rdx),<a5=%ymm14,>r=%ymm15
vpand 64(%rdx),%ymm14,%ymm15

# qhasm: r7 ^= r
# asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11
# asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10
vpxor %ymm15,%ymm10,%ymm10

# qhasm: r = a5 & mem256[input_2 + 96]
# asm 1: vpand 96(<input_2=int64#3),<a5=reg256#15,>r=reg256#16
# asm 2: vpand 96(<input_2=%rdx),<a5=%ymm14,>r=%ymm15
vpand 96(%rdx),%ymm14,%ymm15

# qhasm: r8 ^= r
# asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12
# asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11
vpxor %ymm15,%ymm11,%ymm11

# qhasm: r = a5 & mem256[input_2 + 128]
# asm 1: vpand 128(<input_2=int64#3),<a5=reg256#15,>r=reg256#16
# asm 2: vpand 128(<input_2=%rdx),<a5=%ymm14,>r=%ymm15
vpand 128(%rdx),%ymm14,%ymm15

# qhasm: r9 ^= r
# asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13
# asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12
vpxor %ymm15,%ymm12,%ymm12

# qhasm: r = a5 & mem256[input_2 + 160]
# asm 1: vpand 160(<input_2=int64#3),<a5=reg256#15,>r=reg256#16
# asm 2: vpand 160(<input_2=%rdx),<a5=%ymm14,>r=%ymm15
vpand 160(%rdx),%ymm14,%ymm15

# qhasm: r10 ^= r
# asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14
# asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13
vpxor %ymm15,%ymm13,%ymm13

# qhasm: r = a5 & mem256[input_2 + 192]
# asm 1: vpand 192(<input_2=int64#3),<a5=reg256#15,>r=reg256#16
# asm 2: vpand 192(<input_2=%rdx),<a5=%ymm14,>r=%ymm15
vpand 192(%rdx),%ymm14,%ymm15

# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2
# asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1
vpxor %ymm15,%ymm1,%ymm1

# qhasm: r = a5 & mem256[input_2 + 224]
# asm 1: vpand 224(<input_2=int64#3),<a5=reg256#15,>r=reg256#16
# asm 2: vpand 224(<input_2=%rdx),<a5=%ymm14,>r=%ymm15
vpand 224(%rdx),%ymm14,%ymm15

# qhasm: r12 ^= r
# asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3
# asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2
vpxor %ymm15,%ymm2,%ymm2

# qhasm: r = a5 & mem256[input_2 + 256]
# asm 1: vpand 256(<input_2=int64#3),<a5=reg256#15,>r=reg256#16
# asm 2: vpand 256(<input_2=%rdx),<a5=%ymm14,>r=%ymm15
vpand 256(%rdx),%ymm14,%ymm15

# qhasm: r13 ^= r
# asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4
# asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3
vpxor %ymm15,%ymm3,%ymm3

# qhasm: r = a5 & mem256[input_2 + 288]
# asm 1: vpand 288(<input_2=int64#3),<a5=reg256#15,>r=reg256#16
# asm 2: vpand 288(<input_2=%rdx),<a5=%ymm14,>r=%ymm15
vpand 288(%rdx),%ymm14,%ymm15

# qhasm: r14 ^= r
# asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5
# asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4
vpxor %ymm15,%ymm4,%ymm4

# qhasm: r = a5 & mem256[input_2 + 320]
# asm 1: vpand 320(<input_2=int64#3),<a5=reg256#15,>r=reg256#16
# asm 2: vpand 320(<input_2=%rdx),<a5=%ymm14,>r=%ymm15
vpand 320(%rdx),%ymm14,%ymm15

# qhasm: r15 ^= r
# asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6
# asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5
vpxor %ymm15,%ymm5,%ymm5

# qhasm: r = a5 & mem256[input_2 + 352]
# asm 1: vpand 352(<input_2=int64#3),<a5=reg256#15,>r=reg256#16
# asm 2: vpand 352(<input_2=%rdx),<a5=%ymm14,>r=%ymm15
vpand 352(%rdx),%ymm14,%ymm15

# qhasm: r16 ^= r
# asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7
# asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6
vpxor %ymm15,%ymm6,%ymm6

# qhasm: r = a5 & mem256[input_2 + 384]
# asm 1: vpand 384(<input_2=int64#3),<a5=reg256#15,>r=reg256#15
# asm 2: vpand 384(<input_2=%rdx),<a5=%ymm14,>r=%ymm14
vpand 384(%rdx),%ymm14,%ymm14

# qhasm: r17 ^= r
# asm 1: vpxor <r=reg256#15,<r17=reg256#8,<r17=reg256#8
# asm 2: vpxor <r=%ymm14,<r17=%ymm7,<r17=%ymm7
vpxor %ymm14,%ymm7,%ymm7

# qhasm: r8 ^= r17
# asm 1: vpxor <r17=reg256#8,<r8=reg256#12,<r8=reg256#12
# asm 2: vpxor <r17=%ymm7,<r8=%ymm11,<r8=%ymm11
vpxor %ymm7,%ymm11,%ymm11

# qhasm: r7 ^= r17
# asm 1: vpxor <r17=reg256#8,<r7=reg256#11,<r7=reg256#11
# asm 2: vpxor <r17=%ymm7,<r7=%ymm10,<r7=%ymm10
vpxor %ymm7,%ymm10,%ymm10

# qhasm: r5 ^= r17
# asm 1: vpxor <r17=reg256#8,<r5=reg256#9,<r5=reg256#9
# asm 2: vpxor <r17=%ymm7,<r5=%ymm8,<r5=%ymm8
vpxor %ymm7,%ymm8,%ymm8

# qhasm: r4 = r17
# asm 1: vmovapd <r17=reg256#8,>r4=reg256#8
# asm 2: vmovapd <r17=%ymm7,>r4=%ymm7
vmovapd %ymm7,%ymm7

# qhasm: a4 = mem256[ input_1 + 128 ]
# asm 1: vmovupd 128(<input_1=int64#2),>a4=reg256#15
# asm 2: vmovupd 128(<input_1=%rsi),>a4=%ymm14
vmovupd 128(%rsi),%ymm14

# qhasm: r = a4 & b0
# asm 1: vpand <a4=reg256#15,<b0=reg256#1,>r=reg256#16
# asm 2: vpand <a4=%ymm14,<b0=%ymm0,>r=%ymm15
vpand %ymm14,%ymm0,%ymm15

# qhasm: r4 ^= r
# asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8
# asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7
vpxor %ymm15,%ymm7,%ymm7

# qhasm: r = a4 & mem256[input_2 + 32]
# asm 1: vpand 32(<input_2=int64#3),<a4=reg256#15,>r=reg256#16
# asm 2: vpand 32(<input_2=%rdx),<a4=%ymm14,>r=%ymm15
vpand 32(%rdx),%ymm14,%ymm15

# qhasm: r5 ^= r
# asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9
# asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8
vpxor %ymm15,%ymm8,%ymm8

# qhasm: r = a4 & mem256[input_2 + 64]
# asm 1: vpand 64(<input_2=int64#3),<a4=reg256#15,>r=reg256#16
# asm 2: vpand 64(<input_2=%rdx),<a4=%ymm14,>r=%ymm15
vpand 64(%rdx),%ymm14,%ymm15

# qhasm: r6 ^= r
# asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10
# asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9
vpxor %ymm15,%ymm9,%ymm9

# qhasm: r = a4 & mem256[input_2 + 96]
# asm 1: vpand 96(<input_2=int64#3),<a4=reg256#15,>r=reg256#16
# asm 2: vpand 96(<input_2=%rdx),<a4=%ymm14,>r=%ymm15
vpand 96(%rdx),%ymm14,%ymm15

# qhasm: r7 ^= r
# asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11
# asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10
vpxor %ymm15,%ymm10,%ymm10

# qhasm: r = a4 & mem256[input_2 + 128]
# asm 1: vpand 128(<input_2=int64#3),<a4=reg256#15,>r=reg256#16
# asm 2: vpand 128(<input_2=%rdx),<a4=%ymm14,>r=%ymm15
vpand 128(%rdx),%ymm14,%ymm15

# qhasm: r8 ^= r
# asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12
# asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11
vpxor %ymm15,%ymm11,%ymm11

# qhasm: r = a4 & mem256[input_2 + 160]
# asm 1: vpand 160(<input_2=int64#3),<a4=reg256#15,>r=reg256#16
# asm 2: vpand 160(<input_2=%rdx),<a4=%ymm14,>r=%ymm15
vpand 160(%rdx),%ymm14,%ymm15

# qhasm: r9 ^= r
# asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13
# asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12
vpxor %ymm15,%ymm12,%ymm12

# qhasm: r = a4 & mem256[input_2 + 192]
# asm 1: vpand 192(<input_2=int64#3),<a4=reg256#15,>r=reg256#16
# asm 2: vpand 192(<input_2=%rdx),<a4=%ymm14,>r=%ymm15
vpand 192(%rdx),%ymm14,%ymm15

# qhasm: r10 ^= r
# asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14
# asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13
vpxor %ymm15,%ymm13,%ymm13

# qhasm: r = a4 & mem256[input_2 + 224]
# asm 1: vpand 224(<input_2=int64#3),<a4=reg256#15,>r=reg256#16
# asm 2: vpand 224(<input_2=%rdx),<a4=%ymm14,>r=%ymm15
vpand 224(%rdx),%ymm14,%ymm15

# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2
# asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1
vpxor %ymm15,%ymm1,%ymm1

# qhasm: r = a4 & mem256[input_2 + 256]
# asm 1: vpand 256(<input_2=int64#3),<a4=reg256#15,>r=reg256#16
# asm 2: vpand 256(<input_2=%rdx),<a4=%ymm14,>r=%ymm15
vpand 256(%rdx),%ymm14,%ymm15

# qhasm: r12 ^= r
# asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3
# asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2
vpxor %ymm15,%ymm2,%ymm2

# qhasm: r = a4 & mem256[input_2 + 288]
# asm 1: vpand 288(<input_2=int64#3),<a4=reg256#15,>r=reg256#16
# asm 2: vpand 288(<input_2=%rdx),<a4=%ymm14,>r=%ymm15
vpand 288(%rdx),%ymm14,%ymm15

# qhasm: r13 ^= r
# asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4
# asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3
vpxor %ymm15,%ymm3,%ymm3

# qhasm: r = a4 & mem256[input_2 + 320]
# asm 1: vpand 320(<input_2=int64#3),<a4=reg256#15,>r=reg256#16
# asm 2: vpand 320(<input_2=%rdx),<a4=%ymm14,>r=%ymm15
vpand 320(%rdx),%ymm14,%ymm15

# qhasm: r14 ^= r
# asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5
# asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4
vpxor %ymm15,%ymm4,%ymm4

# qhasm: r = a4 & mem256[input_2 + 352]
# asm 1: vpand 352(<input_2=int64#3),<a4=reg256#15,>r=reg256#16
# asm 2: vpand 352(<input_2=%rdx),<a4=%ymm14,>r=%ymm15
vpand 352(%rdx),%ymm14,%ymm15

# qhasm: r15 ^= r
# asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6
# asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5
vpxor %ymm15,%ymm5,%ymm5

# qhasm: r = a4 & mem256[input_2 + 384]
# asm 1: vpand 384(<input_2=int64#3),<a4=reg256#15,>r=reg256#15
# asm 2: vpand 384(<input_2=%rdx),<a4=%ymm14,>r=%ymm14
vpand 384(%rdx),%ymm14,%ymm14

# qhasm: r16 ^= r
# asm 1: vpxor <r=reg256#15,<r16=reg256#7,<r16=reg256#7
# asm 2: vpxor <r=%ymm14,<r16=%ymm6,<r16=%ymm6
vpxor %ymm14,%ymm6,%ymm6

# qhasm: r7 ^= r16
# asm 1: vpxor <r16=reg256#7,<r7=reg256#11,<r7=reg256#11
# asm 2: vpxor <r16=%ymm6,<r7=%ymm10,<r7=%ymm10
vpxor %ymm6,%ymm10,%ymm10

# qhasm: r6 ^= r16
# asm 1: vpxor <r16=reg256#7,<r6=reg256#10,<r6=reg256#10
# asm 2: vpxor <r16=%ymm6,<r6=%ymm9,<r6=%ymm9
vpxor %ymm6,%ymm9,%ymm9

# qhasm: r4 ^= r16
# asm 1: vpxor <r16=reg256#7,<r4=reg256#8,<r4=reg256#8
# asm 2: vpxor <r16=%ymm6,<r4=%ymm7,<r4=%ymm7
vpxor %ymm6,%ymm7,%ymm7

# qhasm: r3 = r16
# asm 1: vmovapd <r16=reg256#7,>r3=reg256#7
# asm 2: vmovapd <r16=%ymm6,>r3=%ymm6
vmovapd %ymm6,%ymm6

# qhasm: a3 = mem256[ input_1 + 96 ]
# asm 1: vmovupd 96(<input_1=int64#2),>a3=reg256#15
# asm 2: vmovupd 96(<input_1=%rsi),>a3=%ymm14
vmovupd 96(%rsi),%ymm14

# qhasm: r = a3 & b0
# asm 1: vpand <a3=reg256#15,<b0=reg256#1,>r=reg256#16
# asm 2: vpand <a3=%ymm14,<b0=%ymm0,>r=%ymm15
vpand %ymm14,%ymm0,%ymm15

# qhasm: r3 ^= r
# asm 1: vpxor <r=reg256#16,<r3=reg256#7,<r3=reg256#7
# asm 2: vpxor <r=%ymm15,<r3=%ymm6,<r3=%ymm6
vpxor %ymm15,%ymm6,%ymm6

# qhasm: r = a3 & mem256[input_2 + 32]
# asm 1: vpand 32(<input_2=int64#3),<a3=reg256#15,>r=reg256#16
# asm 2: vpand 32(<input_2=%rdx),<a3=%ymm14,>r=%ymm15
vpand 32(%rdx),%ymm14,%ymm15

# qhasm: r4 ^= r
# asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8
# asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7
vpxor %ymm15,%ymm7,%ymm7

# qhasm: r = a3 & mem256[input_2 + 64]
# asm 1: vpand 64(<input_2=int64#3),<a3=reg256#15,>r=reg256#16
# asm 2: vpand 64(<input_2=%rdx),<a3=%ymm14,>r=%ymm15
vpand 64(%rdx),%ymm14,%ymm15

# qhasm: r5 ^= r
# asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9
# asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8
vpxor %ymm15,%ymm8,%ymm8

# qhasm: r = a3 & mem256[input_2 + 96]
# asm 1: vpand 96(<input_2=int64#3),<a3=reg256#15,>r=reg256#16
# asm 2: vpand 96(<input_2=%rdx),<a3=%ymm14,>r=%ymm15
vpand 96(%rdx),%ymm14,%ymm15

# qhasm: r6 ^= r
# asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10
# asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9
vpxor %ymm15,%ymm9,%ymm9

# qhasm: r = a3 & mem256[input_2 + 128]
# asm 1: vpand 128(<input_2=int64#3),<a3=reg256#15,>r=reg256#16
# asm 2: vpand 128(<input_2=%rdx),<a3=%ymm14,>r=%ymm15
vpand 128(%rdx),%ymm14,%ymm15

# qhasm: r7 ^= r
# asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11
# asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10
vpxor %ymm15,%ymm10,%ymm10

# qhasm: r = a3 & mem256[input_2 + 160]
# asm 1: vpand 160(<input_2=int64#3),<a3=reg256#15,>r=reg256#16
# asm 2: vpand 160(<input_2=%rdx),<a3=%ymm14,>r=%ymm15
vpand 160(%rdx),%ymm14,%ymm15

# qhasm: r8 ^= r
# asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12
# asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11
vpxor %ymm15,%ymm11,%ymm11

# qhasm: r = a3 & mem256[input_2 + 192]
# asm 1: vpand 192(<input_2=int64#3),<a3=reg256#15,>r=reg256#16
# asm 2: vpand 192(<input_2=%rdx),<a3=%ymm14,>r=%ymm15
vpand 192(%rdx),%ymm14,%ymm15

# qhasm: r9 ^= r
# asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13
# asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12
vpxor %ymm15,%ymm12,%ymm12

# qhasm: r = a3 & mem256[input_2 + 224]
# asm 1: vpand 224(<input_2=int64#3),<a3=reg256#15,>r=reg256#16
# asm 2: vpand 224(<input_2=%rdx),<a3=%ymm14,>r=%ymm15
vpand 224(%rdx),%ymm14,%ymm15

# qhasm: r10 ^= r
# asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14
# asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13
vpxor %ymm15,%ymm13,%ymm13

# qhasm: r = a3 & mem256[input_2 + 256]
# asm 1: vpand 256(<input_2=int64#3),<a3=reg256#15,>r=reg256#16
# asm 2: vpand 256(<input_2=%rdx),<a3=%ymm14,>r=%ymm15
vpand 256(%rdx),%ymm14,%ymm15

# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2
# asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1
vpxor %ymm15,%ymm1,%ymm1

# qhasm: r = a3 & mem256[input_2 + 288]
# asm 1: vpand 288(<input_2=int64#3),<a3=reg256#15,>r=reg256#16
# asm 2: vpand 288(<input_2=%rdx),<a3=%ymm14,>r=%ymm15
vpand 288(%rdx),%ymm14,%ymm15

# qhasm: r12 ^= r
# asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3
# asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2
vpxor %ymm15,%ymm2,%ymm2

# qhasm: r = a3 & mem256[input_2 + 320]
# asm 1: vpand 320(<input_2=int64#3),<a3=reg256#15,>r=reg256#16
# asm 2: vpand 320(<input_2=%rdx),<a3=%ymm14,>r=%ymm15
vpand 320(%rdx),%ymm14,%ymm15

# qhasm: r13 ^= r
# asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4
# asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3
vpxor %ymm15,%ymm3,%ymm3

# qhasm: r = a3 & mem256[input_2 + 352]
# asm 1: vpand 352(<input_2=int64#3),<a3=reg256#15,>r=reg256#16
# asm 2: vpand 352(<input_2=%rdx),<a3=%ymm14,>r=%ymm15
vpand 352(%rdx),%ymm14,%ymm15

# qhasm: r14 ^= r
# asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5
# asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4
vpxor %ymm15,%ymm4,%ymm4

# qhasm: r = a3 & mem256[input_2 + 384]
# asm 1: vpand 384(<input_2=int64#3),<a3=reg256#15,>r=reg256#15
# asm 2: vpand 384(<input_2=%rdx),<a3=%ymm14,>r=%ymm14
vpand 384(%rdx),%ymm14,%ymm14

# qhasm: r15 ^= r
# asm 1: vpxor <r=reg256#15,<r15=reg256#6,<r15=reg256#6
# asm 2: vpxor <r=%ymm14,<r15=%ymm5,<r15=%ymm5
vpxor %ymm14,%ymm5,%ymm5

# qhasm: r6 ^= r15
# asm 1: vpxor <r15=reg256#6,<r6=reg256#10,<r6=reg256#10
# asm 2: vpxor <r15=%ymm5,<r6=%ymm9,<r6=%ymm9
vpxor %ymm5,%ymm9,%ymm9

# qhasm: r5 ^= r15
# asm 1: vpxor <r15=reg256#6,<r5=reg256#9,<r5=reg256#9
# asm 2: vpxor <r15=%ymm5,<r5=%ymm8,<r5=%ymm8
vpxor %ymm5,%ymm8,%ymm8

# qhasm: r3 ^= r15
# asm 1: vpxor <r15=reg256#6,<r3=reg256#7,<r3=reg256#7
# asm 2: vpxor <r15=%ymm5,<r3=%ymm6,<r3=%ymm6
vpxor %ymm5,%ymm6,%ymm6

# qhasm: r2 = r15
# asm 1: vmovapd <r15=reg256#6,>r2=reg256#6
# asm 2: vmovapd <r15=%ymm5,>r2=%ymm5
vmovapd %ymm5,%ymm5

# qhasm: a2 = mem256[ input_1 + 64 ]
# asm 1: vmovupd 64(<input_1=int64#2),>a2=reg256#15
# asm 2: vmovupd 64(<input_1=%rsi),>a2=%ymm14
vmovupd 64(%rsi),%ymm14

# qhasm: r = a2 & b0
# asm 1: vpand <a2=reg256#15,<b0=reg256#1,>r=reg256#16
# asm 2: vpand <a2=%ymm14,<b0=%ymm0,>r=%ymm15
vpand %ymm14,%ymm0,%ymm15

# qhasm: r2 ^= r
# asm 1: vpxor <r=reg256#16,<r2=reg256#6,<r2=reg256#6
# asm 2: vpxor <r=%ymm15,<r2=%ymm5,<r2=%ymm5
vpxor %ymm15,%ymm5,%ymm5

# qhasm: r = a2 & mem256[input_2 + 32]
# asm 1: vpand 32(<input_2=int64#3),<a2=reg256#15,>r=reg256#16
# asm 2: vpand 32(<input_2=%rdx),<a2=%ymm14,>r=%ymm15
vpand 32(%rdx),%ymm14,%ymm15

# qhasm: r3 ^= r
# asm 1: vpxor <r=reg256#16,<r3=reg256#7,<r3=reg256#7
# asm 2: vpxor <r=%ymm15,<r3=%ymm6,<r3=%ymm6
vpxor %ymm15,%ymm6,%ymm6

# qhasm: r = a2 & mem256[input_2 + 64]
# asm 1: vpand 64(<input_2=int64#3),<a2=reg256#15,>r=reg256#16
# asm 2: vpand 64(<input_2=%rdx),<a2=%ymm14,>r=%ymm15
vpand 64(%rdx),%ymm14,%ymm15

# qhasm: r4 ^= r
# asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8
# asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7
vpxor %ymm15,%ymm7,%ymm7

# qhasm: r = a2 & mem256[input_2 + 96]
# asm 1: vpand 96(<input_2=int64#3),<a2=reg256#15,>r=reg256#16
# asm 2: vpand 96(<input_2=%rdx),<a2=%ymm14,>r=%ymm15
vpand 96(%rdx),%ymm14,%ymm15

# qhasm: r5 ^= r
# asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9
# asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8
vpxor %ymm15,%ymm8,%ymm8

# qhasm: r = a2 & mem256[input_2 + 128]
# asm 1: vpand 128(<input_2=int64#3),<a2=reg256#15,>r=reg256#16
# asm 2: vpand 128(<input_2=%rdx),<a2=%ymm14,>r=%ymm15
vpand 128(%rdx),%ymm14,%ymm15

# qhasm: r6 ^= r
# asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10
# asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9
vpxor %ymm15,%ymm9,%ymm9

# qhasm: r = a2 & mem256[input_2 + 160]
# asm 1: vpand 160(<input_2=int64#3),<a2=reg256#15,>r=reg256#16
# asm 2: vpand 160(<input_2=%rdx),<a2=%ymm14,>r=%ymm15
vpand 160(%rdx),%ymm14,%ymm15

# qhasm: r7 ^= r
# asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11
# asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10
vpxor %ymm15,%ymm10,%ymm10

# qhasm: r = a2 & mem256[input_2 + 192]
# asm 1: vpand 192(<input_2=int64#3),<a2=reg256#15,>r=reg256#16
# asm 2: vpand 192(<input_2=%rdx),<a2=%ymm14,>r=%ymm15
vpand 192(%rdx),%ymm14,%ymm15

# qhasm: r8 ^= r
# asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12
# asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11
vpxor %ymm15,%ymm11,%ymm11

# qhasm: r = a2 & mem256[input_2 + 224]
# asm 1: vpand 224(<input_2=int64#3),<a2=reg256#15,>r=reg256#16
# asm 2: vpand 224(<input_2=%rdx),<a2=%ymm14,>r=%ymm15
vpand 224(%rdx),%ymm14,%ymm15

# qhasm: r9 ^= r
# asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13
# asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12
vpxor %ymm15,%ymm12,%ymm12

# qhasm: r = a2 & mem256[input_2 + 256]
# asm 1: vpand 256(<input_2=int64#3),<a2=reg256#15,>r=reg256#16
# asm 2: vpand 256(<input_2=%rdx),<a2=%ymm14,>r=%ymm15
vpand 256(%rdx),%ymm14,%ymm15

# qhasm: r10 ^= r
# asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14
# asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13
vpxor %ymm15,%ymm13,%ymm13

# qhasm: r = a2 & mem256[input_2 + 288]
# asm 1: vpand 288(<input_2=int64#3),<a2=reg256#15,>r=reg256#16
# asm 2: vpand 288(<input_2=%rdx),<a2=%ymm14,>r=%ymm15
vpand 288(%rdx),%ymm14,%ymm15

# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2
# asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1
vpxor %ymm15,%ymm1,%ymm1

# qhasm: r = a2 & mem256[input_2 + 320]
# asm 1: vpand 320(<input_2=int64#3),<a2=reg256#15,>r=reg256#16
# asm 2: vpand 320(<input_2=%rdx),<a2=%ymm14,>r=%ymm15
vpand 320(%rdx),%ymm14,%ymm15
<r=%ymm15,<r12=%ymm2,<r12=%ymm2 1728vpxor %ymm15,%ymm2,%ymm2 1729 1730# qhasm: r = a2 & mem256[input_2 + 352] 1731# asm 1: vpand 352(<input_2=int64#3),<a2=reg256#15,>r=reg256#16 1732# asm 2: vpand 352(<input_2=%rdx),<a2=%ymm14,>r=%ymm15 1733vpand 352(%rdx),%ymm14,%ymm15 1734 1735# qhasm: r13 ^= r 1736# asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4 1737# asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3 1738vpxor %ymm15,%ymm3,%ymm3 1739 1740# qhasm: r = a2 & mem256[input_2 + 384] 1741# asm 1: vpand 384(<input_2=int64#3),<a2=reg256#15,>r=reg256#15 1742# asm 2: vpand 384(<input_2=%rdx),<a2=%ymm14,>r=%ymm14 1743vpand 384(%rdx),%ymm14,%ymm14 1744 1745# qhasm: r14 ^= r 1746# asm 1: vpxor <r=reg256#15,<r14=reg256#5,<r14=reg256#5 1747# asm 2: vpxor <r=%ymm14,<r14=%ymm4,<r14=%ymm4 1748vpxor %ymm14,%ymm4,%ymm4 1749 1750# qhasm: r5 ^= r14 1751# asm 1: vpxor <r14=reg256#5,<r5=reg256#9,<r5=reg256#9 1752# asm 2: vpxor <r14=%ymm4,<r5=%ymm8,<r5=%ymm8 1753vpxor %ymm4,%ymm8,%ymm8 1754 1755# qhasm: r4 ^= r14 1756# asm 1: vpxor <r14=reg256#5,<r4=reg256#8,<r4=reg256#8 1757# asm 2: vpxor <r14=%ymm4,<r4=%ymm7,<r4=%ymm7 1758vpxor %ymm4,%ymm7,%ymm7 1759 1760# qhasm: r2 ^= r14 1761# asm 1: vpxor <r14=reg256#5,<r2=reg256#6,<r2=reg256#6 1762# asm 2: vpxor <r14=%ymm4,<r2=%ymm5,<r2=%ymm5 1763vpxor %ymm4,%ymm5,%ymm5 1764 1765# qhasm: r1 = r14 1766# asm 1: vmovapd <r14=reg256#5,>r1=reg256#5 1767# asm 2: vmovapd <r14=%ymm4,>r1=%ymm4 1768vmovapd %ymm4,%ymm4 1769 1770# qhasm: a1 = mem256[ input_1 + 32 ] 1771# asm 1: vmovupd 32(<input_1=int64#2),>a1=reg256#15 1772# asm 2: vmovupd 32(<input_1=%rsi),>a1=%ymm14 1773vmovupd 32(%rsi),%ymm14 1774 1775# qhasm: r = a1 & b0 1776# asm 1: vpand <a1=reg256#15,<b0=reg256#1,>r=reg256#16 1777# asm 2: vpand <a1=%ymm14,<b0=%ymm0,>r=%ymm15 1778vpand %ymm14,%ymm0,%ymm15 1779 1780# qhasm: r1 ^= r 1781# asm 1: vpxor <r=reg256#16,<r1=reg256#5,<r1=reg256#5 1782# asm 2: vpxor <r=%ymm15,<r1=%ymm4,<r1=%ymm4 1783vpxor %ymm15,%ymm4,%ymm4 1784 1785# qhasm: r = a1 & mem256[input_2 + 32] 1786# asm 1: vpand 32(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 1787# asm 2: vpand 32(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 1788vpand 32(%rdx),%ymm14,%ymm15 1789 1790# qhasm: r2 ^= r 1791# asm 1: vpxor <r=reg256#16,<r2=reg256#6,<r2=reg256#6 1792# asm 2: vpxor <r=%ymm15,<r2=%ymm5,<r2=%ymm5 1793vpxor %ymm15,%ymm5,%ymm5 1794 1795# qhasm: r = a1 & mem256[input_2 + 64] 1796# asm 1: vpand 64(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 1797# asm 2: vpand 64(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 1798vpand 64(%rdx),%ymm14,%ymm15 1799 1800# qhasm: r3 ^= r 1801# asm 1: vpxor <r=reg256#16,<r3=reg256#7,<r3=reg256#7 1802# asm 2: vpxor <r=%ymm15,<r3=%ymm6,<r3=%ymm6 1803vpxor %ymm15,%ymm6,%ymm6 1804 1805# qhasm: r = a1 & mem256[input_2 + 96] 1806# asm 1: vpand 96(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 1807# asm 2: vpand 96(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 1808vpand 96(%rdx),%ymm14,%ymm15 1809 1810# qhasm: r4 ^= r 1811# asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8 1812# asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7 1813vpxor %ymm15,%ymm7,%ymm7 1814 1815# qhasm: r = a1 & mem256[input_2 + 128] 1816# asm 1: vpand 128(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 1817# asm 2: vpand 128(<input_2=%rdx),<a1=%ymm14,>r=%ymm15 1818vpand 128(%rdx),%ymm14,%ymm15 1819 1820# qhasm: r5 ^= r 1821# asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9 1822# asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8 1823vpxor %ymm15,%ymm8,%ymm8 1824 1825# qhasm: r = a1 & mem256[input_2 + 160] 1826# asm 1: vpand 160(<input_2=int64#3),<a1=reg256#15,>r=reg256#16 
# asm 2: vpand 160(<input_2=%rdx),<a1=%ymm14,>r=%ymm15
vpand 160(%rdx),%ymm14,%ymm15

# qhasm: r6 ^= r
# asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10
# asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9
vpxor %ymm15,%ymm9,%ymm9

# qhasm: r = a1 & mem256[input_2 + 192]
# asm 1: vpand 192(<input_2=int64#3),<a1=reg256#15,>r=reg256#16
# asm 2: vpand 192(<input_2=%rdx),<a1=%ymm14,>r=%ymm15
vpand 192(%rdx),%ymm14,%ymm15

# qhasm: r7 ^= r
# asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11
# asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10
vpxor %ymm15,%ymm10,%ymm10

# qhasm: r = a1 & mem256[input_2 + 224]
# asm 1: vpand 224(<input_2=int64#3),<a1=reg256#15,>r=reg256#16
# asm 2: vpand 224(<input_2=%rdx),<a1=%ymm14,>r=%ymm15
vpand 224(%rdx),%ymm14,%ymm15

# qhasm: r8 ^= r
# asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12
# asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11
vpxor %ymm15,%ymm11,%ymm11

# qhasm: r = a1 & mem256[input_2 + 256]
# asm 1: vpand 256(<input_2=int64#3),<a1=reg256#15,>r=reg256#16
# asm 2: vpand 256(<input_2=%rdx),<a1=%ymm14,>r=%ymm15
vpand 256(%rdx),%ymm14,%ymm15

# qhasm: r9 ^= r
# asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13
# asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12
vpxor %ymm15,%ymm12,%ymm12

# qhasm: r = a1 & mem256[input_2 + 288]
# asm 1: vpand 288(<input_2=int64#3),<a1=reg256#15,>r=reg256#16
# asm 2: vpand 288(<input_2=%rdx),<a1=%ymm14,>r=%ymm15
vpand 288(%rdx),%ymm14,%ymm15

# qhasm: r10 ^= r
# asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14
# asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13
vpxor %ymm15,%ymm13,%ymm13

# qhasm: r = a1 & mem256[input_2 + 320]
# asm 1: vpand 320(<input_2=int64#3),<a1=reg256#15,>r=reg256#16
# asm 2: vpand 320(<input_2=%rdx),<a1=%ymm14,>r=%ymm15
vpand 320(%rdx),%ymm14,%ymm15

# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2
# asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1
vpxor %ymm15,%ymm1,%ymm1

# qhasm: r = a1 & mem256[input_2 + 352]
# asm 1: vpand 352(<input_2=int64#3),<a1=reg256#15,>r=reg256#16
# asm 2: vpand 352(<input_2=%rdx),<a1=%ymm14,>r=%ymm15
vpand 352(%rdx),%ymm14,%ymm15

# qhasm: r12 ^= r
# asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3
# asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2
vpxor %ymm15,%ymm2,%ymm2

# qhasm: r = a1 & mem256[input_2 + 384]
# asm 1: vpand 384(<input_2=int64#3),<a1=reg256#15,>r=reg256#15
# asm 2: vpand 384(<input_2=%rdx),<a1=%ymm14,>r=%ymm14
vpand 384(%rdx),%ymm14,%ymm14

# qhasm: r13 ^= r
# asm 1: vpxor <r=reg256#15,<r13=reg256#4,<r13=reg256#4
# asm 2: vpxor <r=%ymm14,<r13=%ymm3,<r13=%ymm3
vpxor %ymm14,%ymm3,%ymm3

# fold the finished overflow limb r13 back: x^13 = x^4 + x^3 + x + 1

# qhasm: r4 ^= r13
# asm 1: vpxor <r13=reg256#4,<r4=reg256#8,<r4=reg256#8
# asm 2: vpxor <r13=%ymm3,<r4=%ymm7,<r4=%ymm7
vpxor %ymm3,%ymm7,%ymm7

# qhasm: r3 ^= r13
# asm 1: vpxor <r13=reg256#4,<r3=reg256#7,<r3=reg256#7
# asm 2: vpxor <r13=%ymm3,<r3=%ymm6,<r3=%ymm6
vpxor %ymm3,%ymm6,%ymm6

# qhasm: r1 ^= r13
# asm 1: vpxor <r13=reg256#4,<r1=reg256#5,<r1=reg256#5
# asm 2: vpxor <r13=%ymm3,<r1=%ymm4,<r1=%ymm4
vpxor %ymm3,%ymm4,%ymm4

# qhasm: r0 = r13
# asm 1: vmovapd <r13=reg256#4,>r0=reg256#4
# asm 2: vmovapd <r13=%ymm3,>r0=%ymm3
vmovapd %ymm3,%ymm3
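# a0 is the last of the 13 rows of partial products; once it is
# accumulated below, r0..r12 hold the fully reduced product.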
# qhasm: a0 = mem256[ input_1 + 0 ]
# asm 1: vmovupd 0(<input_1=int64#2),>a0=reg256#15
# asm 2: vmovupd 0(<input_1=%rsi),>a0=%ymm14
vmovupd 0(%rsi),%ymm14

# qhasm: r = a0 & b0
# asm 1: vpand <a0=reg256#15,<b0=reg256#1,>r=reg256#1
# asm 2: vpand <a0=%ymm14,<b0=%ymm0,>r=%ymm0
vpand %ymm14,%ymm0,%ymm0

# qhasm: r0 ^= r
# asm 1: vpxor <r=reg256#1,<r0=reg256#4,<r0=reg256#4
# asm 2: vpxor <r=%ymm0,<r0=%ymm3,<r0=%ymm3
vpxor %ymm0,%ymm3,%ymm3

# qhasm: r = a0 & mem256[input_2 + 32]
# asm 1: vpand 32(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
# asm 2: vpand 32(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
vpand 32(%rdx),%ymm14,%ymm0

# qhasm: r1 ^= r
# asm 1: vpxor <r=reg256#1,<r1=reg256#5,<r1=reg256#5
# asm 2: vpxor <r=%ymm0,<r1=%ymm4,<r1=%ymm4
vpxor %ymm0,%ymm4,%ymm4

# qhasm: r = a0 & mem256[input_2 + 64]
# asm 1: vpand 64(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
# asm 2: vpand 64(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
vpand 64(%rdx),%ymm14,%ymm0

# qhasm: r2 ^= r
# asm 1: vpxor <r=reg256#1,<r2=reg256#6,<r2=reg256#6
# asm 2: vpxor <r=%ymm0,<r2=%ymm5,<r2=%ymm5
vpxor %ymm0,%ymm5,%ymm5

# qhasm: r = a0 & mem256[input_2 + 96]
# asm 1: vpand 96(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
# asm 2: vpand 96(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
vpand 96(%rdx),%ymm14,%ymm0

# qhasm: r3 ^= r
# asm 1: vpxor <r=reg256#1,<r3=reg256#7,<r3=reg256#7
# asm 2: vpxor <r=%ymm0,<r3=%ymm6,<r3=%ymm6
vpxor %ymm0,%ymm6,%ymm6

# qhasm: r = a0 & mem256[input_2 + 128]
# asm 1: vpand 128(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
# asm 2: vpand 128(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
vpand 128(%rdx),%ymm14,%ymm0

# qhasm: r4 ^= r
# asm 1: vpxor <r=reg256#1,<r4=reg256#8,<r4=reg256#8
# asm 2: vpxor <r=%ymm0,<r4=%ymm7,<r4=%ymm7
vpxor %ymm0,%ymm7,%ymm7

# qhasm: r = a0 & mem256[input_2 + 160]
# asm 1: vpand 160(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
# asm 2: vpand 160(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
vpand 160(%rdx),%ymm14,%ymm0

# qhasm: r5 ^= r
# asm 1: vpxor <r=reg256#1,<r5=reg256#9,<r5=reg256#9
# asm 2: vpxor <r=%ymm0,<r5=%ymm8,<r5=%ymm8
vpxor %ymm0,%ymm8,%ymm8

# qhasm: r = a0 & mem256[input_2 + 192]
# asm 1: vpand 192(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
# asm 2: vpand 192(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
vpand 192(%rdx),%ymm14,%ymm0

# qhasm: r6 ^= r
# asm 1: vpxor <r=reg256#1,<r6=reg256#10,<r6=reg256#10
# asm 2: vpxor <r=%ymm0,<r6=%ymm9,<r6=%ymm9
vpxor %ymm0,%ymm9,%ymm9

# qhasm: r = a0 & mem256[input_2 + 224]
# asm 1: vpand 224(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
# asm 2: vpand 224(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
vpand 224(%rdx),%ymm14,%ymm0

# qhasm: r7 ^= r
# asm 1: vpxor <r=reg256#1,<r7=reg256#11,<r7=reg256#11
# asm 2: vpxor <r=%ymm0,<r7=%ymm10,<r7=%ymm10
vpxor %ymm0,%ymm10,%ymm10

# qhasm: r = a0 & mem256[input_2 + 256]
# asm 1: vpand 256(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
# asm 2: vpand 256(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
vpand 256(%rdx),%ymm14,%ymm0

# qhasm: r8 ^= r
# asm 1: vpxor <r=reg256#1,<r8=reg256#12,<r8=reg256#12
# asm 2: vpxor <r=%ymm0,<r8=%ymm11,<r8=%ymm11
vpxor %ymm0,%ymm11,%ymm11

# qhasm: r = a0 & mem256[input_2 + 288]
# asm 1: vpand 288(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
# asm 2: vpand 288(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
vpand 288(%rdx),%ymm14,%ymm0

# qhasm: r9 ^= r
# asm 1: vpxor <r=reg256#1,<r9=reg256#13,<r9=reg256#13
# asm 2: vpxor <r=%ymm0,<r9=%ymm12,<r9=%ymm12
vpxor %ymm0,%ymm12,%ymm12

# qhasm: r = a0 & mem256[input_2 + 320]
# asm 1: vpand 320(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
# asm 2: vpand 320(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
vpand 320(%rdx),%ymm14,%ymm0

# qhasm: r10 ^= r
# asm 1: vpxor <r=reg256#1,<r10=reg256#14,<r10=reg256#14
# asm 2: vpxor <r=%ymm0,<r10=%ymm13,<r10=%ymm13
vpxor %ymm0,%ymm13,%ymm13

# qhasm: r = a0 & mem256[input_2 + 352]
# asm 1: vpand 352(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
# asm 2: vpand 352(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
vpand 352(%rdx),%ymm14,%ymm0

# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#1,<r11=reg256#2,<r11=reg256#2
# asm 2: vpxor <r=%ymm0,<r11=%ymm1,<r11=%ymm1
vpxor %ymm0,%ymm1,%ymm1

# qhasm: r = a0 & mem256[input_2 + 384]
# asm 1: vpand 384(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
# asm 2: vpand 384(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
vpand 384(%rdx),%ymm14,%ymm0

# qhasm: r12 ^= r
# asm 1: vpxor <r=reg256#1,<r12=reg256#3,<r12=reg256#3
# asm 2: vpxor <r=%ymm0,<r12=%ymm2,<r12=%ymm2
vpxor %ymm0,%ymm2,%ymm2

# all overflow limbs are folded; store the reduced limbs r12 down to r0

# qhasm: mem256[ input_0 + 384 ] = r12
# asm 1: vmovupd <r12=reg256#3,384(<input_0=int64#1)
# asm 2: vmovupd <r12=%ymm2,384(<input_0=%rdi)
vmovupd %ymm2,384(%rdi)

# qhasm: mem256[ input_0 + 352 ] = r11
# asm 1: vmovupd <r11=reg256#2,352(<input_0=int64#1)
# asm 2: vmovupd <r11=%ymm1,352(<input_0=%rdi)
vmovupd %ymm1,352(%rdi)

# qhasm: mem256[ input_0 + 320 ] = r10
# asm 1: vmovupd <r10=reg256#14,320(<input_0=int64#1)
# asm 2: vmovupd <r10=%ymm13,320(<input_0=%rdi)
vmovupd %ymm13,320(%rdi)

# qhasm: mem256[ input_0 + 288 ] = r9
# asm 1: vmovupd <r9=reg256#13,288(<input_0=int64#1)
# asm 2: vmovupd <r9=%ymm12,288(<input_0=%rdi)
vmovupd %ymm12,288(%rdi)

# qhasm: mem256[ input_0 + 256 ] = r8
# asm 1: vmovupd <r8=reg256#12,256(<input_0=int64#1)
# asm 2: vmovupd <r8=%ymm11,256(<input_0=%rdi)
vmovupd %ymm11,256(%rdi)

# qhasm: mem256[ input_0 + 224 ] = r7
# asm 1: vmovupd <r7=reg256#11,224(<input_0=int64#1)
# asm 2: vmovupd <r7=%ymm10,224(<input_0=%rdi)
vmovupd %ymm10,224(%rdi)

# qhasm: mem256[ input_0 + 192 ] = r6
# asm 1: vmovupd <r6=reg256#10,192(<input_0=int64#1)
# asm 2: vmovupd <r6=%ymm9,192(<input_0=%rdi)
vmovupd %ymm9,192(%rdi)

# qhasm: mem256[ input_0 + 160 ] = r5
# asm 1: vmovupd <r5=reg256#9,160(<input_0=int64#1)
# asm 2: vmovupd <r5=%ymm8,160(<input_0=%rdi)
vmovupd %ymm8,160(%rdi)

# qhasm: mem256[ input_0 + 128 ] = r4
# asm 1: vmovupd <r4=reg256#8,128(<input_0=int64#1)
# asm 2: vmovupd <r4=%ymm7,128(<input_0=%rdi)
vmovupd %ymm7,128(%rdi)

# qhasm: mem256[ input_0 + 96 ] = r3
# asm 1: vmovupd <r3=reg256#7,96(<input_0=int64#1)
# asm 2: vmovupd <r3=%ymm6,96(<input_0=%rdi)
vmovupd %ymm6,96(%rdi)

# qhasm: mem256[ input_0 + 64 ] = r2
# asm 1: vmovupd <r2=reg256#6,64(<input_0=int64#1)
# asm 2: vmovupd <r2=%ymm5,64(<input_0=%rdi)
vmovupd %ymm5,64(%rdi)

# qhasm: mem256[ input_0 + 32 ] = r1
# asm 1: vmovupd <r1=reg256#5,32(<input_0=int64#1)
# asm 2: vmovupd <r1=%ymm4,32(<input_0=%rdi)
vmovupd %ymm4,32(%rdi)
# qhasm: mem256[ input_0 + 0 ] = r0
# asm 1: vmovupd <r0=reg256#4,0(<input_0=int64#1)
# asm 2: vmovupd <r0=%ymm3,0(<input_0=%rdi)
vmovupd %ymm3,0(%rdi)

# qhasm: return
add %r11,%rsp
ret
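# ----------------------------------------------------------------------
# Reference sketch (not part of the generated qhasm output): a plain C
# rendering of what this routine appears to compute, assuming 13 bitsliced
# limbs of 256 bits each (4 x uint64_t per limb), schoolbook multiplication
# over GF(2), and reduction by the field polynomial x^13 + x^4 + x^3 + x + 1
# (the reduction pattern r24 -> r15,r14,r12,r11 etc. above matches it).
# The name vec256_mul_ref and the array layout are illustrative, not taken
# from the PQClean sources.
#
#   #include <stdint.h>
#
#   static void vec256_mul_ref(uint64_t h[13][4],
#                              const uint64_t f[13][4],
#                              const uint64_t g[13][4])
#   {
#       uint64_t r[25][4] = {{0}};
#
#       /* schoolbook product: AND is a bit product, XOR accumulates */
#       for (int i = 0; i < 13; i++)
#           for (int j = 0; j < 13; j++)
#               for (int k = 0; k < 4; k++)
#                   r[i + j][k] ^= f[i][k] & g[j][k];
#
#       /* fold limbs 24..13 down using x^13 = x^4 + x^3 + x + 1 */
#       for (int d = 24; d >= 13; d--)
#           for (int k = 0; k < 4; k++) {
#               r[d - 13 + 4][k] ^= r[d][k];
#               r[d - 13 + 3][k] ^= r[d][k];
#               r[d - 13 + 1][k] ^= r[d][k];
#               r[d - 13][k]     ^= r[d][k];
#           }
#
#       for (int i = 0; i < 13; i++)
#           for (int k = 0; k < 4; k++)
#               h[i][k] = r[i][k];
#   }
#
# The assembly above is the same computation with all loops unrolled:
# each row a_i is ANDed against the 13 limbs of b, and each overflow limb
# (r24 down to r13) is folded back as soon as its last product lands.
# ----------------------------------------------------------------------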