1
2# qhasm: int64 input_0
3
4# qhasm: int64 input_1
5
6# qhasm: int64 input_2
7
8# qhasm: int64 input_3
9
10# qhasm: int64 input_4
11
12# qhasm: int64 input_5
13
14# qhasm: stack64 input_6
15
16# qhasm: stack64 input_7
17
18# qhasm: int64 caller_r11
19
20# qhasm: int64 caller_r12
21
22# qhasm: int64 caller_r13
23
24# qhasm: int64 caller_r14
25
26# qhasm: int64 caller_r15
27
28# qhasm: int64 caller_rbx
29
30# qhasm: int64 caller_rbp
31
32# qhasm: reg256 a0
33
34# qhasm: reg256 a1
35
36# qhasm: reg256 a2
37
38# qhasm: reg256 a3
39
40# qhasm: reg256 a4
41
42# qhasm: reg256 a5
43
44# qhasm: reg256 a6
45
46# qhasm: reg256 a7
47
48# qhasm: reg256 a8
49
50# qhasm: reg256 a9
51
52# qhasm: reg256 a10
53
54# qhasm: reg256 a11
55
56# qhasm: reg256 a12
57
58# qhasm: reg256 b0
59
60# qhasm: reg256 b1
61
62# qhasm: reg256 r0
63
64# qhasm: reg256 r1
65
66# qhasm: reg256 r2
67
68# qhasm: reg256 r3
69
70# qhasm: reg256 r4
71
72# qhasm: reg256 r5
73
74# qhasm: reg256 r6
75
76# qhasm: reg256 r7
77
78# qhasm: reg256 r8
79
80# qhasm: reg256 r9
81
82# qhasm: reg256 r10
83
84# qhasm: reg256 r11
85
86# qhasm: reg256 r12
87
88# qhasm: reg256 r13
89
90# qhasm: reg256 r14
91
92# qhasm: reg256 r15
93
94# qhasm: reg256 r16
95
96# qhasm: reg256 r17
97
98# qhasm: reg256 r18
99
100# qhasm: reg256 r19
101
102# qhasm: reg256 r20
103
104# qhasm: reg256 r21
105
106# qhasm: reg256 r22
107
108# qhasm: reg256 r23
109
110# qhasm: reg256 r24
111
112# qhasm: reg256 r
113
114# qhasm: enter vec256_ama_asm
115.p2align 5
116.global _PQCLEAN_MCELIECE8192128_AVX_vec256_ama_asm
117.global PQCLEAN_MCELIECE8192128_AVX_vec256_ama_asm
118_PQCLEAN_MCELIECE8192128_AVX_vec256_ama_asm:
119PQCLEAN_MCELIECE8192128_AVX_vec256_ama_asm:
120mov %rsp,%r11
121and $31,%r11
122add $0,%r11
123sub %r11,%rsp
124
125# qhasm: b0 = mem256[ input_2 + 0 ]
126# asm 1: vmovupd   0(<input_2=int64#3),>b0=reg256#1
127# asm 2: vmovupd   0(<input_2=%rdx),>b0=%ymm0
128vmovupd   0(%rdx),%ymm0
129
130# qhasm: a12 = mem256[ input_0 + 384 ]
131# asm 1: vmovupd   384(<input_0=int64#1),>a12=reg256#2
132# asm 2: vmovupd   384(<input_0=%rdi),>a12=%ymm1
133vmovupd   384(%rdi),%ymm1
134
135# qhasm: a12 = a12 ^ mem256[ input_1 + 384 ]
136# asm 1: vpxor 384(<input_1=int64#2),<a12=reg256#2,>a12=reg256#2
137# asm 2: vpxor 384(<input_1=%rsi),<a12=%ymm1,>a12=%ymm1
138vpxor 384(%rsi),%ymm1,%ymm1
139
140# qhasm: mem256[ input_0 + 384 ] = a12
141# asm 1: vmovupd   <a12=reg256#2,384(<input_0=int64#1)
142# asm 2: vmovupd   <a12=%ymm1,384(<input_0=%rdi)
143vmovupd   %ymm1,384(%rdi)
144
145# qhasm: r12 = a12 & b0
146# asm 1: vpand <a12=reg256#2,<b0=reg256#1,>r12=reg256#3
147# asm 2: vpand <a12=%ymm1,<b0=%ymm0,>r12=%ymm2
148vpand %ymm1,%ymm0,%ymm2
149
150# qhasm: r13 = a12 & mem256[input_2 + 32]
151# asm 1: vpand 32(<input_2=int64#3),<a12=reg256#2,>r13=reg256#4
152# asm 2: vpand 32(<input_2=%rdx),<a12=%ymm1,>r13=%ymm3
153vpand 32(%rdx),%ymm1,%ymm3
154
155# qhasm: r14 = a12 & mem256[input_2 + 64]
156# asm 1: vpand 64(<input_2=int64#3),<a12=reg256#2,>r14=reg256#5
157# asm 2: vpand 64(<input_2=%rdx),<a12=%ymm1,>r14=%ymm4
158vpand 64(%rdx),%ymm1,%ymm4
159
160# qhasm: r15 = a12 & mem256[input_2 + 96]
161# asm 1: vpand 96(<input_2=int64#3),<a12=reg256#2,>r15=reg256#6
162# asm 2: vpand 96(<input_2=%rdx),<a12=%ymm1,>r15=%ymm5
163vpand 96(%rdx),%ymm1,%ymm5
164
165# qhasm: r16 = a12 & mem256[input_2 + 128]
166# asm 1: vpand 128(<input_2=int64#3),<a12=reg256#2,>r16=reg256#7
167# asm 2: vpand 128(<input_2=%rdx),<a12=%ymm1,>r16=%ymm6
168vpand 128(%rdx),%ymm1,%ymm6
169
170# qhasm: r17 = a12 & mem256[input_2 + 160]
171# asm 1: vpand 160(<input_2=int64#3),<a12=reg256#2,>r17=reg256#8
172# asm 2: vpand 160(<input_2=%rdx),<a12=%ymm1,>r17=%ymm7
173vpand 160(%rdx),%ymm1,%ymm7
174
175# qhasm: r18 = a12 & mem256[input_2 + 192]
176# asm 1: vpand 192(<input_2=int64#3),<a12=reg256#2,>r18=reg256#9
177# asm 2: vpand 192(<input_2=%rdx),<a12=%ymm1,>r18=%ymm8
178vpand 192(%rdx),%ymm1,%ymm8
179
180# qhasm: r19 = a12 & mem256[input_2 + 224]
181# asm 1: vpand 224(<input_2=int64#3),<a12=reg256#2,>r19=reg256#10
182# asm 2: vpand 224(<input_2=%rdx),<a12=%ymm1,>r19=%ymm9
183vpand 224(%rdx),%ymm1,%ymm9
184
185# qhasm: r20 = a12 & mem256[input_2 + 256]
186# asm 1: vpand 256(<input_2=int64#3),<a12=reg256#2,>r20=reg256#11
187# asm 2: vpand 256(<input_2=%rdx),<a12=%ymm1,>r20=%ymm10
188vpand 256(%rdx),%ymm1,%ymm10
189
190# qhasm: r21 = a12 & mem256[input_2 + 288]
191# asm 1: vpand 288(<input_2=int64#3),<a12=reg256#2,>r21=reg256#12
192# asm 2: vpand 288(<input_2=%rdx),<a12=%ymm1,>r21=%ymm11
193vpand 288(%rdx),%ymm1,%ymm11
194
195# qhasm: r22 = a12 & mem256[input_2 + 320]
196# asm 1: vpand 320(<input_2=int64#3),<a12=reg256#2,>r22=reg256#13
197# asm 2: vpand 320(<input_2=%rdx),<a12=%ymm1,>r22=%ymm12
198vpand 320(%rdx),%ymm1,%ymm12
199
200# qhasm: r23 = a12 & mem256[input_2 + 352]
201# asm 1: vpand 352(<input_2=int64#3),<a12=reg256#2,>r23=reg256#14
202# asm 2: vpand 352(<input_2=%rdx),<a12=%ymm1,>r23=%ymm13
203vpand 352(%rdx),%ymm1,%ymm13
204
205# qhasm: r24 = a12 & mem256[input_2 + 384]
206# asm 1: vpand 384(<input_2=int64#3),<a12=reg256#2,>r24=reg256#2
207# asm 2: vpand 384(<input_2=%rdx),<a12=%ymm1,>r24=%ymm1
208vpand 384(%rdx),%ymm1,%ymm1
209
210# qhasm: r15 ^= r24
211# asm 1: vpxor <r24=reg256#2,<r15=reg256#6,<r15=reg256#6
212# asm 2: vpxor <r24=%ymm1,<r15=%ymm5,<r15=%ymm5
213vpxor %ymm1,%ymm5,%ymm5
214
215# qhasm: r14 ^= r24
216# asm 1: vpxor <r24=reg256#2,<r14=reg256#5,<r14=reg256#5
217# asm 2: vpxor <r24=%ymm1,<r14=%ymm4,<r14=%ymm4
218vpxor %ymm1,%ymm4,%ymm4
219
220# qhasm: r12 ^= r24
221# asm 1: vpxor <r24=reg256#2,<r12=reg256#3,<r12=reg256#3
222# asm 2: vpxor <r24=%ymm1,<r12=%ymm2,<r12=%ymm2
223vpxor %ymm1,%ymm2,%ymm2
224
225# qhasm: r11 = r24
226# asm 1: vmovapd <r24=reg256#2,>r11=reg256#2
227# asm 2: vmovapd <r24=%ymm1,>r11=%ymm1
228vmovapd %ymm1,%ymm1
229
230# qhasm: a11 = mem256[ input_0 + 352 ]
231# asm 1: vmovupd   352(<input_0=int64#1),>a11=reg256#15
232# asm 2: vmovupd   352(<input_0=%rdi),>a11=%ymm14
233vmovupd   352(%rdi),%ymm14
234
235# qhasm: a11 = a11 ^ mem256[ input_1 + 352 ]
236# asm 1: vpxor 352(<input_1=int64#2),<a11=reg256#15,>a11=reg256#15
237# asm 2: vpxor 352(<input_1=%rsi),<a11=%ymm14,>a11=%ymm14
238vpxor 352(%rsi),%ymm14,%ymm14
239
240# qhasm: mem256[ input_0 + 352 ] = a11
241# asm 1: vmovupd   <a11=reg256#15,352(<input_0=int64#1)
242# asm 2: vmovupd   <a11=%ymm14,352(<input_0=%rdi)
243vmovupd   %ymm14,352(%rdi)
244
245# qhasm: r = a11 & b0
246# asm 1: vpand <a11=reg256#15,<b0=reg256#1,>r=reg256#16
247# asm 2: vpand <a11=%ymm14,<b0=%ymm0,>r=%ymm15
248vpand %ymm14,%ymm0,%ymm15
249
250# qhasm: r11 ^= r
251# asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2
252# asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1
253vpxor %ymm15,%ymm1,%ymm1
254
255# qhasm: r = a11 & mem256[input_2 + 32]
256# asm 1: vpand 32(<input_2=int64#3),<a11=reg256#15,>r=reg256#16
257# asm 2: vpand 32(<input_2=%rdx),<a11=%ymm14,>r=%ymm15
258vpand 32(%rdx),%ymm14,%ymm15
259
260# qhasm: r12 ^= r
261# asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3
262# asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2
263vpxor %ymm15,%ymm2,%ymm2
264
265# qhasm: r = a11 & mem256[input_2 + 64]
266# asm 1: vpand 64(<input_2=int64#3),<a11=reg256#15,>r=reg256#16
267# asm 2: vpand 64(<input_2=%rdx),<a11=%ymm14,>r=%ymm15
268vpand 64(%rdx),%ymm14,%ymm15
269
270# qhasm: r13 ^= r
271# asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4
272# asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3
273vpxor %ymm15,%ymm3,%ymm3
274
275# qhasm: r = a11 & mem256[input_2 + 96]
276# asm 1: vpand 96(<input_2=int64#3),<a11=reg256#15,>r=reg256#16
277# asm 2: vpand 96(<input_2=%rdx),<a11=%ymm14,>r=%ymm15
278vpand 96(%rdx),%ymm14,%ymm15
279
280# qhasm: r14 ^= r
281# asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5
282# asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4
283vpxor %ymm15,%ymm4,%ymm4
284
285# qhasm: r = a11 & mem256[input_2 + 128]
286# asm 1: vpand 128(<input_2=int64#3),<a11=reg256#15,>r=reg256#16
287# asm 2: vpand 128(<input_2=%rdx),<a11=%ymm14,>r=%ymm15
288vpand 128(%rdx),%ymm14,%ymm15
289
290# qhasm: r15 ^= r
291# asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6
292# asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5
293vpxor %ymm15,%ymm5,%ymm5
294
295# qhasm: r = a11 & mem256[input_2 + 160]
296# asm 1: vpand 160(<input_2=int64#3),<a11=reg256#15,>r=reg256#16
297# asm 2: vpand 160(<input_2=%rdx),<a11=%ymm14,>r=%ymm15
298vpand 160(%rdx),%ymm14,%ymm15
299
300# qhasm: r16 ^= r
301# asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7
302# asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6
303vpxor %ymm15,%ymm6,%ymm6
304
305# qhasm: r = a11 & mem256[input_2 + 192]
306# asm 1: vpand 192(<input_2=int64#3),<a11=reg256#15,>r=reg256#16
307# asm 2: vpand 192(<input_2=%rdx),<a11=%ymm14,>r=%ymm15
308vpand 192(%rdx),%ymm14,%ymm15
309
310# qhasm: r17 ^= r
311# asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8
312# asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7
313vpxor %ymm15,%ymm7,%ymm7
314
315# qhasm: r = a11 & mem256[input_2 + 224]
316# asm 1: vpand 224(<input_2=int64#3),<a11=reg256#15,>r=reg256#16
317# asm 2: vpand 224(<input_2=%rdx),<a11=%ymm14,>r=%ymm15
318vpand 224(%rdx),%ymm14,%ymm15
319
320# qhasm: r18 ^= r
321# asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9
322# asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8
323vpxor %ymm15,%ymm8,%ymm8
324
325# qhasm: r = a11 & mem256[input_2 + 256]
326# asm 1: vpand 256(<input_2=int64#3),<a11=reg256#15,>r=reg256#16
327# asm 2: vpand 256(<input_2=%rdx),<a11=%ymm14,>r=%ymm15
328vpand 256(%rdx),%ymm14,%ymm15
329
330# qhasm: r19 ^= r
331# asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10
332# asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9
333vpxor %ymm15,%ymm9,%ymm9
334
335# qhasm: r = a11 & mem256[input_2 + 288]
336# asm 1: vpand 288(<input_2=int64#3),<a11=reg256#15,>r=reg256#16
337# asm 2: vpand 288(<input_2=%rdx),<a11=%ymm14,>r=%ymm15
338vpand 288(%rdx),%ymm14,%ymm15
339
340# qhasm: r20 ^= r
341# asm 1: vpxor <r=reg256#16,<r20=reg256#11,<r20=reg256#11
342# asm 2: vpxor <r=%ymm15,<r20=%ymm10,<r20=%ymm10
343vpxor %ymm15,%ymm10,%ymm10
344
345# qhasm: r = a11 & mem256[input_2 + 320]
346# asm 1: vpand 320(<input_2=int64#3),<a11=reg256#15,>r=reg256#16
347# asm 2: vpand 320(<input_2=%rdx),<a11=%ymm14,>r=%ymm15
348vpand 320(%rdx),%ymm14,%ymm15
349
350# qhasm: r21 ^= r
351# asm 1: vpxor <r=reg256#16,<r21=reg256#12,<r21=reg256#12
352# asm 2: vpxor <r=%ymm15,<r21=%ymm11,<r21=%ymm11
353vpxor %ymm15,%ymm11,%ymm11
354
355# qhasm: r = a11 & mem256[input_2 + 352]
356# asm 1: vpand 352(<input_2=int64#3),<a11=reg256#15,>r=reg256#16
357# asm 2: vpand 352(<input_2=%rdx),<a11=%ymm14,>r=%ymm15
358vpand 352(%rdx),%ymm14,%ymm15
359
360# qhasm: r22 ^= r
361# asm 1: vpxor <r=reg256#16,<r22=reg256#13,<r22=reg256#13
362# asm 2: vpxor <r=%ymm15,<r22=%ymm12,<r22=%ymm12
363vpxor %ymm15,%ymm12,%ymm12
364
365# qhasm: r = a11 & mem256[input_2 + 384]
366# asm 1: vpand 384(<input_2=int64#3),<a11=reg256#15,>r=reg256#15
367# asm 2: vpand 384(<input_2=%rdx),<a11=%ymm14,>r=%ymm14
368vpand 384(%rdx),%ymm14,%ymm14
369
370# qhasm: r23 ^= r
371# asm 1: vpxor <r=reg256#15,<r23=reg256#14,<r23=reg256#14
372# asm 2: vpxor <r=%ymm14,<r23=%ymm13,<r23=%ymm13
373vpxor %ymm14,%ymm13,%ymm13
374
375# qhasm: r14 ^= r23
376# asm 1: vpxor <r23=reg256#14,<r14=reg256#5,<r14=reg256#5
377# asm 2: vpxor <r23=%ymm13,<r14=%ymm4,<r14=%ymm4
378vpxor %ymm13,%ymm4,%ymm4
379
380# qhasm: r13 ^= r23
381# asm 1: vpxor <r23=reg256#14,<r13=reg256#4,<r13=reg256#4
382# asm 2: vpxor <r23=%ymm13,<r13=%ymm3,<r13=%ymm3
383vpxor %ymm13,%ymm3,%ymm3
384
385# qhasm: r11 ^= r23
386# asm 1: vpxor <r23=reg256#14,<r11=reg256#2,<r11=reg256#2
387# asm 2: vpxor <r23=%ymm13,<r11=%ymm1,<r11=%ymm1
388vpxor %ymm13,%ymm1,%ymm1
389
390# qhasm: r10 = r23
391# asm 1: vmovapd <r23=reg256#14,>r10=reg256#14
392# asm 2: vmovapd <r23=%ymm13,>r10=%ymm13
393vmovapd %ymm13,%ymm13
394
395# qhasm: a10 = mem256[ input_0 + 320 ]
396# asm 1: vmovupd   320(<input_0=int64#1),>a10=reg256#15
397# asm 2: vmovupd   320(<input_0=%rdi),>a10=%ymm14
398vmovupd   320(%rdi),%ymm14
399
400# qhasm: a10 = a10 ^ mem256[ input_1 + 320 ]
401# asm 1: vpxor 320(<input_1=int64#2),<a10=reg256#15,>a10=reg256#15
402# asm 2: vpxor 320(<input_1=%rsi),<a10=%ymm14,>a10=%ymm14
403vpxor 320(%rsi),%ymm14,%ymm14
404
405# qhasm: mem256[ input_0 + 320 ] = a10
406# asm 1: vmovupd   <a10=reg256#15,320(<input_0=int64#1)
407# asm 2: vmovupd   <a10=%ymm14,320(<input_0=%rdi)
408vmovupd   %ymm14,320(%rdi)
409
410# qhasm: r = a10 & b0
411# asm 1: vpand <a10=reg256#15,<b0=reg256#1,>r=reg256#16
412# asm 2: vpand <a10=%ymm14,<b0=%ymm0,>r=%ymm15
413vpand %ymm14,%ymm0,%ymm15
414
415# qhasm: r10 ^= r
416# asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14
417# asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13
418vpxor %ymm15,%ymm13,%ymm13
419
420# qhasm: r = a10 & mem256[input_2 + 32]
421# asm 1: vpand 32(<input_2=int64#3),<a10=reg256#15,>r=reg256#16
422# asm 2: vpand 32(<input_2=%rdx),<a10=%ymm14,>r=%ymm15
423vpand 32(%rdx),%ymm14,%ymm15
424
425# qhasm: r11 ^= r
426# asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2
427# asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1
428vpxor %ymm15,%ymm1,%ymm1
429
430# qhasm: r = a10 & mem256[input_2 + 64]
431# asm 1: vpand 64(<input_2=int64#3),<a10=reg256#15,>r=reg256#16
432# asm 2: vpand 64(<input_2=%rdx),<a10=%ymm14,>r=%ymm15
433vpand 64(%rdx),%ymm14,%ymm15
434
435# qhasm: r12 ^= r
436# asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3
437# asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2
438vpxor %ymm15,%ymm2,%ymm2
439
440# qhasm: r = a10 & mem256[input_2 + 96]
441# asm 1: vpand 96(<input_2=int64#3),<a10=reg256#15,>r=reg256#16
442# asm 2: vpand 96(<input_2=%rdx),<a10=%ymm14,>r=%ymm15
443vpand 96(%rdx),%ymm14,%ymm15
444
445# qhasm: r13 ^= r
446# asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4
447# asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3
448vpxor %ymm15,%ymm3,%ymm3
449
450# qhasm: r = a10 & mem256[input_2 + 128]
451# asm 1: vpand 128(<input_2=int64#3),<a10=reg256#15,>r=reg256#16
452# asm 2: vpand 128(<input_2=%rdx),<a10=%ymm14,>r=%ymm15
453vpand 128(%rdx),%ymm14,%ymm15
454
455# qhasm: r14 ^= r
456# asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5
457# asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4
458vpxor %ymm15,%ymm4,%ymm4
459
460# qhasm: r = a10 & mem256[input_2 + 160]
461# asm 1: vpand 160(<input_2=int64#3),<a10=reg256#15,>r=reg256#16
462# asm 2: vpand 160(<input_2=%rdx),<a10=%ymm14,>r=%ymm15
463vpand 160(%rdx),%ymm14,%ymm15
464
465# qhasm: r15 ^= r
466# asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6
467# asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5
468vpxor %ymm15,%ymm5,%ymm5
469
470# qhasm: r = a10 & mem256[input_2 + 192]
471# asm 1: vpand 192(<input_2=int64#3),<a10=reg256#15,>r=reg256#16
472# asm 2: vpand 192(<input_2=%rdx),<a10=%ymm14,>r=%ymm15
473vpand 192(%rdx),%ymm14,%ymm15
474
475# qhasm: r16 ^= r
476# asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7
477# asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6
478vpxor %ymm15,%ymm6,%ymm6
479
480# qhasm: r = a10 & mem256[input_2 + 224]
481# asm 1: vpand 224(<input_2=int64#3),<a10=reg256#15,>r=reg256#16
482# asm 2: vpand 224(<input_2=%rdx),<a10=%ymm14,>r=%ymm15
483vpand 224(%rdx),%ymm14,%ymm15
484
485# qhasm: r17 ^= r
486# asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8
487# asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7
488vpxor %ymm15,%ymm7,%ymm7
489
490# qhasm: r = a10 & mem256[input_2 + 256]
491# asm 1: vpand 256(<input_2=int64#3),<a10=reg256#15,>r=reg256#16
492# asm 2: vpand 256(<input_2=%rdx),<a10=%ymm14,>r=%ymm15
493vpand 256(%rdx),%ymm14,%ymm15
494
495# qhasm: r18 ^= r
496# asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9
497# asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8
498vpxor %ymm15,%ymm8,%ymm8
499
500# qhasm: r = a10 & mem256[input_2 + 288]
501# asm 1: vpand 288(<input_2=int64#3),<a10=reg256#15,>r=reg256#16
502# asm 2: vpand 288(<input_2=%rdx),<a10=%ymm14,>r=%ymm15
503vpand 288(%rdx),%ymm14,%ymm15
504
505# qhasm: r19 ^= r
506# asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10
507# asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9
508vpxor %ymm15,%ymm9,%ymm9
509
510# qhasm: r = a10 & mem256[input_2 + 320]
511# asm 1: vpand 320(<input_2=int64#3),<a10=reg256#15,>r=reg256#16
512# asm 2: vpand 320(<input_2=%rdx),<a10=%ymm14,>r=%ymm15
513vpand 320(%rdx),%ymm14,%ymm15
514
515# qhasm: r20 ^= r
516# asm 1: vpxor <r=reg256#16,<r20=reg256#11,<r20=reg256#11
517# asm 2: vpxor <r=%ymm15,<r20=%ymm10,<r20=%ymm10
518vpxor %ymm15,%ymm10,%ymm10
519
520# qhasm: r = a10 & mem256[input_2 + 352]
521# asm 1: vpand 352(<input_2=int64#3),<a10=reg256#15,>r=reg256#16
522# asm 2: vpand 352(<input_2=%rdx),<a10=%ymm14,>r=%ymm15
523vpand 352(%rdx),%ymm14,%ymm15
524
525# qhasm: r21 ^= r
526# asm 1: vpxor <r=reg256#16,<r21=reg256#12,<r21=reg256#12
527# asm 2: vpxor <r=%ymm15,<r21=%ymm11,<r21=%ymm11
528vpxor %ymm15,%ymm11,%ymm11
529
530# qhasm: r = a10 & mem256[input_2 + 384]
531# asm 1: vpand 384(<input_2=int64#3),<a10=reg256#15,>r=reg256#15
532# asm 2: vpand 384(<input_2=%rdx),<a10=%ymm14,>r=%ymm14
533vpand 384(%rdx),%ymm14,%ymm14
534
535# qhasm: r22 ^= r
536# asm 1: vpxor <r=reg256#15,<r22=reg256#13,<r22=reg256#13
537# asm 2: vpxor <r=%ymm14,<r22=%ymm12,<r22=%ymm12
538vpxor %ymm14,%ymm12,%ymm12
539
540# qhasm: r13 ^= r22
541# asm 1: vpxor <r22=reg256#13,<r13=reg256#4,<r13=reg256#4
542# asm 2: vpxor <r22=%ymm12,<r13=%ymm3,<r13=%ymm3
543vpxor %ymm12,%ymm3,%ymm3
544
545# qhasm: r12 ^= r22
546# asm 1: vpxor <r22=reg256#13,<r12=reg256#3,<r12=reg256#3
547# asm 2: vpxor <r22=%ymm12,<r12=%ymm2,<r12=%ymm2
548vpxor %ymm12,%ymm2,%ymm2
549
550# qhasm: r10 ^= r22
551# asm 1: vpxor <r22=reg256#13,<r10=reg256#14,<r10=reg256#14
552# asm 2: vpxor <r22=%ymm12,<r10=%ymm13,<r10=%ymm13
553vpxor %ymm12,%ymm13,%ymm13
554
555# qhasm: r9 = r22
556# asm 1: vmovapd <r22=reg256#13,>r9=reg256#13
557# asm 2: vmovapd <r22=%ymm12,>r9=%ymm12
558vmovapd %ymm12,%ymm12
559
560# qhasm: a9 = mem256[ input_0 + 288 ]
561# asm 1: vmovupd   288(<input_0=int64#1),>a9=reg256#15
562# asm 2: vmovupd   288(<input_0=%rdi),>a9=%ymm14
563vmovupd   288(%rdi),%ymm14
564
565# qhasm: a9 = a9 ^ mem256[ input_1 + 288 ]
566# asm 1: vpxor 288(<input_1=int64#2),<a9=reg256#15,>a9=reg256#15
567# asm 2: vpxor 288(<input_1=%rsi),<a9=%ymm14,>a9=%ymm14
568vpxor 288(%rsi),%ymm14,%ymm14
569
570# qhasm: mem256[ input_0 + 288 ] = a9
571# asm 1: vmovupd   <a9=reg256#15,288(<input_0=int64#1)
572# asm 2: vmovupd   <a9=%ymm14,288(<input_0=%rdi)
573vmovupd   %ymm14,288(%rdi)
574
575# qhasm: r = a9 & b0
576# asm 1: vpand <a9=reg256#15,<b0=reg256#1,>r=reg256#16
577# asm 2: vpand <a9=%ymm14,<b0=%ymm0,>r=%ymm15
578vpand %ymm14,%ymm0,%ymm15
579
580# qhasm: r9 ^= r
581# asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13
582# asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12
583vpxor %ymm15,%ymm12,%ymm12
584
585# qhasm: r = a9 & mem256[input_2 + 32]
586# asm 1: vpand 32(<input_2=int64#3),<a9=reg256#15,>r=reg256#16
587# asm 2: vpand 32(<input_2=%rdx),<a9=%ymm14,>r=%ymm15
588vpand 32(%rdx),%ymm14,%ymm15
589
590# qhasm: r10 ^= r
591# asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14
592# asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13
593vpxor %ymm15,%ymm13,%ymm13
594
595# qhasm: r = a9 & mem256[input_2 + 64]
596# asm 1: vpand 64(<input_2=int64#3),<a9=reg256#15,>r=reg256#16
597# asm 2: vpand 64(<input_2=%rdx),<a9=%ymm14,>r=%ymm15
598vpand 64(%rdx),%ymm14,%ymm15
599
600# qhasm: r11 ^= r
601# asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2
602# asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1
603vpxor %ymm15,%ymm1,%ymm1
604
605# qhasm: r = a9 & mem256[input_2 + 96]
606# asm 1: vpand 96(<input_2=int64#3),<a9=reg256#15,>r=reg256#16
607# asm 2: vpand 96(<input_2=%rdx),<a9=%ymm14,>r=%ymm15
608vpand 96(%rdx),%ymm14,%ymm15
609
610# qhasm: r12 ^= r
611# asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3
612# asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2
613vpxor %ymm15,%ymm2,%ymm2
614
615# qhasm: r = a9 & mem256[input_2 + 128]
616# asm 1: vpand 128(<input_2=int64#3),<a9=reg256#15,>r=reg256#16
617# asm 2: vpand 128(<input_2=%rdx),<a9=%ymm14,>r=%ymm15
618vpand 128(%rdx),%ymm14,%ymm15
619
620# qhasm: r13 ^= r
621# asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4
622# asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3
623vpxor %ymm15,%ymm3,%ymm3
624
625# qhasm: r = a9 & mem256[input_2 + 160]
626# asm 1: vpand 160(<input_2=int64#3),<a9=reg256#15,>r=reg256#16
627# asm 2: vpand 160(<input_2=%rdx),<a9=%ymm14,>r=%ymm15
628vpand 160(%rdx),%ymm14,%ymm15
629
630# qhasm: r14 ^= r
631# asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5
632# asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4
633vpxor %ymm15,%ymm4,%ymm4
634
635# qhasm: r = a9 & mem256[input_2 + 192]
636# asm 1: vpand 192(<input_2=int64#3),<a9=reg256#15,>r=reg256#16
637# asm 2: vpand 192(<input_2=%rdx),<a9=%ymm14,>r=%ymm15
638vpand 192(%rdx),%ymm14,%ymm15
639
640# qhasm: r15 ^= r
641# asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6
642# asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5
643vpxor %ymm15,%ymm5,%ymm5
644
645# qhasm: r = a9 & mem256[input_2 + 224]
646# asm 1: vpand 224(<input_2=int64#3),<a9=reg256#15,>r=reg256#16
647# asm 2: vpand 224(<input_2=%rdx),<a9=%ymm14,>r=%ymm15
648vpand 224(%rdx),%ymm14,%ymm15
649
650# qhasm: r16 ^= r
651# asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7
652# asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6
653vpxor %ymm15,%ymm6,%ymm6
654
655# qhasm: r = a9 & mem256[input_2 + 256]
656# asm 1: vpand 256(<input_2=int64#3),<a9=reg256#15,>r=reg256#16
657# asm 2: vpand 256(<input_2=%rdx),<a9=%ymm14,>r=%ymm15
658vpand 256(%rdx),%ymm14,%ymm15
659
660# qhasm: r17 ^= r
661# asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8
662# asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7
663vpxor %ymm15,%ymm7,%ymm7
664
665# qhasm: r = a9 & mem256[input_2 + 288]
666# asm 1: vpand 288(<input_2=int64#3),<a9=reg256#15,>r=reg256#16
667# asm 2: vpand 288(<input_2=%rdx),<a9=%ymm14,>r=%ymm15
668vpand 288(%rdx),%ymm14,%ymm15
669
670# qhasm: r18 ^= r
671# asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9
672# asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8
673vpxor %ymm15,%ymm8,%ymm8
674
675# qhasm: r = a9 & mem256[input_2 + 320]
676# asm 1: vpand 320(<input_2=int64#3),<a9=reg256#15,>r=reg256#16
677# asm 2: vpand 320(<input_2=%rdx),<a9=%ymm14,>r=%ymm15
678vpand 320(%rdx),%ymm14,%ymm15
679
680# qhasm: r19 ^= r
681# asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10
682# asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9
683vpxor %ymm15,%ymm9,%ymm9
684
685# qhasm: r = a9 & mem256[input_2 + 352]
686# asm 1: vpand 352(<input_2=int64#3),<a9=reg256#15,>r=reg256#16
687# asm 2: vpand 352(<input_2=%rdx),<a9=%ymm14,>r=%ymm15
688vpand 352(%rdx),%ymm14,%ymm15
689
690# qhasm: r20 ^= r
691# asm 1: vpxor <r=reg256#16,<r20=reg256#11,<r20=reg256#11
692# asm 2: vpxor <r=%ymm15,<r20=%ymm10,<r20=%ymm10
693vpxor %ymm15,%ymm10,%ymm10
694
695# qhasm: r = a9 & mem256[input_2 + 384]
696# asm 1: vpand 384(<input_2=int64#3),<a9=reg256#15,>r=reg256#15
697# asm 2: vpand 384(<input_2=%rdx),<a9=%ymm14,>r=%ymm14
698vpand 384(%rdx),%ymm14,%ymm14
699
700# qhasm: r21 ^= r
701# asm 1: vpxor <r=reg256#15,<r21=reg256#12,<r21=reg256#12
702# asm 2: vpxor <r=%ymm14,<r21=%ymm11,<r21=%ymm11
703vpxor %ymm14,%ymm11,%ymm11
704
705# qhasm: r12 ^= r21
706# asm 1: vpxor <r21=reg256#12,<r12=reg256#3,<r12=reg256#3
707# asm 2: vpxor <r21=%ymm11,<r12=%ymm2,<r12=%ymm2
708vpxor %ymm11,%ymm2,%ymm2
709
710# qhasm: r11 ^= r21
711# asm 1: vpxor <r21=reg256#12,<r11=reg256#2,<r11=reg256#2
712# asm 2: vpxor <r21=%ymm11,<r11=%ymm1,<r11=%ymm1
713vpxor %ymm11,%ymm1,%ymm1
714
715# qhasm: r9 ^= r21
716# asm 1: vpxor <r21=reg256#12,<r9=reg256#13,<r9=reg256#13
717# asm 2: vpxor <r21=%ymm11,<r9=%ymm12,<r9=%ymm12
718vpxor %ymm11,%ymm12,%ymm12
719
720# qhasm: r8 = r21
721# asm 1: vmovapd <r21=reg256#12,>r8=reg256#12
722# asm 2: vmovapd <r21=%ymm11,>r8=%ymm11
723vmovapd %ymm11,%ymm11
724
725# qhasm: a8 = mem256[ input_0 + 256 ]
726# asm 1: vmovupd   256(<input_0=int64#1),>a8=reg256#15
727# asm 2: vmovupd   256(<input_0=%rdi),>a8=%ymm14
728vmovupd   256(%rdi),%ymm14
729
730# qhasm: a8 = a8 ^ mem256[ input_1 + 256 ]
731# asm 1: vpxor 256(<input_1=int64#2),<a8=reg256#15,>a8=reg256#15
732# asm 2: vpxor 256(<input_1=%rsi),<a8=%ymm14,>a8=%ymm14
733vpxor 256(%rsi),%ymm14,%ymm14
734
735# qhasm: mem256[ input_0 + 256 ] = a8
736# asm 1: vmovupd   <a8=reg256#15,256(<input_0=int64#1)
737# asm 2: vmovupd   <a8=%ymm14,256(<input_0=%rdi)
738vmovupd   %ymm14,256(%rdi)
739
740# qhasm: r = a8 & b0
741# asm 1: vpand <a8=reg256#15,<b0=reg256#1,>r=reg256#16
742# asm 2: vpand <a8=%ymm14,<b0=%ymm0,>r=%ymm15
743vpand %ymm14,%ymm0,%ymm15
744
745# qhasm: r8 ^= r
746# asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12
747# asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11
748vpxor %ymm15,%ymm11,%ymm11
749
750# qhasm: r = a8 & mem256[input_2 + 32]
751# asm 1: vpand 32(<input_2=int64#3),<a8=reg256#15,>r=reg256#16
752# asm 2: vpand 32(<input_2=%rdx),<a8=%ymm14,>r=%ymm15
753vpand 32(%rdx),%ymm14,%ymm15
754
755# qhasm: r9 ^= r
756# asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13
757# asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12
758vpxor %ymm15,%ymm12,%ymm12
759
760# qhasm: r = a8 & mem256[input_2 + 64]
761# asm 1: vpand 64(<input_2=int64#3),<a8=reg256#15,>r=reg256#16
762# asm 2: vpand 64(<input_2=%rdx),<a8=%ymm14,>r=%ymm15
763vpand 64(%rdx),%ymm14,%ymm15
764
765# qhasm: r10 ^= r
766# asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14
767# asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13
768vpxor %ymm15,%ymm13,%ymm13
769
770# qhasm: r = a8 & mem256[input_2 + 96]
771# asm 1: vpand 96(<input_2=int64#3),<a8=reg256#15,>r=reg256#16
772# asm 2: vpand 96(<input_2=%rdx),<a8=%ymm14,>r=%ymm15
773vpand 96(%rdx),%ymm14,%ymm15
774
775# qhasm: r11 ^= r
776# asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2
777# asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1
778vpxor %ymm15,%ymm1,%ymm1
779
780# qhasm: r = a8 & mem256[input_2 + 128]
781# asm 1: vpand 128(<input_2=int64#3),<a8=reg256#15,>r=reg256#16
782# asm 2: vpand 128(<input_2=%rdx),<a8=%ymm14,>r=%ymm15
783vpand 128(%rdx),%ymm14,%ymm15
784
785# qhasm: r12 ^= r
786# asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3
787# asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2
788vpxor %ymm15,%ymm2,%ymm2
789
790# qhasm: r = a8 & mem256[input_2 + 160]
791# asm 1: vpand 160(<input_2=int64#3),<a8=reg256#15,>r=reg256#16
792# asm 2: vpand 160(<input_2=%rdx),<a8=%ymm14,>r=%ymm15
793vpand 160(%rdx),%ymm14,%ymm15
794
795# qhasm: r13 ^= r
796# asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4
797# asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3
798vpxor %ymm15,%ymm3,%ymm3
799
800# qhasm: r = a8 & mem256[input_2 + 192]
801# asm 1: vpand 192(<input_2=int64#3),<a8=reg256#15,>r=reg256#16
802# asm 2: vpand 192(<input_2=%rdx),<a8=%ymm14,>r=%ymm15
803vpand 192(%rdx),%ymm14,%ymm15
804
805# qhasm: r14 ^= r
806# asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5
807# asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4
808vpxor %ymm15,%ymm4,%ymm4
809
810# qhasm: r = a8 & mem256[input_2 + 224]
811# asm 1: vpand 224(<input_2=int64#3),<a8=reg256#15,>r=reg256#16
812# asm 2: vpand 224(<input_2=%rdx),<a8=%ymm14,>r=%ymm15
813vpand 224(%rdx),%ymm14,%ymm15
814
815# qhasm: r15 ^= r
816# asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6
817# asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5
818vpxor %ymm15,%ymm5,%ymm5
819
820# qhasm: r = a8 & mem256[input_2 + 256]
821# asm 1: vpand 256(<input_2=int64#3),<a8=reg256#15,>r=reg256#16
822# asm 2: vpand 256(<input_2=%rdx),<a8=%ymm14,>r=%ymm15
823vpand 256(%rdx),%ymm14,%ymm15
824
825# qhasm: r16 ^= r
826# asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7
827# asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6
828vpxor %ymm15,%ymm6,%ymm6
829
830# qhasm: r = a8 & mem256[input_2 + 288]
831# asm 1: vpand 288(<input_2=int64#3),<a8=reg256#15,>r=reg256#16
832# asm 2: vpand 288(<input_2=%rdx),<a8=%ymm14,>r=%ymm15
833vpand 288(%rdx),%ymm14,%ymm15
834
835# qhasm: r17 ^= r
836# asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8
837# asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7
838vpxor %ymm15,%ymm7,%ymm7
839
840# qhasm: r = a8 & mem256[input_2 + 320]
841# asm 1: vpand 320(<input_2=int64#3),<a8=reg256#15,>r=reg256#16
842# asm 2: vpand 320(<input_2=%rdx),<a8=%ymm14,>r=%ymm15
843vpand 320(%rdx),%ymm14,%ymm15
844
845# qhasm: r18 ^= r
846# asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9
847# asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8
848vpxor %ymm15,%ymm8,%ymm8
849
850# qhasm: r = a8 & mem256[input_2 + 352]
851# asm 1: vpand 352(<input_2=int64#3),<a8=reg256#15,>r=reg256#16
852# asm 2: vpand 352(<input_2=%rdx),<a8=%ymm14,>r=%ymm15
853vpand 352(%rdx),%ymm14,%ymm15
854
855# qhasm: r19 ^= r
856# asm 1: vpxor <r=reg256#16,<r19=reg256#10,<r19=reg256#10
857# asm 2: vpxor <r=%ymm15,<r19=%ymm9,<r19=%ymm9
858vpxor %ymm15,%ymm9,%ymm9
859
860# qhasm: r = a8 & mem256[input_2 + 384]
861# asm 1: vpand 384(<input_2=int64#3),<a8=reg256#15,>r=reg256#15
862# asm 2: vpand 384(<input_2=%rdx),<a8=%ymm14,>r=%ymm14
863vpand 384(%rdx),%ymm14,%ymm14
864
865# qhasm: r20 ^= r
866# asm 1: vpxor <r=reg256#15,<r20=reg256#11,<r20=reg256#11
867# asm 2: vpxor <r=%ymm14,<r20=%ymm10,<r20=%ymm10
868vpxor %ymm14,%ymm10,%ymm10
869
870# qhasm: r11 ^= r20
871# asm 1: vpxor <r20=reg256#11,<r11=reg256#2,<r11=reg256#2
872# asm 2: vpxor <r20=%ymm10,<r11=%ymm1,<r11=%ymm1
873vpxor %ymm10,%ymm1,%ymm1
874
875# qhasm: r10 ^= r20
876# asm 1: vpxor <r20=reg256#11,<r10=reg256#14,<r10=reg256#14
877# asm 2: vpxor <r20=%ymm10,<r10=%ymm13,<r10=%ymm13
878vpxor %ymm10,%ymm13,%ymm13
879
880# qhasm: r8 ^= r20
881# asm 1: vpxor <r20=reg256#11,<r8=reg256#12,<r8=reg256#12
882# asm 2: vpxor <r20=%ymm10,<r8=%ymm11,<r8=%ymm11
883vpxor %ymm10,%ymm11,%ymm11
884
885# qhasm: r7 = r20
886# asm 1: vmovapd <r20=reg256#11,>r7=reg256#11
887# asm 2: vmovapd <r20=%ymm10,>r7=%ymm10
888vmovapd %ymm10,%ymm10
889
890# qhasm: a7 = mem256[ input_0 + 224 ]
891# asm 1: vmovupd   224(<input_0=int64#1),>a7=reg256#15
892# asm 2: vmovupd   224(<input_0=%rdi),>a7=%ymm14
893vmovupd   224(%rdi),%ymm14
894
895# qhasm: a7 = a7 ^ mem256[ input_1 + 224 ]
896# asm 1: vpxor 224(<input_1=int64#2),<a7=reg256#15,>a7=reg256#15
897# asm 2: vpxor 224(<input_1=%rsi),<a7=%ymm14,>a7=%ymm14
898vpxor 224(%rsi),%ymm14,%ymm14
899
900# qhasm: mem256[ input_0 + 224 ] = a7
901# asm 1: vmovupd   <a7=reg256#15,224(<input_0=int64#1)
902# asm 2: vmovupd   <a7=%ymm14,224(<input_0=%rdi)
903vmovupd   %ymm14,224(%rdi)
904
905# qhasm: r = a7 & b0
906# asm 1: vpand <a7=reg256#15,<b0=reg256#1,>r=reg256#16
907# asm 2: vpand <a7=%ymm14,<b0=%ymm0,>r=%ymm15
908vpand %ymm14,%ymm0,%ymm15
909
910# qhasm: r7 ^= r
911# asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11
912# asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10
913vpxor %ymm15,%ymm10,%ymm10
914
915# qhasm: r = a7 & mem256[input_2 + 32]
916# asm 1: vpand 32(<input_2=int64#3),<a7=reg256#15,>r=reg256#16
917# asm 2: vpand 32(<input_2=%rdx),<a7=%ymm14,>r=%ymm15
918vpand 32(%rdx),%ymm14,%ymm15
919
920# qhasm: r8 ^= r
921# asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12
922# asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11
923vpxor %ymm15,%ymm11,%ymm11
924
925# qhasm: r = a7 & mem256[input_2 + 64]
926# asm 1: vpand 64(<input_2=int64#3),<a7=reg256#15,>r=reg256#16
927# asm 2: vpand 64(<input_2=%rdx),<a7=%ymm14,>r=%ymm15
928vpand 64(%rdx),%ymm14,%ymm15
929
930# qhasm: r9 ^= r
931# asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13
932# asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12
933vpxor %ymm15,%ymm12,%ymm12
934
935# qhasm: r = a7 & mem256[input_2 + 96]
936# asm 1: vpand 96(<input_2=int64#3),<a7=reg256#15,>r=reg256#16
937# asm 2: vpand 96(<input_2=%rdx),<a7=%ymm14,>r=%ymm15
938vpand 96(%rdx),%ymm14,%ymm15
939
940# qhasm: r10 ^= r
941# asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14
942# asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13
943vpxor %ymm15,%ymm13,%ymm13
944
945# qhasm: r = a7 & mem256[input_2 + 128]
946# asm 1: vpand 128(<input_2=int64#3),<a7=reg256#15,>r=reg256#16
947# asm 2: vpand 128(<input_2=%rdx),<a7=%ymm14,>r=%ymm15
948vpand 128(%rdx),%ymm14,%ymm15
949
950# qhasm: r11 ^= r
951# asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2
952# asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1
953vpxor %ymm15,%ymm1,%ymm1
954
955# qhasm: r = a7 & mem256[input_2 + 160]
956# asm 1: vpand 160(<input_2=int64#3),<a7=reg256#15,>r=reg256#16
957# asm 2: vpand 160(<input_2=%rdx),<a7=%ymm14,>r=%ymm15
958vpand 160(%rdx),%ymm14,%ymm15
959
960# qhasm: r12 ^= r
961# asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3
962# asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2
963vpxor %ymm15,%ymm2,%ymm2
964
965# qhasm: r = a7 & mem256[input_2 + 192]
966# asm 1: vpand 192(<input_2=int64#3),<a7=reg256#15,>r=reg256#16
967# asm 2: vpand 192(<input_2=%rdx),<a7=%ymm14,>r=%ymm15
968vpand 192(%rdx),%ymm14,%ymm15
969
970# qhasm: r13 ^= r
971# asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4
972# asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3
973vpxor %ymm15,%ymm3,%ymm3
974
975# qhasm: r = a7 & mem256[input_2 + 224]
976# asm 1: vpand 224(<input_2=int64#3),<a7=reg256#15,>r=reg256#16
977# asm 2: vpand 224(<input_2=%rdx),<a7=%ymm14,>r=%ymm15
978vpand 224(%rdx),%ymm14,%ymm15
979
980# qhasm: r14 ^= r
981# asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5
982# asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4
983vpxor %ymm15,%ymm4,%ymm4
984
985# qhasm: r = a7 & mem256[input_2 + 256]
986# asm 1: vpand 256(<input_2=int64#3),<a7=reg256#15,>r=reg256#16
987# asm 2: vpand 256(<input_2=%rdx),<a7=%ymm14,>r=%ymm15
988vpand 256(%rdx),%ymm14,%ymm15
989
990# qhasm: r15 ^= r
991# asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6
992# asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5
993vpxor %ymm15,%ymm5,%ymm5
994
995# qhasm: r = a7 & mem256[input_2 + 288]
996# asm 1: vpand 288(<input_2=int64#3),<a7=reg256#15,>r=reg256#16
997# asm 2: vpand 288(<input_2=%rdx),<a7=%ymm14,>r=%ymm15
998vpand 288(%rdx),%ymm14,%ymm15
999
1000# qhasm: r16 ^= r
1001# asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7
1002# asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6
1003vpxor %ymm15,%ymm6,%ymm6
1004
1005# qhasm: r = a7 & mem256[input_2 + 320]
1006# asm 1: vpand 320(<input_2=int64#3),<a7=reg256#15,>r=reg256#16
1007# asm 2: vpand 320(<input_2=%rdx),<a7=%ymm14,>r=%ymm15
1008vpand 320(%rdx),%ymm14,%ymm15
1009
1010# qhasm: r17 ^= r
1011# asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8
1012# asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7
1013vpxor %ymm15,%ymm7,%ymm7
1014
1015# qhasm: r = a7 & mem256[input_2 + 352]
1016# asm 1: vpand 352(<input_2=int64#3),<a7=reg256#15,>r=reg256#16
1017# asm 2: vpand 352(<input_2=%rdx),<a7=%ymm14,>r=%ymm15
1018vpand 352(%rdx),%ymm14,%ymm15
1019
1020# qhasm: r18 ^= r
1021# asm 1: vpxor <r=reg256#16,<r18=reg256#9,<r18=reg256#9
1022# asm 2: vpxor <r=%ymm15,<r18=%ymm8,<r18=%ymm8
1023vpxor %ymm15,%ymm8,%ymm8
1024
1025# qhasm: r = a7 & mem256[input_2 + 384]
1026# asm 1: vpand 384(<input_2=int64#3),<a7=reg256#15,>r=reg256#15
1027# asm 2: vpand 384(<input_2=%rdx),<a7=%ymm14,>r=%ymm14
1028vpand 384(%rdx),%ymm14,%ymm14
1029
1030# qhasm: r19 ^= r
1031# asm 1: vpxor <r=reg256#15,<r19=reg256#10,<r19=reg256#10
1032# asm 2: vpxor <r=%ymm14,<r19=%ymm9,<r19=%ymm9
1033vpxor %ymm14,%ymm9,%ymm9
1034
1035# qhasm: r10 ^= r19
1036# asm 1: vpxor <r19=reg256#10,<r10=reg256#14,<r10=reg256#14
1037# asm 2: vpxor <r19=%ymm9,<r10=%ymm13,<r10=%ymm13
1038vpxor %ymm9,%ymm13,%ymm13
1039
1040# qhasm: r9 ^= r19
1041# asm 1: vpxor <r19=reg256#10,<r9=reg256#13,<r9=reg256#13
1042# asm 2: vpxor <r19=%ymm9,<r9=%ymm12,<r9=%ymm12
1043vpxor %ymm9,%ymm12,%ymm12
1044
1045# qhasm: r7 ^= r19
1046# asm 1: vpxor <r19=reg256#10,<r7=reg256#11,<r7=reg256#11
1047# asm 2: vpxor <r19=%ymm9,<r7=%ymm10,<r7=%ymm10
1048vpxor %ymm9,%ymm10,%ymm10
1049
1050# qhasm: r6 = r19
1051# asm 1: vmovapd <r19=reg256#10,>r6=reg256#10
1052# asm 2: vmovapd <r19=%ymm9,>r6=%ymm9
1053vmovapd %ymm9,%ymm9
1054
1055# qhasm: a6 = mem256[ input_0 + 192 ]
1056# asm 1: vmovupd   192(<input_0=int64#1),>a6=reg256#15
1057# asm 2: vmovupd   192(<input_0=%rdi),>a6=%ymm14
1058vmovupd   192(%rdi),%ymm14
1059
1060# qhasm: a6 = a6 ^ mem256[ input_1 + 192 ]
1061# asm 1: vpxor 192(<input_1=int64#2),<a6=reg256#15,>a6=reg256#15
1062# asm 2: vpxor 192(<input_1=%rsi),<a6=%ymm14,>a6=%ymm14
1063vpxor 192(%rsi),%ymm14,%ymm14
1064
1065# qhasm: mem256[ input_0 + 192 ] = a6
1066# asm 1: vmovupd   <a6=reg256#15,192(<input_0=int64#1)
1067# asm 2: vmovupd   <a6=%ymm14,192(<input_0=%rdi)
1068vmovupd   %ymm14,192(%rdi)
1069
1070# qhasm: r = a6 & b0
1071# asm 1: vpand <a6=reg256#15,<b0=reg256#1,>r=reg256#16
1072# asm 2: vpand <a6=%ymm14,<b0=%ymm0,>r=%ymm15
1073vpand %ymm14,%ymm0,%ymm15
1074
1075# qhasm: r6 ^= r
1076# asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10
1077# asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9
1078vpxor %ymm15,%ymm9,%ymm9
1079
1080# qhasm: r = a6 & mem256[input_2 + 32]
1081# asm 1: vpand 32(<input_2=int64#3),<a6=reg256#15,>r=reg256#16
1082# asm 2: vpand 32(<input_2=%rdx),<a6=%ymm14,>r=%ymm15
1083vpand 32(%rdx),%ymm14,%ymm15
1084
1085# qhasm: r7 ^= r
1086# asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11
1087# asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10
1088vpxor %ymm15,%ymm10,%ymm10
1089
1090# qhasm: r = a6 & mem256[input_2 + 64]
1091# asm 1: vpand 64(<input_2=int64#3),<a6=reg256#15,>r=reg256#16
1092# asm 2: vpand 64(<input_2=%rdx),<a6=%ymm14,>r=%ymm15
1093vpand 64(%rdx),%ymm14,%ymm15
1094
1095# qhasm: r8 ^= r
1096# asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12
1097# asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11
1098vpxor %ymm15,%ymm11,%ymm11
1099
1100# qhasm: r = a6 & mem256[input_2 + 96]
1101# asm 1: vpand 96(<input_2=int64#3),<a6=reg256#15,>r=reg256#16
1102# asm 2: vpand 96(<input_2=%rdx),<a6=%ymm14,>r=%ymm15
1103vpand 96(%rdx),%ymm14,%ymm15
1104
1105# qhasm: r9 ^= r
1106# asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13
1107# asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12
1108vpxor %ymm15,%ymm12,%ymm12
1109
1110# qhasm: r = a6 & mem256[input_2 + 128]
1111# asm 1: vpand 128(<input_2=int64#3),<a6=reg256#15,>r=reg256#16
1112# asm 2: vpand 128(<input_2=%rdx),<a6=%ymm14,>r=%ymm15
1113vpand 128(%rdx),%ymm14,%ymm15
1114
1115# qhasm: r10 ^= r
1116# asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14
1117# asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13
1118vpxor %ymm15,%ymm13,%ymm13
1119
1120# qhasm: r = a6 & mem256[input_2 + 160]
1121# asm 1: vpand 160(<input_2=int64#3),<a6=reg256#15,>r=reg256#16
1122# asm 2: vpand 160(<input_2=%rdx),<a6=%ymm14,>r=%ymm15
1123vpand 160(%rdx),%ymm14,%ymm15
1124
1125# qhasm: r11 ^= r
1126# asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2
1127# asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1
1128vpxor %ymm15,%ymm1,%ymm1
1129
1130# qhasm: r = a6 & mem256[input_2 + 192]
1131# asm 1: vpand 192(<input_2=int64#3),<a6=reg256#15,>r=reg256#16
1132# asm 2: vpand 192(<input_2=%rdx),<a6=%ymm14,>r=%ymm15
1133vpand 192(%rdx),%ymm14,%ymm15
1134
1135# qhasm: r12 ^= r
1136# asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3
1137# asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2
1138vpxor %ymm15,%ymm2,%ymm2
1139
1140# qhasm: r = a6 & mem256[input_2 + 224]
1141# asm 1: vpand 224(<input_2=int64#3),<a6=reg256#15,>r=reg256#16
1142# asm 2: vpand 224(<input_2=%rdx),<a6=%ymm14,>r=%ymm15
1143vpand 224(%rdx),%ymm14,%ymm15
1144
1145# qhasm: r13 ^= r
1146# asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4
1147# asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3
1148vpxor %ymm15,%ymm3,%ymm3
1149
1150# qhasm: r = a6 & mem256[input_2 + 256]
1151# asm 1: vpand 256(<input_2=int64#3),<a6=reg256#15,>r=reg256#16
1152# asm 2: vpand 256(<input_2=%rdx),<a6=%ymm14,>r=%ymm15
1153vpand 256(%rdx),%ymm14,%ymm15
1154
1155# qhasm: r14 ^= r
1156# asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5
1157# asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4
1158vpxor %ymm15,%ymm4,%ymm4
1159
1160# qhasm: r = a6 & mem256[input_2 + 288]
1161# asm 1: vpand 288(<input_2=int64#3),<a6=reg256#15,>r=reg256#16
1162# asm 2: vpand 288(<input_2=%rdx),<a6=%ymm14,>r=%ymm15
1163vpand 288(%rdx),%ymm14,%ymm15
1164
1165# qhasm: r15 ^= r
1166# asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6
1167# asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5
1168vpxor %ymm15,%ymm5,%ymm5
1169
1170# qhasm: r = a6 & mem256[input_2 + 320]
1171# asm 1: vpand 320(<input_2=int64#3),<a6=reg256#15,>r=reg256#16
1172# asm 2: vpand 320(<input_2=%rdx),<a6=%ymm14,>r=%ymm15
1173vpand 320(%rdx),%ymm14,%ymm15
1174
1175# qhasm: r16 ^= r
1176# asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7
1177# asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6
1178vpxor %ymm15,%ymm6,%ymm6
1179
1180# qhasm: r = a6 & mem256[input_2 + 352]
1181# asm 1: vpand 352(<input_2=int64#3),<a6=reg256#15,>r=reg256#16
1182# asm 2: vpand 352(<input_2=%rdx),<a6=%ymm14,>r=%ymm15
1183vpand 352(%rdx),%ymm14,%ymm15
1184
1185# qhasm: r17 ^= r
1186# asm 1: vpxor <r=reg256#16,<r17=reg256#8,<r17=reg256#8
1187# asm 2: vpxor <r=%ymm15,<r17=%ymm7,<r17=%ymm7
1188vpxor %ymm15,%ymm7,%ymm7
1189
1190# qhasm: r = a6 & mem256[input_2 + 384]
1191# asm 1: vpand 384(<input_2=int64#3),<a6=reg256#15,>r=reg256#15
1192# asm 2: vpand 384(<input_2=%rdx),<a6=%ymm14,>r=%ymm14
1193vpand 384(%rdx),%ymm14,%ymm14
1194
1195# qhasm: r18 ^= r
1196# asm 1: vpxor <r=reg256#15,<r18=reg256#9,<r18=reg256#9
1197# asm 2: vpxor <r=%ymm14,<r18=%ymm8,<r18=%ymm8
1198vpxor %ymm14,%ymm8,%ymm8
1199
1200# qhasm: r9 ^= r18
1201# asm 1: vpxor <r18=reg256#9,<r9=reg256#13,<r9=reg256#13
1202# asm 2: vpxor <r18=%ymm8,<r9=%ymm12,<r9=%ymm12
1203vpxor %ymm8,%ymm12,%ymm12
1204
1205# qhasm: r8 ^= r18
1206# asm 1: vpxor <r18=reg256#9,<r8=reg256#12,<r8=reg256#12
1207# asm 2: vpxor <r18=%ymm8,<r8=%ymm11,<r8=%ymm11
1208vpxor %ymm8,%ymm11,%ymm11
1209
1210# qhasm: r6 ^= r18
1211# asm 1: vpxor <r18=reg256#9,<r6=reg256#10,<r6=reg256#10
1212# asm 2: vpxor <r18=%ymm8,<r6=%ymm9,<r6=%ymm9
1213vpxor %ymm8,%ymm9,%ymm9
1214
1215# qhasm: r5 = r18
1216# asm 1: vmovapd <r18=reg256#9,>r5=reg256#9
1217# asm 2: vmovapd <r18=%ymm8,>r5=%ymm8
1218vmovapd %ymm8,%ymm8
1219
1220# qhasm: a5 = mem256[ input_0 + 160 ]
1221# asm 1: vmovupd   160(<input_0=int64#1),>a5=reg256#15
1222# asm 2: vmovupd   160(<input_0=%rdi),>a5=%ymm14
1223vmovupd   160(%rdi),%ymm14
1224
1225# qhasm: a5 = a5 ^ mem256[ input_1 + 160 ]
1226# asm 1: vpxor 160(<input_1=int64#2),<a5=reg256#15,>a5=reg256#15
1227# asm 2: vpxor 160(<input_1=%rsi),<a5=%ymm14,>a5=%ymm14
1228vpxor 160(%rsi),%ymm14,%ymm14
1229
1230# qhasm: mem256[ input_0 + 160 ] = a5
1231# asm 1: vmovupd   <a5=reg256#15,160(<input_0=int64#1)
1232# asm 2: vmovupd   <a5=%ymm14,160(<input_0=%rdi)
1233vmovupd   %ymm14,160(%rdi)
1234
1235# qhasm: r = a5 & b0
1236# asm 1: vpand <a5=reg256#15,<b0=reg256#1,>r=reg256#16
1237# asm 2: vpand <a5=%ymm14,<b0=%ymm0,>r=%ymm15
1238vpand %ymm14,%ymm0,%ymm15
1239
1240# qhasm: r5 ^= r
1241# asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9
1242# asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8
1243vpxor %ymm15,%ymm8,%ymm8
1244
1245# qhasm: r = a5 & mem256[input_2 + 32]
1246# asm 1: vpand 32(<input_2=int64#3),<a5=reg256#15,>r=reg256#16
1247# asm 2: vpand 32(<input_2=%rdx),<a5=%ymm14,>r=%ymm15
1248vpand 32(%rdx),%ymm14,%ymm15
1249
1250# qhasm: r6 ^= r
1251# asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10
1252# asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9
1253vpxor %ymm15,%ymm9,%ymm9
1254
1255# qhasm: r = a5 & mem256[input_2 + 64]
1256# asm 1: vpand 64(<input_2=int64#3),<a5=reg256#15,>r=reg256#16
1257# asm 2: vpand 64(<input_2=%rdx),<a5=%ymm14,>r=%ymm15
1258vpand 64(%rdx),%ymm14,%ymm15
1259
1260# qhasm: r7 ^= r
1261# asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11
1262# asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10
1263vpxor %ymm15,%ymm10,%ymm10
1264
1265# qhasm: r = a5 & mem256[input_2 + 96]
1266# asm 1: vpand 96(<input_2=int64#3),<a5=reg256#15,>r=reg256#16
1267# asm 2: vpand 96(<input_2=%rdx),<a5=%ymm14,>r=%ymm15
1268vpand 96(%rdx),%ymm14,%ymm15
1269
1270# qhasm: r8 ^= r
1271# asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12
1272# asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11
1273vpxor %ymm15,%ymm11,%ymm11
1274
1275# qhasm: r = a5 & mem256[input_2 + 128]
1276# asm 1: vpand 128(<input_2=int64#3),<a5=reg256#15,>r=reg256#16
1277# asm 2: vpand 128(<input_2=%rdx),<a5=%ymm14,>r=%ymm15
1278vpand 128(%rdx),%ymm14,%ymm15
1279
1280# qhasm: r9 ^= r
1281# asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13
1282# asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12
1283vpxor %ymm15,%ymm12,%ymm12
1284
1285# qhasm: r = a5 & mem256[input_2 + 160]
1286# asm 1: vpand 160(<input_2=int64#3),<a5=reg256#15,>r=reg256#16
1287# asm 2: vpand 160(<input_2=%rdx),<a5=%ymm14,>r=%ymm15
1288vpand 160(%rdx),%ymm14,%ymm15
1289
1290# qhasm: r10 ^= r
1291# asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14
1292# asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13
1293vpxor %ymm15,%ymm13,%ymm13
1294
1295# qhasm: r = a5 & mem256[input_2 + 192]
1296# asm 1: vpand 192(<input_2=int64#3),<a5=reg256#15,>r=reg256#16
1297# asm 2: vpand 192(<input_2=%rdx),<a5=%ymm14,>r=%ymm15
1298vpand 192(%rdx),%ymm14,%ymm15
1299
1300# qhasm: r11 ^= r
1301# asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2
1302# asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1
1303vpxor %ymm15,%ymm1,%ymm1
1304
1305# qhasm: r = a5 & mem256[input_2 + 224]
1306# asm 1: vpand 224(<input_2=int64#3),<a5=reg256#15,>r=reg256#16
1307# asm 2: vpand 224(<input_2=%rdx),<a5=%ymm14,>r=%ymm15
1308vpand 224(%rdx),%ymm14,%ymm15
1309
1310# qhasm: r12 ^= r
1311# asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3
1312# asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2
1313vpxor %ymm15,%ymm2,%ymm2
1314
1315# qhasm: r = a5 & mem256[input_2 + 256]
1316# asm 1: vpand 256(<input_2=int64#3),<a5=reg256#15,>r=reg256#16
1317# asm 2: vpand 256(<input_2=%rdx),<a5=%ymm14,>r=%ymm15
1318vpand 256(%rdx),%ymm14,%ymm15
1319
1320# qhasm: r13 ^= r
1321# asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4
1322# asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3
1323vpxor %ymm15,%ymm3,%ymm3
1324
1325# qhasm: r = a5 & mem256[input_2 + 288]
1326# asm 1: vpand 288(<input_2=int64#3),<a5=reg256#15,>r=reg256#16
1327# asm 2: vpand 288(<input_2=%rdx),<a5=%ymm14,>r=%ymm15
1328vpand 288(%rdx),%ymm14,%ymm15
1329
1330# qhasm: r14 ^= r
1331# asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5
1332# asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4
1333vpxor %ymm15,%ymm4,%ymm4
1334
1335# qhasm: r = a5 & mem256[input_2 + 320]
1336# asm 1: vpand 320(<input_2=int64#3),<a5=reg256#15,>r=reg256#16
1337# asm 2: vpand 320(<input_2=%rdx),<a5=%ymm14,>r=%ymm15
1338vpand 320(%rdx),%ymm14,%ymm15
1339
1340# qhasm: r15 ^= r
1341# asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6
1342# asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5
1343vpxor %ymm15,%ymm5,%ymm5
1344
1345# qhasm: r = a5 & mem256[input_2 + 352]
1346# asm 1: vpand 352(<input_2=int64#3),<a5=reg256#15,>r=reg256#16
1347# asm 2: vpand 352(<input_2=%rdx),<a5=%ymm14,>r=%ymm15
1348vpand 352(%rdx),%ymm14,%ymm15
1349
1350# qhasm: r16 ^= r
1351# asm 1: vpxor <r=reg256#16,<r16=reg256#7,<r16=reg256#7
1352# asm 2: vpxor <r=%ymm15,<r16=%ymm6,<r16=%ymm6
1353vpxor %ymm15,%ymm6,%ymm6
1354
1355# qhasm: r = a5 & mem256[input_2 + 384]
1356# asm 1: vpand 384(<input_2=int64#3),<a5=reg256#15,>r=reg256#15
1357# asm 2: vpand 384(<input_2=%rdx),<a5=%ymm14,>r=%ymm14
1358vpand 384(%rdx),%ymm14,%ymm14
1359
1360# qhasm: r17 ^= r
1361# asm 1: vpxor <r=reg256#15,<r17=reg256#8,<r17=reg256#8
1362# asm 2: vpxor <r=%ymm14,<r17=%ymm7,<r17=%ymm7
1363vpxor %ymm14,%ymm7,%ymm7
1364
1365# qhasm: r8 ^= r17
1366# asm 1: vpxor <r17=reg256#8,<r8=reg256#12,<r8=reg256#12
1367# asm 2: vpxor <r17=%ymm7,<r8=%ymm11,<r8=%ymm11
1368vpxor %ymm7,%ymm11,%ymm11
1369
1370# qhasm: r7 ^= r17
1371# asm 1: vpxor <r17=reg256#8,<r7=reg256#11,<r7=reg256#11
1372# asm 2: vpxor <r17=%ymm7,<r7=%ymm10,<r7=%ymm10
1373vpxor %ymm7,%ymm10,%ymm10
1374
1375# qhasm: r5 ^= r17
1376# asm 1: vpxor <r17=reg256#8,<r5=reg256#9,<r5=reg256#9
1377# asm 2: vpxor <r17=%ymm7,<r5=%ymm8,<r5=%ymm8
1378vpxor %ymm7,%ymm8,%ymm8
1379
1380# qhasm: r4 = r17
1381# asm 1: vmovapd <r17=reg256#8,>r4=reg256#8
1382# asm 2: vmovapd <r17=%ymm7,>r4=%ymm7
1383vmovapd %ymm7,%ymm7
1384
1385# qhasm: a4 = mem256[ input_0 + 128 ]
1386# asm 1: vmovupd   128(<input_0=int64#1),>a4=reg256#15
1387# asm 2: vmovupd   128(<input_0=%rdi),>a4=%ymm14
1388vmovupd   128(%rdi),%ymm14
1389
1390# qhasm: a4 = a4 ^ mem256[ input_1 + 128 ]
1391# asm 1: vpxor 128(<input_1=int64#2),<a4=reg256#15,>a4=reg256#15
1392# asm 2: vpxor 128(<input_1=%rsi),<a4=%ymm14,>a4=%ymm14
1393vpxor 128(%rsi),%ymm14,%ymm14
1394
1395# qhasm: mem256[ input_0 + 128 ] = a4
1396# asm 1: vmovupd   <a4=reg256#15,128(<input_0=int64#1)
1397# asm 2: vmovupd   <a4=%ymm14,128(<input_0=%rdi)
1398vmovupd   %ymm14,128(%rdi)
1399
1400# qhasm: r = a4 & b0
1401# asm 1: vpand <a4=reg256#15,<b0=reg256#1,>r=reg256#16
1402# asm 2: vpand <a4=%ymm14,<b0=%ymm0,>r=%ymm15
1403vpand %ymm14,%ymm0,%ymm15
1404
1405# qhasm: r4 ^= r
1406# asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8
1407# asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7
1408vpxor %ymm15,%ymm7,%ymm7
1409
1410# qhasm: r = a4 & mem256[input_2 + 32]
1411# asm 1: vpand 32(<input_2=int64#3),<a4=reg256#15,>r=reg256#16
1412# asm 2: vpand 32(<input_2=%rdx),<a4=%ymm14,>r=%ymm15
1413vpand 32(%rdx),%ymm14,%ymm15
1414
1415# qhasm: r5 ^= r
1416# asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9
1417# asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8
1418vpxor %ymm15,%ymm8,%ymm8
1419
1420# qhasm: r = a4 & mem256[input_2 + 64]
1421# asm 1: vpand 64(<input_2=int64#3),<a4=reg256#15,>r=reg256#16
1422# asm 2: vpand 64(<input_2=%rdx),<a4=%ymm14,>r=%ymm15
1423vpand 64(%rdx),%ymm14,%ymm15
1424
1425# qhasm: r6 ^= r
1426# asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10
1427# asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9
1428vpxor %ymm15,%ymm9,%ymm9
1429
1430# qhasm: r = a4 & mem256[input_2 + 96]
1431# asm 1: vpand 96(<input_2=int64#3),<a4=reg256#15,>r=reg256#16
1432# asm 2: vpand 96(<input_2=%rdx),<a4=%ymm14,>r=%ymm15
1433vpand 96(%rdx),%ymm14,%ymm15
1434
1435# qhasm: r7 ^= r
1436# asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11
1437# asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10
1438vpxor %ymm15,%ymm10,%ymm10
1439
1440# qhasm: r = a4 & mem256[input_2 + 128]
1441# asm 1: vpand 128(<input_2=int64#3),<a4=reg256#15,>r=reg256#16
1442# asm 2: vpand 128(<input_2=%rdx),<a4=%ymm14,>r=%ymm15
1443vpand 128(%rdx),%ymm14,%ymm15
1444
1445# qhasm: r8 ^= r
1446# asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12
1447# asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11
1448vpxor %ymm15,%ymm11,%ymm11
1449
1450# qhasm: r = a4 & mem256[input_2 + 160]
1451# asm 1: vpand 160(<input_2=int64#3),<a4=reg256#15,>r=reg256#16
1452# asm 2: vpand 160(<input_2=%rdx),<a4=%ymm14,>r=%ymm15
1453vpand 160(%rdx),%ymm14,%ymm15
1454
1455# qhasm: r9 ^= r
1456# asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13
1457# asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12
1458vpxor %ymm15,%ymm12,%ymm12
1459
1460# qhasm: r = a4 & mem256[input_2 + 192]
1461# asm 1: vpand 192(<input_2=int64#3),<a4=reg256#15,>r=reg256#16
1462# asm 2: vpand 192(<input_2=%rdx),<a4=%ymm14,>r=%ymm15
1463vpand 192(%rdx),%ymm14,%ymm15
1464
1465# qhasm: r10 ^= r
1466# asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14
1467# asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13
1468vpxor %ymm15,%ymm13,%ymm13
1469
1470# qhasm: r = a4 & mem256[input_2 + 224]
1471# asm 1: vpand 224(<input_2=int64#3),<a4=reg256#15,>r=reg256#16
1472# asm 2: vpand 224(<input_2=%rdx),<a4=%ymm14,>r=%ymm15
1473vpand 224(%rdx),%ymm14,%ymm15
1474
1475# qhasm: r11 ^= r
1476# asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2
1477# asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1
1478vpxor %ymm15,%ymm1,%ymm1
1479
1480# qhasm: r = a4 & mem256[input_2 + 256]
1481# asm 1: vpand 256(<input_2=int64#3),<a4=reg256#15,>r=reg256#16
1482# asm 2: vpand 256(<input_2=%rdx),<a4=%ymm14,>r=%ymm15
1483vpand 256(%rdx),%ymm14,%ymm15
1484
1485# qhasm: r12 ^= r
1486# asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3
1487# asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2
1488vpxor %ymm15,%ymm2,%ymm2
1489
1490# qhasm: r = a4 & mem256[input_2 + 288]
1491# asm 1: vpand 288(<input_2=int64#3),<a4=reg256#15,>r=reg256#16
1492# asm 2: vpand 288(<input_2=%rdx),<a4=%ymm14,>r=%ymm15
1493vpand 288(%rdx),%ymm14,%ymm15
1494
1495# qhasm: r13 ^= r
1496# asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4
1497# asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3
1498vpxor %ymm15,%ymm3,%ymm3
1499
1500# qhasm: r = a4 & mem256[input_2 + 320]
1501# asm 1: vpand 320(<input_2=int64#3),<a4=reg256#15,>r=reg256#16
1502# asm 2: vpand 320(<input_2=%rdx),<a4=%ymm14,>r=%ymm15
1503vpand 320(%rdx),%ymm14,%ymm15
1504
1505# qhasm: r14 ^= r
1506# asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5
1507# asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4
1508vpxor %ymm15,%ymm4,%ymm4
1509
1510# qhasm: r = a4 & mem256[input_2 + 352]
1511# asm 1: vpand 352(<input_2=int64#3),<a4=reg256#15,>r=reg256#16
1512# asm 2: vpand 352(<input_2=%rdx),<a4=%ymm14,>r=%ymm15
1513vpand 352(%rdx),%ymm14,%ymm15
1514
1515# qhasm: r15 ^= r
1516# asm 1: vpxor <r=reg256#16,<r15=reg256#6,<r15=reg256#6
1517# asm 2: vpxor <r=%ymm15,<r15=%ymm5,<r15=%ymm5
1518vpxor %ymm15,%ymm5,%ymm5
1519
1520# qhasm: r = a4 & mem256[input_2 + 384]
1521# asm 1: vpand 384(<input_2=int64#3),<a4=reg256#15,>r=reg256#15
1522# asm 2: vpand 384(<input_2=%rdx),<a4=%ymm14,>r=%ymm14
1523vpand 384(%rdx),%ymm14,%ymm14
1524
1525# qhasm: r16 ^= r
1526# asm 1: vpxor <r=reg256#15,<r16=reg256#7,<r16=reg256#7
1527# asm 2: vpxor <r=%ymm14,<r16=%ymm6,<r16=%ymm6
1528vpxor %ymm14,%ymm6,%ymm6
1529
1530# qhasm: r7 ^= r16
1531# asm 1: vpxor <r16=reg256#7,<r7=reg256#11,<r7=reg256#11
1532# asm 2: vpxor <r16=%ymm6,<r7=%ymm10,<r7=%ymm10
1533vpxor %ymm6,%ymm10,%ymm10
1534
1535# qhasm: r6 ^= r16
1536# asm 1: vpxor <r16=reg256#7,<r6=reg256#10,<r6=reg256#10
1537# asm 2: vpxor <r16=%ymm6,<r6=%ymm9,<r6=%ymm9
1538vpxor %ymm6,%ymm9,%ymm9
1539
1540# qhasm: r4 ^= r16
1541# asm 1: vpxor <r16=reg256#7,<r4=reg256#8,<r4=reg256#8
1542# asm 2: vpxor <r16=%ymm6,<r4=%ymm7,<r4=%ymm7
1543vpxor %ymm6,%ymm7,%ymm7
1544
1545# qhasm: r3 = r16
1546# asm 1: vmovapd <r16=reg256#7,>r3=reg256#7
1547# asm 2: vmovapd <r16=%ymm6,>r3=%ymm6
1548vmovapd %ymm6,%ymm6
1549
1550# qhasm: a3 = mem256[ input_0 + 96 ]
1551# asm 1: vmovupd   96(<input_0=int64#1),>a3=reg256#15
1552# asm 2: vmovupd   96(<input_0=%rdi),>a3=%ymm14
1553vmovupd   96(%rdi),%ymm14
1554
1555# qhasm: a3 = a3 ^ mem256[ input_1 + 96 ]
1556# asm 1: vpxor 96(<input_1=int64#2),<a3=reg256#15,>a3=reg256#15
1557# asm 2: vpxor 96(<input_1=%rsi),<a3=%ymm14,>a3=%ymm14
1558vpxor 96(%rsi),%ymm14,%ymm14
1559
1560# qhasm: mem256[ input_0 + 96 ] = a3
1561# asm 1: vmovupd   <a3=reg256#15,96(<input_0=int64#1)
1562# asm 2: vmovupd   <a3=%ymm14,96(<input_0=%rdi)
1563vmovupd   %ymm14,96(%rdi)
1564
1565# qhasm: r = a3 & b0
1566# asm 1: vpand <a3=reg256#15,<b0=reg256#1,>r=reg256#16
1567# asm 2: vpand <a3=%ymm14,<b0=%ymm0,>r=%ymm15
1568vpand %ymm14,%ymm0,%ymm15
1569
1570# qhasm: r3 ^= r
1571# asm 1: vpxor <r=reg256#16,<r3=reg256#7,<r3=reg256#7
1572# asm 2: vpxor <r=%ymm15,<r3=%ymm6,<r3=%ymm6
1573vpxor %ymm15,%ymm6,%ymm6
1574
1575# qhasm: r = a3 & mem256[input_2 + 32]
1576# asm 1: vpand 32(<input_2=int64#3),<a3=reg256#15,>r=reg256#16
1577# asm 2: vpand 32(<input_2=%rdx),<a3=%ymm14,>r=%ymm15
1578vpand 32(%rdx),%ymm14,%ymm15
1579
1580# qhasm: r4 ^= r
1581# asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8
1582# asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7
1583vpxor %ymm15,%ymm7,%ymm7
1584
1585# qhasm: r = a3 & mem256[input_2 + 64]
1586# asm 1: vpand 64(<input_2=int64#3),<a3=reg256#15,>r=reg256#16
1587# asm 2: vpand 64(<input_2=%rdx),<a3=%ymm14,>r=%ymm15
1588vpand 64(%rdx),%ymm14,%ymm15
1589
1590# qhasm: r5 ^= r
1591# asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9
1592# asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8
1593vpxor %ymm15,%ymm8,%ymm8
1594
1595# qhasm: r = a3 & mem256[input_2 + 96]
1596# asm 1: vpand 96(<input_2=int64#3),<a3=reg256#15,>r=reg256#16
1597# asm 2: vpand 96(<input_2=%rdx),<a3=%ymm14,>r=%ymm15
1598vpand 96(%rdx),%ymm14,%ymm15
1599
1600# qhasm: r6 ^= r
1601# asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10
1602# asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9
1603vpxor %ymm15,%ymm9,%ymm9
1604
1605# qhasm: r = a3 & mem256[input_2 + 128]
1606# asm 1: vpand 128(<input_2=int64#3),<a3=reg256#15,>r=reg256#16
1607# asm 2: vpand 128(<input_2=%rdx),<a3=%ymm14,>r=%ymm15
1608vpand 128(%rdx),%ymm14,%ymm15
1609
1610# qhasm: r7 ^= r
1611# asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11
1612# asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10
1613vpxor %ymm15,%ymm10,%ymm10
1614
1615# qhasm: r = a3 & mem256[input_2 + 160]
1616# asm 1: vpand 160(<input_2=int64#3),<a3=reg256#15,>r=reg256#16
1617# asm 2: vpand 160(<input_2=%rdx),<a3=%ymm14,>r=%ymm15
1618vpand 160(%rdx),%ymm14,%ymm15
1619
1620# qhasm: r8 ^= r
1621# asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12
1622# asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11
1623vpxor %ymm15,%ymm11,%ymm11
1624
1625# qhasm: r = a3 & mem256[input_2 + 192]
1626# asm 1: vpand 192(<input_2=int64#3),<a3=reg256#15,>r=reg256#16
1627# asm 2: vpand 192(<input_2=%rdx),<a3=%ymm14,>r=%ymm15
1628vpand 192(%rdx),%ymm14,%ymm15
1629
1630# qhasm: r9 ^= r
1631# asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13
1632# asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12
1633vpxor %ymm15,%ymm12,%ymm12
1634
1635# qhasm: r = a3 & mem256[input_2 + 224]
1636# asm 1: vpand 224(<input_2=int64#3),<a3=reg256#15,>r=reg256#16
1637# asm 2: vpand 224(<input_2=%rdx),<a3=%ymm14,>r=%ymm15
1638vpand 224(%rdx),%ymm14,%ymm15
1639
1640# qhasm: r10 ^= r
1641# asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14
1642# asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13
1643vpxor %ymm15,%ymm13,%ymm13
1644
1645# qhasm: r = a3 & mem256[input_2 + 256]
1646# asm 1: vpand 256(<input_2=int64#3),<a3=reg256#15,>r=reg256#16
1647# asm 2: vpand 256(<input_2=%rdx),<a3=%ymm14,>r=%ymm15
1648vpand 256(%rdx),%ymm14,%ymm15
1649
1650# qhasm: r11 ^= r
1651# asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2
1652# asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1
1653vpxor %ymm15,%ymm1,%ymm1
1654
1655# qhasm: r = a3 & mem256[input_2 + 288]
1656# asm 1: vpand 288(<input_2=int64#3),<a3=reg256#15,>r=reg256#16
1657# asm 2: vpand 288(<input_2=%rdx),<a3=%ymm14,>r=%ymm15
1658vpand 288(%rdx),%ymm14,%ymm15
1659
1660# qhasm: r12 ^= r
1661# asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3
1662# asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2
1663vpxor %ymm15,%ymm2,%ymm2
1664
1665# qhasm: r = a3 & mem256[input_2 + 320]
1666# asm 1: vpand 320(<input_2=int64#3),<a3=reg256#15,>r=reg256#16
1667# asm 2: vpand 320(<input_2=%rdx),<a3=%ymm14,>r=%ymm15
1668vpand 320(%rdx),%ymm14,%ymm15
1669
1670# qhasm: r13 ^= r
1671# asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4
1672# asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3
1673vpxor %ymm15,%ymm3,%ymm3
1674
1675# qhasm: r = a3 & mem256[input_2 + 352]
1676# asm 1: vpand 352(<input_2=int64#3),<a3=reg256#15,>r=reg256#16
1677# asm 2: vpand 352(<input_2=%rdx),<a3=%ymm14,>r=%ymm15
1678vpand 352(%rdx),%ymm14,%ymm15
1679
1680# qhasm: r14 ^= r
1681# asm 1: vpxor <r=reg256#16,<r14=reg256#5,<r14=reg256#5
1682# asm 2: vpxor <r=%ymm15,<r14=%ymm4,<r14=%ymm4
1683vpxor %ymm15,%ymm4,%ymm4
1684
1685# qhasm: r = a3 & mem256[input_2 + 384]
1686# asm 1: vpand 384(<input_2=int64#3),<a3=reg256#15,>r=reg256#15
1687# asm 2: vpand 384(<input_2=%rdx),<a3=%ymm14,>r=%ymm14
1688vpand 384(%rdx),%ymm14,%ymm14
1689
1690# qhasm: r15 ^= r
1691# asm 1: vpxor <r=reg256#15,<r15=reg256#6,<r15=reg256#6
1692# asm 2: vpxor <r=%ymm14,<r15=%ymm5,<r15=%ymm5
1693vpxor %ymm14,%ymm5,%ymm5
1694
1695# qhasm: r6 ^= r15
1696# asm 1: vpxor <r15=reg256#6,<r6=reg256#10,<r6=reg256#10
1697# asm 2: vpxor <r15=%ymm5,<r6=%ymm9,<r6=%ymm9
1698vpxor %ymm5,%ymm9,%ymm9
1699
1700# qhasm: r5 ^= r15
1701# asm 1: vpxor <r15=reg256#6,<r5=reg256#9,<r5=reg256#9
1702# asm 2: vpxor <r15=%ymm5,<r5=%ymm8,<r5=%ymm8
1703vpxor %ymm5,%ymm8,%ymm8
1704
1705# qhasm: r3 ^= r15
1706# asm 1: vpxor <r15=reg256#6,<r3=reg256#7,<r3=reg256#7
1707# asm 2: vpxor <r15=%ymm5,<r3=%ymm6,<r3=%ymm6
1708vpxor %ymm5,%ymm6,%ymm6
1709
1710# qhasm: r2 = r15
1711# asm 1: vmovapd <r15=reg256#6,>r2=reg256#6
1712# asm 2: vmovapd <r15=%ymm5,>r2=%ymm5
1713vmovapd %ymm5,%ymm5
1714
1715# qhasm: a2 = mem256[ input_0 + 64 ]
1716# asm 1: vmovupd   64(<input_0=int64#1),>a2=reg256#15
1717# asm 2: vmovupd   64(<input_0=%rdi),>a2=%ymm14
1718vmovupd   64(%rdi),%ymm14
1719
1720# qhasm: a2 = a2 ^ mem256[ input_1 + 64 ]
1721# asm 1: vpxor 64(<input_1=int64#2),<a2=reg256#15,>a2=reg256#15
1722# asm 2: vpxor 64(<input_1=%rsi),<a2=%ymm14,>a2=%ymm14
1723vpxor 64(%rsi),%ymm14,%ymm14
1724
1725# qhasm: mem256[ input_0 + 64 ] = a2
1726# asm 1: vmovupd   <a2=reg256#15,64(<input_0=int64#1)
1727# asm 2: vmovupd   <a2=%ymm14,64(<input_0=%rdi)
1728vmovupd   %ymm14,64(%rdi)
1729
1730# qhasm: r = a2 & b0
1731# asm 1: vpand <a2=reg256#15,<b0=reg256#1,>r=reg256#16
1732# asm 2: vpand <a2=%ymm14,<b0=%ymm0,>r=%ymm15
1733vpand %ymm14,%ymm0,%ymm15
1734
1735# qhasm: r2 ^= r
1736# asm 1: vpxor <r=reg256#16,<r2=reg256#6,<r2=reg256#6
1737# asm 2: vpxor <r=%ymm15,<r2=%ymm5,<r2=%ymm5
1738vpxor %ymm15,%ymm5,%ymm5
1739
1740# qhasm: r = a2 & mem256[input_2 + 32]
1741# asm 1: vpand 32(<input_2=int64#3),<a2=reg256#15,>r=reg256#16
1742# asm 2: vpand 32(<input_2=%rdx),<a2=%ymm14,>r=%ymm15
1743vpand 32(%rdx),%ymm14,%ymm15
1744
1745# qhasm: r3 ^= r
1746# asm 1: vpxor <r=reg256#16,<r3=reg256#7,<r3=reg256#7
1747# asm 2: vpxor <r=%ymm15,<r3=%ymm6,<r3=%ymm6
1748vpxor %ymm15,%ymm6,%ymm6
1749
1750# qhasm: r = a2 & mem256[input_2 + 64]
1751# asm 1: vpand 64(<input_2=int64#3),<a2=reg256#15,>r=reg256#16
1752# asm 2: vpand 64(<input_2=%rdx),<a2=%ymm14,>r=%ymm15
1753vpand 64(%rdx),%ymm14,%ymm15
1754
1755# qhasm: r4 ^= r
1756# asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8
1757# asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7
1758vpxor %ymm15,%ymm7,%ymm7
1759
1760# qhasm: r = a2 & mem256[input_2 + 96]
1761# asm 1: vpand 96(<input_2=int64#3),<a2=reg256#15,>r=reg256#16
1762# asm 2: vpand 96(<input_2=%rdx),<a2=%ymm14,>r=%ymm15
1763vpand 96(%rdx),%ymm14,%ymm15
1764
1765# qhasm: r5 ^= r
1766# asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9
1767# asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8
1768vpxor %ymm15,%ymm8,%ymm8
1769
1770# qhasm: r = a2 & mem256[input_2 + 128]
1771# asm 1: vpand 128(<input_2=int64#3),<a2=reg256#15,>r=reg256#16
1772# asm 2: vpand 128(<input_2=%rdx),<a2=%ymm14,>r=%ymm15
1773vpand 128(%rdx),%ymm14,%ymm15
1774
1775# qhasm: r6 ^= r
1776# asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10
1777# asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9
1778vpxor %ymm15,%ymm9,%ymm9
1779
1780# qhasm: r = a2 & mem256[input_2 + 160]
1781# asm 1: vpand 160(<input_2=int64#3),<a2=reg256#15,>r=reg256#16
1782# asm 2: vpand 160(<input_2=%rdx),<a2=%ymm14,>r=%ymm15
1783vpand 160(%rdx),%ymm14,%ymm15
1784
1785# qhasm: r7 ^= r
1786# asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11
1787# asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10
1788vpxor %ymm15,%ymm10,%ymm10
1789
1790# qhasm: r = a2 & mem256[input_2 + 192]
1791# asm 1: vpand 192(<input_2=int64#3),<a2=reg256#15,>r=reg256#16
1792# asm 2: vpand 192(<input_2=%rdx),<a2=%ymm14,>r=%ymm15
1793vpand 192(%rdx),%ymm14,%ymm15
1794
1795# qhasm: r8 ^= r
1796# asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12
1797# asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11
1798vpxor %ymm15,%ymm11,%ymm11
1799
1800# qhasm: r = a2 & mem256[input_2 + 224]
1801# asm 1: vpand 224(<input_2=int64#3),<a2=reg256#15,>r=reg256#16
1802# asm 2: vpand 224(<input_2=%rdx),<a2=%ymm14,>r=%ymm15
1803vpand 224(%rdx),%ymm14,%ymm15
1804
1805# qhasm: r9 ^= r
1806# asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13
1807# asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12
1808vpxor %ymm15,%ymm12,%ymm12
1809
1810# qhasm: r = a2 & mem256[input_2 + 256]
1811# asm 1: vpand 256(<input_2=int64#3),<a2=reg256#15,>r=reg256#16
1812# asm 2: vpand 256(<input_2=%rdx),<a2=%ymm14,>r=%ymm15
1813vpand 256(%rdx),%ymm14,%ymm15
1814
1815# qhasm: r10 ^= r
1816# asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14
1817# asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13
1818vpxor %ymm15,%ymm13,%ymm13
1819
1820# qhasm: r = a2 & mem256[input_2 + 288]
1821# asm 1: vpand 288(<input_2=int64#3),<a2=reg256#15,>r=reg256#16
1822# asm 2: vpand 288(<input_2=%rdx),<a2=%ymm14,>r=%ymm15
1823vpand 288(%rdx),%ymm14,%ymm15
1824
1825# qhasm: r11 ^= r
1826# asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2
1827# asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1
1828vpxor %ymm15,%ymm1,%ymm1
1829
1830# qhasm: r = a2 & mem256[input_2 + 320]
1831# asm 1: vpand 320(<input_2=int64#3),<a2=reg256#15,>r=reg256#16
1832# asm 2: vpand 320(<input_2=%rdx),<a2=%ymm14,>r=%ymm15
1833vpand 320(%rdx),%ymm14,%ymm15
1834
1835# qhasm: r12 ^= r
1836# asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3
1837# asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2
1838vpxor %ymm15,%ymm2,%ymm2
1839
1840# qhasm: r = a2 & mem256[input_2 + 352]
1841# asm 1: vpand 352(<input_2=int64#3),<a2=reg256#15,>r=reg256#16
1842# asm 2: vpand 352(<input_2=%rdx),<a2=%ymm14,>r=%ymm15
1843vpand 352(%rdx),%ymm14,%ymm15
1844
1845# qhasm: r13 ^= r
1846# asm 1: vpxor <r=reg256#16,<r13=reg256#4,<r13=reg256#4
1847# asm 2: vpxor <r=%ymm15,<r13=%ymm3,<r13=%ymm3
1848vpxor %ymm15,%ymm3,%ymm3
1849
1850# qhasm: r = a2 & mem256[input_2 + 384]
1851# asm 1: vpand 384(<input_2=int64#3),<a2=reg256#15,>r=reg256#15
1852# asm 2: vpand 384(<input_2=%rdx),<a2=%ymm14,>r=%ymm14
1853vpand 384(%rdx),%ymm14,%ymm14
1854
1855# qhasm: r14 ^= r
1856# asm 1: vpxor <r=reg256#15,<r14=reg256#5,<r14=reg256#5
1857# asm 2: vpxor <r=%ymm14,<r14=%ymm4,<r14=%ymm4
1858vpxor %ymm14,%ymm4,%ymm4
1859
1860# qhasm: r5 ^= r14
1861# asm 1: vpxor <r14=reg256#5,<r5=reg256#9,<r5=reg256#9
1862# asm 2: vpxor <r14=%ymm4,<r5=%ymm8,<r5=%ymm8
1863vpxor %ymm4,%ymm8,%ymm8
1864
1865# qhasm: r4 ^= r14
1866# asm 1: vpxor <r14=reg256#5,<r4=reg256#8,<r4=reg256#8
1867# asm 2: vpxor <r14=%ymm4,<r4=%ymm7,<r4=%ymm7
1868vpxor %ymm4,%ymm7,%ymm7
1869
1870# qhasm: r2 ^= r14
1871# asm 1: vpxor <r14=reg256#5,<r2=reg256#6,<r2=reg256#6
1872# asm 2: vpxor <r14=%ymm4,<r2=%ymm5,<r2=%ymm5
1873vpxor %ymm4,%ymm5,%ymm5
1874
1875# qhasm: r1 = r14
1876# asm 1: vmovapd <r14=reg256#5,>r1=reg256#5
1877# asm 2: vmovapd <r14=%ymm4,>r1=%ymm4
1878vmovapd %ymm4,%ymm4
1879
1880# qhasm: a1 = mem256[ input_0 + 32 ]
1881# asm 1: vmovupd   32(<input_0=int64#1),>a1=reg256#15
1882# asm 2: vmovupd   32(<input_0=%rdi),>a1=%ymm14
1883vmovupd   32(%rdi),%ymm14
1884
1885# qhasm: a1 = a1 ^ mem256[ input_1 + 32 ]
1886# asm 1: vpxor 32(<input_1=int64#2),<a1=reg256#15,>a1=reg256#15
1887# asm 2: vpxor 32(<input_1=%rsi),<a1=%ymm14,>a1=%ymm14
1888vpxor 32(%rsi),%ymm14,%ymm14
1889
1890# qhasm: mem256[ input_0 + 32 ] = a1
1891# asm 1: vmovupd   <a1=reg256#15,32(<input_0=int64#1)
1892# asm 2: vmovupd   <a1=%ymm14,32(<input_0=%rdi)
1893vmovupd   %ymm14,32(%rdi)
1894
1895# qhasm: r = a1 & b0
1896# asm 1: vpand <a1=reg256#15,<b0=reg256#1,>r=reg256#16
1897# asm 2: vpand <a1=%ymm14,<b0=%ymm0,>r=%ymm15
1898vpand %ymm14,%ymm0,%ymm15
1899
1900# qhasm: r1 ^= r
1901# asm 1: vpxor <r=reg256#16,<r1=reg256#5,<r1=reg256#5
1902# asm 2: vpxor <r=%ymm15,<r1=%ymm4,<r1=%ymm4
1903vpxor %ymm15,%ymm4,%ymm4
1904
1905# qhasm: r = a1 & mem256[input_2 + 32]
1906# asm 1: vpand 32(<input_2=int64#3),<a1=reg256#15,>r=reg256#16
1907# asm 2: vpand 32(<input_2=%rdx),<a1=%ymm14,>r=%ymm15
1908vpand 32(%rdx),%ymm14,%ymm15
1909
1910# qhasm: r2 ^= r
1911# asm 1: vpxor <r=reg256#16,<r2=reg256#6,<r2=reg256#6
1912# asm 2: vpxor <r=%ymm15,<r2=%ymm5,<r2=%ymm5
1913vpxor %ymm15,%ymm5,%ymm5
1914
1915# qhasm: r = a1 & mem256[input_2 + 64]
1916# asm 1: vpand 64(<input_2=int64#3),<a1=reg256#15,>r=reg256#16
1917# asm 2: vpand 64(<input_2=%rdx),<a1=%ymm14,>r=%ymm15
1918vpand 64(%rdx),%ymm14,%ymm15
1919
1920# qhasm: r3 ^= r
1921# asm 1: vpxor <r=reg256#16,<r3=reg256#7,<r3=reg256#7
1922# asm 2: vpxor <r=%ymm15,<r3=%ymm6,<r3=%ymm6
1923vpxor %ymm15,%ymm6,%ymm6
1924
1925# qhasm: r = a1 & mem256[input_2 + 96]
1926# asm 1: vpand 96(<input_2=int64#3),<a1=reg256#15,>r=reg256#16
1927# asm 2: vpand 96(<input_2=%rdx),<a1=%ymm14,>r=%ymm15
1928vpand 96(%rdx),%ymm14,%ymm15
1929
1930# qhasm: r4 ^= r
1931# asm 1: vpxor <r=reg256#16,<r4=reg256#8,<r4=reg256#8
1932# asm 2: vpxor <r=%ymm15,<r4=%ymm7,<r4=%ymm7
1933vpxor %ymm15,%ymm7,%ymm7
1934
1935# qhasm: r = a1 & mem256[input_2 + 128]
1936# asm 1: vpand 128(<input_2=int64#3),<a1=reg256#15,>r=reg256#16
1937# asm 2: vpand 128(<input_2=%rdx),<a1=%ymm14,>r=%ymm15
1938vpand 128(%rdx),%ymm14,%ymm15
1939
1940# qhasm: r5 ^= r
1941# asm 1: vpxor <r=reg256#16,<r5=reg256#9,<r5=reg256#9
1942# asm 2: vpxor <r=%ymm15,<r5=%ymm8,<r5=%ymm8
1943vpxor %ymm15,%ymm8,%ymm8
1944
1945# qhasm: r = a1 & mem256[input_2 + 160]
1946# asm 1: vpand 160(<input_2=int64#3),<a1=reg256#15,>r=reg256#16
1947# asm 2: vpand 160(<input_2=%rdx),<a1=%ymm14,>r=%ymm15
1948vpand 160(%rdx),%ymm14,%ymm15
1949
1950# qhasm: r6 ^= r
1951# asm 1: vpxor <r=reg256#16,<r6=reg256#10,<r6=reg256#10
1952# asm 2: vpxor <r=%ymm15,<r6=%ymm9,<r6=%ymm9
1953vpxor %ymm15,%ymm9,%ymm9
1954
1955# qhasm: r = a1 & mem256[input_2 + 192]
1956# asm 1: vpand 192(<input_2=int64#3),<a1=reg256#15,>r=reg256#16
1957# asm 2: vpand 192(<input_2=%rdx),<a1=%ymm14,>r=%ymm15
1958vpand 192(%rdx),%ymm14,%ymm15
1959
1960# qhasm: r7 ^= r
1961# asm 1: vpxor <r=reg256#16,<r7=reg256#11,<r7=reg256#11
1962# asm 2: vpxor <r=%ymm15,<r7=%ymm10,<r7=%ymm10
1963vpxor %ymm15,%ymm10,%ymm10
1964
1965# qhasm: r = a1 & mem256[input_2 + 224]
1966# asm 1: vpand 224(<input_2=int64#3),<a1=reg256#15,>r=reg256#16
1967# asm 2: vpand 224(<input_2=%rdx),<a1=%ymm14,>r=%ymm15
1968vpand 224(%rdx),%ymm14,%ymm15
1969
1970# qhasm: r8 ^= r
1971# asm 1: vpxor <r=reg256#16,<r8=reg256#12,<r8=reg256#12
1972# asm 2: vpxor <r=%ymm15,<r8=%ymm11,<r8=%ymm11
1973vpxor %ymm15,%ymm11,%ymm11
1974
1975# qhasm: r = a1 & mem256[input_2 + 256]
1976# asm 1: vpand 256(<input_2=int64#3),<a1=reg256#15,>r=reg256#16
1977# asm 2: vpand 256(<input_2=%rdx),<a1=%ymm14,>r=%ymm15
1978vpand 256(%rdx),%ymm14,%ymm15
1979
1980# qhasm: r9 ^= r
1981# asm 1: vpxor <r=reg256#16,<r9=reg256#13,<r9=reg256#13
1982# asm 2: vpxor <r=%ymm15,<r9=%ymm12,<r9=%ymm12
1983vpxor %ymm15,%ymm12,%ymm12
1984
1985# qhasm: r = a1 & mem256[input_2 + 288]
1986# asm 1: vpand 288(<input_2=int64#3),<a1=reg256#15,>r=reg256#16
1987# asm 2: vpand 288(<input_2=%rdx),<a1=%ymm14,>r=%ymm15
1988vpand 288(%rdx),%ymm14,%ymm15
1989
1990# qhasm: r10 ^= r
1991# asm 1: vpxor <r=reg256#16,<r10=reg256#14,<r10=reg256#14
1992# asm 2: vpxor <r=%ymm15,<r10=%ymm13,<r10=%ymm13
1993vpxor %ymm15,%ymm13,%ymm13
1994
1995# qhasm: r = a1 & mem256[input_2 + 320]
1996# asm 1: vpand 320(<input_2=int64#3),<a1=reg256#15,>r=reg256#16
1997# asm 2: vpand 320(<input_2=%rdx),<a1=%ymm14,>r=%ymm15
1998vpand 320(%rdx),%ymm14,%ymm15
1999
2000# qhasm: r11 ^= r
2001# asm 1: vpxor <r=reg256#16,<r11=reg256#2,<r11=reg256#2
2002# asm 2: vpxor <r=%ymm15,<r11=%ymm1,<r11=%ymm1
2003vpxor %ymm15,%ymm1,%ymm1
2004
2005# qhasm: r = a1 & mem256[input_2 + 352]
2006# asm 1: vpand 352(<input_2=int64#3),<a1=reg256#15,>r=reg256#16
2007# asm 2: vpand 352(<input_2=%rdx),<a1=%ymm14,>r=%ymm15
2008vpand 352(%rdx),%ymm14,%ymm15
2009
2010# qhasm: r12 ^= r
2011# asm 1: vpxor <r=reg256#16,<r12=reg256#3,<r12=reg256#3
2012# asm 2: vpxor <r=%ymm15,<r12=%ymm2,<r12=%ymm2
2013vpxor %ymm15,%ymm2,%ymm2
2014
2015# qhasm: r = a1 & mem256[input_2 + 384]
2016# asm 1: vpand 384(<input_2=int64#3),<a1=reg256#15,>r=reg256#15
2017# asm 2: vpand 384(<input_2=%rdx),<a1=%ymm14,>r=%ymm14
2018vpand 384(%rdx),%ymm14,%ymm14
2019
2020# qhasm: r13 ^= r
2021# asm 1: vpxor <r=reg256#15,<r13=reg256#4,<r13=reg256#4
2022# asm 2: vpxor <r=%ymm14,<r13=%ymm3,<r13=%ymm3
2023vpxor %ymm14,%ymm3,%ymm3
2024
2025# qhasm: r4 ^= r13
2026# asm 1: vpxor <r13=reg256#4,<r4=reg256#8,<r4=reg256#8
2027# asm 2: vpxor <r13=%ymm3,<r4=%ymm7,<r4=%ymm7
2028vpxor %ymm3,%ymm7,%ymm7
2029
2030# qhasm: r3 ^= r13
2031# asm 1: vpxor <r13=reg256#4,<r3=reg256#7,<r3=reg256#7
2032# asm 2: vpxor <r13=%ymm3,<r3=%ymm6,<r3=%ymm6
2033vpxor %ymm3,%ymm6,%ymm6
2034
2035# qhasm: r1 ^= r13
2036# asm 1: vpxor <r13=reg256#4,<r1=reg256#5,<r1=reg256#5
2037# asm 2: vpxor <r13=%ymm3,<r1=%ymm4,<r1=%ymm4
2038vpxor %ymm3,%ymm4,%ymm4
2039
2040# qhasm: r0 = r13
2041# asm 1: vmovapd <r13=reg256#4,>r0=reg256#4
2042# asm 2: vmovapd <r13=%ymm3,>r0=%ymm3
2043vmovapd %ymm3,%ymm3
2044
2045# qhasm: a0 = mem256[ input_0 + 0 ]
2046# asm 1: vmovupd   0(<input_0=int64#1),>a0=reg256#15
2047# asm 2: vmovupd   0(<input_0=%rdi),>a0=%ymm14
2048vmovupd   0(%rdi),%ymm14
2049
2050# qhasm: a0 = a0 ^ mem256[ input_1 + 0 ]
2051# asm 1: vpxor 0(<input_1=int64#2),<a0=reg256#15,>a0=reg256#15
2052# asm 2: vpxor 0(<input_1=%rsi),<a0=%ymm14,>a0=%ymm14
2053vpxor 0(%rsi),%ymm14,%ymm14
2054
2055# qhasm: mem256[ input_0 + 0 ] = a0
2056# asm 1: vmovupd   <a0=reg256#15,0(<input_0=int64#1)
2057# asm 2: vmovupd   <a0=%ymm14,0(<input_0=%rdi)
2058vmovupd   %ymm14,0(%rdi)
2059
2060# qhasm: r = a0 & b0
2061# asm 1: vpand <a0=reg256#15,<b0=reg256#1,>r=reg256#1
2062# asm 2: vpand <a0=%ymm14,<b0=%ymm0,>r=%ymm0
2063vpand %ymm14,%ymm0,%ymm0
2064
2065# qhasm: r0 ^= r
2066# asm 1: vpxor <r=reg256#1,<r0=reg256#4,<r0=reg256#4
2067# asm 2: vpxor <r=%ymm0,<r0=%ymm3,<r0=%ymm3
2068vpxor %ymm0,%ymm3,%ymm3
2069
2070# qhasm: r = a0 & mem256[input_2 + 32]
2071# asm 1: vpand 32(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
2072# asm 2: vpand 32(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
2073vpand 32(%rdx),%ymm14,%ymm0
2074
2075# qhasm: r1 ^= r
2076# asm 1: vpxor <r=reg256#1,<r1=reg256#5,<r1=reg256#5
2077# asm 2: vpxor <r=%ymm0,<r1=%ymm4,<r1=%ymm4
2078vpxor %ymm0,%ymm4,%ymm4
2079
2080# qhasm: r = a0 & mem256[input_2 + 64]
2081# asm 1: vpand 64(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
2082# asm 2: vpand 64(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
2083vpand 64(%rdx),%ymm14,%ymm0
2084
2085# qhasm: r2 ^= r
2086# asm 1: vpxor <r=reg256#1,<r2=reg256#6,<r2=reg256#6
2087# asm 2: vpxor <r=%ymm0,<r2=%ymm5,<r2=%ymm5
2088vpxor %ymm0,%ymm5,%ymm5
2089
2090# qhasm: r = a0 & mem256[input_2 + 96]
2091# asm 1: vpand 96(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
2092# asm 2: vpand 96(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
2093vpand 96(%rdx),%ymm14,%ymm0
2094
2095# qhasm: r3 ^= r
2096# asm 1: vpxor <r=reg256#1,<r3=reg256#7,<r3=reg256#7
2097# asm 2: vpxor <r=%ymm0,<r3=%ymm6,<r3=%ymm6
2098vpxor %ymm0,%ymm6,%ymm6
2099
2100# qhasm: r = a0 & mem256[input_2 + 128]
2101# asm 1: vpand 128(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
2102# asm 2: vpand 128(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
2103vpand 128(%rdx),%ymm14,%ymm0
2104
2105# qhasm: r4 ^= r
2106# asm 1: vpxor <r=reg256#1,<r4=reg256#8,<r4=reg256#8
2107# asm 2: vpxor <r=%ymm0,<r4=%ymm7,<r4=%ymm7
2108vpxor %ymm0,%ymm7,%ymm7
2109
2110# qhasm: r = a0 & mem256[input_2 + 160]
2111# asm 1: vpand 160(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
2112# asm 2: vpand 160(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
2113vpand 160(%rdx),%ymm14,%ymm0
2114
2115# qhasm: r5 ^= r
2116# asm 1: vpxor <r=reg256#1,<r5=reg256#9,<r5=reg256#9
2117# asm 2: vpxor <r=%ymm0,<r5=%ymm8,<r5=%ymm8
2118vpxor %ymm0,%ymm8,%ymm8
2119
2120# qhasm: r = a0 & mem256[input_2 + 192]
2121# asm 1: vpand 192(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
2122# asm 2: vpand 192(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
2123vpand 192(%rdx),%ymm14,%ymm0
2124
2125# qhasm: r6 ^= r
2126# asm 1: vpxor <r=reg256#1,<r6=reg256#10,<r6=reg256#10
2127# asm 2: vpxor <r=%ymm0,<r6=%ymm9,<r6=%ymm9
2128vpxor %ymm0,%ymm9,%ymm9
2129
2130# qhasm: r = a0 & mem256[input_2 + 224]
2131# asm 1: vpand 224(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
2132# asm 2: vpand 224(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
2133vpand 224(%rdx),%ymm14,%ymm0
2134
2135# qhasm: r7 ^= r
2136# asm 1: vpxor <r=reg256#1,<r7=reg256#11,<r7=reg256#11
2137# asm 2: vpxor <r=%ymm0,<r7=%ymm10,<r7=%ymm10
2138vpxor %ymm0,%ymm10,%ymm10
2139
2140# qhasm: r = a0 & mem256[input_2 + 256]
2141# asm 1: vpand 256(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
2142# asm 2: vpand 256(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
2143vpand 256(%rdx),%ymm14,%ymm0
2144
2145# qhasm: r8 ^= r
2146# asm 1: vpxor <r=reg256#1,<r8=reg256#12,<r8=reg256#12
2147# asm 2: vpxor <r=%ymm0,<r8=%ymm11,<r8=%ymm11
2148vpxor %ymm0,%ymm11,%ymm11
2149
2150# qhasm: r = a0 & mem256[input_2 + 288]
2151# asm 1: vpand 288(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
2152# asm 2: vpand 288(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
2153vpand 288(%rdx),%ymm14,%ymm0
2154
2155# qhasm: r9 ^= r
2156# asm 1: vpxor <r=reg256#1,<r9=reg256#13,<r9=reg256#13
2157# asm 2: vpxor <r=%ymm0,<r9=%ymm12,<r9=%ymm12
2158vpxor %ymm0,%ymm12,%ymm12
2159
2160# qhasm: r = a0 & mem256[input_2 + 320]
2161# asm 1: vpand 320(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
2162# asm 2: vpand 320(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
2163vpand 320(%rdx),%ymm14,%ymm0
2164
2165# qhasm: r10 ^= r
2166# asm 1: vpxor <r=reg256#1,<r10=reg256#14,<r10=reg256#14
2167# asm 2: vpxor <r=%ymm0,<r10=%ymm13,<r10=%ymm13
2168vpxor %ymm0,%ymm13,%ymm13
2169
2170# qhasm: r = a0 & mem256[input_2 + 352]
2171# asm 1: vpand 352(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
2172# asm 2: vpand 352(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
2173vpand 352(%rdx),%ymm14,%ymm0
2174
2175# qhasm: r11 ^= r
2176# asm 1: vpxor <r=reg256#1,<r11=reg256#2,<r11=reg256#2
2177# asm 2: vpxor <r=%ymm0,<r11=%ymm1,<r11=%ymm1
2178vpxor %ymm0,%ymm1,%ymm1
2179
2180# qhasm: r = a0 & mem256[input_2 + 384]
2181# asm 1: vpand 384(<input_2=int64#3),<a0=reg256#15,>r=reg256#1
2182# asm 2: vpand 384(<input_2=%rdx),<a0=%ymm14,>r=%ymm0
2183vpand 384(%rdx),%ymm14,%ymm0
2184
2185# qhasm: r12 ^= r
2186# asm 1: vpxor <r=reg256#1,<r12=reg256#3,<r12=reg256#3
2187# asm 2: vpxor <r=%ymm0,<r12=%ymm2,<r12=%ymm2
2188vpxor %ymm0,%ymm2,%ymm2
2189
2190# qhasm: r12 = r12 ^ mem256[ input_1 + 384 ]
2191# asm 1: vpxor 384(<input_1=int64#2),<r12=reg256#3,>r12=reg256#1
2192# asm 2: vpxor 384(<input_1=%rsi),<r12=%ymm2,>r12=%ymm0
2193vpxor 384(%rsi),%ymm2,%ymm0
2194
2195# qhasm: mem256[ input_1 + 384 ] = r12
2196# asm 1: vmovupd   <r12=reg256#1,384(<input_1=int64#2)
2197# asm 2: vmovupd   <r12=%ymm0,384(<input_1=%rsi)
2198vmovupd   %ymm0,384(%rsi)
2199
2200# qhasm: r11 = r11 ^ mem256[ input_1 + 352 ]
2201# asm 1: vpxor 352(<input_1=int64#2),<r11=reg256#2,>r11=reg256#1
2202# asm 2: vpxor 352(<input_1=%rsi),<r11=%ymm1,>r11=%ymm0
2203vpxor 352(%rsi),%ymm1,%ymm0
2204
2205# qhasm: mem256[ input_1 + 352 ] = r11
2206# asm 1: vmovupd   <r11=reg256#1,352(<input_1=int64#2)
2207# asm 2: vmovupd   <r11=%ymm0,352(<input_1=%rsi)
2208vmovupd   %ymm0,352(%rsi)
2209
2210# qhasm: r10 = r10 ^ mem256[ input_1 + 320 ]
2211# asm 1: vpxor 320(<input_1=int64#2),<r10=reg256#14,>r10=reg256#1
2212# asm 2: vpxor 320(<input_1=%rsi),<r10=%ymm13,>r10=%ymm0
2213vpxor 320(%rsi),%ymm13,%ymm0
2214
2215# qhasm: mem256[ input_1 + 320 ] = r10
2216# asm 1: vmovupd   <r10=reg256#1,320(<input_1=int64#2)
2217# asm 2: vmovupd   <r10=%ymm0,320(<input_1=%rsi)
2218vmovupd   %ymm0,320(%rsi)
2219
2220# qhasm: r9 = r9 ^ mem256[ input_1 + 288 ]
2221# asm 1: vpxor 288(<input_1=int64#2),<r9=reg256#13,>r9=reg256#1
2222# asm 2: vpxor 288(<input_1=%rsi),<r9=%ymm12,>r9=%ymm0
2223vpxor 288(%rsi),%ymm12,%ymm0
2224
2225# qhasm: mem256[ input_1 + 288 ] = r9
2226# asm 1: vmovupd   <r9=reg256#1,288(<input_1=int64#2)
2227# asm 2: vmovupd   <r9=%ymm0,288(<input_1=%rsi)
2228vmovupd   %ymm0,288(%rsi)
2229
2230# qhasm: r8 = r8 ^ mem256[ input_1 + 256 ]
2231# asm 1: vpxor 256(<input_1=int64#2),<r8=reg256#12,>r8=reg256#1
2232# asm 2: vpxor 256(<input_1=%rsi),<r8=%ymm11,>r8=%ymm0
2233vpxor 256(%rsi),%ymm11,%ymm0
2234
2235# qhasm: mem256[ input_1 + 256 ] = r8
2236# asm 1: vmovupd   <r8=reg256#1,256(<input_1=int64#2)
2237# asm 2: vmovupd   <r8=%ymm0,256(<input_1=%rsi)
2238vmovupd   %ymm0,256(%rsi)
2239
2240# qhasm: r7 = r7 ^ mem256[ input_1 + 224 ]
2241# asm 1: vpxor 224(<input_1=int64#2),<r7=reg256#11,>r7=reg256#1
2242# asm 2: vpxor 224(<input_1=%rsi),<r7=%ymm10,>r7=%ymm0
2243vpxor 224(%rsi),%ymm10,%ymm0
2244
2245# qhasm: mem256[ input_1 + 224 ] = r7
2246# asm 1: vmovupd   <r7=reg256#1,224(<input_1=int64#2)
2247# asm 2: vmovupd   <r7=%ymm0,224(<input_1=%rsi)
2248vmovupd   %ymm0,224(%rsi)
2249
2250# qhasm: r6 = r6 ^ mem256[ input_1 + 192 ]
2251# asm 1: vpxor 192(<input_1=int64#2),<r6=reg256#10,>r6=reg256#1
2252# asm 2: vpxor 192(<input_1=%rsi),<r6=%ymm9,>r6=%ymm0
2253vpxor 192(%rsi),%ymm9,%ymm0
2254
2255# qhasm: mem256[ input_1 + 192 ] = r6
2256# asm 1: vmovupd   <r6=reg256#1,192(<input_1=int64#2)
2257# asm 2: vmovupd   <r6=%ymm0,192(<input_1=%rsi)
2258vmovupd   %ymm0,192(%rsi)
2259
2260# qhasm: r5 = r5 ^ mem256[ input_1 + 160 ]
2261# asm 1: vpxor 160(<input_1=int64#2),<r5=reg256#9,>r5=reg256#1
2262# asm 2: vpxor 160(<input_1=%rsi),<r5=%ymm8,>r5=%ymm0
2263vpxor 160(%rsi),%ymm8,%ymm0
2264
2265# qhasm: mem256[ input_1 + 160 ] = r5
2266# asm 1: vmovupd   <r5=reg256#1,160(<input_1=int64#2)
2267# asm 2: vmovupd   <r5=%ymm0,160(<input_1=%rsi)
2268vmovupd   %ymm0,160(%rsi)
2269
2270# qhasm: r4 = r4 ^ mem256[ input_1 + 128 ]
2271# asm 1: vpxor 128(<input_1=int64#2),<r4=reg256#8,>r4=reg256#1
2272# asm 2: vpxor 128(<input_1=%rsi),<r4=%ymm7,>r4=%ymm0
2273vpxor 128(%rsi),%ymm7,%ymm0
2274
2275# qhasm: mem256[ input_1 + 128 ] = r4
2276# asm 1: vmovupd   <r4=reg256#1,128(<input_1=int64#2)
2277# asm 2: vmovupd   <r4=%ymm0,128(<input_1=%rsi)
2278vmovupd   %ymm0,128(%rsi)
2279
2280# qhasm: r3 = r3 ^ mem256[ input_1 + 96 ]
2281# asm 1: vpxor 96(<input_1=int64#2),<r3=reg256#7,>r3=reg256#1
2282# asm 2: vpxor 96(<input_1=%rsi),<r3=%ymm6,>r3=%ymm0
2283vpxor 96(%rsi),%ymm6,%ymm0
2284
2285# qhasm: mem256[ input_1 + 96 ] = r3
2286# asm 1: vmovupd   <r3=reg256#1,96(<input_1=int64#2)
2287# asm 2: vmovupd   <r3=%ymm0,96(<input_1=%rsi)
2288vmovupd   %ymm0,96(%rsi)
2289
2290# qhasm: r2 = r2 ^ mem256[ input_1 + 64 ]
2291# asm 1: vpxor 64(<input_1=int64#2),<r2=reg256#6,>r2=reg256#1
2292# asm 2: vpxor 64(<input_1=%rsi),<r2=%ymm5,>r2=%ymm0
2293vpxor 64(%rsi),%ymm5,%ymm0
2294
2295# qhasm: mem256[ input_1 + 64 ] = r2
2296# asm 1: vmovupd   <r2=reg256#1,64(<input_1=int64#2)
2297# asm 2: vmovupd   <r2=%ymm0,64(<input_1=%rsi)
2298vmovupd   %ymm0,64(%rsi)
2299
2300# qhasm: r1 = r1 ^ mem256[ input_1 + 32 ]
2301# asm 1: vpxor 32(<input_1=int64#2),<r1=reg256#5,>r1=reg256#1
2302# asm 2: vpxor 32(<input_1=%rsi),<r1=%ymm4,>r1=%ymm0
2303vpxor 32(%rsi),%ymm4,%ymm0
2304
2305# qhasm: mem256[ input_1 + 32 ] = r1
2306# asm 1: vmovupd   <r1=reg256#1,32(<input_1=int64#2)
2307# asm 2: vmovupd   <r1=%ymm0,32(<input_1=%rsi)
2308vmovupd   %ymm0,32(%rsi)
2309
2310# qhasm: r0 = r0 ^ mem256[ input_1 + 0 ]
2311# asm 1: vpxor 0(<input_1=int64#2),<r0=reg256#4,>r0=reg256#1
2312# asm 2: vpxor 0(<input_1=%rsi),<r0=%ymm3,>r0=%ymm0
2313vpxor 0(%rsi),%ymm3,%ymm0
2314
2315# qhasm: mem256[ input_1 + 0 ] = r0
2316# asm 1: vmovupd   <r0=reg256#1,0(<input_1=int64#2)
2317# asm 2: vmovupd   <r0=%ymm0,0(<input_1=%rsi)
2318vmovupd   %ymm0,0(%rsi)
2319
2320# qhasm: return
2321add %r11,%rsp
2322ret
2323