# qhasm: int64 input_0

# qhasm: int64 input_1

# qhasm: int64 input_2

# qhasm: int64 input_3

# qhasm: int64 input_4

# qhasm: int64 input_5

# qhasm: stack64 input_6

# qhasm: stack64 input_7

# qhasm: int64 caller_r11

# qhasm: int64 caller_r12

# qhasm: int64 caller_r13

# qhasm: int64 caller_r14

# qhasm: int64 caller_r15

# qhasm: int64 caller_rbx

# qhasm: int64 caller_rbp

# qhasm: reg256 a0

# qhasm: reg256 a1

# qhasm: reg256 a2

# qhasm: reg256 a3

# qhasm: reg256 a4

# qhasm: reg256 a5

# qhasm: reg256 a6

# qhasm: reg256 a7

# qhasm: reg256 a8

# qhasm: reg256 a9

# qhasm: reg256 a10

# qhasm: reg256 a11

# qhasm: reg256 b0

# qhasm: reg256 b1

# qhasm: reg256 r0

# qhasm: reg256 r1

# qhasm: reg256 r2

# qhasm: reg256 r3

# qhasm: reg256 r4

# qhasm: reg256 r5

# qhasm: reg256 r6

# qhasm: reg256 r7

# qhasm: reg256 r8

# qhasm: reg256 r9

# qhasm: reg256 r10

# qhasm: reg256 r11

# qhasm: reg256 r12

# qhasm: reg256 r13

# qhasm: reg256 r14

# qhasm: reg256 r15

# qhasm: reg256 r16

# qhasm: reg256 r17

# qhasm: reg256 r18

# qhasm: reg256 r19

# qhasm: reg256 r20

# qhasm: reg256 r21

# qhasm: reg256 r22

# qhasm: reg256 r

# qhasm: enter vec256_mul_asm
.p2align 5
.global _PQCLEAN_MCELIECE348864_AVX_vec256_mul_asm
.global PQCLEAN_MCELIECE348864_AVX_vec256_mul_asm
_PQCLEAN_MCELIECE348864_AVX_vec256_mul_asm:
PQCLEAN_MCELIECE348864_AVX_vec256_mul_asm:
mov %rsp,%r11
and $31,%r11
add $0,%r11
sub %r11,%rsp
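
# Descriptive comment (inferred from the code below, not part of the qhasm
# output): vec256_mul_asm(out, a, b) multiplies 256 GF(2^12) elements in
# parallel, bitsliced form.  Each operand is 12 consecutive 256-bit limbs,
# 32 bytes apart, with bit k of every element in limb k; per the SysV AMD64
# ABI, %rdi = out, %rsi = a, %rdx = b.  Schoolbook product coefficients
# r0..r22 are accumulated row by row (a11 down to a0) and reduced on the
# fly; the fold pattern implies the field polynomial x^12 + x^3 + 1.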

# qhasm: b0 = mem256[ input_2 + 0 ]
# asm 1: vmovupd   0(<input_2=int64#3),>b0=reg256#1
# asm 2: vmovupd   0(<input_2=%rdx),>b0=%ymm0
vmovupd   0(%rdx),%ymm0

# qhasm: a11 = mem256[ input_1 + 352 ]
# asm 1: vmovupd   352(<input_1=int64#2),>a11=reg256#2
# asm 2: vmovupd   352(<input_1=%rsi),>a11=%ymm1
vmovupd   352(%rsi),%ymm1

# qhasm: r11 = a11 & b0
# asm 1: vpand <a11=reg256#2,<b0=reg256#1,>r11=reg256#3
# asm 2: vpand <a11=%ymm1,<b0=%ymm0,>r11=%ymm2
vpand %ymm1,%ymm0,%ymm2

# qhasm: r12 = a11 & mem256[input_2 + 32]
# asm 1: vpand 32(<input_2=int64#3),<a11=reg256#2,>r12=reg256#4
# asm 2: vpand 32(<input_2=%rdx),<a11=%ymm1,>r12=%ymm3
vpand 32(%rdx),%ymm1,%ymm3

# qhasm: r13 = a11 & mem256[input_2 + 64]
# asm 1: vpand 64(<input_2=int64#3),<a11=reg256#2,>r13=reg256#5
# asm 2: vpand 64(<input_2=%rdx),<a11=%ymm1,>r13=%ymm4
vpand 64(%rdx),%ymm1,%ymm4

# qhasm: r14 = a11 & mem256[input_2 + 96]
# asm 1: vpand 96(<input_2=int64#3),<a11=reg256#2,>r14=reg256#6
# asm 2: vpand 96(<input_2=%rdx),<a11=%ymm1,>r14=%ymm5
vpand 96(%rdx),%ymm1,%ymm5

# qhasm: r15 = a11 & mem256[input_2 + 128]
# asm 1: vpand 128(<input_2=int64#3),<a11=reg256#2,>r15=reg256#7
# asm 2: vpand 128(<input_2=%rdx),<a11=%ymm1,>r15=%ymm6
vpand 128(%rdx),%ymm1,%ymm6

# qhasm: r16 = a11 & mem256[input_2 + 160]
# asm 1: vpand 160(<input_2=int64#3),<a11=reg256#2,>r16=reg256#8
# asm 2: vpand 160(<input_2=%rdx),<a11=%ymm1,>r16=%ymm7
vpand 160(%rdx),%ymm1,%ymm7

# qhasm: r17 = a11 & mem256[input_2 + 192]
# asm 1: vpand 192(<input_2=int64#3),<a11=reg256#2,>r17=reg256#9
# asm 2: vpand 192(<input_2=%rdx),<a11=%ymm1,>r17=%ymm8
vpand 192(%rdx),%ymm1,%ymm8

# qhasm: r18 = a11 & mem256[input_2 + 224]
# asm 1: vpand 224(<input_2=int64#3),<a11=reg256#2,>r18=reg256#10
# asm 2: vpand 224(<input_2=%rdx),<a11=%ymm1,>r18=%ymm9
vpand 224(%rdx),%ymm1,%ymm9

# qhasm: r19 = a11 & mem256[input_2 + 256]
# asm 1: vpand 256(<input_2=int64#3),<a11=reg256#2,>r19=reg256#11
# asm 2: vpand 256(<input_2=%rdx),<a11=%ymm1,>r19=%ymm10
vpand 256(%rdx),%ymm1,%ymm10

# qhasm: r20 = a11 & mem256[input_2 + 288]
# asm 1: vpand 288(<input_2=int64#3),<a11=reg256#2,>r20=reg256#12
# asm 2: vpand 288(<input_2=%rdx),<a11=%ymm1,>r20=%ymm11
vpand 288(%rdx),%ymm1,%ymm11

# qhasm: r21 = a11 & mem256[input_2 + 320]
# asm 1: vpand 320(<input_2=int64#3),<a11=reg256#2,>r21=reg256#13
# asm 2: vpand 320(<input_2=%rdx),<a11=%ymm1,>r21=%ymm12
vpand 320(%rdx),%ymm1,%ymm12

# qhasm: r22 = a11 & mem256[input_2 + 352]
# asm 1: vpand 352(<input_2=int64#3),<a11=reg256#2,>r22=reg256#2
# asm 2: vpand 352(<input_2=%rdx),<a11=%ymm1,>r22=%ymm1
vpand 352(%rdx),%ymm1,%ymm1

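# Reduction fold (inferred): with f(x) = x^12 + x^3 + 1, x^(12+k) =
# x^(3+k) + x^k, so the completed high coefficient r22 folds into r13
# and r10.  Each remaining high coefficient is folded the same way as
# soon as the row that finishes it has run.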
# qhasm: r13 ^= r22
# asm 1: vpxor <r22=reg256#2,<r13=reg256#5,<r13=reg256#5
# asm 2: vpxor <r22=%ymm1,<r13=%ymm4,<r13=%ymm4
vpxor %ymm1,%ymm4,%ymm4

# qhasm: r10 = r22
# asm 1: vmovapd <r22=reg256#2,>r10=reg256#2
# asm 2: vmovapd <r22=%ymm1,>r10=%ymm1
vmovapd %ymm1,%ymm1

# qhasm: a10 = mem256[ input_1 + 320 ]
# asm 1: vmovupd   320(<input_1=int64#2),>a10=reg256#14
# asm 2: vmovupd   320(<input_1=%rsi),>a10=%ymm13
vmovupd   320(%rsi),%ymm13

# qhasm: r = a10 & b0
# asm 1: vpand <a10=reg256#14,<b0=reg256#1,>r=reg256#15
# asm 2: vpand <a10=%ymm13,<b0=%ymm0,>r=%ymm14
vpand %ymm13,%ymm0,%ymm14

# qhasm: r10 ^= r
# asm 1: vpxor <r=reg256#15,<r10=reg256#2,<r10=reg256#2
# asm 2: vpxor <r=%ymm14,<r10=%ymm1,<r10=%ymm1
vpxor %ymm14,%ymm1,%ymm1

# qhasm: r = a10 & mem256[input_2 + 32]
# asm 1: vpand 32(<input_2=int64#3),<a10=reg256#14,>r=reg256#15
# asm 2: vpand 32(<input_2=%rdx),<a10=%ymm13,>r=%ymm14
vpand 32(%rdx),%ymm13,%ymm14

# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#15,<r11=reg256#3,<r11=reg256#3
# asm 2: vpxor <r=%ymm14,<r11=%ymm2,<r11=%ymm2
vpxor %ymm14,%ymm2,%ymm2

# qhasm: r = a10 & mem256[input_2 + 64]
# asm 1: vpand 64(<input_2=int64#3),<a10=reg256#14,>r=reg256#15
# asm 2: vpand 64(<input_2=%rdx),<a10=%ymm13,>r=%ymm14
vpand 64(%rdx),%ymm13,%ymm14

# qhasm: r12 ^= r
# asm 1: vpxor <r=reg256#15,<r12=reg256#4,<r12=reg256#4
# asm 2: vpxor <r=%ymm14,<r12=%ymm3,<r12=%ymm3
vpxor %ymm14,%ymm3,%ymm3

# qhasm: r = a10 & mem256[input_2 + 96]
# asm 1: vpand 96(<input_2=int64#3),<a10=reg256#14,>r=reg256#15
# asm 2: vpand 96(<input_2=%rdx),<a10=%ymm13,>r=%ymm14
vpand 96(%rdx),%ymm13,%ymm14

# qhasm: r13 ^= r
# asm 1: vpxor <r=reg256#15,<r13=reg256#5,<r13=reg256#5
# asm 2: vpxor <r=%ymm14,<r13=%ymm4,<r13=%ymm4
vpxor %ymm14,%ymm4,%ymm4

# qhasm: r = a10 & mem256[input_2 + 128]
# asm 1: vpand 128(<input_2=int64#3),<a10=reg256#14,>r=reg256#15
# asm 2: vpand 128(<input_2=%rdx),<a10=%ymm13,>r=%ymm14
vpand 128(%rdx),%ymm13,%ymm14

# qhasm: r14 ^= r
# asm 1: vpxor <r=reg256#15,<r14=reg256#6,<r14=reg256#6
# asm 2: vpxor <r=%ymm14,<r14=%ymm5,<r14=%ymm5
vpxor %ymm14,%ymm5,%ymm5

# qhasm: r = a10 & mem256[input_2 + 160]
# asm 1: vpand 160(<input_2=int64#3),<a10=reg256#14,>r=reg256#15
# asm 2: vpand 160(<input_2=%rdx),<a10=%ymm13,>r=%ymm14
vpand 160(%rdx),%ymm13,%ymm14

# qhasm: r15 ^= r
# asm 1: vpxor <r=reg256#15,<r15=reg256#7,<r15=reg256#7
# asm 2: vpxor <r=%ymm14,<r15=%ymm6,<r15=%ymm6
vpxor %ymm14,%ymm6,%ymm6

# qhasm: r = a10 & mem256[input_2 + 192]
# asm 1: vpand 192(<input_2=int64#3),<a10=reg256#14,>r=reg256#15
# asm 2: vpand 192(<input_2=%rdx),<a10=%ymm13,>r=%ymm14
vpand 192(%rdx),%ymm13,%ymm14

# qhasm: r16 ^= r
# asm 1: vpxor <r=reg256#15,<r16=reg256#8,<r16=reg256#8
# asm 2: vpxor <r=%ymm14,<r16=%ymm7,<r16=%ymm7
vpxor %ymm14,%ymm7,%ymm7

# qhasm: r = a10 & mem256[input_2 + 224]
# asm 1: vpand 224(<input_2=int64#3),<a10=reg256#14,>r=reg256#15
# asm 2: vpand 224(<input_2=%rdx),<a10=%ymm13,>r=%ymm14
vpand 224(%rdx),%ymm13,%ymm14

# qhasm: r17 ^= r
# asm 1: vpxor <r=reg256#15,<r17=reg256#9,<r17=reg256#9
# asm 2: vpxor <r=%ymm14,<r17=%ymm8,<r17=%ymm8
vpxor %ymm14,%ymm8,%ymm8

# qhasm: r = a10 & mem256[input_2 + 256]
# asm 1: vpand 256(<input_2=int64#3),<a10=reg256#14,>r=reg256#15
# asm 2: vpand 256(<input_2=%rdx),<a10=%ymm13,>r=%ymm14
vpand 256(%rdx),%ymm13,%ymm14

# qhasm: r18 ^= r
# asm 1: vpxor <r=reg256#15,<r18=reg256#10,<r18=reg256#10
# asm 2: vpxor <r=%ymm14,<r18=%ymm9,<r18=%ymm9
vpxor %ymm14,%ymm9,%ymm9

# qhasm: r = a10 & mem256[input_2 + 288]
# asm 1: vpand 288(<input_2=int64#3),<a10=reg256#14,>r=reg256#15
# asm 2: vpand 288(<input_2=%rdx),<a10=%ymm13,>r=%ymm14
vpand 288(%rdx),%ymm13,%ymm14

# qhasm: r19 ^= r
# asm 1: vpxor <r=reg256#15,<r19=reg256#11,<r19=reg256#11
# asm 2: vpxor <r=%ymm14,<r19=%ymm10,<r19=%ymm10
vpxor %ymm14,%ymm10,%ymm10

# qhasm: r = a10 & mem256[input_2 + 320]
# asm 1: vpand 320(<input_2=int64#3),<a10=reg256#14,>r=reg256#15
# asm 2: vpand 320(<input_2=%rdx),<a10=%ymm13,>r=%ymm14
vpand 320(%rdx),%ymm13,%ymm14

# qhasm: r20 ^= r
# asm 1: vpxor <r=reg256#15,<r20=reg256#12,<r20=reg256#12
# asm 2: vpxor <r=%ymm14,<r20=%ymm11,<r20=%ymm11
vpxor %ymm14,%ymm11,%ymm11

# qhasm: r = a10 & mem256[input_2 + 352]
# asm 1: vpand 352(<input_2=int64#3),<a10=reg256#14,>r=reg256#14
# asm 2: vpand 352(<input_2=%rdx),<a10=%ymm13,>r=%ymm13
vpand 352(%rdx),%ymm13,%ymm13

# qhasm: r21 ^= r
# asm 1: vpxor <r=reg256#14,<r21=reg256#13,<r21=reg256#13
# asm 2: vpxor <r=%ymm13,<r21=%ymm12,<r21=%ymm12
vpxor %ymm13,%ymm12,%ymm12

# qhasm: r12 ^= r21
# asm 1: vpxor <r21=reg256#13,<r12=reg256#4,<r12=reg256#4
# asm 2: vpxor <r21=%ymm12,<r12=%ymm3,<r12=%ymm3
vpxor %ymm12,%ymm3,%ymm3

# qhasm: r9 = r21
# asm 1: vmovapd <r21=reg256#13,>r9=reg256#13
# asm 2: vmovapd <r21=%ymm12,>r9=%ymm12
vmovapd %ymm12,%ymm12

# qhasm: a9 = mem256[ input_1 + 288 ]
# asm 1: vmovupd   288(<input_1=int64#2),>a9=reg256#14
# asm 2: vmovupd   288(<input_1=%rsi),>a9=%ymm13
vmovupd   288(%rsi),%ymm13

# qhasm: r = a9 & b0
# asm 1: vpand <a9=reg256#14,<b0=reg256#1,>r=reg256#15
# asm 2: vpand <a9=%ymm13,<b0=%ymm0,>r=%ymm14
vpand %ymm13,%ymm0,%ymm14

# qhasm: r9 ^= r
# asm 1: vpxor <r=reg256#15,<r9=reg256#13,<r9=reg256#13
# asm 2: vpxor <r=%ymm14,<r9=%ymm12,<r9=%ymm12
vpxor %ymm14,%ymm12,%ymm12

# qhasm: r = a9 & mem256[input_2 + 32]
# asm 1: vpand 32(<input_2=int64#3),<a9=reg256#14,>r=reg256#15
# asm 2: vpand 32(<input_2=%rdx),<a9=%ymm13,>r=%ymm14
vpand 32(%rdx),%ymm13,%ymm14

# qhasm: r10 ^= r
# asm 1: vpxor <r=reg256#15,<r10=reg256#2,<r10=reg256#2
# asm 2: vpxor <r=%ymm14,<r10=%ymm1,<r10=%ymm1
vpxor %ymm14,%ymm1,%ymm1

# qhasm: r = a9 & mem256[input_2 + 64]
# asm 1: vpand 64(<input_2=int64#3),<a9=reg256#14,>r=reg256#15
# asm 2: vpand 64(<input_2=%rdx),<a9=%ymm13,>r=%ymm14
vpand 64(%rdx),%ymm13,%ymm14

# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#15,<r11=reg256#3,<r11=reg256#3
# asm 2: vpxor <r=%ymm14,<r11=%ymm2,<r11=%ymm2
vpxor %ymm14,%ymm2,%ymm2

# qhasm: r = a9 & mem256[input_2 + 96]
# asm 1: vpand 96(<input_2=int64#3),<a9=reg256#14,>r=reg256#15
# asm 2: vpand 96(<input_2=%rdx),<a9=%ymm13,>r=%ymm14
vpand 96(%rdx),%ymm13,%ymm14

# qhasm: r12 ^= r
# asm 1: vpxor <r=reg256#15,<r12=reg256#4,<r12=reg256#4
# asm 2: vpxor <r=%ymm14,<r12=%ymm3,<r12=%ymm3
vpxor %ymm14,%ymm3,%ymm3

# qhasm: r = a9 & mem256[input_2 + 128]
# asm 1: vpand 128(<input_2=int64#3),<a9=reg256#14,>r=reg256#15
# asm 2: vpand 128(<input_2=%rdx),<a9=%ymm13,>r=%ymm14
vpand 128(%rdx),%ymm13,%ymm14

# qhasm: r13 ^= r
# asm 1: vpxor <r=reg256#15,<r13=reg256#5,<r13=reg256#5
# asm 2: vpxor <r=%ymm14,<r13=%ymm4,<r13=%ymm4
vpxor %ymm14,%ymm4,%ymm4

# qhasm: r = a9 & mem256[input_2 + 160]
# asm 1: vpand 160(<input_2=int64#3),<a9=reg256#14,>r=reg256#15
# asm 2: vpand 160(<input_2=%rdx),<a9=%ymm13,>r=%ymm14
vpand 160(%rdx),%ymm13,%ymm14

# qhasm: r14 ^= r
# asm 1: vpxor <r=reg256#15,<r14=reg256#6,<r14=reg256#6
# asm 2: vpxor <r=%ymm14,<r14=%ymm5,<r14=%ymm5
vpxor %ymm14,%ymm5,%ymm5

# qhasm: r = a9 & mem256[input_2 + 192]
# asm 1: vpand 192(<input_2=int64#3),<a9=reg256#14,>r=reg256#15
# asm 2: vpand 192(<input_2=%rdx),<a9=%ymm13,>r=%ymm14
vpand 192(%rdx),%ymm13,%ymm14

# qhasm: r15 ^= r
# asm 1: vpxor <r=reg256#15,<r15=reg256#7,<r15=reg256#7
# asm 2: vpxor <r=%ymm14,<r15=%ymm6,<r15=%ymm6
vpxor %ymm14,%ymm6,%ymm6

# qhasm: r = a9 & mem256[input_2 + 224]
# asm 1: vpand 224(<input_2=int64#3),<a9=reg256#14,>r=reg256#15
# asm 2: vpand 224(<input_2=%rdx),<a9=%ymm13,>r=%ymm14
vpand 224(%rdx),%ymm13,%ymm14

# qhasm: r16 ^= r
# asm 1: vpxor <r=reg256#15,<r16=reg256#8,<r16=reg256#8
# asm 2: vpxor <r=%ymm14,<r16=%ymm7,<r16=%ymm7
vpxor %ymm14,%ymm7,%ymm7

# qhasm: r = a9 & mem256[input_2 + 256]
# asm 1: vpand 256(<input_2=int64#3),<a9=reg256#14,>r=reg256#15
# asm 2: vpand 256(<input_2=%rdx),<a9=%ymm13,>r=%ymm14
vpand 256(%rdx),%ymm13,%ymm14

# qhasm: r17 ^= r
# asm 1: vpxor <r=reg256#15,<r17=reg256#9,<r17=reg256#9
# asm 2: vpxor <r=%ymm14,<r17=%ymm8,<r17=%ymm8
vpxor %ymm14,%ymm8,%ymm8

# qhasm: r = a9 & mem256[input_2 + 288]
# asm 1: vpand 288(<input_2=int64#3),<a9=reg256#14,>r=reg256#15
# asm 2: vpand 288(<input_2=%rdx),<a9=%ymm13,>r=%ymm14
vpand 288(%rdx),%ymm13,%ymm14

# qhasm: r18 ^= r
# asm 1: vpxor <r=reg256#15,<r18=reg256#10,<r18=reg256#10
# asm 2: vpxor <r=%ymm14,<r18=%ymm9,<r18=%ymm9
vpxor %ymm14,%ymm9,%ymm9

# qhasm: r = a9 & mem256[input_2 + 320]
# asm 1: vpand 320(<input_2=int64#3),<a9=reg256#14,>r=reg256#15
# asm 2: vpand 320(<input_2=%rdx),<a9=%ymm13,>r=%ymm14
vpand 320(%rdx),%ymm13,%ymm14

# qhasm: r19 ^= r
# asm 1: vpxor <r=reg256#15,<r19=reg256#11,<r19=reg256#11
# asm 2: vpxor <r=%ymm14,<r19=%ymm10,<r19=%ymm10
vpxor %ymm14,%ymm10,%ymm10

# qhasm: r = a9 & mem256[input_2 + 352]
# asm 1: vpand 352(<input_2=int64#3),<a9=reg256#14,>r=reg256#14
# asm 2: vpand 352(<input_2=%rdx),<a9=%ymm13,>r=%ymm13
vpand 352(%rdx),%ymm13,%ymm13

# qhasm: r20 ^= r
# asm 1: vpxor <r=reg256#14,<r20=reg256#12,<r20=reg256#12
# asm 2: vpxor <r=%ymm13,<r20=%ymm11,<r20=%ymm11
vpxor %ymm13,%ymm11,%ymm11

# qhasm: r11 ^= r20
# asm 1: vpxor <r20=reg256#12,<r11=reg256#3,<r11=reg256#3
# asm 2: vpxor <r20=%ymm11,<r11=%ymm2,<r11=%ymm2
vpxor %ymm11,%ymm2,%ymm2

# qhasm: r8 = r20
# asm 1: vmovapd <r20=reg256#12,>r8=reg256#12
# asm 2: vmovapd <r20=%ymm11,>r8=%ymm11
vmovapd %ymm11,%ymm11

# qhasm: a8 = mem256[ input_1 + 256 ]
# asm 1: vmovupd   256(<input_1=int64#2),>a8=reg256#14
# asm 2: vmovupd   256(<input_1=%rsi),>a8=%ymm13
vmovupd   256(%rsi),%ymm13

# qhasm: r = a8 & b0
# asm 1: vpand <a8=reg256#14,<b0=reg256#1,>r=reg256#15
# asm 2: vpand <a8=%ymm13,<b0=%ymm0,>r=%ymm14
vpand %ymm13,%ymm0,%ymm14

# qhasm: r8 ^= r
# asm 1: vpxor <r=reg256#15,<r8=reg256#12,<r8=reg256#12
# asm 2: vpxor <r=%ymm14,<r8=%ymm11,<r8=%ymm11
vpxor %ymm14,%ymm11,%ymm11

# qhasm: r = a8 & mem256[input_2 + 32]
# asm 1: vpand 32(<input_2=int64#3),<a8=reg256#14,>r=reg256#15
# asm 2: vpand 32(<input_2=%rdx),<a8=%ymm13,>r=%ymm14
vpand 32(%rdx),%ymm13,%ymm14

# qhasm: r9 ^= r
# asm 1: vpxor <r=reg256#15,<r9=reg256#13,<r9=reg256#13
# asm 2: vpxor <r=%ymm14,<r9=%ymm12,<r9=%ymm12
vpxor %ymm14,%ymm12,%ymm12

# qhasm: r = a8 & mem256[input_2 + 64]
# asm 1: vpand 64(<input_2=int64#3),<a8=reg256#14,>r=reg256#15
# asm 2: vpand 64(<input_2=%rdx),<a8=%ymm13,>r=%ymm14
vpand 64(%rdx),%ymm13,%ymm14

# qhasm: r10 ^= r
# asm 1: vpxor <r=reg256#15,<r10=reg256#2,<r10=reg256#2
# asm 2: vpxor <r=%ymm14,<r10=%ymm1,<r10=%ymm1
vpxor %ymm14,%ymm1,%ymm1

# qhasm: r = a8 & mem256[input_2 + 96]
# asm 1: vpand 96(<input_2=int64#3),<a8=reg256#14,>r=reg256#15
# asm 2: vpand 96(<input_2=%rdx),<a8=%ymm13,>r=%ymm14
vpand 96(%rdx),%ymm13,%ymm14

# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#15,<r11=reg256#3,<r11=reg256#3
# asm 2: vpxor <r=%ymm14,<r11=%ymm2,<r11=%ymm2
vpxor %ymm14,%ymm2,%ymm2

# qhasm: r = a8 & mem256[input_2 + 128]
# asm 1: vpand 128(<input_2=int64#3),<a8=reg256#14,>r=reg256#15
# asm 2: vpand 128(<input_2=%rdx),<a8=%ymm13,>r=%ymm14
vpand 128(%rdx),%ymm13,%ymm14

# qhasm: r12 ^= r
# asm 1: vpxor <r=reg256#15,<r12=reg256#4,<r12=reg256#4
# asm 2: vpxor <r=%ymm14,<r12=%ymm3,<r12=%ymm3
vpxor %ymm14,%ymm3,%ymm3

# qhasm: r = a8 & mem256[input_2 + 160]
# asm 1: vpand 160(<input_2=int64#3),<a8=reg256#14,>r=reg256#15
# asm 2: vpand 160(<input_2=%rdx),<a8=%ymm13,>r=%ymm14
vpand 160(%rdx),%ymm13,%ymm14

# qhasm: r13 ^= r
# asm 1: vpxor <r=reg256#15,<r13=reg256#5,<r13=reg256#5
# asm 2: vpxor <r=%ymm14,<r13=%ymm4,<r13=%ymm4
vpxor %ymm14,%ymm4,%ymm4

# qhasm: r = a8 & mem256[input_2 + 192]
# asm 1: vpand 192(<input_2=int64#3),<a8=reg256#14,>r=reg256#15
# asm 2: vpand 192(<input_2=%rdx),<a8=%ymm13,>r=%ymm14
vpand 192(%rdx),%ymm13,%ymm14

# qhasm: r14 ^= r
# asm 1: vpxor <r=reg256#15,<r14=reg256#6,<r14=reg256#6
# asm 2: vpxor <r=%ymm14,<r14=%ymm5,<r14=%ymm5
vpxor %ymm14,%ymm5,%ymm5

# qhasm: r = a8 & mem256[input_2 + 224]
# asm 1: vpand 224(<input_2=int64#3),<a8=reg256#14,>r=reg256#15
# asm 2: vpand 224(<input_2=%rdx),<a8=%ymm13,>r=%ymm14
vpand 224(%rdx),%ymm13,%ymm14

# qhasm: r15 ^= r
# asm 1: vpxor <r=reg256#15,<r15=reg256#7,<r15=reg256#7
# asm 2: vpxor <r=%ymm14,<r15=%ymm6,<r15=%ymm6
vpxor %ymm14,%ymm6,%ymm6

# qhasm: r = a8 & mem256[input_2 + 256]
# asm 1: vpand 256(<input_2=int64#3),<a8=reg256#14,>r=reg256#15
# asm 2: vpand 256(<input_2=%rdx),<a8=%ymm13,>r=%ymm14
vpand 256(%rdx),%ymm13,%ymm14

# qhasm: r16 ^= r
# asm 1: vpxor <r=reg256#15,<r16=reg256#8,<r16=reg256#8
# asm 2: vpxor <r=%ymm14,<r16=%ymm7,<r16=%ymm7
vpxor %ymm14,%ymm7,%ymm7

# qhasm: r = a8 & mem256[input_2 + 288]
# asm 1: vpand 288(<input_2=int64#3),<a8=reg256#14,>r=reg256#15
# asm 2: vpand 288(<input_2=%rdx),<a8=%ymm13,>r=%ymm14
vpand 288(%rdx),%ymm13,%ymm14

# qhasm: r17 ^= r
# asm 1: vpxor <r=reg256#15,<r17=reg256#9,<r17=reg256#9
# asm 2: vpxor <r=%ymm14,<r17=%ymm8,<r17=%ymm8
vpxor %ymm14,%ymm8,%ymm8

# qhasm: r = a8 & mem256[input_2 + 320]
# asm 1: vpand 320(<input_2=int64#3),<a8=reg256#14,>r=reg256#15
# asm 2: vpand 320(<input_2=%rdx),<a8=%ymm13,>r=%ymm14
vpand 320(%rdx),%ymm13,%ymm14

# qhasm: r18 ^= r
# asm 1: vpxor <r=reg256#15,<r18=reg256#10,<r18=reg256#10
# asm 2: vpxor <r=%ymm14,<r18=%ymm9,<r18=%ymm9
vpxor %ymm14,%ymm9,%ymm9

# qhasm: r = a8 & mem256[input_2 + 352]
# asm 1: vpand 352(<input_2=int64#3),<a8=reg256#14,>r=reg256#14
# asm 2: vpand 352(<input_2=%rdx),<a8=%ymm13,>r=%ymm13
vpand 352(%rdx),%ymm13,%ymm13

# qhasm: r19 ^= r
# asm 1: vpxor <r=reg256#14,<r19=reg256#11,<r19=reg256#11
# asm 2: vpxor <r=%ymm13,<r19=%ymm10,<r19=%ymm10
vpxor %ymm13,%ymm10,%ymm10

# qhasm: r10 ^= r19
# asm 1: vpxor <r19=reg256#11,<r10=reg256#2,<r10=reg256#2
# asm 2: vpxor <r19=%ymm10,<r10=%ymm1,<r10=%ymm1
vpxor %ymm10,%ymm1,%ymm1

# qhasm: r7 = r19
# asm 1: vmovapd <r19=reg256#11,>r7=reg256#11
# asm 2: vmovapd <r19=%ymm10,>r7=%ymm10
vmovapd %ymm10,%ymm10

# qhasm: a7 = mem256[ input_1 + 224 ]
# asm 1: vmovupd   224(<input_1=int64#2),>a7=reg256#14
# asm 2: vmovupd   224(<input_1=%rsi),>a7=%ymm13
vmovupd   224(%rsi),%ymm13

# qhasm: r = a7 & b0
# asm 1: vpand <a7=reg256#14,<b0=reg256#1,>r=reg256#15
# asm 2: vpand <a7=%ymm13,<b0=%ymm0,>r=%ymm14
vpand %ymm13,%ymm0,%ymm14

# qhasm: r7 ^= r
# asm 1: vpxor <r=reg256#15,<r7=reg256#11,<r7=reg256#11
# asm 2: vpxor <r=%ymm14,<r7=%ymm10,<r7=%ymm10
vpxor %ymm14,%ymm10,%ymm10

# qhasm: r = a7 & mem256[input_2 + 32]
# asm 1: vpand 32(<input_2=int64#3),<a7=reg256#14,>r=reg256#15
# asm 2: vpand 32(<input_2=%rdx),<a7=%ymm13,>r=%ymm14
vpand 32(%rdx),%ymm13,%ymm14

# qhasm: r8 ^= r
# asm 1: vpxor <r=reg256#15,<r8=reg256#12,<r8=reg256#12
# asm 2: vpxor <r=%ymm14,<r8=%ymm11,<r8=%ymm11
vpxor %ymm14,%ymm11,%ymm11

# qhasm: r = a7 & mem256[input_2 + 64]
# asm 1: vpand 64(<input_2=int64#3),<a7=reg256#14,>r=reg256#15
# asm 2: vpand 64(<input_2=%rdx),<a7=%ymm13,>r=%ymm14
vpand 64(%rdx),%ymm13,%ymm14

# qhasm: r9 ^= r
# asm 1: vpxor <r=reg256#15,<r9=reg256#13,<r9=reg256#13
# asm 2: vpxor <r=%ymm14,<r9=%ymm12,<r9=%ymm12
vpxor %ymm14,%ymm12,%ymm12

# qhasm: r = a7 & mem256[input_2 + 96]
# asm 1: vpand 96(<input_2=int64#3),<a7=reg256#14,>r=reg256#15
# asm 2: vpand 96(<input_2=%rdx),<a7=%ymm13,>r=%ymm14
vpand 96(%rdx),%ymm13,%ymm14

# qhasm: r10 ^= r
# asm 1: vpxor <r=reg256#15,<r10=reg256#2,<r10=reg256#2
# asm 2: vpxor <r=%ymm14,<r10=%ymm1,<r10=%ymm1
vpxor %ymm14,%ymm1,%ymm1

# qhasm: r = a7 & mem256[input_2 + 128]
# asm 1: vpand 128(<input_2=int64#3),<a7=reg256#14,>r=reg256#15
# asm 2: vpand 128(<input_2=%rdx),<a7=%ymm13,>r=%ymm14
vpand 128(%rdx),%ymm13,%ymm14

# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#15,<r11=reg256#3,<r11=reg256#3
# asm 2: vpxor <r=%ymm14,<r11=%ymm2,<r11=%ymm2
vpxor %ymm14,%ymm2,%ymm2

# qhasm: r = a7 & mem256[input_2 + 160]
# asm 1: vpand 160(<input_2=int64#3),<a7=reg256#14,>r=reg256#15
# asm 2: vpand 160(<input_2=%rdx),<a7=%ymm13,>r=%ymm14
vpand 160(%rdx),%ymm13,%ymm14

# qhasm: r12 ^= r
# asm 1: vpxor <r=reg256#15,<r12=reg256#4,<r12=reg256#4
# asm 2: vpxor <r=%ymm14,<r12=%ymm3,<r12=%ymm3
vpxor %ymm14,%ymm3,%ymm3

# qhasm: r = a7 & mem256[input_2 + 192]
# asm 1: vpand 192(<input_2=int64#3),<a7=reg256#14,>r=reg256#15
# asm 2: vpand 192(<input_2=%rdx),<a7=%ymm13,>r=%ymm14
vpand 192(%rdx),%ymm13,%ymm14

# qhasm: r13 ^= r
# asm 1: vpxor <r=reg256#15,<r13=reg256#5,<r13=reg256#5
# asm 2: vpxor <r=%ymm14,<r13=%ymm4,<r13=%ymm4
vpxor %ymm14,%ymm4,%ymm4

# qhasm: r = a7 & mem256[input_2 + 224]
# asm 1: vpand 224(<input_2=int64#3),<a7=reg256#14,>r=reg256#15
# asm 2: vpand 224(<input_2=%rdx),<a7=%ymm13,>r=%ymm14
vpand 224(%rdx),%ymm13,%ymm14

# qhasm: r14 ^= r
# asm 1: vpxor <r=reg256#15,<r14=reg256#6,<r14=reg256#6
# asm 2: vpxor <r=%ymm14,<r14=%ymm5,<r14=%ymm5
vpxor %ymm14,%ymm5,%ymm5

# qhasm: r = a7 & mem256[input_2 + 256]
# asm 1: vpand 256(<input_2=int64#3),<a7=reg256#14,>r=reg256#15
# asm 2: vpand 256(<input_2=%rdx),<a7=%ymm13,>r=%ymm14
vpand 256(%rdx),%ymm13,%ymm14

# qhasm: r15 ^= r
# asm 1: vpxor <r=reg256#15,<r15=reg256#7,<r15=reg256#7
# asm 2: vpxor <r=%ymm14,<r15=%ymm6,<r15=%ymm6
vpxor %ymm14,%ymm6,%ymm6

# qhasm: r = a7 & mem256[input_2 + 288]
# asm 1: vpand 288(<input_2=int64#3),<a7=reg256#14,>r=reg256#15
# asm 2: vpand 288(<input_2=%rdx),<a7=%ymm13,>r=%ymm14
vpand 288(%rdx),%ymm13,%ymm14

# qhasm: r16 ^= r
# asm 1: vpxor <r=reg256#15,<r16=reg256#8,<r16=reg256#8
# asm 2: vpxor <r=%ymm14,<r16=%ymm7,<r16=%ymm7
vpxor %ymm14,%ymm7,%ymm7

# qhasm: r = a7 & mem256[input_2 + 320]
# asm 1: vpand 320(<input_2=int64#3),<a7=reg256#14,>r=reg256#15
# asm 2: vpand 320(<input_2=%rdx),<a7=%ymm13,>r=%ymm14
vpand 320(%rdx),%ymm13,%ymm14

# qhasm: r17 ^= r
# asm 1: vpxor <r=reg256#15,<r17=reg256#9,<r17=reg256#9
# asm 2: vpxor <r=%ymm14,<r17=%ymm8,<r17=%ymm8
vpxor %ymm14,%ymm8,%ymm8

# qhasm: r = a7 & mem256[input_2 + 352]
# asm 1: vpand 352(<input_2=int64#3),<a7=reg256#14,>r=reg256#14
# asm 2: vpand 352(<input_2=%rdx),<a7=%ymm13,>r=%ymm13
vpand 352(%rdx),%ymm13,%ymm13

# qhasm: r18 ^= r
# asm 1: vpxor <r=reg256#14,<r18=reg256#10,<r18=reg256#10
# asm 2: vpxor <r=%ymm13,<r18=%ymm9,<r18=%ymm9
vpxor %ymm13,%ymm9,%ymm9

# qhasm: r9 ^= r18
# asm 1: vpxor <r18=reg256#10,<r9=reg256#13,<r9=reg256#13
# asm 2: vpxor <r18=%ymm9,<r9=%ymm12,<r9=%ymm12
vpxor %ymm9,%ymm12,%ymm12

# qhasm: r6 = r18
# asm 1: vmovapd <r18=reg256#10,>r6=reg256#10
# asm 2: vmovapd <r18=%ymm9,>r6=%ymm9
vmovapd %ymm9,%ymm9

# qhasm: a6 = mem256[ input_1 + 192 ]
# asm 1: vmovupd   192(<input_1=int64#2),>a6=reg256#14
# asm 2: vmovupd   192(<input_1=%rsi),>a6=%ymm13
vmovupd   192(%rsi),%ymm13

# qhasm: r = a6 & b0
# asm 1: vpand <a6=reg256#14,<b0=reg256#1,>r=reg256#15
# asm 2: vpand <a6=%ymm13,<b0=%ymm0,>r=%ymm14
vpand %ymm13,%ymm0,%ymm14

# qhasm: r6 ^= r
# asm 1: vpxor <r=reg256#15,<r6=reg256#10,<r6=reg256#10
# asm 2: vpxor <r=%ymm14,<r6=%ymm9,<r6=%ymm9
vpxor %ymm14,%ymm9,%ymm9

# qhasm: r = a6 & mem256[input_2 + 32]
# asm 1: vpand 32(<input_2=int64#3),<a6=reg256#14,>r=reg256#15
# asm 2: vpand 32(<input_2=%rdx),<a6=%ymm13,>r=%ymm14
vpand 32(%rdx),%ymm13,%ymm14

# qhasm: r7 ^= r
# asm 1: vpxor <r=reg256#15,<r7=reg256#11,<r7=reg256#11
# asm 2: vpxor <r=%ymm14,<r7=%ymm10,<r7=%ymm10
vpxor %ymm14,%ymm10,%ymm10

# qhasm: r = a6 & mem256[input_2 + 64]
# asm 1: vpand 64(<input_2=int64#3),<a6=reg256#14,>r=reg256#15
# asm 2: vpand 64(<input_2=%rdx),<a6=%ymm13,>r=%ymm14
vpand 64(%rdx),%ymm13,%ymm14

# qhasm: r8 ^= r
# asm 1: vpxor <r=reg256#15,<r8=reg256#12,<r8=reg256#12
# asm 2: vpxor <r=%ymm14,<r8=%ymm11,<r8=%ymm11
vpxor %ymm14,%ymm11,%ymm11

# qhasm: r = a6 & mem256[input_2 + 96]
# asm 1: vpand 96(<input_2=int64#3),<a6=reg256#14,>r=reg256#15
# asm 2: vpand 96(<input_2=%rdx),<a6=%ymm13,>r=%ymm14
vpand 96(%rdx),%ymm13,%ymm14

# qhasm: r9 ^= r
# asm 1: vpxor <r=reg256#15,<r9=reg256#13,<r9=reg256#13
# asm 2: vpxor <r=%ymm14,<r9=%ymm12,<r9=%ymm12
vpxor %ymm14,%ymm12,%ymm12

# qhasm: r = a6 & mem256[input_2 + 128]
# asm 1: vpand 128(<input_2=int64#3),<a6=reg256#14,>r=reg256#15
# asm 2: vpand 128(<input_2=%rdx),<a6=%ymm13,>r=%ymm14
vpand 128(%rdx),%ymm13,%ymm14

# qhasm: r10 ^= r
# asm 1: vpxor <r=reg256#15,<r10=reg256#2,<r10=reg256#2
# asm 2: vpxor <r=%ymm14,<r10=%ymm1,<r10=%ymm1
vpxor %ymm14,%ymm1,%ymm1

# qhasm: r = a6 & mem256[input_2 + 160]
# asm 1: vpand 160(<input_2=int64#3),<a6=reg256#14,>r=reg256#15
# asm 2: vpand 160(<input_2=%rdx),<a6=%ymm13,>r=%ymm14
vpand 160(%rdx),%ymm13,%ymm14

# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#15,<r11=reg256#3,<r11=reg256#3
# asm 2: vpxor <r=%ymm14,<r11=%ymm2,<r11=%ymm2
vpxor %ymm14,%ymm2,%ymm2

# qhasm: r = a6 & mem256[input_2 + 192]
# asm 1: vpand 192(<input_2=int64#3),<a6=reg256#14,>r=reg256#15
# asm 2: vpand 192(<input_2=%rdx),<a6=%ymm13,>r=%ymm14
vpand 192(%rdx),%ymm13,%ymm14

# qhasm: r12 ^= r
# asm 1: vpxor <r=reg256#15,<r12=reg256#4,<r12=reg256#4
# asm 2: vpxor <r=%ymm14,<r12=%ymm3,<r12=%ymm3
vpxor %ymm14,%ymm3,%ymm3

# qhasm: r = a6 & mem256[input_2 + 224]
# asm 1: vpand 224(<input_2=int64#3),<a6=reg256#14,>r=reg256#15
# asm 2: vpand 224(<input_2=%rdx),<a6=%ymm13,>r=%ymm14
vpand 224(%rdx),%ymm13,%ymm14

# qhasm: r13 ^= r
# asm 1: vpxor <r=reg256#15,<r13=reg256#5,<r13=reg256#5
# asm 2: vpxor <r=%ymm14,<r13=%ymm4,<r13=%ymm4
vpxor %ymm14,%ymm4,%ymm4

# qhasm: r = a6 & mem256[input_2 + 256]
# asm 1: vpand 256(<input_2=int64#3),<a6=reg256#14,>r=reg256#15
# asm 2: vpand 256(<input_2=%rdx),<a6=%ymm13,>r=%ymm14
vpand 256(%rdx),%ymm13,%ymm14

# qhasm: r14 ^= r
# asm 1: vpxor <r=reg256#15,<r14=reg256#6,<r14=reg256#6
# asm 2: vpxor <r=%ymm14,<r14=%ymm5,<r14=%ymm5
vpxor %ymm14,%ymm5,%ymm5

# qhasm: r = a6 & mem256[input_2 + 288]
# asm 1: vpand 288(<input_2=int64#3),<a6=reg256#14,>r=reg256#15
# asm 2: vpand 288(<input_2=%rdx),<a6=%ymm13,>r=%ymm14
vpand 288(%rdx),%ymm13,%ymm14

# qhasm: r15 ^= r
# asm 1: vpxor <r=reg256#15,<r15=reg256#7,<r15=reg256#7
# asm 2: vpxor <r=%ymm14,<r15=%ymm6,<r15=%ymm6
vpxor %ymm14,%ymm6,%ymm6

# qhasm: r = a6 & mem256[input_2 + 320]
# asm 1: vpand 320(<input_2=int64#3),<a6=reg256#14,>r=reg256#15
# asm 2: vpand 320(<input_2=%rdx),<a6=%ymm13,>r=%ymm14
vpand 320(%rdx),%ymm13,%ymm14

# qhasm: r16 ^= r
# asm 1: vpxor <r=reg256#15,<r16=reg256#8,<r16=reg256#8
# asm 2: vpxor <r=%ymm14,<r16=%ymm7,<r16=%ymm7
vpxor %ymm14,%ymm7,%ymm7

# qhasm: r = a6 & mem256[input_2 + 352]
# asm 1: vpand 352(<input_2=int64#3),<a6=reg256#14,>r=reg256#14
# asm 2: vpand 352(<input_2=%rdx),<a6=%ymm13,>r=%ymm13
vpand 352(%rdx),%ymm13,%ymm13

# qhasm: r17 ^= r
# asm 1: vpxor <r=reg256#14,<r17=reg256#9,<r17=reg256#9
# asm 2: vpxor <r=%ymm13,<r17=%ymm8,<r17=%ymm8
vpxor %ymm13,%ymm8,%ymm8

# qhasm: r8 ^= r17
# asm 1: vpxor <r17=reg256#9,<r8=reg256#12,<r8=reg256#12
# asm 2: vpxor <r17=%ymm8,<r8=%ymm11,<r8=%ymm11
vpxor %ymm8,%ymm11,%ymm11

# qhasm: r5 = r17
# asm 1: vmovapd <r17=reg256#9,>r5=reg256#9
# asm 2: vmovapd <r17=%ymm8,>r5=%ymm8
vmovapd %ymm8,%ymm8

# qhasm: a5 = mem256[ input_1 + 160 ]
# asm 1: vmovupd   160(<input_1=int64#2),>a5=reg256#14
# asm 2: vmovupd   160(<input_1=%rsi),>a5=%ymm13
vmovupd   160(%rsi),%ymm13

# qhasm: r = a5 & b0
# asm 1: vpand <a5=reg256#14,<b0=reg256#1,>r=reg256#15
# asm 2: vpand <a5=%ymm13,<b0=%ymm0,>r=%ymm14
vpand %ymm13,%ymm0,%ymm14

# qhasm: r5 ^= r
# asm 1: vpxor <r=reg256#15,<r5=reg256#9,<r5=reg256#9
# asm 2: vpxor <r=%ymm14,<r5=%ymm8,<r5=%ymm8
vpxor %ymm14,%ymm8,%ymm8

# qhasm: r = a5 & mem256[input_2 + 32]
# asm 1: vpand 32(<input_2=int64#3),<a5=reg256#14,>r=reg256#15
# asm 2: vpand 32(<input_2=%rdx),<a5=%ymm13,>r=%ymm14
vpand 32(%rdx),%ymm13,%ymm14

# qhasm: r6 ^= r
# asm 1: vpxor <r=reg256#15,<r6=reg256#10,<r6=reg256#10
# asm 2: vpxor <r=%ymm14,<r6=%ymm9,<r6=%ymm9
vpxor %ymm14,%ymm9,%ymm9

# qhasm: r = a5 & mem256[input_2 + 64]
# asm 1: vpand 64(<input_2=int64#3),<a5=reg256#14,>r=reg256#15
# asm 2: vpand 64(<input_2=%rdx),<a5=%ymm13,>r=%ymm14
vpand 64(%rdx),%ymm13,%ymm14

# qhasm: r7 ^= r
# asm 1: vpxor <r=reg256#15,<r7=reg256#11,<r7=reg256#11
# asm 2: vpxor <r=%ymm14,<r7=%ymm10,<r7=%ymm10
vpxor %ymm14,%ymm10,%ymm10

# qhasm: r = a5 & mem256[input_2 + 96]
# asm 1: vpand 96(<input_2=int64#3),<a5=reg256#14,>r=reg256#15
# asm 2: vpand 96(<input_2=%rdx),<a5=%ymm13,>r=%ymm14
vpand 96(%rdx),%ymm13,%ymm14

# qhasm: r8 ^= r
# asm 1: vpxor <r=reg256#15,<r8=reg256#12,<r8=reg256#12
# asm 2: vpxor <r=%ymm14,<r8=%ymm11,<r8=%ymm11
vpxor %ymm14,%ymm11,%ymm11

# qhasm: r = a5 & mem256[input_2 + 128]
# asm 1: vpand 128(<input_2=int64#3),<a5=reg256#14,>r=reg256#15
# asm 2: vpand 128(<input_2=%rdx),<a5=%ymm13,>r=%ymm14
vpand 128(%rdx),%ymm13,%ymm14

# qhasm: r9 ^= r
# asm 1: vpxor <r=reg256#15,<r9=reg256#13,<r9=reg256#13
# asm 2: vpxor <r=%ymm14,<r9=%ymm12,<r9=%ymm12
vpxor %ymm14,%ymm12,%ymm12

# qhasm: r = a5 & mem256[input_2 + 160]
# asm 1: vpand 160(<input_2=int64#3),<a5=reg256#14,>r=reg256#15
# asm 2: vpand 160(<input_2=%rdx),<a5=%ymm13,>r=%ymm14
vpand 160(%rdx),%ymm13,%ymm14

# qhasm: r10 ^= r
# asm 1: vpxor <r=reg256#15,<r10=reg256#2,<r10=reg256#2
# asm 2: vpxor <r=%ymm14,<r10=%ymm1,<r10=%ymm1
vpxor %ymm14,%ymm1,%ymm1

# qhasm: r = a5 & mem256[input_2 + 192]
# asm 1: vpand 192(<input_2=int64#3),<a5=reg256#14,>r=reg256#15
# asm 2: vpand 192(<input_2=%rdx),<a5=%ymm13,>r=%ymm14
vpand 192(%rdx),%ymm13,%ymm14

# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#15,<r11=reg256#3,<r11=reg256#3
# asm 2: vpxor <r=%ymm14,<r11=%ymm2,<r11=%ymm2
vpxor %ymm14,%ymm2,%ymm2

# qhasm: r = a5 & mem256[input_2 + 224]
# asm 1: vpand 224(<input_2=int64#3),<a5=reg256#14,>r=reg256#15
# asm 2: vpand 224(<input_2=%rdx),<a5=%ymm13,>r=%ymm14
vpand 224(%rdx),%ymm13,%ymm14

# qhasm: r12 ^= r
# asm 1: vpxor <r=reg256#15,<r12=reg256#4,<r12=reg256#4
# asm 2: vpxor <r=%ymm14,<r12=%ymm3,<r12=%ymm3
vpxor %ymm14,%ymm3,%ymm3

# qhasm: r = a5 & mem256[input_2 + 256]
# asm 1: vpand 256(<input_2=int64#3),<a5=reg256#14,>r=reg256#15
# asm 2: vpand 256(<input_2=%rdx),<a5=%ymm13,>r=%ymm14
vpand 256(%rdx),%ymm13,%ymm14

# qhasm: r13 ^= r
# asm 1: vpxor <r=reg256#15,<r13=reg256#5,<r13=reg256#5
# asm 2: vpxor <r=%ymm14,<r13=%ymm4,<r13=%ymm4
vpxor %ymm14,%ymm4,%ymm4

# qhasm: r = a5 & mem256[input_2 + 288]
# asm 1: vpand 288(<input_2=int64#3),<a5=reg256#14,>r=reg256#15
# asm 2: vpand 288(<input_2=%rdx),<a5=%ymm13,>r=%ymm14
vpand 288(%rdx),%ymm13,%ymm14

# qhasm: r14 ^= r
# asm 1: vpxor <r=reg256#15,<r14=reg256#6,<r14=reg256#6
# asm 2: vpxor <r=%ymm14,<r14=%ymm5,<r14=%ymm5
vpxor %ymm14,%ymm5,%ymm5

# qhasm: r = a5 & mem256[input_2 + 320]
# asm 1: vpand 320(<input_2=int64#3),<a5=reg256#14,>r=reg256#15
# asm 2: vpand 320(<input_2=%rdx),<a5=%ymm13,>r=%ymm14
vpand 320(%rdx),%ymm13,%ymm14

# qhasm: r15 ^= r
# asm 1: vpxor <r=reg256#15,<r15=reg256#7,<r15=reg256#7
# asm 2: vpxor <r=%ymm14,<r15=%ymm6,<r15=%ymm6
vpxor %ymm14,%ymm6,%ymm6

# qhasm: r = a5 & mem256[input_2 + 352]
# asm 1: vpand 352(<input_2=int64#3),<a5=reg256#14,>r=reg256#14
# asm 2: vpand 352(<input_2=%rdx),<a5=%ymm13,>r=%ymm13
vpand 352(%rdx),%ymm13,%ymm13

# qhasm: r16 ^= r
# asm 1: vpxor <r=reg256#14,<r16=reg256#8,<r16=reg256#8
# asm 2: vpxor <r=%ymm13,<r16=%ymm7,<r16=%ymm7
vpxor %ymm13,%ymm7,%ymm7

# qhasm: r7 ^= r16
# asm 1: vpxor <r16=reg256#8,<r7=reg256#11,<r7=reg256#11
# asm 2: vpxor <r16=%ymm7,<r7=%ymm10,<r7=%ymm10
vpxor %ymm7,%ymm10,%ymm10

# qhasm: r4 = r16
# asm 1: vmovapd <r16=reg256#8,>r4=reg256#8
# asm 2: vmovapd <r16=%ymm7,>r4=%ymm7
vmovapd %ymm7,%ymm7

# qhasm: a4 = mem256[ input_1 + 128 ]
# asm 1: vmovupd   128(<input_1=int64#2),>a4=reg256#14
# asm 2: vmovupd   128(<input_1=%rsi),>a4=%ymm13
vmovupd   128(%rsi),%ymm13

# qhasm: r = a4 & b0
# asm 1: vpand <a4=reg256#14,<b0=reg256#1,>r=reg256#15
# asm 2: vpand <a4=%ymm13,<b0=%ymm0,>r=%ymm14
vpand %ymm13,%ymm0,%ymm14

# qhasm: r4 ^= r
# asm 1: vpxor <r=reg256#15,<r4=reg256#8,<r4=reg256#8
# asm 2: vpxor <r=%ymm14,<r4=%ymm7,<r4=%ymm7
vpxor %ymm14,%ymm7,%ymm7

# qhasm: r = a4 & mem256[input_2 + 32]
# asm 1: vpand 32(<input_2=int64#3),<a4=reg256#14,>r=reg256#15
# asm 2: vpand 32(<input_2=%rdx),<a4=%ymm13,>r=%ymm14
vpand 32(%rdx),%ymm13,%ymm14

# qhasm: r5 ^= r
# asm 1: vpxor <r=reg256#15,<r5=reg256#9,<r5=reg256#9
# asm 2: vpxor <r=%ymm14,<r5=%ymm8,<r5=%ymm8
vpxor %ymm14,%ymm8,%ymm8

# qhasm: r = a4 & mem256[input_2 + 64]
# asm 1: vpand 64(<input_2=int64#3),<a4=reg256#14,>r=reg256#15
# asm 2: vpand 64(<input_2=%rdx),<a4=%ymm13,>r=%ymm14
vpand 64(%rdx),%ymm13,%ymm14

# qhasm: r6 ^= r
# asm 1: vpxor <r=reg256#15,<r6=reg256#10,<r6=reg256#10
# asm 2: vpxor <r=%ymm14,<r6=%ymm9,<r6=%ymm9
vpxor %ymm14,%ymm9,%ymm9

# qhasm: r = a4 & mem256[input_2 + 96]
# asm 1: vpand 96(<input_2=int64#3),<a4=reg256#14,>r=reg256#15
# asm 2: vpand 96(<input_2=%rdx),<a4=%ymm13,>r=%ymm14
vpand 96(%rdx),%ymm13,%ymm14

# qhasm: r7 ^= r
# asm 1: vpxor <r=reg256#15,<r7=reg256#11,<r7=reg256#11
# asm 2: vpxor <r=%ymm14,<r7=%ymm10,<r7=%ymm10
vpxor %ymm14,%ymm10,%ymm10

# qhasm: r = a4 & mem256[input_2 + 128]
# asm 1: vpand 128(<input_2=int64#3),<a4=reg256#14,>r=reg256#15
# asm 2: vpand 128(<input_2=%rdx),<a4=%ymm13,>r=%ymm14
vpand 128(%rdx),%ymm13,%ymm14

# qhasm: r8 ^= r
# asm 1: vpxor <r=reg256#15,<r8=reg256#12,<r8=reg256#12
# asm 2: vpxor <r=%ymm14,<r8=%ymm11,<r8=%ymm11
vpxor %ymm14,%ymm11,%ymm11

# qhasm: r = a4 & mem256[input_2 + 160]
# asm 1: vpand 160(<input_2=int64#3),<a4=reg256#14,>r=reg256#15
# asm 2: vpand 160(<input_2=%rdx),<a4=%ymm13,>r=%ymm14
vpand 160(%rdx),%ymm13,%ymm14

# qhasm: r9 ^= r
# asm 1: vpxor <r=reg256#15,<r9=reg256#13,<r9=reg256#13
# asm 2: vpxor <r=%ymm14,<r9=%ymm12,<r9=%ymm12
vpxor %ymm14,%ymm12,%ymm12

# qhasm: r = a4 & mem256[input_2 + 192]
# asm 1: vpand 192(<input_2=int64#3),<a4=reg256#14,>r=reg256#15
# asm 2: vpand 192(<input_2=%rdx),<a4=%ymm13,>r=%ymm14
vpand 192(%rdx),%ymm13,%ymm14

# qhasm: r10 ^= r
# asm 1: vpxor <r=reg256#15,<r10=reg256#2,<r10=reg256#2
# asm 2: vpxor <r=%ymm14,<r10=%ymm1,<r10=%ymm1
vpxor %ymm14,%ymm1,%ymm1

# qhasm: r = a4 & mem256[input_2 + 224]
# asm 1: vpand 224(<input_2=int64#3),<a4=reg256#14,>r=reg256#15
# asm 2: vpand 224(<input_2=%rdx),<a4=%ymm13,>r=%ymm14
vpand 224(%rdx),%ymm13,%ymm14

# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#15,<r11=reg256#3,<r11=reg256#3
# asm 2: vpxor <r=%ymm14,<r11=%ymm2,<r11=%ymm2
vpxor %ymm14,%ymm2,%ymm2

# qhasm: r = a4 & mem256[input_2 + 256]
# asm 1: vpand 256(<input_2=int64#3),<a4=reg256#14,>r=reg256#15
# asm 2: vpand 256(<input_2=%rdx),<a4=%ymm13,>r=%ymm14
vpand 256(%rdx),%ymm13,%ymm14

# qhasm: r12 ^= r
# asm 1: vpxor <r=reg256#15,<r12=reg256#4,<r12=reg256#4
# asm 2: vpxor <r=%ymm14,<r12=%ymm3,<r12=%ymm3
vpxor %ymm14,%ymm3,%ymm3

# qhasm: r = a4 & mem256[input_2 + 288]
# asm 1: vpand 288(<input_2=int64#3),<a4=reg256#14,>r=reg256#15
# asm 2: vpand 288(<input_2=%rdx),<a4=%ymm13,>r=%ymm14
vpand 288(%rdx),%ymm13,%ymm14

# qhasm: r13 ^= r
# asm 1: vpxor <r=reg256#15,<r13=reg256#5,<r13=reg256#5
# asm 2: vpxor <r=%ymm14,<r13=%ymm4,<r13=%ymm4
vpxor %ymm14,%ymm4,%ymm4

# qhasm: r = a4 & mem256[input_2 + 320]
# asm 1: vpand 320(<input_2=int64#3),<a4=reg256#14,>r=reg256#15
# asm 2: vpand 320(<input_2=%rdx),<a4=%ymm13,>r=%ymm14
vpand 320(%rdx),%ymm13,%ymm14

# qhasm: r14 ^= r
# asm 1: vpxor <r=reg256#15,<r14=reg256#6,<r14=reg256#6
# asm 2: vpxor <r=%ymm14,<r14=%ymm5,<r14=%ymm5
vpxor %ymm14,%ymm5,%ymm5

# qhasm: r = a4 & mem256[input_2 + 352]
# asm 1: vpand 352(<input_2=int64#3),<a4=reg256#14,>r=reg256#14
# asm 2: vpand 352(<input_2=%rdx),<a4=%ymm13,>r=%ymm13
vpand 352(%rdx),%ymm13,%ymm13

# qhasm: r15 ^= r
# asm 1: vpxor <r=reg256#14,<r15=reg256#7,<r15=reg256#7
# asm 2: vpxor <r=%ymm13,<r15=%ymm6,<r15=%ymm6
vpxor %ymm13,%ymm6,%ymm6

# qhasm: r6 ^= r15
# asm 1: vpxor <r15=reg256#7,<r6=reg256#10,<r6=reg256#10
# asm 2: vpxor <r15=%ymm6,<r6=%ymm9,<r6=%ymm9
vpxor %ymm6,%ymm9,%ymm9

# qhasm: r3 = r15
# asm 1: vmovapd <r15=reg256#7,>r3=reg256#7
# asm 2: vmovapd <r15=%ymm6,>r3=%ymm6
vmovapd %ymm6,%ymm6

# qhasm: a3 = mem256[ input_1 + 96 ]
# asm 1: vmovupd   96(<input_1=int64#2),>a3=reg256#14
# asm 2: vmovupd   96(<input_1=%rsi),>a3=%ymm13
vmovupd   96(%rsi),%ymm13

# qhasm: r = a3 & b0
# asm 1: vpand <a3=reg256#14,<b0=reg256#1,>r=reg256#15
# asm 2: vpand <a3=%ymm13,<b0=%ymm0,>r=%ymm14
vpand %ymm13,%ymm0,%ymm14

# qhasm: r3 ^= r
# asm 1: vpxor <r=reg256#15,<r3=reg256#7,<r3=reg256#7
# asm 2: vpxor <r=%ymm14,<r3=%ymm6,<r3=%ymm6
vpxor %ymm14,%ymm6,%ymm6

# qhasm: r = a3 & mem256[input_2 + 32]
# asm 1: vpand 32(<input_2=int64#3),<a3=reg256#14,>r=reg256#15
# asm 2: vpand 32(<input_2=%rdx),<a3=%ymm13,>r=%ymm14
vpand 32(%rdx),%ymm13,%ymm14

# qhasm: r4 ^= r
# asm 1: vpxor <r=reg256#15,<r4=reg256#8,<r4=reg256#8
# asm 2: vpxor <r=%ymm14,<r4=%ymm7,<r4=%ymm7
vpxor %ymm14,%ymm7,%ymm7

# qhasm: r = a3 & mem256[input_2 + 64]
# asm 1: vpand 64(<input_2=int64#3),<a3=reg256#14,>r=reg256#15
# asm 2: vpand 64(<input_2=%rdx),<a3=%ymm13,>r=%ymm14
vpand 64(%rdx),%ymm13,%ymm14

# qhasm: r5 ^= r
# asm 1: vpxor <r=reg256#15,<r5=reg256#9,<r5=reg256#9
# asm 2: vpxor <r=%ymm14,<r5=%ymm8,<r5=%ymm8
vpxor %ymm14,%ymm8,%ymm8

# qhasm: r = a3 & mem256[input_2 + 96]
# asm 1: vpand 96(<input_2=int64#3),<a3=reg256#14,>r=reg256#15
# asm 2: vpand 96(<input_2=%rdx),<a3=%ymm13,>r=%ymm14
vpand 96(%rdx),%ymm13,%ymm14

# qhasm: r6 ^= r
# asm 1: vpxor <r=reg256#15,<r6=reg256#10,<r6=reg256#10
# asm 2: vpxor <r=%ymm14,<r6=%ymm9,<r6=%ymm9
vpxor %ymm14,%ymm9,%ymm9

# qhasm: r = a3 & mem256[input_2 + 128]
# asm 1: vpand 128(<input_2=int64#3),<a3=reg256#14,>r=reg256#15
# asm 2: vpand 128(<input_2=%rdx),<a3=%ymm13,>r=%ymm14
vpand 128(%rdx),%ymm13,%ymm14

# qhasm: r7 ^= r
# asm 1: vpxor <r=reg256#15,<r7=reg256#11,<r7=reg256#11
# asm 2: vpxor <r=%ymm14,<r7=%ymm10,<r7=%ymm10
vpxor %ymm14,%ymm10,%ymm10

# qhasm: r = a3 & mem256[input_2 + 160]
# asm 1: vpand 160(<input_2=int64#3),<a3=reg256#14,>r=reg256#15
# asm 2: vpand 160(<input_2=%rdx),<a3=%ymm13,>r=%ymm14
vpand 160(%rdx),%ymm13,%ymm14

# qhasm: r8 ^= r
# asm 1: vpxor <r=reg256#15,<r8=reg256#12,<r8=reg256#12
# asm 2: vpxor <r=%ymm14,<r8=%ymm11,<r8=%ymm11
vpxor %ymm14,%ymm11,%ymm11

# qhasm: r = a3 & mem256[input_2 + 192]
# asm 1: vpand 192(<input_2=int64#3),<a3=reg256#14,>r=reg256#15
# asm 2: vpand 192(<input_2=%rdx),<a3=%ymm13,>r=%ymm14
vpand 192(%rdx),%ymm13,%ymm14

# qhasm: r9 ^= r
# asm 1: vpxor <r=reg256#15,<r9=reg256#13,<r9=reg256#13
# asm 2: vpxor <r=%ymm14,<r9=%ymm12,<r9=%ymm12
vpxor %ymm14,%ymm12,%ymm12

# qhasm: r = a3 & mem256[input_2 + 224]
# asm 1: vpand 224(<input_2=int64#3),<a3=reg256#14,>r=reg256#15
# asm 2: vpand 224(<input_2=%rdx),<a3=%ymm13,>r=%ymm14
vpand 224(%rdx),%ymm13,%ymm14

# qhasm: r10 ^= r
# asm 1: vpxor <r=reg256#15,<r10=reg256#2,<r10=reg256#2
# asm 2: vpxor <r=%ymm14,<r10=%ymm1,<r10=%ymm1
vpxor %ymm14,%ymm1,%ymm1

# qhasm: r = a3 & mem256[input_2 + 256]
# asm 1: vpand 256(<input_2=int64#3),<a3=reg256#14,>r=reg256#15
# asm 2: vpand 256(<input_2=%rdx),<a3=%ymm13,>r=%ymm14
vpand 256(%rdx),%ymm13,%ymm14

# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#15,<r11=reg256#3,<r11=reg256#3
# asm 2: vpxor <r=%ymm14,<r11=%ymm2,<r11=%ymm2
vpxor %ymm14,%ymm2,%ymm2

# qhasm: r = a3 & mem256[input_2 + 288]
# asm 1: vpand 288(<input_2=int64#3),<a3=reg256#14,>r=reg256#15
# asm 2: vpand 288(<input_2=%rdx),<a3=%ymm13,>r=%ymm14
vpand 288(%rdx),%ymm13,%ymm14

# qhasm: r12 ^= r
# asm 1: vpxor <r=reg256#15,<r12=reg256#4,<r12=reg256#4
# asm 2: vpxor <r=%ymm14,<r12=%ymm3,<r12=%ymm3
vpxor %ymm14,%ymm3,%ymm3

# qhasm: r = a3 & mem256[input_2 + 320]
# asm 1: vpand 320(<input_2=int64#3),<a3=reg256#14,>r=reg256#15
# asm 2: vpand 320(<input_2=%rdx),<a3=%ymm13,>r=%ymm14
vpand 320(%rdx),%ymm13,%ymm14

# qhasm: r13 ^= r
# asm 1: vpxor <r=reg256#15,<r13=reg256#5,<r13=reg256#5
# asm 2: vpxor <r=%ymm14,<r13=%ymm4,<r13=%ymm4
vpxor %ymm14,%ymm4,%ymm4

# qhasm: r = a3 & mem256[input_2 + 352]
# asm 1: vpand 352(<input_2=int64#3),<a3=reg256#14,>r=reg256#14
# asm 2: vpand 352(<input_2=%rdx),<a3=%ymm13,>r=%ymm13
vpand 352(%rdx),%ymm13,%ymm13

# qhasm: r14 ^= r
# asm 1: vpxor <r=reg256#14,<r14=reg256#6,<r14=reg256#6
# asm 2: vpxor <r=%ymm13,<r14=%ymm5,<r14=%ymm5
vpxor %ymm13,%ymm5,%ymm5

# qhasm: r5 ^= r14
# asm 1: vpxor <r14=reg256#6,<r5=reg256#9,<r5=reg256#9
# asm 2: vpxor <r14=%ymm5,<r5=%ymm8,<r5=%ymm8
vpxor %ymm5,%ymm8,%ymm8

# qhasm: r2 = r14
# asm 1: vmovapd <r14=reg256#6,>r2=reg256#6
# asm 2: vmovapd <r14=%ymm5,>r2=%ymm5
vmovapd %ymm5,%ymm5

# qhasm: a2 = mem256[ input_1 + 64 ]
# asm 1: vmovupd   64(<input_1=int64#2),>a2=reg256#14
# asm 2: vmovupd   64(<input_1=%rsi),>a2=%ymm13
vmovupd   64(%rsi),%ymm13

# qhasm: r = a2 & b0
# asm 1: vpand <a2=reg256#14,<b0=reg256#1,>r=reg256#15
# asm 2: vpand <a2=%ymm13,<b0=%ymm0,>r=%ymm14
vpand %ymm13,%ymm0,%ymm14

# qhasm: r2 ^= r
# asm 1: vpxor <r=reg256#15,<r2=reg256#6,<r2=reg256#6
# asm 2: vpxor <r=%ymm14,<r2=%ymm5,<r2=%ymm5
vpxor %ymm14,%ymm5,%ymm5

# qhasm: r = a2 & mem256[input_2 + 32]
# asm 1: vpand 32(<input_2=int64#3),<a2=reg256#14,>r=reg256#15
# asm 2: vpand 32(<input_2=%rdx),<a2=%ymm13,>r=%ymm14
vpand 32(%rdx),%ymm13,%ymm14

# qhasm: r3 ^= r
# asm 1: vpxor <r=reg256#15,<r3=reg256#7,<r3=reg256#7
# asm 2: vpxor <r=%ymm14,<r3=%ymm6,<r3=%ymm6
vpxor %ymm14,%ymm6,%ymm6

# qhasm: r = a2 & mem256[input_2 + 64]
# asm 1: vpand 64(<input_2=int64#3),<a2=reg256#14,>r=reg256#15
# asm 2: vpand 64(<input_2=%rdx),<a2=%ymm13,>r=%ymm14
vpand 64(%rdx),%ymm13,%ymm14

# qhasm: r4 ^= r
# asm 1: vpxor <r=reg256#15,<r4=reg256#8,<r4=reg256#8
# asm 2: vpxor <r=%ymm14,<r4=%ymm7,<r4=%ymm7
vpxor %ymm14,%ymm7,%ymm7

# qhasm: r = a2 & mem256[input_2 + 96]
# asm 1: vpand 96(<input_2=int64#3),<a2=reg256#14,>r=reg256#15
# asm 2: vpand 96(<input_2=%rdx),<a2=%ymm13,>r=%ymm14
vpand 96(%rdx),%ymm13,%ymm14

# qhasm: r5 ^= r
# asm 1: vpxor <r=reg256#15,<r5=reg256#9,<r5=reg256#9
# asm 2: vpxor <r=%ymm14,<r5=%ymm8,<r5=%ymm8
vpxor %ymm14,%ymm8,%ymm8

# qhasm: r = a2 & mem256[input_2 + 128]
# asm 1: vpand 128(<input_2=int64#3),<a2=reg256#14,>r=reg256#15
# asm 2: vpand 128(<input_2=%rdx),<a2=%ymm13,>r=%ymm14
vpand 128(%rdx),%ymm13,%ymm14

# qhasm: r6 ^= r
# asm 1: vpxor <r=reg256#15,<r6=reg256#10,<r6=reg256#10
# asm 2: vpxor <r=%ymm14,<r6=%ymm9,<r6=%ymm9
vpxor %ymm14,%ymm9,%ymm9

# qhasm: r = a2 & mem256[input_2 + 160]
# asm 1: vpand 160(<input_2=int64#3),<a2=reg256#14,>r=reg256#15
# asm 2: vpand 160(<input_2=%rdx),<a2=%ymm13,>r=%ymm14
vpand 160(%rdx),%ymm13,%ymm14

# qhasm: r7 ^= r
# asm 1: vpxor <r=reg256#15,<r7=reg256#11,<r7=reg256#11
# asm 2: vpxor <r=%ymm14,<r7=%ymm10,<r7=%ymm10
vpxor %ymm14,%ymm10,%ymm10

# qhasm: r = a2 & mem256[input_2 + 192]
# asm 1: vpand 192(<input_2=int64#3),<a2=reg256#14,>r=reg256#15
# asm 2: vpand 192(<input_2=%rdx),<a2=%ymm13,>r=%ymm14
vpand 192(%rdx),%ymm13,%ymm14

# qhasm: r8 ^= r
# asm 1: vpxor <r=reg256#15,<r8=reg256#12,<r8=reg256#12
# asm 2: vpxor <r=%ymm14,<r8=%ymm11,<r8=%ymm11
vpxor %ymm14,%ymm11,%ymm11

# qhasm: r = a2 & mem256[input_2 + 224]
# asm 1: vpand 224(<input_2=int64#3),<a2=reg256#14,>r=reg256#15
# asm 2: vpand 224(<input_2=%rdx),<a2=%ymm13,>r=%ymm14
vpand 224(%rdx),%ymm13,%ymm14

# qhasm: r9 ^= r
# asm 1: vpxor <r=reg256#15,<r9=reg256#13,<r9=reg256#13
# asm 2: vpxor <r=%ymm14,<r9=%ymm12,<r9=%ymm12
vpxor %ymm14,%ymm12,%ymm12

# qhasm: r = a2 & mem256[input_2 + 256]
# asm 1: vpand 256(<input_2=int64#3),<a2=reg256#14,>r=reg256#15
# asm 2: vpand 256(<input_2=%rdx),<a2=%ymm13,>r=%ymm14
vpand 256(%rdx),%ymm13,%ymm14

# qhasm: r10 ^= r
# asm 1: vpxor <r=reg256#15,<r10=reg256#2,<r10=reg256#2
# asm 2: vpxor <r=%ymm14,<r10=%ymm1,<r10=%ymm1
vpxor %ymm14,%ymm1,%ymm1

# qhasm: r = a2 & mem256[input_2 + 288]
# asm 1: vpand 288(<input_2=int64#3),<a2=reg256#14,>r=reg256#15
# asm 2: vpand 288(<input_2=%rdx),<a2=%ymm13,>r=%ymm14
vpand 288(%rdx),%ymm13,%ymm14

# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#15,<r11=reg256#3,<r11=reg256#3
# asm 2: vpxor <r=%ymm14,<r11=%ymm2,<r11=%ymm2
vpxor %ymm14,%ymm2,%ymm2

# qhasm: r = a2 & mem256[input_2 + 320]
# asm 1: vpand 320(<input_2=int64#3),<a2=reg256#14,>r=reg256#15
# asm 2: vpand 320(<input_2=%rdx),<a2=%ymm13,>r=%ymm14
vpand 320(%rdx),%ymm13,%ymm14

# qhasm: r12 ^= r
# asm 1: vpxor <r=reg256#15,<r12=reg256#4,<r12=reg256#4
# asm 2: vpxor <r=%ymm14,<r12=%ymm3,<r12=%ymm3
vpxor %ymm14,%ymm3,%ymm3

# qhasm: r = a2 & mem256[input_2 + 352]
# asm 1: vpand 352(<input_2=int64#3),<a2=reg256#14,>r=reg256#14
# asm 2: vpand 352(<input_2=%rdx),<a2=%ymm13,>r=%ymm13
vpand 352(%rdx),%ymm13,%ymm13

# qhasm: r13 ^= r
# asm 1: vpxor <r=reg256#14,<r13=reg256#5,<r13=reg256#5
# asm 2: vpxor <r=%ymm13,<r13=%ymm4,<r13=%ymm4
vpxor %ymm13,%ymm4,%ymm4

# qhasm: r4 ^= r13
# asm 1: vpxor <r13=reg256#5,<r4=reg256#8,<r4=reg256#8
# asm 2: vpxor <r13=%ymm4,<r4=%ymm7,<r4=%ymm7
vpxor %ymm4,%ymm7,%ymm7

# qhasm: r1 = r13
# asm 1: vmovapd <r13=reg256#5,>r1=reg256#5
# asm 2: vmovapd <r13=%ymm4,>r1=%ymm4
vmovapd %ymm4,%ymm4

# qhasm: a1 = mem256[ input_1 + 32 ]
# asm 1: vmovupd   32(<input_1=int64#2),>a1=reg256#14
# asm 2: vmovupd   32(<input_1=%rsi),>a1=%ymm13
vmovupd   32(%rsi),%ymm13

# qhasm: r = a1 & b0
# asm 1: vpand <a1=reg256#14,<b0=reg256#1,>r=reg256#15
# asm 2: vpand <a1=%ymm13,<b0=%ymm0,>r=%ymm14
vpand %ymm13,%ymm0,%ymm14

# qhasm: r1 ^= r
# asm 1: vpxor <r=reg256#15,<r1=reg256#5,<r1=reg256#5
# asm 2: vpxor <r=%ymm14,<r1=%ymm4,<r1=%ymm4
vpxor %ymm14,%ymm4,%ymm4

# qhasm: r = a1 & mem256[input_2 + 32]
# asm 1: vpand 32(<input_2=int64#3),<a1=reg256#14,>r=reg256#15
# asm 2: vpand 32(<input_2=%rdx),<a1=%ymm13,>r=%ymm14
vpand 32(%rdx),%ymm13,%ymm14

# qhasm: r2 ^= r
# asm 1: vpxor <r=reg256#15,<r2=reg256#6,<r2=reg256#6
# asm 2: vpxor <r=%ymm14,<r2=%ymm5,<r2=%ymm5
vpxor %ymm14,%ymm5,%ymm5

# qhasm: r = a1 & mem256[input_2 + 64]
# asm 1: vpand 64(<input_2=int64#3),<a1=reg256#14,>r=reg256#15
# asm 2: vpand 64(<input_2=%rdx),<a1=%ymm13,>r=%ymm14
vpand 64(%rdx),%ymm13,%ymm14

# qhasm: r3 ^= r
# asm 1: vpxor <r=reg256#15,<r3=reg256#7,<r3=reg256#7
# asm 2: vpxor <r=%ymm14,<r3=%ymm6,<r3=%ymm6
vpxor %ymm14,%ymm6,%ymm6

# qhasm: r = a1 & mem256[input_2 + 96]
# asm 1: vpand 96(<input_2=int64#3),<a1=reg256#14,>r=reg256#15
# asm 2: vpand 96(<input_2=%rdx),<a1=%ymm13,>r=%ymm14
vpand 96(%rdx),%ymm13,%ymm14

# qhasm: r4 ^= r
# asm 1: vpxor <r=reg256#15,<r4=reg256#8,<r4=reg256#8
# asm 2: vpxor <r=%ymm14,<r4=%ymm7,<r4=%ymm7
vpxor %ymm14,%ymm7,%ymm7

# qhasm: r = a1 & mem256[input_2 + 128]
# asm 1: vpand 128(<input_2=int64#3),<a1=reg256#14,>r=reg256#15
# asm 2: vpand 128(<input_2=%rdx),<a1=%ymm13,>r=%ymm14
vpand 128(%rdx),%ymm13,%ymm14

# qhasm: r5 ^= r
# asm 1: vpxor <r=reg256#15,<r5=reg256#9,<r5=reg256#9
# asm 2: vpxor <r=%ymm14,<r5=%ymm8,<r5=%ymm8
vpxor %ymm14,%ymm8,%ymm8

# qhasm: r = a1 & mem256[input_2 + 160]
# asm 1: vpand 160(<input_2=int64#3),<a1=reg256#14,>r=reg256#15
# asm 2: vpand 160(<input_2=%rdx),<a1=%ymm13,>r=%ymm14
vpand 160(%rdx),%ymm13,%ymm14

# qhasm: r6 ^= r
# asm 1: vpxor <r=reg256#15,<r6=reg256#10,<r6=reg256#10
# asm 2: vpxor <r=%ymm14,<r6=%ymm9,<r6=%ymm9
vpxor %ymm14,%ymm9,%ymm9

# qhasm: r = a1 & mem256[input_2 + 192]
# asm 1: vpand 192(<input_2=int64#3),<a1=reg256#14,>r=reg256#15
# asm 2: vpand 192(<input_2=%rdx),<a1=%ymm13,>r=%ymm14
vpand 192(%rdx),%ymm13,%ymm14

# qhasm: r7 ^= r
# asm 1: vpxor <r=reg256#15,<r7=reg256#11,<r7=reg256#11
# asm 2: vpxor <r=%ymm14,<r7=%ymm10,<r7=%ymm10
vpxor %ymm14,%ymm10,%ymm10

# qhasm: r = a1 & mem256[input_2 + 224]
# asm 1: vpand 224(<input_2=int64#3),<a1=reg256#14,>r=reg256#15
# asm 2: vpand 224(<input_2=%rdx),<a1=%ymm13,>r=%ymm14
vpand 224(%rdx),%ymm13,%ymm14

# qhasm: r8 ^= r
# asm 1: vpxor <r=reg256#15,<r8=reg256#12,<r8=reg256#12
# asm 2: vpxor <r=%ymm14,<r8=%ymm11,<r8=%ymm11
vpxor %ymm14,%ymm11,%ymm11

# qhasm: r = a1 & mem256[input_2 + 256]
# asm 1: vpand 256(<input_2=int64#3),<a1=reg256#14,>r=reg256#15
# asm 2: vpand 256(<input_2=%rdx),<a1=%ymm13,>r=%ymm14
vpand 256(%rdx),%ymm13,%ymm14

# qhasm: r9 ^= r
# asm 1: vpxor <r=reg256#15,<r9=reg256#13,<r9=reg256#13
# asm 2: vpxor <r=%ymm14,<r9=%ymm12,<r9=%ymm12
vpxor %ymm14,%ymm12,%ymm12

# qhasm: r = a1 & mem256[input_2 + 288]
# asm 1: vpand 288(<input_2=int64#3),<a1=reg256#14,>r=reg256#15
# asm 2: vpand 288(<input_2=%rdx),<a1=%ymm13,>r=%ymm14
vpand 288(%rdx),%ymm13,%ymm14

# qhasm: r10 ^= r
# asm 1: vpxor <r=reg256#15,<r10=reg256#2,<r10=reg256#2
# asm 2: vpxor <r=%ymm14,<r10=%ymm1,<r10=%ymm1
vpxor %ymm14,%ymm1,%ymm1

# qhasm: r = a1 & mem256[input_2 + 320]
# asm 1: vpand 320(<input_2=int64#3),<a1=reg256#14,>r=reg256#15
# asm 2: vpand 320(<input_2=%rdx),<a1=%ymm13,>r=%ymm14
vpand 320(%rdx),%ymm13,%ymm14

# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#15,<r11=reg256#3,<r11=reg256#3
# asm 2: vpxor <r=%ymm14,<r11=%ymm2,<r11=%ymm2
vpxor %ymm14,%ymm2,%ymm2

# qhasm: r = a1 & mem256[input_2 + 352]
# asm 1: vpand 352(<input_2=int64#3),<a1=reg256#14,>r=reg256#14
# asm 2: vpand 352(<input_2=%rdx),<a1=%ymm13,>r=%ymm13
vpand 352(%rdx),%ymm13,%ymm13

# qhasm: r12 ^= r
# asm 1: vpxor <r=reg256#14,<r12=reg256#4,<r12=reg256#4
# asm 2: vpxor <r=%ymm13,<r12=%ymm3,<r12=%ymm3
vpxor %ymm13,%ymm3,%ymm3

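# Final fold: r12 is the last high coefficient; x^12 = x^3 + 1 sends it
# into r3 and r0.  After the a0 row below, r0..r11 hold the fully
# reduced 12-limb product.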
# qhasm: r3 ^= r12
# asm 1: vpxor <r12=reg256#4,<r3=reg256#7,<r3=reg256#7
# asm 2: vpxor <r12=%ymm3,<r3=%ymm6,<r3=%ymm6
vpxor %ymm3,%ymm6,%ymm6

# qhasm: r0 = r12
# asm 1: vmovapd <r12=reg256#4,>r0=reg256#4
# asm 2: vmovapd <r12=%ymm3,>r0=%ymm3
vmovapd %ymm3,%ymm3

# qhasm: a0 = mem256[ input_1 + 0 ]
# asm 1: vmovupd   0(<input_1=int64#2),>a0=reg256#14
# asm 2: vmovupd   0(<input_1=%rsi),>a0=%ymm13
vmovupd   0(%rsi),%ymm13

# qhasm: r = a0 & b0
# asm 1: vpand <a0=reg256#14,<b0=reg256#1,>r=reg256#1
# asm 2: vpand <a0=%ymm13,<b0=%ymm0,>r=%ymm0
vpand %ymm13,%ymm0,%ymm0

# qhasm: r0 ^= r
# asm 1: vpxor <r=reg256#1,<r0=reg256#4,<r0=reg256#4
# asm 2: vpxor <r=%ymm0,<r0=%ymm3,<r0=%ymm3
vpxor %ymm0,%ymm3,%ymm3

# qhasm: r = a0 & mem256[input_2 + 32]
# asm 1: vpand 32(<input_2=int64#3),<a0=reg256#14,>r=reg256#1
# asm 2: vpand 32(<input_2=%rdx),<a0=%ymm13,>r=%ymm0
vpand 32(%rdx),%ymm13,%ymm0

# qhasm: r1 ^= r
# asm 1: vpxor <r=reg256#1,<r1=reg256#5,<r1=reg256#5
# asm 2: vpxor <r=%ymm0,<r1=%ymm4,<r1=%ymm4
vpxor %ymm0,%ymm4,%ymm4

# qhasm: r = a0 & mem256[input_2 + 64]
# asm 1: vpand 64(<input_2=int64#3),<a0=reg256#14,>r=reg256#1
# asm 2: vpand 64(<input_2=%rdx),<a0=%ymm13,>r=%ymm0
vpand 64(%rdx),%ymm13,%ymm0

# qhasm: r2 ^= r
# asm 1: vpxor <r=reg256#1,<r2=reg256#6,<r2=reg256#6
# asm 2: vpxor <r=%ymm0,<r2=%ymm5,<r2=%ymm5
vpxor %ymm0,%ymm5,%ymm5

# qhasm: r = a0 & mem256[input_2 + 96]
# asm 1: vpand 96(<input_2=int64#3),<a0=reg256#14,>r=reg256#1
# asm 2: vpand 96(<input_2=%rdx),<a0=%ymm13,>r=%ymm0
vpand 96(%rdx),%ymm13,%ymm0

# qhasm: r3 ^= r
# asm 1: vpxor <r=reg256#1,<r3=reg256#7,<r3=reg256#7
# asm 2: vpxor <r=%ymm0,<r3=%ymm6,<r3=%ymm6
vpxor %ymm0,%ymm6,%ymm6

# qhasm: r = a0 & mem256[input_2 + 128]
# asm 1: vpand 128(<input_2=int64#3),<a0=reg256#14,>r=reg256#1
# asm 2: vpand 128(<input_2=%rdx),<a0=%ymm13,>r=%ymm0
vpand 128(%rdx),%ymm13,%ymm0

# qhasm: r4 ^= r
# asm 1: vpxor <r=reg256#1,<r4=reg256#8,<r4=reg256#8
# asm 2: vpxor <r=%ymm0,<r4=%ymm7,<r4=%ymm7
vpxor %ymm0,%ymm7,%ymm7

# qhasm: r = a0 & mem256[input_2 + 160]
# asm 1: vpand 160(<input_2=int64#3),<a0=reg256#14,>r=reg256#1
# asm 2: vpand 160(<input_2=%rdx),<a0=%ymm13,>r=%ymm0
vpand 160(%rdx),%ymm13,%ymm0

# qhasm: r5 ^= r
# asm 1: vpxor <r=reg256#1,<r5=reg256#9,<r5=reg256#9
# asm 2: vpxor <r=%ymm0,<r5=%ymm8,<r5=%ymm8
vpxor %ymm0,%ymm8,%ymm8

# qhasm: r = a0 & mem256[input_2 + 192]
# asm 1: vpand 192(<input_2=int64#3),<a0=reg256#14,>r=reg256#1
# asm 2: vpand 192(<input_2=%rdx),<a0=%ymm13,>r=%ymm0
vpand 192(%rdx),%ymm13,%ymm0

# qhasm: r6 ^= r
# asm 1: vpxor <r=reg256#1,<r6=reg256#10,<r6=reg256#10
# asm 2: vpxor <r=%ymm0,<r6=%ymm9,<r6=%ymm9
vpxor %ymm0,%ymm9,%ymm9

# qhasm: r = a0 & mem256[input_2 + 224]
# asm 1: vpand 224(<input_2=int64#3),<a0=reg256#14,>r=reg256#1
# asm 2: vpand 224(<input_2=%rdx),<a0=%ymm13,>r=%ymm0
vpand 224(%rdx),%ymm13,%ymm0

# qhasm: r7 ^= r
# asm 1: vpxor <r=reg256#1,<r7=reg256#11,<r7=reg256#11
# asm 2: vpxor <r=%ymm0,<r7=%ymm10,<r7=%ymm10
vpxor %ymm0,%ymm10,%ymm10

# qhasm: r = a0 & mem256[input_2 + 256]
# asm 1: vpand 256(<input_2=int64#3),<a0=reg256#14,>r=reg256#1
# asm 2: vpand 256(<input_2=%rdx),<a0=%ymm13,>r=%ymm0
vpand 256(%rdx),%ymm13,%ymm0

# qhasm: r8 ^= r
# asm 1: vpxor <r=reg256#1,<r8=reg256#12,<r8=reg256#12
# asm 2: vpxor <r=%ymm0,<r8=%ymm11,<r8=%ymm11
vpxor %ymm0,%ymm11,%ymm11

# qhasm: r = a0 & mem256[input_2 + 288]
# asm 1: vpand 288(<input_2=int64#3),<a0=reg256#14,>r=reg256#1
# asm 2: vpand 288(<input_2=%rdx),<a0=%ymm13,>r=%ymm0
vpand 288(%rdx),%ymm13,%ymm0

# qhasm: r9 ^= r
# asm 1: vpxor <r=reg256#1,<r9=reg256#13,<r9=reg256#13
# asm 2: vpxor <r=%ymm0,<r9=%ymm12,<r9=%ymm12
vpxor %ymm0,%ymm12,%ymm12

# qhasm: r = a0 & mem256[input_2 + 320]
# asm 1: vpand 320(<input_2=int64#3),<a0=reg256#14,>r=reg256#1
# asm 2: vpand 320(<input_2=%rdx),<a0=%ymm13,>r=%ymm0
vpand 320(%rdx),%ymm13,%ymm0

# qhasm: r10 ^= r
# asm 1: vpxor <r=reg256#1,<r10=reg256#2,<r10=reg256#2
# asm 2: vpxor <r=%ymm0,<r10=%ymm1,<r10=%ymm1
vpxor %ymm0,%ymm1,%ymm1

# qhasm: r = a0 & mem256[input_2 + 352]
# asm 1: vpand 352(<input_2=int64#3),<a0=reg256#14,>r=reg256#1
# asm 2: vpand 352(<input_2=%rdx),<a0=%ymm13,>r=%ymm0
vpand 352(%rdx),%ymm13,%ymm0

# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#1,<r11=reg256#3,<r11=reg256#3
# asm 2: vpxor <r=%ymm0,<r11=%ymm2,<r11=%ymm2
vpxor %ymm0,%ymm2,%ymm2

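# Store the 12 result limbs, highest limb first.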
# qhasm: mem256[ input_0 + 352 ] = r11
# asm 1: vmovupd   <r11=reg256#3,352(<input_0=int64#1)
# asm 2: vmovupd   <r11=%ymm2,352(<input_0=%rdi)
vmovupd   %ymm2,352(%rdi)

# qhasm: mem256[ input_0 + 320 ] = r10
# asm 1: vmovupd   <r10=reg256#2,320(<input_0=int64#1)
# asm 2: vmovupd   <r10=%ymm1,320(<input_0=%rdi)
vmovupd   %ymm1,320(%rdi)

# qhasm: mem256[ input_0 + 288 ] = r9
# asm 1: vmovupd   <r9=reg256#13,288(<input_0=int64#1)
# asm 2: vmovupd   <r9=%ymm12,288(<input_0=%rdi)
vmovupd   %ymm12,288(%rdi)

# qhasm: mem256[ input_0 + 256 ] = r8
# asm 1: vmovupd   <r8=reg256#12,256(<input_0=int64#1)
# asm 2: vmovupd   <r8=%ymm11,256(<input_0=%rdi)
vmovupd   %ymm11,256(%rdi)

# qhasm: mem256[ input_0 + 224 ] = r7
# asm 1: vmovupd   <r7=reg256#11,224(<input_0=int64#1)
# asm 2: vmovupd   <r7=%ymm10,224(<input_0=%rdi)
vmovupd   %ymm10,224(%rdi)

# qhasm: mem256[ input_0 + 192 ] = r6
# asm 1: vmovupd   <r6=reg256#10,192(<input_0=int64#1)
# asm 2: vmovupd   <r6=%ymm9,192(<input_0=%rdi)
vmovupd   %ymm9,192(%rdi)

# qhasm: mem256[ input_0 + 160 ] = r5
# asm 1: vmovupd   <r5=reg256#9,160(<input_0=int64#1)
# asm 2: vmovupd   <r5=%ymm8,160(<input_0=%rdi)
vmovupd   %ymm8,160(%rdi)

# qhasm: mem256[ input_0 + 128 ] = r4
# asm 1: vmovupd   <r4=reg256#8,128(<input_0=int64#1)
# asm 2: vmovupd   <r4=%ymm7,128(<input_0=%rdi)
vmovupd   %ymm7,128(%rdi)

# qhasm: mem256[ input_0 + 96 ] = r3
# asm 1: vmovupd   <r3=reg256#7,96(<input_0=int64#1)
# asm 2: vmovupd   <r3=%ymm6,96(<input_0=%rdi)
vmovupd   %ymm6,96(%rdi)

# qhasm: mem256[ input_0 + 64 ] = r2
# asm 1: vmovupd   <r2=reg256#6,64(<input_0=int64#1)
# asm 2: vmovupd   <r2=%ymm5,64(<input_0=%rdi)
vmovupd   %ymm5,64(%rdi)

# qhasm: mem256[ input_0 + 32 ] = r1
# asm 1: vmovupd   <r1=reg256#5,32(<input_0=int64#1)
# asm 2: vmovupd   <r1=%ymm4,32(<input_0=%rdi)
vmovupd   %ymm4,32(%rdi)

# qhasm: mem256[ input_0 + 0 ] = r0
# asm 1: vmovupd   <r0=reg256#4,0(<input_0=int64#1)
# asm 2: vmovupd   <r0=%ymm3,0(<input_0=%rdi)
vmovupd   %ymm3,0(%rdi)

# qhasm: return
add %r11,%rsp
ret