1
2# qhasm: int64 input_0
3
4# qhasm: int64 input_1
5
6# qhasm: int64 input_2
7
8# qhasm: int64 input_3
9
10# qhasm: int64 input_4
11
12# qhasm: int64 input_5
13
14# qhasm: stack64 input_6
15
16# qhasm: stack64 input_7
17
18# qhasm: int64 caller_r11
19
20# qhasm: int64 caller_r12
21
22# qhasm: int64 caller_r13
23
24# qhasm: int64 caller_r14
25
26# qhasm: int64 caller_r15
27
28# qhasm: int64 caller_rbx
29
30# qhasm: int64 caller_rbp
31
32# qhasm: reg256 x0
33
34# qhasm: reg256 x1
35
36# qhasm: reg256 x2
37
38# qhasm: reg256 x3
39
40# qhasm: reg256 x4
41
42# qhasm: reg256 x5
43
44# qhasm: reg256 x6
45
46# qhasm: reg256 x7
47
48# qhasm: reg256 t0
49
50# qhasm: reg256 t1
51
52# qhasm: reg256 v00
53
54# qhasm: reg256 v01
55
56# qhasm: reg256 v10
57
58# qhasm: reg256 v11
59
60# qhasm: reg256 mask0
61
62# qhasm: reg256 mask1
63
64# qhasm: reg256 mask2
65
66# qhasm: reg256 mask3
67
68# qhasm: reg256 mask4
69
70# qhasm: reg256 mask5
71
72# qhasm: enter transpose_64x256_sp_asm
73.p2align 5
74.global _PQCLEAN_MCELIECE348864_AVX_transpose_64x256_sp_asm
75.global PQCLEAN_MCELIECE348864_AVX_transpose_64x256_sp_asm
76_PQCLEAN_MCELIECE348864_AVX_transpose_64x256_sp_asm:
77PQCLEAN_MCELIECE348864_AVX_transpose_64x256_sp_asm:
78mov %rsp,%r11
79and $31,%r11
80add $0,%r11
81sub %r11,%rsp
82
83# qhasm: mask0 aligned= mem256[ PQCLEAN_MCELIECE348864_AVX_MASK5_0 ]
84# asm 1: vmovapd PQCLEAN_MCELIECE348864_AVX_MASK5_0,>mask0=reg256#1
85# asm 2: vmovapd PQCLEAN_MCELIECE348864_AVX_MASK5_0,>mask0=%ymm0
86vmovapd PQCLEAN_MCELIECE348864_AVX_MASK5_0(%rip),%ymm0
87
88# qhasm: mask1 aligned= mem256[ PQCLEAN_MCELIECE348864_AVX_MASK5_1 ]
89# asm 1: vmovapd PQCLEAN_MCELIECE348864_AVX_MASK5_1,>mask1=reg256#2
90# asm 2: vmovapd PQCLEAN_MCELIECE348864_AVX_MASK5_1,>mask1=%ymm1
91vmovapd PQCLEAN_MCELIECE348864_AVX_MASK5_1(%rip),%ymm1
92
93# qhasm: mask2 aligned= mem256[ PQCLEAN_MCELIECE348864_AVX_MASK4_0 ]
94# asm 1: vmovapd PQCLEAN_MCELIECE348864_AVX_MASK4_0,>mask2=reg256#3
95# asm 2: vmovapd PQCLEAN_MCELIECE348864_AVX_MASK4_0,>mask2=%ymm2
96vmovapd PQCLEAN_MCELIECE348864_AVX_MASK4_0(%rip),%ymm2
97
98# qhasm: mask3 aligned= mem256[ PQCLEAN_MCELIECE348864_AVX_MASK4_1 ]
99# asm 1: vmovapd PQCLEAN_MCELIECE348864_AVX_MASK4_1,>mask3=reg256#4
100# asm 2: vmovapd PQCLEAN_MCELIECE348864_AVX_MASK4_1,>mask3=%ymm3
101vmovapd PQCLEAN_MCELIECE348864_AVX_MASK4_1(%rip),%ymm3
102
103# qhasm: mask4 aligned= mem256[ PQCLEAN_MCELIECE348864_AVX_MASK3_0 ]
104# asm 1: vmovapd PQCLEAN_MCELIECE348864_AVX_MASK3_0,>mask4=reg256#5
105# asm 2: vmovapd PQCLEAN_MCELIECE348864_AVX_MASK3_0,>mask4=%ymm4
106vmovapd PQCLEAN_MCELIECE348864_AVX_MASK3_0(%rip),%ymm4
107
108# qhasm: mask5 aligned= mem256[ PQCLEAN_MCELIECE348864_AVX_MASK3_1 ]
109# asm 1: vmovapd PQCLEAN_MCELIECE348864_AVX_MASK3_1,>mask5=reg256#6
110# asm 2: vmovapd PQCLEAN_MCELIECE348864_AVX_MASK3_1,>mask5=%ymm5
111vmovapd PQCLEAN_MCELIECE348864_AVX_MASK3_1(%rip),%ymm5
112
113# qhasm: x0 = mem256[ input_0 + 0 ]
114# asm 1: vmovupd   0(<input_0=int64#1),>x0=reg256#7
115# asm 2: vmovupd   0(<input_0=%rdi),>x0=%ymm6
116vmovupd   0(%rdi),%ymm6
117
118# qhasm: x1 = mem256[ input_0 + 256 ]
119# asm 1: vmovupd   256(<input_0=int64#1),>x1=reg256#8
120# asm 2: vmovupd   256(<input_0=%rdi),>x1=%ymm7
121vmovupd   256(%rdi),%ymm7
122
123# qhasm: x2 = mem256[ input_0 + 512 ]
124# asm 1: vmovupd   512(<input_0=int64#1),>x2=reg256#9
125# asm 2: vmovupd   512(<input_0=%rdi),>x2=%ymm8
126vmovupd   512(%rdi),%ymm8
127
128# qhasm: x3 = mem256[ input_0 + 768 ]
129# asm 1: vmovupd   768(<input_0=int64#1),>x3=reg256#10
130# asm 2: vmovupd   768(<input_0=%rdi),>x3=%ymm9
131vmovupd   768(%rdi),%ymm9
132
133# qhasm: x4 = mem256[ input_0 + 1024 ]
134# asm 1: vmovupd   1024(<input_0=int64#1),>x4=reg256#11
135# asm 2: vmovupd   1024(<input_0=%rdi),>x4=%ymm10
136vmovupd   1024(%rdi),%ymm10
137
138# qhasm: x5 = mem256[ input_0 + 1280 ]
139# asm 1: vmovupd   1280(<input_0=int64#1),>x5=reg256#12
140# asm 2: vmovupd   1280(<input_0=%rdi),>x5=%ymm11
141vmovupd   1280(%rdi),%ymm11
142
143# qhasm: x6 = mem256[ input_0 + 1536 ]
144# asm 1: vmovupd   1536(<input_0=int64#1),>x6=reg256#13
145# asm 2: vmovupd   1536(<input_0=%rdi),>x6=%ymm12
146vmovupd   1536(%rdi),%ymm12
147
148# qhasm: x7 = mem256[ input_0 + 1792 ]
149# asm 1: vmovupd   1792(<input_0=int64#1),>x7=reg256#14
150# asm 2: vmovupd   1792(<input_0=%rdi),>x7=%ymm13
151vmovupd   1792(%rdi),%ymm13
152
153# qhasm: v00 = x0 & mask0
154# asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15
155# asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14
156vpand %ymm6,%ymm0,%ymm14
157
158# qhasm: 4x v10 = x4 << 32
159# asm 1: vpsllq $32,<x4=reg256#11,>v10=reg256#16
160# asm 2: vpsllq $32,<x4=%ymm10,>v10=%ymm15
161vpsllq $32,%ymm10,%ymm15
162
163# qhasm: 4x v01 = x0 unsigned>> 32
164# asm 1: vpsrlq $32,<x0=reg256#7,>v01=reg256#7
165# asm 2: vpsrlq $32,<x0=%ymm6,>v01=%ymm6
166vpsrlq $32,%ymm6,%ymm6
167
168# qhasm: v11 = x4 & mask1
169# asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11
170# asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10
171vpand %ymm10,%ymm1,%ymm10
172
173# qhasm: x0 = v00 | v10
174# asm 1: vpor  <v00=reg256#15,<v10=reg256#16,>x0=reg256#15
175# asm 2: vpor  <v00=%ymm14,<v10=%ymm15,>x0=%ymm14
176vpor  %ymm14,%ymm15,%ymm14
177
178# qhasm: x4 = v01 | v11
179# asm 1: vpor  <v01=reg256#7,<v11=reg256#11,>x4=reg256#7
180# asm 2: vpor  <v01=%ymm6,<v11=%ymm10,>x4=%ymm6
181vpor  %ymm6,%ymm10,%ymm6
182
183# qhasm: v00 = x1 & mask0
184# asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11
185# asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10
186vpand %ymm7,%ymm0,%ymm10
187
188# qhasm: 4x v10 = x5 << 32
189# asm 1: vpsllq $32,<x5=reg256#12,>v10=reg256#16
190# asm 2: vpsllq $32,<x5=%ymm11,>v10=%ymm15
191vpsllq $32,%ymm11,%ymm15
192
193# qhasm: 4x v01 = x1 unsigned>> 32
194# asm 1: vpsrlq $32,<x1=reg256#8,>v01=reg256#8
195# asm 2: vpsrlq $32,<x1=%ymm7,>v01=%ymm7
196vpsrlq $32,%ymm7,%ymm7
197
198# qhasm: v11 = x5 & mask1
199# asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12
200# asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11
201vpand %ymm11,%ymm1,%ymm11
202
203# qhasm: x1 = v00 | v10
204# asm 1: vpor  <v00=reg256#11,<v10=reg256#16,>x1=reg256#11
205# asm 2: vpor  <v00=%ymm10,<v10=%ymm15,>x1=%ymm10
206vpor  %ymm10,%ymm15,%ymm10
207
208# qhasm: x5 = v01 | v11
209# asm 1: vpor  <v01=reg256#8,<v11=reg256#12,>x5=reg256#8
210# asm 2: vpor  <v01=%ymm7,<v11=%ymm11,>x5=%ymm7
211vpor  %ymm7,%ymm11,%ymm7
212
213# qhasm: v00 = x2 & mask0
214# asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12
215# asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11
216vpand %ymm8,%ymm0,%ymm11
217
218# qhasm: 4x v10 = x6 << 32
219# asm 1: vpsllq $32,<x6=reg256#13,>v10=reg256#16
220# asm 2: vpsllq $32,<x6=%ymm12,>v10=%ymm15
221vpsllq $32,%ymm12,%ymm15
222
223# qhasm: 4x v01 = x2 unsigned>> 32
224# asm 1: vpsrlq $32,<x2=reg256#9,>v01=reg256#9
225# asm 2: vpsrlq $32,<x2=%ymm8,>v01=%ymm8
226vpsrlq $32,%ymm8,%ymm8
227
228# qhasm: v11 = x6 & mask1
229# asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13
230# asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12
231vpand %ymm12,%ymm1,%ymm12
232
233# qhasm: x2 = v00 | v10
234# asm 1: vpor  <v00=reg256#12,<v10=reg256#16,>x2=reg256#12
235# asm 2: vpor  <v00=%ymm11,<v10=%ymm15,>x2=%ymm11
236vpor  %ymm11,%ymm15,%ymm11
237
238# qhasm: x6 = v01 | v11
239# asm 1: vpor  <v01=reg256#9,<v11=reg256#13,>x6=reg256#9
240# asm 2: vpor  <v01=%ymm8,<v11=%ymm12,>x6=%ymm8
241vpor  %ymm8,%ymm12,%ymm8
242
243# qhasm: v00 = x3 & mask0
244# asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13
245# asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12
246vpand %ymm9,%ymm0,%ymm12
247
248# qhasm: 4x v10 = x7 << 32
249# asm 1: vpsllq $32,<x7=reg256#14,>v10=reg256#16
250# asm 2: vpsllq $32,<x7=%ymm13,>v10=%ymm15
251vpsllq $32,%ymm13,%ymm15
252
253# qhasm: 4x v01 = x3 unsigned>> 32
254# asm 1: vpsrlq $32,<x3=reg256#10,>v01=reg256#10
255# asm 2: vpsrlq $32,<x3=%ymm9,>v01=%ymm9
256vpsrlq $32,%ymm9,%ymm9
257
258# qhasm: v11 = x7 & mask1
259# asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14
260# asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13
261vpand %ymm13,%ymm1,%ymm13
262
263# qhasm: x3 = v00 | v10
264# asm 1: vpor  <v00=reg256#13,<v10=reg256#16,>x3=reg256#13
265# asm 2: vpor  <v00=%ymm12,<v10=%ymm15,>x3=%ymm12
266vpor  %ymm12,%ymm15,%ymm12
267
268# qhasm: x7 = v01 | v11
269# asm 1: vpor  <v01=reg256#10,<v11=reg256#14,>x7=reg256#10
270# asm 2: vpor  <v01=%ymm9,<v11=%ymm13,>x7=%ymm9
271vpor  %ymm9,%ymm13,%ymm9
272
273# qhasm: v00 = x0 & mask2
274# asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14
275# asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13
276vpand %ymm14,%ymm2,%ymm13
277
278# qhasm: 8x v10 = x2 << 16
279# asm 1: vpslld $16,<x2=reg256#12,>v10=reg256#16
280# asm 2: vpslld $16,<x2=%ymm11,>v10=%ymm15
281vpslld $16,%ymm11,%ymm15
282
283# qhasm: 8x v01 = x0 unsigned>> 16
284# asm 1: vpsrld $16,<x0=reg256#15,>v01=reg256#15
285# asm 2: vpsrld $16,<x0=%ymm14,>v01=%ymm14
286vpsrld $16,%ymm14,%ymm14
287
288# qhasm: v11 = x2 & mask3
289# asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12
290# asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11
291vpand %ymm11,%ymm3,%ymm11
292
293# qhasm: x0 = v00 | v10
294# asm 1: vpor  <v00=reg256#14,<v10=reg256#16,>x0=reg256#14
295# asm 2: vpor  <v00=%ymm13,<v10=%ymm15,>x0=%ymm13
296vpor  %ymm13,%ymm15,%ymm13
297
298# qhasm: x2 = v01 | v11
299# asm 1: vpor  <v01=reg256#15,<v11=reg256#12,>x2=reg256#12
300# asm 2: vpor  <v01=%ymm14,<v11=%ymm11,>x2=%ymm11
301vpor  %ymm14,%ymm11,%ymm11
302
303# qhasm: v00 = x1 & mask2
304# asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15
305# asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14
306vpand %ymm10,%ymm2,%ymm14
307
308# qhasm: 8x v10 = x3 << 16
309# asm 1: vpslld $16,<x3=reg256#13,>v10=reg256#16
310# asm 2: vpslld $16,<x3=%ymm12,>v10=%ymm15
311vpslld $16,%ymm12,%ymm15
312
313# qhasm: 8x v01 = x1 unsigned>> 16
314# asm 1: vpsrld $16,<x1=reg256#11,>v01=reg256#11
315# asm 2: vpsrld $16,<x1=%ymm10,>v01=%ymm10
316vpsrld $16,%ymm10,%ymm10
317
318# qhasm: v11 = x3 & mask3
319# asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13
320# asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12
321vpand %ymm12,%ymm3,%ymm12
322
323# qhasm: x1 = v00 | v10
324# asm 1: vpor  <v00=reg256#15,<v10=reg256#16,>x1=reg256#15
325# asm 2: vpor  <v00=%ymm14,<v10=%ymm15,>x1=%ymm14
326vpor  %ymm14,%ymm15,%ymm14
327
328# qhasm: x3 = v01 | v11
329# asm 1: vpor  <v01=reg256#11,<v11=reg256#13,>x3=reg256#11
330# asm 2: vpor  <v01=%ymm10,<v11=%ymm12,>x3=%ymm10
331vpor  %ymm10,%ymm12,%ymm10
332
333# qhasm: v00 = x4 & mask2
334# asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13
335# asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12
336vpand %ymm6,%ymm2,%ymm12
337
338# qhasm: 8x v10 = x6 << 16
339# asm 1: vpslld $16,<x6=reg256#9,>v10=reg256#16
340# asm 2: vpslld $16,<x6=%ymm8,>v10=%ymm15
341vpslld $16,%ymm8,%ymm15
342
343# qhasm: 8x v01 = x4 unsigned>> 16
344# asm 1: vpsrld $16,<x4=reg256#7,>v01=reg256#7
345# asm 2: vpsrld $16,<x4=%ymm6,>v01=%ymm6
346vpsrld $16,%ymm6,%ymm6
347
348# qhasm: v11 = x6 & mask3
349# asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9
350# asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8
351vpand %ymm8,%ymm3,%ymm8
352
353# qhasm: x4 = v00 | v10
354# asm 1: vpor  <v00=reg256#13,<v10=reg256#16,>x4=reg256#13
355# asm 2: vpor  <v00=%ymm12,<v10=%ymm15,>x4=%ymm12
356vpor  %ymm12,%ymm15,%ymm12
357
358# qhasm: x6 = v01 | v11
359# asm 1: vpor  <v01=reg256#7,<v11=reg256#9,>x6=reg256#7
360# asm 2: vpor  <v01=%ymm6,<v11=%ymm8,>x6=%ymm6
361vpor  %ymm6,%ymm8,%ymm6
362
363# qhasm: v00 = x5 & mask2
364# asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9
365# asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8
366vpand %ymm7,%ymm2,%ymm8
367
368# qhasm: 8x v10 = x7 << 16
369# asm 1: vpslld $16,<x7=reg256#10,>v10=reg256#16
370# asm 2: vpslld $16,<x7=%ymm9,>v10=%ymm15
371vpslld $16,%ymm9,%ymm15
372
373# qhasm: 8x v01 = x5 unsigned>> 16
374# asm 1: vpsrld $16,<x5=reg256#8,>v01=reg256#8
375# asm 2: vpsrld $16,<x5=%ymm7,>v01=%ymm7
376vpsrld $16,%ymm7,%ymm7
377
378# qhasm: v11 = x7 & mask3
379# asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10
380# asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9
381vpand %ymm9,%ymm3,%ymm9
382
383# qhasm: x5 = v00 | v10
384# asm 1: vpor  <v00=reg256#9,<v10=reg256#16,>x5=reg256#9
385# asm 2: vpor  <v00=%ymm8,<v10=%ymm15,>x5=%ymm8
386vpor  %ymm8,%ymm15,%ymm8
387
388# qhasm: x7 = v01 | v11
389# asm 1: vpor  <v01=reg256#8,<v11=reg256#10,>x7=reg256#8
390# asm 2: vpor  <v01=%ymm7,<v11=%ymm9,>x7=%ymm7
391vpor  %ymm7,%ymm9,%ymm7
392
393# qhasm: v00 = x0 & mask4
394# asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10
395# asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9
396vpand %ymm13,%ymm4,%ymm9
397
398# qhasm: 16x v10 = x1 << 8
399# asm 1: vpsllw $8,<x1=reg256#15,>v10=reg256#16
400# asm 2: vpsllw $8,<x1=%ymm14,>v10=%ymm15
401vpsllw $8,%ymm14,%ymm15
402
403# qhasm: 16x v01 = x0 unsigned>> 8
404# asm 1: vpsrlw $8,<x0=reg256#14,>v01=reg256#14
405# asm 2: vpsrlw $8,<x0=%ymm13,>v01=%ymm13
406vpsrlw $8,%ymm13,%ymm13
407
408# qhasm: v11 = x1 & mask5
409# asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15
410# asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14
411vpand %ymm14,%ymm5,%ymm14
412
413# qhasm: x0 = v00 | v10
414# asm 1: vpor  <v00=reg256#10,<v10=reg256#16,>x0=reg256#10
415# asm 2: vpor  <v00=%ymm9,<v10=%ymm15,>x0=%ymm9
416vpor  %ymm9,%ymm15,%ymm9
417
418# qhasm: x1 = v01 | v11
419# asm 1: vpor  <v01=reg256#14,<v11=reg256#15,>x1=reg256#14
420# asm 2: vpor  <v01=%ymm13,<v11=%ymm14,>x1=%ymm13
421vpor  %ymm13,%ymm14,%ymm13
422
423# qhasm: v00 = x2 & mask4
424# asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15
425# asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14
426vpand %ymm11,%ymm4,%ymm14
427
428# qhasm: 16x v10 = x3 << 8
429# asm 1: vpsllw $8,<x3=reg256#11,>v10=reg256#16
430# asm 2: vpsllw $8,<x3=%ymm10,>v10=%ymm15
431vpsllw $8,%ymm10,%ymm15
432
433# qhasm: 16x v01 = x2 unsigned>> 8
434# asm 1: vpsrlw $8,<x2=reg256#12,>v01=reg256#12
435# asm 2: vpsrlw $8,<x2=%ymm11,>v01=%ymm11
436vpsrlw $8,%ymm11,%ymm11
437
438# qhasm: v11 = x3 & mask5
439# asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11
440# asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10
441vpand %ymm10,%ymm5,%ymm10
442
443# qhasm: x2 = v00 | v10
444# asm 1: vpor  <v00=reg256#15,<v10=reg256#16,>x2=reg256#15
445# asm 2: vpor  <v00=%ymm14,<v10=%ymm15,>x2=%ymm14
446vpor  %ymm14,%ymm15,%ymm14
447
448# qhasm: x3 = v01 | v11
449# asm 1: vpor  <v01=reg256#12,<v11=reg256#11,>x3=reg256#11
450# asm 2: vpor  <v01=%ymm11,<v11=%ymm10,>x3=%ymm10
451vpor  %ymm11,%ymm10,%ymm10
452
453# qhasm: v00 = x4 & mask4
454# asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12
455# asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11
456vpand %ymm12,%ymm4,%ymm11
457
458# qhasm: 16x v10 = x5 << 8
459# asm 1: vpsllw $8,<x5=reg256#9,>v10=reg256#16
460# asm 2: vpsllw $8,<x5=%ymm8,>v10=%ymm15
461vpsllw $8,%ymm8,%ymm15
462
463# qhasm: 16x v01 = x4 unsigned>> 8
464# asm 1: vpsrlw $8,<x4=reg256#13,>v01=reg256#13
465# asm 2: vpsrlw $8,<x4=%ymm12,>v01=%ymm12
466vpsrlw $8,%ymm12,%ymm12
467
468# qhasm: v11 = x5 & mask5
469# asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9
470# asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8
471vpand %ymm8,%ymm5,%ymm8
472
473# qhasm: x4 = v00 | v10
474# asm 1: vpor  <v00=reg256#12,<v10=reg256#16,>x4=reg256#12
475# asm 2: vpor  <v00=%ymm11,<v10=%ymm15,>x4=%ymm11
476vpor  %ymm11,%ymm15,%ymm11
477
478# qhasm: x5 = v01 | v11
479# asm 1: vpor  <v01=reg256#13,<v11=reg256#9,>x5=reg256#9
480# asm 2: vpor  <v01=%ymm12,<v11=%ymm8,>x5=%ymm8
481vpor  %ymm12,%ymm8,%ymm8
482
483# qhasm: v00 = x6 & mask4
484# asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13
485# asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12
486vpand %ymm6,%ymm4,%ymm12
487
488# qhasm: 16x v10 = x7 << 8
489# asm 1: vpsllw $8,<x7=reg256#8,>v10=reg256#16
490# asm 2: vpsllw $8,<x7=%ymm7,>v10=%ymm15
491vpsllw $8,%ymm7,%ymm15
492
493# qhasm: 16x v01 = x6 unsigned>> 8
494# asm 1: vpsrlw $8,<x6=reg256#7,>v01=reg256#7
495# asm 2: vpsrlw $8,<x6=%ymm6,>v01=%ymm6
496vpsrlw $8,%ymm6,%ymm6
497
498# qhasm: v11 = x7 & mask5
499# asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8
500# asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7
501vpand %ymm7,%ymm5,%ymm7
502
503# qhasm: x6 = v00 | v10
504# asm 1: vpor  <v00=reg256#13,<v10=reg256#16,>x6=reg256#13
505# asm 2: vpor  <v00=%ymm12,<v10=%ymm15,>x6=%ymm12
506vpor  %ymm12,%ymm15,%ymm12
507
508# qhasm: x7 = v01 | v11
509# asm 1: vpor  <v01=reg256#7,<v11=reg256#8,>x7=reg256#7
510# asm 2: vpor  <v01=%ymm6,<v11=%ymm7,>x7=%ymm6
511vpor  %ymm6,%ymm7,%ymm6
512
513# qhasm: mem256[ input_0 + 0 ] = x0
514# asm 1: vmovupd   <x0=reg256#10,0(<input_0=int64#1)
515# asm 2: vmovupd   <x0=%ymm9,0(<input_0=%rdi)
516vmovupd   %ymm9,0(%rdi)
517
518# qhasm: mem256[ input_0 + 256 ] = x1
519# asm 1: vmovupd   <x1=reg256#14,256(<input_0=int64#1)
520# asm 2: vmovupd   <x1=%ymm13,256(<input_0=%rdi)
521vmovupd   %ymm13,256(%rdi)
522
523# qhasm: mem256[ input_0 + 512 ] = x2
524# asm 1: vmovupd   <x2=reg256#15,512(<input_0=int64#1)
525# asm 2: vmovupd   <x2=%ymm14,512(<input_0=%rdi)
526vmovupd   %ymm14,512(%rdi)
527
528# qhasm: mem256[ input_0 + 768 ] = x3
529# asm 1: vmovupd   <x3=reg256#11,768(<input_0=int64#1)
530# asm 2: vmovupd   <x3=%ymm10,768(<input_0=%rdi)
531vmovupd   %ymm10,768(%rdi)
532
533# qhasm: mem256[ input_0 + 1024 ] = x4
534# asm 1: vmovupd   <x4=reg256#12,1024(<input_0=int64#1)
535# asm 2: vmovupd   <x4=%ymm11,1024(<input_0=%rdi)
536vmovupd   %ymm11,1024(%rdi)
537
538# qhasm: mem256[ input_0 + 1280 ] = x5
539# asm 1: vmovupd   <x5=reg256#9,1280(<input_0=int64#1)
540# asm 2: vmovupd   <x5=%ymm8,1280(<input_0=%rdi)
541vmovupd   %ymm8,1280(%rdi)
542
543# qhasm: mem256[ input_0 + 1536 ] = x6
544# asm 1: vmovupd   <x6=reg256#13,1536(<input_0=int64#1)
545# asm 2: vmovupd   <x6=%ymm12,1536(<input_0=%rdi)
546vmovupd   %ymm12,1536(%rdi)
547
548# qhasm: mem256[ input_0 + 1792 ] = x7
549# asm 1: vmovupd   <x7=reg256#7,1792(<input_0=int64#1)
550# asm 2: vmovupd   <x7=%ymm6,1792(<input_0=%rdi)
551vmovupd   %ymm6,1792(%rdi)
552
553# qhasm: x0 = mem256[ input_0 + 32 ]
554# asm 1: vmovupd   32(<input_0=int64#1),>x0=reg256#7
555# asm 2: vmovupd   32(<input_0=%rdi),>x0=%ymm6
556vmovupd   32(%rdi),%ymm6
557
558# qhasm: x1 = mem256[ input_0 + 288 ]
559# asm 1: vmovupd   288(<input_0=int64#1),>x1=reg256#8
560# asm 2: vmovupd   288(<input_0=%rdi),>x1=%ymm7
561vmovupd   288(%rdi),%ymm7
562
563# qhasm: x2 = mem256[ input_0 + 544 ]
564# asm 1: vmovupd   544(<input_0=int64#1),>x2=reg256#9
565# asm 2: vmovupd   544(<input_0=%rdi),>x2=%ymm8
566vmovupd   544(%rdi),%ymm8
567
568# qhasm: x3 = mem256[ input_0 + 800 ]
569# asm 1: vmovupd   800(<input_0=int64#1),>x3=reg256#10
570# asm 2: vmovupd   800(<input_0=%rdi),>x3=%ymm9
571vmovupd   800(%rdi),%ymm9
572
573# qhasm: x4 = mem256[ input_0 + 1056 ]
574# asm 1: vmovupd   1056(<input_0=int64#1),>x4=reg256#11
575# asm 2: vmovupd   1056(<input_0=%rdi),>x4=%ymm10
576vmovupd   1056(%rdi),%ymm10
577
578# qhasm: x5 = mem256[ input_0 + 1312 ]
579# asm 1: vmovupd   1312(<input_0=int64#1),>x5=reg256#12
580# asm 2: vmovupd   1312(<input_0=%rdi),>x5=%ymm11
581vmovupd   1312(%rdi),%ymm11
582
583# qhasm: x6 = mem256[ input_0 + 1568 ]
584# asm 1: vmovupd   1568(<input_0=int64#1),>x6=reg256#13
585# asm 2: vmovupd   1568(<input_0=%rdi),>x6=%ymm12
586vmovupd   1568(%rdi),%ymm12
587
588# qhasm: x7 = mem256[ input_0 + 1824 ]
589# asm 1: vmovupd   1824(<input_0=int64#1),>x7=reg256#14
590# asm 2: vmovupd   1824(<input_0=%rdi),>x7=%ymm13
591vmovupd   1824(%rdi),%ymm13
592
593# qhasm: v00 = x0 & mask0
594# asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15
595# asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14
596vpand %ymm6,%ymm0,%ymm14
597
598# qhasm: 4x v10 = x4 << 32
599# asm 1: vpsllq $32,<x4=reg256#11,>v10=reg256#16
600# asm 2: vpsllq $32,<x4=%ymm10,>v10=%ymm15
601vpsllq $32,%ymm10,%ymm15
602
603# qhasm: 4x v01 = x0 unsigned>> 32
604# asm 1: vpsrlq $32,<x0=reg256#7,>v01=reg256#7
605# asm 2: vpsrlq $32,<x0=%ymm6,>v01=%ymm6
606vpsrlq $32,%ymm6,%ymm6
607
608# qhasm: v11 = x4 & mask1
609# asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11
610# asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10
611vpand %ymm10,%ymm1,%ymm10
612
613# qhasm: x0 = v00 | v10
614# asm 1: vpor  <v00=reg256#15,<v10=reg256#16,>x0=reg256#15
615# asm 2: vpor  <v00=%ymm14,<v10=%ymm15,>x0=%ymm14
616vpor  %ymm14,%ymm15,%ymm14
617
618# qhasm: x4 = v01 | v11
619# asm 1: vpor  <v01=reg256#7,<v11=reg256#11,>x4=reg256#7
620# asm 2: vpor  <v01=%ymm6,<v11=%ymm10,>x4=%ymm6
621vpor  %ymm6,%ymm10,%ymm6
622
623# qhasm: v00 = x1 & mask0
624# asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11
625# asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10
626vpand %ymm7,%ymm0,%ymm10
627
628# qhasm: 4x v10 = x5 << 32
629# asm 1: vpsllq $32,<x5=reg256#12,>v10=reg256#16
630# asm 2: vpsllq $32,<x5=%ymm11,>v10=%ymm15
631vpsllq $32,%ymm11,%ymm15
632
633# qhasm: 4x v01 = x1 unsigned>> 32
634# asm 1: vpsrlq $32,<x1=reg256#8,>v01=reg256#8
635# asm 2: vpsrlq $32,<x1=%ymm7,>v01=%ymm7
636vpsrlq $32,%ymm7,%ymm7
637
638# qhasm: v11 = x5 & mask1
639# asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12
640# asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11
641vpand %ymm11,%ymm1,%ymm11
642
643# qhasm: x1 = v00 | v10
644# asm 1: vpor  <v00=reg256#11,<v10=reg256#16,>x1=reg256#11
645# asm 2: vpor  <v00=%ymm10,<v10=%ymm15,>x1=%ymm10
646vpor  %ymm10,%ymm15,%ymm10
647
648# qhasm: x5 = v01 | v11
649# asm 1: vpor  <v01=reg256#8,<v11=reg256#12,>x5=reg256#8
650# asm 2: vpor  <v01=%ymm7,<v11=%ymm11,>x5=%ymm7
651vpor  %ymm7,%ymm11,%ymm7
652
653# qhasm: v00 = x2 & mask0
654# asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12
655# asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11
656vpand %ymm8,%ymm0,%ymm11
657
658# qhasm: 4x v10 = x6 << 32
659# asm 1: vpsllq $32,<x6=reg256#13,>v10=reg256#16
660# asm 2: vpsllq $32,<x6=%ymm12,>v10=%ymm15
661vpsllq $32,%ymm12,%ymm15
662
663# qhasm: 4x v01 = x2 unsigned>> 32
664# asm 1: vpsrlq $32,<x2=reg256#9,>v01=reg256#9
665# asm 2: vpsrlq $32,<x2=%ymm8,>v01=%ymm8
666vpsrlq $32,%ymm8,%ymm8
667
668# qhasm: v11 = x6 & mask1
669# asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13
670# asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12
671vpand %ymm12,%ymm1,%ymm12
672
673# qhasm: x2 = v00 | v10
674# asm 1: vpor  <v00=reg256#12,<v10=reg256#16,>x2=reg256#12
675# asm 2: vpor  <v00=%ymm11,<v10=%ymm15,>x2=%ymm11
676vpor  %ymm11,%ymm15,%ymm11
677
678# qhasm: x6 = v01 | v11
679# asm 1: vpor  <v01=reg256#9,<v11=reg256#13,>x6=reg256#9
680# asm 2: vpor  <v01=%ymm8,<v11=%ymm12,>x6=%ymm8
681vpor  %ymm8,%ymm12,%ymm8
682
683# qhasm: v00 = x3 & mask0
684# asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13
685# asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12
686vpand %ymm9,%ymm0,%ymm12
687
688# qhasm: 4x v10 = x7 << 32
689# asm 1: vpsllq $32,<x7=reg256#14,>v10=reg256#16
690# asm 2: vpsllq $32,<x7=%ymm13,>v10=%ymm15
691vpsllq $32,%ymm13,%ymm15
692
693# qhasm: 4x v01 = x3 unsigned>> 32
694# asm 1: vpsrlq $32,<x3=reg256#10,>v01=reg256#10
695# asm 2: vpsrlq $32,<x3=%ymm9,>v01=%ymm9
696vpsrlq $32,%ymm9,%ymm9
697
698# qhasm: v11 = x7 & mask1
699# asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14
700# asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13
701vpand %ymm13,%ymm1,%ymm13
702
703# qhasm: x3 = v00 | v10
704# asm 1: vpor  <v00=reg256#13,<v10=reg256#16,>x3=reg256#13
705# asm 2: vpor  <v00=%ymm12,<v10=%ymm15,>x3=%ymm12
706vpor  %ymm12,%ymm15,%ymm12
707
708# qhasm: x7 = v01 | v11
709# asm 1: vpor  <v01=reg256#10,<v11=reg256#14,>x7=reg256#10
710# asm 2: vpor  <v01=%ymm9,<v11=%ymm13,>x7=%ymm9
711vpor  %ymm9,%ymm13,%ymm9
712
713# qhasm: v00 = x0 & mask2
714# asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14
715# asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13
716vpand %ymm14,%ymm2,%ymm13
717
718# qhasm: 8x v10 = x2 << 16
719# asm 1: vpslld $16,<x2=reg256#12,>v10=reg256#16
720# asm 2: vpslld $16,<x2=%ymm11,>v10=%ymm15
721vpslld $16,%ymm11,%ymm15
722
723# qhasm: 8x v01 = x0 unsigned>> 16
724# asm 1: vpsrld $16,<x0=reg256#15,>v01=reg256#15
725# asm 2: vpsrld $16,<x0=%ymm14,>v01=%ymm14
726vpsrld $16,%ymm14,%ymm14
727
728# qhasm: v11 = x2 & mask3
729# asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12
730# asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11
731vpand %ymm11,%ymm3,%ymm11
732
733# qhasm: x0 = v00 | v10
734# asm 1: vpor  <v00=reg256#14,<v10=reg256#16,>x0=reg256#14
735# asm 2: vpor  <v00=%ymm13,<v10=%ymm15,>x0=%ymm13
736vpor  %ymm13,%ymm15,%ymm13
737
738# qhasm: x2 = v01 | v11
739# asm 1: vpor  <v01=reg256#15,<v11=reg256#12,>x2=reg256#12
740# asm 2: vpor  <v01=%ymm14,<v11=%ymm11,>x2=%ymm11
741vpor  %ymm14,%ymm11,%ymm11
742
743# qhasm: v00 = x1 & mask2
744# asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15
745# asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14
746vpand %ymm10,%ymm2,%ymm14
747
748# qhasm: 8x v10 = x3 << 16
749# asm 1: vpslld $16,<x3=reg256#13,>v10=reg256#16
750# asm 2: vpslld $16,<x3=%ymm12,>v10=%ymm15
751vpslld $16,%ymm12,%ymm15
752
753# qhasm: 8x v01 = x1 unsigned>> 16
754# asm 1: vpsrld $16,<x1=reg256#11,>v01=reg256#11
755# asm 2: vpsrld $16,<x1=%ymm10,>v01=%ymm10
756vpsrld $16,%ymm10,%ymm10
757
758# qhasm: v11 = x3 & mask3
759# asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13
760# asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12
761vpand %ymm12,%ymm3,%ymm12
762
763# qhasm: x1 = v00 | v10
764# asm 1: vpor  <v00=reg256#15,<v10=reg256#16,>x1=reg256#15
765# asm 2: vpor  <v00=%ymm14,<v10=%ymm15,>x1=%ymm14
766vpor  %ymm14,%ymm15,%ymm14
767
768# qhasm: x3 = v01 | v11
769# asm 1: vpor  <v01=reg256#11,<v11=reg256#13,>x3=reg256#11
770# asm 2: vpor  <v01=%ymm10,<v11=%ymm12,>x3=%ymm10
771vpor  %ymm10,%ymm12,%ymm10
772
773# qhasm: v00 = x4 & mask2
774# asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13
775# asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12
776vpand %ymm6,%ymm2,%ymm12
777
778# qhasm: 8x v10 = x6 << 16
779# asm 1: vpslld $16,<x6=reg256#9,>v10=reg256#16
780# asm 2: vpslld $16,<x6=%ymm8,>v10=%ymm15
781vpslld $16,%ymm8,%ymm15
782
783# qhasm: 8x v01 = x4 unsigned>> 16
784# asm 1: vpsrld $16,<x4=reg256#7,>v01=reg256#7
785# asm 2: vpsrld $16,<x4=%ymm6,>v01=%ymm6
786vpsrld $16,%ymm6,%ymm6
787
788# qhasm: v11 = x6 & mask3
789# asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9
790# asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8
791vpand %ymm8,%ymm3,%ymm8
792
793# qhasm: x4 = v00 | v10
794# asm 1: vpor  <v00=reg256#13,<v10=reg256#16,>x4=reg256#13
795# asm 2: vpor  <v00=%ymm12,<v10=%ymm15,>x4=%ymm12
796vpor  %ymm12,%ymm15,%ymm12
797
798# qhasm: x6 = v01 | v11
799# asm 1: vpor  <v01=reg256#7,<v11=reg256#9,>x6=reg256#7
800# asm 2: vpor  <v01=%ymm6,<v11=%ymm8,>x6=%ymm6
801vpor  %ymm6,%ymm8,%ymm6
802
803# qhasm: v00 = x5 & mask2
804# asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9
805# asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8
806vpand %ymm7,%ymm2,%ymm8
807
808# qhasm: 8x v10 = x7 << 16
809# asm 1: vpslld $16,<x7=reg256#10,>v10=reg256#16
810# asm 2: vpslld $16,<x7=%ymm9,>v10=%ymm15
811vpslld $16,%ymm9,%ymm15
812
813# qhasm: 8x v01 = x5 unsigned>> 16
814# asm 1: vpsrld $16,<x5=reg256#8,>v01=reg256#8
815# asm 2: vpsrld $16,<x5=%ymm7,>v01=%ymm7
816vpsrld $16,%ymm7,%ymm7
817
818# qhasm: v11 = x7 & mask3
819# asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10
820# asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9
821vpand %ymm9,%ymm3,%ymm9
822
823# qhasm: x5 = v00 | v10
824# asm 1: vpor  <v00=reg256#9,<v10=reg256#16,>x5=reg256#9
825# asm 2: vpor  <v00=%ymm8,<v10=%ymm15,>x5=%ymm8
826vpor  %ymm8,%ymm15,%ymm8
827
828# qhasm: x7 = v01 | v11
829# asm 1: vpor  <v01=reg256#8,<v11=reg256#10,>x7=reg256#8
830# asm 2: vpor  <v01=%ymm7,<v11=%ymm9,>x7=%ymm7
831vpor  %ymm7,%ymm9,%ymm7
832
833# qhasm: v00 = x0 & mask4
834# asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10
835# asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9
836vpand %ymm13,%ymm4,%ymm9
837
838# qhasm: 16x v10 = x1 << 8
839# asm 1: vpsllw $8,<x1=reg256#15,>v10=reg256#16
840# asm 2: vpsllw $8,<x1=%ymm14,>v10=%ymm15
841vpsllw $8,%ymm14,%ymm15
842
843# qhasm: 16x v01 = x0 unsigned>> 8
844# asm 1: vpsrlw $8,<x0=reg256#14,>v01=reg256#14
845# asm 2: vpsrlw $8,<x0=%ymm13,>v01=%ymm13
846vpsrlw $8,%ymm13,%ymm13
847
848# qhasm: v11 = x1 & mask5
849# asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15
850# asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14
851vpand %ymm14,%ymm5,%ymm14
852
853# qhasm: x0 = v00 | v10
854# asm 1: vpor  <v00=reg256#10,<v10=reg256#16,>x0=reg256#10
855# asm 2: vpor  <v00=%ymm9,<v10=%ymm15,>x0=%ymm9
856vpor  %ymm9,%ymm15,%ymm9
857
858# qhasm: x1 = v01 | v11
859# asm 1: vpor  <v01=reg256#14,<v11=reg256#15,>x1=reg256#14
860# asm 2: vpor  <v01=%ymm13,<v11=%ymm14,>x1=%ymm13
861vpor  %ymm13,%ymm14,%ymm13
862
863# qhasm: v00 = x2 & mask4
864# asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15
865# asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14
866vpand %ymm11,%ymm4,%ymm14
867
868# qhasm: 16x v10 = x3 << 8
869# asm 1: vpsllw $8,<x3=reg256#11,>v10=reg256#16
870# asm 2: vpsllw $8,<x3=%ymm10,>v10=%ymm15
871vpsllw $8,%ymm10,%ymm15
872
873# qhasm: 16x v01 = x2 unsigned>> 8
874# asm 1: vpsrlw $8,<x2=reg256#12,>v01=reg256#12
875# asm 2: vpsrlw $8,<x2=%ymm11,>v01=%ymm11
876vpsrlw $8,%ymm11,%ymm11
877
878# qhasm: v11 = x3 & mask5
879# asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11
880# asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10
881vpand %ymm10,%ymm5,%ymm10
882
883# qhasm: x2 = v00 | v10
884# asm 1: vpor  <v00=reg256#15,<v10=reg256#16,>x2=reg256#15
885# asm 2: vpor  <v00=%ymm14,<v10=%ymm15,>x2=%ymm14
886vpor  %ymm14,%ymm15,%ymm14
887
888# qhasm: x3 = v01 | v11
889# asm 1: vpor  <v01=reg256#12,<v11=reg256#11,>x3=reg256#11
890# asm 2: vpor  <v01=%ymm11,<v11=%ymm10,>x3=%ymm10
891vpor  %ymm11,%ymm10,%ymm10
892
893# qhasm: v00 = x4 & mask4
894# asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12
895# asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11
896vpand %ymm12,%ymm4,%ymm11
897
898# qhasm: 16x v10 = x5 << 8
899# asm 1: vpsllw $8,<x5=reg256#9,>v10=reg256#16
900# asm 2: vpsllw $8,<x5=%ymm8,>v10=%ymm15
901vpsllw $8,%ymm8,%ymm15
902
903# qhasm: 16x v01 = x4 unsigned>> 8
904# asm 1: vpsrlw $8,<x4=reg256#13,>v01=reg256#13
905# asm 2: vpsrlw $8,<x4=%ymm12,>v01=%ymm12
906vpsrlw $8,%ymm12,%ymm12
907
908# qhasm: v11 = x5 & mask5
909# asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9
910# asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8
911vpand %ymm8,%ymm5,%ymm8
912
913# qhasm: x4 = v00 | v10
914# asm 1: vpor  <v00=reg256#12,<v10=reg256#16,>x4=reg256#12
915# asm 2: vpor  <v00=%ymm11,<v10=%ymm15,>x4=%ymm11
916vpor  %ymm11,%ymm15,%ymm11
917
918# qhasm: x5 = v01 | v11
919# asm 1: vpor  <v01=reg256#13,<v11=reg256#9,>x5=reg256#9
920# asm 2: vpor  <v01=%ymm12,<v11=%ymm8,>x5=%ymm8
921vpor  %ymm12,%ymm8,%ymm8
922
923# qhasm: v00 = x6 & mask4
924# asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13
925# asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12
926vpand %ymm6,%ymm4,%ymm12
927
928# qhasm: 16x v10 = x7 << 8
929# asm 1: vpsllw $8,<x7=reg256#8,>v10=reg256#16
930# asm 2: vpsllw $8,<x7=%ymm7,>v10=%ymm15
931vpsllw $8,%ymm7,%ymm15
932
933# qhasm: 16x v01 = x6 unsigned>> 8
934# asm 1: vpsrlw $8,<x6=reg256#7,>v01=reg256#7
935# asm 2: vpsrlw $8,<x6=%ymm6,>v01=%ymm6
936vpsrlw $8,%ymm6,%ymm6
937
938# qhasm: v11 = x7 & mask5
939# asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8
940# asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7
941vpand %ymm7,%ymm5,%ymm7
942
943# qhasm: x6 = v00 | v10
944# asm 1: vpor  <v00=reg256#13,<v10=reg256#16,>x6=reg256#13
945# asm 2: vpor  <v00=%ymm12,<v10=%ymm15,>x6=%ymm12
946vpor  %ymm12,%ymm15,%ymm12
947
948# qhasm: x7 = v01 | v11
949# asm 1: vpor  <v01=reg256#7,<v11=reg256#8,>x7=reg256#7
950# asm 2: vpor  <v01=%ymm6,<v11=%ymm7,>x7=%ymm6
951vpor  %ymm6,%ymm7,%ymm6
952
953# qhasm: mem256[ input_0 + 32 ] = x0
954# asm 1: vmovupd   <x0=reg256#10,32(<input_0=int64#1)
955# asm 2: vmovupd   <x0=%ymm9,32(<input_0=%rdi)
956vmovupd   %ymm9,32(%rdi)
957
958# qhasm: mem256[ input_0 + 288 ] = x1
959# asm 1: vmovupd   <x1=reg256#14,288(<input_0=int64#1)
960# asm 2: vmovupd   <x1=%ymm13,288(<input_0=%rdi)
961vmovupd   %ymm13,288(%rdi)
962
963# qhasm: mem256[ input_0 + 544 ] = x2
964# asm 1: vmovupd   <x2=reg256#15,544(<input_0=int64#1)
965# asm 2: vmovupd   <x2=%ymm14,544(<input_0=%rdi)
966vmovupd   %ymm14,544(%rdi)
967
968# qhasm: mem256[ input_0 + 800 ] = x3
969# asm 1: vmovupd   <x3=reg256#11,800(<input_0=int64#1)
970# asm 2: vmovupd   <x3=%ymm10,800(<input_0=%rdi)
971vmovupd   %ymm10,800(%rdi)
972
973# qhasm: mem256[ input_0 + 1056 ] = x4
974# asm 1: vmovupd   <x4=reg256#12,1056(<input_0=int64#1)
975# asm 2: vmovupd   <x4=%ymm11,1056(<input_0=%rdi)
976vmovupd   %ymm11,1056(%rdi)
977
978# qhasm: mem256[ input_0 + 1312 ] = x5
979# asm 1: vmovupd   <x5=reg256#9,1312(<input_0=int64#1)
980# asm 2: vmovupd   <x5=%ymm8,1312(<input_0=%rdi)
981vmovupd   %ymm8,1312(%rdi)
982
983# qhasm: mem256[ input_0 + 1568 ] = x6
984# asm 1: vmovupd   <x6=reg256#13,1568(<input_0=int64#1)
985# asm 2: vmovupd   <x6=%ymm12,1568(<input_0=%rdi)
986vmovupd   %ymm12,1568(%rdi)
987
988# qhasm: mem256[ input_0 + 1824 ] = x7
989# asm 1: vmovupd   <x7=reg256#7,1824(<input_0=int64#1)
990# asm 2: vmovupd   <x7=%ymm6,1824(<input_0=%rdi)
991vmovupd   %ymm6,1824(%rdi)
992
993# qhasm: x0 = mem256[ input_0 + 64 ]
994# asm 1: vmovupd   64(<input_0=int64#1),>x0=reg256#7
995# asm 2: vmovupd   64(<input_0=%rdi),>x0=%ymm6
996vmovupd   64(%rdi),%ymm6
997
998# qhasm: x1 = mem256[ input_0 + 320 ]
999# asm 1: vmovupd   320(<input_0=int64#1),>x1=reg256#8
1000# asm 2: vmovupd   320(<input_0=%rdi),>x1=%ymm7
1001vmovupd   320(%rdi),%ymm7
1002
1003# qhasm: x2 = mem256[ input_0 + 576 ]
1004# asm 1: vmovupd   576(<input_0=int64#1),>x2=reg256#9
1005# asm 2: vmovupd   576(<input_0=%rdi),>x2=%ymm8
1006vmovupd   576(%rdi),%ymm8
1007
1008# qhasm: x3 = mem256[ input_0 + 832 ]
1009# asm 1: vmovupd   832(<input_0=int64#1),>x3=reg256#10
1010# asm 2: vmovupd   832(<input_0=%rdi),>x3=%ymm9
1011vmovupd   832(%rdi),%ymm9
1012
1013# qhasm: x4 = mem256[ input_0 + 1088 ]
1014# asm 1: vmovupd   1088(<input_0=int64#1),>x4=reg256#11
1015# asm 2: vmovupd   1088(<input_0=%rdi),>x4=%ymm10
1016vmovupd   1088(%rdi),%ymm10
1017
1018# qhasm: x5 = mem256[ input_0 + 1344 ]
1019# asm 1: vmovupd   1344(<input_0=int64#1),>x5=reg256#12
1020# asm 2: vmovupd   1344(<input_0=%rdi),>x5=%ymm11
1021vmovupd   1344(%rdi),%ymm11
1022
1023# qhasm: x6 = mem256[ input_0 + 1600 ]
1024# asm 1: vmovupd   1600(<input_0=int64#1),>x6=reg256#13
1025# asm 2: vmovupd   1600(<input_0=%rdi),>x6=%ymm12
1026vmovupd   1600(%rdi),%ymm12
1027
1028# qhasm: x7 = mem256[ input_0 + 1856 ]
1029# asm 1: vmovupd   1856(<input_0=int64#1),>x7=reg256#14
1030# asm 2: vmovupd   1856(<input_0=%rdi),>x7=%ymm13
1031vmovupd   1856(%rdi),%ymm13
1032
1033# qhasm: v00 = x0 & mask0
1034# asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15
1035# asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14
1036vpand %ymm6,%ymm0,%ymm14
1037
1038# qhasm: 4x v10 = x4 << 32
1039# asm 1: vpsllq $32,<x4=reg256#11,>v10=reg256#16
1040# asm 2: vpsllq $32,<x4=%ymm10,>v10=%ymm15
1041vpsllq $32,%ymm10,%ymm15
1042
1043# qhasm: 4x v01 = x0 unsigned>> 32
1044# asm 1: vpsrlq $32,<x0=reg256#7,>v01=reg256#7
1045# asm 2: vpsrlq $32,<x0=%ymm6,>v01=%ymm6
1046vpsrlq $32,%ymm6,%ymm6
1047
1048# qhasm: v11 = x4 & mask1
1049# asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11
1050# asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10
1051vpand %ymm10,%ymm1,%ymm10
1052
1053# qhasm: x0 = v00 | v10
1054# asm 1: vpor  <v00=reg256#15,<v10=reg256#16,>x0=reg256#15
1055# asm 2: vpor  <v00=%ymm14,<v10=%ymm15,>x0=%ymm14
1056vpor  %ymm14,%ymm15,%ymm14
1057
1058# qhasm: x4 = v01 | v11
1059# asm 1: vpor  <v01=reg256#7,<v11=reg256#11,>x4=reg256#7
1060# asm 2: vpor  <v01=%ymm6,<v11=%ymm10,>x4=%ymm6
1061vpor  %ymm6,%ymm10,%ymm6
1062
1063# qhasm: v00 = x1 & mask0
1064# asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11
1065# asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10
1066vpand %ymm7,%ymm0,%ymm10
1067
1068# qhasm: 4x v10 = x5 << 32
1069# asm 1: vpsllq $32,<x5=reg256#12,>v10=reg256#16
1070# asm 2: vpsllq $32,<x5=%ymm11,>v10=%ymm15
1071vpsllq $32,%ymm11,%ymm15
1072
1073# qhasm: 4x v01 = x1 unsigned>> 32
1074# asm 1: vpsrlq $32,<x1=reg256#8,>v01=reg256#8
1075# asm 2: vpsrlq $32,<x1=%ymm7,>v01=%ymm7
1076vpsrlq $32,%ymm7,%ymm7
1077
1078# qhasm: v11 = x5 & mask1
1079# asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12
1080# asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11
1081vpand %ymm11,%ymm1,%ymm11
1082
1083# qhasm: x1 = v00 | v10
1084# asm 1: vpor  <v00=reg256#11,<v10=reg256#16,>x1=reg256#11
1085# asm 2: vpor  <v00=%ymm10,<v10=%ymm15,>x1=%ymm10
1086vpor  %ymm10,%ymm15,%ymm10
1087
1088# qhasm: x5 = v01 | v11
1089# asm 1: vpor  <v01=reg256#8,<v11=reg256#12,>x5=reg256#8
1090# asm 2: vpor  <v01=%ymm7,<v11=%ymm11,>x5=%ymm7
1091vpor  %ymm7,%ymm11,%ymm7
1092
1093# qhasm: v00 = x2 & mask0
1094# asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12
1095# asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11
1096vpand %ymm8,%ymm0,%ymm11
1097
1098# qhasm: 4x v10 = x6 << 32
1099# asm 1: vpsllq $32,<x6=reg256#13,>v10=reg256#16
1100# asm 2: vpsllq $32,<x6=%ymm12,>v10=%ymm15
1101vpsllq $32,%ymm12,%ymm15
1102
1103# qhasm: 4x v01 = x2 unsigned>> 32
1104# asm 1: vpsrlq $32,<x2=reg256#9,>v01=reg256#9
1105# asm 2: vpsrlq $32,<x2=%ymm8,>v01=%ymm8
1106vpsrlq $32,%ymm8,%ymm8
1107
1108# qhasm: v11 = x6 & mask1
1109# asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13
1110# asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12
1111vpand %ymm12,%ymm1,%ymm12
1112
1113# qhasm: x2 = v00 | v10
1114# asm 1: vpor  <v00=reg256#12,<v10=reg256#16,>x2=reg256#12
1115# asm 2: vpor  <v00=%ymm11,<v10=%ymm15,>x2=%ymm11
1116vpor  %ymm11,%ymm15,%ymm11
1117
1118# qhasm: x6 = v01 | v11
1119# asm 1: vpor  <v01=reg256#9,<v11=reg256#13,>x6=reg256#9
1120# asm 2: vpor  <v01=%ymm8,<v11=%ymm12,>x6=%ymm8
1121vpor  %ymm8,%ymm12,%ymm8
1122
1123# qhasm: v00 = x3 & mask0
1124# asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13
1125# asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12
1126vpand %ymm9,%ymm0,%ymm12
1127
1128# qhasm: 4x v10 = x7 << 32
1129# asm 1: vpsllq $32,<x7=reg256#14,>v10=reg256#16
1130# asm 2: vpsllq $32,<x7=%ymm13,>v10=%ymm15
1131vpsllq $32,%ymm13,%ymm15
1132
1133# qhasm: 4x v01 = x3 unsigned>> 32
1134# asm 1: vpsrlq $32,<x3=reg256#10,>v01=reg256#10
1135# asm 2: vpsrlq $32,<x3=%ymm9,>v01=%ymm9
1136vpsrlq $32,%ymm9,%ymm9
1137
1138# qhasm: v11 = x7 & mask1
1139# asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14
1140# asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13
1141vpand %ymm13,%ymm1,%ymm13
1142
1143# qhasm: x3 = v00 | v10
1144# asm 1: vpor  <v00=reg256#13,<v10=reg256#16,>x3=reg256#13
1145# asm 2: vpor  <v00=%ymm12,<v10=%ymm15,>x3=%ymm12
1146vpor  %ymm12,%ymm15,%ymm12
1147
1148# qhasm: x7 = v01 | v11
1149# asm 1: vpor  <v01=reg256#10,<v11=reg256#14,>x7=reg256#10
1150# asm 2: vpor  <v01=%ymm9,<v11=%ymm13,>x7=%ymm9
1151vpor  %ymm9,%ymm13,%ymm9
1152
1153# qhasm: v00 = x0 & mask2
1154# asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14
1155# asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13
1156vpand %ymm14,%ymm2,%ymm13
1157
1158# qhasm: 8x v10 = x2 << 16
1159# asm 1: vpslld $16,<x2=reg256#12,>v10=reg256#16
1160# asm 2: vpslld $16,<x2=%ymm11,>v10=%ymm15
1161vpslld $16,%ymm11,%ymm15
1162
1163# qhasm: 8x v01 = x0 unsigned>> 16
1164# asm 1: vpsrld $16,<x0=reg256#15,>v01=reg256#15
1165# asm 2: vpsrld $16,<x0=%ymm14,>v01=%ymm14
1166vpsrld $16,%ymm14,%ymm14
1167
1168# qhasm: v11 = x2 & mask3
1169# asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12
1170# asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11
1171vpand %ymm11,%ymm3,%ymm11
1172
1173# qhasm: x0 = v00 | v10
1174# asm 1: vpor  <v00=reg256#14,<v10=reg256#16,>x0=reg256#14
1175# asm 2: vpor  <v00=%ymm13,<v10=%ymm15,>x0=%ymm13
1176vpor  %ymm13,%ymm15,%ymm13
1177
1178# qhasm: x2 = v01 | v11
1179# asm 1: vpor  <v01=reg256#15,<v11=reg256#12,>x2=reg256#12
1180# asm 2: vpor  <v01=%ymm14,<v11=%ymm11,>x2=%ymm11
1181vpor  %ymm14,%ymm11,%ymm11
1182
1183# qhasm: v00 = x1 & mask2
1184# asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15
1185# asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14
1186vpand %ymm10,%ymm2,%ymm14
1187
1188# qhasm: 8x v10 = x3 << 16
1189# asm 1: vpslld $16,<x3=reg256#13,>v10=reg256#16
1190# asm 2: vpslld $16,<x3=%ymm12,>v10=%ymm15
1191vpslld $16,%ymm12,%ymm15
1192
1193# qhasm: 8x v01 = x1 unsigned>> 16
1194# asm 1: vpsrld $16,<x1=reg256#11,>v01=reg256#11
1195# asm 2: vpsrld $16,<x1=%ymm10,>v01=%ymm10
1196vpsrld $16,%ymm10,%ymm10
1197
1198# qhasm: v11 = x3 & mask3
1199# asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13
1200# asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12
1201vpand %ymm12,%ymm3,%ymm12
1202
1203# qhasm: x1 = v00 | v10
1204# asm 1: vpor  <v00=reg256#15,<v10=reg256#16,>x1=reg256#15
1205# asm 2: vpor  <v00=%ymm14,<v10=%ymm15,>x1=%ymm14
1206vpor  %ymm14,%ymm15,%ymm14
1207
1208# qhasm: x3 = v01 | v11
1209# asm 1: vpor  <v01=reg256#11,<v11=reg256#13,>x3=reg256#11
1210# asm 2: vpor  <v01=%ymm10,<v11=%ymm12,>x3=%ymm10
1211vpor  %ymm10,%ymm12,%ymm10
1212
1213# qhasm: v00 = x4 & mask2
1214# asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13
1215# asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12
1216vpand %ymm6,%ymm2,%ymm12
1217
1218# qhasm: 8x v10 = x6 << 16
1219# asm 1: vpslld $16,<x6=reg256#9,>v10=reg256#16
1220# asm 2: vpslld $16,<x6=%ymm8,>v10=%ymm15
1221vpslld $16,%ymm8,%ymm15
1222
1223# qhasm: 8x v01 = x4 unsigned>> 16
1224# asm 1: vpsrld $16,<x4=reg256#7,>v01=reg256#7
1225# asm 2: vpsrld $16,<x4=%ymm6,>v01=%ymm6
1226vpsrld $16,%ymm6,%ymm6
1227
1228# qhasm: v11 = x6 & mask3
1229# asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9
1230# asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8
1231vpand %ymm8,%ymm3,%ymm8
1232
1233# qhasm: x4 = v00 | v10
1234# asm 1: vpor  <v00=reg256#13,<v10=reg256#16,>x4=reg256#13
1235# asm 2: vpor  <v00=%ymm12,<v10=%ymm15,>x4=%ymm12
1236vpor  %ymm12,%ymm15,%ymm12
1237
1238# qhasm: x6 = v01 | v11
1239# asm 1: vpor  <v01=reg256#7,<v11=reg256#9,>x6=reg256#7
1240# asm 2: vpor  <v01=%ymm6,<v11=%ymm8,>x6=%ymm6
1241vpor  %ymm6,%ymm8,%ymm6
1242
1243# qhasm: v00 = x5 & mask2
1244# asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9
1245# asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8
1246vpand %ymm7,%ymm2,%ymm8
1247
1248# qhasm: 8x v10 = x7 << 16
1249# asm 1: vpslld $16,<x7=reg256#10,>v10=reg256#16
1250# asm 2: vpslld $16,<x7=%ymm9,>v10=%ymm15
1251vpslld $16,%ymm9,%ymm15
1252
1253# qhasm: 8x v01 = x5 unsigned>> 16
1254# asm 1: vpsrld $16,<x5=reg256#8,>v01=reg256#8
1255# asm 2: vpsrld $16,<x5=%ymm7,>v01=%ymm7
1256vpsrld $16,%ymm7,%ymm7
1257
1258# qhasm: v11 = x7 & mask3
1259# asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10
1260# asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9
1261vpand %ymm9,%ymm3,%ymm9
1262
1263# qhasm: x5 = v00 | v10
1264# asm 1: vpor  <v00=reg256#9,<v10=reg256#16,>x5=reg256#9
1265# asm 2: vpor  <v00=%ymm8,<v10=%ymm15,>x5=%ymm8
1266vpor  %ymm8,%ymm15,%ymm8
1267
1268# qhasm: x7 = v01 | v11
1269# asm 1: vpor  <v01=reg256#8,<v11=reg256#10,>x7=reg256#8
1270# asm 2: vpor  <v01=%ymm7,<v11=%ymm9,>x7=%ymm7
1271vpor  %ymm7,%ymm9,%ymm7
1272
1273# qhasm: v00 = x0 & mask4
1274# asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10
1275# asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9
1276vpand %ymm13,%ymm4,%ymm9
1277
1278# qhasm: 16x v10 = x1 << 8
1279# asm 1: vpsllw $8,<x1=reg256#15,>v10=reg256#16
1280# asm 2: vpsllw $8,<x1=%ymm14,>v10=%ymm15
1281vpsllw $8,%ymm14,%ymm15
1282
1283# qhasm: 16x v01 = x0 unsigned>> 8
1284# asm 1: vpsrlw $8,<x0=reg256#14,>v01=reg256#14
1285# asm 2: vpsrlw $8,<x0=%ymm13,>v01=%ymm13
1286vpsrlw $8,%ymm13,%ymm13
1287
1288# qhasm: v11 = x1 & mask5
1289# asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15
1290# asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14
1291vpand %ymm14,%ymm5,%ymm14
1292
1293# qhasm: x0 = v00 | v10
1294# asm 1: vpor  <v00=reg256#10,<v10=reg256#16,>x0=reg256#10
1295# asm 2: vpor  <v00=%ymm9,<v10=%ymm15,>x0=%ymm9
1296vpor  %ymm9,%ymm15,%ymm9
1297
1298# qhasm: x1 = v01 | v11
1299# asm 1: vpor  <v01=reg256#14,<v11=reg256#15,>x1=reg256#14
1300# asm 2: vpor  <v01=%ymm13,<v11=%ymm14,>x1=%ymm13
1301vpor  %ymm13,%ymm14,%ymm13
1302
1303# qhasm: v00 = x2 & mask4
1304# asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15
1305# asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14
1306vpand %ymm11,%ymm4,%ymm14
1307
1308# qhasm: 16x v10 = x3 << 8
1309# asm 1: vpsllw $8,<x3=reg256#11,>v10=reg256#16
1310# asm 2: vpsllw $8,<x3=%ymm10,>v10=%ymm15
1311vpsllw $8,%ymm10,%ymm15
1312
1313# qhasm: 16x v01 = x2 unsigned>> 8
1314# asm 1: vpsrlw $8,<x2=reg256#12,>v01=reg256#12
1315# asm 2: vpsrlw $8,<x2=%ymm11,>v01=%ymm11
1316vpsrlw $8,%ymm11,%ymm11
1317
1318# qhasm: v11 = x3 & mask5
1319# asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11
1320# asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10
1321vpand %ymm10,%ymm5,%ymm10
1322
1323# qhasm: x2 = v00 | v10
1324# asm 1: vpor  <v00=reg256#15,<v10=reg256#16,>x2=reg256#15
1325# asm 2: vpor  <v00=%ymm14,<v10=%ymm15,>x2=%ymm14
1326vpor  %ymm14,%ymm15,%ymm14
1327
1328# qhasm: x3 = v01 | v11
1329# asm 1: vpor  <v01=reg256#12,<v11=reg256#11,>x3=reg256#11
1330# asm 2: vpor  <v01=%ymm11,<v11=%ymm10,>x3=%ymm10
1331vpor  %ymm11,%ymm10,%ymm10
1332
1333# qhasm: v00 = x4 & mask4
1334# asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12
1335# asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11
1336vpand %ymm12,%ymm4,%ymm11
1337
1338# qhasm: 16x v10 = x5 << 8
1339# asm 1: vpsllw $8,<x5=reg256#9,>v10=reg256#16
1340# asm 2: vpsllw $8,<x5=%ymm8,>v10=%ymm15
1341vpsllw $8,%ymm8,%ymm15
1342
1343# qhasm: 16x v01 = x4 unsigned>> 8
1344# asm 1: vpsrlw $8,<x4=reg256#13,>v01=reg256#13
1345# asm 2: vpsrlw $8,<x4=%ymm12,>v01=%ymm12
1346vpsrlw $8,%ymm12,%ymm12
1347
1348# qhasm: v11 = x5 & mask5
1349# asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9
1350# asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8
1351vpand %ymm8,%ymm5,%ymm8
1352
1353# qhasm: x4 = v00 | v10
1354# asm 1: vpor  <v00=reg256#12,<v10=reg256#16,>x4=reg256#12
1355# asm 2: vpor  <v00=%ymm11,<v10=%ymm15,>x4=%ymm11
1356vpor  %ymm11,%ymm15,%ymm11
1357
1358# qhasm: x5 = v01 | v11
1359# asm 1: vpor  <v01=reg256#13,<v11=reg256#9,>x5=reg256#9
1360# asm 2: vpor  <v01=%ymm12,<v11=%ymm8,>x5=%ymm8
1361vpor  %ymm12,%ymm8,%ymm8
1362
1363# qhasm: v00 = x6 & mask4
1364# asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13
1365# asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12
1366vpand %ymm6,%ymm4,%ymm12
1367
1368# qhasm: 16x v10 = x7 << 8
1369# asm 1: vpsllw $8,<x7=reg256#8,>v10=reg256#16
1370# asm 2: vpsllw $8,<x7=%ymm7,>v10=%ymm15
1371vpsllw $8,%ymm7,%ymm15
1372
1373# qhasm: 16x v01 = x6 unsigned>> 8
1374# asm 1: vpsrlw $8,<x6=reg256#7,>v01=reg256#7
1375# asm 2: vpsrlw $8,<x6=%ymm6,>v01=%ymm6
1376vpsrlw $8,%ymm6,%ymm6
1377
1378# qhasm: v11 = x7 & mask5
1379# asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8
1380# asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7
1381vpand %ymm7,%ymm5,%ymm7
1382
1383# qhasm: x6 = v00 | v10
1384# asm 1: vpor  <v00=reg256#13,<v10=reg256#16,>x6=reg256#13
1385# asm 2: vpor  <v00=%ymm12,<v10=%ymm15,>x6=%ymm12
1386vpor  %ymm12,%ymm15,%ymm12
1387
1388# qhasm: x7 = v01 | v11
1389# asm 1: vpor  <v01=reg256#7,<v11=reg256#8,>x7=reg256#7
1390# asm 2: vpor  <v01=%ymm6,<v11=%ymm7,>x7=%ymm6
1391vpor  %ymm6,%ymm7,%ymm6
1392
1393# qhasm: mem256[ input_0 + 64 ] = x0
1394# asm 1: vmovupd   <x0=reg256#10,64(<input_0=int64#1)
1395# asm 2: vmovupd   <x0=%ymm9,64(<input_0=%rdi)
1396vmovupd   %ymm9,64(%rdi)
1397
1398# qhasm: mem256[ input_0 + 320 ] = x1
1399# asm 1: vmovupd   <x1=reg256#14,320(<input_0=int64#1)
1400# asm 2: vmovupd   <x1=%ymm13,320(<input_0=%rdi)
1401vmovupd   %ymm13,320(%rdi)
1402
1403# qhasm: mem256[ input_0 + 576 ] = x2
1404# asm 1: vmovupd   <x2=reg256#15,576(<input_0=int64#1)
1405# asm 2: vmovupd   <x2=%ymm14,576(<input_0=%rdi)
1406vmovupd   %ymm14,576(%rdi)
1407
1408# qhasm: mem256[ input_0 + 832 ] = x3
1409# asm 1: vmovupd   <x3=reg256#11,832(<input_0=int64#1)
1410# asm 2: vmovupd   <x3=%ymm10,832(<input_0=%rdi)
1411vmovupd   %ymm10,832(%rdi)
1412
1413# qhasm: mem256[ input_0 + 1088 ] = x4
1414# asm 1: vmovupd   <x4=reg256#12,1088(<input_0=int64#1)
1415# asm 2: vmovupd   <x4=%ymm11,1088(<input_0=%rdi)
1416vmovupd   %ymm11,1088(%rdi)
1417
1418# qhasm: mem256[ input_0 + 1344 ] = x5
1419# asm 1: vmovupd   <x5=reg256#9,1344(<input_0=int64#1)
1420# asm 2: vmovupd   <x5=%ymm8,1344(<input_0=%rdi)
1421vmovupd   %ymm8,1344(%rdi)
1422
1423# qhasm: mem256[ input_0 + 1600 ] = x6
1424# asm 1: vmovupd   <x6=reg256#13,1600(<input_0=int64#1)
1425# asm 2: vmovupd   <x6=%ymm12,1600(<input_0=%rdi)
1426vmovupd   %ymm12,1600(%rdi)
1427
1428# qhasm: mem256[ input_0 + 1856 ] = x7
1429# asm 1: vmovupd   <x7=reg256#7,1856(<input_0=int64#1)
1430# asm 2: vmovupd   <x7=%ymm6,1856(<input_0=%rdi)
1431vmovupd   %ymm6,1856(%rdi)
1432
1433# qhasm: x0 = mem256[ input_0 + 96 ]
1434# asm 1: vmovupd   96(<input_0=int64#1),>x0=reg256#7
1435# asm 2: vmovupd   96(<input_0=%rdi),>x0=%ymm6
1436vmovupd   96(%rdi),%ymm6
1437
1438# qhasm: x1 = mem256[ input_0 + 352 ]
1439# asm 1: vmovupd   352(<input_0=int64#1),>x1=reg256#8
1440# asm 2: vmovupd   352(<input_0=%rdi),>x1=%ymm7
1441vmovupd   352(%rdi),%ymm7
1442
1443# qhasm: x2 = mem256[ input_0 + 608 ]
1444# asm 1: vmovupd   608(<input_0=int64#1),>x2=reg256#9
1445# asm 2: vmovupd   608(<input_0=%rdi),>x2=%ymm8
1446vmovupd   608(%rdi),%ymm8
1447
1448# qhasm: x3 = mem256[ input_0 + 864 ]
1449# asm 1: vmovupd   864(<input_0=int64#1),>x3=reg256#10
1450# asm 2: vmovupd   864(<input_0=%rdi),>x3=%ymm9
1451vmovupd   864(%rdi),%ymm9
1452
1453# qhasm: x4 = mem256[ input_0 + 1120 ]
1454# asm 1: vmovupd   1120(<input_0=int64#1),>x4=reg256#11
1455# asm 2: vmovupd   1120(<input_0=%rdi),>x4=%ymm10
1456vmovupd   1120(%rdi),%ymm10
1457
1458# qhasm: x5 = mem256[ input_0 + 1376 ]
1459# asm 1: vmovupd   1376(<input_0=int64#1),>x5=reg256#12
1460# asm 2: vmovupd   1376(<input_0=%rdi),>x5=%ymm11
1461vmovupd   1376(%rdi),%ymm11
1462
1463# qhasm: x6 = mem256[ input_0 + 1632 ]
1464# asm 1: vmovupd   1632(<input_0=int64#1),>x6=reg256#13
1465# asm 2: vmovupd   1632(<input_0=%rdi),>x6=%ymm12
1466vmovupd   1632(%rdi),%ymm12
1467
1468# qhasm: x7 = mem256[ input_0 + 1888 ]
1469# asm 1: vmovupd   1888(<input_0=int64#1),>x7=reg256#14
1470# asm 2: vmovupd   1888(<input_0=%rdi),>x7=%ymm13
1471vmovupd   1888(%rdi),%ymm13
1472
1473# qhasm: v00 = x0 & mask0
1474# asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15
1475# asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14
1476vpand %ymm6,%ymm0,%ymm14
1477
1478# qhasm: 4x v10 = x4 << 32
1479# asm 1: vpsllq $32,<x4=reg256#11,>v10=reg256#16
1480# asm 2: vpsllq $32,<x4=%ymm10,>v10=%ymm15
1481vpsllq $32,%ymm10,%ymm15
1482
1483# qhasm: 4x v01 = x0 unsigned>> 32
1484# asm 1: vpsrlq $32,<x0=reg256#7,>v01=reg256#7
1485# asm 2: vpsrlq $32,<x0=%ymm6,>v01=%ymm6
1486vpsrlq $32,%ymm6,%ymm6
1487
1488# qhasm: v11 = x4 & mask1
1489# asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11
1490# asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10
1491vpand %ymm10,%ymm1,%ymm10
1492
1493# qhasm: x0 = v00 | v10
1494# asm 1: vpor  <v00=reg256#15,<v10=reg256#16,>x0=reg256#15
1495# asm 2: vpor  <v00=%ymm14,<v10=%ymm15,>x0=%ymm14
1496vpor  %ymm14,%ymm15,%ymm14
1497
1498# qhasm: x4 = v01 | v11
1499# asm 1: vpor  <v01=reg256#7,<v11=reg256#11,>x4=reg256#7
1500# asm 2: vpor  <v01=%ymm6,<v11=%ymm10,>x4=%ymm6
1501vpor  %ymm6,%ymm10,%ymm6
1502
1503# qhasm: v00 = x1 & mask0
1504# asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11
1505# asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10
1506vpand %ymm7,%ymm0,%ymm10
1507
1508# qhasm: 4x v10 = x5 << 32
1509# asm 1: vpsllq $32,<x5=reg256#12,>v10=reg256#16
1510# asm 2: vpsllq $32,<x5=%ymm11,>v10=%ymm15
1511vpsllq $32,%ymm11,%ymm15
1512
1513# qhasm: 4x v01 = x1 unsigned>> 32
1514# asm 1: vpsrlq $32,<x1=reg256#8,>v01=reg256#8
1515# asm 2: vpsrlq $32,<x1=%ymm7,>v01=%ymm7
1516vpsrlq $32,%ymm7,%ymm7
1517
1518# qhasm: v11 = x5 & mask1
1519# asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12
1520# asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11
1521vpand %ymm11,%ymm1,%ymm11
1522
1523# qhasm: x1 = v00 | v10
1524# asm 1: vpor  <v00=reg256#11,<v10=reg256#16,>x1=reg256#11
1525# asm 2: vpor  <v00=%ymm10,<v10=%ymm15,>x1=%ymm10
1526vpor  %ymm10,%ymm15,%ymm10
1527
1528# qhasm: x5 = v01 | v11
1529# asm 1: vpor  <v01=reg256#8,<v11=reg256#12,>x5=reg256#8
1530# asm 2: vpor  <v01=%ymm7,<v11=%ymm11,>x5=%ymm7
1531vpor  %ymm7,%ymm11,%ymm7
1532
1533# qhasm: v00 = x2 & mask0
1534# asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12
1535# asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11
1536vpand %ymm8,%ymm0,%ymm11
1537
1538# qhasm: 4x v10 = x6 << 32
1539# asm 1: vpsllq $32,<x6=reg256#13,>v10=reg256#16
1540# asm 2: vpsllq $32,<x6=%ymm12,>v10=%ymm15
1541vpsllq $32,%ymm12,%ymm15
1542
1543# qhasm: 4x v01 = x2 unsigned>> 32
1544# asm 1: vpsrlq $32,<x2=reg256#9,>v01=reg256#9
1545# asm 2: vpsrlq $32,<x2=%ymm8,>v01=%ymm8
1546vpsrlq $32,%ymm8,%ymm8
1547
1548# qhasm: v11 = x6 & mask1
1549# asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13
1550# asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12
1551vpand %ymm12,%ymm1,%ymm12
1552
1553# qhasm: x2 = v00 | v10
1554# asm 1: vpor  <v00=reg256#12,<v10=reg256#16,>x2=reg256#12
1555# asm 2: vpor  <v00=%ymm11,<v10=%ymm15,>x2=%ymm11
1556vpor  %ymm11,%ymm15,%ymm11
1557
1558# qhasm: x6 = v01 | v11
1559# asm 1: vpor  <v01=reg256#9,<v11=reg256#13,>x6=reg256#9
1560# asm 2: vpor  <v01=%ymm8,<v11=%ymm12,>x6=%ymm8
1561vpor  %ymm8,%ymm12,%ymm8
1562
1563# qhasm: v00 = x3 & mask0
1564# asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13
1565# asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12
1566vpand %ymm9,%ymm0,%ymm12
1567
1568# qhasm: 4x v10 = x7 << 32
1569# asm 1: vpsllq $32,<x7=reg256#14,>v10=reg256#16
1570# asm 2: vpsllq $32,<x7=%ymm13,>v10=%ymm15
1571vpsllq $32,%ymm13,%ymm15
1572
1573# qhasm: 4x v01 = x3 unsigned>> 32
1574# asm 1: vpsrlq $32,<x3=reg256#10,>v01=reg256#10
1575# asm 2: vpsrlq $32,<x3=%ymm9,>v01=%ymm9
1576vpsrlq $32,%ymm9,%ymm9
1577
1578# qhasm: v11 = x7 & mask1
1579# asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14
1580# asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13
1581vpand %ymm13,%ymm1,%ymm13
1582
1583# qhasm: x3 = v00 | v10
1584# asm 1: vpor  <v00=reg256#13,<v10=reg256#16,>x3=reg256#13
1585# asm 2: vpor  <v00=%ymm12,<v10=%ymm15,>x3=%ymm12
1586vpor  %ymm12,%ymm15,%ymm12
1587
1588# qhasm: x7 = v01 | v11
1589# asm 1: vpor  <v01=reg256#10,<v11=reg256#14,>x7=reg256#10
1590# asm 2: vpor  <v01=%ymm9,<v11=%ymm13,>x7=%ymm9
1591vpor  %ymm9,%ymm13,%ymm9
1592
1593# qhasm: v00 = x0 & mask2
1594# asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14
1595# asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13
1596vpand %ymm14,%ymm2,%ymm13
1597
1598# qhasm: 8x v10 = x2 << 16
1599# asm 1: vpslld $16,<x2=reg256#12,>v10=reg256#16
1600# asm 2: vpslld $16,<x2=%ymm11,>v10=%ymm15
1601vpslld $16,%ymm11,%ymm15
1602
1603# qhasm: 8x v01 = x0 unsigned>> 16
1604# asm 1: vpsrld $16,<x0=reg256#15,>v01=reg256#15
1605# asm 2: vpsrld $16,<x0=%ymm14,>v01=%ymm14
1606vpsrld $16,%ymm14,%ymm14
1607
1608# qhasm: v11 = x2 & mask3
1609# asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12
1610# asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11
1611vpand %ymm11,%ymm3,%ymm11
1612
1613# qhasm: x0 = v00 | v10
1614# asm 1: vpor  <v00=reg256#14,<v10=reg256#16,>x0=reg256#14
1615# asm 2: vpor  <v00=%ymm13,<v10=%ymm15,>x0=%ymm13
1616vpor  %ymm13,%ymm15,%ymm13
1617
1618# qhasm: x2 = v01 | v11
1619# asm 1: vpor  <v01=reg256#15,<v11=reg256#12,>x2=reg256#12
1620# asm 2: vpor  <v01=%ymm14,<v11=%ymm11,>x2=%ymm11
1621vpor  %ymm14,%ymm11,%ymm11
1622
1623# qhasm: v00 = x1 & mask2
1624# asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15
1625# asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14
1626vpand %ymm10,%ymm2,%ymm14
1627
1628# qhasm: 8x v10 = x3 << 16
1629# asm 1: vpslld $16,<x3=reg256#13,>v10=reg256#16
1630# asm 2: vpslld $16,<x3=%ymm12,>v10=%ymm15
1631vpslld $16,%ymm12,%ymm15
1632
1633# qhasm: 8x v01 = x1 unsigned>> 16
1634# asm 1: vpsrld $16,<x1=reg256#11,>v01=reg256#11
1635# asm 2: vpsrld $16,<x1=%ymm10,>v01=%ymm10
1636vpsrld $16,%ymm10,%ymm10
1637
1638# qhasm: v11 = x3 & mask3
1639# asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13
1640# asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12
1641vpand %ymm12,%ymm3,%ymm12
1642
1643# qhasm: x1 = v00 | v10
1644# asm 1: vpor  <v00=reg256#15,<v10=reg256#16,>x1=reg256#15
1645# asm 2: vpor  <v00=%ymm14,<v10=%ymm15,>x1=%ymm14
1646vpor  %ymm14,%ymm15,%ymm14
1647
1648# qhasm: x3 = v01 | v11
1649# asm 1: vpor  <v01=reg256#11,<v11=reg256#13,>x3=reg256#11
1650# asm 2: vpor  <v01=%ymm10,<v11=%ymm12,>x3=%ymm10
1651vpor  %ymm10,%ymm12,%ymm10
1652
1653# qhasm: v00 = x4 & mask2
1654# asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13
1655# asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12
1656vpand %ymm6,%ymm2,%ymm12
1657
1658# qhasm: 8x v10 = x6 << 16
1659# asm 1: vpslld $16,<x6=reg256#9,>v10=reg256#16
1660# asm 2: vpslld $16,<x6=%ymm8,>v10=%ymm15
1661vpslld $16,%ymm8,%ymm15
1662
1663# qhasm: 8x v01 = x4 unsigned>> 16
1664# asm 1: vpsrld $16,<x4=reg256#7,>v01=reg256#7
1665# asm 2: vpsrld $16,<x4=%ymm6,>v01=%ymm6
1666vpsrld $16,%ymm6,%ymm6
1667
1668# qhasm: v11 = x6 & mask3
1669# asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9
1670# asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8
1671vpand %ymm8,%ymm3,%ymm8
1672
1673# qhasm: x4 = v00 | v10
1674# asm 1: vpor  <v00=reg256#13,<v10=reg256#16,>x4=reg256#13
1675# asm 2: vpor  <v00=%ymm12,<v10=%ymm15,>x4=%ymm12
1676vpor  %ymm12,%ymm15,%ymm12
1677
1678# qhasm: x6 = v01 | v11
1679# asm 1: vpor  <v01=reg256#7,<v11=reg256#9,>x6=reg256#7
1680# asm 2: vpor  <v01=%ymm6,<v11=%ymm8,>x6=%ymm6
1681vpor  %ymm6,%ymm8,%ymm6
1682
1683# qhasm: v00 = x5 & mask2
1684# asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9
1685# asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8
1686vpand %ymm7,%ymm2,%ymm8
1687
1688# qhasm: 8x v10 = x7 << 16
1689# asm 1: vpslld $16,<x7=reg256#10,>v10=reg256#16
1690# asm 2: vpslld $16,<x7=%ymm9,>v10=%ymm15
1691vpslld $16,%ymm9,%ymm15
1692
1693# qhasm: 8x v01 = x5 unsigned>> 16
1694# asm 1: vpsrld $16,<x5=reg256#8,>v01=reg256#8
1695# asm 2: vpsrld $16,<x5=%ymm7,>v01=%ymm7
1696vpsrld $16,%ymm7,%ymm7
1697
1698# qhasm: v11 = x7 & mask3
1699# asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10
1700# asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9
1701vpand %ymm9,%ymm3,%ymm9
1702
1703# qhasm: x5 = v00 | v10
1704# asm 1: vpor  <v00=reg256#9,<v10=reg256#16,>x5=reg256#9
1705# asm 2: vpor  <v00=%ymm8,<v10=%ymm15,>x5=%ymm8
1706vpor  %ymm8,%ymm15,%ymm8
1707
1708# qhasm: x7 = v01 | v11
1709# asm 1: vpor  <v01=reg256#8,<v11=reg256#10,>x7=reg256#8
1710# asm 2: vpor  <v01=%ymm7,<v11=%ymm9,>x7=%ymm7
1711vpor  %ymm7,%ymm9,%ymm7
1712
1713# qhasm: v00 = x0 & mask4
1714# asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10
1715# asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9
1716vpand %ymm13,%ymm4,%ymm9
1717
1718# qhasm: 16x v10 = x1 << 8
1719# asm 1: vpsllw $8,<x1=reg256#15,>v10=reg256#16
1720# asm 2: vpsllw $8,<x1=%ymm14,>v10=%ymm15
1721vpsllw $8,%ymm14,%ymm15
1722
1723# qhasm: 16x v01 = x0 unsigned>> 8
1724# asm 1: vpsrlw $8,<x0=reg256#14,>v01=reg256#14
1725# asm 2: vpsrlw $8,<x0=%ymm13,>v01=%ymm13
1726vpsrlw $8,%ymm13,%ymm13
1727
1728# qhasm: v11 = x1 & mask5
1729# asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15
1730# asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14
1731vpand %ymm14,%ymm5,%ymm14
1732
1733# qhasm: x0 = v00 | v10
1734# asm 1: vpor  <v00=reg256#10,<v10=reg256#16,>x0=reg256#10
1735# asm 2: vpor  <v00=%ymm9,<v10=%ymm15,>x0=%ymm9
1736vpor  %ymm9,%ymm15,%ymm9
1737
1738# qhasm: x1 = v01 | v11
1739# asm 1: vpor  <v01=reg256#14,<v11=reg256#15,>x1=reg256#14
1740# asm 2: vpor  <v01=%ymm13,<v11=%ymm14,>x1=%ymm13
1741vpor  %ymm13,%ymm14,%ymm13
1742
1743# qhasm: v00 = x2 & mask4
1744# asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15
1745# asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14
1746vpand %ymm11,%ymm4,%ymm14
1747
1748# qhasm: 16x v10 = x3 << 8
1749# asm 1: vpsllw $8,<x3=reg256#11,>v10=reg256#16
1750# asm 2: vpsllw $8,<x3=%ymm10,>v10=%ymm15
1751vpsllw $8,%ymm10,%ymm15
1752
1753# qhasm: 16x v01 = x2 unsigned>> 8
1754# asm 1: vpsrlw $8,<x2=reg256#12,>v01=reg256#12
1755# asm 2: vpsrlw $8,<x2=%ymm11,>v01=%ymm11
1756vpsrlw $8,%ymm11,%ymm11
1757
1758# qhasm: v11 = x3 & mask5
1759# asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11
1760# asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10
1761vpand %ymm10,%ymm5,%ymm10
1762
1763# qhasm: x2 = v00 | v10
1764# asm 1: vpor  <v00=reg256#15,<v10=reg256#16,>x2=reg256#15
1765# asm 2: vpor  <v00=%ymm14,<v10=%ymm15,>x2=%ymm14
1766vpor  %ymm14,%ymm15,%ymm14
1767
1768# qhasm: x3 = v01 | v11
1769# asm 1: vpor  <v01=reg256#12,<v11=reg256#11,>x3=reg256#11
1770# asm 2: vpor  <v01=%ymm11,<v11=%ymm10,>x3=%ymm10
1771vpor  %ymm11,%ymm10,%ymm10
1772
1773# qhasm: v00 = x4 & mask4
1774# asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12
1775# asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11
1776vpand %ymm12,%ymm4,%ymm11
1777
1778# qhasm: 16x v10 = x5 << 8
1779# asm 1: vpsllw $8,<x5=reg256#9,>v10=reg256#16
1780# asm 2: vpsllw $8,<x5=%ymm8,>v10=%ymm15
1781vpsllw $8,%ymm8,%ymm15
1782
1783# qhasm: 16x v01 = x4 unsigned>> 8
1784# asm 1: vpsrlw $8,<x4=reg256#13,>v01=reg256#13
1785# asm 2: vpsrlw $8,<x4=%ymm12,>v01=%ymm12
1786vpsrlw $8,%ymm12,%ymm12
1787
1788# qhasm: v11 = x5 & mask5
1789# asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9
1790# asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8
1791vpand %ymm8,%ymm5,%ymm8
1792
1793# qhasm: x4 = v00 | v10
1794# asm 1: vpor  <v00=reg256#12,<v10=reg256#16,>x4=reg256#12
1795# asm 2: vpor  <v00=%ymm11,<v10=%ymm15,>x4=%ymm11
1796vpor  %ymm11,%ymm15,%ymm11
1797
1798# qhasm: x5 = v01 | v11
1799# asm 1: vpor  <v01=reg256#13,<v11=reg256#9,>x5=reg256#9
1800# asm 2: vpor  <v01=%ymm12,<v11=%ymm8,>x5=%ymm8
1801vpor  %ymm12,%ymm8,%ymm8
1802
1803# qhasm: v00 = x6 & mask4
1804# asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13
1805# asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12
1806vpand %ymm6,%ymm4,%ymm12
1807
1808# qhasm: 16x v10 = x7 << 8
1809# asm 1: vpsllw $8,<x7=reg256#8,>v10=reg256#16
1810# asm 2: vpsllw $8,<x7=%ymm7,>v10=%ymm15
1811vpsllw $8,%ymm7,%ymm15
1812
1813# qhasm: 16x v01 = x6 unsigned>> 8
1814# asm 1: vpsrlw $8,<x6=reg256#7,>v01=reg256#7
1815# asm 2: vpsrlw $8,<x6=%ymm6,>v01=%ymm6
1816vpsrlw $8,%ymm6,%ymm6
1817
1818# qhasm: v11 = x7 & mask5
1819# asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8
1820# asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7
1821vpand %ymm7,%ymm5,%ymm7
1822
1823# qhasm: x6 = v00 | v10
1824# asm 1: vpor  <v00=reg256#13,<v10=reg256#16,>x6=reg256#13
1825# asm 2: vpor  <v00=%ymm12,<v10=%ymm15,>x6=%ymm12
1826vpor  %ymm12,%ymm15,%ymm12
1827
1828# qhasm: x7 = v01 | v11
1829# asm 1: vpor  <v01=reg256#7,<v11=reg256#8,>x7=reg256#7
1830# asm 2: vpor  <v01=%ymm6,<v11=%ymm7,>x7=%ymm6
1831vpor  %ymm6,%ymm7,%ymm6
1832
1833# qhasm: mem256[ input_0 + 96 ] = x0
1834# asm 1: vmovupd   <x0=reg256#10,96(<input_0=int64#1)
1835# asm 2: vmovupd   <x0=%ymm9,96(<input_0=%rdi)
1836vmovupd   %ymm9,96(%rdi)
1837
1838# qhasm: mem256[ input_0 + 352 ] = x1
1839# asm 1: vmovupd   <x1=reg256#14,352(<input_0=int64#1)
1840# asm 2: vmovupd   <x1=%ymm13,352(<input_0=%rdi)
1841vmovupd   %ymm13,352(%rdi)
1842
1843# qhasm: mem256[ input_0 + 608 ] = x2
1844# asm 1: vmovupd   <x2=reg256#15,608(<input_0=int64#1)
1845# asm 2: vmovupd   <x2=%ymm14,608(<input_0=%rdi)
1846vmovupd   %ymm14,608(%rdi)
1847
1848# qhasm: mem256[ input_0 + 864 ] = x3
1849# asm 1: vmovupd   <x3=reg256#11,864(<input_0=int64#1)
1850# asm 2: vmovupd   <x3=%ymm10,864(<input_0=%rdi)
1851vmovupd   %ymm10,864(%rdi)
1852
1853# qhasm: mem256[ input_0 + 1120 ] = x4
1854# asm 1: vmovupd   <x4=reg256#12,1120(<input_0=int64#1)
1855# asm 2: vmovupd   <x4=%ymm11,1120(<input_0=%rdi)
1856vmovupd   %ymm11,1120(%rdi)
1857
1858# qhasm: mem256[ input_0 + 1376 ] = x5
1859# asm 1: vmovupd   <x5=reg256#9,1376(<input_0=int64#1)
1860# asm 2: vmovupd   <x5=%ymm8,1376(<input_0=%rdi)
1861vmovupd   %ymm8,1376(%rdi)
1862
1863# qhasm: mem256[ input_0 + 1632 ] = x6
1864# asm 1: vmovupd   <x6=reg256#13,1632(<input_0=int64#1)
1865# asm 2: vmovupd   <x6=%ymm12,1632(<input_0=%rdi)
1866vmovupd   %ymm12,1632(%rdi)
1867
1868# qhasm: mem256[ input_0 + 1888 ] = x7
1869# asm 1: vmovupd   <x7=reg256#7,1888(<input_0=int64#1)
1870# asm 2: vmovupd   <x7=%ymm6,1888(<input_0=%rdi)
1871vmovupd   %ymm6,1888(%rdi)
1872
1873# qhasm: x0 = mem256[ input_0 + 128 ]
1874# asm 1: vmovupd   128(<input_0=int64#1),>x0=reg256#7
1875# asm 2: vmovupd   128(<input_0=%rdi),>x0=%ymm6
1876vmovupd   128(%rdi),%ymm6
1877
1878# qhasm: x1 = mem256[ input_0 + 384 ]
1879# asm 1: vmovupd   384(<input_0=int64#1),>x1=reg256#8
1880# asm 2: vmovupd   384(<input_0=%rdi),>x1=%ymm7
1881vmovupd   384(%rdi),%ymm7
1882
1883# qhasm: x2 = mem256[ input_0 + 640 ]
1884# asm 1: vmovupd   640(<input_0=int64#1),>x2=reg256#9
1885# asm 2: vmovupd   640(<input_0=%rdi),>x2=%ymm8
1886vmovupd   640(%rdi),%ymm8
1887
1888# qhasm: x3 = mem256[ input_0 + 896 ]
1889# asm 1: vmovupd   896(<input_0=int64#1),>x3=reg256#10
1890# asm 2: vmovupd   896(<input_0=%rdi),>x3=%ymm9
1891vmovupd   896(%rdi),%ymm9
1892
1893# qhasm: x4 = mem256[ input_0 + 1152 ]
1894# asm 1: vmovupd   1152(<input_0=int64#1),>x4=reg256#11
1895# asm 2: vmovupd   1152(<input_0=%rdi),>x4=%ymm10
1896vmovupd   1152(%rdi),%ymm10
1897
1898# qhasm: x5 = mem256[ input_0 + 1408 ]
1899# asm 1: vmovupd   1408(<input_0=int64#1),>x5=reg256#12
1900# asm 2: vmovupd   1408(<input_0=%rdi),>x5=%ymm11
1901vmovupd   1408(%rdi),%ymm11
1902
1903# qhasm: x6 = mem256[ input_0 + 1664 ]
1904# asm 1: vmovupd   1664(<input_0=int64#1),>x6=reg256#13
1905# asm 2: vmovupd   1664(<input_0=%rdi),>x6=%ymm12
1906vmovupd   1664(%rdi),%ymm12
1907
1908# qhasm: x7 = mem256[ input_0 + 1920 ]
1909# asm 1: vmovupd   1920(<input_0=int64#1),>x7=reg256#14
1910# asm 2: vmovupd   1920(<input_0=%rdi),>x7=%ymm13
1911vmovupd   1920(%rdi),%ymm13
1912
1913# qhasm: v00 = x0 & mask0
1914# asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15
1915# asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14
1916vpand %ymm6,%ymm0,%ymm14
1917
1918# qhasm: 4x v10 = x4 << 32
1919# asm 1: vpsllq $32,<x4=reg256#11,>v10=reg256#16
1920# asm 2: vpsllq $32,<x4=%ymm10,>v10=%ymm15
1921vpsllq $32,%ymm10,%ymm15
1922
1923# qhasm: 4x v01 = x0 unsigned>> 32
1924# asm 1: vpsrlq $32,<x0=reg256#7,>v01=reg256#7
1925# asm 2: vpsrlq $32,<x0=%ymm6,>v01=%ymm6
1926vpsrlq $32,%ymm6,%ymm6
1927
1928# qhasm: v11 = x4 & mask1
1929# asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11
1930# asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10
1931vpand %ymm10,%ymm1,%ymm10
1932
1933# qhasm: x0 = v00 | v10
1934# asm 1: vpor  <v00=reg256#15,<v10=reg256#16,>x0=reg256#15
1935# asm 2: vpor  <v00=%ymm14,<v10=%ymm15,>x0=%ymm14
1936vpor  %ymm14,%ymm15,%ymm14
1937
1938# qhasm: x4 = v01 | v11
1939# asm 1: vpor  <v01=reg256#7,<v11=reg256#11,>x4=reg256#7
1940# asm 2: vpor  <v01=%ymm6,<v11=%ymm10,>x4=%ymm6
1941vpor  %ymm6,%ymm10,%ymm6
1942
1943# qhasm: v00 = x1 & mask0
1944# asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11
1945# asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10
1946vpand %ymm7,%ymm0,%ymm10
1947
1948# qhasm: 4x v10 = x5 << 32
1949# asm 1: vpsllq $32,<x5=reg256#12,>v10=reg256#16
1950# asm 2: vpsllq $32,<x5=%ymm11,>v10=%ymm15
1951vpsllq $32,%ymm11,%ymm15
1952
1953# qhasm: 4x v01 = x1 unsigned>> 32
1954# asm 1: vpsrlq $32,<x1=reg256#8,>v01=reg256#8
1955# asm 2: vpsrlq $32,<x1=%ymm7,>v01=%ymm7
1956vpsrlq $32,%ymm7,%ymm7
1957
1958# qhasm: v11 = x5 & mask1
1959# asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12
1960# asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11
1961vpand %ymm11,%ymm1,%ymm11
1962
1963# qhasm: x1 = v00 | v10
1964# asm 1: vpor  <v00=reg256#11,<v10=reg256#16,>x1=reg256#11
1965# asm 2: vpor  <v00=%ymm10,<v10=%ymm15,>x1=%ymm10
1966vpor  %ymm10,%ymm15,%ymm10
1967
1968# qhasm: x5 = v01 | v11
1969# asm 1: vpor  <v01=reg256#8,<v11=reg256#12,>x5=reg256#8
1970# asm 2: vpor  <v01=%ymm7,<v11=%ymm11,>x5=%ymm7
1971vpor  %ymm7,%ymm11,%ymm7
1972
1973# qhasm: v00 = x2 & mask0
1974# asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12
1975# asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11
1976vpand %ymm8,%ymm0,%ymm11
1977
1978# qhasm: 4x v10 = x6 << 32
1979# asm 1: vpsllq $32,<x6=reg256#13,>v10=reg256#16
1980# asm 2: vpsllq $32,<x6=%ymm12,>v10=%ymm15
1981vpsllq $32,%ymm12,%ymm15
1982
1983# qhasm: 4x v01 = x2 unsigned>> 32
1984# asm 1: vpsrlq $32,<x2=reg256#9,>v01=reg256#9
1985# asm 2: vpsrlq $32,<x2=%ymm8,>v01=%ymm8
1986vpsrlq $32,%ymm8,%ymm8
1987
1988# qhasm: v11 = x6 & mask1
1989# asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13
1990# asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12
1991vpand %ymm12,%ymm1,%ymm12
1992
1993# qhasm: x2 = v00 | v10
1994# asm 1: vpor  <v00=reg256#12,<v10=reg256#16,>x2=reg256#12
1995# asm 2: vpor  <v00=%ymm11,<v10=%ymm15,>x2=%ymm11
1996vpor  %ymm11,%ymm15,%ymm11
1997
1998# qhasm: x6 = v01 | v11
1999# asm 1: vpor  <v01=reg256#9,<v11=reg256#13,>x6=reg256#9
2000# asm 2: vpor  <v01=%ymm8,<v11=%ymm12,>x6=%ymm8
2001vpor  %ymm8,%ymm12,%ymm8
2002
2003# qhasm: v00 = x3 & mask0
2004# asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13
2005# asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12
2006vpand %ymm9,%ymm0,%ymm12
2007
2008# qhasm: 4x v10 = x7 << 32
2009# asm 1: vpsllq $32,<x7=reg256#14,>v10=reg256#16
2010# asm 2: vpsllq $32,<x7=%ymm13,>v10=%ymm15
2011vpsllq $32,%ymm13,%ymm15
2012
2013# qhasm: 4x v01 = x3 unsigned>> 32
2014# asm 1: vpsrlq $32,<x3=reg256#10,>v01=reg256#10
2015# asm 2: vpsrlq $32,<x3=%ymm9,>v01=%ymm9
2016vpsrlq $32,%ymm9,%ymm9
2017
2018# qhasm: v11 = x7 & mask1
2019# asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14
2020# asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13
2021vpand %ymm13,%ymm1,%ymm13
2022
2023# qhasm: x3 = v00 | v10
2024# asm 1: vpor  <v00=reg256#13,<v10=reg256#16,>x3=reg256#13
2025# asm 2: vpor  <v00=%ymm12,<v10=%ymm15,>x3=%ymm12
2026vpor  %ymm12,%ymm15,%ymm12
2027
2028# qhasm: x7 = v01 | v11
2029# asm 1: vpor  <v01=reg256#10,<v11=reg256#14,>x7=reg256#10
2030# asm 2: vpor  <v01=%ymm9,<v11=%ymm13,>x7=%ymm9
2031vpor  %ymm9,%ymm13,%ymm9
2032
2033# qhasm: v00 = x0 & mask2
2034# asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14
2035# asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13
2036vpand %ymm14,%ymm2,%ymm13
2037
2038# qhasm: 8x v10 = x2 << 16
2039# asm 1: vpslld $16,<x2=reg256#12,>v10=reg256#16
2040# asm 2: vpslld $16,<x2=%ymm11,>v10=%ymm15
2041vpslld $16,%ymm11,%ymm15
2042
2043# qhasm: 8x v01 = x0 unsigned>> 16
2044# asm 1: vpsrld $16,<x0=reg256#15,>v01=reg256#15
2045# asm 2: vpsrld $16,<x0=%ymm14,>v01=%ymm14
2046vpsrld $16,%ymm14,%ymm14
2047
2048# qhasm: v11 = x2 & mask3
2049# asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12
2050# asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11
2051vpand %ymm11,%ymm3,%ymm11
2052
2053# qhasm: x0 = v00 | v10
2054# asm 1: vpor  <v00=reg256#14,<v10=reg256#16,>x0=reg256#14
2055# asm 2: vpor  <v00=%ymm13,<v10=%ymm15,>x0=%ymm13
2056vpor  %ymm13,%ymm15,%ymm13
2057
2058# qhasm: x2 = v01 | v11
2059# asm 1: vpor  <v01=reg256#15,<v11=reg256#12,>x2=reg256#12
2060# asm 2: vpor  <v01=%ymm14,<v11=%ymm11,>x2=%ymm11
2061vpor  %ymm14,%ymm11,%ymm11
2062
2063# qhasm: v00 = x1 & mask2
2064# asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15
2065# asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14
2066vpand %ymm10,%ymm2,%ymm14
2067
2068# qhasm: 8x v10 = x3 << 16
2069# asm 1: vpslld $16,<x3=reg256#13,>v10=reg256#16
2070# asm 2: vpslld $16,<x3=%ymm12,>v10=%ymm15
2071vpslld $16,%ymm12,%ymm15
2072
2073# qhasm: 8x v01 = x1 unsigned>> 16
2074# asm 1: vpsrld $16,<x1=reg256#11,>v01=reg256#11
2075# asm 2: vpsrld $16,<x1=%ymm10,>v01=%ymm10
2076vpsrld $16,%ymm10,%ymm10
2077
2078# qhasm: v11 = x3 & mask3
2079# asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13
2080# asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12
2081vpand %ymm12,%ymm3,%ymm12
2082
2083# qhasm: x1 = v00 | v10
2084# asm 1: vpor  <v00=reg256#15,<v10=reg256#16,>x1=reg256#15
2085# asm 2: vpor  <v00=%ymm14,<v10=%ymm15,>x1=%ymm14
2086vpor  %ymm14,%ymm15,%ymm14
2087
2088# qhasm: x3 = v01 | v11
2089# asm 1: vpor  <v01=reg256#11,<v11=reg256#13,>x3=reg256#11
2090# asm 2: vpor  <v01=%ymm10,<v11=%ymm12,>x3=%ymm10
2091vpor  %ymm10,%ymm12,%ymm10
2092
2093# qhasm: v00 = x4 & mask2
2094# asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13
2095# asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12
2096vpand %ymm6,%ymm2,%ymm12
2097
2098# qhasm: 8x v10 = x6 << 16
2099# asm 1: vpslld $16,<x6=reg256#9,>v10=reg256#16
2100# asm 2: vpslld $16,<x6=%ymm8,>v10=%ymm15
2101vpslld $16,%ymm8,%ymm15
2102
2103# qhasm: 8x v01 = x4 unsigned>> 16
2104# asm 1: vpsrld $16,<x4=reg256#7,>v01=reg256#7
2105# asm 2: vpsrld $16,<x4=%ymm6,>v01=%ymm6
2106vpsrld $16,%ymm6,%ymm6
2107
2108# qhasm: v11 = x6 & mask3
2109# asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9
2110# asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8
2111vpand %ymm8,%ymm3,%ymm8
2112
2113# qhasm: x4 = v00 | v10
2114# asm 1: vpor  <v00=reg256#13,<v10=reg256#16,>x4=reg256#13
2115# asm 2: vpor  <v00=%ymm12,<v10=%ymm15,>x4=%ymm12
2116vpor  %ymm12,%ymm15,%ymm12
2117
2118# qhasm: x6 = v01 | v11
2119# asm 1: vpor  <v01=reg256#7,<v11=reg256#9,>x6=reg256#7
2120# asm 2: vpor  <v01=%ymm6,<v11=%ymm8,>x6=%ymm6
2121vpor  %ymm6,%ymm8,%ymm6
2122
2123# qhasm: v00 = x5 & mask2
2124# asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9
2125# asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8
2126vpand %ymm7,%ymm2,%ymm8
2127
2128# qhasm: 8x v10 = x7 << 16
2129# asm 1: vpslld $16,<x7=reg256#10,>v10=reg256#16
2130# asm 2: vpslld $16,<x7=%ymm9,>v10=%ymm15
2131vpslld $16,%ymm9,%ymm15
2132
2133# qhasm: 8x v01 = x5 unsigned>> 16
2134# asm 1: vpsrld $16,<x5=reg256#8,>v01=reg256#8
2135# asm 2: vpsrld $16,<x5=%ymm7,>v01=%ymm7
2136vpsrld $16,%ymm7,%ymm7
2137
2138# qhasm: v11 = x7 & mask3
2139# asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10
2140# asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9
2141vpand %ymm9,%ymm3,%ymm9
2142
2143# qhasm: x5 = v00 | v10
2144# asm 1: vpor  <v00=reg256#9,<v10=reg256#16,>x5=reg256#9
2145# asm 2: vpor  <v00=%ymm8,<v10=%ymm15,>x5=%ymm8
2146vpor  %ymm8,%ymm15,%ymm8
2147
2148# qhasm: x7 = v01 | v11
2149# asm 1: vpor  <v01=reg256#8,<v11=reg256#10,>x7=reg256#8
2150# asm 2: vpor  <v01=%ymm7,<v11=%ymm9,>x7=%ymm7
2151vpor  %ymm7,%ymm9,%ymm7
2152
2153# qhasm: v00 = x0 & mask4
2154# asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10
2155# asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9
2156vpand %ymm13,%ymm4,%ymm9
2157
2158# qhasm: 16x v10 = x1 << 8
2159# asm 1: vpsllw $8,<x1=reg256#15,>v10=reg256#16
2160# asm 2: vpsllw $8,<x1=%ymm14,>v10=%ymm15
2161vpsllw $8,%ymm14,%ymm15
2162
2163# qhasm: 16x v01 = x0 unsigned>> 8
2164# asm 1: vpsrlw $8,<x0=reg256#14,>v01=reg256#14
2165# asm 2: vpsrlw $8,<x0=%ymm13,>v01=%ymm13
2166vpsrlw $8,%ymm13,%ymm13
2167
2168# qhasm: v11 = x1 & mask5
2169# asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15
2170# asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14
2171vpand %ymm14,%ymm5,%ymm14
2172
2173# qhasm: x0 = v00 | v10
2174# asm 1: vpor  <v00=reg256#10,<v10=reg256#16,>x0=reg256#10
2175# asm 2: vpor  <v00=%ymm9,<v10=%ymm15,>x0=%ymm9
2176vpor  %ymm9,%ymm15,%ymm9
2177
2178# qhasm: x1 = v01 | v11
2179# asm 1: vpor  <v01=reg256#14,<v11=reg256#15,>x1=reg256#14
2180# asm 2: vpor  <v01=%ymm13,<v11=%ymm14,>x1=%ymm13
2181vpor  %ymm13,%ymm14,%ymm13
2182
2183# qhasm: v00 = x2 & mask4
2184# asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15
2185# asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14
2186vpand %ymm11,%ymm4,%ymm14
2187
2188# qhasm: 16x v10 = x3 << 8
2189# asm 1: vpsllw $8,<x3=reg256#11,>v10=reg256#16
2190# asm 2: vpsllw $8,<x3=%ymm10,>v10=%ymm15
2191vpsllw $8,%ymm10,%ymm15
2192
2193# qhasm: 16x v01 = x2 unsigned>> 8
2194# asm 1: vpsrlw $8,<x2=reg256#12,>v01=reg256#12
2195# asm 2: vpsrlw $8,<x2=%ymm11,>v01=%ymm11
2196vpsrlw $8,%ymm11,%ymm11
2197
2198# qhasm: v11 = x3 & mask5
2199# asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11
2200# asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10
2201vpand %ymm10,%ymm5,%ymm10
2202
2203# qhasm: x2 = v00 | v10
2204# asm 1: vpor  <v00=reg256#15,<v10=reg256#16,>x2=reg256#15
2205# asm 2: vpor  <v00=%ymm14,<v10=%ymm15,>x2=%ymm14
2206vpor  %ymm14,%ymm15,%ymm14
2207
2208# qhasm: x3 = v01 | v11
2209# asm 1: vpor  <v01=reg256#12,<v11=reg256#11,>x3=reg256#11
2210# asm 2: vpor  <v01=%ymm11,<v11=%ymm10,>x3=%ymm10
2211vpor  %ymm11,%ymm10,%ymm10
2212
2213# qhasm: v00 = x4 & mask4
2214# asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12
2215# asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11
2216vpand %ymm12,%ymm4,%ymm11
2217
2218# qhasm: 16x v10 = x5 << 8
2219# asm 1: vpsllw $8,<x5=reg256#9,>v10=reg256#16
2220# asm 2: vpsllw $8,<x5=%ymm8,>v10=%ymm15
2221vpsllw $8,%ymm8,%ymm15
2222
2223# qhasm: 16x v01 = x4 unsigned>> 8
2224# asm 1: vpsrlw $8,<x4=reg256#13,>v01=reg256#13
2225# asm 2: vpsrlw $8,<x4=%ymm12,>v01=%ymm12
2226vpsrlw $8,%ymm12,%ymm12
2227
2228# qhasm: v11 = x5 & mask5
2229# asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9
2230# asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8
2231vpand %ymm8,%ymm5,%ymm8
2232
2233# qhasm: x4 = v00 | v10
2234# asm 1: vpor  <v00=reg256#12,<v10=reg256#16,>x4=reg256#12
2235# asm 2: vpor  <v00=%ymm11,<v10=%ymm15,>x4=%ymm11
2236vpor  %ymm11,%ymm15,%ymm11
2237
2238# qhasm: x5 = v01 | v11
2239# asm 1: vpor  <v01=reg256#13,<v11=reg256#9,>x5=reg256#9
2240# asm 2: vpor  <v01=%ymm12,<v11=%ymm8,>x5=%ymm8
2241vpor  %ymm12,%ymm8,%ymm8
2242
2243# qhasm: v00 = x6 & mask4
2244# asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13
2245# asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12
2246vpand %ymm6,%ymm4,%ymm12
2247
2248# qhasm: 16x v10 = x7 << 8
2249# asm 1: vpsllw $8,<x7=reg256#8,>v10=reg256#16
2250# asm 2: vpsllw $8,<x7=%ymm7,>v10=%ymm15
2251vpsllw $8,%ymm7,%ymm15
2252
2253# qhasm: 16x v01 = x6 unsigned>> 8
2254# asm 1: vpsrlw $8,<x6=reg256#7,>v01=reg256#7
2255# asm 2: vpsrlw $8,<x6=%ymm6,>v01=%ymm6
2256vpsrlw $8,%ymm6,%ymm6
2257
2258# qhasm: v11 = x7 & mask5
2259# asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8
2260# asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7
2261vpand %ymm7,%ymm5,%ymm7
2262
2263# qhasm: x6 = v00 | v10
2264# asm 1: vpor  <v00=reg256#13,<v10=reg256#16,>x6=reg256#13
2265# asm 2: vpor  <v00=%ymm12,<v10=%ymm15,>x6=%ymm12
2266vpor  %ymm12,%ymm15,%ymm12
2267
2268# qhasm: x7 = v01 | v11
2269# asm 1: vpor  <v01=reg256#7,<v11=reg256#8,>x7=reg256#7
2270# asm 2: vpor  <v01=%ymm6,<v11=%ymm7,>x7=%ymm6
2271vpor  %ymm6,%ymm7,%ymm6
2272
2273# qhasm: mem256[ input_0 + 128 ] = x0
2274# asm 1: vmovupd   <x0=reg256#10,128(<input_0=int64#1)
2275# asm 2: vmovupd   <x0=%ymm9,128(<input_0=%rdi)
2276vmovupd   %ymm9,128(%rdi)
2277
2278# qhasm: mem256[ input_0 + 384 ] = x1
2279# asm 1: vmovupd   <x1=reg256#14,384(<input_0=int64#1)
2280# asm 2: vmovupd   <x1=%ymm13,384(<input_0=%rdi)
2281vmovupd   %ymm13,384(%rdi)
2282
2283# qhasm: mem256[ input_0 + 640 ] = x2
2284# asm 1: vmovupd   <x2=reg256#15,640(<input_0=int64#1)
2285# asm 2: vmovupd   <x2=%ymm14,640(<input_0=%rdi)
2286vmovupd   %ymm14,640(%rdi)
2287
2288# qhasm: mem256[ input_0 + 896 ] = x3
2289# asm 1: vmovupd   <x3=reg256#11,896(<input_0=int64#1)
2290# asm 2: vmovupd   <x3=%ymm10,896(<input_0=%rdi)
2291vmovupd   %ymm10,896(%rdi)
2292
2293# qhasm: mem256[ input_0 + 1152 ] = x4
2294# asm 1: vmovupd   <x4=reg256#12,1152(<input_0=int64#1)
2295# asm 2: vmovupd   <x4=%ymm11,1152(<input_0=%rdi)
2296vmovupd   %ymm11,1152(%rdi)
2297
2298# qhasm: mem256[ input_0 + 1408 ] = x5
2299# asm 1: vmovupd   <x5=reg256#9,1408(<input_0=int64#1)
2300# asm 2: vmovupd   <x5=%ymm8,1408(<input_0=%rdi)
2301vmovupd   %ymm8,1408(%rdi)
2302
2303# qhasm: mem256[ input_0 + 1664 ] = x6
2304# asm 1: vmovupd   <x6=reg256#13,1664(<input_0=int64#1)
2305# asm 2: vmovupd   <x6=%ymm12,1664(<input_0=%rdi)
2306vmovupd   %ymm12,1664(%rdi)
2307
2308# qhasm: mem256[ input_0 + 1920 ] = x7
2309# asm 1: vmovupd   <x7=reg256#7,1920(<input_0=int64#1)
2310# asm 2: vmovupd   <x7=%ymm6,1920(<input_0=%rdi)
2311vmovupd   %ymm6,1920(%rdi)
2312
2313# qhasm: x0 = mem256[ input_0 + 160 ]
2314# asm 1: vmovupd   160(<input_0=int64#1),>x0=reg256#7
2315# asm 2: vmovupd   160(<input_0=%rdi),>x0=%ymm6
2316vmovupd   160(%rdi),%ymm6
2317
2318# qhasm: x1 = mem256[ input_0 + 416 ]
2319# asm 1: vmovupd   416(<input_0=int64#1),>x1=reg256#8
2320# asm 2: vmovupd   416(<input_0=%rdi),>x1=%ymm7
2321vmovupd   416(%rdi),%ymm7
2322
2323# qhasm: x2 = mem256[ input_0 + 672 ]
2324# asm 1: vmovupd   672(<input_0=int64#1),>x2=reg256#9
2325# asm 2: vmovupd   672(<input_0=%rdi),>x2=%ymm8
2326vmovupd   672(%rdi),%ymm8
2327
2328# qhasm: x3 = mem256[ input_0 + 928 ]
2329# asm 1: vmovupd   928(<input_0=int64#1),>x3=reg256#10
2330# asm 2: vmovupd   928(<input_0=%rdi),>x3=%ymm9
2331vmovupd   928(%rdi),%ymm9
2332
2333# qhasm: x4 = mem256[ input_0 + 1184 ]
2334# asm 1: vmovupd   1184(<input_0=int64#1),>x4=reg256#11
2335# asm 2: vmovupd   1184(<input_0=%rdi),>x4=%ymm10
2336vmovupd   1184(%rdi),%ymm10
2337
2338# qhasm: x5 = mem256[ input_0 + 1440 ]
2339# asm 1: vmovupd   1440(<input_0=int64#1),>x5=reg256#12
2340# asm 2: vmovupd   1440(<input_0=%rdi),>x5=%ymm11
2341vmovupd   1440(%rdi),%ymm11
2342
2343# qhasm: x6 = mem256[ input_0 + 1696 ]
2344# asm 1: vmovupd   1696(<input_0=int64#1),>x6=reg256#13
2345# asm 2: vmovupd   1696(<input_0=%rdi),>x6=%ymm12
2346vmovupd   1696(%rdi),%ymm12
2347
2348# qhasm: x7 = mem256[ input_0 + 1952 ]
2349# asm 1: vmovupd   1952(<input_0=int64#1),>x7=reg256#14
2350# asm 2: vmovupd   1952(<input_0=%rdi),>x7=%ymm13
2351vmovupd   1952(%rdi),%ymm13
2352
2353# qhasm: v00 = x0 & mask0
2354# asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15
2355# asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14
2356vpand %ymm6,%ymm0,%ymm14
2357
2358# qhasm: 4x v10 = x4 << 32
2359# asm 1: vpsllq $32,<x4=reg256#11,>v10=reg256#16
2360# asm 2: vpsllq $32,<x4=%ymm10,>v10=%ymm15
2361vpsllq $32,%ymm10,%ymm15
2362
2363# qhasm: 4x v01 = x0 unsigned>> 32
2364# asm 1: vpsrlq $32,<x0=reg256#7,>v01=reg256#7
2365# asm 2: vpsrlq $32,<x0=%ymm6,>v01=%ymm6
2366vpsrlq $32,%ymm6,%ymm6
2367
2368# qhasm: v11 = x4 & mask1
2369# asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11
2370# asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10
2371vpand %ymm10,%ymm1,%ymm10
2372
2373# qhasm: x0 = v00 | v10
2374# asm 1: vpor  <v00=reg256#15,<v10=reg256#16,>x0=reg256#15
2375# asm 2: vpor  <v00=%ymm14,<v10=%ymm15,>x0=%ymm14
2376vpor  %ymm14,%ymm15,%ymm14
2377
2378# qhasm: x4 = v01 | v11
2379# asm 1: vpor  <v01=reg256#7,<v11=reg256#11,>x4=reg256#7
2380# asm 2: vpor  <v01=%ymm6,<v11=%ymm10,>x4=%ymm6
2381vpor  %ymm6,%ymm10,%ymm6
2382
2383# qhasm: v00 = x1 & mask0
2384# asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11
2385# asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10
2386vpand %ymm7,%ymm0,%ymm10
2387
2388# qhasm: 4x v10 = x5 << 32
2389# asm 1: vpsllq $32,<x5=reg256#12,>v10=reg256#16
2390# asm 2: vpsllq $32,<x5=%ymm11,>v10=%ymm15
2391vpsllq $32,%ymm11,%ymm15
2392
2393# qhasm: 4x v01 = x1 unsigned>> 32
2394# asm 1: vpsrlq $32,<x1=reg256#8,>v01=reg256#8
2395# asm 2: vpsrlq $32,<x1=%ymm7,>v01=%ymm7
2396vpsrlq $32,%ymm7,%ymm7
2397
2398# qhasm: v11 = x5 & mask1
2399# asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12
2400# asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11
2401vpand %ymm11,%ymm1,%ymm11
2402
2403# qhasm: x1 = v00 | v10
2404# asm 1: vpor  <v00=reg256#11,<v10=reg256#16,>x1=reg256#11
2405# asm 2: vpor  <v00=%ymm10,<v10=%ymm15,>x1=%ymm10
2406vpor  %ymm10,%ymm15,%ymm10
2407
2408# qhasm: x5 = v01 | v11
2409# asm 1: vpor  <v01=reg256#8,<v11=reg256#12,>x5=reg256#8
2410# asm 2: vpor  <v01=%ymm7,<v11=%ymm11,>x5=%ymm7
2411vpor  %ymm7,%ymm11,%ymm7
2412
2413# qhasm: v00 = x2 & mask0
2414# asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12
2415# asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11
2416vpand %ymm8,%ymm0,%ymm11
2417
2418# qhasm: 4x v10 = x6 << 32
2419# asm 1: vpsllq $32,<x6=reg256#13,>v10=reg256#16
2420# asm 2: vpsllq $32,<x6=%ymm12,>v10=%ymm15
2421vpsllq $32,%ymm12,%ymm15
2422
2423# qhasm: 4x v01 = x2 unsigned>> 32
2424# asm 1: vpsrlq $32,<x2=reg256#9,>v01=reg256#9
2425# asm 2: vpsrlq $32,<x2=%ymm8,>v01=%ymm8
2426vpsrlq $32,%ymm8,%ymm8
2427
2428# qhasm: v11 = x6 & mask1
2429# asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13
2430# asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12
2431vpand %ymm12,%ymm1,%ymm12
2432
2433# qhasm: x2 = v00 | v10
2434# asm 1: vpor  <v00=reg256#12,<v10=reg256#16,>x2=reg256#12
2435# asm 2: vpor  <v00=%ymm11,<v10=%ymm15,>x2=%ymm11
2436vpor  %ymm11,%ymm15,%ymm11
2437
2438# qhasm: x6 = v01 | v11
2439# asm 1: vpor  <v01=reg256#9,<v11=reg256#13,>x6=reg256#9
2440# asm 2: vpor  <v01=%ymm8,<v11=%ymm12,>x6=%ymm8
2441vpor  %ymm8,%ymm12,%ymm8
2442
2443# qhasm: v00 = x3 & mask0
2444# asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13
2445# asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12
2446vpand %ymm9,%ymm0,%ymm12
2447
2448# qhasm: 4x v10 = x7 << 32
2449# asm 1: vpsllq $32,<x7=reg256#14,>v10=reg256#16
2450# asm 2: vpsllq $32,<x7=%ymm13,>v10=%ymm15
2451vpsllq $32,%ymm13,%ymm15
2452
2453# qhasm: 4x v01 = x3 unsigned>> 32
2454# asm 1: vpsrlq $32,<x3=reg256#10,>v01=reg256#10
2455# asm 2: vpsrlq $32,<x3=%ymm9,>v01=%ymm9
2456vpsrlq $32,%ymm9,%ymm9
2457
2458# qhasm: v11 = x7 & mask1
2459# asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14
2460# asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13
2461vpand %ymm13,%ymm1,%ymm13
2462
2463# qhasm: x3 = v00 | v10
2464# asm 1: vpor  <v00=reg256#13,<v10=reg256#16,>x3=reg256#13
2465# asm 2: vpor  <v00=%ymm12,<v10=%ymm15,>x3=%ymm12
2466vpor  %ymm12,%ymm15,%ymm12
2467
2468# qhasm: x7 = v01 | v11
2469# asm 1: vpor  <v01=reg256#10,<v11=reg256#14,>x7=reg256#10
2470# asm 2: vpor  <v01=%ymm9,<v11=%ymm13,>x7=%ymm9
2471vpor  %ymm9,%ymm13,%ymm9
2472
2473# qhasm: v00 = x0 & mask2
2474# asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14
2475# asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13
2476vpand %ymm14,%ymm2,%ymm13
2477
2478# qhasm: 8x v10 = x2 << 16
2479# asm 1: vpslld $16,<x2=reg256#12,>v10=reg256#16
2480# asm 2: vpslld $16,<x2=%ymm11,>v10=%ymm15
2481vpslld $16,%ymm11,%ymm15
2482
2483# qhasm: 8x v01 = x0 unsigned>> 16
2484# asm 1: vpsrld $16,<x0=reg256#15,>v01=reg256#15
2485# asm 2: vpsrld $16,<x0=%ymm14,>v01=%ymm14
2486vpsrld $16,%ymm14,%ymm14
2487
2488# qhasm: v11 = x2 & mask3
2489# asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12
2490# asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11
2491vpand %ymm11,%ymm3,%ymm11
2492
2493# qhasm: x0 = v00 | v10
2494# asm 1: vpor  <v00=reg256#14,<v10=reg256#16,>x0=reg256#14
2495# asm 2: vpor  <v00=%ymm13,<v10=%ymm15,>x0=%ymm13
2496vpor  %ymm13,%ymm15,%ymm13
2497
2498# qhasm: x2 = v01 | v11
2499# asm 1: vpor  <v01=reg256#15,<v11=reg256#12,>x2=reg256#12
2500# asm 2: vpor  <v01=%ymm14,<v11=%ymm11,>x2=%ymm11
2501vpor  %ymm14,%ymm11,%ymm11
2502
2503# qhasm: v00 = x1 & mask2
2504# asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15
2505# asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14
2506vpand %ymm10,%ymm2,%ymm14
2507
2508# qhasm: 8x v10 = x3 << 16
2509# asm 1: vpslld $16,<x3=reg256#13,>v10=reg256#16
2510# asm 2: vpslld $16,<x3=%ymm12,>v10=%ymm15
2511vpslld $16,%ymm12,%ymm15
2512
2513# qhasm: 8x v01 = x1 unsigned>> 16
2514# asm 1: vpsrld $16,<x1=reg256#11,>v01=reg256#11
2515# asm 2: vpsrld $16,<x1=%ymm10,>v01=%ymm10
2516vpsrld $16,%ymm10,%ymm10
2517
2518# qhasm: v11 = x3 & mask3
2519# asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13
2520# asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12
2521vpand %ymm12,%ymm3,%ymm12
2522
2523# qhasm: x1 = v00 | v10
2524# asm 1: vpor  <v00=reg256#15,<v10=reg256#16,>x1=reg256#15
2525# asm 2: vpor  <v00=%ymm14,<v10=%ymm15,>x1=%ymm14
2526vpor  %ymm14,%ymm15,%ymm14
2527
2528# qhasm: x3 = v01 | v11
2529# asm 1: vpor  <v01=reg256#11,<v11=reg256#13,>x3=reg256#11
2530# asm 2: vpor  <v01=%ymm10,<v11=%ymm12,>x3=%ymm10
2531vpor  %ymm10,%ymm12,%ymm10
2532
2533# qhasm: v00 = x4 & mask2
2534# asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13
2535# asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12
2536vpand %ymm6,%ymm2,%ymm12
2537
2538# qhasm: 8x v10 = x6 << 16
2539# asm 1: vpslld $16,<x6=reg256#9,>v10=reg256#16
2540# asm 2: vpslld $16,<x6=%ymm8,>v10=%ymm15
2541vpslld $16,%ymm8,%ymm15
2542
2543# qhasm: 8x v01 = x4 unsigned>> 16
2544# asm 1: vpsrld $16,<x4=reg256#7,>v01=reg256#7
2545# asm 2: vpsrld $16,<x4=%ymm6,>v01=%ymm6
2546vpsrld $16,%ymm6,%ymm6
2547
2548# qhasm: v11 = x6 & mask3
2549# asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9
2550# asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8
2551vpand %ymm8,%ymm3,%ymm8
2552
2553# qhasm: x4 = v00 | v10
2554# asm 1: vpor  <v00=reg256#13,<v10=reg256#16,>x4=reg256#13
2555# asm 2: vpor  <v00=%ymm12,<v10=%ymm15,>x4=%ymm12
2556vpor  %ymm12,%ymm15,%ymm12
2557
2558# qhasm: x6 = v01 | v11
2559# asm 1: vpor  <v01=reg256#7,<v11=reg256#9,>x6=reg256#7
2560# asm 2: vpor  <v01=%ymm6,<v11=%ymm8,>x6=%ymm6
2561vpor  %ymm6,%ymm8,%ymm6
2562
2563# qhasm: v00 = x5 & mask2
2564# asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9
2565# asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8
2566vpand %ymm7,%ymm2,%ymm8
2567
2568# qhasm: 8x v10 = x7 << 16
2569# asm 1: vpslld $16,<x7=reg256#10,>v10=reg256#16
2570# asm 2: vpslld $16,<x7=%ymm9,>v10=%ymm15
2571vpslld $16,%ymm9,%ymm15
2572
2573# qhasm: 8x v01 = x5 unsigned>> 16
2574# asm 1: vpsrld $16,<x5=reg256#8,>v01=reg256#8
2575# asm 2: vpsrld $16,<x5=%ymm7,>v01=%ymm7
2576vpsrld $16,%ymm7,%ymm7
2577
2578# qhasm: v11 = x7 & mask3
2579# asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10
2580# asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9
2581vpand %ymm9,%ymm3,%ymm9
2582
2583# qhasm: x5 = v00 | v10
2584# asm 1: vpor  <v00=reg256#9,<v10=reg256#16,>x5=reg256#9
2585# asm 2: vpor  <v00=%ymm8,<v10=%ymm15,>x5=%ymm8
2586vpor  %ymm8,%ymm15,%ymm8
2587
2588# qhasm: x7 = v01 | v11
2589# asm 1: vpor  <v01=reg256#8,<v11=reg256#10,>x7=reg256#8
2590# asm 2: vpor  <v01=%ymm7,<v11=%ymm9,>x7=%ymm7
2591vpor  %ymm7,%ymm9,%ymm7
2592
2593# qhasm: v00 = x0 & mask4
2594# asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10
2595# asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9
2596vpand %ymm13,%ymm4,%ymm9
2597
2598# qhasm: 16x v10 = x1 << 8
2599# asm 1: vpsllw $8,<x1=reg256#15,>v10=reg256#16
2600# asm 2: vpsllw $8,<x1=%ymm14,>v10=%ymm15
2601vpsllw $8,%ymm14,%ymm15
2602
2603# qhasm: 16x v01 = x0 unsigned>> 8
2604# asm 1: vpsrlw $8,<x0=reg256#14,>v01=reg256#14
2605# asm 2: vpsrlw $8,<x0=%ymm13,>v01=%ymm13
2606vpsrlw $8,%ymm13,%ymm13
2607
2608# qhasm: v11 = x1 & mask5
2609# asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15
2610# asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14
2611vpand %ymm14,%ymm5,%ymm14
2612
2613# qhasm: x0 = v00 | v10
2614# asm 1: vpor  <v00=reg256#10,<v10=reg256#16,>x0=reg256#10
2615# asm 2: vpor  <v00=%ymm9,<v10=%ymm15,>x0=%ymm9
2616vpor  %ymm9,%ymm15,%ymm9
2617
2618# qhasm: x1 = v01 | v11
2619# asm 1: vpor  <v01=reg256#14,<v11=reg256#15,>x1=reg256#14
2620# asm 2: vpor  <v01=%ymm13,<v11=%ymm14,>x1=%ymm13
2621vpor  %ymm13,%ymm14,%ymm13
2622
2623# qhasm: v00 = x2 & mask4
2624# asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15
2625# asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14
2626vpand %ymm11,%ymm4,%ymm14
2627
2628# qhasm: 16x v10 = x3 << 8
2629# asm 1: vpsllw $8,<x3=reg256#11,>v10=reg256#16
2630# asm 2: vpsllw $8,<x3=%ymm10,>v10=%ymm15
2631vpsllw $8,%ymm10,%ymm15
2632
2633# qhasm: 16x v01 = x2 unsigned>> 8
2634# asm 1: vpsrlw $8,<x2=reg256#12,>v01=reg256#12
2635# asm 2: vpsrlw $8,<x2=%ymm11,>v01=%ymm11
2636vpsrlw $8,%ymm11,%ymm11
2637
2638# qhasm: v11 = x3 & mask5
2639# asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11
2640# asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10
2641vpand %ymm10,%ymm5,%ymm10
2642
2643# qhasm: x2 = v00 | v10
2644# asm 1: vpor  <v00=reg256#15,<v10=reg256#16,>x2=reg256#15
2645# asm 2: vpor  <v00=%ymm14,<v10=%ymm15,>x2=%ymm14
2646vpor  %ymm14,%ymm15,%ymm14
2647
2648# qhasm: x3 = v01 | v11
2649# asm 1: vpor  <v01=reg256#12,<v11=reg256#11,>x3=reg256#11
2650# asm 2: vpor  <v01=%ymm11,<v11=%ymm10,>x3=%ymm10
2651vpor  %ymm11,%ymm10,%ymm10
2652
2653# qhasm: v00 = x4 & mask4
2654# asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12
2655# asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11
2656vpand %ymm12,%ymm4,%ymm11
2657
2658# qhasm: 16x v10 = x5 << 8
2659# asm 1: vpsllw $8,<x5=reg256#9,>v10=reg256#16
2660# asm 2: vpsllw $8,<x5=%ymm8,>v10=%ymm15
2661vpsllw $8,%ymm8,%ymm15
2662
2663# qhasm: 16x v01 = x4 unsigned>> 8
2664# asm 1: vpsrlw $8,<x4=reg256#13,>v01=reg256#13
2665# asm 2: vpsrlw $8,<x4=%ymm12,>v01=%ymm12
2666vpsrlw $8,%ymm12,%ymm12
2667
2668# qhasm: v11 = x5 & mask5
2669# asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9
2670# asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8
2671vpand %ymm8,%ymm5,%ymm8
2672
2673# qhasm: x4 = v00 | v10
2674# asm 1: vpor  <v00=reg256#12,<v10=reg256#16,>x4=reg256#12
2675# asm 2: vpor  <v00=%ymm11,<v10=%ymm15,>x4=%ymm11
2676vpor  %ymm11,%ymm15,%ymm11
2677
2678# qhasm: x5 = v01 | v11
2679# asm 1: vpor  <v01=reg256#13,<v11=reg256#9,>x5=reg256#9
2680# asm 2: vpor  <v01=%ymm12,<v11=%ymm8,>x5=%ymm8
2681vpor  %ymm12,%ymm8,%ymm8
2682
2683# qhasm: v00 = x6 & mask4
2684# asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13
2685# asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12
2686vpand %ymm6,%ymm4,%ymm12
2687
2688# qhasm: 16x v10 = x7 << 8
2689# asm 1: vpsllw $8,<x7=reg256#8,>v10=reg256#16
2690# asm 2: vpsllw $8,<x7=%ymm7,>v10=%ymm15
2691vpsllw $8,%ymm7,%ymm15
2692
2693# qhasm: 16x v01 = x6 unsigned>> 8
2694# asm 1: vpsrlw $8,<x6=reg256#7,>v01=reg256#7
2695# asm 2: vpsrlw $8,<x6=%ymm6,>v01=%ymm6
2696vpsrlw $8,%ymm6,%ymm6
2697
2698# qhasm: v11 = x7 & mask5
2699# asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8
2700# asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7
2701vpand %ymm7,%ymm5,%ymm7
2702
2703# qhasm: x6 = v00 | v10
2704# asm 1: vpor  <v00=reg256#13,<v10=reg256#16,>x6=reg256#13
2705# asm 2: vpor  <v00=%ymm12,<v10=%ymm15,>x6=%ymm12
2706vpor  %ymm12,%ymm15,%ymm12
2707
2708# qhasm: x7 = v01 | v11
2709# asm 1: vpor  <v01=reg256#7,<v11=reg256#8,>x7=reg256#7
2710# asm 2: vpor  <v01=%ymm6,<v11=%ymm7,>x7=%ymm6
2711vpor  %ymm6,%ymm7,%ymm6
2712
2713# qhasm: mem256[ input_0 + 160 ] = x0
2714# asm 1: vmovupd   <x0=reg256#10,160(<input_0=int64#1)
2715# asm 2: vmovupd   <x0=%ymm9,160(<input_0=%rdi)
2716vmovupd   %ymm9,160(%rdi)
2717
2718# qhasm: mem256[ input_0 + 416 ] = x1
2719# asm 1: vmovupd   <x1=reg256#14,416(<input_0=int64#1)
2720# asm 2: vmovupd   <x1=%ymm13,416(<input_0=%rdi)
2721vmovupd   %ymm13,416(%rdi)
2722
2723# qhasm: mem256[ input_0 + 672 ] = x2
2724# asm 1: vmovupd   <x2=reg256#15,672(<input_0=int64#1)
2725# asm 2: vmovupd   <x2=%ymm14,672(<input_0=%rdi)
2726vmovupd   %ymm14,672(%rdi)
2727
2728# qhasm: mem256[ input_0 + 928 ] = x3
2729# asm 1: vmovupd   <x3=reg256#11,928(<input_0=int64#1)
2730# asm 2: vmovupd   <x3=%ymm10,928(<input_0=%rdi)
2731vmovupd   %ymm10,928(%rdi)
2732
2733# qhasm: mem256[ input_0 + 1184 ] = x4
2734# asm 1: vmovupd   <x4=reg256#12,1184(<input_0=int64#1)
2735# asm 2: vmovupd   <x4=%ymm11,1184(<input_0=%rdi)
2736vmovupd   %ymm11,1184(%rdi)
2737
2738# qhasm: mem256[ input_0 + 1440 ] = x5
2739# asm 1: vmovupd   <x5=reg256#9,1440(<input_0=int64#1)
2740# asm 2: vmovupd   <x5=%ymm8,1440(<input_0=%rdi)
2741vmovupd   %ymm8,1440(%rdi)
2742
2743# qhasm: mem256[ input_0 + 1696 ] = x6
2744# asm 1: vmovupd   <x6=reg256#13,1696(<input_0=int64#1)
2745# asm 2: vmovupd   <x6=%ymm12,1696(<input_0=%rdi)
2746vmovupd   %ymm12,1696(%rdi)
2747
2748# qhasm: mem256[ input_0 + 1952 ] = x7
2749# asm 1: vmovupd   <x7=reg256#7,1952(<input_0=int64#1)
2750# asm 2: vmovupd   <x7=%ymm6,1952(<input_0=%rdi)
2751vmovupd   %ymm6,1952(%rdi)
2752
2753# qhasm: x0 = mem256[ input_0 + 192 ]
2754# asm 1: vmovupd   192(<input_0=int64#1),>x0=reg256#7
2755# asm 2: vmovupd   192(<input_0=%rdi),>x0=%ymm6
2756vmovupd   192(%rdi),%ymm6
2757
2758# qhasm: x1 = mem256[ input_0 + 448 ]
2759# asm 1: vmovupd   448(<input_0=int64#1),>x1=reg256#8
2760# asm 2: vmovupd   448(<input_0=%rdi),>x1=%ymm7
2761vmovupd   448(%rdi),%ymm7
2762
2763# qhasm: x2 = mem256[ input_0 + 704 ]
2764# asm 1: vmovupd   704(<input_0=int64#1),>x2=reg256#9
2765# asm 2: vmovupd   704(<input_0=%rdi),>x2=%ymm8
2766vmovupd   704(%rdi),%ymm8
2767
2768# qhasm: x3 = mem256[ input_0 + 960 ]
2769# asm 1: vmovupd   960(<input_0=int64#1),>x3=reg256#10
2770# asm 2: vmovupd   960(<input_0=%rdi),>x3=%ymm9
2771vmovupd   960(%rdi),%ymm9
2772
2773# qhasm: x4 = mem256[ input_0 + 1216 ]
2774# asm 1: vmovupd   1216(<input_0=int64#1),>x4=reg256#11
2775# asm 2: vmovupd   1216(<input_0=%rdi),>x4=%ymm10
2776vmovupd   1216(%rdi),%ymm10
2777
2778# qhasm: x5 = mem256[ input_0 + 1472 ]
2779# asm 1: vmovupd   1472(<input_0=int64#1),>x5=reg256#12
2780# asm 2: vmovupd   1472(<input_0=%rdi),>x5=%ymm11
2781vmovupd   1472(%rdi),%ymm11
2782
2783# qhasm: x6 = mem256[ input_0 + 1728 ]
2784# asm 1: vmovupd   1728(<input_0=int64#1),>x6=reg256#13
2785# asm 2: vmovupd   1728(<input_0=%rdi),>x6=%ymm12
2786vmovupd   1728(%rdi),%ymm12
2787
2788# qhasm: x7 = mem256[ input_0 + 1984 ]
2789# asm 1: vmovupd   1984(<input_0=int64#1),>x7=reg256#14
2790# asm 2: vmovupd   1984(<input_0=%rdi),>x7=%ymm13
2791vmovupd   1984(%rdi),%ymm13
2792
2793# qhasm: v00 = x0 & mask0
2794# asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15
2795# asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14
2796vpand %ymm6,%ymm0,%ymm14
2797
2798# qhasm: 4x v10 = x4 << 32
2799# asm 1: vpsllq $32,<x4=reg256#11,>v10=reg256#16
2800# asm 2: vpsllq $32,<x4=%ymm10,>v10=%ymm15
2801vpsllq $32,%ymm10,%ymm15
2802
2803# qhasm: 4x v01 = x0 unsigned>> 32
2804# asm 1: vpsrlq $32,<x0=reg256#7,>v01=reg256#7
2805# asm 2: vpsrlq $32,<x0=%ymm6,>v01=%ymm6
2806vpsrlq $32,%ymm6,%ymm6
2807
2808# qhasm: v11 = x4 & mask1
2809# asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11
2810# asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10
2811vpand %ymm10,%ymm1,%ymm10
2812
2813# qhasm: x0 = v00 | v10
2814# asm 1: vpor  <v00=reg256#15,<v10=reg256#16,>x0=reg256#15
2815# asm 2: vpor  <v00=%ymm14,<v10=%ymm15,>x0=%ymm14
2816vpor  %ymm14,%ymm15,%ymm14
2817
2818# qhasm: x4 = v01 | v11
2819# asm 1: vpor  <v01=reg256#7,<v11=reg256#11,>x4=reg256#7
2820# asm 2: vpor  <v01=%ymm6,<v11=%ymm10,>x4=%ymm6
2821vpor  %ymm6,%ymm10,%ymm6
2822
2823# qhasm: v00 = x1 & mask0
2824# asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11
2825# asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10
2826vpand %ymm7,%ymm0,%ymm10
2827
2828# qhasm: 4x v10 = x5 << 32
2829# asm 1: vpsllq $32,<x5=reg256#12,>v10=reg256#16
2830# asm 2: vpsllq $32,<x5=%ymm11,>v10=%ymm15
2831vpsllq $32,%ymm11,%ymm15
2832
2833# qhasm: 4x v01 = x1 unsigned>> 32
2834# asm 1: vpsrlq $32,<x1=reg256#8,>v01=reg256#8
2835# asm 2: vpsrlq $32,<x1=%ymm7,>v01=%ymm7
2836vpsrlq $32,%ymm7,%ymm7
2837
2838# qhasm: v11 = x5 & mask1
2839# asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12
2840# asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11
2841vpand %ymm11,%ymm1,%ymm11
2842
2843# qhasm: x1 = v00 | v10
2844# asm 1: vpor  <v00=reg256#11,<v10=reg256#16,>x1=reg256#11
2845# asm 2: vpor  <v00=%ymm10,<v10=%ymm15,>x1=%ymm10
2846vpor  %ymm10,%ymm15,%ymm10
2847
2848# qhasm: x5 = v01 | v11
2849# asm 1: vpor  <v01=reg256#8,<v11=reg256#12,>x5=reg256#8
2850# asm 2: vpor  <v01=%ymm7,<v11=%ymm11,>x5=%ymm7
2851vpor  %ymm7,%ymm11,%ymm7
2852
2853# qhasm: v00 = x2 & mask0
2854# asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12
2855# asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11
2856vpand %ymm8,%ymm0,%ymm11
2857
2858# qhasm: 4x v10 = x6 << 32
2859# asm 1: vpsllq $32,<x6=reg256#13,>v10=reg256#16
2860# asm 2: vpsllq $32,<x6=%ymm12,>v10=%ymm15
2861vpsllq $32,%ymm12,%ymm15
2862
2863# qhasm: 4x v01 = x2 unsigned>> 32
2864# asm 1: vpsrlq $32,<x2=reg256#9,>v01=reg256#9
2865# asm 2: vpsrlq $32,<x2=%ymm8,>v01=%ymm8
2866vpsrlq $32,%ymm8,%ymm8
2867
2868# qhasm: v11 = x6 & mask1
2869# asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13
2870# asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12
2871vpand %ymm12,%ymm1,%ymm12
2872
2873# qhasm: x2 = v00 | v10
2874# asm 1: vpor  <v00=reg256#12,<v10=reg256#16,>x2=reg256#12
2875# asm 2: vpor  <v00=%ymm11,<v10=%ymm15,>x2=%ymm11
2876vpor  %ymm11,%ymm15,%ymm11
2877
2878# qhasm: x6 = v01 | v11
2879# asm 1: vpor  <v01=reg256#9,<v11=reg256#13,>x6=reg256#9
2880# asm 2: vpor  <v01=%ymm8,<v11=%ymm12,>x6=%ymm8
2881vpor  %ymm8,%ymm12,%ymm8
2882
2883# qhasm: v00 = x3 & mask0
2884# asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13
2885# asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12
2886vpand %ymm9,%ymm0,%ymm12
2887
2888# qhasm: 4x v10 = x7 << 32
2889# asm 1: vpsllq $32,<x7=reg256#14,>v10=reg256#16
2890# asm 2: vpsllq $32,<x7=%ymm13,>v10=%ymm15
2891vpsllq $32,%ymm13,%ymm15
2892
2893# qhasm: 4x v01 = x3 unsigned>> 32
2894# asm 1: vpsrlq $32,<x3=reg256#10,>v01=reg256#10
2895# asm 2: vpsrlq $32,<x3=%ymm9,>v01=%ymm9
2896vpsrlq $32,%ymm9,%ymm9
2897
2898# qhasm: v11 = x7 & mask1
2899# asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14
2900# asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13
2901vpand %ymm13,%ymm1,%ymm13
2902
2903# qhasm: x3 = v00 | v10
2904# asm 1: vpor  <v00=reg256#13,<v10=reg256#16,>x3=reg256#13
2905# asm 2: vpor  <v00=%ymm12,<v10=%ymm15,>x3=%ymm12
2906vpor  %ymm12,%ymm15,%ymm12
2907
2908# qhasm: x7 = v01 | v11
2909# asm 1: vpor  <v01=reg256#10,<v11=reg256#14,>x7=reg256#10
2910# asm 2: vpor  <v01=%ymm9,<v11=%ymm13,>x7=%ymm9
2911vpor  %ymm9,%ymm13,%ymm9
2912
2913# qhasm: v00 = x0 & mask2
2914# asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14
2915# asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13
2916vpand %ymm14,%ymm2,%ymm13
2917
2918# qhasm: 8x v10 = x2 << 16
2919# asm 1: vpslld $16,<x2=reg256#12,>v10=reg256#16
2920# asm 2: vpslld $16,<x2=%ymm11,>v10=%ymm15
2921vpslld $16,%ymm11,%ymm15
2922
2923# qhasm: 8x v01 = x0 unsigned>> 16
2924# asm 1: vpsrld $16,<x0=reg256#15,>v01=reg256#15
2925# asm 2: vpsrld $16,<x0=%ymm14,>v01=%ymm14
2926vpsrld $16,%ymm14,%ymm14
2927
2928# qhasm: v11 = x2 & mask3
2929# asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12
2930# asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11
2931vpand %ymm11,%ymm3,%ymm11
2932
2933# qhasm: x0 = v00 | v10
2934# asm 1: vpor  <v00=reg256#14,<v10=reg256#16,>x0=reg256#14
2935# asm 2: vpor  <v00=%ymm13,<v10=%ymm15,>x0=%ymm13
2936vpor  %ymm13,%ymm15,%ymm13
2937
2938# qhasm: x2 = v01 | v11
2939# asm 1: vpor  <v01=reg256#15,<v11=reg256#12,>x2=reg256#12
2940# asm 2: vpor  <v01=%ymm14,<v11=%ymm11,>x2=%ymm11
2941vpor  %ymm14,%ymm11,%ymm11
2942
2943# qhasm: v00 = x1 & mask2
2944# asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15
2945# asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14
2946vpand %ymm10,%ymm2,%ymm14
2947
2948# qhasm: 8x v10 = x3 << 16
2949# asm 1: vpslld $16,<x3=reg256#13,>v10=reg256#16
2950# asm 2: vpslld $16,<x3=%ymm12,>v10=%ymm15
2951vpslld $16,%ymm12,%ymm15
2952
2953# qhasm: 8x v01 = x1 unsigned>> 16
2954# asm 1: vpsrld $16,<x1=reg256#11,>v01=reg256#11
2955# asm 2: vpsrld $16,<x1=%ymm10,>v01=%ymm10
2956vpsrld $16,%ymm10,%ymm10
2957
2958# qhasm: v11 = x3 & mask3
2959# asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13
2960# asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12
2961vpand %ymm12,%ymm3,%ymm12
2962
2963# qhasm: x1 = v00 | v10
2964# asm 1: vpor  <v00=reg256#15,<v10=reg256#16,>x1=reg256#15
2965# asm 2: vpor  <v00=%ymm14,<v10=%ymm15,>x1=%ymm14
2966vpor  %ymm14,%ymm15,%ymm14
2967
2968# qhasm: x3 = v01 | v11
2969# asm 1: vpor  <v01=reg256#11,<v11=reg256#13,>x3=reg256#11
2970# asm 2: vpor  <v01=%ymm10,<v11=%ymm12,>x3=%ymm10
2971vpor  %ymm10,%ymm12,%ymm10
2972
2973# qhasm: v00 = x4 & mask2
2974# asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13
2975# asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12
2976vpand %ymm6,%ymm2,%ymm12
2977
2978# qhasm: 8x v10 = x6 << 16
2979# asm 1: vpslld $16,<x6=reg256#9,>v10=reg256#16
2980# asm 2: vpslld $16,<x6=%ymm8,>v10=%ymm15
2981vpslld $16,%ymm8,%ymm15
2982
2983# qhasm: 8x v01 = x4 unsigned>> 16
2984# asm 1: vpsrld $16,<x4=reg256#7,>v01=reg256#7
2985# asm 2: vpsrld $16,<x4=%ymm6,>v01=%ymm6
2986vpsrld $16,%ymm6,%ymm6
2987
2988# qhasm: v11 = x6 & mask3
2989# asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9
2990# asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8
2991vpand %ymm8,%ymm3,%ymm8
2992
2993# qhasm: x4 = v00 | v10
2994# asm 1: vpor  <v00=reg256#13,<v10=reg256#16,>x4=reg256#13
2995# asm 2: vpor  <v00=%ymm12,<v10=%ymm15,>x4=%ymm12
2996vpor  %ymm12,%ymm15,%ymm12
2997
2998# qhasm: x6 = v01 | v11
2999# asm 1: vpor  <v01=reg256#7,<v11=reg256#9,>x6=reg256#7
3000# asm 2: vpor  <v01=%ymm6,<v11=%ymm8,>x6=%ymm6
3001vpor  %ymm6,%ymm8,%ymm6
3002
3003# qhasm: v00 = x5 & mask2
3004# asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9
3005# asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8
3006vpand %ymm7,%ymm2,%ymm8
3007
3008# qhasm: 8x v10 = x7 << 16
3009# asm 1: vpslld $16,<x7=reg256#10,>v10=reg256#16
3010# asm 2: vpslld $16,<x7=%ymm9,>v10=%ymm15
3011vpslld $16,%ymm9,%ymm15
3012
3013# qhasm: 8x v01 = x5 unsigned>> 16
3014# asm 1: vpsrld $16,<x5=reg256#8,>v01=reg256#8
3015# asm 2: vpsrld $16,<x5=%ymm7,>v01=%ymm7
3016vpsrld $16,%ymm7,%ymm7
3017
3018# qhasm: v11 = x7 & mask3
3019# asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10
3020# asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9
3021vpand %ymm9,%ymm3,%ymm9
3022
3023# qhasm: x5 = v00 | v10
3024# asm 1: vpor  <v00=reg256#9,<v10=reg256#16,>x5=reg256#9
3025# asm 2: vpor  <v00=%ymm8,<v10=%ymm15,>x5=%ymm8
3026vpor  %ymm8,%ymm15,%ymm8
3027
3028# qhasm: x7 = v01 | v11
3029# asm 1: vpor  <v01=reg256#8,<v11=reg256#10,>x7=reg256#8
3030# asm 2: vpor  <v01=%ymm7,<v11=%ymm9,>x7=%ymm7
3031vpor  %ymm7,%ymm9,%ymm7
3032
3033# qhasm: v00 = x0 & mask4
3034# asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10
3035# asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9
3036vpand %ymm13,%ymm4,%ymm9
3037
3038# qhasm: 16x v10 = x1 << 8
3039# asm 1: vpsllw $8,<x1=reg256#15,>v10=reg256#16
3040# asm 2: vpsllw $8,<x1=%ymm14,>v10=%ymm15
3041vpsllw $8,%ymm14,%ymm15
3042
3043# qhasm: 16x v01 = x0 unsigned>> 8
3044# asm 1: vpsrlw $8,<x0=reg256#14,>v01=reg256#14
3045# asm 2: vpsrlw $8,<x0=%ymm13,>v01=%ymm13
3046vpsrlw $8,%ymm13,%ymm13
3047
3048# qhasm: v11 = x1 & mask5
3049# asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15
3050# asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14
3051vpand %ymm14,%ymm5,%ymm14
3052
3053# qhasm: x0 = v00 | v10
3054# asm 1: vpor  <v00=reg256#10,<v10=reg256#16,>x0=reg256#10
3055# asm 2: vpor  <v00=%ymm9,<v10=%ymm15,>x0=%ymm9
3056vpor  %ymm9,%ymm15,%ymm9
3057
3058# qhasm: x1 = v01 | v11
3059# asm 1: vpor  <v01=reg256#14,<v11=reg256#15,>x1=reg256#14
3060# asm 2: vpor  <v01=%ymm13,<v11=%ymm14,>x1=%ymm13
3061vpor  %ymm13,%ymm14,%ymm13
3062
3063# qhasm: v00 = x2 & mask4
3064# asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15
3065# asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14
3066vpand %ymm11,%ymm4,%ymm14
3067
3068# qhasm: 16x v10 = x3 << 8
3069# asm 1: vpsllw $8,<x3=reg256#11,>v10=reg256#16
3070# asm 2: vpsllw $8,<x3=%ymm10,>v10=%ymm15
3071vpsllw $8,%ymm10,%ymm15
3072
3073# qhasm: 16x v01 = x2 unsigned>> 8
3074# asm 1: vpsrlw $8,<x2=reg256#12,>v01=reg256#12
3075# asm 2: vpsrlw $8,<x2=%ymm11,>v01=%ymm11
3076vpsrlw $8,%ymm11,%ymm11
3077
3078# qhasm: v11 = x3 & mask5
3079# asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11
3080# asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10
3081vpand %ymm10,%ymm5,%ymm10
3082
3083# qhasm: x2 = v00 | v10
3084# asm 1: vpor  <v00=reg256#15,<v10=reg256#16,>x2=reg256#15
3085# asm 2: vpor  <v00=%ymm14,<v10=%ymm15,>x2=%ymm14
3086vpor  %ymm14,%ymm15,%ymm14
3087
3088# qhasm: x3 = v01 | v11
3089# asm 1: vpor  <v01=reg256#12,<v11=reg256#11,>x3=reg256#11
3090# asm 2: vpor  <v01=%ymm11,<v11=%ymm10,>x3=%ymm10
3091vpor  %ymm11,%ymm10,%ymm10
3092
3093# qhasm: v00 = x4 & mask4
3094# asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12
3095# asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11
3096vpand %ymm12,%ymm4,%ymm11
3097
3098# qhasm: 16x v10 = x5 << 8
3099# asm 1: vpsllw $8,<x5=reg256#9,>v10=reg256#16
3100# asm 2: vpsllw $8,<x5=%ymm8,>v10=%ymm15
3101vpsllw $8,%ymm8,%ymm15
3102
3103# qhasm: 16x v01 = x4 unsigned>> 8
3104# asm 1: vpsrlw $8,<x4=reg256#13,>v01=reg256#13
3105# asm 2: vpsrlw $8,<x4=%ymm12,>v01=%ymm12
3106vpsrlw $8,%ymm12,%ymm12
3107
3108# qhasm: v11 = x5 & mask5
3109# asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9
3110# asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8
3111vpand %ymm8,%ymm5,%ymm8
3112
3113# qhasm: x4 = v00 | v10
3114# asm 1: vpor  <v00=reg256#12,<v10=reg256#16,>x4=reg256#12
3115# asm 2: vpor  <v00=%ymm11,<v10=%ymm15,>x4=%ymm11
3116vpor  %ymm11,%ymm15,%ymm11
3117
3118# qhasm: x5 = v01 | v11
3119# asm 1: vpor  <v01=reg256#13,<v11=reg256#9,>x5=reg256#9
3120# asm 2: vpor  <v01=%ymm12,<v11=%ymm8,>x5=%ymm8
3121vpor  %ymm12,%ymm8,%ymm8
3122
3123# qhasm: v00 = x6 & mask4
3124# asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13
3125# asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12
3126vpand %ymm6,%ymm4,%ymm12
3127
3128# qhasm: 16x v10 = x7 << 8
3129# asm 1: vpsllw $8,<x7=reg256#8,>v10=reg256#16
3130# asm 2: vpsllw $8,<x7=%ymm7,>v10=%ymm15
3131vpsllw $8,%ymm7,%ymm15
3132
3133# qhasm: 16x v01 = x6 unsigned>> 8
3134# asm 1: vpsrlw $8,<x6=reg256#7,>v01=reg256#7
3135# asm 2: vpsrlw $8,<x6=%ymm6,>v01=%ymm6
3136vpsrlw $8,%ymm6,%ymm6
3137
3138# qhasm: v11 = x7 & mask5
3139# asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8
3140# asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7
3141vpand %ymm7,%ymm5,%ymm7
3142
3143# qhasm: x6 = v00 | v10
3144# asm 1: vpor  <v00=reg256#13,<v10=reg256#16,>x6=reg256#13
3145# asm 2: vpor  <v00=%ymm12,<v10=%ymm15,>x6=%ymm12
3146vpor  %ymm12,%ymm15,%ymm12
3147
3148# qhasm: x7 = v01 | v11
3149# asm 1: vpor  <v01=reg256#7,<v11=reg256#8,>x7=reg256#7
3150# asm 2: vpor  <v01=%ymm6,<v11=%ymm7,>x7=%ymm6
3151vpor  %ymm6,%ymm7,%ymm6
3152
3153# qhasm: mem256[ input_0 + 192 ] = x0
3154# asm 1: vmovupd   <x0=reg256#10,192(<input_0=int64#1)
3155# asm 2: vmovupd   <x0=%ymm9,192(<input_0=%rdi)
3156vmovupd   %ymm9,192(%rdi)
3157
3158# qhasm: mem256[ input_0 + 448 ] = x1
3159# asm 1: vmovupd   <x1=reg256#14,448(<input_0=int64#1)
3160# asm 2: vmovupd   <x1=%ymm13,448(<input_0=%rdi)
3161vmovupd   %ymm13,448(%rdi)
3162
3163# qhasm: mem256[ input_0 + 704 ] = x2
3164# asm 1: vmovupd   <x2=reg256#15,704(<input_0=int64#1)
3165# asm 2: vmovupd   <x2=%ymm14,704(<input_0=%rdi)
3166vmovupd   %ymm14,704(%rdi)
3167
3168# qhasm: mem256[ input_0 + 960 ] = x3
3169# asm 1: vmovupd   <x3=reg256#11,960(<input_0=int64#1)
3170# asm 2: vmovupd   <x3=%ymm10,960(<input_0=%rdi)
3171vmovupd   %ymm10,960(%rdi)
3172
3173# qhasm: mem256[ input_0 + 1216 ] = x4
3174# asm 1: vmovupd   <x4=reg256#12,1216(<input_0=int64#1)
3175# asm 2: vmovupd   <x4=%ymm11,1216(<input_0=%rdi)
3176vmovupd   %ymm11,1216(%rdi)
3177
3178# qhasm: mem256[ input_0 + 1472 ] = x5
3179# asm 1: vmovupd   <x5=reg256#9,1472(<input_0=int64#1)
3180# asm 2: vmovupd   <x5=%ymm8,1472(<input_0=%rdi)
3181vmovupd   %ymm8,1472(%rdi)
3182
3183# qhasm: mem256[ input_0 + 1728 ] = x6
3184# asm 1: vmovupd   <x6=reg256#13,1728(<input_0=int64#1)
3185# asm 2: vmovupd   <x6=%ymm12,1728(<input_0=%rdi)
3186vmovupd   %ymm12,1728(%rdi)
3187
3188# qhasm: mem256[ input_0 + 1984 ] = x7
3189# asm 1: vmovupd   <x7=reg256#7,1984(<input_0=int64#1)
3190# asm 2: vmovupd   <x7=%ymm6,1984(<input_0=%rdi)
3191vmovupd   %ymm6,1984(%rdi)
3192
3193# qhasm: x0 = mem256[ input_0 + 224 ]
3194# asm 1: vmovupd   224(<input_0=int64#1),>x0=reg256#7
3195# asm 2: vmovupd   224(<input_0=%rdi),>x0=%ymm6
3196vmovupd   224(%rdi),%ymm6
3197
3198# qhasm: x1 = mem256[ input_0 + 480 ]
3199# asm 1: vmovupd   480(<input_0=int64#1),>x1=reg256#8
3200# asm 2: vmovupd   480(<input_0=%rdi),>x1=%ymm7
3201vmovupd   480(%rdi),%ymm7
3202
3203# qhasm: x2 = mem256[ input_0 + 736 ]
3204# asm 1: vmovupd   736(<input_0=int64#1),>x2=reg256#9
3205# asm 2: vmovupd   736(<input_0=%rdi),>x2=%ymm8
3206vmovupd   736(%rdi),%ymm8
3207
3208# qhasm: x3 = mem256[ input_0 + 992 ]
3209# asm 1: vmovupd   992(<input_0=int64#1),>x3=reg256#10
3210# asm 2: vmovupd   992(<input_0=%rdi),>x3=%ymm9
3211vmovupd   992(%rdi),%ymm9
3212
3213# qhasm: x4 = mem256[ input_0 + 1248 ]
3214# asm 1: vmovupd   1248(<input_0=int64#1),>x4=reg256#11
3215# asm 2: vmovupd   1248(<input_0=%rdi),>x4=%ymm10
3216vmovupd   1248(%rdi),%ymm10
3217
3218# qhasm: x5 = mem256[ input_0 + 1504 ]
3219# asm 1: vmovupd   1504(<input_0=int64#1),>x5=reg256#12
3220# asm 2: vmovupd   1504(<input_0=%rdi),>x5=%ymm11
3221vmovupd   1504(%rdi),%ymm11
3222
3223# qhasm: x6 = mem256[ input_0 + 1760 ]
3224# asm 1: vmovupd   1760(<input_0=int64#1),>x6=reg256#13
3225# asm 2: vmovupd   1760(<input_0=%rdi),>x6=%ymm12
3226vmovupd   1760(%rdi),%ymm12
3227
3228# qhasm: x7 = mem256[ input_0 + 2016 ]
3229# asm 1: vmovupd   2016(<input_0=int64#1),>x7=reg256#14
3230# asm 2: vmovupd   2016(<input_0=%rdi),>x7=%ymm13
3231vmovupd   2016(%rdi),%ymm13
3232
3233# qhasm: v00 = x0 & mask0
3234# asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15
3235# asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14
3236vpand %ymm6,%ymm0,%ymm14
3237
3238# qhasm: 4x v10 = x4 << 32
3239# asm 1: vpsllq $32,<x4=reg256#11,>v10=reg256#16
3240# asm 2: vpsllq $32,<x4=%ymm10,>v10=%ymm15
3241vpsllq $32,%ymm10,%ymm15
3242
3243# qhasm: 4x v01 = x0 unsigned>> 32
3244# asm 1: vpsrlq $32,<x0=reg256#7,>v01=reg256#7
3245# asm 2: vpsrlq $32,<x0=%ymm6,>v01=%ymm6
3246vpsrlq $32,%ymm6,%ymm6
3247
3248# qhasm: v11 = x4 & mask1
3249# asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11
3250# asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10
3251vpand %ymm10,%ymm1,%ymm10
3252
3253# qhasm: x0 = v00 | v10
3254# asm 1: vpor  <v00=reg256#15,<v10=reg256#16,>x0=reg256#15
3255# asm 2: vpor  <v00=%ymm14,<v10=%ymm15,>x0=%ymm14
3256vpor  %ymm14,%ymm15,%ymm14
3257
3258# qhasm: x4 = v01 | v11
3259# asm 1: vpor  <v01=reg256#7,<v11=reg256#11,>x4=reg256#7
3260# asm 2: vpor  <v01=%ymm6,<v11=%ymm10,>x4=%ymm6
3261vpor  %ymm6,%ymm10,%ymm6
3262
3263# qhasm: v00 = x1 & mask0
3264# asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11
3265# asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10
3266vpand %ymm7,%ymm0,%ymm10
3267
3268# qhasm: 4x v10 = x5 << 32
3269# asm 1: vpsllq $32,<x5=reg256#12,>v10=reg256#16
3270# asm 2: vpsllq $32,<x5=%ymm11,>v10=%ymm15
3271vpsllq $32,%ymm11,%ymm15
3272
3273# qhasm: 4x v01 = x1 unsigned>> 32
3274# asm 1: vpsrlq $32,<x1=reg256#8,>v01=reg256#8
3275# asm 2: vpsrlq $32,<x1=%ymm7,>v01=%ymm7
3276vpsrlq $32,%ymm7,%ymm7
3277
3278# qhasm: v11 = x5 & mask1
3279# asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12
3280# asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11
3281vpand %ymm11,%ymm1,%ymm11
3282
3283# qhasm: x1 = v00 | v10
3284# asm 1: vpor  <v00=reg256#11,<v10=reg256#16,>x1=reg256#11
3285# asm 2: vpor  <v00=%ymm10,<v10=%ymm15,>x1=%ymm10
3286vpor  %ymm10,%ymm15,%ymm10
3287
3288# qhasm: x5 = v01 | v11
3289# asm 1: vpor  <v01=reg256#8,<v11=reg256#12,>x5=reg256#8
3290# asm 2: vpor  <v01=%ymm7,<v11=%ymm11,>x5=%ymm7
3291vpor  %ymm7,%ymm11,%ymm7
3292
3293# qhasm: v00 = x2 & mask0
3294# asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12
3295# asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11
3296vpand %ymm8,%ymm0,%ymm11
3297
3298# qhasm: 4x v10 = x6 << 32
3299# asm 1: vpsllq $32,<x6=reg256#13,>v10=reg256#16
3300# asm 2: vpsllq $32,<x6=%ymm12,>v10=%ymm15
3301vpsllq $32,%ymm12,%ymm15
3302
3303# qhasm: 4x v01 = x2 unsigned>> 32
3304# asm 1: vpsrlq $32,<x2=reg256#9,>v01=reg256#9
3305# asm 2: vpsrlq $32,<x2=%ymm8,>v01=%ymm8
3306vpsrlq $32,%ymm8,%ymm8
3307
3308# qhasm: v11 = x6 & mask1
3309# asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13
3310# asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12
3311vpand %ymm12,%ymm1,%ymm12
3312
3313# qhasm: x2 = v00 | v10
3314# asm 1: vpor  <v00=reg256#12,<v10=reg256#16,>x2=reg256#12
3315# asm 2: vpor  <v00=%ymm11,<v10=%ymm15,>x2=%ymm11
3316vpor  %ymm11,%ymm15,%ymm11
3317
3318# qhasm: x6 = v01 | v11
3319# asm 1: vpor  <v01=reg256#9,<v11=reg256#13,>x6=reg256#9
3320# asm 2: vpor  <v01=%ymm8,<v11=%ymm12,>x6=%ymm8
3321vpor  %ymm8,%ymm12,%ymm8
3322
3323# qhasm: v00 = x3 & mask0
3324# asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#1
3325# asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm0
3326vpand %ymm9,%ymm0,%ymm0
3327
3328# qhasm: 4x v10 = x7 << 32
3329# asm 1: vpsllq $32,<x7=reg256#14,>v10=reg256#13
3330# asm 2: vpsllq $32,<x7=%ymm13,>v10=%ymm12
3331vpsllq $32,%ymm13,%ymm12
3332
3333# qhasm: 4x v01 = x3 unsigned>> 32
3334# asm 1: vpsrlq $32,<x3=reg256#10,>v01=reg256#10
3335# asm 2: vpsrlq $32,<x3=%ymm9,>v01=%ymm9
3336vpsrlq $32,%ymm9,%ymm9
3337
3338# qhasm: v11 = x7 & mask1
3339# asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#2
3340# asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm1
3341vpand %ymm13,%ymm1,%ymm1
3342
3343# qhasm: x3 = v00 | v10
3344# asm 1: vpor  <v00=reg256#1,<v10=reg256#13,>x3=reg256#1
3345# asm 2: vpor  <v00=%ymm0,<v10=%ymm12,>x3=%ymm0
3346vpor  %ymm0,%ymm12,%ymm0
3347
3348# qhasm: x7 = v01 | v11
3349# asm 1: vpor  <v01=reg256#10,<v11=reg256#2,>x7=reg256#2
3350# asm 2: vpor  <v01=%ymm9,<v11=%ymm1,>x7=%ymm1
3351vpor  %ymm9,%ymm1,%ymm1
3352
3353# qhasm: v00 = x0 & mask2
3354# asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#10
3355# asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm9
3356vpand %ymm14,%ymm2,%ymm9
3357
3358# qhasm: 8x v10 = x2 << 16
3359# asm 1: vpslld $16,<x2=reg256#12,>v10=reg256#13
3360# asm 2: vpslld $16,<x2=%ymm11,>v10=%ymm12
3361vpslld $16,%ymm11,%ymm12
3362
3363# qhasm: 8x v01 = x0 unsigned>> 16
3364# asm 1: vpsrld $16,<x0=reg256#15,>v01=reg256#14
3365# asm 2: vpsrld $16,<x0=%ymm14,>v01=%ymm13
3366vpsrld $16,%ymm14,%ymm13
3367
3368# qhasm: v11 = x2 & mask3
3369# asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12
3370# asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11
3371vpand %ymm11,%ymm3,%ymm11
3372
3373# qhasm: x0 = v00 | v10
3374# asm 1: vpor  <v00=reg256#10,<v10=reg256#13,>x0=reg256#10
3375# asm 2: vpor  <v00=%ymm9,<v10=%ymm12,>x0=%ymm9
3376vpor  %ymm9,%ymm12,%ymm9
3377
3378# qhasm: x2 = v01 | v11
3379# asm 1: vpor  <v01=reg256#14,<v11=reg256#12,>x2=reg256#12
3380# asm 2: vpor  <v01=%ymm13,<v11=%ymm11,>x2=%ymm11
3381vpor  %ymm13,%ymm11,%ymm11
3382
3383# qhasm: v00 = x1 & mask2
3384# asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#13
3385# asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm12
3386vpand %ymm10,%ymm2,%ymm12
3387
3388# qhasm: 8x v10 = x3 << 16
3389# asm 1: vpslld $16,<x3=reg256#1,>v10=reg256#14
3390# asm 2: vpslld $16,<x3=%ymm0,>v10=%ymm13
3391vpslld $16,%ymm0,%ymm13
3392
3393# qhasm: 8x v01 = x1 unsigned>> 16
3394# asm 1: vpsrld $16,<x1=reg256#11,>v01=reg256#11
3395# asm 2: vpsrld $16,<x1=%ymm10,>v01=%ymm10
3396vpsrld $16,%ymm10,%ymm10
3397
3398# qhasm: v11 = x3 & mask3
3399# asm 1: vpand <x3=reg256#1,<mask3=reg256#4,>v11=reg256#1
3400# asm 2: vpand <x3=%ymm0,<mask3=%ymm3,>v11=%ymm0
3401vpand %ymm0,%ymm3,%ymm0
3402
3403# qhasm: x1 = v00 | v10
3404# asm 1: vpor  <v00=reg256#13,<v10=reg256#14,>x1=reg256#13
3405# asm 2: vpor  <v00=%ymm12,<v10=%ymm13,>x1=%ymm12
3406vpor  %ymm12,%ymm13,%ymm12
3407
3408# qhasm: x3 = v01 | v11
3409# asm 1: vpor  <v01=reg256#11,<v11=reg256#1,>x3=reg256#1
3410# asm 2: vpor  <v01=%ymm10,<v11=%ymm0,>x3=%ymm0
3411vpor  %ymm10,%ymm0,%ymm0
3412
3413# qhasm: v00 = x4 & mask2
3414# asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#11
3415# asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm10
3416vpand %ymm6,%ymm2,%ymm10
3417
3418# qhasm: 8x v10 = x6 << 16
3419# asm 1: vpslld $16,<x6=reg256#9,>v10=reg256#14
3420# asm 2: vpslld $16,<x6=%ymm8,>v10=%ymm13
3421vpslld $16,%ymm8,%ymm13
3422
3423# qhasm: 8x v01 = x4 unsigned>> 16
3424# asm 1: vpsrld $16,<x4=reg256#7,>v01=reg256#7
3425# asm 2: vpsrld $16,<x4=%ymm6,>v01=%ymm6
3426vpsrld $16,%ymm6,%ymm6
3427
3428# qhasm: v11 = x6 & mask3
3429# asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9
3430# asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8
3431vpand %ymm8,%ymm3,%ymm8
3432
3433# qhasm: x4 = v00 | v10
3434# asm 1: vpor  <v00=reg256#11,<v10=reg256#14,>x4=reg256#11
3435# asm 2: vpor  <v00=%ymm10,<v10=%ymm13,>x4=%ymm10
3436vpor  %ymm10,%ymm13,%ymm10
3437
3438# qhasm: x6 = v01 | v11
3439# asm 1: vpor  <v01=reg256#7,<v11=reg256#9,>x6=reg256#7
3440# asm 2: vpor  <v01=%ymm6,<v11=%ymm8,>x6=%ymm6
3441vpor  %ymm6,%ymm8,%ymm6
3442
3443# qhasm: v00 = x5 & mask2
3444# asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#3
3445# asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm2
3446vpand %ymm7,%ymm2,%ymm2
3447
3448# qhasm: 8x v10 = x7 << 16
3449# asm 1: vpslld $16,<x7=reg256#2,>v10=reg256#9
3450# asm 2: vpslld $16,<x7=%ymm1,>v10=%ymm8
3451vpslld $16,%ymm1,%ymm8
3452
3453# qhasm: 8x v01 = x5 unsigned>> 16
3454# asm 1: vpsrld $16,<x5=reg256#8,>v01=reg256#8
3455# asm 2: vpsrld $16,<x5=%ymm7,>v01=%ymm7
3456vpsrld $16,%ymm7,%ymm7
3457
3458# qhasm: v11 = x7 & mask3
3459# asm 1: vpand <x7=reg256#2,<mask3=reg256#4,>v11=reg256#2
3460# asm 2: vpand <x7=%ymm1,<mask3=%ymm3,>v11=%ymm1
3461vpand %ymm1,%ymm3,%ymm1
3462
3463# qhasm: x5 = v00 | v10
3464# asm 1: vpor  <v00=reg256#3,<v10=reg256#9,>x5=reg256#3
3465# asm 2: vpor  <v00=%ymm2,<v10=%ymm8,>x5=%ymm2
3466vpor  %ymm2,%ymm8,%ymm2
3467
3468# qhasm: x7 = v01 | v11
3469# asm 1: vpor  <v01=reg256#8,<v11=reg256#2,>x7=reg256#2
3470# asm 2: vpor  <v01=%ymm7,<v11=%ymm1,>x7=%ymm1
3471vpor  %ymm7,%ymm1,%ymm1
3472
3473# qhasm: v00 = x0 & mask4
3474# asm 1: vpand <x0=reg256#10,<mask4=reg256#5,>v00=reg256#4
3475# asm 2: vpand <x0=%ymm9,<mask4=%ymm4,>v00=%ymm3
3476vpand %ymm9,%ymm4,%ymm3
3477
3478# qhasm: 16x v10 = x1 << 8
3479# asm 1: vpsllw $8,<x1=reg256#13,>v10=reg256#8
3480# asm 2: vpsllw $8,<x1=%ymm12,>v10=%ymm7
3481vpsllw $8,%ymm12,%ymm7
3482
3483# qhasm: 16x v01 = x0 unsigned>> 8
3484# asm 1: vpsrlw $8,<x0=reg256#10,>v01=reg256#9
3485# asm 2: vpsrlw $8,<x0=%ymm9,>v01=%ymm8
3486vpsrlw $8,%ymm9,%ymm8
3487
3488# qhasm: v11 = x1 & mask5
3489# asm 1: vpand <x1=reg256#13,<mask5=reg256#6,>v11=reg256#10
3490# asm 2: vpand <x1=%ymm12,<mask5=%ymm5,>v11=%ymm9
3491vpand %ymm12,%ymm5,%ymm9
3492
3493# qhasm: x0 = v00 | v10
3494# asm 1: vpor  <v00=reg256#4,<v10=reg256#8,>x0=reg256#4
3495# asm 2: vpor  <v00=%ymm3,<v10=%ymm7,>x0=%ymm3
3496vpor  %ymm3,%ymm7,%ymm3
3497
3498# qhasm: x1 = v01 | v11
3499# asm 1: vpor  <v01=reg256#9,<v11=reg256#10,>x1=reg256#8
3500# asm 2: vpor  <v01=%ymm8,<v11=%ymm9,>x1=%ymm7
3501vpor  %ymm8,%ymm9,%ymm7
3502
3503# qhasm: v00 = x2 & mask4
3504# asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#9
3505# asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm8
3506vpand %ymm11,%ymm4,%ymm8
3507
3508# qhasm: 16x v10 = x3 << 8
3509# asm 1: vpsllw $8,<x3=reg256#1,>v10=reg256#10
3510# asm 2: vpsllw $8,<x3=%ymm0,>v10=%ymm9
3511vpsllw $8,%ymm0,%ymm9
3512
3513# qhasm: 16x v01 = x2 unsigned>> 8
3514# asm 1: vpsrlw $8,<x2=reg256#12,>v01=reg256#12
3515# asm 2: vpsrlw $8,<x2=%ymm11,>v01=%ymm11
3516vpsrlw $8,%ymm11,%ymm11
3517
3518# qhasm: v11 = x3 & mask5
3519# asm 1: vpand <x3=reg256#1,<mask5=reg256#6,>v11=reg256#1
3520# asm 2: vpand <x3=%ymm0,<mask5=%ymm5,>v11=%ymm0
3521vpand %ymm0,%ymm5,%ymm0
3522
3523# qhasm: x2 = v00 | v10
3524# asm 1: vpor  <v00=reg256#9,<v10=reg256#10,>x2=reg256#9
3525# asm 2: vpor  <v00=%ymm8,<v10=%ymm9,>x2=%ymm8
3526vpor  %ymm8,%ymm9,%ymm8
3527
3528# qhasm: x3 = v01 | v11
3529# asm 1: vpor  <v01=reg256#12,<v11=reg256#1,>x3=reg256#1
3530# asm 2: vpor  <v01=%ymm11,<v11=%ymm0,>x3=%ymm0
3531vpor  %ymm11,%ymm0,%ymm0
3532
3533# qhasm: v00 = x4 & mask4
3534# asm 1: vpand <x4=reg256#11,<mask4=reg256#5,>v00=reg256#10
3535# asm 2: vpand <x4=%ymm10,<mask4=%ymm4,>v00=%ymm9
3536vpand %ymm10,%ymm4,%ymm9
3537
3538# qhasm: 16x v10 = x5 << 8
3539# asm 1: vpsllw $8,<x5=reg256#3,>v10=reg256#12
3540# asm 2: vpsllw $8,<x5=%ymm2,>v10=%ymm11
3541vpsllw $8,%ymm2,%ymm11
3542
3543# qhasm: 16x v01 = x4 unsigned>> 8
3544# asm 1: vpsrlw $8,<x4=reg256#11,>v01=reg256#11
3545# asm 2: vpsrlw $8,<x4=%ymm10,>v01=%ymm10
3546vpsrlw $8,%ymm10,%ymm10
3547
3548# qhasm: v11 = x5 & mask5
3549# asm 1: vpand <x5=reg256#3,<mask5=reg256#6,>v11=reg256#3
3550# asm 2: vpand <x5=%ymm2,<mask5=%ymm5,>v11=%ymm2
3551vpand %ymm2,%ymm5,%ymm2
3552
3553# qhasm: x4 = v00 | v10
3554# asm 1: vpor  <v00=reg256#10,<v10=reg256#12,>x4=reg256#10
3555# asm 2: vpor  <v00=%ymm9,<v10=%ymm11,>x4=%ymm9
3556vpor  %ymm9,%ymm11,%ymm9
3557
3558# qhasm: x5 = v01 | v11
3559# asm 1: vpor  <v01=reg256#11,<v11=reg256#3,>x5=reg256#3
3560# asm 2: vpor  <v01=%ymm10,<v11=%ymm2,>x5=%ymm2
3561vpor  %ymm10,%ymm2,%ymm2
3562
3563# qhasm: v00 = x6 & mask4
3564# asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#5
3565# asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm4
3566vpand %ymm6,%ymm4,%ymm4
3567
3568# qhasm: 16x v10 = x7 << 8
3569# asm 1: vpsllw $8,<x7=reg256#2,>v10=reg256#11
3570# asm 2: vpsllw $8,<x7=%ymm1,>v10=%ymm10
3571vpsllw $8,%ymm1,%ymm10
3572
3573# qhasm: 16x v01 = x6 unsigned>> 8
3574# asm 1: vpsrlw $8,<x6=reg256#7,>v01=reg256#7
3575# asm 2: vpsrlw $8,<x6=%ymm6,>v01=%ymm6
3576vpsrlw $8,%ymm6,%ymm6
3577
3578# qhasm: v11 = x7 & mask5
3579# asm 1: vpand <x7=reg256#2,<mask5=reg256#6,>v11=reg256#2
3580# asm 2: vpand <x7=%ymm1,<mask5=%ymm5,>v11=%ymm1
3581vpand %ymm1,%ymm5,%ymm1
3582
3583# qhasm: x6 = v00 | v10
3584# asm 1: vpor  <v00=reg256#5,<v10=reg256#11,>x6=reg256#5
3585# asm 2: vpor  <v00=%ymm4,<v10=%ymm10,>x6=%ymm4
3586vpor  %ymm4,%ymm10,%ymm4
3587
3588# qhasm: x7 = v01 | v11
3589# asm 1: vpor  <v01=reg256#7,<v11=reg256#2,>x7=reg256#2
3590# asm 2: vpor  <v01=%ymm6,<v11=%ymm1,>x7=%ymm1
3591vpor  %ymm6,%ymm1,%ymm1
3592
3593# qhasm: mem256[ input_0 + 224 ] = x0
3594# asm 1: vmovupd   <x0=reg256#4,224(<input_0=int64#1)
3595# asm 2: vmovupd   <x0=%ymm3,224(<input_0=%rdi)
3596vmovupd   %ymm3,224(%rdi)
3597
3598# qhasm: mem256[ input_0 + 480 ] = x1
3599# asm 1: vmovupd   <x1=reg256#8,480(<input_0=int64#1)
3600# asm 2: vmovupd   <x1=%ymm7,480(<input_0=%rdi)
3601vmovupd   %ymm7,480(%rdi)
3602
3603# qhasm: mem256[ input_0 + 736 ] = x2
3604# asm 1: vmovupd   <x2=reg256#9,736(<input_0=int64#1)
3605# asm 2: vmovupd   <x2=%ymm8,736(<input_0=%rdi)
3606vmovupd   %ymm8,736(%rdi)
3607
3608# qhasm: mem256[ input_0 + 992 ] = x3
3609# asm 1: vmovupd   <x3=reg256#1,992(<input_0=int64#1)
3610# asm 2: vmovupd   <x3=%ymm0,992(<input_0=%rdi)
3611vmovupd   %ymm0,992(%rdi)
3612
3613# qhasm: mem256[ input_0 + 1248 ] = x4
3614# asm 1: vmovupd   <x4=reg256#10,1248(<input_0=int64#1)
3615# asm 2: vmovupd   <x4=%ymm9,1248(<input_0=%rdi)
3616vmovupd   %ymm9,1248(%rdi)
3617
3618# qhasm: mem256[ input_0 + 1504 ] = x5
3619# asm 1: vmovupd   <x5=reg256#3,1504(<input_0=int64#1)
3620# asm 2: vmovupd   <x5=%ymm2,1504(<input_0=%rdi)
3621vmovupd   %ymm2,1504(%rdi)
3622
3623# qhasm: mem256[ input_0 + 1760 ] = x6
3624# asm 1: vmovupd   <x6=reg256#5,1760(<input_0=int64#1)
3625# asm 2: vmovupd   <x6=%ymm4,1760(<input_0=%rdi)
3626vmovupd   %ymm4,1760(%rdi)
3627
3628# qhasm: mem256[ input_0 + 2016 ] = x7
3629# asm 1: vmovupd   <x7=reg256#2,2016(<input_0=int64#1)
3630# asm 2: vmovupd   <x7=%ymm1,2016(<input_0=%rdi)
3631vmovupd   %ymm1,2016(%rdi)
3632
3633# qhasm: mask0 aligned= mem256[ PQCLEAN_MCELIECE348864_AVX_MASK2_0 ]
3634# asm 1: vmovapd PQCLEAN_MCELIECE348864_AVX_MASK2_0,>mask0=reg256#1
3635# asm 2: vmovapd PQCLEAN_MCELIECE348864_AVX_MASK2_0,>mask0=%ymm0
3636vmovapd PQCLEAN_MCELIECE348864_AVX_MASK2_0(%rip),%ymm0
3637
3638# qhasm: mask1 aligned= mem256[ PQCLEAN_MCELIECE348864_AVX_MASK2_1 ]
3639# asm 1: vmovapd PQCLEAN_MCELIECE348864_AVX_MASK2_1,>mask1=reg256#2
3640# asm 2: vmovapd PQCLEAN_MCELIECE348864_AVX_MASK2_1,>mask1=%ymm1
3641vmovapd PQCLEAN_MCELIECE348864_AVX_MASK2_1(%rip),%ymm1
3642
3643# qhasm: mask2 aligned= mem256[ PQCLEAN_MCELIECE348864_AVX_MASK1_0 ]
3644# asm 1: vmovapd PQCLEAN_MCELIECE348864_AVX_MASK1_0,>mask2=reg256#3
3645# asm 2: vmovapd PQCLEAN_MCELIECE348864_AVX_MASK1_0,>mask2=%ymm2
3646vmovapd PQCLEAN_MCELIECE348864_AVX_MASK1_0(%rip),%ymm2
3647
3648# qhasm: mask3 aligned= mem256[ PQCLEAN_MCELIECE348864_AVX_MASK1_1 ]
3649# asm 1: vmovapd PQCLEAN_MCELIECE348864_AVX_MASK1_1,>mask3=reg256#4
3650# asm 2: vmovapd PQCLEAN_MCELIECE348864_AVX_MASK1_1,>mask3=%ymm3
3651vmovapd PQCLEAN_MCELIECE348864_AVX_MASK1_1(%rip),%ymm3
3652
3653# qhasm: mask4 aligned= mem256[ PQCLEAN_MCELIECE348864_AVX_MASK0_0 ]
3654# asm 1: vmovapd PQCLEAN_MCELIECE348864_AVX_MASK0_0,>mask4=reg256#5
3655# asm 2: vmovapd PQCLEAN_MCELIECE348864_AVX_MASK0_0,>mask4=%ymm4
3656vmovapd PQCLEAN_MCELIECE348864_AVX_MASK0_0(%rip),%ymm4
3657
3658# qhasm: mask5 aligned= mem256[ PQCLEAN_MCELIECE348864_AVX_MASK0_1 ]
3659# asm 1: vmovapd PQCLEAN_MCELIECE348864_AVX_MASK0_1,>mask5=reg256#6
3660# asm 2: vmovapd PQCLEAN_MCELIECE348864_AVX_MASK0_1,>mask5=%ymm5
3661vmovapd PQCLEAN_MCELIECE348864_AVX_MASK0_1(%rip),%ymm5
3662
3663# qhasm: x0 = mem256[ input_0 + 0 ]
3664# asm 1: vmovupd   0(<input_0=int64#1),>x0=reg256#7
3665# asm 2: vmovupd   0(<input_0=%rdi),>x0=%ymm6
3666vmovupd   0(%rdi),%ymm6
3667
3668# qhasm: x1 = mem256[ input_0 + 32 ]
3669# asm 1: vmovupd   32(<input_0=int64#1),>x1=reg256#8
3670# asm 2: vmovupd   32(<input_0=%rdi),>x1=%ymm7
3671vmovupd   32(%rdi),%ymm7
3672
3673# qhasm: x2 = mem256[ input_0 + 64 ]
3674# asm 1: vmovupd   64(<input_0=int64#1),>x2=reg256#9
3675# asm 2: vmovupd   64(<input_0=%rdi),>x2=%ymm8
3676vmovupd   64(%rdi),%ymm8
3677
3678# qhasm: x3 = mem256[ input_0 + 96 ]
3679# asm 1: vmovupd   96(<input_0=int64#1),>x3=reg256#10
3680# asm 2: vmovupd   96(<input_0=%rdi),>x3=%ymm9
3681vmovupd   96(%rdi),%ymm9
3682
3683# qhasm: x4 = mem256[ input_0 + 128 ]
3684# asm 1: vmovupd   128(<input_0=int64#1),>x4=reg256#11
3685# asm 2: vmovupd   128(<input_0=%rdi),>x4=%ymm10
3686vmovupd   128(%rdi),%ymm10
3687
3688# qhasm: x5 = mem256[ input_0 + 160 ]
3689# asm 1: vmovupd   160(<input_0=int64#1),>x5=reg256#12
3690# asm 2: vmovupd   160(<input_0=%rdi),>x5=%ymm11
3691vmovupd   160(%rdi),%ymm11
3692
3693# qhasm: x6 = mem256[ input_0 + 192 ]
3694# asm 1: vmovupd   192(<input_0=int64#1),>x6=reg256#13
3695# asm 2: vmovupd   192(<input_0=%rdi),>x6=%ymm12
3696vmovupd   192(%rdi),%ymm12
3697
3698# qhasm: x7 = mem256[ input_0 + 224 ]
3699# asm 1: vmovupd   224(<input_0=int64#1),>x7=reg256#14
3700# asm 2: vmovupd   224(<input_0=%rdi),>x7=%ymm13
3701vmovupd   224(%rdi),%ymm13
3702
3703# qhasm: v00 = x0 & mask0
3704# asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15
3705# asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14
3706vpand %ymm6,%ymm0,%ymm14
3707
3708# qhasm: v10 = x4 & mask0
3709# asm 1: vpand <x4=reg256#11,<mask0=reg256#1,>v10=reg256#16
3710# asm 2: vpand <x4=%ymm10,<mask0=%ymm0,>v10=%ymm15
3711vpand %ymm10,%ymm0,%ymm15
3712
3713# qhasm: 4x v10 <<= 4
3714# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
3715# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
3716vpsllq $4,%ymm15,%ymm15
3717
3718# qhasm: v01 = x0 & mask1
3719# asm 1: vpand <x0=reg256#7,<mask1=reg256#2,>v01=reg256#7
3720# asm 2: vpand <x0=%ymm6,<mask1=%ymm1,>v01=%ymm6
3721vpand %ymm6,%ymm1,%ymm6
3722
3723# qhasm: v11 = x4 & mask1
3724# asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11
3725# asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10
3726vpand %ymm10,%ymm1,%ymm10
3727
3728# qhasm: 4x v01 unsigned>>= 4
3729# asm 1: vpsrlq $4,<v01=reg256#7,<v01=reg256#7
3730# asm 2: vpsrlq $4,<v01=%ymm6,<v01=%ymm6
3731vpsrlq $4,%ymm6,%ymm6
3732
3733# qhasm: x0 = v00 | v10
3734# asm 1: vpor  <v00=reg256#15,<v10=reg256#16,>x0=reg256#15
3735# asm 2: vpor  <v00=%ymm14,<v10=%ymm15,>x0=%ymm14
3736vpor  %ymm14,%ymm15,%ymm14
3737
3738# qhasm: x4 = v01 | v11
3739# asm 1: vpor  <v01=reg256#7,<v11=reg256#11,>x4=reg256#7
3740# asm 2: vpor  <v01=%ymm6,<v11=%ymm10,>x4=%ymm6
3741vpor  %ymm6,%ymm10,%ymm6
3742
3743# qhasm: v00 = x1 & mask0
3744# asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11
3745# asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10
3746vpand %ymm7,%ymm0,%ymm10
3747
3748# qhasm: v10 = x5 & mask0
3749# asm 1: vpand <x5=reg256#12,<mask0=reg256#1,>v10=reg256#16
3750# asm 2: vpand <x5=%ymm11,<mask0=%ymm0,>v10=%ymm15
3751vpand %ymm11,%ymm0,%ymm15
3752
3753# qhasm: 4x v10 <<= 4
3754# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
3755# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
3756vpsllq $4,%ymm15,%ymm15
3757
3758# qhasm: v01 = x1 & mask1
3759# asm 1: vpand <x1=reg256#8,<mask1=reg256#2,>v01=reg256#8
3760# asm 2: vpand <x1=%ymm7,<mask1=%ymm1,>v01=%ymm7
3761vpand %ymm7,%ymm1,%ymm7
3762
3763# qhasm: v11 = x5 & mask1
3764# asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12
3765# asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11
3766vpand %ymm11,%ymm1,%ymm11
3767
3768# qhasm: 4x v01 unsigned>>= 4
3769# asm 1: vpsrlq $4,<v01=reg256#8,<v01=reg256#8
3770# asm 2: vpsrlq $4,<v01=%ymm7,<v01=%ymm7
3771vpsrlq $4,%ymm7,%ymm7
3772
3773# qhasm: x1 = v00 | v10
3774# asm 1: vpor  <v00=reg256#11,<v10=reg256#16,>x1=reg256#11
3775# asm 2: vpor  <v00=%ymm10,<v10=%ymm15,>x1=%ymm10
3776vpor  %ymm10,%ymm15,%ymm10
3777
3778# qhasm: x5 = v01 | v11
3779# asm 1: vpor  <v01=reg256#8,<v11=reg256#12,>x5=reg256#8
3780# asm 2: vpor  <v01=%ymm7,<v11=%ymm11,>x5=%ymm7
3781vpor  %ymm7,%ymm11,%ymm7
3782
3783# qhasm: v00 = x2 & mask0
3784# asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12
3785# asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11
3786vpand %ymm8,%ymm0,%ymm11
3787
3788# qhasm: v10 = x6 & mask0
3789# asm 1: vpand <x6=reg256#13,<mask0=reg256#1,>v10=reg256#16
3790# asm 2: vpand <x6=%ymm12,<mask0=%ymm0,>v10=%ymm15
3791vpand %ymm12,%ymm0,%ymm15
3792
3793# qhasm: 4x v10 <<= 4
3794# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
3795# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
3796vpsllq $4,%ymm15,%ymm15
3797
3798# qhasm: v01 = x2 & mask1
3799# asm 1: vpand <x2=reg256#9,<mask1=reg256#2,>v01=reg256#9
3800# asm 2: vpand <x2=%ymm8,<mask1=%ymm1,>v01=%ymm8
3801vpand %ymm8,%ymm1,%ymm8
3802
3803# qhasm: v11 = x6 & mask1
3804# asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13
3805# asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12
3806vpand %ymm12,%ymm1,%ymm12
3807
3808# qhasm: 4x v01 unsigned>>= 4
3809# asm 1: vpsrlq $4,<v01=reg256#9,<v01=reg256#9
3810# asm 2: vpsrlq $4,<v01=%ymm8,<v01=%ymm8
3811vpsrlq $4,%ymm8,%ymm8
3812
3813# qhasm: x2 = v00 | v10
3814# asm 1: vpor  <v00=reg256#12,<v10=reg256#16,>x2=reg256#12
3815# asm 2: vpor  <v00=%ymm11,<v10=%ymm15,>x2=%ymm11
3816vpor  %ymm11,%ymm15,%ymm11
3817
3818# qhasm: x6 = v01 | v11
3819# asm 1: vpor  <v01=reg256#9,<v11=reg256#13,>x6=reg256#9
3820# asm 2: vpor  <v01=%ymm8,<v11=%ymm12,>x6=%ymm8
3821vpor  %ymm8,%ymm12,%ymm8
3822
3823# qhasm: v00 = x3 & mask0
3824# asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13
3825# asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12
3826vpand %ymm9,%ymm0,%ymm12
3827
3828# qhasm: v10 = x7 & mask0
3829# asm 1: vpand <x7=reg256#14,<mask0=reg256#1,>v10=reg256#16
3830# asm 2: vpand <x7=%ymm13,<mask0=%ymm0,>v10=%ymm15
3831vpand %ymm13,%ymm0,%ymm15
3832
3833# qhasm: 4x v10 <<= 4
3834# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
3835# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
3836vpsllq $4,%ymm15,%ymm15
3837
3838# qhasm: v01 = x3 & mask1
3839# asm 1: vpand <x3=reg256#10,<mask1=reg256#2,>v01=reg256#10
3840# asm 2: vpand <x3=%ymm9,<mask1=%ymm1,>v01=%ymm9
3841vpand %ymm9,%ymm1,%ymm9
3842
3843# qhasm: v11 = x7 & mask1
3844# asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14
3845# asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13
3846vpand %ymm13,%ymm1,%ymm13
3847
3848# qhasm: 4x v01 unsigned>>= 4
3849# asm 1: vpsrlq $4,<v01=reg256#10,<v01=reg256#10
3850# asm 2: vpsrlq $4,<v01=%ymm9,<v01=%ymm9
3851vpsrlq $4,%ymm9,%ymm9
3852
3853# qhasm: x3 = v00 | v10
3854# asm 1: vpor  <v00=reg256#13,<v10=reg256#16,>x3=reg256#13
3855# asm 2: vpor  <v00=%ymm12,<v10=%ymm15,>x3=%ymm12
3856vpor  %ymm12,%ymm15,%ymm12
3857
3858# qhasm: x7 = v01 | v11
3859# asm 1: vpor  <v01=reg256#10,<v11=reg256#14,>x7=reg256#10
3860# asm 2: vpor  <v01=%ymm9,<v11=%ymm13,>x7=%ymm9
3861vpor  %ymm9,%ymm13,%ymm9
3862
3863# qhasm: v00 = x0 & mask2
3864# asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14
3865# asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13
3866vpand %ymm14,%ymm2,%ymm13
3867
3868# qhasm: v10 = x2 & mask2
3869# asm 1: vpand <x2=reg256#12,<mask2=reg256#3,>v10=reg256#16
3870# asm 2: vpand <x2=%ymm11,<mask2=%ymm2,>v10=%ymm15
3871vpand %ymm11,%ymm2,%ymm15
3872
3873# qhasm: 4x v10 <<= 2
3874# asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
3875# asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
3876vpsllq $2,%ymm15,%ymm15
3877
3878# qhasm: v01 = x0 & mask3
3879# asm 1: vpand <x0=reg256#15,<mask3=reg256#4,>v01=reg256#15
3880# asm 2: vpand <x0=%ymm14,<mask3=%ymm3,>v01=%ymm14
3881vpand %ymm14,%ymm3,%ymm14
3882
3883# qhasm: v11 = x2 & mask3
3884# asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12
3885# asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11
3886vpand %ymm11,%ymm3,%ymm11
3887
3888# qhasm: 4x v01 unsigned>>= 2
3889# asm 1: vpsrlq $2,<v01=reg256#15,<v01=reg256#15
3890# asm 2: vpsrlq $2,<v01=%ymm14,<v01=%ymm14
3891vpsrlq $2,%ymm14,%ymm14
3892
3893# qhasm: x0 = v00 | v10
3894# asm 1: vpor  <v00=reg256#14,<v10=reg256#16,>x0=reg256#14
3895# asm 2: vpor  <v00=%ymm13,<v10=%ymm15,>x0=%ymm13
3896vpor  %ymm13,%ymm15,%ymm13
3897
3898# qhasm: x2 = v01 | v11
3899# asm 1: vpor  <v01=reg256#15,<v11=reg256#12,>x2=reg256#12
3900# asm 2: vpor  <v01=%ymm14,<v11=%ymm11,>x2=%ymm11
3901vpor  %ymm14,%ymm11,%ymm11
3902
3903# qhasm: v00 = x1 & mask2
3904# asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15
3905# asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14
3906vpand %ymm10,%ymm2,%ymm14
3907
3908# qhasm: v10 = x3 & mask2
3909# asm 1: vpand <x3=reg256#13,<mask2=reg256#3,>v10=reg256#16
3910# asm 2: vpand <x3=%ymm12,<mask2=%ymm2,>v10=%ymm15
3911vpand %ymm12,%ymm2,%ymm15
3912
3913# qhasm: 4x v10 <<= 2
3914# asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
3915# asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
3916vpsllq $2,%ymm15,%ymm15
3917
3918# qhasm: v01 = x1 & mask3
3919# asm 1: vpand <x1=reg256#11,<mask3=reg256#4,>v01=reg256#11
3920# asm 2: vpand <x1=%ymm10,<mask3=%ymm3,>v01=%ymm10
3921vpand %ymm10,%ymm3,%ymm10
3922
3923# qhasm: v11 = x3 & mask3
3924# asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13
3925# asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12
3926vpand %ymm12,%ymm3,%ymm12
3927
3928# qhasm: 4x v01 unsigned>>= 2
3929# asm 1: vpsrlq $2,<v01=reg256#11,<v01=reg256#11
3930# asm 2: vpsrlq $2,<v01=%ymm10,<v01=%ymm10
3931vpsrlq $2,%ymm10,%ymm10
3932
3933# qhasm: x1 = v00 | v10
3934# asm 1: vpor  <v00=reg256#15,<v10=reg256#16,>x1=reg256#15
3935# asm 2: vpor  <v00=%ymm14,<v10=%ymm15,>x1=%ymm14
3936vpor  %ymm14,%ymm15,%ymm14
3937
3938# qhasm: x3 = v01 | v11
3939# asm 1: vpor  <v01=reg256#11,<v11=reg256#13,>x3=reg256#11
3940# asm 2: vpor  <v01=%ymm10,<v11=%ymm12,>x3=%ymm10
3941vpor  %ymm10,%ymm12,%ymm10
3942
3943# qhasm: v00 = x4 & mask2
3944# asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13
3945# asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12
3946vpand %ymm6,%ymm2,%ymm12
3947
3948# qhasm: v10 = x6 & mask2
3949# asm 1: vpand <x6=reg256#9,<mask2=reg256#3,>v10=reg256#16
3950# asm 2: vpand <x6=%ymm8,<mask2=%ymm2,>v10=%ymm15
3951vpand %ymm8,%ymm2,%ymm15
3952
3953# qhasm: 4x v10 <<= 2
3954# asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
3955# asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
3956vpsllq $2,%ymm15,%ymm15
3957
3958# qhasm: v01 = x4 & mask3
3959# asm 1: vpand <x4=reg256#7,<mask3=reg256#4,>v01=reg256#7
3960# asm 2: vpand <x4=%ymm6,<mask3=%ymm3,>v01=%ymm6
3961vpand %ymm6,%ymm3,%ymm6
3962
3963# qhasm: v11 = x6 & mask3
3964# asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9
3965# asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8
3966vpand %ymm8,%ymm3,%ymm8
3967
3968# qhasm: 4x v01 unsigned>>= 2
3969# asm 1: vpsrlq $2,<v01=reg256#7,<v01=reg256#7
3970# asm 2: vpsrlq $2,<v01=%ymm6,<v01=%ymm6
3971vpsrlq $2,%ymm6,%ymm6
3972
3973# qhasm: x4 = v00 | v10
3974# asm 1: vpor  <v00=reg256#13,<v10=reg256#16,>x4=reg256#13
3975# asm 2: vpor  <v00=%ymm12,<v10=%ymm15,>x4=%ymm12
3976vpor  %ymm12,%ymm15,%ymm12
3977
3978# qhasm: x6 = v01 | v11
3979# asm 1: vpor  <v01=reg256#7,<v11=reg256#9,>x6=reg256#7
3980# asm 2: vpor  <v01=%ymm6,<v11=%ymm8,>x6=%ymm6
3981vpor  %ymm6,%ymm8,%ymm6
3982
3983# qhasm: v00 = x5 & mask2
3984# asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9
3985# asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8
3986vpand %ymm7,%ymm2,%ymm8
3987
3988# qhasm: v10 = x7 & mask2
3989# asm 1: vpand <x7=reg256#10,<mask2=reg256#3,>v10=reg256#16
3990# asm 2: vpand <x7=%ymm9,<mask2=%ymm2,>v10=%ymm15
3991vpand %ymm9,%ymm2,%ymm15
3992
3993# qhasm: 4x v10 <<= 2
3994# asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
3995# asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
3996vpsllq $2,%ymm15,%ymm15
3997
3998# qhasm: v01 = x5 & mask3
3999# asm 1: vpand <x5=reg256#8,<mask3=reg256#4,>v01=reg256#8
4000# asm 2: vpand <x5=%ymm7,<mask3=%ymm3,>v01=%ymm7
4001vpand %ymm7,%ymm3,%ymm7
4002
4003# qhasm: v11 = x7 & mask3
4004# asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10
4005# asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9
4006vpand %ymm9,%ymm3,%ymm9
4007
4008# qhasm: 4x v01 unsigned>>= 2
4009# asm 1: vpsrlq $2,<v01=reg256#8,<v01=reg256#8
4010# asm 2: vpsrlq $2,<v01=%ymm7,<v01=%ymm7
4011vpsrlq $2,%ymm7,%ymm7
4012
4013# qhasm: x5 = v00 | v10
4014# asm 1: vpor  <v00=reg256#9,<v10=reg256#16,>x5=reg256#9
4015# asm 2: vpor  <v00=%ymm8,<v10=%ymm15,>x5=%ymm8
4016vpor  %ymm8,%ymm15,%ymm8
4017
4018# qhasm: x7 = v01 | v11
4019# asm 1: vpor  <v01=reg256#8,<v11=reg256#10,>x7=reg256#8
4020# asm 2: vpor  <v01=%ymm7,<v11=%ymm9,>x7=%ymm7
4021vpor  %ymm7,%ymm9,%ymm7
4022
4023# qhasm: v00 = x0 & mask4
4024# asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10
4025# asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9
4026vpand %ymm13,%ymm4,%ymm9
4027
4028# qhasm: v10 = x1 & mask4
4029# asm 1: vpand <x1=reg256#15,<mask4=reg256#5,>v10=reg256#16
4030# asm 2: vpand <x1=%ymm14,<mask4=%ymm4,>v10=%ymm15
4031vpand %ymm14,%ymm4,%ymm15
4032
4033# qhasm: 4x v10 <<= 1
4034# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
4035# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
4036vpsllq $1,%ymm15,%ymm15
4037
4038# qhasm: v01 = x0 & mask5
4039# asm 1: vpand <x0=reg256#14,<mask5=reg256#6,>v01=reg256#14
4040# asm 2: vpand <x0=%ymm13,<mask5=%ymm5,>v01=%ymm13
4041vpand %ymm13,%ymm5,%ymm13
4042
4043# qhasm: v11 = x1 & mask5
4044# asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15
4045# asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14
4046vpand %ymm14,%ymm5,%ymm14
4047
4048# qhasm: 4x v01 unsigned>>= 1
4049# asm 1: vpsrlq $1,<v01=reg256#14,<v01=reg256#14
4050# asm 2: vpsrlq $1,<v01=%ymm13,<v01=%ymm13
4051vpsrlq $1,%ymm13,%ymm13
4052
4053# qhasm: x0 = v00 | v10
4054# asm 1: vpor  <v00=reg256#10,<v10=reg256#16,>x0=reg256#10
4055# asm 2: vpor  <v00=%ymm9,<v10=%ymm15,>x0=%ymm9
4056vpor  %ymm9,%ymm15,%ymm9
4057
4058# qhasm: x1 = v01 | v11
4059# asm 1: vpor  <v01=reg256#14,<v11=reg256#15,>x1=reg256#14
4060# asm 2: vpor  <v01=%ymm13,<v11=%ymm14,>x1=%ymm13
4061vpor  %ymm13,%ymm14,%ymm13
4062
4063# qhasm: v00 = x2 & mask4
4064# asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15
4065# asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14
4066vpand %ymm11,%ymm4,%ymm14
4067
4068# qhasm: v10 = x3 & mask4
4069# asm 1: vpand <x3=reg256#11,<mask4=reg256#5,>v10=reg256#16
4070# asm 2: vpand <x3=%ymm10,<mask4=%ymm4,>v10=%ymm15
4071vpand %ymm10,%ymm4,%ymm15
4072
4073# qhasm: 4x v10 <<= 1
4074# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
4075# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
4076vpsllq $1,%ymm15,%ymm15
4077
4078# qhasm: v01 = x2 & mask5
4079# asm 1: vpand <x2=reg256#12,<mask5=reg256#6,>v01=reg256#12
4080# asm 2: vpand <x2=%ymm11,<mask5=%ymm5,>v01=%ymm11
4081vpand %ymm11,%ymm5,%ymm11
4082
4083# qhasm: v11 = x3 & mask5
4084# asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11
4085# asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10
4086vpand %ymm10,%ymm5,%ymm10
4087
4088# qhasm: 4x v01 unsigned>>= 1
4089# asm 1: vpsrlq $1,<v01=reg256#12,<v01=reg256#12
4090# asm 2: vpsrlq $1,<v01=%ymm11,<v01=%ymm11
4091vpsrlq $1,%ymm11,%ymm11
4092
4093# qhasm: x2 = v00 | v10
4094# asm 1: vpor  <v00=reg256#15,<v10=reg256#16,>x2=reg256#15
4095# asm 2: vpor  <v00=%ymm14,<v10=%ymm15,>x2=%ymm14
4096vpor  %ymm14,%ymm15,%ymm14
4097
4098# qhasm: x3 = v01 | v11
4099# asm 1: vpor  <v01=reg256#12,<v11=reg256#11,>x3=reg256#11
4100# asm 2: vpor  <v01=%ymm11,<v11=%ymm10,>x3=%ymm10
4101vpor  %ymm11,%ymm10,%ymm10
4102
4103# qhasm: v00 = x4 & mask4
4104# asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12
4105# asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11
4106vpand %ymm12,%ymm4,%ymm11
4107
4108# qhasm: v10 = x5 & mask4
4109# asm 1: vpand <x5=reg256#9,<mask4=reg256#5,>v10=reg256#16
4110# asm 2: vpand <x5=%ymm8,<mask4=%ymm4,>v10=%ymm15
4111vpand %ymm8,%ymm4,%ymm15
4112
4113# qhasm: 4x v10 <<= 1
4114# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
4115# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
4116vpsllq $1,%ymm15,%ymm15
4117
4118# qhasm: v01 = x4 & mask5
4119# asm 1: vpand <x4=reg256#13,<mask5=reg256#6,>v01=reg256#13
4120# asm 2: vpand <x4=%ymm12,<mask5=%ymm5,>v01=%ymm12
4121vpand %ymm12,%ymm5,%ymm12
4122
4123# qhasm: v11 = x5 & mask5
4124# asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9
4125# asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8
4126vpand %ymm8,%ymm5,%ymm8
4127
4128# qhasm: 4x v01 unsigned>>= 1
4129# asm 1: vpsrlq $1,<v01=reg256#13,<v01=reg256#13
4130# asm 2: vpsrlq $1,<v01=%ymm12,<v01=%ymm12
4131vpsrlq $1,%ymm12,%ymm12
4132
4133# qhasm: x4 = v00 | v10
4134# asm 1: vpor  <v00=reg256#12,<v10=reg256#16,>x4=reg256#12
4135# asm 2: vpor  <v00=%ymm11,<v10=%ymm15,>x4=%ymm11
4136vpor  %ymm11,%ymm15,%ymm11
4137
4138# qhasm: x5 = v01 | v11
4139# asm 1: vpor  <v01=reg256#13,<v11=reg256#9,>x5=reg256#9
4140# asm 2: vpor  <v01=%ymm12,<v11=%ymm8,>x5=%ymm8
4141vpor  %ymm12,%ymm8,%ymm8
4142
4143# qhasm: v00 = x6 & mask4
4144# asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13
4145# asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12
4146vpand %ymm6,%ymm4,%ymm12
4147
4148# qhasm: v10 = x7 & mask4
4149# asm 1: vpand <x7=reg256#8,<mask4=reg256#5,>v10=reg256#16
4150# asm 2: vpand <x7=%ymm7,<mask4=%ymm4,>v10=%ymm15
4151vpand %ymm7,%ymm4,%ymm15
4152
4153# qhasm: 4x v10 <<= 1
4154# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
4155# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
4156vpsllq $1,%ymm15,%ymm15
4157
4158# qhasm: v01 = x6 & mask5
4159# asm 1: vpand <x6=reg256#7,<mask5=reg256#6,>v01=reg256#7
4160# asm 2: vpand <x6=%ymm6,<mask5=%ymm5,>v01=%ymm6
4161vpand %ymm6,%ymm5,%ymm6
4162
4163# qhasm: v11 = x7 & mask5
4164# asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8
4165# asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7
4166vpand %ymm7,%ymm5,%ymm7
4167
4168# qhasm: 4x v01 unsigned>>= 1
4169# asm 1: vpsrlq $1,<v01=reg256#7,<v01=reg256#7
4170# asm 2: vpsrlq $1,<v01=%ymm6,<v01=%ymm6
4171vpsrlq $1,%ymm6,%ymm6
4172
4173# qhasm: x6 = v00 | v10
4174# asm 1: vpor  <v00=reg256#13,<v10=reg256#16,>x6=reg256#13
4175# asm 2: vpor  <v00=%ymm12,<v10=%ymm15,>x6=%ymm12
4176vpor  %ymm12,%ymm15,%ymm12
4177
4178# qhasm: x7 = v01 | v11
4179# asm 1: vpor  <v01=reg256#7,<v11=reg256#8,>x7=reg256#7
4180# asm 2: vpor  <v01=%ymm6,<v11=%ymm7,>x7=%ymm6
4181vpor  %ymm6,%ymm7,%ymm6
4182
4183# qhasm: mem256[ input_0 + 0 ] = x0
4184# asm 1: vmovupd   <x0=reg256#10,0(<input_0=int64#1)
4185# asm 2: vmovupd   <x0=%ymm9,0(<input_0=%rdi)
4186vmovupd   %ymm9,0(%rdi)
4187
4188# qhasm: mem256[ input_0 + 32 ] = x1
4189# asm 1: vmovupd   <x1=reg256#14,32(<input_0=int64#1)
4190# asm 2: vmovupd   <x1=%ymm13,32(<input_0=%rdi)
4191vmovupd   %ymm13,32(%rdi)
4192
4193# qhasm: mem256[ input_0 + 64 ] = x2
4194# asm 1: vmovupd   <x2=reg256#15,64(<input_0=int64#1)
4195# asm 2: vmovupd   <x2=%ymm14,64(<input_0=%rdi)
4196vmovupd   %ymm14,64(%rdi)
4197
4198# qhasm: mem256[ input_0 + 96 ] = x3
4199# asm 1: vmovupd   <x3=reg256#11,96(<input_0=int64#1)
4200# asm 2: vmovupd   <x3=%ymm10,96(<input_0=%rdi)
4201vmovupd   %ymm10,96(%rdi)
4202
4203# qhasm: mem256[ input_0 + 128 ] = x4
4204# asm 1: vmovupd   <x4=reg256#12,128(<input_0=int64#1)
4205# asm 2: vmovupd   <x4=%ymm11,128(<input_0=%rdi)
4206vmovupd   %ymm11,128(%rdi)
4207
4208# qhasm: mem256[ input_0 + 160 ] = x5
4209# asm 1: vmovupd   <x5=reg256#9,160(<input_0=int64#1)
4210# asm 2: vmovupd   <x5=%ymm8,160(<input_0=%rdi)
4211vmovupd   %ymm8,160(%rdi)
4212
4213# qhasm: mem256[ input_0 + 192 ] = x6
4214# asm 1: vmovupd   <x6=reg256#13,192(<input_0=int64#1)
4215# asm 2: vmovupd   <x6=%ymm12,192(<input_0=%rdi)
4216vmovupd   %ymm12,192(%rdi)
4217
4218# qhasm: mem256[ input_0 + 224 ] = x7
4219# asm 1: vmovupd   <x7=reg256#7,224(<input_0=int64#1)
4220# asm 2: vmovupd   <x7=%ymm6,224(<input_0=%rdi)
4221vmovupd   %ymm6,224(%rdi)
4222
4223# qhasm: x0 = mem256[ input_0 + 256 ]
4224# asm 1: vmovupd   256(<input_0=int64#1),>x0=reg256#7
4225# asm 2: vmovupd   256(<input_0=%rdi),>x0=%ymm6
4226vmovupd   256(%rdi),%ymm6
4227
4228# qhasm: x1 = mem256[ input_0 + 288 ]
4229# asm 1: vmovupd   288(<input_0=int64#1),>x1=reg256#8
4230# asm 2: vmovupd   288(<input_0=%rdi),>x1=%ymm7
4231vmovupd   288(%rdi),%ymm7
4232
4233# qhasm: x2 = mem256[ input_0 + 320 ]
4234# asm 1: vmovupd   320(<input_0=int64#1),>x2=reg256#9
4235# asm 2: vmovupd   320(<input_0=%rdi),>x2=%ymm8
4236vmovupd   320(%rdi),%ymm8
4237
4238# qhasm: x3 = mem256[ input_0 + 352 ]
4239# asm 1: vmovupd   352(<input_0=int64#1),>x3=reg256#10
4240# asm 2: vmovupd   352(<input_0=%rdi),>x3=%ymm9
4241vmovupd   352(%rdi),%ymm9
4242
4243# qhasm: x4 = mem256[ input_0 + 384 ]
4244# asm 1: vmovupd   384(<input_0=int64#1),>x4=reg256#11
4245# asm 2: vmovupd   384(<input_0=%rdi),>x4=%ymm10
4246vmovupd   384(%rdi),%ymm10
4247
4248# qhasm: x5 = mem256[ input_0 + 416 ]
4249# asm 1: vmovupd   416(<input_0=int64#1),>x5=reg256#12
4250# asm 2: vmovupd   416(<input_0=%rdi),>x5=%ymm11
4251vmovupd   416(%rdi),%ymm11
4252
4253# qhasm: x6 = mem256[ input_0 + 448 ]
4254# asm 1: vmovupd   448(<input_0=int64#1),>x6=reg256#13
4255# asm 2: vmovupd   448(<input_0=%rdi),>x6=%ymm12
4256vmovupd   448(%rdi),%ymm12
4257
4258# qhasm: x7 = mem256[ input_0 + 480 ]
4259# asm 1: vmovupd   480(<input_0=int64#1),>x7=reg256#14
4260# asm 2: vmovupd   480(<input_0=%rdi),>x7=%ymm13
4261vmovupd   480(%rdi),%ymm13
4262
4263# qhasm: v00 = x0 & mask0
4264# asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15
4265# asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14
4266vpand %ymm6,%ymm0,%ymm14
4267
4268# qhasm: v10 = x4 & mask0
4269# asm 1: vpand <x4=reg256#11,<mask0=reg256#1,>v10=reg256#16
4270# asm 2: vpand <x4=%ymm10,<mask0=%ymm0,>v10=%ymm15
4271vpand %ymm10,%ymm0,%ymm15
4272
4273# qhasm: 4x v10 <<= 4
4274# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
4275# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
4276vpsllq $4,%ymm15,%ymm15
4277
4278# qhasm: v01 = x0 & mask1
4279# asm 1: vpand <x0=reg256#7,<mask1=reg256#2,>v01=reg256#7
4280# asm 2: vpand <x0=%ymm6,<mask1=%ymm1,>v01=%ymm6
4281vpand %ymm6,%ymm1,%ymm6
4282
4283# qhasm: v11 = x4 & mask1
4284# asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11
4285# asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10
4286vpand %ymm10,%ymm1,%ymm10
4287
4288# qhasm: 4x v01 unsigned>>= 4
4289# asm 1: vpsrlq $4,<v01=reg256#7,<v01=reg256#7
4290# asm 2: vpsrlq $4,<v01=%ymm6,<v01=%ymm6
4291vpsrlq $4,%ymm6,%ymm6
4292
4293# qhasm: x0 = v00 | v10
4294# asm 1: vpor  <v00=reg256#15,<v10=reg256#16,>x0=reg256#15
4295# asm 2: vpor  <v00=%ymm14,<v10=%ymm15,>x0=%ymm14
4296vpor  %ymm14,%ymm15,%ymm14
4297
4298# qhasm: x4 = v01 | v11
4299# asm 1: vpor  <v01=reg256#7,<v11=reg256#11,>x4=reg256#7
4300# asm 2: vpor  <v01=%ymm6,<v11=%ymm10,>x4=%ymm6
4301vpor  %ymm6,%ymm10,%ymm6
4302
4303# qhasm: v00 = x1 & mask0
4304# asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11
4305# asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10
4306vpand %ymm7,%ymm0,%ymm10
4307
4308# qhasm: v10 = x5 & mask0
4309# asm 1: vpand <x5=reg256#12,<mask0=reg256#1,>v10=reg256#16
4310# asm 2: vpand <x5=%ymm11,<mask0=%ymm0,>v10=%ymm15
4311vpand %ymm11,%ymm0,%ymm15
4312
4313# qhasm: 4x v10 <<= 4
4314# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
4315# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
4316vpsllq $4,%ymm15,%ymm15
4317
4318# qhasm: v01 = x1 & mask1
4319# asm 1: vpand <x1=reg256#8,<mask1=reg256#2,>v01=reg256#8
4320# asm 2: vpand <x1=%ymm7,<mask1=%ymm1,>v01=%ymm7
4321vpand %ymm7,%ymm1,%ymm7
4322
4323# qhasm: v11 = x5 & mask1
4324# asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12
4325# asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11
4326vpand %ymm11,%ymm1,%ymm11
4327
4328# qhasm: 4x v01 unsigned>>= 4
4329# asm 1: vpsrlq $4,<v01=reg256#8,<v01=reg256#8
4330# asm 2: vpsrlq $4,<v01=%ymm7,<v01=%ymm7
4331vpsrlq $4,%ymm7,%ymm7
4332
4333# qhasm: x1 = v00 | v10
4334# asm 1: vpor  <v00=reg256#11,<v10=reg256#16,>x1=reg256#11
4335# asm 2: vpor  <v00=%ymm10,<v10=%ymm15,>x1=%ymm10
4336vpor  %ymm10,%ymm15,%ymm10
4337
4338# qhasm: x5 = v01 | v11
4339# asm 1: vpor  <v01=reg256#8,<v11=reg256#12,>x5=reg256#8
4340# asm 2: vpor  <v01=%ymm7,<v11=%ymm11,>x5=%ymm7
4341vpor  %ymm7,%ymm11,%ymm7
4342
4343# qhasm: v00 = x2 & mask0
4344# asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12
4345# asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11
4346vpand %ymm8,%ymm0,%ymm11
4347
4348# qhasm: v10 = x6 & mask0
4349# asm 1: vpand <x6=reg256#13,<mask0=reg256#1,>v10=reg256#16
4350# asm 2: vpand <x6=%ymm12,<mask0=%ymm0,>v10=%ymm15
4351vpand %ymm12,%ymm0,%ymm15
4352
4353# qhasm: 4x v10 <<= 4
4354# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
4355# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
4356vpsllq $4,%ymm15,%ymm15
4357
4358# qhasm: v01 = x2 & mask1
4359# asm 1: vpand <x2=reg256#9,<mask1=reg256#2,>v01=reg256#9
4360# asm 2: vpand <x2=%ymm8,<mask1=%ymm1,>v01=%ymm8
4361vpand %ymm8,%ymm1,%ymm8
4362
4363# qhasm: v11 = x6 & mask1
4364# asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13
4365# asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12
4366vpand %ymm12,%ymm1,%ymm12
4367
4368# qhasm: 4x v01 unsigned>>= 4
4369# asm 1: vpsrlq $4,<v01=reg256#9,<v01=reg256#9
4370# asm 2: vpsrlq $4,<v01=%ymm8,<v01=%ymm8
4371vpsrlq $4,%ymm8,%ymm8
4372
4373# qhasm: x2 = v00 | v10
4374# asm 1: vpor  <v00=reg256#12,<v10=reg256#16,>x2=reg256#12
4375# asm 2: vpor  <v00=%ymm11,<v10=%ymm15,>x2=%ymm11
4376vpor  %ymm11,%ymm15,%ymm11
4377
4378# qhasm: x6 = v01 | v11
4379# asm 1: vpor  <v01=reg256#9,<v11=reg256#13,>x6=reg256#9
4380# asm 2: vpor  <v01=%ymm8,<v11=%ymm12,>x6=%ymm8
4381vpor  %ymm8,%ymm12,%ymm8
4382
4383# qhasm: v00 = x3 & mask0
4384# asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13
4385# asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12
4386vpand %ymm9,%ymm0,%ymm12
4387
4388# qhasm: v10 = x7 & mask0
4389# asm 1: vpand <x7=reg256#14,<mask0=reg256#1,>v10=reg256#16
4390# asm 2: vpand <x7=%ymm13,<mask0=%ymm0,>v10=%ymm15
4391vpand %ymm13,%ymm0,%ymm15
4392
4393# qhasm: 4x v10 <<= 4
4394# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
4395# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
4396vpsllq $4,%ymm15,%ymm15
4397
4398# qhasm: v01 = x3 & mask1
4399# asm 1: vpand <x3=reg256#10,<mask1=reg256#2,>v01=reg256#10
4400# asm 2: vpand <x3=%ymm9,<mask1=%ymm1,>v01=%ymm9
4401vpand %ymm9,%ymm1,%ymm9
4402
4403# qhasm: v11 = x7 & mask1
4404# asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14
4405# asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13
4406vpand %ymm13,%ymm1,%ymm13
4407
4408# qhasm: 4x v01 unsigned>>= 4
4409# asm 1: vpsrlq $4,<v01=reg256#10,<v01=reg256#10
4410# asm 2: vpsrlq $4,<v01=%ymm9,<v01=%ymm9
4411vpsrlq $4,%ymm9,%ymm9
4412
4413# qhasm: x3 = v00 | v10
4414# asm 1: vpor  <v00=reg256#13,<v10=reg256#16,>x3=reg256#13
4415# asm 2: vpor  <v00=%ymm12,<v10=%ymm15,>x3=%ymm12
4416vpor  %ymm12,%ymm15,%ymm12
4417
4418# qhasm: x7 = v01 | v11
4419# asm 1: vpor  <v01=reg256#10,<v11=reg256#14,>x7=reg256#10
4420# asm 2: vpor  <v01=%ymm9,<v11=%ymm13,>x7=%ymm9
4421vpor  %ymm9,%ymm13,%ymm9
4422
4423# qhasm: v00 = x0 & mask2
4424# asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14
4425# asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13
4426vpand %ymm14,%ymm2,%ymm13
4427
4428# qhasm: v10 = x2 & mask2
4429# asm 1: vpand <x2=reg256#12,<mask2=reg256#3,>v10=reg256#16
4430# asm 2: vpand <x2=%ymm11,<mask2=%ymm2,>v10=%ymm15
4431vpand %ymm11,%ymm2,%ymm15
4432
4433# qhasm: 4x v10 <<= 2
4434# asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
4435# asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
4436vpsllq $2,%ymm15,%ymm15
4437
4438# qhasm: v01 = x0 & mask3
4439# asm 1: vpand <x0=reg256#15,<mask3=reg256#4,>v01=reg256#15
4440# asm 2: vpand <x0=%ymm14,<mask3=%ymm3,>v01=%ymm14
4441vpand %ymm14,%ymm3,%ymm14
4442
4443# qhasm: v11 = x2 & mask3
4444# asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12
4445# asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11
4446vpand %ymm11,%ymm3,%ymm11
4447
4448# qhasm: 4x v01 unsigned>>= 2
4449# asm 1: vpsrlq $2,<v01=reg256#15,<v01=reg256#15
4450# asm 2: vpsrlq $2,<v01=%ymm14,<v01=%ymm14
4451vpsrlq $2,%ymm14,%ymm14
4452
4453# qhasm: x0 = v00 | v10
4454# asm 1: vpor  <v00=reg256#14,<v10=reg256#16,>x0=reg256#14
4455# asm 2: vpor  <v00=%ymm13,<v10=%ymm15,>x0=%ymm13
4456vpor  %ymm13,%ymm15,%ymm13
4457
4458# qhasm: x2 = v01 | v11
4459# asm 1: vpor  <v01=reg256#15,<v11=reg256#12,>x2=reg256#12
4460# asm 2: vpor  <v01=%ymm14,<v11=%ymm11,>x2=%ymm11
4461vpor  %ymm14,%ymm11,%ymm11
4462
4463# qhasm: v00 = x1 & mask2
4464# asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15
4465# asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14
4466vpand %ymm10,%ymm2,%ymm14
4467
4468# qhasm: v10 = x3 & mask2
4469# asm 1: vpand <x3=reg256#13,<mask2=reg256#3,>v10=reg256#16
4470# asm 2: vpand <x3=%ymm12,<mask2=%ymm2,>v10=%ymm15
4471vpand %ymm12,%ymm2,%ymm15
4472
4473# qhasm: 4x v10 <<= 2
4474# asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
4475# asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
4476vpsllq $2,%ymm15,%ymm15
4477
4478# qhasm: v01 = x1 & mask3
4479# asm 1: vpand <x1=reg256#11,<mask3=reg256#4,>v01=reg256#11
4480# asm 2: vpand <x1=%ymm10,<mask3=%ymm3,>v01=%ymm10
4481vpand %ymm10,%ymm3,%ymm10
4482
4483# qhasm: v11 = x3 & mask3
4484# asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13
4485# asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12
4486vpand %ymm12,%ymm3,%ymm12
4487
4488# qhasm: 4x v01 unsigned>>= 2
4489# asm 1: vpsrlq $2,<v01=reg256#11,<v01=reg256#11
4490# asm 2: vpsrlq $2,<v01=%ymm10,<v01=%ymm10
4491vpsrlq $2,%ymm10,%ymm10
4492
4493# qhasm: x1 = v00 | v10
4494# asm 1: vpor  <v00=reg256#15,<v10=reg256#16,>x1=reg256#15
4495# asm 2: vpor  <v00=%ymm14,<v10=%ymm15,>x1=%ymm14
4496vpor  %ymm14,%ymm15,%ymm14
4497
4498# qhasm: x3 = v01 | v11
4499# asm 1: vpor  <v01=reg256#11,<v11=reg256#13,>x3=reg256#11
4500# asm 2: vpor  <v01=%ymm10,<v11=%ymm12,>x3=%ymm10
4501vpor  %ymm10,%ymm12,%ymm10
4502
4503# qhasm: v00 = x4 & mask2
4504# asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13
4505# asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12
4506vpand %ymm6,%ymm2,%ymm12
4507
4508# qhasm: v10 = x6 & mask2
4509# asm 1: vpand <x6=reg256#9,<mask2=reg256#3,>v10=reg256#16
4510# asm 2: vpand <x6=%ymm8,<mask2=%ymm2,>v10=%ymm15
4511vpand %ymm8,%ymm2,%ymm15
4512
4513# qhasm: 4x v10 <<= 2
4514# asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
4515# asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
4516vpsllq $2,%ymm15,%ymm15
4517
4518# qhasm: v01 = x4 & mask3
4519# asm 1: vpand <x4=reg256#7,<mask3=reg256#4,>v01=reg256#7
4520# asm 2: vpand <x4=%ymm6,<mask3=%ymm3,>v01=%ymm6
4521vpand %ymm6,%ymm3,%ymm6
4522
4523# qhasm: v11 = x6 & mask3
4524# asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9
4525# asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8
4526vpand %ymm8,%ymm3,%ymm8
4527
4528# qhasm: 4x v01 unsigned>>= 2
4529# asm 1: vpsrlq $2,<v01=reg256#7,<v01=reg256#7
4530# asm 2: vpsrlq $2,<v01=%ymm6,<v01=%ymm6
4531vpsrlq $2,%ymm6,%ymm6
4532
4533# qhasm: x4 = v00 | v10
4534# asm 1: vpor  <v00=reg256#13,<v10=reg256#16,>x4=reg256#13
4535# asm 2: vpor  <v00=%ymm12,<v10=%ymm15,>x4=%ymm12
4536vpor  %ymm12,%ymm15,%ymm12
4537
4538# qhasm: x6 = v01 | v11
4539# asm 1: vpor  <v01=reg256#7,<v11=reg256#9,>x6=reg256#7
4540# asm 2: vpor  <v01=%ymm6,<v11=%ymm8,>x6=%ymm6
4541vpor  %ymm6,%ymm8,%ymm6
4542
4543# qhasm: v00 = x5 & mask2
4544# asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9
4545# asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8
4546vpand %ymm7,%ymm2,%ymm8
4547
4548# qhasm: v10 = x7 & mask2
4549# asm 1: vpand <x7=reg256#10,<mask2=reg256#3,>v10=reg256#16
4550# asm 2: vpand <x7=%ymm9,<mask2=%ymm2,>v10=%ymm15
4551vpand %ymm9,%ymm2,%ymm15
4552
4553# qhasm: 4x v10 <<= 2
4554# asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
4555# asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
4556vpsllq $2,%ymm15,%ymm15
4557
4558# qhasm: v01 = x5 & mask3
4559# asm 1: vpand <x5=reg256#8,<mask3=reg256#4,>v01=reg256#8
4560# asm 2: vpand <x5=%ymm7,<mask3=%ymm3,>v01=%ymm7
4561vpand %ymm7,%ymm3,%ymm7
4562
4563# qhasm: v11 = x7 & mask3
4564# asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10
4565# asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9
4566vpand %ymm9,%ymm3,%ymm9
4567
4568# qhasm: 4x v01 unsigned>>= 2
4569# asm 1: vpsrlq $2,<v01=reg256#8,<v01=reg256#8
4570# asm 2: vpsrlq $2,<v01=%ymm7,<v01=%ymm7
4571vpsrlq $2,%ymm7,%ymm7
4572
4573# qhasm: x5 = v00 | v10
4574# asm 1: vpor  <v00=reg256#9,<v10=reg256#16,>x5=reg256#9
4575# asm 2: vpor  <v00=%ymm8,<v10=%ymm15,>x5=%ymm8
4576vpor  %ymm8,%ymm15,%ymm8
4577
4578# qhasm: x7 = v01 | v11
4579# asm 1: vpor  <v01=reg256#8,<v11=reg256#10,>x7=reg256#8
4580# asm 2: vpor  <v01=%ymm7,<v11=%ymm9,>x7=%ymm7
4581vpor  %ymm7,%ymm9,%ymm7
4582
4583# qhasm: v00 = x0 & mask4
4584# asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10
4585# asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9
4586vpand %ymm13,%ymm4,%ymm9
4587
4588# qhasm: v10 = x1 & mask4
4589# asm 1: vpand <x1=reg256#15,<mask4=reg256#5,>v10=reg256#16
4590# asm 2: vpand <x1=%ymm14,<mask4=%ymm4,>v10=%ymm15
4591vpand %ymm14,%ymm4,%ymm15
4592
4593# qhasm: 4x v10 <<= 1
4594# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
4595# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
4596vpsllq $1,%ymm15,%ymm15
4597
4598# qhasm: v01 = x0 & mask5
4599# asm 1: vpand <x0=reg256#14,<mask5=reg256#6,>v01=reg256#14
4600# asm 2: vpand <x0=%ymm13,<mask5=%ymm5,>v01=%ymm13
4601vpand %ymm13,%ymm5,%ymm13
4602
4603# qhasm: v11 = x1 & mask5
4604# asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15
4605# asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14
4606vpand %ymm14,%ymm5,%ymm14
4607
4608# qhasm: 4x v01 unsigned>>= 1
4609# asm 1: vpsrlq $1,<v01=reg256#14,<v01=reg256#14
4610# asm 2: vpsrlq $1,<v01=%ymm13,<v01=%ymm13
4611vpsrlq $1,%ymm13,%ymm13
4612
4613# qhasm: x0 = v00 | v10
4614# asm 1: vpor  <v00=reg256#10,<v10=reg256#16,>x0=reg256#10
4615# asm 2: vpor  <v00=%ymm9,<v10=%ymm15,>x0=%ymm9
4616vpor  %ymm9,%ymm15,%ymm9
4617
4618# qhasm: x1 = v01 | v11
4619# asm 1: vpor  <v01=reg256#14,<v11=reg256#15,>x1=reg256#14
4620# asm 2: vpor  <v01=%ymm13,<v11=%ymm14,>x1=%ymm13
4621vpor  %ymm13,%ymm14,%ymm13
4622
4623# qhasm: v00 = x2 & mask4
4624# asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15
4625# asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14
4626vpand %ymm11,%ymm4,%ymm14
4627
4628# qhasm: v10 = x3 & mask4
4629# asm 1: vpand <x3=reg256#11,<mask4=reg256#5,>v10=reg256#16
4630# asm 2: vpand <x3=%ymm10,<mask4=%ymm4,>v10=%ymm15
4631vpand %ymm10,%ymm4,%ymm15
4632
4633# qhasm: 4x v10 <<= 1
4634# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
4635# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
4636vpsllq $1,%ymm15,%ymm15
4637
4638# qhasm: v01 = x2 & mask5
4639# asm 1: vpand <x2=reg256#12,<mask5=reg256#6,>v01=reg256#12
4640# asm 2: vpand <x2=%ymm11,<mask5=%ymm5,>v01=%ymm11
4641vpand %ymm11,%ymm5,%ymm11
4642
4643# qhasm: v11 = x3 & mask5
4644# asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11
4645# asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10
4646vpand %ymm10,%ymm5,%ymm10
4647
4648# qhasm: 4x v01 unsigned>>= 1
4649# asm 1: vpsrlq $1,<v01=reg256#12,<v01=reg256#12
4650# asm 2: vpsrlq $1,<v01=%ymm11,<v01=%ymm11
4651vpsrlq $1,%ymm11,%ymm11
4652
4653# qhasm: x2 = v00 | v10
4654# asm 1: vpor  <v00=reg256#15,<v10=reg256#16,>x2=reg256#15
4655# asm 2: vpor  <v00=%ymm14,<v10=%ymm15,>x2=%ymm14
4656vpor  %ymm14,%ymm15,%ymm14
4657
4658# qhasm: x3 = v01 | v11
4659# asm 1: vpor  <v01=reg256#12,<v11=reg256#11,>x3=reg256#11
4660# asm 2: vpor  <v01=%ymm11,<v11=%ymm10,>x3=%ymm10
4661vpor  %ymm11,%ymm10,%ymm10
4662
4663# qhasm: v00 = x4 & mask4
4664# asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12
4665# asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11
4666vpand %ymm12,%ymm4,%ymm11
4667
4668# qhasm: v10 = x5 & mask4
4669# asm 1: vpand <x5=reg256#9,<mask4=reg256#5,>v10=reg256#16
4670# asm 2: vpand <x5=%ymm8,<mask4=%ymm4,>v10=%ymm15
4671vpand %ymm8,%ymm4,%ymm15
4672
4673# qhasm: 4x v10 <<= 1
4674# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
4675# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
4676vpsllq $1,%ymm15,%ymm15
4677
4678# qhasm: v01 = x4 & mask5
4679# asm 1: vpand <x4=reg256#13,<mask5=reg256#6,>v01=reg256#13
4680# asm 2: vpand <x4=%ymm12,<mask5=%ymm5,>v01=%ymm12
4681vpand %ymm12,%ymm5,%ymm12
4682
4683# qhasm: v11 = x5 & mask5
4684# asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9
4685# asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8
4686vpand %ymm8,%ymm5,%ymm8
4687
4688# qhasm: 4x v01 unsigned>>= 1
4689# asm 1: vpsrlq $1,<v01=reg256#13,<v01=reg256#13
4690# asm 2: vpsrlq $1,<v01=%ymm12,<v01=%ymm12
4691vpsrlq $1,%ymm12,%ymm12
4692
4693# qhasm: x4 = v00 | v10
4694# asm 1: vpor  <v00=reg256#12,<v10=reg256#16,>x4=reg256#12
4695# asm 2: vpor  <v00=%ymm11,<v10=%ymm15,>x4=%ymm11
4696vpor  %ymm11,%ymm15,%ymm11
4697
4698# qhasm: x5 = v01 | v11
4699# asm 1: vpor  <v01=reg256#13,<v11=reg256#9,>x5=reg256#9
4700# asm 2: vpor  <v01=%ymm12,<v11=%ymm8,>x5=%ymm8
4701vpor  %ymm12,%ymm8,%ymm8
4702
4703# qhasm: v00 = x6 & mask4
4704# asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13
4705# asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12
4706vpand %ymm6,%ymm4,%ymm12
4707
4708# qhasm: v10 = x7 & mask4
4709# asm 1: vpand <x7=reg256#8,<mask4=reg256#5,>v10=reg256#16
4710# asm 2: vpand <x7=%ymm7,<mask4=%ymm4,>v10=%ymm15
4711vpand %ymm7,%ymm4,%ymm15
4712
4713# qhasm: 4x v10 <<= 1
4714# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
4715# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
4716vpsllq $1,%ymm15,%ymm15
4717
4718# qhasm: v01 = x6 & mask5
4719# asm 1: vpand <x6=reg256#7,<mask5=reg256#6,>v01=reg256#7
4720# asm 2: vpand <x6=%ymm6,<mask5=%ymm5,>v01=%ymm6
4721vpand %ymm6,%ymm5,%ymm6
4722
4723# qhasm: v11 = x7 & mask5
4724# asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8
4725# asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7
4726vpand %ymm7,%ymm5,%ymm7
4727
4728# qhasm: 4x v01 unsigned>>= 1
4729# asm 1: vpsrlq $1,<v01=reg256#7,<v01=reg256#7
4730# asm 2: vpsrlq $1,<v01=%ymm6,<v01=%ymm6
4731vpsrlq $1,%ymm6,%ymm6
4732
4733# qhasm: x6 = v00 | v10
4734# asm 1: vpor  <v00=reg256#13,<v10=reg256#16,>x6=reg256#13
4735# asm 2: vpor  <v00=%ymm12,<v10=%ymm15,>x6=%ymm12
4736vpor  %ymm12,%ymm15,%ymm12
4737
4738# qhasm: x7 = v01 | v11
4739# asm 1: vpor  <v01=reg256#7,<v11=reg256#8,>x7=reg256#7
4740# asm 2: vpor  <v01=%ymm6,<v11=%ymm7,>x7=%ymm6
4741vpor  %ymm6,%ymm7,%ymm6
4742
4743# qhasm: mem256[ input_0 + 256 ] = x0
4744# asm 1: vmovupd   <x0=reg256#10,256(<input_0=int64#1)
4745# asm 2: vmovupd   <x0=%ymm9,256(<input_0=%rdi)
4746vmovupd   %ymm9,256(%rdi)
4747
4748# qhasm: mem256[ input_0 + 288 ] = x1
4749# asm 1: vmovupd   <x1=reg256#14,288(<input_0=int64#1)
4750# asm 2: vmovupd   <x1=%ymm13,288(<input_0=%rdi)
4751vmovupd   %ymm13,288(%rdi)
4752
4753# qhasm: mem256[ input_0 + 320 ] = x2
4754# asm 1: vmovupd   <x2=reg256#15,320(<input_0=int64#1)
4755# asm 2: vmovupd   <x2=%ymm14,320(<input_0=%rdi)
4756vmovupd   %ymm14,320(%rdi)
4757
4758# qhasm: mem256[ input_0 + 352 ] = x3
4759# asm 1: vmovupd   <x3=reg256#11,352(<input_0=int64#1)
4760# asm 2: vmovupd   <x3=%ymm10,352(<input_0=%rdi)
4761vmovupd   %ymm10,352(%rdi)
4762
4763# qhasm: mem256[ input_0 + 384 ] = x4
4764# asm 1: vmovupd   <x4=reg256#12,384(<input_0=int64#1)
4765# asm 2: vmovupd   <x4=%ymm11,384(<input_0=%rdi)
4766vmovupd   %ymm11,384(%rdi)
4767
4768# qhasm: mem256[ input_0 + 416 ] = x5
4769# asm 1: vmovupd   <x5=reg256#9,416(<input_0=int64#1)
4770# asm 2: vmovupd   <x5=%ymm8,416(<input_0=%rdi)
4771vmovupd   %ymm8,416(%rdi)
4772
4773# qhasm: mem256[ input_0 + 448 ] = x6
4774# asm 1: vmovupd   <x6=reg256#13,448(<input_0=int64#1)
4775# asm 2: vmovupd   <x6=%ymm12,448(<input_0=%rdi)
4776vmovupd   %ymm12,448(%rdi)
4777
4778# qhasm: mem256[ input_0 + 480 ] = x7
4779# asm 1: vmovupd   <x7=reg256#7,480(<input_0=int64#1)
4780# asm 2: vmovupd   <x7=%ymm6,480(<input_0=%rdi)
4781vmovupd   %ymm6,480(%rdi)
4782
4783# qhasm: x0 = mem256[ input_0 + 512 ]
4784# asm 1: vmovupd   512(<input_0=int64#1),>x0=reg256#7
4785# asm 2: vmovupd   512(<input_0=%rdi),>x0=%ymm6
4786vmovupd   512(%rdi),%ymm6
4787
4788# qhasm: x1 = mem256[ input_0 + 544 ]
4789# asm 1: vmovupd   544(<input_0=int64#1),>x1=reg256#8
4790# asm 2: vmovupd   544(<input_0=%rdi),>x1=%ymm7
4791vmovupd   544(%rdi),%ymm7
4792
4793# qhasm: x2 = mem256[ input_0 + 576 ]
4794# asm 1: vmovupd   576(<input_0=int64#1),>x2=reg256#9
4795# asm 2: vmovupd   576(<input_0=%rdi),>x2=%ymm8
4796vmovupd   576(%rdi),%ymm8
4797
4798# qhasm: x3 = mem256[ input_0 + 608 ]
4799# asm 1: vmovupd   608(<input_0=int64#1),>x3=reg256#10
4800# asm 2: vmovupd   608(<input_0=%rdi),>x3=%ymm9
4801vmovupd   608(%rdi),%ymm9
4802
4803# qhasm: x4 = mem256[ input_0 + 640 ]
4804# asm 1: vmovupd   640(<input_0=int64#1),>x4=reg256#11
4805# asm 2: vmovupd   640(<input_0=%rdi),>x4=%ymm10
4806vmovupd   640(%rdi),%ymm10
4807
4808# qhasm: x5 = mem256[ input_0 + 672 ]
4809# asm 1: vmovupd   672(<input_0=int64#1),>x5=reg256#12
4810# asm 2: vmovupd   672(<input_0=%rdi),>x5=%ymm11
4811vmovupd   672(%rdi),%ymm11
4812
4813# qhasm: x6 = mem256[ input_0 + 704 ]
4814# asm 1: vmovupd   704(<input_0=int64#1),>x6=reg256#13
4815# asm 2: vmovupd   704(<input_0=%rdi),>x6=%ymm12
4816vmovupd   704(%rdi),%ymm12
4817
4818# qhasm: x7 = mem256[ input_0 + 736 ]
4819# asm 1: vmovupd   736(<input_0=int64#1),>x7=reg256#14
4820# asm 2: vmovupd   736(<input_0=%rdi),>x7=%ymm13
4821vmovupd   736(%rdi),%ymm13
4822
4823# qhasm: v00 = x0 & mask0
4824# asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15
4825# asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14
4826vpand %ymm6,%ymm0,%ymm14
4827
4828# qhasm: v10 = x4 & mask0
4829# asm 1: vpand <x4=reg256#11,<mask0=reg256#1,>v10=reg256#16
4830# asm 2: vpand <x4=%ymm10,<mask0=%ymm0,>v10=%ymm15
4831vpand %ymm10,%ymm0,%ymm15
4832
4833# qhasm: 4x v10 <<= 4
4834# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
4835# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
4836vpsllq $4,%ymm15,%ymm15
4837
4838# qhasm: v01 = x0 & mask1
4839# asm 1: vpand <x0=reg256#7,<mask1=reg256#2,>v01=reg256#7
4840# asm 2: vpand <x0=%ymm6,<mask1=%ymm1,>v01=%ymm6
4841vpand %ymm6,%ymm1,%ymm6
4842
4843# qhasm: v11 = x4 & mask1
4844# asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11
4845# asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10
4846vpand %ymm10,%ymm1,%ymm10
4847
4848# qhasm: 4x v01 unsigned>>= 4
4849# asm 1: vpsrlq $4,<v01=reg256#7,<v01=reg256#7
4850# asm 2: vpsrlq $4,<v01=%ymm6,<v01=%ymm6
4851vpsrlq $4,%ymm6,%ymm6
4852
4853# qhasm: x0 = v00 | v10
4854# asm 1: vpor  <v00=reg256#15,<v10=reg256#16,>x0=reg256#15
4855# asm 2: vpor  <v00=%ymm14,<v10=%ymm15,>x0=%ymm14
4856vpor  %ymm14,%ymm15,%ymm14
4857
4858# qhasm: x4 = v01 | v11
4859# asm 1: vpor  <v01=reg256#7,<v11=reg256#11,>x4=reg256#7
4860# asm 2: vpor  <v01=%ymm6,<v11=%ymm10,>x4=%ymm6
4861vpor  %ymm6,%ymm10,%ymm6
4862
4863# qhasm: v00 = x1 & mask0
4864# asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11
4865# asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10
4866vpand %ymm7,%ymm0,%ymm10
4867
4868# qhasm: v10 = x5 & mask0
4869# asm 1: vpand <x5=reg256#12,<mask0=reg256#1,>v10=reg256#16
4870# asm 2: vpand <x5=%ymm11,<mask0=%ymm0,>v10=%ymm15
4871vpand %ymm11,%ymm0,%ymm15
4872
4873# qhasm: 4x v10 <<= 4
4874# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
4875# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
4876vpsllq $4,%ymm15,%ymm15
4877
4878# qhasm: v01 = x1 & mask1
4879# asm 1: vpand <x1=reg256#8,<mask1=reg256#2,>v01=reg256#8
4880# asm 2: vpand <x1=%ymm7,<mask1=%ymm1,>v01=%ymm7
4881vpand %ymm7,%ymm1,%ymm7
4882
4883# qhasm: v11 = x5 & mask1
4884# asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12
4885# asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11
4886vpand %ymm11,%ymm1,%ymm11
4887
4888# qhasm: 4x v01 unsigned>>= 4
4889# asm 1: vpsrlq $4,<v01=reg256#8,<v01=reg256#8
4890# asm 2: vpsrlq $4,<v01=%ymm7,<v01=%ymm7
4891vpsrlq $4,%ymm7,%ymm7
4892
4893# qhasm: x1 = v00 | v10
4894# asm 1: vpor  <v00=reg256#11,<v10=reg256#16,>x1=reg256#11
4895# asm 2: vpor  <v00=%ymm10,<v10=%ymm15,>x1=%ymm10
4896vpor  %ymm10,%ymm15,%ymm10
4897
4898# qhasm: x5 = v01 | v11
4899# asm 1: vpor  <v01=reg256#8,<v11=reg256#12,>x5=reg256#8
4900# asm 2: vpor  <v01=%ymm7,<v11=%ymm11,>x5=%ymm7
4901vpor  %ymm7,%ymm11,%ymm7
4902
4903# qhasm: v00 = x2 & mask0
4904# asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12
4905# asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11
4906vpand %ymm8,%ymm0,%ymm11
4907
4908# qhasm: v10 = x6 & mask0
4909# asm 1: vpand <x6=reg256#13,<mask0=reg256#1,>v10=reg256#16
4910# asm 2: vpand <x6=%ymm12,<mask0=%ymm0,>v10=%ymm15
4911vpand %ymm12,%ymm0,%ymm15
4912
4913# qhasm: 4x v10 <<= 4
4914# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
4915# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
4916vpsllq $4,%ymm15,%ymm15
4917
4918# qhasm: v01 = x2 & mask1
4919# asm 1: vpand <x2=reg256#9,<mask1=reg256#2,>v01=reg256#9
4920# asm 2: vpand <x2=%ymm8,<mask1=%ymm1,>v01=%ymm8
4921vpand %ymm8,%ymm1,%ymm8
4922
4923# qhasm: v11 = x6 & mask1
4924# asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13
4925# asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12
4926vpand %ymm12,%ymm1,%ymm12
4927
4928# qhasm: 4x v01 unsigned>>= 4
4929# asm 1: vpsrlq $4,<v01=reg256#9,<v01=reg256#9
4930# asm 2: vpsrlq $4,<v01=%ymm8,<v01=%ymm8
4931vpsrlq $4,%ymm8,%ymm8
4932
4933# qhasm: x2 = v00 | v10
4934# asm 1: vpor  <v00=reg256#12,<v10=reg256#16,>x2=reg256#12
4935# asm 2: vpor  <v00=%ymm11,<v10=%ymm15,>x2=%ymm11
4936vpor  %ymm11,%ymm15,%ymm11
4937
4938# qhasm: x6 = v01 | v11
4939# asm 1: vpor  <v01=reg256#9,<v11=reg256#13,>x6=reg256#9
4940# asm 2: vpor  <v01=%ymm8,<v11=%ymm12,>x6=%ymm8
4941vpor  %ymm8,%ymm12,%ymm8
4942
4943# qhasm: v00 = x3 & mask0
4944# asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13
4945# asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12
4946vpand %ymm9,%ymm0,%ymm12
4947
4948# qhasm: v10 = x7 & mask0
4949# asm 1: vpand <x7=reg256#14,<mask0=reg256#1,>v10=reg256#16
4950# asm 2: vpand <x7=%ymm13,<mask0=%ymm0,>v10=%ymm15
4951vpand %ymm13,%ymm0,%ymm15
4952
4953# qhasm: 4x v10 <<= 4
4954# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
4955# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
4956vpsllq $4,%ymm15,%ymm15
4957
4958# qhasm: v01 = x3 & mask1
4959# asm 1: vpand <x3=reg256#10,<mask1=reg256#2,>v01=reg256#10
4960# asm 2: vpand <x3=%ymm9,<mask1=%ymm1,>v01=%ymm9
4961vpand %ymm9,%ymm1,%ymm9
4962
4963# qhasm: v11 = x7 & mask1
4964# asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14
4965# asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13
4966vpand %ymm13,%ymm1,%ymm13
4967
4968# qhasm: 4x v01 unsigned>>= 4
4969# asm 1: vpsrlq $4,<v01=reg256#10,<v01=reg256#10
4970# asm 2: vpsrlq $4,<v01=%ymm9,<v01=%ymm9
4971vpsrlq $4,%ymm9,%ymm9
4972
4973# qhasm: x3 = v00 | v10
4974# asm 1: vpor  <v00=reg256#13,<v10=reg256#16,>x3=reg256#13
4975# asm 2: vpor  <v00=%ymm12,<v10=%ymm15,>x3=%ymm12
4976vpor  %ymm12,%ymm15,%ymm12
4977
4978# qhasm: x7 = v01 | v11
4979# asm 1: vpor  <v01=reg256#10,<v11=reg256#14,>x7=reg256#10
4980# asm 2: vpor  <v01=%ymm9,<v11=%ymm13,>x7=%ymm9
4981vpor  %ymm9,%ymm13,%ymm9
4982
4983# qhasm: v00 = x0 & mask2
4984# asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14
4985# asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13
4986vpand %ymm14,%ymm2,%ymm13
4987
4988# qhasm: v10 = x2 & mask2
4989# asm 1: vpand <x2=reg256#12,<mask2=reg256#3,>v10=reg256#16
4990# asm 2: vpand <x2=%ymm11,<mask2=%ymm2,>v10=%ymm15
4991vpand %ymm11,%ymm2,%ymm15
4992
4993# qhasm: 4x v10 <<= 2
4994# asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
4995# asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
4996vpsllq $2,%ymm15,%ymm15
4997
4998# qhasm: v01 = x0 & mask3
4999# asm 1: vpand <x0=reg256#15,<mask3=reg256#4,>v01=reg256#15
5000# asm 2: vpand <x0=%ymm14,<mask3=%ymm3,>v01=%ymm14
5001vpand %ymm14,%ymm3,%ymm14
5002
5003# qhasm: v11 = x2 & mask3
5004# asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12
5005# asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11
5006vpand %ymm11,%ymm3,%ymm11
5007
5008# qhasm: 4x v01 unsigned>>= 2
5009# asm 1: vpsrlq $2,<v01=reg256#15,<v01=reg256#15
5010# asm 2: vpsrlq $2,<v01=%ymm14,<v01=%ymm14
5011vpsrlq $2,%ymm14,%ymm14
5012
5013# qhasm: x0 = v00 | v10
5014# asm 1: vpor  <v00=reg256#14,<v10=reg256#16,>x0=reg256#14
5015# asm 2: vpor  <v00=%ymm13,<v10=%ymm15,>x0=%ymm13
5016vpor  %ymm13,%ymm15,%ymm13
5017
5018# qhasm: x2 = v01 | v11
5019# asm 1: vpor  <v01=reg256#15,<v11=reg256#12,>x2=reg256#12
5020# asm 2: vpor  <v01=%ymm14,<v11=%ymm11,>x2=%ymm11
5021vpor  %ymm14,%ymm11,%ymm11
5022
5023# qhasm: v00 = x1 & mask2
5024# asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15
5025# asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14
5026vpand %ymm10,%ymm2,%ymm14
5027
5028# qhasm: v10 = x3 & mask2
5029# asm 1: vpand <x3=reg256#13,<mask2=reg256#3,>v10=reg256#16
5030# asm 2: vpand <x3=%ymm12,<mask2=%ymm2,>v10=%ymm15
5031vpand %ymm12,%ymm2,%ymm15
5032
5033# qhasm: 4x v10 <<= 2
5034# asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
5035# asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
5036vpsllq $2,%ymm15,%ymm15
5037
5038# qhasm: v01 = x1 & mask3
5039# asm 1: vpand <x1=reg256#11,<mask3=reg256#4,>v01=reg256#11
5040# asm 2: vpand <x1=%ymm10,<mask3=%ymm3,>v01=%ymm10
5041vpand %ymm10,%ymm3,%ymm10
5042
5043# qhasm: v11 = x3 & mask3
5044# asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13
5045# asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12
5046vpand %ymm12,%ymm3,%ymm12
5047
5048# qhasm: 4x v01 unsigned>>= 2
5049# asm 1: vpsrlq $2,<v01=reg256#11,<v01=reg256#11
5050# asm 2: vpsrlq $2,<v01=%ymm10,<v01=%ymm10
5051vpsrlq $2,%ymm10,%ymm10
5052
5053# qhasm: x1 = v00 | v10
5054# asm 1: vpor  <v00=reg256#15,<v10=reg256#16,>x1=reg256#15
5055# asm 2: vpor  <v00=%ymm14,<v10=%ymm15,>x1=%ymm14
5056vpor  %ymm14,%ymm15,%ymm14
5057
5058# qhasm: x3 = v01 | v11
5059# asm 1: vpor  <v01=reg256#11,<v11=reg256#13,>x3=reg256#11
5060# asm 2: vpor  <v01=%ymm10,<v11=%ymm12,>x3=%ymm10
5061vpor  %ymm10,%ymm12,%ymm10
5062
5063# qhasm: v00 = x4 & mask2
5064# asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13
5065# asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12
5066vpand %ymm6,%ymm2,%ymm12
5067
5068# qhasm: v10 = x6 & mask2
5069# asm 1: vpand <x6=reg256#9,<mask2=reg256#3,>v10=reg256#16
5070# asm 2: vpand <x6=%ymm8,<mask2=%ymm2,>v10=%ymm15
5071vpand %ymm8,%ymm2,%ymm15
5072
5073# qhasm: 4x v10 <<= 2
5074# asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
5075# asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
5076vpsllq $2,%ymm15,%ymm15
5077
5078# qhasm: v01 = x4 & mask3
5079# asm 1: vpand <x4=reg256#7,<mask3=reg256#4,>v01=reg256#7
5080# asm 2: vpand <x4=%ymm6,<mask3=%ymm3,>v01=%ymm6
5081vpand %ymm6,%ymm3,%ymm6
5082
5083# qhasm: v11 = x6 & mask3
5084# asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9
5085# asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8
5086vpand %ymm8,%ymm3,%ymm8
5087
5088# qhasm: 4x v01 unsigned>>= 2
5089# asm 1: vpsrlq $2,<v01=reg256#7,<v01=reg256#7
5090# asm 2: vpsrlq $2,<v01=%ymm6,<v01=%ymm6
5091vpsrlq $2,%ymm6,%ymm6
5092
5093# qhasm: x4 = v00 | v10
5094# asm 1: vpor  <v00=reg256#13,<v10=reg256#16,>x4=reg256#13
5095# asm 2: vpor  <v00=%ymm12,<v10=%ymm15,>x4=%ymm12
5096vpor  %ymm12,%ymm15,%ymm12
5097
5098# qhasm: x6 = v01 | v11
5099# asm 1: vpor  <v01=reg256#7,<v11=reg256#9,>x6=reg256#7
5100# asm 2: vpor  <v01=%ymm6,<v11=%ymm8,>x6=%ymm6
5101vpor  %ymm6,%ymm8,%ymm6
5102
5103# qhasm: v00 = x5 & mask2
5104# asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9
5105# asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8
5106vpand %ymm7,%ymm2,%ymm8
5107
5108# qhasm: v10 = x7 & mask2
5109# asm 1: vpand <x7=reg256#10,<mask2=reg256#3,>v10=reg256#16
5110# asm 2: vpand <x7=%ymm9,<mask2=%ymm2,>v10=%ymm15
5111vpand %ymm9,%ymm2,%ymm15
5112
5113# qhasm: 4x v10 <<= 2
5114# asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
5115# asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
5116vpsllq $2,%ymm15,%ymm15
5117
5118# qhasm: v01 = x5 & mask3
5119# asm 1: vpand <x5=reg256#8,<mask3=reg256#4,>v01=reg256#8
5120# asm 2: vpand <x5=%ymm7,<mask3=%ymm3,>v01=%ymm7
5121vpand %ymm7,%ymm3,%ymm7
5122
5123# qhasm: v11 = x7 & mask3
5124# asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10
5125# asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9
5126vpand %ymm9,%ymm3,%ymm9
5127
5128# qhasm: 4x v01 unsigned>>= 2
5129# asm 1: vpsrlq $2,<v01=reg256#8,<v01=reg256#8
5130# asm 2: vpsrlq $2,<v01=%ymm7,<v01=%ymm7
5131vpsrlq $2,%ymm7,%ymm7
5132
5133# qhasm: x5 = v00 | v10
5134# asm 1: vpor  <v00=reg256#9,<v10=reg256#16,>x5=reg256#9
5135# asm 2: vpor  <v00=%ymm8,<v10=%ymm15,>x5=%ymm8
5136vpor  %ymm8,%ymm15,%ymm8
5137
5138# qhasm: x7 = v01 | v11
5139# asm 1: vpor  <v01=reg256#8,<v11=reg256#10,>x7=reg256#8
5140# asm 2: vpor  <v01=%ymm7,<v11=%ymm9,>x7=%ymm7
5141vpor  %ymm7,%ymm9,%ymm7
5142
5143# qhasm: v00 = x0 & mask4
5144# asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10
5145# asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9
5146vpand %ymm13,%ymm4,%ymm9
5147
5148# qhasm: v10 = x1 & mask4
5149# asm 1: vpand <x1=reg256#15,<mask4=reg256#5,>v10=reg256#16
5150# asm 2: vpand <x1=%ymm14,<mask4=%ymm4,>v10=%ymm15
5151vpand %ymm14,%ymm4,%ymm15
5152
5153# qhasm: 4x v10 <<= 1
5154# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
5155# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
5156vpsllq $1,%ymm15,%ymm15
5157
5158# qhasm: v01 = x0 & mask5
5159# asm 1: vpand <x0=reg256#14,<mask5=reg256#6,>v01=reg256#14
5160# asm 2: vpand <x0=%ymm13,<mask5=%ymm5,>v01=%ymm13
5161vpand %ymm13,%ymm5,%ymm13
5162
5163# qhasm: v11 = x1 & mask5
5164# asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15
5165# asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14
5166vpand %ymm14,%ymm5,%ymm14
5167
5168# qhasm: 4x v01 unsigned>>= 1
5169# asm 1: vpsrlq $1,<v01=reg256#14,<v01=reg256#14
5170# asm 2: vpsrlq $1,<v01=%ymm13,<v01=%ymm13
5171vpsrlq $1,%ymm13,%ymm13
5172
5173# qhasm: x0 = v00 | v10
5174# asm 1: vpor  <v00=reg256#10,<v10=reg256#16,>x0=reg256#10
5175# asm 2: vpor  <v00=%ymm9,<v10=%ymm15,>x0=%ymm9
5176vpor  %ymm9,%ymm15,%ymm9
5177
5178# qhasm: x1 = v01 | v11
5179# asm 1: vpor  <v01=reg256#14,<v11=reg256#15,>x1=reg256#14
5180# asm 2: vpor  <v01=%ymm13,<v11=%ymm14,>x1=%ymm13
5181vpor  %ymm13,%ymm14,%ymm13
5182
5183# qhasm: v00 = x2 & mask4
5184# asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15
5185# asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14
5186vpand %ymm11,%ymm4,%ymm14
5187
5188# qhasm: v10 = x3 & mask4
5189# asm 1: vpand <x3=reg256#11,<mask4=reg256#5,>v10=reg256#16
5190# asm 2: vpand <x3=%ymm10,<mask4=%ymm4,>v10=%ymm15
5191vpand %ymm10,%ymm4,%ymm15
5192
5193# qhasm: 4x v10 <<= 1
5194# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
5195# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
5196vpsllq $1,%ymm15,%ymm15
5197
5198# qhasm: v01 = x2 & mask5
5199# asm 1: vpand <x2=reg256#12,<mask5=reg256#6,>v01=reg256#12
5200# asm 2: vpand <x2=%ymm11,<mask5=%ymm5,>v01=%ymm11
5201vpand %ymm11,%ymm5,%ymm11
5202
5203# qhasm: v11 = x3 & mask5
5204# asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11
5205# asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10
5206vpand %ymm10,%ymm5,%ymm10
5207
5208# qhasm: 4x v01 unsigned>>= 1
5209# asm 1: vpsrlq $1,<v01=reg256#12,<v01=reg256#12
5210# asm 2: vpsrlq $1,<v01=%ymm11,<v01=%ymm11
5211vpsrlq $1,%ymm11,%ymm11
5212
5213# qhasm: x2 = v00 | v10
5214# asm 1: vpor  <v00=reg256#15,<v10=reg256#16,>x2=reg256#15
5215# asm 2: vpor  <v00=%ymm14,<v10=%ymm15,>x2=%ymm14
5216vpor  %ymm14,%ymm15,%ymm14
5217
5218# qhasm: x3 = v01 | v11
5219# asm 1: vpor  <v01=reg256#12,<v11=reg256#11,>x3=reg256#11
5220# asm 2: vpor  <v01=%ymm11,<v11=%ymm10,>x3=%ymm10
5221vpor  %ymm11,%ymm10,%ymm10
5222
5223# qhasm: v00 = x4 & mask4
5224# asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12
5225# asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11
5226vpand %ymm12,%ymm4,%ymm11
5227
5228# qhasm: v10 = x5 & mask4
5229# asm 1: vpand <x5=reg256#9,<mask4=reg256#5,>v10=reg256#16
5230# asm 2: vpand <x5=%ymm8,<mask4=%ymm4,>v10=%ymm15
5231vpand %ymm8,%ymm4,%ymm15
5232
5233# qhasm: 4x v10 <<= 1
5234# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
5235# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
5236vpsllq $1,%ymm15,%ymm15
5237
5238# qhasm: v01 = x4 & mask5
5239# asm 1: vpand <x4=reg256#13,<mask5=reg256#6,>v01=reg256#13
5240# asm 2: vpand <x4=%ymm12,<mask5=%ymm5,>v01=%ymm12
5241vpand %ymm12,%ymm5,%ymm12
5242
5243# qhasm: v11 = x5 & mask5
5244# asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9
5245# asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8
5246vpand %ymm8,%ymm5,%ymm8
5247
5248# qhasm: 4x v01 unsigned>>= 1
5249# asm 1: vpsrlq $1,<v01=reg256#13,<v01=reg256#13
5250# asm 2: vpsrlq $1,<v01=%ymm12,<v01=%ymm12
5251vpsrlq $1,%ymm12,%ymm12
5252
5253# qhasm: x4 = v00 | v10
5254# asm 1: vpor  <v00=reg256#12,<v10=reg256#16,>x4=reg256#12
5255# asm 2: vpor  <v00=%ymm11,<v10=%ymm15,>x4=%ymm11
5256vpor  %ymm11,%ymm15,%ymm11
5257
5258# qhasm: x5 = v01 | v11
5259# asm 1: vpor  <v01=reg256#13,<v11=reg256#9,>x5=reg256#9
5260# asm 2: vpor  <v01=%ymm12,<v11=%ymm8,>x5=%ymm8
5261vpor  %ymm12,%ymm8,%ymm8
5262
5263# qhasm: v00 = x6 & mask4
5264# asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13
5265# asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12
5266vpand %ymm6,%ymm4,%ymm12
5267
5268# qhasm: v10 = x7 & mask4
5269# asm 1: vpand <x7=reg256#8,<mask4=reg256#5,>v10=reg256#16
5270# asm 2: vpand <x7=%ymm7,<mask4=%ymm4,>v10=%ymm15
5271vpand %ymm7,%ymm4,%ymm15
5272
5273# qhasm: 4x v10 <<= 1
5274# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
5275# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
5276vpsllq $1,%ymm15,%ymm15
5277
5278# qhasm: v01 = x6 & mask5
5279# asm 1: vpand <x6=reg256#7,<mask5=reg256#6,>v01=reg256#7
5280# asm 2: vpand <x6=%ymm6,<mask5=%ymm5,>v01=%ymm6
5281vpand %ymm6,%ymm5,%ymm6
5282
5283# qhasm: v11 = x7 & mask5
5284# asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8
5285# asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7
5286vpand %ymm7,%ymm5,%ymm7
5287
5288# qhasm: 4x v01 unsigned>>= 1
5289# asm 1: vpsrlq $1,<v01=reg256#7,<v01=reg256#7
5290# asm 2: vpsrlq $1,<v01=%ymm6,<v01=%ymm6
5291vpsrlq $1,%ymm6,%ymm6
5292
5293# qhasm: x6 = v00 | v10
5294# asm 1: vpor  <v00=reg256#13,<v10=reg256#16,>x6=reg256#13
5295# asm 2: vpor  <v00=%ymm12,<v10=%ymm15,>x6=%ymm12
5296vpor  %ymm12,%ymm15,%ymm12
5297
5298# qhasm: x7 = v01 | v11
5299# asm 1: vpor  <v01=reg256#7,<v11=reg256#8,>x7=reg256#7
5300# asm 2: vpor  <v01=%ymm6,<v11=%ymm7,>x7=%ymm6
5301vpor  %ymm6,%ymm7,%ymm6
5302
5303# qhasm: mem256[ input_0 + 512 ] = x0
5304# asm 1: vmovupd   <x0=reg256#10,512(<input_0=int64#1)
5305# asm 2: vmovupd   <x0=%ymm9,512(<input_0=%rdi)
5306vmovupd   %ymm9,512(%rdi)
5307
5308# qhasm: mem256[ input_0 + 544 ] = x1
5309# asm 1: vmovupd   <x1=reg256#14,544(<input_0=int64#1)
5310# asm 2: vmovupd   <x1=%ymm13,544(<input_0=%rdi)
5311vmovupd   %ymm13,544(%rdi)
5312
5313# qhasm: mem256[ input_0 + 576 ] = x2
5314# asm 1: vmovupd   <x2=reg256#15,576(<input_0=int64#1)
5315# asm 2: vmovupd   <x2=%ymm14,576(<input_0=%rdi)
5316vmovupd   %ymm14,576(%rdi)
5317
5318# qhasm: mem256[ input_0 + 608 ] = x3
5319# asm 1: vmovupd   <x3=reg256#11,608(<input_0=int64#1)
5320# asm 2: vmovupd   <x3=%ymm10,608(<input_0=%rdi)
5321vmovupd   %ymm10,608(%rdi)
5322
5323# qhasm: mem256[ input_0 + 640 ] = x4
5324# asm 1: vmovupd   <x4=reg256#12,640(<input_0=int64#1)
5325# asm 2: vmovupd   <x4=%ymm11,640(<input_0=%rdi)
5326vmovupd   %ymm11,640(%rdi)
5327
5328# qhasm: mem256[ input_0 + 672 ] = x5
5329# asm 1: vmovupd   <x5=reg256#9,672(<input_0=int64#1)
5330# asm 2: vmovupd   <x5=%ymm8,672(<input_0=%rdi)
5331vmovupd   %ymm8,672(%rdi)
5332
5333# qhasm: mem256[ input_0 + 704 ] = x6
5334# asm 1: vmovupd   <x6=reg256#13,704(<input_0=int64#1)
5335# asm 2: vmovupd   <x6=%ymm12,704(<input_0=%rdi)
5336vmovupd   %ymm12,704(%rdi)
5337
5338# qhasm: mem256[ input_0 + 736 ] = x7
5339# asm 1: vmovupd   <x7=reg256#7,736(<input_0=int64#1)
5340# asm 2: vmovupd   <x7=%ymm6,736(<input_0=%rdi)
5341vmovupd   %ymm6,736(%rdi)
5342
5343# qhasm: x0 = mem256[ input_0 + 768 ]
5344# asm 1: vmovupd   768(<input_0=int64#1),>x0=reg256#7
5345# asm 2: vmovupd   768(<input_0=%rdi),>x0=%ymm6
5346vmovupd   768(%rdi),%ymm6
5347
5348# qhasm: x1 = mem256[ input_0 + 800 ]
5349# asm 1: vmovupd   800(<input_0=int64#1),>x1=reg256#8
5350# asm 2: vmovupd   800(<input_0=%rdi),>x1=%ymm7
5351vmovupd   800(%rdi),%ymm7
5352
5353# qhasm: x2 = mem256[ input_0 + 832 ]
5354# asm 1: vmovupd   832(<input_0=int64#1),>x2=reg256#9
5355# asm 2: vmovupd   832(<input_0=%rdi),>x2=%ymm8
5356vmovupd   832(%rdi),%ymm8
5357
5358# qhasm: x3 = mem256[ input_0 + 864 ]
5359# asm 1: vmovupd   864(<input_0=int64#1),>x3=reg256#10
5360# asm 2: vmovupd   864(<input_0=%rdi),>x3=%ymm9
5361vmovupd   864(%rdi),%ymm9
5362
5363# qhasm: x4 = mem256[ input_0 + 896 ]
5364# asm 1: vmovupd   896(<input_0=int64#1),>x4=reg256#11
5365# asm 2: vmovupd   896(<input_0=%rdi),>x4=%ymm10
5366vmovupd   896(%rdi),%ymm10
5367
5368# qhasm: x5 = mem256[ input_0 + 928 ]
5369# asm 1: vmovupd   928(<input_0=int64#1),>x5=reg256#12
5370# asm 2: vmovupd   928(<input_0=%rdi),>x5=%ymm11
5371vmovupd   928(%rdi),%ymm11
5372
5373# qhasm: x6 = mem256[ input_0 + 960 ]
5374# asm 1: vmovupd   960(<input_0=int64#1),>x6=reg256#13
5375# asm 2: vmovupd   960(<input_0=%rdi),>x6=%ymm12
5376vmovupd   960(%rdi),%ymm12
5377
5378# qhasm: x7 = mem256[ input_0 + 992 ]
5379# asm 1: vmovupd   992(<input_0=int64#1),>x7=reg256#14
5380# asm 2: vmovupd   992(<input_0=%rdi),>x7=%ymm13
5381vmovupd   992(%rdi),%ymm13
5382
5383# qhasm: v00 = x0 & mask0
5384# asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15
5385# asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14
5386vpand %ymm6,%ymm0,%ymm14
5387
5388# qhasm: v10 = x4 & mask0
5389# asm 1: vpand <x4=reg256#11,<mask0=reg256#1,>v10=reg256#16
5390# asm 2: vpand <x4=%ymm10,<mask0=%ymm0,>v10=%ymm15
5391vpand %ymm10,%ymm0,%ymm15
5392
5393# qhasm: 4x v10 <<= 4
5394# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
5395# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
5396vpsllq $4,%ymm15,%ymm15
5397
5398# qhasm: v01 = x0 & mask1
5399# asm 1: vpand <x0=reg256#7,<mask1=reg256#2,>v01=reg256#7
5400# asm 2: vpand <x0=%ymm6,<mask1=%ymm1,>v01=%ymm6
5401vpand %ymm6,%ymm1,%ymm6
5402
5403# qhasm: v11 = x4 & mask1
5404# asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11
5405# asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10
5406vpand %ymm10,%ymm1,%ymm10
5407
5408# qhasm: 4x v01 unsigned>>= 4
5409# asm 1: vpsrlq $4,<v01=reg256#7,<v01=reg256#7
5410# asm 2: vpsrlq $4,<v01=%ymm6,<v01=%ymm6
5411vpsrlq $4,%ymm6,%ymm6
5412
5413# qhasm: x0 = v00 | v10
5414# asm 1: vpor  <v00=reg256#15,<v10=reg256#16,>x0=reg256#15
5415# asm 2: vpor  <v00=%ymm14,<v10=%ymm15,>x0=%ymm14
5416vpor  %ymm14,%ymm15,%ymm14
5417
5418# qhasm: x4 = v01 | v11
5419# asm 1: vpor  <v01=reg256#7,<v11=reg256#11,>x4=reg256#7
5420# asm 2: vpor  <v01=%ymm6,<v11=%ymm10,>x4=%ymm6
5421vpor  %ymm6,%ymm10,%ymm6
5422
5423# qhasm: v00 = x1 & mask0
5424# asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11
5425# asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10
5426vpand %ymm7,%ymm0,%ymm10
5427
5428# qhasm: v10 = x5 & mask0
5429# asm 1: vpand <x5=reg256#12,<mask0=reg256#1,>v10=reg256#16
5430# asm 2: vpand <x5=%ymm11,<mask0=%ymm0,>v10=%ymm15
5431vpand %ymm11,%ymm0,%ymm15
5432
5433# qhasm: 4x v10 <<= 4
5434# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
5435# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
5436vpsllq $4,%ymm15,%ymm15
5437
5438# qhasm: v01 = x1 & mask1
5439# asm 1: vpand <x1=reg256#8,<mask1=reg256#2,>v01=reg256#8
5440# asm 2: vpand <x1=%ymm7,<mask1=%ymm1,>v01=%ymm7
5441vpand %ymm7,%ymm1,%ymm7
5442
5443# qhasm: v11 = x5 & mask1
5444# asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12
5445# asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11
5446vpand %ymm11,%ymm1,%ymm11
5447
5448# qhasm: 4x v01 unsigned>>= 4
5449# asm 1: vpsrlq $4,<v01=reg256#8,<v01=reg256#8
5450# asm 2: vpsrlq $4,<v01=%ymm7,<v01=%ymm7
5451vpsrlq $4,%ymm7,%ymm7
5452
5453# qhasm: x1 = v00 | v10
5454# asm 1: vpor  <v00=reg256#11,<v10=reg256#16,>x1=reg256#11
5455# asm 2: vpor  <v00=%ymm10,<v10=%ymm15,>x1=%ymm10
5456vpor  %ymm10,%ymm15,%ymm10
5457
5458# qhasm: x5 = v01 | v11
5459# asm 1: vpor  <v01=reg256#8,<v11=reg256#12,>x5=reg256#8
5460# asm 2: vpor  <v01=%ymm7,<v11=%ymm11,>x5=%ymm7
5461vpor  %ymm7,%ymm11,%ymm7
5462
5463# qhasm: v00 = x2 & mask0
5464# asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12
5465# asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11
5466vpand %ymm8,%ymm0,%ymm11
5467
5468# qhasm: v10 = x6 & mask0
5469# asm 1: vpand <x6=reg256#13,<mask0=reg256#1,>v10=reg256#16
5470# asm 2: vpand <x6=%ymm12,<mask0=%ymm0,>v10=%ymm15
5471vpand %ymm12,%ymm0,%ymm15
5472
5473# qhasm: 4x v10 <<= 4
5474# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
5475# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
5476vpsllq $4,%ymm15,%ymm15
5477
5478# qhasm: v01 = x2 & mask1
5479# asm 1: vpand <x2=reg256#9,<mask1=reg256#2,>v01=reg256#9
5480# asm 2: vpand <x2=%ymm8,<mask1=%ymm1,>v01=%ymm8
5481vpand %ymm8,%ymm1,%ymm8
5482
5483# qhasm: v11 = x6 & mask1
5484# asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13
5485# asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12
5486vpand %ymm12,%ymm1,%ymm12
5487
5488# qhasm: 4x v01 unsigned>>= 4
5489# asm 1: vpsrlq $4,<v01=reg256#9,<v01=reg256#9
5490# asm 2: vpsrlq $4,<v01=%ymm8,<v01=%ymm8
5491vpsrlq $4,%ymm8,%ymm8
5492
5493# qhasm: x2 = v00 | v10
5494# asm 1: vpor  <v00=reg256#12,<v10=reg256#16,>x2=reg256#12
5495# asm 2: vpor  <v00=%ymm11,<v10=%ymm15,>x2=%ymm11
5496vpor  %ymm11,%ymm15,%ymm11
5497
5498# qhasm: x6 = v01 | v11
5499# asm 1: vpor  <v01=reg256#9,<v11=reg256#13,>x6=reg256#9
5500# asm 2: vpor  <v01=%ymm8,<v11=%ymm12,>x6=%ymm8
5501vpor  %ymm8,%ymm12,%ymm8
5502
5503# qhasm: v00 = x3 & mask0
5504# asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13
5505# asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12
5506vpand %ymm9,%ymm0,%ymm12
5507
5508# qhasm: v10 = x7 & mask0
5509# asm 1: vpand <x7=reg256#14,<mask0=reg256#1,>v10=reg256#16
5510# asm 2: vpand <x7=%ymm13,<mask0=%ymm0,>v10=%ymm15
5511vpand %ymm13,%ymm0,%ymm15
5512
5513# qhasm: 4x v10 <<= 4
5514# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
5515# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
5516vpsllq $4,%ymm15,%ymm15
5517
5518# qhasm: v01 = x3 & mask1
5519# asm 1: vpand <x3=reg256#10,<mask1=reg256#2,>v01=reg256#10
5520# asm 2: vpand <x3=%ymm9,<mask1=%ymm1,>v01=%ymm9
5521vpand %ymm9,%ymm1,%ymm9
5522
5523# qhasm: v11 = x7 & mask1
5524# asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14
5525# asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13
5526vpand %ymm13,%ymm1,%ymm13
5527
5528# qhasm: 4x v01 unsigned>>= 4
5529# asm 1: vpsrlq $4,<v01=reg256#10,<v01=reg256#10
5530# asm 2: vpsrlq $4,<v01=%ymm9,<v01=%ymm9
5531vpsrlq $4,%ymm9,%ymm9
5532
5533# qhasm: x3 = v00 | v10
5534# asm 1: vpor  <v00=reg256#13,<v10=reg256#16,>x3=reg256#13
5535# asm 2: vpor  <v00=%ymm12,<v10=%ymm15,>x3=%ymm12
5536vpor  %ymm12,%ymm15,%ymm12
5537
5538# qhasm: x7 = v01 | v11
5539# asm 1: vpor  <v01=reg256#10,<v11=reg256#14,>x7=reg256#10
5540# asm 2: vpor  <v01=%ymm9,<v11=%ymm13,>x7=%ymm9
5541vpor  %ymm9,%ymm13,%ymm9
5542
5543# qhasm: v00 = x0 & mask2
5544# asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14
5545# asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13
5546vpand %ymm14,%ymm2,%ymm13
5547
5548# qhasm: v10 = x2 & mask2
5549# asm 1: vpand <x2=reg256#12,<mask2=reg256#3,>v10=reg256#16
5550# asm 2: vpand <x2=%ymm11,<mask2=%ymm2,>v10=%ymm15
5551vpand %ymm11,%ymm2,%ymm15
5552
5553# qhasm: 4x v10 <<= 2
5554# asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
5555# asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
5556vpsllq $2,%ymm15,%ymm15
5557
5558# qhasm: v01 = x0 & mask3
5559# asm 1: vpand <x0=reg256#15,<mask3=reg256#4,>v01=reg256#15
5560# asm 2: vpand <x0=%ymm14,<mask3=%ymm3,>v01=%ymm14
5561vpand %ymm14,%ymm3,%ymm14
5562
5563# qhasm: v11 = x2 & mask3
5564# asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12
5565# asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11
5566vpand %ymm11,%ymm3,%ymm11
5567
5568# qhasm: 4x v01 unsigned>>= 2
5569# asm 1: vpsrlq $2,<v01=reg256#15,<v01=reg256#15
5570# asm 2: vpsrlq $2,<v01=%ymm14,<v01=%ymm14
5571vpsrlq $2,%ymm14,%ymm14
5572
5573# qhasm: x0 = v00 | v10
5574# asm 1: vpor  <v00=reg256#14,<v10=reg256#16,>x0=reg256#14
5575# asm 2: vpor  <v00=%ymm13,<v10=%ymm15,>x0=%ymm13
5576vpor  %ymm13,%ymm15,%ymm13
5577
5578# qhasm: x2 = v01 | v11
5579# asm 1: vpor  <v01=reg256#15,<v11=reg256#12,>x2=reg256#12
5580# asm 2: vpor  <v01=%ymm14,<v11=%ymm11,>x2=%ymm11
5581vpor  %ymm14,%ymm11,%ymm11
5582
5583# qhasm: v00 = x1 & mask2
5584# asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15
5585# asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14
5586vpand %ymm10,%ymm2,%ymm14
5587
5588# qhasm: v10 = x3 & mask2
5589# asm 1: vpand <x3=reg256#13,<mask2=reg256#3,>v10=reg256#16
5590# asm 2: vpand <x3=%ymm12,<mask2=%ymm2,>v10=%ymm15
5591vpand %ymm12,%ymm2,%ymm15
5592
5593# qhasm: 4x v10 <<= 2
5594# asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
5595# asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
5596vpsllq $2,%ymm15,%ymm15
5597
5598# qhasm: v01 = x1 & mask3
5599# asm 1: vpand <x1=reg256#11,<mask3=reg256#4,>v01=reg256#11
5600# asm 2: vpand <x1=%ymm10,<mask3=%ymm3,>v01=%ymm10
5601vpand %ymm10,%ymm3,%ymm10
5602
5603# qhasm: v11 = x3 & mask3
5604# asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13
5605# asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12
5606vpand %ymm12,%ymm3,%ymm12
5607
5608# qhasm: 4x v01 unsigned>>= 2
5609# asm 1: vpsrlq $2,<v01=reg256#11,<v01=reg256#11
5610# asm 2: vpsrlq $2,<v01=%ymm10,<v01=%ymm10
5611vpsrlq $2,%ymm10,%ymm10
5612
5613# qhasm: x1 = v00 | v10
5614# asm 1: vpor  <v00=reg256#15,<v10=reg256#16,>x1=reg256#15
5615# asm 2: vpor  <v00=%ymm14,<v10=%ymm15,>x1=%ymm14
5616vpor  %ymm14,%ymm15,%ymm14
5617
5618# qhasm: x3 = v01 | v11
5619# asm 1: vpor  <v01=reg256#11,<v11=reg256#13,>x3=reg256#11
5620# asm 2: vpor  <v01=%ymm10,<v11=%ymm12,>x3=%ymm10
5621vpor  %ymm10,%ymm12,%ymm10
5622
5623# qhasm: v00 = x4 & mask2
5624# asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13
5625# asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12
5626vpand %ymm6,%ymm2,%ymm12
5627
5628# qhasm: v10 = x6 & mask2
5629# asm 1: vpand <x6=reg256#9,<mask2=reg256#3,>v10=reg256#16
5630# asm 2: vpand <x6=%ymm8,<mask2=%ymm2,>v10=%ymm15
5631vpand %ymm8,%ymm2,%ymm15
5632
5633# qhasm: 4x v10 <<= 2
5634# asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
5635# asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
5636vpsllq $2,%ymm15,%ymm15
5637
5638# qhasm: v01 = x4 & mask3
5639# asm 1: vpand <x4=reg256#7,<mask3=reg256#4,>v01=reg256#7
5640# asm 2: vpand <x4=%ymm6,<mask3=%ymm3,>v01=%ymm6
5641vpand %ymm6,%ymm3,%ymm6
5642
5643# qhasm: v11 = x6 & mask3
5644# asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9
5645# asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8
5646vpand %ymm8,%ymm3,%ymm8
5647
5648# qhasm: 4x v01 unsigned>>= 2
5649# asm 1: vpsrlq $2,<v01=reg256#7,<v01=reg256#7
5650# asm 2: vpsrlq $2,<v01=%ymm6,<v01=%ymm6
5651vpsrlq $2,%ymm6,%ymm6
5652
5653# qhasm: x4 = v00 | v10
5654# asm 1: vpor  <v00=reg256#13,<v10=reg256#16,>x4=reg256#13
5655# asm 2: vpor  <v00=%ymm12,<v10=%ymm15,>x4=%ymm12
5656vpor  %ymm12,%ymm15,%ymm12
5657
5658# qhasm: x6 = v01 | v11
5659# asm 1: vpor  <v01=reg256#7,<v11=reg256#9,>x6=reg256#7
5660# asm 2: vpor  <v01=%ymm6,<v11=%ymm8,>x6=%ymm6
5661vpor  %ymm6,%ymm8,%ymm6
5662
5663# qhasm: v00 = x5 & mask2
5664# asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9
5665# asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8
5666vpand %ymm7,%ymm2,%ymm8
5667
5668# qhasm: v10 = x7 & mask2
5669# asm 1: vpand <x7=reg256#10,<mask2=reg256#3,>v10=reg256#16
5670# asm 2: vpand <x7=%ymm9,<mask2=%ymm2,>v10=%ymm15
5671vpand %ymm9,%ymm2,%ymm15
5672
5673# qhasm: 4x v10 <<= 2
5674# asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
5675# asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
5676vpsllq $2,%ymm15,%ymm15
5677
5678# qhasm: v01 = x5 & mask3
5679# asm 1: vpand <x5=reg256#8,<mask3=reg256#4,>v01=reg256#8
5680# asm 2: vpand <x5=%ymm7,<mask3=%ymm3,>v01=%ymm7
5681vpand %ymm7,%ymm3,%ymm7
5682
5683# qhasm: v11 = x7 & mask3
5684# asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10
5685# asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9
5686vpand %ymm9,%ymm3,%ymm9
5687
5688# qhasm: 4x v01 unsigned>>= 2
5689# asm 1: vpsrlq $2,<v01=reg256#8,<v01=reg256#8
5690# asm 2: vpsrlq $2,<v01=%ymm7,<v01=%ymm7
5691vpsrlq $2,%ymm7,%ymm7
5692
5693# qhasm: x5 = v00 | v10
5694# asm 1: vpor  <v00=reg256#9,<v10=reg256#16,>x5=reg256#9
5695# asm 2: vpor  <v00=%ymm8,<v10=%ymm15,>x5=%ymm8
5696vpor  %ymm8,%ymm15,%ymm8
5697
5698# qhasm: x7 = v01 | v11
5699# asm 1: vpor  <v01=reg256#8,<v11=reg256#10,>x7=reg256#8
5700# asm 2: vpor  <v01=%ymm7,<v11=%ymm9,>x7=%ymm7
5701vpor  %ymm7,%ymm9,%ymm7
5702
5703# qhasm: v00 = x0 & mask4
5704# asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10
5705# asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9
5706vpand %ymm13,%ymm4,%ymm9
5707
5708# qhasm: v10 = x1 & mask4
5709# asm 1: vpand <x1=reg256#15,<mask4=reg256#5,>v10=reg256#16
5710# asm 2: vpand <x1=%ymm14,<mask4=%ymm4,>v10=%ymm15
5711vpand %ymm14,%ymm4,%ymm15
5712
5713# qhasm: 4x v10 <<= 1
5714# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
5715# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
5716vpsllq $1,%ymm15,%ymm15
5717
5718# qhasm: v01 = x0 & mask5
5719# asm 1: vpand <x0=reg256#14,<mask5=reg256#6,>v01=reg256#14
5720# asm 2: vpand <x0=%ymm13,<mask5=%ymm5,>v01=%ymm13
5721vpand %ymm13,%ymm5,%ymm13
5722
5723# qhasm: v11 = x1 & mask5
5724# asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15
5725# asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14
5726vpand %ymm14,%ymm5,%ymm14
5727
5728# qhasm: 4x v01 unsigned>>= 1
5729# asm 1: vpsrlq $1,<v01=reg256#14,<v01=reg256#14
5730# asm 2: vpsrlq $1,<v01=%ymm13,<v01=%ymm13
5731vpsrlq $1,%ymm13,%ymm13
5732
5733# qhasm: x0 = v00 | v10
5734# asm 1: vpor  <v00=reg256#10,<v10=reg256#16,>x0=reg256#10
5735# asm 2: vpor  <v00=%ymm9,<v10=%ymm15,>x0=%ymm9
5736vpor  %ymm9,%ymm15,%ymm9
5737
5738# qhasm: x1 = v01 | v11
5739# asm 1: vpor  <v01=reg256#14,<v11=reg256#15,>x1=reg256#14
5740# asm 2: vpor  <v01=%ymm13,<v11=%ymm14,>x1=%ymm13
5741vpor  %ymm13,%ymm14,%ymm13
5742
5743# qhasm: v00 = x2 & mask4
5744# asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15
5745# asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14
5746vpand %ymm11,%ymm4,%ymm14
5747
5748# qhasm: v10 = x3 & mask4
5749# asm 1: vpand <x3=reg256#11,<mask4=reg256#5,>v10=reg256#16
5750# asm 2: vpand <x3=%ymm10,<mask4=%ymm4,>v10=%ymm15
5751vpand %ymm10,%ymm4,%ymm15
5752
5753# qhasm: 4x v10 <<= 1
5754# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
5755# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
5756vpsllq $1,%ymm15,%ymm15
5757
5758# qhasm: v01 = x2 & mask5
5759# asm 1: vpand <x2=reg256#12,<mask5=reg256#6,>v01=reg256#12
5760# asm 2: vpand <x2=%ymm11,<mask5=%ymm5,>v01=%ymm11
5761vpand %ymm11,%ymm5,%ymm11
5762
5763# qhasm: v11 = x3 & mask5
5764# asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11
5765# asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10
5766vpand %ymm10,%ymm5,%ymm10
5767
5768# qhasm: 4x v01 unsigned>>= 1
5769# asm 1: vpsrlq $1,<v01=reg256#12,<v01=reg256#12
5770# asm 2: vpsrlq $1,<v01=%ymm11,<v01=%ymm11
5771vpsrlq $1,%ymm11,%ymm11
5772
5773# qhasm: x2 = v00 | v10
5774# asm 1: vpor  <v00=reg256#15,<v10=reg256#16,>x2=reg256#15
5775# asm 2: vpor  <v00=%ymm14,<v10=%ymm15,>x2=%ymm14
5776vpor  %ymm14,%ymm15,%ymm14
5777
5778# qhasm: x3 = v01 | v11
5779# asm 1: vpor  <v01=reg256#12,<v11=reg256#11,>x3=reg256#11
5780# asm 2: vpor  <v01=%ymm11,<v11=%ymm10,>x3=%ymm10
5781vpor  %ymm11,%ymm10,%ymm10
5782
5783# qhasm: v00 = x4 & mask4
5784# asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12
5785# asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11
5786vpand %ymm12,%ymm4,%ymm11
5787
5788# qhasm: v10 = x5 & mask4
5789# asm 1: vpand <x5=reg256#9,<mask4=reg256#5,>v10=reg256#16
5790# asm 2: vpand <x5=%ymm8,<mask4=%ymm4,>v10=%ymm15
5791vpand %ymm8,%ymm4,%ymm15
5792
5793# qhasm: 4x v10 <<= 1
5794# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
5795# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
5796vpsllq $1,%ymm15,%ymm15
5797
5798# qhasm: v01 = x4 & mask5
5799# asm 1: vpand <x4=reg256#13,<mask5=reg256#6,>v01=reg256#13
5800# asm 2: vpand <x4=%ymm12,<mask5=%ymm5,>v01=%ymm12
5801vpand %ymm12,%ymm5,%ymm12
5802
5803# qhasm: v11 = x5 & mask5
5804# asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9
5805# asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8
5806vpand %ymm8,%ymm5,%ymm8
5807
5808# qhasm: 4x v01 unsigned>>= 1
5809# asm 1: vpsrlq $1,<v01=reg256#13,<v01=reg256#13
5810# asm 2: vpsrlq $1,<v01=%ymm12,<v01=%ymm12
5811vpsrlq $1,%ymm12,%ymm12
5812
5813# qhasm: x4 = v00 | v10
5814# asm 1: vpor  <v00=reg256#12,<v10=reg256#16,>x4=reg256#12
5815# asm 2: vpor  <v00=%ymm11,<v10=%ymm15,>x4=%ymm11
5816vpor  %ymm11,%ymm15,%ymm11
5817
5818# qhasm: x5 = v01 | v11
5819# asm 1: vpor  <v01=reg256#13,<v11=reg256#9,>x5=reg256#9
5820# asm 2: vpor  <v01=%ymm12,<v11=%ymm8,>x5=%ymm8
5821vpor  %ymm12,%ymm8,%ymm8
5822
5823# qhasm: v00 = x6 & mask4
5824# asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13
5825# asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12
5826vpand %ymm6,%ymm4,%ymm12
5827
5828# qhasm: v10 = x7 & mask4
5829# asm 1: vpand <x7=reg256#8,<mask4=reg256#5,>v10=reg256#16
5830# asm 2: vpand <x7=%ymm7,<mask4=%ymm4,>v10=%ymm15
5831vpand %ymm7,%ymm4,%ymm15
5832
5833# qhasm: 4x v10 <<= 1
5834# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
5835# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
5836vpsllq $1,%ymm15,%ymm15
5837
5838# qhasm: v01 = x6 & mask5
5839# asm 1: vpand <x6=reg256#7,<mask5=reg256#6,>v01=reg256#7
5840# asm 2: vpand <x6=%ymm6,<mask5=%ymm5,>v01=%ymm6
5841vpand %ymm6,%ymm5,%ymm6
5842
5843# qhasm: v11 = x7 & mask5
5844# asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8
5845# asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7
5846vpand %ymm7,%ymm5,%ymm7
5847
5848# qhasm: 4x v01 unsigned>>= 1
5849# asm 1: vpsrlq $1,<v01=reg256#7,<v01=reg256#7
5850# asm 2: vpsrlq $1,<v01=%ymm6,<v01=%ymm6
5851vpsrlq $1,%ymm6,%ymm6
5852
5853# qhasm: x6 = v00 | v10
5854# asm 1: vpor  <v00=reg256#13,<v10=reg256#16,>x6=reg256#13
5855# asm 2: vpor  <v00=%ymm12,<v10=%ymm15,>x6=%ymm12
5856vpor  %ymm12,%ymm15,%ymm12
5857
5858# qhasm: x7 = v01 | v11
5859# asm 1: vpor  <v01=reg256#7,<v11=reg256#8,>x7=reg256#7
5860# asm 2: vpor  <v01=%ymm6,<v11=%ymm7,>x7=%ymm6
5861vpor  %ymm6,%ymm7,%ymm6
5862
5863# qhasm: mem256[ input_0 + 768 ] = x0
5864# asm 1: vmovupd   <x0=reg256#10,768(<input_0=int64#1)
5865# asm 2: vmovupd   <x0=%ymm9,768(<input_0=%rdi)
5866vmovupd   %ymm9,768(%rdi)
5867
5868# qhasm: mem256[ input_0 + 800 ] = x1
5869# asm 1: vmovupd   <x1=reg256#14,800(<input_0=int64#1)
5870# asm 2: vmovupd   <x1=%ymm13,800(<input_0=%rdi)
5871vmovupd   %ymm13,800(%rdi)
5872
5873# qhasm: mem256[ input_0 + 832 ] = x2
5874# asm 1: vmovupd   <x2=reg256#15,832(<input_0=int64#1)
5875# asm 2: vmovupd   <x2=%ymm14,832(<input_0=%rdi)
5876vmovupd   %ymm14,832(%rdi)
5877
5878# qhasm: mem256[ input_0 + 864 ] = x3
5879# asm 1: vmovupd   <x3=reg256#11,864(<input_0=int64#1)
5880# asm 2: vmovupd   <x3=%ymm10,864(<input_0=%rdi)
5881vmovupd   %ymm10,864(%rdi)
5882
5883# qhasm: mem256[ input_0 + 896 ] = x4
5884# asm 1: vmovupd   <x4=reg256#12,896(<input_0=int64#1)
5885# asm 2: vmovupd   <x4=%ymm11,896(<input_0=%rdi)
5886vmovupd   %ymm11,896(%rdi)
5887
5888# qhasm: mem256[ input_0 + 928 ] = x5
5889# asm 1: vmovupd   <x5=reg256#9,928(<input_0=int64#1)
5890# asm 2: vmovupd   <x5=%ymm8,928(<input_0=%rdi)
5891vmovupd   %ymm8,928(%rdi)
5892
5893# qhasm: mem256[ input_0 + 960 ] = x6
5894# asm 1: vmovupd   <x6=reg256#13,960(<input_0=int64#1)
5895# asm 2: vmovupd   <x6=%ymm12,960(<input_0=%rdi)
5896vmovupd   %ymm12,960(%rdi)
5897
5898# qhasm: mem256[ input_0 + 992 ] = x7
5899# asm 1: vmovupd   <x7=reg256#7,992(<input_0=int64#1)
5900# asm 2: vmovupd   <x7=%ymm6,992(<input_0=%rdi)
5901vmovupd   %ymm6,992(%rdi)
5902
5903# qhasm: x0 = mem256[ input_0 + 1024 ]
5904# asm 1: vmovupd   1024(<input_0=int64#1),>x0=reg256#7
5905# asm 2: vmovupd   1024(<input_0=%rdi),>x0=%ymm6
5906vmovupd   1024(%rdi),%ymm6
5907
5908# qhasm: x1 = mem256[ input_0 + 1056 ]
5909# asm 1: vmovupd   1056(<input_0=int64#1),>x1=reg256#8
5910# asm 2: vmovupd   1056(<input_0=%rdi),>x1=%ymm7
5911vmovupd   1056(%rdi),%ymm7
5912
5913# qhasm: x2 = mem256[ input_0 + 1088 ]
5914# asm 1: vmovupd   1088(<input_0=int64#1),>x2=reg256#9
5915# asm 2: vmovupd   1088(<input_0=%rdi),>x2=%ymm8
5916vmovupd   1088(%rdi),%ymm8
5917
5918# qhasm: x3 = mem256[ input_0 + 1120 ]
5919# asm 1: vmovupd   1120(<input_0=int64#1),>x3=reg256#10
5920# asm 2: vmovupd   1120(<input_0=%rdi),>x3=%ymm9
5921vmovupd   1120(%rdi),%ymm9
5922
5923# qhasm: x4 = mem256[ input_0 + 1152 ]
5924# asm 1: vmovupd   1152(<input_0=int64#1),>x4=reg256#11
5925# asm 2: vmovupd   1152(<input_0=%rdi),>x4=%ymm10
5926vmovupd   1152(%rdi),%ymm10
5927
5928# qhasm: x5 = mem256[ input_0 + 1184 ]
5929# asm 1: vmovupd   1184(<input_0=int64#1),>x5=reg256#12
5930# asm 2: vmovupd   1184(<input_0=%rdi),>x5=%ymm11
5931vmovupd   1184(%rdi),%ymm11
5932
5933# qhasm: x6 = mem256[ input_0 + 1216 ]
5934# asm 1: vmovupd   1216(<input_0=int64#1),>x6=reg256#13
5935# asm 2: vmovupd   1216(<input_0=%rdi),>x6=%ymm12
5936vmovupd   1216(%rdi),%ymm12
5937
5938# qhasm: x7 = mem256[ input_0 + 1248 ]
5939# asm 1: vmovupd   1248(<input_0=int64#1),>x7=reg256#14
5940# asm 2: vmovupd   1248(<input_0=%rdi),>x7=%ymm13
5941vmovupd   1248(%rdi),%ymm13
5942
5943# qhasm: v00 = x0 & mask0
5944# asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15
5945# asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14
5946vpand %ymm6,%ymm0,%ymm14
5947
5948# qhasm: v10 = x4 & mask0
5949# asm 1: vpand <x4=reg256#11,<mask0=reg256#1,>v10=reg256#16
5950# asm 2: vpand <x4=%ymm10,<mask0=%ymm0,>v10=%ymm15
5951vpand %ymm10,%ymm0,%ymm15
5952
5953# qhasm: 4x v10 <<= 4
5954# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
5955# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
5956vpsllq $4,%ymm15,%ymm15
5957
5958# qhasm: v01 = x0 & mask1
5959# asm 1: vpand <x0=reg256#7,<mask1=reg256#2,>v01=reg256#7
5960# asm 2: vpand <x0=%ymm6,<mask1=%ymm1,>v01=%ymm6
5961vpand %ymm6,%ymm1,%ymm6
5962
5963# qhasm: v11 = x4 & mask1
5964# asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11
5965# asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10
5966vpand %ymm10,%ymm1,%ymm10
5967
5968# qhasm: 4x v01 unsigned>>= 4
5969# asm 1: vpsrlq $4,<v01=reg256#7,<v01=reg256#7
5970# asm 2: vpsrlq $4,<v01=%ymm6,<v01=%ymm6
5971vpsrlq $4,%ymm6,%ymm6
5972
5973# qhasm: x0 = v00 | v10
5974# asm 1: vpor  <v00=reg256#15,<v10=reg256#16,>x0=reg256#15
5975# asm 2: vpor  <v00=%ymm14,<v10=%ymm15,>x0=%ymm14
5976vpor  %ymm14,%ymm15,%ymm14
5977
5978# qhasm: x4 = v01 | v11
5979# asm 1: vpor  <v01=reg256#7,<v11=reg256#11,>x4=reg256#7
5980# asm 2: vpor  <v01=%ymm6,<v11=%ymm10,>x4=%ymm6
5981vpor  %ymm6,%ymm10,%ymm6
5982
5983# qhasm: v00 = x1 & mask0
5984# asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11
5985# asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10
5986vpand %ymm7,%ymm0,%ymm10
5987
5988# qhasm: v10 = x5 & mask0
5989# asm 1: vpand <x5=reg256#12,<mask0=reg256#1,>v10=reg256#16
5990# asm 2: vpand <x5=%ymm11,<mask0=%ymm0,>v10=%ymm15
5991vpand %ymm11,%ymm0,%ymm15
5992
5993# qhasm: 4x v10 <<= 4
5994# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
5995# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
5996vpsllq $4,%ymm15,%ymm15
5997
5998# qhasm: v01 = x1 & mask1
5999# asm 1: vpand <x1=reg256#8,<mask1=reg256#2,>v01=reg256#8
6000# asm 2: vpand <x1=%ymm7,<mask1=%ymm1,>v01=%ymm7
6001vpand %ymm7,%ymm1,%ymm7
6002
6003# qhasm: v11 = x5 & mask1
6004# asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12
6005# asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11
6006vpand %ymm11,%ymm1,%ymm11
6007
6008# qhasm: 4x v01 unsigned>>= 4
6009# asm 1: vpsrlq $4,<v01=reg256#8,<v01=reg256#8
6010# asm 2: vpsrlq $4,<v01=%ymm7,<v01=%ymm7
6011vpsrlq $4,%ymm7,%ymm7
6012
6013# qhasm: x1 = v00 | v10
6014# asm 1: vpor  <v00=reg256#11,<v10=reg256#16,>x1=reg256#11
6015# asm 2: vpor  <v00=%ymm10,<v10=%ymm15,>x1=%ymm10
6016vpor  %ymm10,%ymm15,%ymm10
6017
6018# qhasm: x5 = v01 | v11
6019# asm 1: vpor  <v01=reg256#8,<v11=reg256#12,>x5=reg256#8
6020# asm 2: vpor  <v01=%ymm7,<v11=%ymm11,>x5=%ymm7
6021vpor  %ymm7,%ymm11,%ymm7
6022
6023# qhasm: v00 = x2 & mask0
6024# asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12
6025# asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11
6026vpand %ymm8,%ymm0,%ymm11
6027
6028# qhasm: v10 = x6 & mask0
6029# asm 1: vpand <x6=reg256#13,<mask0=reg256#1,>v10=reg256#16
6030# asm 2: vpand <x6=%ymm12,<mask0=%ymm0,>v10=%ymm15
6031vpand %ymm12,%ymm0,%ymm15
6032
6033# qhasm: 4x v10 <<= 4
6034# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
6035# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
6036vpsllq $4,%ymm15,%ymm15
6037
6038# qhasm: v01 = x2 & mask1
6039# asm 1: vpand <x2=reg256#9,<mask1=reg256#2,>v01=reg256#9
6040# asm 2: vpand <x2=%ymm8,<mask1=%ymm1,>v01=%ymm8
6041vpand %ymm8,%ymm1,%ymm8
6042
6043# qhasm: v11 = x6 & mask1
6044# asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13
6045# asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12
6046vpand %ymm12,%ymm1,%ymm12
6047
6048# qhasm: 4x v01 unsigned>>= 4
6049# asm 1: vpsrlq $4,<v01=reg256#9,<v01=reg256#9
6050# asm 2: vpsrlq $4,<v01=%ymm8,<v01=%ymm8
6051vpsrlq $4,%ymm8,%ymm8
6052
6053# qhasm: x2 = v00 | v10
6054# asm 1: vpor  <v00=reg256#12,<v10=reg256#16,>x2=reg256#12
6055# asm 2: vpor  <v00=%ymm11,<v10=%ymm15,>x2=%ymm11
6056vpor  %ymm11,%ymm15,%ymm11
6057
6058# qhasm: x6 = v01 | v11
6059# asm 1: vpor  <v01=reg256#9,<v11=reg256#13,>x6=reg256#9
6060# asm 2: vpor  <v01=%ymm8,<v11=%ymm12,>x6=%ymm8
6061vpor  %ymm8,%ymm12,%ymm8
6062
6063# qhasm: v00 = x3 & mask0
6064# asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13
6065# asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12
6066vpand %ymm9,%ymm0,%ymm12
6067
6068# qhasm: v10 = x7 & mask0
6069# asm 1: vpand <x7=reg256#14,<mask0=reg256#1,>v10=reg256#16
6070# asm 2: vpand <x7=%ymm13,<mask0=%ymm0,>v10=%ymm15
6071vpand %ymm13,%ymm0,%ymm15
6072
6073# qhasm: 4x v10 <<= 4
6074# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
6075# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
6076vpsllq $4,%ymm15,%ymm15
6077
6078# qhasm: v01 = x3 & mask1
6079# asm 1: vpand <x3=reg256#10,<mask1=reg256#2,>v01=reg256#10
6080# asm 2: vpand <x3=%ymm9,<mask1=%ymm1,>v01=%ymm9
6081vpand %ymm9,%ymm1,%ymm9
6082
6083# qhasm: v11 = x7 & mask1
6084# asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14
6085# asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13
6086vpand %ymm13,%ymm1,%ymm13
6087
6088# qhasm: 4x v01 unsigned>>= 4
6089# asm 1: vpsrlq $4,<v01=reg256#10,<v01=reg256#10
6090# asm 2: vpsrlq $4,<v01=%ymm9,<v01=%ymm9
6091vpsrlq $4,%ymm9,%ymm9
6092
6093# qhasm: x3 = v00 | v10
6094# asm 1: vpor  <v00=reg256#13,<v10=reg256#16,>x3=reg256#13
6095# asm 2: vpor  <v00=%ymm12,<v10=%ymm15,>x3=%ymm12
6096vpor  %ymm12,%ymm15,%ymm12
6097
6098# qhasm: x7 = v01 | v11
6099# asm 1: vpor  <v01=reg256#10,<v11=reg256#14,>x7=reg256#10
6100# asm 2: vpor  <v01=%ymm9,<v11=%ymm13,>x7=%ymm9
6101vpor  %ymm9,%ymm13,%ymm9
6102
6103# qhasm: v00 = x0 & mask2
6104# asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14
6105# asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13
6106vpand %ymm14,%ymm2,%ymm13
6107
6108# qhasm: v10 = x2 & mask2
6109# asm 1: vpand <x2=reg256#12,<mask2=reg256#3,>v10=reg256#16
6110# asm 2: vpand <x2=%ymm11,<mask2=%ymm2,>v10=%ymm15
6111vpand %ymm11,%ymm2,%ymm15
6112
6113# qhasm: 4x v10 <<= 2
6114# asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
6115# asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
6116vpsllq $2,%ymm15,%ymm15
6117
6118# qhasm: v01 = x0 & mask3
6119# asm 1: vpand <x0=reg256#15,<mask3=reg256#4,>v01=reg256#15
6120# asm 2: vpand <x0=%ymm14,<mask3=%ymm3,>v01=%ymm14
6121vpand %ymm14,%ymm3,%ymm14
6122
6123# qhasm: v11 = x2 & mask3
6124# asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12
6125# asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11
6126vpand %ymm11,%ymm3,%ymm11
6127
6128# qhasm: 4x v01 unsigned>>= 2
6129# asm 1: vpsrlq $2,<v01=reg256#15,<v01=reg256#15
6130# asm 2: vpsrlq $2,<v01=%ymm14,<v01=%ymm14
6131vpsrlq $2,%ymm14,%ymm14
6132
6133# qhasm: x0 = v00 | v10
6134# asm 1: vpor  <v00=reg256#14,<v10=reg256#16,>x0=reg256#14
6135# asm 2: vpor  <v00=%ymm13,<v10=%ymm15,>x0=%ymm13
6136vpor  %ymm13,%ymm15,%ymm13
6137
6138# qhasm: x2 = v01 | v11
6139# asm 1: vpor  <v01=reg256#15,<v11=reg256#12,>x2=reg256#12
6140# asm 2: vpor  <v01=%ymm14,<v11=%ymm11,>x2=%ymm11
6141vpor  %ymm14,%ymm11,%ymm11
6142
6143# qhasm: v00 = x1 & mask2
6144# asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15
6145# asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14
6146vpand %ymm10,%ymm2,%ymm14
6147
6148# qhasm: v10 = x3 & mask2
6149# asm 1: vpand <x3=reg256#13,<mask2=reg256#3,>v10=reg256#16
6150# asm 2: vpand <x3=%ymm12,<mask2=%ymm2,>v10=%ymm15
6151vpand %ymm12,%ymm2,%ymm15
6152
6153# qhasm: 4x v10 <<= 2
6154# asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
6155# asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
6156vpsllq $2,%ymm15,%ymm15
6157
6158# qhasm: v01 = x1 & mask3
6159# asm 1: vpand <x1=reg256#11,<mask3=reg256#4,>v01=reg256#11
6160# asm 2: vpand <x1=%ymm10,<mask3=%ymm3,>v01=%ymm10
6161vpand %ymm10,%ymm3,%ymm10
6162
6163# qhasm: v11 = x3 & mask3
6164# asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13
6165# asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12
6166vpand %ymm12,%ymm3,%ymm12
6167
6168# qhasm: 4x v01 unsigned>>= 2
6169# asm 1: vpsrlq $2,<v01=reg256#11,<v01=reg256#11
6170# asm 2: vpsrlq $2,<v01=%ymm10,<v01=%ymm10
6171vpsrlq $2,%ymm10,%ymm10
6172
6173# qhasm: x1 = v00 | v10
6174# asm 1: vpor  <v00=reg256#15,<v10=reg256#16,>x1=reg256#15
6175# asm 2: vpor  <v00=%ymm14,<v10=%ymm15,>x1=%ymm14
6176vpor  %ymm14,%ymm15,%ymm14
6177
6178# qhasm: x3 = v01 | v11
6179# asm 1: vpor  <v01=reg256#11,<v11=reg256#13,>x3=reg256#11
6180# asm 2: vpor  <v01=%ymm10,<v11=%ymm12,>x3=%ymm10
6181vpor  %ymm10,%ymm12,%ymm10
6182
6183# qhasm: v00 = x4 & mask2
6184# asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13
6185# asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12
6186vpand %ymm6,%ymm2,%ymm12
6187
6188# qhasm: v10 = x6 & mask2
6189# asm 1: vpand <x6=reg256#9,<mask2=reg256#3,>v10=reg256#16
6190# asm 2: vpand <x6=%ymm8,<mask2=%ymm2,>v10=%ymm15
6191vpand %ymm8,%ymm2,%ymm15
6192
6193# qhasm: 4x v10 <<= 2
6194# asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
6195# asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
6196vpsllq $2,%ymm15,%ymm15
6197
6198# qhasm: v01 = x4 & mask3
6199# asm 1: vpand <x4=reg256#7,<mask3=reg256#4,>v01=reg256#7
6200# asm 2: vpand <x4=%ymm6,<mask3=%ymm3,>v01=%ymm6
6201vpand %ymm6,%ymm3,%ymm6
6202
6203# qhasm: v11 = x6 & mask3
6204# asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9
6205# asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8
6206vpand %ymm8,%ymm3,%ymm8
6207
6208# qhasm: 4x v01 unsigned>>= 2
6209# asm 1: vpsrlq $2,<v01=reg256#7,<v01=reg256#7
6210# asm 2: vpsrlq $2,<v01=%ymm6,<v01=%ymm6
6211vpsrlq $2,%ymm6,%ymm6
6212
6213# qhasm: x4 = v00 | v10
6214# asm 1: vpor  <v00=reg256#13,<v10=reg256#16,>x4=reg256#13
6215# asm 2: vpor  <v00=%ymm12,<v10=%ymm15,>x4=%ymm12
6216vpor  %ymm12,%ymm15,%ymm12
6217
6218# qhasm: x6 = v01 | v11
6219# asm 1: vpor  <v01=reg256#7,<v11=reg256#9,>x6=reg256#7
6220# asm 2: vpor  <v01=%ymm6,<v11=%ymm8,>x6=%ymm6
6221vpor  %ymm6,%ymm8,%ymm6
6222
6223# qhasm: v00 = x5 & mask2
6224# asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9
6225# asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8
6226vpand %ymm7,%ymm2,%ymm8
6227
6228# qhasm: v10 = x7 & mask2
6229# asm 1: vpand <x7=reg256#10,<mask2=reg256#3,>v10=reg256#16
6230# asm 2: vpand <x7=%ymm9,<mask2=%ymm2,>v10=%ymm15
6231vpand %ymm9,%ymm2,%ymm15
6232
6233# qhasm: 4x v10 <<= 2
6234# asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
6235# asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
6236vpsllq $2,%ymm15,%ymm15
6237
6238# qhasm: v01 = x5 & mask3
6239# asm 1: vpand <x5=reg256#8,<mask3=reg256#4,>v01=reg256#8
6240# asm 2: vpand <x5=%ymm7,<mask3=%ymm3,>v01=%ymm7
6241vpand %ymm7,%ymm3,%ymm7
6242
6243# qhasm: v11 = x7 & mask3
6244# asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10
6245# asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9
6246vpand %ymm9,%ymm3,%ymm9
6247
6248# qhasm: 4x v01 unsigned>>= 2
6249# asm 1: vpsrlq $2,<v01=reg256#8,<v01=reg256#8
6250# asm 2: vpsrlq $2,<v01=%ymm7,<v01=%ymm7
6251vpsrlq $2,%ymm7,%ymm7
6252
6253# qhasm: x5 = v00 | v10
6254# asm 1: vpor  <v00=reg256#9,<v10=reg256#16,>x5=reg256#9
6255# asm 2: vpor  <v00=%ymm8,<v10=%ymm15,>x5=%ymm8
6256vpor  %ymm8,%ymm15,%ymm8
6257
6258# qhasm: x7 = v01 | v11
6259# asm 1: vpor  <v01=reg256#8,<v11=reg256#10,>x7=reg256#8
6260# asm 2: vpor  <v01=%ymm7,<v11=%ymm9,>x7=%ymm7
6261vpor  %ymm7,%ymm9,%ymm7
6262
6263# qhasm: v00 = x0 & mask4
6264# asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10
6265# asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9
6266vpand %ymm13,%ymm4,%ymm9
6267
6268# qhasm: v10 = x1 & mask4
6269# asm 1: vpand <x1=reg256#15,<mask4=reg256#5,>v10=reg256#16
6270# asm 2: vpand <x1=%ymm14,<mask4=%ymm4,>v10=%ymm15
6271vpand %ymm14,%ymm4,%ymm15
6272
6273# qhasm: 4x v10 <<= 1
6274# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
6275# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
6276vpsllq $1,%ymm15,%ymm15
6277
6278# qhasm: v01 = x0 & mask5
6279# asm 1: vpand <x0=reg256#14,<mask5=reg256#6,>v01=reg256#14
6280# asm 2: vpand <x0=%ymm13,<mask5=%ymm5,>v01=%ymm13
6281vpand %ymm13,%ymm5,%ymm13
6282
6283# qhasm: v11 = x1 & mask5
6284# asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15
6285# asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14
6286vpand %ymm14,%ymm5,%ymm14
6287
6288# qhasm: 4x v01 unsigned>>= 1
6289# asm 1: vpsrlq $1,<v01=reg256#14,<v01=reg256#14
6290# asm 2: vpsrlq $1,<v01=%ymm13,<v01=%ymm13
6291vpsrlq $1,%ymm13,%ymm13
6292
6293# qhasm: x0 = v00 | v10
6294# asm 1: vpor  <v00=reg256#10,<v10=reg256#16,>x0=reg256#10
6295# asm 2: vpor  <v00=%ymm9,<v10=%ymm15,>x0=%ymm9
6296vpor  %ymm9,%ymm15,%ymm9
6297
6298# qhasm: x1 = v01 | v11
6299# asm 1: vpor  <v01=reg256#14,<v11=reg256#15,>x1=reg256#14
6300# asm 2: vpor  <v01=%ymm13,<v11=%ymm14,>x1=%ymm13
6301vpor  %ymm13,%ymm14,%ymm13
6302
6303# qhasm: v00 = x2 & mask4
6304# asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15
6305# asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14
6306vpand %ymm11,%ymm4,%ymm14
6307
6308# qhasm: v10 = x3 & mask4
6309# asm 1: vpand <x3=reg256#11,<mask4=reg256#5,>v10=reg256#16
6310# asm 2: vpand <x3=%ymm10,<mask4=%ymm4,>v10=%ymm15
6311vpand %ymm10,%ymm4,%ymm15
6312
6313# qhasm: 4x v10 <<= 1
6314# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
6315# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
6316vpsllq $1,%ymm15,%ymm15
6317
6318# qhasm: v01 = x2 & mask5
6319# asm 1: vpand <x2=reg256#12,<mask5=reg256#6,>v01=reg256#12
6320# asm 2: vpand <x2=%ymm11,<mask5=%ymm5,>v01=%ymm11
6321vpand %ymm11,%ymm5,%ymm11
6322
6323# qhasm: v11 = x3 & mask5
6324# asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11
6325# asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10
6326vpand %ymm10,%ymm5,%ymm10
6327
6328# qhasm: 4x v01 unsigned>>= 1
6329# asm 1: vpsrlq $1,<v01=reg256#12,<v01=reg256#12
6330# asm 2: vpsrlq $1,<v01=%ymm11,<v01=%ymm11
6331vpsrlq $1,%ymm11,%ymm11
6332
6333# qhasm: x2 = v00 | v10
6334# asm 1: vpor  <v00=reg256#15,<v10=reg256#16,>x2=reg256#15
6335# asm 2: vpor  <v00=%ymm14,<v10=%ymm15,>x2=%ymm14
6336vpor  %ymm14,%ymm15,%ymm14
6337
6338# qhasm: x3 = v01 | v11
6339# asm 1: vpor  <v01=reg256#12,<v11=reg256#11,>x3=reg256#11
6340# asm 2: vpor  <v01=%ymm11,<v11=%ymm10,>x3=%ymm10
6341vpor  %ymm11,%ymm10,%ymm10
6342
6343# qhasm: v00 = x4 & mask4
6344# asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12
6345# asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11
6346vpand %ymm12,%ymm4,%ymm11
6347
6348# qhasm: v10 = x5 & mask4
6349# asm 1: vpand <x5=reg256#9,<mask4=reg256#5,>v10=reg256#16
6350# asm 2: vpand <x5=%ymm8,<mask4=%ymm4,>v10=%ymm15
6351vpand %ymm8,%ymm4,%ymm15
6352
6353# qhasm: 4x v10 <<= 1
6354# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
6355# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
6356vpsllq $1,%ymm15,%ymm15
6357
6358# qhasm: v01 = x4 & mask5
6359# asm 1: vpand <x4=reg256#13,<mask5=reg256#6,>v01=reg256#13
6360# asm 2: vpand <x4=%ymm12,<mask5=%ymm5,>v01=%ymm12
6361vpand %ymm12,%ymm5,%ymm12
6362
6363# qhasm: v11 = x5 & mask5
6364# asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9
6365# asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8
6366vpand %ymm8,%ymm5,%ymm8
6367
6368# qhasm: 4x v01 unsigned>>= 1
6369# asm 1: vpsrlq $1,<v01=reg256#13,<v01=reg256#13
6370# asm 2: vpsrlq $1,<v01=%ymm12,<v01=%ymm12
6371vpsrlq $1,%ymm12,%ymm12
6372
6373# qhasm: x4 = v00 | v10
6374# asm 1: vpor  <v00=reg256#12,<v10=reg256#16,>x4=reg256#12
6375# asm 2: vpor  <v00=%ymm11,<v10=%ymm15,>x4=%ymm11
6376vpor  %ymm11,%ymm15,%ymm11
6377
6378# qhasm: x5 = v01 | v11
6379# asm 1: vpor  <v01=reg256#13,<v11=reg256#9,>x5=reg256#9
6380# asm 2: vpor  <v01=%ymm12,<v11=%ymm8,>x5=%ymm8
6381vpor  %ymm12,%ymm8,%ymm8
6382
6383# qhasm: v00 = x6 & mask4
6384# asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13
6385# asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12
6386vpand %ymm6,%ymm4,%ymm12
6387
6388# qhasm: v10 = x7 & mask4
6389# asm 1: vpand <x7=reg256#8,<mask4=reg256#5,>v10=reg256#16
6390# asm 2: vpand <x7=%ymm7,<mask4=%ymm4,>v10=%ymm15
6391vpand %ymm7,%ymm4,%ymm15
6392
6393# qhasm: 4x v10 <<= 1
6394# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
6395# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
6396vpsllq $1,%ymm15,%ymm15
6397
6398# qhasm: v01 = x6 & mask5
6399# asm 1: vpand <x6=reg256#7,<mask5=reg256#6,>v01=reg256#7
6400# asm 2: vpand <x6=%ymm6,<mask5=%ymm5,>v01=%ymm6
6401vpand %ymm6,%ymm5,%ymm6
6402
6403# qhasm: v11 = x7 & mask5
6404# asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8
6405# asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7
6406vpand %ymm7,%ymm5,%ymm7
6407
6408# qhasm: 4x v01 unsigned>>= 1
6409# asm 1: vpsrlq $1,<v01=reg256#7,<v01=reg256#7
6410# asm 2: vpsrlq $1,<v01=%ymm6,<v01=%ymm6
6411vpsrlq $1,%ymm6,%ymm6
6412
6413# qhasm: x6 = v00 | v10
6414# asm 1: vpor  <v00=reg256#13,<v10=reg256#16,>x6=reg256#13
6415# asm 2: vpor  <v00=%ymm12,<v10=%ymm15,>x6=%ymm12
6416vpor  %ymm12,%ymm15,%ymm12
6417
6418# qhasm: x7 = v01 | v11
6419# asm 1: vpor  <v01=reg256#7,<v11=reg256#8,>x7=reg256#7
6420# asm 2: vpor  <v01=%ymm6,<v11=%ymm7,>x7=%ymm6
6421vpor  %ymm6,%ymm7,%ymm6
6422
6423# qhasm: mem256[ input_0 + 1024 ] = x0
6424# asm 1: vmovupd   <x0=reg256#10,1024(<input_0=int64#1)
6425# asm 2: vmovupd   <x0=%ymm9,1024(<input_0=%rdi)
6426vmovupd   %ymm9,1024(%rdi)
6427
6428# qhasm: mem256[ input_0 + 1056 ] = x1
6429# asm 1: vmovupd   <x1=reg256#14,1056(<input_0=int64#1)
6430# asm 2: vmovupd   <x1=%ymm13,1056(<input_0=%rdi)
6431vmovupd   %ymm13,1056(%rdi)
6432
6433# qhasm: mem256[ input_0 + 1088 ] = x2
6434# asm 1: vmovupd   <x2=reg256#15,1088(<input_0=int64#1)
6435# asm 2: vmovupd   <x2=%ymm14,1088(<input_0=%rdi)
6436vmovupd   %ymm14,1088(%rdi)
6437
6438# qhasm: mem256[ input_0 + 1120 ] = x3
6439# asm 1: vmovupd   <x3=reg256#11,1120(<input_0=int64#1)
6440# asm 2: vmovupd   <x3=%ymm10,1120(<input_0=%rdi)
6441vmovupd   %ymm10,1120(%rdi)
6442
6443# qhasm: mem256[ input_0 + 1152 ] = x4
6444# asm 1: vmovupd   <x4=reg256#12,1152(<input_0=int64#1)
6445# asm 2: vmovupd   <x4=%ymm11,1152(<input_0=%rdi)
6446vmovupd   %ymm11,1152(%rdi)
6447
6448# qhasm: mem256[ input_0 + 1184 ] = x5
6449# asm 1: vmovupd   <x5=reg256#9,1184(<input_0=int64#1)
6450# asm 2: vmovupd   <x5=%ymm8,1184(<input_0=%rdi)
6451vmovupd   %ymm8,1184(%rdi)
6452
6453# qhasm: mem256[ input_0 + 1216 ] = x6
6454# asm 1: vmovupd   <x6=reg256#13,1216(<input_0=int64#1)
6455# asm 2: vmovupd   <x6=%ymm12,1216(<input_0=%rdi)
6456vmovupd   %ymm12,1216(%rdi)
6457
6458# qhasm: mem256[ input_0 + 1248 ] = x7
6459# asm 1: vmovupd   <x7=reg256#7,1248(<input_0=int64#1)
6460# asm 2: vmovupd   <x7=%ymm6,1248(<input_0=%rdi)
6461vmovupd   %ymm6,1248(%rdi)
6462
6463# qhasm: x0 = mem256[ input_0 + 1280 ]
6464# asm 1: vmovupd   1280(<input_0=int64#1),>x0=reg256#7
6465# asm 2: vmovupd   1280(<input_0=%rdi),>x0=%ymm6
6466vmovupd   1280(%rdi),%ymm6
6467
6468# qhasm: x1 = mem256[ input_0 + 1312 ]
6469# asm 1: vmovupd   1312(<input_0=int64#1),>x1=reg256#8
6470# asm 2: vmovupd   1312(<input_0=%rdi),>x1=%ymm7
6471vmovupd   1312(%rdi),%ymm7
6472
6473# qhasm: x2 = mem256[ input_0 + 1344 ]
6474# asm 1: vmovupd   1344(<input_0=int64#1),>x2=reg256#9
6475# asm 2: vmovupd   1344(<input_0=%rdi),>x2=%ymm8
6476vmovupd   1344(%rdi),%ymm8
6477
6478# qhasm: x3 = mem256[ input_0 + 1376 ]
6479# asm 1: vmovupd   1376(<input_0=int64#1),>x3=reg256#10
6480# asm 2: vmovupd   1376(<input_0=%rdi),>x3=%ymm9
6481vmovupd   1376(%rdi),%ymm9
6482
6483# qhasm: x4 = mem256[ input_0 + 1408 ]
6484# asm 1: vmovupd   1408(<input_0=int64#1),>x4=reg256#11
6485# asm 2: vmovupd   1408(<input_0=%rdi),>x4=%ymm10
6486vmovupd   1408(%rdi),%ymm10
6487
6488# qhasm: x5 = mem256[ input_0 + 1440 ]
6489# asm 1: vmovupd   1440(<input_0=int64#1),>x5=reg256#12
6490# asm 2: vmovupd   1440(<input_0=%rdi),>x5=%ymm11
6491vmovupd   1440(%rdi),%ymm11
6492
6493# qhasm: x6 = mem256[ input_0 + 1472 ]
6494# asm 1: vmovupd   1472(<input_0=int64#1),>x6=reg256#13
6495# asm 2: vmovupd   1472(<input_0=%rdi),>x6=%ymm12
6496vmovupd   1472(%rdi),%ymm12
6497
6498# qhasm: x7 = mem256[ input_0 + 1504 ]
6499# asm 1: vmovupd   1504(<input_0=int64#1),>x7=reg256#14
6500# asm 2: vmovupd   1504(<input_0=%rdi),>x7=%ymm13
6501vmovupd   1504(%rdi),%ymm13
6502
6503# qhasm: v00 = x0 & mask0
6504# asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15
6505# asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14
6506vpand %ymm6,%ymm0,%ymm14
6507
6508# qhasm: v10 = x4 & mask0
6509# asm 1: vpand <x4=reg256#11,<mask0=reg256#1,>v10=reg256#16
6510# asm 2: vpand <x4=%ymm10,<mask0=%ymm0,>v10=%ymm15
6511vpand %ymm10,%ymm0,%ymm15
6512
6513# qhasm: 4x v10 <<= 4
6514# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
6515# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
6516vpsllq $4,%ymm15,%ymm15
6517
6518# qhasm: v01 = x0 & mask1
6519# asm 1: vpand <x0=reg256#7,<mask1=reg256#2,>v01=reg256#7
6520# asm 2: vpand <x0=%ymm6,<mask1=%ymm1,>v01=%ymm6
6521vpand %ymm6,%ymm1,%ymm6
6522
6523# qhasm: v11 = x4 & mask1
6524# asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11
6525# asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10
6526vpand %ymm10,%ymm1,%ymm10
6527
6528# qhasm: 4x v01 unsigned>>= 4
6529# asm 1: vpsrlq $4,<v01=reg256#7,<v01=reg256#7
6530# asm 2: vpsrlq $4,<v01=%ymm6,<v01=%ymm6
6531vpsrlq $4,%ymm6,%ymm6
6532
6533# qhasm: x0 = v00 | v10
6534# asm 1: vpor  <v00=reg256#15,<v10=reg256#16,>x0=reg256#15
6535# asm 2: vpor  <v00=%ymm14,<v10=%ymm15,>x0=%ymm14
6536vpor  %ymm14,%ymm15,%ymm14
6537
6538# qhasm: x4 = v01 | v11
6539# asm 1: vpor  <v01=reg256#7,<v11=reg256#11,>x4=reg256#7
6540# asm 2: vpor  <v01=%ymm6,<v11=%ymm10,>x4=%ymm6
6541vpor  %ymm6,%ymm10,%ymm6
6542
6543# qhasm: v00 = x1 & mask0
6544# asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11
6545# asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10
6546vpand %ymm7,%ymm0,%ymm10
6547
6548# qhasm: v10 = x5 & mask0
6549# asm 1: vpand <x5=reg256#12,<mask0=reg256#1,>v10=reg256#16
6550# asm 2: vpand <x5=%ymm11,<mask0=%ymm0,>v10=%ymm15
6551vpand %ymm11,%ymm0,%ymm15
6552
6553# qhasm: 4x v10 <<= 4
6554# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
6555# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
6556vpsllq $4,%ymm15,%ymm15
6557
6558# qhasm: v01 = x1 & mask1
6559# asm 1: vpand <x1=reg256#8,<mask1=reg256#2,>v01=reg256#8
6560# asm 2: vpand <x1=%ymm7,<mask1=%ymm1,>v01=%ymm7
6561vpand %ymm7,%ymm1,%ymm7
6562
6563# qhasm: v11 = x5 & mask1
6564# asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12
6565# asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11
6566vpand %ymm11,%ymm1,%ymm11
6567
6568# qhasm: 4x v01 unsigned>>= 4
6569# asm 1: vpsrlq $4,<v01=reg256#8,<v01=reg256#8
6570# asm 2: vpsrlq $4,<v01=%ymm7,<v01=%ymm7
6571vpsrlq $4,%ymm7,%ymm7
6572
6573# qhasm: x1 = v00 | v10
6574# asm 1: vpor  <v00=reg256#11,<v10=reg256#16,>x1=reg256#11
6575# asm 2: vpor  <v00=%ymm10,<v10=%ymm15,>x1=%ymm10
6576vpor  %ymm10,%ymm15,%ymm10
6577
6578# qhasm: x5 = v01 | v11
6579# asm 1: vpor  <v01=reg256#8,<v11=reg256#12,>x5=reg256#8
6580# asm 2: vpor  <v01=%ymm7,<v11=%ymm11,>x5=%ymm7
6581vpor  %ymm7,%ymm11,%ymm7
6582
6583# qhasm: v00 = x2 & mask0
6584# asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12
6585# asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11
6586vpand %ymm8,%ymm0,%ymm11
6587
6588# qhasm: v10 = x6 & mask0
6589# asm 1: vpand <x6=reg256#13,<mask0=reg256#1,>v10=reg256#16
6590# asm 2: vpand <x6=%ymm12,<mask0=%ymm0,>v10=%ymm15
6591vpand %ymm12,%ymm0,%ymm15
6592
6593# qhasm: 4x v10 <<= 4
6594# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
6595# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
6596vpsllq $4,%ymm15,%ymm15
6597
6598# qhasm: v01 = x2 & mask1
6599# asm 1: vpand <x2=reg256#9,<mask1=reg256#2,>v01=reg256#9
6600# asm 2: vpand <x2=%ymm8,<mask1=%ymm1,>v01=%ymm8
6601vpand %ymm8,%ymm1,%ymm8
6602
6603# qhasm: v11 = x6 & mask1
6604# asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13
6605# asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12
6606vpand %ymm12,%ymm1,%ymm12
6607
6608# qhasm: 4x v01 unsigned>>= 4
6609# asm 1: vpsrlq $4,<v01=reg256#9,<v01=reg256#9
6610# asm 2: vpsrlq $4,<v01=%ymm8,<v01=%ymm8
6611vpsrlq $4,%ymm8,%ymm8
6612
6613# qhasm: x2 = v00 | v10
6614# asm 1: vpor  <v00=reg256#12,<v10=reg256#16,>x2=reg256#12
6615# asm 2: vpor  <v00=%ymm11,<v10=%ymm15,>x2=%ymm11
6616vpor  %ymm11,%ymm15,%ymm11
6617
6618# qhasm: x6 = v01 | v11
6619# asm 1: vpor  <v01=reg256#9,<v11=reg256#13,>x6=reg256#9
6620# asm 2: vpor  <v01=%ymm8,<v11=%ymm12,>x6=%ymm8
6621vpor  %ymm8,%ymm12,%ymm8
6622
6623# qhasm: v00 = x3 & mask0
6624# asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13
6625# asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12
6626vpand %ymm9,%ymm0,%ymm12
6627
6628# qhasm: v10 = x7 & mask0
6629# asm 1: vpand <x7=reg256#14,<mask0=reg256#1,>v10=reg256#16
6630# asm 2: vpand <x7=%ymm13,<mask0=%ymm0,>v10=%ymm15
6631vpand %ymm13,%ymm0,%ymm15
6632
6633# qhasm: 4x v10 <<= 4
6634# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
6635# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
6636vpsllq $4,%ymm15,%ymm15
6637
6638# qhasm: v01 = x3 & mask1
6639# asm 1: vpand <x3=reg256#10,<mask1=reg256#2,>v01=reg256#10
6640# asm 2: vpand <x3=%ymm9,<mask1=%ymm1,>v01=%ymm9
6641vpand %ymm9,%ymm1,%ymm9
6642
6643# qhasm: v11 = x7 & mask1
6644# asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14
6645# asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13
6646vpand %ymm13,%ymm1,%ymm13
6647
6648# qhasm: 4x v01 unsigned>>= 4
6649# asm 1: vpsrlq $4,<v01=reg256#10,<v01=reg256#10
6650# asm 2: vpsrlq $4,<v01=%ymm9,<v01=%ymm9
6651vpsrlq $4,%ymm9,%ymm9
6652
6653# qhasm: x3 = v00 | v10
6654# asm 1: vpor  <v00=reg256#13,<v10=reg256#16,>x3=reg256#13
6655# asm 2: vpor  <v00=%ymm12,<v10=%ymm15,>x3=%ymm12
6656vpor  %ymm12,%ymm15,%ymm12
6657
6658# qhasm: x7 = v01 | v11
6659# asm 1: vpor  <v01=reg256#10,<v11=reg256#14,>x7=reg256#10
6660# asm 2: vpor  <v01=%ymm9,<v11=%ymm13,>x7=%ymm9
6661vpor  %ymm9,%ymm13,%ymm9
6662
6663# qhasm: v00 = x0 & mask2
6664# asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14
6665# asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13
6666vpand %ymm14,%ymm2,%ymm13
6667
6668# qhasm: v10 = x2 & mask2
6669# asm 1: vpand <x2=reg256#12,<mask2=reg256#3,>v10=reg256#16
6670# asm 2: vpand <x2=%ymm11,<mask2=%ymm2,>v10=%ymm15
6671vpand %ymm11,%ymm2,%ymm15
6672
6673# qhasm: 4x v10 <<= 2
6674# asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
6675# asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
6676vpsllq $2,%ymm15,%ymm15
6677
6678# qhasm: v01 = x0 & mask3
6679# asm 1: vpand <x0=reg256#15,<mask3=reg256#4,>v01=reg256#15
6680# asm 2: vpand <x0=%ymm14,<mask3=%ymm3,>v01=%ymm14
6681vpand %ymm14,%ymm3,%ymm14
6682
6683# qhasm: v11 = x2 & mask3
6684# asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12
6685# asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11
6686vpand %ymm11,%ymm3,%ymm11
6687
6688# qhasm: 4x v01 unsigned>>= 2
6689# asm 1: vpsrlq $2,<v01=reg256#15,<v01=reg256#15
6690# asm 2: vpsrlq $2,<v01=%ymm14,<v01=%ymm14
6691vpsrlq $2,%ymm14,%ymm14
6692
6693# qhasm: x0 = v00 | v10
6694# asm 1: vpor  <v00=reg256#14,<v10=reg256#16,>x0=reg256#14
6695# asm 2: vpor  <v00=%ymm13,<v10=%ymm15,>x0=%ymm13
6696vpor  %ymm13,%ymm15,%ymm13
6697
6698# qhasm: x2 = v01 | v11
6699# asm 1: vpor  <v01=reg256#15,<v11=reg256#12,>x2=reg256#12
6700# asm 2: vpor  <v01=%ymm14,<v11=%ymm11,>x2=%ymm11
6701vpor  %ymm14,%ymm11,%ymm11
6702
6703# qhasm: v00 = x1 & mask2
6704# asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15
6705# asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14
6706vpand %ymm10,%ymm2,%ymm14
6707
6708# qhasm: v10 = x3 & mask2
6709# asm 1: vpand <x3=reg256#13,<mask2=reg256#3,>v10=reg256#16
6710# asm 2: vpand <x3=%ymm12,<mask2=%ymm2,>v10=%ymm15
6711vpand %ymm12,%ymm2,%ymm15
6712
6713# qhasm: 4x v10 <<= 2
6714# asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
6715# asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
6716vpsllq $2,%ymm15,%ymm15
6717
6718# qhasm: v01 = x1 & mask3
6719# asm 1: vpand <x1=reg256#11,<mask3=reg256#4,>v01=reg256#11
6720# asm 2: vpand <x1=%ymm10,<mask3=%ymm3,>v01=%ymm10
6721vpand %ymm10,%ymm3,%ymm10
6722
6723# qhasm: v11 = x3 & mask3
6724# asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13
6725# asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12
6726vpand %ymm12,%ymm3,%ymm12
6727
6728# qhasm: 4x v01 unsigned>>= 2
6729# asm 1: vpsrlq $2,<v01=reg256#11,<v01=reg256#11
6730# asm 2: vpsrlq $2,<v01=%ymm10,<v01=%ymm10
6731vpsrlq $2,%ymm10,%ymm10
6732
6733# qhasm: x1 = v00 | v10
6734# asm 1: vpor  <v00=reg256#15,<v10=reg256#16,>x1=reg256#15
6735# asm 2: vpor  <v00=%ymm14,<v10=%ymm15,>x1=%ymm14
6736vpor  %ymm14,%ymm15,%ymm14
6737
6738# qhasm: x3 = v01 | v11
6739# asm 1: vpor  <v01=reg256#11,<v11=reg256#13,>x3=reg256#11
6740# asm 2: vpor  <v01=%ymm10,<v11=%ymm12,>x3=%ymm10
6741vpor  %ymm10,%ymm12,%ymm10
6742
6743# qhasm: v00 = x4 & mask2
6744# asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13
6745# asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12
6746vpand %ymm6,%ymm2,%ymm12
6747
6748# qhasm: v10 = x6 & mask2
6749# asm 1: vpand <x6=reg256#9,<mask2=reg256#3,>v10=reg256#16
6750# asm 2: vpand <x6=%ymm8,<mask2=%ymm2,>v10=%ymm15
6751vpand %ymm8,%ymm2,%ymm15
6752
6753# qhasm: 4x v10 <<= 2
6754# asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
6755# asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
6756vpsllq $2,%ymm15,%ymm15
6757
6758# qhasm: v01 = x4 & mask3
6759# asm 1: vpand <x4=reg256#7,<mask3=reg256#4,>v01=reg256#7
6760# asm 2: vpand <x4=%ymm6,<mask3=%ymm3,>v01=%ymm6
6761vpand %ymm6,%ymm3,%ymm6
6762
6763# qhasm: v11 = x6 & mask3
6764# asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9
6765# asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8
6766vpand %ymm8,%ymm3,%ymm8
6767
6768# qhasm: 4x v01 unsigned>>= 2
6769# asm 1: vpsrlq $2,<v01=reg256#7,<v01=reg256#7
6770# asm 2: vpsrlq $2,<v01=%ymm6,<v01=%ymm6
6771vpsrlq $2,%ymm6,%ymm6
6772
6773# qhasm: x4 = v00 | v10
6774# asm 1: vpor  <v00=reg256#13,<v10=reg256#16,>x4=reg256#13
6775# asm 2: vpor  <v00=%ymm12,<v10=%ymm15,>x4=%ymm12
6776vpor  %ymm12,%ymm15,%ymm12
6777
6778# qhasm: x6 = v01 | v11
6779# asm 1: vpor  <v01=reg256#7,<v11=reg256#9,>x6=reg256#7
6780# asm 2: vpor  <v01=%ymm6,<v11=%ymm8,>x6=%ymm6
6781vpor  %ymm6,%ymm8,%ymm6
6782
6783# qhasm: v00 = x5 & mask2
6784# asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9
6785# asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8
6786vpand %ymm7,%ymm2,%ymm8
6787
6788# qhasm: v10 = x7 & mask2
6789# asm 1: vpand <x7=reg256#10,<mask2=reg256#3,>v10=reg256#16
6790# asm 2: vpand <x7=%ymm9,<mask2=%ymm2,>v10=%ymm15
6791vpand %ymm9,%ymm2,%ymm15
6792
6793# qhasm: 4x v10 <<= 2
6794# asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
6795# asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
6796vpsllq $2,%ymm15,%ymm15
6797
6798# qhasm: v01 = x5 & mask3
6799# asm 1: vpand <x5=reg256#8,<mask3=reg256#4,>v01=reg256#8
6800# asm 2: vpand <x5=%ymm7,<mask3=%ymm3,>v01=%ymm7
6801vpand %ymm7,%ymm3,%ymm7
6802
6803# qhasm: v11 = x7 & mask3
6804# asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10
6805# asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9
6806vpand %ymm9,%ymm3,%ymm9
6807
6808# qhasm: 4x v01 unsigned>>= 2
6809# asm 1: vpsrlq $2,<v01=reg256#8,<v01=reg256#8
6810# asm 2: vpsrlq $2,<v01=%ymm7,<v01=%ymm7
6811vpsrlq $2,%ymm7,%ymm7
6812
6813# qhasm: x5 = v00 | v10
6814# asm 1: vpor  <v00=reg256#9,<v10=reg256#16,>x5=reg256#9
6815# asm 2: vpor  <v00=%ymm8,<v10=%ymm15,>x5=%ymm8
6816vpor  %ymm8,%ymm15,%ymm8
6817
6818# qhasm: x7 = v01 | v11
6819# asm 1: vpor  <v01=reg256#8,<v11=reg256#10,>x7=reg256#8
6820# asm 2: vpor  <v01=%ymm7,<v11=%ymm9,>x7=%ymm7
6821vpor  %ymm7,%ymm9,%ymm7
6822
6823# qhasm: v00 = x0 & mask4
6824# asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10
6825# asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9
6826vpand %ymm13,%ymm4,%ymm9
6827
6828# qhasm: v10 = x1 & mask4
6829# asm 1: vpand <x1=reg256#15,<mask4=reg256#5,>v10=reg256#16
6830# asm 2: vpand <x1=%ymm14,<mask4=%ymm4,>v10=%ymm15
6831vpand %ymm14,%ymm4,%ymm15
6832
6833# qhasm: 4x v10 <<= 1
6834# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
6835# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
6836vpsllq $1,%ymm15,%ymm15
6837
6838# qhasm: v01 = x0 & mask5
6839# asm 1: vpand <x0=reg256#14,<mask5=reg256#6,>v01=reg256#14
6840# asm 2: vpand <x0=%ymm13,<mask5=%ymm5,>v01=%ymm13
6841vpand %ymm13,%ymm5,%ymm13
6842
6843# qhasm: v11 = x1 & mask5
6844# asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15
6845# asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14
6846vpand %ymm14,%ymm5,%ymm14
6847
6848# qhasm: 4x v01 unsigned>>= 1
6849# asm 1: vpsrlq $1,<v01=reg256#14,<v01=reg256#14
6850# asm 2: vpsrlq $1,<v01=%ymm13,<v01=%ymm13
6851vpsrlq $1,%ymm13,%ymm13
6852
6853# qhasm: x0 = v00 | v10
6854# asm 1: vpor  <v00=reg256#10,<v10=reg256#16,>x0=reg256#10
6855# asm 2: vpor  <v00=%ymm9,<v10=%ymm15,>x0=%ymm9
6856vpor  %ymm9,%ymm15,%ymm9
6857
6858# qhasm: x1 = v01 | v11
6859# asm 1: vpor  <v01=reg256#14,<v11=reg256#15,>x1=reg256#14
6860# asm 2: vpor  <v01=%ymm13,<v11=%ymm14,>x1=%ymm13
6861vpor  %ymm13,%ymm14,%ymm13
6862
6863# qhasm: v00 = x2 & mask4
6864# asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15
6865# asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14
6866vpand %ymm11,%ymm4,%ymm14
6867
6868# qhasm: v10 = x3 & mask4
6869# asm 1: vpand <x3=reg256#11,<mask4=reg256#5,>v10=reg256#16
6870# asm 2: vpand <x3=%ymm10,<mask4=%ymm4,>v10=%ymm15
6871vpand %ymm10,%ymm4,%ymm15
6872
6873# qhasm: 4x v10 <<= 1
6874# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
6875# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
6876vpsllq $1,%ymm15,%ymm15
6877
6878# qhasm: v01 = x2 & mask5
6879# asm 1: vpand <x2=reg256#12,<mask5=reg256#6,>v01=reg256#12
6880# asm 2: vpand <x2=%ymm11,<mask5=%ymm5,>v01=%ymm11
6881vpand %ymm11,%ymm5,%ymm11
6882
6883# qhasm: v11 = x3 & mask5
6884# asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11
6885# asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10
6886vpand %ymm10,%ymm5,%ymm10
6887
6888# qhasm: 4x v01 unsigned>>= 1
6889# asm 1: vpsrlq $1,<v01=reg256#12,<v01=reg256#12
6890# asm 2: vpsrlq $1,<v01=%ymm11,<v01=%ymm11
6891vpsrlq $1,%ymm11,%ymm11
6892
6893# qhasm: x2 = v00 | v10
6894# asm 1: vpor  <v00=reg256#15,<v10=reg256#16,>x2=reg256#15
6895# asm 2: vpor  <v00=%ymm14,<v10=%ymm15,>x2=%ymm14
6896vpor  %ymm14,%ymm15,%ymm14
6897
6898# qhasm: x3 = v01 | v11
6899# asm 1: vpor  <v01=reg256#12,<v11=reg256#11,>x3=reg256#11
6900# asm 2: vpor  <v01=%ymm11,<v11=%ymm10,>x3=%ymm10
6901vpor  %ymm11,%ymm10,%ymm10
6902
6903# qhasm: v00 = x4 & mask4
6904# asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12
6905# asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11
6906vpand %ymm12,%ymm4,%ymm11
6907
6908# qhasm: v10 = x5 & mask4
6909# asm 1: vpand <x5=reg256#9,<mask4=reg256#5,>v10=reg256#16
6910# asm 2: vpand <x5=%ymm8,<mask4=%ymm4,>v10=%ymm15
6911vpand %ymm8,%ymm4,%ymm15
6912
6913# qhasm: 4x v10 <<= 1
6914# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
6915# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
6916vpsllq $1,%ymm15,%ymm15
6917
6918# qhasm: v01 = x4 & mask5
6919# asm 1: vpand <x4=reg256#13,<mask5=reg256#6,>v01=reg256#13
6920# asm 2: vpand <x4=%ymm12,<mask5=%ymm5,>v01=%ymm12
6921vpand %ymm12,%ymm5,%ymm12
6922
6923# qhasm: v11 = x5 & mask5
6924# asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9
6925# asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8
6926vpand %ymm8,%ymm5,%ymm8
6927
6928# qhasm: 4x v01 unsigned>>= 1
6929# asm 1: vpsrlq $1,<v01=reg256#13,<v01=reg256#13
6930# asm 2: vpsrlq $1,<v01=%ymm12,<v01=%ymm12
6931vpsrlq $1,%ymm12,%ymm12
6932
6933# qhasm: x4 = v00 | v10
6934# asm 1: vpor  <v00=reg256#12,<v10=reg256#16,>x4=reg256#12
6935# asm 2: vpor  <v00=%ymm11,<v10=%ymm15,>x4=%ymm11
6936vpor  %ymm11,%ymm15,%ymm11
6937
6938# qhasm: x5 = v01 | v11
6939# asm 1: vpor  <v01=reg256#13,<v11=reg256#9,>x5=reg256#9
6940# asm 2: vpor  <v01=%ymm12,<v11=%ymm8,>x5=%ymm8
6941vpor  %ymm12,%ymm8,%ymm8
6942
6943# qhasm: v00 = x6 & mask4
6944# asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13
6945# asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12
6946vpand %ymm6,%ymm4,%ymm12
6947
6948# qhasm: v10 = x7 & mask4
6949# asm 1: vpand <x7=reg256#8,<mask4=reg256#5,>v10=reg256#16
6950# asm 2: vpand <x7=%ymm7,<mask4=%ymm4,>v10=%ymm15
6951vpand %ymm7,%ymm4,%ymm15
6952
6953# qhasm: 4x v10 <<= 1
6954# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
6955# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
6956vpsllq $1,%ymm15,%ymm15
6957
6958# qhasm: v01 = x6 & mask5
6959# asm 1: vpand <x6=reg256#7,<mask5=reg256#6,>v01=reg256#7
6960# asm 2: vpand <x6=%ymm6,<mask5=%ymm5,>v01=%ymm6
6961vpand %ymm6,%ymm5,%ymm6
6962
6963# qhasm: v11 = x7 & mask5
6964# asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8
6965# asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7
6966vpand %ymm7,%ymm5,%ymm7
6967
6968# qhasm: 4x v01 unsigned>>= 1
6969# asm 1: vpsrlq $1,<v01=reg256#7,<v01=reg256#7
6970# asm 2: vpsrlq $1,<v01=%ymm6,<v01=%ymm6
6971vpsrlq $1,%ymm6,%ymm6
6972
6973# qhasm: x6 = v00 | v10
6974# asm 1: vpor  <v00=reg256#13,<v10=reg256#16,>x6=reg256#13
6975# asm 2: vpor  <v00=%ymm12,<v10=%ymm15,>x6=%ymm12
6976vpor  %ymm12,%ymm15,%ymm12
6977
6978# qhasm: x7 = v01 | v11
6979# asm 1: vpor  <v01=reg256#7,<v11=reg256#8,>x7=reg256#7
6980# asm 2: vpor  <v01=%ymm6,<v11=%ymm7,>x7=%ymm6
6981vpor  %ymm6,%ymm7,%ymm6
6982
6983# qhasm: mem256[ input_0 + 1280 ] = x0
6984# asm 1: vmovupd   <x0=reg256#10,1280(<input_0=int64#1)
6985# asm 2: vmovupd   <x0=%ymm9,1280(<input_0=%rdi)
6986vmovupd   %ymm9,1280(%rdi)
6987
6988# qhasm: mem256[ input_0 + 1312 ] = x1
6989# asm 1: vmovupd   <x1=reg256#14,1312(<input_0=int64#1)
6990# asm 2: vmovupd   <x1=%ymm13,1312(<input_0=%rdi)
6991vmovupd   %ymm13,1312(%rdi)
6992
6993# qhasm: mem256[ input_0 + 1344 ] = x2
6994# asm 1: vmovupd   <x2=reg256#15,1344(<input_0=int64#1)
6995# asm 2: vmovupd   <x2=%ymm14,1344(<input_0=%rdi)
6996vmovupd   %ymm14,1344(%rdi)
6997
6998# qhasm: mem256[ input_0 + 1376 ] = x3
6999# asm 1: vmovupd   <x3=reg256#11,1376(<input_0=int64#1)
7000# asm 2: vmovupd   <x3=%ymm10,1376(<input_0=%rdi)
7001vmovupd   %ymm10,1376(%rdi)
7002
7003# qhasm: mem256[ input_0 + 1408 ] = x4
7004# asm 1: vmovupd   <x4=reg256#12,1408(<input_0=int64#1)
7005# asm 2: vmovupd   <x4=%ymm11,1408(<input_0=%rdi)
7006vmovupd   %ymm11,1408(%rdi)
7007
7008# qhasm: mem256[ input_0 + 1440 ] = x5
7009# asm 1: vmovupd   <x5=reg256#9,1440(<input_0=int64#1)
7010# asm 2: vmovupd   <x5=%ymm8,1440(<input_0=%rdi)
7011vmovupd   %ymm8,1440(%rdi)
7012
7013# qhasm: mem256[ input_0 + 1472 ] = x6
7014# asm 1: vmovupd   <x6=reg256#13,1472(<input_0=int64#1)
7015# asm 2: vmovupd   <x6=%ymm12,1472(<input_0=%rdi)
7016vmovupd   %ymm12,1472(%rdi)
7017
7018# qhasm: mem256[ input_0 + 1504 ] = x7
7019# asm 1: vmovupd   <x7=reg256#7,1504(<input_0=int64#1)
7020# asm 2: vmovupd   <x7=%ymm6,1504(<input_0=%rdi)
7021vmovupd   %ymm6,1504(%rdi)
7022
7023# qhasm: x0 = mem256[ input_0 + 1536 ]
7024# asm 1: vmovupd   1536(<input_0=int64#1),>x0=reg256#7
7025# asm 2: vmovupd   1536(<input_0=%rdi),>x0=%ymm6
7026vmovupd   1536(%rdi),%ymm6
7027
7028# qhasm: x1 = mem256[ input_0 + 1568 ]
7029# asm 1: vmovupd   1568(<input_0=int64#1),>x1=reg256#8
7030# asm 2: vmovupd   1568(<input_0=%rdi),>x1=%ymm7
7031vmovupd   1568(%rdi),%ymm7
7032
7033# qhasm: x2 = mem256[ input_0 + 1600 ]
7034# asm 1: vmovupd   1600(<input_0=int64#1),>x2=reg256#9
7035# asm 2: vmovupd   1600(<input_0=%rdi),>x2=%ymm8
7036vmovupd   1600(%rdi),%ymm8
7037
7038# qhasm: x3 = mem256[ input_0 + 1632 ]
7039# asm 1: vmovupd   1632(<input_0=int64#1),>x3=reg256#10
7040# asm 2: vmovupd   1632(<input_0=%rdi),>x3=%ymm9
7041vmovupd   1632(%rdi),%ymm9
7042
7043# qhasm: x4 = mem256[ input_0 + 1664 ]
7044# asm 1: vmovupd   1664(<input_0=int64#1),>x4=reg256#11
7045# asm 2: vmovupd   1664(<input_0=%rdi),>x4=%ymm10
7046vmovupd   1664(%rdi),%ymm10
7047
7048# qhasm: x5 = mem256[ input_0 + 1696 ]
7049# asm 1: vmovupd   1696(<input_0=int64#1),>x5=reg256#12
7050# asm 2: vmovupd   1696(<input_0=%rdi),>x5=%ymm11
7051vmovupd   1696(%rdi),%ymm11
7052
7053# qhasm: x6 = mem256[ input_0 + 1728 ]
7054# asm 1: vmovupd   1728(<input_0=int64#1),>x6=reg256#13
7055# asm 2: vmovupd   1728(<input_0=%rdi),>x6=%ymm12
7056vmovupd   1728(%rdi),%ymm12
7057
7058# qhasm: x7 = mem256[ input_0 + 1760 ]
7059# asm 1: vmovupd   1760(<input_0=int64#1),>x7=reg256#14
7060# asm 2: vmovupd   1760(<input_0=%rdi),>x7=%ymm13
7061vmovupd   1760(%rdi),%ymm13
7062
7063# qhasm: v00 = x0 & mask0
7064# asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15
7065# asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14
7066vpand %ymm6,%ymm0,%ymm14
7067
7068# qhasm: v10 = x4 & mask0
7069# asm 1: vpand <x4=reg256#11,<mask0=reg256#1,>v10=reg256#16
7070# asm 2: vpand <x4=%ymm10,<mask0=%ymm0,>v10=%ymm15
7071vpand %ymm10,%ymm0,%ymm15
7072
7073# qhasm: 4x v10 <<= 4
7074# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
7075# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
7076vpsllq $4,%ymm15,%ymm15
7077
7078# qhasm: v01 = x0 & mask1
7079# asm 1: vpand <x0=reg256#7,<mask1=reg256#2,>v01=reg256#7
7080# asm 2: vpand <x0=%ymm6,<mask1=%ymm1,>v01=%ymm6
7081vpand %ymm6,%ymm1,%ymm6
7082
7083# qhasm: v11 = x4 & mask1
7084# asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11
7085# asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10
7086vpand %ymm10,%ymm1,%ymm10
7087
7088# qhasm: 4x v01 unsigned>>= 4
7089# asm 1: vpsrlq $4,<v01=reg256#7,<v01=reg256#7
7090# asm 2: vpsrlq $4,<v01=%ymm6,<v01=%ymm6
7091vpsrlq $4,%ymm6,%ymm6
7092
7093# qhasm: x0 = v00 | v10
7094# asm 1: vpor  <v00=reg256#15,<v10=reg256#16,>x0=reg256#15
7095# asm 2: vpor  <v00=%ymm14,<v10=%ymm15,>x0=%ymm14
7096vpor  %ymm14,%ymm15,%ymm14
7097
7098# qhasm: x4 = v01 | v11
7099# asm 1: vpor  <v01=reg256#7,<v11=reg256#11,>x4=reg256#7
7100# asm 2: vpor  <v01=%ymm6,<v11=%ymm10,>x4=%ymm6
7101vpor  %ymm6,%ymm10,%ymm6
7102
7103# qhasm: v00 = x1 & mask0
7104# asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11
7105# asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10
7106vpand %ymm7,%ymm0,%ymm10
7107
7108# qhasm: v10 = x5 & mask0
7109# asm 1: vpand <x5=reg256#12,<mask0=reg256#1,>v10=reg256#16
7110# asm 2: vpand <x5=%ymm11,<mask0=%ymm0,>v10=%ymm15
7111vpand %ymm11,%ymm0,%ymm15
7112
7113# qhasm: 4x v10 <<= 4
7114# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
7115# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
7116vpsllq $4,%ymm15,%ymm15
7117
7118# qhasm: v01 = x1 & mask1
7119# asm 1: vpand <x1=reg256#8,<mask1=reg256#2,>v01=reg256#8
7120# asm 2: vpand <x1=%ymm7,<mask1=%ymm1,>v01=%ymm7
7121vpand %ymm7,%ymm1,%ymm7
7122
7123# qhasm: v11 = x5 & mask1
7124# asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12
7125# asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11
7126vpand %ymm11,%ymm1,%ymm11
7127
7128# qhasm: 4x v01 unsigned>>= 4
7129# asm 1: vpsrlq $4,<v01=reg256#8,<v01=reg256#8
7130# asm 2: vpsrlq $4,<v01=%ymm7,<v01=%ymm7
7131vpsrlq $4,%ymm7,%ymm7
7132
7133# qhasm: x1 = v00 | v10
7134# asm 1: vpor  <v00=reg256#11,<v10=reg256#16,>x1=reg256#11
7135# asm 2: vpor  <v00=%ymm10,<v10=%ymm15,>x1=%ymm10
7136vpor  %ymm10,%ymm15,%ymm10
7137
7138# qhasm: x5 = v01 | v11
7139# asm 1: vpor  <v01=reg256#8,<v11=reg256#12,>x5=reg256#8
7140# asm 2: vpor  <v01=%ymm7,<v11=%ymm11,>x5=%ymm7
7141vpor  %ymm7,%ymm11,%ymm7
7142
7143# qhasm: v00 = x2 & mask0
7144# asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12
7145# asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11
7146vpand %ymm8,%ymm0,%ymm11
7147
7148# qhasm: v10 = x6 & mask0
7149# asm 1: vpand <x6=reg256#13,<mask0=reg256#1,>v10=reg256#16
7150# asm 2: vpand <x6=%ymm12,<mask0=%ymm0,>v10=%ymm15
7151vpand %ymm12,%ymm0,%ymm15
7152
7153# qhasm: 4x v10 <<= 4
7154# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
7155# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
7156vpsllq $4,%ymm15,%ymm15
7157
7158# qhasm: v01 = x2 & mask1
7159# asm 1: vpand <x2=reg256#9,<mask1=reg256#2,>v01=reg256#9
7160# asm 2: vpand <x2=%ymm8,<mask1=%ymm1,>v01=%ymm8
7161vpand %ymm8,%ymm1,%ymm8
7162
7163# qhasm: v11 = x6 & mask1
7164# asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13
7165# asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12
7166vpand %ymm12,%ymm1,%ymm12
7167
7168# qhasm: 4x v01 unsigned>>= 4
7169# asm 1: vpsrlq $4,<v01=reg256#9,<v01=reg256#9
7170# asm 2: vpsrlq $4,<v01=%ymm8,<v01=%ymm8
7171vpsrlq $4,%ymm8,%ymm8
7172
7173# qhasm: x2 = v00 | v10
7174# asm 1: vpor  <v00=reg256#12,<v10=reg256#16,>x2=reg256#12
7175# asm 2: vpor  <v00=%ymm11,<v10=%ymm15,>x2=%ymm11
7176vpor  %ymm11,%ymm15,%ymm11
7177
7178# qhasm: x6 = v01 | v11
7179# asm 1: vpor  <v01=reg256#9,<v11=reg256#13,>x6=reg256#9
7180# asm 2: vpor  <v01=%ymm8,<v11=%ymm12,>x6=%ymm8
7181vpor  %ymm8,%ymm12,%ymm8
7182
7183# qhasm: v00 = x3 & mask0
7184# asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13
7185# asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12
7186vpand %ymm9,%ymm0,%ymm12
7187
7188# qhasm: v10 = x7 & mask0
7189# asm 1: vpand <x7=reg256#14,<mask0=reg256#1,>v10=reg256#16
7190# asm 2: vpand <x7=%ymm13,<mask0=%ymm0,>v10=%ymm15
7191vpand %ymm13,%ymm0,%ymm15
7192
7193# qhasm: 4x v10 <<= 4
7194# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
7195# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
7196vpsllq $4,%ymm15,%ymm15
7197
7198# qhasm: v01 = x3 & mask1
7199# asm 1: vpand <x3=reg256#10,<mask1=reg256#2,>v01=reg256#10
7200# asm 2: vpand <x3=%ymm9,<mask1=%ymm1,>v01=%ymm9
7201vpand %ymm9,%ymm1,%ymm9
7202
7203# qhasm: v11 = x7 & mask1
7204# asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#14
7205# asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm13
7206vpand %ymm13,%ymm1,%ymm13
7207
7208# qhasm: 4x v01 unsigned>>= 4
7209# asm 1: vpsrlq $4,<v01=reg256#10,<v01=reg256#10
7210# asm 2: vpsrlq $4,<v01=%ymm9,<v01=%ymm9
7211vpsrlq $4,%ymm9,%ymm9
7212
7213# qhasm: x3 = v00 | v10
7214# asm 1: vpor  <v00=reg256#13,<v10=reg256#16,>x3=reg256#13
7215# asm 2: vpor  <v00=%ymm12,<v10=%ymm15,>x3=%ymm12
7216vpor  %ymm12,%ymm15,%ymm12
7217
7218# qhasm: x7 = v01 | v11
7219# asm 1: vpor  <v01=reg256#10,<v11=reg256#14,>x7=reg256#10
7220# asm 2: vpor  <v01=%ymm9,<v11=%ymm13,>x7=%ymm9
7221vpor  %ymm9,%ymm13,%ymm9
7222
7223# qhasm: v00 = x0 & mask2
7224# asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#14
7225# asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm13
7226vpand %ymm14,%ymm2,%ymm13
7227
7228# qhasm: v10 = x2 & mask2
7229# asm 1: vpand <x2=reg256#12,<mask2=reg256#3,>v10=reg256#16
7230# asm 2: vpand <x2=%ymm11,<mask2=%ymm2,>v10=%ymm15
7231vpand %ymm11,%ymm2,%ymm15
7232
7233# qhasm: 4x v10 <<= 2
7234# asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
7235# asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
7236vpsllq $2,%ymm15,%ymm15
7237
7238# qhasm: v01 = x0 & mask3
7239# asm 1: vpand <x0=reg256#15,<mask3=reg256#4,>v01=reg256#15
7240# asm 2: vpand <x0=%ymm14,<mask3=%ymm3,>v01=%ymm14
7241vpand %ymm14,%ymm3,%ymm14
7242
7243# qhasm: v11 = x2 & mask3
7244# asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12
7245# asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11
7246vpand %ymm11,%ymm3,%ymm11
7247
7248# qhasm: 4x v01 unsigned>>= 2
7249# asm 1: vpsrlq $2,<v01=reg256#15,<v01=reg256#15
7250# asm 2: vpsrlq $2,<v01=%ymm14,<v01=%ymm14
7251vpsrlq $2,%ymm14,%ymm14
7252
7253# qhasm: x0 = v00 | v10
7254# asm 1: vpor  <v00=reg256#14,<v10=reg256#16,>x0=reg256#14
7255# asm 2: vpor  <v00=%ymm13,<v10=%ymm15,>x0=%ymm13
7256vpor  %ymm13,%ymm15,%ymm13
7257
7258# qhasm: x2 = v01 | v11
7259# asm 1: vpor  <v01=reg256#15,<v11=reg256#12,>x2=reg256#12
7260# asm 2: vpor  <v01=%ymm14,<v11=%ymm11,>x2=%ymm11
7261vpor  %ymm14,%ymm11,%ymm11
7262
7263# qhasm: v00 = x1 & mask2
7264# asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#15
7265# asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm14
7266vpand %ymm10,%ymm2,%ymm14
7267
7268# qhasm: v10 = x3 & mask2
7269# asm 1: vpand <x3=reg256#13,<mask2=reg256#3,>v10=reg256#16
7270# asm 2: vpand <x3=%ymm12,<mask2=%ymm2,>v10=%ymm15
7271vpand %ymm12,%ymm2,%ymm15
7272
7273# qhasm: 4x v10 <<= 2
7274# asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
7275# asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
7276vpsllq $2,%ymm15,%ymm15
7277
7278# qhasm: v01 = x1 & mask3
7279# asm 1: vpand <x1=reg256#11,<mask3=reg256#4,>v01=reg256#11
7280# asm 2: vpand <x1=%ymm10,<mask3=%ymm3,>v01=%ymm10
7281vpand %ymm10,%ymm3,%ymm10
7282
7283# qhasm: v11 = x3 & mask3
7284# asm 1: vpand <x3=reg256#13,<mask3=reg256#4,>v11=reg256#13
7285# asm 2: vpand <x3=%ymm12,<mask3=%ymm3,>v11=%ymm12
7286vpand %ymm12,%ymm3,%ymm12
7287
7288# qhasm: 4x v01 unsigned>>= 2
7289# asm 1: vpsrlq $2,<v01=reg256#11,<v01=reg256#11
7290# asm 2: vpsrlq $2,<v01=%ymm10,<v01=%ymm10
7291vpsrlq $2,%ymm10,%ymm10
7292
7293# qhasm: x1 = v00 | v10
7294# asm 1: vpor  <v00=reg256#15,<v10=reg256#16,>x1=reg256#15
7295# asm 2: vpor  <v00=%ymm14,<v10=%ymm15,>x1=%ymm14
7296vpor  %ymm14,%ymm15,%ymm14
7297
7298# qhasm: x3 = v01 | v11
7299# asm 1: vpor  <v01=reg256#11,<v11=reg256#13,>x3=reg256#11
7300# asm 2: vpor  <v01=%ymm10,<v11=%ymm12,>x3=%ymm10
7301vpor  %ymm10,%ymm12,%ymm10
7302
7303# qhasm: v00 = x4 & mask2
7304# asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#13
7305# asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm12
7306vpand %ymm6,%ymm2,%ymm12
7307
7308# qhasm: v10 = x6 & mask2
7309# asm 1: vpand <x6=reg256#9,<mask2=reg256#3,>v10=reg256#16
7310# asm 2: vpand <x6=%ymm8,<mask2=%ymm2,>v10=%ymm15
7311vpand %ymm8,%ymm2,%ymm15
7312
7313# qhasm: 4x v10 <<= 2
7314# asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
7315# asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
7316vpsllq $2,%ymm15,%ymm15
7317
7318# qhasm: v01 = x4 & mask3
7319# asm 1: vpand <x4=reg256#7,<mask3=reg256#4,>v01=reg256#7
7320# asm 2: vpand <x4=%ymm6,<mask3=%ymm3,>v01=%ymm6
7321vpand %ymm6,%ymm3,%ymm6
7322
7323# qhasm: v11 = x6 & mask3
7324# asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9
7325# asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8
7326vpand %ymm8,%ymm3,%ymm8
7327
7328# qhasm: 4x v01 unsigned>>= 2
7329# asm 1: vpsrlq $2,<v01=reg256#7,<v01=reg256#7
7330# asm 2: vpsrlq $2,<v01=%ymm6,<v01=%ymm6
7331vpsrlq $2,%ymm6,%ymm6
7332
7333# qhasm: x4 = v00 | v10
7334# asm 1: vpor  <v00=reg256#13,<v10=reg256#16,>x4=reg256#13
7335# asm 2: vpor  <v00=%ymm12,<v10=%ymm15,>x4=%ymm12
7336vpor  %ymm12,%ymm15,%ymm12
7337
7338# qhasm: x6 = v01 | v11
7339# asm 1: vpor  <v01=reg256#7,<v11=reg256#9,>x6=reg256#7
7340# asm 2: vpor  <v01=%ymm6,<v11=%ymm8,>x6=%ymm6
7341vpor  %ymm6,%ymm8,%ymm6
7342
7343# qhasm: v00 = x5 & mask2
7344# asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9
7345# asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8
7346vpand %ymm7,%ymm2,%ymm8
7347
7348# qhasm: v10 = x7 & mask2
7349# asm 1: vpand <x7=reg256#10,<mask2=reg256#3,>v10=reg256#16
7350# asm 2: vpand <x7=%ymm9,<mask2=%ymm2,>v10=%ymm15
7351vpand %ymm9,%ymm2,%ymm15
7352
7353# qhasm: 4x v10 <<= 2
7354# asm 1: vpsllq $2,<v10=reg256#16,<v10=reg256#16
7355# asm 2: vpsllq $2,<v10=%ymm15,<v10=%ymm15
7356vpsllq $2,%ymm15,%ymm15
7357
7358# qhasm: v01 = x5 & mask3
7359# asm 1: vpand <x5=reg256#8,<mask3=reg256#4,>v01=reg256#8
7360# asm 2: vpand <x5=%ymm7,<mask3=%ymm3,>v01=%ymm7
7361vpand %ymm7,%ymm3,%ymm7
7362
7363# qhasm: v11 = x7 & mask3
7364# asm 1: vpand <x7=reg256#10,<mask3=reg256#4,>v11=reg256#10
7365# asm 2: vpand <x7=%ymm9,<mask3=%ymm3,>v11=%ymm9
7366vpand %ymm9,%ymm3,%ymm9
7367
7368# qhasm: 4x v01 unsigned>>= 2
7369# asm 1: vpsrlq $2,<v01=reg256#8,<v01=reg256#8
7370# asm 2: vpsrlq $2,<v01=%ymm7,<v01=%ymm7
7371vpsrlq $2,%ymm7,%ymm7
7372
7373# qhasm: x5 = v00 | v10
7374# asm 1: vpor  <v00=reg256#9,<v10=reg256#16,>x5=reg256#9
7375# asm 2: vpor  <v00=%ymm8,<v10=%ymm15,>x5=%ymm8
7376vpor  %ymm8,%ymm15,%ymm8
7377
7378# qhasm: x7 = v01 | v11
7379# asm 1: vpor  <v01=reg256#8,<v11=reg256#10,>x7=reg256#8
7380# asm 2: vpor  <v01=%ymm7,<v11=%ymm9,>x7=%ymm7
7381vpor  %ymm7,%ymm9,%ymm7
7382
7383# qhasm: v00 = x0 & mask4
7384# asm 1: vpand <x0=reg256#14,<mask4=reg256#5,>v00=reg256#10
7385# asm 2: vpand <x0=%ymm13,<mask4=%ymm4,>v00=%ymm9
7386vpand %ymm13,%ymm4,%ymm9
7387
7388# qhasm: v10 = x1 & mask4
7389# asm 1: vpand <x1=reg256#15,<mask4=reg256#5,>v10=reg256#16
7390# asm 2: vpand <x1=%ymm14,<mask4=%ymm4,>v10=%ymm15
7391vpand %ymm14,%ymm4,%ymm15
7392
7393# qhasm: 4x v10 <<= 1
7394# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
7395# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
7396vpsllq $1,%ymm15,%ymm15
7397
7398# qhasm: v01 = x0 & mask5
7399# asm 1: vpand <x0=reg256#14,<mask5=reg256#6,>v01=reg256#14
7400# asm 2: vpand <x0=%ymm13,<mask5=%ymm5,>v01=%ymm13
7401vpand %ymm13,%ymm5,%ymm13
7402
7403# qhasm: v11 = x1 & mask5
7404# asm 1: vpand <x1=reg256#15,<mask5=reg256#6,>v11=reg256#15
7405# asm 2: vpand <x1=%ymm14,<mask5=%ymm5,>v11=%ymm14
7406vpand %ymm14,%ymm5,%ymm14
7407
7408# qhasm: 4x v01 unsigned>>= 1
7409# asm 1: vpsrlq $1,<v01=reg256#14,<v01=reg256#14
7410# asm 2: vpsrlq $1,<v01=%ymm13,<v01=%ymm13
7411vpsrlq $1,%ymm13,%ymm13
7412
7413# qhasm: x0 = v00 | v10
7414# asm 1: vpor  <v00=reg256#10,<v10=reg256#16,>x0=reg256#10
7415# asm 2: vpor  <v00=%ymm9,<v10=%ymm15,>x0=%ymm9
7416vpor  %ymm9,%ymm15,%ymm9
7417
7418# qhasm: x1 = v01 | v11
7419# asm 1: vpor  <v01=reg256#14,<v11=reg256#15,>x1=reg256#14
7420# asm 2: vpor  <v01=%ymm13,<v11=%ymm14,>x1=%ymm13
7421vpor  %ymm13,%ymm14,%ymm13
7422
7423# qhasm: v00 = x2 & mask4
7424# asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#15
7425# asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm14
7426vpand %ymm11,%ymm4,%ymm14
7427
7428# qhasm: v10 = x3 & mask4
7429# asm 1: vpand <x3=reg256#11,<mask4=reg256#5,>v10=reg256#16
7430# asm 2: vpand <x3=%ymm10,<mask4=%ymm4,>v10=%ymm15
7431vpand %ymm10,%ymm4,%ymm15
7432
7433# qhasm: 4x v10 <<= 1
7434# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
7435# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
7436vpsllq $1,%ymm15,%ymm15
7437
7438# qhasm: v01 = x2 & mask5
7439# asm 1: vpand <x2=reg256#12,<mask5=reg256#6,>v01=reg256#12
7440# asm 2: vpand <x2=%ymm11,<mask5=%ymm5,>v01=%ymm11
7441vpand %ymm11,%ymm5,%ymm11
7442
7443# qhasm: v11 = x3 & mask5
7444# asm 1: vpand <x3=reg256#11,<mask5=reg256#6,>v11=reg256#11
7445# asm 2: vpand <x3=%ymm10,<mask5=%ymm5,>v11=%ymm10
7446vpand %ymm10,%ymm5,%ymm10
7447
7448# qhasm: 4x v01 unsigned>>= 1
7449# asm 1: vpsrlq $1,<v01=reg256#12,<v01=reg256#12
7450# asm 2: vpsrlq $1,<v01=%ymm11,<v01=%ymm11
7451vpsrlq $1,%ymm11,%ymm11
7452
7453# qhasm: x2 = v00 | v10
7454# asm 1: vpor  <v00=reg256#15,<v10=reg256#16,>x2=reg256#15
7455# asm 2: vpor  <v00=%ymm14,<v10=%ymm15,>x2=%ymm14
7456vpor  %ymm14,%ymm15,%ymm14
7457
7458# qhasm: x3 = v01 | v11
7459# asm 1: vpor  <v01=reg256#12,<v11=reg256#11,>x3=reg256#11
7460# asm 2: vpor  <v01=%ymm11,<v11=%ymm10,>x3=%ymm10
7461vpor  %ymm11,%ymm10,%ymm10
7462
7463# qhasm: v00 = x4 & mask4
7464# asm 1: vpand <x4=reg256#13,<mask4=reg256#5,>v00=reg256#12
7465# asm 2: vpand <x4=%ymm12,<mask4=%ymm4,>v00=%ymm11
7466vpand %ymm12,%ymm4,%ymm11
7467
7468# qhasm: v10 = x5 & mask4
7469# asm 1: vpand <x5=reg256#9,<mask4=reg256#5,>v10=reg256#16
7470# asm 2: vpand <x5=%ymm8,<mask4=%ymm4,>v10=%ymm15
7471vpand %ymm8,%ymm4,%ymm15
7472
7473# qhasm: 4x v10 <<= 1
7474# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
7475# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
7476vpsllq $1,%ymm15,%ymm15
7477
7478# qhasm: v01 = x4 & mask5
7479# asm 1: vpand <x4=reg256#13,<mask5=reg256#6,>v01=reg256#13
7480# asm 2: vpand <x4=%ymm12,<mask5=%ymm5,>v01=%ymm12
7481vpand %ymm12,%ymm5,%ymm12
7482
7483# qhasm: v11 = x5 & mask5
7484# asm 1: vpand <x5=reg256#9,<mask5=reg256#6,>v11=reg256#9
7485# asm 2: vpand <x5=%ymm8,<mask5=%ymm5,>v11=%ymm8
7486vpand %ymm8,%ymm5,%ymm8
7487
7488# qhasm: 4x v01 unsigned>>= 1
7489# asm 1: vpsrlq $1,<v01=reg256#13,<v01=reg256#13
7490# asm 2: vpsrlq $1,<v01=%ymm12,<v01=%ymm12
7491vpsrlq $1,%ymm12,%ymm12
7492
7493# qhasm: x4 = v00 | v10
7494# asm 1: vpor  <v00=reg256#12,<v10=reg256#16,>x4=reg256#12
7495# asm 2: vpor  <v00=%ymm11,<v10=%ymm15,>x4=%ymm11
7496vpor  %ymm11,%ymm15,%ymm11
7497
7498# qhasm: x5 = v01 | v11
7499# asm 1: vpor  <v01=reg256#13,<v11=reg256#9,>x5=reg256#9
7500# asm 2: vpor  <v01=%ymm12,<v11=%ymm8,>x5=%ymm8
7501vpor  %ymm12,%ymm8,%ymm8
7502
7503# qhasm: v00 = x6 & mask4
7504# asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#13
7505# asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm12
7506vpand %ymm6,%ymm4,%ymm12
7507
7508# qhasm: v10 = x7 & mask4
7509# asm 1: vpand <x7=reg256#8,<mask4=reg256#5,>v10=reg256#16
7510# asm 2: vpand <x7=%ymm7,<mask4=%ymm4,>v10=%ymm15
7511vpand %ymm7,%ymm4,%ymm15
7512
7513# qhasm: 4x v10 <<= 1
7514# asm 1: vpsllq $1,<v10=reg256#16,<v10=reg256#16
7515# asm 2: vpsllq $1,<v10=%ymm15,<v10=%ymm15
7516vpsllq $1,%ymm15,%ymm15
7517
7518# qhasm: v01 = x6 & mask5
7519# asm 1: vpand <x6=reg256#7,<mask5=reg256#6,>v01=reg256#7
7520# asm 2: vpand <x6=%ymm6,<mask5=%ymm5,>v01=%ymm6
7521vpand %ymm6,%ymm5,%ymm6
7522
7523# qhasm: v11 = x7 & mask5
7524# asm 1: vpand <x7=reg256#8,<mask5=reg256#6,>v11=reg256#8
7525# asm 2: vpand <x7=%ymm7,<mask5=%ymm5,>v11=%ymm7
7526vpand %ymm7,%ymm5,%ymm7
7527
7528# qhasm: 4x v01 unsigned>>= 1
7529# asm 1: vpsrlq $1,<v01=reg256#7,<v01=reg256#7
7530# asm 2: vpsrlq $1,<v01=%ymm6,<v01=%ymm6
7531vpsrlq $1,%ymm6,%ymm6
7532
7533# qhasm: x6 = v00 | v10
7534# asm 1: vpor  <v00=reg256#13,<v10=reg256#16,>x6=reg256#13
7535# asm 2: vpor  <v00=%ymm12,<v10=%ymm15,>x6=%ymm12
7536vpor  %ymm12,%ymm15,%ymm12
7537
7538# qhasm: x7 = v01 | v11
7539# asm 1: vpor  <v01=reg256#7,<v11=reg256#8,>x7=reg256#7
7540# asm 2: vpor  <v01=%ymm6,<v11=%ymm7,>x7=%ymm6
7541vpor  %ymm6,%ymm7,%ymm6
7542
7543# qhasm: mem256[ input_0 + 1536 ] = x0
7544# asm 1: vmovupd   <x0=reg256#10,1536(<input_0=int64#1)
7545# asm 2: vmovupd   <x0=%ymm9,1536(<input_0=%rdi)
7546vmovupd   %ymm9,1536(%rdi)
7547
7548# qhasm: mem256[ input_0 + 1568 ] = x1
7549# asm 1: vmovupd   <x1=reg256#14,1568(<input_0=int64#1)
7550# asm 2: vmovupd   <x1=%ymm13,1568(<input_0=%rdi)
7551vmovupd   %ymm13,1568(%rdi)
7552
7553# qhasm: mem256[ input_0 + 1600 ] = x2
7554# asm 1: vmovupd   <x2=reg256#15,1600(<input_0=int64#1)
7555# asm 2: vmovupd   <x2=%ymm14,1600(<input_0=%rdi)
7556vmovupd   %ymm14,1600(%rdi)
7557
7558# qhasm: mem256[ input_0 + 1632 ] = x3
7559# asm 1: vmovupd   <x3=reg256#11,1632(<input_0=int64#1)
7560# asm 2: vmovupd   <x3=%ymm10,1632(<input_0=%rdi)
7561vmovupd   %ymm10,1632(%rdi)
7562
7563# qhasm: mem256[ input_0 + 1664 ] = x4
7564# asm 1: vmovupd   <x4=reg256#12,1664(<input_0=int64#1)
7565# asm 2: vmovupd   <x4=%ymm11,1664(<input_0=%rdi)
7566vmovupd   %ymm11,1664(%rdi)
7567
7568# qhasm: mem256[ input_0 + 1696 ] = x5
7569# asm 1: vmovupd   <x5=reg256#9,1696(<input_0=int64#1)
7570# asm 2: vmovupd   <x5=%ymm8,1696(<input_0=%rdi)
7571vmovupd   %ymm8,1696(%rdi)
7572
7573# qhasm: mem256[ input_0 + 1728 ] = x6
7574# asm 1: vmovupd   <x6=reg256#13,1728(<input_0=int64#1)
7575# asm 2: vmovupd   <x6=%ymm12,1728(<input_0=%rdi)
7576vmovupd   %ymm12,1728(%rdi)
7577
7578# qhasm: mem256[ input_0 + 1760 ] = x7
7579# asm 1: vmovupd   <x7=reg256#7,1760(<input_0=int64#1)
7580# asm 2: vmovupd   <x7=%ymm6,1760(<input_0=%rdi)
7581vmovupd   %ymm6,1760(%rdi)
7582
7583# qhasm: x0 = mem256[ input_0 + 1792 ]
7584# asm 1: vmovupd   1792(<input_0=int64#1),>x0=reg256#7
7585# asm 2: vmovupd   1792(<input_0=%rdi),>x0=%ymm6
7586vmovupd   1792(%rdi),%ymm6
7587
7588# qhasm: x1 = mem256[ input_0 + 1824 ]
7589# asm 1: vmovupd   1824(<input_0=int64#1),>x1=reg256#8
7590# asm 2: vmovupd   1824(<input_0=%rdi),>x1=%ymm7
7591vmovupd   1824(%rdi),%ymm7
7592
7593# qhasm: x2 = mem256[ input_0 + 1856 ]
7594# asm 1: vmovupd   1856(<input_0=int64#1),>x2=reg256#9
7595# asm 2: vmovupd   1856(<input_0=%rdi),>x2=%ymm8
7596vmovupd   1856(%rdi),%ymm8
7597
7598# qhasm: x3 = mem256[ input_0 + 1888 ]
7599# asm 1: vmovupd   1888(<input_0=int64#1),>x3=reg256#10
7600# asm 2: vmovupd   1888(<input_0=%rdi),>x3=%ymm9
7601vmovupd   1888(%rdi),%ymm9
7602
7603# qhasm: x4 = mem256[ input_0 + 1920 ]
7604# asm 1: vmovupd   1920(<input_0=int64#1),>x4=reg256#11
7605# asm 2: vmovupd   1920(<input_0=%rdi),>x4=%ymm10
7606vmovupd   1920(%rdi),%ymm10
7607
7608# qhasm: x5 = mem256[ input_0 + 1952 ]
7609# asm 1: vmovupd   1952(<input_0=int64#1),>x5=reg256#12
7610# asm 2: vmovupd   1952(<input_0=%rdi),>x5=%ymm11
7611vmovupd   1952(%rdi),%ymm11
7612
7613# qhasm: x6 = mem256[ input_0 + 1984 ]
7614# asm 1: vmovupd   1984(<input_0=int64#1),>x6=reg256#13
7615# asm 2: vmovupd   1984(<input_0=%rdi),>x6=%ymm12
7616vmovupd   1984(%rdi),%ymm12
7617
7618# qhasm: x7 = mem256[ input_0 + 2016 ]
7619# asm 1: vmovupd   2016(<input_0=int64#1),>x7=reg256#14
7620# asm 2: vmovupd   2016(<input_0=%rdi),>x7=%ymm13
7621vmovupd   2016(%rdi),%ymm13
7622
7623# qhasm: v00 = x0 & mask0
7624# asm 1: vpand <x0=reg256#7,<mask0=reg256#1,>v00=reg256#15
7625# asm 2: vpand <x0=%ymm6,<mask0=%ymm0,>v00=%ymm14
7626vpand %ymm6,%ymm0,%ymm14
7627
7628# qhasm: v10 = x4 & mask0
7629# asm 1: vpand <x4=reg256#11,<mask0=reg256#1,>v10=reg256#16
7630# asm 2: vpand <x4=%ymm10,<mask0=%ymm0,>v10=%ymm15
7631vpand %ymm10,%ymm0,%ymm15
7632
7633# qhasm: 4x v10 <<= 4
7634# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
7635# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
7636vpsllq $4,%ymm15,%ymm15
7637
7638# qhasm: v01 = x0 & mask1
7639# asm 1: vpand <x0=reg256#7,<mask1=reg256#2,>v01=reg256#7
7640# asm 2: vpand <x0=%ymm6,<mask1=%ymm1,>v01=%ymm6
7641vpand %ymm6,%ymm1,%ymm6
7642
7643# qhasm: v11 = x4 & mask1
7644# asm 1: vpand <x4=reg256#11,<mask1=reg256#2,>v11=reg256#11
7645# asm 2: vpand <x4=%ymm10,<mask1=%ymm1,>v11=%ymm10
7646vpand %ymm10,%ymm1,%ymm10
7647
7648# qhasm: 4x v01 unsigned>>= 4
7649# asm 1: vpsrlq $4,<v01=reg256#7,<v01=reg256#7
7650# asm 2: vpsrlq $4,<v01=%ymm6,<v01=%ymm6
7651vpsrlq $4,%ymm6,%ymm6
7652
7653# qhasm: x0 = v00 | v10
7654# asm 1: vpor  <v00=reg256#15,<v10=reg256#16,>x0=reg256#15
7655# asm 2: vpor  <v00=%ymm14,<v10=%ymm15,>x0=%ymm14
7656vpor  %ymm14,%ymm15,%ymm14
7657
7658# qhasm: x4 = v01 | v11
7659# asm 1: vpor  <v01=reg256#7,<v11=reg256#11,>x4=reg256#7
7660# asm 2: vpor  <v01=%ymm6,<v11=%ymm10,>x4=%ymm6
7661vpor  %ymm6,%ymm10,%ymm6
7662
7663# qhasm: v00 = x1 & mask0
7664# asm 1: vpand <x1=reg256#8,<mask0=reg256#1,>v00=reg256#11
7665# asm 2: vpand <x1=%ymm7,<mask0=%ymm0,>v00=%ymm10
7666vpand %ymm7,%ymm0,%ymm10
7667
7668# qhasm: v10 = x5 & mask0
7669# asm 1: vpand <x5=reg256#12,<mask0=reg256#1,>v10=reg256#16
7670# asm 2: vpand <x5=%ymm11,<mask0=%ymm0,>v10=%ymm15
7671vpand %ymm11,%ymm0,%ymm15
7672
7673# qhasm: 4x v10 <<= 4
7674# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
7675# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
7676vpsllq $4,%ymm15,%ymm15
7677
7678# qhasm: v01 = x1 & mask1
7679# asm 1: vpand <x1=reg256#8,<mask1=reg256#2,>v01=reg256#8
7680# asm 2: vpand <x1=%ymm7,<mask1=%ymm1,>v01=%ymm7
7681vpand %ymm7,%ymm1,%ymm7
7682
7683# qhasm: v11 = x5 & mask1
7684# asm 1: vpand <x5=reg256#12,<mask1=reg256#2,>v11=reg256#12
7685# asm 2: vpand <x5=%ymm11,<mask1=%ymm1,>v11=%ymm11
7686vpand %ymm11,%ymm1,%ymm11
7687
7688# qhasm: 4x v01 unsigned>>= 4
7689# asm 1: vpsrlq $4,<v01=reg256#8,<v01=reg256#8
7690# asm 2: vpsrlq $4,<v01=%ymm7,<v01=%ymm7
7691vpsrlq $4,%ymm7,%ymm7
7692
7693# qhasm: x1 = v00 | v10
7694# asm 1: vpor  <v00=reg256#11,<v10=reg256#16,>x1=reg256#11
7695# asm 2: vpor  <v00=%ymm10,<v10=%ymm15,>x1=%ymm10
7696vpor  %ymm10,%ymm15,%ymm10
7697
7698# qhasm: x5 = v01 | v11
7699# asm 1: vpor  <v01=reg256#8,<v11=reg256#12,>x5=reg256#8
7700# asm 2: vpor  <v01=%ymm7,<v11=%ymm11,>x5=%ymm7
7701vpor  %ymm7,%ymm11,%ymm7
7702
7703# qhasm: v00 = x2 & mask0
7704# asm 1: vpand <x2=reg256#9,<mask0=reg256#1,>v00=reg256#12
7705# asm 2: vpand <x2=%ymm8,<mask0=%ymm0,>v00=%ymm11
7706vpand %ymm8,%ymm0,%ymm11
7707
7708# qhasm: v10 = x6 & mask0
7709# asm 1: vpand <x6=reg256#13,<mask0=reg256#1,>v10=reg256#16
7710# asm 2: vpand <x6=%ymm12,<mask0=%ymm0,>v10=%ymm15
7711vpand %ymm12,%ymm0,%ymm15
7712
7713# qhasm: 4x v10 <<= 4
7714# asm 1: vpsllq $4,<v10=reg256#16,<v10=reg256#16
7715# asm 2: vpsllq $4,<v10=%ymm15,<v10=%ymm15
7716vpsllq $4,%ymm15,%ymm15
7717
7718# qhasm: v01 = x2 & mask1
7719# asm 1: vpand <x2=reg256#9,<mask1=reg256#2,>v01=reg256#9
7720# asm 2: vpand <x2=%ymm8,<mask1=%ymm1,>v01=%ymm8
7721vpand %ymm8,%ymm1,%ymm8
7722
7723# qhasm: v11 = x6 & mask1
7724# asm 1: vpand <x6=reg256#13,<mask1=reg256#2,>v11=reg256#13
7725# asm 2: vpand <x6=%ymm12,<mask1=%ymm1,>v11=%ymm12
7726vpand %ymm12,%ymm1,%ymm12
7727
7728# qhasm: 4x v01 unsigned>>= 4
7729# asm 1: vpsrlq $4,<v01=reg256#9,<v01=reg256#9
7730# asm 2: vpsrlq $4,<v01=%ymm8,<v01=%ymm8
7731vpsrlq $4,%ymm8,%ymm8
7732
7733# qhasm: x2 = v00 | v10
7734# asm 1: vpor  <v00=reg256#12,<v10=reg256#16,>x2=reg256#12
7735# asm 2: vpor  <v00=%ymm11,<v10=%ymm15,>x2=%ymm11
7736vpor  %ymm11,%ymm15,%ymm11
7737
7738# qhasm: x6 = v01 | v11
7739# asm 1: vpor  <v01=reg256#9,<v11=reg256#13,>x6=reg256#9
7740# asm 2: vpor  <v01=%ymm8,<v11=%ymm12,>x6=%ymm8
7741vpor  %ymm8,%ymm12,%ymm8
7742
7743# qhasm: v00 = x3 & mask0
7744# asm 1: vpand <x3=reg256#10,<mask0=reg256#1,>v00=reg256#13
7745# asm 2: vpand <x3=%ymm9,<mask0=%ymm0,>v00=%ymm12
7746vpand %ymm9,%ymm0,%ymm12
7747
7748# qhasm: v10 = x7 & mask0
7749# asm 1: vpand <x7=reg256#14,<mask0=reg256#1,>v10=reg256#1
7750# asm 2: vpand <x7=%ymm13,<mask0=%ymm0,>v10=%ymm0
7751vpand %ymm13,%ymm0,%ymm0
7752
7753# qhasm: 4x v10 <<= 4
7754# asm 1: vpsllq $4,<v10=reg256#1,<v10=reg256#1
7755# asm 2: vpsllq $4,<v10=%ymm0,<v10=%ymm0
7756vpsllq $4,%ymm0,%ymm0
7757
7758# qhasm: v01 = x3 & mask1
7759# asm 1: vpand <x3=reg256#10,<mask1=reg256#2,>v01=reg256#10
7760# asm 2: vpand <x3=%ymm9,<mask1=%ymm1,>v01=%ymm9
7761vpand %ymm9,%ymm1,%ymm9
7762
7763# qhasm: v11 = x7 & mask1
7764# asm 1: vpand <x7=reg256#14,<mask1=reg256#2,>v11=reg256#2
7765# asm 2: vpand <x7=%ymm13,<mask1=%ymm1,>v11=%ymm1
7766vpand %ymm13,%ymm1,%ymm1
7767
7768# qhasm: 4x v01 unsigned>>= 4
7769# asm 1: vpsrlq $4,<v01=reg256#10,<v01=reg256#10
7770# asm 2: vpsrlq $4,<v01=%ymm9,<v01=%ymm9
7771vpsrlq $4,%ymm9,%ymm9
7772
7773# qhasm: x3 = v00 | v10
7774# asm 1: vpor  <v00=reg256#13,<v10=reg256#1,>x3=reg256#1
7775# asm 2: vpor  <v00=%ymm12,<v10=%ymm0,>x3=%ymm0
7776vpor  %ymm12,%ymm0,%ymm0
7777
7778# qhasm: x7 = v01 | v11
7779# asm 1: vpor  <v01=reg256#10,<v11=reg256#2,>x7=reg256#2
7780# asm 2: vpor  <v01=%ymm9,<v11=%ymm1,>x7=%ymm1
7781vpor  %ymm9,%ymm1,%ymm1
7782
7783# qhasm: v00 = x0 & mask2
7784# asm 1: vpand <x0=reg256#15,<mask2=reg256#3,>v00=reg256#10
7785# asm 2: vpand <x0=%ymm14,<mask2=%ymm2,>v00=%ymm9
7786vpand %ymm14,%ymm2,%ymm9
7787
7788# qhasm: v10 = x2 & mask2
7789# asm 1: vpand <x2=reg256#12,<mask2=reg256#3,>v10=reg256#13
7790# asm 2: vpand <x2=%ymm11,<mask2=%ymm2,>v10=%ymm12
7791vpand %ymm11,%ymm2,%ymm12
7792
7793# qhasm: 4x v10 <<= 2
7794# asm 1: vpsllq $2,<v10=reg256#13,<v10=reg256#13
7795# asm 2: vpsllq $2,<v10=%ymm12,<v10=%ymm12
7796vpsllq $2,%ymm12,%ymm12
7797
7798# qhasm: v01 = x0 & mask3
7799# asm 1: vpand <x0=reg256#15,<mask3=reg256#4,>v01=reg256#14
7800# asm 2: vpand <x0=%ymm14,<mask3=%ymm3,>v01=%ymm13
7801vpand %ymm14,%ymm3,%ymm13
7802
7803# qhasm: v11 = x2 & mask3
7804# asm 1: vpand <x2=reg256#12,<mask3=reg256#4,>v11=reg256#12
7805# asm 2: vpand <x2=%ymm11,<mask3=%ymm3,>v11=%ymm11
7806vpand %ymm11,%ymm3,%ymm11
7807
7808# qhasm: 4x v01 unsigned>>= 2
7809# asm 1: vpsrlq $2,<v01=reg256#14,<v01=reg256#14
7810# asm 2: vpsrlq $2,<v01=%ymm13,<v01=%ymm13
7811vpsrlq $2,%ymm13,%ymm13
7812
7813# qhasm: x0 = v00 | v10
7814# asm 1: vpor  <v00=reg256#10,<v10=reg256#13,>x0=reg256#10
7815# asm 2: vpor  <v00=%ymm9,<v10=%ymm12,>x0=%ymm9
7816vpor  %ymm9,%ymm12,%ymm9
7817
7818# qhasm: x2 = v01 | v11
7819# asm 1: vpor  <v01=reg256#14,<v11=reg256#12,>x2=reg256#12
7820# asm 2: vpor  <v01=%ymm13,<v11=%ymm11,>x2=%ymm11
7821vpor  %ymm13,%ymm11,%ymm11
7822
7823# qhasm: v00 = x1 & mask2
7824# asm 1: vpand <x1=reg256#11,<mask2=reg256#3,>v00=reg256#13
7825# asm 2: vpand <x1=%ymm10,<mask2=%ymm2,>v00=%ymm12
7826vpand %ymm10,%ymm2,%ymm12
7827
7828# qhasm: v10 = x3 & mask2
7829# asm 1: vpand <x3=reg256#1,<mask2=reg256#3,>v10=reg256#14
7830# asm 2: vpand <x3=%ymm0,<mask2=%ymm2,>v10=%ymm13
7831vpand %ymm0,%ymm2,%ymm13
7832
7833# qhasm: 4x v10 <<= 2
7834# asm 1: vpsllq $2,<v10=reg256#14,<v10=reg256#14
7835# asm 2: vpsllq $2,<v10=%ymm13,<v10=%ymm13
7836vpsllq $2,%ymm13,%ymm13
7837
7838# qhasm: v01 = x1 & mask3
7839# asm 1: vpand <x1=reg256#11,<mask3=reg256#4,>v01=reg256#11
7840# asm 2: vpand <x1=%ymm10,<mask3=%ymm3,>v01=%ymm10
7841vpand %ymm10,%ymm3,%ymm10
7842
7843# qhasm: v11 = x3 & mask3
7844# asm 1: vpand <x3=reg256#1,<mask3=reg256#4,>v11=reg256#1
7845# asm 2: vpand <x3=%ymm0,<mask3=%ymm3,>v11=%ymm0
7846vpand %ymm0,%ymm3,%ymm0
7847
7848# qhasm: 4x v01 unsigned>>= 2
7849# asm 1: vpsrlq $2,<v01=reg256#11,<v01=reg256#11
7850# asm 2: vpsrlq $2,<v01=%ymm10,<v01=%ymm10
7851vpsrlq $2,%ymm10,%ymm10
7852
7853# qhasm: x1 = v00 | v10
7854# asm 1: vpor  <v00=reg256#13,<v10=reg256#14,>x1=reg256#13
7855# asm 2: vpor  <v00=%ymm12,<v10=%ymm13,>x1=%ymm12
7856vpor  %ymm12,%ymm13,%ymm12
7857
7858# qhasm: x3 = v01 | v11
7859# asm 1: vpor  <v01=reg256#11,<v11=reg256#1,>x3=reg256#1
7860# asm 2: vpor  <v01=%ymm10,<v11=%ymm0,>x3=%ymm0
7861vpor  %ymm10,%ymm0,%ymm0
7862
7863# qhasm: v00 = x4 & mask2
7864# asm 1: vpand <x4=reg256#7,<mask2=reg256#3,>v00=reg256#11
7865# asm 2: vpand <x4=%ymm6,<mask2=%ymm2,>v00=%ymm10
7866vpand %ymm6,%ymm2,%ymm10
7867
7868# qhasm: v10 = x6 & mask2
7869# asm 1: vpand <x6=reg256#9,<mask2=reg256#3,>v10=reg256#14
7870# asm 2: vpand <x6=%ymm8,<mask2=%ymm2,>v10=%ymm13
7871vpand %ymm8,%ymm2,%ymm13
7872
7873# qhasm: 4x v10 <<= 2
7874# asm 1: vpsllq $2,<v10=reg256#14,<v10=reg256#14
7875# asm 2: vpsllq $2,<v10=%ymm13,<v10=%ymm13
7876vpsllq $2,%ymm13,%ymm13
7877
7878# qhasm: v01 = x4 & mask3
7879# asm 1: vpand <x4=reg256#7,<mask3=reg256#4,>v01=reg256#7
7880# asm 2: vpand <x4=%ymm6,<mask3=%ymm3,>v01=%ymm6
7881vpand %ymm6,%ymm3,%ymm6
7882
7883# qhasm: v11 = x6 & mask3
7884# asm 1: vpand <x6=reg256#9,<mask3=reg256#4,>v11=reg256#9
7885# asm 2: vpand <x6=%ymm8,<mask3=%ymm3,>v11=%ymm8
7886vpand %ymm8,%ymm3,%ymm8
7887
7888# qhasm: 4x v01 unsigned>>= 2
7889# asm 1: vpsrlq $2,<v01=reg256#7,<v01=reg256#7
7890# asm 2: vpsrlq $2,<v01=%ymm6,<v01=%ymm6
7891vpsrlq $2,%ymm6,%ymm6
7892
7893# qhasm: x4 = v00 | v10
7894# asm 1: vpor  <v00=reg256#11,<v10=reg256#14,>x4=reg256#11
7895# asm 2: vpor  <v00=%ymm10,<v10=%ymm13,>x4=%ymm10
7896vpor  %ymm10,%ymm13,%ymm10
7897
7898# qhasm: x6 = v01 | v11
7899# asm 1: vpor  <v01=reg256#7,<v11=reg256#9,>x6=reg256#7
7900# asm 2: vpor  <v01=%ymm6,<v11=%ymm8,>x6=%ymm6
7901vpor  %ymm6,%ymm8,%ymm6
7902
7903# qhasm: v00 = x5 & mask2
7904# asm 1: vpand <x5=reg256#8,<mask2=reg256#3,>v00=reg256#9
7905# asm 2: vpand <x5=%ymm7,<mask2=%ymm2,>v00=%ymm8
7906vpand %ymm7,%ymm2,%ymm8
7907
7908# qhasm: v10 = x7 & mask2
7909# asm 1: vpand <x7=reg256#2,<mask2=reg256#3,>v10=reg256#3
7910# asm 2: vpand <x7=%ymm1,<mask2=%ymm2,>v10=%ymm2
7911vpand %ymm1,%ymm2,%ymm2
7912
7913# qhasm: 4x v10 <<= 2
7914# asm 1: vpsllq $2,<v10=reg256#3,<v10=reg256#3
7915# asm 2: vpsllq $2,<v10=%ymm2,<v10=%ymm2
7916vpsllq $2,%ymm2,%ymm2
7917
7918# qhasm: v01 = x5 & mask3
7919# asm 1: vpand <x5=reg256#8,<mask3=reg256#4,>v01=reg256#8
7920# asm 2: vpand <x5=%ymm7,<mask3=%ymm3,>v01=%ymm7
7921vpand %ymm7,%ymm3,%ymm7
7922
7923# qhasm: v11 = x7 & mask3
7924# asm 1: vpand <x7=reg256#2,<mask3=reg256#4,>v11=reg256#2
7925# asm 2: vpand <x7=%ymm1,<mask3=%ymm3,>v11=%ymm1
7926vpand %ymm1,%ymm3,%ymm1
7927
7928# qhasm: 4x v01 unsigned>>= 2
7929# asm 1: vpsrlq $2,<v01=reg256#8,<v01=reg256#8
7930# asm 2: vpsrlq $2,<v01=%ymm7,<v01=%ymm7
7931vpsrlq $2,%ymm7,%ymm7
7932
7933# qhasm: x5 = v00 | v10
7934# asm 1: vpor  <v00=reg256#9,<v10=reg256#3,>x5=reg256#3
7935# asm 2: vpor  <v00=%ymm8,<v10=%ymm2,>x5=%ymm2
7936vpor  %ymm8,%ymm2,%ymm2
7937
7938# qhasm: x7 = v01 | v11
7939# asm 1: vpor  <v01=reg256#8,<v11=reg256#2,>x7=reg256#2
7940# asm 2: vpor  <v01=%ymm7,<v11=%ymm1,>x7=%ymm1
7941vpor  %ymm7,%ymm1,%ymm1
7942
7943# qhasm: v00 = x0 & mask4
7944# asm 1: vpand <x0=reg256#10,<mask4=reg256#5,>v00=reg256#4
7945# asm 2: vpand <x0=%ymm9,<mask4=%ymm4,>v00=%ymm3
7946vpand %ymm9,%ymm4,%ymm3
7947
7948# qhasm: v10 = x1 & mask4
7949# asm 1: vpand <x1=reg256#13,<mask4=reg256#5,>v10=reg256#8
7950# asm 2: vpand <x1=%ymm12,<mask4=%ymm4,>v10=%ymm7
7951vpand %ymm12,%ymm4,%ymm7
7952
7953# qhasm: 4x v10 <<= 1
7954# asm 1: vpsllq $1,<v10=reg256#8,<v10=reg256#8
7955# asm 2: vpsllq $1,<v10=%ymm7,<v10=%ymm7
7956vpsllq $1,%ymm7,%ymm7
7957
7958# qhasm: v01 = x0 & mask5
7959# asm 1: vpand <x0=reg256#10,<mask5=reg256#6,>v01=reg256#9
7960# asm 2: vpand <x0=%ymm9,<mask5=%ymm5,>v01=%ymm8
7961vpand %ymm9,%ymm5,%ymm8
7962
7963# qhasm: v11 = x1 & mask5
7964# asm 1: vpand <x1=reg256#13,<mask5=reg256#6,>v11=reg256#10
7965# asm 2: vpand <x1=%ymm12,<mask5=%ymm5,>v11=%ymm9
7966vpand %ymm12,%ymm5,%ymm9
7967
7968# qhasm: 4x v01 unsigned>>= 1
7969# asm 1: vpsrlq $1,<v01=reg256#9,<v01=reg256#9
7970# asm 2: vpsrlq $1,<v01=%ymm8,<v01=%ymm8
7971vpsrlq $1,%ymm8,%ymm8
7972
7973# qhasm: x0 = v00 | v10
7974# asm 1: vpor  <v00=reg256#4,<v10=reg256#8,>x0=reg256#4
7975# asm 2: vpor  <v00=%ymm3,<v10=%ymm7,>x0=%ymm3
7976vpor  %ymm3,%ymm7,%ymm3
7977
7978# qhasm: x1 = v01 | v11
7979# asm 1: vpor  <v01=reg256#9,<v11=reg256#10,>x1=reg256#8
7980# asm 2: vpor  <v01=%ymm8,<v11=%ymm9,>x1=%ymm7
7981vpor  %ymm8,%ymm9,%ymm7
7982
7983# qhasm: v00 = x2 & mask4
7984# asm 1: vpand <x2=reg256#12,<mask4=reg256#5,>v00=reg256#9
7985# asm 2: vpand <x2=%ymm11,<mask4=%ymm4,>v00=%ymm8
7986vpand %ymm11,%ymm4,%ymm8
7987
7988# qhasm: v10 = x3 & mask4
7989# asm 1: vpand <x3=reg256#1,<mask4=reg256#5,>v10=reg256#10
7990# asm 2: vpand <x3=%ymm0,<mask4=%ymm4,>v10=%ymm9
7991vpand %ymm0,%ymm4,%ymm9
7992
7993# qhasm: 4x v10 <<= 1
7994# asm 1: vpsllq $1,<v10=reg256#10,<v10=reg256#10
7995# asm 2: vpsllq $1,<v10=%ymm9,<v10=%ymm9
7996vpsllq $1,%ymm9,%ymm9
7997
7998# qhasm: v01 = x2 & mask5
7999# asm 1: vpand <x2=reg256#12,<mask5=reg256#6,>v01=reg256#12
8000# asm 2: vpand <x2=%ymm11,<mask5=%ymm5,>v01=%ymm11
8001vpand %ymm11,%ymm5,%ymm11
8002
8003# qhasm: v11 = x3 & mask5
8004# asm 1: vpand <x3=reg256#1,<mask5=reg256#6,>v11=reg256#1
8005# asm 2: vpand <x3=%ymm0,<mask5=%ymm5,>v11=%ymm0
8006vpand %ymm0,%ymm5,%ymm0
8007
8008# qhasm: 4x v01 unsigned>>= 1
8009# asm 1: vpsrlq $1,<v01=reg256#12,<v01=reg256#12
8010# asm 2: vpsrlq $1,<v01=%ymm11,<v01=%ymm11
8011vpsrlq $1,%ymm11,%ymm11
8012
8013# qhasm: x2 = v00 | v10
8014# asm 1: vpor  <v00=reg256#9,<v10=reg256#10,>x2=reg256#9
8015# asm 2: vpor  <v00=%ymm8,<v10=%ymm9,>x2=%ymm8
8016vpor  %ymm8,%ymm9,%ymm8
8017
8018# qhasm: x3 = v01 | v11
8019# asm 1: vpor  <v01=reg256#12,<v11=reg256#1,>x3=reg256#1
8020# asm 2: vpor  <v01=%ymm11,<v11=%ymm0,>x3=%ymm0
8021vpor  %ymm11,%ymm0,%ymm0
8022
8023# qhasm: v00 = x4 & mask4
8024# asm 1: vpand <x4=reg256#11,<mask4=reg256#5,>v00=reg256#10
8025# asm 2: vpand <x4=%ymm10,<mask4=%ymm4,>v00=%ymm9
8026vpand %ymm10,%ymm4,%ymm9
8027
8028# qhasm: v10 = x5 & mask4
8029# asm 1: vpand <x5=reg256#3,<mask4=reg256#5,>v10=reg256#12
8030# asm 2: vpand <x5=%ymm2,<mask4=%ymm4,>v10=%ymm11
8031vpand %ymm2,%ymm4,%ymm11
8032
8033# qhasm: 4x v10 <<= 1
8034# asm 1: vpsllq $1,<v10=reg256#12,<v10=reg256#12
8035# asm 2: vpsllq $1,<v10=%ymm11,<v10=%ymm11
8036vpsllq $1,%ymm11,%ymm11
8037
8038# qhasm: v01 = x4 & mask5
8039# asm 1: vpand <x4=reg256#11,<mask5=reg256#6,>v01=reg256#11
8040# asm 2: vpand <x4=%ymm10,<mask5=%ymm5,>v01=%ymm10
8041vpand %ymm10,%ymm5,%ymm10
8042
8043# qhasm: v11 = x5 & mask5
8044# asm 1: vpand <x5=reg256#3,<mask5=reg256#6,>v11=reg256#3
8045# asm 2: vpand <x5=%ymm2,<mask5=%ymm5,>v11=%ymm2
8046vpand %ymm2,%ymm5,%ymm2
8047
8048# qhasm: 4x v01 unsigned>>= 1
8049# asm 1: vpsrlq $1,<v01=reg256#11,<v01=reg256#11
8050# asm 2: vpsrlq $1,<v01=%ymm10,<v01=%ymm10
8051vpsrlq $1,%ymm10,%ymm10
8052
8053# qhasm: x4 = v00 | v10
8054# asm 1: vpor  <v00=reg256#10,<v10=reg256#12,>x4=reg256#10
8055# asm 2: vpor  <v00=%ymm9,<v10=%ymm11,>x4=%ymm9
8056vpor  %ymm9,%ymm11,%ymm9
8057
8058# qhasm: x5 = v01 | v11
8059# asm 1: vpor  <v01=reg256#11,<v11=reg256#3,>x5=reg256#3
8060# asm 2: vpor  <v01=%ymm10,<v11=%ymm2,>x5=%ymm2
8061vpor  %ymm10,%ymm2,%ymm2
8062
8063# qhasm: v00 = x6 & mask4
8064# asm 1: vpand <x6=reg256#7,<mask4=reg256#5,>v00=reg256#11
8065# asm 2: vpand <x6=%ymm6,<mask4=%ymm4,>v00=%ymm10
8066vpand %ymm6,%ymm4,%ymm10
8067
8068# qhasm: v10 = x7 & mask4
8069# asm 1: vpand <x7=reg256#2,<mask4=reg256#5,>v10=reg256#5
8070# asm 2: vpand <x7=%ymm1,<mask4=%ymm4,>v10=%ymm4
8071vpand %ymm1,%ymm4,%ymm4
8072
8073# qhasm: 4x v10 <<= 1
8074# asm 1: vpsllq $1,<v10=reg256#5,<v10=reg256#5
8075# asm 2: vpsllq $1,<v10=%ymm4,<v10=%ymm4
8076vpsllq $1,%ymm4,%ymm4
8077
8078# qhasm: v01 = x6 & mask5
8079# asm 1: vpand <x6=reg256#7,<mask5=reg256#6,>v01=reg256#7
8080# asm 2: vpand <x6=%ymm6,<mask5=%ymm5,>v01=%ymm6
8081vpand %ymm6,%ymm5,%ymm6
8082
8083# qhasm: v11 = x7 & mask5
8084# asm 1: vpand <x7=reg256#2,<mask5=reg256#6,>v11=reg256#2
8085# asm 2: vpand <x7=%ymm1,<mask5=%ymm5,>v11=%ymm1
8086vpand %ymm1,%ymm5,%ymm1
8087
8088# qhasm: 4x v01 unsigned>>= 1
8089# asm 1: vpsrlq $1,<v01=reg256#7,<v01=reg256#7
8090# asm 2: vpsrlq $1,<v01=%ymm6,<v01=%ymm6
8091vpsrlq $1,%ymm6,%ymm6
8092
8093# qhasm: x6 = v00 | v10
8094# asm 1: vpor  <v00=reg256#11,<v10=reg256#5,>x6=reg256#5
8095# asm 2: vpor  <v00=%ymm10,<v10=%ymm4,>x6=%ymm4
8096vpor  %ymm10,%ymm4,%ymm4
8097
8098# qhasm: x7 = v01 | v11
8099# asm 1: vpor  <v01=reg256#7,<v11=reg256#2,>x7=reg256#2
8100# asm 2: vpor  <v01=%ymm6,<v11=%ymm1,>x7=%ymm1
8101vpor  %ymm6,%ymm1,%ymm1
8102
8103# qhasm: mem256[ input_0 + 1792 ] = x0
8104# asm 1: vmovupd   <x0=reg256#4,1792(<input_0=int64#1)
8105# asm 2: vmovupd   <x0=%ymm3,1792(<input_0=%rdi)
8106vmovupd   %ymm3,1792(%rdi)
8107
8108# qhasm: mem256[ input_0 + 1824 ] = x1
8109# asm 1: vmovupd   <x1=reg256#8,1824(<input_0=int64#1)
8110# asm 2: vmovupd   <x1=%ymm7,1824(<input_0=%rdi)
8111vmovupd   %ymm7,1824(%rdi)
8112
8113# qhasm: mem256[ input_0 + 1856 ] = x2
8114# asm 1: vmovupd   <x2=reg256#9,1856(<input_0=int64#1)
8115# asm 2: vmovupd   <x2=%ymm8,1856(<input_0=%rdi)
8116vmovupd   %ymm8,1856(%rdi)
8117
8118# qhasm: mem256[ input_0 + 1888 ] = x3
8119# asm 1: vmovupd   <x3=reg256#1,1888(<input_0=int64#1)
8120# asm 2: vmovupd   <x3=%ymm0,1888(<input_0=%rdi)
8121vmovupd   %ymm0,1888(%rdi)
8122
8123# qhasm: mem256[ input_0 + 1920 ] = x4
8124# asm 1: vmovupd   <x4=reg256#10,1920(<input_0=int64#1)
8125# asm 2: vmovupd   <x4=%ymm9,1920(<input_0=%rdi)
8126vmovupd   %ymm9,1920(%rdi)
8127
8128# qhasm: mem256[ input_0 + 1952 ] = x5
8129# asm 1: vmovupd   <x5=reg256#3,1952(<input_0=int64#1)
8130# asm 2: vmovupd   <x5=%ymm2,1952(<input_0=%rdi)
8131vmovupd   %ymm2,1952(%rdi)
8132
8133# qhasm: mem256[ input_0 + 1984 ] = x6
8134# asm 1: vmovupd   <x6=reg256#5,1984(<input_0=int64#1)
8135# asm 2: vmovupd   <x6=%ymm4,1984(<input_0=%rdi)
8136vmovupd   %ymm4,1984(%rdi)
8137
8138# qhasm: mem256[ input_0 + 2016 ] = x7
8139# asm 1: vmovupd   <x7=reg256#2,2016(<input_0=int64#1)
8140# asm 2: vmovupd   <x7=%ymm1,2016(<input_0=%rdi)
8141vmovupd   %ymm1,2016(%rdi)
8142
8143# qhasm: return
8144add %r11,%rsp
8145ret
8146