# qhasm: int64 input_0

# qhasm: int64 input_1

# qhasm: int64 input_2

# qhasm: int64 input_3

# qhasm: int64 input_4

# qhasm: int64 input_5

# qhasm: stack64 input_6

# qhasm: stack64 input_7

# qhasm: int64 caller_r11

# qhasm: int64 caller_r12

# qhasm: int64 caller_r13

# qhasm: int64 caller_r14

# qhasm: int64 caller_r15

# qhasm: int64 caller_rbx

# qhasm: int64 caller_rbp

# qhasm: reg128 r0

# qhasm: reg128 r1

# qhasm: reg128 r2

# qhasm: reg128 r3

# qhasm: reg128 r4

# qhasm: reg128 r5

# qhasm: reg128 r6

# qhasm: reg128 r7

# qhasm: reg128 t0

# qhasm: reg128 t1

# qhasm: reg128 v00

# qhasm: reg128 v01

# qhasm: reg128 v10

# qhasm: reg128 v11

# qhasm: int64 buf

# qhasm: reg128 mask0

# qhasm: reg128 mask1

# qhasm: reg128 mask2

# qhasm: reg128 mask3

# qhasm: reg128 mask4

# qhasm: reg128 mask5

# qhasm: enter transpose_64x64_asm
.p2align 5
.global _PQCLEAN_MCELIECE348864F_AVX_transpose_64x64_asm
.global PQCLEAN_MCELIECE348864F_AVX_transpose_64x64_asm
_PQCLEAN_MCELIECE348864F_AVX_transpose_64x64_asm:
PQCLEAN_MCELIECE348864F_AVX_transpose_64x64_asm:
mov %rsp,%r11
and $31,%r11
add $0,%r11
sub %r11,%rsp

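# transpose_64x64_asm transposes a 64x64 bit matrix, stored as 64 uint64
# rows at input_0 (%rdi), in place.  This part of the routine performs the
# coarse levels of the standard masked shift-and-or transpose: 32-, 16- and
# 8-bit block swaps, applied to one group of eight rows (stride 8 qwords)
# at a time.  A rough C sketch of what these levels compute (a sketch only,
# assuming the usual layout: in[] is the 64-entry row array, masks[d][0]
# selects the low blocks and masks[d][1] the high blocks):
#
#   for (int d = 5; d >= 3; d--) {          /* s = 32, 16, 8 */
#       int s = 1 << d;
#       for (int j = 0; j < 64; j++) {
#           if (j & s) continue;            /* pair rows j and j+s */
#           uint64_t x = (in[j] & masks[d][0]) | (in[j + s] << s);
#           uint64_t y = (in[j] >> s)        | (in[j + s] & masks[d][1]);
#           in[j]     = x;
#           in[j + s] = y;
#       }
#   }
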
# qhasm: mask0 aligned= mem128[ PQCLEAN_MCELIECE348864F_AVX_MASK5_0 ]
# asm 1: movdqa   PQCLEAN_MCELIECE348864F_AVX_MASK5_0(%rip),>mask0=reg128#1
# asm 2: movdqa   PQCLEAN_MCELIECE348864F_AVX_MASK5_0(%rip),>mask0=%xmm0
movdqa   PQCLEAN_MCELIECE348864F_AVX_MASK5_0(%rip),%xmm0

# qhasm: mask1 aligned= mem128[ PQCLEAN_MCELIECE348864F_AVX_MASK5_1 ]
# asm 1: movdqa   PQCLEAN_MCELIECE348864F_AVX_MASK5_1(%rip),>mask1=reg128#2
# asm 2: movdqa   PQCLEAN_MCELIECE348864F_AVX_MASK5_1(%rip),>mask1=%xmm1
movdqa   PQCLEAN_MCELIECE348864F_AVX_MASK5_1(%rip),%xmm1

# qhasm: mask2 aligned= mem128[ PQCLEAN_MCELIECE348864F_AVX_MASK4_0 ]
# asm 1: movdqa   PQCLEAN_MCELIECE348864F_AVX_MASK4_0(%rip),>mask2=reg128#3
# asm 2: movdqa   PQCLEAN_MCELIECE348864F_AVX_MASK4_0(%rip),>mask2=%xmm2
movdqa   PQCLEAN_MCELIECE348864F_AVX_MASK4_0(%rip),%xmm2

# qhasm: mask3 aligned= mem128[ PQCLEAN_MCELIECE348864F_AVX_MASK4_1 ]
# asm 1: movdqa   PQCLEAN_MCELIECE348864F_AVX_MASK4_1(%rip),>mask3=reg128#4
# asm 2: movdqa   PQCLEAN_MCELIECE348864F_AVX_MASK4_1(%rip),>mask3=%xmm3
movdqa   PQCLEAN_MCELIECE348864F_AVX_MASK4_1(%rip),%xmm3

# qhasm: mask4 aligned= mem128[ PQCLEAN_MCELIECE348864F_AVX_MASK3_0 ]
# asm 1: movdqa   PQCLEAN_MCELIECE348864F_AVX_MASK3_0(%rip),>mask4=reg128#5
# asm 2: movdqa   PQCLEAN_MCELIECE348864F_AVX_MASK3_0(%rip),>mask4=%xmm4
movdqa   PQCLEAN_MCELIECE348864F_AVX_MASK3_0(%rip),%xmm4

# qhasm: mask5 aligned= mem128[ PQCLEAN_MCELIECE348864F_AVX_MASK3_1 ]
# asm 1: movdqa   PQCLEAN_MCELIECE348864F_AVX_MASK3_1(%rip),>mask5=reg128#6
# asm 2: movdqa   PQCLEAN_MCELIECE348864F_AVX_MASK3_1(%rip),>mask5=%xmm5
movdqa   PQCLEAN_MCELIECE348864F_AVX_MASK3_1(%rip),%xmm5

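# mask0/mask1 hold MASK5_* (32-bit blocks), mask2/mask3 hold MASK4_*
# (16-bit blocks), mask4/mask5 hold MASK3_* (8-bit blocks).  These are
# presumably the usual alternating-block constants, e.g. MASK5_0 =
# 0x00000000ffffffff in each 64-bit lane and MASK5_1 its complement.
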
# qhasm: r0 = mem64[ input_0 + 0 ] x2
# asm 1: movddup 0(<input_0=int64#1),>r0=reg128#7
# asm 2: movddup 0(<input_0=%rdi),>r0=%xmm6
movddup 0(%rdi),%xmm6

# qhasm: r1 = mem64[ input_0 + 64 ] x2
# asm 1: movddup 64(<input_0=int64#1),>r1=reg128#8
# asm 2: movddup 64(<input_0=%rdi),>r1=%xmm7
movddup 64(%rdi),%xmm7

# qhasm: r2 = mem64[ input_0 + 128 ] x2
# asm 1: movddup 128(<input_0=int64#1),>r2=reg128#9
# asm 2: movddup 128(<input_0=%rdi),>r2=%xmm8
movddup 128(%rdi),%xmm8

# qhasm: r3 = mem64[ input_0 + 192 ] x2
# asm 1: movddup 192(<input_0=int64#1),>r3=reg128#10
# asm 2: movddup 192(<input_0=%rdi),>r3=%xmm9
movddup 192(%rdi),%xmm9

# qhasm: r4 = mem64[ input_0 + 256 ] x2
# asm 1: movddup 256(<input_0=int64#1),>r4=reg128#11
# asm 2: movddup 256(<input_0=%rdi),>r4=%xmm10
movddup 256(%rdi),%xmm10

# qhasm: r5 = mem64[ input_0 + 320 ] x2
# asm 1: movddup 320(<input_0=int64#1),>r5=reg128#12
# asm 2: movddup 320(<input_0=%rdi),>r5=%xmm11
movddup 320(%rdi),%xmm11

# qhasm: r6 = mem64[ input_0 + 384 ] x2
# asm 1: movddup 384(<input_0=int64#1),>r6=reg128#13
# asm 2: movddup 384(<input_0=%rdi),>r6=%xmm12
movddup 384(%rdi),%xmm12

# qhasm: r7 = mem64[ input_0 + 448 ] x2
# asm 1: movddup 448(<input_0=int64#1),>r7=reg128#14
# asm 2: movddup 448(<input_0=%rdi),>r7=%xmm13
movddup 448(%rdi),%xmm13

# qhasm: v00 = r0 & mask0
# asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15
# asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14
vpand %xmm0,%xmm6,%xmm14

# qhasm: 2x v10 = r4 << 32
# asm 1: vpsllq $32,<r4=reg128#11,>v10=reg128#16
# asm 2: vpsllq $32,<r4=%xmm10,>v10=%xmm15
vpsllq $32,%xmm10,%xmm15

# qhasm: 2x v01 = r0 unsigned>> 32
# asm 1: vpsrlq $32,<r0=reg128#7,>v01=reg128#7
# asm 2: vpsrlq $32,<r0=%xmm6,>v01=%xmm6
vpsrlq $32,%xmm6,%xmm6

# qhasm: v11 = r4 & mask1
# asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11
# asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10
vpand %xmm1,%xmm10,%xmm10

# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14
vpor %xmm15,%xmm14,%xmm14

# qhasm: r4 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7
# asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6
vpor %xmm10,%xmm6,%xmm6

# qhasm: v00 = r1 & mask0
# asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11
# asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10
vpand %xmm0,%xmm7,%xmm10

# qhasm: 2x v10 = r5 << 32
# asm 1: vpsllq $32,<r5=reg128#12,>v10=reg128#16
# asm 2: vpsllq $32,<r5=%xmm11,>v10=%xmm15
vpsllq $32,%xmm11,%xmm15

# qhasm: 2x v01 = r1 unsigned>> 32
# asm 1: vpsrlq $32,<r1=reg128#8,>v01=reg128#8
# asm 2: vpsrlq $32,<r1=%xmm7,>v01=%xmm7
vpsrlq $32,%xmm7,%xmm7

# qhasm: v11 = r5 & mask1
# asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12
# asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11
vpand %xmm1,%xmm11,%xmm11

# qhasm: r1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11
# asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10
vpor %xmm15,%xmm10,%xmm10

# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8
# asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7
vpor %xmm11,%xmm7,%xmm7

# qhasm: v00 = r2 & mask0
# asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12
# asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11
vpand %xmm0,%xmm8,%xmm11

# qhasm: 2x v10 = r6 << 32
# asm 1: vpsllq $32,<r6=reg128#13,>v10=reg128#16
# asm 2: vpsllq $32,<r6=%xmm12,>v10=%xmm15
vpsllq $32,%xmm12,%xmm15

# qhasm: 2x v01 = r2 unsigned>> 32
# asm 1: vpsrlq $32,<r2=reg128#9,>v01=reg128#9
# asm 2: vpsrlq $32,<r2=%xmm8,>v01=%xmm8
vpsrlq $32,%xmm8,%xmm8

# qhasm: v11 = r6 & mask1
# asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13
# asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12
vpand %xmm1,%xmm12,%xmm12

# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11
vpor %xmm15,%xmm11,%xmm11

# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9
# asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8
vpor %xmm12,%xmm8,%xmm8

# qhasm: v00 = r3 & mask0
# asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13
# asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12
vpand %xmm0,%xmm9,%xmm12

# qhasm: 2x v10 = r7 << 32
# asm 1: vpsllq $32,<r7=reg128#14,>v10=reg128#16
# asm 2: vpsllq $32,<r7=%xmm13,>v10=%xmm15
vpsllq $32,%xmm13,%xmm15

# qhasm: 2x v01 = r3 unsigned>> 32
# asm 1: vpsrlq $32,<r3=reg128#10,>v01=reg128#10
# asm 2: vpsrlq $32,<r3=%xmm9,>v01=%xmm9
vpsrlq $32,%xmm9,%xmm9

# qhasm: v11 = r7 & mask1
# asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14
# asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13
vpand %xmm1,%xmm13,%xmm13

# qhasm: r3 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12
vpor %xmm15,%xmm12,%xmm12

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10
# asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9
vpor %xmm13,%xmm9,%xmm9

# qhasm: v00 = r0 & mask2
# asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14
# asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13
vpand %xmm2,%xmm14,%xmm13

# qhasm: 4x v10 = r2 << 16
# asm 1: vpslld $16,<r2=reg128#12,>v10=reg128#16
# asm 2: vpslld $16,<r2=%xmm11,>v10=%xmm15
vpslld $16,%xmm11,%xmm15

# qhasm: 4x v01 = r0 unsigned>> 16
# asm 1: vpsrld $16,<r0=reg128#15,>v01=reg128#15
# asm 2: vpsrld $16,<r0=%xmm14,>v01=%xmm14
vpsrld $16,%xmm14,%xmm14

# qhasm: v11 = r2 & mask3
# asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12
# asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11
vpand %xmm3,%xmm11,%xmm11

# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14
# asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13
vpor %xmm15,%xmm13,%xmm13

# qhasm: r2 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12
# asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11
vpor %xmm11,%xmm14,%xmm11

# qhasm: v00 = r1 & mask2
# asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15
# asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14
vpand %xmm2,%xmm10,%xmm14

# qhasm: 4x v10 = r3 << 16
# asm 1: vpslld $16,<r3=reg128#13,>v10=reg128#16
# asm 2: vpslld $16,<r3=%xmm12,>v10=%xmm15
vpslld $16,%xmm12,%xmm15

# qhasm: 4x v01 = r1 unsigned>> 16
# asm 1: vpsrld $16,<r1=reg128#11,>v01=reg128#11
# asm 2: vpsrld $16,<r1=%xmm10,>v01=%xmm10
vpsrld $16,%xmm10,%xmm10

# qhasm: v11 = r3 & mask3
# asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13
# asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12
vpand %xmm3,%xmm12,%xmm12

# qhasm: r1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14
vpor %xmm15,%xmm14,%xmm14

# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11
# asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10
vpor %xmm12,%xmm10,%xmm10

# qhasm: v00 = r4 & mask2
# asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13
# asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12
vpand %xmm2,%xmm6,%xmm12

# qhasm: 4x v10 = r6 << 16
# asm 1: vpslld $16,<r6=reg128#9,>v10=reg128#16
# asm 2: vpslld $16,<r6=%xmm8,>v10=%xmm15
vpslld $16,%xmm8,%xmm15

# qhasm: 4x v01 = r4 unsigned>> 16
# asm 1: vpsrld $16,<r4=reg128#7,>v01=reg128#7
# asm 2: vpsrld $16,<r4=%xmm6,>v01=%xmm6
vpsrld $16,%xmm6,%xmm6

# qhasm: v11 = r6 & mask3
# asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9
# asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8
vpand %xmm3,%xmm8,%xmm8

# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12
vpor %xmm15,%xmm12,%xmm12

# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7
# asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6
vpor %xmm8,%xmm6,%xmm6

# qhasm: v00 = r5 & mask2
# asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9
# asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8
vpand %xmm2,%xmm7,%xmm8

# qhasm: 4x v10 = r7 << 16
# asm 1: vpslld $16,<r7=reg128#10,>v10=reg128#16
# asm 2: vpslld $16,<r7=%xmm9,>v10=%xmm15
vpslld $16,%xmm9,%xmm15

# qhasm: 4x v01 = r5 unsigned>> 16
# asm 1: vpsrld $16,<r5=reg128#8,>v01=reg128#8
# asm 2: vpsrld $16,<r5=%xmm7,>v01=%xmm7
vpsrld $16,%xmm7,%xmm7

# qhasm: v11 = r7 & mask3
# asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10
# asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9
vpand %xmm3,%xmm9,%xmm9

# qhasm: r5 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9
# asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8
vpor %xmm15,%xmm8,%xmm8

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8
# asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7
vpor %xmm9,%xmm7,%xmm7

# qhasm: v00 = r0 & mask4
# asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9
vpand %xmm4,%xmm13,%xmm9

# qhasm: 8x v10 = r1 << 8
# asm 1: vpsllw $8,<r1=reg128#15,>v10=reg128#16
# asm 2: vpsllw $8,<r1=%xmm14,>v10=%xmm15
vpsllw $8,%xmm14,%xmm15

# qhasm: 8x v01 = r0 unsigned>> 8
# asm 1: vpsrlw $8,<r0=reg128#14,>v01=reg128#14
# asm 2: vpsrlw $8,<r0=%xmm13,>v01=%xmm13
vpsrlw $8,%xmm13,%xmm13

# qhasm: v11 = r1 & mask5
# asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15
# asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14
vpand %xmm5,%xmm14,%xmm14

# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10
# asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9
vpor %xmm15,%xmm9,%xmm9

# qhasm: r1 = v01 | v11
# asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14
# asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13
vpor %xmm14,%xmm13,%xmm13

# qhasm: v00 = r2 & mask4
# asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15
# asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14
vpand %xmm4,%xmm11,%xmm14

# qhasm: 8x v10 = r3 << 8
# asm 1: vpsllw $8,<r3=reg128#11,>v10=reg128#16
# asm 2: vpsllw $8,<r3=%xmm10,>v10=%xmm15
vpsllw $8,%xmm10,%xmm15

# qhasm: 8x v01 = r2 unsigned>> 8
# asm 1: vpsrlw $8,<r2=reg128#12,>v01=reg128#12
# asm 2: vpsrlw $8,<r2=%xmm11,>v01=%xmm11
vpsrlw $8,%xmm11,%xmm11

# qhasm: v11 = r3 & mask5
# asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11
# asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10
vpand %xmm5,%xmm10,%xmm10

# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14
vpor %xmm15,%xmm14,%xmm14

# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11
# asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10
vpor %xmm10,%xmm11,%xmm10

# qhasm: v00 = r4 & mask4
# asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12
# asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11
vpand %xmm4,%xmm12,%xmm11

# qhasm: 8x v10 = r5 << 8
# asm 1: vpsllw $8,<r5=reg128#9,>v10=reg128#16
# asm 2: vpsllw $8,<r5=%xmm8,>v10=%xmm15
vpsllw $8,%xmm8,%xmm15

# qhasm: 8x v01 = r4 unsigned>> 8
# asm 1: vpsrlw $8,<r4=reg128#13,>v01=reg128#13
# asm 2: vpsrlw $8,<r4=%xmm12,>v01=%xmm12
vpsrlw $8,%xmm12,%xmm12

# qhasm: v11 = r5 & mask5
# asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9
# asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8
vpand %xmm5,%xmm8,%xmm8

# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11
vpor %xmm15,%xmm11,%xmm11

# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9
# asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8
vpor %xmm8,%xmm12,%xmm8

# qhasm: v00 = r6 & mask4
# asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13
# asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12
vpand %xmm4,%xmm6,%xmm12

# qhasm: 8x v10 = r7 << 8
# asm 1: vpsllw $8,<r7=reg128#8,>v10=reg128#16
# asm 2: vpsllw $8,<r7=%xmm7,>v10=%xmm15
vpsllw $8,%xmm7,%xmm15

# qhasm: 8x v01 = r6 unsigned>> 8
# asm 1: vpsrlw $8,<r6=reg128#7,>v01=reg128#7
# asm 2: vpsrlw $8,<r6=%xmm6,>v01=%xmm6
vpsrlw $8,%xmm6,%xmm6

# qhasm: v11 = r7 & mask5
# asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8
# asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7
vpand %xmm5,%xmm7,%xmm7

# qhasm: r6 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12
vpor %xmm15,%xmm12,%xmm12

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7
# asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6
vpor %xmm7,%xmm6,%xmm6

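# Write-back: movddup duplicated each row into both 64-bit lanes and every
# operation above is lane-wise, so both halves of each register agree;
# extracting lane 0 with pextrq recovers the updated row.
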
# qhasm: buf = r0[0]
# asm 1: pextrq $0x0,<r0=reg128#10,>buf=int64#2
# asm 2: pextrq $0x0,<r0=%xmm9,>buf=%rsi
pextrq $0x0,%xmm9,%rsi

# qhasm: mem64[ input_0 + 0 ] = buf
# asm 1: movq   <buf=int64#2,0(<input_0=int64#1)
# asm 2: movq   <buf=%rsi,0(<input_0=%rdi)
movq   %rsi,0(%rdi)

# qhasm: buf = r1[0]
# asm 1: pextrq $0x0,<r1=reg128#14,>buf=int64#2
# asm 2: pextrq $0x0,<r1=%xmm13,>buf=%rsi
pextrq $0x0,%xmm13,%rsi

# qhasm: mem64[ input_0 + 64 ] = buf
# asm 1: movq   <buf=int64#2,64(<input_0=int64#1)
# asm 2: movq   <buf=%rsi,64(<input_0=%rdi)
movq   %rsi,64(%rdi)

# qhasm: buf = r2[0]
# asm 1: pextrq $0x0,<r2=reg128#15,>buf=int64#2
# asm 2: pextrq $0x0,<r2=%xmm14,>buf=%rsi
pextrq $0x0,%xmm14,%rsi

# qhasm: mem64[ input_0 + 128 ] = buf
# asm 1: movq   <buf=int64#2,128(<input_0=int64#1)
# asm 2: movq   <buf=%rsi,128(<input_0=%rdi)
movq   %rsi,128(%rdi)

# qhasm: buf = r3[0]
# asm 1: pextrq $0x0,<r3=reg128#11,>buf=int64#2
# asm 2: pextrq $0x0,<r3=%xmm10,>buf=%rsi
pextrq $0x0,%xmm10,%rsi

# qhasm: mem64[ input_0 + 192 ] = buf
# asm 1: movq   <buf=int64#2,192(<input_0=int64#1)
# asm 2: movq   <buf=%rsi,192(<input_0=%rdi)
movq   %rsi,192(%rdi)

# qhasm: buf = r4[0]
# asm 1: pextrq $0x0,<r4=reg128#12,>buf=int64#2
# asm 2: pextrq $0x0,<r4=%xmm11,>buf=%rsi
pextrq $0x0,%xmm11,%rsi

# qhasm: mem64[ input_0 + 256 ] = buf
# asm 1: movq   <buf=int64#2,256(<input_0=int64#1)
# asm 2: movq   <buf=%rsi,256(<input_0=%rdi)
movq   %rsi,256(%rdi)

# qhasm: buf = r5[0]
# asm 1: pextrq $0x0,<r5=reg128#9,>buf=int64#2
# asm 2: pextrq $0x0,<r5=%xmm8,>buf=%rsi
pextrq $0x0,%xmm8,%rsi

# qhasm: mem64[ input_0 + 320 ] = buf
# asm 1: movq   <buf=int64#2,320(<input_0=int64#1)
# asm 2: movq   <buf=%rsi,320(<input_0=%rdi)
movq   %rsi,320(%rdi)

# qhasm: buf = r6[0]
# asm 1: pextrq $0x0,<r6=reg128#13,>buf=int64#2
# asm 2: pextrq $0x0,<r6=%xmm12,>buf=%rsi
pextrq $0x0,%xmm12,%rsi

# qhasm: mem64[ input_0 + 384 ] = buf
# asm 1: movq   <buf=int64#2,384(<input_0=int64#1)
# asm 2: movq   <buf=%rsi,384(<input_0=%rdi)
movq   %rsi,384(%rdi)

# qhasm: buf = r7[0]
# asm 1: pextrq $0x0,<r7=reg128#7,>buf=int64#2
# asm 2: pextrq $0x0,<r7=%xmm6,>buf=%rsi
pextrq $0x0,%xmm6,%rsi

# qhasm: mem64[ input_0 + 448 ] = buf
# asm 1: movq   <buf=int64#2,448(<input_0=int64#1)
# asm 2: movq   <buf=%rsi,448(<input_0=%rdi)
movq   %rsi,448(%rdi)

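# Next residue class: rows 1, 9, ..., 57 (byte offsets 8, 72, ..., 456),
# run through the same 32/16/8-bit swap network.
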
# qhasm: r0 = mem64[ input_0 + 8 ] x2
# asm 1: movddup 8(<input_0=int64#1),>r0=reg128#7
# asm 2: movddup 8(<input_0=%rdi),>r0=%xmm6
movddup 8(%rdi),%xmm6

# qhasm: r1 = mem64[ input_0 + 72 ] x2
# asm 1: movddup 72(<input_0=int64#1),>r1=reg128#8
# asm 2: movddup 72(<input_0=%rdi),>r1=%xmm7
movddup 72(%rdi),%xmm7

# qhasm: r2 = mem64[ input_0 + 136 ] x2
# asm 1: movddup 136(<input_0=int64#1),>r2=reg128#9
# asm 2: movddup 136(<input_0=%rdi),>r2=%xmm8
movddup 136(%rdi),%xmm8

# qhasm: r3 = mem64[ input_0 + 200 ] x2
# asm 1: movddup 200(<input_0=int64#1),>r3=reg128#10
# asm 2: movddup 200(<input_0=%rdi),>r3=%xmm9
movddup 200(%rdi),%xmm9

# qhasm: r4 = mem64[ input_0 + 264 ] x2
# asm 1: movddup 264(<input_0=int64#1),>r4=reg128#11
# asm 2: movddup 264(<input_0=%rdi),>r4=%xmm10
movddup 264(%rdi),%xmm10

# qhasm: r5 = mem64[ input_0 + 328 ] x2
# asm 1: movddup 328(<input_0=int64#1),>r5=reg128#12
# asm 2: movddup 328(<input_0=%rdi),>r5=%xmm11
movddup 328(%rdi),%xmm11

# qhasm: r6 = mem64[ input_0 + 392 ] x2
# asm 1: movddup 392(<input_0=int64#1),>r6=reg128#13
# asm 2: movddup 392(<input_0=%rdi),>r6=%xmm12
movddup 392(%rdi),%xmm12

# qhasm: r7 = mem64[ input_0 + 456 ] x2
# asm 1: movddup 456(<input_0=int64#1),>r7=reg128#14
# asm 2: movddup 456(<input_0=%rdi),>r7=%xmm13
movddup 456(%rdi),%xmm13

# qhasm: v00 = r0 & mask0
# asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15
# asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14
vpand %xmm0,%xmm6,%xmm14

# qhasm: 2x v10 = r4 << 32
# asm 1: vpsllq $32,<r4=reg128#11,>v10=reg128#16
# asm 2: vpsllq $32,<r4=%xmm10,>v10=%xmm15
vpsllq $32,%xmm10,%xmm15

# qhasm: 2x v01 = r0 unsigned>> 32
# asm 1: vpsrlq $32,<r0=reg128#7,>v01=reg128#7
# asm 2: vpsrlq $32,<r0=%xmm6,>v01=%xmm6
vpsrlq $32,%xmm6,%xmm6

# qhasm: v11 = r4 & mask1
# asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11
# asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10
vpand %xmm1,%xmm10,%xmm10

# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14
vpor %xmm15,%xmm14,%xmm14

# qhasm: r4 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7
# asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6
vpor %xmm10,%xmm6,%xmm6

# qhasm: v00 = r1 & mask0
# asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11
# asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10
vpand %xmm0,%xmm7,%xmm10

# qhasm: 2x v10 = r5 << 32
# asm 1: vpsllq $32,<r5=reg128#12,>v10=reg128#16
# asm 2: vpsllq $32,<r5=%xmm11,>v10=%xmm15
vpsllq $32,%xmm11,%xmm15

# qhasm: 2x v01 = r1 unsigned>> 32
# asm 1: vpsrlq $32,<r1=reg128#8,>v01=reg128#8
# asm 2: vpsrlq $32,<r1=%xmm7,>v01=%xmm7
vpsrlq $32,%xmm7,%xmm7

# qhasm: v11 = r5 & mask1
# asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12
# asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11
vpand %xmm1,%xmm11,%xmm11

# qhasm: r1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11
# asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10
vpor %xmm15,%xmm10,%xmm10

# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8
# asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7
vpor %xmm11,%xmm7,%xmm7

# qhasm: v00 = r2 & mask0
# asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12
# asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11
vpand %xmm0,%xmm8,%xmm11

# qhasm: 2x v10 = r6 << 32
# asm 1: vpsllq $32,<r6=reg128#13,>v10=reg128#16
# asm 2: vpsllq $32,<r6=%xmm12,>v10=%xmm15
vpsllq $32,%xmm12,%xmm15

# qhasm: 2x v01 = r2 unsigned>> 32
# asm 1: vpsrlq $32,<r2=reg128#9,>v01=reg128#9
# asm 2: vpsrlq $32,<r2=%xmm8,>v01=%xmm8
vpsrlq $32,%xmm8,%xmm8

# qhasm: v11 = r6 & mask1
# asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13
# asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12
vpand %xmm1,%xmm12,%xmm12

# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11
vpor %xmm15,%xmm11,%xmm11

# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9
# asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8
vpor %xmm12,%xmm8,%xmm8

# qhasm: v00 = r3 & mask0
# asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13
# asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12
vpand %xmm0,%xmm9,%xmm12

# qhasm: 2x v10 = r7 << 32
# asm 1: vpsllq $32,<r7=reg128#14,>v10=reg128#16
# asm 2: vpsllq $32,<r7=%xmm13,>v10=%xmm15
vpsllq $32,%xmm13,%xmm15

# qhasm: 2x v01 = r3 unsigned>> 32
# asm 1: vpsrlq $32,<r3=reg128#10,>v01=reg128#10
# asm 2: vpsrlq $32,<r3=%xmm9,>v01=%xmm9
vpsrlq $32,%xmm9,%xmm9

# qhasm: v11 = r7 & mask1
# asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14
# asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13
vpand %xmm1,%xmm13,%xmm13

# qhasm: r3 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12
vpor %xmm15,%xmm12,%xmm12

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10
# asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9
vpor %xmm13,%xmm9,%xmm9

# qhasm: v00 = r0 & mask2
# asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14
# asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13
vpand %xmm2,%xmm14,%xmm13

# qhasm: 4x v10 = r2 << 16
# asm 1: vpslld $16,<r2=reg128#12,>v10=reg128#16
# asm 2: vpslld $16,<r2=%xmm11,>v10=%xmm15
vpslld $16,%xmm11,%xmm15

# qhasm: 4x v01 = r0 unsigned>> 16
# asm 1: vpsrld $16,<r0=reg128#15,>v01=reg128#15
# asm 2: vpsrld $16,<r0=%xmm14,>v01=%xmm14
vpsrld $16,%xmm14,%xmm14

# qhasm: v11 = r2 & mask3
# asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12
# asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11
vpand %xmm3,%xmm11,%xmm11

# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14
# asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13
vpor %xmm15,%xmm13,%xmm13

# qhasm: r2 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12
# asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11
vpor %xmm11,%xmm14,%xmm11

# qhasm: v00 = r1 & mask2
# asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15
# asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14
vpand %xmm2,%xmm10,%xmm14

# qhasm: 4x v10 = r3 << 16
# asm 1: vpslld $16,<r3=reg128#13,>v10=reg128#16
# asm 2: vpslld $16,<r3=%xmm12,>v10=%xmm15
vpslld $16,%xmm12,%xmm15

# qhasm: 4x v01 = r1 unsigned>> 16
# asm 1: vpsrld $16,<r1=reg128#11,>v01=reg128#11
# asm 2: vpsrld $16,<r1=%xmm10,>v01=%xmm10
vpsrld $16,%xmm10,%xmm10

# qhasm: v11 = r3 & mask3
# asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13
# asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12
vpand %xmm3,%xmm12,%xmm12

# qhasm: r1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14
vpor %xmm15,%xmm14,%xmm14

# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11
# asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10
vpor %xmm12,%xmm10,%xmm10

# qhasm: v00 = r4 & mask2
# asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13
# asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12
vpand %xmm2,%xmm6,%xmm12

# qhasm: 4x v10 = r6 << 16
# asm 1: vpslld $16,<r6=reg128#9,>v10=reg128#16
# asm 2: vpslld $16,<r6=%xmm8,>v10=%xmm15
vpslld $16,%xmm8,%xmm15

# qhasm: 4x v01 = r4 unsigned>> 16
# asm 1: vpsrld $16,<r4=reg128#7,>v01=reg128#7
# asm 2: vpsrld $16,<r4=%xmm6,>v01=%xmm6
vpsrld $16,%xmm6,%xmm6

# qhasm: v11 = r6 & mask3
# asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9
# asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8
vpand %xmm3,%xmm8,%xmm8

# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12
vpor %xmm15,%xmm12,%xmm12

# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7
# asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6
vpor %xmm8,%xmm6,%xmm6

# qhasm: v00 = r5 & mask2
# asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9
# asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8
vpand %xmm2,%xmm7,%xmm8

# qhasm: 4x v10 = r7 << 16
# asm 1: vpslld $16,<r7=reg128#10,>v10=reg128#16
# asm 2: vpslld $16,<r7=%xmm9,>v10=%xmm15
vpslld $16,%xmm9,%xmm15

# qhasm: 4x v01 = r5 unsigned>> 16
# asm 1: vpsrld $16,<r5=reg128#8,>v01=reg128#8
# asm 2: vpsrld $16,<r5=%xmm7,>v01=%xmm7
vpsrld $16,%xmm7,%xmm7

# qhasm: v11 = r7 & mask3
# asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10
# asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9
vpand %xmm3,%xmm9,%xmm9

# qhasm: r5 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9
# asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8
vpor %xmm15,%xmm8,%xmm8

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8
# asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7
vpor %xmm9,%xmm7,%xmm7

# qhasm: v00 = r0 & mask4
# asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9
vpand %xmm4,%xmm13,%xmm9

# qhasm: 8x v10 = r1 << 8
# asm 1: vpsllw $8,<r1=reg128#15,>v10=reg128#16
# asm 2: vpsllw $8,<r1=%xmm14,>v10=%xmm15
vpsllw $8,%xmm14,%xmm15

# qhasm: 8x v01 = r0 unsigned>> 8
# asm 1: vpsrlw $8,<r0=reg128#14,>v01=reg128#14
# asm 2: vpsrlw $8,<r0=%xmm13,>v01=%xmm13
vpsrlw $8,%xmm13,%xmm13

# qhasm: v11 = r1 & mask5
# asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15
# asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14
vpand %xmm5,%xmm14,%xmm14

# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10
# asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9
vpor %xmm15,%xmm9,%xmm9

# qhasm: r1 = v01 | v11
# asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14
# asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13
vpor %xmm14,%xmm13,%xmm13

# qhasm: v00 = r2 & mask4
# asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15
# asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14
vpand %xmm4,%xmm11,%xmm14

# qhasm: 8x v10 = r3 << 8
# asm 1: vpsllw $8,<r3=reg128#11,>v10=reg128#16
# asm 2: vpsllw $8,<r3=%xmm10,>v10=%xmm15
vpsllw $8,%xmm10,%xmm15

# qhasm: 8x v01 = r2 unsigned>> 8
# asm 1: vpsrlw $8,<r2=reg128#12,>v01=reg128#12
# asm 2: vpsrlw $8,<r2=%xmm11,>v01=%xmm11
vpsrlw $8,%xmm11,%xmm11

# qhasm: v11 = r3 & mask5
# asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11
# asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10
vpand %xmm5,%xmm10,%xmm10

# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14
vpor %xmm15,%xmm14,%xmm14

# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11
# asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10
vpor %xmm10,%xmm11,%xmm10

# qhasm: v00 = r4 & mask4
# asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12
# asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11
vpand %xmm4,%xmm12,%xmm11

# qhasm: 8x v10 = r5 << 8
# asm 1: vpsllw $8,<r5=reg128#9,>v10=reg128#16
# asm 2: vpsllw $8,<r5=%xmm8,>v10=%xmm15
vpsllw $8,%xmm8,%xmm15

# qhasm: 8x v01 = r4 unsigned>> 8
# asm 1: vpsrlw $8,<r4=reg128#13,>v01=reg128#13
# asm 2: vpsrlw $8,<r4=%xmm12,>v01=%xmm12
vpsrlw $8,%xmm12,%xmm12

# qhasm: v11 = r5 & mask5
# asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9
# asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8
vpand %xmm5,%xmm8,%xmm8

# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11
vpor %xmm15,%xmm11,%xmm11

# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9
# asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8
vpor %xmm8,%xmm12,%xmm8

# qhasm: v00 = r6 & mask4
# asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13
# asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12
vpand %xmm4,%xmm6,%xmm12

# qhasm: 8x v10 = r7 << 8
# asm 1: vpsllw $8,<r7=reg128#8,>v10=reg128#16
# asm 2: vpsllw $8,<r7=%xmm7,>v10=%xmm15
vpsllw $8,%xmm7,%xmm15

# qhasm: 8x v01 = r6 unsigned>> 8
# asm 1: vpsrlw $8,<r6=reg128#7,>v01=reg128#7
# asm 2: vpsrlw $8,<r6=%xmm6,>v01=%xmm6
vpsrlw $8,%xmm6,%xmm6

# qhasm: v11 = r7 & mask5
# asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8
# asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7
vpand %xmm5,%xmm7,%xmm7

# qhasm: r6 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12
vpor %xmm15,%xmm12,%xmm12

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7
# asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6
vpor %xmm7,%xmm6,%xmm6

# qhasm: buf = r0[0]
# asm 1: pextrq $0x0,<r0=reg128#10,>buf=int64#2
# asm 2: pextrq $0x0,<r0=%xmm9,>buf=%rsi
pextrq $0x0,%xmm9,%rsi

# qhasm: mem64[ input_0 + 8 ] = buf
# asm 1: movq   <buf=int64#2,8(<input_0=int64#1)
# asm 2: movq   <buf=%rsi,8(<input_0=%rdi)
movq   %rsi,8(%rdi)

# qhasm: buf = r1[0]
# asm 1: pextrq $0x0,<r1=reg128#14,>buf=int64#2
# asm 2: pextrq $0x0,<r1=%xmm13,>buf=%rsi
pextrq $0x0,%xmm13,%rsi

# qhasm: mem64[ input_0 + 72 ] = buf
# asm 1: movq   <buf=int64#2,72(<input_0=int64#1)
# asm 2: movq   <buf=%rsi,72(<input_0=%rdi)
movq   %rsi,72(%rdi)

# qhasm: buf = r2[0]
# asm 1: pextrq $0x0,<r2=reg128#15,>buf=int64#2
# asm 2: pextrq $0x0,<r2=%xmm14,>buf=%rsi
pextrq $0x0,%xmm14,%rsi

# qhasm: mem64[ input_0 + 136 ] = buf
# asm 1: movq   <buf=int64#2,136(<input_0=int64#1)
# asm 2: movq   <buf=%rsi,136(<input_0=%rdi)
movq   %rsi,136(%rdi)

# qhasm: buf = r3[0]
# asm 1: pextrq $0x0,<r3=reg128#11,>buf=int64#2
# asm 2: pextrq $0x0,<r3=%xmm10,>buf=%rsi
pextrq $0x0,%xmm10,%rsi

# qhasm: mem64[ input_0 + 200 ] = buf
# asm 1: movq   <buf=int64#2,200(<input_0=int64#1)
# asm 2: movq   <buf=%rsi,200(<input_0=%rdi)
movq   %rsi,200(%rdi)

# qhasm: buf = r4[0]
# asm 1: pextrq $0x0,<r4=reg128#12,>buf=int64#2
# asm 2: pextrq $0x0,<r4=%xmm11,>buf=%rsi
pextrq $0x0,%xmm11,%rsi

# qhasm: mem64[ input_0 + 264 ] = buf
# asm 1: movq   <buf=int64#2,264(<input_0=int64#1)
# asm 2: movq   <buf=%rsi,264(<input_0=%rdi)
movq   %rsi,264(%rdi)

# qhasm: buf = r5[0]
# asm 1: pextrq $0x0,<r5=reg128#9,>buf=int64#2
# asm 2: pextrq $0x0,<r5=%xmm8,>buf=%rsi
pextrq $0x0,%xmm8,%rsi

# qhasm: mem64[ input_0 + 328 ] = buf
# asm 1: movq   <buf=int64#2,328(<input_0=int64#1)
# asm 2: movq   <buf=%rsi,328(<input_0=%rdi)
movq   %rsi,328(%rdi)

# qhasm: buf = r6[0]
# asm 1: pextrq $0x0,<r6=reg128#13,>buf=int64#2
# asm 2: pextrq $0x0,<r6=%xmm12,>buf=%rsi
pextrq $0x0,%xmm12,%rsi

# qhasm: mem64[ input_0 + 392 ] = buf
# asm 1: movq   <buf=int64#2,392(<input_0=int64#1)
# asm 2: movq   <buf=%rsi,392(<input_0=%rdi)
movq   %rsi,392(%rdi)

# qhasm: buf = r7[0]
# asm 1: pextrq $0x0,<r7=reg128#7,>buf=int64#2
# asm 2: pextrq $0x0,<r7=%xmm6,>buf=%rsi
pextrq $0x0,%xmm6,%rsi

# qhasm: mem64[ input_0 + 456 ] = buf
# asm 1: movq   <buf=int64#2,456(<input_0=int64#1)
# asm 2: movq   <buf=%rsi,456(<input_0=%rdi)
movq   %rsi,456(%rdi)

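# Next residue class: rows 2, 10, ..., 58 (byte offsets 16, 80, ..., 464).
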
# qhasm: r0 = mem64[ input_0 + 16 ] x2
# asm 1: movddup 16(<input_0=int64#1),>r0=reg128#7
# asm 2: movddup 16(<input_0=%rdi),>r0=%xmm6
movddup 16(%rdi),%xmm6

# qhasm: r1 = mem64[ input_0 + 80 ] x2
# asm 1: movddup 80(<input_0=int64#1),>r1=reg128#8
# asm 2: movddup 80(<input_0=%rdi),>r1=%xmm7
movddup 80(%rdi),%xmm7

# qhasm: r2 = mem64[ input_0 + 144 ] x2
# asm 1: movddup 144(<input_0=int64#1),>r2=reg128#9
# asm 2: movddup 144(<input_0=%rdi),>r2=%xmm8
movddup 144(%rdi),%xmm8

# qhasm: r3 = mem64[ input_0 + 208 ] x2
# asm 1: movddup 208(<input_0=int64#1),>r3=reg128#10
# asm 2: movddup 208(<input_0=%rdi),>r3=%xmm9
movddup 208(%rdi),%xmm9

# qhasm: r4 = mem64[ input_0 + 272 ] x2
# asm 1: movddup 272(<input_0=int64#1),>r4=reg128#11
# asm 2: movddup 272(<input_0=%rdi),>r4=%xmm10
movddup 272(%rdi),%xmm10

# qhasm: r5 = mem64[ input_0 + 336 ] x2
# asm 1: movddup 336(<input_0=int64#1),>r5=reg128#12
# asm 2: movddup 336(<input_0=%rdi),>r5=%xmm11
movddup 336(%rdi),%xmm11

# qhasm: r6 = mem64[ input_0 + 400 ] x2
# asm 1: movddup 400(<input_0=int64#1),>r6=reg128#13
# asm 2: movddup 400(<input_0=%rdi),>r6=%xmm12
movddup 400(%rdi),%xmm12

# qhasm: r7 = mem64[ input_0 + 464 ] x2
# asm 1: movddup 464(<input_0=int64#1),>r7=reg128#14
# asm 2: movddup 464(<input_0=%rdi),>r7=%xmm13
movddup 464(%rdi),%xmm13

# qhasm: v00 = r0 & mask0
# asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15
# asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14
vpand %xmm0,%xmm6,%xmm14

# qhasm: 2x v10 = r4 << 32
# asm 1: vpsllq $32,<r4=reg128#11,>v10=reg128#16
# asm 2: vpsllq $32,<r4=%xmm10,>v10=%xmm15
vpsllq $32,%xmm10,%xmm15

# qhasm: 2x v01 = r0 unsigned>> 32
# asm 1: vpsrlq $32,<r0=reg128#7,>v01=reg128#7
# asm 2: vpsrlq $32,<r0=%xmm6,>v01=%xmm6
vpsrlq $32,%xmm6,%xmm6

# qhasm: v11 = r4 & mask1
# asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11
# asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10
vpand %xmm1,%xmm10,%xmm10

# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14
vpor %xmm15,%xmm14,%xmm14

# qhasm: r4 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7
# asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6
vpor %xmm10,%xmm6,%xmm6

# qhasm: v00 = r1 & mask0
# asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11
# asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10
vpand %xmm0,%xmm7,%xmm10

# qhasm: 2x v10 = r5 << 32
# asm 1: vpsllq $32,<r5=reg128#12,>v10=reg128#16
# asm 2: vpsllq $32,<r5=%xmm11,>v10=%xmm15
vpsllq $32,%xmm11,%xmm15

# qhasm: 2x v01 = r1 unsigned>> 32
# asm 1: vpsrlq $32,<r1=reg128#8,>v01=reg128#8
# asm 2: vpsrlq $32,<r1=%xmm7,>v01=%xmm7
vpsrlq $32,%xmm7,%xmm7

# qhasm: v11 = r5 & mask1
# asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12
# asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11
vpand %xmm1,%xmm11,%xmm11

# qhasm: r1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11
# asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10
vpor %xmm15,%xmm10,%xmm10

# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8
# asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7
vpor %xmm11,%xmm7,%xmm7

# qhasm: v00 = r2 & mask0
# asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12
# asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11
vpand %xmm0,%xmm8,%xmm11

# qhasm: 2x v10 = r6 << 32
# asm 1: vpsllq $32,<r6=reg128#13,>v10=reg128#16
# asm 2: vpsllq $32,<r6=%xmm12,>v10=%xmm15
vpsllq $32,%xmm12,%xmm15

# qhasm: 2x v01 = r2 unsigned>> 32
# asm 1: vpsrlq $32,<r2=reg128#9,>v01=reg128#9
# asm 2: vpsrlq $32,<r2=%xmm8,>v01=%xmm8
vpsrlq $32,%xmm8,%xmm8

# qhasm: v11 = r6 & mask1
# asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13
# asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12
vpand %xmm1,%xmm12,%xmm12

# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11
vpor %xmm15,%xmm11,%xmm11

# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9
# asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8
vpor %xmm12,%xmm8,%xmm8

# qhasm: v00 = r3 & mask0
# asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13
# asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12
vpand %xmm0,%xmm9,%xmm12

# qhasm: 2x v10 = r7 << 32
# asm 1: vpsllq $32,<r7=reg128#14,>v10=reg128#16
# asm 2: vpsllq $32,<r7=%xmm13,>v10=%xmm15
vpsllq $32,%xmm13,%xmm15

# qhasm: 2x v01 = r3 unsigned>> 32
# asm 1: vpsrlq $32,<r3=reg128#10,>v01=reg128#10
# asm 2: vpsrlq $32,<r3=%xmm9,>v01=%xmm9
vpsrlq $32,%xmm9,%xmm9

# qhasm: v11 = r7 & mask1
# asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14
# asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13
vpand %xmm1,%xmm13,%xmm13

# qhasm: r3 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12
vpor %xmm15,%xmm12,%xmm12

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10
# asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9
vpor %xmm13,%xmm9,%xmm9

# qhasm: v00 = r0 & mask2
# asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14
# asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13
vpand %xmm2,%xmm14,%xmm13

# qhasm: 4x v10 = r2 << 16
# asm 1: vpslld $16,<r2=reg128#12,>v10=reg128#16
# asm 2: vpslld $16,<r2=%xmm11,>v10=%xmm15
vpslld $16,%xmm11,%xmm15

# qhasm: 4x v01 = r0 unsigned>> 16
# asm 1: vpsrld $16,<r0=reg128#15,>v01=reg128#15
# asm 2: vpsrld $16,<r0=%xmm14,>v01=%xmm14
vpsrld $16,%xmm14,%xmm14

# qhasm: v11 = r2 & mask3
# asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12
# asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11
vpand %xmm3,%xmm11,%xmm11

# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14
# asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13
vpor %xmm15,%xmm13,%xmm13

# qhasm: r2 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12
# asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11
vpor %xmm11,%xmm14,%xmm11

# qhasm: v00 = r1 & mask2
# asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15
# asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14
vpand %xmm2,%xmm10,%xmm14

# qhasm: 4x v10 = r3 << 16
# asm 1: vpslld $16,<r3=reg128#13,>v10=reg128#16
# asm 2: vpslld $16,<r3=%xmm12,>v10=%xmm15
vpslld $16,%xmm12,%xmm15

# qhasm: 4x v01 = r1 unsigned>> 16
# asm 1: vpsrld $16,<r1=reg128#11,>v01=reg128#11
# asm 2: vpsrld $16,<r1=%xmm10,>v01=%xmm10
vpsrld $16,%xmm10,%xmm10

# qhasm: v11 = r3 & mask3
# asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13
# asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12
vpand %xmm3,%xmm12,%xmm12

# qhasm: r1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14
vpor %xmm15,%xmm14,%xmm14

# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11
# asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10
vpor %xmm12,%xmm10,%xmm10

# qhasm: v00 = r4 & mask2
# asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13
# asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12
vpand %xmm2,%xmm6,%xmm12

# qhasm: 4x v10 = r6 << 16
# asm 1: vpslld $16,<r6=reg128#9,>v10=reg128#16
# asm 2: vpslld $16,<r6=%xmm8,>v10=%xmm15
vpslld $16,%xmm8,%xmm15

# qhasm: 4x v01 = r4 unsigned>> 16
# asm 1: vpsrld $16,<r4=reg128#7,>v01=reg128#7
# asm 2: vpsrld $16,<r4=%xmm6,>v01=%xmm6
vpsrld $16,%xmm6,%xmm6

# qhasm: v11 = r6 & mask3
# asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9
# asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8
vpand %xmm3,%xmm8,%xmm8

# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12
vpor %xmm15,%xmm12,%xmm12

# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7
# asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6
vpor %xmm8,%xmm6,%xmm6

# qhasm: v00 = r5 & mask2
# asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9
# asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8
vpand %xmm2,%xmm7,%xmm8

# qhasm: 4x v10 = r7 << 16
# asm 1: vpslld $16,<r7=reg128#10,>v10=reg128#16
# asm 2: vpslld $16,<r7=%xmm9,>v10=%xmm15
vpslld $16,%xmm9,%xmm15

# qhasm: 4x v01 = r5 unsigned>> 16
# asm 1: vpsrld $16,<r5=reg128#8,>v01=reg128#8
# asm 2: vpsrld $16,<r5=%xmm7,>v01=%xmm7
vpsrld $16,%xmm7,%xmm7

# qhasm: v11 = r7 & mask3
# asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10
# asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9
vpand %xmm3,%xmm9,%xmm9

# qhasm: r5 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9
# asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8
vpor %xmm15,%xmm8,%xmm8

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8
# asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7
vpor %xmm9,%xmm7,%xmm7

# qhasm: v00 = r0 & mask4
# asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9
vpand %xmm4,%xmm13,%xmm9

# qhasm: 8x v10 = r1 << 8
# asm 1: vpsllw $8,<r1=reg128#15,>v10=reg128#16
# asm 2: vpsllw $8,<r1=%xmm14,>v10=%xmm15
vpsllw $8,%xmm14,%xmm15

# qhasm: 8x v01 = r0 unsigned>> 8
# asm 1: vpsrlw $8,<r0=reg128#14,>v01=reg128#14
# asm 2: vpsrlw $8,<r0=%xmm13,>v01=%xmm13
vpsrlw $8,%xmm13,%xmm13

# qhasm: v11 = r1 & mask5
# asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15
# asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14
vpand %xmm5,%xmm14,%xmm14

# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10
# asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9
vpor %xmm15,%xmm9,%xmm9

# qhasm: r1 = v01 | v11
# asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14
# asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13
vpor %xmm14,%xmm13,%xmm13

# qhasm: v00 = r2 & mask4
# asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15
# asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14
vpand %xmm4,%xmm11,%xmm14

# qhasm: 8x v10 = r3 << 8
# asm 1: vpsllw $8,<r3=reg128#11,>v10=reg128#16
# asm 2: vpsllw $8,<r3=%xmm10,>v10=%xmm15
vpsllw $8,%xmm10,%xmm15

# qhasm: 8x v01 = r2 unsigned>> 8
# asm 1: vpsrlw $8,<r2=reg128#12,>v01=reg128#12
# asm 2: vpsrlw $8,<r2=%xmm11,>v01=%xmm11
vpsrlw $8,%xmm11,%xmm11

# qhasm: v11 = r3 & mask5
# asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11
# asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10
vpand %xmm5,%xmm10,%xmm10

# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14
vpor %xmm15,%xmm14,%xmm14

# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11
# asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10
vpor %xmm10,%xmm11,%xmm10

# qhasm: v00 = r4 & mask4
# asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12
# asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11
vpand %xmm4,%xmm12,%xmm11

# qhasm: 8x v10 = r5 << 8
# asm 1: vpsllw $8,<r5=reg128#9,>v10=reg128#16
# asm 2: vpsllw $8,<r5=%xmm8,>v10=%xmm15
vpsllw $8,%xmm8,%xmm15

# qhasm: 8x v01 = r4 unsigned>> 8
# asm 1: vpsrlw $8,<r4=reg128#13,>v01=reg128#13
# asm 2: vpsrlw $8,<r4=%xmm12,>v01=%xmm12
vpsrlw $8,%xmm12,%xmm12

# qhasm: v11 = r5 & mask5
# asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9
# asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8
vpand %xmm5,%xmm8,%xmm8

# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11
vpor %xmm15,%xmm11,%xmm11

# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9
# asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8
vpor %xmm8,%xmm12,%xmm8

# qhasm: v00 = r6 & mask4
# asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13
# asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12
vpand %xmm4,%xmm6,%xmm12

# qhasm: 8x v10 = r7 << 8
# asm 1: vpsllw $8,<r7=reg128#8,>v10=reg128#16
# asm 2: vpsllw $8,<r7=%xmm7,>v10=%xmm15
vpsllw $8,%xmm7,%xmm15

# qhasm: 8x v01 = r6 unsigned>> 8
# asm 1: vpsrlw $8,<r6=reg128#7,>v01=reg128#7
# asm 2: vpsrlw $8,<r6=%xmm6,>v01=%xmm6
vpsrlw $8,%xmm6,%xmm6

# qhasm: v11 = r7 & mask5
# asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8
# asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7
vpand %xmm5,%xmm7,%xmm7

# qhasm: r6 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12
vpor %xmm15,%xmm12,%xmm12

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7
# asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6
vpor %xmm7,%xmm6,%xmm6

# qhasm: buf = r0[0]
# asm 1: pextrq $0x0,<r0=reg128#10,>buf=int64#2
# asm 2: pextrq $0x0,<r0=%xmm9,>buf=%rsi
pextrq $0x0,%xmm9,%rsi

# qhasm: mem64[ input_0 + 16 ] = buf
# asm 1: movq   <buf=int64#2,16(<input_0=int64#1)
# asm 2: movq   <buf=%rsi,16(<input_0=%rdi)
movq   %rsi,16(%rdi)

# qhasm: buf = r1[0]
# asm 1: pextrq $0x0,<r1=reg128#14,>buf=int64#2
# asm 2: pextrq $0x0,<r1=%xmm13,>buf=%rsi
pextrq $0x0,%xmm13,%rsi

# qhasm: mem64[ input_0 + 80 ] = buf
# asm 1: movq   <buf=int64#2,80(<input_0=int64#1)
# asm 2: movq   <buf=%rsi,80(<input_0=%rdi)
movq   %rsi,80(%rdi)

# qhasm: buf = r2[0]
# asm 1: pextrq $0x0,<r2=reg128#15,>buf=int64#2
# asm 2: pextrq $0x0,<r2=%xmm14,>buf=%rsi
pextrq $0x0,%xmm14,%rsi

# qhasm: mem64[ input_0 + 144 ] = buf
# asm 1: movq   <buf=int64#2,144(<input_0=int64#1)
# asm 2: movq   <buf=%rsi,144(<input_0=%rdi)
movq   %rsi,144(%rdi)

# qhasm: buf = r3[0]
# asm 1: pextrq $0x0,<r3=reg128#11,>buf=int64#2
# asm 2: pextrq $0x0,<r3=%xmm10,>buf=%rsi
pextrq $0x0,%xmm10,%rsi

# qhasm: mem64[ input_0 + 208 ] = buf
# asm 1: movq   <buf=int64#2,208(<input_0=int64#1)
# asm 2: movq   <buf=%rsi,208(<input_0=%rdi)
movq   %rsi,208(%rdi)

# qhasm: buf = r4[0]
# asm 1: pextrq $0x0,<r4=reg128#12,>buf=int64#2
# asm 2: pextrq $0x0,<r4=%xmm11,>buf=%rsi
pextrq $0x0,%xmm11,%rsi

# qhasm: mem64[ input_0 + 272 ] = buf
# asm 1: movq   <buf=int64#2,272(<input_0=int64#1)
# asm 2: movq   <buf=%rsi,272(<input_0=%rdi)
movq   %rsi,272(%rdi)

# qhasm: buf = r5[0]
# asm 1: pextrq $0x0,<r5=reg128#9,>buf=int64#2
# asm 2: pextrq $0x0,<r5=%xmm8,>buf=%rsi
pextrq $0x0,%xmm8,%rsi

# qhasm: mem64[ input_0 + 336 ] = buf
# asm 1: movq   <buf=int64#2,336(<input_0=int64#1)
# asm 2: movq   <buf=%rsi,336(<input_0=%rdi)
movq   %rsi,336(%rdi)

# qhasm: buf = r6[0]
# asm 1: pextrq $0x0,<r6=reg128#13,>buf=int64#2
# asm 2: pextrq $0x0,<r6=%xmm12,>buf=%rsi
pextrq $0x0,%xmm12,%rsi

# qhasm: mem64[ input_0 + 400 ] = buf
# asm 1: movq   <buf=int64#2,400(<input_0=int64#1)
# asm 2: movq   <buf=%rsi,400(<input_0=%rdi)
movq   %rsi,400(%rdi)

# qhasm: buf = r7[0]
# asm 1: pextrq $0x0,<r7=reg128#7,>buf=int64#2
# asm 2: pextrq $0x0,<r7=%xmm6,>buf=%rsi
pextrq $0x0,%xmm6,%rsi

# qhasm: mem64[ input_0 + 464 ] = buf
# asm 1: movq   <buf=int64#2,464(<input_0=int64#1)
# asm 2: movq   <buf=%rsi,464(<input_0=%rdi)
movq   %rsi,464(%rdi)

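# Next residue class: rows 3, 11, ..., 59 (byte offsets 24, 88, ..., 472).
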
1555# qhasm: r0 = mem64[ input_0 + 24 ] x2
1556# asm 1: movddup 24(<input_0=int64#1),>r0=reg128#7
1557# asm 2: movddup 24(<input_0=%rdi),>r0=%xmm6
1558movddup 24(%rdi),%xmm6
1559
1560# qhasm: r1 = mem64[ input_0 + 88 ] x2
1561# asm 1: movddup 88(<input_0=int64#1),>r1=reg128#8
1562# asm 2: movddup 88(<input_0=%rdi),>r1=%xmm7
1563movddup 88(%rdi),%xmm7
1564
1565# qhasm: r2 = mem64[ input_0 + 152 ] x2
1566# asm 1: movddup 152(<input_0=int64#1),>r2=reg128#9
1567# asm 2: movddup 152(<input_0=%rdi),>r2=%xmm8
1568movddup 152(%rdi),%xmm8
1569
1570# qhasm: r3 = mem64[ input_0 + 216 ] x2
1571# asm 1: movddup 216(<input_0=int64#1),>r3=reg128#10
1572# asm 2: movddup 216(<input_0=%rdi),>r3=%xmm9
1573movddup 216(%rdi),%xmm9
1574
1575# qhasm: r4 = mem64[ input_0 + 280 ] x2
1576# asm 1: movddup 280(<input_0=int64#1),>r4=reg128#11
1577# asm 2: movddup 280(<input_0=%rdi),>r4=%xmm10
1578movddup 280(%rdi),%xmm10
1579
1580# qhasm: r5 = mem64[ input_0 + 344 ] x2
1581# asm 1: movddup 344(<input_0=int64#1),>r5=reg128#12
1582# asm 2: movddup 344(<input_0=%rdi),>r5=%xmm11
1583movddup 344(%rdi),%xmm11
1584
1585# qhasm: r6 = mem64[ input_0 + 408 ] x2
1586# asm 1: movddup 408(<input_0=int64#1),>r6=reg128#13
1587# asm 2: movddup 408(<input_0=%rdi),>r6=%xmm12
1588movddup 408(%rdi),%xmm12
1589
1590# qhasm: r7 = mem64[ input_0 + 472 ] x2
1591# asm 1: movddup 472(<input_0=int64#1),>r7=reg128#14
1592# asm 2: movddup 472(<input_0=%rdi),>r7=%xmm13
1593movddup 472(%rdi),%xmm13
1594
# qhasm: v00 = r0 & mask0
# asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15
# asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14
vpand %xmm0,%xmm6,%xmm14

# qhasm: 2x v10 = r4 << 32
# asm 1: vpsllq $32,<r4=reg128#11,>v10=reg128#16
# asm 2: vpsllq $32,<r4=%xmm10,>v10=%xmm15
vpsllq $32,%xmm10,%xmm15

# qhasm: 2x v01 = r0 unsigned>> 32
# asm 1: vpsrlq $32,<r0=reg128#7,>v01=reg128#7
# asm 2: vpsrlq $32,<r0=%xmm6,>v01=%xmm6
vpsrlq $32,%xmm6,%xmm6

# qhasm: v11 = r4 & mask1
# asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11
# asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10
vpand %xmm1,%xmm10,%xmm10

# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14
vpor %xmm15,%xmm14,%xmm14

# qhasm: r4 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7
# asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6
vpor %xmm10,%xmm6,%xmm6

# qhasm: v00 = r1 & mask0
# asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11
# asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10
vpand %xmm0,%xmm7,%xmm10

# qhasm: 2x v10 = r5 << 32
# asm 1: vpsllq $32,<r5=reg128#12,>v10=reg128#16
# asm 2: vpsllq $32,<r5=%xmm11,>v10=%xmm15
vpsllq $32,%xmm11,%xmm15

# qhasm: 2x v01 = r1 unsigned>> 32
# asm 1: vpsrlq $32,<r1=reg128#8,>v01=reg128#8
# asm 2: vpsrlq $32,<r1=%xmm7,>v01=%xmm7
vpsrlq $32,%xmm7,%xmm7

# qhasm: v11 = r5 & mask1
# asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12
# asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11
vpand %xmm1,%xmm11,%xmm11

# qhasm: r1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11
# asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10
vpor %xmm15,%xmm10,%xmm10

# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8
# asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7
vpor %xmm11,%xmm7,%xmm7

# qhasm: v00 = r2 & mask0
# asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12
# asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11
vpand %xmm0,%xmm8,%xmm11

# qhasm: 2x v10 = r6 << 32
# asm 1: vpsllq $32,<r6=reg128#13,>v10=reg128#16
# asm 2: vpsllq $32,<r6=%xmm12,>v10=%xmm15
vpsllq $32,%xmm12,%xmm15

# qhasm: 2x v01 = r2 unsigned>> 32
# asm 1: vpsrlq $32,<r2=reg128#9,>v01=reg128#9
# asm 2: vpsrlq $32,<r2=%xmm8,>v01=%xmm8
vpsrlq $32,%xmm8,%xmm8

# qhasm: v11 = r6 & mask1
# asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13
# asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12
vpand %xmm1,%xmm12,%xmm12

# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11
vpor %xmm15,%xmm11,%xmm11

# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9
# asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8
vpor %xmm12,%xmm8,%xmm8

# qhasm: v00 = r3 & mask0
# asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13
# asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12
vpand %xmm0,%xmm9,%xmm12

# qhasm: 2x v10 = r7 << 32
# asm 1: vpsllq $32,<r7=reg128#14,>v10=reg128#16
# asm 2: vpsllq $32,<r7=%xmm13,>v10=%xmm15
vpsllq $32,%xmm13,%xmm15

# qhasm: 2x v01 = r3 unsigned>> 32
# asm 1: vpsrlq $32,<r3=reg128#10,>v01=reg128#10
# asm 2: vpsrlq $32,<r3=%xmm9,>v01=%xmm9
vpsrlq $32,%xmm9,%xmm9

# qhasm: v11 = r7 & mask1
# asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14
# asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13
vpand %xmm1,%xmm13,%xmm13

# qhasm: r3 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12
vpor %xmm15,%xmm12,%xmm12

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10
# asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9
vpor %xmm13,%xmm9,%xmm9

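# Same exchange at 16-bit granularity: mask2/mask3 with vpslld/vpsrld by 16
# swap the 16-bit halves of every 32-bit field, between rows at stride 2
# (r0<->r2, r1<->r3, r4<->r6, r5<->r7).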
# qhasm: v00 = r0 & mask2
# asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14
# asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13
vpand %xmm2,%xmm14,%xmm13

# qhasm: 4x v10 = r2 << 16
# asm 1: vpslld $16,<r2=reg128#12,>v10=reg128#16
# asm 2: vpslld $16,<r2=%xmm11,>v10=%xmm15
vpslld $16,%xmm11,%xmm15

# qhasm: 4x v01 = r0 unsigned>> 16
# asm 1: vpsrld $16,<r0=reg128#15,>v01=reg128#15
# asm 2: vpsrld $16,<r0=%xmm14,>v01=%xmm14
vpsrld $16,%xmm14,%xmm14

# qhasm: v11 = r2 & mask3
# asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12
# asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11
vpand %xmm3,%xmm11,%xmm11

# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14
# asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13
vpor %xmm15,%xmm13,%xmm13

# qhasm: r2 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12
# asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11
vpor %xmm11,%xmm14,%xmm11

# qhasm: v00 = r1 & mask2
# asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15
# asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14
vpand %xmm2,%xmm10,%xmm14

# qhasm: 4x v10 = r3 << 16
# asm 1: vpslld $16,<r3=reg128#13,>v10=reg128#16
# asm 2: vpslld $16,<r3=%xmm12,>v10=%xmm15
vpslld $16,%xmm12,%xmm15

# qhasm: 4x v01 = r1 unsigned>> 16
# asm 1: vpsrld $16,<r1=reg128#11,>v01=reg128#11
# asm 2: vpsrld $16,<r1=%xmm10,>v01=%xmm10
vpsrld $16,%xmm10,%xmm10

# qhasm: v11 = r3 & mask3
# asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13
# asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12
vpand %xmm3,%xmm12,%xmm12

# qhasm: r1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14
vpor %xmm15,%xmm14,%xmm14

# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11
# asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10
vpor %xmm12,%xmm10,%xmm10

# qhasm: v00 = r4 & mask2
# asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13
# asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12
vpand %xmm2,%xmm6,%xmm12

# qhasm: 4x v10 = r6 << 16
# asm 1: vpslld $16,<r6=reg128#9,>v10=reg128#16
# asm 2: vpslld $16,<r6=%xmm8,>v10=%xmm15
vpslld $16,%xmm8,%xmm15

# qhasm: 4x v01 = r4 unsigned>> 16
# asm 1: vpsrld $16,<r4=reg128#7,>v01=reg128#7
# asm 2: vpsrld $16,<r4=%xmm6,>v01=%xmm6
vpsrld $16,%xmm6,%xmm6

# qhasm: v11 = r6 & mask3
# asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9
# asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8
vpand %xmm3,%xmm8,%xmm8

# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12
vpor %xmm15,%xmm12,%xmm12

# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7
# asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6
vpor %xmm8,%xmm6,%xmm6

# qhasm: v00 = r5 & mask2
# asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9
# asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8
vpand %xmm2,%xmm7,%xmm8

# qhasm: 4x v10 = r7 << 16
# asm 1: vpslld $16,<r7=reg128#10,>v10=reg128#16
# asm 2: vpslld $16,<r7=%xmm9,>v10=%xmm15
vpslld $16,%xmm9,%xmm15

# qhasm: 4x v01 = r5 unsigned>> 16
# asm 1: vpsrld $16,<r5=reg128#8,>v01=reg128#8
# asm 2: vpsrld $16,<r5=%xmm7,>v01=%xmm7
vpsrld $16,%xmm7,%xmm7

# qhasm: v11 = r7 & mask3
# asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10
# asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9
vpand %xmm3,%xmm9,%xmm9

# qhasm: r5 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9
# asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8
vpor %xmm15,%xmm8,%xmm8

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8
# asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7
vpor %xmm9,%xmm7,%xmm7

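# Last exchange at 8-bit granularity: mask4/mask5 with vpsllw/vpsrlw by 8
# swap the bytes of every 16-bit word, between adjacent rows
# (r0<->r1, r2<->r3, r4<->r5, r6<->r7).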
# qhasm: v00 = r0 & mask4
# asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9
vpand %xmm4,%xmm13,%xmm9

# qhasm: 8x v10 = r1 << 8
# asm 1: vpsllw $8,<r1=reg128#15,>v10=reg128#16
# asm 2: vpsllw $8,<r1=%xmm14,>v10=%xmm15
vpsllw $8,%xmm14,%xmm15

# qhasm: 8x v01 = r0 unsigned>> 8
# asm 1: vpsrlw $8,<r0=reg128#14,>v01=reg128#14
# asm 2: vpsrlw $8,<r0=%xmm13,>v01=%xmm13
vpsrlw $8,%xmm13,%xmm13

# qhasm: v11 = r1 & mask5
# asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15
# asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14
vpand %xmm5,%xmm14,%xmm14

# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10
# asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9
vpor %xmm15,%xmm9,%xmm9

# qhasm: r1 = v01 | v11
# asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14
# asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13
vpor %xmm14,%xmm13,%xmm13

# qhasm: v00 = r2 & mask4
# asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15
# asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14
vpand %xmm4,%xmm11,%xmm14

# qhasm: 8x v10 = r3 << 8
# asm 1: vpsllw $8,<r3=reg128#11,>v10=reg128#16
# asm 2: vpsllw $8,<r3=%xmm10,>v10=%xmm15
vpsllw $8,%xmm10,%xmm15

# qhasm: 8x v01 = r2 unsigned>> 8
# asm 1: vpsrlw $8,<r2=reg128#12,>v01=reg128#12
# asm 2: vpsrlw $8,<r2=%xmm11,>v01=%xmm11
vpsrlw $8,%xmm11,%xmm11

# qhasm: v11 = r3 & mask5
# asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11
# asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10
vpand %xmm5,%xmm10,%xmm10

# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14
vpor %xmm15,%xmm14,%xmm14

# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11
# asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10
vpor %xmm10,%xmm11,%xmm10

# qhasm: v00 = r4 & mask4
# asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12
# asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11
vpand %xmm4,%xmm12,%xmm11

# qhasm: 8x v10 = r5 << 8
# asm 1: vpsllw $8,<r5=reg128#9,>v10=reg128#16
# asm 2: vpsllw $8,<r5=%xmm8,>v10=%xmm15
vpsllw $8,%xmm8,%xmm15

# qhasm: 8x v01 = r4 unsigned>> 8
# asm 1: vpsrlw $8,<r4=reg128#13,>v01=reg128#13
# asm 2: vpsrlw $8,<r4=%xmm12,>v01=%xmm12
vpsrlw $8,%xmm12,%xmm12

# qhasm: v11 = r5 & mask5
# asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9
# asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8
vpand %xmm5,%xmm8,%xmm8

# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11
vpor %xmm15,%xmm11,%xmm11

# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9
# asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8
vpor %xmm8,%xmm12,%xmm8

# qhasm: v00 = r6 & mask4
# asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13
# asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12
vpand %xmm4,%xmm6,%xmm12

# qhasm: 8x v10 = r7 << 8
# asm 1: vpsllw $8,<r7=reg128#8,>v10=reg128#16
# asm 2: vpsllw $8,<r7=%xmm7,>v10=%xmm15
vpsllw $8,%xmm7,%xmm15

# qhasm: 8x v01 = r6 unsigned>> 8
# asm 1: vpsrlw $8,<r6=reg128#7,>v01=reg128#7
# asm 2: vpsrlw $8,<r6=%xmm6,>v01=%xmm6
vpsrlw $8,%xmm6,%xmm6

# qhasm: v11 = r7 & mask5
# asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8
# asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7
vpand %xmm5,%xmm7,%xmm7

# qhasm: r6 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12
vpor %xmm15,%xmm12,%xmm12

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7
# asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6
vpor %xmm7,%xmm6,%xmm6

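# Write-back: both 64-bit lanes of each register hold identical data (the
# rows were loaded with movddup), so extracting lane 0 with pextrq $0x0 is
# enough; each row returns to the slot it was loaded from, 64 bytes apart.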
# qhasm: buf = r0[0]
# asm 1: pextrq $0x0,<r0=reg128#10,>buf=int64#2
# asm 2: pextrq $0x0,<r0=%xmm9,>buf=%rsi
pextrq $0x0,%xmm9,%rsi

# qhasm: mem64[ input_0 + 24 ] = buf
# asm 1: movq   <buf=int64#2,24(<input_0=int64#1)
# asm 2: movq   <buf=%rsi,24(<input_0=%rdi)
movq   %rsi,24(%rdi)

# qhasm: buf = r1[0]
# asm 1: pextrq $0x0,<r1=reg128#14,>buf=int64#2
# asm 2: pextrq $0x0,<r1=%xmm13,>buf=%rsi
pextrq $0x0,%xmm13,%rsi

# qhasm: mem64[ input_0 + 88 ] = buf
# asm 1: movq   <buf=int64#2,88(<input_0=int64#1)
# asm 2: movq   <buf=%rsi,88(<input_0=%rdi)
movq   %rsi,88(%rdi)

# qhasm: buf = r2[0]
# asm 1: pextrq $0x0,<r2=reg128#15,>buf=int64#2
# asm 2: pextrq $0x0,<r2=%xmm14,>buf=%rsi
pextrq $0x0,%xmm14,%rsi

# qhasm: mem64[ input_0 + 152 ] = buf
# asm 1: movq   <buf=int64#2,152(<input_0=int64#1)
# asm 2: movq   <buf=%rsi,152(<input_0=%rdi)
movq   %rsi,152(%rdi)

# qhasm: buf = r3[0]
# asm 1: pextrq $0x0,<r3=reg128#11,>buf=int64#2
# asm 2: pextrq $0x0,<r3=%xmm10,>buf=%rsi
pextrq $0x0,%xmm10,%rsi

# qhasm: mem64[ input_0 + 216 ] = buf
# asm 1: movq   <buf=int64#2,216(<input_0=int64#1)
# asm 2: movq   <buf=%rsi,216(<input_0=%rdi)
movq   %rsi,216(%rdi)

# qhasm: buf = r4[0]
# asm 1: pextrq $0x0,<r4=reg128#12,>buf=int64#2
# asm 2: pextrq $0x0,<r4=%xmm11,>buf=%rsi
pextrq $0x0,%xmm11,%rsi

# qhasm: mem64[ input_0 + 280 ] = buf
# asm 1: movq   <buf=int64#2,280(<input_0=int64#1)
# asm 2: movq   <buf=%rsi,280(<input_0=%rdi)
movq   %rsi,280(%rdi)

# qhasm: buf = r5[0]
# asm 1: pextrq $0x0,<r5=reg128#9,>buf=int64#2
# asm 2: pextrq $0x0,<r5=%xmm8,>buf=%rsi
pextrq $0x0,%xmm8,%rsi

# qhasm: mem64[ input_0 + 344 ] = buf
# asm 1: movq   <buf=int64#2,344(<input_0=int64#1)
# asm 2: movq   <buf=%rsi,344(<input_0=%rdi)
movq   %rsi,344(%rdi)

# qhasm: buf = r6[0]
# asm 1: pextrq $0x0,<r6=reg128#13,>buf=int64#2
# asm 2: pextrq $0x0,<r6=%xmm12,>buf=%rsi
pextrq $0x0,%xmm12,%rsi

# qhasm: mem64[ input_0 + 408 ] = buf
# asm 1: movq   <buf=int64#2,408(<input_0=int64#1)
# asm 2: movq   <buf=%rsi,408(<input_0=%rdi)
movq   %rsi,408(%rdi)

# qhasm: buf = r7[0]
# asm 1: pextrq $0x0,<r7=reg128#7,>buf=int64#2
# asm 2: pextrq $0x0,<r7=%xmm6,>buf=%rsi
pextrq $0x0,%xmm6,%rsi

# qhasm: mem64[ input_0 + 472 ] = buf
# asm 1: movq   <buf=int64#2,472(<input_0=int64#1)
# asm 2: movq   <buf=%rsi,472(<input_0=%rdi)
movq   %rsi,472(%rdi)

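# The same load / three-round exchange / store sequence now repeats for the
# remaining row groups (byte offsets 32, 40, 48, ...), each covering eight
# 64-bit rows spaced 64 bytes apart.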
# qhasm: r0 = mem64[ input_0 + 32 ] x2
# asm 1: movddup 32(<input_0=int64#1),>r0=reg128#7
# asm 2: movddup 32(<input_0=%rdi),>r0=%xmm6
movddup 32(%rdi),%xmm6

# qhasm: r1 = mem64[ input_0 + 96 ] x2
# asm 1: movddup 96(<input_0=int64#1),>r1=reg128#8
# asm 2: movddup 96(<input_0=%rdi),>r1=%xmm7
movddup 96(%rdi),%xmm7

# qhasm: r2 = mem64[ input_0 + 160 ] x2
# asm 1: movddup 160(<input_0=int64#1),>r2=reg128#9
# asm 2: movddup 160(<input_0=%rdi),>r2=%xmm8
movddup 160(%rdi),%xmm8

# qhasm: r3 = mem64[ input_0 + 224 ] x2
# asm 1: movddup 224(<input_0=int64#1),>r3=reg128#10
# asm 2: movddup 224(<input_0=%rdi),>r3=%xmm9
movddup 224(%rdi),%xmm9

# qhasm: r4 = mem64[ input_0 + 288 ] x2
# asm 1: movddup 288(<input_0=int64#1),>r4=reg128#11
# asm 2: movddup 288(<input_0=%rdi),>r4=%xmm10
movddup 288(%rdi),%xmm10

# qhasm: r5 = mem64[ input_0 + 352 ] x2
# asm 1: movddup 352(<input_0=int64#1),>r5=reg128#12
# asm 2: movddup 352(<input_0=%rdi),>r5=%xmm11
movddup 352(%rdi),%xmm11

# qhasm: r6 = mem64[ input_0 + 416 ] x2
# asm 1: movddup 416(<input_0=int64#1),>r6=reg128#13
# asm 2: movddup 416(<input_0=%rdi),>r6=%xmm12
movddup 416(%rdi),%xmm12

# qhasm: r7 = mem64[ input_0 + 480 ] x2
# asm 1: movddup 480(<input_0=int64#1),>r7=reg128#14
# asm 2: movddup 480(<input_0=%rdi),>r7=%xmm13
movddup 480(%rdi),%xmm13

# qhasm: v00 = r0 & mask0
# asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15
# asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14
vpand %xmm0,%xmm6,%xmm14

# qhasm: 2x v10 = r4 << 32
# asm 1: vpsllq $32,<r4=reg128#11,>v10=reg128#16
# asm 2: vpsllq $32,<r4=%xmm10,>v10=%xmm15
vpsllq $32,%xmm10,%xmm15

# qhasm: 2x v01 = r0 unsigned>> 32
# asm 1: vpsrlq $32,<r0=reg128#7,>v01=reg128#7
# asm 2: vpsrlq $32,<r0=%xmm6,>v01=%xmm6
vpsrlq $32,%xmm6,%xmm6

# qhasm: v11 = r4 & mask1
# asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11
# asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10
vpand %xmm1,%xmm10,%xmm10

# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14
vpor %xmm15,%xmm14,%xmm14

# qhasm: r4 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7
# asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6
vpor %xmm10,%xmm6,%xmm6

# qhasm: v00 = r1 & mask0
# asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11
# asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10
vpand %xmm0,%xmm7,%xmm10

# qhasm: 2x v10 = r5 << 32
# asm 1: vpsllq $32,<r5=reg128#12,>v10=reg128#16
# asm 2: vpsllq $32,<r5=%xmm11,>v10=%xmm15
vpsllq $32,%xmm11,%xmm15

# qhasm: 2x v01 = r1 unsigned>> 32
# asm 1: vpsrlq $32,<r1=reg128#8,>v01=reg128#8
# asm 2: vpsrlq $32,<r1=%xmm7,>v01=%xmm7
vpsrlq $32,%xmm7,%xmm7

# qhasm: v11 = r5 & mask1
# asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12
# asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11
vpand %xmm1,%xmm11,%xmm11

# qhasm: r1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11
# asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10
vpor %xmm15,%xmm10,%xmm10

# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8
# asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7
vpor %xmm11,%xmm7,%xmm7

# qhasm: v00 = r2 & mask0
# asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12
# asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11
vpand %xmm0,%xmm8,%xmm11

# qhasm: 2x v10 = r6 << 32
# asm 1: vpsllq $32,<r6=reg128#13,>v10=reg128#16
# asm 2: vpsllq $32,<r6=%xmm12,>v10=%xmm15
vpsllq $32,%xmm12,%xmm15

# qhasm: 2x v01 = r2 unsigned>> 32
# asm 1: vpsrlq $32,<r2=reg128#9,>v01=reg128#9
# asm 2: vpsrlq $32,<r2=%xmm8,>v01=%xmm8
vpsrlq $32,%xmm8,%xmm8

# qhasm: v11 = r6 & mask1
# asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13
# asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12
vpand %xmm1,%xmm12,%xmm12

# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11
vpor %xmm15,%xmm11,%xmm11

# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9
# asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8
vpor %xmm12,%xmm8,%xmm8

# qhasm: v00 = r3 & mask0
# asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13
# asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12
vpand %xmm0,%xmm9,%xmm12

# qhasm: 2x v10 = r7 << 32
# asm 1: vpsllq $32,<r7=reg128#14,>v10=reg128#16
# asm 2: vpsllq $32,<r7=%xmm13,>v10=%xmm15
vpsllq $32,%xmm13,%xmm15

# qhasm: 2x v01 = r3 unsigned>> 32
# asm 1: vpsrlq $32,<r3=reg128#10,>v01=reg128#10
# asm 2: vpsrlq $32,<r3=%xmm9,>v01=%xmm9
vpsrlq $32,%xmm9,%xmm9

# qhasm: v11 = r7 & mask1
# asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14
# asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13
vpand %xmm1,%xmm13,%xmm13

# qhasm: r3 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12
vpor %xmm15,%xmm12,%xmm12

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10
# asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9
vpor %xmm13,%xmm9,%xmm9

# qhasm: v00 = r0 & mask2
# asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14
# asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13
vpand %xmm2,%xmm14,%xmm13

# qhasm: 4x v10 = r2 << 16
# asm 1: vpslld $16,<r2=reg128#12,>v10=reg128#16
# asm 2: vpslld $16,<r2=%xmm11,>v10=%xmm15
vpslld $16,%xmm11,%xmm15

# qhasm: 4x v01 = r0 unsigned>> 16
# asm 1: vpsrld $16,<r0=reg128#15,>v01=reg128#15
# asm 2: vpsrld $16,<r0=%xmm14,>v01=%xmm14
vpsrld $16,%xmm14,%xmm14

# qhasm: v11 = r2 & mask3
# asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12
# asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11
vpand %xmm3,%xmm11,%xmm11

# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14
# asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13
vpor %xmm15,%xmm13,%xmm13

# qhasm: r2 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12
# asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11
vpor %xmm11,%xmm14,%xmm11

# qhasm: v00 = r1 & mask2
# asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15
# asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14
vpand %xmm2,%xmm10,%xmm14

# qhasm: 4x v10 = r3 << 16
# asm 1: vpslld $16,<r3=reg128#13,>v10=reg128#16
# asm 2: vpslld $16,<r3=%xmm12,>v10=%xmm15
vpslld $16,%xmm12,%xmm15

# qhasm: 4x v01 = r1 unsigned>> 16
# asm 1: vpsrld $16,<r1=reg128#11,>v01=reg128#11
# asm 2: vpsrld $16,<r1=%xmm10,>v01=%xmm10
vpsrld $16,%xmm10,%xmm10

# qhasm: v11 = r3 & mask3
# asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13
# asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12
vpand %xmm3,%xmm12,%xmm12

# qhasm: r1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14
vpor %xmm15,%xmm14,%xmm14

# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11
# asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10
vpor %xmm12,%xmm10,%xmm10

# qhasm: v00 = r4 & mask2
# asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13
# asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12
vpand %xmm2,%xmm6,%xmm12

# qhasm: 4x v10 = r6 << 16
# asm 1: vpslld $16,<r6=reg128#9,>v10=reg128#16
# asm 2: vpslld $16,<r6=%xmm8,>v10=%xmm15
vpslld $16,%xmm8,%xmm15

# qhasm: 4x v01 = r4 unsigned>> 16
# asm 1: vpsrld $16,<r4=reg128#7,>v01=reg128#7
# asm 2: vpsrld $16,<r4=%xmm6,>v01=%xmm6
vpsrld $16,%xmm6,%xmm6

# qhasm: v11 = r6 & mask3
# asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9
# asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8
vpand %xmm3,%xmm8,%xmm8

# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12
vpor %xmm15,%xmm12,%xmm12

# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7
# asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6
vpor %xmm8,%xmm6,%xmm6

# qhasm: v00 = r5 & mask2
# asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9
# asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8
vpand %xmm2,%xmm7,%xmm8

# qhasm: 4x v10 = r7 << 16
# asm 1: vpslld $16,<r7=reg128#10,>v10=reg128#16
# asm 2: vpslld $16,<r7=%xmm9,>v10=%xmm15
vpslld $16,%xmm9,%xmm15

# qhasm: 4x v01 = r5 unsigned>> 16
# asm 1: vpsrld $16,<r5=reg128#8,>v01=reg128#8
# asm 2: vpsrld $16,<r5=%xmm7,>v01=%xmm7
vpsrld $16,%xmm7,%xmm7

# qhasm: v11 = r7 & mask3
# asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10
# asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9
vpand %xmm3,%xmm9,%xmm9

# qhasm: r5 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9
# asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8
vpor %xmm15,%xmm8,%xmm8

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8
# asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7
vpor %xmm9,%xmm7,%xmm7

# qhasm: v00 = r0 & mask4
# asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9
vpand %xmm4,%xmm13,%xmm9

# qhasm: 8x v10 = r1 << 8
# asm 1: vpsllw $8,<r1=reg128#15,>v10=reg128#16
# asm 2: vpsllw $8,<r1=%xmm14,>v10=%xmm15
vpsllw $8,%xmm14,%xmm15

# qhasm: 8x v01 = r0 unsigned>> 8
# asm 1: vpsrlw $8,<r0=reg128#14,>v01=reg128#14
# asm 2: vpsrlw $8,<r0=%xmm13,>v01=%xmm13
vpsrlw $8,%xmm13,%xmm13

# qhasm: v11 = r1 & mask5
# asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15
# asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14
vpand %xmm5,%xmm14,%xmm14

# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10
# asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9
vpor %xmm15,%xmm9,%xmm9

# qhasm: r1 = v01 | v11
# asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14
# asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13
vpor %xmm14,%xmm13,%xmm13

# qhasm: v00 = r2 & mask4
# asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15
# asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14
vpand %xmm4,%xmm11,%xmm14

# qhasm: 8x v10 = r3 << 8
# asm 1: vpsllw $8,<r3=reg128#11,>v10=reg128#16
# asm 2: vpsllw $8,<r3=%xmm10,>v10=%xmm15
vpsllw $8,%xmm10,%xmm15

# qhasm: 8x v01 = r2 unsigned>> 8
# asm 1: vpsrlw $8,<r2=reg128#12,>v01=reg128#12
# asm 2: vpsrlw $8,<r2=%xmm11,>v01=%xmm11
vpsrlw $8,%xmm11,%xmm11

# qhasm: v11 = r3 & mask5
# asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11
# asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10
vpand %xmm5,%xmm10,%xmm10

# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14
vpor %xmm15,%xmm14,%xmm14

# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11
# asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10
vpor %xmm10,%xmm11,%xmm10

# qhasm: v00 = r4 & mask4
# asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12
# asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11
vpand %xmm4,%xmm12,%xmm11

# qhasm: 8x v10 = r5 << 8
# asm 1: vpsllw $8,<r5=reg128#9,>v10=reg128#16
# asm 2: vpsllw $8,<r5=%xmm8,>v10=%xmm15
vpsllw $8,%xmm8,%xmm15

# qhasm: 8x v01 = r4 unsigned>> 8
# asm 1: vpsrlw $8,<r4=reg128#13,>v01=reg128#13
# asm 2: vpsrlw $8,<r4=%xmm12,>v01=%xmm12
vpsrlw $8,%xmm12,%xmm12

# qhasm: v11 = r5 & mask5
# asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9
# asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8
vpand %xmm5,%xmm8,%xmm8

# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11
vpor %xmm15,%xmm11,%xmm11

# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9
# asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8
vpor %xmm8,%xmm12,%xmm8

# qhasm: v00 = r6 & mask4
# asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13
# asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12
vpand %xmm4,%xmm6,%xmm12

# qhasm: 8x v10 = r7 << 8
# asm 1: vpsllw $8,<r7=reg128#8,>v10=reg128#16
# asm 2: vpsllw $8,<r7=%xmm7,>v10=%xmm15
vpsllw $8,%xmm7,%xmm15

# qhasm: 8x v01 = r6 unsigned>> 8
# asm 1: vpsrlw $8,<r6=reg128#7,>v01=reg128#7
# asm 2: vpsrlw $8,<r6=%xmm6,>v01=%xmm6
vpsrlw $8,%xmm6,%xmm6

# qhasm: v11 = r7 & mask5
# asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8
# asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7
vpand %xmm5,%xmm7,%xmm7

# qhasm: r6 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12
vpor %xmm15,%xmm12,%xmm12

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7
# asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6
vpor %xmm7,%xmm6,%xmm6

# qhasm: buf = r0[0]
# asm 1: pextrq $0x0,<r0=reg128#10,>buf=int64#2
# asm 2: pextrq $0x0,<r0=%xmm9,>buf=%rsi
pextrq $0x0,%xmm9,%rsi

# qhasm: mem64[ input_0 + 32 ] = buf
# asm 1: movq   <buf=int64#2,32(<input_0=int64#1)
# asm 2: movq   <buf=%rsi,32(<input_0=%rdi)
movq   %rsi,32(%rdi)

# qhasm: buf = r1[0]
# asm 1: pextrq $0x0,<r1=reg128#14,>buf=int64#2
# asm 2: pextrq $0x0,<r1=%xmm13,>buf=%rsi
pextrq $0x0,%xmm13,%rsi

# qhasm: mem64[ input_0 + 96 ] = buf
# asm 1: movq   <buf=int64#2,96(<input_0=int64#1)
# asm 2: movq   <buf=%rsi,96(<input_0=%rdi)
movq   %rsi,96(%rdi)

# qhasm: buf = r2[0]
# asm 1: pextrq $0x0,<r2=reg128#15,>buf=int64#2
# asm 2: pextrq $0x0,<r2=%xmm14,>buf=%rsi
pextrq $0x0,%xmm14,%rsi

# qhasm: mem64[ input_0 + 160 ] = buf
# asm 1: movq   <buf=int64#2,160(<input_0=int64#1)
# asm 2: movq   <buf=%rsi,160(<input_0=%rdi)
movq   %rsi,160(%rdi)

# qhasm: buf = r3[0]
# asm 1: pextrq $0x0,<r3=reg128#11,>buf=int64#2
# asm 2: pextrq $0x0,<r3=%xmm10,>buf=%rsi
pextrq $0x0,%xmm10,%rsi

# qhasm: mem64[ input_0 + 224 ] = buf
# asm 1: movq   <buf=int64#2,224(<input_0=int64#1)
# asm 2: movq   <buf=%rsi,224(<input_0=%rdi)
movq   %rsi,224(%rdi)

# qhasm: buf = r4[0]
# asm 1: pextrq $0x0,<r4=reg128#12,>buf=int64#2
# asm 2: pextrq $0x0,<r4=%xmm11,>buf=%rsi
pextrq $0x0,%xmm11,%rsi

# qhasm: mem64[ input_0 + 288 ] = buf
# asm 1: movq   <buf=int64#2,288(<input_0=int64#1)
# asm 2: movq   <buf=%rsi,288(<input_0=%rdi)
movq   %rsi,288(%rdi)

# qhasm: buf = r5[0]
# asm 1: pextrq $0x0,<r5=reg128#9,>buf=int64#2
# asm 2: pextrq $0x0,<r5=%xmm8,>buf=%rsi
pextrq $0x0,%xmm8,%rsi

# qhasm: mem64[ input_0 + 352 ] = buf
# asm 1: movq   <buf=int64#2,352(<input_0=int64#1)
# asm 2: movq   <buf=%rsi,352(<input_0=%rdi)
movq   %rsi,352(%rdi)

# qhasm: buf = r6[0]
# asm 1: pextrq $0x0,<r6=reg128#13,>buf=int64#2
# asm 2: pextrq $0x0,<r6=%xmm12,>buf=%rsi
pextrq $0x0,%xmm12,%rsi

# qhasm: mem64[ input_0 + 416 ] = buf
# asm 1: movq   <buf=int64#2,416(<input_0=int64#1)
# asm 2: movq   <buf=%rsi,416(<input_0=%rdi)
movq   %rsi,416(%rdi)

# qhasm: buf = r7[0]
# asm 1: pextrq $0x0,<r7=reg128#7,>buf=int64#2
# asm 2: pextrq $0x0,<r7=%xmm6,>buf=%rsi
pextrq $0x0,%xmm6,%rsi

# qhasm: mem64[ input_0 + 480 ] = buf
# asm 1: movq   <buf=int64#2,480(<input_0=int64#1)
# asm 2: movq   <buf=%rsi,480(<input_0=%rdi)
movq   %rsi,480(%rdi)

# qhasm: r0 = mem64[ input_0 + 40 ] x2
# asm 1: movddup 40(<input_0=int64#1),>r0=reg128#7
# asm 2: movddup 40(<input_0=%rdi),>r0=%xmm6
movddup 40(%rdi),%xmm6

# qhasm: r1 = mem64[ input_0 + 104 ] x2
# asm 1: movddup 104(<input_0=int64#1),>r1=reg128#8
# asm 2: movddup 104(<input_0=%rdi),>r1=%xmm7
movddup 104(%rdi),%xmm7

# qhasm: r2 = mem64[ input_0 + 168 ] x2
# asm 1: movddup 168(<input_0=int64#1),>r2=reg128#9
# asm 2: movddup 168(<input_0=%rdi),>r2=%xmm8
movddup 168(%rdi),%xmm8

# qhasm: r3 = mem64[ input_0 + 232 ] x2
# asm 1: movddup 232(<input_0=int64#1),>r3=reg128#10
# asm 2: movddup 232(<input_0=%rdi),>r3=%xmm9
movddup 232(%rdi),%xmm9

# qhasm: r4 = mem64[ input_0 + 296 ] x2
# asm 1: movddup 296(<input_0=int64#1),>r4=reg128#11
# asm 2: movddup 296(<input_0=%rdi),>r4=%xmm10
movddup 296(%rdi),%xmm10

# qhasm: r5 = mem64[ input_0 + 360 ] x2
# asm 1: movddup 360(<input_0=int64#1),>r5=reg128#12
# asm 2: movddup 360(<input_0=%rdi),>r5=%xmm11
movddup 360(%rdi),%xmm11

# qhasm: r6 = mem64[ input_0 + 424 ] x2
# asm 1: movddup 424(<input_0=int64#1),>r6=reg128#13
# asm 2: movddup 424(<input_0=%rdi),>r6=%xmm12
movddup 424(%rdi),%xmm12

# qhasm: r7 = mem64[ input_0 + 488 ] x2
# asm 1: movddup 488(<input_0=int64#1),>r7=reg128#14
# asm 2: movddup 488(<input_0=%rdi),>r7=%xmm13
movddup 488(%rdi),%xmm13

# qhasm: v00 = r0 & mask0
# asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15
# asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14
vpand %xmm0,%xmm6,%xmm14

# qhasm: 2x v10 = r4 << 32
# asm 1: vpsllq $32,<r4=reg128#11,>v10=reg128#16
# asm 2: vpsllq $32,<r4=%xmm10,>v10=%xmm15
vpsllq $32,%xmm10,%xmm15

# qhasm: 2x v01 = r0 unsigned>> 32
# asm 1: vpsrlq $32,<r0=reg128#7,>v01=reg128#7
# asm 2: vpsrlq $32,<r0=%xmm6,>v01=%xmm6
vpsrlq $32,%xmm6,%xmm6

# qhasm: v11 = r4 & mask1
# asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11
# asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10
vpand %xmm1,%xmm10,%xmm10

# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14
vpor %xmm15,%xmm14,%xmm14

# qhasm: r4 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7
# asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6
vpor %xmm10,%xmm6,%xmm6

# qhasm: v00 = r1 & mask0
# asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11
# asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10
vpand %xmm0,%xmm7,%xmm10

# qhasm: 2x v10 = r5 << 32
# asm 1: vpsllq $32,<r5=reg128#12,>v10=reg128#16
# asm 2: vpsllq $32,<r5=%xmm11,>v10=%xmm15
vpsllq $32,%xmm11,%xmm15

# qhasm: 2x v01 = r1 unsigned>> 32
# asm 1: vpsrlq $32,<r1=reg128#8,>v01=reg128#8
# asm 2: vpsrlq $32,<r1=%xmm7,>v01=%xmm7
vpsrlq $32,%xmm7,%xmm7

# qhasm: v11 = r5 & mask1
# asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12
# asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11
vpand %xmm1,%xmm11,%xmm11

# qhasm: r1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11
# asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10
vpor %xmm15,%xmm10,%xmm10

# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8
# asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7
vpor %xmm11,%xmm7,%xmm7

# qhasm: v00 = r2 & mask0
# asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12
# asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11
vpand %xmm0,%xmm8,%xmm11

# qhasm: 2x v10 = r6 << 32
# asm 1: vpsllq $32,<r6=reg128#13,>v10=reg128#16
# asm 2: vpsllq $32,<r6=%xmm12,>v10=%xmm15
vpsllq $32,%xmm12,%xmm15

# qhasm: 2x v01 = r2 unsigned>> 32
# asm 1: vpsrlq $32,<r2=reg128#9,>v01=reg128#9
# asm 2: vpsrlq $32,<r2=%xmm8,>v01=%xmm8
vpsrlq $32,%xmm8,%xmm8

# qhasm: v11 = r6 & mask1
# asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13
# asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12
vpand %xmm1,%xmm12,%xmm12

# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11
vpor %xmm15,%xmm11,%xmm11

# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9
# asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8
vpor %xmm12,%xmm8,%xmm8

# qhasm: v00 = r3 & mask0
# asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13
# asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12
vpand %xmm0,%xmm9,%xmm12

# qhasm: 2x v10 = r7 << 32
# asm 1: vpsllq $32,<r7=reg128#14,>v10=reg128#16
# asm 2: vpsllq $32,<r7=%xmm13,>v10=%xmm15
vpsllq $32,%xmm13,%xmm15

# qhasm: 2x v01 = r3 unsigned>> 32
# asm 1: vpsrlq $32,<r3=reg128#10,>v01=reg128#10
# asm 2: vpsrlq $32,<r3=%xmm9,>v01=%xmm9
vpsrlq $32,%xmm9,%xmm9

# qhasm: v11 = r7 & mask1
# asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14
# asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13
vpand %xmm1,%xmm13,%xmm13

# qhasm: r3 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12
vpor %xmm15,%xmm12,%xmm12

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10
# asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9
vpor %xmm13,%xmm9,%xmm9

# qhasm: v00 = r0 & mask2
# asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14
# asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13
vpand %xmm2,%xmm14,%xmm13

# qhasm: 4x v10 = r2 << 16
# asm 1: vpslld $16,<r2=reg128#12,>v10=reg128#16
# asm 2: vpslld $16,<r2=%xmm11,>v10=%xmm15
vpslld $16,%xmm11,%xmm15

# qhasm: 4x v01 = r0 unsigned>> 16
# asm 1: vpsrld $16,<r0=reg128#15,>v01=reg128#15
# asm 2: vpsrld $16,<r0=%xmm14,>v01=%xmm14
vpsrld $16,%xmm14,%xmm14

# qhasm: v11 = r2 & mask3
# asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12
# asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11
vpand %xmm3,%xmm11,%xmm11

# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14
# asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13
vpor %xmm15,%xmm13,%xmm13

# qhasm: r2 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12
# asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11
vpor %xmm11,%xmm14,%xmm11

# qhasm: v00 = r1 & mask2
# asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15
# asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14
vpand %xmm2,%xmm10,%xmm14

# qhasm: 4x v10 = r3 << 16
# asm 1: vpslld $16,<r3=reg128#13,>v10=reg128#16
# asm 2: vpslld $16,<r3=%xmm12,>v10=%xmm15
vpslld $16,%xmm12,%xmm15

# qhasm: 4x v01 = r1 unsigned>> 16
# asm 1: vpsrld $16,<r1=reg128#11,>v01=reg128#11
# asm 2: vpsrld $16,<r1=%xmm10,>v01=%xmm10
vpsrld $16,%xmm10,%xmm10

# qhasm: v11 = r3 & mask3
# asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13
# asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12
vpand %xmm3,%xmm12,%xmm12

# qhasm: r1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14
vpor %xmm15,%xmm14,%xmm14

# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11
# asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10
vpor %xmm12,%xmm10,%xmm10

# qhasm: v00 = r4 & mask2
# asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13
# asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12
vpand %xmm2,%xmm6,%xmm12

# qhasm: 4x v10 = r6 << 16
# asm 1: vpslld $16,<r6=reg128#9,>v10=reg128#16
# asm 2: vpslld $16,<r6=%xmm8,>v10=%xmm15
vpslld $16,%xmm8,%xmm15

# qhasm: 4x v01 = r4 unsigned>> 16
# asm 1: vpsrld $16,<r4=reg128#7,>v01=reg128#7
# asm 2: vpsrld $16,<r4=%xmm6,>v01=%xmm6
vpsrld $16,%xmm6,%xmm6

# qhasm: v11 = r6 & mask3
# asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9
# asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8
vpand %xmm3,%xmm8,%xmm8

# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12
vpor %xmm15,%xmm12,%xmm12

# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7
# asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6
vpor %xmm8,%xmm6,%xmm6

# qhasm: v00 = r5 & mask2
# asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9
# asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8
vpand %xmm2,%xmm7,%xmm8

# qhasm: 4x v10 = r7 << 16
# asm 1: vpslld $16,<r7=reg128#10,>v10=reg128#16
# asm 2: vpslld $16,<r7=%xmm9,>v10=%xmm15
vpslld $16,%xmm9,%xmm15

# qhasm: 4x v01 = r5 unsigned>> 16
# asm 1: vpsrld $16,<r5=reg128#8,>v01=reg128#8
# asm 2: vpsrld $16,<r5=%xmm7,>v01=%xmm7
vpsrld $16,%xmm7,%xmm7

# qhasm: v11 = r7 & mask3
# asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10
# asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9
vpand %xmm3,%xmm9,%xmm9

# qhasm: r5 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9
# asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8
vpor %xmm15,%xmm8,%xmm8

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8
# asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7
vpor %xmm9,%xmm7,%xmm7

# qhasm: v00 = r0 & mask4
# asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9
vpand %xmm4,%xmm13,%xmm9

# qhasm: 8x v10 = r1 << 8
# asm 1: vpsllw $8,<r1=reg128#15,>v10=reg128#16
# asm 2: vpsllw $8,<r1=%xmm14,>v10=%xmm15
vpsllw $8,%xmm14,%xmm15

# qhasm: 8x v01 = r0 unsigned>> 8
# asm 1: vpsrlw $8,<r0=reg128#14,>v01=reg128#14
# asm 2: vpsrlw $8,<r0=%xmm13,>v01=%xmm13
vpsrlw $8,%xmm13,%xmm13

# qhasm: v11 = r1 & mask5
# asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15
# asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14
vpand %xmm5,%xmm14,%xmm14

# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10
# asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9
vpor %xmm15,%xmm9,%xmm9

# qhasm: r1 = v01 | v11
# asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14
# asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13
vpor %xmm14,%xmm13,%xmm13

# qhasm: v00 = r2 & mask4
# asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15
# asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14
vpand %xmm4,%xmm11,%xmm14

# qhasm: 8x v10 = r3 << 8
# asm 1: vpsllw $8,<r3=reg128#11,>v10=reg128#16
# asm 2: vpsllw $8,<r3=%xmm10,>v10=%xmm15
vpsllw $8,%xmm10,%xmm15

# qhasm: 8x v01 = r2 unsigned>> 8
# asm 1: vpsrlw $8,<r2=reg128#12,>v01=reg128#12
# asm 2: vpsrlw $8,<r2=%xmm11,>v01=%xmm11
vpsrlw $8,%xmm11,%xmm11

# qhasm: v11 = r3 & mask5
# asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11
# asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10
vpand %xmm5,%xmm10,%xmm10

# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14
vpor %xmm15,%xmm14,%xmm14

# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11
# asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10
vpor %xmm10,%xmm11,%xmm10

# qhasm: v00 = r4 & mask4
# asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12
# asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11
vpand %xmm4,%xmm12,%xmm11

# qhasm: 8x v10 = r5 << 8
# asm 1: vpsllw $8,<r5=reg128#9,>v10=reg128#16
# asm 2: vpsllw $8,<r5=%xmm8,>v10=%xmm15
vpsllw $8,%xmm8,%xmm15

# qhasm: 8x v01 = r4 unsigned>> 8
# asm 1: vpsrlw $8,<r4=reg128#13,>v01=reg128#13
# asm 2: vpsrlw $8,<r4=%xmm12,>v01=%xmm12
vpsrlw $8,%xmm12,%xmm12

# qhasm: v11 = r5 & mask5
# asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9
# asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8
vpand %xmm5,%xmm8,%xmm8

# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11
vpor %xmm15,%xmm11,%xmm11

# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9
# asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8
vpor %xmm8,%xmm12,%xmm8

# qhasm: v00 = r6 & mask4
# asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13
# asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12
vpand %xmm4,%xmm6,%xmm12

# qhasm: 8x v10 = r7 << 8
# asm 1: vpsllw $8,<r7=reg128#8,>v10=reg128#16
# asm 2: vpsllw $8,<r7=%xmm7,>v10=%xmm15
vpsllw $8,%xmm7,%xmm15

# qhasm: 8x v01 = r6 unsigned>> 8
# asm 1: vpsrlw $8,<r6=reg128#7,>v01=reg128#7
# asm 2: vpsrlw $8,<r6=%xmm6,>v01=%xmm6
vpsrlw $8,%xmm6,%xmm6

# qhasm: v11 = r7 & mask5
# asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8
# asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7
vpand %xmm5,%xmm7,%xmm7

# qhasm: r6 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12
vpor %xmm15,%xmm12,%xmm12

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7
# asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6
vpor %xmm7,%xmm6,%xmm6

# qhasm: buf = r0[0]
# asm 1: pextrq $0x0,<r0=reg128#10,>buf=int64#2
# asm 2: pextrq $0x0,<r0=%xmm9,>buf=%rsi
pextrq $0x0,%xmm9,%rsi

# qhasm: mem64[ input_0 + 40 ] = buf
# asm 1: movq   <buf=int64#2,40(<input_0=int64#1)
# asm 2: movq   <buf=%rsi,40(<input_0=%rdi)
movq   %rsi,40(%rdi)

# qhasm: buf = r1[0]
# asm 1: pextrq $0x0,<r1=reg128#14,>buf=int64#2
# asm 2: pextrq $0x0,<r1=%xmm13,>buf=%rsi
pextrq $0x0,%xmm13,%rsi

# qhasm: mem64[ input_0 + 104 ] = buf
# asm 1: movq   <buf=int64#2,104(<input_0=int64#1)
# asm 2: movq   <buf=%rsi,104(<input_0=%rdi)
movq   %rsi,104(%rdi)

# qhasm: buf = r2[0]
# asm 1: pextrq $0x0,<r2=reg128#15,>buf=int64#2
# asm 2: pextrq $0x0,<r2=%xmm14,>buf=%rsi
pextrq $0x0,%xmm14,%rsi

# qhasm: mem64[ input_0 + 168 ] = buf
# asm 1: movq   <buf=int64#2,168(<input_0=int64#1)
# asm 2: movq   <buf=%rsi,168(<input_0=%rdi)
movq   %rsi,168(%rdi)

# qhasm: buf = r3[0]
# asm 1: pextrq $0x0,<r3=reg128#11,>buf=int64#2
# asm 2: pextrq $0x0,<r3=%xmm10,>buf=%rsi
pextrq $0x0,%xmm10,%rsi

# qhasm: mem64[ input_0 + 232 ] = buf
# asm 1: movq   <buf=int64#2,232(<input_0=int64#1)
# asm 2: movq   <buf=%rsi,232(<input_0=%rdi)
movq   %rsi,232(%rdi)

# qhasm: buf = r4[0]
# asm 1: pextrq $0x0,<r4=reg128#12,>buf=int64#2
# asm 2: pextrq $0x0,<r4=%xmm11,>buf=%rsi
pextrq $0x0,%xmm11,%rsi

# qhasm: mem64[ input_0 + 296 ] = buf
# asm 1: movq   <buf=int64#2,296(<input_0=int64#1)
# asm 2: movq   <buf=%rsi,296(<input_0=%rdi)
movq   %rsi,296(%rdi)

# qhasm: buf = r5[0]
# asm 1: pextrq $0x0,<r5=reg128#9,>buf=int64#2
# asm 2: pextrq $0x0,<r5=%xmm8,>buf=%rsi
pextrq $0x0,%xmm8,%rsi

# qhasm: mem64[ input_0 + 360 ] = buf
# asm 1: movq   <buf=int64#2,360(<input_0=int64#1)
# asm 2: movq   <buf=%rsi,360(<input_0=%rdi)
movq   %rsi,360(%rdi)

# qhasm: buf = r6[0]
# asm 1: pextrq $0x0,<r6=reg128#13,>buf=int64#2
# asm 2: pextrq $0x0,<r6=%xmm12,>buf=%rsi
pextrq $0x0,%xmm12,%rsi

# qhasm: mem64[ input_0 + 424 ] = buf
# asm 1: movq   <buf=int64#2,424(<input_0=int64#1)
# asm 2: movq   <buf=%rsi,424(<input_0=%rdi)
movq   %rsi,424(%rdi)

# qhasm: buf = r7[0]
# asm 1: pextrq $0x0,<r7=reg128#7,>buf=int64#2
# asm 2: pextrq $0x0,<r7=%xmm6,>buf=%rsi
pextrq $0x0,%xmm6,%rsi

# qhasm: mem64[ input_0 + 488 ] = buf
# asm 1: movq   <buf=int64#2,488(<input_0=int64#1)
# asm 2: movq   <buf=%rsi,488(<input_0=%rdi)
movq   %rsi,488(%rdi)

2995# qhasm: r0 = mem64[ input_0 + 48 ] x2
2996# asm 1: movddup 48(<input_0=int64#1),>r0=reg128#7
2997# asm 2: movddup 48(<input_0=%rdi),>r0=%xmm6
2998movddup 48(%rdi),%xmm6
2999
3000# qhasm: r1 = mem64[ input_0 + 112 ] x2
3001# asm 1: movddup 112(<input_0=int64#1),>r1=reg128#8
3002# asm 2: movddup 112(<input_0=%rdi),>r1=%xmm7
3003movddup 112(%rdi),%xmm7
3004
3005# qhasm: r2 = mem64[ input_0 + 176 ] x2
3006# asm 1: movddup 176(<input_0=int64#1),>r2=reg128#9
3007# asm 2: movddup 176(<input_0=%rdi),>r2=%xmm8
3008movddup 176(%rdi),%xmm8
3009
3010# qhasm: r3 = mem64[ input_0 + 240 ] x2
3011# asm 1: movddup 240(<input_0=int64#1),>r3=reg128#10
3012# asm 2: movddup 240(<input_0=%rdi),>r3=%xmm9
3013movddup 240(%rdi),%xmm9
3014
3015# qhasm: r4 = mem64[ input_0 + 304 ] x2
3016# asm 1: movddup 304(<input_0=int64#1),>r4=reg128#11
3017# asm 2: movddup 304(<input_0=%rdi),>r4=%xmm10
3018movddup 304(%rdi),%xmm10
3019
3020# qhasm: r5 = mem64[ input_0 + 368 ] x2
3021# asm 1: movddup 368(<input_0=int64#1),>r5=reg128#12
3022# asm 2: movddup 368(<input_0=%rdi),>r5=%xmm11
3023movddup 368(%rdi),%xmm11
3024
3025# qhasm: r6 = mem64[ input_0 + 432 ] x2
3026# asm 1: movddup 432(<input_0=int64#1),>r6=reg128#13
3027# asm 2: movddup 432(<input_0=%rdi),>r6=%xmm12
3028movddup 432(%rdi),%xmm12
3029
3030# qhasm: r7 = mem64[ input_0 + 496 ] x2
3031# asm 1: movddup 496(<input_0=int64#1),>r7=reg128#14
3032# asm 2: movddup 496(<input_0=%rdi),>r7=%xmm13
3033movddup 496(%rdi),%xmm13
3034
3035# qhasm: v00 = r0 & mask0
3036# asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15
3037# asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14
3038vpand %xmm0,%xmm6,%xmm14
3039
3040# qhasm: 2x v10 = r4 << 32
3041# asm 1: vpsllq $32,<r4=reg128#11,>v10=reg128#16
3042# asm 2: vpsllq $32,<r4=%xmm10,>v10=%xmm15
3043vpsllq $32,%xmm10,%xmm15
3044
3045# qhasm: 2x v01 = r0 unsigned>> 32
3046# asm 1: vpsrlq $32,<r0=reg128#7,>v01=reg128#7
3047# asm 2: vpsrlq $32,<r0=%xmm6,>v01=%xmm6
3048vpsrlq $32,%xmm6,%xmm6
3049
3050# qhasm: v11 = r4 & mask1
3051# asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11
3052# asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10
3053vpand %xmm1,%xmm10,%xmm10
3054
3055# qhasm: r0 = v00 | v10
3056# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15
3057# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14
3058vpor %xmm15,%xmm14,%xmm14
3059
3060# qhasm: r4 = v01 | v11
3061# asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7
3062# asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6
3063vpor %xmm10,%xmm6,%xmm6
3064
3065# qhasm: v00 = r1 & mask0
3066# asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11
3067# asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10
3068vpand %xmm0,%xmm7,%xmm10
3069
3070# qhasm: 2x v10 = r5 << 32
3071# asm 1: vpsllq $32,<r5=reg128#12,>v10=reg128#16
3072# asm 2: vpsllq $32,<r5=%xmm11,>v10=%xmm15
3073vpsllq $32,%xmm11,%xmm15
3074
3075# qhasm: 2x v01 = r1 unsigned>> 32
3076# asm 1: vpsrlq $32,<r1=reg128#8,>v01=reg128#8
3077# asm 2: vpsrlq $32,<r1=%xmm7,>v01=%xmm7
3078vpsrlq $32,%xmm7,%xmm7
3079
3080# qhasm: v11 = r5 & mask1
3081# asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12
3082# asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11
3083vpand %xmm1,%xmm11,%xmm11
3084
3085# qhasm: r1 = v00 | v10
3086# asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11
3087# asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10
3088vpor %xmm15,%xmm10,%xmm10
3089
3090# qhasm: r5 = v01 | v11
3091# asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8
3092# asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7
3093vpor %xmm11,%xmm7,%xmm7
3094
3095# qhasm: v00 = r2 & mask0
3096# asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12
3097# asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11
3098vpand %xmm0,%xmm8,%xmm11
3099
3100# qhasm: 2x v10 = r6 << 32
3101# asm 1: vpsllq $32,<r6=reg128#13,>v10=reg128#16
3102# asm 2: vpsllq $32,<r6=%xmm12,>v10=%xmm15
3103vpsllq $32,%xmm12,%xmm15
3104
3105# qhasm: 2x v01 = r2 unsigned>> 32
3106# asm 1: vpsrlq $32,<r2=reg128#9,>v01=reg128#9
3107# asm 2: vpsrlq $32,<r2=%xmm8,>v01=%xmm8
3108vpsrlq $32,%xmm8,%xmm8
3109
3110# qhasm: v11 = r6 & mask1
3111# asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13
3112# asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12
3113vpand %xmm1,%xmm12,%xmm12
3114
3115# qhasm: r2 = v00 | v10
3116# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12
3117# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11
3118vpor %xmm15,%xmm11,%xmm11
3119
3120# qhasm: r6 = v01 | v11
3121# asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9
3122# asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8
3123vpor %xmm12,%xmm8,%xmm8
3124
3125# qhasm: v00 = r3 & mask0
3126# asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13
3127# asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12
3128vpand %xmm0,%xmm9,%xmm12
3129
3130# qhasm: 2x v10 = r7 << 32
3131# asm 1: vpsllq $32,<r7=reg128#14,>v10=reg128#16
3132# asm 2: vpsllq $32,<r7=%xmm13,>v10=%xmm15
3133vpsllq $32,%xmm13,%xmm15
3134
3135# qhasm: 2x v01 = r3 unsigned>> 32
3136# asm 1: vpsrlq $32,<r3=reg128#10,>v01=reg128#10
3137# asm 2: vpsrlq $32,<r3=%xmm9,>v01=%xmm9
3138vpsrlq $32,%xmm9,%xmm9
3139
3140# qhasm: v11 = r7 & mask1
3141# asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14
3142# asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13
3143vpand %xmm1,%xmm13,%xmm13
3144
3145# qhasm: r3 = v00 | v10
3146# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13
3147# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12
3148vpor %xmm15,%xmm12,%xmm12
3149
3150# qhasm: r7 = v01 | v11
3151# asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10
3152# asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9
3153vpor %xmm13,%xmm9,%xmm9
3154
3155# qhasm: v00 = r0 & mask2
3156# asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14
3157# asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13
3158vpand %xmm2,%xmm14,%xmm13
3159
3160# qhasm: 4x v10 = r2 << 16
3161# asm 1: vpslld $16,<r2=reg128#12,>v10=reg128#16
3162# asm 2: vpslld $16,<r2=%xmm11,>v10=%xmm15
3163vpslld $16,%xmm11,%xmm15
3164
3165# qhasm: 4x v01 = r0 unsigned>> 16
3166# asm 1: vpsrld $16,<r0=reg128#15,>v01=reg128#15
3167# asm 2: vpsrld $16,<r0=%xmm14,>v01=%xmm14
3168vpsrld $16,%xmm14,%xmm14
3169
3170# qhasm: v11 = r2 & mask3
3171# asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12
3172# asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11
3173vpand %xmm3,%xmm11,%xmm11
3174
3175# qhasm: r0 = v00 | v10
3176# asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14
3177# asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13
3178vpor %xmm15,%xmm13,%xmm13
3179
3180# qhasm: r2 = v01 | v11
3181# asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12
3182# asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11
3183vpor %xmm11,%xmm14,%xmm11
3184
3185# qhasm: v00 = r1 & mask2
3186# asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15
3187# asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14
3188vpand %xmm2,%xmm10,%xmm14
3189
3190# qhasm: 4x v10 = r3 << 16
3191# asm 1: vpslld $16,<r3=reg128#13,>v10=reg128#16
3192# asm 2: vpslld $16,<r3=%xmm12,>v10=%xmm15
3193vpslld $16,%xmm12,%xmm15
3194
3195# qhasm: 4x v01 = r1 unsigned>> 16
3196# asm 1: vpsrld $16,<r1=reg128#11,>v01=reg128#11
3197# asm 2: vpsrld $16,<r1=%xmm10,>v01=%xmm10
3198vpsrld $16,%xmm10,%xmm10
3199
3200# qhasm: v11 = r3 & mask3
3201# asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13
3202# asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12
3203vpand %xmm3,%xmm12,%xmm12
3204
3205# qhasm: r1 = v00 | v10
3206# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15
3207# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14
3208vpor %xmm15,%xmm14,%xmm14
3209
3210# qhasm: r3 = v01 | v11
3211# asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11
3212# asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10
3213vpor %xmm12,%xmm10,%xmm10
3214
3215# qhasm: v00 = r4 & mask2
3216# asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13
3217# asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12
3218vpand %xmm2,%xmm6,%xmm12
3219
3220# qhasm: 4x v10 = r6 << 16
3221# asm 1: vpslld $16,<r6=reg128#9,>v10=reg128#16
3222# asm 2: vpslld $16,<r6=%xmm8,>v10=%xmm15
3223vpslld $16,%xmm8,%xmm15
3224
# qhasm: 4x v01 = r4 unsigned>> 16
# asm 1: vpsrld $16,<r4=reg128#7,>v01=reg128#7
# asm 2: vpsrld $16,<r4=%xmm6,>v01=%xmm6
vpsrld $16,%xmm6,%xmm6

# qhasm: v11 = r6 & mask3
# asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9
# asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8
vpand %xmm3,%xmm8,%xmm8

# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12
vpor %xmm15,%xmm12,%xmm12

# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7
# asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6
vpor %xmm8,%xmm6,%xmm6

# qhasm: v00 = r5 & mask2
# asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9
# asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8
vpand %xmm2,%xmm7,%xmm8

# qhasm: 4x v10 = r7 << 16
# asm 1: vpslld $16,<r7=reg128#10,>v10=reg128#16
# asm 2: vpslld $16,<r7=%xmm9,>v10=%xmm15
vpslld $16,%xmm9,%xmm15

# qhasm: 4x v01 = r5 unsigned>> 16
# asm 1: vpsrld $16,<r5=reg128#8,>v01=reg128#8
# asm 2: vpsrld $16,<r5=%xmm7,>v01=%xmm7
vpsrld $16,%xmm7,%xmm7

# qhasm: v11 = r7 & mask3
# asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10
# asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9
vpand %xmm3,%xmm9,%xmm9

# qhasm: r5 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9
# asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8
vpor %xmm15,%xmm8,%xmm8

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8
# asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7
vpor %xmm9,%xmm7,%xmm7

# qhasm: v00 = r0 & mask4
# asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9
vpand %xmm4,%xmm13,%xmm9

# qhasm: 8x v10 = r1 << 8
# asm 1: vpsllw $8,<r1=reg128#15,>v10=reg128#16
# asm 2: vpsllw $8,<r1=%xmm14,>v10=%xmm15
vpsllw $8,%xmm14,%xmm15

# qhasm: 8x v01 = r0 unsigned>> 8
# asm 1: vpsrlw $8,<r0=reg128#14,>v01=reg128#14
# asm 2: vpsrlw $8,<r0=%xmm13,>v01=%xmm13
vpsrlw $8,%xmm13,%xmm13

# qhasm: v11 = r1 & mask5
# asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15
# asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14
vpand %xmm5,%xmm14,%xmm14

# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10
# asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9
vpor %xmm15,%xmm9,%xmm9

# qhasm: r1 = v01 | v11
# asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14
# asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13
vpor %xmm14,%xmm13,%xmm13

# qhasm: v00 = r2 & mask4
# asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15
# asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14
vpand %xmm4,%xmm11,%xmm14

# qhasm: 8x v10 = r3 << 8
# asm 1: vpsllw $8,<r3=reg128#11,>v10=reg128#16
# asm 2: vpsllw $8,<r3=%xmm10,>v10=%xmm15
vpsllw $8,%xmm10,%xmm15

# qhasm: 8x v01 = r2 unsigned>> 8
# asm 1: vpsrlw $8,<r2=reg128#12,>v01=reg128#12
# asm 2: vpsrlw $8,<r2=%xmm11,>v01=%xmm11
vpsrlw $8,%xmm11,%xmm11

# qhasm: v11 = r3 & mask5
# asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11
# asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10
vpand %xmm5,%xmm10,%xmm10

# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14
vpor %xmm15,%xmm14,%xmm14

# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11
# asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10
vpor %xmm10,%xmm11,%xmm10

# qhasm: v00 = r4 & mask4
# asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12
# asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11
vpand %xmm4,%xmm12,%xmm11

# qhasm: 8x v10 = r5 << 8
# asm 1: vpsllw $8,<r5=reg128#9,>v10=reg128#16
# asm 2: vpsllw $8,<r5=%xmm8,>v10=%xmm15
vpsllw $8,%xmm8,%xmm15

# qhasm: 8x v01 = r4 unsigned>> 8
# asm 1: vpsrlw $8,<r4=reg128#13,>v01=reg128#13
# asm 2: vpsrlw $8,<r4=%xmm12,>v01=%xmm12
vpsrlw $8,%xmm12,%xmm12

# qhasm: v11 = r5 & mask5
# asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9
# asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8
vpand %xmm5,%xmm8,%xmm8

# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11
vpor %xmm15,%xmm11,%xmm11

# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9
# asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8
vpor %xmm8,%xmm12,%xmm8

# qhasm: v00 = r6 & mask4
# asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13
# asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12
vpand %xmm4,%xmm6,%xmm12

# qhasm: 8x v10 = r7 << 8
# asm 1: vpsllw $8,<r7=reg128#8,>v10=reg128#16
# asm 2: vpsllw $8,<r7=%xmm7,>v10=%xmm15
vpsllw $8,%xmm7,%xmm15

# qhasm: 8x v01 = r6 unsigned>> 8
# asm 1: vpsrlw $8,<r6=reg128#7,>v01=reg128#7
# asm 2: vpsrlw $8,<r6=%xmm6,>v01=%xmm6
vpsrlw $8,%xmm6,%xmm6

# qhasm: v11 = r7 & mask5
# asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8
# asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7
vpand %xmm5,%xmm7,%xmm7

# qhasm: r6 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12
vpor %xmm15,%xmm12,%xmm12

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7
# asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6
vpor %xmm7,%xmm6,%xmm6

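# note: r0..r7 now hold fully transposed 64-bit rows in their low
# quadwords; the pextrq/movq pairs below write them back in place,
# one row every 64 bytes.
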
# qhasm: buf = r0[0]
# asm 1: pextrq $0x0,<r0=reg128#10,>buf=int64#2
# asm 2: pextrq $0x0,<r0=%xmm9,>buf=%rsi
pextrq $0x0,%xmm9,%rsi

# qhasm: mem64[ input_0 + 48 ] = buf
# asm 1: movq   <buf=int64#2,48(<input_0=int64#1)
# asm 2: movq   <buf=%rsi,48(<input_0=%rdi)
movq   %rsi,48(%rdi)

# qhasm: buf = r1[0]
# asm 1: pextrq $0x0,<r1=reg128#14,>buf=int64#2
# asm 2: pextrq $0x0,<r1=%xmm13,>buf=%rsi
pextrq $0x0,%xmm13,%rsi

# qhasm: mem64[ input_0 + 112 ] = buf
# asm 1: movq   <buf=int64#2,112(<input_0=int64#1)
# asm 2: movq   <buf=%rsi,112(<input_0=%rdi)
movq   %rsi,112(%rdi)

# qhasm: buf = r2[0]
# asm 1: pextrq $0x0,<r2=reg128#15,>buf=int64#2
# asm 2: pextrq $0x0,<r2=%xmm14,>buf=%rsi
pextrq $0x0,%xmm14,%rsi

# qhasm: mem64[ input_0 + 176 ] = buf
# asm 1: movq   <buf=int64#2,176(<input_0=int64#1)
# asm 2: movq   <buf=%rsi,176(<input_0=%rdi)
movq   %rsi,176(%rdi)

# qhasm: buf = r3[0]
# asm 1: pextrq $0x0,<r3=reg128#11,>buf=int64#2
# asm 2: pextrq $0x0,<r3=%xmm10,>buf=%rsi
pextrq $0x0,%xmm10,%rsi

# qhasm: mem64[ input_0 + 240 ] = buf
# asm 1: movq   <buf=int64#2,240(<input_0=int64#1)
# asm 2: movq   <buf=%rsi,240(<input_0=%rdi)
movq   %rsi,240(%rdi)

# qhasm: buf = r4[0]
# asm 1: pextrq $0x0,<r4=reg128#12,>buf=int64#2
# asm 2: pextrq $0x0,<r4=%xmm11,>buf=%rsi
pextrq $0x0,%xmm11,%rsi

# qhasm: mem64[ input_0 + 304 ] = buf
# asm 1: movq   <buf=int64#2,304(<input_0=int64#1)
# asm 2: movq   <buf=%rsi,304(<input_0=%rdi)
movq   %rsi,304(%rdi)

# qhasm: buf = r5[0]
# asm 1: pextrq $0x0,<r5=reg128#9,>buf=int64#2
# asm 2: pextrq $0x0,<r5=%xmm8,>buf=%rsi
pextrq $0x0,%xmm8,%rsi

# qhasm: mem64[ input_0 + 368 ] = buf
# asm 1: movq   <buf=int64#2,368(<input_0=int64#1)
# asm 2: movq   <buf=%rsi,368(<input_0=%rdi)
movq   %rsi,368(%rdi)

# qhasm: buf = r6[0]
# asm 1: pextrq $0x0,<r6=reg128#13,>buf=int64#2
# asm 2: pextrq $0x0,<r6=%xmm12,>buf=%rsi
pextrq $0x0,%xmm12,%rsi

# qhasm: mem64[ input_0 + 432 ] = buf
# asm 1: movq   <buf=int64#2,432(<input_0=int64#1)
# asm 2: movq   <buf=%rsi,432(<input_0=%rdi)
movq   %rsi,432(%rdi)

# qhasm: buf = r7[0]
# asm 1: pextrq $0x0,<r7=reg128#7,>buf=int64#2
# asm 2: pextrq $0x0,<r7=%xmm6,>buf=%rsi
pextrq $0x0,%xmm6,%rsi

# qhasm: mem64[ input_0 + 496 ] = buf
# asm 1: movq   <buf=int64#2,496(<input_0=int64#1)
# asm 2: movq   <buf=%rsi,496(<input_0=%rdi)
movq   %rsi,496(%rdi)

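# note: the stride-32/16/8 exchange passes repeat for the final
# eight-row group of this pass, loaded from byte offsets 56 + 64*k.
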
# qhasm: r0 = mem64[ input_0 + 56 ] x2
# asm 1: movddup 56(<input_0=int64#1),>r0=reg128#7
# asm 2: movddup 56(<input_0=%rdi),>r0=%xmm6
movddup 56(%rdi),%xmm6

# qhasm: r1 = mem64[ input_0 + 120 ] x2
# asm 1: movddup 120(<input_0=int64#1),>r1=reg128#8
# asm 2: movddup 120(<input_0=%rdi),>r1=%xmm7
movddup 120(%rdi),%xmm7

# qhasm: r2 = mem64[ input_0 + 184 ] x2
# asm 1: movddup 184(<input_0=int64#1),>r2=reg128#9
# asm 2: movddup 184(<input_0=%rdi),>r2=%xmm8
movddup 184(%rdi),%xmm8

# qhasm: r3 = mem64[ input_0 + 248 ] x2
# asm 1: movddup 248(<input_0=int64#1),>r3=reg128#10
# asm 2: movddup 248(<input_0=%rdi),>r3=%xmm9
movddup 248(%rdi),%xmm9

# qhasm: r4 = mem64[ input_0 + 312 ] x2
# asm 1: movddup 312(<input_0=int64#1),>r4=reg128#11
# asm 2: movddup 312(<input_0=%rdi),>r4=%xmm10
movddup 312(%rdi),%xmm10

# qhasm: r5 = mem64[ input_0 + 376 ] x2
# asm 1: movddup 376(<input_0=int64#1),>r5=reg128#12
# asm 2: movddup 376(<input_0=%rdi),>r5=%xmm11
movddup 376(%rdi),%xmm11

# qhasm: r6 = mem64[ input_0 + 440 ] x2
# asm 1: movddup 440(<input_0=int64#1),>r6=reg128#13
# asm 2: movddup 440(<input_0=%rdi),>r6=%xmm12
movddup 440(%rdi),%xmm12

# qhasm: r7 = mem64[ input_0 + 504 ] x2
# asm 1: movddup 504(<input_0=int64#1),>r7=reg128#14
# asm 2: movddup 504(<input_0=%rdi),>r7=%xmm13
movddup 504(%rdi),%xmm13

# qhasm: v00 = r0 & mask0
# asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15
# asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14
vpand %xmm0,%xmm6,%xmm14

# qhasm: 2x v10 = r4 << 32
# asm 1: vpsllq $32,<r4=reg128#11,>v10=reg128#16
# asm 2: vpsllq $32,<r4=%xmm10,>v10=%xmm15
vpsllq $32,%xmm10,%xmm15

# qhasm: 2x v01 = r0 unsigned>> 32
# asm 1: vpsrlq $32,<r0=reg128#7,>v01=reg128#7
# asm 2: vpsrlq $32,<r0=%xmm6,>v01=%xmm6
vpsrlq $32,%xmm6,%xmm6

# qhasm: v11 = r4 & mask1
# asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11
# asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10
vpand %xmm1,%xmm10,%xmm10

# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14
vpor %xmm15,%xmm14,%xmm14

# qhasm: r4 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7
# asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6
vpor %xmm10,%xmm6,%xmm6

# qhasm: v00 = r1 & mask0
# asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11
# asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10
vpand %xmm0,%xmm7,%xmm10

# qhasm: 2x v10 = r5 << 32
# asm 1: vpsllq $32,<r5=reg128#12,>v10=reg128#16
# asm 2: vpsllq $32,<r5=%xmm11,>v10=%xmm15
vpsllq $32,%xmm11,%xmm15

# qhasm: 2x v01 = r1 unsigned>> 32
# asm 1: vpsrlq $32,<r1=reg128#8,>v01=reg128#8
# asm 2: vpsrlq $32,<r1=%xmm7,>v01=%xmm7
vpsrlq $32,%xmm7,%xmm7

# qhasm: v11 = r5 & mask1
# asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12
# asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11
vpand %xmm1,%xmm11,%xmm11

# qhasm: r1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11
# asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10
vpor %xmm15,%xmm10,%xmm10

# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8
# asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7
vpor %xmm11,%xmm7,%xmm7

# qhasm: v00 = r2 & mask0
# asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12
# asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11
vpand %xmm0,%xmm8,%xmm11

# qhasm: 2x v10 = r6 << 32
# asm 1: vpsllq $32,<r6=reg128#13,>v10=reg128#16
# asm 2: vpsllq $32,<r6=%xmm12,>v10=%xmm15
vpsllq $32,%xmm12,%xmm15

# qhasm: 2x v01 = r2 unsigned>> 32
# asm 1: vpsrlq $32,<r2=reg128#9,>v01=reg128#9
# asm 2: vpsrlq $32,<r2=%xmm8,>v01=%xmm8
vpsrlq $32,%xmm8,%xmm8

# qhasm: v11 = r6 & mask1
# asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13
# asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12
vpand %xmm1,%xmm12,%xmm12

# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11
vpor %xmm15,%xmm11,%xmm11

# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9
# asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8
vpor %xmm12,%xmm8,%xmm8

# qhasm: v00 = r3 & mask0
# asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#1
# asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm0
vpand %xmm0,%xmm9,%xmm0

# qhasm: 2x v10 = r7 << 32
# asm 1: vpsllq $32,<r7=reg128#14,>v10=reg128#13
# asm 2: vpsllq $32,<r7=%xmm13,>v10=%xmm12
vpsllq $32,%xmm13,%xmm12

# qhasm: 2x v01 = r3 unsigned>> 32
# asm 1: vpsrlq $32,<r3=reg128#10,>v01=reg128#10
# asm 2: vpsrlq $32,<r3=%xmm9,>v01=%xmm9
vpsrlq $32,%xmm9,%xmm9

# qhasm: v11 = r7 & mask1
# asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#2
# asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm1
vpand %xmm1,%xmm13,%xmm1

# qhasm: r3 = v00 | v10
# asm 1: vpor <v10=reg128#13,<v00=reg128#1,>r3=reg128#1
# asm 2: vpor <v10=%xmm12,<v00=%xmm0,>r3=%xmm0
vpor %xmm12,%xmm0,%xmm0

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#2,<v01=reg128#10,>r7=reg128#2
# asm 2: vpor <v11=%xmm1,<v01=%xmm9,>r7=%xmm1
vpor %xmm1,%xmm9,%xmm1

# qhasm: v00 = r0 & mask2
# asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#10
# asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm9
vpand %xmm2,%xmm14,%xmm9

# qhasm: 4x v10 = r2 << 16
# asm 1: vpslld $16,<r2=reg128#12,>v10=reg128#13
# asm 2: vpslld $16,<r2=%xmm11,>v10=%xmm12
vpslld $16,%xmm11,%xmm12

# qhasm: 4x v01 = r0 unsigned>> 16
# asm 1: vpsrld $16,<r0=reg128#15,>v01=reg128#14
# asm 2: vpsrld $16,<r0=%xmm14,>v01=%xmm13
vpsrld $16,%xmm14,%xmm13

# qhasm: v11 = r2 & mask3
# asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12
# asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11
vpand %xmm3,%xmm11,%xmm11

# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#13,<v00=reg128#10,>r0=reg128#10
# asm 2: vpor <v10=%xmm12,<v00=%xmm9,>r0=%xmm9
vpor %xmm12,%xmm9,%xmm9

# qhasm: r2 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#14,>r2=reg128#12
# asm 2: vpor <v11=%xmm11,<v01=%xmm13,>r2=%xmm11
vpor %xmm11,%xmm13,%xmm11

# qhasm: v00 = r1 & mask2
# asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#13
# asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm12
vpand %xmm2,%xmm10,%xmm12

# qhasm: 4x v10 = r3 << 16
# asm 1: vpslld $16,<r3=reg128#1,>v10=reg128#14
# asm 2: vpslld $16,<r3=%xmm0,>v10=%xmm13
vpslld $16,%xmm0,%xmm13

# qhasm: 4x v01 = r1 unsigned>> 16
# asm 1: vpsrld $16,<r1=reg128#11,>v01=reg128#11
# asm 2: vpsrld $16,<r1=%xmm10,>v01=%xmm10
vpsrld $16,%xmm10,%xmm10

# qhasm: v11 = r3 & mask3
# asm 1: vpand <mask3=reg128#4,<r3=reg128#1,>v11=reg128#1
# asm 2: vpand <mask3=%xmm3,<r3=%xmm0,>v11=%xmm0
vpand %xmm3,%xmm0,%xmm0

# qhasm: r1 = v00 | v10
# asm 1: vpor <v10=reg128#14,<v00=reg128#13,>r1=reg128#13
# asm 2: vpor <v10=%xmm13,<v00=%xmm12,>r1=%xmm12
vpor %xmm13,%xmm12,%xmm12

# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#1,<v01=reg128#11,>r3=reg128#1
# asm 2: vpor <v11=%xmm0,<v01=%xmm10,>r3=%xmm0
vpor %xmm0,%xmm10,%xmm0

# qhasm: v00 = r4 & mask2
# asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#11
# asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm10
vpand %xmm2,%xmm6,%xmm10

# qhasm: 4x v10 = r6 << 16
# asm 1: vpslld $16,<r6=reg128#9,>v10=reg128#14
# asm 2: vpslld $16,<r6=%xmm8,>v10=%xmm13
vpslld $16,%xmm8,%xmm13

# qhasm: 4x v01 = r4 unsigned>> 16
# asm 1: vpsrld $16,<r4=reg128#7,>v01=reg128#7
# asm 2: vpsrld $16,<r4=%xmm6,>v01=%xmm6
vpsrld $16,%xmm6,%xmm6

# qhasm: v11 = r6 & mask3
# asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9
# asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8
vpand %xmm3,%xmm8,%xmm8

# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#14,<v00=reg128#11,>r4=reg128#11
# asm 2: vpor <v10=%xmm13,<v00=%xmm10,>r4=%xmm10
vpor %xmm13,%xmm10,%xmm10

# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7
# asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6
vpor %xmm8,%xmm6,%xmm6

# qhasm: v00 = r5 & mask2
# asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#3
# asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm2
vpand %xmm2,%xmm7,%xmm2

# qhasm: 4x v10 = r7 << 16
# asm 1: vpslld $16,<r7=reg128#2,>v10=reg128#9
# asm 2: vpslld $16,<r7=%xmm1,>v10=%xmm8
vpslld $16,%xmm1,%xmm8

# qhasm: 4x v01 = r5 unsigned>> 16
# asm 1: vpsrld $16,<r5=reg128#8,>v01=reg128#8
# asm 2: vpsrld $16,<r5=%xmm7,>v01=%xmm7
vpsrld $16,%xmm7,%xmm7

# qhasm: v11 = r7 & mask3
# asm 1: vpand <mask3=reg128#4,<r7=reg128#2,>v11=reg128#2
# asm 2: vpand <mask3=%xmm3,<r7=%xmm1,>v11=%xmm1
vpand %xmm3,%xmm1,%xmm1

# qhasm: r5 = v00 | v10
# asm 1: vpor <v10=reg128#9,<v00=reg128#3,>r5=reg128#3
# asm 2: vpor <v10=%xmm8,<v00=%xmm2,>r5=%xmm2
vpor %xmm8,%xmm2,%xmm2

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#2,<v01=reg128#8,>r7=reg128#2
# asm 2: vpor <v11=%xmm1,<v01=%xmm7,>r7=%xmm1
vpor %xmm1,%xmm7,%xmm1

# qhasm: v00 = r0 & mask4
# asm 1: vpand <mask4=reg128#5,<r0=reg128#10,>v00=reg128#4
# asm 2: vpand <mask4=%xmm4,<r0=%xmm9,>v00=%xmm3
vpand %xmm4,%xmm9,%xmm3

# qhasm: 8x v10 = r1 << 8
# asm 1: vpsllw $8,<r1=reg128#13,>v10=reg128#8
# asm 2: vpsllw $8,<r1=%xmm12,>v10=%xmm7
vpsllw $8,%xmm12,%xmm7

# qhasm: 8x v01 = r0 unsigned>> 8
# asm 1: vpsrlw $8,<r0=reg128#10,>v01=reg128#9
# asm 2: vpsrlw $8,<r0=%xmm9,>v01=%xmm8
vpsrlw $8,%xmm9,%xmm8

# qhasm: v11 = r1 & mask5
# asm 1: vpand <mask5=reg128#6,<r1=reg128#13,>v11=reg128#10
# asm 2: vpand <mask5=%xmm5,<r1=%xmm12,>v11=%xmm9
vpand %xmm5,%xmm12,%xmm9

# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#8,<v00=reg128#4,>r0=reg128#4
# asm 2: vpor <v10=%xmm7,<v00=%xmm3,>r0=%xmm3
vpor %xmm7,%xmm3,%xmm3

# qhasm: r1 = v01 | v11
# asm 1: vpor <v11=reg128#10,<v01=reg128#9,>r1=reg128#8
# asm 2: vpor <v11=%xmm9,<v01=%xmm8,>r1=%xmm7
vpor %xmm9,%xmm8,%xmm7

# qhasm: v00 = r2 & mask4
# asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#9
# asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm8
vpand %xmm4,%xmm11,%xmm8

# qhasm: 8x v10 = r3 << 8
# asm 1: vpsllw $8,<r3=reg128#1,>v10=reg128#10
# asm 2: vpsllw $8,<r3=%xmm0,>v10=%xmm9
vpsllw $8,%xmm0,%xmm9

# qhasm: 8x v01 = r2 unsigned>> 8
# asm 1: vpsrlw $8,<r2=reg128#12,>v01=reg128#12
# asm 2: vpsrlw $8,<r2=%xmm11,>v01=%xmm11
vpsrlw $8,%xmm11,%xmm11

# qhasm: v11 = r3 & mask5
# asm 1: vpand <mask5=reg128#6,<r3=reg128#1,>v11=reg128#1
# asm 2: vpand <mask5=%xmm5,<r3=%xmm0,>v11=%xmm0
vpand %xmm5,%xmm0,%xmm0

# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#10,<v00=reg128#9,>r2=reg128#9
# asm 2: vpor <v10=%xmm9,<v00=%xmm8,>r2=%xmm8
vpor %xmm9,%xmm8,%xmm8

# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#1,<v01=reg128#12,>r3=reg128#1
# asm 2: vpor <v11=%xmm0,<v01=%xmm11,>r3=%xmm0
vpor %xmm0,%xmm11,%xmm0

# qhasm: v00 = r4 & mask4
# asm 1: vpand <mask4=reg128#5,<r4=reg128#11,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<r4=%xmm10,>v00=%xmm9
vpand %xmm4,%xmm10,%xmm9

# qhasm: 8x v10 = r5 << 8
# asm 1: vpsllw $8,<r5=reg128#3,>v10=reg128#12
# asm 2: vpsllw $8,<r5=%xmm2,>v10=%xmm11
vpsllw $8,%xmm2,%xmm11

# qhasm: 8x v01 = r4 unsigned>> 8
# asm 1: vpsrlw $8,<r4=reg128#11,>v01=reg128#11
# asm 2: vpsrlw $8,<r4=%xmm10,>v01=%xmm10
vpsrlw $8,%xmm10,%xmm10

# qhasm: v11 = r5 & mask5
# asm 1: vpand <mask5=reg128#6,<r5=reg128#3,>v11=reg128#3
# asm 2: vpand <mask5=%xmm5,<r5=%xmm2,>v11=%xmm2
vpand %xmm5,%xmm2,%xmm2

# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#12,<v00=reg128#10,>r4=reg128#10
# asm 2: vpor <v10=%xmm11,<v00=%xmm9,>r4=%xmm9
vpor %xmm11,%xmm9,%xmm9

# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#3,<v01=reg128#11,>r5=reg128#3
# asm 2: vpor <v11=%xmm2,<v01=%xmm10,>r5=%xmm2
vpor %xmm2,%xmm10,%xmm2

# qhasm: v00 = r6 & mask4
# asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#5
# asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm4
vpand %xmm4,%xmm6,%xmm4

# qhasm: 8x v10 = r7 << 8
# asm 1: vpsllw $8,<r7=reg128#2,>v10=reg128#11
# asm 2: vpsllw $8,<r7=%xmm1,>v10=%xmm10
vpsllw $8,%xmm1,%xmm10

# qhasm: 8x v01 = r6 unsigned>> 8
# asm 1: vpsrlw $8,<r6=reg128#7,>v01=reg128#7
# asm 2: vpsrlw $8,<r6=%xmm6,>v01=%xmm6
vpsrlw $8,%xmm6,%xmm6

# qhasm: v11 = r7 & mask5
# asm 1: vpand <mask5=reg128#6,<r7=reg128#2,>v11=reg128#2
# asm 2: vpand <mask5=%xmm5,<r7=%xmm1,>v11=%xmm1
vpand %xmm5,%xmm1,%xmm1

# qhasm: r6 = v00 | v10
# asm 1: vpor <v10=reg128#11,<v00=reg128#5,>r6=reg128#5
# asm 2: vpor <v10=%xmm10,<v00=%xmm4,>r6=%xmm4
vpor %xmm10,%xmm4,%xmm4

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#2,<v01=reg128#7,>r7=reg128#2
# asm 2: vpor <v11=%xmm1,<v01=%xmm6,>r7=%xmm1
vpor %xmm1,%xmm6,%xmm1

# qhasm: buf = r0[0]
# asm 1: pextrq $0x0,<r0=reg128#4,>buf=int64#2
# asm 2: pextrq $0x0,<r0=%xmm3,>buf=%rsi
pextrq $0x0,%xmm3,%rsi

# qhasm: mem64[ input_0 + 56 ] = buf
# asm 1: movq   <buf=int64#2,56(<input_0=int64#1)
# asm 2: movq   <buf=%rsi,56(<input_0=%rdi)
movq   %rsi,56(%rdi)

# qhasm: buf = r1[0]
# asm 1: pextrq $0x0,<r1=reg128#8,>buf=int64#2
# asm 2: pextrq $0x0,<r1=%xmm7,>buf=%rsi
pextrq $0x0,%xmm7,%rsi

# qhasm: mem64[ input_0 + 120 ] = buf
# asm 1: movq   <buf=int64#2,120(<input_0=int64#1)
# asm 2: movq   <buf=%rsi,120(<input_0=%rdi)
movq   %rsi,120(%rdi)

# qhasm: buf = r2[0]
# asm 1: pextrq $0x0,<r2=reg128#9,>buf=int64#2
# asm 2: pextrq $0x0,<r2=%xmm8,>buf=%rsi
pextrq $0x0,%xmm8,%rsi

# qhasm: mem64[ input_0 + 184 ] = buf
# asm 1: movq   <buf=int64#2,184(<input_0=int64#1)
# asm 2: movq   <buf=%rsi,184(<input_0=%rdi)
movq   %rsi,184(%rdi)

# qhasm: buf = r3[0]
# asm 1: pextrq $0x0,<r3=reg128#1,>buf=int64#2
# asm 2: pextrq $0x0,<r3=%xmm0,>buf=%rsi
pextrq $0x0,%xmm0,%rsi

# qhasm: mem64[ input_0 + 248 ] = buf
# asm 1: movq   <buf=int64#2,248(<input_0=int64#1)
# asm 2: movq   <buf=%rsi,248(<input_0=%rdi)
movq   %rsi,248(%rdi)

# qhasm: buf = r4[0]
# asm 1: pextrq $0x0,<r4=reg128#10,>buf=int64#2
# asm 2: pextrq $0x0,<r4=%xmm9,>buf=%rsi
pextrq $0x0,%xmm9,%rsi

# qhasm: mem64[ input_0 + 312 ] = buf
# asm 1: movq   <buf=int64#2,312(<input_0=int64#1)
# asm 2: movq   <buf=%rsi,312(<input_0=%rdi)
movq   %rsi,312(%rdi)

# qhasm: buf = r5[0]
# asm 1: pextrq $0x0,<r5=reg128#3,>buf=int64#2
# asm 2: pextrq $0x0,<r5=%xmm2,>buf=%rsi
pextrq $0x0,%xmm2,%rsi

# qhasm: mem64[ input_0 + 376 ] = buf
# asm 1: movq   <buf=int64#2,376(<input_0=int64#1)
# asm 2: movq   <buf=%rsi,376(<input_0=%rdi)
movq   %rsi,376(%rdi)

# qhasm: buf = r6[0]
# asm 1: pextrq $0x0,<r6=reg128#5,>buf=int64#2
# asm 2: pextrq $0x0,<r6=%xmm4,>buf=%rsi
pextrq $0x0,%xmm4,%rsi

# qhasm: mem64[ input_0 + 440 ] = buf
# asm 1: movq   <buf=int64#2,440(<input_0=int64#1)
# asm 2: movq   <buf=%rsi,440(<input_0=%rdi)
movq   %rsi,440(%rdi)

# qhasm: buf = r7[0]
# asm 1: pextrq $0x0,<r7=reg128#2,>buf=int64#2
# asm 2: pextrq $0x0,<r7=%xmm1,>buf=%rsi
pextrq $0x0,%xmm1,%rsi

# qhasm: mem64[ input_0 + 504 ] = buf
# asm 1: movq   <buf=int64#2,504(<input_0=int64#1)
# asm 2: movq   <buf=%rsi,504(<input_0=%rdi)
movq   %rsi,504(%rdi)

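# note: the coarse half of the transpose (bit strides 32, 16 and 8 via
# MASK5/MASK4/MASK3) is complete; the loads below switch the working
# masks to MASK2/MASK1/MASK0, which drive the remaining strides 4, 2
# and 1.
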
# qhasm: mask0 aligned= mem128[ PQCLEAN_MCELIECE348864F_AVX_MASK2_0 ]
# asm 1: movdqa   PQCLEAN_MCELIECE348864F_AVX_MASK2_0(%rip),>mask0=reg128#1
# asm 2: movdqa   PQCLEAN_MCELIECE348864F_AVX_MASK2_0(%rip),>mask0=%xmm0
movdqa   PQCLEAN_MCELIECE348864F_AVX_MASK2_0(%rip),%xmm0

# qhasm: mask1 aligned= mem128[ PQCLEAN_MCELIECE348864F_AVX_MASK2_1 ]
# asm 1: movdqa   PQCLEAN_MCELIECE348864F_AVX_MASK2_1(%rip),>mask1=reg128#2
# asm 2: movdqa   PQCLEAN_MCELIECE348864F_AVX_MASK2_1(%rip),>mask1=%xmm1
movdqa   PQCLEAN_MCELIECE348864F_AVX_MASK2_1(%rip),%xmm1

# qhasm: mask2 aligned= mem128[ PQCLEAN_MCELIECE348864F_AVX_MASK1_0 ]
# asm 1: movdqa   PQCLEAN_MCELIECE348864F_AVX_MASK1_0(%rip),>mask2=reg128#3
# asm 2: movdqa   PQCLEAN_MCELIECE348864F_AVX_MASK1_0(%rip),>mask2=%xmm2
movdqa   PQCLEAN_MCELIECE348864F_AVX_MASK1_0(%rip),%xmm2

# qhasm: mask3 aligned= mem128[ PQCLEAN_MCELIECE348864F_AVX_MASK1_1 ]
# asm 1: movdqa   PQCLEAN_MCELIECE348864F_AVX_MASK1_1(%rip),>mask3=reg128#4
# asm 2: movdqa   PQCLEAN_MCELIECE348864F_AVX_MASK1_1(%rip),>mask3=%xmm3
movdqa   PQCLEAN_MCELIECE348864F_AVX_MASK1_1(%rip),%xmm3

# qhasm: mask4 aligned= mem128[ PQCLEAN_MCELIECE348864F_AVX_MASK0_0 ]
# asm 1: movdqa   PQCLEAN_MCELIECE348864F_AVX_MASK0_0(%rip),>mask4=reg128#5
# asm 2: movdqa   PQCLEAN_MCELIECE348864F_AVX_MASK0_0(%rip),>mask4=%xmm4
movdqa   PQCLEAN_MCELIECE348864F_AVX_MASK0_0(%rip),%xmm4

# qhasm: mask5 aligned= mem128[ PQCLEAN_MCELIECE348864F_AVX_MASK0_1 ]
# asm 1: movdqa   PQCLEAN_MCELIECE348864F_AVX_MASK0_1(%rip),>mask5=reg128#6
# asm 2: movdqa   PQCLEAN_MCELIECE348864F_AVX_MASK0_1(%rip),>mask5=%xmm5
movdqa   PQCLEAN_MCELIECE348864F_AVX_MASK0_1(%rip),%xmm5

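# note: every exchange step below follows one mask/shift/or butterfly.
# A minimal sketch in the file's qhasm notation (s, maskLo and maskHi
# are illustrative names, not symbols from the generated code):
#   v00 = a & maskLo           v10 = (b & maskLo) << s
#   v01 = (a & maskHi) >> s    v11 = b & maskHi
#   a = v00 | v10              b = v01 | v11
# i.e. the high s-bit half of each 2s-bit group of a is swapped with
# the low half of the corresponding group of b.
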
# qhasm: r0 = mem64[ input_0 + 0 ] x2
# asm 1: movddup 0(<input_0=int64#1),>r0=reg128#7
# asm 2: movddup 0(<input_0=%rdi),>r0=%xmm6
movddup 0(%rdi),%xmm6

# qhasm: r1 = mem64[ input_0 + 8 ] x2
# asm 1: movddup 8(<input_0=int64#1),>r1=reg128#8
# asm 2: movddup 8(<input_0=%rdi),>r1=%xmm7
movddup 8(%rdi),%xmm7

# qhasm: r2 = mem64[ input_0 + 16 ] x2
# asm 1: movddup 16(<input_0=int64#1),>r2=reg128#9
# asm 2: movddup 16(<input_0=%rdi),>r2=%xmm8
movddup 16(%rdi),%xmm8

# qhasm: r3 = mem64[ input_0 + 24 ] x2
# asm 1: movddup 24(<input_0=int64#1),>r3=reg128#10
# asm 2: movddup 24(<input_0=%rdi),>r3=%xmm9
movddup 24(%rdi),%xmm9

# qhasm: r4 = mem64[ input_0 + 32 ] x2
# asm 1: movddup 32(<input_0=int64#1),>r4=reg128#11
# asm 2: movddup 32(<input_0=%rdi),>r4=%xmm10
movddup 32(%rdi),%xmm10

# qhasm: r5 = mem64[ input_0 + 40 ] x2
# asm 1: movddup 40(<input_0=int64#1),>r5=reg128#12
# asm 2: movddup 40(<input_0=%rdi),>r5=%xmm11
movddup 40(%rdi),%xmm11

# qhasm: r6 = mem64[ input_0 + 48 ] x2
# asm 1: movddup 48(<input_0=int64#1),>r6=reg128#13
# asm 2: movddup 48(<input_0=%rdi),>r6=%xmm12
movddup 48(%rdi),%xmm12

# qhasm: r7 = mem64[ input_0 + 56 ] x2
# asm 1: movddup 56(<input_0=int64#1),>r7=reg128#14
# asm 2: movddup 56(<input_0=%rdi),>r7=%xmm13
movddup 56(%rdi),%xmm13

# qhasm: v00 = r0 & mask0
# asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15
# asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14
vpand %xmm0,%xmm6,%xmm14

# qhasm: v10 = r4 & mask0
# asm 1: vpand <mask0=reg128#1,<r4=reg128#11,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<r4=%xmm10,>v10=%xmm15
vpand %xmm0,%xmm10,%xmm15

# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4,%xmm15

# qhasm: v01 = r0 & mask1
# asm 1: vpand <mask1=reg128#2,<r0=reg128#7,>v01=reg128#7
# asm 2: vpand <mask1=%xmm1,<r0=%xmm6,>v01=%xmm6
vpand %xmm1,%xmm6,%xmm6

# qhasm: v11 = r4 & mask1
# asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11
# asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10
vpand %xmm1,%xmm10,%xmm10

# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#7
# asm 2: psrlq $4,<v01=%xmm6
psrlq $4,%xmm6

# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14
vpor %xmm15,%xmm14,%xmm14

# qhasm: r4 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7
# asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6
vpor %xmm10,%xmm6,%xmm6

# qhasm: v00 = r1 & mask0
# asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11
# asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10
vpand %xmm0,%xmm7,%xmm10

# qhasm: v10 = r5 & mask0
# asm 1: vpand <mask0=reg128#1,<r5=reg128#12,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<r5=%xmm11,>v10=%xmm15
vpand %xmm0,%xmm11,%xmm15

# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4,%xmm15

# qhasm: v01 = r1 & mask1
# asm 1: vpand <mask1=reg128#2,<r1=reg128#8,>v01=reg128#8
# asm 2: vpand <mask1=%xmm1,<r1=%xmm7,>v01=%xmm7
vpand %xmm1,%xmm7,%xmm7

# qhasm: v11 = r5 & mask1
# asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12
# asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11
vpand %xmm1,%xmm11,%xmm11

# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#8
# asm 2: psrlq $4,<v01=%xmm7
psrlq $4,%xmm7

# qhasm: r1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11
# asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10
vpor %xmm15,%xmm10,%xmm10

# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8
# asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7
vpor %xmm11,%xmm7,%xmm7

# qhasm: v00 = r2 & mask0
# asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12
# asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11
vpand %xmm0,%xmm8,%xmm11

# qhasm: v10 = r6 & mask0
# asm 1: vpand <mask0=reg128#1,<r6=reg128#13,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<r6=%xmm12,>v10=%xmm15
vpand %xmm0,%xmm12,%xmm15

# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4,%xmm15

# qhasm: v01 = r2 & mask1
# asm 1: vpand <mask1=reg128#2,<r2=reg128#9,>v01=reg128#9
# asm 2: vpand <mask1=%xmm1,<r2=%xmm8,>v01=%xmm8
vpand %xmm1,%xmm8,%xmm8

# qhasm: v11 = r6 & mask1
# asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13
# asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12
vpand %xmm1,%xmm12,%xmm12

# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#9
# asm 2: psrlq $4,<v01=%xmm8
psrlq $4,%xmm8

# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11
vpor %xmm15,%xmm11,%xmm11

# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9
# asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8
vpor %xmm12,%xmm8,%xmm8

# qhasm: v00 = r3 & mask0
# asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13
# asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12
vpand %xmm0,%xmm9,%xmm12

# qhasm: v10 = r7 & mask0
# asm 1: vpand <mask0=reg128#1,<r7=reg128#14,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<r7=%xmm13,>v10=%xmm15
vpand %xmm0,%xmm13,%xmm15

# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4,%xmm15

# qhasm: v01 = r3 & mask1
# asm 1: vpand <mask1=reg128#2,<r3=reg128#10,>v01=reg128#10
# asm 2: vpand <mask1=%xmm1,<r3=%xmm9,>v01=%xmm9
vpand %xmm1,%xmm9,%xmm9

# qhasm: v11 = r7 & mask1
# asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14
# asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13
vpand %xmm1,%xmm13,%xmm13

# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#10
# asm 2: psrlq $4,<v01=%xmm9
psrlq $4,%xmm9

# qhasm: r3 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12
vpor %xmm15,%xmm12,%xmm12

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10
# asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9
vpor %xmm13,%xmm9,%xmm9

# qhasm: v00 = r0 & mask2
# asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14
# asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13
vpand %xmm2,%xmm14,%xmm13

# qhasm: v10 = r2 & mask2
# asm 1: vpand <mask2=reg128#3,<r2=reg128#12,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<r2=%xmm11,>v10=%xmm15
vpand %xmm2,%xmm11,%xmm15

# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2,%xmm15

# qhasm: v01 = r0 & mask3
# asm 1: vpand <mask3=reg128#4,<r0=reg128#15,>v01=reg128#15
# asm 2: vpand <mask3=%xmm3,<r0=%xmm14,>v01=%xmm14
vpand %xmm3,%xmm14,%xmm14

# qhasm: v11 = r2 & mask3
# asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12
# asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11
vpand %xmm3,%xmm11,%xmm11

# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#15
# asm 2: psrlq $2,<v01=%xmm14
psrlq $2,%xmm14

# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14
# asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13
vpor %xmm15,%xmm13,%xmm13

# qhasm: r2 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12
# asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11
vpor %xmm11,%xmm14,%xmm11

# qhasm: v00 = r1 & mask2
# asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15
# asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14
vpand %xmm2,%xmm10,%xmm14

# qhasm: v10 = r3 & mask2
# asm 1: vpand <mask2=reg128#3,<r3=reg128#13,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<r3=%xmm12,>v10=%xmm15
vpand %xmm2,%xmm12,%xmm15

# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2,%xmm15

# qhasm: v01 = r1 & mask3
# asm 1: vpand <mask3=reg128#4,<r1=reg128#11,>v01=reg128#11
# asm 2: vpand <mask3=%xmm3,<r1=%xmm10,>v01=%xmm10
vpand %xmm3,%xmm10,%xmm10

# qhasm: v11 = r3 & mask3
# asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13
# asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12
vpand %xmm3,%xmm12,%xmm12

# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#11
# asm 2: psrlq $2,<v01=%xmm10
psrlq $2,%xmm10

# qhasm: r1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14
vpor %xmm15,%xmm14,%xmm14

# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11
# asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10
vpor %xmm12,%xmm10,%xmm10

# qhasm: v00 = r4 & mask2
# asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13
# asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12
vpand %xmm2,%xmm6,%xmm12

# qhasm: v10 = r6 & mask2
# asm 1: vpand <mask2=reg128#3,<r6=reg128#9,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<r6=%xmm8,>v10=%xmm15
vpand %xmm2,%xmm8,%xmm15

# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2,%xmm15

# qhasm: v01 = r4 & mask3
# asm 1: vpand <mask3=reg128#4,<r4=reg128#7,>v01=reg128#7
# asm 2: vpand <mask3=%xmm3,<r4=%xmm6,>v01=%xmm6
vpand %xmm3,%xmm6,%xmm6

# qhasm: v11 = r6 & mask3
# asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9
# asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8
vpand %xmm3,%xmm8,%xmm8

# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#7
# asm 2: psrlq $2,<v01=%xmm6
psrlq $2,%xmm6

# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12
vpor %xmm15,%xmm12,%xmm12

# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7
# asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6
vpor %xmm8,%xmm6,%xmm6

# qhasm: v00 = r5 & mask2
# asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9
# asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8
vpand %xmm2,%xmm7,%xmm8

# qhasm: v10 = r7 & mask2
# asm 1: vpand <mask2=reg128#3,<r7=reg128#10,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<r7=%xmm9,>v10=%xmm15
vpand %xmm2,%xmm9,%xmm15

# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2,%xmm15

# qhasm: v01 = r5 & mask3
# asm 1: vpand <mask3=reg128#4,<r5=reg128#8,>v01=reg128#8
# asm 2: vpand <mask3=%xmm3,<r5=%xmm7,>v01=%xmm7
vpand %xmm3,%xmm7,%xmm7

# qhasm: v11 = r7 & mask3
# asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10
# asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9
vpand %xmm3,%xmm9,%xmm9

# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#8
# asm 2: psrlq $2,<v01=%xmm7
psrlq $2,%xmm7

# qhasm: r5 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9
# asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8
vpor %xmm15,%xmm8,%xmm8

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8
# asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7
vpor %xmm9,%xmm7,%xmm7

# qhasm: v00 = r0 & mask4
# asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9
vpand %xmm4,%xmm13,%xmm9

# qhasm: v10 = r1 & mask4
# asm 1: vpand <mask4=reg128#5,<r1=reg128#15,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<r1=%xmm14,>v10=%xmm15
vpand %xmm4,%xmm14,%xmm15

# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1,%xmm15

# qhasm: v01 = r0 & mask5
# asm 1: vpand <mask5=reg128#6,<r0=reg128#14,>v01=reg128#14
# asm 2: vpand <mask5=%xmm5,<r0=%xmm13,>v01=%xmm13
vpand %xmm5,%xmm13,%xmm13

# qhasm: v11 = r1 & mask5
# asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15
# asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14
vpand %xmm5,%xmm14,%xmm14

# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#14
# asm 2: psrlq $1,<v01=%xmm13
psrlq $1,%xmm13

# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10
# asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9
vpor %xmm15,%xmm9,%xmm9

# qhasm: r1 = v01 | v11
# asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14
# asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13
vpor %xmm14,%xmm13,%xmm13

# qhasm: v00 = r2 & mask4
# asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15
# asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14
vpand %xmm4,%xmm11,%xmm14

# qhasm: v10 = r3 & mask4
# asm 1: vpand <mask4=reg128#5,<r3=reg128#11,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<r3=%xmm10,>v10=%xmm15
vpand %xmm4,%xmm10,%xmm15

# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1,%xmm15

# qhasm: v01 = r2 & mask5
# asm 1: vpand <mask5=reg128#6,<r2=reg128#12,>v01=reg128#12
# asm 2: vpand <mask5=%xmm5,<r2=%xmm11,>v01=%xmm11
vpand %xmm5,%xmm11,%xmm11

# qhasm: v11 = r3 & mask5
# asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11
# asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10
vpand %xmm5,%xmm10,%xmm10

# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#12
# asm 2: psrlq $1,<v01=%xmm11
psrlq $1,%xmm11

# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14
vpor %xmm15,%xmm14,%xmm14

# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11
# asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10
vpor %xmm10,%xmm11,%xmm10

# qhasm: v00 = r4 & mask4
# asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12
# asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11
vpand %xmm4,%xmm12,%xmm11

# qhasm: v10 = r5 & mask4
# asm 1: vpand <mask4=reg128#5,<r5=reg128#9,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<r5=%xmm8,>v10=%xmm15
vpand %xmm4,%xmm8,%xmm15

# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1,%xmm15

# qhasm: v01 = r4 & mask5
# asm 1: vpand <mask5=reg128#6,<r4=reg128#13,>v01=reg128#13
# asm 2: vpand <mask5=%xmm5,<r4=%xmm12,>v01=%xmm12
vpand %xmm5,%xmm12,%xmm12

# qhasm: v11 = r5 & mask5
# asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9
# asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8
vpand %xmm5,%xmm8,%xmm8

# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#13
# asm 2: psrlq $1,<v01=%xmm12
psrlq $1,%xmm12

# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11
vpor %xmm15,%xmm11,%xmm11

# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9
# asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8
vpor %xmm8,%xmm12,%xmm8

# qhasm: v00 = r6 & mask4
# asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13
# asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12
vpand %xmm4,%xmm6,%xmm12

# qhasm: v10 = r7 & mask4
# asm 1: vpand <mask4=reg128#5,<r7=reg128#8,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<r7=%xmm7,>v10=%xmm15
vpand %xmm4,%xmm7,%xmm15

# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1,%xmm15

# qhasm: v01 = r6 & mask5
# asm 1: vpand <mask5=reg128#6,<r6=reg128#7,>v01=reg128#7
# asm 2: vpand <mask5=%xmm5,<r6=%xmm6,>v01=%xmm6
vpand %xmm5,%xmm6,%xmm6

# qhasm: v11 = r7 & mask5
# asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8
# asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7
vpand %xmm5,%xmm7,%xmm7

# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#7
# asm 2: psrlq $1,<v01=%xmm6
psrlq $1,%xmm6

# qhasm: r6 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12
vpor %xmm15,%xmm12,%xmm12

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7
# asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6
vpor %xmm7,%xmm6,%xmm6

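# note: after the stride-4/2/1 passes each register's low quadword is a
# finished row; vpunpcklqdq packs two rows per register so they can be
# written back with a single 128-bit store.
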
# qhasm: t0 = r0[0]r1[0]
# asm 1: vpunpcklqdq <r1=reg128#14,<r0=reg128#10,>t0=reg128#8
# asm 2: vpunpcklqdq <r1=%xmm13,<r0=%xmm9,>t0=%xmm7
vpunpcklqdq %xmm13,%xmm9,%xmm7

# qhasm: mem128[ input_0 + 0 ] = t0
# asm 1: movdqu   <t0=reg128#8,0(<input_0=int64#1)
# asm 2: movdqu   <t0=%xmm7,0(<input_0=%rdi)
movdqu   %xmm7,0(%rdi)

# qhasm: t0 = r2[0]r3[0]
# asm 1: vpunpcklqdq <r3=reg128#11,<r2=reg128#15,>t0=reg128#8
# asm 2: vpunpcklqdq <r3=%xmm10,<r2=%xmm14,>t0=%xmm7
vpunpcklqdq %xmm10,%xmm14,%xmm7

# qhasm: mem128[ input_0 + 16 ] = t0
# asm 1: movdqu   <t0=reg128#8,16(<input_0=int64#1)
# asm 2: movdqu   <t0=%xmm7,16(<input_0=%rdi)
movdqu   %xmm7,16(%rdi)

# qhasm: t0 = r4[0]r5[0]
# asm 1: vpunpcklqdq <r5=reg128#9,<r4=reg128#12,>t0=reg128#8
# asm 2: vpunpcklqdq <r5=%xmm8,<r4=%xmm11,>t0=%xmm7
vpunpcklqdq %xmm8,%xmm11,%xmm7

# qhasm: mem128[ input_0 + 32 ] = t0
# asm 1: movdqu   <t0=reg128#8,32(<input_0=int64#1)
# asm 2: movdqu   <t0=%xmm7,32(<input_0=%rdi)
movdqu   %xmm7,32(%rdi)

# qhasm: t0 = r6[0]r7[0]
# asm 1: vpunpcklqdq <r7=reg128#7,<r6=reg128#13,>t0=reg128#7
# asm 2: vpunpcklqdq <r7=%xmm6,<r6=%xmm12,>t0=%xmm6
vpunpcklqdq %xmm6,%xmm12,%xmm6

# qhasm: mem128[ input_0 + 48 ] = t0
# asm 1: movdqu   <t0=reg128#7,48(<input_0=int64#1)
# asm 2: movdqu   <t0=%xmm6,48(<input_0=%rdi)
movdqu   %xmm6,48(%rdi)

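# note: the same load / stride-4,2,1 exchange / packed-store sequence
# repeats for the next 64-byte block of rows (byte offsets 64..120).
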
4545# qhasm: r0 = mem64[ input_0 + 64 ] x2
4546# asm 1: movddup 64(<input_0=int64#1),>r0=reg128#7
4547# asm 2: movddup 64(<input_0=%rdi),>r0=%xmm6
4548movddup 64(%rdi),%xmm6
4549
4550# qhasm: r1 = mem64[ input_0 + 72 ] x2
4551# asm 1: movddup 72(<input_0=int64#1),>r1=reg128#8
4552# asm 2: movddup 72(<input_0=%rdi),>r1=%xmm7
4553movddup 72(%rdi),%xmm7
4554
4555# qhasm: r2 = mem64[ input_0 + 80 ] x2
4556# asm 1: movddup 80(<input_0=int64#1),>r2=reg128#9
4557# asm 2: movddup 80(<input_0=%rdi),>r2=%xmm8
4558movddup 80(%rdi),%xmm8
4559
4560# qhasm: r3 = mem64[ input_0 + 88 ] x2
4561# asm 1: movddup 88(<input_0=int64#1),>r3=reg128#10
4562# asm 2: movddup 88(<input_0=%rdi),>r3=%xmm9
4563movddup 88(%rdi),%xmm9
4564
4565# qhasm: r4 = mem64[ input_0 + 96 ] x2
4566# asm 1: movddup 96(<input_0=int64#1),>r4=reg128#11
4567# asm 2: movddup 96(<input_0=%rdi),>r4=%xmm10
4568movddup 96(%rdi),%xmm10
4569
4570# qhasm: r5 = mem64[ input_0 + 104 ] x2
4571# asm 1: movddup 104(<input_0=int64#1),>r5=reg128#12
4572# asm 2: movddup 104(<input_0=%rdi),>r5=%xmm11
4573movddup 104(%rdi),%xmm11
4574
4575# qhasm: r6 = mem64[ input_0 + 112 ] x2
4576# asm 1: movddup 112(<input_0=int64#1),>r6=reg128#13
4577# asm 2: movddup 112(<input_0=%rdi),>r6=%xmm12
4578movddup 112(%rdi),%xmm12
4579
4580# qhasm: r7 = mem64[ input_0 + 120 ] x2
4581# asm 1: movddup 120(<input_0=int64#1),>r7=reg128#14
4582# asm 2: movddup 120(<input_0=%rdi),>r7=%xmm13
4583movddup 120(%rdi),%xmm13
4584
4585# qhasm: v00 = r0 & mask0
4586# asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15
4587# asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14
4588vpand %xmm0,%xmm6,%xmm14
4589
4590# qhasm: v10 = r4 & mask0
4591# asm 1: vpand <mask0=reg128#1,<r4=reg128#11,>v10=reg128#16
4592# asm 2: vpand <mask0=%xmm0,<r4=%xmm10,>v10=%xmm15
4593vpand %xmm0,%xmm10,%xmm15
4594
4595# qhasm: 2x v10 <<= 4
4596# asm 1: psllq $4,<v10=reg128#16
4597# asm 2: psllq $4,<v10=%xmm15
4598psllq $4,%xmm15
4599
4600# qhasm: v01 = r0 & mask1
4601# asm 1: vpand <mask1=reg128#2,<r0=reg128#7,>v01=reg128#7
4602# asm 2: vpand <mask1=%xmm1,<r0=%xmm6,>v01=%xmm6
4603vpand %xmm1,%xmm6,%xmm6
4604
4605# qhasm: v11 = r4 & mask1
4606# asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11
4607# asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10
4608vpand %xmm1,%xmm10,%xmm10
4609
4610# qhasm: 2x v01 unsigned>>= 4
4611# asm 1: psrlq $4,<v01=reg128#7
4612# asm 2: psrlq $4,<v01=%xmm6
4613psrlq $4,%xmm6
4614
4615# qhasm: r0 = v00 | v10
4616# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15
4617# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14
4618vpor %xmm15,%xmm14,%xmm14
4619
4620# qhasm: r4 = v01 | v11
4621# asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7
4622# asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6
4623vpor %xmm10,%xmm6,%xmm6
4624
4625# qhasm: v00 = r1 & mask0
4626# asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11
4627# asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10
4628vpand %xmm0,%xmm7,%xmm10
4629
4630# qhasm: v10 = r5 & mask0
4631# asm 1: vpand <mask0=reg128#1,<r5=reg128#12,>v10=reg128#16
4632# asm 2: vpand <mask0=%xmm0,<r5=%xmm11,>v10=%xmm15
4633vpand %xmm0,%xmm11,%xmm15
4634
4635# qhasm: 2x v10 <<= 4
4636# asm 1: psllq $4,<v10=reg128#16
4637# asm 2: psllq $4,<v10=%xmm15
4638psllq $4,%xmm15
4639
4640# qhasm: v01 = r1 & mask1
4641# asm 1: vpand <mask1=reg128#2,<r1=reg128#8,>v01=reg128#8
4642# asm 2: vpand <mask1=%xmm1,<r1=%xmm7,>v01=%xmm7
4643vpand %xmm1,%xmm7,%xmm7
4644
4645# qhasm: v11 = r5 & mask1
4646# asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12
4647# asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11
4648vpand %xmm1,%xmm11,%xmm11
4649
4650# qhasm: 2x v01 unsigned>>= 4
4651# asm 1: psrlq $4,<v01=reg128#8
4652# asm 2: psrlq $4,<v01=%xmm7
4653psrlq $4,%xmm7
4654
4655# qhasm: r1 = v00 | v10
4656# asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11
4657# asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10
4658vpor %xmm15,%xmm10,%xmm10
4659
4660# qhasm: r5 = v01 | v11
4661# asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8
4662# asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7
4663vpor %xmm11,%xmm7,%xmm7
4664
4665# qhasm: v00 = r2 & mask0
4666# asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12
4667# asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11
4668vpand %xmm0,%xmm8,%xmm11
4669
4670# qhasm: v10 = r6 & mask0
4671# asm 1: vpand <mask0=reg128#1,<r6=reg128#13,>v10=reg128#16
4672# asm 2: vpand <mask0=%xmm0,<r6=%xmm12,>v10=%xmm15
4673vpand %xmm0,%xmm12,%xmm15
4674
4675# qhasm: 2x v10 <<= 4
4676# asm 1: psllq $4,<v10=reg128#16
4677# asm 2: psllq $4,<v10=%xmm15
4678psllq $4,%xmm15
4679
4680# qhasm: v01 = r2 & mask1
4681# asm 1: vpand <mask1=reg128#2,<r2=reg128#9,>v01=reg128#9
4682# asm 2: vpand <mask1=%xmm1,<r2=%xmm8,>v01=%xmm8
4683vpand %xmm1,%xmm8,%xmm8
4684
4685# qhasm: v11 = r6 & mask1
4686# asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13
4687# asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12
4688vpand %xmm1,%xmm12,%xmm12
4689
4690# qhasm: 2x v01 unsigned>>= 4
4691# asm 1: psrlq $4,<v01=reg128#9
4692# asm 2: psrlq $4,<v01=%xmm8
4693psrlq $4,%xmm8
4694
4695# qhasm: r2 = v00 | v10
4696# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12
4697# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11
4698vpor %xmm15,%xmm11,%xmm11
4699
4700# qhasm: r6 = v01 | v11
4701# asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9
4702# asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8
4703vpor %xmm12,%xmm8,%xmm8
4704
4705# qhasm: v00 = r3 & mask0
4706# asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13
4707# asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12
4708vpand %xmm0,%xmm9,%xmm12
4709
4710# qhasm: v10 = r7 & mask0
4711# asm 1: vpand <mask0=reg128#1,<r7=reg128#14,>v10=reg128#16
4712# asm 2: vpand <mask0=%xmm0,<r7=%xmm13,>v10=%xmm15
4713vpand %xmm0,%xmm13,%xmm15
4714
4715# qhasm: 2x v10 <<= 4
4716# asm 1: psllq $4,<v10=reg128#16
4717# asm 2: psllq $4,<v10=%xmm15
4718psllq $4,%xmm15
4719
4720# qhasm: v01 = r3 & mask1
4721# asm 1: vpand <mask1=reg128#2,<r3=reg128#10,>v01=reg128#10
4722# asm 2: vpand <mask1=%xmm1,<r3=%xmm9,>v01=%xmm9
4723vpand %xmm1,%xmm9,%xmm9
4724
4725# qhasm: v11 = r7 & mask1
4726# asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14
4727# asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13
4728vpand %xmm1,%xmm13,%xmm13
4729
4730# qhasm: 2x v01 unsigned>>= 4
4731# asm 1: psrlq $4,<v01=reg128#10
4732# asm 2: psrlq $4,<v01=%xmm9
4733psrlq $4,%xmm9
4734
4735# qhasm: r3 = v00 | v10
4736# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13
4737# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12
4738vpor %xmm15,%xmm12,%xmm12
4739
4740# qhasm: r7 = v01 | v11
4741# asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10
4742# asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9
4743vpor %xmm13,%xmm9,%xmm9
4744
4745# qhasm: v00 = r0 & mask2
4746# asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14
4747# asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13
4748vpand %xmm2,%xmm14,%xmm13
4749
4750# qhasm: v10 = r2 & mask2
4751# asm 1: vpand <mask2=reg128#3,<r2=reg128#12,>v10=reg128#16
4752# asm 2: vpand <mask2=%xmm2,<r2=%xmm11,>v10=%xmm15
4753vpand %xmm2,%xmm11,%xmm15
4754
4755# qhasm: 2x v10 <<= 2
4756# asm 1: psllq $2,<v10=reg128#16
4757# asm 2: psllq $2,<v10=%xmm15
4758psllq $2,%xmm15
4759
4760# qhasm: v01 = r0 & mask3
4761# asm 1: vpand <mask3=reg128#4,<r0=reg128#15,>v01=reg128#15
4762# asm 2: vpand <mask3=%xmm3,<r0=%xmm14,>v01=%xmm14
4763vpand %xmm3,%xmm14,%xmm14
4764
4765# qhasm: v11 = r2 & mask3
4766# asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12
4767# asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11
4768vpand %xmm3,%xmm11,%xmm11
4769
4770# qhasm: 2x v01 unsigned>>= 2
4771# asm 1: psrlq $2,<v01=reg128#15
4772# asm 2: psrlq $2,<v01=%xmm14
4773psrlq $2,%xmm14
4774
4775# qhasm: r0 = v00 | v10
4776# asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14
4777# asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13
4778vpor %xmm15,%xmm13,%xmm13
4779
4780# qhasm: r2 = v01 | v11
4781# asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12
4782# asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11
4783vpor %xmm11,%xmm14,%xmm11
4784
4785# qhasm: v00 = r1 & mask2
4786# asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15
4787# asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14
4788vpand %xmm2,%xmm10,%xmm14
4789
4790# qhasm: v10 = r3 & mask2
4791# asm 1: vpand <mask2=reg128#3,<r3=reg128#13,>v10=reg128#16
4792# asm 2: vpand <mask2=%xmm2,<r3=%xmm12,>v10=%xmm15
4793vpand %xmm2,%xmm12,%xmm15
4794
4795# qhasm: 2x v10 <<= 2
4796# asm 1: psllq $2,<v10=reg128#16
4797# asm 2: psllq $2,<v10=%xmm15
4798psllq $2,%xmm15
4799
4800# qhasm: v01 = r1 & mask3
4801# asm 1: vpand <mask3=reg128#4,<r1=reg128#11,>v01=reg128#11
4802# asm 2: vpand <mask3=%xmm3,<r1=%xmm10,>v01=%xmm10
4803vpand %xmm3,%xmm10,%xmm10
4804
4805# qhasm: v11 = r3 & mask3
4806# asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13
4807# asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12
4808vpand %xmm3,%xmm12,%xmm12
4809
4810# qhasm: 2x v01 unsigned>>= 2
4811# asm 1: psrlq $2,<v01=reg128#11
4812# asm 2: psrlq $2,<v01=%xmm10
4813psrlq $2,%xmm10
4814
4815# qhasm: r1 = v00 | v10
4816# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15
4817# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14
4818vpor %xmm15,%xmm14,%xmm14
4819
4820# qhasm: r3 = v01 | v11
4821# asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11
4822# asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10
4823vpor %xmm12,%xmm10,%xmm10
4824
4825# qhasm: v00 = r4 & mask2
4826# asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13
4827# asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12
4828vpand %xmm2,%xmm6,%xmm12
4829
4830# qhasm: v10 = r6 & mask2
4831# asm 1: vpand <mask2=reg128#3,<r6=reg128#9,>v10=reg128#16
4832# asm 2: vpand <mask2=%xmm2,<r6=%xmm8,>v10=%xmm15
4833vpand %xmm2,%xmm8,%xmm15
4834
4835# qhasm: 2x v10 <<= 2
4836# asm 1: psllq $2,<v10=reg128#16
4837# asm 2: psllq $2,<v10=%xmm15
4838psllq $2,%xmm15
4839
4840# qhasm: v01 = r4 & mask3
4841# asm 1: vpand <mask3=reg128#4,<r4=reg128#7,>v01=reg128#7
4842# asm 2: vpand <mask3=%xmm3,<r4=%xmm6,>v01=%xmm6
4843vpand %xmm3,%xmm6,%xmm6
4844
4845# qhasm: v11 = r6 & mask3
4846# asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9
4847# asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8
4848vpand %xmm3,%xmm8,%xmm8
4849
4850# qhasm: 2x v01 unsigned>>= 2
4851# asm 1: psrlq $2,<v01=reg128#7
4852# asm 2: psrlq $2,<v01=%xmm6
4853psrlq $2,%xmm6
4854
4855# qhasm: r4 = v00 | v10
4856# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13
4857# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12
4858vpor %xmm15,%xmm12,%xmm12
4859
4860# qhasm: r6 = v01 | v11
4861# asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7
4862# asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6
4863vpor %xmm8,%xmm6,%xmm6
4864
4865# qhasm: v00 = r5 & mask2
4866# asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9
4867# asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8
4868vpand %xmm2,%xmm7,%xmm8
4869
4870# qhasm: v10 = r7 & mask2
4871# asm 1: vpand <mask2=reg128#3,<r7=reg128#10,>v10=reg128#16
4872# asm 2: vpand <mask2=%xmm2,<r7=%xmm9,>v10=%xmm15
4873vpand %xmm2,%xmm9,%xmm15
4874
4875# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2,%xmm15

# qhasm: v01 = r5 & mask3
# asm 1: vpand <mask3=reg128#4,<r5=reg128#8,>v01=reg128#8
# asm 2: vpand <mask3=%xmm3,<r5=%xmm7,>v01=%xmm7
vpand %xmm3,%xmm7,%xmm7

# qhasm: v11 = r7 & mask3
# asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10
# asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9
vpand %xmm3,%xmm9,%xmm9

# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#8
# asm 2: psrlq $2,<v01=%xmm7
psrlq $2,%xmm7

# qhasm: r5 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9
# asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8
vpor %xmm15,%xmm8,%xmm8

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8
# asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7
vpor %xmm9,%xmm7,%xmm7

# qhasm: v00 = r0 & mask4
# asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9
vpand %xmm4,%xmm13,%xmm9

# qhasm: v10 = r1 & mask4
# asm 1: vpand <mask4=reg128#5,<r1=reg128#15,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<r1=%xmm14,>v10=%xmm15
vpand %xmm4,%xmm14,%xmm15

# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1,%xmm15

# qhasm: v01 = r0 & mask5
# asm 1: vpand <mask5=reg128#6,<r0=reg128#14,>v01=reg128#14
# asm 2: vpand <mask5=%xmm5,<r0=%xmm13,>v01=%xmm13
vpand %xmm5,%xmm13,%xmm13

# qhasm: v11 = r1 & mask5
# asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15
# asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14
vpand %xmm5,%xmm14,%xmm14

# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#14
# asm 2: psrlq $1,<v01=%xmm13
psrlq $1,%xmm13

# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10
# asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9
vpor %xmm15,%xmm9,%xmm9

# qhasm: r1 = v01 | v11
# asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14
# asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13
vpor %xmm14,%xmm13,%xmm13

# qhasm: v00 = r2 & mask4
# asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15
# asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14
vpand %xmm4,%xmm11,%xmm14

# qhasm: v10 = r3 & mask4
# asm 1: vpand <mask4=reg128#5,<r3=reg128#11,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<r3=%xmm10,>v10=%xmm15
vpand %xmm4,%xmm10,%xmm15

# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1,%xmm15

# qhasm: v01 = r2 & mask5
# asm 1: vpand <mask5=reg128#6,<r2=reg128#12,>v01=reg128#12
# asm 2: vpand <mask5=%xmm5,<r2=%xmm11,>v01=%xmm11
vpand %xmm5,%xmm11,%xmm11

# qhasm: v11 = r3 & mask5
# asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11
# asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10
vpand %xmm5,%xmm10,%xmm10

# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#12
# asm 2: psrlq $1,<v01=%xmm11
psrlq $1,%xmm11

# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14
vpor %xmm15,%xmm14,%xmm14

# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11
# asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10
vpor %xmm10,%xmm11,%xmm10

# qhasm: v00 = r4 & mask4
# asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12
# asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11
vpand %xmm4,%xmm12,%xmm11

# qhasm: v10 = r5 & mask4
# asm 1: vpand <mask4=reg128#5,<r5=reg128#9,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<r5=%xmm8,>v10=%xmm15
vpand %xmm4,%xmm8,%xmm15

# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1,%xmm15

# qhasm: v01 = r4 & mask5
# asm 1: vpand <mask5=reg128#6,<r4=reg128#13,>v01=reg128#13
# asm 2: vpand <mask5=%xmm5,<r4=%xmm12,>v01=%xmm12
vpand %xmm5,%xmm12,%xmm12

# qhasm: v11 = r5 & mask5
# asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9
# asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8
vpand %xmm5,%xmm8,%xmm8

# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#13
# asm 2: psrlq $1,<v01=%xmm12
psrlq $1,%xmm12

# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11
vpor %xmm15,%xmm11,%xmm11

# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9
# asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8
vpor %xmm8,%xmm12,%xmm8

# qhasm: v00 = r6 & mask4
# asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13
# asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12
vpand %xmm4,%xmm6,%xmm12

# qhasm: v10 = r7 & mask4
# asm 1: vpand <mask4=reg128#5,<r7=reg128#8,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<r7=%xmm7,>v10=%xmm15
vpand %xmm4,%xmm7,%xmm15

# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1,%xmm15

# qhasm: v01 = r6 & mask5
# asm 1: vpand <mask5=reg128#6,<r6=reg128#7,>v01=reg128#7
# asm 2: vpand <mask5=%xmm5,<r6=%xmm6,>v01=%xmm6
vpand %xmm5,%xmm6,%xmm6

# qhasm: v11 = r7 & mask5
# asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8
# asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7
vpand %xmm5,%xmm7,%xmm7

# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#7
# asm 2: psrlq $1,<v01=%xmm6
psrlq $1,%xmm6

# qhasm: r6 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12
vpor %xmm15,%xmm12,%xmm12

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7
# asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6
vpor %xmm7,%xmm6,%xmm6

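# After the three mask stages, the low qword of each of r0..r7 holds the
# updated row; the stores below pack them pairwise (r0|r1, r2|r3, r4|r5,
# r6|r7) with vpunpcklqdq and write the four 128-bit results back to
# bytes 64..127.
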
# qhasm: t0 = r0[0]r1[0]
# asm 1: vpunpcklqdq <r1=reg128#14,<r0=reg128#10,>t0=reg128#8
# asm 2: vpunpcklqdq <r1=%xmm13,<r0=%xmm9,>t0=%xmm7
vpunpcklqdq %xmm13,%xmm9,%xmm7

# qhasm: mem128[ input_0 + 64 ] = t0
# asm 1: movdqu   <t0=reg128#8,64(<input_0=int64#1)
# asm 2: movdqu   <t0=%xmm7,64(<input_0=%rdi)
movdqu   %xmm7,64(%rdi)

# qhasm: t0 = r2[0]r3[0]
# asm 1: vpunpcklqdq <r3=reg128#11,<r2=reg128#15,>t0=reg128#8
# asm 2: vpunpcklqdq <r3=%xmm10,<r2=%xmm14,>t0=%xmm7
vpunpcklqdq %xmm10,%xmm14,%xmm7

# qhasm: mem128[ input_0 + 80 ] = t0
# asm 1: movdqu   <t0=reg128#8,80(<input_0=int64#1)
# asm 2: movdqu   <t0=%xmm7,80(<input_0=%rdi)
movdqu   %xmm7,80(%rdi)

# qhasm: t0 = r4[0]r5[0]
# asm 1: vpunpcklqdq <r5=reg128#9,<r4=reg128#12,>t0=reg128#8
# asm 2: vpunpcklqdq <r5=%xmm8,<r4=%xmm11,>t0=%xmm7
vpunpcklqdq %xmm8,%xmm11,%xmm7

# qhasm: mem128[ input_0 + 96 ] = t0
# asm 1: movdqu   <t0=reg128#8,96(<input_0=int64#1)
# asm 2: movdqu   <t0=%xmm7,96(<input_0=%rdi)
movdqu   %xmm7,96(%rdi)

# qhasm: t0 = r6[0]r7[0]
# asm 1: vpunpcklqdq <r7=reg128#7,<r6=reg128#13,>t0=reg128#7
# asm 2: vpunpcklqdq <r7=%xmm6,<r6=%xmm12,>t0=%xmm6
vpunpcklqdq %xmm6,%xmm12,%xmm6

# qhasm: mem128[ input_0 + 112 ] = t0
# asm 1: movdqu   <t0=reg128#7,112(<input_0=int64#1)
# asm 2: movdqu   <t0=%xmm6,112(<input_0=%rdi)
movdqu   %xmm6,112(%rdi)

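# Same pattern for the next eight 64-bit rows, at bytes 128..191
# (rows 16..23 of the 64x64 bit matrix, assuming 8-byte rows): broadcast
# each row into both lanes with movddup, then run the three swap stages.
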
# qhasm: r0 = mem64[ input_0 + 128 ] x2
# asm 1: movddup 128(<input_0=int64#1),>r0=reg128#7
# asm 2: movddup 128(<input_0=%rdi),>r0=%xmm6
movddup 128(%rdi),%xmm6

# qhasm: r1 = mem64[ input_0 + 136 ] x2
# asm 1: movddup 136(<input_0=int64#1),>r1=reg128#8
# asm 2: movddup 136(<input_0=%rdi),>r1=%xmm7
movddup 136(%rdi),%xmm7

# qhasm: r2 = mem64[ input_0 + 144 ] x2
# asm 1: movddup 144(<input_0=int64#1),>r2=reg128#9
# asm 2: movddup 144(<input_0=%rdi),>r2=%xmm8
movddup 144(%rdi),%xmm8

# qhasm: r3 = mem64[ input_0 + 152 ] x2
# asm 1: movddup 152(<input_0=int64#1),>r3=reg128#10
# asm 2: movddup 152(<input_0=%rdi),>r3=%xmm9
movddup 152(%rdi),%xmm9

# qhasm: r4 = mem64[ input_0 + 160 ] x2
# asm 1: movddup 160(<input_0=int64#1),>r4=reg128#11
# asm 2: movddup 160(<input_0=%rdi),>r4=%xmm10
movddup 160(%rdi),%xmm10

# qhasm: r5 = mem64[ input_0 + 168 ] x2
# asm 1: movddup 168(<input_0=int64#1),>r5=reg128#12
# asm 2: movddup 168(<input_0=%rdi),>r5=%xmm11
movddup 168(%rdi),%xmm11

# qhasm: r6 = mem64[ input_0 + 176 ] x2
# asm 1: movddup 176(<input_0=int64#1),>r6=reg128#13
# asm 2: movddup 176(<input_0=%rdi),>r6=%xmm12
movddup 176(%rdi),%xmm12

# qhasm: r7 = mem64[ input_0 + 184 ] x2
# asm 1: movddup 184(<input_0=int64#1),>r7=reg128#14
# asm 2: movddup 184(<input_0=%rdi),>r7=%xmm13
movddup 184(%rdi),%xmm13

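# Stage 1: swap 4-bit groups between rows four apart (r0<->r4, r1<->r5,
# r2<->r6, r3<->r7), using mask0/mask1 (the MASK5_* constants) with shifts
# of 4. A rough C sketch of one butterfly, assuming m0 and m1 select
# complementary 4-bit groups of a 64-bit row (annotation only, not qhasm
# output):
#
#     uint64_t lo2 = (lo & m0) | ((hi & m0) << 4);
#     uint64_t hi2 = ((lo & m1) >> 4) | (hi & m1);
#     lo = lo2; hi = hi2;
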
# qhasm: v00 = r0 & mask0
# asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15
# asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14
vpand %xmm0,%xmm6,%xmm14

# qhasm: v10 = r4 & mask0
# asm 1: vpand <mask0=reg128#1,<r4=reg128#11,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<r4=%xmm10,>v10=%xmm15
vpand %xmm0,%xmm10,%xmm15

# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4,%xmm15

# qhasm: v01 = r0 & mask1
# asm 1: vpand <mask1=reg128#2,<r0=reg128#7,>v01=reg128#7
# asm 2: vpand <mask1=%xmm1,<r0=%xmm6,>v01=%xmm6
vpand %xmm1,%xmm6,%xmm6

# qhasm: v11 = r4 & mask1
# asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11
# asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10
vpand %xmm1,%xmm10,%xmm10

# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#7
# asm 2: psrlq $4,<v01=%xmm6
psrlq $4,%xmm6

# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14
vpor %xmm15,%xmm14,%xmm14

# qhasm: r4 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7
# asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6
vpor %xmm10,%xmm6,%xmm6

# qhasm: v00 = r1 & mask0
# asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11
# asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10
vpand %xmm0,%xmm7,%xmm10

# qhasm: v10 = r5 & mask0
# asm 1: vpand <mask0=reg128#1,<r5=reg128#12,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<r5=%xmm11,>v10=%xmm15
vpand %xmm0,%xmm11,%xmm15

# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4,%xmm15

# qhasm: v01 = r1 & mask1
# asm 1: vpand <mask1=reg128#2,<r1=reg128#8,>v01=reg128#8
# asm 2: vpand <mask1=%xmm1,<r1=%xmm7,>v01=%xmm7
vpand %xmm1,%xmm7,%xmm7

# qhasm: v11 = r5 & mask1
# asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12
# asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11
vpand %xmm1,%xmm11,%xmm11

# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#8
# asm 2: psrlq $4,<v01=%xmm7
psrlq $4,%xmm7

# qhasm: r1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11
# asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10
vpor %xmm15,%xmm10,%xmm10

# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8
# asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7
vpor %xmm11,%xmm7,%xmm7

# qhasm: v00 = r2 & mask0
# asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12
# asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11
vpand %xmm0,%xmm8,%xmm11

# qhasm: v10 = r6 & mask0
# asm 1: vpand <mask0=reg128#1,<r6=reg128#13,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<r6=%xmm12,>v10=%xmm15
vpand %xmm0,%xmm12,%xmm15

# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4,%xmm15

# qhasm: v01 = r2 & mask1
# asm 1: vpand <mask1=reg128#2,<r2=reg128#9,>v01=reg128#9
# asm 2: vpand <mask1=%xmm1,<r2=%xmm8,>v01=%xmm8
vpand %xmm1,%xmm8,%xmm8

# qhasm: v11 = r6 & mask1
# asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13
# asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12
vpand %xmm1,%xmm12,%xmm12

# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#9
# asm 2: psrlq $4,<v01=%xmm8
psrlq $4,%xmm8

# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11
vpor %xmm15,%xmm11,%xmm11

# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9
# asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8
vpor %xmm12,%xmm8,%xmm8

# qhasm: v00 = r3 & mask0
# asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13
# asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12
vpand %xmm0,%xmm9,%xmm12

# qhasm: v10 = r7 & mask0
# asm 1: vpand <mask0=reg128#1,<r7=reg128#14,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<r7=%xmm13,>v10=%xmm15
vpand %xmm0,%xmm13,%xmm15

# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4,%xmm15

# qhasm: v01 = r3 & mask1
# asm 1: vpand <mask1=reg128#2,<r3=reg128#10,>v01=reg128#10
# asm 2: vpand <mask1=%xmm1,<r3=%xmm9,>v01=%xmm9
vpand %xmm1,%xmm9,%xmm9

# qhasm: v11 = r7 & mask1
# asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14
# asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13
vpand %xmm1,%xmm13,%xmm13

# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#10
# asm 2: psrlq $4,<v01=%xmm9
psrlq $4,%xmm9

# qhasm: r3 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12
vpor %xmm15,%xmm12,%xmm12

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10
# asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9
vpor %xmm13,%xmm9,%xmm9

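# Stage 2: the same butterfly at distance two (r0<->r2, r1<->r3, r4<->r6,
# r5<->r7), now using mask2/mask3 (the MASK4_* constants) with shifts of 2.
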
# qhasm: v00 = r0 & mask2
# asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14
# asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13
vpand %xmm2,%xmm14,%xmm13

# qhasm: v10 = r2 & mask2
# asm 1: vpand <mask2=reg128#3,<r2=reg128#12,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<r2=%xmm11,>v10=%xmm15
vpand %xmm2,%xmm11,%xmm15

# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2,%xmm15

# qhasm: v01 = r0 & mask3
# asm 1: vpand <mask3=reg128#4,<r0=reg128#15,>v01=reg128#15
# asm 2: vpand <mask3=%xmm3,<r0=%xmm14,>v01=%xmm14
vpand %xmm3,%xmm14,%xmm14

# qhasm: v11 = r2 & mask3
# asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12
# asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11
vpand %xmm3,%xmm11,%xmm11

# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#15
# asm 2: psrlq $2,<v01=%xmm14
psrlq $2,%xmm14

# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14
# asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13
vpor %xmm15,%xmm13,%xmm13

# qhasm: r2 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12
# asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11
vpor %xmm11,%xmm14,%xmm11

# qhasm: v00 = r1 & mask2
# asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15
# asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14
vpand %xmm2,%xmm10,%xmm14

# qhasm: v10 = r3 & mask2
# asm 1: vpand <mask2=reg128#3,<r3=reg128#13,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<r3=%xmm12,>v10=%xmm15
vpand %xmm2,%xmm12,%xmm15

# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2,%xmm15

# qhasm: v01 = r1 & mask3
# asm 1: vpand <mask3=reg128#4,<r1=reg128#11,>v01=reg128#11
# asm 2: vpand <mask3=%xmm3,<r1=%xmm10,>v01=%xmm10
vpand %xmm3,%xmm10,%xmm10

# qhasm: v11 = r3 & mask3
# asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13
# asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12
vpand %xmm3,%xmm12,%xmm12

# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#11
# asm 2: psrlq $2,<v01=%xmm10
psrlq $2,%xmm10

# qhasm: r1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14
vpor %xmm15,%xmm14,%xmm14

# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11
# asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10
vpor %xmm12,%xmm10,%xmm10

# qhasm: v00 = r4 & mask2
# asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13
# asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12
vpand %xmm2,%xmm6,%xmm12

# qhasm: v10 = r6 & mask2
# asm 1: vpand <mask2=reg128#3,<r6=reg128#9,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<r6=%xmm8,>v10=%xmm15
vpand %xmm2,%xmm8,%xmm15

# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2,%xmm15

# qhasm: v01 = r4 & mask3
# asm 1: vpand <mask3=reg128#4,<r4=reg128#7,>v01=reg128#7
# asm 2: vpand <mask3=%xmm3,<r4=%xmm6,>v01=%xmm6
vpand %xmm3,%xmm6,%xmm6

# qhasm: v11 = r6 & mask3
# asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9
# asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8
vpand %xmm3,%xmm8,%xmm8

# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#7
# asm 2: psrlq $2,<v01=%xmm6
psrlq $2,%xmm6

# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12
vpor %xmm15,%xmm12,%xmm12

# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7
# asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6
vpor %xmm8,%xmm6,%xmm6

# qhasm: v00 = r5 & mask2
# asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9
# asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8
vpand %xmm2,%xmm7,%xmm8

# qhasm: v10 = r7 & mask2
# asm 1: vpand <mask2=reg128#3,<r7=reg128#10,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<r7=%xmm9,>v10=%xmm15
vpand %xmm2,%xmm9,%xmm15

# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2,%xmm15

# qhasm: v01 = r5 & mask3
# asm 1: vpand <mask3=reg128#4,<r5=reg128#8,>v01=reg128#8
# asm 2: vpand <mask3=%xmm3,<r5=%xmm7,>v01=%xmm7
vpand %xmm3,%xmm7,%xmm7

# qhasm: v11 = r7 & mask3
# asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10
# asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9
vpand %xmm3,%xmm9,%xmm9

# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#8
# asm 2: psrlq $2,<v01=%xmm7
psrlq $2,%xmm7

# qhasm: r5 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9
# asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8
vpor %xmm15,%xmm8,%xmm8

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8
# asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7
vpor %xmm9,%xmm7,%xmm7

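# Stage 3: the final butterfly at distance one (r0<->r1, r2<->r3, r4<->r5,
# r6<->r7), using mask4/mask5 (the MASK3_* constants) with shifts of 1.
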
# qhasm: v00 = r0 & mask4
# asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9
vpand %xmm4,%xmm13,%xmm9

# qhasm: v10 = r1 & mask4
# asm 1: vpand <mask4=reg128#5,<r1=reg128#15,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<r1=%xmm14,>v10=%xmm15
vpand %xmm4,%xmm14,%xmm15

# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1,%xmm15

# qhasm: v01 = r0 & mask5
# asm 1: vpand <mask5=reg128#6,<r0=reg128#14,>v01=reg128#14
# asm 2: vpand <mask5=%xmm5,<r0=%xmm13,>v01=%xmm13
vpand %xmm5,%xmm13,%xmm13

# qhasm: v11 = r1 & mask5
# asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15
# asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14
vpand %xmm5,%xmm14,%xmm14

# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#14
# asm 2: psrlq $1,<v01=%xmm13
psrlq $1,%xmm13

# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10
# asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9
vpor %xmm15,%xmm9,%xmm9

# qhasm: r1 = v01 | v11
# asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14
# asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13
vpor %xmm14,%xmm13,%xmm13

# qhasm: v00 = r2 & mask4
# asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15
# asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14
vpand %xmm4,%xmm11,%xmm14

# qhasm: v10 = r3 & mask4
# asm 1: vpand <mask4=reg128#5,<r3=reg128#11,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<r3=%xmm10,>v10=%xmm15
vpand %xmm4,%xmm10,%xmm15

# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1,%xmm15

# qhasm: v01 = r2 & mask5
# asm 1: vpand <mask5=reg128#6,<r2=reg128#12,>v01=reg128#12
# asm 2: vpand <mask5=%xmm5,<r2=%xmm11,>v01=%xmm11
vpand %xmm5,%xmm11,%xmm11

# qhasm: v11 = r3 & mask5
# asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11
# asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10
vpand %xmm5,%xmm10,%xmm10

# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#12
# asm 2: psrlq $1,<v01=%xmm11
psrlq $1,%xmm11

# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14
vpor %xmm15,%xmm14,%xmm14

# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11
# asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10
vpor %xmm10,%xmm11,%xmm10

# qhasm: v00 = r4 & mask4
# asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12
# asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11
vpand %xmm4,%xmm12,%xmm11

# qhasm: v10 = r5 & mask4
# asm 1: vpand <mask4=reg128#5,<r5=reg128#9,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<r5=%xmm8,>v10=%xmm15
vpand %xmm4,%xmm8,%xmm15

# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1,%xmm15

# qhasm: v01 = r4 & mask5
# asm 1: vpand <mask5=reg128#6,<r4=reg128#13,>v01=reg128#13
# asm 2: vpand <mask5=%xmm5,<r4=%xmm12,>v01=%xmm12
vpand %xmm5,%xmm12,%xmm12

# qhasm: v11 = r5 & mask5
# asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9
# asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8
vpand %xmm5,%xmm8,%xmm8

# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#13
# asm 2: psrlq $1,<v01=%xmm12
psrlq $1,%xmm12

# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11
vpor %xmm15,%xmm11,%xmm11

# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9
# asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8
vpor %xmm8,%xmm12,%xmm8

# qhasm: v00 = r6 & mask4
# asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13
# asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12
vpand %xmm4,%xmm6,%xmm12

# qhasm: v10 = r7 & mask4
# asm 1: vpand <mask4=reg128#5,<r7=reg128#8,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<r7=%xmm7,>v10=%xmm15
vpand %xmm4,%xmm7,%xmm15

# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1,%xmm15

# qhasm: v01 = r6 & mask5
# asm 1: vpand <mask5=reg128#6,<r6=reg128#7,>v01=reg128#7
# asm 2: vpand <mask5=%xmm5,<r6=%xmm6,>v01=%xmm6
vpand %xmm5,%xmm6,%xmm6

# qhasm: v11 = r7 & mask5
# asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8
# asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7
vpand %xmm5,%xmm7,%xmm7

# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#7
# asm 2: psrlq $1,<v01=%xmm6
psrlq $1,%xmm6

# qhasm: r6 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12
vpor %xmm15,%xmm12,%xmm12

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7
# asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6
vpor %xmm7,%xmm6,%xmm6

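# Pack the updated low qwords pairwise and store them back to bytes
# 128..191, as at the end of the previous group.
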
# qhasm: t0 = r0[0]r1[0]
# asm 1: vpunpcklqdq <r1=reg128#14,<r0=reg128#10,>t0=reg128#8
# asm 2: vpunpcklqdq <r1=%xmm13,<r0=%xmm9,>t0=%xmm7
vpunpcklqdq %xmm13,%xmm9,%xmm7

# qhasm: mem128[ input_0 + 128 ] = t0
# asm 1: movdqu   <t0=reg128#8,128(<input_0=int64#1)
# asm 2: movdqu   <t0=%xmm7,128(<input_0=%rdi)
movdqu   %xmm7,128(%rdi)

# qhasm: t0 = r2[0]r3[0]
# asm 1: vpunpcklqdq <r3=reg128#11,<r2=reg128#15,>t0=reg128#8
# asm 2: vpunpcklqdq <r3=%xmm10,<r2=%xmm14,>t0=%xmm7
vpunpcklqdq %xmm10,%xmm14,%xmm7

# qhasm: mem128[ input_0 + 144 ] = t0
# asm 1: movdqu   <t0=reg128#8,144(<input_0=int64#1)
# asm 2: movdqu   <t0=%xmm7,144(<input_0=%rdi)
movdqu   %xmm7,144(%rdi)

# qhasm: t0 = r4[0]r5[0]
# asm 1: vpunpcklqdq <r5=reg128#9,<r4=reg128#12,>t0=reg128#8
# asm 2: vpunpcklqdq <r5=%xmm8,<r4=%xmm11,>t0=%xmm7
vpunpcklqdq %xmm8,%xmm11,%xmm7

# qhasm: mem128[ input_0 + 160 ] = t0
# asm 1: movdqu   <t0=reg128#8,160(<input_0=int64#1)
# asm 2: movdqu   <t0=%xmm7,160(<input_0=%rdi)
movdqu   %xmm7,160(%rdi)

# qhasm: t0 = r6[0]r7[0]
# asm 1: vpunpcklqdq <r7=reg128#7,<r6=reg128#13,>t0=reg128#7
# asm 2: vpunpcklqdq <r7=%xmm6,<r6=%xmm12,>t0=%xmm6
vpunpcklqdq %xmm6,%xmm12,%xmm6

# qhasm: mem128[ input_0 + 176 ] = t0
# asm 1: movdqu   <t0=reg128#7,176(<input_0=int64#1)
# asm 2: movdqu   <t0=%xmm6,176(<input_0=%rdi)
movdqu   %xmm6,176(%rdi)

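# The load / three-stage swap / pack-and-store pattern repeats unchanged
# for the eight rows at bytes 192..255 (rows 24..31).
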
# qhasm: r0 = mem64[ input_0 + 192 ] x2
# asm 1: movddup 192(<input_0=int64#1),>r0=reg128#7
# asm 2: movddup 192(<input_0=%rdi),>r0=%xmm6
movddup 192(%rdi),%xmm6

# qhasm: r1 = mem64[ input_0 + 200 ] x2
# asm 1: movddup 200(<input_0=int64#1),>r1=reg128#8
# asm 2: movddup 200(<input_0=%rdi),>r1=%xmm7
movddup 200(%rdi),%xmm7

# qhasm: r2 = mem64[ input_0 + 208 ] x2
# asm 1: movddup 208(<input_0=int64#1),>r2=reg128#9
# asm 2: movddup 208(<input_0=%rdi),>r2=%xmm8
movddup 208(%rdi),%xmm8

# qhasm: r3 = mem64[ input_0 + 216 ] x2
# asm 1: movddup 216(<input_0=int64#1),>r3=reg128#10
# asm 2: movddup 216(<input_0=%rdi),>r3=%xmm9
movddup 216(%rdi),%xmm9

# qhasm: r4 = mem64[ input_0 + 224 ] x2
# asm 1: movddup 224(<input_0=int64#1),>r4=reg128#11
# asm 2: movddup 224(<input_0=%rdi),>r4=%xmm10
movddup 224(%rdi),%xmm10

# qhasm: r5 = mem64[ input_0 + 232 ] x2
# asm 1: movddup 232(<input_0=int64#1),>r5=reg128#12
# asm 2: movddup 232(<input_0=%rdi),>r5=%xmm11
movddup 232(%rdi),%xmm11

# qhasm: r6 = mem64[ input_0 + 240 ] x2
# asm 1: movddup 240(<input_0=int64#1),>r6=reg128#13
# asm 2: movddup 240(<input_0=%rdi),>r6=%xmm12
movddup 240(%rdi),%xmm12

# qhasm: r7 = mem64[ input_0 + 248 ] x2
# asm 1: movddup 248(<input_0=int64#1),>r7=reg128#14
# asm 2: movddup 248(<input_0=%rdi),>r7=%xmm13
movddup 248(%rdi),%xmm13

# qhasm: v00 = r0 & mask0
# asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15
# asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14
vpand %xmm0,%xmm6,%xmm14

# qhasm: v10 = r4 & mask0
# asm 1: vpand <mask0=reg128#1,<r4=reg128#11,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<r4=%xmm10,>v10=%xmm15
vpand %xmm0,%xmm10,%xmm15

# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4,%xmm15

# qhasm: v01 = r0 & mask1
# asm 1: vpand <mask1=reg128#2,<r0=reg128#7,>v01=reg128#7
# asm 2: vpand <mask1=%xmm1,<r0=%xmm6,>v01=%xmm6
vpand %xmm1,%xmm6,%xmm6

# qhasm: v11 = r4 & mask1
# asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11
# asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10
vpand %xmm1,%xmm10,%xmm10

# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#7
# asm 2: psrlq $4,<v01=%xmm6
psrlq $4,%xmm6

# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14
vpor %xmm15,%xmm14,%xmm14

# qhasm: r4 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7
# asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6
vpor %xmm10,%xmm6,%xmm6

# qhasm: v00 = r1 & mask0
# asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11
# asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10
vpand %xmm0,%xmm7,%xmm10

# qhasm: v10 = r5 & mask0
# asm 1: vpand <mask0=reg128#1,<r5=reg128#12,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<r5=%xmm11,>v10=%xmm15
vpand %xmm0,%xmm11,%xmm15

# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4,%xmm15

# qhasm: v01 = r1 & mask1
# asm 1: vpand <mask1=reg128#2,<r1=reg128#8,>v01=reg128#8
# asm 2: vpand <mask1=%xmm1,<r1=%xmm7,>v01=%xmm7
vpand %xmm1,%xmm7,%xmm7

# qhasm: v11 = r5 & mask1
# asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12
# asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11
vpand %xmm1,%xmm11,%xmm11

# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#8
# asm 2: psrlq $4,<v01=%xmm7
psrlq $4,%xmm7

# qhasm: r1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11
# asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10
vpor %xmm15,%xmm10,%xmm10

# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8
# asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7
vpor %xmm11,%xmm7,%xmm7

# qhasm: v00 = r2 & mask0
# asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12
# asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11
vpand %xmm0,%xmm8,%xmm11

# qhasm: v10 = r6 & mask0
# asm 1: vpand <mask0=reg128#1,<r6=reg128#13,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<r6=%xmm12,>v10=%xmm15
vpand %xmm0,%xmm12,%xmm15

# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4,%xmm15

# qhasm: v01 = r2 & mask1
# asm 1: vpand <mask1=reg128#2,<r2=reg128#9,>v01=reg128#9
# asm 2: vpand <mask1=%xmm1,<r2=%xmm8,>v01=%xmm8
vpand %xmm1,%xmm8,%xmm8

# qhasm: v11 = r6 & mask1
# asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13
# asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12
vpand %xmm1,%xmm12,%xmm12

# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#9
# asm 2: psrlq $4,<v01=%xmm8
psrlq $4,%xmm8

# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11
vpor %xmm15,%xmm11,%xmm11

# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9
# asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8
vpor %xmm12,%xmm8,%xmm8

# qhasm: v00 = r3 & mask0
# asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13
# asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12
vpand %xmm0,%xmm9,%xmm12

# qhasm: v10 = r7 & mask0
# asm 1: vpand <mask0=reg128#1,<r7=reg128#14,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<r7=%xmm13,>v10=%xmm15
vpand %xmm0,%xmm13,%xmm15

# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4,%xmm15

# qhasm: v01 = r3 & mask1
# asm 1: vpand <mask1=reg128#2,<r3=reg128#10,>v01=reg128#10
# asm 2: vpand <mask1=%xmm1,<r3=%xmm9,>v01=%xmm9
vpand %xmm1,%xmm9,%xmm9

# qhasm: v11 = r7 & mask1
# asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14
# asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13
vpand %xmm1,%xmm13,%xmm13

# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#10
# asm 2: psrlq $4,<v01=%xmm9
psrlq $4,%xmm9

# qhasm: r3 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12
vpor %xmm15,%xmm12,%xmm12

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10
# asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9
vpor %xmm13,%xmm9,%xmm9

# qhasm: v00 = r0 & mask2
# asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14
# asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13
vpand %xmm2,%xmm14,%xmm13

# qhasm: v10 = r2 & mask2
# asm 1: vpand <mask2=reg128#3,<r2=reg128#12,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<r2=%xmm11,>v10=%xmm15
vpand %xmm2,%xmm11,%xmm15

# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2,%xmm15

# qhasm: v01 = r0 & mask3
# asm 1: vpand <mask3=reg128#4,<r0=reg128#15,>v01=reg128#15
# asm 2: vpand <mask3=%xmm3,<r0=%xmm14,>v01=%xmm14
vpand %xmm3,%xmm14,%xmm14

# qhasm: v11 = r2 & mask3
# asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12
# asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11
vpand %xmm3,%xmm11,%xmm11

# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#15
# asm 2: psrlq $2,<v01=%xmm14
psrlq $2,%xmm14

# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14
# asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13
vpor %xmm15,%xmm13,%xmm13

# qhasm: r2 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12
# asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11
vpor %xmm11,%xmm14,%xmm11

# qhasm: v00 = r1 & mask2
# asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15
# asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14
vpand %xmm2,%xmm10,%xmm14

# qhasm: v10 = r3 & mask2
# asm 1: vpand <mask2=reg128#3,<r3=reg128#13,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<r3=%xmm12,>v10=%xmm15
vpand %xmm2,%xmm12,%xmm15

# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2,%xmm15

# qhasm: v01 = r1 & mask3
# asm 1: vpand <mask3=reg128#4,<r1=reg128#11,>v01=reg128#11
# asm 2: vpand <mask3=%xmm3,<r1=%xmm10,>v01=%xmm10
vpand %xmm3,%xmm10,%xmm10

# qhasm: v11 = r3 & mask3
# asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13
# asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12
vpand %xmm3,%xmm12,%xmm12

# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#11
# asm 2: psrlq $2,<v01=%xmm10
psrlq $2,%xmm10

# qhasm: r1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14
vpor %xmm15,%xmm14,%xmm14

# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11
# asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10
vpor %xmm12,%xmm10,%xmm10

# qhasm: v00 = r4 & mask2
# asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13
# asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12
vpand %xmm2,%xmm6,%xmm12

# qhasm: v10 = r6 & mask2
# asm 1: vpand <mask2=reg128#3,<r6=reg128#9,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<r6=%xmm8,>v10=%xmm15
vpand %xmm2,%xmm8,%xmm15

# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2,%xmm15

# qhasm: v01 = r4 & mask3
# asm 1: vpand <mask3=reg128#4,<r4=reg128#7,>v01=reg128#7
# asm 2: vpand <mask3=%xmm3,<r4=%xmm6,>v01=%xmm6
vpand %xmm3,%xmm6,%xmm6

# qhasm: v11 = r6 & mask3
# asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9
# asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8
vpand %xmm3,%xmm8,%xmm8

# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#7
# asm 2: psrlq $2,<v01=%xmm6
psrlq $2,%xmm6

# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12
vpor %xmm15,%xmm12,%xmm12

# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7
# asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6
vpor %xmm8,%xmm6,%xmm6

# qhasm: v00 = r5 & mask2
# asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9
# asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8
vpand %xmm2,%xmm7,%xmm8

# qhasm: v10 = r7 & mask2
# asm 1: vpand <mask2=reg128#3,<r7=reg128#10,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<r7=%xmm9,>v10=%xmm15
vpand %xmm2,%xmm9,%xmm15

# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2,%xmm15

# qhasm: v01 = r5 & mask3
# asm 1: vpand <mask3=reg128#4,<r5=reg128#8,>v01=reg128#8
# asm 2: vpand <mask3=%xmm3,<r5=%xmm7,>v01=%xmm7
vpand %xmm3,%xmm7,%xmm7

# qhasm: v11 = r7 & mask3
# asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10
# asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9
vpand %xmm3,%xmm9,%xmm9

# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#8
# asm 2: psrlq $2,<v01=%xmm7
psrlq $2,%xmm7

# qhasm: r5 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9
# asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8
vpor %xmm15,%xmm8,%xmm8

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8
# asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7
vpor %xmm9,%xmm7,%xmm7

# qhasm: v00 = r0 & mask4
# asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9
vpand %xmm4,%xmm13,%xmm9

# qhasm: v10 = r1 & mask4
# asm 1: vpand <mask4=reg128#5,<r1=reg128#15,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<r1=%xmm14,>v10=%xmm15
vpand %xmm4,%xmm14,%xmm15

# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1,%xmm15

# qhasm: v01 = r0 & mask5
# asm 1: vpand <mask5=reg128#6,<r0=reg128#14,>v01=reg128#14
# asm 2: vpand <mask5=%xmm5,<r0=%xmm13,>v01=%xmm13
vpand %xmm5,%xmm13,%xmm13

# qhasm: v11 = r1 & mask5
# asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15
# asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14
vpand %xmm5,%xmm14,%xmm14

# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#14
# asm 2: psrlq $1,<v01=%xmm13
psrlq $1,%xmm13

# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10
# asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9
vpor %xmm15,%xmm9,%xmm9

# qhasm: r1 = v01 | v11
# asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14
# asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13
vpor %xmm14,%xmm13,%xmm13

# qhasm: v00 = r2 & mask4
# asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15
# asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14
vpand %xmm4,%xmm11,%xmm14

# qhasm: v10 = r3 & mask4
# asm 1: vpand <mask4=reg128#5,<r3=reg128#11,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<r3=%xmm10,>v10=%xmm15
vpand %xmm4,%xmm10,%xmm15

# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1,%xmm15

# qhasm: v01 = r2 & mask5
# asm 1: vpand <mask5=reg128#6,<r2=reg128#12,>v01=reg128#12
# asm 2: vpand <mask5=%xmm5,<r2=%xmm11,>v01=%xmm11
vpand %xmm5,%xmm11,%xmm11

# qhasm: v11 = r3 & mask5
# asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11
# asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10
vpand %xmm5,%xmm10,%xmm10

# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#12
# asm 2: psrlq $1,<v01=%xmm11
psrlq $1,%xmm11

# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14
vpor %xmm15,%xmm14,%xmm14

# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11
# asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10
vpor %xmm10,%xmm11,%xmm10

# qhasm: v00 = r4 & mask4
# asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12
# asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11
vpand %xmm4,%xmm12,%xmm11

# qhasm: v10 = r5 & mask4
# asm 1: vpand <mask4=reg128#5,<r5=reg128#9,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<r5=%xmm8,>v10=%xmm15
vpand %xmm4,%xmm8,%xmm15

# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1,%xmm15

# qhasm: v01 = r4 & mask5
# asm 1: vpand <mask5=reg128#6,<r4=reg128#13,>v01=reg128#13
# asm 2: vpand <mask5=%xmm5,<r4=%xmm12,>v01=%xmm12
vpand %xmm5,%xmm12,%xmm12

# qhasm: v11 = r5 & mask5
# asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9
# asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8
vpand %xmm5,%xmm8,%xmm8

# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#13
# asm 2: psrlq $1,<v01=%xmm12
psrlq $1,%xmm12

# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11
vpor %xmm15,%xmm11,%xmm11

# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9
# asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8
vpor %xmm8,%xmm12,%xmm8

# qhasm: v00 = r6 & mask4
# asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13
# asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12
vpand %xmm4,%xmm6,%xmm12

# qhasm: v10 = r7 & mask4
# asm 1: vpand <mask4=reg128#5,<r7=reg128#8,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<r7=%xmm7,>v10=%xmm15
vpand %xmm4,%xmm7,%xmm15

# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1,%xmm15

# qhasm: v01 = r6 & mask5
# asm 1: vpand <mask5=reg128#6,<r6=reg128#7,>v01=reg128#7
# asm 2: vpand <mask5=%xmm5,<r6=%xmm6,>v01=%xmm6
vpand %xmm5,%xmm6,%xmm6

# qhasm: v11 = r7 & mask5
# asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8
# asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7
vpand %xmm5,%xmm7,%xmm7

# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#7
# asm 2: psrlq $1,<v01=%xmm6
psrlq $1,%xmm6

# qhasm: r6 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12
vpor %xmm15,%xmm12,%xmm12

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7
# asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6
vpor %xmm7,%xmm6,%xmm6

# qhasm: t0 = r0[0]r1[0]
# asm 1: vpunpcklqdq <r1=reg128#14,<r0=reg128#10,>t0=reg128#8
# asm 2: vpunpcklqdq <r1=%xmm13,<r0=%xmm9,>t0=%xmm7
vpunpcklqdq %xmm13,%xmm9,%xmm7

# qhasm: mem128[ input_0 + 192 ] = t0
# asm 1: movdqu   <t0=reg128#8,192(<input_0=int64#1)
# asm 2: movdqu   <t0=%xmm7,192(<input_0=%rdi)
movdqu   %xmm7,192(%rdi)

# qhasm: t0 = r2[0]r3[0]
# asm 1: vpunpcklqdq <r3=reg128#11,<r2=reg128#15,>t0=reg128#8
# asm 2: vpunpcklqdq <r3=%xmm10,<r2=%xmm14,>t0=%xmm7
vpunpcklqdq %xmm10,%xmm14,%xmm7

# qhasm: mem128[ input_0 + 208 ] = t0
# asm 1: movdqu   <t0=reg128#8,208(<input_0=int64#1)
# asm 2: movdqu   <t0=%xmm7,208(<input_0=%rdi)
movdqu   %xmm7,208(%rdi)

# qhasm: t0 = r4[0]r5[0]
# asm 1: vpunpcklqdq <r5=reg128#9,<r4=reg128#12,>t0=reg128#8
# asm 2: vpunpcklqdq <r5=%xmm8,<r4=%xmm11,>t0=%xmm7
vpunpcklqdq %xmm8,%xmm11,%xmm7

# qhasm: mem128[ input_0 + 224 ] = t0
# asm 1: movdqu   <t0=reg128#8,224(<input_0=int64#1)
# asm 2: movdqu   <t0=%xmm7,224(<input_0=%rdi)
movdqu   %xmm7,224(%rdi)

# qhasm: t0 = r6[0]r7[0]
# asm 1: vpunpcklqdq <r7=reg128#7,<r6=reg128#13,>t0=reg128#7
# asm 2: vpunpcklqdq <r7=%xmm6,<r6=%xmm12,>t0=%xmm6
vpunpcklqdq %xmm6,%xmm12,%xmm6

# qhasm: mem128[ input_0 + 240 ] = t0
# asm 1: movdqu   <t0=reg128#7,240(<input_0=int64#1)
# asm 2: movdqu   <t0=%xmm6,240(<input_0=%rdi)
movdqu   %xmm6,240(%rdi)

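# ...and once more for the eight rows at bytes 256..319 (rows 32..39).
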
6225# qhasm: r0 = mem64[ input_0 + 256 ] x2
6226# asm 1: movddup 256(<input_0=int64#1),>r0=reg128#7
6227# asm 2: movddup 256(<input_0=%rdi),>r0=%xmm6
6228movddup 256(%rdi),%xmm6
6229
6230# qhasm: r1 = mem64[ input_0 + 264 ] x2
6231# asm 1: movddup 264(<input_0=int64#1),>r1=reg128#8
6232# asm 2: movddup 264(<input_0=%rdi),>r1=%xmm7
6233movddup 264(%rdi),%xmm7
6234
6235# qhasm: r2 = mem64[ input_0 + 272 ] x2
6236# asm 1: movddup 272(<input_0=int64#1),>r2=reg128#9
6237# asm 2: movddup 272(<input_0=%rdi),>r2=%xmm8
6238movddup 272(%rdi),%xmm8
6239
6240# qhasm: r3 = mem64[ input_0 + 280 ] x2
6241# asm 1: movddup 280(<input_0=int64#1),>r3=reg128#10
6242# asm 2: movddup 280(<input_0=%rdi),>r3=%xmm9
6243movddup 280(%rdi),%xmm9
6244
6245# qhasm: r4 = mem64[ input_0 + 288 ] x2
6246# asm 1: movddup 288(<input_0=int64#1),>r4=reg128#11
6247# asm 2: movddup 288(<input_0=%rdi),>r4=%xmm10
6248movddup 288(%rdi),%xmm10
6249
6250# qhasm: r5 = mem64[ input_0 + 296 ] x2
6251# asm 1: movddup 296(<input_0=int64#1),>r5=reg128#12
6252# asm 2: movddup 296(<input_0=%rdi),>r5=%xmm11
6253movddup 296(%rdi),%xmm11
6254
6255# qhasm: r6 = mem64[ input_0 + 304 ] x2
6256# asm 1: movddup 304(<input_0=int64#1),>r6=reg128#13
6257# asm 2: movddup 304(<input_0=%rdi),>r6=%xmm12
6258movddup 304(%rdi),%xmm12
6259
6260# qhasm: r7 = mem64[ input_0 + 312 ] x2
6261# asm 1: movddup 312(<input_0=int64#1),>r7=reg128#14
6262# asm 2: movddup 312(<input_0=%rdi),>r7=%xmm13
6263movddup 312(%rdi),%xmm13
6264
6265# qhasm: v00 = r0 & mask0
6266# asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15
6267# asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14
6268vpand %xmm0,%xmm6,%xmm14
6269
6270# qhasm: v10 = r4 & mask0
6271# asm 1: vpand <mask0=reg128#1,<r4=reg128#11,>v10=reg128#16
6272# asm 2: vpand <mask0=%xmm0,<r4=%xmm10,>v10=%xmm15
6273vpand %xmm0,%xmm10,%xmm15
6274
6275# qhasm: 2x v10 <<= 4
6276# asm 1: psllq $4,<v10=reg128#16
6277# asm 2: psllq $4,<v10=%xmm15
6278psllq $4,%xmm15
6279
6280# qhasm: v01 = r0 & mask1
6281# asm 1: vpand <mask1=reg128#2,<r0=reg128#7,>v01=reg128#7
6282# asm 2: vpand <mask1=%xmm1,<r0=%xmm6,>v01=%xmm6
6283vpand %xmm1,%xmm6,%xmm6
6284
6285# qhasm: v11 = r4 & mask1
6286# asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11
6287# asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10
6288vpand %xmm1,%xmm10,%xmm10
6289
6290# qhasm: 2x v01 unsigned>>= 4
6291# asm 1: psrlq $4,<v01=reg128#7
6292# asm 2: psrlq $4,<v01=%xmm6
6293psrlq $4,%xmm6
6294
6295# qhasm: r0 = v00 | v10
6296# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15
6297# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14
6298vpor %xmm15,%xmm14,%xmm14
6299
6300# qhasm: r4 = v01 | v11
6301# asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7
6302# asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6
6303vpor %xmm10,%xmm6,%xmm6
6304
6305# qhasm: v00 = r1 & mask0
6306# asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11
6307# asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10
6308vpand %xmm0,%xmm7,%xmm10
6309
6310# qhasm: v10 = r5 & mask0
6311# asm 1: vpand <mask0=reg128#1,<r5=reg128#12,>v10=reg128#16
6312# asm 2: vpand <mask0=%xmm0,<r5=%xmm11,>v10=%xmm15
6313vpand %xmm0,%xmm11,%xmm15
6314
6315# qhasm: 2x v10 <<= 4
6316# asm 1: psllq $4,<v10=reg128#16
6317# asm 2: psllq $4,<v10=%xmm15
6318psllq $4,%xmm15
6319
6320# qhasm: v01 = r1 & mask1
6321# asm 1: vpand <mask1=reg128#2,<r1=reg128#8,>v01=reg128#8
6322# asm 2: vpand <mask1=%xmm1,<r1=%xmm7,>v01=%xmm7
6323vpand %xmm1,%xmm7,%xmm7
6324
6325# qhasm: v11 = r5 & mask1
6326# asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12
6327# asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11
6328vpand %xmm1,%xmm11,%xmm11
6329
6330# qhasm: 2x v01 unsigned>>= 4
6331# asm 1: psrlq $4,<v01=reg128#8
6332# asm 2: psrlq $4,<v01=%xmm7
6333psrlq $4,%xmm7
6334
6335# qhasm: r1 = v00 | v10
6336# asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11
6337# asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10
6338vpor %xmm15,%xmm10,%xmm10
6339
6340# qhasm: r5 = v01 | v11
6341# asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8
6342# asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7
6343vpor %xmm11,%xmm7,%xmm7
6344
6345# qhasm: v00 = r2 & mask0
6346# asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12
6347# asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11
6348vpand %xmm0,%xmm8,%xmm11
6349
6350# qhasm: v10 = r6 & mask0
6351# asm 1: vpand <mask0=reg128#1,<r6=reg128#13,>v10=reg128#16
6352# asm 2: vpand <mask0=%xmm0,<r6=%xmm12,>v10=%xmm15
6353vpand %xmm0,%xmm12,%xmm15
6354
6355# qhasm: 2x v10 <<= 4
6356# asm 1: psllq $4,<v10=reg128#16
6357# asm 2: psllq $4,<v10=%xmm15
6358psllq $4,%xmm15
6359
6360# qhasm: v01 = r2 & mask1
6361# asm 1: vpand <mask1=reg128#2,<r2=reg128#9,>v01=reg128#9
6362# asm 2: vpand <mask1=%xmm1,<r2=%xmm8,>v01=%xmm8
6363vpand %xmm1,%xmm8,%xmm8
6364
6365# qhasm: v11 = r6 & mask1
6366# asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13
6367# asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12
6368vpand %xmm1,%xmm12,%xmm12
6369
6370# qhasm: 2x v01 unsigned>>= 4
6371# asm 1: psrlq $4,<v01=reg128#9
6372# asm 2: psrlq $4,<v01=%xmm8
6373psrlq $4,%xmm8
6374
6375# qhasm: r2 = v00 | v10
6376# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12
6377# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11
6378vpor %xmm15,%xmm11,%xmm11
6379
6380# qhasm: r6 = v01 | v11
6381# asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9
6382# asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8
6383vpor %xmm12,%xmm8,%xmm8
6384
6385# qhasm: v00 = r3 & mask0
6386# asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13
6387# asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12
6388vpand %xmm0,%xmm9,%xmm12
6389
6390# qhasm: v10 = r7 & mask0
6391# asm 1: vpand <mask0=reg128#1,<r7=reg128#14,>v10=reg128#16
6392# asm 2: vpand <mask0=%xmm0,<r7=%xmm13,>v10=%xmm15
6393vpand %xmm0,%xmm13,%xmm15
6394
6395# qhasm: 2x v10 <<= 4
6396# asm 1: psllq $4,<v10=reg128#16
6397# asm 2: psllq $4,<v10=%xmm15
6398psllq $4,%xmm15
6399
6400# qhasm: v01 = r3 & mask1
6401# asm 1: vpand <mask1=reg128#2,<r3=reg128#10,>v01=reg128#10
6402# asm 2: vpand <mask1=%xmm1,<r3=%xmm9,>v01=%xmm9
6403vpand %xmm1,%xmm9,%xmm9
6404
6405# qhasm: v11 = r7 & mask1
6406# asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14
6407# asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13
6408vpand %xmm1,%xmm13,%xmm13
6409
6410# qhasm: 2x v01 unsigned>>= 4
6411# asm 1: psrlq $4,<v01=reg128#10
6412# asm 2: psrlq $4,<v01=%xmm9
6413psrlq $4,%xmm9
6414
6415# qhasm: r3 = v00 | v10
6416# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13
6417# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12
6418vpor %xmm15,%xmm12,%xmm12
6419
6420# qhasm: r7 = v01 | v11
6421# asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10
6422# asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9
6423vpor %xmm13,%xmm9,%xmm9
6424
6425# qhasm: v00 = r0 & mask2
6426# asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14
6427# asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13
6428vpand %xmm2,%xmm14,%xmm13
6429
6430# qhasm: v10 = r2 & mask2
6431# asm 1: vpand <mask2=reg128#3,<r2=reg128#12,>v10=reg128#16
6432# asm 2: vpand <mask2=%xmm2,<r2=%xmm11,>v10=%xmm15
6433vpand %xmm2,%xmm11,%xmm15
6434
6435# qhasm: 2x v10 <<= 2
6436# asm 1: psllq $2,<v10=reg128#16
6437# asm 2: psllq $2,<v10=%xmm15
6438psllq $2,%xmm15
6439
6440# qhasm: v01 = r0 & mask3
6441# asm 1: vpand <mask3=reg128#4,<r0=reg128#15,>v01=reg128#15
6442# asm 2: vpand <mask3=%xmm3,<r0=%xmm14,>v01=%xmm14
6443vpand %xmm3,%xmm14,%xmm14
6444
6445# qhasm: v11 = r2 & mask3
6446# asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12
6447# asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11
6448vpand %xmm3,%xmm11,%xmm11
6449
6450# qhasm: 2x v01 unsigned>>= 2
6451# asm 1: psrlq $2,<v01=reg128#15
6452# asm 2: psrlq $2,<v01=%xmm14
6453psrlq $2,%xmm14
6454
6455# qhasm: r0 = v00 | v10
6456# asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14
6457# asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13
6458vpor %xmm15,%xmm13,%xmm13
6459
6460# qhasm: r2 = v01 | v11
6461# asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12
6462# asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11
6463vpor %xmm11,%xmm14,%xmm11
6464
6465# qhasm: v00 = r1 & mask2
6466# asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15
6467# asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14
6468vpand %xmm2,%xmm10,%xmm14
6469
6470# qhasm: v10 = r3 & mask2
6471# asm 1: vpand <mask2=reg128#3,<r3=reg128#13,>v10=reg128#16
6472# asm 2: vpand <mask2=%xmm2,<r3=%xmm12,>v10=%xmm15
6473vpand %xmm2,%xmm12,%xmm15
6474
6475# qhasm: 2x v10 <<= 2
6476# asm 1: psllq $2,<v10=reg128#16
6477# asm 2: psllq $2,<v10=%xmm15
6478psllq $2,%xmm15
6479
6480# qhasm: v01 = r1 & mask3
6481# asm 1: vpand <mask3=reg128#4,<r1=reg128#11,>v01=reg128#11
6482# asm 2: vpand <mask3=%xmm3,<r1=%xmm10,>v01=%xmm10
6483vpand %xmm3,%xmm10,%xmm10
6484
6485# qhasm: v11 = r3 & mask3
6486# asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13
6487# asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12
6488vpand %xmm3,%xmm12,%xmm12
6489
6490# qhasm: 2x v01 unsigned>>= 2
6491# asm 1: psrlq $2,<v01=reg128#11
6492# asm 2: psrlq $2,<v01=%xmm10
6493psrlq $2,%xmm10
6494
6495# qhasm: r1 = v00 | v10
6496# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15
6497# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14
6498vpor %xmm15,%xmm14,%xmm14
6499
6500# qhasm: r3 = v01 | v11
6501# asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11
6502# asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10
6503vpor %xmm12,%xmm10,%xmm10
6504
6505# qhasm: v00 = r4 & mask2
6506# asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13
6507# asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12
6508vpand %xmm2,%xmm6,%xmm12
6509
6510# qhasm: v10 = r6 & mask2
6511# asm 1: vpand <mask2=reg128#3,<r6=reg128#9,>v10=reg128#16
6512# asm 2: vpand <mask2=%xmm2,<r6=%xmm8,>v10=%xmm15
6513vpand %xmm2,%xmm8,%xmm15
6514
6515# qhasm: 2x v10 <<= 2
6516# asm 1: psllq $2,<v10=reg128#16
6517# asm 2: psllq $2,<v10=%xmm15
6518psllq $2,%xmm15
6519
6520# qhasm: v01 = r4 & mask3
6521# asm 1: vpand <mask3=reg128#4,<r4=reg128#7,>v01=reg128#7
6522# asm 2: vpand <mask3=%xmm3,<r4=%xmm6,>v01=%xmm6
6523vpand %xmm3,%xmm6,%xmm6
6524
6525# qhasm: v11 = r6 & mask3
6526# asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9
6527# asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8
6528vpand %xmm3,%xmm8,%xmm8
6529
6530# qhasm: 2x v01 unsigned>>= 2
6531# asm 1: psrlq $2,<v01=reg128#7
6532# asm 2: psrlq $2,<v01=%xmm6
6533psrlq $2,%xmm6
6534
6535# qhasm: r4 = v00 | v10
6536# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13
6537# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12
6538vpor %xmm15,%xmm12,%xmm12
6539
6540# qhasm: r6 = v01 | v11
6541# asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7
6542# asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6
6543vpor %xmm8,%xmm6,%xmm6
6544
6545# qhasm: v00 = r5 & mask2
6546# asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9
6547# asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8
6548vpand %xmm2,%xmm7,%xmm8
6549
6550# qhasm: v10 = r7 & mask2
6551# asm 1: vpand <mask2=reg128#3,<r7=reg128#10,>v10=reg128#16
6552# asm 2: vpand <mask2=%xmm2,<r7=%xmm9,>v10=%xmm15
6553vpand %xmm2,%xmm9,%xmm15
6554
6555# qhasm: 2x v10 <<= 2
6556# asm 1: psllq $2,<v10=reg128#16
6557# asm 2: psllq $2,<v10=%xmm15
6558psllq $2,%xmm15
6559
6560# qhasm: v01 = r5 & mask3
6561# asm 1: vpand <mask3=reg128#4,<r5=reg128#8,>v01=reg128#8
6562# asm 2: vpand <mask3=%xmm3,<r5=%xmm7,>v01=%xmm7
6563vpand %xmm3,%xmm7,%xmm7
6564
6565# qhasm: v11 = r7 & mask3
6566# asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10
6567# asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9
6568vpand %xmm3,%xmm9,%xmm9
6569
6570# qhasm: 2x v01 unsigned>>= 2
6571# asm 1: psrlq $2,<v01=reg128#8
6572# asm 2: psrlq $2,<v01=%xmm7
6573psrlq $2,%xmm7
6574
6575# qhasm: r5 = v00 | v10
6576# asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9
6577# asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8
6578vpor %xmm15,%xmm8,%xmm8
6579
6580# qhasm: r7 = v01 | v11
6581# asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8
6582# asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7
6583vpor %xmm9,%xmm7,%xmm7
6584
# qhasm: v00 = r0 & mask4
# asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9
vpand %xmm4,%xmm13,%xmm9

# qhasm: v10 = r1 & mask4
# asm 1: vpand <mask4=reg128#5,<r1=reg128#15,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<r1=%xmm14,>v10=%xmm15
vpand %xmm4,%xmm14,%xmm15

# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1,%xmm15

# qhasm: v01 = r0 & mask5
# asm 1: vpand <mask5=reg128#6,<r0=reg128#14,>v01=reg128#14
# asm 2: vpand <mask5=%xmm5,<r0=%xmm13,>v01=%xmm13
vpand %xmm5,%xmm13,%xmm13

# qhasm: v11 = r1 & mask5
# asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15
# asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14
vpand %xmm5,%xmm14,%xmm14

# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#14
# asm 2: psrlq $1,<v01=%xmm13
psrlq $1,%xmm13

# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10
# asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9
vpor %xmm15,%xmm9,%xmm9

# qhasm: r1 = v01 | v11
# asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14
# asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13
vpor %xmm14,%xmm13,%xmm13

# qhasm: v00 = r2 & mask4
# asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15
# asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14
vpand %xmm4,%xmm11,%xmm14

# qhasm: v10 = r3 & mask4
# asm 1: vpand <mask4=reg128#5,<r3=reg128#11,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<r3=%xmm10,>v10=%xmm15
vpand %xmm4,%xmm10,%xmm15

# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1,%xmm15

# qhasm: v01 = r2 & mask5
# asm 1: vpand <mask5=reg128#6,<r2=reg128#12,>v01=reg128#12
# asm 2: vpand <mask5=%xmm5,<r2=%xmm11,>v01=%xmm11
vpand %xmm5,%xmm11,%xmm11

# qhasm: v11 = r3 & mask5
# asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11
# asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10
vpand %xmm5,%xmm10,%xmm10

# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#12
# asm 2: psrlq $1,<v01=%xmm11
psrlq $1,%xmm11

# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14
vpor %xmm15,%xmm14,%xmm14

# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11
# asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10
vpor %xmm10,%xmm11,%xmm10

# qhasm: v00 = r4 & mask4
# asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12
# asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11
vpand %xmm4,%xmm12,%xmm11

# qhasm: v10 = r5 & mask4
# asm 1: vpand <mask4=reg128#5,<r5=reg128#9,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<r5=%xmm8,>v10=%xmm15
vpand %xmm4,%xmm8,%xmm15

# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1,%xmm15

# qhasm: v01 = r4 & mask5
# asm 1: vpand <mask5=reg128#6,<r4=reg128#13,>v01=reg128#13
# asm 2: vpand <mask5=%xmm5,<r4=%xmm12,>v01=%xmm12
vpand %xmm5,%xmm12,%xmm12

# qhasm: v11 = r5 & mask5
# asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9
# asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8
vpand %xmm5,%xmm8,%xmm8

# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#13
# asm 2: psrlq $1,<v01=%xmm12
psrlq $1,%xmm12

# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11
vpor %xmm15,%xmm11,%xmm11

# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9
# asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8
vpor %xmm8,%xmm12,%xmm8

# qhasm: v00 = r6 & mask4
# asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13
# asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12
vpand %xmm4,%xmm6,%xmm12

# qhasm: v10 = r7 & mask4
# asm 1: vpand <mask4=reg128#5,<r7=reg128#8,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<r7=%xmm7,>v10=%xmm15
vpand %xmm4,%xmm7,%xmm15

# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1,%xmm15

# qhasm: v01 = r6 & mask5
# asm 1: vpand <mask5=reg128#6,<r6=reg128#7,>v01=reg128#7
# asm 2: vpand <mask5=%xmm5,<r6=%xmm6,>v01=%xmm6
vpand %xmm5,%xmm6,%xmm6

# qhasm: v11 = r7 & mask5
# asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8
# asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7
vpand %xmm5,%xmm7,%xmm7

# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#7
# asm 2: psrlq $1,<v01=%xmm6
psrlq $1,%xmm6

# qhasm: r6 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12
vpor %xmm15,%xmm12,%xmm12

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7
# asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6
vpor %xmm7,%xmm6,%xmm6

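# Pack the low 64-bit lanes of each transposed row pair with vpunpcklqdq and
# store the results back through input_0 at offsets 256, 272, 288 and 304.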
# qhasm: t0 = r0[0]r1[0]
# asm 1: vpunpcklqdq <r1=reg128#14,<r0=reg128#10,>t0=reg128#8
# asm 2: vpunpcklqdq <r1=%xmm13,<r0=%xmm9,>t0=%xmm7
vpunpcklqdq %xmm13,%xmm9,%xmm7

# qhasm: mem128[ input_0 + 256 ] = t0
# asm 1: movdqu   <t0=reg128#8,256(<input_0=int64#1)
# asm 2: movdqu   <t0=%xmm7,256(<input_0=%rdi)
movdqu   %xmm7,256(%rdi)

# qhasm: t0 = r2[0]r3[0]
# asm 1: vpunpcklqdq <r3=reg128#11,<r2=reg128#15,>t0=reg128#8
# asm 2: vpunpcklqdq <r3=%xmm10,<r2=%xmm14,>t0=%xmm7
vpunpcklqdq %xmm10,%xmm14,%xmm7

# qhasm: mem128[ input_0 + 272 ] = t0
# asm 1: movdqu   <t0=reg128#8,272(<input_0=int64#1)
# asm 2: movdqu   <t0=%xmm7,272(<input_0=%rdi)
movdqu   %xmm7,272(%rdi)

# qhasm: t0 = r4[0]r5[0]
# asm 1: vpunpcklqdq <r5=reg128#9,<r4=reg128#12,>t0=reg128#8
# asm 2: vpunpcklqdq <r5=%xmm8,<r4=%xmm11,>t0=%xmm7
vpunpcklqdq %xmm8,%xmm11,%xmm7

# qhasm: mem128[ input_0 + 288 ] = t0
# asm 1: movdqu   <t0=reg128#8,288(<input_0=int64#1)
# asm 2: movdqu   <t0=%xmm7,288(<input_0=%rdi)
movdqu   %xmm7,288(%rdi)

# qhasm: t0 = r6[0]r7[0]
# asm 1: vpunpcklqdq <r7=reg128#7,<r6=reg128#13,>t0=reg128#7
# asm 2: vpunpcklqdq <r7=%xmm6,<r6=%xmm12,>t0=%xmm6
vpunpcklqdq %xmm6,%xmm12,%xmm6

# qhasm: mem128[ input_0 + 304 ] = t0
# asm 1: movdqu   <t0=reg128#7,304(<input_0=int64#1)
# asm 2: movdqu   <t0=%xmm6,304(<input_0=%rdi)
movdqu   %xmm6,304(%rdi)

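# Next 64-byte chunk: broadcast-load eight 64-bit rows from offsets 320..376.
# movddup duplicates each qword into both xmm lanes; only the low lanes are
# packed and stored at the end of the chunk.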
# qhasm: r0 = mem64[ input_0 + 320 ] x2
# asm 1: movddup 320(<input_0=int64#1),>r0=reg128#7
# asm 2: movddup 320(<input_0=%rdi),>r0=%xmm6
movddup 320(%rdi),%xmm6

# qhasm: r1 = mem64[ input_0 + 328 ] x2
# asm 1: movddup 328(<input_0=int64#1),>r1=reg128#8
# asm 2: movddup 328(<input_0=%rdi),>r1=%xmm7
movddup 328(%rdi),%xmm7

# qhasm: r2 = mem64[ input_0 + 336 ] x2
# asm 1: movddup 336(<input_0=int64#1),>r2=reg128#9
# asm 2: movddup 336(<input_0=%rdi),>r2=%xmm8
movddup 336(%rdi),%xmm8

# qhasm: r3 = mem64[ input_0 + 344 ] x2
# asm 1: movddup 344(<input_0=int64#1),>r3=reg128#10
# asm 2: movddup 344(<input_0=%rdi),>r3=%xmm9
movddup 344(%rdi),%xmm9

# qhasm: r4 = mem64[ input_0 + 352 ] x2
# asm 1: movddup 352(<input_0=int64#1),>r4=reg128#11
# asm 2: movddup 352(<input_0=%rdi),>r4=%xmm10
movddup 352(%rdi),%xmm10

# qhasm: r5 = mem64[ input_0 + 360 ] x2
# asm 1: movddup 360(<input_0=int64#1),>r5=reg128#12
# asm 2: movddup 360(<input_0=%rdi),>r5=%xmm11
movddup 360(%rdi),%xmm11

# qhasm: r6 = mem64[ input_0 + 368 ] x2
# asm 1: movddup 368(<input_0=int64#1),>r6=reg128#13
# asm 2: movddup 368(<input_0=%rdi),>r6=%xmm12
movddup 368(%rdi),%xmm12

# qhasm: r7 = mem64[ input_0 + 376 ] x2
# asm 1: movddup 376(<input_0=int64#1),>r7=reg128#14
# asm 2: movddup 376(<input_0=%rdi),>r7=%xmm13
movddup 376(%rdi),%xmm13

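# Butterfly level with shift 4: the mask0/mask1 pair selects complementary
# 4-bit groups, which are exchanged between the row pairs (r0,r4), (r1,r5),
# (r2,r6) and (r3,r7). A rough C sketch of one such exchange (an editorial
# illustration of the pattern below, not generated output; s is the shift
# and m0/m1 the mask pair):
#
#   new_lo = (lo & m0) | ((hi & m0) << s);
#   new_hi = ((lo & m1) >> s) | (hi & m1);
#
# The shift-2 and shift-1 levels that follow are the same exchange with
# different masks, shifts and row pairings.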
# qhasm: v00 = r0 & mask0
# asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15
# asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14
vpand %xmm0,%xmm6,%xmm14

# qhasm: v10 = r4 & mask0
# asm 1: vpand <mask0=reg128#1,<r4=reg128#11,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<r4=%xmm10,>v10=%xmm15
vpand %xmm0,%xmm10,%xmm15

# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4,%xmm15

# qhasm: v01 = r0 & mask1
# asm 1: vpand <mask1=reg128#2,<r0=reg128#7,>v01=reg128#7
# asm 2: vpand <mask1=%xmm1,<r0=%xmm6,>v01=%xmm6
vpand %xmm1,%xmm6,%xmm6

# qhasm: v11 = r4 & mask1
# asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11
# asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10
vpand %xmm1,%xmm10,%xmm10

# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#7
# asm 2: psrlq $4,<v01=%xmm6
psrlq $4,%xmm6

# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14
vpor %xmm15,%xmm14,%xmm14

# qhasm: r4 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7
# asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6
vpor %xmm10,%xmm6,%xmm6

# qhasm: v00 = r1 & mask0
# asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11
# asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10
vpand %xmm0,%xmm7,%xmm10

# qhasm: v10 = r5 & mask0
# asm 1: vpand <mask0=reg128#1,<r5=reg128#12,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<r5=%xmm11,>v10=%xmm15
vpand %xmm0,%xmm11,%xmm15

# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4,%xmm15

# qhasm: v01 = r1 & mask1
# asm 1: vpand <mask1=reg128#2,<r1=reg128#8,>v01=reg128#8
# asm 2: vpand <mask1=%xmm1,<r1=%xmm7,>v01=%xmm7
vpand %xmm1,%xmm7,%xmm7

# qhasm: v11 = r5 & mask1
# asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12
# asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11
vpand %xmm1,%xmm11,%xmm11

# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#8
# asm 2: psrlq $4,<v01=%xmm7
psrlq $4,%xmm7

# qhasm: r1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11
# asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10
vpor %xmm15,%xmm10,%xmm10

# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8
# asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7
vpor %xmm11,%xmm7,%xmm7

# qhasm: v00 = r2 & mask0
# asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12
# asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11
vpand %xmm0,%xmm8,%xmm11

# qhasm: v10 = r6 & mask0
# asm 1: vpand <mask0=reg128#1,<r6=reg128#13,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<r6=%xmm12,>v10=%xmm15
vpand %xmm0,%xmm12,%xmm15

# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4,%xmm15

# qhasm: v01 = r2 & mask1
# asm 1: vpand <mask1=reg128#2,<r2=reg128#9,>v01=reg128#9
# asm 2: vpand <mask1=%xmm1,<r2=%xmm8,>v01=%xmm8
vpand %xmm1,%xmm8,%xmm8

# qhasm: v11 = r6 & mask1
# asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13
# asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12
vpand %xmm1,%xmm12,%xmm12

# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#9
# asm 2: psrlq $4,<v01=%xmm8
psrlq $4,%xmm8

# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11
vpor %xmm15,%xmm11,%xmm11

# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9
# asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8
vpor %xmm12,%xmm8,%xmm8

# qhasm: v00 = r3 & mask0
# asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13
# asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12
vpand %xmm0,%xmm9,%xmm12

# qhasm: v10 = r7 & mask0
# asm 1: vpand <mask0=reg128#1,<r7=reg128#14,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<r7=%xmm13,>v10=%xmm15
vpand %xmm0,%xmm13,%xmm15

# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4,%xmm15

# qhasm: v01 = r3 & mask1
# asm 1: vpand <mask1=reg128#2,<r3=reg128#10,>v01=reg128#10
# asm 2: vpand <mask1=%xmm1,<r3=%xmm9,>v01=%xmm9
vpand %xmm1,%xmm9,%xmm9

# qhasm: v11 = r7 & mask1
# asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14
# asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13
vpand %xmm1,%xmm13,%xmm13

# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#10
# asm 2: psrlq $4,<v01=%xmm9
psrlq $4,%xmm9

# qhasm: r3 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12
vpor %xmm15,%xmm12,%xmm12

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10
# asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9
vpor %xmm13,%xmm9,%xmm9

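# Butterfly level with shift 2: mask2/mask3 pair, row pairs (r0,r2), (r1,r3),
# (r4,r6) and (r5,r7).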
# qhasm: v00 = r0 & mask2
# asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14
# asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13
vpand %xmm2,%xmm14,%xmm13

# qhasm: v10 = r2 & mask2
# asm 1: vpand <mask2=reg128#3,<r2=reg128#12,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<r2=%xmm11,>v10=%xmm15
vpand %xmm2,%xmm11,%xmm15

# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2,%xmm15

# qhasm: v01 = r0 & mask3
# asm 1: vpand <mask3=reg128#4,<r0=reg128#15,>v01=reg128#15
# asm 2: vpand <mask3=%xmm3,<r0=%xmm14,>v01=%xmm14
vpand %xmm3,%xmm14,%xmm14

# qhasm: v11 = r2 & mask3
# asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12
# asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11
vpand %xmm3,%xmm11,%xmm11

# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#15
# asm 2: psrlq $2,<v01=%xmm14
psrlq $2,%xmm14

# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14
# asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13
vpor %xmm15,%xmm13,%xmm13

# qhasm: r2 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12
# asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11
vpor %xmm11,%xmm14,%xmm11

# qhasm: v00 = r1 & mask2
# asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15
# asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14
vpand %xmm2,%xmm10,%xmm14

# qhasm: v10 = r3 & mask2
# asm 1: vpand <mask2=reg128#3,<r3=reg128#13,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<r3=%xmm12,>v10=%xmm15
vpand %xmm2,%xmm12,%xmm15

# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2,%xmm15

# qhasm: v01 = r1 & mask3
# asm 1: vpand <mask3=reg128#4,<r1=reg128#11,>v01=reg128#11
# asm 2: vpand <mask3=%xmm3,<r1=%xmm10,>v01=%xmm10
vpand %xmm3,%xmm10,%xmm10

# qhasm: v11 = r3 & mask3
# asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13
# asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12
vpand %xmm3,%xmm12,%xmm12

# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#11
# asm 2: psrlq $2,<v01=%xmm10
psrlq $2,%xmm10

# qhasm: r1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14
vpor %xmm15,%xmm14,%xmm14

# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11
# asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10
vpor %xmm12,%xmm10,%xmm10

# qhasm: v00 = r4 & mask2
# asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13
# asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12
vpand %xmm2,%xmm6,%xmm12

# qhasm: v10 = r6 & mask2
# asm 1: vpand <mask2=reg128#3,<r6=reg128#9,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<r6=%xmm8,>v10=%xmm15
vpand %xmm2,%xmm8,%xmm15

# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2,%xmm15

# qhasm: v01 = r4 & mask3
# asm 1: vpand <mask3=reg128#4,<r4=reg128#7,>v01=reg128#7
# asm 2: vpand <mask3=%xmm3,<r4=%xmm6,>v01=%xmm6
vpand %xmm3,%xmm6,%xmm6

# qhasm: v11 = r6 & mask3
# asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9
# asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8
vpand %xmm3,%xmm8,%xmm8

# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#7
# asm 2: psrlq $2,<v01=%xmm6
psrlq $2,%xmm6

# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12
vpor %xmm15,%xmm12,%xmm12

# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7
# asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6
vpor %xmm8,%xmm6,%xmm6

# qhasm: v00 = r5 & mask2
# asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9
# asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8
vpand %xmm2,%xmm7,%xmm8

# qhasm: v10 = r7 & mask2
# asm 1: vpand <mask2=reg128#3,<r7=reg128#10,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<r7=%xmm9,>v10=%xmm15
vpand %xmm2,%xmm9,%xmm15

# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2,%xmm15

# qhasm: v01 = r5 & mask3
# asm 1: vpand <mask3=reg128#4,<r5=reg128#8,>v01=reg128#8
# asm 2: vpand <mask3=%xmm3,<r5=%xmm7,>v01=%xmm7
vpand %xmm3,%xmm7,%xmm7

# qhasm: v11 = r7 & mask3
# asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10
# asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9
vpand %xmm3,%xmm9,%xmm9

# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#8
# asm 2: psrlq $2,<v01=%xmm7
psrlq $2,%xmm7

# qhasm: r5 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9
# asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8
vpor %xmm15,%xmm8,%xmm8

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8
# asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7
vpor %xmm9,%xmm7,%xmm7

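# Butterfly level with shift 1: mask4/mask5 pair, row pairs (r0,r1), (r2,r3),
# (r4,r5) and (r6,r7).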
# qhasm: v00 = r0 & mask4
# asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9
vpand %xmm4,%xmm13,%xmm9

# qhasm: v10 = r1 & mask4
# asm 1: vpand <mask4=reg128#5,<r1=reg128#15,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<r1=%xmm14,>v10=%xmm15
vpand %xmm4,%xmm14,%xmm15

# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1,%xmm15

# qhasm: v01 = r0 & mask5
# asm 1: vpand <mask5=reg128#6,<r0=reg128#14,>v01=reg128#14
# asm 2: vpand <mask5=%xmm5,<r0=%xmm13,>v01=%xmm13
vpand %xmm5,%xmm13,%xmm13

# qhasm: v11 = r1 & mask5
# asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15
# asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14
vpand %xmm5,%xmm14,%xmm14

# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#14
# asm 2: psrlq $1,<v01=%xmm13
psrlq $1,%xmm13

# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10
# asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9
vpor %xmm15,%xmm9,%xmm9

# qhasm: r1 = v01 | v11
# asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14
# asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13
vpor %xmm14,%xmm13,%xmm13

# qhasm: v00 = r2 & mask4
# asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15
# asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14
vpand %xmm4,%xmm11,%xmm14

# qhasm: v10 = r3 & mask4
# asm 1: vpand <mask4=reg128#5,<r3=reg128#11,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<r3=%xmm10,>v10=%xmm15
vpand %xmm4,%xmm10,%xmm15

# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1,%xmm15

# qhasm: v01 = r2 & mask5
# asm 1: vpand <mask5=reg128#6,<r2=reg128#12,>v01=reg128#12
# asm 2: vpand <mask5=%xmm5,<r2=%xmm11,>v01=%xmm11
vpand %xmm5,%xmm11,%xmm11

# qhasm: v11 = r3 & mask5
# asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11
# asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10
vpand %xmm5,%xmm10,%xmm10

# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#12
# asm 2: psrlq $1,<v01=%xmm11
psrlq $1,%xmm11

# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14
vpor %xmm15,%xmm14,%xmm14

# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11
# asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10
vpor %xmm10,%xmm11,%xmm10

# qhasm: v00 = r4 & mask4
# asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12
# asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11
vpand %xmm4,%xmm12,%xmm11

# qhasm: v10 = r5 & mask4
# asm 1: vpand <mask4=reg128#5,<r5=reg128#9,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<r5=%xmm8,>v10=%xmm15
vpand %xmm4,%xmm8,%xmm15

# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1,%xmm15

# qhasm: v01 = r4 & mask5
# asm 1: vpand <mask5=reg128#6,<r4=reg128#13,>v01=reg128#13
# asm 2: vpand <mask5=%xmm5,<r4=%xmm12,>v01=%xmm12
vpand %xmm5,%xmm12,%xmm12

# qhasm: v11 = r5 & mask5
# asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9
# asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8
vpand %xmm5,%xmm8,%xmm8

# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#13
# asm 2: psrlq $1,<v01=%xmm12
psrlq $1,%xmm12

# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11
vpor %xmm15,%xmm11,%xmm11

# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9
# asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8
vpor %xmm8,%xmm12,%xmm8

# qhasm: v00 = r6 & mask4
# asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13
# asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12
vpand %xmm4,%xmm6,%xmm12

# qhasm: v10 = r7 & mask4
# asm 1: vpand <mask4=reg128#5,<r7=reg128#8,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<r7=%xmm7,>v10=%xmm15
vpand %xmm4,%xmm7,%xmm15

# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1,%xmm15

# qhasm: v01 = r6 & mask5
# asm 1: vpand <mask5=reg128#6,<r6=reg128#7,>v01=reg128#7
# asm 2: vpand <mask5=%xmm5,<r6=%xmm6,>v01=%xmm6
vpand %xmm5,%xmm6,%xmm6

# qhasm: v11 = r7 & mask5
# asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8
# asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7
vpand %xmm5,%xmm7,%xmm7

# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#7
# asm 2: psrlq $1,<v01=%xmm6
psrlq $1,%xmm6

# qhasm: r6 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12
vpor %xmm15,%xmm12,%xmm12

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7
# asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6
vpor %xmm7,%xmm6,%xmm6

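# Pack low lanes and store the transposed rows to offsets 320, 336, 352
# and 368.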
# qhasm: t0 = r0[0]r1[0]
# asm 1: vpunpcklqdq <r1=reg128#14,<r0=reg128#10,>t0=reg128#8
# asm 2: vpunpcklqdq <r1=%xmm13,<r0=%xmm9,>t0=%xmm7
vpunpcklqdq %xmm13,%xmm9,%xmm7

# qhasm: mem128[ input_0 + 320 ] = t0
# asm 1: movdqu   <t0=reg128#8,320(<input_0=int64#1)
# asm 2: movdqu   <t0=%xmm7,320(<input_0=%rdi)
movdqu   %xmm7,320(%rdi)

# qhasm: t0 = r2[0]r3[0]
# asm 1: vpunpcklqdq <r3=reg128#11,<r2=reg128#15,>t0=reg128#8
# asm 2: vpunpcklqdq <r3=%xmm10,<r2=%xmm14,>t0=%xmm7
vpunpcklqdq %xmm10,%xmm14,%xmm7

# qhasm: mem128[ input_0 + 336 ] = t0
# asm 1: movdqu   <t0=reg128#8,336(<input_0=int64#1)
# asm 2: movdqu   <t0=%xmm7,336(<input_0=%rdi)
movdqu   %xmm7,336(%rdi)

# qhasm: t0 = r4[0]r5[0]
# asm 1: vpunpcklqdq <r5=reg128#9,<r4=reg128#12,>t0=reg128#8
# asm 2: vpunpcklqdq <r5=%xmm8,<r4=%xmm11,>t0=%xmm7
vpunpcklqdq %xmm8,%xmm11,%xmm7

# qhasm: mem128[ input_0 + 352 ] = t0
# asm 1: movdqu   <t0=reg128#8,352(<input_0=int64#1)
# asm 2: movdqu   <t0=%xmm7,352(<input_0=%rdi)
movdqu   %xmm7,352(%rdi)

# qhasm: t0 = r6[0]r7[0]
# asm 1: vpunpcklqdq <r7=reg128#7,<r6=reg128#13,>t0=reg128#7
# asm 2: vpunpcklqdq <r7=%xmm6,<r6=%xmm12,>t0=%xmm6
vpunpcklqdq %xmm6,%xmm12,%xmm6

# qhasm: mem128[ input_0 + 368 ] = t0
# asm 1: movdqu   <t0=reg128#7,368(<input_0=int64#1)
# asm 2: movdqu   <t0=%xmm6,368(<input_0=%rdi)
movdqu   %xmm6,368(%rdi)

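# The same load / shift-4 / shift-2 / shift-1 / pack-and-store sequence now
# repeats verbatim for the chunk at offsets 384..440 (stored to 384..432).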
# qhasm: r0 = mem64[ input_0 + 384 ] x2
# asm 1: movddup 384(<input_0=int64#1),>r0=reg128#7
# asm 2: movddup 384(<input_0=%rdi),>r0=%xmm6
movddup 384(%rdi),%xmm6

# qhasm: r1 = mem64[ input_0 + 392 ] x2
# asm 1: movddup 392(<input_0=int64#1),>r1=reg128#8
# asm 2: movddup 392(<input_0=%rdi),>r1=%xmm7
movddup 392(%rdi),%xmm7

# qhasm: r2 = mem64[ input_0 + 400 ] x2
# asm 1: movddup 400(<input_0=int64#1),>r2=reg128#9
# asm 2: movddup 400(<input_0=%rdi),>r2=%xmm8
movddup 400(%rdi),%xmm8

# qhasm: r3 = mem64[ input_0 + 408 ] x2
# asm 1: movddup 408(<input_0=int64#1),>r3=reg128#10
# asm 2: movddup 408(<input_0=%rdi),>r3=%xmm9
movddup 408(%rdi),%xmm9

# qhasm: r4 = mem64[ input_0 + 416 ] x2
# asm 1: movddup 416(<input_0=int64#1),>r4=reg128#11
# asm 2: movddup 416(<input_0=%rdi),>r4=%xmm10
movddup 416(%rdi),%xmm10

# qhasm: r5 = mem64[ input_0 + 424 ] x2
# asm 1: movddup 424(<input_0=int64#1),>r5=reg128#12
# asm 2: movddup 424(<input_0=%rdi),>r5=%xmm11
movddup 424(%rdi),%xmm11

# qhasm: r6 = mem64[ input_0 + 432 ] x2
# asm 1: movddup 432(<input_0=int64#1),>r6=reg128#13
# asm 2: movddup 432(<input_0=%rdi),>r6=%xmm12
movddup 432(%rdi),%xmm12

# qhasm: r7 = mem64[ input_0 + 440 ] x2
# asm 1: movddup 440(<input_0=int64#1),>r7=reg128#14
# asm 2: movddup 440(<input_0=%rdi),>r7=%xmm13
movddup 440(%rdi),%xmm13

# qhasm: v00 = r0 & mask0
# asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15
# asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14
vpand %xmm0,%xmm6,%xmm14

# qhasm: v10 = r4 & mask0
# asm 1: vpand <mask0=reg128#1,<r4=reg128#11,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<r4=%xmm10,>v10=%xmm15
vpand %xmm0,%xmm10,%xmm15

# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4,%xmm15

# qhasm: v01 = r0 & mask1
# asm 1: vpand <mask1=reg128#2,<r0=reg128#7,>v01=reg128#7
# asm 2: vpand <mask1=%xmm1,<r0=%xmm6,>v01=%xmm6
vpand %xmm1,%xmm6,%xmm6

# qhasm: v11 = r4 & mask1
# asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11
# asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10
vpand %xmm1,%xmm10,%xmm10

# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#7
# asm 2: psrlq $4,<v01=%xmm6
psrlq $4,%xmm6

# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14
vpor %xmm15,%xmm14,%xmm14

# qhasm: r4 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7
# asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6
vpor %xmm10,%xmm6,%xmm6

# qhasm: v00 = r1 & mask0
# asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11
# asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10
vpand %xmm0,%xmm7,%xmm10

# qhasm: v10 = r5 & mask0
# asm 1: vpand <mask0=reg128#1,<r5=reg128#12,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<r5=%xmm11,>v10=%xmm15
vpand %xmm0,%xmm11,%xmm15

# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4,%xmm15

# qhasm: v01 = r1 & mask1
# asm 1: vpand <mask1=reg128#2,<r1=reg128#8,>v01=reg128#8
# asm 2: vpand <mask1=%xmm1,<r1=%xmm7,>v01=%xmm7
vpand %xmm1,%xmm7,%xmm7

# qhasm: v11 = r5 & mask1
# asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12
# asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11
vpand %xmm1,%xmm11,%xmm11

# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#8
# asm 2: psrlq $4,<v01=%xmm7
psrlq $4,%xmm7

# qhasm: r1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11
# asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10
vpor %xmm15,%xmm10,%xmm10

# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8
# asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7
vpor %xmm11,%xmm7,%xmm7

# qhasm: v00 = r2 & mask0
# asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12
# asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11
vpand %xmm0,%xmm8,%xmm11

# qhasm: v10 = r6 & mask0
# asm 1: vpand <mask0=reg128#1,<r6=reg128#13,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<r6=%xmm12,>v10=%xmm15
vpand %xmm0,%xmm12,%xmm15

# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4,%xmm15

# qhasm: v01 = r2 & mask1
# asm 1: vpand <mask1=reg128#2,<r2=reg128#9,>v01=reg128#9
# asm 2: vpand <mask1=%xmm1,<r2=%xmm8,>v01=%xmm8
vpand %xmm1,%xmm8,%xmm8

# qhasm: v11 = r6 & mask1
# asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13
# asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12
vpand %xmm1,%xmm12,%xmm12

# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#9
# asm 2: psrlq $4,<v01=%xmm8
psrlq $4,%xmm8

# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11
vpor %xmm15,%xmm11,%xmm11

# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9
# asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8
vpor %xmm12,%xmm8,%xmm8

# qhasm: v00 = r3 & mask0
# asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13
# asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12
vpand %xmm0,%xmm9,%xmm12

# qhasm: v10 = r7 & mask0
# asm 1: vpand <mask0=reg128#1,<r7=reg128#14,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<r7=%xmm13,>v10=%xmm15
vpand %xmm0,%xmm13,%xmm15

# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4,%xmm15

# qhasm: v01 = r3 & mask1
# asm 1: vpand <mask1=reg128#2,<r3=reg128#10,>v01=reg128#10
# asm 2: vpand <mask1=%xmm1,<r3=%xmm9,>v01=%xmm9
vpand %xmm1,%xmm9,%xmm9

# qhasm: v11 = r7 & mask1
# asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#14
# asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm13
vpand %xmm1,%xmm13,%xmm13

# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#10
# asm 2: psrlq $4,<v01=%xmm9
psrlq $4,%xmm9

# qhasm: r3 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r3=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r3=%xmm12
vpor %xmm15,%xmm12,%xmm12

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#14,<v01=reg128#10,>r7=reg128#10
# asm 2: vpor <v11=%xmm13,<v01=%xmm9,>r7=%xmm9
vpor %xmm13,%xmm9,%xmm9

# qhasm: v00 = r0 & mask2
# asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#14
# asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm13
vpand %xmm2,%xmm14,%xmm13

# qhasm: v10 = r2 & mask2
# asm 1: vpand <mask2=reg128#3,<r2=reg128#12,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<r2=%xmm11,>v10=%xmm15
vpand %xmm2,%xmm11,%xmm15

# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2,%xmm15

# qhasm: v01 = r0 & mask3
# asm 1: vpand <mask3=reg128#4,<r0=reg128#15,>v01=reg128#15
# asm 2: vpand <mask3=%xmm3,<r0=%xmm14,>v01=%xmm14
vpand %xmm3,%xmm14,%xmm14

# qhasm: v11 = r2 & mask3
# asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12
# asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11
vpand %xmm3,%xmm11,%xmm11

# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#15
# asm 2: psrlq $2,<v01=%xmm14
psrlq $2,%xmm14

# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#14,>r0=reg128#14
# asm 2: vpor <v10=%xmm15,<v00=%xmm13,>r0=%xmm13
vpor %xmm15,%xmm13,%xmm13

# qhasm: r2 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#15,>r2=reg128#12
# asm 2: vpor <v11=%xmm11,<v01=%xmm14,>r2=%xmm11
vpor %xmm11,%xmm14,%xmm11

# qhasm: v00 = r1 & mask2
# asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#15
# asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm14
vpand %xmm2,%xmm10,%xmm14

# qhasm: v10 = r3 & mask2
# asm 1: vpand <mask2=reg128#3,<r3=reg128#13,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<r3=%xmm12,>v10=%xmm15
vpand %xmm2,%xmm12,%xmm15

# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2,%xmm15

# qhasm: v01 = r1 & mask3
# asm 1: vpand <mask3=reg128#4,<r1=reg128#11,>v01=reg128#11
# asm 2: vpand <mask3=%xmm3,<r1=%xmm10,>v01=%xmm10
vpand %xmm3,%xmm10,%xmm10

# qhasm: v11 = r3 & mask3
# asm 1: vpand <mask3=reg128#4,<r3=reg128#13,>v11=reg128#13
# asm 2: vpand <mask3=%xmm3,<r3=%xmm12,>v11=%xmm12
vpand %xmm3,%xmm12,%xmm12

# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#11
# asm 2: psrlq $2,<v01=%xmm10
psrlq $2,%xmm10

# qhasm: r1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r1=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r1=%xmm14
vpor %xmm15,%xmm14,%xmm14

# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#11,>r3=reg128#11
# asm 2: vpor <v11=%xmm12,<v01=%xmm10,>r3=%xmm10
vpor %xmm12,%xmm10,%xmm10

# qhasm: v00 = r4 & mask2
# asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#13
# asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm12
vpand %xmm2,%xmm6,%xmm12

# qhasm: v10 = r6 & mask2
# asm 1: vpand <mask2=reg128#3,<r6=reg128#9,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<r6=%xmm8,>v10=%xmm15
vpand %xmm2,%xmm8,%xmm15

# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2,%xmm15

# qhasm: v01 = r4 & mask3
# asm 1: vpand <mask3=reg128#4,<r4=reg128#7,>v01=reg128#7
# asm 2: vpand <mask3=%xmm3,<r4=%xmm6,>v01=%xmm6
vpand %xmm3,%xmm6,%xmm6

# qhasm: v11 = r6 & mask3
# asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9
# asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8
vpand %xmm3,%xmm8,%xmm8

# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#7
# asm 2: psrlq $2,<v01=%xmm6
psrlq $2,%xmm6

# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r4=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r4=%xmm12
vpor %xmm15,%xmm12,%xmm12

# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7
# asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6
vpor %xmm8,%xmm6,%xmm6

# qhasm: v00 = r5 & mask2
# asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9
# asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8
vpand %xmm2,%xmm7,%xmm8

# qhasm: v10 = r7 & mask2
# asm 1: vpand <mask2=reg128#3,<r7=reg128#10,>v10=reg128#16
# asm 2: vpand <mask2=%xmm2,<r7=%xmm9,>v10=%xmm15
vpand %xmm2,%xmm9,%xmm15

# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#16
# asm 2: psllq $2,<v10=%xmm15
psllq $2,%xmm15

# qhasm: v01 = r5 & mask3
# asm 1: vpand <mask3=reg128#4,<r5=reg128#8,>v01=reg128#8
# asm 2: vpand <mask3=%xmm3,<r5=%xmm7,>v01=%xmm7
vpand %xmm3,%xmm7,%xmm7

# qhasm: v11 = r7 & mask3
# asm 1: vpand <mask3=reg128#4,<r7=reg128#10,>v11=reg128#10
# asm 2: vpand <mask3=%xmm3,<r7=%xmm9,>v11=%xmm9
vpand %xmm3,%xmm9,%xmm9

# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#8
# asm 2: psrlq $2,<v01=%xmm7
psrlq $2,%xmm7

# qhasm: r5 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#9,>r5=reg128#9
# asm 2: vpor <v10=%xmm15,<v00=%xmm8,>r5=%xmm8
vpor %xmm15,%xmm8,%xmm8

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#10,<v01=reg128#8,>r7=reg128#8
# asm 2: vpor <v11=%xmm9,<v01=%xmm7,>r7=%xmm7
vpor %xmm9,%xmm7,%xmm7

# qhasm: v00 = r0 & mask4
# asm 1: vpand <mask4=reg128#5,<r0=reg128#14,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<r0=%xmm13,>v00=%xmm9
vpand %xmm4,%xmm13,%xmm9

# qhasm: v10 = r1 & mask4
# asm 1: vpand <mask4=reg128#5,<r1=reg128#15,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<r1=%xmm14,>v10=%xmm15
vpand %xmm4,%xmm14,%xmm15

# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1,%xmm15

# qhasm: v01 = r0 & mask5
# asm 1: vpand <mask5=reg128#6,<r0=reg128#14,>v01=reg128#14
# asm 2: vpand <mask5=%xmm5,<r0=%xmm13,>v01=%xmm13
vpand %xmm5,%xmm13,%xmm13

# qhasm: v11 = r1 & mask5
# asm 1: vpand <mask5=reg128#6,<r1=reg128#15,>v11=reg128#15
# asm 2: vpand <mask5=%xmm5,<r1=%xmm14,>v11=%xmm14
vpand %xmm5,%xmm14,%xmm14

# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#14
# asm 2: psrlq $1,<v01=%xmm13
psrlq $1,%xmm13

# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#10,>r0=reg128#10
# asm 2: vpor <v10=%xmm15,<v00=%xmm9,>r0=%xmm9
vpor %xmm15,%xmm9,%xmm9

# qhasm: r1 = v01 | v11
# asm 1: vpor <v11=reg128#15,<v01=reg128#14,>r1=reg128#14
# asm 2: vpor <v11=%xmm14,<v01=%xmm13,>r1=%xmm13
vpor %xmm14,%xmm13,%xmm13

# qhasm: v00 = r2 & mask4
# asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#15
# asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm14
vpand %xmm4,%xmm11,%xmm14

# qhasm: v10 = r3 & mask4
# asm 1: vpand <mask4=reg128#5,<r3=reg128#11,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<r3=%xmm10,>v10=%xmm15
vpand %xmm4,%xmm10,%xmm15

# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1,%xmm15

# qhasm: v01 = r2 & mask5
# asm 1: vpand <mask5=reg128#6,<r2=reg128#12,>v01=reg128#12
# asm 2: vpand <mask5=%xmm5,<r2=%xmm11,>v01=%xmm11
vpand %xmm5,%xmm11,%xmm11

# qhasm: v11 = r3 & mask5
# asm 1: vpand <mask5=reg128#6,<r3=reg128#11,>v11=reg128#11
# asm 2: vpand <mask5=%xmm5,<r3=%xmm10,>v11=%xmm10
vpand %xmm5,%xmm10,%xmm10

# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#12
# asm 2: psrlq $1,<v01=%xmm11
psrlq $1,%xmm11

# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r2=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r2=%xmm14
vpor %xmm15,%xmm14,%xmm14

# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#12,>r3=reg128#11
# asm 2: vpor <v11=%xmm10,<v01=%xmm11,>r3=%xmm10
vpor %xmm10,%xmm11,%xmm10

# qhasm: v00 = r4 & mask4
# asm 1: vpand <mask4=reg128#5,<r4=reg128#13,>v00=reg128#12
# asm 2: vpand <mask4=%xmm4,<r4=%xmm12,>v00=%xmm11
vpand %xmm4,%xmm12,%xmm11

# qhasm: v10 = r5 & mask4
# asm 1: vpand <mask4=reg128#5,<r5=reg128#9,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<r5=%xmm8,>v10=%xmm15
vpand %xmm4,%xmm8,%xmm15

# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1,%xmm15

# qhasm: v01 = r4 & mask5
# asm 1: vpand <mask5=reg128#6,<r4=reg128#13,>v01=reg128#13
# asm 2: vpand <mask5=%xmm5,<r4=%xmm12,>v01=%xmm12
vpand %xmm5,%xmm12,%xmm12

# qhasm: v11 = r5 & mask5
# asm 1: vpand <mask5=reg128#6,<r5=reg128#9,>v11=reg128#9
# asm 2: vpand <mask5=%xmm5,<r5=%xmm8,>v11=%xmm8
vpand %xmm5,%xmm8,%xmm8

# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#13
# asm 2: psrlq $1,<v01=%xmm12
psrlq $1,%xmm12

# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r4=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r4=%xmm11
vpor %xmm15,%xmm11,%xmm11

# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#13,>r5=reg128#9
# asm 2: vpor <v11=%xmm8,<v01=%xmm12,>r5=%xmm8
vpor %xmm8,%xmm12,%xmm8

# qhasm: v00 = r6 & mask4
# asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#13
# asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm12
vpand %xmm4,%xmm6,%xmm12

# qhasm: v10 = r7 & mask4
# asm 1: vpand <mask4=reg128#5,<r7=reg128#8,>v10=reg128#16
# asm 2: vpand <mask4=%xmm4,<r7=%xmm7,>v10=%xmm15
vpand %xmm4,%xmm7,%xmm15

# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#16
# asm 2: psllq $1,<v10=%xmm15
psllq $1,%xmm15

# qhasm: v01 = r6 & mask5
# asm 1: vpand <mask5=reg128#6,<r6=reg128#7,>v01=reg128#7
# asm 2: vpand <mask5=%xmm5,<r6=%xmm6,>v01=%xmm6
vpand %xmm5,%xmm6,%xmm6

# qhasm: v11 = r7 & mask5
# asm 1: vpand <mask5=reg128#6,<r7=reg128#8,>v11=reg128#8
# asm 2: vpand <mask5=%xmm5,<r7=%xmm7,>v11=%xmm7
vpand %xmm5,%xmm7,%xmm7

# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#7
# asm 2: psrlq $1,<v01=%xmm6
psrlq $1,%xmm6

# qhasm: r6 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#13,>r6=reg128#13
# asm 2: vpor <v10=%xmm15,<v00=%xmm12,>r6=%xmm12
vpor %xmm15,%xmm12,%xmm12

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#8,<v01=reg128#7,>r7=reg128#7
# asm 2: vpor <v11=%xmm7,<v01=%xmm6,>r7=%xmm6
vpor %xmm7,%xmm6,%xmm6

# qhasm: t0 = r0[0]r1[0]
# asm 1: vpunpcklqdq <r1=reg128#14,<r0=reg128#10,>t0=reg128#8
# asm 2: vpunpcklqdq <r1=%xmm13,<r0=%xmm9,>t0=%xmm7
vpunpcklqdq %xmm13,%xmm9,%xmm7

# qhasm: mem128[ input_0 + 384 ] = t0
# asm 1: movdqu   <t0=reg128#8,384(<input_0=int64#1)
# asm 2: movdqu   <t0=%xmm7,384(<input_0=%rdi)
movdqu   %xmm7,384(%rdi)

# qhasm: t0 = r2[0]r3[0]
# asm 1: vpunpcklqdq <r3=reg128#11,<r2=reg128#15,>t0=reg128#8
# asm 2: vpunpcklqdq <r3=%xmm10,<r2=%xmm14,>t0=%xmm7
vpunpcklqdq %xmm10,%xmm14,%xmm7

# qhasm: mem128[ input_0 + 400 ] = t0
# asm 1: movdqu   <t0=reg128#8,400(<input_0=int64#1)
# asm 2: movdqu   <t0=%xmm7,400(<input_0=%rdi)
movdqu   %xmm7,400(%rdi)

# qhasm: t0 = r4[0]r5[0]
# asm 1: vpunpcklqdq <r5=reg128#9,<r4=reg128#12,>t0=reg128#8
# asm 2: vpunpcklqdq <r5=%xmm8,<r4=%xmm11,>t0=%xmm7
vpunpcklqdq %xmm8,%xmm11,%xmm7

# qhasm: mem128[ input_0 + 416 ] = t0
# asm 1: movdqu   <t0=reg128#8,416(<input_0=int64#1)
# asm 2: movdqu   <t0=%xmm7,416(<input_0=%rdi)
movdqu   %xmm7,416(%rdi)

# qhasm: t0 = r6[0]r7[0]
# asm 1: vpunpcklqdq <r7=reg128#7,<r6=reg128#13,>t0=reg128#7
# asm 2: vpunpcklqdq <r7=%xmm6,<r6=%xmm12,>t0=%xmm6
vpunpcklqdq %xmm6,%xmm12,%xmm6

# qhasm: mem128[ input_0 + 432 ] = t0
# asm 1: movdqu   <t0=reg128#7,432(<input_0=int64#1)
# asm 2: movdqu   <t0=%xmm6,432(<input_0=%rdi)
movdqu   %xmm6,432(%rdi)

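# Final chunk, offsets 448..504 (stored to 448..496). In this last pass the
# register allocator starts reusing the mask registers (%xmm0..%xmm5) as
# temporaries once each mask's final use has passed.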
# qhasm: r0 = mem64[ input_0 + 448 ] x2
# asm 1: movddup 448(<input_0=int64#1),>r0=reg128#7
# asm 2: movddup 448(<input_0=%rdi),>r0=%xmm6
movddup 448(%rdi),%xmm6

# qhasm: r1 = mem64[ input_0 + 456 ] x2
# asm 1: movddup 456(<input_0=int64#1),>r1=reg128#8
# asm 2: movddup 456(<input_0=%rdi),>r1=%xmm7
movddup 456(%rdi),%xmm7

# qhasm: r2 = mem64[ input_0 + 464 ] x2
# asm 1: movddup 464(<input_0=int64#1),>r2=reg128#9
# asm 2: movddup 464(<input_0=%rdi),>r2=%xmm8
movddup 464(%rdi),%xmm8

# qhasm: r3 = mem64[ input_0 + 472 ] x2
# asm 1: movddup 472(<input_0=int64#1),>r3=reg128#10
# asm 2: movddup 472(<input_0=%rdi),>r3=%xmm9
movddup 472(%rdi),%xmm9

# qhasm: r4 = mem64[ input_0 + 480 ] x2
# asm 1: movddup 480(<input_0=int64#1),>r4=reg128#11
# asm 2: movddup 480(<input_0=%rdi),>r4=%xmm10
movddup 480(%rdi),%xmm10

# qhasm: r5 = mem64[ input_0 + 488 ] x2
# asm 1: movddup 488(<input_0=int64#1),>r5=reg128#12
# asm 2: movddup 488(<input_0=%rdi),>r5=%xmm11
movddup 488(%rdi),%xmm11

# qhasm: r6 = mem64[ input_0 + 496 ] x2
# asm 1: movddup 496(<input_0=int64#1),>r6=reg128#13
# asm 2: movddup 496(<input_0=%rdi),>r6=%xmm12
movddup 496(%rdi),%xmm12

# qhasm: r7 = mem64[ input_0 + 504 ] x2
# asm 1: movddup 504(<input_0=int64#1),>r7=reg128#14
# asm 2: movddup 504(<input_0=%rdi),>r7=%xmm13
movddup 504(%rdi),%xmm13

# qhasm: v00 = r0 & mask0
# asm 1: vpand <mask0=reg128#1,<r0=reg128#7,>v00=reg128#15
# asm 2: vpand <mask0=%xmm0,<r0=%xmm6,>v00=%xmm14
vpand %xmm0,%xmm6,%xmm14

# qhasm: v10 = r4 & mask0
# asm 1: vpand <mask0=reg128#1,<r4=reg128#11,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<r4=%xmm10,>v10=%xmm15
vpand %xmm0,%xmm10,%xmm15

# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4,%xmm15

# qhasm: v01 = r0 & mask1
# asm 1: vpand <mask1=reg128#2,<r0=reg128#7,>v01=reg128#7
# asm 2: vpand <mask1=%xmm1,<r0=%xmm6,>v01=%xmm6
vpand %xmm1,%xmm6,%xmm6

# qhasm: v11 = r4 & mask1
# asm 1: vpand <mask1=reg128#2,<r4=reg128#11,>v11=reg128#11
# asm 2: vpand <mask1=%xmm1,<r4=%xmm10,>v11=%xmm10
vpand %xmm1,%xmm10,%xmm10

# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#7
# asm 2: psrlq $4,<v01=%xmm6
psrlq $4,%xmm6

# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#15,>r0=reg128#15
# asm 2: vpor <v10=%xmm15,<v00=%xmm14,>r0=%xmm14
vpor %xmm15,%xmm14,%xmm14

# qhasm: r4 = v01 | v11
# asm 1: vpor <v11=reg128#11,<v01=reg128#7,>r4=reg128#7
# asm 2: vpor <v11=%xmm10,<v01=%xmm6,>r4=%xmm6
vpor %xmm10,%xmm6,%xmm6

# qhasm: v00 = r1 & mask0
# asm 1: vpand <mask0=reg128#1,<r1=reg128#8,>v00=reg128#11
# asm 2: vpand <mask0=%xmm0,<r1=%xmm7,>v00=%xmm10
vpand %xmm0,%xmm7,%xmm10

# qhasm: v10 = r5 & mask0
# asm 1: vpand <mask0=reg128#1,<r5=reg128#12,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<r5=%xmm11,>v10=%xmm15
vpand %xmm0,%xmm11,%xmm15

# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4,%xmm15

# qhasm: v01 = r1 & mask1
# asm 1: vpand <mask1=reg128#2,<r1=reg128#8,>v01=reg128#8
# asm 2: vpand <mask1=%xmm1,<r1=%xmm7,>v01=%xmm7
vpand %xmm1,%xmm7,%xmm7

# qhasm: v11 = r5 & mask1
# asm 1: vpand <mask1=reg128#2,<r5=reg128#12,>v11=reg128#12
# asm 2: vpand <mask1=%xmm1,<r5=%xmm11,>v11=%xmm11
vpand %xmm1,%xmm11,%xmm11

# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#8
# asm 2: psrlq $4,<v01=%xmm7
psrlq $4,%xmm7

# qhasm: r1 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#11,>r1=reg128#11
# asm 2: vpor <v10=%xmm15,<v00=%xmm10,>r1=%xmm10
vpor %xmm15,%xmm10,%xmm10

# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#8,>r5=reg128#8
# asm 2: vpor <v11=%xmm11,<v01=%xmm7,>r5=%xmm7
vpor %xmm11,%xmm7,%xmm7

# qhasm: v00 = r2 & mask0
# asm 1: vpand <mask0=reg128#1,<r2=reg128#9,>v00=reg128#12
# asm 2: vpand <mask0=%xmm0,<r2=%xmm8,>v00=%xmm11
vpand %xmm0,%xmm8,%xmm11

# qhasm: v10 = r6 & mask0
# asm 1: vpand <mask0=reg128#1,<r6=reg128#13,>v10=reg128#16
# asm 2: vpand <mask0=%xmm0,<r6=%xmm12,>v10=%xmm15
vpand %xmm0,%xmm12,%xmm15

# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#16
# asm 2: psllq $4,<v10=%xmm15
psllq $4,%xmm15

# qhasm: v01 = r2 & mask1
# asm 1: vpand <mask1=reg128#2,<r2=reg128#9,>v01=reg128#9
# asm 2: vpand <mask1=%xmm1,<r2=%xmm8,>v01=%xmm8
vpand %xmm1,%xmm8,%xmm8

# qhasm: v11 = r6 & mask1
# asm 1: vpand <mask1=reg128#2,<r6=reg128#13,>v11=reg128#13
# asm 2: vpand <mask1=%xmm1,<r6=%xmm12,>v11=%xmm12
vpand %xmm1,%xmm12,%xmm12

# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#9
# asm 2: psrlq $4,<v01=%xmm8
psrlq $4,%xmm8

# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#16,<v00=reg128#12,>r2=reg128#12
# asm 2: vpor <v10=%xmm15,<v00=%xmm11,>r2=%xmm11
vpor %xmm15,%xmm11,%xmm11

# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#13,<v01=reg128#9,>r6=reg128#9
# asm 2: vpor <v11=%xmm12,<v01=%xmm8,>r6=%xmm8
vpor %xmm12,%xmm8,%xmm8

# qhasm: v00 = r3 & mask0
# asm 1: vpand <mask0=reg128#1,<r3=reg128#10,>v00=reg128#13
# asm 2: vpand <mask0=%xmm0,<r3=%xmm9,>v00=%xmm12
vpand %xmm0,%xmm9,%xmm12

# qhasm: v10 = r7 & mask0
# asm 1: vpand <mask0=reg128#1,<r7=reg128#14,>v10=reg128#1
# asm 2: vpand <mask0=%xmm0,<r7=%xmm13,>v10=%xmm0
vpand %xmm0,%xmm13,%xmm0

# qhasm: 2x v10 <<= 4
# asm 1: psllq $4,<v10=reg128#1
# asm 2: psllq $4,<v10=%xmm0
psllq $4,%xmm0

# qhasm: v01 = r3 & mask1
# asm 1: vpand <mask1=reg128#2,<r3=reg128#10,>v01=reg128#10
# asm 2: vpand <mask1=%xmm1,<r3=%xmm9,>v01=%xmm9
vpand %xmm1,%xmm9,%xmm9

# qhasm: v11 = r7 & mask1
# asm 1: vpand <mask1=reg128#2,<r7=reg128#14,>v11=reg128#2
# asm 2: vpand <mask1=%xmm1,<r7=%xmm13,>v11=%xmm1
vpand %xmm1,%xmm13,%xmm1

# qhasm: 2x v01 unsigned>>= 4
# asm 1: psrlq $4,<v01=reg128#10
# asm 2: psrlq $4,<v01=%xmm9
psrlq $4,%xmm9

# qhasm: r3 = v00 | v10
# asm 1: vpor <v10=reg128#1,<v00=reg128#13,>r3=reg128#1
# asm 2: vpor <v10=%xmm0,<v00=%xmm12,>r3=%xmm0
vpor %xmm0,%xmm12,%xmm0

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#2,<v01=reg128#10,>r7=reg128#2
# asm 2: vpor <v11=%xmm1,<v01=%xmm9,>r7=%xmm1
vpor %xmm1,%xmm9,%xmm1

# qhasm: v00 = r0 & mask2
# asm 1: vpand <mask2=reg128#3,<r0=reg128#15,>v00=reg128#10
# asm 2: vpand <mask2=%xmm2,<r0=%xmm14,>v00=%xmm9
vpand %xmm2,%xmm14,%xmm9

# qhasm: v10 = r2 & mask2
# asm 1: vpand <mask2=reg128#3,<r2=reg128#12,>v10=reg128#13
# asm 2: vpand <mask2=%xmm2,<r2=%xmm11,>v10=%xmm12
vpand %xmm2,%xmm11,%xmm12

# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#13
# asm 2: psllq $2,<v10=%xmm12
psllq $2,%xmm12

# qhasm: v01 = r0 & mask3
# asm 1: vpand <mask3=reg128#4,<r0=reg128#15,>v01=reg128#14
# asm 2: vpand <mask3=%xmm3,<r0=%xmm14,>v01=%xmm13
vpand %xmm3,%xmm14,%xmm13

# qhasm: v11 = r2 & mask3
# asm 1: vpand <mask3=reg128#4,<r2=reg128#12,>v11=reg128#12
# asm 2: vpand <mask3=%xmm3,<r2=%xmm11,>v11=%xmm11
vpand %xmm3,%xmm11,%xmm11

# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#14
# asm 2: psrlq $2,<v01=%xmm13
psrlq $2,%xmm13

# qhasm: r0 = v00 | v10
# asm 1: vpor <v10=reg128#13,<v00=reg128#10,>r0=reg128#10
# asm 2: vpor <v10=%xmm12,<v00=%xmm9,>r0=%xmm9
vpor %xmm12,%xmm9,%xmm9

# qhasm: r2 = v01 | v11
# asm 1: vpor <v11=reg128#12,<v01=reg128#14,>r2=reg128#12
# asm 2: vpor <v11=%xmm11,<v01=%xmm13,>r2=%xmm11
vpor %xmm11,%xmm13,%xmm11

# qhasm: v00 = r1 & mask2
# asm 1: vpand <mask2=reg128#3,<r1=reg128#11,>v00=reg128#13
# asm 2: vpand <mask2=%xmm2,<r1=%xmm10,>v00=%xmm12
vpand %xmm2,%xmm10,%xmm12

# qhasm: v10 = r3 & mask2
# asm 1: vpand <mask2=reg128#3,<r3=reg128#1,>v10=reg128#14
# asm 2: vpand <mask2=%xmm2,<r3=%xmm0,>v10=%xmm13
vpand %xmm2,%xmm0,%xmm13

# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#14
# asm 2: psllq $2,<v10=%xmm13
psllq $2,%xmm13

# qhasm: v01 = r1 & mask3
# asm 1: vpand <mask3=reg128#4,<r1=reg128#11,>v01=reg128#11
# asm 2: vpand <mask3=%xmm3,<r1=%xmm10,>v01=%xmm10
vpand %xmm3,%xmm10,%xmm10

# qhasm: v11 = r3 & mask3
# asm 1: vpand <mask3=reg128#4,<r3=reg128#1,>v11=reg128#1
# asm 2: vpand <mask3=%xmm3,<r3=%xmm0,>v11=%xmm0
vpand %xmm3,%xmm0,%xmm0

# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#11
# asm 2: psrlq $2,<v01=%xmm10
psrlq $2,%xmm10

# qhasm: r1 = v00 | v10
# asm 1: vpor <v10=reg128#14,<v00=reg128#13,>r1=reg128#13
# asm 2: vpor <v10=%xmm13,<v00=%xmm12,>r1=%xmm12
vpor %xmm13,%xmm12,%xmm12

# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#1,<v01=reg128#11,>r3=reg128#1
# asm 2: vpor <v11=%xmm0,<v01=%xmm10,>r3=%xmm0
vpor %xmm0,%xmm10,%xmm0

# qhasm: v00 = r4 & mask2
# asm 1: vpand <mask2=reg128#3,<r4=reg128#7,>v00=reg128#11
# asm 2: vpand <mask2=%xmm2,<r4=%xmm6,>v00=%xmm10
vpand %xmm2,%xmm6,%xmm10

# qhasm: v10 = r6 & mask2
# asm 1: vpand <mask2=reg128#3,<r6=reg128#9,>v10=reg128#14
# asm 2: vpand <mask2=%xmm2,<r6=%xmm8,>v10=%xmm13
vpand %xmm2,%xmm8,%xmm13

# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#14
# asm 2: psllq $2,<v10=%xmm13
psllq $2,%xmm13

# qhasm: v01 = r4 & mask3
# asm 1: vpand <mask3=reg128#4,<r4=reg128#7,>v01=reg128#7
# asm 2: vpand <mask3=%xmm3,<r4=%xmm6,>v01=%xmm6
vpand %xmm3,%xmm6,%xmm6

# qhasm: v11 = r6 & mask3
# asm 1: vpand <mask3=reg128#4,<r6=reg128#9,>v11=reg128#9
# asm 2: vpand <mask3=%xmm3,<r6=%xmm8,>v11=%xmm8
vpand %xmm3,%xmm8,%xmm8

# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#7
# asm 2: psrlq $2,<v01=%xmm6
psrlq $2,%xmm6

# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#14,<v00=reg128#11,>r4=reg128#11
# asm 2: vpor <v10=%xmm13,<v00=%xmm10,>r4=%xmm10
vpor %xmm13,%xmm10,%xmm10

# qhasm: r6 = v01 | v11
# asm 1: vpor <v11=reg128#9,<v01=reg128#7,>r6=reg128#7
# asm 2: vpor <v11=%xmm8,<v01=%xmm6,>r6=%xmm6
vpor %xmm8,%xmm6,%xmm6

# qhasm: v00 = r5 & mask2
# asm 1: vpand <mask2=reg128#3,<r5=reg128#8,>v00=reg128#9
# asm 2: vpand <mask2=%xmm2,<r5=%xmm7,>v00=%xmm8
vpand %xmm2,%xmm7,%xmm8

# qhasm: v10 = r7 & mask2
# asm 1: vpand <mask2=reg128#3,<r7=reg128#2,>v10=reg128#3
# asm 2: vpand <mask2=%xmm2,<r7=%xmm1,>v10=%xmm2
vpand %xmm2,%xmm1,%xmm2

# qhasm: 2x v10 <<= 2
# asm 1: psllq $2,<v10=reg128#3
# asm 2: psllq $2,<v10=%xmm2
psllq $2,%xmm2

# qhasm: v01 = r5 & mask3
# asm 1: vpand <mask3=reg128#4,<r5=reg128#8,>v01=reg128#8
# asm 2: vpand <mask3=%xmm3,<r5=%xmm7,>v01=%xmm7
vpand %xmm3,%xmm7,%xmm7

# qhasm: v11 = r7 & mask3
# asm 1: vpand <mask3=reg128#4,<r7=reg128#2,>v11=reg128#2
# asm 2: vpand <mask3=%xmm3,<r7=%xmm1,>v11=%xmm1
vpand %xmm3,%xmm1,%xmm1

# qhasm: 2x v01 unsigned>>= 2
# asm 1: psrlq $2,<v01=reg128#8
# asm 2: psrlq $2,<v01=%xmm7
psrlq $2,%xmm7

# qhasm: r5 = v00 | v10
# asm 1: vpor <v10=reg128#3,<v00=reg128#9,>r5=reg128#3
# asm 2: vpor <v10=%xmm2,<v00=%xmm8,>r5=%xmm2
vpor %xmm2,%xmm8,%xmm2

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#2,<v01=reg128#8,>r7=reg128#2
# asm 2: vpor <v11=%xmm1,<v01=%xmm7,>r7=%xmm1
vpor %xmm1,%xmm7,%xmm1

8265# qhasm: v00 = r0 & mask4
8266# asm 1: vpand <mask4=reg128#5,<r0=reg128#10,>v00=reg128#4
8267# asm 2: vpand <mask4=%xmm4,<r0=%xmm9,>v00=%xmm3
8268vpand %xmm4,%xmm9,%xmm3
8269
8270# qhasm: v10 = r1 & mask4
8271# asm 1: vpand <mask4=reg128#5,<r1=reg128#13,>v10=reg128#8
8272# asm 2: vpand <mask4=%xmm4,<r1=%xmm12,>v10=%xmm7
8273vpand %xmm4,%xmm12,%xmm7
8274
8275# qhasm: 2x v10 <<= 1
8276# asm 1: psllq $1,<v10=reg128#8
8277# asm 2: psllq $1,<v10=%xmm7
8278psllq $1,%xmm7
8279
8280# qhasm: v01 = r0 & mask5
8281# asm 1: vpand <mask5=reg128#6,<r0=reg128#10,>v01=reg128#9
8282# asm 2: vpand <mask5=%xmm5,<r0=%xmm9,>v01=%xmm8
8283vpand %xmm5,%xmm9,%xmm8
8284
8285# qhasm: v11 = r1 & mask5
8286# asm 1: vpand <mask5=reg128#6,<r1=reg128#13,>v11=reg128#10
8287# asm 2: vpand <mask5=%xmm5,<r1=%xmm12,>v11=%xmm9
8288vpand %xmm5,%xmm12,%xmm9
8289
8290# qhasm: 2x v01 unsigned>>= 1
8291# asm 1: psrlq $1,<v01=reg128#9
8292# asm 2: psrlq $1,<v01=%xmm8
8293psrlq $1,%xmm8
8294
8295# qhasm: r0 = v00 | v10
8296# asm 1: vpor <v10=reg128#8,<v00=reg128#4,>r0=reg128#4
8297# asm 2: vpor <v10=%xmm7,<v00=%xmm3,>r0=%xmm3
8298vpor %xmm7,%xmm3,%xmm3
8299
8300# qhasm: r1 = v01 | v11
8301# asm 1: vpor <v11=reg128#10,<v01=reg128#9,>r1=reg128#8
8302# asm 2: vpor <v11=%xmm9,<v01=%xmm8,>r1=%xmm7
8303vpor %xmm9,%xmm8,%xmm7
8304
# qhasm: v00 = r2 & mask4
# asm 1: vpand <mask4=reg128#5,<r2=reg128#12,>v00=reg128#9
# asm 2: vpand <mask4=%xmm4,<r2=%xmm11,>v00=%xmm8
vpand %xmm4,%xmm11,%xmm8

# qhasm: v10 = r3 & mask4
# asm 1: vpand <mask4=reg128#5,<r3=reg128#1,>v10=reg128#10
# asm 2: vpand <mask4=%xmm4,<r3=%xmm0,>v10=%xmm9
vpand %xmm4,%xmm0,%xmm9

# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#10
# asm 2: psllq $1,<v10=%xmm9
psllq $1,%xmm9

# qhasm: v01 = r2 & mask5
# asm 1: vpand <mask5=reg128#6,<r2=reg128#12,>v01=reg128#12
# asm 2: vpand <mask5=%xmm5,<r2=%xmm11,>v01=%xmm11
vpand %xmm5,%xmm11,%xmm11

# qhasm: v11 = r3 & mask5
# asm 1: vpand <mask5=reg128#6,<r3=reg128#1,>v11=reg128#1
# asm 2: vpand <mask5=%xmm5,<r3=%xmm0,>v11=%xmm0
vpand %xmm5,%xmm0,%xmm0

# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#12
# asm 2: psrlq $1,<v01=%xmm11
psrlq $1,%xmm11

# qhasm: r2 = v00 | v10
# asm 1: vpor <v10=reg128#10,<v00=reg128#9,>r2=reg128#9
# asm 2: vpor <v10=%xmm9,<v00=%xmm8,>r2=%xmm8
vpor %xmm9,%xmm8,%xmm8

# qhasm: r3 = v01 | v11
# asm 1: vpor <v11=reg128#1,<v01=reg128#12,>r3=reg128#1
# asm 2: vpor <v11=%xmm0,<v01=%xmm11,>r3=%xmm0
vpor %xmm0,%xmm11,%xmm0

# qhasm: v00 = r4 & mask4
# asm 1: vpand <mask4=reg128#5,<r4=reg128#11,>v00=reg128#10
# asm 2: vpand <mask4=%xmm4,<r4=%xmm10,>v00=%xmm9
vpand %xmm4,%xmm10,%xmm9

# qhasm: v10 = r5 & mask4
# asm 1: vpand <mask4=reg128#5,<r5=reg128#3,>v10=reg128#12
# asm 2: vpand <mask4=%xmm4,<r5=%xmm2,>v10=%xmm11
vpand %xmm4,%xmm2,%xmm11

# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#12
# asm 2: psllq $1,<v10=%xmm11
psllq $1,%xmm11

# qhasm: v01 = r4 & mask5
# asm 1: vpand <mask5=reg128#6,<r4=reg128#11,>v01=reg128#11
# asm 2: vpand <mask5=%xmm5,<r4=%xmm10,>v01=%xmm10
vpand %xmm5,%xmm10,%xmm10

# qhasm: v11 = r5 & mask5
# asm 1: vpand <mask5=reg128#6,<r5=reg128#3,>v11=reg128#3
# asm 2: vpand <mask5=%xmm5,<r5=%xmm2,>v11=%xmm2
vpand %xmm5,%xmm2,%xmm2

# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#11
# asm 2: psrlq $1,<v01=%xmm10
psrlq $1,%xmm10

# qhasm: r4 = v00 | v10
# asm 1: vpor <v10=reg128#12,<v00=reg128#10,>r4=reg128#10
# asm 2: vpor <v10=%xmm11,<v00=%xmm9,>r4=%xmm9
vpor %xmm11,%xmm9,%xmm9

# qhasm: r5 = v01 | v11
# asm 1: vpor <v11=reg128#3,<v01=reg128#11,>r5=reg128#3
# asm 2: vpor <v11=%xmm2,<v01=%xmm10,>r5=%xmm2
vpor %xmm2,%xmm10,%xmm2

# qhasm: v00 = r6 & mask4
# asm 1: vpand <mask4=reg128#5,<r6=reg128#7,>v00=reg128#11
# asm 2: vpand <mask4=%xmm4,<r6=%xmm6,>v00=%xmm10
vpand %xmm4,%xmm6,%xmm10

# qhasm: v10 = r7 & mask4
# asm 1: vpand <mask4=reg128#5,<r7=reg128#2,>v10=reg128#5
# asm 2: vpand <mask4=%xmm4,<r7=%xmm1,>v10=%xmm4
vpand %xmm4,%xmm1,%xmm4

# qhasm: 2x v10 <<= 1
# asm 1: psllq $1,<v10=reg128#5
# asm 2: psllq $1,<v10=%xmm4
psllq $1,%xmm4

# qhasm: v01 = r6 & mask5
# asm 1: vpand <mask5=reg128#6,<r6=reg128#7,>v01=reg128#7
# asm 2: vpand <mask5=%xmm5,<r6=%xmm6,>v01=%xmm6
vpand %xmm5,%xmm6,%xmm6

# qhasm: v11 = r7 & mask5
# asm 1: vpand <mask5=reg128#6,<r7=reg128#2,>v11=reg128#2
# asm 2: vpand <mask5=%xmm5,<r7=%xmm1,>v11=%xmm1
vpand %xmm5,%xmm1,%xmm1

# qhasm: 2x v01 unsigned>>= 1
# asm 1: psrlq $1,<v01=reg128#7
# asm 2: psrlq $1,<v01=%xmm6
psrlq $1,%xmm6

# qhasm: r6 = v00 | v10
# asm 1: vpor <v10=reg128#5,<v00=reg128#11,>r6=reg128#5
# asm 2: vpor <v10=%xmm4,<v00=%xmm10,>r6=%xmm4
vpor %xmm4,%xmm10,%xmm4

# qhasm: r7 = v01 | v11
# asm 1: vpor <v11=reg128#2,<v01=reg128#7,>r7=reg128#2
# asm 2: vpor <v11=%xmm1,<v01=%xmm6,>r7=%xmm1
vpor %xmm1,%xmm6,%xmm1

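# note: with all exchange stages done, each vpunpcklqdq below packs
# the low 64-bit lanes of two consecutive row registers into one
# 128-bit value, which is then written back with an unaligned movdqu.
# Offsets 448..496 are the last four 16-byte stores into the 512-byte
# (64 rows x 64 bits) matrix at input_0.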
# qhasm: t0 = r0[0]r1[0]
# asm 1: vpunpcklqdq <r1=reg128#8,<r0=reg128#4,>t0=reg128#4
# asm 2: vpunpcklqdq <r1=%xmm7,<r0=%xmm3,>t0=%xmm3
vpunpcklqdq %xmm7,%xmm3,%xmm3

# qhasm: mem128[ input_0 + 448 ] = t0
# asm 1: movdqu   <t0=reg128#4,448(<input_0=int64#1)
# asm 2: movdqu   <t0=%xmm3,448(<input_0=%rdi)
movdqu   %xmm3,448(%rdi)

# qhasm: t0 = r2[0]r3[0]
# asm 1: vpunpcklqdq <r3=reg128#1,<r2=reg128#9,>t0=reg128#1
# asm 2: vpunpcklqdq <r3=%xmm0,<r2=%xmm8,>t0=%xmm0
vpunpcklqdq %xmm0,%xmm8,%xmm0

# qhasm: mem128[ input_0 + 464 ] = t0
# asm 1: movdqu   <t0=reg128#1,464(<input_0=int64#1)
# asm 2: movdqu   <t0=%xmm0,464(<input_0=%rdi)
movdqu   %xmm0,464(%rdi)

# qhasm: t0 = r4[0]r5[0]
# asm 1: vpunpcklqdq <r5=reg128#3,<r4=reg128#10,>t0=reg128#1
# asm 2: vpunpcklqdq <r5=%xmm2,<r4=%xmm9,>t0=%xmm0
vpunpcklqdq %xmm2,%xmm9,%xmm0

# qhasm: mem128[ input_0 + 480 ] = t0
# asm 1: movdqu   <t0=reg128#1,480(<input_0=int64#1)
# asm 2: movdqu   <t0=%xmm0,480(<input_0=%rdi)
movdqu   %xmm0,480(%rdi)

# qhasm: t0 = r6[0]r7[0]
# asm 1: vpunpcklqdq <r7=reg128#2,<r6=reg128#5,>t0=reg128#1
# asm 2: vpunpcklqdq <r7=%xmm1,<r6=%xmm4,>t0=%xmm0
vpunpcklqdq %xmm1,%xmm4,%xmm0

# qhasm: mem128[ input_0 + 496 ] = t0
# asm 1: movdqu   <t0=reg128#1,496(<input_0=int64#1)
# asm 2: movdqu   <t0=%xmm0,496(<input_0=%rdi)
movdqu   %xmm0,496(%rdi)

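# note: epilogue -- restore %rsp by adding back the alignment offset
# held in %r11, then return.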
# qhasm: return
add %r11,%rsp
ret