
# qhasm: int64 input_0

# qhasm: int64 input_1

# qhasm: int64 input_2

# qhasm: int64 input_3

# qhasm: int64 input_4

# qhasm: int64 input_5

# qhasm: stack64 input_6

# qhasm: stack64 input_7

# qhasm: int64 caller_r11

# qhasm: int64 caller_r12

# qhasm: int64 caller_r13

# qhasm: int64 caller_r14

# qhasm: int64 caller_r15

# qhasm: int64 caller_rbx

# qhasm: int64 caller_rbp

# qhasm: reg256 b0

# qhasm: reg256 b1

# qhasm: reg256 b2

# qhasm: reg256 b3

# qhasm: reg256 b4

# qhasm: reg256 b5

# qhasm: reg256 b6

# qhasm: reg256 b7

# qhasm: reg256 b8

# qhasm: reg256 b9

# qhasm: reg256 b10

# qhasm: reg256 b11

# qhasm: reg256 b12

# qhasm: reg256 a0

# qhasm: reg256 a1

# qhasm: reg256 a2

# qhasm: reg256 a3

# qhasm: reg256 a4

# qhasm: reg256 a5

# qhasm: reg256 a6

# qhasm: reg256 r0

# qhasm: reg256 r1

# qhasm: reg256 r2

# qhasm: reg256 r3

# qhasm: reg256 r4

# qhasm: reg256 r5

# qhasm: reg256 r6

# qhasm: reg256 r7

# qhasm: reg256 r8

# qhasm: reg256 r9

# qhasm: reg256 r10

# qhasm: reg256 r11

# qhasm: reg256 r12

# qhasm: reg256 r13

# qhasm: reg256 r14

# qhasm: reg256 r15

# qhasm: reg256 r16

# qhasm: reg256 r17

# qhasm: reg256 r18

# qhasm: reg256 r19

# qhasm: reg256 r20

# qhasm: reg256 r21

# qhasm: reg256 r22

# qhasm: reg256 r23

# qhasm: reg256 r24

# qhasm: reg256 r

# qhasm: reg128 h0

# qhasm: reg128 h1

# qhasm: reg128 h2

# qhasm: reg128 h3

# qhasm: reg128 h4

# qhasm: reg128 h5

# qhasm: reg128 h6

# qhasm: reg128 h7

# qhasm: reg128 h8

# qhasm: reg128 h9

# qhasm: reg128 h10

# qhasm: reg128 h11

# qhasm: reg128 h12

# qhasm: reg128 h13

# qhasm: reg128 h14

# qhasm: reg128 h15

# qhasm: reg128 h16

# qhasm: reg128 h17

# qhasm: reg128 h18

# qhasm: reg128 h19

# qhasm: reg128 h20

# qhasm: reg128 h21

# qhasm: reg128 h22

# qhasm: reg128 h23

# qhasm: reg128 h24

# qhasm: stack4864 buf

# qhasm: int64 ptr

# qhasm: int64 tmp

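# The code below appears to implement 128 parallel multiplications in
# GF(2^13) with field polynomial x^13 + x^4 + x^3 + x + 1 on bitsliced
# operands: limb k of an operand is a 128-bit vector holding bit k of 128
# independent field elements. Calling convention as inferred from the
# generated instructions (a reading of the code, not an authoritative API
# note):
#   input_0 (%rdi): output, 13 x 16-byte limbs written at offsets 0..192
#   input_1 (%rsi): first operand, 13 x 16-byte limbs read at offsets 0..192
#   input_2 (%rdx): base pointer of the second operand
#   input_3 (%rcx): byte stride between the second operand's limbs; the code
#                   starts at input_2 + 12*input_3 and walks downward
#
# A minimal C sketch of the same computation, assuming bitsliced 13-limb
# inputs; v128, vand and vxor are illustrative names, not the PQClean API:
#
#   v128 r[25] = {0};
#   for (int i = 0; i < 13; i++)                /* schoolbook product over */
#       for (int j = 0; j < 13; j++)            /* GF(2)[x]: AND = mul,    */
#           r[i + j] = vxor(r[i + j],           /* XOR = add               */
#                           vand(a[i], b[j]));
#   for (int i = 24; i >= 13; i--) {            /* fold using              */
#       r[i - 9]  = vxor(r[i - 9],  r[i]);      /* x^13 = x^4+x^3+x+1      */
#       r[i - 10] = vxor(r[i - 10], r[i]);
#       r[i - 12] = vxor(r[i - 12], r[i]);
#       r[i - 13] = vxor(r[i - 13], r[i]);
#   }
#   /* h[k] = r[k] for k = 0..12 */
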
# qhasm: enter vec128_mul_asm
.p2align 5
.global _PQCLEAN_MCELIECE6688128_AVX_vec128_mul_asm
.global PQCLEAN_MCELIECE6688128_AVX_vec128_mul_asm
_PQCLEAN_MCELIECE6688128_AVX_vec128_mul_asm:
PQCLEAN_MCELIECE6688128_AVX_vec128_mul_asm:
mov %rsp,%r11
and $31,%r11
add $608,%r11
sub %r11,%rsp

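# The three instructions above reserve an aligned scratch region for buf:
# r11 = (rsp mod 32) + 608, so rsp -= r11 leaves rsp 32-byte aligned with
# 608 bytes of scratch (608 = 19*32 = 4864/8, the size of stack4864 buf);
# the same r11 is added back to rsp before returning.
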
# qhasm: ptr = &buf
# asm 1: leaq <buf=stack4864#1,>ptr=int64#5
# asm 2: leaq <buf=0(%rsp),>ptr=%r8
leaq 0(%rsp),%r8

# qhasm: tmp = input_3
# asm 1: mov  <input_3=int64#4,>tmp=int64#6
# asm 2: mov  <input_3=%rcx,>tmp=%r9
mov  %rcx,%r9

# qhasm: tmp *= 12
# asm 1: imulq  $12,<tmp=int64#6,>tmp=int64#6
# asm 2: imulq  $12,<tmp=%r9,>tmp=%r9
imulq  $12,%r9,%r9

# qhasm: input_2 += tmp
# asm 1: add  <tmp=int64#6,<input_2=int64#3
# asm 2: add  <tmp=%r9,<input_2=%rdx
add  %r9,%rdx

# qhasm: b12 = mem128[ input_2 + 0 ] x2
# asm 1: vbroadcasti128 0(<input_2=int64#3), >b12=reg256#1
# asm 2: vbroadcasti128 0(<input_2=%rdx), >b12=%ymm0
vbroadcasti128 0(%rdx), %ymm0

# qhasm: input_2 -= input_3
# asm 1: sub  <input_3=int64#4,<input_2=int64#3
# asm 2: sub  <input_3=%rcx,<input_2=%rdx
sub  %rcx,%rdx

# qhasm: a6 = a6 ^ a6
# asm 1: vpxor <a6=reg256#2,<a6=reg256#2,>a6=reg256#2
# asm 2: vpxor <a6=%ymm1,<a6=%ymm1,>a6=%ymm1
vpxor %ymm1,%ymm1,%ymm1

# qhasm: a6[0] = mem128[ input_1 + 96 ]
# asm 1: vinsertf128 $0x0,96(<input_1=int64#2),<a6=reg256#2,<a6=reg256#2
# asm 2: vinsertf128 $0x0,96(<input_1=%rsi),<a6=%ymm1,<a6=%ymm1
vinsertf128 $0x0,96(%rsi),%ymm1,%ymm1

# qhasm: r18 = b12 & a6
# asm 1: vpand <b12=reg256#1,<a6=reg256#2,>r18=reg256#3
# asm 2: vpand <b12=%ymm0,<a6=%ymm1,>r18=%ymm2
vpand %ymm0,%ymm1,%ymm2

# qhasm: mem256[ ptr + 576 ] = r18
# asm 1: vmovupd   <r18=reg256#3,576(<ptr=int64#5)
# asm 2: vmovupd   <r18=%ymm2,576(<ptr=%r8)
vmovupd   %ymm2,576(%r8)

# qhasm: a5[0] = mem128[ input_1 + 80 ]
# asm 1: vinsertf128 $0x0,80(<input_1=int64#2),<a5=reg256#3,<a5=reg256#3
# asm 2: vinsertf128 $0x0,80(<input_1=%rsi),<a5=%ymm2,<a5=%ymm2
vinsertf128 $0x0,80(%rsi),%ymm2,%ymm2

# qhasm: a5[1] = mem128[ input_1 + 192 ]
# asm 1: vinsertf128 $0x1,192(<input_1=int64#2),<a5=reg256#3,<a5=reg256#3
# asm 2: vinsertf128 $0x1,192(<input_1=%rsi),<a5=%ymm2,<a5=%ymm2
vinsertf128 $0x1,192(%rsi),%ymm2,%ymm2

# qhasm: r17 = b12 & a5
# asm 1: vpand <b12=reg256#1,<a5=reg256#3,>r17=reg256#4
# asm 2: vpand <b12=%ymm0,<a5=%ymm2,>r17=%ymm3
vpand %ymm0,%ymm2,%ymm3

# qhasm: a4[0] = mem128[ input_1 + 64 ]
# asm 1: vinsertf128 $0x0,64(<input_1=int64#2),<a4=reg256#5,<a4=reg256#5
# asm 2: vinsertf128 $0x0,64(<input_1=%rsi),<a4=%ymm4,<a4=%ymm4
vinsertf128 $0x0,64(%rsi),%ymm4,%ymm4

# qhasm: a4[1] = mem128[ input_1 + 176 ]
# asm 1: vinsertf128 $0x1,176(<input_1=int64#2),<a4=reg256#5,<a4=reg256#5
# asm 2: vinsertf128 $0x1,176(<input_1=%rsi),<a4=%ymm4,<a4=%ymm4
vinsertf128 $0x1,176(%rsi),%ymm4,%ymm4

# qhasm: r16 = b12 & a4
# asm 1: vpand <b12=reg256#1,<a4=reg256#5,>r16=reg256#6
# asm 2: vpand <b12=%ymm0,<a4=%ymm4,>r16=%ymm5
vpand %ymm0,%ymm4,%ymm5

# qhasm: a3[0] = mem128[ input_1 + 48 ]
# asm 1: vinsertf128 $0x0,48(<input_1=int64#2),<a3=reg256#7,<a3=reg256#7
# asm 2: vinsertf128 $0x0,48(<input_1=%rsi),<a3=%ymm6,<a3=%ymm6
vinsertf128 $0x0,48(%rsi),%ymm6,%ymm6

# qhasm: a3[1] = mem128[ input_1 + 160 ]
# asm 1: vinsertf128 $0x1,160(<input_1=int64#2),<a3=reg256#7,<a3=reg256#7
# asm 2: vinsertf128 $0x1,160(<input_1=%rsi),<a3=%ymm6,<a3=%ymm6
vinsertf128 $0x1,160(%rsi),%ymm6,%ymm6

# qhasm: r15 = b12 & a3
# asm 1: vpand <b12=reg256#1,<a3=reg256#7,>r15=reg256#8
# asm 2: vpand <b12=%ymm0,<a3=%ymm6,>r15=%ymm7
vpand %ymm0,%ymm6,%ymm7

# qhasm: a2[0] = mem128[ input_1 + 32 ]
# asm 1: vinsertf128 $0x0,32(<input_1=int64#2),<a2=reg256#9,<a2=reg256#9
# asm 2: vinsertf128 $0x0,32(<input_1=%rsi),<a2=%ymm8,<a2=%ymm8
vinsertf128 $0x0,32(%rsi),%ymm8,%ymm8

# qhasm: a2[1] = mem128[ input_1 + 144 ]
# asm 1: vinsertf128 $0x1,144(<input_1=int64#2),<a2=reg256#9,<a2=reg256#9
# asm 2: vinsertf128 $0x1,144(<input_1=%rsi),<a2=%ymm8,<a2=%ymm8
vinsertf128 $0x1,144(%rsi),%ymm8,%ymm8

# qhasm: r14 = b12 & a2
# asm 1: vpand <b12=reg256#1,<a2=reg256#9,>r14=reg256#10
# asm 2: vpand <b12=%ymm0,<a2=%ymm8,>r14=%ymm9
vpand %ymm0,%ymm8,%ymm9

# qhasm: a1[0] = mem128[ input_1 + 16 ]
# asm 1: vinsertf128 $0x0,16(<input_1=int64#2),<a1=reg256#11,<a1=reg256#11
# asm 2: vinsertf128 $0x0,16(<input_1=%rsi),<a1=%ymm10,<a1=%ymm10
vinsertf128 $0x0,16(%rsi),%ymm10,%ymm10

# qhasm: a1[1] = mem128[ input_1 + 128 ]
# asm 1: vinsertf128 $0x1,128(<input_1=int64#2),<a1=reg256#11,<a1=reg256#11
# asm 2: vinsertf128 $0x1,128(<input_1=%rsi),<a1=%ymm10,<a1=%ymm10
vinsertf128 $0x1,128(%rsi),%ymm10,%ymm10

# qhasm: r13 = b12 & a1
# asm 1: vpand <b12=reg256#1,<a1=reg256#11,>r13=reg256#12
# asm 2: vpand <b12=%ymm0,<a1=%ymm10,>r13=%ymm11
vpand %ymm0,%ymm10,%ymm11

# qhasm: a0[0] = mem128[ input_1 + 0 ]
# asm 1: vinsertf128 $0x0,0(<input_1=int64#2),<a0=reg256#13,<a0=reg256#13
# asm 2: vinsertf128 $0x0,0(<input_1=%rsi),<a0=%ymm12,<a0=%ymm12
vinsertf128 $0x0,0(%rsi),%ymm12,%ymm12

# qhasm: a0[1] = mem128[ input_1 + 112 ]
# asm 1: vinsertf128 $0x1,112(<input_1=int64#2),<a0=reg256#13,<a0=reg256#13
# asm 2: vinsertf128 $0x1,112(<input_1=%rsi),<a0=%ymm12,<a0=%ymm12
vinsertf128 $0x1,112(%rsi),%ymm12,%ymm12

# qhasm: r12 = b12 & a0
# asm 1: vpand <b12=reg256#1,<a0=reg256#13,>r12=reg256#1
# asm 2: vpand <b12=%ymm0,<a0=%ymm12,>r12=%ymm0
vpand %ymm0,%ymm12,%ymm0

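# Each of the remaining b-columns below follows the same pattern: broadcast
# one 16-byte limb of the second operand into both ymm lanes, AND it with the
# a-limbs, and XOR the products into the running partial sums r*. The a-limbs
# are packed two per ymm register (limb j in lane 0, limb j+7 in lane 1, with
# a6's lane 1 left zero above since there is no limb 13), so the 32-byte
# spill of r_k at buf + 32*k holds product limb k in its low half and limb
# k+7 in its high half.
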
# qhasm: b11 = mem128[ input_2 + 0 ] x2
# asm 1: vbroadcasti128 0(<input_2=int64#3), >b11=reg256#14
# asm 2: vbroadcasti128 0(<input_2=%rdx), >b11=%ymm13
vbroadcasti128 0(%rdx), %ymm13

# qhasm: input_2 -= input_3
# asm 1: sub  <input_3=int64#4,<input_2=int64#3
# asm 2: sub  <input_3=%rcx,<input_2=%rdx
sub  %rcx,%rdx

# qhasm: r = b11 & a6
# asm 1: vpand <b11=reg256#14,<a6=reg256#2,>r=reg256#15
# asm 2: vpand <b11=%ymm13,<a6=%ymm1,>r=%ymm14
vpand %ymm13,%ymm1,%ymm14

# qhasm: r17 ^= r
# asm 1: vpxor <r=reg256#15,<r17=reg256#4,<r17=reg256#4
# asm 2: vpxor <r=%ymm14,<r17=%ymm3,<r17=%ymm3
vpxor %ymm14,%ymm3,%ymm3

# qhasm: mem256[ ptr + 544 ] = r17
# asm 1: vmovupd   <r17=reg256#4,544(<ptr=int64#5)
# asm 2: vmovupd   <r17=%ymm3,544(<ptr=%r8)
vmovupd   %ymm3,544(%r8)

# qhasm: r = b11 & a5
# asm 1: vpand <b11=reg256#14,<a5=reg256#3,>r=reg256#4
# asm 2: vpand <b11=%ymm13,<a5=%ymm2,>r=%ymm3
vpand %ymm13,%ymm2,%ymm3

# qhasm: r16 ^= r
# asm 1: vpxor <r=reg256#4,<r16=reg256#6,<r16=reg256#6
# asm 2: vpxor <r=%ymm3,<r16=%ymm5,<r16=%ymm5
vpxor %ymm3,%ymm5,%ymm5

# qhasm: r = b11 & a4
# asm 1: vpand <b11=reg256#14,<a4=reg256#5,>r=reg256#4
# asm 2: vpand <b11=%ymm13,<a4=%ymm4,>r=%ymm3
vpand %ymm13,%ymm4,%ymm3

# qhasm: r15 ^= r
# asm 1: vpxor <r=reg256#4,<r15=reg256#8,<r15=reg256#8
# asm 2: vpxor <r=%ymm3,<r15=%ymm7,<r15=%ymm7
vpxor %ymm3,%ymm7,%ymm7

# qhasm: r = b11 & a3
# asm 1: vpand <b11=reg256#14,<a3=reg256#7,>r=reg256#4
# asm 2: vpand <b11=%ymm13,<a3=%ymm6,>r=%ymm3
vpand %ymm13,%ymm6,%ymm3

# qhasm: r14 ^= r
# asm 1: vpxor <r=reg256#4,<r14=reg256#10,<r14=reg256#10
# asm 2: vpxor <r=%ymm3,<r14=%ymm9,<r14=%ymm9
vpxor %ymm3,%ymm9,%ymm9

# qhasm: r = b11 & a2
# asm 1: vpand <b11=reg256#14,<a2=reg256#9,>r=reg256#4
# asm 2: vpand <b11=%ymm13,<a2=%ymm8,>r=%ymm3
vpand %ymm13,%ymm8,%ymm3

# qhasm: r13 ^= r
# asm 1: vpxor <r=reg256#4,<r13=reg256#12,<r13=reg256#12
# asm 2: vpxor <r=%ymm3,<r13=%ymm11,<r13=%ymm11
vpxor %ymm3,%ymm11,%ymm11

# qhasm: r = b11 & a1
# asm 1: vpand <b11=reg256#14,<a1=reg256#11,>r=reg256#4
# asm 2: vpand <b11=%ymm13,<a1=%ymm10,>r=%ymm3
vpand %ymm13,%ymm10,%ymm3

# qhasm: r12 ^= r
# asm 1: vpxor <r=reg256#4,<r12=reg256#1,<r12=reg256#1
# asm 2: vpxor <r=%ymm3,<r12=%ymm0,<r12=%ymm0
vpxor %ymm3,%ymm0,%ymm0

# qhasm: r11 = b11 & a0
# asm 1: vpand <b11=reg256#14,<a0=reg256#13,>r11=reg256#4
# asm 2: vpand <b11=%ymm13,<a0=%ymm12,>r11=%ymm3
vpand %ymm13,%ymm12,%ymm3

# qhasm: b10 = mem128[ input_2 + 0 ] x2
# asm 1: vbroadcasti128 0(<input_2=int64#3), >b10=reg256#14
# asm 2: vbroadcasti128 0(<input_2=%rdx), >b10=%ymm13
vbroadcasti128 0(%rdx), %ymm13

# qhasm: input_2 -= input_3
# asm 1: sub  <input_3=int64#4,<input_2=int64#3
# asm 2: sub  <input_3=%rcx,<input_2=%rdx
sub  %rcx,%rdx

# qhasm: r = b10 & a6
# asm 1: vpand <b10=reg256#14,<a6=reg256#2,>r=reg256#15
# asm 2: vpand <b10=%ymm13,<a6=%ymm1,>r=%ymm14
vpand %ymm13,%ymm1,%ymm14

# qhasm: r16 ^= r
# asm 1: vpxor <r=reg256#15,<r16=reg256#6,<r16=reg256#6
# asm 2: vpxor <r=%ymm14,<r16=%ymm5,<r16=%ymm5
vpxor %ymm14,%ymm5,%ymm5

# qhasm: mem256[ ptr + 512 ] = r16
# asm 1: vmovupd   <r16=reg256#6,512(<ptr=int64#5)
# asm 2: vmovupd   <r16=%ymm5,512(<ptr=%r8)
vmovupd   %ymm5,512(%r8)

# qhasm: r = b10 & a5
# asm 1: vpand <b10=reg256#14,<a5=reg256#3,>r=reg256#6
# asm 2: vpand <b10=%ymm13,<a5=%ymm2,>r=%ymm5
vpand %ymm13,%ymm2,%ymm5

# qhasm: r15 ^= r
# asm 1: vpxor <r=reg256#6,<r15=reg256#8,<r15=reg256#8
# asm 2: vpxor <r=%ymm5,<r15=%ymm7,<r15=%ymm7
vpxor %ymm5,%ymm7,%ymm7

# qhasm: r = b10 & a4
# asm 1: vpand <b10=reg256#14,<a4=reg256#5,>r=reg256#6
# asm 2: vpand <b10=%ymm13,<a4=%ymm4,>r=%ymm5
vpand %ymm13,%ymm4,%ymm5

# qhasm: r14 ^= r
# asm 1: vpxor <r=reg256#6,<r14=reg256#10,<r14=reg256#10
# asm 2: vpxor <r=%ymm5,<r14=%ymm9,<r14=%ymm9
vpxor %ymm5,%ymm9,%ymm9

# qhasm: r = b10 & a3
# asm 1: vpand <b10=reg256#14,<a3=reg256#7,>r=reg256#6
# asm 2: vpand <b10=%ymm13,<a3=%ymm6,>r=%ymm5
vpand %ymm13,%ymm6,%ymm5

# qhasm: r13 ^= r
# asm 1: vpxor <r=reg256#6,<r13=reg256#12,<r13=reg256#12
# asm 2: vpxor <r=%ymm5,<r13=%ymm11,<r13=%ymm11
vpxor %ymm5,%ymm11,%ymm11

# qhasm: r = b10 & a2
# asm 1: vpand <b10=reg256#14,<a2=reg256#9,>r=reg256#6
# asm 2: vpand <b10=%ymm13,<a2=%ymm8,>r=%ymm5
vpand %ymm13,%ymm8,%ymm5

# qhasm: r12 ^= r
# asm 1: vpxor <r=reg256#6,<r12=reg256#1,<r12=reg256#1
# asm 2: vpxor <r=%ymm5,<r12=%ymm0,<r12=%ymm0
vpxor %ymm5,%ymm0,%ymm0

# qhasm: r = b10 & a1
# asm 1: vpand <b10=reg256#14,<a1=reg256#11,>r=reg256#6
# asm 2: vpand <b10=%ymm13,<a1=%ymm10,>r=%ymm5
vpand %ymm13,%ymm10,%ymm5

# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#6,<r11=reg256#4,<r11=reg256#4
# asm 2: vpxor <r=%ymm5,<r11=%ymm3,<r11=%ymm3
vpxor %ymm5,%ymm3,%ymm3

# qhasm: r10 = b10 & a0
# asm 1: vpand <b10=reg256#14,<a0=reg256#13,>r10=reg256#6
# asm 2: vpand <b10=%ymm13,<a0=%ymm12,>r10=%ymm5
vpand %ymm13,%ymm12,%ymm5

# qhasm: b9 = mem128[ input_2 + 0 ] x2
# asm 1: vbroadcasti128 0(<input_2=int64#3), >b9=reg256#14
# asm 2: vbroadcasti128 0(<input_2=%rdx), >b9=%ymm13
vbroadcasti128 0(%rdx), %ymm13

# qhasm: input_2 -= input_3
# asm 1: sub  <input_3=int64#4,<input_2=int64#3
# asm 2: sub  <input_3=%rcx,<input_2=%rdx
sub  %rcx,%rdx

# qhasm: r = b9 & a6
# asm 1: vpand <b9=reg256#14,<a6=reg256#2,>r=reg256#15
# asm 2: vpand <b9=%ymm13,<a6=%ymm1,>r=%ymm14
vpand %ymm13,%ymm1,%ymm14

# qhasm: r15 ^= r
# asm 1: vpxor <r=reg256#15,<r15=reg256#8,<r15=reg256#8
# asm 2: vpxor <r=%ymm14,<r15=%ymm7,<r15=%ymm7
vpxor %ymm14,%ymm7,%ymm7

# qhasm: mem256[ ptr + 480 ] = r15
# asm 1: vmovupd   <r15=reg256#8,480(<ptr=int64#5)
# asm 2: vmovupd   <r15=%ymm7,480(<ptr=%r8)
vmovupd   %ymm7,480(%r8)

# qhasm: r = b9 & a5
# asm 1: vpand <b9=reg256#14,<a5=reg256#3,>r=reg256#8
# asm 2: vpand <b9=%ymm13,<a5=%ymm2,>r=%ymm7
vpand %ymm13,%ymm2,%ymm7

# qhasm: r14 ^= r
# asm 1: vpxor <r=reg256#8,<r14=reg256#10,<r14=reg256#10
# asm 2: vpxor <r=%ymm7,<r14=%ymm9,<r14=%ymm9
vpxor %ymm7,%ymm9,%ymm9

# qhasm: r = b9 & a4
# asm 1: vpand <b9=reg256#14,<a4=reg256#5,>r=reg256#8
# asm 2: vpand <b9=%ymm13,<a4=%ymm4,>r=%ymm7
vpand %ymm13,%ymm4,%ymm7

# qhasm: r13 ^= r
# asm 1: vpxor <r=reg256#8,<r13=reg256#12,<r13=reg256#12
# asm 2: vpxor <r=%ymm7,<r13=%ymm11,<r13=%ymm11
vpxor %ymm7,%ymm11,%ymm11

# qhasm: r = b9 & a3
# asm 1: vpand <b9=reg256#14,<a3=reg256#7,>r=reg256#8
# asm 2: vpand <b9=%ymm13,<a3=%ymm6,>r=%ymm7
vpand %ymm13,%ymm6,%ymm7

# qhasm: r12 ^= r
# asm 1: vpxor <r=reg256#8,<r12=reg256#1,<r12=reg256#1
# asm 2: vpxor <r=%ymm7,<r12=%ymm0,<r12=%ymm0
vpxor %ymm7,%ymm0,%ymm0

# qhasm: r = b9 & a2
# asm 1: vpand <b9=reg256#14,<a2=reg256#9,>r=reg256#8
# asm 2: vpand <b9=%ymm13,<a2=%ymm8,>r=%ymm7
vpand %ymm13,%ymm8,%ymm7

# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#8,<r11=reg256#4,<r11=reg256#4
# asm 2: vpxor <r=%ymm7,<r11=%ymm3,<r11=%ymm3
vpxor %ymm7,%ymm3,%ymm3

# qhasm: r = b9 & a1
# asm 1: vpand <b9=reg256#14,<a1=reg256#11,>r=reg256#8
# asm 2: vpand <b9=%ymm13,<a1=%ymm10,>r=%ymm7
vpand %ymm13,%ymm10,%ymm7

# qhasm: r10 ^= r
# asm 1: vpxor <r=reg256#8,<r10=reg256#6,<r10=reg256#6
# asm 2: vpxor <r=%ymm7,<r10=%ymm5,<r10=%ymm5
vpxor %ymm7,%ymm5,%ymm5

# qhasm: r9 = b9 & a0
# asm 1: vpand <b9=reg256#14,<a0=reg256#13,>r9=reg256#8
# asm 2: vpand <b9=%ymm13,<a0=%ymm12,>r9=%ymm7
vpand %ymm13,%ymm12,%ymm7

# qhasm: b8 = mem128[ input_2 + 0 ] x2
# asm 1: vbroadcasti128 0(<input_2=int64#3), >b8=reg256#14
# asm 2: vbroadcasti128 0(<input_2=%rdx), >b8=%ymm13
vbroadcasti128 0(%rdx), %ymm13

# qhasm: input_2 -= input_3
# asm 1: sub  <input_3=int64#4,<input_2=int64#3
# asm 2: sub  <input_3=%rcx,<input_2=%rdx
sub  %rcx,%rdx

# qhasm: r = b8 & a6
# asm 1: vpand <b8=reg256#14,<a6=reg256#2,>r=reg256#15
# asm 2: vpand <b8=%ymm13,<a6=%ymm1,>r=%ymm14
vpand %ymm13,%ymm1,%ymm14

# qhasm: r14 ^= r
# asm 1: vpxor <r=reg256#15,<r14=reg256#10,<r14=reg256#10
# asm 2: vpxor <r=%ymm14,<r14=%ymm9,<r14=%ymm9
vpxor %ymm14,%ymm9,%ymm9

# qhasm: mem256[ ptr + 448 ] = r14
# asm 1: vmovupd   <r14=reg256#10,448(<ptr=int64#5)
# asm 2: vmovupd   <r14=%ymm9,448(<ptr=%r8)
vmovupd   %ymm9,448(%r8)

# qhasm: r = b8 & a5
# asm 1: vpand <b8=reg256#14,<a5=reg256#3,>r=reg256#10
# asm 2: vpand <b8=%ymm13,<a5=%ymm2,>r=%ymm9
vpand %ymm13,%ymm2,%ymm9

# qhasm: r13 ^= r
# asm 1: vpxor <r=reg256#10,<r13=reg256#12,<r13=reg256#12
# asm 2: vpxor <r=%ymm9,<r13=%ymm11,<r13=%ymm11
vpxor %ymm9,%ymm11,%ymm11

# qhasm: r = b8 & a4
# asm 1: vpand <b8=reg256#14,<a4=reg256#5,>r=reg256#10
# asm 2: vpand <b8=%ymm13,<a4=%ymm4,>r=%ymm9
vpand %ymm13,%ymm4,%ymm9

# qhasm: r12 ^= r
# asm 1: vpxor <r=reg256#10,<r12=reg256#1,<r12=reg256#1
# asm 2: vpxor <r=%ymm9,<r12=%ymm0,<r12=%ymm0
vpxor %ymm9,%ymm0,%ymm0

# qhasm: r = b8 & a3
# asm 1: vpand <b8=reg256#14,<a3=reg256#7,>r=reg256#10
# asm 2: vpand <b8=%ymm13,<a3=%ymm6,>r=%ymm9
vpand %ymm13,%ymm6,%ymm9

# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#10,<r11=reg256#4,<r11=reg256#4
# asm 2: vpxor <r=%ymm9,<r11=%ymm3,<r11=%ymm3
vpxor %ymm9,%ymm3,%ymm3

# qhasm: r = b8 & a2
# asm 1: vpand <b8=reg256#14,<a2=reg256#9,>r=reg256#10
# asm 2: vpand <b8=%ymm13,<a2=%ymm8,>r=%ymm9
vpand %ymm13,%ymm8,%ymm9

# qhasm: r10 ^= r
# asm 1: vpxor <r=reg256#10,<r10=reg256#6,<r10=reg256#6
# asm 2: vpxor <r=%ymm9,<r10=%ymm5,<r10=%ymm5
vpxor %ymm9,%ymm5,%ymm5

# qhasm: r = b8 & a1
# asm 1: vpand <b8=reg256#14,<a1=reg256#11,>r=reg256#10
# asm 2: vpand <b8=%ymm13,<a1=%ymm10,>r=%ymm9
vpand %ymm13,%ymm10,%ymm9

# qhasm: r9 ^= r
# asm 1: vpxor <r=reg256#10,<r9=reg256#8,<r9=reg256#8
# asm 2: vpxor <r=%ymm9,<r9=%ymm7,<r9=%ymm7
vpxor %ymm9,%ymm7,%ymm7

# qhasm: r8 = b8 & a0
# asm 1: vpand <b8=reg256#14,<a0=reg256#13,>r8=reg256#10
# asm 2: vpand <b8=%ymm13,<a0=%ymm12,>r8=%ymm9
vpand %ymm13,%ymm12,%ymm9

# qhasm: b7 = mem128[ input_2 + 0 ] x2
# asm 1: vbroadcasti128 0(<input_2=int64#3), >b7=reg256#14
# asm 2: vbroadcasti128 0(<input_2=%rdx), >b7=%ymm13
vbroadcasti128 0(%rdx), %ymm13

# qhasm: input_2 -= input_3
# asm 1: sub  <input_3=int64#4,<input_2=int64#3
# asm 2: sub  <input_3=%rcx,<input_2=%rdx
sub  %rcx,%rdx

# qhasm: r = b7 & a6
# asm 1: vpand <b7=reg256#14,<a6=reg256#2,>r=reg256#15
# asm 2: vpand <b7=%ymm13,<a6=%ymm1,>r=%ymm14
vpand %ymm13,%ymm1,%ymm14

# qhasm: r13 ^= r
# asm 1: vpxor <r=reg256#15,<r13=reg256#12,<r13=reg256#12
# asm 2: vpxor <r=%ymm14,<r13=%ymm11,<r13=%ymm11
vpxor %ymm14,%ymm11,%ymm11

# qhasm: mem256[ ptr + 416 ] = r13
# asm 1: vmovupd   <r13=reg256#12,416(<ptr=int64#5)
# asm 2: vmovupd   <r13=%ymm11,416(<ptr=%r8)
vmovupd   %ymm11,416(%r8)

# qhasm: r = b7 & a5
# asm 1: vpand <b7=reg256#14,<a5=reg256#3,>r=reg256#12
# asm 2: vpand <b7=%ymm13,<a5=%ymm2,>r=%ymm11
vpand %ymm13,%ymm2,%ymm11

# qhasm: r12 ^= r
# asm 1: vpxor <r=reg256#12,<r12=reg256#1,<r12=reg256#1
# asm 2: vpxor <r=%ymm11,<r12=%ymm0,<r12=%ymm0
vpxor %ymm11,%ymm0,%ymm0

# qhasm: r = b7 & a4
# asm 1: vpand <b7=reg256#14,<a4=reg256#5,>r=reg256#12
# asm 2: vpand <b7=%ymm13,<a4=%ymm4,>r=%ymm11
vpand %ymm13,%ymm4,%ymm11

# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#12,<r11=reg256#4,<r11=reg256#4
# asm 2: vpxor <r=%ymm11,<r11=%ymm3,<r11=%ymm3
vpxor %ymm11,%ymm3,%ymm3

# qhasm: r = b7 & a3
# asm 1: vpand <b7=reg256#14,<a3=reg256#7,>r=reg256#12
# asm 2: vpand <b7=%ymm13,<a3=%ymm6,>r=%ymm11
vpand %ymm13,%ymm6,%ymm11

# qhasm: r10 ^= r
# asm 1: vpxor <r=reg256#12,<r10=reg256#6,<r10=reg256#6
# asm 2: vpxor <r=%ymm11,<r10=%ymm5,<r10=%ymm5
vpxor %ymm11,%ymm5,%ymm5

# qhasm: r = b7 & a2
# asm 1: vpand <b7=reg256#14,<a2=reg256#9,>r=reg256#12
# asm 2: vpand <b7=%ymm13,<a2=%ymm8,>r=%ymm11
vpand %ymm13,%ymm8,%ymm11

# qhasm: r9 ^= r
# asm 1: vpxor <r=reg256#12,<r9=reg256#8,<r9=reg256#8
# asm 2: vpxor <r=%ymm11,<r9=%ymm7,<r9=%ymm7
vpxor %ymm11,%ymm7,%ymm7

# qhasm: r = b7 & a1
# asm 1: vpand <b7=reg256#14,<a1=reg256#11,>r=reg256#12
# asm 2: vpand <b7=%ymm13,<a1=%ymm10,>r=%ymm11
vpand %ymm13,%ymm10,%ymm11

# qhasm: r8 ^= r
# asm 1: vpxor <r=reg256#12,<r8=reg256#10,<r8=reg256#10
# asm 2: vpxor <r=%ymm11,<r8=%ymm9,<r8=%ymm9
vpxor %ymm11,%ymm9,%ymm9

# qhasm: r7 = b7 & a0
# asm 1: vpand <b7=reg256#14,<a0=reg256#13,>r7=reg256#12
# asm 2: vpand <b7=%ymm13,<a0=%ymm12,>r7=%ymm11
vpand %ymm13,%ymm12,%ymm11

# qhasm: b6 = mem128[ input_2 + 0 ] x2
# asm 1: vbroadcasti128 0(<input_2=int64#3), >b6=reg256#14
# asm 2: vbroadcasti128 0(<input_2=%rdx), >b6=%ymm13
vbroadcasti128 0(%rdx), %ymm13

# qhasm: input_2 -= input_3
# asm 1: sub  <input_3=int64#4,<input_2=int64#3
# asm 2: sub  <input_3=%rcx,<input_2=%rdx
sub  %rcx,%rdx

# qhasm: r = b6 & a6
# asm 1: vpand <b6=reg256#14,<a6=reg256#2,>r=reg256#15
# asm 2: vpand <b6=%ymm13,<a6=%ymm1,>r=%ymm14
vpand %ymm13,%ymm1,%ymm14

# qhasm: r12 ^= r
# asm 1: vpxor <r=reg256#15,<r12=reg256#1,<r12=reg256#1
# asm 2: vpxor <r=%ymm14,<r12=%ymm0,<r12=%ymm0
vpxor %ymm14,%ymm0,%ymm0

# qhasm: mem256[ ptr + 384 ] = r12
# asm 1: vmovupd   <r12=reg256#1,384(<ptr=int64#5)
# asm 2: vmovupd   <r12=%ymm0,384(<ptr=%r8)
vmovupd   %ymm0,384(%r8)

# qhasm: r = b6 & a5
# asm 1: vpand <b6=reg256#14,<a5=reg256#3,>r=reg256#1
# asm 2: vpand <b6=%ymm13,<a5=%ymm2,>r=%ymm0
vpand %ymm13,%ymm2,%ymm0

# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#1,<r11=reg256#4,<r11=reg256#4
# asm 2: vpxor <r=%ymm0,<r11=%ymm3,<r11=%ymm3
vpxor %ymm0,%ymm3,%ymm3

# qhasm: r = b6 & a4
# asm 1: vpand <b6=reg256#14,<a4=reg256#5,>r=reg256#1
# asm 2: vpand <b6=%ymm13,<a4=%ymm4,>r=%ymm0
vpand %ymm13,%ymm4,%ymm0

# qhasm: r10 ^= r
# asm 1: vpxor <r=reg256#1,<r10=reg256#6,<r10=reg256#6
# asm 2: vpxor <r=%ymm0,<r10=%ymm5,<r10=%ymm5
vpxor %ymm0,%ymm5,%ymm5

# qhasm: r = b6 & a3
# asm 1: vpand <b6=reg256#14,<a3=reg256#7,>r=reg256#1
# asm 2: vpand <b6=%ymm13,<a3=%ymm6,>r=%ymm0
vpand %ymm13,%ymm6,%ymm0

# qhasm: r9 ^= r
# asm 1: vpxor <r=reg256#1,<r9=reg256#8,<r9=reg256#8
# asm 2: vpxor <r=%ymm0,<r9=%ymm7,<r9=%ymm7
vpxor %ymm0,%ymm7,%ymm7

# qhasm: r = b6 & a2
# asm 1: vpand <b6=reg256#14,<a2=reg256#9,>r=reg256#1
# asm 2: vpand <b6=%ymm13,<a2=%ymm8,>r=%ymm0
vpand %ymm13,%ymm8,%ymm0

# qhasm: r8 ^= r
# asm 1: vpxor <r=reg256#1,<r8=reg256#10,<r8=reg256#10
# asm 2: vpxor <r=%ymm0,<r8=%ymm9,<r8=%ymm9
vpxor %ymm0,%ymm9,%ymm9

# qhasm: r = b6 & a1
# asm 1: vpand <b6=reg256#14,<a1=reg256#11,>r=reg256#1
# asm 2: vpand <b6=%ymm13,<a1=%ymm10,>r=%ymm0
vpand %ymm13,%ymm10,%ymm0

# qhasm: r7 ^= r
# asm 1: vpxor <r=reg256#1,<r7=reg256#12,<r7=reg256#12
# asm 2: vpxor <r=%ymm0,<r7=%ymm11,<r7=%ymm11
vpxor %ymm0,%ymm11,%ymm11

# qhasm: r6 = b6 & a0
# asm 1: vpand <b6=reg256#14,<a0=reg256#13,>r6=reg256#1
# asm 2: vpand <b6=%ymm13,<a0=%ymm12,>r6=%ymm0
vpand %ymm13,%ymm12,%ymm0

# qhasm: b5 = mem128[ input_2 + 0 ] x2
# asm 1: vbroadcasti128 0(<input_2=int64#3), >b5=reg256#14
# asm 2: vbroadcasti128 0(<input_2=%rdx), >b5=%ymm13
vbroadcasti128 0(%rdx), %ymm13

# qhasm: input_2 -= input_3
# asm 1: sub  <input_3=int64#4,<input_2=int64#3
# asm 2: sub  <input_3=%rcx,<input_2=%rdx
sub  %rcx,%rdx

# qhasm: r = b5 & a6
# asm 1: vpand <b5=reg256#14,<a6=reg256#2,>r=reg256#15
# asm 2: vpand <b5=%ymm13,<a6=%ymm1,>r=%ymm14
vpand %ymm13,%ymm1,%ymm14

# qhasm: r11 ^= r
# asm 1: vpxor <r=reg256#15,<r11=reg256#4,<r11=reg256#4
# asm 2: vpxor <r=%ymm14,<r11=%ymm3,<r11=%ymm3
vpxor %ymm14,%ymm3,%ymm3

# qhasm: mem256[ ptr + 352 ] = r11
# asm 1: vmovupd   <r11=reg256#4,352(<ptr=int64#5)
# asm 2: vmovupd   <r11=%ymm3,352(<ptr=%r8)
vmovupd   %ymm3,352(%r8)

# qhasm: r = b5 & a5
# asm 1: vpand <b5=reg256#14,<a5=reg256#3,>r=reg256#4
# asm 2: vpand <b5=%ymm13,<a5=%ymm2,>r=%ymm3
vpand %ymm13,%ymm2,%ymm3

# qhasm: r10 ^= r
# asm 1: vpxor <r=reg256#4,<r10=reg256#6,<r10=reg256#6
# asm 2: vpxor <r=%ymm3,<r10=%ymm5,<r10=%ymm5
vpxor %ymm3,%ymm5,%ymm5

# qhasm: r = b5 & a4
# asm 1: vpand <b5=reg256#14,<a4=reg256#5,>r=reg256#4
# asm 2: vpand <b5=%ymm13,<a4=%ymm4,>r=%ymm3
vpand %ymm13,%ymm4,%ymm3

# qhasm: r9 ^= r
# asm 1: vpxor <r=reg256#4,<r9=reg256#8,<r9=reg256#8
# asm 2: vpxor <r=%ymm3,<r9=%ymm7,<r9=%ymm7
vpxor %ymm3,%ymm7,%ymm7

# qhasm: r = b5 & a3
# asm 1: vpand <b5=reg256#14,<a3=reg256#7,>r=reg256#4
# asm 2: vpand <b5=%ymm13,<a3=%ymm6,>r=%ymm3
vpand %ymm13,%ymm6,%ymm3

# qhasm: r8 ^= r
# asm 1: vpxor <r=reg256#4,<r8=reg256#10,<r8=reg256#10
# asm 2: vpxor <r=%ymm3,<r8=%ymm9,<r8=%ymm9
vpxor %ymm3,%ymm9,%ymm9

# qhasm: r = b5 & a2
# asm 1: vpand <b5=reg256#14,<a2=reg256#9,>r=reg256#4
# asm 2: vpand <b5=%ymm13,<a2=%ymm8,>r=%ymm3
vpand %ymm13,%ymm8,%ymm3

# qhasm: r7 ^= r
# asm 1: vpxor <r=reg256#4,<r7=reg256#12,<r7=reg256#12
# asm 2: vpxor <r=%ymm3,<r7=%ymm11,<r7=%ymm11
vpxor %ymm3,%ymm11,%ymm11

# qhasm: r = b5 & a1
# asm 1: vpand <b5=reg256#14,<a1=reg256#11,>r=reg256#4
# asm 2: vpand <b5=%ymm13,<a1=%ymm10,>r=%ymm3
vpand %ymm13,%ymm10,%ymm3

# qhasm: r6 ^= r
# asm 1: vpxor <r=reg256#4,<r6=reg256#1,<r6=reg256#1
# asm 2: vpxor <r=%ymm3,<r6=%ymm0,<r6=%ymm0
vpxor %ymm3,%ymm0,%ymm0

# qhasm: r5 = b5 & a0
# asm 1: vpand <b5=reg256#14,<a0=reg256#13,>r5=reg256#4
# asm 2: vpand <b5=%ymm13,<a0=%ymm12,>r5=%ymm3
vpand %ymm13,%ymm12,%ymm3

# qhasm: b4 = mem128[ input_2 + 0 ] x2
# asm 1: vbroadcasti128 0(<input_2=int64#3), >b4=reg256#14
# asm 2: vbroadcasti128 0(<input_2=%rdx), >b4=%ymm13
vbroadcasti128 0(%rdx), %ymm13

# qhasm: input_2 -= input_3
# asm 1: sub  <input_3=int64#4,<input_2=int64#3
# asm 2: sub  <input_3=%rcx,<input_2=%rdx
sub  %rcx,%rdx

# qhasm: r = b4 & a6
# asm 1: vpand <b4=reg256#14,<a6=reg256#2,>r=reg256#15
# asm 2: vpand <b4=%ymm13,<a6=%ymm1,>r=%ymm14
vpand %ymm13,%ymm1,%ymm14

# qhasm: r10 ^= r
# asm 1: vpxor <r=reg256#15,<r10=reg256#6,<r10=reg256#6
# asm 2: vpxor <r=%ymm14,<r10=%ymm5,<r10=%ymm5
vpxor %ymm14,%ymm5,%ymm5

# qhasm: mem256[ ptr + 320 ] = r10
# asm 1: vmovupd   <r10=reg256#6,320(<ptr=int64#5)
# asm 2: vmovupd   <r10=%ymm5,320(<ptr=%r8)
vmovupd   %ymm5,320(%r8)

# qhasm: r = b4 & a5
# asm 1: vpand <b4=reg256#14,<a5=reg256#3,>r=reg256#6
# asm 2: vpand <b4=%ymm13,<a5=%ymm2,>r=%ymm5
vpand %ymm13,%ymm2,%ymm5

# qhasm: r9 ^= r
# asm 1: vpxor <r=reg256#6,<r9=reg256#8,<r9=reg256#8
# asm 2: vpxor <r=%ymm5,<r9=%ymm7,<r9=%ymm7
vpxor %ymm5,%ymm7,%ymm7

# qhasm: r = b4 & a4
# asm 1: vpand <b4=reg256#14,<a4=reg256#5,>r=reg256#6
# asm 2: vpand <b4=%ymm13,<a4=%ymm4,>r=%ymm5
vpand %ymm13,%ymm4,%ymm5

# qhasm: r8 ^= r
# asm 1: vpxor <r=reg256#6,<r8=reg256#10,<r8=reg256#10
# asm 2: vpxor <r=%ymm5,<r8=%ymm9,<r8=%ymm9
vpxor %ymm5,%ymm9,%ymm9

# qhasm: r = b4 & a3
# asm 1: vpand <b4=reg256#14,<a3=reg256#7,>r=reg256#6
# asm 2: vpand <b4=%ymm13,<a3=%ymm6,>r=%ymm5
vpand %ymm13,%ymm6,%ymm5

# qhasm: r7 ^= r
# asm 1: vpxor <r=reg256#6,<r7=reg256#12,<r7=reg256#12
# asm 2: vpxor <r=%ymm5,<r7=%ymm11,<r7=%ymm11
vpxor %ymm5,%ymm11,%ymm11

# qhasm: r = b4 & a2
# asm 1: vpand <b4=reg256#14,<a2=reg256#9,>r=reg256#6
# asm 2: vpand <b4=%ymm13,<a2=%ymm8,>r=%ymm5
vpand %ymm13,%ymm8,%ymm5

# qhasm: r6 ^= r
# asm 1: vpxor <r=reg256#6,<r6=reg256#1,<r6=reg256#1
# asm 2: vpxor <r=%ymm5,<r6=%ymm0,<r6=%ymm0
vpxor %ymm5,%ymm0,%ymm0

# qhasm: r = b4 & a1
# asm 1: vpand <b4=reg256#14,<a1=reg256#11,>r=reg256#6
# asm 2: vpand <b4=%ymm13,<a1=%ymm10,>r=%ymm5
vpand %ymm13,%ymm10,%ymm5

# qhasm: r5 ^= r
# asm 1: vpxor <r=reg256#6,<r5=reg256#4,<r5=reg256#4
# asm 2: vpxor <r=%ymm5,<r5=%ymm3,<r5=%ymm3
vpxor %ymm5,%ymm3,%ymm3

# qhasm: r4 = b4 & a0
# asm 1: vpand <b4=reg256#14,<a0=reg256#13,>r4=reg256#6
# asm 2: vpand <b4=%ymm13,<a0=%ymm12,>r4=%ymm5
vpand %ymm13,%ymm12,%ymm5

# qhasm: b3 = mem128[ input_2 + 0 ] x2
# asm 1: vbroadcasti128 0(<input_2=int64#3), >b3=reg256#14
# asm 2: vbroadcasti128 0(<input_2=%rdx), >b3=%ymm13
vbroadcasti128 0(%rdx), %ymm13

# qhasm: input_2 -= input_3
# asm 1: sub  <input_3=int64#4,<input_2=int64#3
# asm 2: sub  <input_3=%rcx,<input_2=%rdx
sub  %rcx,%rdx

# qhasm: r = b3 & a6
# asm 1: vpand <b3=reg256#14,<a6=reg256#2,>r=reg256#15
# asm 2: vpand <b3=%ymm13,<a6=%ymm1,>r=%ymm14
vpand %ymm13,%ymm1,%ymm14

# qhasm: r9 ^= r
# asm 1: vpxor <r=reg256#15,<r9=reg256#8,<r9=reg256#8
# asm 2: vpxor <r=%ymm14,<r9=%ymm7,<r9=%ymm7
vpxor %ymm14,%ymm7,%ymm7

# qhasm: mem256[ ptr + 288 ] = r9
# asm 1: vmovupd   <r9=reg256#8,288(<ptr=int64#5)
# asm 2: vmovupd   <r9=%ymm7,288(<ptr=%r8)
vmovupd   %ymm7,288(%r8)

# qhasm: r = b3 & a5
# asm 1: vpand <b3=reg256#14,<a5=reg256#3,>r=reg256#8
# asm 2: vpand <b3=%ymm13,<a5=%ymm2,>r=%ymm7
vpand %ymm13,%ymm2,%ymm7

# qhasm: r8 ^= r
# asm 1: vpxor <r=reg256#8,<r8=reg256#10,<r8=reg256#10
# asm 2: vpxor <r=%ymm7,<r8=%ymm9,<r8=%ymm9
vpxor %ymm7,%ymm9,%ymm9

# qhasm: r = b3 & a4
# asm 1: vpand <b3=reg256#14,<a4=reg256#5,>r=reg256#8
# asm 2: vpand <b3=%ymm13,<a4=%ymm4,>r=%ymm7
vpand %ymm13,%ymm4,%ymm7

# qhasm: r7 ^= r
# asm 1: vpxor <r=reg256#8,<r7=reg256#12,<r7=reg256#12
# asm 2: vpxor <r=%ymm7,<r7=%ymm11,<r7=%ymm11
vpxor %ymm7,%ymm11,%ymm11

# qhasm: r = b3 & a3
# asm 1: vpand <b3=reg256#14,<a3=reg256#7,>r=reg256#8
# asm 2: vpand <b3=%ymm13,<a3=%ymm6,>r=%ymm7
vpand %ymm13,%ymm6,%ymm7

# qhasm: r6 ^= r
# asm 1: vpxor <r=reg256#8,<r6=reg256#1,<r6=reg256#1
# asm 2: vpxor <r=%ymm7,<r6=%ymm0,<r6=%ymm0
vpxor %ymm7,%ymm0,%ymm0

# qhasm: r = b3 & a2
# asm 1: vpand <b3=reg256#14,<a2=reg256#9,>r=reg256#8
# asm 2: vpand <b3=%ymm13,<a2=%ymm8,>r=%ymm7
vpand %ymm13,%ymm8,%ymm7

# qhasm: r5 ^= r
# asm 1: vpxor <r=reg256#8,<r5=reg256#4,<r5=reg256#4
# asm 2: vpxor <r=%ymm7,<r5=%ymm3,<r5=%ymm3
vpxor %ymm7,%ymm3,%ymm3

# qhasm: r = b3 & a1
# asm 1: vpand <b3=reg256#14,<a1=reg256#11,>r=reg256#8
# asm 2: vpand <b3=%ymm13,<a1=%ymm10,>r=%ymm7
vpand %ymm13,%ymm10,%ymm7

# qhasm: r4 ^= r
# asm 1: vpxor <r=reg256#8,<r4=reg256#6,<r4=reg256#6
# asm 2: vpxor <r=%ymm7,<r4=%ymm5,<r4=%ymm5
vpxor %ymm7,%ymm5,%ymm5

# qhasm: r3 = b3 & a0
# asm 1: vpand <b3=reg256#14,<a0=reg256#13,>r3=reg256#8
# asm 2: vpand <b3=%ymm13,<a0=%ymm12,>r3=%ymm7
vpand %ymm13,%ymm12,%ymm7

# qhasm: b2 = mem128[ input_2 + 0 ] x2
# asm 1: vbroadcasti128 0(<input_2=int64#3), >b2=reg256#14
# asm 2: vbroadcasti128 0(<input_2=%rdx), >b2=%ymm13
vbroadcasti128 0(%rdx), %ymm13

# qhasm: input_2 -= input_3
# asm 1: sub  <input_3=int64#4,<input_2=int64#3
# asm 2: sub  <input_3=%rcx,<input_2=%rdx
sub  %rcx,%rdx

# qhasm: r = b2 & a6
# asm 1: vpand <b2=reg256#14,<a6=reg256#2,>r=reg256#15
# asm 2: vpand <b2=%ymm13,<a6=%ymm1,>r=%ymm14
vpand %ymm13,%ymm1,%ymm14

# qhasm: r8 ^= r
# asm 1: vpxor <r=reg256#15,<r8=reg256#10,<r8=reg256#10
# asm 2: vpxor <r=%ymm14,<r8=%ymm9,<r8=%ymm9
vpxor %ymm14,%ymm9,%ymm9

# qhasm: mem256[ ptr + 256 ] = r8
# asm 1: vmovupd   <r8=reg256#10,256(<ptr=int64#5)
# asm 2: vmovupd   <r8=%ymm9,256(<ptr=%r8)
vmovupd   %ymm9,256(%r8)

# qhasm: r = b2 & a5
# asm 1: vpand <b2=reg256#14,<a5=reg256#3,>r=reg256#10
# asm 2: vpand <b2=%ymm13,<a5=%ymm2,>r=%ymm9
vpand %ymm13,%ymm2,%ymm9

# qhasm: r7 ^= r
# asm 1: vpxor <r=reg256#10,<r7=reg256#12,<r7=reg256#12
# asm 2: vpxor <r=%ymm9,<r7=%ymm11,<r7=%ymm11
vpxor %ymm9,%ymm11,%ymm11

# qhasm: r = b2 & a4
# asm 1: vpand <b2=reg256#14,<a4=reg256#5,>r=reg256#10
# asm 2: vpand <b2=%ymm13,<a4=%ymm4,>r=%ymm9
vpand %ymm13,%ymm4,%ymm9

# qhasm: r6 ^= r
# asm 1: vpxor <r=reg256#10,<r6=reg256#1,<r6=reg256#1
# asm 2: vpxor <r=%ymm9,<r6=%ymm0,<r6=%ymm0
vpxor %ymm9,%ymm0,%ymm0

# qhasm: r = b2 & a3
# asm 1: vpand <b2=reg256#14,<a3=reg256#7,>r=reg256#10
# asm 2: vpand <b2=%ymm13,<a3=%ymm6,>r=%ymm9
vpand %ymm13,%ymm6,%ymm9

# qhasm: r5 ^= r
# asm 1: vpxor <r=reg256#10,<r5=reg256#4,<r5=reg256#4
# asm 2: vpxor <r=%ymm9,<r5=%ymm3,<r5=%ymm3
vpxor %ymm9,%ymm3,%ymm3

# qhasm: r = b2 & a2
# asm 1: vpand <b2=reg256#14,<a2=reg256#9,>r=reg256#10
# asm 2: vpand <b2=%ymm13,<a2=%ymm8,>r=%ymm9
vpand %ymm13,%ymm8,%ymm9

# qhasm: r4 ^= r
# asm 1: vpxor <r=reg256#10,<r4=reg256#6,<r4=reg256#6
# asm 2: vpxor <r=%ymm9,<r4=%ymm5,<r4=%ymm5
vpxor %ymm9,%ymm5,%ymm5

# qhasm: r = b2 & a1
# asm 1: vpand <b2=reg256#14,<a1=reg256#11,>r=reg256#10
# asm 2: vpand <b2=%ymm13,<a1=%ymm10,>r=%ymm9
vpand %ymm13,%ymm10,%ymm9

# qhasm: r3 ^= r
# asm 1: vpxor <r=reg256#10,<r3=reg256#8,<r3=reg256#8
# asm 2: vpxor <r=%ymm9,<r3=%ymm7,<r3=%ymm7
vpxor %ymm9,%ymm7,%ymm7

# qhasm: r2 = b2 & a0
# asm 1: vpand <b2=reg256#14,<a0=reg256#13,>r2=reg256#10
# asm 2: vpand <b2=%ymm13,<a0=%ymm12,>r2=%ymm9
vpand %ymm13,%ymm12,%ymm9

# qhasm: b1 = mem128[ input_2 + 0 ] x2
# asm 1: vbroadcasti128 0(<input_2=int64#3), >b1=reg256#14
# asm 2: vbroadcasti128 0(<input_2=%rdx), >b1=%ymm13
vbroadcasti128 0(%rdx), %ymm13

# qhasm: input_2 -= input_3
# asm 1: sub  <input_3=int64#4,<input_2=int64#3
# asm 2: sub  <input_3=%rcx,<input_2=%rdx
sub  %rcx,%rdx

# qhasm: r = b1 & a6
# asm 1: vpand <b1=reg256#14,<a6=reg256#2,>r=reg256#15
# asm 2: vpand <b1=%ymm13,<a6=%ymm1,>r=%ymm14
vpand %ymm13,%ymm1,%ymm14

# qhasm: r7 ^= r
# asm 1: vpxor <r=reg256#15,<r7=reg256#12,<r7=reg256#12
# asm 2: vpxor <r=%ymm14,<r7=%ymm11,<r7=%ymm11
vpxor %ymm14,%ymm11,%ymm11

# qhasm: mem256[ ptr + 224 ] = r7
# asm 1: vmovupd   <r7=reg256#12,224(<ptr=int64#5)
# asm 2: vmovupd   <r7=%ymm11,224(<ptr=%r8)
vmovupd   %ymm11,224(%r8)

# qhasm: r = b1 & a5
# asm 1: vpand <b1=reg256#14,<a5=reg256#3,>r=reg256#12
# asm 2: vpand <b1=%ymm13,<a5=%ymm2,>r=%ymm11
vpand %ymm13,%ymm2,%ymm11

# qhasm: r6 ^= r
# asm 1: vpxor <r=reg256#12,<r6=reg256#1,<r6=reg256#1
# asm 2: vpxor <r=%ymm11,<r6=%ymm0,<r6=%ymm0
vpxor %ymm11,%ymm0,%ymm0

# qhasm: r = b1 & a4
# asm 1: vpand <b1=reg256#14,<a4=reg256#5,>r=reg256#12
# asm 2: vpand <b1=%ymm13,<a4=%ymm4,>r=%ymm11
vpand %ymm13,%ymm4,%ymm11

# qhasm: r5 ^= r
# asm 1: vpxor <r=reg256#12,<r5=reg256#4,<r5=reg256#4
# asm 2: vpxor <r=%ymm11,<r5=%ymm3,<r5=%ymm3
vpxor %ymm11,%ymm3,%ymm3

# qhasm: r = b1 & a3
# asm 1: vpand <b1=reg256#14,<a3=reg256#7,>r=reg256#12
# asm 2: vpand <b1=%ymm13,<a3=%ymm6,>r=%ymm11
vpand %ymm13,%ymm6,%ymm11

# qhasm: r4 ^= r
# asm 1: vpxor <r=reg256#12,<r4=reg256#6,<r4=reg256#6
# asm 2: vpxor <r=%ymm11,<r4=%ymm5,<r4=%ymm5
vpxor %ymm11,%ymm5,%ymm5

# qhasm: r = b1 & a2
# asm 1: vpand <b1=reg256#14,<a2=reg256#9,>r=reg256#12
# asm 2: vpand <b1=%ymm13,<a2=%ymm8,>r=%ymm11
vpand %ymm13,%ymm8,%ymm11

# qhasm: r3 ^= r
# asm 1: vpxor <r=reg256#12,<r3=reg256#8,<r3=reg256#8
# asm 2: vpxor <r=%ymm11,<r3=%ymm7,<r3=%ymm7
vpxor %ymm11,%ymm7,%ymm7

# qhasm: r = b1 & a1
# asm 1: vpand <b1=reg256#14,<a1=reg256#11,>r=reg256#12
# asm 2: vpand <b1=%ymm13,<a1=%ymm10,>r=%ymm11
vpand %ymm13,%ymm10,%ymm11

# qhasm: r2 ^= r
# asm 1: vpxor <r=reg256#12,<r2=reg256#10,<r2=reg256#10
# asm 2: vpxor <r=%ymm11,<r2=%ymm9,<r2=%ymm9
vpxor %ymm11,%ymm9,%ymm9

# qhasm: r1 = b1 & a0
# asm 1: vpand <b1=reg256#14,<a0=reg256#13,>r1=reg256#12
# asm 2: vpand <b1=%ymm13,<a0=%ymm12,>r1=%ymm11
vpand %ymm13,%ymm12,%ymm11

# qhasm: b0 = mem128[ input_2 + 0 ] x2
# asm 1: vbroadcasti128 0(<input_2=int64#3), >b0=reg256#14
# asm 2: vbroadcasti128 0(<input_2=%rdx), >b0=%ymm13
vbroadcasti128 0(%rdx), %ymm13

# qhasm: input_2 -= input_3
# asm 1: sub  <input_3=int64#4,<input_2=int64#3
# asm 2: sub  <input_3=%rcx,<input_2=%rdx
sub  %rcx,%rdx

# qhasm: r = b0 & a6
# asm 1: vpand <b0=reg256#14,<a6=reg256#2,>r=reg256#2
# asm 2: vpand <b0=%ymm13,<a6=%ymm1,>r=%ymm1
vpand %ymm13,%ymm1,%ymm1

# qhasm: r6 ^= r
# asm 1: vpxor <r=reg256#2,<r6=reg256#1,<r6=reg256#1
# asm 2: vpxor <r=%ymm1,<r6=%ymm0,<r6=%ymm0
vpxor %ymm1,%ymm0,%ymm0

# qhasm: mem256[ ptr + 192 ] = r6
# asm 1: vmovupd   <r6=reg256#1,192(<ptr=int64#5)
# asm 2: vmovupd   <r6=%ymm0,192(<ptr=%r8)
vmovupd   %ymm0,192(%r8)

# qhasm: r = b0 & a5
# asm 1: vpand <b0=reg256#14,<a5=reg256#3,>r=reg256#1
# asm 2: vpand <b0=%ymm13,<a5=%ymm2,>r=%ymm0
vpand %ymm13,%ymm2,%ymm0

# qhasm: r5 ^= r
# asm 1: vpxor <r=reg256#1,<r5=reg256#4,<r5=reg256#4
# asm 2: vpxor <r=%ymm0,<r5=%ymm3,<r5=%ymm3
vpxor %ymm0,%ymm3,%ymm3

# qhasm: r = b0 & a4
# asm 1: vpand <b0=reg256#14,<a4=reg256#5,>r=reg256#1
# asm 2: vpand <b0=%ymm13,<a4=%ymm4,>r=%ymm0
vpand %ymm13,%ymm4,%ymm0

# qhasm: r4 ^= r
# asm 1: vpxor <r=reg256#1,<r4=reg256#6,<r4=reg256#6
# asm 2: vpxor <r=%ymm0,<r4=%ymm5,<r4=%ymm5
vpxor %ymm0,%ymm5,%ymm5

# qhasm: r = b0 & a3
# asm 1: vpand <b0=reg256#14,<a3=reg256#7,>r=reg256#1
# asm 2: vpand <b0=%ymm13,<a3=%ymm6,>r=%ymm0
vpand %ymm13,%ymm6,%ymm0

# qhasm: r3 ^= r
# asm 1: vpxor <r=reg256#1,<r3=reg256#8,<r3=reg256#8
# asm 2: vpxor <r=%ymm0,<r3=%ymm7,<r3=%ymm7
vpxor %ymm0,%ymm7,%ymm7

# qhasm: r = b0 & a2
# asm 1: vpand <b0=reg256#14,<a2=reg256#9,>r=reg256#1
# asm 2: vpand <b0=%ymm13,<a2=%ymm8,>r=%ymm0
vpand %ymm13,%ymm8,%ymm0

# qhasm: r2 ^= r
# asm 1: vpxor <r=reg256#1,<r2=reg256#10,<r2=reg256#10
# asm 2: vpxor <r=%ymm0,<r2=%ymm9,<r2=%ymm9
vpxor %ymm0,%ymm9,%ymm9

# qhasm: r = b0 & a1
# asm 1: vpand <b0=reg256#14,<a1=reg256#11,>r=reg256#1
# asm 2: vpand <b0=%ymm13,<a1=%ymm10,>r=%ymm0
vpand %ymm13,%ymm10,%ymm0

# qhasm: r1 ^= r
# asm 1: vpxor <r=reg256#1,<r1=reg256#12,<r1=reg256#12
# asm 2: vpxor <r=%ymm0,<r1=%ymm11,<r1=%ymm11
vpxor %ymm0,%ymm11,%ymm11

# qhasm: r0 = b0 & a0
# asm 1: vpand <b0=reg256#14,<a0=reg256#13,>r0=reg256#1
# asm 2: vpand <b0=%ymm13,<a0=%ymm12,>r0=%ymm0
vpand %ymm13,%ymm12,%ymm0

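# End of the schoolbook accumulation: r6..r18 have already been spilled to
# buf during the column passes above; the remaining low partial products
# r0..r5 are written out below before the reduction phase.
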
# qhasm: mem256[ ptr + 160 ] = r5
# asm 1: vmovupd   <r5=reg256#4,160(<ptr=int64#5)
# asm 2: vmovupd   <r5=%ymm3,160(<ptr=%r8)
vmovupd   %ymm3,160(%r8)

# qhasm: mem256[ ptr + 128 ] = r4
# asm 1: vmovupd   <r4=reg256#6,128(<ptr=int64#5)
# asm 2: vmovupd   <r4=%ymm5,128(<ptr=%r8)
vmovupd   %ymm5,128(%r8)

# qhasm: mem256[ ptr + 96 ] = r3
# asm 1: vmovupd   <r3=reg256#8,96(<ptr=int64#5)
# asm 2: vmovupd   <r3=%ymm7,96(<ptr=%r8)
vmovupd   %ymm7,96(%r8)

# qhasm: mem256[ ptr + 64 ] = r2
# asm 1: vmovupd   <r2=reg256#10,64(<ptr=int64#5)
# asm 2: vmovupd   <r2=%ymm9,64(<ptr=%r8)
vmovupd   %ymm9,64(%r8)

# qhasm: mem256[ ptr + 32 ] = r1
# asm 1: vmovupd   <r1=reg256#12,32(<ptr=int64#5)
# asm 2: vmovupd   <r1=%ymm11,32(<ptr=%r8)
vmovupd   %ymm11,32(%r8)

# qhasm: mem256[ ptr + 0 ] = r0
# asm 1: vmovupd   <r0=reg256#1,0(<ptr=int64#5)
# asm 2: vmovupd   <r0=%ymm0,0(<ptr=%r8)
vmovupd   %ymm0,0(%r8)

# qhasm: vzeroupper
vzeroupper

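# Reduction phase. The 25 product limbs sit in buf with limb k in the low 16
# bytes of buf + 32*k and limb k+7 in the high 16 bytes (buf + 32*k + 16);
# e.g. h24 below is loaded from 560 = 32*17 + 16, the high half of r17. The
# high limbs h13..h24 are folded into the low ones via
#     x^13 = x^4 + x^3 + x + 1   (mod x^13 + x^4 + x^3 + x + 1),
# so h[i] feeds h[i-9], h[i-10], h[i-12] and h[i-13]; the surviving limbs
# h0..h12 are XORed with the low halves from buf and stored to input_0.
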
# qhasm: h24 = mem128[ ptr + 560 ]
# asm 1: movdqu   560(<ptr=int64#5),>h24=reg128#1
# asm 2: movdqu   560(<ptr=%r8),>h24=%xmm0
movdqu   560(%r8),%xmm0

# qhasm: h11 = h24
# asm 1: movdqa <h24=reg128#1,>h11=reg128#2
# asm 2: movdqa <h24=%xmm0,>h11=%xmm1
movdqa %xmm0,%xmm1

# qhasm: h12 = h24
# asm 1: movdqa <h24=reg128#1,>h12=reg128#3
# asm 2: movdqa <h24=%xmm0,>h12=%xmm2
movdqa %xmm0,%xmm2

# qhasm: h14 = h24
# asm 1: movdqa <h24=reg128#1,>h14=reg128#4
# asm 2: movdqa <h24=%xmm0,>h14=%xmm3
movdqa %xmm0,%xmm3

# qhasm: h15 = h24
# asm 1: movdqa <h24=reg128#1,>h15=reg128#1
# asm 2: movdqa <h24=%xmm0,>h15=%xmm0
movdqa %xmm0,%xmm0

# qhasm: h23 = mem128[ ptr + 528 ]
# asm 1: movdqu   528(<ptr=int64#5),>h23=reg128#5
# asm 2: movdqu   528(<ptr=%r8),>h23=%xmm4
movdqu   528(%r8),%xmm4

# qhasm: h10 = h23
# asm 1: movdqa <h23=reg128#5,>h10=reg128#6
# asm 2: movdqa <h23=%xmm4,>h10=%xmm5
movdqa %xmm4,%xmm5

# qhasm: h11 = h11 ^ h23
# asm 1: vpxor <h23=reg128#5,<h11=reg128#2,>h11=reg128#2
# asm 2: vpxor <h23=%xmm4,<h11=%xmm1,>h11=%xmm1
vpxor %xmm4,%xmm1,%xmm1

# qhasm: h13 = h23
# asm 1: movdqa <h23=reg128#5,>h13=reg128#7
# asm 2: movdqa <h23=%xmm4,>h13=%xmm6
movdqa %xmm4,%xmm6

# qhasm: h14 = h14 ^ h23
# asm 1: vpxor <h23=reg128#5,<h14=reg128#4,>h14=reg128#4
# asm 2: vpxor <h23=%xmm4,<h14=%xmm3,>h14=%xmm3
vpxor %xmm4,%xmm3,%xmm3

# qhasm: h22 = mem128[ ptr + 496 ]
# asm 1: movdqu   496(<ptr=int64#5),>h22=reg128#5
# asm 2: movdqu   496(<ptr=%r8),>h22=%xmm4
movdqu   496(%r8),%xmm4

# qhasm: h9 = h22
# asm 1: movdqa <h22=reg128#5,>h9=reg128#8
# asm 2: movdqa <h22=%xmm4,>h9=%xmm7
movdqa %xmm4,%xmm7

# qhasm: h10 = h10 ^ h22
# asm 1: vpxor <h22=reg128#5,<h10=reg128#6,>h10=reg128#6
# asm 2: vpxor <h22=%xmm4,<h10=%xmm5,>h10=%xmm5
vpxor %xmm4,%xmm5,%xmm5

# qhasm: h12 = h12 ^ h22
# asm 1: vpxor <h22=reg128#5,<h12=reg128#3,>h12=reg128#3
# asm 2: vpxor <h22=%xmm4,<h12=%xmm2,>h12=%xmm2
vpxor %xmm4,%xmm2,%xmm2

# qhasm: h13 = h13 ^ h22
# asm 1: vpxor <h22=reg128#5,<h13=reg128#7,>h13=reg128#5
# asm 2: vpxor <h22=%xmm4,<h13=%xmm6,>h13=%xmm4
vpxor %xmm4,%xmm6,%xmm4

# qhasm: h21 = mem128[ ptr + 464 ]
# asm 1: movdqu   464(<ptr=int64#5),>h21=reg128#7
# asm 2: movdqu   464(<ptr=%r8),>h21=%xmm6
movdqu   464(%r8),%xmm6

# qhasm: h8 = h21
# asm 1: movdqa <h21=reg128#7,>h8=reg128#9
# asm 2: movdqa <h21=%xmm6,>h8=%xmm8
movdqa %xmm6,%xmm8

# qhasm: h9 = h9 ^ h21
# asm 1: vpxor <h21=reg128#7,<h9=reg128#8,>h9=reg128#8
# asm 2: vpxor <h21=%xmm6,<h9=%xmm7,>h9=%xmm7
vpxor %xmm6,%xmm7,%xmm7

# qhasm: h11 = h11 ^ h21
# asm 1: vpxor <h21=reg128#7,<h11=reg128#2,>h11=reg128#2
# asm 2: vpxor <h21=%xmm6,<h11=%xmm1,>h11=%xmm1
vpxor %xmm6,%xmm1,%xmm1

# qhasm: h12 = h12 ^ h21
# asm 1: vpxor <h21=reg128#7,<h12=reg128#3,>h12=reg128#3
# asm 2: vpxor <h21=%xmm6,<h12=%xmm2,>h12=%xmm2
vpxor %xmm6,%xmm2,%xmm2

# qhasm: h20 = mem128[ ptr + 432 ]
# asm 1: movdqu   432(<ptr=int64#5),>h20=reg128#7
# asm 2: movdqu   432(<ptr=%r8),>h20=%xmm6
movdqu   432(%r8),%xmm6

# qhasm: h7 = h20
# asm 1: movdqa <h20=reg128#7,>h7=reg128#10
# asm 2: movdqa <h20=%xmm6,>h7=%xmm9
movdqa %xmm6,%xmm9

# qhasm: h8 = h8 ^ h20
# asm 1: vpxor <h20=reg128#7,<h8=reg128#9,>h8=reg128#9
# asm 2: vpxor <h20=%xmm6,<h8=%xmm8,>h8=%xmm8
vpxor %xmm6,%xmm8,%xmm8

# qhasm: h10 = h10 ^ h20
# asm 1: vpxor <h20=reg128#7,<h10=reg128#6,>h10=reg128#6
# asm 2: vpxor <h20=%xmm6,<h10=%xmm5,>h10=%xmm5
vpxor %xmm6,%xmm5,%xmm5

# qhasm: h11 = h11 ^ h20
# asm 1: vpxor <h20=reg128#7,<h11=reg128#2,>h11=reg128#2
# asm 2: vpxor <h20=%xmm6,<h11=%xmm1,>h11=%xmm1
vpxor %xmm6,%xmm1,%xmm1

# qhasm: h19 = mem128[ ptr + 400 ]
# asm 1: movdqu   400(<ptr=int64#5),>h19=reg128#7
# asm 2: movdqu   400(<ptr=%r8),>h19=%xmm6
movdqu   400(%r8),%xmm6

# qhasm: h6 = h19
# asm 1: movdqa <h19=reg128#7,>h6=reg128#11
# asm 2: movdqa <h19=%xmm6,>h6=%xmm10
movdqa %xmm6,%xmm10

# qhasm: h7 = h7 ^ h19
# asm 1: vpxor <h19=reg128#7,<h7=reg128#10,>h7=reg128#10
# asm 2: vpxor <h19=%xmm6,<h7=%xmm9,>h7=%xmm9
vpxor %xmm6,%xmm9,%xmm9

# qhasm: h9 = h9 ^ h19
# asm 1: vpxor <h19=reg128#7,<h9=reg128#8,>h9=reg128#8
# asm 2: vpxor <h19=%xmm6,<h9=%xmm7,>h9=%xmm7
vpxor %xmm6,%xmm7,%xmm7

# qhasm: h10 = h10 ^ h19
# asm 1: vpxor <h19=reg128#7,<h10=reg128#6,>h10=reg128#6
# asm 2: vpxor <h19=%xmm6,<h10=%xmm5,>h10=%xmm5
vpxor %xmm6,%xmm5,%xmm5

# qhasm: h18 = mem128[ ptr + 368 ]
# asm 1: movdqu   368(<ptr=int64#5),>h18=reg128#7
# asm 2: movdqu   368(<ptr=%r8),>h18=%xmm6
movdqu   368(%r8),%xmm6

# qhasm: h18 = h18 ^ mem128[ ptr + 576 ]
# asm 1: vpxor 576(<ptr=int64#5),<h18=reg128#7,>h18=reg128#7
# asm 2: vpxor 576(<ptr=%r8),<h18=%xmm6,>h18=%xmm6
vpxor 576(%r8),%xmm6,%xmm6

# qhasm: h5 = h18
# asm 1: movdqa <h18=reg128#7,>h5=reg128#12
# asm 2: movdqa <h18=%xmm6,>h5=%xmm11
movdqa %xmm6,%xmm11

# qhasm: h6 = h6 ^ h18
# asm 1: vpxor <h18=reg128#7,<h6=reg128#11,>h6=reg128#11
# asm 2: vpxor <h18=%xmm6,<h6=%xmm10,>h6=%xmm10
vpxor %xmm6,%xmm10,%xmm10

# qhasm: h8 = h8 ^ h18
# asm 1: vpxor <h18=reg128#7,<h8=reg128#9,>h8=reg128#9
# asm 2: vpxor <h18=%xmm6,<h8=%xmm8,>h8=%xmm8
vpxor %xmm6,%xmm8,%xmm8

# qhasm: h9 = h9 ^ h18
# asm 1: vpxor <h18=reg128#7,<h9=reg128#8,>h9=reg128#7
# asm 2: vpxor <h18=%xmm6,<h9=%xmm7,>h9=%xmm6
vpxor %xmm6,%xmm7,%xmm6

# qhasm: h17 = mem128[ ptr + 336 ]
# asm 1: movdqu   336(<ptr=int64#5),>h17=reg128#8
# asm 2: movdqu   336(<ptr=%r8),>h17=%xmm7
movdqu   336(%r8),%xmm7

# qhasm: h17 = h17 ^ mem128[ ptr + 544 ]
# asm 1: vpxor 544(<ptr=int64#5),<h17=reg128#8,>h17=reg128#8
# asm 2: vpxor 544(<ptr=%r8),<h17=%xmm7,>h17=%xmm7
vpxor 544(%r8),%xmm7,%xmm7

# qhasm: h4 = h17
# asm 1: movdqa <h17=reg128#8,>h4=reg128#13
# asm 2: movdqa <h17=%xmm7,>h4=%xmm12
movdqa %xmm7,%xmm12

# qhasm: h5 = h5 ^ h17
# asm 1: vpxor <h17=reg128#8,<h5=reg128#12,>h5=reg128#12
# asm 2: vpxor <h17=%xmm7,<h5=%xmm11,>h5=%xmm11
vpxor %xmm7,%xmm11,%xmm11

# qhasm: h7 = h7 ^ h17
# asm 1: vpxor <h17=reg128#8,<h7=reg128#10,>h7=reg128#10
# asm 2: vpxor <h17=%xmm7,<h7=%xmm9,>h7=%xmm9
vpxor %xmm7,%xmm9,%xmm9

# qhasm: h8 = h8 ^ h17
# asm 1: vpxor <h17=reg128#8,<h8=reg128#9,>h8=reg128#8
# asm 2: vpxor <h17=%xmm7,<h8=%xmm8,>h8=%xmm7
vpxor %xmm7,%xmm8,%xmm7

# qhasm: h16 = mem128[ ptr + 304 ]
# asm 1: movdqu   304(<ptr=int64#5),>h16=reg128#9
# asm 2: movdqu   304(<ptr=%r8),>h16=%xmm8
movdqu   304(%r8),%xmm8

# qhasm: h16 = h16 ^ mem128[ ptr + 512 ]
# asm 1: vpxor 512(<ptr=int64#5),<h16=reg128#9,>h16=reg128#9
# asm 2: vpxor 512(<ptr=%r8),<h16=%xmm8,>h16=%xmm8
vpxor 512(%r8),%xmm8,%xmm8

# qhasm: h3 = h16
# asm 1: movdqa <h16=reg128#9,>h3=reg128#14
# asm 2: movdqa <h16=%xmm8,>h3=%xmm13
movdqa %xmm8,%xmm13

# qhasm: h4 = h4 ^ h16
# asm 1: vpxor <h16=reg128#9,<h4=reg128#13,>h4=reg128#13
# asm 2: vpxor <h16=%xmm8,<h4=%xmm12,>h4=%xmm12
vpxor %xmm8,%xmm12,%xmm12

# qhasm: h6 = h6 ^ h16
# asm 1: vpxor <h16=reg128#9,<h6=reg128#11,>h6=reg128#11
# asm 2: vpxor <h16=%xmm8,<h6=%xmm10,>h6=%xmm10
vpxor %xmm8,%xmm10,%xmm10

# qhasm: h7 = h7 ^ h16
# asm 1: vpxor <h16=reg128#9,<h7=reg128#10,>h7=reg128#9
# asm 2: vpxor <h16=%xmm8,<h7=%xmm9,>h7=%xmm8
vpxor %xmm8,%xmm9,%xmm8

# qhasm: h15 = h15 ^ mem128[ ptr + 272 ]
# asm 1: vpxor 272(<ptr=int64#5),<h15=reg128#1,>h15=reg128#1
# asm 2: vpxor 272(<ptr=%r8),<h15=%xmm0,>h15=%xmm0
vpxor 272(%r8),%xmm0,%xmm0

# qhasm: h15 = h15 ^ mem128[ ptr + 480 ]
# asm 1: vpxor 480(<ptr=int64#5),<h15=reg128#1,>h15=reg128#1
# asm 2: vpxor 480(<ptr=%r8),<h15=%xmm0,>h15=%xmm0
vpxor 480(%r8),%xmm0,%xmm0

# qhasm: h2 = h15
# asm 1: movdqa <h15=reg128#1,>h2=reg128#10
# asm 2: movdqa <h15=%xmm0,>h2=%xmm9
movdqa %xmm0,%xmm9

# qhasm: h3 = h3 ^ h15
# asm 1: vpxor <h15=reg128#1,<h3=reg128#14,>h3=reg128#14
# asm 2: vpxor <h15=%xmm0,<h3=%xmm13,>h3=%xmm13
vpxor %xmm0,%xmm13,%xmm13

# qhasm: h5 = h5 ^ h15
# asm 1: vpxor <h15=reg128#1,<h5=reg128#12,>h5=reg128#12
# asm 2: vpxor <h15=%xmm0,<h5=%xmm11,>h5=%xmm11
vpxor %xmm0,%xmm11,%xmm11

# qhasm: h6 = h6 ^ h15
# asm 1: vpxor <h15=reg128#1,<h6=reg128#11,>h6=reg128#1
# asm 2: vpxor <h15=%xmm0,<h6=%xmm10,>h6=%xmm0
vpxor %xmm0,%xmm10,%xmm0

# qhasm: h14 = h14 ^ mem128[ ptr + 240 ]
# asm 1: vpxor 240(<ptr=int64#5),<h14=reg128#4,>h14=reg128#4
# asm 2: vpxor 240(<ptr=%r8),<h14=%xmm3,>h14=%xmm3
vpxor 240(%r8),%xmm3,%xmm3

# qhasm: h14 = h14 ^ mem128[ ptr + 448 ]
# asm 1: vpxor 448(<ptr=int64#5),<h14=reg128#4,>h14=reg128#4
# asm 2: vpxor 448(<ptr=%r8),<h14=%xmm3,>h14=%xmm3
vpxor 448(%r8),%xmm3,%xmm3

# qhasm: h1 = h14
# asm 1: movdqa <h14=reg128#4,>h1=reg128#11
# asm 2: movdqa <h14=%xmm3,>h1=%xmm10
movdqa %xmm3,%xmm10

# qhasm: h2 = h2 ^ h14
# asm 1: vpxor <h14=reg128#4,<h2=reg128#10,>h2=reg128#10
# asm 2: vpxor <h14=%xmm3,<h2=%xmm9,>h2=%xmm9
vpxor %xmm3,%xmm9,%xmm9

# qhasm: h4 = h4 ^ h14
# asm 1: vpxor <h14=reg128#4,<h4=reg128#13,>h4=reg128#13
# asm 2: vpxor <h14=%xmm3,<h4=%xmm12,>h4=%xmm12
vpxor %xmm3,%xmm12,%xmm12

# qhasm: h5 = h5 ^ h14
# asm 1: vpxor <h14=reg128#4,<h5=reg128#12,>h5=reg128#4
# asm 2: vpxor <h14=%xmm3,<h5=%xmm11,>h5=%xmm3
vpxor %xmm3,%xmm11,%xmm3

# qhasm: h13 = h13 ^ mem128[ ptr + 208 ]
# asm 1: vpxor 208(<ptr=int64#5),<h13=reg128#5,>h13=reg128#5
# asm 2: vpxor 208(<ptr=%r8),<h13=%xmm4,>h13=%xmm4
vpxor 208(%r8),%xmm4,%xmm4

# qhasm: h13 = h13 ^ mem128[ ptr + 416 ]
# asm 1: vpxor 416(<ptr=int64#5),<h13=reg128#5,>h13=reg128#5
# asm 2: vpxor 416(<ptr=%r8),<h13=%xmm4,>h13=%xmm4
vpxor 416(%r8),%xmm4,%xmm4

# qhasm: h0 = h13
# asm 1: movdqa <h13=reg128#5,>h0=reg128#12
# asm 2: movdqa <h13=%xmm4,>h0=%xmm11
movdqa %xmm4,%xmm11

# qhasm: h1 = h1 ^ h13
# asm 1: vpxor <h13=reg128#5,<h1=reg128#11,>h1=reg128#11
# asm 2: vpxor <h13=%xmm4,<h1=%xmm10,>h1=%xmm10
vpxor %xmm4,%xmm10,%xmm10

# qhasm: h3 = h3 ^ h13
# asm 1: vpxor <h13=reg128#5,<h3=reg128#14,>h3=reg128#14
# asm 2: vpxor <h13=%xmm4,<h3=%xmm13,>h3=%xmm13
vpxor %xmm4,%xmm13,%xmm13

# qhasm: h4 = h4 ^ h13
# asm 1: vpxor <h13=reg128#5,<h4=reg128#13,>h4=reg128#5
# asm 2: vpxor <h13=%xmm4,<h4=%xmm12,>h4=%xmm4
vpxor %xmm4,%xmm12,%xmm4

# qhasm: h12 = h12 ^ mem128[ ptr + 384 ]
# asm 1: vpxor 384(<ptr=int64#5),<h12=reg128#3,>h12=reg128#3
# asm 2: vpxor 384(<ptr=%r8),<h12=%xmm2,>h12=%xmm2
vpxor 384(%r8),%xmm2,%xmm2

# qhasm: h12 = h12 ^ mem128[ ptr + 176 ]
# asm 1: vpxor 176(<ptr=int64#5),<h12=reg128#3,>h12=reg128#3
# asm 2: vpxor 176(<ptr=%r8),<h12=%xmm2,>h12=%xmm2
vpxor 176(%r8),%xmm2,%xmm2

# qhasm: mem128[ input_0 + 192 ] = h12
# asm 1: movdqu   <h12=reg128#3,192(<input_0=int64#1)
# asm 2: movdqu   <h12=%xmm2,192(<input_0=%rdi)
movdqu   %xmm2,192(%rdi)

# qhasm: h11 = h11 ^ mem128[ ptr + 352 ]
# asm 1: vpxor 352(<ptr=int64#5),<h11=reg128#2,>h11=reg128#2
# asm 2: vpxor 352(<ptr=%r8),<h11=%xmm1,>h11=%xmm1
vpxor 352(%r8),%xmm1,%xmm1

# qhasm: h11 = h11 ^ mem128[ ptr + 144 ]
# asm 1: vpxor 144(<ptr=int64#5),<h11=reg128#2,>h11=reg128#2
# asm 2: vpxor 144(<ptr=%r8),<h11=%xmm1,>h11=%xmm1
vpxor 144(%r8),%xmm1,%xmm1

# qhasm: mem128[ input_0 + 176 ] = h11
# asm 1: movdqu   <h11=reg128#2,176(<input_0=int64#1)
# asm 2: movdqu   <h11=%xmm1,176(<input_0=%rdi)
movdqu   %xmm1,176(%rdi)

# qhasm: h10 = h10 ^ mem128[ ptr + 320 ]
# asm 1: vpxor 320(<ptr=int64#5),<h10=reg128#6,>h10=reg128#2
# asm 2: vpxor 320(<ptr=%r8),<h10=%xmm5,>h10=%xmm1
vpxor 320(%r8),%xmm5,%xmm1

# qhasm: h10 = h10 ^ mem128[ ptr + 112 ]
# asm 1: vpxor 112(<ptr=int64#5),<h10=reg128#2,>h10=reg128#2
# asm 2: vpxor 112(<ptr=%r8),<h10=%xmm1,>h10=%xmm1
vpxor 112(%r8),%xmm1,%xmm1

# qhasm: mem128[ input_0 + 160 ] = h10
# asm 1: movdqu   <h10=reg128#2,160(<input_0=int64#1)
# asm 2: movdqu   <h10=%xmm1,160(<input_0=%rdi)
movdqu   %xmm1,160(%rdi)

# qhasm: h9 = h9 ^ mem128[ ptr + 288 ]
# asm 1: vpxor 288(<ptr=int64#5),<h9=reg128#7,>h9=reg128#2
# asm 2: vpxor 288(<ptr=%r8),<h9=%xmm6,>h9=%xmm1
vpxor 288(%r8),%xmm6,%xmm1

# qhasm: h9 = h9 ^ mem128[ ptr + 80 ]
# asm 1: vpxor 80(<ptr=int64#5),<h9=reg128#2,>h9=reg128#2
# asm 2: vpxor 80(<ptr=%r8),<h9=%xmm1,>h9=%xmm1
vpxor 80(%r8),%xmm1,%xmm1

# qhasm: mem128[ input_0 + 144 ] = h9
# asm 1: movdqu   <h9=reg128#2,144(<input_0=int64#1)
# asm 2: movdqu   <h9=%xmm1,144(<input_0=%rdi)
movdqu   %xmm1,144(%rdi)

# qhasm: h8 = h8 ^ mem128[ ptr + 256 ]
# asm 1: vpxor 256(<ptr=int64#5),<h8=reg128#8,>h8=reg128#2
# asm 2: vpxor 256(<ptr=%r8),<h8=%xmm7,>h8=%xmm1
vpxor 256(%r8),%xmm7,%xmm1

# qhasm: h8 = h8 ^ mem128[ ptr + 48 ]
# asm 1: vpxor 48(<ptr=int64#5),<h8=reg128#2,>h8=reg128#2
# asm 2: vpxor 48(<ptr=%r8),<h8=%xmm1,>h8=%xmm1
vpxor 48(%r8),%xmm1,%xmm1

# qhasm: mem128[ input_0 + 128 ] = h8
# asm 1: movdqu   <h8=reg128#2,128(<input_0=int64#1)
# asm 2: movdqu   <h8=%xmm1,128(<input_0=%rdi)
movdqu   %xmm1,128(%rdi)

# qhasm: h7 = h7 ^ mem128[ ptr + 224 ]
# asm 1: vpxor 224(<ptr=int64#5),<h7=reg128#9,>h7=reg128#2
# asm 2: vpxor 224(<ptr=%r8),<h7=%xmm8,>h7=%xmm1
vpxor 224(%r8),%xmm8,%xmm1

# qhasm: h7 = h7 ^ mem128[ ptr + 16 ]
# asm 1: vpxor 16(<ptr=int64#5),<h7=reg128#2,>h7=reg128#2
# asm 2: vpxor 16(<ptr=%r8),<h7=%xmm1,>h7=%xmm1
vpxor 16(%r8),%xmm1,%xmm1

# qhasm: mem128[ input_0 + 112 ] = h7
# asm 1: movdqu   <h7=reg128#2,112(<input_0=int64#1)
# asm 2: movdqu   <h7=%xmm1,112(<input_0=%rdi)
movdqu   %xmm1,112(%rdi)

# qhasm: h6 = h6 ^ mem128[ ptr + 192 ]
# asm 1: vpxor 192(<ptr=int64#5),<h6=reg128#1,>h6=reg128#1
# asm 2: vpxor 192(<ptr=%r8),<h6=%xmm0,>h6=%xmm0
vpxor 192(%r8),%xmm0,%xmm0

# qhasm: mem128[ input_0 + 96 ] = h6
# asm 1: movdqu   <h6=reg128#1,96(<input_0=int64#1)
# asm 2: movdqu   <h6=%xmm0,96(<input_0=%rdi)
movdqu   %xmm0,96(%rdi)

# qhasm: h5 = h5 ^ mem128[ ptr + 160 ]
# asm 1: vpxor 160(<ptr=int64#5),<h5=reg128#4,>h5=reg128#1
# asm 2: vpxor 160(<ptr=%r8),<h5=%xmm3,>h5=%xmm0
vpxor 160(%r8),%xmm3,%xmm0

# qhasm: mem128[ input_0 + 80 ] = h5
# asm 1: movdqu   <h5=reg128#1,80(<input_0=int64#1)
# asm 2: movdqu   <h5=%xmm0,80(<input_0=%rdi)
movdqu   %xmm0,80(%rdi)

# qhasm: h4 = h4 ^ mem128[ ptr + 128 ]
# asm 1: vpxor 128(<ptr=int64#5),<h4=reg128#5,>h4=reg128#1
# asm 2: vpxor 128(<ptr=%r8),<h4=%xmm4,>h4=%xmm0
vpxor 128(%r8),%xmm4,%xmm0

# qhasm: mem128[ input_0 + 64 ] = h4
# asm 1: movdqu   <h4=reg128#1,64(<input_0=int64#1)
# asm 2: movdqu   <h4=%xmm0,64(<input_0=%rdi)
movdqu   %xmm0,64(%rdi)

# qhasm: h3 = h3 ^ mem128[ ptr + 96 ]
# asm 1: vpxor 96(<ptr=int64#5),<h3=reg128#14,>h3=reg128#1
# asm 2: vpxor 96(<ptr=%r8),<h3=%xmm13,>h3=%xmm0
vpxor 96(%r8),%xmm13,%xmm0

# qhasm: mem128[ input_0 + 48 ] = h3
# asm 1: movdqu   <h3=reg128#1,48(<input_0=int64#1)
# asm 2: movdqu   <h3=%xmm0,48(<input_0=%rdi)
movdqu   %xmm0,48(%rdi)

# qhasm: h2 = h2 ^ mem128[ ptr + 64 ]
# asm 1: vpxor 64(<ptr=int64#5),<h2=reg128#10,>h2=reg128#1
# asm 2: vpxor 64(<ptr=%r8),<h2=%xmm9,>h2=%xmm0
vpxor 64(%r8),%xmm9,%xmm0

# qhasm: mem128[ input_0 + 32 ] = h2
# asm 1: movdqu   <h2=reg128#1,32(<input_0=int64#1)
# asm 2: movdqu   <h2=%xmm0,32(<input_0=%rdi)
movdqu   %xmm0,32(%rdi)

# qhasm: h1 = h1 ^ mem128[ ptr + 32 ]
# asm 1: vpxor 32(<ptr=int64#5),<h1=reg128#11,>h1=reg128#1
# asm 2: vpxor 32(<ptr=%r8),<h1=%xmm10,>h1=%xmm0
vpxor 32(%r8),%xmm10,%xmm0

# qhasm: mem128[ input_0 + 16 ] = h1
# asm 1: movdqu   <h1=reg128#1,16(<input_0=int64#1)
# asm 2: movdqu   <h1=%xmm0,16(<input_0=%rdi)
movdqu   %xmm0,16(%rdi)

# qhasm: h0 = h0 ^ mem128[ ptr + 0 ]
# asm 1: vpxor 0(<ptr=int64#5),<h0=reg128#12,>h0=reg128#1
# asm 2: vpxor 0(<ptr=%r8),<h0=%xmm11,>h0=%xmm0
vpxor 0(%r8),%xmm11,%xmm0

# qhasm: mem128[ input_0 + 0 ] = h0
# asm 1: movdqu   <h0=reg128#1,0(<input_0=int64#1)
# asm 2: movdqu   <h0=%xmm0,0(<input_0=%rdi)
movdqu   %xmm0,0(%rdi)

# qhasm: return
add %r11,%rsp
ret