
# qhasm: int64 rp

# qhasm: int64 qp

# qhasm: input rp

# qhasm: input qp

# qhasm:   int64 caller1

# qhasm:   int64 caller2

# qhasm:   int64 caller3

# qhasm:   int64 caller4

# qhasm:   int64 caller5

# qhasm:   int64 caller6

# qhasm:   int64 caller7

# qhasm:   caller caller1

# qhasm:   caller caller2

# qhasm:   caller caller3

# qhasm:   caller caller4

# qhasm:   caller caller5

# qhasm:   caller caller6

# qhasm:   caller caller7

# qhasm:   stack64 caller1_stack

# qhasm:   stack64 caller2_stack

# qhasm:   stack64 caller3_stack

# qhasm:   stack64 caller4_stack

# qhasm:   stack64 caller5_stack

# qhasm:   stack64 caller6_stack

# qhasm:   stack64 caller7_stack

# qhasm: int64 a0

# qhasm: int64 a1

# qhasm: int64 a2

# qhasm: int64 a3

# qhasm: int64 a4

# qhasm: stack64 a0_stack

# qhasm: stack64 a1_stack

# qhasm: stack64 a2_stack

# qhasm: stack64 a3_stack

# qhasm: stack64 a4_stack

# qhasm: int64 b0

# qhasm: int64 b1

# qhasm: int64 b2

# qhasm: int64 b3

# qhasm: int64 b4

# qhasm: stack64 b0_stack

# qhasm: stack64 b1_stack

# qhasm: stack64 b2_stack

# qhasm: stack64 b3_stack

# qhasm: stack64 b4_stack

# qhasm: int64 c0

# qhasm: int64 c1

# qhasm: int64 c2

# qhasm: int64 c3

# qhasm: int64 c4

# qhasm: stack64 c0_stack

# qhasm: stack64 c1_stack

# qhasm: stack64 c2_stack

# qhasm: stack64 c3_stack

# qhasm: stack64 c4_stack

# qhasm: int64 d0

# qhasm: int64 d1

# qhasm: int64 d2

# qhasm: int64 d3

# qhasm: int64 d4

# qhasm: stack64 d0_stack

# qhasm: stack64 d1_stack

# qhasm: stack64 d2_stack

# qhasm: stack64 d3_stack

# qhasm: stack64 d4_stack

# qhasm: int64 e0

# qhasm: int64 e1

# qhasm: int64 e2

# qhasm: int64 e3

# qhasm: int64 e4

# qhasm: stack64 e0_stack

# qhasm: stack64 e1_stack

# qhasm: stack64 e2_stack

# qhasm: stack64 e3_stack

# qhasm: stack64 e4_stack

# qhasm: int64 f0

# qhasm: int64 f1

# qhasm: int64 f2

# qhasm: int64 f3

# qhasm: int64 f4

# qhasm: stack64 f0_stack

# qhasm: stack64 f1_stack

# qhasm: stack64 f2_stack

# qhasm: stack64 f3_stack

# qhasm: stack64 f4_stack

# qhasm: int64 g0

# qhasm: int64 g1

# qhasm: int64 g2

# qhasm: int64 g3

# qhasm: int64 g4

# qhasm: stack64 g0_stack

# qhasm: stack64 g1_stack

# qhasm: stack64 g2_stack

# qhasm: stack64 g3_stack

# qhasm: stack64 g4_stack

# qhasm: int64 h0

# qhasm: int64 h1

# qhasm: int64 h2

# qhasm: int64 h3

# qhasm: int64 h4

# qhasm: stack64 h0_stack

# qhasm: stack64 h1_stack

# qhasm: stack64 h2_stack

# qhasm: stack64 h3_stack

# qhasm: stack64 h4_stack

# qhasm: int64 qt0

# qhasm: int64 qt1

# qhasm: int64 qt2

# qhasm: int64 qt3

# qhasm: int64 qt4

# qhasm: stack64 qt0_stack

# qhasm: stack64 qt1_stack

# qhasm: stack64 qt2_stack

# qhasm: stack64 qt3_stack

# qhasm: stack64 qt4_stack

# qhasm: int64 t10

# qhasm: int64 t11

# qhasm: int64 t12

# qhasm: int64 t13

# qhasm: int64 t14

# qhasm: stack64 t10_stack

# qhasm: stack64 t11_stack

# qhasm: stack64 t12_stack

# qhasm: stack64 t13_stack

# qhasm: stack64 t14_stack

# qhasm: int64 t20

# qhasm: int64 t21

# qhasm: int64 t22

# qhasm: int64 t23

# qhasm: int64 t24

# qhasm: stack64 t20_stack

# qhasm: stack64 t21_stack

# qhasm: stack64 t22_stack

# qhasm: stack64 t23_stack

# qhasm: stack64 t24_stack

# qhasm: int64 rx0

# qhasm: int64 rx1

# qhasm: int64 rx2

# qhasm: int64 rx3

# qhasm: int64 rx4

# qhasm: int64 ry0

# qhasm: int64 ry1

# qhasm: int64 ry2

# qhasm: int64 ry3

# qhasm: int64 ry4

# qhasm: int64 rz0

# qhasm: int64 rz1

# qhasm: int64 rz2

# qhasm: int64 rz3

# qhasm: int64 rz4

# qhasm: int64 rt0

# qhasm: int64 rt1

# qhasm: int64 rt2

# qhasm: int64 rt3

# qhasm: int64 rt4

# qhasm: int64 mulr01

# qhasm: int64 mulr11

# qhasm: int64 mulr21

# qhasm: int64 mulr31

# qhasm: int64 mulr41

# qhasm: int64 mulrax

# qhasm: int64 mulrdx

# qhasm: int64 mult

# qhasm: int64 mulredmask

# qhasm: stack64 mulx219_stack

# qhasm: stack64 mulx319_stack

# qhasm: stack64 mulx419_stack

336# qhasm: enter crypto_sign_ed25519_amd64_51_30k_batch_ge25519_nielsadd2
337.text
338.p2align 5
339.globl _crypto_sign_ed25519_amd64_51_30k_batch_ge25519_nielsadd2
340.globl crypto_sign_ed25519_amd64_51_30k_batch_ge25519_nielsadd2
341_crypto_sign_ed25519_amd64_51_30k_batch_ge25519_nielsadd2:
342crypto_sign_ed25519_amd64_51_30k_batch_ge25519_nielsadd2:
343mov %rsp,%r11
344and $31,%r11
345add $256,%r11
346sub %r11,%rsp
347
348# qhasm:   caller1_stack = caller1
349# asm 1: movq <caller1=int64#9,>caller1_stack=stack64#1
350# asm 2: movq <caller1=%r11,>caller1_stack=0(%rsp)
351movq %r11,0(%rsp)
352
353# qhasm:   caller2_stack = caller2
354# asm 1: movq <caller2=int64#10,>caller2_stack=stack64#2
355# asm 2: movq <caller2=%r12,>caller2_stack=8(%rsp)
356movq %r12,8(%rsp)
357
358# qhasm:   caller3_stack = caller3
359# asm 1: movq <caller3=int64#11,>caller3_stack=stack64#3
360# asm 2: movq <caller3=%r13,>caller3_stack=16(%rsp)
361movq %r13,16(%rsp)
362
363# qhasm:   caller4_stack = caller4
364# asm 1: movq <caller4=int64#12,>caller4_stack=stack64#4
365# asm 2: movq <caller4=%r14,>caller4_stack=24(%rsp)
366movq %r14,24(%rsp)
367
368# qhasm:   caller5_stack = caller5
369# asm 1: movq <caller5=int64#13,>caller5_stack=stack64#5
370# asm 2: movq <caller5=%r15,>caller5_stack=32(%rsp)
371movq %r15,32(%rsp)
372
373# qhasm:   caller6_stack = caller6
374# asm 1: movq <caller6=int64#14,>caller6_stack=stack64#6
375# asm 2: movq <caller6=%rbx,>caller6_stack=40(%rsp)
376movq %rbx,40(%rsp)
377
378# qhasm:   caller7_stack = caller7
379# asm 1: movq <caller7=int64#15,>caller7_stack=stack64#7
380# asm 2: movq <caller7=%rbp,>caller7_stack=48(%rsp)
381movq %rbp,48(%rsp)
382
383# qhasm: a0 = *(uint64 *)(rp + 40)
384# asm 1: movq   40(<rp=int64#1),>a0=int64#3
385# asm 2: movq   40(<rp=%rdi),>a0=%rdx
386movq   40(%rdi),%rdx
387
388# qhasm: a1 = *(uint64 *)(rp + 48)
389# asm 1: movq   48(<rp=int64#1),>a1=int64#4
390# asm 2: movq   48(<rp=%rdi),>a1=%rcx
391movq   48(%rdi),%rcx
392
393# qhasm: a2 = *(uint64 *)(rp + 56)
394# asm 1: movq   56(<rp=int64#1),>a2=int64#5
395# asm 2: movq   56(<rp=%rdi),>a2=%r8
396movq   56(%rdi),%r8
397
398# qhasm: a3 = *(uint64 *)(rp + 64)
399# asm 1: movq   64(<rp=int64#1),>a3=int64#6
400# asm 2: movq   64(<rp=%rdi),>a3=%r9
401movq   64(%rdi),%r9
402
403# qhasm: a4 = *(uint64 *)(rp + 72)
404# asm 1: movq   72(<rp=int64#1),>a4=int64#7
405# asm 2: movq   72(<rp=%rdi),>a4=%rax
406movq   72(%rdi),%rax
407
408# qhasm: b0 = a0
409# asm 1: mov  <a0=int64#3,>b0=int64#8
410# asm 2: mov  <a0=%rdx,>b0=%r10
411mov  %rdx,%r10
412
413# qhasm: b1 = a1
414# asm 1: mov  <a1=int64#4,>b1=int64#9
415# asm 2: mov  <a1=%rcx,>b1=%r11
416mov  %rcx,%r11
417
418# qhasm: b2 = a2
419# asm 1: mov  <a2=int64#5,>b2=int64#10
420# asm 2: mov  <a2=%r8,>b2=%r12
421mov  %r8,%r12
422
423# qhasm: b3 = a3
424# asm 1: mov  <a3=int64#6,>b3=int64#11
425# asm 2: mov  <a3=%r9,>b3=%r13
426mov  %r9,%r13
427
428# qhasm: b4 = a4
429# asm 1: mov  <a4=int64#7,>b4=int64#12
430# asm 2: mov  <a4=%rax,>b4=%r14
431mov  %rax,%r14
432
433# qhasm: a0 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P0
434# asm 1: add  crypto_sign_ed25519_amd64_51_30k_batch_2P0,<a0=int64#3
435# asm 2: add  crypto_sign_ed25519_amd64_51_30k_batch_2P0,<a0=%rdx
436add  crypto_sign_ed25519_amd64_51_30k_batch_2P0,%rdx
437
438# qhasm: a1 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P1234
439# asm 1: add  crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<a1=int64#4
440# asm 2: add  crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<a1=%rcx
441add  crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%rcx
442
443# qhasm: a2 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P1234
444# asm 1: add  crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<a2=int64#5
445# asm 2: add  crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<a2=%r8
446add  crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r8
447
448# qhasm: a3 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P1234
449# asm 1: add  crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<a3=int64#6
450# asm 2: add  crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<a3=%r9
451add  crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r9
452
453# qhasm: a4 += *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_2P1234
454# asm 1: add  crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<a4=int64#7
455# asm 2: add  crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<a4=%rax
456add  crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%rax
457
458# qhasm: b0 += *(uint64 *) (rp + 0)
459# asm 1: addq 0(<rp=int64#1),<b0=int64#8
460# asm 2: addq 0(<rp=%rdi),<b0=%r10
461addq 0(%rdi),%r10
462
463# qhasm: b1 += *(uint64 *) (rp + 8)
464# asm 1: addq 8(<rp=int64#1),<b1=int64#9
465# asm 2: addq 8(<rp=%rdi),<b1=%r11
466addq 8(%rdi),%r11
467
468# qhasm: b2 += *(uint64 *) (rp + 16)
469# asm 1: addq 16(<rp=int64#1),<b2=int64#10
470# asm 2: addq 16(<rp=%rdi),<b2=%r12
471addq 16(%rdi),%r12
472
473# qhasm: b3 += *(uint64 *) (rp + 24)
474# asm 1: addq 24(<rp=int64#1),<b3=int64#11
475# asm 2: addq 24(<rp=%rdi),<b3=%r13
476addq 24(%rdi),%r13
477
478# qhasm: b4 += *(uint64 *) (rp + 32)
479# asm 1: addq 32(<rp=int64#1),<b4=int64#12
480# asm 2: addq 32(<rp=%rdi),<b4=%r14
481addq 32(%rdi),%r14
482
483# qhasm: a0 -= *(uint64 *) (rp + 0)
484# asm 1: subq 0(<rp=int64#1),<a0=int64#3
485# asm 2: subq 0(<rp=%rdi),<a0=%rdx
486subq 0(%rdi),%rdx
487
488# qhasm: a1 -= *(uint64 *) (rp + 8)
489# asm 1: subq 8(<rp=int64#1),<a1=int64#4
490# asm 2: subq 8(<rp=%rdi),<a1=%rcx
491subq 8(%rdi),%rcx
492
493# qhasm: a2 -= *(uint64 *) (rp + 16)
494# asm 1: subq 16(<rp=int64#1),<a2=int64#5
495# asm 2: subq 16(<rp=%rdi),<a2=%r8
496subq 16(%rdi),%r8
497
498# qhasm: a3 -= *(uint64 *) (rp + 24)
499# asm 1: subq 24(<rp=int64#1),<a3=int64#6
500# asm 2: subq 24(<rp=%rdi),<a3=%r9
501subq 24(%rdi),%r9
502
503# qhasm: a4 -= *(uint64 *) (rp + 32)
504# asm 1: subq 32(<rp=int64#1),<a4=int64#7
505# asm 2: subq 32(<rp=%rdi),<a4=%rax
506subq 32(%rdi),%rax
507
508# qhasm: a0_stack = a0
509# asm 1: movq <a0=int64#3,>a0_stack=stack64#8
510# asm 2: movq <a0=%rdx,>a0_stack=56(%rsp)
511movq %rdx,56(%rsp)
512
513# qhasm: a1_stack = a1
514# asm 1: movq <a1=int64#4,>a1_stack=stack64#9
515# asm 2: movq <a1=%rcx,>a1_stack=64(%rsp)
516movq %rcx,64(%rsp)
517
518# qhasm: a2_stack = a2
519# asm 1: movq <a2=int64#5,>a2_stack=stack64#10
520# asm 2: movq <a2=%r8,>a2_stack=72(%rsp)
521movq %r8,72(%rsp)
522
523# qhasm: a3_stack = a3
524# asm 1: movq <a3=int64#6,>a3_stack=stack64#11
525# asm 2: movq <a3=%r9,>a3_stack=80(%rsp)
526movq %r9,80(%rsp)
527
528# qhasm: a4_stack = a4
529# asm 1: movq <a4=int64#7,>a4_stack=stack64#12
530# asm 2: movq <a4=%rax,>a4_stack=88(%rsp)
531movq %rax,88(%rsp)
532
533# qhasm: b0_stack = b0
534# asm 1: movq <b0=int64#8,>b0_stack=stack64#13
535# asm 2: movq <b0=%r10,>b0_stack=96(%rsp)
536movq %r10,96(%rsp)
537
538# qhasm: b1_stack = b1
539# asm 1: movq <b1=int64#9,>b1_stack=stack64#14
540# asm 2: movq <b1=%r11,>b1_stack=104(%rsp)
541movq %r11,104(%rsp)
542
543# qhasm: b2_stack = b2
544# asm 1: movq <b2=int64#10,>b2_stack=stack64#15
545# asm 2: movq <b2=%r12,>b2_stack=112(%rsp)
546movq %r12,112(%rsp)
547
548# qhasm: b3_stack = b3
549# asm 1: movq <b3=int64#11,>b3_stack=stack64#16
550# asm 2: movq <b3=%r13,>b3_stack=120(%rsp)
551movq %r13,120(%rsp)
552
553# qhasm: b4_stack = b4
554# asm 1: movq <b4=int64#12,>b4_stack=stack64#17
555# asm 2: movq <b4=%r14,>b4_stack=128(%rsp)
556movq %r14,128(%rsp)
557
558# qhasm:   mulrax = a3_stack
559# asm 1: movq <a3_stack=stack64#11,>mulrax=int64#3
560# asm 2: movq <a3_stack=80(%rsp),>mulrax=%rdx
561movq 80(%rsp),%rdx
562
563# qhasm:   mulrax *= 19
564# asm 1: imulq  $19,<mulrax=int64#3,>mulrax=int64#7
565# asm 2: imulq  $19,<mulrax=%rdx,>mulrax=%rax
566imulq  $19,%rdx,%rax
567
568# qhasm:   mulx319_stack = mulrax
569# asm 1: movq <mulrax=int64#7,>mulx319_stack=stack64#18
570# asm 2: movq <mulrax=%rax,>mulx319_stack=136(%rsp)
571movq %rax,136(%rsp)
572
573# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 16)
574# asm 1: mulq  16(<qp=int64#2)
575# asm 2: mulq  16(<qp=%rsi)
576mulq  16(%rsi)
577
578# qhasm:   a0 = mulrax
579# asm 1: mov  <mulrax=int64#7,>a0=int64#4
580# asm 2: mov  <mulrax=%rax,>a0=%rcx
581mov  %rax,%rcx
582
583# qhasm:   mulr01 = mulrdx
584# asm 1: mov  <mulrdx=int64#3,>mulr01=int64#5
585# asm 2: mov  <mulrdx=%rdx,>mulr01=%r8
586mov  %rdx,%r8
587
588# qhasm:   mulrax = a4_stack
589# asm 1: movq <a4_stack=stack64#12,>mulrax=int64#3
590# asm 2: movq <a4_stack=88(%rsp),>mulrax=%rdx
591movq 88(%rsp),%rdx
592
593# qhasm:   mulrax *= 19
594# asm 1: imulq  $19,<mulrax=int64#3,>mulrax=int64#7
595# asm 2: imulq  $19,<mulrax=%rdx,>mulrax=%rax
596imulq  $19,%rdx,%rax
597
598# qhasm:   mulx419_stack = mulrax
599# asm 1: movq <mulrax=int64#7,>mulx419_stack=stack64#19
600# asm 2: movq <mulrax=%rax,>mulx419_stack=144(%rsp)
601movq %rax,144(%rsp)
602
603# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 8)
604# asm 1: mulq  8(<qp=int64#2)
605# asm 2: mulq  8(<qp=%rsi)
606mulq  8(%rsi)
607
608# qhasm:   carry? a0 += mulrax
609# asm 1: add  <mulrax=int64#7,<a0=int64#4
610# asm 2: add  <mulrax=%rax,<a0=%rcx
611add  %rax,%rcx
612
613# qhasm:   mulr01 += mulrdx + carry
614# asm 1: adc <mulrdx=int64#3,<mulr01=int64#5
615# asm 2: adc <mulrdx=%rdx,<mulr01=%r8
616adc %rdx,%r8
617
618# qhasm:   mulrax = a0_stack
619# asm 1: movq <a0_stack=stack64#8,>mulrax=int64#7
620# asm 2: movq <a0_stack=56(%rsp),>mulrax=%rax
621movq 56(%rsp),%rax
622
623# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 0)
624# asm 1: mulq  0(<qp=int64#2)
625# asm 2: mulq  0(<qp=%rsi)
626mulq  0(%rsi)
627
628# qhasm:   carry? a0 += mulrax
629# asm 1: add  <mulrax=int64#7,<a0=int64#4
630# asm 2: add  <mulrax=%rax,<a0=%rcx
631add  %rax,%rcx
632
633# qhasm:   mulr01 += mulrdx + carry
634# asm 1: adc <mulrdx=int64#3,<mulr01=int64#5
635# asm 2: adc <mulrdx=%rdx,<mulr01=%r8
636adc %rdx,%r8
637
638# qhasm:   mulrax = a0_stack
639# asm 1: movq <a0_stack=stack64#8,>mulrax=int64#7
640# asm 2: movq <a0_stack=56(%rsp),>mulrax=%rax
641movq 56(%rsp),%rax
642
643# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 8)
644# asm 1: mulq  8(<qp=int64#2)
645# asm 2: mulq  8(<qp=%rsi)
646mulq  8(%rsi)
647
648# qhasm:   a1 = mulrax
649# asm 1: mov  <mulrax=int64#7,>a1=int64#6
650# asm 2: mov  <mulrax=%rax,>a1=%r9
651mov  %rax,%r9
652
653# qhasm:   mulr11 = mulrdx
654# asm 1: mov  <mulrdx=int64#3,>mulr11=int64#8
655# asm 2: mov  <mulrdx=%rdx,>mulr11=%r10
656mov  %rdx,%r10
657
658# qhasm:   mulrax = a0_stack
659# asm 1: movq <a0_stack=stack64#8,>mulrax=int64#7
660# asm 2: movq <a0_stack=56(%rsp),>mulrax=%rax
661movq 56(%rsp),%rax
662
663# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 16)
664# asm 1: mulq  16(<qp=int64#2)
665# asm 2: mulq  16(<qp=%rsi)
666mulq  16(%rsi)
667
668# qhasm:   a2 = mulrax
669# asm 1: mov  <mulrax=int64#7,>a2=int64#9
670# asm 2: mov  <mulrax=%rax,>a2=%r11
671mov  %rax,%r11
672
673# qhasm:   mulr21 = mulrdx
674# asm 1: mov  <mulrdx=int64#3,>mulr21=int64#10
675# asm 2: mov  <mulrdx=%rdx,>mulr21=%r12
676mov  %rdx,%r12
677
678# qhasm:   mulrax = a0_stack
679# asm 1: movq <a0_stack=stack64#8,>mulrax=int64#7
680# asm 2: movq <a0_stack=56(%rsp),>mulrax=%rax
681movq 56(%rsp),%rax
682
683# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 24)
684# asm 1: mulq  24(<qp=int64#2)
685# asm 2: mulq  24(<qp=%rsi)
686mulq  24(%rsi)
687
688# qhasm:   a3 = mulrax
689# asm 1: mov  <mulrax=int64#7,>a3=int64#11
690# asm 2: mov  <mulrax=%rax,>a3=%r13
691mov  %rax,%r13
692
693# qhasm:   mulr31 = mulrdx
694# asm 1: mov  <mulrdx=int64#3,>mulr31=int64#12
695# asm 2: mov  <mulrdx=%rdx,>mulr31=%r14
696mov  %rdx,%r14
697
698# qhasm:   mulrax = a0_stack
699# asm 1: movq <a0_stack=stack64#8,>mulrax=int64#7
700# asm 2: movq <a0_stack=56(%rsp),>mulrax=%rax
701movq 56(%rsp),%rax
702
703# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 32)
704# asm 1: mulq  32(<qp=int64#2)
705# asm 2: mulq  32(<qp=%rsi)
706mulq  32(%rsi)
707
708# qhasm:   a4 = mulrax
709# asm 1: mov  <mulrax=int64#7,>a4=int64#13
710# asm 2: mov  <mulrax=%rax,>a4=%r15
711mov  %rax,%r15
712
713# qhasm:   mulr41 = mulrdx
714# asm 1: mov  <mulrdx=int64#3,>mulr41=int64#14
715# asm 2: mov  <mulrdx=%rdx,>mulr41=%rbx
716mov  %rdx,%rbx
717
718# qhasm:   mulrax = a1_stack
719# asm 1: movq <a1_stack=stack64#9,>mulrax=int64#7
720# asm 2: movq <a1_stack=64(%rsp),>mulrax=%rax
721movq 64(%rsp),%rax
722
723# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 0)
724# asm 1: mulq  0(<qp=int64#2)
725# asm 2: mulq  0(<qp=%rsi)
726mulq  0(%rsi)
727
728# qhasm:   carry? a1 += mulrax
729# asm 1: add  <mulrax=int64#7,<a1=int64#6
730# asm 2: add  <mulrax=%rax,<a1=%r9
731add  %rax,%r9
732
733# qhasm:   mulr11 += mulrdx + carry
734# asm 1: adc <mulrdx=int64#3,<mulr11=int64#8
735# asm 2: adc <mulrdx=%rdx,<mulr11=%r10
736adc %rdx,%r10
737
738# qhasm:   mulrax = a1_stack
739# asm 1: movq <a1_stack=stack64#9,>mulrax=int64#7
740# asm 2: movq <a1_stack=64(%rsp),>mulrax=%rax
741movq 64(%rsp),%rax
742
743# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 8)
744# asm 1: mulq  8(<qp=int64#2)
745# asm 2: mulq  8(<qp=%rsi)
746mulq  8(%rsi)
747
748# qhasm:   carry? a2 += mulrax
749# asm 1: add  <mulrax=int64#7,<a2=int64#9
750# asm 2: add  <mulrax=%rax,<a2=%r11
751add  %rax,%r11
752
753# qhasm:   mulr21 += mulrdx + carry
754# asm 1: adc <mulrdx=int64#3,<mulr21=int64#10
755# asm 2: adc <mulrdx=%rdx,<mulr21=%r12
756adc %rdx,%r12
757
758# qhasm:   mulrax = a1_stack
759# asm 1: movq <a1_stack=stack64#9,>mulrax=int64#7
760# asm 2: movq <a1_stack=64(%rsp),>mulrax=%rax
761movq 64(%rsp),%rax
762
763# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 16)
764# asm 1: mulq  16(<qp=int64#2)
765# asm 2: mulq  16(<qp=%rsi)
766mulq  16(%rsi)
767
768# qhasm:   carry? a3 += mulrax
769# asm 1: add  <mulrax=int64#7,<a3=int64#11
770# asm 2: add  <mulrax=%rax,<a3=%r13
771add  %rax,%r13
772
773# qhasm:   mulr31 += mulrdx + carry
774# asm 1: adc <mulrdx=int64#3,<mulr31=int64#12
775# asm 2: adc <mulrdx=%rdx,<mulr31=%r14
776adc %rdx,%r14
777
778# qhasm:   mulrax = a1_stack
779# asm 1: movq <a1_stack=stack64#9,>mulrax=int64#7
780# asm 2: movq <a1_stack=64(%rsp),>mulrax=%rax
781movq 64(%rsp),%rax
782
783# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 24)
784# asm 1: mulq  24(<qp=int64#2)
785# asm 2: mulq  24(<qp=%rsi)
786mulq  24(%rsi)
787
788# qhasm:   carry? a4 += mulrax
789# asm 1: add  <mulrax=int64#7,<a4=int64#13
790# asm 2: add  <mulrax=%rax,<a4=%r15
791add  %rax,%r15
792
793# qhasm:   mulr41 += mulrdx + carry
794# asm 1: adc <mulrdx=int64#3,<mulr41=int64#14
795# asm 2: adc <mulrdx=%rdx,<mulr41=%rbx
796adc %rdx,%rbx
797
798# qhasm:   mulrax = a1_stack
799# asm 1: movq <a1_stack=stack64#9,>mulrax=int64#3
800# asm 2: movq <a1_stack=64(%rsp),>mulrax=%rdx
801movq 64(%rsp),%rdx
802
803# qhasm:   mulrax *= 19
804# asm 1: imulq  $19,<mulrax=int64#3,>mulrax=int64#7
805# asm 2: imulq  $19,<mulrax=%rdx,>mulrax=%rax
806imulq  $19,%rdx,%rax
807
808# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 32)
809# asm 1: mulq  32(<qp=int64#2)
810# asm 2: mulq  32(<qp=%rsi)
811mulq  32(%rsi)
812
813# qhasm:   carry? a0 += mulrax
814# asm 1: add  <mulrax=int64#7,<a0=int64#4
815# asm 2: add  <mulrax=%rax,<a0=%rcx
816add  %rax,%rcx
817
818# qhasm:   mulr01 += mulrdx + carry
819# asm 1: adc <mulrdx=int64#3,<mulr01=int64#5
820# asm 2: adc <mulrdx=%rdx,<mulr01=%r8
821adc %rdx,%r8
822
823# qhasm:   mulrax = a2_stack
824# asm 1: movq <a2_stack=stack64#10,>mulrax=int64#7
825# asm 2: movq <a2_stack=72(%rsp),>mulrax=%rax
826movq 72(%rsp),%rax
827
828# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 0)
829# asm 1: mulq  0(<qp=int64#2)
830# asm 2: mulq  0(<qp=%rsi)
831mulq  0(%rsi)
832
833# qhasm:   carry? a2 += mulrax
834# asm 1: add  <mulrax=int64#7,<a2=int64#9
835# asm 2: add  <mulrax=%rax,<a2=%r11
836add  %rax,%r11
837
838# qhasm:   mulr21 += mulrdx + carry
839# asm 1: adc <mulrdx=int64#3,<mulr21=int64#10
840# asm 2: adc <mulrdx=%rdx,<mulr21=%r12
841adc %rdx,%r12
842
843# qhasm:   mulrax = a2_stack
844# asm 1: movq <a2_stack=stack64#10,>mulrax=int64#7
845# asm 2: movq <a2_stack=72(%rsp),>mulrax=%rax
846movq 72(%rsp),%rax
847
848# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 8)
849# asm 1: mulq  8(<qp=int64#2)
850# asm 2: mulq  8(<qp=%rsi)
851mulq  8(%rsi)
852
853# qhasm:   carry? a3 += mulrax
854# asm 1: add  <mulrax=int64#7,<a3=int64#11
855# asm 2: add  <mulrax=%rax,<a3=%r13
856add  %rax,%r13
857
858# qhasm:   mulr31 += mulrdx + carry
859# asm 1: adc <mulrdx=int64#3,<mulr31=int64#12
860# asm 2: adc <mulrdx=%rdx,<mulr31=%r14
861adc %rdx,%r14
862
863# qhasm:   mulrax = a2_stack
864# asm 1: movq <a2_stack=stack64#10,>mulrax=int64#7
865# asm 2: movq <a2_stack=72(%rsp),>mulrax=%rax
866movq 72(%rsp),%rax
867
868# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 16)
869# asm 1: mulq  16(<qp=int64#2)
870# asm 2: mulq  16(<qp=%rsi)
871mulq  16(%rsi)
872
873# qhasm:   carry? a4 += mulrax
874# asm 1: add  <mulrax=int64#7,<a4=int64#13
875# asm 2: add  <mulrax=%rax,<a4=%r15
876add  %rax,%r15
877
878# qhasm:   mulr41 += mulrdx + carry
879# asm 1: adc <mulrdx=int64#3,<mulr41=int64#14
880# asm 2: adc <mulrdx=%rdx,<mulr41=%rbx
881adc %rdx,%rbx
882
883# qhasm:   mulrax = a2_stack
884# asm 1: movq <a2_stack=stack64#10,>mulrax=int64#3
885# asm 2: movq <a2_stack=72(%rsp),>mulrax=%rdx
886movq 72(%rsp),%rdx
887
888# qhasm:   mulrax *= 19
889# asm 1: imulq  $19,<mulrax=int64#3,>mulrax=int64#7
890# asm 2: imulq  $19,<mulrax=%rdx,>mulrax=%rax
891imulq  $19,%rdx,%rax
892
893# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 24)
894# asm 1: mulq  24(<qp=int64#2)
895# asm 2: mulq  24(<qp=%rsi)
896mulq  24(%rsi)
897
898# qhasm:   carry? a0 += mulrax
899# asm 1: add  <mulrax=int64#7,<a0=int64#4
900# asm 2: add  <mulrax=%rax,<a0=%rcx
901add  %rax,%rcx
902
903# qhasm:   mulr01 += mulrdx + carry
904# asm 1: adc <mulrdx=int64#3,<mulr01=int64#5
905# asm 2: adc <mulrdx=%rdx,<mulr01=%r8
906adc %rdx,%r8
907
908# qhasm:   mulrax = a2_stack
909# asm 1: movq <a2_stack=stack64#10,>mulrax=int64#3
910# asm 2: movq <a2_stack=72(%rsp),>mulrax=%rdx
911movq 72(%rsp),%rdx
912
913# qhasm:   mulrax *= 19
914# asm 1: imulq  $19,<mulrax=int64#3,>mulrax=int64#7
915# asm 2: imulq  $19,<mulrax=%rdx,>mulrax=%rax
916imulq  $19,%rdx,%rax
917
918# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 32)
919# asm 1: mulq  32(<qp=int64#2)
920# asm 2: mulq  32(<qp=%rsi)
921mulq  32(%rsi)
922
923# qhasm:   carry? a1 += mulrax
924# asm 1: add  <mulrax=int64#7,<a1=int64#6
925# asm 2: add  <mulrax=%rax,<a1=%r9
926add  %rax,%r9
927
928# qhasm:   mulr11 += mulrdx + carry
929# asm 1: adc <mulrdx=int64#3,<mulr11=int64#8
930# asm 2: adc <mulrdx=%rdx,<mulr11=%r10
931adc %rdx,%r10
932
933# qhasm:   mulrax = a3_stack
934# asm 1: movq <a3_stack=stack64#11,>mulrax=int64#7
935# asm 2: movq <a3_stack=80(%rsp),>mulrax=%rax
936movq 80(%rsp),%rax
937
938# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 0)
939# asm 1: mulq  0(<qp=int64#2)
940# asm 2: mulq  0(<qp=%rsi)
941mulq  0(%rsi)
942
943# qhasm:   carry? a3 += mulrax
944# asm 1: add  <mulrax=int64#7,<a3=int64#11
945# asm 2: add  <mulrax=%rax,<a3=%r13
946add  %rax,%r13
947
948# qhasm:   mulr31 += mulrdx + carry
949# asm 1: adc <mulrdx=int64#3,<mulr31=int64#12
950# asm 2: adc <mulrdx=%rdx,<mulr31=%r14
951adc %rdx,%r14
952
953# qhasm:   mulrax = a3_stack
954# asm 1: movq <a3_stack=stack64#11,>mulrax=int64#7
955# asm 2: movq <a3_stack=80(%rsp),>mulrax=%rax
956movq 80(%rsp),%rax
957
958# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 8)
959# asm 1: mulq  8(<qp=int64#2)
960# asm 2: mulq  8(<qp=%rsi)
961mulq  8(%rsi)
962
963# qhasm:   carry? a4 += mulrax
964# asm 1: add  <mulrax=int64#7,<a4=int64#13
965# asm 2: add  <mulrax=%rax,<a4=%r15
966add  %rax,%r15
967
968# qhasm:   mulr41 += mulrdx + carry
969# asm 1: adc <mulrdx=int64#3,<mulr41=int64#14
970# asm 2: adc <mulrdx=%rdx,<mulr41=%rbx
971adc %rdx,%rbx
972
973# qhasm:   mulrax = mulx319_stack
974# asm 1: movq <mulx319_stack=stack64#18,>mulrax=int64#7
975# asm 2: movq <mulx319_stack=136(%rsp),>mulrax=%rax
976movq 136(%rsp),%rax
977
978# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 24)
979# asm 1: mulq  24(<qp=int64#2)
980# asm 2: mulq  24(<qp=%rsi)
981mulq  24(%rsi)
982
983# qhasm:   carry? a1 += mulrax
984# asm 1: add  <mulrax=int64#7,<a1=int64#6
985# asm 2: add  <mulrax=%rax,<a1=%r9
986add  %rax,%r9
987
988# qhasm:   mulr11 += mulrdx + carry
989# asm 1: adc <mulrdx=int64#3,<mulr11=int64#8
990# asm 2: adc <mulrdx=%rdx,<mulr11=%r10
991adc %rdx,%r10
992
993# qhasm:   mulrax = mulx319_stack
994# asm 1: movq <mulx319_stack=stack64#18,>mulrax=int64#7
995# asm 2: movq <mulx319_stack=136(%rsp),>mulrax=%rax
996movq 136(%rsp),%rax
997
998# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 32)
999# asm 1: mulq  32(<qp=int64#2)
1000# asm 2: mulq  32(<qp=%rsi)
1001mulq  32(%rsi)
1002
1003# qhasm:   carry? a2 += mulrax
1004# asm 1: add  <mulrax=int64#7,<a2=int64#9
1005# asm 2: add  <mulrax=%rax,<a2=%r11
1006add  %rax,%r11
1007
1008# qhasm:   mulr21 += mulrdx + carry
1009# asm 1: adc <mulrdx=int64#3,<mulr21=int64#10
1010# asm 2: adc <mulrdx=%rdx,<mulr21=%r12
1011adc %rdx,%r12
1012
1013# qhasm:   mulrax = a4_stack
1014# asm 1: movq <a4_stack=stack64#12,>mulrax=int64#7
1015# asm 2: movq <a4_stack=88(%rsp),>mulrax=%rax
1016movq 88(%rsp),%rax
1017
1018# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 0)
1019# asm 1: mulq  0(<qp=int64#2)
1020# asm 2: mulq  0(<qp=%rsi)
1021mulq  0(%rsi)
1022
1023# qhasm:   carry? a4 += mulrax
1024# asm 1: add  <mulrax=int64#7,<a4=int64#13
1025# asm 2: add  <mulrax=%rax,<a4=%r15
1026add  %rax,%r15
1027
1028# qhasm:   mulr41 += mulrdx + carry
1029# asm 1: adc <mulrdx=int64#3,<mulr41=int64#14
1030# asm 2: adc <mulrdx=%rdx,<mulr41=%rbx
1031adc %rdx,%rbx
1032
1033# qhasm:   mulrax = mulx419_stack
1034# asm 1: movq <mulx419_stack=stack64#19,>mulrax=int64#7
1035# asm 2: movq <mulx419_stack=144(%rsp),>mulrax=%rax
1036movq 144(%rsp),%rax
1037
1038# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 16)
1039# asm 1: mulq  16(<qp=int64#2)
1040# asm 2: mulq  16(<qp=%rsi)
1041mulq  16(%rsi)
1042
1043# qhasm:   carry? a1 += mulrax
1044# asm 1: add  <mulrax=int64#7,<a1=int64#6
1045# asm 2: add  <mulrax=%rax,<a1=%r9
1046add  %rax,%r9
1047
1048# qhasm:   mulr11 += mulrdx + carry
1049# asm 1: adc <mulrdx=int64#3,<mulr11=int64#8
1050# asm 2: adc <mulrdx=%rdx,<mulr11=%r10
1051adc %rdx,%r10
1052
1053# qhasm:   mulrax = mulx419_stack
1054# asm 1: movq <mulx419_stack=stack64#19,>mulrax=int64#7
1055# asm 2: movq <mulx419_stack=144(%rsp),>mulrax=%rax
1056movq 144(%rsp),%rax
1057
1058# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 24)
1059# asm 1: mulq  24(<qp=int64#2)
1060# asm 2: mulq  24(<qp=%rsi)
1061mulq  24(%rsi)
1062
1063# qhasm:   carry? a2 += mulrax
1064# asm 1: add  <mulrax=int64#7,<a2=int64#9
1065# asm 2: add  <mulrax=%rax,<a2=%r11
1066add  %rax,%r11
1067
1068# qhasm:   mulr21 += mulrdx + carry
1069# asm 1: adc <mulrdx=int64#3,<mulr21=int64#10
1070# asm 2: adc <mulrdx=%rdx,<mulr21=%r12
1071adc %rdx,%r12
1072
1073# qhasm:   mulrax = mulx419_stack
1074# asm 1: movq <mulx419_stack=stack64#19,>mulrax=int64#7
1075# asm 2: movq <mulx419_stack=144(%rsp),>mulrax=%rax
1076movq 144(%rsp),%rax
1077
1078# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 32)
1079# asm 1: mulq  32(<qp=int64#2)
1080# asm 2: mulq  32(<qp=%rsi)
1081mulq  32(%rsi)
1082
1083# qhasm:   carry? a3 += mulrax
1084# asm 1: add  <mulrax=int64#7,<a3=int64#11
1085# asm 2: add  <mulrax=%rax,<a3=%r13
1086add  %rax,%r13
1087
1088# qhasm:   mulr31 += mulrdx + carry
1089# asm 1: adc <mulrdx=int64#3,<mulr31=int64#12
1090# asm 2: adc <mulrdx=%rdx,<mulr31=%r14
1091adc %rdx,%r14
1092
1093# qhasm:   mulredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
1094# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=int64#3
1095# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx
1096movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx
1097
1098# qhasm:   mulr01 = (mulr01.a0) << 13
1099# asm 1: shld $13,<a0=int64#4,<mulr01=int64#5
1100# asm 2: shld $13,<a0=%rcx,<mulr01=%r8
1101shld $13,%rcx,%r8
1102
1103# qhasm:   a0 &= mulredmask
1104# asm 1: and  <mulredmask=int64#3,<a0=int64#4
1105# asm 2: and  <mulredmask=%rdx,<a0=%rcx
1106and  %rdx,%rcx
1107
1108# qhasm:   mulr11 = (mulr11.a1) << 13
1109# asm 1: shld $13,<a1=int64#6,<mulr11=int64#8
1110# asm 2: shld $13,<a1=%r9,<mulr11=%r10
1111shld $13,%r9,%r10
1112
1113# qhasm:   a1 &= mulredmask
1114# asm 1: and  <mulredmask=int64#3,<a1=int64#6
1115# asm 2: and  <mulredmask=%rdx,<a1=%r9
1116and  %rdx,%r9
1117
1118# qhasm:   a1 += mulr01
1119# asm 1: add  <mulr01=int64#5,<a1=int64#6
1120# asm 2: add  <mulr01=%r8,<a1=%r9
1121add  %r8,%r9
1122
1123# qhasm:   mulr21 = (mulr21.a2) << 13
1124# asm 1: shld $13,<a2=int64#9,<mulr21=int64#10
1125# asm 2: shld $13,<a2=%r11,<mulr21=%r12
1126shld $13,%r11,%r12
1127
1128# qhasm:   a2 &= mulredmask
1129# asm 1: and  <mulredmask=int64#3,<a2=int64#9
1130# asm 2: and  <mulredmask=%rdx,<a2=%r11
1131and  %rdx,%r11
1132
1133# qhasm:   a2 += mulr11
1134# asm 1: add  <mulr11=int64#8,<a2=int64#9
1135# asm 2: add  <mulr11=%r10,<a2=%r11
1136add  %r10,%r11
1137
1138# qhasm:   mulr31 = (mulr31.a3) << 13
1139# asm 1: shld $13,<a3=int64#11,<mulr31=int64#12
1140# asm 2: shld $13,<a3=%r13,<mulr31=%r14
1141shld $13,%r13,%r14
1142
1143# qhasm:   a3 &= mulredmask
1144# asm 1: and  <mulredmask=int64#3,<a3=int64#11
1145# asm 2: and  <mulredmask=%rdx,<a3=%r13
1146and  %rdx,%r13
1147
1148# qhasm:   a3 += mulr21
1149# asm 1: add  <mulr21=int64#10,<a3=int64#11
1150# asm 2: add  <mulr21=%r12,<a3=%r13
1151add  %r12,%r13
1152
1153# qhasm:   mulr41 = (mulr41.a4) << 13
1154# asm 1: shld $13,<a4=int64#13,<mulr41=int64#14
1155# asm 2: shld $13,<a4=%r15,<mulr41=%rbx
1156shld $13,%r15,%rbx
1157
1158# qhasm:   a4 &= mulredmask
1159# asm 1: and  <mulredmask=int64#3,<a4=int64#13
1160# asm 2: and  <mulredmask=%rdx,<a4=%r15
1161and  %rdx,%r15
1162
1163# qhasm:   a4 += mulr31
1164# asm 1: add  <mulr31=int64#12,<a4=int64#13
1165# asm 2: add  <mulr31=%r14,<a4=%r15
1166add  %r14,%r15
1167
1168# qhasm:   mulr41 = mulr41 * 19
1169# asm 1: imulq  $19,<mulr41=int64#14,>mulr41=int64#5
1170# asm 2: imulq  $19,<mulr41=%rbx,>mulr41=%r8
1171imulq  $19,%rbx,%r8
1172
1173# qhasm:   a0 += mulr41
1174# asm 1: add  <mulr41=int64#5,<a0=int64#4
1175# asm 2: add  <mulr41=%r8,<a0=%rcx
1176add  %r8,%rcx
1177
1178# qhasm:   mult = a0
1179# asm 1: mov  <a0=int64#4,>mult=int64#5
1180# asm 2: mov  <a0=%rcx,>mult=%r8
1181mov  %rcx,%r8
1182
1183# qhasm:   (uint64) mult >>= 51
1184# asm 1: shr  $51,<mult=int64#5
1185# asm 2: shr  $51,<mult=%r8
1186shr  $51,%r8
1187
1188# qhasm:   mult += a1
1189# asm 1: add  <a1=int64#6,<mult=int64#5
1190# asm 2: add  <a1=%r9,<mult=%r8
1191add  %r9,%r8
1192
1193# qhasm:   a1 = mult
1194# asm 1: mov  <mult=int64#5,>a1=int64#6
1195# asm 2: mov  <mult=%r8,>a1=%r9
1196mov  %r8,%r9
1197
1198# qhasm:   (uint64) mult >>= 51
1199# asm 1: shr  $51,<mult=int64#5
1200# asm 2: shr  $51,<mult=%r8
1201shr  $51,%r8
1202
1203# qhasm:   a0 &= mulredmask
1204# asm 1: and  <mulredmask=int64#3,<a0=int64#4
1205# asm 2: and  <mulredmask=%rdx,<a0=%rcx
1206and  %rdx,%rcx
1207
1208# qhasm:   mult += a2
1209# asm 1: add  <a2=int64#9,<mult=int64#5
1210# asm 2: add  <a2=%r11,<mult=%r8
1211add  %r11,%r8
1212
1213# qhasm:   a2 = mult
1214# asm 1: mov  <mult=int64#5,>a2=int64#7
1215# asm 2: mov  <mult=%r8,>a2=%rax
1216mov  %r8,%rax
1217
1218# qhasm:   (uint64) mult >>= 51
1219# asm 1: shr  $51,<mult=int64#5
1220# asm 2: shr  $51,<mult=%r8
1221shr  $51,%r8
1222
1223# qhasm:   a1 &= mulredmask
1224# asm 1: and  <mulredmask=int64#3,<a1=int64#6
1225# asm 2: and  <mulredmask=%rdx,<a1=%r9
1226and  %rdx,%r9
1227
1228# qhasm:   mult += a3
1229# asm 1: add  <a3=int64#11,<mult=int64#5
1230# asm 2: add  <a3=%r13,<mult=%r8
1231add  %r13,%r8
1232
1233# qhasm:   a3 = mult
1234# asm 1: mov  <mult=int64#5,>a3=int64#8
1235# asm 2: mov  <mult=%r8,>a3=%r10
1236mov  %r8,%r10
1237
1238# qhasm:   (uint64) mult >>= 51
1239# asm 1: shr  $51,<mult=int64#5
1240# asm 2: shr  $51,<mult=%r8
1241shr  $51,%r8
1242
1243# qhasm:   a2 &= mulredmask
1244# asm 1: and  <mulredmask=int64#3,<a2=int64#7
1245# asm 2: and  <mulredmask=%rdx,<a2=%rax
1246and  %rdx,%rax
1247
1248# qhasm:   mult += a4
1249# asm 1: add  <a4=int64#13,<mult=int64#5
1250# asm 2: add  <a4=%r15,<mult=%r8
1251add  %r15,%r8
1252
1253# qhasm:   a4 = mult
1254# asm 1: mov  <mult=int64#5,>a4=int64#9
1255# asm 2: mov  <mult=%r8,>a4=%r11
1256mov  %r8,%r11
1257
1258# qhasm:   (uint64) mult >>= 51
1259# asm 1: shr  $51,<mult=int64#5
1260# asm 2: shr  $51,<mult=%r8
1261shr  $51,%r8
1262
1263# qhasm:   a3 &= mulredmask
1264# asm 1: and  <mulredmask=int64#3,<a3=int64#8
1265# asm 2: and  <mulredmask=%rdx,<a3=%r10
1266and  %rdx,%r10
1267
1268# qhasm:   mult *= 19
1269# asm 1: imulq  $19,<mult=int64#5,>mult=int64#5
1270# asm 2: imulq  $19,<mult=%r8,>mult=%r8
1271imulq  $19,%r8,%r8
1272
1273# qhasm:   a0 += mult
1274# asm 1: add  <mult=int64#5,<a0=int64#4
1275# asm 2: add  <mult=%r8,<a0=%rcx
1276add  %r8,%rcx
1277
1278# qhasm:   a4 &= mulredmask
1279# asm 1: and  <mulredmask=int64#3,<a4=int64#9
1280# asm 2: and  <mulredmask=%rdx,<a4=%r11
1281and  %rdx,%r11
1282
1283# qhasm: a0_stack = a0
1284# asm 1: movq <a0=int64#4,>a0_stack=stack64#8
1285# asm 2: movq <a0=%rcx,>a0_stack=56(%rsp)
1286movq %rcx,56(%rsp)
1287
1288# qhasm: a1_stack = a1
1289# asm 1: movq <a1=int64#6,>a1_stack=stack64#9
1290# asm 2: movq <a1=%r9,>a1_stack=64(%rsp)
1291movq %r9,64(%rsp)
1292
1293# qhasm: a2_stack = a2
1294# asm 1: movq <a2=int64#7,>a2_stack=stack64#10
1295# asm 2: movq <a2=%rax,>a2_stack=72(%rsp)
1296movq %rax,72(%rsp)
1297
1298# qhasm: a3_stack = a3
1299# asm 1: movq <a3=int64#8,>a3_stack=stack64#11
1300# asm 2: movq <a3=%r10,>a3_stack=80(%rsp)
1301movq %r10,80(%rsp)
1302
1303# qhasm: a4_stack = a4
1304# asm 1: movq <a4=int64#9,>a4_stack=stack64#12
1305# asm 2: movq <a4=%r11,>a4_stack=88(%rsp)
1306movq %r11,88(%rsp)
1307
1308# qhasm:   mulrax = b3_stack
1309# asm 1: movq <b3_stack=stack64#16,>mulrax=int64#3
1310# asm 2: movq <b3_stack=120(%rsp),>mulrax=%rdx
1311movq 120(%rsp),%rdx
1312
1313# qhasm:   mulrax *= 19
1314# asm 1: imulq  $19,<mulrax=int64#3,>mulrax=int64#7
1315# asm 2: imulq  $19,<mulrax=%rdx,>mulrax=%rax
1316imulq  $19,%rdx,%rax
1317
1318# qhasm:   mulx319_stack = mulrax
1319# asm 1: movq <mulrax=int64#7,>mulx319_stack=stack64#18
1320# asm 2: movq <mulrax=%rax,>mulx319_stack=136(%rsp)
1321movq %rax,136(%rsp)
1322
1323# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 56)
1324# asm 1: mulq  56(<qp=int64#2)
1325# asm 2: mulq  56(<qp=%rsi)
1326mulq  56(%rsi)
1327
1328# qhasm:   e0 = mulrax
1329# asm 1: mov  <mulrax=int64#7,>e0=int64#4
1330# asm 2: mov  <mulrax=%rax,>e0=%rcx
1331mov  %rax,%rcx
1332
1333# qhasm:   mulr01 = mulrdx
1334# asm 1: mov  <mulrdx=int64#3,>mulr01=int64#5
1335# asm 2: mov  <mulrdx=%rdx,>mulr01=%r8
1336mov  %rdx,%r8
1337
1338# qhasm:   mulrax = b4_stack
1339# asm 1: movq <b4_stack=stack64#17,>mulrax=int64#3
1340# asm 2: movq <b4_stack=128(%rsp),>mulrax=%rdx
1341movq 128(%rsp),%rdx
1342
1343# qhasm:   mulrax *= 19
1344# asm 1: imulq  $19,<mulrax=int64#3,>mulrax=int64#7
1345# asm 2: imulq  $19,<mulrax=%rdx,>mulrax=%rax
1346imulq  $19,%rdx,%rax
1347
1348# qhasm:   mulx419_stack = mulrax
1349# asm 1: movq <mulrax=int64#7,>mulx419_stack=stack64#19
1350# asm 2: movq <mulrax=%rax,>mulx419_stack=144(%rsp)
1351movq %rax,144(%rsp)
1352
1353# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 48)
1354# asm 1: mulq  48(<qp=int64#2)
1355# asm 2: mulq  48(<qp=%rsi)
1356mulq  48(%rsi)
1357
1358# qhasm:   carry? e0 += mulrax
1359# asm 1: add  <mulrax=int64#7,<e0=int64#4
1360# asm 2: add  <mulrax=%rax,<e0=%rcx
1361add  %rax,%rcx
1362
1363# qhasm:   mulr01 += mulrdx + carry
1364# asm 1: adc <mulrdx=int64#3,<mulr01=int64#5
1365# asm 2: adc <mulrdx=%rdx,<mulr01=%r8
1366adc %rdx,%r8
1367
1368# qhasm:   mulrax = b0_stack
1369# asm 1: movq <b0_stack=stack64#13,>mulrax=int64#7
1370# asm 2: movq <b0_stack=96(%rsp),>mulrax=%rax
1371movq 96(%rsp),%rax
1372
1373# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 40)
1374# asm 1: mulq  40(<qp=int64#2)
1375# asm 2: mulq  40(<qp=%rsi)
1376mulq  40(%rsi)
1377
1378# qhasm:   carry? e0 += mulrax
1379# asm 1: add  <mulrax=int64#7,<e0=int64#4
1380# asm 2: add  <mulrax=%rax,<e0=%rcx
1381add  %rax,%rcx
1382
1383# qhasm:   mulr01 += mulrdx + carry
1384# asm 1: adc <mulrdx=int64#3,<mulr01=int64#5
1385# asm 2: adc <mulrdx=%rdx,<mulr01=%r8
1386adc %rdx,%r8
1387
1388# qhasm:   mulrax = b0_stack
1389# asm 1: movq <b0_stack=stack64#13,>mulrax=int64#7
1390# asm 2: movq <b0_stack=96(%rsp),>mulrax=%rax
1391movq 96(%rsp),%rax
1392
1393# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 48)
1394# asm 1: mulq  48(<qp=int64#2)
1395# asm 2: mulq  48(<qp=%rsi)
1396mulq  48(%rsi)
1397
1398# qhasm:   e1 = mulrax
1399# asm 1: mov  <mulrax=int64#7,>e1=int64#6
1400# asm 2: mov  <mulrax=%rax,>e1=%r9
1401mov  %rax,%r9
1402
1403# qhasm:   mulr11 = mulrdx
1404# asm 1: mov  <mulrdx=int64#3,>mulr11=int64#8
1405# asm 2: mov  <mulrdx=%rdx,>mulr11=%r10
1406mov  %rdx,%r10
1407
1408# qhasm:   mulrax = b0_stack
1409# asm 1: movq <b0_stack=stack64#13,>mulrax=int64#7
1410# asm 2: movq <b0_stack=96(%rsp),>mulrax=%rax
1411movq 96(%rsp),%rax
1412
1413# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 56)
1414# asm 1: mulq  56(<qp=int64#2)
1415# asm 2: mulq  56(<qp=%rsi)
1416mulq  56(%rsi)
1417
1418# qhasm:   e2 = mulrax
1419# asm 1: mov  <mulrax=int64#7,>e2=int64#9
1420# asm 2: mov  <mulrax=%rax,>e2=%r11
1421mov  %rax,%r11
1422
1423# qhasm:   mulr21 = mulrdx
1424# asm 1: mov  <mulrdx=int64#3,>mulr21=int64#10
1425# asm 2: mov  <mulrdx=%rdx,>mulr21=%r12
1426mov  %rdx,%r12
1427
1428# qhasm:   mulrax = b0_stack
1429# asm 1: movq <b0_stack=stack64#13,>mulrax=int64#7
1430# asm 2: movq <b0_stack=96(%rsp),>mulrax=%rax
1431movq 96(%rsp),%rax
1432
1433# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 64)
1434# asm 1: mulq  64(<qp=int64#2)
1435# asm 2: mulq  64(<qp=%rsi)
1436mulq  64(%rsi)
1437
1438# qhasm:   e3 = mulrax
1439# asm 1: mov  <mulrax=int64#7,>e3=int64#11
1440# asm 2: mov  <mulrax=%rax,>e3=%r13
1441mov  %rax,%r13
1442
1443# qhasm:   mulr31 = mulrdx
1444# asm 1: mov  <mulrdx=int64#3,>mulr31=int64#12
1445# asm 2: mov  <mulrdx=%rdx,>mulr31=%r14
1446mov  %rdx,%r14
1447
1448# qhasm:   mulrax = b0_stack
1449# asm 1: movq <b0_stack=stack64#13,>mulrax=int64#7
1450# asm 2: movq <b0_stack=96(%rsp),>mulrax=%rax
1451movq 96(%rsp),%rax
1452
1453# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 72)
1454# asm 1: mulq  72(<qp=int64#2)
1455# asm 2: mulq  72(<qp=%rsi)
1456mulq  72(%rsi)
1457
1458# qhasm:   e4 = mulrax
1459# asm 1: mov  <mulrax=int64#7,>e4=int64#13
1460# asm 2: mov  <mulrax=%rax,>e4=%r15
1461mov  %rax,%r15
1462
1463# qhasm:   mulr41 = mulrdx
1464# asm 1: mov  <mulrdx=int64#3,>mulr41=int64#14
1465# asm 2: mov  <mulrdx=%rdx,>mulr41=%rbx
1466mov  %rdx,%rbx
1467
1468# qhasm:   mulrax = b1_stack
1469# asm 1: movq <b1_stack=stack64#14,>mulrax=int64#7
1470# asm 2: movq <b1_stack=104(%rsp),>mulrax=%rax
1471movq 104(%rsp),%rax
1472
1473# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 40)
1474# asm 1: mulq  40(<qp=int64#2)
1475# asm 2: mulq  40(<qp=%rsi)
1476mulq  40(%rsi)
1477
1478# qhasm:   carry? e1 += mulrax
1479# asm 1: add  <mulrax=int64#7,<e1=int64#6
1480# asm 2: add  <mulrax=%rax,<e1=%r9
1481add  %rax,%r9
1482
1483# qhasm:   mulr11 += mulrdx + carry
1484# asm 1: adc <mulrdx=int64#3,<mulr11=int64#8
1485# asm 2: adc <mulrdx=%rdx,<mulr11=%r10
1486adc %rdx,%r10
1487
1488# qhasm:   mulrax = b1_stack
1489# asm 1: movq <b1_stack=stack64#14,>mulrax=int64#7
1490# asm 2: movq <b1_stack=104(%rsp),>mulrax=%rax
1491movq 104(%rsp),%rax
1492
1493# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 48)
1494# asm 1: mulq  48(<qp=int64#2)
1495# asm 2: mulq  48(<qp=%rsi)
1496mulq  48(%rsi)
1497
1498# qhasm:   carry? e2 += mulrax
1499# asm 1: add  <mulrax=int64#7,<e2=int64#9
1500# asm 2: add  <mulrax=%rax,<e2=%r11
1501add  %rax,%r11
1502
1503# qhasm:   mulr21 += mulrdx + carry
1504# asm 1: adc <mulrdx=int64#3,<mulr21=int64#10
1505# asm 2: adc <mulrdx=%rdx,<mulr21=%r12
1506adc %rdx,%r12
1507
1508# qhasm:   mulrax = b1_stack
1509# asm 1: movq <b1_stack=stack64#14,>mulrax=int64#7
1510# asm 2: movq <b1_stack=104(%rsp),>mulrax=%rax
1511movq 104(%rsp),%rax
1512
1513# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 56)
1514# asm 1: mulq  56(<qp=int64#2)
1515# asm 2: mulq  56(<qp=%rsi)
1516mulq  56(%rsi)
1517
1518# qhasm:   carry? e3 += mulrax
1519# asm 1: add  <mulrax=int64#7,<e3=int64#11
1520# asm 2: add  <mulrax=%rax,<e3=%r13
1521add  %rax,%r13
1522
1523# qhasm:   mulr31 += mulrdx + carry
1524# asm 1: adc <mulrdx=int64#3,<mulr31=int64#12
1525# asm 2: adc <mulrdx=%rdx,<mulr31=%r14
1526adc %rdx,%r14
1527
1528# qhasm:   mulrax = b1_stack
1529# asm 1: movq <b1_stack=stack64#14,>mulrax=int64#7
1530# asm 2: movq <b1_stack=104(%rsp),>mulrax=%rax
1531movq 104(%rsp),%rax
1532
1533# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 64)
1534# asm 1: mulq  64(<qp=int64#2)
1535# asm 2: mulq  64(<qp=%rsi)
1536mulq  64(%rsi)
1537
1538# qhasm:   carry? e4 += mulrax
1539# asm 1: add  <mulrax=int64#7,<e4=int64#13
1540# asm 2: add  <mulrax=%rax,<e4=%r15
1541add  %rax,%r15
1542
1543# qhasm:   mulr41 += mulrdx + carry
1544# asm 1: adc <mulrdx=int64#3,<mulr41=int64#14
1545# asm 2: adc <mulrdx=%rdx,<mulr41=%rbx
1546adc %rdx,%rbx
1547
1548# qhasm:   mulrax = b1_stack
1549# asm 1: movq <b1_stack=stack64#14,>mulrax=int64#3
1550# asm 2: movq <b1_stack=104(%rsp),>mulrax=%rdx
1551movq 104(%rsp),%rdx
1552
1553# qhasm:   mulrax *= 19
1554# asm 1: imulq  $19,<mulrax=int64#3,>mulrax=int64#7
1555# asm 2: imulq  $19,<mulrax=%rdx,>mulrax=%rax
1556imulq  $19,%rdx,%rax
1557
1558# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 72)
1559# asm 1: mulq  72(<qp=int64#2)
1560# asm 2: mulq  72(<qp=%rsi)
1561mulq  72(%rsi)
1562
1563# qhasm:   carry? e0 += mulrax
1564# asm 1: add  <mulrax=int64#7,<e0=int64#4
1565# asm 2: add  <mulrax=%rax,<e0=%rcx
1566add  %rax,%rcx
1567
1568# qhasm:   mulr01 += mulrdx + carry
1569# asm 1: adc <mulrdx=int64#3,<mulr01=int64#5
1570# asm 2: adc <mulrdx=%rdx,<mulr01=%r8
1571adc %rdx,%r8
1572
1573# qhasm:   mulrax = b2_stack
1574# asm 1: movq <b2_stack=stack64#15,>mulrax=int64#7
1575# asm 2: movq <b2_stack=112(%rsp),>mulrax=%rax
1576movq 112(%rsp),%rax
1577
1578# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 40)
1579# asm 1: mulq  40(<qp=int64#2)
1580# asm 2: mulq  40(<qp=%rsi)
1581mulq  40(%rsi)
1582
1583# qhasm:   carry? e2 += mulrax
1584# asm 1: add  <mulrax=int64#7,<e2=int64#9
1585# asm 2: add  <mulrax=%rax,<e2=%r11
1586add  %rax,%r11
1587
1588# qhasm:   mulr21 += mulrdx + carry
1589# asm 1: adc <mulrdx=int64#3,<mulr21=int64#10
1590# asm 2: adc <mulrdx=%rdx,<mulr21=%r12
1591adc %rdx,%r12
1592
1593# qhasm:   mulrax = b2_stack
1594# asm 1: movq <b2_stack=stack64#15,>mulrax=int64#7
1595# asm 2: movq <b2_stack=112(%rsp),>mulrax=%rax
1596movq 112(%rsp),%rax
1597
1598# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 48)
1599# asm 1: mulq  48(<qp=int64#2)
1600# asm 2: mulq  48(<qp=%rsi)
1601mulq  48(%rsi)
1602
1603# qhasm:   carry? e3 += mulrax
1604# asm 1: add  <mulrax=int64#7,<e3=int64#11
1605# asm 2: add  <mulrax=%rax,<e3=%r13
1606add  %rax,%r13
1607
1608# qhasm:   mulr31 += mulrdx + carry
1609# asm 1: adc <mulrdx=int64#3,<mulr31=int64#12
1610# asm 2: adc <mulrdx=%rdx,<mulr31=%r14
1611adc %rdx,%r14
1612
1613# qhasm:   mulrax = b2_stack
1614# asm 1: movq <b2_stack=stack64#15,>mulrax=int64#7
1615# asm 2: movq <b2_stack=112(%rsp),>mulrax=%rax
1616movq 112(%rsp),%rax
1617
1618# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 56)
1619# asm 1: mulq  56(<qp=int64#2)
1620# asm 2: mulq  56(<qp=%rsi)
1621mulq  56(%rsi)
1622
1623# qhasm:   carry? e4 += mulrax
1624# asm 1: add  <mulrax=int64#7,<e4=int64#13
1625# asm 2: add  <mulrax=%rax,<e4=%r15
1626add  %rax,%r15
1627
1628# qhasm:   mulr41 += mulrdx + carry
1629# asm 1: adc <mulrdx=int64#3,<mulr41=int64#14
1630# asm 2: adc <mulrdx=%rdx,<mulr41=%rbx
1631adc %rdx,%rbx
1632
1633# qhasm:   mulrax = b2_stack
1634# asm 1: movq <b2_stack=stack64#15,>mulrax=int64#3
1635# asm 2: movq <b2_stack=112(%rsp),>mulrax=%rdx
1636movq 112(%rsp),%rdx
1637
1638# qhasm:   mulrax *= 19
1639# asm 1: imulq  $19,<mulrax=int64#3,>mulrax=int64#7
1640# asm 2: imulq  $19,<mulrax=%rdx,>mulrax=%rax
1641imulq  $19,%rdx,%rax
1642
1643# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 64)
1644# asm 1: mulq  64(<qp=int64#2)
1645# asm 2: mulq  64(<qp=%rsi)
1646mulq  64(%rsi)
1647
1648# qhasm:   carry? e0 += mulrax
1649# asm 1: add  <mulrax=int64#7,<e0=int64#4
1650# asm 2: add  <mulrax=%rax,<e0=%rcx
1651add  %rax,%rcx
1652
1653# qhasm:   mulr01 += mulrdx + carry
1654# asm 1: adc <mulrdx=int64#3,<mulr01=int64#5
1655# asm 2: adc <mulrdx=%rdx,<mulr01=%r8
1656adc %rdx,%r8
1657
1658# qhasm:   mulrax = b2_stack
1659# asm 1: movq <b2_stack=stack64#15,>mulrax=int64#3
1660# asm 2: movq <b2_stack=112(%rsp),>mulrax=%rdx
1661movq 112(%rsp),%rdx
1662
1663# qhasm:   mulrax *= 19
1664# asm 1: imulq  $19,<mulrax=int64#3,>mulrax=int64#7
1665# asm 2: imulq  $19,<mulrax=%rdx,>mulrax=%rax
1666imulq  $19,%rdx,%rax
1667
1668# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 72)
1669# asm 1: mulq  72(<qp=int64#2)
1670# asm 2: mulq  72(<qp=%rsi)
1671mulq  72(%rsi)
1672
1673# qhasm:   carry? e1 += mulrax
1674# asm 1: add  <mulrax=int64#7,<e1=int64#6
1675# asm 2: add  <mulrax=%rax,<e1=%r9
1676add  %rax,%r9
1677
1678# qhasm:   mulr11 += mulrdx + carry
1679# asm 1: adc <mulrdx=int64#3,<mulr11=int64#8
1680# asm 2: adc <mulrdx=%rdx,<mulr11=%r10
1681adc %rdx,%r10
1682
1683# qhasm:   mulrax = b3_stack
1684# asm 1: movq <b3_stack=stack64#16,>mulrax=int64#7
1685# asm 2: movq <b3_stack=120(%rsp),>mulrax=%rax
1686movq 120(%rsp),%rax
1687
1688# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 40)
1689# asm 1: mulq  40(<qp=int64#2)
1690# asm 2: mulq  40(<qp=%rsi)
1691mulq  40(%rsi)
1692
1693# qhasm:   carry? e3 += mulrax
1694# asm 1: add  <mulrax=int64#7,<e3=int64#11
1695# asm 2: add  <mulrax=%rax,<e3=%r13
1696add  %rax,%r13
1697
1698# qhasm:   mulr31 += mulrdx + carry
1699# asm 1: adc <mulrdx=int64#3,<mulr31=int64#12
1700# asm 2: adc <mulrdx=%rdx,<mulr31=%r14
1701adc %rdx,%r14
1702
1703# qhasm:   mulrax = b3_stack
1704# asm 1: movq <b3_stack=stack64#16,>mulrax=int64#7
1705# asm 2: movq <b3_stack=120(%rsp),>mulrax=%rax
1706movq 120(%rsp),%rax
1707
1708# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 48)
1709# asm 1: mulq  48(<qp=int64#2)
1710# asm 2: mulq  48(<qp=%rsi)
1711mulq  48(%rsi)
1712
1713# qhasm:   carry? e4 += mulrax
1714# asm 1: add  <mulrax=int64#7,<e4=int64#13
1715# asm 2: add  <mulrax=%rax,<e4=%r15
1716add  %rax,%r15
1717
1718# qhasm:   mulr41 += mulrdx + carry
1719# asm 1: adc <mulrdx=int64#3,<mulr41=int64#14
1720# asm 2: adc <mulrdx=%rdx,<mulr41=%rbx
1721adc %rdx,%rbx
1722
1723# qhasm:   mulrax = mulx319_stack
1724# asm 1: movq <mulx319_stack=stack64#18,>mulrax=int64#7
1725# asm 2: movq <mulx319_stack=136(%rsp),>mulrax=%rax
1726movq 136(%rsp),%rax
1727
1728# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 64)
1729# asm 1: mulq  64(<qp=int64#2)
1730# asm 2: mulq  64(<qp=%rsi)
1731mulq  64(%rsi)
1732
1733# qhasm:   carry? e1 += mulrax
1734# asm 1: add  <mulrax=int64#7,<e1=int64#6
1735# asm 2: add  <mulrax=%rax,<e1=%r9
1736add  %rax,%r9
1737
1738# qhasm:   mulr11 += mulrdx + carry
1739# asm 1: adc <mulrdx=int64#3,<mulr11=int64#8
1740# asm 2: adc <mulrdx=%rdx,<mulr11=%r10
1741adc %rdx,%r10
1742
1743# qhasm:   mulrax = mulx319_stack
1744# asm 1: movq <mulx319_stack=stack64#18,>mulrax=int64#7
1745# asm 2: movq <mulx319_stack=136(%rsp),>mulrax=%rax
1746movq 136(%rsp),%rax
1747
1748# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 72)
1749# asm 1: mulq  72(<qp=int64#2)
1750# asm 2: mulq  72(<qp=%rsi)
1751mulq  72(%rsi)
1752
1753# qhasm:   carry? e2 += mulrax
1754# asm 1: add  <mulrax=int64#7,<e2=int64#9
1755# asm 2: add  <mulrax=%rax,<e2=%r11
1756add  %rax,%r11
1757
1758# qhasm:   mulr21 += mulrdx + carry
1759# asm 1: adc <mulrdx=int64#3,<mulr21=int64#10
1760# asm 2: adc <mulrdx=%rdx,<mulr21=%r12
1761adc %rdx,%r12
1762
1763# qhasm:   mulrax = b4_stack
1764# asm 1: movq <b4_stack=stack64#17,>mulrax=int64#7
1765# asm 2: movq <b4_stack=128(%rsp),>mulrax=%rax
1766movq 128(%rsp),%rax
1767
1768# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 40)
1769# asm 1: mulq  40(<qp=int64#2)
1770# asm 2: mulq  40(<qp=%rsi)
1771mulq  40(%rsi)
1772
1773# qhasm:   carry? e4 += mulrax
1774# asm 1: add  <mulrax=int64#7,<e4=int64#13
1775# asm 2: add  <mulrax=%rax,<e4=%r15
1776add  %rax,%r15
1777
1778# qhasm:   mulr41 += mulrdx + carry
1779# asm 1: adc <mulrdx=int64#3,<mulr41=int64#14
1780# asm 2: adc <mulrdx=%rdx,<mulr41=%rbx
1781adc %rdx,%rbx
1782
1783# qhasm:   mulrax = mulx419_stack
1784# asm 1: movq <mulx419_stack=stack64#19,>mulrax=int64#7
1785# asm 2: movq <mulx419_stack=144(%rsp),>mulrax=%rax
1786movq 144(%rsp),%rax
1787
1788# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 56)
1789# asm 1: mulq  56(<qp=int64#2)
1790# asm 2: mulq  56(<qp=%rsi)
1791mulq  56(%rsi)
1792
1793# qhasm:   carry? e1 += mulrax
1794# asm 1: add  <mulrax=int64#7,<e1=int64#6
1795# asm 2: add  <mulrax=%rax,<e1=%r9
1796add  %rax,%r9
1797
1798# qhasm:   mulr11 += mulrdx + carry
1799# asm 1: adc <mulrdx=int64#3,<mulr11=int64#8
1800# asm 2: adc <mulrdx=%rdx,<mulr11=%r10
1801adc %rdx,%r10
1802
1803# qhasm:   mulrax = mulx419_stack
1804# asm 1: movq <mulx419_stack=stack64#19,>mulrax=int64#7
1805# asm 2: movq <mulx419_stack=144(%rsp),>mulrax=%rax
1806movq 144(%rsp),%rax
1807
1808# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 64)
1809# asm 1: mulq  64(<qp=int64#2)
1810# asm 2: mulq  64(<qp=%rsi)
1811mulq  64(%rsi)
1812
1813# qhasm:   carry? e2 += mulrax
1814# asm 1: add  <mulrax=int64#7,<e2=int64#9
1815# asm 2: add  <mulrax=%rax,<e2=%r11
1816add  %rax,%r11
1817
1818# qhasm:   mulr21 += mulrdx + carry
1819# asm 1: adc <mulrdx=int64#3,<mulr21=int64#10
1820# asm 2: adc <mulrdx=%rdx,<mulr21=%r12
1821adc %rdx,%r12
1822
1823# qhasm:   mulrax = mulx419_stack
1824# asm 1: movq <mulx419_stack=stack64#19,>mulrax=int64#7
1825# asm 2: movq <mulx419_stack=144(%rsp),>mulrax=%rax
1826movq 144(%rsp),%rax
1827
1828# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 72)
1829# asm 1: mulq  72(<qp=int64#2)
1830# asm 2: mulq  72(<qp=%rsi)
1831mulq  72(%rsi)
1832
1833# qhasm:   carry? e3 += mulrax
1834# asm 1: add  <mulrax=int64#7,<e3=int64#11
1835# asm 2: add  <mulrax=%rax,<e3=%r13
1836add  %rax,%r13
1837
1838# qhasm:   mulr31 += mulrdx + carry
1839# asm 1: adc <mulrdx=int64#3,<mulr31=int64#12
1840# asm 2: adc <mulrdx=%rdx,<mulr31=%r14
1841adc %rdx,%r14
1842
1843# qhasm:   mulredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
1844# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=int64#3
1845# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx
1846movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx
1847
1848# qhasm:   mulr01 = (mulr01.e0) << 13
1849# asm 1: shld $13,<e0=int64#4,<mulr01=int64#5
1850# asm 2: shld $13,<e0=%rcx,<mulr01=%r8
1851shld $13,%rcx,%r8
1852
1853# qhasm:   e0 &= mulredmask
1854# asm 1: and  <mulredmask=int64#3,<e0=int64#4
1855# asm 2: and  <mulredmask=%rdx,<e0=%rcx
1856and  %rdx,%rcx
1857
1858# qhasm:   mulr11 = (mulr11.e1) << 13
1859# asm 1: shld $13,<e1=int64#6,<mulr11=int64#8
1860# asm 2: shld $13,<e1=%r9,<mulr11=%r10
1861shld $13,%r9,%r10
1862
1863# qhasm:   e1 &= mulredmask
1864# asm 1: and  <mulredmask=int64#3,<e1=int64#6
1865# asm 2: and  <mulredmask=%rdx,<e1=%r9
1866and  %rdx,%r9
1867
1868# qhasm:   e1 += mulr01
1869# asm 1: add  <mulr01=int64#5,<e1=int64#6
1870# asm 2: add  <mulr01=%r8,<e1=%r9
1871add  %r8,%r9
1872
1873# qhasm:   mulr21 = (mulr21.e2) << 13
1874# asm 1: shld $13,<e2=int64#9,<mulr21=int64#10
1875# asm 2: shld $13,<e2=%r11,<mulr21=%r12
1876shld $13,%r11,%r12
1877
1878# qhasm:   e2 &= mulredmask
1879# asm 1: and  <mulredmask=int64#3,<e2=int64#9
1880# asm 2: and  <mulredmask=%rdx,<e2=%r11
1881and  %rdx,%r11
1882
1883# qhasm:   e2 += mulr11
1884# asm 1: add  <mulr11=int64#8,<e2=int64#9
1885# asm 2: add  <mulr11=%r10,<e2=%r11
1886add  %r10,%r11
1887
1888# qhasm:   mulr31 = (mulr31.e3) << 13
1889# asm 1: shld $13,<e3=int64#11,<mulr31=int64#12
1890# asm 2: shld $13,<e3=%r13,<mulr31=%r14
1891shld $13,%r13,%r14
1892
1893# qhasm:   e3 &= mulredmask
1894# asm 1: and  <mulredmask=int64#3,<e3=int64#11
1895# asm 2: and  <mulredmask=%rdx,<e3=%r13
1896and  %rdx,%r13
1897
1898# qhasm:   e3 += mulr21
1899# asm 1: add  <mulr21=int64#10,<e3=int64#11
1900# asm 2: add  <mulr21=%r12,<e3=%r13
1901add  %r12,%r13
1902
1903# qhasm:   mulr41 = (mulr41.e4) << 13
1904# asm 1: shld $13,<e4=int64#13,<mulr41=int64#14
1905# asm 2: shld $13,<e4=%r15,<mulr41=%rbx
1906shld $13,%r15,%rbx
1907
1908# qhasm:   e4 &= mulredmask
1909# asm 1: and  <mulredmask=int64#3,<e4=int64#13
1910# asm 2: and  <mulredmask=%rdx,<e4=%r15
1911and  %rdx,%r15
1912
1913# qhasm:   e4 += mulr31
1914# asm 1: add  <mulr31=int64#12,<e4=int64#13
1915# asm 2: add  <mulr31=%r14,<e4=%r15
1916add  %r14,%r15
1917
1918# qhasm:   mulr41 = mulr41 * 19
1919# asm 1: imulq  $19,<mulr41=int64#14,>mulr41=int64#5
1920# asm 2: imulq  $19,<mulr41=%rbx,>mulr41=%r8
1921imulq  $19,%rbx,%r8
1922
1923# qhasm:   e0 += mulr41
1924# asm 1: add  <mulr41=int64#5,<e0=int64#4
1925# asm 2: add  <mulr41=%r8,<e0=%rcx
1926add  %r8,%rcx
1927
1928# qhasm:   mult = e0
1929# asm 1: mov  <e0=int64#4,>mult=int64#5
1930# asm 2: mov  <e0=%rcx,>mult=%r8
1931mov  %rcx,%r8
1932
1933# qhasm:   (uint64) mult >>= 51
1934# asm 1: shr  $51,<mult=int64#5
1935# asm 2: shr  $51,<mult=%r8
1936shr  $51,%r8
1937
1938# qhasm:   mult += e1
1939# asm 1: add  <e1=int64#6,<mult=int64#5
1940# asm 2: add  <e1=%r9,<mult=%r8
1941add  %r9,%r8
1942
1943# qhasm:   e1 = mult
1944# asm 1: mov  <mult=int64#5,>e1=int64#6
1945# asm 2: mov  <mult=%r8,>e1=%r9
1946mov  %r8,%r9
1947
1948# qhasm:   (uint64) mult >>= 51
1949# asm 1: shr  $51,<mult=int64#5
1950# asm 2: shr  $51,<mult=%r8
1951shr  $51,%r8
1952
1953# qhasm:   e0 &= mulredmask
1954# asm 1: and  <mulredmask=int64#3,<e0=int64#4
1955# asm 2: and  <mulredmask=%rdx,<e0=%rcx
1956and  %rdx,%rcx
1957
1958# qhasm:   mult += e2
1959# asm 1: add  <e2=int64#9,<mult=int64#5
1960# asm 2: add  <e2=%r11,<mult=%r8
1961add  %r11,%r8
1962
1963# qhasm:   e2 = mult
1964# asm 1: mov  <mult=int64#5,>e2=int64#7
1965# asm 2: mov  <mult=%r8,>e2=%rax
1966mov  %r8,%rax
1967
1968# qhasm:   (uint64) mult >>= 51
1969# asm 1: shr  $51,<mult=int64#5
1970# asm 2: shr  $51,<mult=%r8
1971shr  $51,%r8
1972
1973# qhasm:   e1 &= mulredmask
1974# asm 1: and  <mulredmask=int64#3,<e1=int64#6
1975# asm 2: and  <mulredmask=%rdx,<e1=%r9
1976and  %rdx,%r9
1977
1978# qhasm:   mult += e3
1979# asm 1: add  <e3=int64#11,<mult=int64#5
1980# asm 2: add  <e3=%r13,<mult=%r8
1981add  %r13,%r8
1982
1983# qhasm:   e3 = mult
1984# asm 1: mov  <mult=int64#5,>e3=int64#8
1985# asm 2: mov  <mult=%r8,>e3=%r10
1986mov  %r8,%r10
1987
1988# qhasm:   (uint64) mult >>= 51
1989# asm 1: shr  $51,<mult=int64#5
1990# asm 2: shr  $51,<mult=%r8
1991shr  $51,%r8
1992
1993# qhasm:   e2 &= mulredmask
1994# asm 1: and  <mulredmask=int64#3,<e2=int64#7
1995# asm 2: and  <mulredmask=%rdx,<e2=%rax
1996and  %rdx,%rax
1997
1998# qhasm:   mult += e4
1999# asm 1: add  <e4=int64#13,<mult=int64#5
2000# asm 2: add  <e4=%r15,<mult=%r8
2001add  %r15,%r8
2002
2003# qhasm:   e4 = mult
2004# asm 1: mov  <mult=int64#5,>e4=int64#9
2005# asm 2: mov  <mult=%r8,>e4=%r11
2006mov  %r8,%r11
2007
2008# qhasm:   (uint64) mult >>= 51
2009# asm 1: shr  $51,<mult=int64#5
2010# asm 2: shr  $51,<mult=%r8
2011shr  $51,%r8
2012
2013# qhasm:   e3 &= mulredmask
2014# asm 1: and  <mulredmask=int64#3,<e3=int64#8
2015# asm 2: and  <mulredmask=%rdx,<e3=%r10
2016and  %rdx,%r10
2017
2018# qhasm:   mult *= 19
2019# asm 1: imulq  $19,<mult=int64#5,>mult=int64#5
2020# asm 2: imulq  $19,<mult=%r8,>mult=%r8
2021imulq  $19,%r8,%r8
2022
2023# qhasm:   e0 += mult
2024# asm 1: add  <mult=int64#5,<e0=int64#4
2025# asm 2: add  <mult=%r8,<e0=%rcx
2026add  %r8,%rcx
2027
2028# qhasm:   e4 &= mulredmask
2029# asm 1: and  <mulredmask=int64#3,<e4=int64#9
2030# asm 2: and  <mulredmask=%rdx,<e4=%r11
2031and  %rdx,%r11
2032
2033# qhasm: h0 = e0
2034# asm 1: mov  <e0=int64#4,>h0=int64#3
2035# asm 2: mov  <e0=%rcx,>h0=%rdx
2036mov  %rcx,%rdx
2037
2038# qhasm: h1 = e1
2039# asm 1: mov  <e1=int64#6,>h1=int64#5
2040# asm 2: mov  <e1=%r9,>h1=%r8
2041mov  %r9,%r8
2042
2043# qhasm: h2 = e2
2044# asm 1: mov  <e2=int64#7,>h2=int64#10
2045# asm 2: mov  <e2=%rax,>h2=%r12
2046mov  %rax,%r12
2047
2048# qhasm: h3 = e3
2049# asm 1: mov  <e3=int64#8,>h3=int64#11
2050# asm 2: mov  <e3=%r10,>h3=%r13
2051mov  %r10,%r13
2052
2053# qhasm: h4 = e4
2054# asm 1: mov  <e4=int64#9,>h4=int64#12
2055# asm 2: mov  <e4=%r11,>h4=%r14
2056mov  %r11,%r14
2057
2058# qhasm: e0 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P0
2059# asm 1: add  crypto_sign_ed25519_amd64_51_30k_batch_2P0,<e0=int64#4
2060# asm 2: add  crypto_sign_ed25519_amd64_51_30k_batch_2P0,<e0=%rcx
2061add  crypto_sign_ed25519_amd64_51_30k_batch_2P0,%rcx
2062
2063# qhasm: e1 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
2064# asm 1: add  crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<e1=int64#6
2065# asm 2: add  crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<e1=%r9
2066add  crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r9
2067
2068# qhasm: e2 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
2069# asm 1: add  crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<e2=int64#7
2070# asm 2: add  crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<e2=%rax
2071add  crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%rax
2072
2073# qhasm: e3 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
2074# asm 1: add  crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<e3=int64#8
2075# asm 2: add  crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<e3=%r10
2076add  crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r10
2077
2078# qhasm: e4 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
2079# asm 1: add  crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<e4=int64#9
2080# asm 2: add  crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<e4=%r11
2081add  crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r11
2082
2083# qhasm: h0 += a0_stack
2084# asm 1: addq <a0_stack=stack64#8,<h0=int64#3
2085# asm 2: addq <a0_stack=56(%rsp),<h0=%rdx
2086addq 56(%rsp),%rdx
2087
2088# qhasm: h1 += a1_stack
2089# asm 1: addq <a1_stack=stack64#9,<h1=int64#5
2090# asm 2: addq <a1_stack=64(%rsp),<h1=%r8
2091addq 64(%rsp),%r8
2092
2093# qhasm: h2 += a2_stack
2094# asm 1: addq <a2_stack=stack64#10,<h2=int64#10
2095# asm 2: addq <a2_stack=72(%rsp),<h2=%r12
2096addq 72(%rsp),%r12
2097
2098# qhasm: h3 += a3_stack
2099# asm 1: addq <a3_stack=stack64#11,<h3=int64#11
2100# asm 2: addq <a3_stack=80(%rsp),<h3=%r13
2101addq 80(%rsp),%r13
2102
2103# qhasm: h4 += a4_stack
2104# asm 1: addq <a4_stack=stack64#12,<h4=int64#12
2105# asm 2: addq <a4_stack=88(%rsp),<h4=%r14
2106addq 88(%rsp),%r14
2107
2108# qhasm: e0 -= a0_stack
2109# asm 1: subq <a0_stack=stack64#8,<e0=int64#4
2110# asm 2: subq <a0_stack=56(%rsp),<e0=%rcx
2111subq 56(%rsp),%rcx
2112
2113# qhasm: e1 -= a1_stack
2114# asm 1: subq <a1_stack=stack64#9,<e1=int64#6
2115# asm 2: subq <a1_stack=64(%rsp),<e1=%r9
2116subq 64(%rsp),%r9
2117
2118# qhasm: e2 -= a2_stack
2119# asm 1: subq <a2_stack=stack64#10,<e2=int64#7
2120# asm 2: subq <a2_stack=72(%rsp),<e2=%rax
2121subq 72(%rsp),%rax
2122
2123# qhasm: e3 -= a3_stack
2124# asm 1: subq <a3_stack=stack64#11,<e3=int64#8
2125# asm 2: subq <a3_stack=80(%rsp),<e3=%r10
2126subq 80(%rsp),%r10
2127
2128# qhasm: e4 -= a4_stack
2129# asm 1: subq <a4_stack=stack64#12,<e4=int64#9
2130# asm 2: subq <a4_stack=88(%rsp),<e4=%r11
2131subq 88(%rsp),%r11
2132
2133# qhasm: h0_stack = h0
2134# asm 1: movq <h0=int64#3,>h0_stack=stack64#8
2135# asm 2: movq <h0=%rdx,>h0_stack=56(%rsp)
2136movq %rdx,56(%rsp)
2137
2138# qhasm: h1_stack = h1
2139# asm 1: movq <h1=int64#5,>h1_stack=stack64#9
2140# asm 2: movq <h1=%r8,>h1_stack=64(%rsp)
2141movq %r8,64(%rsp)
2142
2143# qhasm: h2_stack = h2
2144# asm 1: movq <h2=int64#10,>h2_stack=stack64#10
2145# asm 2: movq <h2=%r12,>h2_stack=72(%rsp)
2146movq %r12,72(%rsp)
2147
2148# qhasm: h3_stack = h3
2149# asm 1: movq <h3=int64#11,>h3_stack=stack64#11
2150# asm 2: movq <h3=%r13,>h3_stack=80(%rsp)
2151movq %r13,80(%rsp)
2152
2153# qhasm: h4_stack = h4
2154# asm 1: movq <h4=int64#12,>h4_stack=stack64#12
2155# asm 2: movq <h4=%r14,>h4_stack=88(%rsp)
2156movq %r14,88(%rsp)
2157
2158# qhasm: e0_stack = e0
2159# asm 1: movq <e0=int64#4,>e0_stack=stack64#13
2160# asm 2: movq <e0=%rcx,>e0_stack=96(%rsp)
2161movq %rcx,96(%rsp)
2162
2163# qhasm: e1_stack = e1
2164# asm 1: movq <e1=int64#6,>e1_stack=stack64#14
2165# asm 2: movq <e1=%r9,>e1_stack=104(%rsp)
2166movq %r9,104(%rsp)
2167
2168# qhasm: e2_stack = e2
2169# asm 1: movq <e2=int64#7,>e2_stack=stack64#15
2170# asm 2: movq <e2=%rax,>e2_stack=112(%rsp)
2171movq %rax,112(%rsp)
2172
2173# qhasm: e3_stack = e3
2174# asm 1: movq <e3=int64#8,>e3_stack=stack64#16
2175# asm 2: movq <e3=%r10,>e3_stack=120(%rsp)
2176movq %r10,120(%rsp)
2177
2178# qhasm: e4_stack = e4
2179# asm 1: movq <e4=int64#9,>e4_stack=stack64#17
2180# asm 2: movq <e4=%r11,>e4_stack=128(%rsp)
2181movq %r11,128(%rsp)
2182
2183# qhasm:   mulrax = *(uint64 *)(rp + 144)
2184# asm 1: movq   144(<rp=int64#1),>mulrax=int64#3
2185# asm 2: movq   144(<rp=%rdi),>mulrax=%rdx
2186movq   144(%rdi),%rdx
2187
2188# qhasm:   mulrax *= 19
2189# asm 1: imulq  $19,<mulrax=int64#3,>mulrax=int64#7
2190# asm 2: imulq  $19,<mulrax=%rdx,>mulrax=%rax
2191imulq  $19,%rdx,%rax
2192
2193# qhasm:   mulx319_stack = mulrax
2194# asm 1: movq <mulrax=int64#7,>mulx319_stack=stack64#18
2195# asm 2: movq <mulrax=%rax,>mulx319_stack=136(%rsp)
2196movq %rax,136(%rsp)
2197
2198# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 96)
2199# asm 1: mulq  96(<qp=int64#2)
2200# asm 2: mulq  96(<qp=%rsi)
2201mulq  96(%rsi)
2202
2203# qhasm:   c0 = mulrax
2204# asm 1: mov  <mulrax=int64#7,>c0=int64#4
2205# asm 2: mov  <mulrax=%rax,>c0=%rcx
2206mov  %rax,%rcx
2207
2208# qhasm:   mulr01 = mulrdx
2209# asm 1: mov  <mulrdx=int64#3,>mulr01=int64#5
2210# asm 2: mov  <mulrdx=%rdx,>mulr01=%r8
2211mov  %rdx,%r8
2212
2213# qhasm:   mulrax = *(uint64 *)(rp + 152)
2214# asm 1: movq   152(<rp=int64#1),>mulrax=int64#3
2215# asm 2: movq   152(<rp=%rdi),>mulrax=%rdx
2216movq   152(%rdi),%rdx
2217
2218# qhasm:   mulrax *= 19
2219# asm 1: imulq  $19,<mulrax=int64#3,>mulrax=int64#7
2220# asm 2: imulq  $19,<mulrax=%rdx,>mulrax=%rax
2221imulq  $19,%rdx,%rax
2222
2223# qhasm:   mulx419_stack = mulrax
2224# asm 1: movq <mulrax=int64#7,>mulx419_stack=stack64#19
2225# asm 2: movq <mulrax=%rax,>mulx419_stack=144(%rsp)
2226movq %rax,144(%rsp)
2227
2228# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 88)
2229# asm 1: mulq  88(<qp=int64#2)
2230# asm 2: mulq  88(<qp=%rsi)
2231mulq  88(%rsi)
2232
2233# qhasm:   carry? c0 += mulrax
2234# asm 1: add  <mulrax=int64#7,<c0=int64#4
2235# asm 2: add  <mulrax=%rax,<c0=%rcx
2236add  %rax,%rcx
2237
2238# qhasm:   mulr01 += mulrdx + carry
2239# asm 1: adc <mulrdx=int64#3,<mulr01=int64#5
2240# asm 2: adc <mulrdx=%rdx,<mulr01=%r8
2241adc %rdx,%r8
2242
2243# qhasm:   mulrax = *(uint64 *)(rp + 120)
2244# asm 1: movq   120(<rp=int64#1),>mulrax=int64#7
2245# asm 2: movq   120(<rp=%rdi),>mulrax=%rax
2246movq   120(%rdi),%rax
2247
2248# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 80)
2249# asm 1: mulq  80(<qp=int64#2)
2250# asm 2: mulq  80(<qp=%rsi)
2251mulq  80(%rsi)
2252
2253# qhasm:   carry? c0 += mulrax
2254# asm 1: add  <mulrax=int64#7,<c0=int64#4
2255# asm 2: add  <mulrax=%rax,<c0=%rcx
2256add  %rax,%rcx
2257
2258# qhasm:   mulr01 += mulrdx + carry
2259# asm 1: adc <mulrdx=int64#3,<mulr01=int64#5
2260# asm 2: adc <mulrdx=%rdx,<mulr01=%r8
2261adc %rdx,%r8
2262
2263# qhasm:   mulrax = *(uint64 *)(rp + 120)
2264# asm 1: movq   120(<rp=int64#1),>mulrax=int64#7
2265# asm 2: movq   120(<rp=%rdi),>mulrax=%rax
2266movq   120(%rdi),%rax
2267
2268# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 88)
2269# asm 1: mulq  88(<qp=int64#2)
2270# asm 2: mulq  88(<qp=%rsi)
2271mulq  88(%rsi)
2272
2273# qhasm:   c1 = mulrax
2274# asm 1: mov  <mulrax=int64#7,>c1=int64#6
2275# asm 2: mov  <mulrax=%rax,>c1=%r9
2276mov  %rax,%r9
2277
2278# qhasm:   mulr11 = mulrdx
2279# asm 1: mov  <mulrdx=int64#3,>mulr11=int64#8
2280# asm 2: mov  <mulrdx=%rdx,>mulr11=%r10
2281mov  %rdx,%r10
2282
2283# qhasm:   mulrax = *(uint64 *)(rp + 120)
2284# asm 1: movq   120(<rp=int64#1),>mulrax=int64#7
2285# asm 2: movq   120(<rp=%rdi),>mulrax=%rax
2286movq   120(%rdi),%rax
2287
2288# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 96)
2289# asm 1: mulq  96(<qp=int64#2)
2290# asm 2: mulq  96(<qp=%rsi)
2291mulq  96(%rsi)
2292
2293# qhasm:   c2 = mulrax
2294# asm 1: mov  <mulrax=int64#7,>c2=int64#9
2295# asm 2: mov  <mulrax=%rax,>c2=%r11
2296mov  %rax,%r11
2297
2298# qhasm:   mulr21 = mulrdx
2299# asm 1: mov  <mulrdx=int64#3,>mulr21=int64#10
2300# asm 2: mov  <mulrdx=%rdx,>mulr21=%r12
2301mov  %rdx,%r12
2302
2303# qhasm:   mulrax = *(uint64 *)(rp + 120)
2304# asm 1: movq   120(<rp=int64#1),>mulrax=int64#7
2305# asm 2: movq   120(<rp=%rdi),>mulrax=%rax
2306movq   120(%rdi),%rax
2307
2308# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 104)
2309# asm 1: mulq  104(<qp=int64#2)
2310# asm 2: mulq  104(<qp=%rsi)
2311mulq  104(%rsi)
2312
2313# qhasm:   c3 = mulrax
2314# asm 1: mov  <mulrax=int64#7,>c3=int64#11
2315# asm 2: mov  <mulrax=%rax,>c3=%r13
2316mov  %rax,%r13
2317
2318# qhasm:   mulr31 = mulrdx
2319# asm 1: mov  <mulrdx=int64#3,>mulr31=int64#12
2320# asm 2: mov  <mulrdx=%rdx,>mulr31=%r14
2321mov  %rdx,%r14
2322
2323# qhasm:   mulrax = *(uint64 *)(rp + 120)
2324# asm 1: movq   120(<rp=int64#1),>mulrax=int64#7
2325# asm 2: movq   120(<rp=%rdi),>mulrax=%rax
2326movq   120(%rdi),%rax
2327
2328# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 112)
2329# asm 1: mulq  112(<qp=int64#2)
2330# asm 2: mulq  112(<qp=%rsi)
2331mulq  112(%rsi)
2332
2333# qhasm:   c4 = mulrax
2334# asm 1: mov  <mulrax=int64#7,>c4=int64#13
2335# asm 2: mov  <mulrax=%rax,>c4=%r15
2336mov  %rax,%r15
2337
2338# qhasm:   mulr41 = mulrdx
2339# asm 1: mov  <mulrdx=int64#3,>mulr41=int64#14
2340# asm 2: mov  <mulrdx=%rdx,>mulr41=%rbx
2341mov  %rdx,%rbx
2342
2343# qhasm:   mulrax = *(uint64 *)(rp + 128)
2344# asm 1: movq   128(<rp=int64#1),>mulrax=int64#7
2345# asm 2: movq   128(<rp=%rdi),>mulrax=%rax
2346movq   128(%rdi),%rax
2347
2348# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 80)
2349# asm 1: mulq  80(<qp=int64#2)
2350# asm 2: mulq  80(<qp=%rsi)
2351mulq  80(%rsi)
2352
2353# qhasm:   carry? c1 += mulrax
2354# asm 1: add  <mulrax=int64#7,<c1=int64#6
2355# asm 2: add  <mulrax=%rax,<c1=%r9
2356add  %rax,%r9
2357
2358# qhasm:   mulr11 += mulrdx + carry
2359# asm 1: adc <mulrdx=int64#3,<mulr11=int64#8
2360# asm 2: adc <mulrdx=%rdx,<mulr11=%r10
2361adc %rdx,%r10
2362
2363# qhasm:   mulrax = *(uint64 *)(rp + 128)
2364# asm 1: movq   128(<rp=int64#1),>mulrax=int64#7
2365# asm 2: movq   128(<rp=%rdi),>mulrax=%rax
2366movq   128(%rdi),%rax
2367
2368# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 88)
2369# asm 1: mulq  88(<qp=int64#2)
2370# asm 2: mulq  88(<qp=%rsi)
2371mulq  88(%rsi)
2372
2373# qhasm:   carry? c2 += mulrax
2374# asm 1: add  <mulrax=int64#7,<c2=int64#9
2375# asm 2: add  <mulrax=%rax,<c2=%r11
2376add  %rax,%r11
2377
2378# qhasm:   mulr21 += mulrdx + carry
2379# asm 1: adc <mulrdx=int64#3,<mulr21=int64#10
2380# asm 2: adc <mulrdx=%rdx,<mulr21=%r12
2381adc %rdx,%r12
2382
2383# qhasm:   mulrax = *(uint64 *)(rp + 128)
2384# asm 1: movq   128(<rp=int64#1),>mulrax=int64#7
2385# asm 2: movq   128(<rp=%rdi),>mulrax=%rax
2386movq   128(%rdi),%rax
2387
2388# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 96)
2389# asm 1: mulq  96(<qp=int64#2)
2390# asm 2: mulq  96(<qp=%rsi)
2391mulq  96(%rsi)
2392
2393# qhasm:   carry? c3 += mulrax
2394# asm 1: add  <mulrax=int64#7,<c3=int64#11
2395# asm 2: add  <mulrax=%rax,<c3=%r13
2396add  %rax,%r13
2397
2398# qhasm:   mulr31 += mulrdx + carry
2399# asm 1: adc <mulrdx=int64#3,<mulr31=int64#12
2400# asm 2: adc <mulrdx=%rdx,<mulr31=%r14
2401adc %rdx,%r14
2402
2403# qhasm:   mulrax = *(uint64 *)(rp + 128)
2404# asm 1: movq   128(<rp=int64#1),>mulrax=int64#7
2405# asm 2: movq   128(<rp=%rdi),>mulrax=%rax
2406movq   128(%rdi),%rax
2407
2408# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 104)
2409# asm 1: mulq  104(<qp=int64#2)
2410# asm 2: mulq  104(<qp=%rsi)
2411mulq  104(%rsi)
2412
2413# qhasm:   carry? c4 += mulrax
2414# asm 1: add  <mulrax=int64#7,<c4=int64#13
2415# asm 2: add  <mulrax=%rax,<c4=%r15
2416add  %rax,%r15
2417
2418# qhasm:   mulr41 += mulrdx + carry
2419# asm 1: adc <mulrdx=int64#3,<mulr41=int64#14
2420# asm 2: adc <mulrdx=%rdx,<mulr41=%rbx
2421adc %rdx,%rbx
2422
2423# qhasm:   mulrax = *(uint64 *)(rp + 128)
2424# asm 1: movq   128(<rp=int64#1),>mulrax=int64#3
2425# asm 2: movq   128(<rp=%rdi),>mulrax=%rdx
2426movq   128(%rdi),%rdx
2427
2428# qhasm:   mulrax *= 19
2429# asm 1: imulq  $19,<mulrax=int64#3,>mulrax=int64#7
2430# asm 2: imulq  $19,<mulrax=%rdx,>mulrax=%rax
2431imulq  $19,%rdx,%rax
2432
2433# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 112)
2434# asm 1: mulq  112(<qp=int64#2)
2435# asm 2: mulq  112(<qp=%rsi)
2436mulq  112(%rsi)
2437
2438# qhasm:   carry? c0 += mulrax
2439# asm 1: add  <mulrax=int64#7,<c0=int64#4
2440# asm 2: add  <mulrax=%rax,<c0=%rcx
2441add  %rax,%rcx
2442
2443# qhasm:   mulr01 += mulrdx + carry
2444# asm 1: adc <mulrdx=int64#3,<mulr01=int64#5
2445# asm 2: adc <mulrdx=%rdx,<mulr01=%r8
2446adc %rdx,%r8
2447
2448# qhasm:   mulrax = *(uint64 *)(rp + 136)
2449# asm 1: movq   136(<rp=int64#1),>mulrax=int64#7
2450# asm 2: movq   136(<rp=%rdi),>mulrax=%rax
2451movq   136(%rdi),%rax
2452
2453# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 80)
2454# asm 1: mulq  80(<qp=int64#2)
2455# asm 2: mulq  80(<qp=%rsi)
2456mulq  80(%rsi)
2457
2458# qhasm:   carry? c2 += mulrax
2459# asm 1: add  <mulrax=int64#7,<c2=int64#9
2460# asm 2: add  <mulrax=%rax,<c2=%r11
2461add  %rax,%r11
2462
2463# qhasm:   mulr21 += mulrdx + carry
2464# asm 1: adc <mulrdx=int64#3,<mulr21=int64#10
2465# asm 2: adc <mulrdx=%rdx,<mulr21=%r12
2466adc %rdx,%r12
2467
2468# qhasm:   mulrax = *(uint64 *)(rp + 136)
2469# asm 1: movq   136(<rp=int64#1),>mulrax=int64#7
2470# asm 2: movq   136(<rp=%rdi),>mulrax=%rax
2471movq   136(%rdi),%rax
2472
2473# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 88)
2474# asm 1: mulq  88(<qp=int64#2)
2475# asm 2: mulq  88(<qp=%rsi)
2476mulq  88(%rsi)
2477
2478# qhasm:   carry? c3 += mulrax
2479# asm 1: add  <mulrax=int64#7,<c3=int64#11
2480# asm 2: add  <mulrax=%rax,<c3=%r13
2481add  %rax,%r13
2482
2483# qhasm:   mulr31 += mulrdx + carry
2484# asm 1: adc <mulrdx=int64#3,<mulr31=int64#12
2485# asm 2: adc <mulrdx=%rdx,<mulr31=%r14
2486adc %rdx,%r14
2487
2488# qhasm:   mulrax = *(uint64 *)(rp + 136)
2489# asm 1: movq   136(<rp=int64#1),>mulrax=int64#7
2490# asm 2: movq   136(<rp=%rdi),>mulrax=%rax
2491movq   136(%rdi),%rax
2492
2493# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 96)
2494# asm 1: mulq  96(<qp=int64#2)
2495# asm 2: mulq  96(<qp=%rsi)
2496mulq  96(%rsi)
2497
2498# qhasm:   carry? c4 += mulrax
2499# asm 1: add  <mulrax=int64#7,<c4=int64#13
2500# asm 2: add  <mulrax=%rax,<c4=%r15
2501add  %rax,%r15
2502
2503# qhasm:   mulr41 += mulrdx + carry
2504# asm 1: adc <mulrdx=int64#3,<mulr41=int64#14
2505# asm 2: adc <mulrdx=%rdx,<mulr41=%rbx
2506adc %rdx,%rbx
2507
2508# qhasm:   mulrax = *(uint64 *)(rp + 136)
2509# asm 1: movq   136(<rp=int64#1),>mulrax=int64#3
2510# asm 2: movq   136(<rp=%rdi),>mulrax=%rdx
2511movq   136(%rdi),%rdx
2512
2513# qhasm:   mulrax *= 19
2514# asm 1: imulq  $19,<mulrax=int64#3,>mulrax=int64#7
2515# asm 2: imulq  $19,<mulrax=%rdx,>mulrax=%rax
2516imulq  $19,%rdx,%rax
2517
2518# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 104)
2519# asm 1: mulq  104(<qp=int64#2)
2520# asm 2: mulq  104(<qp=%rsi)
2521mulq  104(%rsi)
2522
2523# qhasm:   carry? c0 += mulrax
2524# asm 1: add  <mulrax=int64#7,<c0=int64#4
2525# asm 2: add  <mulrax=%rax,<c0=%rcx
2526add  %rax,%rcx
2527
2528# qhasm:   mulr01 += mulrdx + carry
2529# asm 1: adc <mulrdx=int64#3,<mulr01=int64#5
2530# asm 2: adc <mulrdx=%rdx,<mulr01=%r8
2531adc %rdx,%r8
2532
2533# qhasm:   mulrax = *(uint64 *)(rp + 136)
2534# asm 1: movq   136(<rp=int64#1),>mulrax=int64#3
2535# asm 2: movq   136(<rp=%rdi),>mulrax=%rdx
2536movq   136(%rdi),%rdx
2537
2538# qhasm:   mulrax *= 19
2539# asm 1: imulq  $19,<mulrax=int64#3,>mulrax=int64#7
2540# asm 2: imulq  $19,<mulrax=%rdx,>mulrax=%rax
2541imulq  $19,%rdx,%rax
2542
2543# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 112)
2544# asm 1: mulq  112(<qp=int64#2)
2545# asm 2: mulq  112(<qp=%rsi)
2546mulq  112(%rsi)
2547
2548# qhasm:   carry? c1 += mulrax
2549# asm 1: add  <mulrax=int64#7,<c1=int64#6
2550# asm 2: add  <mulrax=%rax,<c1=%r9
2551add  %rax,%r9
2552
2553# qhasm:   mulr11 += mulrdx + carry
2554# asm 1: adc <mulrdx=int64#3,<mulr11=int64#8
2555# asm 2: adc <mulrdx=%rdx,<mulr11=%r10
2556adc %rdx,%r10
2557
2558# qhasm:   mulrax = *(uint64 *)(rp + 144)
2559# asm 1: movq   144(<rp=int64#1),>mulrax=int64#7
2560# asm 2: movq   144(<rp=%rdi),>mulrax=%rax
2561movq   144(%rdi),%rax
2562
2563# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 80)
2564# asm 1: mulq  80(<qp=int64#2)
2565# asm 2: mulq  80(<qp=%rsi)
2566mulq  80(%rsi)
2567
2568# qhasm:   carry? c3 += mulrax
2569# asm 1: add  <mulrax=int64#7,<c3=int64#11
2570# asm 2: add  <mulrax=%rax,<c3=%r13
2571add  %rax,%r13
2572
2573# qhasm:   mulr31 += mulrdx + carry
2574# asm 1: adc <mulrdx=int64#3,<mulr31=int64#12
2575# asm 2: adc <mulrdx=%rdx,<mulr31=%r14
2576adc %rdx,%r14
2577
2578# qhasm:   mulrax = *(uint64 *)(rp + 144)
2579# asm 1: movq   144(<rp=int64#1),>mulrax=int64#7
2580# asm 2: movq   144(<rp=%rdi),>mulrax=%rax
2581movq   144(%rdi),%rax
2582
2583# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 88)
2584# asm 1: mulq  88(<qp=int64#2)
2585# asm 2: mulq  88(<qp=%rsi)
2586mulq  88(%rsi)
2587
2588# qhasm:   carry? c4 += mulrax
2589# asm 1: add  <mulrax=int64#7,<c4=int64#13
2590# asm 2: add  <mulrax=%rax,<c4=%r15
2591add  %rax,%r15
2592
2593# qhasm:   mulr41 += mulrdx + carry
2594# asm 1: adc <mulrdx=int64#3,<mulr41=int64#14
2595# asm 2: adc <mulrdx=%rdx,<mulr41=%rbx
2596adc %rdx,%rbx
2597
2598# qhasm:   mulrax = mulx319_stack
2599# asm 1: movq <mulx319_stack=stack64#18,>mulrax=int64#7
2600# asm 2: movq <mulx319_stack=136(%rsp),>mulrax=%rax
2601movq 136(%rsp),%rax
2602
2603# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 104)
2604# asm 1: mulq  104(<qp=int64#2)
2605# asm 2: mulq  104(<qp=%rsi)
2606mulq  104(%rsi)
2607
2608# qhasm:   carry? c1 += mulrax
2609# asm 1: add  <mulrax=int64#7,<c1=int64#6
2610# asm 2: add  <mulrax=%rax,<c1=%r9
2611add  %rax,%r9
2612
2613# qhasm:   mulr11 += mulrdx + carry
2614# asm 1: adc <mulrdx=int64#3,<mulr11=int64#8
2615# asm 2: adc <mulrdx=%rdx,<mulr11=%r10
2616adc %rdx,%r10
2617
2618# qhasm:   mulrax = mulx319_stack
2619# asm 1: movq <mulx319_stack=stack64#18,>mulrax=int64#7
2620# asm 2: movq <mulx319_stack=136(%rsp),>mulrax=%rax
2621movq 136(%rsp),%rax
2622
2623# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 112)
2624# asm 1: mulq  112(<qp=int64#2)
2625# asm 2: mulq  112(<qp=%rsi)
2626mulq  112(%rsi)
2627
2628# qhasm:   carry? c2 += mulrax
2629# asm 1: add  <mulrax=int64#7,<c2=int64#9
2630# asm 2: add  <mulrax=%rax,<c2=%r11
2631add  %rax,%r11
2632
2633# qhasm:   mulr21 += mulrdx + carry
2634# asm 1: adc <mulrdx=int64#3,<mulr21=int64#10
2635# asm 2: adc <mulrdx=%rdx,<mulr21=%r12
2636adc %rdx,%r12
2637
2638# qhasm:   mulrax = *(uint64 *)(rp + 152)
2639# asm 1: movq   152(<rp=int64#1),>mulrax=int64#7
2640# asm 2: movq   152(<rp=%rdi),>mulrax=%rax
2641movq   152(%rdi),%rax
2642
2643# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 80)
2644# asm 1: mulq  80(<qp=int64#2)
2645# asm 2: mulq  80(<qp=%rsi)
2646mulq  80(%rsi)
2647
2648# qhasm:   carry? c4 += mulrax
2649# asm 1: add  <mulrax=int64#7,<c4=int64#13
2650# asm 2: add  <mulrax=%rax,<c4=%r15
2651add  %rax,%r15
2652
2653# qhasm:   mulr41 += mulrdx + carry
2654# asm 1: adc <mulrdx=int64#3,<mulr41=int64#14
2655# asm 2: adc <mulrdx=%rdx,<mulr41=%rbx
2656adc %rdx,%rbx
2657
2658# qhasm:   mulrax = mulx419_stack
2659# asm 1: movq <mulx419_stack=stack64#19,>mulrax=int64#7
2660# asm 2: movq <mulx419_stack=144(%rsp),>mulrax=%rax
2661movq 144(%rsp),%rax
2662
2663# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 96)
2664# asm 1: mulq  96(<qp=int64#2)
2665# asm 2: mulq  96(<qp=%rsi)
2666mulq  96(%rsi)
2667
2668# qhasm:   carry? c1 += mulrax
2669# asm 1: add  <mulrax=int64#7,<c1=int64#6
2670# asm 2: add  <mulrax=%rax,<c1=%r9
2671add  %rax,%r9
2672
2673# qhasm:   mulr11 += mulrdx + carry
2674# asm 1: adc <mulrdx=int64#3,<mulr11=int64#8
2675# asm 2: adc <mulrdx=%rdx,<mulr11=%r10
2676adc %rdx,%r10
2677
2678# qhasm:   mulrax = mulx419_stack
2679# asm 1: movq <mulx419_stack=stack64#19,>mulrax=int64#7
2680# asm 2: movq <mulx419_stack=144(%rsp),>mulrax=%rax
2681movq 144(%rsp),%rax
2682
2683# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 104)
2684# asm 1: mulq  104(<qp=int64#2)
2685# asm 2: mulq  104(<qp=%rsi)
2686mulq  104(%rsi)
2687
2688# qhasm:   carry? c2 += mulrax
2689# asm 1: add  <mulrax=int64#7,<c2=int64#9
2690# asm 2: add  <mulrax=%rax,<c2=%r11
2691add  %rax,%r11
2692
2693# qhasm:   mulr21 += mulrdx + carry
2694# asm 1: adc <mulrdx=int64#3,<mulr21=int64#10
2695# asm 2: adc <mulrdx=%rdx,<mulr21=%r12
2696adc %rdx,%r12
2697
2698# qhasm:   mulrax = mulx419_stack
2699# asm 1: movq <mulx419_stack=stack64#19,>mulrax=int64#7
2700# asm 2: movq <mulx419_stack=144(%rsp),>mulrax=%rax
2701movq 144(%rsp),%rax
2702
2703# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)(qp + 112)
2704# asm 1: mulq  112(<qp=int64#2)
2705# asm 2: mulq  112(<qp=%rsi)
2706mulq  112(%rsi)
2707
2708# qhasm:   carry? c3 += mulrax
2709# asm 1: add  <mulrax=int64#7,<c3=int64#11
2710# asm 2: add  <mulrax=%rax,<c3=%r13
2711add  %rax,%r13
2712
2713# qhasm:   mulr31 += mulrdx + carry
2714# asm 1: adc <mulrdx=int64#3,<mulr31=int64#12
2715# asm 2: adc <mulrdx=%rdx,<mulr31=%r14
2716adc %rdx,%r14
2717
2718# qhasm:   mulredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
2719# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=int64#2
2720# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rsi
2721movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rsi
2722
2723# qhasm:   mulr01 = (mulr01.c0) << 13
2724# asm 1: shld $13,<c0=int64#4,<mulr01=int64#5
2725# asm 2: shld $13,<c0=%rcx,<mulr01=%r8
2726shld $13,%rcx,%r8
2727
2728# qhasm:   c0 &= mulredmask
2729# asm 1: and  <mulredmask=int64#2,<c0=int64#4
2730# asm 2: and  <mulredmask=%rsi,<c0=%rcx
2731and  %rsi,%rcx
2732
2733# qhasm:   mulr11 = (mulr11.c1) << 13
2734# asm 1: shld $13,<c1=int64#6,<mulr11=int64#8
2735# asm 2: shld $13,<c1=%r9,<mulr11=%r10
2736shld $13,%r9,%r10
2737
2738# qhasm:   c1 &= mulredmask
2739# asm 1: and  <mulredmask=int64#2,<c1=int64#6
2740# asm 2: and  <mulredmask=%rsi,<c1=%r9
2741and  %rsi,%r9
2742
2743# qhasm:   c1 += mulr01
2744# asm 1: add  <mulr01=int64#5,<c1=int64#6
2745# asm 2: add  <mulr01=%r8,<c1=%r9
2746add  %r8,%r9
2747
2748# qhasm:   mulr21 = (mulr21.c2) << 13
2749# asm 1: shld $13,<c2=int64#9,<mulr21=int64#10
2750# asm 2: shld $13,<c2=%r11,<mulr21=%r12
2751shld $13,%r11,%r12
2752
2753# qhasm:   c2 &= mulredmask
2754# asm 1: and  <mulredmask=int64#2,<c2=int64#9
2755# asm 2: and  <mulredmask=%rsi,<c2=%r11
2756and  %rsi,%r11
2757
2758# qhasm:   c2 += mulr11
2759# asm 1: add  <mulr11=int64#8,<c2=int64#9
2760# asm 2: add  <mulr11=%r10,<c2=%r11
2761add  %r10,%r11
2762
2763# qhasm:   mulr31 = (mulr31.c3) << 13
2764# asm 1: shld $13,<c3=int64#11,<mulr31=int64#12
2765# asm 2: shld $13,<c3=%r13,<mulr31=%r14
2766shld $13,%r13,%r14
2767
2768# qhasm:   c3 &= mulredmask
2769# asm 1: and  <mulredmask=int64#2,<c3=int64#11
2770# asm 2: and  <mulredmask=%rsi,<c3=%r13
2771and  %rsi,%r13
2772
2773# qhasm:   c3 += mulr21
2774# asm 1: add  <mulr21=int64#10,<c3=int64#11
2775# asm 2: add  <mulr21=%r12,<c3=%r13
2776add  %r12,%r13
2777
2778# qhasm:   mulr41 = (mulr41.c4) << 13
2779# asm 1: shld $13,<c4=int64#13,<mulr41=int64#14
2780# asm 2: shld $13,<c4=%r15,<mulr41=%rbx
2781shld $13,%r15,%rbx
2782
2783# qhasm:   c4 &= mulredmask
2784# asm 1: and  <mulredmask=int64#2,<c4=int64#13
2785# asm 2: and  <mulredmask=%rsi,<c4=%r15
2786and  %rsi,%r15
2787
2788# qhasm:   c4 += mulr31
2789# asm 1: add  <mulr31=int64#12,<c4=int64#13
2790# asm 2: add  <mulr31=%r14,<c4=%r15
2791add  %r14,%r15
2792
2793# qhasm:   mulr41 = mulr41 * 19
2794# asm 1: imulq  $19,<mulr41=int64#14,>mulr41=int64#3
2795# asm 2: imulq  $19,<mulr41=%rbx,>mulr41=%rdx
2796imulq  $19,%rbx,%rdx
2797
2798# qhasm:   c0 += mulr41
2799# asm 1: add  <mulr41=int64#3,<c0=int64#4
2800# asm 2: add  <mulr41=%rdx,<c0=%rcx
2801add  %rdx,%rcx
2802
2803# qhasm:   mult = c0
2804# asm 1: mov  <c0=int64#4,>mult=int64#3
2805# asm 2: mov  <c0=%rcx,>mult=%rdx
2806mov  %rcx,%rdx
2807
2808# qhasm:   (uint64) mult >>= 51
2809# asm 1: shr  $51,<mult=int64#3
2810# asm 2: shr  $51,<mult=%rdx
2811shr  $51,%rdx
2812
2813# qhasm:   mult += c1
2814# asm 1: add  <c1=int64#6,<mult=int64#3
2815# asm 2: add  <c1=%r9,<mult=%rdx
2816add  %r9,%rdx
2817
2818# qhasm:   c1 = mult
2819# asm 1: mov  <mult=int64#3,>c1=int64#5
2820# asm 2: mov  <mult=%rdx,>c1=%r8
2821mov  %rdx,%r8
2822
2823# qhasm:   (uint64) mult >>= 51
2824# asm 1: shr  $51,<mult=int64#3
2825# asm 2: shr  $51,<mult=%rdx
2826shr  $51,%rdx
2827
2828# qhasm:   c0 &= mulredmask
2829# asm 1: and  <mulredmask=int64#2,<c0=int64#4
2830# asm 2: and  <mulredmask=%rsi,<c0=%rcx
2831and  %rsi,%rcx
2832
2833# qhasm:   mult += c2
2834# asm 1: add  <c2=int64#9,<mult=int64#3
2835# asm 2: add  <c2=%r11,<mult=%rdx
2836add  %r11,%rdx
2837
2838# qhasm:   c2 = mult
2839# asm 1: mov  <mult=int64#3,>c2=int64#6
2840# asm 2: mov  <mult=%rdx,>c2=%r9
2841mov  %rdx,%r9
2842
2843# qhasm:   (uint64) mult >>= 51
2844# asm 1: shr  $51,<mult=int64#3
2845# asm 2: shr  $51,<mult=%rdx
2846shr  $51,%rdx
2847
2848# qhasm:   c1 &= mulredmask
2849# asm 1: and  <mulredmask=int64#2,<c1=int64#5
2850# asm 2: and  <mulredmask=%rsi,<c1=%r8
2851and  %rsi,%r8
2852
2853# qhasm:   mult += c3
2854# asm 1: add  <c3=int64#11,<mult=int64#3
2855# asm 2: add  <c3=%r13,<mult=%rdx
2856add  %r13,%rdx
2857
2858# qhasm:   c3 = mult
2859# asm 1: mov  <mult=int64#3,>c3=int64#7
2860# asm 2: mov  <mult=%rdx,>c3=%rax
2861mov  %rdx,%rax
2862
2863# qhasm:   (uint64) mult >>= 51
2864# asm 1: shr  $51,<mult=int64#3
2865# asm 2: shr  $51,<mult=%rdx
2866shr  $51,%rdx
2867
2868# qhasm:   c2 &= mulredmask
2869# asm 1: and  <mulredmask=int64#2,<c2=int64#6
2870# asm 2: and  <mulredmask=%rsi,<c2=%r9
2871and  %rsi,%r9
2872
2873# qhasm:   mult += c4
2874# asm 1: add  <c4=int64#13,<mult=int64#3
2875# asm 2: add  <c4=%r15,<mult=%rdx
2876add  %r15,%rdx
2877
2878# qhasm:   c4 = mult
2879# asm 1: mov  <mult=int64#3,>c4=int64#8
2880# asm 2: mov  <mult=%rdx,>c4=%r10
2881mov  %rdx,%r10
2882
2883# qhasm:   (uint64) mult >>= 51
2884# asm 1: shr  $51,<mult=int64#3
2885# asm 2: shr  $51,<mult=%rdx
2886shr  $51,%rdx
2887
2888# qhasm:   c3 &= mulredmask
2889# asm 1: and  <mulredmask=int64#2,<c3=int64#7
2890# asm 2: and  <mulredmask=%rsi,<c3=%rax
2891and  %rsi,%rax
2892
2893# qhasm:   mult *= 19
2894# asm 1: imulq  $19,<mult=int64#3,>mult=int64#3
2895# asm 2: imulq  $19,<mult=%rdx,>mult=%rdx
2896imulq  $19,%rdx,%rdx
2897
2898# qhasm:   c0 += mult
2899# asm 1: add  <mult=int64#3,<c0=int64#4
2900# asm 2: add  <mult=%rdx,<c0=%rcx
2901add  %rdx,%rcx
2902
2903# qhasm:   c4 &= mulredmask
2904# asm 1: and  <mulredmask=int64#2,<c4=int64#8
2905# asm 2: and  <mulredmask=%rsi,<c4=%r10
2906and  %rsi,%r10
2907
2908# qhasm: c0_stack = c0
2909# asm 1: movq <c0=int64#4,>c0_stack=stack64#18
2910# asm 2: movq <c0=%rcx,>c0_stack=136(%rsp)
2911movq %rcx,136(%rsp)
2912
2913# qhasm: f0 = *(uint64 *)(rp + 80)
2914# asm 1: movq   80(<rp=int64#1),>f0=int64#2
2915# asm 2: movq   80(<rp=%rdi),>f0=%rsi
2916movq   80(%rdi),%rsi
2917
2918# qhasm: f1 = *(uint64 *)(rp + 88)
2919# asm 1: movq   88(<rp=int64#1),>f1=int64#3
2920# asm 2: movq   88(<rp=%rdi),>f1=%rdx
2921movq   88(%rdi),%rdx
2922
2923# qhasm: f2 = *(uint64 *)(rp + 96)
2924# asm 1: movq   96(<rp=int64#1),>f2=int64#4
2925# asm 2: movq   96(<rp=%rdi),>f2=%rcx
2926movq   96(%rdi),%rcx
2927
2928# qhasm: f3 = *(uint64 *)(rp + 104)
2929# asm 1: movq   104(<rp=int64#1),>f3=int64#9
2930# asm 2: movq   104(<rp=%rdi),>f3=%r11
2931movq   104(%rdi),%r11
2932
2933# qhasm: f4 = *(uint64 *)(rp + 112)
2934# asm 1: movq   112(<rp=int64#1),>f4=int64#10
2935# asm 2: movq   112(<rp=%rdi),>f4=%r12
2936movq   112(%rdi),%r12
2937
2938# qhasm: f0 += f0
2939# asm 1: add  <f0=int64#2,<f0=int64#2
2940# asm 2: add  <f0=%rsi,<f0=%rsi
2941add  %rsi,%rsi
2942
2943# qhasm: f1 += f1
2944# asm 1: add  <f1=int64#3,<f1=int64#3
2945# asm 2: add  <f1=%rdx,<f1=%rdx
2946add  %rdx,%rdx
2947
2948# qhasm: f2 += f2
2949# asm 1: add  <f2=int64#4,<f2=int64#4
2950# asm 2: add  <f2=%rcx,<f2=%rcx
2951add  %rcx,%rcx
2952
2953# qhasm: f3 += f3
2954# asm 1: add  <f3=int64#9,<f3=int64#9
2955# asm 2: add  <f3=%r11,<f3=%r11
2956add  %r11,%r11
2957
2958# qhasm: f4 += f4
2959# asm 1: add  <f4=int64#10,<f4=int64#10
2960# asm 2: add  <f4=%r12,<f4=%r12
2961add  %r12,%r12
2962
2963# qhasm: g0 = f0
2964# asm 1: mov  <f0=int64#2,>g0=int64#11
2965# asm 2: mov  <f0=%rsi,>g0=%r13
2966mov  %rsi,%r13
2967
2968# qhasm: g1 = f1
2969# asm 1: mov  <f1=int64#3,>g1=int64#12
2970# asm 2: mov  <f1=%rdx,>g1=%r14
2971mov  %rdx,%r14
2972
2973# qhasm: g2 = f2
2974# asm 1: mov  <f2=int64#4,>g2=int64#13
2975# asm 2: mov  <f2=%rcx,>g2=%r15
2976mov  %rcx,%r15
2977
2978# qhasm: g3 = f3
2979# asm 1: mov  <f3=int64#9,>g3=int64#14
2980# asm 2: mov  <f3=%r11,>g3=%rbx
2981mov  %r11,%rbx
2982
2983# qhasm: g4 = f4
2984# asm 1: mov  <f4=int64#10,>g4=int64#15
2985# asm 2: mov  <f4=%r12,>g4=%rbp
2986mov  %r12,%rbp
2987
2988# qhasm: f0 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P0
2989# asm 1: add  crypto_sign_ed25519_amd64_51_30k_batch_2P0,<f0=int64#2
2990# asm 2: add  crypto_sign_ed25519_amd64_51_30k_batch_2P0,<f0=%rsi
2991add  crypto_sign_ed25519_amd64_51_30k_batch_2P0,%rsi
2992
2993# qhasm: f1 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
2994# asm 1: add  crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<f1=int64#3
2995# asm 2: add  crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<f1=%rdx
2996add  crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%rdx
2997
2998# qhasm: f2 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
2999# asm 1: add  crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<f2=int64#4
3000# asm 2: add  crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<f2=%rcx
3001add  crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%rcx
3002
3003# qhasm: f3 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
3004# asm 1: add  crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<f3=int64#9
3005# asm 2: add  crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<f3=%r11
3006add  crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r11
3007
3008# qhasm: f4 += *(uint64 *)&crypto_sign_ed25519_amd64_51_30k_batch_2P1234
3009# asm 1: add  crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<f4=int64#10
3010# asm 2: add  crypto_sign_ed25519_amd64_51_30k_batch_2P1234,<f4=%r12
3011add  crypto_sign_ed25519_amd64_51_30k_batch_2P1234,%r12
3012
3013# qhasm: g0 += c0_stack
3014# asm 1: addq <c0_stack=stack64#18,<g0=int64#11
3015# asm 2: addq <c0_stack=136(%rsp),<g0=%r13
3016addq 136(%rsp),%r13
3017
3018# qhasm: g1 += c1
3019# asm 1: add  <c1=int64#5,<g1=int64#12
3020# asm 2: add  <c1=%r8,<g1=%r14
3021add  %r8,%r14
3022
3023# qhasm: g2 += c2
3024# asm 1: add  <c2=int64#6,<g2=int64#13
3025# asm 2: add  <c2=%r9,<g2=%r15
3026add  %r9,%r15
3027
3028# qhasm: g3 += c3
3029# asm 1: add  <c3=int64#7,<g3=int64#14
3030# asm 2: add  <c3=%rax,<g3=%rbx
3031add  %rax,%rbx
3032
3033# qhasm: g4 += c4
3034# asm 1: add  <c4=int64#8,<g4=int64#15
3035# asm 2: add  <c4=%r10,<g4=%rbp
3036add  %r10,%rbp
3037
3038# qhasm: f0 -= c0_stack
3039# asm 1: subq <c0_stack=stack64#18,<f0=int64#2
3040# asm 2: subq <c0_stack=136(%rsp),<f0=%rsi
3041subq 136(%rsp),%rsi
3042
3043# qhasm: f1 -= c1
3044# asm 1: sub  <c1=int64#5,<f1=int64#3
3045# asm 2: sub  <c1=%r8,<f1=%rdx
3046sub  %r8,%rdx
3047
3048# qhasm: f2 -= c2
3049# asm 1: sub  <c2=int64#6,<f2=int64#4
3050# asm 2: sub  <c2=%r9,<f2=%rcx
3051sub  %r9,%rcx
3052
3053# qhasm: f3 -= c3
3054# asm 1: sub  <c3=int64#7,<f3=int64#9
3055# asm 2: sub  <c3=%rax,<f3=%r11
3056sub  %rax,%r11
3057
3058# qhasm: f4 -= c4
3059# asm 1: sub  <c4=int64#8,<f4=int64#10
3060# asm 2: sub  <c4=%r10,<f4=%r12
3061sub  %r10,%r12
3062
3063# qhasm: g0_stack = g0
3064# asm 1: movq <g0=int64#11,>g0_stack=stack64#18
3065# asm 2: movq <g0=%r13,>g0_stack=136(%rsp)
3066movq %r13,136(%rsp)
3067
3068# qhasm: g1_stack = g1
3069# asm 1: movq <g1=int64#12,>g1_stack=stack64#19
3070# asm 2: movq <g1=%r14,>g1_stack=144(%rsp)
3071movq %r14,144(%rsp)
3072
3073# qhasm: g2_stack = g2
3074# asm 1: movq <g2=int64#13,>g2_stack=stack64#20
3075# asm 2: movq <g2=%r15,>g2_stack=152(%rsp)
3076movq %r15,152(%rsp)
3077
3078# qhasm: g3_stack = g3
3079# asm 1: movq <g3=int64#14,>g3_stack=stack64#21
3080# asm 2: movq <g3=%rbx,>g3_stack=160(%rsp)
3081movq %rbx,160(%rsp)
3082
3083# qhasm: g4_stack = g4
3084# asm 1: movq <g4=int64#15,>g4_stack=stack64#22
3085# asm 2: movq <g4=%rbp,>g4_stack=168(%rsp)
3086movq %rbp,168(%rsp)
3087
3088# qhasm: f0_stack = f0
3089# asm 1: movq <f0=int64#2,>f0_stack=stack64#23
3090# asm 2: movq <f0=%rsi,>f0_stack=176(%rsp)
3091movq %rsi,176(%rsp)
3092
3093# qhasm: f1_stack = f1
3094# asm 1: movq <f1=int64#3,>f1_stack=stack64#24
3095# asm 2: movq <f1=%rdx,>f1_stack=184(%rsp)
3096movq %rdx,184(%rsp)
3097
3098# qhasm: f2_stack = f2
3099# asm 1: movq <f2=int64#4,>f2_stack=stack64#25
3100# asm 2: movq <f2=%rcx,>f2_stack=192(%rsp)
3101movq %rcx,192(%rsp)
3102
3103# qhasm: f3_stack = f3
3104# asm 1: movq <f3=int64#9,>f3_stack=stack64#26
3105# asm 2: movq <f3=%r11,>f3_stack=200(%rsp)
3106movq %r11,200(%rsp)
3107
3108# qhasm: f4_stack = f4
3109# asm 1: movq <f4=int64#10,>f4_stack=stack64#27
3110# asm 2: movq <f4=%r12,>f4_stack=208(%rsp)
3111movq %r12,208(%rsp)
3112
3113# qhasm:   mulrax = e3_stack
3114# asm 1: movq <e3_stack=stack64#16,>mulrax=int64#2
3115# asm 2: movq <e3_stack=120(%rsp),>mulrax=%rsi
3116movq 120(%rsp),%rsi
3117
3118# qhasm:   mulrax *= 19
3119# asm 1: imulq  $19,<mulrax=int64#2,>mulrax=int64#7
3120# asm 2: imulq  $19,<mulrax=%rsi,>mulrax=%rax
3121imulq  $19,%rsi,%rax
3122
3123# qhasm:   mulx319_stack = mulrax
3124# asm 1: movq <mulrax=int64#7,>mulx319_stack=stack64#28
3125# asm 2: movq <mulrax=%rax,>mulx319_stack=216(%rsp)
3126movq %rax,216(%rsp)
3127
3128# qhasm:   (uint128) mulrdx mulrax = mulrax * f2_stack
3129# asm 1: mulq  <f2_stack=stack64#25
3130# asm 2: mulq  <f2_stack=192(%rsp)
3131mulq  192(%rsp)
3132
3133# qhasm:   rx0 = mulrax
3134# asm 1: mov  <mulrax=int64#7,>rx0=int64#2
3135# asm 2: mov  <mulrax=%rax,>rx0=%rsi
3136mov  %rax,%rsi
3137
3138# qhasm:   mulr01 = mulrdx
3139# asm 1: mov  <mulrdx=int64#3,>mulr01=int64#4
3140# asm 2: mov  <mulrdx=%rdx,>mulr01=%rcx
3141mov  %rdx,%rcx
3142
3143# qhasm:   mulrax = e4_stack
3144# asm 1: movq <e4_stack=stack64#17,>mulrax=int64#3
3145# asm 2: movq <e4_stack=128(%rsp),>mulrax=%rdx
3146movq 128(%rsp),%rdx
3147
3148# qhasm:   mulrax *= 19
3149# asm 1: imulq  $19,<mulrax=int64#3,>mulrax=int64#7
3150# asm 2: imulq  $19,<mulrax=%rdx,>mulrax=%rax
3151imulq  $19,%rdx,%rax
3152
3153# qhasm:   mulx419_stack = mulrax
3154# asm 1: movq <mulrax=int64#7,>mulx419_stack=stack64#29
3155# asm 2: movq <mulrax=%rax,>mulx419_stack=224(%rsp)
3156movq %rax,224(%rsp)
3157
3158# qhasm:   (uint128) mulrdx mulrax = mulrax * f1_stack
3159# asm 1: mulq  <f1_stack=stack64#24
3160# asm 2: mulq  <f1_stack=184(%rsp)
3161mulq  184(%rsp)
3162
3163# qhasm:   carry? rx0 += mulrax
3164# asm 1: add  <mulrax=int64#7,<rx0=int64#2
3165# asm 2: add  <mulrax=%rax,<rx0=%rsi
3166add  %rax,%rsi
3167
3168# qhasm:   mulr01 += mulrdx + carry
3169# asm 1: adc <mulrdx=int64#3,<mulr01=int64#4
3170# asm 2: adc <mulrdx=%rdx,<mulr01=%rcx
3171adc %rdx,%rcx
3172
3173# qhasm:   mulrax = e0_stack
3174# asm 1: movq <e0_stack=stack64#13,>mulrax=int64#7
3175# asm 2: movq <e0_stack=96(%rsp),>mulrax=%rax
3176movq 96(%rsp),%rax
3177
# ----------------------------------------------------------------------
# NOTE(review): qhasm-generated x86-64 (AT&T syntax, SysV ABI), from the
# ed25519 amd64-51-30k batch field arithmetic.  Field elements use five
# radix-2^51 limbs; since 2^255 = 19 (mod 2^255-19), any product term
# that would land past limb 4 is instead multiplied by 19 and folded
# into the low limbs.  This section is the tail of the schoolbook
# product rx = e * f:  rx_k accumulates sum of e_i*f_j with i+j == k
# (mod 5, with the wrap terms pre-scaled by 19).
# Accumulators: rx0..rx4 in %rsi,%r8,%r10,%r12,%r14; their 128-bit
# high halves mulr01..mulr41 in %rcx,%r9,%r11,%r13,%r15.
# Each add/adc pair is a 128-bit accumulation -- instruction order and
# flag liveness are load-bearing; code lines below are byte-identical
# to the generator's output, only comments were added.
# (Window starts mid-row: the load of mulrax preceding this first mulq
# is above the visible region -- by the qhasm comment it is being
# multiplied by f0 and folded into rx0.)
# ----------------------------------------------------------------------
3178# qhasm:   (uint128) mulrdx mulrax = mulrax * f0_stack
3179# asm 1: mulq  <f0_stack=stack64#23
3180# asm 2: mulq  <f0_stack=176(%rsp)
3181mulq  176(%rsp)
3182
3183# qhasm:   carry? rx0 += mulrax
3184# asm 1: add  <mulrax=int64#7,<rx0=int64#2
3185# asm 2: add  <mulrax=%rax,<rx0=%rsi
3186add  %rax,%rsi
3187
3188# qhasm:   mulr01 += mulrdx + carry
3189# asm 1: adc <mulrdx=int64#3,<mulr01=int64#4
3190# asm 2: adc <mulrdx=%rdx,<mulr01=%rcx
3191adc %rdx,%rcx
3192
# --- row e0: e0*f1..f4 initialize rx1..rx4 (e0*f0 was done above) ---
3193# qhasm:   mulrax = e0_stack
3194# asm 1: movq <e0_stack=stack64#13,>mulrax=int64#7
3195# asm 2: movq <e0_stack=96(%rsp),>mulrax=%rax
3196movq 96(%rsp),%rax
3197
3198# qhasm:   (uint128) mulrdx mulrax = mulrax * f1_stack
3199# asm 1: mulq  <f1_stack=stack64#24
3200# asm 2: mulq  <f1_stack=184(%rsp)
3201mulq  184(%rsp)
3202
3203# qhasm:   rx1 = mulrax
3204# asm 1: mov  <mulrax=int64#7,>rx1=int64#5
3205# asm 2: mov  <mulrax=%rax,>rx1=%r8
3206mov  %rax,%r8
3207
3208# qhasm:   mulr11 = mulrdx
3209# asm 1: mov  <mulrdx=int64#3,>mulr11=int64#6
3210# asm 2: mov  <mulrdx=%rdx,>mulr11=%r9
3211mov  %rdx,%r9
3212
3213# qhasm:   mulrax = e0_stack
3214# asm 1: movq <e0_stack=stack64#13,>mulrax=int64#7
3215# asm 2: movq <e0_stack=96(%rsp),>mulrax=%rax
3216movq 96(%rsp),%rax
3217
3218# qhasm:   (uint128) mulrdx mulrax = mulrax * f2_stack
3219# asm 1: mulq  <f2_stack=stack64#25
3220# asm 2: mulq  <f2_stack=192(%rsp)
3221mulq  192(%rsp)
3222
3223# qhasm:   rx2 = mulrax
3224# asm 1: mov  <mulrax=int64#7,>rx2=int64#8
3225# asm 2: mov  <mulrax=%rax,>rx2=%r10
3226mov  %rax,%r10
3227
3228# qhasm:   mulr21 = mulrdx
3229# asm 1: mov  <mulrdx=int64#3,>mulr21=int64#9
3230# asm 2: mov  <mulrdx=%rdx,>mulr21=%r11
3231mov  %rdx,%r11
3232
3233# qhasm:   mulrax = e0_stack
3234# asm 1: movq <e0_stack=stack64#13,>mulrax=int64#7
3235# asm 2: movq <e0_stack=96(%rsp),>mulrax=%rax
3236movq 96(%rsp),%rax
3237
3238# qhasm:   (uint128) mulrdx mulrax = mulrax * f3_stack
3239# asm 1: mulq  <f3_stack=stack64#26
3240# asm 2: mulq  <f3_stack=200(%rsp)
3241mulq  200(%rsp)
3242
3243# qhasm:   rx3 = mulrax
3244# asm 1: mov  <mulrax=int64#7,>rx3=int64#10
3245# asm 2: mov  <mulrax=%rax,>rx3=%r12
3246mov  %rax,%r12
3247
3248# qhasm:   mulr31 = mulrdx
3249# asm 1: mov  <mulrdx=int64#3,>mulr31=int64#11
3250# asm 2: mov  <mulrdx=%rdx,>mulr31=%r13
3251mov  %rdx,%r13
3252
3253# qhasm:   mulrax = e0_stack
3254# asm 1: movq <e0_stack=stack64#13,>mulrax=int64#7
3255# asm 2: movq <e0_stack=96(%rsp),>mulrax=%rax
3256movq 96(%rsp),%rax
3257
3258# qhasm:   (uint128) mulrdx mulrax = mulrax * f4_stack
3259# asm 1: mulq  <f4_stack=stack64#27
3260# asm 2: mulq  <f4_stack=208(%rsp)
3261mulq  208(%rsp)
3262
3263# qhasm:   rx4 = mulrax
3264# asm 1: mov  <mulrax=int64#7,>rx4=int64#12
3265# asm 2: mov  <mulrax=%rax,>rx4=%r14
3266mov  %rax,%r14
3267
3268# qhasm:   mulr41 = mulrdx
3269# asm 1: mov  <mulrdx=int64#3,>mulr41=int64#13
3270# asm 2: mov  <mulrdx=%rdx,>mulr41=%r15
3271mov  %rdx,%r15
3272
# --- row e1: e1*f0..f3 accumulate into rx1..rx4 ---
3273# qhasm:   mulrax = e1_stack
3274# asm 1: movq <e1_stack=stack64#14,>mulrax=int64#7
3275# asm 2: movq <e1_stack=104(%rsp),>mulrax=%rax
3276movq 104(%rsp),%rax
3277
3278# qhasm:   (uint128) mulrdx mulrax = mulrax * f0_stack
3279# asm 1: mulq  <f0_stack=stack64#23
3280# asm 2: mulq  <f0_stack=176(%rsp)
3281mulq  176(%rsp)
3282
3283# qhasm:   carry? rx1 += mulrax
3284# asm 1: add  <mulrax=int64#7,<rx1=int64#5
3285# asm 2: add  <mulrax=%rax,<rx1=%r8
3286add  %rax,%r8
3287
3288# qhasm:   mulr11 += mulrdx + carry
3289# asm 1: adc <mulrdx=int64#3,<mulr11=int64#6
3290# asm 2: adc <mulrdx=%rdx,<mulr11=%r9
3291adc %rdx,%r9
3292
3293# qhasm:   mulrax = e1_stack
3294# asm 1: movq <e1_stack=stack64#14,>mulrax=int64#7
3295# asm 2: movq <e1_stack=104(%rsp),>mulrax=%rax
3296movq 104(%rsp),%rax
3297
3298# qhasm:   (uint128) mulrdx mulrax = mulrax * f1_stack
3299# asm 1: mulq  <f1_stack=stack64#24
3300# asm 2: mulq  <f1_stack=184(%rsp)
3301mulq  184(%rsp)
3302
3303# qhasm:   carry? rx2 += mulrax
3304# asm 1: add  <mulrax=int64#7,<rx2=int64#8
3305# asm 2: add  <mulrax=%rax,<rx2=%r10
3306add  %rax,%r10
3307
3308# qhasm:   mulr21 += mulrdx + carry
3309# asm 1: adc <mulrdx=int64#3,<mulr21=int64#9
3310# asm 2: adc <mulrdx=%rdx,<mulr21=%r11
3311adc %rdx,%r11
3312
3313# qhasm:   mulrax = e1_stack
3314# asm 1: movq <e1_stack=stack64#14,>mulrax=int64#7
3315# asm 2: movq <e1_stack=104(%rsp),>mulrax=%rax
3316movq 104(%rsp),%rax
3317
3318# qhasm:   (uint128) mulrdx mulrax = mulrax * f2_stack
3319# asm 1: mulq  <f2_stack=stack64#25
3320# asm 2: mulq  <f2_stack=192(%rsp)
3321mulq  192(%rsp)
3322
3323# qhasm:   carry? rx3 += mulrax
3324# asm 1: add  <mulrax=int64#7,<rx3=int64#10
3325# asm 2: add  <mulrax=%rax,<rx3=%r12
3326add  %rax,%r12
3327
3328# qhasm:   mulr31 += mulrdx + carry
3329# asm 1: adc <mulrdx=int64#3,<mulr31=int64#11
3330# asm 2: adc <mulrdx=%rdx,<mulr31=%r13
3331adc %rdx,%r13
3332
3333# qhasm:   mulrax = e1_stack
3334# asm 1: movq <e1_stack=stack64#14,>mulrax=int64#7
3335# asm 2: movq <e1_stack=104(%rsp),>mulrax=%rax
3336movq 104(%rsp),%rax
3337
3338# qhasm:   (uint128) mulrdx mulrax = mulrax * f3_stack
3339# asm 1: mulq  <f3_stack=stack64#26
3340# asm 2: mulq  <f3_stack=200(%rsp)
3341mulq  200(%rsp)
3342
3343# qhasm:   carry? rx4 += mulrax
3344# asm 1: add  <mulrax=int64#7,<rx4=int64#12
3345# asm 2: add  <mulrax=%rax,<rx4=%r14
3346add  %rax,%r14
3347
3348# qhasm:   mulr41 += mulrdx + carry
3349# asm 1: adc <mulrdx=int64#3,<mulr41=int64#13
3350# asm 2: adc <mulrdx=%rdx,<mulr41=%r15
3351adc %rdx,%r15
3352
# --- wrap term: (19*e1)*f4 folds into rx0 (limb index 1+4 wraps to 0) ---
3353# qhasm:   mulrax = e1_stack
3354# asm 1: movq <e1_stack=stack64#14,>mulrax=int64#3
3355# asm 2: movq <e1_stack=104(%rsp),>mulrax=%rdx
3356movq 104(%rsp),%rdx
3357
3358# qhasm:   mulrax *= 19
3359# asm 1: imulq  $19,<mulrax=int64#3,>mulrax=int64#7
3360# asm 2: imulq  $19,<mulrax=%rdx,>mulrax=%rax
3361imulq  $19,%rdx,%rax
3362
3363# qhasm:   (uint128) mulrdx mulrax = mulrax * f4_stack
3364# asm 1: mulq  <f4_stack=stack64#27
3365# asm 2: mulq  <f4_stack=208(%rsp)
3366mulq  208(%rsp)
3367
3368# qhasm:   carry? rx0 += mulrax
3369# asm 1: add  <mulrax=int64#7,<rx0=int64#2
3370# asm 2: add  <mulrax=%rax,<rx0=%rsi
3371add  %rax,%rsi
3372
3373# qhasm:   mulr01 += mulrdx + carry
3374# asm 1: adc <mulrdx=int64#3,<mulr01=int64#4
3375# asm 2: adc <mulrdx=%rdx,<mulr01=%rcx
3376adc %rdx,%rcx
3377
# --- row e2: e2*f0..f2 straight, then (19*e2)*f3 and (19*e2)*f4 wrap ---
3378# qhasm:   mulrax = e2_stack
3379# asm 1: movq <e2_stack=stack64#15,>mulrax=int64#7
3380# asm 2: movq <e2_stack=112(%rsp),>mulrax=%rax
3381movq 112(%rsp),%rax
3382
3383# qhasm:   (uint128) mulrdx mulrax = mulrax * f0_stack
3384# asm 1: mulq  <f0_stack=stack64#23
3385# asm 2: mulq  <f0_stack=176(%rsp)
3386mulq  176(%rsp)
3387
3388# qhasm:   carry? rx2 += mulrax
3389# asm 1: add  <mulrax=int64#7,<rx2=int64#8
3390# asm 2: add  <mulrax=%rax,<rx2=%r10
3391add  %rax,%r10
3392
3393# qhasm:   mulr21 += mulrdx + carry
3394# asm 1: adc <mulrdx=int64#3,<mulr21=int64#9
3395# asm 2: adc <mulrdx=%rdx,<mulr21=%r11
3396adc %rdx,%r11
3397
3398# qhasm:   mulrax = e2_stack
3399# asm 1: movq <e2_stack=stack64#15,>mulrax=int64#7
3400# asm 2: movq <e2_stack=112(%rsp),>mulrax=%rax
3401movq 112(%rsp),%rax
3402
3403# qhasm:   (uint128) mulrdx mulrax = mulrax * f1_stack
3404# asm 1: mulq  <f1_stack=stack64#24
3405# asm 2: mulq  <f1_stack=184(%rsp)
3406mulq  184(%rsp)
3407
3408# qhasm:   carry? rx3 += mulrax
3409# asm 1: add  <mulrax=int64#7,<rx3=int64#10
3410# asm 2: add  <mulrax=%rax,<rx3=%r12
3411add  %rax,%r12
3412
3413# qhasm:   mulr31 += mulrdx + carry
3414# asm 1: adc <mulrdx=int64#3,<mulr31=int64#11
3415# asm 2: adc <mulrdx=%rdx,<mulr31=%r13
3416adc %rdx,%r13
3417
3418# qhasm:   mulrax = e2_stack
3419# asm 1: movq <e2_stack=stack64#15,>mulrax=int64#7
3420# asm 2: movq <e2_stack=112(%rsp),>mulrax=%rax
3421movq 112(%rsp),%rax
3422
3423# qhasm:   (uint128) mulrdx mulrax = mulrax * f2_stack
3424# asm 1: mulq  <f2_stack=stack64#25
3425# asm 2: mulq  <f2_stack=192(%rsp)
3426mulq  192(%rsp)
3427
3428# qhasm:   carry? rx4 += mulrax
3429# asm 1: add  <mulrax=int64#7,<rx4=int64#12
3430# asm 2: add  <mulrax=%rax,<rx4=%r14
3431add  %rax,%r14
3432
3433# qhasm:   mulr41 += mulrdx + carry
3434# asm 1: adc <mulrdx=int64#3,<mulr41=int64#13
3435# asm 2: adc <mulrdx=%rdx,<mulr41=%r15
3436adc %rdx,%r15
3437
3438# qhasm:   mulrax = e2_stack
3439# asm 1: movq <e2_stack=stack64#15,>mulrax=int64#3
3440# asm 2: movq <e2_stack=112(%rsp),>mulrax=%rdx
3441movq 112(%rsp),%rdx
3442
3443# qhasm:   mulrax *= 19
3444# asm 1: imulq  $19,<mulrax=int64#3,>mulrax=int64#7
3445# asm 2: imulq  $19,<mulrax=%rdx,>mulrax=%rax
3446imulq  $19,%rdx,%rax
3447
3448# qhasm:   (uint128) mulrdx mulrax = mulrax * f3_stack
3449# asm 1: mulq  <f3_stack=stack64#26
3450# asm 2: mulq  <f3_stack=200(%rsp)
3451mulq  200(%rsp)
3452
3453# qhasm:   carry? rx0 += mulrax
3454# asm 1: add  <mulrax=int64#7,<rx0=int64#2
3455# asm 2: add  <mulrax=%rax,<rx0=%rsi
3456add  %rax,%rsi
3457
3458# qhasm:   mulr01 += mulrdx + carry
3459# asm 1: adc <mulrdx=int64#3,<mulr01=int64#4
3460# asm 2: adc <mulrdx=%rdx,<mulr01=%rcx
3461adc %rdx,%rcx
3462
3463# qhasm:   mulrax = e2_stack
3464# asm 1: movq <e2_stack=stack64#15,>mulrax=int64#3
3465# asm 2: movq <e2_stack=112(%rsp),>mulrax=%rdx
3466movq 112(%rsp),%rdx
3467
3468# qhasm:   mulrax *= 19
3469# asm 1: imulq  $19,<mulrax=int64#3,>mulrax=int64#7
3470# asm 2: imulq  $19,<mulrax=%rdx,>mulrax=%rax
3471imulq  $19,%rdx,%rax
3472
3473# qhasm:   (uint128) mulrdx mulrax = mulrax * f4_stack
3474# asm 1: mulq  <f4_stack=stack64#27
3475# asm 2: mulq  <f4_stack=208(%rsp)
3476mulq  208(%rsp)
3477
3478# qhasm:   carry? rx1 += mulrax
3479# asm 1: add  <mulrax=int64#7,<rx1=int64#5
3480# asm 2: add  <mulrax=%rax,<rx1=%r8
3481add  %rax,%r8
3482
3483# qhasm:   mulr11 += mulrdx + carry
3484# asm 1: adc <mulrdx=int64#3,<mulr11=int64#6
3485# asm 2: adc <mulrdx=%rdx,<mulr11=%r9
3486adc %rdx,%r9
3487
# --- row e3: e3*f0, e3*f1 straight; wrap terms use mulx319 = 19*e3,
# --- precomputed earlier and spilled at 216(%rsp) ---
3488# qhasm:   mulrax = e3_stack
3489# asm 1: movq <e3_stack=stack64#16,>mulrax=int64#7
3490# asm 2: movq <e3_stack=120(%rsp),>mulrax=%rax
3491movq 120(%rsp),%rax
3492
3493# qhasm:   (uint128) mulrdx mulrax = mulrax * f0_stack
3494# asm 1: mulq  <f0_stack=stack64#23
3495# asm 2: mulq  <f0_stack=176(%rsp)
3496mulq  176(%rsp)
3497
3498# qhasm:   carry? rx3 += mulrax
3499# asm 1: add  <mulrax=int64#7,<rx3=int64#10
3500# asm 2: add  <mulrax=%rax,<rx3=%r12
3501add  %rax,%r12
3502
3503# qhasm:   mulr31 += mulrdx + carry
3504# asm 1: adc <mulrdx=int64#3,<mulr31=int64#11
3505# asm 2: adc <mulrdx=%rdx,<mulr31=%r13
3506adc %rdx,%r13
3507
3508# qhasm:   mulrax = e3_stack
3509# asm 1: movq <e3_stack=stack64#16,>mulrax=int64#7
3510# asm 2: movq <e3_stack=120(%rsp),>mulrax=%rax
3511movq 120(%rsp),%rax
3512
3513# qhasm:   (uint128) mulrdx mulrax = mulrax * f1_stack
3514# asm 1: mulq  <f1_stack=stack64#24
3515# asm 2: mulq  <f1_stack=184(%rsp)
3516mulq  184(%rsp)
3517
3518# qhasm:   carry? rx4 += mulrax
3519# asm 1: add  <mulrax=int64#7,<rx4=int64#12
3520# asm 2: add  <mulrax=%rax,<rx4=%r14
3521add  %rax,%r14
3522
3523# qhasm:   mulr41 += mulrdx + carry
3524# asm 1: adc <mulrdx=int64#3,<mulr41=int64#13
3525# asm 2: adc <mulrdx=%rdx,<mulr41=%r15
3526adc %rdx,%r15
3527
3528# qhasm:   mulrax = mulx319_stack
3529# asm 1: movq <mulx319_stack=stack64#28,>mulrax=int64#7
3530# asm 2: movq <mulx319_stack=216(%rsp),>mulrax=%rax
3531movq 216(%rsp),%rax
3532
3533# qhasm:   (uint128) mulrdx mulrax = mulrax * f3_stack
3534# asm 1: mulq  <f3_stack=stack64#26
3535# asm 2: mulq  <f3_stack=200(%rsp)
3536mulq  200(%rsp)
3537
3538# qhasm:   carry? rx1 += mulrax
3539# asm 1: add  <mulrax=int64#7,<rx1=int64#5
3540# asm 2: add  <mulrax=%rax,<rx1=%r8
3541add  %rax,%r8
3542
3543# qhasm:   mulr11 += mulrdx + carry
3544# asm 1: adc <mulrdx=int64#3,<mulr11=int64#6
3545# asm 2: adc <mulrdx=%rdx,<mulr11=%r9
3546adc %rdx,%r9
3547
3548# qhasm:   mulrax = mulx319_stack
3549# asm 1: movq <mulx319_stack=stack64#28,>mulrax=int64#7
3550# asm 2: movq <mulx319_stack=216(%rsp),>mulrax=%rax
3551movq 216(%rsp),%rax
3552
3553# qhasm:   (uint128) mulrdx mulrax = mulrax * f4_stack
3554# asm 1: mulq  <f4_stack=stack64#27
3555# asm 2: mulq  <f4_stack=208(%rsp)
3556mulq  208(%rsp)
3557
3558# qhasm:   carry? rx2 += mulrax
3559# asm 1: add  <mulrax=int64#7,<rx2=int64#8
3560# asm 2: add  <mulrax=%rax,<rx2=%r10
3561add  %rax,%r10
3562
3563# qhasm:   mulr21 += mulrdx + carry
3564# asm 1: adc <mulrdx=int64#3,<mulr21=int64#9
3565# asm 2: adc <mulrdx=%rdx,<mulr21=%r11
3566adc %rdx,%r11
3567
# --- row e4: e4*f0 straight; wrap terms use mulx419 = 19*e4 at 224(%rsp) ---
# --- NOTE(review): e3*f2 and 19*e3*... ordering differs from e4's row; the
# --- generator interleaves rows, so missing terms here (e.g. e4*f1 via
# --- mulx419*f2 below) are covered by the folded products -- presumably all
# --- 25 partial products appear once across the full routine; verify against
# --- the complete generated file.
3568# qhasm:   mulrax = e4_stack
3569# asm 1: movq <e4_stack=stack64#17,>mulrax=int64#7
3570# asm 2: movq <e4_stack=128(%rsp),>mulrax=%rax
3571movq 128(%rsp),%rax
3572
3573# qhasm:   (uint128) mulrdx mulrax = mulrax * f0_stack
3574# asm 1: mulq  <f0_stack=stack64#23
3575# asm 2: mulq  <f0_stack=176(%rsp)
3576mulq  176(%rsp)
3577
3578# qhasm:   carry? rx4 += mulrax
3579# asm 1: add  <mulrax=int64#7,<rx4=int64#12
3580# asm 2: add  <mulrax=%rax,<rx4=%r14
3581add  %rax,%r14
3582
3583# qhasm:   mulr41 += mulrdx + carry
3584# asm 1: adc <mulrdx=int64#3,<mulr41=int64#13
3585# asm 2: adc <mulrdx=%rdx,<mulr41=%r15
3586adc %rdx,%r15
3587
3588# qhasm:   mulrax = mulx419_stack
3589# asm 1: movq <mulx419_stack=stack64#29,>mulrax=int64#7
3590# asm 2: movq <mulx419_stack=224(%rsp),>mulrax=%rax
3591movq 224(%rsp),%rax
3592
3593# qhasm:   (uint128) mulrdx mulrax = mulrax * f2_stack
3594# asm 1: mulq  <f2_stack=stack64#25
3595# asm 2: mulq  <f2_stack=192(%rsp)
3596mulq  192(%rsp)
3597
3598# qhasm:   carry? rx1 += mulrax
3599# asm 1: add  <mulrax=int64#7,<rx1=int64#5
3600# asm 2: add  <mulrax=%rax,<rx1=%r8
3601add  %rax,%r8
3602
3603# qhasm:   mulr11 += mulrdx + carry
3604# asm 1: adc <mulrdx=int64#3,<mulr11=int64#6
3605# asm 2: adc <mulrdx=%rdx,<mulr11=%r9
3606adc %rdx,%r9
3607
3608# qhasm:   mulrax = mulx419_stack
3609# asm 1: movq <mulx419_stack=stack64#29,>mulrax=int64#7
3610# asm 2: movq <mulx419_stack=224(%rsp),>mulrax=%rax
3611movq 224(%rsp),%rax
3612
3613# qhasm:   (uint128) mulrdx mulrax = mulrax * f3_stack
3614# asm 1: mulq  <f3_stack=stack64#26
3615# asm 2: mulq  <f3_stack=200(%rsp)
3616mulq  200(%rsp)
3617
3618# qhasm:   carry? rx2 += mulrax
3619# asm 1: add  <mulrax=int64#7,<rx2=int64#8
3620# asm 2: add  <mulrax=%rax,<rx2=%r10
3621add  %rax,%r10
3622
3623# qhasm:   mulr21 += mulrdx + carry
3624# asm 1: adc <mulrdx=int64#3,<mulr21=int64#9
3625# asm 2: adc <mulrdx=%rdx,<mulr21=%r11
3626adc %rdx,%r11
3627
3628# qhasm:   mulrax = mulx419_stack
3629# asm 1: movq <mulx419_stack=stack64#29,>mulrax=int64#7
3630# asm 2: movq <mulx419_stack=224(%rsp),>mulrax=%rax
3631movq 224(%rsp),%rax
3632
3633# qhasm:   (uint128) mulrdx mulrax = mulrax * f4_stack
3634# asm 1: mulq  <f4_stack=stack64#27
3635# asm 2: mulq  <f4_stack=208(%rsp)
3636mulq  208(%rsp)
3637
3638# qhasm:   carry? rx3 += mulrax
3639# asm 1: add  <mulrax=int64#7,<rx3=int64#10
3640# asm 2: add  <mulrax=%rax,<rx3=%r12
3641add  %rax,%r12
3642
3643# qhasm:   mulr31 += mulrdx + carry
3644# asm 1: adc <mulrdx=int64#3,<mulr31=int64#11
3645# asm 2: adc <mulrdx=%rdx,<mulr31=%r13
3646adc %rdx,%r13
3647
# ----------------------------------------------------------------------
# Reduction of the 128-bit limb accumulators back to radix-2^51.
# REDMASK51 is a global constant; by its name and use it is the 51-bit
# mask 2^51-1.  For each limb k: shld shifts the (high:low) 128-bit
# value left by 13 so the high register receives bits 51..114 -- i.e.
# everything above the 51-bit limb boundary -- then the low register is
# masked to 51 bits and the previous limb's overflow is added in.  The
# limb-4 overflow is multiplied by 19 and folded into limb 0 (since
# 2^255 = 19 mod p).  A final sequential carry chain (shr 51 / add /
# mask) leaves each rx limb in [0, 2^52) approximately -- exact bounds
# per the amd64-51 analysis; not re-derived here.
# Code lines are byte-identical to the generator's output.
# ----------------------------------------------------------------------
3648# qhasm:   mulredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
3649# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=int64#3
3650# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx
3651movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx
3652
3653# qhasm:   mulr01 = (mulr01.rx0) << 13
3654# asm 1: shld $13,<rx0=int64#2,<mulr01=int64#4
3655# asm 2: shld $13,<rx0=%rsi,<mulr01=%rcx
3656shld $13,%rsi,%rcx
3657
3658# qhasm:   rx0 &= mulredmask
3659# asm 1: and  <mulredmask=int64#3,<rx0=int64#2
3660# asm 2: and  <mulredmask=%rdx,<rx0=%rsi
3661and  %rdx,%rsi
3662
3663# qhasm:   mulr11 = (mulr11.rx1) << 13
3664# asm 1: shld $13,<rx1=int64#5,<mulr11=int64#6
3665# asm 2: shld $13,<rx1=%r8,<mulr11=%r9
3666shld $13,%r8,%r9
3667
3668# qhasm:   rx1 &= mulredmask
3669# asm 1: and  <mulredmask=int64#3,<rx1=int64#5
3670# asm 2: and  <mulredmask=%rdx,<rx1=%r8
3671and  %rdx,%r8
3672
3673# qhasm:   rx1 += mulr01
3674# asm 1: add  <mulr01=int64#4,<rx1=int64#5
3675# asm 2: add  <mulr01=%rcx,<rx1=%r8
3676add  %rcx,%r8
3677
3678# qhasm:   mulr21 = (mulr21.rx2) << 13
3679# asm 1: shld $13,<rx2=int64#8,<mulr21=int64#9
3680# asm 2: shld $13,<rx2=%r10,<mulr21=%r11
3681shld $13,%r10,%r11
3682
3683# qhasm:   rx2 &= mulredmask
3684# asm 1: and  <mulredmask=int64#3,<rx2=int64#8
3685# asm 2: and  <mulredmask=%rdx,<rx2=%r10
3686and  %rdx,%r10
3687
3688# qhasm:   rx2 += mulr11
3689# asm 1: add  <mulr11=int64#6,<rx2=int64#8
3690# asm 2: add  <mulr11=%r9,<rx2=%r10
3691add  %r9,%r10
3692
3693# qhasm:   mulr31 = (mulr31.rx3) << 13
3694# asm 1: shld $13,<rx3=int64#10,<mulr31=int64#11
3695# asm 2: shld $13,<rx3=%r12,<mulr31=%r13
3696shld $13,%r12,%r13
3697
3698# qhasm:   rx3 &= mulredmask
3699# asm 1: and  <mulredmask=int64#3,<rx3=int64#10
3700# asm 2: and  <mulredmask=%rdx,<rx3=%r12
3701and  %rdx,%r12
3702
3703# qhasm:   rx3 += mulr21
3704# asm 1: add  <mulr21=int64#9,<rx3=int64#10
3705# asm 2: add  <mulr21=%r11,<rx3=%r12
3706add  %r11,%r12
3707
3708# qhasm:   mulr41 = (mulr41.rx4) << 13
3709# asm 1: shld $13,<rx4=int64#12,<mulr41=int64#13
3710# asm 2: shld $13,<rx4=%r14,<mulr41=%r15
3711shld $13,%r14,%r15
3712
3713# qhasm:   rx4 &= mulredmask
3714# asm 1: and  <mulredmask=int64#3,<rx4=int64#12
3715# asm 2: and  <mulredmask=%rdx,<rx4=%r14
3716and  %rdx,%r14
3717
3718# qhasm:   rx4 += mulr31
3719# asm 1: add  <mulr31=int64#11,<rx4=int64#12
3720# asm 2: add  <mulr31=%r13,<rx4=%r14
3721add  %r13,%r14
3722
# --- top overflow wraps: rx0 += 19 * mulr41 ---
3723# qhasm:   mulr41 = mulr41 * 19
3724# asm 1: imulq  $19,<mulr41=int64#13,>mulr41=int64#4
3725# asm 2: imulq  $19,<mulr41=%r15,>mulr41=%rcx
3726imulq  $19,%r15,%rcx
3727
3728# qhasm:   rx0 += mulr41
3729# asm 1: add  <mulr41=int64#4,<rx0=int64#2
3730# asm 2: add  <mulr41=%rcx,<rx0=%rsi
3731add  %rcx,%rsi
3732
# --- final carry chain: propagate bits >= 51 upward through the limbs,
# --- wrapping the last carry times 19 back into rx0 ---
3733# qhasm:   mult = rx0
3734# asm 1: mov  <rx0=int64#2,>mult=int64#4
3735# asm 2: mov  <rx0=%rsi,>mult=%rcx
3736mov  %rsi,%rcx
3737
3738# qhasm:   (uint64) mult >>= 51
3739# asm 1: shr  $51,<mult=int64#4
3740# asm 2: shr  $51,<mult=%rcx
3741shr  $51,%rcx
3742
3743# qhasm:   mult += rx1
3744# asm 1: add  <rx1=int64#5,<mult=int64#4
3745# asm 2: add  <rx1=%r8,<mult=%rcx
3746add  %r8,%rcx
3747
3748# qhasm:   rx1 = mult
3749# asm 1: mov  <mult=int64#4,>rx1=int64#5
3750# asm 2: mov  <mult=%rcx,>rx1=%r8
3751mov  %rcx,%r8
3752
3753# qhasm:   (uint64) mult >>= 51
3754# asm 1: shr  $51,<mult=int64#4
3755# asm 2: shr  $51,<mult=%rcx
3756shr  $51,%rcx
3757
3758# qhasm:   rx0 &= mulredmask
3759# asm 1: and  <mulredmask=int64#3,<rx0=int64#2
3760# asm 2: and  <mulredmask=%rdx,<rx0=%rsi
3761and  %rdx,%rsi
3762
3763# qhasm:   mult += rx2
3764# asm 1: add  <rx2=int64#8,<mult=int64#4
3765# asm 2: add  <rx2=%r10,<mult=%rcx
3766add  %r10,%rcx
3767
3768# qhasm:   rx2 = mult
3769# asm 1: mov  <mult=int64#4,>rx2=int64#6
3770# asm 2: mov  <mult=%rcx,>rx2=%r9
3771mov  %rcx,%r9
3772
3773# qhasm:   (uint64) mult >>= 51
3774# asm 1: shr  $51,<mult=int64#4
3775# asm 2: shr  $51,<mult=%rcx
3776shr  $51,%rcx
3777
3778# qhasm:   rx1 &= mulredmask
3779# asm 1: and  <mulredmask=int64#3,<rx1=int64#5
3780# asm 2: and  <mulredmask=%rdx,<rx1=%r8
3781and  %rdx,%r8
3782
3783# qhasm:   mult += rx3
3784# asm 1: add  <rx3=int64#10,<mult=int64#4
3785# asm 2: add  <rx3=%r12,<mult=%rcx
3786add  %r12,%rcx
3787
3788# qhasm:   rx3 = mult
3789# asm 1: mov  <mult=int64#4,>rx3=int64#7
3790# asm 2: mov  <mult=%rcx,>rx3=%rax
3791mov  %rcx,%rax
3792
3793# qhasm:   (uint64) mult >>= 51
3794# asm 1: shr  $51,<mult=int64#4
3795# asm 2: shr  $51,<mult=%rcx
3796shr  $51,%rcx
3797
3798# qhasm:   rx2 &= mulredmask
3799# asm 1: and  <mulredmask=int64#3,<rx2=int64#6
3800# asm 2: and  <mulredmask=%rdx,<rx2=%r9
3801and  %rdx,%r9
3802
3803# qhasm:   mult += rx4
3804# asm 1: add  <rx4=int64#12,<mult=int64#4
3805# asm 2: add  <rx4=%r14,<mult=%rcx
3806add  %r14,%rcx
3807
3808# qhasm:   rx4 = mult
3809# asm 1: mov  <mult=int64#4,>rx4=int64#8
3810# asm 2: mov  <mult=%rcx,>rx4=%r10
3811mov  %rcx,%r10
3812
3813# qhasm:   (uint64) mult >>= 51
3814# asm 1: shr  $51,<mult=int64#4
3815# asm 2: shr  $51,<mult=%rcx
3816shr  $51,%rcx
3817
3818# qhasm:   rx3 &= mulredmask
3819# asm 1: and  <mulredmask=int64#3,<rx3=int64#7
3820# asm 2: and  <mulredmask=%rdx,<rx3=%rax
3821and  %rdx,%rax
3822
3823# qhasm:   mult *= 19
3824# asm 1: imulq  $19,<mult=int64#4,>mult=int64#4
3825# asm 2: imulq  $19,<mult=%rcx,>mult=%rcx
3826imulq  $19,%rcx,%rcx
3827
3828# qhasm:   rx0 += mult
3829# asm 1: add  <mult=int64#4,<rx0=int64#2
3830# asm 2: add  <mult=%rcx,<rx0=%rsi
3831add  %rcx,%rsi
3832
3833# qhasm:   rx4 &= mulredmask
3834# asm 1: and  <mulredmask=int64#3,<rx4=int64#8
3835# asm 2: and  <mulredmask=%rdx,<rx4=%r10
3836and  %rdx,%r10
3837
# ----------------------------------------------------------------------
# Store the reduced result limbs rx0..rx4 to the output element at
# rp (first function argument, %rdi under SysV), one 64-bit limb per
# 8 bytes.  Code lines are byte-identical to the generator's output.
# ----------------------------------------------------------------------
3838# qhasm: *(uint64 *)(rp + 0) = rx0
3839# asm 1: movq   <rx0=int64#2,0(<rp=int64#1)
3840# asm 2: movq   <rx0=%rsi,0(<rp=%rdi)
3841movq   %rsi,0(%rdi)
3842
3843# qhasm: *(uint64 *)(rp + 8) = rx1
3844# asm 1: movq   <rx1=int64#5,8(<rp=int64#1)
3845# asm 2: movq   <rx1=%r8,8(<rp=%rdi)
3846movq   %r8,8(%rdi)
3847
3848# qhasm: *(uint64 *)(rp + 16) = rx2
3849# asm 1: movq   <rx2=int64#6,16(<rp=int64#1)
3850# asm 2: movq   <rx2=%r9,16(<rp=%rdi)
3851movq   %r9,16(%rdi)
3852
3853# qhasm: *(uint64 *)(rp + 24) = rx3
3854# asm 1: movq   <rx3=int64#7,24(<rp=int64#1)
3855# asm 2: movq   <rx3=%rax,24(<rp=%rdi)
3856movq   %rax,24(%rdi)
3857
3858# qhasm: *(uint64 *)(rp + 32) = rx4
3859# asm 1: movq   <rx4=int64#8,32(<rp=int64#1)
3860# asm 2: movq   <rx4=%r10,32(<rp=%rdi)
3861movq   %r10,32(%rdi)
3862
# ----------------------------------------------------------------------
# Second product in the same routine: ry = h * g, identical structure
# to the rx = e * f multiply above.  h limbs live at 56..88(%rsp),
# g limbs at 136..168(%rsp).  It begins by precomputing the folded
# multipliers 19*h3 and 19*h4 (spilled to mulx319_stack/mulx419_stack,
# reusing the same two slots the e*f product used), then accumulates
# the partial products row by row into ry0..ry4 / mulr01..mulr41 --
# the same registers that held rx, now free after the stores above.
# This section is truncated by the window: it ends mid-row at the
# h4*g0 product (the mulq and everything after it are not visible).
# Code lines are byte-identical to the generator's output.
# ----------------------------------------------------------------------
# --- precompute 19*h3; first use: (19*h3)*g2 -> ry0 ---
3863# qhasm:   mulrax = h3_stack
3864# asm 1: movq <h3_stack=stack64#11,>mulrax=int64#2
3865# asm 2: movq <h3_stack=80(%rsp),>mulrax=%rsi
3866movq 80(%rsp),%rsi
3867
3868# qhasm:   mulrax *= 19
3869# asm 1: imulq  $19,<mulrax=int64#2,>mulrax=int64#7
3870# asm 2: imulq  $19,<mulrax=%rsi,>mulrax=%rax
3871imulq  $19,%rsi,%rax
3872
3873# qhasm:   mulx319_stack = mulrax
3874# asm 1: movq <mulrax=int64#7,>mulx319_stack=stack64#28
3875# asm 2: movq <mulrax=%rax,>mulx319_stack=216(%rsp)
3876movq %rax,216(%rsp)
3877
3878# qhasm:   (uint128) mulrdx mulrax = mulrax * g2_stack
3879# asm 1: mulq  <g2_stack=stack64#20
3880# asm 2: mulq  <g2_stack=152(%rsp)
3881mulq  152(%rsp)
3882
3883# qhasm:   ry0 = mulrax
3884# asm 1: mov  <mulrax=int64#7,>ry0=int64#2
3885# asm 2: mov  <mulrax=%rax,>ry0=%rsi
3886mov  %rax,%rsi
3887
3888# qhasm:   mulr01 = mulrdx
3889# asm 1: mov  <mulrdx=int64#3,>mulr01=int64#4
3890# asm 2: mov  <mulrdx=%rdx,>mulr01=%rcx
3891mov  %rdx,%rcx
3892
# --- precompute 19*h4; first use: (19*h4)*g1 -> ry0 ---
3893# qhasm:   mulrax = h4_stack
3894# asm 1: movq <h4_stack=stack64#12,>mulrax=int64#3
3895# asm 2: movq <h4_stack=88(%rsp),>mulrax=%rdx
3896movq 88(%rsp),%rdx
3897
3898# qhasm:   mulrax *= 19
3899# asm 1: imulq  $19,<mulrax=int64#3,>mulrax=int64#7
3900# asm 2: imulq  $19,<mulrax=%rdx,>mulrax=%rax
3901imulq  $19,%rdx,%rax
3902
3903# qhasm:   mulx419_stack = mulrax
3904# asm 1: movq <mulrax=int64#7,>mulx419_stack=stack64#29
3905# asm 2: movq <mulrax=%rax,>mulx419_stack=224(%rsp)
3906movq %rax,224(%rsp)
3907
3908# qhasm:   (uint128) mulrdx mulrax = mulrax * g1_stack
3909# asm 1: mulq  <g1_stack=stack64#19
3910# asm 2: mulq  <g1_stack=144(%rsp)
3911mulq  144(%rsp)
3912
3913# qhasm:   carry? ry0 += mulrax
3914# asm 1: add  <mulrax=int64#7,<ry0=int64#2
3915# asm 2: add  <mulrax=%rax,<ry0=%rsi
3916add  %rax,%rsi
3917
3918# qhasm:   mulr01 += mulrdx + carry
3919# asm 1: adc <mulrdx=int64#3,<mulr01=int64#4
3920# asm 2: adc <mulrdx=%rdx,<mulr01=%rcx
3921adc %rdx,%rcx
3922
# --- row h0: h0*g0..g4 -> ry0..ry4 (initializes ry1..ry4) ---
3923# qhasm:   mulrax = h0_stack
3924# asm 1: movq <h0_stack=stack64#8,>mulrax=int64#7
3925# asm 2: movq <h0_stack=56(%rsp),>mulrax=%rax
3926movq 56(%rsp),%rax
3927
3928# qhasm:   (uint128) mulrdx mulrax = mulrax * g0_stack
3929# asm 1: mulq  <g0_stack=stack64#18
3930# asm 2: mulq  <g0_stack=136(%rsp)
3931mulq  136(%rsp)
3932
3933# qhasm:   carry? ry0 += mulrax
3934# asm 1: add  <mulrax=int64#7,<ry0=int64#2
3935# asm 2: add  <mulrax=%rax,<ry0=%rsi
3936add  %rax,%rsi
3937
3938# qhasm:   mulr01 += mulrdx + carry
3939# asm 1: adc <mulrdx=int64#3,<mulr01=int64#4
3940# asm 2: adc <mulrdx=%rdx,<mulr01=%rcx
3941adc %rdx,%rcx
3942
3943# qhasm:   mulrax = h0_stack
3944# asm 1: movq <h0_stack=stack64#8,>mulrax=int64#7
3945# asm 2: movq <h0_stack=56(%rsp),>mulrax=%rax
3946movq 56(%rsp),%rax
3947
3948# qhasm:   (uint128) mulrdx mulrax = mulrax * g1_stack
3949# asm 1: mulq  <g1_stack=stack64#19
3950# asm 2: mulq  <g1_stack=144(%rsp)
3951mulq  144(%rsp)
3952
3953# qhasm:   ry1 = mulrax
3954# asm 1: mov  <mulrax=int64#7,>ry1=int64#5
3955# asm 2: mov  <mulrax=%rax,>ry1=%r8
3956mov  %rax,%r8
3957
3958# qhasm:   mulr11 = mulrdx
3959# asm 1: mov  <mulrdx=int64#3,>mulr11=int64#6
3960# asm 2: mov  <mulrdx=%rdx,>mulr11=%r9
3961mov  %rdx,%r9
3962
3963# qhasm:   mulrax = h0_stack
3964# asm 1: movq <h0_stack=stack64#8,>mulrax=int64#7
3965# asm 2: movq <h0_stack=56(%rsp),>mulrax=%rax
3966movq 56(%rsp),%rax
3967
3968# qhasm:   (uint128) mulrdx mulrax = mulrax * g2_stack
3969# asm 1: mulq  <g2_stack=stack64#20
3970# asm 2: mulq  <g2_stack=152(%rsp)
3971mulq  152(%rsp)
3972
3973# qhasm:   ry2 = mulrax
3974# asm 1: mov  <mulrax=int64#7,>ry2=int64#8
3975# asm 2: mov  <mulrax=%rax,>ry2=%r10
3976mov  %rax,%r10
3977
3978# qhasm:   mulr21 = mulrdx
3979# asm 1: mov  <mulrdx=int64#3,>mulr21=int64#9
3980# asm 2: mov  <mulrdx=%rdx,>mulr21=%r11
3981mov  %rdx,%r11
3982
3983# qhasm:   mulrax = h0_stack
3984# asm 1: movq <h0_stack=stack64#8,>mulrax=int64#7
3985# asm 2: movq <h0_stack=56(%rsp),>mulrax=%rax
3986movq 56(%rsp),%rax
3987
3988# qhasm:   (uint128) mulrdx mulrax = mulrax * g3_stack
3989# asm 1: mulq  <g3_stack=stack64#21
3990# asm 2: mulq  <g3_stack=160(%rsp)
3991mulq  160(%rsp)
3992
3993# qhasm:   ry3 = mulrax
3994# asm 1: mov  <mulrax=int64#7,>ry3=int64#10
3995# asm 2: mov  <mulrax=%rax,>ry3=%r12
3996mov  %rax,%r12
3997
3998# qhasm:   mulr31 = mulrdx
3999# asm 1: mov  <mulrdx=int64#3,>mulr31=int64#11
4000# asm 2: mov  <mulrdx=%rdx,>mulr31=%r13
4001mov  %rdx,%r13
4002
4003# qhasm:   mulrax = h0_stack
4004# asm 1: movq <h0_stack=stack64#8,>mulrax=int64#7
4005# asm 2: movq <h0_stack=56(%rsp),>mulrax=%rax
4006movq 56(%rsp),%rax
4007
4008# qhasm:   (uint128) mulrdx mulrax = mulrax * g4_stack
4009# asm 1: mulq  <g4_stack=stack64#22
4010# asm 2: mulq  <g4_stack=168(%rsp)
4011mulq  168(%rsp)
4012
4013# qhasm:   ry4 = mulrax
4014# asm 1: mov  <mulrax=int64#7,>ry4=int64#12
4015# asm 2: mov  <mulrax=%rax,>ry4=%r14
4016mov  %rax,%r14
4017
4018# qhasm:   mulr41 = mulrdx
4019# asm 1: mov  <mulrdx=int64#3,>mulr41=int64#13
4020# asm 2: mov  <mulrdx=%rdx,>mulr41=%r15
4021mov  %rdx,%r15
4022
# --- row h1: h1*g0..g3 straight, then (19*h1)*g4 wraps into ry0 ---
4023# qhasm:   mulrax = h1_stack
4024# asm 1: movq <h1_stack=stack64#9,>mulrax=int64#7
4025# asm 2: movq <h1_stack=64(%rsp),>mulrax=%rax
4026movq 64(%rsp),%rax
4027
4028# qhasm:   (uint128) mulrdx mulrax = mulrax * g0_stack
4029# asm 1: mulq  <g0_stack=stack64#18
4030# asm 2: mulq  <g0_stack=136(%rsp)
4031mulq  136(%rsp)
4032
4033# qhasm:   carry? ry1 += mulrax
4034# asm 1: add  <mulrax=int64#7,<ry1=int64#5
4035# asm 2: add  <mulrax=%rax,<ry1=%r8
4036add  %rax,%r8
4037
4038# qhasm:   mulr11 += mulrdx + carry
4039# asm 1: adc <mulrdx=int64#3,<mulr11=int64#6
4040# asm 2: adc <mulrdx=%rdx,<mulr11=%r9
4041adc %rdx,%r9
4042
4043# qhasm:   mulrax = h1_stack
4044# asm 1: movq <h1_stack=stack64#9,>mulrax=int64#7
4045# asm 2: movq <h1_stack=64(%rsp),>mulrax=%rax
4046movq 64(%rsp),%rax
4047
4048# qhasm:   (uint128) mulrdx mulrax = mulrax * g1_stack
4049# asm 1: mulq  <g1_stack=stack64#19
4050# asm 2: mulq  <g1_stack=144(%rsp)
4051mulq  144(%rsp)
4052
4053# qhasm:   carry? ry2 += mulrax
4054# asm 1: add  <mulrax=int64#7,<ry2=int64#8
4055# asm 2: add  <mulrax=%rax,<ry2=%r10
4056add  %rax,%r10
4057
4058# qhasm:   mulr21 += mulrdx + carry
4059# asm 1: adc <mulrdx=int64#3,<mulr21=int64#9
4060# asm 2: adc <mulrdx=%rdx,<mulr21=%r11
4061adc %rdx,%r11
4062
4063# qhasm:   mulrax = h1_stack
4064# asm 1: movq <h1_stack=stack64#9,>mulrax=int64#7
4065# asm 2: movq <h1_stack=64(%rsp),>mulrax=%rax
4066movq 64(%rsp),%rax
4067
4068# qhasm:   (uint128) mulrdx mulrax = mulrax * g2_stack
4069# asm 1: mulq  <g2_stack=stack64#20
4070# asm 2: mulq  <g2_stack=152(%rsp)
4071mulq  152(%rsp)
4072
4073# qhasm:   carry? ry3 += mulrax
4074# asm 1: add  <mulrax=int64#7,<ry3=int64#10
4075# asm 2: add  <mulrax=%rax,<ry3=%r12
4076add  %rax,%r12
4077
4078# qhasm:   mulr31 += mulrdx + carry
4079# asm 1: adc <mulrdx=int64#3,<mulr31=int64#11
4080# asm 2: adc <mulrdx=%rdx,<mulr31=%r13
4081adc %rdx,%r13
4082
4083# qhasm:   mulrax = h1_stack
4084# asm 1: movq <h1_stack=stack64#9,>mulrax=int64#7
4085# asm 2: movq <h1_stack=64(%rsp),>mulrax=%rax
4086movq 64(%rsp),%rax
4087
4088# qhasm:   (uint128) mulrdx mulrax = mulrax * g3_stack
4089# asm 1: mulq  <g3_stack=stack64#21
4090# asm 2: mulq  <g3_stack=160(%rsp)
4091mulq  160(%rsp)
4092
4093# qhasm:   carry? ry4 += mulrax
4094# asm 1: add  <mulrax=int64#7,<ry4=int64#12
4095# asm 2: add  <mulrax=%rax,<ry4=%r14
4096add  %rax,%r14
4097
4098# qhasm:   mulr41 += mulrdx + carry
4099# asm 1: adc <mulrdx=int64#3,<mulr41=int64#13
4100# asm 2: adc <mulrdx=%rdx,<mulr41=%r15
4101adc %rdx,%r15
4102
4103# qhasm:   mulrax = h1_stack
4104# asm 1: movq <h1_stack=stack64#9,>mulrax=int64#3
4105# asm 2: movq <h1_stack=64(%rsp),>mulrax=%rdx
4106movq 64(%rsp),%rdx
4107
4108# qhasm:   mulrax *= 19
4109# asm 1: imulq  $19,<mulrax=int64#3,>mulrax=int64#7
4110# asm 2: imulq  $19,<mulrax=%rdx,>mulrax=%rax
4111imulq  $19,%rdx,%rax
4112
4113# qhasm:   (uint128) mulrdx mulrax = mulrax * g4_stack
4114# asm 1: mulq  <g4_stack=stack64#22
4115# asm 2: mulq  <g4_stack=168(%rsp)
4116mulq  168(%rsp)
4117
4118# qhasm:   carry? ry0 += mulrax
4119# asm 1: add  <mulrax=int64#7,<ry0=int64#2
4120# asm 2: add  <mulrax=%rax,<ry0=%rsi
4121add  %rax,%rsi
4122
4123# qhasm:   mulr01 += mulrdx + carry
4124# asm 1: adc <mulrdx=int64#3,<mulr01=int64#4
4125# asm 2: adc <mulrdx=%rdx,<mulr01=%rcx
4126adc %rdx,%rcx
4127
# --- row h2: h2*g0..g2 straight, then (19*h2)*g3 and (19*h2)*g4 wrap ---
4128# qhasm:   mulrax = h2_stack
4129# asm 1: movq <h2_stack=stack64#10,>mulrax=int64#7
4130# asm 2: movq <h2_stack=72(%rsp),>mulrax=%rax
4131movq 72(%rsp),%rax
4132
4133# qhasm:   (uint128) mulrdx mulrax = mulrax * g0_stack
4134# asm 1: mulq  <g0_stack=stack64#18
4135# asm 2: mulq  <g0_stack=136(%rsp)
4136mulq  136(%rsp)
4137
4138# qhasm:   carry? ry2 += mulrax
4139# asm 1: add  <mulrax=int64#7,<ry2=int64#8
4140# asm 2: add  <mulrax=%rax,<ry2=%r10
4141add  %rax,%r10
4142
4143# qhasm:   mulr21 += mulrdx + carry
4144# asm 1: adc <mulrdx=int64#3,<mulr21=int64#9
4145# asm 2: adc <mulrdx=%rdx,<mulr21=%r11
4146adc %rdx,%r11
4147
4148# qhasm:   mulrax = h2_stack
4149# asm 1: movq <h2_stack=stack64#10,>mulrax=int64#7
4150# asm 2: movq <h2_stack=72(%rsp),>mulrax=%rax
4151movq 72(%rsp),%rax
4152
4153# qhasm:   (uint128) mulrdx mulrax = mulrax * g1_stack
4154# asm 1: mulq  <g1_stack=stack64#19
4155# asm 2: mulq  <g1_stack=144(%rsp)
4156mulq  144(%rsp)
4157
4158# qhasm:   carry? ry3 += mulrax
4159# asm 1: add  <mulrax=int64#7,<ry3=int64#10
4160# asm 2: add  <mulrax=%rax,<ry3=%r12
4161add  %rax,%r12
4162
4163# qhasm:   mulr31 += mulrdx + carry
4164# asm 1: adc <mulrdx=int64#3,<mulr31=int64#11
4165# asm 2: adc <mulrdx=%rdx,<mulr31=%r13
4166adc %rdx,%r13
4167
4168# qhasm:   mulrax = h2_stack
4169# asm 1: movq <h2_stack=stack64#10,>mulrax=int64#7
4170# asm 2: movq <h2_stack=72(%rsp),>mulrax=%rax
4171movq 72(%rsp),%rax
4172
4173# qhasm:   (uint128) mulrdx mulrax = mulrax * g2_stack
4174# asm 1: mulq  <g2_stack=stack64#20
4175# asm 2: mulq  <g2_stack=152(%rsp)
4176mulq  152(%rsp)
4177
4178# qhasm:   carry? ry4 += mulrax
4179# asm 1: add  <mulrax=int64#7,<ry4=int64#12
4180# asm 2: add  <mulrax=%rax,<ry4=%r14
4181add  %rax,%r14
4182
4183# qhasm:   mulr41 += mulrdx + carry
4184# asm 1: adc <mulrdx=int64#3,<mulr41=int64#13
4185# asm 2: adc <mulrdx=%rdx,<mulr41=%r15
4186adc %rdx,%r15
4187
4188# qhasm:   mulrax = h2_stack
4189# asm 1: movq <h2_stack=stack64#10,>mulrax=int64#3
4190# asm 2: movq <h2_stack=72(%rsp),>mulrax=%rdx
4191movq 72(%rsp),%rdx
4192
4193# qhasm:   mulrax *= 19
4194# asm 1: imulq  $19,<mulrax=int64#3,>mulrax=int64#7
4195# asm 2: imulq  $19,<mulrax=%rdx,>mulrax=%rax
4196imulq  $19,%rdx,%rax
4197
4198# qhasm:   (uint128) mulrdx mulrax = mulrax * g3_stack
4199# asm 1: mulq  <g3_stack=stack64#21
4200# asm 2: mulq  <g3_stack=160(%rsp)
4201mulq  160(%rsp)
4202
4203# qhasm:   carry? ry0 += mulrax
4204# asm 1: add  <mulrax=int64#7,<ry0=int64#2
4205# asm 2: add  <mulrax=%rax,<ry0=%rsi
4206add  %rax,%rsi
4207
4208# qhasm:   mulr01 += mulrdx + carry
4209# asm 1: adc <mulrdx=int64#3,<mulr01=int64#4
4210# asm 2: adc <mulrdx=%rdx,<mulr01=%rcx
4211adc %rdx,%rcx
4212
4213# qhasm:   mulrax = h2_stack
4214# asm 1: movq <h2_stack=stack64#10,>mulrax=int64#3
4215# asm 2: movq <h2_stack=72(%rsp),>mulrax=%rdx
4216movq 72(%rsp),%rdx
4217
4218# qhasm:   mulrax *= 19
4219# asm 1: imulq  $19,<mulrax=int64#3,>mulrax=int64#7
4220# asm 2: imulq  $19,<mulrax=%rdx,>mulrax=%rax
4221imulq  $19,%rdx,%rax
4222
4223# qhasm:   (uint128) mulrdx mulrax = mulrax * g4_stack
4224# asm 1: mulq  <g4_stack=stack64#22
4225# asm 2: mulq  <g4_stack=168(%rsp)
4226mulq  168(%rsp)
4227
4228# qhasm:   carry? ry1 += mulrax
4229# asm 1: add  <mulrax=int64#7,<ry1=int64#5
4230# asm 2: add  <mulrax=%rax,<ry1=%r8
4231add  %rax,%r8
4232
4233# qhasm:   mulr11 += mulrdx + carry
4234# asm 1: adc <mulrdx=int64#3,<mulr11=int64#6
4235# asm 2: adc <mulrdx=%rdx,<mulr11=%r9
4236adc %rdx,%r9
4237
# --- row h3: h3*g0, h3*g1 straight; wrap terms reuse mulx319 = 19*h3 ---
4238# qhasm:   mulrax = h3_stack
4239# asm 1: movq <h3_stack=stack64#11,>mulrax=int64#7
4240# asm 2: movq <h3_stack=80(%rsp),>mulrax=%rax
4241movq 80(%rsp),%rax
4242
4243# qhasm:   (uint128) mulrdx mulrax = mulrax * g0_stack
4244# asm 1: mulq  <g0_stack=stack64#18
4245# asm 2: mulq  <g0_stack=136(%rsp)
4246mulq  136(%rsp)
4247
4248# qhasm:   carry? ry3 += mulrax
4249# asm 1: add  <mulrax=int64#7,<ry3=int64#10
4250# asm 2: add  <mulrax=%rax,<ry3=%r12
4251add  %rax,%r12
4252
4253# qhasm:   mulr31 += mulrdx + carry
4254# asm 1: adc <mulrdx=int64#3,<mulr31=int64#11
4255# asm 2: adc <mulrdx=%rdx,<mulr31=%r13
4256adc %rdx,%r13
4257
4258# qhasm:   mulrax = h3_stack
4259# asm 1: movq <h3_stack=stack64#11,>mulrax=int64#7
4260# asm 2: movq <h3_stack=80(%rsp),>mulrax=%rax
4261movq 80(%rsp),%rax
4262
4263# qhasm:   (uint128) mulrdx mulrax = mulrax * g1_stack
4264# asm 1: mulq  <g1_stack=stack64#19
4265# asm 2: mulq  <g1_stack=144(%rsp)
4266mulq  144(%rsp)
4267
4268# qhasm:   carry? ry4 += mulrax
4269# asm 1: add  <mulrax=int64#7,<ry4=int64#12
4270# asm 2: add  <mulrax=%rax,<ry4=%r14
4271add  %rax,%r14
4272
4273# qhasm:   mulr41 += mulrdx + carry
4274# asm 1: adc <mulrdx=int64#3,<mulr41=int64#13
4275# asm 2: adc <mulrdx=%rdx,<mulr41=%r15
4276adc %rdx,%r15
4277
4278# qhasm:   mulrax = mulx319_stack
4279# asm 1: movq <mulx319_stack=stack64#28,>mulrax=int64#7
4280# asm 2: movq <mulx319_stack=216(%rsp),>mulrax=%rax
4281movq 216(%rsp),%rax
4282
4283# qhasm:   (uint128) mulrdx mulrax = mulrax * g3_stack
4284# asm 1: mulq  <g3_stack=stack64#21
4285# asm 2: mulq  <g3_stack=160(%rsp)
4286mulq  160(%rsp)
4287
4288# qhasm:   carry? ry1 += mulrax
4289# asm 1: add  <mulrax=int64#7,<ry1=int64#5
4290# asm 2: add  <mulrax=%rax,<ry1=%r8
4291add  %rax,%r8
4292
4293# qhasm:   mulr11 += mulrdx + carry
4294# asm 1: adc <mulrdx=int64#3,<mulr11=int64#6
4295# asm 2: adc <mulrdx=%rdx,<mulr11=%r9
4296adc %rdx,%r9
4297
4298# qhasm:   mulrax = mulx319_stack
4299# asm 1: movq <mulx319_stack=stack64#28,>mulrax=int64#7
4300# asm 2: movq <mulx319_stack=216(%rsp),>mulrax=%rax
4301movq 216(%rsp),%rax
4302
4303# qhasm:   (uint128) mulrdx mulrax = mulrax * g4_stack
4304# asm 1: mulq  <g4_stack=stack64#22
4305# asm 2: mulq  <g4_stack=168(%rsp)
4306mulq  168(%rsp)
4307
4308# qhasm:   carry? ry2 += mulrax
4309# asm 1: add  <mulrax=int64#7,<ry2=int64#8
4310# asm 2: add  <mulrax=%rax,<ry2=%r10
4311add  %rax,%r10
4312
4313# qhasm:   mulr21 += mulrdx + carry
4314# asm 1: adc <mulrdx=int64#3,<mulr21=int64#9
4315# asm 2: adc <mulrdx=%rdx,<mulr21=%r11
4316adc %rdx,%r11
4317
# --- row h4: h4*g0 begins here; the mulq and remainder of this row are
# --- beyond the visible window ---
4318# qhasm:   mulrax = h4_stack
4319# asm 1: movq <h4_stack=stack64#12,>mulrax=int64#7
4320# asm 2: movq <h4_stack=88(%rsp),>mulrax=%rax
4321movq 88(%rsp),%rax
4322
4323# qhasm:   (uint128) mulrdx mulrax = mulrax * g0_stack
4324# asm 1: mulq  <g0_stack=stack64#18
4325# asm 2: mulq  <g0_stack=136(%rsp)
4326mulq  136(%rsp)
4327
4328# qhasm:   carry? ry4 += mulrax
4329# asm 1: add  <mulrax=int64#7,<ry4=int64#12
4330# asm 2: add  <mulrax=%rax,<ry4=%r14
4331add  %rax,%r14
4332
4333# qhasm:   mulr41 += mulrdx + carry
4334# asm 1: adc <mulrdx=int64#3,<mulr41=int64#13
4335# asm 2: adc <mulrdx=%rdx,<mulr41=%r15
4336adc %rdx,%r15
4337
4338# qhasm:   mulrax = mulx419_stack
4339# asm 1: movq <mulx419_stack=stack64#29,>mulrax=int64#7
4340# asm 2: movq <mulx419_stack=224(%rsp),>mulrax=%rax
4341movq 224(%rsp),%rax
4342
4343# qhasm:   (uint128) mulrdx mulrax = mulrax * g2_stack
4344# asm 1: mulq  <g2_stack=stack64#20
4345# asm 2: mulq  <g2_stack=152(%rsp)
4346mulq  152(%rsp)
4347
4348# qhasm:   carry? ry1 += mulrax
4349# asm 1: add  <mulrax=int64#7,<ry1=int64#5
4350# asm 2: add  <mulrax=%rax,<ry1=%r8
4351add  %rax,%r8
4352
4353# qhasm:   mulr11 += mulrdx + carry
4354# asm 1: adc <mulrdx=int64#3,<mulr11=int64#6
4355# asm 2: adc <mulrdx=%rdx,<mulr11=%r9
4356adc %rdx,%r9
4357
4358# qhasm:   mulrax = mulx419_stack
4359# asm 1: movq <mulx419_stack=stack64#29,>mulrax=int64#7
4360# asm 2: movq <mulx419_stack=224(%rsp),>mulrax=%rax
4361movq 224(%rsp),%rax
4362
4363# qhasm:   (uint128) mulrdx mulrax = mulrax * g3_stack
4364# asm 1: mulq  <g3_stack=stack64#21
4365# asm 2: mulq  <g3_stack=160(%rsp)
4366mulq  160(%rsp)
4367
4368# qhasm:   carry? ry2 += mulrax
4369# asm 1: add  <mulrax=int64#7,<ry2=int64#8
4370# asm 2: add  <mulrax=%rax,<ry2=%r10
4371add  %rax,%r10
4372
4373# qhasm:   mulr21 += mulrdx + carry
4374# asm 1: adc <mulrdx=int64#3,<mulr21=int64#9
4375# asm 2: adc <mulrdx=%rdx,<mulr21=%r11
4376adc %rdx,%r11
4377
4378# qhasm:   mulrax = mulx419_stack
4379# asm 1: movq <mulx419_stack=stack64#29,>mulrax=int64#7
4380# asm 2: movq <mulx419_stack=224(%rsp),>mulrax=%rax
4381movq 224(%rsp),%rax
4382
4383# qhasm:   (uint128) mulrdx mulrax = mulrax * g4_stack
4384# asm 1: mulq  <g4_stack=stack64#22
4385# asm 2: mulq  <g4_stack=168(%rsp)
4386mulq  168(%rsp)
4387
4388# qhasm:   carry? ry3 += mulrax
4389# asm 1: add  <mulrax=int64#7,<ry3=int64#10
4390# asm 2: add  <mulrax=%rax,<ry3=%r12
4391add  %rax,%r12
4392
4393# qhasm:   mulr31 += mulrdx + carry
4394# asm 1: adc <mulrdx=int64#3,<mulr31=int64#11
4395# asm 2: adc <mulrdx=%rdx,<mulr31=%r13
4396adc %rdx,%r13
4397
# --- Carry/reduction phase for the ry limbs. ---
# Each limb pair (ryN low / mulrN1 high) holds a wide product sum.  shld $13
# shifts the carry word mulrN1 left by 13, pulling in ryN's top 13 bits, and
# REDMASK51 (presumably 2^51-1 -- confirm against the constants definition)
# masks ryN down to its low 51 bits.
# qhasm:   mulredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=int64#3
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx

# qhasm:   mulr01 = (mulr01.ry0) << 13
# asm 1: shld $13,<ry0=int64#2,<mulr01=int64#4
# asm 2: shld $13,<ry0=%rsi,<mulr01=%rcx
shld $13,%rsi,%rcx
4407
4408# qhasm:   ry0 &= mulredmask
4409# asm 1: and  <mulredmask=int64#3,<ry0=int64#2
4410# asm 2: and  <mulredmask=%rdx,<ry0=%rsi
4411and  %rdx,%rsi
4412
4413# qhasm:   mulr11 = (mulr11.ry1) << 13
4414# asm 1: shld $13,<ry1=int64#5,<mulr11=int64#6
4415# asm 2: shld $13,<ry1=%r8,<mulr11=%r9
4416shld $13,%r8,%r9
4417
4418# qhasm:   ry1 &= mulredmask
4419# asm 1: and  <mulredmask=int64#3,<ry1=int64#5
4420# asm 2: and  <mulredmask=%rdx,<ry1=%r8
4421and  %rdx,%r8
4422
4423# qhasm:   ry1 += mulr01
4424# asm 1: add  <mulr01=int64#4,<ry1=int64#5
4425# asm 2: add  <mulr01=%rcx,<ry1=%r8
4426add  %rcx,%r8
4427
4428# qhasm:   mulr21 = (mulr21.ry2) << 13
4429# asm 1: shld $13,<ry2=int64#8,<mulr21=int64#9
4430# asm 2: shld $13,<ry2=%r10,<mulr21=%r11
4431shld $13,%r10,%r11
4432
4433# qhasm:   ry2 &= mulredmask
4434# asm 1: and  <mulredmask=int64#3,<ry2=int64#8
4435# asm 2: and  <mulredmask=%rdx,<ry2=%r10
4436and  %rdx,%r10
4437
4438# qhasm:   ry2 += mulr11
4439# asm 1: add  <mulr11=int64#6,<ry2=int64#8
4440# asm 2: add  <mulr11=%r9,<ry2=%r10
4441add  %r9,%r10
4442
4443# qhasm:   mulr31 = (mulr31.ry3) << 13
4444# asm 1: shld $13,<ry3=int64#10,<mulr31=int64#11
4445# asm 2: shld $13,<ry3=%r12,<mulr31=%r13
4446shld $13,%r12,%r13
4447
4448# qhasm:   ry3 &= mulredmask
4449# asm 1: and  <mulredmask=int64#3,<ry3=int64#10
4450# asm 2: and  <mulredmask=%rdx,<ry3=%r12
4451and  %rdx,%r12
4452
4453# qhasm:   ry3 += mulr21
4454# asm 1: add  <mulr21=int64#9,<ry3=int64#10
4455# asm 2: add  <mulr21=%r11,<ry3=%r12
4456add  %r11,%r12
4457
4458# qhasm:   mulr41 = (mulr41.ry4) << 13
4459# asm 1: shld $13,<ry4=int64#12,<mulr41=int64#13
4460# asm 2: shld $13,<ry4=%r14,<mulr41=%r15
4461shld $13,%r14,%r15
4462
4463# qhasm:   ry4 &= mulredmask
4464# asm 1: and  <mulredmask=int64#3,<ry4=int64#12
4465# asm 2: and  <mulredmask=%rdx,<ry4=%r14
4466and  %rdx,%r14
4467
4468# qhasm:   ry4 += mulr31
4469# asm 1: add  <mulr31=int64#11,<ry4=int64#12
4470# asm 2: add  <mulr31=%r13,<ry4=%r14
4471add  %r13,%r14
4472
# Top carry wraps around modulo 2^255-19: bits above position 255 are
# equivalent to 19x their value at position 0, so multiply the final carry
# word by 19 and add it back into limb 0.
# qhasm:   mulr41 = mulr41 * 19
# asm 1: imulq  $19,<mulr41=int64#13,>mulr41=int64#4
# asm 2: imulq  $19,<mulr41=%r15,>mulr41=%rcx
imulq  $19,%r15,%rcx

# qhasm:   ry0 += mulr41
# asm 1: add  <mulr41=int64#4,<ry0=int64#2
# asm 2: add  <mulr41=%rcx,<ry0=%rsi
add  %rcx,%rsi
4482
4483# qhasm:   mult = ry0
4484# asm 1: mov  <ry0=int64#2,>mult=int64#4
4485# asm 2: mov  <ry0=%rsi,>mult=%rcx
4486mov  %rsi,%rcx
4487
4488# qhasm:   (uint64) mult >>= 51
4489# asm 1: shr  $51,<mult=int64#4
4490# asm 2: shr  $51,<mult=%rcx
4491shr  $51,%rcx
4492
4493# qhasm:   mult += ry1
4494# asm 1: add  <ry1=int64#5,<mult=int64#4
4495# asm 2: add  <ry1=%r8,<mult=%rcx
4496add  %r8,%rcx
4497
4498# qhasm:   ry1 = mult
4499# asm 1: mov  <mult=int64#4,>ry1=int64#5
4500# asm 2: mov  <mult=%rcx,>ry1=%r8
4501mov  %rcx,%r8
4502
4503# qhasm:   (uint64) mult >>= 51
4504# asm 1: shr  $51,<mult=int64#4
4505# asm 2: shr  $51,<mult=%rcx
4506shr  $51,%rcx
4507
4508# qhasm:   ry0 &= mulredmask
4509# asm 1: and  <mulredmask=int64#3,<ry0=int64#2
4510# asm 2: and  <mulredmask=%rdx,<ry0=%rsi
4511and  %rdx,%rsi
4512
4513# qhasm:   mult += ry2
4514# asm 1: add  <ry2=int64#8,<mult=int64#4
4515# asm 2: add  <ry2=%r10,<mult=%rcx
4516add  %r10,%rcx
4517
4518# qhasm:   ry2 = mult
4519# asm 1: mov  <mult=int64#4,>ry2=int64#6
4520# asm 2: mov  <mult=%rcx,>ry2=%r9
4521mov  %rcx,%r9
4522
4523# qhasm:   (uint64) mult >>= 51
4524# asm 1: shr  $51,<mult=int64#4
4525# asm 2: shr  $51,<mult=%rcx
4526shr  $51,%rcx
4527
4528# qhasm:   ry1 &= mulredmask
4529# asm 1: and  <mulredmask=int64#3,<ry1=int64#5
4530# asm 2: and  <mulredmask=%rdx,<ry1=%r8
4531and  %rdx,%r8
4532
4533# qhasm:   mult += ry3
4534# asm 1: add  <ry3=int64#10,<mult=int64#4
4535# asm 2: add  <ry3=%r12,<mult=%rcx
4536add  %r12,%rcx
4537
4538# qhasm:   ry3 = mult
4539# asm 1: mov  <mult=int64#4,>ry3=int64#7
4540# asm 2: mov  <mult=%rcx,>ry3=%rax
4541mov  %rcx,%rax
4542
4543# qhasm:   (uint64) mult >>= 51
4544# asm 1: shr  $51,<mult=int64#4
4545# asm 2: shr  $51,<mult=%rcx
4546shr  $51,%rcx
4547
4548# qhasm:   ry2 &= mulredmask
4549# asm 1: and  <mulredmask=int64#3,<ry2=int64#6
4550# asm 2: and  <mulredmask=%rdx,<ry2=%r9
4551and  %rdx,%r9
4552
4553# qhasm:   mult += ry4
4554# asm 1: add  <ry4=int64#12,<mult=int64#4
4555# asm 2: add  <ry4=%r14,<mult=%rcx
4556add  %r14,%rcx
4557
4558# qhasm:   ry4 = mult
4559# asm 1: mov  <mult=int64#4,>ry4=int64#8
4560# asm 2: mov  <mult=%rcx,>ry4=%r10
4561mov  %rcx,%r10
4562
4563# qhasm:   (uint64) mult >>= 51
4564# asm 1: shr  $51,<mult=int64#4
4565# asm 2: shr  $51,<mult=%rcx
4566shr  $51,%rcx
4567
4568# qhasm:   ry3 &= mulredmask
4569# asm 1: and  <mulredmask=int64#3,<ry3=int64#7
4570# asm 2: and  <mulredmask=%rdx,<ry3=%rax
4571and  %rdx,%rax
4572
4573# qhasm:   mult *= 19
4574# asm 1: imulq  $19,<mult=int64#4,>mult=int64#4
4575# asm 2: imulq  $19,<mult=%rcx,>mult=%rcx
4576imulq  $19,%rcx,%rcx
4577
4578# qhasm:   ry0 += mult
4579# asm 1: add  <mult=int64#4,<ry0=int64#2
4580# asm 2: add  <mult=%rcx,<ry0=%rsi
4581add  %rcx,%rsi
4582
4583# qhasm:   ry4 &= mulredmask
4584# asm 1: and  <mulredmask=int64#3,<ry4=int64#8
4585# asm 2: and  <mulredmask=%rdx,<ry4=%r10
4586and  %rdx,%r10
4587
# Store the five reduced 51-bit limbs of ry into rp[40..79]
# (the second field element of the output structure at *rp).
# qhasm: *(uint64 *)(rp + 40) = ry0
# asm 1: movq   <ry0=int64#2,40(<rp=int64#1)
# asm 2: movq   <ry0=%rsi,40(<rp=%rdi)
movq   %rsi,40(%rdi)

# qhasm: *(uint64 *)(rp + 48) = ry1
# asm 1: movq   <ry1=int64#5,48(<rp=int64#1)
# asm 2: movq   <ry1=%r8,48(<rp=%rdi)
movq   %r8,48(%rdi)

# qhasm: *(uint64 *)(rp + 56) = ry2
# asm 1: movq   <ry2=int64#6,56(<rp=int64#1)
# asm 2: movq   <ry2=%r9,56(<rp=%rdi)
movq   %r9,56(%rdi)

# qhasm: *(uint64 *)(rp + 64) = ry3
# asm 1: movq   <ry3=int64#7,64(<rp=int64#1)
# asm 2: movq   <ry3=%rax,64(<rp=%rdi)
movq   %rax,64(%rdi)

# qhasm: *(uint64 *)(rp + 72) = ry4
# asm 1: movq   <ry4=int64#8,72(<rp=int64#1)
# asm 2: movq   <ry4=%r10,72(<rp=%rdi)
movq   %r10,72(%rdi)
4612
# --- Begin next 5x51-limb schoolbook multiplication (rz accumulators,
# operands f*_stack x g*_stack). ---
# Precompute 19*g3 into a stack slot: partial products whose limb indices
# sum to 5 or more wrap past bit 255 and are therefore scaled by 19.
# qhasm:   mulrax = g3_stack
# asm 1: movq <g3_stack=stack64#21,>mulrax=int64#2
# asm 2: movq <g3_stack=160(%rsp),>mulrax=%rsi
movq 160(%rsp),%rsi

# qhasm:   mulrax *= 19
# asm 1: imulq  $19,<mulrax=int64#2,>mulrax=int64#7
# asm 2: imulq  $19,<mulrax=%rsi,>mulrax=%rax
imulq  $19,%rsi,%rax

# qhasm:   mulx319_stack = mulrax
# asm 1: movq <mulrax=int64#7,>mulx319_stack=stack64#28
# asm 2: movq <mulrax=%rax,>mulx319_stack=216(%rsp)
movq %rax,216(%rsp)
4627
4628# qhasm:   (uint128) mulrdx mulrax = mulrax * f2_stack
4629# asm 1: mulq  <f2_stack=stack64#25
4630# asm 2: mulq  <f2_stack=192(%rsp)
4631mulq  192(%rsp)
4632
4633# qhasm:   rz0 = mulrax
4634# asm 1: mov  <mulrax=int64#7,>rz0=int64#2
4635# asm 2: mov  <mulrax=%rax,>rz0=%rsi
4636mov  %rax,%rsi
4637
4638# qhasm:   mulr01 = mulrdx
4639# asm 1: mov  <mulrdx=int64#3,>mulr01=int64#4
4640# asm 2: mov  <mulrdx=%rdx,>mulr01=%rcx
4641mov  %rdx,%rcx
4642
4643# qhasm:   mulrax = g4_stack
4644# asm 1: movq <g4_stack=stack64#22,>mulrax=int64#3
4645# asm 2: movq <g4_stack=168(%rsp),>mulrax=%rdx
4646movq 168(%rsp),%rdx
4647
4648# qhasm:   mulrax *= 19
4649# asm 1: imulq  $19,<mulrax=int64#3,>mulrax=int64#7
4650# asm 2: imulq  $19,<mulrax=%rdx,>mulrax=%rax
4651imulq  $19,%rdx,%rax
4652
4653# qhasm:   mulx419_stack = mulrax
4654# asm 1: movq <mulrax=int64#7,>mulx419_stack=stack64#29
4655# asm 2: movq <mulrax=%rax,>mulx419_stack=224(%rsp)
4656movq %rax,224(%rsp)
4657
4658# qhasm:   (uint128) mulrdx mulrax = mulrax * f1_stack
4659# asm 1: mulq  <f1_stack=stack64#24
4660# asm 2: mulq  <f1_stack=184(%rsp)
4661mulq  184(%rsp)
4662
4663# qhasm:   carry? rz0 += mulrax
4664# asm 1: add  <mulrax=int64#7,<rz0=int64#2
4665# asm 2: add  <mulrax=%rax,<rz0=%rsi
4666add  %rax,%rsi
4667
4668# qhasm:   mulr01 += mulrdx + carry
4669# asm 1: adc <mulrdx=int64#3,<mulr01=int64#4
4670# asm 2: adc <mulrdx=%rdx,<mulr01=%rcx
4671adc %rdx,%rcx
4672
4673# qhasm:   mulrax = g0_stack
4674# asm 1: movq <g0_stack=stack64#18,>mulrax=int64#7
4675# asm 2: movq <g0_stack=136(%rsp),>mulrax=%rax
4676movq 136(%rsp),%rax
4677
4678# qhasm:   (uint128) mulrdx mulrax = mulrax * f0_stack
4679# asm 1: mulq  <f0_stack=stack64#23
4680# asm 2: mulq  <f0_stack=176(%rsp)
4681mulq  176(%rsp)
4682
4683# qhasm:   carry? rz0 += mulrax
4684# asm 1: add  <mulrax=int64#7,<rz0=int64#2
4685# asm 2: add  <mulrax=%rax,<rz0=%rsi
4686add  %rax,%rsi
4687
4688# qhasm:   mulr01 += mulrdx + carry
4689# asm 1: adc <mulrdx=int64#3,<mulr01=int64#4
4690# asm 2: adc <mulrdx=%rdx,<mulr01=%rcx
4691adc %rdx,%rcx
4692
4693# qhasm:   mulrax = g0_stack
4694# asm 1: movq <g0_stack=stack64#18,>mulrax=int64#7
4695# asm 2: movq <g0_stack=136(%rsp),>mulrax=%rax
4696movq 136(%rsp),%rax
4697
4698# qhasm:   (uint128) mulrdx mulrax = mulrax * f1_stack
4699# asm 1: mulq  <f1_stack=stack64#24
4700# asm 2: mulq  <f1_stack=184(%rsp)
4701mulq  184(%rsp)
4702
4703# qhasm:   rz1 = mulrax
4704# asm 1: mov  <mulrax=int64#7,>rz1=int64#5
4705# asm 2: mov  <mulrax=%rax,>rz1=%r8
4706mov  %rax,%r8
4707
4708# qhasm:   mulr11 = mulrdx
4709# asm 1: mov  <mulrdx=int64#3,>mulr11=int64#6
4710# asm 2: mov  <mulrdx=%rdx,>mulr11=%r9
4711mov  %rdx,%r9
4712
4713# qhasm:   mulrax = g0_stack
4714# asm 1: movq <g0_stack=stack64#18,>mulrax=int64#7
4715# asm 2: movq <g0_stack=136(%rsp),>mulrax=%rax
4716movq 136(%rsp),%rax
4717
4718# qhasm:   (uint128) mulrdx mulrax = mulrax * f2_stack
4719# asm 1: mulq  <f2_stack=stack64#25
4720# asm 2: mulq  <f2_stack=192(%rsp)
4721mulq  192(%rsp)
4722
4723# qhasm:   rz2 = mulrax
4724# asm 1: mov  <mulrax=int64#7,>rz2=int64#8
4725# asm 2: mov  <mulrax=%rax,>rz2=%r10
4726mov  %rax,%r10
4727
4728# qhasm:   mulr21 = mulrdx
4729# asm 1: mov  <mulrdx=int64#3,>mulr21=int64#9
4730# asm 2: mov  <mulrdx=%rdx,>mulr21=%r11
4731mov  %rdx,%r11
4732
4733# qhasm:   mulrax = g0_stack
4734# asm 1: movq <g0_stack=stack64#18,>mulrax=int64#7
4735# asm 2: movq <g0_stack=136(%rsp),>mulrax=%rax
4736movq 136(%rsp),%rax
4737
4738# qhasm:   (uint128) mulrdx mulrax = mulrax * f3_stack
4739# asm 1: mulq  <f3_stack=stack64#26
4740# asm 2: mulq  <f3_stack=200(%rsp)
4741mulq  200(%rsp)
4742
4743# qhasm:   rz3 = mulrax
4744# asm 1: mov  <mulrax=int64#7,>rz3=int64#10
4745# asm 2: mov  <mulrax=%rax,>rz3=%r12
4746mov  %rax,%r12
4747
4748# qhasm:   mulr31 = mulrdx
4749# asm 1: mov  <mulrdx=int64#3,>mulr31=int64#11
4750# asm 2: mov  <mulrdx=%rdx,>mulr31=%r13
4751mov  %rdx,%r13
4752
4753# qhasm:   mulrax = g0_stack
4754# asm 1: movq <g0_stack=stack64#18,>mulrax=int64#7
4755# asm 2: movq <g0_stack=136(%rsp),>mulrax=%rax
4756movq 136(%rsp),%rax
4757
4758# qhasm:   (uint128) mulrdx mulrax = mulrax * f4_stack
4759# asm 1: mulq  <f4_stack=stack64#27
4760# asm 2: mulq  <f4_stack=208(%rsp)
4761mulq  208(%rsp)
4762
4763# qhasm:   rz4 = mulrax
4764# asm 1: mov  <mulrax=int64#7,>rz4=int64#12
4765# asm 2: mov  <mulrax=%rax,>rz4=%r14
4766mov  %rax,%r14
4767
4768# qhasm:   mulr41 = mulrdx
4769# asm 1: mov  <mulrdx=int64#3,>mulr41=int64#13
4770# asm 2: mov  <mulrdx=%rdx,>mulr41=%r15
4771mov  %rdx,%r15
4772
4773# qhasm:   mulrax = g1_stack
4774# asm 1: movq <g1_stack=stack64#19,>mulrax=int64#7
4775# asm 2: movq <g1_stack=144(%rsp),>mulrax=%rax
4776movq 144(%rsp),%rax
4777
4778# qhasm:   (uint128) mulrdx mulrax = mulrax * f0_stack
4779# asm 1: mulq  <f0_stack=stack64#23
4780# asm 2: mulq  <f0_stack=176(%rsp)
4781mulq  176(%rsp)
4782
4783# qhasm:   carry? rz1 += mulrax
4784# asm 1: add  <mulrax=int64#7,<rz1=int64#5
4785# asm 2: add  <mulrax=%rax,<rz1=%r8
4786add  %rax,%r8
4787
4788# qhasm:   mulr11 += mulrdx + carry
4789# asm 1: adc <mulrdx=int64#3,<mulr11=int64#6
4790# asm 2: adc <mulrdx=%rdx,<mulr11=%r9
4791adc %rdx,%r9
4792
4793# qhasm:   mulrax = g1_stack
4794# asm 1: movq <g1_stack=stack64#19,>mulrax=int64#7
4795# asm 2: movq <g1_stack=144(%rsp),>mulrax=%rax
4796movq 144(%rsp),%rax
4797
4798# qhasm:   (uint128) mulrdx mulrax = mulrax * f1_stack
4799# asm 1: mulq  <f1_stack=stack64#24
4800# asm 2: mulq  <f1_stack=184(%rsp)
4801mulq  184(%rsp)
4802
4803# qhasm:   carry? rz2 += mulrax
4804# asm 1: add  <mulrax=int64#7,<rz2=int64#8
4805# asm 2: add  <mulrax=%rax,<rz2=%r10
4806add  %rax,%r10
4807
4808# qhasm:   mulr21 += mulrdx + carry
4809# asm 1: adc <mulrdx=int64#3,<mulr21=int64#9
4810# asm 2: adc <mulrdx=%rdx,<mulr21=%r11
4811adc %rdx,%r11
4812
4813# qhasm:   mulrax = g1_stack
4814# asm 1: movq <g1_stack=stack64#19,>mulrax=int64#7
4815# asm 2: movq <g1_stack=144(%rsp),>mulrax=%rax
4816movq 144(%rsp),%rax
4817
4818# qhasm:   (uint128) mulrdx mulrax = mulrax * f2_stack
4819# asm 1: mulq  <f2_stack=stack64#25
4820# asm 2: mulq  <f2_stack=192(%rsp)
4821mulq  192(%rsp)
4822
4823# qhasm:   carry? rz3 += mulrax
4824# asm 1: add  <mulrax=int64#7,<rz3=int64#10
4825# asm 2: add  <mulrax=%rax,<rz3=%r12
4826add  %rax,%r12
4827
4828# qhasm:   mulr31 += mulrdx + carry
4829# asm 1: adc <mulrdx=int64#3,<mulr31=int64#11
4830# asm 2: adc <mulrdx=%rdx,<mulr31=%r13
4831adc %rdx,%r13
4832
4833# qhasm:   mulrax = g1_stack
4834# asm 1: movq <g1_stack=stack64#19,>mulrax=int64#7
4835# asm 2: movq <g1_stack=144(%rsp),>mulrax=%rax
4836movq 144(%rsp),%rax
4837
4838# qhasm:   (uint128) mulrdx mulrax = mulrax * f3_stack
4839# asm 1: mulq  <f3_stack=stack64#26
4840# asm 2: mulq  <f3_stack=200(%rsp)
4841mulq  200(%rsp)
4842
4843# qhasm:   carry? rz4 += mulrax
4844# asm 1: add  <mulrax=int64#7,<rz4=int64#12
4845# asm 2: add  <mulrax=%rax,<rz4=%r14
4846add  %rax,%r14
4847
4848# qhasm:   mulr41 += mulrdx + carry
4849# asm 1: adc <mulrdx=int64#3,<mulr41=int64#13
4850# asm 2: adc <mulrdx=%rdx,<mulr41=%r15
4851adc %rdx,%r15
4852
4853# qhasm:   mulrax = g1_stack
4854# asm 1: movq <g1_stack=stack64#19,>mulrax=int64#3
4855# asm 2: movq <g1_stack=144(%rsp),>mulrax=%rdx
4856movq 144(%rsp),%rdx
4857
4858# qhasm:   mulrax *= 19
4859# asm 1: imulq  $19,<mulrax=int64#3,>mulrax=int64#7
4860# asm 2: imulq  $19,<mulrax=%rdx,>mulrax=%rax
4861imulq  $19,%rdx,%rax
4862
4863# qhasm:   (uint128) mulrdx mulrax = mulrax * f4_stack
4864# asm 1: mulq  <f4_stack=stack64#27
4865# asm 2: mulq  <f4_stack=208(%rsp)
4866mulq  208(%rsp)
4867
4868# qhasm:   carry? rz0 += mulrax
4869# asm 1: add  <mulrax=int64#7,<rz0=int64#2
4870# asm 2: add  <mulrax=%rax,<rz0=%rsi
4871add  %rax,%rsi
4872
4873# qhasm:   mulr01 += mulrdx + carry
4874# asm 1: adc <mulrdx=int64#3,<mulr01=int64#4
4875# asm 2: adc <mulrdx=%rdx,<mulr01=%rcx
4876adc %rdx,%rcx
4877
4878# qhasm:   mulrax = g2_stack
4879# asm 1: movq <g2_stack=stack64#20,>mulrax=int64#7
4880# asm 2: movq <g2_stack=152(%rsp),>mulrax=%rax
4881movq 152(%rsp),%rax
4882
4883# qhasm:   (uint128) mulrdx mulrax = mulrax * f0_stack
4884# asm 1: mulq  <f0_stack=stack64#23
4885# asm 2: mulq  <f0_stack=176(%rsp)
4886mulq  176(%rsp)
4887
4888# qhasm:   carry? rz2 += mulrax
4889# asm 1: add  <mulrax=int64#7,<rz2=int64#8
4890# asm 2: add  <mulrax=%rax,<rz2=%r10
4891add  %rax,%r10
4892
4893# qhasm:   mulr21 += mulrdx + carry
4894# asm 1: adc <mulrdx=int64#3,<mulr21=int64#9
4895# asm 2: adc <mulrdx=%rdx,<mulr21=%r11
4896adc %rdx,%r11
4897
4898# qhasm:   mulrax = g2_stack
4899# asm 1: movq <g2_stack=stack64#20,>mulrax=int64#7
4900# asm 2: movq <g2_stack=152(%rsp),>mulrax=%rax
4901movq 152(%rsp),%rax
4902
4903# qhasm:   (uint128) mulrdx mulrax = mulrax * f1_stack
4904# asm 1: mulq  <f1_stack=stack64#24
4905# asm 2: mulq  <f1_stack=184(%rsp)
4906mulq  184(%rsp)
4907
4908# qhasm:   carry? rz3 += mulrax
4909# asm 1: add  <mulrax=int64#7,<rz3=int64#10
4910# asm 2: add  <mulrax=%rax,<rz3=%r12
4911add  %rax,%r12
4912
4913# qhasm:   mulr31 += mulrdx + carry
4914# asm 1: adc <mulrdx=int64#3,<mulr31=int64#11
4915# asm 2: adc <mulrdx=%rdx,<mulr31=%r13
4916adc %rdx,%r13
4917
4918# qhasm:   mulrax = g2_stack
4919# asm 1: movq <g2_stack=stack64#20,>mulrax=int64#7
4920# asm 2: movq <g2_stack=152(%rsp),>mulrax=%rax
4921movq 152(%rsp),%rax
4922
4923# qhasm:   (uint128) mulrdx mulrax = mulrax * f2_stack
4924# asm 1: mulq  <f2_stack=stack64#25
4925# asm 2: mulq  <f2_stack=192(%rsp)
4926mulq  192(%rsp)
4927
4928# qhasm:   carry? rz4 += mulrax
4929# asm 1: add  <mulrax=int64#7,<rz4=int64#12
4930# asm 2: add  <mulrax=%rax,<rz4=%r14
4931add  %rax,%r14
4932
4933# qhasm:   mulr41 += mulrdx + carry
4934# asm 1: adc <mulrdx=int64#3,<mulr41=int64#13
4935# asm 2: adc <mulrdx=%rdx,<mulr41=%r15
4936adc %rdx,%r15
4937
4938# qhasm:   mulrax = g2_stack
4939# asm 1: movq <g2_stack=stack64#20,>mulrax=int64#3
4940# asm 2: movq <g2_stack=152(%rsp),>mulrax=%rdx
4941movq 152(%rsp),%rdx
4942
4943# qhasm:   mulrax *= 19
4944# asm 1: imulq  $19,<mulrax=int64#3,>mulrax=int64#7
4945# asm 2: imulq  $19,<mulrax=%rdx,>mulrax=%rax
4946imulq  $19,%rdx,%rax
4947
4948# qhasm:   (uint128) mulrdx mulrax = mulrax * f3_stack
4949# asm 1: mulq  <f3_stack=stack64#26
4950# asm 2: mulq  <f3_stack=200(%rsp)
4951mulq  200(%rsp)
4952
4953# qhasm:   carry? rz0 += mulrax
4954# asm 1: add  <mulrax=int64#7,<rz0=int64#2
4955# asm 2: add  <mulrax=%rax,<rz0=%rsi
4956add  %rax,%rsi
4957
4958# qhasm:   mulr01 += mulrdx + carry
4959# asm 1: adc <mulrdx=int64#3,<mulr01=int64#4
4960# asm 2: adc <mulrdx=%rdx,<mulr01=%rcx
4961adc %rdx,%rcx
4962
4963# qhasm:   mulrax = g2_stack
4964# asm 1: movq <g2_stack=stack64#20,>mulrax=int64#3
4965# asm 2: movq <g2_stack=152(%rsp),>mulrax=%rdx
4966movq 152(%rsp),%rdx
4967
4968# qhasm:   mulrax *= 19
4969# asm 1: imulq  $19,<mulrax=int64#3,>mulrax=int64#7
4970# asm 2: imulq  $19,<mulrax=%rdx,>mulrax=%rax
4971imulq  $19,%rdx,%rax
4972
4973# qhasm:   (uint128) mulrdx mulrax = mulrax * f4_stack
4974# asm 1: mulq  <f4_stack=stack64#27
4975# asm 2: mulq  <f4_stack=208(%rsp)
4976mulq  208(%rsp)
4977
4978# qhasm:   carry? rz1 += mulrax
4979# asm 1: add  <mulrax=int64#7,<rz1=int64#5
4980# asm 2: add  <mulrax=%rax,<rz1=%r8
4981add  %rax,%r8
4982
4983# qhasm:   mulr11 += mulrdx + carry
4984# asm 1: adc <mulrdx=int64#3,<mulr11=int64#6
4985# asm 2: adc <mulrdx=%rdx,<mulr11=%r9
4986adc %rdx,%r9
4987
4988# qhasm:   mulrax = g3_stack
4989# asm 1: movq <g3_stack=stack64#21,>mulrax=int64#7
4990# asm 2: movq <g3_stack=160(%rsp),>mulrax=%rax
4991movq 160(%rsp),%rax
4992
4993# qhasm:   (uint128) mulrdx mulrax = mulrax * f0_stack
4994# asm 1: mulq  <f0_stack=stack64#23
4995# asm 2: mulq  <f0_stack=176(%rsp)
4996mulq  176(%rsp)
4997
4998# qhasm:   carry? rz3 += mulrax
4999# asm 1: add  <mulrax=int64#7,<rz3=int64#10
5000# asm 2: add  <mulrax=%rax,<rz3=%r12
5001add  %rax,%r12
5002
5003# qhasm:   mulr31 += mulrdx + carry
5004# asm 1: adc <mulrdx=int64#3,<mulr31=int64#11
5005# asm 2: adc <mulrdx=%rdx,<mulr31=%r13
5006adc %rdx,%r13
5007
5008# qhasm:   mulrax = g3_stack
5009# asm 1: movq <g3_stack=stack64#21,>mulrax=int64#7
5010# asm 2: movq <g3_stack=160(%rsp),>mulrax=%rax
5011movq 160(%rsp),%rax
5012
5013# qhasm:   (uint128) mulrdx mulrax = mulrax * f1_stack
5014# asm 1: mulq  <f1_stack=stack64#24
5015# asm 2: mulq  <f1_stack=184(%rsp)
5016mulq  184(%rsp)
5017
5018# qhasm:   carry? rz4 += mulrax
5019# asm 1: add  <mulrax=int64#7,<rz4=int64#12
5020# asm 2: add  <mulrax=%rax,<rz4=%r14
5021add  %rax,%r14
5022
5023# qhasm:   mulr41 += mulrdx + carry
5024# asm 1: adc <mulrdx=int64#3,<mulr41=int64#13
5025# asm 2: adc <mulrdx=%rdx,<mulr41=%r15
5026adc %rdx,%r15
5027
5028# qhasm:   mulrax = mulx319_stack
5029# asm 1: movq <mulx319_stack=stack64#28,>mulrax=int64#7
5030# asm 2: movq <mulx319_stack=216(%rsp),>mulrax=%rax
5031movq 216(%rsp),%rax
5032
5033# qhasm:   (uint128) mulrdx mulrax = mulrax * f3_stack
5034# asm 1: mulq  <f3_stack=stack64#26
5035# asm 2: mulq  <f3_stack=200(%rsp)
5036mulq  200(%rsp)
5037
5038# qhasm:   carry? rz1 += mulrax
5039# asm 1: add  <mulrax=int64#7,<rz1=int64#5
5040# asm 2: add  <mulrax=%rax,<rz1=%r8
5041add  %rax,%r8
5042
5043# qhasm:   mulr11 += mulrdx + carry
5044# asm 1: adc <mulrdx=int64#3,<mulr11=int64#6
5045# asm 2: adc <mulrdx=%rdx,<mulr11=%r9
5046adc %rdx,%r9
5047
5048# qhasm:   mulrax = mulx319_stack
5049# asm 1: movq <mulx319_stack=stack64#28,>mulrax=int64#7
5050# asm 2: movq <mulx319_stack=216(%rsp),>mulrax=%rax
5051movq 216(%rsp),%rax
5052
5053# qhasm:   (uint128) mulrdx mulrax = mulrax * f4_stack
5054# asm 1: mulq  <f4_stack=stack64#27
5055# asm 2: mulq  <f4_stack=208(%rsp)
5056mulq  208(%rsp)
5057
5058# qhasm:   carry? rz2 += mulrax
5059# asm 1: add  <mulrax=int64#7,<rz2=int64#8
5060# asm 2: add  <mulrax=%rax,<rz2=%r10
5061add  %rax,%r10
5062
5063# qhasm:   mulr21 += mulrdx + carry
5064# asm 1: adc <mulrdx=int64#3,<mulr21=int64#9
5065# asm 2: adc <mulrdx=%rdx,<mulr21=%r11
5066adc %rdx,%r11
5067
5068# qhasm:   mulrax = g4_stack
5069# asm 1: movq <g4_stack=stack64#22,>mulrax=int64#7
5070# asm 2: movq <g4_stack=168(%rsp),>mulrax=%rax
5071movq 168(%rsp),%rax
5072
5073# qhasm:   (uint128) mulrdx mulrax = mulrax * f0_stack
5074# asm 1: mulq  <f0_stack=stack64#23
5075# asm 2: mulq  <f0_stack=176(%rsp)
5076mulq  176(%rsp)
5077
5078# qhasm:   carry? rz4 += mulrax
5079# asm 1: add  <mulrax=int64#7,<rz4=int64#12
5080# asm 2: add  <mulrax=%rax,<rz4=%r14
5081add  %rax,%r14
5082
5083# qhasm:   mulr41 += mulrdx + carry
5084# asm 1: adc <mulrdx=int64#3,<mulr41=int64#13
5085# asm 2: adc <mulrdx=%rdx,<mulr41=%r15
5086adc %rdx,%r15
5087
5088# qhasm:   mulrax = mulx419_stack
5089# asm 1: movq <mulx419_stack=stack64#29,>mulrax=int64#7
5090# asm 2: movq <mulx419_stack=224(%rsp),>mulrax=%rax
5091movq 224(%rsp),%rax
5092
5093# qhasm:   (uint128) mulrdx mulrax = mulrax * f2_stack
5094# asm 1: mulq  <f2_stack=stack64#25
5095# asm 2: mulq  <f2_stack=192(%rsp)
5096mulq  192(%rsp)
5097
5098# qhasm:   carry? rz1 += mulrax
5099# asm 1: add  <mulrax=int64#7,<rz1=int64#5
5100# asm 2: add  <mulrax=%rax,<rz1=%r8
5101add  %rax,%r8
5102
5103# qhasm:   mulr11 += mulrdx + carry
5104# asm 1: adc <mulrdx=int64#3,<mulr11=int64#6
5105# asm 2: adc <mulrdx=%rdx,<mulr11=%r9
5106adc %rdx,%r9
5107
5108# qhasm:   mulrax = mulx419_stack
5109# asm 1: movq <mulx419_stack=stack64#29,>mulrax=int64#7
5110# asm 2: movq <mulx419_stack=224(%rsp),>mulrax=%rax
5111movq 224(%rsp),%rax
5112
5113# qhasm:   (uint128) mulrdx mulrax = mulrax * f3_stack
5114# asm 1: mulq  <f3_stack=stack64#26
5115# asm 2: mulq  <f3_stack=200(%rsp)
5116mulq  200(%rsp)
5117
5118# qhasm:   carry? rz2 += mulrax
5119# asm 1: add  <mulrax=int64#7,<rz2=int64#8
5120# asm 2: add  <mulrax=%rax,<rz2=%r10
5121add  %rax,%r10
5122
5123# qhasm:   mulr21 += mulrdx + carry
5124# asm 1: adc <mulrdx=int64#3,<mulr21=int64#9
5125# asm 2: adc <mulrdx=%rdx,<mulr21=%r11
5126adc %rdx,%r11
5127
5128# qhasm:   mulrax = mulx419_stack
5129# asm 1: movq <mulx419_stack=stack64#29,>mulrax=int64#7
5130# asm 2: movq <mulx419_stack=224(%rsp),>mulrax=%rax
5131movq 224(%rsp),%rax
5132
5133# qhasm:   (uint128) mulrdx mulrax = mulrax * f4_stack
5134# asm 1: mulq  <f4_stack=stack64#27
5135# asm 2: mulq  <f4_stack=208(%rsp)
5136mulq  208(%rsp)
5137
5138# qhasm:   carry? rz3 += mulrax
5139# asm 1: add  <mulrax=int64#7,<rz3=int64#10
5140# asm 2: add  <mulrax=%rax,<rz3=%r12
5141add  %rax,%r12
5142
5143# qhasm:   mulr31 += mulrdx + carry
5144# asm 1: adc <mulrdx=int64#3,<mulr31=int64#11
5145# asm 2: adc <mulrdx=%rdx,<mulr31=%r13
5146adc %rdx,%r13
5147
# --- Carry/reduction phase for the rz limbs (same pattern as the ry
# reduction earlier in this function): shld $13 moves each limb's overflow
# bits into its carry word, REDMASK51 masks the limb to 51 bits. ---
# qhasm:   mulredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=int64#3
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx

# qhasm:   mulr01 = (mulr01.rz0) << 13
# asm 1: shld $13,<rz0=int64#2,<mulr01=int64#4
# asm 2: shld $13,<rz0=%rsi,<mulr01=%rcx
shld $13,%rsi,%rcx
5157
5158# qhasm:   rz0 &= mulredmask
5159# asm 1: and  <mulredmask=int64#3,<rz0=int64#2
5160# asm 2: and  <mulredmask=%rdx,<rz0=%rsi
5161and  %rdx,%rsi
5162
5163# qhasm:   mulr11 = (mulr11.rz1) << 13
5164# asm 1: shld $13,<rz1=int64#5,<mulr11=int64#6
5165# asm 2: shld $13,<rz1=%r8,<mulr11=%r9
5166shld $13,%r8,%r9
5167
5168# qhasm:   rz1 &= mulredmask
5169# asm 1: and  <mulredmask=int64#3,<rz1=int64#5
5170# asm 2: and  <mulredmask=%rdx,<rz1=%r8
5171and  %rdx,%r8
5172
5173# qhasm:   rz1 += mulr01
5174# asm 1: add  <mulr01=int64#4,<rz1=int64#5
5175# asm 2: add  <mulr01=%rcx,<rz1=%r8
5176add  %rcx,%r8
5177
5178# qhasm:   mulr21 = (mulr21.rz2) << 13
5179# asm 1: shld $13,<rz2=int64#8,<mulr21=int64#9
5180# asm 2: shld $13,<rz2=%r10,<mulr21=%r11
5181shld $13,%r10,%r11
5182
5183# qhasm:   rz2 &= mulredmask
5184# asm 1: and  <mulredmask=int64#3,<rz2=int64#8
5185# asm 2: and  <mulredmask=%rdx,<rz2=%r10
5186and  %rdx,%r10
5187
5188# qhasm:   rz2 += mulr11
5189# asm 1: add  <mulr11=int64#6,<rz2=int64#8
5190# asm 2: add  <mulr11=%r9,<rz2=%r10
5191add  %r9,%r10
5192
5193# qhasm:   mulr31 = (mulr31.rz3) << 13
5194# asm 1: shld $13,<rz3=int64#10,<mulr31=int64#11
5195# asm 2: shld $13,<rz3=%r12,<mulr31=%r13
5196shld $13,%r12,%r13
5197
5198# qhasm:   rz3 &= mulredmask
5199# asm 1: and  <mulredmask=int64#3,<rz3=int64#10
5200# asm 2: and  <mulredmask=%rdx,<rz3=%r12
5201and  %rdx,%r12
5202
5203# qhasm:   rz3 += mulr21
5204# asm 1: add  <mulr21=int64#9,<rz3=int64#10
5205# asm 2: add  <mulr21=%r11,<rz3=%r12
5206add  %r11,%r12
5207
5208# qhasm:   mulr41 = (mulr41.rz4) << 13
5209# asm 1: shld $13,<rz4=int64#12,<mulr41=int64#13
5210# asm 2: shld $13,<rz4=%r14,<mulr41=%r15
5211shld $13,%r14,%r15
5212
5213# qhasm:   rz4 &= mulredmask
5214# asm 1: and  <mulredmask=int64#3,<rz4=int64#12
5215# asm 2: and  <mulredmask=%rdx,<rz4=%r14
5216and  %rdx,%r14
5217
5218# qhasm:   rz4 += mulr31
5219# asm 1: add  <mulr31=int64#11,<rz4=int64#12
5220# asm 2: add  <mulr31=%r13,<rz4=%r14
5221add  %r13,%r14
5222
# Fold the final carry word back into limb 0 scaled by 19
# (2^255-19 wraparound, as in the ry reduction above).
# qhasm:   mulr41 = mulr41 * 19
# asm 1: imulq  $19,<mulr41=int64#13,>mulr41=int64#4
# asm 2: imulq  $19,<mulr41=%r15,>mulr41=%rcx
imulq  $19,%r15,%rcx

# qhasm:   rz0 += mulr41
# asm 1: add  <mulr41=int64#4,<rz0=int64#2
# asm 2: add  <mulr41=%rcx,<rz0=%rsi
add  %rcx,%rsi
5232
5233# qhasm:   mult = rz0
5234# asm 1: mov  <rz0=int64#2,>mult=int64#4
5235# asm 2: mov  <rz0=%rsi,>mult=%rcx
5236mov  %rsi,%rcx
5237
5238# qhasm:   (uint64) mult >>= 51
5239# asm 1: shr  $51,<mult=int64#4
5240# asm 2: shr  $51,<mult=%rcx
5241shr  $51,%rcx
5242
5243# qhasm:   mult += rz1
5244# asm 1: add  <rz1=int64#5,<mult=int64#4
5245# asm 2: add  <rz1=%r8,<mult=%rcx
5246add  %r8,%rcx
5247
5248# qhasm:   rz1 = mult
5249# asm 1: mov  <mult=int64#4,>rz1=int64#5
5250# asm 2: mov  <mult=%rcx,>rz1=%r8
5251mov  %rcx,%r8
5252
5253# qhasm:   (uint64) mult >>= 51
5254# asm 1: shr  $51,<mult=int64#4
5255# asm 2: shr  $51,<mult=%rcx
5256shr  $51,%rcx
5257
5258# qhasm:   rz0 &= mulredmask
5259# asm 1: and  <mulredmask=int64#3,<rz0=int64#2
5260# asm 2: and  <mulredmask=%rdx,<rz0=%rsi
5261and  %rdx,%rsi
5262
5263# qhasm:   mult += rz2
5264# asm 1: add  <rz2=int64#8,<mult=int64#4
5265# asm 2: add  <rz2=%r10,<mult=%rcx
5266add  %r10,%rcx
5267
5268# qhasm:   rz2 = mult
5269# asm 1: mov  <mult=int64#4,>rz2=int64#6
5270# asm 2: mov  <mult=%rcx,>rz2=%r9
5271mov  %rcx,%r9
5272
5273# qhasm:   (uint64) mult >>= 51
5274# asm 1: shr  $51,<mult=int64#4
5275# asm 2: shr  $51,<mult=%rcx
5276shr  $51,%rcx
5277
5278# qhasm:   rz1 &= mulredmask
5279# asm 1: and  <mulredmask=int64#3,<rz1=int64#5
5280# asm 2: and  <mulredmask=%rdx,<rz1=%r8
5281and  %rdx,%r8
5282
5283# qhasm:   mult += rz3
5284# asm 1: add  <rz3=int64#10,<mult=int64#4
5285# asm 2: add  <rz3=%r12,<mult=%rcx
5286add  %r12,%rcx
5287
5288# qhasm:   rz3 = mult
5289# asm 1: mov  <mult=int64#4,>rz3=int64#7
5290# asm 2: mov  <mult=%rcx,>rz3=%rax
5291mov  %rcx,%rax
5292
5293# qhasm:   (uint64) mult >>= 51
5294# asm 1: shr  $51,<mult=int64#4
5295# asm 2: shr  $51,<mult=%rcx
5296shr  $51,%rcx
5297
5298# qhasm:   rz2 &= mulredmask
5299# asm 1: and  <mulredmask=int64#3,<rz2=int64#6
5300# asm 2: and  <mulredmask=%rdx,<rz2=%r9
5301and  %rdx,%r9
5302
5303# qhasm:   mult += rz4
5304# asm 1: add  <rz4=int64#12,<mult=int64#4
5305# asm 2: add  <rz4=%r14,<mult=%rcx
5306add  %r14,%rcx
5307
5308# qhasm:   rz4 = mult
5309# asm 1: mov  <mult=int64#4,>rz4=int64#8
5310# asm 2: mov  <mult=%rcx,>rz4=%r10
5311mov  %rcx,%r10
5312
5313# qhasm:   (uint64) mult >>= 51
5314# asm 1: shr  $51,<mult=int64#4
5315# asm 2: shr  $51,<mult=%rcx
5316shr  $51,%rcx
5317
5318# qhasm:   rz3 &= mulredmask
5319# asm 1: and  <mulredmask=int64#3,<rz3=int64#7
5320# asm 2: and  <mulredmask=%rdx,<rz3=%rax
5321and  %rdx,%rax
5322
5323# qhasm:   mult *= 19
5324# asm 1: imulq  $19,<mult=int64#4,>mult=int64#4
5325# asm 2: imulq  $19,<mult=%rcx,>mult=%rcx
5326imulq  $19,%rcx,%rcx
5327
5328# qhasm:   rz0 += mult
5329# asm 1: add  <mult=int64#4,<rz0=int64#2
5330# asm 2: add  <mult=%rcx,<rz0=%rsi
5331add  %rcx,%rsi
5332
5333# qhasm:   rz4 &= mulredmask
5334# asm 1: and  <mulredmask=int64#3,<rz4=int64#8
5335# asm 2: and  <mulredmask=%rdx,<rz4=%r10
5336and  %rdx,%r10
5337
5338# qhasm: *(uint64 *)(rp + 80) = rz0
5339# asm 1: movq   <rz0=int64#2,80(<rp=int64#1)
5340# asm 2: movq   <rz0=%rsi,80(<rp=%rdi)
5341movq   %rsi,80(%rdi)
5342
5343# qhasm: *(uint64 *)(rp + 88) = rz1
5344# asm 1: movq   <rz1=int64#5,88(<rp=int64#1)
5345# asm 2: movq   <rz1=%r8,88(<rp=%rdi)
5346movq   %r8,88(%rdi)
5347
5348# qhasm: *(uint64 *)(rp + 96) = rz2
5349# asm 1: movq   <rz2=int64#6,96(<rp=int64#1)
5350# asm 2: movq   <rz2=%r9,96(<rp=%rdi)
5351movq   %r9,96(%rdi)
5352
5353# qhasm: *(uint64 *)(rp + 104) = rz3
5354# asm 1: movq   <rz3=int64#7,104(<rp=int64#1)
5355# asm 2: movq   <rz3=%rax,104(<rp=%rdi)
5356movq   %rax,104(%rdi)
5357
5358# qhasm: *(uint64 *)(rp + 112) = rz4
5359# asm 1: movq   <rz4=int64#8,112(<rp=int64#1)
5360# asm 2: movq   <rz4=%r10,112(<rp=%rdi)
5361movq   %r10,112(%rdi)
5362
# ----------------------------------------------------------------------
# NOTE(review): stray line numbers that had been fused onto every line of
# this machine-generated (qhasm) file are stripped here; instructions are
# otherwise untouched.
#
# Section: schoolbook 5x5-limb multiplication  rt = e * h  in radix 2^51.
# e0..e4 and h0..h4 live in stack slots (56..128(%rsp)).  Cross terms that
# wrap past limb 4 are pre-multiplied by 19 (mulx319/mulx419 cache e3*19
# and e4*19) — consistent with reduction mod 2^255-19.  Each 64x64->128
# partial product (mulq) is accumulated into a limb pair:
#   rt0:mulr01 = %rsi:%rcx   rt1:mulr11 = %r8:%r9   rt2:mulr21 = %r10:%r11
#   rt3:mulr31 = %r12:%r13   rt4:mulr41 = %r14:%r15
# via add/adc (low into rtN, high+carry into mulrN1).
# ----------------------------------------------------------------------
# qhasm:   mulrax = e3_stack
# asm 1: movq <e3_stack=stack64#16,>mulrax=int64#2
# asm 2: movq <e3_stack=120(%rsp),>mulrax=%rsi
movq 120(%rsp),%rsi

# qhasm:   mulrax *= 19
# asm 1: imulq  $19,<mulrax=int64#2,>mulrax=int64#7
# asm 2: imulq  $19,<mulrax=%rsi,>mulrax=%rax
imulq  $19,%rsi,%rax

# qhasm:   mulx319_stack = mulrax
# asm 1: movq <mulrax=int64#7,>mulx319_stack=stack64#18
# asm 2: movq <mulrax=%rax,>mulx319_stack=136(%rsp)
movq %rax,136(%rsp)

# qhasm:   (uint128) mulrdx mulrax = mulrax * h2_stack
# asm 1: mulq  <h2_stack=stack64#10
# asm 2: mulq  <h2_stack=72(%rsp)
mulq  72(%rsp)

# qhasm:   rt0 = mulrax
# asm 1: mov  <mulrax=int64#7,>rt0=int64#2
# asm 2: mov  <mulrax=%rax,>rt0=%rsi
mov  %rax,%rsi

# qhasm:   mulr01 = mulrdx
# asm 1: mov  <mulrdx=int64#3,>mulr01=int64#4
# asm 2: mov  <mulrdx=%rdx,>mulr01=%rcx
mov  %rdx,%rcx

# qhasm:   mulrax = e4_stack
# asm 1: movq <e4_stack=stack64#17,>mulrax=int64#3
# asm 2: movq <e4_stack=128(%rsp),>mulrax=%rdx
movq 128(%rsp),%rdx

# qhasm:   mulrax *= 19
# asm 1: imulq  $19,<mulrax=int64#3,>mulrax=int64#7
# asm 2: imulq  $19,<mulrax=%rdx,>mulrax=%rax
imulq  $19,%rdx,%rax

# qhasm:   mulx419_stack = mulrax
# asm 1: movq <mulrax=int64#7,>mulx419_stack=stack64#19
# asm 2: movq <mulrax=%rax,>mulx419_stack=144(%rsp)
movq %rax,144(%rsp)

# qhasm:   (uint128) mulrdx mulrax = mulrax * h1_stack
# asm 1: mulq  <h1_stack=stack64#9
# asm 2: mulq  <h1_stack=64(%rsp)
mulq  64(%rsp)

# qhasm:   carry? rt0 += mulrax
# asm 1: add  <mulrax=int64#7,<rt0=int64#2
# asm 2: add  <mulrax=%rax,<rt0=%rsi
add  %rax,%rsi

# qhasm:   mulr01 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr01=int64#4
# asm 2: adc <mulrdx=%rdx,<mulr01=%rcx
adc %rdx,%rcx

# qhasm:   mulrax = e0_stack
# asm 1: movq <e0_stack=stack64#13,>mulrax=int64#7
# asm 2: movq <e0_stack=96(%rsp),>mulrax=%rax
movq 96(%rsp),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * h0_stack
# asm 1: mulq  <h0_stack=stack64#8
# asm 2: mulq  <h0_stack=56(%rsp)
mulq  56(%rsp)

# qhasm:   carry? rt0 += mulrax
# asm 1: add  <mulrax=int64#7,<rt0=int64#2
# asm 2: add  <mulrax=%rax,<rt0=%rsi
add  %rax,%rsi

# qhasm:   mulr01 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr01=int64#4
# asm 2: adc <mulrdx=%rdx,<mulr01=%rcx
adc %rdx,%rcx

# qhasm:   mulrax = e0_stack
# asm 1: movq <e0_stack=stack64#13,>mulrax=int64#7
# asm 2: movq <e0_stack=96(%rsp),>mulrax=%rax
movq 96(%rsp),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * h1_stack
# asm 1: mulq  <h1_stack=stack64#9
# asm 2: mulq  <h1_stack=64(%rsp)
mulq  64(%rsp)

# qhasm:   rt1 = mulrax
# asm 1: mov  <mulrax=int64#7,>rt1=int64#5
# asm 2: mov  <mulrax=%rax,>rt1=%r8
mov  %rax,%r8

# qhasm:   mulr11 = mulrdx
# asm 1: mov  <mulrdx=int64#3,>mulr11=int64#6
# asm 2: mov  <mulrdx=%rdx,>mulr11=%r9
mov  %rdx,%r9

# qhasm:   mulrax = e0_stack
# asm 1: movq <e0_stack=stack64#13,>mulrax=int64#7
# asm 2: movq <e0_stack=96(%rsp),>mulrax=%rax
movq 96(%rsp),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * h2_stack
# asm 1: mulq  <h2_stack=stack64#10
# asm 2: mulq  <h2_stack=72(%rsp)
mulq  72(%rsp)

# qhasm:   rt2 = mulrax
# asm 1: mov  <mulrax=int64#7,>rt2=int64#8
# asm 2: mov  <mulrax=%rax,>rt2=%r10
mov  %rax,%r10

# qhasm:   mulr21 = mulrdx
# asm 1: mov  <mulrdx=int64#3,>mulr21=int64#9
# asm 2: mov  <mulrdx=%rdx,>mulr21=%r11
mov  %rdx,%r11

# qhasm:   mulrax = e0_stack
# asm 1: movq <e0_stack=stack64#13,>mulrax=int64#7
# asm 2: movq <e0_stack=96(%rsp),>mulrax=%rax
movq 96(%rsp),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * h3_stack
# asm 1: mulq  <h3_stack=stack64#11
# asm 2: mulq  <h3_stack=80(%rsp)
mulq  80(%rsp)

# qhasm:   rt3 = mulrax
# asm 1: mov  <mulrax=int64#7,>rt3=int64#10
# asm 2: mov  <mulrax=%rax,>rt3=%r12
mov  %rax,%r12

# qhasm:   mulr31 = mulrdx
# asm 1: mov  <mulrdx=int64#3,>mulr31=int64#11
# asm 2: mov  <mulrdx=%rdx,>mulr31=%r13
mov  %rdx,%r13

# qhasm:   mulrax = e0_stack
# asm 1: movq <e0_stack=stack64#13,>mulrax=int64#7
# asm 2: movq <e0_stack=96(%rsp),>mulrax=%rax
movq 96(%rsp),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * h4_stack
# asm 1: mulq  <h4_stack=stack64#12
# asm 2: mulq  <h4_stack=88(%rsp)
mulq  88(%rsp)

# qhasm:   rt4 = mulrax
# asm 1: mov  <mulrax=int64#7,>rt4=int64#12
# asm 2: mov  <mulrax=%rax,>rt4=%r14
mov  %rax,%r14

# qhasm:   mulr41 = mulrdx
# asm 1: mov  <mulrdx=int64#3,>mulr41=int64#13
# asm 2: mov  <mulrdx=%rdx,>mulr41=%r15
mov  %rdx,%r15

# qhasm:   mulrax = e1_stack
# asm 1: movq <e1_stack=stack64#14,>mulrax=int64#7
# asm 2: movq <e1_stack=104(%rsp),>mulrax=%rax
movq 104(%rsp),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * h0_stack
# asm 1: mulq  <h0_stack=stack64#8
# asm 2: mulq  <h0_stack=56(%rsp)
mulq  56(%rsp)

# qhasm:   carry? rt1 += mulrax
# asm 1: add  <mulrax=int64#7,<rt1=int64#5
# asm 2: add  <mulrax=%rax,<rt1=%r8
add  %rax,%r8

# qhasm:   mulr11 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr11=int64#6
# asm 2: adc <mulrdx=%rdx,<mulr11=%r9
adc %rdx,%r9

# qhasm:   mulrax = e1_stack
# asm 1: movq <e1_stack=stack64#14,>mulrax=int64#7
# asm 2: movq <e1_stack=104(%rsp),>mulrax=%rax
movq 104(%rsp),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * h1_stack
# asm 1: mulq  <h1_stack=stack64#9
# asm 2: mulq  <h1_stack=64(%rsp)
mulq  64(%rsp)

# qhasm:   carry? rt2 += mulrax
# asm 1: add  <mulrax=int64#7,<rt2=int64#8
# asm 2: add  <mulrax=%rax,<rt2=%r10
add  %rax,%r10

# qhasm:   mulr21 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr21=int64#9
# asm 2: adc <mulrdx=%rdx,<mulr21=%r11
adc %rdx,%r11

# qhasm:   mulrax = e1_stack
# asm 1: movq <e1_stack=stack64#14,>mulrax=int64#7
# asm 2: movq <e1_stack=104(%rsp),>mulrax=%rax
movq 104(%rsp),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * h2_stack
# asm 1: mulq  <h2_stack=stack64#10
# asm 2: mulq  <h2_stack=72(%rsp)
mulq  72(%rsp)

# qhasm:   carry? rt3 += mulrax
# asm 1: add  <mulrax=int64#7,<rt3=int64#10
# asm 2: add  <mulrax=%rax,<rt3=%r12
add  %rax,%r12

# qhasm:   mulr31 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr31=int64#11
# asm 2: adc <mulrdx=%rdx,<mulr31=%r13
adc %rdx,%r13

# qhasm:   mulrax = e1_stack
# asm 1: movq <e1_stack=stack64#14,>mulrax=int64#7
# asm 2: movq <e1_stack=104(%rsp),>mulrax=%rax
movq 104(%rsp),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * h3_stack
# asm 1: mulq  <h3_stack=stack64#11
# asm 2: mulq  <h3_stack=80(%rsp)
mulq  80(%rsp)

# qhasm:   carry? rt4 += mulrax
# asm 1: add  <mulrax=int64#7,<rt4=int64#12
# asm 2: add  <mulrax=%rax,<rt4=%r14
add  %rax,%r14

# qhasm:   mulr41 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr41=int64#13
# asm 2: adc <mulrdx=%rdx,<mulr41=%r15
adc %rdx,%r15

# qhasm:   mulrax = e1_stack
# asm 1: movq <e1_stack=stack64#14,>mulrax=int64#3
# asm 2: movq <e1_stack=104(%rsp),>mulrax=%rdx
movq 104(%rsp),%rdx

# qhasm:   mulrax *= 19
# asm 1: imulq  $19,<mulrax=int64#3,>mulrax=int64#7
# asm 2: imulq  $19,<mulrax=%rdx,>mulrax=%rax
imulq  $19,%rdx,%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * h4_stack
# asm 1: mulq  <h4_stack=stack64#12
# asm 2: mulq  <h4_stack=88(%rsp)
mulq  88(%rsp)

# qhasm:   carry? rt0 += mulrax
# asm 1: add  <mulrax=int64#7,<rt0=int64#2
# asm 2: add  <mulrax=%rax,<rt0=%rsi
add  %rax,%rsi

# qhasm:   mulr01 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr01=int64#4
# asm 2: adc <mulrdx=%rdx,<mulr01=%rcx
adc %rdx,%rcx

# qhasm:   mulrax = e2_stack
# asm 1: movq <e2_stack=stack64#15,>mulrax=int64#7
# asm 2: movq <e2_stack=112(%rsp),>mulrax=%rax
movq 112(%rsp),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * h0_stack
# asm 1: mulq  <h0_stack=stack64#8
# asm 2: mulq  <h0_stack=56(%rsp)
mulq  56(%rsp)

# qhasm:   carry? rt2 += mulrax
# asm 1: add  <mulrax=int64#7,<rt2=int64#8
# asm 2: add  <mulrax=%rax,<rt2=%r10
add  %rax,%r10

# qhasm:   mulr21 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr21=int64#9
# asm 2: adc <mulrdx=%rdx,<mulr21=%r11
adc %rdx,%r11

# qhasm:   mulrax = e2_stack
# asm 1: movq <e2_stack=stack64#15,>mulrax=int64#7
# asm 2: movq <e2_stack=112(%rsp),>mulrax=%rax
movq 112(%rsp),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * h1_stack
# asm 1: mulq  <h1_stack=stack64#9
# asm 2: mulq  <h1_stack=64(%rsp)
mulq  64(%rsp)

# qhasm:   carry? rt3 += mulrax
# asm 1: add  <mulrax=int64#7,<rt3=int64#10
# asm 2: add  <mulrax=%rax,<rt3=%r12
add  %rax,%r12

# qhasm:   mulr31 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr31=int64#11
# asm 2: adc <mulrdx=%rdx,<mulr31=%r13
adc %rdx,%r13

# qhasm:   mulrax = e2_stack
# asm 1: movq <e2_stack=stack64#15,>mulrax=int64#7
# asm 2: movq <e2_stack=112(%rsp),>mulrax=%rax
movq 112(%rsp),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * h2_stack
# asm 1: mulq  <h2_stack=stack64#10
# asm 2: mulq  <h2_stack=72(%rsp)
mulq  72(%rsp)

# qhasm:   carry? rt4 += mulrax
# asm 1: add  <mulrax=int64#7,<rt4=int64#12
# asm 2: add  <mulrax=%rax,<rt4=%r14
add  %rax,%r14

# qhasm:   mulr41 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr41=int64#13
# asm 2: adc <mulrdx=%rdx,<mulr41=%r15
adc %rdx,%r15

# qhasm:   mulrax = e2_stack
# asm 1: movq <e2_stack=stack64#15,>mulrax=int64#3
# asm 2: movq <e2_stack=112(%rsp),>mulrax=%rdx
movq 112(%rsp),%rdx

# qhasm:   mulrax *= 19
# asm 1: imulq  $19,<mulrax=int64#3,>mulrax=int64#7
# asm 2: imulq  $19,<mulrax=%rdx,>mulrax=%rax
imulq  $19,%rdx,%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * h3_stack
# asm 1: mulq  <h3_stack=stack64#11
# asm 2: mulq  <h3_stack=80(%rsp)
mulq  80(%rsp)

# qhasm:   carry? rt0 += mulrax
# asm 1: add  <mulrax=int64#7,<rt0=int64#2
# asm 2: add  <mulrax=%rax,<rt0=%rsi
add  %rax,%rsi

# qhasm:   mulr01 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr01=int64#4
# asm 2: adc <mulrdx=%rdx,<mulr01=%rcx
adc %rdx,%rcx

# qhasm:   mulrax = e2_stack
# asm 1: movq <e2_stack=stack64#15,>mulrax=int64#3
# asm 2: movq <e2_stack=112(%rsp),>mulrax=%rdx
movq 112(%rsp),%rdx

# qhasm:   mulrax *= 19
# asm 1: imulq  $19,<mulrax=int64#3,>mulrax=int64#7
# asm 2: imulq  $19,<mulrax=%rdx,>mulrax=%rax
imulq  $19,%rdx,%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * h4_stack
# asm 1: mulq  <h4_stack=stack64#12
# asm 2: mulq  <h4_stack=88(%rsp)
mulq  88(%rsp)

# qhasm:   carry? rt1 += mulrax
# asm 1: add  <mulrax=int64#7,<rt1=int64#5
# asm 2: add  <mulrax=%rax,<rt1=%r8
add  %rax,%r8

# qhasm:   mulr11 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr11=int64#6
# asm 2: adc <mulrdx=%rdx,<mulr11=%r9
adc %rdx,%r9

# qhasm:   mulrax = e3_stack
# asm 1: movq <e3_stack=stack64#16,>mulrax=int64#7
# asm 2: movq <e3_stack=120(%rsp),>mulrax=%rax
movq 120(%rsp),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * h0_stack
# asm 1: mulq  <h0_stack=stack64#8
# asm 2: mulq  <h0_stack=56(%rsp)
mulq  56(%rsp)

# qhasm:   carry? rt3 += mulrax
# asm 1: add  <mulrax=int64#7,<rt3=int64#10
# asm 2: add  <mulrax=%rax,<rt3=%r12
add  %rax,%r12

# qhasm:   mulr31 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr31=int64#11
# asm 2: adc <mulrdx=%rdx,<mulr31=%r13
adc %rdx,%r13

# qhasm:   mulrax = e3_stack
# asm 1: movq <e3_stack=stack64#16,>mulrax=int64#7
# asm 2: movq <e3_stack=120(%rsp),>mulrax=%rax
movq 120(%rsp),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * h1_stack
# asm 1: mulq  <h1_stack=stack64#9
# asm 2: mulq  <h1_stack=64(%rsp)
mulq  64(%rsp)

# qhasm:   carry? rt4 += mulrax
# asm 1: add  <mulrax=int64#7,<rt4=int64#12
# asm 2: add  <mulrax=%rax,<rt4=%r14
add  %rax,%r14

# qhasm:   mulr41 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr41=int64#13
# asm 2: adc <mulrdx=%rdx,<mulr41=%r15
adc %rdx,%r15

# qhasm:   mulrax = mulx319_stack
# asm 1: movq <mulx319_stack=stack64#18,>mulrax=int64#7
# asm 2: movq <mulx319_stack=136(%rsp),>mulrax=%rax
movq 136(%rsp),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * h3_stack
# asm 1: mulq  <h3_stack=stack64#11
# asm 2: mulq  <h3_stack=80(%rsp)
mulq  80(%rsp)

# qhasm:   carry? rt1 += mulrax
# asm 1: add  <mulrax=int64#7,<rt1=int64#5
# asm 2: add  <mulrax=%rax,<rt1=%r8
add  %rax,%r8

# qhasm:   mulr11 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr11=int64#6
# asm 2: adc <mulrdx=%rdx,<mulr11=%r9
adc %rdx,%r9

# qhasm:   mulrax = mulx319_stack
# asm 1: movq <mulx319_stack=stack64#18,>mulrax=int64#7
# asm 2: movq <mulx319_stack=136(%rsp),>mulrax=%rax
movq 136(%rsp),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * h4_stack
# asm 1: mulq  <h4_stack=stack64#12
# asm 2: mulq  <h4_stack=88(%rsp)
mulq  88(%rsp)

# qhasm:   carry? rt2 += mulrax
# asm 1: add  <mulrax=int64#7,<rt2=int64#8
# asm 2: add  <mulrax=%rax,<rt2=%r10
add  %rax,%r10

# qhasm:   mulr21 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr21=int64#9
# asm 2: adc <mulrdx=%rdx,<mulr21=%r11
adc %rdx,%r11

# qhasm:   mulrax = e4_stack
# asm 1: movq <e4_stack=stack64#17,>mulrax=int64#7
# asm 2: movq <e4_stack=128(%rsp),>mulrax=%rax
movq 128(%rsp),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * h0_stack
# asm 1: mulq  <h0_stack=stack64#8
# asm 2: mulq  <h0_stack=56(%rsp)
mulq  56(%rsp)

# qhasm:   carry? rt4 += mulrax
# asm 1: add  <mulrax=int64#7,<rt4=int64#12
# asm 2: add  <mulrax=%rax,<rt4=%r14
add  %rax,%r14

# qhasm:   mulr41 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr41=int64#13
# asm 2: adc <mulrdx=%rdx,<mulr41=%r15
adc %rdx,%r15

# qhasm:   mulrax = mulx419_stack
# asm 1: movq <mulx419_stack=stack64#19,>mulrax=int64#7
# asm 2: movq <mulx419_stack=144(%rsp),>mulrax=%rax
movq 144(%rsp),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * h2_stack
# asm 1: mulq  <h2_stack=stack64#10
# asm 2: mulq  <h2_stack=72(%rsp)
mulq  72(%rsp)

# qhasm:   carry? rt1 += mulrax
# asm 1: add  <mulrax=int64#7,<rt1=int64#5
# asm 2: add  <mulrax=%rax,<rt1=%r8
add  %rax,%r8

# qhasm:   mulr11 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr11=int64#6
# asm 2: adc <mulrdx=%rdx,<mulr11=%r9
adc %rdx,%r9

# qhasm:   mulrax = mulx419_stack
# asm 1: movq <mulx419_stack=stack64#19,>mulrax=int64#7
# asm 2: movq <mulx419_stack=144(%rsp),>mulrax=%rax
movq 144(%rsp),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * h3_stack
# asm 1: mulq  <h3_stack=stack64#11
# asm 2: mulq  <h3_stack=80(%rsp)
mulq  80(%rsp)

# qhasm:   carry? rt2 += mulrax
# asm 1: add  <mulrax=int64#7,<rt2=int64#8
# asm 2: add  <mulrax=%rax,<rt2=%r10
add  %rax,%r10

# qhasm:   mulr21 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr21=int64#9
# asm 2: adc <mulrdx=%rdx,<mulr21=%r11
adc %rdx,%r11

# qhasm:   mulrax = mulx419_stack
# asm 1: movq <mulx419_stack=stack64#19,>mulrax=int64#7
# asm 2: movq <mulx419_stack=144(%rsp),>mulrax=%rax
movq 144(%rsp),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * h4_stack
# asm 1: mulq  <h4_stack=stack64#12
# asm 2: mulq  <h4_stack=88(%rsp)
mulq  88(%rsp)

# qhasm:   carry? rt3 += mulrax
# asm 1: add  <mulrax=int64#7,<rt3=int64#10
# asm 2: add  <mulrax=%rax,<rt3=%r12
add  %rax,%r12

# qhasm:   mulr31 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr31=int64#11
# asm 2: adc <mulrdx=%rdx,<mulr31=%r13
adc %rdx,%r13
5897
# ----------------------------------------------------------------------
# NOTE(review): stray line numbers that had been fused onto every line of
# this machine-generated (qhasm) file are stripped here; instructions are
# otherwise untouched.
#
# Section: reduce the 128-bit limb pairs rtN:mulrN1 back to radix 2^51.
# Each pair is folded as (high.low) << 13 (shld) with low masked by
# REDMASK51; the top overflow is multiplied by 19 and added to rt0
# (reduction mod 2^255-19), then one sequential carry pass normalizes
# rt0..rt4, which are stored to rp+120 .. rp+152.
# ----------------------------------------------------------------------
# qhasm:   mulredmask = *(uint64 *) &crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51
# asm 1: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=int64#3
# asm 2: movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,>mulredmask=%rdx
movq crypto_sign_ed25519_amd64_51_30k_batch_REDMASK51,%rdx

# qhasm:   mulr01 = (mulr01.rt0) << 13
# asm 1: shld $13,<rt0=int64#2,<mulr01=int64#4
# asm 2: shld $13,<rt0=%rsi,<mulr01=%rcx
shld $13,%rsi,%rcx

# qhasm:   rt0 &= mulredmask
# asm 1: and  <mulredmask=int64#3,<rt0=int64#2
# asm 2: and  <mulredmask=%rdx,<rt0=%rsi
and  %rdx,%rsi

# qhasm:   mulr11 = (mulr11.rt1) << 13
# asm 1: shld $13,<rt1=int64#5,<mulr11=int64#6
# asm 2: shld $13,<rt1=%r8,<mulr11=%r9
shld $13,%r8,%r9

# qhasm:   rt1 &= mulredmask
# asm 1: and  <mulredmask=int64#3,<rt1=int64#5
# asm 2: and  <mulredmask=%rdx,<rt1=%r8
and  %rdx,%r8

# qhasm:   rt1 += mulr01
# asm 1: add  <mulr01=int64#4,<rt1=int64#5
# asm 2: add  <mulr01=%rcx,<rt1=%r8
add  %rcx,%r8

# qhasm:   mulr21 = (mulr21.rt2) << 13
# asm 1: shld $13,<rt2=int64#8,<mulr21=int64#9
# asm 2: shld $13,<rt2=%r10,<mulr21=%r11
shld $13,%r10,%r11

# qhasm:   rt2 &= mulredmask
# asm 1: and  <mulredmask=int64#3,<rt2=int64#8
# asm 2: and  <mulredmask=%rdx,<rt2=%r10
and  %rdx,%r10

# qhasm:   rt2 += mulr11
# asm 1: add  <mulr11=int64#6,<rt2=int64#8
# asm 2: add  <mulr11=%r9,<rt2=%r10
add  %r9,%r10

# qhasm:   mulr31 = (mulr31.rt3) << 13
# asm 1: shld $13,<rt3=int64#10,<mulr31=int64#11
# asm 2: shld $13,<rt3=%r12,<mulr31=%r13
shld $13,%r12,%r13

# qhasm:   rt3 &= mulredmask
# asm 1: and  <mulredmask=int64#3,<rt3=int64#10
# asm 2: and  <mulredmask=%rdx,<rt3=%r12
and  %rdx,%r12

# qhasm:   rt3 += mulr21
# asm 1: add  <mulr21=int64#9,<rt3=int64#10
# asm 2: add  <mulr21=%r11,<rt3=%r12
add  %r11,%r12

# qhasm:   mulr41 = (mulr41.rt4) << 13
# asm 1: shld $13,<rt4=int64#12,<mulr41=int64#13
# asm 2: shld $13,<rt4=%r14,<mulr41=%r15
shld $13,%r14,%r15

# qhasm:   rt4 &= mulredmask
# asm 1: and  <mulredmask=int64#3,<rt4=int64#12
# asm 2: and  <mulredmask=%rdx,<rt4=%r14
and  %rdx,%r14

# qhasm:   rt4 += mulr31
# asm 1: add  <mulr31=int64#11,<rt4=int64#12
# asm 2: add  <mulr31=%r13,<rt4=%r14
add  %r13,%r14

# qhasm:   mulr41 = mulr41 * 19
# asm 1: imulq  $19,<mulr41=int64#13,>mulr41=int64#4
# asm 2: imulq  $19,<mulr41=%r15,>mulr41=%rcx
imulq  $19,%r15,%rcx

# qhasm:   rt0 += mulr41
# asm 1: add  <mulr41=int64#4,<rt0=int64#2
# asm 2: add  <mulr41=%rcx,<rt0=%rsi
add  %rcx,%rsi

# qhasm:   mult = rt0
# asm 1: mov  <rt0=int64#2,>mult=int64#4
# asm 2: mov  <rt0=%rsi,>mult=%rcx
mov  %rsi,%rcx

# qhasm:   (uint64) mult >>= 51
# asm 1: shr  $51,<mult=int64#4
# asm 2: shr  $51,<mult=%rcx
shr  $51,%rcx

# qhasm:   mult += rt1
# asm 1: add  <rt1=int64#5,<mult=int64#4
# asm 2: add  <rt1=%r8,<mult=%rcx
add  %r8,%rcx

# qhasm:   rt1 = mult
# asm 1: mov  <mult=int64#4,>rt1=int64#5
# asm 2: mov  <mult=%rcx,>rt1=%r8
mov  %rcx,%r8

# qhasm:   (uint64) mult >>= 51
# asm 1: shr  $51,<mult=int64#4
# asm 2: shr  $51,<mult=%rcx
shr  $51,%rcx

# qhasm:   rt0 &= mulredmask
# asm 1: and  <mulredmask=int64#3,<rt0=int64#2
# asm 2: and  <mulredmask=%rdx,<rt0=%rsi
and  %rdx,%rsi

# qhasm:   mult += rt2
# asm 1: add  <rt2=int64#8,<mult=int64#4
# asm 2: add  <rt2=%r10,<mult=%rcx
add  %r10,%rcx

# qhasm:   rt2 = mult
# asm 1: mov  <mult=int64#4,>rt2=int64#6
# asm 2: mov  <mult=%rcx,>rt2=%r9
mov  %rcx,%r9

# qhasm:   (uint64) mult >>= 51
# asm 1: shr  $51,<mult=int64#4
# asm 2: shr  $51,<mult=%rcx
shr  $51,%rcx

# qhasm:   rt1 &= mulredmask
# asm 1: and  <mulredmask=int64#3,<rt1=int64#5
# asm 2: and  <mulredmask=%rdx,<rt1=%r8
and  %rdx,%r8

# qhasm:   mult += rt3
# asm 1: add  <rt3=int64#10,<mult=int64#4
# asm 2: add  <rt3=%r12,<mult=%rcx
add  %r12,%rcx

# qhasm:   rt3 = mult
# asm 1: mov  <mult=int64#4,>rt3=int64#7
# asm 2: mov  <mult=%rcx,>rt3=%rax
mov  %rcx,%rax

# qhasm:   (uint64) mult >>= 51
# asm 1: shr  $51,<mult=int64#4
# asm 2: shr  $51,<mult=%rcx
shr  $51,%rcx

# qhasm:   rt2 &= mulredmask
# asm 1: and  <mulredmask=int64#3,<rt2=int64#6
# asm 2: and  <mulredmask=%rdx,<rt2=%r9
and  %rdx,%r9

# qhasm:   mult += rt4
# asm 1: add  <rt4=int64#12,<mult=int64#4
# asm 2: add  <rt4=%r14,<mult=%rcx
add  %r14,%rcx

# qhasm:   rt4 = mult
# asm 1: mov  <mult=int64#4,>rt4=int64#8
# asm 2: mov  <mult=%rcx,>rt4=%r10
mov  %rcx,%r10

# qhasm:   (uint64) mult >>= 51
# asm 1: shr  $51,<mult=int64#4
# asm 2: shr  $51,<mult=%rcx
shr  $51,%rcx

# qhasm:   rt3 &= mulredmask
# asm 1: and  <mulredmask=int64#3,<rt3=int64#7
# asm 2: and  <mulredmask=%rdx,<rt3=%rax
and  %rdx,%rax

# qhasm:   mult *= 19
# asm 1: imulq  $19,<mult=int64#4,>mult=int64#4
# asm 2: imulq  $19,<mult=%rcx,>mult=%rcx
imulq  $19,%rcx,%rcx

# qhasm:   rt0 += mult
# asm 1: add  <mult=int64#4,<rt0=int64#2
# asm 2: add  <mult=%rcx,<rt0=%rsi
add  %rcx,%rsi

# qhasm:   rt4 &= mulredmask
# asm 1: and  <mulredmask=int64#3,<rt4=int64#8
# asm 2: and  <mulredmask=%rdx,<rt4=%r10
and  %rdx,%r10

# qhasm: *(uint64 *)(rp + 120) = rt0
# asm 1: movq   <rt0=int64#2,120(<rp=int64#1)
# asm 2: movq   <rt0=%rsi,120(<rp=%rdi)
movq   %rsi,120(%rdi)

# qhasm: *(uint64 *)(rp + 128) = rt1
# asm 1: movq   <rt1=int64#5,128(<rp=int64#1)
# asm 2: movq   <rt1=%r8,128(<rp=%rdi)
movq   %r8,128(%rdi)

# qhasm: *(uint64 *)(rp + 136) = rt2
# asm 1: movq   <rt2=int64#6,136(<rp=int64#1)
# asm 2: movq   <rt2=%r9,136(<rp=%rdi)
movq   %r9,136(%rdi)

# qhasm: *(uint64 *)(rp + 144) = rt3
# asm 1: movq   <rt3=int64#7,144(<rp=int64#1)
# asm 2: movq   <rt3=%rax,144(<rp=%rdi)
movq   %rax,144(%rdi)

# qhasm: *(uint64 *)(rp + 152) = rt4
# asm 1: movq   <rt4=int64#8,152(<rp=int64#1)
# asm 2: movq   <rt4=%r10,152(<rp=%rdi)
movq   %r10,152(%rdi)
6112
# ----------------------------------------------------------------------
# NOTE(review): stray line numbers that had been fused onto every line of
# this machine-generated (qhasm) file are stripped here; instructions are
# otherwise untouched.
#
# Section: function epilogue.  Restore the seven caller-owned registers
# that the prologue (outside this view) spilled to 0..48(%rsp), then the
# qhasm "leave" sequence unwinds the frame (the frame size was kept in
# caller1_stack, so the restored %r11 doubles as the %rsp adjustment) and
# returns.  %rbx/%rbp are callee-saved under the SysV AMD64 ABI.
# ----------------------------------------------------------------------
# qhasm:   caller1 = caller1_stack
# asm 1: movq <caller1_stack=stack64#1,>caller1=int64#9
# asm 2: movq <caller1_stack=0(%rsp),>caller1=%r11
movq 0(%rsp),%r11

# qhasm:   caller2 = caller2_stack
# asm 1: movq <caller2_stack=stack64#2,>caller2=int64#10
# asm 2: movq <caller2_stack=8(%rsp),>caller2=%r12
movq 8(%rsp),%r12

# qhasm:   caller3 = caller3_stack
# asm 1: movq <caller3_stack=stack64#3,>caller3=int64#11
# asm 2: movq <caller3_stack=16(%rsp),>caller3=%r13
movq 16(%rsp),%r13

# qhasm:   caller4 = caller4_stack
# asm 1: movq <caller4_stack=stack64#4,>caller4=int64#12
# asm 2: movq <caller4_stack=24(%rsp),>caller4=%r14
movq 24(%rsp),%r14

# qhasm:   caller5 = caller5_stack
# asm 1: movq <caller5_stack=stack64#5,>caller5=int64#13
# asm 2: movq <caller5_stack=32(%rsp),>caller5=%r15
movq 32(%rsp),%r15

# qhasm:   caller6 = caller6_stack
# asm 1: movq <caller6_stack=stack64#6,>caller6=int64#14
# asm 2: movq <caller6_stack=40(%rsp),>caller6=%rbx
movq 40(%rsp),%rbx

# qhasm:   caller7 = caller7_stack
# asm 1: movq <caller7_stack=stack64#7,>caller7=int64#15
# asm 2: movq <caller7_stack=48(%rsp),>caller7=%rbp
movq 48(%rsp),%rbp

# qhasm: leave
add %r11,%rsp
mov %rdi,%rax
mov %rsi,%rdx
ret
6153