
# qhasm: int64 rp

# qhasm: int64 qp

# qhasm: input rp

# qhasm: input qp

# qhasm:   int64 caller1

# qhasm:   int64 caller2

# qhasm:   int64 caller3

# qhasm:   int64 caller4

# qhasm:   int64 caller5

# qhasm:   int64 caller6

# qhasm:   int64 caller7

# qhasm:   caller caller1

# qhasm:   caller caller2

# qhasm:   caller caller3

# qhasm:   caller caller4

# qhasm:   caller caller5

# qhasm:   caller caller6

# qhasm:   caller caller7

# qhasm:   stack64 caller1_stack

# qhasm:   stack64 caller2_stack

# qhasm:   stack64 caller3_stack

# qhasm:   stack64 caller4_stack

# qhasm:   stack64 caller5_stack

# qhasm:   stack64 caller6_stack

# qhasm:   stack64 caller7_stack

# qhasm: int64 a0

# qhasm: int64 a1

# qhasm: int64 a2

# qhasm: int64 a3

# qhasm: stack64 a0_stack

# qhasm: stack64 a1_stack

# qhasm: stack64 a2_stack

# qhasm: stack64 a3_stack

# qhasm: int64 b0

# qhasm: int64 b1

# qhasm: int64 b2

# qhasm: int64 b3

# qhasm: stack64 b0_stack

# qhasm: stack64 b1_stack

# qhasm: stack64 b2_stack

# qhasm: stack64 b3_stack

# qhasm: int64 c0

# qhasm: int64 c1

# qhasm: int64 c2

# qhasm: int64 c3

# qhasm: stack64 c0_stack

# qhasm: stack64 c1_stack

# qhasm: stack64 c2_stack

# qhasm: stack64 c3_stack

# qhasm: int64 d0

# qhasm: int64 d1

# qhasm: int64 d2

# qhasm: int64 d3

# qhasm: stack64 d0_stack

# qhasm: stack64 d1_stack

# qhasm: stack64 d2_stack

# qhasm: stack64 d3_stack

# qhasm: int64 e0

# qhasm: int64 e1

# qhasm: int64 e2

# qhasm: int64 e3

# qhasm: stack64 e0_stack

# qhasm: stack64 e1_stack

# qhasm: stack64 e2_stack

# qhasm: stack64 e3_stack

# qhasm: int64 f0

# qhasm: int64 f1

# qhasm: int64 f2

# qhasm: int64 f3

# qhasm: stack64 f0_stack

# qhasm: stack64 f1_stack

# qhasm: stack64 f2_stack

# qhasm: stack64 f3_stack

# qhasm: int64 g0

# qhasm: int64 g1

# qhasm: int64 g2

# qhasm: int64 g3

# qhasm: stack64 g0_stack

# qhasm: stack64 g1_stack

# qhasm: stack64 g2_stack

# qhasm: stack64 g3_stack

# qhasm: int64 h0

# qhasm: int64 h1

# qhasm: int64 h2

# qhasm: int64 h3

# qhasm: stack64 h0_stack

# qhasm: stack64 h1_stack

# qhasm: stack64 h2_stack

# qhasm: stack64 h3_stack

# qhasm: int64 qt0

# qhasm: int64 qt1

# qhasm: int64 qt2

# qhasm: int64 qt3

# qhasm: stack64 qt0_stack

# qhasm: stack64 qt1_stack

# qhasm: stack64 qt2_stack

# qhasm: stack64 qt3_stack

# qhasm: int64 t10

# qhasm: int64 t11

# qhasm: int64 t12

# qhasm: int64 t13

# qhasm: stack64 t10_stack

# qhasm: stack64 t11_stack

# qhasm: stack64 t12_stack

# qhasm: stack64 t13_stack

# qhasm: int64 t20

# qhasm: int64 t21

# qhasm: int64 t22

# qhasm: int64 t23

# qhasm: stack64 t20_stack

# qhasm: stack64 t21_stack

# qhasm: stack64 t22_stack

# qhasm: stack64 t23_stack

# qhasm: int64 rx0

# qhasm: int64 rx1

# qhasm: int64 rx2

# qhasm: int64 rx3

# qhasm: int64 ry0

# qhasm: int64 ry1

# qhasm: int64 ry2

# qhasm: int64 ry3

# qhasm: int64 rz0

# qhasm: int64 rz1

# qhasm: int64 rz2

# qhasm: int64 rz3

# qhasm: int64 rt0

# qhasm: int64 rt1

# qhasm: int64 rt2

# qhasm: int64 rt3

# qhasm: int64 mulr4

# qhasm: int64 mulr5

# qhasm: int64 mulr6

# qhasm: int64 mulr7

# qhasm: int64 mulr8

# qhasm: int64 mulrax

# qhasm: int64 mulrdx

# qhasm: int64 mulx0

# qhasm: int64 mulx1

# qhasm: int64 mulx2

# qhasm: int64 mulx3

# qhasm: int64 mulc

# qhasm: int64 mulzero

# qhasm: int64 muli38

# qhasm: int64 addt0

# qhasm: int64 addt1

# qhasm: int64 subt0

# qhasm: int64 subt1

# qhasm: enter crypto_sign_ed25519_amd64_64_ge25519_nielsadd2
.text
.p2align 5
.globl _crypto_sign_ed25519_amd64_64_ge25519_nielsadd2
.globl crypto_sign_ed25519_amd64_64_ge25519_nielsadd2
_crypto_sign_ed25519_amd64_64_ge25519_nielsadd2:
crypto_sign_ed25519_amd64_64_ge25519_nielsadd2:
mov %rsp,%r11
and $31,%r11
add $192,%r11
sub %r11,%rsp

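# Note: this routine performs the mixed addition r = r + q, assuming the
# standard ed25519-amd64-64 layouts: rp points at an extended-coordinates
# point (X at rp+0, Y at rp+32, Z at rp+64, T at rp+96) and qp at a
# precomputed Niels point (ysubx at qp+0, xaddy at qp+32, t2d at qp+64).
# Each field element is four 64-bit limbs, reduced modulo 2^255-19.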
# qhasm:   caller1_stack = caller1
# asm 1: movq <caller1=int64#9,>caller1_stack=stack64#1
# asm 2: movq <caller1=%r11,>caller1_stack=0(%rsp)
movq %r11,0(%rsp)

# qhasm:   caller2_stack = caller2
# asm 1: movq <caller2=int64#10,>caller2_stack=stack64#2
# asm 2: movq <caller2=%r12,>caller2_stack=8(%rsp)
movq %r12,8(%rsp)

# qhasm:   caller3_stack = caller3
# asm 1: movq <caller3=int64#11,>caller3_stack=stack64#3
# asm 2: movq <caller3=%r13,>caller3_stack=16(%rsp)
movq %r13,16(%rsp)

# qhasm:   caller4_stack = caller4
# asm 1: movq <caller4=int64#12,>caller4_stack=stack64#4
# asm 2: movq <caller4=%r14,>caller4_stack=24(%rsp)
movq %r14,24(%rsp)

# qhasm:   caller5_stack = caller5
# asm 1: movq <caller5=int64#13,>caller5_stack=stack64#5
# asm 2: movq <caller5=%r15,>caller5_stack=32(%rsp)
movq %r15,32(%rsp)

# qhasm:   caller6_stack = caller6
# asm 1: movq <caller6=int64#14,>caller6_stack=stack64#6
# asm 2: movq <caller6=%rbx,>caller6_stack=40(%rsp)
movq %rbx,40(%rsp)

# qhasm:   caller7_stack = caller7
# asm 1: movq <caller7=int64#15,>caller7_stack=stack64#7
# asm 2: movq <caller7=%rbp,>caller7_stack=48(%rsp)
movq %rbp,48(%rsp)

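# Note: load Y1 (rp+32..56) into a and b, then compute a = Y1 - X1 and
# b = Y1 + X1. Any borrow/carry out of the top limb is folded back in by
# subtracting/adding 38, since 2^256 = 38 (mod 2^255-19).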
# qhasm: a0 = *(uint64 *)(rp + 32)
# asm 1: movq   32(<rp=int64#1),>a0=int64#3
# asm 2: movq   32(<rp=%rdi),>a0=%rdx
movq   32(%rdi),%rdx

# qhasm: a1 = *(uint64 *)(rp + 40)
# asm 1: movq   40(<rp=int64#1),>a1=int64#4
# asm 2: movq   40(<rp=%rdi),>a1=%rcx
movq   40(%rdi),%rcx

# qhasm: a2 = *(uint64 *)(rp + 48)
# asm 1: movq   48(<rp=int64#1),>a2=int64#5
# asm 2: movq   48(<rp=%rdi),>a2=%r8
movq   48(%rdi),%r8

# qhasm: a3 = *(uint64 *)(rp + 56)
# asm 1: movq   56(<rp=int64#1),>a3=int64#6
# asm 2: movq   56(<rp=%rdi),>a3=%r9
movq   56(%rdi),%r9

# qhasm: b0 = a0
# asm 1: mov  <a0=int64#3,>b0=int64#7
# asm 2: mov  <a0=%rdx,>b0=%rax
mov  %rdx,%rax

# qhasm: b1 = a1
# asm 1: mov  <a1=int64#4,>b1=int64#8
# asm 2: mov  <a1=%rcx,>b1=%r10
mov  %rcx,%r10

# qhasm: b2 = a2
# asm 1: mov  <a2=int64#5,>b2=int64#9
# asm 2: mov  <a2=%r8,>b2=%r11
mov  %r8,%r11

# qhasm: b3 = a3
# asm 1: mov  <a3=int64#6,>b3=int64#10
# asm 2: mov  <a3=%r9,>b3=%r12
mov  %r9,%r12

# qhasm:   carry? a0 -= *(uint64 *) (rp + 0)
# asm 1: subq 0(<rp=int64#1),<a0=int64#3
# asm 2: subq 0(<rp=%rdi),<a0=%rdx
subq 0(%rdi),%rdx

# qhasm:   carry? a1 -= *(uint64 *) (rp + 8) - carry
# asm 1: sbbq 8(<rp=int64#1),<a1=int64#4
# asm 2: sbbq 8(<rp=%rdi),<a1=%rcx
sbbq 8(%rdi),%rcx

# qhasm:   carry? a2 -= *(uint64 *) (rp + 16) - carry
# asm 1: sbbq 16(<rp=int64#1),<a2=int64#5
# asm 2: sbbq 16(<rp=%rdi),<a2=%r8
sbbq 16(%rdi),%r8

# qhasm:   carry? a3 -= *(uint64 *) (rp + 24) - carry
# asm 1: sbbq 24(<rp=int64#1),<a3=int64#6
# asm 2: sbbq 24(<rp=%rdi),<a3=%r9
sbbq 24(%rdi),%r9

# qhasm:   subt0 = 0
# asm 1: mov  $0,>subt0=int64#11
# asm 2: mov  $0,>subt0=%r13
mov  $0,%r13

# qhasm:   subt1 = 38
# asm 1: mov  $38,>subt1=int64#12
# asm 2: mov  $38,>subt1=%r14
mov  $38,%r14

# qhasm:   subt1 = subt0 if !carry
# asm 1: cmovae <subt0=int64#11,<subt1=int64#12
# asm 2: cmovae <subt0=%r13,<subt1=%r14
cmovae %r13,%r14

# qhasm:   carry? a0 -= subt1
# asm 1: sub  <subt1=int64#12,<a0=int64#3
# asm 2: sub  <subt1=%r14,<a0=%rdx
sub  %r14,%rdx

# qhasm:   carry? a1 -= subt0 - carry
# asm 1: sbb  <subt0=int64#11,<a1=int64#4
# asm 2: sbb  <subt0=%r13,<a1=%rcx
sbb  %r13,%rcx

# qhasm:   carry? a2 -= subt0 - carry
# asm 1: sbb  <subt0=int64#11,<a2=int64#5
# asm 2: sbb  <subt0=%r13,<a2=%r8
sbb  %r13,%r8

# qhasm:   carry? a3 -= subt0 - carry
# asm 1: sbb  <subt0=int64#11,<a3=int64#6
# asm 2: sbb  <subt0=%r13,<a3=%r9
sbb  %r13,%r9

# qhasm:   subt0 = subt1 if carry
# asm 1: cmovc <subt1=int64#12,<subt0=int64#11
# asm 2: cmovc <subt1=%r14,<subt0=%r13
cmovc %r14,%r13

# qhasm:   a0 -= subt0
# asm 1: sub  <subt0=int64#11,<a0=int64#3
# asm 2: sub  <subt0=%r13,<a0=%rdx
sub  %r13,%rdx

# qhasm:   carry? b0 += *(uint64 *) (rp + 0)
# asm 1: addq 0(<rp=int64#1),<b0=int64#7
# asm 2: addq 0(<rp=%rdi),<b0=%rax
addq 0(%rdi),%rax

# qhasm:   carry? b1 += *(uint64 *) (rp + 8) + carry
# asm 1: adcq 8(<rp=int64#1),<b1=int64#8
# asm 2: adcq 8(<rp=%rdi),<b1=%r10
adcq 8(%rdi),%r10

# qhasm:   carry? b2 += *(uint64 *) (rp + 16) + carry
# asm 1: adcq 16(<rp=int64#1),<b2=int64#9
# asm 2: adcq 16(<rp=%rdi),<b2=%r11
adcq 16(%rdi),%r11

# qhasm:   carry? b3 += *(uint64 *) (rp + 24) + carry
# asm 1: adcq 24(<rp=int64#1),<b3=int64#10
# asm 2: adcq 24(<rp=%rdi),<b3=%r12
adcq 24(%rdi),%r12

# qhasm:   addt0 = 0
# asm 1: mov  $0,>addt0=int64#11
# asm 2: mov  $0,>addt0=%r13
mov  $0,%r13

# qhasm:   addt1 = 38
# asm 1: mov  $38,>addt1=int64#12
# asm 2: mov  $38,>addt1=%r14
mov  $38,%r14

# qhasm:   addt1 = addt0 if !carry
# asm 1: cmovae <addt0=int64#11,<addt1=int64#12
# asm 2: cmovae <addt0=%r13,<addt1=%r14
cmovae %r13,%r14

# qhasm:   carry? b0 += addt1
# asm 1: add  <addt1=int64#12,<b0=int64#7
# asm 2: add  <addt1=%r14,<b0=%rax
add  %r14,%rax

# qhasm:   carry? b1 += addt0 + carry
# asm 1: adc <addt0=int64#11,<b1=int64#8
# asm 2: adc <addt0=%r13,<b1=%r10
adc %r13,%r10

# qhasm:   carry? b2 += addt0 + carry
# asm 1: adc <addt0=int64#11,<b2=int64#9
# asm 2: adc <addt0=%r13,<b2=%r11
adc %r13,%r11

# qhasm:   carry? b3 += addt0 + carry
# asm 1: adc <addt0=int64#11,<b3=int64#10
# asm 2: adc <addt0=%r13,<b3=%r12
adc %r13,%r12

# qhasm:   addt0 = addt1 if carry
# asm 1: cmovc <addt1=int64#12,<addt0=int64#11
# asm 2: cmovc <addt1=%r14,<addt0=%r13
cmovc %r14,%r13

# qhasm:   b0 += addt0
# asm 1: add  <addt0=int64#11,<b0=int64#7
# asm 2: add  <addt0=%r13,<b0=%rax
add  %r13,%rax

# qhasm: a0_stack = a0
# asm 1: movq <a0=int64#3,>a0_stack=stack64#8
# asm 2: movq <a0=%rdx,>a0_stack=56(%rsp)
movq %rdx,56(%rsp)

# qhasm: a1_stack = a1
# asm 1: movq <a1=int64#4,>a1_stack=stack64#9
# asm 2: movq <a1=%rcx,>a1_stack=64(%rsp)
movq %rcx,64(%rsp)

# qhasm: a2_stack = a2
# asm 1: movq <a2=int64#5,>a2_stack=stack64#10
# asm 2: movq <a2=%r8,>a2_stack=72(%rsp)
movq %r8,72(%rsp)

# qhasm: a3_stack = a3
# asm 1: movq <a3=int64#6,>a3_stack=stack64#11
# asm 2: movq <a3=%r9,>a3_stack=80(%rsp)
movq %r9,80(%rsp)

# qhasm: b0_stack = b0
# asm 1: movq <b0=int64#7,>b0_stack=stack64#12
# asm 2: movq <b0=%rax,>b0_stack=88(%rsp)
movq %rax,88(%rsp)

# qhasm: b1_stack = b1
# asm 1: movq <b1=int64#8,>b1_stack=stack64#13
# asm 2: movq <b1=%r10,>b1_stack=96(%rsp)
movq %r10,96(%rsp)

# qhasm: b2_stack = b2
# asm 1: movq <b2=int64#9,>b2_stack=stack64#14
# asm 2: movq <b2=%r11,>b2_stack=104(%rsp)
movq %r11,104(%rsp)

# qhasm: b3_stack = b3
# asm 1: movq <b3=int64#10,>b3_stack=stack64#15
# asm 2: movq <b3=%r12,>b3_stack=112(%rsp)
movq %r12,112(%rsp)

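# Note: 256x256-bit schoolbook multiplication a = a * ysubx(q), i.e. the
# four limbs of (Y1-X1) times the four limbs at qp+0..24.  The low half
# accumulates in a0..a3 and the high half in mulr4..mulr7, reduced below.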
# qhasm:   mulr4 = 0
# asm 1: mov  $0,>mulr4=int64#4
# asm 2: mov  $0,>mulr4=%rcx
mov  $0,%rcx

# qhasm:   mulr5 = 0
# asm 1: mov  $0,>mulr5=int64#5
# asm 2: mov  $0,>mulr5=%r8
mov  $0,%r8

# qhasm:   mulr6 = 0
# asm 1: mov  $0,>mulr6=int64#6
# asm 2: mov  $0,>mulr6=%r9
mov  $0,%r9

# qhasm:   mulr7 = 0
# asm 1: mov  $0,>mulr7=int64#8
# asm 2: mov  $0,>mulr7=%r10
mov  $0,%r10

# qhasm:   mulx0 = a0_stack
# asm 1: movq <a0_stack=stack64#8,>mulx0=int64#9
# asm 2: movq <a0_stack=56(%rsp),>mulx0=%r11
movq 56(%rsp),%r11

# qhasm:   mulrax = *(uint64 *)(qp + 0)
# asm 1: movq   0(<qp=int64#2),>mulrax=int64#7
# asm 2: movq   0(<qp=%rsi),>mulrax=%rax
movq   0(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx0
# asm 1: mul  <mulx0=int64#9
# asm 2: mul  <mulx0=%r11
mul  %r11

# qhasm:   a0 = mulrax
# asm 1: mov  <mulrax=int64#7,>a0=int64#10
# asm 2: mov  <mulrax=%rax,>a0=%r12
mov  %rax,%r12

# qhasm:   a1 = mulrdx
# asm 1: mov  <mulrdx=int64#3,>a1=int64#11
# asm 2: mov  <mulrdx=%rdx,>a1=%r13
mov  %rdx,%r13

# qhasm:   mulrax = *(uint64 *)(qp + 8)
# asm 1: movq   8(<qp=int64#2),>mulrax=int64#7
# asm 2: movq   8(<qp=%rsi),>mulrax=%rax
movq   8(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx0
# asm 1: mul  <mulx0=int64#9
# asm 2: mul  <mulx0=%r11
mul  %r11

# qhasm:   carry? a1 += mulrax
# asm 1: add  <mulrax=int64#7,<a1=int64#11
# asm 2: add  <mulrax=%rax,<a1=%r13
add  %rax,%r13

# qhasm:   a2 = 0
# asm 1: mov  $0,>a2=int64#12
# asm 2: mov  $0,>a2=%r14
mov  $0,%r14

# qhasm:   a2 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<a2=int64#12
# asm 2: adc <mulrdx=%rdx,<a2=%r14
adc %rdx,%r14

# qhasm:   mulrax = *(uint64 *)(qp + 16)
# asm 1: movq   16(<qp=int64#2),>mulrax=int64#7
# asm 2: movq   16(<qp=%rsi),>mulrax=%rax
movq   16(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx0
# asm 1: mul  <mulx0=int64#9
# asm 2: mul  <mulx0=%r11
mul  %r11

# qhasm:   carry? a2 += mulrax
# asm 1: add  <mulrax=int64#7,<a2=int64#12
# asm 2: add  <mulrax=%rax,<a2=%r14
add  %rax,%r14

# qhasm:   a3 = 0
# asm 1: mov  $0,>a3=int64#13
# asm 2: mov  $0,>a3=%r15
mov  $0,%r15

# qhasm:   a3 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<a3=int64#13
# asm 2: adc <mulrdx=%rdx,<a3=%r15
adc %rdx,%r15

# qhasm:   mulrax = *(uint64 *)(qp + 24)
# asm 1: movq   24(<qp=int64#2),>mulrax=int64#7
# asm 2: movq   24(<qp=%rsi),>mulrax=%rax
movq   24(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx0
# asm 1: mul  <mulx0=int64#9
# asm 2: mul  <mulx0=%r11
mul  %r11

# qhasm:   carry? a3 += mulrax
# asm 1: add  <mulrax=int64#7,<a3=int64#13
# asm 2: add  <mulrax=%rax,<a3=%r15
add  %rax,%r15

# qhasm:   mulr4 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr4=int64#4
# asm 2: adc <mulrdx=%rdx,<mulr4=%rcx
adc %rdx,%rcx

# qhasm:   mulx1 = a1_stack
# asm 1: movq <a1_stack=stack64#9,>mulx1=int64#9
# asm 2: movq <a1_stack=64(%rsp),>mulx1=%r11
movq 64(%rsp),%r11

# qhasm:   mulrax = *(uint64 *)(qp + 0)
# asm 1: movq   0(<qp=int64#2),>mulrax=int64#7
# asm 2: movq   0(<qp=%rsi),>mulrax=%rax
movq   0(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx1
# asm 1: mul  <mulx1=int64#9
# asm 2: mul  <mulx1=%r11
mul  %r11

# qhasm:   carry? a1 += mulrax
# asm 1: add  <mulrax=int64#7,<a1=int64#11
# asm 2: add  <mulrax=%rax,<a1=%r13
add  %rax,%r13

# qhasm:   mulc = 0
# asm 1: mov  $0,>mulc=int64#14
# asm 2: mov  $0,>mulc=%rbx
mov  $0,%rbx

# qhasm:   mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
adc %rdx,%rbx

# qhasm:   mulrax = *(uint64 *)(qp + 8)
# asm 1: movq   8(<qp=int64#2),>mulrax=int64#7
# asm 2: movq   8(<qp=%rsi),>mulrax=%rax
movq   8(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx1
# asm 1: mul  <mulx1=int64#9
# asm 2: mul  <mulx1=%r11
mul  %r11

# qhasm:   carry? a2 += mulrax
# asm 1: add  <mulrax=int64#7,<a2=int64#12
# asm 2: add  <mulrax=%rax,<a2=%r14
add  %rax,%r14

# qhasm:   mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx

# qhasm:   carry? a2 += mulc
# asm 1: add  <mulc=int64#14,<a2=int64#12
# asm 2: add  <mulc=%rbx,<a2=%r14
add  %rbx,%r14

# qhasm:   mulc = 0
# asm 1: mov  $0,>mulc=int64#14
# asm 2: mov  $0,>mulc=%rbx
mov  $0,%rbx

# qhasm:   mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
adc %rdx,%rbx

# qhasm:   mulrax = *(uint64 *)(qp + 16)
# asm 1: movq   16(<qp=int64#2),>mulrax=int64#7
# asm 2: movq   16(<qp=%rsi),>mulrax=%rax
movq   16(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx1
# asm 1: mul  <mulx1=int64#9
# asm 2: mul  <mulx1=%r11
mul  %r11

# qhasm:   carry? a3 += mulrax
# asm 1: add  <mulrax=int64#7,<a3=int64#13
# asm 2: add  <mulrax=%rax,<a3=%r15
add  %rax,%r15

# qhasm:   mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx

# qhasm:   carry? a3 += mulc
# asm 1: add  <mulc=int64#14,<a3=int64#13
# asm 2: add  <mulc=%rbx,<a3=%r15
add  %rbx,%r15

# qhasm:   mulc = 0
# asm 1: mov  $0,>mulc=int64#14
# asm 2: mov  $0,>mulc=%rbx
mov  $0,%rbx

# qhasm:   mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
adc %rdx,%rbx

# qhasm:   mulrax = *(uint64 *)(qp + 24)
# asm 1: movq   24(<qp=int64#2),>mulrax=int64#7
# asm 2: movq   24(<qp=%rsi),>mulrax=%rax
movq   24(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx1
# asm 1: mul  <mulx1=int64#9
# asm 2: mul  <mulx1=%r11
mul  %r11

# qhasm:   carry? mulr4 += mulrax
# asm 1: add  <mulrax=int64#7,<mulr4=int64#4
# asm 2: add  <mulrax=%rax,<mulr4=%rcx
add  %rax,%rcx

# qhasm:   mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx

# qhasm:   carry? mulr4 += mulc
# asm 1: add  <mulc=int64#14,<mulr4=int64#4
# asm 2: add  <mulc=%rbx,<mulr4=%rcx
add  %rbx,%rcx

# qhasm:   mulr5 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr5=int64#5
# asm 2: adc <mulrdx=%rdx,<mulr5=%r8
adc %rdx,%r8

# qhasm:   mulx2 = a2_stack
# asm 1: movq <a2_stack=stack64#10,>mulx2=int64#9
# asm 2: movq <a2_stack=72(%rsp),>mulx2=%r11
movq 72(%rsp),%r11

# qhasm:   mulrax = *(uint64 *)(qp + 0)
# asm 1: movq   0(<qp=int64#2),>mulrax=int64#7
# asm 2: movq   0(<qp=%rsi),>mulrax=%rax
movq   0(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx2
# asm 1: mul  <mulx2=int64#9
# asm 2: mul  <mulx2=%r11
mul  %r11

# qhasm:   carry? a2 += mulrax
# asm 1: add  <mulrax=int64#7,<a2=int64#12
# asm 2: add  <mulrax=%rax,<a2=%r14
add  %rax,%r14

# qhasm:   mulc = 0
# asm 1: mov  $0,>mulc=int64#14
# asm 2: mov  $0,>mulc=%rbx
mov  $0,%rbx

# qhasm:   mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
adc %rdx,%rbx

# qhasm:   mulrax = *(uint64 *)(qp + 8)
# asm 1: movq   8(<qp=int64#2),>mulrax=int64#7
# asm 2: movq   8(<qp=%rsi),>mulrax=%rax
movq   8(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx2
# asm 1: mul  <mulx2=int64#9
# asm 2: mul  <mulx2=%r11
mul  %r11

# qhasm:   carry? a3 += mulrax
# asm 1: add  <mulrax=int64#7,<a3=int64#13
# asm 2: add  <mulrax=%rax,<a3=%r15
add  %rax,%r15

# qhasm:   mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx

# qhasm:   carry? a3 += mulc
# asm 1: add  <mulc=int64#14,<a3=int64#13
# asm 2: add  <mulc=%rbx,<a3=%r15
add  %rbx,%r15

# qhasm:   mulc = 0
# asm 1: mov  $0,>mulc=int64#14
# asm 2: mov  $0,>mulc=%rbx
mov  $0,%rbx

# qhasm:   mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
adc %rdx,%rbx

# qhasm:   mulrax = *(uint64 *)(qp + 16)
# asm 1: movq   16(<qp=int64#2),>mulrax=int64#7
# asm 2: movq   16(<qp=%rsi),>mulrax=%rax
movq   16(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx2
# asm 1: mul  <mulx2=int64#9
# asm 2: mul  <mulx2=%r11
mul  %r11

# qhasm:   carry? mulr4 += mulrax
# asm 1: add  <mulrax=int64#7,<mulr4=int64#4
# asm 2: add  <mulrax=%rax,<mulr4=%rcx
add  %rax,%rcx

# qhasm:   mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx

# qhasm:   carry? mulr4 += mulc
# asm 1: add  <mulc=int64#14,<mulr4=int64#4
# asm 2: add  <mulc=%rbx,<mulr4=%rcx
add  %rbx,%rcx

# qhasm:   mulc = 0
# asm 1: mov  $0,>mulc=int64#14
# asm 2: mov  $0,>mulc=%rbx
mov  $0,%rbx

# qhasm:   mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
adc %rdx,%rbx

# qhasm:   mulrax = *(uint64 *)(qp + 24)
# asm 1: movq   24(<qp=int64#2),>mulrax=int64#7
# asm 2: movq   24(<qp=%rsi),>mulrax=%rax
movq   24(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx2
# asm 1: mul  <mulx2=int64#9
# asm 2: mul  <mulx2=%r11
mul  %r11

# qhasm:   carry? mulr5 += mulrax
# asm 1: add  <mulrax=int64#7,<mulr5=int64#5
# asm 2: add  <mulrax=%rax,<mulr5=%r8
add  %rax,%r8

# qhasm:   mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx

# qhasm:   carry? mulr5 += mulc
# asm 1: add  <mulc=int64#14,<mulr5=int64#5
# asm 2: add  <mulc=%rbx,<mulr5=%r8
add  %rbx,%r8

# qhasm:   mulr6 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr6=int64#6
# asm 2: adc <mulrdx=%rdx,<mulr6=%r9
adc %rdx,%r9

# qhasm:   mulx3 = a3_stack
# asm 1: movq <a3_stack=stack64#11,>mulx3=int64#9
# asm 2: movq <a3_stack=80(%rsp),>mulx3=%r11
movq 80(%rsp),%r11

# qhasm:   mulrax = *(uint64 *)(qp + 0)
# asm 1: movq   0(<qp=int64#2),>mulrax=int64#7
# asm 2: movq   0(<qp=%rsi),>mulrax=%rax
movq   0(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx3
# asm 1: mul  <mulx3=int64#9
# asm 2: mul  <mulx3=%r11
mul  %r11

# qhasm:   carry? a3 += mulrax
# asm 1: add  <mulrax=int64#7,<a3=int64#13
# asm 2: add  <mulrax=%rax,<a3=%r15
add  %rax,%r15

# qhasm:   mulc = 0
# asm 1: mov  $0,>mulc=int64#14
# asm 2: mov  $0,>mulc=%rbx
mov  $0,%rbx

# qhasm:   mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
adc %rdx,%rbx

# qhasm:   mulrax = *(uint64 *)(qp + 8)
# asm 1: movq   8(<qp=int64#2),>mulrax=int64#7
# asm 2: movq   8(<qp=%rsi),>mulrax=%rax
movq   8(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx3
# asm 1: mul  <mulx3=int64#9
# asm 2: mul  <mulx3=%r11
mul  %r11

# qhasm:   carry? mulr4 += mulrax
# asm 1: add  <mulrax=int64#7,<mulr4=int64#4
# asm 2: add  <mulrax=%rax,<mulr4=%rcx
add  %rax,%rcx

# qhasm:   mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx

# qhasm:   carry? mulr4 += mulc
# asm 1: add  <mulc=int64#14,<mulr4=int64#4
# asm 2: add  <mulc=%rbx,<mulr4=%rcx
add  %rbx,%rcx

# qhasm:   mulc = 0
# asm 1: mov  $0,>mulc=int64#14
# asm 2: mov  $0,>mulc=%rbx
mov  $0,%rbx

# qhasm:   mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
adc %rdx,%rbx

# qhasm:   mulrax = *(uint64 *)(qp + 16)
# asm 1: movq   16(<qp=int64#2),>mulrax=int64#7
# asm 2: movq   16(<qp=%rsi),>mulrax=%rax
movq   16(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx3
# asm 1: mul  <mulx3=int64#9
# asm 2: mul  <mulx3=%r11
mul  %r11

# qhasm:   carry? mulr5 += mulrax
# asm 1: add  <mulrax=int64#7,<mulr5=int64#5
# asm 2: add  <mulrax=%rax,<mulr5=%r8
add  %rax,%r8

# qhasm:   mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx

# qhasm:   carry? mulr5 += mulc
# asm 1: add  <mulc=int64#14,<mulr5=int64#5
# asm 2: add  <mulc=%rbx,<mulr5=%r8
add  %rbx,%r8

# qhasm:   mulc = 0
# asm 1: mov  $0,>mulc=int64#14
# asm 2: mov  $0,>mulc=%rbx
mov  $0,%rbx

# qhasm:   mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
adc %rdx,%rbx

# qhasm:   mulrax = *(uint64 *)(qp + 24)
# asm 1: movq   24(<qp=int64#2),>mulrax=int64#7
# asm 2: movq   24(<qp=%rsi),>mulrax=%rax
movq   24(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx3
# asm 1: mul  <mulx3=int64#9
# asm 2: mul  <mulx3=%r11
mul  %r11

# qhasm:   carry? mulr6 += mulrax
# asm 1: add  <mulrax=int64#7,<mulr6=int64#6
# asm 2: add  <mulrax=%rax,<mulr6=%r9
add  %rax,%r9

# qhasm:   mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx

# qhasm:   carry? mulr6 += mulc
# asm 1: add  <mulc=int64#14,<mulr6=int64#6
# asm 2: add  <mulc=%rbx,<mulr6=%r9
add  %rbx,%r9

# qhasm:   mulr7 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr7=int64#8
# asm 2: adc <mulrdx=%rdx,<mulr7=%r10
adc %rdx,%r10

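# Note: reduce the 512-bit product.  The high limbs mulr4..mulr7 are each
# multiplied by the constant 38 (2^256 = 38 mod 2^255-19) and folded back
# into a0..a3; the remaining carry is folded once more via imulq $38 so
# the result fits in four limbs again.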
# qhasm:   mulrax = mulr4
# asm 1: mov  <mulr4=int64#4,>mulrax=int64#7
# asm 2: mov  <mulr4=%rcx,>mulrax=%rax
mov  %rcx,%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq  crypto_sign_ed25519_amd64_64_38

# qhasm:   mulr4 = mulrax
# asm 1: mov  <mulrax=int64#7,>mulr4=int64#4
# asm 2: mov  <mulrax=%rax,>mulr4=%rcx
mov  %rax,%rcx

# qhasm:   mulrax = mulr5
# asm 1: mov  <mulr5=int64#5,>mulrax=int64#7
# asm 2: mov  <mulr5=%r8,>mulrax=%rax
mov  %r8,%rax

# qhasm:   mulr5 = mulrdx
# asm 1: mov  <mulrdx=int64#3,>mulr5=int64#5
# asm 2: mov  <mulrdx=%rdx,>mulr5=%r8
mov  %rdx,%r8

# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq  crypto_sign_ed25519_amd64_64_38

# qhasm:   carry? mulr5 += mulrax
# asm 1: add  <mulrax=int64#7,<mulr5=int64#5
# asm 2: add  <mulrax=%rax,<mulr5=%r8
add  %rax,%r8

# qhasm:   mulrax = mulr6
# asm 1: mov  <mulr6=int64#6,>mulrax=int64#7
# asm 2: mov  <mulr6=%r9,>mulrax=%rax
mov  %r9,%rax

# qhasm:   mulr6 = 0
# asm 1: mov  $0,>mulr6=int64#6
# asm 2: mov  $0,>mulr6=%r9
mov  $0,%r9

# qhasm:   mulr6 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr6=int64#6
# asm 2: adc <mulrdx=%rdx,<mulr6=%r9
adc %rdx,%r9

# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq  crypto_sign_ed25519_amd64_64_38

# qhasm:   carry? mulr6 += mulrax
# asm 1: add  <mulrax=int64#7,<mulr6=int64#6
# asm 2: add  <mulrax=%rax,<mulr6=%r9
add  %rax,%r9

# qhasm:   mulrax = mulr7
# asm 1: mov  <mulr7=int64#8,>mulrax=int64#7
# asm 2: mov  <mulr7=%r10,>mulrax=%rax
mov  %r10,%rax

# qhasm:   mulr7 = 0
# asm 1: mov  $0,>mulr7=int64#8
# asm 2: mov  $0,>mulr7=%r10
mov  $0,%r10

# qhasm:   mulr7 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr7=int64#8
# asm 2: adc <mulrdx=%rdx,<mulr7=%r10
adc %rdx,%r10

# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq  crypto_sign_ed25519_amd64_64_38

# qhasm:   carry? mulr7 += mulrax
# asm 1: add  <mulrax=int64#7,<mulr7=int64#8
# asm 2: add  <mulrax=%rax,<mulr7=%r10
add  %rax,%r10

# qhasm:   mulr8 = 0
# asm 1: mov  $0,>mulr8=int64#7
# asm 2: mov  $0,>mulr8=%rax
mov  $0,%rax

# qhasm:   mulr8 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr8=int64#7
# asm 2: adc <mulrdx=%rdx,<mulr8=%rax
adc %rdx,%rax

# qhasm:   carry? a0 += mulr4
# asm 1: add  <mulr4=int64#4,<a0=int64#10
# asm 2: add  <mulr4=%rcx,<a0=%r12
add  %rcx,%r12

# qhasm:   carry? a1 += mulr5 + carry
# asm 1: adc <mulr5=int64#5,<a1=int64#11
# asm 2: adc <mulr5=%r8,<a1=%r13
adc %r8,%r13

# qhasm:   carry? a2 += mulr6 + carry
# asm 1: adc <mulr6=int64#6,<a2=int64#12
# asm 2: adc <mulr6=%r9,<a2=%r14
adc %r9,%r14

# qhasm:   carry? a3 += mulr7 + carry
# asm 1: adc <mulr7=int64#8,<a3=int64#13
# asm 2: adc <mulr7=%r10,<a3=%r15
adc %r10,%r15

# qhasm:   mulzero = 0
# asm 1: mov  $0,>mulzero=int64#3
# asm 2: mov  $0,>mulzero=%rdx
mov  $0,%rdx

# qhasm:   mulr8 += mulzero + carry
# asm 1: adc <mulzero=int64#3,<mulr8=int64#7
# asm 2: adc <mulzero=%rdx,<mulr8=%rax
adc %rdx,%rax

# qhasm:   mulr8 *= 38
# asm 1: imulq  $38,<mulr8=int64#7,>mulr8=int64#4
# asm 2: imulq  $38,<mulr8=%rax,>mulr8=%rcx
imulq  $38,%rax,%rcx

# qhasm:   carry? a0 += mulr8
# asm 1: add  <mulr8=int64#4,<a0=int64#10
# asm 2: add  <mulr8=%rcx,<a0=%r12
add  %rcx,%r12

# qhasm:   carry? a1 += mulzero + carry
# asm 1: adc <mulzero=int64#3,<a1=int64#11
# asm 2: adc <mulzero=%rdx,<a1=%r13
adc %rdx,%r13

# qhasm:   carry? a2 += mulzero + carry
# asm 1: adc <mulzero=int64#3,<a2=int64#12
# asm 2: adc <mulzero=%rdx,<a2=%r14
adc %rdx,%r14

# qhasm:   carry? a3 += mulzero + carry
# asm 1: adc <mulzero=int64#3,<a3=int64#13
# asm 2: adc <mulzero=%rdx,<a3=%r15
adc %rdx,%r15

# qhasm:   mulzero += mulzero + carry
# asm 1: adc <mulzero=int64#3,<mulzero=int64#3
# asm 2: adc <mulzero=%rdx,<mulzero=%rdx
adc %rdx,%rdx

# qhasm:   mulzero *= 38
# asm 1: imulq  $38,<mulzero=int64#3,>mulzero=int64#3
# asm 2: imulq  $38,<mulzero=%rdx,>mulzero=%rdx
imulq  $38,%rdx,%rdx

# qhasm:   a0 += mulzero
# asm 1: add  <mulzero=int64#3,<a0=int64#10
# asm 2: add  <mulzero=%rdx,<a0=%r12
add  %rdx,%r12

# qhasm: a0_stack = a0
# asm 1: movq <a0=int64#10,>a0_stack=stack64#8
# asm 2: movq <a0=%r12,>a0_stack=56(%rsp)
movq %r12,56(%rsp)

# qhasm: a1_stack = a1
# asm 1: movq <a1=int64#11,>a1_stack=stack64#9
# asm 2: movq <a1=%r13,>a1_stack=64(%rsp)
movq %r13,64(%rsp)

# qhasm: a2_stack = a2
# asm 1: movq <a2=int64#12,>a2_stack=stack64#10
# asm 2: movq <a2=%r14,>a2_stack=72(%rsp)
movq %r14,72(%rsp)

# qhasm: a3_stack = a3
# asm 1: movq <a3=int64#13,>a3_stack=stack64#11
# asm 2: movq <a3=%r15,>a3_stack=80(%rsp)
movq %r15,80(%rsp)

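# Note: second multiplication, same schoolbook-plus-reduction pattern:
# e = b * xaddy(q) = (Y1+X1) times the four limbs at qp+32..56.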
# qhasm:   mulr4 = 0
# asm 1: mov  $0,>mulr4=int64#4
# asm 2: mov  $0,>mulr4=%rcx
mov  $0,%rcx

# qhasm:   mulr5 = 0
# asm 1: mov  $0,>mulr5=int64#5
# asm 2: mov  $0,>mulr5=%r8
mov  $0,%r8

# qhasm:   mulr6 = 0
# asm 1: mov  $0,>mulr6=int64#6
# asm 2: mov  $0,>mulr6=%r9
mov  $0,%r9

# qhasm:   mulr7 = 0
# asm 1: mov  $0,>mulr7=int64#8
# asm 2: mov  $0,>mulr7=%r10
mov  $0,%r10

# qhasm:   mulx0 = b0_stack
# asm 1: movq <b0_stack=stack64#12,>mulx0=int64#9
# asm 2: movq <b0_stack=88(%rsp),>mulx0=%r11
movq 88(%rsp),%r11

# qhasm:   mulrax = *(uint64 *)(qp + 32)
# asm 1: movq   32(<qp=int64#2),>mulrax=int64#7
# asm 2: movq   32(<qp=%rsi),>mulrax=%rax
movq   32(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx0
# asm 1: mul  <mulx0=int64#9
# asm 2: mul  <mulx0=%r11
mul  %r11

# qhasm:   e0 = mulrax
# asm 1: mov  <mulrax=int64#7,>e0=int64#10
# asm 2: mov  <mulrax=%rax,>e0=%r12
mov  %rax,%r12

# qhasm:   e1 = mulrdx
# asm 1: mov  <mulrdx=int64#3,>e1=int64#11
# asm 2: mov  <mulrdx=%rdx,>e1=%r13
mov  %rdx,%r13

# qhasm:   mulrax = *(uint64 *)(qp + 40)
# asm 1: movq   40(<qp=int64#2),>mulrax=int64#7
# asm 2: movq   40(<qp=%rsi),>mulrax=%rax
movq   40(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx0
# asm 1: mul  <mulx0=int64#9
# asm 2: mul  <mulx0=%r11
mul  %r11

# qhasm:   carry? e1 += mulrax
# asm 1: add  <mulrax=int64#7,<e1=int64#11
# asm 2: add  <mulrax=%rax,<e1=%r13
add  %rax,%r13

# qhasm:   e2 = 0
# asm 1: mov  $0,>e2=int64#12
# asm 2: mov  $0,>e2=%r14
mov  $0,%r14

# qhasm:   e2 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<e2=int64#12
# asm 2: adc <mulrdx=%rdx,<e2=%r14
adc %rdx,%r14

# qhasm:   mulrax = *(uint64 *)(qp + 48)
# asm 1: movq   48(<qp=int64#2),>mulrax=int64#7
# asm 2: movq   48(<qp=%rsi),>mulrax=%rax
movq   48(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx0
# asm 1: mul  <mulx0=int64#9
# asm 2: mul  <mulx0=%r11
mul  %r11

# qhasm:   carry? e2 += mulrax
# asm 1: add  <mulrax=int64#7,<e2=int64#12
# asm 2: add  <mulrax=%rax,<e2=%r14
add  %rax,%r14

# qhasm:   e3 = 0
# asm 1: mov  $0,>e3=int64#13
# asm 2: mov  $0,>e3=%r15
mov  $0,%r15

# qhasm:   e3 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<e3=int64#13
# asm 2: adc <mulrdx=%rdx,<e3=%r15
adc %rdx,%r15

# qhasm:   mulrax = *(uint64 *)(qp + 56)
# asm 1: movq   56(<qp=int64#2),>mulrax=int64#7
# asm 2: movq   56(<qp=%rsi),>mulrax=%rax
movq   56(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx0
# asm 1: mul  <mulx0=int64#9
# asm 2: mul  <mulx0=%r11
mul  %r11

# qhasm:   carry? e3 += mulrax
# asm 1: add  <mulrax=int64#7,<e3=int64#13
# asm 2: add  <mulrax=%rax,<e3=%r15
add  %rax,%r15

# qhasm:   mulr4 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr4=int64#4
# asm 2: adc <mulrdx=%rdx,<mulr4=%rcx
adc %rdx,%rcx

# qhasm:   mulx1 = b1_stack
# asm 1: movq <b1_stack=stack64#13,>mulx1=int64#9
# asm 2: movq <b1_stack=96(%rsp),>mulx1=%r11
movq 96(%rsp),%r11

# qhasm:   mulrax = *(uint64 *)(qp + 32)
# asm 1: movq   32(<qp=int64#2),>mulrax=int64#7
# asm 2: movq   32(<qp=%rsi),>mulrax=%rax
movq   32(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx1
# asm 1: mul  <mulx1=int64#9
# asm 2: mul  <mulx1=%r11
mul  %r11

# qhasm:   carry? e1 += mulrax
# asm 1: add  <mulrax=int64#7,<e1=int64#11
# asm 2: add  <mulrax=%rax,<e1=%r13
add  %rax,%r13

# qhasm:   mulc = 0
# asm 1: mov  $0,>mulc=int64#14
# asm 2: mov  $0,>mulc=%rbx
mov  $0,%rbx

# qhasm:   mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
adc %rdx,%rbx

# qhasm:   mulrax = *(uint64 *)(qp + 40)
# asm 1: movq   40(<qp=int64#2),>mulrax=int64#7
# asm 2: movq   40(<qp=%rsi),>mulrax=%rax
movq   40(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx1
# asm 1: mul  <mulx1=int64#9
# asm 2: mul  <mulx1=%r11
mul  %r11

# qhasm:   carry? e2 += mulrax
# asm 1: add  <mulrax=int64#7,<e2=int64#12
# asm 2: add  <mulrax=%rax,<e2=%r14
add  %rax,%r14

# qhasm:   mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx

# qhasm:   carry? e2 += mulc
# asm 1: add  <mulc=int64#14,<e2=int64#12
# asm 2: add  <mulc=%rbx,<e2=%r14
add  %rbx,%r14

# qhasm:   mulc = 0
# asm 1: mov  $0,>mulc=int64#14
# asm 2: mov  $0,>mulc=%rbx
mov  $0,%rbx

# qhasm:   mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
adc %rdx,%rbx

# qhasm:   mulrax = *(uint64 *)(qp + 48)
# asm 1: movq   48(<qp=int64#2),>mulrax=int64#7
# asm 2: movq   48(<qp=%rsi),>mulrax=%rax
movq   48(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx1
# asm 1: mul  <mulx1=int64#9
# asm 2: mul  <mulx1=%r11
mul  %r11

# qhasm:   carry? e3 += mulrax
# asm 1: add  <mulrax=int64#7,<e3=int64#13
# asm 2: add  <mulrax=%rax,<e3=%r15
add  %rax,%r15

# qhasm:   mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx

# qhasm:   carry? e3 += mulc
# asm 1: add  <mulc=int64#14,<e3=int64#13
# asm 2: add  <mulc=%rbx,<e3=%r15
add  %rbx,%r15

# qhasm:   mulc = 0
# asm 1: mov  $0,>mulc=int64#14
# asm 2: mov  $0,>mulc=%rbx
mov  $0,%rbx

# qhasm:   mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
adc %rdx,%rbx

# qhasm:   mulrax = *(uint64 *)(qp + 56)
# asm 1: movq   56(<qp=int64#2),>mulrax=int64#7
# asm 2: movq   56(<qp=%rsi),>mulrax=%rax
movq   56(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx1
# asm 1: mul  <mulx1=int64#9
# asm 2: mul  <mulx1=%r11
mul  %r11

# qhasm:   carry? mulr4 += mulrax
# asm 1: add  <mulrax=int64#7,<mulr4=int64#4
# asm 2: add  <mulrax=%rax,<mulr4=%rcx
add  %rax,%rcx

# qhasm:   mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx

# qhasm:   carry? mulr4 += mulc
# asm 1: add  <mulc=int64#14,<mulr4=int64#4
# asm 2: add  <mulc=%rbx,<mulr4=%rcx
add  %rbx,%rcx

# qhasm:   mulr5 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr5=int64#5
# asm 2: adc <mulrdx=%rdx,<mulr5=%r8
adc %rdx,%r8

# qhasm:   mulx2 = b2_stack
# asm 1: movq <b2_stack=stack64#14,>mulx2=int64#9
# asm 2: movq <b2_stack=104(%rsp),>mulx2=%r11
movq 104(%rsp),%r11

# qhasm:   mulrax = *(uint64 *)(qp + 32)
# asm 1: movq   32(<qp=int64#2),>mulrax=int64#7
# asm 2: movq   32(<qp=%rsi),>mulrax=%rax
movq   32(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx2
# asm 1: mul  <mulx2=int64#9
# asm 2: mul  <mulx2=%r11
mul  %r11

# qhasm:   carry? e2 += mulrax
# asm 1: add  <mulrax=int64#7,<e2=int64#12
# asm 2: add  <mulrax=%rax,<e2=%r14
add  %rax,%r14

# qhasm:   mulc = 0
# asm 1: mov  $0,>mulc=int64#14
# asm 2: mov  $0,>mulc=%rbx
mov  $0,%rbx

# qhasm:   mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
adc %rdx,%rbx

# qhasm:   mulrax = *(uint64 *)(qp + 40)
# asm 1: movq   40(<qp=int64#2),>mulrax=int64#7
# asm 2: movq   40(<qp=%rsi),>mulrax=%rax
movq   40(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx2
# asm 1: mul  <mulx2=int64#9
# asm 2: mul  <mulx2=%r11
mul  %r11

# qhasm:   carry? e3 += mulrax
# asm 1: add  <mulrax=int64#7,<e3=int64#13
# asm 2: add  <mulrax=%rax,<e3=%r15
add  %rax,%r15

# qhasm:   mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx

# qhasm:   carry? e3 += mulc
# asm 1: add  <mulc=int64#14,<e3=int64#13
# asm 2: add  <mulc=%rbx,<e3=%r15
add  %rbx,%r15

# qhasm:   mulc = 0
# asm 1: mov  $0,>mulc=int64#14
# asm 2: mov  $0,>mulc=%rbx
mov  $0,%rbx

# qhasm:   mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
adc %rdx,%rbx

# qhasm:   mulrax = *(uint64 *)(qp + 48)
# asm 1: movq   48(<qp=int64#2),>mulrax=int64#7
# asm 2: movq   48(<qp=%rsi),>mulrax=%rax
movq   48(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx2
# asm 1: mul  <mulx2=int64#9
# asm 2: mul  <mulx2=%r11
mul  %r11

# qhasm:   carry? mulr4 += mulrax
# asm 1: add  <mulrax=int64#7,<mulr4=int64#4
# asm 2: add  <mulrax=%rax,<mulr4=%rcx
add  %rax,%rcx

# qhasm:   mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx

# qhasm:   carry? mulr4 += mulc
# asm 1: add  <mulc=int64#14,<mulr4=int64#4
# asm 2: add  <mulc=%rbx,<mulr4=%rcx
add  %rbx,%rcx

# qhasm:   mulc = 0
# asm 1: mov  $0,>mulc=int64#14
# asm 2: mov  $0,>mulc=%rbx
mov  $0,%rbx

# qhasm:   mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
adc %rdx,%rbx

# qhasm:   mulrax = *(uint64 *)(qp + 56)
# asm 1: movq   56(<qp=int64#2),>mulrax=int64#7
# asm 2: movq   56(<qp=%rsi),>mulrax=%rax
movq   56(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx2
# asm 1: mul  <mulx2=int64#9
# asm 2: mul  <mulx2=%r11
mul  %r11

# qhasm:   carry? mulr5 += mulrax
# asm 1: add  <mulrax=int64#7,<mulr5=int64#5
# asm 2: add  <mulrax=%rax,<mulr5=%r8
add  %rax,%r8

# qhasm:   mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx

# qhasm:   carry? mulr5 += mulc
# asm 1: add  <mulc=int64#14,<mulr5=int64#5
# asm 2: add  <mulc=%rbx,<mulr5=%r8
add  %rbx,%r8

# qhasm:   mulr6 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr6=int64#6
# asm 2: adc <mulrdx=%rdx,<mulr6=%r9
adc %rdx,%r9

# qhasm:   mulx3 = b3_stack
# asm 1: movq <b3_stack=stack64#15,>mulx3=int64#9
# asm 2: movq <b3_stack=112(%rsp),>mulx3=%r11
movq 112(%rsp),%r11

# qhasm:   mulrax = *(uint64 *)(qp + 32)
# asm 1: movq   32(<qp=int64#2),>mulrax=int64#7
# asm 2: movq   32(<qp=%rsi),>mulrax=%rax
movq   32(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx3
# asm 1: mul  <mulx3=int64#9
# asm 2: mul  <mulx3=%r11
mul  %r11

# qhasm:   carry? e3 += mulrax
# asm 1: add  <mulrax=int64#7,<e3=int64#13
# asm 2: add  <mulrax=%rax,<e3=%r15
add  %rax,%r15

# qhasm:   mulc = 0
# asm 1: mov  $0,>mulc=int64#14
# asm 2: mov  $0,>mulc=%rbx
mov  $0,%rbx

# qhasm:   mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
adc %rdx,%rbx

# qhasm:   mulrax = *(uint64 *)(qp + 40)
# asm 1: movq   40(<qp=int64#2),>mulrax=int64#7
# asm 2: movq   40(<qp=%rsi),>mulrax=%rax
movq   40(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx3
# asm 1: mul  <mulx3=int64#9
# asm 2: mul  <mulx3=%r11
mul  %r11

# qhasm:   carry? mulr4 += mulrax
# asm 1: add  <mulrax=int64#7,<mulr4=int64#4
# asm 2: add  <mulrax=%rax,<mulr4=%rcx
add  %rax,%rcx

# qhasm:   mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx

# qhasm:   carry? mulr4 += mulc
# asm 1: add  <mulc=int64#14,<mulr4=int64#4
# asm 2: add  <mulc=%rbx,<mulr4=%rcx
add  %rbx,%rcx

# qhasm:   mulc = 0
# asm 1: mov  $0,>mulc=int64#14
# asm 2: mov  $0,>mulc=%rbx
mov  $0,%rbx

# qhasm:   mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
adc %rdx,%rbx

# qhasm:   mulrax = *(uint64 *)(qp + 48)
# asm 1: movq   48(<qp=int64#2),>mulrax=int64#7
# asm 2: movq   48(<qp=%rsi),>mulrax=%rax
movq   48(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx3
# asm 1: mul  <mulx3=int64#9
# asm 2: mul  <mulx3=%r11
mul  %r11

# qhasm:   carry? mulr5 += mulrax
# asm 1: add  <mulrax=int64#7,<mulr5=int64#5
# asm 2: add  <mulrax=%rax,<mulr5=%r8
add  %rax,%r8

# qhasm:   mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx

# qhasm:   carry? mulr5 += mulc
# asm 1: add  <mulc=int64#14,<mulr5=int64#5
# asm 2: add  <mulc=%rbx,<mulr5=%r8
add  %rbx,%r8

# qhasm:   mulc = 0
# asm 1: mov  $0,>mulc=int64#14
# asm 2: mov  $0,>mulc=%rbx
mov  $0,%rbx

# qhasm:   mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
adc %rdx,%rbx

# qhasm:   mulrax = *(uint64 *)(qp + 56)
# asm 1: movq   56(<qp=int64#2),>mulrax=int64#7
# asm 2: movq   56(<qp=%rsi),>mulrax=%rax
movq   56(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx3
# asm 1: mul  <mulx3=int64#9
# asm 2: mul  <mulx3=%r11
mul  %r11

# qhasm:   carry? mulr6 += mulrax
# asm 1: add  <mulrax=int64#7,<mulr6=int64#6
# asm 2: add  <mulrax=%rax,<mulr6=%r9
add  %rax,%r9

# qhasm:   mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx

# qhasm:   carry? mulr6 += mulc
# asm 1: add  <mulc=int64#14,<mulr6=int64#6
# asm 2: add  <mulc=%rbx,<mulr6=%r9
add  %rbx,%r9

# qhasm:   mulr7 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr7=int64#8
# asm 2: adc <mulrdx=%rdx,<mulr7=%r10
adc %rdx,%r10

# qhasm:   mulrax = mulr4
# asm 1: mov  <mulr4=int64#4,>mulrax=int64#7
# asm 2: mov  <mulr4=%rcx,>mulrax=%rax
mov  %rcx,%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq  crypto_sign_ed25519_amd64_64_38

# qhasm:   mulr4 = mulrax
# asm 1: mov  <mulrax=int64#7,>mulr4=int64#4
# asm 2: mov  <mulrax=%rax,>mulr4=%rcx
mov  %rax,%rcx

# qhasm:   mulrax = mulr5
# asm 1: mov  <mulr5=int64#5,>mulrax=int64#7
# asm 2: mov  <mulr5=%r8,>mulrax=%rax
mov  %r8,%rax

# qhasm:   mulr5 = mulrdx
# asm 1: mov  <mulrdx=int64#3,>mulr5=int64#5
# asm 2: mov  <mulrdx=%rdx,>mulr5=%r8
mov  %rdx,%r8

# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq  crypto_sign_ed25519_amd64_64_38

# qhasm:   carry? mulr5 += mulrax
# asm 1: add  <mulrax=int64#7,<mulr5=int64#5
# asm 2: add  <mulrax=%rax,<mulr5=%r8
add  %rax,%r8

# qhasm:   mulrax = mulr6
# asm 1: mov  <mulr6=int64#6,>mulrax=int64#7
# asm 2: mov  <mulr6=%r9,>mulrax=%rax
mov  %r9,%rax

# qhasm:   mulr6 = 0
# asm 1: mov  $0,>mulr6=int64#6
# asm 2: mov  $0,>mulr6=%r9
mov  $0,%r9

# qhasm:   mulr6 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr6=int64#6
# asm 2: adc <mulrdx=%rdx,<mulr6=%r9
adc %rdx,%r9

# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq  crypto_sign_ed25519_amd64_64_38

# qhasm:   carry? mulr6 += mulrax
# asm 1: add  <mulrax=int64#7,<mulr6=int64#6
# asm 2: add  <mulrax=%rax,<mulr6=%r9
add  %rax,%r9

# qhasm:   mulrax = mulr7
# asm 1: mov  <mulr7=int64#8,>mulrax=int64#7
# asm 2: mov  <mulr7=%r10,>mulrax=%rax
mov  %r10,%rax

# qhasm:   mulr7 = 0
# asm 1: mov  $0,>mulr7=int64#8
# asm 2: mov  $0,>mulr7=%r10
mov  $0,%r10

# qhasm:   mulr7 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr7=int64#8
# asm 2: adc <mulrdx=%rdx,<mulr7=%r10
adc %rdx,%r10

# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq  crypto_sign_ed25519_amd64_64_38

# qhasm:   carry? mulr7 += mulrax
# asm 1: add  <mulrax=int64#7,<mulr7=int64#8
# asm 2: add  <mulrax=%rax,<mulr7=%r10
add  %rax,%r10

# qhasm:   mulr8 = 0
# asm 1: mov  $0,>mulr8=int64#7
# asm 2: mov  $0,>mulr8=%rax
mov  $0,%rax

# qhasm:   mulr8 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr8=int64#7
# asm 2: adc <mulrdx=%rdx,<mulr8=%rax
adc %rdx,%rax

# qhasm:   carry? e0 += mulr4
# asm 1: add  <mulr4=int64#4,<e0=int64#10
# asm 2: add  <mulr4=%rcx,<e0=%r12
add  %rcx,%r12

# qhasm:   carry? e1 += mulr5 + carry
# asm 1: adc <mulr5=int64#5,<e1=int64#11
# asm 2: adc <mulr5=%r8,<e1=%r13
adc %r8,%r13

# qhasm:   carry? e2 += mulr6 + carry
# asm 1: adc <mulr6=int64#6,<e2=int64#12
# asm 2: adc <mulr6=%r9,<e2=%r14
adc %r9,%r14

# qhasm:   carry? e3 += mulr7 + carry
# asm 1: adc <mulr7=int64#8,<e3=int64#13
# asm 2: adc <mulr7=%r10,<e3=%r15
adc %r10,%r15

# qhasm:   mulzero = 0
# asm 1: mov  $0,>mulzero=int64#3
# asm 2: mov  $0,>mulzero=%rdx
mov  $0,%rdx

# qhasm:   mulr8 += mulzero + carry
# asm 1: adc <mulzero=int64#3,<mulr8=int64#7
# asm 2: adc <mulzero=%rdx,<mulr8=%rax
adc %rdx,%rax

# qhasm:   mulr8 *= 38
# asm 1: imulq  $38,<mulr8=int64#7,>mulr8=int64#4
# asm 2: imulq  $38,<mulr8=%rax,>mulr8=%rcx
imulq  $38,%rax,%rcx

# qhasm:   carry? e0 += mulr8
# asm 1: add  <mulr8=int64#4,<e0=int64#10
# asm 2: add  <mulr8=%rcx,<e0=%r12
add  %rcx,%r12

# qhasm:   carry? e1 += mulzero + carry
# asm 1: adc <mulzero=int64#3,<e1=int64#11
# asm 2: adc <mulzero=%rdx,<e1=%r13
adc %rdx,%r13

# qhasm:   carry? e2 += mulzero + carry
# asm 1: adc <mulzero=int64#3,<e2=int64#12
# asm 2: adc <mulzero=%rdx,<e2=%r14
adc %rdx,%r14

# qhasm:   carry? e3 += mulzero + carry
# asm 1: adc <mulzero=int64#3,<e3=int64#13
# asm 2: adc <mulzero=%rdx,<e3=%r15
adc %rdx,%r15

# qhasm:   mulzero += mulzero + carry
# asm 1: adc <mulzero=int64#3,<mulzero=int64#3
# asm 2: adc <mulzero=%rdx,<mulzero=%rdx
adc %rdx,%rdx

# qhasm:   mulzero *= 38
# asm 1: imulq  $38,<mulzero=int64#3,>mulzero=int64#3
# asm 2: imulq  $38,<mulzero=%rdx,>mulzero=%rdx
imulq  $38,%rdx,%rdx

# qhasm:   e0 += mulzero
# asm 1: add  <mulzero=int64#3,<e0=int64#10
# asm 2: add  <mulzero=%rdx,<e0=%r12
add  %rdx,%r12

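# Note: with a = (Y1-X1)*ysubx still on the stack and e = (Y1+X1)*xaddy in
# registers, form h = e + a and e = e - a (carries/borrows again folded via
# 38), then spill h and e to the stack for the later products.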
# qhasm: h0 = e0
# asm 1: mov  <e0=int64#10,>h0=int64#3
# asm 2: mov  <e0=%r12,>h0=%rdx
mov  %r12,%rdx

# qhasm: h1 = e1
# asm 1: mov  <e1=int64#11,>h1=int64#4
# asm 2: mov  <e1=%r13,>h1=%rcx
mov  %r13,%rcx

# qhasm: h2 = e2
# asm 1: mov  <e2=int64#12,>h2=int64#5
# asm 2: mov  <e2=%r14,>h2=%r8
mov  %r14,%r8

# qhasm: h3 = e3
# asm 1: mov  <e3=int64#13,>h3=int64#6
# asm 2: mov  <e3=%r15,>h3=%r9
mov  %r15,%r9

# qhasm:   carry? e0 -= a0_stack
# asm 1: subq <a0_stack=stack64#8,<e0=int64#10
# asm 2: subq <a0_stack=56(%rsp),<e0=%r12
subq 56(%rsp),%r12

# qhasm:   carry? e1 -= a1_stack - carry
# asm 1: sbbq <a1_stack=stack64#9,<e1=int64#11
# asm 2: sbbq <a1_stack=64(%rsp),<e1=%r13
sbbq 64(%rsp),%r13

# qhasm:   carry? e2 -= a2_stack - carry
# asm 1: sbbq <a2_stack=stack64#10,<e2=int64#12
# asm 2: sbbq <a2_stack=72(%rsp),<e2=%r14
sbbq 72(%rsp),%r14

# qhasm:   carry? e3 -= a3_stack - carry
# asm 1: sbbq <a3_stack=stack64#11,<e3=int64#13
# asm 2: sbbq <a3_stack=80(%rsp),<e3=%r15
sbbq 80(%rsp),%r15

# qhasm:   subt0 = 0
# asm 1: mov  $0,>subt0=int64#7
# asm 2: mov  $0,>subt0=%rax
mov  $0,%rax

# qhasm:   subt1 = 38
# asm 1: mov  $38,>subt1=int64#8
# asm 2: mov  $38,>subt1=%r10
mov  $38,%r10

# qhasm:   subt1 = subt0 if !carry
# asm 1: cmovae <subt0=int64#7,<subt1=int64#8
# asm 2: cmovae <subt0=%rax,<subt1=%r10
cmovae %rax,%r10

# qhasm:   carry? e0 -= subt1
# asm 1: sub  <subt1=int64#8,<e0=int64#10
# asm 2: sub  <subt1=%r10,<e0=%r12
sub  %r10,%r12

# qhasm:   carry? e1 -= subt0 - carry
# asm 1: sbb  <subt0=int64#7,<e1=int64#11
# asm 2: sbb  <subt0=%rax,<e1=%r13
sbb  %rax,%r13

# qhasm:   carry? e2 -= subt0 - carry
# asm 1: sbb  <subt0=int64#7,<e2=int64#12
# asm 2: sbb  <subt0=%rax,<e2=%r14
sbb  %rax,%r14

# qhasm:   carry? e3 -= subt0 - carry
# asm 1: sbb  <subt0=int64#7,<e3=int64#13
# asm 2: sbb  <subt0=%rax,<e3=%r15
sbb  %rax,%r15

# qhasm:   subt0 = subt1 if carry
# asm 1: cmovc <subt1=int64#8,<subt0=int64#7
# asm 2: cmovc <subt1=%r10,<subt0=%rax
cmovc %r10,%rax

# qhasm:   e0 -= subt0
# asm 1: sub  <subt0=int64#7,<e0=int64#10
# asm 2: sub  <subt0=%rax,<e0=%r12
sub  %rax,%r12

# qhasm:   carry? h0 += a0_stack
# asm 1: addq <a0_stack=stack64#8,<h0=int64#3
# asm 2: addq <a0_stack=56(%rsp),<h0=%rdx
addq 56(%rsp),%rdx

# qhasm:   carry? h1 += a1_stack + carry
# asm 1: adcq <a1_stack=stack64#9,<h1=int64#4
# asm 2: adcq <a1_stack=64(%rsp),<h1=%rcx
adcq 64(%rsp),%rcx

# qhasm:   carry? h2 += a2_stack + carry
# asm 1: adcq <a2_stack=stack64#10,<h2=int64#5
# asm 2: adcq <a2_stack=72(%rsp),<h2=%r8
adcq 72(%rsp),%r8

# qhasm:   carry? h3 += a3_stack + carry
# asm 1: adcq <a3_stack=stack64#11,<h3=int64#6
# asm 2: adcq <a3_stack=80(%rsp),<h3=%r9
adcq 80(%rsp),%r9

# qhasm:   addt0 = 0
# asm 1: mov  $0,>addt0=int64#7
# asm 2: mov  $0,>addt0=%rax
mov  $0,%rax

# qhasm:   addt1 = 38
# asm 1: mov  $38,>addt1=int64#8
# asm 2: mov  $38,>addt1=%r10
mov  $38,%r10

# qhasm:   addt1 = addt0 if !carry
# asm 1: cmovae <addt0=int64#7,<addt1=int64#8
# asm 2: cmovae <addt0=%rax,<addt1=%r10
cmovae %rax,%r10

# qhasm:   carry? h0 += addt1
# asm 1: add  <addt1=int64#8,<h0=int64#3
# asm 2: add  <addt1=%r10,<h0=%rdx
add  %r10,%rdx

# qhasm:   carry? h1 += addt0 + carry
# asm 1: adc <addt0=int64#7,<h1=int64#4
# asm 2: adc <addt0=%rax,<h1=%rcx
adc %rax,%rcx

# qhasm:   carry? h2 += addt0 + carry
# asm 1: adc <addt0=int64#7,<h2=int64#5
# asm 2: adc <addt0=%rax,<h2=%r8
adc %rax,%r8

# qhasm:   carry? h3 += addt0 + carry
# asm 1: adc <addt0=int64#7,<h3=int64#6
# asm 2: adc <addt0=%rax,<h3=%r9
adc %rax,%r9

# qhasm:   addt0 = addt1 if carry
# asm 1: cmovc <addt1=int64#8,<addt0=int64#7
# asm 2: cmovc <addt1=%r10,<addt0=%rax
cmovc %r10,%rax

# qhasm:   h0 += addt0
# asm 1: add  <addt0=int64#7,<h0=int64#3
# asm 2: add  <addt0=%rax,<h0=%rdx
add  %rax,%rdx

# qhasm: h0_stack = h0
# asm 1: movq <h0=int64#3,>h0_stack=stack64#8
# asm 2: movq <h0=%rdx,>h0_stack=56(%rsp)
movq %rdx,56(%rsp)

# qhasm: h1_stack = h1
# asm 1: movq <h1=int64#4,>h1_stack=stack64#9
# asm 2: movq <h1=%rcx,>h1_stack=64(%rsp)
movq %rcx,64(%rsp)

# qhasm: h2_stack = h2
# asm 1: movq <h2=int64#5,>h2_stack=stack64#10
# asm 2: movq <h2=%r8,>h2_stack=72(%rsp)
movq %r8,72(%rsp)

# qhasm: h3_stack = h3
# asm 1: movq <h3=int64#6,>h3_stack=stack64#11
# asm 2: movq <h3=%r9,>h3_stack=80(%rsp)
movq %r9,80(%rsp)

# qhasm: e0_stack = e0
# asm 1: movq <e0=int64#10,>e0_stack=stack64#12
# asm 2: movq <e0=%r12,>e0_stack=88(%rsp)
movq %r12,88(%rsp)

# qhasm: e1_stack = e1
# asm 1: movq <e1=int64#11,>e1_stack=stack64#13
# asm 2: movq <e1=%r13,>e1_stack=96(%rsp)
movq %r13,96(%rsp)

# qhasm: e2_stack = e2
# asm 1: movq <e2=int64#12,>e2_stack=stack64#14
# asm 2: movq <e2=%r14,>e2_stack=104(%rsp)
movq %r14,104(%rsp)

# qhasm: e3_stack = e3
# asm 1: movq <e3=int64#13,>e3_stack=stack64#15
# asm 2: movq <e3=%r15,>e3_stack=112(%rsp)
movq %r15,112(%rsp)

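# Note: third multiplication, same pattern: c = T1 * t2d(q), with T1 read
# from rp+96..120 and t2d from qp+64..88 (per the layout assumed above).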
2087# qhasm:   mulr4 = 0
2088# asm 1: mov  $0,>mulr4=int64#4
2089# asm 2: mov  $0,>mulr4=%rcx
2090mov  $0,%rcx
2091
2092# qhasm:   mulr5 = 0
2093# asm 1: mov  $0,>mulr5=int64#5
2094# asm 2: mov  $0,>mulr5=%r8
2095mov  $0,%r8
2096
2097# qhasm:   mulr6 = 0
2098# asm 1: mov  $0,>mulr6=int64#6
2099# asm 2: mov  $0,>mulr6=%r9
2100mov  $0,%r9
2101
2102# qhasm:   mulr7 = 0
2103# asm 1: mov  $0,>mulr7=int64#8
2104# asm 2: mov  $0,>mulr7=%r10
2105mov  $0,%r10
2106
2107# qhasm:   mulx0 = *(uint64 *)(rp + 96)
2108# asm 1: movq   96(<rp=int64#1),>mulx0=int64#9
2109# asm 2: movq   96(<rp=%rdi),>mulx0=%r11
2110movq   96(%rdi),%r11
2111
2112# qhasm:   mulrax = *(uint64 *)(qp + 64)
2113# asm 1: movq   64(<qp=int64#2),>mulrax=int64#7
2114# asm 2: movq   64(<qp=%rsi),>mulrax=%rax
2115movq   64(%rsi),%rax
2116
2117# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx0
2118# asm 1: mul  <mulx0=int64#9
2119# asm 2: mul  <mulx0=%r11
2120mul  %r11
2121
2122# qhasm:   c0 = mulrax
2123# asm 1: mov  <mulrax=int64#7,>c0=int64#10
2124# asm 2: mov  <mulrax=%rax,>c0=%r12
2125mov  %rax,%r12
2126
2127# qhasm:   c1 = mulrdx
2128# asm 1: mov  <mulrdx=int64#3,>c1=int64#11
2129# asm 2: mov  <mulrdx=%rdx,>c1=%r13
2130mov  %rdx,%r13
2131
2132# qhasm:   mulrax = *(uint64 *)(qp + 72)
2133# asm 1: movq   72(<qp=int64#2),>mulrax=int64#7
2134# asm 2: movq   72(<qp=%rsi),>mulrax=%rax
2135movq   72(%rsi),%rax
2136
2137# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx0
2138# asm 1: mul  <mulx0=int64#9
2139# asm 2: mul  <mulx0=%r11
2140mul  %r11
2141
2142# qhasm:   carry? c1 += mulrax
2143# asm 1: add  <mulrax=int64#7,<c1=int64#11
2144# asm 2: add  <mulrax=%rax,<c1=%r13
2145add  %rax,%r13
2146
2147# qhasm:   c2 = 0
2148# asm 1: mov  $0,>c2=int64#12
2149# asm 2: mov  $0,>c2=%r14
2150mov  $0,%r14
2151
2152# qhasm:   c2 += mulrdx + carry
2153# asm 1: adc <mulrdx=int64#3,<c2=int64#12
2154# asm 2: adc <mulrdx=%rdx,<c2=%r14
2155adc %rdx,%r14
2156
2157# qhasm:   mulrax = *(uint64 *)(qp + 80)
2158# asm 1: movq   80(<qp=int64#2),>mulrax=int64#7
2159# asm 2: movq   80(<qp=%rsi),>mulrax=%rax
2160movq   80(%rsi),%rax
2161
2162# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx0
2163# asm 1: mul  <mulx0=int64#9
2164# asm 2: mul  <mulx0=%r11
2165mul  %r11
2166
2167# qhasm:   carry? c2 += mulrax
2168# asm 1: add  <mulrax=int64#7,<c2=int64#12
2169# asm 2: add  <mulrax=%rax,<c2=%r14
2170add  %rax,%r14
2171
2172# qhasm:   c3 = 0
2173# asm 1: mov  $0,>c3=int64#13
2174# asm 2: mov  $0,>c3=%r15
2175mov  $0,%r15
2176
2177# qhasm:   c3 += mulrdx + carry
2178# asm 1: adc <mulrdx=int64#3,<c3=int64#13
2179# asm 2: adc <mulrdx=%rdx,<c3=%r15
2180adc %rdx,%r15
2181
2182# qhasm:   mulrax = *(uint64 *)(qp + 88)
2183# asm 1: movq   88(<qp=int64#2),>mulrax=int64#7
2184# asm 2: movq   88(<qp=%rsi),>mulrax=%rax
2185movq   88(%rsi),%rax
2186
2187# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx0
2188# asm 1: mul  <mulx0=int64#9
2189# asm 2: mul  <mulx0=%r11
2190mul  %r11
2191
2192# qhasm:   carry? c3 += mulrax
2193# asm 1: add  <mulrax=int64#7,<c3=int64#13
2194# asm 2: add  <mulrax=%rax,<c3=%r15
2195add  %rax,%r15
2196
2197# qhasm:   mulr4 += mulrdx + carry
2198# asm 1: adc <mulrdx=int64#3,<mulr4=int64#4
2199# asm 2: adc <mulrdx=%rdx,<mulr4=%rcx
2200adc %rdx,%rcx
2201
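# Second row of the multiplication (*(rp+104)).  From this row on each
# partial product is folded in with the help of mulc: adc $0 lets mulrdx
# absorb the carry from adding the low word, then the previous column's
# mulc is added and the new mulc becomes mulrdx plus the carry of that
# addition, so exactly one 64-bit carry is passed from column to column.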
2202# qhasm:   mulx1 = *(uint64 *)(rp + 104)
2203# asm 1: movq   104(<rp=int64#1),>mulx1=int64#9
2204# asm 2: movq   104(<rp=%rdi),>mulx1=%r11
2205movq   104(%rdi),%r11
2206
2207# qhasm:   mulrax = *(uint64 *)(qp + 64)
2208# asm 1: movq   64(<qp=int64#2),>mulrax=int64#7
2209# asm 2: movq   64(<qp=%rsi),>mulrax=%rax
2210movq   64(%rsi),%rax
2211
2212# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx1
2213# asm 1: mul  <mulx1=int64#9
2214# asm 2: mul  <mulx1=%r11
2215mul  %r11
2216
2217# qhasm:   carry? c1 += mulrax
2218# asm 1: add  <mulrax=int64#7,<c1=int64#11
2219# asm 2: add  <mulrax=%rax,<c1=%r13
2220add  %rax,%r13
2221
2222# qhasm:   mulc = 0
2223# asm 1: mov  $0,>mulc=int64#14
2224# asm 2: mov  $0,>mulc=%rbx
2225mov  $0,%rbx
2226
2227# qhasm:   mulc += mulrdx + carry
2228# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
2229# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
2230adc %rdx,%rbx
2231
2232# qhasm:   mulrax = *(uint64 *)(qp + 72)
2233# asm 1: movq   72(<qp=int64#2),>mulrax=int64#7
2234# asm 2: movq   72(<qp=%rsi),>mulrax=%rax
2235movq   72(%rsi),%rax
2236
2237# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx1
2238# asm 1: mul  <mulx1=int64#9
2239# asm 2: mul  <mulx1=%r11
2240mul  %r11
2241
2242# qhasm:   carry? c2 += mulrax
2243# asm 1: add  <mulrax=int64#7,<c2=int64#12
2244# asm 2: add  <mulrax=%rax,<c2=%r14
2245add  %rax,%r14
2246
2247# qhasm:   mulrdx += 0 + carry
2248# asm 1: adc $0,<mulrdx=int64#3
2249# asm 2: adc $0,<mulrdx=%rdx
2250adc $0,%rdx
2251
2252# qhasm:   carry? c2 += mulc
2253# asm 1: add  <mulc=int64#14,<c2=int64#12
2254# asm 2: add  <mulc=%rbx,<c2=%r14
2255add  %rbx,%r14
2256
2257# qhasm:   mulc = 0
2258# asm 1: mov  $0,>mulc=int64#14
2259# asm 2: mov  $0,>mulc=%rbx
2260mov  $0,%rbx
2261
2262# qhasm:   mulc += mulrdx + carry
2263# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
2264# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
2265adc %rdx,%rbx
2266
2267# qhasm:   mulrax = *(uint64 *)(qp + 80)
2268# asm 1: movq   80(<qp=int64#2),>mulrax=int64#7
2269# asm 2: movq   80(<qp=%rsi),>mulrax=%rax
2270movq   80(%rsi),%rax
2271
2272# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx1
2273# asm 1: mul  <mulx1=int64#9
2274# asm 2: mul  <mulx1=%r11
2275mul  %r11
2276
2277# qhasm:   carry? c3 += mulrax
2278# asm 1: add  <mulrax=int64#7,<c3=int64#13
2279# asm 2: add  <mulrax=%rax,<c3=%r15
2280add  %rax,%r15
2281
2282# qhasm:   mulrdx += 0 + carry
2283# asm 1: adc $0,<mulrdx=int64#3
2284# asm 2: adc $0,<mulrdx=%rdx
2285adc $0,%rdx
2286
2287# qhasm:   carry? c3 += mulc
2288# asm 1: add  <mulc=int64#14,<c3=int64#13
2289# asm 2: add  <mulc=%rbx,<c3=%r15
2290add  %rbx,%r15
2291
2292# qhasm:   mulc = 0
2293# asm 1: mov  $0,>mulc=int64#14
2294# asm 2: mov  $0,>mulc=%rbx
2295mov  $0,%rbx
2296
2297# qhasm:   mulc += mulrdx + carry
2298# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
2299# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
2300adc %rdx,%rbx
2301
2302# qhasm:   mulrax = *(uint64 *)(qp + 88)
2303# asm 1: movq   88(<qp=int64#2),>mulrax=int64#7
2304# asm 2: movq   88(<qp=%rsi),>mulrax=%rax
2305movq   88(%rsi),%rax
2306
2307# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx1
2308# asm 1: mul  <mulx1=int64#9
2309# asm 2: mul  <mulx1=%r11
2310mul  %r11
2311
2312# qhasm:   carry? mulr4 += mulrax
2313# asm 1: add  <mulrax=int64#7,<mulr4=int64#4
2314# asm 2: add  <mulrax=%rax,<mulr4=%rcx
2315add  %rax,%rcx
2316
2317# qhasm:   mulrdx += 0 + carry
2318# asm 1: adc $0,<mulrdx=int64#3
2319# asm 2: adc $0,<mulrdx=%rdx
2320adc $0,%rdx
2321
2322# qhasm:   carry? mulr4 += mulc
2323# asm 1: add  <mulc=int64#14,<mulr4=int64#4
2324# asm 2: add  <mulc=%rbx,<mulr4=%rcx
2325add  %rbx,%rcx
2326
2327# qhasm:   mulr5 += mulrdx + carry
2328# asm 1: adc <mulrdx=int64#3,<mulr5=int64#5
2329# asm 2: adc <mulrdx=%rdx,<mulr5=%r8
2330adc %rdx,%r8
2331
2332# qhasm:   mulx2 = *(uint64 *)(rp + 112)
2333# asm 1: movq   112(<rp=int64#1),>mulx2=int64#9
2334# asm 2: movq   112(<rp=%rdi),>mulx2=%r11
2335movq   112(%rdi),%r11
2336
2337# qhasm:   mulrax = *(uint64 *)(qp + 64)
2338# asm 1: movq   64(<qp=int64#2),>mulrax=int64#7
2339# asm 2: movq   64(<qp=%rsi),>mulrax=%rax
2340movq   64(%rsi),%rax
2341
2342# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx2
2343# asm 1: mul  <mulx2=int64#9
2344# asm 2: mul  <mulx2=%r11
2345mul  %r11
2346
2347# qhasm:   carry? c2 += mulrax
2348# asm 1: add  <mulrax=int64#7,<c2=int64#12
2349# asm 2: add  <mulrax=%rax,<c2=%r14
2350add  %rax,%r14
2351
2352# qhasm:   mulc = 0
2353# asm 1: mov  $0,>mulc=int64#14
2354# asm 2: mov  $0,>mulc=%rbx
2355mov  $0,%rbx
2356
2357# qhasm:   mulc += mulrdx + carry
2358# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
2359# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
2360adc %rdx,%rbx
2361
2362# qhasm:   mulrax = *(uint64 *)(qp + 72)
2363# asm 1: movq   72(<qp=int64#2),>mulrax=int64#7
2364# asm 2: movq   72(<qp=%rsi),>mulrax=%rax
2365movq   72(%rsi),%rax
2366
2367# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx2
2368# asm 1: mul  <mulx2=int64#9
2369# asm 2: mul  <mulx2=%r11
2370mul  %r11
2371
2372# qhasm:   carry? c3 += mulrax
2373# asm 1: add  <mulrax=int64#7,<c3=int64#13
2374# asm 2: add  <mulrax=%rax,<c3=%r15
2375add  %rax,%r15
2376
2377# qhasm:   mulrdx += 0 + carry
2378# asm 1: adc $0,<mulrdx=int64#3
2379# asm 2: adc $0,<mulrdx=%rdx
2380adc $0,%rdx
2381
2382# qhasm:   carry? c3 += mulc
2383# asm 1: add  <mulc=int64#14,<c3=int64#13
2384# asm 2: add  <mulc=%rbx,<c3=%r15
2385add  %rbx,%r15
2386
2387# qhasm:   mulc = 0
2388# asm 1: mov  $0,>mulc=int64#14
2389# asm 2: mov  $0,>mulc=%rbx
2390mov  $0,%rbx
2391
2392# qhasm:   mulc += mulrdx + carry
2393# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
2394# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
2395adc %rdx,%rbx
2396
2397# qhasm:   mulrax = *(uint64 *)(qp + 80)
2398# asm 1: movq   80(<qp=int64#2),>mulrax=int64#7
2399# asm 2: movq   80(<qp=%rsi),>mulrax=%rax
2400movq   80(%rsi),%rax
2401
2402# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx2
2403# asm 1: mul  <mulx2=int64#9
2404# asm 2: mul  <mulx2=%r11
2405mul  %r11
2406
2407# qhasm:   carry? mulr4 += mulrax
2408# asm 1: add  <mulrax=int64#7,<mulr4=int64#4
2409# asm 2: add  <mulrax=%rax,<mulr4=%rcx
2410add  %rax,%rcx
2411
2412# qhasm:   mulrdx += 0 + carry
2413# asm 1: adc $0,<mulrdx=int64#3
2414# asm 2: adc $0,<mulrdx=%rdx
2415adc $0,%rdx
2416
2417# qhasm:   carry? mulr4 += mulc
2418# asm 1: add  <mulc=int64#14,<mulr4=int64#4
2419# asm 2: add  <mulc=%rbx,<mulr4=%rcx
2420add  %rbx,%rcx
2421
2422# qhasm:   mulc = 0
2423# asm 1: mov  $0,>mulc=int64#14
2424# asm 2: mov  $0,>mulc=%rbx
2425mov  $0,%rbx
2426
2427# qhasm:   mulc += mulrdx + carry
2428# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
2429# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
2430adc %rdx,%rbx
2431
2432# qhasm:   mulrax = *(uint64 *)(qp + 88)
2433# asm 1: movq   88(<qp=int64#2),>mulrax=int64#7
2434# asm 2: movq   88(<qp=%rsi),>mulrax=%rax
2435movq   88(%rsi),%rax
2436
2437# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx2
2438# asm 1: mul  <mulx2=int64#9
2439# asm 2: mul  <mulx2=%r11
2440mul  %r11
2441
2442# qhasm:   carry? mulr5 += mulrax
2443# asm 1: add  <mulrax=int64#7,<mulr5=int64#5
2444# asm 2: add  <mulrax=%rax,<mulr5=%r8
2445add  %rax,%r8
2446
2447# qhasm:   mulrdx += 0 + carry
2448# asm 1: adc $0,<mulrdx=int64#3
2449# asm 2: adc $0,<mulrdx=%rdx
2450adc $0,%rdx
2451
2452# qhasm:   carry? mulr5 += mulc
2453# asm 1: add  <mulc=int64#14,<mulr5=int64#5
2454# asm 2: add  <mulc=%rbx,<mulr5=%r8
2455add  %rbx,%r8
2456
2457# qhasm:   mulr6 += mulrdx + carry
2458# asm 1: adc <mulrdx=int64#3,<mulr6=int64#6
2459# asm 2: adc <mulrdx=%rdx,<mulr6=%r9
2460adc %rdx,%r9
2461
2462# qhasm:   mulx3 = *(uint64 *)(rp + 120)
2463# asm 1: movq   120(<rp=int64#1),>mulx3=int64#9
2464# asm 2: movq   120(<rp=%rdi),>mulx3=%r11
2465movq   120(%rdi),%r11
2466
2467# qhasm:   mulrax = *(uint64 *)(qp + 64)
2468# asm 1: movq   64(<qp=int64#2),>mulrax=int64#7
2469# asm 2: movq   64(<qp=%rsi),>mulrax=%rax
2470movq   64(%rsi),%rax
2471
2472# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx3
2473# asm 1: mul  <mulx3=int64#9
2474# asm 2: mul  <mulx3=%r11
2475mul  %r11
2476
2477# qhasm:   carry? c3 += mulrax
2478# asm 1: add  <mulrax=int64#7,<c3=int64#13
2479# asm 2: add  <mulrax=%rax,<c3=%r15
2480add  %rax,%r15
2481
2482# qhasm:   mulc = 0
2483# asm 1: mov  $0,>mulc=int64#14
2484# asm 2: mov  $0,>mulc=%rbx
2485mov  $0,%rbx
2486
2487# qhasm:   mulc += mulrdx + carry
2488# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
2489# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
2490adc %rdx,%rbx
2491
2492# qhasm:   mulrax = *(uint64 *)(qp + 72)
2493# asm 1: movq   72(<qp=int64#2),>mulrax=int64#7
2494# asm 2: movq   72(<qp=%rsi),>mulrax=%rax
2495movq   72(%rsi),%rax
2496
2497# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx3
2498# asm 1: mul  <mulx3=int64#9
2499# asm 2: mul  <mulx3=%r11
2500mul  %r11
2501
2502# qhasm:   carry? mulr4 += mulrax
2503# asm 1: add  <mulrax=int64#7,<mulr4=int64#4
2504# asm 2: add  <mulrax=%rax,<mulr4=%rcx
2505add  %rax,%rcx
2506
2507# qhasm:   mulrdx += 0 + carry
2508# asm 1: adc $0,<mulrdx=int64#3
2509# asm 2: adc $0,<mulrdx=%rdx
2510adc $0,%rdx
2511
2512# qhasm:   carry? mulr4 += mulc
2513# asm 1: add  <mulc=int64#14,<mulr4=int64#4
2514# asm 2: add  <mulc=%rbx,<mulr4=%rcx
2515add  %rbx,%rcx
2516
2517# qhasm:   mulc = 0
2518# asm 1: mov  $0,>mulc=int64#14
2519# asm 2: mov  $0,>mulc=%rbx
2520mov  $0,%rbx
2521
2522# qhasm:   mulc += mulrdx + carry
2523# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
2524# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
2525adc %rdx,%rbx
2526
2527# qhasm:   mulrax = *(uint64 *)(qp + 80)
2528# asm 1: movq   80(<qp=int64#2),>mulrax=int64#7
2529# asm 2: movq   80(<qp=%rsi),>mulrax=%rax
2530movq   80(%rsi),%rax
2531
2532# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx3
2533# asm 1: mul  <mulx3=int64#9
2534# asm 2: mul  <mulx3=%r11
2535mul  %r11
2536
2537# qhasm:   carry? mulr5 += mulrax
2538# asm 1: add  <mulrax=int64#7,<mulr5=int64#5
2539# asm 2: add  <mulrax=%rax,<mulr5=%r8
2540add  %rax,%r8
2541
2542# qhasm:   mulrdx += 0 + carry
2543# asm 1: adc $0,<mulrdx=int64#3
2544# asm 2: adc $0,<mulrdx=%rdx
2545adc $0,%rdx
2546
2547# qhasm:   carry? mulr5 += mulc
2548# asm 1: add  <mulc=int64#14,<mulr5=int64#5
2549# asm 2: add  <mulc=%rbx,<mulr5=%r8
2550add  %rbx,%r8
2551
2552# qhasm:   mulc = 0
2553# asm 1: mov  $0,>mulc=int64#14
2554# asm 2: mov  $0,>mulc=%rbx
2555mov  $0,%rbx
2556
2557# qhasm:   mulc += mulrdx + carry
2558# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
2559# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
2560adc %rdx,%rbx
2561
2562# qhasm:   mulrax = *(uint64 *)(qp + 88)
2563# asm 1: movq   88(<qp=int64#2),>mulrax=int64#7
2564# asm 2: movq   88(<qp=%rsi),>mulrax=%rax
2565movq   88(%rsi),%rax
2566
2567# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx3
2568# asm 1: mul  <mulx3=int64#9
2569# asm 2: mul  <mulx3=%r11
2570mul  %r11
2571
2572# qhasm:   carry? mulr6 += mulrax
2573# asm 1: add  <mulrax=int64#7,<mulr6=int64#6
2574# asm 2: add  <mulrax=%rax,<mulr6=%r9
2575add  %rax,%r9
2576
2577# qhasm:   mulrdx += 0 + carry
2578# asm 1: adc $0,<mulrdx=int64#3
2579# asm 2: adc $0,<mulrdx=%rdx
2580adc $0,%rdx
2581
2582# qhasm:   carry? mulr6 += mulc
2583# asm 1: add  <mulc=int64#14,<mulr6=int64#6
2584# asm 2: add  <mulc=%rbx,<mulr6=%r9
2585add  %rbx,%r9
2586
2587# qhasm:   mulr7 += mulrdx + carry
2588# asm 1: adc <mulrdx=int64#3,<mulr7=int64#8
2589# asm 2: adc <mulrdx=%rdx,<mulr7=%r10
2590adc %rdx,%r10
2591
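# Reduce the 512-bit product: the high limbs mulr4..mulr7 are multiplied by
# the 64-bit constant crypto_sign_ed25519_amd64_64_38, giving a five-limb
# value mulr4..mulr8 that is added onto the low limbs below.  This uses
# 2^256 = 38 (mod 2^255-19), consistent with the constant's name.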
2592# qhasm:   mulrax = mulr4
2593# asm 1: mov  <mulr4=int64#4,>mulrax=int64#7
2594# asm 2: mov  <mulr4=%rcx,>mulrax=%rax
2595mov  %rcx,%rax
2596
2597# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
2598mulq  crypto_sign_ed25519_amd64_64_38
2599
2600# qhasm:   mulr4 = mulrax
2601# asm 1: mov  <mulrax=int64#7,>mulr4=int64#2
2602# asm 2: mov  <mulrax=%rax,>mulr4=%rsi
2603mov  %rax,%rsi
2604
2605# qhasm:   mulrax = mulr5
2606# asm 1: mov  <mulr5=int64#5,>mulrax=int64#7
2607# asm 2: mov  <mulr5=%r8,>mulrax=%rax
2608mov  %r8,%rax
2609
2610# qhasm:   mulr5 = mulrdx
2611# asm 1: mov  <mulrdx=int64#3,>mulr5=int64#4
2612# asm 2: mov  <mulrdx=%rdx,>mulr5=%rcx
2613mov  %rdx,%rcx
2614
2615# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
2616mulq  crypto_sign_ed25519_amd64_64_38
2617
2618# qhasm:   carry? mulr5 += mulrax
2619# asm 1: add  <mulrax=int64#7,<mulr5=int64#4
2620# asm 2: add  <mulrax=%rax,<mulr5=%rcx
2621add  %rax,%rcx
2622
2623# qhasm:   mulrax = mulr6
2624# asm 1: mov  <mulr6=int64#6,>mulrax=int64#7
2625# asm 2: mov  <mulr6=%r9,>mulrax=%rax
2626mov  %r9,%rax
2627
2628# qhasm:   mulr6 = 0
2629# asm 1: mov  $0,>mulr6=int64#5
2630# asm 2: mov  $0,>mulr6=%r8
2631mov  $0,%r8
2632
2633# qhasm:   mulr6 += mulrdx + carry
2634# asm 1: adc <mulrdx=int64#3,<mulr6=int64#5
2635# asm 2: adc <mulrdx=%rdx,<mulr6=%r8
2636adc %rdx,%r8
2637
2638# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
2639mulq  crypto_sign_ed25519_amd64_64_38
2640
2641# qhasm:   carry? mulr6 += mulrax
2642# asm 1: add  <mulrax=int64#7,<mulr6=int64#5
2643# asm 2: add  <mulrax=%rax,<mulr6=%r8
2644add  %rax,%r8
2645
2646# qhasm:   mulrax = mulr7
2647# asm 1: mov  <mulr7=int64#8,>mulrax=int64#7
2648# asm 2: mov  <mulr7=%r10,>mulrax=%rax
2649mov  %r10,%rax
2650
2651# qhasm:   mulr7 = 0
2652# asm 1: mov  $0,>mulr7=int64#6
2653# asm 2: mov  $0,>mulr7=%r9
2654mov  $0,%r9
2655
2656# qhasm:   mulr7 += mulrdx + carry
2657# asm 1: adc <mulrdx=int64#3,<mulr7=int64#6
2658# asm 2: adc <mulrdx=%rdx,<mulr7=%r9
2659adc %rdx,%r9
2660
2661# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
2662mulq  crypto_sign_ed25519_amd64_64_38
2663
2664# qhasm:   carry? mulr7 += mulrax
2665# asm 1: add  <mulrax=int64#7,<mulr7=int64#6
2666# asm 2: add  <mulrax=%rax,<mulr7=%r9
2667add  %rax,%r9
2668
2669# qhasm:   mulr8 = 0
2670# asm 1: mov  $0,>mulr8=int64#7
2671# asm 2: mov  $0,>mulr8=%rax
2672mov  $0,%rax
2673
2674# qhasm:   mulr8 += mulrdx + carry
2675# asm 1: adc <mulrdx=int64#3,<mulr8=int64#7
2676# asm 2: adc <mulrdx=%rdx,<mulr8=%rax
2677adc %rdx,%rax
2678
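# The folded high half is added onto c0..c3.  The carry out is absorbed
# into mulr8, mulr8 is multiplied by 38 and added back into c0, and a last
# possible carry becomes one more +38 on c0 (via mulzero), so c0..c3 now
# hold the product in four limbs (not necessarily fully reduced below
# 2^255-19).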
2679# qhasm:   carry? c0 += mulr4
2680# asm 1: add  <mulr4=int64#2,<c0=int64#10
2681# asm 2: add  <mulr4=%rsi,<c0=%r12
2682add  %rsi,%r12
2683
2684# qhasm:   carry? c1 += mulr5 + carry
2685# asm 1: adc <mulr5=int64#4,<c1=int64#11
2686# asm 2: adc <mulr5=%rcx,<c1=%r13
2687adc %rcx,%r13
2688
2689# qhasm:   carry? c2 += mulr6 + carry
2690# asm 1: adc <mulr6=int64#5,<c2=int64#12
2691# asm 2: adc <mulr6=%r8,<c2=%r14
2692adc %r8,%r14
2693
2694# qhasm:   carry? c3 += mulr7 + carry
2695# asm 1: adc <mulr7=int64#6,<c3=int64#13
2696# asm 2: adc <mulr7=%r9,<c3=%r15
2697adc %r9,%r15
2698
2699# qhasm:   mulzero = 0
2700# asm 1: mov  $0,>mulzero=int64#2
2701# asm 2: mov  $0,>mulzero=%rsi
2702mov  $0,%rsi
2703
2704# qhasm:   mulr8 += mulzero + carry
2705# asm 1: adc <mulzero=int64#2,<mulr8=int64#7
2706# asm 2: adc <mulzero=%rsi,<mulr8=%rax
2707adc %rsi,%rax
2708
2709# qhasm:   mulr8 *= 38
2710# asm 1: imulq  $38,<mulr8=int64#7,>mulr8=int64#3
2711# asm 2: imulq  $38,<mulr8=%rax,>mulr8=%rdx
2712imulq  $38,%rax,%rdx
2713
2714# qhasm:   carry? c0 += mulr8
2715# asm 1: add  <mulr8=int64#3,<c0=int64#10
2716# asm 2: add  <mulr8=%rdx,<c0=%r12
2717add  %rdx,%r12
2718
2719# qhasm:   carry? c1 += mulzero + carry
2720# asm 1: adc <mulzero=int64#2,<c1=int64#11
2721# asm 2: adc <mulzero=%rsi,<c1=%r13
2722adc %rsi,%r13
2723
2724# qhasm:   carry? c2 += mulzero + carry
2725# asm 1: adc <mulzero=int64#2,<c2=int64#12
2726# asm 2: adc <mulzero=%rsi,<c2=%r14
2727adc %rsi,%r14
2728
2729# qhasm:   carry? c3 += mulzero + carry
2730# asm 1: adc <mulzero=int64#2,<c3=int64#13
2731# asm 2: adc <mulzero=%rsi,<c3=%r15
2732adc %rsi,%r15
2733
2734# qhasm:   mulzero += mulzero + carry
2735# asm 1: adc <mulzero=int64#2,<mulzero=int64#2
2736# asm 2: adc <mulzero=%rsi,<mulzero=%rsi
2737adc %rsi,%rsi
2738
2739# qhasm:   mulzero *= 38
2740# asm 1: imulq  $38,<mulzero=int64#2,>mulzero=int64#2
2741# asm 2: imulq  $38,<mulzero=%rsi,>mulzero=%rsi
2742imulq  $38,%rsi,%rsi
2743
2744# qhasm:   c0 += mulzero
2745# asm 1: add  <mulzero=int64#2,<c0=int64#10
2746# asm 2: add  <mulzero=%rsi,<c0=%r12
2747add  %rsi,%r12
2748
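# Load the four limbs at rp+64..95 (presumably the Z coordinate) into
# f0..f3 and double them with an add/adc chain; the carry out is folded
# back by the same conditional +38 pattern as above.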
2749# qhasm: f0 = *(uint64 *)(rp + 64)
2750# asm 1: movq   64(<rp=int64#1),>f0=int64#2
2751# asm 2: movq   64(<rp=%rdi),>f0=%rsi
2752movq   64(%rdi),%rsi
2753
2754# qhasm: f1 = *(uint64 *)(rp + 72)
2755# asm 1: movq   72(<rp=int64#1),>f1=int64#3
2756# asm 2: movq   72(<rp=%rdi),>f1=%rdx
2757movq   72(%rdi),%rdx
2758
2759# qhasm: f2 = *(uint64 *)(rp + 80)
2760# asm 1: movq   80(<rp=int64#1),>f2=int64#4
2761# asm 2: movq   80(<rp=%rdi),>f2=%rcx
2762movq   80(%rdi),%rcx
2763
2764# qhasm: f3 = *(uint64 *)(rp + 88)
2765# asm 1: movq   88(<rp=int64#1),>f3=int64#5
2766# asm 2: movq   88(<rp=%rdi),>f3=%r8
2767movq   88(%rdi),%r8
2768
2769# qhasm:   carry? f0 += f0
2770# asm 1: add  <f0=int64#2,<f0=int64#2
2771# asm 2: add  <f0=%rsi,<f0=%rsi
2772add  %rsi,%rsi
2773
2774# qhasm:   carry? f1 += f1 + carry
2775# asm 1: adc <f1=int64#3,<f1=int64#3
2776# asm 2: adc <f1=%rdx,<f1=%rdx
2777adc %rdx,%rdx
2778
2779# qhasm:   carry? f2 += f2 + carry
2780# asm 1: adc <f2=int64#4,<f2=int64#4
2781# asm 2: adc <f2=%rcx,<f2=%rcx
2782adc %rcx,%rcx
2783
2784# qhasm:   carry? f3 += f3 + carry
2785# asm 1: adc <f3=int64#5,<f3=int64#5
2786# asm 2: adc <f3=%r8,<f3=%r8
2787adc %r8,%r8
2788
2789# qhasm:   addt0 = 0
2790# asm 1: mov  $0,>addt0=int64#6
2791# asm 2: mov  $0,>addt0=%r9
2792mov  $0,%r9
2793
2794# qhasm:   addt1 = 38
2795# asm 1: mov  $38,>addt1=int64#7
2796# asm 2: mov  $38,>addt1=%rax
2797mov  $38,%rax
2798
2799# qhasm:   addt1 = addt0 if !carry
2800# asm 1: cmovae <addt0=int64#6,<addt1=int64#7
2801# asm 2: cmovae <addt0=%r9,<addt1=%rax
2802cmovae %r9,%rax
2803
2804# qhasm:   carry? f0 += addt1
2805# asm 1: add  <addt1=int64#7,<f0=int64#2
2806# asm 2: add  <addt1=%rax,<f0=%rsi
2807add  %rax,%rsi
2808
2809# qhasm:   carry? f1 += addt0 + carry
2810# asm 1: adc <addt0=int64#6,<f1=int64#3
2811# asm 2: adc <addt0=%r9,<f1=%rdx
2812adc %r9,%rdx
2813
2814# qhasm:   carry? f2 += addt0 + carry
2815# asm 1: adc <addt0=int64#6,<f2=int64#4
2816# asm 2: adc <addt0=%r9,<f2=%rcx
2817adc %r9,%rcx
2818
2819# qhasm:   carry? f3 += addt0 + carry
2820# asm 1: adc <addt0=int64#6,<f3=int64#5
2821# asm 2: adc <addt0=%r9,<f3=%r8
2822adc %r9,%r8
2823
2824# qhasm:   addt0 = addt1 if carry
2825# asm 1: cmovc <addt1=int64#7,<addt0=int64#6
2826# asm 2: cmovc <addt1=%rax,<addt0=%r9
2827cmovc %rax,%r9
2828
2829# qhasm:   f0 += addt0
2830# asm 1: add  <addt0=int64#6,<f0=int64#2
2831# asm 2: add  <addt0=%r9,<f0=%rsi
2832add  %r9,%rsi
2833
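# Keep the doubled value twice: g0..g3 gets a copy so that both the
# difference f - c (just below) and the sum g + c (further down) can be
# formed from it.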
2834# qhasm: g0 = f0
2835# asm 1: mov  <f0=int64#2,>g0=int64#6
2836# asm 2: mov  <f0=%rsi,>g0=%r9
2837mov  %rsi,%r9
2838
2839# qhasm: g1 = f1
2840# asm 1: mov  <f1=int64#3,>g1=int64#7
2841# asm 2: mov  <f1=%rdx,>g1=%rax
2842mov  %rdx,%rax
2843
2844# qhasm: g2 = f2
2845# asm 1: mov  <f2=int64#4,>g2=int64#8
2846# asm 2: mov  <f2=%rcx,>g2=%r10
2847mov  %rcx,%r10
2848
2849# qhasm: g3 = f3
2850# asm 1: mov  <f3=int64#5,>g3=int64#9
2851# asm 2: mov  <f3=%r8,>g3=%r11
2852mov  %r8,%r11
2853
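# f -= c, again working modulo 2^255-19: a sub/sbb chain over the four
# limbs, followed by the subtractive twin of the carry fix-up.  If the
# subtraction borrowed, 38 is conditionally subtracted once (cmovae zeroes
# subt1 when there was no borrow), and a possible second borrow is handled
# by one more conditional subtraction.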
2854# qhasm:   carry? f0 -= c0
2855# asm 1: sub  <c0=int64#10,<f0=int64#2
2856# asm 2: sub  <c0=%r12,<f0=%rsi
2857sub  %r12,%rsi
2858
2859# qhasm:   carry? f1 -= c1 - carry
2860# asm 1: sbb  <c1=int64#11,<f1=int64#3
2861# asm 2: sbb  <c1=%r13,<f1=%rdx
2862sbb  %r13,%rdx
2863
2864# qhasm:   carry? f2 -= c2 - carry
2865# asm 1: sbb  <c2=int64#12,<f2=int64#4
2866# asm 2: sbb  <c2=%r14,<f2=%rcx
2867sbb  %r14,%rcx
2868
2869# qhasm:   carry? f3 -= c3 - carry
2870# asm 1: sbb  <c3=int64#13,<f3=int64#5
2871# asm 2: sbb  <c3=%r15,<f3=%r8
2872sbb  %r15,%r8
2873
2874# qhasm:   subt0 = 0
2875# asm 1: mov  $0,>subt0=int64#14
2876# asm 2: mov  $0,>subt0=%rbx
2877mov  $0,%rbx
2878
2879# qhasm:   subt1 = 38
2880# asm 1: mov  $38,>subt1=int64#15
2881# asm 2: mov  $38,>subt1=%rbp
2882mov  $38,%rbp
2883
2884# qhasm:   subt1 = subt0 if !carry
2885# asm 1: cmovae <subt0=int64#14,<subt1=int64#15
2886# asm 2: cmovae <subt0=%rbx,<subt1=%rbp
2887cmovae %rbx,%rbp
2888
2889# qhasm:   carry? f0 -= subt1
2890# asm 1: sub  <subt1=int64#15,<f0=int64#2
2891# asm 2: sub  <subt1=%rbp,<f0=%rsi
2892sub  %rbp,%rsi
2893
2894# qhasm:   carry? f1 -= subt0 - carry
2895# asm 1: sbb  <subt0=int64#14,<f1=int64#3
2896# asm 2: sbb  <subt0=%rbx,<f1=%rdx
2897sbb  %rbx,%rdx
2898
2899# qhasm:   carry? f2 -= subt0 - carry
2900# asm 1: sbb  <subt0=int64#14,<f2=int64#4
2901# asm 2: sbb  <subt0=%rbx,<f2=%rcx
2902sbb  %rbx,%rcx
2903
2904# qhasm:   carry? f3 -= subt0 - carry
2905# asm 1: sbb  <subt0=int64#14,<f3=int64#5
2906# asm 2: sbb  <subt0=%rbx,<f3=%r8
2907sbb  %rbx,%r8
2908
2909# qhasm:   subt0 = subt1 if carry
2910# asm 1: cmovc <subt1=int64#15,<subt0=int64#14
2911# asm 2: cmovc <subt1=%rbp,<subt0=%rbx
2912cmovc %rbp,%rbx
2913
2914# qhasm:   f0 -= subt0
2915# asm 1: sub  <subt0=int64#14,<f0=int64#2
2916# asm 2: sub  <subt0=%rbx,<f0=%rsi
2917sub  %rbx,%rsi
2918
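# g += c with the usual add/adc chain and conditional +38 carry fold; g and
# f now hold the sum and difference that feed the remaining multiplications.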
2919# qhasm:   carry? g0 += c0
2920# asm 1: add  <c0=int64#10,<g0=int64#6
2921# asm 2: add  <c0=%r12,<g0=%r9
2922add  %r12,%r9
2923
2924# qhasm:   carry? g1 += c1 + carry
2925# asm 1: adc <c1=int64#11,<g1=int64#7
2926# asm 2: adc <c1=%r13,<g1=%rax
2927adc %r13,%rax
2928
2929# qhasm:   carry? g2 += c2 + carry
2930# asm 1: adc <c2=int64#12,<g2=int64#8
2931# asm 2: adc <c2=%r14,<g2=%r10
2932adc %r14,%r10
2933
2934# qhasm:   carry? g3 += c3 + carry
2935# asm 1: adc <c3=int64#13,<g3=int64#9
2936# asm 2: adc <c3=%r15,<g3=%r11
2937adc %r15,%r11
2938
2939# qhasm:   addt0 = 0
2940# asm 1: mov  $0,>addt0=int64#10
2941# asm 2: mov  $0,>addt0=%r12
2942mov  $0,%r12
2943
2944# qhasm:   addt1 = 38
2945# asm 1: mov  $38,>addt1=int64#11
2946# asm 2: mov  $38,>addt1=%r13
2947mov  $38,%r13
2948
2949# qhasm:   addt1 = addt0 if !carry
2950# asm 1: cmovae <addt0=int64#10,<addt1=int64#11
2951# asm 2: cmovae <addt0=%r12,<addt1=%r13
2952cmovae %r12,%r13
2953
2954# qhasm:   carry? g0 += addt1
2955# asm 1: add  <addt1=int64#11,<g0=int64#6
2956# asm 2: add  <addt1=%r13,<g0=%r9
2957add  %r13,%r9
2958
2959# qhasm:   carry? g1 += addt0 + carry
2960# asm 1: adc <addt0=int64#10,<g1=int64#7
2961# asm 2: adc <addt0=%r12,<g1=%rax
2962adc %r12,%rax
2963
2964# qhasm:   carry? g2 += addt0 + carry
2965# asm 1: adc <addt0=int64#10,<g2=int64#8
2966# asm 2: adc <addt0=%r12,<g2=%r10
2967adc %r12,%r10
2968
2969# qhasm:   carry? g3 += addt0 + carry
2970# asm 1: adc <addt0=int64#10,<g3=int64#9
2971# asm 2: adc <addt0=%r12,<g3=%r11
2972adc %r12,%r11
2973
2974# qhasm:   addt0 = addt1 if carry
2975# asm 1: cmovc <addt1=int64#11,<addt0=int64#10
2976# asm 2: cmovc <addt1=%r13,<addt0=%r12
2977cmovc %r13,%r12
2978
2979# qhasm:   g0 += addt0
2980# asm 1: add  <addt0=int64#10,<g0=int64#6
2981# asm 2: add  <addt0=%r12,<g0=%r9
2982add  %r12,%r9
2983
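# Spill g0..g3 and f0..f3 to 120(%rsp)..176(%rsp); both are used as
# multiplicands below while their registers are recycled.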
2984# qhasm: g0_stack = g0
2985# asm 1: movq <g0=int64#6,>g0_stack=stack64#16
2986# asm 2: movq <g0=%r9,>g0_stack=120(%rsp)
2987movq %r9,120(%rsp)
2988
2989# qhasm: g1_stack = g1
2990# asm 1: movq <g1=int64#7,>g1_stack=stack64#17
2991# asm 2: movq <g1=%rax,>g1_stack=128(%rsp)
2992movq %rax,128(%rsp)
2993
2994# qhasm: g2_stack = g2
2995# asm 1: movq <g2=int64#8,>g2_stack=stack64#18
2996# asm 2: movq <g2=%r10,>g2_stack=136(%rsp)
2997movq %r10,136(%rsp)
2998
2999# qhasm: g3_stack = g3
3000# asm 1: movq <g3=int64#9,>g3_stack=stack64#19
3001# asm 2: movq <g3=%r11,>g3_stack=144(%rsp)
3002movq %r11,144(%rsp)
3003
3004# qhasm: f0_stack = f0
3005# asm 1: movq <f0=int64#2,>f0_stack=stack64#20
3006# asm 2: movq <f0=%rsi,>f0_stack=152(%rsp)
3007movq %rsi,152(%rsp)
3008
3009# qhasm: f1_stack = f1
3010# asm 1: movq <f1=int64#3,>f1_stack=stack64#21
3011# asm 2: movq <f1=%rdx,>f1_stack=160(%rsp)
3012movq %rdx,160(%rsp)
3013
3014# qhasm: f2_stack = f2
3015# asm 1: movq <f2=int64#4,>f2_stack=stack64#22
3016# asm 2: movq <f2=%rcx,>f2_stack=168(%rsp)
3017movq %rcx,168(%rsp)
3018
3019# qhasm: f3_stack = f3
3020# asm 1: movq <f3=int64#5,>f3_stack=stack64#23
3021# asm 2: movq <f3=%r8,>f3_stack=176(%rsp)
3022movq %r8,176(%rsp)
3023
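# Second 4x4 schoolbook multiplication, structured exactly like the first:
# the saved e0..e3 limbs (88..112(%rsp)) times the saved f0..f3 limbs
# (152..176(%rsp)), accumulated into rx0..rx3 and mulr4..mulr7.  In the
# point-addition reading this would be X3 = E*F, but the code is the same
# generic multiply-and-reduce.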
3024# qhasm:   mulr4 = 0
3025# asm 1: mov  $0,>mulr4=int64#2
3026# asm 2: mov  $0,>mulr4=%rsi
3027mov  $0,%rsi
3028
3029# qhasm:   mulr5 = 0
3030# asm 1: mov  $0,>mulr5=int64#4
3031# asm 2: mov  $0,>mulr5=%rcx
3032mov  $0,%rcx
3033
3034# qhasm:   mulr6 = 0
3035# asm 1: mov  $0,>mulr6=int64#5
3036# asm 2: mov  $0,>mulr6=%r8
3037mov  $0,%r8
3038
3039# qhasm:   mulr7 = 0
3040# asm 1: mov  $0,>mulr7=int64#6
3041# asm 2: mov  $0,>mulr7=%r9
3042mov  $0,%r9
3043
3044# qhasm:   mulx0 = e0_stack
3045# asm 1: movq <e0_stack=stack64#12,>mulx0=int64#8
3046# asm 2: movq <e0_stack=88(%rsp),>mulx0=%r10
3047movq 88(%rsp),%r10
3048
3049# qhasm:   mulrax = f0_stack
3050# asm 1: movq <f0_stack=stack64#20,>mulrax=int64#7
3051# asm 2: movq <f0_stack=152(%rsp),>mulrax=%rax
3052movq 152(%rsp),%rax
3053
3054# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx0
3055# asm 1: mul  <mulx0=int64#8
3056# asm 2: mul  <mulx0=%r10
3057mul  %r10
3058
3059# qhasm:   rx0 = mulrax
3060# asm 1: mov  <mulrax=int64#7,>rx0=int64#9
3061# asm 2: mov  <mulrax=%rax,>rx0=%r11
3062mov  %rax,%r11
3063
3064# qhasm:   rx1 = mulrdx
3065# asm 1: mov  <mulrdx=int64#3,>rx1=int64#10
3066# asm 2: mov  <mulrdx=%rdx,>rx1=%r12
3067mov  %rdx,%r12
3068
3069# qhasm:   mulrax = f1_stack
3070# asm 1: movq <f1_stack=stack64#21,>mulrax=int64#7
3071# asm 2: movq <f1_stack=160(%rsp),>mulrax=%rax
3072movq 160(%rsp),%rax
3073
3074# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx0
3075# asm 1: mul  <mulx0=int64#8
3076# asm 2: mul  <mulx0=%r10
3077mul  %r10
3078
3079# qhasm:   carry? rx1 += mulrax
3080# asm 1: add  <mulrax=int64#7,<rx1=int64#10
3081# asm 2: add  <mulrax=%rax,<rx1=%r12
3082add  %rax,%r12
3083
3084# qhasm:   rx2 = 0
3085# asm 1: mov  $0,>rx2=int64#11
3086# asm 2: mov  $0,>rx2=%r13
3087mov  $0,%r13
3088
3089# qhasm:   rx2 += mulrdx + carry
3090# asm 1: adc <mulrdx=int64#3,<rx2=int64#11
3091# asm 2: adc <mulrdx=%rdx,<rx2=%r13
3092adc %rdx,%r13
3093
3094# qhasm:   mulrax = f2_stack
3095# asm 1: movq <f2_stack=stack64#22,>mulrax=int64#7
3096# asm 2: movq <f2_stack=168(%rsp),>mulrax=%rax
3097movq 168(%rsp),%rax
3098
3099# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx0
3100# asm 1: mul  <mulx0=int64#8
3101# asm 2: mul  <mulx0=%r10
3102mul  %r10
3103
3104# qhasm:   carry? rx2 += mulrax
3105# asm 1: add  <mulrax=int64#7,<rx2=int64#11
3106# asm 2: add  <mulrax=%rax,<rx2=%r13
3107add  %rax,%r13
3108
3109# qhasm:   rx3 = 0
3110# asm 1: mov  $0,>rx3=int64#12
3111# asm 2: mov  $0,>rx3=%r14
3112mov  $0,%r14
3113
3114# qhasm:   rx3 += mulrdx + carry
3115# asm 1: adc <mulrdx=int64#3,<rx3=int64#12
3116# asm 2: adc <mulrdx=%rdx,<rx3=%r14
3117adc %rdx,%r14
3118
3119# qhasm:   mulrax = f3_stack
3120# asm 1: movq <f3_stack=stack64#23,>mulrax=int64#7
3121# asm 2: movq <f3_stack=176(%rsp),>mulrax=%rax
3122movq 176(%rsp),%rax
3123
3124# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx0
3125# asm 1: mul  <mulx0=int64#8
3126# asm 2: mul  <mulx0=%r10
3127mul  %r10
3128
3129# qhasm:   carry? rx3 += mulrax
3130# asm 1: add  <mulrax=int64#7,<rx3=int64#12
3131# asm 2: add  <mulrax=%rax,<rx3=%r14
3132add  %rax,%r14
3133
3134# qhasm:   mulr4 += mulrdx + carry
3135# asm 1: adc <mulrdx=int64#3,<mulr4=int64#2
3136# asm 2: adc <mulrdx=%rdx,<mulr4=%rsi
3137adc %rdx,%rsi
3138
3139# qhasm:   mulx1 = e1_stack
3140# asm 1: movq <e1_stack=stack64#13,>mulx1=int64#8
3141# asm 2: movq <e1_stack=96(%rsp),>mulx1=%r10
3142movq 96(%rsp),%r10
3143
3144# qhasm:   mulrax = f0_stack
3145# asm 1: movq <f0_stack=stack64#20,>mulrax=int64#7
3146# asm 2: movq <f0_stack=152(%rsp),>mulrax=%rax
3147movq 152(%rsp),%rax
3148
3149# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx1
3150# asm 1: mul  <mulx1=int64#8
3151# asm 2: mul  <mulx1=%r10
3152mul  %r10
3153
3154# qhasm:   carry? rx1 += mulrax
3155# asm 1: add  <mulrax=int64#7,<rx1=int64#10
3156# asm 2: add  <mulrax=%rax,<rx1=%r12
3157add  %rax,%r12
3158
3159# qhasm:   mulc = 0
3160# asm 1: mov  $0,>mulc=int64#13
3161# asm 2: mov  $0,>mulc=%r15
3162mov  $0,%r15
3163
3164# qhasm:   mulc += mulrdx + carry
3165# asm 1: adc <mulrdx=int64#3,<mulc=int64#13
3166# asm 2: adc <mulrdx=%rdx,<mulc=%r15
3167adc %rdx,%r15
3168
3169# qhasm:   mulrax = f1_stack
3170# asm 1: movq <f1_stack=stack64#21,>mulrax=int64#7
3171# asm 2: movq <f1_stack=160(%rsp),>mulrax=%rax
3172movq 160(%rsp),%rax
3173
3174# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx1
3175# asm 1: mul  <mulx1=int64#8
3176# asm 2: mul  <mulx1=%r10
3177mul  %r10
3178
3179# qhasm:   carry? rx2 += mulrax
3180# asm 1: add  <mulrax=int64#7,<rx2=int64#11
3181# asm 2: add  <mulrax=%rax,<rx2=%r13
3182add  %rax,%r13
3183
3184# qhasm:   mulrdx += 0 + carry
3185# asm 1: adc $0,<mulrdx=int64#3
3186# asm 2: adc $0,<mulrdx=%rdx
3187adc $0,%rdx
3188
3189# qhasm:   carry? rx2 += mulc
3190# asm 1: add  <mulc=int64#13,<rx2=int64#11
3191# asm 2: add  <mulc=%r15,<rx2=%r13
3192add  %r15,%r13
3193
3194# qhasm:   mulc = 0
3195# asm 1: mov  $0,>mulc=int64#13
3196# asm 2: mov  $0,>mulc=%r15
3197mov  $0,%r15
3198
3199# qhasm:   mulc += mulrdx + carry
3200# asm 1: adc <mulrdx=int64#3,<mulc=int64#13
3201# asm 2: adc <mulrdx=%rdx,<mulc=%r15
3202adc %rdx,%r15
3203
3204# qhasm:   mulrax = f2_stack
3205# asm 1: movq <f2_stack=stack64#22,>mulrax=int64#7
3206# asm 2: movq <f2_stack=168(%rsp),>mulrax=%rax
3207movq 168(%rsp),%rax
3208
3209# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx1
3210# asm 1: mul  <mulx1=int64#8
3211# asm 2: mul  <mulx1=%r10
3212mul  %r10
3213
3214# qhasm:   carry? rx3 += mulrax
3215# asm 1: add  <mulrax=int64#7,<rx3=int64#12
3216# asm 2: add  <mulrax=%rax,<rx3=%r14
3217add  %rax,%r14
3218
3219# qhasm:   mulrdx += 0 + carry
3220# asm 1: adc $0,<mulrdx=int64#3
3221# asm 2: adc $0,<mulrdx=%rdx
3222adc $0,%rdx
3223
3224# qhasm:   carry? rx3 += mulc
3225# asm 1: add  <mulc=int64#13,<rx3=int64#12
3226# asm 2: add  <mulc=%r15,<rx3=%r14
3227add  %r15,%r14
3228
3229# qhasm:   mulc = 0
3230# asm 1: mov  $0,>mulc=int64#13
3231# asm 2: mov  $0,>mulc=%r15
3232mov  $0,%r15
3233
3234# qhasm:   mulc += mulrdx + carry
3235# asm 1: adc <mulrdx=int64#3,<mulc=int64#13
3236# asm 2: adc <mulrdx=%rdx,<mulc=%r15
3237adc %rdx,%r15
3238
3239# qhasm:   mulrax = f3_stack
3240# asm 1: movq <f3_stack=stack64#23,>mulrax=int64#7
3241# asm 2: movq <f3_stack=176(%rsp),>mulrax=%rax
3242movq 176(%rsp),%rax
3243
3244# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx1
3245# asm 1: mul  <mulx1=int64#8
3246# asm 2: mul  <mulx1=%r10
3247mul  %r10
3248
3249# qhasm:   carry? mulr4 += mulrax
3250# asm 1: add  <mulrax=int64#7,<mulr4=int64#2
3251# asm 2: add  <mulrax=%rax,<mulr4=%rsi
3252add  %rax,%rsi
3253
3254# qhasm:   mulrdx += 0 + carry
3255# asm 1: adc $0,<mulrdx=int64#3
3256# asm 2: adc $0,<mulrdx=%rdx
3257adc $0,%rdx
3258
3259# qhasm:   carry? mulr4 += mulc
3260# asm 1: add  <mulc=int64#13,<mulr4=int64#2
3261# asm 2: add  <mulc=%r15,<mulr4=%rsi
3262add  %r15,%rsi
3263
3264# qhasm:   mulr5 += mulrdx + carry
3265# asm 1: adc <mulrdx=int64#3,<mulr5=int64#4
3266# asm 2: adc <mulrdx=%rdx,<mulr5=%rcx
3267adc %rdx,%rcx
3268
3269# qhasm:   mulx2 = e2_stack
3270# asm 1: movq <e2_stack=stack64#14,>mulx2=int64#8
3271# asm 2: movq <e2_stack=104(%rsp),>mulx2=%r10
3272movq 104(%rsp),%r10
3273
3274# qhasm:   mulrax = f0_stack
3275# asm 1: movq <f0_stack=stack64#20,>mulrax=int64#7
3276# asm 2: movq <f0_stack=152(%rsp),>mulrax=%rax
3277movq 152(%rsp),%rax
3278
3279# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx2
3280# asm 1: mul  <mulx2=int64#8
3281# asm 2: mul  <mulx2=%r10
3282mul  %r10
3283
3284# qhasm:   carry? rx2 += mulrax
3285# asm 1: add  <mulrax=int64#7,<rx2=int64#11
3286# asm 2: add  <mulrax=%rax,<rx2=%r13
3287add  %rax,%r13
3288
3289# qhasm:   mulc = 0
3290# asm 1: mov  $0,>mulc=int64#13
3291# asm 2: mov  $0,>mulc=%r15
3292mov  $0,%r15
3293
3294# qhasm:   mulc += mulrdx + carry
3295# asm 1: adc <mulrdx=int64#3,<mulc=int64#13
3296# asm 2: adc <mulrdx=%rdx,<mulc=%r15
3297adc %rdx,%r15
3298
3299# qhasm:   mulrax = f1_stack
3300# asm 1: movq <f1_stack=stack64#21,>mulrax=int64#7
3301# asm 2: movq <f1_stack=160(%rsp),>mulrax=%rax
3302movq 160(%rsp),%rax
3303
3304# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx2
3305# asm 1: mul  <mulx2=int64#8
3306# asm 2: mul  <mulx2=%r10
3307mul  %r10
3308
3309# qhasm:   carry? rx3 += mulrax
3310# asm 1: add  <mulrax=int64#7,<rx3=int64#12
3311# asm 2: add  <mulrax=%rax,<rx3=%r14
3312add  %rax,%r14
3313
3314# qhasm:   mulrdx += 0 + carry
3315# asm 1: adc $0,<mulrdx=int64#3
3316# asm 2: adc $0,<mulrdx=%rdx
3317adc $0,%rdx
3318
3319# qhasm:   carry? rx3 += mulc
3320# asm 1: add  <mulc=int64#13,<rx3=int64#12
3321# asm 2: add  <mulc=%r15,<rx3=%r14
3322add  %r15,%r14
3323
3324# qhasm:   mulc = 0
3325# asm 1: mov  $0,>mulc=int64#13
3326# asm 2: mov  $0,>mulc=%r15
3327mov  $0,%r15
3328
3329# qhasm:   mulc += mulrdx + carry
3330# asm 1: adc <mulrdx=int64#3,<mulc=int64#13
3331# asm 2: adc <mulrdx=%rdx,<mulc=%r15
3332adc %rdx,%r15
3333
3334# qhasm:   mulrax = f2_stack
3335# asm 1: movq <f2_stack=stack64#22,>mulrax=int64#7
3336# asm 2: movq <f2_stack=168(%rsp),>mulrax=%rax
3337movq 168(%rsp),%rax
3338
3339# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx2
3340# asm 1: mul  <mulx2=int64#8
3341# asm 2: mul  <mulx2=%r10
3342mul  %r10
3343
3344# qhasm:   carry? mulr4 += mulrax
3345# asm 1: add  <mulrax=int64#7,<mulr4=int64#2
3346# asm 2: add  <mulrax=%rax,<mulr4=%rsi
3347add  %rax,%rsi
3348
3349# qhasm:   mulrdx += 0 + carry
3350# asm 1: adc $0,<mulrdx=int64#3
3351# asm 2: adc $0,<mulrdx=%rdx
3352adc $0,%rdx
3353
3354# qhasm:   carry? mulr4 += mulc
3355# asm 1: add  <mulc=int64#13,<mulr4=int64#2
3356# asm 2: add  <mulc=%r15,<mulr4=%rsi
3357add  %r15,%rsi
3358
3359# qhasm:   mulc = 0
3360# asm 1: mov  $0,>mulc=int64#13
3361# asm 2: mov  $0,>mulc=%r15
3362mov  $0,%r15
3363
3364# qhasm:   mulc += mulrdx + carry
3365# asm 1: adc <mulrdx=int64#3,<mulc=int64#13
3366# asm 2: adc <mulrdx=%rdx,<mulc=%r15
3367adc %rdx,%r15
3368
3369# qhasm:   mulrax = f3_stack
3370# asm 1: movq <f3_stack=stack64#23,>mulrax=int64#7
3371# asm 2: movq <f3_stack=176(%rsp),>mulrax=%rax
3372movq 176(%rsp),%rax
3373
3374# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx2
3375# asm 1: mul  <mulx2=int64#8
3376# asm 2: mul  <mulx2=%r10
3377mul  %r10
3378
3379# qhasm:   carry? mulr5 += mulrax
3380# asm 1: add  <mulrax=int64#7,<mulr5=int64#4
3381# asm 2: add  <mulrax=%rax,<mulr5=%rcx
3382add  %rax,%rcx
3383
3384# qhasm:   mulrdx += 0 + carry
3385# asm 1: adc $0,<mulrdx=int64#3
3386# asm 2: adc $0,<mulrdx=%rdx
3387adc $0,%rdx
3388
3389# qhasm:   carry? mulr5 += mulc
3390# asm 1: add  <mulc=int64#13,<mulr5=int64#4
3391# asm 2: add  <mulc=%r15,<mulr5=%rcx
3392add  %r15,%rcx
3393
3394# qhasm:   mulr6 += mulrdx + carry
3395# asm 1: adc <mulrdx=int64#3,<mulr6=int64#5
3396# asm 2: adc <mulrdx=%rdx,<mulr6=%r8
3397adc %rdx,%r8
3398
3399# qhasm:   mulx3 = e3_stack
3400# asm 1: movq <e3_stack=stack64#15,>mulx3=int64#8
3401# asm 2: movq <e3_stack=112(%rsp),>mulx3=%r10
3402movq 112(%rsp),%r10
3403
3404# qhasm:   mulrax = f0_stack
3405# asm 1: movq <f0_stack=stack64#20,>mulrax=int64#7
3406# asm 2: movq <f0_stack=152(%rsp),>mulrax=%rax
3407movq 152(%rsp),%rax
3408
3409# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx3
3410# asm 1: mul  <mulx3=int64#8
3411# asm 2: mul  <mulx3=%r10
3412mul  %r10
3413
3414# qhasm:   carry? rx3 += mulrax
3415# asm 1: add  <mulrax=int64#7,<rx3=int64#12
3416# asm 2: add  <mulrax=%rax,<rx3=%r14
3417add  %rax,%r14
3418
3419# qhasm:   mulc = 0
3420# asm 1: mov  $0,>mulc=int64#13
3421# asm 2: mov  $0,>mulc=%r15
3422mov  $0,%r15
3423
3424# qhasm:   mulc += mulrdx + carry
3425# asm 1: adc <mulrdx=int64#3,<mulc=int64#13
3426# asm 2: adc <mulrdx=%rdx,<mulc=%r15
3427adc %rdx,%r15
3428
3429# qhasm:   mulrax = f1_stack
3430# asm 1: movq <f1_stack=stack64#21,>mulrax=int64#7
3431# asm 2: movq <f1_stack=160(%rsp),>mulrax=%rax
3432movq 160(%rsp),%rax
3433
3434# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx3
3435# asm 1: mul  <mulx3=int64#8
3436# asm 2: mul  <mulx3=%r10
3437mul  %r10
3438
3439# qhasm:   carry? mulr4 += mulrax
3440# asm 1: add  <mulrax=int64#7,<mulr4=int64#2
3441# asm 2: add  <mulrax=%rax,<mulr4=%rsi
3442add  %rax,%rsi
3443
3444# qhasm:   mulrdx += 0 + carry
3445# asm 1: adc $0,<mulrdx=int64#3
3446# asm 2: adc $0,<mulrdx=%rdx
3447adc $0,%rdx
3448
3449# qhasm:   carry? mulr4 += mulc
3450# asm 1: add  <mulc=int64#13,<mulr4=int64#2
3451# asm 2: add  <mulc=%r15,<mulr4=%rsi
3452add  %r15,%rsi
3453
3454# qhasm:   mulc = 0
3455# asm 1: mov  $0,>mulc=int64#13
3456# asm 2: mov  $0,>mulc=%r15
3457mov  $0,%r15
3458
3459# qhasm:   mulc += mulrdx + carry
3460# asm 1: adc <mulrdx=int64#3,<mulc=int64#13
3461# asm 2: adc <mulrdx=%rdx,<mulc=%r15
3462adc %rdx,%r15
3463
3464# qhasm:   mulrax = f2_stack
3465# asm 1: movq <f2_stack=stack64#22,>mulrax=int64#7
3466# asm 2: movq <f2_stack=168(%rsp),>mulrax=%rax
3467movq 168(%rsp),%rax
3468
3469# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx3
3470# asm 1: mul  <mulx3=int64#8
3471# asm 2: mul  <mulx3=%r10
3472mul  %r10
3473
3474# qhasm:   carry? mulr5 += mulrax
3475# asm 1: add  <mulrax=int64#7,<mulr5=int64#4
3476# asm 2: add  <mulrax=%rax,<mulr5=%rcx
3477add  %rax,%rcx
3478
3479# qhasm:   mulrdx += 0 + carry
3480# asm 1: adc $0,<mulrdx=int64#3
3481# asm 2: adc $0,<mulrdx=%rdx
3482adc $0,%rdx
3483
3484# qhasm:   carry? mulr5 += mulc
3485# asm 1: add  <mulc=int64#13,<mulr5=int64#4
3486# asm 2: add  <mulc=%r15,<mulr5=%rcx
3487add  %r15,%rcx
3488
3489# qhasm:   mulc = 0
3490# asm 1: mov  $0,>mulc=int64#13
3491# asm 2: mov  $0,>mulc=%r15
3492mov  $0,%r15
3493
3494# qhasm:   mulc += mulrdx + carry
3495# asm 1: adc <mulrdx=int64#3,<mulc=int64#13
3496# asm 2: adc <mulrdx=%rdx,<mulc=%r15
3497adc %rdx,%r15
3498
3499# qhasm:   mulrax = f3_stack
3500# asm 1: movq <f3_stack=stack64#23,>mulrax=int64#7
3501# asm 2: movq <f3_stack=176(%rsp),>mulrax=%rax
3502movq 176(%rsp),%rax
3503
3504# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx3
3505# asm 1: mul  <mulx3=int64#8
3506# asm 2: mul  <mulx3=%r10
3507mul  %r10
3508
3509# qhasm:   carry? mulr6 += mulrax
3510# asm 1: add  <mulrax=int64#7,<mulr6=int64#5
3511# asm 2: add  <mulrax=%rax,<mulr6=%r8
3512add  %rax,%r8
3513
3514# qhasm:   mulrdx += 0 + carry
3515# asm 1: adc $0,<mulrdx=int64#3
3516# asm 2: adc $0,<mulrdx=%rdx
3517adc $0,%rdx
3518
3519# qhasm:   carry? mulr6 += mulc
3520# asm 1: add  <mulc=int64#13,<mulr6=int64#5
3521# asm 2: add  <mulc=%r15,<mulr6=%r8
3522add  %r15,%r8
3523
3524# qhasm:   mulr7 += mulrdx + carry
3525# asm 1: adc <mulrdx=int64#3,<mulr7=int64#6
3526# asm 2: adc <mulrdx=%rdx,<mulr7=%r9
3527adc %rdx,%r9
3528
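# High half of e*f folded down by 38 and added in, exactly as after the
# first multiplication.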
3529# qhasm:   mulrax = mulr4
3530# asm 1: mov  <mulr4=int64#2,>mulrax=int64#7
3531# asm 2: mov  <mulr4=%rsi,>mulrax=%rax
3532mov  %rsi,%rax
3533
3534# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
3535mulq  crypto_sign_ed25519_amd64_64_38
3536
3537# qhasm:   mulr4 = mulrax
3538# asm 1: mov  <mulrax=int64#7,>mulr4=int64#2
3539# asm 2: mov  <mulrax=%rax,>mulr4=%rsi
3540mov  %rax,%rsi
3541
3542# qhasm:   mulrax = mulr5
3543# asm 1: mov  <mulr5=int64#4,>mulrax=int64#7
3544# asm 2: mov  <mulr5=%rcx,>mulrax=%rax
3545mov  %rcx,%rax
3546
3547# qhasm:   mulr5 = mulrdx
3548# asm 1: mov  <mulrdx=int64#3,>mulr5=int64#4
3549# asm 2: mov  <mulrdx=%rdx,>mulr5=%rcx
3550mov  %rdx,%rcx
3551
3552# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
3553mulq  crypto_sign_ed25519_amd64_64_38
3554
3555# qhasm:   carry? mulr5 += mulrax
3556# asm 1: add  <mulrax=int64#7,<mulr5=int64#4
3557# asm 2: add  <mulrax=%rax,<mulr5=%rcx
3558add  %rax,%rcx
3559
3560# qhasm:   mulrax = mulr6
3561# asm 1: mov  <mulr6=int64#5,>mulrax=int64#7
3562# asm 2: mov  <mulr6=%r8,>mulrax=%rax
3563mov  %r8,%rax
3564
3565# qhasm:   mulr6 = 0
3566# asm 1: mov  $0,>mulr6=int64#5
3567# asm 2: mov  $0,>mulr6=%r8
3568mov  $0,%r8
3569
3570# qhasm:   mulr6 += mulrdx + carry
3571# asm 1: adc <mulrdx=int64#3,<mulr6=int64#5
3572# asm 2: adc <mulrdx=%rdx,<mulr6=%r8
3573adc %rdx,%r8
3574
3575# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
3576mulq  crypto_sign_ed25519_amd64_64_38
3577
3578# qhasm:   carry? mulr6 += mulrax
3579# asm 1: add  <mulrax=int64#7,<mulr6=int64#5
3580# asm 2: add  <mulrax=%rax,<mulr6=%r8
3581add  %rax,%r8
3582
3583# qhasm:   mulrax = mulr7
3584# asm 1: mov  <mulr7=int64#6,>mulrax=int64#7
3585# asm 2: mov  <mulr7=%r9,>mulrax=%rax
3586mov  %r9,%rax
3587
3588# qhasm:   mulr7 = 0
3589# asm 1: mov  $0,>mulr7=int64#6
3590# asm 2: mov  $0,>mulr7=%r9
3591mov  $0,%r9
3592
3593# qhasm:   mulr7 += mulrdx + carry
3594# asm 1: adc <mulrdx=int64#3,<mulr7=int64#6
3595# asm 2: adc <mulrdx=%rdx,<mulr7=%r9
3596adc %rdx,%r9
3597
3598# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
3599mulq  crypto_sign_ed25519_amd64_64_38
3600
3601# qhasm:   carry? mulr7 += mulrax
3602# asm 1: add  <mulrax=int64#7,<mulr7=int64#6
3603# asm 2: add  <mulrax=%rax,<mulr7=%r9
3604add  %rax,%r9
3605
3606# qhasm:   mulr8 = 0
3607# asm 1: mov  $0,>mulr8=int64#7
3608# asm 2: mov  $0,>mulr8=%rax
3609mov  $0,%rax
3610
3611# qhasm:   mulr8 += mulrdx + carry
3612# asm 1: adc <mulrdx=int64#3,<mulr8=int64#7
3613# asm 2: adc <mulrdx=%rdx,<mulr8=%rax
3614adc %rdx,%rax
3615
3616# qhasm:   carry? rx0 += mulr4
3617# asm 1: add  <mulr4=int64#2,<rx0=int64#9
3618# asm 2: add  <mulr4=%rsi,<rx0=%r11
3619add  %rsi,%r11
3620
3621# qhasm:   carry? rx1 += mulr5 + carry
3622# asm 1: adc <mulr5=int64#4,<rx1=int64#10
3623# asm 2: adc <mulr5=%rcx,<rx1=%r12
3624adc %rcx,%r12
3625
3626# qhasm:   carry? rx2 += mulr6 + carry
3627# asm 1: adc <mulr6=int64#5,<rx2=int64#11
3628# asm 2: adc <mulr6=%r8,<rx2=%r13
3629adc %r8,%r13
3630
3631# qhasm:   carry? rx3 += mulr7 + carry
3632# asm 1: adc <mulr7=int64#6,<rx3=int64#12
3633# asm 2: adc <mulr7=%r9,<rx3=%r14
3634adc %r9,%r14
3635
3636# qhasm:   mulzero = 0
3637# asm 1: mov  $0,>mulzero=int64#2
3638# asm 2: mov  $0,>mulzero=%rsi
3639mov  $0,%rsi
3640
3641# qhasm:   mulr8 += mulzero + carry
3642# asm 1: adc <mulzero=int64#2,<mulr8=int64#7
3643# asm 2: adc <mulzero=%rsi,<mulr8=%rax
3644adc %rsi,%rax
3645
3646# qhasm:   mulr8 *= 38
3647# asm 1: imulq  $38,<mulr8=int64#7,>mulr8=int64#3
3648# asm 2: imulq  $38,<mulr8=%rax,>mulr8=%rdx
3649imulq  $38,%rax,%rdx
3650
3651# qhasm:   carry? rx0 += mulr8
3652# asm 1: add  <mulr8=int64#3,<rx0=int64#9
3653# asm 2: add  <mulr8=%rdx,<rx0=%r11
3654add  %rdx,%r11
3655
3656# qhasm:   carry? rx1 += mulzero + carry
3657# asm 1: adc <mulzero=int64#2,<rx1=int64#10
3658# asm 2: adc <mulzero=%rsi,<rx1=%r12
3659adc %rsi,%r12
3660
3661# qhasm:   carry? rx2 += mulzero + carry
3662# asm 1: adc <mulzero=int64#2,<rx2=int64#11
3663# asm 2: adc <mulzero=%rsi,<rx2=%r13
3664adc %rsi,%r13
3665
3666# qhasm:   carry? rx3 += mulzero + carry
3667# asm 1: adc <mulzero=int64#2,<rx3=int64#12
3668# asm 2: adc <mulzero=%rsi,<rx3=%r14
3669adc %rsi,%r14
3670
3671# qhasm:   mulzero += mulzero + carry
3672# asm 1: adc <mulzero=int64#2,<mulzero=int64#2
3673# asm 2: adc <mulzero=%rsi,<mulzero=%rsi
3674adc %rsi,%rsi
3675
3676# qhasm:   mulzero *= 38
3677# asm 1: imulq  $38,<mulzero=int64#2,>mulzero=int64#2
3678# asm 2: imulq  $38,<mulzero=%rsi,>mulzero=%rsi
3679imulq  $38,%rsi,%rsi
3680
3681# qhasm:   rx0 += mulzero
3682# asm 1: add  <mulzero=int64#2,<rx0=int64#9
3683# asm 2: add  <mulzero=%rsi,<rx0=%r11
3684add  %rsi,%r11
3685
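# The reduced rx0..rx3 is written to rp+0..31, the first 256-bit field of
# the result (presumably the X coordinate).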
3686# qhasm: *(uint64 *)(rp + 0) = rx0
3687# asm 1: movq   <rx0=int64#9,0(<rp=int64#1)
3688# asm 2: movq   <rx0=%r11,0(<rp=%rdi)
3689movq   %r11,0(%rdi)
3690
3691# qhasm: *(uint64 *)(rp + 8) = rx1
3692# asm 1: movq   <rx1=int64#10,8(<rp=int64#1)
3693# asm 2: movq   <rx1=%r12,8(<rp=%rdi)
3694movq   %r12,8(%rdi)
3695
3696# qhasm: *(uint64 *)(rp + 16) = rx2
3697# asm 1: movq   <rx2=int64#11,16(<rp=int64#1)
3698# asm 2: movq   <rx2=%r13,16(<rp=%rdi)
3699movq   %r13,16(%rdi)
3700
3701# qhasm: *(uint64 *)(rp + 24) = rx3
3702# asm 1: movq   <rx3=int64#12,24(<rp=int64#1)
3703# asm 2: movq   <rx3=%r14,24(<rp=%rdi)
3704movq   %r14,24(%rdi)
3705
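# Third multiplication of the same shape: the spilled h0..h3 (56..80(%rsp))
# times the spilled g0..g3 (120..144(%rsp)), accumulated into ry0..ry3 and
# mulr4..mulr7; presumably Y3 = H*G in the addition formulas.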
3706# qhasm:   mulr4 = 0
3707# asm 1: mov  $0,>mulr4=int64#2
3708# asm 2: mov  $0,>mulr4=%rsi
3709mov  $0,%rsi
3710
3711# qhasm:   mulr5 = 0
3712# asm 1: mov  $0,>mulr5=int64#4
3713# asm 2: mov  $0,>mulr5=%rcx
3714mov  $0,%rcx
3715
3716# qhasm:   mulr6 = 0
3717# asm 1: mov  $0,>mulr6=int64#5
3718# asm 2: mov  $0,>mulr6=%r8
3719mov  $0,%r8
3720
3721# qhasm:   mulr7 = 0
3722# asm 1: mov  $0,>mulr7=int64#6
3723# asm 2: mov  $0,>mulr7=%r9
3724mov  $0,%r9
3725
3726# qhasm:   mulx0 = h0_stack
3727# asm 1: movq <h0_stack=stack64#8,>mulx0=int64#8
3728# asm 2: movq <h0_stack=56(%rsp),>mulx0=%r10
3729movq 56(%rsp),%r10
3730
3731# qhasm:   mulrax = g0_stack
3732# asm 1: movq <g0_stack=stack64#16,>mulrax=int64#7
3733# asm 2: movq <g0_stack=120(%rsp),>mulrax=%rax
3734movq 120(%rsp),%rax
3735
3736# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx0
3737# asm 1: mul  <mulx0=int64#8
3738# asm 2: mul  <mulx0=%r10
3739mul  %r10
3740
3741# qhasm:   ry0 = mulrax
3742# asm 1: mov  <mulrax=int64#7,>ry0=int64#9
3743# asm 2: mov  <mulrax=%rax,>ry0=%r11
3744mov  %rax,%r11
3745
3746# qhasm:   ry1 = mulrdx
3747# asm 1: mov  <mulrdx=int64#3,>ry1=int64#10
3748# asm 2: mov  <mulrdx=%rdx,>ry1=%r12
3749mov  %rdx,%r12
3750
3751# qhasm:   mulrax = g1_stack
3752# asm 1: movq <g1_stack=stack64#17,>mulrax=int64#7
3753# asm 2: movq <g1_stack=128(%rsp),>mulrax=%rax
3754movq 128(%rsp),%rax
3755
3756# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx0
3757# asm 1: mul  <mulx0=int64#8
3758# asm 2: mul  <mulx0=%r10
3759mul  %r10
3760
3761# qhasm:   carry? ry1 += mulrax
3762# asm 1: add  <mulrax=int64#7,<ry1=int64#10
3763# asm 2: add  <mulrax=%rax,<ry1=%r12
3764add  %rax,%r12
3765
3766# qhasm:   ry2 = 0
3767# asm 1: mov  $0,>ry2=int64#11
3768# asm 2: mov  $0,>ry2=%r13
3769mov  $0,%r13
3770
3771# qhasm:   ry2 += mulrdx + carry
3772# asm 1: adc <mulrdx=int64#3,<ry2=int64#11
3773# asm 2: adc <mulrdx=%rdx,<ry2=%r13
3774adc %rdx,%r13
3775
3776# qhasm:   mulrax = g2_stack
3777# asm 1: movq <g2_stack=stack64#18,>mulrax=int64#7
3778# asm 2: movq <g2_stack=136(%rsp),>mulrax=%rax
3779movq 136(%rsp),%rax
3780
3781# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx0
3782# asm 1: mul  <mulx0=int64#8
3783# asm 2: mul  <mulx0=%r10
3784mul  %r10
3785
3786# qhasm:   carry? ry2 += mulrax
3787# asm 1: add  <mulrax=int64#7,<ry2=int64#11
3788# asm 2: add  <mulrax=%rax,<ry2=%r13
3789add  %rax,%r13
3790
3791# qhasm:   ry3 = 0
3792# asm 1: mov  $0,>ry3=int64#12
3793# asm 2: mov  $0,>ry3=%r14
3794mov  $0,%r14
3795
3796# qhasm:   ry3 += mulrdx + carry
3797# asm 1: adc <mulrdx=int64#3,<ry3=int64#12
3798# asm 2: adc <mulrdx=%rdx,<ry3=%r14
3799adc %rdx,%r14
3800
3801# qhasm:   mulrax = g3_stack
3802# asm 1: movq <g3_stack=stack64#19,>mulrax=int64#7
3803# asm 2: movq <g3_stack=144(%rsp),>mulrax=%rax
3804movq 144(%rsp),%rax
3805
3806# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx0
3807# asm 1: mul  <mulx0=int64#8
3808# asm 2: mul  <mulx0=%r10
3809mul  %r10
3810
3811# qhasm:   carry? ry3 += mulrax
3812# asm 1: add  <mulrax=int64#7,<ry3=int64#12
3813# asm 2: add  <mulrax=%rax,<ry3=%r14
3814add  %rax,%r14
3815
3816# qhasm:   mulr4 += mulrdx + carry
3817# asm 1: adc <mulrdx=int64#3,<mulr4=int64#2
3818# asm 2: adc <mulrdx=%rdx,<mulr4=%rsi
3819adc %rdx,%rsi
3820
3821# qhasm:   mulx1 = h1_stack
3822# asm 1: movq <h1_stack=stack64#9,>mulx1=int64#8
3823# asm 2: movq <h1_stack=64(%rsp),>mulx1=%r10
3824movq 64(%rsp),%r10
3825
3826# qhasm:   mulrax = g0_stack
3827# asm 1: movq <g0_stack=stack64#16,>mulrax=int64#7
3828# asm 2: movq <g0_stack=120(%rsp),>mulrax=%rax
3829movq 120(%rsp),%rax
3830
3831# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx1
3832# asm 1: mul  <mulx1=int64#8
3833# asm 2: mul  <mulx1=%r10
3834mul  %r10
3835
3836# qhasm:   carry? ry1 += mulrax
3837# asm 1: add  <mulrax=int64#7,<ry1=int64#10
3838# asm 2: add  <mulrax=%rax,<ry1=%r12
3839add  %rax,%r12
3840
3841# qhasm:   mulc = 0
3842# asm 1: mov  $0,>mulc=int64#13
3843# asm 2: mov  $0,>mulc=%r15
3844mov  $0,%r15
3845
3846# qhasm:   mulc += mulrdx + carry
3847# asm 1: adc <mulrdx=int64#3,<mulc=int64#13
3848# asm 2: adc <mulrdx=%rdx,<mulc=%r15
3849adc %rdx,%r15
3850
3851# qhasm:   mulrax = g1_stack
3852# asm 1: movq <g1_stack=stack64#17,>mulrax=int64#7
3853# asm 2: movq <g1_stack=128(%rsp),>mulrax=%rax
3854movq 128(%rsp),%rax
3855
3856# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx1
3857# asm 1: mul  <mulx1=int64#8
3858# asm 2: mul  <mulx1=%r10
3859mul  %r10
3860
3861# qhasm:   carry? ry2 += mulrax
3862# asm 1: add  <mulrax=int64#7,<ry2=int64#11
3863# asm 2: add  <mulrax=%rax,<ry2=%r13
3864add  %rax,%r13
3865
3866# qhasm:   mulrdx += 0 + carry
3867# asm 1: adc $0,<mulrdx=int64#3
3868# asm 2: adc $0,<mulrdx=%rdx
3869adc $0,%rdx
3870
3871# qhasm:   carry? ry2 += mulc
3872# asm 1: add  <mulc=int64#13,<ry2=int64#11
3873# asm 2: add  <mulc=%r15,<ry2=%r13
3874add  %r15,%r13
3875
3876# qhasm:   mulc = 0
3877# asm 1: mov  $0,>mulc=int64#13
3878# asm 2: mov  $0,>mulc=%r15
3879mov  $0,%r15
3880
3881# qhasm:   mulc += mulrdx + carry
3882# asm 1: adc <mulrdx=int64#3,<mulc=int64#13
3883# asm 2: adc <mulrdx=%rdx,<mulc=%r15
3884adc %rdx,%r15
3885
3886# qhasm:   mulrax = g2_stack
3887# asm 1: movq <g2_stack=stack64#18,>mulrax=int64#7
3888# asm 2: movq <g2_stack=136(%rsp),>mulrax=%rax
3889movq 136(%rsp),%rax
3890
3891# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx1
3892# asm 1: mul  <mulx1=int64#8
3893# asm 2: mul  <mulx1=%r10
3894mul  %r10
3895
3896# qhasm:   carry? ry3 += mulrax
3897# asm 1: add  <mulrax=int64#7,<ry3=int64#12
3898# asm 2: add  <mulrax=%rax,<ry3=%r14
3899add  %rax,%r14
3900
3901# qhasm:   mulrdx += 0 + carry
3902# asm 1: adc $0,<mulrdx=int64#3
3903# asm 2: adc $0,<mulrdx=%rdx
3904adc $0,%rdx
3905
3906# qhasm:   carry? ry3 += mulc
3907# asm 1: add  <mulc=int64#13,<ry3=int64#12
3908# asm 2: add  <mulc=%r15,<ry3=%r14
3909add  %r15,%r14
3910
3911# qhasm:   mulc = 0
3912# asm 1: mov  $0,>mulc=int64#13
3913# asm 2: mov  $0,>mulc=%r15
3914mov  $0,%r15
3915
3916# qhasm:   mulc += mulrdx + carry
3917# asm 1: adc <mulrdx=int64#3,<mulc=int64#13
3918# asm 2: adc <mulrdx=%rdx,<mulc=%r15
3919adc %rdx,%r15
3920
3921# qhasm:   mulrax = g3_stack
3922# asm 1: movq <g3_stack=stack64#19,>mulrax=int64#7
3923# asm 2: movq <g3_stack=144(%rsp),>mulrax=%rax
3924movq 144(%rsp),%rax
3925
3926# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx1
3927# asm 1: mul  <mulx1=int64#8
3928# asm 2: mul  <mulx1=%r10
3929mul  %r10
3930
3931# qhasm:   carry? mulr4 += mulrax
3932# asm 1: add  <mulrax=int64#7,<mulr4=int64#2
3933# asm 2: add  <mulrax=%rax,<mulr4=%rsi
3934add  %rax,%rsi
3935
3936# qhasm:   mulrdx += 0 + carry
3937# asm 1: adc $0,<mulrdx=int64#3
3938# asm 2: adc $0,<mulrdx=%rdx
3939adc $0,%rdx
3940
3941# qhasm:   carry? mulr4 += mulc
3942# asm 1: add  <mulc=int64#13,<mulr4=int64#2
3943# asm 2: add  <mulc=%r15,<mulr4=%rsi
3944add  %r15,%rsi
3945
3946# qhasm:   mulr5 += mulrdx + carry
3947# asm 1: adc <mulrdx=int64#3,<mulr5=int64#4
3948# asm 2: adc <mulrdx=%rdx,<mulr5=%rcx
3949adc %rdx,%rcx
3950
3951# qhasm:   mulx2 = h2_stack
3952# asm 1: movq <h2_stack=stack64#10,>mulx2=int64#8
3953# asm 2: movq <h2_stack=72(%rsp),>mulx2=%r10
3954movq 72(%rsp),%r10
3955
3956# qhasm:   mulrax = g0_stack
3957# asm 1: movq <g0_stack=stack64#16,>mulrax=int64#7
3958# asm 2: movq <g0_stack=120(%rsp),>mulrax=%rax
3959movq 120(%rsp),%rax
3960
3961# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx2
3962# asm 1: mul  <mulx2=int64#8
3963# asm 2: mul  <mulx2=%r10
3964mul  %r10
3965
3966# qhasm:   carry? ry2 += mulrax
3967# asm 1: add  <mulrax=int64#7,<ry2=int64#11
3968# asm 2: add  <mulrax=%rax,<ry2=%r13
3969add  %rax,%r13
3970
3971# qhasm:   mulc = 0
3972# asm 1: mov  $0,>mulc=int64#13
3973# asm 2: mov  $0,>mulc=%r15
3974mov  $0,%r15
3975
3976# qhasm:   mulc += mulrdx + carry
3977# asm 1: adc <mulrdx=int64#3,<mulc=int64#13
3978# asm 2: adc <mulrdx=%rdx,<mulc=%r15
3979adc %rdx,%r15
3980
3981# qhasm:   mulrax = g1_stack
3982# asm 1: movq <g1_stack=stack64#17,>mulrax=int64#7
3983# asm 2: movq <g1_stack=128(%rsp),>mulrax=%rax
3984movq 128(%rsp),%rax
3985
3986# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx2
3987# asm 1: mul  <mulx2=int64#8
3988# asm 2: mul  <mulx2=%r10
3989mul  %r10
3990
3991# qhasm:   carry? ry3 += mulrax
3992# asm 1: add  <mulrax=int64#7,<ry3=int64#12
3993# asm 2: add  <mulrax=%rax,<ry3=%r14
3994add  %rax,%r14
3995
3996# qhasm:   mulrdx += 0 + carry
3997# asm 1: adc $0,<mulrdx=int64#3
3998# asm 2: adc $0,<mulrdx=%rdx
3999adc $0,%rdx
4000
4001# qhasm:   carry? ry3 += mulc
4002# asm 1: add  <mulc=int64#13,<ry3=int64#12
4003# asm 2: add  <mulc=%r15,<ry3=%r14
4004add  %r15,%r14
4005
4006# qhasm:   mulc = 0
4007# asm 1: mov  $0,>mulc=int64#13
4008# asm 2: mov  $0,>mulc=%r15
4009mov  $0,%r15
4010
4011# qhasm:   mulc += mulrdx + carry
4012# asm 1: adc <mulrdx=int64#3,<mulc=int64#13
4013# asm 2: adc <mulrdx=%rdx,<mulc=%r15
4014adc %rdx,%r15
4015
4016# qhasm:   mulrax = g2_stack
4017# asm 1: movq <g2_stack=stack64#18,>mulrax=int64#7
4018# asm 2: movq <g2_stack=136(%rsp),>mulrax=%rax
4019movq 136(%rsp),%rax
4020
4021# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx2
4022# asm 1: mul  <mulx2=int64#8
4023# asm 2: mul  <mulx2=%r10
4024mul  %r10
4025
4026# qhasm:   carry? mulr4 += mulrax
4027# asm 1: add  <mulrax=int64#7,<mulr4=int64#2
4028# asm 2: add  <mulrax=%rax,<mulr4=%rsi
4029add  %rax,%rsi
4030
4031# qhasm:   mulrdx += 0 + carry
4032# asm 1: adc $0,<mulrdx=int64#3
4033# asm 2: adc $0,<mulrdx=%rdx
4034adc $0,%rdx
4035
4036# qhasm:   carry? mulr4 += mulc
4037# asm 1: add  <mulc=int64#13,<mulr4=int64#2
4038# asm 2: add  <mulc=%r15,<mulr4=%rsi
4039add  %r15,%rsi
4040
4041# qhasm:   mulc = 0
4042# asm 1: mov  $0,>mulc=int64#13
4043# asm 2: mov  $0,>mulc=%r15
4044mov  $0,%r15
4045
4046# qhasm:   mulc += mulrdx + carry
4047# asm 1: adc <mulrdx=int64#3,<mulc=int64#13
4048# asm 2: adc <mulrdx=%rdx,<mulc=%r15
4049adc %rdx,%r15
4050
4051# qhasm:   mulrax = g3_stack
4052# asm 1: movq <g3_stack=stack64#19,>mulrax=int64#7
4053# asm 2: movq <g3_stack=144(%rsp),>mulrax=%rax
4054movq 144(%rsp),%rax
4055
4056# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx2
4057# asm 1: mul  <mulx2=int64#8
4058# asm 2: mul  <mulx2=%r10
4059mul  %r10
4060
4061# qhasm:   carry? mulr5 += mulrax
4062# asm 1: add  <mulrax=int64#7,<mulr5=int64#4
4063# asm 2: add  <mulrax=%rax,<mulr5=%rcx
4064add  %rax,%rcx
4065
4066# qhasm:   mulrdx += 0 + carry
4067# asm 1: adc $0,<mulrdx=int64#3
4068# asm 2: adc $0,<mulrdx=%rdx
4069adc $0,%rdx
4070
4071# qhasm:   carry? mulr5 += mulc
4072# asm 1: add  <mulc=int64#13,<mulr5=int64#4
4073# asm 2: add  <mulc=%r15,<mulr5=%rcx
4074add  %r15,%rcx
4075
4076# qhasm:   mulr6 += mulrdx + carry
4077# asm 1: adc <mulrdx=int64#3,<mulr6=int64#5
4078# asm 2: adc <mulrdx=%rdx,<mulr6=%r8
4079adc %rdx,%r8
4080
4081# qhasm:   mulx3 = h3_stack
4082# asm 1: movq <h3_stack=stack64#11,>mulx3=int64#8
4083# asm 2: movq <h3_stack=80(%rsp),>mulx3=%r10
4084movq 80(%rsp),%r10
4085
4086# qhasm:   mulrax = g0_stack
4087# asm 1: movq <g0_stack=stack64#16,>mulrax=int64#7
4088# asm 2: movq <g0_stack=120(%rsp),>mulrax=%rax
4089movq 120(%rsp),%rax
4090
4091# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx3
4092# asm 1: mul  <mulx3=int64#8
4093# asm 2: mul  <mulx3=%r10
4094mul  %r10
4095
4096# qhasm:   carry? ry3 += mulrax
4097# asm 1: add  <mulrax=int64#7,<ry3=int64#12
4098# asm 2: add  <mulrax=%rax,<ry3=%r14
4099add  %rax,%r14
4100
4101# qhasm:   mulc = 0
4102# asm 1: mov  $0,>mulc=int64#13
4103# asm 2: mov  $0,>mulc=%r15
4104mov  $0,%r15
4105
4106# qhasm:   mulc += mulrdx + carry
4107# asm 1: adc <mulrdx=int64#3,<mulc=int64#13
4108# asm 2: adc <mulrdx=%rdx,<mulc=%r15
4109adc %rdx,%r15
4110
4111# qhasm:   mulrax = g1_stack
4112# asm 1: movq <g1_stack=stack64#17,>mulrax=int64#7
4113# asm 2: movq <g1_stack=128(%rsp),>mulrax=%rax
4114movq 128(%rsp),%rax
4115
4116# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx3
4117# asm 1: mul  <mulx3=int64#8
4118# asm 2: mul  <mulx3=%r10
4119mul  %r10
4120
4121# qhasm:   carry? mulr4 += mulrax
4122# asm 1: add  <mulrax=int64#7,<mulr4=int64#2
4123# asm 2: add  <mulrax=%rax,<mulr4=%rsi
4124add  %rax,%rsi
4125
4126# qhasm:   mulrdx += 0 + carry
4127# asm 1: adc $0,<mulrdx=int64#3
4128# asm 2: adc $0,<mulrdx=%rdx
4129adc $0,%rdx
4130
4131# qhasm:   carry? mulr4 += mulc
4132# asm 1: add  <mulc=int64#13,<mulr4=int64#2
4133# asm 2: add  <mulc=%r15,<mulr4=%rsi
4134add  %r15,%rsi
4135
4136# qhasm:   mulc = 0
4137# asm 1: mov  $0,>mulc=int64#13
4138# asm 2: mov  $0,>mulc=%r15
4139mov  $0,%r15
4140
4141# qhasm:   mulc += mulrdx + carry
4142# asm 1: adc <mulrdx=int64#3,<mulc=int64#13
4143# asm 2: adc <mulrdx=%rdx,<mulc=%r15
4144adc %rdx,%r15
4145
4146# qhasm:   mulrax = g2_stack
4147# asm 1: movq <g2_stack=stack64#18,>mulrax=int64#7
4148# asm 2: movq <g2_stack=136(%rsp),>mulrax=%rax
4149movq 136(%rsp),%rax
4150
4151# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx3
4152# asm 1: mul  <mulx3=int64#8
4153# asm 2: mul  <mulx3=%r10
4154mul  %r10
4155
4156# qhasm:   carry? mulr5 += mulrax
4157# asm 1: add  <mulrax=int64#7,<mulr5=int64#4
4158# asm 2: add  <mulrax=%rax,<mulr5=%rcx
4159add  %rax,%rcx
4160
4161# qhasm:   mulrdx += 0 + carry
4162# asm 1: adc $0,<mulrdx=int64#3
4163# asm 2: adc $0,<mulrdx=%rdx
4164adc $0,%rdx
4165
4166# qhasm:   carry? mulr5 += mulc
4167# asm 1: add  <mulc=int64#13,<mulr5=int64#4
4168# asm 2: add  <mulc=%r15,<mulr5=%rcx
4169add  %r15,%rcx
4170
4171# qhasm:   mulc = 0
4172# asm 1: mov  $0,>mulc=int64#13
4173# asm 2: mov  $0,>mulc=%r15
4174mov  $0,%r15
4175
4176# qhasm:   mulc += mulrdx + carry
4177# asm 1: adc <mulrdx=int64#3,<mulc=int64#13
4178# asm 2: adc <mulrdx=%rdx,<mulc=%r15
4179adc %rdx,%r15
4180
4181# qhasm:   mulrax = g3_stack
4182# asm 1: movq <g3_stack=stack64#19,>mulrax=int64#7
4183# asm 2: movq <g3_stack=144(%rsp),>mulrax=%rax
4184movq 144(%rsp),%rax
4185
4186# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx3
4187# asm 1: mul  <mulx3=int64#8
4188# asm 2: mul  <mulx3=%r10
4189mul  %r10
4190
4191# qhasm:   carry? mulr6 += mulrax
4192# asm 1: add  <mulrax=int64#7,<mulr6=int64#5
4193# asm 2: add  <mulrax=%rax,<mulr6=%r8
4194add  %rax,%r8
4195
4196# qhasm:   mulrdx += 0 + carry
4197# asm 1: adc $0,<mulrdx=int64#3
4198# asm 2: adc $0,<mulrdx=%rdx
4199adc $0,%rdx
4200
4201# qhasm:   carry? mulr6 += mulc
4202# asm 1: add  <mulc=int64#13,<mulr6=int64#5
4203# asm 2: add  <mulc=%r15,<mulr6=%r8
4204add  %r15,%r8
4205
4206# qhasm:   mulr7 += mulrdx + carry
4207# asm 1: adc <mulrdx=int64#3,<mulr7=int64#6
4208# asm 2: adc <mulrdx=%rdx,<mulr7=%r9
4209adc %rdx,%r9
4210
4211# qhasm:   mulrax = mulr4
4212# asm 1: mov  <mulr4=int64#2,>mulrax=int64#7
4213# asm 2: mov  <mulr4=%rsi,>mulrax=%rax
4214mov  %rsi,%rax
4215
4216# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
4217mulq  crypto_sign_ed25519_amd64_64_38
4218
4219# qhasm:   mulr4 = mulrax
4220# asm 1: mov  <mulrax=int64#7,>mulr4=int64#2
4221# asm 2: mov  <mulrax=%rax,>mulr4=%rsi
4222mov  %rax,%rsi
4223
4224# qhasm:   mulrax = mulr5
4225# asm 1: mov  <mulr5=int64#4,>mulrax=int64#7
4226# asm 2: mov  <mulr5=%rcx,>mulrax=%rax
4227mov  %rcx,%rax
4228
4229# qhasm:   mulr5 = mulrdx
4230# asm 1: mov  <mulrdx=int64#3,>mulr5=int64#4
4231# asm 2: mov  <mulrdx=%rdx,>mulr5=%rcx
4232mov  %rdx,%rcx
4233
4234# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
4235mulq  crypto_sign_ed25519_amd64_64_38
4236
4237# qhasm:   carry? mulr5 += mulrax
4238# asm 1: add  <mulrax=int64#7,<mulr5=int64#4
4239# asm 2: add  <mulrax=%rax,<mulr5=%rcx
4240add  %rax,%rcx
4241
4242# qhasm:   mulrax = mulr6
4243# asm 1: mov  <mulr6=int64#5,>mulrax=int64#7
4244# asm 2: mov  <mulr6=%r8,>mulrax=%rax
4245mov  %r8,%rax
4246
4247# qhasm:   mulr6 = 0
4248# asm 1: mov  $0,>mulr6=int64#5
4249# asm 2: mov  $0,>mulr6=%r8
4250mov  $0,%r8
4251
4252# qhasm:   mulr6 += mulrdx + carry
4253# asm 1: adc <mulrdx=int64#3,<mulr6=int64#5
4254# asm 2: adc <mulrdx=%rdx,<mulr6=%r8
4255adc %rdx,%r8
4256
4257# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
4258mulq  crypto_sign_ed25519_amd64_64_38
4259
4260# qhasm:   carry? mulr6 += mulrax
4261# asm 1: add  <mulrax=int64#7,<mulr6=int64#5
4262# asm 2: add  <mulrax=%rax,<mulr6=%r8
4263add  %rax,%r8
4264
4265# qhasm:   mulrax = mulr7
4266# asm 1: mov  <mulr7=int64#6,>mulrax=int64#7
4267# asm 2: mov  <mulr7=%r9,>mulrax=%rax
4268mov  %r9,%rax
4269
4270# qhasm:   mulr7 = 0
4271# asm 1: mov  $0,>mulr7=int64#6
4272# asm 2: mov  $0,>mulr7=%r9
4273mov  $0,%r9
4274
4275# qhasm:   mulr7 += mulrdx + carry
4276# asm 1: adc <mulrdx=int64#3,<mulr7=int64#6
4277# asm 2: adc <mulrdx=%rdx,<mulr7=%r9
4278adc %rdx,%r9
4279
4280# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
4281mulq  crypto_sign_ed25519_amd64_64_38
4282
4283# qhasm:   carry? mulr7 += mulrax
4284# asm 1: add  <mulrax=int64#7,<mulr7=int64#6
4285# asm 2: add  <mulrax=%rax,<mulr7=%r9
4286add  %rax,%r9
4287
4288# qhasm:   mulr8 = 0
4289# asm 1: mov  $0,>mulr8=int64#7
4290# asm 2: mov  $0,>mulr8=%rax
4291mov  $0,%rax
4292
4293# qhasm:   mulr8 += mulrdx + carry
4294# asm 1: adc <mulrdx=int64#3,<mulr8=int64#7
4295# asm 2: adc <mulrdx=%rdx,<mulr8=%rax
4296adc %rdx,%rax
4297
4298# qhasm:   carry? ry0 += mulr4
4299# asm 1: add  <mulr4=int64#2,<ry0=int64#9
4300# asm 2: add  <mulr4=%rsi,<ry0=%r11
4301add  %rsi,%r11
4302
4303# qhasm:   carry? ry1 += mulr5 + carry
4304# asm 1: adc <mulr5=int64#4,<ry1=int64#10
4305# asm 2: adc <mulr5=%rcx,<ry1=%r12
4306adc %rcx,%r12
4307
4308# qhasm:   carry? ry2 += mulr6 + carry
4309# asm 1: adc <mulr6=int64#5,<ry2=int64#11
4310# asm 2: adc <mulr6=%r8,<ry2=%r13
4311adc %r8,%r13
4312
4313# qhasm:   carry? ry3 += mulr7 + carry
4314# asm 1: adc <mulr7=int64#6,<ry3=int64#12
4315# asm 2: adc <mulr7=%r9,<ry3=%r14
4316adc %r9,%r14
4317
4318# qhasm:   mulzero = 0
4319# asm 1: mov  $0,>mulzero=int64#2
4320# asm 2: mov  $0,>mulzero=%rsi
4321mov  $0,%rsi
4322
4323# qhasm:   mulr8 += mulzero + carry
4324# asm 1: adc <mulzero=int64#2,<mulr8=int64#7
4325# asm 2: adc <mulzero=%rsi,<mulr8=%rax
4326adc %rsi,%rax
4327
4328# qhasm:   mulr8 *= 38
4329# asm 1: imulq  $38,<mulr8=int64#7,>mulr8=int64#3
4330# asm 2: imulq  $38,<mulr8=%rax,>mulr8=%rdx
4331imulq  $38,%rax,%rdx
4332
4333# qhasm:   carry? ry0 += mulr8
4334# asm 1: add  <mulr8=int64#3,<ry0=int64#9
4335# asm 2: add  <mulr8=%rdx,<ry0=%r11
4336add  %rdx,%r11
4337
4338# qhasm:   carry? ry1 += mulzero + carry
4339# asm 1: adc <mulzero=int64#2,<ry1=int64#10
4340# asm 2: adc <mulzero=%rsi,<ry1=%r12
4341adc %rsi,%r12
4342
4343# qhasm:   carry? ry2 += mulzero + carry
4344# asm 1: adc <mulzero=int64#2,<ry2=int64#11
4345# asm 2: adc <mulzero=%rsi,<ry2=%r13
4346adc %rsi,%r13
4347
4348# qhasm:   carry? ry3 += mulzero + carry
4349# asm 1: adc <mulzero=int64#2,<ry3=int64#12
4350# asm 2: adc <mulzero=%rsi,<ry3=%r14
4351adc %rsi,%r14
4352
4353# qhasm:   mulzero += mulzero + carry
4354# asm 1: adc <mulzero=int64#2,<mulzero=int64#2
4355# asm 2: adc <mulzero=%rsi,<mulzero=%rsi
4356adc %rsi,%rsi
4357
4358# qhasm:   mulzero *= 38
4359# asm 1: imulq  $38,<mulzero=int64#2,>mulzero=int64#2
4360# asm 2: imulq  $38,<mulzero=%rsi,>mulzero=%rsi
4361imulq  $38,%rsi,%rsi
4362
4363# qhasm:   ry0 += mulzero
4364# asm 1: add  <mulzero=int64#2,<ry0=int64#9
4365# asm 2: add  <mulzero=%rsi,<ry0=%r11
4366add  %rsi,%r11
4367
4368# qhasm: *(uint64 *)(rp + 32) = ry0
4369# asm 1: movq   <ry0=int64#9,32(<rp=int64#1)
4370# asm 2: movq   <ry0=%r11,32(<rp=%rdi)
4371movq   %r11,32(%rdi)
4372
4373# qhasm: *(uint64 *)(rp + 40) = ry1
4374# asm 1: movq   <ry1=int64#10,40(<rp=int64#1)
4375# asm 2: movq   <ry1=%r12,40(<rp=%rdi)
4376movq   %r12,40(%rdi)
4377
4378# qhasm: *(uint64 *)(rp + 48) = ry2
4379# asm 1: movq   <ry2=int64#11,48(<rp=int64#1)
4380# asm 2: movq   <ry2=%r13,48(<rp=%rdi)
4381movq   %r13,48(%rdi)
4382
4383# qhasm: *(uint64 *)(rp + 56) = ry3
4384# asm 1: movq   <ry3=int64#12,56(<rp=int64#1)
4385# asm 2: movq   <ry3=%r14,56(<rp=%rdi)
4386movq   %r14,56(%rdi)
4387
4388# qhasm:   mulr4 = 0
4389# asm 1: mov  $0,>mulr4=int64#2
4390# asm 2: mov  $0,>mulr4=%rsi
4391mov  $0,%rsi
4392
4393# qhasm:   mulr5 = 0
4394# asm 1: mov  $0,>mulr5=int64#4
4395# asm 2: mov  $0,>mulr5=%rcx
4396mov  $0,%rcx
4397
4398# qhasm:   mulr6 = 0
4399# asm 1: mov  $0,>mulr6=int64#5
4400# asm 2: mov  $0,>mulr6=%r8
4401mov  $0,%r8
4402
4403# qhasm:   mulr7 = 0
4404# asm 1: mov  $0,>mulr7=int64#6
4405# asm 2: mov  $0,>mulr7=%r9
4406mov  $0,%r9
4407
4408# qhasm:   mulx0 = g0_stack
4409# asm 1: movq <g0_stack=stack64#16,>mulx0=int64#8
4410# asm 2: movq <g0_stack=120(%rsp),>mulx0=%r10
4411movq 120(%rsp),%r10
4412
4413# qhasm:   mulrax = f0_stack
4414# asm 1: movq <f0_stack=stack64#20,>mulrax=int64#7
4415# asm 2: movq <f0_stack=152(%rsp),>mulrax=%rax
4416movq 152(%rsp),%rax
4417
4418# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx0
4419# asm 1: mul  <mulx0=int64#8
4420# asm 2: mul  <mulx0=%r10
4421mul  %r10
4422
4423# qhasm:   rz0 = mulrax
4424# asm 1: mov  <mulrax=int64#7,>rz0=int64#9
4425# asm 2: mov  <mulrax=%rax,>rz0=%r11
4426mov  %rax,%r11
4427
4428# qhasm:   rz1 = mulrdx
4429# asm 1: mov  <mulrdx=int64#3,>rz1=int64#10
4430# asm 2: mov  <mulrdx=%rdx,>rz1=%r12
4431mov  %rdx,%r12
4432
4433# qhasm:   mulrax = f1_stack
4434# asm 1: movq <f1_stack=stack64#21,>mulrax=int64#7
4435# asm 2: movq <f1_stack=160(%rsp),>mulrax=%rax
4436movq 160(%rsp),%rax
4437
4438# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx0
4439# asm 1: mul  <mulx0=int64#8
4440# asm 2: mul  <mulx0=%r10
4441mul  %r10
4442
4443# qhasm:   carry? rz1 += mulrax
4444# asm 1: add  <mulrax=int64#7,<rz1=int64#10
4445# asm 2: add  <mulrax=%rax,<rz1=%r12
4446add  %rax,%r12
4447
4448# qhasm:   rz2 = 0
4449# asm 1: mov  $0,>rz2=int64#11
4450# asm 2: mov  $0,>rz2=%r13
4451mov  $0,%r13
4452
4453# qhasm:   rz2 += mulrdx + carry
4454# asm 1: adc <mulrdx=int64#3,<rz2=int64#11
4455# asm 2: adc <mulrdx=%rdx,<rz2=%r13
4456adc %rdx,%r13
4457
4458# qhasm:   mulrax = f2_stack
4459# asm 1: movq <f2_stack=stack64#22,>mulrax=int64#7
4460# asm 2: movq <f2_stack=168(%rsp),>mulrax=%rax
4461movq 168(%rsp),%rax
4462
4463# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx0
4464# asm 1: mul  <mulx0=int64#8
4465# asm 2: mul  <mulx0=%r10
4466mul  %r10
4467
4468# qhasm:   carry? rz2 += mulrax
4469# asm 1: add  <mulrax=int64#7,<rz2=int64#11
4470# asm 2: add  <mulrax=%rax,<rz2=%r13
4471add  %rax,%r13
4472
4473# qhasm:   rz3 = 0
4474# asm 1: mov  $0,>rz3=int64#12
4475# asm 2: mov  $0,>rz3=%r14
4476mov  $0,%r14
4477
4478# qhasm:   rz3 += mulrdx + carry
4479# asm 1: adc <mulrdx=int64#3,<rz3=int64#12
4480# asm 2: adc <mulrdx=%rdx,<rz3=%r14
4481adc %rdx,%r14
4482
4483# qhasm:   mulrax = f3_stack
4484# asm 1: movq <f3_stack=stack64#23,>mulrax=int64#7
4485# asm 2: movq <f3_stack=176(%rsp),>mulrax=%rax
4486movq 176(%rsp),%rax
4487
4488# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx0
4489# asm 1: mul  <mulx0=int64#8
4490# asm 2: mul  <mulx0=%r10
4491mul  %r10
4492
4493# qhasm:   carry? rz3 += mulrax
4494# asm 1: add  <mulrax=int64#7,<rz3=int64#12
4495# asm 2: add  <mulrax=%rax,<rz3=%r14
4496add  %rax,%r14
4497
4498# qhasm:   mulr4 += mulrdx + carry
4499# asm 1: adc <mulrdx=int64#3,<mulr4=int64#2
4500# asm 2: adc <mulrdx=%rdx,<mulr4=%rsi
4501adc %rdx,%rsi
4502
4503# qhasm:   mulx1 = g1_stack
4504# asm 1: movq <g1_stack=stack64#17,>mulx1=int64#8
4505# asm 2: movq <g1_stack=128(%rsp),>mulx1=%r10
4506movq 128(%rsp),%r10
4507
4508# qhasm:   mulrax = f0_stack
4509# asm 1: movq <f0_stack=stack64#20,>mulrax=int64#7
4510# asm 2: movq <f0_stack=152(%rsp),>mulrax=%rax
4511movq 152(%rsp),%rax
4512
4513# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx1
4514# asm 1: mul  <mulx1=int64#8
4515# asm 2: mul  <mulx1=%r10
4516mul  %r10
4517
4518# qhasm:   carry? rz1 += mulrax
4519# asm 1: add  <mulrax=int64#7,<rz1=int64#10
4520# asm 2: add  <mulrax=%rax,<rz1=%r12
4521add  %rax,%r12
4522
4523# qhasm:   mulc = 0
4524# asm 1: mov  $0,>mulc=int64#13
4525# asm 2: mov  $0,>mulc=%r15
4526mov  $0,%r15
4527
4528# qhasm:   mulc += mulrdx + carry
4529# asm 1: adc <mulrdx=int64#3,<mulc=int64#13
4530# asm 2: adc <mulrdx=%rdx,<mulc=%r15
4531adc %rdx,%r15
4532
4533# qhasm:   mulrax = f1_stack
4534# asm 1: movq <f1_stack=stack64#21,>mulrax=int64#7
4535# asm 2: movq <f1_stack=160(%rsp),>mulrax=%rax
4536movq 160(%rsp),%rax
4537
4538# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx1
4539# asm 1: mul  <mulx1=int64#8
4540# asm 2: mul  <mulx1=%r10
4541mul  %r10
4542
4543# qhasm:   carry? rz2 += mulrax
4544# asm 1: add  <mulrax=int64#7,<rz2=int64#11
4545# asm 2: add  <mulrax=%rax,<rz2=%r13
4546add  %rax,%r13
4547
4548# qhasm:   mulrdx += 0 + carry
4549# asm 1: adc $0,<mulrdx=int64#3
4550# asm 2: adc $0,<mulrdx=%rdx
4551adc $0,%rdx
4552
4553# qhasm:   carry? rz2 += mulc
4554# asm 1: add  <mulc=int64#13,<rz2=int64#11
4555# asm 2: add  <mulc=%r15,<rz2=%r13
4556add  %r15,%r13
4557
4558# qhasm:   mulc = 0
4559# asm 1: mov  $0,>mulc=int64#13
4560# asm 2: mov  $0,>mulc=%r15
4561mov  $0,%r15
4562
4563# qhasm:   mulc += mulrdx + carry
4564# asm 1: adc <mulrdx=int64#3,<mulc=int64#13
4565# asm 2: adc <mulrdx=%rdx,<mulc=%r15
4566adc %rdx,%r15
4567
4568# qhasm:   mulrax = f2_stack
4569# asm 1: movq <f2_stack=stack64#22,>mulrax=int64#7
4570# asm 2: movq <f2_stack=168(%rsp),>mulrax=%rax
4571movq 168(%rsp),%rax
4572
4573# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx1
4574# asm 1: mul  <mulx1=int64#8
4575# asm 2: mul  <mulx1=%r10
4576mul  %r10
4577
4578# qhasm:   carry? rz3 += mulrax
4579# asm 1: add  <mulrax=int64#7,<rz3=int64#12
4580# asm 2: add  <mulrax=%rax,<rz3=%r14
4581add  %rax,%r14
4582
4583# qhasm:   mulrdx += 0 + carry
4584# asm 1: adc $0,<mulrdx=int64#3
4585# asm 2: adc $0,<mulrdx=%rdx
4586adc $0,%rdx
4587
4588# qhasm:   carry? rz3 += mulc
4589# asm 1: add  <mulc=int64#13,<rz3=int64#12
4590# asm 2: add  <mulc=%r15,<rz3=%r14
4591add  %r15,%r14
4592
4593# qhasm:   mulc = 0
4594# asm 1: mov  $0,>mulc=int64#13
4595# asm 2: mov  $0,>mulc=%r15
4596mov  $0,%r15
4597
4598# qhasm:   mulc += mulrdx + carry
4599# asm 1: adc <mulrdx=int64#3,<mulc=int64#13
4600# asm 2: adc <mulrdx=%rdx,<mulc=%r15
4601adc %rdx,%r15
4602
4603# qhasm:   mulrax = f3_stack
4604# asm 1: movq <f3_stack=stack64#23,>mulrax=int64#7
4605# asm 2: movq <f3_stack=176(%rsp),>mulrax=%rax
4606movq 176(%rsp),%rax
4607
4608# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx1
4609# asm 1: mul  <mulx1=int64#8
4610# asm 2: mul  <mulx1=%r10
4611mul  %r10
4612
4613# qhasm:   carry? mulr4 += mulrax
4614# asm 1: add  <mulrax=int64#7,<mulr4=int64#2
4615# asm 2: add  <mulrax=%rax,<mulr4=%rsi
4616add  %rax,%rsi
4617
4618# qhasm:   mulrdx += 0 + carry
4619# asm 1: adc $0,<mulrdx=int64#3
4620# asm 2: adc $0,<mulrdx=%rdx
4621adc $0,%rdx
4622
4623# qhasm:   carry? mulr4 += mulc
4624# asm 1: add  <mulc=int64#13,<mulr4=int64#2
4625# asm 2: add  <mulc=%r15,<mulr4=%rsi
4626add  %r15,%rsi
4627
4628# qhasm:   mulr5 += mulrdx + carry
4629# asm 1: adc <mulrdx=int64#3,<mulr5=int64#4
4630# asm 2: adc <mulrdx=%rdx,<mulr5=%rcx
4631adc %rdx,%rcx
4632
4633# qhasm:   mulx2 = g2_stack
4634# asm 1: movq <g2_stack=stack64#18,>mulx2=int64#8
4635# asm 2: movq <g2_stack=136(%rsp),>mulx2=%r10
4636movq 136(%rsp),%r10
4637
4638# qhasm:   mulrax = f0_stack
4639# asm 1: movq <f0_stack=stack64#20,>mulrax=int64#7
4640# asm 2: movq <f0_stack=152(%rsp),>mulrax=%rax
4641movq 152(%rsp),%rax
4642
4643# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx2
4644# asm 1: mul  <mulx2=int64#8
4645# asm 2: mul  <mulx2=%r10
4646mul  %r10
4647
4648# qhasm:   carry? rz2 += mulrax
4649# asm 1: add  <mulrax=int64#7,<rz2=int64#11
4650# asm 2: add  <mulrax=%rax,<rz2=%r13
4651add  %rax,%r13
4652
4653# qhasm:   mulc = 0
4654# asm 1: mov  $0,>mulc=int64#13
4655# asm 2: mov  $0,>mulc=%r15
4656mov  $0,%r15
4657
4658# qhasm:   mulc += mulrdx + carry
4659# asm 1: adc <mulrdx=int64#3,<mulc=int64#13
4660# asm 2: adc <mulrdx=%rdx,<mulc=%r15
4661adc %rdx,%r15
4662
4663# qhasm:   mulrax = f1_stack
4664# asm 1: movq <f1_stack=stack64#21,>mulrax=int64#7
4665# asm 2: movq <f1_stack=160(%rsp),>mulrax=%rax
4666movq 160(%rsp),%rax
4667
4668# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx2
4669# asm 1: mul  <mulx2=int64#8
4670# asm 2: mul  <mulx2=%r10
4671mul  %r10
4672
4673# qhasm:   carry? rz3 += mulrax
4674# asm 1: add  <mulrax=int64#7,<rz3=int64#12
4675# asm 2: add  <mulrax=%rax,<rz3=%r14
4676add  %rax,%r14
4677
4678# qhasm:   mulrdx += 0 + carry
4679# asm 1: adc $0,<mulrdx=int64#3
4680# asm 2: adc $0,<mulrdx=%rdx
4681adc $0,%rdx
4682
4683# qhasm:   carry? rz3 += mulc
4684# asm 1: add  <mulc=int64#13,<rz3=int64#12
4685# asm 2: add  <mulc=%r15,<rz3=%r14
4686add  %r15,%r14
4687
4688# qhasm:   mulc = 0
4689# asm 1: mov  $0,>mulc=int64#13
4690# asm 2: mov  $0,>mulc=%r15
4691mov  $0,%r15
4692
4693# qhasm:   mulc += mulrdx + carry
4694# asm 1: adc <mulrdx=int64#3,<mulc=int64#13
4695# asm 2: adc <mulrdx=%rdx,<mulc=%r15
4696adc %rdx,%r15
4697
4698# qhasm:   mulrax = f2_stack
4699# asm 1: movq <f2_stack=stack64#22,>mulrax=int64#7
4700# asm 2: movq <f2_stack=168(%rsp),>mulrax=%rax
4701movq 168(%rsp),%rax
4702
4703# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx2
4704# asm 1: mul  <mulx2=int64#8
4705# asm 2: mul  <mulx2=%r10
4706mul  %r10
4707
4708# qhasm:   carry? mulr4 += mulrax
4709# asm 1: add  <mulrax=int64#7,<mulr4=int64#2
4710# asm 2: add  <mulrax=%rax,<mulr4=%rsi
4711add  %rax,%rsi
4712
4713# qhasm:   mulrdx += 0 + carry
4714# asm 1: adc $0,<mulrdx=int64#3
4715# asm 2: adc $0,<mulrdx=%rdx
4716adc $0,%rdx
4717
4718# qhasm:   carry? mulr4 += mulc
4719# asm 1: add  <mulc=int64#13,<mulr4=int64#2
4720# asm 2: add  <mulc=%r15,<mulr4=%rsi
4721add  %r15,%rsi
4722
4723# qhasm:   mulc = 0
4724# asm 1: mov  $0,>mulc=int64#13
4725# asm 2: mov  $0,>mulc=%r15
4726mov  $0,%r15
4727
4728# qhasm:   mulc += mulrdx + carry
4729# asm 1: adc <mulrdx=int64#3,<mulc=int64#13
4730# asm 2: adc <mulrdx=%rdx,<mulc=%r15
4731adc %rdx,%r15
4732
4733# qhasm:   mulrax = f3_stack
4734# asm 1: movq <f3_stack=stack64#23,>mulrax=int64#7
4735# asm 2: movq <f3_stack=176(%rsp),>mulrax=%rax
4736movq 176(%rsp),%rax
4737
4738# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx2
4739# asm 1: mul  <mulx2=int64#8
4740# asm 2: mul  <mulx2=%r10
4741mul  %r10
4742
4743# qhasm:   carry? mulr5 += mulrax
4744# asm 1: add  <mulrax=int64#7,<mulr5=int64#4
4745# asm 2: add  <mulrax=%rax,<mulr5=%rcx
4746add  %rax,%rcx
4747
4748# qhasm:   mulrdx += 0 + carry
4749# asm 1: adc $0,<mulrdx=int64#3
4750# asm 2: adc $0,<mulrdx=%rdx
4751adc $0,%rdx
4752
4753# qhasm:   carry? mulr5 += mulc
4754# asm 1: add  <mulc=int64#13,<mulr5=int64#4
4755# asm 2: add  <mulc=%r15,<mulr5=%rcx
4756add  %r15,%rcx
4757
4758# qhasm:   mulr6 += mulrdx + carry
4759# asm 1: adc <mulrdx=int64#3,<mulr6=int64#5
4760# asm 2: adc <mulrdx=%rdx,<mulr6=%r8
4761adc %rdx,%r8
4762
4763# qhasm:   mulx3 = g3_stack
4764# asm 1: movq <g3_stack=stack64#19,>mulx3=int64#8
4765# asm 2: movq <g3_stack=144(%rsp),>mulx3=%r10
4766movq 144(%rsp),%r10
4767
4768# qhasm:   mulrax = f0_stack
4769# asm 1: movq <f0_stack=stack64#20,>mulrax=int64#7
4770# asm 2: movq <f0_stack=152(%rsp),>mulrax=%rax
4771movq 152(%rsp),%rax
4772
4773# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx3
4774# asm 1: mul  <mulx3=int64#8
4775# asm 2: mul  <mulx3=%r10
4776mul  %r10
4777
4778# qhasm:   carry? rz3 += mulrax
4779# asm 1: add  <mulrax=int64#7,<rz3=int64#12
4780# asm 2: add  <mulrax=%rax,<rz3=%r14
4781add  %rax,%r14
4782
4783# qhasm:   mulc = 0
4784# asm 1: mov  $0,>mulc=int64#13
4785# asm 2: mov  $0,>mulc=%r15
4786mov  $0,%r15
4787
4788# qhasm:   mulc += mulrdx + carry
4789# asm 1: adc <mulrdx=int64#3,<mulc=int64#13
4790# asm 2: adc <mulrdx=%rdx,<mulc=%r15
4791adc %rdx,%r15
4792
4793# qhasm:   mulrax = f1_stack
4794# asm 1: movq <f1_stack=stack64#21,>mulrax=int64#7
4795# asm 2: movq <f1_stack=160(%rsp),>mulrax=%rax
4796movq 160(%rsp),%rax
4797
4798# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx3
4799# asm 1: mul  <mulx3=int64#8
4800# asm 2: mul  <mulx3=%r10
4801mul  %r10
4802
4803# qhasm:   carry? mulr4 += mulrax
4804# asm 1: add  <mulrax=int64#7,<mulr4=int64#2
4805# asm 2: add  <mulrax=%rax,<mulr4=%rsi
4806add  %rax,%rsi
4807
4808# qhasm:   mulrdx += 0 + carry
4809# asm 1: adc $0,<mulrdx=int64#3
4810# asm 2: adc $0,<mulrdx=%rdx
4811adc $0,%rdx
4812
4813# qhasm:   carry? mulr4 += mulc
4814# asm 1: add  <mulc=int64#13,<mulr4=int64#2
4815# asm 2: add  <mulc=%r15,<mulr4=%rsi
4816add  %r15,%rsi
4817
4818# qhasm:   mulc = 0
4819# asm 1: mov  $0,>mulc=int64#13
4820# asm 2: mov  $0,>mulc=%r15
4821mov  $0,%r15
4822
4823# qhasm:   mulc += mulrdx + carry
4824# asm 1: adc <mulrdx=int64#3,<mulc=int64#13
4825# asm 2: adc <mulrdx=%rdx,<mulc=%r15
4826adc %rdx,%r15
4827
4828# qhasm:   mulrax = f2_stack
4829# asm 1: movq <f2_stack=stack64#22,>mulrax=int64#7
4830# asm 2: movq <f2_stack=168(%rsp),>mulrax=%rax
4831movq 168(%rsp),%rax
4832
4833# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx3
4834# asm 1: mul  <mulx3=int64#8
4835# asm 2: mul  <mulx3=%r10
4836mul  %r10
4837
4838# qhasm:   carry? mulr5 += mulrax
4839# asm 1: add  <mulrax=int64#7,<mulr5=int64#4
4840# asm 2: add  <mulrax=%rax,<mulr5=%rcx
4841add  %rax,%rcx
4842
4843# qhasm:   mulrdx += 0 + carry
4844# asm 1: adc $0,<mulrdx=int64#3
4845# asm 2: adc $0,<mulrdx=%rdx
4846adc $0,%rdx
4847
4848# qhasm:   carry? mulr5 += mulc
4849# asm 1: add  <mulc=int64#13,<mulr5=int64#4
4850# asm 2: add  <mulc=%r15,<mulr5=%rcx
4851add  %r15,%rcx
4852
4853# qhasm:   mulc = 0
4854# asm 1: mov  $0,>mulc=int64#13
4855# asm 2: mov  $0,>mulc=%r15
4856mov  $0,%r15
4857
4858# qhasm:   mulc += mulrdx + carry
4859# asm 1: adc <mulrdx=int64#3,<mulc=int64#13
4860# asm 2: adc <mulrdx=%rdx,<mulc=%r15
4861adc %rdx,%r15
4862
4863# qhasm:   mulrax = f3_stack
4864# asm 1: movq <f3_stack=stack64#23,>mulrax=int64#7
4865# asm 2: movq <f3_stack=176(%rsp),>mulrax=%rax
4866movq 176(%rsp),%rax
4867
4868# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx3
4869# asm 1: mul  <mulx3=int64#8
4870# asm 2: mul  <mulx3=%r10
4871mul  %r10
4872
4873# qhasm:   carry? mulr6 += mulrax
4874# asm 1: add  <mulrax=int64#7,<mulr6=int64#5
4875# asm 2: add  <mulrax=%rax,<mulr6=%r8
4876add  %rax,%r8
4877
4878# qhasm:   mulrdx += 0 + carry
4879# asm 1: adc $0,<mulrdx=int64#3
4880# asm 2: adc $0,<mulrdx=%rdx
4881adc $0,%rdx
4882
4883# qhasm:   carry? mulr6 += mulc
4884# asm 1: add  <mulc=int64#13,<mulr6=int64#5
4885# asm 2: add  <mulc=%r15,<mulr6=%r8
4886add  %r15,%r8
4887
4888# qhasm:   mulr7 += mulrdx + carry
4889# asm 1: adc <mulrdx=int64#3,<mulr7=int64#6
4890# asm 2: adc <mulrdx=%rdx,<mulr7=%r9
4891adc %rdx,%r9
4892
4893# qhasm:   mulrax = mulr4
4894# asm 1: mov  <mulr4=int64#2,>mulrax=int64#7
4895# asm 2: mov  <mulr4=%rsi,>mulrax=%rax
4896mov  %rsi,%rax
4897
4898# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
4899mulq  crypto_sign_ed25519_amd64_64_38
4900
4901# qhasm:   mulr4 = mulrax
4902# asm 1: mov  <mulrax=int64#7,>mulr4=int64#2
4903# asm 2: mov  <mulrax=%rax,>mulr4=%rsi
4904mov  %rax,%rsi
4905
4906# qhasm:   mulrax = mulr5
4907# asm 1: mov  <mulr5=int64#4,>mulrax=int64#7
4908# asm 2: mov  <mulr5=%rcx,>mulrax=%rax
4909mov  %rcx,%rax
4910
4911# qhasm:   mulr5 = mulrdx
4912# asm 1: mov  <mulrdx=int64#3,>mulr5=int64#4
4913# asm 2: mov  <mulrdx=%rdx,>mulr5=%rcx
4914mov  %rdx,%rcx
4915
4916# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
4917mulq  crypto_sign_ed25519_amd64_64_38
4918
4919# qhasm:   carry? mulr5 += mulrax
4920# asm 1: add  <mulrax=int64#7,<mulr5=int64#4
4921# asm 2: add  <mulrax=%rax,<mulr5=%rcx
4922add  %rax,%rcx
4923
4924# qhasm:   mulrax = mulr6
4925# asm 1: mov  <mulr6=int64#5,>mulrax=int64#7
4926# asm 2: mov  <mulr6=%r8,>mulrax=%rax
4927mov  %r8,%rax
4928
4929# qhasm:   mulr6 = 0
4930# asm 1: mov  $0,>mulr6=int64#5
4931# asm 2: mov  $0,>mulr6=%r8
4932mov  $0,%r8
4933
4934# qhasm:   mulr6 += mulrdx + carry
4935# asm 1: adc <mulrdx=int64#3,<mulr6=int64#5
4936# asm 2: adc <mulrdx=%rdx,<mulr6=%r8
4937adc %rdx,%r8
4938
4939# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
4940mulq  crypto_sign_ed25519_amd64_64_38
4941
4942# qhasm:   carry? mulr6 += mulrax
4943# asm 1: add  <mulrax=int64#7,<mulr6=int64#5
4944# asm 2: add  <mulrax=%rax,<mulr6=%r8
4945add  %rax,%r8
4946
4947# qhasm:   mulrax = mulr7
4948# asm 1: mov  <mulr7=int64#6,>mulrax=int64#7
4949# asm 2: mov  <mulr7=%r9,>mulrax=%rax
4950mov  %r9,%rax
4951
4952# qhasm:   mulr7 = 0
4953# asm 1: mov  $0,>mulr7=int64#6
4954# asm 2: mov  $0,>mulr7=%r9
4955mov  $0,%r9
4956
4957# qhasm:   mulr7 += mulrdx + carry
4958# asm 1: adc <mulrdx=int64#3,<mulr7=int64#6
4959# asm 2: adc <mulrdx=%rdx,<mulr7=%r9
4960adc %rdx,%r9
4961
4962# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
4963mulq  crypto_sign_ed25519_amd64_64_38
4964
4965# qhasm:   carry? mulr7 += mulrax
4966# asm 1: add  <mulrax=int64#7,<mulr7=int64#6
4967# asm 2: add  <mulrax=%rax,<mulr7=%r9
4968add  %rax,%r9
4969
4970# qhasm:   mulr8 = 0
4971# asm 1: mov  $0,>mulr8=int64#7
4972# asm 2: mov  $0,>mulr8=%rax
4973mov  $0,%rax
4974
4975# qhasm:   mulr8 += mulrdx + carry
4976# asm 1: adc <mulrdx=int64#3,<mulr8=int64#7
4977# asm 2: adc <mulrdx=%rdx,<mulr8=%rax
4978adc %rdx,%rax
4979
4980# qhasm:   carry? rz0 += mulr4
4981# asm 1: add  <mulr4=int64#2,<rz0=int64#9
4982# asm 2: add  <mulr4=%rsi,<rz0=%r11
4983add  %rsi,%r11
4984
4985# qhasm:   carry? rz1 += mulr5 + carry
4986# asm 1: adc <mulr5=int64#4,<rz1=int64#10
4987# asm 2: adc <mulr5=%rcx,<rz1=%r12
4988adc %rcx,%r12
4989
4990# qhasm:   carry? rz2 += mulr6 + carry
4991# asm 1: adc <mulr6=int64#5,<rz2=int64#11
4992# asm 2: adc <mulr6=%r8,<rz2=%r13
4993adc %r8,%r13
4994
4995# qhasm:   carry? rz3 += mulr7 + carry
4996# asm 1: adc <mulr7=int64#6,<rz3=int64#12
4997# asm 2: adc <mulr7=%r9,<rz3=%r14
4998adc %r9,%r14
4999
5000# qhasm:   mulzero = 0
5001# asm 1: mov  $0,>mulzero=int64#2
5002# asm 2: mov  $0,>mulzero=%rsi
5003mov  $0,%rsi
5004
5005# qhasm:   mulr8 += mulzero + carry
5006# asm 1: adc <mulzero=int64#2,<mulr8=int64#7
5007# asm 2: adc <mulzero=%rsi,<mulr8=%rax
5008adc %rsi,%rax
5009
5010# qhasm:   mulr8 *= 38
5011# asm 1: imulq  $38,<mulr8=int64#7,>mulr8=int64#3
5012# asm 2: imulq  $38,<mulr8=%rax,>mulr8=%rdx
5013imulq  $38,%rax,%rdx
5014
5015# qhasm:   carry? rz0 += mulr8
5016# asm 1: add  <mulr8=int64#3,<rz0=int64#9
5017# asm 2: add  <mulr8=%rdx,<rz0=%r11
5018add  %rdx,%r11
5019
5020# qhasm:   carry? rz1 += mulzero + carry
5021# asm 1: adc <mulzero=int64#2,<rz1=int64#10
5022# asm 2: adc <mulzero=%rsi,<rz1=%r12
5023adc %rsi,%r12
5024
5025# qhasm:   carry? rz2 += mulzero + carry
5026# asm 1: adc <mulzero=int64#2,<rz2=int64#11
5027# asm 2: adc <mulzero=%rsi,<rz2=%r13
5028adc %rsi,%r13
5029
5030# qhasm:   carry? rz3 += mulzero + carry
5031# asm 1: adc <mulzero=int64#2,<rz3=int64#12
5032# asm 2: adc <mulzero=%rsi,<rz3=%r14
5033adc %rsi,%r14
5034
5035# qhasm:   mulzero += mulzero + carry
5036# asm 1: adc <mulzero=int64#2,<mulzero=int64#2
5037# asm 2: adc <mulzero=%rsi,<mulzero=%rsi
5038adc %rsi,%rsi
5039
5040# qhasm:   mulzero *= 38
5041# asm 1: imulq  $38,<mulzero=int64#2,>mulzero=int64#2
5042# asm 2: imulq  $38,<mulzero=%rsi,>mulzero=%rsi
5043imulq  $38,%rsi,%rsi
5044
5045# qhasm:   rz0 += mulzero
5046# asm 1: add  <mulzero=int64#2,<rz0=int64#9
5047# asm 2: add  <mulzero=%rsi,<rz0=%r11
5048add  %rsi,%r11
5049
5050# qhasm: *(uint64 *)(rp + 64) = rz0
5051# asm 1: movq   <rz0=int64#9,64(<rp=int64#1)
5052# asm 2: movq   <rz0=%r11,64(<rp=%rdi)
5053movq   %r11,64(%rdi)
5054
5055# qhasm: *(uint64 *)(rp + 72) = rz1
5056# asm 1: movq   <rz1=int64#10,72(<rp=int64#1)
5057# asm 2: movq   <rz1=%r12,72(<rp=%rdi)
5058movq   %r12,72(%rdi)
5059
5060# qhasm: *(uint64 *)(rp + 80) = rz2
5061# asm 1: movq   <rz2=int64#11,80(<rp=int64#1)
5062# asm 2: movq   <rz2=%r13,80(<rp=%rdi)
5063movq   %r13,80(%rdi)
5064
5065# qhasm: *(uint64 *)(rp + 88) = rz3
5066# asm 1: movq   <rz3=int64#12,88(<rp=int64#1)
5067# asm 2: movq   <rz3=%r14,88(<rp=%rdi)
5068movq   %r14,88(%rdi)
5069
5070# qhasm:   mulr4 = 0
5071# asm 1: mov  $0,>mulr4=int64#2
5072# asm 2: mov  $0,>mulr4=%rsi
5073mov  $0,%rsi
5074
5075# qhasm:   mulr5 = 0
5076# asm 1: mov  $0,>mulr5=int64#4
5077# asm 2: mov  $0,>mulr5=%rcx
5078mov  $0,%rcx
5079
5080# qhasm:   mulr6 = 0
5081# asm 1: mov  $0,>mulr6=int64#5
5082# asm 2: mov  $0,>mulr6=%r8
5083mov  $0,%r8
5084
5085# qhasm:   mulr7 = 0
5086# asm 1: mov  $0,>mulr7=int64#6
5087# asm 2: mov  $0,>mulr7=%r9
5088mov  $0,%r9
5089
5090# qhasm:   mulx0 = e0_stack
5091# asm 1: movq <e0_stack=stack64#12,>mulx0=int64#8
5092# asm 2: movq <e0_stack=88(%rsp),>mulx0=%r10
5093movq 88(%rsp),%r10
5094
5095# qhasm:   mulrax = h0_stack
5096# asm 1: movq <h0_stack=stack64#8,>mulrax=int64#7
5097# asm 2: movq <h0_stack=56(%rsp),>mulrax=%rax
5098movq 56(%rsp),%rax
5099
5100# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx0
5101# asm 1: mul  <mulx0=int64#8
5102# asm 2: mul  <mulx0=%r10
5103mul  %r10
5104
5105# qhasm:   rt0 = mulrax
5106# asm 1: mov  <mulrax=int64#7,>rt0=int64#9
5107# asm 2: mov  <mulrax=%rax,>rt0=%r11
5108mov  %rax,%r11
5109
5110# qhasm:   rt1 = mulrdx
5111# asm 1: mov  <mulrdx=int64#3,>rt1=int64#10
5112# asm 2: mov  <mulrdx=%rdx,>rt1=%r12
5113mov  %rdx,%r12
5114
5115# qhasm:   mulrax = h1_stack
5116# asm 1: movq <h1_stack=stack64#9,>mulrax=int64#7
5117# asm 2: movq <h1_stack=64(%rsp),>mulrax=%rax
5118movq 64(%rsp),%rax
5119
5120# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx0
5121# asm 1: mul  <mulx0=int64#8
5122# asm 2: mul  <mulx0=%r10
5123mul  %r10
5124
5125# qhasm:   carry? rt1 += mulrax
5126# asm 1: add  <mulrax=int64#7,<rt1=int64#10
5127# asm 2: add  <mulrax=%rax,<rt1=%r12
5128add  %rax,%r12
5129
5130# qhasm:   rt2 = 0
5131# asm 1: mov  $0,>rt2=int64#11
5132# asm 2: mov  $0,>rt2=%r13
5133mov  $0,%r13
5134
5135# qhasm:   rt2 += mulrdx + carry
5136# asm 1: adc <mulrdx=int64#3,<rt2=int64#11
5137# asm 2: adc <mulrdx=%rdx,<rt2=%r13
5138adc %rdx,%r13
5139
5140# qhasm:   mulrax = h2_stack
5141# asm 1: movq <h2_stack=stack64#10,>mulrax=int64#7
5142# asm 2: movq <h2_stack=72(%rsp),>mulrax=%rax
5143movq 72(%rsp),%rax
5144
5145# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx0
5146# asm 1: mul  <mulx0=int64#8
5147# asm 2: mul  <mulx0=%r10
5148mul  %r10
5149
5150# qhasm:   carry? rt2 += mulrax
5151# asm 1: add  <mulrax=int64#7,<rt2=int64#11
5152# asm 2: add  <mulrax=%rax,<rt2=%r13
5153add  %rax,%r13
5154
5155# qhasm:   rt3 = 0
5156# asm 1: mov  $0,>rt3=int64#12
5157# asm 2: mov  $0,>rt3=%r14
5158mov  $0,%r14
5159
5160# qhasm:   rt3 += mulrdx + carry
5161# asm 1: adc <mulrdx=int64#3,<rt3=int64#12
5162# asm 2: adc <mulrdx=%rdx,<rt3=%r14
5163adc %rdx,%r14
5164
5165# qhasm:   mulrax = h3_stack
5166# asm 1: movq <h3_stack=stack64#11,>mulrax=int64#7
5167# asm 2: movq <h3_stack=80(%rsp),>mulrax=%rax
5168movq 80(%rsp),%rax
5169
5170# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx0
5171# asm 1: mul  <mulx0=int64#8
5172# asm 2: mul  <mulx0=%r10
5173mul  %r10
5174
5175# qhasm:   carry? rt3 += mulrax
5176# asm 1: add  <mulrax=int64#7,<rt3=int64#12
5177# asm 2: add  <mulrax=%rax,<rt3=%r14
5178add  %rax,%r14
5179
5180# qhasm:   mulr4 += mulrdx + carry
5181# asm 1: adc <mulrdx=int64#3,<mulr4=int64#2
5182# asm 2: adc <mulrdx=%rdx,<mulr4=%rsi
5183adc %rdx,%rsi
5184
5185# qhasm:   mulx1 = e1_stack
5186# asm 1: movq <e1_stack=stack64#13,>mulx1=int64#8
5187# asm 2: movq <e1_stack=96(%rsp),>mulx1=%r10
5188movq 96(%rsp),%r10
5189
5190# qhasm:   mulrax = h0_stack
5191# asm 1: movq <h0_stack=stack64#8,>mulrax=int64#7
5192# asm 2: movq <h0_stack=56(%rsp),>mulrax=%rax
5193movq 56(%rsp),%rax
5194
5195# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx1
5196# asm 1: mul  <mulx1=int64#8
5197# asm 2: mul  <mulx1=%r10
5198mul  %r10
5199
5200# qhasm:   carry? rt1 += mulrax
5201# asm 1: add  <mulrax=int64#7,<rt1=int64#10
5202# asm 2: add  <mulrax=%rax,<rt1=%r12
5203add  %rax,%r12
5204
5205# qhasm:   mulc = 0
5206# asm 1: mov  $0,>mulc=int64#13
5207# asm 2: mov  $0,>mulc=%r15
5208mov  $0,%r15
5209
5210# qhasm:   mulc += mulrdx + carry
5211# asm 1: adc <mulrdx=int64#3,<mulc=int64#13
5212# asm 2: adc <mulrdx=%rdx,<mulc=%r15
5213adc %rdx,%r15
5214
5215# qhasm:   mulrax = h1_stack
5216# asm 1: movq <h1_stack=stack64#9,>mulrax=int64#7
5217# asm 2: movq <h1_stack=64(%rsp),>mulrax=%rax
5218movq 64(%rsp),%rax
5219
5220# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx1
5221# asm 1: mul  <mulx1=int64#8
5222# asm 2: mul  <mulx1=%r10
5223mul  %r10
5224
5225# qhasm:   carry? rt2 += mulrax
5226# asm 1: add  <mulrax=int64#7,<rt2=int64#11
5227# asm 2: add  <mulrax=%rax,<rt2=%r13
5228add  %rax,%r13
5229
5230# qhasm:   mulrdx += 0 + carry
5231# asm 1: adc $0,<mulrdx=int64#3
5232# asm 2: adc $0,<mulrdx=%rdx
5233adc $0,%rdx
5234
5235# qhasm:   carry? rt2 += mulc
5236# asm 1: add  <mulc=int64#13,<rt2=int64#11
5237# asm 2: add  <mulc=%r15,<rt2=%r13
5238add  %r15,%r13
5239
5240# qhasm:   mulc = 0
5241# asm 1: mov  $0,>mulc=int64#13
5242# asm 2: mov  $0,>mulc=%r15
5243mov  $0,%r15
5244
5245# qhasm:   mulc += mulrdx + carry
5246# asm 1: adc <mulrdx=int64#3,<mulc=int64#13
5247# asm 2: adc <mulrdx=%rdx,<mulc=%r15
5248adc %rdx,%r15
5249
5250# qhasm:   mulrax = h2_stack
5251# asm 1: movq <h2_stack=stack64#10,>mulrax=int64#7
5252# asm 2: movq <h2_stack=72(%rsp),>mulrax=%rax
5253movq 72(%rsp),%rax
5254
5255# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx1
5256# asm 1: mul  <mulx1=int64#8
5257# asm 2: mul  <mulx1=%r10
5258mul  %r10
5259
5260# qhasm:   carry? rt3 += mulrax
5261# asm 1: add  <mulrax=int64#7,<rt3=int64#12
5262# asm 2: add  <mulrax=%rax,<rt3=%r14
5263add  %rax,%r14
5264
5265# qhasm:   mulrdx += 0 + carry
5266# asm 1: adc $0,<mulrdx=int64#3
5267# asm 2: adc $0,<mulrdx=%rdx
5268adc $0,%rdx
5269
5270# qhasm:   carry? rt3 += mulc
5271# asm 1: add  <mulc=int64#13,<rt3=int64#12
5272# asm 2: add  <mulc=%r15,<rt3=%r14
5273add  %r15,%r14
5274
5275# qhasm:   mulc = 0
5276# asm 1: mov  $0,>mulc=int64#13
5277# asm 2: mov  $0,>mulc=%r15
5278mov  $0,%r15
5279
5280# qhasm:   mulc += mulrdx + carry
5281# asm 1: adc <mulrdx=int64#3,<mulc=int64#13
5282# asm 2: adc <mulrdx=%rdx,<mulc=%r15
5283adc %rdx,%r15
5284
5285# qhasm:   mulrax = h3_stack
5286# asm 1: movq <h3_stack=stack64#11,>mulrax=int64#7
5287# asm 2: movq <h3_stack=80(%rsp),>mulrax=%rax
5288movq 80(%rsp),%rax
5289
5290# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx1
5291# asm 1: mul  <mulx1=int64#8
5292# asm 2: mul  <mulx1=%r10
5293mul  %r10
5294
5295# qhasm:   carry? mulr4 += mulrax
5296# asm 1: add  <mulrax=int64#7,<mulr4=int64#2
5297# asm 2: add  <mulrax=%rax,<mulr4=%rsi
5298add  %rax,%rsi
5299
5300# qhasm:   mulrdx += 0 + carry
5301# asm 1: adc $0,<mulrdx=int64#3
5302# asm 2: adc $0,<mulrdx=%rdx
5303adc $0,%rdx
5304
5305# qhasm:   carry? mulr4 += mulc
5306# asm 1: add  <mulc=int64#13,<mulr4=int64#2
5307# asm 2: add  <mulc=%r15,<mulr4=%rsi
5308add  %r15,%rsi
5309
5310# qhasm:   mulr5 += mulrdx + carry
5311# asm 1: adc <mulrdx=int64#3,<mulr5=int64#4
5312# asm 2: adc <mulrdx=%rdx,<mulr5=%rcx
5313adc %rdx,%rcx
5314
5315# qhasm:   mulx2 = e2_stack
5316# asm 1: movq <e2_stack=stack64#14,>mulx2=int64#8
5317# asm 2: movq <e2_stack=104(%rsp),>mulx2=%r10
5318movq 104(%rsp),%r10
5319
5320# qhasm:   mulrax = h0_stack
5321# asm 1: movq <h0_stack=stack64#8,>mulrax=int64#7
5322# asm 2: movq <h0_stack=56(%rsp),>mulrax=%rax
5323movq 56(%rsp),%rax
5324
5325# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx2
5326# asm 1: mul  <mulx2=int64#8
5327# asm 2: mul  <mulx2=%r10
5328mul  %r10
5329
5330# qhasm:   carry? rt2 += mulrax
5331# asm 1: add  <mulrax=int64#7,<rt2=int64#11
5332# asm 2: add  <mulrax=%rax,<rt2=%r13
5333add  %rax,%r13
5334
5335# qhasm:   mulc = 0
5336# asm 1: mov  $0,>mulc=int64#13
5337# asm 2: mov  $0,>mulc=%r15
5338mov  $0,%r15
5339
5340# qhasm:   mulc += mulrdx + carry
5341# asm 1: adc <mulrdx=int64#3,<mulc=int64#13
5342# asm 2: adc <mulrdx=%rdx,<mulc=%r15
5343adc %rdx,%r15
5344
5345# qhasm:   mulrax = h1_stack
5346# asm 1: movq <h1_stack=stack64#9,>mulrax=int64#7
5347# asm 2: movq <h1_stack=64(%rsp),>mulrax=%rax
5348movq 64(%rsp),%rax
5349
5350# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx2
5351# asm 1: mul  <mulx2=int64#8
5352# asm 2: mul  <mulx2=%r10
5353mul  %r10
5354
5355# qhasm:   carry? rt3 += mulrax
5356# asm 1: add  <mulrax=int64#7,<rt3=int64#12
5357# asm 2: add  <mulrax=%rax,<rt3=%r14
5358add  %rax,%r14
5359
5360# qhasm:   mulrdx += 0 + carry
5361# asm 1: adc $0,<mulrdx=int64#3
5362# asm 2: adc $0,<mulrdx=%rdx
5363adc $0,%rdx
5364
5365# qhasm:   carry? rt3 += mulc
5366# asm 1: add  <mulc=int64#13,<rt3=int64#12
5367# asm 2: add  <mulc=%r15,<rt3=%r14
5368add  %r15,%r14
5369
5370# qhasm:   mulc = 0
5371# asm 1: mov  $0,>mulc=int64#13
5372# asm 2: mov  $0,>mulc=%r15
5373mov  $0,%r15
5374
5375# qhasm:   mulc += mulrdx + carry
5376# asm 1: adc <mulrdx=int64#3,<mulc=int64#13
5377# asm 2: adc <mulrdx=%rdx,<mulc=%r15
5378adc %rdx,%r15
5379
5380# qhasm:   mulrax = h2_stack
5381# asm 1: movq <h2_stack=stack64#10,>mulrax=int64#7
5382# asm 2: movq <h2_stack=72(%rsp),>mulrax=%rax
5383movq 72(%rsp),%rax
5384
5385# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx2
5386# asm 1: mul  <mulx2=int64#8
5387# asm 2: mul  <mulx2=%r10
5388mul  %r10
5389
5390# qhasm:   carry? mulr4 += mulrax
5391# asm 1: add  <mulrax=int64#7,<mulr4=int64#2
5392# asm 2: add  <mulrax=%rax,<mulr4=%rsi
5393add  %rax,%rsi
5394
5395# qhasm:   mulrdx += 0 + carry
5396# asm 1: adc $0,<mulrdx=int64#3
5397# asm 2: adc $0,<mulrdx=%rdx
5398adc $0,%rdx
5399
5400# qhasm:   carry? mulr4 += mulc
5401# asm 1: add  <mulc=int64#13,<mulr4=int64#2
5402# asm 2: add  <mulc=%r15,<mulr4=%rsi
5403add  %r15,%rsi
5404
5405# qhasm:   mulc = 0
5406# asm 1: mov  $0,>mulc=int64#13
5407# asm 2: mov  $0,>mulc=%r15
5408mov  $0,%r15
5409
5410# qhasm:   mulc += mulrdx + carry
5411# asm 1: adc <mulrdx=int64#3,<mulc=int64#13
5412# asm 2: adc <mulrdx=%rdx,<mulc=%r15
5413adc %rdx,%r15
5414
5415# qhasm:   mulrax = h3_stack
5416# asm 1: movq <h3_stack=stack64#11,>mulrax=int64#7
5417# asm 2: movq <h3_stack=80(%rsp),>mulrax=%rax
5418movq 80(%rsp),%rax
5419
5420# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx2
5421# asm 1: mul  <mulx2=int64#8
5422# asm 2: mul  <mulx2=%r10
5423mul  %r10
5424
5425# qhasm:   carry? mulr5 += mulrax
5426# asm 1: add  <mulrax=int64#7,<mulr5=int64#4
5427# asm 2: add  <mulrax=%rax,<mulr5=%rcx
5428add  %rax,%rcx
5429
5430# qhasm:   mulrdx += 0 + carry
5431# asm 1: adc $0,<mulrdx=int64#3
5432# asm 2: adc $0,<mulrdx=%rdx
5433adc $0,%rdx
5434
5435# qhasm:   carry? mulr5 += mulc
5436# asm 1: add  <mulc=int64#13,<mulr5=int64#4
5437# asm 2: add  <mulc=%r15,<mulr5=%rcx
5438add  %r15,%rcx
5439
5440# qhasm:   mulr6 += mulrdx + carry
5441# asm 1: adc <mulrdx=int64#3,<mulr6=int64#5
5442# asm 2: adc <mulrdx=%rdx,<mulr6=%r8
5443adc %rdx,%r8
5444
5445# qhasm:   mulx3 = e3_stack
5446# asm 1: movq <e3_stack=stack64#15,>mulx3=int64#8
5447# asm 2: movq <e3_stack=112(%rsp),>mulx3=%r10
5448movq 112(%rsp),%r10
5449
5450# qhasm:   mulrax = h0_stack
5451# asm 1: movq <h0_stack=stack64#8,>mulrax=int64#7
5452# asm 2: movq <h0_stack=56(%rsp),>mulrax=%rax
5453movq 56(%rsp),%rax
5454
5455# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx3
5456# asm 1: mul  <mulx3=int64#8
5457# asm 2: mul  <mulx3=%r10
5458mul  %r10
5459
5460# qhasm:   carry? rt3 += mulrax
5461# asm 1: add  <mulrax=int64#7,<rt3=int64#12
5462# asm 2: add  <mulrax=%rax,<rt3=%r14
5463add  %rax,%r14
5464
5465# qhasm:   mulc = 0
5466# asm 1: mov  $0,>mulc=int64#13
5467# asm 2: mov  $0,>mulc=%r15
5468mov  $0,%r15
5469
5470# qhasm:   mulc += mulrdx + carry
5471# asm 1: adc <mulrdx=int64#3,<mulc=int64#13
5472# asm 2: adc <mulrdx=%rdx,<mulc=%r15
5473adc %rdx,%r15
5474
5475# qhasm:   mulrax = h1_stack
5476# asm 1: movq <h1_stack=stack64#9,>mulrax=int64#7
5477# asm 2: movq <h1_stack=64(%rsp),>mulrax=%rax
5478movq 64(%rsp),%rax
5479
5480# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx3
5481# asm 1: mul  <mulx3=int64#8
5482# asm 2: mul  <mulx3=%r10
5483mul  %r10
5484
5485# qhasm:   carry? mulr4 += mulrax
5486# asm 1: add  <mulrax=int64#7,<mulr4=int64#2
5487# asm 2: add  <mulrax=%rax,<mulr4=%rsi
5488add  %rax,%rsi
5489
5490# qhasm:   mulrdx += 0 + carry
5491# asm 1: adc $0,<mulrdx=int64#3
5492# asm 2: adc $0,<mulrdx=%rdx
5493adc $0,%rdx
5494
5495# qhasm:   carry? mulr4 += mulc
5496# asm 1: add  <mulc=int64#13,<mulr4=int64#2
5497# asm 2: add  <mulc=%r15,<mulr4=%rsi
5498add  %r15,%rsi
5499
5500# qhasm:   mulc = 0
5501# asm 1: mov  $0,>mulc=int64#13
5502# asm 2: mov  $0,>mulc=%r15
5503mov  $0,%r15
5504
5505# qhasm:   mulc += mulrdx + carry
5506# asm 1: adc <mulrdx=int64#3,<mulc=int64#13
5507# asm 2: adc <mulrdx=%rdx,<mulc=%r15
5508adc %rdx,%r15
5509
5510# qhasm:   mulrax = h2_stack
5511# asm 1: movq <h2_stack=stack64#10,>mulrax=int64#7
5512# asm 2: movq <h2_stack=72(%rsp),>mulrax=%rax
5513movq 72(%rsp),%rax
5514
5515# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx3
5516# asm 1: mul  <mulx3=int64#8
5517# asm 2: mul  <mulx3=%r10
5518mul  %r10
5519
5520# qhasm:   carry? mulr5 += mulrax
5521# asm 1: add  <mulrax=int64#7,<mulr5=int64#4
5522# asm 2: add  <mulrax=%rax,<mulr5=%rcx
5523add  %rax,%rcx
5524
5525# qhasm:   mulrdx += 0 + carry
5526# asm 1: adc $0,<mulrdx=int64#3
5527# asm 2: adc $0,<mulrdx=%rdx
5528adc $0,%rdx
5529
5530# qhasm:   carry? mulr5 += mulc
5531# asm 1: add  <mulc=int64#13,<mulr5=int64#4
5532# asm 2: add  <mulc=%r15,<mulr5=%rcx
5533add  %r15,%rcx
5534
5535# qhasm:   mulc = 0
5536# asm 1: mov  $0,>mulc=int64#13
5537# asm 2: mov  $0,>mulc=%r15
5538mov  $0,%r15
5539
5540# qhasm:   mulc += mulrdx + carry
5541# asm 1: adc <mulrdx=int64#3,<mulc=int64#13
5542# asm 2: adc <mulrdx=%rdx,<mulc=%r15
5543adc %rdx,%r15
5544
5545# qhasm:   mulrax = h3_stack
5546# asm 1: movq <h3_stack=stack64#11,>mulrax=int64#7
5547# asm 2: movq <h3_stack=80(%rsp),>mulrax=%rax
5548movq 80(%rsp),%rax
5549
5550# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx3
5551# asm 1: mul  <mulx3=int64#8
5552# asm 2: mul  <mulx3=%r10
5553mul  %r10
5554
5555# qhasm:   carry? mulr6 += mulrax
5556# asm 1: add  <mulrax=int64#7,<mulr6=int64#5
5557# asm 2: add  <mulrax=%rax,<mulr6=%r8
5558add  %rax,%r8
5559
5560# qhasm:   mulrdx += 0 + carry
5561# asm 1: adc $0,<mulrdx=int64#3
5562# asm 2: adc $0,<mulrdx=%rdx
5563adc $0,%rdx
5564
5565# qhasm:   carry? mulr6 += mulc
5566# asm 1: add  <mulc=int64#13,<mulr6=int64#5
5567# asm 2: add  <mulc=%r15,<mulr6=%r8
5568add  %r15,%r8
5569
5570# qhasm:   mulr7 += mulrdx + carry
5571# asm 1: adc <mulrdx=int64#3,<mulr7=int64#6
5572# asm 2: adc <mulrdx=%rdx,<mulr7=%r9
5573adc %rdx,%r9
5574
5575# qhasm:   mulrax = mulr4
5576# asm 1: mov  <mulr4=int64#2,>mulrax=int64#7
5577# asm 2: mov  <mulr4=%rsi,>mulrax=%rax
5578mov  %rsi,%rax
5579
5580# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
5581mulq  crypto_sign_ed25519_amd64_64_38
5582
5583# qhasm:   mulr4 = mulrax
5584# asm 1: mov  <mulrax=int64#7,>mulr4=int64#2
5585# asm 2: mov  <mulrax=%rax,>mulr4=%rsi
5586mov  %rax,%rsi
5587
5588# qhasm:   mulrax = mulr5
5589# asm 1: mov  <mulr5=int64#4,>mulrax=int64#7
5590# asm 2: mov  <mulr5=%rcx,>mulrax=%rax
5591mov  %rcx,%rax
5592
5593# qhasm:   mulr5 = mulrdx
5594# asm 1: mov  <mulrdx=int64#3,>mulr5=int64#4
5595# asm 2: mov  <mulrdx=%rdx,>mulr5=%rcx
5596mov  %rdx,%rcx
5597
5598# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
5599mulq  crypto_sign_ed25519_amd64_64_38
5600
5601# qhasm:   carry? mulr5 += mulrax
5602# asm 1: add  <mulrax=int64#7,<mulr5=int64#4
5603# asm 2: add  <mulrax=%rax,<mulr5=%rcx
5604add  %rax,%rcx
5605
5606# qhasm:   mulrax = mulr6
5607# asm 1: mov  <mulr6=int64#5,>mulrax=int64#7
5608# asm 2: mov  <mulr6=%r8,>mulrax=%rax
5609mov  %r8,%rax
5610
5611# qhasm:   mulr6 = 0
5612# asm 1: mov  $0,>mulr6=int64#5
5613# asm 2: mov  $0,>mulr6=%r8
5614mov  $0,%r8
5615
5616# qhasm:   mulr6 += mulrdx + carry
5617# asm 1: adc <mulrdx=int64#3,<mulr6=int64#5
5618# asm 2: adc <mulrdx=%rdx,<mulr6=%r8
5619adc %rdx,%r8
5620
5621# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
5622mulq  crypto_sign_ed25519_amd64_64_38
5623
5624# qhasm:   carry? mulr6 += mulrax
5625# asm 1: add  <mulrax=int64#7,<mulr6=int64#5
5626# asm 2: add  <mulrax=%rax,<mulr6=%r8
5627add  %rax,%r8
5628
5629# qhasm:   mulrax = mulr7
5630# asm 1: mov  <mulr7=int64#6,>mulrax=int64#7
5631# asm 2: mov  <mulr7=%r9,>mulrax=%rax
5632mov  %r9,%rax
5633
5634# qhasm:   mulr7 = 0
5635# asm 1: mov  $0,>mulr7=int64#6
5636# asm 2: mov  $0,>mulr7=%r9
5637mov  $0,%r9
5638
5639# qhasm:   mulr7 += mulrdx + carry
5640# asm 1: adc <mulrdx=int64#3,<mulr7=int64#6
5641# asm 2: adc <mulrdx=%rdx,<mulr7=%r9
5642adc %rdx,%r9
5643
5644# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
5645mulq  crypto_sign_ed25519_amd64_64_38
5646
5647# qhasm:   carry? mulr7 += mulrax
5648# asm 1: add  <mulrax=int64#7,<mulr7=int64#6
5649# asm 2: add  <mulrax=%rax,<mulr7=%r9
5650add  %rax,%r9
5651
5652# qhasm:   mulr8 = 0
5653# asm 1: mov  $0,>mulr8=int64#7
5654# asm 2: mov  $0,>mulr8=%rax
5655mov  $0,%rax
5656
5657# qhasm:   mulr8 += mulrdx + carry
5658# asm 1: adc <mulrdx=int64#3,<mulr8=int64#7
5659# asm 2: adc <mulrdx=%rdx,<mulr8=%rax
5660adc %rdx,%rax
5661
5662# qhasm:   carry? rt0 += mulr4
5663# asm 1: add  <mulr4=int64#2,<rt0=int64#9
5664# asm 2: add  <mulr4=%rsi,<rt0=%r11
5665add  %rsi,%r11
5666
5667# qhasm:   carry? rt1 += mulr5 + carry
5668# asm 1: adc <mulr5=int64#4,<rt1=int64#10
5669# asm 2: adc <mulr5=%rcx,<rt1=%r12
5670adc %rcx,%r12
5671
5672# qhasm:   carry? rt2 += mulr6 + carry
5673# asm 1: adc <mulr6=int64#5,<rt2=int64#11
5674# asm 2: adc <mulr6=%r8,<rt2=%r13
5675adc %r8,%r13
5676
5677# qhasm:   carry? rt3 += mulr7 + carry
5678# asm 1: adc <mulr7=int64#6,<rt3=int64#12
5679# asm 2: adc <mulr7=%r9,<rt3=%r14
5680adc %r9,%r14
5681
5682# qhasm:   mulzero = 0
5683# asm 1: mov  $0,>mulzero=int64#2
5684# asm 2: mov  $0,>mulzero=%rsi
5685mov  $0,%rsi
5686
5687# qhasm:   mulr8 += mulzero + carry
5688# asm 1: adc <mulzero=int64#2,<mulr8=int64#7
5689# asm 2: adc <mulzero=%rsi,<mulr8=%rax
5690adc %rsi,%rax
5691
5692# qhasm:   mulr8 *= 38
5693# asm 1: imulq  $38,<mulr8=int64#7,>mulr8=int64#3
5694# asm 2: imulq  $38,<mulr8=%rax,>mulr8=%rdx
5695imulq  $38,%rax,%rdx
5696
5697# qhasm:   carry? rt0 += mulr8
5698# asm 1: add  <mulr8=int64#3,<rt0=int64#9
5699# asm 2: add  <mulr8=%rdx,<rt0=%r11
5700add  %rdx,%r11
5701
5702# qhasm:   carry? rt1 += mulzero + carry
5703# asm 1: adc <mulzero=int64#2,<rt1=int64#10
5704# asm 2: adc <mulzero=%rsi,<rt1=%r12
5705adc %rsi,%r12
5706
5707# qhasm:   carry? rt2 += mulzero + carry
5708# asm 1: adc <mulzero=int64#2,<rt2=int64#11
5709# asm 2: adc <mulzero=%rsi,<rt2=%r13
5710adc %rsi,%r13
5711
5712# qhasm:   carry? rt3 += mulzero + carry
5713# asm 1: adc <mulzero=int64#2,<rt3=int64#12
5714# asm 2: adc <mulzero=%rsi,<rt3=%r14
5715adc %rsi,%r14
5716
5717# qhasm:   mulzero += mulzero + carry
5718# asm 1: adc <mulzero=int64#2,<mulzero=int64#2
5719# asm 2: adc <mulzero=%rsi,<mulzero=%rsi
5720adc %rsi,%rsi
5721
5722# qhasm:   mulzero *= 38
5723# asm 1: imulq  $38,<mulzero=int64#2,>mulzero=int64#2
5724# asm 2: imulq  $38,<mulzero=%rsi,>mulzero=%rsi
5725imulq  $38,%rsi,%rsi
5726
5727# qhasm:   rt0 += mulzero
5728# asm 1: add  <mulzero=int64#2,<rt0=int64#9
5729# asm 2: add  <mulzero=%rsi,<rt0=%r11
5730add  %rsi,%r11
5731
5732# qhasm: *(uint64 *)(rp + 96) = rt0
5733# asm 1: movq   <rt0=int64#9,96(<rp=int64#1)
5734# asm 2: movq   <rt0=%r11,96(<rp=%rdi)
5735movq   %r11,96(%rdi)
5736
5737# qhasm: *(uint64 *)(rp + 104) = rt1
5738# asm 1: movq   <rt1=int64#10,104(<rp=int64#1)
5739# asm 2: movq   <rt1=%r12,104(<rp=%rdi)
5740movq   %r12,104(%rdi)
5741
5742# qhasm: *(uint64 *)(rp + 112) = rt2
5743# asm 1: movq   <rt2=int64#11,112(<rp=int64#1)
5744# asm 2: movq   <rt2=%r13,112(<rp=%rdi)
5745movq   %r13,112(%rdi)
5746
5747# qhasm: *(uint64 *)(rp + 120) = rt3
5748# asm 1: movq   <rt3=int64#12,120(<rp=int64#1)
5749# asm 2: movq   <rt3=%r14,120(<rp=%rdi)
5750movq   %r14,120(%rdi)
5751
5752# qhasm:   caller1 = caller1_stack
5753# asm 1: movq <caller1_stack=stack64#1,>caller1=int64#9
5754# asm 2: movq <caller1_stack=0(%rsp),>caller1=%r11
5755movq 0(%rsp),%r11
5756
5757# qhasm:   caller2 = caller2_stack
5758# asm 1: movq <caller2_stack=stack64#2,>caller2=int64#10
5759# asm 2: movq <caller2_stack=8(%rsp),>caller2=%r12
5760movq 8(%rsp),%r12
5761
5762# qhasm:   caller3 = caller3_stack
5763# asm 1: movq <caller3_stack=stack64#3,>caller3=int64#11
5764# asm 2: movq <caller3_stack=16(%rsp),>caller3=%r13
5765movq 16(%rsp),%r13
5766
5767# qhasm:   caller4 = caller4_stack
5768# asm 1: movq <caller4_stack=stack64#4,>caller4=int64#12
5769# asm 2: movq <caller4_stack=24(%rsp),>caller4=%r14
5770movq 24(%rsp),%r14
5771
5772# qhasm:   caller5 = caller5_stack
5773# asm 1: movq <caller5_stack=stack64#5,>caller5=int64#13
5774# asm 2: movq <caller5_stack=32(%rsp),>caller5=%r15
5775movq 32(%rsp),%r15
5776
5777# qhasm:   caller6 = caller6_stack
5778# asm 1: movq <caller6_stack=stack64#6,>caller6=int64#14
5779# asm 2: movq <caller6_stack=40(%rsp),>caller6=%rbx
5780movq 40(%rsp),%rbx
5781
5782# qhasm:   caller7 = caller7_stack
5783# asm 1: movq <caller7_stack=stack64#7,>caller7=int64#15
5784# asm 2: movq <caller7_stack=48(%rsp),>caller7=%rbp
5785movq 48(%rsp),%rbp
5786
5787# qhasm: leave
5788add %r11,%rsp
5789mov %rdi,%rax
5790mov %rsi,%rdx
5791ret
5792