1
2# qhasm: int64 rp
3
4# qhasm: int64 pp
5
6# qhasm: int64 qp
7
8# qhasm: input rp
9
10# qhasm: input pp
11
12# qhasm: input qp
13
14# qhasm:   int64 caller1
15
16# qhasm:   int64 caller2
17
18# qhasm:   int64 caller3
19
20# qhasm:   int64 caller4
21
22# qhasm:   int64 caller5
23
24# qhasm:   int64 caller6
25
26# qhasm:   int64 caller7
27
28# qhasm:   caller caller1
29
30# qhasm:   caller caller2
31
32# qhasm:   caller caller3
33
34# qhasm:   caller caller4
35
36# qhasm:   caller caller5
37
38# qhasm:   caller caller6
39
40# qhasm:   caller caller7
41
42# qhasm:   stack64 caller1_stack
43
44# qhasm:   stack64 caller2_stack
45
46# qhasm:   stack64 caller3_stack
47
48# qhasm:   stack64 caller4_stack
49
50# qhasm:   stack64 caller5_stack
51
52# qhasm:   stack64 caller6_stack
53
54# qhasm:   stack64 caller7_stack
55
56# qhasm: int64 a0
57
58# qhasm: int64 a1
59
60# qhasm: int64 a2
61
62# qhasm: int64 a3
63
64# qhasm: stack64 a0_stack
65
66# qhasm: stack64 a1_stack
67
68# qhasm: stack64 a2_stack
69
70# qhasm: stack64 a3_stack
71
72# qhasm: int64 b0
73
74# qhasm: int64 b1
75
76# qhasm: int64 b2
77
78# qhasm: int64 b3
79
80# qhasm: stack64 b0_stack
81
82# qhasm: stack64 b1_stack
83
84# qhasm: stack64 b2_stack
85
86# qhasm: stack64 b3_stack
87
88# qhasm: int64 c0
89
90# qhasm: int64 c1
91
92# qhasm: int64 c2
93
94# qhasm: int64 c3
95
96# qhasm: stack64 c0_stack
97
98# qhasm: stack64 c1_stack
99
100# qhasm: stack64 c2_stack
101
102# qhasm: stack64 c3_stack
103
104# qhasm: int64 d0
105
106# qhasm: int64 d1
107
108# qhasm: int64 d2
109
110# qhasm: int64 d3
111
112# qhasm: stack64 d0_stack
113
114# qhasm: stack64 d1_stack
115
116# qhasm: stack64 d2_stack
117
118# qhasm: stack64 d3_stack
119
120# qhasm: int64 t10
121
122# qhasm: int64 t11
123
124# qhasm: int64 t12
125
126# qhasm: int64 t13
127
128# qhasm: stack64 t10_stack
129
130# qhasm: stack64 t11_stack
131
132# qhasm: stack64 t12_stack
133
134# qhasm: stack64 t13_stack
135
136# qhasm: int64 t20
137
138# qhasm: int64 t21
139
140# qhasm: int64 t22
141
142# qhasm: int64 t23
143
144# qhasm: stack64 t20_stack
145
146# qhasm: stack64 t21_stack
147
148# qhasm: stack64 t22_stack
149
150# qhasm: stack64 t23_stack
151
152# qhasm: int64 rx0
153
154# qhasm: int64 rx1
155
156# qhasm: int64 rx2
157
158# qhasm: int64 rx3
159
160# qhasm: int64 ry0
161
162# qhasm: int64 ry1
163
164# qhasm: int64 ry2
165
166# qhasm: int64 ry3
167
168# qhasm: int64 rz0
169
170# qhasm: int64 rz1
171
172# qhasm: int64 rz2
173
174# qhasm: int64 rz3
175
176# qhasm: int64 rt0
177
178# qhasm: int64 rt1
179
180# qhasm: int64 rt2
181
182# qhasm: int64 rt3
183
184# qhasm: int64 x0
185
186# qhasm: int64 x1
187
188# qhasm: int64 x2
189
190# qhasm: int64 x3
191
192# qhasm: int64 mulr4
193
194# qhasm: int64 mulr5
195
196# qhasm: int64 mulr6
197
198# qhasm: int64 mulr7
199
200# qhasm: int64 mulr8
201
202# qhasm: int64 mulrax
203
204# qhasm: int64 mulrdx
205
206# qhasm: int64 mulx0
207
208# qhasm: int64 mulx1
209
210# qhasm: int64 mulx2
211
212# qhasm: int64 mulx3
213
214# qhasm: int64 mulc
215
216# qhasm: int64 mulzero
217
218# qhasm: int64 muli38
219
220# qhasm: int64 addt0
221
222# qhasm: int64 addt1
223
224# qhasm: int64 subt0
225
226# qhasm: int64 subt1
227
228# qhasm: enter crypto_sign_ed25519_amd64_64_ge25519_pnielsadd_p1p1
229.text
230.p2align 5
231.globl _crypto_sign_ed25519_amd64_64_ge25519_pnielsadd_p1p1
232.globl crypto_sign_ed25519_amd64_64_ge25519_pnielsadd_p1p1
233_crypto_sign_ed25519_amd64_64_ge25519_pnielsadd_p1p1:
234crypto_sign_ed25519_amd64_64_ge25519_pnielsadd_p1p1:
235mov %rsp,%r11
236and $31,%r11
237add $128,%r11
238sub %r11,%rsp
239
240# qhasm:   caller1_stack = caller1
241# asm 1: movq <caller1=int64#9,>caller1_stack=stack64#1
242# asm 2: movq <caller1=%r11,>caller1_stack=0(%rsp)
243movq %r11,0(%rsp)
244
245# qhasm:   caller2_stack = caller2
246# asm 1: movq <caller2=int64#10,>caller2_stack=stack64#2
247# asm 2: movq <caller2=%r12,>caller2_stack=8(%rsp)
248movq %r12,8(%rsp)
249
250# qhasm:   caller3_stack = caller3
251# asm 1: movq <caller3=int64#11,>caller3_stack=stack64#3
252# asm 2: movq <caller3=%r13,>caller3_stack=16(%rsp)
253movq %r13,16(%rsp)
254
255# qhasm:   caller4_stack = caller4
256# asm 1: movq <caller4=int64#12,>caller4_stack=stack64#4
257# asm 2: movq <caller4=%r14,>caller4_stack=24(%rsp)
258movq %r14,24(%rsp)
259
260# qhasm:   caller5_stack = caller5
261# asm 1: movq <caller5=int64#13,>caller5_stack=stack64#5
262# asm 2: movq <caller5=%r15,>caller5_stack=32(%rsp)
263movq %r15,32(%rsp)
264
265# qhasm:   caller6_stack = caller6
266# asm 1: movq <caller6=int64#14,>caller6_stack=stack64#6
267# asm 2: movq <caller6=%rbx,>caller6_stack=40(%rsp)
268movq %rbx,40(%rsp)
269
270# qhasm:   caller7_stack = caller7
271# asm 1: movq <caller7=int64#15,>caller7_stack=stack64#7
272# asm 2: movq <caller7=%rbp,>caller7_stack=48(%rsp)
273movq %rbp,48(%rsp)
274
275# qhasm: qp = qp
276# asm 1: mov  <qp=int64#3,>qp=int64#4
277# asm 2: mov  <qp=%rdx,>qp=%rcx
278mov  %rdx,%rcx
279
280# qhasm: a0 = *(uint64 *)(pp + 32)
281# asm 1: movq   32(<pp=int64#2),>a0=int64#3
282# asm 2: movq   32(<pp=%rsi),>a0=%rdx
283movq   32(%rsi),%rdx
284
285# qhasm: a1 = *(uint64 *)(pp + 40)
286# asm 1: movq   40(<pp=int64#2),>a1=int64#5
287# asm 2: movq   40(<pp=%rsi),>a1=%r8
288movq   40(%rsi),%r8
289
290# qhasm: a2 = *(uint64 *)(pp + 48)
291# asm 1: movq   48(<pp=int64#2),>a2=int64#6
292# asm 2: movq   48(<pp=%rsi),>a2=%r9
293movq   48(%rsi),%r9
294
295# qhasm: a3 = *(uint64 *)(pp + 56)
296# asm 1: movq   56(<pp=int64#2),>a3=int64#7
297# asm 2: movq   56(<pp=%rsi),>a3=%rax
298movq   56(%rsi),%rax
299
300# qhasm: b0 = a0
301# asm 1: mov  <a0=int64#3,>b0=int64#8
302# asm 2: mov  <a0=%rdx,>b0=%r10
303mov  %rdx,%r10
304
305# qhasm: b1 = a1
306# asm 1: mov  <a1=int64#5,>b1=int64#9
307# asm 2: mov  <a1=%r8,>b1=%r11
308mov  %r8,%r11
309
310# qhasm: b2 = a2
311# asm 1: mov  <a2=int64#6,>b2=int64#10
312# asm 2: mov  <a2=%r9,>b2=%r12
313mov  %r9,%r12
314
315# qhasm: b3 = a3
316# asm 1: mov  <a3=int64#7,>b3=int64#11
317# asm 2: mov  <a3=%rax,>b3=%r13
318mov  %rax,%r13
319
320# qhasm:   carry? a0 -= *(uint64 *)(pp + 0)
321# asm 1: subq 0(<pp=int64#2),<a0=int64#3
322# asm 2: subq 0(<pp=%rsi),<a0=%rdx
323subq 0(%rsi),%rdx
324
325# qhasm:   carry? a1 -= *(uint64 *)(pp + 8) - carry
326# asm 1: sbbq 8(<pp=int64#2),<a1=int64#5
327# asm 2: sbbq 8(<pp=%rsi),<a1=%r8
328sbbq 8(%rsi),%r8
329
330# qhasm:   carry? a2 -= *(uint64 *)(pp + 16) - carry
331# asm 1: sbbq 16(<pp=int64#2),<a2=int64#6
332# asm 2: sbbq 16(<pp=%rsi),<a2=%r9
333sbbq 16(%rsi),%r9
334
335# qhasm:   carry? a3 -= *(uint64 *)(pp + 24) - carry
336# asm 1: sbbq 24(<pp=int64#2),<a3=int64#7
337# asm 2: sbbq 24(<pp=%rsi),<a3=%rax
338sbbq 24(%rsi),%rax
339
340# qhasm:   subt0 = 0
341# asm 1: mov  $0,>subt0=int64#12
342# asm 2: mov  $0,>subt0=%r14
343mov  $0,%r14
344
345# qhasm:   subt1 = 38
346# asm 1: mov  $38,>subt1=int64#13
347# asm 2: mov  $38,>subt1=%r15
348mov  $38,%r15
349
350# qhasm:   subt1 = subt0 if !carry
351# asm 1: cmovae <subt0=int64#12,<subt1=int64#13
352# asm 2: cmovae <subt0=%r14,<subt1=%r15
353cmovae %r14,%r15
354
355# qhasm:   carry? a0 -= subt1
356# asm 1: sub  <subt1=int64#13,<a0=int64#3
357# asm 2: sub  <subt1=%r15,<a0=%rdx
358sub  %r15,%rdx
359
360# qhasm:   carry? a1 -= subt0 - carry
361# asm 1: sbb  <subt0=int64#12,<a1=int64#5
362# asm 2: sbb  <subt0=%r14,<a1=%r8
363sbb  %r14,%r8
364
365# qhasm:   carry? a2 -= subt0 - carry
366# asm 1: sbb  <subt0=int64#12,<a2=int64#6
367# asm 2: sbb  <subt0=%r14,<a2=%r9
368sbb  %r14,%r9
369
370# qhasm:   carry? a3 -= subt0 - carry
371# asm 1: sbb  <subt0=int64#12,<a3=int64#7
372# asm 2: sbb  <subt0=%r14,<a3=%rax
373sbb  %r14,%rax
374
375# qhasm:   subt0 = subt1 if carry
376# asm 1: cmovc <subt1=int64#13,<subt0=int64#12
377# asm 2: cmovc <subt1=%r15,<subt0=%r14
378cmovc %r15,%r14
379
380# qhasm:   a0 -= subt0
381# asm 1: sub  <subt0=int64#12,<a0=int64#3
382# asm 2: sub  <subt0=%r14,<a0=%rdx
383sub  %r14,%rdx
384
385# qhasm:   carry? b0 += *(uint64 *)(pp + 0)
386# asm 1: addq 0(<pp=int64#2),<b0=int64#8
387# asm 2: addq 0(<pp=%rsi),<b0=%r10
388addq 0(%rsi),%r10
389
390# qhasm:   carry? b1 += *(uint64 *)(pp + 8) + carry
391# asm 1: adcq 8(<pp=int64#2),<b1=int64#9
392# asm 2: adcq 8(<pp=%rsi),<b1=%r11
393adcq 8(%rsi),%r11
394
395# qhasm:   carry? b2 += *(uint64 *)(pp + 16) + carry
396# asm 1: adcq 16(<pp=int64#2),<b2=int64#10
397# asm 2: adcq 16(<pp=%rsi),<b2=%r12
398adcq 16(%rsi),%r12
399
400# qhasm:   carry? b3 += *(uint64 *)(pp + 24) + carry
401# asm 1: adcq 24(<pp=int64#2),<b3=int64#11
402# asm 2: adcq 24(<pp=%rsi),<b3=%r13
403adcq 24(%rsi),%r13
404
405# qhasm:   addt0 = 0
406# asm 1: mov  $0,>addt0=int64#12
407# asm 2: mov  $0,>addt0=%r14
408mov  $0,%r14
409
410# qhasm:   addt1 = 38
411# asm 1: mov  $38,>addt1=int64#13
412# asm 2: mov  $38,>addt1=%r15
413mov  $38,%r15
414
415# qhasm:   addt1 = addt0 if !carry
416# asm 1: cmovae <addt0=int64#12,<addt1=int64#13
417# asm 2: cmovae <addt0=%r14,<addt1=%r15
418cmovae %r14,%r15
419
420# qhasm:   carry? b0 += addt1
421# asm 1: add  <addt1=int64#13,<b0=int64#8
422# asm 2: add  <addt1=%r15,<b0=%r10
423add  %r15,%r10
424
425# qhasm:   carry? b1 += addt0 + carry
426# asm 1: adc <addt0=int64#12,<b1=int64#9
427# asm 2: adc <addt0=%r14,<b1=%r11
428adc %r14,%r11
429
430# qhasm:   carry? b2 += addt0 + carry
431# asm 1: adc <addt0=int64#12,<b2=int64#10
432# asm 2: adc <addt0=%r14,<b2=%r12
433adc %r14,%r12
434
435# qhasm:   carry? b3 += addt0 + carry
436# asm 1: adc <addt0=int64#12,<b3=int64#11
437# asm 2: adc <addt0=%r14,<b3=%r13
438adc %r14,%r13
439
440# qhasm:   addt0 = addt1 if carry
441# asm 1: cmovc <addt1=int64#13,<addt0=int64#12
442# asm 2: cmovc <addt1=%r15,<addt0=%r14
443cmovc %r15,%r14
444
445# qhasm:   b0 += addt0
446# asm 1: add  <addt0=int64#12,<b0=int64#8
447# asm 2: add  <addt0=%r14,<b0=%r10
448add  %r14,%r10
449
450# qhasm: a0_stack = a0
451# asm 1: movq <a0=int64#3,>a0_stack=stack64#8
452# asm 2: movq <a0=%rdx,>a0_stack=56(%rsp)
453movq %rdx,56(%rsp)
454
455# qhasm: a1_stack = a1
456# asm 1: movq <a1=int64#5,>a1_stack=stack64#9
457# asm 2: movq <a1=%r8,>a1_stack=64(%rsp)
458movq %r8,64(%rsp)
459
460# qhasm: a2_stack = a2
461# asm 1: movq <a2=int64#6,>a2_stack=stack64#10
462# asm 2: movq <a2=%r9,>a2_stack=72(%rsp)
463movq %r9,72(%rsp)
464
465# qhasm: a3_stack = a3
466# asm 1: movq <a3=int64#7,>a3_stack=stack64#11
467# asm 2: movq <a3=%rax,>a3_stack=80(%rsp)
468movq %rax,80(%rsp)
469
470# qhasm: b0_stack = b0
471# asm 1: movq <b0=int64#8,>b0_stack=stack64#12
472# asm 2: movq <b0=%r10,>b0_stack=88(%rsp)
473movq %r10,88(%rsp)
474
475# qhasm: b1_stack = b1
476# asm 1: movq <b1=int64#9,>b1_stack=stack64#13
477# asm 2: movq <b1=%r11,>b1_stack=96(%rsp)
478movq %r11,96(%rsp)
479
480# qhasm: b2_stack = b2
481# asm 1: movq <b2=int64#10,>b2_stack=stack64#14
482# asm 2: movq <b2=%r12,>b2_stack=104(%rsp)
483movq %r12,104(%rsp)
484
485# qhasm: b3_stack = b3
486# asm 1: movq <b3=int64#11,>b3_stack=stack64#15
487# asm 2: movq <b3=%r13,>b3_stack=112(%rsp)
488movq %r13,112(%rsp)
489
490# qhasm:   mulr4 = 0
491# asm 1: mov  $0,>mulr4=int64#5
492# asm 2: mov  $0,>mulr4=%r8
493mov  $0,%r8
494
495# qhasm:   mulr5 = 0
496# asm 1: mov  $0,>mulr5=int64#6
497# asm 2: mov  $0,>mulr5=%r9
498mov  $0,%r9
499
500# qhasm:   mulr6 = 0
501# asm 1: mov  $0,>mulr6=int64#8
502# asm 2: mov  $0,>mulr6=%r10
503mov  $0,%r10
504
505# qhasm:   mulr7 = 0
506# asm 1: mov  $0,>mulr7=int64#9
507# asm 2: mov  $0,>mulr7=%r11
508mov  $0,%r11
509
510# qhasm:   mulx0 = a0_stack
511# asm 1: movq <a0_stack=stack64#8,>mulx0=int64#10
512# asm 2: movq <a0_stack=56(%rsp),>mulx0=%r12
513movq 56(%rsp),%r12
514
515# qhasm:   mulrax = *(uint64 *)(qp + 0)
516# asm 1: movq   0(<qp=int64#4),>mulrax=int64#7
517# asm 2: movq   0(<qp=%rcx),>mulrax=%rax
518movq   0(%rcx),%rax
519
520# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx0
521# asm 1: mul  <mulx0=int64#10
522# asm 2: mul  <mulx0=%r12
523mul  %r12
524
525# qhasm:   a0 = mulrax
526# asm 1: mov  <mulrax=int64#7,>a0=int64#11
527# asm 2: mov  <mulrax=%rax,>a0=%r13
528mov  %rax,%r13
529
530# qhasm:   a1 = mulrdx
531# asm 1: mov  <mulrdx=int64#3,>a1=int64#12
532# asm 2: mov  <mulrdx=%rdx,>a1=%r14
533mov  %rdx,%r14
534
535# qhasm:   mulrax = *(uint64 *)(qp + 8)
536# asm 1: movq   8(<qp=int64#4),>mulrax=int64#7
537# asm 2: movq   8(<qp=%rcx),>mulrax=%rax
538movq   8(%rcx),%rax
539
540# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx0
541# asm 1: mul  <mulx0=int64#10
542# asm 2: mul  <mulx0=%r12
543mul  %r12
544
545# qhasm:   carry? a1 += mulrax
546# asm 1: add  <mulrax=int64#7,<a1=int64#12
547# asm 2: add  <mulrax=%rax,<a1=%r14
548add  %rax,%r14
549
550# qhasm:   a2 = 0
551# asm 1: mov  $0,>a2=int64#13
552# asm 2: mov  $0,>a2=%r15
553mov  $0,%r15
554
555# qhasm:   a2 += mulrdx + carry
556# asm 1: adc <mulrdx=int64#3,<a2=int64#13
557# asm 2: adc <mulrdx=%rdx,<a2=%r15
558adc %rdx,%r15
559
560# qhasm:   mulrax = *(uint64 *)(qp + 16)
561# asm 1: movq   16(<qp=int64#4),>mulrax=int64#7
562# asm 2: movq   16(<qp=%rcx),>mulrax=%rax
563movq   16(%rcx),%rax
564
565# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx0
566# asm 1: mul  <mulx0=int64#10
567# asm 2: mul  <mulx0=%r12
568mul  %r12
569
570# qhasm:   carry? a2 += mulrax
571# asm 1: add  <mulrax=int64#7,<a2=int64#13
572# asm 2: add  <mulrax=%rax,<a2=%r15
573add  %rax,%r15
574
575# qhasm:   a3 = 0
576# asm 1: mov  $0,>a3=int64#14
577# asm 2: mov  $0,>a3=%rbx
578mov  $0,%rbx
579
580# qhasm:   a3 += mulrdx + carry
581# asm 1: adc <mulrdx=int64#3,<a3=int64#14
582# asm 2: adc <mulrdx=%rdx,<a3=%rbx
583adc %rdx,%rbx
584
585# qhasm:   mulrax = *(uint64 *)(qp + 24)
586# asm 1: movq   24(<qp=int64#4),>mulrax=int64#7
587# asm 2: movq   24(<qp=%rcx),>mulrax=%rax
588movq   24(%rcx),%rax
589
590# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx0
591# asm 1: mul  <mulx0=int64#10
592# asm 2: mul  <mulx0=%r12
593mul  %r12
594
595# qhasm:   carry? a3 += mulrax
596# asm 1: add  <mulrax=int64#7,<a3=int64#14
597# asm 2: add  <mulrax=%rax,<a3=%rbx
598add  %rax,%rbx
599
600# qhasm:   mulr4 += mulrdx + carry
601# asm 1: adc <mulrdx=int64#3,<mulr4=int64#5
602# asm 2: adc <mulrdx=%rdx,<mulr4=%r8
603adc %rdx,%r8
604
605# qhasm:   mulx1 = a1_stack
606# asm 1: movq <a1_stack=stack64#9,>mulx1=int64#10
607# asm 2: movq <a1_stack=64(%rsp),>mulx1=%r12
608movq 64(%rsp),%r12
609
610# qhasm:   mulrax = *(uint64 *)(qp + 0)
611# asm 1: movq   0(<qp=int64#4),>mulrax=int64#7
612# asm 2: movq   0(<qp=%rcx),>mulrax=%rax
613movq   0(%rcx),%rax
614
615# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx1
616# asm 1: mul  <mulx1=int64#10
617# asm 2: mul  <mulx1=%r12
618mul  %r12
619
620# qhasm:   carry? a1 += mulrax
621# asm 1: add  <mulrax=int64#7,<a1=int64#12
622# asm 2: add  <mulrax=%rax,<a1=%r14
623add  %rax,%r14
624
625# qhasm:   mulc = 0
626# asm 1: mov  $0,>mulc=int64#15
627# asm 2: mov  $0,>mulc=%rbp
628mov  $0,%rbp
629
630# qhasm:   mulc += mulrdx + carry
631# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
632# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
633adc %rdx,%rbp
634
635# qhasm:   mulrax = *(uint64 *)(qp + 8)
636# asm 1: movq   8(<qp=int64#4),>mulrax=int64#7
637# asm 2: movq   8(<qp=%rcx),>mulrax=%rax
638movq   8(%rcx),%rax
639
640# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx1
641# asm 1: mul  <mulx1=int64#10
642# asm 2: mul  <mulx1=%r12
643mul  %r12
644
645# qhasm:   carry? a2 += mulrax
646# asm 1: add  <mulrax=int64#7,<a2=int64#13
647# asm 2: add  <mulrax=%rax,<a2=%r15
648add  %rax,%r15
649
650# qhasm:   mulrdx += 0 + carry
651# asm 1: adc $0,<mulrdx=int64#3
652# asm 2: adc $0,<mulrdx=%rdx
653adc $0,%rdx
654
655# qhasm:   carry? a2 += mulc
656# asm 1: add  <mulc=int64#15,<a2=int64#13
657# asm 2: add  <mulc=%rbp,<a2=%r15
658add  %rbp,%r15
659
660# qhasm:   mulc = 0
661# asm 1: mov  $0,>mulc=int64#15
662# asm 2: mov  $0,>mulc=%rbp
663mov  $0,%rbp
664
665# qhasm:   mulc += mulrdx + carry
666# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
667# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
668adc %rdx,%rbp
669
670# qhasm:   mulrax = *(uint64 *)(qp + 16)
671# asm 1: movq   16(<qp=int64#4),>mulrax=int64#7
672# asm 2: movq   16(<qp=%rcx),>mulrax=%rax
673movq   16(%rcx),%rax
674
675# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx1
676# asm 1: mul  <mulx1=int64#10
677# asm 2: mul  <mulx1=%r12
678mul  %r12
679
680# qhasm:   carry? a3 += mulrax
681# asm 1: add  <mulrax=int64#7,<a3=int64#14
682# asm 2: add  <mulrax=%rax,<a3=%rbx
683add  %rax,%rbx
684
685# qhasm:   mulrdx += 0 + carry
686# asm 1: adc $0,<mulrdx=int64#3
687# asm 2: adc $0,<mulrdx=%rdx
688adc $0,%rdx
689
690# qhasm:   carry? a3 += mulc
691# asm 1: add  <mulc=int64#15,<a3=int64#14
692# asm 2: add  <mulc=%rbp,<a3=%rbx
693add  %rbp,%rbx
694
695# qhasm:   mulc = 0
696# asm 1: mov  $0,>mulc=int64#15
697# asm 2: mov  $0,>mulc=%rbp
698mov  $0,%rbp
699
700# qhasm:   mulc += mulrdx + carry
701# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
702# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
703adc %rdx,%rbp
704
705# qhasm:   mulrax = *(uint64 *)(qp + 24)
706# asm 1: movq   24(<qp=int64#4),>mulrax=int64#7
707# asm 2: movq   24(<qp=%rcx),>mulrax=%rax
708movq   24(%rcx),%rax
709
710# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx1
711# asm 1: mul  <mulx1=int64#10
712# asm 2: mul  <mulx1=%r12
713mul  %r12
714
715# qhasm:   carry? mulr4 += mulrax
716# asm 1: add  <mulrax=int64#7,<mulr4=int64#5
717# asm 2: add  <mulrax=%rax,<mulr4=%r8
718add  %rax,%r8
719
720# qhasm:   mulrdx += 0 + carry
721# asm 1: adc $0,<mulrdx=int64#3
722# asm 2: adc $0,<mulrdx=%rdx
723adc $0,%rdx
724
725# qhasm:   carry? mulr4 += mulc
726# asm 1: add  <mulc=int64#15,<mulr4=int64#5
727# asm 2: add  <mulc=%rbp,<mulr4=%r8
728add  %rbp,%r8
729
730# qhasm:   mulr5 += mulrdx + carry
731# asm 1: adc <mulrdx=int64#3,<mulr5=int64#6
732# asm 2: adc <mulrdx=%rdx,<mulr5=%r9
733adc %rdx,%r9
734
735# qhasm:   mulx2 = a2_stack
736# asm 1: movq <a2_stack=stack64#10,>mulx2=int64#10
737# asm 2: movq <a2_stack=72(%rsp),>mulx2=%r12
738movq 72(%rsp),%r12
739
740# qhasm:   mulrax = *(uint64 *)(qp + 0)
741# asm 1: movq   0(<qp=int64#4),>mulrax=int64#7
742# asm 2: movq   0(<qp=%rcx),>mulrax=%rax
743movq   0(%rcx),%rax
744
745# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx2
746# asm 1: mul  <mulx2=int64#10
747# asm 2: mul  <mulx2=%r12
748mul  %r12
749
750# qhasm:   carry? a2 += mulrax
751# asm 1: add  <mulrax=int64#7,<a2=int64#13
752# asm 2: add  <mulrax=%rax,<a2=%r15
753add  %rax,%r15
754
755# qhasm:   mulc = 0
756# asm 1: mov  $0,>mulc=int64#15
757# asm 2: mov  $0,>mulc=%rbp
758mov  $0,%rbp
759
760# qhasm:   mulc += mulrdx + carry
761# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
762# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
763adc %rdx,%rbp
764
765# qhasm:   mulrax = *(uint64 *)(qp + 8)
766# asm 1: movq   8(<qp=int64#4),>mulrax=int64#7
767# asm 2: movq   8(<qp=%rcx),>mulrax=%rax
768movq   8(%rcx),%rax
769
770# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx2
771# asm 1: mul  <mulx2=int64#10
772# asm 2: mul  <mulx2=%r12
773mul  %r12
774
775# qhasm:   carry? a3 += mulrax
776# asm 1: add  <mulrax=int64#7,<a3=int64#14
777# asm 2: add  <mulrax=%rax,<a3=%rbx
778add  %rax,%rbx
779
780# qhasm:   mulrdx += 0 + carry
781# asm 1: adc $0,<mulrdx=int64#3
782# asm 2: adc $0,<mulrdx=%rdx
783adc $0,%rdx
784
785# qhasm:   carry? a3 += mulc
786# asm 1: add  <mulc=int64#15,<a3=int64#14
787# asm 2: add  <mulc=%rbp,<a3=%rbx
788add  %rbp,%rbx
789
790# qhasm:   mulc = 0
791# asm 1: mov  $0,>mulc=int64#15
792# asm 2: mov  $0,>mulc=%rbp
793mov  $0,%rbp
794
795# qhasm:   mulc += mulrdx + carry
796# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
797# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
798adc %rdx,%rbp
799
800# qhasm:   mulrax = *(uint64 *)(qp + 16)
801# asm 1: movq   16(<qp=int64#4),>mulrax=int64#7
802# asm 2: movq   16(<qp=%rcx),>mulrax=%rax
803movq   16(%rcx),%rax
804
805# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx2
806# asm 1: mul  <mulx2=int64#10
807# asm 2: mul  <mulx2=%r12
808mul  %r12
809
810# qhasm:   carry? mulr4 += mulrax
811# asm 1: add  <mulrax=int64#7,<mulr4=int64#5
812# asm 2: add  <mulrax=%rax,<mulr4=%r8
813add  %rax,%r8
814
815# qhasm:   mulrdx += 0 + carry
816# asm 1: adc $0,<mulrdx=int64#3
817# asm 2: adc $0,<mulrdx=%rdx
818adc $0,%rdx
819
820# qhasm:   carry? mulr4 += mulc
821# asm 1: add  <mulc=int64#15,<mulr4=int64#5
822# asm 2: add  <mulc=%rbp,<mulr4=%r8
823add  %rbp,%r8
824
825# qhasm:   mulc = 0
826# asm 1: mov  $0,>mulc=int64#15
827# asm 2: mov  $0,>mulc=%rbp
828mov  $0,%rbp
829
830# qhasm:   mulc += mulrdx + carry
831# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
832# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
833adc %rdx,%rbp
834
835# qhasm:   mulrax = *(uint64 *)(qp + 24)
836# asm 1: movq   24(<qp=int64#4),>mulrax=int64#7
837# asm 2: movq   24(<qp=%rcx),>mulrax=%rax
838movq   24(%rcx),%rax
839
840# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx2
841# asm 1: mul  <mulx2=int64#10
842# asm 2: mul  <mulx2=%r12
843mul  %r12
844
845# qhasm:   carry? mulr5 += mulrax
846# asm 1: add  <mulrax=int64#7,<mulr5=int64#6
847# asm 2: add  <mulrax=%rax,<mulr5=%r9
848add  %rax,%r9
849
850# qhasm:   mulrdx += 0 + carry
851# asm 1: adc $0,<mulrdx=int64#3
852# asm 2: adc $0,<mulrdx=%rdx
853adc $0,%rdx
854
855# qhasm:   carry? mulr5 += mulc
856# asm 1: add  <mulc=int64#15,<mulr5=int64#6
857# asm 2: add  <mulc=%rbp,<mulr5=%r9
858add  %rbp,%r9
859
860# qhasm:   mulr6 += mulrdx + carry
861# asm 1: adc <mulrdx=int64#3,<mulr6=int64#8
862# asm 2: adc <mulrdx=%rdx,<mulr6=%r10
863adc %rdx,%r10
864
865# qhasm:   mulx3 = a3_stack
866# asm 1: movq <a3_stack=stack64#11,>mulx3=int64#10
867# asm 2: movq <a3_stack=80(%rsp),>mulx3=%r12
868movq 80(%rsp),%r12
869
870# qhasm:   mulrax = *(uint64 *)(qp + 0)
871# asm 1: movq   0(<qp=int64#4),>mulrax=int64#7
872# asm 2: movq   0(<qp=%rcx),>mulrax=%rax
873movq   0(%rcx),%rax
874
875# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx3
876# asm 1: mul  <mulx3=int64#10
877# asm 2: mul  <mulx3=%r12
878mul  %r12
879
880# qhasm:   carry? a3 += mulrax
881# asm 1: add  <mulrax=int64#7,<a3=int64#14
882# asm 2: add  <mulrax=%rax,<a3=%rbx
883add  %rax,%rbx
884
885# qhasm:   mulc = 0
886# asm 1: mov  $0,>mulc=int64#15
887# asm 2: mov  $0,>mulc=%rbp
888mov  $0,%rbp
889
890# qhasm:   mulc += mulrdx + carry
891# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
892# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
893adc %rdx,%rbp
894
895# qhasm:   mulrax = *(uint64 *)(qp + 8)
896# asm 1: movq   8(<qp=int64#4),>mulrax=int64#7
897# asm 2: movq   8(<qp=%rcx),>mulrax=%rax
898movq   8(%rcx),%rax
899
900# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx3
901# asm 1: mul  <mulx3=int64#10
902# asm 2: mul  <mulx3=%r12
903mul  %r12
904
905# qhasm:   carry? mulr4 += mulrax
906# asm 1: add  <mulrax=int64#7,<mulr4=int64#5
907# asm 2: add  <mulrax=%rax,<mulr4=%r8
908add  %rax,%r8
909
910# qhasm:   mulrdx += 0 + carry
911# asm 1: adc $0,<mulrdx=int64#3
912# asm 2: adc $0,<mulrdx=%rdx
913adc $0,%rdx
914
915# qhasm:   carry? mulr4 += mulc
916# asm 1: add  <mulc=int64#15,<mulr4=int64#5
917# asm 2: add  <mulc=%rbp,<mulr4=%r8
918add  %rbp,%r8
919
920# qhasm:   mulc = 0
921# asm 1: mov  $0,>mulc=int64#15
922# asm 2: mov  $0,>mulc=%rbp
923mov  $0,%rbp
924
925# qhasm:   mulc += mulrdx + carry
926# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
927# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
928adc %rdx,%rbp
929
930# qhasm:   mulrax = *(uint64 *)(qp + 16)
931# asm 1: movq   16(<qp=int64#4),>mulrax=int64#7
932# asm 2: movq   16(<qp=%rcx),>mulrax=%rax
933movq   16(%rcx),%rax
934
935# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx3
936# asm 1: mul  <mulx3=int64#10
937# asm 2: mul  <mulx3=%r12
938mul  %r12
939
940# qhasm:   carry? mulr5 += mulrax
941# asm 1: add  <mulrax=int64#7,<mulr5=int64#6
942# asm 2: add  <mulrax=%rax,<mulr5=%r9
943add  %rax,%r9
944
945# qhasm:   mulrdx += 0 + carry
946# asm 1: adc $0,<mulrdx=int64#3
947# asm 2: adc $0,<mulrdx=%rdx
948adc $0,%rdx
949
950# qhasm:   carry? mulr5 += mulc
951# asm 1: add  <mulc=int64#15,<mulr5=int64#6
952# asm 2: add  <mulc=%rbp,<mulr5=%r9
953add  %rbp,%r9
954
955# qhasm:   mulc = 0
956# asm 1: mov  $0,>mulc=int64#15
957# asm 2: mov  $0,>mulc=%rbp
958mov  $0,%rbp
959
960# qhasm:   mulc += mulrdx + carry
961# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
962# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
963adc %rdx,%rbp
964
965# qhasm:   mulrax = *(uint64 *)(qp + 24)
966# asm 1: movq   24(<qp=int64#4),>mulrax=int64#7
967# asm 2: movq   24(<qp=%rcx),>mulrax=%rax
968movq   24(%rcx),%rax
969
970# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx3
971# asm 1: mul  <mulx3=int64#10
972# asm 2: mul  <mulx3=%r12
973mul  %r12
974
975# qhasm:   carry? mulr6 += mulrax
976# asm 1: add  <mulrax=int64#7,<mulr6=int64#8
977# asm 2: add  <mulrax=%rax,<mulr6=%r10
978add  %rax,%r10
979
980# qhasm:   mulrdx += 0 + carry
981# asm 1: adc $0,<mulrdx=int64#3
982# asm 2: adc $0,<mulrdx=%rdx
983adc $0,%rdx
984
985# qhasm:   carry? mulr6 += mulc
986# asm 1: add  <mulc=int64#15,<mulr6=int64#8
987# asm 2: add  <mulc=%rbp,<mulr6=%r10
988add  %rbp,%r10
989
990# qhasm:   mulr7 += mulrdx + carry
991# asm 1: adc <mulrdx=int64#3,<mulr7=int64#9
992# asm 2: adc <mulrdx=%rdx,<mulr7=%r11
993adc %rdx,%r11
994
995# qhasm:   mulrax = mulr4
996# asm 1: mov  <mulr4=int64#5,>mulrax=int64#7
997# asm 2: mov  <mulr4=%r8,>mulrax=%rax
998mov  %r8,%rax
999
1000# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
1001mulq  crypto_sign_ed25519_amd64_64_38
1002
1003# qhasm:   mulr4 = mulrax
1004# asm 1: mov  <mulrax=int64#7,>mulr4=int64#5
1005# asm 2: mov  <mulrax=%rax,>mulr4=%r8
1006mov  %rax,%r8
1007
1008# qhasm:   mulrax = mulr5
1009# asm 1: mov  <mulr5=int64#6,>mulrax=int64#7
1010# asm 2: mov  <mulr5=%r9,>mulrax=%rax
1011mov  %r9,%rax
1012
1013# qhasm:   mulr5 = mulrdx
1014# asm 1: mov  <mulrdx=int64#3,>mulr5=int64#6
1015# asm 2: mov  <mulrdx=%rdx,>mulr5=%r9
1016mov  %rdx,%r9
1017
1018# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
1019mulq  crypto_sign_ed25519_amd64_64_38
1020
1021# qhasm:   carry? mulr5 += mulrax
1022# asm 1: add  <mulrax=int64#7,<mulr5=int64#6
1023# asm 2: add  <mulrax=%rax,<mulr5=%r9
1024add  %rax,%r9
1025
1026# qhasm:   mulrax = mulr6
1027# asm 1: mov  <mulr6=int64#8,>mulrax=int64#7
1028# asm 2: mov  <mulr6=%r10,>mulrax=%rax
1029mov  %r10,%rax
1030
1031# qhasm:   mulr6 = 0
1032# asm 1: mov  $0,>mulr6=int64#8
1033# asm 2: mov  $0,>mulr6=%r10
1034mov  $0,%r10
1035
1036# qhasm:   mulr6 += mulrdx + carry
1037# asm 1: adc <mulrdx=int64#3,<mulr6=int64#8
1038# asm 2: adc <mulrdx=%rdx,<mulr6=%r10
1039adc %rdx,%r10
1040
1041# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
1042mulq  crypto_sign_ed25519_amd64_64_38
1043
1044# qhasm:   carry? mulr6 += mulrax
1045# asm 1: add  <mulrax=int64#7,<mulr6=int64#8
1046# asm 2: add  <mulrax=%rax,<mulr6=%r10
1047add  %rax,%r10
1048
1049# qhasm:   mulrax = mulr7
1050# asm 1: mov  <mulr7=int64#9,>mulrax=int64#7
1051# asm 2: mov  <mulr7=%r11,>mulrax=%rax
1052mov  %r11,%rax
1053
1054# qhasm:   mulr7 = 0
1055# asm 1: mov  $0,>mulr7=int64#9
1056# asm 2: mov  $0,>mulr7=%r11
1057mov  $0,%r11
1058
1059# qhasm:   mulr7 += mulrdx + carry
1060# asm 1: adc <mulrdx=int64#3,<mulr7=int64#9
1061# asm 2: adc <mulrdx=%rdx,<mulr7=%r11
1062adc %rdx,%r11
1063
1064# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
1065mulq  crypto_sign_ed25519_amd64_64_38
1066
1067# qhasm:   carry? mulr7 += mulrax
1068# asm 1: add  <mulrax=int64#7,<mulr7=int64#9
1069# asm 2: add  <mulrax=%rax,<mulr7=%r11
1070add  %rax,%r11
1071
1072# qhasm:   mulr8 = 0
1073# asm 1: mov  $0,>mulr8=int64#7
1074# asm 2: mov  $0,>mulr8=%rax
1075mov  $0,%rax
1076
1077# qhasm:   mulr8 += mulrdx + carry
1078# asm 1: adc <mulrdx=int64#3,<mulr8=int64#7
1079# asm 2: adc <mulrdx=%rdx,<mulr8=%rax
1080adc %rdx,%rax
1081
1082# qhasm:   carry? a0 += mulr4
1083# asm 1: add  <mulr4=int64#5,<a0=int64#11
1084# asm 2: add  <mulr4=%r8,<a0=%r13
1085add  %r8,%r13
1086
1087# qhasm:   carry? a1 += mulr5 + carry
1088# asm 1: adc <mulr5=int64#6,<a1=int64#12
1089# asm 2: adc <mulr5=%r9,<a1=%r14
1090adc %r9,%r14
1091
1092# qhasm:   carry? a2 += mulr6 + carry
1093# asm 1: adc <mulr6=int64#8,<a2=int64#13
1094# asm 2: adc <mulr6=%r10,<a2=%r15
1095adc %r10,%r15
1096
1097# qhasm:   carry? a3 += mulr7 + carry
1098# asm 1: adc <mulr7=int64#9,<a3=int64#14
1099# asm 2: adc <mulr7=%r11,<a3=%rbx
1100adc %r11,%rbx
1101
1102# qhasm:   mulzero = 0
1103# asm 1: mov  $0,>mulzero=int64#3
1104# asm 2: mov  $0,>mulzero=%rdx
1105mov  $0,%rdx
1106
1107# qhasm:   mulr8 += mulzero + carry
1108# asm 1: adc <mulzero=int64#3,<mulr8=int64#7
1109# asm 2: adc <mulzero=%rdx,<mulr8=%rax
1110adc %rdx,%rax
1111
1112# qhasm:   mulr8 *= 38
1113# asm 1: imulq  $38,<mulr8=int64#7,>mulr8=int64#5
1114# asm 2: imulq  $38,<mulr8=%rax,>mulr8=%r8
1115imulq  $38,%rax,%r8
1116
1117# qhasm:   carry? a0 += mulr8
1118# asm 1: add  <mulr8=int64#5,<a0=int64#11
1119# asm 2: add  <mulr8=%r8,<a0=%r13
1120add  %r8,%r13
1121
1122# qhasm:   carry? a1 += mulzero + carry
1123# asm 1: adc <mulzero=int64#3,<a1=int64#12
1124# asm 2: adc <mulzero=%rdx,<a1=%r14
1125adc %rdx,%r14
1126
1127# qhasm:   carry? a2 += mulzero + carry
1128# asm 1: adc <mulzero=int64#3,<a2=int64#13
1129# asm 2: adc <mulzero=%rdx,<a2=%r15
1130adc %rdx,%r15
1131
1132# qhasm:   carry? a3 += mulzero + carry
1133# asm 1: adc <mulzero=int64#3,<a3=int64#14
1134# asm 2: adc <mulzero=%rdx,<a3=%rbx
1135adc %rdx,%rbx
1136
1137# qhasm:   mulzero += mulzero + carry
1138# asm 1: adc <mulzero=int64#3,<mulzero=int64#3
1139# asm 2: adc <mulzero=%rdx,<mulzero=%rdx
1140adc %rdx,%rdx
1141
1142# qhasm:   mulzero *= 38
1143# asm 1: imulq  $38,<mulzero=int64#3,>mulzero=int64#3
1144# asm 2: imulq  $38,<mulzero=%rdx,>mulzero=%rdx
1145imulq  $38,%rdx,%rdx
1146
1147# qhasm:   a0 += mulzero
1148# asm 1: add  <mulzero=int64#3,<a0=int64#11
1149# asm 2: add  <mulzero=%rdx,<a0=%r13
1150add  %rdx,%r13
1151
1152# qhasm: a0_stack = a0
1153# asm 1: movq <a0=int64#11,>a0_stack=stack64#8
1154# asm 2: movq <a0=%r13,>a0_stack=56(%rsp)
1155movq %r13,56(%rsp)
1156
1157# qhasm: a1_stack = a1
1158# asm 1: movq <a1=int64#12,>a1_stack=stack64#9
1159# asm 2: movq <a1=%r14,>a1_stack=64(%rsp)
1160movq %r14,64(%rsp)
1161
1162# qhasm: a2_stack = a2
1163# asm 1: movq <a2=int64#13,>a2_stack=stack64#10
1164# asm 2: movq <a2=%r15,>a2_stack=72(%rsp)
1165movq %r15,72(%rsp)
1166
1167# qhasm: a3_stack = a3
1168# asm 1: movq <a3=int64#14,>a3_stack=stack64#11
1169# asm 2: movq <a3=%rbx,>a3_stack=80(%rsp)
1170movq %rbx,80(%rsp)
1171
1172# qhasm:   mulr4 = 0
1173# asm 1: mov  $0,>mulr4=int64#5
1174# asm 2: mov  $0,>mulr4=%r8
1175mov  $0,%r8
1176
1177# qhasm:   mulr5 = 0
1178# asm 1: mov  $0,>mulr5=int64#6
1179# asm 2: mov  $0,>mulr5=%r9
1180mov  $0,%r9
1181
1182# qhasm:   mulr6 = 0
1183# asm 1: mov  $0,>mulr6=int64#8
1184# asm 2: mov  $0,>mulr6=%r10
1185mov  $0,%r10
1186
1187# qhasm:   mulr7 = 0
1188# asm 1: mov  $0,>mulr7=int64#9
1189# asm 2: mov  $0,>mulr7=%r11
1190mov  $0,%r11
1191
1192# qhasm:   mulx0 = b0_stack
1193# asm 1: movq <b0_stack=stack64#12,>mulx0=int64#10
1194# asm 2: movq <b0_stack=88(%rsp),>mulx0=%r12
1195movq 88(%rsp),%r12
1196
1197# qhasm:   mulrax = *(uint64 *)(qp + 32)
1198# asm 1: movq   32(<qp=int64#4),>mulrax=int64#7
1199# asm 2: movq   32(<qp=%rcx),>mulrax=%rax
1200movq   32(%rcx),%rax
1201
1202# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx0
1203# asm 1: mul  <mulx0=int64#10
1204# asm 2: mul  <mulx0=%r12
1205mul  %r12
1206
1207# qhasm:   rx0 = mulrax
1208# asm 1: mov  <mulrax=int64#7,>rx0=int64#11
1209# asm 2: mov  <mulrax=%rax,>rx0=%r13
1210mov  %rax,%r13
1211
1212# qhasm:   rx1 = mulrdx
1213# asm 1: mov  <mulrdx=int64#3,>rx1=int64#12
1214# asm 2: mov  <mulrdx=%rdx,>rx1=%r14
1215mov  %rdx,%r14
1216
1217# qhasm:   mulrax = *(uint64 *)(qp + 40)
1218# asm 1: movq   40(<qp=int64#4),>mulrax=int64#7
1219# asm 2: movq   40(<qp=%rcx),>mulrax=%rax
1220movq   40(%rcx),%rax
1221
1222# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx0
1223# asm 1: mul  <mulx0=int64#10
1224# asm 2: mul  <mulx0=%r12
1225mul  %r12
1226
1227# qhasm:   carry? rx1 += mulrax
1228# asm 1: add  <mulrax=int64#7,<rx1=int64#12
1229# asm 2: add  <mulrax=%rax,<rx1=%r14
1230add  %rax,%r14
1231
1232# qhasm:   rx2 = 0
1233# asm 1: mov  $0,>rx2=int64#13
1234# asm 2: mov  $0,>rx2=%r15
1235mov  $0,%r15
1236
1237# qhasm:   rx2 += mulrdx + carry
1238# asm 1: adc <mulrdx=int64#3,<rx2=int64#13
1239# asm 2: adc <mulrdx=%rdx,<rx2=%r15
1240adc %rdx,%r15
1241
1242# qhasm:   mulrax = *(uint64 *)(qp + 48)
1243# asm 1: movq   48(<qp=int64#4),>mulrax=int64#7
1244# asm 2: movq   48(<qp=%rcx),>mulrax=%rax
1245movq   48(%rcx),%rax
1246
1247# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx0
1248# asm 1: mul  <mulx0=int64#10
1249# asm 2: mul  <mulx0=%r12
1250mul  %r12
1251
1252# qhasm:   carry? rx2 += mulrax
1253# asm 1: add  <mulrax=int64#7,<rx2=int64#13
1254# asm 2: add  <mulrax=%rax,<rx2=%r15
1255add  %rax,%r15
1256
1257# qhasm:   rx3 = 0
1258# asm 1: mov  $0,>rx3=int64#14
1259# asm 2: mov  $0,>rx3=%rbx
1260mov  $0,%rbx
1261
1262# qhasm:   rx3 += mulrdx + carry
1263# asm 1: adc <mulrdx=int64#3,<rx3=int64#14
1264# asm 2: adc <mulrdx=%rdx,<rx3=%rbx
1265adc %rdx,%rbx
1266
1267# qhasm:   mulrax = *(uint64 *)(qp + 56)
1268# asm 1: movq   56(<qp=int64#4),>mulrax=int64#7
1269# asm 2: movq   56(<qp=%rcx),>mulrax=%rax
1270movq   56(%rcx),%rax
1271
1272# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx0
1273# asm 1: mul  <mulx0=int64#10
1274# asm 2: mul  <mulx0=%r12
1275mul  %r12
1276
1277# qhasm:   carry? rx3 += mulrax
1278# asm 1: add  <mulrax=int64#7,<rx3=int64#14
1279# asm 2: add  <mulrax=%rax,<rx3=%rbx
1280add  %rax,%rbx
1281
1282# qhasm:   mulr4 += mulrdx + carry
1283# asm 1: adc <mulrdx=int64#3,<mulr4=int64#5
1284# asm 2: adc <mulrdx=%rdx,<mulr4=%r8
1285adc %rdx,%r8
1286
1287# qhasm:   mulx1 = b1_stack
1288# asm 1: movq <b1_stack=stack64#13,>mulx1=int64#10
1289# asm 2: movq <b1_stack=96(%rsp),>mulx1=%r12
1290movq 96(%rsp),%r12
1291
1292# qhasm:   mulrax = *(uint64 *)(qp + 32)
1293# asm 1: movq   32(<qp=int64#4),>mulrax=int64#7
1294# asm 2: movq   32(<qp=%rcx),>mulrax=%rax
1295movq   32(%rcx),%rax
1296
1297# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx1
1298# asm 1: mul  <mulx1=int64#10
1299# asm 2: mul  <mulx1=%r12
1300mul  %r12
1301
1302# qhasm:   carry? rx1 += mulrax
1303# asm 1: add  <mulrax=int64#7,<rx1=int64#12
1304# asm 2: add  <mulrax=%rax,<rx1=%r14
1305add  %rax,%r14
1306
1307# qhasm:   mulc = 0
1308# asm 1: mov  $0,>mulc=int64#15
1309# asm 2: mov  $0,>mulc=%rbp
1310mov  $0,%rbp
1311
1312# qhasm:   mulc += mulrdx + carry
1313# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
1314# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
1315adc %rdx,%rbp
1316
1317# qhasm:   mulrax = *(uint64 *)(qp + 40)
1318# asm 1: movq   40(<qp=int64#4),>mulrax=int64#7
1319# asm 2: movq   40(<qp=%rcx),>mulrax=%rax
1320movq   40(%rcx),%rax
1321
1322# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx1
1323# asm 1: mul  <mulx1=int64#10
1324# asm 2: mul  <mulx1=%r12
1325mul  %r12
1326
1327# qhasm:   carry? rx2 += mulrax
1328# asm 1: add  <mulrax=int64#7,<rx2=int64#13
1329# asm 2: add  <mulrax=%rax,<rx2=%r15
1330add  %rax,%r15
1331
1332# qhasm:   mulrdx += 0 + carry
1333# asm 1: adc $0,<mulrdx=int64#3
1334# asm 2: adc $0,<mulrdx=%rdx
1335adc $0,%rdx
1336
1337# qhasm:   carry? rx2 += mulc
1338# asm 1: add  <mulc=int64#15,<rx2=int64#13
1339# asm 2: add  <mulc=%rbp,<rx2=%r15
1340add  %rbp,%r15
1341
1342# qhasm:   mulc = 0
1343# asm 1: mov  $0,>mulc=int64#15
1344# asm 2: mov  $0,>mulc=%rbp
1345mov  $0,%rbp
1346
1347# qhasm:   mulc += mulrdx + carry
1348# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
1349# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
1350adc %rdx,%rbp
1351
1352# qhasm:   mulrax = *(uint64 *)(qp + 48)
1353# asm 1: movq   48(<qp=int64#4),>mulrax=int64#7
1354# asm 2: movq   48(<qp=%rcx),>mulrax=%rax
1355movq   48(%rcx),%rax
1356
1357# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx1
1358# asm 1: mul  <mulx1=int64#10
1359# asm 2: mul  <mulx1=%r12
1360mul  %r12
1361
1362# qhasm:   carry? rx3 += mulrax
1363# asm 1: add  <mulrax=int64#7,<rx3=int64#14
1364# asm 2: add  <mulrax=%rax,<rx3=%rbx
1365add  %rax,%rbx
1366
1367# qhasm:   mulrdx += 0 + carry
1368# asm 1: adc $0,<mulrdx=int64#3
1369# asm 2: adc $0,<mulrdx=%rdx
1370adc $0,%rdx
1371
1372# qhasm:   carry? rx3 += mulc
1373# asm 1: add  <mulc=int64#15,<rx3=int64#14
1374# asm 2: add  <mulc=%rbp,<rx3=%rbx
1375add  %rbp,%rbx
1376
1377# qhasm:   mulc = 0
1378# asm 1: mov  $0,>mulc=int64#15
1379# asm 2: mov  $0,>mulc=%rbp
1380mov  $0,%rbp
1381
1382# qhasm:   mulc += mulrdx + carry
1383# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
1384# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
1385adc %rdx,%rbp
1386
1387# qhasm:   mulrax = *(uint64 *)(qp + 56)
1388# asm 1: movq   56(<qp=int64#4),>mulrax=int64#7
1389# asm 2: movq   56(<qp=%rcx),>mulrax=%rax
1390movq   56(%rcx),%rax
1391
1392# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx1
1393# asm 1: mul  <mulx1=int64#10
1394# asm 2: mul  <mulx1=%r12
1395mul  %r12
1396
1397# qhasm:   carry? mulr4 += mulrax
1398# asm 1: add  <mulrax=int64#7,<mulr4=int64#5
1399# asm 2: add  <mulrax=%rax,<mulr4=%r8
1400add  %rax,%r8
1401
1402# qhasm:   mulrdx += 0 + carry
1403# asm 1: adc $0,<mulrdx=int64#3
1404# asm 2: adc $0,<mulrdx=%rdx
1405adc $0,%rdx
1406
1407# qhasm:   carry? mulr4 += mulc
1408# asm 1: add  <mulc=int64#15,<mulr4=int64#5
1409# asm 2: add  <mulc=%rbp,<mulr4=%r8
1410add  %rbp,%r8
1411
1412# qhasm:   mulr5 += mulrdx + carry
1413# asm 1: adc <mulrdx=int64#3,<mulr5=int64#6
1414# asm 2: adc <mulrdx=%rdx,<mulr5=%r9
1415adc %rdx,%r9
1416
1417# qhasm:   mulx2 = b2_stack
1418# asm 1: movq <b2_stack=stack64#14,>mulx2=int64#10
1419# asm 2: movq <b2_stack=104(%rsp),>mulx2=%r12
1420movq 104(%rsp),%r12
1421
1422# qhasm:   mulrax = *(uint64 *)(qp + 32)
1423# asm 1: movq   32(<qp=int64#4),>mulrax=int64#7
1424# asm 2: movq   32(<qp=%rcx),>mulrax=%rax
1425movq   32(%rcx),%rax
1426
1427# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx2
1428# asm 1: mul  <mulx2=int64#10
1429# asm 2: mul  <mulx2=%r12
1430mul  %r12
1431
1432# qhasm:   carry? rx2 += mulrax
1433# asm 1: add  <mulrax=int64#7,<rx2=int64#13
1434# asm 2: add  <mulrax=%rax,<rx2=%r15
1435add  %rax,%r15
1436
1437# qhasm:   mulc = 0
1438# asm 1: mov  $0,>mulc=int64#15
1439# asm 2: mov  $0,>mulc=%rbp
1440mov  $0,%rbp
1441
1442# qhasm:   mulc += mulrdx + carry
1443# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
1444# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
1445adc %rdx,%rbp
1446
1447# qhasm:   mulrax = *(uint64 *)(qp + 40)
1448# asm 1: movq   40(<qp=int64#4),>mulrax=int64#7
1449# asm 2: movq   40(<qp=%rcx),>mulrax=%rax
1450movq   40(%rcx),%rax
1451
1452# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx2
1453# asm 1: mul  <mulx2=int64#10
1454# asm 2: mul  <mulx2=%r12
1455mul  %r12
1456
1457# qhasm:   carry? rx3 += mulrax
1458# asm 1: add  <mulrax=int64#7,<rx3=int64#14
1459# asm 2: add  <mulrax=%rax,<rx3=%rbx
1460add  %rax,%rbx
1461
1462# qhasm:   mulrdx += 0 + carry
1463# asm 1: adc $0,<mulrdx=int64#3
1464# asm 2: adc $0,<mulrdx=%rdx
1465adc $0,%rdx
1466
1467# qhasm:   carry? rx3 += mulc
1468# asm 1: add  <mulc=int64#15,<rx3=int64#14
1469# asm 2: add  <mulc=%rbp,<rx3=%rbx
1470add  %rbp,%rbx
1471
1472# qhasm:   mulc = 0
1473# asm 1: mov  $0,>mulc=int64#15
1474# asm 2: mov  $0,>mulc=%rbp
1475mov  $0,%rbp
1476
1477# qhasm:   mulc += mulrdx + carry
1478# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
1479# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
1480adc %rdx,%rbp
1481
1482# qhasm:   mulrax = *(uint64 *)(qp + 48)
1483# asm 1: movq   48(<qp=int64#4),>mulrax=int64#7
1484# asm 2: movq   48(<qp=%rcx),>mulrax=%rax
1485movq   48(%rcx),%rax
1486
1487# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx2
1488# asm 1: mul  <mulx2=int64#10
1489# asm 2: mul  <mulx2=%r12
1490mul  %r12
1491
1492# qhasm:   carry? mulr4 += mulrax
1493# asm 1: add  <mulrax=int64#7,<mulr4=int64#5
1494# asm 2: add  <mulrax=%rax,<mulr4=%r8
1495add  %rax,%r8
1496
1497# qhasm:   mulrdx += 0 + carry
1498# asm 1: adc $0,<mulrdx=int64#3
1499# asm 2: adc $0,<mulrdx=%rdx
1500adc $0,%rdx
1501
1502# qhasm:   carry? mulr4 += mulc
1503# asm 1: add  <mulc=int64#15,<mulr4=int64#5
1504# asm 2: add  <mulc=%rbp,<mulr4=%r8
1505add  %rbp,%r8
1506
1507# qhasm:   mulc = 0
1508# asm 1: mov  $0,>mulc=int64#15
1509# asm 2: mov  $0,>mulc=%rbp
1510mov  $0,%rbp
1511
1512# qhasm:   mulc += mulrdx + carry
1513# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
1514# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
1515adc %rdx,%rbp
1516
1517# qhasm:   mulrax = *(uint64 *)(qp + 56)
1518# asm 1: movq   56(<qp=int64#4),>mulrax=int64#7
1519# asm 2: movq   56(<qp=%rcx),>mulrax=%rax
1520movq   56(%rcx),%rax
1521
1522# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx2
1523# asm 1: mul  <mulx2=int64#10
1524# asm 2: mul  <mulx2=%r12
1525mul  %r12
1526
1527# qhasm:   carry? mulr5 += mulrax
1528# asm 1: add  <mulrax=int64#7,<mulr5=int64#6
1529# asm 2: add  <mulrax=%rax,<mulr5=%r9
1530add  %rax,%r9
1531
1532# qhasm:   mulrdx += 0 + carry
1533# asm 1: adc $0,<mulrdx=int64#3
1534# asm 2: adc $0,<mulrdx=%rdx
1535adc $0,%rdx
1536
1537# qhasm:   carry? mulr5 += mulc
1538# asm 1: add  <mulc=int64#15,<mulr5=int64#6
1539# asm 2: add  <mulc=%rbp,<mulr5=%r9
1540add  %rbp,%r9
1541
1542# qhasm:   mulr6 += mulrdx + carry
1543# asm 1: adc <mulrdx=int64#3,<mulr6=int64#8
1544# asm 2: adc <mulrdx=%rdx,<mulr6=%r10
1545adc %rdx,%r10
1546
1547# qhasm:   mulx3 = b3_stack
1548# asm 1: movq <b3_stack=stack64#15,>mulx3=int64#10
1549# asm 2: movq <b3_stack=112(%rsp),>mulx3=%r12
1550movq 112(%rsp),%r12
1551
1552# qhasm:   mulrax = *(uint64 *)(qp + 32)
1553# asm 1: movq   32(<qp=int64#4),>mulrax=int64#7
1554# asm 2: movq   32(<qp=%rcx),>mulrax=%rax
1555movq   32(%rcx),%rax
1556
1557# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx3
1558# asm 1: mul  <mulx3=int64#10
1559# asm 2: mul  <mulx3=%r12
1560mul  %r12
1561
1562# qhasm:   carry? rx3 += mulrax
1563# asm 1: add  <mulrax=int64#7,<rx3=int64#14
1564# asm 2: add  <mulrax=%rax,<rx3=%rbx
1565add  %rax,%rbx
1566
1567# qhasm:   mulc = 0
1568# asm 1: mov  $0,>mulc=int64#15
1569# asm 2: mov  $0,>mulc=%rbp
1570mov  $0,%rbp
1571
1572# qhasm:   mulc += mulrdx + carry
1573# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
1574# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
1575adc %rdx,%rbp
1576
1577# qhasm:   mulrax = *(uint64 *)(qp + 40)
1578# asm 1: movq   40(<qp=int64#4),>mulrax=int64#7
1579# asm 2: movq   40(<qp=%rcx),>mulrax=%rax
1580movq   40(%rcx),%rax
1581
1582# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx3
1583# asm 1: mul  <mulx3=int64#10
1584# asm 2: mul  <mulx3=%r12
1585mul  %r12
1586
1587# qhasm:   carry? mulr4 += mulrax
1588# asm 1: add  <mulrax=int64#7,<mulr4=int64#5
1589# asm 2: add  <mulrax=%rax,<mulr4=%r8
1590add  %rax,%r8
1591
1592# qhasm:   mulrdx += 0 + carry
1593# asm 1: adc $0,<mulrdx=int64#3
1594# asm 2: adc $0,<mulrdx=%rdx
1595adc $0,%rdx
1596
1597# qhasm:   carry? mulr4 += mulc
1598# asm 1: add  <mulc=int64#15,<mulr4=int64#5
1599# asm 2: add  <mulc=%rbp,<mulr4=%r8
1600add  %rbp,%r8
1601
1602# qhasm:   mulc = 0
1603# asm 1: mov  $0,>mulc=int64#15
1604# asm 2: mov  $0,>mulc=%rbp
1605mov  $0,%rbp
1606
1607# qhasm:   mulc += mulrdx + carry
1608# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
1609# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
1610adc %rdx,%rbp
1611
1612# qhasm:   mulrax = *(uint64 *)(qp + 48)
1613# asm 1: movq   48(<qp=int64#4),>mulrax=int64#7
1614# asm 2: movq   48(<qp=%rcx),>mulrax=%rax
1615movq   48(%rcx),%rax
1616
1617# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx3
1618# asm 1: mul  <mulx3=int64#10
1619# asm 2: mul  <mulx3=%r12
1620mul  %r12
1621
1622# qhasm:   carry? mulr5 += mulrax
1623# asm 1: add  <mulrax=int64#7,<mulr5=int64#6
1624# asm 2: add  <mulrax=%rax,<mulr5=%r9
1625add  %rax,%r9
1626
1627# qhasm:   mulrdx += 0 + carry
1628# asm 1: adc $0,<mulrdx=int64#3
1629# asm 2: adc $0,<mulrdx=%rdx
1630adc $0,%rdx
1631
1632# qhasm:   carry? mulr5 += mulc
1633# asm 1: add  <mulc=int64#15,<mulr5=int64#6
1634# asm 2: add  <mulc=%rbp,<mulr5=%r9
1635add  %rbp,%r9
1636
1637# qhasm:   mulc = 0
1638# asm 1: mov  $0,>mulc=int64#15
1639# asm 2: mov  $0,>mulc=%rbp
1640mov  $0,%rbp
1641
1642# qhasm:   mulc += mulrdx + carry
1643# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
1644# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
1645adc %rdx,%rbp
1646
1647# qhasm:   mulrax = *(uint64 *)(qp + 56)
1648# asm 1: movq   56(<qp=int64#4),>mulrax=int64#7
1649# asm 2: movq   56(<qp=%rcx),>mulrax=%rax
1650movq   56(%rcx),%rax
1651
1652# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx3
1653# asm 1: mul  <mulx3=int64#10
1654# asm 2: mul  <mulx3=%r12
1655mul  %r12
1656
1657# qhasm:   carry? mulr6 += mulrax
1658# asm 1: add  <mulrax=int64#7,<mulr6=int64#8
1659# asm 2: add  <mulrax=%rax,<mulr6=%r10
1660add  %rax,%r10
1661
1662# qhasm:   mulrdx += 0 + carry
1663# asm 1: adc $0,<mulrdx=int64#3
1664# asm 2: adc $0,<mulrdx=%rdx
1665adc $0,%rdx
1666
1667# qhasm:   carry? mulr6 += mulc
1668# asm 1: add  <mulc=int64#15,<mulr6=int64#8
1669# asm 2: add  <mulc=%rbp,<mulr6=%r10
1670add  %rbp,%r10
1671
1672# qhasm:   mulr7 += mulrdx + carry
1673# asm 1: adc <mulrdx=int64#3,<mulr7=int64#9
1674# asm 2: adc <mulrdx=%rdx,<mulr7=%r11
1675adc %rdx,%r11
1676
1677# qhasm:   mulrax = mulr4
1678# asm 1: mov  <mulr4=int64#5,>mulrax=int64#7
1679# asm 2: mov  <mulr4=%r8,>mulrax=%rax
1680mov  %r8,%rax
1681
1682# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
1683mulq  crypto_sign_ed25519_amd64_64_38
1684
1685# qhasm:   mulr4 = mulrax
1686# asm 1: mov  <mulrax=int64#7,>mulr4=int64#5
1687# asm 2: mov  <mulrax=%rax,>mulr4=%r8
1688mov  %rax,%r8
1689
1690# qhasm:   mulrax = mulr5
1691# asm 1: mov  <mulr5=int64#6,>mulrax=int64#7
1692# asm 2: mov  <mulr5=%r9,>mulrax=%rax
1693mov  %r9,%rax
1694
1695# qhasm:   mulr5 = mulrdx
1696# asm 1: mov  <mulrdx=int64#3,>mulr5=int64#6
1697# asm 2: mov  <mulrdx=%rdx,>mulr5=%r9
1698mov  %rdx,%r9
1699
1700# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
1701mulq  crypto_sign_ed25519_amd64_64_38
1702
1703# qhasm:   carry? mulr5 += mulrax
1704# asm 1: add  <mulrax=int64#7,<mulr5=int64#6
1705# asm 2: add  <mulrax=%rax,<mulr5=%r9
1706add  %rax,%r9
1707
1708# qhasm:   mulrax = mulr6
1709# asm 1: mov  <mulr6=int64#8,>mulrax=int64#7
1710# asm 2: mov  <mulr6=%r10,>mulrax=%rax
1711mov  %r10,%rax
1712
1713# qhasm:   mulr6 = 0
1714# asm 1: mov  $0,>mulr6=int64#8
1715# asm 2: mov  $0,>mulr6=%r10
1716mov  $0,%r10
1717
1718# qhasm:   mulr6 += mulrdx + carry
1719# asm 1: adc <mulrdx=int64#3,<mulr6=int64#8
1720# asm 2: adc <mulrdx=%rdx,<mulr6=%r10
1721adc %rdx,%r10
1722
1723# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
1724mulq  crypto_sign_ed25519_amd64_64_38
1725
1726# qhasm:   carry? mulr6 += mulrax
1727# asm 1: add  <mulrax=int64#7,<mulr6=int64#8
1728# asm 2: add  <mulrax=%rax,<mulr6=%r10
1729add  %rax,%r10
1730
1731# qhasm:   mulrax = mulr7
1732# asm 1: mov  <mulr7=int64#9,>mulrax=int64#7
1733# asm 2: mov  <mulr7=%r11,>mulrax=%rax
1734mov  %r11,%rax
1735
1736# qhasm:   mulr7 = 0
1737# asm 1: mov  $0,>mulr7=int64#9
1738# asm 2: mov  $0,>mulr7=%r11
1739mov  $0,%r11
1740
1741# qhasm:   mulr7 += mulrdx + carry
1742# asm 1: adc <mulrdx=int64#3,<mulr7=int64#9
1743# asm 2: adc <mulrdx=%rdx,<mulr7=%r11
1744adc %rdx,%r11
1745
1746# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
1747mulq  crypto_sign_ed25519_amd64_64_38
1748
1749# qhasm:   carry? mulr7 += mulrax
1750# asm 1: add  <mulrax=int64#7,<mulr7=int64#9
1751# asm 2: add  <mulrax=%rax,<mulr7=%r11
1752add  %rax,%r11
1753
1754# qhasm:   mulr8 = 0
1755# asm 1: mov  $0,>mulr8=int64#7
1756# asm 2: mov  $0,>mulr8=%rax
1757mov  $0,%rax
1758
1759# qhasm:   mulr8 += mulrdx + carry
1760# asm 1: adc <mulrdx=int64#3,<mulr8=int64#7
1761# asm 2: adc <mulrdx=%rdx,<mulr8=%rax
1762adc %rdx,%rax
1763
1764# qhasm:   carry? rx0 += mulr4
1765# asm 1: add  <mulr4=int64#5,<rx0=int64#11
1766# asm 2: add  <mulr4=%r8,<rx0=%r13
1767add  %r8,%r13
1768
1769# qhasm:   carry? rx1 += mulr5 + carry
1770# asm 1: adc <mulr5=int64#6,<rx1=int64#12
1771# asm 2: adc <mulr5=%r9,<rx1=%r14
1772adc %r9,%r14
1773
1774# qhasm:   carry? rx2 += mulr6 + carry
1775# asm 1: adc <mulr6=int64#8,<rx2=int64#13
1776# asm 2: adc <mulr6=%r10,<rx2=%r15
1777adc %r10,%r15
1778
1779# qhasm:   carry? rx3 += mulr7 + carry
1780# asm 1: adc <mulr7=int64#9,<rx3=int64#14
1781# asm 2: adc <mulr7=%r11,<rx3=%rbx
1782adc %r11,%rbx
1783
1784# qhasm:   mulzero = 0
1785# asm 1: mov  $0,>mulzero=int64#3
1786# asm 2: mov  $0,>mulzero=%rdx
1787mov  $0,%rdx
1788
1789# qhasm:   mulr8 += mulzero + carry
1790# asm 1: adc <mulzero=int64#3,<mulr8=int64#7
1791# asm 2: adc <mulzero=%rdx,<mulr8=%rax
1792adc %rdx,%rax
1793
1794# qhasm:   mulr8 *= 38
1795# asm 1: imulq  $38,<mulr8=int64#7,>mulr8=int64#5
1796# asm 2: imulq  $38,<mulr8=%rax,>mulr8=%r8
1797imulq  $38,%rax,%r8
1798
1799# qhasm:   carry? rx0 += mulr8
1800# asm 1: add  <mulr8=int64#5,<rx0=int64#11
1801# asm 2: add  <mulr8=%r8,<rx0=%r13
1802add  %r8,%r13
1803
1804# qhasm:   carry? rx1 += mulzero + carry
1805# asm 1: adc <mulzero=int64#3,<rx1=int64#12
1806# asm 2: adc <mulzero=%rdx,<rx1=%r14
1807adc %rdx,%r14
1808
1809# qhasm:   carry? rx2 += mulzero + carry
1810# asm 1: adc <mulzero=int64#3,<rx2=int64#13
1811# asm 2: adc <mulzero=%rdx,<rx2=%r15
1812adc %rdx,%r15
1813
1814# qhasm:   carry? rx3 += mulzero + carry
1815# asm 1: adc <mulzero=int64#3,<rx3=int64#14
1816# asm 2: adc <mulzero=%rdx,<rx3=%rbx
1817adc %rdx,%rbx
1818
1819# qhasm:   mulzero += mulzero + carry
1820# asm 1: adc <mulzero=int64#3,<mulzero=int64#3
1821# asm 2: adc <mulzero=%rdx,<mulzero=%rdx
1822adc %rdx,%rdx
1823
1824# qhasm:   mulzero *= 38
1825# asm 1: imulq  $38,<mulzero=int64#3,>mulzero=int64#3
1826# asm 2: imulq  $38,<mulzero=%rdx,>mulzero=%rdx
1827imulq  $38,%rdx,%rdx
1828
1829# qhasm:   rx0 += mulzero
1830# asm 1: add  <mulzero=int64#3,<rx0=int64#11
1831# asm 2: add  <mulzero=%rdx,<rx0=%r13
1832add  %rdx,%r13
1833
1834# qhasm: ry0 = rx0
1835# asm 1: mov  <rx0=int64#11,>ry0=int64#3
1836# asm 2: mov  <rx0=%r13,>ry0=%rdx
1837mov  %r13,%rdx
1838
1839# qhasm: ry1 = rx1
1840# asm 1: mov  <rx1=int64#12,>ry1=int64#5
1841# asm 2: mov  <rx1=%r14,>ry1=%r8
1842mov  %r14,%r8
1843
1844# qhasm: ry2 = rx2
1845# asm 1: mov  <rx2=int64#13,>ry2=int64#6
1846# asm 2: mov  <rx2=%r15,>ry2=%r9
1847mov  %r15,%r9
1848
1849# qhasm: ry3 = rx3
1850# asm 1: mov  <rx3=int64#14,>ry3=int64#7
1851# asm 2: mov  <rx3=%rbx,>ry3=%rax
1852mov  %rbx,%rax
1853
1854# qhasm:   carry? ry0 += a0_stack
1855# asm 1: addq <a0_stack=stack64#8,<ry0=int64#3
1856# asm 2: addq <a0_stack=56(%rsp),<ry0=%rdx
1857addq 56(%rsp),%rdx
1858
1859# qhasm:   carry? ry1 += a1_stack + carry
1860# asm 1: adcq <a1_stack=stack64#9,<ry1=int64#5
1861# asm 2: adcq <a1_stack=64(%rsp),<ry1=%r8
1862adcq 64(%rsp),%r8
1863
1864# qhasm:   carry? ry2 += a2_stack + carry
1865# asm 1: adcq <a2_stack=stack64#10,<ry2=int64#6
1866# asm 2: adcq <a2_stack=72(%rsp),<ry2=%r9
1867adcq 72(%rsp),%r9
1868
1869# qhasm:   carry? ry3 += a3_stack + carry
1870# asm 1: adcq <a3_stack=stack64#11,<ry3=int64#7
1871# asm 2: adcq <a3_stack=80(%rsp),<ry3=%rax
1872adcq 80(%rsp),%rax
1873
1874# qhasm:   addt0 = 0
1875# asm 1: mov  $0,>addt0=int64#8
1876# asm 2: mov  $0,>addt0=%r10
1877mov  $0,%r10
1878
1879# qhasm:   addt1 = 38
1880# asm 1: mov  $38,>addt1=int64#9
1881# asm 2: mov  $38,>addt1=%r11
1882mov  $38,%r11
1883
1884# qhasm:   addt1 = addt0 if !carry
1885# asm 1: cmovae <addt0=int64#8,<addt1=int64#9
1886# asm 2: cmovae <addt0=%r10,<addt1=%r11
1887cmovae %r10,%r11
1888
1889# qhasm:   carry? ry0 += addt1
1890# asm 1: add  <addt1=int64#9,<ry0=int64#3
1891# asm 2: add  <addt1=%r11,<ry0=%rdx
1892add  %r11,%rdx
1893
1894# qhasm:   carry? ry1 += addt0 + carry
1895# asm 1: adc <addt0=int64#8,<ry1=int64#5
1896# asm 2: adc <addt0=%r10,<ry1=%r8
1897adc %r10,%r8
1898
1899# qhasm:   carry? ry2 += addt0 + carry
1900# asm 1: adc <addt0=int64#8,<ry2=int64#6
1901# asm 2: adc <addt0=%r10,<ry2=%r9
1902adc %r10,%r9
1903
1904# qhasm:   carry? ry3 += addt0 + carry
1905# asm 1: adc <addt0=int64#8,<ry3=int64#7
1906# asm 2: adc <addt0=%r10,<ry3=%rax
1907adc %r10,%rax
1908
1909# qhasm:   addt0 = addt1 if carry
1910# asm 1: cmovc <addt1=int64#9,<addt0=int64#8
1911# asm 2: cmovc <addt1=%r11,<addt0=%r10
1912cmovc %r11,%r10
1913
1914# qhasm:   ry0 += addt0
1915# asm 1: add  <addt0=int64#8,<ry0=int64#3
1916# asm 2: add  <addt0=%r10,<ry0=%rdx
1917add  %r10,%rdx
1918
1919# qhasm:   carry? rx0 -= a0_stack
1920# asm 1: subq <a0_stack=stack64#8,<rx0=int64#11
1921# asm 2: subq <a0_stack=56(%rsp),<rx0=%r13
1922subq 56(%rsp),%r13
1923
1924# qhasm:   carry? rx1 -= a1_stack - carry
1925# asm 1: sbbq <a1_stack=stack64#9,<rx1=int64#12
1926# asm 2: sbbq <a1_stack=64(%rsp),<rx1=%r14
1927sbbq 64(%rsp),%r14
1928
1929# qhasm:   carry? rx2 -= a2_stack - carry
1930# asm 1: sbbq <a2_stack=stack64#10,<rx2=int64#13
1931# asm 2: sbbq <a2_stack=72(%rsp),<rx2=%r15
1932sbbq 72(%rsp),%r15
1933
1934# qhasm:   carry? rx3 -= a3_stack - carry
1935# asm 1: sbbq <a3_stack=stack64#11,<rx3=int64#14
1936# asm 2: sbbq <a3_stack=80(%rsp),<rx3=%rbx
1937sbbq 80(%rsp),%rbx
1938
1939# qhasm:   subt0 = 0
1940# asm 1: mov  $0,>subt0=int64#8
1941# asm 2: mov  $0,>subt0=%r10
1942mov  $0,%r10
1943
1944# qhasm:   subt1 = 38
1945# asm 1: mov  $38,>subt1=int64#9
1946# asm 2: mov  $38,>subt1=%r11
1947mov  $38,%r11
1948
1949# qhasm:   subt1 = subt0 if !carry
1950# asm 1: cmovae <subt0=int64#8,<subt1=int64#9
1951# asm 2: cmovae <subt0=%r10,<subt1=%r11
1952cmovae %r10,%r11
1953
1954# qhasm:   carry? rx0 -= subt1
1955# asm 1: sub  <subt1=int64#9,<rx0=int64#11
1956# asm 2: sub  <subt1=%r11,<rx0=%r13
1957sub  %r11,%r13
1958
1959# qhasm:   carry? rx1 -= subt0 - carry
1960# asm 1: sbb  <subt0=int64#8,<rx1=int64#12
1961# asm 2: sbb  <subt0=%r10,<rx1=%r14
1962sbb  %r10,%r14
1963
1964# qhasm:   carry? rx2 -= subt0 - carry
1965# asm 1: sbb  <subt0=int64#8,<rx2=int64#13
1966# asm 2: sbb  <subt0=%r10,<rx2=%r15
1967sbb  %r10,%r15
1968
1969# qhasm:   carry? rx3 -= subt0 - carry
1970# asm 1: sbb  <subt0=int64#8,<rx3=int64#14
1971# asm 2: sbb  <subt0=%r10,<rx3=%rbx
1972sbb  %r10,%rbx
1973
1974# qhasm:   subt0 = subt1 if carry
1975# asm 1: cmovc <subt1=int64#9,<subt0=int64#8
1976# asm 2: cmovc <subt1=%r11,<subt0=%r10
1977cmovc %r11,%r10
1978
1979# qhasm:   rx0 -= subt0
1980# asm 1: sub  <subt0=int64#8,<rx0=int64#11
1981# asm 2: sub  <subt0=%r10,<rx0=%r13
1982sub  %r10,%r13
1983
1984# qhasm: *(uint64 *) (rp + 0) = rx0
1985# asm 1: movq   <rx0=int64#11,0(<rp=int64#1)
1986# asm 2: movq   <rx0=%r13,0(<rp=%rdi)
1987movq   %r13,0(%rdi)
1988
1989# qhasm: *(uint64 *) (rp + 8) = rx1
1990# asm 1: movq   <rx1=int64#12,8(<rp=int64#1)
1991# asm 2: movq   <rx1=%r14,8(<rp=%rdi)
1992movq   %r14,8(%rdi)
1993
1994# qhasm: *(uint64 *) (rp + 16) = rx2
1995# asm 1: movq   <rx2=int64#13,16(<rp=int64#1)
1996# asm 2: movq   <rx2=%r15,16(<rp=%rdi)
1997movq   %r15,16(%rdi)
1998
1999# qhasm: *(uint64 *) (rp + 24) = rx3
2000# asm 1: movq   <rx3=int64#14,24(<rp=int64#1)
2001# asm 2: movq   <rx3=%rbx,24(<rp=%rdi)
2002movq   %rbx,24(%rdi)
2003
2004# qhasm: *(uint64 *) (rp + 64) = ry0
2005# asm 1: movq   <ry0=int64#3,64(<rp=int64#1)
2006# asm 2: movq   <ry0=%rdx,64(<rp=%rdi)
2007movq   %rdx,64(%rdi)
2008
2009# qhasm: *(uint64 *) (rp + 72) = ry1
2010# asm 1: movq   <ry1=int64#5,72(<rp=int64#1)
2011# asm 2: movq   <ry1=%r8,72(<rp=%rdi)
2012movq   %r8,72(%rdi)
2013
2014# qhasm: *(uint64 *) (rp + 80) = ry2
2015# asm 1: movq   <ry2=int64#6,80(<rp=int64#1)
2016# asm 2: movq   <ry2=%r9,80(<rp=%rdi)
2017movq   %r9,80(%rdi)
2018
2019# qhasm: *(uint64 *) (rp + 88) = ry3
2020# asm 1: movq   <ry3=int64#7,88(<rp=int64#1)
2021# asm 2: movq   <ry3=%rax,88(<rp=%rdi)
2022movq   %rax,88(%rdi)
2023
2024# qhasm:   mulr4 = 0
2025# asm 1: mov  $0,>mulr4=int64#5
2026# asm 2: mov  $0,>mulr4=%r8
2027mov  $0,%r8
2028
2029# qhasm:   mulr5 = 0
2030# asm 1: mov  $0,>mulr5=int64#6
2031# asm 2: mov  $0,>mulr5=%r9
2032mov  $0,%r9
2033
2034# qhasm:   mulr6 = 0
2035# asm 1: mov  $0,>mulr6=int64#8
2036# asm 2: mov  $0,>mulr6=%r10
2037mov  $0,%r10
2038
2039# qhasm:   mulr7 = 0
2040# asm 1: mov  $0,>mulr7=int64#9
2041# asm 2: mov  $0,>mulr7=%r11
2042mov  $0,%r11
2043
2044# qhasm:   mulx0 = *(uint64 *)(pp + 96)
2045# asm 1: movq   96(<pp=int64#2),>mulx0=int64#10
2046# asm 2: movq   96(<pp=%rsi),>mulx0=%r12
2047movq   96(%rsi),%r12
2048
2049# qhasm:   mulrax = *(uint64 *)(qp + 96)
2050# asm 1: movq   96(<qp=int64#4),>mulrax=int64#7
2051# asm 2: movq   96(<qp=%rcx),>mulrax=%rax
2052movq   96(%rcx),%rax
2053
2054# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx0
2055# asm 1: mul  <mulx0=int64#10
2056# asm 2: mul  <mulx0=%r12
2057mul  %r12
2058
2059# qhasm:   c0 = mulrax
2060# asm 1: mov  <mulrax=int64#7,>c0=int64#11
2061# asm 2: mov  <mulrax=%rax,>c0=%r13
2062mov  %rax,%r13
2063
2064# qhasm:   c1 = mulrdx
2065# asm 1: mov  <mulrdx=int64#3,>c1=int64#12
2066# asm 2: mov  <mulrdx=%rdx,>c1=%r14
2067mov  %rdx,%r14
2068
2069# qhasm:   mulrax = *(uint64 *)(qp + 104)
2070# asm 1: movq   104(<qp=int64#4),>mulrax=int64#7
2071# asm 2: movq   104(<qp=%rcx),>mulrax=%rax
2072movq   104(%rcx),%rax
2073
2074# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx0
2075# asm 1: mul  <mulx0=int64#10
2076# asm 2: mul  <mulx0=%r12
2077mul  %r12
2078
2079# qhasm:   carry? c1 += mulrax
2080# asm 1: add  <mulrax=int64#7,<c1=int64#12
2081# asm 2: add  <mulrax=%rax,<c1=%r14
2082add  %rax,%r14
2083
2084# qhasm:   c2 = 0
2085# asm 1: mov  $0,>c2=int64#13
2086# asm 2: mov  $0,>c2=%r15
2087mov  $0,%r15
2088
2089# qhasm:   c2 += mulrdx + carry
2090# asm 1: adc <mulrdx=int64#3,<c2=int64#13
2091# asm 2: adc <mulrdx=%rdx,<c2=%r15
2092adc %rdx,%r15
2093
2094# qhasm:   mulrax = *(uint64 *)(qp + 112)
2095# asm 1: movq   112(<qp=int64#4),>mulrax=int64#7
2096# asm 2: movq   112(<qp=%rcx),>mulrax=%rax
2097movq   112(%rcx),%rax
2098
2099# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx0
2100# asm 1: mul  <mulx0=int64#10
2101# asm 2: mul  <mulx0=%r12
2102mul  %r12
2103
2104# qhasm:   carry? c2 += mulrax
2105# asm 1: add  <mulrax=int64#7,<c2=int64#13
2106# asm 2: add  <mulrax=%rax,<c2=%r15
2107add  %rax,%r15
2108
2109# qhasm:   c3 = 0
2110# asm 1: mov  $0,>c3=int64#14
2111# asm 2: mov  $0,>c3=%rbx
2112mov  $0,%rbx
2113
2114# qhasm:   c3 += mulrdx + carry
2115# asm 1: adc <mulrdx=int64#3,<c3=int64#14
2116# asm 2: adc <mulrdx=%rdx,<c3=%rbx
2117adc %rdx,%rbx
2118
2119# qhasm:   mulrax = *(uint64 *)(qp + 120)
2120# asm 1: movq   120(<qp=int64#4),>mulrax=int64#7
2121# asm 2: movq   120(<qp=%rcx),>mulrax=%rax
2122movq   120(%rcx),%rax
2123
2124# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx0
2125# asm 1: mul  <mulx0=int64#10
2126# asm 2: mul  <mulx0=%r12
2127mul  %r12
2128
2129# qhasm:   carry? c3 += mulrax
2130# asm 1: add  <mulrax=int64#7,<c3=int64#14
2131# asm 2: add  <mulrax=%rax,<c3=%rbx
2132add  %rax,%rbx
2133
2134# qhasm:   mulr4 += mulrdx + carry
2135# asm 1: adc <mulrdx=int64#3,<mulr4=int64#5
2136# asm 2: adc <mulrdx=%rdx,<mulr4=%r8
2137adc %rdx,%r8
2138
2139# qhasm:   mulx1 = *(uint64 *)(pp + 104)
2140# asm 1: movq   104(<pp=int64#2),>mulx1=int64#10
2141# asm 2: movq   104(<pp=%rsi),>mulx1=%r12
2142movq   104(%rsi),%r12
2143
2144# qhasm:   mulrax = *(uint64 *)(qp + 96)
2145# asm 1: movq   96(<qp=int64#4),>mulrax=int64#7
2146# asm 2: movq   96(<qp=%rcx),>mulrax=%rax
2147movq   96(%rcx),%rax
2148
2149# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx1
2150# asm 1: mul  <mulx1=int64#10
2151# asm 2: mul  <mulx1=%r12
2152mul  %r12
2153
2154# qhasm:   carry? c1 += mulrax
2155# asm 1: add  <mulrax=int64#7,<c1=int64#12
2156# asm 2: add  <mulrax=%rax,<c1=%r14
2157add  %rax,%r14
2158
2159# qhasm:   mulc = 0
2160# asm 1: mov  $0,>mulc=int64#15
2161# asm 2: mov  $0,>mulc=%rbp
2162mov  $0,%rbp
2163
2164# qhasm:   mulc += mulrdx + carry
2165# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
2166# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
2167adc %rdx,%rbp
2168
2169# qhasm:   mulrax = *(uint64 *)(qp + 104)
2170# asm 1: movq   104(<qp=int64#4),>mulrax=int64#7
2171# asm 2: movq   104(<qp=%rcx),>mulrax=%rax
2172movq   104(%rcx),%rax
2173
2174# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx1
2175# asm 1: mul  <mulx1=int64#10
2176# asm 2: mul  <mulx1=%r12
2177mul  %r12
2178
2179# qhasm:   carry? c2 += mulrax
2180# asm 1: add  <mulrax=int64#7,<c2=int64#13
2181# asm 2: add  <mulrax=%rax,<c2=%r15
2182add  %rax,%r15
2183
2184# qhasm:   mulrdx += 0 + carry
2185# asm 1: adc $0,<mulrdx=int64#3
2186# asm 2: adc $0,<mulrdx=%rdx
2187adc $0,%rdx
2188
2189# qhasm:   carry? c2 += mulc
2190# asm 1: add  <mulc=int64#15,<c2=int64#13
2191# asm 2: add  <mulc=%rbp,<c2=%r15
2192add  %rbp,%r15
2193
2194# qhasm:   mulc = 0
2195# asm 1: mov  $0,>mulc=int64#15
2196# asm 2: mov  $0,>mulc=%rbp
2197mov  $0,%rbp
2198
2199# qhasm:   mulc += mulrdx + carry
2200# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
2201# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
2202adc %rdx,%rbp
2203
2204# qhasm:   mulrax = *(uint64 *)(qp + 112)
2205# asm 1: movq   112(<qp=int64#4),>mulrax=int64#7
2206# asm 2: movq   112(<qp=%rcx),>mulrax=%rax
2207movq   112(%rcx),%rax
2208
2209# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx1
2210# asm 1: mul  <mulx1=int64#10
2211# asm 2: mul  <mulx1=%r12
2212mul  %r12
2213
2214# qhasm:   carry? c3 += mulrax
2215# asm 1: add  <mulrax=int64#7,<c3=int64#14
2216# asm 2: add  <mulrax=%rax,<c3=%rbx
2217add  %rax,%rbx
2218
2219# qhasm:   mulrdx += 0 + carry
2220# asm 1: adc $0,<mulrdx=int64#3
2221# asm 2: adc $0,<mulrdx=%rdx
2222adc $0,%rdx
2223
2224# qhasm:   carry? c3 += mulc
2225# asm 1: add  <mulc=int64#15,<c3=int64#14
2226# asm 2: add  <mulc=%rbp,<c3=%rbx
2227add  %rbp,%rbx
2228
2229# qhasm:   mulc = 0
2230# asm 1: mov  $0,>mulc=int64#15
2231# asm 2: mov  $0,>mulc=%rbp
2232mov  $0,%rbp
2233
2234# qhasm:   mulc += mulrdx + carry
2235# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
2236# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
2237adc %rdx,%rbp
2238
2239# qhasm:   mulrax = *(uint64 *)(qp + 120)
2240# asm 1: movq   120(<qp=int64#4),>mulrax=int64#7
2241# asm 2: movq   120(<qp=%rcx),>mulrax=%rax
2242movq   120(%rcx),%rax
2243
2244# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx1
2245# asm 1: mul  <mulx1=int64#10
2246# asm 2: mul  <mulx1=%r12
2247mul  %r12
2248
2249# qhasm:   carry? mulr4 += mulrax
2250# asm 1: add  <mulrax=int64#7,<mulr4=int64#5
2251# asm 2: add  <mulrax=%rax,<mulr4=%r8
2252add  %rax,%r8
2253
2254# qhasm:   mulrdx += 0 + carry
2255# asm 1: adc $0,<mulrdx=int64#3
2256# asm 2: adc $0,<mulrdx=%rdx
2257adc $0,%rdx
2258
2259# qhasm:   carry? mulr4 += mulc
2260# asm 1: add  <mulc=int64#15,<mulr4=int64#5
2261# asm 2: add  <mulc=%rbp,<mulr4=%r8
2262add  %rbp,%r8
2263
2264# qhasm:   mulr5 += mulrdx + carry
2265# asm 1: adc <mulrdx=int64#3,<mulr5=int64#6
2266# asm 2: adc <mulrdx=%rdx,<mulr5=%r9
2267adc %rdx,%r9
2268
2269# qhasm:   mulx2 = *(uint64 *)(pp + 112)
2270# asm 1: movq   112(<pp=int64#2),>mulx2=int64#10
2271# asm 2: movq   112(<pp=%rsi),>mulx2=%r12
2272movq   112(%rsi),%r12
2273
2274# qhasm:   mulrax = *(uint64 *)(qp + 96)
2275# asm 1: movq   96(<qp=int64#4),>mulrax=int64#7
2276# asm 2: movq   96(<qp=%rcx),>mulrax=%rax
2277movq   96(%rcx),%rax
2278
2279# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx2
2280# asm 1: mul  <mulx2=int64#10
2281# asm 2: mul  <mulx2=%r12
2282mul  %r12
2283
2284# qhasm:   carry? c2 += mulrax
2285# asm 1: add  <mulrax=int64#7,<c2=int64#13
2286# asm 2: add  <mulrax=%rax,<c2=%r15
2287add  %rax,%r15
2288
2289# qhasm:   mulc = 0
2290# asm 1: mov  $0,>mulc=int64#15
2291# asm 2: mov  $0,>mulc=%rbp
2292mov  $0,%rbp
2293
2294# qhasm:   mulc += mulrdx + carry
2295# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
2296# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
2297adc %rdx,%rbp
2298
2299# qhasm:   mulrax = *(uint64 *)(qp + 104)
2300# asm 1: movq   104(<qp=int64#4),>mulrax=int64#7
2301# asm 2: movq   104(<qp=%rcx),>mulrax=%rax
2302movq   104(%rcx),%rax
2303
2304# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx2
2305# asm 1: mul  <mulx2=int64#10
2306# asm 2: mul  <mulx2=%r12
2307mul  %r12
2308
2309# qhasm:   carry? c3 += mulrax
2310# asm 1: add  <mulrax=int64#7,<c3=int64#14
2311# asm 2: add  <mulrax=%rax,<c3=%rbx
2312add  %rax,%rbx
2313
2314# qhasm:   mulrdx += 0 + carry
2315# asm 1: adc $0,<mulrdx=int64#3
2316# asm 2: adc $0,<mulrdx=%rdx
2317adc $0,%rdx
2318
2319# qhasm:   carry? c3 += mulc
2320# asm 1: add  <mulc=int64#15,<c3=int64#14
2321# asm 2: add  <mulc=%rbp,<c3=%rbx
2322add  %rbp,%rbx
2323
2324# qhasm:   mulc = 0
2325# asm 1: mov  $0,>mulc=int64#15
2326# asm 2: mov  $0,>mulc=%rbp
2327mov  $0,%rbp
2328
2329# qhasm:   mulc += mulrdx + carry
2330# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
2331# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
2332adc %rdx,%rbp
2333
2334# qhasm:   mulrax = *(uint64 *)(qp + 112)
2335# asm 1: movq   112(<qp=int64#4),>mulrax=int64#7
2336# asm 2: movq   112(<qp=%rcx),>mulrax=%rax
2337movq   112(%rcx),%rax
2338
2339# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx2
2340# asm 1: mul  <mulx2=int64#10
2341# asm 2: mul  <mulx2=%r12
2342mul  %r12
2343
2344# qhasm:   carry? mulr4 += mulrax
2345# asm 1: add  <mulrax=int64#7,<mulr4=int64#5
2346# asm 2: add  <mulrax=%rax,<mulr4=%r8
2347add  %rax,%r8
2348
2349# qhasm:   mulrdx += 0 + carry
2350# asm 1: adc $0,<mulrdx=int64#3
2351# asm 2: adc $0,<mulrdx=%rdx
2352adc $0,%rdx
2353
2354# qhasm:   carry? mulr4 += mulc
2355# asm 1: add  <mulc=int64#15,<mulr4=int64#5
2356# asm 2: add  <mulc=%rbp,<mulr4=%r8
2357add  %rbp,%r8
2358
2359# qhasm:   mulc = 0
2360# asm 1: mov  $0,>mulc=int64#15
2361# asm 2: mov  $0,>mulc=%rbp
2362mov  $0,%rbp
2363
2364# qhasm:   mulc += mulrdx + carry
2365# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
2366# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
2367adc %rdx,%rbp
2368
2369# qhasm:   mulrax = *(uint64 *)(qp + 120)
2370# asm 1: movq   120(<qp=int64#4),>mulrax=int64#7
2371# asm 2: movq   120(<qp=%rcx),>mulrax=%rax
2372movq   120(%rcx),%rax
2373
2374# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx2
2375# asm 1: mul  <mulx2=int64#10
2376# asm 2: mul  <mulx2=%r12
2377mul  %r12
2378
2379# qhasm:   carry? mulr5 += mulrax
2380# asm 1: add  <mulrax=int64#7,<mulr5=int64#6
2381# asm 2: add  <mulrax=%rax,<mulr5=%r9
2382add  %rax,%r9
2383
2384# qhasm:   mulrdx += 0 + carry
2385# asm 1: adc $0,<mulrdx=int64#3
2386# asm 2: adc $0,<mulrdx=%rdx
2387adc $0,%rdx
2388
2389# qhasm:   carry? mulr5 += mulc
2390# asm 1: add  <mulc=int64#15,<mulr5=int64#6
2391# asm 2: add  <mulc=%rbp,<mulr5=%r9
2392add  %rbp,%r9
2393
2394# qhasm:   mulr6 += mulrdx + carry
2395# asm 1: adc <mulrdx=int64#3,<mulr6=int64#8
2396# asm 2: adc <mulrdx=%rdx,<mulr6=%r10
2397adc %rdx,%r10
2398
2399# qhasm:   mulx3 = *(uint64 *)(pp + 120)
2400# asm 1: movq   120(<pp=int64#2),>mulx3=int64#10
2401# asm 2: movq   120(<pp=%rsi),>mulx3=%r12
2402movq   120(%rsi),%r12
2403
2404# qhasm:   mulrax = *(uint64 *)(qp + 96)
2405# asm 1: movq   96(<qp=int64#4),>mulrax=int64#7
2406# asm 2: movq   96(<qp=%rcx),>mulrax=%rax
2407movq   96(%rcx),%rax
2408
2409# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx3
2410# asm 1: mul  <mulx3=int64#10
2411# asm 2: mul  <mulx3=%r12
2412mul  %r12
2413
2414# qhasm:   carry? c3 += mulrax
2415# asm 1: add  <mulrax=int64#7,<c3=int64#14
2416# asm 2: add  <mulrax=%rax,<c3=%rbx
2417add  %rax,%rbx
2418
2419# qhasm:   mulc = 0
2420# asm 1: mov  $0,>mulc=int64#15
2421# asm 2: mov  $0,>mulc=%rbp
2422mov  $0,%rbp
2423
2424# qhasm:   mulc += mulrdx + carry
2425# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
2426# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
2427adc %rdx,%rbp
2428
2429# qhasm:   mulrax = *(uint64 *)(qp + 104)
2430# asm 1: movq   104(<qp=int64#4),>mulrax=int64#7
2431# asm 2: movq   104(<qp=%rcx),>mulrax=%rax
2432movq   104(%rcx),%rax
2433
2434# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx3
2435# asm 1: mul  <mulx3=int64#10
2436# asm 2: mul  <mulx3=%r12
2437mul  %r12
2438
2439# qhasm:   carry? mulr4 += mulrax
2440# asm 1: add  <mulrax=int64#7,<mulr4=int64#5
2441# asm 2: add  <mulrax=%rax,<mulr4=%r8
2442add  %rax,%r8
2443
2444# qhasm:   mulrdx += 0 + carry
2445# asm 1: adc $0,<mulrdx=int64#3
2446# asm 2: adc $0,<mulrdx=%rdx
2447adc $0,%rdx
2448
2449# qhasm:   carry? mulr4 += mulc
2450# asm 1: add  <mulc=int64#15,<mulr4=int64#5
2451# asm 2: add  <mulc=%rbp,<mulr4=%r8
2452add  %rbp,%r8
2453
2454# qhasm:   mulc = 0
2455# asm 1: mov  $0,>mulc=int64#15
2456# asm 2: mov  $0,>mulc=%rbp
2457mov  $0,%rbp
2458
2459# qhasm:   mulc += mulrdx + carry
2460# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
2461# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
2462adc %rdx,%rbp
2463
2464# qhasm:   mulrax = *(uint64 *)(qp + 112)
2465# asm 1: movq   112(<qp=int64#4),>mulrax=int64#7
2466# asm 2: movq   112(<qp=%rcx),>mulrax=%rax
2467movq   112(%rcx),%rax
2468
2469# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx3
2470# asm 1: mul  <mulx3=int64#10
2471# asm 2: mul  <mulx3=%r12
2472mul  %r12
2473
2474# qhasm:   carry? mulr5 += mulrax
2475# asm 1: add  <mulrax=int64#7,<mulr5=int64#6
2476# asm 2: add  <mulrax=%rax,<mulr5=%r9
2477add  %rax,%r9
2478
2479# qhasm:   mulrdx += 0 + carry
2480# asm 1: adc $0,<mulrdx=int64#3
2481# asm 2: adc $0,<mulrdx=%rdx
2482adc $0,%rdx
2483
2484# qhasm:   carry? mulr5 += mulc
2485# asm 1: add  <mulc=int64#15,<mulr5=int64#6
2486# asm 2: add  <mulc=%rbp,<mulr5=%r9
2487add  %rbp,%r9
2488
2489# qhasm:   mulc = 0
2490# asm 1: mov  $0,>mulc=int64#15
2491# asm 2: mov  $0,>mulc=%rbp
2492mov  $0,%rbp
2493
2494# qhasm:   mulc += mulrdx + carry
2495# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
2496# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
2497adc %rdx,%rbp
2498
2499# qhasm:   mulrax = *(uint64 *)(qp + 120)
2500# asm 1: movq   120(<qp=int64#4),>mulrax=int64#7
2501# asm 2: movq   120(<qp=%rcx),>mulrax=%rax
2502movq   120(%rcx),%rax
2503
2504# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx3
2505# asm 1: mul  <mulx3=int64#10
2506# asm 2: mul  <mulx3=%r12
2507mul  %r12
2508
2509# qhasm:   carry? mulr6 += mulrax
2510# asm 1: add  <mulrax=int64#7,<mulr6=int64#8
2511# asm 2: add  <mulrax=%rax,<mulr6=%r10
2512add  %rax,%r10
2513
2514# qhasm:   mulrdx += 0 + carry
2515# asm 1: adc $0,<mulrdx=int64#3
2516# asm 2: adc $0,<mulrdx=%rdx
2517adc $0,%rdx
2518
2519# qhasm:   carry? mulr6 += mulc
2520# asm 1: add  <mulc=int64#15,<mulr6=int64#8
2521# asm 2: add  <mulc=%rbp,<mulr6=%r10
2522add  %rbp,%r10
2523
2524# qhasm:   mulr7 += mulrdx + carry
2525# asm 1: adc <mulrdx=int64#3,<mulr7=int64#9
2526# asm 2: adc <mulrdx=%rdx,<mulr7=%r11
2527adc %rdx,%r11
2528
# qhasm:   mulrax = mulr4
# asm 1: mov  <mulr4=int64#5,>mulrax=int64#7
# asm 2: mov  <mulr4=%r8,>mulrax=%rax
mov  %r8,%rax

# Reduction phase: fold the high half (mulr4..mulr7) of the 512-bit product
# back into the low half using 2^256 == 38 (mod 2^255-19).
# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
# NOTE(review): absolute-address memory operand on mulq; a PIE/PIC build would
# need crypto_sign_ed25519_amd64_64_38(%rip) — confirm how this file is linked.
mulq  crypto_sign_ed25519_amd64_64_38
2536
2537# qhasm:   mulr4 = mulrax
2538# asm 1: mov  <mulrax=int64#7,>mulr4=int64#5
2539# asm 2: mov  <mulrax=%rax,>mulr4=%r8
2540mov  %rax,%r8
2541
2542# qhasm:   mulrax = mulr5
2543# asm 1: mov  <mulr5=int64#6,>mulrax=int64#7
2544# asm 2: mov  <mulr5=%r9,>mulrax=%rax
2545mov  %r9,%rax
2546
2547# qhasm:   mulr5 = mulrdx
2548# asm 1: mov  <mulrdx=int64#3,>mulr5=int64#6
2549# asm 2: mov  <mulrdx=%rdx,>mulr5=%r9
2550mov  %rdx,%r9
2551
2552# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
2553mulq  crypto_sign_ed25519_amd64_64_38
2554
2555# qhasm:   carry? mulr5 += mulrax
2556# asm 1: add  <mulrax=int64#7,<mulr5=int64#6
2557# asm 2: add  <mulrax=%rax,<mulr5=%r9
2558add  %rax,%r9
2559
2560# qhasm:   mulrax = mulr6
2561# asm 1: mov  <mulr6=int64#8,>mulrax=int64#7
2562# asm 2: mov  <mulr6=%r10,>mulrax=%rax
2563mov  %r10,%rax
2564
2565# qhasm:   mulr6 = 0
2566# asm 1: mov  $0,>mulr6=int64#8
2567# asm 2: mov  $0,>mulr6=%r10
2568mov  $0,%r10
2569
2570# qhasm:   mulr6 += mulrdx + carry
2571# asm 1: adc <mulrdx=int64#3,<mulr6=int64#8
2572# asm 2: adc <mulrdx=%rdx,<mulr6=%r10
2573adc %rdx,%r10
2574
2575# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
2576mulq  crypto_sign_ed25519_amd64_64_38
2577
2578# qhasm:   carry? mulr6 += mulrax
2579# asm 1: add  <mulrax=int64#7,<mulr6=int64#8
2580# asm 2: add  <mulrax=%rax,<mulr6=%r10
2581add  %rax,%r10
2582
2583# qhasm:   mulrax = mulr7
2584# asm 1: mov  <mulr7=int64#9,>mulrax=int64#7
2585# asm 2: mov  <mulr7=%r11,>mulrax=%rax
2586mov  %r11,%rax
2587
2588# qhasm:   mulr7 = 0
2589# asm 1: mov  $0,>mulr7=int64#9
2590# asm 2: mov  $0,>mulr7=%r11
2591mov  $0,%r11
2592
2593# qhasm:   mulr7 += mulrdx + carry
2594# asm 1: adc <mulrdx=int64#3,<mulr7=int64#9
2595# asm 2: adc <mulrdx=%rdx,<mulr7=%r11
2596adc %rdx,%r11
2597
2598# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
2599mulq  crypto_sign_ed25519_amd64_64_38
2600
2601# qhasm:   carry? mulr7 += mulrax
2602# asm 1: add  <mulrax=int64#7,<mulr7=int64#9
2603# asm 2: add  <mulrax=%rax,<mulr7=%r11
2604add  %rax,%r11
2605
2606# qhasm:   mulr8 = 0
2607# asm 1: mov  $0,>mulr8=int64#7
2608# asm 2: mov  $0,>mulr8=%rax
2609mov  $0,%rax
2610
2611# qhasm:   mulr8 += mulrdx + carry
2612# asm 1: adc <mulrdx=int64#3,<mulr8=int64#7
2613# asm 2: adc <mulrdx=%rdx,<mulr8=%rax
2614adc %rdx,%rax
2615
2616# qhasm:   carry? c0 += mulr4
2617# asm 1: add  <mulr4=int64#5,<c0=int64#11
2618# asm 2: add  <mulr4=%r8,<c0=%r13
2619add  %r8,%r13
2620
2621# qhasm:   carry? c1 += mulr5 + carry
2622# asm 1: adc <mulr5=int64#6,<c1=int64#12
2623# asm 2: adc <mulr5=%r9,<c1=%r14
2624adc %r9,%r14
2625
2626# qhasm:   carry? c2 += mulr6 + carry
2627# asm 1: adc <mulr6=int64#8,<c2=int64#13
2628# asm 2: adc <mulr6=%r10,<c2=%r15
2629adc %r10,%r15
2630
2631# qhasm:   carry? c3 += mulr7 + carry
2632# asm 1: adc <mulr7=int64#9,<c3=int64#14
2633# asm 2: adc <mulr7=%r11,<c3=%rbx
2634adc %r11,%rbx
2635
# Final carry fold for c0..c3: absorb the last carry into mulr8, multiply it
# by 38 and add to c0; any carry that then ripples out of c3 is folded once
# more via mulzero*38. The result is congruent mod 2^255-19 but kept as four
# unreduced 64-bit limbs (standard for this representation).
# qhasm:   mulzero = 0
# asm 1: mov  $0,>mulzero=int64#3
# asm 2: mov  $0,>mulzero=%rdx
mov  $0,%rdx

# qhasm:   mulr8 += mulzero + carry
# asm 1: adc <mulzero=int64#3,<mulr8=int64#7
# asm 2: adc <mulzero=%rdx,<mulr8=%rax
adc %rdx,%rax

# qhasm:   mulr8 *= 38
# asm 1: imulq  $38,<mulr8=int64#7,>mulr8=int64#5
# asm 2: imulq  $38,<mulr8=%rax,>mulr8=%r8
imulq  $38,%rax,%r8

# qhasm:   carry? c0 += mulr8
# asm 1: add  <mulr8=int64#5,<c0=int64#11
# asm 2: add  <mulr8=%r8,<c0=%r13
add  %r8,%r13

# qhasm:   carry? c1 += mulzero + carry
# asm 1: adc <mulzero=int64#3,<c1=int64#12
# asm 2: adc <mulzero=%rdx,<c1=%r14
adc %rdx,%r14

# qhasm:   carry? c2 += mulzero + carry
# asm 1: adc <mulzero=int64#3,<c2=int64#13
# asm 2: adc <mulzero=%rdx,<c2=%r15
adc %rdx,%r15

# qhasm:   carry? c3 += mulzero + carry
# asm 1: adc <mulzero=int64#3,<c3=int64#14
# asm 2: adc <mulzero=%rdx,<c3=%rbx
adc %rdx,%rbx

# If the adc chain above carried out of c3, mulzero becomes 1 here
# (0+0+carry); otherwise it stays 0.
# qhasm:   mulzero += mulzero + carry
# asm 1: adc <mulzero=int64#3,<mulzero=int64#3
# asm 2: adc <mulzero=%rdx,<mulzero=%rdx
adc %rdx,%rdx

# qhasm:   mulzero *= 38
# asm 1: imulq  $38,<mulzero=int64#3,>mulzero=int64#3
# asm 2: imulq  $38,<mulzero=%rdx,>mulzero=%rdx
imulq  $38,%rdx,%rdx

# Final add cannot carry again, so no further propagation is needed.
# qhasm:   c0 += mulzero
# asm 1: add  <mulzero=int64#3,<c0=int64#11
# asm 2: add  <mulzero=%rdx,<c0=%r13
add  %rdx,%r13
2685
# Spill c0..c3 (r13,r14,r15,rbx) to the stack frame so the register file is
# free for the next 4x4 limb multiplication; they are reloaded via adcq later.
# qhasm: c0_stack = c0
# asm 1: movq <c0=int64#11,>c0_stack=stack64#8
# asm 2: movq <c0=%r13,>c0_stack=56(%rsp)
movq %r13,56(%rsp)

# qhasm: c1_stack = c1
# asm 1: movq <c1=int64#12,>c1_stack=stack64#9
# asm 2: movq <c1=%r14,>c1_stack=64(%rsp)
movq %r14,64(%rsp)

# qhasm: c2_stack = c2
# asm 1: movq <c2=int64#13,>c2_stack=stack64#10
# asm 2: movq <c2=%r15,>c2_stack=72(%rsp)
movq %r15,72(%rsp)

# qhasm: c3_stack = c3
# asm 1: movq <c3=int64#14,>c3_stack=stack64#11
# asm 2: movq <c3=%rbx,>c3_stack=80(%rsp)
movq %rbx,80(%rsp)
2705
2706# qhasm:   mulr4 = 0
2707# asm 1: mov  $0,>mulr4=int64#5
2708# asm 2: mov  $0,>mulr4=%r8
2709mov  $0,%r8
2710
2711# qhasm:   mulr5 = 0
2712# asm 1: mov  $0,>mulr5=int64#6
2713# asm 2: mov  $0,>mulr5=%r9
2714mov  $0,%r9
2715
2716# qhasm:   mulr6 = 0
2717# asm 1: mov  $0,>mulr6=int64#8
2718# asm 2: mov  $0,>mulr6=%r10
2719mov  $0,%r10
2720
2721# qhasm:   mulr7 = 0
2722# asm 1: mov  $0,>mulr7=int64#9
2723# asm 2: mov  $0,>mulr7=%r11
2724mov  $0,%r11
2725
2726# qhasm:   mulx0 = *(uint64 *)(pp + 64)
2727# asm 1: movq   64(<pp=int64#2),>mulx0=int64#10
2728# asm 2: movq   64(<pp=%rsi),>mulx0=%r12
2729movq   64(%rsi),%r12
2730
2731# qhasm:   mulrax = *(uint64 *)(qp + 64)
2732# asm 1: movq   64(<qp=int64#4),>mulrax=int64#7
2733# asm 2: movq   64(<qp=%rcx),>mulrax=%rax
2734movq   64(%rcx),%rax
2735
2736# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx0
2737# asm 1: mul  <mulx0=int64#10
2738# asm 2: mul  <mulx0=%r12
2739mul  %r12
2740
2741# qhasm:   rt0 = mulrax
2742# asm 1: mov  <mulrax=int64#7,>rt0=int64#11
2743# asm 2: mov  <mulrax=%rax,>rt0=%r13
2744mov  %rax,%r13
2745
2746# qhasm:   rt1 = mulrdx
2747# asm 1: mov  <mulrdx=int64#3,>rt1=int64#12
2748# asm 2: mov  <mulrdx=%rdx,>rt1=%r14
2749mov  %rdx,%r14
2750
2751# qhasm:   mulrax = *(uint64 *)(qp + 72)
2752# asm 1: movq   72(<qp=int64#4),>mulrax=int64#7
2753# asm 2: movq   72(<qp=%rcx),>mulrax=%rax
2754movq   72(%rcx),%rax
2755
2756# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx0
2757# asm 1: mul  <mulx0=int64#10
2758# asm 2: mul  <mulx0=%r12
2759mul  %r12
2760
2761# qhasm:   carry? rt1 += mulrax
2762# asm 1: add  <mulrax=int64#7,<rt1=int64#12
2763# asm 2: add  <mulrax=%rax,<rt1=%r14
2764add  %rax,%r14
2765
2766# qhasm:   rt2 = 0
2767# asm 1: mov  $0,>rt2=int64#13
2768# asm 2: mov  $0,>rt2=%r15
2769mov  $0,%r15
2770
2771# qhasm:   rt2 += mulrdx + carry
2772# asm 1: adc <mulrdx=int64#3,<rt2=int64#13
2773# asm 2: adc <mulrdx=%rdx,<rt2=%r15
2774adc %rdx,%r15
2775
2776# qhasm:   mulrax = *(uint64 *)(qp + 80)
2777# asm 1: movq   80(<qp=int64#4),>mulrax=int64#7
2778# asm 2: movq   80(<qp=%rcx),>mulrax=%rax
2779movq   80(%rcx),%rax
2780
2781# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx0
2782# asm 1: mul  <mulx0=int64#10
2783# asm 2: mul  <mulx0=%r12
2784mul  %r12
2785
2786# qhasm:   carry? rt2 += mulrax
2787# asm 1: add  <mulrax=int64#7,<rt2=int64#13
2788# asm 2: add  <mulrax=%rax,<rt2=%r15
2789add  %rax,%r15
2790
2791# qhasm:   rt3 = 0
2792# asm 1: mov  $0,>rt3=int64#14
2793# asm 2: mov  $0,>rt3=%rbx
2794mov  $0,%rbx
2795
2796# qhasm:   rt3 += mulrdx + carry
2797# asm 1: adc <mulrdx=int64#3,<rt3=int64#14
2798# asm 2: adc <mulrdx=%rdx,<rt3=%rbx
2799adc %rdx,%rbx
2800
2801# qhasm:   mulrax = *(uint64 *)(qp + 88)
2802# asm 1: movq   88(<qp=int64#4),>mulrax=int64#7
2803# asm 2: movq   88(<qp=%rcx),>mulrax=%rax
2804movq   88(%rcx),%rax
2805
2806# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx0
2807# asm 1: mul  <mulx0=int64#10
2808# asm 2: mul  <mulx0=%r12
2809mul  %r12
2810
2811# qhasm:   carry? rt3 += mulrax
2812# asm 1: add  <mulrax=int64#7,<rt3=int64#14
2813# asm 2: add  <mulrax=%rax,<rt3=%rbx
2814add  %rax,%rbx
2815
2816# qhasm:   mulr4 += mulrdx + carry
2817# asm 1: adc <mulrdx=int64#3,<mulr4=int64#5
2818# asm 2: adc <mulrdx=%rdx,<mulr4=%r8
2819adc %rdx,%r8
2820
2821# qhasm:   mulx1 = *(uint64 *)(pp + 72)
2822# asm 1: movq   72(<pp=int64#2),>mulx1=int64#10
2823# asm 2: movq   72(<pp=%rsi),>mulx1=%r12
2824movq   72(%rsi),%r12
2825
2826# qhasm:   mulrax = *(uint64 *)(qp + 64)
2827# asm 1: movq   64(<qp=int64#4),>mulrax=int64#7
2828# asm 2: movq   64(<qp=%rcx),>mulrax=%rax
2829movq   64(%rcx),%rax
2830
2831# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx1
2832# asm 1: mul  <mulx1=int64#10
2833# asm 2: mul  <mulx1=%r12
2834mul  %r12
2835
2836# qhasm:   carry? rt1 += mulrax
2837# asm 1: add  <mulrax=int64#7,<rt1=int64#12
2838# asm 2: add  <mulrax=%rax,<rt1=%r14
2839add  %rax,%r14
2840
2841# qhasm:   mulc = 0
2842# asm 1: mov  $0,>mulc=int64#15
2843# asm 2: mov  $0,>mulc=%rbp
2844mov  $0,%rbp
2845
2846# qhasm:   mulc += mulrdx + carry
2847# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
2848# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
2849adc %rdx,%rbp
2850
2851# qhasm:   mulrax = *(uint64 *)(qp + 72)
2852# asm 1: movq   72(<qp=int64#4),>mulrax=int64#7
2853# asm 2: movq   72(<qp=%rcx),>mulrax=%rax
2854movq   72(%rcx),%rax
2855
2856# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx1
2857# asm 1: mul  <mulx1=int64#10
2858# asm 2: mul  <mulx1=%r12
2859mul  %r12
2860
2861# qhasm:   carry? rt2 += mulrax
2862# asm 1: add  <mulrax=int64#7,<rt2=int64#13
2863# asm 2: add  <mulrax=%rax,<rt2=%r15
2864add  %rax,%r15
2865
2866# qhasm:   mulrdx += 0 + carry
2867# asm 1: adc $0,<mulrdx=int64#3
2868# asm 2: adc $0,<mulrdx=%rdx
2869adc $0,%rdx
2870
2871# qhasm:   carry? rt2 += mulc
2872# asm 1: add  <mulc=int64#15,<rt2=int64#13
2873# asm 2: add  <mulc=%rbp,<rt2=%r15
2874add  %rbp,%r15
2875
2876# qhasm:   mulc = 0
2877# asm 1: mov  $0,>mulc=int64#15
2878# asm 2: mov  $0,>mulc=%rbp
2879mov  $0,%rbp
2880
2881# qhasm:   mulc += mulrdx + carry
2882# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
2883# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
2884adc %rdx,%rbp
2885
2886# qhasm:   mulrax = *(uint64 *)(qp + 80)
2887# asm 1: movq   80(<qp=int64#4),>mulrax=int64#7
2888# asm 2: movq   80(<qp=%rcx),>mulrax=%rax
2889movq   80(%rcx),%rax
2890
2891# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx1
2892# asm 1: mul  <mulx1=int64#10
2893# asm 2: mul  <mulx1=%r12
2894mul  %r12
2895
2896# qhasm:   carry? rt3 += mulrax
2897# asm 1: add  <mulrax=int64#7,<rt3=int64#14
2898# asm 2: add  <mulrax=%rax,<rt3=%rbx
2899add  %rax,%rbx
2900
2901# qhasm:   mulrdx += 0 + carry
2902# asm 1: adc $0,<mulrdx=int64#3
2903# asm 2: adc $0,<mulrdx=%rdx
2904adc $0,%rdx
2905
2906# qhasm:   carry? rt3 += mulc
2907# asm 1: add  <mulc=int64#15,<rt3=int64#14
2908# asm 2: add  <mulc=%rbp,<rt3=%rbx
2909add  %rbp,%rbx
2910
2911# qhasm:   mulc = 0
2912# asm 1: mov  $0,>mulc=int64#15
2913# asm 2: mov  $0,>mulc=%rbp
2914mov  $0,%rbp
2915
2916# qhasm:   mulc += mulrdx + carry
2917# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
2918# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
2919adc %rdx,%rbp
2920
2921# qhasm:   mulrax = *(uint64 *)(qp + 88)
2922# asm 1: movq   88(<qp=int64#4),>mulrax=int64#7
2923# asm 2: movq   88(<qp=%rcx),>mulrax=%rax
2924movq   88(%rcx),%rax
2925
2926# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx1
2927# asm 1: mul  <mulx1=int64#10
2928# asm 2: mul  <mulx1=%r12
2929mul  %r12
2930
2931# qhasm:   carry? mulr4 += mulrax
2932# asm 1: add  <mulrax=int64#7,<mulr4=int64#5
2933# asm 2: add  <mulrax=%rax,<mulr4=%r8
2934add  %rax,%r8
2935
2936# qhasm:   mulrdx += 0 + carry
2937# asm 1: adc $0,<mulrdx=int64#3
2938# asm 2: adc $0,<mulrdx=%rdx
2939adc $0,%rdx
2940
2941# qhasm:   carry? mulr4 += mulc
2942# asm 1: add  <mulc=int64#15,<mulr4=int64#5
2943# asm 2: add  <mulc=%rbp,<mulr4=%r8
2944add  %rbp,%r8
2945
2946# qhasm:   mulr5 += mulrdx + carry
2947# asm 1: adc <mulrdx=int64#3,<mulr5=int64#6
2948# asm 2: adc <mulrdx=%rdx,<mulr5=%r9
2949adc %rdx,%r9
2950
2951# qhasm:   mulx2 = *(uint64 *)(pp + 80)
2952# asm 1: movq   80(<pp=int64#2),>mulx2=int64#10
2953# asm 2: movq   80(<pp=%rsi),>mulx2=%r12
2954movq   80(%rsi),%r12
2955
2956# qhasm:   mulrax = *(uint64 *)(qp + 64)
2957# asm 1: movq   64(<qp=int64#4),>mulrax=int64#7
2958# asm 2: movq   64(<qp=%rcx),>mulrax=%rax
2959movq   64(%rcx),%rax
2960
2961# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx2
2962# asm 1: mul  <mulx2=int64#10
2963# asm 2: mul  <mulx2=%r12
2964mul  %r12
2965
2966# qhasm:   carry? rt2 += mulrax
2967# asm 1: add  <mulrax=int64#7,<rt2=int64#13
2968# asm 2: add  <mulrax=%rax,<rt2=%r15
2969add  %rax,%r15
2970
2971# qhasm:   mulc = 0
2972# asm 1: mov  $0,>mulc=int64#15
2973# asm 2: mov  $0,>mulc=%rbp
2974mov  $0,%rbp
2975
2976# qhasm:   mulc += mulrdx + carry
2977# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
2978# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
2979adc %rdx,%rbp
2980
2981# qhasm:   mulrax = *(uint64 *)(qp + 72)
2982# asm 1: movq   72(<qp=int64#4),>mulrax=int64#7
2983# asm 2: movq   72(<qp=%rcx),>mulrax=%rax
2984movq   72(%rcx),%rax
2985
2986# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx2
2987# asm 1: mul  <mulx2=int64#10
2988# asm 2: mul  <mulx2=%r12
2989mul  %r12
2990
2991# qhasm:   carry? rt3 += mulrax
2992# asm 1: add  <mulrax=int64#7,<rt3=int64#14
2993# asm 2: add  <mulrax=%rax,<rt3=%rbx
2994add  %rax,%rbx
2995
2996# qhasm:   mulrdx += 0 + carry
2997# asm 1: adc $0,<mulrdx=int64#3
2998# asm 2: adc $0,<mulrdx=%rdx
2999adc $0,%rdx
3000
3001# qhasm:   carry? rt3 += mulc
3002# asm 1: add  <mulc=int64#15,<rt3=int64#14
3003# asm 2: add  <mulc=%rbp,<rt3=%rbx
3004add  %rbp,%rbx
3005
3006# qhasm:   mulc = 0
3007# asm 1: mov  $0,>mulc=int64#15
3008# asm 2: mov  $0,>mulc=%rbp
3009mov  $0,%rbp
3010
3011# qhasm:   mulc += mulrdx + carry
3012# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
3013# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
3014adc %rdx,%rbp
3015
3016# qhasm:   mulrax = *(uint64 *)(qp + 80)
3017# asm 1: movq   80(<qp=int64#4),>mulrax=int64#7
3018# asm 2: movq   80(<qp=%rcx),>mulrax=%rax
3019movq   80(%rcx),%rax
3020
3021# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx2
3022# asm 1: mul  <mulx2=int64#10
3023# asm 2: mul  <mulx2=%r12
3024mul  %r12
3025
3026# qhasm:   carry? mulr4 += mulrax
3027# asm 1: add  <mulrax=int64#7,<mulr4=int64#5
3028# asm 2: add  <mulrax=%rax,<mulr4=%r8
3029add  %rax,%r8
3030
3031# qhasm:   mulrdx += 0 + carry
3032# asm 1: adc $0,<mulrdx=int64#3
3033# asm 2: adc $0,<mulrdx=%rdx
3034adc $0,%rdx
3035
3036# qhasm:   carry? mulr4 += mulc
3037# asm 1: add  <mulc=int64#15,<mulr4=int64#5
3038# asm 2: add  <mulc=%rbp,<mulr4=%r8
3039add  %rbp,%r8
3040
3041# qhasm:   mulc = 0
3042# asm 1: mov  $0,>mulc=int64#15
3043# asm 2: mov  $0,>mulc=%rbp
3044mov  $0,%rbp
3045
3046# qhasm:   mulc += mulrdx + carry
3047# asm 1: adc <mulrdx=int64#3,<mulc=int64#15
3048# asm 2: adc <mulrdx=%rdx,<mulc=%rbp
3049adc %rdx,%rbp
3050
3051# qhasm:   mulrax = *(uint64 *)(qp + 88)
3052# asm 1: movq   88(<qp=int64#4),>mulrax=int64#7
3053# asm 2: movq   88(<qp=%rcx),>mulrax=%rax
3054movq   88(%rcx),%rax
3055
3056# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx2
3057# asm 1: mul  <mulx2=int64#10
3058# asm 2: mul  <mulx2=%r12
3059mul  %r12
3060
3061# qhasm:   carry? mulr5 += mulrax
3062# asm 1: add  <mulrax=int64#7,<mulr5=int64#6
3063# asm 2: add  <mulrax=%rax,<mulr5=%r9
3064add  %rax,%r9
3065
3066# qhasm:   mulrdx += 0 + carry
3067# asm 1: adc $0,<mulrdx=int64#3
3068# asm 2: adc $0,<mulrdx=%rdx
3069adc $0,%rdx
3070
3071# qhasm:   carry? mulr5 += mulc
3072# asm 1: add  <mulc=int64#15,<mulr5=int64#6
3073# asm 2: add  <mulc=%rbp,<mulr5=%r9
3074add  %rbp,%r9
3075
3076# qhasm:   mulr6 += mulrdx + carry
3077# asm 1: adc <mulrdx=int64#3,<mulr6=int64#8
3078# asm 2: adc <mulrdx=%rdx,<mulr6=%r10
3079adc %rdx,%r10
3080
3081# qhasm:   mulx3 = *(uint64 *)(pp + 88)
3082# asm 1: movq   88(<pp=int64#2),>mulx3=int64#2
3083# asm 2: movq   88(<pp=%rsi),>mulx3=%rsi
3084movq   88(%rsi),%rsi
3085
3086# qhasm:   mulrax = *(uint64 *)(qp + 64)
3087# asm 1: movq   64(<qp=int64#4),>mulrax=int64#7
3088# asm 2: movq   64(<qp=%rcx),>mulrax=%rax
3089movq   64(%rcx),%rax
3090
3091# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx3
3092# asm 1: mul  <mulx3=int64#2
3093# asm 2: mul  <mulx3=%rsi
3094mul  %rsi
3095
3096# qhasm:   carry? rt3 += mulrax
3097# asm 1: add  <mulrax=int64#7,<rt3=int64#14
3098# asm 2: add  <mulrax=%rax,<rt3=%rbx
3099add  %rax,%rbx
3100
3101# qhasm:   mulc = 0
3102# asm 1: mov  $0,>mulc=int64#10
3103# asm 2: mov  $0,>mulc=%r12
3104mov  $0,%r12
3105
3106# qhasm:   mulc += mulrdx + carry
3107# asm 1: adc <mulrdx=int64#3,<mulc=int64#10
3108# asm 2: adc <mulrdx=%rdx,<mulc=%r12
3109adc %rdx,%r12
3110
3111# qhasm:   mulrax = *(uint64 *)(qp + 72)
3112# asm 1: movq   72(<qp=int64#4),>mulrax=int64#7
3113# asm 2: movq   72(<qp=%rcx),>mulrax=%rax
3114movq   72(%rcx),%rax
3115
3116# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx3
3117# asm 1: mul  <mulx3=int64#2
3118# asm 2: mul  <mulx3=%rsi
3119mul  %rsi
3120
3121# qhasm:   carry? mulr4 += mulrax
3122# asm 1: add  <mulrax=int64#7,<mulr4=int64#5
3123# asm 2: add  <mulrax=%rax,<mulr4=%r8
3124add  %rax,%r8
3125
3126# qhasm:   mulrdx += 0 + carry
3127# asm 1: adc $0,<mulrdx=int64#3
3128# asm 2: adc $0,<mulrdx=%rdx
3129adc $0,%rdx
3130
3131# qhasm:   carry? mulr4 += mulc
3132# asm 1: add  <mulc=int64#10,<mulr4=int64#5
3133# asm 2: add  <mulc=%r12,<mulr4=%r8
3134add  %r12,%r8
3135
3136# qhasm:   mulc = 0
3137# asm 1: mov  $0,>mulc=int64#10
3138# asm 2: mov  $0,>mulc=%r12
3139mov  $0,%r12
3140
3141# qhasm:   mulc += mulrdx + carry
3142# asm 1: adc <mulrdx=int64#3,<mulc=int64#10
3143# asm 2: adc <mulrdx=%rdx,<mulc=%r12
3144adc %rdx,%r12
3145
3146# qhasm:   mulrax = *(uint64 *)(qp + 80)
3147# asm 1: movq   80(<qp=int64#4),>mulrax=int64#7
3148# asm 2: movq   80(<qp=%rcx),>mulrax=%rax
3149movq   80(%rcx),%rax
3150
3151# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx3
3152# asm 1: mul  <mulx3=int64#2
3153# asm 2: mul  <mulx3=%rsi
3154mul  %rsi
3155
3156# qhasm:   carry? mulr5 += mulrax
3157# asm 1: add  <mulrax=int64#7,<mulr5=int64#6
3158# asm 2: add  <mulrax=%rax,<mulr5=%r9
3159add  %rax,%r9
3160
3161# qhasm:   mulrdx += 0 + carry
3162# asm 1: adc $0,<mulrdx=int64#3
3163# asm 2: adc $0,<mulrdx=%rdx
3164adc $0,%rdx
3165
3166# qhasm:   carry? mulr5 += mulc
3167# asm 1: add  <mulc=int64#10,<mulr5=int64#6
3168# asm 2: add  <mulc=%r12,<mulr5=%r9
3169add  %r12,%r9
3170
3171# qhasm:   mulc = 0
3172# asm 1: mov  $0,>mulc=int64#10
3173# asm 2: mov  $0,>mulc=%r12
3174mov  $0,%r12
3175
3176# qhasm:   mulc += mulrdx + carry
3177# asm 1: adc <mulrdx=int64#3,<mulc=int64#10
3178# asm 2: adc <mulrdx=%rdx,<mulc=%r12
3179adc %rdx,%r12
3180
3181# qhasm:   mulrax = *(uint64 *)(qp + 88)
3182# asm 1: movq   88(<qp=int64#4),>mulrax=int64#7
3183# asm 2: movq   88(<qp=%rcx),>mulrax=%rax
3184movq   88(%rcx),%rax
3185
3186# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx3
3187# asm 1: mul  <mulx3=int64#2
3188# asm 2: mul  <mulx3=%rsi
3189mul  %rsi
3190
3191# qhasm:   carry? mulr6 += mulrax
3192# asm 1: add  <mulrax=int64#7,<mulr6=int64#8
3193# asm 2: add  <mulrax=%rax,<mulr6=%r10
3194add  %rax,%r10
3195
3196# qhasm:   mulrdx += 0 + carry
3197# asm 1: adc $0,<mulrdx=int64#3
3198# asm 2: adc $0,<mulrdx=%rdx
3199adc $0,%rdx
3200
3201# qhasm:   carry? mulr6 += mulc
3202# asm 1: add  <mulc=int64#10,<mulr6=int64#8
3203# asm 2: add  <mulc=%r12,<mulr6=%r10
3204add  %r12,%r10
3205
3206# qhasm:   mulr7 += mulrdx + carry
3207# asm 1: adc <mulrdx=int64#3,<mulr7=int64#9
3208# asm 2: adc <mulrdx=%rdx,<mulr7=%r11
3209adc %rdx,%r11
3210
# qhasm:   mulrax = mulr4
# asm 1: mov  <mulr4=int64#5,>mulrax=int64#7
# asm 2: mov  <mulr4=%r8,>mulrax=%rax
mov  %r8,%rax

# Reduction phase for the second product (rt = p[64..88] * q[64..88]):
# fold mulr4..mulr7 into rt0..rt3 via 2^256 == 38 (mod 2^255-19).
# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
# NOTE(review): absolute-address operand again — see PIC note at the first
# reduction; both sites would need the same (%rip) fix for PIE builds.
mulq  crypto_sign_ed25519_amd64_64_38
3218
3219# qhasm:   mulr4 = mulrax
3220# asm 1: mov  <mulrax=int64#7,>mulr4=int64#2
3221# asm 2: mov  <mulrax=%rax,>mulr4=%rsi
3222mov  %rax,%rsi
3223
3224# qhasm:   mulrax = mulr5
3225# asm 1: mov  <mulr5=int64#6,>mulrax=int64#7
3226# asm 2: mov  <mulr5=%r9,>mulrax=%rax
3227mov  %r9,%rax
3228
3229# qhasm:   mulr5 = mulrdx
3230# asm 1: mov  <mulrdx=int64#3,>mulr5=int64#4
3231# asm 2: mov  <mulrdx=%rdx,>mulr5=%rcx
3232mov  %rdx,%rcx
3233
3234# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
3235mulq  crypto_sign_ed25519_amd64_64_38
3236
3237# qhasm:   carry? mulr5 += mulrax
3238# asm 1: add  <mulrax=int64#7,<mulr5=int64#4
3239# asm 2: add  <mulrax=%rax,<mulr5=%rcx
3240add  %rax,%rcx
3241
3242# qhasm:   mulrax = mulr6
3243# asm 1: mov  <mulr6=int64#8,>mulrax=int64#7
3244# asm 2: mov  <mulr6=%r10,>mulrax=%rax
3245mov  %r10,%rax
3246
3247# qhasm:   mulr6 = 0
3248# asm 1: mov  $0,>mulr6=int64#5
3249# asm 2: mov  $0,>mulr6=%r8
3250mov  $0,%r8
3251
3252# qhasm:   mulr6 += mulrdx + carry
3253# asm 1: adc <mulrdx=int64#3,<mulr6=int64#5
3254# asm 2: adc <mulrdx=%rdx,<mulr6=%r8
3255adc %rdx,%r8
3256
3257# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
3258mulq  crypto_sign_ed25519_amd64_64_38
3259
3260# qhasm:   carry? mulr6 += mulrax
3261# asm 1: add  <mulrax=int64#7,<mulr6=int64#5
3262# asm 2: add  <mulrax=%rax,<mulr6=%r8
3263add  %rax,%r8
3264
3265# qhasm:   mulrax = mulr7
3266# asm 1: mov  <mulr7=int64#9,>mulrax=int64#7
3267# asm 2: mov  <mulr7=%r11,>mulrax=%rax
3268mov  %r11,%rax
3269
3270# qhasm:   mulr7 = 0
3271# asm 1: mov  $0,>mulr7=int64#6
3272# asm 2: mov  $0,>mulr7=%r9
3273mov  $0,%r9
3274
3275# qhasm:   mulr7 += mulrdx + carry
3276# asm 1: adc <mulrdx=int64#3,<mulr7=int64#6
3277# asm 2: adc <mulrdx=%rdx,<mulr7=%r9
3278adc %rdx,%r9
3279
3280# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
3281mulq  crypto_sign_ed25519_amd64_64_38
3282
3283# qhasm:   carry? mulr7 += mulrax
3284# asm 1: add  <mulrax=int64#7,<mulr7=int64#6
3285# asm 2: add  <mulrax=%rax,<mulr7=%r9
3286add  %rax,%r9
3287
3288# qhasm:   mulr8 = 0
3289# asm 1: mov  $0,>mulr8=int64#7
3290# asm 2: mov  $0,>mulr8=%rax
3291mov  $0,%rax
3292
3293# qhasm:   mulr8 += mulrdx + carry
3294# asm 1: adc <mulrdx=int64#3,<mulr8=int64#7
3295# asm 2: adc <mulrdx=%rdx,<mulr8=%rax
3296adc %rdx,%rax
3297
3298# qhasm:   carry? rt0 += mulr4
3299# asm 1: add  <mulr4=int64#2,<rt0=int64#11
3300# asm 2: add  <mulr4=%rsi,<rt0=%r13
3301add  %rsi,%r13
3302
3303# qhasm:   carry? rt1 += mulr5 + carry
3304# asm 1: adc <mulr5=int64#4,<rt1=int64#12
3305# asm 2: adc <mulr5=%rcx,<rt1=%r14
3306adc %rcx,%r14
3307
3308# qhasm:   carry? rt2 += mulr6 + carry
3309# asm 1: adc <mulr6=int64#5,<rt2=int64#13
3310# asm 2: adc <mulr6=%r8,<rt2=%r15
3311adc %r8,%r15
3312
3313# qhasm:   carry? rt3 += mulr7 + carry
3314# asm 1: adc <mulr7=int64#6,<rt3=int64#14
3315# asm 2: adc <mulr7=%r9,<rt3=%rbx
3316adc %r9,%rbx
3317
3318# qhasm:   mulzero = 0
3319# asm 1: mov  $0,>mulzero=int64#2
3320# asm 2: mov  $0,>mulzero=%rsi
3321mov  $0,%rsi
3322
3323# qhasm:   mulr8 += mulzero + carry
3324# asm 1: adc <mulzero=int64#2,<mulr8=int64#7
3325# asm 2: adc <mulzero=%rsi,<mulr8=%rax
3326adc %rsi,%rax
3327
3328# qhasm:   mulr8 *= 38
3329# asm 1: imulq  $38,<mulr8=int64#7,>mulr8=int64#3
3330# asm 2: imulq  $38,<mulr8=%rax,>mulr8=%rdx
3331imulq  $38,%rax,%rdx
3332
3333# qhasm:   carry? rt0 += mulr8
3334# asm 1: add  <mulr8=int64#3,<rt0=int64#11
3335# asm 2: add  <mulr8=%rdx,<rt0=%r13
3336add  %rdx,%r13
3337
3338# qhasm:   carry? rt1 += mulzero + carry
3339# asm 1: adc <mulzero=int64#2,<rt1=int64#12
3340# asm 2: adc <mulzero=%rsi,<rt1=%r14
3341adc %rsi,%r14
3342
3343# qhasm:   carry? rt2 += mulzero + carry
3344# asm 1: adc <mulzero=int64#2,<rt2=int64#13
3345# asm 2: adc <mulzero=%rsi,<rt2=%r15
3346adc %rsi,%r15
3347
3348# qhasm:   carry? rt3 += mulzero + carry
3349# asm 1: adc <mulzero=int64#2,<rt3=int64#14
3350# asm 2: adc <mulzero=%rsi,<rt3=%rbx
3351adc %rsi,%rbx
3352
3353# qhasm:   mulzero += mulzero + carry
3354# asm 1: adc <mulzero=int64#2,<mulzero=int64#2
3355# asm 2: adc <mulzero=%rsi,<mulzero=%rsi
3356adc %rsi,%rsi
3357
3358# qhasm:   mulzero *= 38
3359# asm 1: imulq  $38,<mulzero=int64#2,>mulzero=int64#2
3360# asm 2: imulq  $38,<mulzero=%rsi,>mulzero=%rsi
3361imulq  $38,%rsi,%rsi
3362
3363# qhasm:   rt0 += mulzero
3364# asm 1: add  <mulzero=int64#2,<rt0=int64#11
3365# asm 2: add  <mulzero=%rsi,<rt0=%r13
3366add  %rsi,%r13
3367
# Double rt: rt = 2*rt by adding each limb to itself with carry propagation.
# The carry out of rt3 is handled by the conditional +38 fold that follows.
# qhasm:   carry? rt0 += rt0
# asm 1: add  <rt0=int64#11,<rt0=int64#11
# asm 2: add  <rt0=%r13,<rt0=%r13
add  %r13,%r13

# qhasm:   carry? rt1 += rt1 + carry
# asm 1: adc <rt1=int64#12,<rt1=int64#12
# asm 2: adc <rt1=%r14,<rt1=%r14
adc %r14,%r14

# qhasm:   carry? rt2 += rt2 + carry
# asm 1: adc <rt2=int64#13,<rt2=int64#13
# asm 2: adc <rt2=%r15,<rt2=%r15
adc %r15,%r15

# qhasm:   carry? rt3 += rt3 + carry
# asm 1: adc <rt3=int64#14,<rt3=int64#14
# asm 2: adc <rt3=%rbx,<rt3=%rbx
adc %rbx,%rbx
3387
# Branch-free carry fold after the doubling: addt1 starts at 38 and is zeroed
# by cmovae when the doubling produced no carry (2^256 == 38 mod 2^255-19).
# Constant-time by construction — no data-dependent branches.
# qhasm:   addt0 = 0
# asm 1: mov  $0,>addt0=int64#2
# asm 2: mov  $0,>addt0=%rsi
mov  $0,%rsi

# qhasm:   addt1 = 38
# asm 1: mov  $38,>addt1=int64#3
# asm 2: mov  $38,>addt1=%rdx
mov  $38,%rdx

# qhasm:   addt1 = addt0 if !carry
# asm 1: cmovae <addt0=int64#2,<addt1=int64#3
# asm 2: cmovae <addt0=%rsi,<addt1=%rdx
cmovae %rsi,%rdx

# qhasm:   carry? rt0 += addt1
# asm 1: add  <addt1=int64#3,<rt0=int64#11
# asm 2: add  <addt1=%rdx,<rt0=%r13
add  %rdx,%r13

# qhasm:   carry? rt1 += addt0 + carry
# asm 1: adc <addt0=int64#2,<rt1=int64#12
# asm 2: adc <addt0=%rsi,<rt1=%r14
adc %rsi,%r14

# qhasm:   carry? rt2 += addt0 + carry
# asm 1: adc <addt0=int64#2,<rt2=int64#13
# asm 2: adc <addt0=%rsi,<rt2=%r15
adc %rsi,%r15

# qhasm:   carry? rt3 += addt0 + carry
# asm 1: adc <addt0=int64#2,<rt3=int64#14
# asm 2: adc <addt0=%rsi,<rt3=%rbx
adc %rsi,%rbx

# Second-level fold: if adding 38 itself carried out of rt3, add 38 once
# more to rt0 (cannot carry again).
# qhasm:   addt0 = addt1 if carry
# asm 1: cmovc <addt1=int64#3,<addt0=int64#2
# asm 2: cmovc <addt1=%rdx,<addt0=%rsi
cmovc %rdx,%rsi

# qhasm:   rt0 += addt0
# asm 1: add  <addt0=int64#2,<rt0=int64#11
# asm 2: add  <addt0=%rsi,<rt0=%r13
add  %rsi,%r13
3432
# rz = rt + c: copy the four rt limbs into fresh registers, add the
# 4-limb value c (spilled to the stack at 56..80(%rsp) earlier in the
# function), then reduce the carry with the same conditional +38 trick
# as above.  rt itself is preserved for the subtraction that follows.

# qhasm: rz0 = rt0
# asm 1: mov  <rt0=int64#11,>rz0=int64#2
# asm 2: mov  <rt0=%r13,>rz0=%rsi
mov  %r13,%rsi

# qhasm: rz1 = rt1
# asm 1: mov  <rt1=int64#12,>rz1=int64#3
# asm 2: mov  <rt1=%r14,>rz1=%rdx
mov  %r14,%rdx

# qhasm: rz2 = rt2
# asm 1: mov  <rt2=int64#13,>rz2=int64#4
# asm 2: mov  <rt2=%r15,>rz2=%rcx
mov  %r15,%rcx

# qhasm: rz3 = rt3
# asm 1: mov  <rt3=int64#14,>rz3=int64#5
# asm 2: mov  <rt3=%rbx,>rz3=%r8
mov  %rbx,%r8

# rz += c, limb by limb with a carry chain.

# qhasm:   carry? rz0 += c0_stack
# asm 1: addq <c0_stack=stack64#8,<rz0=int64#2
# asm 2: addq <c0_stack=56(%rsp),<rz0=%rsi
addq 56(%rsp),%rsi

# qhasm:   carry? rz1 += c1_stack + carry
# asm 1: adcq <c1_stack=stack64#9,<rz1=int64#3
# asm 2: adcq <c1_stack=64(%rsp),<rz1=%rdx
adcq 64(%rsp),%rdx

# qhasm:   carry? rz2 += c2_stack + carry
# asm 1: adcq <c2_stack=stack64#10,<rz2=int64#4
# asm 2: adcq <c2_stack=72(%rsp),<rz2=%rcx
adcq 72(%rsp),%rcx

# qhasm:   carry? rz3 += c3_stack + carry
# asm 1: adcq <c3_stack=stack64#11,<rz3=int64#5
# asm 2: adcq <c3_stack=80(%rsp),<rz3=%r8
adcq 80(%rsp),%r8

# Branch-free reduction: add 38 to limb 0 iff the chain carried
# (2^256 = 38 mod 2^255-19), then once more for a second-level carry.

# qhasm:   addt0 = 0
# asm 1: mov  $0,>addt0=int64#6
# asm 2: mov  $0,>addt0=%r9
mov  $0,%r9

# qhasm:   addt1 = 38
# asm 1: mov  $38,>addt1=int64#7
# asm 2: mov  $38,>addt1=%rax
mov  $38,%rax

# qhasm:   addt1 = addt0 if !carry
# asm 1: cmovae <addt0=int64#6,<addt1=int64#7
# asm 2: cmovae <addt0=%r9,<addt1=%rax
cmovae %r9,%rax

# qhasm:   carry? rz0 += addt1
# asm 1: add  <addt1=int64#7,<rz0=int64#2
# asm 2: add  <addt1=%rax,<rz0=%rsi
add  %rax,%rsi

# qhasm:   carry? rz1 += addt0 + carry
# asm 1: adc <addt0=int64#6,<rz1=int64#3
# asm 2: adc <addt0=%r9,<rz1=%rdx
adc %r9,%rdx

# qhasm:   carry? rz2 += addt0 + carry
# asm 1: adc <addt0=int64#6,<rz2=int64#4
# asm 2: adc <addt0=%r9,<rz2=%rcx
adc %r9,%rcx

# qhasm:   carry? rz3 += addt0 + carry
# asm 1: adc <addt0=int64#6,<rz3=int64#5
# asm 2: adc <addt0=%r9,<rz3=%r8
adc %r9,%r8

# qhasm:   addt0 = addt1 if carry
# asm 1: cmovc <addt1=int64#7,<addt0=int64#6
# asm 2: cmovc <addt1=%rax,<addt0=%r9
cmovc %rax,%r9

# qhasm:   rz0 += addt0
# asm 1: add  <addt0=int64#6,<rz0=int64#2
# asm 2: add  <addt0=%r9,<rz0=%rsi
add  %r9,%rsi
3517
# rt = rt - c: subtract the same stack-spilled 4-limb value c with a
# borrow chain.  A borrow out of limb 3 is worth -2^256 = -38
# (mod 2^255-19), so conditionally subtract 38 from limb 0, and repeat
# once for a (rare) second-level borrow — the subtraction mirror of the
# conditional-add reduction above.

# qhasm:   carry? rt0 -= c0_stack
# asm 1: subq <c0_stack=stack64#8,<rt0=int64#11
# asm 2: subq <c0_stack=56(%rsp),<rt0=%r13
subq 56(%rsp),%r13

# qhasm:   carry? rt1 -= c1_stack - carry
# asm 1: sbbq <c1_stack=stack64#9,<rt1=int64#12
# asm 2: sbbq <c1_stack=64(%rsp),<rt1=%r14
sbbq 64(%rsp),%r14

# qhasm:   carry? rt2 -= c2_stack - carry
# asm 1: sbbq <c2_stack=stack64#10,<rt2=int64#13
# asm 2: sbbq <c2_stack=72(%rsp),<rt2=%r15
sbbq 72(%rsp),%r15

# qhasm:   carry? rt3 -= c3_stack - carry
# asm 1: sbbq <c3_stack=stack64#11,<rt3=int64#14
# asm 2: sbbq <c3_stack=80(%rsp),<rt3=%rbx
sbbq 80(%rsp),%rbx

# qhasm:   subt0 = 0
# asm 1: mov  $0,>subt0=int64#6
# asm 2: mov  $0,>subt0=%r9
mov  $0,%r9

# qhasm:   subt1 = 38
# asm 1: mov  $38,>subt1=int64#7
# asm 2: mov  $38,>subt1=%rax
mov  $38,%rax

# qhasm:   subt1 = subt0 if !carry
# asm 1: cmovae <subt0=int64#6,<subt1=int64#7
# asm 2: cmovae <subt0=%r9,<subt1=%rax
# mov preserves flags: CF is still the borrow from the sbb chain, so
# subt1 = borrow ? 38 : 0.
cmovae %r9,%rax

# qhasm:   carry? rt0 -= subt1
# asm 1: sub  <subt1=int64#7,<rt0=int64#11
# asm 2: sub  <subt1=%rax,<rt0=%r13
sub  %rax,%r13

# qhasm:   carry? rt1 -= subt0 - carry
# asm 1: sbb  <subt0=int64#6,<rt1=int64#12
# asm 2: sbb  <subt0=%r9,<rt1=%r14
sbb  %r9,%r14

# qhasm:   carry? rt2 -= subt0 - carry
# asm 1: sbb  <subt0=int64#6,<rt2=int64#13
# asm 2: sbb  <subt0=%r9,<rt2=%r15
sbb  %r9,%r15

# qhasm:   carry? rt3 -= subt0 - carry
# asm 1: sbb  <subt0=int64#6,<rt3=int64#14
# asm 2: sbb  <subt0=%r9,<rt3=%rbx
sbb  %r9,%rbx

# qhasm:   subt0 = subt1 if carry
# asm 1: cmovc <subt1=int64#7,<subt0=int64#6
# asm 2: cmovc <subt1=%rax,<subt0=%r9
cmovc %rax,%r9

# qhasm:   rt0 -= subt0
# asm 1: sub  <subt0=int64#6,<rt0=int64#11
# asm 2: sub  <subt0=%r9,<rt0=%r13
sub  %r9,%r13
3582
# Store the two results into the output structure at rp (first function
# argument, %rdi): rz into bytes 32..63 and rt into bytes 96..127, as
# four 64-bit little-endian limbs each.  NOTE(review): offsets suggest
# rp holds a multi-field point representation (32-byte field elements);
# confirm the layout against the caller.

# qhasm: *(uint64 *)(rp + 32) = rz0
# asm 1: movq   <rz0=int64#2,32(<rp=int64#1)
# asm 2: movq   <rz0=%rsi,32(<rp=%rdi)
movq   %rsi,32(%rdi)

# qhasm: *(uint64 *)(rp + 40) = rz1
# asm 1: movq   <rz1=int64#3,40(<rp=int64#1)
# asm 2: movq   <rz1=%rdx,40(<rp=%rdi)
movq   %rdx,40(%rdi)

# qhasm: *(uint64 *)(rp + 48) = rz2
# asm 1: movq   <rz2=int64#4,48(<rp=int64#1)
# asm 2: movq   <rz2=%rcx,48(<rp=%rdi)
movq   %rcx,48(%rdi)

# qhasm: *(uint64 *)(rp + 56) = rz3
# asm 1: movq   <rz3=int64#5,56(<rp=int64#1)
# asm 2: movq   <rz3=%r8,56(<rp=%rdi)
movq   %r8,56(%rdi)

# qhasm: *(uint64 *)(rp + 96) = rt0
# asm 1: movq   <rt0=int64#11,96(<rp=int64#1)
# asm 2: movq   <rt0=%r13,96(<rp=%rdi)
movq   %r13,96(%rdi)

# qhasm: *(uint64 *)(rp + 104) = rt1
# asm 1: movq   <rt1=int64#12,104(<rp=int64#1)
# asm 2: movq   <rt1=%r14,104(<rp=%rdi)
movq   %r14,104(%rdi)

# qhasm: *(uint64 *)(rp + 112) = rt2
# asm 1: movq   <rt2=int64#13,112(<rp=int64#1)
# asm 2: movq   <rt2=%r15,112(<rp=%rdi)
movq   %r15,112(%rdi)

# qhasm: *(uint64 *)(rp + 120) = rt3
# asm 1: movq   <rt3=int64#14,120(<rp=int64#1)
# asm 2: movq   <rt3=%rbx,120(<rp=%rdi)
movq   %rbx,120(%rdi)
3622
# Epilogue: reload the seven registers spilled at the (unseen) prologue
# — the SysV callee-saved set r12-r15/rbx/rbp plus r11 — then unwind the
# stack frame and return.

# qhasm:   caller1 = caller1_stack
# asm 1: movq <caller1_stack=stack64#1,>caller1=int64#9
# asm 2: movq <caller1_stack=0(%rsp),>caller1=%r11
# NOTE(review): 0(%rsp) evidently holds the frame-size adjustment,
# since %r11 is added back to %rsp in the leave sequence below.
movq 0(%rsp),%r11

# qhasm:   caller2 = caller2_stack
# asm 1: movq <caller2_stack=stack64#2,>caller2=int64#10
# asm 2: movq <caller2_stack=8(%rsp),>caller2=%r12
movq 8(%rsp),%r12

# qhasm:   caller3 = caller3_stack
# asm 1: movq <caller3_stack=stack64#3,>caller3=int64#11
# asm 2: movq <caller3_stack=16(%rsp),>caller3=%r13
movq 16(%rsp),%r13

# qhasm:   caller4 = caller4_stack
# asm 1: movq <caller4_stack=stack64#4,>caller4=int64#12
# asm 2: movq <caller4_stack=24(%rsp),>caller4=%r14
movq 24(%rsp),%r14

# qhasm:   caller5 = caller5_stack
# asm 1: movq <caller5_stack=stack64#5,>caller5=int64#13
# asm 2: movq <caller5_stack=32(%rsp),>caller5=%r15
movq 32(%rsp),%r15

# qhasm:   caller6 = caller6_stack
# asm 1: movq <caller6_stack=stack64#6,>caller6=int64#14
# asm 2: movq <caller6_stack=40(%rsp),>caller6=%rbx
movq 40(%rsp),%rbx

# qhasm:   caller7 = caller7_stack
# asm 1: movq <caller7_stack=stack64#7,>caller7=int64#15
# asm 2: movq <caller7_stack=48(%rsp),>caller7=%rbp
movq 48(%rsp),%rbp

# qhasm: leave
# Restore caller's %rsp (frame size was reloaded into %r11 above).
# The rax/rdx copies are qhasm's standard two-word return-value slot —
# NOTE(review): presumably unused by callers of this routine; confirm.
add %r11,%rsp
mov %rdi,%rax
mov %rsi,%rdx
ret
3663