
# qhasm: int64 rp

# qhasm: int64 pp

# qhasm: input rp

# qhasm: input pp

# qhasm:   int64 caller1

# qhasm:   int64 caller2

# qhasm:   int64 caller3

# qhasm:   int64 caller4

# qhasm:   int64 caller5

# qhasm:   int64 caller6

# qhasm:   int64 caller7

# qhasm:   caller caller1

# qhasm:   caller caller2

# qhasm:   caller caller3

# qhasm:   caller caller4

# qhasm:   caller caller5

# qhasm:   caller caller6

# qhasm:   caller caller7

# qhasm:   stack64 caller1_stack

# qhasm:   stack64 caller2_stack

# qhasm:   stack64 caller3_stack

# qhasm:   stack64 caller4_stack

# qhasm:   stack64 caller5_stack

# qhasm:   stack64 caller6_stack

# qhasm:   stack64 caller7_stack

# qhasm: int64 rx0

# qhasm: int64 rx1

# qhasm: int64 rx2

# qhasm: int64 rx3

# qhasm: int64 ry0

# qhasm: int64 ry1

# qhasm: int64 ry2

# qhasm: int64 ry3

# qhasm: int64 rz0

# qhasm: int64 rz1

# qhasm: int64 rz2

# qhasm: int64 rz3

# qhasm: int64 rt0

# qhasm: int64 rt1

# qhasm: int64 rt2

# qhasm: int64 rt3

# qhasm: int64 mulr4

# qhasm: int64 mulr5

# qhasm: int64 mulr6

# qhasm: int64 mulr7

# qhasm: int64 mulr8

# qhasm: int64 mulrax

# qhasm: int64 mulrdx

# qhasm: int64 mulx0

# qhasm: int64 mulx1

# qhasm: int64 mulx2

# qhasm: int64 mulx3

# qhasm: int64 mulc

# qhasm: int64 mulzero

# qhasm: int64 muli38

# qhasm: enter crypto_sign_ed25519_amd64_64_ge25519_p1p1_to_p3
.text
.p2align 5
.globl _crypto_sign_ed25519_amd64_64_ge25519_p1p1_to_p3
.globl crypto_sign_ed25519_amd64_64_ge25519_p1p1_to_p3
_crypto_sign_ed25519_amd64_64_ge25519_p1p1_to_p3:
crypto_sign_ed25519_amd64_64_ge25519_p1p1_to_p3:
mov %rsp,%r11
and $31,%r11
add $64,%r11
sub %r11,%rsp
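# Note: the three instructions above round %rsp down to a 32-byte boundary
# and reserve 64 to 95 bytes of scratch, enough for the seven 8-byte
# callee-saved values spilled below. The adjustment stays in %r11 (saved
# as caller1_stack), presumably so the epilogue can restore %rsp.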

# qhasm:   caller1_stack = caller1
# asm 1: movq <caller1=int64#9,>caller1_stack=stack64#1
# asm 2: movq <caller1=%r11,>caller1_stack=0(%rsp)
movq %r11,0(%rsp)

# qhasm:   caller2_stack = caller2
# asm 1: movq <caller2=int64#10,>caller2_stack=stack64#2
# asm 2: movq <caller2=%r12,>caller2_stack=8(%rsp)
movq %r12,8(%rsp)

# qhasm:   caller3_stack = caller3
# asm 1: movq <caller3=int64#11,>caller3_stack=stack64#3
# asm 2: movq <caller3=%r13,>caller3_stack=16(%rsp)
movq %r13,16(%rsp)

# qhasm:   caller4_stack = caller4
# asm 1: movq <caller4=int64#12,>caller4_stack=stack64#4
# asm 2: movq <caller4=%r14,>caller4_stack=24(%rsp)
movq %r14,24(%rsp)

# qhasm:   caller5_stack = caller5
# asm 1: movq <caller5=int64#13,>caller5_stack=stack64#5
# asm 2: movq <caller5=%r15,>caller5_stack=32(%rsp)
movq %r15,32(%rsp)

# qhasm:   caller6_stack = caller6
# asm 1: movq <caller6=int64#14,>caller6_stack=stack64#6
# asm 2: movq <caller6=%rbx,>caller6_stack=40(%rsp)
movq %rbx,40(%rsp)

# qhasm:   caller7_stack = caller7
# asm 1: movq <caller7=int64#15,>caller7_stack=stack64#7
# asm 2: movq <caller7=%rbp,>caller7_stack=48(%rsp)
movq %rbp,48(%rsp)

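# Note: the block below computes the first field multiplication of the
# p1p1 -> p3 conversion: a schoolbook 4x4 product of the 64-bit limbs at
# pp+0..24 with the limbs at pp+96..120, reduced mod 2^255 - 19. The high
# half of the 512-bit product is later folded back with the constant 38,
# using 2^256 = 2*19 = 38 (mod 2^255 - 19).
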
# qhasm:   mulr4 = 0
# asm 1: mov  $0,>mulr4=int64#4
# asm 2: mov  $0,>mulr4=%rcx
mov  $0,%rcx

# qhasm:   mulr5 = 0
# asm 1: mov  $0,>mulr5=int64#5
# asm 2: mov  $0,>mulr5=%r8
mov  $0,%r8

# qhasm:   mulr6 = 0
# asm 1: mov  $0,>mulr6=int64#6
# asm 2: mov  $0,>mulr6=%r9
mov  $0,%r9

# qhasm:   mulr7 = 0
# asm 1: mov  $0,>mulr7=int64#8
# asm 2: mov  $0,>mulr7=%r10
mov  $0,%r10

# qhasm:   mulx0 = *(uint64 *)(pp + 0)
# asm 1: movq   0(<pp=int64#2),>mulx0=int64#9
# asm 2: movq   0(<pp=%rsi),>mulx0=%r11
movq   0(%rsi),%r11

# qhasm:   mulrax = *(uint64 *)(pp + 96)
# asm 1: movq   96(<pp=int64#2),>mulrax=int64#7
# asm 2: movq   96(<pp=%rsi),>mulrax=%rax
movq   96(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx0
# asm 1: mul  <mulx0=int64#9
# asm 2: mul  <mulx0=%r11
mul  %r11

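# Note: the one-operand `mul %r11` above computes the full 128-bit product
# %rdx:%rax = %rax * %r11, matching the qhasm line
# "(uint128) mulrdx mulrax = mulrax * mulx0".
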
# qhasm:   rx0 = mulrax
# asm 1: mov  <mulrax=int64#7,>rx0=int64#10
# asm 2: mov  <mulrax=%rax,>rx0=%r12
mov  %rax,%r12

# qhasm:   rx1 = mulrdx
# asm 1: mov  <mulrdx=int64#3,>rx1=int64#11
# asm 2: mov  <mulrdx=%rdx,>rx1=%r13
mov  %rdx,%r13

# qhasm:   mulrax = *(uint64 *)(pp + 104)
# asm 1: movq   104(<pp=int64#2),>mulrax=int64#7
# asm 2: movq   104(<pp=%rsi),>mulrax=%rax
movq   104(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx0
# asm 1: mul  <mulx0=int64#9
# asm 2: mul  <mulx0=%r11
mul  %r11

# qhasm:   carry? rx1 += mulrax
# asm 1: add  <mulrax=int64#7,<rx1=int64#11
# asm 2: add  <mulrax=%rax,<rx1=%r13
add  %rax,%r13

# qhasm:   rx2 = 0
# asm 1: mov  $0,>rx2=int64#12
# asm 2: mov  $0,>rx2=%r14
mov  $0,%r14

# qhasm:   rx2 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<rx2=int64#12
# asm 2: adc <mulrdx=%rdx,<rx2=%r14
adc %rdx,%r14

# qhasm:   mulrax = *(uint64 *)(pp + 112)
# asm 1: movq   112(<pp=int64#2),>mulrax=int64#7
# asm 2: movq   112(<pp=%rsi),>mulrax=%rax
movq   112(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx0
# asm 1: mul  <mulx0=int64#9
# asm 2: mul  <mulx0=%r11
mul  %r11

# qhasm:   carry? rx2 += mulrax
# asm 1: add  <mulrax=int64#7,<rx2=int64#12
# asm 2: add  <mulrax=%rax,<rx2=%r14
add  %rax,%r14

# qhasm:   rx3 = 0
# asm 1: mov  $0,>rx3=int64#13
# asm 2: mov  $0,>rx3=%r15
mov  $0,%r15

# qhasm:   rx3 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<rx3=int64#13
# asm 2: adc <mulrdx=%rdx,<rx3=%r15
adc %rdx,%r15

# qhasm:   mulrax = *(uint64 *)(pp + 120)
# asm 1: movq   120(<pp=int64#2),>mulrax=int64#7
# asm 2: movq   120(<pp=%rsi),>mulrax=%rax
movq   120(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx0
# asm 1: mul  <mulx0=int64#9
# asm 2: mul  <mulx0=%r11
mul  %r11

# qhasm:   carry? rx3 += mulrax
# asm 1: add  <mulrax=int64#7,<rx3=int64#13
# asm 2: add  <mulrax=%rax,<rx3=%r15
add  %rax,%r15

# qhasm:   mulr4 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr4=int64#4
# asm 2: adc <mulrdx=%rdx,<mulr4=%rcx
adc %rdx,%rcx

# qhasm:   mulx1 = *(uint64 *)(pp + 8)
# asm 1: movq   8(<pp=int64#2),>mulx1=int64#9
# asm 2: movq   8(<pp=%rsi),>mulx1=%r11
movq   8(%rsi),%r11

# qhasm:   mulrax = *(uint64 *)(pp + 96)
# asm 1: movq   96(<pp=int64#2),>mulrax=int64#7
# asm 2: movq   96(<pp=%rsi),>mulrax=%rax
movq   96(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx1
# asm 1: mul  <mulx1=int64#9
# asm 2: mul  <mulx1=%r11
mul  %r11

# qhasm:   carry? rx1 += mulrax
# asm 1: add  <mulrax=int64#7,<rx1=int64#11
# asm 2: add  <mulrax=%rax,<rx1=%r13
add  %rax,%r13

# qhasm:   mulc = 0
# asm 1: mov  $0,>mulc=int64#14
# asm 2: mov  $0,>mulc=%rbx
mov  $0,%rbx

# qhasm:   mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
adc %rdx,%rbx

# qhasm:   mulrax = *(uint64 *)(pp + 104)
# asm 1: movq   104(<pp=int64#2),>mulrax=int64#7
# asm 2: movq   104(<pp=%rsi),>mulrax=%rax
movq   104(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx1
# asm 1: mul  <mulx1=int64#9
# asm 2: mul  <mulx1=%r11
mul  %r11

# qhasm:   carry? rx2 += mulrax
# asm 1: add  <mulrax=int64#7,<rx2=int64#12
# asm 2: add  <mulrax=%rax,<rx2=%r14
add  %rax,%r14

# qhasm:   mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx

# qhasm:   carry? rx2 += mulc
# asm 1: add  <mulc=int64#14,<rx2=int64#12
# asm 2: add  <mulc=%rbx,<rx2=%r14
add  %rbx,%r14

# qhasm:   mulc = 0
# asm 1: mov  $0,>mulc=int64#14
# asm 2: mov  $0,>mulc=%rbx
mov  $0,%rbx

# qhasm:   mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
adc %rdx,%rbx

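# Note: the add/adc sequence above is the carry-save idiom used throughout
# this routine: the carry out of each 64-bit column is folded into %rdx
# (adc $0,%rdx) or captured in mulc (%rbx), so no carry flag has to survive
# across the next `mul`, which clobbers the flags.
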
# qhasm:   mulrax = *(uint64 *)(pp + 112)
# asm 1: movq   112(<pp=int64#2),>mulrax=int64#7
# asm 2: movq   112(<pp=%rsi),>mulrax=%rax
movq   112(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx1
# asm 1: mul  <mulx1=int64#9
# asm 2: mul  <mulx1=%r11
mul  %r11

# qhasm:   carry? rx3 += mulrax
# asm 1: add  <mulrax=int64#7,<rx3=int64#13
# asm 2: add  <mulrax=%rax,<rx3=%r15
add  %rax,%r15

# qhasm:   mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx

# qhasm:   carry? rx3 += mulc
# asm 1: add  <mulc=int64#14,<rx3=int64#13
# asm 2: add  <mulc=%rbx,<rx3=%r15
add  %rbx,%r15

# qhasm:   mulc = 0
# asm 1: mov  $0,>mulc=int64#14
# asm 2: mov  $0,>mulc=%rbx
mov  $0,%rbx

# qhasm:   mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
adc %rdx,%rbx

# qhasm:   mulrax = *(uint64 *)(pp + 120)
# asm 1: movq   120(<pp=int64#2),>mulrax=int64#7
# asm 2: movq   120(<pp=%rsi),>mulrax=%rax
movq   120(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx1
# asm 1: mul  <mulx1=int64#9
# asm 2: mul  <mulx1=%r11
mul  %r11

# qhasm:   carry? mulr4 += mulrax
# asm 1: add  <mulrax=int64#7,<mulr4=int64#4
# asm 2: add  <mulrax=%rax,<mulr4=%rcx
add  %rax,%rcx

# qhasm:   mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx

# qhasm:   carry? mulr4 += mulc
# asm 1: add  <mulc=int64#14,<mulr4=int64#4
# asm 2: add  <mulc=%rbx,<mulr4=%rcx
add  %rbx,%rcx

# qhasm:   mulr5 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr5=int64#5
# asm 2: adc <mulrdx=%rdx,<mulr5=%r8
adc %rdx,%r8

# qhasm:   mulx2 = *(uint64 *)(pp + 16)
# asm 1: movq   16(<pp=int64#2),>mulx2=int64#9
# asm 2: movq   16(<pp=%rsi),>mulx2=%r11
movq   16(%rsi),%r11

# qhasm:   mulrax = *(uint64 *)(pp + 96)
# asm 1: movq   96(<pp=int64#2),>mulrax=int64#7
# asm 2: movq   96(<pp=%rsi),>mulrax=%rax
movq   96(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx2
# asm 1: mul  <mulx2=int64#9
# asm 2: mul  <mulx2=%r11
mul  %r11

# qhasm:   carry? rx2 += mulrax
# asm 1: add  <mulrax=int64#7,<rx2=int64#12
# asm 2: add  <mulrax=%rax,<rx2=%r14
add  %rax,%r14

# qhasm:   mulc = 0
# asm 1: mov  $0,>mulc=int64#14
# asm 2: mov  $0,>mulc=%rbx
mov  $0,%rbx

# qhasm:   mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
adc %rdx,%rbx

# qhasm:   mulrax = *(uint64 *)(pp + 104)
# asm 1: movq   104(<pp=int64#2),>mulrax=int64#7
# asm 2: movq   104(<pp=%rsi),>mulrax=%rax
movq   104(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx2
# asm 1: mul  <mulx2=int64#9
# asm 2: mul  <mulx2=%r11
mul  %r11

# qhasm:   carry? rx3 += mulrax
# asm 1: add  <mulrax=int64#7,<rx3=int64#13
# asm 2: add  <mulrax=%rax,<rx3=%r15
add  %rax,%r15

# qhasm:   mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx

# qhasm:   carry? rx3 += mulc
# asm 1: add  <mulc=int64#14,<rx3=int64#13
# asm 2: add  <mulc=%rbx,<rx3=%r15
add  %rbx,%r15

# qhasm:   mulc = 0
# asm 1: mov  $0,>mulc=int64#14
# asm 2: mov  $0,>mulc=%rbx
mov  $0,%rbx

# qhasm:   mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
adc %rdx,%rbx

# qhasm:   mulrax = *(uint64 *)(pp + 112)
# asm 1: movq   112(<pp=int64#2),>mulrax=int64#7
# asm 2: movq   112(<pp=%rsi),>mulrax=%rax
movq   112(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx2
# asm 1: mul  <mulx2=int64#9
# asm 2: mul  <mulx2=%r11
mul  %r11

# qhasm:   carry? mulr4 += mulrax
# asm 1: add  <mulrax=int64#7,<mulr4=int64#4
# asm 2: add  <mulrax=%rax,<mulr4=%rcx
add  %rax,%rcx

# qhasm:   mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx

# qhasm:   carry? mulr4 += mulc
# asm 1: add  <mulc=int64#14,<mulr4=int64#4
# asm 2: add  <mulc=%rbx,<mulr4=%rcx
add  %rbx,%rcx

# qhasm:   mulc = 0
# asm 1: mov  $0,>mulc=int64#14
# asm 2: mov  $0,>mulc=%rbx
mov  $0,%rbx

# qhasm:   mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
adc %rdx,%rbx

# qhasm:   mulrax = *(uint64 *)(pp + 120)
# asm 1: movq   120(<pp=int64#2),>mulrax=int64#7
# asm 2: movq   120(<pp=%rsi),>mulrax=%rax
movq   120(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx2
# asm 1: mul  <mulx2=int64#9
# asm 2: mul  <mulx2=%r11
mul  %r11

# qhasm:   carry? mulr5 += mulrax
# asm 1: add  <mulrax=int64#7,<mulr5=int64#5
# asm 2: add  <mulrax=%rax,<mulr5=%r8
add  %rax,%r8

# qhasm:   mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx

# qhasm:   carry? mulr5 += mulc
# asm 1: add  <mulc=int64#14,<mulr5=int64#5
# asm 2: add  <mulc=%rbx,<mulr5=%r8
add  %rbx,%r8

# qhasm:   mulr6 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr6=int64#6
# asm 2: adc <mulrdx=%rdx,<mulr6=%r9
adc %rdx,%r9

# qhasm:   mulx3 = *(uint64 *)(pp + 24)
# asm 1: movq   24(<pp=int64#2),>mulx3=int64#9
# asm 2: movq   24(<pp=%rsi),>mulx3=%r11
movq   24(%rsi),%r11

# qhasm:   mulrax = *(uint64 *)(pp + 96)
# asm 1: movq   96(<pp=int64#2),>mulrax=int64#7
# asm 2: movq   96(<pp=%rsi),>mulrax=%rax
movq   96(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx3
# asm 1: mul  <mulx3=int64#9
# asm 2: mul  <mulx3=%r11
mul  %r11

# qhasm:   carry? rx3 += mulrax
# asm 1: add  <mulrax=int64#7,<rx3=int64#13
# asm 2: add  <mulrax=%rax,<rx3=%r15
add  %rax,%r15

# qhasm:   mulc = 0
# asm 1: mov  $0,>mulc=int64#14
# asm 2: mov  $0,>mulc=%rbx
mov  $0,%rbx

# qhasm:   mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
adc %rdx,%rbx

# qhasm:   mulrax = *(uint64 *)(pp + 104)
# asm 1: movq   104(<pp=int64#2),>mulrax=int64#7
# asm 2: movq   104(<pp=%rsi),>mulrax=%rax
movq   104(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx3
# asm 1: mul  <mulx3=int64#9
# asm 2: mul  <mulx3=%r11
mul  %r11

# qhasm:   carry? mulr4 += mulrax
# asm 1: add  <mulrax=int64#7,<mulr4=int64#4
# asm 2: add  <mulrax=%rax,<mulr4=%rcx
add  %rax,%rcx

# qhasm:   mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx

# qhasm:   carry? mulr4 += mulc
# asm 1: add  <mulc=int64#14,<mulr4=int64#4
# asm 2: add  <mulc=%rbx,<mulr4=%rcx
add  %rbx,%rcx

# qhasm:   mulc = 0
# asm 1: mov  $0,>mulc=int64#14
# asm 2: mov  $0,>mulc=%rbx
mov  $0,%rbx

# qhasm:   mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
adc %rdx,%rbx

# qhasm:   mulrax = *(uint64 *)(pp + 112)
# asm 1: movq   112(<pp=int64#2),>mulrax=int64#7
# asm 2: movq   112(<pp=%rsi),>mulrax=%rax
movq   112(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx3
# asm 1: mul  <mulx3=int64#9
# asm 2: mul  <mulx3=%r11
mul  %r11

# qhasm:   carry? mulr5 += mulrax
# asm 1: add  <mulrax=int64#7,<mulr5=int64#5
# asm 2: add  <mulrax=%rax,<mulr5=%r8
add  %rax,%r8

# qhasm:   mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx

# qhasm:   carry? mulr5 += mulc
# asm 1: add  <mulc=int64#14,<mulr5=int64#5
# asm 2: add  <mulc=%rbx,<mulr5=%r8
add  %rbx,%r8

# qhasm:   mulc = 0
# asm 1: mov  $0,>mulc=int64#14
# asm 2: mov  $0,>mulc=%rbx
mov  $0,%rbx

# qhasm:   mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
adc %rdx,%rbx

# qhasm:   mulrax = *(uint64 *)(pp + 120)
# asm 1: movq   120(<pp=int64#2),>mulrax=int64#7
# asm 2: movq   120(<pp=%rsi),>mulrax=%rax
movq   120(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx3
# asm 1: mul  <mulx3=int64#9
# asm 2: mul  <mulx3=%r11
mul  %r11

# qhasm:   carry? mulr6 += mulrax
# asm 1: add  <mulrax=int64#7,<mulr6=int64#6
# asm 2: add  <mulrax=%rax,<mulr6=%r9
add  %rax,%r9

# qhasm:   mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx

# qhasm:   carry? mulr6 += mulc
# asm 1: add  <mulc=int64#14,<mulr6=int64#6
# asm 2: add  <mulc=%rbx,<mulr6=%r9
add  %rbx,%r9

# qhasm:   mulr7 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr7=int64#8
# asm 2: adc <mulrdx=%rdx,<mulr7=%r10
adc %rdx,%r10

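# Note: at this point rx0..rx3 (%r12..%r15) hold the low 256 bits of the
# product and mulr4..mulr7 (%rcx,%r8,%r9,%r10) the high 256 bits. The block
# below multiplies each high limb by crypto_sign_ed25519_amd64_64_38 (the
# 64-bit constant 38) and adds the results into the low limbs, since
# 2^256 = 38 (mod 2^255 - 19).
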
# qhasm:   mulrax = mulr4
# asm 1: mov  <mulr4=int64#4,>mulrax=int64#7
# asm 2: mov  <mulr4=%rcx,>mulrax=%rax
mov  %rcx,%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq  crypto_sign_ed25519_amd64_64_38

# qhasm:   mulr4 = mulrax
# asm 1: mov  <mulrax=int64#7,>mulr4=int64#4
# asm 2: mov  <mulrax=%rax,>mulr4=%rcx
mov  %rax,%rcx

# qhasm:   mulrax = mulr5
# asm 1: mov  <mulr5=int64#5,>mulrax=int64#7
# asm 2: mov  <mulr5=%r8,>mulrax=%rax
mov  %r8,%rax

# qhasm:   mulr5 = mulrdx
# asm 1: mov  <mulrdx=int64#3,>mulr5=int64#5
# asm 2: mov  <mulrdx=%rdx,>mulr5=%r8
mov  %rdx,%r8

# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq  crypto_sign_ed25519_amd64_64_38

# qhasm:   carry? mulr5 += mulrax
# asm 1: add  <mulrax=int64#7,<mulr5=int64#5
# asm 2: add  <mulrax=%rax,<mulr5=%r8
add  %rax,%r8

# qhasm:   mulrax = mulr6
# asm 1: mov  <mulr6=int64#6,>mulrax=int64#7
# asm 2: mov  <mulr6=%r9,>mulrax=%rax
mov  %r9,%rax

# qhasm:   mulr6 = 0
# asm 1: mov  $0,>mulr6=int64#6
# asm 2: mov  $0,>mulr6=%r9
mov  $0,%r9

# qhasm:   mulr6 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr6=int64#6
# asm 2: adc <mulrdx=%rdx,<mulr6=%r9
adc %rdx,%r9

# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq  crypto_sign_ed25519_amd64_64_38

# qhasm:   carry? mulr6 += mulrax
# asm 1: add  <mulrax=int64#7,<mulr6=int64#6
# asm 2: add  <mulrax=%rax,<mulr6=%r9
add  %rax,%r9

# qhasm:   mulrax = mulr7
# asm 1: mov  <mulr7=int64#8,>mulrax=int64#7
# asm 2: mov  <mulr7=%r10,>mulrax=%rax
mov  %r10,%rax

# qhasm:   mulr7 = 0
# asm 1: mov  $0,>mulr7=int64#8
# asm 2: mov  $0,>mulr7=%r10
mov  $0,%r10

# qhasm:   mulr7 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr7=int64#8
# asm 2: adc <mulrdx=%rdx,<mulr7=%r10
adc %rdx,%r10

# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq  crypto_sign_ed25519_amd64_64_38

# qhasm:   carry? mulr7 += mulrax
# asm 1: add  <mulrax=int64#7,<mulr7=int64#8
# asm 2: add  <mulrax=%rax,<mulr7=%r10
add  %rax,%r10

# qhasm:   mulr8 = 0
# asm 1: mov  $0,>mulr8=int64#7
# asm 2: mov  $0,>mulr8=%rax
mov  $0,%rax

# qhasm:   mulr8 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr8=int64#7
# asm 2: adc <mulrdx=%rdx,<mulr8=%rax
adc %rdx,%rax

# qhasm:   carry? rx0 += mulr4
# asm 1: add  <mulr4=int64#4,<rx0=int64#10
# asm 2: add  <mulr4=%rcx,<rx0=%r12
add  %rcx,%r12

# qhasm:   carry? rx1 += mulr5 + carry
# asm 1: adc <mulr5=int64#5,<rx1=int64#11
# asm 2: adc <mulr5=%r8,<rx1=%r13
adc %r8,%r13

# qhasm:   carry? rx2 += mulr6 + carry
# asm 1: adc <mulr6=int64#6,<rx2=int64#12
# asm 2: adc <mulr6=%r9,<rx2=%r14
adc %r9,%r14

# qhasm:   carry? rx3 += mulr7 + carry
# asm 1: adc <mulr7=int64#8,<rx3=int64#13
# asm 2: adc <mulr7=%r10,<rx3=%r15
adc %r10,%r15

# qhasm:   mulzero = 0
# asm 1: mov  $0,>mulzero=int64#3
# asm 2: mov  $0,>mulzero=%rdx
mov  $0,%rdx

# qhasm:   mulr8 += mulzero + carry
# asm 1: adc <mulzero=int64#3,<mulr8=int64#7
# asm 2: adc <mulzero=%rdx,<mulr8=%rax
adc %rdx,%rax

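# Note: mulr8 (%rax) now holds the small carry out of the 256-bit addition.
# It is folded in below as mulr8*38, and a possible final carry (mulzero)
# is folded once more, leaving rx in 256 bits; as is usual in this library,
# the value is presumably only weakly reduced, i.e. not necessarily below
# 2^255 - 19.
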
# qhasm:   mulr8 *= 38
# asm 1: imulq  $38,<mulr8=int64#7,>mulr8=int64#4
# asm 2: imulq  $38,<mulr8=%rax,>mulr8=%rcx
imulq  $38,%rax,%rcx

# qhasm:   carry? rx0 += mulr8
# asm 1: add  <mulr8=int64#4,<rx0=int64#10
# asm 2: add  <mulr8=%rcx,<rx0=%r12
add  %rcx,%r12

# qhasm:   carry? rx1 += mulzero + carry
# asm 1: adc <mulzero=int64#3,<rx1=int64#11
# asm 2: adc <mulzero=%rdx,<rx1=%r13
adc %rdx,%r13

# qhasm:   carry? rx2 += mulzero + carry
# asm 1: adc <mulzero=int64#3,<rx2=int64#12
# asm 2: adc <mulzero=%rdx,<rx2=%r14
adc %rdx,%r14

# qhasm:   carry? rx3 += mulzero + carry
# asm 1: adc <mulzero=int64#3,<rx3=int64#13
# asm 2: adc <mulzero=%rdx,<rx3=%r15
adc %rdx,%r15

# qhasm:   mulzero += mulzero + carry
# asm 1: adc <mulzero=int64#3,<mulzero=int64#3
# asm 2: adc <mulzero=%rdx,<mulzero=%rdx
adc %rdx,%rdx

# qhasm:   mulzero *= 38
# asm 1: imulq  $38,<mulzero=int64#3,>mulzero=int64#3
# asm 2: imulq  $38,<mulzero=%rdx,>mulzero=%rdx
imulq  $38,%rdx,%rdx

# qhasm:   rx0 += mulzero
# asm 1: add  <mulzero=int64#3,<rx0=int64#10
# asm 2: add  <mulzero=%rdx,<rx0=%r12
add  %rdx,%r12

# qhasm: *(uint64 *)(rp + 0) = rx0
# asm 1: movq   <rx0=int64#10,0(<rp=int64#1)
# asm 2: movq   <rx0=%r12,0(<rp=%rdi)
movq   %r12,0(%rdi)

# qhasm: *(uint64 *)(rp + 8) = rx1
# asm 1: movq   <rx1=int64#11,8(<rp=int64#1)
# asm 2: movq   <rx1=%r13,8(<rp=%rdi)
movq   %r13,8(%rdi)

# qhasm: *(uint64 *)(rp + 16) = rx2
# asm 1: movq   <rx2=int64#12,16(<rp=int64#1)
# asm 2: movq   <rx2=%r14,16(<rp=%rdi)
movq   %r14,16(%rdi)

# qhasm: *(uint64 *)(rp + 24) = rx3
# asm 1: movq   <rx3=int64#13,24(<rp=int64#1)
# asm 2: movq   <rx3=%r15,24(<rp=%rdi)
movq   %r15,24(%rdi)

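# Note: rx is now stored to rp+0..24. The same multiply-and-reduce routine
# repeats below with different offsets, multiplying the limbs at pp+32..56
# by those at pp+64..88 to produce the second output, ry.
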
# qhasm:   mulr4 = 0
# asm 1: mov  $0,>mulr4=int64#4
# asm 2: mov  $0,>mulr4=%rcx
mov  $0,%rcx

# qhasm:   mulr5 = 0
# asm 1: mov  $0,>mulr5=int64#5
# asm 2: mov  $0,>mulr5=%r8
mov  $0,%r8

# qhasm:   mulr6 = 0
# asm 1: mov  $0,>mulr6=int64#6
# asm 2: mov  $0,>mulr6=%r9
mov  $0,%r9

# qhasm:   mulr7 = 0
# asm 1: mov  $0,>mulr7=int64#8
# asm 2: mov  $0,>mulr7=%r10
mov  $0,%r10

# qhasm:   mulx0 = *(uint64 *)(pp + 64)
# asm 1: movq   64(<pp=int64#2),>mulx0=int64#9
# asm 2: movq   64(<pp=%rsi),>mulx0=%r11
movq   64(%rsi),%r11

# qhasm:   mulrax = *(uint64 *)(pp + 32)
# asm 1: movq   32(<pp=int64#2),>mulrax=int64#7
# asm 2: movq   32(<pp=%rsi),>mulrax=%rax
movq   32(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx0
# asm 1: mul  <mulx0=int64#9
# asm 2: mul  <mulx0=%r11
mul  %r11

# qhasm:   ry0 = mulrax
# asm 1: mov  <mulrax=int64#7,>ry0=int64#10
# asm 2: mov  <mulrax=%rax,>ry0=%r12
mov  %rax,%r12

# qhasm:   ry1 = mulrdx
# asm 1: mov  <mulrdx=int64#3,>ry1=int64#11
# asm 2: mov  <mulrdx=%rdx,>ry1=%r13
mov  %rdx,%r13

# qhasm:   mulrax = *(uint64 *)(pp + 40)
# asm 1: movq   40(<pp=int64#2),>mulrax=int64#7
# asm 2: movq   40(<pp=%rsi),>mulrax=%rax
movq   40(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx0
# asm 1: mul  <mulx0=int64#9
# asm 2: mul  <mulx0=%r11
mul  %r11

# qhasm:   carry? ry1 += mulrax
# asm 1: add  <mulrax=int64#7,<ry1=int64#11
# asm 2: add  <mulrax=%rax,<ry1=%r13
add  %rax,%r13

# qhasm:   ry2 = 0
# asm 1: mov  $0,>ry2=int64#12
# asm 2: mov  $0,>ry2=%r14
mov  $0,%r14

# qhasm:   ry2 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<ry2=int64#12
# asm 2: adc <mulrdx=%rdx,<ry2=%r14
adc %rdx,%r14

# qhasm:   mulrax = *(uint64 *)(pp + 48)
# asm 1: movq   48(<pp=int64#2),>mulrax=int64#7
# asm 2: movq   48(<pp=%rsi),>mulrax=%rax
movq   48(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx0
# asm 1: mul  <mulx0=int64#9
# asm 2: mul  <mulx0=%r11
mul  %r11

# qhasm:   carry? ry2 += mulrax
# asm 1: add  <mulrax=int64#7,<ry2=int64#12
# asm 2: add  <mulrax=%rax,<ry2=%r14
add  %rax,%r14

# qhasm:   ry3 = 0
# asm 1: mov  $0,>ry3=int64#13
# asm 2: mov  $0,>ry3=%r15
mov  $0,%r15

# qhasm:   ry3 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<ry3=int64#13
# asm 2: adc <mulrdx=%rdx,<ry3=%r15
adc %rdx,%r15

# qhasm:   mulrax = *(uint64 *)(pp + 56)
# asm 1: movq   56(<pp=int64#2),>mulrax=int64#7
# asm 2: movq   56(<pp=%rsi),>mulrax=%rax
movq   56(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx0
# asm 1: mul  <mulx0=int64#9
# asm 2: mul  <mulx0=%r11
mul  %r11

# qhasm:   carry? ry3 += mulrax
# asm 1: add  <mulrax=int64#7,<ry3=int64#13
# asm 2: add  <mulrax=%rax,<ry3=%r15
add  %rax,%r15

# qhasm:   mulr4 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr4=int64#4
# asm 2: adc <mulrdx=%rdx,<mulr4=%rcx
adc %rdx,%rcx

# qhasm:   mulx1 = *(uint64 *)(pp + 72)
# asm 1: movq   72(<pp=int64#2),>mulx1=int64#9
# asm 2: movq   72(<pp=%rsi),>mulx1=%r11
movq   72(%rsi),%r11

# qhasm:   mulrax = *(uint64 *)(pp + 32)
# asm 1: movq   32(<pp=int64#2),>mulrax=int64#7
# asm 2: movq   32(<pp=%rsi),>mulrax=%rax
movq   32(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx1
# asm 1: mul  <mulx1=int64#9
# asm 2: mul  <mulx1=%r11
mul  %r11

# qhasm:   carry? ry1 += mulrax
# asm 1: add  <mulrax=int64#7,<ry1=int64#11
# asm 2: add  <mulrax=%rax,<ry1=%r13
add  %rax,%r13

# qhasm:   mulc = 0
# asm 1: mov  $0,>mulc=int64#14
# asm 2: mov  $0,>mulc=%rbx
mov  $0,%rbx

# qhasm:   mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
adc %rdx,%rbx

# qhasm:   mulrax = *(uint64 *)(pp + 40)
# asm 1: movq   40(<pp=int64#2),>mulrax=int64#7
# asm 2: movq   40(<pp=%rsi),>mulrax=%rax
movq   40(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx1
# asm 1: mul  <mulx1=int64#9
# asm 2: mul  <mulx1=%r11
mul  %r11

# qhasm:   carry? ry2 += mulrax
# asm 1: add  <mulrax=int64#7,<ry2=int64#12
# asm 2: add  <mulrax=%rax,<ry2=%r14
add  %rax,%r14

# qhasm:   mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx

# qhasm:   carry? ry2 += mulc
# asm 1: add  <mulc=int64#14,<ry2=int64#12
# asm 2: add  <mulc=%rbx,<ry2=%r14
add  %rbx,%r14

# qhasm:   mulc = 0
# asm 1: mov  $0,>mulc=int64#14
# asm 2: mov  $0,>mulc=%rbx
mov  $0,%rbx

# qhasm:   mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
adc %rdx,%rbx

# qhasm:   mulrax = *(uint64 *)(pp + 48)
# asm 1: movq   48(<pp=int64#2),>mulrax=int64#7
# asm 2: movq   48(<pp=%rsi),>mulrax=%rax
movq   48(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx1
# asm 1: mul  <mulx1=int64#9
# asm 2: mul  <mulx1=%r11
mul  %r11

# qhasm:   carry? ry3 += mulrax
# asm 1: add  <mulrax=int64#7,<ry3=int64#13
# asm 2: add  <mulrax=%rax,<ry3=%r15
add  %rax,%r15

# qhasm:   mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx

# qhasm:   carry? ry3 += mulc
# asm 1: add  <mulc=int64#14,<ry3=int64#13
# asm 2: add  <mulc=%rbx,<ry3=%r15
add  %rbx,%r15

# qhasm:   mulc = 0
# asm 1: mov  $0,>mulc=int64#14
# asm 2: mov  $0,>mulc=%rbx
mov  $0,%rbx

# qhasm:   mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
adc %rdx,%rbx

# qhasm:   mulrax = *(uint64 *)(pp + 56)
# asm 1: movq   56(<pp=int64#2),>mulrax=int64#7
# asm 2: movq   56(<pp=%rsi),>mulrax=%rax
movq   56(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx1
# asm 1: mul  <mulx1=int64#9
# asm 2: mul  <mulx1=%r11
mul  %r11

# qhasm:   carry? mulr4 += mulrax
# asm 1: add  <mulrax=int64#7,<mulr4=int64#4
# asm 2: add  <mulrax=%rax,<mulr4=%rcx
add  %rax,%rcx

# qhasm:   mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx

# qhasm:   carry? mulr4 += mulc
# asm 1: add  <mulc=int64#14,<mulr4=int64#4
# asm 2: add  <mulc=%rbx,<mulr4=%rcx
add  %rbx,%rcx

# qhasm:   mulr5 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr5=int64#5
# asm 2: adc <mulrdx=%rdx,<mulr5=%r8
adc %rdx,%r8

# qhasm:   mulx2 = *(uint64 *)(pp + 80)
# asm 1: movq   80(<pp=int64#2),>mulx2=int64#9
# asm 2: movq   80(<pp=%rsi),>mulx2=%r11
movq   80(%rsi),%r11

# qhasm:   mulrax = *(uint64 *)(pp + 32)
# asm 1: movq   32(<pp=int64#2),>mulrax=int64#7
# asm 2: movq   32(<pp=%rsi),>mulrax=%rax
movq   32(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx2
# asm 1: mul  <mulx2=int64#9
# asm 2: mul  <mulx2=%r11
mul  %r11

# qhasm:   carry? ry2 += mulrax
# asm 1: add  <mulrax=int64#7,<ry2=int64#12
# asm 2: add  <mulrax=%rax,<ry2=%r14
add  %rax,%r14

# qhasm:   mulc = 0
# asm 1: mov  $0,>mulc=int64#14
# asm 2: mov  $0,>mulc=%rbx
mov  $0,%rbx

# qhasm:   mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
adc %rdx,%rbx

# qhasm:   mulrax = *(uint64 *)(pp + 40)
# asm 1: movq   40(<pp=int64#2),>mulrax=int64#7
# asm 2: movq   40(<pp=%rsi),>mulrax=%rax
movq   40(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx2
# asm 1: mul  <mulx2=int64#9
# asm 2: mul  <mulx2=%r11
mul  %r11

# qhasm:   carry? ry3 += mulrax
# asm 1: add  <mulrax=int64#7,<ry3=int64#13
# asm 2: add  <mulrax=%rax,<ry3=%r15
add  %rax,%r15

# qhasm:   mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx

# qhasm:   carry? ry3 += mulc
# asm 1: add  <mulc=int64#14,<ry3=int64#13
# asm 2: add  <mulc=%rbx,<ry3=%r15
add  %rbx,%r15

# qhasm:   mulc = 0
# asm 1: mov  $0,>mulc=int64#14
# asm 2: mov  $0,>mulc=%rbx
mov  $0,%rbx

# qhasm:   mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
adc %rdx,%rbx

# qhasm:   mulrax = *(uint64 *)(pp + 48)
# asm 1: movq   48(<pp=int64#2),>mulrax=int64#7
# asm 2: movq   48(<pp=%rsi),>mulrax=%rax
movq   48(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx2
# asm 1: mul  <mulx2=int64#9
# asm 2: mul  <mulx2=%r11
mul  %r11

# qhasm:   carry? mulr4 += mulrax
# asm 1: add  <mulrax=int64#7,<mulr4=int64#4
# asm 2: add  <mulrax=%rax,<mulr4=%rcx
add  %rax,%rcx

# qhasm:   mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx

# qhasm:   carry? mulr4 += mulc
# asm 1: add  <mulc=int64#14,<mulr4=int64#4
# asm 2: add  <mulc=%rbx,<mulr4=%rcx
add  %rbx,%rcx

# qhasm:   mulc = 0
# asm 1: mov  $0,>mulc=int64#14
# asm 2: mov  $0,>mulc=%rbx
mov  $0,%rbx

# qhasm:   mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
adc %rdx,%rbx

# qhasm:   mulrax = *(uint64 *)(pp + 56)
# asm 1: movq   56(<pp=int64#2),>mulrax=int64#7
# asm 2: movq   56(<pp=%rsi),>mulrax=%rax
movq   56(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx2
# asm 1: mul  <mulx2=int64#9
# asm 2: mul  <mulx2=%r11
mul  %r11

# qhasm:   carry? mulr5 += mulrax
# asm 1: add  <mulrax=int64#7,<mulr5=int64#5
# asm 2: add  <mulrax=%rax,<mulr5=%r8
add  %rax,%r8

# qhasm:   mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx

# qhasm:   carry? mulr5 += mulc
# asm 1: add  <mulc=int64#14,<mulr5=int64#5
# asm 2: add  <mulc=%rbx,<mulr5=%r8
add  %rbx,%r8

# qhasm:   mulr6 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr6=int64#6
# asm 2: adc <mulrdx=%rdx,<mulr6=%r9
adc %rdx,%r9

# qhasm:   mulx3 = *(uint64 *)(pp + 88)
# asm 1: movq   88(<pp=int64#2),>mulx3=int64#9
# asm 2: movq   88(<pp=%rsi),>mulx3=%r11
movq   88(%rsi),%r11

# qhasm:   mulrax = *(uint64 *)(pp + 32)
# asm 1: movq   32(<pp=int64#2),>mulrax=int64#7
# asm 2: movq   32(<pp=%rsi),>mulrax=%rax
movq   32(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx3
# asm 1: mul  <mulx3=int64#9
# asm 2: mul  <mulx3=%r11
mul  %r11

# qhasm:   carry? ry3 += mulrax
# asm 1: add  <mulrax=int64#7,<ry3=int64#13
# asm 2: add  <mulrax=%rax,<ry3=%r15
add  %rax,%r15

# qhasm:   mulc = 0
# asm 1: mov  $0,>mulc=int64#14
# asm 2: mov  $0,>mulc=%rbx
mov  $0,%rbx

# qhasm:   mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
adc %rdx,%rbx

# qhasm:   mulrax = *(uint64 *)(pp + 40)
# asm 1: movq   40(<pp=int64#2),>mulrax=int64#7
# asm 2: movq   40(<pp=%rsi),>mulrax=%rax
movq   40(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx3
# asm 1: mul  <mulx3=int64#9
# asm 2: mul  <mulx3=%r11
mul  %r11

# qhasm:   carry? mulr4 += mulrax
# asm 1: add  <mulrax=int64#7,<mulr4=int64#4
# asm 2: add  <mulrax=%rax,<mulr4=%rcx
add  %rax,%rcx

# qhasm:   mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx

# qhasm:   carry? mulr4 += mulc
# asm 1: add  <mulc=int64#14,<mulr4=int64#4
# asm 2: add  <mulc=%rbx,<mulr4=%rcx
add  %rbx,%rcx

# qhasm:   mulc = 0
# asm 1: mov  $0,>mulc=int64#14
# asm 2: mov  $0,>mulc=%rbx
mov  $0,%rbx

# qhasm:   mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
adc %rdx,%rbx

# qhasm:   mulrax = *(uint64 *)(pp + 48)
# asm 1: movq   48(<pp=int64#2),>mulrax=int64#7
# asm 2: movq   48(<pp=%rsi),>mulrax=%rax
movq   48(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx3
# asm 1: mul  <mulx3=int64#9
# asm 2: mul  <mulx3=%r11
mul  %r11

# qhasm:   carry? mulr5 += mulrax
# asm 1: add  <mulrax=int64#7,<mulr5=int64#5
# asm 2: add  <mulrax=%rax,<mulr5=%r8
add  %rax,%r8

# qhasm:   mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx

# qhasm:   carry? mulr5 += mulc
# asm 1: add  <mulc=int64#14,<mulr5=int64#5
# asm 2: add  <mulc=%rbx,<mulr5=%r8
add  %rbx,%r8

# qhasm:   mulc = 0
# asm 1: mov  $0,>mulc=int64#14
# asm 2: mov  $0,>mulc=%rbx
mov  $0,%rbx

# qhasm:   mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
adc %rdx,%rbx

# qhasm:   mulrax = *(uint64 *)(pp + 56)
# asm 1: movq   56(<pp=int64#2),>mulrax=int64#7
# asm 2: movq   56(<pp=%rsi),>mulrax=%rax
movq   56(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx3
# asm 1: mul  <mulx3=int64#9
# asm 2: mul  <mulx3=%r11
mul  %r11

# qhasm:   carry? mulr6 += mulrax
# asm 1: add  <mulrax=int64#7,<mulr6=int64#6
# asm 2: add  <mulrax=%rax,<mulr6=%r9
add  %rax,%r9

# qhasm:   mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx

# qhasm:   carry? mulr6 += mulc
# asm 1: add  <mulc=int64#14,<mulr6=int64#6
# asm 2: add  <mulc=%rbx,<mulr6=%r9
add  %rbx,%r9

# qhasm:   mulr7 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr7=int64#8
# asm 2: adc <mulrdx=%rdx,<mulr7=%r10
adc %rdx,%r10

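# Note: the reduction by 38 below is identical in structure to the one in
# the rx computation above.
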
# qhasm:   mulrax = mulr4
# asm 1: mov  <mulr4=int64#4,>mulrax=int64#7
# asm 2: mov  <mulr4=%rcx,>mulrax=%rax
mov  %rcx,%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq  crypto_sign_ed25519_amd64_64_38

# qhasm:   mulr4 = mulrax
# asm 1: mov  <mulrax=int64#7,>mulr4=int64#4
# asm 2: mov  <mulrax=%rax,>mulr4=%rcx
mov  %rax,%rcx

# qhasm:   mulrax = mulr5
# asm 1: mov  <mulr5=int64#5,>mulrax=int64#7
# asm 2: mov  <mulr5=%r8,>mulrax=%rax
mov  %r8,%rax

# qhasm:   mulr5 = mulrdx
# asm 1: mov  <mulrdx=int64#3,>mulr5=int64#5
# asm 2: mov  <mulrdx=%rdx,>mulr5=%r8
mov  %rdx,%r8

# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq  crypto_sign_ed25519_amd64_64_38

# qhasm:   carry? mulr5 += mulrax
# asm 1: add  <mulrax=int64#7,<mulr5=int64#5
# asm 2: add  <mulrax=%rax,<mulr5=%r8
add  %rax,%r8

# qhasm:   mulrax = mulr6
# asm 1: mov  <mulr6=int64#6,>mulrax=int64#7
# asm 2: mov  <mulr6=%r9,>mulrax=%rax
mov  %r9,%rax

# qhasm:   mulr6 = 0
# asm 1: mov  $0,>mulr6=int64#6
# asm 2: mov  $0,>mulr6=%r9
mov  $0,%r9

# qhasm:   mulr6 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr6=int64#6
# asm 2: adc <mulrdx=%rdx,<mulr6=%r9
adc %rdx,%r9

# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq  crypto_sign_ed25519_amd64_64_38

# qhasm:   carry? mulr6 += mulrax
# asm 1: add  <mulrax=int64#7,<mulr6=int64#6
# asm 2: add  <mulrax=%rax,<mulr6=%r9
add  %rax,%r9

# qhasm:   mulrax = mulr7
# asm 1: mov  <mulr7=int64#8,>mulrax=int64#7
# asm 2: mov  <mulr7=%r10,>mulrax=%rax
mov  %r10,%rax

# qhasm:   mulr7 = 0
# asm 1: mov  $0,>mulr7=int64#8
# asm 2: mov  $0,>mulr7=%r10
mov  $0,%r10

# qhasm:   mulr7 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr7=int64#8
# asm 2: adc <mulrdx=%rdx,<mulr7=%r10
adc %rdx,%r10

# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq  crypto_sign_ed25519_amd64_64_38

# qhasm:   carry? mulr7 += mulrax
# asm 1: add  <mulrax=int64#7,<mulr7=int64#8
# asm 2: add  <mulrax=%rax,<mulr7=%r10
add  %rax,%r10

# qhasm:   mulr8 = 0
# asm 1: mov  $0,>mulr8=int64#7
# asm 2: mov  $0,>mulr8=%rax
mov  $0,%rax

# qhasm:   mulr8 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr8=int64#7
# asm 2: adc <mulrdx=%rdx,<mulr8=%rax
adc %rdx,%rax

# qhasm:   carry? ry0 += mulr4
# asm 1: add  <mulr4=int64#4,<ry0=int64#10
# asm 2: add  <mulr4=%rcx,<ry0=%r12
add  %rcx,%r12

# qhasm:   carry? ry1 += mulr5 + carry
# asm 1: adc <mulr5=int64#5,<ry1=int64#11
# asm 2: adc <mulr5=%r8,<ry1=%r13
adc %r8,%r13

# qhasm:   carry? ry2 += mulr6 + carry
# asm 1: adc <mulr6=int64#6,<ry2=int64#12
# asm 2: adc <mulr6=%r9,<ry2=%r14
adc %r9,%r14

# qhasm:   carry? ry3 += mulr7 + carry
# asm 1: adc <mulr7=int64#8,<ry3=int64#13
# asm 2: adc <mulr7=%r10,<ry3=%r15
adc %r10,%r15

# qhasm:   mulzero = 0
# asm 1: mov  $0,>mulzero=int64#3
# asm 2: mov  $0,>mulzero=%rdx
mov  $0,%rdx

# qhasm:   mulr8 += mulzero + carry
# asm 1: adc <mulzero=int64#3,<mulr8=int64#7
# asm 2: adc <mulzero=%rdx,<mulr8=%rax
adc %rdx,%rax

# qhasm:   mulr8 *= 38
# asm 1: imulq  $38,<mulr8=int64#7,>mulr8=int64#4
# asm 2: imulq  $38,<mulr8=%rax,>mulr8=%rcx
imulq  $38,%rax,%rcx

# qhasm:   carry? ry0 += mulr8
# asm 1: add  <mulr8=int64#4,<ry0=int64#10
# asm 2: add  <mulr8=%rcx,<ry0=%r12
add  %rcx,%r12

# qhasm:   carry? ry1 += mulzero + carry
# asm 1: adc <mulzero=int64#3,<ry1=int64#11
# asm 2: adc <mulzero=%rdx,<ry1=%r13
adc %rdx,%r13

# qhasm:   carry? ry2 += mulzero + carry
# asm 1: adc <mulzero=int64#3,<ry2=int64#12
# asm 2: adc <mulzero=%rdx,<ry2=%r14
adc %rdx,%r14

# qhasm:   carry? ry3 += mulzero + carry
# asm 1: adc <mulzero=int64#3,<ry3=int64#13
# asm 2: adc <mulzero=%rdx,<ry3=%r15
adc %rdx,%r15

# qhasm:   mulzero += mulzero + carry
# asm 1: adc <mulzero=int64#3,<mulzero=int64#3
# asm 2: adc <mulzero=%rdx,<mulzero=%rdx
adc %rdx,%rdx

# qhasm:   mulzero *= 38
# asm 1: imulq  $38,<mulzero=int64#3,>mulzero=int64#3
# asm 2: imulq  $38,<mulzero=%rdx,>mulzero=%rdx
imulq  $38,%rdx,%rdx

# qhasm:   ry0 += mulzero
# asm 1: add  <mulzero=int64#3,<ry0=int64#10
# asm 2: add  <mulzero=%rdx,<ry0=%r12
add  %rdx,%r12

# qhasm: *(uint64 *)(rp + 32) = ry0
# asm 1: movq   <ry0=int64#10,32(<rp=int64#1)
# asm 2: movq   <ry0=%r12,32(<rp=%rdi)
movq   %r12,32(%rdi)

# qhasm: *(uint64 *)(rp + 40) = ry1
# asm 1: movq   <ry1=int64#11,40(<rp=int64#1)
# asm 2: movq   <ry1=%r13,40(<rp=%rdi)
movq   %r13,40(%rdi)

# qhasm: *(uint64 *)(rp + 48) = ry2
# asm 1: movq   <ry2=int64#12,48(<rp=int64#1)
# asm 2: movq   <ry2=%r14,48(<rp=%rdi)
movq   %r14,48(%rdi)

# qhasm: *(uint64 *)(rp + 56) = ry3
# asm 1: movq   <ry3=int64#13,56(<rp=int64#1)
# asm 2: movq   <ry3=%r15,56(<rp=%rdi)
movq   %r15,56(%rdi)

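# Note: ry is now stored to rp+32..56. The third product, rz, multiplies
# the limbs at pp+32..56 by those at pp+96..120 below.
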
1523# qhasm:   mulr4 = 0
1524# asm 1: mov  $0,>mulr4=int64#4
1525# asm 2: mov  $0,>mulr4=%rcx
1526mov  $0,%rcx
1527
1528# qhasm:   mulr5 = 0
1529# asm 1: mov  $0,>mulr5=int64#5
1530# asm 2: mov  $0,>mulr5=%r8
1531mov  $0,%r8
1532
1533# qhasm:   mulr6 = 0
1534# asm 1: mov  $0,>mulr6=int64#6
1535# asm 2: mov  $0,>mulr6=%r9
1536mov  $0,%r9
1537
1538# qhasm:   mulr7 = 0
1539# asm 1: mov  $0,>mulr7=int64#8
1540# asm 2: mov  $0,>mulr7=%r10
1541mov  $0,%r10
1542
1543# qhasm:   mulx0 = *(uint64 *)(pp + 32)
1544# asm 1: movq   32(<pp=int64#2),>mulx0=int64#9
1545# asm 2: movq   32(<pp=%rsi),>mulx0=%r11
1546movq   32(%rsi),%r11
1547
1548# qhasm:   mulrax = *(uint64 *)(pp + 96)
1549# asm 1: movq   96(<pp=int64#2),>mulrax=int64#7
1550# asm 2: movq   96(<pp=%rsi),>mulrax=%rax
1551movq   96(%rsi),%rax
1552
1553# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx0
1554# asm 1: mul  <mulx0=int64#9
1555# asm 2: mul  <mulx0=%r11
1556mul  %r11
1557
1558# qhasm:   rz0 = mulrax
1559# asm 1: mov  <mulrax=int64#7,>rz0=int64#10
1560# asm 2: mov  <mulrax=%rax,>rz0=%r12
1561mov  %rax,%r12
1562
1563# qhasm:   rz1 = mulrdx
1564# asm 1: mov  <mulrdx=int64#3,>rz1=int64#11
1565# asm 2: mov  <mulrdx=%rdx,>rz1=%r13
1566mov  %rdx,%r13
1567
1568# qhasm:   mulrax = *(uint64 *)(pp + 104)
1569# asm 1: movq   104(<pp=int64#2),>mulrax=int64#7
1570# asm 2: movq   104(<pp=%rsi),>mulrax=%rax
1571movq   104(%rsi),%rax
1572
1573# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx0
1574# asm 1: mul  <mulx0=int64#9
1575# asm 2: mul  <mulx0=%r11
1576mul  %r11
1577
1578# qhasm:   carry? rz1 += mulrax
1579# asm 1: add  <mulrax=int64#7,<rz1=int64#11
1580# asm 2: add  <mulrax=%rax,<rz1=%r13
1581add  %rax,%r13
1582
1583# qhasm:   rz2 = 0
1584# asm 1: mov  $0,>rz2=int64#12
1585# asm 2: mov  $0,>rz2=%r14
1586mov  $0,%r14
1587
1588# qhasm:   rz2 += mulrdx + carry
1589# asm 1: adc <mulrdx=int64#3,<rz2=int64#12
1590# asm 2: adc <mulrdx=%rdx,<rz2=%r14
1591adc %rdx,%r14
1592
1593# qhasm:   mulrax = *(uint64 *)(pp + 112)
1594# asm 1: movq   112(<pp=int64#2),>mulrax=int64#7
1595# asm 2: movq   112(<pp=%rsi),>mulrax=%rax
1596movq   112(%rsi),%rax
1597
1598# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx0
1599# asm 1: mul  <mulx0=int64#9
1600# asm 2: mul  <mulx0=%r11
1601mul  %r11
1602
1603# qhasm:   carry? rz2 += mulrax
1604# asm 1: add  <mulrax=int64#7,<rz2=int64#12
1605# asm 2: add  <mulrax=%rax,<rz2=%r14
1606add  %rax,%r14
1607
1608# qhasm:   rz3 = 0
1609# asm 1: mov  $0,>rz3=int64#13
1610# asm 2: mov  $0,>rz3=%r15
1611mov  $0,%r15
1612
1613# qhasm:   rz3 += mulrdx + carry
1614# asm 1: adc <mulrdx=int64#3,<rz3=int64#13
1615# asm 2: adc <mulrdx=%rdx,<rz3=%r15
1616adc %rdx,%r15
1617
1618# qhasm:   mulrax = *(uint64 *)(pp + 120)
1619# asm 1: movq   120(<pp=int64#2),>mulrax=int64#7
1620# asm 2: movq   120(<pp=%rsi),>mulrax=%rax
1621movq   120(%rsi),%rax
1622
1623# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx0
1624# asm 1: mul  <mulx0=int64#9
1625# asm 2: mul  <mulx0=%r11
1626mul  %r11
1627
1628# qhasm:   carry? rz3 += mulrax
1629# asm 1: add  <mulrax=int64#7,<rz3=int64#13
1630# asm 2: add  <mulrax=%rax,<rz3=%r15
1631add  %rax,%r15
1632
1633# qhasm:   mulr4 += mulrdx + carry
1634# asm 1: adc <mulrdx=int64#3,<mulr4=int64#4
1635# asm 2: adc <mulrdx=%rdx,<mulr4=%rcx
1636adc %rdx,%rcx
1637
1638# qhasm:   mulx1 = *(uint64 *)(pp + 40)
1639# asm 1: movq   40(<pp=int64#2),>mulx1=int64#9
1640# asm 2: movq   40(<pp=%rsi),>mulx1=%r11
1641movq   40(%rsi),%r11
1642
1643# qhasm:   mulrax = *(uint64 *)(pp + 96)
1644# asm 1: movq   96(<pp=int64#2),>mulrax=int64#7
1645# asm 2: movq   96(<pp=%rsi),>mulrax=%rax
1646movq   96(%rsi),%rax
1647
1648# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx1
1649# asm 1: mul  <mulx1=int64#9
1650# asm 2: mul  <mulx1=%r11
1651mul  %r11
1652
1653# qhasm:   carry? rz1 += mulrax
1654# asm 1: add  <mulrax=int64#7,<rz1=int64#11
1655# asm 2: add  <mulrax=%rax,<rz1=%r13
1656add  %rax,%r13
1657
1658# qhasm:   mulc = 0
1659# asm 1: mov  $0,>mulc=int64#14
1660# asm 2: mov  $0,>mulc=%rbx
1661mov  $0,%rbx
1662
1663# qhasm:   mulc += mulrdx + carry
1664# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
1665# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
1666adc %rdx,%rbx
1667
1668# qhasm:   mulrax = *(uint64 *)(pp + 104)
1669# asm 1: movq   104(<pp=int64#2),>mulrax=int64#7
1670# asm 2: movq   104(<pp=%rsi),>mulrax=%rax
1671movq   104(%rsi),%rax
1672
1673# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx1
1674# asm 1: mul  <mulx1=int64#9
1675# asm 2: mul  <mulx1=%r11
1676mul  %r11
1677
1678# qhasm:   carry? rz2 += mulrax
1679# asm 1: add  <mulrax=int64#7,<rz2=int64#12
1680# asm 2: add  <mulrax=%rax,<rz2=%r14
1681add  %rax,%r14
1682
1683# qhasm:   mulrdx += 0 + carry
1684# asm 1: adc $0,<mulrdx=int64#3
1685# asm 2: adc $0,<mulrdx=%rdx
1686adc $0,%rdx
1687
1688# qhasm:   carry? rz2 += mulc
1689# asm 1: add  <mulc=int64#14,<rz2=int64#12
1690# asm 2: add  <mulc=%rbx,<rz2=%r14
1691add  %rbx,%r14
1692
1693# qhasm:   mulc = 0
1694# asm 1: mov  $0,>mulc=int64#14
1695# asm 2: mov  $0,>mulc=%rbx
1696mov  $0,%rbx
1697
1698# qhasm:   mulc += mulrdx + carry
1699# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
1700# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
1701adc %rdx,%rbx
1702
1703# qhasm:   mulrax = *(uint64 *)(pp + 112)
1704# asm 1: movq   112(<pp=int64#2),>mulrax=int64#7
1705# asm 2: movq   112(<pp=%rsi),>mulrax=%rax
1706movq   112(%rsi),%rax
1707
1708# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx1
1709# asm 1: mul  <mulx1=int64#9
1710# asm 2: mul  <mulx1=%r11
1711mul  %r11
1712
1713# qhasm:   carry? rz3 += mulrax
1714# asm 1: add  <mulrax=int64#7,<rz3=int64#13
1715# asm 2: add  <mulrax=%rax,<rz3=%r15
1716add  %rax,%r15
1717
1718# qhasm:   mulrdx += 0 + carry
1719# asm 1: adc $0,<mulrdx=int64#3
1720# asm 2: adc $0,<mulrdx=%rdx
1721adc $0,%rdx
1722
1723# qhasm:   carry? rz3 += mulc
1724# asm 1: add  <mulc=int64#14,<rz3=int64#13
1725# asm 2: add  <mulc=%rbx,<rz3=%r15
1726add  %rbx,%r15
1727
1728# qhasm:   mulc = 0
1729# asm 1: mov  $0,>mulc=int64#14
1730# asm 2: mov  $0,>mulc=%rbx
1731mov  $0,%rbx
1732
1733# qhasm:   mulc += mulrdx + carry
1734# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
1735# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
1736adc %rdx,%rbx
1737
1738# qhasm:   mulrax = *(uint64 *)(pp + 120)
1739# asm 1: movq   120(<pp=int64#2),>mulrax=int64#7
1740# asm 2: movq   120(<pp=%rsi),>mulrax=%rax
1741movq   120(%rsi),%rax
1742
1743# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx1
1744# asm 1: mul  <mulx1=int64#9
1745# asm 2: mul  <mulx1=%r11
1746mul  %r11
1747
1748# qhasm:   carry? mulr4 += mulrax
1749# asm 1: add  <mulrax=int64#7,<mulr4=int64#4
1750# asm 2: add  <mulrax=%rax,<mulr4=%rcx
1751add  %rax,%rcx
1752
1753# qhasm:   mulrdx += 0 + carry
1754# asm 1: adc $0,<mulrdx=int64#3
1755# asm 2: adc $0,<mulrdx=%rdx
1756adc $0,%rdx
1757
1758# qhasm:   carry? mulr4 += mulc
1759# asm 1: add  <mulc=int64#14,<mulr4=int64#4
1760# asm 2: add  <mulc=%rbx,<mulr4=%rcx
1761add  %rbx,%rcx
1762
1763# qhasm:   mulr5 += mulrdx + carry
1764# asm 1: adc <mulrdx=int64#3,<mulr5=int64#5
1765# asm 2: adc <mulrdx=%rdx,<mulr5=%r8
1766adc %rdx,%r8
1767
1768# qhasm:   mulx2 = *(uint64 *)(pp + 48)
1769# asm 1: movq   48(<pp=int64#2),>mulx2=int64#9
1770# asm 2: movq   48(<pp=%rsi),>mulx2=%r11
1771movq   48(%rsi),%r11
1772
1773# qhasm:   mulrax = *(uint64 *)(pp + 96)
1774# asm 1: movq   96(<pp=int64#2),>mulrax=int64#7
1775# asm 2: movq   96(<pp=%rsi),>mulrax=%rax
1776movq   96(%rsi),%rax
1777
1778# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx2
1779# asm 1: mul  <mulx2=int64#9
1780# asm 2: mul  <mulx2=%r11
1781mul  %r11
1782
1783# qhasm:   carry? rz2 += mulrax
1784# asm 1: add  <mulrax=int64#7,<rz2=int64#12
1785# asm 2: add  <mulrax=%rax,<rz2=%r14
1786add  %rax,%r14
1787
1788# qhasm:   mulc = 0
1789# asm 1: mov  $0,>mulc=int64#14
1790# asm 2: mov  $0,>mulc=%rbx
1791mov  $0,%rbx
1792
1793# qhasm:   mulc += mulrdx + carry
1794# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
1795# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
1796adc %rdx,%rbx
1797
1798# qhasm:   mulrax = *(uint64 *)(pp + 104)
1799# asm 1: movq   104(<pp=int64#2),>mulrax=int64#7
1800# asm 2: movq   104(<pp=%rsi),>mulrax=%rax
1801movq   104(%rsi),%rax
1802
1803# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx2
1804# asm 1: mul  <mulx2=int64#9
1805# asm 2: mul  <mulx2=%r11
1806mul  %r11
1807
1808# qhasm:   carry? rz3 += mulrax
1809# asm 1: add  <mulrax=int64#7,<rz3=int64#13
1810# asm 2: add  <mulrax=%rax,<rz3=%r15
1811add  %rax,%r15
1812
1813# qhasm:   mulrdx += 0 + carry
1814# asm 1: adc $0,<mulrdx=int64#3
1815# asm 2: adc $0,<mulrdx=%rdx
1816adc $0,%rdx
1817
1818# qhasm:   carry? rz3 += mulc
1819# asm 1: add  <mulc=int64#14,<rz3=int64#13
1820# asm 2: add  <mulc=%rbx,<rz3=%r15
1821add  %rbx,%r15
1822
1823# qhasm:   mulc = 0
1824# asm 1: mov  $0,>mulc=int64#14
1825# asm 2: mov  $0,>mulc=%rbx
1826mov  $0,%rbx
1827
1828# qhasm:   mulc += mulrdx + carry
1829# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
1830# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
1831adc %rdx,%rbx
1832
1833# qhasm:   mulrax = *(uint64 *)(pp + 112)
1834# asm 1: movq   112(<pp=int64#2),>mulrax=int64#7
1835# asm 2: movq   112(<pp=%rsi),>mulrax=%rax
1836movq   112(%rsi),%rax
1837
1838# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx2
1839# asm 1: mul  <mulx2=int64#9
1840# asm 2: mul  <mulx2=%r11
1841mul  %r11
1842
1843# qhasm:   carry? mulr4 += mulrax
1844# asm 1: add  <mulrax=int64#7,<mulr4=int64#4
1845# asm 2: add  <mulrax=%rax,<mulr4=%rcx
1846add  %rax,%rcx
1847
1848# qhasm:   mulrdx += 0 + carry
1849# asm 1: adc $0,<mulrdx=int64#3
1850# asm 2: adc $0,<mulrdx=%rdx
1851adc $0,%rdx
1852
1853# qhasm:   carry? mulr4 += mulc
1854# asm 1: add  <mulc=int64#14,<mulr4=int64#4
1855# asm 2: add  <mulc=%rbx,<mulr4=%rcx
1856add  %rbx,%rcx
1857
1858# qhasm:   mulc = 0
1859# asm 1: mov  $0,>mulc=int64#14
1860# asm 2: mov  $0,>mulc=%rbx
1861mov  $0,%rbx
1862
1863# qhasm:   mulc += mulrdx + carry
1864# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
1865# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
1866adc %rdx,%rbx
1867
1868# qhasm:   mulrax = *(uint64 *)(pp + 120)
1869# asm 1: movq   120(<pp=int64#2),>mulrax=int64#7
1870# asm 2: movq   120(<pp=%rsi),>mulrax=%rax
1871movq   120(%rsi),%rax
1872
1873# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx2
1874# asm 1: mul  <mulx2=int64#9
1875# asm 2: mul  <mulx2=%r11
1876mul  %r11
1877
1878# qhasm:   carry? mulr5 += mulrax
1879# asm 1: add  <mulrax=int64#7,<mulr5=int64#5
1880# asm 2: add  <mulrax=%rax,<mulr5=%r8
1881add  %rax,%r8
1882
1883# qhasm:   mulrdx += 0 + carry
1884# asm 1: adc $0,<mulrdx=int64#3
1885# asm 2: adc $0,<mulrdx=%rdx
1886adc $0,%rdx
1887
1888# qhasm:   carry? mulr5 += mulc
1889# asm 1: add  <mulc=int64#14,<mulr5=int64#5
1890# asm 2: add  <mulc=%rbx,<mulr5=%r8
1891add  %rbx,%r8
1892
1893# qhasm:   mulr6 += mulrdx + carry
1894# asm 1: adc <mulrdx=int64#3,<mulr6=int64#6
1895# asm 2: adc <mulrdx=%rdx,<mulr6=%r9
1896adc %rdx,%r9
1897
# qhasm:   mulx3 = *(uint64 *)(pp + 56)
# asm 1: movq   56(<pp=int64#2),>mulx3=int64#9
# asm 2: movq   56(<pp=%rsi),>mulx3=%r11
movq   56(%rsi),%r11

# qhasm:   mulrax = *(uint64 *)(pp + 96)
# asm 1: movq   96(<pp=int64#2),>mulrax=int64#7
# asm 2: movq   96(<pp=%rsi),>mulrax=%rax
movq   96(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx3
# asm 1: mul  <mulx3=int64#9
# asm 2: mul  <mulx3=%r11
mul  %r11

# qhasm:   carry? rz3 += mulrax
# asm 1: add  <mulrax=int64#7,<rz3=int64#13
# asm 2: add  <mulrax=%rax,<rz3=%r15
add  %rax,%r15

# qhasm:   mulc = 0
# asm 1: mov  $0,>mulc=int64#14
# asm 2: mov  $0,>mulc=%rbx
mov  $0,%rbx

# qhasm:   mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
adc %rdx,%rbx

# qhasm:   mulrax = *(uint64 *)(pp + 104)
# asm 1: movq   104(<pp=int64#2),>mulrax=int64#7
# asm 2: movq   104(<pp=%rsi),>mulrax=%rax
movq   104(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx3
# asm 1: mul  <mulx3=int64#9
# asm 2: mul  <mulx3=%r11
mul  %r11

# qhasm:   carry? mulr4 += mulrax
# asm 1: add  <mulrax=int64#7,<mulr4=int64#4
# asm 2: add  <mulrax=%rax,<mulr4=%rcx
add  %rax,%rcx

# qhasm:   mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx

# qhasm:   carry? mulr4 += mulc
# asm 1: add  <mulc=int64#14,<mulr4=int64#4
# asm 2: add  <mulc=%rbx,<mulr4=%rcx
add  %rbx,%rcx

# qhasm:   mulc = 0
# asm 1: mov  $0,>mulc=int64#14
# asm 2: mov  $0,>mulc=%rbx
mov  $0,%rbx

# qhasm:   mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
adc %rdx,%rbx

# qhasm:   mulrax = *(uint64 *)(pp + 112)
# asm 1: movq   112(<pp=int64#2),>mulrax=int64#7
# asm 2: movq   112(<pp=%rsi),>mulrax=%rax
movq   112(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx3
# asm 1: mul  <mulx3=int64#9
# asm 2: mul  <mulx3=%r11
mul  %r11

# qhasm:   carry? mulr5 += mulrax
# asm 1: add  <mulrax=int64#7,<mulr5=int64#5
# asm 2: add  <mulrax=%rax,<mulr5=%r8
add  %rax,%r8

# qhasm:   mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx

# qhasm:   carry? mulr5 += mulc
# asm 1: add  <mulc=int64#14,<mulr5=int64#5
# asm 2: add  <mulc=%rbx,<mulr5=%r8
add  %rbx,%r8

# qhasm:   mulc = 0
# asm 1: mov  $0,>mulc=int64#14
# asm 2: mov  $0,>mulc=%rbx
mov  $0,%rbx

# qhasm:   mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
adc %rdx,%rbx

# qhasm:   mulrax = *(uint64 *)(pp + 120)
# asm 1: movq   120(<pp=int64#2),>mulrax=int64#7
# asm 2: movq   120(<pp=%rsi),>mulrax=%rax
movq   120(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx3
# asm 1: mul  <mulx3=int64#9
# asm 2: mul  <mulx3=%r11
mul  %r11

# qhasm:   carry? mulr6 += mulrax
# asm 1: add  <mulrax=int64#7,<mulr6=int64#6
# asm 2: add  <mulrax=%rax,<mulr6=%r9
add  %rax,%r9

# qhasm:   mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx

# qhasm:   carry? mulr6 += mulc
# asm 1: add  <mulc=int64#14,<mulr6=int64#6
# asm 2: add  <mulc=%rbx,<mulr6=%r9
add  %rbx,%r9

# qhasm:   mulr7 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr7=int64#8
# asm 2: adc <mulrdx=%rdx,<mulr7=%r10
adc %rdx,%r10

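# note: the 512-bit product of the two field elements now sits in
# rz0..rz3 (low 256 bits, in r12..r15) and mulr4..mulr7 (high 256
# bits, in rcx, r8, r9, r10). The block below starts the reduction,
# assuming p = 2^255 - 19 and crypto_sign_ed25519_amd64_64_38 = 38:
# since 2^256 = 2*p + 38, the high half H satisfies 2^256 * H == 38 * H
# (mod p), so each high limb is multiplied by 38 and the 320-bit
# result 38*H is collected back in mulr4..mulr8.
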
# qhasm:   mulrax = mulr4
# asm 1: mov  <mulr4=int64#4,>mulrax=int64#7
# asm 2: mov  <mulr4=%rcx,>mulrax=%rax
mov  %rcx,%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq  crypto_sign_ed25519_amd64_64_38

# qhasm:   mulr4 = mulrax
# asm 1: mov  <mulrax=int64#7,>mulr4=int64#4
# asm 2: mov  <mulrax=%rax,>mulr4=%rcx
mov  %rax,%rcx

# qhasm:   mulrax = mulr5
# asm 1: mov  <mulr5=int64#5,>mulrax=int64#7
# asm 2: mov  <mulr5=%r8,>mulrax=%rax
mov  %r8,%rax

# qhasm:   mulr5 = mulrdx
# asm 1: mov  <mulrdx=int64#3,>mulr5=int64#5
# asm 2: mov  <mulrdx=%rdx,>mulr5=%r8
mov  %rdx,%r8

# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq  crypto_sign_ed25519_amd64_64_38

# qhasm:   carry? mulr5 += mulrax
# asm 1: add  <mulrax=int64#7,<mulr5=int64#5
# asm 2: add  <mulrax=%rax,<mulr5=%r8
add  %rax,%r8

# qhasm:   mulrax = mulr6
# asm 1: mov  <mulr6=int64#6,>mulrax=int64#7
# asm 2: mov  <mulr6=%r9,>mulrax=%rax
mov  %r9,%rax

# qhasm:   mulr6 = 0
# asm 1: mov  $0,>mulr6=int64#6
# asm 2: mov  $0,>mulr6=%r9
mov  $0,%r9

# qhasm:   mulr6 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr6=int64#6
# asm 2: adc <mulrdx=%rdx,<mulr6=%r9
adc %rdx,%r9

# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq  crypto_sign_ed25519_amd64_64_38

# qhasm:   carry? mulr6 += mulrax
# asm 1: add  <mulrax=int64#7,<mulr6=int64#6
# asm 2: add  <mulrax=%rax,<mulr6=%r9
add  %rax,%r9

# qhasm:   mulrax = mulr7
# asm 1: mov  <mulr7=int64#8,>mulrax=int64#7
# asm 2: mov  <mulr7=%r10,>mulrax=%rax
mov  %r10,%rax

# qhasm:   mulr7 = 0
# asm 1: mov  $0,>mulr7=int64#8
# asm 2: mov  $0,>mulr7=%r10
mov  $0,%r10

# qhasm:   mulr7 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr7=int64#8
# asm 2: adc <mulrdx=%rdx,<mulr7=%r10
adc %rdx,%r10

# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq  crypto_sign_ed25519_amd64_64_38

# qhasm:   carry? mulr7 += mulrax
# asm 1: add  <mulrax=int64#7,<mulr7=int64#8
# asm 2: add  <mulrax=%rax,<mulr7=%r10
add  %rax,%r10

# qhasm:   mulr8 = 0
# asm 1: mov  $0,>mulr8=int64#7
# asm 2: mov  $0,>mulr8=%rax
mov  $0,%rax

# qhasm:   mulr8 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr8=int64#7
# asm 2: adc <mulrdx=%rdx,<mulr8=%rax
adc %rdx,%rax

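# note: 38*H is now folded into the low half: mulr4..mulr7 are added
# limb-wise into rz0..rz3, and the carry out of rz3 is absorbed into
# mulr8 just below.
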
# qhasm:   carry? rz0 += mulr4
# asm 1: add  <mulr4=int64#4,<rz0=int64#10
# asm 2: add  <mulr4=%rcx,<rz0=%r12
add  %rcx,%r12

# qhasm:   carry? rz1 += mulr5 + carry
# asm 1: adc <mulr5=int64#5,<rz1=int64#11
# asm 2: adc <mulr5=%r8,<rz1=%r13
adc %r8,%r13

# qhasm:   carry? rz2 += mulr6 + carry
# asm 1: adc <mulr6=int64#6,<rz2=int64#12
# asm 2: adc <mulr6=%r9,<rz2=%r14
adc %r9,%r14

# qhasm:   carry? rz3 += mulr7 + carry
# asm 1: adc <mulr7=int64#8,<rz3=int64#13
# asm 2: adc <mulr7=%r10,<rz3=%r15
adc %r10,%r15

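# note: second, much smaller fold. mulr8 (top limb of 38*H plus the
# carry, so only a handful of bits) is scaled by 38 and added into
# rz0, with the carry rippled through rz1..rz3. If that chain still
# carries out, mulzero becomes 1, is scaled to 38, and is added once
# more; for in-range inputs that final add cannot carry again, so
# rz0..rz3 end up congruent to the product mod p (not necessarily
# the canonical representative below p).
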
# qhasm:   mulzero = 0
# asm 1: mov  $0,>mulzero=int64#3
# asm 2: mov  $0,>mulzero=%rdx
mov  $0,%rdx

# qhasm:   mulr8 += mulzero + carry
# asm 1: adc <mulzero=int64#3,<mulr8=int64#7
# asm 2: adc <mulzero=%rdx,<mulr8=%rax
adc %rdx,%rax

# qhasm:   mulr8 *= 38
# asm 1: imulq  $38,<mulr8=int64#7,>mulr8=int64#4
# asm 2: imulq  $38,<mulr8=%rax,>mulr8=%rcx
imulq  $38,%rax,%rcx

# qhasm:   carry? rz0 += mulr8
# asm 1: add  <mulr8=int64#4,<rz0=int64#10
# asm 2: add  <mulr8=%rcx,<rz0=%r12
add  %rcx,%r12

# qhasm:   carry? rz1 += mulzero + carry
# asm 1: adc <mulzero=int64#3,<rz1=int64#11
# asm 2: adc <mulzero=%rdx,<rz1=%r13
adc %rdx,%r13

# qhasm:   carry? rz2 += mulzero + carry
# asm 1: adc <mulzero=int64#3,<rz2=int64#12
# asm 2: adc <mulzero=%rdx,<rz2=%r14
adc %rdx,%r14

# qhasm:   carry? rz3 += mulzero + carry
# asm 1: adc <mulzero=int64#3,<rz3=int64#13
# asm 2: adc <mulzero=%rdx,<rz3=%r15
adc %rdx,%r15

# qhasm:   mulzero += mulzero + carry
# asm 1: adc <mulzero=int64#3,<mulzero=int64#3
# asm 2: adc <mulzero=%rdx,<mulzero=%rdx
adc %rdx,%rdx

# qhasm:   mulzero *= 38
# asm 1: imulq  $38,<mulzero=int64#3,>mulzero=int64#3
# asm 2: imulq  $38,<mulzero=%rdx,>mulzero=%rdx
imulq  $38,%rdx,%rdx

# qhasm:   rz0 += mulzero
# asm 1: add  <mulzero=int64#3,<rz0=int64#10
# asm 2: add  <mulzero=%rdx,<rz0=%r12
add  %rdx,%r12

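# note: store the four limbs of this product at rp+64..rp+88; no
# canonical reduction (freeze) is performed here.
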
# qhasm: *(uint64 *)(rp + 64) = rz0
# asm 1: movq   <rz0=int64#10,64(<rp=int64#1)
# asm 2: movq   <rz0=%r12,64(<rp=%rdi)
movq   %r12,64(%rdi)

# qhasm: *(uint64 *)(rp + 72) = rz1
# asm 1: movq   <rz1=int64#11,72(<rp=int64#1)
# asm 2: movq   <rz1=%r13,72(<rp=%rdi)
movq   %r13,72(%rdi)

# qhasm: *(uint64 *)(rp + 80) = rz2
# asm 1: movq   <rz2=int64#12,80(<rp=int64#1)
# asm 2: movq   <rz2=%r14,80(<rp=%rdi)
movq   %r14,80(%rdi)

# qhasm: *(uint64 *)(rp + 88) = rz3
# asm 1: movq   <rz3=int64#13,88(<rp=int64#1)
# asm 2: movq   <rz3=%r15,88(<rp=%rdi)
movq   %r15,88(%rdi)

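# note: second field multiplication of this section. The high-limb
# accumulators are re-zeroed, then the 4-limb element at pp+0..pp+24
# is multiplied by the one at pp+64..pp+88, schoolbook style: one row
# per limb of the first operand, with the product collected in
# rt0..rt3 (low) and mulr4..mulr8 (high).
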
# qhasm:   mulr4 = 0
# asm 1: mov  $0,>mulr4=int64#4
# asm 2: mov  $0,>mulr4=%rcx
mov  $0,%rcx

# qhasm:   mulr5 = 0
# asm 1: mov  $0,>mulr5=int64#5
# asm 2: mov  $0,>mulr5=%r8
mov  $0,%r8

# qhasm:   mulr6 = 0
# asm 1: mov  $0,>mulr6=int64#6
# asm 2: mov  $0,>mulr6=%r9
mov  $0,%r9

# qhasm:   mulr7 = 0
# asm 1: mov  $0,>mulr7=int64#8
# asm 2: mov  $0,>mulr7=%r10
mov  $0,%r10

# qhasm:   mulx0 = *(uint64 *)(pp + 0)
# asm 1: movq   0(<pp=int64#2),>mulx0=int64#9
# asm 2: movq   0(<pp=%rsi),>mulx0=%r11
movq   0(%rsi),%r11

# qhasm:   mulrax = *(uint64 *)(pp + 64)
# asm 1: movq   64(<pp=int64#2),>mulrax=int64#7
# asm 2: movq   64(<pp=%rsi),>mulrax=%rax
movq   64(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx0
# asm 1: mul  <mulx0=int64#9
# asm 2: mul  <mulx0=%r11
mul  %r11

# qhasm:   rt0 = mulrax
# asm 1: mov  <mulrax=int64#7,>rt0=int64#10
# asm 2: mov  <mulrax=%rax,>rt0=%r12
mov  %rax,%r12

# qhasm:   rt1 = mulrdx
# asm 1: mov  <mulrdx=int64#3,>rt1=int64#11
# asm 2: mov  <mulrdx=%rdx,>rt1=%r13
mov  %rdx,%r13

# qhasm:   mulrax = *(uint64 *)(pp + 72)
# asm 1: movq   72(<pp=int64#2),>mulrax=int64#7
# asm 2: movq   72(<pp=%rsi),>mulrax=%rax
movq   72(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx0
# asm 1: mul  <mulx0=int64#9
# asm 2: mul  <mulx0=%r11
mul  %r11

# qhasm:   carry? rt1 += mulrax
# asm 1: add  <mulrax=int64#7,<rt1=int64#11
# asm 2: add  <mulrax=%rax,<rt1=%r13
add  %rax,%r13

# qhasm:   rt2 = 0
# asm 1: mov  $0,>rt2=int64#12
# asm 2: mov  $0,>rt2=%r14
mov  $0,%r14

# qhasm:   rt2 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<rt2=int64#12
# asm 2: adc <mulrdx=%rdx,<rt2=%r14
adc %rdx,%r14

# qhasm:   mulrax = *(uint64 *)(pp + 80)
# asm 1: movq   80(<pp=int64#2),>mulrax=int64#7
# asm 2: movq   80(<pp=%rsi),>mulrax=%rax
movq   80(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx0
# asm 1: mul  <mulx0=int64#9
# asm 2: mul  <mulx0=%r11
mul  %r11

# qhasm:   carry? rt2 += mulrax
# asm 1: add  <mulrax=int64#7,<rt2=int64#12
# asm 2: add  <mulrax=%rax,<rt2=%r14
add  %rax,%r14

# qhasm:   rt3 = 0
# asm 1: mov  $0,>rt3=int64#13
# asm 2: mov  $0,>rt3=%r15
mov  $0,%r15

# qhasm:   rt3 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<rt3=int64#13
# asm 2: adc <mulrdx=%rdx,<rt3=%r15
adc %rdx,%r15

# qhasm:   mulrax = *(uint64 *)(pp + 88)
# asm 1: movq   88(<pp=int64#2),>mulrax=int64#7
# asm 2: movq   88(<pp=%rsi),>mulrax=%rax
movq   88(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx0
# asm 1: mul  <mulx0=int64#9
# asm 2: mul  <mulx0=%r11
mul  %r11

# qhasm:   carry? rt3 += mulrax
# asm 1: add  <mulrax=int64#7,<rt3=int64#13
# asm 2: add  <mulrax=%rax,<rt3=%r15
add  %rax,%r15

# qhasm:   mulr4 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr4=int64#4
# asm 2: adc <mulrdx=%rdx,<mulr4=%rcx
adc %rdx,%rcx

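# note: rows for the remaining limbs pp+8, pp+16 and pp+24 follow,
# each shifted up by one limb position. Every row uses the same
# carry-ripple idiom: after each mul, rax is added into the target
# limb, rdx absorbs that carry (adc $0), and mulc shuttles the
# combined carry into the next limb so nothing is lost between
# 128-bit partial products.
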
# qhasm:   mulx1 = *(uint64 *)(pp + 8)
# asm 1: movq   8(<pp=int64#2),>mulx1=int64#9
# asm 2: movq   8(<pp=%rsi),>mulx1=%r11
movq   8(%rsi),%r11

# qhasm:   mulrax = *(uint64 *)(pp + 64)
# asm 1: movq   64(<pp=int64#2),>mulrax=int64#7
# asm 2: movq   64(<pp=%rsi),>mulrax=%rax
movq   64(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx1
# asm 1: mul  <mulx1=int64#9
# asm 2: mul  <mulx1=%r11
mul  %r11

# qhasm:   carry? rt1 += mulrax
# asm 1: add  <mulrax=int64#7,<rt1=int64#11
# asm 2: add  <mulrax=%rax,<rt1=%r13
add  %rax,%r13

# qhasm:   mulc = 0
# asm 1: mov  $0,>mulc=int64#14
# asm 2: mov  $0,>mulc=%rbx
mov  $0,%rbx

# qhasm:   mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
adc %rdx,%rbx

# qhasm:   mulrax = *(uint64 *)(pp + 72)
# asm 1: movq   72(<pp=int64#2),>mulrax=int64#7
# asm 2: movq   72(<pp=%rsi),>mulrax=%rax
movq   72(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx1
# asm 1: mul  <mulx1=int64#9
# asm 2: mul  <mulx1=%r11
mul  %r11

# qhasm:   carry? rt2 += mulrax
# asm 1: add  <mulrax=int64#7,<rt2=int64#12
# asm 2: add  <mulrax=%rax,<rt2=%r14
add  %rax,%r14

# qhasm:   mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx

# qhasm:   carry? rt2 += mulc
# asm 1: add  <mulc=int64#14,<rt2=int64#12
# asm 2: add  <mulc=%rbx,<rt2=%r14
add  %rbx,%r14

# qhasm:   mulc = 0
# asm 1: mov  $0,>mulc=int64#14
# asm 2: mov  $0,>mulc=%rbx
mov  $0,%rbx

# qhasm:   mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
adc %rdx,%rbx

# qhasm:   mulrax = *(uint64 *)(pp + 80)
# asm 1: movq   80(<pp=int64#2),>mulrax=int64#7
# asm 2: movq   80(<pp=%rsi),>mulrax=%rax
movq   80(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx1
# asm 1: mul  <mulx1=int64#9
# asm 2: mul  <mulx1=%r11
mul  %r11

# qhasm:   carry? rt3 += mulrax
# asm 1: add  <mulrax=int64#7,<rt3=int64#13
# asm 2: add  <mulrax=%rax,<rt3=%r15
add  %rax,%r15

# qhasm:   mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx

# qhasm:   carry? rt3 += mulc
# asm 1: add  <mulc=int64#14,<rt3=int64#13
# asm 2: add  <mulc=%rbx,<rt3=%r15
add  %rbx,%r15

# qhasm:   mulc = 0
# asm 1: mov  $0,>mulc=int64#14
# asm 2: mov  $0,>mulc=%rbx
mov  $0,%rbx

# qhasm:   mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
adc %rdx,%rbx

# qhasm:   mulrax = *(uint64 *)(pp + 88)
# asm 1: movq   88(<pp=int64#2),>mulrax=int64#7
# asm 2: movq   88(<pp=%rsi),>mulrax=%rax
movq   88(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx1
# asm 1: mul  <mulx1=int64#9
# asm 2: mul  <mulx1=%r11
mul  %r11

# qhasm:   carry? mulr4 += mulrax
# asm 1: add  <mulrax=int64#7,<mulr4=int64#4
# asm 2: add  <mulrax=%rax,<mulr4=%rcx
add  %rax,%rcx

# qhasm:   mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx

# qhasm:   carry? mulr4 += mulc
# asm 1: add  <mulc=int64#14,<mulr4=int64#4
# asm 2: add  <mulc=%rbx,<mulr4=%rcx
add  %rbx,%rcx

# qhasm:   mulr5 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr5=int64#5
# asm 2: adc <mulrdx=%rdx,<mulr5=%r8
adc %rdx,%r8

# qhasm:   mulx2 = *(uint64 *)(pp + 16)
# asm 1: movq   16(<pp=int64#2),>mulx2=int64#9
# asm 2: movq   16(<pp=%rsi),>mulx2=%r11
movq   16(%rsi),%r11

# qhasm:   mulrax = *(uint64 *)(pp + 64)
# asm 1: movq   64(<pp=int64#2),>mulrax=int64#7
# asm 2: movq   64(<pp=%rsi),>mulrax=%rax
movq   64(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx2
# asm 1: mul  <mulx2=int64#9
# asm 2: mul  <mulx2=%r11
mul  %r11

# qhasm:   carry? rt2 += mulrax
# asm 1: add  <mulrax=int64#7,<rt2=int64#12
# asm 2: add  <mulrax=%rax,<rt2=%r14
add  %rax,%r14

# qhasm:   mulc = 0
# asm 1: mov  $0,>mulc=int64#14
# asm 2: mov  $0,>mulc=%rbx
mov  $0,%rbx

# qhasm:   mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
adc %rdx,%rbx

# qhasm:   mulrax = *(uint64 *)(pp + 72)
# asm 1: movq   72(<pp=int64#2),>mulrax=int64#7
# asm 2: movq   72(<pp=%rsi),>mulrax=%rax
movq   72(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx2
# asm 1: mul  <mulx2=int64#9
# asm 2: mul  <mulx2=%r11
mul  %r11

# qhasm:   carry? rt3 += mulrax
# asm 1: add  <mulrax=int64#7,<rt3=int64#13
# asm 2: add  <mulrax=%rax,<rt3=%r15
add  %rax,%r15

# qhasm:   mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx

# qhasm:   carry? rt3 += mulc
# asm 1: add  <mulc=int64#14,<rt3=int64#13
# asm 2: add  <mulc=%rbx,<rt3=%r15
add  %rbx,%r15

# qhasm:   mulc = 0
# asm 1: mov  $0,>mulc=int64#14
# asm 2: mov  $0,>mulc=%rbx
mov  $0,%rbx

# qhasm:   mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
adc %rdx,%rbx

# qhasm:   mulrax = *(uint64 *)(pp + 80)
# asm 1: movq   80(<pp=int64#2),>mulrax=int64#7
# asm 2: movq   80(<pp=%rsi),>mulrax=%rax
movq   80(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx2
# asm 1: mul  <mulx2=int64#9
# asm 2: mul  <mulx2=%r11
mul  %r11

# qhasm:   carry? mulr4 += mulrax
# asm 1: add  <mulrax=int64#7,<mulr4=int64#4
# asm 2: add  <mulrax=%rax,<mulr4=%rcx
add  %rax,%rcx

# qhasm:   mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx

# qhasm:   carry? mulr4 += mulc
# asm 1: add  <mulc=int64#14,<mulr4=int64#4
# asm 2: add  <mulc=%rbx,<mulr4=%rcx
add  %rbx,%rcx

# qhasm:   mulc = 0
# asm 1: mov  $0,>mulc=int64#14
# asm 2: mov  $0,>mulc=%rbx
mov  $0,%rbx

# qhasm:   mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
adc %rdx,%rbx

# qhasm:   mulrax = *(uint64 *)(pp + 88)
# asm 1: movq   88(<pp=int64#2),>mulrax=int64#7
# asm 2: movq   88(<pp=%rsi),>mulrax=%rax
movq   88(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx2
# asm 1: mul  <mulx2=int64#9
# asm 2: mul  <mulx2=%r11
mul  %r11

# qhasm:   carry? mulr5 += mulrax
# asm 1: add  <mulrax=int64#7,<mulr5=int64#5
# asm 2: add  <mulrax=%rax,<mulr5=%r8
add  %rax,%r8

# qhasm:   mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx

# qhasm:   carry? mulr5 += mulc
# asm 1: add  <mulc=int64#14,<mulr5=int64#5
# asm 2: add  <mulc=%rbx,<mulr5=%r8
add  %rbx,%r8

# qhasm:   mulr6 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr6=int64#6
# asm 2: adc <mulrdx=%rdx,<mulr6=%r9
adc %rdx,%r9

# qhasm:   mulx3 = *(uint64 *)(pp + 24)
# asm 1: movq   24(<pp=int64#2),>mulx3=int64#9
# asm 2: movq   24(<pp=%rsi),>mulx3=%r11
movq   24(%rsi),%r11

# qhasm:   mulrax = *(uint64 *)(pp + 64)
# asm 1: movq   64(<pp=int64#2),>mulrax=int64#7
# asm 2: movq   64(<pp=%rsi),>mulrax=%rax
movq   64(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx3
# asm 1: mul  <mulx3=int64#9
# asm 2: mul  <mulx3=%r11
mul  %r11

# qhasm:   carry? rt3 += mulrax
# asm 1: add  <mulrax=int64#7,<rt3=int64#13
# asm 2: add  <mulrax=%rax,<rt3=%r15
add  %rax,%r15

# qhasm:   mulc = 0
# asm 1: mov  $0,>mulc=int64#14
# asm 2: mov  $0,>mulc=%rbx
mov  $0,%rbx

# qhasm:   mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
adc %rdx,%rbx

# qhasm:   mulrax = *(uint64 *)(pp + 72)
# asm 1: movq   72(<pp=int64#2),>mulrax=int64#7
# asm 2: movq   72(<pp=%rsi),>mulrax=%rax
movq   72(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx3
# asm 1: mul  <mulx3=int64#9
# asm 2: mul  <mulx3=%r11
mul  %r11

# qhasm:   carry? mulr4 += mulrax
# asm 1: add  <mulrax=int64#7,<mulr4=int64#4
# asm 2: add  <mulrax=%rax,<mulr4=%rcx
add  %rax,%rcx

# qhasm:   mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx

# qhasm:   carry? mulr4 += mulc
# asm 1: add  <mulc=int64#14,<mulr4=int64#4
# asm 2: add  <mulc=%rbx,<mulr4=%rcx
add  %rbx,%rcx

# qhasm:   mulc = 0
# asm 1: mov  $0,>mulc=int64#14
# asm 2: mov  $0,>mulc=%rbx
mov  $0,%rbx

# qhasm:   mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
adc %rdx,%rbx

# qhasm:   mulrax = *(uint64 *)(pp + 80)
# asm 1: movq   80(<pp=int64#2),>mulrax=int64#7
# asm 2: movq   80(<pp=%rsi),>mulrax=%rax
movq   80(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx3
# asm 1: mul  <mulx3=int64#9
# asm 2: mul  <mulx3=%r11
mul  %r11

# qhasm:   carry? mulr5 += mulrax
# asm 1: add  <mulrax=int64#7,<mulr5=int64#5
# asm 2: add  <mulrax=%rax,<mulr5=%r8
add  %rax,%r8

# qhasm:   mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx

# qhasm:   carry? mulr5 += mulc
# asm 1: add  <mulc=int64#14,<mulr5=int64#5
# asm 2: add  <mulc=%rbx,<mulr5=%r8
add  %rbx,%r8

# qhasm:   mulc = 0
# asm 1: mov  $0,>mulc=int64#14
# asm 2: mov  $0,>mulc=%rbx
mov  $0,%rbx

# qhasm:   mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
adc %rdx,%rbx

# qhasm:   mulrax = *(uint64 *)(pp + 88)
# asm 1: movq   88(<pp=int64#2),>mulrax=int64#7
# asm 2: movq   88(<pp=%rsi),>mulrax=%rax
movq   88(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx3
# asm 1: mul  <mulx3=int64#9
# asm 2: mul  <mulx3=%r11
mul  %r11

# qhasm:   carry? mulr6 += mulrax
# asm 1: add  <mulrax=int64#7,<mulr6=int64#6
# asm 2: add  <mulrax=%rax,<mulr6=%r9
add  %rax,%r9

# qhasm:   mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx

# qhasm:   carry? mulr6 += mulc
# asm 1: add  <mulc=int64#14,<mulr6=int64#6
# asm 2: add  <mulc=%rbx,<mulr6=%r9
add  %rbx,%r9

# qhasm:   mulr7 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr7=int64#8
# asm 2: adc <mulrdx=%rdx,<mulr7=%r10
adc %rdx,%r10

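# note: same 38-fold as for the first product (2^256 == 38 mod p,
# assuming p = 2^255 - 19). One difference: pp is no longer needed,
# so the scaled limbs land in rsi, rcx, r8, r9 (mulr4..mulr7) with
# rax again catching the top limb as mulr8.
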
# qhasm:   mulrax = mulr4
# asm 1: mov  <mulr4=int64#4,>mulrax=int64#7
# asm 2: mov  <mulr4=%rcx,>mulrax=%rax
mov  %rcx,%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq  crypto_sign_ed25519_amd64_64_38

# qhasm:   mulr4 = mulrax
# asm 1: mov  <mulrax=int64#7,>mulr4=int64#2
# asm 2: mov  <mulrax=%rax,>mulr4=%rsi
mov  %rax,%rsi

# qhasm:   mulrax = mulr5
# asm 1: mov  <mulr5=int64#5,>mulrax=int64#7
# asm 2: mov  <mulr5=%r8,>mulrax=%rax
mov  %r8,%rax

# qhasm:   mulr5 = mulrdx
# asm 1: mov  <mulrdx=int64#3,>mulr5=int64#4
# asm 2: mov  <mulrdx=%rdx,>mulr5=%rcx
mov  %rdx,%rcx

# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq  crypto_sign_ed25519_amd64_64_38

# qhasm:   carry? mulr5 += mulrax
# asm 1: add  <mulrax=int64#7,<mulr5=int64#4
# asm 2: add  <mulrax=%rax,<mulr5=%rcx
add  %rax,%rcx

# qhasm:   mulrax = mulr6
# asm 1: mov  <mulr6=int64#6,>mulrax=int64#7
# asm 2: mov  <mulr6=%r9,>mulrax=%rax
mov  %r9,%rax

# qhasm:   mulr6 = 0
# asm 1: mov  $0,>mulr6=int64#5
# asm 2: mov  $0,>mulr6=%r8
mov  $0,%r8

# qhasm:   mulr6 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr6=int64#5
# asm 2: adc <mulrdx=%rdx,<mulr6=%r8
adc %rdx,%r8

# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq  crypto_sign_ed25519_amd64_64_38

# qhasm:   carry? mulr6 += mulrax
# asm 1: add  <mulrax=int64#7,<mulr6=int64#5
# asm 2: add  <mulrax=%rax,<mulr6=%r8
add  %rax,%r8

# qhasm:   mulrax = mulr7
# asm 1: mov  <mulr7=int64#8,>mulrax=int64#7
# asm 2: mov  <mulr7=%r10,>mulrax=%rax
mov  %r10,%rax

# qhasm:   mulr7 = 0
# asm 1: mov  $0,>mulr7=int64#6
# asm 2: mov  $0,>mulr7=%r9
mov  $0,%r9

# qhasm:   mulr7 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr7=int64#6
# asm 2: adc <mulrdx=%rdx,<mulr7=%r9
adc %rdx,%r9

# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq  crypto_sign_ed25519_amd64_64_38

# qhasm:   carry? mulr7 += mulrax
# asm 1: add  <mulrax=int64#7,<mulr7=int64#6
# asm 2: add  <mulrax=%rax,<mulr7=%r9
add  %rax,%r9

# qhasm:   mulr8 = 0
# asm 1: mov  $0,>mulr8=int64#7
# asm 2: mov  $0,>mulr8=%rax
mov  $0,%rax

# qhasm:   mulr8 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr8=int64#7
# asm 2: adc <mulrdx=%rdx,<mulr8=%rax
adc %rdx,%rax

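# note: accumulate 38*H into rt0..rt3 and resolve the final carries
# exactly as for the first product, with mulzero kept in rsi this
# time.
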
# qhasm:   carry? rt0 += mulr4
# asm 1: add  <mulr4=int64#2,<rt0=int64#10
# asm 2: add  <mulr4=%rsi,<rt0=%r12
add  %rsi,%r12

# qhasm:   carry? rt1 += mulr5 + carry
# asm 1: adc <mulr5=int64#4,<rt1=int64#11
# asm 2: adc <mulr5=%rcx,<rt1=%r13
adc %rcx,%r13

# qhasm:   carry? rt2 += mulr6 + carry
# asm 1: adc <mulr6=int64#5,<rt2=int64#12
# asm 2: adc <mulr6=%r8,<rt2=%r14
adc %r8,%r14

# qhasm:   carry? rt3 += mulr7 + carry
# asm 1: adc <mulr7=int64#6,<rt3=int64#13
# asm 2: adc <mulr7=%r9,<rt3=%r15
adc %r9,%r15

# qhasm:   mulzero = 0
# asm 1: mov  $0,>mulzero=int64#2
# asm 2: mov  $0,>mulzero=%rsi
mov  $0,%rsi

# qhasm:   mulr8 += mulzero + carry
# asm 1: adc <mulzero=int64#2,<mulr8=int64#7
# asm 2: adc <mulzero=%rsi,<mulr8=%rax
adc %rsi,%rax

# qhasm:   mulr8 *= 38
# asm 1: imulq  $38,<mulr8=int64#7,>mulr8=int64#3
# asm 2: imulq  $38,<mulr8=%rax,>mulr8=%rdx
imulq  $38,%rax,%rdx

# qhasm:   carry? rt0 += mulr8
# asm 1: add  <mulr8=int64#3,<rt0=int64#10
# asm 2: add  <mulr8=%rdx,<rt0=%r12
add  %rdx,%r12

# qhasm:   carry? rt1 += mulzero + carry
# asm 1: adc <mulzero=int64#2,<rt1=int64#11
# asm 2: adc <mulzero=%rsi,<rt1=%r13
adc %rsi,%r13

# qhasm:   carry? rt2 += mulzero + carry
# asm 1: adc <mulzero=int64#2,<rt2=int64#12
# asm 2: adc <mulzero=%rsi,<rt2=%r14
adc %rsi,%r14

# qhasm:   carry? rt3 += mulzero + carry
# asm 1: adc <mulzero=int64#2,<rt3=int64#13
# asm 2: adc <mulzero=%rsi,<rt3=%r15
adc %rsi,%r15

# qhasm:   mulzero += mulzero + carry
# asm 1: adc <mulzero=int64#2,<mulzero=int64#2
# asm 2: adc <mulzero=%rsi,<mulzero=%rsi
adc %rsi,%rsi

# qhasm:   mulzero *= 38
# asm 1: imulq  $38,<mulzero=int64#2,>mulzero=int64#2
# asm 2: imulq  $38,<mulzero=%rsi,>mulzero=%rsi
imulq  $38,%rsi,%rsi

# qhasm:   rt0 += mulzero
# asm 1: add  <mulzero=int64#2,<rt0=int64#10
# asm 2: add  <mulzero=%rsi,<rt0=%r12
add  %rsi,%r12

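# note: store the second product at rp+96..rp+120, the last of the
# four 32-byte field elements making up the output point.
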
# qhasm: *(uint64 *)(rp + 96) = rt0
# asm 1: movq   <rt0=int64#10,96(<rp=int64#1)
# asm 2: movq   <rt0=%r12,96(<rp=%rdi)
movq   %r12,96(%rdi)

# qhasm: *(uint64 *)(rp + 104) = rt1
# asm 1: movq   <rt1=int64#11,104(<rp=int64#1)
# asm 2: movq   <rt1=%r13,104(<rp=%rdi)
movq   %r13,104(%rdi)

# qhasm: *(uint64 *)(rp + 112) = rt2
# asm 1: movq   <rt2=int64#12,112(<rp=int64#1)
# asm 2: movq   <rt2=%r14,112(<rp=%rdi)
movq   %r14,112(%rdi)

# qhasm: *(uint64 *)(rp + 120) = rt3
# asm 1: movq   <rt3=int64#13,120(<rp=int64#1)
# asm 2: movq   <rt3=%r15,120(<rp=%rdi)
movq   %r15,120(%rdi)

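# note: epilogue. The first stack slot holds the frame-adjustment
# value computed on entry; it is reloaded into r11 so the aligned
# frame can be unwound below. The remaining slots restore the
# callee-saved registers r12..r15, rbx and rbp.
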
# qhasm:   caller1 = caller1_stack
# asm 1: movq <caller1_stack=stack64#1,>caller1=int64#9
# asm 2: movq <caller1_stack=0(%rsp),>caller1=%r11
movq 0(%rsp),%r11

# qhasm:   caller2 = caller2_stack
# asm 1: movq <caller2_stack=stack64#2,>caller2=int64#10
# asm 2: movq <caller2_stack=8(%rsp),>caller2=%r12
movq 8(%rsp),%r12

# qhasm:   caller3 = caller3_stack
# asm 1: movq <caller3_stack=stack64#3,>caller3=int64#11
# asm 2: movq <caller3_stack=16(%rsp),>caller3=%r13
movq 16(%rsp),%r13

# qhasm:   caller4 = caller4_stack
# asm 1: movq <caller4_stack=stack64#4,>caller4=int64#12
# asm 2: movq <caller4_stack=24(%rsp),>caller4=%r14
movq 24(%rsp),%r14

# qhasm:   caller5 = caller5_stack
# asm 1: movq <caller5_stack=stack64#5,>caller5=int64#13
# asm 2: movq <caller5_stack=32(%rsp),>caller5=%r15
movq 32(%rsp),%r15

# qhasm:   caller6 = caller6_stack
# asm 1: movq <caller6_stack=stack64#6,>caller6=int64#14
# asm 2: movq <caller6_stack=40(%rsp),>caller6=%rbx
movq 40(%rsp),%rbx

# qhasm:   caller7 = caller7_stack
# asm 1: movq <caller7_stack=stack64#7,>caller7=int64#15
# asm 2: movq <caller7_stack=48(%rsp),>caller7=%rbp
movq 48(%rsp),%rbp

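# note: qhasm's standard leave sequence: unwind the frame and copy
# the two input registers to rax/rdx; the C callers presumably treat
# this function as void and ignore those values.
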
# qhasm: leave
add %r11,%rsp
mov %rdi,%rax
mov %rsi,%rdx
ret