1
2# qhasm: int64 rp
3
4# qhasm: int64 pp
5
6# qhasm: input rp
7
8# qhasm: input pp
9
10# qhasm:   int64 caller1
11
12# qhasm:   int64 caller2
13
14# qhasm:   int64 caller3
15
16# qhasm:   int64 caller4
17
18# qhasm:   int64 caller5
19
20# qhasm:   int64 caller6
21
22# qhasm:   int64 caller7
23
24# qhasm:   caller caller1
25
26# qhasm:   caller caller2
27
28# qhasm:   caller caller3
29
30# qhasm:   caller caller4
31
32# qhasm:   caller caller5
33
34# qhasm:   caller caller6
35
36# qhasm:   caller caller7
37
38# qhasm:   stack64 caller1_stack
39
40# qhasm:   stack64 caller2_stack
41
42# qhasm:   stack64 caller3_stack
43
44# qhasm:   stack64 caller4_stack
45
46# qhasm:   stack64 caller5_stack
47
48# qhasm:   stack64 caller6_stack
49
50# qhasm:   stack64 caller7_stack
51
52# qhasm: int64 rx0
53
54# qhasm: int64 rx1
55
56# qhasm: int64 rx2
57
58# qhasm: int64 rx3
59
60# qhasm: int64 ry0
61
62# qhasm: int64 ry1
63
64# qhasm: int64 ry2
65
66# qhasm: int64 ry3
67
68# qhasm: int64 rz0
69
70# qhasm: int64 rz1
71
72# qhasm: int64 rz2
73
74# qhasm: int64 rz3
75
76# qhasm: int64 mulr4
77
78# qhasm: int64 mulr5
79
80# qhasm: int64 mulr6
81
82# qhasm: int64 mulr7
83
84# qhasm: int64 mulr8
85
86# qhasm: int64 mulrax
87
88# qhasm: int64 mulrdx
89
90# qhasm: int64 mulx0
91
92# qhasm: int64 mulx1
93
94# qhasm: int64 mulx2
95
96# qhasm: int64 mulx3
97
98# qhasm: int64 mulc
99
100# qhasm: int64 mulzero
101
102# qhasm: int64 muli38
103
104# qhasm: enter crypto_sign_ed25519_amd64_64_ge25519_p1p1_to_p2
105.text
106.p2align 5
107.globl _crypto_sign_ed25519_amd64_64_ge25519_p1p1_to_p2
108.globl crypto_sign_ed25519_amd64_64_ge25519_p1p1_to_p2
109_crypto_sign_ed25519_amd64_64_ge25519_p1p1_to_p2:
110crypto_sign_ed25519_amd64_64_ge25519_p1p1_to_p2:
111mov %rsp,%r11
112and $31,%r11
113add $64,%r11
114sub %r11,%rsp
115
116# qhasm:   caller1_stack = caller1
117# asm 1: movq <caller1=int64#9,>caller1_stack=stack64#1
118# asm 2: movq <caller1=%r11,>caller1_stack=0(%rsp)
119movq %r11,0(%rsp)
120
121# qhasm:   caller2_stack = caller2
122# asm 1: movq <caller2=int64#10,>caller2_stack=stack64#2
123# asm 2: movq <caller2=%r12,>caller2_stack=8(%rsp)
124movq %r12,8(%rsp)
125
126# qhasm:   caller3_stack = caller3
127# asm 1: movq <caller3=int64#11,>caller3_stack=stack64#3
128# asm 2: movq <caller3=%r13,>caller3_stack=16(%rsp)
129movq %r13,16(%rsp)
130
131# qhasm:   caller4_stack = caller4
132# asm 1: movq <caller4=int64#12,>caller4_stack=stack64#4
133# asm 2: movq <caller4=%r14,>caller4_stack=24(%rsp)
134movq %r14,24(%rsp)
135
136# qhasm:   caller5_stack = caller5
137# asm 1: movq <caller5=int64#13,>caller5_stack=stack64#5
138# asm 2: movq <caller5=%r15,>caller5_stack=32(%rsp)
139movq %r15,32(%rsp)
140
141# qhasm:   caller6_stack = caller6
142# asm 1: movq <caller6=int64#14,>caller6_stack=stack64#6
143# asm 2: movq <caller6=%rbx,>caller6_stack=40(%rsp)
144movq %rbx,40(%rsp)
145
146# qhasm:   caller7_stack = caller7
147# asm 1: movq <caller7=int64#15,>caller7_stack=stack64#7
148# asm 2: movq <caller7=%rbp,>caller7_stack=48(%rsp)
149movq %rbp,48(%rsp)
150
151# qhasm:   mulr4 = 0
152# asm 1: mov  $0,>mulr4=int64#4
153# asm 2: mov  $0,>mulr4=%rcx
154mov  $0,%rcx
155
156# qhasm:   mulr5 = 0
157# asm 1: mov  $0,>mulr5=int64#5
158# asm 2: mov  $0,>mulr5=%r8
159mov  $0,%r8
160
161# qhasm:   mulr6 = 0
162# asm 1: mov  $0,>mulr6=int64#6
163# asm 2: mov  $0,>mulr6=%r9
164mov  $0,%r9
165
166# qhasm:   mulr7 = 0
167# asm 1: mov  $0,>mulr7=int64#8
168# asm 2: mov  $0,>mulr7=%r10
169mov  $0,%r10
170
171# qhasm:   mulx0 = *(uint64 *)(pp + 0)
172# asm 1: movq   0(<pp=int64#2),>mulx0=int64#9
173# asm 2: movq   0(<pp=%rsi),>mulx0=%r11
174movq   0(%rsi),%r11
175
176# qhasm:   mulrax = *(uint64 *)(pp + 96)
177# asm 1: movq   96(<pp=int64#2),>mulrax=int64#7
178# asm 2: movq   96(<pp=%rsi),>mulrax=%rax
179movq   96(%rsi),%rax
180
181# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx0
182# asm 1: mul  <mulx0=int64#9
183# asm 2: mul  <mulx0=%r11
184mul  %r11
185
186# qhasm:   rx0 = mulrax
187# asm 1: mov  <mulrax=int64#7,>rx0=int64#10
188# asm 2: mov  <mulrax=%rax,>rx0=%r12
189mov  %rax,%r12
190
191# qhasm:   rx1 = mulrdx
192# asm 1: mov  <mulrdx=int64#3,>rx1=int64#11
193# asm 2: mov  <mulrdx=%rdx,>rx1=%r13
194mov  %rdx,%r13
195
196# qhasm:   mulrax = *(uint64 *)(pp + 104)
197# asm 1: movq   104(<pp=int64#2),>mulrax=int64#7
198# asm 2: movq   104(<pp=%rsi),>mulrax=%rax
199movq   104(%rsi),%rax
200
201# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx0
202# asm 1: mul  <mulx0=int64#9
203# asm 2: mul  <mulx0=%r11
204mul  %r11
205
206# qhasm:   carry? rx1 += mulrax
207# asm 1: add  <mulrax=int64#7,<rx1=int64#11
208# asm 2: add  <mulrax=%rax,<rx1=%r13
209add  %rax,%r13
210
211# qhasm:   rx2 = 0
212# asm 1: mov  $0,>rx2=int64#12
213# asm 2: mov  $0,>rx2=%r14
214mov  $0,%r14
215
216# qhasm:   rx2 += mulrdx + carry
217# asm 1: adc <mulrdx=int64#3,<rx2=int64#12
218# asm 2: adc <mulrdx=%rdx,<rx2=%r14
219adc %rdx,%r14
220
221# qhasm:   mulrax = *(uint64 *)(pp + 112)
222# asm 1: movq   112(<pp=int64#2),>mulrax=int64#7
223# asm 2: movq   112(<pp=%rsi),>mulrax=%rax
224movq   112(%rsi),%rax
225
226# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx0
227# asm 1: mul  <mulx0=int64#9
228# asm 2: mul  <mulx0=%r11
229mul  %r11
230
231# qhasm:   carry? rx2 += mulrax
232# asm 1: add  <mulrax=int64#7,<rx2=int64#12
233# asm 2: add  <mulrax=%rax,<rx2=%r14
234add  %rax,%r14
235
236# qhasm:   rx3 = 0
237# asm 1: mov  $0,>rx3=int64#13
238# asm 2: mov  $0,>rx3=%r15
239mov  $0,%r15
240
241# qhasm:   rx3 += mulrdx + carry
242# asm 1: adc <mulrdx=int64#3,<rx3=int64#13
243# asm 2: adc <mulrdx=%rdx,<rx3=%r15
244adc %rdx,%r15
245
246# qhasm:   mulrax = *(uint64 *)(pp + 120)
247# asm 1: movq   120(<pp=int64#2),>mulrax=int64#7
248# asm 2: movq   120(<pp=%rsi),>mulrax=%rax
249movq   120(%rsi),%rax
250
251# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx0
252# asm 1: mul  <mulx0=int64#9
253# asm 2: mul  <mulx0=%r11
254mul  %r11
255
256# qhasm:   carry? rx3 += mulrax
257# asm 1: add  <mulrax=int64#7,<rx3=int64#13
258# asm 2: add  <mulrax=%rax,<rx3=%r15
259add  %rax,%r15
260
261# qhasm:   mulr4 += mulrdx + carry
262# asm 1: adc <mulrdx=int64#3,<mulr4=int64#4
263# asm 2: adc <mulrdx=%rdx,<mulr4=%rcx
264adc %rdx,%rcx
265
266# qhasm:   mulx1 = *(uint64 *)(pp + 8)
267# asm 1: movq   8(<pp=int64#2),>mulx1=int64#9
268# asm 2: movq   8(<pp=%rsi),>mulx1=%r11
269movq   8(%rsi),%r11
270
271# qhasm:   mulrax = *(uint64 *)(pp + 96)
272# asm 1: movq   96(<pp=int64#2),>mulrax=int64#7
273# asm 2: movq   96(<pp=%rsi),>mulrax=%rax
274movq   96(%rsi),%rax
275
276# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx1
277# asm 1: mul  <mulx1=int64#9
278# asm 2: mul  <mulx1=%r11
279mul  %r11
280
281# qhasm:   carry? rx1 += mulrax
282# asm 1: add  <mulrax=int64#7,<rx1=int64#11
283# asm 2: add  <mulrax=%rax,<rx1=%r13
284add  %rax,%r13
285
286# qhasm:   mulc = 0
287# asm 1: mov  $0,>mulc=int64#14
288# asm 2: mov  $0,>mulc=%rbx
289mov  $0,%rbx
290
291# qhasm:   mulc += mulrdx + carry
292# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
293# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
294adc %rdx,%rbx
295
296# qhasm:   mulrax = *(uint64 *)(pp + 104)
297# asm 1: movq   104(<pp=int64#2),>mulrax=int64#7
298# asm 2: movq   104(<pp=%rsi),>mulrax=%rax
299movq   104(%rsi),%rax
300
301# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx1
302# asm 1: mul  <mulx1=int64#9
303# asm 2: mul  <mulx1=%r11
304mul  %r11
305
306# qhasm:   carry? rx2 += mulrax
307# asm 1: add  <mulrax=int64#7,<rx2=int64#12
308# asm 2: add  <mulrax=%rax,<rx2=%r14
309add  %rax,%r14
310
311# qhasm:   mulrdx += 0 + carry
312# asm 1: adc $0,<mulrdx=int64#3
313# asm 2: adc $0,<mulrdx=%rdx
314adc $0,%rdx
315
316# qhasm:   carry? rx2 += mulc
317# asm 1: add  <mulc=int64#14,<rx2=int64#12
318# asm 2: add  <mulc=%rbx,<rx2=%r14
319add  %rbx,%r14
320
321# qhasm:   mulc = 0
322# asm 1: mov  $0,>mulc=int64#14
323# asm 2: mov  $0,>mulc=%rbx
324mov  $0,%rbx
325
326# qhasm:   mulc += mulrdx + carry
327# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
328# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
329adc %rdx,%rbx
330
331# qhasm:   mulrax = *(uint64 *)(pp + 112)
332# asm 1: movq   112(<pp=int64#2),>mulrax=int64#7
333# asm 2: movq   112(<pp=%rsi),>mulrax=%rax
334movq   112(%rsi),%rax
335
336# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx1
337# asm 1: mul  <mulx1=int64#9
338# asm 2: mul  <mulx1=%r11
339mul  %r11
340
341# qhasm:   carry? rx3 += mulrax
342# asm 1: add  <mulrax=int64#7,<rx3=int64#13
343# asm 2: add  <mulrax=%rax,<rx3=%r15
344add  %rax,%r15
345
346# qhasm:   mulrdx += 0 + carry
347# asm 1: adc $0,<mulrdx=int64#3
348# asm 2: adc $0,<mulrdx=%rdx
349adc $0,%rdx
350
351# qhasm:   carry? rx3 += mulc
352# asm 1: add  <mulc=int64#14,<rx3=int64#13
353# asm 2: add  <mulc=%rbx,<rx3=%r15
354add  %rbx,%r15
355
356# qhasm:   mulc = 0
357# asm 1: mov  $0,>mulc=int64#14
358# asm 2: mov  $0,>mulc=%rbx
359mov  $0,%rbx
360
361# qhasm:   mulc += mulrdx + carry
362# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
363# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
364adc %rdx,%rbx
365
366# qhasm:   mulrax = *(uint64 *)(pp + 120)
367# asm 1: movq   120(<pp=int64#2),>mulrax=int64#7
368# asm 2: movq   120(<pp=%rsi),>mulrax=%rax
369movq   120(%rsi),%rax
370
371# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx1
372# asm 1: mul  <mulx1=int64#9
373# asm 2: mul  <mulx1=%r11
374mul  %r11
375
376# qhasm:   carry? mulr4 += mulrax
377# asm 1: add  <mulrax=int64#7,<mulr4=int64#4
378# asm 2: add  <mulrax=%rax,<mulr4=%rcx
379add  %rax,%rcx
380
381# qhasm:   mulrdx += 0 + carry
382# asm 1: adc $0,<mulrdx=int64#3
383# asm 2: adc $0,<mulrdx=%rdx
384adc $0,%rdx
385
386# qhasm:   carry? mulr4 += mulc
387# asm 1: add  <mulc=int64#14,<mulr4=int64#4
388# asm 2: add  <mulc=%rbx,<mulr4=%rcx
389add  %rbx,%rcx
390
391# qhasm:   mulr5 += mulrdx + carry
392# asm 1: adc <mulrdx=int64#3,<mulr5=int64#5
393# asm 2: adc <mulrdx=%rdx,<mulr5=%r8
394adc %rdx,%r8
395
396# qhasm:   mulx2 = *(uint64 *)(pp + 16)
397# asm 1: movq   16(<pp=int64#2),>mulx2=int64#9
398# asm 2: movq   16(<pp=%rsi),>mulx2=%r11
399movq   16(%rsi),%r11
400
401# qhasm:   mulrax = *(uint64 *)(pp + 96)
402# asm 1: movq   96(<pp=int64#2),>mulrax=int64#7
403# asm 2: movq   96(<pp=%rsi),>mulrax=%rax
404movq   96(%rsi),%rax
405
406# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx2
407# asm 1: mul  <mulx2=int64#9
408# asm 2: mul  <mulx2=%r11
409mul  %r11
410
411# qhasm:   carry? rx2 += mulrax
412# asm 1: add  <mulrax=int64#7,<rx2=int64#12
413# asm 2: add  <mulrax=%rax,<rx2=%r14
414add  %rax,%r14
415
416# qhasm:   mulc = 0
417# asm 1: mov  $0,>mulc=int64#14
418# asm 2: mov  $0,>mulc=%rbx
419mov  $0,%rbx
420
421# qhasm:   mulc += mulrdx + carry
422# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
423# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
424adc %rdx,%rbx
425
426# qhasm:   mulrax = *(uint64 *)(pp + 104)
427# asm 1: movq   104(<pp=int64#2),>mulrax=int64#7
428# asm 2: movq   104(<pp=%rsi),>mulrax=%rax
429movq   104(%rsi),%rax
430
431# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx2
432# asm 1: mul  <mulx2=int64#9
433# asm 2: mul  <mulx2=%r11
434mul  %r11
435
436# qhasm:   carry? rx3 += mulrax
437# asm 1: add  <mulrax=int64#7,<rx3=int64#13
438# asm 2: add  <mulrax=%rax,<rx3=%r15
439add  %rax,%r15
440
441# qhasm:   mulrdx += 0 + carry
442# asm 1: adc $0,<mulrdx=int64#3
443# asm 2: adc $0,<mulrdx=%rdx
444adc $0,%rdx
445
446# qhasm:   carry? rx3 += mulc
447# asm 1: add  <mulc=int64#14,<rx3=int64#13
448# asm 2: add  <mulc=%rbx,<rx3=%r15
449add  %rbx,%r15
450
451# qhasm:   mulc = 0
452# asm 1: mov  $0,>mulc=int64#14
453# asm 2: mov  $0,>mulc=%rbx
454mov  $0,%rbx
455
456# qhasm:   mulc += mulrdx + carry
457# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
458# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
459adc %rdx,%rbx
460
461# qhasm:   mulrax = *(uint64 *)(pp + 112)
462# asm 1: movq   112(<pp=int64#2),>mulrax=int64#7
463# asm 2: movq   112(<pp=%rsi),>mulrax=%rax
464movq   112(%rsi),%rax
465
466# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx2
467# asm 1: mul  <mulx2=int64#9
468# asm 2: mul  <mulx2=%r11
469mul  %r11
470
471# qhasm:   carry? mulr4 += mulrax
472# asm 1: add  <mulrax=int64#7,<mulr4=int64#4
473# asm 2: add  <mulrax=%rax,<mulr4=%rcx
474add  %rax,%rcx
475
476# qhasm:   mulrdx += 0 + carry
477# asm 1: adc $0,<mulrdx=int64#3
478# asm 2: adc $0,<mulrdx=%rdx
479adc $0,%rdx
480
481# qhasm:   carry? mulr4 += mulc
482# asm 1: add  <mulc=int64#14,<mulr4=int64#4
483# asm 2: add  <mulc=%rbx,<mulr4=%rcx
484add  %rbx,%rcx
485
486# qhasm:   mulc = 0
487# asm 1: mov  $0,>mulc=int64#14
488# asm 2: mov  $0,>mulc=%rbx
489mov  $0,%rbx
490
491# qhasm:   mulc += mulrdx + carry
492# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
493# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
494adc %rdx,%rbx
495
496# qhasm:   mulrax = *(uint64 *)(pp + 120)
497# asm 1: movq   120(<pp=int64#2),>mulrax=int64#7
498# asm 2: movq   120(<pp=%rsi),>mulrax=%rax
499movq   120(%rsi),%rax
500
501# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx2
502# asm 1: mul  <mulx2=int64#9
503# asm 2: mul  <mulx2=%r11
504mul  %r11
505
506# qhasm:   carry? mulr5 += mulrax
507# asm 1: add  <mulrax=int64#7,<mulr5=int64#5
508# asm 2: add  <mulrax=%rax,<mulr5=%r8
509add  %rax,%r8
510
511# qhasm:   mulrdx += 0 + carry
512# asm 1: adc $0,<mulrdx=int64#3
513# asm 2: adc $0,<mulrdx=%rdx
514adc $0,%rdx
515
516# qhasm:   carry? mulr5 += mulc
517# asm 1: add  <mulc=int64#14,<mulr5=int64#5
518# asm 2: add  <mulc=%rbx,<mulr5=%r8
519add  %rbx,%r8
520
521# qhasm:   mulr6 += mulrdx + carry
522# asm 1: adc <mulrdx=int64#3,<mulr6=int64#6
523# asm 2: adc <mulrdx=%rdx,<mulr6=%r9
524adc %rdx,%r9
525
526# qhasm:   mulx3 = *(uint64 *)(pp + 24)
527# asm 1: movq   24(<pp=int64#2),>mulx3=int64#9
528# asm 2: movq   24(<pp=%rsi),>mulx3=%r11
529movq   24(%rsi),%r11
530
531# qhasm:   mulrax = *(uint64 *)(pp + 96)
532# asm 1: movq   96(<pp=int64#2),>mulrax=int64#7
533# asm 2: movq   96(<pp=%rsi),>mulrax=%rax
534movq   96(%rsi),%rax
535
536# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx3
537# asm 1: mul  <mulx3=int64#9
538# asm 2: mul  <mulx3=%r11
539mul  %r11
540
541# qhasm:   carry? rx3 += mulrax
542# asm 1: add  <mulrax=int64#7,<rx3=int64#13
543# asm 2: add  <mulrax=%rax,<rx3=%r15
544add  %rax,%r15
545
546# qhasm:   mulc = 0
547# asm 1: mov  $0,>mulc=int64#14
548# asm 2: mov  $0,>mulc=%rbx
549mov  $0,%rbx
550
551# qhasm:   mulc += mulrdx + carry
552# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
553# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
554adc %rdx,%rbx
555
556# qhasm:   mulrax = *(uint64 *)(pp + 104)
557# asm 1: movq   104(<pp=int64#2),>mulrax=int64#7
558# asm 2: movq   104(<pp=%rsi),>mulrax=%rax
559movq   104(%rsi),%rax
560
561# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx3
562# asm 1: mul  <mulx3=int64#9
563# asm 2: mul  <mulx3=%r11
564mul  %r11
565
566# qhasm:   carry? mulr4 += mulrax
567# asm 1: add  <mulrax=int64#7,<mulr4=int64#4
568# asm 2: add  <mulrax=%rax,<mulr4=%rcx
569add  %rax,%rcx
570
571# qhasm:   mulrdx += 0 + carry
572# asm 1: adc $0,<mulrdx=int64#3
573# asm 2: adc $0,<mulrdx=%rdx
574adc $0,%rdx
575
576# qhasm:   carry? mulr4 += mulc
577# asm 1: add  <mulc=int64#14,<mulr4=int64#4
578# asm 2: add  <mulc=%rbx,<mulr4=%rcx
579add  %rbx,%rcx
580
581# qhasm:   mulc = 0
582# asm 1: mov  $0,>mulc=int64#14
583# asm 2: mov  $0,>mulc=%rbx
584mov  $0,%rbx
585
586# qhasm:   mulc += mulrdx + carry
587# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
588# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
589adc %rdx,%rbx
590
591# qhasm:   mulrax = *(uint64 *)(pp + 112)
592# asm 1: movq   112(<pp=int64#2),>mulrax=int64#7
593# asm 2: movq   112(<pp=%rsi),>mulrax=%rax
594movq   112(%rsi),%rax
595
596# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx3
597# asm 1: mul  <mulx3=int64#9
598# asm 2: mul  <mulx3=%r11
599mul  %r11
600
601# qhasm:   carry? mulr5 += mulrax
602# asm 1: add  <mulrax=int64#7,<mulr5=int64#5
603# asm 2: add  <mulrax=%rax,<mulr5=%r8
604add  %rax,%r8
605
606# qhasm:   mulrdx += 0 + carry
607# asm 1: adc $0,<mulrdx=int64#3
608# asm 2: adc $0,<mulrdx=%rdx
609adc $0,%rdx
610
611# qhasm:   carry? mulr5 += mulc
612# asm 1: add  <mulc=int64#14,<mulr5=int64#5
613# asm 2: add  <mulc=%rbx,<mulr5=%r8
614add  %rbx,%r8
615
616# qhasm:   mulc = 0
617# asm 1: mov  $0,>mulc=int64#14
618# asm 2: mov  $0,>mulc=%rbx
619mov  $0,%rbx
620
621# qhasm:   mulc += mulrdx + carry
622# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
623# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
624adc %rdx,%rbx
625
626# qhasm:   mulrax = *(uint64 *)(pp + 120)
627# asm 1: movq   120(<pp=int64#2),>mulrax=int64#7
628# asm 2: movq   120(<pp=%rsi),>mulrax=%rax
629movq   120(%rsi),%rax
630
631# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx3
632# asm 1: mul  <mulx3=int64#9
633# asm 2: mul  <mulx3=%r11
634mul  %r11
635
636# qhasm:   carry? mulr6 += mulrax
637# asm 1: add  <mulrax=int64#7,<mulr6=int64#6
638# asm 2: add  <mulrax=%rax,<mulr6=%r9
639add  %rax,%r9
640
641# qhasm:   mulrdx += 0 + carry
642# asm 1: adc $0,<mulrdx=int64#3
643# asm 2: adc $0,<mulrdx=%rdx
644adc $0,%rdx
645
646# qhasm:   carry? mulr6 += mulc
647# asm 1: add  <mulc=int64#14,<mulr6=int64#6
648# asm 2: add  <mulc=%rbx,<mulr6=%r9
649add  %rbx,%r9
650
651# qhasm:   mulr7 += mulrdx + carry
652# asm 1: adc <mulrdx=int64#3,<mulr7=int64#8
653# asm 2: adc <mulrdx=%rdx,<mulr7=%r10
654adc %rdx,%r10
655
656# qhasm:   mulrax = mulr4
657# asm 1: mov  <mulr4=int64#4,>mulrax=int64#7
658# asm 2: mov  <mulr4=%rcx,>mulrax=%rax
659mov  %rcx,%rax
660
661# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
662mulq  crypto_sign_ed25519_amd64_64_38
663
664# qhasm:   mulr4 = mulrax
665# asm 1: mov  <mulrax=int64#7,>mulr4=int64#4
666# asm 2: mov  <mulrax=%rax,>mulr4=%rcx
667mov  %rax,%rcx
668
669# qhasm:   mulrax = mulr5
670# asm 1: mov  <mulr5=int64#5,>mulrax=int64#7
671# asm 2: mov  <mulr5=%r8,>mulrax=%rax
672mov  %r8,%rax
673
674# qhasm:   mulr5 = mulrdx
675# asm 1: mov  <mulrdx=int64#3,>mulr5=int64#5
676# asm 2: mov  <mulrdx=%rdx,>mulr5=%r8
677mov  %rdx,%r8
678
679# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
680mulq  crypto_sign_ed25519_amd64_64_38
681
682# qhasm:   carry? mulr5 += mulrax
683# asm 1: add  <mulrax=int64#7,<mulr5=int64#5
684# asm 2: add  <mulrax=%rax,<mulr5=%r8
685add  %rax,%r8
686
687# qhasm:   mulrax = mulr6
688# asm 1: mov  <mulr6=int64#6,>mulrax=int64#7
689# asm 2: mov  <mulr6=%r9,>mulrax=%rax
690mov  %r9,%rax
691
692# qhasm:   mulr6 = 0
693# asm 1: mov  $0,>mulr6=int64#6
694# asm 2: mov  $0,>mulr6=%r9
695mov  $0,%r9
696
697# qhasm:   mulr6 += mulrdx + carry
698# asm 1: adc <mulrdx=int64#3,<mulr6=int64#6
699# asm 2: adc <mulrdx=%rdx,<mulr6=%r9
700adc %rdx,%r9
701
702# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
703mulq  crypto_sign_ed25519_amd64_64_38
704
705# qhasm:   carry? mulr6 += mulrax
706# asm 1: add  <mulrax=int64#7,<mulr6=int64#6
707# asm 2: add  <mulrax=%rax,<mulr6=%r9
708add  %rax,%r9
709
710# qhasm:   mulrax = mulr7
711# asm 1: mov  <mulr7=int64#8,>mulrax=int64#7
712# asm 2: mov  <mulr7=%r10,>mulrax=%rax
713mov  %r10,%rax
714
715# qhasm:   mulr7 = 0
716# asm 1: mov  $0,>mulr7=int64#8
717# asm 2: mov  $0,>mulr7=%r10
718mov  $0,%r10
719
720# qhasm:   mulr7 += mulrdx + carry
721# asm 1: adc <mulrdx=int64#3,<mulr7=int64#8
722# asm 2: adc <mulrdx=%rdx,<mulr7=%r10
723adc %rdx,%r10
724
725# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
726mulq  crypto_sign_ed25519_amd64_64_38
727
728# qhasm:   carry? mulr7 += mulrax
729# asm 1: add  <mulrax=int64#7,<mulr7=int64#8
730# asm 2: add  <mulrax=%rax,<mulr7=%r10
731add  %rax,%r10
732
733# qhasm:   mulr8 = 0
734# asm 1: mov  $0,>mulr8=int64#7
735# asm 2: mov  $0,>mulr8=%rax
736mov  $0,%rax
737
738# qhasm:   mulr8 += mulrdx + carry
739# asm 1: adc <mulrdx=int64#3,<mulr8=int64#7
740# asm 2: adc <mulrdx=%rdx,<mulr8=%rax
741adc %rdx,%rax
742
743# qhasm:   carry? rx0 += mulr4
744# asm 1: add  <mulr4=int64#4,<rx0=int64#10
745# asm 2: add  <mulr4=%rcx,<rx0=%r12
746add  %rcx,%r12
747
748# qhasm:   carry? rx1 += mulr5 + carry
749# asm 1: adc <mulr5=int64#5,<rx1=int64#11
750# asm 2: adc <mulr5=%r8,<rx1=%r13
751adc %r8,%r13
752
753# qhasm:   carry? rx2 += mulr6 + carry
754# asm 1: adc <mulr6=int64#6,<rx2=int64#12
755# asm 2: adc <mulr6=%r9,<rx2=%r14
756adc %r9,%r14
757
758# qhasm:   carry? rx3 += mulr7 + carry
759# asm 1: adc <mulr7=int64#8,<rx3=int64#13
760# asm 2: adc <mulr7=%r10,<rx3=%r15
761adc %r10,%r15
762
763# qhasm:   mulzero = 0
764# asm 1: mov  $0,>mulzero=int64#3
765# asm 2: mov  $0,>mulzero=%rdx
766mov  $0,%rdx
767
768# qhasm:   mulr8 += mulzero + carry
769# asm 1: adc <mulzero=int64#3,<mulr8=int64#7
770# asm 2: adc <mulzero=%rdx,<mulr8=%rax
771adc %rdx,%rax
772
773# qhasm:   mulr8 *= 38
774# asm 1: imulq  $38,<mulr8=int64#7,>mulr8=int64#4
775# asm 2: imulq  $38,<mulr8=%rax,>mulr8=%rcx
776imulq  $38,%rax,%rcx
777
778# qhasm:   carry? rx0 += mulr8
779# asm 1: add  <mulr8=int64#4,<rx0=int64#10
780# asm 2: add  <mulr8=%rcx,<rx0=%r12
781add  %rcx,%r12
782
783# qhasm:   carry? rx1 += mulzero + carry
784# asm 1: adc <mulzero=int64#3,<rx1=int64#11
785# asm 2: adc <mulzero=%rdx,<rx1=%r13
786adc %rdx,%r13
787
788# qhasm:   carry? rx2 += mulzero + carry
789# asm 1: adc <mulzero=int64#3,<rx2=int64#12
790# asm 2: adc <mulzero=%rdx,<rx2=%r14
791adc %rdx,%r14
792
793# qhasm:   carry? rx3 += mulzero + carry
794# asm 1: adc <mulzero=int64#3,<rx3=int64#13
795# asm 2: adc <mulzero=%rdx,<rx3=%r15
796adc %rdx,%r15
797
798# qhasm:   mulzero += mulzero + carry
799# asm 1: adc <mulzero=int64#3,<mulzero=int64#3
800# asm 2: adc <mulzero=%rdx,<mulzero=%rdx
801adc %rdx,%rdx
802
803# qhasm:   mulzero *= 38
804# asm 1: imulq  $38,<mulzero=int64#3,>mulzero=int64#3
805# asm 2: imulq  $38,<mulzero=%rdx,>mulzero=%rdx
806imulq  $38,%rdx,%rdx
807
808# qhasm:   rx0 += mulzero
809# asm 1: add  <mulzero=int64#3,<rx0=int64#10
810# asm 2: add  <mulzero=%rdx,<rx0=%r12
811add  %rdx,%r12
812
813# qhasm: *(uint64 *)(rp + 0) = rx0
814# asm 1: movq   <rx0=int64#10,0(<rp=int64#1)
815# asm 2: movq   <rx0=%r12,0(<rp=%rdi)
816movq   %r12,0(%rdi)
817
818# qhasm: *(uint64 *)(rp + 8) = rx1
819# asm 1: movq   <rx1=int64#11,8(<rp=int64#1)
820# asm 2: movq   <rx1=%r13,8(<rp=%rdi)
821movq   %r13,8(%rdi)
822
823# qhasm: *(uint64 *)(rp + 16) = rx2
824# asm 1: movq   <rx2=int64#12,16(<rp=int64#1)
825# asm 2: movq   <rx2=%r14,16(<rp=%rdi)
826movq   %r14,16(%rdi)
827
828# qhasm: *(uint64 *)(rp + 24) = rx3
829# asm 1: movq   <rx3=int64#13,24(<rp=int64#1)
830# asm 2: movq   <rx3=%r15,24(<rp=%rdi)
831movq   %r15,24(%rdi)
832
833# qhasm:   mulr4 = 0
834# asm 1: mov  $0,>mulr4=int64#4
835# asm 2: mov  $0,>mulr4=%rcx
836mov  $0,%rcx
837
838# qhasm:   mulr5 = 0
839# asm 1: mov  $0,>mulr5=int64#5
840# asm 2: mov  $0,>mulr5=%r8
841mov  $0,%r8
842
843# qhasm:   mulr6 = 0
844# asm 1: mov  $0,>mulr6=int64#6
845# asm 2: mov  $0,>mulr6=%r9
846mov  $0,%r9
847
848# qhasm:   mulr7 = 0
849# asm 1: mov  $0,>mulr7=int64#8
850# asm 2: mov  $0,>mulr7=%r10
851mov  $0,%r10
852
853# qhasm:   mulx0 = *(uint64 *)(pp + 64)
854# asm 1: movq   64(<pp=int64#2),>mulx0=int64#9
855# asm 2: movq   64(<pp=%rsi),>mulx0=%r11
856movq   64(%rsi),%r11
857
858# qhasm:   mulrax = *(uint64 *)(pp + 32)
859# asm 1: movq   32(<pp=int64#2),>mulrax=int64#7
860# asm 2: movq   32(<pp=%rsi),>mulrax=%rax
861movq   32(%rsi),%rax
862
863# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx0
864# asm 1: mul  <mulx0=int64#9
865# asm 2: mul  <mulx0=%r11
866mul  %r11
867
868# qhasm:   ry0 = mulrax
869# asm 1: mov  <mulrax=int64#7,>ry0=int64#10
870# asm 2: mov  <mulrax=%rax,>ry0=%r12
871mov  %rax,%r12
872
873# qhasm:   ry1 = mulrdx
874# asm 1: mov  <mulrdx=int64#3,>ry1=int64#11
875# asm 2: mov  <mulrdx=%rdx,>ry1=%r13
876mov  %rdx,%r13
877
878# qhasm:   mulrax = *(uint64 *)(pp + 40)
879# asm 1: movq   40(<pp=int64#2),>mulrax=int64#7
880# asm 2: movq   40(<pp=%rsi),>mulrax=%rax
881movq   40(%rsi),%rax
882
883# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx0
884# asm 1: mul  <mulx0=int64#9
885# asm 2: mul  <mulx0=%r11
886mul  %r11
887
888# qhasm:   carry? ry1 += mulrax
889# asm 1: add  <mulrax=int64#7,<ry1=int64#11
890# asm 2: add  <mulrax=%rax,<ry1=%r13
891add  %rax,%r13
892
893# qhasm:   ry2 = 0
894# asm 1: mov  $0,>ry2=int64#12
895# asm 2: mov  $0,>ry2=%r14
896mov  $0,%r14
897
898# qhasm:   ry2 += mulrdx + carry
899# asm 1: adc <mulrdx=int64#3,<ry2=int64#12
900# asm 2: adc <mulrdx=%rdx,<ry2=%r14
901adc %rdx,%r14
902
903# qhasm:   mulrax = *(uint64 *)(pp + 48)
904# asm 1: movq   48(<pp=int64#2),>mulrax=int64#7
905# asm 2: movq   48(<pp=%rsi),>mulrax=%rax
906movq   48(%rsi),%rax
907
908# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx0
909# asm 1: mul  <mulx0=int64#9
910# asm 2: mul  <mulx0=%r11
911mul  %r11
912
913# qhasm:   carry? ry2 += mulrax
914# asm 1: add  <mulrax=int64#7,<ry2=int64#12
915# asm 2: add  <mulrax=%rax,<ry2=%r14
916add  %rax,%r14
917
918# qhasm:   ry3 = 0
919# asm 1: mov  $0,>ry3=int64#13
920# asm 2: mov  $0,>ry3=%r15
921mov  $0,%r15
922
923# qhasm:   ry3 += mulrdx + carry
924# asm 1: adc <mulrdx=int64#3,<ry3=int64#13
925# asm 2: adc <mulrdx=%rdx,<ry3=%r15
926adc %rdx,%r15
927
928# qhasm:   mulrax = *(uint64 *)(pp + 56)
929# asm 1: movq   56(<pp=int64#2),>mulrax=int64#7
930# asm 2: movq   56(<pp=%rsi),>mulrax=%rax
931movq   56(%rsi),%rax
932
933# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx0
934# asm 1: mul  <mulx0=int64#9
935# asm 2: mul  <mulx0=%r11
936mul  %r11
937
938# qhasm:   carry? ry3 += mulrax
939# asm 1: add  <mulrax=int64#7,<ry3=int64#13
940# asm 2: add  <mulrax=%rax,<ry3=%r15
941add  %rax,%r15
942
943# qhasm:   mulr4 += mulrdx + carry
944# asm 1: adc <mulrdx=int64#3,<mulr4=int64#4
945# asm 2: adc <mulrdx=%rdx,<mulr4=%rcx
946adc %rdx,%rcx
947
948# qhasm:   mulx1 = *(uint64 *)(pp + 72)
949# asm 1: movq   72(<pp=int64#2),>mulx1=int64#9
950# asm 2: movq   72(<pp=%rsi),>mulx1=%r11
951movq   72(%rsi),%r11
952
953# qhasm:   mulrax = *(uint64 *)(pp + 32)
954# asm 1: movq   32(<pp=int64#2),>mulrax=int64#7
955# asm 2: movq   32(<pp=%rsi),>mulrax=%rax
956movq   32(%rsi),%rax
957
958# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx1
959# asm 1: mul  <mulx1=int64#9
960# asm 2: mul  <mulx1=%r11
961mul  %r11
962
963# qhasm:   carry? ry1 += mulrax
964# asm 1: add  <mulrax=int64#7,<ry1=int64#11
965# asm 2: add  <mulrax=%rax,<ry1=%r13
966add  %rax,%r13
967
968# qhasm:   mulc = 0
969# asm 1: mov  $0,>mulc=int64#14
970# asm 2: mov  $0,>mulc=%rbx
971mov  $0,%rbx
972
973# qhasm:   mulc += mulrdx + carry
974# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
975# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
976adc %rdx,%rbx
977
978# qhasm:   mulrax = *(uint64 *)(pp + 40)
979# asm 1: movq   40(<pp=int64#2),>mulrax=int64#7
980# asm 2: movq   40(<pp=%rsi),>mulrax=%rax
981movq   40(%rsi),%rax
982
983# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx1
984# asm 1: mul  <mulx1=int64#9
985# asm 2: mul  <mulx1=%r11
986mul  %r11
987
988# qhasm:   carry? ry2 += mulrax
989# asm 1: add  <mulrax=int64#7,<ry2=int64#12
990# asm 2: add  <mulrax=%rax,<ry2=%r14
991add  %rax,%r14
992
993# qhasm:   mulrdx += 0 + carry
994# asm 1: adc $0,<mulrdx=int64#3
995# asm 2: adc $0,<mulrdx=%rdx
996adc $0,%rdx
997
998# qhasm:   carry? ry2 += mulc
999# asm 1: add  <mulc=int64#14,<ry2=int64#12
1000# asm 2: add  <mulc=%rbx,<ry2=%r14
1001add  %rbx,%r14
1002
1003# qhasm:   mulc = 0
1004# asm 1: mov  $0,>mulc=int64#14
1005# asm 2: mov  $0,>mulc=%rbx
1006mov  $0,%rbx
1007
1008# qhasm:   mulc += mulrdx + carry
1009# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
1010# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
1011adc %rdx,%rbx
1012
1013# qhasm:   mulrax = *(uint64 *)(pp + 48)
1014# asm 1: movq   48(<pp=int64#2),>mulrax=int64#7
1015# asm 2: movq   48(<pp=%rsi),>mulrax=%rax
1016movq   48(%rsi),%rax
1017
1018# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx1
1019# asm 1: mul  <mulx1=int64#9
1020# asm 2: mul  <mulx1=%r11
1021mul  %r11
1022
1023# qhasm:   carry? ry3 += mulrax
1024# asm 1: add  <mulrax=int64#7,<ry3=int64#13
1025# asm 2: add  <mulrax=%rax,<ry3=%r15
1026add  %rax,%r15
1027
1028# qhasm:   mulrdx += 0 + carry
1029# asm 1: adc $0,<mulrdx=int64#3
1030# asm 2: adc $0,<mulrdx=%rdx
1031adc $0,%rdx
1032
1033# qhasm:   carry? ry3 += mulc
1034# asm 1: add  <mulc=int64#14,<ry3=int64#13
1035# asm 2: add  <mulc=%rbx,<ry3=%r15
1036add  %rbx,%r15
1037
1038# qhasm:   mulc = 0
1039# asm 1: mov  $0,>mulc=int64#14
1040# asm 2: mov  $0,>mulc=%rbx
1041mov  $0,%rbx
1042
1043# qhasm:   mulc += mulrdx + carry
1044# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
1045# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
1046adc %rdx,%rbx
1047
1048# qhasm:   mulrax = *(uint64 *)(pp + 56)
1049# asm 1: movq   56(<pp=int64#2),>mulrax=int64#7
1050# asm 2: movq   56(<pp=%rsi),>mulrax=%rax
1051movq   56(%rsi),%rax
1052
1053# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx1
1054# asm 1: mul  <mulx1=int64#9
1055# asm 2: mul  <mulx1=%r11
1056mul  %r11
1057
1058# qhasm:   carry? mulr4 += mulrax
1059# asm 1: add  <mulrax=int64#7,<mulr4=int64#4
1060# asm 2: add  <mulrax=%rax,<mulr4=%rcx
1061add  %rax,%rcx
1062
1063# qhasm:   mulrdx += 0 + carry
1064# asm 1: adc $0,<mulrdx=int64#3
1065# asm 2: adc $0,<mulrdx=%rdx
1066adc $0,%rdx
1067
1068# qhasm:   carry? mulr4 += mulc
1069# asm 1: add  <mulc=int64#14,<mulr4=int64#4
1070# asm 2: add  <mulc=%rbx,<mulr4=%rcx
1071add  %rbx,%rcx
1072
1073# qhasm:   mulr5 += mulrdx + carry
1074# asm 1: adc <mulrdx=int64#3,<mulr5=int64#5
1075# asm 2: adc <mulrdx=%rdx,<mulr5=%r8
1076adc %rdx,%r8
1077
1078# qhasm:   mulx2 = *(uint64 *)(pp + 80)
1079# asm 1: movq   80(<pp=int64#2),>mulx2=int64#9
1080# asm 2: movq   80(<pp=%rsi),>mulx2=%r11
1081movq   80(%rsi),%r11
1082
1083# qhasm:   mulrax = *(uint64 *)(pp + 32)
1084# asm 1: movq   32(<pp=int64#2),>mulrax=int64#7
1085# asm 2: movq   32(<pp=%rsi),>mulrax=%rax
1086movq   32(%rsi),%rax
1087
1088# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx2
1089# asm 1: mul  <mulx2=int64#9
1090# asm 2: mul  <mulx2=%r11
1091mul  %r11
1092
1093# qhasm:   carry? ry2 += mulrax
1094# asm 1: add  <mulrax=int64#7,<ry2=int64#12
1095# asm 2: add  <mulrax=%rax,<ry2=%r14
1096add  %rax,%r14
1097
# ---------------------------------------------------------------------------
# NOTE(review): qhasm-generated code.  The carry flag (CF) is live between
# adjacent add/adc pairs throughout, so instruction order must not be
# changed by hand.
#
# Tail of the first 4x4-limb schoolbook field multiplication (mod 2^255-19,
# radix 2^64).  This section accumulates the remaining partial products of
# mulx2 (%r11, loaded before this point -- presumably from 80(pp); confirm
# against the full listing) and of mulx3 = *(pp + 88) against the four
# limbs at 32..56(pp).  The running 512-bit product is held in
# ry0..ry3 (%r12..%r15, low half) and mulr4..mulr7 (%rcx,%r8,%r9,%r10,
# high half); mulc (%rbx) carries each column's overflow into the next limb.
# ---------------------------------------------------------------------------

# qhasm:   mulc = 0
# asm 1: mov  $0,>mulc=int64#14
# asm 2: mov  $0,>mulc=%rbx
mov  $0,%rbx

# qhasm:   mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
adc %rdx,%rbx

# qhasm:   mulrax = *(uint64 *)(pp + 40)
# asm 1: movq   40(<pp=int64#2),>mulrax=int64#7
# asm 2: movq   40(<pp=%rsi),>mulrax=%rax
movq   40(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx2
# asm 1: mul  <mulx2=int64#9
# asm 2: mul  <mulx2=%r11
mul  %r11

# qhasm:   carry? ry3 += mulrax
# asm 1: add  <mulrax=int64#7,<ry3=int64#13
# asm 2: add  <mulrax=%rax,<ry3=%r15
add  %rax,%r15

# qhasm:   mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx

# qhasm:   carry? ry3 += mulc
# asm 1: add  <mulc=int64#14,<ry3=int64#13
# asm 2: add  <mulc=%rbx,<ry3=%r15
add  %rbx,%r15

# qhasm:   mulc = 0
# asm 1: mov  $0,>mulc=int64#14
# asm 2: mov  $0,>mulc=%rbx
mov  $0,%rbx

# qhasm:   mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
adc %rdx,%rbx

# qhasm:   mulrax = *(uint64 *)(pp + 48)
# asm 1: movq   48(<pp=int64#2),>mulrax=int64#7
# asm 2: movq   48(<pp=%rsi),>mulrax=%rax
movq   48(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx2
# asm 1: mul  <mulx2=int64#9
# asm 2: mul  <mulx2=%r11
mul  %r11

# qhasm:   carry? mulr4 += mulrax
# asm 1: add  <mulrax=int64#7,<mulr4=int64#4
# asm 2: add  <mulrax=%rax,<mulr4=%rcx
add  %rax,%rcx

# qhasm:   mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx

# qhasm:   carry? mulr4 += mulc
# asm 1: add  <mulc=int64#14,<mulr4=int64#4
# asm 2: add  <mulc=%rbx,<mulr4=%rcx
add  %rbx,%rcx

# qhasm:   mulc = 0
# asm 1: mov  $0,>mulc=int64#14
# asm 2: mov  $0,>mulc=%rbx
mov  $0,%rbx

# qhasm:   mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
adc %rdx,%rbx

# qhasm:   mulrax = *(uint64 *)(pp + 56)
# asm 1: movq   56(<pp=int64#2),>mulrax=int64#7
# asm 2: movq   56(<pp=%rsi),>mulrax=%rax
movq   56(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx2
# asm 1: mul  <mulx2=int64#9
# asm 2: mul  <mulx2=%r11
mul  %r11

# qhasm:   carry? mulr5 += mulrax
# asm 1: add  <mulrax=int64#7,<mulr5=int64#5
# asm 2: add  <mulrax=%rax,<mulr5=%r8
add  %rax,%r8

# qhasm:   mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx

# qhasm:   carry? mulr5 += mulc
# asm 1: add  <mulc=int64#14,<mulr5=int64#5
# asm 2: add  <mulc=%rbx,<mulr5=%r8
add  %rbx,%r8

# qhasm:   mulr6 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr6=int64#6
# asm 2: adc <mulrdx=%rdx,<mulr6=%r9
adc %rdx,%r9

# --- last column: partial products of mulx3 = limb at 88(pp) ---

# qhasm:   mulx3 = *(uint64 *)(pp + 88)
# asm 1: movq   88(<pp=int64#2),>mulx3=int64#9
# asm 2: movq   88(<pp=%rsi),>mulx3=%r11
movq   88(%rsi),%r11

# qhasm:   mulrax = *(uint64 *)(pp + 32)
# asm 1: movq   32(<pp=int64#2),>mulrax=int64#7
# asm 2: movq   32(<pp=%rsi),>mulrax=%rax
movq   32(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx3
# asm 1: mul  <mulx3=int64#9
# asm 2: mul  <mulx3=%r11
mul  %r11

# qhasm:   carry? ry3 += mulrax
# asm 1: add  <mulrax=int64#7,<ry3=int64#13
# asm 2: add  <mulrax=%rax,<ry3=%r15
add  %rax,%r15

# qhasm:   mulc = 0
# asm 1: mov  $0,>mulc=int64#14
# asm 2: mov  $0,>mulc=%rbx
mov  $0,%rbx

# qhasm:   mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
adc %rdx,%rbx

# qhasm:   mulrax = *(uint64 *)(pp + 40)
# asm 1: movq   40(<pp=int64#2),>mulrax=int64#7
# asm 2: movq   40(<pp=%rsi),>mulrax=%rax
movq   40(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx3
# asm 1: mul  <mulx3=int64#9
# asm 2: mul  <mulx3=%r11
mul  %r11

# qhasm:   carry? mulr4 += mulrax
# asm 1: add  <mulrax=int64#7,<mulr4=int64#4
# asm 2: add  <mulrax=%rax,<mulr4=%rcx
add  %rax,%rcx

# qhasm:   mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx

# qhasm:   carry? mulr4 += mulc
# asm 1: add  <mulc=int64#14,<mulr4=int64#4
# asm 2: add  <mulc=%rbx,<mulr4=%rcx
add  %rbx,%rcx

# qhasm:   mulc = 0
# asm 1: mov  $0,>mulc=int64#14
# asm 2: mov  $0,>mulc=%rbx
mov  $0,%rbx

# qhasm:   mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
adc %rdx,%rbx

# qhasm:   mulrax = *(uint64 *)(pp + 48)
# asm 1: movq   48(<pp=int64#2),>mulrax=int64#7
# asm 2: movq   48(<pp=%rsi),>mulrax=%rax
movq   48(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx3
# asm 1: mul  <mulx3=int64#9
# asm 2: mul  <mulx3=%r11
mul  %r11

# qhasm:   carry? mulr5 += mulrax
# asm 1: add  <mulrax=int64#7,<mulr5=int64#5
# asm 2: add  <mulrax=%rax,<mulr5=%r8
add  %rax,%r8

# qhasm:   mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx

# qhasm:   carry? mulr5 += mulc
# asm 1: add  <mulc=int64#14,<mulr5=int64#5
# asm 2: add  <mulc=%rbx,<mulr5=%r8
add  %rbx,%r8

# qhasm:   mulc = 0
# asm 1: mov  $0,>mulc=int64#14
# asm 2: mov  $0,>mulc=%rbx
mov  $0,%rbx

# qhasm:   mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
adc %rdx,%rbx

# qhasm:   mulrax = *(uint64 *)(pp + 56)
# asm 1: movq   56(<pp=int64#2),>mulrax=int64#7
# asm 2: movq   56(<pp=%rsi),>mulrax=%rax
movq   56(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx3
# asm 1: mul  <mulx3=int64#9
# asm 2: mul  <mulx3=%r11
mul  %r11

# qhasm:   carry? mulr6 += mulrax
# asm 1: add  <mulrax=int64#7,<mulr6=int64#6
# asm 2: add  <mulrax=%rax,<mulr6=%r9
add  %rax,%r9

# qhasm:   mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx

# qhasm:   carry? mulr6 += mulc
# asm 1: add  <mulc=int64#14,<mulr6=int64#6
# asm 2: add  <mulc=%rbx,<mulr6=%r9
add  %rbx,%r9

# qhasm:   mulr7 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr7=int64#8
# asm 2: adc <mulrdx=%rdx,<mulr7=%r10
adc %rdx,%r10
1337
# ---------------------------------------------------------------------------
# Reduce the 512-bit product mod 2^255-19 and store the result limbs.
# Since 2^256 == 38 (mod 2^255-19), the high limbs mulr4..mulr7 are each
# multiplied by the constant crypto_sign_ed25519_amd64_64_38 and folded
# into the low limbs ry0..ry3.  A final carry out (mulr8) is folded in the
# same way, and one more conditional fold absorbs the last carry bit, so
# the result fits in four 64-bit limbs (weakly reduced).
# NOTE(review): CF is live across the interleaved mov/adc sequences below;
# do not reorder instructions.
# ---------------------------------------------------------------------------

# qhasm:   mulrax = mulr4
# asm 1: mov  <mulr4=int64#4,>mulrax=int64#7
# asm 2: mov  <mulr4=%rcx,>mulrax=%rax
mov  %rcx,%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq  crypto_sign_ed25519_amd64_64_38

# qhasm:   mulr4 = mulrax
# asm 1: mov  <mulrax=int64#7,>mulr4=int64#4
# asm 2: mov  <mulrax=%rax,>mulr4=%rcx
mov  %rax,%rcx

# qhasm:   mulrax = mulr5
# asm 1: mov  <mulr5=int64#5,>mulrax=int64#7
# asm 2: mov  <mulr5=%r8,>mulrax=%rax
mov  %r8,%rax

# qhasm:   mulr5 = mulrdx
# asm 1: mov  <mulrdx=int64#3,>mulr5=int64#5
# asm 2: mov  <mulrdx=%rdx,>mulr5=%r8
mov  %rdx,%r8

# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq  crypto_sign_ed25519_amd64_64_38

# qhasm:   carry? mulr5 += mulrax
# asm 1: add  <mulrax=int64#7,<mulr5=int64#5
# asm 2: add  <mulrax=%rax,<mulr5=%r8
add  %rax,%r8

# qhasm:   mulrax = mulr6
# asm 1: mov  <mulr6=int64#6,>mulrax=int64#7
# asm 2: mov  <mulr6=%r9,>mulrax=%rax
mov  %r9,%rax

# qhasm:   mulr6 = 0
# asm 1: mov  $0,>mulr6=int64#6
# asm 2: mov  $0,>mulr6=%r9
mov  $0,%r9

# qhasm:   mulr6 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr6=int64#6
# asm 2: adc <mulrdx=%rdx,<mulr6=%r9
adc %rdx,%r9

# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq  crypto_sign_ed25519_amd64_64_38

# qhasm:   carry? mulr6 += mulrax
# asm 1: add  <mulrax=int64#7,<mulr6=int64#6
# asm 2: add  <mulrax=%rax,<mulr6=%r9
add  %rax,%r9

# qhasm:   mulrax = mulr7
# asm 1: mov  <mulr7=int64#8,>mulrax=int64#7
# asm 2: mov  <mulr7=%r10,>mulrax=%rax
mov  %r10,%rax

# qhasm:   mulr7 = 0
# asm 1: mov  $0,>mulr7=int64#8
# asm 2: mov  $0,>mulr7=%r10
mov  $0,%r10

# qhasm:   mulr7 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr7=int64#8
# asm 2: adc <mulrdx=%rdx,<mulr7=%r10
adc %rdx,%r10

# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq  crypto_sign_ed25519_amd64_64_38

# qhasm:   carry? mulr7 += mulrax
# asm 1: add  <mulrax=int64#7,<mulr7=int64#8
# asm 2: add  <mulrax=%rax,<mulr7=%r10
add  %rax,%r10

# qhasm:   mulr8 = 0
# asm 1: mov  $0,>mulr8=int64#7
# asm 2: mov  $0,>mulr8=%rax
mov  $0,%rax

# qhasm:   mulr8 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr8=int64#7
# asm 2: adc <mulrdx=%rdx,<mulr8=%rax
adc %rdx,%rax

# --- add the 38-scaled high half into the low half ---

# qhasm:   carry? ry0 += mulr4
# asm 1: add  <mulr4=int64#4,<ry0=int64#10
# asm 2: add  <mulr4=%rcx,<ry0=%r12
add  %rcx,%r12

# qhasm:   carry? ry1 += mulr5 + carry
# asm 1: adc <mulr5=int64#5,<ry1=int64#11
# asm 2: adc <mulr5=%r8,<ry1=%r13
adc %r8,%r13

# qhasm:   carry? ry2 += mulr6 + carry
# asm 1: adc <mulr6=int64#6,<ry2=int64#12
# asm 2: adc <mulr6=%r9,<ry2=%r14
adc %r9,%r14

# qhasm:   carry? ry3 += mulr7 + carry
# asm 1: adc <mulr7=int64#8,<ry3=int64#13
# asm 2: adc <mulr7=%r10,<ry3=%r15
adc %r10,%r15

# qhasm:   mulzero = 0
# asm 1: mov  $0,>mulzero=int64#3
# asm 2: mov  $0,>mulzero=%rdx
mov  $0,%rdx

# qhasm:   mulr8 += mulzero + carry
# asm 1: adc <mulzero=int64#3,<mulr8=int64#7
# asm 2: adc <mulzero=%rdx,<mulr8=%rax
adc %rdx,%rax

# qhasm:   mulr8 *= 38
# asm 1: imulq  $38,<mulr8=int64#7,>mulr8=int64#4
# asm 2: imulq  $38,<mulr8=%rax,>mulr8=%rcx
imulq  $38,%rax,%rcx

# qhasm:   carry? ry0 += mulr8
# asm 1: add  <mulr8=int64#4,<ry0=int64#10
# asm 2: add  <mulr8=%rcx,<ry0=%r12
add  %rcx,%r12

# qhasm:   carry? ry1 += mulzero + carry
# asm 1: adc <mulzero=int64#3,<ry1=int64#11
# asm 2: adc <mulzero=%rdx,<ry1=%r13
adc %rdx,%r13

# qhasm:   carry? ry2 += mulzero + carry
# asm 1: adc <mulzero=int64#3,<ry2=int64#12
# asm 2: adc <mulzero=%rdx,<ry2=%r14
adc %rdx,%r14

# qhasm:   carry? ry3 += mulzero + carry
# asm 1: adc <mulzero=int64#3,<ry3=int64#13
# asm 2: adc <mulzero=%rdx,<ry3=%r15
adc %rdx,%r15

# final fold: mulzero becomes 38 if a carry escaped, else stays 0

# qhasm:   mulzero += mulzero + carry
# asm 1: adc <mulzero=int64#3,<mulzero=int64#3
# asm 2: adc <mulzero=%rdx,<mulzero=%rdx
adc %rdx,%rdx

# qhasm:   mulzero *= 38
# asm 1: imulq  $38,<mulzero=int64#3,>mulzero=int64#3
# asm 2: imulq  $38,<mulzero=%rdx,>mulzero=%rdx
imulq  $38,%rdx,%rdx

# qhasm:   ry0 += mulzero
# asm 1: add  <mulzero=int64#3,<ry0=int64#10
# asm 2: add  <mulzero=%rdx,<ry0=%r12
add  %rdx,%r12

# --- store the four result limbs into the output field element at rp+32 ---

# qhasm: *(uint64 *)(rp + 32) = ry0
# asm 1: movq   <ry0=int64#10,32(<rp=int64#1)
# asm 2: movq   <ry0=%r12,32(<rp=%rdi)
movq   %r12,32(%rdi)

# qhasm: *(uint64 *)(rp + 40) = ry1
# asm 1: movq   <ry1=int64#11,40(<rp=int64#1)
# asm 2: movq   <ry1=%r13,40(<rp=%rdi)
movq   %r13,40(%rdi)

# qhasm: *(uint64 *)(rp + 48) = ry2
# asm 1: movq   <ry2=int64#12,48(<rp=int64#1)
# asm 2: movq   <ry2=%r14,48(<rp=%rdi)
movq   %r14,48(%rdi)

# qhasm: *(uint64 *)(rp + 56) = ry3
# asm 1: movq   <ry3=int64#13,56(<rp=int64#1)
# asm 2: movq   <ry3=%r15,56(<rp=%rdi)
movq   %r15,56(%rdi)
1514
# ---------------------------------------------------------------------------
# Second field multiplication: full 4x4-limb schoolbook product of the
# field element at 32..56(pp) with the field element at 96..120(pp)
# (which input fields of the p1p1 point these are depends on the struct
# layout declared elsewhere -- confirm against ge25519.h).
# Low half of the 512-bit product accumulates in rz0..rz3 (%r12..%r15),
# high half in mulr4..mulr7 (%rcx,%r8,%r9,%r10); mulc (%rbx) is the
# per-column carry.  One column per mulx0..mulx3 limb, each loaded into
# %r11 in turn.
# NOTE(review): qhasm-generated; CF is live across adjacent add/adc pairs,
# so instruction order must be preserved.
# ---------------------------------------------------------------------------

# qhasm:   mulr4 = 0
# asm 1: mov  $0,>mulr4=int64#4
# asm 2: mov  $0,>mulr4=%rcx
mov  $0,%rcx

# qhasm:   mulr5 = 0
# asm 1: mov  $0,>mulr5=int64#5
# asm 2: mov  $0,>mulr5=%r8
mov  $0,%r8

# qhasm:   mulr6 = 0
# asm 1: mov  $0,>mulr6=int64#6
# asm 2: mov  $0,>mulr6=%r9
mov  $0,%r9

# qhasm:   mulr7 = 0
# asm 1: mov  $0,>mulr7=int64#8
# asm 2: mov  $0,>mulr7=%r10
mov  $0,%r10

# --- column 0: mulx0 = limb at 32(pp) ---

# qhasm:   mulx0 = *(uint64 *)(pp + 32)
# asm 1: movq   32(<pp=int64#2),>mulx0=int64#9
# asm 2: movq   32(<pp=%rsi),>mulx0=%r11
movq   32(%rsi),%r11

# qhasm:   mulrax = *(uint64 *)(pp + 96)
# asm 1: movq   96(<pp=int64#2),>mulrax=int64#7
# asm 2: movq   96(<pp=%rsi),>mulrax=%rax
movq   96(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx0
# asm 1: mul  <mulx0=int64#9
# asm 2: mul  <mulx0=%r11
mul  %r11

# qhasm:   rz0 = mulrax
# asm 1: mov  <mulrax=int64#7,>rz0=int64#10
# asm 2: mov  <mulrax=%rax,>rz0=%r12
mov  %rax,%r12

# qhasm:   rz1 = mulrdx
# asm 1: mov  <mulrdx=int64#3,>rz1=int64#11
# asm 2: mov  <mulrdx=%rdx,>rz1=%r13
mov  %rdx,%r13

# qhasm:   mulrax = *(uint64 *)(pp + 104)
# asm 1: movq   104(<pp=int64#2),>mulrax=int64#7
# asm 2: movq   104(<pp=%rsi),>mulrax=%rax
movq   104(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx0
# asm 1: mul  <mulx0=int64#9
# asm 2: mul  <mulx0=%r11
mul  %r11

# qhasm:   carry? rz1 += mulrax
# asm 1: add  <mulrax=int64#7,<rz1=int64#11
# asm 2: add  <mulrax=%rax,<rz1=%r13
add  %rax,%r13

# qhasm:   rz2 = 0
# asm 1: mov  $0,>rz2=int64#12
# asm 2: mov  $0,>rz2=%r14
mov  $0,%r14

# qhasm:   rz2 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<rz2=int64#12
# asm 2: adc <mulrdx=%rdx,<rz2=%r14
adc %rdx,%r14

# qhasm:   mulrax = *(uint64 *)(pp + 112)
# asm 1: movq   112(<pp=int64#2),>mulrax=int64#7
# asm 2: movq   112(<pp=%rsi),>mulrax=%rax
movq   112(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx0
# asm 1: mul  <mulx0=int64#9
# asm 2: mul  <mulx0=%r11
mul  %r11

# qhasm:   carry? rz2 += mulrax
# asm 1: add  <mulrax=int64#7,<rz2=int64#12
# asm 2: add  <mulrax=%rax,<rz2=%r14
add  %rax,%r14

# qhasm:   rz3 = 0
# asm 1: mov  $0,>rz3=int64#13
# asm 2: mov  $0,>rz3=%r15
mov  $0,%r15

# qhasm:   rz3 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<rz3=int64#13
# asm 2: adc <mulrdx=%rdx,<rz3=%r15
adc %rdx,%r15

# qhasm:   mulrax = *(uint64 *)(pp + 120)
# asm 1: movq   120(<pp=int64#2),>mulrax=int64#7
# asm 2: movq   120(<pp=%rsi),>mulrax=%rax
movq   120(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx0
# asm 1: mul  <mulx0=int64#9
# asm 2: mul  <mulx0=%r11
mul  %r11

# qhasm:   carry? rz3 += mulrax
# asm 1: add  <mulrax=int64#7,<rz3=int64#13
# asm 2: add  <mulrax=%rax,<rz3=%r15
add  %rax,%r15

# qhasm:   mulr4 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr4=int64#4
# asm 2: adc <mulrdx=%rdx,<mulr4=%rcx
adc %rdx,%rcx

# --- column 1: mulx1 = limb at 40(pp) ---

# qhasm:   mulx1 = *(uint64 *)(pp + 40)
# asm 1: movq   40(<pp=int64#2),>mulx1=int64#9
# asm 2: movq   40(<pp=%rsi),>mulx1=%r11
movq   40(%rsi),%r11

# qhasm:   mulrax = *(uint64 *)(pp + 96)
# asm 1: movq   96(<pp=int64#2),>mulrax=int64#7
# asm 2: movq   96(<pp=%rsi),>mulrax=%rax
movq   96(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx1
# asm 1: mul  <mulx1=int64#9
# asm 2: mul  <mulx1=%r11
mul  %r11

# qhasm:   carry? rz1 += mulrax
# asm 1: add  <mulrax=int64#7,<rz1=int64#11
# asm 2: add  <mulrax=%rax,<rz1=%r13
add  %rax,%r13

# qhasm:   mulc = 0
# asm 1: mov  $0,>mulc=int64#14
# asm 2: mov  $0,>mulc=%rbx
mov  $0,%rbx

# qhasm:   mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
adc %rdx,%rbx

# qhasm:   mulrax = *(uint64 *)(pp + 104)
# asm 1: movq   104(<pp=int64#2),>mulrax=int64#7
# asm 2: movq   104(<pp=%rsi),>mulrax=%rax
movq   104(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx1
# asm 1: mul  <mulx1=int64#9
# asm 2: mul  <mulx1=%r11
mul  %r11

# qhasm:   carry? rz2 += mulrax
# asm 1: add  <mulrax=int64#7,<rz2=int64#12
# asm 2: add  <mulrax=%rax,<rz2=%r14
add  %rax,%r14

# qhasm:   mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx

# qhasm:   carry? rz2 += mulc
# asm 1: add  <mulc=int64#14,<rz2=int64#12
# asm 2: add  <mulc=%rbx,<rz2=%r14
add  %rbx,%r14

# qhasm:   mulc = 0
# asm 1: mov  $0,>mulc=int64#14
# asm 2: mov  $0,>mulc=%rbx
mov  $0,%rbx

# qhasm:   mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
adc %rdx,%rbx

# qhasm:   mulrax = *(uint64 *)(pp + 112)
# asm 1: movq   112(<pp=int64#2),>mulrax=int64#7
# asm 2: movq   112(<pp=%rsi),>mulrax=%rax
movq   112(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx1
# asm 1: mul  <mulx1=int64#9
# asm 2: mul  <mulx1=%r11
mul  %r11

# qhasm:   carry? rz3 += mulrax
# asm 1: add  <mulrax=int64#7,<rz3=int64#13
# asm 2: add  <mulrax=%rax,<rz3=%r15
add  %rax,%r15

# qhasm:   mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx

# qhasm:   carry? rz3 += mulc
# asm 1: add  <mulc=int64#14,<rz3=int64#13
# asm 2: add  <mulc=%rbx,<rz3=%r15
add  %rbx,%r15

# qhasm:   mulc = 0
# asm 1: mov  $0,>mulc=int64#14
# asm 2: mov  $0,>mulc=%rbx
mov  $0,%rbx

# qhasm:   mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
adc %rdx,%rbx

# qhasm:   mulrax = *(uint64 *)(pp + 120)
# asm 1: movq   120(<pp=int64#2),>mulrax=int64#7
# asm 2: movq   120(<pp=%rsi),>mulrax=%rax
movq   120(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx1
# asm 1: mul  <mulx1=int64#9
# asm 2: mul  <mulx1=%r11
mul  %r11

# qhasm:   carry? mulr4 += mulrax
# asm 1: add  <mulrax=int64#7,<mulr4=int64#4
# asm 2: add  <mulrax=%rax,<mulr4=%rcx
add  %rax,%rcx

# qhasm:   mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx

# qhasm:   carry? mulr4 += mulc
# asm 1: add  <mulc=int64#14,<mulr4=int64#4
# asm 2: add  <mulc=%rbx,<mulr4=%rcx
add  %rbx,%rcx

# qhasm:   mulr5 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr5=int64#5
# asm 2: adc <mulrdx=%rdx,<mulr5=%r8
adc %rdx,%r8

# --- column 2: mulx2 = limb at 48(pp) ---

# qhasm:   mulx2 = *(uint64 *)(pp + 48)
# asm 1: movq   48(<pp=int64#2),>mulx2=int64#9
# asm 2: movq   48(<pp=%rsi),>mulx2=%r11
movq   48(%rsi),%r11

# qhasm:   mulrax = *(uint64 *)(pp + 96)
# asm 1: movq   96(<pp=int64#2),>mulrax=int64#7
# asm 2: movq   96(<pp=%rsi),>mulrax=%rax
movq   96(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx2
# asm 1: mul  <mulx2=int64#9
# asm 2: mul  <mulx2=%r11
mul  %r11

# qhasm:   carry? rz2 += mulrax
# asm 1: add  <mulrax=int64#7,<rz2=int64#12
# asm 2: add  <mulrax=%rax,<rz2=%r14
add  %rax,%r14

# qhasm:   mulc = 0
# asm 1: mov  $0,>mulc=int64#14
# asm 2: mov  $0,>mulc=%rbx
mov  $0,%rbx

# qhasm:   mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
adc %rdx,%rbx

# qhasm:   mulrax = *(uint64 *)(pp + 104)
# asm 1: movq   104(<pp=int64#2),>mulrax=int64#7
# asm 2: movq   104(<pp=%rsi),>mulrax=%rax
movq   104(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx2
# asm 1: mul  <mulx2=int64#9
# asm 2: mul  <mulx2=%r11
mul  %r11

# qhasm:   carry? rz3 += mulrax
# asm 1: add  <mulrax=int64#7,<rz3=int64#13
# asm 2: add  <mulrax=%rax,<rz3=%r15
add  %rax,%r15

# qhasm:   mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx

# qhasm:   carry? rz3 += mulc
# asm 1: add  <mulc=int64#14,<rz3=int64#13
# asm 2: add  <mulc=%rbx,<rz3=%r15
add  %rbx,%r15

# qhasm:   mulc = 0
# asm 1: mov  $0,>mulc=int64#14
# asm 2: mov  $0,>mulc=%rbx
mov  $0,%rbx

# qhasm:   mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
adc %rdx,%rbx

# qhasm:   mulrax = *(uint64 *)(pp + 112)
# asm 1: movq   112(<pp=int64#2),>mulrax=int64#7
# asm 2: movq   112(<pp=%rsi),>mulrax=%rax
movq   112(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx2
# asm 1: mul  <mulx2=int64#9
# asm 2: mul  <mulx2=%r11
mul  %r11

# qhasm:   carry? mulr4 += mulrax
# asm 1: add  <mulrax=int64#7,<mulr4=int64#4
# asm 2: add  <mulrax=%rax,<mulr4=%rcx
add  %rax,%rcx

# qhasm:   mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx

# qhasm:   carry? mulr4 += mulc
# asm 1: add  <mulc=int64#14,<mulr4=int64#4
# asm 2: add  <mulc=%rbx,<mulr4=%rcx
add  %rbx,%rcx

# qhasm:   mulc = 0
# asm 1: mov  $0,>mulc=int64#14
# asm 2: mov  $0,>mulc=%rbx
mov  $0,%rbx

# qhasm:   mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
adc %rdx,%rbx

# qhasm:   mulrax = *(uint64 *)(pp + 120)
# asm 1: movq   120(<pp=int64#2),>mulrax=int64#7
# asm 2: movq   120(<pp=%rsi),>mulrax=%rax
movq   120(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx2
# asm 1: mul  <mulx2=int64#9
# asm 2: mul  <mulx2=%r11
mul  %r11

# qhasm:   carry? mulr5 += mulrax
# asm 1: add  <mulrax=int64#7,<mulr5=int64#5
# asm 2: add  <mulrax=%rax,<mulr5=%r8
add  %rax,%r8

# qhasm:   mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx

# qhasm:   carry? mulr5 += mulc
# asm 1: add  <mulc=int64#14,<mulr5=int64#5
# asm 2: add  <mulc=%rbx,<mulr5=%r8
add  %rbx,%r8

# qhasm:   mulr6 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr6=int64#6
# asm 2: adc <mulrdx=%rdx,<mulr6=%r9
adc %rdx,%r9

# --- column 3: mulx3 = limb at 56(pp) ---

# qhasm:   mulx3 = *(uint64 *)(pp + 56)
# asm 1: movq   56(<pp=int64#2),>mulx3=int64#9
# asm 2: movq   56(<pp=%rsi),>mulx3=%r11
movq   56(%rsi),%r11

# qhasm:   mulrax = *(uint64 *)(pp + 96)
# asm 1: movq   96(<pp=int64#2),>mulrax=int64#7
# asm 2: movq   96(<pp=%rsi),>mulrax=%rax
movq   96(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx3
# asm 1: mul  <mulx3=int64#9
# asm 2: mul  <mulx3=%r11
mul  %r11

# qhasm:   carry? rz3 += mulrax
# asm 1: add  <mulrax=int64#7,<rz3=int64#13
# asm 2: add  <mulrax=%rax,<rz3=%r15
add  %rax,%r15

# qhasm:   mulc = 0
# asm 1: mov  $0,>mulc=int64#14
# asm 2: mov  $0,>mulc=%rbx
mov  $0,%rbx

# qhasm:   mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
adc %rdx,%rbx

# qhasm:   mulrax = *(uint64 *)(pp + 104)
# asm 1: movq   104(<pp=int64#2),>mulrax=int64#7
# asm 2: movq   104(<pp=%rsi),>mulrax=%rax
movq   104(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx3
# asm 1: mul  <mulx3=int64#9
# asm 2: mul  <mulx3=%r11
mul  %r11

# qhasm:   carry? mulr4 += mulrax
# asm 1: add  <mulrax=int64#7,<mulr4=int64#4
# asm 2: add  <mulrax=%rax,<mulr4=%rcx
add  %rax,%rcx

# qhasm:   mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx

# qhasm:   carry? mulr4 += mulc
# asm 1: add  <mulc=int64#14,<mulr4=int64#4
# asm 2: add  <mulc=%rbx,<mulr4=%rcx
add  %rbx,%rcx

# qhasm:   mulc = 0
# asm 1: mov  $0,>mulc=int64#14
# asm 2: mov  $0,>mulc=%rbx
mov  $0,%rbx

# qhasm:   mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
adc %rdx,%rbx

# qhasm:   mulrax = *(uint64 *)(pp + 112)
# asm 1: movq   112(<pp=int64#2),>mulrax=int64#7
# asm 2: movq   112(<pp=%rsi),>mulrax=%rax
movq   112(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx3
# asm 1: mul  <mulx3=int64#9
# asm 2: mul  <mulx3=%r11
mul  %r11

# qhasm:   carry? mulr5 += mulrax
# asm 1: add  <mulrax=int64#7,<mulr5=int64#5
# asm 2: add  <mulrax=%rax,<mulr5=%r8
add  %rax,%r8

# qhasm:   mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx

# qhasm:   carry? mulr5 += mulc
# asm 1: add  <mulc=int64#14,<mulr5=int64#5
# asm 2: add  <mulc=%rbx,<mulr5=%r8
add  %rbx,%r8

# qhasm:   mulc = 0
# asm 1: mov  $0,>mulc=int64#14
# asm 2: mov  $0,>mulc=%rbx
mov  $0,%rbx

# qhasm:   mulc += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulc=int64#14
# asm 2: adc <mulrdx=%rdx,<mulc=%rbx
adc %rdx,%rbx

# qhasm:   mulrax = *(uint64 *)(pp + 120)
# asm 1: movq   120(<pp=int64#2),>mulrax=int64#7
# asm 2: movq   120(<pp=%rsi),>mulrax=%rax
movq   120(%rsi),%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * mulx3
# asm 1: mul  <mulx3=int64#9
# asm 2: mul  <mulx3=%r11
mul  %r11

# qhasm:   carry? mulr6 += mulrax
# asm 1: add  <mulrax=int64#7,<mulr6=int64#6
# asm 2: add  <mulrax=%rax,<mulr6=%r9
add  %rax,%r9

# qhasm:   mulrdx += 0 + carry
# asm 1: adc $0,<mulrdx=int64#3
# asm 2: adc $0,<mulrdx=%rdx
adc $0,%rdx

# qhasm:   carry? mulr6 += mulc
# asm 1: add  <mulc=int64#14,<mulr6=int64#6
# asm 2: add  <mulc=%rbx,<mulr6=%r9
add  %rbx,%r9

# qhasm:   mulr7 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr7=int64#8
# asm 2: adc <mulrdx=%rdx,<mulr7=%r10
adc %rdx,%r10
2019
# ---------------------------------------------------------------------------
# Reduce the second 512-bit product mod 2^255-19 (same scheme as for ry:
# fold high limbs mulr4..mulr7 times 38 into rz0..rz3, then fold the final
# carry twice) and store the weakly-reduced result to 64..88(rp).
# Note mulr4/mulzero temporarily reuse %rsi (pp); pp is not read again
# after this point.  CF is live across the interleaved sequences below --
# do not reorder.
# ---------------------------------------------------------------------------

# qhasm:   mulrax = mulr4
# asm 1: mov  <mulr4=int64#4,>mulrax=int64#7
# asm 2: mov  <mulr4=%rcx,>mulrax=%rax
mov  %rcx,%rax

# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq  crypto_sign_ed25519_amd64_64_38

# qhasm:   mulr4 = mulrax
# asm 1: mov  <mulrax=int64#7,>mulr4=int64#2
# asm 2: mov  <mulrax=%rax,>mulr4=%rsi
mov  %rax,%rsi

# qhasm:   mulrax = mulr5
# asm 1: mov  <mulr5=int64#5,>mulrax=int64#7
# asm 2: mov  <mulr5=%r8,>mulrax=%rax
mov  %r8,%rax

# qhasm:   mulr5 = mulrdx
# asm 1: mov  <mulrdx=int64#3,>mulr5=int64#4
# asm 2: mov  <mulrdx=%rdx,>mulr5=%rcx
mov  %rdx,%rcx

# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq  crypto_sign_ed25519_amd64_64_38

# qhasm:   carry? mulr5 += mulrax
# asm 1: add  <mulrax=int64#7,<mulr5=int64#4
# asm 2: add  <mulrax=%rax,<mulr5=%rcx
add  %rax,%rcx

# qhasm:   mulrax = mulr6
# asm 1: mov  <mulr6=int64#6,>mulrax=int64#7
# asm 2: mov  <mulr6=%r9,>mulrax=%rax
mov  %r9,%rax

# qhasm:   mulr6 = 0
# asm 1: mov  $0,>mulr6=int64#5
# asm 2: mov  $0,>mulr6=%r8
mov  $0,%r8

# qhasm:   mulr6 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr6=int64#5
# asm 2: adc <mulrdx=%rdx,<mulr6=%r8
adc %rdx,%r8

# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq  crypto_sign_ed25519_amd64_64_38

# qhasm:   carry? mulr6 += mulrax
# asm 1: add  <mulrax=int64#7,<mulr6=int64#5
# asm 2: add  <mulrax=%rax,<mulr6=%r8
add  %rax,%r8

# qhasm:   mulrax = mulr7
# asm 1: mov  <mulr7=int64#8,>mulrax=int64#7
# asm 2: mov  <mulr7=%r10,>mulrax=%rax
mov  %r10,%rax

# qhasm:   mulr7 = 0
# asm 1: mov  $0,>mulr7=int64#6
# asm 2: mov  $0,>mulr7=%r9
mov  $0,%r9

# qhasm:   mulr7 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr7=int64#6
# asm 2: adc <mulrdx=%rdx,<mulr7=%r9
adc %rdx,%r9

# qhasm:   (uint128) mulrdx mulrax = mulrax * *(uint64 *)&crypto_sign_ed25519_amd64_64_38
mulq  crypto_sign_ed25519_amd64_64_38

# qhasm:   carry? mulr7 += mulrax
# asm 1: add  <mulrax=int64#7,<mulr7=int64#6
# asm 2: add  <mulrax=%rax,<mulr7=%r9
add  %rax,%r9

# qhasm:   mulr8 = 0
# asm 1: mov  $0,>mulr8=int64#7
# asm 2: mov  $0,>mulr8=%rax
mov  $0,%rax

# qhasm:   mulr8 += mulrdx + carry
# asm 1: adc <mulrdx=int64#3,<mulr8=int64#7
# asm 2: adc <mulrdx=%rdx,<mulr8=%rax
adc %rdx,%rax

# --- add the 38-scaled high half into the low half ---

# qhasm:   carry? rz0 += mulr4
# asm 1: add  <mulr4=int64#2,<rz0=int64#10
# asm 2: add  <mulr4=%rsi,<rz0=%r12
add  %rsi,%r12

# qhasm:   carry? rz1 += mulr5 + carry
# asm 1: adc <mulr5=int64#4,<rz1=int64#11
# asm 2: adc <mulr5=%rcx,<rz1=%r13
adc %rcx,%r13

# qhasm:   carry? rz2 += mulr6 + carry
# asm 1: adc <mulr6=int64#5,<rz2=int64#12
# asm 2: adc <mulr6=%r8,<rz2=%r14
adc %r8,%r14

# qhasm:   carry? rz3 += mulr7 + carry
# asm 1: adc <mulr7=int64#6,<rz3=int64#13
# asm 2: adc <mulr7=%r9,<rz3=%r15
adc %r9,%r15

# qhasm:   mulzero = 0
# asm 1: mov  $0,>mulzero=int64#2
# asm 2: mov  $0,>mulzero=%rsi
mov  $0,%rsi

# qhasm:   mulr8 += mulzero + carry
# asm 1: adc <mulzero=int64#2,<mulr8=int64#7
# asm 2: adc <mulzero=%rsi,<mulr8=%rax
adc %rsi,%rax

# qhasm:   mulr8 *= 38
# asm 1: imulq  $38,<mulr8=int64#7,>mulr8=int64#3
# asm 2: imulq  $38,<mulr8=%rax,>mulr8=%rdx
imulq  $38,%rax,%rdx

# qhasm:   carry? rz0 += mulr8
# asm 1: add  <mulr8=int64#3,<rz0=int64#10
# asm 2: add  <mulr8=%rdx,<rz0=%r12
add  %rdx,%r12

# qhasm:   carry? rz1 += mulzero + carry
# asm 1: adc <mulzero=int64#2,<rz1=int64#11
# asm 2: adc <mulzero=%rsi,<rz1=%r13
adc %rsi,%r13

# qhasm:   carry? rz2 += mulzero + carry
# asm 1: adc <mulzero=int64#2,<rz2=int64#12
# asm 2: adc <mulzero=%rsi,<rz2=%r14
adc %rsi,%r14

# qhasm:   carry? rz3 += mulzero + carry
# asm 1: adc <mulzero=int64#2,<rz3=int64#13
# asm 2: adc <mulzero=%rsi,<rz3=%r15
adc %rsi,%r15

# final fold: mulzero becomes 38 if a carry escaped, else stays 0

# qhasm:   mulzero += mulzero + carry
# asm 1: adc <mulzero=int64#2,<mulzero=int64#2
# asm 2: adc <mulzero=%rsi,<mulzero=%rsi
adc %rsi,%rsi

# qhasm:   mulzero *= 38
# asm 1: imulq  $38,<mulzero=int64#2,>mulzero=int64#2
# asm 2: imulq  $38,<mulzero=%rsi,>mulzero=%rsi
imulq  $38,%rsi,%rsi

# qhasm:   rz0 += mulzero
# asm 1: add  <mulzero=int64#2,<rz0=int64#10
# asm 2: add  <mulzero=%rsi,<rz0=%r12
add  %rsi,%r12

# --- store the four result limbs into the output field element at rp+64 ---

# qhasm: *(uint64 *)(rp + 64) = rz0
# asm 1: movq   <rz0=int64#10,64(<rp=int64#1)
# asm 2: movq   <rz0=%r12,64(<rp=%rdi)
movq   %r12,64(%rdi)

# qhasm: *(uint64 *)(rp + 72) = rz1
# asm 1: movq   <rz1=int64#11,72(<rp=int64#1)
# asm 2: movq   <rz1=%r13,72(<rp=%rdi)
movq   %r13,72(%rdi)

# qhasm: *(uint64 *)(rp + 80) = rz2
# asm 1: movq   <rz2=int64#12,80(<rp=int64#1)
# asm 2: movq   <rz2=%r14,80(<rp=%rdi)
movq   %r14,80(%rdi)

# qhasm: *(uint64 *)(rp + 88) = rz3
# asm 1: movq   <rz3=int64#13,88(<rp=int64#1)
# asm 2: movq   <rz3=%r15,88(<rp=%rdi)
movq   %r15,88(%rdi)
2196
# ---------------------------------------------------------------------------
# Epilogue: reload the seven "caller" slots from the stack frame.
# caller1_stack (0(%rsp)) holds the frame-adjustment value the prologue
# computed in %r11 (see the mov/and/add/sub at function entry), so
# `add %r11,%rsp` below unwinds the frame exactly.  Slots 8..48(%rsp)
# presumably hold the callee-saved registers (SysV: rbx, rbp, r12-r15)
# saved earlier in the prologue -- that code is above this excerpt.
# The trailing moves of rdi/rsi into rax/rdx are qhasm's standard leave
# boilerplate; C callers treat this function as void and ignore them.
# ---------------------------------------------------------------------------

# qhasm:   caller1 = caller1_stack
# asm 1: movq <caller1_stack=stack64#1,>caller1=int64#9
# asm 2: movq <caller1_stack=0(%rsp),>caller1=%r11
movq 0(%rsp),%r11

# qhasm:   caller2 = caller2_stack
# asm 1: movq <caller2_stack=stack64#2,>caller2=int64#10
# asm 2: movq <caller2_stack=8(%rsp),>caller2=%r12
movq 8(%rsp),%r12

# qhasm:   caller3 = caller3_stack
# asm 1: movq <caller3_stack=stack64#3,>caller3=int64#11
# asm 2: movq <caller3_stack=16(%rsp),>caller3=%r13
movq 16(%rsp),%r13

# qhasm:   caller4 = caller4_stack
# asm 1: movq <caller4_stack=stack64#4,>caller4=int64#12
# asm 2: movq <caller4_stack=24(%rsp),>caller4=%r14
movq 24(%rsp),%r14

# qhasm:   caller5 = caller5_stack
# asm 1: movq <caller5_stack=stack64#5,>caller5=int64#13
# asm 2: movq <caller5_stack=32(%rsp),>caller5=%r15
movq 32(%rsp),%r15

# qhasm:   caller6 = caller6_stack
# asm 1: movq <caller6_stack=stack64#6,>caller6=int64#14
# asm 2: movq <caller6_stack=40(%rsp),>caller6=%rbx
movq 40(%rsp),%rbx

# qhasm:   caller7 = caller7_stack
# asm 1: movq <caller7_stack=stack64#7,>caller7=int64#15
# asm 2: movq <caller7_stack=48(%rsp),>caller7=%rbp
movq 48(%rsp),%rbp

# qhasm: leave
add %r11,%rsp
mov %rdi,%rax
mov %rsi,%rdx
ret
2237