
# qhasm: int64 rp

# qhasm: int64 xp

# qhasm: input rp

# qhasm: input xp

# qhasm:   int64 caller1

# qhasm:   int64 caller2

# qhasm:   int64 caller3

# qhasm:   int64 caller4

# qhasm:   int64 caller5

# qhasm:   int64 caller6

# qhasm:   int64 caller7

# qhasm:   caller caller1

# qhasm:   caller caller2

# qhasm:   caller caller3

# qhasm:   caller caller4

# qhasm:   caller caller5

# qhasm:   caller caller6

# qhasm:   caller caller7

# qhasm:   stack64 caller1_stack

# qhasm:   stack64 caller2_stack

# qhasm:   stack64 caller3_stack

# qhasm:   stack64 caller4_stack

# qhasm:   stack64 caller5_stack

# qhasm:   stack64 caller6_stack

# qhasm:   stack64 caller7_stack

# qhasm: int64 q23

# qhasm: int64 q24

# qhasm: int64 q30

# qhasm: int64 q31

# qhasm: int64 q32

# qhasm: int64 q33

# qhasm: int64 r20

# qhasm: int64 r21

# qhasm: int64 r22

# qhasm: int64 r23

# qhasm: int64 r24

# qhasm: int64 r0

# qhasm: int64 r1

# qhasm: int64 r2

# qhasm: int64 r3

# qhasm: int64 t0

# qhasm: int64 t1

# qhasm: int64 t2

# qhasm: int64 t3

# qhasm: int64 rax

# qhasm: int64 rdx

# qhasm: int64 c

# qhasm: int64 zero

# qhasm: int64 mask

# qhasm: int64 nmask

# qhasm: stack64 q30_stack

# qhasm: stack64 q31_stack

# qhasm: stack64 q32_stack

# qhasm: stack64 q33_stack

# qhasm: enter crypto_sign_ed25519_amd64_64_sc25519_barrett
#-----------------------------------------------------------------------
# crypto_sign_ed25519_amd64_64_sc25519_barrett(uint64 rp[4], uint64 xp[8])
# ABI:   SysV AMD64, GAS/AT&T syntax (qhasm-generated).
# In:    rdi = rp (4-limb output), rsi = xp (8-limb 512-bit input)
# Out:   rp = xp reduced via Barrett reduction with the precomputed
#        constants MU0..MU4 and ORDER0..ORDER3 (defined in a sibling
#        file of this implementation).
# Note:  callee-saved rbx, rbp, r12-r15 are spilled to the local stack
#        frame and restored before return; rax/rcx/rdx/r8-r11 clobbered.
#-----------------------------------------------------------------------
.text
.p2align 5
.globl _crypto_sign_ed25519_amd64_64_sc25519_barrett
.globl crypto_sign_ed25519_amd64_64_sc25519_barrett
_crypto_sign_ed25519_amd64_64_sc25519_barrett:
crypto_sign_ed25519_amd64_64_sc25519_barrett:
mov %rsp,%r11
and $31,%r11
add $96,%r11
sub %r11,%rsp

# qhasm:   caller1_stack = caller1
# asm 1: movq <caller1=int64#9,>caller1_stack=stack64#1
# asm 2: movq <caller1=%r11,>caller1_stack=0(%rsp)
movq %r11,0(%rsp)

# qhasm:   caller2_stack = caller2
# asm 1: movq <caller2=int64#10,>caller2_stack=stack64#2
# asm 2: movq <caller2=%r12,>caller2_stack=8(%rsp)
movq %r12,8(%rsp)

# qhasm:   caller3_stack = caller3
# asm 1: movq <caller3=int64#11,>caller3_stack=stack64#3
# asm 2: movq <caller3=%r13,>caller3_stack=16(%rsp)
movq %r13,16(%rsp)

# qhasm:   caller4_stack = caller4
# asm 1: movq <caller4=int64#12,>caller4_stack=stack64#4
# asm 2: movq <caller4=%r14,>caller4_stack=24(%rsp)
movq %r14,24(%rsp)

# qhasm:   caller5_stack = caller5
# asm 1: movq <caller5=int64#13,>caller5_stack=stack64#5
# asm 2: movq <caller5=%r15,>caller5_stack=32(%rsp)
movq %r15,32(%rsp)

# qhasm:   caller6_stack = caller6
# asm 1: movq <caller6=int64#14,>caller6_stack=stack64#6
# asm 2: movq <caller6=%rbx,>caller6_stack=40(%rsp)
movq %rbx,40(%rsp)

# qhasm:   caller7_stack = caller7
# asm 1: movq <caller7=int64#15,>caller7_stack=stack64#7
# asm 2: movq <caller7=%rbp,>caller7_stack=48(%rsp)
movq %rbp,48(%rsp)

# Phase 1: multiply the high limbs of x (xp[3..7]) by MU, keeping only
# the limbs that contribute to q3 = floor(x/2^192 * MU / 2^320)
# (q30..q33 below; q23/q24 only propagate carries upward).

# qhasm: zero ^= zero
# asm 1: xor  <zero=int64#4,<zero=int64#4
# asm 2: xor  <zero=%rcx,<zero=%rcx
xor  %rcx,%rcx

# qhasm: q30 ^= q30
# asm 1: xor  <q30=int64#5,<q30=int64#5
# asm 2: xor  <q30=%r8,<q30=%r8
xor  %r8,%r8

# qhasm: q31 ^= q31
# asm 1: xor  <q31=int64#6,<q31=int64#6
# asm 2: xor  <q31=%r9,<q31=%r9
xor  %r9,%r9

# qhasm: q32 ^= q32
# asm 1: xor  <q32=int64#8,<q32=int64#8
# asm 2: xor  <q32=%r10,<q32=%r10
xor  %r10,%r10

# qhasm: q33 ^= q33
# asm 1: xor  <q33=int64#9,<q33=int64#9
# asm 2: xor  <q33=%r11,<q33=%r11
xor  %r11,%r11

# qhasm: rax = *(uint64 *)(xp + 24)
# asm 1: movq   24(<xp=int64#2),>rax=int64#7
# asm 2: movq   24(<xp=%rsi),>rax=%rax
movq   24(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_MU3
mulq  crypto_sign_ed25519_amd64_64_MU3

# qhasm: q23 = rax
# asm 1: mov  <rax=int64#7,>q23=int64#10
# asm 2: mov  <rax=%rax,>q23=%r12
mov  %rax,%r12

# qhasm: c = rdx
# asm 1: mov  <rdx=int64#3,>c=int64#11
# asm 2: mov  <rdx=%rdx,>c=%r13
mov  %rdx,%r13

# qhasm: rax = *(uint64 *)(xp + 24)
# asm 1: movq   24(<xp=int64#2),>rax=int64#7
# asm 2: movq   24(<xp=%rsi),>rax=%rax
movq   24(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_MU4
mulq  crypto_sign_ed25519_amd64_64_MU4

# qhasm: q24 = rax
# asm 1: mov  <rax=int64#7,>q24=int64#12
# asm 2: mov  <rax=%rax,>q24=%r14
mov  %rax,%r14

# qhasm: carry? q24 += c
# asm 1: add  <c=int64#11,<q24=int64#12
# asm 2: add  <c=%r13,<q24=%r14
add  %r13,%r14

# qhasm: q30 += rdx + carry
# asm 1: adc <rdx=int64#3,<q30=int64#5
# asm 2: adc <rdx=%rdx,<q30=%r8
adc %rdx,%r8

# qhasm: rax = *(uint64 *)(xp + 32)
# asm 1: movq   32(<xp=int64#2),>rax=int64#7
# asm 2: movq   32(<xp=%rsi),>rax=%rax
movq   32(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_MU2
mulq  crypto_sign_ed25519_amd64_64_MU2

# qhasm: carry? q23 += rax
# asm 1: add  <rax=int64#7,<q23=int64#10
# asm 2: add  <rax=%rax,<q23=%r12
add  %rax,%r12

# qhasm: c = 0
# asm 1: mov  $0,>c=int64#11
# asm 2: mov  $0,>c=%r13
mov  $0,%r13

# qhasm: c += rdx + carry
# asm 1: adc <rdx=int64#3,<c=int64#11
# asm 2: adc <rdx=%rdx,<c=%r13
adc %rdx,%r13

# qhasm: rax = *(uint64 *)(xp + 32)
# asm 1: movq   32(<xp=int64#2),>rax=int64#7
# asm 2: movq   32(<xp=%rsi),>rax=%rax
movq   32(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_MU3
mulq  crypto_sign_ed25519_amd64_64_MU3

# qhasm: carry? q24 += rax
# asm 1: add  <rax=int64#7,<q24=int64#12
# asm 2: add  <rax=%rax,<q24=%r14
add  %rax,%r14

# qhasm: rdx += zero + carry
# asm 1: adc <zero=int64#4,<rdx=int64#3
# asm 2: adc <zero=%rcx,<rdx=%rdx
adc %rcx,%rdx

# qhasm: carry? q24 += c
# asm 1: add  <c=int64#11,<q24=int64#12
# asm 2: add  <c=%r13,<q24=%r14
add  %r13,%r14

# qhasm: c = 0
# asm 1: mov  $0,>c=int64#11
# asm 2: mov  $0,>c=%r13
mov  $0,%r13

# qhasm: c += rdx + carry
# asm 1: adc <rdx=int64#3,<c=int64#11
# asm 2: adc <rdx=%rdx,<c=%r13
adc %rdx,%r13

# qhasm: rax = *(uint64 *)(xp + 32)
# asm 1: movq   32(<xp=int64#2),>rax=int64#7
# asm 2: movq   32(<xp=%rsi),>rax=%rax
movq   32(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_MU4
mulq  crypto_sign_ed25519_amd64_64_MU4

# qhasm: carry? q30 += rax
# asm 1: add  <rax=int64#7,<q30=int64#5
# asm 2: add  <rax=%rax,<q30=%r8
add  %rax,%r8

# qhasm: rdx += zero + carry
# asm 1: adc <zero=int64#4,<rdx=int64#3
# asm 2: adc <zero=%rcx,<rdx=%rdx
adc %rcx,%rdx

# qhasm: carry? q30 += c
# asm 1: add  <c=int64#11,<q30=int64#5
# asm 2: add  <c=%r13,<q30=%r8
add  %r13,%r8

# qhasm: q31 += rdx + carry
# asm 1: adc <rdx=int64#3,<q31=int64#6
# asm 2: adc <rdx=%rdx,<q31=%r9
adc %rdx,%r9

# qhasm: rax = *(uint64 *)(xp + 40)
# asm 1: movq   40(<xp=int64#2),>rax=int64#7
# asm 2: movq   40(<xp=%rsi),>rax=%rax
movq   40(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_MU1
mulq  crypto_sign_ed25519_amd64_64_MU1

# qhasm: carry? q23 += rax
# asm 1: add  <rax=int64#7,<q23=int64#10
# asm 2: add  <rax=%rax,<q23=%r12
add  %rax,%r12

# qhasm: c = 0
# asm 1: mov  $0,>c=int64#11
# asm 2: mov  $0,>c=%r13
mov  $0,%r13

# qhasm: c += rdx + carry
# asm 1: adc <rdx=int64#3,<c=int64#11
# asm 2: adc <rdx=%rdx,<c=%r13
adc %rdx,%r13

# qhasm: rax = *(uint64 *)(xp + 40)
# asm 1: movq   40(<xp=int64#2),>rax=int64#7
# asm 2: movq   40(<xp=%rsi),>rax=%rax
movq   40(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_MU2
mulq  crypto_sign_ed25519_amd64_64_MU2

# qhasm: carry? q24 += rax
# asm 1: add  <rax=int64#7,<q24=int64#12
# asm 2: add  <rax=%rax,<q24=%r14
add  %rax,%r14

# qhasm: rdx += zero + carry
# asm 1: adc <zero=int64#4,<rdx=int64#3
# asm 2: adc <zero=%rcx,<rdx=%rdx
adc %rcx,%rdx

# qhasm: carry? q24 += c
# asm 1: add  <c=int64#11,<q24=int64#12
# asm 2: add  <c=%r13,<q24=%r14
add  %r13,%r14

# qhasm: c = 0
# asm 1: mov  $0,>c=int64#11
# asm 2: mov  $0,>c=%r13
mov  $0,%r13

# qhasm: c += rdx + carry
# asm 1: adc <rdx=int64#3,<c=int64#11
# asm 2: adc <rdx=%rdx,<c=%r13
adc %rdx,%r13

# qhasm: rax = *(uint64 *)(xp + 40)
# asm 1: movq   40(<xp=int64#2),>rax=int64#7
# asm 2: movq   40(<xp=%rsi),>rax=%rax
movq   40(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_MU3
mulq  crypto_sign_ed25519_amd64_64_MU3

# qhasm: carry? q30 += rax
# asm 1: add  <rax=int64#7,<q30=int64#5
# asm 2: add  <rax=%rax,<q30=%r8
add  %rax,%r8

# qhasm: rdx += zero + carry
# asm 1: adc <zero=int64#4,<rdx=int64#3
# asm 2: adc <zero=%rcx,<rdx=%rdx
adc %rcx,%rdx

# qhasm: carry? q30 += c
# asm 1: add  <c=int64#11,<q30=int64#5
# asm 2: add  <c=%r13,<q30=%r8
add  %r13,%r8

# qhasm: c = 0
# asm 1: mov  $0,>c=int64#11
# asm 2: mov  $0,>c=%r13
mov  $0,%r13

# qhasm: c += rdx + carry
# asm 1: adc <rdx=int64#3,<c=int64#11
# asm 2: adc <rdx=%rdx,<c=%r13
adc %rdx,%r13

# qhasm: rax = *(uint64 *)(xp + 40)
# asm 1: movq   40(<xp=int64#2),>rax=int64#7
# asm 2: movq   40(<xp=%rsi),>rax=%rax
movq   40(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_MU4
mulq  crypto_sign_ed25519_amd64_64_MU4

# qhasm: carry? q31 += rax
# asm 1: add  <rax=int64#7,<q31=int64#6
# asm 2: add  <rax=%rax,<q31=%r9
add  %rax,%r9

# qhasm: rdx += zero + carry
# asm 1: adc <zero=int64#4,<rdx=int64#3
# asm 2: adc <zero=%rcx,<rdx=%rdx
adc %rcx,%rdx

# qhasm: carry? q31 += c
# asm 1: add  <c=int64#11,<q31=int64#6
# asm 2: add  <c=%r13,<q31=%r9
add  %r13,%r9

# qhasm: q32 += rdx + carry
# asm 1: adc <rdx=int64#3,<q32=int64#8
# asm 2: adc <rdx=%rdx,<q32=%r10
adc %rdx,%r10

# qhasm: rax = *(uint64 *)(xp + 48)
# asm 1: movq   48(<xp=int64#2),>rax=int64#7
# asm 2: movq   48(<xp=%rsi),>rax=%rax
movq   48(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_MU0
mulq  crypto_sign_ed25519_amd64_64_MU0

# qhasm: carry? q23 += rax
# asm 1: add  <rax=int64#7,<q23=int64#10
# asm 2: add  <rax=%rax,<q23=%r12
add  %rax,%r12

# qhasm: c = 0
# asm 1: mov  $0,>c=int64#10
# asm 2: mov  $0,>c=%r12
mov  $0,%r12

# qhasm: c += rdx + carry
# asm 1: adc <rdx=int64#3,<c=int64#10
# asm 2: adc <rdx=%rdx,<c=%r12
adc %rdx,%r12

# qhasm: rax = *(uint64 *)(xp + 48)
# asm 1: movq   48(<xp=int64#2),>rax=int64#7
# asm 2: movq   48(<xp=%rsi),>rax=%rax
movq   48(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_MU1
mulq  crypto_sign_ed25519_amd64_64_MU1

# qhasm: carry? q24 += rax
# asm 1: add  <rax=int64#7,<q24=int64#12
# asm 2: add  <rax=%rax,<q24=%r14
add  %rax,%r14

# qhasm: rdx += zero + carry
# asm 1: adc <zero=int64#4,<rdx=int64#3
# asm 2: adc <zero=%rcx,<rdx=%rdx
adc %rcx,%rdx

# qhasm: carry? q24 += c
# asm 1: add  <c=int64#10,<q24=int64#12
# asm 2: add  <c=%r12,<q24=%r14
add  %r12,%r14

# qhasm: c = 0
# asm 1: mov  $0,>c=int64#10
# asm 2: mov  $0,>c=%r12
mov  $0,%r12

# qhasm: c += rdx + carry
# asm 1: adc <rdx=int64#3,<c=int64#10
# asm 2: adc <rdx=%rdx,<c=%r12
adc %rdx,%r12

# qhasm: rax = *(uint64 *)(xp + 48)
# asm 1: movq   48(<xp=int64#2),>rax=int64#7
# asm 2: movq   48(<xp=%rsi),>rax=%rax
movq   48(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_MU2
mulq  crypto_sign_ed25519_amd64_64_MU2

# qhasm: carry? q30 += rax
# asm 1: add  <rax=int64#7,<q30=int64#5
# asm 2: add  <rax=%rax,<q30=%r8
add  %rax,%r8

# qhasm: rdx += zero + carry
# asm 1: adc <zero=int64#4,<rdx=int64#3
# asm 2: adc <zero=%rcx,<rdx=%rdx
adc %rcx,%rdx

# qhasm: carry? q30 += c
# asm 1: add  <c=int64#10,<q30=int64#5
# asm 2: add  <c=%r12,<q30=%r8
add  %r12,%r8

# qhasm: c = 0
# asm 1: mov  $0,>c=int64#10
# asm 2: mov  $0,>c=%r12
mov  $0,%r12

# qhasm: c += rdx + carry
# asm 1: adc <rdx=int64#3,<c=int64#10
# asm 2: adc <rdx=%rdx,<c=%r12
adc %rdx,%r12

# qhasm: rax = *(uint64 *)(xp + 48)
# asm 1: movq   48(<xp=int64#2),>rax=int64#7
# asm 2: movq   48(<xp=%rsi),>rax=%rax
movq   48(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_MU3
mulq  crypto_sign_ed25519_amd64_64_MU3

# qhasm: carry? q31 += rax
# asm 1: add  <rax=int64#7,<q31=int64#6
# asm 2: add  <rax=%rax,<q31=%r9
add  %rax,%r9

# qhasm: rdx += zero + carry
# asm 1: adc <zero=int64#4,<rdx=int64#3
# asm 2: adc <zero=%rcx,<rdx=%rdx
adc %rcx,%rdx

# qhasm: carry? q31 += c
# asm 1: add  <c=int64#10,<q31=int64#6
# asm 2: add  <c=%r12,<q31=%r9
add  %r12,%r9

# qhasm: c = 0
# asm 1: mov  $0,>c=int64#10
# asm 2: mov  $0,>c=%r12
mov  $0,%r12

# qhasm: c += rdx + carry
# asm 1: adc <rdx=int64#3,<c=int64#10
# asm 2: adc <rdx=%rdx,<c=%r12
adc %rdx,%r12

# qhasm: rax = *(uint64 *)(xp + 48)
# asm 1: movq   48(<xp=int64#2),>rax=int64#7
# asm 2: movq   48(<xp=%rsi),>rax=%rax
movq   48(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_MU4
mulq  crypto_sign_ed25519_amd64_64_MU4

# qhasm: carry? q32 += rax
# asm 1: add  <rax=int64#7,<q32=int64#8
# asm 2: add  <rax=%rax,<q32=%r10
add  %rax,%r10

# qhasm: rdx += zero + carry
# asm 1: adc <zero=int64#4,<rdx=int64#3
# asm 2: adc <zero=%rcx,<rdx=%rdx
adc %rcx,%rdx

# qhasm: carry? q32 += c
# asm 1: add  <c=int64#10,<q32=int64#8
# asm 2: add  <c=%r12,<q32=%r10
add  %r12,%r10

# qhasm: q33 += rdx + carry
# asm 1: adc <rdx=int64#3,<q33=int64#9
# asm 2: adc <rdx=%rdx,<q33=%r11
adc %rdx,%r11

# qhasm: rax = *(uint64 *)(xp + 56)
# asm 1: movq   56(<xp=int64#2),>rax=int64#7
# asm 2: movq   56(<xp=%rsi),>rax=%rax
movq   56(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_MU0
mulq  crypto_sign_ed25519_amd64_64_MU0

# qhasm: carry? q24 += rax
# asm 1: add  <rax=int64#7,<q24=int64#12
# asm 2: add  <rax=%rax,<q24=%r14
add  %rax,%r14

# qhasm: free q24

# qhasm: c = 0
# asm 1: mov  $0,>c=int64#10
# asm 2: mov  $0,>c=%r12
mov  $0,%r12

# qhasm: c += rdx + carry
# asm 1: adc <rdx=int64#3,<c=int64#10
# asm 2: adc <rdx=%rdx,<c=%r12
adc %rdx,%r12

# qhasm: rax = *(uint64 *)(xp + 56)
# asm 1: movq   56(<xp=int64#2),>rax=int64#7
# asm 2: movq   56(<xp=%rsi),>rax=%rax
movq   56(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_MU1
mulq  crypto_sign_ed25519_amd64_64_MU1

# qhasm: carry? q30 += rax
# asm 1: add  <rax=int64#7,<q30=int64#5
# asm 2: add  <rax=%rax,<q30=%r8
add  %rax,%r8

# qhasm: rdx += zero + carry
# asm 1: adc <zero=int64#4,<rdx=int64#3
# asm 2: adc <zero=%rcx,<rdx=%rdx
adc %rcx,%rdx

# qhasm: carry? q30 += c
# asm 1: add  <c=int64#10,<q30=int64#5
# asm 2: add  <c=%r12,<q30=%r8
add  %r12,%r8

# qhasm: c = 0
# asm 1: mov  $0,>c=int64#10
# asm 2: mov  $0,>c=%r12
mov  $0,%r12

# qhasm: c += rdx + carry
# asm 1: adc <rdx=int64#3,<c=int64#10
# asm 2: adc <rdx=%rdx,<c=%r12
adc %rdx,%r12

# qhasm: q30_stack = q30
# asm 1: movq <q30=int64#5,>q30_stack=stack64#8
# asm 2: movq <q30=%r8,>q30_stack=56(%rsp)
movq %r8,56(%rsp)

# qhasm: rax = *(uint64 *)(xp + 56)
# asm 1: movq   56(<xp=int64#2),>rax=int64#7
# asm 2: movq   56(<xp=%rsi),>rax=%rax
movq   56(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_MU2
mulq  crypto_sign_ed25519_amd64_64_MU2

# qhasm: carry? q31 += rax
# asm 1: add  <rax=int64#7,<q31=int64#6
# asm 2: add  <rax=%rax,<q31=%r9
add  %rax,%r9

# qhasm: rdx += zero + carry
# asm 1: adc <zero=int64#4,<rdx=int64#3
# asm 2: adc <zero=%rcx,<rdx=%rdx
adc %rcx,%rdx

# qhasm: carry? q31 += c
# asm 1: add  <c=int64#10,<q31=int64#6
# asm 2: add  <c=%r12,<q31=%r9
add  %r12,%r9

# qhasm: c = 0
# asm 1: mov  $0,>c=int64#5
# asm 2: mov  $0,>c=%r8
mov  $0,%r8

# qhasm: c += rdx + carry
# asm 1: adc <rdx=int64#3,<c=int64#5
# asm 2: adc <rdx=%rdx,<c=%r8
adc %rdx,%r8

# qhasm: q31_stack = q31
# asm 1: movq <q31=int64#6,>q31_stack=stack64#9
# asm 2: movq <q31=%r9,>q31_stack=64(%rsp)
movq %r9,64(%rsp)

# qhasm: rax = *(uint64 *)(xp + 56)
# asm 1: movq   56(<xp=int64#2),>rax=int64#7
# asm 2: movq   56(<xp=%rsi),>rax=%rax
movq   56(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_MU3
mulq  crypto_sign_ed25519_amd64_64_MU3

# qhasm: carry? q32 += rax
# asm 1: add  <rax=int64#7,<q32=int64#8
# asm 2: add  <rax=%rax,<q32=%r10
add  %rax,%r10

# qhasm: rdx += zero + carry
# asm 1: adc <zero=int64#4,<rdx=int64#3
# asm 2: adc <zero=%rcx,<rdx=%rdx
adc %rcx,%rdx

# qhasm: carry? q32 += c
# asm 1: add  <c=int64#5,<q32=int64#8
# asm 2: add  <c=%r8,<q32=%r10
add  %r8,%r10

# qhasm: c = 0
# asm 1: mov  $0,>c=int64#5
# asm 2: mov  $0,>c=%r8
mov  $0,%r8

# qhasm: c += rdx + carry
# asm 1: adc <rdx=int64#3,<c=int64#5
# asm 2: adc <rdx=%rdx,<c=%r8
adc %rdx,%r8

# qhasm: q32_stack = q32
# asm 1: movq <q32=int64#8,>q32_stack=stack64#10
# asm 2: movq <q32=%r10,>q32_stack=72(%rsp)
movq %r10,72(%rsp)

# qhasm: rax = *(uint64 *)(xp + 56)
# asm 1: movq   56(<xp=int64#2),>rax=int64#7
# asm 2: movq   56(<xp=%rsi),>rax=%rax
movq   56(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_MU4
mulq  crypto_sign_ed25519_amd64_64_MU4

# qhasm: carry? q33 += rax
# asm 1: add  <rax=int64#7,<q33=int64#9
# asm 2: add  <rax=%rax,<q33=%r11
add  %rax,%r11

# qhasm: rdx += zero + carry
# asm 1: adc <zero=int64#4,<rdx=int64#3
# asm 2: adc <zero=%rcx,<rdx=%rdx
adc %rcx,%rdx

# qhasm: q33 += c
# asm 1: add  <c=int64#5,<q33=int64#9
# asm 2: add  <c=%r8,<q33=%r11
add  %r8,%r11

# qhasm: q33_stack = q33
# asm 1: movq <q33=int64#9,>q33_stack=stack64#11
# asm 2: movq <q33=%r11,>q33_stack=80(%rsp)
movq %r11,80(%rsp)

# Phase 2: low 256 bits of q3 * ORDER -> r20..r23 (only limbs that
# survive the final mod-2^256 subtraction are computed).

# qhasm: rax = q30_stack
# asm 1: movq <q30_stack=stack64#8,>rax=int64#7
# asm 2: movq <q30_stack=56(%rsp),>rax=%rax
movq 56(%rsp),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_ORDER0
mulq  crypto_sign_ed25519_amd64_64_ORDER0

# qhasm: r20 = rax
# asm 1: mov  <rax=int64#7,>r20=int64#5
# asm 2: mov  <rax=%rax,>r20=%r8
mov  %rax,%r8

# qhasm: c = rdx
# asm 1: mov  <rdx=int64#3,>c=int64#6
# asm 2: mov  <rdx=%rdx,>c=%r9
mov  %rdx,%r9

# qhasm: rax = q30_stack
# asm 1: movq <q30_stack=stack64#8,>rax=int64#7
# asm 2: movq <q30_stack=56(%rsp),>rax=%rax
movq 56(%rsp),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_ORDER1
mulq  crypto_sign_ed25519_amd64_64_ORDER1

# qhasm: r21 = rax
# asm 1: mov  <rax=int64#7,>r21=int64#8
# asm 2: mov  <rax=%rax,>r21=%r10
mov  %rax,%r10

# qhasm: carry? r21 += c
# asm 1: add  <c=int64#6,<r21=int64#8
# asm 2: add  <c=%r9,<r21=%r10
add  %r9,%r10

# qhasm: c = 0
# asm 1: mov  $0,>c=int64#6
# asm 2: mov  $0,>c=%r9
mov  $0,%r9

# qhasm: c += rdx + carry
# asm 1: adc <rdx=int64#3,<c=int64#6
# asm 2: adc <rdx=%rdx,<c=%r9
adc %rdx,%r9

# qhasm: rax = q30_stack
# asm 1: movq <q30_stack=stack64#8,>rax=int64#7
# asm 2: movq <q30_stack=56(%rsp),>rax=%rax
movq 56(%rsp),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_ORDER2
mulq  crypto_sign_ed25519_amd64_64_ORDER2

# qhasm: r22 = rax
# asm 1: mov  <rax=int64#7,>r22=int64#9
# asm 2: mov  <rax=%rax,>r22=%r11
mov  %rax,%r11

# qhasm: carry? r22 += c
# asm 1: add  <c=int64#6,<r22=int64#9
# asm 2: add  <c=%r9,<r22=%r11
add  %r9,%r11

# qhasm: c = 0
# asm 1: mov  $0,>c=int64#6
# asm 2: mov  $0,>c=%r9
mov  $0,%r9

# qhasm: c += rdx + carry
# asm 1: adc <rdx=int64#3,<c=int64#6
# asm 2: adc <rdx=%rdx,<c=%r9
adc %rdx,%r9

# qhasm: rax = q30_stack
# asm 1: movq <q30_stack=stack64#8,>rax=int64#7
# asm 2: movq <q30_stack=56(%rsp),>rax=%rax
movq 56(%rsp),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_ORDER3
mulq  crypto_sign_ed25519_amd64_64_ORDER3

# qhasm: free rdx

# qhasm: r23 = rax
# asm 1: mov  <rax=int64#7,>r23=int64#10
# asm 2: mov  <rax=%rax,>r23=%r12
mov  %rax,%r12

# qhasm: r23 += c
# asm 1: add  <c=int64#6,<r23=int64#10
# asm 2: add  <c=%r9,<r23=%r12
add  %r9,%r12

# qhasm: rax = q31_stack
# asm 1: movq <q31_stack=stack64#9,>rax=int64#7
# asm 2: movq <q31_stack=64(%rsp),>rax=%rax
movq 64(%rsp),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_ORDER0
mulq  crypto_sign_ed25519_amd64_64_ORDER0

# qhasm: carry? r21 += rax
# asm 1: add  <rax=int64#7,<r21=int64#8
# asm 2: add  <rax=%rax,<r21=%r10
add  %rax,%r10

# qhasm: c = 0
# asm 1: mov  $0,>c=int64#6
# asm 2: mov  $0,>c=%r9
mov  $0,%r9

# qhasm: c += rdx + carry
# asm 1: adc <rdx=int64#3,<c=int64#6
# asm 2: adc <rdx=%rdx,<c=%r9
adc %rdx,%r9

# qhasm: rax = q31_stack
# asm 1: movq <q31_stack=stack64#9,>rax=int64#7
# asm 2: movq <q31_stack=64(%rsp),>rax=%rax
movq 64(%rsp),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_ORDER1
mulq  crypto_sign_ed25519_amd64_64_ORDER1

# qhasm: carry? r22 += rax
# asm 1: add  <rax=int64#7,<r22=int64#9
# asm 2: add  <rax=%rax,<r22=%r11
add  %rax,%r11

# qhasm: rdx += zero + carry
# asm 1: adc <zero=int64#4,<rdx=int64#3
# asm 2: adc <zero=%rcx,<rdx=%rdx
adc %rcx,%rdx

# qhasm: carry? r22 += c
# asm 1: add  <c=int64#6,<r22=int64#9
# asm 2: add  <c=%r9,<r22=%r11
add  %r9,%r11

# qhasm: c = 0
# asm 1: mov  $0,>c=int64#4
# asm 2: mov  $0,>c=%rcx
mov  $0,%rcx

# qhasm: c += rdx + carry
# asm 1: adc <rdx=int64#3,<c=int64#4
# asm 2: adc <rdx=%rdx,<c=%rcx
adc %rdx,%rcx

# qhasm: rax = q31_stack
# asm 1: movq <q31_stack=stack64#9,>rax=int64#7
# asm 2: movq <q31_stack=64(%rsp),>rax=%rax
movq 64(%rsp),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_ORDER2
mulq  crypto_sign_ed25519_amd64_64_ORDER2

# qhasm: free rdx

# qhasm: r23 += rax
# asm 1: add  <rax=int64#7,<r23=int64#10
# asm 2: add  <rax=%rax,<r23=%r12
add  %rax,%r12

# qhasm: r23 += c
# asm 1: add  <c=int64#4,<r23=int64#10
# asm 2: add  <c=%rcx,<r23=%r12
add  %rcx,%r12

# qhasm: rax = q32_stack
# asm 1: movq <q32_stack=stack64#10,>rax=int64#7
# asm 2: movq <q32_stack=72(%rsp),>rax=%rax
movq 72(%rsp),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_ORDER0
mulq  crypto_sign_ed25519_amd64_64_ORDER0

# qhasm: carry? r22 += rax
# asm 1: add  <rax=int64#7,<r22=int64#9
# asm 2: add  <rax=%rax,<r22=%r11
add  %rax,%r11

# qhasm: c = 0
# asm 1: mov  $0,>c=int64#4
# asm 2: mov  $0,>c=%rcx
mov  $0,%rcx

# qhasm: c += rdx + carry
# asm 1: adc <rdx=int64#3,<c=int64#4
# asm 2: adc <rdx=%rdx,<c=%rcx
adc %rdx,%rcx

# qhasm: rax = q32_stack
# asm 1: movq <q32_stack=stack64#10,>rax=int64#7
# asm 2: movq <q32_stack=72(%rsp),>rax=%rax
movq 72(%rsp),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_ORDER1
mulq  crypto_sign_ed25519_amd64_64_ORDER1

# qhasm: free rdx

# qhasm: r23 += rax
# asm 1: add  <rax=int64#7,<r23=int64#10
# asm 2: add  <rax=%rax,<r23=%r12
add  %rax,%r12

# qhasm: r23 += c
# asm 1: add  <c=int64#4,<r23=int64#10
# asm 2: add  <c=%rcx,<r23=%r12
add  %rcx,%r12

# qhasm: rax = q33_stack
# asm 1: movq <q33_stack=stack64#11,>rax=int64#7
# asm 2: movq <q33_stack=80(%rsp),>rax=%rax
movq 80(%rsp),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *) &crypto_sign_ed25519_amd64_64_ORDER0
mulq  crypto_sign_ed25519_amd64_64_ORDER0

# qhasm: free rdx

# qhasm: r23 += rax
# asm 1: add  <rax=int64#7,<r23=int64#10
# asm 2: add  <rax=%rax,<r23=%r12
add  %rax,%r12

# Phase 3: r = (x - q3*ORDER) mod 2^256, then at most two conditional
# subtractions of ORDER (cmovae keeps the pre-subtraction value when
# the sbb chain borrowed) to bring the result into [0, ORDER).

# qhasm: r0 = *(uint64 *)(xp +  0)
# asm 1: movq   0(<xp=int64#2),>r0=int64#3
# asm 2: movq   0(<xp=%rsi),>r0=%rdx
movq   0(%rsi),%rdx

# qhasm: carry? r0 -= r20
# asm 1: sub  <r20=int64#5,<r0=int64#3
# asm 2: sub  <r20=%r8,<r0=%rdx
sub  %r8,%rdx

# qhasm: t0 = r0
# asm 1: mov  <r0=int64#3,>t0=int64#4
# asm 2: mov  <r0=%rdx,>t0=%rcx
mov  %rdx,%rcx

# qhasm: r1 = *(uint64 *)(xp +  8)
# asm 1: movq   8(<xp=int64#2),>r1=int64#5
# asm 2: movq   8(<xp=%rsi),>r1=%r8
movq   8(%rsi),%r8

# qhasm: carry? r1 -= r21 - carry
# asm 1: sbb  <r21=int64#8,<r1=int64#5
# asm 2: sbb  <r21=%r10,<r1=%r8
sbb  %r10,%r8

# qhasm: t1 = r1
# asm 1: mov  <r1=int64#5,>t1=int64#6
# asm 2: mov  <r1=%r8,>t1=%r9
mov  %r8,%r9

# qhasm: r2 = *(uint64 *)(xp + 16)
# asm 1: movq   16(<xp=int64#2),>r2=int64#7
# asm 2: movq   16(<xp=%rsi),>r2=%rax
movq   16(%rsi),%rax

# qhasm: carry? r2 -= r22 - carry
# asm 1: sbb  <r22=int64#9,<r2=int64#7
# asm 2: sbb  <r22=%r11,<r2=%rax
sbb  %r11,%rax

# qhasm: t2 = r2
# asm 1: mov  <r2=int64#7,>t2=int64#8
# asm 2: mov  <r2=%rax,>t2=%r10
mov  %rax,%r10

# qhasm: r3 = *(uint64 *)(xp + 24)
# asm 1: movq   24(<xp=int64#2),>r3=int64#2
# asm 2: movq   24(<xp=%rsi),>r3=%rsi
movq   24(%rsi),%rsi

# qhasm: r3 -= r23 - carry
# asm 1: sbb  <r23=int64#10,<r3=int64#2
# asm 2: sbb  <r23=%r12,<r3=%rsi
sbb  %r12,%rsi

# qhasm: t3 = r3
# asm 1: mov  <r3=int64#2,>t3=int64#9
# asm 2: mov  <r3=%rsi,>t3=%r11
mov  %rsi,%r11

# qhasm: carry? t0 -= *(uint64 *) &crypto_sign_ed25519_amd64_64_ORDER0
# asm 1: sub  crypto_sign_ed25519_amd64_64_ORDER0,<t0=int64#4
# asm 2: sub  crypto_sign_ed25519_amd64_64_ORDER0,<t0=%rcx
sub  crypto_sign_ed25519_amd64_64_ORDER0,%rcx

# qhasm: carry? t1 -= *(uint64 *) &crypto_sign_ed25519_amd64_64_ORDER1 - carry
# asm 1: sbb  crypto_sign_ed25519_amd64_64_ORDER1,<t1=int64#6
# asm 2: sbb  crypto_sign_ed25519_amd64_64_ORDER1,<t1=%r9
sbb  crypto_sign_ed25519_amd64_64_ORDER1,%r9

# qhasm: carry? t2 -= *(uint64 *) &crypto_sign_ed25519_amd64_64_ORDER2 - carry
# asm 1: sbb  crypto_sign_ed25519_amd64_64_ORDER2,<t2=int64#8
# asm 2: sbb  crypto_sign_ed25519_amd64_64_ORDER2,<t2=%r10
sbb  crypto_sign_ed25519_amd64_64_ORDER2,%r10

# qhasm: unsigned<? t3 -= *(uint64 *) &crypto_sign_ed25519_amd64_64_ORDER3 - carry
# asm 1: sbb  crypto_sign_ed25519_amd64_64_ORDER3,<t3=int64#9
# asm 2: sbb  crypto_sign_ed25519_amd64_64_ORDER3,<t3=%r11
sbb  crypto_sign_ed25519_amd64_64_ORDER3,%r11

# qhasm: r0 = t0 if !unsigned<
# asm 1: cmovae <t0=int64#4,<r0=int64#3
# asm 2: cmovae <t0=%rcx,<r0=%rdx
cmovae %rcx,%rdx

# qhasm: t0 = r0
# asm 1: mov  <r0=int64#3,>t0=int64#4
# asm 2: mov  <r0=%rdx,>t0=%rcx
mov  %rdx,%rcx

# qhasm: r1 = t1 if !unsigned<
# asm 1: cmovae <t1=int64#6,<r1=int64#5
# asm 2: cmovae <t1=%r9,<r1=%r8
cmovae %r9,%r8

# qhasm: t1 = r1
# asm 1: mov  <r1=int64#5,>t1=int64#6
# asm 2: mov  <r1=%r8,>t1=%r9
mov  %r8,%r9

# qhasm: r2 = t2 if !unsigned<
# asm 1: cmovae <t2=int64#8,<r2=int64#7
# asm 2: cmovae <t2=%r10,<r2=%rax
cmovae %r10,%rax

# qhasm: t2 = r2
# asm 1: mov  <r2=int64#7,>t2=int64#8
# asm 2: mov  <r2=%rax,>t2=%r10
mov  %rax,%r10

# qhasm: r3 = t3 if !unsigned<
# asm 1: cmovae <t3=int64#9,<r3=int64#2
# asm 2: cmovae <t3=%r11,<r3=%rsi
cmovae %r11,%rsi

# qhasm: t3 = r3
# asm 1: mov  <r3=int64#2,>t3=int64#9
# asm 2: mov  <r3=%rsi,>t3=%r11
mov  %rsi,%r11

# qhasm: carry? t0 -= *(uint64 *) &crypto_sign_ed25519_amd64_64_ORDER0
# asm 1: sub  crypto_sign_ed25519_amd64_64_ORDER0,<t0=int64#4
# asm 2: sub  crypto_sign_ed25519_amd64_64_ORDER0,<t0=%rcx
sub  crypto_sign_ed25519_amd64_64_ORDER0,%rcx

# qhasm: carry? t1 -= *(uint64 *) &crypto_sign_ed25519_amd64_64_ORDER1 - carry
# asm 1: sbb  crypto_sign_ed25519_amd64_64_ORDER1,<t1=int64#6
# asm 2: sbb  crypto_sign_ed25519_amd64_64_ORDER1,<t1=%r9
sbb  crypto_sign_ed25519_amd64_64_ORDER1,%r9

# qhasm: carry? t2 -= *(uint64 *) &crypto_sign_ed25519_amd64_64_ORDER2 - carry
# asm 1: sbb  crypto_sign_ed25519_amd64_64_ORDER2,<t2=int64#8
# asm 2: sbb  crypto_sign_ed25519_amd64_64_ORDER2,<t2=%r10
sbb  crypto_sign_ed25519_amd64_64_ORDER2,%r10

# qhasm: unsigned<? t3 -= *(uint64 *) &crypto_sign_ed25519_amd64_64_ORDER3 - carry
# asm 1: sbb  crypto_sign_ed25519_amd64_64_ORDER3,<t3=int64#9
# asm 2: sbb  crypto_sign_ed25519_amd64_64_ORDER3,<t3=%r11
sbb  crypto_sign_ed25519_amd64_64_ORDER3,%r11

# qhasm: r0 = t0 if !unsigned<
# asm 1: cmovae <t0=int64#4,<r0=int64#3
# asm 2: cmovae <t0=%rcx,<r0=%rdx
cmovae %rcx,%rdx

# qhasm: r1 = t1 if !unsigned<
# asm 1: cmovae <t1=int64#6,<r1=int64#5
# asm 2: cmovae <t1=%r9,<r1=%r8
cmovae %r9,%r8

# qhasm: r2 = t2 if !unsigned<
# asm 1: cmovae <t2=int64#8,<r2=int64#7
# asm 2: cmovae <t2=%r10,<r2=%rax
cmovae %r10,%rax

# qhasm: r3 = t3 if !unsigned<
# asm 1: cmovae <t3=int64#9,<r3=int64#2
# asm 2: cmovae <t3=%r11,<r3=%rsi
cmovae %r11,%rsi

# qhasm: *(uint64 *)(rp +  0) = r0
# asm 1: movq   <r0=int64#3,0(<rp=int64#1)
# asm 2: movq   <r0=%rdx,0(<rp=%rdi)
movq   %rdx,0(%rdi)

# qhasm: *(uint64 *)(rp +  8) = r1
# asm 1: movq   <r1=int64#5,8(<rp=int64#1)
# asm 2: movq   <r1=%r8,8(<rp=%rdi)
movq   %r8,8(%rdi)

# qhasm: *(uint64 *)(rp + 16) = r2
# asm 1: movq   <r2=int64#7,16(<rp=int64#1)
# asm 2: movq   <r2=%rax,16(<rp=%rdi)
movq   %rax,16(%rdi)

# qhasm: *(uint64 *)(rp + 24) = r3
# asm 1: movq   <r3=int64#2,24(<rp=int64#1)
# asm 2: movq   <r3=%rsi,24(<rp=%rdi)
movq   %rsi,24(%rdi)

# qhasm:   caller1 = caller1_stack
# asm 1: movq <caller1_stack=stack64#1,>caller1=int64#9
# asm 2: movq <caller1_stack=0(%rsp),>caller1=%r11
movq 0(%rsp),%r11

# qhasm:   caller2 = caller2_stack
# asm 1: movq <caller2_stack=stack64#2,>caller2=int64#10
# asm 2: movq <caller2_stack=8(%rsp),>caller2=%r12
movq 8(%rsp),%r12

# qhasm:   caller3 = caller3_stack
# asm 1: movq <caller3_stack=stack64#3,>caller3=int64#11
# asm 2: movq <caller3_stack=16(%rsp),>caller3=%r13
movq 16(%rsp),%r13

# qhasm:   caller4 = caller4_stack
# asm 1: movq <caller4_stack=stack64#4,>caller4=int64#12
# asm 2: movq <caller4_stack=24(%rsp),>caller4=%r14
movq 24(%rsp),%r14

# qhasm:   caller5 = caller5_stack
# asm 1: movq <caller5_stack=stack64#5,>caller5=int64#13
# asm 2: movq <caller5_stack=32(%rsp),>caller5=%r15
movq 32(%rsp),%r15

# qhasm:   caller6 = caller6_stack
# asm 1: movq <caller6_stack=stack64#6,>caller6=int64#14
# asm 2: movq <caller6_stack=40(%rsp),>caller6=%rbx
movq 40(%rsp),%rbx

# qhasm:   caller7 = caller7_stack
# asm 1: movq <caller7_stack=stack64#7,>caller7=int64#15
# asm 2: movq <caller7_stack=48(%rsp),>caller7=%rbp
movq 48(%rsp),%rbp

# qhasm: leave
# note: r11 was saved at 0(%rsp) in the prologue and reloaded just
# above, so this exactly undoes the "sub %r11,%rsp" frame allocation.
add %r11,%rsp
mov %rdi,%rax
mov %rsi,%rdx
ret