# qhasm: int64 rp

# qhasm: int64 xp

# qhasm: int64 yp

# qhasm: input rp

# qhasm: input xp

# qhasm: input yp

# qhasm: int64 r0

# qhasm: int64 r1

# qhasm: int64 r2

# qhasm: int64 r3

# qhasm: int64 r4

# qhasm: int64 r5

# qhasm: int64 r6

# qhasm: int64 r7

# qhasm: int64 c

# qhasm: int64 zero

# qhasm: int64 rax

# qhasm: int64 rdx

# qhasm:   int64 caller1

# qhasm:   int64 caller2

# qhasm:   int64 caller3

# qhasm:   int64 caller4

# qhasm:   int64 caller5

# qhasm:   int64 caller6

# qhasm:   int64 caller7

# qhasm:   caller caller1

# qhasm:   caller caller2

# qhasm:   caller caller3

# qhasm:   caller caller4

# qhasm:   caller caller5

# qhasm:   caller caller6

# qhasm:   caller caller7

# qhasm:   stack64 caller1_stack

# qhasm:   stack64 caller2_stack

# qhasm:   stack64 caller3_stack

# qhasm:   stack64 caller4_stack

# qhasm:   stack64 caller5_stack

# qhasm:   stack64 caller6_stack

# qhasm:   stack64 caller7_stack

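# ull4_mul: schoolbook multiplication of two 256-bit unsigned integers.
# Reads the four 64-bit limbs x[0..3] from xp and y[0..3] from yp and writes
# the full 512-bit product r[0..7] to rp (little-endian limbs, no reduction).
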
# qhasm: enter crypto_sign_ed25519_amd64_64_ull4_mul
.text
.p2align 5
.globl _crypto_sign_ed25519_amd64_64_ull4_mul
.globl crypto_sign_ed25519_amd64_64_ull4_mul
_crypto_sign_ed25519_amd64_64_ull4_mul:
crypto_sign_ed25519_amd64_64_ull4_mul:
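# Prologue: align %rsp to 32 bytes and reserve at least 64 bytes of scratch
# space.  The adjustment amount is kept in %r11 and spilled to 0(%rsp) below so
# the epilogue can restore %rsp; %r12-%r15, %rbx and %rbp are callee-saved and
# are spilled as well.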
mov %rsp,%r11
and $31,%r11
add $64,%r11
sub %r11,%rsp

# qhasm:   caller1_stack = caller1
# asm 1: movq <caller1=int64#9,>caller1_stack=stack64#1
# asm 2: movq <caller1=%r11,>caller1_stack=0(%rsp)
movq %r11,0(%rsp)

# qhasm:   caller2_stack = caller2
# asm 1: movq <caller2=int64#10,>caller2_stack=stack64#2
# asm 2: movq <caller2=%r12,>caller2_stack=8(%rsp)
movq %r12,8(%rsp)

# qhasm:   caller3_stack = caller3
# asm 1: movq <caller3=int64#11,>caller3_stack=stack64#3
# asm 2: movq <caller3=%r13,>caller3_stack=16(%rsp)
movq %r13,16(%rsp)

# qhasm:   caller4_stack = caller4
# asm 1: movq <caller4=int64#12,>caller4_stack=stack64#4
# asm 2: movq <caller4=%r14,>caller4_stack=24(%rsp)
movq %r14,24(%rsp)

# qhasm:   caller5_stack = caller5
# asm 1: movq <caller5=int64#13,>caller5_stack=stack64#5
# asm 2: movq <caller5=%r15,>caller5_stack=32(%rsp)
movq %r15,32(%rsp)

# qhasm:   caller6_stack = caller6
# asm 1: movq <caller6=int64#14,>caller6_stack=stack64#6
# asm 2: movq <caller6=%rbx,>caller6_stack=40(%rsp)
movq %rbx,40(%rsp)

# qhasm:   caller7_stack = caller7
# asm 1: movq <caller7=int64#15,>caller7_stack=stack64#7
# asm 2: movq <caller7=%rbp,>caller7_stack=48(%rsp)
movq %rbp,48(%rsp)

# qhasm: yp = yp
# asm 1: mov  <yp=int64#3,>yp=int64#4
# asm 2: mov  <yp=%rdx,>yp=%rcx
mov  %rdx,%rcx

# qhasm: r4 = 0
# asm 1: mov  $0,>r4=int64#5
# asm 2: mov  $0,>r4=%r8
mov  $0,%r8

# qhasm: r5 = 0
# asm 1: mov  $0,>r5=int64#6
# asm 2: mov  $0,>r5=%r9
mov  $0,%r9

# qhasm: r6 = 0
# asm 1: mov  $0,>r6=int64#8
# asm 2: mov  $0,>r6=%r10
mov  $0,%r10

# qhasm: r7 = 0
# asm 1: mov  $0,>r7=int64#9
# asm 2: mov  $0,>r7=%r11
mov  $0,%r11

# qhasm: zero = 0
# asm 1: mov  $0,>zero=int64#10
# asm 2: mov  $0,>zero=%r12
mov  $0,%r12

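# Row x[0]: multiply x[0] by y[0..3].  Each mulq leaves the low half of the
# 128-bit product in %rax (added into the result limb) and the high half in
# %rdx (carried into the next limb through c).
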
# qhasm: rax = *(uint64 *)(xp + 0)
# asm 1: movq   0(<xp=int64#2),>rax=int64#7
# asm 2: movq   0(<xp=%rsi),>rax=%rax
movq   0(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp +  0)
# asm 1: mulq  0(<yp=int64#4)
# asm 2: mulq  0(<yp=%rcx)
mulq  0(%rcx)

# qhasm: r0 = rax
# asm 1: mov  <rax=int64#7,>r0=int64#11
# asm 2: mov  <rax=%rax,>r0=%r13
mov  %rax,%r13

# qhasm: c = rdx
# asm 1: mov  <rdx=int64#3,>c=int64#12
# asm 2: mov  <rdx=%rdx,>c=%r14
mov  %rdx,%r14

# qhasm: rax = *(uint64 *)(xp + 0)
# asm 1: movq   0(<xp=int64#2),>rax=int64#7
# asm 2: movq   0(<xp=%rsi),>rax=%rax
movq   0(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp +  8)
# asm 1: mulq  8(<yp=int64#4)
# asm 2: mulq  8(<yp=%rcx)
mulq  8(%rcx)

# qhasm: r1 = rax
# asm 1: mov  <rax=int64#7,>r1=int64#13
# asm 2: mov  <rax=%rax,>r1=%r15
mov  %rax,%r15

# qhasm: carry? r1 += c
# asm 1: add  <c=int64#12,<r1=int64#13
# asm 2: add  <c=%r14,<r1=%r15
add  %r14,%r15

# qhasm: c = 0
# asm 1: mov  $0,>c=int64#12
# asm 2: mov  $0,>c=%r14
mov  $0,%r14

# qhasm: c += rdx + carry
# asm 1: adc <rdx=int64#3,<c=int64#12
# asm 2: adc <rdx=%rdx,<c=%r14
adc %rdx,%r14

# qhasm: rax = *(uint64 *)(xp + 0)
# asm 1: movq   0(<xp=int64#2),>rax=int64#7
# asm 2: movq   0(<xp=%rsi),>rax=%rax
movq   0(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 16)
# asm 1: mulq  16(<yp=int64#4)
# asm 2: mulq  16(<yp=%rcx)
mulq  16(%rcx)

# qhasm: r2 = rax
# asm 1: mov  <rax=int64#7,>r2=int64#14
# asm 2: mov  <rax=%rax,>r2=%rbx
mov  %rax,%rbx

# qhasm: carry? r2 += c
# asm 1: add  <c=int64#12,<r2=int64#14
# asm 2: add  <c=%r14,<r2=%rbx
add  %r14,%rbx

# qhasm: c = 0
# asm 1: mov  $0,>c=int64#12
# asm 2: mov  $0,>c=%r14
mov  $0,%r14

# qhasm: c += rdx + carry
# asm 1: adc <rdx=int64#3,<c=int64#12
# asm 2: adc <rdx=%rdx,<c=%r14
adc %rdx,%r14

# qhasm: rax = *(uint64 *)(xp + 0)
# asm 1: movq   0(<xp=int64#2),>rax=int64#7
# asm 2: movq   0(<xp=%rsi),>rax=%rax
movq   0(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 24)
# asm 1: mulq  24(<yp=int64#4)
# asm 2: mulq  24(<yp=%rcx)
mulq  24(%rcx)

# qhasm: r3 = rax
# asm 1: mov  <rax=int64#7,>r3=int64#15
# asm 2: mov  <rax=%rax,>r3=%rbp
mov  %rax,%rbp

# qhasm: carry? r3 += c
# asm 1: add  <c=int64#12,<r3=int64#15
# asm 2: add  <c=%r14,<r3=%rbp
add  %r14,%rbp

# qhasm: r4 += rdx + carry
# asm 1: adc <rdx=int64#3,<r4=int64#5
# asm 2: adc <rdx=%rdx,<r4=%r8
adc %rdx,%r8

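# Row x[1]: accumulate x[1]*y[0..3] into r1..r5.  The "rdx += zero + carry"
# steps fold the carry from the limb addition into the high half before it is
# propagated.
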
# qhasm: rax = *(uint64 *)(xp + 8)
# asm 1: movq   8(<xp=int64#2),>rax=int64#7
# asm 2: movq   8(<xp=%rsi),>rax=%rax
movq   8(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp +  0)
# asm 1: mulq  0(<yp=int64#4)
# asm 2: mulq  0(<yp=%rcx)
mulq  0(%rcx)

# qhasm: carry? r1 += rax
# asm 1: add  <rax=int64#7,<r1=int64#13
# asm 2: add  <rax=%rax,<r1=%r15
add  %rax,%r15

# qhasm: c = 0
# asm 1: mov  $0,>c=int64#12
# asm 2: mov  $0,>c=%r14
mov  $0,%r14

# qhasm: c += rdx + carry
# asm 1: adc <rdx=int64#3,<c=int64#12
# asm 2: adc <rdx=%rdx,<c=%r14
adc %rdx,%r14

# qhasm: rax = *(uint64 *)(xp + 8)
# asm 1: movq   8(<xp=int64#2),>rax=int64#7
# asm 2: movq   8(<xp=%rsi),>rax=%rax
movq   8(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp +  8)
# asm 1: mulq  8(<yp=int64#4)
# asm 2: mulq  8(<yp=%rcx)
mulq  8(%rcx)

# qhasm: carry? r2 += rax
# asm 1: add  <rax=int64#7,<r2=int64#14
# asm 2: add  <rax=%rax,<r2=%rbx
add  %rax,%rbx

# qhasm: rdx += zero + carry
# asm 1: adc <zero=int64#10,<rdx=int64#3
# asm 2: adc <zero=%r12,<rdx=%rdx
adc %r12,%rdx

# qhasm: carry? r2 += c
# asm 1: add  <c=int64#12,<r2=int64#14
# asm 2: add  <c=%r14,<r2=%rbx
add  %r14,%rbx

# qhasm: c = 0
# asm 1: mov  $0,>c=int64#12
# asm 2: mov  $0,>c=%r14
mov  $0,%r14

# qhasm: c += rdx + carry
# asm 1: adc <rdx=int64#3,<c=int64#12
# asm 2: adc <rdx=%rdx,<c=%r14
adc %rdx,%r14

# qhasm: rax = *(uint64 *)(xp + 8)
# asm 1: movq   8(<xp=int64#2),>rax=int64#7
# asm 2: movq   8(<xp=%rsi),>rax=%rax
movq   8(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 16)
# asm 1: mulq  16(<yp=int64#4)
# asm 2: mulq  16(<yp=%rcx)
mulq  16(%rcx)

# qhasm: carry? r3 += rax
# asm 1: add  <rax=int64#7,<r3=int64#15
# asm 2: add  <rax=%rax,<r3=%rbp
add  %rax,%rbp

# qhasm: rdx += zero + carry
# asm 1: adc <zero=int64#10,<rdx=int64#3
# asm 2: adc <zero=%r12,<rdx=%rdx
adc %r12,%rdx

# qhasm: carry? r3 += c
# asm 1: add  <c=int64#12,<r3=int64#15
# asm 2: add  <c=%r14,<r3=%rbp
add  %r14,%rbp

# qhasm: c = 0
# asm 1: mov  $0,>c=int64#12
# asm 2: mov  $0,>c=%r14
mov  $0,%r14

# qhasm: c += rdx + carry
# asm 1: adc <rdx=int64#3,<c=int64#12
# asm 2: adc <rdx=%rdx,<c=%r14
adc %rdx,%r14

# qhasm: rax = *(uint64 *)(xp + 8)
# asm 1: movq   8(<xp=int64#2),>rax=int64#7
# asm 2: movq   8(<xp=%rsi),>rax=%rax
movq   8(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 24)
# asm 1: mulq  24(<yp=int64#4)
# asm 2: mulq  24(<yp=%rcx)
mulq  24(%rcx)

# qhasm: carry? r4 += rax
# asm 1: add  <rax=int64#7,<r4=int64#5
# asm 2: add  <rax=%rax,<r4=%r8
add  %rax,%r8

# qhasm: rdx += zero + carry
# asm 1: adc <zero=int64#10,<rdx=int64#3
# asm 2: adc <zero=%r12,<rdx=%rdx
adc %r12,%rdx

# qhasm: carry? r4 += c
# asm 1: add  <c=int64#12,<r4=int64#5
# asm 2: add  <c=%r14,<r4=%r8
add  %r14,%r8

# qhasm: r5 += rdx + carry
# asm 1: adc <rdx=int64#3,<r5=int64#6
# asm 2: adc <rdx=%rdx,<r5=%r9
adc %rdx,%r9

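# Row x[2]: accumulate x[2]*y[0..3] into r2..r6, using the same carry chain.
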
# qhasm: rax = *(uint64 *)(xp + 16)
# asm 1: movq   16(<xp=int64#2),>rax=int64#7
# asm 2: movq   16(<xp=%rsi),>rax=%rax
movq   16(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp +  0)
# asm 1: mulq  0(<yp=int64#4)
# asm 2: mulq  0(<yp=%rcx)
mulq  0(%rcx)

# qhasm: carry? r2 += rax
# asm 1: add  <rax=int64#7,<r2=int64#14
# asm 2: add  <rax=%rax,<r2=%rbx
add  %rax,%rbx

# qhasm: c = 0
# asm 1: mov  $0,>c=int64#12
# asm 2: mov  $0,>c=%r14
mov  $0,%r14

# qhasm: c += rdx + carry
# asm 1: adc <rdx=int64#3,<c=int64#12
# asm 2: adc <rdx=%rdx,<c=%r14
adc %rdx,%r14

# qhasm: rax = *(uint64 *)(xp + 16)
# asm 1: movq   16(<xp=int64#2),>rax=int64#7
# asm 2: movq   16(<xp=%rsi),>rax=%rax
movq   16(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp +  8)
# asm 1: mulq  8(<yp=int64#4)
# asm 2: mulq  8(<yp=%rcx)
mulq  8(%rcx)

# qhasm: carry? r3 += rax
# asm 1: add  <rax=int64#7,<r3=int64#15
# asm 2: add  <rax=%rax,<r3=%rbp
add  %rax,%rbp

# qhasm: rdx += zero + carry
# asm 1: adc <zero=int64#10,<rdx=int64#3
# asm 2: adc <zero=%r12,<rdx=%rdx
adc %r12,%rdx

# qhasm: carry? r3 += c
# asm 1: add  <c=int64#12,<r3=int64#15
# asm 2: add  <c=%r14,<r3=%rbp
add  %r14,%rbp

# qhasm: c = 0
# asm 1: mov  $0,>c=int64#12
# asm 2: mov  $0,>c=%r14
mov  $0,%r14

# qhasm: c += rdx + carry
# asm 1: adc <rdx=int64#3,<c=int64#12
# asm 2: adc <rdx=%rdx,<c=%r14
adc %rdx,%r14

# qhasm: rax = *(uint64 *)(xp + 16)
# asm 1: movq   16(<xp=int64#2),>rax=int64#7
# asm 2: movq   16(<xp=%rsi),>rax=%rax
movq   16(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 16)
# asm 1: mulq  16(<yp=int64#4)
# asm 2: mulq  16(<yp=%rcx)
mulq  16(%rcx)

# qhasm: carry? r4 += rax
# asm 1: add  <rax=int64#7,<r4=int64#5
# asm 2: add  <rax=%rax,<r4=%r8
add  %rax,%r8

# qhasm: rdx += zero + carry
# asm 1: adc <zero=int64#10,<rdx=int64#3
# asm 2: adc <zero=%r12,<rdx=%rdx
adc %r12,%rdx

# qhasm: carry? r4 += c
# asm 1: add  <c=int64#12,<r4=int64#5
# asm 2: add  <c=%r14,<r4=%r8
add  %r14,%r8

# qhasm: c = 0
# asm 1: mov  $0,>c=int64#12
# asm 2: mov  $0,>c=%r14
mov  $0,%r14

# qhasm: c += rdx + carry
# asm 1: adc <rdx=int64#3,<c=int64#12
# asm 2: adc <rdx=%rdx,<c=%r14
adc %rdx,%r14

# qhasm: rax = *(uint64 *)(xp + 16)
# asm 1: movq   16(<xp=int64#2),>rax=int64#7
# asm 2: movq   16(<xp=%rsi),>rax=%rax
movq   16(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 24)
# asm 1: mulq  24(<yp=int64#4)
# asm 2: mulq  24(<yp=%rcx)
mulq  24(%rcx)

# qhasm: carry? r5 += rax
# asm 1: add  <rax=int64#7,<r5=int64#6
# asm 2: add  <rax=%rax,<r5=%r9
add  %rax,%r9

# qhasm: rdx += zero + carry
# asm 1: adc <zero=int64#10,<rdx=int64#3
# asm 2: adc <zero=%r12,<rdx=%rdx
adc %r12,%rdx

# qhasm: carry? r5 += c
# asm 1: add  <c=int64#12,<r5=int64#6
# asm 2: add  <c=%r14,<r5=%r9
add  %r14,%r9

# qhasm: r6 += rdx + carry
# asm 1: adc <rdx=int64#3,<r6=int64#8
# asm 2: adc <rdx=%rdx,<r6=%r10
adc %rdx,%r10

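# Row x[3]: accumulate x[3]*y[0..3] into r3..r7; r7 absorbs the final carry.
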
# qhasm: rax = *(uint64 *)(xp + 24)
# asm 1: movq   24(<xp=int64#2),>rax=int64#7
# asm 2: movq   24(<xp=%rsi),>rax=%rax
movq   24(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp +  0)
# asm 1: mulq  0(<yp=int64#4)
# asm 2: mulq  0(<yp=%rcx)
mulq  0(%rcx)

# qhasm: carry? r3 += rax
# asm 1: add  <rax=int64#7,<r3=int64#15
# asm 2: add  <rax=%rax,<r3=%rbp
add  %rax,%rbp

# qhasm: c = 0
# asm 1: mov  $0,>c=int64#12
# asm 2: mov  $0,>c=%r14
mov  $0,%r14

# qhasm: c += rdx + carry
# asm 1: adc <rdx=int64#3,<c=int64#12
# asm 2: adc <rdx=%rdx,<c=%r14
adc %rdx,%r14

# qhasm: rax = *(uint64 *)(xp + 24)
# asm 1: movq   24(<xp=int64#2),>rax=int64#7
# asm 2: movq   24(<xp=%rsi),>rax=%rax
movq   24(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp +  8)
# asm 1: mulq  8(<yp=int64#4)
# asm 2: mulq  8(<yp=%rcx)
mulq  8(%rcx)

# qhasm: carry? r4 += rax
# asm 1: add  <rax=int64#7,<r4=int64#5
# asm 2: add  <rax=%rax,<r4=%r8
add  %rax,%r8

# qhasm: rdx += zero + carry
# asm 1: adc <zero=int64#10,<rdx=int64#3
# asm 2: adc <zero=%r12,<rdx=%rdx
adc %r12,%rdx

# qhasm: carry? r4 += c
# asm 1: add  <c=int64#12,<r4=int64#5
# asm 2: add  <c=%r14,<r4=%r8
add  %r14,%r8

# qhasm: c = 0
# asm 1: mov  $0,>c=int64#12
# asm 2: mov  $0,>c=%r14
mov  $0,%r14

# qhasm: c += rdx + carry
# asm 1: adc <rdx=int64#3,<c=int64#12
# asm 2: adc <rdx=%rdx,<c=%r14
adc %rdx,%r14

# qhasm: rax = *(uint64 *)(xp + 24)
# asm 1: movq   24(<xp=int64#2),>rax=int64#7
# asm 2: movq   24(<xp=%rsi),>rax=%rax
movq   24(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 16)
# asm 1: mulq  16(<yp=int64#4)
# asm 2: mulq  16(<yp=%rcx)
mulq  16(%rcx)

# qhasm: carry? r5 += rax
# asm 1: add  <rax=int64#7,<r5=int64#6
# asm 2: add  <rax=%rax,<r5=%r9
add  %rax,%r9

# qhasm: rdx += zero + carry
# asm 1: adc <zero=int64#10,<rdx=int64#3
# asm 2: adc <zero=%r12,<rdx=%rdx
adc %r12,%rdx

# qhasm: carry? r5 += c
# asm 1: add  <c=int64#12,<r5=int64#6
# asm 2: add  <c=%r14,<r5=%r9
add  %r14,%r9

# qhasm: c = 0
# asm 1: mov  $0,>c=int64#12
# asm 2: mov  $0,>c=%r14
mov  $0,%r14

# qhasm: c += rdx + carry
# asm 1: adc <rdx=int64#3,<c=int64#12
# asm 2: adc <rdx=%rdx,<c=%r14
adc %rdx,%r14

# qhasm: rax = *(uint64 *)(xp + 24)
# asm 1: movq   24(<xp=int64#2),>rax=int64#7
# asm 2: movq   24(<xp=%rsi),>rax=%rax
movq   24(%rsi),%rax

# qhasm: (uint128) rdx rax = rax * *(uint64 *)(yp + 24)
# asm 1: mulq  24(<yp=int64#4)
# asm 2: mulq  24(<yp=%rcx)
mulq  24(%rcx)

# qhasm: carry? r6 += rax
# asm 1: add  <rax=int64#7,<r6=int64#8
# asm 2: add  <rax=%rax,<r6=%r10
add  %rax,%r10

# qhasm: rdx += zero + carry
# asm 1: adc <zero=int64#10,<rdx=int64#3
# asm 2: adc <zero=%r12,<rdx=%rdx
adc %r12,%rdx

# qhasm: carry? r6 += c
# asm 1: add  <c=int64#12,<r6=int64#8
# asm 2: add  <c=%r14,<r6=%r10
add  %r14,%r10

# qhasm: r7 += rdx + carry
# asm 1: adc <rdx=int64#3,<r7=int64#9
# asm 2: adc <rdx=%rdx,<r7=%r11
adc %rdx,%r11

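# r0..r7 now hold the complete 512-bit product; store it to rp as eight
# 64-bit limbs.
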
# qhasm: *(uint64 *)(rp +  0) = r0
# asm 1: movq   <r0=int64#11,0(<rp=int64#1)
# asm 2: movq   <r0=%r13,0(<rp=%rdi)
movq   %r13,0(%rdi)

# qhasm: *(uint64 *)(rp +  8) = r1
# asm 1: movq   <r1=int64#13,8(<rp=int64#1)
# asm 2: movq   <r1=%r15,8(<rp=%rdi)
movq   %r15,8(%rdi)

# qhasm: *(uint64 *)(rp + 16) = r2
# asm 1: movq   <r2=int64#14,16(<rp=int64#1)
# asm 2: movq   <r2=%rbx,16(<rp=%rdi)
movq   %rbx,16(%rdi)

# qhasm: *(uint64 *)(rp + 24) = r3
# asm 1: movq   <r3=int64#15,24(<rp=int64#1)
# asm 2: movq   <r3=%rbp,24(<rp=%rdi)
movq   %rbp,24(%rdi)

# qhasm: *(uint64 *)(rp + 32) = r4
# asm 1: movq   <r4=int64#5,32(<rp=int64#1)
# asm 2: movq   <r4=%r8,32(<rp=%rdi)
movq   %r8,32(%rdi)

# qhasm: *(uint64 *)(rp + 40) = r5
# asm 1: movq   <r5=int64#6,40(<rp=int64#1)
# asm 2: movq   <r5=%r9,40(<rp=%rdi)
movq   %r9,40(%rdi)

# qhasm: *(uint64 *)(rp + 48) = r6
# asm 1: movq   <r6=int64#8,48(<rp=int64#1)
# asm 2: movq   <r6=%r10,48(<rp=%rdi)
movq   %r10,48(%rdi)

# qhasm: *(uint64 *)(rp + 56) = r7
# asm 1: movq   <r7=int64#9,56(<rp=int64#1)
# asm 2: movq   <r7=%r11,56(<rp=%rdi)
movq   %r11,56(%rdi)

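# Restore the registers spilled in the prologue; the value reloaded into %r11
# is the stack adjustment that was saved at 0(%rsp).
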
# qhasm:   caller1 = caller1_stack
# asm 1: movq <caller1_stack=stack64#1,>caller1=int64#9
# asm 2: movq <caller1_stack=0(%rsp),>caller1=%r11
movq 0(%rsp),%r11

# qhasm:   caller2 = caller2_stack
# asm 1: movq <caller2_stack=stack64#2,>caller2=int64#10
# asm 2: movq <caller2_stack=8(%rsp),>caller2=%r12
movq 8(%rsp),%r12

# qhasm:   caller3 = caller3_stack
# asm 1: movq <caller3_stack=stack64#3,>caller3=int64#11
# asm 2: movq <caller3_stack=16(%rsp),>caller3=%r13
movq 16(%rsp),%r13

# qhasm:   caller4 = caller4_stack
# asm 1: movq <caller4_stack=stack64#4,>caller4=int64#12
# asm 2: movq <caller4_stack=24(%rsp),>caller4=%r14
movq 24(%rsp),%r14

# qhasm:   caller5 = caller5_stack
# asm 1: movq <caller5_stack=stack64#5,>caller5=int64#13
# asm 2: movq <caller5_stack=32(%rsp),>caller5=%r15
movq 32(%rsp),%r15

# qhasm:   caller6 = caller6_stack
# asm 1: movq <caller6_stack=stack64#6,>caller6=int64#14
# asm 2: movq <caller6_stack=40(%rsp),>caller6=%rbx
movq 40(%rsp),%rbx

# qhasm:   caller7 = caller7_stack
# asm 1: movq <caller7_stack=stack64#7,>caller7=int64#15
# asm 2: movq <caller7_stack=48(%rsp),>caller7=%rbp
movq 48(%rsp),%rbp

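# Epilogue: undo the stack adjustment using the amount reloaded into %r11 above.
# The trailing moves into %rax and %rdx appear to come from qhasm's standard
# leave sequence.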
# qhasm: leave
add %r11,%rsp
mov %rdi,%rax
mov %rsi,%rdx
ret