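# sc25519_add: r = (x + y) mod ell, where
# ell = 2^252 + 27742317777372353535851937790883648493 is the order of
# the Ed25519 base-point group. rp, xp and yp point to scalars stored
# as four little-endian 64-bit limbs. Assuming both inputs are already
# reduced mod ell, the sum is below 2*ell, so a single conditional
# subtraction of ell suffices for the final reduction.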
# qhasm: int64 rp

# qhasm: int64 xp

# qhasm: int64 yp

# qhasm: input rp

# qhasm: input xp

# qhasm: input yp

# qhasm: int64 r0

# qhasm: int64 r1

# qhasm: int64 r2

# qhasm: int64 r3

# qhasm: int64 t0

# qhasm: int64 t1

# qhasm: int64 t2

# qhasm: int64 t3

# qhasm:   int64 caller1

# qhasm:   int64 caller2

# qhasm:   int64 caller3

# qhasm:   int64 caller4

# qhasm:   int64 caller5

# qhasm:   int64 caller6

# qhasm:   int64 caller7

# qhasm:   caller caller1

# qhasm:   caller caller2

# qhasm:   caller caller3

# qhasm:   caller caller4

# qhasm:   caller caller5

# qhasm:   caller caller6

# qhasm:   caller caller7

# qhasm:   stack64 caller4_stack

# qhasm:   stack64 caller5_stack

# qhasm:   stack64 caller6_stack

# qhasm:   stack64 caller7_stack

# qhasm: enter crypto_sign_ed25519_amd64_64_sc25519_add
.text
.p2align 5
.globl _crypto_sign_ed25519_amd64_64_sc25519_add
.globl crypto_sign_ed25519_amd64_64_sc25519_add
_crypto_sign_ed25519_amd64_64_sc25519_add:
crypto_sign_ed25519_amd64_64_sc25519_add:
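# Align the stack pointer to 32 bytes and reserve scratch space for the
# register spills below; %r11 records the adjustment so the epilogue
# can undo it.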
mov %rsp,%r11
and $31,%r11
add $32,%r11
sub %r11,%rsp
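# Spill the callee-saved registers %r14, %r15 and %rbx that the qhasm
# source declares as caller registers; %r14 is reused as scratch below.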
# qhasm: caller4_stack = caller4
# asm 1: movq <caller4=int64#12,>caller4_stack=stack64#1
# asm 2: movq <caller4=%r14,>caller4_stack=0(%rsp)
movq %r14,0(%rsp)

# qhasm: caller5_stack = caller5
# asm 1: movq <caller5=int64#13,>caller5_stack=stack64#2
# asm 2: movq <caller5=%r15,>caller5_stack=8(%rsp)
movq %r15,8(%rsp)

# qhasm: caller6_stack = caller6
# asm 1: movq <caller6=int64#14,>caller6_stack=stack64#3
# asm 2: movq <caller6=%rbx,>caller6_stack=16(%rsp)
movq %rbx,16(%rsp)
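# Load the four 64-bit limbs of the scalar x into registers.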
# qhasm: r0 = *(uint64 *)(xp +  0)
# asm 1: movq   0(<xp=int64#2),>r0=int64#4
# asm 2: movq   0(<xp=%rsi),>r0=%rcx
movq   0(%rsi),%rcx

# qhasm: r1 = *(uint64 *)(xp +  8)
# asm 1: movq   8(<xp=int64#2),>r1=int64#5
# asm 2: movq   8(<xp=%rsi),>r1=%r8
movq   8(%rsi),%r8

# qhasm: r2 = *(uint64 *)(xp + 16)
# asm 1: movq   16(<xp=int64#2),>r2=int64#6
# asm 2: movq   16(<xp=%rsi),>r2=%r9
movq   16(%rsi),%r9

# qhasm: r3 = *(uint64 *)(xp + 24)
# asm 1: movq   24(<xp=int64#2),>r3=int64#2
# asm 2: movq   24(<xp=%rsi),>r3=%rsi
movq   24(%rsi),%rsi
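# 256-bit addition r = x + y: add the low limbs, then chain the carry
# through the higher limbs with adc.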
# qhasm: carry? r0 += *(uint64 *)(yp +  0)
# asm 1: addq 0(<yp=int64#3),<r0=int64#4
# asm 2: addq 0(<yp=%rdx),<r0=%rcx
addq 0(%rdx),%rcx

# qhasm: carry? r1 += *(uint64 *)(yp +  8) + carry
# asm 1: adcq 8(<yp=int64#3),<r1=int64#5
# asm 2: adcq 8(<yp=%rdx),<r1=%r8
adcq 8(%rdx),%r8

# qhasm: carry? r2 += *(uint64 *)(yp + 16) + carry
# asm 1: adcq 16(<yp=int64#3),<r2=int64#6
# asm 2: adcq 16(<yp=%rdx),<r2=%r9
adcq 16(%rdx),%r9

# qhasm: r3 += *(uint64 *)(yp + 24) + carry
# asm 1: adcq 24(<yp=int64#3),<r3=int64#2
# asm 2: adcq 24(<yp=%rdx),<r3=%rsi
adcq 24(%rdx),%rsi
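# Copy the sum into t0..t3 so the trial subtraction of the group order
# does not destroy r.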
# qhasm: t0 = r0
# asm 1: mov  <r0=int64#4,>t0=int64#3
# asm 2: mov  <r0=%rcx,>t0=%rdx
mov  %rcx,%rdx

# qhasm: t1 = r1
# asm 1: mov  <r1=int64#5,>t1=int64#7
# asm 2: mov  <r1=%r8,>t1=%rax
mov  %r8,%rax

# qhasm: t2 = r2
# asm 1: mov  <r2=int64#6,>t2=int64#8
# asm 2: mov  <r2=%r9,>t2=%r10
mov  %r9,%r10

# qhasm: t3 = r3
# asm 1: mov  <r3=int64#2,>t3=int64#12
# asm 2: mov  <r3=%rsi,>t3=%r14
mov  %rsi,%r14
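# Trial subtraction t = r - ell against the limbs ORDER0..ORDER3 of the
# group order; after the final sbb, the carry flag is set exactly when
# r < ell (the subtraction borrowed).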
# qhasm: carry? t0 -= *(uint64 *) &crypto_sign_ed25519_amd64_64_ORDER0
# asm 1: sub  crypto_sign_ed25519_amd64_64_ORDER0,<t0=int64#3
# asm 2: sub  crypto_sign_ed25519_amd64_64_ORDER0,<t0=%rdx
sub  crypto_sign_ed25519_amd64_64_ORDER0,%rdx

# qhasm: carry? t1 -= *(uint64 *) &crypto_sign_ed25519_amd64_64_ORDER1 - carry
# asm 1: sbb  crypto_sign_ed25519_amd64_64_ORDER1,<t1=int64#7
# asm 2: sbb  crypto_sign_ed25519_amd64_64_ORDER1,<t1=%rax
sbb  crypto_sign_ed25519_amd64_64_ORDER1,%rax

# qhasm: carry? t2 -= *(uint64 *) &crypto_sign_ed25519_amd64_64_ORDER2 - carry
# asm 1: sbb  crypto_sign_ed25519_amd64_64_ORDER2,<t2=int64#8
# asm 2: sbb  crypto_sign_ed25519_amd64_64_ORDER2,<t2=%r10
sbb  crypto_sign_ed25519_amd64_64_ORDER2,%r10

# qhasm: unsigned<? t3 -= *(uint64 *) &crypto_sign_ed25519_amd64_64_ORDER3 - carry
# asm 1: sbb  crypto_sign_ed25519_amd64_64_ORDER3,<t3=int64#12
# asm 2: sbb  crypto_sign_ed25519_amd64_64_ORDER3,<t3=%r14
sbb  crypto_sign_ed25519_amd64_64_ORDER3,%r14
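# Branch-free reduction: cmovae copies t into r only when the carry
# flag is clear (r >= ell), so the result is always in [0, ell)
# without a secret-dependent branch.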
# qhasm: r0 = t0 if !unsigned<
# asm 1: cmovae <t0=int64#3,<r0=int64#4
# asm 2: cmovae <t0=%rdx,<r0=%rcx
cmovae %rdx,%rcx

# qhasm: r1 = t1 if !unsigned<
# asm 1: cmovae <t1=int64#7,<r1=int64#5
# asm 2: cmovae <t1=%rax,<r1=%r8
cmovae %rax,%r8

# qhasm: r2 = t2 if !unsigned<
# asm 1: cmovae <t2=int64#8,<r2=int64#6
# asm 2: cmovae <t2=%r10,<r2=%r9
cmovae %r10,%r9

# qhasm: r3 = t3 if !unsigned<
# asm 1: cmovae <t3=int64#12,<r3=int64#2
# asm 2: cmovae <t3=%r14,<r3=%rsi
cmovae %r14,%rsi
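# Store the reduced result to rp and restore the spilled registers.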
# qhasm: *(uint64 *)(rp +  0) = r0
# asm 1: movq   <r0=int64#4,0(<rp=int64#1)
# asm 2: movq   <r0=%rcx,0(<rp=%rdi)
movq   %rcx,0(%rdi)

# qhasm: *(uint64 *)(rp +  8) = r1
# asm 1: movq   <r1=int64#5,8(<rp=int64#1)
# asm 2: movq   <r1=%r8,8(<rp=%rdi)
movq   %r8,8(%rdi)

# qhasm: *(uint64 *)(rp + 16) = r2
# asm 1: movq   <r2=int64#6,16(<rp=int64#1)
# asm 2: movq   <r2=%r9,16(<rp=%rdi)
movq   %r9,16(%rdi)

# qhasm: *(uint64 *)(rp + 24) = r3
# asm 1: movq   <r3=int64#2,24(<rp=int64#1)
# asm 2: movq   <r3=%rsi,24(<rp=%rdi)
movq   %rsi,24(%rdi)

# qhasm: caller4 = caller4_stack
# asm 1: movq <caller4_stack=stack64#1,>caller4=int64#12
# asm 2: movq <caller4_stack=0(%rsp),>caller4=%r14
movq 0(%rsp),%r14

# qhasm: caller5 = caller5_stack
# asm 1: movq <caller5_stack=stack64#2,>caller5=int64#13
# asm 2: movq <caller5_stack=8(%rsp),>caller5=%r15
movq 8(%rsp),%r15

# qhasm: caller6 = caller6_stack
# asm 1: movq <caller6_stack=stack64#3,>caller6=int64#14
# asm 2: movq <caller6_stack=16(%rsp),>caller6=%rbx
movq 16(%rsp),%rbx
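# Restore the caller's stack pointer and return. The moves into %rax
# and %rdx are part of qhasm's standard leave sequence; callers do not
# use a return value.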
# qhasm: leave
add %r11,%rsp
mov %rdi,%rax
mov %rsi,%rdx
ret