
# qhasm: int64 hp

# qhasm: int64 hlen

# qhasm: int64 sp

# qhasm: int64 pp

# qhasm: input hp

# qhasm: input hlen

# qhasm: input sp

# qhasm: int64 prc

# qhasm: int64 plc

# qhasm: int64 pc

# qhasm: int64 d

# qhasm: int64 spp

# qhasm: int64 sprc

# qhasm: int64 spc

# qhasm: int64 c0

# qhasm: int64 c1

# qhasm: int64 c2

# qhasm: int64 c3

# qhasm: int64 t0

# qhasm: int64 t1

# qhasm: int64 t2

# qhasm: int64 t3

# qhasm: int64 p0

# qhasm: int64 p1

# qhasm: int64 p2

# qhasm: int64 p3

# qhasm:   int64 caller1

# qhasm:   int64 caller2

# qhasm:   int64 caller3

# qhasm:   int64 caller4

# qhasm:   int64 caller5

# qhasm:   int64 caller6

# qhasm:   int64 caller7

# qhasm:   caller caller1

# qhasm:   caller caller2

# qhasm:   caller caller3

# qhasm:   caller caller4

# qhasm:   caller caller5

# qhasm:   caller caller6

# qhasm:   caller caller7

# qhasm:   stack64 caller1_stack

# qhasm:   stack64 caller2_stack

# qhasm:   stack64 caller3_stack

# qhasm:   stack64 caller4_stack

# qhasm:   stack64 caller5_stack

# qhasm:   stack64 caller6_stack

# qhasm:   stack64 caller7_stack

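# This routine replaces the root of a max-heap and restores the heap
# invariant: hp points to an array of hlen 8-byte indices into an array
# of 256-bit (32-byte) values at sp. It works in two passes, first
# swapping the new root down toward the larger child until fewer than
# two children remain, then sifting it back up until it is no larger
# than its parent. (Root replacement of this shape is the update step
# of Bos-Coster-style multi-scalar multiplication, which repeatedly
# pops the largest scalar from a heap.)
#
# A rough C-style sketch of the same logic, with hypothetical helpers
# val() (the 256-bit value selected by heap entry i) and swap():
#
#   pp = 0;
#   while (2*pp + 2 < hlen) {                     /* both children exist */
#     pc = 2*pp + 1; prc = 2*pp + 2;
#     if (val(hp[pc]) < val(hp[prc])) pc = prc;   /* pick larger child   */
#     swap(&hp[pp], &hp[pc]); pp = pc;            /* swap unconditionally */
#   }
#   while (pp > 0) {                              /* repair on the way up */
#     parent = (pp - 1) / 2;
#     if (val(hp[pp]) < val(hp[parent])) break;
#     swap(&hp[pp], &hp[parent]); pp = parent;
#   }
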
# qhasm: enter crypto_sign_ed25519_amd64_64_heap_rootreplaced
.text
.p2align 5
.globl _crypto_sign_ed25519_amd64_64_heap_rootreplaced
.globl crypto_sign_ed25519_amd64_64_heap_rootreplaced
_crypto_sign_ed25519_amd64_64_heap_rootreplaced:
crypto_sign_ed25519_amd64_64_heap_rootreplaced:
mov %rsp,%r11
and $31,%r11
add $64,%r11
sub %r11,%rsp
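
# The three instructions above round %rsp down to a 32-byte boundary
# and reserve at least 64 bytes: %r11 = (%rsp & 31) + 64 is the exact
# adjustment, which is spilled to the first stack slot below and added
# back to %rsp by the "leave" sequence at the end.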

# qhasm: caller1_stack = caller1
# asm 1: movq <caller1=int64#9,>caller1_stack=stack64#1
# asm 2: movq <caller1=%r11,>caller1_stack=0(%rsp)
movq %r11,0(%rsp)

# qhasm: caller2_stack = caller2
# asm 1: movq <caller2=int64#10,>caller2_stack=stack64#2
# asm 2: movq <caller2=%r12,>caller2_stack=8(%rsp)
movq %r12,8(%rsp)

# qhasm: caller3_stack = caller3
# asm 1: movq <caller3=int64#11,>caller3_stack=stack64#3
# asm 2: movq <caller3=%r13,>caller3_stack=16(%rsp)
movq %r13,16(%rsp)

# qhasm: caller4_stack = caller4
# asm 1: movq <caller4=int64#12,>caller4_stack=stack64#4
# asm 2: movq <caller4=%r14,>caller4_stack=24(%rsp)
movq %r14,24(%rsp)

# qhasm: caller5_stack = caller5
# asm 1: movq <caller5=int64#13,>caller5_stack=stack64#5
# asm 2: movq <caller5=%r15,>caller5_stack=32(%rsp)
movq %r15,32(%rsp)

# qhasm: caller6_stack = caller6
# asm 1: movq <caller6=int64#14,>caller6_stack=stack64#6
# asm 2: movq <caller6=%rbx,>caller6_stack=40(%rsp)
movq %rbx,40(%rsp)

# qhasm: caller7_stack = caller7
# asm 1: movq <caller7=int64#15,>caller7_stack=stack64#7
# asm 2: movq <caller7=%rbp,>caller7_stack=48(%rsp)
movq %rbp,48(%rsp)

# qhasm: pp = 0
# asm 1: mov  $0,>pp=int64#4
# asm 2: mov  $0,>pp=%rcx
mov  $0,%rcx

# qhasm: siftdownloop:
._siftdownloop:
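
# Sift-down phase: compute the child indices pc = 2*pp + 1 and
# prc = 2*pp + 2, and descend toward the larger child. The new root is
# swapped downward unconditionally, without comparing it against the
# children; the sift-up phase afterwards undoes any overshoot.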

# qhasm: prc = pp
# asm 1: mov  <pp=int64#4,>prc=int64#5
# asm 2: mov  <pp=%rcx,>prc=%r8
mov  %rcx,%r8

# qhasm: prc *= 2
# asm 1: imulq  $2,<prc=int64#5,>prc=int64#5
# asm 2: imulq  $2,<prc=%r8,>prc=%r8
imulq  $2,%r8,%r8

# qhasm: pc = prc
# asm 1: mov  <prc=int64#5,>pc=int64#6
# asm 2: mov  <prc=%r8,>pc=%r9
mov  %r8,%r9

# qhasm: prc += 2
# asm 1: add  $2,<prc=int64#5
# asm 2: add  $2,<prc=%r8
add  $2,%r8

# qhasm: pc += 1
# asm 1: add  $1,<pc=int64#6
# asm 2: add  $1,<pc=%r9
add  $1,%r9

# qhasm: unsigned>? hlen - prc
# asm 1: cmp  <prc=int64#5,<hlen=int64#2
# asm 2: cmp  <prc=%r8,<hlen=%rsi
cmp  %r8,%rsi
# comment:fp stack unchanged by jump

# qhasm: goto siftuploop if !unsigned>
jbe ._siftuploop
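
# Only the right-child index prc is range-checked, so the descent stops
# as soon as the current node has fewer than two children.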

# qhasm: sprc = *(uint64 *)(hp + prc * 8)
# asm 1: movq   (<hp=int64#1,<prc=int64#5,8),>sprc=int64#7
# asm 2: movq   (<hp=%rdi,<prc=%r8,8),>sprc=%rax
movq   (%rdi,%r8,8),%rax

# qhasm: sprc <<= 5
# asm 1: shl  $5,<sprc=int64#7
# asm 2: shl  $5,<sprc=%rax
shl  $5,%rax

# qhasm: sprc += sp
# asm 1: add  <sp=int64#3,<sprc=int64#7
# asm 2: add  <sp=%rdx,<sprc=%rax
add  %rdx,%rax

# qhasm: spc = *(uint64 *)(hp + pc * 8)
# asm 1: movq   (<hp=int64#1,<pc=int64#6,8),>spc=int64#8
# asm 2: movq   (<hp=%rdi,<pc=%r9,8),>spc=%r10
movq   (%rdi,%r9,8),%r10

# qhasm: spc <<= 5
# asm 1: shl  $5,<spc=int64#8
# asm 2: shl  $5,<spc=%r10
shl  $5,%r10

# qhasm: spc += sp
# asm 1: add  <sp=int64#3,<spc=int64#8
# asm 2: add  <sp=%rdx,<spc=%r10
add  %rdx,%r10

# qhasm: c0 = *(uint64 *)(spc +  0)
# asm 1: movq   0(<spc=int64#8),>c0=int64#9
# asm 2: movq   0(<spc=%r10),>c0=%r11
movq   0(%r10),%r11

# qhasm: c1 = *(uint64 *)(spc +  8)
# asm 1: movq   8(<spc=int64#8),>c1=int64#10
# asm 2: movq   8(<spc=%r10),>c1=%r12
movq   8(%r10),%r12

# qhasm: c2 = *(uint64 *)(spc + 16)
# asm 1: movq   16(<spc=int64#8),>c2=int64#11
# asm 2: movq   16(<spc=%r10),>c2=%r13
movq   16(%r10),%r13

# qhasm: c3 = *(uint64 *)(spc + 24)
# asm 1: movq   24(<spc=int64#8),>c3=int64#12
# asm 2: movq   24(<spc=%r10),>c3=%r14
movq   24(%r10),%r14

# qhasm: carry? c0 -= *(uint64 *)(sprc +  0)
# asm 1: subq 0(<sprc=int64#7),<c0=int64#9
# asm 2: subq 0(<sprc=%rax),<c0=%r11
subq 0(%rax),%r11

# qhasm: carry? c1 -= *(uint64 *)(sprc +  8) - carry
# asm 1: sbbq 8(<sprc=int64#7),<c1=int64#10
# asm 2: sbbq 8(<sprc=%rax),<c1=%r12
sbbq 8(%rax),%r12

# qhasm: carry? c2 -= *(uint64 *)(sprc + 16) - carry
# asm 1: sbbq 16(<sprc=int64#7),<c2=int64#11
# asm 2: sbbq 16(<sprc=%rax),<c2=%r13
sbbq 16(%rax),%r13

# qhasm: carry? c3 -= *(uint64 *)(sprc + 24) - carry
# asm 1: sbbq 24(<sprc=int64#7),<c3=int64#12
# asm 2: sbbq 24(<sprc=%rax),<c3=%r14
sbbq 24(%rax),%r14
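
# The subq/sbbq chain above computes the 256-bit difference
# *spc - *sprc limb by limb; the final carry flag is set exactly when
# the left child's value is smaller than the right child's, in which
# case the two cmovc instructions below switch pc/spc to the right
# child.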

# qhasm: pc = prc if carry
# asm 1: cmovc <prc=int64#5,<pc=int64#6
# asm 2: cmovc <prc=%r8,<pc=%r9
cmovc %r8,%r9

# qhasm: spc = sprc if carry
# asm 1: cmovc <sprc=int64#7,<spc=int64#8
# asm 2: cmovc <sprc=%rax,<spc=%r10
cmovc %rax,%r10

# qhasm: spc -= sp
# asm 1: sub  <sp=int64#3,<spc=int64#8
# asm 2: sub  <sp=%rdx,<spc=%r10
sub  %rdx,%r10

# qhasm: (uint64) spc >>= 5
# asm 1: shr  $5,<spc=int64#8
# asm 2: shr  $5,<spc=%r10
shr  $5,%r10

# qhasm: spp = *(uint64 *)(hp + pp * 8)
# asm 1: movq   (<hp=int64#1,<pp=int64#4,8),>spp=int64#5
# asm 2: movq   (<hp=%rdi,<pp=%rcx,8),>spp=%r8
movq   (%rdi,%rcx,8),%r8

# qhasm: *(uint64 *)(hp + pp * 8) = spc
# asm 1: movq  <spc=int64#8,(<hp=int64#1,<pp=int64#4,8)
# asm 2: movq  <spc=%r10,(<hp=%rdi,<pp=%rcx,8)
movq  %r10,(%rdi,%rcx,8)

# qhasm: *(uint64 *)(hp + pc * 8) = spp
# asm 1: movq  <spp=int64#5,(<hp=int64#1,<pc=int64#6,8)
# asm 2: movq  <spp=%r8,(<hp=%rdi,<pc=%r9,8)
movq  %r8,(%rdi,%r9,8)

# qhasm: pp = pc
# asm 1: mov  <pc=int64#6,>pp=int64#4
# asm 2: mov  <pc=%r9,>pp=%rcx
mov  %r9,%rcx
# comment:fp stack unchanged by jump

# qhasm: goto siftdownloop
jmp ._siftdownloop

# qhasm: siftuploop:
._siftuploop:
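
# Sift-up phase: pc is the current node and pp = (pc - 1)/2 its parent.
# Keep swapping the moved element with its parent for as long as it is
# not smaller than that parent.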

# qhasm: pc = pp
# asm 1: mov  <pp=int64#4,>pc=int64#2
# asm 2: mov  <pp=%rcx,>pc=%rsi
mov  %rcx,%rsi

# qhasm: pp -= 1
# asm 1: sub  $1,<pp=int64#4
# asm 2: sub  $1,<pp=%rcx
sub  $1,%rcx

# qhasm: (uint64) pp >>= 1
# asm 1: shr  $1,<pp=int64#4
# asm 2: shr  $1,<pp=%rcx
shr  $1,%rcx

# qhasm: unsigned>? pc - 0
# asm 1: cmp  $0,<pc=int64#2
# asm 2: cmp  $0,<pc=%rsi
cmp  $0,%rsi
# comment:fp stack unchanged by jump

# qhasm: goto end if !unsigned>
jbe ._end
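
# pc == 0 means the element has reached the root, so the heap is valid.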

# qhasm: spp = *(uint64 *)(hp + pp * 8)
# asm 1: movq   (<hp=int64#1,<pp=int64#4,8),>spp=int64#5
# asm 2: movq   (<hp=%rdi,<pp=%rcx,8),>spp=%r8
movq   (%rdi,%rcx,8),%r8

# qhasm: spc = *(uint64 *)(hp + pc * 8)
# asm 1: movq   (<hp=int64#1,<pc=int64#2,8),>spc=int64#6
# asm 2: movq   (<hp=%rdi,<pc=%rsi,8),>spc=%r9
movq   (%rdi,%rsi,8),%r9

# qhasm: spp <<= 5
# asm 1: shl  $5,<spp=int64#5
# asm 2: shl  $5,<spp=%r8
shl  $5,%r8

# qhasm: spc <<= 5
# asm 1: shl  $5,<spc=int64#6
# asm 2: shl  $5,<spc=%r9
shl  $5,%r9

# qhasm: spc += sp
# asm 1: add  <sp=int64#3,<spc=int64#6
# asm 2: add  <sp=%rdx,<spc=%r9
add  %rdx,%r9

# qhasm: spp += sp
# asm 1: add  <sp=int64#3,<spp=int64#5
# asm 2: add  <sp=%rdx,<spp=%r8
add  %rdx,%r8

# qhasm: c0 = *(uint64 *)(spc +  0)
# asm 1: movq   0(<spc=int64#6),>c0=int64#7
# asm 2: movq   0(<spc=%r9),>c0=%rax
movq   0(%r9),%rax

# qhasm: c1 = *(uint64 *)(spc +  8)
# asm 1: movq   8(<spc=int64#6),>c1=int64#8
# asm 2: movq   8(<spc=%r9),>c1=%r10
movq   8(%r9),%r10

# qhasm: c2 = *(uint64 *)(spc + 16)
# asm 1: movq   16(<spc=int64#6),>c2=int64#9
# asm 2: movq   16(<spc=%r9),>c2=%r11
movq   16(%r9),%r11

# qhasm: c3 = *(uint64 *)(spc + 24)
# asm 1: movq   24(<spc=int64#6),>c3=int64#10
# asm 2: movq   24(<spc=%r9),>c3=%r12
movq   24(%r9),%r12

# qhasm: carry? c0 -= *(uint64 *)(spp +  0)
# asm 1: subq 0(<spp=int64#5),<c0=int64#7
# asm 2: subq 0(<spp=%r8),<c0=%rax
subq 0(%r8),%rax

# qhasm: carry? c1 -= *(uint64 *)(spp +  8) - carry
# asm 1: sbbq 8(<spp=int64#5),<c1=int64#8
# asm 2: sbbq 8(<spp=%r8),<c1=%r10
sbbq 8(%r8),%r10

# qhasm: carry? c2 -= *(uint64 *)(spp + 16) - carry
# asm 1: sbbq 16(<spp=int64#5),<c2=int64#9
# asm 2: sbbq 16(<spp=%r8),<c2=%r11
sbbq 16(%r8),%r11

# qhasm: carry? c3 -= *(uint64 *)(spp + 24) - carry
# asm 1: sbbq 24(<spp=int64#5),<c3=int64#10
# asm 2: sbbq 24(<spp=%r8),<c3=%r12
sbbq 24(%r8),%r12
# comment:fp stack unchanged by jump

# qhasm: goto end if carry
jc ._end
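
# A final borrow here means child < parent, i.e. the max-heap property
# already holds at this level; otherwise fall through and swap the two
# heap indices.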

# qhasm: spc -= sp
# asm 1: sub  <sp=int64#3,<spc=int64#6
# asm 2: sub  <sp=%rdx,<spc=%r9
sub  %rdx,%r9

# qhasm: (uint64) spc >>= 5
# asm 1: shr  $5,<spc=int64#6
# asm 2: shr  $5,<spc=%r9
shr  $5,%r9

# qhasm: spp -= sp
# asm 1: sub  <sp=int64#3,<spp=int64#5
# asm 2: sub  <sp=%rdx,<spp=%r8
sub  %rdx,%r8

# qhasm: (uint64) spp >>= 5
# asm 1: shr  $5,<spp=int64#5
# asm 2: shr  $5,<spp=%r8
shr  $5,%r8

# qhasm: *(uint64 *)(hp + pp * 8) = spc
# asm 1: movq  <spc=int64#6,(<hp=int64#1,<pp=int64#4,8)
# asm 2: movq  <spc=%r9,(<hp=%rdi,<pp=%rcx,8)
movq  %r9,(%rdi,%rcx,8)

# qhasm: *(uint64 *)(hp + pc * 8) = spp
# asm 1: movq  <spp=int64#5,(<hp=int64#1,<pc=int64#2,8)
# asm 2: movq  <spp=%r8,(<hp=%rdi,<pc=%rsi,8)
movq  %r8,(%rdi,%rsi,8)
# comment:fp stack unchanged by jump

# qhasm: goto siftuploop
jmp ._siftuploop

# qhasm: end:
._end:

# qhasm: caller1 = caller1_stack
# asm 1: movq <caller1_stack=stack64#1,>caller1=int64#9
# asm 2: movq <caller1_stack=0(%rsp),>caller1=%r11
movq 0(%rsp),%r11

# qhasm: caller2 = caller2_stack
# asm 1: movq <caller2_stack=stack64#2,>caller2=int64#10
# asm 2: movq <caller2_stack=8(%rsp),>caller2=%r12
movq 8(%rsp),%r12

# qhasm: caller3 = caller3_stack
# asm 1: movq <caller3_stack=stack64#3,>caller3=int64#11
# asm 2: movq <caller3_stack=16(%rsp),>caller3=%r13
movq 16(%rsp),%r13

# qhasm: caller4 = caller4_stack
# asm 1: movq <caller4_stack=stack64#4,>caller4=int64#12
# asm 2: movq <caller4_stack=24(%rsp),>caller4=%r14
movq 24(%rsp),%r14

# qhasm: caller5 = caller5_stack
# asm 1: movq <caller5_stack=stack64#5,>caller5=int64#13
# asm 2: movq <caller5_stack=32(%rsp),>caller5=%r15
movq 32(%rsp),%r15

# qhasm: caller6 = caller6_stack
# asm 1: movq <caller6_stack=stack64#6,>caller6=int64#14
# asm 2: movq <caller6_stack=40(%rsp),>caller6=%rbx
movq 40(%rsp),%rbx

# qhasm: caller7 = caller7_stack
# asm 1: movq <caller7_stack=stack64#7,>caller7=int64#15
# asm 2: movq <caller7_stack=48(%rsp),>caller7=%rbp
movq 48(%rsp),%rbp

# qhasm: leave
add %r11,%rsp
mov %rdi,%rax
mov %rsi,%rdx
ret