# -----------------------------------------------------------------------------
# crypto_sign_ed25519_amd64_64_heap_rootreplaced_1limb(hp, hlen, sp)
#
# qhasm-generated x86-64 assembly, AT&T/GAS syntax, SysV AMD64 ABI.
#
# Inputs:
#   rdi = hp   : array of 64-bit indices forming a binary heap
#   rsi = hlen : number of entries in the heap
#   rdx = sp   : base of an array of 32-byte values; heap entry i refers to
#                the value at sp + hp[i]*32  (the code scales with shl $5)
#
# Restores heap order after the value referred to by the root entry hp[0]
# has been replaced, in two phases:
#   1. Walk the root entry all the way down: at each level, while both
#      children (indices 2p+1 and 2p+2) exist, unconditionally swap with
#      the child whose value compares larger, descending to that child.
#   2. Sift the moved entry back up while its value compares >= its
#      parent's value (stops on borrow or at the root).
#
# "_1limb": both comparisons read ONLY the least-significant 64-bit limb
# (offset 0) of each 32-byte value -- presumably a fast approximate variant
# of a full multi-limb comparison; confirm against the non-"_1limb" routine.
# NOTE(review): the compare direction looks like a max-heap on limb 0 --
# confirm against the callers.
#
# Stack: rsp is rounded down to 32-byte alignment with 64 bytes reserved.
# Slots 8..48 spill the callee-saved registers r12-r15, rbx, rbp (qhasm
# spills its whole "caller" set even though only some are clobbered here).
# Slot 0 holds the frame-adjust amount computed in r11; it must be reloaded
# in the epilogue because r11 is reused as scratch (c0) inside the loop.
#
# Returns with rax = hp, rdx = scratch (standard qhasm epilogue
# boilerplate; the function is effectively void for its callers).
# -----------------------------------------------------------------------------

# qhasm: int64 hp
# qhasm: int64 hlen
# qhasm: int64 sp
# qhasm: int64 pp
# qhasm: input hp
# qhasm: input hlen
# qhasm: input sp
# qhasm: int64 prc
# qhasm: int64 plc
# qhasm: int64 pc
# qhasm: int64 d
# qhasm: int64 spp
# qhasm: int64 sprc
# qhasm: int64 spc
# qhasm: int64 c0
# qhasm: int64 c1
# qhasm: int64 c2
# qhasm: int64 c3
# qhasm: int64 t0
# qhasm: int64 t1
# qhasm: int64 t2
# qhasm: int64 t3
# qhasm: int64 p0
# qhasm: int64 p1
# qhasm: int64 p2
# qhasm: int64 p3
# qhasm: int64 caller1
# qhasm: int64 caller2
# qhasm: int64 caller3
# qhasm: int64 caller4
# qhasm: int64 caller5
# qhasm: int64 caller6
# qhasm: int64 caller7
# qhasm: caller caller1
# qhasm: caller caller2
# qhasm: caller caller3
# qhasm: caller caller4
# qhasm: caller caller5
# qhasm: caller caller6
# qhasm: caller caller7
# qhasm: stack64 caller1_stack
# qhasm: stack64 caller2_stack
# qhasm: stack64 caller3_stack
# qhasm: stack64 caller4_stack
# qhasm: stack64 caller5_stack
# qhasm: stack64 caller6_stack
# qhasm: stack64 caller7_stack

# qhasm: enter crypto_sign_ed25519_amd64_64_heap_rootreplaced_1limb
.text
.p2align 5
.globl _crypto_sign_ed25519_amd64_64_heap_rootreplaced_1limb
.globl crypto_sign_ed25519_amd64_64_heap_rootreplaced_1limb
_crypto_sign_ed25519_amd64_64_heap_rootreplaced_1limb:
crypto_sign_ed25519_amd64_64_heap_rootreplaced_1limb:
# r11 = (rsp mod 32) + 64; subtracting it leaves rsp 32-byte aligned with
# 64 bytes of frame.  r11 itself is spilled to slot 0 below so the epilogue
# can undo exactly this adjustment.
mov %rsp,%r11
and $31,%r11
add $64,%r11
sub %r11,%rsp

# --- prologue: spill callee-saved registers (and the frame-adjust in r11) ---

# qhasm: caller1_stack = caller1
# asm 1: movq <caller1=int64#9,>caller1_stack=stack64#1
# asm 2: movq <caller1=%r11,>caller1_stack=0(%rsp)
movq %r11,0(%rsp)

# qhasm: caller2_stack = caller2
# asm 1: movq <caller2=int64#10,>caller2_stack=stack64#2
# asm 2: movq <caller2=%r12,>caller2_stack=8(%rsp)
movq %r12,8(%rsp)

# qhasm: caller3_stack = caller3
# asm 1: movq <caller3=int64#11,>caller3_stack=stack64#3
# asm 2: movq <caller3=%r13,>caller3_stack=16(%rsp)
movq %r13,16(%rsp)

# qhasm: caller4_stack = caller4
# asm 1: movq <caller4=int64#12,>caller4_stack=stack64#4
# asm 2: movq <caller4=%r14,>caller4_stack=24(%rsp)
movq %r14,24(%rsp)

# qhasm: caller5_stack = caller5
# asm 1: movq <caller5=int64#13,>caller5_stack=stack64#5
# asm 2: movq <caller5=%r15,>caller5_stack=32(%rsp)
movq %r15,32(%rsp)

# qhasm: caller6_stack = caller6
# asm 1: movq <caller6=int64#14,>caller6_stack=stack64#6
# asm 2: movq <caller6=%rbx,>caller6_stack=40(%rsp)
movq %rbx,40(%rsp)

# qhasm: caller7_stack = caller7
# asm 1: movq <caller7=int64#15,>caller7_stack=stack64#7
# asm 2: movq <caller7=%rbp,>caller7_stack=48(%rsp)
movq %rbp,48(%rsp)

# --- phase 1: walk the root entry down, always swapping with the child
#     whose least-significant limb is larger.  pp = current position. ---

# qhasm: pp = 0
# asm 1: mov $0,>pp=int64#4
# asm 2: mov $0,>pp=%rcx
mov $0,%rcx

# qhasm: siftdownloop:
._siftdownloop:

# qhasm: prc = pp
# asm 1: mov <pp=int64#4,>prc=int64#5
# asm 2: mov <pp=%rcx,>prc=%r8
mov %rcx,%r8

# qhasm: prc *= 2
# asm 1: imulq $2,<prc=int64#5,>prc=int64#5
# asm 2: imulq $2,<prc=%r8,>prc=%r8
imulq $2,%r8,%r8

# qhasm: pc = prc
# asm 1: mov <prc=int64#5,>pc=int64#6
# asm 2: mov <prc=%r8,>pc=%r9
mov %r8,%r9

# prc = 2*pp + 2 (right child), pc = 2*pp + 1 (left child)

# qhasm: prc += 2
# asm 1: add $2,<prc=int64#5
# asm 2: add $2,<prc=%r8
add $2,%r8

# qhasm: pc += 1
# asm 1: add $1,<pc=int64#6
# asm 2: add $1,<pc=%r9
add $1,%r9

# stop descending when the right child index is out of range (hlen <= prc)

# qhasm: unsigned>? hlen - prc
# asm 1: cmp <prc=int64#5,<hlen=int64#2
# asm 2: cmp <prc=%r8,<hlen=%rsi
cmp %r8,%rsi
# comment:fp stack unchanged by jump

# qhasm: goto siftuploop if !unsigned>
jbe ._siftuploop

# qhasm: sprc = *(uint64 *)(hp + prc * 8)
# asm 1: movq (<hp=int64#1,<prc=int64#5,8),>sprc=int64#7
# asm 2: movq (<hp=%rdi,<prc=%r8,8),>sprc=%rax
movq (%rdi,%r8,8),%rax

# qhasm: sprc <<= 5
# asm 1: shl $5,<sprc=int64#7
# asm 2: shl $5,<sprc=%rax
shl $5,%rax

# qhasm: sprc += sp
# asm 1: add <sp=int64#3,<sprc=int64#7
# asm 2: add <sp=%rdx,<sprc=%rax
add %rdx,%rax

# qhasm: spc = *(uint64 *)(hp + pc * 8)
# asm 1: movq (<hp=int64#1,<pc=int64#6,8),>spc=int64#8
# asm 2: movq (<hp=%rdi,<pc=%r9,8),>spc=%r10
movq (%rdi,%r9,8),%r10

# qhasm: spc <<= 5
# asm 1: shl $5,<spc=int64#8
# asm 2: shl $5,<spc=%r10
shl $5,%r10

# qhasm: spc += sp
# asm 1: add <sp=int64#3,<spc=int64#8
# asm 2: add <sp=%rdx,<spc=%r10
add %rdx,%r10

# compare ONLY limb 0 of the two children; borrow (CF) means
# left child's limb < right child's limb, so switch to the right child

# qhasm: c0 = *(uint64 *)(spc + 0)
# asm 1: movq 0(<spc=int64#8),>c0=int64#9
# asm 2: movq 0(<spc=%r10),>c0=%r11
movq 0(%r10),%r11

# qhasm: carry? c0 -= *(uint64 *)(sprc + 0)
# asm 1: subq 0(<sprc=int64#7),<c0=int64#9
# asm 2: subq 0(<sprc=%rax),<c0=%r11
subq 0(%rax),%r11

# qhasm: pc = prc if carry
# asm 1: cmovc <prc=int64#5,<pc=int64#6
# asm 2: cmovc <prc=%r8,<pc=%r9
cmovc %r8,%r9

# qhasm: spc = sprc if carry
# asm 1: cmovc <sprc=int64#7,<spc=int64#8
# asm 2: cmovc <sprc=%rax,<spc=%r10
cmovc %rax,%r10

# turn the chosen child's pointer back into its heap index ((spc - sp) >> 5)

# qhasm: spc -= sp
# asm 1: sub <sp=int64#3,<spc=int64#8
# asm 2: sub <sp=%rdx,<spc=%r10
sub %rdx,%r10

# qhasm: (uint64) spc >>= 5
# asm 1: shr $5,<spc=int64#8
# asm 2: shr $5,<spc=%r10
shr $5,%r10

# swap hp[pp] <-> hp[pc] (parent takes chosen child's index) and descend

# qhasm: spp = *(uint64 *)(hp + pp * 8)
# asm 1: movq (<hp=int64#1,<pp=int64#4,8),>spp=int64#5
# asm 2: movq (<hp=%rdi,<pp=%rcx,8),>spp=%r8
movq (%rdi,%rcx,8),%r8

# qhasm: *(uint64 *)(hp + pp * 8) = spc
# asm 1: movq <spc=int64#8,(<hp=int64#1,<pp=int64#4,8)
# asm 2: movq <spc=%r10,(<hp=%rdi,<pp=%rcx,8)
movq %r10,(%rdi,%rcx,8)

# qhasm: *(uint64 *)(hp + pc * 8) = spp
# asm 1: movq <spp=int64#5,(<hp=int64#1,<pc=int64#6,8)
# asm 2: movq <spp=%r8,(<hp=%rdi,<pc=%r9,8)
movq %r8,(%rdi,%r9,8)

# qhasm: pp = pc
# asm 1: mov <pc=int64#6,>pp=int64#4
# asm 2: mov <pc=%r9,>pp=%rcx
mov %r9,%rcx
# comment:fp stack unchanged by jump

# qhasm: goto siftdownloop
jmp ._siftdownloop

# --- phase 2: sift the moved entry back up while its limb 0 compares >=
#     its parent's limb 0.  pc = child position, pp = (pc-1)/2 = parent.
#     NOTE: rsi (hlen) is dead from here on and is reused for pc. ---

# qhasm: siftuploop:
._siftuploop:

# qhasm: pc = pp
# asm 1: mov <pp=int64#4,>pc=int64#2
# asm 2: mov <pp=%rcx,>pc=%rsi
mov %rcx,%rsi

# qhasm: pp -= 1
# asm 1: sub $1,<pp=int64#4
# asm 2: sub $1,<pp=%rcx
sub $1,%rcx

# qhasm: (uint64) pp >>= 1
# asm 1: shr $1,<pp=int64#4
# asm 2: shr $1,<pp=%rcx
shr $1,%rcx

# done once the entry has reached the root (pc == 0)

# qhasm: unsigned>? pc - 0
# asm 1: cmp $0,<pc=int64#2
# asm 2: cmp $0,<pc=%rsi
cmp $0,%rsi
# comment:fp stack unchanged by jump

# qhasm: goto end if !unsigned>
jbe ._end

# qhasm: spp = *(uint64 *)(hp + pp * 8)
# asm 1: movq (<hp=int64#1,<pp=int64#4,8),>spp=int64#5
# asm 2: movq (<hp=%rdi,<pp=%rcx,8),>spp=%r8
movq (%rdi,%rcx,8),%r8

# qhasm: spc = *(uint64 *)(hp + pc * 8)
# asm 1: movq (<hp=int64#1,<pc=int64#2,8),>spc=int64#6
# asm 2: movq (<hp=%rdi,<pc=%rsi,8),>spc=%r9
movq (%rdi,%rsi,8),%r9

# qhasm: spp <<= 5
# asm 1: shl $5,<spp=int64#5
# asm 2: shl $5,<spp=%r8
shl $5,%r8

# qhasm: spc <<= 5
# asm 1: shl $5,<spc=int64#6
# asm 2: shl $5,<spc=%r9
shl $5,%r9

# qhasm: spc += sp
# asm 1: add <sp=int64#3,<spc=int64#6
# asm 2: add <sp=%rdx,<spc=%r9
add %rdx,%r9

# qhasm: spp += sp
# asm 1: add <sp=int64#3,<spp=int64#5
# asm 2: add <sp=%rdx,<spp=%r8
add %rdx,%r8

# compare limb 0 of child vs parent; borrow means child < parent -> stop

# qhasm: c0 = *(uint64 *)(spc + 0)
# asm 1: movq 0(<spc=int64#6),>c0=int64#7
# asm 2: movq 0(<spc=%r9),>c0=%rax
movq 0(%r9),%rax

# qhasm: carry? c0 -= *(uint64 *)(spp + 0)
# asm 1: subq 0(<spp=int64#5),<c0=int64#7
# asm 2: subq 0(<spp=%r8),<c0=%rax
subq 0(%r8),%rax
# comment:fp stack unchanged by jump

# qhasm: goto end if carry
jc ._end

# otherwise swap hp[pp] <-> hp[pc] (convert pointers back to indices first)

# qhasm: spc -= sp
# asm 1: sub <sp=int64#3,<spc=int64#6
# asm 2: sub <sp=%rdx,<spc=%r9
sub %rdx,%r9

# qhasm: (uint64) spc >>= 5
# asm 1: shr $5,<spc=int64#6
# asm 2: shr $5,<spc=%r9
shr $5,%r9

# qhasm: spp -= sp
# asm 1: sub <sp=int64#3,<spp=int64#5
# asm 2: sub <sp=%rdx,<spp=%r8
sub %rdx,%r8

# qhasm: (uint64) spp >>= 5
# asm 1: shr $5,<spp=int64#5
# asm 2: shr $5,<spp=%r8
shr $5,%r8

# qhasm: *(uint64 *)(hp + pp * 8) = spc
# asm 1: movq <spc=int64#6,(<hp=int64#1,<pp=int64#4,8)
# asm 2: movq <spc=%r9,(<hp=%rdi,<pp=%rcx,8)
movq %r9,(%rdi,%rcx,8)

# qhasm: *(uint64 *)(hp + pc * 8) = spp
# asm 1: movq <spp=int64#5,(<hp=int64#1,<pc=int64#2,8)
# asm 2: movq <spp=%r8,(<hp=%rdi,<pc=%rsi,8)
movq %r8,(%rdi,%rsi,8)
# comment:fp stack unchanged by jump

# qhasm: goto siftuploop
jmp ._siftuploop

# --- epilogue: restore callee-saved registers, undo the frame adjustment ---

# qhasm: end:
._end:

# qhasm: caller1 = caller1_stack
# asm 1: movq <caller1_stack=stack64#1,>caller1=int64#9
# asm 2: movq <caller1_stack=0(%rsp),>caller1=%r11
movq 0(%rsp),%r11

# qhasm: caller2 = caller2_stack
# asm 1: movq <caller2_stack=stack64#2,>caller2=int64#10
# asm 2: movq <caller2_stack=8(%rsp),>caller2=%r12
movq 8(%rsp),%r12

# qhasm: caller3 = caller3_stack
# asm 1: movq <caller3_stack=stack64#3,>caller3=int64#11
# asm 2: movq <caller3_stack=16(%rsp),>caller3=%r13
movq 16(%rsp),%r13

# qhasm: caller4 = caller4_stack
# asm 1: movq <caller4_stack=stack64#4,>caller4=int64#12
# asm 2: movq <caller4_stack=24(%rsp),>caller4=%r14
movq 24(%rsp),%r14

# qhasm: caller5 = caller5_stack
# asm 1: movq <caller5_stack=stack64#5,>caller5=int64#13
# asm 2: movq <caller5_stack=32(%rsp),>caller5=%r15
movq 32(%rsp),%r15

# qhasm: caller6 = caller6_stack
# asm 1: movq <caller6_stack=stack64#6,>caller6=int64#14
# asm 2: movq <caller6_stack=40(%rsp),>caller6=%rbx
movq 40(%rsp),%rbx

# qhasm: caller7 = caller7_stack
# asm 1: movq <caller7_stack=stack64#7,>caller7=int64#15
# asm 2: movq <caller7_stack=48(%rsp),>caller7=%rbp
movq 48(%rsp),%rbp

# qhasm: leave
add %r11,%rsp
mov %rdi,%rax
mov %rsi,%rdx
ret