#-----------------------------------------------------------------------
# void crypto_sign_ed25519_amd64_64_fe25519_freeze(uint64_t rp[4])
#
# qhasm-generated x86-64 (AT&T syntax), SysV AMD64 ABI.
#
# Reduces the 256-bit field element at rp (four 64-bit limbs, least
# significant limb first) in place to its canonical representative
# modulo p = 2^255 - 19.
#
# Method (done twice, branch-free):
#   t = r;  t += 19;  t3 += 2^63   (i.e. t += 19 + 2^255 over 256 bits)
#   Since 19 + 2^255 = 2^256 - p, the final adc carries out exactly
#   when r >= p, and in that case the wrapped sum t equals r - p.
#   carry ? r = t  via cmovc on each limb.
# The input may be as large as 2^256 - 1 > 2p, so one conditional
# subtraction is not enough; two rounds bring any r < 2^256 below p.
#
# Constant-time: no data-dependent branches; selection is via cmovc.
#
# In:     rdi = rp
# Out:    rp[0..3] canonical (0 <= value < p)
# Note:   the generated prologue saves r12-r15/rbx/rbp although only
#         r12 is actually clobbered here; r11 holds the stack
#         adjustment, is reused as scratch (t3), and is reloaded from
#         the stack before the epilogue undoes the adjustment.
#-----------------------------------------------------------------------

# qhasm: enter crypto_sign_ed25519_amd64_64_fe25519_freeze
.text
.p2align 5
.globl _crypto_sign_ed25519_amd64_64_fe25519_freeze
.globl crypto_sign_ed25519_amd64_64_fe25519_freeze
_crypto_sign_ed25519_amd64_64_fe25519_freeze:
crypto_sign_ed25519_amd64_64_fe25519_freeze:

# Align rsp down to 32 and reserve 64 bytes; r11 = total adjustment,
# saved below so the epilogue can restore rsp exactly.
mov %rsp,%r11
and $31,%r11
add $64,%r11
sub %r11,%rsp

# Spill the stack adjustment and the callee-saved registers.
movq %r11,0(%rsp)                       # caller1_stack = r11 (rsp adjustment)
movq %r12,8(%rsp)                       # caller2_stack = r12
movq %r13,16(%rsp)                      # caller3_stack = r13
movq %r14,24(%rsp)                      # caller4_stack = r14
movq %r15,32(%rsp)                      # caller5_stack = r15
movq %rbx,40(%rsp)                      # caller6_stack = rbx
movq %rbp,48(%rsp)                      # caller7_stack = rbp

# Load the four limbs: r0..r3 = rp[0..3], little-endian limb order.
movq 0(%rdi),%rsi                       # r0
movq 8(%rdi),%rdx                       # r1
movq 16(%rdi),%rcx                      # r2
movq 24(%rdi),%r8                       # r3

# --- Round 1: t = r + 19 + 2^255; keep t iff the add carried out ---
mov %rsi,%r9                            # t0 = r0
mov %rdx,%rax                           # t1 = r1
mov %rcx,%r10                           # t2 = r2
mov %r8,%r11                            # t3 = r3

# two63 = 1 << 63, added to the top limb == adding 2^255 to the value.
mov $1,%r12
shl $63,%r12

add $19,%r9                             # t0 += 19 (2^256 - p = 2^255 + 19)
adc $0,%rax                             # propagate carry through t1
adc $0,%r10                             # ... and t2
adc %r12,%r11                           # t3 += 2^63 + carry; CF set iff r >= p

# If CF: t wrapped mod 2^256, so t = r - p; take it. Else keep r.
cmovc %r9,%rsi
cmovc %rax,%rdx
cmovc %r10,%rcx
cmovc %r11,%r8

# --- Round 2: identical conditional subtraction (input < 2^256 may
#     exceed 2p, so a second round is required for canonical output) ---
mov %rsi,%r9                            # t0 = r0
mov %rdx,%rax                           # t1 = r1
mov %rcx,%r10                           # t2 = r2
mov %r8,%r11                            # t3 = r3

add $19,%r9                             # t += 19 + 2^255 again (r12 still 2^63)
adc $0,%rax
adc $0,%r10
adc %r12,%r11                           # CF set iff r >= p still

cmovc %r9,%rsi                          # r = t if carry
cmovc %rax,%rdx
cmovc %r10,%rcx
cmovc %r11,%r8

# Store the canonical result back through rp.
movq %rsi,0(%rdi)                       # rp[0] = r0
movq %rdx,8(%rdi)                       # rp[1] = r1
movq %rcx,16(%rdi)                      # rp[2] = r2
movq %r8,24(%rdi)                       # rp[3] = r3

# Restore callee-saved registers and the saved stack adjustment.
movq 0(%rsp),%r11                       # r11 = rsp adjustment (t3 clobbered it)
movq 8(%rsp),%r12
movq 16(%rsp),%r13
movq 24(%rsp),%r14
movq 32(%rsp),%r15
movq 40(%rsp),%rbx
movq 48(%rsp),%rbp

# qhasm: leave — standard generated epilogue; the rax/rdx moves are
# qhasm boilerplate (callers treat this as returning void).
add %r11,%rsp
mov %rdi,%rax
mov %rsi,%rdx
ret