#-----------------------------------------------------------------------
# crypto_sign_ed25519_amd64_64_fe25519_square
#
# void fe25519_square(uint64_t r[4], const uint64_t x[4])
#
# Squares a field element of GF(2^255-19) held as four 64-bit limbs,
# reducing the 512-bit intermediate result modulo 2^256-38 (a multiple
# of the field prime; full canonical reduction is done elsewhere).
#
# ABI:    System V AMD64 (qhasm-generated; AT&T syntax)
# In:     rdi = rp (result, 4 limbs)
#         rsi = xp (input,  4 limbs)
# Out:    rp[0..3] = x^2 mod 2^256-38
# Uses:   external constant crypto_sign_ed25519_amd64_64_38 (= 38)
# Clobb:  rax, rcx, rdx, rsi, r8-r10, flags
#         (rbx, rbp, r11-r15 are saved/restored on the aligned stack)
#
# qhasm variable -> register map while live:
#   r0=r14  r1=r8  r2=r9  r3=r10
#   squarer4=r11  squarer5=r12  squarer6=r13  squarer7=rcx
#   squaret1=r15  squaret2=rbx  squaret3=rbp
#   squarerax=rax squarerdx=rdx
#-----------------------------------------------------------------------

# qhasm: enter crypto_sign_ed25519_amd64_64_fe25519_square
.text
.p2align 5
.globl _crypto_sign_ed25519_amd64_64_fe25519_square
.globl crypto_sign_ed25519_amd64_64_fe25519_square
_crypto_sign_ed25519_amd64_64_fe25519_square:
crypto_sign_ed25519_amd64_64_fe25519_square:

# Align rsp to 32 and reserve 64 bytes; r11 remembers the adjustment
# so "leave" can undo it exactly.
mov %rsp,%r11
and $31,%r11
add $64,%r11
sub %r11,%rsp

# Spill callee-saved registers (SysV: rbx, rbp, r12-r15; r11 holds the
# stack adjustment and must survive to the epilogue).
movq %r11,0(%rsp)                       # caller1_stack = caller1
movq %r12,8(%rsp)                       # caller2_stack = caller2
movq %r13,16(%rsp)                      # caller3_stack = caller3
movq %r14,24(%rsp)                      # caller4_stack = caller4
movq %r15,32(%rsp)                      # caller5_stack = caller5
movq %rbx,40(%rsp)                      # caller6_stack = caller6
movq %rbp,48(%rsp)                      # caller7_stack = caller7

# ---- off-diagonal products x[i]*x[j], i != j -------------------------

mov $0,%rcx                             # squarer7 = 0

movq 8(%rsi),%rax                       # squarerax = x[1]
mulq 0(%rsi)                            # rdx:rax = x[1]*x[0]
mov %rax,%r8                            # r1 = lo
mov %rdx,%r9                            # r2 = hi

movq 16(%rsi),%rax                      # squarerax = x[2]
mulq 8(%rsi)                            # rdx:rax = x[2]*x[1]
mov %rax,%r10                           # r3 = lo
mov %rdx,%r11                           # squarer4 = hi

movq 24(%rsi),%rax                      # squarerax = x[3]
mulq 16(%rsi)                           # rdx:rax = x[3]*x[2]
mov %rax,%r12                           # squarer5 = lo
mov %rdx,%r13                           # squarer6 = hi

movq 16(%rsi),%rax                      # squarerax = x[2]
mulq 0(%rsi)                            # rdx:rax = x[2]*x[0]
add %rax,%r9                            # r2 += lo
adc %rdx,%r10                           # r3 += hi + carry
adc $0,%r11                             # squarer4 += carry

movq 24(%rsi),%rax                      # squarerax = x[3]
mulq 8(%rsi)                            # rdx:rax = x[3]*x[1]
add %rax,%r11                           # squarer4 += lo
adc %rdx,%r12                           # squarer5 += hi + carry
adc $0,%r13                             # squarer6 += carry

movq 24(%rsi),%rax                      # squarerax = x[3]
mulq 0(%rsi)                            # rdx:rax = x[3]*x[0]
add %rax,%r10                           # r3 += lo
adc %rdx,%r11                           # squarer4 += hi + carry
adc $0,%r12                             # propagate carry up ...
adc $0,%r13
adc $0,%rcx                             # ... into squarer7

# Double the off-diagonal sum (each x[i]*x[j] appears twice in x^2).
add %r8,%r8                             # r1 += r1
adc %r9,%r9                             # r2 += r2 + carry
adc %r10,%r10                           # r3 += r3 + carry
adc %r11,%r11                           # squarer4 += squarer4 + carry
adc %r12,%r12                           # squarer5 += squarer5 + carry
adc %r13,%r13                           # squarer6 += squarer6 + carry
adc %rcx,%rcx                           # squarer7 += squarer7 + carry

# ---- diagonal products x[i]^2, merged into the doubled sum ----------

movq 0(%rsi),%rax                       # squarerax = x[0]
mulq 0(%rsi)                            # rdx:rax = x[0]^2
mov %rax,%r14                           # r0 = lo
mov %rdx,%r15                           # squaret1 = hi

movq 8(%rsi),%rax                       # squarerax = x[1]
mulq 8(%rsi)                            # rdx:rax = x[1]^2
mov %rax,%rbx                           # squaret2 = lo
mov %rdx,%rbp                           # squaret3 = hi

movq 16(%rsi),%rax                      # squarerax = x[2]
mulq 16(%rsi)                           # rdx:rax = x[2]^2
add %r15,%r8                            # r1 += squaret1
adc %rbx,%r9                            # r2 += squaret2 + carry
adc %rbp,%r10                           # r3 += squaret3 + carry
adc %rax,%r11                           # squarer4 += lo(x[2]^2) + carry
adc %rdx,%r12                           # squarer5 += hi(x[2]^2) + carry
adc $0,%r13                             # squarer6 += carry
adc $0,%rcx                             # squarer7 += carry

movq 24(%rsi),%rax                      # squarerax = x[3]
mulq 24(%rsi)                           # rdx:rax = x[3]^2
add %rax,%r13                           # squarer6 += lo
adc %rdx,%rcx                           # squarer7 += hi + carry

# ---- reduce: fold high limbs r4..r7 back in, scaled by 38 -----------
# (2^256 == 38 mod 2^255-19, so high*2^256 contributes high*38)

mov %r11,%rax                           # squarerax = squarer4
mulq crypto_sign_ed25519_amd64_64_38    # rdx:rax = squarer4*38
mov %rax,%rsi                           # squarer4 = lo  (xp no longer needed)
mov %r12,%rax                           # squarerax = squarer5
mov %rdx,%r11                           # squarer5 = hi
mulq crypto_sign_ed25519_amd64_64_38    # rdx:rax = old squarer5*38
add %rax,%r11                           # squarer5 += lo
mov %r13,%rax                           # squarerax = squarer6
mov $0,%r12                             # squarer6 = 0
adc %rdx,%r12                           # squarer6 = hi + carry
mulq crypto_sign_ed25519_amd64_64_38    # rdx:rax = old squarer6*38
add %rax,%r12                           # squarer6 += lo
mov %rcx,%rax                           # squarerax = squarer7
mov $0,%rcx                             # squarer7 = 0
adc %rdx,%rcx                           # squarer7 = hi + carry
mulq crypto_sign_ed25519_amd64_64_38    # rdx:rax = old squarer7*38
add %rax,%rcx                           # squarer7 += lo
mov $0,%rax                             # squarer8 = 0
adc %rdx,%rax                           # squarer8 = hi + carry

# Add the folded value to the low four limbs.
add %rsi,%r14                           # r0 += squarer4
adc %r11,%r8                            # r1 += squarer5 + carry
adc %r12,%r9                            # r2 += squarer6 + carry
adc %rcx,%r10                           # r3 += squarer7 + carry
mov $0,%rsi                             # squarezero = 0
adc %rsi,%rax                           # squarer8 += final carry

# Second fold: squarer8 is tiny, so squarer8*38 fits a word.
imulq $38,%rax,%rdx                     # squarer8 *= 38
add %rdx,%r14                           # r0 += squarer8
adc %rsi,%r8                            # ripple carry through r1..r3
adc %rsi,%r9
adc %rsi,%r10
adc %rsi,%rsi                           # squarezero = last carry (0 or 1)
imulq $38,%rsi,%rsi                     # squarezero *= 38
add %rsi,%r14                           # r0 += squarezero (cannot carry again)

# ---- store result ----------------------------------------------------
movq %r8,8(%rdi)                        # rp[1] = r1
movq %r9,16(%rdi)                       # rp[2] = r2
movq %r10,24(%rdi)                      # rp[3] = r3
movq %r14,0(%rdi)                       # rp[0] = r0

# Restore callee-saved registers (r11 regains the stack adjustment).
movq 0(%rsp),%r11                       # caller1 = caller1_stack
movq 8(%rsp),%r12                       # caller2 = caller2_stack
movq 16(%rsp),%r13                      # caller3 = caller3_stack
movq 24(%rsp),%r14                      # caller4 = caller4_stack
movq 32(%rsp),%r15                      # caller5 = caller5_stack
movq 40(%rsp),%rbx                      # caller6 = caller6_stack
movq 48(%rsp),%rbp                      # caller7 = caller7_stack

# qhasm: leave
add %r11,%rsp                           # undo prologue's stack adjustment
mov %rdi,%rax                           # qhasm calling-convention residue
mov %rsi,%rdx
ret