1 2# qhasm: int64 a 3 4# qhasm: int64 b 5 6# qhasm: int64 c 7 8# qhasm: int64 d 9 10# qhasm: int64 e 11 12# qhasm: int64 A 13 14# qhasm: int64 B 15 16# qhasm: int64 C 17 18# qhasm: int64 D 19 20# qhasm: int64 E 21 22# qhasm: int64 r 23 24# qhasm: int64 s 25 26# qhasm: int64 t 27 28# qhasm: int64 u 29 30# qhasm: int64 v 31 32# qhasm: int64 R 33 34# qhasm: int64 S 35 36# qhasm: int64 T 37 38# qhasm: int64 U 39 40# qhasm: int64 V 41 42# qhasm: int64 arg1 43 44# qhasm: int64 arg2 45 46# qhasm: int64 arg3 47 48# qhasm: int64 arg4 49 50# qhasm: input arg1 51 52# qhasm: input arg2 53 54# qhasm: input arg3 55 56# qhasm: input arg4 57 58# qhasm: int64 r11 59 60# qhasm: int64 r12 61 62# qhasm: int64 r13 63 64# qhasm: int64 r14 65 66# qhasm: int64 r15 67 68# qhasm: int64 rbx 69 70# qhasm: int64 rbp 71 72# qhasm: caller r11 73 74# qhasm: caller r12 75 76# qhasm: caller r13 77 78# qhasm: caller r14 79 80# qhasm: caller r15 81 82# qhasm: caller rbx 83 84# qhasm: caller rbp 85 86# qhasm: stack64 r11_stack 87 88# qhasm: stack64 r12_stack 89 90# qhasm: stack64 r13_stack 91 92# qhasm: stack64 r14_stack 93 94# qhasm: stack64 r15_stack 95 96# qhasm: stack64 rbx_stack 97 98# qhasm: stack64 rbp_stack 99 100# qhasm: int64 k 101 102# qhasm: int64 kbits 103 104# qhasm: int64 iv 105 106# qhasm: int64 i 107 108# qhasm: stack64 x_backup 109 110# qhasm: int64 x 111 112# qhasm: stack64 m_backup 113 114# qhasm: int64 m 115 116# qhasm: stack64 out_backup 117 118# qhasm: int64 out 119 120# qhasm: stack64 bytes_backup 121 122# qhasm: int64 bytes 123 124# qhasm: int64 in0 125 126# qhasm: int64 in2 127 128# qhasm: int64 in4 129 130# qhasm: int64 in6 131 132# qhasm: int64 in8 133 134# qhasm: int64 in10 135 136# qhasm: int64 in12 137 138# qhasm: int64 in14 139 140# qhasm: int64 out0 141 142# qhasm: int64 out2 143 144# qhasm: int64 out4 145 146# qhasm: int64 out6 147 148# qhasm: int64 out8 149 150# qhasm: int64 out10 151 152# qhasm: int64 out12 153 154# qhasm: int64 out14 155 156# qhasm: stack64 x0 157 158# qhasm: stack64 x1 159 160# qhasm: stack64 x2 161 162# qhasm: stack64 x3 163 164# qhasm: stack64 x4 165 166# qhasm: stack64 x5 167 168# qhasm: stack64 x6 169 170# qhasm: stack64 x7 171 172# qhasm: stack64 x8 173 174# qhasm: stack64 x9 175 176# qhasm: stack64 x10 177 178# qhasm: stack64 x11 179 180# qhasm: stack64 x12 181 182# qhasm: stack64 x13 183 184# qhasm: stack64 x14 185 186# qhasm: stack64 x15 187 188# qhasm: stack64 j0 189 190# qhasm: stack64 j2 191 192# qhasm: stack64 j4 193 194# qhasm: stack64 j6 195 196# qhasm: stack64 j8 197 198# qhasm: stack64 j10 199 200# qhasm: stack64 j12 201 202# qhasm: stack64 j14 203 204# qhasm: stack512 tmp 205 206# qhasm: int64 ctarget 207 208# qhasm: enter ECRYPT_keystream_bytes 209.text 210.p2align 5 211.globl _ECRYPT_keystream_bytes 212.globl ECRYPT_keystream_bytes 213_ECRYPT_keystream_bytes: 214ECRYPT_keystream_bytes: 215mov %rsp,%r11 216and $31,%r11 217add $352,%r11 218sub %r11,%rsp 219 220# qhasm: x = arg1 221# asm 1: mov <arg1=int64#1,>x=int64#5 222# asm 2: mov <arg1=%rdi,>x=%r8 223mov %rdi,%r8 224 225# qhasm: m = arg2 226# asm 1: mov <arg2=int64#2,>m=int64#2 227# asm 2: mov <arg2=%rsi,>m=%rsi 228mov %rsi,%rsi 229 230# qhasm: out = m 231# asm 1: mov <m=int64#2,>out=int64#1 232# asm 2: mov <m=%rsi,>out=%rdi 233mov %rsi,%rdi 234 235# qhasm: bytes = arg3 236# asm 1: mov <arg3=int64#3,>bytes=int64#3 237# asm 2: mov <arg3=%rdx,>bytes=%rdx 238mov %rdx,%rdx 239 240# qhasm: unsigned>? bytes - 0 241# asm 1: cmp $0,<bytes=int64#3 242# asm 2: cmp $0,<bytes=%rdx 243cmp $0,%rdx 244# comment:fp stack unchanged by jump 245 246# qhasm: goto done if !unsigned> 247jbe ._done 248 249# qhasm: a = 0 250# asm 1: mov $0,>a=int64#7 251# asm 2: mov $0,>a=%rax 252mov $0,%rax 253 254# qhasm: i = bytes 255# asm 1: mov <bytes=int64#3,>i=int64#4 256# asm 2: mov <bytes=%rdx,>i=%rcx 257mov %rdx,%rcx 258 259# qhasm: while (i) { *out++ = a; --i } 260rep stosb 261 262# qhasm: out -= bytes 263# asm 1: sub <bytes=int64#3,<out=int64#1 264# asm 2: sub <bytes=%rdx,<out=%rdi 265sub %rdx,%rdi 266# comment:fp stack unchanged by jump 267 268# qhasm: goto start 269jmp ._start 270 271# qhasm: enter ECRYPT_decrypt_bytes 272.text 273.p2align 5 274.globl _ECRYPT_decrypt_bytes 275.globl ECRYPT_decrypt_bytes 276_ECRYPT_decrypt_bytes: 277ECRYPT_decrypt_bytes: 278mov %rsp,%r11 279and $31,%r11 280add $352,%r11 281sub %r11,%rsp 282 283# qhasm: x = arg1 284# asm 1: mov <arg1=int64#1,>x=int64#5 285# asm 2: mov <arg1=%rdi,>x=%r8 286mov %rdi,%r8 287 288# qhasm: m = arg2 289# asm 1: mov <arg2=int64#2,>m=int64#2 290# asm 2: mov <arg2=%rsi,>m=%rsi 291mov %rsi,%rsi 292 293# qhasm: out = arg3 294# asm 1: mov <arg3=int64#3,>out=int64#1 295# asm 2: mov <arg3=%rdx,>out=%rdi 296mov %rdx,%rdi 297 298# qhasm: bytes = arg4 299# asm 1: mov <arg4=int64#4,>bytes=int64#3 300# asm 2: mov <arg4=%rcx,>bytes=%rdx 301mov %rcx,%rdx 302 303# qhasm: unsigned>? bytes - 0 304# asm 1: cmp $0,<bytes=int64#3 305# asm 2: cmp $0,<bytes=%rdx 306cmp $0,%rdx 307# comment:fp stack unchanged by jump 308 309# qhasm: goto done if !unsigned> 310jbe ._done 311# comment:fp stack unchanged by jump 312 313# qhasm: goto start 314jmp ._start 315 316# qhasm: enter ECRYPT_encrypt_bytes 317.text 318.p2align 5 319.globl _ECRYPT_encrypt_bytes 320.globl ECRYPT_encrypt_bytes 321_ECRYPT_encrypt_bytes: 322ECRYPT_encrypt_bytes: 323mov %rsp,%r11 324and $31,%r11 325add $352,%r11 326sub %r11,%rsp 327 328# qhasm: x = arg1 329# asm 1: mov <arg1=int64#1,>x=int64#5 330# asm 2: mov <arg1=%rdi,>x=%r8 331mov %rdi,%r8 332 333# qhasm: m = arg2 334# asm 1: mov <arg2=int64#2,>m=int64#2 335# asm 2: mov <arg2=%rsi,>m=%rsi 336mov %rsi,%rsi 337 338# qhasm: out = arg3 339# asm 1: mov <arg3=int64#3,>out=int64#1 340# asm 2: mov <arg3=%rdx,>out=%rdi 341mov %rdx,%rdi 342 343# qhasm: bytes = arg4 344# asm 1: mov <arg4=int64#4,>bytes=int64#3 345# asm 2: mov <arg4=%rcx,>bytes=%rdx 346mov %rcx,%rdx 347 348# qhasm: unsigned>? bytes - 0 349# asm 1: cmp $0,<bytes=int64#3 350# asm 2: cmp $0,<bytes=%rdx 351cmp $0,%rdx 352# comment:fp stack unchanged by jump 353 354# qhasm: goto done if !unsigned> 355jbe ._done 356# comment:fp stack unchanged by fallthrough 357 358# qhasm: start: 359._start: 360 361# qhasm: r11_stack = r11 362# asm 1: movq <r11=int64#9,>r11_stack=stack64#1 363# asm 2: movq <r11=%r11,>r11_stack=0(%rsp) 364movq %r11,0(%rsp) 365 366# qhasm: r12_stack = r12 367# asm 1: movq <r12=int64#10,>r12_stack=stack64#2 368# asm 2: movq <r12=%r12,>r12_stack=8(%rsp) 369movq %r12,8(%rsp) 370 371# qhasm: r13_stack = r13 372# asm 1: movq <r13=int64#11,>r13_stack=stack64#3 373# asm 2: movq <r13=%r13,>r13_stack=16(%rsp) 374movq %r13,16(%rsp) 375 376# qhasm: r14_stack = r14 377# asm 1: movq <r14=int64#12,>r14_stack=stack64#4 378# asm 2: movq <r14=%r14,>r14_stack=24(%rsp) 379movq %r14,24(%rsp) 380 381# qhasm: r15_stack = r15 382# asm 1: movq <r15=int64#13,>r15_stack=stack64#5 383# asm 2: movq <r15=%r15,>r15_stack=32(%rsp) 384movq %r15,32(%rsp) 385 386# qhasm: rbx_stack = rbx 387# asm 1: movq <rbx=int64#14,>rbx_stack=stack64#6 388# asm 2: movq <rbx=%rbx,>rbx_stack=40(%rsp) 389movq %rbx,40(%rsp) 390 391# qhasm: rbp_stack = rbp 392# asm 1: movq <rbp=int64#15,>rbp_stack=stack64#7 393# asm 2: movq <rbp=%rbp,>rbp_stack=48(%rsp) 394movq %rbp,48(%rsp) 395 396# qhasm: in0 = *(uint64 *) (x + 0) 397# asm 1: movq 0(<x=int64#5),>in0=int64#4 398# asm 2: movq 0(<x=%r8),>in0=%rcx 399movq 0(%r8),%rcx 400 401# qhasm: in2 = *(uint64 *) (x + 8) 402# asm 1: movq 8(<x=int64#5),>in2=int64#6 403# asm 2: movq 8(<x=%r8),>in2=%r9 404movq 8(%r8),%r9 405 406# qhasm: in4 = *(uint64 *) (x + 16) 407# asm 1: movq 16(<x=int64#5),>in4=int64#7 408# asm 2: movq 16(<x=%r8),>in4=%rax 409movq 16(%r8),%rax 410 411# qhasm: in6 = *(uint64 *) (x + 24) 412# asm 1: movq 24(<x=int64#5),>in6=int64#8 413# asm 2: movq 24(<x=%r8),>in6=%r10 414movq 24(%r8),%r10 415 416# qhasm: in8 = *(uint64 *) (x + 32) 417# asm 1: movq 32(<x=int64#5),>in8=int64#9 418# asm 2: movq 32(<x=%r8),>in8=%r11 419movq 32(%r8),%r11 420 421# qhasm: in10 = *(uint64 *) (x + 40) 422# asm 1: movq 40(<x=int64#5),>in10=int64#10 423# asm 2: movq 40(<x=%r8),>in10=%r12 424movq 40(%r8),%r12 425 426# qhasm: in12 = *(uint64 *) (x + 48) 427# asm 1: movq 48(<x=int64#5),>in12=int64#11 428# asm 2: movq 48(<x=%r8),>in12=%r13 429movq 48(%r8),%r13 430 431# qhasm: in14 = *(uint64 *) (x + 56) 432# asm 1: movq 56(<x=int64#5),>in14=int64#12 433# asm 2: movq 56(<x=%r8),>in14=%r14 434movq 56(%r8),%r14 435 436# qhasm: j0 = in0 437# asm 1: movq <in0=int64#4,>j0=stack64#8 438# asm 2: movq <in0=%rcx,>j0=56(%rsp) 439movq %rcx,56(%rsp) 440 441# qhasm: j2 = in2 442# asm 1: movq <in2=int64#6,>j2=stack64#9 443# asm 2: movq <in2=%r9,>j2=64(%rsp) 444movq %r9,64(%rsp) 445 446# qhasm: j4 = in4 447# asm 1: movq <in4=int64#7,>j4=stack64#10 448# asm 2: movq <in4=%rax,>j4=72(%rsp) 449movq %rax,72(%rsp) 450 451# qhasm: j6 = in6 452# asm 1: movq <in6=int64#8,>j6=stack64#11 453# asm 2: movq <in6=%r10,>j6=80(%rsp) 454movq %r10,80(%rsp) 455 456# qhasm: j8 = in8 457# asm 1: movq <in8=int64#9,>j8=stack64#12 458# asm 2: movq <in8=%r11,>j8=88(%rsp) 459movq %r11,88(%rsp) 460 461# qhasm: j10 = in10 462# asm 1: movq <in10=int64#10,>j10=stack64#13 463# asm 2: movq <in10=%r12,>j10=96(%rsp) 464movq %r12,96(%rsp) 465 466# qhasm: j12 = in12 467# asm 1: movq <in12=int64#11,>j12=stack64#14 468# asm 2: movq <in12=%r13,>j12=104(%rsp) 469movq %r13,104(%rsp) 470 471# qhasm: j14 = in14 472# asm 1: movq <in14=int64#12,>j14=stack64#15 473# asm 2: movq <in14=%r14,>j14=112(%rsp) 474movq %r14,112(%rsp) 475 476# qhasm: x_backup = x 477# asm 1: movq <x=int64#5,>x_backup=stack64#16 478# asm 2: movq <x=%r8,>x_backup=120(%rsp) 479movq %r8,120(%rsp) 480 481# qhasm: bytesatleast1: 482._bytesatleast1: 483 484# qhasm: unsigned<? bytes - 64 485# asm 1: cmp $64,<bytes=int64#3 486# asm 2: cmp $64,<bytes=%rdx 487cmp $64,%rdx 488# comment:fp stack unchanged by jump 489 490# qhasm: goto nocopy if !unsigned< 491jae ._nocopy 492 493# qhasm: ctarget = out 494# asm 1: mov <out=int64#1,>ctarget=int64#5 495# asm 2: mov <out=%rdi,>ctarget=%r8 496mov %rdi,%r8 497 498# qhasm: out = &tmp 499# asm 1: leaq <tmp=stack512#1,>out=int64#1 500# asm 2: leaq <tmp=288(%rsp),>out=%rdi 501leaq 288(%rsp),%rdi 502 503# qhasm: i = bytes 504# asm 1: mov <bytes=int64#3,>i=int64#4 505# asm 2: mov <bytes=%rdx,>i=%rcx 506mov %rdx,%rcx 507 508# qhasm: while (i) { *out++ = *m++; --i } 509rep movsb 510 511# qhasm: out = &tmp 512# asm 1: leaq <tmp=stack512#1,>out=int64#1 513# asm 2: leaq <tmp=288(%rsp),>out=%rdi 514leaq 288(%rsp),%rdi 515 516# qhasm: m = &tmp 517# asm 1: leaq <tmp=stack512#1,>m=int64#2 518# asm 2: leaq <tmp=288(%rsp),>m=%rsi 519leaq 288(%rsp),%rsi 520# comment:fp stack unchanged by fallthrough 521 522# qhasm: nocopy: 523._nocopy: 524 525# qhasm: out_backup = out 526# asm 1: movq <out=int64#1,>out_backup=stack64#17 527# asm 2: movq <out=%rdi,>out_backup=128(%rsp) 528movq %rdi,128(%rsp) 529 530# qhasm: m_backup = m 531# asm 1: movq <m=int64#2,>m_backup=stack64#18 532# asm 2: movq <m=%rsi,>m_backup=136(%rsp) 533movq %rsi,136(%rsp) 534 535# qhasm: bytes_backup = bytes 536# asm 1: movq <bytes=int64#3,>bytes_backup=stack64#19 537# asm 2: movq <bytes=%rdx,>bytes_backup=144(%rsp) 538movq %rdx,144(%rsp) 539 540# qhasm: in0 = j0 541# asm 1: movq <j0=stack64#8,>in0=int64#1 542# asm 2: movq <j0=56(%rsp),>in0=%rdi 543movq 56(%rsp),%rdi 544 545# qhasm: in2 = j2 546# asm 1: movq <j2=stack64#9,>in2=int64#2 547# asm 2: movq <j2=64(%rsp),>in2=%rsi 548movq 64(%rsp),%rsi 549 550# qhasm: in4 = j4 551# asm 1: movq <j4=stack64#10,>in4=int64#3 552# asm 2: movq <j4=72(%rsp),>in4=%rdx 553movq 72(%rsp),%rdx 554 555# qhasm: in6 = j6 556# asm 1: movq <j6=stack64#11,>in6=int64#4 557# asm 2: movq <j6=80(%rsp),>in6=%rcx 558movq 80(%rsp),%rcx 559 560# qhasm: in8 = j8 561# asm 1: movq <j8=stack64#12,>in8=int64#6 562# asm 2: movq <j8=88(%rsp),>in8=%r9 563movq 88(%rsp),%r9 564 565# qhasm: in10 = j10 566# asm 1: movq <j10=stack64#13,>in10=int64#7 567# asm 2: movq <j10=96(%rsp),>in10=%rax 568movq 96(%rsp),%rax 569 570# qhasm: in12 = j12 571# asm 1: movq <j12=stack64#14,>in12=int64#8 572# asm 2: movq <j12=104(%rsp),>in12=%r10 573movq 104(%rsp),%r10 574 575# qhasm: in14 = j14 576# asm 1: movq <j14=stack64#15,>in14=int64#9 577# asm 2: movq <j14=112(%rsp),>in14=%r11 578movq 112(%rsp),%r11 579 580# qhasm: x0 = in0 581# asm 1: movq <in0=int64#1,>x0=stack64#20 582# asm 2: movq <in0=%rdi,>x0=152(%rsp) 583movq %rdi,152(%rsp) 584 585# qhasm: x2 = in2 586# asm 1: movq <in2=int64#2,>x2=stack64#21 587# asm 2: movq <in2=%rsi,>x2=160(%rsp) 588movq %rsi,160(%rsp) 589 590# qhasm: x4 = in4 591# asm 1: movq <in4=int64#3,>x4=stack64#22 592# asm 2: movq <in4=%rdx,>x4=168(%rsp) 593movq %rdx,168(%rsp) 594 595# qhasm: x6 = in6 596# asm 1: movq <in6=int64#4,>x6=stack64#23 597# asm 2: movq <in6=%rcx,>x6=176(%rsp) 598movq %rcx,176(%rsp) 599 600# qhasm: x8 = in8 601# asm 1: movq <in8=int64#6,>x8=stack64#24 602# asm 2: movq <in8=%r9,>x8=184(%rsp) 603movq %r9,184(%rsp) 604 605# qhasm: x10 = in10 606# asm 1: movq <in10=int64#7,>x10=stack64#25 607# asm 2: movq <in10=%rax,>x10=192(%rsp) 608movq %rax,192(%rsp) 609 610# qhasm: x12 = in12 611# asm 1: movq <in12=int64#8,>x12=stack64#26 612# asm 2: movq <in12=%r10,>x12=200(%rsp) 613movq %r10,200(%rsp) 614 615# qhasm: x14 = in14 616# asm 1: movq <in14=int64#9,>x14=stack64#27 617# asm 2: movq <in14=%r11,>x14=208(%rsp) 618movq %r11,208(%rsp) 619 620# qhasm: (uint64) in0 >>= 32 621# asm 1: shr $32,<in0=int64#1 622# asm 2: shr $32,<in0=%rdi 623shr $32,%rdi 624 625# qhasm: (uint64) in2 >>= 32 626# asm 1: shr $32,<in2=int64#2 627# asm 2: shr $32,<in2=%rsi 628shr $32,%rsi 629 630# qhasm: (uint64) in4 >>= 32 631# asm 1: shr $32,<in4=int64#3 632# asm 2: shr $32,<in4=%rdx 633shr $32,%rdx 634 635# qhasm: (uint64) in6 >>= 32 636# asm 1: shr $32,<in6=int64#4 637# asm 2: shr $32,<in6=%rcx 638shr $32,%rcx 639 640# qhasm: (uint64) in8 >>= 32 641# asm 1: shr $32,<in8=int64#6 642# asm 2: shr $32,<in8=%r9 643shr $32,%r9 644 645# qhasm: (uint64) in10 >>= 32 646# asm 1: shr $32,<in10=int64#7 647# asm 2: shr $32,<in10=%rax 648shr $32,%rax 649 650# qhasm: (uint64) in12 >>= 32 651# asm 1: shr $32,<in12=int64#8 652# asm 2: shr $32,<in12=%r10 653shr $32,%r10 654 655# qhasm: (uint64) in14 >>= 32 656# asm 1: shr $32,<in14=int64#9 657# asm 2: shr $32,<in14=%r11 658shr $32,%r11 659 660# qhasm: x1 = in0 661# asm 1: movq <in0=int64#1,>x1=stack64#28 662# asm 2: movq <in0=%rdi,>x1=216(%rsp) 663movq %rdi,216(%rsp) 664 665# qhasm: x3 = in2 666# asm 1: movq <in2=int64#2,>x3=stack64#29 667# asm 2: movq <in2=%rsi,>x3=224(%rsp) 668movq %rsi,224(%rsp) 669 670# qhasm: x5 = in4 671# asm 1: movq <in4=int64#3,>x5=stack64#30 672# asm 2: movq <in4=%rdx,>x5=232(%rsp) 673movq %rdx,232(%rsp) 674 675# qhasm: x7 = in6 676# asm 1: movq <in6=int64#4,>x7=stack64#31 677# asm 2: movq <in6=%rcx,>x7=240(%rsp) 678movq %rcx,240(%rsp) 679 680# qhasm: x9 = in8 681# asm 1: movq <in8=int64#6,>x9=stack64#32 682# asm 2: movq <in8=%r9,>x9=248(%rsp) 683movq %r9,248(%rsp) 684 685# qhasm: x11 = in10 686# asm 1: movq <in10=int64#7,>x11=stack64#33 687# asm 2: movq <in10=%rax,>x11=256(%rsp) 688movq %rax,256(%rsp) 689 690# qhasm: x13 = in12 691# asm 1: movq <in12=int64#8,>x13=stack64#34 692# asm 2: movq <in12=%r10,>x13=264(%rsp) 693movq %r10,264(%rsp) 694 695# qhasm: x15 = in14 696# asm 1: movq <in14=int64#9,>x15=stack64#35 697# asm 2: movq <in14=%r11,>x15=272(%rsp) 698movq %r11,272(%rsp) 699 700# qhasm: i = 20 701# asm 1: mov $20,>i=int64#1 702# asm 2: mov $20,>i=%rdi 703mov $20,%rdi 704 705# qhasm: mainloop: 706._mainloop: 707 708# qhasm: a = x12 709# asm 1: movq <x12=stack64#26,>a=int64#2 710# asm 2: movq <x12=200(%rsp),>a=%rsi 711movq 200(%rsp),%rsi 712 713# qhasm: b = x0 714# asm 1: movq <x0=stack64#20,>b=int64#3 715# asm 2: movq <x0=152(%rsp),>b=%rdx 716movq 152(%rsp),%rdx 717 718# qhasm: c = x4 719# asm 1: movq <x4=stack64#22,>c=int64#4 720# asm 2: movq <x4=168(%rsp),>c=%rcx 721movq 168(%rsp),%rcx 722 723# qhasm: e = a + b 724# asm 1: lea (<a=int64#2,<b=int64#3),>e=int64#6 725# asm 2: lea (<a=%rsi,<b=%rdx),>e=%r9 726lea (%rsi,%rdx),%r9 727 728# qhasm: (uint32) e <<<= 7 729# asm 1: rol $7,<e=int64#6d 730# asm 2: rol $7,<e=%r9d 731rol $7,%r9d 732 733# qhasm: c ^= e 734# asm 1: xor <e=int64#6,<c=int64#4 735# asm 2: xor <e=%r9,<c=%rcx 736xor %r9,%rcx 737 738# qhasm: x4 = c 739# asm 1: movq <c=int64#4,>x4=stack64#22 740# asm 2: movq <c=%rcx,>x4=168(%rsp) 741movq %rcx,168(%rsp) 742 743# qhasm: d = x8 744# asm 1: movq <x8=stack64#24,>d=int64#6 745# asm 2: movq <x8=184(%rsp),>d=%r9 746movq 184(%rsp),%r9 747 748# qhasm: e = b + c 749# asm 1: lea (<b=int64#3,<c=int64#4),>e=int64#7 750# asm 2: lea (<b=%rdx,<c=%rcx),>e=%rax 751lea (%rdx,%rcx),%rax 752 753# qhasm: (uint32) e <<<= 9 754# asm 1: rol $9,<e=int64#7d 755# asm 2: rol $9,<e=%eax 756rol $9,%eax 757 758# qhasm: d ^= e 759# asm 1: xor <e=int64#7,<d=int64#6 760# asm 2: xor <e=%rax,<d=%r9 761xor %rax,%r9 762 763# qhasm: x8 = d 764# asm 1: movq <d=int64#6,>x8=stack64#24 765# asm 2: movq <d=%r9,>x8=184(%rsp) 766movq %r9,184(%rsp) 767 768# qhasm: c += d 769# asm 1: add <d=int64#6,<c=int64#4 770# asm 2: add <d=%r9,<c=%rcx 771add %r9,%rcx 772 773# qhasm: (uint32) c <<<= 13 774# asm 1: rol $13,<c=int64#4d 775# asm 2: rol $13,<c=%ecx 776rol $13,%ecx 777 778# qhasm: a ^= c 779# asm 1: xor <c=int64#4,<a=int64#2 780# asm 2: xor <c=%rcx,<a=%rsi 781xor %rcx,%rsi 782 783# qhasm: x12 = a 784# asm 1: movq <a=int64#2,>x12=stack64#26 785# asm 2: movq <a=%rsi,>x12=200(%rsp) 786movq %rsi,200(%rsp) 787 788# qhasm: a += d 789# asm 1: add <d=int64#6,<a=int64#2 790# asm 2: add <d=%r9,<a=%rsi 791add %r9,%rsi 792 793# qhasm: (uint32) a <<<= 18 794# asm 1: rol $18,<a=int64#2d 795# asm 2: rol $18,<a=%esi 796rol $18,%esi 797 798# qhasm: b ^= a 799# asm 1: xor <a=int64#2,<b=int64#3 800# asm 2: xor <a=%rsi,<b=%rdx 801xor %rsi,%rdx 802 803# qhasm: x0 = b 804# asm 1: movq <b=int64#3,>x0=stack64#20 805# asm 2: movq <b=%rdx,>x0=152(%rsp) 806movq %rdx,152(%rsp) 807 808# qhasm: r = x1 809# asm 1: movq <x1=stack64#28,>r=int64#2 810# asm 2: movq <x1=216(%rsp),>r=%rsi 811movq 216(%rsp),%rsi 812 813# qhasm: s = x5 814# asm 1: movq <x5=stack64#30,>s=int64#3 815# asm 2: movq <x5=232(%rsp),>s=%rdx 816movq 232(%rsp),%rdx 817 818# qhasm: t = x9 819# asm 1: movq <x9=stack64#32,>t=int64#4 820# asm 2: movq <x9=248(%rsp),>t=%rcx 821movq 248(%rsp),%rcx 822 823# qhasm: v = r + s 824# asm 1: lea (<r=int64#2,<s=int64#3),>v=int64#6 825# asm 2: lea (<r=%rsi,<s=%rdx),>v=%r9 826lea (%rsi,%rdx),%r9 827 828# qhasm: (uint32) v <<<= 7 829# asm 1: rol $7,<v=int64#6d 830# asm 2: rol $7,<v=%r9d 831rol $7,%r9d 832 833# qhasm: t ^= v 834# asm 1: xor <v=int64#6,<t=int64#4 835# asm 2: xor <v=%r9,<t=%rcx 836xor %r9,%rcx 837 838# qhasm: x9 = t 839# asm 1: movq <t=int64#4,>x9=stack64#32 840# asm 2: movq <t=%rcx,>x9=248(%rsp) 841movq %rcx,248(%rsp) 842 843# qhasm: u = x13 844# asm 1: movq <x13=stack64#34,>u=int64#6 845# asm 2: movq <x13=264(%rsp),>u=%r9 846movq 264(%rsp),%r9 847 848# qhasm: v = s + t 849# asm 1: lea (<s=int64#3,<t=int64#4),>v=int64#7 850# asm 2: lea (<s=%rdx,<t=%rcx),>v=%rax 851lea (%rdx,%rcx),%rax 852 853# qhasm: (uint32) v <<<= 9 854# asm 1: rol $9,<v=int64#7d 855# asm 2: rol $9,<v=%eax 856rol $9,%eax 857 858# qhasm: u ^= v 859# asm 1: xor <v=int64#7,<u=int64#6 860# asm 2: xor <v=%rax,<u=%r9 861xor %rax,%r9 862 863# qhasm: x13 = u 864# asm 1: movq <u=int64#6,>x13=stack64#34 865# asm 2: movq <u=%r9,>x13=264(%rsp) 866movq %r9,264(%rsp) 867 868# qhasm: t += u 869# asm 1: add <u=int64#6,<t=int64#4 870# asm 2: add <u=%r9,<t=%rcx 871add %r9,%rcx 872 873# qhasm: (uint32) t <<<= 13 874# asm 1: rol $13,<t=int64#4d 875# asm 2: rol $13,<t=%ecx 876rol $13,%ecx 877 878# qhasm: r ^= t 879# asm 1: xor <t=int64#4,<r=int64#2 880# asm 2: xor <t=%rcx,<r=%rsi 881xor %rcx,%rsi 882 883# qhasm: x1 = r 884# asm 1: movq <r=int64#2,>x1=stack64#28 885# asm 2: movq <r=%rsi,>x1=216(%rsp) 886movq %rsi,216(%rsp) 887 888# qhasm: r += u 889# asm 1: add <u=int64#6,<r=int64#2 890# asm 2: add <u=%r9,<r=%rsi 891add %r9,%rsi 892 893# qhasm: (uint32) r <<<= 18 894# asm 1: rol $18,<r=int64#2d 895# asm 2: rol $18,<r=%esi 896rol $18,%esi 897 898# qhasm: s ^= r 899# asm 1: xor <r=int64#2,<s=int64#3 900# asm 2: xor <r=%rsi,<s=%rdx 901xor %rsi,%rdx 902 903# qhasm: x5 = s 904# asm 1: movq <s=int64#3,>x5=stack64#30 905# asm 2: movq <s=%rdx,>x5=232(%rsp) 906movq %rdx,232(%rsp) 907 908# qhasm: A = x6 909# asm 1: movq <x6=stack64#23,>A=int64#2 910# asm 2: movq <x6=176(%rsp),>A=%rsi 911movq 176(%rsp),%rsi 912 913# qhasm: B = x10 914# asm 1: movq <x10=stack64#25,>B=int64#3 915# asm 2: movq <x10=192(%rsp),>B=%rdx 916movq 192(%rsp),%rdx 917 918# qhasm: C = x14 919# asm 1: movq <x14=stack64#27,>C=int64#4 920# asm 2: movq <x14=208(%rsp),>C=%rcx 921movq 208(%rsp),%rcx 922 923# qhasm: E = A + B 924# asm 1: lea (<A=int64#2,<B=int64#3),>E=int64#6 925# asm 2: lea (<A=%rsi,<B=%rdx),>E=%r9 926lea (%rsi,%rdx),%r9 927 928# qhasm: (uint32) E <<<= 7 929# asm 1: rol $7,<E=int64#6d 930# asm 2: rol $7,<E=%r9d 931rol $7,%r9d 932 933# qhasm: C ^= E 934# asm 1: xor <E=int64#6,<C=int64#4 935# asm 2: xor <E=%r9,<C=%rcx 936xor %r9,%rcx 937 938# qhasm: x14 = C 939# asm 1: movq <C=int64#4,>x14=stack64#27 940# asm 2: movq <C=%rcx,>x14=208(%rsp) 941movq %rcx,208(%rsp) 942 943# qhasm: D = x2 944# asm 1: movq <x2=stack64#21,>D=int64#6 945# asm 2: movq <x2=160(%rsp),>D=%r9 946movq 160(%rsp),%r9 947 948# qhasm: E = B + C 949# asm 1: lea (<B=int64#3,<C=int64#4),>E=int64#7 950# asm 2: lea (<B=%rdx,<C=%rcx),>E=%rax 951lea (%rdx,%rcx),%rax 952 953# qhasm: (uint32) E <<<= 9 954# asm 1: rol $9,<E=int64#7d 955# asm 2: rol $9,<E=%eax 956rol $9,%eax 957 958# qhasm: D ^= E 959# asm 1: xor <E=int64#7,<D=int64#6 960# asm 2: xor <E=%rax,<D=%r9 961xor %rax,%r9 962 963# qhasm: x2 = D 964# asm 1: movq <D=int64#6,>x2=stack64#21 965# asm 2: movq <D=%r9,>x2=160(%rsp) 966movq %r9,160(%rsp) 967 968# qhasm: C += D 969# asm 1: add <D=int64#6,<C=int64#4 970# asm 2: add <D=%r9,<C=%rcx 971add %r9,%rcx 972 973# qhasm: (uint32) C <<<= 13 974# asm 1: rol $13,<C=int64#4d 975# asm 2: rol $13,<C=%ecx 976rol $13,%ecx 977 978# qhasm: A ^= C 979# asm 1: xor <C=int64#4,<A=int64#2 980# asm 2: xor <C=%rcx,<A=%rsi 981xor %rcx,%rsi 982 983# qhasm: x6 = A 984# asm 1: movq <A=int64#2,>x6=stack64#23 985# asm 2: movq <A=%rsi,>x6=176(%rsp) 986movq %rsi,176(%rsp) 987 988# qhasm: A += D 989# asm 1: add <D=int64#6,<A=int64#2 990# asm 2: add <D=%r9,<A=%rsi 991add %r9,%rsi 992 993# qhasm: (uint32) A <<<= 18 994# asm 1: rol $18,<A=int64#2d 995# asm 2: rol $18,<A=%esi 996rol $18,%esi 997 998# qhasm: B ^= A 999# asm 1: xor <A=int64#2,<B=int64#3 1000# asm 2: xor <A=%rsi,<B=%rdx 1001xor %rsi,%rdx 1002 1003# qhasm: x10 = B 1004# asm 1: movq <B=int64#3,>x10=stack64#25 1005# asm 2: movq <B=%rdx,>x10=192(%rsp) 1006movq %rdx,192(%rsp) 1007 1008# qhasm: R = x11 1009# asm 1: movq <x11=stack64#33,>R=int64#2 1010# asm 2: movq <x11=256(%rsp),>R=%rsi 1011movq 256(%rsp),%rsi 1012 1013# qhasm: S = x15 1014# asm 1: movq <x15=stack64#35,>S=int64#3 1015# asm 2: movq <x15=272(%rsp),>S=%rdx 1016movq 272(%rsp),%rdx 1017 1018# qhasm: T = x3 1019# asm 1: movq <x3=stack64#29,>T=int64#4 1020# asm 2: movq <x3=224(%rsp),>T=%rcx 1021movq 224(%rsp),%rcx 1022 1023# qhasm: V = R + S 1024# asm 1: lea (<R=int64#2,<S=int64#3),>V=int64#6 1025# asm 2: lea (<R=%rsi,<S=%rdx),>V=%r9 1026lea (%rsi,%rdx),%r9 1027 1028# qhasm: (uint32) V <<<= 7 1029# asm 1: rol $7,<V=int64#6d 1030# asm 2: rol $7,<V=%r9d 1031rol $7,%r9d 1032 1033# qhasm: T ^= V 1034# asm 1: xor <V=int64#6,<T=int64#4 1035# asm 2: xor <V=%r9,<T=%rcx 1036xor %r9,%rcx 1037 1038# qhasm: x3 = T 1039# asm 1: movq <T=int64#4,>x3=stack64#29 1040# asm 2: movq <T=%rcx,>x3=224(%rsp) 1041movq %rcx,224(%rsp) 1042 1043# qhasm: U = x7 1044# asm 1: movq <x7=stack64#31,>U=int64#6 1045# asm 2: movq <x7=240(%rsp),>U=%r9 1046movq 240(%rsp),%r9 1047 1048# qhasm: V = S + T 1049# asm 1: lea (<S=int64#3,<T=int64#4),>V=int64#7 1050# asm 2: lea (<S=%rdx,<T=%rcx),>V=%rax 1051lea (%rdx,%rcx),%rax 1052 1053# qhasm: (uint32) V <<<= 9 1054# asm 1: rol $9,<V=int64#7d 1055# asm 2: rol $9,<V=%eax 1056rol $9,%eax 1057 1058# qhasm: U ^= V 1059# asm 1: xor <V=int64#7,<U=int64#6 1060# asm 2: xor <V=%rax,<U=%r9 1061xor %rax,%r9 1062 1063# qhasm: x7 = U 1064# asm 1: movq <U=int64#6,>x7=stack64#31 1065# asm 2: movq <U=%r9,>x7=240(%rsp) 1066movq %r9,240(%rsp) 1067 1068# qhasm: T += U 1069# asm 1: add <U=int64#6,<T=int64#4 1070# asm 2: add <U=%r9,<T=%rcx 1071add %r9,%rcx 1072 1073# qhasm: (uint32) T <<<= 13 1074# asm 1: rol $13,<T=int64#4d 1075# asm 2: rol $13,<T=%ecx 1076rol $13,%ecx 1077 1078# qhasm: R ^= T 1079# asm 1: xor <T=int64#4,<R=int64#2 1080# asm 2: xor <T=%rcx,<R=%rsi 1081xor %rcx,%rsi 1082 1083# qhasm: x11 = R 1084# asm 1: movq <R=int64#2,>x11=stack64#33 1085# asm 2: movq <R=%rsi,>x11=256(%rsp) 1086movq %rsi,256(%rsp) 1087 1088# qhasm: R += U 1089# asm 1: add <U=int64#6,<R=int64#2 1090# asm 2: add <U=%r9,<R=%rsi 1091add %r9,%rsi 1092 1093# qhasm: (uint32) R <<<= 18 1094# asm 1: rol $18,<R=int64#2d 1095# asm 2: rol $18,<R=%esi 1096rol $18,%esi 1097 1098# qhasm: S ^= R 1099# asm 1: xor <R=int64#2,<S=int64#3 1100# asm 2: xor <R=%rsi,<S=%rdx 1101xor %rsi,%rdx 1102 1103# qhasm: x15 = S 1104# asm 1: movq <S=int64#3,>x15=stack64#35 1105# asm 2: movq <S=%rdx,>x15=272(%rsp) 1106movq %rdx,272(%rsp) 1107 1108# qhasm: a = x3 1109# asm 1: movq <x3=stack64#29,>a=int64#2 1110# asm 2: movq <x3=224(%rsp),>a=%rsi 1111movq 224(%rsp),%rsi 1112 1113# qhasm: b = x0 1114# asm 1: movq <x0=stack64#20,>b=int64#3 1115# asm 2: movq <x0=152(%rsp),>b=%rdx 1116movq 152(%rsp),%rdx 1117 1118# qhasm: c = x1 1119# asm 1: movq <x1=stack64#28,>c=int64#4 1120# asm 2: movq <x1=216(%rsp),>c=%rcx 1121movq 216(%rsp),%rcx 1122 1123# qhasm: e = a + b 1124# asm 1: lea (<a=int64#2,<b=int64#3),>e=int64#6 1125# asm 2: lea (<a=%rsi,<b=%rdx),>e=%r9 1126lea (%rsi,%rdx),%r9 1127 1128# qhasm: (uint32) e <<<= 7 1129# asm 1: rol $7,<e=int64#6d 1130# asm 2: rol $7,<e=%r9d 1131rol $7,%r9d 1132 1133# qhasm: c ^= e 1134# asm 1: xor <e=int64#6,<c=int64#4 1135# asm 2: xor <e=%r9,<c=%rcx 1136xor %r9,%rcx 1137 1138# qhasm: x1 = c 1139# asm 1: movq <c=int64#4,>x1=stack64#28 1140# asm 2: movq <c=%rcx,>x1=216(%rsp) 1141movq %rcx,216(%rsp) 1142 1143# qhasm: d = x2 1144# asm 1: movq <x2=stack64#21,>d=int64#6 1145# asm 2: movq <x2=160(%rsp),>d=%r9 1146movq 160(%rsp),%r9 1147 1148# qhasm: e = b + c 1149# asm 1: lea (<b=int64#3,<c=int64#4),>e=int64#7 1150# asm 2: lea (<b=%rdx,<c=%rcx),>e=%rax 1151lea (%rdx,%rcx),%rax 1152 1153# qhasm: (uint32) e <<<= 9 1154# asm 1: rol $9,<e=int64#7d 1155# asm 2: rol $9,<e=%eax 1156rol $9,%eax 1157 1158# qhasm: d ^= e 1159# asm 1: xor <e=int64#7,<d=int64#6 1160# asm 2: xor <e=%rax,<d=%r9 1161xor %rax,%r9 1162 1163# qhasm: x2 = d 1164# asm 1: movq <d=int64#6,>x2=stack64#21 1165# asm 2: movq <d=%r9,>x2=160(%rsp) 1166movq %r9,160(%rsp) 1167 1168# qhasm: c += d 1169# asm 1: add <d=int64#6,<c=int64#4 1170# asm 2: add <d=%r9,<c=%rcx 1171add %r9,%rcx 1172 1173# qhasm: (uint32) c <<<= 13 1174# asm 1: rol $13,<c=int64#4d 1175# asm 2: rol $13,<c=%ecx 1176rol $13,%ecx 1177 1178# qhasm: a ^= c 1179# asm 1: xor <c=int64#4,<a=int64#2 1180# asm 2: xor <c=%rcx,<a=%rsi 1181xor %rcx,%rsi 1182 1183# qhasm: x3 = a 1184# asm 1: movq <a=int64#2,>x3=stack64#29 1185# asm 2: movq <a=%rsi,>x3=224(%rsp) 1186movq %rsi,224(%rsp) 1187 1188# qhasm: a += d 1189# asm 1: add <d=int64#6,<a=int64#2 1190# asm 2: add <d=%r9,<a=%rsi 1191add %r9,%rsi 1192 1193# qhasm: (uint32) a <<<= 18 1194# asm 1: rol $18,<a=int64#2d 1195# asm 2: rol $18,<a=%esi 1196rol $18,%esi 1197 1198# qhasm: b ^= a 1199# asm 1: xor <a=int64#2,<b=int64#3 1200# asm 2: xor <a=%rsi,<b=%rdx 1201xor %rsi,%rdx 1202 1203# qhasm: x0 = b 1204# asm 1: movq <b=int64#3,>x0=stack64#20 1205# asm 2: movq <b=%rdx,>x0=152(%rsp) 1206movq %rdx,152(%rsp) 1207 1208# qhasm: r = x4 1209# asm 1: movq <x4=stack64#22,>r=int64#2 1210# asm 2: movq <x4=168(%rsp),>r=%rsi 1211movq 168(%rsp),%rsi 1212 1213# qhasm: s = x5 1214# asm 1: movq <x5=stack64#30,>s=int64#3 1215# asm 2: movq <x5=232(%rsp),>s=%rdx 1216movq 232(%rsp),%rdx 1217 1218# qhasm: t = x6 1219# asm 1: movq <x6=stack64#23,>t=int64#4 1220# asm 2: movq <x6=176(%rsp),>t=%rcx 1221movq 176(%rsp),%rcx 1222 1223# qhasm: v = r + s 1224# asm 1: lea (<r=int64#2,<s=int64#3),>v=int64#6 1225# asm 2: lea (<r=%rsi,<s=%rdx),>v=%r9 1226lea (%rsi,%rdx),%r9 1227 1228# qhasm: (uint32) v <<<= 7 1229# asm 1: rol $7,<v=int64#6d 1230# asm 2: rol $7,<v=%r9d 1231rol $7,%r9d 1232 1233# qhasm: t ^= v 1234# asm 1: xor <v=int64#6,<t=int64#4 1235# asm 2: xor <v=%r9,<t=%rcx 1236xor %r9,%rcx 1237 1238# qhasm: x6 = t 1239# asm 1: movq <t=int64#4,>x6=stack64#23 1240# asm 2: movq <t=%rcx,>x6=176(%rsp) 1241movq %rcx,176(%rsp) 1242 1243# qhasm: u = x7 1244# asm 1: movq <x7=stack64#31,>u=int64#6 1245# asm 2: movq <x7=240(%rsp),>u=%r9 1246movq 240(%rsp),%r9 1247 1248# qhasm: v = s + t 1249# asm 1: lea (<s=int64#3,<t=int64#4),>v=int64#7 1250# asm 2: lea (<s=%rdx,<t=%rcx),>v=%rax 1251lea (%rdx,%rcx),%rax 1252 1253# qhasm: (uint32) v <<<= 9 1254# asm 1: rol $9,<v=int64#7d 1255# asm 2: rol $9,<v=%eax 1256rol $9,%eax 1257 1258# qhasm: u ^= v 1259# asm 1: xor <v=int64#7,<u=int64#6 1260# asm 2: xor <v=%rax,<u=%r9 1261xor %rax,%r9 1262 1263# qhasm: x7 = u 1264# asm 1: movq <u=int64#6,>x7=stack64#31 1265# asm 2: movq <u=%r9,>x7=240(%rsp) 1266movq %r9,240(%rsp) 1267 1268# qhasm: t += u 1269# asm 1: add <u=int64#6,<t=int64#4 1270# asm 2: add <u=%r9,<t=%rcx 1271add %r9,%rcx 1272 1273# qhasm: (uint32) t <<<= 13 1274# asm 1: rol $13,<t=int64#4d 1275# asm 2: rol $13,<t=%ecx 1276rol $13,%ecx 1277 1278# qhasm: r ^= t 1279# asm 1: xor <t=int64#4,<r=int64#2 1280# asm 2: xor <t=%rcx,<r=%rsi 1281xor %rcx,%rsi 1282 1283# qhasm: x4 = r 1284# asm 1: movq <r=int64#2,>x4=stack64#22 1285# asm 2: movq <r=%rsi,>x4=168(%rsp) 1286movq %rsi,168(%rsp) 1287 1288# qhasm: r += u 1289# asm 1: add <u=int64#6,<r=int64#2 1290# asm 2: add <u=%r9,<r=%rsi 1291add %r9,%rsi 1292 1293# qhasm: (uint32) r <<<= 18 1294# asm 1: rol $18,<r=int64#2d 1295# asm 2: rol $18,<r=%esi 1296rol $18,%esi 1297 1298# qhasm: s ^= r 1299# asm 1: xor <r=int64#2,<s=int64#3 1300# asm 2: xor <r=%rsi,<s=%rdx 1301xor %rsi,%rdx 1302 1303# qhasm: x5 = s 1304# asm 1: movq <s=int64#3,>x5=stack64#30 1305# asm 2: movq <s=%rdx,>x5=232(%rsp) 1306movq %rdx,232(%rsp) 1307 1308# qhasm: A = x9 1309# asm 1: movq <x9=stack64#32,>A=int64#2 1310# asm 2: movq <x9=248(%rsp),>A=%rsi 1311movq 248(%rsp),%rsi 1312 1313# qhasm: B = x10 1314# asm 1: movq <x10=stack64#25,>B=int64#3 1315# asm 2: movq <x10=192(%rsp),>B=%rdx 1316movq 192(%rsp),%rdx 1317 1318# qhasm: C = x11 1319# asm 1: movq <x11=stack64#33,>C=int64#4 1320# asm 2: movq <x11=256(%rsp),>C=%rcx 1321movq 256(%rsp),%rcx 1322 1323# qhasm: E = A + B 1324# asm 1: lea (<A=int64#2,<B=int64#3),>E=int64#6 1325# asm 2: lea (<A=%rsi,<B=%rdx),>E=%r9 1326lea (%rsi,%rdx),%r9 1327 1328# qhasm: (uint32) E <<<= 7 1329# asm 1: rol $7,<E=int64#6d 1330# asm 2: rol $7,<E=%r9d 1331rol $7,%r9d 1332 1333# qhasm: C ^= E 1334# asm 1: xor <E=int64#6,<C=int64#4 1335# asm 2: xor <E=%r9,<C=%rcx 1336xor %r9,%rcx 1337 1338# qhasm: x11 = C 1339# asm 1: movq <C=int64#4,>x11=stack64#33 1340# asm 2: movq <C=%rcx,>x11=256(%rsp) 1341movq %rcx,256(%rsp) 1342 1343# qhasm: D = x8 1344# asm 1: movq <x8=stack64#24,>D=int64#6 1345# asm 2: movq <x8=184(%rsp),>D=%r9 1346movq 184(%rsp),%r9 1347 1348# qhasm: E = B + C 1349# asm 1: lea (<B=int64#3,<C=int64#4),>E=int64#7 1350# asm 2: lea (<B=%rdx,<C=%rcx),>E=%rax 1351lea (%rdx,%rcx),%rax 1352 1353# qhasm: (uint32) E <<<= 9 1354# asm 1: rol $9,<E=int64#7d 1355# asm 2: rol $9,<E=%eax 1356rol $9,%eax 1357 1358# qhasm: D ^= E 1359# asm 1: xor <E=int64#7,<D=int64#6 1360# asm 2: xor <E=%rax,<D=%r9 1361xor %rax,%r9 1362 1363# qhasm: x8 = D 1364# asm 1: movq <D=int64#6,>x8=stack64#24 1365# asm 2: movq <D=%r9,>x8=184(%rsp) 1366movq %r9,184(%rsp) 1367 1368# qhasm: C += D 1369# asm 1: add <D=int64#6,<C=int64#4 1370# asm 2: add <D=%r9,<C=%rcx 1371add %r9,%rcx 1372 1373# qhasm: (uint32) C <<<= 13 1374# asm 1: rol $13,<C=int64#4d 1375# asm 2: rol $13,<C=%ecx 1376rol $13,%ecx 1377 1378# qhasm: A ^= C 1379# asm 1: xor <C=int64#4,<A=int64#2 1380# asm 2: xor <C=%rcx,<A=%rsi 1381xor %rcx,%rsi 1382 1383# qhasm: x9 = A 1384# asm 1: movq <A=int64#2,>x9=stack64#32 1385# asm 2: movq <A=%rsi,>x9=248(%rsp) 1386movq %rsi,248(%rsp) 1387 1388# qhasm: A += D 1389# asm 1: add <D=int64#6,<A=int64#2 1390# asm 2: add <D=%r9,<A=%rsi 1391add %r9,%rsi 1392 1393# qhasm: (uint32) A <<<= 18 1394# asm 1: rol $18,<A=int64#2d 1395# asm 2: rol $18,<A=%esi 1396rol $18,%esi 1397 1398# qhasm: B ^= A 1399# asm 1: xor <A=int64#2,<B=int64#3 1400# asm 2: xor <A=%rsi,<B=%rdx 1401xor %rsi,%rdx 1402 1403# qhasm: x10 = B 1404# asm 1: movq <B=int64#3,>x10=stack64#25 1405# asm 2: movq <B=%rdx,>x10=192(%rsp) 1406movq %rdx,192(%rsp) 1407 1408# qhasm: R = x14 1409# asm 1: movq <x14=stack64#27,>R=int64#2 1410# asm 2: movq <x14=208(%rsp),>R=%rsi 1411movq 208(%rsp),%rsi 1412 1413# qhasm: S = x15 1414# asm 1: movq <x15=stack64#35,>S=int64#3 1415# asm 2: movq <x15=272(%rsp),>S=%rdx 1416movq 272(%rsp),%rdx 1417 1418# qhasm: T = x12 1419# asm 1: movq <x12=stack64#26,>T=int64#4 1420# asm 2: movq <x12=200(%rsp),>T=%rcx 1421movq 200(%rsp),%rcx 1422 1423# qhasm: V = R + S 1424# asm 1: lea (<R=int64#2,<S=int64#3),>V=int64#6 1425# asm 2: lea (<R=%rsi,<S=%rdx),>V=%r9 1426lea (%rsi,%rdx),%r9 1427 1428# qhasm: (uint32) V <<<= 7 1429# asm 1: rol $7,<V=int64#6d 1430# asm 2: rol $7,<V=%r9d 1431rol $7,%r9d 1432 1433# qhasm: T ^= V 1434# asm 1: xor <V=int64#6,<T=int64#4 1435# asm 2: xor <V=%r9,<T=%rcx 1436xor %r9,%rcx 1437 1438# qhasm: x12 = T 1439# asm 1: movq <T=int64#4,>x12=stack64#26 1440# asm 2: movq <T=%rcx,>x12=200(%rsp) 1441movq %rcx,200(%rsp) 1442 1443# qhasm: U = x13 1444# asm 1: movq <x13=stack64#34,>U=int64#6 1445# asm 2: movq <x13=264(%rsp),>U=%r9 1446movq 264(%rsp),%r9 1447 1448# qhasm: V = S + T 1449# asm 1: lea (<S=int64#3,<T=int64#4),>V=int64#7 1450# asm 2: lea (<S=%rdx,<T=%rcx),>V=%rax 1451lea (%rdx,%rcx),%rax 1452 1453# qhasm: (uint32) V <<<= 9 1454# asm 1: rol $9,<V=int64#7d 1455# asm 2: rol $9,<V=%eax 1456rol $9,%eax 1457 1458# qhasm: U ^= V 1459# asm 1: xor <V=int64#7,<U=int64#6 1460# asm 2: xor <V=%rax,<U=%r9 1461xor %rax,%r9 1462 1463# qhasm: x13 = U 1464# asm 1: movq <U=int64#6,>x13=stack64#34 1465# asm 2: movq <U=%r9,>x13=264(%rsp) 1466movq %r9,264(%rsp) 1467 1468# qhasm: T += U 1469# asm 1: add <U=int64#6,<T=int64#4 1470# asm 2: add <U=%r9,<T=%rcx 1471add %r9,%rcx 1472 1473# qhasm: (uint32) T <<<= 13 1474# asm 1: rol $13,<T=int64#4d 1475# asm 2: rol $13,<T=%ecx 1476rol $13,%ecx 1477 1478# qhasm: R ^= T 1479# asm 1: xor <T=int64#4,<R=int64#2 1480# asm 2: xor <T=%rcx,<R=%rsi 1481xor %rcx,%rsi 1482 1483# qhasm: x14 = R 1484# asm 1: movq <R=int64#2,>x14=stack64#27 1485# asm 2: movq <R=%rsi,>x14=208(%rsp) 1486movq %rsi,208(%rsp) 1487 1488# qhasm: R += U 1489# asm 1: add <U=int64#6,<R=int64#2 1490# asm 2: add <U=%r9,<R=%rsi 1491add %r9,%rsi 1492 1493# qhasm: (uint32) R <<<= 18 1494# asm 1: rol $18,<R=int64#2d 1495# asm 2: rol $18,<R=%esi 1496rol $18,%esi 1497 1498# qhasm: S ^= R 1499# asm 1: xor <R=int64#2,<S=int64#3 1500# asm 2: xor <R=%rsi,<S=%rdx 1501xor %rsi,%rdx 1502 1503# qhasm: x15 = S 1504# asm 1: movq <S=int64#3,>x15=stack64#35 1505# asm 2: movq <S=%rdx,>x15=272(%rsp) 1506movq %rdx,272(%rsp) 1507 1508# qhasm: unsigned>? i -= 2 1509# asm 1: sub $2,<i=int64#1 1510# asm 2: sub $2,<i=%rdi 1511sub $2,%rdi 1512# comment:fp stack unchanged by jump 1513 1514# qhasm: goto mainloop if unsigned> 1515ja ._mainloop 1516 1517# qhasm: out = out_backup 1518# asm 1: movq <out_backup=stack64#17,>out=int64#1 1519# asm 2: movq <out_backup=128(%rsp),>out=%rdi 1520movq 128(%rsp),%rdi 1521 1522# qhasm: m = m_backup 1523# asm 1: movq <m_backup=stack64#18,>m=int64#2 1524# asm 2: movq <m_backup=136(%rsp),>m=%rsi 1525movq 136(%rsp),%rsi 1526 1527# qhasm: in0 = j0 1528# asm 1: movq <j0=stack64#8,>in0=int64#3 1529# asm 2: movq <j0=56(%rsp),>in0=%rdx 1530movq 56(%rsp),%rdx 1531 1532# qhasm: out0 = in0 1533# asm 1: mov <in0=int64#3,>out0=int64#4 1534# asm 2: mov <in0=%rdx,>out0=%rcx 1535mov %rdx,%rcx 1536 1537# qhasm: (uint64) out0 >>= 32 1538# asm 1: shr $32,<out0=int64#4 1539# asm 2: shr $32,<out0=%rcx 1540shr $32,%rcx 1541 1542# qhasm: (uint32) in0 += x0 1543# asm 1: addl <x0=stack64#20,<in0=int64#3d 1544# asm 2: addl <x0=152(%rsp),<in0=%edx 1545addl 152(%rsp),%edx 1546 1547# qhasm: (uint32) out0 += x1 1548# asm 1: addl <x1=stack64#28,<out0=int64#4d 1549# asm 2: addl <x1=216(%rsp),<out0=%ecx 1550addl 216(%rsp),%ecx 1551 1552# qhasm: out0 <<= 32 1553# asm 1: shl $32,<out0=int64#4 1554# asm 2: shl $32,<out0=%rcx 1555shl $32,%rcx 1556 1557# qhasm: out0 ^= in0 1558# asm 1: xor <in0=int64#3,<out0=int64#4 1559# asm 2: xor <in0=%rdx,<out0=%rcx 1560xor %rdx,%rcx 1561 1562# qhasm: out0 ^= *(uint64 *) (m + 0) 1563# asm 1: xorq 0(<m=int64#2),<out0=int64#4 1564# asm 2: xorq 0(<m=%rsi),<out0=%rcx 1565xorq 0(%rsi),%rcx 1566 1567# qhasm: *(uint64 *) (out + 0) = out0 1568# asm 1: movq <out0=int64#4,0(<out=int64#1) 1569# asm 2: movq <out0=%rcx,0(<out=%rdi) 1570movq %rcx,0(%rdi) 1571 1572# qhasm: in2 = j2 1573# asm 1: movq <j2=stack64#9,>in2=int64#3 1574# asm 2: movq <j2=64(%rsp),>in2=%rdx 1575movq 64(%rsp),%rdx 1576 1577# qhasm: out2 = in2 1578# asm 1: mov <in2=int64#3,>out2=int64#4 1579# asm 2: mov <in2=%rdx,>out2=%rcx 1580mov %rdx,%rcx 1581 1582# qhasm: (uint64) out2 >>= 32 1583# asm 1: shr $32,<out2=int64#4 1584# asm 2: shr $32,<out2=%rcx 1585shr $32,%rcx 1586 1587# qhasm: (uint32) in2 += x2 1588# asm 1: addl <x2=stack64#21,<in2=int64#3d 1589# asm 2: addl <x2=160(%rsp),<in2=%edx 1590addl 160(%rsp),%edx 1591 1592# qhasm: (uint32) out2 += x3 1593# asm 1: addl <x3=stack64#29,<out2=int64#4d 1594# asm 2: addl <x3=224(%rsp),<out2=%ecx 1595addl 224(%rsp),%ecx 1596 1597# qhasm: out2 <<= 32 1598# asm 1: shl $32,<out2=int64#4 1599# asm 2: shl $32,<out2=%rcx 1600shl $32,%rcx 1601 1602# qhasm: out2 ^= in2 1603# asm 1: xor <in2=int64#3,<out2=int64#4 1604# asm 2: xor <in2=%rdx,<out2=%rcx 1605xor %rdx,%rcx 1606 1607# qhasm: out2 ^= *(uint64 *) (m + 8) 1608# asm 1: xorq 8(<m=int64#2),<out2=int64#4 1609# asm 2: xorq 8(<m=%rsi),<out2=%rcx 1610xorq 8(%rsi),%rcx 1611 1612# qhasm: *(uint64 *) (out + 8) = out2 1613# asm 1: movq <out2=int64#4,8(<out=int64#1) 1614# asm 2: movq <out2=%rcx,8(<out=%rdi) 1615movq %rcx,8(%rdi) 1616 1617# qhasm: in4 = j4 1618# asm 1: movq <j4=stack64#10,>in4=int64#3 1619# asm 2: movq <j4=72(%rsp),>in4=%rdx 1620movq 72(%rsp),%rdx 1621 1622# qhasm: out4 = in4 1623# asm 1: mov <in4=int64#3,>out4=int64#4 1624# asm 2: mov <in4=%rdx,>out4=%rcx 1625mov %rdx,%rcx 1626 1627# qhasm: (uint64) out4 >>= 32 1628# asm 1: shr $32,<out4=int64#4 1629# asm 2: shr $32,<out4=%rcx 1630shr $32,%rcx 1631 1632# qhasm: (uint32) in4 += x4 1633# asm 1: addl <x4=stack64#22,<in4=int64#3d 1634# asm 2: addl <x4=168(%rsp),<in4=%edx 1635addl 168(%rsp),%edx 1636 1637# qhasm: (uint32) out4 += x5 1638# asm 1: addl <x5=stack64#30,<out4=int64#4d 1639# asm 2: addl <x5=232(%rsp),<out4=%ecx 1640addl 232(%rsp),%ecx 1641 1642# qhasm: out4 <<= 32 1643# asm 1: shl $32,<out4=int64#4 1644# asm 2: shl $32,<out4=%rcx 1645shl $32,%rcx 1646 1647# qhasm: out4 ^= in4 1648# asm 1: xor <in4=int64#3,<out4=int64#4 1649# asm 2: xor <in4=%rdx,<out4=%rcx 1650xor %rdx,%rcx 1651 1652# qhasm: out4 ^= *(uint64 *) (m + 16) 1653# asm 1: xorq 16(<m=int64#2),<out4=int64#4 1654# asm 2: xorq 16(<m=%rsi),<out4=%rcx 1655xorq 16(%rsi),%rcx 1656 1657# qhasm: *(uint64 *) (out + 16) = out4 1658# asm 1: movq <out4=int64#4,16(<out=int64#1) 1659# asm 2: movq <out4=%rcx,16(<out=%rdi) 1660movq %rcx,16(%rdi) 1661 1662# qhasm: in6 = j6 1663# asm 1: movq <j6=stack64#11,>in6=int64#3 1664# asm 2: movq <j6=80(%rsp),>in6=%rdx 1665movq 80(%rsp),%rdx 1666 1667# qhasm: out6 = in6 1668# asm 1: mov <in6=int64#3,>out6=int64#4 1669# asm 2: mov <in6=%rdx,>out6=%rcx 1670mov %rdx,%rcx 1671 1672# qhasm: (uint64) out6 >>= 32 1673# asm 1: shr $32,<out6=int64#4 1674# asm 2: shr $32,<out6=%rcx 1675shr $32,%rcx 1676 1677# qhasm: (uint32) in6 += x6 1678# asm 1: addl <x6=stack64#23,<in6=int64#3d 1679# asm 2: addl <x6=176(%rsp),<in6=%edx 1680addl 176(%rsp),%edx 1681 1682# qhasm: (uint32) out6 += x7 1683# asm 1: addl <x7=stack64#31,<out6=int64#4d 1684# asm 2: addl <x7=240(%rsp),<out6=%ecx 1685addl 240(%rsp),%ecx 1686 1687# qhasm: out6 <<= 32 1688# asm 1: shl $32,<out6=int64#4 1689# asm 2: shl $32,<out6=%rcx 1690shl $32,%rcx 1691 1692# qhasm: out6 ^= in6 1693# asm 1: xor <in6=int64#3,<out6=int64#4 1694# asm 2: xor <in6=%rdx,<out6=%rcx 1695xor %rdx,%rcx 1696 1697# qhasm: out6 ^= *(uint64 *) (m + 24) 1698# asm 1: xorq 24(<m=int64#2),<out6=int64#4 1699# asm 2: xorq 24(<m=%rsi),<out6=%rcx 1700xorq 24(%rsi),%rcx 1701 1702# qhasm: *(uint64 *) (out + 24) = out6 1703# asm 1: movq <out6=int64#4,24(<out=int64#1) 1704# asm 2: movq <out6=%rcx,24(<out=%rdi) 1705movq %rcx,24(%rdi) 1706 1707# qhasm: in8 = j8 1708# asm 1: movq <j8=stack64#12,>in8=int64#3 1709# asm 2: movq <j8=88(%rsp),>in8=%rdx 1710movq 88(%rsp),%rdx 1711 1712# qhasm: out8 = in8 1713# asm 1: mov <in8=int64#3,>out8=int64#4 1714# asm 2: mov <in8=%rdx,>out8=%rcx 1715mov %rdx,%rcx 1716 1717# qhasm: (uint64) out8 >>= 32 1718# asm 1: shr $32,<out8=int64#4 1719# asm 2: shr $32,<out8=%rcx 1720shr $32,%rcx 1721 1722# qhasm: (uint32) in8 += x8 1723# asm 1: addl <x8=stack64#24,<in8=int64#3d 1724# asm 2: addl <x8=184(%rsp),<in8=%edx 1725addl 184(%rsp),%edx 1726 1727# qhasm: (uint32) out8 += x9 1728# asm 1: addl <x9=stack64#32,<out8=int64#4d 1729# asm 2: addl <x9=248(%rsp),<out8=%ecx 1730addl 248(%rsp),%ecx 1731 1732# qhasm: out8 <<= 32 1733# asm 1: shl $32,<out8=int64#4 1734# asm 2: shl $32,<out8=%rcx 1735shl $32,%rcx 1736 1737# qhasm: out8 ^= in8 1738# asm 1: xor <in8=int64#3,<out8=int64#4 1739# asm 2: xor <in8=%rdx,<out8=%rcx 1740xor %rdx,%rcx 1741 1742# qhasm: out8 ^= *(uint64 *) (m + 32) 1743# asm 1: xorq 32(<m=int64#2),<out8=int64#4 1744# asm 2: xorq 32(<m=%rsi),<out8=%rcx 1745xorq 32(%rsi),%rcx 1746 1747# qhasm: *(uint64 *) (out + 32) = out8 1748# asm 1: movq <out8=int64#4,32(<out=int64#1) 1749# asm 2: movq <out8=%rcx,32(<out=%rdi) 1750movq %rcx,32(%rdi) 1751 1752# qhasm: in10 = j10 1753# asm 1: movq <j10=stack64#13,>in10=int64#3 1754# asm 2: movq <j10=96(%rsp),>in10=%rdx 1755movq 96(%rsp),%rdx 1756 1757# qhasm: out10 = in10 1758# asm 1: mov <in10=int64#3,>out10=int64#4 1759# asm 2: mov <in10=%rdx,>out10=%rcx 1760mov %rdx,%rcx 1761 1762# qhasm: (uint64) out10 >>= 32 1763# asm 1: shr $32,<out10=int64#4 1764# asm 2: shr $32,<out10=%rcx 1765shr $32,%rcx 1766 1767# qhasm: (uint32) in10 += x10 1768# asm 1: addl <x10=stack64#25,<in10=int64#3d 1769# asm 2: addl <x10=192(%rsp),<in10=%edx 1770addl 192(%rsp),%edx 1771 1772# qhasm: (uint32) out10 += x11 1773# asm 1: addl <x11=stack64#33,<out10=int64#4d 1774# asm 2: addl <x11=256(%rsp),<out10=%ecx 1775addl 256(%rsp),%ecx 1776 1777# qhasm: out10 <<= 32 1778# asm 1: shl $32,<out10=int64#4 1779# asm 2: shl $32,<out10=%rcx 1780shl $32,%rcx 1781 1782# qhasm: out10 ^= in10 1783# asm 1: xor <in10=int64#3,<out10=int64#4 1784# asm 2: xor <in10=%rdx,<out10=%rcx 1785xor %rdx,%rcx 1786 1787# qhasm: out10 ^= *(uint64 *) (m + 40) 1788# asm 1: xorq 40(<m=int64#2),<out10=int64#4 1789# asm 2: xorq 40(<m=%rsi),<out10=%rcx 1790xorq 40(%rsi),%rcx 1791 1792# qhasm: *(uint64 *) (out + 40) = out10 1793# asm 1: movq <out10=int64#4,40(<out=int64#1) 1794# asm 2: movq <out10=%rcx,40(<out=%rdi) 1795movq %rcx,40(%rdi) 1796 1797# qhasm: in12 = j12 1798# asm 1: movq <j12=stack64#14,>in12=int64#3 1799# asm 2: movq <j12=104(%rsp),>in12=%rdx 1800movq 104(%rsp),%rdx 1801 1802# qhasm: out12 = in12 1803# asm 1: mov <in12=int64#3,>out12=int64#4 1804# asm 2: mov <in12=%rdx,>out12=%rcx 1805mov %rdx,%rcx 1806 1807# qhasm: (uint64) out12 >>= 32 1808# asm 1: shr $32,<out12=int64#4 1809# asm 2: shr $32,<out12=%rcx 1810shr $32,%rcx 1811 1812# qhasm: (uint32) in12 += x12 1813# asm 1: addl <x12=stack64#26,<in12=int64#3d 1814# asm 2: addl <x12=200(%rsp),<in12=%edx 1815addl 200(%rsp),%edx 1816 1817# qhasm: (uint32) out12 += x13 1818# asm 1: addl <x13=stack64#34,<out12=int64#4d 1819# asm 2: addl <x13=264(%rsp),<out12=%ecx 1820addl 264(%rsp),%ecx 1821 1822# qhasm: out12 <<= 32 1823# asm 1: shl $32,<out12=int64#4 1824# asm 2: shl $32,<out12=%rcx 1825shl $32,%rcx 1826 1827# qhasm: out12 ^= in12 1828# asm 1: xor <in12=int64#3,<out12=int64#4 1829# asm 2: xor <in12=%rdx,<out12=%rcx 1830xor %rdx,%rcx 1831 1832# qhasm: out12 ^= *(uint64 *) (m + 48) 1833# asm 1: xorq 48(<m=int64#2),<out12=int64#4 1834# asm 2: xorq 48(<m=%rsi),<out12=%rcx 1835xorq 48(%rsi),%rcx 1836 1837# qhasm: *(uint64 *) (out + 48) = out12 1838# asm 1: movq <out12=int64#4,48(<out=int64#1) 1839# asm 2: movq <out12=%rcx,48(<out=%rdi) 1840movq %rcx,48(%rdi) 1841 1842# qhasm: in14 = j14 1843# asm 1: movq <j14=stack64#15,>in14=int64#3 1844# asm 2: movq <j14=112(%rsp),>in14=%rdx 1845movq 112(%rsp),%rdx 1846 1847# qhasm: out14 = in14 1848# asm 1: mov <in14=int64#3,>out14=int64#4 1849# asm 2: mov <in14=%rdx,>out14=%rcx 1850mov %rdx,%rcx 1851 1852# qhasm: (uint64) out14 >>= 32 1853# asm 1: shr $32,<out14=int64#4 1854# asm 2: shr $32,<out14=%rcx 1855shr $32,%rcx 1856 1857# qhasm: (uint32) in14 += x14 1858# asm 1: addl <x14=stack64#27,<in14=int64#3d 1859# asm 2: addl <x14=208(%rsp),<in14=%edx 1860addl 208(%rsp),%edx 1861 1862# qhasm: (uint32) out14 += x15 1863# asm 1: addl <x15=stack64#35,<out14=int64#4d 1864# asm 2: addl <x15=272(%rsp),<out14=%ecx 1865addl 272(%rsp),%ecx 1866 1867# qhasm: out14 <<= 32 1868# asm 1: shl $32,<out14=int64#4 1869# asm 2: shl $32,<out14=%rcx 1870shl $32,%rcx 1871 1872# qhasm: out14 ^= in14 1873# asm 1: xor <in14=int64#3,<out14=int64#4 1874# asm 2: xor <in14=%rdx,<out14=%rcx 1875xor %rdx,%rcx 1876 1877# qhasm: out14 ^= *(uint64 *) (m + 56) 1878# asm 1: xorq 56(<m=int64#2),<out14=int64#4 1879# asm 2: xorq 56(<m=%rsi),<out14=%rcx 1880xorq 56(%rsi),%rcx 1881 1882# qhasm: *(uint64 *) (out + 56) = out14 1883# asm 1: movq <out14=int64#4,56(<out=int64#1) 1884# asm 2: movq <out14=%rcx,56(<out=%rdi) 1885movq %rcx,56(%rdi) 1886 1887# qhasm: bytes = bytes_backup 1888# asm 1: movq <bytes_backup=stack64#19,>bytes=int64#3 1889# asm 2: movq <bytes_backup=144(%rsp),>bytes=%rdx 1890movq 144(%rsp),%rdx 1891 1892# qhasm: in8 = j8 1893# asm 1: movq <j8=stack64#12,>in8=int64#4 1894# asm 2: movq <j8=88(%rsp),>in8=%rcx 1895movq 88(%rsp),%rcx 1896 1897# qhasm: in8 += 1 1898# asm 1: add $1,<in8=int64#4 1899# asm 2: add $1,<in8=%rcx 1900add $1,%rcx 1901 1902# qhasm: j8 = in8 1903# asm 1: movq <in8=int64#4,>j8=stack64#12 1904# asm 2: movq <in8=%rcx,>j8=88(%rsp) 1905movq %rcx,88(%rsp) 1906 1907# qhasm: unsigned>? unsigned<? bytes - 64 1908# asm 1: cmp $64,<bytes=int64#3 1909# asm 2: cmp $64,<bytes=%rdx 1910cmp $64,%rdx 1911# comment:fp stack unchanged by jump 1912 1913# qhasm: goto bytesatleast65 if unsigned> 1914ja ._bytesatleast65 1915# comment:fp stack unchanged by jump 1916 1917# qhasm: goto bytesatleast64 if !unsigned< 1918jae ._bytesatleast64 1919 1920# qhasm: m = out 1921# asm 1: mov <out=int64#1,>m=int64#2 1922# asm 2: mov <out=%rdi,>m=%rsi 1923mov %rdi,%rsi 1924 1925# qhasm: out = ctarget 1926# asm 1: mov <ctarget=int64#5,>out=int64#1 1927# asm 2: mov <ctarget=%r8,>out=%rdi 1928mov %r8,%rdi 1929 1930# qhasm: i = bytes 1931# asm 1: mov <bytes=int64#3,>i=int64#4 1932# asm 2: mov <bytes=%rdx,>i=%rcx 1933mov %rdx,%rcx 1934 1935# qhasm: while (i) { *out++ = *m++; --i } 1936rep movsb 1937# comment:fp stack unchanged by fallthrough 1938 1939# qhasm: bytesatleast64: 1940._bytesatleast64: 1941 1942# qhasm: x = x_backup 1943# asm 1: movq <x_backup=stack64#16,>x=int64#1 1944# asm 2: movq <x_backup=120(%rsp),>x=%rdi 1945movq 120(%rsp),%rdi 1946 1947# qhasm: in8 = j8 1948# asm 1: movq <j8=stack64#12,>in8=int64#2 1949# asm 2: movq <j8=88(%rsp),>in8=%rsi 1950movq 88(%rsp),%rsi 1951 1952# qhasm: *(uint64 *) (x + 32) = in8 1953# asm 1: movq <in8=int64#2,32(<x=int64#1) 1954# asm 2: movq <in8=%rsi,32(<x=%rdi) 1955movq %rsi,32(%rdi) 1956 1957# qhasm: r11 = r11_stack 1958# asm 1: movq <r11_stack=stack64#1,>r11=int64#9 1959# asm 2: movq <r11_stack=0(%rsp),>r11=%r11 1960movq 0(%rsp),%r11 1961 1962# qhasm: r12 = r12_stack 1963# asm 1: movq <r12_stack=stack64#2,>r12=int64#10 1964# asm 2: movq <r12_stack=8(%rsp),>r12=%r12 1965movq 8(%rsp),%r12 1966 1967# qhasm: r13 = r13_stack 1968# asm 1: movq <r13_stack=stack64#3,>r13=int64#11 1969# asm 2: movq <r13_stack=16(%rsp),>r13=%r13 1970movq 16(%rsp),%r13 1971 1972# qhasm: r14 = r14_stack 1973# asm 1: movq <r14_stack=stack64#4,>r14=int64#12 1974# asm 2: movq <r14_stack=24(%rsp),>r14=%r14 1975movq 24(%rsp),%r14 1976 1977# qhasm: r15 = r15_stack 1978# asm 1: movq <r15_stack=stack64#5,>r15=int64#13 1979# asm 2: movq <r15_stack=32(%rsp),>r15=%r15 1980movq 32(%rsp),%r15 1981 1982# qhasm: rbx = rbx_stack 1983# asm 1: movq <rbx_stack=stack64#6,>rbx=int64#14 1984# asm 2: movq <rbx_stack=40(%rsp),>rbx=%rbx 1985movq 40(%rsp),%rbx 1986 1987# qhasm: rbp = rbp_stack 1988# asm 1: movq <rbp_stack=stack64#7,>rbp=int64#15 1989# asm 2: movq <rbp_stack=48(%rsp),>rbp=%rbp 1990movq 48(%rsp),%rbp 1991# comment:fp stack unchanged by fallthrough 1992 1993# qhasm: done: 1994._done: 1995 1996# qhasm: leave 1997add %r11,%rsp 1998mov %rdi,%rax 1999mov %rsi,%rdx 2000ret 2001 2002# qhasm: bytesatleast65: 2003._bytesatleast65: 2004 2005# qhasm: bytes -= 64 2006# asm 1: sub $64,<bytes=int64#3 2007# asm 2: sub $64,<bytes=%rdx 2008sub $64,%rdx 2009 2010# qhasm: out += 64 2011# asm 1: add $64,<out=int64#1 2012# asm 2: add $64,<out=%rdi 2013add $64,%rdi 2014 2015# qhasm: m += 64 2016# asm 1: add $64,<m=int64#2 2017# asm 2: add $64,<m=%rsi 2018add $64,%rsi 2019# comment:fp stack unchanged by jump 2020 2021# qhasm: goto bytesatleast1 2022jmp ._bytesatleast1 2023 2024# qhasm: enter ECRYPT_init 2025.text 2026.p2align 5 2027.globl _ECRYPT_init 2028.globl ECRYPT_init 2029_ECRYPT_init: 2030ECRYPT_init: 2031mov %rsp,%r11 2032and $31,%r11 2033add $352,%r11 2034sub %r11,%rsp 2035 2036# qhasm: leave 2037add %r11,%rsp 2038mov %rdi,%rax 2039mov %rsi,%rdx 2040ret 2041 2042# qhasm: enter ECRYPT_keysetup 2043.text 2044.p2align 5 2045.globl _ECRYPT_keysetup 2046.globl ECRYPT_keysetup 2047_ECRYPT_keysetup: 2048ECRYPT_keysetup: 2049mov %rsp,%r11 2050and $31,%r11 2051add $352,%r11 2052sub %r11,%rsp 2053 2054# qhasm: k = arg2 2055# asm 1: mov <arg2=int64#2,>k=int64#2 2056# asm 2: mov <arg2=%rsi,>k=%rsi 2057mov %rsi,%rsi 2058 2059# qhasm: kbits = arg3 2060# asm 1: mov <arg3=int64#3,>kbits=int64#3 2061# asm 2: mov <arg3=%rdx,>kbits=%rdx 2062mov %rdx,%rdx 2063 2064# qhasm: x = arg1 2065# asm 1: mov <arg1=int64#1,>x=int64#1 2066# asm 2: mov <arg1=%rdi,>x=%rdi 2067mov %rdi,%rdi 2068 2069# qhasm: in0 = *(uint64 *) (k + 0) 2070# asm 1: movq 0(<k=int64#2),>in0=int64#5 2071# asm 2: movq 0(<k=%rsi),>in0=%r8 2072movq 0(%rsi),%r8 2073 2074# qhasm: in2 = *(uint64 *) (k + 8) 2075# asm 1: movq 8(<k=int64#2),>in2=int64#6 2076# asm 2: movq 8(<k=%rsi),>in2=%r9 2077movq 8(%rsi),%r9 2078 2079# qhasm: *(uint64 *) (x + 4) = in0 2080# asm 1: movq <in0=int64#5,4(<x=int64#1) 2081# asm 2: movq <in0=%r8,4(<x=%rdi) 2082movq %r8,4(%rdi) 2083 2084# qhasm: *(uint64 *) (x + 12) = in2 2085# asm 1: movq <in2=int64#6,12(<x=int64#1) 2086# asm 2: movq <in2=%r9,12(<x=%rdi) 2087movq %r9,12(%rdi) 2088 2089# qhasm: unsigned<? kbits - 256 2090# asm 1: cmp $256,<kbits=int64#3 2091# asm 2: cmp $256,<kbits=%rdx 2092cmp $256,%rdx 2093# comment:fp stack unchanged by jump 2094 2095# qhasm: goto kbits128 if unsigned< 2096jb ._kbits128 2097 2098# qhasm: kbits256: 2099._kbits256: 2100 2101# qhasm: in10 = *(uint64 *) (k + 16) 2102# asm 1: movq 16(<k=int64#2),>in10=int64#3 2103# asm 2: movq 16(<k=%rsi),>in10=%rdx 2104movq 16(%rsi),%rdx 2105 2106# qhasm: in12 = *(uint64 *) (k + 24) 2107# asm 1: movq 24(<k=int64#2),>in12=int64#2 2108# asm 2: movq 24(<k=%rsi),>in12=%rsi 2109movq 24(%rsi),%rsi 2110 2111# qhasm: *(uint64 *) (x + 44) = in10 2112# asm 1: movq <in10=int64#3,44(<x=int64#1) 2113# asm 2: movq <in10=%rdx,44(<x=%rdi) 2114movq %rdx,44(%rdi) 2115 2116# qhasm: *(uint64 *) (x + 52) = in12 2117# asm 1: movq <in12=int64#2,52(<x=int64#1) 2118# asm 2: movq <in12=%rsi,52(<x=%rdi) 2119movq %rsi,52(%rdi) 2120 2121# qhasm: in0 = 1634760805 2122# asm 1: mov $1634760805,>in0=int64#2 2123# asm 2: mov $1634760805,>in0=%rsi 2124mov $1634760805,%rsi 2125 2126# qhasm: in4 = 857760878 2127# asm 1: mov $857760878,>in4=int64#3 2128# asm 2: mov $857760878,>in4=%rdx 2129mov $857760878,%rdx 2130 2131# qhasm: in10 = 2036477234 2132# asm 1: mov $2036477234,>in10=int64#4 2133# asm 2: mov $2036477234,>in10=%rcx 2134mov $2036477234,%rcx 2135 2136# qhasm: in14 = 1797285236 2137# asm 1: mov $1797285236,>in14=int64#5 2138# asm 2: mov $1797285236,>in14=%r8 2139mov $1797285236,%r8 2140 2141# qhasm: *(uint32 *) (x + 0) = in0 2142# asm 1: movl <in0=int64#2d,0(<x=int64#1) 2143# asm 2: movl <in0=%esi,0(<x=%rdi) 2144movl %esi,0(%rdi) 2145 2146# qhasm: *(uint32 *) (x + 20) = in4 2147# asm 1: movl <in4=int64#3d,20(<x=int64#1) 2148# asm 2: movl <in4=%edx,20(<x=%rdi) 2149movl %edx,20(%rdi) 2150 2151# qhasm: *(uint32 *) (x + 40) = in10 2152# asm 1: movl <in10=int64#4d,40(<x=int64#1) 2153# asm 2: movl <in10=%ecx,40(<x=%rdi) 2154movl %ecx,40(%rdi) 2155 2156# qhasm: *(uint32 *) (x + 60) = in14 2157# asm 1: movl <in14=int64#5d,60(<x=int64#1) 2158# asm 2: movl <in14=%r8d,60(<x=%rdi) 2159movl %r8d,60(%rdi) 2160# comment:fp stack unchanged by jump 2161 2162# qhasm: goto keysetupdone 2163jmp ._keysetupdone 2164 2165# qhasm: kbits128: 2166._kbits128: 2167 2168# qhasm: in10 = *(uint64 *) (k + 0) 2169# asm 1: movq 0(<k=int64#2),>in10=int64#3 2170# asm 2: movq 0(<k=%rsi),>in10=%rdx 2171movq 0(%rsi),%rdx 2172 2173# qhasm: in12 = *(uint64 *) (k + 8) 2174# asm 1: movq 8(<k=int64#2),>in12=int64#2 2175# asm 2: movq 8(<k=%rsi),>in12=%rsi 2176movq 8(%rsi),%rsi 2177 2178# qhasm: *(uint64 *) (x + 44) = in10 2179# asm 1: movq <in10=int64#3,44(<x=int64#1) 2180# asm 2: movq <in10=%rdx,44(<x=%rdi) 2181movq %rdx,44(%rdi) 2182 2183# qhasm: *(uint64 *) (x + 52) = in12 2184# asm 1: movq <in12=int64#2,52(<x=int64#1) 2185# asm 2: movq <in12=%rsi,52(<x=%rdi) 2186movq %rsi,52(%rdi) 2187 2188# qhasm: in0 = 1634760805 2189# asm 1: mov $1634760805,>in0=int64#2 2190# asm 2: mov $1634760805,>in0=%rsi 2191mov $1634760805,%rsi 2192 2193# qhasm: in4 = 824206446 2194# asm 1: mov $824206446,>in4=int64#3 2195# asm 2: mov $824206446,>in4=%rdx 2196mov $824206446,%rdx 2197 2198# qhasm: in10 = 2036477238 2199# asm 1: mov $2036477238,>in10=int64#4 2200# asm 2: mov $2036477238,>in10=%rcx 2201mov $2036477238,%rcx 2202 2203# qhasm: in14 = 1797285236 2204# asm 1: mov $1797285236,>in14=int64#5 2205# asm 2: mov $1797285236,>in14=%r8 2206mov $1797285236,%r8 2207 2208# qhasm: *(uint32 *) (x + 0) = in0 2209# asm 1: movl <in0=int64#2d,0(<x=int64#1) 2210# asm 2: movl <in0=%esi,0(<x=%rdi) 2211movl %esi,0(%rdi) 2212 2213# qhasm: *(uint32 *) (x + 20) = in4 2214# asm 1: movl <in4=int64#3d,20(<x=int64#1) 2215# asm 2: movl <in4=%edx,20(<x=%rdi) 2216movl %edx,20(%rdi) 2217 2218# qhasm: *(uint32 *) (x + 40) = in10 2219# asm 1: movl <in10=int64#4d,40(<x=int64#1) 2220# asm 2: movl <in10=%ecx,40(<x=%rdi) 2221movl %ecx,40(%rdi) 2222 2223# qhasm: *(uint32 *) (x + 60) = in14 2224# asm 1: movl <in14=int64#5d,60(<x=int64#1) 2225# asm 2: movl <in14=%r8d,60(<x=%rdi) 2226movl %r8d,60(%rdi) 2227 2228# qhasm: keysetupdone: 2229._keysetupdone: 2230 2231# qhasm: leave 2232add %r11,%rsp 2233mov %rdi,%rax 2234mov %rsi,%rdx 2235ret 2236 2237# qhasm: enter ECRYPT_ivsetup 2238.text 2239.p2align 5 2240.globl _ECRYPT_ivsetup 2241.globl ECRYPT_ivsetup 2242_ECRYPT_ivsetup: 2243ECRYPT_ivsetup: 2244mov %rsp,%r11 2245and $31,%r11 2246add $352,%r11 2247sub %r11,%rsp 2248 2249# qhasm: iv = arg2 2250# asm 1: mov <arg2=int64#2,>iv=int64#2 2251# asm 2: mov <arg2=%rsi,>iv=%rsi 2252mov %rsi,%rsi 2253 2254# qhasm: x = arg1 2255# asm 1: mov <arg1=int64#1,>x=int64#1 2256# asm 2: mov <arg1=%rdi,>x=%rdi 2257mov %rdi,%rdi 2258 2259# qhasm: in6 = *(uint64 *) (iv + 0) 2260# asm 1: movq 0(<iv=int64#2),>in6=int64#2 2261# asm 2: movq 0(<iv=%rsi),>in6=%rsi 2262movq 0(%rsi),%rsi 2263 2264# qhasm: in8 = 0 2265# asm 1: mov $0,>in8=int64#5 2266# asm 2: mov $0,>in8=%r8 2267mov $0,%r8 2268 2269# qhasm: *(uint64 *) (x + 24) = in6 2270# asm 1: movq <in6=int64#2,24(<x=int64#1) 2271# asm 2: movq <in6=%rsi,24(<x=%rdi) 2272movq %rsi,24(%rdi) 2273 2274# qhasm: *(uint64 *) (x + 32) = in8 2275# asm 1: movq <in8=int64#5,32(<x=int64#1) 2276# asm 2: movq <in8=%r8,32(<x=%rdi) 2277movq %r8,32(%rdi) 2278 2279# qhasm: leave 2280add %r11,%rsp 2281mov %rdi,%rax 2282mov %rsi,%rdx 2283ret 2284