#!/usr/bin/env perl
#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# November 2014
#
# ChaCha20 for x86_64.
#
# Performance in cycles per byte out of a large buffer.
#
#		IALU/gcc 4.8(i)	1xSSSE3/SSE2	4xSSSE3	    8xAVX2
#
# P4		9.48/+99%	-/22.7(ii)	-
# Core2		7.83/+55%	7.90/8.08	4.35
# Westmere	7.19/+50%	5.60/6.70	3.00
# Sandy Bridge	8.31/+42%	5.45/6.76	2.72
# Ivy Bridge	6.71/+46%	5.40/6.49	2.41
# Haswell	5.92/+43%	5.20/6.45	2.42	    1.23
# Silvermont	12.0/+33%	7.75/7.40	7.03(iii)
# Sledgehammer	7.28/+52%	-/14.2(ii)	-
# Bulldozer	9.66/+28%	9.85/11.1	3.06(iv)
# VIA Nano	10.5/+46%	6.72/8.60	6.05
#
# (i)	compared to older gcc 3.x one can observe >2x improvement on
#	most platforms;
# (ii)	as can be seen, SSE2 performance is too low on legacy
#	processors; NxSSE2 results are naturally better, but not
#	impressively better than IALU ones, which is why you won't
#	find SSE2 code below;
# (iii)	this is not an optimal result for Atom because of MSROM
#	limitations, SSE2 can do better, but the gain is considered too
#	low to justify the [maintenance] effort;
# (iv)	Bulldozer actually executes a 4xXOP code path that delivers 2.20;
#
# Modified from upstream OpenSSL to remove the XOP code.

$flavour = shift;
$output  = shift;
if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }

$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
die "can't locate x86_64-xlate.pl";

$avx = 2;

open OUT,"| \"$^X\" $xlate $flavour $output";
*STDOUT=*OUT;

# input parameter block
($out,$inp,$len,$key,$counter)=("%rdi","%rsi","%rdx","%rcx","%r8");

$code.=<<___;
.text

.extern OPENSSL_ia32cap_P

.align 64
.Lzero:
.long 0,0,0,0
.Lone:
.long 1,0,0,0
.Linc:
.long 0,1,2,3
.Lfour:
.long 4,4,4,4
.Lincy:
.long 0,2,4,6,1,3,5,7
.Leight:
.long 8,8,8,8,8,8,8,8
.Lrot16:
.byte 0x2,0x3,0x0,0x1, 0x6,0x7,0x4,0x5, 0xa,0xb,0x8,0x9, 0xe,0xf,0xc,0xd
.Lrot24:
.byte 0x3,0x0,0x1,0x2, 0x7,0x4,0x5,0x6, 0xb,0x8,0x9,0xa, 0xf,0xc,0xd,0xe
.Lsigma:
.asciz "expand 32-byte k"
.asciz "ChaCha20 for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
___

sub AUTOLOAD()		# thunk [simplified] 32-bit style perlasm
{ my $opcode = $AUTOLOAD; $opcode =~ s/.*:://;
  my $arg = pop;
    $arg = "\$$arg" if ($arg*1 eq $arg);
  $code .= "\t$opcode\t".join(',',$arg,reverse @_)."\n";
}

@x=("%eax","%ebx","%ecx","%edx",map("%r${_}d",(8..11)),
    "%nox","%nox","%nox","%nox",map("%r${_}d",(12..15)));
@t=("%esi","%edi");
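# For reference, a plain-Perl ChaCha quarter-round (an illustrative helper
# that is not part of the original module and is never called by this
# generator): ROUND() below emits the same add/xor/rotate sequence as scalar
# x86_64 instructions, two quarter-rounds at a time. The .Lrot16/.Lrot24
# pshufb masks defined above supply the byte-level equivalent of the 16- and
# 8-bit rotations used by the SIMD code paths further down.
sub quarter_round_ref {		# illustrative only, unused
my ($a,$b,$c,$d)=@_;
my $rotl = sub { my ($x,$n)=@_; (($x<<$n)|($x>>(32-$n))) & 0xffffffff; };
	$a=($a+$b)&0xffffffff;	$d=$rotl->($d^$a,16);
	$c=($c+$d)&0xffffffff;	$b=$rotl->($b^$c,12);
	$a=($a+$b)&0xffffffff;	$d=$rotl->($d^$a,8);
	$c=($c+$d)&0xffffffff;	$b=$rotl->($b^$c,7);
	($a,$b,$c,$d);
}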
sub ROUND {			# critical path is 24 cycles per round
my ($a0,$b0,$c0,$d0)=@_;
my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
my ($xc,$xc_)=map("\"$_\"",@t);
my @x=map("\"$_\"",@x);

	# Consider order in which variables are addressed by their
	# index:
	#
	#	a   b   c   d
	#
	#	0   4   8  12 < even round
	#	1   5   9  13
	#	2   6  10  14
	#	3   7  11  15
	#	0   5  10  15 < odd round
	#	1   6  11  12
	#	2   7   8  13
	#	3   4   9  14
	#
	# 'a', 'b' and 'd's are permanently allocated in registers,
	# @x[0..7,12..15], while 'c's are maintained in memory. If
	# you observe the 'c' column, you'll notice that a pair of 'c's
	# is invariant between rounds. This means that we have to reload
	# them once per round, in the middle. This is why you'll see
	# a bunch of 'c' stores and loads in the middle, but none at
	# the beginning or end.

	# Normally instructions would be interleaved to favour in-order
	# execution. Generally out-of-order cores manage it gracefully,
	# but not this time for some reason. As in-order execution
	# cores are a dying breed, old Atom is the only one around,
	# instructions are left uninterleaved. Besides, Atom is better
	# off executing 1xSSSE3 code anyway...

	(
	"&add (@x[$a0],@x[$b0])",	# Q1
	"&xor (@x[$d0],@x[$a0])",
	"&rol (@x[$d0],16)",
	"&add (@x[$a1],@x[$b1])",	# Q2
	"&xor (@x[$d1],@x[$a1])",
	"&rol (@x[$d1],16)",

	"&add ($xc,@x[$d0])",
	"&xor (@x[$b0],$xc)",
	"&rol (@x[$b0],12)",
	"&add ($xc_,@x[$d1])",
	"&xor (@x[$b1],$xc_)",
	"&rol (@x[$b1],12)",

	"&add (@x[$a0],@x[$b0])",
	"&xor (@x[$d0],@x[$a0])",
	"&rol (@x[$d0],8)",
	"&add (@x[$a1],@x[$b1])",
	"&xor (@x[$d1],@x[$a1])",
	"&rol (@x[$d1],8)",

	"&add ($xc,@x[$d0])",
	"&xor (@x[$b0],$xc)",
	"&rol (@x[$b0],7)",
	"&add ($xc_,@x[$d1])",
	"&xor (@x[$b1],$xc_)",
	"&rol (@x[$b1],7)",

	"&mov (\"4*$c0(%rsp)\",$xc)",	# reload pair of 'c's
	"&mov (\"4*$c1(%rsp)\",$xc_)",
	"&mov ($xc,\"4*$c2(%rsp)\")",
	"&mov ($xc_,\"4*$c3(%rsp)\")",

	"&add (@x[$a2],@x[$b2])",	# Q3
	"&xor (@x[$d2],@x[$a2])",
	"&rol (@x[$d2],16)",
	"&add (@x[$a3],@x[$b3])",	# Q4
	"&xor (@x[$d3],@x[$a3])",
	"&rol (@x[$d3],16)",

	"&add ($xc,@x[$d2])",
	"&xor (@x[$b2],$xc)",
	"&rol (@x[$b2],12)",
	"&add ($xc_,@x[$d3])",
	"&xor (@x[$b3],$xc_)",
	"&rol (@x[$b3],12)",

	"&add (@x[$a2],@x[$b2])",
	"&xor (@x[$d2],@x[$a2])",
	"&rol (@x[$d2],8)",
	"&add (@x[$a3],@x[$b3])",
	"&xor (@x[$d3],@x[$a3])",
	"&rol (@x[$d3],8)",

	"&add ($xc,@x[$d2])",
	"&xor (@x[$b2],$xc)",
	"&rol (@x[$b2],7)",
	"&add ($xc_,@x[$d3])",
	"&xor (@x[$b3],$xc_)",
	"&rol (@x[$b3],7)"
	);
}
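# Note (added commentary): ROUND() returns a list of strings rather than
# emitting code directly; the caller eval()s each one, and since 'add',
# 'xor', 'rol' etc. are not defined as Perl subs, the calls fall through to
# AUTOLOAD() above, which appends the instruction to $code. For example,
# with arguments (0,4,8,12) the first string, "&add (@x[$a0],@x[$b0])",
# becomes &add("%eax","%r8d") and is emitted as "add %r8d,%eax", i.e.
# @x[0] += @x[4] in AT&T syntax.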
########################################################################
# Generic code path that handles all lengths on pre-SSSE3 processors.
$code.=<<___;
.globl ChaCha20_ctr32
.type ChaCha20_ctr32,\@function,5
.align 64
ChaCha20_ctr32:
	cmp \$0,$len
	je .Lno_data
	mov OPENSSL_ia32cap_P+4(%rip),%r10
	test \$`1<<(41-32)`,%r10d
	jnz .LChaCha20_ssse3

	push %rbx
	push %rbp
	push %r12
	push %r13
	push %r14
	push %r15
	sub \$64+24,%rsp

	#movdqa .Lsigma(%rip),%xmm0
	movdqu ($key),%xmm1
	movdqu 16($key),%xmm2
	movdqu ($counter),%xmm3
	movdqa .Lone(%rip),%xmm4

	#movdqa %xmm0,4*0(%rsp)		# key[0]
	movdqa %xmm1,4*4(%rsp)		# key[1]
	movdqa %xmm2,4*8(%rsp)		# key[2]
	movdqa %xmm3,4*12(%rsp)		# key[3]
	mov $len,%rbp			# reassign $len
	jmp .Loop_outer

.align 32
.Loop_outer:
	mov \$0x61707865,@x[0]		# 'expa'
	mov \$0x3320646e,@x[1]		# 'nd 3'
	mov \$0x79622d32,@x[2]		# '2-by'
	mov \$0x6b206574,@x[3]		# 'te k'
	mov 4*4(%rsp),@x[4]
	mov 4*5(%rsp),@x[5]
	mov 4*6(%rsp),@x[6]
	mov 4*7(%rsp),@x[7]
	movd %xmm3,@x[12]
	mov 4*13(%rsp),@x[13]
	mov 4*14(%rsp),@x[14]
	mov 4*15(%rsp),@x[15]

	mov %rbp,64+0(%rsp)		# save len
	mov \$10,%ebp
	mov $inp,64+8(%rsp)		# save inp
	movq %xmm2,%rsi			# "@x[8]"
	mov $out,64+16(%rsp)		# save out
	mov %rsi,%rdi
	shr \$32,%rdi			# "@x[9]"
	jmp .Loop

.align 32
.Loop:
___
	foreach (&ROUND (0, 4, 8,12)) { eval; }
	foreach (&ROUND (0, 5,10,15)) { eval; }
	&dec ("%ebp");
	&jnz (".Loop");

$code.=<<___;
	mov @t[1],4*9(%rsp)		# modulo-scheduled
	mov @t[0],4*8(%rsp)
	mov 64(%rsp),%rbp		# load len
	movdqa %xmm2,%xmm1
	mov 64+8(%rsp),$inp		# load inp
	paddd %xmm4,%xmm3		# increment counter
	mov 64+16(%rsp),$out		# load out

	add \$0x61707865,@x[0]		# 'expa'
	add \$0x3320646e,@x[1]		# 'nd 3'
	add \$0x79622d32,@x[2]		# '2-by'
	add \$0x6b206574,@x[3]		# 'te k'
	add 4*4(%rsp),@x[4]
	add 4*5(%rsp),@x[5]
	add 4*6(%rsp),@x[6]
	add 4*7(%rsp),@x[7]
	add 4*12(%rsp),@x[12]
	add 4*13(%rsp),@x[13]
	add 4*14(%rsp),@x[14]
	add 4*15(%rsp),@x[15]
	paddd 4*8(%rsp),%xmm1

	cmp \$64,%rbp
	jb .Ltail

	xor 4*0($inp),@x[0]		# xor with input
	xor 4*1($inp),@x[1]
	xor 4*2($inp),@x[2]
	xor 4*3($inp),@x[3]
	xor 4*4($inp),@x[4]
	xor 4*5($inp),@x[5]
	xor 4*6($inp),@x[6]
	xor 4*7($inp),@x[7]
	movdqu 4*8($inp),%xmm0
	xor 4*12($inp),@x[12]
	xor 4*13($inp),@x[13]
	xor 4*14($inp),@x[14]
	xor 4*15($inp),@x[15]
	lea 4*16($inp),$inp		# inp+=64
	pxor %xmm1,%xmm0

	movdqa %xmm2,4*8(%rsp)
	movd %xmm3,4*12(%rsp)

	mov @x[0],4*0($out)		# write output
	mov @x[1],4*1($out)
	mov @x[2],4*2($out)
	mov @x[3],4*3($out)
	mov @x[4],4*4($out)
	mov @x[5],4*5($out)
	mov @x[6],4*6($out)
	mov @x[7],4*7($out)
	movdqu %xmm0,4*8($out)
	mov @x[12],4*12($out)
	mov @x[13],4*13($out)
	mov @x[14],4*14($out)
	mov @x[15],4*15($out)
	lea 4*16($out),$out		# out+=64

	sub \$64,%rbp
	jnz .Loop_outer

	jmp .Ldone

.align 16
.Ltail:
	mov @x[0],4*0(%rsp)
	mov @x[1],4*1(%rsp)
	xor %rbx,%rbx
	mov @x[2],4*2(%rsp)
	mov @x[3],4*3(%rsp)
	mov @x[4],4*4(%rsp)
	mov @x[5],4*5(%rsp)
	mov @x[6],4*6(%rsp)
	mov @x[7],4*7(%rsp)
	movdqa %xmm1,4*8(%rsp)
	mov @x[12],4*12(%rsp)
	mov @x[13],4*13(%rsp)
	mov @x[14],4*14(%rsp)
	mov @x[15],4*15(%rsp)

.Loop_tail:
	movzb ($inp,%rbx),%eax
	movzb (%rsp,%rbx),%edx
	lea 1(%rbx),%rbx
	xor %edx,%eax
	mov %al,-1($out,%rbx)
	dec %rbp
	jnz .Loop_tail
.Ldone:
	add \$64+24,%rsp
	pop %r15
	pop %r14
	pop %r13
	pop %r12
	pop %rbp
	pop %rbx
.Lno_data:
	ret
.size ChaCha20_ctr32,.-ChaCha20_ctr32
___

########################################################################
# SSSE3 code path that handles shorter lengths
{
my ($a,$b,$c,$d,$t,$t1,$rot16,$rot24)=map("%xmm$_",(0..7));

sub SSSE3ROUND {	# critical path is 20 "SIMD ticks" per round
	&paddd ($a,$b);
	&pxor ($d,$a);
	&pshufb ($d,$rot16);

	&paddd ($c,$d);
	&pxor ($b,$c);
	&movdqa ($t,$b);
	&psrld ($b,20);
	&pslld ($t,12);
	&por ($b,$t);

	&paddd ($a,$b);
	&pxor ($d,$a);
	&pshufb ($d,$rot24);

	&paddd ($c,$d);
	&pxor ($b,$c);
	&movdqa ($t,$b);
	&psrld ($b,25);
	&pslld ($t,7);
	&por ($b,$t);
}

my $xframe = $win64 ? 32+32+8 : 24;

$code.=<<___;
.type ChaCha20_ssse3,\@function,5
.align 32
ChaCha20_ssse3:
.LChaCha20_ssse3:
___
$code.=<<___;
	cmp \$128,$len		# we might throw away some data,
	ja .LChaCha20_4x	# but overall it won't be slower

.Ldo_sse3_after_all:
	push %rbx
	push %rbp
	push %r12
	push %r13
	push %r14
	push %r15

	sub \$64+$xframe,%rsp
___
$code.=<<___ if ($win64);
	movaps %xmm6,64+32(%rsp)
	movaps %xmm7,64+48(%rsp)
___
$code.=<<___;
	movdqa .Lsigma(%rip),$a
	movdqu ($key),$b
	movdqu 16($key),$c
	movdqu ($counter),$d
	movdqa .Lrot16(%rip),$rot16
	movdqa .Lrot24(%rip),$rot24

	movdqa $a,0x00(%rsp)
	movdqa $b,0x10(%rsp)
	movdqa $c,0x20(%rsp)
	movdqa $d,0x30(%rsp)
	mov \$10,%ebp
	jmp .Loop_ssse3

.align 32
.Loop_outer_ssse3:
	movdqa .Lone(%rip),$d
	movdqa 0x00(%rsp),$a
	movdqa 0x10(%rsp),$b
	movdqa 0x20(%rsp),$c
	paddd 0x30(%rsp),$d
	mov \$10,%ebp
	movdqa $d,0x30(%rsp)
	jmp .Loop_ssse3

.align 32
.Loop_ssse3:
___
	&SSSE3ROUND();
	&pshufd ($c,$c,0b01001110);
	&pshufd ($b,$b,0b00111001);
	&pshufd ($d,$d,0b10010011);
	&nop ();

	&SSSE3ROUND();
	&pshufd ($c,$c,0b01001110);
	&pshufd ($b,$b,0b10010011);
	&pshufd ($d,$d,0b00111001);

	&dec ("%ebp");
	&jnz (".Loop_ssse3");

$code.=<<___;
	paddd 0x00(%rsp),$a
	paddd 0x10(%rsp),$b
	paddd 0x20(%rsp),$c
	paddd 0x30(%rsp),$d

	cmp \$64,$len
	jb .Ltail_ssse3

	movdqu 0x00($inp),$t
	movdqu 0x10($inp),$t1
	pxor $t,$a			# xor with input
	movdqu 0x20($inp),$t
	pxor $t1,$b
	movdqu 0x30($inp),$t1
	lea 0x40($inp),$inp		# inp+=64
	pxor $t,$c
	pxor $t1,$d

	movdqu $a,0x00($out)		# write output
	movdqu $b,0x10($out)
	movdqu $c,0x20($out)
	movdqu $d,0x30($out)
	lea 0x40($out),$out		# out+=64

	sub \$64,$len
	jnz .Loop_outer_ssse3

	jmp .Ldone_ssse3

.align 16
.Ltail_ssse3:
	movdqa $a,0x00(%rsp)
	movdqa $b,0x10(%rsp)
	movdqa $c,0x20(%rsp)
	movdqa $d,0x30(%rsp)
	xor %rbx,%rbx

.Loop_tail_ssse3:
	movzb ($inp,%rbx),%eax
	movzb (%rsp,%rbx),%ecx
	lea 1(%rbx),%rbx
	xor %ecx,%eax
	mov %al,-1($out,%rbx)
	dec $len
	jnz .Loop_tail_ssse3

.Ldone_ssse3:
___
$code.=<<___ if ($win64);
	movaps 64+32(%rsp),%xmm6
	movaps 64+48(%rsp),%xmm7
___
$code.=<<___;
	add \$64+$xframe,%rsp
	pop %r15
	pop %r14
	pop %r13
	pop %r12
	pop %rbp
	pop %rbx
	ret
.size ChaCha20_ssse3,.-ChaCha20_ssse3
___
}
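# Note (added commentary) on the single-block loop above: SSSE3ROUND()
# performs a full column round on all four columns at once, one state row
# per register. The pshufd shuffles between the two calls rotate rows $b,
# $c and $d within their registers (e.g. 0b00111001 maps lanes
# (b0,b1,b2,b3) to (b1,b2,b3,b0)) so that the second call operates on the
# diagonals; the second set of shuffles rotates them back. The 16- and
# 8-bit rotations go through the .Lrot16/.Lrot24 pshufb masks, while the
# 12- and 7-bit ones are synthesized with pslld/psrld/por because SSE has
# no packed dword rotate.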
########################################################################
# SSSE3 code path that handles longer messages.
{
# assign variables to favor Atom front-end
my ($xd0,$xd1,$xd2,$xd3, $xt0,$xt1,$xt2,$xt3,
    $xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3)=map("%xmm$_",(0..15));
my @xx=($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
	"%nox","%nox","%nox","%nox", $xd0,$xd1,$xd2,$xd3);

sub SSSE3_lane_ROUND {
my ($a0,$b0,$c0,$d0)=@_;
my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
my ($xc,$xc_,$t0,$t1)=map("\"$_\"",$xt0,$xt1,$xt2,$xt3);
my @x=map("\"$_\"",@xx);

	# Consider order in which variables are addressed by their
	# index:
	#
	#	a   b   c   d
	#
	#	0   4   8  12 < even round
	#	1   5   9  13
	#	2   6  10  14
	#	3   7  11  15
	#	0   5  10  15 < odd round
	#	1   6  11  12
	#	2   7   8  13
	#	3   4   9  14
	#
	# 'a', 'b' and 'd's are permanently allocated in registers,
	# @x[0..7,12..15], while 'c's are maintained in memory. If
	# you observe the 'c' column, you'll notice that a pair of 'c's
	# is invariant between rounds. This means that we have to reload
	# them once per round, in the middle. This is why you'll see
	# a bunch of 'c' stores and loads in the middle, but none at
	# the beginning or end.

	(
	"&paddd (@x[$a0],@x[$b0])",	# Q1
	"&paddd (@x[$a1],@x[$b1])",	# Q2
	"&pxor (@x[$d0],@x[$a0])",
	"&pxor (@x[$d1],@x[$a1])",
	"&pshufb (@x[$d0],$t1)",
	"&pshufb (@x[$d1],$t1)",

	"&paddd ($xc,@x[$d0])",
	"&paddd ($xc_,@x[$d1])",
	"&pxor (@x[$b0],$xc)",
	"&pxor (@x[$b1],$xc_)",
	"&movdqa ($t0,@x[$b0])",
	"&pslld (@x[$b0],12)",
	"&psrld ($t0,20)",
	"&movdqa ($t1,@x[$b1])",
	"&pslld (@x[$b1],12)",
	"&por (@x[$b0],$t0)",
	"&psrld ($t1,20)",
	"&movdqa ($t0,'(%r11)')",	# .Lrot24(%rip)
	"&por (@x[$b1],$t1)",

	"&paddd (@x[$a0],@x[$b0])",
	"&paddd (@x[$a1],@x[$b1])",
	"&pxor (@x[$d0],@x[$a0])",
	"&pxor (@x[$d1],@x[$a1])",
	"&pshufb (@x[$d0],$t0)",
	"&pshufb (@x[$d1],$t0)",

	"&paddd ($xc,@x[$d0])",
	"&paddd ($xc_,@x[$d1])",
	"&pxor (@x[$b0],$xc)",
	"&pxor (@x[$b1],$xc_)",
	"&movdqa ($t1,@x[$b0])",
	"&pslld (@x[$b0],7)",
	"&psrld ($t1,25)",
	"&movdqa ($t0,@x[$b1])",
	"&pslld (@x[$b1],7)",
	"&por (@x[$b0],$t1)",
	"&psrld ($t0,25)",
	"&movdqa ($t1,'(%r10)')",	# .Lrot16(%rip)
	"&por (@x[$b1],$t0)",

	"&movdqa (\"`16*($c0-8)`(%rsp)\",$xc)",	# reload pair of 'c's
	"&movdqa (\"`16*($c1-8)`(%rsp)\",$xc_)",
	"&movdqa ($xc,\"`16*($c2-8)`(%rsp)\")",
	"&movdqa ($xc_,\"`16*($c3-8)`(%rsp)\")",

	"&paddd (@x[$a2],@x[$b2])",	# Q3
	"&paddd (@x[$a3],@x[$b3])",	# Q4
	"&pxor (@x[$d2],@x[$a2])",
	"&pxor (@x[$d3],@x[$a3])",
	"&pshufb (@x[$d2],$t1)",
	"&pshufb (@x[$d3],$t1)",

	"&paddd ($xc,@x[$d2])",
	"&paddd ($xc_,@x[$d3])",
	"&pxor (@x[$b2],$xc)",
	"&pxor (@x[$b3],$xc_)",
	"&movdqa ($t0,@x[$b2])",
	"&pslld (@x[$b2],12)",
	"&psrld ($t0,20)",
	"&movdqa ($t1,@x[$b3])",
	"&pslld (@x[$b3],12)",
	"&por (@x[$b2],$t0)",
	"&psrld ($t1,20)",
	"&movdqa ($t0,'(%r11)')",	# .Lrot24(%rip)
	"&por (@x[$b3],$t1)",

	"&paddd (@x[$a2],@x[$b2])",
	"&paddd (@x[$a3],@x[$b3])",
	"&pxor (@x[$d2],@x[$a2])",
	"&pxor (@x[$d3],@x[$a3])",
	"&pshufb (@x[$d2],$t0)",
	"&pshufb (@x[$d3],$t0)",

	"&paddd ($xc,@x[$d2])",
	"&paddd ($xc_,@x[$d3])",
	"&pxor (@x[$b2],$xc)",
	"&pxor (@x[$b3],$xc_)",
	"&movdqa ($t1,@x[$b2])",
	"&pslld (@x[$b2],7)",
	"&psrld ($t1,25)",
	"&movdqa ($t0,@x[$b3])",
	"&pslld (@x[$b3],7)",
	"&por (@x[$b2],$t1)",
	"&psrld ($t0,25)",
	"&movdqa ($t1,'(%r10)')",	# .Lrot16(%rip)
	"&por (@x[$b3],$t0)"
	);
}
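# Note (added commentary): in this 4x path each %xmm register holds the
# same state word from four independent blocks ("lanes"), so
# SSSE3_lane_ROUND() performs one ChaCha round on four blocks in parallel.
# The 16- and 8-bit rotations are byte shuffles through the masks kept at
# (%r10)/(%r11), while rotl32(x,12) and rotl32(x,7) are synthesized as
# (x<<n)|(x>>(32-n)) with pslld/psrld/por.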
650 "&paddd ($xc_,@x[$d3])", 651 "&pxor (@x[$b2],$xc)", 652 "&pxor (@x[$b3],$xc_)", 653 "&movdqa ($t1,@x[$b2])", 654 "&pslld (@x[$b2],7)", 655 "&psrld ($t1,25)", 656 "&movdqa ($t0,@x[$b3])", 657 "&pslld (@x[$b3],7)", 658 "&por (@x[$b2],$t1)", 659 "&psrld ($t0,25)", 660 "&movdqa ($t1,'(%r10)')", # .Lrot16(%rip) 661 "&por (@x[$b3],$t0)" 662 ); 663} 664 665my $xframe = $win64 ? 0xa0 : 0; 666 667$code.=<<___; 668.type ChaCha20_4x,\@function,5 669.align 32 670ChaCha20_4x: 671.LChaCha20_4x: 672 mov %r10,%r11 673___ 674$code.=<<___ if ($avx>1); 675 shr \$32,%r10 # OPENSSL_ia32cap_P+8 676 test \$`1<<5`,%r10 # test AVX2 677 jnz .LChaCha20_8x 678___ 679$code.=<<___; 680 cmp \$192,$len 681 ja .Lproceed4x 682 683 and \$`1<<26|1<<22`,%r11 # isolate XSAVE+MOVBE 684 cmp \$`1<<22`,%r11 # check for MOVBE without XSAVE 685 je .Ldo_sse3_after_all # to detect Atom 686 687.Lproceed4x: 688 lea -0x78(%rsp),%r11 689 sub \$0x148+$xframe,%rsp 690___ 691 ################ stack layout 692 # +0x00 SIMD equivalent of @x[8-12] 693 # ... 694 # +0x40 constant copy of key[0-2] smashed by lanes 695 # ... 696 # +0x100 SIMD counters (with nonce smashed by lanes) 697 # ... 698 # +0x140 699$code.=<<___ if ($win64); 700 movaps %xmm6,-0x30(%r11) 701 movaps %xmm7,-0x20(%r11) 702 movaps %xmm8,-0x10(%r11) 703 movaps %xmm9,0x00(%r11) 704 movaps %xmm10,0x10(%r11) 705 movaps %xmm11,0x20(%r11) 706 movaps %xmm12,0x30(%r11) 707 movaps %xmm13,0x40(%r11) 708 movaps %xmm14,0x50(%r11) 709 movaps %xmm15,0x60(%r11) 710___ 711$code.=<<___; 712 movdqa .Lsigma(%rip),$xa3 # key[0] 713 movdqu ($key),$xb3 # key[1] 714 movdqu 16($key),$xt3 # key[2] 715 movdqu ($counter),$xd3 # key[3] 716 lea 0x100(%rsp),%rcx # size optimization 717 lea .Lrot16(%rip),%r10 718 lea .Lrot24(%rip),%r11 719 720 pshufd \$0x00,$xa3,$xa0 # smash key by lanes... 721 pshufd \$0x55,$xa3,$xa1 722 movdqa $xa0,0x40(%rsp) # ... 
	pshufd \$0xaa,$xa3,$xa2
	movdqa $xa1,0x50(%rsp)
	pshufd \$0xff,$xa3,$xa3
	movdqa $xa2,0x60(%rsp)
	movdqa $xa3,0x70(%rsp)

	pshufd \$0x00,$xb3,$xb0
	pshufd \$0x55,$xb3,$xb1
	movdqa $xb0,0x80-0x100(%rcx)
	pshufd \$0xaa,$xb3,$xb2
	movdqa $xb1,0x90-0x100(%rcx)
	pshufd \$0xff,$xb3,$xb3
	movdqa $xb2,0xa0-0x100(%rcx)
	movdqa $xb3,0xb0-0x100(%rcx)

	pshufd \$0x00,$xt3,$xt0		# "$xc0"
	pshufd \$0x55,$xt3,$xt1		# "$xc1"
	movdqa $xt0,0xc0-0x100(%rcx)
	pshufd \$0xaa,$xt3,$xt2		# "$xc2"
	movdqa $xt1,0xd0-0x100(%rcx)
	pshufd \$0xff,$xt3,$xt3		# "$xc3"
	movdqa $xt2,0xe0-0x100(%rcx)
	movdqa $xt3,0xf0-0x100(%rcx)

	pshufd \$0x00,$xd3,$xd0
	pshufd \$0x55,$xd3,$xd1
	paddd .Linc(%rip),$xd0		# don't save counters yet
	pshufd \$0xaa,$xd3,$xd2
	movdqa $xd1,0x110-0x100(%rcx)
	pshufd \$0xff,$xd3,$xd3
	movdqa $xd2,0x120-0x100(%rcx)
	movdqa $xd3,0x130-0x100(%rcx)

	jmp .Loop_enter4x

.align 32
.Loop_outer4x:
	movdqa 0x40(%rsp),$xa0		# re-load smashed key
	movdqa 0x50(%rsp),$xa1
	movdqa 0x60(%rsp),$xa2
	movdqa 0x70(%rsp),$xa3
	movdqa 0x80-0x100(%rcx),$xb0
	movdqa 0x90-0x100(%rcx),$xb1
	movdqa 0xa0-0x100(%rcx),$xb2
	movdqa 0xb0-0x100(%rcx),$xb3
	movdqa 0xc0-0x100(%rcx),$xt0	# "$xc0"
	movdqa 0xd0-0x100(%rcx),$xt1	# "$xc1"
	movdqa 0xe0-0x100(%rcx),$xt2	# "$xc2"
	movdqa 0xf0-0x100(%rcx),$xt3	# "$xc3"
	movdqa 0x100-0x100(%rcx),$xd0
	movdqa 0x110-0x100(%rcx),$xd1
	movdqa 0x120-0x100(%rcx),$xd2
	movdqa 0x130-0x100(%rcx),$xd3
	paddd .Lfour(%rip),$xd0		# next SIMD counters

.Loop_enter4x:
	movdqa $xt2,0x20(%rsp)		# SIMD equivalent of "@x[10]"
	movdqa $xt3,0x30(%rsp)		# SIMD equivalent of "@x[11]"
	movdqa (%r10),$xt3		# .Lrot16(%rip)
	mov \$10,%eax
	movdqa $xd0,0x100-0x100(%rcx)	# save SIMD counters
	jmp .Loop4x

.align 32
.Loop4x:
___
	foreach (&SSSE3_lane_ROUND(0, 4, 8,12)) { eval; }
	foreach (&SSSE3_lane_ROUND(0, 5,10,15)) { eval; }
$code.=<<___;
	dec %eax
	jnz .Loop4x

	paddd 0x40(%rsp),$xa0		# accumulate key material
	paddd 0x50(%rsp),$xa1
	paddd 0x60(%rsp),$xa2
	paddd 0x70(%rsp),$xa3

	movdqa $xa0,$xt2		# "de-interlace" data
	punpckldq $xa1,$xa0
	movdqa $xa2,$xt3
	punpckldq $xa3,$xa2
	punpckhdq $xa1,$xt2
	punpckhdq $xa3,$xt3
	movdqa $xa0,$xa1
	punpcklqdq $xa2,$xa0		# "a0"
	movdqa $xt2,$xa3
	punpcklqdq $xt3,$xt2		# "a2"
	punpckhqdq $xa2,$xa1		# "a1"
	punpckhqdq $xt3,$xa3		# "a3"
___
	($xa2,$xt2)=($xt2,$xa2);
$code.=<<___;
	paddd 0x80-0x100(%rcx),$xb0
	paddd 0x90-0x100(%rcx),$xb1
	paddd 0xa0-0x100(%rcx),$xb2
	paddd 0xb0-0x100(%rcx),$xb3

	movdqa $xa0,0x00(%rsp)		# offload $xaN
	movdqa $xa1,0x10(%rsp)
	movdqa 0x20(%rsp),$xa0		# "xc2"
	movdqa 0x30(%rsp),$xa1		# "xc3"

	movdqa $xb0,$xt2
	punpckldq $xb1,$xb0
	movdqa $xb2,$xt3
	punpckldq $xb3,$xb2
	punpckhdq $xb1,$xt2
	punpckhdq $xb3,$xt3
	movdqa $xb0,$xb1
	punpcklqdq $xb2,$xb0		# "b0"
	movdqa $xt2,$xb3
	punpcklqdq $xt3,$xt2		# "b2"
	punpckhqdq $xb2,$xb1		# "b1"
	punpckhqdq $xt3,$xb3		# "b3"
___
	($xb2,$xt2)=($xt2,$xb2);
	my ($xc0,$xc1,$xc2,$xc3)=($xt0,$xt1,$xa0,$xa1);
$code.=<<___;
	paddd 0xc0-0x100(%rcx),$xc0
	paddd 0xd0-0x100(%rcx),$xc1
	paddd 0xe0-0x100(%rcx),$xc2
	paddd 0xf0-0x100(%rcx),$xc3

	movdqa $xa2,0x20(%rsp)		# keep offloading $xaN
	movdqa $xa3,0x30(%rsp)

	movdqa $xc0,$xt2
	punpckldq $xc1,$xc0
	movdqa $xc2,$xt3
	punpckldq $xc3,$xc2
	punpckhdq $xc1,$xt2
	punpckhdq $xc3,$xt3
	movdqa $xc0,$xc1
	punpcklqdq $xc2,$xc0		# "c0"
	movdqa $xt2,$xc3
	punpcklqdq $xt3,$xt2		# "c2"
	punpckhqdq $xc2,$xc1		# "c1"
	punpckhqdq $xt3,$xc3		# "c3"
___
	($xc2,$xt2)=($xt2,$xc2);
	($xt0,$xt1)=($xa2,$xa3);	# use $xaN as temporary
$code.=<<___;
	paddd 0x100-0x100(%rcx),$xd0
	paddd 0x110-0x100(%rcx),$xd1
	paddd 0x120-0x100(%rcx),$xd2
	paddd 0x130-0x100(%rcx),$xd3

	movdqa $xd0,$xt2
	punpckldq $xd1,$xd0
	movdqa $xd2,$xt3
	punpckldq $xd3,$xd2
	punpckhdq $xd1,$xt2
	punpckhdq $xd3,$xt3
	movdqa $xd0,$xd1
	punpcklqdq $xd2,$xd0		# "d0"
	movdqa $xt2,$xd3
	punpcklqdq $xt3,$xt2		# "d2"
	punpckhqdq $xd2,$xd1		# "d1"
	punpckhqdq $xt3,$xd3		# "d3"
___
	($xd2,$xt2)=($xt2,$xd2);
$code.=<<___;
	cmp \$64*4,$len
	jb .Ltail4x

	movdqu 0x00($inp),$xt0		# xor with input
	movdqu 0x10($inp),$xt1
	movdqu 0x20($inp),$xt2
	movdqu 0x30($inp),$xt3
	pxor 0x00(%rsp),$xt0		# $xaN is offloaded, remember?
	pxor $xb0,$xt1
	pxor $xc0,$xt2
	pxor $xd0,$xt3

	movdqu $xt0,0x00($out)
	movdqu 0x40($inp),$xt0
	movdqu $xt1,0x10($out)
	movdqu 0x50($inp),$xt1
	movdqu $xt2,0x20($out)
	movdqu 0x60($inp),$xt2
	movdqu $xt3,0x30($out)
	movdqu 0x70($inp),$xt3
	lea 0x80($inp),$inp		# size optimization
	pxor 0x10(%rsp),$xt0
	pxor $xb1,$xt1
	pxor $xc1,$xt2
	pxor $xd1,$xt3

	movdqu $xt0,0x40($out)
	movdqu 0x00($inp),$xt0
	movdqu $xt1,0x50($out)
	movdqu 0x10($inp),$xt1
	movdqu $xt2,0x60($out)
	movdqu 0x20($inp),$xt2
	movdqu $xt3,0x70($out)
	lea 0x80($out),$out		# size optimization
	movdqu 0x30($inp),$xt3
	pxor 0x20(%rsp),$xt0
	pxor $xb2,$xt1
	pxor $xc2,$xt2
	pxor $xd2,$xt3

	movdqu $xt0,0x00($out)
	movdqu 0x40($inp),$xt0
	movdqu $xt1,0x10($out)
	movdqu 0x50($inp),$xt1
	movdqu $xt2,0x20($out)
	movdqu 0x60($inp),$xt2
	movdqu $xt3,0x30($out)
	movdqu 0x70($inp),$xt3
	lea 0x80($inp),$inp		# inp+=64*4
	pxor 0x30(%rsp),$xt0
	pxor $xb3,$xt1
	pxor $xc3,$xt2
	pxor $xd3,$xt3
	movdqu $xt0,0x40($out)
	movdqu $xt1,0x50($out)
	movdqu $xt2,0x60($out)
	movdqu $xt3,0x70($out)
	lea 0x80($out),$out		# out+=64*4

	sub \$64*4,$len
	jnz .Loop_outer4x

	jmp .Ldone4x

.Ltail4x:
	cmp \$192,$len
	jae .L192_or_more4x
	cmp \$128,$len
	jae .L128_or_more4x
	cmp \$64,$len
	jae .L64_or_more4x

	#movdqa 0x00(%rsp),$xt0		# $xaN is offloaded, remember?
	xor %r10,%r10
	#movdqa $xt0,0x00(%rsp)
	movdqa $xb0,0x10(%rsp)
	movdqa $xc0,0x20(%rsp)
	movdqa $xd0,0x30(%rsp)
	jmp .Loop_tail4x

.align 32
.L64_or_more4x:
	movdqu 0x00($inp),$xt0		# xor with input
	movdqu 0x10($inp),$xt1
	movdqu 0x20($inp),$xt2
	movdqu 0x30($inp),$xt3
	pxor 0x00(%rsp),$xt0		# $xaN is offloaded, remember?
	pxor $xb0,$xt1
	pxor $xc0,$xt2
	pxor $xd0,$xt3
	movdqu $xt0,0x00($out)
	movdqu $xt1,0x10($out)
	movdqu $xt2,0x20($out)
	movdqu $xt3,0x30($out)
	je .Ldone4x

	movdqa 0x10(%rsp),$xt0		# $xaN is offloaded, remember?
	lea 0x40($inp),$inp		# inp+=64*1
	xor %r10,%r10
	movdqa $xt0,0x00(%rsp)
	movdqa $xb1,0x10(%rsp)
	lea 0x40($out),$out		# out+=64*1
	movdqa $xc1,0x20(%rsp)
	sub \$64,$len			# len-=64*1
	movdqa $xd1,0x30(%rsp)
	jmp .Loop_tail4x

.align 32
.L128_or_more4x:
	movdqu 0x00($inp),$xt0		# xor with input
	movdqu 0x10($inp),$xt1
	movdqu 0x20($inp),$xt2
	movdqu 0x30($inp),$xt3
	pxor 0x00(%rsp),$xt0		# $xaN is offloaded, remember?
	pxor $xb0,$xt1
	pxor $xc0,$xt2
	pxor $xd0,$xt3

	movdqu $xt0,0x00($out)
	movdqu 0x40($inp),$xt0
	movdqu $xt1,0x10($out)
	movdqu 0x50($inp),$xt1
	movdqu $xt2,0x20($out)
	movdqu 0x60($inp),$xt2
	movdqu $xt3,0x30($out)
	movdqu 0x70($inp),$xt3
	pxor 0x10(%rsp),$xt0
	pxor $xb1,$xt1
	pxor $xc1,$xt2
	pxor $xd1,$xt3
	movdqu $xt0,0x40($out)
	movdqu $xt1,0x50($out)
	movdqu $xt2,0x60($out)
	movdqu $xt3,0x70($out)
	je .Ldone4x

	movdqa 0x20(%rsp),$xt0		# $xaN is offloaded, remember?
	lea 0x80($inp),$inp		# inp+=64*2
	xor %r10,%r10
	movdqa $xt0,0x00(%rsp)
	movdqa $xb2,0x10(%rsp)
	lea 0x80($out),$out		# out+=64*2
	movdqa $xc2,0x20(%rsp)
	sub \$128,$len			# len-=64*2
	movdqa $xd2,0x30(%rsp)
	jmp .Loop_tail4x

.align 32
.L192_or_more4x:
	movdqu 0x00($inp),$xt0		# xor with input
	movdqu 0x10($inp),$xt1
	movdqu 0x20($inp),$xt2
	movdqu 0x30($inp),$xt3
	pxor 0x00(%rsp),$xt0		# $xaN is offloaded, remember?
	pxor $xb0,$xt1
	pxor $xc0,$xt2
	pxor $xd0,$xt3

	movdqu $xt0,0x00($out)
	movdqu 0x40($inp),$xt0
	movdqu $xt1,0x10($out)
	movdqu 0x50($inp),$xt1
	movdqu $xt2,0x20($out)
	movdqu 0x60($inp),$xt2
	movdqu $xt3,0x30($out)
	movdqu 0x70($inp),$xt3
	lea 0x80($inp),$inp		# size optimization
	pxor 0x10(%rsp),$xt0
	pxor $xb1,$xt1
	pxor $xc1,$xt2
	pxor $xd1,$xt3

	movdqu $xt0,0x40($out)
	movdqu 0x00($inp),$xt0
	movdqu $xt1,0x50($out)
	movdqu 0x10($inp),$xt1
	movdqu $xt2,0x60($out)
	movdqu 0x20($inp),$xt2
	movdqu $xt3,0x70($out)
	lea 0x80($out),$out		# size optimization
	movdqu 0x30($inp),$xt3
	pxor 0x20(%rsp),$xt0
	pxor $xb2,$xt1
	pxor $xc2,$xt2
	pxor $xd2,$xt3
	movdqu $xt0,0x00($out)
	movdqu $xt1,0x10($out)
	movdqu $xt2,0x20($out)
	movdqu $xt3,0x30($out)
	je .Ldone4x

	movdqa 0x30(%rsp),$xt0		# $xaN is offloaded, remember?
	lea 0x40($inp),$inp		# inp+=64*3
	xor %r10,%r10
	movdqa $xt0,0x00(%rsp)
	movdqa $xb3,0x10(%rsp)
	lea 0x40($out),$out		# out+=64*3
	movdqa $xc3,0x20(%rsp)
	sub \$192,$len			# len-=64*3
	movdqa $xd3,0x30(%rsp)

.Loop_tail4x:
	movzb ($inp,%r10),%eax
	movzb (%rsp,%r10),%ecx
	lea 1(%r10),%r10
	xor %ecx,%eax
	mov %al,-1($out,%r10)
	dec $len
	jnz .Loop_tail4x

.Ldone4x:
___
$code.=<<___ if ($win64);
	lea 0x140+0x30(%rsp),%r11
	movaps -0x30(%r11),%xmm6
	movaps -0x20(%r11),%xmm7
	movaps -0x10(%r11),%xmm8
	movaps 0x00(%r11),%xmm9
	movaps 0x10(%r11),%xmm10
	movaps 0x20(%r11),%xmm11
	movaps 0x30(%r11),%xmm12
	movaps 0x40(%r11),%xmm13
	movaps 0x50(%r11),%xmm14
	movaps 0x60(%r11),%xmm15
___
$code.=<<___;
	add \$0x148+$xframe,%rsp
	ret
.size ChaCha20_4x,.-ChaCha20_4x
___
}

########################################################################
# AVX2 code path
if ($avx>1) {
my ($xb0,$xb1,$xb2,$xb3, $xd0,$xd1,$xd2,$xd3,
    $xa0,$xa1,$xa2,$xa3, $xt0,$xt1,$xt2,$xt3)=map("%ymm$_",(0..15));
my @xx=($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
	"%nox","%nox","%nox","%nox", $xd0,$xd1,$xd2,$xd3);

sub AVX2_lane_ROUND {
my ($a0,$b0,$c0,$d0)=@_;
my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
my ($xc,$xc_,$t0,$t1)=map("\"$_\"",$xt0,$xt1,$xt2,$xt3);
my @x=map("\"$_\"",@xx);

	# Consider order in which variables are addressed by their
	# index:
	#
	#	a   b   c   d
	#
	#	0   4   8  12 < even round
	#	1   5   9  13
	#	2   6  10  14
	#	3   7  11  15
	#	0   5  10  15 < odd round
	#	1   6  11  12
	#	2   7   8  13
	#	3   4   9  14
	#
	# 'a', 'b' and 'd's are permanently allocated in registers,
	# @x[0..7,12..15], while 'c's are maintained in memory. If
	# you observe the 'c' column, you'll notice that a pair of 'c's
	# is invariant between rounds. This means that we have to reload
	# them once per round, in the middle. This is why you'll see
	# a bunch of 'c' stores and loads in the middle, but none at
	# the beginning or end.
	(
	"&vpaddd (@x[$a0],@x[$a0],@x[$b0])",	# Q1
	"&vpxor (@x[$d0],@x[$a0],@x[$d0])",
	"&vpshufb (@x[$d0],@x[$d0],$t1)",
	"&vpaddd (@x[$a1],@x[$a1],@x[$b1])",	# Q2
	"&vpxor (@x[$d1],@x[$a1],@x[$d1])",
	"&vpshufb (@x[$d1],@x[$d1],$t1)",

	"&vpaddd ($xc,$xc,@x[$d0])",
	"&vpxor (@x[$b0],$xc,@x[$b0])",
	"&vpslld ($t0,@x[$b0],12)",
	"&vpsrld (@x[$b0],@x[$b0],20)",
	"&vpor (@x[$b0],$t0,@x[$b0])",
	"&vbroadcasti128($t0,'(%r11)')",	# .Lrot24(%rip)
	"&vpaddd ($xc_,$xc_,@x[$d1])",
	"&vpxor (@x[$b1],$xc_,@x[$b1])",
	"&vpslld ($t1,@x[$b1],12)",
	"&vpsrld (@x[$b1],@x[$b1],20)",
	"&vpor (@x[$b1],$t1,@x[$b1])",

	"&vpaddd (@x[$a0],@x[$a0],@x[$b0])",
	"&vpxor (@x[$d0],@x[$a0],@x[$d0])",
	"&vpshufb (@x[$d0],@x[$d0],$t0)",
	"&vpaddd (@x[$a1],@x[$a1],@x[$b1])",
	"&vpxor (@x[$d1],@x[$a1],@x[$d1])",
	"&vpshufb (@x[$d1],@x[$d1],$t0)",

	"&vpaddd ($xc,$xc,@x[$d0])",
	"&vpxor (@x[$b0],$xc,@x[$b0])",
	"&vpslld ($t1,@x[$b0],7)",
	"&vpsrld (@x[$b0],@x[$b0],25)",
	"&vpor (@x[$b0],$t1,@x[$b0])",
	"&vbroadcasti128($t1,'(%r10)')",	# .Lrot16(%rip)
	"&vpaddd ($xc_,$xc_,@x[$d1])",
	"&vpxor (@x[$b1],$xc_,@x[$b1])",
	"&vpslld ($t0,@x[$b1],7)",
	"&vpsrld (@x[$b1],@x[$b1],25)",
	"&vpor (@x[$b1],$t0,@x[$b1])",

	"&vmovdqa (\"`32*($c0-8)`(%rsp)\",$xc)",	# reload pair of 'c's
	"&vmovdqa (\"`32*($c1-8)`(%rsp)\",$xc_)",
	"&vmovdqa ($xc,\"`32*($c2-8)`(%rsp)\")",
	"&vmovdqa ($xc_,\"`32*($c3-8)`(%rsp)\")",

	"&vpaddd (@x[$a2],@x[$a2],@x[$b2])",	# Q3
	"&vpxor (@x[$d2],@x[$a2],@x[$d2])",
	"&vpshufb (@x[$d2],@x[$d2],$t1)",
	"&vpaddd (@x[$a3],@x[$a3],@x[$b3])",	# Q4
	"&vpxor (@x[$d3],@x[$a3],@x[$d3])",
	"&vpshufb (@x[$d3],@x[$d3],$t1)",

	"&vpaddd ($xc,$xc,@x[$d2])",
	"&vpxor (@x[$b2],$xc,@x[$b2])",
	"&vpslld ($t0,@x[$b2],12)",
	"&vpsrld (@x[$b2],@x[$b2],20)",
	"&vpor (@x[$b2],$t0,@x[$b2])",
	"&vbroadcasti128($t0,'(%r11)')",	# .Lrot24(%rip)
	"&vpaddd ($xc_,$xc_,@x[$d3])",
	"&vpxor (@x[$b3],$xc_,@x[$b3])",
	"&vpslld ($t1,@x[$b3],12)",
	"&vpsrld (@x[$b3],@x[$b3],20)",
	"&vpor (@x[$b3],$t1,@x[$b3])",

	"&vpaddd (@x[$a2],@x[$a2],@x[$b2])",
	"&vpxor (@x[$d2],@x[$a2],@x[$d2])",
	"&vpshufb (@x[$d2],@x[$d2],$t0)",
	"&vpaddd (@x[$a3],@x[$a3],@x[$b3])",
	"&vpxor (@x[$d3],@x[$a3],@x[$d3])",
	"&vpshufb (@x[$d3],@x[$d3],$t0)",

	"&vpaddd ($xc,$xc,@x[$d2])",
	"&vpxor (@x[$b2],$xc,@x[$b2])",
	"&vpslld ($t1,@x[$b2],7)",
	"&vpsrld (@x[$b2],@x[$b2],25)",
	"&vpor (@x[$b2],$t1,@x[$b2])",
	"&vbroadcasti128($t1,'(%r10)')",	# .Lrot16(%rip)
	"&vpaddd ($xc_,$xc_,@x[$d3])",
	"&vpxor (@x[$b3],$xc_,@x[$b3])",
	"&vpslld ($t0,@x[$b3],7)",
	"&vpsrld (@x[$b3],@x[$b3],25)",
	"&vpor (@x[$b3],$t0,@x[$b3])"
	);
}
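# Note (added commentary): the AVX2 variant is the same idea widened to
# eight blocks; every %ymm register carries one state word from eight
# independent blocks, and the rotation masks are loaded with
# vbroadcasti128 from (%r10)/(%r11) so both 128-bit halves get the same
# .Lrot16/.Lrot24 pshufb pattern. As before, the 12- and 7-bit rotations
# are done with shift/shift/or.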
my $xframe = $win64 ? 0xb0 : 8;

$code.=<<___;
.type ChaCha20_8x,\@function,5
.align 32
ChaCha20_8x:
.LChaCha20_8x:
	mov %rsp,%r10
	sub \$0x280+$xframe,%rsp
	and \$-32,%rsp
___
$code.=<<___ if ($win64);
	lea 0x290+0x30(%rsp),%r11
	movaps %xmm6,-0x30(%r11)
	movaps %xmm7,-0x20(%r11)
	movaps %xmm8,-0x10(%r11)
	movaps %xmm9,0x00(%r11)
	movaps %xmm10,0x10(%r11)
	movaps %xmm11,0x20(%r11)
	movaps %xmm12,0x30(%r11)
	movaps %xmm13,0x40(%r11)
	movaps %xmm14,0x50(%r11)
	movaps %xmm15,0x60(%r11)
___
$code.=<<___;
	vzeroupper
	mov %r10,0x280(%rsp)

	################ stack layout
	# +0x00		SIMD equivalent of @x[8-12]
	# ...
	# +0x80		constant copy of key[0-2] smashed by lanes
	# ...
	# +0x200	SIMD counters (with nonce smashed by lanes)
	# ...
	# +0x280	saved %rsp

	vbroadcasti128 .Lsigma(%rip),$xa3	# key[0]
	vbroadcasti128 ($key),$xb3		# key[1]
	vbroadcasti128 16($key),$xt3		# key[2]
	vbroadcasti128 ($counter),$xd3		# key[3]
	lea 0x100(%rsp),%rcx			# size optimization
	lea 0x200(%rsp),%rax			# size optimization
	lea .Lrot16(%rip),%r10
	lea .Lrot24(%rip),%r11

	vpshufd \$0x00,$xa3,$xa0		# smash key by lanes...
	vpshufd \$0x55,$xa3,$xa1
	vmovdqa $xa0,0x80-0x100(%rcx)		# ... and offload
	vpshufd \$0xaa,$xa3,$xa2
	vmovdqa $xa1,0xa0-0x100(%rcx)
	vpshufd \$0xff,$xa3,$xa3
	vmovdqa $xa2,0xc0-0x100(%rcx)
	vmovdqa $xa3,0xe0-0x100(%rcx)

	vpshufd \$0x00,$xb3,$xb0
	vpshufd \$0x55,$xb3,$xb1
	vmovdqa $xb0,0x100-0x100(%rcx)
	vpshufd \$0xaa,$xb3,$xb2
	vmovdqa $xb1,0x120-0x100(%rcx)
	vpshufd \$0xff,$xb3,$xb3
	vmovdqa $xb2,0x140-0x100(%rcx)
	vmovdqa $xb3,0x160-0x100(%rcx)

	vpshufd \$0x00,$xt3,$xt0		# "xc0"
	vpshufd \$0x55,$xt3,$xt1		# "xc1"
	vmovdqa $xt0,0x180-0x200(%rax)
	vpshufd \$0xaa,$xt3,$xt2		# "xc2"
	vmovdqa $xt1,0x1a0-0x200(%rax)
	vpshufd \$0xff,$xt3,$xt3		# "xc3"
	vmovdqa $xt2,0x1c0-0x200(%rax)
	vmovdqa $xt3,0x1e0-0x200(%rax)

	vpshufd \$0x00,$xd3,$xd0
	vpshufd \$0x55,$xd3,$xd1
	vpaddd .Lincy(%rip),$xd0,$xd0		# don't save counters yet
	vpshufd \$0xaa,$xd3,$xd2
	vmovdqa $xd1,0x220-0x200(%rax)
	vpshufd \$0xff,$xd3,$xd3
	vmovdqa $xd2,0x240-0x200(%rax)
	vmovdqa $xd3,0x260-0x200(%rax)

	jmp .Loop_enter8x

.align 32
.Loop_outer8x:
	vmovdqa 0x80-0x100(%rcx),$xa0		# re-load smashed key
	vmovdqa 0xa0-0x100(%rcx),$xa1
	vmovdqa 0xc0-0x100(%rcx),$xa2
	vmovdqa 0xe0-0x100(%rcx),$xa3
	vmovdqa 0x100-0x100(%rcx),$xb0
	vmovdqa 0x120-0x100(%rcx),$xb1
	vmovdqa 0x140-0x100(%rcx),$xb2
	vmovdqa 0x160-0x100(%rcx),$xb3
	vmovdqa 0x180-0x200(%rax),$xt0		# "xc0"
	vmovdqa 0x1a0-0x200(%rax),$xt1		# "xc1"
	vmovdqa 0x1c0-0x200(%rax),$xt2		# "xc2"
	vmovdqa 0x1e0-0x200(%rax),$xt3		# "xc3"
	vmovdqa 0x200-0x200(%rax),$xd0
	vmovdqa 0x220-0x200(%rax),$xd1
	vmovdqa 0x240-0x200(%rax),$xd2
	vmovdqa 0x260-0x200(%rax),$xd3
	vpaddd .Leight(%rip),$xd0,$xd0		# next SIMD counters

.Loop_enter8x:
	vmovdqa $xt2,0x40(%rsp)			# SIMD equivalent of "@x[10]"
	vmovdqa $xt3,0x60(%rsp)			# SIMD equivalent of "@x[11]"
	vbroadcasti128 (%r10),$xt3
	vmovdqa $xd0,0x200-0x200(%rax)		# save SIMD counters
	mov \$10,%eax
	jmp .Loop8x

.align 32
.Loop8x:
___
	foreach (&AVX2_lane_ROUND(0, 4, 8,12)) { eval; }
	foreach (&AVX2_lane_ROUND(0, 5,10,15)) { eval; }
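# Note (added commentary): after the rounds, register $xaN holds word N of
# the "a" row for blocks 0..7, one block per 32-bit lane. The
# vpunpck{l,h}dq/vpunpck{l,h}qdq and vperm2i128 sequences below transpose
# that layout so each register ends up holding 32 contiguous bytes of
# keystream belonging to a single block, ready to be XORed with the input.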
$code.=<<___;
	dec %eax
	jnz .Loop8x

	lea 0x200(%rsp),%rax			# size optimization
	vpaddd 0x80-0x100(%rcx),$xa0,$xa0	# accumulate key
	vpaddd 0xa0-0x100(%rcx),$xa1,$xa1
	vpaddd 0xc0-0x100(%rcx),$xa2,$xa2
	vpaddd 0xe0-0x100(%rcx),$xa3,$xa3

	vpunpckldq $xa1,$xa0,$xt2		# "de-interlace" data
	vpunpckldq $xa3,$xa2,$xt3
	vpunpckhdq $xa1,$xa0,$xa0
	vpunpckhdq $xa3,$xa2,$xa2
	vpunpcklqdq $xt3,$xt2,$xa1		# "a0"
	vpunpckhqdq $xt3,$xt2,$xt2		# "a1"
	vpunpcklqdq $xa2,$xa0,$xa3		# "a2"
	vpunpckhqdq $xa2,$xa0,$xa0		# "a3"
___
	($xa0,$xa1,$xa2,$xa3,$xt2)=($xa1,$xt2,$xa3,$xa0,$xa2);
$code.=<<___;
	vpaddd 0x100-0x100(%rcx),$xb0,$xb0
	vpaddd 0x120-0x100(%rcx),$xb1,$xb1
	vpaddd 0x140-0x100(%rcx),$xb2,$xb2
	vpaddd 0x160-0x100(%rcx),$xb3,$xb3

	vpunpckldq $xb1,$xb0,$xt2
	vpunpckldq $xb3,$xb2,$xt3
	vpunpckhdq $xb1,$xb0,$xb0
	vpunpckhdq $xb3,$xb2,$xb2
	vpunpcklqdq $xt3,$xt2,$xb1		# "b0"
	vpunpckhqdq $xt3,$xt2,$xt2		# "b1"
	vpunpcklqdq $xb2,$xb0,$xb3		# "b2"
	vpunpckhqdq $xb2,$xb0,$xb0		# "b3"
___
	($xb0,$xb1,$xb2,$xb3,$xt2)=($xb1,$xt2,$xb3,$xb0,$xb2);
$code.=<<___;
	vperm2i128 \$0x20,$xb0,$xa0,$xt3	# "de-interlace" further
	vperm2i128 \$0x31,$xb0,$xa0,$xb0
	vperm2i128 \$0x20,$xb1,$xa1,$xa0
	vperm2i128 \$0x31,$xb1,$xa1,$xb1
	vperm2i128 \$0x20,$xb2,$xa2,$xa1
	vperm2i128 \$0x31,$xb2,$xa2,$xb2
	vperm2i128 \$0x20,$xb3,$xa3,$xa2
	vperm2i128 \$0x31,$xb3,$xa3,$xb3
___
	($xa0,$xa1,$xa2,$xa3,$xt3)=($xt3,$xa0,$xa1,$xa2,$xa3);
	my ($xc0,$xc1,$xc2,$xc3)=($xt0,$xt1,$xa0,$xa1);
$code.=<<___;
	vmovdqa $xa0,0x00(%rsp)			# offload $xaN
	vmovdqa $xa1,0x20(%rsp)
	vmovdqa 0x40(%rsp),$xc2			# $xa0
	vmovdqa 0x60(%rsp),$xc3			# $xa1

	vpaddd 0x180-0x200(%rax),$xc0,$xc0
	vpaddd 0x1a0-0x200(%rax),$xc1,$xc1
	vpaddd 0x1c0-0x200(%rax),$xc2,$xc2
	vpaddd 0x1e0-0x200(%rax),$xc3,$xc3

	vpunpckldq $xc1,$xc0,$xt2
	vpunpckldq $xc3,$xc2,$xt3
	vpunpckhdq $xc1,$xc0,$xc0
	vpunpckhdq $xc3,$xc2,$xc2
	vpunpcklqdq $xt3,$xt2,$xc1		# "c0"
	vpunpckhqdq $xt3,$xt2,$xt2		# "c1"
	vpunpcklqdq $xc2,$xc0,$xc3		# "c2"
	vpunpckhqdq $xc2,$xc0,$xc0		# "c3"
___
	($xc0,$xc1,$xc2,$xc3,$xt2)=($xc1,$xt2,$xc3,$xc0,$xc2);
$code.=<<___;
	vpaddd 0x200-0x200(%rax),$xd0,$xd0
	vpaddd 0x220-0x200(%rax),$xd1,$xd1
	vpaddd 0x240-0x200(%rax),$xd2,$xd2
	vpaddd 0x260-0x200(%rax),$xd3,$xd3

	vpunpckldq $xd1,$xd0,$xt2
	vpunpckldq $xd3,$xd2,$xt3
	vpunpckhdq $xd1,$xd0,$xd0
	vpunpckhdq $xd3,$xd2,$xd2
	vpunpcklqdq $xt3,$xt2,$xd1		# "d0"
	vpunpckhqdq $xt3,$xt2,$xt2		# "d1"
	vpunpcklqdq $xd2,$xd0,$xd3		# "d2"
	vpunpckhqdq $xd2,$xd0,$xd0		# "d3"
___
	($xd0,$xd1,$xd2,$xd3,$xt2)=($xd1,$xt2,$xd3,$xd0,$xd2);
$code.=<<___;
	vperm2i128 \$0x20,$xd0,$xc0,$xt3	# "de-interlace" further
	vperm2i128 \$0x31,$xd0,$xc0,$xd0
	vperm2i128 \$0x20,$xd1,$xc1,$xc0
	vperm2i128 \$0x31,$xd1,$xc1,$xd1
	vperm2i128 \$0x20,$xd2,$xc2,$xc1
	vperm2i128 \$0x31,$xd2,$xc2,$xd2
	vperm2i128 \$0x20,$xd3,$xc3,$xc2
	vperm2i128 \$0x31,$xd3,$xc3,$xd3
___
	($xc0,$xc1,$xc2,$xc3,$xt3)=($xt3,$xc0,$xc1,$xc2,$xc3);
	($xb0,$xb1,$xb2,$xb3,$xc0,$xc1,$xc2,$xc3)=
	($xc0,$xc1,$xc2,$xc3,$xb0,$xb1,$xb2,$xb3);
	($xa0,$xa1)=($xt2,$xt3);
$code.=<<___;
	vmovdqa 0x00(%rsp),$xa0			# $xaN was offloaded, remember?
	vmovdqa 0x20(%rsp),$xa1

	cmp \$64*8,$len
	jb .Ltail8x

	vpxor 0x00($inp),$xa0,$xa0	# xor with input
	vpxor 0x20($inp),$xb0,$xb0
	vpxor 0x40($inp),$xc0,$xc0
	vpxor 0x60($inp),$xd0,$xd0
	lea 0x80($inp),$inp		# size optimization
	vmovdqu $xa0,0x00($out)
	vmovdqu $xb0,0x20($out)
	vmovdqu $xc0,0x40($out)
	vmovdqu $xd0,0x60($out)
	lea 0x80($out),$out		# size optimization

	vpxor 0x00($inp),$xa1,$xa1
	vpxor 0x20($inp),$xb1,$xb1
	vpxor 0x40($inp),$xc1,$xc1
	vpxor 0x60($inp),$xd1,$xd1
	lea 0x80($inp),$inp		# size optimization
	vmovdqu $xa1,0x00($out)
	vmovdqu $xb1,0x20($out)
	vmovdqu $xc1,0x40($out)
	vmovdqu $xd1,0x60($out)
	lea 0x80($out),$out		# size optimization

	vpxor 0x00($inp),$xa2,$xa2
	vpxor 0x20($inp),$xb2,$xb2
	vpxor 0x40($inp),$xc2,$xc2
	vpxor 0x60($inp),$xd2,$xd2
	lea 0x80($inp),$inp		# size optimization
	vmovdqu $xa2,0x00($out)
	vmovdqu $xb2,0x20($out)
	vmovdqu $xc2,0x40($out)
	vmovdqu $xd2,0x60($out)
	lea 0x80($out),$out		# size optimization

	vpxor 0x00($inp),$xa3,$xa3
	vpxor 0x20($inp),$xb3,$xb3
	vpxor 0x40($inp),$xc3,$xc3
	vpxor 0x60($inp),$xd3,$xd3
	lea 0x80($inp),$inp		# size optimization
	vmovdqu $xa3,0x00($out)
	vmovdqu $xb3,0x20($out)
	vmovdqu $xc3,0x40($out)
	vmovdqu $xd3,0x60($out)
	lea 0x80($out),$out		# size optimization

	sub \$64*8,$len
	jnz .Loop_outer8x

	jmp .Ldone8x

.Ltail8x:
	cmp \$448,$len
	jae .L448_or_more8x
	cmp \$384,$len
	jae .L384_or_more8x
	cmp \$320,$len
	jae .L320_or_more8x
	cmp \$256,$len
	jae .L256_or_more8x
	cmp \$192,$len
	jae .L192_or_more8x
	cmp \$128,$len
	jae .L128_or_more8x
	cmp \$64,$len
	jae .L64_or_more8x

	xor %r10,%r10
	vmovdqa $xa0,0x00(%rsp)
	vmovdqa $xb0,0x20(%rsp)
	jmp .Loop_tail8x

.align 32
.L64_or_more8x:
	vpxor 0x00($inp),$xa0,$xa0	# xor with input
	vpxor 0x20($inp),$xb0,$xb0
	vmovdqu $xa0,0x00($out)
	vmovdqu $xb0,0x20($out)
	je .Ldone8x

	lea 0x40($inp),$inp		# inp+=64*1
	xor %r10,%r10
	vmovdqa $xc0,0x00(%rsp)
	lea 0x40($out),$out		# out+=64*1
	sub \$64,$len			# len-=64*1
	vmovdqa $xd0,0x20(%rsp)
	jmp .Loop_tail8x

.align 32
.L128_or_more8x:
	vpxor 0x00($inp),$xa0,$xa0	# xor with input
	vpxor 0x20($inp),$xb0,$xb0
	vpxor 0x40($inp),$xc0,$xc0
	vpxor 0x60($inp),$xd0,$xd0
	vmovdqu $xa0,0x00($out)
	vmovdqu $xb0,0x20($out)
	vmovdqu $xc0,0x40($out)
	vmovdqu $xd0,0x60($out)
	je .Ldone8x

	lea 0x80($inp),$inp		# inp+=64*2
	xor %r10,%r10
	vmovdqa $xa1,0x00(%rsp)
	lea 0x80($out),$out		# out+=64*2
	sub \$128,$len			# len-=64*2
	vmovdqa $xb1,0x20(%rsp)
	jmp .Loop_tail8x

.align 32
.L192_or_more8x:
	vpxor 0x00($inp),$xa0,$xa0	# xor with input
	vpxor 0x20($inp),$xb0,$xb0
	vpxor 0x40($inp),$xc0,$xc0
	vpxor 0x60($inp),$xd0,$xd0
	vpxor 0x80($inp),$xa1,$xa1
	vpxor 0xa0($inp),$xb1,$xb1
	vmovdqu $xa0,0x00($out)
	vmovdqu $xb0,0x20($out)
	vmovdqu $xc0,0x40($out)
	vmovdqu $xd0,0x60($out)
	vmovdqu $xa1,0x80($out)
	vmovdqu $xb1,0xa0($out)
	je .Ldone8x

	lea 0xc0($inp),$inp		# inp+=64*3
	xor %r10,%r10
	vmovdqa $xc1,0x00(%rsp)
	lea 0xc0($out),$out		# out+=64*3
	sub \$192,$len			# len-=64*3
	vmovdqa $xd1,0x20(%rsp)
	jmp .Loop_tail8x

.align 32
.L256_or_more8x:
	vpxor 0x00($inp),$xa0,$xa0	# xor with input
	vpxor 0x20($inp),$xb0,$xb0
	vpxor 0x40($inp),$xc0,$xc0
	vpxor 0x60($inp),$xd0,$xd0
	vpxor 0x80($inp),$xa1,$xa1
	vpxor 0xa0($inp),$xb1,$xb1
	vpxor 0xc0($inp),$xc1,$xc1
	vpxor 0xe0($inp),$xd1,$xd1
	vmovdqu $xa0,0x00($out)
	vmovdqu $xb0,0x20($out)
	vmovdqu $xc0,0x40($out)
	vmovdqu $xd0,0x60($out)
	vmovdqu $xa1,0x80($out)
	vmovdqu $xb1,0xa0($out)
	vmovdqu $xc1,0xc0($out)
	vmovdqu $xd1,0xe0($out)
	je .Ldone8x

	lea 0x100($inp),$inp		# inp+=64*4
	xor %r10,%r10
	vmovdqa $xa2,0x00(%rsp)
	lea 0x100($out),$out		# out+=64*4
	sub \$256,$len			# len-=64*4
	vmovdqa $xb2,0x20(%rsp)
	jmp .Loop_tail8x

.align 32
.L320_or_more8x:
	vpxor 0x00($inp),$xa0,$xa0	# xor with input
	vpxor 0x20($inp),$xb0,$xb0
	vpxor 0x40($inp),$xc0,$xc0
	vpxor 0x60($inp),$xd0,$xd0
	vpxor 0x80($inp),$xa1,$xa1
	vpxor 0xa0($inp),$xb1,$xb1
	vpxor 0xc0($inp),$xc1,$xc1
	vpxor 0xe0($inp),$xd1,$xd1
	vpxor 0x100($inp),$xa2,$xa2
	vpxor 0x120($inp),$xb2,$xb2
	vmovdqu $xa0,0x00($out)
	vmovdqu $xb0,0x20($out)
	vmovdqu $xc0,0x40($out)
	vmovdqu $xd0,0x60($out)
	vmovdqu $xa1,0x80($out)
	vmovdqu $xb1,0xa0($out)
	vmovdqu $xc1,0xc0($out)
	vmovdqu $xd1,0xe0($out)
	vmovdqu $xa2,0x100($out)
	vmovdqu $xb2,0x120($out)
	je .Ldone8x

	lea 0x140($inp),$inp		# inp+=64*5
	xor %r10,%r10
	vmovdqa $xc2,0x00(%rsp)
	lea 0x140($out),$out		# out+=64*5
	sub \$320,$len			# len-=64*5
	vmovdqa $xd2,0x20(%rsp)
	jmp .Loop_tail8x

.align 32
.L384_or_more8x:
	vpxor 0x00($inp),$xa0,$xa0	# xor with input
	vpxor 0x20($inp),$xb0,$xb0
	vpxor 0x40($inp),$xc0,$xc0
	vpxor 0x60($inp),$xd0,$xd0
	vpxor 0x80($inp),$xa1,$xa1
	vpxor 0xa0($inp),$xb1,$xb1
	vpxor 0xc0($inp),$xc1,$xc1
	vpxor 0xe0($inp),$xd1,$xd1
	vpxor 0x100($inp),$xa2,$xa2
	vpxor 0x120($inp),$xb2,$xb2
	vpxor 0x140($inp),$xc2,$xc2
	vpxor 0x160($inp),$xd2,$xd2
	vmovdqu $xa0,0x00($out)
	vmovdqu $xb0,0x20($out)
	vmovdqu $xc0,0x40($out)
	vmovdqu $xd0,0x60($out)
	vmovdqu $xa1,0x80($out)
	vmovdqu $xb1,0xa0($out)
	vmovdqu $xc1,0xc0($out)
	vmovdqu $xd1,0xe0($out)
	vmovdqu $xa2,0x100($out)
	vmovdqu $xb2,0x120($out)
	vmovdqu $xc2,0x140($out)
	vmovdqu $xd2,0x160($out)
	je .Ldone8x

	lea 0x180($inp),$inp		# inp+=64*6
	xor %r10,%r10
	vmovdqa $xa3,0x00(%rsp)
	lea 0x180($out),$out		# out+=64*6
	sub \$384,$len			# len-=64*6
	vmovdqa $xb3,0x20(%rsp)
	jmp .Loop_tail8x

.align 32
.L448_or_more8x:
	vpxor 0x00($inp),$xa0,$xa0	# xor with input
	vpxor 0x20($inp),$xb0,$xb0
	vpxor 0x40($inp),$xc0,$xc0
	vpxor 0x60($inp),$xd0,$xd0
	vpxor 0x80($inp),$xa1,$xa1
	vpxor 0xa0($inp),$xb1,$xb1
	vpxor 0xc0($inp),$xc1,$xc1
	vpxor 0xe0($inp),$xd1,$xd1
	vpxor 0x100($inp),$xa2,$xa2
	vpxor 0x120($inp),$xb2,$xb2
	vpxor 0x140($inp),$xc2,$xc2
	vpxor 0x160($inp),$xd2,$xd2
	vpxor 0x180($inp),$xa3,$xa3
	vpxor 0x1a0($inp),$xb3,$xb3
	vmovdqu $xa0,0x00($out)
	vmovdqu $xb0,0x20($out)
	vmovdqu $xc0,0x40($out)
	vmovdqu $xd0,0x60($out)
	vmovdqu $xa1,0x80($out)
	vmovdqu $xb1,0xa0($out)
	vmovdqu $xc1,0xc0($out)
	vmovdqu $xd1,0xe0($out)
	vmovdqu $xa2,0x100($out)
	vmovdqu $xb2,0x120($out)
	vmovdqu $xc2,0x140($out)
	vmovdqu $xd2,0x160($out)
	vmovdqu $xa3,0x180($out)
	vmovdqu $xb3,0x1a0($out)
	je .Ldone8x

	lea 0x1c0($inp),$inp		# inp+=64*7
	xor %r10,%r10
	vmovdqa $xc3,0x00(%rsp)
	lea 0x1c0($out),$out		# out+=64*7
	sub \$448,$len			# len-=64*7
	vmovdqa $xd3,0x20(%rsp)

.Loop_tail8x:
	movzb ($inp,%r10),%eax
	movzb (%rsp,%r10),%ecx
	lea 1(%r10),%r10
	xor %ecx,%eax
	mov %al,-1($out,%r10)
	dec $len
	jnz .Loop_tail8x

.Ldone8x:
	vzeroall
___
$code.=<<___ if ($win64);
	lea 0x290+0x30(%rsp),%r11
	movaps -0x30(%r11),%xmm6
	movaps -0x20(%r11),%xmm7
	movaps -0x10(%r11),%xmm8
	movaps 0x00(%r11),%xmm9
	movaps 0x10(%r11),%xmm10
	movaps 0x20(%r11),%xmm11
	movaps 0x30(%r11),%xmm12
	movaps 0x40(%r11),%xmm13
	movaps 0x50(%r11),%xmm14
	movaps 0x60(%r11),%xmm15
___
$code.=<<___;
	mov 0x280(%rsp),%rsp
	ret
.size ChaCha20_8x,.-ChaCha20_8x
___
}

foreach (split("\n",$code)) {
	s/\`([^\`]*)\`/eval $1/geo;

	s/%x#%y/%x/go;

	print $_,"\n";
}

close STDOUT;