1#!/usr/bin/env perl 2# 3# ==================================================================== 4# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL 5# project. The module is, however, dual licensed under OpenSSL and 6# CRYPTOGAMS licenses depending on where you obtain it. For further 7# details see http://www.openssl.org/~appro/cryptogams/. 8# ==================================================================== 9# 10# March, June 2010 11# 12# The module implements "4-bit" GCM GHASH function and underlying 13# single multiplication operation in GF(2^128). "4-bit" means that 14# it uses 256 bytes per-key table [+128 bytes shared table]. GHASH 15# function features so called "528B" variant utilizing additional 16# 256+16 bytes of per-key storage [+512 bytes shared table]. 17# Performance results are for this streamed GHASH subroutine and are 18# expressed in cycles per processed byte, less is better: 19# 20# gcc 3.4.x(*) assembler 21# 22# P4 28.6 14.0 +100% 23# Opteron 19.3 7.7 +150% 24# Core2 17.8 8.1(**) +120% 25# Atom 31.6 16.8 +88% 26# VIA Nano 21.8 10.1 +115% 27# 28# (*) comparison is not completely fair, because C results are 29# for vanilla "256B" implementation, while assembler results 30# are for "528B";-) 31# (**) it's mystery [to me] why Core2 result is not same as for 32# Opteron; 33 34# May 2010 35# 36# Add PCLMULQDQ version performing at 2.02 cycles per processed byte. 37# See ghash-x86.pl for background information and details about coding 38# techniques. 39# 40# Special thanks to David Woodhouse <dwmw2@infradead.org> for 41# providing access to a Westmere-based system on behalf of Intel 42# Open Source Technology Centre. 43 44# December 2012 45# 46# Overhaul: aggregate Karatsuba post-processing, improve ILP in 47# reduction_alg9, increase reduction aggregate factor to 4x. As for 48# the latter. ghash-x86.pl discusses that it makes lesser sense to 49# increase aggregate factor. Then why increase here? Critical path 50# consists of 3 independent pclmulqdq instructions, Karatsuba post- 51# processing and reduction. "On top" of this we lay down aggregated 52# multiplication operations, triplets of independent pclmulqdq's. As 53# issue rate for pclmulqdq is limited, it makes lesser sense to 54# aggregate more multiplications than it takes to perform remaining 55# non-multiplication operations. 2x is near-optimal coefficient for 56# contemporary Intel CPUs (therefore modest improvement coefficient), 57# but not for Bulldozer. Latter is because logical SIMD operations 58# are twice as slow in comparison to Intel, so that critical path is 59# longer. A CPU with higher pclmulqdq issue rate would also benefit 60# from higher aggregate factor... 61# 62# Westmere 1.78(+13%) 63# Sandy Bridge 1.80(+8%) 64# Ivy Bridge 1.80(+7%) 65# Haswell 0.55(+93%) (if system doesn't support AVX) 66# Broadwell 0.45(+110%)(if system doesn't support AVX) 67# Bulldozer 1.49(+27%) 68# Silvermont 2.88(+13%) 69 70# March 2013 71# 72# ... 8x aggregate factor AVX code path is using reduction algorithm 73# suggested by Shay Gueron[1]. Even though contemporary AVX-capable 74# CPUs such as Sandy and Ivy Bridge can execute it, the code performs 75# sub-optimally in comparison to above mentioned version. But thanks 76# to Ilya Albrekht and Max Locktyukhin of Intel Corp. we knew that 77# it performs in 0.41 cycles per byte on Haswell processor, and in 78# 0.29 on Broadwell. 79# 80# [1] http://rt.openssl.org/Ticket/Display.html?id=2900&user=guest&pass=guest 81 82$flavour = shift; 83$output = shift; 84if ($flavour =~ /\./) { $output = $flavour; undef $flavour; } 85 86$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/); 87 88$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1; 89( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or 90( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or 91die "can't locate x86_64-xlate.pl"; 92 93if (`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1` 94 =~ /GNU assembler version ([2-9]\.[0-9]+)/) { 95 $avx = ($1>=2.20) + ($1>=2.22); 96} 97 98if (!$avx && $win64 && ($flavour =~ /nasm/ || $ENV{ASM} =~ /nasm/) && 99 `nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) { 100 $avx = ($1>=2.09) + ($1>=2.10); 101} 102 103if (!$avx && $win64 && ($flavour =~ /masm/ || $ENV{ASM} =~ /ml64/) && 104 `ml64 2>&1` =~ /Version ([0-9]+)\./) { 105 $avx = ($1>=10) + ($1>=11); 106} 107 108if (!$avx && `$ENV{CC} -v 2>&1` =~ /((?:^clang|LLVM) version|.*based on LLVM) ([3-9]\.[0-9]+)/) { 109 $avx = ($2>=3.0) + ($2>3.0); 110} 111 112open OUT,"| \"$^X\" $xlate $flavour $output"; 113*STDOUT=*OUT; 114 115$do4xaggr=1; 116 117# common register layout 118$nlo="%rax"; 119$nhi="%rbx"; 120$Zlo="%r8"; 121$Zhi="%r9"; 122$tmp="%r10"; 123$rem_4bit = "%r11"; 124 125$Xi="%rdi"; 126$Htbl="%rsi"; 127 128# per-function register layout 129$cnt="%rcx"; 130$rem="%rdx"; 131 132sub LB() { my $r=shift; $r =~ s/%[er]([a-d])x/%\1l/ or 133 $r =~ s/%[er]([sd]i)/%\1l/ or 134 $r =~ s/%[er](bp)/%\1l/ or 135 $r =~ s/%(r[0-9]+)[d]?/%\1b/; $r; } 136 137sub AUTOLOAD() # thunk [simplified] 32-bit style perlasm 138{ my $opcode = $AUTOLOAD; $opcode =~ s/.*:://; 139 my $arg = pop; 140 $arg = "\$$arg" if ($arg*1 eq $arg); 141 $code .= "\t$opcode\t".join(',',$arg,reverse @_)."\n"; 142} 143 144{ my $N; 145 sub loop() { 146 my $inp = shift; 147 148 $N++; 149$code.=<<___; 150 xor $nlo,$nlo 151 xor $nhi,$nhi 152 mov `&LB("$Zlo")`,`&LB("$nlo")` 153 mov `&LB("$Zlo")`,`&LB("$nhi")` 154 shl \$4,`&LB("$nlo")` 155 mov \$14,$cnt 156 mov 8($Htbl,$nlo),$Zlo 157 mov ($Htbl,$nlo),$Zhi 158 and \$0xf0,`&LB("$nhi")` 159 mov $Zlo,$rem 160 jmp .Loop$N 161 162.align 16 163.Loop$N: 164 shr \$4,$Zlo 165 and \$0xf,$rem 166 mov $Zhi,$tmp 167 mov ($inp,$cnt),`&LB("$nlo")` 168 shr \$4,$Zhi 169 xor 8($Htbl,$nhi),$Zlo 170 shl \$60,$tmp 171 xor ($Htbl,$nhi),$Zhi 172 mov `&LB("$nlo")`,`&LB("$nhi")` 173 xor ($rem_4bit,$rem,8),$Zhi 174 mov $Zlo,$rem 175 shl \$4,`&LB("$nlo")` 176 xor $tmp,$Zlo 177 dec $cnt 178 js .Lbreak$N 179 180 shr \$4,$Zlo 181 and \$0xf,$rem 182 mov $Zhi,$tmp 183 shr \$4,$Zhi 184 xor 8($Htbl,$nlo),$Zlo 185 shl \$60,$tmp 186 xor ($Htbl,$nlo),$Zhi 187 and \$0xf0,`&LB("$nhi")` 188 xor ($rem_4bit,$rem,8),$Zhi 189 mov $Zlo,$rem 190 xor $tmp,$Zlo 191 jmp .Loop$N 192 193.align 16 194.Lbreak$N: 195 shr \$4,$Zlo 196 and \$0xf,$rem 197 mov $Zhi,$tmp 198 shr \$4,$Zhi 199 xor 8($Htbl,$nlo),$Zlo 200 shl \$60,$tmp 201 xor ($Htbl,$nlo),$Zhi 202 and \$0xf0,`&LB("$nhi")` 203 xor ($rem_4bit,$rem,8),$Zhi 204 mov $Zlo,$rem 205 xor $tmp,$Zlo 206 207 shr \$4,$Zlo 208 and \$0xf,$rem 209 mov $Zhi,$tmp 210 shr \$4,$Zhi 211 xor 8($Htbl,$nhi),$Zlo 212 shl \$60,$tmp 213 xor ($Htbl,$nhi),$Zhi 214 xor $tmp,$Zlo 215 xor ($rem_4bit,$rem,8),$Zhi 216 217 bswap $Zlo 218 bswap $Zhi 219___ 220}} 221 222$code=<<___; 223.text 224.extern OPENSSL_ia32cap_P 225 226.globl gcm_gmult_4bit 227.type gcm_gmult_4bit,\@function,2 228.align 16 229gcm_gmult_4bit: 230 push %rbx 231 push %rbp # %rbp and %r12 are pushed exclusively in 232 push %r12 # order to reuse Win64 exception handler... 233.Lgmult_prologue: 234 235 movzb 15($Xi),$Zlo 236 lea .Lrem_4bit(%rip),$rem_4bit 237___ 238 &loop ($Xi); 239$code.=<<___; 240 mov $Zlo,8($Xi) 241 mov $Zhi,($Xi) 242 243 mov 16(%rsp),%rbx 244 lea 24(%rsp),%rsp 245.Lgmult_epilogue: 246 ret 247.size gcm_gmult_4bit,.-gcm_gmult_4bit 248___ 249 250# per-function register layout 251$inp="%rdx"; 252$len="%rcx"; 253$rem_8bit=$rem_4bit; 254 255$code.=<<___; 256.globl gcm_ghash_4bit 257.type gcm_ghash_4bit,\@function,4 258.align 16 259gcm_ghash_4bit: 260 push %rbx 261 push %rbp 262 push %r12 263 push %r13 264 push %r14 265 push %r15 266 sub \$280,%rsp 267.Lghash_prologue: 268 mov $inp,%r14 # reassign couple of args 269 mov $len,%r15 270___ 271{ my $inp="%r14"; 272 my $dat="%edx"; 273 my $len="%r15"; 274 my @nhi=("%ebx","%ecx"); 275 my @rem=("%r12","%r13"); 276 my $Hshr4="%rbp"; 277 278 &sub ($Htbl,-128); # size optimization 279 &lea ($Hshr4,"16+128(%rsp)"); 280 { my @lo =($nlo,$nhi); 281 my @hi =($Zlo,$Zhi); 282 283 &xor ($dat,$dat); 284 for ($i=0,$j=-2;$i<18;$i++,$j++) { 285 &mov ("$j(%rsp)",&LB($dat)) if ($i>1); 286 &or ($lo[0],$tmp) if ($i>1); 287 &mov (&LB($dat),&LB($lo[1])) if ($i>0 && $i<17); 288 &shr ($lo[1],4) if ($i>0 && $i<17); 289 &mov ($tmp,$hi[1]) if ($i>0 && $i<17); 290 &shr ($hi[1],4) if ($i>0 && $i<17); 291 &mov ("8*$j($Hshr4)",$hi[0]) if ($i>1); 292 &mov ($hi[0],"16*$i+0-128($Htbl)") if ($i<16); 293 &shl (&LB($dat),4) if ($i>0 && $i<17); 294 &mov ("8*$j-128($Hshr4)",$lo[0]) if ($i>1); 295 &mov ($lo[0],"16*$i+8-128($Htbl)") if ($i<16); 296 &shl ($tmp,60) if ($i>0 && $i<17); 297 298 push (@lo,shift(@lo)); 299 push (@hi,shift(@hi)); 300 } 301 } 302 &add ($Htbl,-128); 303 &mov ($Zlo,"8($Xi)"); 304 &mov ($Zhi,"0($Xi)"); 305 &add ($len,$inp); # pointer to the end of data 306 &lea ($rem_8bit,".Lrem_8bit(%rip)"); 307 &jmp (".Louter_loop"); 308 309$code.=".align 16\n.Louter_loop:\n"; 310 &xor ($Zhi,"($inp)"); 311 &mov ("%rdx","8($inp)"); 312 &lea ($inp,"16($inp)"); 313 &xor ("%rdx",$Zlo); 314 &mov ("($Xi)",$Zhi); 315 &mov ("8($Xi)","%rdx"); 316 &shr ("%rdx",32); 317 318 &xor ($nlo,$nlo); 319 &rol ($dat,8); 320 &mov (&LB($nlo),&LB($dat)); 321 &movz ($nhi[0],&LB($dat)); 322 &shl (&LB($nlo),4); 323 &shr ($nhi[0],4); 324 325 for ($j=11,$i=0;$i<15;$i++) { 326 &rol ($dat,8); 327 &xor ($Zlo,"8($Htbl,$nlo)") if ($i>0); 328 &xor ($Zhi,"($Htbl,$nlo)") if ($i>0); 329 &mov ($Zlo,"8($Htbl,$nlo)") if ($i==0); 330 &mov ($Zhi,"($Htbl,$nlo)") if ($i==0); 331 332 &mov (&LB($nlo),&LB($dat)); 333 &xor ($Zlo,$tmp) if ($i>0); 334 &movzw ($rem[1],"($rem_8bit,$rem[1],2)") if ($i>0); 335 336 &movz ($nhi[1],&LB($dat)); 337 &shl (&LB($nlo),4); 338 &movzb ($rem[0],"(%rsp,$nhi[0])"); 339 340 &shr ($nhi[1],4) if ($i<14); 341 &and ($nhi[1],0xf0) if ($i==14); 342 &shl ($rem[1],48) if ($i>0); 343 &xor ($rem[0],$Zlo); 344 345 &mov ($tmp,$Zhi); 346 &xor ($Zhi,$rem[1]) if ($i>0); 347 &shr ($Zlo,8); 348 349 &movz ($rem[0],&LB($rem[0])); 350 &mov ($dat,"$j($Xi)") if (--$j%4==0); 351 &shr ($Zhi,8); 352 353 &xor ($Zlo,"-128($Hshr4,$nhi[0],8)"); 354 &shl ($tmp,56); 355 &xor ($Zhi,"($Hshr4,$nhi[0],8)"); 356 357 unshift (@nhi,pop(@nhi)); # "rotate" registers 358 unshift (@rem,pop(@rem)); 359 } 360 &movzw ($rem[1],"($rem_8bit,$rem[1],2)"); 361 &xor ($Zlo,"8($Htbl,$nlo)"); 362 &xor ($Zhi,"($Htbl,$nlo)"); 363 364 &shl ($rem[1],48); 365 &xor ($Zlo,$tmp); 366 367 &xor ($Zhi,$rem[1]); 368 &movz ($rem[0],&LB($Zlo)); 369 &shr ($Zlo,4); 370 371 &mov ($tmp,$Zhi); 372 &shl (&LB($rem[0]),4); 373 &shr ($Zhi,4); 374 375 &xor ($Zlo,"8($Htbl,$nhi[0])"); 376 &movzw ($rem[0],"($rem_8bit,$rem[0],2)"); 377 &shl ($tmp,60); 378 379 &xor ($Zhi,"($Htbl,$nhi[0])"); 380 &xor ($Zlo,$tmp); 381 &shl ($rem[0],48); 382 383 &bswap ($Zlo); 384 &xor ($Zhi,$rem[0]); 385 386 &bswap ($Zhi); 387 &cmp ($inp,$len); 388 &jb (".Louter_loop"); 389} 390$code.=<<___; 391 mov $Zlo,8($Xi) 392 mov $Zhi,($Xi) 393 394 lea 280(%rsp),%rsi 395 mov 0(%rsi),%r15 396 mov 8(%rsi),%r14 397 mov 16(%rsi),%r13 398 mov 24(%rsi),%r12 399 mov 32(%rsi),%rbp 400 mov 40(%rsi),%rbx 401 lea 48(%rsi),%rsp 402.Lghash_epilogue: 403 ret 404.size gcm_ghash_4bit,.-gcm_ghash_4bit 405___ 406 407###################################################################### 408# PCLMULQDQ version. 409 410@_4args=$win64? ("%rcx","%rdx","%r8", "%r9") : # Win64 order 411 ("%rdi","%rsi","%rdx","%rcx"); # Unix order 412 413($Xi,$Xhi)=("%xmm0","%xmm1"); $Hkey="%xmm2"; 414($T1,$T2,$T3)=("%xmm3","%xmm4","%xmm5"); 415 416sub clmul64x64_T2 { # minimal register pressure 417my ($Xhi,$Xi,$Hkey,$HK)=@_; 418 419if (!defined($HK)) { $HK = $T2; 420$code.=<<___; 421 movdqa $Xi,$Xhi # 422 pshufd \$0b01001110,$Xi,$T1 423 pshufd \$0b01001110,$Hkey,$T2 424 pxor $Xi,$T1 # 425 pxor $Hkey,$T2 426___ 427} else { 428$code.=<<___; 429 movdqa $Xi,$Xhi # 430 pshufd \$0b01001110,$Xi,$T1 431 pxor $Xi,$T1 # 432___ 433} 434$code.=<<___; 435 pclmulqdq \$0x00,$Hkey,$Xi ####### 436 pclmulqdq \$0x11,$Hkey,$Xhi ####### 437 pclmulqdq \$0x00,$HK,$T1 ####### 438 pxor $Xi,$T1 # 439 pxor $Xhi,$T1 # 440 441 movdqa $T1,$T2 # 442 psrldq \$8,$T1 443 pslldq \$8,$T2 # 444 pxor $T1,$Xhi 445 pxor $T2,$Xi # 446___ 447} 448 449sub reduction_alg9 { # 17/11 times faster than Intel version 450my ($Xhi,$Xi) = @_; 451 452$code.=<<___; 453 # 1st phase 454 movdqa $Xi,$T2 # 455 movdqa $Xi,$T1 456 psllq \$5,$Xi 457 pxor $Xi,$T1 # 458 psllq \$1,$Xi 459 pxor $T1,$Xi # 460 psllq \$57,$Xi # 461 movdqa $Xi,$T1 # 462 pslldq \$8,$Xi 463 psrldq \$8,$T1 # 464 pxor $T2,$Xi 465 pxor $T1,$Xhi # 466 467 # 2nd phase 468 movdqa $Xi,$T2 469 psrlq \$1,$Xi 470 pxor $T2,$Xhi # 471 pxor $Xi,$T2 472 psrlq \$5,$Xi 473 pxor $T2,$Xi # 474 psrlq \$1,$Xi # 475 pxor $Xhi,$Xi # 476___ 477} 478 479{ my ($Htbl,$Xip)=@_4args; 480 my $HK="%xmm6"; 481 482$code.=<<___; 483.globl gcm_init_clmul 484.type gcm_init_clmul,\@abi-omnipotent 485.align 16 486gcm_init_clmul: 487.L_init_clmul: 488___ 489$code.=<<___ if ($win64); 490.LSEH_begin_gcm_init_clmul: 491 # I can't trust assembler to use specific encoding:-( 492 .byte 0x48,0x83,0xec,0x18 #sub $0x18,%rsp 493 .byte 0x0f,0x29,0x34,0x24 #movaps %xmm6,(%rsp) 494___ 495$code.=<<___; 496 movdqu ($Xip),$Hkey 497 pshufd \$0b01001110,$Hkey,$Hkey # dword swap 498 499 # <<1 twist 500 pshufd \$0b11111111,$Hkey,$T2 # broadcast uppermost dword 501 movdqa $Hkey,$T1 502 psllq \$1,$Hkey 503 pxor $T3,$T3 # 504 psrlq \$63,$T1 505 pcmpgtd $T2,$T3 # broadcast carry bit 506 pslldq \$8,$T1 507 por $T1,$Hkey # H<<=1 508 509 # magic reduction 510 pand .L0x1c2_polynomial(%rip),$T3 511 pxor $T3,$Hkey # if(carry) H^=0x1c2_polynomial 512 513 # calculate H^2 514 pshufd \$0b01001110,$Hkey,$HK 515 movdqa $Hkey,$Xi 516 pxor $Hkey,$HK 517___ 518 &clmul64x64_T2 ($Xhi,$Xi,$Hkey,$HK); 519 &reduction_alg9 ($Xhi,$Xi); 520$code.=<<___; 521 pshufd \$0b01001110,$Hkey,$T1 522 pshufd \$0b01001110,$Xi,$T2 523 pxor $Hkey,$T1 # Karatsuba pre-processing 524 movdqu $Hkey,0x00($Htbl) # save H 525 pxor $Xi,$T2 # Karatsuba pre-processing 526 movdqu $Xi,0x10($Htbl) # save H^2 527 palignr \$8,$T1,$T2 # low part is H.lo^H.hi... 528 movdqu $T2,0x20($Htbl) # save Karatsuba "salt" 529___ 530if ($do4xaggr) { 531 &clmul64x64_T2 ($Xhi,$Xi,$Hkey,$HK); # H^3 532 &reduction_alg9 ($Xhi,$Xi); 533$code.=<<___; 534 movdqa $Xi,$T3 535___ 536 &clmul64x64_T2 ($Xhi,$Xi,$Hkey,$HK); # H^4 537 &reduction_alg9 ($Xhi,$Xi); 538$code.=<<___; 539 pshufd \$0b01001110,$T3,$T1 540 pshufd \$0b01001110,$Xi,$T2 541 pxor $T3,$T1 # Karatsuba pre-processing 542 movdqu $T3,0x30($Htbl) # save H^3 543 pxor $Xi,$T2 # Karatsuba pre-processing 544 movdqu $Xi,0x40($Htbl) # save H^4 545 palignr \$8,$T1,$T2 # low part is H^3.lo^H^3.hi... 546 movdqu $T2,0x50($Htbl) # save Karatsuba "salt" 547___ 548} 549$code.=<<___ if ($win64); 550 movaps (%rsp),%xmm6 551 lea 0x18(%rsp),%rsp 552.LSEH_end_gcm_init_clmul: 553___ 554$code.=<<___; 555 ret 556.size gcm_init_clmul,.-gcm_init_clmul 557___ 558} 559 560{ my ($Xip,$Htbl)=@_4args; 561 562$code.=<<___; 563.globl gcm_gmult_clmul 564.type gcm_gmult_clmul,\@abi-omnipotent 565.align 16 566gcm_gmult_clmul: 567.L_gmult_clmul: 568 movdqu ($Xip),$Xi 569 movdqa .Lbswap_mask(%rip),$T3 570 movdqu ($Htbl),$Hkey 571 movdqu 0x20($Htbl),$T2 572 pshufb $T3,$Xi 573___ 574 &clmul64x64_T2 ($Xhi,$Xi,$Hkey,$T2); 575$code.=<<___ if (0 || (&reduction_alg9($Xhi,$Xi)&&0)); 576 # experimental alternative. special thing about is that there 577 # no dependency between the two multiplications... 578 mov \$`0xE1<<1`,%eax 579 mov \$0xA040608020C0E000,%r10 # ((7..0)·0xE0)&0xff 580 mov \$0x07,%r11d 581 movq %rax,$T1 582 movq %r10,$T2 583 movq %r11,$T3 # borrow $T3 584 pand $Xi,$T3 585 pshufb $T3,$T2 # ($Xi&7)·0xE0 586 movq %rax,$T3 587 pclmulqdq \$0x00,$Xi,$T1 # ·(0xE1<<1) 588 pxor $Xi,$T2 589 pslldq \$15,$T2 590 paddd $T2,$T2 # <<(64+56+1) 591 pxor $T2,$Xi 592 pclmulqdq \$0x01,$T3,$Xi 593 movdqa .Lbswap_mask(%rip),$T3 # reload $T3 594 psrldq \$1,$T1 595 pxor $T1,$Xhi 596 pslldq \$7,$Xi 597 pxor $Xhi,$Xi 598___ 599$code.=<<___; 600 pshufb $T3,$Xi 601 movdqu $Xi,($Xip) 602 ret 603.size gcm_gmult_clmul,.-gcm_gmult_clmul 604___ 605} 606 607{ my ($Xip,$Htbl,$inp,$len)=@_4args; 608 my ($Xln,$Xmn,$Xhn,$Hkey2,$HK) = map("%xmm$_",(3..7)); 609 my ($T1,$T2,$T3)=map("%xmm$_",(8..10)); 610 611$code.=<<___; 612.globl gcm_ghash_clmul 613.type gcm_ghash_clmul,\@abi-omnipotent 614.align 32 615gcm_ghash_clmul: 616.L_ghash_clmul: 617___ 618$code.=<<___ if ($win64); 619 lea -0x88(%rsp),%rax 620.LSEH_begin_gcm_ghash_clmul: 621 # I can't trust assembler to use specific encoding:-( 622 .byte 0x48,0x8d,0x60,0xe0 #lea -0x20(%rax),%rsp 623 .byte 0x0f,0x29,0x70,0xe0 #movaps %xmm6,-0x20(%rax) 624 .byte 0x0f,0x29,0x78,0xf0 #movaps %xmm7,-0x10(%rax) 625 .byte 0x44,0x0f,0x29,0x00 #movaps %xmm8,0(%rax) 626 .byte 0x44,0x0f,0x29,0x48,0x10 #movaps %xmm9,0x10(%rax) 627 .byte 0x44,0x0f,0x29,0x50,0x20 #movaps %xmm10,0x20(%rax) 628 .byte 0x44,0x0f,0x29,0x58,0x30 #movaps %xmm11,0x30(%rax) 629 .byte 0x44,0x0f,0x29,0x60,0x40 #movaps %xmm12,0x40(%rax) 630 .byte 0x44,0x0f,0x29,0x68,0x50 #movaps %xmm13,0x50(%rax) 631 .byte 0x44,0x0f,0x29,0x70,0x60 #movaps %xmm14,0x60(%rax) 632 .byte 0x44,0x0f,0x29,0x78,0x70 #movaps %xmm15,0x70(%rax) 633___ 634$code.=<<___; 635 movdqa .Lbswap_mask(%rip),$T3 636 637 movdqu ($Xip),$Xi 638 movdqu ($Htbl),$Hkey 639 movdqu 0x20($Htbl),$HK 640 pshufb $T3,$Xi 641 642 sub \$0x10,$len 643 jz .Lodd_tail 644 645 movdqu 0x10($Htbl),$Hkey2 646___ 647if ($do4xaggr) { 648my ($Xl,$Xm,$Xh,$Hkey3,$Hkey4)=map("%xmm$_",(11..15)); 649 650$code.=<<___; 651 mov OPENSSL_ia32cap_P+4(%rip),%eax 652 cmp \$0x30,$len 653 jb .Lskip4x 654 655 and \$`1<<26|1<<22`,%eax # isolate MOVBE+XSAVE 656 cmp \$`1<<22`,%eax # check for MOVBE without XSAVE 657 je .Lskip4x 658 659 sub \$0x30,$len 660 mov \$0xA040608020C0E000,%rax # ((7..0)·0xE0)&0xff 661 movdqu 0x30($Htbl),$Hkey3 662 movdqu 0x40($Htbl),$Hkey4 663 664 ####### 665 # Xi+4 =[(H*Ii+3) + (H^2*Ii+2) + (H^3*Ii+1) + H^4*(Ii+Xi)] mod P 666 # 667 movdqu 0x30($inp),$Xln 668 movdqu 0x20($inp),$Xl 669 pshufb $T3,$Xln 670 pshufb $T3,$Xl 671 movdqa $Xln,$Xhn 672 pshufd \$0b01001110,$Xln,$Xmn 673 pxor $Xln,$Xmn 674 pclmulqdq \$0x00,$Hkey,$Xln 675 pclmulqdq \$0x11,$Hkey,$Xhn 676 pclmulqdq \$0x00,$HK,$Xmn 677 678 movdqa $Xl,$Xh 679 pshufd \$0b01001110,$Xl,$Xm 680 pxor $Xl,$Xm 681 pclmulqdq \$0x00,$Hkey2,$Xl 682 pclmulqdq \$0x11,$Hkey2,$Xh 683 pclmulqdq \$0x10,$HK,$Xm 684 xorps $Xl,$Xln 685 xorps $Xh,$Xhn 686 movups 0x50($Htbl),$HK 687 xorps $Xm,$Xmn 688 689 movdqu 0x10($inp),$Xl 690 movdqu 0($inp),$T1 691 pshufb $T3,$Xl 692 pshufb $T3,$T1 693 movdqa $Xl,$Xh 694 pshufd \$0b01001110,$Xl,$Xm 695 pxor $T1,$Xi 696 pxor $Xl,$Xm 697 pclmulqdq \$0x00,$Hkey3,$Xl 698 movdqa $Xi,$Xhi 699 pshufd \$0b01001110,$Xi,$T1 700 pxor $Xi,$T1 701 pclmulqdq \$0x11,$Hkey3,$Xh 702 pclmulqdq \$0x00,$HK,$Xm 703 xorps $Xl,$Xln 704 xorps $Xh,$Xhn 705 706 lea 0x40($inp),$inp 707 sub \$0x40,$len 708 jc .Ltail4x 709 710 jmp .Lmod4_loop 711.align 32 712.Lmod4_loop: 713 pclmulqdq \$0x00,$Hkey4,$Xi 714 xorps $Xm,$Xmn 715 movdqu 0x30($inp),$Xl 716 pshufb $T3,$Xl 717 pclmulqdq \$0x11,$Hkey4,$Xhi 718 xorps $Xln,$Xi 719 movdqu 0x20($inp),$Xln 720 movdqa $Xl,$Xh 721 pclmulqdq \$0x10,$HK,$T1 722 pshufd \$0b01001110,$Xl,$Xm 723 xorps $Xhn,$Xhi 724 pxor $Xl,$Xm 725 pshufb $T3,$Xln 726 movups 0x20($Htbl),$HK 727 xorps $Xmn,$T1 728 pclmulqdq \$0x00,$Hkey,$Xl 729 pshufd \$0b01001110,$Xln,$Xmn 730 731 pxor $Xi,$T1 # aggregated Karatsuba post-processing 732 movdqa $Xln,$Xhn 733 pxor $Xhi,$T1 # 734 pxor $Xln,$Xmn 735 movdqa $T1,$T2 # 736 pclmulqdq \$0x11,$Hkey,$Xh 737 pslldq \$8,$T1 738 psrldq \$8,$T2 # 739 pxor $T1,$Xi 740 movdqa .L7_mask(%rip),$T1 741 pxor $T2,$Xhi # 742 movq %rax,$T2 743 744 pand $Xi,$T1 # 1st phase 745 pshufb $T1,$T2 # 746 pxor $Xi,$T2 # 747 pclmulqdq \$0x00,$HK,$Xm 748 psllq \$57,$T2 # 749 movdqa $T2,$T1 # 750 pslldq \$8,$T2 751 pclmulqdq \$0x00,$Hkey2,$Xln 752 psrldq \$8,$T1 # 753 pxor $T2,$Xi 754 pxor $T1,$Xhi # 755 movdqu 0($inp),$T1 756 757 movdqa $Xi,$T2 # 2nd phase 758 psrlq \$1,$Xi 759 pclmulqdq \$0x11,$Hkey2,$Xhn 760 xorps $Xl,$Xln 761 movdqu 0x10($inp),$Xl 762 pshufb $T3,$Xl 763 pclmulqdq \$0x10,$HK,$Xmn 764 xorps $Xh,$Xhn 765 movups 0x50($Htbl),$HK 766 pshufb $T3,$T1 767 pxor $T2,$Xhi # 768 pxor $Xi,$T2 769 psrlq \$5,$Xi 770 771 movdqa $Xl,$Xh 772 pxor $Xm,$Xmn 773 pshufd \$0b01001110,$Xl,$Xm 774 pxor $T2,$Xi # 775 pxor $T1,$Xhi 776 pxor $Xl,$Xm 777 pclmulqdq \$0x00,$Hkey3,$Xl 778 psrlq \$1,$Xi # 779 pxor $Xhi,$Xi # 780 movdqa $Xi,$Xhi 781 pclmulqdq \$0x11,$Hkey3,$Xh 782 xorps $Xl,$Xln 783 pshufd \$0b01001110,$Xi,$T1 784 pxor $Xi,$T1 785 786 pclmulqdq \$0x00,$HK,$Xm 787 xorps $Xh,$Xhn 788 789 lea 0x40($inp),$inp 790 sub \$0x40,$len 791 jnc .Lmod4_loop 792 793.Ltail4x: 794 pclmulqdq \$0x00,$Hkey4,$Xi 795 pclmulqdq \$0x11,$Hkey4,$Xhi 796 pclmulqdq \$0x10,$HK,$T1 797 xorps $Xm,$Xmn 798 xorps $Xln,$Xi 799 xorps $Xhn,$Xhi 800 pxor $Xi,$Xhi # aggregated Karatsuba post-processing 801 pxor $Xmn,$T1 802 803 pxor $Xhi,$T1 # 804 pxor $Xi,$Xhi 805 806 movdqa $T1,$T2 # 807 psrldq \$8,$T1 808 pslldq \$8,$T2 # 809 pxor $T1,$Xhi 810 pxor $T2,$Xi # 811___ 812 &reduction_alg9($Xhi,$Xi); 813$code.=<<___; 814 add \$0x40,$len 815 jz .Ldone 816 movdqu 0x20($Htbl),$HK 817 sub \$0x10,$len 818 jz .Lodd_tail 819.Lskip4x: 820___ 821} 822$code.=<<___; 823 ####### 824 # Xi+2 =[H*(Ii+1 + Xi+1)] mod P = 825 # [(H*Ii+1) + (H*Xi+1)] mod P = 826 # [(H*Ii+1) + H^2*(Ii+Xi)] mod P 827 # 828 movdqu ($inp),$T1 # Ii 829 movdqu 16($inp),$Xln # Ii+1 830 pshufb $T3,$T1 831 pshufb $T3,$Xln 832 pxor $T1,$Xi # Ii+Xi 833 834 movdqa $Xln,$Xhn 835 pshufd \$0b01001110,$Xln,$Xmn 836 pxor $Xln,$Xmn 837 pclmulqdq \$0x00,$Hkey,$Xln 838 pclmulqdq \$0x11,$Hkey,$Xhn 839 pclmulqdq \$0x00,$HK,$Xmn 840 841 lea 32($inp),$inp # i+=2 842 nop 843 sub \$0x20,$len 844 jbe .Leven_tail 845 nop 846 jmp .Lmod_loop 847 848.align 32 849.Lmod_loop: 850 movdqa $Xi,$Xhi 851 movdqa $Xmn,$T1 852 pshufd \$0b01001110,$Xi,$Xmn # 853 pxor $Xi,$Xmn # 854 855 pclmulqdq \$0x00,$Hkey2,$Xi 856 pclmulqdq \$0x11,$Hkey2,$Xhi 857 pclmulqdq \$0x10,$HK,$Xmn 858 859 pxor $Xln,$Xi # (H*Ii+1) + H^2*(Ii+Xi) 860 pxor $Xhn,$Xhi 861 movdqu ($inp),$T2 # Ii 862 pxor $Xi,$T1 # aggregated Karatsuba post-processing 863 pshufb $T3,$T2 864 movdqu 16($inp),$Xln # Ii+1 865 866 pxor $Xhi,$T1 867 pxor $T2,$Xhi # "Ii+Xi", consume early 868 pxor $T1,$Xmn 869 pshufb $T3,$Xln 870 movdqa $Xmn,$T1 # 871 psrldq \$8,$T1 872 pslldq \$8,$Xmn # 873 pxor $T1,$Xhi 874 pxor $Xmn,$Xi # 875 876 movdqa $Xln,$Xhn # 877 878 movdqa $Xi,$T2 # 1st phase 879 movdqa $Xi,$T1 880 psllq \$5,$Xi 881 pxor $Xi,$T1 # 882 pclmulqdq \$0x00,$Hkey,$Xln ####### 883 psllq \$1,$Xi 884 pxor $T1,$Xi # 885 psllq \$57,$Xi # 886 movdqa $Xi,$T1 # 887 pslldq \$8,$Xi 888 psrldq \$8,$T1 # 889 pxor $T2,$Xi 890 pshufd \$0b01001110,$Xhn,$Xmn 891 pxor $T1,$Xhi # 892 pxor $Xhn,$Xmn # 893 894 movdqa $Xi,$T2 # 2nd phase 895 psrlq \$1,$Xi 896 pclmulqdq \$0x11,$Hkey,$Xhn ####### 897 pxor $T2,$Xhi # 898 pxor $Xi,$T2 899 psrlq \$5,$Xi 900 pxor $T2,$Xi # 901 lea 32($inp),$inp 902 psrlq \$1,$Xi # 903 pclmulqdq \$0x00,$HK,$Xmn ####### 904 pxor $Xhi,$Xi # 905 906 sub \$0x20,$len 907 ja .Lmod_loop 908 909.Leven_tail: 910 movdqa $Xi,$Xhi 911 movdqa $Xmn,$T1 912 pshufd \$0b01001110,$Xi,$Xmn # 913 pxor $Xi,$Xmn # 914 915 pclmulqdq \$0x00,$Hkey2,$Xi 916 pclmulqdq \$0x11,$Hkey2,$Xhi 917 pclmulqdq \$0x10,$HK,$Xmn 918 919 pxor $Xln,$Xi # (H*Ii+1) + H^2*(Ii+Xi) 920 pxor $Xhn,$Xhi 921 pxor $Xi,$T1 922 pxor $Xhi,$T1 923 pxor $T1,$Xmn 924 movdqa $Xmn,$T1 # 925 psrldq \$8,$T1 926 pslldq \$8,$Xmn # 927 pxor $T1,$Xhi 928 pxor $Xmn,$Xi # 929___ 930 &reduction_alg9 ($Xhi,$Xi); 931$code.=<<___; 932 test $len,$len 933 jnz .Ldone 934 935.Lodd_tail: 936 movdqu ($inp),$T1 # Ii 937 pshufb $T3,$T1 938 pxor $T1,$Xi # Ii+Xi 939___ 940 &clmul64x64_T2 ($Xhi,$Xi,$Hkey,$HK); # H*(Ii+Xi) 941 &reduction_alg9 ($Xhi,$Xi); 942$code.=<<___; 943.Ldone: 944 pshufb $T3,$Xi 945 movdqu $Xi,($Xip) 946___ 947$code.=<<___ if ($win64); 948 movaps (%rsp),%xmm6 949 movaps 0x10(%rsp),%xmm7 950 movaps 0x20(%rsp),%xmm8 951 movaps 0x30(%rsp),%xmm9 952 movaps 0x40(%rsp),%xmm10 953 movaps 0x50(%rsp),%xmm11 954 movaps 0x60(%rsp),%xmm12 955 movaps 0x70(%rsp),%xmm13 956 movaps 0x80(%rsp),%xmm14 957 movaps 0x90(%rsp),%xmm15 958 lea 0xa8(%rsp),%rsp 959.LSEH_end_gcm_ghash_clmul: 960___ 961$code.=<<___; 962 ret 963.size gcm_ghash_clmul,.-gcm_ghash_clmul 964___ 965} 966 967$code.=<<___; 968.globl gcm_init_avx 969.type gcm_init_avx,\@abi-omnipotent 970.align 32 971gcm_init_avx: 972___ 973if ($avx) { 974my ($Htbl,$Xip)=@_4args; 975my $HK="%xmm6"; 976 977$code.=<<___ if ($win64); 978.LSEH_begin_gcm_init_avx: 979 # I can't trust assembler to use specific encoding:-( 980 .byte 0x48,0x83,0xec,0x18 #sub $0x18,%rsp 981 .byte 0x0f,0x29,0x34,0x24 #movaps %xmm6,(%rsp) 982___ 983$code.=<<___; 984 vzeroupper 985 986 vmovdqu ($Xip),$Hkey 987 vpshufd \$0b01001110,$Hkey,$Hkey # dword swap 988 989 # <<1 twist 990 vpshufd \$0b11111111,$Hkey,$T2 # broadcast uppermost dword 991 vpsrlq \$63,$Hkey,$T1 992 vpsllq \$1,$Hkey,$Hkey 993 vpxor $T3,$T3,$T3 # 994 vpcmpgtd $T2,$T3,$T3 # broadcast carry bit 995 vpslldq \$8,$T1,$T1 996 vpor $T1,$Hkey,$Hkey # H<<=1 997 998 # magic reduction 999 vpand .L0x1c2_polynomial(%rip),$T3,$T3 1000 vpxor $T3,$Hkey,$Hkey # if(carry) H^=0x1c2_polynomial 1001 1002 vpunpckhqdq $Hkey,$Hkey,$HK 1003 vmovdqa $Hkey,$Xi 1004 vpxor $Hkey,$HK,$HK 1005 mov \$4,%r10 # up to H^8 1006 jmp .Linit_start_avx 1007___ 1008 1009sub clmul64x64_avx { 1010my ($Xhi,$Xi,$Hkey,$HK)=@_; 1011 1012if (!defined($HK)) { $HK = $T2; 1013$code.=<<___; 1014 vpunpckhqdq $Xi,$Xi,$T1 1015 vpunpckhqdq $Hkey,$Hkey,$T2 1016 vpxor $Xi,$T1,$T1 # 1017 vpxor $Hkey,$T2,$T2 1018___ 1019} else { 1020$code.=<<___; 1021 vpunpckhqdq $Xi,$Xi,$T1 1022 vpxor $Xi,$T1,$T1 # 1023___ 1024} 1025$code.=<<___; 1026 vpclmulqdq \$0x11,$Hkey,$Xi,$Xhi ####### 1027 vpclmulqdq \$0x00,$Hkey,$Xi,$Xi ####### 1028 vpclmulqdq \$0x00,$HK,$T1,$T1 ####### 1029 vpxor $Xi,$Xhi,$T2 # 1030 vpxor $T2,$T1,$T1 # 1031 1032 vpslldq \$8,$T1,$T2 # 1033 vpsrldq \$8,$T1,$T1 1034 vpxor $T2,$Xi,$Xi # 1035 vpxor $T1,$Xhi,$Xhi 1036___ 1037} 1038 1039sub reduction_avx { 1040my ($Xhi,$Xi) = @_; 1041 1042$code.=<<___; 1043 vpsllq \$57,$Xi,$T1 # 1st phase 1044 vpsllq \$62,$Xi,$T2 1045 vpxor $T1,$T2,$T2 # 1046 vpsllq \$63,$Xi,$T1 1047 vpxor $T1,$T2,$T2 # 1048 vpslldq \$8,$T2,$T1 # 1049 vpsrldq \$8,$T2,$T2 1050 vpxor $T1,$Xi,$Xi # 1051 vpxor $T2,$Xhi,$Xhi 1052 1053 vpsrlq \$1,$Xi,$T2 # 2nd phase 1054 vpxor $Xi,$Xhi,$Xhi 1055 vpxor $T2,$Xi,$Xi # 1056 vpsrlq \$5,$T2,$T2 1057 vpxor $T2,$Xi,$Xi # 1058 vpsrlq \$1,$Xi,$Xi # 1059 vpxor $Xhi,$Xi,$Xi # 1060___ 1061} 1062 1063$code.=<<___; 1064.align 32 1065.Linit_loop_avx: 1066 vpalignr \$8,$T1,$T2,$T3 # low part is H.lo^H.hi... 1067 vmovdqu $T3,-0x10($Htbl) # save Karatsuba "salt" 1068___ 1069 &clmul64x64_avx ($Xhi,$Xi,$Hkey,$HK); # calculate H^3,5,7 1070 &reduction_avx ($Xhi,$Xi); 1071$code.=<<___; 1072.Linit_start_avx: 1073 vmovdqa $Xi,$T3 1074___ 1075 &clmul64x64_avx ($Xhi,$Xi,$Hkey,$HK); # calculate H^2,4,6,8 1076 &reduction_avx ($Xhi,$Xi); 1077$code.=<<___; 1078 vpshufd \$0b01001110,$T3,$T1 1079 vpshufd \$0b01001110,$Xi,$T2 1080 vpxor $T3,$T1,$T1 # Karatsuba pre-processing 1081 vmovdqu $T3,0x00($Htbl) # save H^1,3,5,7 1082 vpxor $Xi,$T2,$T2 # Karatsuba pre-processing 1083 vmovdqu $Xi,0x10($Htbl) # save H^2,4,6,8 1084 lea 0x30($Htbl),$Htbl 1085 sub \$1,%r10 1086 jnz .Linit_loop_avx 1087 1088 vpalignr \$8,$T2,$T1,$T3 # last "salt" is flipped 1089 vmovdqu $T3,-0x10($Htbl) 1090 1091 vzeroupper 1092___ 1093$code.=<<___ if ($win64); 1094 movaps (%rsp),%xmm6 1095 lea 0x18(%rsp),%rsp 1096.LSEH_end_gcm_init_avx: 1097___ 1098$code.=<<___; 1099 ret 1100.size gcm_init_avx,.-gcm_init_avx 1101___ 1102} else { 1103$code.=<<___; 1104 jmp .L_init_clmul 1105.size gcm_init_avx,.-gcm_init_avx 1106___ 1107} 1108 1109$code.=<<___; 1110.globl gcm_gmult_avx 1111.type gcm_gmult_avx,\@abi-omnipotent 1112.align 32 1113gcm_gmult_avx: 1114 jmp .L_gmult_clmul 1115.size gcm_gmult_avx,.-gcm_gmult_avx 1116___ 1117 1118$code.=<<___; 1119.globl gcm_ghash_avx 1120.type gcm_ghash_avx,\@abi-omnipotent 1121.align 32 1122gcm_ghash_avx: 1123___ 1124if ($avx) { 1125my ($Xip,$Htbl,$inp,$len)=@_4args; 1126my ($Xlo,$Xhi,$Xmi, 1127 $Zlo,$Zhi,$Zmi, 1128 $Hkey,$HK,$T1,$T2, 1129 $Xi,$Xo,$Tred,$bswap,$Ii,$Ij) = map("%xmm$_",(0..15)); 1130 1131$code.=<<___ if ($win64); 1132 lea -0x88(%rsp),%rax 1133.LSEH_begin_gcm_ghash_avx: 1134 # I can't trust assembler to use specific encoding:-( 1135 .byte 0x48,0x8d,0x60,0xe0 #lea -0x20(%rax),%rsp 1136 .byte 0x0f,0x29,0x70,0xe0 #movaps %xmm6,-0x20(%rax) 1137 .byte 0x0f,0x29,0x78,0xf0 #movaps %xmm7,-0x10(%rax) 1138 .byte 0x44,0x0f,0x29,0x00 #movaps %xmm8,0(%rax) 1139 .byte 0x44,0x0f,0x29,0x48,0x10 #movaps %xmm9,0x10(%rax) 1140 .byte 0x44,0x0f,0x29,0x50,0x20 #movaps %xmm10,0x20(%rax) 1141 .byte 0x44,0x0f,0x29,0x58,0x30 #movaps %xmm11,0x30(%rax) 1142 .byte 0x44,0x0f,0x29,0x60,0x40 #movaps %xmm12,0x40(%rax) 1143 .byte 0x44,0x0f,0x29,0x68,0x50 #movaps %xmm13,0x50(%rax) 1144 .byte 0x44,0x0f,0x29,0x70,0x60 #movaps %xmm14,0x60(%rax) 1145 .byte 0x44,0x0f,0x29,0x78,0x70 #movaps %xmm15,0x70(%rax) 1146___ 1147$code.=<<___; 1148 vzeroupper 1149 1150 vmovdqu ($Xip),$Xi # load $Xi 1151 lea .L0x1c2_polynomial(%rip),%r10 1152 lea 0x40($Htbl),$Htbl # size optimization 1153 vmovdqu .Lbswap_mask(%rip),$bswap 1154 vpshufb $bswap,$Xi,$Xi 1155 cmp \$0x80,$len 1156 jb .Lshort_avx 1157 sub \$0x80,$len 1158 1159 vmovdqu 0x70($inp),$Ii # I[7] 1160 vmovdqu 0x00-0x40($Htbl),$Hkey # $Hkey^1 1161 vpshufb $bswap,$Ii,$Ii 1162 vmovdqu 0x20-0x40($Htbl),$HK 1163 1164 vpunpckhqdq $Ii,$Ii,$T2 1165 vmovdqu 0x60($inp),$Ij # I[6] 1166 vpclmulqdq \$0x00,$Hkey,$Ii,$Xlo 1167 vpxor $Ii,$T2,$T2 1168 vpshufb $bswap,$Ij,$Ij 1169 vpclmulqdq \$0x11,$Hkey,$Ii,$Xhi 1170 vmovdqu 0x10-0x40($Htbl),$Hkey # $Hkey^2 1171 vpunpckhqdq $Ij,$Ij,$T1 1172 vmovdqu 0x50($inp),$Ii # I[5] 1173 vpclmulqdq \$0x00,$HK,$T2,$Xmi 1174 vpxor $Ij,$T1,$T1 1175 1176 vpshufb $bswap,$Ii,$Ii 1177 vpclmulqdq \$0x00,$Hkey,$Ij,$Zlo 1178 vpunpckhqdq $Ii,$Ii,$T2 1179 vpclmulqdq \$0x11,$Hkey,$Ij,$Zhi 1180 vmovdqu 0x30-0x40($Htbl),$Hkey # $Hkey^3 1181 vpxor $Ii,$T2,$T2 1182 vmovdqu 0x40($inp),$Ij # I[4] 1183 vpclmulqdq \$0x10,$HK,$T1,$Zmi 1184 vmovdqu 0x50-0x40($Htbl),$HK 1185 1186 vpshufb $bswap,$Ij,$Ij 1187 vpxor $Xlo,$Zlo,$Zlo 1188 vpclmulqdq \$0x00,$Hkey,$Ii,$Xlo 1189 vpxor $Xhi,$Zhi,$Zhi 1190 vpunpckhqdq $Ij,$Ij,$T1 1191 vpclmulqdq \$0x11,$Hkey,$Ii,$Xhi 1192 vmovdqu 0x40-0x40($Htbl),$Hkey # $Hkey^4 1193 vpxor $Xmi,$Zmi,$Zmi 1194 vpclmulqdq \$0x00,$HK,$T2,$Xmi 1195 vpxor $Ij,$T1,$T1 1196 1197 vmovdqu 0x30($inp),$Ii # I[3] 1198 vpxor $Zlo,$Xlo,$Xlo 1199 vpclmulqdq \$0x00,$Hkey,$Ij,$Zlo 1200 vpxor $Zhi,$Xhi,$Xhi 1201 vpshufb $bswap,$Ii,$Ii 1202 vpclmulqdq \$0x11,$Hkey,$Ij,$Zhi 1203 vmovdqu 0x60-0x40($Htbl),$Hkey # $Hkey^5 1204 vpxor $Zmi,$Xmi,$Xmi 1205 vpunpckhqdq $Ii,$Ii,$T2 1206 vpclmulqdq \$0x10,$HK,$T1,$Zmi 1207 vmovdqu 0x80-0x40($Htbl),$HK 1208 vpxor $Ii,$T2,$T2 1209 1210 vmovdqu 0x20($inp),$Ij # I[2] 1211 vpxor $Xlo,$Zlo,$Zlo 1212 vpclmulqdq \$0x00,$Hkey,$Ii,$Xlo 1213 vpxor $Xhi,$Zhi,$Zhi 1214 vpshufb $bswap,$Ij,$Ij 1215 vpclmulqdq \$0x11,$Hkey,$Ii,$Xhi 1216 vmovdqu 0x70-0x40($Htbl),$Hkey # $Hkey^6 1217 vpxor $Xmi,$Zmi,$Zmi 1218 vpunpckhqdq $Ij,$Ij,$T1 1219 vpclmulqdq \$0x00,$HK,$T2,$Xmi 1220 vpxor $Ij,$T1,$T1 1221 1222 vmovdqu 0x10($inp),$Ii # I[1] 1223 vpxor $Zlo,$Xlo,$Xlo 1224 vpclmulqdq \$0x00,$Hkey,$Ij,$Zlo 1225 vpxor $Zhi,$Xhi,$Xhi 1226 vpshufb $bswap,$Ii,$Ii 1227 vpclmulqdq \$0x11,$Hkey,$Ij,$Zhi 1228 vmovdqu 0x90-0x40($Htbl),$Hkey # $Hkey^7 1229 vpxor $Zmi,$Xmi,$Xmi 1230 vpunpckhqdq $Ii,$Ii,$T2 1231 vpclmulqdq \$0x10,$HK,$T1,$Zmi 1232 vmovdqu 0xb0-0x40($Htbl),$HK 1233 vpxor $Ii,$T2,$T2 1234 1235 vmovdqu ($inp),$Ij # I[0] 1236 vpxor $Xlo,$Zlo,$Zlo 1237 vpclmulqdq \$0x00,$Hkey,$Ii,$Xlo 1238 vpxor $Xhi,$Zhi,$Zhi 1239 vpshufb $bswap,$Ij,$Ij 1240 vpclmulqdq \$0x11,$Hkey,$Ii,$Xhi 1241 vmovdqu 0xa0-0x40($Htbl),$Hkey # $Hkey^8 1242 vpxor $Xmi,$Zmi,$Zmi 1243 vpclmulqdq \$0x10,$HK,$T2,$Xmi 1244 1245 lea 0x80($inp),$inp 1246 cmp \$0x80,$len 1247 jb .Ltail_avx 1248 1249 vpxor $Xi,$Ij,$Ij # accumulate $Xi 1250 sub \$0x80,$len 1251 jmp .Loop8x_avx 1252 1253.align 32 1254.Loop8x_avx: 1255 vpunpckhqdq $Ij,$Ij,$T1 1256 vmovdqu 0x70($inp),$Ii # I[7] 1257 vpxor $Xlo,$Zlo,$Zlo 1258 vpxor $Ij,$T1,$T1 1259 vpclmulqdq \$0x00,$Hkey,$Ij,$Xi 1260 vpshufb $bswap,$Ii,$Ii 1261 vpxor $Xhi,$Zhi,$Zhi 1262 vpclmulqdq \$0x11,$Hkey,$Ij,$Xo 1263 vmovdqu 0x00-0x40($Htbl),$Hkey # $Hkey^1 1264 vpunpckhqdq $Ii,$Ii,$T2 1265 vpxor $Xmi,$Zmi,$Zmi 1266 vpclmulqdq \$0x00,$HK,$T1,$Tred 1267 vmovdqu 0x20-0x40($Htbl),$HK 1268 vpxor $Ii,$T2,$T2 1269 1270 vmovdqu 0x60($inp),$Ij # I[6] 1271 vpclmulqdq \$0x00,$Hkey,$Ii,$Xlo 1272 vpxor $Zlo,$Xi,$Xi # collect result 1273 vpshufb $bswap,$Ij,$Ij 1274 vpclmulqdq \$0x11,$Hkey,$Ii,$Xhi 1275 vxorps $Zhi,$Xo,$Xo 1276 vmovdqu 0x10-0x40($Htbl),$Hkey # $Hkey^2 1277 vpunpckhqdq $Ij,$Ij,$T1 1278 vpclmulqdq \$0x00,$HK, $T2,$Xmi 1279 vpxor $Zmi,$Tred,$Tred 1280 vxorps $Ij,$T1,$T1 1281 1282 vmovdqu 0x50($inp),$Ii # I[5] 1283 vpxor $Xi,$Tred,$Tred # aggregated Karatsuba post-processing 1284 vpclmulqdq \$0x00,$Hkey,$Ij,$Zlo 1285 vpxor $Xo,$Tred,$Tred 1286 vpslldq \$8,$Tred,$T2 1287 vpxor $Xlo,$Zlo,$Zlo 1288 vpclmulqdq \$0x11,$Hkey,$Ij,$Zhi 1289 vpsrldq \$8,$Tred,$Tred 1290 vpxor $T2, $Xi, $Xi 1291 vmovdqu 0x30-0x40($Htbl),$Hkey # $Hkey^3 1292 vpshufb $bswap,$Ii,$Ii 1293 vxorps $Tred,$Xo, $Xo 1294 vpxor $Xhi,$Zhi,$Zhi 1295 vpunpckhqdq $Ii,$Ii,$T2 1296 vpclmulqdq \$0x10,$HK, $T1,$Zmi 1297 vmovdqu 0x50-0x40($Htbl),$HK 1298 vpxor $Ii,$T2,$T2 1299 vpxor $Xmi,$Zmi,$Zmi 1300 1301 vmovdqu 0x40($inp),$Ij # I[4] 1302 vpalignr \$8,$Xi,$Xi,$Tred # 1st phase 1303 vpclmulqdq \$0x00,$Hkey,$Ii,$Xlo 1304 vpshufb $bswap,$Ij,$Ij 1305 vpxor $Zlo,$Xlo,$Xlo 1306 vpclmulqdq \$0x11,$Hkey,$Ii,$Xhi 1307 vmovdqu 0x40-0x40($Htbl),$Hkey # $Hkey^4 1308 vpunpckhqdq $Ij,$Ij,$T1 1309 vpxor $Zhi,$Xhi,$Xhi 1310 vpclmulqdq \$0x00,$HK, $T2,$Xmi 1311 vxorps $Ij,$T1,$T1 1312 vpxor $Zmi,$Xmi,$Xmi 1313 1314 vmovdqu 0x30($inp),$Ii # I[3] 1315 vpclmulqdq \$0x10,(%r10),$Xi,$Xi 1316 vpclmulqdq \$0x00,$Hkey,$Ij,$Zlo 1317 vpshufb $bswap,$Ii,$Ii 1318 vpxor $Xlo,$Zlo,$Zlo 1319 vpclmulqdq \$0x11,$Hkey,$Ij,$Zhi 1320 vmovdqu 0x60-0x40($Htbl),$Hkey # $Hkey^5 1321 vpunpckhqdq $Ii,$Ii,$T2 1322 vpxor $Xhi,$Zhi,$Zhi 1323 vpclmulqdq \$0x10,$HK, $T1,$Zmi 1324 vmovdqu 0x80-0x40($Htbl),$HK 1325 vpxor $Ii,$T2,$T2 1326 vpxor $Xmi,$Zmi,$Zmi 1327 1328 vmovdqu 0x20($inp),$Ij # I[2] 1329 vpclmulqdq \$0x00,$Hkey,$Ii,$Xlo 1330 vpshufb $bswap,$Ij,$Ij 1331 vpxor $Zlo,$Xlo,$Xlo 1332 vpclmulqdq \$0x11,$Hkey,$Ii,$Xhi 1333 vmovdqu 0x70-0x40($Htbl),$Hkey # $Hkey^6 1334 vpunpckhqdq $Ij,$Ij,$T1 1335 vpxor $Zhi,$Xhi,$Xhi 1336 vpclmulqdq \$0x00,$HK, $T2,$Xmi 1337 vpxor $Ij,$T1,$T1 1338 vpxor $Zmi,$Xmi,$Xmi 1339 vxorps $Tred,$Xi,$Xi 1340 1341 vmovdqu 0x10($inp),$Ii # I[1] 1342 vpalignr \$8,$Xi,$Xi,$Tred # 2nd phase 1343 vpclmulqdq \$0x00,$Hkey,$Ij,$Zlo 1344 vpshufb $bswap,$Ii,$Ii 1345 vpxor $Xlo,$Zlo,$Zlo 1346 vpclmulqdq \$0x11,$Hkey,$Ij,$Zhi 1347 vmovdqu 0x90-0x40($Htbl),$Hkey # $Hkey^7 1348 vpclmulqdq \$0x10,(%r10),$Xi,$Xi 1349 vxorps $Xo,$Tred,$Tred 1350 vpunpckhqdq $Ii,$Ii,$T2 1351 vpxor $Xhi,$Zhi,$Zhi 1352 vpclmulqdq \$0x10,$HK, $T1,$Zmi 1353 vmovdqu 0xb0-0x40($Htbl),$HK 1354 vpxor $Ii,$T2,$T2 1355 vpxor $Xmi,$Zmi,$Zmi 1356 1357 vmovdqu ($inp),$Ij # I[0] 1358 vpclmulqdq \$0x00,$Hkey,$Ii,$Xlo 1359 vpshufb $bswap,$Ij,$Ij 1360 vpclmulqdq \$0x11,$Hkey,$Ii,$Xhi 1361 vmovdqu 0xa0-0x40($Htbl),$Hkey # $Hkey^8 1362 vpxor $Tred,$Ij,$Ij 1363 vpclmulqdq \$0x10,$HK, $T2,$Xmi 1364 vpxor $Xi,$Ij,$Ij # accumulate $Xi 1365 1366 lea 0x80($inp),$inp 1367 sub \$0x80,$len 1368 jnc .Loop8x_avx 1369 1370 add \$0x80,$len 1371 jmp .Ltail_no_xor_avx 1372 1373.align 32 1374.Lshort_avx: 1375 vmovdqu -0x10($inp,$len),$Ii # very last word 1376 lea ($inp,$len),$inp 1377 vmovdqu 0x00-0x40($Htbl),$Hkey # $Hkey^1 1378 vmovdqu 0x20-0x40($Htbl),$HK 1379 vpshufb $bswap,$Ii,$Ij 1380 1381 vmovdqa $Xlo,$Zlo # subtle way to zero $Zlo, 1382 vmovdqa $Xhi,$Zhi # $Zhi and 1383 vmovdqa $Xmi,$Zmi # $Zmi 1384 sub \$0x10,$len 1385 jz .Ltail_avx 1386 1387 vpunpckhqdq $Ij,$Ij,$T1 1388 vpxor $Xlo,$Zlo,$Zlo 1389 vpclmulqdq \$0x00,$Hkey,$Ij,$Xlo 1390 vpxor $Ij,$T1,$T1 1391 vmovdqu -0x20($inp),$Ii 1392 vpxor $Xhi,$Zhi,$Zhi 1393 vpclmulqdq \$0x11,$Hkey,$Ij,$Xhi 1394 vmovdqu 0x10-0x40($Htbl),$Hkey # $Hkey^2 1395 vpshufb $bswap,$Ii,$Ij 1396 vpxor $Xmi,$Zmi,$Zmi 1397 vpclmulqdq \$0x00,$HK,$T1,$Xmi 1398 vpsrldq \$8,$HK,$HK 1399 sub \$0x10,$len 1400 jz .Ltail_avx 1401 1402 vpunpckhqdq $Ij,$Ij,$T1 1403 vpxor $Xlo,$Zlo,$Zlo 1404 vpclmulqdq \$0x00,$Hkey,$Ij,$Xlo 1405 vpxor $Ij,$T1,$T1 1406 vmovdqu -0x30($inp),$Ii 1407 vpxor $Xhi,$Zhi,$Zhi 1408 vpclmulqdq \$0x11,$Hkey,$Ij,$Xhi 1409 vmovdqu 0x30-0x40($Htbl),$Hkey # $Hkey^3 1410 vpshufb $bswap,$Ii,$Ij 1411 vpxor $Xmi,$Zmi,$Zmi 1412 vpclmulqdq \$0x00,$HK,$T1,$Xmi 1413 vmovdqu 0x50-0x40($Htbl),$HK 1414 sub \$0x10,$len 1415 jz .Ltail_avx 1416 1417 vpunpckhqdq $Ij,$Ij,$T1 1418 vpxor $Xlo,$Zlo,$Zlo 1419 vpclmulqdq \$0x00,$Hkey,$Ij,$Xlo 1420 vpxor $Ij,$T1,$T1 1421 vmovdqu -0x40($inp),$Ii 1422 vpxor $Xhi,$Zhi,$Zhi 1423 vpclmulqdq \$0x11,$Hkey,$Ij,$Xhi 1424 vmovdqu 0x40-0x40($Htbl),$Hkey # $Hkey^4 1425 vpshufb $bswap,$Ii,$Ij 1426 vpxor $Xmi,$Zmi,$Zmi 1427 vpclmulqdq \$0x00,$HK,$T1,$Xmi 1428 vpsrldq \$8,$HK,$HK 1429 sub \$0x10,$len 1430 jz .Ltail_avx 1431 1432 vpunpckhqdq $Ij,$Ij,$T1 1433 vpxor $Xlo,$Zlo,$Zlo 1434 vpclmulqdq \$0x00,$Hkey,$Ij,$Xlo 1435 vpxor $Ij,$T1,$T1 1436 vmovdqu -0x50($inp),$Ii 1437 vpxor $Xhi,$Zhi,$Zhi 1438 vpclmulqdq \$0x11,$Hkey,$Ij,$Xhi 1439 vmovdqu 0x60-0x40($Htbl),$Hkey # $Hkey^5 1440 vpshufb $bswap,$Ii,$Ij 1441 vpxor $Xmi,$Zmi,$Zmi 1442 vpclmulqdq \$0x00,$HK,$T1,$Xmi 1443 vmovdqu 0x80-0x40($Htbl),$HK 1444 sub \$0x10,$len 1445 jz .Ltail_avx 1446 1447 vpunpckhqdq $Ij,$Ij,$T1 1448 vpxor $Xlo,$Zlo,$Zlo 1449 vpclmulqdq \$0x00,$Hkey,$Ij,$Xlo 1450 vpxor $Ij,$T1,$T1 1451 vmovdqu -0x60($inp),$Ii 1452 vpxor $Xhi,$Zhi,$Zhi 1453 vpclmulqdq \$0x11,$Hkey,$Ij,$Xhi 1454 vmovdqu 0x70-0x40($Htbl),$Hkey # $Hkey^6 1455 vpshufb $bswap,$Ii,$Ij 1456 vpxor $Xmi,$Zmi,$Zmi 1457 vpclmulqdq \$0x00,$HK,$T1,$Xmi 1458 vpsrldq \$8,$HK,$HK 1459 sub \$0x10,$len 1460 jz .Ltail_avx 1461 1462 vpunpckhqdq $Ij,$Ij,$T1 1463 vpxor $Xlo,$Zlo,$Zlo 1464 vpclmulqdq \$0x00,$Hkey,$Ij,$Xlo 1465 vpxor $Ij,$T1,$T1 1466 vmovdqu -0x70($inp),$Ii 1467 vpxor $Xhi,$Zhi,$Zhi 1468 vpclmulqdq \$0x11,$Hkey,$Ij,$Xhi 1469 vmovdqu 0x90-0x40($Htbl),$Hkey # $Hkey^7 1470 vpshufb $bswap,$Ii,$Ij 1471 vpxor $Xmi,$Zmi,$Zmi 1472 vpclmulqdq \$0x00,$HK,$T1,$Xmi 1473 vmovq 0xb8-0x40($Htbl),$HK 1474 sub \$0x10,$len 1475 jmp .Ltail_avx 1476 1477.align 32 1478.Ltail_avx: 1479 vpxor $Xi,$Ij,$Ij # accumulate $Xi 1480.Ltail_no_xor_avx: 1481 vpunpckhqdq $Ij,$Ij,$T1 1482 vpxor $Xlo,$Zlo,$Zlo 1483 vpclmulqdq \$0x00,$Hkey,$Ij,$Xlo 1484 vpxor $Ij,$T1,$T1 1485 vpxor $Xhi,$Zhi,$Zhi 1486 vpclmulqdq \$0x11,$Hkey,$Ij,$Xhi 1487 vpxor $Xmi,$Zmi,$Zmi 1488 vpclmulqdq \$0x00,$HK,$T1,$Xmi 1489 1490 vmovdqu (%r10),$Tred 1491 1492 vpxor $Xlo,$Zlo,$Xi 1493 vpxor $Xhi,$Zhi,$Xo 1494 vpxor $Xmi,$Zmi,$Zmi 1495 1496 vpxor $Xi, $Zmi,$Zmi # aggregated Karatsuba post-processing 1497 vpxor $Xo, $Zmi,$Zmi 1498 vpslldq \$8, $Zmi,$T2 1499 vpsrldq \$8, $Zmi,$Zmi 1500 vpxor $T2, $Xi, $Xi 1501 vpxor $Zmi,$Xo, $Xo 1502 1503 vpclmulqdq \$0x10,$Tred,$Xi,$T2 # 1st phase 1504 vpalignr \$8,$Xi,$Xi,$Xi 1505 vpxor $T2,$Xi,$Xi 1506 1507 vpclmulqdq \$0x10,$Tred,$Xi,$T2 # 2nd phase 1508 vpalignr \$8,$Xi,$Xi,$Xi 1509 vpxor $Xo,$Xi,$Xi 1510 vpxor $T2,$Xi,$Xi 1511 1512 cmp \$0,$len 1513 jne .Lshort_avx 1514 1515 vpshufb $bswap,$Xi,$Xi 1516 vmovdqu $Xi,($Xip) 1517 vzeroupper 1518___ 1519$code.=<<___ if ($win64); 1520 movaps (%rsp),%xmm6 1521 movaps 0x10(%rsp),%xmm7 1522 movaps 0x20(%rsp),%xmm8 1523 movaps 0x30(%rsp),%xmm9 1524 movaps 0x40(%rsp),%xmm10 1525 movaps 0x50(%rsp),%xmm11 1526 movaps 0x60(%rsp),%xmm12 1527 movaps 0x70(%rsp),%xmm13 1528 movaps 0x80(%rsp),%xmm14 1529 movaps 0x90(%rsp),%xmm15 1530 lea 0xa8(%rsp),%rsp 1531.LSEH_end_gcm_ghash_avx: 1532___ 1533$code.=<<___; 1534 ret 1535.size gcm_ghash_avx,.-gcm_ghash_avx 1536___ 1537} else { 1538$code.=<<___; 1539 jmp .L_ghash_clmul 1540.size gcm_ghash_avx,.-gcm_ghash_avx 1541___ 1542} 1543 1544$code.=<<___; 1545.align 64 1546.Lbswap_mask: 1547 .byte 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0 1548.L0x1c2_polynomial: 1549 .byte 1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0xc2 1550.L7_mask: 1551 .long 7,0,7,0 1552.L7_mask_poly: 1553 .long 7,0,`0xE1<<1`,0 1554.align 64 1555.type .Lrem_4bit,\@object 1556.Lrem_4bit: 1557 .long 0,`0x0000<<16`,0,`0x1C20<<16`,0,`0x3840<<16`,0,`0x2460<<16` 1558 .long 0,`0x7080<<16`,0,`0x6CA0<<16`,0,`0x48C0<<16`,0,`0x54E0<<16` 1559 .long 0,`0xE100<<16`,0,`0xFD20<<16`,0,`0xD940<<16`,0,`0xC560<<16` 1560 .long 0,`0x9180<<16`,0,`0x8DA0<<16`,0,`0xA9C0<<16`,0,`0xB5E0<<16` 1561.type .Lrem_8bit,\@object 1562.Lrem_8bit: 1563 .value 0x0000,0x01C2,0x0384,0x0246,0x0708,0x06CA,0x048C,0x054E 1564 .value 0x0E10,0x0FD2,0x0D94,0x0C56,0x0918,0x08DA,0x0A9C,0x0B5E 1565 .value 0x1C20,0x1DE2,0x1FA4,0x1E66,0x1B28,0x1AEA,0x18AC,0x196E 1566 .value 0x1230,0x13F2,0x11B4,0x1076,0x1538,0x14FA,0x16BC,0x177E 1567 .value 0x3840,0x3982,0x3BC4,0x3A06,0x3F48,0x3E8A,0x3CCC,0x3D0E 1568 .value 0x3650,0x3792,0x35D4,0x3416,0x3158,0x309A,0x32DC,0x331E 1569 .value 0x2460,0x25A2,0x27E4,0x2626,0x2368,0x22AA,0x20EC,0x212E 1570 .value 0x2A70,0x2BB2,0x29F4,0x2836,0x2D78,0x2CBA,0x2EFC,0x2F3E 1571 .value 0x7080,0x7142,0x7304,0x72C6,0x7788,0x764A,0x740C,0x75CE 1572 .value 0x7E90,0x7F52,0x7D14,0x7CD6,0x7998,0x785A,0x7A1C,0x7BDE 1573 .value 0x6CA0,0x6D62,0x6F24,0x6EE6,0x6BA8,0x6A6A,0x682C,0x69EE 1574 .value 0x62B0,0x6372,0x6134,0x60F6,0x65B8,0x647A,0x663C,0x67FE 1575 .value 0x48C0,0x4902,0x4B44,0x4A86,0x4FC8,0x4E0A,0x4C4C,0x4D8E 1576 .value 0x46D0,0x4712,0x4554,0x4496,0x41D8,0x401A,0x425C,0x439E 1577 .value 0x54E0,0x5522,0x5764,0x56A6,0x53E8,0x522A,0x506C,0x51AE 1578 .value 0x5AF0,0x5B32,0x5974,0x58B6,0x5DF8,0x5C3A,0x5E7C,0x5FBE 1579 .value 0xE100,0xE0C2,0xE284,0xE346,0xE608,0xE7CA,0xE58C,0xE44E 1580 .value 0xEF10,0xEED2,0xEC94,0xED56,0xE818,0xE9DA,0xEB9C,0xEA5E 1581 .value 0xFD20,0xFCE2,0xFEA4,0xFF66,0xFA28,0xFBEA,0xF9AC,0xF86E 1582 .value 0xF330,0xF2F2,0xF0B4,0xF176,0xF438,0xF5FA,0xF7BC,0xF67E 1583 .value 0xD940,0xD882,0xDAC4,0xDB06,0xDE48,0xDF8A,0xDDCC,0xDC0E 1584 .value 0xD750,0xD692,0xD4D4,0xD516,0xD058,0xD19A,0xD3DC,0xD21E 1585 .value 0xC560,0xC4A2,0xC6E4,0xC726,0xC268,0xC3AA,0xC1EC,0xC02E 1586 .value 0xCB70,0xCAB2,0xC8F4,0xC936,0xCC78,0xCDBA,0xCFFC,0xCE3E 1587 .value 0x9180,0x9042,0x9204,0x93C6,0x9688,0x974A,0x950C,0x94CE 1588 .value 0x9F90,0x9E52,0x9C14,0x9DD6,0x9898,0x995A,0x9B1C,0x9ADE 1589 .value 0x8DA0,0x8C62,0x8E24,0x8FE6,0x8AA8,0x8B6A,0x892C,0x88EE 1590 .value 0x83B0,0x8272,0x8034,0x81F6,0x84B8,0x857A,0x873C,0x86FE 1591 .value 0xA9C0,0xA802,0xAA44,0xAB86,0xAEC8,0xAF0A,0xAD4C,0xAC8E 1592 .value 0xA7D0,0xA612,0xA454,0xA596,0xA0D8,0xA11A,0xA35C,0xA29E 1593 .value 0xB5E0,0xB422,0xB664,0xB7A6,0xB2E8,0xB32A,0xB16C,0xB0AE 1594 .value 0xBBF0,0xBA32,0xB874,0xB9B6,0xBCF8,0xBD3A,0xBF7C,0xBEBE 1595 1596.asciz "GHASH for x86_64, CRYPTOGAMS by <appro\@openssl.org>" 1597.align 64 1598___ 1599 1600# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame, 1601# CONTEXT *context,DISPATCHER_CONTEXT *disp) 1602if ($win64) { 1603$rec="%rcx"; 1604$frame="%rdx"; 1605$context="%r8"; 1606$disp="%r9"; 1607 1608$code.=<<___; 1609.extern __imp_RtlVirtualUnwind 1610.type se_handler,\@abi-omnipotent 1611.align 16 1612se_handler: 1613 push %rsi 1614 push %rdi 1615 push %rbx 1616 push %rbp 1617 push %r12 1618 push %r13 1619 push %r14 1620 push %r15 1621 pushfq 1622 sub \$64,%rsp 1623 1624 mov 120($context),%rax # pull context->Rax 1625 mov 248($context),%rbx # pull context->Rip 1626 1627 mov 8($disp),%rsi # disp->ImageBase 1628 mov 56($disp),%r11 # disp->HandlerData 1629 1630 mov 0(%r11),%r10d # HandlerData[0] 1631 lea (%rsi,%r10),%r10 # prologue label 1632 cmp %r10,%rbx # context->Rip<prologue label 1633 jb .Lin_prologue 1634 1635 mov 152($context),%rax # pull context->Rsp 1636 1637 mov 4(%r11),%r10d # HandlerData[1] 1638 lea (%rsi,%r10),%r10 # epilogue label 1639 cmp %r10,%rbx # context->Rip>=epilogue label 1640 jae .Lin_prologue 1641 1642 lea 24(%rax),%rax # adjust "rsp" 1643 1644 mov -8(%rax),%rbx 1645 mov -16(%rax),%rbp 1646 mov -24(%rax),%r12 1647 mov %rbx,144($context) # restore context->Rbx 1648 mov %rbp,160($context) # restore context->Rbp 1649 mov %r12,216($context) # restore context->R12 1650 1651.Lin_prologue: 1652 mov 8(%rax),%rdi 1653 mov 16(%rax),%rsi 1654 mov %rax,152($context) # restore context->Rsp 1655 mov %rsi,168($context) # restore context->Rsi 1656 mov %rdi,176($context) # restore context->Rdi 1657 1658 mov 40($disp),%rdi # disp->ContextRecord 1659 mov $context,%rsi # context 1660 mov \$`1232/8`,%ecx # sizeof(CONTEXT) 1661 .long 0xa548f3fc # cld; rep movsq 1662 1663 mov $disp,%rsi 1664 xor %rcx,%rcx # arg1, UNW_FLAG_NHANDLER 1665 mov 8(%rsi),%rdx # arg2, disp->ImageBase 1666 mov 0(%rsi),%r8 # arg3, disp->ControlPc 1667 mov 16(%rsi),%r9 # arg4, disp->FunctionEntry 1668 mov 40(%rsi),%r10 # disp->ContextRecord 1669 lea 56(%rsi),%r11 # &disp->HandlerData 1670 lea 24(%rsi),%r12 # &disp->EstablisherFrame 1671 mov %r10,32(%rsp) # arg5 1672 mov %r11,40(%rsp) # arg6 1673 mov %r12,48(%rsp) # arg7 1674 mov %rcx,56(%rsp) # arg8, (NULL) 1675 call *__imp_RtlVirtualUnwind(%rip) 1676 1677 mov \$1,%eax # ExceptionContinueSearch 1678 add \$64,%rsp 1679 popfq 1680 pop %r15 1681 pop %r14 1682 pop %r13 1683 pop %r12 1684 pop %rbp 1685 pop %rbx 1686 pop %rdi 1687 pop %rsi 1688 ret 1689.size se_handler,.-se_handler 1690 1691.section .pdata 1692.align 4 1693 .rva .LSEH_begin_gcm_gmult_4bit 1694 .rva .LSEH_end_gcm_gmult_4bit 1695 .rva .LSEH_info_gcm_gmult_4bit 1696 1697 .rva .LSEH_begin_gcm_ghash_4bit 1698 .rva .LSEH_end_gcm_ghash_4bit 1699 .rva .LSEH_info_gcm_ghash_4bit 1700 1701 .rva .LSEH_begin_gcm_init_clmul 1702 .rva .LSEH_end_gcm_init_clmul 1703 .rva .LSEH_info_gcm_init_clmul 1704 1705 .rva .LSEH_begin_gcm_ghash_clmul 1706 .rva .LSEH_end_gcm_ghash_clmul 1707 .rva .LSEH_info_gcm_ghash_clmul 1708___ 1709$code.=<<___ if ($avx); 1710 .rva .LSEH_begin_gcm_init_avx 1711 .rva .LSEH_end_gcm_init_avx 1712 .rva .LSEH_info_gcm_init_clmul 1713 1714 .rva .LSEH_begin_gcm_ghash_avx 1715 .rva .LSEH_end_gcm_ghash_avx 1716 .rva .LSEH_info_gcm_ghash_clmul 1717___ 1718$code.=<<___; 1719.section .xdata 1720.align 8 1721.LSEH_info_gcm_gmult_4bit: 1722 .byte 9,0,0,0 1723 .rva se_handler 1724 .rva .Lgmult_prologue,.Lgmult_epilogue # HandlerData 1725.LSEH_info_gcm_ghash_4bit: 1726 .byte 9,0,0,0 1727 .rva se_handler 1728 .rva .Lghash_prologue,.Lghash_epilogue # HandlerData 1729.LSEH_info_gcm_init_clmul: 1730 .byte 0x01,0x08,0x03,0x00 1731 .byte 0x08,0x68,0x00,0x00 #movaps 0x00(rsp),xmm6 1732 .byte 0x04,0x22,0x00,0x00 #sub rsp,0x18 1733.LSEH_info_gcm_ghash_clmul: 1734 .byte 0x01,0x33,0x16,0x00 1735 .byte 0x33,0xf8,0x09,0x00 #movaps 0x90(rsp),xmm15 1736 .byte 0x2e,0xe8,0x08,0x00 #movaps 0x80(rsp),xmm14 1737 .byte 0x29,0xd8,0x07,0x00 #movaps 0x70(rsp),xmm13 1738 .byte 0x24,0xc8,0x06,0x00 #movaps 0x60(rsp),xmm12 1739 .byte 0x1f,0xb8,0x05,0x00 #movaps 0x50(rsp),xmm11 1740 .byte 0x1a,0xa8,0x04,0x00 #movaps 0x40(rsp),xmm10 1741 .byte 0x15,0x98,0x03,0x00 #movaps 0x30(rsp),xmm9 1742 .byte 0x10,0x88,0x02,0x00 #movaps 0x20(rsp),xmm8 1743 .byte 0x0c,0x78,0x01,0x00 #movaps 0x10(rsp),xmm7 1744 .byte 0x08,0x68,0x00,0x00 #movaps 0x00(rsp),xmm6 1745 .byte 0x04,0x01,0x15,0x00 #sub rsp,0xa8 1746___ 1747} 1748 1749$code =~ s/\`([^\`]*)\`/eval($1)/gem; 1750 1751print $code; 1752 1753close STDOUT; 1754