#!/usr/bin/env perl

# ====================================================================
# [Re]written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================

# "[Re]written" was achieved in two major overhauls. In 2004 the BODY_*
# functions were re-implemented to address the P4 performance issue [see
# commentary below], and in 2006 the rest was rewritten in order to
# gain the freedom to liberate the licensing terms.

# January, September 2004.
#
# It was noted that the Intel IA-32 C compiler generates code which
# performs ~30% *faster* on P4 CPU than the original *hand-coded*
# SHA1 assembler implementation. To address this problem (and
# prove that humans are still better than machines:-), the
# original code was overhauled, which resulted in the following
# performance changes:
#
#		compared with original	compared with Intel cc
#		assembler impl.		generated code
# Pentium	-16%			+48%
# PIII/AMD	+8%			+16%
# P4		+85%(!)			+45%
#
# As you can see, the Pentium came out as the loser:-( Yet I reckoned
# that the improvement on P4 outweighs the loss and incorporated this
# re-tuned code into 0.9.7 and later.
# ----------------------------------------------------------------
#					<appro@fy.chalmers.se>

# August 2009.
#
# George Spelvin has tipped that F_40_59(b,c,d) can be rewritten as
# '(c&d) + (b&(c^d))', which allows one to accumulate partial results
# and lighten "pressure" on scratch registers. This resulted in a
# >12% performance improvement on contemporary AMD cores (with no
# degradation on other CPUs:-). Also, the code was revised to maximize
# "distance" between instructions producing input to the 'lea'
# instruction and the 'lea' instruction itself, which is essential for
# the Intel Atom core and resulted in ~15% improvement.
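# For reference, the identity above rewrites the majority function
# Maj(b,c,d) = (b&c)|(c&d)|(b&d): the terms (c&d) and (b&(c^d)) can
# never have the same bit set, so their sum equals their OR and no
# carries occur. A throw-away sanity check (plain Perl, not part of
# the generated code):
#
#   perl -e 'for (1..10000) {
#       my ($b,$c,$d) = map { int(rand(2**32)) } 1..3;
#       my $maj = ($b&$c)|($c&$d)|($b&$d);
#       my $alt = ($c&$d) + ($b&($c^$d));
#       die "mismatch" if $maj != $alt;
#   } print "ok\n";'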

# October 2010.
#
# Add SSSE3, Supplemental[!] SSE3, implementation. The idea behind it
# is to offload the message schedule, denoted by Wt in the NIST
# specification, or Xupdate in the OpenSSL source, to the SIMD unit.
# The idea is not novel, and in the SSE2 context was first explored by
# Dean Gaudet in 2004, see http://arctic.org/~dean/crypto/sha1.html.
# Since then several things have changed that made it interesting
# again:
#
# a) XMM units became faster and wider;
# b) instruction set became more versatile;
# c) an important observation was made by Max Locktyukhin, which made
#    it possible to reduce the number of instructions required to
#    perform the operation in question, for further details see
#    http://software.intel.com/en-us/articles/improving-the-performance-of-the-secure-hash-algorithm-1/.

# April 2011.
#
# Add AVX code path, probably the most controversial... The thing is
# that switching to AVX alone improves performance by as little as 4%
# in comparison to the SSSE3 code path. But the result below doesn't
# look like a 4% improvement... The trouble is that Sandy Bridge
# decodes 'ro[rl]' as a pair of µ-ops, and it's the additional µ-ops,
# two per round, that make it run slower than Core2 and Westmere. But
# 'sh[rl]d' is decoded as a single µ-op by Sandy Bridge, and it's
# replacing 'ro[rl]' with the equivalent 'sh[rl]d' that is responsible
# for the impressive 5.1 cycles per processed byte. But 'sh[rl]d' is
# not something that used to be fast, nor does it appear to be fast on
# the upcoming Bulldozer [according to its optimization manual]. Which
# is why the AVX code path is guarded by *both* the AVX bit and the
# synthetic bit denoting Intel CPUs. One can argue that it's unfair to
# AMD, but without 'sh[rl]d' it makes no sense to keep the AVX code
# path. If somebody feels strongly about it, it's probably more
# appropriate to discuss the possibility of using the vector rotate
# from XOP on AMD...

# March 2014.
#
# Add support for Intel SHA Extensions.

######################################################################
# Current performance is summarized in the following table. Numbers
# are CPU clock cycles spent to process a single byte (less is better).
#
#		x86		SSSE3		AVX
# Pentium	15.7		-
# PIII		11.5		-
# P4		10.6		-
# AMD K8	7.1		-
# Core2		7.3		6.0/+22%	-
# Westmere	7.3		5.5/+33%	-
# Sandy Bridge	8.8		6.2/+40%	5.1(**)/+73%
# Ivy Bridge	7.2		4.8/+51%	4.7(**)/+53%
# Haswell	6.5		4.3/+51%	4.1(**)/+58%
# Bulldozer	11.6		6.0/+92%
# VIA Nano	10.6		7.5/+41%
# Atom		12.5		9.3(*)/+35%
# Silvermont	14.5		9.9(*)/+46%
#
# (*)	Loop is 1056 instructions long and the expected result is
#	~8.25. The discrepancy is because of front-end limitations,
#	so-called MS-ROM penalties, and on Silvermont even the
#	rotate's limited parallelism.
#
# (**)	As per the above comment, the result is for AVX *plus* sh[rl]d.

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
push(@INC,"${dir}","${dir}../../perlasm");
require "x86asm.pl";

&asm_init($ARGV[0],"sha1-586.pl",$ARGV[$#ARGV] eq "386");

$xmm=$ymm=0;
for (@ARGV) { $xmm=1 if (/-DOPENSSL_IA32_SSE2/); }

$ymm=1 if ($xmm &&
	`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
	=~ /GNU assembler version ([2-9]\.[0-9]+)/ &&
	$1>=2.19);	# first version supporting AVX

$ymm=1 if ($xmm && !$ymm && $ARGV[0] eq "win32n" &&
	`nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/ &&
	$1>=2.03);	# first version supporting AVX

$ymm=1 if ($xmm && !$ymm && $ARGV[0] eq "win32" &&
	`ml 2>&1` =~ /Version ([0-9]+)\./ &&
	$1>=10);	# first version supporting AVX

$ymm=1 if ($xmm && !$ymm &&
	`$ENV{CC} -v 2>&1` =~ /(^clang version|based on LLVM) ([3-9]\.[0-9]+)/ &&
	$2>=3.0);	# first version supporting AVX

$shaext=$xmm;	### set to zero if compiling for 1.0.1

&external_label("OPENSSL_ia32cap_P") if ($xmm);


$A="eax";
$B="ebx";
$C="ecx";
$D="edx";
$E="edi";
$T="esi";
$tmp1="ebp";

@V=($A,$B,$C,$D,$E,$T);

$alt=0;	# 1 denotes alternative IALU implementation, which performs
	# 8% *worse* on P4, same on Westmere and Atom, 2% better on
	# Sandy Bridge...
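# For orientation, each of the BODY_* subroutines below emits one
# SHA-1 round as specified in FIPS 180-4, roughly:
#
#	T = ROTATE(a,5) + F(b,c,d) + e + K + X[i]
#	e = d; d = c; c = ROTATE(b,30); b = a; a = T
#
# except that instead of physically shuffling values the register list
# @V is rotated between rounds, and the new 'a' is left in the register
# that plays $f for the current round (see "f becomes a in next round").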

sub BODY_00_15
	{
	local($n,$a,$b,$c,$d,$e,$f)=@_;

	&comment("00_15 $n");

	&mov($f,$c);			# f to hold F_00_19(b,c,d)
	if ($n==0)  { &mov($tmp1,$a); }
	else        { &mov($a,$tmp1); }
	&rotl($tmp1,5);			# tmp1=ROTATE(a,5)
	&xor($f,$d);
	&add($tmp1,$e);			# tmp1+=e;
	&mov($e,&swtmp($n%16));		# e becomes volatile and is loaded
					# with xi, also note that e becomes
					# f in next round...
	&and($f,$b);
	&rotr($b,2);			# b=ROTATE(b,30)
	&xor($f,$d);			# f holds F_00_19(b,c,d)
	&lea($tmp1,&DWP(0x5a827999,$tmp1,$e));	# tmp1+=K_00_19+xi

	if ($n==15) { &mov($e,&swtmp(($n+1)%16));# pre-fetch f for next round
		      &add($f,$tmp1); }	# f+=tmp1
	else        { &add($tmp1,$f); }	# f becomes a in next round
	&mov($tmp1,$a) if ($alt && $n==15);
	}

sub BODY_16_19
	{
	local($n,$a,$b,$c,$d,$e,$f)=@_;

	&comment("16_19 $n");

if ($alt) {
	&xor($c,$d);
	&xor($f,&swtmp(($n+2)%16));	# f to hold Xupdate(xi,xa,xb,xc,xd)
	&and($tmp1,$c);			# tmp1 to hold F_00_19(b,c,d), b&=c^d
	&xor($f,&swtmp(($n+8)%16));
	&xor($tmp1,$d);			# tmp1=F_00_19(b,c,d)
	&xor($f,&swtmp(($n+13)%16));	# f holds xa^xb^xc^xd
	&rotl($f,1);			# f=ROTATE(f,1)
	&add($e,$tmp1);			# e+=F_00_19(b,c,d)
	&xor($c,$d);			# restore $c
	&mov($tmp1,$a);			# b in next round
	&rotr($b,$n==16?2:7);		# b=ROTATE(b,30)
	&mov(&swtmp($n%16),$f);		# xi=f
	&rotl($a,5);			# ROTATE(a,5)
	&lea($f,&DWP(0x5a827999,$f,$e));# f+=F_00_19(b,c,d)+e
	&mov($e,&swtmp(($n+1)%16));	# pre-fetch f for next round
	&add($f,$a);			# f+=ROTATE(a,5)
} else {
	&mov($tmp1,$c);			# tmp1 to hold F_00_19(b,c,d)
	&xor($f,&swtmp(($n+2)%16));	# f to hold Xupdate(xi,xa,xb,xc,xd)
	&xor($tmp1,$d);
	&xor($f,&swtmp(($n+8)%16));
	&and($tmp1,$b);
	&xor($f,&swtmp(($n+13)%16));	# f holds xa^xb^xc^xd
	&rotl($f,1);			# f=ROTATE(f,1)
	&xor($tmp1,$d);			# tmp1=F_00_19(b,c,d)
	&add($e,$tmp1);			# e+=F_00_19(b,c,d)
	&mov($tmp1,$a);
	&rotr($b,2);			# b=ROTATE(b,30)
	&mov(&swtmp($n%16),$f);		# xi=f
	&rotl($tmp1,5);			# ROTATE(a,5)
	&lea($f,&DWP(0x5a827999,$f,$e));# f+=F_00_19(b,c,d)+e
	&mov($e,&swtmp(($n+1)%16));	# pre-fetch f for next round
	&add($f,$tmp1);			# f+=ROTATE(a,5)
}
	}

sub BODY_20_39
	{
	local($n,$a,$b,$c,$d,$e,$f)=@_;
	local $K=($n<40)?0x6ed9eba1:0xca62c1d6;

	&comment("20_39 $n");

if ($alt) {
	&xor($tmp1,$c);			# tmp1 to hold F_20_39(b,c,d), b^=c
	&xor($f,&swtmp(($n+2)%16));	# f to hold Xupdate(xi,xa,xb,xc,xd)
	&xor($tmp1,$d);			# tmp1 holds F_20_39(b,c,d)
	&xor($f,&swtmp(($n+8)%16));
	&add($e,$tmp1);			# e+=F_20_39(b,c,d)
	&xor($f,&swtmp(($n+13)%16));	# f holds xa^xb^xc^xd
	&rotl($f,1);			# f=ROTATE(f,1)
	&mov($tmp1,$a);			# b in next round
	&rotr($b,7);			# b=ROTATE(b,30)
	&mov(&swtmp($n%16),$f) if($n<77);# xi=f
	&rotl($a,5);			# ROTATE(a,5)
	&xor($b,$c) if($n==39);		# warm up for BODY_40_59
	&and($tmp1,$b) if($n==39);
	&lea($f,&DWP($K,$f,$e));	# f+=e+K_XX_YY
	&mov($e,&swtmp(($n+1)%16)) if($n<79);# pre-fetch f for next round
	&add($f,$a);			# f+=ROTATE(a,5)
	&rotr($a,5) if ($n==79);
} else {
	&mov($tmp1,$b);			# tmp1 to hold F_20_39(b,c,d)
	&xor($f,&swtmp(($n+2)%16));	# f to hold Xupdate(xi,xa,xb,xc,xd)
	&xor($tmp1,$c);
	&xor($f,&swtmp(($n+8)%16));
	&xor($tmp1,$d);			# tmp1 holds F_20_39(b,c,d)
	&xor($f,&swtmp(($n+13)%16));	# f holds xa^xb^xc^xd
	&rotl($f,1);			# f=ROTATE(f,1)
	&add($e,$tmp1);			# e+=F_20_39(b,c,d)
	&rotr($b,2);			# b=ROTATE(b,30)
	&mov($tmp1,$a);
	&rotl($tmp1,5);			# ROTATE(a,5)
	&mov(&swtmp($n%16),$f) if($n<77);# xi=f
	&lea($f,&DWP($K,$f,$e));	# f+=e+K_XX_YY
	&mov($e,&swtmp(($n+1)%16)) if($n<79);# pre-fetch f for next round
	&add($f,$tmp1);			# f+=ROTATE(a,5)
}
	}

sub BODY_40_59
	{
	local($n,$a,$b,$c,$d,$e,$f)=@_;

	&comment("40_59 $n");

if ($alt) {
	&add($e,$tmp1);			# e+=b&(c^d)
	&xor($f,&swtmp(($n+2)%16));	# f to hold Xupdate(xi,xa,xb,xc,xd)
	&mov($tmp1,$d);
	&xor($f,&swtmp(($n+8)%16));
	&xor($c,$d);			# restore $c
	&xor($f,&swtmp(($n+13)%16));	# f holds xa^xb^xc^xd
	&rotl($f,1);			# f=ROTATE(f,1)
	&and($tmp1,$c);
	&rotr($b,7);			# b=ROTATE(b,30)
	&add($e,$tmp1);			# e+=c&d
	&mov($tmp1,$a);			# b in next round
	&mov(&swtmp($n%16),$f);		# xi=f
	&rotl($a,5);			# ROTATE(a,5)
	&xor($b,$c) if ($n<59);
	&and($tmp1,$b) if ($n<59);	# tmp1 to hold F_40_59(b,c,d)
	&lea($f,&DWP(0x8f1bbcdc,$f,$e));# f+=K_40_59+e+(b&(c^d))
	&mov($e,&swtmp(($n+1)%16));	# pre-fetch f for next round
	&add($f,$a);			# f+=ROTATE(a,5)
} else {
	&mov($tmp1,$c);			# tmp1 to hold F_40_59(b,c,d)
	&xor($f,&swtmp(($n+2)%16));	# f to hold Xupdate(xi,xa,xb,xc,xd)
	&xor($tmp1,$d);
	&xor($f,&swtmp(($n+8)%16));
	&and($tmp1,$b);
	&xor($f,&swtmp(($n+13)%16));	# f holds xa^xb^xc^xd
	&rotl($f,1);			# f=ROTATE(f,1)
	&add($tmp1,$e);			# b&(c^d)+=e
	&rotr($b,2);			# b=ROTATE(b,30)
	&mov($e,$a);			# e becomes volatile
	&rotl($e,5);			# ROTATE(a,5)
	&mov(&swtmp($n%16),$f);		# xi=f
	&lea($f,&DWP(0x8f1bbcdc,$f,$tmp1));# f+=K_40_59+e+(b&(c^d))
	&mov($tmp1,$c);
	&add($f,$e);			# f+=ROTATE(a,5)
	&and($tmp1,$d);
	&mov($e,&swtmp(($n+1)%16));	# pre-fetch f for next round
	&add($f,$tmp1);			# f+=c&d
}
	}

&function_begin("sha1_block_data_order");
if ($xmm) {
  &static_label("shaext_shortcut") if ($shaext);
  &static_label("ssse3_shortcut");
  &static_label("avx_shortcut") if ($ymm);
  &static_label("K_XX_XX");

	&call	(&label("pic_point"));	# make it PIC!
  &set_label("pic_point");
	&blindpop($tmp1);
	&picmeup($T,"OPENSSL_ia32cap_P",$tmp1,&label("pic_point"));
	&lea	($tmp1,&DWP(&label("K_XX_XX")."-".&label("pic_point"),$tmp1));

	&mov	($A,&DWP(0,$T));
	&mov	($D,&DWP(4,$T));
	&test	($D,1<<9);		# check SSSE3 bit
	&jz	(&label("x86"));
	&mov	($C,&DWP(8,$T));
	&test	($A,1<<24);		# check FXSR bit
	&jz	(&label("x86"));
	if ($shaext) {
	&test	($C,1<<29);		# check SHA bit
	&jnz	(&label("shaext_shortcut"));
	}
	if ($ymm) {
	&and	($D,1<<28);		# mask AVX bit
	&and	($A,1<<30);		# mask "Intel CPU" bit
	&or	($A,$D);
	&cmp	($A,1<<28|1<<30);
	&je	(&label("avx_shortcut"));
	}
	&jmp	(&label("ssse3_shortcut"));
  &set_label("x86",16);
}
	&mov($tmp1,&wparam(0));	# SHA_CTX *c
	&mov($T,&wparam(1));	# const void *input
	&mov($A,&wparam(2));	# size_t num
	&stack_push(16+3);	# allocate X[16]
	&shl($A,6);
	&add($A,$T);
	&mov(&wparam(2),$A);	# pointer beyond the end of input
	&mov($E,&DWP(16,$tmp1));# pre-load E
	&jmp(&label("loop"));

&set_label("loop",16);

	# copy input chunk to X, but reversing byte order!
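	# (SHA-1 interprets the message as big-endian 32-bit words per
	# FIPS 180-4, hence the bswap of every word on little-endian x86.)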
	for ($i=0; $i<16; $i+=4)
		{
		&mov($A,&DWP(4*($i+0),$T));
		&mov($B,&DWP(4*($i+1),$T));
		&mov($C,&DWP(4*($i+2),$T));
		&mov($D,&DWP(4*($i+3),$T));
		&bswap($A);
		&bswap($B);
		&bswap($C);
		&bswap($D);
		&mov(&swtmp($i+0),$A);
		&mov(&swtmp($i+1),$B);
		&mov(&swtmp($i+2),$C);
		&mov(&swtmp($i+3),$D);
		}
	&mov(&wparam(1),$T);	# redundant in 1st spin

	&mov($A,&DWP(0,$tmp1));	# load SHA_CTX
	&mov($B,&DWP(4,$tmp1));
	&mov($C,&DWP(8,$tmp1));
	&mov($D,&DWP(12,$tmp1));
	# E is pre-loaded

	for($i=0;$i<16;$i++)	{ &BODY_00_15($i,@V); unshift(@V,pop(@V)); }
	for(;$i<20;$i++)	{ &BODY_16_19($i,@V); unshift(@V,pop(@V)); }
	for(;$i<40;$i++)	{ &BODY_20_39($i,@V); unshift(@V,pop(@V)); }
	for(;$i<60;$i++)	{ &BODY_40_59($i,@V); unshift(@V,pop(@V)); }
	for(;$i<80;$i++)	{ &BODY_20_39($i,@V); unshift(@V,pop(@V)); }

	(($V[5] eq $D) and ($V[0] eq $E)) or die;	# double-check

	&mov($tmp1,&wparam(0));	# re-load SHA_CTX*
	&mov($D,&wparam(1));	# D is last "T" and is discarded

	&add($E,&DWP(0,$tmp1));	# E is last "A"...
	&add($T,&DWP(4,$tmp1));
	&add($A,&DWP(8,$tmp1));
	&add($B,&DWP(12,$tmp1));
	&add($C,&DWP(16,$tmp1));

	&mov(&DWP(0,$tmp1),$E);	# update SHA_CTX
	&add($D,64);		# advance input pointer
	&mov(&DWP(4,$tmp1),$T);
	&cmp($D,&wparam(2));	# have we reached the end yet?
	&mov(&DWP(8,$tmp1),$A);
	&mov($E,$C);		# C is last "E" which needs to be "pre-loaded"
	&mov(&DWP(12,$tmp1),$B);
	&mov($T,$D);		# input pointer
	&mov(&DWP(16,$tmp1),$C);
	&jb(&label("loop"));

	&stack_pop(16+3);
&function_end("sha1_block_data_order");

if ($xmm) {
if ($shaext) {
######################################################################
# Intel SHA Extensions implementation of the SHA1 update function.
#
my ($ctx,$inp,$num)=("edi","esi","ecx");
my ($ABCD,$E,$E_,$BSWAP)=map("xmm$_",(0..3));
my @MSG=map("xmm$_",(4..7));

sub sha1rnds4 {
 my ($dst,$src,$imm)=@_;
    if ("$dst:$src" =~ /xmm([0-7]):xmm([0-7])/)
    {	&data_byte(0x0f,0x3a,0xcc,0xc0|($1<<3)|$2,$imm);	}
}
sub sha1op38 {
 my ($opcodelet,$dst,$src)=@_;
    if ("$dst:$src" =~ /xmm([0-7]):xmm([0-7])/)
    {	&data_byte(0x0f,0x38,$opcodelet,0xc0|($1<<3)|$2);	}
}
sub sha1nexte	{ sha1op38(0xc8,@_); }
sub sha1msg1	{ sha1op38(0xc9,@_); }
sub sha1msg2	{ sha1op38(0xca,@_); }
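# The helpers above emit the SHA extension instructions as raw opcode
# bytes, so the code assembles even with toolchains that don't know
# the mnemonics yet. As an illustration, sha1rnds4("xmm0","xmm1",0)
# produces the byte sequence 0f 3a cc c1 00, where c1 is the ModR/M
# byte 0xc0|(dst<<3)|src for the xmm0,xmm1 register pair.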
&function_begin("_sha1_block_data_order_shaext");
	&call	(&label("pic_point"));	# make it PIC!
	&set_label("pic_point");
	&blindpop($tmp1);
	&lea	($tmp1,&DWP(&label("K_XX_XX")."-".&label("pic_point"),$tmp1));
&set_label("shaext_shortcut");
	&mov	($ctx,&wparam(0));
	&mov	("ebx","esp");
	&mov	($inp,&wparam(1));
	&mov	($num,&wparam(2));
	&sub	("esp",32);

	&movdqu	($ABCD,&QWP(0,$ctx));
	&movd	($E,&DWP(16,$ctx));
	&and	("esp",-32);
	&movdqa	($BSWAP,&QWP(0x50,$tmp1));	# byte-n-word swap

	&movdqu	(@MSG[0],&QWP(0,$inp));
	&pshufd	($ABCD,$ABCD,0b00011011);	# flip word order
	&movdqu	(@MSG[1],&QWP(0x10,$inp));
	&pshufd	($E,$E,0b00011011);		# flip word order
	&movdqu	(@MSG[2],&QWP(0x20,$inp));
	&pshufb	(@MSG[0],$BSWAP);
	&movdqu	(@MSG[3],&QWP(0x30,$inp));
	&pshufb	(@MSG[1],$BSWAP);
	&pshufb	(@MSG[2],$BSWAP);
	&pshufb	(@MSG[3],$BSWAP);
	&jmp	(&label("loop_shaext"));

&set_label("loop_shaext",16);
	&dec	($num);
	&lea	("eax",&DWP(0x40,$inp));
	&movdqa	(&QWP(0,"esp"),$E);	# offload $E
	&paddd	($E,@MSG[0]);
	&cmovne	($inp,"eax");
	&movdqa	(&QWP(16,"esp"),$ABCD);	# offload $ABCD

for($i=0;$i<20-4;$i+=2) {
	&sha1msg1	(@MSG[0],@MSG[1]);
	&movdqa		($E_,$ABCD);
	&sha1rnds4	($ABCD,$E,int($i/5));	# 0-3...
	&sha1nexte	($E_,@MSG[1]);
	&pxor		(@MSG[0],@MSG[2]);
	&sha1msg1	(@MSG[1],@MSG[2]);
	&sha1msg2	(@MSG[0],@MSG[3]);

	&movdqa		($E,$ABCD);
	&sha1rnds4	($ABCD,$E_,int(($i+1)/5));
	&sha1nexte	($E,@MSG[2]);
	&pxor		(@MSG[1],@MSG[3]);
	&sha1msg2	(@MSG[1],@MSG[0]);

	push(@MSG,shift(@MSG));	push(@MSG,shift(@MSG));
}
	&movdqu	(@MSG[0],&QWP(0,$inp));
	&movdqa	($E_,$ABCD);
	&sha1rnds4	($ABCD,$E,3);		# 64-67
	&sha1nexte	($E_,@MSG[1]);
	&movdqu	(@MSG[1],&QWP(0x10,$inp));
	&pshufb	(@MSG[0],$BSWAP);

	&movdqa	($E,$ABCD);
	&sha1rnds4	($ABCD,$E_,3);		# 68-71
	&sha1nexte	($E,@MSG[2]);
	&movdqu	(@MSG[2],&QWP(0x20,$inp));
	&pshufb	(@MSG[1],$BSWAP);

	&movdqa	($E_,$ABCD);
	&sha1rnds4	($ABCD,$E,3);		# 72-75
	&sha1nexte	($E_,@MSG[3]);
	&movdqu	(@MSG[3],&QWP(0x30,$inp));
	&pshufb	(@MSG[2],$BSWAP);

	&movdqa	($E,$ABCD);
	&sha1rnds4	($ABCD,$E_,3);		# 76-79
	&movdqa	($E_,&QWP(0,"esp"));
	&pshufb	(@MSG[3],$BSWAP);
	&sha1nexte	($E,$E_);
	&paddd	($ABCD,&QWP(16,"esp"));

	&jnz	(&label("loop_shaext"));

	&pshufd	($ABCD,$ABCD,0b00011011);
	&pshufd	($E,$E,0b00011011);
	&movdqu	(&QWP(0,$ctx),$ABCD);
	&movd	(&DWP(16,$ctx),$E);
	&mov	("esp","ebx");
&function_end("_sha1_block_data_order_shaext");
}
######################################################################
# The SSSE3 implementation.
#
# %xmm[0-7] are used as a ring @X[] buffer containing quadruples of the
# last 32 elements of the message schedule or Xupdate outputs. The
# first 4 quadruples are simply byte-swapped input, the next 4 are
# calculated according to the method originally suggested by Dean
# Gaudet (modulo being implemented in SSSE3). Once 8 quadruples or 32
# elements are collected, it switches to the routine proposed by Max
# Locktyukhin.
#
# Calculations inevitably require temporary registers, and there are
# no %xmm registers left to spare. For this reason part of the ring
# buffer, X[2..4] to be specific, is offloaded to a 3-quadruple ring
# buffer on the stack. Keep in mind that X[2] aliases X[-6], X[3] -
# X[-5], and X[4] - X[-4]...
#
# Another notable optimization is aggressive stack frame compression
# aiming to minimize the number of 9-byte instructions...
#
# Yet another notable optimization is the "jumping" $B variable. It
# means that there is no register permanently allocated for the $B
# value. This allows one instruction to be eliminated from
# body_20_39...
#
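# For reference, the recurrence that both SIMD paths vectorize is the
# standard SHA-1 message schedule (FIPS 180-4),
#
#	X[i] = ROTATE(X[i-3] ^ X[i-8] ^ X[i-14] ^ X[i-16], 1)
#
# computed four elements at a time; Locktyukhin's variant additionally
# uses X[i] = ROTATE(X[i-6] ^ X[i-16] ^ X[i-28] ^ X[i-32], 2) once 32
# elements are available, which is what Xupdate_ssse3_32_79 below
# relies on.
#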
my $Xi=4;			# 4xSIMD Xupdate round, start pre-seeded
my @X=map("xmm$_",(4..7,0..3));	# pre-seeded for $Xi=4
my @V=($A,$B,$C,$D,$E);
my $j=0;			# hash round
my $rx=0;
my @T=($T,$tmp1);
my $inp;

my $_rol=sub { &rol(@_) };
my $_ror=sub { &ror(@_) };

&function_begin("_sha1_block_data_order_ssse3");
	&call	(&label("pic_point"));	# make it PIC!
	&set_label("pic_point");
	&blindpop($tmp1);
	&lea	($tmp1,&DWP(&label("K_XX_XX")."-".&label("pic_point"),$tmp1));
&set_label("ssse3_shortcut");

	&movdqa	(@X[3],&QWP(0,$tmp1));	# K_00_19
	&movdqa	(@X[4],&QWP(16,$tmp1));	# K_20_39
	&movdqa	(@X[5],&QWP(32,$tmp1));	# K_40_59
	&movdqa	(@X[6],&QWP(48,$tmp1));	# K_60_79
	&movdqa	(@X[2],&QWP(64,$tmp1));	# pbswap mask

	&mov	($E,&wparam(0));	# load argument block
	&mov	($inp=@T[1],&wparam(1));
	&mov	($D,&wparam(2));
	&mov	(@T[0],"esp");

	# stack frame layout
	#
	# +0	X[0]+K	X[1]+K	X[2]+K	X[3]+K	# XMM->IALU xfer area
	#	X[4]+K	X[5]+K	X[6]+K	X[7]+K
	#	X[8]+K	X[9]+K	X[10]+K	X[11]+K
	#	X[12]+K	X[13]+K	X[14]+K	X[15]+K
	#
	# +64	X[0]	X[1]	X[2]	X[3]	# XMM->XMM backtrace area
	#	X[4]	X[5]	X[6]	X[7]
	#	X[8]	X[9]	X[10]	X[11]	# even borrowed for K_00_19
	#
	# +112	K_20_39	K_20_39	K_20_39	K_20_39	# constants
	#	K_40_59	K_40_59	K_40_59	K_40_59
	#	K_60_79	K_60_79	K_60_79	K_60_79
	#	K_00_19	K_00_19	K_00_19	K_00_19
	#	pbswap mask
	#
	# +192	ctx				# argument block
	# +196	inp
	# +200	end
	# +204	esp
	&sub	("esp",208);
	&and	("esp",-64);

	&movdqa	(&QWP(112+0,"esp"),@X[4]);	# copy constants
	&movdqa	(&QWP(112+16,"esp"),@X[5]);
	&movdqa	(&QWP(112+32,"esp"),@X[6]);
	&shl	($D,6);				# len*64
	&movdqa	(&QWP(112+48,"esp"),@X[3]);
	&add	($D,$inp);			# end of input
	&movdqa	(&QWP(112+64,"esp"),@X[2]);
	&add	($inp,64);
	&mov	(&DWP(192+0,"esp"),$E);		# save argument block
	&mov	(&DWP(192+4,"esp"),$inp);
	&mov	(&DWP(192+8,"esp"),$D);
	&mov	(&DWP(192+12,"esp"),@T[0]);	# save original %esp

	&mov	($A,&DWP(0,$E));	# load context
	&mov	($B,&DWP(4,$E));
	&mov	($C,&DWP(8,$E));
	&mov	($D,&DWP(12,$E));
	&mov	($E,&DWP(16,$E));
	&mov	(@T[0],$B);		# magic seed

	&movdqu	(@X[-4&7],&QWP(-64,$inp));	# load input to %xmm[0-3]
	&movdqu	(@X[-3&7],&QWP(-48,$inp));
	&movdqu	(@X[-2&7],&QWP(-32,$inp));
	&movdqu	(@X[-1&7],&QWP(-16,$inp));
	&pshufb	(@X[-4&7],@X[2]);		# byte swap
	&pshufb	(@X[-3&7],@X[2]);
	&pshufb	(@X[-2&7],@X[2]);
	&movdqa	(&QWP(112-16,"esp"),@X[3]);	# borrow last backtrace slot
	&pshufb	(@X[-1&7],@X[2]);
	&paddd	(@X[-4&7],@X[3]);		# add K_00_19
	&paddd	(@X[-3&7],@X[3]);
	&paddd	(@X[-2&7],@X[3]);
	&movdqa	(&QWP(0,"esp"),@X[-4&7]);	# X[]+K xfer to IALU
	&psubd	(@X[-4&7],@X[3]);		# restore X[]
	&movdqa	(&QWP(0+16,"esp"),@X[-3&7]);
	&psubd	(@X[-3&7],@X[3]);
	&movdqa	(&QWP(0+32,"esp"),@X[-2&7]);
	&mov	(@T[1],$C);
	&psubd	(@X[-2&7],@X[3]);
	&xor	(@T[1],$D);
	&pshufd	(@X[0],@X[-4&7],0xee);	# was &movdqa	(@X[0],@X[-3&7]);
	&and	(@T[0],@T[1]);
	&jmp	(&label("loop"));

######################################################################
# The SSE instruction sequence is first broken into groups of
# independent instructions, independent with respect to their inputs
# and the shifter (not all architectures have more than one). Then
# IALU instructions are "knitted in" between the SSE groups. Distance
# is maintained for an SSE latency of 2 in the hope that it better fits
# the upcoming AMD Bulldozer [which allegedly also implements SSSE3]...
#
# Temporary register usage. X[2] is volatile at the entry and at the
# end is restored from the backtrace ring buffer. X[3] is expected to
# contain the current K_XX_XX constant and is used to calculate X[-1]+K
# from the previous round; it becomes volatile the moment the value is
# saved to the stack for transfer to the IALU. X[4] becomes volatile
# whenever X[-4] is accumulated and offloaded to the backtrace ring
# buffer, and at the end it is loaded with the next K_XX_XX [which
# becomes X[3] in the next round]...
#
sub Xupdate_ssse3_16_31()		# recall that $Xi starts with 4
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 40 instructions
  my ($a,$b,$c,$d,$e);

	 eval(shift(@insns));		# ror
	 eval(shift(@insns));
	 eval(shift(@insns));
	&punpcklqdq(@X[0],@X[-3&7]);	# compose "X[-14]" in "X[0]", was &palignr(@X[0],@X[-4&7],8);
	&movdqa	(@X[2],@X[-1&7]);
	 eval(shift(@insns));
	 eval(shift(@insns));

	&paddd	(@X[3],@X[-1&7]);
	&movdqa	(&QWP(64+16*(($Xi-4)%3),"esp"),@X[-4&7]);# save X[] to backtrace buffer
	 eval(shift(@insns));		# rol
	 eval(shift(@insns));
	&psrldq	(@X[2],4);		# "X[-3]", 3 dwords
	 eval(shift(@insns));
	 eval(shift(@insns));
	&pxor	(@X[0],@X[-4&7]);	# "X[0]"^="X[-16]"
	 eval(shift(@insns));
	 eval(shift(@insns));		# ror

	&pxor	(@X[2],@X[-2&7]);	# "X[-3]"^"X[-8]"
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));

	&pxor	(@X[0],@X[2]);		# "X[0]"^="X[-3]"^"X[-8]"
	 eval(shift(@insns));
	 eval(shift(@insns));		# rol
	&movdqa	(&QWP(0+16*(($Xi-1)&3),"esp"),@X[3]);	# X[]+K xfer to IALU
	 eval(shift(@insns));
	 eval(shift(@insns));

	&movdqa	(@X[4],@X[0]);
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));		# ror
	&movdqa	(@X[2],@X[0]);
	 eval(shift(@insns));

	&pslldq	(@X[4],12);		# "X[0]"<<96, extract one dword
	&paddd	(@X[0],@X[0]);
	 eval(shift(@insns));
	 eval(shift(@insns));

	&psrld	(@X[2],31);
	 eval(shift(@insns));
	 eval(shift(@insns));		# rol
	&movdqa	(@X[3],@X[4]);
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));

	&psrld	(@X[4],30);
	 eval(shift(@insns));
	 eval(shift(@insns));		# ror
	&por	(@X[0],@X[2]);		# "X[0]"<<<=1
	 eval(shift(@insns));
	&movdqa	(@X[2],&QWP(64+16*(($Xi-6)%3),"esp")) if ($Xi>5);	# restore X[] from backtrace buffer
	 eval(shift(@insns));
	 eval(shift(@insns));

	&pslld	(@X[3],2);
	 eval(shift(@insns));
	 eval(shift(@insns));		# rol
	&pxor	(@X[0],@X[4]);
	&movdqa	(@X[4],&QWP(112-16+16*(($Xi)/5),"esp"));	# K_XX_XX
	 eval(shift(@insns));
	 eval(shift(@insns));

	&pxor	(@X[0],@X[3]);		# "X[0]"^=("X[0]"<<96)<<<2
	&pshufd	(@X[1],@X[-3&7],0xee) if ($Xi<7);	# was &movdqa	(@X[1],@X[-2&7])
	&pshufd	(@X[3],@X[-1&7],0xee) if ($Xi==7);
	 eval(shift(@insns));
	 eval(shift(@insns));

	foreach (@insns) { eval; }	# remaining instructions [if any]

  $Xi++;	push(@X,shift(@X));	# "rotate" X[]
}

sub Xupdate_ssse3_32_79()
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 32 to 44 instructions
  my ($a,$b,$c,$d,$e);

	 eval(shift(@insns));		# body_20_39
	&pxor	(@X[0],@X[-4&7]);	# "X[0]"="X[-32]"^"X[-16]"
	&punpcklqdq(@X[2],@X[-1&7]);	# compose "X[-6]", was &palignr(@X[2],@X[-2&7],8)
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));		# rol

	&pxor	(@X[0],@X[-7&7]);	# "X[0]"^="X[-28]"
	&movdqa	(&QWP(64+16*(($Xi-4)%3),"esp"),@X[-4&7]);	# save X[] to backtrace buffer
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns)) if (@insns[0] =~ /_rol/);
	if ($Xi%5) {
	  &movdqa	(@X[4],@X[3]);	# "perpetuate" K_XX_XX...
	} else {			# ... or load next one
	  &movdqa	(@X[4],&QWP(112-16+16*($Xi/5),"esp"));
	}
	 eval(shift(@insns));		# ror
	&paddd	(@X[3],@X[-1&7]);
	 eval(shift(@insns));

	&pxor	(@X[0],@X[2]);		# "X[0]"^="X[-6]"
	 eval(shift(@insns));		# body_20_39
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));		# rol

	&movdqa	(@X[2],@X[0]);
	&movdqa	(&QWP(0+16*(($Xi-1)&3),"esp"),@X[3]);	# X[]+K xfer to IALU
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));		# ror
	 eval(shift(@insns));
	 eval(shift(@insns)) if (@insns[0] =~ /_rol/);

	&pslld	(@X[0],2);
	 eval(shift(@insns));		# body_20_39
	 eval(shift(@insns));
	&psrld	(@X[2],30);
	 eval(shift(@insns));
	 eval(shift(@insns));		# rol
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));		# ror
	 eval(shift(@insns));
	 eval(shift(@insns)) if (@insns[1] =~ /_rol/);
	 eval(shift(@insns)) if (@insns[0] =~ /_rol/);

	&por	(@X[0],@X[2]);		# "X[0]"<<<=2
	 eval(shift(@insns));		# body_20_39
	 eval(shift(@insns));
	&movdqa	(@X[2],&QWP(64+16*(($Xi-6)%3),"esp")) if($Xi<19);	# restore X[] from backtrace buffer
	 eval(shift(@insns));
	 eval(shift(@insns));		# rol
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));		# ror
	&pshufd	(@X[3],@X[-1],0xee) if ($Xi<19);	# was &movdqa	(@X[3],@X[0])
	 eval(shift(@insns));

	foreach (@insns) { eval; }	# remaining instructions

  $Xi++;	push(@X,shift(@X));	# "rotate" X[]
}

sub Xuplast_ssse3_80()
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
  my ($a,$b,$c,$d,$e);

	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	&paddd	(@X[3],@X[-1&7]);
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));

	&movdqa	(&QWP(0+16*(($Xi-1)&3),"esp"),@X[3]);	# X[]+K xfer IALU

	foreach (@insns) { eval; }	# remaining instructions

	&mov	($inp=@T[1],&DWP(192+4,"esp"));
	&cmp	($inp,&DWP(192+8,"esp"));
	&je	(&label("done"));

	&movdqa	(@X[3],&QWP(112+48,"esp"));	# K_00_19
	&movdqa	(@X[2],&QWP(112+64,"esp"));	# pbswap mask
	&movdqu	(@X[-4&7],&QWP(0,$inp));	# load input
	&movdqu	(@X[-3&7],&QWP(16,$inp));
	&movdqu	(@X[-2&7],&QWP(32,$inp));
	&movdqu	(@X[-1&7],&QWP(48,$inp));
	&add	($inp,64);
	&pshufb	(@X[-4&7],@X[2]);		# byte swap
	&mov	(&DWP(192+4,"esp"),$inp);
	&movdqa	(&QWP(112-16,"esp"),@X[3]);	# borrow last backtrace slot

	$Xi=0;
}

sub Xloop_ssse3()
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
  my ($a,$b,$c,$d,$e);

	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	&pshufb	(@X[($Xi-3)&7],@X[2]);
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	&paddd	(@X[($Xi-4)&7],@X[3]);
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	&movdqa	(&QWP(0+16*$Xi,"esp"),@X[($Xi-4)&7]);	# X[]+K xfer to IALU
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	&psubd	(@X[($Xi-4)&7],@X[3]);

	foreach (@insns) { eval; }
  $Xi++;
}

sub Xtail_ssse3()
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
  my ($a,$b,$c,$d,$e);

	foreach (@insns) { eval; }
}

sub body_00_19 () {	# ((c^d)&b)^d
	# on start @T[0]=(c^d)&b
	return &body_20_39() if ($rx==19); $rx++;
	(
	'($a,$b,$c,$d,$e)=@V;'.
	'&$_ror	($b,$j?7:2);',	# $b>>>2
	'&xor	(@T[0],$d);',
	'&mov	(@T[1],$a);',	# $b in next round

	'&add	($e,&DWP(4*($j&15),"esp"));',	# X[]+K xfer
	'&xor	($b,$c);',	# $c^$d for next round

	'&$_rol	($a,5);',
	'&add	($e,@T[0]);',
	'&and	(@T[1],$b);',	# ($b&($c^$d)) for next round

	'&xor	($b,$c);',	# restore $b
	'&add	($e,$a);'	.'$j++; unshift(@V,pop(@V)); unshift(@T,pop(@T));'
	);
}

sub body_20_39 () {	# b^d^c
	# on entry @T[0]=b^d
	return &body_40_59() if ($rx==39); $rx++;
	(
	'($a,$b,$c,$d,$e)=@V;'.
	'&add	($e,&DWP(4*($j&15),"esp"));',	# X[]+K xfer
	'&xor	(@T[0],$d) if($j==19);'.
	'&xor	(@T[0],$c) if($j> 19);',	# ($b^$d^$c)
	'&mov	(@T[1],$a);',	# $b in next round

	'&$_rol	($a,5);',
	'&add	($e,@T[0]);',
	'&xor	(@T[1],$c) if ($j< 79);',	# $b^$d for next round

	'&$_ror	($b,7);',	# $b>>>2
	'&add	($e,$a);'	.'$j++; unshift(@V,pop(@V)); unshift(@T,pop(@T));'
	);
}

sub body_40_59 () {	# ((b^c)&(c^d))^c
	# on entry @T[0]=(b^c), (c^=d)
	$rx++;
	(
	'($a,$b,$c,$d,$e)=@V;'.
	'&add	($e,&DWP(4*($j&15),"esp"));',	# X[]+K xfer
	'&and	(@T[0],$c) if ($j>=40);',	# (b^c)&(c^d)
	'&xor	($c,$d) if ($j>=40);',		# restore $c

	'&$_ror	($b,7);',	# $b>>>2
	'&mov	(@T[1],$a);',	# $b for next round
	'&xor	(@T[0],$c);',

	'&$_rol	($a,5);',
	'&add	($e,@T[0]);',
	'&xor	(@T[1],$c) if ($j==59);'.
	'&xor	(@T[1],$b) if ($j< 59);',	# b^c for next round

	'&xor	($b,$c) if ($j< 59);',		# c^d for next round
	'&add	($e,$a);'	.'$j++; unshift(@V,pop(@V)); unshift(@T,pop(@T));'
	);
}
######
sub bodyx_00_19 () {	# ((c^d)&b)^d
	# on start @T[0]=(b&c)^(~b&d), $e+=X[]+K
	return &bodyx_20_39() if ($rx==19); $rx++;
	(
	'($a,$b,$c,$d,$e)=@V;'.

	'&rorx	($b,$b,2)	if ($j==0);'.	# $b>>>2
	'&rorx	($b,@T[1],7)	if ($j!=0);',	# $b>>>2
	'&lea	($e,&DWP(0,$e,@T[0]));',
	'&rorx	(@T[0],$a,5);',

	'&andn	(@T[1],$a,$c);',
	'&and	($a,$b)',
	'&add	($d,&DWP(4*(($j+1)&15),"esp"));',	# X[]+K xfer

	'&xor	(@T[1],$a)',
	'&add	($e,@T[0]);'	.'$j++; unshift(@V,pop(@V)); unshift(@T,pop(@T));'
	);
}

sub bodyx_20_39 () {	# b^d^c
	# on start $b=b^c^d
	return &bodyx_40_59() if ($rx==39); $rx++;
	(
	'($a,$b,$c,$d,$e)=@V;'.

	'&add	($e,($j==19?@T[0]:$b))',
	'&rorx	($b,@T[1],7);',	# $b>>>2
	'&rorx	(@T[0],$a,5);',

	'&xor	($a,$b) if ($j<79);',
	'&add	($d,&DWP(4*(($j+1)&15),"esp")) if ($j<79);',	# X[]+K xfer
	'&xor	($a,$c) if ($j<79);',
	'&add	($e,@T[0]);'	.'$j++; unshift(@V,pop(@V)); unshift(@T,pop(@T));'
	);
}

sub bodyx_40_59 () {	# ((b^c)&(c^d))^c
	# on start $b=((b^c)&(c^d))^c
	return &bodyx_20_39() if ($rx==59); $rx++;
	(
	'($a,$b,$c,$d,$e)=@V;'.

	'&rorx	(@T[0],$a,5)',
	'&lea	($e,&DWP(0,$e,$b))',
	'&rorx	($b,@T[1],7)',	# $b>>>2
	'&add	($d,&DWP(4*(($j+1)&15),"esp"))',	# X[]+K xfer

	'&mov	(@T[1],$c)',
	'&xor	($a,$b)',	# b^c for next round
	'&xor	(@T[1],$b)',	# c^d for next round

	'&and	($a,@T[1])',
	'&add	($e,@T[0])',
	'&xor	($a,$b)'	.'$j++; unshift(@V,pop(@V)); unshift(@T,pop(@T));'
	);
}

&set_label("loop",16);
	&Xupdate_ssse3_16_31(\&body_00_19);
	&Xupdate_ssse3_16_31(\&body_00_19);
	&Xupdate_ssse3_16_31(\&body_00_19);
	&Xupdate_ssse3_16_31(\&body_00_19);
	&Xupdate_ssse3_32_79(\&body_00_19);
	&Xupdate_ssse3_32_79(\&body_20_39);
	&Xupdate_ssse3_32_79(\&body_20_39);
	&Xupdate_ssse3_32_79(\&body_20_39);
	&Xupdate_ssse3_32_79(\&body_20_39);
	&Xupdate_ssse3_32_79(\&body_20_39);
	&Xupdate_ssse3_32_79(\&body_40_59);
	&Xupdate_ssse3_32_79(\&body_40_59);
	&Xupdate_ssse3_32_79(\&body_40_59);
	&Xupdate_ssse3_32_79(\&body_40_59);
	&Xupdate_ssse3_32_79(\&body_40_59);
	&Xupdate_ssse3_32_79(\&body_20_39);
	&Xuplast_ssse3_80(\&body_20_39);	# can jump to "done"

	$saved_j=$j; @saved_V=@V;

	&Xloop_ssse3(\&body_20_39);
	&Xloop_ssse3(\&body_20_39);
	&Xloop_ssse3(\&body_20_39);

	&mov	(@T[1],&DWP(192,"esp"));	# update context
	&add	($A,&DWP(0,@T[1]));
	&add	(@T[0],&DWP(4,@T[1]));		# $b
	&add	($C,&DWP(8,@T[1]));
	&mov	(&DWP(0,@T[1]),$A);
	&add	($D,&DWP(12,@T[1]));
	&mov	(&DWP(4,@T[1]),@T[0]);
	&add	($E,&DWP(16,@T[1]));
	&mov	(&DWP(8,@T[1]),$C);
	&mov	($B,$C);
	&mov	(&DWP(12,@T[1]),$D);
	&xor	($B,$D);
	&mov	(&DWP(16,@T[1]),$E);
	&mov	(@T[1],@T[0]);
	&pshufd	(@X[0],@X[-4&7],0xee);	# was &movdqa	(@X[0],@X[-3&7]);
	&and	(@T[0],$B);
	&mov	($B,$T[1]);

	&jmp	(&label("loop"));

&set_label("done",16);		$j=$saved_j; @V=@saved_V;

	&Xtail_ssse3(\&body_20_39);
	&Xtail_ssse3(\&body_20_39);
	&Xtail_ssse3(\&body_20_39);

	&mov	(@T[1],&DWP(192,"esp"));	# update context
	&add	($A,&DWP(0,@T[1]));
	&mov	("esp",&DWP(192+12,"esp"));	# restore %esp
	&add	(@T[0],&DWP(4,@T[1]));		# $b
	&add	($C,&DWP(8,@T[1]));
	&mov	(&DWP(0,@T[1]),$A);
	&add	($D,&DWP(12,@T[1]));
	&mov	(&DWP(4,@T[1]),@T[0]);
	&add	($E,&DWP(16,@T[1]));
	&mov	(&DWP(8,@T[1]),$C);
	&mov	(&DWP(12,@T[1]),$D);
	&mov	(&DWP(16,@T[1]),$E);

&function_end("_sha1_block_data_order_ssse3");

$rx=0;	# reset

if ($ymm) {
my $Xi=4;			# 4xSIMD Xupdate round, start pre-seeded
my @X=map("xmm$_",(4..7,0..3));	# pre-seeded for $Xi=4
my @V=($A,$B,$C,$D,$E);
my $j=0;			# hash round
my @T=($T,$tmp1);
my $inp;

my $_rol=sub { &shld(@_[0],@_) };
my $_ror=sub { &shrd(@_[0],@_) };

&function_begin("_sha1_block_data_order_avx");
	&call	(&label("pic_point"));	# make it PIC!
	&set_label("pic_point");
	&blindpop($tmp1);
	&lea	($tmp1,&DWP(&label("K_XX_XX")."-".&label("pic_point"),$tmp1));
&set_label("avx_shortcut");
	&vzeroall();

	&vmovdqa(@X[3],&QWP(0,$tmp1));	# K_00_19
	&vmovdqa(@X[4],&QWP(16,$tmp1));	# K_20_39
	&vmovdqa(@X[5],&QWP(32,$tmp1));	# K_40_59
	&vmovdqa(@X[6],&QWP(48,$tmp1));	# K_60_79
	&vmovdqa(@X[2],&QWP(64,$tmp1));	# pbswap mask

	&mov	($E,&wparam(0));	# load argument block
	&mov	($inp=@T[1],&wparam(1));
	&mov	($D,&wparam(2));
	&mov	(@T[0],"esp");

	# stack frame layout
	#
	# +0	X[0]+K	X[1]+K	X[2]+K	X[3]+K	# XMM->IALU xfer area
	#	X[4]+K	X[5]+K	X[6]+K	X[7]+K
	#	X[8]+K	X[9]+K	X[10]+K	X[11]+K
	#	X[12]+K	X[13]+K	X[14]+K	X[15]+K
	#
	# +64	X[0]	X[1]	X[2]	X[3]	# XMM->XMM backtrace area
	#	X[4]	X[5]	X[6]	X[7]
	#	X[8]	X[9]	X[10]	X[11]	# even borrowed for K_00_19
	#
	# +112	K_20_39	K_20_39	K_20_39	K_20_39	# constants
	#	K_40_59	K_40_59	K_40_59	K_40_59
	#	K_60_79	K_60_79	K_60_79	K_60_79
	#	K_00_19	K_00_19	K_00_19	K_00_19
	#	pbswap mask
	#
	# +192	ctx				# argument block
	# +196	inp
	# +200	end
	# +204	esp
	&sub	("esp",208);
	&and	("esp",-64);

	&vmovdqa(&QWP(112+0,"esp"),@X[4]);	# copy constants
	&vmovdqa(&QWP(112+16,"esp"),@X[5]);
	&vmovdqa(&QWP(112+32,"esp"),@X[6]);
	&shl	($D,6);				# len*64
	&vmovdqa(&QWP(112+48,"esp"),@X[3]);
	&add	($D,$inp);			# end of input
	&vmovdqa(&QWP(112+64,"esp"),@X[2]);
	&add	($inp,64);
	&mov	(&DWP(192+0,"esp"),$E);		# save argument block
	&mov	(&DWP(192+4,"esp"),$inp);
	&mov	(&DWP(192+8,"esp"),$D);
	&mov	(&DWP(192+12,"esp"),@T[0]);	# save original %esp

	&mov	($A,&DWP(0,$E));	# load context
	&mov	($B,&DWP(4,$E));
	&mov	($C,&DWP(8,$E));
	&mov	($D,&DWP(12,$E));
	&mov	($E,&DWP(16,$E));
	&mov	(@T[0],$B);		# magic seed

	&vmovdqu(@X[-4&7],&QWP(-64,$inp));	# load input to %xmm[0-3]
	&vmovdqu(@X[-3&7],&QWP(-48,$inp));
	&vmovdqu(@X[-2&7],&QWP(-32,$inp));
	&vmovdqu(@X[-1&7],&QWP(-16,$inp));
	&vpshufb(@X[-4&7],@X[-4&7],@X[2]);	# byte swap
	&vpshufb(@X[-3&7],@X[-3&7],@X[2]);
	&vpshufb(@X[-2&7],@X[-2&7],@X[2]);
	&vmovdqa(&QWP(112-16,"esp"),@X[3]);	# borrow last backtrace slot
	&vpshufb(@X[-1&7],@X[-1&7],@X[2]);
	&vpaddd	(@X[0],@X[-4&7],@X[3]);		# add K_00_19
	&vpaddd	(@X[1],@X[-3&7],@X[3]);
	&vpaddd	(@X[2],@X[-2&7],@X[3]);
	&vmovdqa(&QWP(0,"esp"),@X[0]);		# X[]+K xfer to IALU
	&mov	(@T[1],$C);
	&vmovdqa(&QWP(0+16,"esp"),@X[1]);
	&xor	(@T[1],$D);
	&vmovdqa(&QWP(0+32,"esp"),@X[2]);
	&and	(@T[0],@T[1]);
	&jmp	(&label("loop"));

sub Xupdate_avx_16_31()		# recall that $Xi starts with 4
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 40 instructions
  my ($a,$b,$c,$d,$e);

	 eval(shift(@insns));
	 eval(shift(@insns));
	&vpalignr(@X[0],@X[-3&7],@X[-4&7],8);	# compose "X[-14]" in "X[0]"
	 eval(shift(@insns));
	 eval(shift(@insns));

	&vpaddd	(@X[3],@X[3],@X[-1&7]);
	&vmovdqa (&QWP(64+16*(($Xi-4)%3),"esp"),@X[-4&7]);# save X[] to backtrace buffer
	 eval(shift(@insns));
	 eval(shift(@insns));
	&vpsrldq(@X[2],@X[-1&7],4);		# "X[-3]", 3 dwords
	 eval(shift(@insns));
	 eval(shift(@insns));
	&vpxor	(@X[0],@X[0],@X[-4&7]);		# "X[0]"^="X[-16]"
	 eval(shift(@insns));
	 eval(shift(@insns));

	&vpxor	(@X[2],@X[2],@X[-2&7]);		# "X[-3]"^"X[-8]"
	 eval(shift(@insns));
	 eval(shift(@insns));
	&vmovdqa (&QWP(0+16*(($Xi-1)&3),"esp"),@X[3]);	# X[]+K xfer to IALU
	 eval(shift(@insns));
	 eval(shift(@insns));

	&vpxor	(@X[0],@X[0],@X[2]);		# "X[0]"^="X[-3]"^"X[-8]"
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));

	&vpsrld	(@X[2],@X[0],31);
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));

	&vpslldq(@X[4],@X[0],12);		# "X[0]"<<96, extract one dword
	&vpaddd	(@X[0],@X[0],@X[0]);
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));

	&vpsrld	(@X[3],@X[4],30);
	&vpor	(@X[0],@X[0],@X[2]);		# "X[0]"<<<=1
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));

	&vpslld	(@X[4],@X[4],2);
	&vmovdqa (@X[2],&QWP(64+16*(($Xi-6)%3),"esp")) if ($Xi>5);	# restore X[] from backtrace buffer
	 eval(shift(@insns));
	 eval(shift(@insns));
	&vpxor	(@X[0],@X[0],@X[3]);
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));

	&vpxor	(@X[0],@X[0],@X[4]);		# "X[0]"^=("X[0]"<<96)<<<2
	 eval(shift(@insns));
	 eval(shift(@insns));
	&vmovdqa (@X[4],&QWP(112-16+16*(($Xi)/5),"esp"));	# K_XX_XX
	 eval(shift(@insns));
	 eval(shift(@insns));

	foreach (@insns) { eval; }	# remaining instructions [if any]

  $Xi++;	push(@X,shift(@X));	# "rotate" X[]
}

sub Xupdate_avx_32_79()
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 32 to 44 instructions
  my ($a,$b,$c,$d,$e);

	&vpalignr(@X[2],@X[-1&7],@X[-2&7],8);	# compose "X[-6]"
	&vpxor	(@X[0],@X[0],@X[-4&7]);		# "X[0]"="X[-32]"^"X[-16]"
	 eval(shift(@insns));		# body_20_39
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));		# rol

	&vpxor	(@X[0],@X[0],@X[-7&7]);		# "X[0]"^="X[-28]"
	&vmovdqa (&QWP(64+16*(($Xi-4)%3),"esp"),@X[-4&7]);	# save X[] to backtrace buffer
	 eval(shift(@insns));
	 eval(shift(@insns));
	if ($Xi%5) {
	  &vmovdqa	(@X[4],@X[3]);	# "perpetuate" K_XX_XX...
	} else {			# ... or load next one
	  &vmovdqa	(@X[4],&QWP(112-16+16*($Xi/5),"esp"));
	}
	&vpaddd	(@X[3],@X[3],@X[-1&7]);
	 eval(shift(@insns));		# ror
	 eval(shift(@insns));

	&vpxor	(@X[0],@X[0],@X[2]);		# "X[0]"^="X[-6]"
	 eval(shift(@insns));		# body_20_39
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));		# rol

	&vpsrld	(@X[2],@X[0],30);
	&vmovdqa (&QWP(0+16*(($Xi-1)&3),"esp"),@X[3]);	# X[]+K xfer to IALU
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));		# ror
	 eval(shift(@insns));

	&vpslld	(@X[0],@X[0],2);
	 eval(shift(@insns));		# body_20_39
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));		# rol
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));		# ror
	 eval(shift(@insns));

	&vpor	(@X[0],@X[0],@X[2]);	# "X[0]"<<<=2
	 eval(shift(@insns));		# body_20_39
	 eval(shift(@insns));
	&vmovdqa (@X[2],&QWP(64+16*(($Xi-6)%3),"esp")) if($Xi<19);	# restore X[] from backtrace buffer
	 eval(shift(@insns));
	 eval(shift(@insns));		# rol
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));		# ror
	 eval(shift(@insns));

	foreach (@insns) { eval; }	# remaining instructions

  $Xi++;	push(@X,shift(@X));	# "rotate" X[]
}

sub Xuplast_avx_80()
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
  my ($a,$b,$c,$d,$e);

	 eval(shift(@insns));
	&vpaddd	(@X[3],@X[3],@X[-1&7]);
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));

	&vmovdqa (&QWP(0+16*(($Xi-1)&3),"esp"),@X[3]);	# X[]+K xfer IALU

	foreach (@insns) { eval; }	# remaining instructions

	&mov	($inp=@T[1],&DWP(192+4,"esp"));
	&cmp	($inp,&DWP(192+8,"esp"));
	&je	(&label("done"));

	&vmovdqa(@X[3],&QWP(112+48,"esp"));	# K_00_19
	&vmovdqa(@X[2],&QWP(112+64,"esp"));	# pbswap mask
	&vmovdqu(@X[-4&7],&QWP(0,$inp));	# load input
	&vmovdqu(@X[-3&7],&QWP(16,$inp));
	&vmovdqu(@X[-2&7],&QWP(32,$inp));
	&vmovdqu(@X[-1&7],&QWP(48,$inp));
	&add	($inp,64);
	&vpshufb(@X[-4&7],@X[-4&7],@X[2]);	# byte swap
	&mov	(&DWP(192+4,"esp"),$inp);
	&vmovdqa(&QWP(112-16,"esp"),@X[3]);	# borrow last backtrace slot

	$Xi=0;
}

sub Xloop_avx()
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
  my ($a,$b,$c,$d,$e);

	 eval(shift(@insns));
	 eval(shift(@insns));
	&vpshufb (@X[($Xi-3)&7],@X[($Xi-3)&7],@X[2]);
	 eval(shift(@insns));
	 eval(shift(@insns));
	&vpaddd	(@X[$Xi&7],@X[($Xi-4)&7],@X[3]);
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	&vmovdqa (&QWP(0+16*$Xi,"esp"),@X[$Xi&7]);	# X[]+K xfer to IALU
	 eval(shift(@insns));
	 eval(shift(@insns));

	foreach (@insns) { eval; }
  $Xi++;
}

sub Xtail_avx()
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);	# 32 instructions
  my ($a,$b,$c,$d,$e);

	foreach (@insns) { eval; }
}

&set_label("loop",16);
	&Xupdate_avx_16_31(\&body_00_19);
	&Xupdate_avx_16_31(\&body_00_19);
	&Xupdate_avx_16_31(\&body_00_19);
	&Xupdate_avx_16_31(\&body_00_19);
	&Xupdate_avx_32_79(\&body_00_19);
	&Xupdate_avx_32_79(\&body_20_39);
	&Xupdate_avx_32_79(\&body_20_39);
	&Xupdate_avx_32_79(\&body_20_39);
	&Xupdate_avx_32_79(\&body_20_39);
	&Xupdate_avx_32_79(\&body_20_39);
	&Xupdate_avx_32_79(\&body_40_59);
	&Xupdate_avx_32_79(\&body_40_59);
	&Xupdate_avx_32_79(\&body_40_59);
	&Xupdate_avx_32_79(\&body_40_59);
	&Xupdate_avx_32_79(\&body_40_59);
	&Xupdate_avx_32_79(\&body_20_39);
	&Xuplast_avx_80(\&body_20_39);	# can jump to "done"

	$saved_j=$j; @saved_V=@V;

	&Xloop_avx(\&body_20_39);
	&Xloop_avx(\&body_20_39);
	&Xloop_avx(\&body_20_39);

	&mov	(@T[1],&DWP(192,"esp"));	# update context
	&add	($A,&DWP(0,@T[1]));
	&add	(@T[0],&DWP(4,@T[1]));		# $b
	&add	($C,&DWP(8,@T[1]));
	&mov	(&DWP(0,@T[1]),$A);
	&add	($D,&DWP(12,@T[1]));
	&mov	(&DWP(4,@T[1]),@T[0]);
	&add	($E,&DWP(16,@T[1]));
	&mov	($B,$C);
	&mov	(&DWP(8,@T[1]),$C);
	&xor	($B,$D);
	&mov	(&DWP(12,@T[1]),$D);
	&mov	(&DWP(16,@T[1]),$E);
	&mov	(@T[1],@T[0]);
	&and	(@T[0],$B);
	&mov	($B,@T[1]);

	&jmp	(&label("loop"));

&set_label("done",16);		$j=$saved_j; @V=@saved_V;

	&Xtail_avx(\&body_20_39);
	&Xtail_avx(\&body_20_39);
	&Xtail_avx(\&body_20_39);

	&vzeroall();

	&mov	(@T[1],&DWP(192,"esp"));	# update context
	&add	($A,&DWP(0,@T[1]));
	&mov	("esp",&DWP(192+12,"esp"));	# restore %esp
	&add	(@T[0],&DWP(4,@T[1]));		# $b
	&add	($C,&DWP(8,@T[1]));
	&mov	(&DWP(0,@T[1]),$A);
	&add	($D,&DWP(12,@T[1]));
	&mov	(&DWP(4,@T[1]),@T[0]);
	&add	($E,&DWP(16,@T[1]));
	&mov	(&DWP(8,@T[1]),$C);
	&mov	(&DWP(12,@T[1]),$D);
	&mov	(&DWP(16,@T[1]),$E);
&function_end("_sha1_block_data_order_avx");
}
&set_label("K_XX_XX",64);
&data_word(0x5a827999,0x5a827999,0x5a827999,0x5a827999);	# K_00_19
&data_word(0x6ed9eba1,0x6ed9eba1,0x6ed9eba1,0x6ed9eba1);	# K_20_39
&data_word(0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc,0x8f1bbcdc);	# K_40_59
&data_word(0xca62c1d6,0xca62c1d6,0xca62c1d6,0xca62c1d6);	# K_60_79
&data_word(0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f);	# pbswap mask
&data_byte(0xf,0xe,0xd,0xc,0xb,0xa,0x9,0x8,0x7,0x6,0x5,0x4,0x3,0x2,0x1,0x0);
}
&asciz("SHA1 block transform for x86, CRYPTOGAMS by <appro\@openssl.org>");

&asm_finish();