#! /usr/bin/env perl
# Copyright 2014-2018 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the OpenSSL license (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html

# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
#
# Permission to use under GPLv2 terms is granted.
# ====================================================================
#
# SHA256/512 for ARMv8.
#
# Performance in cycles per processed byte and improvement coefficient
# over code generated with "default" compiler:
#
#		SHA256-hw	SHA256(*)	SHA512
# Apple A7	1.97		10.5 (+33%)	6.73 (-1%(**))
# Cortex-A53	2.38		15.5 (+115%)	10.0 (+150%(***))
# Cortex-A57	2.31		11.6 (+86%)	7.51 (+260%(***))
# Denver	2.01		10.5 (+26%)	6.70 (+8%)
# X-Gene			20.0 (+100%)	12.8 (+300%(***))
# Mongoose	2.36		13.0 (+50%)	8.36 (+33%)
# Kryo		1.92		17.4 (+30%)	11.2 (+8%)
#
# (*)	Software SHA256 results are of lesser relevance, presented
#	mostly for informational purposes.
# (**)	The result is a trade-off: it's possible to improve it by
#	10% (or by 1 cycle per round), but at the cost of a 20% loss
#	on Cortex-A53 (or by 4 cycles per round).
# (***)	Super-impressive coefficients over gcc-generated code are
#	an indication of some compiler "pathology"; most notably, code
#	generated with -mgeneral-regs-only is significantly faster
#	and the gap is only 40-90%.
#
# October 2016.
#
# Originally it was reckoned that it made no sense to implement a NEON
# version of SHA256 for 64-bit processors. This is because the performance
# improvement on the most widespread Cortex-A5x processors was observed
# to be marginal: the same on Cortex-A53 and ~10% on A57. But then it was
# observed that 32-bit NEON SHA256 performs significantly better than
# the 64-bit scalar version on *some* of the more recent processors. As
# a result, a 64-bit NEON version of SHA256 was added to provide the best
# all-round performance. For example, it executes ~30% faster on X-Gene
# and Mongoose. [For reference, the NEON version of SHA512 is bound to
# deliver much less improvement, likely *negative* on Cortex-A5x.
# Which is why NEON support is limited to SHA256.]
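
# For reference only, a plain-Perl sketch of the functions that the
# rotation constants selected below implement (32-bit SHA256 case shown;
# SHA512 is analogous with 64-bit rotates and the other constant set).
# These subs are purely illustrative and are never called by the
# code generator:

sub _ror32  { my ($x,$n)=@_; (($x>>$n)|($x<<(32-$n))) & 0xffffffff; }
sub _Sigma0 { my $x=shift; _ror32($x,2)  ^ _ror32($x,13) ^ _ror32($x,22); }
sub _Sigma1 { my $x=shift; _ror32($x,6)  ^ _ror32($x,11) ^ _ror32($x,25); }
sub _sigma0 { my $x=shift; _ror32($x,7)  ^ _ror32($x,18) ^ ($x>>3);  }
sub _sigma1 { my $x=shift; _ror32($x,17) ^ _ror32($x,19) ^ ($x>>10); }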

$output=pop;
$flavour=pop;

if ($flavour && $flavour ne "void") {
    $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
    ( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
    ( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or
    die "can't locate arm-xlate.pl";

    open OUT,"| \"$^X\" $xlate $flavour $output";
    *STDOUT=*OUT;
} else {
    open STDOUT,">$output";
}

if ($output =~ /512/) {
	$BITS=512;
	$SZ=8;
	@Sigma0=(28,34,39);
	@Sigma1=(14,18,41);
	@sigma0=( 1, 8, 7);
	@sigma1=(19,61, 6);
	$rounds=80;
	$reg_t="x";
} else {
	$BITS=256;
	$SZ=4;
	@Sigma0=( 2,13,22);
	@Sigma1=( 6,11,25);
	@sigma0=( 7,18, 3);
	@sigma1=(17,19,10);
	$rounds=64;
	$reg_t="w";
}

$func="sha${BITS}_block_data_order";

($ctx,$inp,$num,$Ktbl)=map("x$_",(0..2,30));

@X=map("$reg_t$_",(3..15,0..2));
@V=($A,$B,$C,$D,$E,$F,$G,$H)=map("$reg_t$_",(20..27));
($t0,$t1,$t2,$t3)=map("$reg_t$_",(16,17,19,28));

sub BODY_00_xx {
my ($i,$a,$b,$c,$d,$e,$f,$g,$h)=@_;
my $j=($i+1)&15;
my ($T0,$T1,$T2)=(@X[($i-8)&15],@X[($i-9)&15],@X[($i-10)&15]);
   $T0=@X[$i+3] if ($i<11);

$code.=<<___	if ($i<16);
#ifndef	__AARCH64EB__
	rev	@X[$i],@X[$i]			// $i
#endif
___
$code.=<<___	if ($i<13 && ($i&1));
	ldp	@X[$i+1],@X[$i+2],[$inp],#2*$SZ
___
$code.=<<___	if ($i==13);
	ldp	@X[14],@X[15],[$inp]
___
$code.=<<___	if ($i>=14);
	ldr	@X[($i-11)&15],[sp,#`$SZ*(($i-11)%4)`]
___
$code.=<<___	if ($i>0 && $i<16);
	add	$a,$a,$t1			// h+=Sigma0(a)
___
$code.=<<___	if ($i>=11);
	str	@X[($i-8)&15],[sp,#`$SZ*(($i-8)%4)`]
___
# While ARMv8 specifies merged rotate-n-logical operations such as
# 'eor x,y,z,ror#n', they were found to negatively affect performance
# on Apple A7. The reason seems to be that they require even 'y' to
# be available earlier. This means that such a merged instruction is
# not necessarily the best choice on the critical path... On the other
# hand, Cortex-A5x handles merged instructions much better than disjoint
# rotate and logical... See the (**) footnote above.
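#
# To illustrate the trade-off (a sketch with arbitrary registers, not
# code emitted by this module): the merged form
#
#	eor	x0,x1,x2,ror#6
#
# can only issue once both x1 and x2 are ready, while the disjoint pair
#
#	ror	x3,x2,#6
#	eor	x0,x1,x3
#
# lets the rotate start as soon as x2 alone is ready. That is why the
# scalar rounds below keep separate 'ror' instructions on the critical
# path for Sigma1(e) and Sigma0(a).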
$code.=<<___	if ($i<15);
	ror	$t0,$e,#$Sigma1[0]
	add	$h,$h,$t2			// h+=K[i]
	eor	$T0,$e,$e,ror#`$Sigma1[2]-$Sigma1[1]`
	and	$t1,$f,$e
	bic	$t2,$g,$e
	add	$h,$h,@X[$i&15]			// h+=X[i]
	orr	$t1,$t1,$t2			// Ch(e,f,g)
	eor	$t2,$a,$b			// a^b, b^c in next round
	eor	$t0,$t0,$T0,ror#$Sigma1[1]	// Sigma1(e)
	ror	$T0,$a,#$Sigma0[0]
	add	$h,$h,$t1			// h+=Ch(e,f,g)
	eor	$t1,$a,$a,ror#`$Sigma0[2]-$Sigma0[1]`
	add	$h,$h,$t0			// h+=Sigma1(e)
	and	$t3,$t3,$t2			// (b^c)&=(a^b)
	add	$d,$d,$h			// d+=h
	eor	$t3,$t3,$b			// Maj(a,b,c)
	eor	$t1,$T0,$t1,ror#$Sigma0[1]	// Sigma0(a)
	add	$h,$h,$t3			// h+=Maj(a,b,c)
	ldr	$t3,[$Ktbl],#$SZ		// *K++, $t2 in next round
	//add	$h,$h,$t1			// h+=Sigma0(a)
___
$code.=<<___	if ($i>=15);
	ror	$t0,$e,#$Sigma1[0]
	add	$h,$h,$t2			// h+=K[i]
	ror	$T1,@X[($j+1)&15],#$sigma0[0]
	and	$t1,$f,$e
	ror	$T2,@X[($j+14)&15],#$sigma1[0]
	bic	$t2,$g,$e
	ror	$T0,$a,#$Sigma0[0]
	add	$h,$h,@X[$i&15]			// h+=X[i]
	eor	$t0,$t0,$e,ror#$Sigma1[1]
	eor	$T1,$T1,@X[($j+1)&15],ror#$sigma0[1]
	orr	$t1,$t1,$t2			// Ch(e,f,g)
	eor	$t2,$a,$b			// a^b, b^c in next round
	eor	$t0,$t0,$e,ror#$Sigma1[2]	// Sigma1(e)
	eor	$T0,$T0,$a,ror#$Sigma0[1]
	add	$h,$h,$t1			// h+=Ch(e,f,g)
	and	$t3,$t3,$t2			// (b^c)&=(a^b)
	eor	$T2,$T2,@X[($j+14)&15],ror#$sigma1[1]
	eor	$T1,$T1,@X[($j+1)&15],lsr#$sigma0[2]	// sigma0(X[i+1])
	add	$h,$h,$t0			// h+=Sigma1(e)
	eor	$t3,$t3,$b			// Maj(a,b,c)
	eor	$t1,$T0,$a,ror#$Sigma0[2]	// Sigma0(a)
	eor	$T2,$T2,@X[($j+14)&15],lsr#$sigma1[2]	// sigma1(X[i+14])
	add	@X[$j],@X[$j],@X[($j+9)&15]
	add	$d,$d,$h			// d+=h
	add	$h,$h,$t3			// h+=Maj(a,b,c)
	ldr	$t3,[$Ktbl],#$SZ		// *K++, $t2 in next round
	add	@X[$j],@X[$j],$T1
	add	$h,$h,$t1			// h+=Sigma0(a)
	add	@X[$j],@X[$j],$T2
___
	($t2,$t3)=($t3,$t2);
}

$code.=<<___;
#ifndef	__KERNEL__
# include "arm_arch.h"
#endif

.text

.extern	OPENSSL_armcap_P
.globl	$func
.type	$func,%function
.align	6
$func:
#ifndef	__KERNEL__
# ifdef	__ILP32__
	ldrsw	x16,.LOPENSSL_armcap_P
# else
	ldr	x16,.LOPENSSL_armcap_P
# endif
	adr	x17,.LOPENSSL_armcap_P
	add	x16,x16,x17
	ldr	w16,[x16]
___
$code.=<<___	if ($SZ==4);
	tst	w16,#ARMV8_SHA256
	b.ne	.Lv8_entry
	tst	w16,#ARMV7_NEON
	b.ne	.Lneon_entry
___
$code.=<<___	if ($SZ==8);
	tst	w16,#ARMV8_SHA512
	b.ne	.Lv8_entry
___
$code.=<<___;
#endif
	stp	x29,x30,[sp,#-128]!
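	// Frame layout (for reference): x29/x30 at [sp,#0], callee-saved
	// x19-x28 in the #16..#95 slots, ctx at #96, end-of-input at #104,
	// running input pointer at #112; the "sub sp" below reserves
	// 4*$SZ more bytes for spilling X[] words.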
	add	x29,sp,#0

	stp	x19,x20,[sp,#16]
	stp	x21,x22,[sp,#32]
	stp	x23,x24,[sp,#48]
	stp	x25,x26,[sp,#64]
	stp	x27,x28,[sp,#80]
	sub	sp,sp,#4*$SZ

	ldp	$A,$B,[$ctx]				// load context
	ldp	$C,$D,[$ctx,#2*$SZ]
	ldp	$E,$F,[$ctx,#4*$SZ]
	add	$num,$inp,$num,lsl#`log(16*$SZ)/log(2)`	// end of input
	ldp	$G,$H,[$ctx,#6*$SZ]
	adr	$Ktbl,.LK$BITS
	stp	$ctx,$num,[x29,#96]

.Loop:
	ldp	@X[0],@X[1],[$inp],#2*$SZ
	ldr	$t2,[$Ktbl],#$SZ			// *K++
	eor	$t3,$B,$C				// magic seed
	str	$inp,[x29,#112]
___
for ($i=0;$i<16;$i++)	{ &BODY_00_xx($i,@V); unshift(@V,pop(@V)); }
$code.=".Loop_16_xx:\n";
for (;$i<32;$i++)	{ &BODY_00_xx($i,@V); unshift(@V,pop(@V)); }
$code.=<<___;
	cbnz	$t2,.Loop_16_xx

	ldp	$ctx,$num,[x29,#96]
	ldr	$inp,[x29,#112]
	sub	$Ktbl,$Ktbl,#`$SZ*($rounds+1)`		// rewind

	ldp	@X[0],@X[1],[$ctx]
	ldp	@X[2],@X[3],[$ctx,#2*$SZ]
	add	$inp,$inp,#14*$SZ			// advance input pointer
	ldp	@X[4],@X[5],[$ctx,#4*$SZ]
	add	$A,$A,@X[0]
	ldp	@X[6],@X[7],[$ctx,#6*$SZ]
	add	$B,$B,@X[1]
	add	$C,$C,@X[2]
	add	$D,$D,@X[3]
	stp	$A,$B,[$ctx]
	add	$E,$E,@X[4]
	add	$F,$F,@X[5]
	stp	$C,$D,[$ctx,#2*$SZ]
	add	$G,$G,@X[6]
	add	$H,$H,@X[7]
	cmp	$inp,$num
	stp	$E,$F,[$ctx,#4*$SZ]
	stp	$G,$H,[$ctx,#6*$SZ]
	b.ne	.Loop

	ldp	x19,x20,[x29,#16]
	add	sp,sp,#4*$SZ
	ldp	x21,x22,[x29,#32]
	ldp	x23,x24,[x29,#48]
	ldp	x25,x26,[x29,#64]
	ldp	x27,x28,[x29,#80]
	ldp	x29,x30,[sp],#128
	ret
.size	$func,.-$func

.align	6
.type	.LK$BITS,%object
.LK$BITS:
___
$code.=<<___ if ($SZ==8);
	.quad	0x428a2f98d728ae22,0x7137449123ef65cd
	.quad	0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
	.quad	0x3956c25bf348b538,0x59f111f1b605d019
	.quad	0x923f82a4af194f9b,0xab1c5ed5da6d8118
	.quad	0xd807aa98a3030242,0x12835b0145706fbe
	.quad	0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
	.quad	0x72be5d74f27b896f,0x80deb1fe3b1696b1
	.quad	0x9bdc06a725c71235,0xc19bf174cf692694
	.quad	0xe49b69c19ef14ad2,0xefbe4786384f25e3
	.quad	0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65
	.quad	0x2de92c6f592b0275,0x4a7484aa6ea6e483
	.quad	0x5cb0a9dcbd41fbd4,0x76f988da831153b5
	.quad	0x983e5152ee66dfab,0xa831c66d2db43210
	.quad	0xb00327c898fb213f,0xbf597fc7beef0ee4
	.quad	0xc6e00bf33da88fc2,0xd5a79147930aa725
	.quad	0x06ca6351e003826f,0x142929670a0e6e70
	.quad	0x27b70a8546d22ffc,0x2e1b21385c26c926
	.quad	0x4d2c6dfc5ac42aed,0x53380d139d95b3df
	.quad	0x650a73548baf63de,0x766a0abb3c77b2a8
	.quad	0x81c2c92e47edaee6,0x92722c851482353b
	.quad	0xa2bfe8a14cf10364,0xa81a664bbc423001
	.quad	0xc24b8b70d0f89791,0xc76c51a30654be30
	.quad	0xd192e819d6ef5218,0xd69906245565a910
	.quad	0xf40e35855771202a,0x106aa07032bbd1b8
	.quad	0x19a4c116b8d2d0c8,0x1e376c085141ab53
	.quad	0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
	.quad	0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
	.quad	0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
	.quad	0x748f82ee5defb2fc,0x78a5636f43172f60
	.quad	0x84c87814a1f0ab72,0x8cc702081a6439ec
	.quad	0x90befffa23631e28,0xa4506cebde82bde9
	.quad	0xbef9a3f7b2c67915,0xc67178f2e372532b
	.quad	0xca273eceea26619c,0xd186b8c721c0c207
	.quad	0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
	.quad	0x06f067aa72176fba,0x0a637dc5a2c898a6
	.quad	0x113f9804bef90dae,0x1b710b35131c471b
	.quad	0x28db77f523047d84,0x32caab7b40c72493
	.quad	0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
	.quad	0x4cc5d4becb3e42b6,0x597f299cfc657e2a
	.quad	0x5fcb6fab3ad6faec,0x6c44198c4a475817
	.quad	0	// terminator
___
$code.=<<___ if ($SZ==4);
	.long	0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5
	.long	0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5
	.long	0xd807aa98,0x12835b01,0x243185be,0x550c7dc3
	.long	0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174
	.long	0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc
	.long	0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da
	.long	0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7
	.long	0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967
	.long	0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13
	.long	0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85
	.long	0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3
	.long	0xd192e819,0xd6990624,0xf40e3585,0x106aa070
	.long	0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5
	.long	0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3
	.long	0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208
	.long	0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2
	.long	0	// terminator
___
$code.=<<___;
.size	.LK$BITS,.-.LK$BITS
#ifndef	__KERNEL__
.align	3
.LOPENSSL_armcap_P:
# ifdef	__ILP32__
	.long	OPENSSL_armcap_P-.
# else
	.quad	OPENSSL_armcap_P-.
# endif
#endif
.asciz	"SHA$BITS block transform for ARMv8, CRYPTOGAMS by <appro\@openssl.org>"
.align	2
___

if ($SZ==4) {
my $Ktbl="x3";

my ($ABCD,$EFGH,$abcd)=map("v$_.16b",(0..2));
my @MSG=map("v$_.16b",(4..7));
my ($W0,$W1)=("v16.4s","v17.4s");
my ($ABCD_SAVE,$EFGH_SAVE)=("v18.16b","v19.16b");

$code.=<<___;
#ifndef	__KERNEL__
.type	sha256_block_armv8,%function
.align	6
sha256_block_armv8:
.Lv8_entry:
	stp	x29,x30,[sp,#-16]!
	add	x29,sp,#0

	ld1.32	{$ABCD,$EFGH},[$ctx]
	adr	$Ktbl,.LK256

.Loop_hw:
	ld1	{@MSG[0]-@MSG[3]},[$inp],#64
	sub	$num,$num,#1
	ld1.32	{$W0},[$Ktbl],#16
	rev32	@MSG[0],@MSG[0]
	rev32	@MSG[1],@MSG[1]
	rev32	@MSG[2],@MSG[2]
	rev32	@MSG[3],@MSG[3]
	orr	$ABCD_SAVE,$ABCD,$ABCD		// offload
	orr	$EFGH_SAVE,$EFGH,$EFGH
___
for($i=0;$i<12;$i++) {
$code.=<<___;
	ld1.32	{$W1},[$Ktbl],#16
	add.i32	$W0,$W0,@MSG[0]
	sha256su0	@MSG[0],@MSG[1]
	orr	$abcd,$ABCD,$ABCD
	sha256h	$ABCD,$EFGH,$W0
	sha256h2	$EFGH,$abcd,$W0
	sha256su1	@MSG[0],@MSG[2],@MSG[3]
___
	($W0,$W1)=($W1,$W0);	push(@MSG,shift(@MSG));
}
$code.=<<___;
	ld1.32	{$W1},[$Ktbl],#16
	add.i32	$W0,$W0,@MSG[0]
	orr	$abcd,$ABCD,$ABCD
	sha256h	$ABCD,$EFGH,$W0
	sha256h2	$EFGH,$abcd,$W0

	ld1.32	{$W0},[$Ktbl],#16
	add.i32	$W1,$W1,@MSG[1]
	orr	$abcd,$ABCD,$ABCD
	sha256h	$ABCD,$EFGH,$W1
	sha256h2	$EFGH,$abcd,$W1

	ld1.32	{$W1},[$Ktbl]
	add.i32	$W0,$W0,@MSG[2]
	sub	$Ktbl,$Ktbl,#$rounds*$SZ-16	// rewind
	orr	$abcd,$ABCD,$ABCD
	sha256h	$ABCD,$EFGH,$W0
	sha256h2	$EFGH,$abcd,$W0

	add.i32	$W1,$W1,@MSG[3]
	orr	$abcd,$ABCD,$ABCD
	sha256h	$ABCD,$EFGH,$W1
	sha256h2	$EFGH,$abcd,$W1

	add.i32	$ABCD,$ABCD,$ABCD_SAVE
	add.i32	$EFGH,$EFGH,$EFGH_SAVE

	cbnz	$num,.Loop_hw

	st1.32	{$ABCD,$EFGH},[$ctx]

	ldr	x29,[sp],#16
	ret
.size	sha256_block_armv8,.-sha256_block_armv8
#endif
___
}

if ($SZ==4) {	######################################### NEON stuff #
# You'll surely note a lot of similarities with the sha256-armv4 module,
# and of course it's not a coincidence. sha256-armv4 was used as the
# initial template, but was adapted for the ARMv8 instruction set and
# extensively re-tuned for all-round performance.
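
# One detail worth noting before the register assignments below: NEON
# has no vector rotate instruction, so the message-schedule code in
# Xupdate emulates 'x ror#n' with a shift pair (sketch, 32-bit lanes):
#
#	ushr	Vd.4s,Vn.4s,#n		// Vd  = x >> n
#	sli	Vd.4s,Vn.4s,#(32-n)	// Vd |= x << (32-n)
#
# while the plain '>>' terms of sigma0/sigma1 remain single ushr
# instructions.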

my @V = ($A,$B,$C,$D,$E,$F,$G,$H) = map("w$_",(3..10));
my ($t0,$t1,$t2,$t3,$t4) = map("w$_",(11..15));
my $Ktbl="x16";
my $Xfer="x17";
my @X = map("q$_",(0..3));
my ($T0,$T1,$T2,$T3,$T4,$T5,$T6,$T7) = map("q$_",(4..7,16..19));
my $j=0;

sub AUTOLOAD()		# thunk [simplified] x86-style perlasm
{ my $opcode = $AUTOLOAD; $opcode =~ s/.*:://; $opcode =~ s/_/\./;
  my $arg = pop;
    $arg = "#$arg" if ($arg*1 eq $arg);
    $code .= "\t$opcode\t".join(',',@_,$arg)."\n";
}

sub Dscalar { shift =~ m|[qv]([0-9]+)|?"d$1":""; }
sub Dlo     { shift =~ m|[qv]([0-9]+)|?"v$1.d[0]":""; }
sub Dhi     { shift =~ m|[qv]([0-9]+)|?"v$1.d[1]":""; }

sub Xupdate()
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);
  my ($a,$b,$c,$d,$e,$f,$g,$h);

	&ext_8		($T0,@X[0],@X[1],4);	# X[1..4]
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	&ext_8		($T3,@X[2],@X[3],4);	# X[9..12]
	 eval(shift(@insns));
	 eval(shift(@insns));
	&mov		(&Dscalar($T7),&Dhi(@X[3]));	# X[14..15]
	 eval(shift(@insns));
	 eval(shift(@insns));
	&ushr_32	($T2,$T0,$sigma0[0]);
	 eval(shift(@insns));
	&ushr_32	($T1,$T0,$sigma0[2]);
	 eval(shift(@insns));
	&add_32		(@X[0],@X[0],$T3);	# X[0..3] += X[9..12]
	 eval(shift(@insns));
	&sli_32		($T2,$T0,32-$sigma0[0]);
	 eval(shift(@insns));
	 eval(shift(@insns));
	&ushr_32	($T3,$T0,$sigma0[1]);
	 eval(shift(@insns));
	 eval(shift(@insns));
	&eor_8		($T1,$T1,$T2);
	 eval(shift(@insns));
	 eval(shift(@insns));
	&sli_32		($T3,$T0,32-$sigma0[1]);
	 eval(shift(@insns));
	 eval(shift(@insns));
	&ushr_32	($T4,$T7,$sigma1[0]);
	 eval(shift(@insns));
	 eval(shift(@insns));
	&eor_8		($T1,$T1,$T3);		# sigma0(X[1..4])
	 eval(shift(@insns));
	 eval(shift(@insns));
	&sli_32		($T4,$T7,32-$sigma1[0]);
	 eval(shift(@insns));
	 eval(shift(@insns));
	&ushr_32	($T5,$T7,$sigma1[2]);
	 eval(shift(@insns));
	 eval(shift(@insns));
	&ushr_32	($T3,$T7,$sigma1[1]);
	 eval(shift(@insns));
	 eval(shift(@insns));
	&add_32		(@X[0],@X[0],$T1);	# X[0..3] += sigma0(X[1..4])
	 eval(shift(@insns));
	 eval(shift(@insns));
	&sli_u32	($T3,$T7,32-$sigma1[1]);
	 eval(shift(@insns));
	 eval(shift(@insns));
	&eor_8		($T5,$T5,$T4);
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	&eor_8		($T5,$T5,$T3);		# sigma1(X[14..15])
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	&add_32		(@X[0],@X[0],$T5);	# X[0..1] += sigma1(X[14..15])
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	&ushr_32	($T6,@X[0],$sigma1[0]);
	 eval(shift(@insns));
	&ushr_32	($T7,@X[0],$sigma1[2]);
	 eval(shift(@insns));
	 eval(shift(@insns));
	&sli_32		($T6,@X[0],32-$sigma1[0]);
	 eval(shift(@insns));
	&ushr_32	($T5,@X[0],$sigma1[1]);
	 eval(shift(@insns));
	 eval(shift(@insns));
	&eor_8		($T7,$T7,$T6);
	 eval(shift(@insns));
	 eval(shift(@insns));
	&sli_32		($T5,@X[0],32-$sigma1[1]);
	 eval(shift(@insns));
	 eval(shift(@insns));
	&ld1_32		("{$T0}","[$Ktbl], #16");
	 eval(shift(@insns));
	&eor_8		($T7,$T7,$T5);		# sigma1(X[16..17])
	 eval(shift(@insns));
	 eval(shift(@insns));
	&eor_8		($T5,$T5,$T5);
	 eval(shift(@insns));
	 eval(shift(@insns));
	&mov		(&Dhi($T5), &Dlo($T7));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	&add_32		(@X[0],@X[0],$T5);	# X[2..3] += sigma1(X[16..17])
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	&add_32		($T0,$T0,@X[0]);
	 while($#insns>=1) { eval(shift(@insns)); }
	&st1_32		("{$T0}","[$Xfer], #16");
	 eval(shift(@insns));

	push(@X,shift(@X));		# "rotate" X[]
}

sub Xpreload()
{ use integer;
  my $body = shift;
  my @insns = (&$body,&$body,&$body,&$body);
  my ($a,$b,$c,$d,$e,$f,$g,$h);

	 eval(shift(@insns));
	 eval(shift(@insns));
	&ld1_8		("{@X[0]}","[$inp],#16");
	 eval(shift(@insns));
	 eval(shift(@insns));
	&ld1_32		("{$T0}","[$Ktbl],#16");
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	&rev32		(@X[0],@X[0]);
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	 eval(shift(@insns));
	&add_32		($T0,$T0,@X[0]);
	 foreach (@insns) { eval; }	# remaining instructions
	&st1_32		("{$T0}","[$Xfer], #16");

	push(@X,shift(@X));		# "rotate" X[]
}

sub body_00_15 () {
	(
	'($a,$b,$c,$d,$e,$f,$g,$h)=@V;'.
	'&add	($h,$h,$t1)',			# h+=X[i]+K[i]
	'&add	($a,$a,$t4);'.			# h+=Sigma0(a) from the past
	'&and	($t1,$f,$e)',
	'&bic	($t4,$g,$e)',
	'&eor	($t0,$e,$e,"ror#".($Sigma1[1]-$Sigma1[0]))',
	'&add	($a,$a,$t2)',			# h+=Maj(a,b,c) from the past
	'&orr	($t1,$t1,$t4)',			# Ch(e,f,g)
	'&eor	($t0,$t0,$e,"ror#".($Sigma1[2]-$Sigma1[0]))',	# Sigma1(e)
	'&eor	($t4,$a,$a,"ror#".($Sigma0[1]-$Sigma0[0]))',
	'&add	($h,$h,$t1)',			# h+=Ch(e,f,g)
	'&ror	($t0,$t0,"#$Sigma1[0]")',
	'&eor	($t2,$a,$b)',			# a^b, b^c in next round
	'&eor	($t4,$t4,$a,"ror#".($Sigma0[2]-$Sigma0[0]))',	# Sigma0(a)
	'&add	($h,$h,$t0)',			# h+=Sigma1(e)
	'&ldr	($t1,sprintf "[sp,#%d]",4*(($j+1)&15))	if (($j&15)!=15);'.
	'&ldr	($t1,"[$Ktbl]")				if ($j==15);'.
	'&and	($t3,$t3,$t2)',			# (b^c)&=(a^b)
	'&ror	($t4,$t4,"#$Sigma0[0]")',
	'&add	($d,$d,$h)',			# d+=h
	'&eor	($t3,$t3,$b)',			# Maj(a,b,c)
	'$j++;	unshift(@V,pop(@V)); ($t2,$t3)=($t3,$t2);'
	)
}

$code.=<<___;
#ifdef	__KERNEL__
.globl	sha256_block_neon
#endif
.type	sha256_block_neon,%function
.align	4
sha256_block_neon:
.Lneon_entry:
	stp	x29, x30, [sp, #-16]!
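	// 16-byte frame for x29/x30; the "sub sp" below reserves 16*4
	// more bytes for the stack ring that stages X[i]+K[i] values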
	mov	x29, sp
	sub	sp,sp,#16*4

	adr	$Ktbl,.LK256
	add	$num,$inp,$num,lsl#6	// len to point at the end of inp

	ld1.8	{@X[0]},[$inp], #16
	ld1.8	{@X[1]},[$inp], #16
	ld1.8	{@X[2]},[$inp], #16
	ld1.8	{@X[3]},[$inp], #16
	ld1.32	{$T0},[$Ktbl], #16
	ld1.32	{$T1},[$Ktbl], #16
	ld1.32	{$T2},[$Ktbl], #16
	ld1.32	{$T3},[$Ktbl], #16
	rev32	@X[0],@X[0]		// yes, even on
	rev32	@X[1],@X[1]		// big-endian
	rev32	@X[2],@X[2]
	rev32	@X[3],@X[3]
	mov	$Xfer,sp
	add.32	$T0,$T0,@X[0]
	add.32	$T1,$T1,@X[1]
	add.32	$T2,$T2,@X[2]
	st1.32	{$T0-$T1},[$Xfer], #32
	add.32	$T3,$T3,@X[3]
	st1.32	{$T2-$T3},[$Xfer]
	sub	$Xfer,$Xfer,#32

	ldp	$A,$B,[$ctx]
	ldp	$C,$D,[$ctx,#8]
	ldp	$E,$F,[$ctx,#16]
	ldp	$G,$H,[$ctx,#24]
	ldr	$t1,[sp,#0]
	mov	$t2,wzr
	eor	$t3,$B,$C
	mov	$t4,wzr
	b	.L_00_48

.align	4
.L_00_48:
___
	&Xupdate(\&body_00_15);
	&Xupdate(\&body_00_15);
	&Xupdate(\&body_00_15);
	&Xupdate(\&body_00_15);
$code.=<<___;
	cmp	$t1,#0				// check for K256 terminator
	ldr	$t1,[sp,#0]
	sub	$Xfer,$Xfer,#64
	bne	.L_00_48

	sub	$Ktbl,$Ktbl,#256		// rewind $Ktbl
	cmp	$inp,$num
	mov	$Xfer, #64
	csel	$Xfer, $Xfer, xzr, eq
	sub	$inp,$inp,$Xfer			// avoid SEGV
	mov	$Xfer,sp
___
	&Xpreload(\&body_00_15);
	&Xpreload(\&body_00_15);
	&Xpreload(\&body_00_15);
	&Xpreload(\&body_00_15);
$code.=<<___;
	add	$A,$A,$t4			// h+=Sigma0(a) from the past
	ldp	$t0,$t1,[$ctx,#0]
	add	$A,$A,$t2			// h+=Maj(a,b,c) from the past
	ldp	$t2,$t3,[$ctx,#8]
	add	$A,$A,$t0			// accumulate
	add	$B,$B,$t1
	ldp	$t0,$t1,[$ctx,#16]
	add	$C,$C,$t2
	add	$D,$D,$t3
	ldp	$t2,$t3,[$ctx,#24]
	add	$E,$E,$t0
	add	$F,$F,$t1
	ldr	$t1,[sp,#0]
	stp	$A,$B,[$ctx,#0]
	add	$G,$G,$t2
	mov	$t2,wzr
	stp	$C,$D,[$ctx,#8]
	add	$H,$H,$t3
	stp	$E,$F,[$ctx,#16]
	eor	$t3,$B,$C
	stp	$G,$H,[$ctx,#24]
	mov	$t4,wzr
	mov	$Xfer,sp
	b.ne	.L_00_48

	ldr	x29,[x29]
	add	sp,sp,#16*4+16
	ret
.size	sha256_block_neon,.-sha256_block_neon
___
}

if ($SZ==8) {
my $Ktbl="x3";

my @H = map("v$_.16b",(0..4));
my ($fg,$de,$m9_10)=map("v$_.16b",(5..7));
my @MSG=map("v$_.16b",(16..23));
my ($W0,$W1)=("v24.2d","v25.2d");
my ($AB,$CD,$EF,$GH)=map("v$_.16b",(26..29));

$code.=<<___;
#ifndef	__KERNEL__
.type	sha512_block_armv8,%function
.align	6
sha512_block_armv8:
.Lv8_entry:
	stp	x29,x30,[sp,#-16]!
	add	x29,sp,#0

	ld1	{@MSG[0]-@MSG[3]},[$inp],#64	// load input
	ld1	{@MSG[4]-@MSG[7]},[$inp],#64

	ld1.64	{@H[0]-@H[3]},[$ctx]		// load context
	adr	$Ktbl,.LK512

	rev64	@MSG[0],@MSG[0]
	rev64	@MSG[1],@MSG[1]
	rev64	@MSG[2],@MSG[2]
	rev64	@MSG[3],@MSG[3]
	rev64	@MSG[4],@MSG[4]
	rev64	@MSG[5],@MSG[5]
	rev64	@MSG[6],@MSG[6]
	rev64	@MSG[7],@MSG[7]
	b	.Loop_hw

.align	4
.Loop_hw:
	ld1.64	{$W0},[$Ktbl],#16
	subs	$num,$num,#1
	sub	x4,$inp,#128
	orr	$AB,@H[0],@H[0]			// offload
	orr	$CD,@H[1],@H[1]
	orr	$EF,@H[2],@H[2]
	orr	$GH,@H[3],@H[3]
	csel	$inp,$inp,x4,ne			// conditional rewind
___
for($i=0;$i<32;$i++) {
$code.=<<___;
	add.i64	$W0,$W0,@MSG[0]
	ld1.64	{$W1},[$Ktbl],#16
	ext	$W0,$W0,$W0,#8
	ext	$fg,@H[2],@H[3],#8
	ext	$de,@H[1],@H[2],#8
	add.i64	@H[3],@H[3],$W0			// "T1 + H + K512[i]"
	sha512su0	@MSG[0],@MSG[1]
	ext	$m9_10,@MSG[4],@MSG[5],#8
	sha512h	@H[3],$fg,$de
	sha512su1	@MSG[0],@MSG[7],$m9_10
	add.i64	@H[4],@H[1],@H[3]		// "D + T1"
	sha512h2	@H[3],$H[1],@H[0]
___
	($W0,$W1)=($W1,$W0);	push(@MSG,shift(@MSG));
	@H = (@H[3],@H[0],@H[4],@H[2],@H[1]);
}
for(;$i<40;$i++) {
$code.=<<___	if ($i<39);
	ld1.64	{$W1},[$Ktbl],#16
___
$code.=<<___	if ($i==39);
	sub	$Ktbl,$Ktbl,#$rounds*$SZ	// rewind
___
$code.=<<___;
	add.i64	$W0,$W0,@MSG[0]
	ld1	{@MSG[0]},[$inp],#16		// load next input
	ext	$W0,$W0,$W0,#8
	ext	$fg,@H[2],@H[3],#8
	ext	$de,@H[1],@H[2],#8
	add.i64	@H[3],@H[3],$W0			// "T1 + H + K512[i]"
	sha512h	@H[3],$fg,$de
	rev64	@MSG[0],@MSG[0]
	add.i64	@H[4],@H[1],@H[3]		// "D + T1"
	sha512h2	@H[3],$H[1],@H[0]
___
	($W0,$W1)=($W1,$W0);	push(@MSG,shift(@MSG));
	@H = (@H[3],@H[0],@H[4],@H[2],@H[1]);
}
$code.=<<___;
	add.i64	@H[0],@H[0],$AB			// accumulate
	add.i64	@H[1],@H[1],$CD
	add.i64	@H[2],@H[2],$EF
	add.i64	@H[3],@H[3],$GH

	cbnz	$num,.Loop_hw

	st1.64	{@H[0]-@H[3]},[$ctx]		// store context

	ldr	x29,[sp],#16
	ret
.size	sha512_block_armv8,.-sha512_block_armv8
#endif
___
}

$code.=<<___;
#ifndef	__KERNEL__
.comm	OPENSSL_armcap_P,4,4
#endif
___

{ my %opcode = (
	"sha256h"	=> 0x5e004000,	"sha256h2"	=> 0x5e005000,
	"sha256su0"	=> 0x5e282800,	"sha256su1"	=> 0x5e006000	);

  sub unsha256 {
    my ($mnemonic,$arg)=@_;

    $arg =~ m/[qv]([0-9]+)[^,]*,\s*[qv]([0-9]+)[^,]*(?:,\s*[qv]([0-9]+))?/o
    &&
    sprintf ".inst\t0x%08x\t//%s %s",
			$opcode{$mnemonic}|$1|($2<<5)|($3<<16),
			$mnemonic,$arg;
  }
}

{ my %opcode = (
	"sha512h"	=> 0xce608000,	"sha512h2"	=> 0xce608400,
	"sha512su0"	=> 0xcec08000,	"sha512su1"	=> 0xce608800	);

  sub unsha512 {
    my ($mnemonic,$arg)=@_;

    $arg =~ m/[qv]([0-9]+)[^,]*,\s*[qv]([0-9]+)[^,]*(?:,\s*[qv]([0-9]+))?/o
    &&
    sprintf ".inst\t0x%08x\t//%s %s",
			$opcode{$mnemonic}|$1|($2<<5)|($3<<16),
			$mnemonic,$arg;
  }
}

open SELF,$0;
while(<SELF>) {
	next if (/^#!/);
	last if (!s/^#/\/\// and !/^$/);
	print;
}
close SELF;

foreach(split("\n",$code)) {

	s/\`([^\`]*)\`/eval($1)/ge;

	s/\b(sha512\w+)\s+([qv].*)/unsha512($1,$2)/ge	or
	s/\b(sha256\w+)\s+([qv].*)/unsha256($1,$2)/ge;

	s/\bq([0-9]+)\b/v$1.16b/g;		# old->new registers

	s/\.[ui]?8(\s)/$1/;
	s/\.\w?64\b//		and s/\.16b/\.2d/g	or
	s/\.\w?32\b//		and s/\.16b/\.4s/g;
	m/\bext\b/		and s/\.2d/\.16b/g	or
	m/(ld|st)1[^\[]+\[0\]/	and s/\.4s/\.s/g;
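	# Net effect of the substitutions above, as an example: the
	# portable spelling "ld1.32 {q4},[x16],#16" in $code comes out
	# as native AArch64 "ld1 {v4.4s},[x16],#16"; size suffixes
	# migrate from the mnemonic onto the register arrangement.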

	print $_,"\n";
}

close STDOUT;
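
# Typical invocation (flavour first, output file last, matching the two
# pop() calls at the top); flavour names are whatever arm-xlate.pl
# accepts, e.g., assuming this file keeps its usual sha512-armv8.pl name:
#
#	perl sha512-armv8.pl linux64 sha512-armv8.S
#	perl sha512-armv8.pl linux64 sha256-armv8.S	# output name without "512" selects SHA256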