#! /usr/bin/env perl
# Copyright 2014-2016 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the OpenSSL license (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html

#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# GHASH for ARMv8 Crypto Extension, 64-bit polynomial multiplication.
#
# June 2014
# The initial version was developed in tight cooperation with Ard
# Biesheuvel of Linaro, from bits-n-pieces of other assembly modules.
# Just like aesv8-armx.pl, this module supports both AArch32 and
# AArch64 execution modes.
#
# July 2014
# Implement 2x aggregated reduction [see ghash-x86.pl for background
# information].
#
# Current performance in cycles per processed byte:
#
#		PMULL[2]	32-bit NEON(*)
# Apple A7	0.92		5.62
# Cortex-A53	1.01		8.39
# Cortex-A57	1.17		7.61
# Denver	0.71		6.02
# Mongoose	1.10		8.06
# Kryo		1.16		8.00
#
# (*)	presented for reference/comparison purposes;

$flavour = shift;
$output  = shift;

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../../perlasm/arm-xlate.pl" and -f $xlate ) or
die "can't locate arm-xlate.pl";

open OUT,"| \"$^X\" $xlate $flavour $output";
*STDOUT=*OUT;

$Xi="x0";	# argument block
$Htbl="x1";
$inp="x2";
$len="x3";

$inc="x12";

{
my ($Xl,$Xm,$Xh,$IN)=map("q$_",(0..3));
my ($t0,$t1,$t2,$xC2,$H,$Hhl,$H2)=map("q$_",(8..14));

$code=<<___;
#include <openssl/arm_arch.h>

.text
___
$code.=".arch	armv8-a+crypto\n"	if ($flavour =~ /64/);
$code.=<<___				if ($flavour !~ /64/);
.fpu	neon
.code	32
#undef	__thumb2__
___

################################################################################
# void gcm_init_v8(u128 Htable[16],const u64 H[2]);
#
# input:	128-bit H - secret parameter E(K,0^128)
# output:	precomputed table filled with degrees of twisted H;
#		H is twisted to handle reverse bitness of GHASH;
#		only a few of the 16 slots of Htable[16] are used;
#		the data is opaque to the outside world (which allows
#		the code to be optimized independently);
#
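# Concretely (a descriptive note derived from the stores below, not a
# stable layout guarantee): the routine writes only the first three
# slots - Htable[0] holds twisted H, Htable[1] the packed Karatsuba
# pre-processed halves of H and H^2, and Htable[2] twisted H^2; the
# remaining slots are left untouched.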
$code.=<<___;
.global	gcm_init_v8
.type	gcm_init_v8,%function
.align	4
gcm_init_v8:
	AARCH64_VALID_CALL_TARGET
	vld1.64		{$t1},[x1]		@ load input H
	vmov.i8		$xC2,#0xe1
	vshl.i64	$xC2,$xC2,#57		@ 0xc2.0
	vext.8		$IN,$t1,$t1,#8
	vshr.u64	$t2,$xC2,#63
	vdup.32		$t1,${t1}[1]
	vext.8		$t0,$t2,$xC2,#8		@ t0=0xc2....01
	vshr.u64	$t2,$IN,#63
	vshr.s32	$t1,$t1,#31		@ broadcast carry bit
	vand		$t2,$t2,$t0
	vshl.i64	$IN,$IN,#1
	vext.8		$t2,$t2,$t2,#8
	vand		$t0,$t0,$t1
	vorr		$IN,$IN,$t2		@ H<<<=1
	veor		$H,$IN,$t0		@ twisted H
	vst1.64		{$H},[x0],#16		@ store Htable[0]

	@ calculate H^2
	vext.8		$t0,$H,$H,#8		@ Karatsuba pre-processing
	vpmull.p64	$Xl,$H,$H
	veor		$t0,$t0,$H
	vpmull2.p64	$Xh,$H,$H
	vpmull.p64	$Xm,$t0,$t0

	vext.8		$t1,$Xl,$Xh,#8		@ Karatsuba post-processing
	veor		$t2,$Xl,$Xh
	veor		$Xm,$Xm,$t1
	veor		$Xm,$Xm,$t2
	vpmull.p64	$t2,$Xl,$xC2		@ 1st phase

	vmov		$Xh#lo,$Xm#hi		@ Xh|Xm - 256-bit result
	vmov		$Xm#hi,$Xl#lo		@ Xm is rotated Xl
	veor		$Xl,$Xm,$t2

	vext.8		$t2,$Xl,$Xl,#8		@ 2nd phase
	vpmull.p64	$Xl,$Xl,$xC2
	veor		$t2,$t2,$Xh
	veor		$H2,$Xl,$t2

	vext.8		$t1,$H2,$H2,#8		@ Karatsuba pre-processing
	veor		$t1,$t1,$H2
	vext.8		$Hhl,$t0,$t1,#8		@ pack Karatsuba pre-processed
	vst1.64		{$Hhl-$H2},[x0]		@ store Htable[1..2]

	ret
.size	gcm_init_v8,.-gcm_init_v8
___
################################################################################
# void gcm_gmult_v8(u64 Xi[2],const u128 Htable[16]);
#
# input:	Xi - current hash value;
#		Htable - table precomputed in gcm_init_v8;
# output:	Xi - next hash value;
#
$code.=<<___;
.global	gcm_gmult_v8
.type	gcm_gmult_v8,%function
.align	4
gcm_gmult_v8:
	AARCH64_VALID_CALL_TARGET
	vld1.64		{$t1},[$Xi]		@ load Xi
	vmov.i8		$xC2,#0xe1
	vld1.64		{$H-$Hhl},[$Htbl]	@ load twisted H, ...
	vshl.u64	$xC2,$xC2,#57
#ifndef __ARMEB__
	vrev64.8	$t1,$t1
#endif
	vext.8		$IN,$t1,$t1,#8

	vpmull.p64	$Xl,$H,$IN		@ H.lo·Xi.lo
	veor		$t1,$t1,$IN		@ Karatsuba pre-processing
	vpmull2.p64	$Xh,$H,$IN		@ H.hi·Xi.hi
	vpmull.p64	$Xm,$Hhl,$t1		@ (H.lo+H.hi)·(Xi.lo+Xi.hi)

	vext.8		$t1,$Xl,$Xh,#8		@ Karatsuba post-processing
	veor		$t2,$Xl,$Xh
	veor		$Xm,$Xm,$t1
	veor		$Xm,$Xm,$t2
	vpmull.p64	$t2,$Xl,$xC2		@ 1st phase of reduction

	vmov		$Xh#lo,$Xm#hi		@ Xh|Xm - 256-bit result
	vmov		$Xm#hi,$Xl#lo		@ Xm is rotated Xl
	veor		$Xl,$Xm,$t2

	vext.8		$t2,$Xl,$Xl,#8		@ 2nd phase of reduction
	vpmull.p64	$Xl,$Xl,$xC2
	veor		$t2,$t2,$Xh
	veor		$Xl,$Xl,$t2

#ifndef __ARMEB__
	vrev64.8	$Xl,$Xl
#endif
	vext.8		$Xl,$Xl,$Xl,#8
	vst1.64		{$Xl},[$Xi]		@ write out Xi

	ret
.size	gcm_gmult_v8,.-gcm_gmult_v8
___
################################################################################
# void gcm_ghash_v8(u64 Xi[2],const u128 Htable[16],const u8 *inp,size_t len);
#
# input:	table precomputed in gcm_init_v8;
#		current hash value Xi;
#		pointer to input data;
#		length of input data in bytes, which must be divisible
#		by the block size;
# output:	next hash value Xi;
#
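# For orientation, a minimal hypothetical C-level calling sequence,
# based only on the three prototypes above; the buffer names and the
# derivation of H are illustrative, not part of this module:
#
#	u128 Htable[16];
#	u64 H[2], Xi[2] = {0, 0};
#	/* H = E(K, 0^128), computed by the caller's AES code */
#	gcm_init_v8(Htable, H);
#	gcm_ghash_v8(Xi, Htable, inp, len);	/* len % 16 == 0 */
#	gcm_gmult_v8(Xi, Htable);		/* one extra multiplication of Xi by H */
#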
$code.=<<___;
.global	gcm_ghash_v8
.type	gcm_ghash_v8,%function
.align	4
gcm_ghash_v8:
	AARCH64_VALID_CALL_TARGET
___
$code.=<<___		if ($flavour !~ /64/);
	vstmdb		sp!,{d8-d15}		@ 32-bit ABI says so
___
$code.=<<___;
	vld1.64		{$Xl},[$Xi]		@ load [rotated] Xi
						@ "[rotated]" means that
						@ the loaded value has to
						@ be rotated in order to
						@ appear as in the
						@ algorithm specification
	subs		$len,$len,#32		@ see if $len is 32 or larger
	mov		$inc,#16		@ $inc is used as the post-
						@ increment for the input
						@ pointer; as the loop is
						@ modulo-scheduled, $inc is
						@ zeroed just in time to
						@ preclude overstepping
						@ inp[len], which means that
						@ the last block[s] are
						@ actually loaded twice, but
						@ the last copy is not
						@ processed
	vld1.64		{$H-$Hhl},[$Htbl],#32	@ load twisted H, ..., H^2
	vmov.i8		$xC2,#0xe1
	vld1.64		{$H2},[$Htbl]
	cclr		$inc,eq			@ is it time to zero $inc?
	vext.8		$Xl,$Xl,$Xl,#8		@ rotate Xi
	vld1.64		{$t0},[$inp],#16	@ load [rotated] I[0]
	vshl.u64	$xC2,$xC2,#57		@ compose 0xc2.0 constant
#ifndef __ARMEB__
	vrev64.8	$t0,$t0
	vrev64.8	$Xl,$Xl
#endif
	vext.8		$IN,$t0,$t0,#8		@ rotate I[0]
	b.lo		.Lodd_tail_v8		@ $len was less than 32
___
{ my ($Xln,$Xmn,$Xhn,$In) = map("q$_",(4..7));
	#######
	# Xi+2 =[H*(Ii+1 + Xi+1)] mod P =
	#	[(H*Ii+1) + (H*Xi+1)] mod P =
	#	[(H*Ii+1) + H^2*(Ii+Xi)] mod P
	#
$code.=<<___;
	vld1.64		{$t1},[$inp],$inc	@ load [rotated] I[1]
#ifndef __ARMEB__
	vrev64.8	$t1,$t1
#endif
	vext.8		$In,$t1,$t1,#8
	veor		$IN,$IN,$Xl		@ I[i]^=Xi
	vpmull.p64	$Xln,$H,$In		@ H·Ii+1
	veor		$t1,$t1,$In		@ Karatsuba pre-processing
	vpmull2.p64	$Xhn,$H,$In
	b		.Loop_mod2x_v8

.align	4
.Loop_mod2x_v8:
	vext.8		$t2,$IN,$IN,#8
	subs		$len,$len,#32		@ is there more data?
	vpmull.p64	$Xl,$H2,$IN		@ H^2.lo·Xi.lo
	cclr		$inc,lo			@ is it time to zero $inc?

	vpmull.p64	$Xmn,$Hhl,$t1
	veor		$t2,$t2,$IN		@ Karatsuba pre-processing
	vpmull2.p64	$Xh,$H2,$IN		@ H^2.hi·Xi.hi
	veor		$Xl,$Xl,$Xln		@ accumulate
	vpmull2.p64	$Xm,$Hhl,$t2		@ (H^2.lo+H^2.hi)·(Xi.lo+Xi.hi)
	vld1.64		{$t0},[$inp],$inc	@ load [rotated] I[i+2]

	veor		$Xh,$Xh,$Xhn
	cclr		$inc,eq			@ is it time to zero $inc?
	veor		$Xm,$Xm,$Xmn

	vext.8		$t1,$Xl,$Xh,#8		@ Karatsuba post-processing
	veor		$t2,$Xl,$Xh
	veor		$Xm,$Xm,$t1
	vld1.64		{$t1},[$inp],$inc	@ load [rotated] I[i+3]
#ifndef __ARMEB__
	vrev64.8	$t0,$t0
#endif
	veor		$Xm,$Xm,$t2
	vpmull.p64	$t2,$Xl,$xC2		@ 1st phase of reduction

#ifndef __ARMEB__
	vrev64.8	$t1,$t1
#endif
	vmov		$Xh#lo,$Xm#hi		@ Xh|Xm - 256-bit result
	vmov		$Xm#hi,$Xl#lo		@ Xm is rotated Xl
	vext.8		$In,$t1,$t1,#8
	vext.8		$IN,$t0,$t0,#8
	veor		$Xl,$Xm,$t2
	vpmull.p64	$Xln,$H,$In		@ H·Ii+1
	veor		$IN,$IN,$Xh		@ accumulate $IN early

	vext.8		$t2,$Xl,$Xl,#8		@ 2nd phase of reduction
	vpmull.p64	$Xl,$Xl,$xC2
	veor		$IN,$IN,$t2
	veor		$t1,$t1,$In		@ Karatsuba pre-processing
	veor		$IN,$IN,$Xl
	vpmull2.p64	$Xhn,$H,$In
	b.hs		.Loop_mod2x_v8		@ there were at least 32 more bytes

	veor		$Xh,$Xh,$t2
	vext.8		$IN,$t0,$t0,#8		@ re-construct $IN
	adds		$len,$len,#32		@ re-construct $len
	veor		$Xl,$Xl,$Xh		@ re-construct $Xl
	b.eq		.Ldone_v8		@ is $len zero?
___
}
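# The loop above implements the aggregation derived in the comment that
# precedes it: the reduction mod P is deferred, the halves of H·Ii+1 are
# carried in $Xln/$Xhn and the halves of H^2·(Ii+Xi) in $Xl/$Xh, and the
# two products are summed so that a single reduction serves two input
# blocks.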
$code.=<<___;
.Lodd_tail_v8:
	vext.8		$t2,$Xl,$Xl,#8
	veor		$IN,$IN,$Xl		@ inp^=Xi
	veor		$t1,$t0,$t2		@ $t1 is rotated inp^Xi

	vpmull.p64	$Xl,$H,$IN		@ H.lo·Xi.lo
	veor		$t1,$t1,$IN		@ Karatsuba pre-processing
	vpmull2.p64	$Xh,$H,$IN		@ H.hi·Xi.hi
	vpmull.p64	$Xm,$Hhl,$t1		@ (H.lo+H.hi)·(Xi.lo+Xi.hi)

	vext.8		$t1,$Xl,$Xh,#8		@ Karatsuba post-processing
	veor		$t2,$Xl,$Xh
	veor		$Xm,$Xm,$t1
	veor		$Xm,$Xm,$t2
	vpmull.p64	$t2,$Xl,$xC2		@ 1st phase of reduction

	vmov		$Xh#lo,$Xm#hi		@ Xh|Xm - 256-bit result
	vmov		$Xm#hi,$Xl#lo		@ Xm is rotated Xl
	veor		$Xl,$Xm,$t2

	vext.8		$t2,$Xl,$Xl,#8		@ 2nd phase of reduction
	vpmull.p64	$Xl,$Xl,$xC2
	veor		$t2,$t2,$Xh
	veor		$Xl,$Xl,$t2

.Ldone_v8:
#ifndef __ARMEB__
	vrev64.8	$Xl,$Xl
#endif
	vext.8		$Xl,$Xl,$Xl,#8
	vst1.64		{$Xl},[$Xi]		@ write out Xi

___
$code.=<<___		if ($flavour !~ /64/);
	vldmia		sp!,{d8-d15}		@ 32-bit ABI says so
___
$code.=<<___;
	ret
.size	gcm_ghash_v8,.-gcm_ghash_v8
___
}
$code.=<<___;
.asciz	"GHASH for ARMv8, CRYPTOGAMS by <appro\@openssl.org>"
.align	2
___

if ($flavour =~ /64/) {				######## 64-bit code
    sub unvmov {
	my $arg=shift;

	$arg =~ m/q([0-9]+)#(lo|hi),\s*q([0-9]+)#(lo|hi)/o &&
	sprintf "ins	v%d.d[%d],v%d.d[%d]",$1,($2 eq "lo")?0:1,$3,($4 eq "lo")?0:1;
    }
    foreach(split("\n",$code)) {
	s/cclr\s+([wx])([^,]+),\s*([a-z]+)/csel	$1$2,$1zr,$1$2,$3/o	or
	s/vmov\.i8/movi/o	or	# fix up legacy mnemonics
	s/vmov\s+(.*)/unvmov($1)/geo	or
	s/vext\.8/ext/o		or
	s/vshr\.s/sshr\.s/o	or
	s/vshr/ushr/o		or
	s/^(\s+)v/$1/o		or	# strip off v prefix
	s/\bbx\s+lr\b/ret/o;

	s/\bq([0-9]+)\b/"v".($1<8?$1:$1+8).".16b"/geo;	# old->new registers
	s/@\s/\/\//o;					# old->new style commentary

	# fix up remaining legacy suffixes
	s/\.[ui]?8(\s)/$1/o;
	s/\.[uis]?32//o and s/\.16b/\.4s/go;
	m/\.p64/o and s/\.16b/\.1q/o;		# 1st pmull argument
	m/l\.p64/o and s/\.16b/\.1d/go;		# 2nd and 3rd pmull arguments
	s/\.[uisp]?64//o and s/\.16b/\.2d/go;
	s/\.[42]([sd])\[([0-3])\]/\.$1\[$2\]/o;

	print $_,"\n";
    }
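    # As an illustration of the rules above (worked out by hand, so a
    # sketch rather than verified output): an input line such as
    # "vpmull.p64 q0,q12,q3" first loses its "v" prefix, then has
    # q0/q12/q3 renamed to v0.16b/v20.16b/v3.16b (q8..q15 map to
    # v16..v23), and finally gets the pmull-specific suffixes applied,
    # yielding "pmull v0.1q,v20.1d,v3.1d".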
} else {				######## 32-bit code
    sub unvdup32 {
	my $arg=shift;

	$arg =~ m/q([0-9]+),\s*q([0-9]+)\[([0-3])\]/o &&
	sprintf "vdup.32	q%d,d%d[%d]",$1,2*$2+($3>>1),$3&1;
    }
    sub unvpmullp64 {
	my ($mnemonic,$arg)=@_;

	if ($arg =~ m/q([0-9]+),\s*q([0-9]+),\s*q([0-9]+)/o) {
	    my $word = 0xf2a00e00|(($1&7)<<13)|(($1&8)<<19)
				 |(($2&7)<<17)|(($2&8)<<4)
				 |(($3&7)<<1) |(($3&8)<<2);
	    $word |= 0x00010001	 if ($mnemonic =~ "2");
	    # Emit raw bytes, since ARMv7 instructions are always encoded
	    # little-endian. The correct solution would be the .inst
	    # directive, but older assemblers don't implement it:-(
	    sprintf ".byte\t0x%02x,0x%02x,0x%02x,0x%02x\t@ %s %s",
			$word&0xff,($word>>8)&0xff,
			($word>>16)&0xff,($word>>24)&0xff,
			$mnemonic,$arg;
	}
    }

    foreach(split("\n",$code)) {
	s/\b[wx]([0-9]+)\b/r$1/go;		# new->old registers
	s/\bv([0-9])\.[12468]+[bsd]\b/q$1/go;	# new->old registers
	s/\/\/\s?/@ /o;				# new->old style commentary

	# fix up remaining new-style suffixes
	s/\],#[0-9]+/]!/o;

	s/cclr\s+([^,]+),\s*([a-z]+)/mov$2	$1,#0/o			or
	s/vdup\.32\s+(.*)/unvdup32($1)/geo	or
	s/v?(pmull2?)\.p64\s+(.*)/unvpmullp64($1,$2)/geo	or
	s/\bq([0-9]+)#(lo|hi)/sprintf "d%d",2*$1+($2 eq "hi")/geo	or
	s/^(\s+)b\./$1b/o	or
	s/^(\s+)ret/$1bx\tlr/o;

	print $_,"\n";
    }
}

close STDOUT or die "error closing STDOUT"; # enforce flush