#! /usr/bin/env perl
# Copyright 2014-2016 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the OpenSSL license (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html

#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# GHASH for ARMv8 Crypto Extension, 64-bit polynomial multiplication.
#
# June 2014
# Initial version was developed in tight cooperation with Ard Biesheuvel
# of Linaro from bits-n-pieces from other assembly modules. Just like
# aesv8-armx.pl this module supports both AArch32 and AArch64 execution modes.
#
# July 2014
# Implement 2x aggregated reduction [see ghash-x86.pl for background
# information].
#
# Current performance in cycles per processed byte:
#
#		PMULL[2]	32-bit NEON(*)
# Apple A7	0.92		5.62
# Cortex-A53	1.01		8.39
# Cortex-A57	1.17		7.61
# Denver	0.71		6.02
# Mongoose	1.10		8.06
# Kryo		1.16		8.00
#
# (*)	presented for reference/comparison purposes;

$flavour = shift;
$output  = shift;

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../../perlasm/arm-xlate.pl" and -f $xlate) or
die "can't locate arm-xlate.pl";

open OUT,"| \"$^X\" $xlate $flavour $output";
*STDOUT=*OUT;

$Xi="x0";	# argument block
$Htbl="x1";
$inp="x2";
$len="x3";

$inc="x12";

{
my ($Xl,$Xm,$Xh,$IN)=map("q$_",(0..3));
my ($t0,$t1,$t2,$xC2,$H,$Hhl,$H2)=map("q$_",(8..14));

$code=<<___;
#include <GFp/arm_arch.h>

.text
___
$code.=".arch	armv8-a+crypto\n"	if ($flavour =~ /64/);
$code.=<<___			if ($flavour !~ /64/);
.fpu	neon
.code	32
#undef	__thumb2__
___

################################################################################
# void GFp_gcm_init_clmul(u128 Htable[16],const u64 H[2]);
#
# input:	128-bit H - secret parameter E(K,0^128)
# output:	precomputed table filled with degrees of twisted H;
#		H is twisted to handle reverse bitness of GHASH;
#		only a few of the 16 slots of Htable[16] are used;
#		data is opaque to the outside world (which allows the
#		code to be optimized independently);
#
$code.=<<___;
.global	GFp_gcm_init_clmul
.type	GFp_gcm_init_clmul,%function
.align	4
GFp_gcm_init_clmul:
	AARCH64_VALID_CALL_TARGET
	vld1.64		{$t1},[x1]		@ load input H
	vmov.i8		$xC2,#0xe1
	vshl.i64	$xC2,$xC2,#57		@ 0xc2.0
	vext.8		$IN,$t1,$t1,#8
	vshr.u64	$t2,$xC2,#63
	vdup.32		$t1,${t1}[1]
	vext.8		$t0,$t2,$xC2,#8		@ t0=0xc2....01
	vshr.u64	$t2,$IN,#63
	vshr.s32	$t1,$t1,#31		@ broadcast carry bit
	vand		$t2,$t2,$t0
	vshl.i64	$IN,$IN,#1
	vext.8		$t2,$t2,$t2,#8
	vand		$t0,$t0,$t1
	vorr		$IN,$IN,$t2		@ H<<<=1
	veor		$H,$IN,$t0		@ twisted H
	vst1.64		{$H},[x0],#16		@ store Htable[0]

	@ calculate H^2
	vext.8		$t0,$H,$H,#8		@ Karatsuba pre-processing
	vpmull.p64	$Xl,$H,$H
	veor		$t0,$t0,$H
	vpmull2.p64	$Xh,$H,$H
	vpmull.p64	$Xm,$t0,$t0

	vext.8		$t1,$Xl,$Xh,#8		@ Karatsuba post-processing
	veor		$t2,$Xl,$Xh
	veor		$Xm,$Xm,$t1
	veor		$Xm,$Xm,$t2
	vpmull.p64	$t2,$Xl,$xC2		@ 1st phase

	vmov		$Xh#lo,$Xm#hi		@ Xh|Xm - 256-bit result
	vmov		$Xm#hi,$Xl#lo		@ Xm is rotated Xl
	veor		$Xl,$Xm,$t2

	vext.8		$t2,$Xl,$Xl,#8		@ 2nd phase
	vpmull.p64	$Xl,$Xl,$xC2
	veor		$t2,$t2,$Xh
	veor		$H2,$Xl,$t2

	vext.8		$t1,$H2,$H2,#8		@ Karatsuba pre-processing
	veor		$t1,$t1,$H2
	vext.8		$Hhl,$t0,$t1,#8		@ pack Karatsuba pre-processed
	vst1.64		{$Hhl-$H2},[x0]		@ store Htable[1..2]

	ret
.size	GFp_gcm_init_clmul,.-GFp_gcm_init_clmul
___
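################################################################################
# For orientation, a hypothetical C caller of the three entry points in this
# file (editorial sketch only; the real integration lives in the higher-level
# GCM code, and the buffer names below are illustrative, not part of this
# module):
#
#	u128 Htable[16];
#	u64  H[2];			/* E(K,0^128) as two 64-bit halves */
#	u64  Xi[2] = { 0, 0 };		/* running hash value */
#	const u8 *buf; size_t len;	/* input; len % 16 == 0 */
#
#	GFp_gcm_init_clmul(Htable, H);			/* precompute twisted H, H^2 */
#	GFp_gcm_ghash_clmul(Xi, Htable, buf, len);	/* fold len/16 blocks into Xi */
#	GFp_gcm_gmult_clmul(Xi, Htable);		/* single multiplication of Xi by H */
#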
################################################################################
# void GFp_gcm_gmult_clmul(u64 Xi[2],const u128 Htable[16]);
#
# input:	Xi - current hash value;
#		Htable - table precomputed in GFp_gcm_init_clmul;
# output:	Xi - next hash value Xi;
#
$code.=<<___;
.global	GFp_gcm_gmult_clmul
.type	GFp_gcm_gmult_clmul,%function
.align	4
GFp_gcm_gmult_clmul:
	AARCH64_VALID_CALL_TARGET
	vld1.64		{$t1},[$Xi]		@ load Xi
	vmov.i8		$xC2,#0xe1
	vld1.64		{$H-$Hhl},[$Htbl]	@ load twisted H, ...
	vshl.u64	$xC2,$xC2,#57
#ifndef __ARMEB__
	vrev64.8	$t1,$t1
#endif
	vext.8		$IN,$t1,$t1,#8

	vpmull.p64	$Xl,$H,$IN		@ H.lo·Xi.lo
	veor		$t1,$t1,$IN		@ Karatsuba pre-processing
	vpmull2.p64	$Xh,$H,$IN		@ H.hi·Xi.hi
	vpmull.p64	$Xm,$Hhl,$t1		@ (H.lo+H.hi)·(Xi.lo+Xi.hi)

	vext.8		$t1,$Xl,$Xh,#8		@ Karatsuba post-processing
	veor		$t2,$Xl,$Xh
	veor		$Xm,$Xm,$t1
	veor		$Xm,$Xm,$t2
	vpmull.p64	$t2,$Xl,$xC2		@ 1st phase of reduction

	vmov		$Xh#lo,$Xm#hi		@ Xh|Xm - 256-bit result
	vmov		$Xm#hi,$Xl#lo		@ Xm is rotated Xl
	veor		$Xl,$Xm,$t2

	vext.8		$t2,$Xl,$Xl,#8		@ 2nd phase of reduction
	vpmull.p64	$Xl,$Xl,$xC2
	veor		$t2,$t2,$Xh
	veor		$Xl,$Xl,$t2

#ifndef __ARMEB__
	vrev64.8	$Xl,$Xl
#endif
	vext.8		$Xl,$Xl,$Xl,#8
	vst1.64		{$Xl},[$Xi]		@ write out Xi

	ret
.size	GFp_gcm_gmult_clmul,.-GFp_gcm_gmult_clmul
___
################################################################################
# void GFp_gcm_ghash_clmul(u64 Xi[2], const u128 Htable[16], const u8 *inp,
#                          size_t len);
#
# input:	table precomputed in GFp_gcm_init_clmul;
#		current hash value Xi;
#		pointer to input data;
#		length of input data in bytes; must be divisible by the block size;
# output:	next hash value Xi;
#
$code.=<<___;
.global	GFp_gcm_ghash_clmul
.type	GFp_gcm_ghash_clmul,%function
.align	4
GFp_gcm_ghash_clmul:
	AARCH64_VALID_CALL_TARGET
___
$code.=<<___		if ($flavour !~ /64/);
	vstmdb		sp!,{d8-d15}		@ 32-bit ABI says so
___
$code.=<<___;
	vld1.64		{$Xl},[$Xi]		@ load [rotated] Xi
						@ "[rotated]" means that
						@ the loaded value would have
						@ to be rotated in order to
						@ make it appear as in the
						@ algorithm specification
	subs		$len,$len,#32		@ see if $len is 32 or larger
	mov		$inc,#16		@ $inc is used as post-
						@ increment for input pointer;
						@ as the loop is modulo-scheduled
						@ $inc is zeroed just in time
						@ to preclude overstepping
						@ inp[len], which means that
						@ last block[s] are actually
						@ loaded twice, but the last
						@ copy is not processed
	vld1.64		{$H-$Hhl},[$Htbl],#32	@ load twisted H, ..., H^2
	vmov.i8		$xC2,#0xe1
	vld1.64		{$H2},[$Htbl]
	cclr		$inc,eq			@ is it time to zero $inc?
	vext.8		$Xl,$Xl,$Xl,#8		@ rotate Xi
	vld1.64		{$t0},[$inp],#16	@ load [rotated] I[0]
	vshl.u64	$xC2,$xC2,#57		@ compose 0xc2.0 constant
#ifndef __ARMEB__
	vrev64.8	$t0,$t0
	vrev64.8	$Xl,$Xl
#endif
	vext.8		$IN,$t0,$t0,#8		@ rotate I[0]
	b.lo		.Lodd_tail_v8		@ $len was less than 32
___
{ my ($Xln,$Xmn,$Xhn,$In) = map("q$_",(4..7));
	#######
	# Xi+2 = [H*(Ii+1 + Xi+1)] mod P =
	#	 [(H*Ii+1) + (H*Xi+1)] mod P =
	#	 [(H*Ii+1) + H^2*(Ii+Xi)] mod P
	#
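	# Editorial note (not part of the original commentary): the point of
	# the aggregation is that both per-iteration products are accumulated
	# as a single 256-bit value and only then reduced modulo
	# P = x^128 + x^7 + x^2 + x + 1, i.e.
	#
	#	Xi+2 = reduce( H*Ii+1 + H^2*(Ii + Xi) )
	#
	# so each pass through .Loop_mod2x_v8 consumes two 16-byte blocks but
	# pays for only one two-phase reduction.
	#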
$code.=<<___;
	vld1.64		{$t1},[$inp],$inc	@ load [rotated] I[1]
#ifndef __ARMEB__
	vrev64.8	$t1,$t1
#endif
	vext.8		$In,$t1,$t1,#8
	veor		$IN,$IN,$Xl		@ I[i]^=Xi
	vpmull.p64	$Xln,$H,$In		@ H·Ii+1
	veor		$t1,$t1,$In		@ Karatsuba pre-processing
	vpmull2.p64	$Xhn,$H,$In
	b		.Loop_mod2x_v8

.align	4
.Loop_mod2x_v8:
	vext.8		$t2,$IN,$IN,#8
	subs		$len,$len,#32		@ is there more data?
	vpmull.p64	$Xl,$H2,$IN		@ H^2.lo·Xi.lo
	cclr		$inc,lo			@ is it time to zero $inc?

	vpmull.p64	$Xmn,$Hhl,$t1
	veor		$t2,$t2,$IN		@ Karatsuba pre-processing
	vpmull2.p64	$Xh,$H2,$IN		@ H^2.hi·Xi.hi
	veor		$Xl,$Xl,$Xln		@ accumulate
	vpmull2.p64	$Xm,$Hhl,$t2		@ (H^2.lo+H^2.hi)·(Xi.lo+Xi.hi)
	vld1.64		{$t0},[$inp],$inc	@ load [rotated] I[i+2]

	veor		$Xh,$Xh,$Xhn
	cclr		$inc,eq			@ is it time to zero $inc?
	veor		$Xm,$Xm,$Xmn

	vext.8		$t1,$Xl,$Xh,#8		@ Karatsuba post-processing
	veor		$t2,$Xl,$Xh
	veor		$Xm,$Xm,$t1
	vld1.64		{$t1},[$inp],$inc	@ load [rotated] I[i+3]
#ifndef __ARMEB__
	vrev64.8	$t0,$t0
#endif
	veor		$Xm,$Xm,$t2
	vpmull.p64	$t2,$Xl,$xC2		@ 1st phase of reduction

#ifndef __ARMEB__
	vrev64.8	$t1,$t1
#endif
	vmov		$Xh#lo,$Xm#hi		@ Xh|Xm - 256-bit result
	vmov		$Xm#hi,$Xl#lo		@ Xm is rotated Xl
	vext.8		$In,$t1,$t1,#8
	vext.8		$IN,$t0,$t0,#8
	veor		$Xl,$Xm,$t2
	vpmull.p64	$Xln,$H,$In		@ H·Ii+1
	veor		$IN,$IN,$Xh		@ accumulate $IN early

	vext.8		$t2,$Xl,$Xl,#8		@ 2nd phase of reduction
	vpmull.p64	$Xl,$Xl,$xC2
	veor		$IN,$IN,$t2
	veor		$t1,$t1,$In		@ Karatsuba pre-processing
	veor		$IN,$IN,$Xl
	vpmull2.p64	$Xhn,$H,$In
	b.hs		.Loop_mod2x_v8		@ there were at least 32 more bytes

	veor		$Xh,$Xh,$t2
	vext.8		$IN,$t0,$t0,#8		@ re-construct $IN
	adds		$len,$len,#32		@ re-construct $len
	veor		$Xl,$Xl,$Xh		@ re-construct $Xl
	b.eq		.Ldone_v8		@ is $len zero?
___
}
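################################################################################
# The .Lodd_tail_v8 path below is the non-aggregated fallback: it processes
# the final 16-byte block (or the only block, when len was 16), i.e. the
# plain GHASH step
#
#	Xi+1 = [H*(Ii + Xi)] mod P
#
# using the same three-multiplication Karatsuba schedule, where (with "+"
# denoting XOR)
#
#	a*b = a.hi*b.hi*x^128 + a.lo*b.lo +
#	      (a.hi*b.hi + a.lo*b.lo + (a.hi+a.lo)*(b.hi+b.lo))*x^64
#
# This note is an editorial summary of the code that follows, not part of the
# original commentary.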
$code.=<<___;
.Lodd_tail_v8:
	vext.8		$t2,$Xl,$Xl,#8
	veor		$IN,$IN,$Xl		@ inp^=Xi
	veor		$t1,$t0,$t2		@ $t1 is rotated inp^Xi

	vpmull.p64	$Xl,$H,$IN		@ H.lo·Xi.lo
	veor		$t1,$t1,$IN		@ Karatsuba pre-processing
	vpmull2.p64	$Xh,$H,$IN		@ H.hi·Xi.hi
	vpmull.p64	$Xm,$Hhl,$t1		@ (H.lo+H.hi)·(Xi.lo+Xi.hi)

	vext.8		$t1,$Xl,$Xh,#8		@ Karatsuba post-processing
	veor		$t2,$Xl,$Xh
	veor		$Xm,$Xm,$t1
	veor		$Xm,$Xm,$t2
	vpmull.p64	$t2,$Xl,$xC2		@ 1st phase of reduction

	vmov		$Xh#lo,$Xm#hi		@ Xh|Xm - 256-bit result
	vmov		$Xm#hi,$Xl#lo		@ Xm is rotated Xl
	veor		$Xl,$Xm,$t2

	vext.8		$t2,$Xl,$Xl,#8		@ 2nd phase of reduction
	vpmull.p64	$Xl,$Xl,$xC2
	veor		$t2,$t2,$Xh
	veor		$Xl,$Xl,$t2

.Ldone_v8:
#ifndef __ARMEB__
	vrev64.8	$Xl,$Xl
#endif
	vext.8		$Xl,$Xl,$Xl,#8
	vst1.64		{$Xl},[$Xi]		@ write out Xi

___
$code.=<<___		if ($flavour !~ /64/);
	vldmia		sp!,{d8-d15}		@ 32-bit ABI says so
___
$code.=<<___;
	ret
.size	GFp_gcm_ghash_clmul,.-GFp_gcm_ghash_clmul
___
}
$code.=<<___;
.asciz	"GHASH for ARMv8, CRYPTOGAMS by <appro\@openssl.org>"
.align	2
___

if ($flavour =~ /64/) {				######## 64-bit code
    sub unvmov {
	my $arg=shift;

	$arg =~ m/q([0-9]+)#(lo|hi),\s*q([0-9]+)#(lo|hi)/o &&
	sprintf "ins	v%d.d[%d],v%d.d[%d]",$1,($2 eq "lo")?0:1,$3,($4 eq "lo")?0:1;
    }
    foreach(split("\n",$code)) {
	s/cclr\s+([wx])([^,]+),\s*([a-z]+)/csel	$1$2,$1zr,$1$2,$3/o	or
	s/vmov\.i8/movi/o	or	# fix up legacy mnemonics
	s/vmov\s+(.*)/unvmov($1)/geo	or
	s/vext\.8/ext/o		or
	s/vshr\.s/sshr\.s/o	or
	s/vshr/ushr/o		or
	s/^(\s+)v/$1/o		or	# strip off v prefix
	s/\bbx\s+lr\b/ret/o;

	s/\bq([0-9]+)\b/"v".($1<8?$1:$1+8).".16b"/geo;	# old->new registers
	s/@\s/\/\//o;					# old->new style commentary

	# fix up remaining legacy suffixes
	s/\.[ui]?8(\s)/$1/o;
	s/\.[uis]?32//o and s/\.16b/\.4s/go;
	m/\.p64/o and s/\.16b/\.1q/o;		# 1st pmull argument
	m/l\.p64/o and s/\.16b/\.1d/go;		# 2nd and 3rd pmull arguments
	s/\.[uisp]?64//o and s/\.16b/\.2d/go;
	s/\.[42]([sd])\[([0-3])\]/\.$1\[$2\]/o;

	print $_,"\n";
    }
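    # A few representative rewrites produced by the rules above, worked out
    # by hand for reference (editorial addition; the script itself does not
    # print these):
    #
    #	vmov	   q2#lo,q1#hi	->	ins	v2.d[0],v1.d[1]
    #	vpmull.p64 q0,q12,q3	->	pmull	v0.1q,v20.1d,v3.1d
    #	cclr	   x12,eq	->	csel	x12,xzr,x12,eq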
} else {					######## 32-bit code
    sub unvdup32 {
	my $arg=shift;

	$arg =~ m/q([0-9]+),\s*q([0-9]+)\[([0-3])\]/o &&
	sprintf "vdup.32	q%d,d%d[%d]",$1,2*$2+($3>>1),$3&1;
    }
    sub unvpmullp64 {
	my ($mnemonic,$arg)=@_;

	if ($arg =~ m/q([0-9]+),\s*q([0-9]+),\s*q([0-9]+)/o) {
	    my $word = 0xf2a00e00|(($1&7)<<13)|(($1&8)<<19)
				 |(($2&7)<<17)|(($2&8)<<4)
				 |(($3&7)<<1) |(($3&8)<<2);
	    $word |= 0x00010001	 if ($mnemonic =~ "2");
	    # since ARMv7 instructions are always encoded little-endian.
	    # correct solution is to use .inst directive, but older
	    # assemblers don't implement it:-(
	    sprintf ".byte\t0x%02x,0x%02x,0x%02x,0x%02x\t@ %s %s",
			$word&0xff,($word>>8)&0xff,
			($word>>16)&0xff,($word>>24)&0xff,
			$mnemonic,$arg;
	}
    }

    foreach(split("\n",$code)) {
	s/\b[wx]([0-9]+)\b/r$1/go;		# new->old registers
	s/\bv([0-9])\.[12468]+[bsd]\b/q$1/go;	# new->old registers
	s/\/\/\s?/@ /o;				# new->old style commentary

	# fix up remaining new-style suffixes
	s/\],#[0-9]+/]!/o;

	s/cclr\s+([^,]+),\s*([a-z]+)/mov$2	$1,#0/o		or
	s/vdup\.32\s+(.*)/unvdup32($1)/geo	or
	s/v?(pmull2?)\.p64\s+(.*)/unvpmullp64($1,$2)/geo	or
	s/\bq([0-9]+)#(lo|hi)/sprintf "d%d",2*$1+($2 eq "hi")/geo	or
	s/^(\s+)b\./$1b/o	or
	s/^(\s+)ret/$1bx\tlr/o;

	print $_,"\n";
    }
}

close STDOUT or die "error closing STDOUT"; # enforce flush