#!/usr/bin/env perl
#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# April 2010
#
# The module implements the "4-bit" GCM GHASH function and the
# underlying single multiplication operation in GF(2^128). "4-bit"
# means that it uses a 256-byte per-key table [+32 bytes shared
# table]. There is no experimental performance data available yet.
# The only approximation that can be made at this point is based on
# code size. The inner loop is 32 instructions long and should
# execute in <40 cycles on a single-issue core. After verifying that
# gcc 3.4 didn't unroll the corresponding loop, the assembler loop
# body was found to be ~3x smaller than the compiler-generated one...
#
# July 2010
#
# Rescheduling for the dual-issue pipeline resulted in an 8.5%
# improvement on a Cortex A8 core and ~25 cycles per processed byte
# (which was observed to be ~3 times faster than gcc-generated
# code:-)
#
# February 2011
#
# Profiler-assisted and platform-specific optimization resulted in a
# 7% improvement on a Cortex A8 core and ~23.5 cycles per byte.
#
# March 2011
#
# Add NEON implementation featuring polynomial multiplication, i.e.
# no lookup tables involved. On Cortex A8 it was measured to process
# one byte in 15 cycles, or 55% faster than the integer-only code.
#
# April 2014
#
# Switch to the multiplication algorithm suggested in the paper
# referenced below and combine it with the reduction algorithm from
# the x86 module. The performance improvement over the previous
# version varies from 65% on Snapdragon S4 to 110% on Cortex A9. In
# absolute terms Cortex A8 processes one byte in 8.45 cycles, A9 in
# 10.2, Snapdragon S4 in 9.33.
#
# Câmara, D.; Gouvêa, C. P. L.; López, J. & Dahab, R.: Fast Software
# Polynomial Multiplication on ARM Processors using the NEON Engine.
#
# http://conradoplg.cryptoland.net/files/2010/12/mocrysen13.pdf

# ====================================================================
# Note about the "528B" variant. In the ARM case it makes less sense
# to implement it, for the following reasons:
#
# - the performance improvement won't be anywhere near 50%, because
#   the 128-bit shift operation is neatly fused with the 128-bit xor
#   here, and the "528B" variant would eliminate only 4-5
#   instructions out of 32 in the inner loop (meaning that the
#   estimated improvement is ~15%);
# - ARM-based systems are often embedded ones, and the extra memory
#   consumption might be unappreciated (for so little improvement);
#
# Byte order [in]dependence. =========================================
#
# The caller is expected to maintain a specific *dword* order in
# Htable, namely with the *least* significant dword of the 128-bit
# value at the *lower* address. This differs completely from the C
# code and has everything to do with the ldm instruction and the
# order in which dwords are "consumed" by the algorithm. The *byte*
# order within these dwords is in turn whatever the *native* byte
# order on the current platform is. See gcm128.c for a working
# example...
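#
# A rough C sketch of the "4-bit" algorithm the integer code below
# implements (illustration only, never emitted; gmult_4bit_sketch and
# its signature are hypothetical, types as in gcm128.c, and the table
# layout is simplified relative to the assembly):
#
#	typedef struct { u64 hi,lo; } u128;
#
#	static void gmult_4bit_sketch(u8 Xi[16], const u128 Htable[16],
#				      const u16 rem_4bit[16])
#	{
#		u128 Z;
#		int cnt = 15;
#		u8  nlo = Xi[15], nhi = nlo>>4, rem;
#
#		nlo &= 0xf;
#		Z = Htable[nlo];
#		while (1) {
#			rem  = (u8)Z.lo & 0xf;		/* bits shifted out */
#			Z.lo = (Z.hi<<60)|(Z.lo>>4);	/* Z >>= 4 */
#			Z.hi = (Z.hi>>4) ^ ((u64)rem_4bit[rem]<<48);
#			Z.hi ^= Htable[nhi].hi;		/* high nibble */
#			Z.lo ^= Htable[nhi].lo;
#
#			if (--cnt < 0) break;
#
#			nlo = Xi[cnt]; nhi = nlo>>4; nlo &= 0xf;
#			rem  = (u8)Z.lo & 0xf;
#			Z.lo = (Z.hi<<60)|(Z.lo>>4);
#			Z.hi = (Z.hi>>4) ^ ((u64)rem_4bit[rem]<<48);
#			Z.hi ^= Htable[nlo].hi;		/* low nibble */
#			Z.lo ^= Htable[nlo].lo;
#		}
#		/* store Z back to Xi, modulo byte-order fixups */
#	}
#
# gcm_ghash_4bit additionally xors each input byte into Xi before the
# table lookups and repeats the above for every 16-byte block.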

while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}
open STDOUT,">$output";

$Xi="r0";	# argument block
$Htbl="r1";
$inp="r2";
$len="r3";

$Zll="r4";	# variables
$Zlh="r5";
$Zhl="r6";
$Zhh="r7";
$Tll="r8";
$Tlh="r9";
$Thl="r10";
$Thh="r11";
$nlo="r12";
################# r13 is stack pointer
$nhi="r14";
################# r15 is program counter

$rem_4bit=$inp;	# used in gcm_gmult_4bit
$cnt=$len;

sub Zsmash() {
  my $i=12;
  my @args=@_;
  for ($Zll,$Zlh,$Zhl,$Zhh) {
    $code.=<<___;
#if __ARM_ARCH__>=7 && defined(__ARMEL__)
	rev	$_,$_
	str	$_,[$Xi,#$i]
#elif defined(__ARMEB__)
	str	$_,[$Xi,#$i]
#else
	mov	$Tlh,$_,lsr#8
	strb	$_,[$Xi,#$i+3]
	mov	$Thl,$_,lsr#16
	strb	$Tlh,[$Xi,#$i+2]
	mov	$Thh,$_,lsr#24
	strb	$Thl,[$Xi,#$i+1]
	strb	$Thh,[$Xi,#$i]
#endif
___
    $code.="\t".shift(@args)."\n";
    $i-=4;
  }
}

$code=<<___;
#include "arm_arch.h"

.text
.code	32

#ifdef __clang__
#define ldrplb	ldrbpl
#define ldrneb	ldrbne
#endif

.type	rem_4bit,%object
.align	5
rem_4bit:
.short	0x0000,0x1C20,0x3840,0x2460
.short	0x7080,0x6CA0,0x48C0,0x54E0
.short	0xE100,0xFD20,0xD940,0xC560
.short	0x9180,0x8DA0,0xA9C0,0xB5E0
.size	rem_4bit,.-rem_4bit

.type	rem_4bit_get,%function
rem_4bit_get:
	sub	$rem_4bit,pc,#8
	sub	$rem_4bit,$rem_4bit,#32	@ &rem_4bit
	b	.Lrem_4bit_got
	nop
.size	rem_4bit_get,.-rem_4bit_get

.global	gcm_ghash_4bit
.type	gcm_ghash_4bit,%function
gcm_ghash_4bit:
	sub	r12,pc,#8
	add	$len,$inp,$len		@ $len to point at the end
	stmdb	sp!,{r3-r11,lr}		@ save $len/end too
	sub	r12,r12,#48		@ &rem_4bit

	ldmia	r12,{r4-r11}		@ copy rem_4bit ...
	stmdb	sp!,{r4-r11}		@ ... to stack
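	@ stack layout from here on (implied by the code below):
	@ sp+0..31	copy of rem_4bit, indexed as halfwords
	@ sp+32		saved r3, i.e. the end-of-input pointer
	@ sp+36..	saved r4-r11,lr, hence "add sp,sp,#36" on exit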

	ldrb	$nlo,[$inp,#15]
	ldrb	$nhi,[$Xi,#15]
.Louter:
	eor	$nlo,$nlo,$nhi
	and	$nhi,$nlo,#0xf0
	and	$nlo,$nlo,#0x0f
	mov	$cnt,#14

	add	$Zhh,$Htbl,$nlo,lsl#4
	ldmia	$Zhh,{$Zll-$Zhh}	@ load Htbl[nlo]
	add	$Thh,$Htbl,$nhi
	ldrb	$nlo,[$inp,#14]

	and	$nhi,$Zll,#0xf		@ rem
	ldmia	$Thh,{$Tll-$Thh}	@ load Htbl[nhi]
	add	$nhi,$nhi,$nhi
	eor	$Zll,$Tll,$Zll,lsr#4
	ldrh	$Tll,[sp,$nhi]		@ rem_4bit[rem]
	eor	$Zll,$Zll,$Zlh,lsl#28
	ldrb	$nhi,[$Xi,#14]
	eor	$Zlh,$Tlh,$Zlh,lsr#4
	eor	$Zlh,$Zlh,$Zhl,lsl#28
	eor	$Zhl,$Thl,$Zhl,lsr#4
	eor	$Zhl,$Zhl,$Zhh,lsl#28
	eor	$Zhh,$Thh,$Zhh,lsr#4
	eor	$nlo,$nlo,$nhi
	and	$nhi,$nlo,#0xf0
	and	$nlo,$nlo,#0x0f
	eor	$Zhh,$Zhh,$Tll,lsl#16

.Linner:
	add	$Thh,$Htbl,$nlo,lsl#4
	and	$nlo,$Zll,#0xf		@ rem
	subs	$cnt,$cnt,#1
	add	$nlo,$nlo,$nlo
	ldmia	$Thh,{$Tll-$Thh}	@ load Htbl[nlo]
	eor	$Zll,$Tll,$Zll,lsr#4
	eor	$Zll,$Zll,$Zlh,lsl#28
	eor	$Zlh,$Tlh,$Zlh,lsr#4
	eor	$Zlh,$Zlh,$Zhl,lsl#28
	ldrh	$Tll,[sp,$nlo]		@ rem_4bit[rem]
	eor	$Zhl,$Thl,$Zhl,lsr#4
	ldrplb	$nlo,[$inp,$cnt]
	eor	$Zhl,$Zhl,$Zhh,lsl#28
	eor	$Zhh,$Thh,$Zhh,lsr#4

	add	$Thh,$Htbl,$nhi
	and	$nhi,$Zll,#0xf		@ rem
	eor	$Zhh,$Zhh,$Tll,lsl#16	@ ^= rem_4bit[rem]
	add	$nhi,$nhi,$nhi
	ldmia	$Thh,{$Tll-$Thh}	@ load Htbl[nhi]
	eor	$Zll,$Tll,$Zll,lsr#4
	ldrplb	$Tll,[$Xi,$cnt]
	eor	$Zll,$Zll,$Zlh,lsl#28
	eor	$Zlh,$Tlh,$Zlh,lsr#4
	ldrh	$Tlh,[sp,$nhi]
	eor	$Zlh,$Zlh,$Zhl,lsl#28
	eor	$Zhl,$Thl,$Zhl,lsr#4
	eor	$Zhl,$Zhl,$Zhh,lsl#28
	eorpl	$nlo,$nlo,$Tll
	eor	$Zhh,$Thh,$Zhh,lsr#4
	andpl	$nhi,$nlo,#0xf0
	andpl	$nlo,$nlo,#0x0f
	eor	$Zhh,$Zhh,$Tlh,lsl#16	@ ^= rem_4bit[rem]
	bpl	.Linner

	ldr	$len,[sp,#32]		@ re-load $len/end
	add	$inp,$inp,#16
	mov	$nhi,$Zll
___
	&Zsmash("cmp\t$inp,$len","ldrneb\t$nlo,[$inp,#15]");
$code.=<<___;
	bne	.Louter

	add	sp,sp,#36
#if __ARM_ARCH__>=5
	ldmia	sp!,{r4-r11,pc}
#else
	ldmia	sp!,{r4-r11,lr}
	tst	lr,#1
	moveq	pc,lr			@ be binary compatible with V4, yet
	bx	lr			@ interoperable with Thumb ISA:-)
#endif
.size	gcm_ghash_4bit,.-gcm_ghash_4bit

.global	gcm_gmult_4bit
.type	gcm_gmult_4bit,%function
gcm_gmult_4bit:
	stmdb	sp!,{r4-r11,lr}
	ldrb	$nlo,[$Xi,#15]
	b	rem_4bit_get
.Lrem_4bit_got:
	and	$nhi,$nlo,#0xf0
	and	$nlo,$nlo,#0x0f
	mov	$cnt,#14

	add	$Zhh,$Htbl,$nlo,lsl#4
	ldmia	$Zhh,{$Zll-$Zhh}	@ load Htbl[nlo]
	ldrb	$nlo,[$Xi,#14]

	add	$Thh,$Htbl,$nhi
	and	$nhi,$Zll,#0xf		@ rem
	ldmia	$Thh,{$Tll-$Thh}	@ load Htbl[nhi]
	add	$nhi,$nhi,$nhi
	eor	$Zll,$Tll,$Zll,lsr#4
	ldrh	$Tll,[$rem_4bit,$nhi]	@ rem_4bit[rem]
	eor	$Zll,$Zll,$Zlh,lsl#28
	eor	$Zlh,$Tlh,$Zlh,lsr#4
	eor	$Zlh,$Zlh,$Zhl,lsl#28
	eor	$Zhl,$Thl,$Zhl,lsr#4
	eor	$Zhl,$Zhl,$Zhh,lsl#28
	eor	$Zhh,$Thh,$Zhh,lsr#4
	and	$nhi,$nlo,#0xf0
	eor	$Zhh,$Zhh,$Tll,lsl#16
	and	$nlo,$nlo,#0x0f

.Loop:
	add	$Thh,$Htbl,$nlo,lsl#4
	and	$nlo,$Zll,#0xf		@ rem
	subs	$cnt,$cnt,#1
	add	$nlo,$nlo,$nlo
	ldmia	$Thh,{$Tll-$Thh}	@ load Htbl[nlo]
	eor	$Zll,$Tll,$Zll,lsr#4
	eor	$Zll,$Zll,$Zlh,lsl#28
	eor	$Zlh,$Tlh,$Zlh,lsr#4
	eor	$Zlh,$Zlh,$Zhl,lsl#28
	ldrh	$Tll,[$rem_4bit,$nlo]	@ rem_4bit[rem]
	eor	$Zhl,$Thl,$Zhl,lsr#4
	ldrplb	$nlo,[$Xi,$cnt]
	eor	$Zhl,$Zhl,$Zhh,lsl#28
	eor	$Zhh,$Thh,$Zhh,lsr#4

	add	$Thh,$Htbl,$nhi
	and	$nhi,$Zll,#0xf		@ rem
	eor	$Zhh,$Zhh,$Tll,lsl#16	@ ^= rem_4bit[rem]
	add	$nhi,$nhi,$nhi
	ldmia	$Thh,{$Tll-$Thh}	@ load Htbl[nhi]
	eor	$Zll,$Tll,$Zll,lsr#4
	eor	$Zll,$Zll,$Zlh,lsl#28
	eor	$Zlh,$Tlh,$Zlh,lsr#4
	ldrh	$Tll,[$rem_4bit,$nhi]	@ rem_4bit[rem]
	eor	$Zlh,$Zlh,$Zhl,lsl#28
	eor	$Zhl,$Thl,$Zhl,lsr#4
	eor	$Zhl,$Zhl,$Zhh,lsl#28
	eor	$Zhh,$Thh,$Zhh,lsr#4
	andpl	$nhi,$nlo,#0xf0
	andpl	$nlo,$nlo,#0x0f
	eor	$Zhh,$Zhh,$Tll,lsl#16	@ ^= rem_4bit[rem]
	bpl	.Loop
___
	&Zsmash();
$code.=<<___;
#if __ARM_ARCH__>=5
	ldmia	sp!,{r4-r11,pc}
#else
	ldmia	sp!,{r4-r11,lr}
	tst	lr,#1
	moveq	pc,lr			@ be binary compatible with V4, yet
	bx	lr			@ interoperable with Thumb ISA:-)
#endif
.size	gcm_gmult_4bit,.-gcm_gmult_4bit
___
{
my ($Xl,$Xm,$Xh,$IN)=map("q$_",(0..3));
my ($t0,$t1,$t2,$t3)=map("q$_",(8..12));
my ($Hlo,$Hhi,$Hhl,$k48,$k32,$k16)=map("d$_",(26..31));
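
# clmul64x64 below synthesizes a 64x64->128-bit carry-less multiply
# from 8x8-bit polynomial multiplications (vmull.p8), following the
# Câmara-Gouvêa-López-Dahab paper referenced at the top. Rotated
# copies of the operands (A1-A3, B1-B4) yield the off-diagonal
# partial products P0+P1, P2+P3, P4+P5, P6+P7; the k48/k32/k16 masks
# clear the terms that wrapped around during rotation, and the vext.8
# instructions realign each partial product before it is xored into
# the direct product D = A*B.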
sub clmul64x64 {
my ($r,$a,$b)=@_;
$code.=<<___;
	vext.8		$t0#lo, $a, $a, #1	@ A1
	vmull.p8	$t0, $t0#lo, $b		@ F = A1*B
	vext.8		$r#lo, $b, $b, #1	@ B1
	vmull.p8	$r, $a, $r#lo		@ E = A*B1
	vext.8		$t1#lo, $a, $a, #2	@ A2
	vmull.p8	$t1, $t1#lo, $b		@ H = A2*B
	vext.8		$t3#lo, $b, $b, #2	@ B2
	vmull.p8	$t3, $a, $t3#lo		@ G = A*B2
	vext.8		$t2#lo, $a, $a, #3	@ A3
	veor		$t0, $t0, $r		@ L = E + F
	vmull.p8	$t2, $t2#lo, $b		@ J = A3*B
	vext.8		$r#lo, $b, $b, #3	@ B3
	veor		$t1, $t1, $t3		@ M = G + H
	vmull.p8	$r, $a, $r#lo		@ I = A*B3
	veor		$t0#lo, $t0#lo, $t0#hi	@ t0 = (L) (P0 + P1) << 8
	vand		$t0#hi, $t0#hi, $k48
	vext.8		$t3#lo, $b, $b, #4	@ B4
	veor		$t1#lo, $t1#lo, $t1#hi	@ t1 = (M) (P2 + P3) << 16
	vand		$t1#hi, $t1#hi, $k32
	vmull.p8	$t3, $a, $t3#lo		@ K = A*B4
	veor		$t2, $t2, $r		@ N = I + J
	veor		$t0#lo, $t0#lo, $t0#hi
	veor		$t1#lo, $t1#lo, $t1#hi
	veor		$t2#lo, $t2#lo, $t2#hi	@ t2 = (N) (P4 + P5) << 24
	vand		$t2#hi, $t2#hi, $k16
	vext.8		$t0, $t0, $t0, #15
	veor		$t3#lo, $t3#lo, $t3#hi	@ t3 = (K) (P6 + P7) << 32
	vmov.i64	$t3#hi, #0
	vext.8		$t1, $t1, $t1, #14
	veor		$t2#lo, $t2#lo, $t2#hi
	vmull.p8	$r, $a, $b		@ D = A*B
	vext.8		$t3, $t3, $t3, #12
	vext.8		$t2, $t2, $t2, #13
	veor		$t0, $t0, $t1
	veor		$t2, $t2, $t3
	veor		$r, $r, $t0
	veor		$r, $r, $t2
___
}

$code.=<<___;
#if __ARM_MAX_ARCH__>=7
.arch	armv7-a
.fpu	neon

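@ gcm_init_neon stores a "twisted" H: H is shifted left by one bit as
@ a 128-bit value, and if the bit shifted out at the top was set, the
@ 0xc2...01 polynomial constant is xored back in. The reduction in
@ .Lgmult_neon below expects this form (same convention as
@ reduction_avx in ghash-x86_64.pl).
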
.global	gcm_init_neon
.type	gcm_init_neon,%function
.align	4
gcm_init_neon:
	vld1.64		$IN#hi,[r1,:64]!	@ load H
	vmov.i8		$t0,#0xe1
	vld1.64		$IN#lo,[r1,:64]
	vshl.i64	$t0#hi,#57
	vshr.u64	$t0#lo,#63		@ t0=0xc2....01
	vdup.8		$t1,$IN#hi[7]
	vshr.u64	$Hlo,$IN#lo,#63
	vshr.s8		$t1,#7			@ broadcast carry bit
	vshl.i64	$IN,$IN,#1
	vand		$t0,$t0,$t1
	vorr		$IN#hi,$Hlo		@ H<<<=1
	veor		$IN,$IN,$t0		@ twisted H
	vstmia		r0,{$IN}

	ret					@ bx lr
.size	gcm_init_neon,.-gcm_init_neon

.global	gcm_gmult_neon
.type	gcm_gmult_neon,%function
.align	4
gcm_gmult_neon:
	vld1.64		$IN#hi,[$Xi,:64]!	@ load Xi
	vld1.64		$IN#lo,[$Xi,:64]!
	vmov.i64	$k48,#0x0000ffffffffffff
	vldmia		$Htbl,{$Hlo-$Hhi}	@ load twisted H
	vmov.i64	$k32,#0x00000000ffffffff
#ifdef __ARMEL__
	vrev64.8	$IN,$IN
#endif
	vmov.i64	$k16,#0x000000000000ffff
	veor		$Hhl,$Hlo,$Hhi		@ Karatsuba pre-processing
	mov		$len,#16
	b		.Lgmult_neon
.size	gcm_gmult_neon,.-gcm_gmult_neon

.global	gcm_ghash_neon
.type	gcm_ghash_neon,%function
.align	4
gcm_ghash_neon:
	vld1.64		$Xl#hi,[$Xi,:64]!	@ load Xi
	vld1.64		$Xl#lo,[$Xi,:64]!
	vmov.i64	$k48,#0x0000ffffffffffff
	vldmia		$Htbl,{$Hlo-$Hhi}	@ load twisted H
	vmov.i64	$k32,#0x00000000ffffffff
#ifdef __ARMEL__
	vrev64.8	$Xl,$Xl
#endif
	vmov.i64	$k16,#0x000000000000ffff
	veor		$Hhl,$Hlo,$Hhi		@ Karatsuba pre-processing

.Loop_neon:
	vld1.64		$IN#hi,[$inp]!		@ load inp
	vld1.64		$IN#lo,[$inp]!
#ifdef __ARMEL__
	vrev64.8	$IN,$IN
#endif
	veor		$IN,$Xl			@ inp^=Xi
.Lgmult_neon:
___
	&clmul64x64	($Xl,$Hlo,"$IN#lo");	# H.lo·Xi.lo
$code.=<<___;
	veor		$IN#lo,$IN#lo,$IN#hi	@ Karatsuba pre-processing
___
	&clmul64x64	($Xm,$Hhl,"$IN#lo");	# (H.lo+H.hi)·(Xi.lo+Xi.hi)
	&clmul64x64	($Xh,$Hhi,"$IN#hi");	# H.hi·Xi.hi
$code.=<<___;
	veor		$Xm,$Xm,$Xl		@ Karatsuba post-processing
	veor		$Xm,$Xm,$Xh
	veor		$Xl#hi,$Xl#hi,$Xm#lo
	veor		$Xh#lo,$Xh#lo,$Xm#hi	@ Xh|Xl - 256-bit result

	@ equivalent of reduction_avx from ghash-x86_64.pl
	vshl.i64	$t1,$Xl,#57		@ 1st phase
	vshl.i64	$t2,$Xl,#62
	veor		$t2,$t2,$t1		@
	vshl.i64	$t1,$Xl,#63
	veor		$t2, $t2, $t1		@
	veor		$Xl#hi,$Xl#hi,$t2#lo	@
	veor		$Xh#lo,$Xh#lo,$t2#hi

	vshr.u64	$t2,$Xl,#1		@ 2nd phase
	veor		$Xh,$Xh,$Xl
	veor		$Xl,$Xl,$t2		@
	vshr.u64	$t2,$t2,#6
	vshr.u64	$Xl,$Xl,#1		@
	veor		$Xl,$Xl,$Xh		@
	veor		$Xl,$Xl,$t2		@

	subs		$len,#16
	bne		.Loop_neon

#ifdef __ARMEL__
	vrev64.8	$Xl,$Xl
#endif
	sub		$Xi,#16
	vst1.64		$Xl#hi,[$Xi,:64]!	@ write out Xi
	vst1.64		$Xl#lo,[$Xi,:64]

	ret					@ bx lr
.size	gcm_ghash_neon,.-gcm_ghash_neon
#endif
___
}
$code.=<<___;
.asciz	"GHASH for ARMv4/NEON, CRYPTOGAMS by <appro\@openssl.org>"
.align	2
___

foreach (split("\n",$code)) {
	s/\`([^\`]*)\`/eval $1/geo;

	s/\bq([0-9]+)#(lo|hi)/sprintf "d%d",2*$1+($2 eq "hi")/geo	or
	s/\bret\b/bx	lr/go		or
	s/\bbx\s+lr\b/.word\t0xe12fff1e/go;	# make it possible to compile with -march=armv4

	print $_,"\n";
}
close STDOUT; # enforce flush