1*ebfedea0SLionel Sambuc#!/usr/bin/env perl
2*ebfedea0SLionel Sambuc#
3*ebfedea0SLionel Sambuc# ====================================================================
4*ebfedea0SLionel Sambuc# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
5*ebfedea0SLionel Sambuc# project. The module is, however, dual licensed under OpenSSL and
6*ebfedea0SLionel Sambuc# CRYPTOGAMS licenses depending on where you obtain it. For further
7*ebfedea0SLionel Sambuc# details see http://www.openssl.org/~appro/cryptogams/.
8*ebfedea0SLionel Sambuc# ====================================================================
9*ebfedea0SLionel Sambuc#
10*ebfedea0SLionel Sambuc# May 2011
11*ebfedea0SLionel Sambuc#
12*ebfedea0SLionel Sambuc# The module implements bn_GF2m_mul_2x2 polynomial multiplication
13*ebfedea0SLionel Sambuc# used in bn_gf2m.c. It's kind of low-hanging mechanical port from
14*ebfedea0SLionel Sambuc# C for the time being... Except that it has two code paths: pure
15*ebfedea0SLionel Sambuc# integer code suitable for any ARMv4 and later CPU and NEON code
16*ebfedea0SLionel Sambuc# suitable for ARMv7. Pure integer 1x1 multiplication subroutine runs
17*ebfedea0SLionel Sambuc# in ~45 cycles on dual-issue core such as Cortex A8, which is ~50%
18*ebfedea0SLionel Sambuc# faster than compiler-generated code. For ECDH and ECDSA verify (but
19*ebfedea0SLionel Sambuc# not for ECDSA sign) it means 25%-45% improvement depending on key
20*ebfedea0SLionel Sambuc# length, more for longer keys. Even though NEON 1x1 multiplication
21*ebfedea0SLionel Sambuc# runs in even less cycles, ~30, improvement is measurable only on
22*ebfedea0SLionel Sambuc# longer keys. One has to optimize code elsewhere to get NEON glow...
23*ebfedea0SLionel Sambuc
# Scan the argument list for the first plausible output filename and
# redirect STDOUT there; with no filename, output goes to current STDOUT.
# Three-arg open with an explicit error check: the original 2-arg,
# unchecked open would fail silently (and is mode-injection-prone).
while (($output=shift) && ($output!~/^\w[\w\-]*\.\w+$/)) {}
if (defined $output) {
	open STDOUT, '>', $output or die "can't open $output for writing: $!";
}
26*ebfedea0SLionel Sambuc
# NEON register-name helpers, used via `&Dlo("qN")`-style backtick
# substitutions in the code template below.  Empty string on no match.
# The original declared these with an empty prototype `()` even though
# each consumes one argument via shift; that only escaped a compile-time
# "too many arguments" error because every call site uses the
# prototype-bypassing &-call form.  Prototypes removed.
sub Dlo   { shift=~m|q([1]?[0-9])|?"d".($1*2):"";     }	# qN  -> d(2N), low half
sub Dhi   { shift=~m|q([1]?[0-9])|?"d".($1*2+1):"";   }	# qN  -> d(2N+1), high half
sub Q     { shift=~m|d([1-3]?[02468])|?"q".($1/2):""; }	# even dN -> q(N/2)
30*ebfedea0SLionel Sambuc
# NEON code path for the 1x1 (32x32->64) GF(2) multiplication.  Per the
# callers below, the operand a is expected in d16 and two copies of b in
# d17; the 64-bit product is returned in d0, clobbering q1-q3.
# Fix: the multiplication-dot comments (`a·bb` etc.) had been mangled
# into U+FFFD replacement characters; restored to "·".
$code=<<___;
#include "arm_arch.h"

.text
.code	32

#if __ARM_ARCH__>=7
.fpu	neon

.type	mul_1x1_neon,%function
.align	5
mul_1x1_neon:
	vshl.u64	`&Dlo("q1")`,d16,#8	@ q1-q3 are slided $a
	vmull.p8	`&Q("d0")`,d16,d17	@ a·bb
	vshl.u64	`&Dlo("q2")`,d16,#16
	vmull.p8	q1,`&Dlo("q1")`,d17	@ a<<8·bb
	vshl.u64	`&Dlo("q3")`,d16,#24
	vmull.p8	q2,`&Dlo("q2")`,d17	@ a<<16·bb
	vshr.u64	`&Dlo("q1")`,#8
	vmull.p8	q3,`&Dlo("q3")`,d17	@ a<<24·bb
	vshl.u64	`&Dhi("q1")`,#24
	veor		d0,`&Dlo("q1")`
	vshr.u64	`&Dlo("q2")`,#16
	veor		d0,`&Dhi("q1")`
	vshl.u64	`&Dhi("q2")`,#16
	veor		d0,`&Dlo("q2")`
	vshr.u64	`&Dlo("q3")`,#24
	veor		d0,`&Dhi("q2")`
	vshl.u64	`&Dhi("q3")`,#8
	veor		d0,`&Dlo("q3")`
	veor		d0,`&Dhi("q3")`
	bx	lr
.size	mul_1x1_neon,.-mul_1x1_neon
#endif
___
################
# private interface to mul_1x1_ialu
#
# Inputs arrive in the AAPCS argument registers: $a in r1, $b in r0.
$a="r1";
$b="r0";

# Two alias sets for the same scratch registers r4-r9: the $aN names are
# used while the shifted-multiplicand table is built, the $hi/$lo/... names
# while the product is accumulated.  NOTE: map() yields SEVEN registers
# (r4-r9 and r12), but each tuple has only six names, so the trailing r12
# is deliberately dropped here -- r12 is reserved for $mask below.
($a0,$a1,$a2,$a12,$a4,$a14)=
($hi,$lo,$t0,$t1, $i0,$i1 )=map("r$_",(4..9),12);

# r12 holds the index mask #7<<2 (a 3-bit table index pre-scaled to a
# word offset); the caller loads it before calling mul_1x1_ialu.
$mask="r12";
76*ebfedea0SLionel Sambuc
# mul_1x1_ialu: 32x32->64-bit GF(2) polynomial multiplication for the
# plain integer pipeline (any ARMv4+).  Strategy visible below: build an
# 8-entry table at [sp], tab[i] = carry-less product of i and the low 30
# bits of $a (i = 0..7), then accumulate table lookups for each 3-bit
# slice of $b; the two top bits of $a are folded in separately by the
# tst/eorne pairs near the end.
#
# in:  $a (r1), $b (r0); $mask (r12) preloaded with #7<<2 by the caller;
#      32 bytes of caller-allocated scratch at [sp] for tab[8].
# out: product in $hi:$lo (r4:r5); r4-r9 clobbered, $mask preserved.
$code.=<<___;
.type	mul_1x1_ialu,%function
.align	5
mul_1x1_ialu:
	mov	$a0,#0
	bic	$a1,$a,#3<<30		@ a1=a&0x3fffffff
	str	$a0,[sp,#0]		@ tab[0]=0
	add	$a2,$a1,$a1		@ a2=a1<<1
	str	$a1,[sp,#4]		@ tab[1]=a1
	eor	$a12,$a1,$a2		@ a1^a2
	str	$a2,[sp,#8]		@ tab[2]=a2
	mov	$a4,$a1,lsl#2		@ a4=a1<<2
	str	$a12,[sp,#12]		@ tab[3]=a1^a2
	eor	$a14,$a1,$a4		@ a1^a4
	str	$a4,[sp,#16]		@ tab[4]=a4
	eor	$a0,$a2,$a4		@ a2^a4
	str	$a14,[sp,#20]		@ tab[5]=a1^a4
	eor	$a12,$a12,$a4		@ a1^a2^a4
	str	$a0,[sp,#24]		@ tab[6]=a2^a4
	and	$i0,$mask,$b,lsl#2
	str	$a12,[sp,#28]		@ tab[7]=a1^a2^a4

	and	$i1,$mask,$b,lsr#1
	ldr	$lo,[sp,$i0]		@ tab[b       & 0x7]
	and	$i0,$mask,$b,lsr#4
	ldr	$t1,[sp,$i1]		@ tab[b >>  3 & 0x7]
	and	$i1,$mask,$b,lsr#7
	ldr	$t0,[sp,$i0]		@ tab[b >>  6 & 0x7]
	eor	$lo,$lo,$t1,lsl#3	@ stall
	mov	$hi,$t1,lsr#29
	ldr	$t1,[sp,$i1]		@ tab[b >>  9 & 0x7]

	and	$i0,$mask,$b,lsr#10
	eor	$lo,$lo,$t0,lsl#6
	eor	$hi,$hi,$t0,lsr#26
	ldr	$t0,[sp,$i0]		@ tab[b >> 12 & 0x7]

	and	$i1,$mask,$b,lsr#13
	eor	$lo,$lo,$t1,lsl#9
	eor	$hi,$hi,$t1,lsr#23
	ldr	$t1,[sp,$i1]		@ tab[b >> 15 & 0x7]

	and	$i0,$mask,$b,lsr#16
	eor	$lo,$lo,$t0,lsl#12
	eor	$hi,$hi,$t0,lsr#20
	ldr	$t0,[sp,$i0]		@ tab[b >> 18 & 0x7]

	and	$i1,$mask,$b,lsr#19
	eor	$lo,$lo,$t1,lsl#15
	eor	$hi,$hi,$t1,lsr#17
	ldr	$t1,[sp,$i1]		@ tab[b >> 21 & 0x7]

	and	$i0,$mask,$b,lsr#22
	eor	$lo,$lo,$t0,lsl#18
	eor	$hi,$hi,$t0,lsr#14
	ldr	$t0,[sp,$i0]		@ tab[b >> 24 & 0x7]

	and	$i1,$mask,$b,lsr#25
	eor	$lo,$lo,$t1,lsl#21
	eor	$hi,$hi,$t1,lsr#11
	ldr	$t1,[sp,$i1]		@ tab[b >> 27 & 0x7]

	tst	$a,#1<<30
	and	$i0,$mask,$b,lsr#28
	eor	$lo,$lo,$t0,lsl#24
	eor	$hi,$hi,$t0,lsr#8
	ldr	$t0,[sp,$i0]		@ tab[b >> 30      ]

	eorne	$lo,$lo,$b,lsl#30
	eorne	$hi,$hi,$b,lsr#2
	tst	$a,#1<<31
	eor	$lo,$lo,$t1,lsl#27
	eor	$hi,$hi,$t1,lsr#5
	eorne	$lo,$lo,$b,lsl#31
	eorne	$hi,$hi,$b,lsr#1
	eor	$lo,$lo,$t0,lsl#30
	eor	$hi,$hi,$t0,lsr#2

	mov	pc,lr
.size	mul_1x1_ialu,.-mul_1x1_ialu
___
################
# void	bn_GF2m_mul_2x2(BN_ULONG *r,
#	BN_ULONG a1,BN_ULONG a0,
#	BN_ULONG b1,BN_ULONG b0);	# r[3..0]=a1a0·b1b0

# NEON register aliases (d18-d23) for the two operand halves and the two
# partial products used by the NEON path of bn_GF2m_mul_2x2.
($A1,$B1,$A0,$B0,$A1B1,$A0B0)=map("d$_",(18..23));
164*ebfedea0SLionel Sambuc
# bn_GF2m_mul_2x2 entry.  On ARMv7 it probes OPENSSL_armcap_P and, if the
# NEON bit is set, computes the 64x64->128 product Karatsuba-style with
# three calls to mul_1x1_neon; otherwise it falls through to the integer
# code at .Lialu.  Fix: restored the "·" multiplication dots in emitted
# comments that had been mangled into U+FFFD replacement characters.
$code.=<<___;
.global	bn_GF2m_mul_2x2
.type	bn_GF2m_mul_2x2,%function
.align	5
bn_GF2m_mul_2x2:
#if __ARM_ARCH__>=7
	ldr	r12,.LOPENSSL_armcap
.Lpic:	ldr	r12,[pc,r12]
	tst	r12,#1
	beq	.Lialu

	veor	$A1,$A1
	vmov.32	$B1,r3,r3		@ two copies of b1
	vmov.32	${A1}[0],r1		@ a1

	veor	$A0,$A0
	vld1.32	${B0}[],[sp,:32]	@ two copies of b0
	vmov.32	${A0}[0],r2		@ a0
	mov	r12,lr

	vmov	d16,$A1
	vmov	d17,$B1
	bl	mul_1x1_neon		@ a1·b1
	vmov	$A1B1,d0

	vmov	d16,$A0
	vmov	d17,$B0
	bl	mul_1x1_neon		@ a0·b0
	vmov	$A0B0,d0

	veor	d16,$A0,$A1
	veor	d17,$B0,$B1
	veor	$A0,$A0B0,$A1B1
	bl	mul_1x1_neon		@ (a0+a1)·(b0+b1)

	veor	d0,$A0			@ (a0+a1)·(b0+b1)-a0·b0-a1·b1
	vshl.u64 d1,d0,#32
	vshr.u64 d0,d0,#32
	veor	$A0B0,d1
	veor	$A1B1,d0
	vst1.32	{${A0B0}[0]},[r0,:32]!
	vst1.32	{${A0B0}[1]},[r0,:32]!
	vst1.32	{${A1B1}[0]},[r0,:32]!
	vst1.32	{${A1B1}[1]},[r0,:32]
	bx	r12
.align	4
.Lialu:
#endif
___
# Integer path: three mul_1x1_ialu calls (Karatsuba).  b1 and then the
# swapped a0/b0 pairs are fed through the fixed $a/$b argument registers;
# the xor-swap triples exchange a0<->a1 and b0<->b1 without a spare
# register.  Fix: restored the "·" multiplication dots in emitted
# comments that had been mangled into U+FFFD replacement characters.
$ret="r10";	# reassigned 1st argument
$code.=<<___;
	stmdb	sp!,{r4-r10,lr}
	mov	$ret,r0			@ reassign 1st argument
	mov	$b,r3			@ $b=b1
	ldr	r3,[sp,#32]		@ load b0
	mov	$mask,#7<<2
	sub	sp,sp,#32		@ allocate tab[8]

	bl	mul_1x1_ialu		@ a1·b1
	str	$lo,[$ret,#8]
	str	$hi,[$ret,#12]

	eor	$b,$b,r3		@ flip b0 and b1
	 eor	$a,$a,r2		@ flip a0 and a1
	eor	r3,r3,$b
	 eor	r2,r2,$a
	eor	$b,$b,r3
	 eor	$a,$a,r2
	bl	mul_1x1_ialu		@ a0·b0
	str	$lo,[$ret]
	str	$hi,[$ret,#4]

	eor	$a,$a,r2
	eor	$b,$b,r3
	bl	mul_1x1_ialu		@ (a1+a0)·(b1+b0)
___
# Karatsuba recombination and epilogue.  r[3:2]=a1*b1 and r[1:0]=a0*b0
# are already stored; $hi:$lo holds (a1+a0)*(b1+b0).  The eor chain folds
# all four stored words in to produce the middle 64 bits, written back to
# r[2] ($hi) and r[1] ($lo).  @r aliases r6-r9 for the ldmia destination.
@r=map("r$_",(6..9));
$code.=<<___;
	ldmia	$ret,{@r[0]-@r[3]}
	eor	$lo,$lo,$hi
	eor	$hi,$hi,@r[1]
	eor	$lo,$lo,@r[0]
	eor	$hi,$hi,@r[2]
	eor	$lo,$lo,@r[3]
	eor	$hi,$hi,@r[3]
	str	$hi,[$ret,#8]
	eor	$lo,$lo,$hi
	add	sp,sp,#32		@ destroy tab[8]
	str	$lo,[$ret,#4]

#if __ARM_ARCH__>=5
	ldmia	sp!,{r4-r10,pc}
#else
	ldmia	sp!,{r4-r10,lr}
	tst	lr,#1
	moveq	pc,lr			@ be binary compatible with V4, yet
	bx	lr			@ interoperable with Thumb ISA:-)
#endif
.size	bn_GF2m_mul_2x2,.-bn_GF2m_mul_2x2
#if __ARM_ARCH__>=7
.align	5
.LOPENSSL_armcap:
.word	OPENSSL_armcap_P-(.Lpic+8)
#endif
.asciz	"GF(2^m) Multiplication for ARMv4/NEON, CRYPTOGAMS by <appro\@openssl.org>"
.align	5

.comm	OPENSSL_armcap_P,4,4
___
274*ebfedea0SLionel Sambuc
# Expand the `...` backtick substitutions (register-name helpers above),
# then rewrite "bx lr" as a raw opcode so the output also assembles with
# -march=armv4, and emit.  close() is now checked: buffered write errors
# on the output file only surface at close time.
$code =~ s/\`([^\`]*)\`/eval $1/gem;
$code =~ s/\bbx\s+lr\b/.word\t0xe12fff1e/gm;    # make it possible to compile with -march=armv4
print $code;
close STDOUT or die "error closing STDOUT: $!";   # enforce flush
279