#! /usr/bin/env perl
# Copyright 2011-2020 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the OpenSSL license (the "License").  You may not use
# this file except in compliance with the License.  You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html

#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# May 2011
#
# The module implements bn_GF2m_mul_2x2 polynomial multiplication used
# in bn_gf2m.c. It's a kind of low-hanging, mechanical port from C for
# the time being... Except that it has three code paths: pure integer
# code suitable for any x86 CPU, MMX code suitable for PIII and later,
# and PCLMULQDQ code suitable for Westmere and later. Improvement varies
# from one benchmark and µ-arch to another. Below are interval values
# for 163- and 571-bit ECDH benchmarks relative to compiler-generated
# code:
#
# PIII		16%-30%
# P4		12%-12%
# Opteron	18%-40%
# Core2		19%-44%
# Atom		38%-64%
# Westmere	53%-121%(PCLMULQDQ)/20%-32%(MMX)
# Sandy Bridge	72%-127%(PCLMULQDQ)/27%-23%(MMX)
#
# Note that the above improvement coefficients are not coefficients for
# bn_GF2m_mul_2x2 itself. For example, a 120% ECDH improvement is the
# result of bn_GF2m_mul_2x2 being >4x faster. As it gets faster, the
# benchmark is more and more dominated by other subroutines, most
# notably by BN_GF2m_mod[_mul]_arr...

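# Reference model (illustrative only; this helper is hypothetical, is never
# called and is not part of the OpenSSL API): the building block of this
# module is a 32x32->64-bit carry-less multiplication, i.e. multiplication
# of two polynomials over GF(2). A minimal Perl sketch of that primitive,
# assuming 64-bit perl integers, looks as follows; the code paths below
# compute the same product with a 3-bit window table (integer and MMX) or
# a single pclmulqdq instruction.
sub _gf2m_mul_1x1_ref {
	my ($x,$y) = @_;
	my ($lo,$hi) = (0,0);
	for my $i (0..31) {
		next if (!(($y>>$i)&1));	# for every set bit of y...
		$lo ^= ($x<<$i)&0xffffffff;	# ...xor in a shifted copy of x
		$hi ^= $x>>(32-$i) if ($i);	# bits spilling into the upper half
	}
	return ($hi,$lo);			# product as two 32-bit halves
}
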
$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
push(@INC,"${dir}","${dir}../../perlasm");
require "x86asm.pl";

$output = pop;
open STDOUT,">$output";

&asm_init($ARGV[0],$x86only = $ARGV[$#ARGV] eq "386");

$sse2=0;
for (@ARGV) { $sse2=1 if (/-DOPENSSL_IA32_SSE2/); }

&external_label("OPENSSL_ia32cap_P") if ($sse2);

$a="eax";
$b="ebx";
($a1,$a2,$a4)=("ecx","edx","ebp");

$R="mm0";
@T=("mm1","mm2");
($A,$B,$B30,$B31)=("mm2","mm3","mm4","mm5");
@i=("esi","edi");

					if (!$x86only) {
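# _mul_1x1_mmx computes the 64-bit carry-less product of the 32-bit words
# in $a and $b with a 3-bit window table. The two top bits of $a are
# handled separately: pcmpgtd broadcasts each of them, so that $B31/$B30
# end up holding (bit31(a) ? b<<31 : 0) and (bit30(a) ? b<<30 : 0). With
# those bits stripped, every table entry i*a, i=0..7, fits in 32 bits
# (carry-less degrees add), so an 8-entry table is built on the stack and
# $b is then consumed three bits at a time: eleven lookups, each shifted
# into place with psllq and folded into $R with pxor.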
&function_begin_B("_mul_1x1_mmx");
	&sub	("esp",32+4);
	 &mov	($a1,$a);
	 &lea	($a2,&DWP(0,$a,$a));
	 &and	($a1,0x3fffffff);
	 &lea	($a4,&DWP(0,$a2,$a2));
	 &mov	(&DWP(0*4,"esp"),0);
	 &and	($a2,0x7fffffff);
	&movd	($A,$a);
	&movd	($B,$b);
	 &mov	(&DWP(1*4,"esp"),$a1);	# a1
	 &xor	($a1,$a2);		# a1^a2
	&pxor	($B31,$B31);
	&pxor	($B30,$B30);
	 &mov	(&DWP(2*4,"esp"),$a2);	# a2
	 &xor	($a2,$a4);		# a2^a4
	 &mov	(&DWP(3*4,"esp"),$a1);	# a1^a2
	&pcmpgtd($B31,$A);		# broadcast 31st bit
	&paddd	($A,$A);		# $A<<=1
	 &xor	($a1,$a2);		# a1^a4=a1^a2^a2^a4
	 &mov	(&DWP(4*4,"esp"),$a4);	# a4
	 &xor	($a4,$a2);		# a2=a4^a2^a4
	&pand	($B31,$B);
	&pcmpgtd($B30,$A);		# broadcast 30th bit
	 &mov	(&DWP(5*4,"esp"),$a1);	# a1^a4
	 &xor	($a4,$a1);		# a1^a2^a4
	&psllq	($B31,31);
	&pand	($B30,$B);
	 &mov	(&DWP(6*4,"esp"),$a2);	# a2^a4
	&mov	(@i[0],0x7);
	 &mov	(&DWP(7*4,"esp"),$a4);	# a1^a2^a4
	 &mov	($a4,@i[0]);
	&and	(@i[0],$b);
	&shr	($b,3);
	&mov	(@i[1],$a4);
	&psllq	($B30,30);
	&and	(@i[1],$b);
	&shr	($b,3);
	&movd	($R,&DWP(0,"esp",@i[0],4));
	&mov	(@i[0],$a4);
	&and	(@i[0],$b);
	&shr	($b,3);
	for($n=1;$n<9;$n++) {
		&movd	(@T[1],&DWP(0,"esp",@i[1],4));
		&mov	(@i[1],$a4);
		&psllq	(@T[1],3*$n);
		&and	(@i[1],$b);
		&shr	($b,3);
		&pxor	($R,@T[1]);

		push(@i,shift(@i)); push(@T,shift(@T));
	}
	&movd	(@T[1],&DWP(0,"esp",@i[1],4));
	&pxor	($R,$B30);
	&psllq	(@T[1],3*$n++);
	&pxor	($R,@T[1]);

	&movd	(@T[0],&DWP(0,"esp",@i[0],4));
	&pxor	($R,$B31);
	&psllq	(@T[0],3*$n);
	&add	("esp",32+4);
	&pxor	($R,@T[0]);
	&ret	();
&function_end_B("_mul_1x1_mmx");
					}

($lo,$hi)=("eax","edx");
@T=("ecx","ebp");

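# _mul_1x1_ialu is the pure-integer variant of the same 3-bit window
# algorithm. The top two bits of the multiplicand are broadcast with sar
# (instead of pcmpgtd) and masked against $b, and, lacking a 64-bit
# accumulator, each table lookup is split across the $lo/$hi register
# pair with a shl/shr pair before being xor-ed in.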
&function_begin_B("_mul_1x1_ialu");
	&sub	("esp",32+4);
	 &mov	($a1,$a);
	 &lea	($a2,&DWP(0,$a,$a));
	 &lea	($a4,&DWP(0,"",$a,4));
	 &and	($a1,0x3fffffff);
	&lea	(@i[1],&DWP(0,$lo,$lo));
	&sar	($lo,31);		# broadcast 31st bit
	 &mov	(&DWP(0*4,"esp"),0);
	 &and	($a2,0x7fffffff);
	 &mov	(&DWP(1*4,"esp"),$a1);	# a1
	 &xor	($a1,$a2);		# a1^a2
	 &mov	(&DWP(2*4,"esp"),$a2);	# a2
	 &xor	($a2,$a4);		# a2^a4
	 &mov	(&DWP(3*4,"esp"),$a1);	# a1^a2
	 &xor	($a1,$a2);		# a1^a4=a1^a2^a2^a4
	 &mov	(&DWP(4*4,"esp"),$a4);	# a4
	 &xor	($a4,$a2);		# a2=a4^a2^a4
	 &mov	(&DWP(5*4,"esp"),$a1);	# a1^a4
	 &xor	($a4,$a1);		# a1^a2^a4
	&sar	(@i[1],31);		# broadcast 30th bit
	&and	($lo,$b);
	 &mov	(&DWP(6*4,"esp"),$a2);	# a2^a4
	&and	(@i[1],$b);
	 &mov	(&DWP(7*4,"esp"),$a4);	# a1^a2^a4
	&mov	($hi,$lo);
	&shl	($lo,31);
	&mov	(@T[0],@i[1]);
	&shr	($hi,1);

	 &mov	(@i[0],0x7);
	&shl	(@i[1],30);
	 &and	(@i[0],$b);
	&shr	(@T[0],2);
	&xor	($lo,@i[1]);

	&shr	($b,3);
	&mov	(@i[1],0x7);		# 5-byte instruction!?
	&and	(@i[1],$b);
	&shr	($b,3);
	 &xor	($hi,@T[0]);
	&xor	($lo,&DWP(0,"esp",@i[0],4));
	&mov	(@i[0],0x7);
	&and	(@i[0],$b);
	&shr	($b,3);
	for($n=1;$n<9;$n++) {
		&mov	(@T[1],&DWP(0,"esp",@i[1],4));
		&mov	(@i[1],0x7);
		&mov	(@T[0],@T[1]);
		&shl	(@T[1],3*$n);
		&and	(@i[1],$b);
		&shr	(@T[0],32-3*$n);
		&xor	($lo,@T[1]);
		&shr	($b,3);
		&xor	($hi,@T[0]);

		push(@i,shift(@i)); push(@T,shift(@T));
	}
	&mov	(@T[1],&DWP(0,"esp",@i[1],4));
	&mov	(@T[0],@T[1]);
	&shl	(@T[1],3*$n);
	&mov	(@i[1],&DWP(0,"esp",@i[0],4));
	&shr	(@T[0],32-3*$n);	$n++;
	&mov	(@i[0],@i[1]);
	&xor	($lo,@T[1]);
	&shl	(@i[1],3*$n);
	&xor	($hi,@T[0]);
	&shr	(@i[0],32-3*$n);
	&xor	($lo,@i[1]);
	&xor	($hi,@i[0]);

	&add	("esp",32+4);
	&ret	();
&function_end_B("_mul_1x1_ialu");

# void bn_GF2m_mul_2x2(BN_ULONG *r, BN_ULONG a1, BN_ULONG a0, BN_ULONG b1, BN_ULONG b0);
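#
# The 64x64->128-bit product is assembled from three 1x1 multiplications
# with one level of Karatsuba, where "+" stands for xor:
#
#	H = a1·b1, L = a0·b0, M = (a1+a0)·(b1+b0) + H + L
#	a·b = H·x^64 + M·x^32 + L
#
# so that the four 32-bit result words are r[0] = L_lo, r[1] = L_hi + M_lo,
# r[2] = H_lo + M_hi, r[3] = H_hi.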
&function_begin_B("bn_GF2m_mul_2x2");
if (!$x86only) {
	&picmeup("edx","OPENSSL_ia32cap_P");
	&mov	("eax",&DWP(0,"edx"));
	&mov	("edx",&DWP(4,"edx"));
	&test	("eax",1<<23);		# check MMX bit
	&jz	(&label("ialu"));
if ($sse2) {
	&test	("eax",1<<24);		# check FXSR bit
	&jz	(&label("mmx"));
	&test	("edx",1<<1);		# check PCLMULQDQ bit
	&jz	(&label("mmx"));

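	# The stack holds a1,a0,b1,b0 as four dwords: one unaligned load plus
	# shufps with 0b10110001 regroups them so that the low qword of xmm0
	# is the 64-bit a and the high qword is the 64-bit b, and pclmulqdq
	# with immediate 1 then multiplies those two qwords carry-lessly into
	# the full 128-bit result.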
	&movups		("xmm0",&QWP(8,"esp"));
	&shufps		("xmm0","xmm0",0b10110001);
	&pclmulqdq	("xmm0","xmm0",1);
	&mov		("eax",&DWP(4,"esp"));
	&movups		(&QWP(0,"eax"),"xmm0");
	&ret	();

&set_label("mmx",16);
}
	&push	("ebp");
	&push	("ebx");
	&push	("esi");
	&push	("edi");
	&mov	($a,&wparam(1));
	&mov	($b,&wparam(3));
	&call	("_mul_1x1_mmx");	# a1·b1
	&movq	("mm7",$R);

	&mov	($a,&wparam(2));
	&mov	($b,&wparam(4));
	&call	("_mul_1x1_mmx");	# a0·b0
	&movq	("mm6",$R);

	&mov	($a,&wparam(1));
	&mov	($b,&wparam(3));
	&xor	($a,&wparam(2));
	&xor	($b,&wparam(4));
	&call	("_mul_1x1_mmx");	# (a0+a1)·(b0+b1)
	&pxor	($R,"mm7");
	&mov	($a,&wparam(0));
	&pxor	($R,"mm6");		# (a0+a1)·(b0+b1)-a1·b1-a0·b0

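	# Fold the middle term into the 128-bit result: low half = L ^ (M<<32),
	# high half = H ^ (M>>32), with L in mm6 and H in mm7; the register
	# pops are interleaved to hide latency.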
	&movq	($A,$R);
	&psllq	($R,32);
	&pop	("edi");
	&psrlq	($A,32);
	&pop	("esi");
	&pxor	($R,"mm6");
	&pop	("ebx");
	&pxor	($A,"mm7");
	&movq	(&QWP(0,$a),$R);
	&pop	("ebp");
	&movq	(&QWP(8,$a),$A);
	&emms	();
	&ret	();
&set_label("ialu",16);
}
	&push	("ebp");
	&push	("ebx");
	&push	("esi");
	&push	("edi");
	&stack_push(4+1);

	&mov	($a,&wparam(1));
	&mov	($b,&wparam(3));
	&call	("_mul_1x1_ialu");	# a1·b1
	&mov	(&DWP(8,"esp"),$lo);
	&mov	(&DWP(12,"esp"),$hi);

	&mov	($a,&wparam(2));
	&mov	($b,&wparam(4));
	&call	("_mul_1x1_ialu");	# a0·b0
	&mov	(&DWP(0,"esp"),$lo);
	&mov	(&DWP(4,"esp"),$hi);

	&mov	($a,&wparam(1));
	&mov	($b,&wparam(3));
	&xor	($a,&wparam(2));
	&xor	($b,&wparam(4));
	&call	("_mul_1x1_ialu");	# (a0+a1)·(b0+b1)

	&mov	("ebp",&wparam(0));
		 @r=("ebx","ecx","edi","esi");
	&mov	(@r[0],&DWP(0,"esp"));
	&mov	(@r[1],&DWP(4,"esp"));
	&mov	(@r[2],&DWP(8,"esp"));
	&mov	(@r[3],&DWP(12,"esp"));

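	# @r now holds L_lo,L_hi,H_lo,H_hi and $lo/$hi the raw middle product;
	# the xor sequence below forms r[1] = L_hi^M_lo and r[2] = H_lo^M_hi
	# (with M = ($lo,$hi)+H+L), while L_lo and H_hi go to r[0] and r[3]
	# unchanged.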
	&xor	($lo,$hi);
	&xor	($hi,@r[1]);
	&xor	($lo,@r[0]);
	&mov	(&DWP(0,"ebp"),@r[0]);
	&xor	($hi,@r[2]);
	&mov	(&DWP(12,"ebp"),@r[3]);
	&xor	($lo,@r[3]);
	&stack_pop(4+1);
	&xor	($hi,@r[3]);
	&pop	("edi");
	&xor	($lo,$hi);
	&pop	("esi");
	&mov	(&DWP(8,"ebp"),$hi);
	&pop	("ebx");
	&mov	(&DWP(4,"ebp"),$lo);
	&pop	("ebp");
	&ret	();
&function_end_B("bn_GF2m_mul_2x2");

&asciz	("GF(2^m) Multiplication for x86, CRYPTOGAMS by <appro\@openssl.org>");

&asm_finish();

close STDOUT or die "error closing STDOUT: $!";