/* Do not modify. This file is auto-generated from ghashv8-armx.pl. */
#include "arm_arch.h"

#if __ARM_MAX_ARCH__>=7
.fpu	neon
#ifdef __thumb2__
.syntax	unified
.thumb
# define INST(a,b,c,d) .byte  c,0xef,a,b
#else
.code	32
# define INST(a,b,c,d) .byte  a,b,c,0xf2
#endif

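@ INST() emits the PMULL/PMULL2 (vmull.p64) instructions below as raw
@ opcode bytes, presumably so the file still assembles with toolchains
@ whose assemblers predate the Crypto Extension mnemonics; note the
@ different byte order for the Thumb-2 (0xef) and ARM (0xf2) encodings.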
.text
.globl	gcm_init_v8
.type	gcm_init_v8,%function
.align	4
gcm_init_v8:
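@ Callers pass Htable in r0 and the raw hash key H in r1 (cf. the
@ gcm_init_v8 prototype in OpenSSL's gcm128.c).  The routine derives
@ the "twisted" H and H^2 and stores them as Htable[0..2] for
@ gcm_gmult_v8 and gcm_ghash_v8 below.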
	vld1.64	{q9},[r1]		@ load input H
	vmov.i8	q11,#0xe1
	vshl.i64	q11,q11,#57		@ 0xc2.0
	vext.8	q3,q9,q9,#8
	vshr.u64	q10,q11,#63
	vdup.32	q9,d18[1]
	vext.8	q8,q10,q11,#8		@ t0=0xc2....01
	vshr.u64	q10,q3,#63
	vshr.s32	q9,q9,#31		@ broadcast carry bit
	vand	q10,q10,q8
	vshl.i64	q3,q3,#1
	vext.8	q10,q10,q10,#8
	vand	q8,q8,q9
	vorr	q3,q3,q10		@ H<<<=1
	veor	q12,q3,q8		@ twisted H
	vst1.64	{q12},[r0]!		@ store Htable[0]
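@ The "twist" above is, in effect, a multiplication of H by x modulo
@ the GHASH polynomial x^128+x^7+x^2+x+1 (bit-reflected): H is shifted
@ left one bit and the carry, if any, is folded back in via the
@ 0xc2....01 constant.  Storing H*x lets each reduction phase below
@ get by with a single 64-bit PMULL by the q11 constant.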

	@ calculate H^2
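@ Karatsuba: for A = Ah*x^64 + Al and B = Bh*x^64 + Bl the carry-less
@ product takes three PMULLs instead of four since, with "+" being XOR,
@   A*B = Ah*Bh*x^128 + (Ah*Bh + Al*Bl + (Ah+Al)*(Bh+Bl))*x^64 + Al*Bl
@ Here A = B = twisted H, and q8 ends up holding the pre-XORed halves.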
	vext.8	q8,q12,q12,#8		@ Karatsuba pre-processing
	INST(0xa8,0x0e,0xa8,0xf2)	@ pmull q0,q12,q12
	veor	q8,q8,q12
	INST(0xa9,0x4e,0xa9,0xf2)	@ pmull2 q2,q12,q12
	INST(0xa0,0x2e,0xa0,0xf2)	@ pmull q1,q8,q8

	vext.8	q9,q0,q2,#8		@ Karatsuba post-processing
	veor	q10,q0,q2
	veor	q1,q1,q9
	veor	q1,q1,q10
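@ The 256-bit product Xh:Xm:Xl is now reduced modulo the GHASH
@ polynomial in two phases; since x^128 = x^7+x^2+x+1 (mod the
@ polynomial), each phase multiplies the running low half by the
@ 0xc2-derived constant in q11 and XORs the result back in.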
	INST(0x26,0x4e,0xe0,0xf2)	@ pmull q10,q0,q11		@ 1st phase

	vmov	d4,d3		@ Xh|Xm - 256-bit result
	vmov	d3,d0		@ Xm is rotated Xl
	veor	q0,q1,q10

	vext.8	q10,q0,q0,#8		@ 2nd phase
	INST(0x26,0x0e,0xa0,0xf2)	@ pmull q0,q0,q11
	veor	q10,q10,q2
	veor	q14,q0,q10

	vext.8	q9,q14,q14,#8		@ Karatsuba pre-processing
	veor	q9,q9,q14
	vext.8	q13,q8,q9,#8		@ pack Karatsuba pre-processed
	vst1.64	{q13,q14},[r0]!	@ store Htable[1..2]
	bx	lr
.size	gcm_init_v8,.-gcm_init_v8
.globl	gcm_gmult_v8
.type	gcm_gmult_v8,%function
.align	4
gcm_gmult_v8:
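@ void gcm_gmult_v8(u64 Xi[2], const u128 Htable[16]) -- multiplies the
@ current hash value Xi (r0) by H from Htable (r1) in GF(2^128) and
@ writes the result back to Xi: one GHASH step with no new input.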
	vld1.64	{q9},[r0]		@ load Xi
	vmov.i8	q11,#0xe1
	vld1.64	{q12,q13},[r1]	@ load twisted H, ...
	vshl.u64	q11,q11,#57
#ifndef __ARMEB__
	vrev64.8	q9,q9
#endif
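@ GHASH treats blocks as big-endian bit strings, so little-endian
@ builds byte-reverse each 64-bit half before (and after) the
@ arithmetic; big-endian (__ARMEB__) builds can skip this.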
	vext.8	q3,q9,q9,#8

	INST(0x86,0x0e,0xa8,0xf2)	@ pmull q0,q12,q3		@ H.lo·Xi.lo
	veor	q9,q9,q3		@ Karatsuba pre-processing
	INST(0x87,0x4e,0xa9,0xf2)	@ pmull2 q2,q12,q3		@ H.hi·Xi.hi
	INST(0xa2,0x2e,0xaa,0xf2)	@ pmull q1,q13,q9		@ (H.lo+H.hi)·(Xi.lo+Xi.hi)

	vext.8	q9,q0,q2,#8		@ Karatsuba post-processing
	veor	q10,q0,q2
	veor	q1,q1,q9
	veor	q1,q1,q10
	INST(0x26,0x4e,0xe0,0xf2)	@ pmull q10,q0,q11		@ 1st phase of reduction

	vmov	d4,d3		@ Xh|Xm - 256-bit result
	vmov	d3,d0		@ Xm is rotated Xl
	veor	q0,q1,q10

	vext.8	q10,q0,q0,#8		@ 2nd phase of reduction
	INST(0x26,0x0e,0xa0,0xf2)	@ pmull q0,q0,q11
	veor	q10,q10,q2
	veor	q0,q0,q10

#ifndef __ARMEB__
	vrev64.8	q0,q0
#endif
	vext.8	q0,q0,q0,#8
	vst1.64	{q0},[r0]		@ write out Xi

	bx	lr
.size	gcm_gmult_v8,.-gcm_gmult_v8
.globl	gcm_ghash_v8
.type	gcm_ghash_v8,%function
.align	4
gcm_ghash_v8:
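@ void gcm_ghash_v8(u64 Xi[2], const u128 Htable[16], const u8 *inp,
@ size_t len) -- folds len bytes at inp (r2, len in r3, presumably a
@ multiple of the 16-byte block size) into the hash value Xi (r0),
@ two blocks per loop iteration where possible.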
	vstmdb	sp!,{d8,d9,d10,d11,d12,d13,d14,d15}		@ 32-bit ABI says so
	vld1.64	{q0},[r0]		@ load [rotated] Xi
						@ "[rotated]" means the
						@ loaded value would have
						@ to be rotated to appear
						@ as in the algorithm
						@ specification
	subs	r3,r3,#32		@ see if r3 is 32 or larger
	mov	r12,#16		@ r12 is used as a post-
						@ increment for the input
						@ pointer; as the loop is
						@ modulo-scheduled, r12 is
						@ zeroed just in time to
						@ preclude overstepping
						@ inp[len], which means the
						@ last block[s] are loaded
						@ twice, but the last copy
						@ is not processed
	vld1.64	{q12,q13},[r1]!	@ load twisted H, ..., H^2
	vmov.i8	q11,#0xe1
	vld1.64	{q14},[r1]
	it	eq
	moveq	r12,#0			@ is it time to zero r12?
	vext.8	q0,q0,q0,#8		@ rotate Xi
	vld1.64	{q8},[r2]!	@ load [rotated] I[0]
	vshl.u64	q11,q11,#57		@ compose 0xc2.0 constant
#ifndef __ARMEB__
	vrev64.8	q8,q8
	vrev64.8	q0,q0
#endif
	vext.8	q3,q8,q8,#8		@ rotate I[0]
	blo	.Lodd_tail_v8		@ r3 was less than 32
	vld1.64	{q9},[r2],r12	@ load [rotated] I[1]
#ifndef __ARMEB__
	vrev64.8	q9,q9
#endif
	vext.8	q7,q9,q9,#8
	veor	q3,q3,q0		@ I[i]^=Xi
	INST(0x8e,0x8e,0xa8,0xf2)	@ pmull q4,q12,q7		@ H·Ii+1
	veor	q9,q9,q7		@ Karatsuba pre-processing
	INST(0x8f,0xce,0xa9,0xf2)	@ pmull2 q6,q12,q7
	b	.Loop_mod2x_v8

.align	4
.Loop_mod2x_v8:
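@ Each iteration absorbs two blocks via the aggregated form
@   Xi = (Xi + I[i])*H^2 + I[i+1]*H
@ so both products share one two-phase reduction; the next
@ iteration's H·Ii+1 products (q4,q6) and Karatsuba input (q9)
@ are prepared before the bottom branch (modulo scheduling).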
	vext.8	q10,q3,q3,#8
	subs	r3,r3,#32		@ is there more data?
	INST(0x86,0x0e,0xac,0xf2)	@ pmull q0,q14,q3		@ H^2.lo·Xi.lo
	it	lo
	movlo	r12,#0			@ is it time to zero r12?

	INST(0xa2,0xae,0xaa,0xf2)	@ pmull q5,q13,q9
	veor	q10,q10,q3		@ Karatsuba pre-processing
	INST(0x87,0x4e,0xad,0xf2)	@ pmull2 q2,q14,q3		@ H^2.hi·Xi.hi
	veor	q0,q0,q4		@ accumulate
	INST(0xa5,0x2e,0xab,0xf2)	@ pmull2 q1,q13,q10		@ (H^2.lo+H^2.hi)·(Xi.lo+Xi.hi)
	vld1.64	{q8},[r2],r12	@ load [rotated] I[i+2]

	veor	q2,q2,q6
	it	eq
	moveq	r12,#0			@ is it time to zero r12?
	veor	q1,q1,q5

	vext.8	q9,q0,q2,#8		@ Karatsuba post-processing
	veor	q10,q0,q2
	veor	q1,q1,q9
	vld1.64	{q9},[r2],r12	@ load [rotated] I[i+3]
#ifndef __ARMEB__
	vrev64.8	q8,q8
#endif
	veor	q1,q1,q10
	INST(0x26,0x4e,0xe0,0xf2)	@ pmull q10,q0,q11		@ 1st phase of reduction

#ifndef __ARMEB__
	vrev64.8	q9,q9
#endif
	vmov	d4,d3		@ Xh|Xm - 256-bit result
	vmov	d3,d0		@ Xm is rotated Xl
	vext.8	q7,q9,q9,#8
	vext.8	q3,q8,q8,#8
	veor	q0,q1,q10
	INST(0x8e,0x8e,0xa8,0xf2)	@ pmull q4,q12,q7		@ H·Ii+1
	veor	q3,q3,q2		@ accumulate q3 early

	vext.8	q10,q0,q0,#8		@ 2nd phase of reduction
	INST(0x26,0x0e,0xa0,0xf2)	@ pmull q0,q0,q11
	veor	q3,q3,q10
	veor	q9,q9,q7		@ Karatsuba pre-processing
	veor	q3,q3,q0
	INST(0x8f,0xce,0xa9,0xf2)	@ pmull2 q6,q12,q7
	bhs	.Loop_mod2x_v8		@ there were at least 32 more bytes

	veor	q2,q2,q10
	vext.8	q3,q8,q8,#8		@ re-construct q3
	adds	r3,r3,#32		@ re-construct r3
	veor	q0,q0,q2		@ re-construct q0
	beq	.Ldone_v8		@ is r3 zero?
.Lodd_tail_v8:
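@ A single 16-byte block remains: compute (Xi + I[i])*H with one
@ Karatsuba multiplication and reduction, as in gcm_gmult_v8.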
	vext.8	q10,q0,q0,#8
	veor	q3,q3,q0		@ inp^=Xi
	veor	q9,q8,q10		@ q9 is rotated inp^Xi

	INST(0x86,0x0e,0xa8,0xf2)	@ pmull q0,q12,q3		@ H.lo·Xi.lo
	veor	q9,q9,q3		@ Karatsuba pre-processing
	INST(0x87,0x4e,0xa9,0xf2)	@ pmull2 q2,q12,q3		@ H.hi·Xi.hi
	INST(0xa2,0x2e,0xaa,0xf2)	@ pmull q1,q13,q9		@ (H.lo+H.hi)·(Xi.lo+Xi.hi)

	vext.8	q9,q0,q2,#8		@ Karatsuba post-processing
	veor	q10,q0,q2
	veor	q1,q1,q9
	veor	q1,q1,q10
	INST(0x26,0x4e,0xe0,0xf2)	@ pmull q10,q0,q11		@ 1st phase of reduction

	vmov	d4,d3		@ Xh|Xm - 256-bit result
	vmov	d3,d0		@ Xm is rotated Xl
	veor	q0,q1,q10

	vext.8	q10,q0,q0,#8		@ 2nd phase of reduction
	INST(0x26,0x0e,0xa0,0xf2)	@ pmull q0,q0,q11
	veor	q10,q10,q2
	veor	q0,q0,q10

.Ldone_v8:
#ifndef __ARMEB__
	vrev64.8	q0,q0
#endif
	vext.8	q0,q0,q0,#8
	vst1.64	{q0},[r0]		@ write out Xi

	vldmia	sp!,{d8,d9,d10,d11,d12,d13,d14,d15}		@ 32-bit ABI says so
	bx	lr
.size	gcm_ghash_v8,.-gcm_ghash_v8
.byte	71,72,65,83,72,32,102,111,114,32,65,82,77,118,56,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
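@ "GHASH for ARMv8, CRYPTOGAMS by <appro@openssl.org>"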
.align	2
.align	2
#endif
