#include "arm_arch.h"

.text
#if defined(__thumb2__) || defined(__clang__)
.syntax	unified
#define ldrplb  ldrbpl
#define ldrneb  ldrbne
#endif
#if defined(__thumb2__)
.thumb
#else
.code	32
#endif

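@ rem_4bit is the reduction lookup table for the 4-bit (Shoup) GHASH
@ method: entry i is the 16-bit constant folded back in for the nibble
@ that falls off the low end of the accumulator on each 4-bit shift
@ (see the "eor r7,r7,r8,lsl#16" fix-ups below).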
.type	rem_4bit,%object
.align	5
rem_4bit:
.short	0x0000,0x1C20,0x3840,0x2460
.short	0x7080,0x6CA0,0x48C0,0x54E0
.short	0xE100,0xFD20,0xD940,0xC560
.short	0x9180,0x8DA0,0xA9C0,0xB5E0
.size	rem_4bit,.-rem_4bit

.type	rem_4bit_get,%function
rem_4bit_get:
#if defined(__thumb2__)
	adr	r2,rem_4bit
#else
	sub	r2,pc,#8+32	@ &rem_4bit
#endif
	b	.Lrem_4bit_got
	nop
	nop
.size	rem_4bit_get,.-rem_4bit_get

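@ gcm_ghash_4bit: r0 = Xi (128-bit hash value), r1 = Htable (4-bit
@ multiplication table for H), r2 = input, r3 = input length in bytes
@ (assumed to be a multiple of 16).  For each 16-byte block it computes
@ Xi = (Xi ^ inp) * H, keeping a copy of rem_4bit on the stack so the
@ reduction constants can be indexed off sp.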
.globl	gcm_ghash_4bit
.type	gcm_ghash_4bit,%function
.align	4
gcm_ghash_4bit:
#if defined(__thumb2__)
	adr	r12,rem_4bit
#else
	sub	r12,pc,#8+48		@ &rem_4bit
#endif
	add	r3,r2,r3		@ r3 to point at the end
	stmdb	sp!,{r3,r4,r5,r6,r7,r8,r9,r10,r11,lr}		@ save r3/end too

	ldmia	r12,{r4,r5,r6,r7,r8,r9,r10,r11}		@ copy rem_4bit ...
	stmdb	sp!,{r4,r5,r6,r7,r8,r9,r10,r11}		@ ... to stack

	ldrb	r12,[r2,#15]
	ldrb	r14,[r0,#15]
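@ Outer loop: one iteration per 16-byte block.  r12 and r14 hold the
@ current bytes of inp and Xi; their XOR is split into nlo (low nibble,
@ in r12) and nhi (high nibble, pre-scaled by 16 in r14) to index Htable.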
.Louter:
	eor	r12,r12,r14
	and	r14,r12,#0xf0
	and	r12,r12,#0x0f
	mov	r3,#14

	add	r7,r1,r12,lsl#4
	ldmia	r7,{r4,r5,r6,r7}	@ load Htbl[nlo]
	add	r11,r1,r14
	ldrb	r12,[r2,#14]

	and	r14,r4,#0xf		@ rem
	ldmia	r11,{r8,r9,r10,r11}	@ load Htbl[nhi]
	add	r14,r14,r14
	eor	r4,r8,r4,lsr#4
	ldrh	r8,[sp,r14]		@ rem_4bit[rem]
	eor	r4,r4,r5,lsl#28
	ldrb	r14,[r0,#14]
	eor	r5,r9,r5,lsr#4
	eor	r5,r5,r6,lsl#28
	eor	r6,r10,r6,lsr#4
	eor	r6,r6,r7,lsl#28
	eor	r7,r11,r7,lsr#4
	eor	r12,r12,r14
	and	r14,r12,#0xf0
	and	r12,r12,#0x0f
	eor	r7,r7,r8,lsl#16

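@ Inner loop: walks the remaining bytes 14..0 (r3 counts down).  Each
@ pass shifts the accumulator r4-r7 right by 4 bits, XORs in Htbl[nlo]
@ and Htbl[nhi], and folds the shifted-out nibble back in through the
@ rem_4bit copy on the stack.  The byte loads are "pl"-predicated so the
@ final iteration (r3 gone negative) does not read past the block.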
.Linner:
	add	r11,r1,r12,lsl#4
	and	r12,r4,#0xf		@ rem
	subs	r3,r3,#1
	add	r12,r12,r12
	ldmia	r11,{r8,r9,r10,r11}	@ load Htbl[nlo]
	eor	r4,r8,r4,lsr#4
	eor	r4,r4,r5,lsl#28
	eor	r5,r9,r5,lsr#4
	eor	r5,r5,r6,lsl#28
	ldrh	r8,[sp,r12]		@ rem_4bit[rem]
	eor	r6,r10,r6,lsr#4
#ifdef	__thumb2__
	it	pl
#endif
	ldrplb	r12,[r2,r3]
	eor	r6,r6,r7,lsl#28
	eor	r7,r11,r7,lsr#4

	add	r11,r1,r14
	and	r14,r4,#0xf		@ rem
	eor	r7,r7,r8,lsl#16	@ ^= rem_4bit[rem]
	add	r14,r14,r14
	ldmia	r11,{r8,r9,r10,r11}	@ load Htbl[nhi]
	eor	r4,r8,r4,lsr#4
#ifdef	__thumb2__
	it	pl
#endif
	ldrplb	r8,[r0,r3]
	eor	r4,r4,r5,lsl#28
	eor	r5,r9,r5,lsr#4
	ldrh	r9,[sp,r14]
	eor	r5,r5,r6,lsl#28
	eor	r6,r10,r6,lsr#4
	eor	r6,r6,r7,lsl#28
#ifdef	__thumb2__
	it	pl
#endif
	eorpl	r12,r12,r8
	eor	r7,r11,r7,lsr#4
#ifdef	__thumb2__
	itt	pl
#endif
	andpl	r14,r12,#0xf0
	andpl	r12,r12,#0x0f
	eor	r7,r7,r9,lsl#16	@ ^= rem_4bit[rem]
	bpl	.Linner

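@ Block done: write Xi back to [r0], byte-swapping as required by the
@ target (rev on little-endian ARMv7+, word stores on big-endian, byte
@ stores otherwise), then loop while r2 has not reached the end pointer
@ saved on the stack.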
	ldr	r3,[sp,#32]		@ re-load r3/end
	add	r2,r2,#16
	mov	r14,r4
#if __ARM_ARCH__>=7 && defined(__ARMEL__)
	rev	r4,r4
	str	r4,[r0,#12]
#elif defined(__ARMEB__)
	str	r4,[r0,#12]
#else
	mov	r9,r4,lsr#8
	strb	r4,[r0,#12+3]
	mov	r10,r4,lsr#16
	strb	r9,[r0,#12+2]
	mov	r11,r4,lsr#24
	strb	r10,[r0,#12+1]
	strb	r11,[r0,#12]
#endif
	cmp	r2,r3
#if __ARM_ARCH__>=7 && defined(__ARMEL__)
	rev	r5,r5
	str	r5,[r0,#8]
#elif defined(__ARMEB__)
	str	r5,[r0,#8]
#else
	mov	r9,r5,lsr#8
	strb	r5,[r0,#8+3]
	mov	r10,r5,lsr#16
	strb	r9,[r0,#8+2]
	mov	r11,r5,lsr#24
	strb	r10,[r0,#8+1]
	strb	r11,[r0,#8]
#endif

#ifdef __thumb2__
	it	ne
#endif
	ldrneb	r12,[r2,#15]
#if __ARM_ARCH__>=7 && defined(__ARMEL__)
	rev	r6,r6
	str	r6,[r0,#4]
#elif defined(__ARMEB__)
	str	r6,[r0,#4]
#else
	mov	r9,r6,lsr#8
	strb	r6,[r0,#4+3]
	mov	r10,r6,lsr#16
	strb	r9,[r0,#4+2]
	mov	r11,r6,lsr#24
	strb	r10,[r0,#4+1]
	strb	r11,[r0,#4]
#endif

#if __ARM_ARCH__>=7 && defined(__ARMEL__)
	rev	r7,r7
	str	r7,[r0,#0]
#elif defined(__ARMEB__)
	str	r7,[r0,#0]
#else
	mov	r9,r7,lsr#8
	strb	r7,[r0,#0+3]
	mov	r10,r7,lsr#16
	strb	r9,[r0,#0+2]
	mov	r11,r7,lsr#24
	strb	r10,[r0,#0+1]
	strb	r11,[r0,#0]
#endif

	bne	.Louter

	add	sp,sp,#36
#if __ARM_ARCH__>=5
	ldmia	sp!,{r4,r5,r6,r7,r8,r9,r10,r11,pc}
#else
	ldmia	sp!,{r4,r5,r6,r7,r8,r9,r10,r11,lr}
	tst	lr,#1
	moveq	pc,lr			@ be binary compatible with V4, yet
.word	0xe12fff1e			@ interoperable with Thumb ISA:-)
#endif
.size	gcm_ghash_4bit,.-gcm_ghash_4bit

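@ gcm_gmult_4bit: r0 = Xi, r1 = Htable.  Same 4-bit table walk as
@ gcm_ghash_4bit, but with no input block to XOR in; rem_4bit is
@ addressed through r2 (set up by rem_4bit_get) instead of a stack copy.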
.globl	gcm_gmult_4bit
.type	gcm_gmult_4bit,%function
gcm_gmult_4bit:
	stmdb	sp!,{r4,r5,r6,r7,r8,r9,r10,r11,lr}
	ldrb	r12,[r0,#15]
	b	rem_4bit_get
.Lrem_4bit_got:
	and	r14,r12,#0xf0
	and	r12,r12,#0x0f
	mov	r3,#14

	add	r7,r1,r12,lsl#4
	ldmia	r7,{r4,r5,r6,r7}	@ load Htbl[nlo]
	ldrb	r12,[r0,#14]

	add	r11,r1,r14
	and	r14,r4,#0xf		@ rem
	ldmia	r11,{r8,r9,r10,r11}	@ load Htbl[nhi]
	add	r14,r14,r14
	eor	r4,r8,r4,lsr#4
	ldrh	r8,[r2,r14]	@ rem_4bit[rem]
	eor	r4,r4,r5,lsl#28
	eor	r5,r9,r5,lsr#4
	eor	r5,r5,r6,lsl#28
	eor	r6,r10,r6,lsr#4
	eor	r6,r6,r7,lsl#28
	eor	r7,r11,r7,lsr#4
	and	r14,r12,#0xf0
	eor	r7,r7,r8,lsl#16
	and	r12,r12,#0x0f

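@ .Loop mirrors .Linner above, one byte of Xi per iteration, with the
@ rem_4bit lookups going through r2 rather than sp.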
.Loop:
	add	r11,r1,r12,lsl#4
	and	r12,r4,#0xf		@ rem
	subs	r3,r3,#1
	add	r12,r12,r12
	ldmia	r11,{r8,r9,r10,r11}	@ load Htbl[nlo]
	eor	r4,r8,r4,lsr#4
	eor	r4,r4,r5,lsl#28
	eor	r5,r9,r5,lsr#4
	eor	r5,r5,r6,lsl#28
	ldrh	r8,[r2,r12]	@ rem_4bit[rem]
	eor	r6,r10,r6,lsr#4
#ifdef	__thumb2__
	it	pl
#endif
	ldrplb	r12,[r0,r3]
	eor	r6,r6,r7,lsl#28
	eor	r7,r11,r7,lsr#4

	add	r11,r1,r14
	and	r14,r4,#0xf		@ rem
	eor	r7,r7,r8,lsl#16	@ ^= rem_4bit[rem]
	add	r14,r14,r14
	ldmia	r11,{r8,r9,r10,r11}	@ load Htbl[nhi]
	eor	r4,r8,r4,lsr#4
	eor	r4,r4,r5,lsl#28
	eor	r5,r9,r5,lsr#4
	ldrh	r8,[r2,r14]	@ rem_4bit[rem]
	eor	r5,r5,r6,lsl#28
	eor	r6,r10,r6,lsr#4
	eor	r6,r6,r7,lsl#28
	eor	r7,r11,r7,lsr#4
#ifdef	__thumb2__
	itt	pl
#endif
	andpl	r14,r12,#0xf0
	andpl	r12,r12,#0x0f
	eor	r7,r7,r8,lsl#16	@ ^= rem_4bit[rem]
	bpl	.Loop
#if __ARM_ARCH__>=7 && defined(__ARMEL__)
	rev	r4,r4
	str	r4,[r0,#12]
#elif defined(__ARMEB__)
	str	r4,[r0,#12]
#else
	mov	r9,r4,lsr#8
	strb	r4,[r0,#12+3]
	mov	r10,r4,lsr#16
	strb	r9,[r0,#12+2]
	mov	r11,r4,lsr#24
	strb	r10,[r0,#12+1]
	strb	r11,[r0,#12]
#endif

#if __ARM_ARCH__>=7 && defined(__ARMEL__)
	rev	r5,r5
	str	r5,[r0,#8]
#elif defined(__ARMEB__)
	str	r5,[r0,#8]
#else
	mov	r9,r5,lsr#8
	strb	r5,[r0,#8+3]
	mov	r10,r5,lsr#16
	strb	r9,[r0,#8+2]
	mov	r11,r5,lsr#24
	strb	r10,[r0,#8+1]
	strb	r11,[r0,#8]
#endif

#if __ARM_ARCH__>=7 && defined(__ARMEL__)
	rev	r6,r6
	str	r6,[r0,#4]
#elif defined(__ARMEB__)
	str	r6,[r0,#4]
#else
	mov	r9,r6,lsr#8
	strb	r6,[r0,#4+3]
	mov	r10,r6,lsr#16
	strb	r9,[r0,#4+2]
	mov	r11,r6,lsr#24
	strb	r10,[r0,#4+1]
	strb	r11,[r0,#4]
#endif

#if __ARM_ARCH__>=7 && defined(__ARMEL__)
	rev	r7,r7
	str	r7,[r0,#0]
#elif defined(__ARMEB__)
	str	r7,[r0,#0]
#else
	mov	r9,r7,lsr#8
	strb	r7,[r0,#0+3]
	mov	r10,r7,lsr#16
	strb	r9,[r0,#0+2]
	mov	r11,r7,lsr#24
	strb	r10,[r0,#0+1]
	strb	r11,[r0,#0]
#endif

#if __ARM_ARCH__>=5
	ldmia	sp!,{r4,r5,r6,r7,r8,r9,r10,r11,pc}
#else
	ldmia	sp!,{r4,r5,r6,r7,r8,r9,r10,r11,lr}
	tst	lr,#1
	moveq	pc,lr			@ be binary compatible with V4, yet
.word	0xe12fff1e			@ interoperable with Thumb ISA:-)
#endif
.size	gcm_gmult_4bit,.-gcm_gmult_4bit
#if __ARM_MAX_ARCH__>=7
.arch	armv7-a
.fpu	neon

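@ gcm_init_neon: r0 = Htable, r1 = H.  Stores the "twisted" H used by
@ the NEON code below: H shifted left by one bit, with the 0xc2...01
@ constant XOR-ed in when the carried-out bit was set.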
.globl	gcm_init_neon
.type	gcm_init_neon,%function
.align	4
gcm_init_neon:
	vld1.64	d7,[r1]!		@ load H
	vmov.i8	q8,#0xe1
	vld1.64	d6,[r1]
	vshl.i64	d17,#57
	vshr.u64	d16,#63		@ t0=0xc2....01
	vdup.8	q9,d7[7]
	vshr.u64	d26,d6,#63
	vshr.s8	q9,#7			@ broadcast carry bit
	vshl.i64	q3,q3,#1
	vand	q8,q8,q9
	vorr	d7,d26		@ H<<<=1
	veor	q3,q3,q8		@ twisted H
	vstmia	r0,{q3}

	bx	lr					@ bx lr
.size	gcm_init_neon,.-gcm_init_neon

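@ gcm_gmult_neon: r0 = Xi, r1 = Htable.  Single-block multiply: loads Xi
@ and the twisted H, precomputes the Karatsuba term H.lo^H.hi in d28,
@ then takes the shared .Lgmult_neon path with r3 = 16 so the loop body
@ below runs exactly once.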
.globl	gcm_gmult_neon
.type	gcm_gmult_neon,%function
.align	4
gcm_gmult_neon:
	vld1.64	d7,[r0]!		@ load Xi
	vld1.64	d6,[r0]!
	vmov.i64	d29,#0x0000ffffffffffff
	vldmia	r1,{d26,d27}	@ load twisted H
	vmov.i64	d30,#0x00000000ffffffff
#ifdef __ARMEL__
	vrev64.8	q3,q3
#endif
	vmov.i64	d31,#0x000000000000ffff
	veor	d28,d26,d27		@ Karatsuba pre-processing
	mov	r3,#16
	b	.Lgmult_neon
.size	gcm_gmult_neon,.-gcm_gmult_neon

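@ gcm_ghash_neon: r0 = Xi, r1 = Htable, r2 = input, r3 = length in bytes
@ (assumed to be a multiple of 16).  Loads Xi once, then XORs in each
@ 16-byte block and runs the shared multiply/reduce below.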
.globl	gcm_ghash_neon
.type	gcm_ghash_neon,%function
.align	4
gcm_ghash_neon:
	vld1.64	d1,[r0]!		@ load Xi
	vld1.64	d0,[r0]!
	vmov.i64	d29,#0x0000ffffffffffff
	vldmia	r1,{d26,d27}	@ load twisted H
	vmov.i64	d30,#0x00000000ffffffff
#ifdef __ARMEL__
	vrev64.8	q0,q0
#endif
	vmov.i64	d31,#0x000000000000ffff
	veor	d28,d26,d27		@ Karatsuba pre-processing

.Loop_neon:
	vld1.64	d7,[r2]!		@ load inp
	vld1.64	d6,[r2]!
#ifdef __ARMEL__
	vrev64.8	q3,q3
#endif
	veor	q3,q0			@ inp^=Xi
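@ .Lgmult_neon: 128x128-bit carry-less multiply via Karatsuba, built
@ from three 64x64 products (lo*lo into q0, (lo^hi)*(lo^hi) into q1,
@ hi*hi into q2).  Each 64x64 product is in turn assembled from vmull.p8
@ multiplies of byte-rotated operands, with the vand/vext sequences
@ masking and realigning the partial products (the A1/B1/... comments).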
.Lgmult_neon:
	vext.8	d16, d26, d26, #1	@ A1
	vmull.p8	q8, d16, d6		@ F = A1*B
	vext.8	d0, d6, d6, #1	@ B1
	vmull.p8	q0, d26, d0		@ E = A*B1
	vext.8	d18, d26, d26, #2	@ A2
	vmull.p8	q9, d18, d6		@ H = A2*B
	vext.8	d22, d6, d6, #2	@ B2
	vmull.p8	q11, d26, d22		@ G = A*B2
	vext.8	d20, d26, d26, #3	@ A3
	veor	q8, q8, q0		@ L = E + F
	vmull.p8	q10, d20, d6		@ J = A3*B
	vext.8	d0, d6, d6, #3	@ B3
	veor	q9, q9, q11		@ M = G + H
	vmull.p8	q0, d26, d0		@ I = A*B3
	veor	d16, d16, d17	@ t0 = (L) (P0 + P1) << 8
	vand	d17, d17, d29
	vext.8	d22, d6, d6, #4	@ B4
	veor	d18, d18, d19	@ t1 = (M) (P2 + P3) << 16
	vand	d19, d19, d30
	vmull.p8	q11, d26, d22		@ K = A*B4
	veor	q10, q10, q0		@ N = I + J
	veor	d16, d16, d17
	veor	d18, d18, d19
	veor	d20, d20, d21	@ t2 = (N) (P4 + P5) << 24
	vand	d21, d21, d31
	vext.8	q8, q8, q8, #15
	veor	d22, d22, d23	@ t3 = (K) (P6 + P7) << 32
	vmov.i64	d23, #0
	vext.8	q9, q9, q9, #14
	veor	d20, d20, d21
	vmull.p8	q0, d26, d6		@ D = A*B
	vext.8	q11, q11, q11, #12
	vext.8	q10, q10, q10, #13
	veor	q8, q8, q9
	veor	q10, q10, q11
	veor	q0, q0, q8
	veor	q0, q0, q10
	veor	d6,d6,d7	@ Karatsuba pre-processing
	vext.8	d16, d28, d28, #1	@ A1
	vmull.p8	q8, d16, d6		@ F = A1*B
	vext.8	d2, d6, d6, #1	@ B1
	vmull.p8	q1, d28, d2		@ E = A*B1
	vext.8	d18, d28, d28, #2	@ A2
	vmull.p8	q9, d18, d6		@ H = A2*B
	vext.8	d22, d6, d6, #2	@ B2
	vmull.p8	q11, d28, d22		@ G = A*B2
	vext.8	d20, d28, d28, #3	@ A3
	veor	q8, q8, q1		@ L = E + F
	vmull.p8	q10, d20, d6		@ J = A3*B
	vext.8	d2, d6, d6, #3	@ B3
	veor	q9, q9, q11		@ M = G + H
	vmull.p8	q1, d28, d2		@ I = A*B3
	veor	d16, d16, d17	@ t0 = (L) (P0 + P1) << 8
	vand	d17, d17, d29
	vext.8	d22, d6, d6, #4	@ B4
	veor	d18, d18, d19	@ t1 = (M) (P2 + P3) << 16
	vand	d19, d19, d30
	vmull.p8	q11, d28, d22		@ K = A*B4
	veor	q10, q10, q1		@ N = I + J
	veor	d16, d16, d17
	veor	d18, d18, d19
	veor	d20, d20, d21	@ t2 = (N) (P4 + P5) << 24
	vand	d21, d21, d31
	vext.8	q8, q8, q8, #15
	veor	d22, d22, d23	@ t3 = (K) (P6 + P7) << 32
	vmov.i64	d23, #0
	vext.8	q9, q9, q9, #14
	veor	d20, d20, d21
	vmull.p8	q1, d28, d6		@ D = A*B
	vext.8	q11, q11, q11, #12
	vext.8	q10, q10, q10, #13
	veor	q8, q8, q9
	veor	q10, q10, q11
	veor	q1, q1, q8
	veor	q1, q1, q10
	vext.8	d16, d27, d27, #1	@ A1
	vmull.p8	q8, d16, d7		@ F = A1*B
	vext.8	d4, d7, d7, #1	@ B1
	vmull.p8	q2, d27, d4		@ E = A*B1
	vext.8	d18, d27, d27, #2	@ A2
	vmull.p8	q9, d18, d7		@ H = A2*B
	vext.8	d22, d7, d7, #2	@ B2
	vmull.p8	q11, d27, d22		@ G = A*B2
	vext.8	d20, d27, d27, #3	@ A3
	veor	q8, q8, q2		@ L = E + F
	vmull.p8	q10, d20, d7		@ J = A3*B
	vext.8	d4, d7, d7, #3	@ B3
	veor	q9, q9, q11		@ M = G + H
	vmull.p8	q2, d27, d4		@ I = A*B3
	veor	d16, d16, d17	@ t0 = (L) (P0 + P1) << 8
	vand	d17, d17, d29
	vext.8	d22, d7, d7, #4	@ B4
	veor	d18, d18, d19	@ t1 = (M) (P2 + P3) << 16
	vand	d19, d19, d30
	vmull.p8	q11, d27, d22		@ K = A*B4
	veor	q10, q10, q2		@ N = I + J
	veor	d16, d16, d17
	veor	d18, d18, d19
	veor	d20, d20, d21	@ t2 = (N) (P4 + P5) << 24
	vand	d21, d21, d31
	vext.8	q8, q8, q8, #15
	veor	d22, d22, d23	@ t3 = (K) (P6 + P7) << 32
	vmov.i64	d23, #0
	vext.8	q9, q9, q9, #14
	veor	d20, d20, d21
	vmull.p8	q2, d27, d7		@ D = A*B
	vext.8	q11, q11, q11, #12
	vext.8	q10, q10, q10, #13
	veor	q8, q8, q9
	veor	q10, q10, q11
	veor	q2, q2, q8
	veor	q2, q2, q10
	veor	q1,q1,q0		@ Karatsuba post-processing
	veor	q1,q1,q2
	veor	d1,d1,d2
	veor	d4,d4,d3	@ Xh|Xl - 256-bit result

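@ Reduce the 256-bit product modulo the GHASH polynomial
@ x^128 + x^7 + x^2 + x + 1 in two shift-and-XOR phases, as the comment
@ below notes.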
	@ equivalent of reduction_avx from ghash-x86_64.pl
	vshl.i64	q9,q0,#57		@ 1st phase
	vshl.i64	q10,q0,#62
	veor	q10,q10,q9		@
	vshl.i64	q9,q0,#63
	veor	q10, q10, q9		@
	veor	d1,d1,d20	@
	veor	d4,d4,d21

	vshr.u64	q10,q0,#1		@ 2nd phase
	veor	q2,q2,q0
	veor	q0,q0,q10		@
	vshr.u64	q10,q10,#6
	vshr.u64	q0,q0,#1		@
	veor	q0,q0,q2		@
	veor	q0,q0,q10		@

	subs	r3,#16
	bne	.Loop_neon

#ifdef __ARMEL__
	vrev64.8	q0,q0
#endif
	sub	r0,#16
	vst1.64	d1,[r0]!		@ write out Xi
	vst1.64	d0,[r0]

	bx	lr					@ bx lr
.size	gcm_ghash_neon,.-gcm_ghash_neon
#endif
.byte	71,72,65,83,72,32,102,111,114,32,65,82,77,118,52,47,78,69,79,78,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align	2
.align	2