#! /usr/bin/env perl
# Copyright 2007-2016 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the OpenSSL license (the "License").  You may not use
# this file except in compliance with the License.  You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html


# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
#
# Permission to use under GPL terms is granted.
# ====================================================================

# SHA512 block procedure for ARMv4. September 2007.

# This code is ~4.5 (four and a half) times faster than code generated
# by gcc 3.4, and spends ~72 clock cycles per byte [on a single-issue
# Xscale PXA250 core].
#
# July 2010.
#
# Rescheduling for the dual-issue pipeline resulted in a 6% improvement
# on the Cortex-A8 core and ~40 cycles per processed byte.

# February 2011.
#
# Profiler-assisted and platform-specific optimization resulted in a 7%
# improvement on the Cortex-A8 core and ~38 cycles per byte.

# March 2011.
#
# Add NEON implementation. On Cortex-A8 it was measured to process
# one byte in 23.3 cycles, or ~60% faster than the integer-only code.

# August 2012.
#
# Improve NEON performance by 12% on Snapdragon S4. In absolute
# terms it's 22.6 cycles per byte, which is a disappointing result.
# Technical writers asserted that the 3-way S4 pipeline can sustain
# multiple NEON instructions per cycle, but dual NEON issue could not
# be observed; see http://www.openssl.org/~appro/Snapdragon-S4.html
# for further details. As a side note, Cortex-A15 processes one byte
# in 16 cycles.

# Byte order [in]dependence. =========================================
#
# Originally the caller was expected to maintain a specific *dword*
# order in h[0-7], namely with the most significant dword at the
# *lower* address, which was reflected in the two parameters below as
# 0 and 4. Now the caller is expected to maintain native byte order
# for whole 64-bit values.
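#
# For example, on a little-endian target LO=0 and HI=4, so with
# $Eoff=8*4=32 "ldr $Elo,[$ctx,#$Eoff+$lo]" fetches the low word of
# h[4] from byte offset 32 and "ldr $Ehi,[$ctx,#$Eoff+$hi]" the high
# word from offset 36; on a big-endian target the two offsets swap.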
$hi="HI";
$lo="LO";
# ====================================================================

$flavour = shift;
if ($flavour=~/\w[\w\-]*\.\w+$/) { $output=$flavour; undef $flavour; }
else { while (($output=shift) && ($output!~/\w[\w\-]*\.\w+$/)) {} }

if ($flavour && $flavour ne "void") {
    $0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
    ( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
    ( $xlate="${dir}../../../perlasm/arm-xlate.pl" and -f $xlate) or
    die "can't locate arm-xlate.pl";

    open OUT,"| \"$^X\" $xlate $flavour $output";
    *STDOUT=*OUT;
} else {
    open OUT,">$output";
    *STDOUT=*OUT;
}

$ctx="r0";	# parameter block
$inp="r1";
$len="r2";

$Tlo="r3";
$Thi="r4";
$Alo="r5";
$Ahi="r6";
$Elo="r7";
$Ehi="r8";
$t0="r9";
$t1="r10";
$t2="r11";
$t3="r12";
############	r13 is stack pointer
$Ktbl="r14";
############	r15 is program counter

$Aoff=8*0;
$Boff=8*1;
$Coff=8*2;
$Doff=8*3;
$Eoff=8*4;
$Foff=8*5;
$Goff=8*6;
$Hoff=8*7;
$Xoff=8*8;

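# The eight 8-byte slots above mirror the state words a..h in an
# sp-based frame (a and e are also cached in registers); the message
# schedule X[] starts at $Xoff.  Each round lowers sp by 8 and stores
# one 64-bit X word, so after 80 rounds the frame has grown by 640
# bytes, which the loop epilogue reclaims (add sp,sp,#640).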
sub BODY_00_15() {
my $magic = shift;
$code.=<<___;
	@ Sigma1(x)	(ROTR((x),14) ^ ROTR((x),18)  ^ ROTR((x),41))
	@ LO		lo>>14^hi<<18 ^ lo>>18^hi<<14 ^ hi>>9^lo<<23
	@ HI		hi>>14^lo<<18 ^ hi>>18^lo<<14 ^ lo>>9^hi<<23
	mov	$t0,$Elo,lsr#14
	str	$Tlo,[sp,#$Xoff+0]
	mov	$t1,$Ehi,lsr#14
	str	$Thi,[sp,#$Xoff+4]
	eor	$t0,$t0,$Ehi,lsl#18
	ldr	$t2,[sp,#$Hoff+0]	@ h.lo
	eor	$t1,$t1,$Elo,lsl#18
	ldr	$t3,[sp,#$Hoff+4]	@ h.hi
	eor	$t0,$t0,$Elo,lsr#18
	eor	$t1,$t1,$Ehi,lsr#18
	eor	$t0,$t0,$Ehi,lsl#14
	eor	$t1,$t1,$Elo,lsl#14
	eor	$t0,$t0,$Ehi,lsr#9
	eor	$t1,$t1,$Elo,lsr#9
	eor	$t0,$t0,$Elo,lsl#23
	eor	$t1,$t1,$Ehi,lsl#23	@ Sigma1(e)
	adds	$Tlo,$Tlo,$t0
	ldr	$t0,[sp,#$Foff+0]	@ f.lo
	adc	$Thi,$Thi,$t1		@ T += Sigma1(e)
	ldr	$t1,[sp,#$Foff+4]	@ f.hi
	adds	$Tlo,$Tlo,$t2
	ldr	$t2,[sp,#$Goff+0]	@ g.lo
	adc	$Thi,$Thi,$t3		@ T += h
	ldr	$t3,[sp,#$Goff+4]	@ g.hi

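	@ Ch(e,f,g) is computed as ((f^g)&e)^g, one boolean operation
	@ fewer per 32-bit half than the textbook (e&f)^(~e&g) form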
	eor	$t0,$t0,$t2
	str	$Elo,[sp,#$Eoff+0]
	eor	$t1,$t1,$t3
	str	$Ehi,[sp,#$Eoff+4]
	and	$t0,$t0,$Elo
	str	$Alo,[sp,#$Aoff+0]
	and	$t1,$t1,$Ehi
	str	$Ahi,[sp,#$Aoff+4]
	eor	$t0,$t0,$t2
	ldr	$t2,[$Ktbl,#$lo]	@ K[i].lo
	eor	$t1,$t1,$t3		@ Ch(e,f,g)
	ldr	$t3,[$Ktbl,#$hi]	@ K[i].hi

	adds	$Tlo,$Tlo,$t0
	ldr	$Elo,[sp,#$Doff+0]	@ d.lo
	adc	$Thi,$Thi,$t1		@ T += Ch(e,f,g)
	ldr	$Ehi,[sp,#$Doff+4]	@ d.hi
	adds	$Tlo,$Tlo,$t2
	and	$t0,$t2,#0xff
	adc	$Thi,$Thi,$t3		@ T += K[i]
	adds	$Elo,$Elo,$Tlo
	ldr	$t2,[sp,#$Boff+0]	@ b.lo
	adc	$Ehi,$Ehi,$Thi		@ d += T
	teq	$t0,#$magic

	ldr	$t3,[sp,#$Coff+0]	@ c.lo
#if __ARM_ARCH__>=7
	it	eq			@ Thumb2 thing, sanity check in ARM
#endif
	orreq	$Ktbl,$Ktbl,#1
	@ Sigma0(x)	(ROTR((x),28) ^ ROTR((x),34) ^ ROTR((x),39))
	@ LO		lo>>28^hi<<4  ^ hi>>2^lo<<30 ^ hi>>7^lo<<25
	@ HI		hi>>28^lo<<4  ^ lo>>2^hi<<30 ^ lo>>7^hi<<25
	mov	$t0,$Alo,lsr#28
	mov	$t1,$Ahi,lsr#28
	eor	$t0,$t0,$Ahi,lsl#4
	eor	$t1,$t1,$Alo,lsl#4
	eor	$t0,$t0,$Ahi,lsr#2
	eor	$t1,$t1,$Alo,lsr#2
	eor	$t0,$t0,$Alo,lsl#30
	eor	$t1,$t1,$Ahi,lsl#30
	eor	$t0,$t0,$Ahi,lsr#7
	eor	$t1,$t1,$Alo,lsr#7
	eor	$t0,$t0,$Alo,lsl#25
	eor	$t1,$t1,$Ahi,lsl#25	@ Sigma0(a)
	adds	$Tlo,$Tlo,$t0
	and	$t0,$Alo,$t2
	adc	$Thi,$Thi,$t1		@ T += Sigma0(a)

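	@ Maj(a,b,c) = (a&b)|((a|b)&c), its steps interleaved with the
	@ b/c loads and the final h += T accumulation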
	ldr	$t1,[sp,#$Boff+4]	@ b.hi
	orr	$Alo,$Alo,$t2
	ldr	$t2,[sp,#$Coff+4]	@ c.hi
	and	$Alo,$Alo,$t3
	and	$t3,$Ahi,$t1
	orr	$Ahi,$Ahi,$t1
	orr	$Alo,$Alo,$t0		@ Maj(a,b,c).lo
	and	$Ahi,$Ahi,$t2
	adds	$Alo,$Alo,$Tlo
	orr	$Ahi,$Ahi,$t3		@ Maj(a,b,c).hi
	sub	sp,sp,#8
	adc	$Ahi,$Ahi,$Thi		@ h += T
	tst	$Ktbl,#1
	add	$Ktbl,$Ktbl,#8
___
}
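
# A minimal cross-check of the split-rotation identity used in the
# Sigma/sigma comments above (illustrative only, never called by the
# generator; assumes a perl with 64-bit integers): a right rotation of
# the pair (hi:lo) by n<32 gives lo' = lo>>n ^ hi<<(32-n) and
# hi' = hi>>n ^ lo<<(32-n), while n>=32 is the same with the halves
# swapped and n reduced by 32 -- compare e.g. ROTR 41 in Sigma1 above.
sub rotr64_split {
my ($lo,$hi,$n) = @_;
	($lo,$hi,$n) = ($hi,$lo,$n-32) if ($n>=32);	# rotation by >=32 swaps halves
	return ($lo,$hi) if ($n==0);
	( (($lo>>$n) ^ ($hi<<(32-$n))) & 0xffffffff,
	  (($hi>>$n) ^ ($lo<<(32-$n))) & 0xffffffff );
}
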
$code=<<___;
#ifndef __KERNEL__
# include <GFp/arm_arch.h>
# define VFP_ABI_PUSH	vstmdb	sp!,{d8-d15}
# define VFP_ABI_POP	vldmia	sp!,{d8-d15}
#else
# define __ARM_ARCH__ __LINUX_ARM_ARCH__
# define __ARM_MAX_ARCH__ 7
# define VFP_ABI_PUSH
# define VFP_ABI_POP
#endif

@ Silence ARMv8 deprecated IT instruction warnings. This file is used by both
@ ARMv7 and ARMv8 processors and does not use ARMv8 instructions.
.arch  armv7-a

#ifdef __ARMEL__
# define LO 0
# define HI 4
# define WORD64(hi0,lo0,hi1,lo1)	.word	lo0,hi0, lo1,hi1
#else
# define HI 0
# define LO 4
# define WORD64(hi0,lo0,hi1,lo1)	.word	hi0,lo0, hi1,lo1
#endif

.text
#if defined(__thumb2__)
.syntax unified
.thumb
# define adrl adr
#else
.code	32
#endif

.type	K512,%object
.align	5
K512:
WORD64(0x428a2f98,0xd728ae22, 0x71374491,0x23ef65cd)
WORD64(0xb5c0fbcf,0xec4d3b2f, 0xe9b5dba5,0x8189dbbc)
WORD64(0x3956c25b,0xf348b538, 0x59f111f1,0xb605d019)
WORD64(0x923f82a4,0xaf194f9b, 0xab1c5ed5,0xda6d8118)
WORD64(0xd807aa98,0xa3030242, 0x12835b01,0x45706fbe)
WORD64(0x243185be,0x4ee4b28c, 0x550c7dc3,0xd5ffb4e2)
WORD64(0x72be5d74,0xf27b896f, 0x80deb1fe,0x3b1696b1)
WORD64(0x9bdc06a7,0x25c71235, 0xc19bf174,0xcf692694)
WORD64(0xe49b69c1,0x9ef14ad2, 0xefbe4786,0x384f25e3)
WORD64(0x0fc19dc6,0x8b8cd5b5, 0x240ca1cc,0x77ac9c65)
WORD64(0x2de92c6f,0x592b0275, 0x4a7484aa,0x6ea6e483)
WORD64(0x5cb0a9dc,0xbd41fbd4, 0x76f988da,0x831153b5)
WORD64(0x983e5152,0xee66dfab, 0xa831c66d,0x2db43210)
WORD64(0xb00327c8,0x98fb213f, 0xbf597fc7,0xbeef0ee4)
WORD64(0xc6e00bf3,0x3da88fc2, 0xd5a79147,0x930aa725)
WORD64(0x06ca6351,0xe003826f, 0x14292967,0x0a0e6e70)
WORD64(0x27b70a85,0x46d22ffc, 0x2e1b2138,0x5c26c926)
WORD64(0x4d2c6dfc,0x5ac42aed, 0x53380d13,0x9d95b3df)
WORD64(0x650a7354,0x8baf63de, 0x766a0abb,0x3c77b2a8)
WORD64(0x81c2c92e,0x47edaee6, 0x92722c85,0x1482353b)
WORD64(0xa2bfe8a1,0x4cf10364, 0xa81a664b,0xbc423001)
WORD64(0xc24b8b70,0xd0f89791, 0xc76c51a3,0x0654be30)
WORD64(0xd192e819,0xd6ef5218, 0xd6990624,0x5565a910)
WORD64(0xf40e3585,0x5771202a, 0x106aa070,0x32bbd1b8)
WORD64(0x19a4c116,0xb8d2d0c8, 0x1e376c08,0x5141ab53)
WORD64(0x2748774c,0xdf8eeb99, 0x34b0bcb5,0xe19b48a8)
WORD64(0x391c0cb3,0xc5c95a63, 0x4ed8aa4a,0xe3418acb)
WORD64(0x5b9cca4f,0x7763e373, 0x682e6ff3,0xd6b2b8a3)
WORD64(0x748f82ee,0x5defb2fc, 0x78a5636f,0x43172f60)
WORD64(0x84c87814,0xa1f0ab72, 0x8cc70208,0x1a6439ec)
WORD64(0x90befffa,0x23631e28, 0xa4506ceb,0xde82bde9)
WORD64(0xbef9a3f7,0xb2c67915, 0xc67178f2,0xe372532b)
WORD64(0xca273ece,0xea26619c, 0xd186b8c7,0x21c0c207)
WORD64(0xeada7dd6,0xcde0eb1e, 0xf57d4f7f,0xee6ed178)
WORD64(0x06f067aa,0x72176fba, 0x0a637dc5,0xa2c898a6)
WORD64(0x113f9804,0xbef90dae, 0x1b710b35,0x131c471b)
WORD64(0x28db77f5,0x23047d84, 0x32caab7b,0x40c72493)
WORD64(0x3c9ebe0a,0x15c9bebc, 0x431d67c4,0x9c100d4c)
WORD64(0x4cc5d4be,0xcb3e42b6, 0x597f299c,0xfc657e2a)
WORD64(0x5fcb6fab,0x3ad6faec, 0x6c44198c,0x4a475817)
.size	K512,.-K512
#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
.extern	GFp_armcap_P
.hidden	GFp_armcap_P
.LOPENSSL_armcap:
.word	GFp_armcap_P-.Lsha512_block_data_order
.skip	32-4
#else
.skip	32
#endif

.global	GFp_sha512_block_data_order
.type	GFp_sha512_block_data_order,%function
GFp_sha512_block_data_order:
.Lsha512_block_data_order:
#if __ARM_ARCH__<7 && !defined(__thumb2__)
	sub	r3,pc,#8		@ GFp_sha512_block_data_order
#else
	adr	r3,.Lsha512_block_data_order
#endif
#if __ARM_MAX_ARCH__>=7 && !defined(__KERNEL__)
	ldr	r12,.LOPENSSL_armcap
	ldr	r12,[r3,r12]		@ GFp_armcap_P
#ifdef	__APPLE__
	ldr	r12,[r12]
#endif
	tst	r12,#ARMV7_NEON
	bne	.LNEON
#endif
	add	$len,$inp,$len,lsl#7	@ len to point at the end of inp
	stmdb	sp!,{r4-r12,lr}
	sub	$Ktbl,r3,#672		@ K512
	sub	sp,sp,#9*8

	ldr	$Elo,[$ctx,#$Eoff+$lo]
	ldr	$Ehi,[$ctx,#$Eoff+$hi]
	ldr	$t0, [$ctx,#$Goff+$lo]
	ldr	$t1, [$ctx,#$Goff+$hi]
	ldr	$t2, [$ctx,#$Hoff+$lo]
	ldr	$t3, [$ctx,#$Hoff+$hi]
.Loop:
	str	$t0, [sp,#$Goff+0]
	str	$t1, [sp,#$Goff+4]
	str	$t2, [sp,#$Hoff+0]
	str	$t3, [sp,#$Hoff+4]
	ldr	$Alo,[$ctx,#$Aoff+$lo]
	ldr	$Ahi,[$ctx,#$Aoff+$hi]
	ldr	$Tlo,[$ctx,#$Boff+$lo]
	ldr	$Thi,[$ctx,#$Boff+$hi]
	ldr	$t0, [$ctx,#$Coff+$lo]
	ldr	$t1, [$ctx,#$Coff+$hi]
	ldr	$t2, [$ctx,#$Doff+$lo]
	ldr	$t3, [$ctx,#$Doff+$hi]
	str	$Tlo,[sp,#$Boff+0]
	str	$Thi,[sp,#$Boff+4]
	str	$t0, [sp,#$Coff+0]
	str	$t1, [sp,#$Coff+4]
	str	$t2, [sp,#$Doff+0]
	str	$t3, [sp,#$Doff+4]
	ldr	$Tlo,[$ctx,#$Foff+$lo]
	ldr	$Thi,[$ctx,#$Foff+$hi]
	str	$Tlo,[sp,#$Foff+0]
	str	$Thi,[sp,#$Foff+4]

.L00_15:
#if __ARM_ARCH__<7
	ldrb	$Tlo,[$inp,#7]
	ldrb	$t0, [$inp,#6]
	ldrb	$t1, [$inp,#5]
	ldrb	$t2, [$inp,#4]
	ldrb	$Thi,[$inp,#3]
	ldrb	$t3, [$inp,#2]
	orr	$Tlo,$Tlo,$t0,lsl#8
	ldrb	$t0, [$inp,#1]
	orr	$Tlo,$Tlo,$t1,lsl#16
	ldrb	$t1, [$inp],#8
	orr	$Tlo,$Tlo,$t2,lsl#24
	orr	$Thi,$Thi,$t3,lsl#8
	orr	$Thi,$Thi,$t0,lsl#16
	orr	$Thi,$Thi,$t1,lsl#24
#else
	ldr	$Tlo,[$inp,#4]
	ldr	$Thi,[$inp],#8
#ifdef __ARMEL__
	rev	$Tlo,$Tlo
	rev	$Thi,$Thi
#endif
#endif
___
	&BODY_00_15(0x94);
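	# 0x94 is the low byte of K[15].lo (0xcf692694): BODY_00_15 compares
	# the low byte of every K[i].lo against this magic value and sets
	# bit 0 of $Ktbl on a match, flagging the last of the 16 input rounds
	# for the "tst $Ktbl,#1" test below.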
$code.=<<___;
	tst	$Ktbl,#1
	beq	.L00_15
	ldr	$t0,[sp,#`$Xoff+8*(16-1)`+0]
	ldr	$t1,[sp,#`$Xoff+8*(16-1)`+4]
	bic	$Ktbl,$Ktbl,#1
.L16_79:
	@ sigma0(x)	(ROTR((x),1)  ^ ROTR((x),8)  ^ ((x)>>7))
	@ LO		lo>>1^hi<<31  ^ lo>>8^hi<<24 ^ lo>>7^hi<<25
	@ HI		hi>>1^lo<<31  ^ hi>>8^lo<<24 ^ hi>>7
	mov	$Tlo,$t0,lsr#1
	ldr	$t2,[sp,#`$Xoff+8*(16-14)`+0]
	mov	$Thi,$t1,lsr#1
	ldr	$t3,[sp,#`$Xoff+8*(16-14)`+4]
	eor	$Tlo,$Tlo,$t1,lsl#31
	eor	$Thi,$Thi,$t0,lsl#31
	eor	$Tlo,$Tlo,$t0,lsr#8
	eor	$Thi,$Thi,$t1,lsr#8
	eor	$Tlo,$Tlo,$t1,lsl#24
	eor	$Thi,$Thi,$t0,lsl#24
	eor	$Tlo,$Tlo,$t0,lsr#7
	eor	$Thi,$Thi,$t1,lsr#7
	eor	$Tlo,$Tlo,$t1,lsl#25

	@ sigma1(x)	(ROTR((x),19) ^ ROTR((x),61) ^ ((x)>>6))
	@ LO		lo>>19^hi<<13 ^ hi>>29^lo<<3 ^ lo>>6^hi<<26
	@ HI		hi>>19^lo<<13 ^ lo>>29^hi<<3 ^ hi>>6
	mov	$t0,$t2,lsr#19
	mov	$t1,$t3,lsr#19
	eor	$t0,$t0,$t3,lsl#13
	eor	$t1,$t1,$t2,lsl#13
	eor	$t0,$t0,$t3,lsr#29
	eor	$t1,$t1,$t2,lsr#29
	eor	$t0,$t0,$t2,lsl#3
	eor	$t1,$t1,$t3,lsl#3
	eor	$t0,$t0,$t2,lsr#6
	eor	$t1,$t1,$t3,lsr#6
	ldr	$t2,[sp,#`$Xoff+8*(16-9)`+0]
	eor	$t0,$t0,$t3,lsl#26

	ldr	$t3,[sp,#`$Xoff+8*(16-9)`+4]
	adds	$Tlo,$Tlo,$t0
	ldr	$t0,[sp,#`$Xoff+8*16`+0]
	adc	$Thi,$Thi,$t1

	ldr	$t1,[sp,#`$Xoff+8*16`+4]
	adds	$Tlo,$Tlo,$t2
	adc	$Thi,$Thi,$t3
	adds	$Tlo,$Tlo,$t0
	adc	$Thi,$Thi,$t1
___
	&BODY_00_15(0x17);
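	# 0x17 is likewise the low byte of K[79].lo (0x4a475817); matching
	# it flags round 79, so the ldreq/beq below fall through and the
	# loop exits.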
$code.=<<___;
#if __ARM_ARCH__>=7
	ittt	eq			@ Thumb2 thing, sanity check in ARM
#endif
	ldreq	$t0,[sp,#`$Xoff+8*(16-1)`+0]
	ldreq	$t1,[sp,#`$Xoff+8*(16-1)`+4]
	beq	.L16_79
	bic	$Ktbl,$Ktbl,#1

	ldr	$Tlo,[sp,#$Boff+0]
	ldr	$Thi,[sp,#$Boff+4]
	ldr	$t0, [$ctx,#$Aoff+$lo]
	ldr	$t1, [$ctx,#$Aoff+$hi]
	ldr	$t2, [$ctx,#$Boff+$lo]
	ldr	$t3, [$ctx,#$Boff+$hi]
	adds	$t0,$Alo,$t0
	str	$t0, [$ctx,#$Aoff+$lo]
	adc	$t1,$Ahi,$t1
	str	$t1, [$ctx,#$Aoff+$hi]
	adds	$t2,$Tlo,$t2
	str	$t2, [$ctx,#$Boff+$lo]
	adc	$t3,$Thi,$t3
	str	$t3, [$ctx,#$Boff+$hi]

	ldr	$Alo,[sp,#$Coff+0]
	ldr	$Ahi,[sp,#$Coff+4]
	ldr	$Tlo,[sp,#$Doff+0]
	ldr	$Thi,[sp,#$Doff+4]
	ldr	$t0, [$ctx,#$Coff+$lo]
	ldr	$t1, [$ctx,#$Coff+$hi]
	ldr	$t2, [$ctx,#$Doff+$lo]
	ldr	$t3, [$ctx,#$Doff+$hi]
	adds	$t0,$Alo,$t0
	str	$t0, [$ctx,#$Coff+$lo]
	adc	$t1,$Ahi,$t1
	str	$t1, [$ctx,#$Coff+$hi]
	adds	$t2,$Tlo,$t2
	str	$t2, [$ctx,#$Doff+$lo]
	adc	$t3,$Thi,$t3
	str	$t3, [$ctx,#$Doff+$hi]

	ldr	$Tlo,[sp,#$Foff+0]
	ldr	$Thi,[sp,#$Foff+4]
	ldr	$t0, [$ctx,#$Eoff+$lo]
	ldr	$t1, [$ctx,#$Eoff+$hi]
	ldr	$t2, [$ctx,#$Foff+$lo]
	ldr	$t3, [$ctx,#$Foff+$hi]
	adds	$Elo,$Elo,$t0
	str	$Elo,[$ctx,#$Eoff+$lo]
	adc	$Ehi,$Ehi,$t1
	str	$Ehi,[$ctx,#$Eoff+$hi]
	adds	$t2,$Tlo,$t2
	str	$t2, [$ctx,#$Foff+$lo]
	adc	$t3,$Thi,$t3
	str	$t3, [$ctx,#$Foff+$hi]

	ldr	$Alo,[sp,#$Goff+0]
	ldr	$Ahi,[sp,#$Goff+4]
	ldr	$Tlo,[sp,#$Hoff+0]
	ldr	$Thi,[sp,#$Hoff+4]
	ldr	$t0, [$ctx,#$Goff+$lo]
	ldr	$t1, [$ctx,#$Goff+$hi]
	ldr	$t2, [$ctx,#$Hoff+$lo]
	ldr	$t3, [$ctx,#$Hoff+$hi]
	adds	$t0,$Alo,$t0
	str	$t0, [$ctx,#$Goff+$lo]
	adc	$t1,$Ahi,$t1
	str	$t1, [$ctx,#$Goff+$hi]
	adds	$t2,$Tlo,$t2
	str	$t2, [$ctx,#$Hoff+$lo]
	adc	$t3,$Thi,$t3
	str	$t3, [$ctx,#$Hoff+$hi]

	add	sp,sp,#640
	sub	$Ktbl,$Ktbl,#640

	teq	$inp,$len
	bne	.Loop

	add	sp,sp,#8*9		@ destroy frame
#if __ARM_ARCH__>=5
	ldmia	sp!,{r4-r12,pc}
#else
	ldmia	sp!,{r4-r12,lr}
	tst	lr,#1
	moveq	pc,lr			@ be binary compatible with V4, yet
	bx	lr			@ interoperable with Thumb ISA:-)
#endif
.size	GFp_sha512_block_data_order,.-GFp_sha512_block_data_order
___

{
my @Sigma0=(28,34,39);
my @Sigma1=(14,18,41);
my @sigma0=(1, 8, 7);
my @sigma1=(19,61,6);

my $Ktbl="r3";
my $cnt="r12";	# volatile register known as ip, intra-procedure-call scratch

my @X=map("d$_",(0..15));
my @V=($A,$B,$C,$D,$E,$F,$G,$H)=map("d$_",(16..23));
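
# NEON quad registers alias pairs of doubles (q0 is d1:d0 and so on up
# to q7 = d15:d14), so the sixteen schedule doubles in @X can equally
# be viewed as eight q registers; NEON_16_79 exploits this to update
# two 64-bit schedule words at a time.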

sub NEON_00_15() {
my $i=shift;
my ($a,$b,$c,$d,$e,$f,$g,$h)=@_;
my ($t0,$t1,$t2,$T1,$K,$Ch,$Maj)=map("d$_",(24..31));	# temps

$code.=<<___ if ($i<16 || $i&1);
	vshr.u64	$t0,$e,#@Sigma1[0]	@ $i
#if $i<16
	vld1.64		{@X[$i%16]},[$inp]!	@ handles unaligned
#endif
	vshr.u64	$t1,$e,#@Sigma1[1]
#if $i>0
	 vadd.i64	$a,$Maj			@ h+=Maj from the past
#endif
	vshr.u64	$t2,$e,#@Sigma1[2]
___
$code.=<<___;
	vld1.64		{$K},[$Ktbl,:64]!	@ K[i++]
	vsli.64		$t0,$e,#`64-@Sigma1[0]`
	vsli.64		$t1,$e,#`64-@Sigma1[1]`
	vmov		$Ch,$e
	vsli.64		$t2,$e,#`64-@Sigma1[2]`
#if $i<16 && defined(__ARMEL__)
	vrev64.8	@X[$i],@X[$i]
#endif
	veor		$t1,$t0
	vbsl		$Ch,$f,$g		@ Ch(e,f,g)
	vshr.u64	$t0,$a,#@Sigma0[0]
	veor		$t2,$t1			@ Sigma1(e)
	vadd.i64	$T1,$Ch,$h
	vshr.u64	$t1,$a,#@Sigma0[1]
	vsli.64		$t0,$a,#`64-@Sigma0[0]`
	vadd.i64	$T1,$t2
	vshr.u64	$t2,$a,#@Sigma0[2]
	vadd.i64	$K,@X[$i%16]
	vsli.64		$t1,$a,#`64-@Sigma0[1]`
	veor		$Maj,$a,$b
	vsli.64		$t2,$a,#`64-@Sigma0[2]`
	veor		$h,$t0,$t1
	vadd.i64	$T1,$K
	vbsl		$Maj,$c,$b		@ Maj(a,b,c)
	veor		$h,$t2			@ Sigma0(a)
	vadd.i64	$d,$T1
	vadd.i64	$Maj,$T1
	@ vadd.i64	$h,$Maj
___
}
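
# vbsl (bitwise select) evaluates each boolean function in a single
# instruction: with $Ch preloaded with e, "vbsl $Ch,$f,$g" computes
# (e&f)|(~e&g) = Ch(e,f,g); with $Maj preloaded with a^b,
# "vbsl $Maj,$c,$b" computes ((a^b)&c)|(~(a^b)&b) = Maj(a,b,c).
# The commented-out "vadd.i64 $h,$Maj" is intentional: the addition
# is deferred to the next round (the "h+=Maj from the past" above),
# or to loop exit, to hide its latency.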

sub NEON_16_79() {
my $i=shift;

if ($i&1)	{ &NEON_00_15($i,@_); return; }

# 2x-vectorized, therefore runs every 2nd round
my @X=map("q$_",(0..7));			# view @X as 128-bit vector
my ($t0,$t1,$s0,$s1) = map("q$_",(12..15));	# temps
my ($d0,$d1,$d2) = map("d$_",(24..26));		# temps from NEON_00_15
my $e=@_[4];					# $e from NEON_00_15
$i /= 2;
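# Two schedule words are updated at once on the q-register view:
# X[i] += sigma0(X[i+1]) + X[i+9] + sigma1(X[i+14]), i.e. the standard
# W[j] = W[j-16] + sigma0(W[j-15]) + W[j-7] + sigma1(W[j-2]) recurrence,
# with the vext instructions forming the misaligned X[i+1]/X[i+9] pairs.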
$code.=<<___;
	vshr.u64	$t0,@X[($i+7)%8],#@sigma1[0]
	vshr.u64	$t1,@X[($i+7)%8],#@sigma1[1]
	 vadd.i64	@_[0],d30			@ h+=Maj from the past
	vshr.u64	$s1,@X[($i+7)%8],#@sigma1[2]
	vsli.64		$t0,@X[($i+7)%8],#`64-@sigma1[0]`
	vext.8		$s0,@X[$i%8],@X[($i+1)%8],#8	@ X[i+1]
	vsli.64		$t1,@X[($i+7)%8],#`64-@sigma1[1]`
	veor		$s1,$t0
	vshr.u64	$t0,$s0,#@sigma0[0]
	veor		$s1,$t1				@ sigma1(X[i+14])
	vshr.u64	$t1,$s0,#@sigma0[1]
	vadd.i64	@X[$i%8],$s1
	vshr.u64	$s1,$s0,#@sigma0[2]
	vsli.64		$t0,$s0,#`64-@sigma0[0]`
	vsli.64		$t1,$s0,#`64-@sigma0[1]`
	vext.8		$s0,@X[($i+4)%8],@X[($i+5)%8],#8	@ X[i+9]
	veor		$s1,$t0
	vshr.u64	$d0,$e,#@Sigma1[0]		@ from NEON_00_15
	vadd.i64	@X[$i%8],$s0
	vshr.u64	$d1,$e,#@Sigma1[1]		@ from NEON_00_15
	veor		$s1,$t1				@ sigma0(X[i+1])
	vshr.u64	$d2,$e,#@Sigma1[2]		@ from NEON_00_15
	vadd.i64	@X[$i%8],$s1
___
	&NEON_00_15(2*$i,@_);
}

$code.=<<___;
#if __ARM_MAX_ARCH__>=7
.arch	armv7-a
.fpu	neon

.type	sha512_block_data_order_neon,%function
.align	4
sha512_block_data_order_neon:
.LNEON:
	dmb				@ errata #451034 on early Cortex A8
	add	$len,$inp,$len,lsl#7	@ len to point at the end of inp
	adr	$Ktbl,K512
	VFP_ABI_PUSH
	vldmia	$ctx,{$A-$H}		@ load context
.Loop_neon:
___
for($i=0;$i<16;$i++)	{ &NEON_00_15($i,@V); unshift(@V,pop(@V)); }
$code.=<<___;
	mov		$cnt,#4
.L16_79_neon:
	subs		$cnt,#1
___
for(;$i<32;$i++)	{ &NEON_16_79($i,@V); unshift(@V,pop(@V)); }
$code.=<<___;
	bne		.L16_79_neon

	 vadd.i64	$A,d30		@ h+=Maj from the past
	vldmia		$ctx,{d24-d31}	@ load context to temp
	vadd.i64	q8,q12		@ vectorized accumulate
	vadd.i64	q9,q13
	vadd.i64	q10,q14
	vadd.i64	q11,q15
	vstmia		$ctx,{$A-$H}	@ save context
	teq		$inp,$len
	sub		$Ktbl,#640	@ rewind K512
	bne		.Loop_neon

	VFP_ABI_POP
	ret				@ bx lr
.size	sha512_block_data_order_neon,.-sha512_block_data_order_neon
#endif
___
}
$code.=<<___;
.asciz	"SHA512 block transform for ARMv4/NEON, CRYPTOGAMS by <appro\@openssl.org>"
___

$code =~ s/\`([^\`]*)\`/eval $1/gem;
$code =~ s/\bbx\s+lr\b/.word\t0xe12fff1e/gm;	# make it possible to compile with -march=armv4
$code =~ s/\bret\b/bx	lr/gm;

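# Three post-processing passes run over $code above: backquoted
# expressions are evaluated, literal "bx lr" (reachable on pre-ARMv5
# paths) is replaced by its raw opcode word 0xe12fff1e so the file
# still assembles with -march=armv4, and only then is "ret" in the
# ARMv7-only NEON path rewritten into a genuine "bx lr".  The loop
# below reproduces this file's comment header at the top of the
# generated assembly, converting '#' comments to the assembler's
# '@' style.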
open SELF,$0;
while(<SELF>) {
	next if (/^#!/);
	last if (!s/^#/@/ and !/^$/);
	print;
}
close SELF;

print $code;
close STDOUT or die "error closing STDOUT"; # enforce flush