#! /usr/bin/env perl
# Copyright 2016-2020 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the Apache License 2.0 (the "License").  You may not use
# this file except in compliance with the License.  You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html

#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# June 2015
#
# ChaCha20 for ARMv8.
#
# April 2019
#
# Replace the 3xNEON+1xIALU code path with 4+1. 4+1 is actually the
# fastest option on most(*), but not all, processors, yet 6+2 is
# retained. This is because its penalties are considered tolerable in
# comparison to the improvement it brings on processors where 6+2
# helps, most notably +37% on ThunderX2, a server-oriented processor
# that has to serve as many requests as possible. The others are
# mostly client processors, where performance doesn't have to be
# absolute top-notch, just fast enough, as the majority of time is
# spent "entertaining" a relatively slow human.
#
# Performance in cycles per byte on a large buffer.
#
#			IALU/gcc-4.9	4xNEON+1xIALU	6xNEON+2xIALU
#
# Apple A7		5.50/+49%	2.72		1.60
# Cortex-A53		8.40/+80%	4.06		4.45(*)
# Cortex-A57		8.06/+43%	4.15		4.40(*)
# Denver		4.50/+82%	2.30		2.70(*)
# X-Gene		9.50/+46%	8.20		8.90(*)
# Mongoose		8.00/+44%	2.74		3.12(*)
# Kryo			8.17/+50%	4.47		4.65(*)
# ThunderX2		7.22/+48%	5.64		4.10
#
# (*)	slower than 4+1 :-(

# $output is the last argument if it looks like a file (it has an extension)
# $flavour is the first argument if it doesn't look like a file
$output = $#ARGV >= 0 && $ARGV[$#ARGV] =~ m|\.\w+$| ? pop : undef;
$flavour = $#ARGV >= 0 && $ARGV[0] !~ m|\.| ? shift : undef;
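#
# A typical invocation (illustrative names, assuming the usual OpenSSL
# layout): "perl chacha-armv8.pl linux64 chacha-armv8.S", where the
# flavour selects the arm-xlate.pl output convention and the file
# extension marks the last argument as the output file.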

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
( $xlate="${dir}arm-xlate.pl" and -f $xlate ) or
( $xlate="${dir}../../perlasm/arm-xlate.pl" and -f $xlate) or
die "can't locate arm-xlate.pl";

open OUT,"| \"$^X\" $xlate $flavour \"$output\""
    or die "can't call $xlate: $!";
*STDOUT=*OUT;

sub AUTOLOAD()		# thunk [simplified] x86-style perlasm
{ my $opcode = $AUTOLOAD; $opcode =~ s/.*:://; $opcode =~ s/_/\./;
  my $arg = pop;
    $arg = "#$arg" if ($arg*1 eq $arg);
    $code .= "\t$opcode\t".join(',',@_,$arg)."\n";
}
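# For example: &add_32("x5","x5","x9") appends "add.32\tx5,x5,x9" to
# $code, and a purely numeric last argument gains a "#" prefix, so
# &ror_32("x6","x6",16) emits "ror.32\tx6,x6,#16".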

my ($out,$inp,$len,$key,$ctr) = map("x$_",(0..4));

my @x=map("x$_",(5..17,19..21));
my @d=map("x$_",(22..28,30));

sub ROUND {
my ($a0,$b0,$c0,$d0)=@_;
my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
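# ($_&~3)+(($_+1)&3) advances each index within its aligned group of
# four, so ROUND(0,4,8,12) also emits the columns (1,5,9,13),
# (2,6,10,14), (3,7,11,15), and ROUND(0,5,10,15) covers the remaining
# diagonals (1,6,11,12), (2,7,8,13), (3,4,9,14). The right-rotates by
# 16,20,24,25 below implement ChaCha's left-rotates by 16,12,8,7 on
# 32-bit words.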

    (
	"&add_32	(@x[$a0],@x[$a0],@x[$b0])",
	 "&add_32	(@x[$a1],@x[$a1],@x[$b1])",
	  "&add_32	(@x[$a2],@x[$a2],@x[$b2])",
	   "&add_32	(@x[$a3],@x[$a3],@x[$b3])",
	"&eor_32	(@x[$d0],@x[$d0],@x[$a0])",
	 "&eor_32	(@x[$d1],@x[$d1],@x[$a1])",
	  "&eor_32	(@x[$d2],@x[$d2],@x[$a2])",
	   "&eor_32	(@x[$d3],@x[$d3],@x[$a3])",
	"&ror_32	(@x[$d0],@x[$d0],16)",
	 "&ror_32	(@x[$d1],@x[$d1],16)",
	  "&ror_32	(@x[$d2],@x[$d2],16)",
	   "&ror_32	(@x[$d3],@x[$d3],16)",

	"&add_32	(@x[$c0],@x[$c0],@x[$d0])",
	 "&add_32	(@x[$c1],@x[$c1],@x[$d1])",
	  "&add_32	(@x[$c2],@x[$c2],@x[$d2])",
	   "&add_32	(@x[$c3],@x[$c3],@x[$d3])",
	"&eor_32	(@x[$b0],@x[$b0],@x[$c0])",
	 "&eor_32	(@x[$b1],@x[$b1],@x[$c1])",
	  "&eor_32	(@x[$b2],@x[$b2],@x[$c2])",
	   "&eor_32	(@x[$b3],@x[$b3],@x[$c3])",
	"&ror_32	(@x[$b0],@x[$b0],20)",
	 "&ror_32	(@x[$b1],@x[$b1],20)",
	  "&ror_32	(@x[$b2],@x[$b2],20)",
	   "&ror_32	(@x[$b3],@x[$b3],20)",

	"&add_32	(@x[$a0],@x[$a0],@x[$b0])",
	 "&add_32	(@x[$a1],@x[$a1],@x[$b1])",
	  "&add_32	(@x[$a2],@x[$a2],@x[$b2])",
	   "&add_32	(@x[$a3],@x[$a3],@x[$b3])",
	"&eor_32	(@x[$d0],@x[$d0],@x[$a0])",
	 "&eor_32	(@x[$d1],@x[$d1],@x[$a1])",
	  "&eor_32	(@x[$d2],@x[$d2],@x[$a2])",
	   "&eor_32	(@x[$d3],@x[$d3],@x[$a3])",
	"&ror_32	(@x[$d0],@x[$d0],24)",
	 "&ror_32	(@x[$d1],@x[$d1],24)",
	  "&ror_32	(@x[$d2],@x[$d2],24)",
	   "&ror_32	(@x[$d3],@x[$d3],24)",

	"&add_32	(@x[$c0],@x[$c0],@x[$d0])",
	 "&add_32	(@x[$c1],@x[$c1],@x[$d1])",
	  "&add_32	(@x[$c2],@x[$c2],@x[$d2])",
	   "&add_32	(@x[$c3],@x[$c3],@x[$d3])",
	"&eor_32	(@x[$b0],@x[$b0],@x[$c0])",
	 "&eor_32	(@x[$b1],@x[$b1],@x[$c1])",
	  "&eor_32	(@x[$b2],@x[$b2],@x[$c2])",
	   "&eor_32	(@x[$b3],@x[$b3],@x[$c3])",
	"&ror_32	(@x[$b0],@x[$b0],25)",
	 "&ror_32	(@x[$b1],@x[$b1],25)",
	  "&ror_32	(@x[$b2],@x[$b2],25)",
	   "&ror_32	(@x[$b3],@x[$b3],25)"
    );
}

$code.=<<___;
#include "arm_arch.h"
#ifndef	__KERNEL__
.extern	OPENSSL_armcap_P
.hidden	OPENSSL_armcap_P
#endif

.text

.align	5
.Lsigma:
.quad	0x3320646e61707865,0x6b20657479622d32		// endian-neutral
.Lone:
.long	1,2,3,4
.Lrot24:
.long	0x02010003,0x06050407,0x0a09080b,0x0e0d0c0f
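// .Lone = {1,2,3,4}: counter offsets for the four NEON lanes (the
// interleaved integer block keeps counter+0); .Lrot24 is a tbl index
// vector that rotates each 32-bit lane left by 8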
.asciz	"ChaCha20 for ARMv8, CRYPTOGAMS by \@dot-asm"

.globl	ChaCha20_ctr32
.type	ChaCha20_ctr32,%function
.align	5
ChaCha20_ctr32:
	AARCH64_SIGN_LINK_REGISTER
	cbz	$len,.Labort
	cmp	$len,#192
	b.lo	.Lshort

#ifndef	__KERNEL__
	adrp	x17,OPENSSL_armcap_P
	ldr	w17,[x17,#:lo12:OPENSSL_armcap_P]
	tst	w17,#ARMV7_NEON
	b.ne	.LChaCha20_neon
#endif

.Lshort:
	stp	x29,x30,[sp,#-96]!
	add	x29,sp,#0

	adr	@x[0],.Lsigma
	stp	x19,x20,[sp,#16]
	stp	x21,x22,[sp,#32]
	stp	x23,x24,[sp,#48]
	stp	x25,x26,[sp,#64]
	stp	x27,x28,[sp,#80]
	sub	sp,sp,#64

	ldp	@d[0],@d[1],[@x[0]]		// load sigma
	ldp	@d[2],@d[3],[$key]		// load key
	ldp	@d[4],@d[5],[$key,#16]
	ldp	@d[6],@d[7],[$ctr]		// load counter
#ifdef	__AARCH64EB__
	ror	@d[2],@d[2],#32
	ror	@d[3],@d[3],#32
	ror	@d[4],@d[4],#32
	ror	@d[5],@d[5],#32
	ror	@d[6],@d[6],#32
	ror	@d[7],@d[7],#32
#endif

.Loop_outer:
	mov.32	@x[0],@d[0]			// unpack key block
	lsr	@x[1],@d[0],#32
	mov.32	@x[2],@d[1]
	lsr	@x[3],@d[1],#32
	mov.32	@x[4],@d[2]
	lsr	@x[5],@d[2],#32
	mov.32	@x[6],@d[3]
	lsr	@x[7],@d[3],#32
	mov.32	@x[8],@d[4]
	lsr	@x[9],@d[4],#32
	mov.32	@x[10],@d[5]
	lsr	@x[11],@d[5],#32
	mov.32	@x[12],@d[6]
	lsr	@x[13],@d[6],#32
	mov.32	@x[14],@d[7]
	lsr	@x[15],@d[7],#32

	mov	$ctr,#10
	subs	$len,$len,#64
.Loop:
	sub	$ctr,$ctr,#1
___
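	# One .Loop iteration is a "double round": a round on the
	# columns, then one on the diagonals; ten iterations make up
	# ChaCha20's 20 rounds.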
	foreach (&ROUND(0, 4, 8,12)) { eval; }
	foreach (&ROUND(0, 5,10,15)) { eval; }
$code.=<<___;
	cbnz	$ctr,.Loop

	add.32	@x[0],@x[0],@d[0]		// accumulate key block
	add	@x[1],@x[1],@d[0],lsr#32
	add.32	@x[2],@x[2],@d[1]
	add	@x[3],@x[3],@d[1],lsr#32
	add.32	@x[4],@x[4],@d[2]
	add	@x[5],@x[5],@d[2],lsr#32
	add.32	@x[6],@x[6],@d[3]
	add	@x[7],@x[7],@d[3],lsr#32
	add.32	@x[8],@x[8],@d[4]
	add	@x[9],@x[9],@d[4],lsr#32
	add.32	@x[10],@x[10],@d[5]
	add	@x[11],@x[11],@d[5],lsr#32
	add.32	@x[12],@x[12],@d[6]
	add	@x[13],@x[13],@d[6],lsr#32
	add.32	@x[14],@x[14],@d[7]
	add	@x[15],@x[15],@d[7],lsr#32

	b.lo	.Ltail

	add	@x[0],@x[0],@x[1],lsl#32	// pack
	add	@x[2],@x[2],@x[3],lsl#32
	ldp	@x[1],@x[3],[$inp,#0]		// load input
	add	@x[4],@x[4],@x[5],lsl#32
	add	@x[6],@x[6],@x[7],lsl#32
	ldp	@x[5],@x[7],[$inp,#16]
	add	@x[8],@x[8],@x[9],lsl#32
	add	@x[10],@x[10],@x[11],lsl#32
	ldp	@x[9],@x[11],[$inp,#32]
	add	@x[12],@x[12],@x[13],lsl#32
	add	@x[14],@x[14],@x[15],lsl#32
	ldp	@x[13],@x[15],[$inp,#48]
	add	$inp,$inp,#64
#ifdef	__AARCH64EB__
	rev	@x[0],@x[0]
	rev	@x[2],@x[2]
	rev	@x[4],@x[4]
	rev	@x[6],@x[6]
	rev	@x[8],@x[8]
	rev	@x[10],@x[10]
	rev	@x[12],@x[12]
	rev	@x[14],@x[14]
#endif
	eor	@x[0],@x[0],@x[1]
	eor	@x[2],@x[2],@x[3]
	eor	@x[4],@x[4],@x[5]
	eor	@x[6],@x[6],@x[7]
	eor	@x[8],@x[8],@x[9]
	eor	@x[10],@x[10],@x[11]
	eor	@x[12],@x[12],@x[13]
	eor	@x[14],@x[14],@x[15]

	stp	@x[0],@x[2],[$out,#0]		// store output
	 add	@d[6],@d[6],#1			// increment counter
	stp	@x[4],@x[6],[$out,#16]
	stp	@x[8],@x[10],[$out,#32]
	stp	@x[12],@x[14],[$out,#48]
	add	$out,$out,#64

	b.hi	.Loop_outer

	ldp	x19,x20,[x29,#16]
	add	sp,sp,#64
	ldp	x21,x22,[x29,#32]
	ldp	x23,x24,[x29,#48]
	ldp	x25,x26,[x29,#64]
	ldp	x27,x28,[x29,#80]
	ldp	x29,x30,[sp],#96
.Labort:
	AARCH64_VALIDATE_LINK_REGISTER
	ret

.align	4
.Ltail:
	add	$len,$len,#64
.Less_than_64:
	sub	$out,$out,#1
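	// $out is biased by -1 because .Loop_tail below increments
	// $len before the strb, while the ldrb pair uses the old index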
	add	$inp,$inp,$len
	add	$out,$out,$len
	add	$ctr,sp,$len
	neg	$len,$len

	add	@x[0],@x[0],@x[1],lsl#32	// pack
	add	@x[2],@x[2],@x[3],lsl#32
	add	@x[4],@x[4],@x[5],lsl#32
	add	@x[6],@x[6],@x[7],lsl#32
	add	@x[8],@x[8],@x[9],lsl#32
	add	@x[10],@x[10],@x[11],lsl#32
	add	@x[12],@x[12],@x[13],lsl#32
	add	@x[14],@x[14],@x[15],lsl#32
#ifdef	__AARCH64EB__
	rev	@x[0],@x[0]
	rev	@x[2],@x[2]
	rev	@x[4],@x[4]
	rev	@x[6],@x[6]
	rev	@x[8],@x[8]
	rev	@x[10],@x[10]
	rev	@x[12],@x[12]
	rev	@x[14],@x[14]
#endif
	stp	@x[0],@x[2],[sp,#0]
	stp	@x[4],@x[6],[sp,#16]
	stp	@x[8],@x[10],[sp,#32]
	stp	@x[12],@x[14],[sp,#48]

.Loop_tail:
	ldrb	w10,[$inp,$len]
	ldrb	w11,[$ctr,$len]
	add	$len,$len,#1
	eor	w10,w10,w11
	strb	w10,[$out,$len]
	cbnz	$len,.Loop_tail

	stp	xzr,xzr,[sp,#0]
	stp	xzr,xzr,[sp,#16]
	stp	xzr,xzr,[sp,#32]
	stp	xzr,xzr,[sp,#48]

	ldp	x19,x20,[x29,#16]
	add	sp,sp,#64
	ldp	x21,x22,[x29,#32]
	ldp	x23,x24,[x29,#48]
	ldp	x25,x26,[x29,#64]
	ldp	x27,x28,[x29,#80]
	ldp	x29,x30,[sp],#96
	AARCH64_VALIDATE_LINK_REGISTER
	ret
.size	ChaCha20_ctr32,.-ChaCha20_ctr32
___

{{{
my @K = map("v$_.4s",(0..3));
my ($xt0,$xt1,$xt2,$xt3, $CTR,$ROT24) = map("v$_.4s",(4..9));
my @X = map("v$_.4s",(16,20,24,28, 17,21,25,29, 18,22,26,30, 19,23,27,31));
my ($xa0,$xa1,$xa2,$xa3, $xb0,$xb1,$xb2,$xb3,
    $xc0,$xc1,$xc2,$xc3, $xd0,$xd1,$xd2,$xd3) = @X;

sub NEON_lane_ROUND {
my ($a0,$b0,$c0,$d0)=@_;
my ($a1,$b1,$c1,$d1)=map(($_&~3)+(($_+1)&3),($a0,$b0,$c0,$d0));
my ($a2,$b2,$c2,$d2)=map(($_&~3)+(($_+1)&3),($a1,$b1,$c1,$d1));
my ($a3,$b3,$c3,$d3)=map(($_&~3)+(($_+1)&3),($a2,$b2,$c2,$d2));
my @x=map("'$_'",@X);
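# NEON has no rotate instruction, so the rotations are synthesized:
# rev32 on .8h elements swaps 16-bit halves (rotate by 16), ushr+sli
# pairs build the rotates by 12 and 7, and tbl with $ROT24 performs
# the byte-wise rotate by 8. Each vector holds one lane of four
# independent blocks, hence the zip1/zip2 transposition on output.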

	(
	"&add		(@x[$a0],@x[$a0],@x[$b0])",	# Q1
	 "&add		(@x[$a1],@x[$a1],@x[$b1])",	# Q2
	  "&add		(@x[$a2],@x[$a2],@x[$b2])",	# Q3
	   "&add	(@x[$a3],@x[$a3],@x[$b3])",	# Q4
	"&eor		(@x[$d0],@x[$d0],@x[$a0])",
	 "&eor		(@x[$d1],@x[$d1],@x[$a1])",
	  "&eor		(@x[$d2],@x[$d2],@x[$a2])",
	   "&eor	(@x[$d3],@x[$d3],@x[$a3])",
	"&rev32_16	(@x[$d0],@x[$d0])",
	 "&rev32_16	(@x[$d1],@x[$d1])",
	  "&rev32_16	(@x[$d2],@x[$d2])",
	   "&rev32_16	(@x[$d3],@x[$d3])",

	"&add		(@x[$c0],@x[$c0],@x[$d0])",
	 "&add		(@x[$c1],@x[$c1],@x[$d1])",
	  "&add		(@x[$c2],@x[$c2],@x[$d2])",
	   "&add	(@x[$c3],@x[$c3],@x[$d3])",
	"&eor		('$xt0',@x[$b0],@x[$c0])",
	 "&eor		('$xt1',@x[$b1],@x[$c1])",
	  "&eor		('$xt2',@x[$b2],@x[$c2])",
	   "&eor	('$xt3',@x[$b3],@x[$c3])",
	"&ushr		(@x[$b0],'$xt0',20)",
	 "&ushr		(@x[$b1],'$xt1',20)",
	  "&ushr	(@x[$b2],'$xt2',20)",
	   "&ushr	(@x[$b3],'$xt3',20)",
	"&sli		(@x[$b0],'$xt0',12)",
	 "&sli		(@x[$b1],'$xt1',12)",
	  "&sli		(@x[$b2],'$xt2',12)",
	   "&sli	(@x[$b3],'$xt3',12)",

	"&add		(@x[$a0],@x[$a0],@x[$b0])",
	 "&add		(@x[$a1],@x[$a1],@x[$b1])",
	  "&add		(@x[$a2],@x[$a2],@x[$b2])",
	   "&add	(@x[$a3],@x[$a3],@x[$b3])",
	"&eor		('$xt0',@x[$d0],@x[$a0])",
	 "&eor		('$xt1',@x[$d1],@x[$a1])",
	  "&eor		('$xt2',@x[$d2],@x[$a2])",
	   "&eor	('$xt3',@x[$d3],@x[$a3])",
	"&tbl		(@x[$d0],'{$xt0}','$ROT24')",
	 "&tbl		(@x[$d1],'{$xt1}','$ROT24')",
	  "&tbl		(@x[$d2],'{$xt2}','$ROT24')",
	   "&tbl	(@x[$d3],'{$xt3}','$ROT24')",

	"&add		(@x[$c0],@x[$c0],@x[$d0])",
	 "&add		(@x[$c1],@x[$c1],@x[$d1])",
	  "&add		(@x[$c2],@x[$c2],@x[$d2])",
	   "&add	(@x[$c3],@x[$c3],@x[$d3])",
	"&eor		('$xt0',@x[$b0],@x[$c0])",
	 "&eor		('$xt1',@x[$b1],@x[$c1])",
	  "&eor		('$xt2',@x[$b2],@x[$c2])",
	   "&eor	('$xt3',@x[$b3],@x[$c3])",
	"&ushr		(@x[$b0],'$xt0',25)",
	 "&ushr		(@x[$b1],'$xt1',25)",
	  "&ushr	(@x[$b2],'$xt2',25)",
	   "&ushr	(@x[$b3],'$xt3',25)",
	"&sli		(@x[$b0],'$xt0',7)",
	 "&sli		(@x[$b1],'$xt1',7)",
	  "&sli		(@x[$b2],'$xt2',7)",
	   "&sli	(@x[$b3],'$xt3',7)"
	);
}

$code.=<<___;

#ifdef	__KERNEL__
.globl	ChaCha20_neon
#endif
.type	ChaCha20_neon,%function
.align	5
ChaCha20_neon:
	AARCH64_SIGN_LINK_REGISTER
.LChaCha20_neon:
	stp	x29,x30,[sp,#-96]!
	add	x29,sp,#0

	adr	@x[0],.Lsigma
	stp	x19,x20,[sp,#16]
	stp	x21,x22,[sp,#32]
	stp	x23,x24,[sp,#48]
	stp	x25,x26,[sp,#64]
	stp	x27,x28,[sp,#80]
	cmp	$len,#512
	b.hs	.L512_or_more_neon

	sub	sp,sp,#64

	ldp	@d[0],@d[1],[@x[0]]		// load sigma
	ld1	{@K[0]},[@x[0]],#16
	ldp	@d[2],@d[3],[$key]		// load key
	ldp	@d[4],@d[5],[$key,#16]
	ld1	{@K[1],@K[2]},[$key]
	ldp	@d[6],@d[7],[$ctr]		// load counter
	ld1	{@K[3]},[$ctr]
	stp	d8,d9,[sp]			// meet ABI requirements
	ld1	{$CTR,$ROT24},[@x[0]]
#ifdef	__AARCH64EB__
	rev64	@K[0],@K[0]
	ror	@d[2],@d[2],#32
	ror	@d[3],@d[3],#32
	ror	@d[4],@d[4],#32
	ror	@d[5],@d[5],#32
	ror	@d[6],@d[6],#32
	ror	@d[7],@d[7],#32
#endif

.Loop_outer_neon:
	dup	$xa0,@{K[0]}[0]			// unpack key block
	 mov.32	@x[0],@d[0]
	dup	$xa1,@{K[0]}[1]
	 lsr	@x[1],@d[0],#32
	dup	$xa2,@{K[0]}[2]
	 mov.32	@x[2],@d[1]
	dup	$xa3,@{K[0]}[3]
	 lsr	@x[3],@d[1],#32
	dup	$xb0,@{K[1]}[0]
	 mov.32	@x[4],@d[2]
	dup	$xb1,@{K[1]}[1]
	 lsr	@x[5],@d[2],#32
	dup	$xb2,@{K[1]}[2]
	 mov.32	@x[6],@d[3]
	dup	$xb3,@{K[1]}[3]
	 lsr	@x[7],@d[3],#32
	dup	$xd0,@{K[3]}[0]
	 mov.32	@x[8],@d[4]
	dup	$xd1,@{K[3]}[1]
	 lsr	@x[9],@d[4],#32
	dup	$xd2,@{K[3]}[2]
	 mov.32	@x[10],@d[5]
	dup	$xd3,@{K[3]}[3]
	 lsr	@x[11],@d[5],#32
	add	$xd0,$xd0,$CTR
	 mov.32	@x[12],@d[6]
	dup	$xc0,@{K[2]}[0]
	 lsr	@x[13],@d[6],#32
	dup	$xc1,@{K[2]}[1]
	 mov.32	@x[14],@d[7]
	dup	$xc2,@{K[2]}[2]
	 lsr	@x[15],@d[7],#32
	dup	$xc3,@{K[2]}[3]

	mov	$ctr,#10
	subs	$len,$len,#320
.Loop_neon:
	sub	$ctr,$ctr,#1
___
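	# 4+1 interleave: after each NEON instruction of the four-lane
	# double round, issue one instruction of the scalar (IALU)
	# double round for a fifth block.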
	my @plus_one=&ROUND(0,4,8,12);
	foreach (&NEON_lane_ROUND(0,4,8,12))  { eval; eval(shift(@plus_one)); }

	@plus_one=&ROUND(0,5,10,15);
	foreach (&NEON_lane_ROUND(0,5,10,15)) { eval; eval(shift(@plus_one)); }
$code.=<<___;
	cbnz	$ctr,.Loop_neon

	add	$xd0,$xd0,$CTR

	zip1	$xt0,$xa0,$xa1			// transpose data
	zip1	$xt1,$xa2,$xa3
	zip2	$xt2,$xa0,$xa1
	zip2	$xt3,$xa2,$xa3
	zip1.64	$xa0,$xt0,$xt1
	zip2.64	$xa1,$xt0,$xt1
	zip1.64	$xa2,$xt2,$xt3
	zip2.64	$xa3,$xt2,$xt3

	zip1	$xt0,$xb0,$xb1
	zip1	$xt1,$xb2,$xb3
	zip2	$xt2,$xb0,$xb1
	zip2	$xt3,$xb2,$xb3
	zip1.64	$xb0,$xt0,$xt1
	zip2.64	$xb1,$xt0,$xt1
	zip1.64	$xb2,$xt2,$xt3
	zip2.64	$xb3,$xt2,$xt3

	zip1	$xt0,$xc0,$xc1
	 add.32	@x[0],@x[0],@d[0]		// accumulate key block
	zip1	$xt1,$xc2,$xc3
	 add	@x[1],@x[1],@d[0],lsr#32
	zip2	$xt2,$xc0,$xc1
	 add.32	@x[2],@x[2],@d[1]
	zip2	$xt3,$xc2,$xc3
	 add	@x[3],@x[3],@d[1],lsr#32
	zip1.64	$xc0,$xt0,$xt1
	 add.32	@x[4],@x[4],@d[2]
	zip2.64	$xc1,$xt0,$xt1
	 add	@x[5],@x[5],@d[2],lsr#32
	zip1.64	$xc2,$xt2,$xt3
	 add.32	@x[6],@x[6],@d[3]
	zip2.64	$xc3,$xt2,$xt3
	 add	@x[7],@x[7],@d[3],lsr#32

	zip1	$xt0,$xd0,$xd1
	 add.32	@x[8],@x[8],@d[4]
	zip1	$xt1,$xd2,$xd3
	 add	@x[9],@x[9],@d[4],lsr#32
	zip2	$xt2,$xd0,$xd1
	 add.32	@x[10],@x[10],@d[5]
	zip2	$xt3,$xd2,$xd3
	 add	@x[11],@x[11],@d[5],lsr#32
	zip1.64	$xd0,$xt0,$xt1
	 add.32	@x[12],@x[12],@d[6]
	zip2.64	$xd1,$xt0,$xt1
	 add	@x[13],@x[13],@d[6],lsr#32
	zip1.64	$xd2,$xt2,$xt3
	 add.32	@x[14],@x[14],@d[7]
	zip2.64	$xd3,$xt2,$xt3
	 add	@x[15],@x[15],@d[7],lsr#32

	b.lo	.Ltail_neon

	add	@x[0],@x[0],@x[1],lsl#32	// pack
	add	@x[2],@x[2],@x[3],lsl#32
	ldp	@x[1],@x[3],[$inp,#0]		// load input
	 add	$xa0,$xa0,@K[0]			// accumulate key block
	add	@x[4],@x[4],@x[5],lsl#32
	add	@x[6],@x[6],@x[7],lsl#32
	ldp	@x[5],@x[7],[$inp,#16]
	 add	$xb0,$xb0,@K[1]
	add	@x[8],@x[8],@x[9],lsl#32
	add	@x[10],@x[10],@x[11],lsl#32
	ldp	@x[9],@x[11],[$inp,#32]
	 add	$xc0,$xc0,@K[2]
	add	@x[12],@x[12],@x[13],lsl#32
	add	@x[14],@x[14],@x[15],lsl#32
	ldp	@x[13],@x[15],[$inp,#48]
	 add	$xd0,$xd0,@K[3]
	add	$inp,$inp,#64
#ifdef	__AARCH64EB__
	rev	@x[0],@x[0]
	rev	@x[2],@x[2]
	rev	@x[4],@x[4]
	rev	@x[6],@x[6]
	rev	@x[8],@x[8]
	rev	@x[10],@x[10]
	rev	@x[12],@x[12]
	rev	@x[14],@x[14]
#endif
	ld1.8	{$xt0-$xt3},[$inp],#64
	eor	@x[0],@x[0],@x[1]
	 add	$xa1,$xa1,@K[0]
	eor	@x[2],@x[2],@x[3]
	 add	$xb1,$xb1,@K[1]
	eor	@x[4],@x[4],@x[5]
	 add	$xc1,$xc1,@K[2]
	eor	@x[6],@x[6],@x[7]
	 add	$xd1,$xd1,@K[3]
	eor	@x[8],@x[8],@x[9]
	 eor	$xa0,$xa0,$xt0
	 movi	$xt0,#5
	eor	@x[10],@x[10],@x[11]
	 eor	$xb0,$xb0,$xt1
	eor	@x[12],@x[12],@x[13]
	 eor	$xc0,$xc0,$xt2
	eor	@x[14],@x[14],@x[15]
	 eor	$xd0,$xd0,$xt3
	 add	$CTR,$CTR,$xt0			// += 5
	 ld1.8	{$xt0-$xt3},[$inp],#64

	stp	@x[0],@x[2],[$out,#0]		// store output
	 add	@d[6],@d[6],#5			// increment counter
	stp	@x[4],@x[6],[$out,#16]
	stp	@x[8],@x[10],[$out,#32]
	stp	@x[12],@x[14],[$out,#48]
	add	$out,$out,#64

	st1.8	{$xa0-$xd0},[$out],#64
	 add	$xa2,$xa2,@K[0]
	 add	$xb2,$xb2,@K[1]
	 add	$xc2,$xc2,@K[2]
	 add	$xd2,$xd2,@K[3]
	ld1.8	{$xa0-$xd0},[$inp],#64

	eor	$xa1,$xa1,$xt0
	eor	$xb1,$xb1,$xt1
	eor	$xc1,$xc1,$xt2
	eor	$xd1,$xd1,$xt3
	st1.8	{$xa1-$xd1},[$out],#64
	 add	$xa3,$xa3,@K[0]
	 add	$xb3,$xb3,@K[1]
	 add	$xc3,$xc3,@K[2]
	 add	$xd3,$xd3,@K[3]
	ld1.8	{$xa1-$xd1},[$inp],#64

	eor	$xa2,$xa2,$xa0
	eor	$xb2,$xb2,$xb0
	eor	$xc2,$xc2,$xc0
	eor	$xd2,$xd2,$xd0
	st1.8	{$xa2-$xd2},[$out],#64

	eor	$xa3,$xa3,$xa1
	eor	$xb3,$xb3,$xb1
	eor	$xc3,$xc3,$xc1
	eor	$xd3,$xd3,$xd1
	st1.8	{$xa3-$xd3},[$out],#64

	b.hi	.Loop_outer_neon

	ldp	d8,d9,[sp]			// meet ABI requirements

	ldp	x19,x20,[x29,#16]
	add	sp,sp,#64
	ldp	x21,x22,[x29,#32]
	ldp	x23,x24,[x29,#48]
	ldp	x25,x26,[x29,#64]
	ldp	x27,x28,[x29,#80]
	ldp	x29,x30,[sp],#96
	AARCH64_VALIDATE_LINK_REGISTER
	ret

.align	4
.Ltail_neon:
	add	$len,$len,#320
	ldp	d8,d9,[sp]			// meet ABI requirements
	cmp	$len,#64
	b.lo	.Less_than_64

	add	@x[0],@x[0],@x[1],lsl#32	// pack
	add	@x[2],@x[2],@x[3],lsl#32
	ldp	@x[1],@x[3],[$inp,#0]		// load input
	add	@x[4],@x[4],@x[5],lsl#32
	add	@x[6],@x[6],@x[7],lsl#32
	ldp	@x[5],@x[7],[$inp,#16]
	add	@x[8],@x[8],@x[9],lsl#32
	add	@x[10],@x[10],@x[11],lsl#32
	ldp	@x[9],@x[11],[$inp,#32]
	add	@x[12],@x[12],@x[13],lsl#32
	add	@x[14],@x[14],@x[15],lsl#32
	ldp	@x[13],@x[15],[$inp,#48]
	add	$inp,$inp,#64
#ifdef	__AARCH64EB__
	rev	@x[0],@x[0]
	rev	@x[2],@x[2]
	rev	@x[4],@x[4]
	rev	@x[6],@x[6]
	rev	@x[8],@x[8]
	rev	@x[10],@x[10]
	rev	@x[12],@x[12]
	rev	@x[14],@x[14]
#endif
	eor	@x[0],@x[0],@x[1]
	eor	@x[2],@x[2],@x[3]
	eor	@x[4],@x[4],@x[5]
	eor	@x[6],@x[6],@x[7]
	eor	@x[8],@x[8],@x[9]
	eor	@x[10],@x[10],@x[11]
	eor	@x[12],@x[12],@x[13]
	eor	@x[14],@x[14],@x[15]

	stp	@x[0],@x[2],[$out,#0]		// store output
	 add	$xa0,$xa0,@K[0]			// accumulate key block
	stp	@x[4],@x[6],[$out,#16]
	 add	$xb0,$xb0,@K[1]
	stp	@x[8],@x[10],[$out,#32]
	 add	$xc0,$xc0,@K[2]
	stp	@x[12],@x[14],[$out,#48]
	 add	$xd0,$xd0,@K[3]
	add	$out,$out,#64
	b.eq	.Ldone_neon
	sub	$len,$len,#64
	cmp	$len,#64
	b.lo	.Last_neon

	ld1.8	{$xt0-$xt3},[$inp],#64
	eor	$xa0,$xa0,$xt0
	eor	$xb0,$xb0,$xt1
	eor	$xc0,$xc0,$xt2
	eor	$xd0,$xd0,$xt3
	st1.8	{$xa0-$xd0},[$out],#64
	b.eq	.Ldone_neon

	add	$xa0,$xa1,@K[0]
	add	$xb0,$xb1,@K[1]
	sub	$len,$len,#64
	add	$xc0,$xc1,@K[2]
	cmp	$len,#64
	add	$xd0,$xd1,@K[3]
	b.lo	.Last_neon

	ld1.8	{$xt0-$xt3},[$inp],#64
	eor	$xa1,$xa0,$xt0
	eor	$xb1,$xb0,$xt1
	eor	$xc1,$xc0,$xt2
	eor	$xd1,$xd0,$xt3
	st1.8	{$xa1-$xd1},[$out],#64
	b.eq	.Ldone_neon

	add	$xa0,$xa2,@K[0]
	add	$xb0,$xb2,@K[1]
	sub	$len,$len,#64
	add	$xc0,$xc2,@K[2]
	cmp	$len,#64
	add	$xd0,$xd2,@K[3]
	b.lo	.Last_neon

	ld1.8	{$xt0-$xt3},[$inp],#64
	eor	$xa2,$xa0,$xt0
	eor	$xb2,$xb0,$xt1
	eor	$xc2,$xc0,$xt2
	eor	$xd2,$xd0,$xt3
	st1.8	{$xa2-$xd2},[$out],#64
	b.eq	.Ldone_neon

	add	$xa0,$xa3,@K[0]
	add	$xb0,$xb3,@K[1]
	add	$xc0,$xc3,@K[2]
	add	$xd0,$xd3,@K[3]
	sub	$len,$len,#64

.Last_neon:
	st1.8	{$xa0-$xd0},[sp]

	sub	$out,$out,#1
	add	$inp,$inp,$len
	add	$out,$out,$len
	add	$ctr,sp,$len
	neg	$len,$len

.Loop_tail_neon:
	ldrb	w10,[$inp,$len]
	ldrb	w11,[$ctr,$len]
	add	$len,$len,#1
	eor	w10,w10,w11
	strb	w10,[$out,$len]
	cbnz	$len,.Loop_tail_neon

	stp	xzr,xzr,[sp,#0]
	stp	xzr,xzr,[sp,#16]
	stp	xzr,xzr,[sp,#32]
	stp	xzr,xzr,[sp,#48]

.Ldone_neon:
	ldp	x19,x20,[x29,#16]
	add	sp,sp,#64
	ldp	x21,x22,[x29,#32]
	ldp	x23,x24,[x29,#48]
	ldp	x25,x26,[x29,#64]
	ldp	x27,x28,[x29,#80]
	ldp	x29,x30,[sp],#96
	AARCH64_VALIDATE_LINK_REGISTER
	ret
.size	ChaCha20_neon,.-ChaCha20_neon
___
{
my @K = map("v$_.4s",(0..6));
my ($T0,$T1,$T2,$T3,$T4,$T5)=@K;
my ($A0,$B0,$C0,$D0,$A1,$B1,$C1,$D1,$A2,$B2,$C2,$D2,
    $A3,$B3,$C3,$D3,$A4,$B4,$C4,$D4,$A5,$B5,$C5,$D5) = map("v$_.4s",(8..31));
my $rot24 = @K[6];
my $ONE = "v7.4s";

sub NEONROUND {
my $odd = pop;
my ($a,$b,$c,$d,$t)=@_;

	(
	"&add		('$a','$a','$b')",
	"&eor		('$d','$d','$a')",
	"&rev32_16	('$d','$d')",		# vrot ($d,16)

	"&add		('$c','$c','$d')",
	"&eor		('$t','$b','$c')",
	"&ushr		('$b','$t',20)",
	"&sli		('$b','$t',12)",

	"&add		('$a','$a','$b')",
	"&eor		('$d','$d','$a')",
	"&tbl		('$d','{$d}','$rot24')",

	"&add		('$c','$c','$d')",
	"&eor		('$t','$b','$c')",
	"&ushr		('$b','$t',25)",
	"&sli		('$b','$t',7)",

	"&ext		('$c','$c','$c',8)",
	"&ext		('$d','$d','$d',$odd?4:12)",
	"&ext		('$b','$b','$b',$odd?12:4)"
	);
}
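# Unlike NEON_lane_ROUND above, each vector here holds one row of a
# single block, so no output transposition is needed; the trailing
# ext instructions rotate the b/c/d rows between column and diagonal
# positions, with $odd selecting the forward or inverse shuffle.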

$code.=<<___;
.type	ChaCha20_512_neon,%function
.align	5
ChaCha20_512_neon:
	AARCH64_SIGN_LINK_REGISTER
	stp	x29,x30,[sp,#-96]!
	add	x29,sp,#0

	adr	@x[0],.Lsigma
	stp	x19,x20,[sp,#16]
	stp	x21,x22,[sp,#32]
	stp	x23,x24,[sp,#48]
	stp	x25,x26,[sp,#64]
	stp	x27,x28,[sp,#80]

.L512_or_more_neon:
	sub	sp,sp,#128+64

	eor	$ONE,$ONE,$ONE
	ldp	@d[0],@d[1],[@x[0]]		// load sigma
	ld1	{@K[0]},[@x[0]],#16
	ldp	@d[2],@d[3],[$key]		// load key
	ldp	@d[4],@d[5],[$key,#16]
	ld1	{@K[1],@K[2]},[$key]
	ldp	@d[6],@d[7],[$ctr]		// load counter
	ld1	{@K[3]},[$ctr]
	ld1	{$ONE}[0],[@x[0]]
	add	$key,@x[0],#16			// .Lrot24
#ifdef	__AARCH64EB__
	rev64	@K[0],@K[0]
	ror	@d[2],@d[2],#32
	ror	@d[3],@d[3],#32
	ror	@d[4],@d[4],#32
	ror	@d[5],@d[5],#32
	ror	@d[6],@d[6],#32
	ror	@d[7],@d[7],#32
#endif
	add	@K[3],@K[3],$ONE		// += 1
	stp	@K[0],@K[1],[sp,#0]		// off-load key block, invariant part
	add	@K[3],@K[3],$ONE		// not typo
	str	@K[2],[sp,#32]
	add	@K[4],@K[3],$ONE
	add	@K[5],@K[4],$ONE
	add	@K[6],@K[5],$ONE
	shl	$ONE,$ONE,#2			// 1 -> 4

	stp	d8,d9,[sp,#128+0]		// meet ABI requirements
	stp	d10,d11,[sp,#128+16]
	stp	d12,d13,[sp,#128+32]
	stp	d14,d15,[sp,#128+48]

	sub	$len,$len,#512			// not typo

.Loop_outer_512_neon:
	 mov	$A0,@K[0]
	 mov	$A1,@K[0]
	 mov	$A2,@K[0]
	 mov	$A3,@K[0]
	 mov	$A4,@K[0]
	 mov	$A5,@K[0]
	 mov	$B0,@K[1]
	mov.32	@x[0],@d[0]			// unpack key block
	 mov	$B1,@K[1]
	lsr	@x[1],@d[0],#32
	 mov	$B2,@K[1]
	mov.32	@x[2],@d[1]
	 mov	$B3,@K[1]
	lsr	@x[3],@d[1],#32
	 mov	$B4,@K[1]
	mov.32	@x[4],@d[2]
	 mov	$B5,@K[1]
	lsr	@x[5],@d[2],#32
	 mov	$D0,@K[3]
	mov.32	@x[6],@d[3]
	 mov	$D1,@K[4]
	lsr	@x[7],@d[3],#32
	 mov	$D2,@K[5]
	mov.32	@x[8],@d[4]
	 mov	$D3,@K[6]
	lsr	@x[9],@d[4],#32
	 mov	$C0,@K[2]
	mov.32	@x[10],@d[5]
	 mov	$C1,@K[2]
	lsr	@x[11],@d[5],#32
	 add	$D4,$D0,$ONE			// +4
	mov.32	@x[12],@d[6]
	 add	$D5,$D1,$ONE			// +4
	lsr	@x[13],@d[6],#32
	 mov	$C2,@K[2]
	mov.32	@x[14],@d[7]
	 mov	$C3,@K[2]
	lsr	@x[15],@d[7],#32
	 mov	$C4,@K[2]
	 stp	@K[3],@K[4],[sp,#48]		// off-load key block, variable part
	 mov	$C5,@K[2]
	 stp	@K[5],@K[6],[sp,#80]

	mov	$ctr,#5
	ld1	{$rot24},[$key]
	subs	$len,$len,#512
.Loop_upper_neon:
	sub	$ctr,$ctr,#1
___
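	# 6+2 scheduling: the six whole-block NEON "threads" retire one
	# double round per .Loop_upper_neon iteration while the scalar
	# thread retires two, so the scalar block completes during the
	# upper loop, is flushed to memory, and a second scalar block
	# runs during .Loop_lower_neon; eight blocks (512 bytes) per
	# outer iteration.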
	my @thread0=&NEONROUND($A0,$B0,$C0,$D0,$T0,0);
	my @thread1=&NEONROUND($A1,$B1,$C1,$D1,$T1,0);
	my @thread2=&NEONROUND($A2,$B2,$C2,$D2,$T2,0);
	my @thread3=&NEONROUND($A3,$B3,$C3,$D3,$T3,0);
	my @thread4=&NEONROUND($A4,$B4,$C4,$D4,$T4,0);
	my @thread5=&NEONROUND($A5,$B5,$C5,$D5,$T5,0);
	my @thread67=(&ROUND(0,4,8,12),&ROUND(0,5,10,15));
	my $diff = ($#thread0+1)*6 - $#thread67 - 1;
	my $i = 0;

	foreach (@thread0) {
		eval;			eval(shift(@thread67));
		eval(shift(@thread1));	eval(shift(@thread67));
		eval(shift(@thread2));	eval(shift(@thread67));
		eval(shift(@thread3));	eval(shift(@thread67));
		eval(shift(@thread4));	eval(shift(@thread67));
		eval(shift(@thread5));	eval(shift(@thread67));
	}

	@thread0=&NEONROUND($A0,$B0,$C0,$D0,$T0,1);
	@thread1=&NEONROUND($A1,$B1,$C1,$D1,$T1,1);
	@thread2=&NEONROUND($A2,$B2,$C2,$D2,$T2,1);
	@thread3=&NEONROUND($A3,$B3,$C3,$D3,$T3,1);
	@thread4=&NEONROUND($A4,$B4,$C4,$D4,$T4,1);
	@thread5=&NEONROUND($A5,$B5,$C5,$D5,$T5,1);
	@thread67=(&ROUND(0,4,8,12),&ROUND(0,5,10,15));

	foreach (@thread0) {
		eval;			eval(shift(@thread67));
		eval(shift(@thread1));	eval(shift(@thread67));
		eval(shift(@thread2));	eval(shift(@thread67));
		eval(shift(@thread3));	eval(shift(@thread67));
		eval(shift(@thread4));	eval(shift(@thread67));
		eval(shift(@thread5));	eval(shift(@thread67));
	}
$code.=<<___;
	cbnz	$ctr,.Loop_upper_neon

	add.32	@x[0],@x[0],@d[0]		// accumulate key block
	add	@x[1],@x[1],@d[0],lsr#32
	add.32	@x[2],@x[2],@d[1]
	add	@x[3],@x[3],@d[1],lsr#32
	add.32	@x[4],@x[4],@d[2]
	add	@x[5],@x[5],@d[2],lsr#32
	add.32	@x[6],@x[6],@d[3]
	add	@x[7],@x[7],@d[3],lsr#32
	add.32	@x[8],@x[8],@d[4]
	add	@x[9],@x[9],@d[4],lsr#32
	add.32	@x[10],@x[10],@d[5]
	add	@x[11],@x[11],@d[5],lsr#32
	add.32	@x[12],@x[12],@d[6]
	add	@x[13],@x[13],@d[6],lsr#32
	add.32	@x[14],@x[14],@d[7]
	add	@x[15],@x[15],@d[7],lsr#32

	add	@x[0],@x[0],@x[1],lsl#32	// pack
	add	@x[2],@x[2],@x[3],lsl#32
	ldp	@x[1],@x[3],[$inp,#0]		// load input
	add	@x[4],@x[4],@x[5],lsl#32
	add	@x[6],@x[6],@x[7],lsl#32
	ldp	@x[5],@x[7],[$inp,#16]
	add	@x[8],@x[8],@x[9],lsl#32
	add	@x[10],@x[10],@x[11],lsl#32
	ldp	@x[9],@x[11],[$inp,#32]
	add	@x[12],@x[12],@x[13],lsl#32
	add	@x[14],@x[14],@x[15],lsl#32
	ldp	@x[13],@x[15],[$inp,#48]
	add	$inp,$inp,#64
#ifdef	__AARCH64EB__
	rev	@x[0],@x[0]
	rev	@x[2],@x[2]
	rev	@x[4],@x[4]
	rev	@x[6],@x[6]
	rev	@x[8],@x[8]
	rev	@x[10],@x[10]
	rev	@x[12],@x[12]
	rev	@x[14],@x[14]
#endif
	eor	@x[0],@x[0],@x[1]
	eor	@x[2],@x[2],@x[3]
	eor	@x[4],@x[4],@x[5]
	eor	@x[6],@x[6],@x[7]
	eor	@x[8],@x[8],@x[9]
	eor	@x[10],@x[10],@x[11]
	eor	@x[12],@x[12],@x[13]
	eor	@x[14],@x[14],@x[15]

	 stp	@x[0],@x[2],[$out,#0]		// store output
	 add	@d[6],@d[6],#1			// increment counter
	mov.32	@x[0],@d[0]			// unpack key block
	lsr	@x[1],@d[0],#32
	 stp	@x[4],@x[6],[$out,#16]
	mov.32	@x[2],@d[1]
	lsr	@x[3],@d[1],#32
	 stp	@x[8],@x[10],[$out,#32]
	mov.32	@x[4],@d[2]
	lsr	@x[5],@d[2],#32
	 stp	@x[12],@x[14],[$out,#48]
	 add	$out,$out,#64
	mov.32	@x[6],@d[3]
	lsr	@x[7],@d[3],#32
	mov.32	@x[8],@d[4]
	lsr	@x[9],@d[4],#32
	mov.32	@x[10],@d[5]
	lsr	@x[11],@d[5],#32
	mov.32	@x[12],@d[6]
	lsr	@x[13],@d[6],#32
	mov.32	@x[14],@d[7]
	lsr	@x[15],@d[7],#32

	mov	$ctr,#5
.Loop_lower_neon:
	sub	$ctr,$ctr,#1
___
	@thread0=&NEONROUND($A0,$B0,$C0,$D0,$T0,0);
	@thread1=&NEONROUND($A1,$B1,$C1,$D1,$T1,0);
	@thread2=&NEONROUND($A2,$B2,$C2,$D2,$T2,0);
	@thread3=&NEONROUND($A3,$B3,$C3,$D3,$T3,0);
	@thread4=&NEONROUND($A4,$B4,$C4,$D4,$T4,0);
	@thread5=&NEONROUND($A5,$B5,$C5,$D5,$T5,0);
	@thread67=(&ROUND(0,4,8,12),&ROUND(0,5,10,15));

	foreach (@thread0) {
		eval;			eval(shift(@thread67));
		eval(shift(@thread1));	eval(shift(@thread67));
		eval(shift(@thread2));	eval(shift(@thread67));
		eval(shift(@thread3));	eval(shift(@thread67));
		eval(shift(@thread4));	eval(shift(@thread67));
		eval(shift(@thread5));	eval(shift(@thread67));
	}

	@thread0=&NEONROUND($A0,$B0,$C0,$D0,$T0,1);
	@thread1=&NEONROUND($A1,$B1,$C1,$D1,$T1,1);
	@thread2=&NEONROUND($A2,$B2,$C2,$D2,$T2,1);
	@thread3=&NEONROUND($A3,$B3,$C3,$D3,$T3,1);
	@thread4=&NEONROUND($A4,$B4,$C4,$D4,$T4,1);
	@thread5=&NEONROUND($A5,$B5,$C5,$D5,$T5,1);
	@thread67=(&ROUND(0,4,8,12),&ROUND(0,5,10,15));

	foreach (@thread0) {
		eval;			eval(shift(@thread67));
		eval(shift(@thread1));	eval(shift(@thread67));
		eval(shift(@thread2));	eval(shift(@thread67));
		eval(shift(@thread3));	eval(shift(@thread67));
		eval(shift(@thread4));	eval(shift(@thread67));
		eval(shift(@thread5));	eval(shift(@thread67));
	}
$code.=<<___;
	cbnz	$ctr,.Loop_lower_neon

	add.32	@x[0],@x[0],@d[0]		// accumulate key block
	 ldp	@K[0],@K[1],[sp,#0]
	add	@x[1],@x[1],@d[0],lsr#32
	 ldp	@K[2],@K[3],[sp,#32]
	add.32	@x[2],@x[2],@d[1]
	 ldp	@K[4],@K[5],[sp,#64]
	add	@x[3],@x[3],@d[1],lsr#32
	 ldr	@K[6],[sp,#96]
	 add	$A0,$A0,@K[0]
	add.32	@x[4],@x[4],@d[2]
	 add	$A1,$A1,@K[0]
	add	@x[5],@x[5],@d[2],lsr#32
	 add	$A2,$A2,@K[0]
	add.32	@x[6],@x[6],@d[3]
	 add	$A3,$A3,@K[0]
	add	@x[7],@x[7],@d[3],lsr#32
	 add	$A4,$A4,@K[0]
	add.32	@x[8],@x[8],@d[4]
	 add	$A5,$A5,@K[0]
	add	@x[9],@x[9],@d[4],lsr#32
	 add	$C0,$C0,@K[2]
	add.32	@x[10],@x[10],@d[5]
	 add	$C1,$C1,@K[2]
	add	@x[11],@x[11],@d[5],lsr#32
	 add	$C2,$C2,@K[2]
	add.32	@x[12],@x[12],@d[6]
	 add	$C3,$C3,@K[2]
	add	@x[13],@x[13],@d[6],lsr#32
	 add	$C4,$C4,@K[2]
	add.32	@x[14],@x[14],@d[7]
	 add	$C5,$C5,@K[2]
	add	@x[15],@x[15],@d[7],lsr#32
	 add	$D4,$D4,$ONE			// +4
	add	@x[0],@x[0],@x[1],lsl#32	// pack
	 add	$D5,$D5,$ONE			// +4
	add	@x[2],@x[2],@x[3],lsl#32
	 add	$D0,$D0,@K[3]
	ldp	@x[1],@x[3],[$inp,#0]		// load input
	 add	$D1,$D1,@K[4]
	add	@x[4],@x[4],@x[5],lsl#32
	 add	$D2,$D2,@K[5]
	add	@x[6],@x[6],@x[7],lsl#32
	 add	$D3,$D3,@K[6]
	ldp	@x[5],@x[7],[$inp,#16]
	 add	$D4,$D4,@K[3]
	add	@x[8],@x[8],@x[9],lsl#32
	 add	$D5,$D5,@K[4]
	add	@x[10],@x[10],@x[11],lsl#32
	 add	$B0,$B0,@K[1]
	ldp	@x[9],@x[11],[$inp,#32]
	 add	$B1,$B1,@K[1]
	add	@x[12],@x[12],@x[13],lsl#32
	 add	$B2,$B2,@K[1]
	add	@x[14],@x[14],@x[15],lsl#32
	 add	$B3,$B3,@K[1]
	ldp	@x[13],@x[15],[$inp,#48]
	 add	$B4,$B4,@K[1]
	add	$inp,$inp,#64
	 add	$B5,$B5,@K[1]

#ifdef	__AARCH64EB__
	rev	@x[0],@x[0]
	rev	@x[2],@x[2]
	rev	@x[4],@x[4]
	rev	@x[6],@x[6]
	rev	@x[8],@x[8]
	rev	@x[10],@x[10]
	rev	@x[12],@x[12]
	rev	@x[14],@x[14]
#endif
	ld1.8	{$T0-$T3},[$inp],#64
	eor	@x[0],@x[0],@x[1]
	eor	@x[2],@x[2],@x[3]
	eor	@x[4],@x[4],@x[5]
	eor	@x[6],@x[6],@x[7]
	eor	@x[8],@x[8],@x[9]
	 eor	$A0,$A0,$T0
	eor	@x[10],@x[10],@x[11]
	 eor	$B0,$B0,$T1
	eor	@x[12],@x[12],@x[13]
	 eor	$C0,$C0,$T2
	eor	@x[14],@x[14],@x[15]
	 eor	$D0,$D0,$T3
	 ld1.8	{$T0-$T3},[$inp],#64

	stp	@x[0],@x[2],[$out,#0]		// store output
	 add	@d[6],@d[6],#7			// increment counter
	stp	@x[4],@x[6],[$out,#16]
	stp	@x[8],@x[10],[$out,#32]
	stp	@x[12],@x[14],[$out,#48]
	add	$out,$out,#64
	st1.8	{$A0-$D0},[$out],#64

	ld1.8	{$A0-$D0},[$inp],#64
	eor	$A1,$A1,$T0
	eor	$B1,$B1,$T1
	eor	$C1,$C1,$T2
	eor	$D1,$D1,$T3
	st1.8	{$A1-$D1},[$out],#64

	ld1.8	{$A1-$D1},[$inp],#64
	eor	$A2,$A2,$A0
	 ldp	@K[0],@K[1],[sp,#0]
	eor	$B2,$B2,$B0
	 ldp	@K[2],@K[3],[sp,#32]
	eor	$C2,$C2,$C0
	eor	$D2,$D2,$D0
	st1.8	{$A2-$D2},[$out],#64

	ld1.8	{$A2-$D2},[$inp],#64
	eor	$A3,$A3,$A1
	eor	$B3,$B3,$B1
	eor	$C3,$C3,$C1
	eor	$D3,$D3,$D1
	st1.8	{$A3-$D3},[$out],#64

	ld1.8	{$A3-$D3},[$inp],#64
	eor	$A4,$A4,$A2
	eor	$B4,$B4,$B2
	eor	$C4,$C4,$C2
	eor	$D4,$D4,$D2
	st1.8	{$A4-$D4},[$out],#64

	shl	$A0,$ONE,#1			// 4 -> 8
	eor	$A5,$A5,$A3
	eor	$B5,$B5,$B3
	eor	$C5,$C5,$C3
	eor	$D5,$D5,$D3
	st1.8	{$A5-$D5},[$out],#64

	add	@K[3],@K[3],$A0			// += 8
	add	@K[4],@K[4],$A0
	add	@K[5],@K[5],$A0
	add	@K[6],@K[6],$A0

	b.hs	.Loop_outer_512_neon

	adds	$len,$len,#512
	ushr	$ONE,$ONE,#1			// 4 -> 2

	ldp	d10,d11,[sp,#128+16]		// meet ABI requirements
	ldp	d12,d13,[sp,#128+32]
	ldp	d14,d15,[sp,#128+48]

	stp	@K[0],@K[0],[sp,#0]		// wipe off-load area
	stp	@K[0],@K[0],[sp,#32]
	stp	@K[0],@K[0],[sp,#64]

	b.eq	.Ldone_512_neon

	sub	$key,$key,#16			// .Lone
	cmp	$len,#192
	add	sp,sp,#128
	sub	@K[3],@K[3],$ONE		// -= 2
	ld1	{$CTR,$ROT24},[$key]
	b.hs	.Loop_outer_neon

	ldp	d8,d9,[sp,#0]			// meet ABI requirements
	eor	@K[1],@K[1],@K[1]
	eor	@K[2],@K[2],@K[2]
	eor	@K[3],@K[3],@K[3]
	eor	@K[4],@K[4],@K[4]
	eor	@K[5],@K[5],@K[5]
	eor	@K[6],@K[6],@K[6]
	b	.Loop_outer

.Ldone_512_neon:
	ldp	d8,d9,[sp,#128+0]		// meet ABI requirements
	ldp	x19,x20,[x29,#16]
	add	sp,sp,#128+64
	ldp	x21,x22,[x29,#32]
	ldp	x23,x24,[x29,#48]
	ldp	x25,x26,[x29,#64]
	ldp	x27,x28,[x29,#80]
	ldp	x29,x30,[sp],#96
	AARCH64_VALIDATE_LINK_REGISTER
	ret
.size	ChaCha20_512_neon,.-ChaCha20_512_neon
___
}
}}}

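# Translate the pseudo mnemonics above into plain AArch64 for
# arm-xlate.pl. For example: "mov.32 x5,x22" becomes "mov w5,w22",
# "ld1.8 {v16.4s},..." becomes "ld1 {v16.16b},...", "zip1.64" drops
# its suffix while .4s turns into .2d, and "rev32.16" becomes plain
# rev32 on .8h elements (a 16-bit-element reversal, i.e. rotate by 16).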
foreach (split("\n",$code)) {
	s/\`([^\`]*)\`/eval $1/geo;

	(s/\b([a-z]+)\.32\b/$1/ and (s/x([0-9]+)/w$1/g or 1))	or
	(m/\b(eor|ext|mov|tbl)\b/ and (s/\.4s/\.16b/g or 1))	or
	(s/\b((?:ld|st)1)\.8\b/$1/ and (s/\.4s/\.16b/g or 1))	or
	(m/\b(ld|st)[rp]\b/ and (s/v([0-9]+)\.4s/q$1/g or 1))	or
	(m/\b(dup|ld1)\b/ and (s/\.4(s}?\[[0-3]\])/.$1/g or 1))	or
	(s/\b(zip[12])\.64\b/$1/ and (s/\.4s/\.2d/g or 1))	or
	(s/\brev32\.16\b/rev32/ and (s/\.4s/\.8h/g or 1));

	#s/\bq([0-9]+)#(lo|hi)/sprintf "d%d",2*$1+($2 eq "hi")/geo;

	print $_,"\n";
}
close STDOUT or die "error closing STDOUT: $!";	# flush