1#! /usr/bin/env perl
2# Copyright 2005-2016 The OpenSSL Project Authors. All Rights Reserved.
3#
4# Licensed under the OpenSSL license (the "License").  You may not use
5# this file except in compliance with the License.  You can obtain a copy
6# in the file LICENSE in the source distribution or at
7# https://www.openssl.org/source/license.html
8
9#
10# ====================================================================
11# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
12# project. The module is, however, dual licensed under OpenSSL and
13# CRYPTOGAMS licenses depending on where you obtain it. For further
14# details see http://www.openssl.org/~appro/cryptogams/.
15# ====================================================================
16#
17# Version 2.1.
18#
19# aes-*-cbc benchmarks are improved by >70% [compared to gcc 3.3.2 on
20# Opteron 240 CPU] plus all the bells-n-whistles from 32-bit version
21# [you'll notice a lot of resemblance], such as compressed S-boxes
22# in little-endian byte order, prefetch of these tables in CBC mode,
23# as well as avoiding L1 cache aliasing between stack frame and key
24# schedule and already mentioned tables, compressed Td4...
25#
26# Performance in number of cycles per processed byte for 128-bit key:
27#
28#		ECB encrypt	ECB decrypt	CBC large chunk
29# AMD64		33		43		13.0
30# EM64T		38		56		18.6(*)
31# Core 2	30		42		14.5(*)
32# Atom		65		86		32.1(*)
33#
34# (*) with hyper-threading off
35
36$flavour = shift;
37$output  = shift;
38if ($flavour =~ /\./) { $output = $flavour; undef $flavour; }
39
40$win64=0; $win64=1 if ($flavour =~ /[nm]asm|mingw64/ || $output =~ /\.asm$/);
41
42$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
43( $xlate="${dir}x86_64-xlate.pl" and -f $xlate ) or
44( $xlate="${dir}../../perlasm/x86_64-xlate.pl" and -f $xlate) or
45die "can't locate x86_64-xlate.pl";
46
47open OUT,"| \"$^X\" \"$xlate\" $flavour \"$output\"";
48*STDOUT=*OUT;
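# Everything appended to $code is eventually printed to this pipe, i.e.
# post-processed by x86_64-xlate.pl into whatever syntax the requested
# $flavour calls for (e.g. nasm/masm/mingw64 when $win64 is set).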
49
50$verticalspin=1;	# unlike 32-bit version $verticalspin performs
51			# ~15% better on both AMD and Intel cores
52$speed_limit=512;	# see aes-586.pl for details
53
54$code=".text\n";
55
56$s0="%eax";
57$s1="%ebx";
58$s2="%ecx";
59$s3="%edx";
60$acc0="%esi";	$mask80="%rsi";
61$acc1="%edi";	$maskfe="%rdi";
62$acc2="%ebp";	$mask1b="%rbp";
63$inp="%r8";
64$out="%r9";
65$t0="%r10d";
66$t1="%r11d";
67$t2="%r12d";
68$rnds="%r13d";
69$sbox="%r14";
70$key="%r15";
71
72sub hi() { my $r=shift;	$r =~ s/%[er]([a-d])x/%\1h/;	$r; }
73sub lo() { my $r=shift;	$r =~ s/%[er]([a-d])x/%\1l/;
74			$r =~ s/%[er]([sd]i)/%\1l/;
75			$r =~ s/%(r[0-9]+)[d]?/%\1b/;	$r; }
76sub LO() { my $r=shift; $r =~ s/%r([a-z]+)/%e\1/;
77			$r =~ s/%r([0-9]+)/%r\1d/;	$r; }
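# For reference, these helpers just rewrite register names, e.g.
# &hi("%eax") -> "%ah", &lo("%eax") -> "%al", &lo("%esi") -> "%sil",
# &lo("%r10d") -> "%r10b", &LO("%rax") -> "%eax", &LO("%r10") -> "%r10d".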
78sub _data_word()
79{ my $i;
80    while(defined($i=shift)) { $code.=sprintf".long\t0x%08x,0x%08x\n",$i,$i; }
81}
82sub data_word()
83{ my $i;
84  my $last=pop(@_);
85    $code.=".long\t";
86    while(defined($i=shift)) { $code.=sprintf"0x%08x,",$i; }
87    $code.=sprintf"0x%08x\n",$last;
88}
89
90sub data_byte()
91{ my $i;
92  my $last=pop(@_);
93    $code.=".byte\t";
94    while(defined($i=shift)) { $code.=sprintf"0x%02x,",$i&0xff; }
95    $code.=sprintf"0x%02x\n",$last&0xff;
96}
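# The Te/Td tables emitted with _data_word() below store every 32-bit
# entry twice, so a 4-byte load at displacement d (0..3) into an 8-byte
# entry returns the value rotated left by 8*d bits.  That is what the
# 0/1/2/3($sbox,index,8) addressing in the round code relies on, and why
# one "compressed" table replaces four separate 1KB ones.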
97
98sub encvert()
99{ my $t3="%r8d";	# zaps $inp!
100
101$code.=<<___;
102	# favor 3-way issue Opteron pipeline...
103	movzb	`&lo("$s0")`,$acc0
104	movzb	`&lo("$s1")`,$acc1
105	movzb	`&lo("$s2")`,$acc2
106	mov	0($sbox,$acc0,8),$t0
107	mov	0($sbox,$acc1,8),$t1
108	mov	0($sbox,$acc2,8),$t2
109
110	movzb	`&hi("$s1")`,$acc0
111	movzb	`&hi("$s2")`,$acc1
112	movzb	`&lo("$s3")`,$acc2
113	xor	3($sbox,$acc0,8),$t0
114	xor	3($sbox,$acc1,8),$t1
115	mov	0($sbox,$acc2,8),$t3
116
117	movzb	`&hi("$s3")`,$acc0
118	shr	\$16,$s2
119	movzb	`&hi("$s0")`,$acc2
120	xor	3($sbox,$acc0,8),$t2
121	shr	\$16,$s3
122	xor	3($sbox,$acc2,8),$t3
123
124	shr	\$16,$s1
125	lea	16($key),$key
126	shr	\$16,$s0
127
128	movzb	`&lo("$s2")`,$acc0
129	movzb	`&lo("$s3")`,$acc1
130	movzb	`&lo("$s0")`,$acc2
131	xor	2($sbox,$acc0,8),$t0
132	xor	2($sbox,$acc1,8),$t1
133	xor	2($sbox,$acc2,8),$t2
134
135	movzb	`&hi("$s3")`,$acc0
136	movzb	`&hi("$s0")`,$acc1
137	movzb	`&lo("$s1")`,$acc2
138	xor	1($sbox,$acc0,8),$t0
139	xor	1($sbox,$acc1,8),$t1
140	xor	2($sbox,$acc2,8),$t3
141
142	mov	12($key),$s3
143	movzb	`&hi("$s1")`,$acc1
144	movzb	`&hi("$s2")`,$acc2
145	mov	0($key),$s0
146	xor	1($sbox,$acc1,8),$t2
147	xor	1($sbox,$acc2,8),$t3
148
149	mov	4($key),$s1
150	mov	8($key),$s2
151	xor	$t0,$s0
152	xor	$t1,$s1
153	xor	$t2,$s2
154	xor	$t3,$s3
155___
156}
157
158sub enclastvert()
159{ my $t3="%r8d";	# zaps $inp!
160
161$code.=<<___;
162	movzb	`&lo("$s0")`,$acc0
163	movzb	`&lo("$s1")`,$acc1
164	movzb	`&lo("$s2")`,$acc2
165	movzb	2($sbox,$acc0,8),$t0
166	movzb	2($sbox,$acc1,8),$t1
167	movzb	2($sbox,$acc2,8),$t2
168
169	movzb	`&lo("$s3")`,$acc0
170	movzb	`&hi("$s1")`,$acc1
171	movzb	`&hi("$s2")`,$acc2
172	movzb	2($sbox,$acc0,8),$t3
173	mov	0($sbox,$acc1,8),$acc1	#$t0
174	mov	0($sbox,$acc2,8),$acc2	#$t1
175
176	and	\$0x0000ff00,$acc1
177	and	\$0x0000ff00,$acc2
178
179	xor	$acc1,$t0
180	xor	$acc2,$t1
181	shr	\$16,$s2
182
183	movzb	`&hi("$s3")`,$acc0
184	movzb	`&hi("$s0")`,$acc1
185	shr	\$16,$s3
186	mov	0($sbox,$acc0,8),$acc0	#$t2
187	mov	0($sbox,$acc1,8),$acc1	#$t3
188
189	and	\$0x0000ff00,$acc0
190	and	\$0x0000ff00,$acc1
191	shr	\$16,$s1
192	xor	$acc0,$t2
193	xor	$acc1,$t3
194	shr	\$16,$s0
195
196	movzb	`&lo("$s2")`,$acc0
197	movzb	`&lo("$s3")`,$acc1
198	movzb	`&lo("$s0")`,$acc2
199	mov	0($sbox,$acc0,8),$acc0	#$t0
200	mov	0($sbox,$acc1,8),$acc1	#$t1
201	mov	0($sbox,$acc2,8),$acc2	#$t2
202
203	and	\$0x00ff0000,$acc0
204	and	\$0x00ff0000,$acc1
205	and	\$0x00ff0000,$acc2
206
207	xor	$acc0,$t0
208	xor	$acc1,$t1
209	xor	$acc2,$t2
210
211	movzb	`&lo("$s1")`,$acc0
212	movzb	`&hi("$s3")`,$acc1
213	movzb	`&hi("$s0")`,$acc2
214	mov	0($sbox,$acc0,8),$acc0	#$t3
215	mov	2($sbox,$acc1,8),$acc1	#$t0
216	mov	2($sbox,$acc2,8),$acc2	#$t1
217
218	and	\$0x00ff0000,$acc0
219	and	\$0xff000000,$acc1
220	and	\$0xff000000,$acc2
221
222	xor	$acc0,$t3
223	xor	$acc1,$t0
224	xor	$acc2,$t1
225
226	movzb	`&hi("$s1")`,$acc0
227	movzb	`&hi("$s2")`,$acc1
228	mov	16+12($key),$s3
229	mov	2($sbox,$acc0,8),$acc0	#$t2
230	mov	2($sbox,$acc1,8),$acc1	#$t3
231	mov	16+0($key),$s0
232
233	and	\$0xff000000,$acc0
234	and	\$0xff000000,$acc1
235
236	xor	$acc0,$t2
237	xor	$acc1,$t3
238
239	mov	16+4($key),$s1
240	mov	16+8($key),$s2
241	xor	$t0,$s0
242	xor	$t1,$s1
243	xor	$t2,$s2
244	xor	$t3,$s3
245___
246}
247
248sub encstep()
249{ my ($i,@s) = @_;
250  my $tmp0=$acc0;
251  my $tmp1=$acc1;
252  my $tmp2=$acc2;
253  my $out=($t0,$t1,$t2,$s[0])[$i];
254
255	if ($i==3) {
256		$tmp0=$s[1];
257		$tmp1=$s[2];
258		$tmp2=$s[3];
259	}
260	$code.="	movzb	".&lo($s[0]).",$out\n";
261	$code.="	mov	$s[2],$tmp1\n"		if ($i!=3);
262	$code.="	lea	16($key),$key\n"	if ($i==0);
263
264	$code.="	movzb	".&hi($s[1]).",$tmp0\n";
265	$code.="	mov	0($sbox,$out,8),$out\n";
266
267	$code.="	shr	\$16,$tmp1\n";
268	$code.="	mov	$s[3],$tmp2\n"		if ($i!=3);
269	$code.="	xor	3($sbox,$tmp0,8),$out\n";
270
271	$code.="	movzb	".&lo($tmp1).",$tmp1\n";
272	$code.="	shr	\$24,$tmp2\n";
273	$code.="	xor	4*$i($key),$out\n";
274
275	$code.="	xor	2($sbox,$tmp1,8),$out\n";
276	$code.="	xor	1($sbox,$tmp2,8),$out\n";
277
278	$code.="	mov	$t0,$s[1]\n"		if ($i==3);
279	$code.="	mov	$t1,$s[2]\n"		if ($i==3);
280	$code.="	mov	$t2,$s[3]\n"		if ($i==3);
281	$code.="\n";
282}
283
284sub enclast()
285{ my ($i,@s)=@_;
286  my $tmp0=$acc0;
287  my $tmp1=$acc1;
288  my $tmp2=$acc2;
289  my $out=($t0,$t1,$t2,$s[0])[$i];
290
291	if ($i==3) {
292		$tmp0=$s[1];
293		$tmp1=$s[2];
294		$tmp2=$s[3];
295	}
296	$code.="	movzb	".&lo($s[0]).",$out\n";
297	$code.="	mov	$s[2],$tmp1\n"		if ($i!=3);
298
299	$code.="	mov	2($sbox,$out,8),$out\n";
300	$code.="	shr	\$16,$tmp1\n";
301	$code.="	mov	$s[3],$tmp2\n"		if ($i!=3);
302
303	$code.="	and	\$0x000000ff,$out\n";
304	$code.="	movzb	".&hi($s[1]).",$tmp0\n";
305	$code.="	movzb	".&lo($tmp1).",$tmp1\n";
306	$code.="	shr	\$24,$tmp2\n";
307
308	$code.="	mov	0($sbox,$tmp0,8),$tmp0\n";
309	$code.="	mov	0($sbox,$tmp1,8),$tmp1\n";
310	$code.="	mov	2($sbox,$tmp2,8),$tmp2\n";
311
312	$code.="	and	\$0x0000ff00,$tmp0\n";
313	$code.="	and	\$0x00ff0000,$tmp1\n";
314	$code.="	and	\$0xff000000,$tmp2\n";
315
316	$code.="	xor	$tmp0,$out\n";
317	$code.="	mov	$t0,$s[1]\n"		if ($i==3);
318	$code.="	xor	$tmp1,$out\n";
319	$code.="	mov	$t1,$s[2]\n"		if ($i==3);
320	$code.="	xor	$tmp2,$out\n";
321	$code.="	mov	$t2,$s[3]\n"		if ($i==3);
322	$code.="\n";
323}
324
325$code.=<<___;
326.type	_x86_64_AES_encrypt,\@abi-omnipotent
327.align	16
328_x86_64_AES_encrypt:
329	xor	0($key),$s0			# xor with key
330	xor	4($key),$s1
331	xor	8($key),$s2
332	xor	12($key),$s3
333
334	mov	240($key),$rnds			# load key->rounds
335	sub	\$1,$rnds
336	jmp	.Lenc_loop
337.align	16
338.Lenc_loop:
339___
340	if ($verticalspin) { &encvert(); }
341	else {	&encstep(0,$s0,$s1,$s2,$s3);
342		&encstep(1,$s1,$s2,$s3,$s0);
343		&encstep(2,$s2,$s3,$s0,$s1);
344		&encstep(3,$s3,$s0,$s1,$s2);
345	}
346$code.=<<___;
347	sub	\$1,$rnds
348	jnz	.Lenc_loop
349___
350	if ($verticalspin) { &enclastvert(); }
351	else {	&enclast(0,$s0,$s1,$s2,$s3);
352		&enclast(1,$s1,$s2,$s3,$s0);
353		&enclast(2,$s2,$s3,$s0,$s1);
354		&enclast(3,$s3,$s0,$s1,$s2);
355		$code.=<<___;
356		xor	16+0($key),$s0		# xor with key
357		xor	16+4($key),$s1
358		xor	16+8($key),$s2
359		xor	16+12($key),$s3
360___
361	}
362$code.=<<___;
363	.byte	0xf3,0xc3			# rep ret
364.size	_x86_64_AES_encrypt,.-_x86_64_AES_encrypt
365___
366
# it's possible to implement this by shifting tN by 8, filling the least
# significant byte with a byte load and finally bswap-ing at the end,
# but such partial register loads kill Core 2...
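# (enccompactvert() below avoids that by zero-extending every Te4 byte
#  into its own register with movzb and merging the results with
#  shl/xor, at the cost of more instructions)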
370sub enccompactvert()
371{ my ($t3,$t4,$t5)=("%r8d","%r9d","%r13d");
372
373$code.=<<___;
374	movzb	`&lo("$s0")`,$t0
375	movzb	`&lo("$s1")`,$t1
376	movzb	`&lo("$s2")`,$t2
377	movzb	`&lo("$s3")`,$t3
378	movzb	`&hi("$s1")`,$acc0
379	movzb	`&hi("$s2")`,$acc1
380	shr	\$16,$s2
381	movzb	`&hi("$s3")`,$acc2
382	movzb	($sbox,$t0,1),$t0
383	movzb	($sbox,$t1,1),$t1
384	movzb	($sbox,$t2,1),$t2
385	movzb	($sbox,$t3,1),$t3
386
387	movzb	($sbox,$acc0,1),$t4	#$t0
388	movzb	`&hi("$s0")`,$acc0
389	movzb	($sbox,$acc1,1),$t5	#$t1
390	movzb	`&lo("$s2")`,$acc1
391	movzb	($sbox,$acc2,1),$acc2	#$t2
392	movzb	($sbox,$acc0,1),$acc0	#$t3
393
394	shl	\$8,$t4
395	shr	\$16,$s3
396	shl	\$8,$t5
397	xor	$t4,$t0
398	shr	\$16,$s0
399	movzb	`&lo("$s3")`,$t4
400	shr	\$16,$s1
401	xor	$t5,$t1
402	shl	\$8,$acc2
403	movzb	`&lo("$s0")`,$t5
404	movzb	($sbox,$acc1,1),$acc1	#$t0
405	xor	$acc2,$t2
406
407	shl	\$8,$acc0
408	movzb	`&lo("$s1")`,$acc2
409	shl	\$16,$acc1
410	xor	$acc0,$t3
411	movzb	($sbox,$t4,1),$t4	#$t1
412	movzb	`&hi("$s3")`,$acc0
413	movzb	($sbox,$t5,1),$t5	#$t2
414	xor	$acc1,$t0
415
416	shr	\$8,$s2
417	movzb	`&hi("$s0")`,$acc1
418	shl	\$16,$t4
419	shr	\$8,$s1
420	shl	\$16,$t5
421	xor	$t4,$t1
422	movzb	($sbox,$acc2,1),$acc2	#$t3
423	movzb	($sbox,$acc0,1),$acc0	#$t0
424	movzb	($sbox,$acc1,1),$acc1	#$t1
425	movzb	($sbox,$s2,1),$s3	#$t3
426	movzb	($sbox,$s1,1),$s2	#$t2
427
428	shl	\$16,$acc2
429	xor	$t5,$t2
430	shl	\$24,$acc0
431	xor	$acc2,$t3
432	shl	\$24,$acc1
433	xor	$acc0,$t0
434	shl	\$24,$s3
435	xor	$acc1,$t1
436	shl	\$24,$s2
437	mov	$t0,$s0
438	mov	$t1,$s1
439	xor	$t2,$s2
440	xor	$t3,$s3
441___
442}
443
444sub enctransform_ref()
445{ my $sn = shift;
446  my ($acc,$r2,$tmp)=("%r8d","%r9d","%r13d");
447
448$code.=<<___;
449	mov	$sn,$acc
450	and	\$0x80808080,$acc
451	mov	$acc,$tmp
452	shr	\$7,$tmp
453	lea	($sn,$sn),$r2
454	sub	$tmp,$acc
455	and	\$0xfefefefe,$r2
456	and	\$0x1b1b1b1b,$acc
457	mov	$sn,$tmp
458	xor	$acc,$r2
459
460	xor	$r2,$sn
461	rol	\$24,$sn
462	xor	$r2,$sn
463	ror	\$16,$tmp
464	xor	$tmp,$sn
465	ror	\$8,$tmp
466	xor	$tmp,$sn
467___
468}
469
# unlike the decrypt case it does not pay off to parallelize enctransform
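# Both enctransform_ref() above and enctransform() below double all four
# state bytes of a word at once with the packed xtime trick, essentially
# (an illustration, not the exact register allocation):
#
#	m = s & 0x80808080
#	xtime4(s) = ((s+s) & 0xfefefefe) ^ ((m - (m>>7)) & 0x1b1b1b1b)
#
# e.g. a byte 0x80 becomes 0x00 ^ 0x1b = 0x1b = xtime(0x80); the rest of
# MixColumns is just xors of byte-rotated copies of s and s^xtime4(s),
# as enctransform_ref() spells out.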
471sub enctransform()
472{ my ($t3,$r20,$r21)=($acc2,"%r8d","%r9d");
473
474$code.=<<___;
475	mov	\$0x80808080,$t0
476	mov	\$0x80808080,$t1
477	and	$s0,$t0
478	and	$s1,$t1
479	mov	$t0,$acc0
480	mov	$t1,$acc1
481	shr	\$7,$t0
482	lea	($s0,$s0),$r20
483	shr	\$7,$t1
484	lea	($s1,$s1),$r21
485	sub	$t0,$acc0
486	sub	$t1,$acc1
487	and	\$0xfefefefe,$r20
488	and	\$0xfefefefe,$r21
489	and	\$0x1b1b1b1b,$acc0
490	and	\$0x1b1b1b1b,$acc1
491	mov	$s0,$t0
492	mov	$s1,$t1
493	xor	$acc0,$r20
494	xor	$acc1,$r21
495
496	xor	$r20,$s0
497	xor	$r21,$s1
498	 mov	\$0x80808080,$t2
499	rol	\$24,$s0
500	 mov	\$0x80808080,$t3
501	rol	\$24,$s1
502	 and	$s2,$t2
503	 and	$s3,$t3
504	xor	$r20,$s0
505	xor	$r21,$s1
506	 mov	$t2,$acc0
507	ror	\$16,$t0
508	 mov	$t3,$acc1
509	ror	\$16,$t1
510	 lea	($s2,$s2),$r20
511	 shr	\$7,$t2
512	xor	$t0,$s0
513	 shr	\$7,$t3
514	xor	$t1,$s1
515	ror	\$8,$t0
516	 lea	($s3,$s3),$r21
517	ror	\$8,$t1
518	 sub	$t2,$acc0
519	 sub	$t3,$acc1
520	xor	$t0,$s0
521	xor	$t1,$s1
522
523	and	\$0xfefefefe,$r20
524	and	\$0xfefefefe,$r21
525	and	\$0x1b1b1b1b,$acc0
526	and	\$0x1b1b1b1b,$acc1
527	mov	$s2,$t2
528	mov	$s3,$t3
529	xor	$acc0,$r20
530	xor	$acc1,$r21
531
532	ror	\$16,$t2
533	xor	$r20,$s2
534	ror	\$16,$t3
535	xor	$r21,$s3
536	rol	\$24,$s2
537	mov	0($sbox),$acc0			# prefetch Te4
538	rol	\$24,$s3
539	xor	$r20,$s2
540	mov	64($sbox),$acc1
541	xor	$r21,$s3
542	mov	128($sbox),$r20
543	xor	$t2,$s2
544	ror	\$8,$t2
545	xor	$t3,$s3
546	ror	\$8,$t3
547	xor	$t2,$s2
548	mov	192($sbox),$r21
549	xor	$t3,$s3
550___
551}
552
553$code.=<<___;
554.type	_x86_64_AES_encrypt_compact,\@abi-omnipotent
555.align	16
556_x86_64_AES_encrypt_compact:
557	lea	128($sbox),$inp			# size optimization
558	mov	0-128($inp),$acc1		# prefetch Te4
559	mov	32-128($inp),$acc2
560	mov	64-128($inp),$t0
561	mov	96-128($inp),$t1
562	mov	128-128($inp),$acc1
563	mov	160-128($inp),$acc2
564	mov	192-128($inp),$t0
565	mov	224-128($inp),$t1
566	jmp	.Lenc_loop_compact
567.align	16
568.Lenc_loop_compact:
569		xor	0($key),$s0		# xor with key
570		xor	4($key),$s1
571		xor	8($key),$s2
572		xor	12($key),$s3
573		lea	16($key),$key
574___
575		&enccompactvert();
576$code.=<<___;
577		cmp	16(%rsp),$key
578		je	.Lenc_compact_done
579___
580		&enctransform();
581$code.=<<___;
582	jmp	.Lenc_loop_compact
583.align	16
584.Lenc_compact_done:
585	xor	0($key),$s0
586	xor	4($key),$s1
587	xor	8($key),$s2
588	xor	12($key),$s3
589	.byte	0xf3,0xc3			# rep ret
590.size	_x86_64_AES_encrypt_compact,.-_x86_64_AES_encrypt_compact
591___
592
593# void AES_encrypt (const void *inp,void *out,const AES_KEY *key);
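# (arguments arrive in %rdi=inp, %rsi=out, %rdx=key per the ELF ABI; the
#  "\@function,3" type annotation below is what lets x86_64-xlate.pl add
#  the Win64 argument-shuffling prologue when that flavour is requested)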
594$code.=<<___;
595.globl	AES_encrypt
596.type	AES_encrypt,\@function,3
597.align	16
598.globl	asm_AES_encrypt
599.hidden	asm_AES_encrypt
600asm_AES_encrypt:
601AES_encrypt:
602.cfi_startproc
603	mov	%rsp,%rax
604.cfi_def_cfa_register	%rax
605	push	%rbx
606.cfi_push	%rbx
607	push	%rbp
608.cfi_push	%rbp
609	push	%r12
610.cfi_push	%r12
611	push	%r13
612.cfi_push	%r13
613	push	%r14
614.cfi_push	%r14
615	push	%r15
616.cfi_push	%r15
617
618	# allocate frame "above" key schedule
619	lea	-63(%rdx),%rcx	# %rdx is key argument
620	and	\$-64,%rsp
621	sub	%rsp,%rcx
622	neg	%rcx
623	and	\$0x3c0,%rcx
624	sub	%rcx,%rsp
625	sub	\$32,%rsp
626
627	mov	%rsi,16(%rsp)	# save out
628	mov	%rax,24(%rsp)	# save original stack pointer
629.cfi_cfa_expression	%rsp+24,deref,+8
630.Lenc_prologue:
631
632	mov	%rdx,$key
633	mov	240($key),$rnds	# load rounds
634
635	mov	0(%rdi),$s0	# load input vector
636	mov	4(%rdi),$s1
637	mov	8(%rdi),$s2
638	mov	12(%rdi),$s3
639
640	shl	\$4,$rnds
641	lea	($key,$rnds),%rbp
642	mov	$key,(%rsp)	# key schedule
643	mov	%rbp,8(%rsp)	# end of key schedule
644
645	# pick Te4 copy which can't "overlap" with stack frame or key schedule
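	# (four identical 256-byte copies of Te4 follow the 2KB Te table at
	# .LAES_Te+2048; the arithmetic below uses bits 8-9 of the distance
	# between that spot and the stack frame to pick a copy that should
	# not collide in L1 with the frame or the key schedule)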
646	lea	.LAES_Te+2048(%rip),$sbox
647	lea	768(%rsp),%rbp
648	sub	$sbox,%rbp
649	and	\$0x300,%rbp
650	lea	($sbox,%rbp),$sbox
651
652	call	_x86_64_AES_encrypt_compact
653
654	mov	16(%rsp),$out	# restore out
655	mov	24(%rsp),%rsi	# restore saved stack pointer
656.cfi_def_cfa	%rsi,8
657	mov	$s0,0($out)	# write output vector
658	mov	$s1,4($out)
659	mov	$s2,8($out)
660	mov	$s3,12($out)
661
662	mov	-48(%rsi),%r15
663.cfi_restore	%r15
664	mov	-40(%rsi),%r14
665.cfi_restore	%r14
666	mov	-32(%rsi),%r13
667.cfi_restore	%r13
668	mov	-24(%rsi),%r12
669.cfi_restore	%r12
670	mov	-16(%rsi),%rbp
671.cfi_restore	%rbp
672	mov	-8(%rsi),%rbx
673.cfi_restore	%rbx
674	lea	(%rsi),%rsp
675.cfi_def_cfa_register	%rsp
676.Lenc_epilogue:
677	ret
678.cfi_endproc
679.size	AES_encrypt,.-AES_encrypt
680___
681
682#------------------------------------------------------------------#
683
684sub decvert()
685{ my $t3="%r8d";	# zaps $inp!
686
687$code.=<<___;
688	# favor 3-way issue Opteron pipeline...
689	movzb	`&lo("$s0")`,$acc0
690	movzb	`&lo("$s1")`,$acc1
691	movzb	`&lo("$s2")`,$acc2
692	mov	0($sbox,$acc0,8),$t0
693	mov	0($sbox,$acc1,8),$t1
694	mov	0($sbox,$acc2,8),$t2
695
696	movzb	`&hi("$s3")`,$acc0
697	movzb	`&hi("$s0")`,$acc1
698	movzb	`&lo("$s3")`,$acc2
699	xor	3($sbox,$acc0,8),$t0
700	xor	3($sbox,$acc1,8),$t1
701	mov	0($sbox,$acc2,8),$t3
702
703	movzb	`&hi("$s1")`,$acc0
704	shr	\$16,$s0
705	movzb	`&hi("$s2")`,$acc2
706	xor	3($sbox,$acc0,8),$t2
707	shr	\$16,$s3
708	xor	3($sbox,$acc2,8),$t3
709
710	shr	\$16,$s1
711	lea	16($key),$key
712	shr	\$16,$s2
713
714	movzb	`&lo("$s2")`,$acc0
715	movzb	`&lo("$s3")`,$acc1
716	movzb	`&lo("$s0")`,$acc2
717	xor	2($sbox,$acc0,8),$t0
718	xor	2($sbox,$acc1,8),$t1
719	xor	2($sbox,$acc2,8),$t2
720
721	movzb	`&hi("$s1")`,$acc0
722	movzb	`&hi("$s2")`,$acc1
723	movzb	`&lo("$s1")`,$acc2
724	xor	1($sbox,$acc0,8),$t0
725	xor	1($sbox,$acc1,8),$t1
726	xor	2($sbox,$acc2,8),$t3
727
728	movzb	`&hi("$s3")`,$acc0
729	mov	12($key),$s3
730	movzb	`&hi("$s0")`,$acc2
731	xor	1($sbox,$acc0,8),$t2
732	mov	0($key),$s0
733	xor	1($sbox,$acc2,8),$t3
734
735	xor	$t0,$s0
736	mov	4($key),$s1
737	mov	8($key),$s2
738	xor	$t2,$s2
739	xor	$t1,$s1
740	xor	$t3,$s3
741___
742}
743
744sub declastvert()
745{ my $t3="%r8d";	# zaps $inp!
746
747$code.=<<___;
748	lea	2048($sbox),$sbox	# size optimization
749	movzb	`&lo("$s0")`,$acc0
750	movzb	`&lo("$s1")`,$acc1
751	movzb	`&lo("$s2")`,$acc2
752	movzb	($sbox,$acc0,1),$t0
753	movzb	($sbox,$acc1,1),$t1
754	movzb	($sbox,$acc2,1),$t2
755
756	movzb	`&lo("$s3")`,$acc0
757	movzb	`&hi("$s3")`,$acc1
758	movzb	`&hi("$s0")`,$acc2
759	movzb	($sbox,$acc0,1),$t3
760	movzb	($sbox,$acc1,1),$acc1	#$t0
761	movzb	($sbox,$acc2,1),$acc2	#$t1
762
763	shl	\$8,$acc1
764	shl	\$8,$acc2
765
766	xor	$acc1,$t0
767	xor	$acc2,$t1
768	shr	\$16,$s3
769
770	movzb	`&hi("$s1")`,$acc0
771	movzb	`&hi("$s2")`,$acc1
772	shr	\$16,$s0
773	movzb	($sbox,$acc0,1),$acc0	#$t2
774	movzb	($sbox,$acc1,1),$acc1	#$t3
775
776	shl	\$8,$acc0
777	shl	\$8,$acc1
778	shr	\$16,$s1
779	xor	$acc0,$t2
780	xor	$acc1,$t3
781	shr	\$16,$s2
782
783	movzb	`&lo("$s2")`,$acc0
784	movzb	`&lo("$s3")`,$acc1
785	movzb	`&lo("$s0")`,$acc2
786	movzb	($sbox,$acc0,1),$acc0	#$t0
787	movzb	($sbox,$acc1,1),$acc1	#$t1
788	movzb	($sbox,$acc2,1),$acc2	#$t2
789
790	shl	\$16,$acc0
791	shl	\$16,$acc1
792	shl	\$16,$acc2
793
794	xor	$acc0,$t0
795	xor	$acc1,$t1
796	xor	$acc2,$t2
797
798	movzb	`&lo("$s1")`,$acc0
799	movzb	`&hi("$s1")`,$acc1
800	movzb	`&hi("$s2")`,$acc2
801	movzb	($sbox,$acc0,1),$acc0	#$t3
802	movzb	($sbox,$acc1,1),$acc1	#$t0
803	movzb	($sbox,$acc2,1),$acc2	#$t1
804
805	shl	\$16,$acc0
806	shl	\$24,$acc1
807	shl	\$24,$acc2
808
809	xor	$acc0,$t3
810	xor	$acc1,$t0
811	xor	$acc2,$t1
812
813	movzb	`&hi("$s3")`,$acc0
814	movzb	`&hi("$s0")`,$acc1
815	mov	16+12($key),$s3
816	movzb	($sbox,$acc0,1),$acc0	#$t2
817	movzb	($sbox,$acc1,1),$acc1	#$t3
818	mov	16+0($key),$s0
819
820	shl	\$24,$acc0
821	shl	\$24,$acc1
822
823	xor	$acc0,$t2
824	xor	$acc1,$t3
825
826	mov	16+4($key),$s1
827	mov	16+8($key),$s2
828	lea	-2048($sbox),$sbox
829	xor	$t0,$s0
830	xor	$t1,$s1
831	xor	$t2,$s2
832	xor	$t3,$s3
833___
834}
835
836sub decstep()
837{ my ($i,@s) = @_;
838  my $tmp0=$acc0;
839  my $tmp1=$acc1;
840  my $tmp2=$acc2;
841  my $out=($t0,$t1,$t2,$s[0])[$i];
842
843	$code.="	mov	$s[0],$out\n"		if ($i!=3);
844			$tmp1=$s[2]			if ($i==3);
845	$code.="	mov	$s[2],$tmp1\n"		if ($i!=3);
846	$code.="	and	\$0xFF,$out\n";
847
848	$code.="	mov	0($sbox,$out,8),$out\n";
849	$code.="	shr	\$16,$tmp1\n";
850			$tmp2=$s[3]			if ($i==3);
851	$code.="	mov	$s[3],$tmp2\n"		if ($i!=3);
852
853			$tmp0=$s[1]			if ($i==3);
854	$code.="	movzb	".&hi($s[1]).",$tmp0\n";
855	$code.="	and	\$0xFF,$tmp1\n";
856	$code.="	shr	\$24,$tmp2\n";
857
858	$code.="	xor	3($sbox,$tmp0,8),$out\n";
859	$code.="	xor	2($sbox,$tmp1,8),$out\n";
860	$code.="	xor	1($sbox,$tmp2,8),$out\n";
861
862	$code.="	mov	$t2,$s[1]\n"		if ($i==3);
863	$code.="	mov	$t1,$s[2]\n"		if ($i==3);
864	$code.="	mov	$t0,$s[3]\n"		if ($i==3);
865	$code.="\n";
866}
867
868sub declast()
869{ my ($i,@s)=@_;
870  my $tmp0=$acc0;
871  my $tmp1=$acc1;
872  my $tmp2=$acc2;
873  my $out=($t0,$t1,$t2,$s[0])[$i];
874
875	$code.="	mov	$s[0],$out\n"		if ($i!=3);
876			$tmp1=$s[2]			if ($i==3);
877	$code.="	mov	$s[2],$tmp1\n"		if ($i!=3);
878	$code.="	and	\$0xFF,$out\n";
879
880	$code.="	movzb	2048($sbox,$out,1),$out\n";
881	$code.="	shr	\$16,$tmp1\n";
882			$tmp2=$s[3]			if ($i==3);
883	$code.="	mov	$s[3],$tmp2\n"		if ($i!=3);
884
885			$tmp0=$s[1]			if ($i==3);
886	$code.="	movzb	".&hi($s[1]).",$tmp0\n";
887	$code.="	and	\$0xFF,$tmp1\n";
888	$code.="	shr	\$24,$tmp2\n";
889
890	$code.="	movzb	2048($sbox,$tmp0,1),$tmp0\n";
891	$code.="	movzb	2048($sbox,$tmp1,1),$tmp1\n";
892	$code.="	movzb	2048($sbox,$tmp2,1),$tmp2\n";
893
894	$code.="	shl	\$8,$tmp0\n";
895	$code.="	shl	\$16,$tmp1\n";
896	$code.="	shl	\$24,$tmp2\n";
897
898	$code.="	xor	$tmp0,$out\n";
899	$code.="	mov	$t2,$s[1]\n"		if ($i==3);
900	$code.="	xor	$tmp1,$out\n";
901	$code.="	mov	$t1,$s[2]\n"		if ($i==3);
902	$code.="	xor	$tmp2,$out\n";
903	$code.="	mov	$t0,$s[3]\n"		if ($i==3);
904	$code.="\n";
905}
906
907$code.=<<___;
908.type	_x86_64_AES_decrypt,\@abi-omnipotent
909.align	16
910_x86_64_AES_decrypt:
911	xor	0($key),$s0			# xor with key
912	xor	4($key),$s1
913	xor	8($key),$s2
914	xor	12($key),$s3
915
916	mov	240($key),$rnds			# load key->rounds
917	sub	\$1,$rnds
918	jmp	.Ldec_loop
919.align	16
920.Ldec_loop:
921___
922	if ($verticalspin) { &decvert(); }
923	else {	&decstep(0,$s0,$s3,$s2,$s1);
924		&decstep(1,$s1,$s0,$s3,$s2);
925		&decstep(2,$s2,$s1,$s0,$s3);
926		&decstep(3,$s3,$s2,$s1,$s0);
927		$code.=<<___;
928		lea	16($key),$key
929		xor	0($key),$s0			# xor with key
930		xor	4($key),$s1
931		xor	8($key),$s2
932		xor	12($key),$s3
933___
934	}
935$code.=<<___;
936	sub	\$1,$rnds
937	jnz	.Ldec_loop
938___
939	if ($verticalspin) { &declastvert(); }
940	else {	&declast(0,$s0,$s3,$s2,$s1);
941		&declast(1,$s1,$s0,$s3,$s2);
942		&declast(2,$s2,$s1,$s0,$s3);
943		&declast(3,$s3,$s2,$s1,$s0);
944		$code.=<<___;
945		xor	16+0($key),$s0			# xor with key
946		xor	16+4($key),$s1
947		xor	16+8($key),$s2
948		xor	16+12($key),$s3
949___
950	}
951$code.=<<___;
952	.byte	0xf3,0xc3			# rep ret
953.size	_x86_64_AES_decrypt,.-_x86_64_AES_decrypt
954___
955
956sub deccompactvert()
957{ my ($t3,$t4,$t5)=("%r8d","%r9d","%r13d");
958
959$code.=<<___;
960	movzb	`&lo("$s0")`,$t0
961	movzb	`&lo("$s1")`,$t1
962	movzb	`&lo("$s2")`,$t2
963	movzb	`&lo("$s3")`,$t3
964	movzb	`&hi("$s3")`,$acc0
965	movzb	`&hi("$s0")`,$acc1
966	shr	\$16,$s3
967	movzb	`&hi("$s1")`,$acc2
968	movzb	($sbox,$t0,1),$t0
969	movzb	($sbox,$t1,1),$t1
970	movzb	($sbox,$t2,1),$t2
971	movzb	($sbox,$t3,1),$t3
972
973	movzb	($sbox,$acc0,1),$t4	#$t0
974	movzb	`&hi("$s2")`,$acc0
975	movzb	($sbox,$acc1,1),$t5	#$t1
976	movzb	($sbox,$acc2,1),$acc2	#$t2
977	movzb	($sbox,$acc0,1),$acc0	#$t3
978
979	shr	\$16,$s2
980	shl	\$8,$t5
981	shl	\$8,$t4
982	movzb	`&lo("$s2")`,$acc1
983	shr	\$16,$s0
984	xor	$t4,$t0
985	shr	\$16,$s1
986	movzb	`&lo("$s3")`,$t4
987
988	shl	\$8,$acc2
989	xor	$t5,$t1
990	shl	\$8,$acc0
991	movzb	`&lo("$s0")`,$t5
992	movzb	($sbox,$acc1,1),$acc1	#$t0
993	xor	$acc2,$t2
994	movzb	`&lo("$s1")`,$acc2
995
996	shl	\$16,$acc1
997	xor	$acc0,$t3
998	movzb	($sbox,$t4,1),$t4	#$t1
999	movzb	`&hi("$s1")`,$acc0
1000	movzb	($sbox,$acc2,1),$acc2	#$t3
1001	xor	$acc1,$t0
1002	movzb	($sbox,$t5,1),$t5	#$t2
1003	movzb	`&hi("$s2")`,$acc1
1004
1005	shl	\$16,$acc2
1006	shl	\$16,$t4
1007	shl	\$16,$t5
1008	xor	$acc2,$t3
1009	movzb	`&hi("$s3")`,$acc2
1010	xor	$t4,$t1
1011	shr	\$8,$s0
1012	xor	$t5,$t2
1013
1014	movzb	($sbox,$acc0,1),$acc0	#$t0
1015	movzb	($sbox,$acc1,1),$s1	#$t1
1016	movzb	($sbox,$acc2,1),$s2	#$t2
1017	movzb	($sbox,$s0,1),$s3	#$t3
1018
1019	mov	$t0,$s0
1020	shl	\$24,$acc0
1021	shl	\$24,$s1
1022	shl	\$24,$s2
1023	xor	$acc0,$s0
1024	shl	\$24,$s3
1025	xor	$t1,$s1
1026	xor	$t2,$s2
1027	xor	$t3,$s3
1028___
1029}
1030
# parallelized version! input is a pair of 64-bit values: %rax=s1.s0
# and %rcx=s3.s2, output is four 32-bit values: %eax=s0, %ebx=s1,
# %ecx=s2 and %edx=s3.
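# Per 32-bit word it applies the same InvMixColumns identity that the
# scalar deckey_ref() further down spells out:
#
#	tp2 = xtime(tp1), tp4 = xtime(tp2), tp8 = xtime(tp4)
#	tp9 = tp1^tp8, tpb = tp9^tp2, tpd = tp9^tp4, tpe = tp2^tp4^tp8
#	result = tpe ^ ROL(tp9,8) ^ ROL(tpd,16) ^ ROL(tpb,24)
#
# only interleaved so that two words are processed per 64-bit register.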
1034sub dectransform()
1035{ my ($tp10,$tp20,$tp40,$tp80,$acc0)=("%rax","%r8", "%r9", "%r10","%rbx");
1036  my ($tp18,$tp28,$tp48,$tp88,$acc8)=("%rcx","%r11","%r12","%r13","%rdx");
1037  my $prefetch = shift;
1038
1039$code.=<<___;
1040	mov	$mask80,$tp40
1041	mov	$mask80,$tp48
1042	and	$tp10,$tp40
1043	and	$tp18,$tp48
1044	mov	$tp40,$acc0
1045	mov	$tp48,$acc8
1046	shr	\$7,$tp40
1047	lea	($tp10,$tp10),$tp20
1048	shr	\$7,$tp48
1049	lea	($tp18,$tp18),$tp28
1050	sub	$tp40,$acc0
1051	sub	$tp48,$acc8
1052	and	$maskfe,$tp20
1053	and	$maskfe,$tp28
1054	and	$mask1b,$acc0
1055	and	$mask1b,$acc8
1056	xor	$acc0,$tp20
1057	xor	$acc8,$tp28
1058	mov	$mask80,$tp80
1059	mov	$mask80,$tp88
1060
1061	and	$tp20,$tp80
1062	and	$tp28,$tp88
1063	mov	$tp80,$acc0
1064	mov	$tp88,$acc8
1065	shr	\$7,$tp80
1066	lea	($tp20,$tp20),$tp40
1067	shr	\$7,$tp88
1068	lea	($tp28,$tp28),$tp48
1069	sub	$tp80,$acc0
1070	sub	$tp88,$acc8
1071	and	$maskfe,$tp40
1072	and	$maskfe,$tp48
1073	and	$mask1b,$acc0
1074	and	$mask1b,$acc8
1075	xor	$acc0,$tp40
1076	xor	$acc8,$tp48
1077	mov	$mask80,$tp80
1078	mov	$mask80,$tp88
1079
1080	and	$tp40,$tp80
1081	and	$tp48,$tp88
1082	mov	$tp80,$acc0
1083	mov	$tp88,$acc8
1084	shr	\$7,$tp80
1085	 xor	$tp10,$tp20		# tp2^=tp1
1086	shr	\$7,$tp88
1087	 xor	$tp18,$tp28		# tp2^=tp1
1088	sub	$tp80,$acc0
1089	sub	$tp88,$acc8
1090	lea	($tp40,$tp40),$tp80
1091	lea	($tp48,$tp48),$tp88
1092	 xor	$tp10,$tp40		# tp4^=tp1
1093	 xor	$tp18,$tp48		# tp4^=tp1
1094	and	$maskfe,$tp80
1095	and	$maskfe,$tp88
1096	and	$mask1b,$acc0
1097	and	$mask1b,$acc8
1098	xor	$acc0,$tp80
1099	xor	$acc8,$tp88
1100
1101	xor	$tp80,$tp10		# tp1^=tp8
1102	xor	$tp88,$tp18		# tp1^=tp8
1103	xor	$tp80,$tp20		# tp2^tp1^=tp8
1104	xor	$tp88,$tp28		# tp2^tp1^=tp8
1105	mov	$tp10,$acc0
1106	mov	$tp18,$acc8
1107	xor	$tp80,$tp40		# tp4^tp1^=tp8
1108	shr	\$32,$acc0
1109	xor	$tp88,$tp48		# tp4^tp1^=tp8
1110	shr	\$32,$acc8
1111	xor	$tp20,$tp80		# tp8^=tp8^tp2^tp1=tp2^tp1
1112	rol	\$8,`&LO("$tp10")`	# ROTATE(tp1^tp8,8)
1113	xor	$tp28,$tp88		# tp8^=tp8^tp2^tp1=tp2^tp1
1114	rol	\$8,`&LO("$tp18")`	# ROTATE(tp1^tp8,8)
1115	xor	$tp40,$tp80		# tp2^tp1^=tp8^tp4^tp1=tp8^tp4^tp2
1116	rol	\$8,`&LO("$acc0")`	# ROTATE(tp1^tp8,8)
1117	xor	$tp48,$tp88		# tp2^tp1^=tp8^tp4^tp1=tp8^tp4^tp2
1118
1119	rol	\$8,`&LO("$acc8")`	# ROTATE(tp1^tp8,8)
1120	xor	`&LO("$tp80")`,`&LO("$tp10")`
1121	shr	\$32,$tp80
1122	xor	`&LO("$tp88")`,`&LO("$tp18")`
1123	shr	\$32,$tp88
1124	xor	`&LO("$tp80")`,`&LO("$acc0")`
1125	xor	`&LO("$tp88")`,`&LO("$acc8")`
1126
1127	mov	$tp20,$tp80
1128	rol	\$24,`&LO("$tp20")`	# ROTATE(tp2^tp1^tp8,24)
1129	mov	$tp28,$tp88
1130	rol	\$24,`&LO("$tp28")`	# ROTATE(tp2^tp1^tp8,24)
1131	shr	\$32,$tp80
1132	xor	`&LO("$tp20")`,`&LO("$tp10")`
1133	shr	\$32,$tp88
1134	xor	`&LO("$tp28")`,`&LO("$tp18")`
1135	rol	\$24,`&LO("$tp80")`	# ROTATE(tp2^tp1^tp8,24)
1136	mov	$tp40,$tp20
1137	rol	\$24,`&LO("$tp88")`	# ROTATE(tp2^tp1^tp8,24)
1138	mov	$tp48,$tp28
1139	shr	\$32,$tp20
1140	xor	`&LO("$tp80")`,`&LO("$acc0")`
1141	shr	\$32,$tp28
1142	xor	`&LO("$tp88")`,`&LO("$acc8")`
1143
1144	`"mov	0($sbox),$mask80"	if ($prefetch)`
1145	rol	\$16,`&LO("$tp40")`	# ROTATE(tp4^tp1^tp8,16)
1146	`"mov	64($sbox),$maskfe"	if ($prefetch)`
1147	rol	\$16,`&LO("$tp48")`	# ROTATE(tp4^tp1^tp8,16)
1148	`"mov	128($sbox),$mask1b"	if ($prefetch)`
1149	rol	\$16,`&LO("$tp20")`	# ROTATE(tp4^tp1^tp8,16)
1150	`"mov	192($sbox),$tp80"	if ($prefetch)`
1151	xor	`&LO("$tp40")`,`&LO("$tp10")`
1152	rol	\$16,`&LO("$tp28")`	# ROTATE(tp4^tp1^tp8,16)
1153	xor	`&LO("$tp48")`,`&LO("$tp18")`
1154	`"mov	256($sbox),$tp88"	if ($prefetch)`
1155	xor	`&LO("$tp20")`,`&LO("$acc0")`
1156	xor	`&LO("$tp28")`,`&LO("$acc8")`
1157___
1158}
1159
1160$code.=<<___;
1161.type	_x86_64_AES_decrypt_compact,\@abi-omnipotent
1162.align	16
1163_x86_64_AES_decrypt_compact:
1164	lea	128($sbox),$inp			# size optimization
1165	mov	0-128($inp),$acc1		# prefetch Td4
1166	mov	32-128($inp),$acc2
1167	mov	64-128($inp),$t0
1168	mov	96-128($inp),$t1
1169	mov	128-128($inp),$acc1
1170	mov	160-128($inp),$acc2
1171	mov	192-128($inp),$t0
1172	mov	224-128($inp),$t1
1173	jmp	.Ldec_loop_compact
1174
1175.align	16
1176.Ldec_loop_compact:
1177		xor	0($key),$s0		# xor with key
1178		xor	4($key),$s1
1179		xor	8($key),$s2
1180		xor	12($key),$s3
1181		lea	16($key),$key
1182___
1183		&deccompactvert();
1184$code.=<<___;
1185		cmp	16(%rsp),$key
1186		je	.Ldec_compact_done
1187
1188		mov	256+0($sbox),$mask80
1189		shl	\$32,%rbx
1190		shl	\$32,%rdx
1191		mov	256+8($sbox),$maskfe
1192		or	%rbx,%rax
1193		or	%rdx,%rcx
1194		mov	256+16($sbox),$mask1b
1195___
1196		&dectransform(1);
1197$code.=<<___;
1198	jmp	.Ldec_loop_compact
1199.align	16
1200.Ldec_compact_done:
1201	xor	0($key),$s0
1202	xor	4($key),$s1
1203	xor	8($key),$s2
1204	xor	12($key),$s3
1205	.byte	0xf3,0xc3			# rep ret
1206.size	_x86_64_AES_decrypt_compact,.-_x86_64_AES_decrypt_compact
1207___
1208
1209# void AES_decrypt (const void *inp,void *out,const AES_KEY *key);
1210$code.=<<___;
1211.globl	AES_decrypt
1212.type	AES_decrypt,\@function,3
1213.align	16
1214.globl	asm_AES_decrypt
1215.hidden	asm_AES_decrypt
1216asm_AES_decrypt:
1217AES_decrypt:
1218.cfi_startproc
1219	mov	%rsp,%rax
1220.cfi_def_cfa_register	%rax
1221	push	%rbx
1222.cfi_push	%rbx
1223	push	%rbp
1224.cfi_push	%rbp
1225	push	%r12
1226.cfi_push	%r12
1227	push	%r13
1228.cfi_push	%r13
1229	push	%r14
1230.cfi_push	%r14
1231	push	%r15
1232.cfi_push	%r15
1233
1234	# allocate frame "above" key schedule
1235	lea	-63(%rdx),%rcx	# %rdx is key argument
1236	and	\$-64,%rsp
1237	sub	%rsp,%rcx
1238	neg	%rcx
1239	and	\$0x3c0,%rcx
1240	sub	%rcx,%rsp
1241	sub	\$32,%rsp
1242
1243	mov	%rsi,16(%rsp)	# save out
1244	mov	%rax,24(%rsp)	# save original stack pointer
1245.cfi_cfa_expression	%rsp+24,deref,+8
1246.Ldec_prologue:
1247
1248	mov	%rdx,$key
1249	mov	240($key),$rnds	# load rounds
1250
1251	mov	0(%rdi),$s0	# load input vector
1252	mov	4(%rdi),$s1
1253	mov	8(%rdi),$s2
1254	mov	12(%rdi),$s3
1255
1256	shl	\$4,$rnds
1257	lea	($key,$rnds),%rbp
1258	mov	$key,(%rsp)	# key schedule
1259	mov	%rbp,8(%rsp)	# end of key schedule
1260
1261	# pick Td4 copy which can't "overlap" with stack frame or key schedule
1262	lea	.LAES_Td+2048(%rip),$sbox
1263	lea	768(%rsp),%rbp
1264	sub	$sbox,%rbp
1265	and	\$0x300,%rbp
1266	lea	($sbox,%rbp),$sbox
1267	shr	\$3,%rbp	# recall "magic" constants!
1268	add	%rbp,$sbox
1269
1270	call	_x86_64_AES_decrypt_compact
1271
1272	mov	16(%rsp),$out	# restore out
1273	mov	24(%rsp),%rsi	# restore saved stack pointer
1274.cfi_def_cfa	%rsi,8
1275	mov	$s0,0($out)	# write output vector
1276	mov	$s1,4($out)
1277	mov	$s2,8($out)
1278	mov	$s3,12($out)
1279
1280	mov	-48(%rsi),%r15
1281.cfi_restore	%r15
1282	mov	-40(%rsi),%r14
1283.cfi_restore	%r14
1284	mov	-32(%rsi),%r13
1285.cfi_restore	%r13
1286	mov	-24(%rsi),%r12
1287.cfi_restore	%r12
1288	mov	-16(%rsi),%rbp
1289.cfi_restore	%rbp
1290	mov	-8(%rsi),%rbx
1291.cfi_restore	%rbx
1292	lea	(%rsi),%rsp
1293.cfi_def_cfa_register	%rsp
1294.Ldec_epilogue:
1295	ret
1296.cfi_endproc
1297.size	AES_decrypt,.-AES_decrypt
1298___
1299#------------------------------------------------------------------#
1300
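# enckey() is the key-schedule core: given the previous round-key word
# in %edx (and the rcon index in %ecx) it xors SubWord(RotWord(word)) and
# the round constant into %eax, in the little-endian byte order used
# throughout this module; Te4 sits at .LAES_Te+2048 (hence the -128 bias
# off %rbp) and rcon right after it at +1024.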
1301sub enckey()
1302{
1303$code.=<<___;
1304	movz	%dl,%esi		# rk[i]>>0
1305	movzb	-128(%rbp,%rsi),%ebx
1306	movz	%dh,%esi		# rk[i]>>8
1307	shl	\$24,%ebx
1308	xor	%ebx,%eax
1309
1310	movzb	-128(%rbp,%rsi),%ebx
1311	shr	\$16,%edx
1312	movz	%dl,%esi		# rk[i]>>16
1313	xor	%ebx,%eax
1314
1315	movzb	-128(%rbp,%rsi),%ebx
1316	movz	%dh,%esi		# rk[i]>>24
1317	shl	\$8,%ebx
1318	xor	%ebx,%eax
1319
1320	movzb	-128(%rbp,%rsi),%ebx
1321	shl	\$16,%ebx
1322	xor	%ebx,%eax
1323
1324	xor	1024-128(%rbp,%rcx,4),%eax		# rcon
1325___
1326}
1327
1328# int AES_set_encrypt_key(const unsigned char *userKey, const int bits,
1329#                        AES_KEY *key)
1330$code.=<<___;
1331.globl	AES_set_encrypt_key
1332.type	AES_set_encrypt_key,\@function,3
1333.align	16
1334AES_set_encrypt_key:
1335.cfi_startproc
1336	push	%rbx
1337.cfi_push	%rbx
1338	push	%rbp
1339.cfi_push	%rbp
	push	%r12			# redundant, but allows sharing the
1341.cfi_push	%r12
1342	push	%r13			# exception handler...
1343.cfi_push	%r13
1344	push	%r14
1345.cfi_push	%r14
1346	push	%r15
1347.cfi_push	%r15
1348	sub	\$8,%rsp
1349.cfi_adjust_cfa_offset	8
1350.Lenc_key_prologue:
1351
1352	call	_x86_64_AES_set_encrypt_key
1353
1354	mov	40(%rsp),%rbp
1355.cfi_restore	%rbp
1356	mov	48(%rsp),%rbx
1357.cfi_restore	%rbx
1358	add	\$56,%rsp
1359.cfi_adjust_cfa_offset	-56
1360.Lenc_key_epilogue:
1361	ret
1362.cfi_endproc
1363.size	AES_set_encrypt_key,.-AES_set_encrypt_key
1364
1365.type	_x86_64_AES_set_encrypt_key,\@abi-omnipotent
1366.align	16
1367_x86_64_AES_set_encrypt_key:
1368	mov	%esi,%ecx			# %ecx=bits
1369	mov	%rdi,%rsi			# %rsi=userKey
1370	mov	%rdx,%rdi			# %rdi=key
1371
1372	test	\$-1,%rsi
1373	jz	.Lbadpointer
1374	test	\$-1,%rdi
1375	jz	.Lbadpointer
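	# (test \$-1,reg just ands the register with all ones, so ZF is set
	# only for a NULL pointer; the two jz above reject NULL userKey and
	# key arguments)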
1376
1377	lea	.LAES_Te(%rip),%rbp
1378	lea	2048+128(%rbp),%rbp
1379
1380	# prefetch Te4
1381	mov	0-128(%rbp),%eax
1382	mov	32-128(%rbp),%ebx
1383	mov	64-128(%rbp),%r8d
1384	mov	96-128(%rbp),%edx
1385	mov	128-128(%rbp),%eax
1386	mov	160-128(%rbp),%ebx
1387	mov	192-128(%rbp),%r8d
1388	mov	224-128(%rbp),%edx
1389
1390	cmp	\$128,%ecx
1391	je	.L10rounds
1392	cmp	\$192,%ecx
1393	je	.L12rounds
1394	cmp	\$256,%ecx
1395	je	.L14rounds
1396	mov	\$-2,%rax			# invalid number of bits
1397	jmp	.Lexit
1398
1399.L10rounds:
1400	mov	0(%rsi),%rax			# copy first 4 dwords
1401	mov	8(%rsi),%rdx
1402	mov	%rax,0(%rdi)
1403	mov	%rdx,8(%rdi)
1404
1405	shr	\$32,%rdx
1406	xor	%ecx,%ecx
1407	jmp	.L10shortcut
1408.align	4
1409.L10loop:
1410		mov	0(%rdi),%eax			# rk[0]
1411		mov	12(%rdi),%edx			# rk[3]
1412.L10shortcut:
1413___
1414		&enckey	();
1415$code.=<<___;
1416		mov	%eax,16(%rdi)			# rk[4]
1417		xor	4(%rdi),%eax
1418		mov	%eax,20(%rdi)			# rk[5]
1419		xor	8(%rdi),%eax
1420		mov	%eax,24(%rdi)			# rk[6]
1421		xor	12(%rdi),%eax
1422		mov	%eax,28(%rdi)			# rk[7]
1423		add	\$1,%ecx
1424		lea	16(%rdi),%rdi
1425		cmp	\$10,%ecx
1426	jl	.L10loop
1427
1428	movl	\$10,80(%rdi)			# setup number of rounds
1429	xor	%rax,%rax
1430	jmp	.Lexit
1431
1432.L12rounds:
1433	mov	0(%rsi),%rax			# copy first 6 dwords
1434	mov	8(%rsi),%rbx
1435	mov	16(%rsi),%rdx
1436	mov	%rax,0(%rdi)
1437	mov	%rbx,8(%rdi)
1438	mov	%rdx,16(%rdi)
1439
1440	shr	\$32,%rdx
1441	xor	%ecx,%ecx
1442	jmp	.L12shortcut
1443.align	4
1444.L12loop:
1445		mov	0(%rdi),%eax			# rk[0]
1446		mov	20(%rdi),%edx			# rk[5]
1447.L12shortcut:
1448___
1449		&enckey	();
1450$code.=<<___;
1451		mov	%eax,24(%rdi)			# rk[6]
1452		xor	4(%rdi),%eax
1453		mov	%eax,28(%rdi)			# rk[7]
1454		xor	8(%rdi),%eax
1455		mov	%eax,32(%rdi)			# rk[8]
1456		xor	12(%rdi),%eax
1457		mov	%eax,36(%rdi)			# rk[9]
1458
1459		cmp	\$7,%ecx
1460		je	.L12break
1461		add	\$1,%ecx
1462
1463		xor	16(%rdi),%eax
1464		mov	%eax,40(%rdi)			# rk[10]
1465		xor	20(%rdi),%eax
1466		mov	%eax,44(%rdi)			# rk[11]
1467
1468		lea	24(%rdi),%rdi
1469	jmp	.L12loop
1470.L12break:
1471	movl	\$12,72(%rdi)		# setup number of rounds
1472	xor	%rax,%rax
1473	jmp	.Lexit
1474
1475.L14rounds:
1476	mov	0(%rsi),%rax			# copy first 8 dwords
1477	mov	8(%rsi),%rbx
1478	mov	16(%rsi),%rcx
1479	mov	24(%rsi),%rdx
1480	mov	%rax,0(%rdi)
1481	mov	%rbx,8(%rdi)
1482	mov	%rcx,16(%rdi)
1483	mov	%rdx,24(%rdi)
1484
1485	shr	\$32,%rdx
1486	xor	%ecx,%ecx
1487	jmp	.L14shortcut
1488.align	4
1489.L14loop:
1490		mov	0(%rdi),%eax			# rk[0]
		mov	28(%rdi),%edx			# rk[7]
1492.L14shortcut:
1493___
1494		&enckey	();
1495$code.=<<___;
1496		mov	%eax,32(%rdi)			# rk[8]
1497		xor	4(%rdi),%eax
1498		mov	%eax,36(%rdi)			# rk[9]
1499		xor	8(%rdi),%eax
1500		mov	%eax,40(%rdi)			# rk[10]
1501		xor	12(%rdi),%eax
1502		mov	%eax,44(%rdi)			# rk[11]
1503
1504		cmp	\$6,%ecx
1505		je	.L14break
1506		add	\$1,%ecx
1507
1508		mov	%eax,%edx
1509		mov	16(%rdi),%eax			# rk[4]
1510		movz	%dl,%esi			# rk[11]>>0
1511		movzb	-128(%rbp,%rsi),%ebx
1512		movz	%dh,%esi			# rk[11]>>8
1513		xor	%ebx,%eax
1514
1515		movzb	-128(%rbp,%rsi),%ebx
1516		shr	\$16,%edx
1517		shl	\$8,%ebx
1518		movz	%dl,%esi			# rk[11]>>16
1519		xor	%ebx,%eax
1520
1521		movzb	-128(%rbp,%rsi),%ebx
1522		movz	%dh,%esi			# rk[11]>>24
1523		shl	\$16,%ebx
1524		xor	%ebx,%eax
1525
1526		movzb	-128(%rbp,%rsi),%ebx
1527		shl	\$24,%ebx
1528		xor	%ebx,%eax
1529
1530		mov	%eax,48(%rdi)			# rk[12]
1531		xor	20(%rdi),%eax
1532		mov	%eax,52(%rdi)			# rk[13]
1533		xor	24(%rdi),%eax
1534		mov	%eax,56(%rdi)			# rk[14]
1535		xor	28(%rdi),%eax
1536		mov	%eax,60(%rdi)			# rk[15]
1537
1538		lea	32(%rdi),%rdi
1539	jmp	.L14loop
1540.L14break:
1541	movl	\$14,48(%rdi)		# setup number of rounds
1542	xor	%rax,%rax
1543	jmp	.Lexit
1544
1545.Lbadpointer:
1546	mov	\$-1,%rax
1547.Lexit:
1548	.byte	0xf3,0xc3			# rep ret
1549.size	_x86_64_AES_set_encrypt_key,.-_x86_64_AES_set_encrypt_key
1550___
1551
1552sub deckey_ref()
1553{ my ($i,$ptr,$te,$td) = @_;
1554  my ($tp1,$tp2,$tp4,$tp8,$acc)=("%eax","%ebx","%edi","%edx","%r8d");
1555$code.=<<___;
1556	mov	$i($ptr),$tp1
1557	mov	$tp1,$acc
1558	and	\$0x80808080,$acc
1559	mov	$acc,$tp4
1560	shr	\$7,$tp4
1561	lea	0($tp1,$tp1),$tp2
1562	sub	$tp4,$acc
1563	and	\$0xfefefefe,$tp2
1564	and	\$0x1b1b1b1b,$acc
1565	xor	$tp2,$acc
1566	mov	$acc,$tp2
1567
1568	and	\$0x80808080,$acc
1569	mov	$acc,$tp8
1570	shr	\$7,$tp8
1571	lea	0($tp2,$tp2),$tp4
1572	sub	$tp8,$acc
1573	and	\$0xfefefefe,$tp4
1574	and	\$0x1b1b1b1b,$acc
1575	 xor	$tp1,$tp2		# tp2^tp1
1576	xor	$tp4,$acc
1577	mov	$acc,$tp4
1578
1579	and	\$0x80808080,$acc
1580	mov	$acc,$tp8
1581	shr	\$7,$tp8
1582	sub	$tp8,$acc
1583	lea	0($tp4,$tp4),$tp8
1584	 xor	$tp1,$tp4		# tp4^tp1
1585	and	\$0xfefefefe,$tp8
1586	and	\$0x1b1b1b1b,$acc
1587	xor	$acc,$tp8
1588
1589	xor	$tp8,$tp1		# tp1^tp8
1590	rol	\$8,$tp1		# ROTATE(tp1^tp8,8)
1591	xor	$tp8,$tp2		# tp2^tp1^tp8
1592	xor	$tp8,$tp4		# tp4^tp1^tp8
1593	xor	$tp2,$tp8
1594	xor	$tp4,$tp8		# tp8^(tp8^tp4^tp1)^(tp8^tp2^tp1)=tp8^tp4^tp2
1595
1596	xor	$tp8,$tp1
1597	rol	\$24,$tp2		# ROTATE(tp2^tp1^tp8,24)
1598	xor	$tp2,$tp1
1599	rol	\$16,$tp4		# ROTATE(tp4^tp1^tp8,16)
1600	xor	$tp4,$tp1
1601
1602	mov	$tp1,$i($ptr)
1603___
1604}
1605
1606# int AES_set_decrypt_key(const unsigned char *userKey, const int bits,
1607#                        AES_KEY *key)
1608$code.=<<___;
1609.globl	AES_set_decrypt_key
1610.type	AES_set_decrypt_key,\@function,3
1611.align	16
1612AES_set_decrypt_key:
1613.cfi_startproc
1614	push	%rbx
1615.cfi_push	%rbx
1616	push	%rbp
1617.cfi_push	%rbp
1618	push	%r12
1619.cfi_push	%r12
1620	push	%r13
1621.cfi_push	%r13
1622	push	%r14
1623.cfi_push	%r14
1624	push	%r15
1625.cfi_push	%r15
1626	push	%rdx			# save key schedule
1627.cfi_adjust_cfa_offset	8
1628.Ldec_key_prologue:
1629
1630	call	_x86_64_AES_set_encrypt_key
1631	mov	(%rsp),%r8		# restore key schedule
1632	cmp	\$0,%eax
1633	jne	.Labort
1634
1635	mov	240(%r8),%r14d		# pull number of rounds
1636	xor	%rdi,%rdi
1637	lea	(%rdi,%r14d,4),%rcx
1638	mov	%r8,%rsi
1639	lea	(%r8,%rcx,4),%rdi	# pointer to last chunk
1640.align	4
1641.Linvert:
1642		mov	0(%rsi),%rax
1643		mov	8(%rsi),%rbx
1644		mov	0(%rdi),%rcx
1645		mov	8(%rdi),%rdx
1646		mov	%rax,0(%rdi)
1647		mov	%rbx,8(%rdi)
1648		mov	%rcx,0(%rsi)
1649		mov	%rdx,8(%rsi)
1650		lea	16(%rsi),%rsi
1651		lea	-16(%rdi),%rdi
1652		cmp	%rsi,%rdi
1653	jne	.Linvert
1654
1655	lea	.LAES_Te+2048+1024(%rip),%rax	# rcon
1656
1657	mov	40(%rax),$mask80
1658	mov	48(%rax),$maskfe
1659	mov	56(%rax),$mask1b
1660
1661	mov	%r8,$key
1662	sub	\$1,%r14d
1663.align	4
1664.Lpermute:
1665		lea	16($key),$key
1666		mov	0($key),%rax
1667		mov	8($key),%rcx
1668___
1669		&dectransform ();
1670$code.=<<___;
1671		mov	%eax,0($key)
1672		mov	%ebx,4($key)
1673		mov	%ecx,8($key)
1674		mov	%edx,12($key)
1675		sub	\$1,%r14d
1676	jnz	.Lpermute
1677
1678	xor	%rax,%rax
1679.Labort:
1680	mov	8(%rsp),%r15
1681.cfi_restore	%r15
1682	mov	16(%rsp),%r14
1683.cfi_restore	%r14
1684	mov	24(%rsp),%r13
1685.cfi_restore	%r13
1686	mov	32(%rsp),%r12
1687.cfi_restore	%r12
1688	mov	40(%rsp),%rbp
1689.cfi_restore	%rbp
1690	mov	48(%rsp),%rbx
1691.cfi_restore	%rbx
1692	add	\$56,%rsp
1693.cfi_adjust_cfa_offset	-56
1694.Ldec_key_epilogue:
1695	ret
1696.cfi_endproc
1697.size	AES_set_decrypt_key,.-AES_set_decrypt_key
1698___
1699
# void AES_cbc_encrypt (const unsigned char *inp, unsigned char *out,
#			size_t length, const AES_KEY *key,
#			unsigned char *ivp, const int enc);
1703{
1704# stack frame layout
1705# -8(%rsp)		return address
1706my $keyp="0(%rsp)";		# one to pass as $key
1707my $keyend="8(%rsp)";		# &(keyp->rd_key[4*keyp->rounds])
1708my $_rsp="16(%rsp)";		# saved %rsp
1709my $_inp="24(%rsp)";		# copy of 1st parameter, inp
1710my $_out="32(%rsp)";		# copy of 2nd parameter, out
1711my $_len="40(%rsp)";		# copy of 3rd parameter, length
1712my $_key="48(%rsp)";		# copy of 4th parameter, key
1713my $_ivp="56(%rsp)";		# copy of 5th parameter, ivp
1714my $ivec="64(%rsp)";		# ivec[16]
1715my $aes_key="80(%rsp)";		# copy of aes_key
1716my $mark="80+240(%rsp)";	# copy of aes_key->rounds
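# Note that the fast path below may copy the whole key schedule into
# $aes_key on the stack (.Lcbc_do_ecopy) when the caller's copy would
# alias the AES_T[ed] tables modulo 4096; $mark is non-zero exactly when
# that happened, so .Lcbc_fast_cleanup knows whether to wipe it.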
1717
1718$code.=<<___;
1719.globl	AES_cbc_encrypt
1720.type	AES_cbc_encrypt,\@function,6
1721.align	16
1722.extern	OPENSSL_ia32cap_P
1723.globl	asm_AES_cbc_encrypt
1724.hidden	asm_AES_cbc_encrypt
1725asm_AES_cbc_encrypt:
1726AES_cbc_encrypt:
1727.cfi_startproc
1728	cmp	\$0,%rdx	# check length
1729	je	.Lcbc_epilogue
1730	pushfq
1731.cfi_push	49		# %rflags
1732	push	%rbx
1733.cfi_push	%rbx
1734	push	%rbp
1735.cfi_push	%rbp
1736	push	%r12
1737.cfi_push	%r12
1738	push	%r13
1739.cfi_push	%r13
1740	push	%r14
1741.cfi_push	%r14
1742	push	%r15
1743.cfi_push	%r15
1744.Lcbc_prologue:
1745
1746	cld
1747	mov	%r9d,%r9d	# clear upper half of enc
1748
1749	lea	.LAES_Te(%rip),$sbox
1750	lea	.LAES_Td(%rip),%r10
1751	cmp	\$0,%r9
1752	cmoveq	%r10,$sbox
1753
1754	mov	OPENSSL_ia32cap_P(%rip),%r10d
1755	cmp	\$$speed_limit,%rdx
1756	jb	.Lcbc_slow_prologue
1757	test	\$15,%rdx
1758	jnz	.Lcbc_slow_prologue
1759	bt	\$28,%r10d
1760	jc	.Lcbc_slow_prologue
1761
1762	# allocate aligned stack frame...
1763	lea	-88-248(%rsp),$key
1764	and	\$-64,$key
1765
1766	# ... and make sure it doesn't alias with AES_T[ed] modulo 4096
1767	mov	$sbox,%r10
1768	lea	2304($sbox),%r11
1769	mov	$key,%r12
1770	and	\$0xFFF,%r10	# s = $sbox&0xfff
1771	and	\$0xFFF,%r11	# e = ($sbox+2048)&0xfff
1772	and	\$0xFFF,%r12	# p = %rsp&0xfff
1773
	cmp	%r11,%r12	# if (p>=e) %rsp -= (p-e);
1775	jb	.Lcbc_te_break_out
1776	sub	%r11,%r12
1777	sub	%r12,$key
1778	jmp	.Lcbc_te_ok
1779.Lcbc_te_break_out:		# else %rsp -= (p-s)&0xfff + framesz
1780	sub	%r10,%r12
1781	and	\$0xFFF,%r12
1782	add	\$320,%r12
1783	sub	%r12,$key
1784.align	4
1785.Lcbc_te_ok:
1786
1787	xchg	%rsp,$key
1788.cfi_def_cfa_register	$key
1789	#add	\$8,%rsp	# reserve for return address!
1790	mov	$key,$_rsp	# save %rsp
1791.cfi_cfa_expression	$_rsp,deref,+64
1792.Lcbc_fast_body:
1793	mov	%rdi,$_inp	# save copy of inp
1794	mov	%rsi,$_out	# save copy of out
1795	mov	%rdx,$_len	# save copy of len
1796	mov	%rcx,$_key	# save copy of key
1797	mov	%r8,$_ivp	# save copy of ivp
1798	movl	\$0,$mark	# copy of aes_key->rounds = 0;
1799	mov	%r8,%rbp	# rearrange input arguments
1800	mov	%r9,%rbx
1801	mov	%rsi,$out
1802	mov	%rdi,$inp
1803	mov	%rcx,$key
1804
1805	mov	240($key),%eax		# key->rounds
1806	# do we copy key schedule to stack?
1807	mov	$key,%r10
1808	sub	$sbox,%r10
1809	and	\$0xfff,%r10
1810	cmp	\$2304,%r10
1811	jb	.Lcbc_do_ecopy
1812	cmp	\$4096-248,%r10
1813	jb	.Lcbc_skip_ecopy
1814.align	4
1815.Lcbc_do_ecopy:
1816		mov	$key,%rsi
1817		lea	$aes_key,%rdi
1818		lea	$aes_key,$key
1819		mov	\$240/8,%ecx
1820		.long	0x90A548F3	# rep movsq
1821		mov	%eax,(%rdi)	# copy aes_key->rounds
1822.Lcbc_skip_ecopy:
1823	mov	$key,$keyp	# save key pointer
1824
1825	mov	\$18,%ecx
1826.align	4
1827.Lcbc_prefetch_te:
1828		mov	0($sbox),%r10
1829		mov	32($sbox),%r11
1830		mov	64($sbox),%r12
1831		mov	96($sbox),%r13
1832		lea	128($sbox),$sbox
1833		sub	\$1,%ecx
1834	jnz	.Lcbc_prefetch_te
1835	lea	-2304($sbox),$sbox
1836
1837	cmp	\$0,%rbx
1838	je	.LFAST_DECRYPT
1839
1840#----------------------------- ENCRYPT -----------------------------#
1841	mov	0(%rbp),$s0		# load iv
1842	mov	4(%rbp),$s1
1843	mov	8(%rbp),$s2
1844	mov	12(%rbp),$s3
1845
1846.align	4
1847.Lcbc_fast_enc_loop:
1848		xor	0($inp),$s0
1849		xor	4($inp),$s1
1850		xor	8($inp),$s2
1851		xor	12($inp),$s3
1852		mov	$keyp,$key	# restore key
1853		mov	$inp,$_inp	# if ($verticalspin) save inp
1854
1855		call	_x86_64_AES_encrypt
1856
1857		mov	$_inp,$inp	# if ($verticalspin) restore inp
1858		mov	$_len,%r10
1859		mov	$s0,0($out)
1860		mov	$s1,4($out)
1861		mov	$s2,8($out)
1862		mov	$s3,12($out)
1863
1864		lea	16($inp),$inp
1865		lea	16($out),$out
1866		sub	\$16,%r10
1867		test	\$-16,%r10
1868		mov	%r10,$_len
1869	jnz	.Lcbc_fast_enc_loop
1870	mov	$_ivp,%rbp	# restore ivp
1871	mov	$s0,0(%rbp)	# save ivec
1872	mov	$s1,4(%rbp)
1873	mov	$s2,8(%rbp)
1874	mov	$s3,12(%rbp)
1875
1876	jmp	.Lcbc_fast_cleanup
1877
1878#----------------------------- DECRYPT -----------------------------#
1879.align	16
1880.LFAST_DECRYPT:
1881	cmp	$inp,$out
1882	je	.Lcbc_fast_dec_in_place
1883
1884	mov	%rbp,$ivec
1885.align	4
1886.Lcbc_fast_dec_loop:
1887		mov	0($inp),$s0	# read input
1888		mov	4($inp),$s1
1889		mov	8($inp),$s2
1890		mov	12($inp),$s3
1891		mov	$keyp,$key	# restore key
1892		mov	$inp,$_inp	# if ($verticalspin) save inp
1893
1894		call	_x86_64_AES_decrypt
1895
1896		mov	$ivec,%rbp	# load ivp
1897		mov	$_inp,$inp	# if ($verticalspin) restore inp
1898		mov	$_len,%r10	# load len
1899		xor	0(%rbp),$s0	# xor iv
1900		xor	4(%rbp),$s1
1901		xor	8(%rbp),$s2
1902		xor	12(%rbp),$s3
1903		mov	$inp,%rbp	# current input, next iv
1904
1905		sub	\$16,%r10
1906		mov	%r10,$_len	# update len
1907		mov	%rbp,$ivec	# update ivp
1908
1909		mov	$s0,0($out)	# write output
1910		mov	$s1,4($out)
1911		mov	$s2,8($out)
1912		mov	$s3,12($out)
1913
1914		lea	16($inp),$inp
1915		lea	16($out),$out
1916	jnz	.Lcbc_fast_dec_loop
1917	mov	$_ivp,%r12		# load user ivp
1918	mov	0(%rbp),%r10		# load iv
1919	mov	8(%rbp),%r11
1920	mov	%r10,0(%r12)		# copy back to user
1921	mov	%r11,8(%r12)
1922	jmp	.Lcbc_fast_cleanup
1923
1924.align	16
1925.Lcbc_fast_dec_in_place:
1926	mov	0(%rbp),%r10		# copy iv to stack
1927	mov	8(%rbp),%r11
1928	mov	%r10,0+$ivec
1929	mov	%r11,8+$ivec
1930.align	4
1931.Lcbc_fast_dec_in_place_loop:
1932		mov	0($inp),$s0	# load input
1933		mov	4($inp),$s1
1934		mov	8($inp),$s2
1935		mov	12($inp),$s3
1936		mov	$keyp,$key	# restore key
1937		mov	$inp,$_inp	# if ($verticalspin) save inp
1938
1939		call	_x86_64_AES_decrypt
1940
1941		mov	$_inp,$inp	# if ($verticalspin) restore inp
1942		mov	$_len,%r10
1943		xor	0+$ivec,$s0
1944		xor	4+$ivec,$s1
1945		xor	8+$ivec,$s2
1946		xor	12+$ivec,$s3
1947
1948		mov	0($inp),%r11	# load input
1949		mov	8($inp),%r12
1950		sub	\$16,%r10
1951		jz	.Lcbc_fast_dec_in_place_done
1952
1953		mov	%r11,0+$ivec	# copy input to iv
1954		mov	%r12,8+$ivec
1955
1956		mov	$s0,0($out)	# save output [zaps input]
1957		mov	$s1,4($out)
1958		mov	$s2,8($out)
1959		mov	$s3,12($out)
1960
1961		lea	16($inp),$inp
1962		lea	16($out),$out
1963		mov	%r10,$_len
1964	jmp	.Lcbc_fast_dec_in_place_loop
1965.Lcbc_fast_dec_in_place_done:
1966	mov	$_ivp,%rdi
1967	mov	%r11,0(%rdi)	# copy iv back to user
1968	mov	%r12,8(%rdi)
1969
1970	mov	$s0,0($out)	# save output [zaps input]
1971	mov	$s1,4($out)
1972	mov	$s2,8($out)
1973	mov	$s3,12($out)
1974
1975.align	4
1976.Lcbc_fast_cleanup:
1977	cmpl	\$0,$mark	# was the key schedule copied?
1978	lea	$aes_key,%rdi
1979	je	.Lcbc_exit
1980		mov	\$240/8,%ecx
1981		xor	%rax,%rax
1982		.long	0x90AB48F3	# rep stosq
1983
1984	jmp	.Lcbc_exit
1985
1986#--------------------------- SLOW ROUTINE ---------------------------#
1987.align	16
1988.Lcbc_slow_prologue:
1989	# allocate aligned stack frame...
1990	lea	-88(%rsp),%rbp
1991	and	\$-64,%rbp
1992	# ... just "above" key schedule
1993	lea	-88-63(%rcx),%r10
1994	sub	%rbp,%r10
1995	neg	%r10
1996	and	\$0x3c0,%r10
1997	sub	%r10,%rbp
1998
1999	xchg	%rsp,%rbp
2000	#add	\$8,%rsp	# reserve for return address!
2001	mov	%rbp,$_rsp	# save %rsp
2002.Lcbc_slow_body:
2003	#mov	%rdi,$_inp	# save copy of inp
2004	#mov	%rsi,$_out	# save copy of out
2005	#mov	%rdx,$_len	# save copy of len
2006	#mov	%rcx,$_key	# save copy of key
2007	mov	%r8,$_ivp	# save copy of ivp
2008	mov	%r8,%rbp	# rearrange input arguments
2009	mov	%r9,%rbx
2010	mov	%rsi,$out
2011	mov	%rdi,$inp
2012	mov	%rcx,$key
2013	mov	%rdx,%r10
2014
2015	mov	240($key),%eax
2016	mov	$key,$keyp	# save key pointer
2017	shl	\$4,%eax
2018	lea	($key,%rax),%rax
2019	mov	%rax,$keyend
2020
2021	# pick Te4 copy which can't "overlap" with stack frame or key schedule
2022	lea	2048($sbox),$sbox
2023	lea	768-8(%rsp),%rax
2024	sub	$sbox,%rax
2025	and	\$0x300,%rax
2026	lea	($sbox,%rax),$sbox
2027
2028	cmp	\$0,%rbx
2029	je	.LSLOW_DECRYPT
2030
2031#--------------------------- SLOW ENCRYPT ---------------------------#
	test	\$-16,%r10		# check length
2033	mov	0(%rbp),$s0		# load iv
2034	mov	4(%rbp),$s1
2035	mov	8(%rbp),$s2
2036	mov	12(%rbp),$s3
2037	jz	.Lcbc_slow_enc_tail	# short input...
2038
2039.align	4
2040.Lcbc_slow_enc_loop:
2041		xor	0($inp),$s0
2042		xor	4($inp),$s1
2043		xor	8($inp),$s2
2044		xor	12($inp),$s3
2045		mov	$keyp,$key	# restore key
2046		mov	$inp,$_inp	# save inp
2047		mov	$out,$_out	# save out
2048		mov	%r10,$_len	# save len
2049
2050		call	_x86_64_AES_encrypt_compact
2051
2052		mov	$_inp,$inp	# restore inp
2053		mov	$_out,$out	# restore out
2054		mov	$_len,%r10	# restore len
2055		mov	$s0,0($out)
2056		mov	$s1,4($out)
2057		mov	$s2,8($out)
2058		mov	$s3,12($out)
2059
2060		lea	16($inp),$inp
2061		lea	16($out),$out
2062		sub	\$16,%r10
2063		test	\$-16,%r10
2064	jnz	.Lcbc_slow_enc_loop
2065	test	\$15,%r10
2066	jnz	.Lcbc_slow_enc_tail
2067	mov	$_ivp,%rbp	# restore ivp
2068	mov	$s0,0(%rbp)	# save ivec
2069	mov	$s1,4(%rbp)
2070	mov	$s2,8(%rbp)
2071	mov	$s3,12(%rbp)
2072
2073	jmp	.Lcbc_exit
2074
2075.align	4
2076.Lcbc_slow_enc_tail:
2077	mov	%rax,%r11
2078	mov	%rcx,%r12
2079	mov	%r10,%rcx
2080	mov	$inp,%rsi
2081	mov	$out,%rdi
2082	.long	0x9066A4F3		# rep movsb
2083	mov	\$16,%rcx		# zero tail
2084	sub	%r10,%rcx
2085	xor	%rax,%rax
2086	.long	0x9066AAF3		# rep stosb
2087	mov	$out,$inp		# this is not a mistake!
2088	mov	\$16,%r10		# len=16
2089	mov	%r11,%rax
2090	mov	%r12,%rcx
2091	jmp	.Lcbc_slow_enc_loop	# one more spin...
2092#--------------------------- SLOW DECRYPT ---------------------------#
2093.align	16
2094.LSLOW_DECRYPT:
2095	shr	\$3,%rax
2096	add	%rax,$sbox		# recall "magic" constants!
2097
2098	mov	0(%rbp),%r11		# copy iv to stack
2099	mov	8(%rbp),%r12
2100	mov	%r11,0+$ivec
2101	mov	%r12,8+$ivec
2102
2103.align	4
2104.Lcbc_slow_dec_loop:
2105		mov	0($inp),$s0	# load input
2106		mov	4($inp),$s1
2107		mov	8($inp),$s2
2108		mov	12($inp),$s3
2109		mov	$keyp,$key	# restore key
2110		mov	$inp,$_inp	# save inp
2111		mov	$out,$_out	# save out
2112		mov	%r10,$_len	# save len
2113
2114		call	_x86_64_AES_decrypt_compact
2115
2116		mov	$_inp,$inp	# restore inp
2117		mov	$_out,$out	# restore out
2118		mov	$_len,%r10
2119		xor	0+$ivec,$s0
2120		xor	4+$ivec,$s1
2121		xor	8+$ivec,$s2
2122		xor	12+$ivec,$s3
2123
2124		mov	0($inp),%r11	# load input
2125		mov	8($inp),%r12
2126		sub	\$16,%r10
2127		jc	.Lcbc_slow_dec_partial
2128		jz	.Lcbc_slow_dec_done
2129
2130		mov	%r11,0+$ivec	# copy input to iv
2131		mov	%r12,8+$ivec
2132
2133		mov	$s0,0($out)	# save output [can zap input]
2134		mov	$s1,4($out)
2135		mov	$s2,8($out)
2136		mov	$s3,12($out)
2137
2138		lea	16($inp),$inp
2139		lea	16($out),$out
2140	jmp	.Lcbc_slow_dec_loop
2141.Lcbc_slow_dec_done:
2142	mov	$_ivp,%rdi
2143	mov	%r11,0(%rdi)		# copy iv back to user
2144	mov	%r12,8(%rdi)
2145
2146	mov	$s0,0($out)		# save output [can zap input]
2147	mov	$s1,4($out)
2148	mov	$s2,8($out)
2149	mov	$s3,12($out)
2150
2151	jmp	.Lcbc_exit
2152
2153.align	4
2154.Lcbc_slow_dec_partial:
2155	mov	$_ivp,%rdi
2156	mov	%r11,0(%rdi)		# copy iv back to user
2157	mov	%r12,8(%rdi)
2158
2159	mov	$s0,0+$ivec		# save output to stack
2160	mov	$s1,4+$ivec
2161	mov	$s2,8+$ivec
2162	mov	$s3,12+$ivec
2163
2164	mov	$out,%rdi
2165	lea	$ivec,%rsi
2166	lea	16(%r10),%rcx
2167	.long	0x9066A4F3	# rep movsb
2168	jmp	.Lcbc_exit
2169
2170.align	16
2171.Lcbc_exit:
2172	mov	$_rsp,%rsi
2173.cfi_def_cfa	%rsi,64
2174	mov	(%rsi),%r15
2175.cfi_restore	%r15
2176	mov	8(%rsi),%r14
2177.cfi_restore	%r14
2178	mov	16(%rsi),%r13
2179.cfi_restore	%r13
2180	mov	24(%rsi),%r12
2181.cfi_restore	%r12
2182	mov	32(%rsi),%rbp
2183.cfi_restore	%rbp
2184	mov	40(%rsi),%rbx
2185.cfi_restore	%rbx
2186	lea	48(%rsi),%rsp
2187.cfi_def_cfa	%rsp,16
2188.Lcbc_popfq:
2189	popfq
2190.cfi_pop	49		# %rflags
2191.Lcbc_epilogue:
2192	ret
2193.cfi_endproc
2194.size	AES_cbc_encrypt,.-AES_cbc_encrypt
2195___
2196}
2197
2198$code.=<<___;
2199.align	64
2200.LAES_Te:
2201___
2202	&_data_word(0xa56363c6, 0x847c7cf8, 0x997777ee, 0x8d7b7bf6);
2203	&_data_word(0x0df2f2ff, 0xbd6b6bd6, 0xb16f6fde, 0x54c5c591);
	&_data_word(0x50303060, 0x03010102, 0xa96767ce, 0x7d2b2b56);
	&_data_word(0x19fefee7, 0x62d7d7b5, 0xe6abab4d, 0x9a7676ec);
	&_data_word(0x45caca8f, 0x9d82821f, 0x40c9c989, 0x877d7dfa);
	&_data_word(0x15fafaef, 0xeb5959b2, 0xc947478e, 0x0bf0f0fb);
	&_data_word(0xecadad41, 0x67d4d4b3, 0xfda2a25f, 0xeaafaf45);
	&_data_word(0xbf9c9c23, 0xf7a4a453, 0x967272e4, 0x5bc0c09b);
	&_data_word(0xc2b7b775, 0x1cfdfde1, 0xae93933d, 0x6a26264c);
	&_data_word(0x5a36366c, 0x413f3f7e, 0x02f7f7f5, 0x4fcccc83);
	&_data_word(0x5c343468, 0xf4a5a551, 0x34e5e5d1, 0x08f1f1f9);
	&_data_word(0x937171e2, 0x73d8d8ab, 0x53313162, 0x3f15152a);
	&_data_word(0x0c040408, 0x52c7c795, 0x65232346, 0x5ec3c39d);
	&_data_word(0x28181830, 0xa1969637, 0x0f05050a, 0xb59a9a2f);
	&_data_word(0x0907070e, 0x36121224, 0x9b80801b, 0x3de2e2df);
	&_data_word(0x26ebebcd, 0x6927274e, 0xcdb2b27f, 0x9f7575ea);
	&_data_word(0x1b090912, 0x9e83831d, 0x742c2c58, 0x2e1a1a34);
	&_data_word(0x2d1b1b36, 0xb26e6edc, 0xee5a5ab4, 0xfba0a05b);
	&_data_word(0xf65252a4, 0x4d3b3b76, 0x61d6d6b7, 0xceb3b37d);
	&_data_word(0x7b292952, 0x3ee3e3dd, 0x712f2f5e, 0x97848413);
	&_data_word(0xf55353a6, 0x68d1d1b9, 0x00000000, 0x2cededc1);
	&_data_word(0x60202040, 0x1ffcfce3, 0xc8b1b179, 0xed5b5bb6);
	&_data_word(0xbe6a6ad4, 0x46cbcb8d, 0xd9bebe67, 0x4b393972);
	&_data_word(0xde4a4a94, 0xd44c4c98, 0xe85858b0, 0x4acfcf85);
	&_data_word(0x6bd0d0bb, 0x2aefefc5, 0xe5aaaa4f, 0x16fbfbed);
	&_data_word(0xc5434386, 0xd74d4d9a, 0x55333366, 0x94858511);
	&_data_word(0xcf45458a, 0x10f9f9e9, 0x06020204, 0x817f7ffe);
	&_data_word(0xf05050a0, 0x443c3c78, 0xba9f9f25, 0xe3a8a84b);
	&_data_word(0xf35151a2, 0xfea3a35d, 0xc0404080, 0x8a8f8f05);
	&_data_word(0xad92923f, 0xbc9d9d21, 0x48383870, 0x04f5f5f1);
	&_data_word(0xdfbcbc63, 0xc1b6b677, 0x75dadaaf, 0x63212142);
	&_data_word(0x30101020, 0x1affffe5, 0x0ef3f3fd, 0x6dd2d2bf);
	&_data_word(0x4ccdcd81, 0x140c0c18, 0x35131326, 0x2fececc3);
	&_data_word(0xe15f5fbe, 0xa2979735, 0xcc444488, 0x3917172e);
	&_data_word(0x57c4c493, 0xf2a7a755, 0x827e7efc, 0x473d3d7a);
	&_data_word(0xac6464c8, 0xe75d5dba, 0x2b191932, 0x957373e6);
	&_data_word(0xa06060c0, 0x98818119, 0xd14f4f9e, 0x7fdcdca3);
	&_data_word(0x66222244, 0x7e2a2a54, 0xab90903b, 0x8388880b);
	&_data_word(0xca46468c, 0x29eeeec7, 0xd3b8b86b, 0x3c141428);
	&_data_word(0x79dedea7, 0xe25e5ebc, 0x1d0b0b16, 0x76dbdbad);
	&_data_word(0x3be0e0db, 0x56323264, 0x4e3a3a74, 0x1e0a0a14);
	&_data_word(0xdb494992, 0x0a06060c, 0x6c242448, 0xe45c5cb8);
	&_data_word(0x5dc2c29f, 0x6ed3d3bd, 0xefacac43, 0xa66262c4);
	&_data_word(0xa8919139, 0xa4959531, 0x37e4e4d3, 0x8b7979f2);
	&_data_word(0x32e7e7d5, 0x43c8c88b, 0x5937376e, 0xb76d6dda);
	&_data_word(0x8c8d8d01, 0x64d5d5b1, 0xd24e4e9c, 0xe0a9a949);
	&_data_word(0xb46c6cd8, 0xfa5656ac, 0x07f4f4f3, 0x25eaeacf);
	&_data_word(0xaf6565ca, 0x8e7a7af4, 0xe9aeae47, 0x18080810);
	&_data_word(0xd5baba6f, 0x887878f0, 0x6f25254a, 0x722e2e5c);
	&_data_word(0x241c1c38, 0xf1a6a657, 0xc7b4b473, 0x51c6c697);
	&_data_word(0x23e8e8cb, 0x7cdddda1, 0x9c7474e8, 0x211f1f3e);
	&_data_word(0xdd4b4b96, 0xdcbdbd61, 0x868b8b0d, 0x858a8a0f);
	&_data_word(0x907070e0, 0x423e3e7c, 0xc4b5b571, 0xaa6666cc);
	&_data_word(0xd8484890, 0x05030306, 0x01f6f6f7, 0x120e0e1c);
	&_data_word(0xa36161c2, 0x5f35356a, 0xf95757ae, 0xd0b9b969);
	&_data_word(0x91868617, 0x58c1c199, 0x271d1d3a, 0xb99e9e27);
	&_data_word(0x38e1e1d9, 0x13f8f8eb, 0xb398982b, 0x33111122);
	&_data_word(0xbb6969d2, 0x70d9d9a9, 0x898e8e07, 0xa7949433);
	&_data_word(0xb69b9b2d, 0x221e1e3c, 0x92878715, 0x20e9e9c9);
	&_data_word(0x49cece87, 0xff5555aa, 0x78282850, 0x7adfdfa5);
	&_data_word(0x8f8c8c03, 0xf8a1a159, 0x80898909, 0x170d0d1a);
	&_data_word(0xdabfbf65, 0x31e6e6d7, 0xc6424284, 0xb86868d0);
	&_data_word(0xc3414182, 0xb0999929, 0x772d2d5a, 0x110f0f1e);
	&_data_word(0xcbb0b07b, 0xfc5454a8, 0xd6bbbb6d, 0x3a16162c);

#Te4	# four copies of Te4 to choose from to avoid L1 aliasing
	&data_byte(0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5);
	&data_byte(0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76);
	&data_byte(0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0);
	&data_byte(0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0);
	&data_byte(0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc);
	&data_byte(0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15);
	&data_byte(0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a);
	&data_byte(0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75);
	&data_byte(0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0);
	&data_byte(0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84);
	&data_byte(0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b);
	&data_byte(0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf);
	&data_byte(0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85);
	&data_byte(0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8);
	&data_byte(0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5);
	&data_byte(0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2);
	&data_byte(0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17);
	&data_byte(0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73);
	&data_byte(0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88);
	&data_byte(0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb);
	&data_byte(0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c);
	&data_byte(0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79);
	&data_byte(0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9);
	&data_byte(0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08);
	&data_byte(0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6);
	&data_byte(0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a);
	&data_byte(0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e);
	&data_byte(0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e);
	&data_byte(0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94);
	&data_byte(0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf);
	&data_byte(0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68);
	&data_byte(0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16);

	&data_byte(0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5);
	&data_byte(0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76);
	&data_byte(0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0);
	&data_byte(0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0);
	&data_byte(0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc);
	&data_byte(0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15);
	&data_byte(0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a);
	&data_byte(0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75);
	&data_byte(0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0);
	&data_byte(0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84);
	&data_byte(0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b);
	&data_byte(0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf);
	&data_byte(0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85);
	&data_byte(0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8);
	&data_byte(0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5);
	&data_byte(0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2);
	&data_byte(0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17);
	&data_byte(0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73);
	&data_byte(0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88);
	&data_byte(0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb);
	&data_byte(0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c);
	&data_byte(0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79);
	&data_byte(0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9);
	&data_byte(0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08);
	&data_byte(0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6);
	&data_byte(0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a);
	&data_byte(0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e);
	&data_byte(0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e);
	&data_byte(0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94);
	&data_byte(0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf);
	&data_byte(0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68);
	&data_byte(0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16);

	&data_byte(0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5);
	&data_byte(0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76);
	&data_byte(0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0);
	&data_byte(0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0);
	&data_byte(0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc);
	&data_byte(0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15);
	&data_byte(0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a);
	&data_byte(0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75);
	&data_byte(0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0);
	&data_byte(0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84);
	&data_byte(0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b);
	&data_byte(0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf);
	&data_byte(0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85);
	&data_byte(0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8);
	&data_byte(0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5);
	&data_byte(0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2);
	&data_byte(0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17);
	&data_byte(0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73);
	&data_byte(0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88);
	&data_byte(0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb);
	&data_byte(0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c);
	&data_byte(0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79);
	&data_byte(0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9);
	&data_byte(0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08);
	&data_byte(0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6);
	&data_byte(0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a);
	&data_byte(0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e);
	&data_byte(0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e);
	&data_byte(0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94);
	&data_byte(0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf);
	&data_byte(0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68);
	&data_byte(0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16);

	&data_byte(0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5);
	&data_byte(0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76);
	&data_byte(0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0);
	&data_byte(0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0);
	&data_byte(0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc);
	&data_byte(0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15);
	&data_byte(0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a);
	&data_byte(0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75);
	&data_byte(0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0);
	&data_byte(0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84);
	&data_byte(0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b);
	&data_byte(0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf);
	&data_byte(0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85);
	&data_byte(0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8);
	&data_byte(0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5);
	&data_byte(0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2);
	&data_byte(0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17);
	&data_byte(0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73);
	&data_byte(0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88);
	&data_byte(0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb);
	&data_byte(0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c);
	&data_byte(0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79);
	&data_byte(0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9);
	&data_byte(0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08);
	&data_byte(0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6);
	&data_byte(0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a);
	&data_byte(0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e);
	&data_byte(0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e);
	&data_byte(0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94);
	&data_byte(0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf);
	&data_byte(0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68);
	&data_byte(0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16);
#rcon:
$code.=<<___;
	.long	0x00000001, 0x00000002, 0x00000004, 0x00000008
	.long	0x00000010, 0x00000020, 0x00000040, 0x00000080
	.long	0x0000001b, 0x00000036, 0x80808080, 0x80808080
	.long	0xfefefefe, 0xfefefefe, 0x1b1b1b1b, 0x1b1b1b1b
___
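# The first ten words above are the key-schedule round constants (successive
# powers of x in GF(2^8)); the 0x80808080/0xfefefefe/0x1b1b1b1b words that
# follow are byte masks for doubling four GF(2^8) elements packed in one
# 32-bit word, i.e. the classic xtime trick (illustrative sketch only, not
# code emitted by this module):
#	xtime4(x) = ((x<<1) & 0xfefefefe) ^ (((x & 0x80808080)>>7) * 0x1b)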
$code.=<<___;
.align	64
.LAES_Td:
___
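#Td	# decryption T-table; same one-word-per-entry layout as Te above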
	&_data_word(0x50a7f451, 0x5365417e, 0xc3a4171a, 0x965e273a);
	&_data_word(0xcb6bab3b, 0xf1459d1f, 0xab58faac, 0x9303e34b);
	&_data_word(0x55fa3020, 0xf66d76ad, 0x9176cc88, 0x254c02f5);
	&_data_word(0xfcd7e54f, 0xd7cb2ac5, 0x80443526, 0x8fa362b5);
	&_data_word(0x495ab1de, 0x671bba25, 0x980eea45, 0xe1c0fe5d);
	&_data_word(0x02752fc3, 0x12f04c81, 0xa397468d, 0xc6f9d36b);
	&_data_word(0xe75f8f03, 0x959c9215, 0xeb7a6dbf, 0xda595295);
	&_data_word(0x2d83bed4, 0xd3217458, 0x2969e049, 0x44c8c98e);
	&_data_word(0x6a89c275, 0x78798ef4, 0x6b3e5899, 0xdd71b927);
	&_data_word(0xb64fe1be, 0x17ad88f0, 0x66ac20c9, 0xb43ace7d);
	&_data_word(0x184adf63, 0x82311ae5, 0x60335197, 0x457f5362);
	&_data_word(0xe07764b1, 0x84ae6bbb, 0x1ca081fe, 0x942b08f9);
	&_data_word(0x58684870, 0x19fd458f, 0x876cde94, 0xb7f87b52);
	&_data_word(0x23d373ab, 0xe2024b72, 0x578f1fe3, 0x2aab5566);
	&_data_word(0x0728ebb2, 0x03c2b52f, 0x9a7bc586, 0xa50837d3);
	&_data_word(0xf2872830, 0xb2a5bf23, 0xba6a0302, 0x5c8216ed);
	&_data_word(0x2b1ccf8a, 0x92b479a7, 0xf0f207f3, 0xa1e2694e);
	&_data_word(0xcdf4da65, 0xd5be0506, 0x1f6234d1, 0x8afea6c4);
	&_data_word(0x9d532e34, 0xa055f3a2, 0x32e18a05, 0x75ebf6a4);
	&_data_word(0x39ec830b, 0xaaef6040, 0x069f715e, 0x51106ebd);
	&_data_word(0xf98a213e, 0x3d06dd96, 0xae053edd, 0x46bde64d);
	&_data_word(0xb58d5491, 0x055dc471, 0x6fd40604, 0xff155060);
	&_data_word(0x24fb9819, 0x97e9bdd6, 0xcc434089, 0x779ed967);
	&_data_word(0xbd42e8b0, 0x888b8907, 0x385b19e7, 0xdbeec879);
	&_data_word(0x470a7ca1, 0xe90f427c, 0xc91e84f8, 0x00000000);
	&_data_word(0x83868009, 0x48ed2b32, 0xac70111e, 0x4e725a6c);
	&_data_word(0xfbff0efd, 0x5638850f, 0x1ed5ae3d, 0x27392d36);
	&_data_word(0x64d90f0a, 0x21a65c68, 0xd1545b9b, 0x3a2e3624);
	&_data_word(0xb1670a0c, 0x0fe75793, 0xd296eeb4, 0x9e919b1b);
	&_data_word(0x4fc5c080, 0xa220dc61, 0x694b775a, 0x161a121c);
	&_data_word(0x0aba93e2, 0xe52aa0c0, 0x43e0223c, 0x1d171b12);
	&_data_word(0x0b0d090e, 0xadc78bf2, 0xb9a8b62d, 0xc8a91e14);
	&_data_word(0x8519f157, 0x4c0775af, 0xbbdd99ee, 0xfd607fa3);
	&_data_word(0x9f2601f7, 0xbcf5725c, 0xc53b6644, 0x347efb5b);
	&_data_word(0x7629438b, 0xdcc623cb, 0x68fcedb6, 0x63f1e4b8);
	&_data_word(0xcadc31d7, 0x10856342, 0x40229713, 0x2011c684);
	&_data_word(0x7d244a85, 0xf83dbbd2, 0x1132f9ae, 0x6da129c7);
	&_data_word(0x4b2f9e1d, 0xf330b2dc, 0xec52860d, 0xd0e3c177);
	&_data_word(0x6c16b32b, 0x99b970a9, 0xfa489411, 0x2264e947);
	&_data_word(0xc48cfca8, 0x1a3ff0a0, 0xd82c7d56, 0xef903322);
	&_data_word(0xc74e4987, 0xc1d138d9, 0xfea2ca8c, 0x360bd498);
	&_data_word(0xcf81f5a6, 0x28de7aa5, 0x268eb7da, 0xa4bfad3f);
	&_data_word(0xe49d3a2c, 0x0d927850, 0x9bcc5f6a, 0x62467e54);
	&_data_word(0xc2138df6, 0xe8b8d890, 0x5ef7392e, 0xf5afc382);
	&_data_word(0xbe805d9f, 0x7c93d069, 0xa92dd56f, 0xb31225cf);
	&_data_word(0x3b99acc8, 0xa77d1810, 0x6e639ce8, 0x7bbb3bdb);
	&_data_word(0x097826cd, 0xf418596e, 0x01b79aec, 0xa89a4f83);
	&_data_word(0x656e95e6, 0x7ee6ffaa, 0x08cfbc21, 0xe6e815ef);
	&_data_word(0xd99be7ba, 0xce366f4a, 0xd4099fea, 0xd67cb029);
	&_data_word(0xafb2a431, 0x31233f2a, 0x3094a5c6, 0xc066a235);
	&_data_word(0x37bc4e74, 0xa6ca82fc, 0xb0d090e0, 0x15d8a733);
	&_data_word(0x4a9804f1, 0xf7daec41, 0x0e50cd7f, 0x2ff69117);
	&_data_word(0x8dd64d76, 0x4db0ef43, 0x544daacc, 0xdf0496e4);
	&_data_word(0xe3b5d19e, 0x1b886a4c, 0xb81f2cc1, 0x7f516546);
	&_data_word(0x04ea5e9d, 0x5d358c01, 0x737487fa, 0x2e410bfb);
	&_data_word(0x5a1d67b3, 0x52d2db92, 0x335610e9, 0x1347d66d);
	&_data_word(0x8c61d79a, 0x7a0ca137, 0x8e14f859, 0x893c13eb);
	&_data_word(0xee27a9ce, 0x35c961b7, 0xede51ce1, 0x3cb1477a);
	&_data_word(0x59dfd29c, 0x3f73f255, 0x79ce1418, 0xbf37c773);
	&_data_word(0xeacdf753, 0x5baafd5f, 0x146f3ddf, 0x86db4478);
	&_data_word(0x81f3afca, 0x3ec468b9, 0x2c342438, 0x5f40a3c2);
	&_data_word(0x72c31d16, 0x0c25e2bc, 0x8b493c28, 0x41950dff);
	&_data_word(0x7101a839, 0xdeb30c08, 0x9ce4b4d8, 0x90c15664);
	&_data_word(0x6184cb7b, 0x70b632d5, 0x745c6c48, 0x4257b8d0);

#Td4:	# four copies of Td4 to choose from to avoid L1 aliasing
	&data_byte(0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38);
	&data_byte(0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb);
	&data_byte(0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87);
	&data_byte(0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb);
	&data_byte(0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d);
	&data_byte(0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e);
	&data_byte(0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2);
	&data_byte(0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25);
	&data_byte(0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16);
	&data_byte(0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92);
	&data_byte(0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda);
	&data_byte(0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84);
	&data_byte(0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a);
	&data_byte(0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06);
	&data_byte(0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02);
	&data_byte(0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b);
	&data_byte(0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea);
	&data_byte(0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73);
	&data_byte(0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85);
	&data_byte(0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e);
	&data_byte(0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89);
	&data_byte(0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b);
	&data_byte(0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20);
	&data_byte(0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4);
	&data_byte(0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31);
	&data_byte(0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f);
	&data_byte(0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d);
	&data_byte(0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef);
	&data_byte(0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0);
	&data_byte(0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61);
	&data_byte(0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26);
	&data_byte(0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d);
$code.=<<___;
	.long	0x80808080, 0x80808080, 0xfefefefe, 0xfefefefe
	.long	0x1b1b1b1b, 0x1b1b1b1b, 0, 0
___
	&data_byte(0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38);
	&data_byte(0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb);
	&data_byte(0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87);
	&data_byte(0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb);
	&data_byte(0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d);
	&data_byte(0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e);
	&data_byte(0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2);
	&data_byte(0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25);
	&data_byte(0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16);
	&data_byte(0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92);
	&data_byte(0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda);
	&data_byte(0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84);
	&data_byte(0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a);
	&data_byte(0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06);
	&data_byte(0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02);
	&data_byte(0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b);
	&data_byte(0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea);
	&data_byte(0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73);
	&data_byte(0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85);
	&data_byte(0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e);
	&data_byte(0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89);
	&data_byte(0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b);
	&data_byte(0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20);
	&data_byte(0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4);
	&data_byte(0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31);
	&data_byte(0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f);
	&data_byte(0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d);
	&data_byte(0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef);
	&data_byte(0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0);
	&data_byte(0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61);
	&data_byte(0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26);
	&data_byte(0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d);
$code.=<<___;
	.long	0x80808080, 0x80808080, 0xfefefefe, 0xfefefefe
	.long	0x1b1b1b1b, 0x1b1b1b1b, 0, 0
___
	&data_byte(0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38);
	&data_byte(0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb);
	&data_byte(0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87);
	&data_byte(0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb);
	&data_byte(0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d);
	&data_byte(0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e);
	&data_byte(0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2);
	&data_byte(0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25);
	&data_byte(0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16);
	&data_byte(0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92);
	&data_byte(0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda);
	&data_byte(0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84);
	&data_byte(0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a);
	&data_byte(0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06);
	&data_byte(0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02);
	&data_byte(0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b);
	&data_byte(0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea);
	&data_byte(0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73);
	&data_byte(0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85);
	&data_byte(0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e);
	&data_byte(0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89);
	&data_byte(0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b);
	&data_byte(0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20);
	&data_byte(0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4);
	&data_byte(0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31);
	&data_byte(0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f);
	&data_byte(0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d);
	&data_byte(0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef);
	&data_byte(0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0);
	&data_byte(0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61);
	&data_byte(0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26);
	&data_byte(0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d);
$code.=<<___;
	.long	0x80808080, 0x80808080, 0xfefefefe, 0xfefefefe
	.long	0x1b1b1b1b, 0x1b1b1b1b, 0, 0
___
	&data_byte(0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38);
	&data_byte(0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb);
	&data_byte(0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87);
	&data_byte(0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb);
	&data_byte(0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d);
	&data_byte(0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e);
	&data_byte(0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2);
	&data_byte(0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25);
	&data_byte(0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16);
	&data_byte(0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92);
	&data_byte(0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda);
	&data_byte(0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84);
	&data_byte(0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a);
	&data_byte(0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06);
	&data_byte(0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02);
	&data_byte(0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b);
	&data_byte(0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea);
	&data_byte(0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73);
	&data_byte(0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85);
	&data_byte(0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e);
	&data_byte(0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89);
	&data_byte(0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b);
	&data_byte(0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20);
	&data_byte(0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4);
	&data_byte(0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31);
	&data_byte(0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f);
	&data_byte(0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d);
	&data_byte(0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef);
	&data_byte(0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0);
	&data_byte(0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61);
	&data_byte(0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26);
	&data_byte(0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d);
$code.=<<___;
	.long	0x80808080, 0x80808080, 0xfefefefe, 0xfefefefe
	.long	0x1b1b1b1b, 0x1b1b1b1b, 0, 0
.asciz  "AES for x86_64, CRYPTOGAMS by <appro\@openssl.org>"
.align	64
___

# EXCEPTION_DISPOSITION handler (EXCEPTION_RECORD *rec,ULONG64 frame,
#		CONTEXT *context,DISPATCHER_CONTEXT *disp)
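# The handlers below are emitted for Win64 builds only.  Each one locates
# the frame of the interrupted routine, reloads the non-volatile registers
# that the routine's prologue saved there, patches the CONTEXT record
# accordingly and then defers to RtlVirtualUnwind, returning
# ExceptionContinueSearch: block_se_handler serves AES_encrypt/AES_decrypt,
# key_se_handler the key-setup routines, cbc_se_handler AES_cbc_encrypt.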
if ($win64) {
$rec="%rcx";
$frame="%rdx";
$context="%r8";
$disp="%r9";

$code.=<<___;
.extern	__imp_RtlVirtualUnwind
.type	block_se_handler,\@abi-omnipotent
.align	16
block_se_handler:
	push	%rsi
	push	%rdi
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
	pushfq
	sub	\$64,%rsp

	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	mov	8($disp),%rsi		# disp->ImageBase
	mov	56($disp),%r11		# disp->HandlerData

	mov	0(%r11),%r10d		# HandlerData[0]
	lea	(%rsi,%r10),%r10	# prologue label
	cmp	%r10,%rbx		# context->Rip<prologue label
	jb	.Lin_block_prologue

	mov	152($context),%rax	# pull context->Rsp

	mov	4(%r11),%r10d		# HandlerData[1]
	lea	(%rsi,%r10),%r10	# epilogue label
	cmp	%r10,%rbx		# context->Rip>=epilogue label
	jae	.Lin_block_prologue

	mov	24(%rax),%rax		# pull saved real stack pointer

	mov	-8(%rax),%rbx
	mov	-16(%rax),%rbp
	mov	-24(%rax),%r12
	mov	-32(%rax),%r13
	mov	-40(%rax),%r14
	mov	-48(%rax),%r15
	mov	%rbx,144($context)	# restore context->Rbx
	mov	%rbp,160($context)	# restore context->Rbp
	mov	%r12,216($context)	# restore context->R12
	mov	%r13,224($context)	# restore context->R13
	mov	%r14,232($context)	# restore context->R14
	mov	%r15,240($context)	# restore context->R15

.Lin_block_prologue:
	mov	8(%rax),%rdi
	mov	16(%rax),%rsi
	mov	%rax,152($context)	# restore context->Rsp
	mov	%rsi,168($context)	# restore context->Rsi
	mov	%rdi,176($context)	# restore context->Rdi

	jmp	.Lcommon_seh_exit
.size	block_se_handler,.-block_se_handler

.type	key_se_handler,\@abi-omnipotent
.align	16
key_se_handler:
	push	%rsi
	push	%rdi
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
	pushfq
	sub	\$64,%rsp

	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	mov	8($disp),%rsi		# disp->ImageBase
	mov	56($disp),%r11		# disp->HandlerData

	mov	0(%r11),%r10d		# HandlerData[0]
	lea	(%rsi,%r10),%r10	# prologue label
	cmp	%r10,%rbx		# context->Rip<prologue label
	jb	.Lin_key_prologue

	mov	152($context),%rax	# pull context->Rsp

	mov	4(%r11),%r10d		# HandlerData[1]
	lea	(%rsi,%r10),%r10	# epilogue label
	cmp	%r10,%rbx		# context->Rip>=epilogue label
	jae	.Lin_key_prologue

	lea	56(%rax),%rax

	mov	-8(%rax),%rbx
	mov	-16(%rax),%rbp
	mov	-24(%rax),%r12
	mov	-32(%rax),%r13
	mov	-40(%rax),%r14
	mov	-48(%rax),%r15
	mov	%rbx,144($context)	# restore context->Rbx
	mov	%rbp,160($context)	# restore context->Rbp
	mov	%r12,216($context)	# restore context->R12
	mov	%r13,224($context)	# restore context->R13
	mov	%r14,232($context)	# restore context->R14
	mov	%r15,240($context)	# restore context->R15

.Lin_key_prologue:
	mov	8(%rax),%rdi
	mov	16(%rax),%rsi
	mov	%rax,152($context)	# restore context->Rsp
	mov	%rsi,168($context)	# restore context->Rsi
	mov	%rdi,176($context)	# restore context->Rdi

	jmp	.Lcommon_seh_exit
.size	key_se_handler,.-key_se_handler

.type	cbc_se_handler,\@abi-omnipotent
.align	16
cbc_se_handler:
	push	%rsi
	push	%rdi
	push	%rbx
	push	%rbp
	push	%r12
	push	%r13
	push	%r14
	push	%r15
	pushfq
	sub	\$64,%rsp

	mov	120($context),%rax	# pull context->Rax
	mov	248($context),%rbx	# pull context->Rip

	lea	.Lcbc_prologue(%rip),%r10
	cmp	%r10,%rbx		# context->Rip<.Lcbc_prologue
	jb	.Lin_cbc_prologue

	lea	.Lcbc_fast_body(%rip),%r10
	cmp	%r10,%rbx		# context->Rip<.Lcbc_fast_body
	jb	.Lin_cbc_frame_setup

	lea	.Lcbc_slow_prologue(%rip),%r10
	cmp	%r10,%rbx		# context->Rip<.Lcbc_slow_prologue
	jb	.Lin_cbc_body

	lea	.Lcbc_slow_body(%rip),%r10
	cmp	%r10,%rbx		# context->Rip<.Lcbc_slow_body
	jb	.Lin_cbc_frame_setup

.Lin_cbc_body:
	mov	152($context),%rax	# pull context->Rsp

	lea	.Lcbc_epilogue(%rip),%r10
	cmp	%r10,%rbx		# context->Rip>=.Lcbc_epilogue
	jae	.Lin_cbc_prologue

	lea	8(%rax),%rax

	lea	.Lcbc_popfq(%rip),%r10
	cmp	%r10,%rbx		# context->Rip>=.Lcbc_popfq
	jae	.Lin_cbc_prologue

	mov	`16-8`(%rax),%rax	# biased $_rsp
	lea	56(%rax),%rax

.Lin_cbc_frame_setup:
	mov	-16(%rax),%rbx
	mov	-24(%rax),%rbp
	mov	-32(%rax),%r12
	mov	-40(%rax),%r13
	mov	-48(%rax),%r14
	mov	-56(%rax),%r15
	mov	%rbx,144($context)	# restore context->Rbx
	mov	%rbp,160($context)	# restore context->Rbp
	mov	%r12,216($context)	# restore context->R12
	mov	%r13,224($context)	# restore context->R13
	mov	%r14,232($context)	# restore context->R14
	mov	%r15,240($context)	# restore context->R15

.Lin_cbc_prologue:
	mov	8(%rax),%rdi
	mov	16(%rax),%rsi
	mov	%rax,152($context)	# restore context->Rsp
	mov	%rsi,168($context)	# restore context->Rsi
	mov	%rdi,176($context)	# restore context->Rdi

.Lcommon_seh_exit:

	mov	40($disp),%rdi		# disp->ContextRecord
	mov	$context,%rsi		# context
	mov	\$`1232/8`,%ecx		# sizeof(CONTEXT)
	.long	0xa548f3fc		# cld; rep movsq

	mov	$disp,%rsi
	xor	%rcx,%rcx		# arg1, UNW_FLAG_NHANDLER
	mov	8(%rsi),%rdx		# arg2, disp->ImageBase
	mov	0(%rsi),%r8		# arg3, disp->ControlPc
	mov	16(%rsi),%r9		# arg4, disp->FunctionEntry
	mov	40(%rsi),%r10		# disp->ContextRecord
	lea	56(%rsi),%r11		# &disp->HandlerData
	lea	24(%rsi),%r12		# &disp->EstablisherFrame
	mov	%r10,32(%rsp)		# arg5
	mov	%r11,40(%rsp)		# arg6
	mov	%r12,48(%rsp)		# arg7
	mov	%rcx,56(%rsp)		# arg8, (NULL)
	call	*__imp_RtlVirtualUnwind(%rip)

	mov	\$1,%eax		# ExceptionContinueSearch
	add	\$64,%rsp
	popfq
	pop	%r15
	pop	%r14
	pop	%r13
	pop	%r12
	pop	%rbp
	pop	%rbx
	pop	%rdi
	pop	%rsi
	ret
.size	cbc_se_handler,.-cbc_se_handler

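# .pdata associates the image-relative code range of each public routine
# with its unwind descriptor below; each .xdata descriptor names the handler
# to invoke and, for the block and key handlers, the prologue/epilogue
# labels that are passed to it as HandlerData.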
.section	.pdata
.align	4
	.rva	.LSEH_begin_AES_encrypt
	.rva	.LSEH_end_AES_encrypt
	.rva	.LSEH_info_AES_encrypt

	.rva	.LSEH_begin_AES_decrypt
	.rva	.LSEH_end_AES_decrypt
	.rva	.LSEH_info_AES_decrypt

	.rva	.LSEH_begin_AES_set_encrypt_key
	.rva	.LSEH_end_AES_set_encrypt_key
	.rva	.LSEH_info_AES_set_encrypt_key

	.rva	.LSEH_begin_AES_set_decrypt_key
	.rva	.LSEH_end_AES_set_decrypt_key
	.rva	.LSEH_info_AES_set_decrypt_key

	.rva	.LSEH_begin_AES_cbc_encrypt
	.rva	.LSEH_end_AES_cbc_encrypt
	.rva	.LSEH_info_AES_cbc_encrypt

.section	.xdata
.align	8
.LSEH_info_AES_encrypt:
	.byte	9,0,0,0
	.rva	block_se_handler
	.rva	.Lenc_prologue,.Lenc_epilogue	# HandlerData[]
.LSEH_info_AES_decrypt:
	.byte	9,0,0,0
	.rva	block_se_handler
	.rva	.Ldec_prologue,.Ldec_epilogue	# HandlerData[]
.LSEH_info_AES_set_encrypt_key:
	.byte	9,0,0,0
	.rva	key_se_handler
	.rva	.Lenc_key_prologue,.Lenc_key_epilogue	# HandlerData[]
.LSEH_info_AES_set_decrypt_key:
	.byte	9,0,0,0
	.rva	key_se_handler
	.rva	.Ldec_key_prologue,.Ldec_key_epilogue	# HandlerData[]
.LSEH_info_AES_cbc_encrypt:
	.byte	9,0,0,0
	.rva	cbc_se_handler
___
}

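# Interpolate the constant expressions quoted in backticks above (such as
# the CONTEXT size and the biased stack-pointer offset) before emitting.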
$code =~ s/\`([^\`]*)\`/eval($1)/gem;

print $code;

close STDOUT;