#! /usr/bin/env perl
# Copyright 2007-2020 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the Apache License 2.0 (the "License").  You may not use
# this file except in compliance with the License.  You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html

#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# SHA256 block transform for x86. September 2007.
#
# Performance improvement over compiler-generated code varies from
# 10% to 40% [see below]. Not very impressive on some µ-archs, but
# it's 5 times smaller and optimizes the amount of memory writes.
#
# May 2012.
#
# Optimization including two of Pavel Semjanov's ideas, alternative
# Maj and full unroll, resulted in ~20-25% improvement on most CPUs,
# ~7% on Pentium, ~40% on Atom. As the fully unrolled loop body is
# almost 15x larger, 8KB vs. 560B, it's fired only for longer inputs.
# But not on P4, where it kills performance, nor on Sandy Bridge,
# where the folded loop is approximately as fast...
#
# June 2012.
#
# Add AMD XOP-specific code path, >30% improvement on Bulldozer over
# May version, >60% over original. Add AVX+shrd code path, >25%
# improvement on Sandy Bridge over May version, 60% over original.
#
# May 2013.
#
# Replace AMD XOP code path with SSSE3 to cover more processors.
# (Biggest improvement coefficient is on upcoming Atom Silvermont,
# not shown.) Add AVX+BMI code path.
#
# March 2014.
#
# Add support for Intel SHA Extensions.
#
# Performance in clock cycles per processed byte (less is better):
#
#		gcc	icc	x86 asm(*)	SIMD	x86_64 asm(**)
# Pentium	46	57	40/38		-	-
# PIII		36	33	27/24		-	-
# P4		41	38	28		-	17.3
# AMD K8	27	25	19/15.5		-	14.9
# Core2		26	23	18/15.6		14.3	13.8
# Westmere	27	-	19/15.7		13.4	12.3
# Sandy Bridge	25	-	15.9		12.4	11.6
# Ivy Bridge	24	-	15.0		11.4	10.3
# Haswell	22	-	13.9		9.46	7.80
# Skylake	20	-	14.9		9.50	7.70
# Bulldozer	36	-	27/22		17.0	13.6
# VIA Nano	36	-	25/22		16.8	16.5
# Atom		50	-	30/25		21.9	18.9
# Silvermont	40	-	34/31		22.9	20.6
# Goldmont	29	-	20		16.3(***)
#
# (*)	numbers after slash are for unrolled loop, where applicable;
# (**)	x86_64 assembly performance is presented for reference
#	purposes, results are best-available;
# (***)	SHAEXT result is 4.1, strangely enough better than 64-bit one;

$0 =~ m/(.*[\/\\])[^\/\\]+$/; $dir=$1;
push(@INC,"${dir}","${dir}../../perlasm");
require "x86asm.pl";

$output=pop and open STDOUT,">$output";

&asm_init($ARGV[0],$ARGV[$#ARGV] eq "386");

$xmm=$avx=0;
for (@ARGV) { $xmm=1 if (/-DOPENSSL_IA32_SSE2/); }

if ($xmm &&	`$ENV{CC} -Wa,-v -c -o /dev/null -x assembler /dev/null 2>&1`
			=~ /GNU assembler version ([2-9]\.[0-9]+)/) {
	$avx = ($1>=2.19) + ($1>=2.22);
}

if ($xmm && !$avx && $ARGV[0] eq "win32n" &&
		`nasm -v 2>&1` =~ /NASM version ([2-9]\.[0-9]+)/) {
	$avx = ($1>=2.03) + ($1>=2.10);
}

if ($xmm && !$avx && $ARGV[0] eq "win32" &&
		`ml 2>&1` =~ /Version ([0-9]+)\./) {
	$avx = ($1>=10) + ($1>=11);
}

if ($xmm && !$avx && `$ENV{CC} -v 2>&1` =~ /((?:clang|LLVM) version|based on LLVM) ([0-9]+\.[0-9]+)/) {
	$avx = ($2>=3.0) + ($2>3.0);
}
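
# Worked example of the probes above: GNU as 2.22 gives
# $avx = (2.22>=2.19)+(2.22>=2.22) = 2, enabling both the AVX and
# AVX+BMI code paths; 2.19..2.21 gives $avx = 1, AVX only; anything
# older leaves $avx = 0, so only the SSSE3 and compact paths remain.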

$shaext=$xmm;	### set to zero if compiling for 1.0.1

$unroll_after = 64*4;	# If pre-evicted from the L1P cache, the first
			# spin of the fully unrolled loop was measured
			# to run about 3-4x slower. If the slowdown
			# coefficient is N and the unrolled loop is m
			# times faster, you break even at (N-1)/(m-1)
			# blocks. That then needs to be adjusted for the
			# probability of the code being evicted, code
			# size/cache size = 1/4. Typical m is 1.15...
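			#
			# One reading of that arithmetic: with N~3.5
			# and m~1.15, (N-1)/(m-1) = 2.5/0.15 ~ 17
			# blocks, and weighting by the ~1/4 eviction
			# probability brings break-even down to ~4
			# blocks, i.e. 4*64 = 256 bytes, the 64*4
			# threshold chosen above.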

$A="eax";
$E="edx";
$T="ebx";
$Aoff=&DWP(4,"esp");
$Boff=&DWP(8,"esp");
$Coff=&DWP(12,"esp");
$Doff=&DWP(16,"esp");
$Eoff=&DWP(20,"esp");
$Foff=&DWP(24,"esp");
$Goff=&DWP(28,"esp");
$Hoff=&DWP(32,"esp");
$Xoff=&DWP(36,"esp");
$K256="ebp";

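# For reference (FIPS 180-4), the scalar code below computes per round
#
#	Sigma0(x) = (x>>>2)  ^ (x>>>13) ^ (x>>>22)
#	Sigma1(x) = (x>>>6)  ^ (x>>>11) ^ (x>>>25)
#	sigma0(x) = (x>>>7)  ^ (x>>>18) ^ (x>>3)
#	sigma1(x) = (x>>>17) ^ (x>>>19) ^ (x>>10)
#	Ch(e,f,g)  = (e&f) ^ (~e&g)
#	Maj(a,b,c) = (a&b) ^ (a&c) ^ (b&c)
#
# with the message schedule X[i] = X[i-16] + sigma0(X[i-15]) + X[i-7] +
# sigma1(X[i-2]) for rounds 16..63; the staggered instruction pairs in
# BODY_16_63/BODY_00_15 below are these expressions modulo-scheduled
# across adjacent rounds.
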
sub BODY_16_63() {
	&mov	($T,"ecx");			# "ecx" is preloaded
	 &mov	("esi",&DWP(4*(9+15+16-14),"esp"));
	&ror	("ecx",18-7);
	 &mov	("edi","esi");
	&ror	("esi",19-17);
	 &xor	("ecx",$T);
	 &shr	($T,3);
	&ror	("ecx",7);
	 &xor	("esi","edi");
	 &xor	($T,"ecx");			# T = sigma0(X[-15])
	&ror	("esi",17);
	 &add	($T,&DWP(4*(9+15+16),"esp"));	# T += X[-16]
	&shr	("edi",10);
	 &add	($T,&DWP(4*(9+15+16-9),"esp"));	# T += X[-7]
	#&xor	("edi","esi")			# sigma1(X[-2])
	# &add	($T,"edi");			# T += sigma1(X[-2])
	# &mov	(&DWP(4*(9+15),"esp"),$T);	# save X[0]

	&BODY_00_15(1);
}
sub BODY_00_15() {
    my $in_16_63=shift;

	&mov	("ecx",$E);
	 &xor	("edi","esi")			if ($in_16_63);	# sigma1(X[-2])
	 &mov	("esi",$Foff);
	&ror	("ecx",25-11);
	 &add	($T,"edi")			if ($in_16_63);	# T += sigma1(X[-2])
	 &mov	("edi",$Goff);
	&xor	("ecx",$E);
	 &xor	("esi","edi");
	 &mov	($T,&DWP(4*(9+15),"esp"))	if (!$in_16_63);
	 &mov	(&DWP(4*(9+15),"esp"),$T)	if ($in_16_63);	# save X[0]
	&ror	("ecx",11-6);
	 &and	("esi",$E);
	 &mov	($Eoff,$E);		# modulo-scheduled
	&xor	($E,"ecx");
	 &add	($T,$Hoff);		# T += h
	 &xor	("esi","edi");		# Ch(e,f,g)
	&ror	($E,6);			# Sigma1(e)
	 &mov	("ecx",$A);
	 &add	($T,"esi");		# T += Ch(e,f,g)

	&ror	("ecx",22-13);
	 &add	($T,$E);		# T += Sigma1(e)
	 &mov	("edi",$Boff);
	&xor	("ecx",$A);
	 &mov	($Aoff,$A);		# modulo-scheduled
	 &lea	("esp",&DWP(-4,"esp"));
	&ror	("ecx",13-2);
	 &mov	("esi",&DWP(0,$K256));
	&xor	("ecx",$A);
	 &mov	($E,$Eoff);		# e in next iteration, d in this one
	 &xor	($A,"edi");		# a ^= b
	&ror	("ecx",2);		# Sigma0(a)

	 &add	($T,"esi");		# T += K[i]
	 &mov	(&DWP(0,"esp"),$A);	# (b^c) in next round
	&add	($E,$T);		# d += T
	 &and	($A,&DWP(4,"esp"));	# a &= (b^c)
	&add	($T,"ecx");		# T += Sigma0(a)
	 &xor	($A,"edi");		# h = Maj(a,b,c) = Ch(a^b,c,b)
	 &mov	("ecx",&DWP(4*(9+15+16-1),"esp"))	if ($in_16_63);	# preload T
	&add	($K256,4);
	 &add	($A,$T);		# h += T
}
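
# The "alternative Maj" used above rests on the identity
#
#	Maj(a,b,c) = Ch(a^b,c,b) = ((a^b) & (b^c)) ^ b
#
# if b==c the result is b (the majority bit), otherwise b^c is all
# ones and the expression collapses to a, which casts the deciding
# vote.  Since this round's a^b is next round's b^c, the value is
# carried in a rotating stack slot, costing only one AND and two XORs
# per round.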

&external_label("OPENSSL_ia32cap_P")		if (!$i386);

&function_begin("sha256_block_data_order");
	&mov	("esi",wparam(0));	# ctx
	&mov	("edi",wparam(1));	# inp
	&mov	("eax",wparam(2));	# num
	&mov	("ebx","esp");		# saved sp

	&call	(&label("pic_point"));	# make it PIC!
&set_label("pic_point");
	&blindpop($K256);
	&lea	($K256,&DWP(&label("K256")."-".&label("pic_point"),$K256));

	&sub	("esp",16);
	&and	("esp",-64);

	&shl	("eax",6);
	&add	("eax","edi");
	&mov	(&DWP(0,"esp"),"esi");	# ctx
	&mov	(&DWP(4,"esp"),"edi");	# inp
	&mov	(&DWP(8,"esp"),"eax");	# inp+num*64
	&mov	(&DWP(12,"esp"),"ebx");	# saved sp
						if (!$i386 && $xmm) {
	&picmeup("edx","OPENSSL_ia32cap_P",$K256,&label("K256"));
	&mov	("ecx",&DWP(0,"edx"));
	&mov	("ebx",&DWP(4,"edx"));
	&test	("ecx",1<<20);		# check for P4
	&jnz	(&label("loop"));
	&mov	("edx",&DWP(8,"edx"))	if ($xmm);
	&test	("ecx",1<<24);		# check for FXSR
	&jz	($unroll_after?&label("no_xmm"):&label("loop"));
	&and	("ecx",1<<30);		# mask "Intel CPU" bit
	&and	("ebx",1<<28|1<<9);	# mask AVX and SSSE3 bits
	&test	("edx",1<<29)		if ($shaext);	# check for SHA
	&jnz	(&label("shaext"))	if ($shaext);
	&or	("ecx","ebx");
	&and	("ecx",1<<28|1<<30);
	&cmp	("ecx",1<<28|1<<30);
					if ($xmm) {
	&je	(&label("AVX"))		if ($avx);
	&test	("ebx",1<<9);		# check for SSSE3
	&jnz	(&label("SSSE3"));
					} else {
	&je	(&label("loop_shrd"));
					}
						if ($unroll_after) {
&set_label("no_xmm");
	&sub	("eax","edi");
	&cmp	("eax",$unroll_after);
	&jae	(&label("unrolled"));
						} }
	&jmp	(&label("loop"));

sub COMPACT_LOOP() {
my $suffix=shift;

&set_label("loop$suffix",$suffix?32:16);
    # copy input block to stack reversing byte and dword order
    for($i=0;$i<4;$i++) {
	&mov	("eax",&DWP($i*16+0,"edi"));
	&mov	("ebx",&DWP($i*16+4,"edi"));
	&mov	("ecx",&DWP($i*16+8,"edi"));
	&bswap	("eax");
	&mov	("edx",&DWP($i*16+12,"edi"));
	&bswap	("ebx");
	&push	("eax");
	&bswap	("ecx");
	&push	("ebx");
	&bswap	("edx");
	&push	("ecx");
	&push	("edx");
    }
	&add	("edi",64);
	&lea	("esp",&DWP(-4*9,"esp"));# place for A,B,C,D,E,F,G,H
	&mov	(&DWP(4*(9+16)+4,"esp"),"edi");

	# copy ctx->h[0-7] to A,B,C,D,E,F,G,H on stack
	&mov	($A,&DWP(0,"esi"));
	&mov	("ebx",&DWP(4,"esi"));
	&mov	("ecx",&DWP(8,"esi"));
	&mov	("edi",&DWP(12,"esi"));
	# &mov	($Aoff,$A);
	&mov	($Boff,"ebx");
	&xor	("ebx","ecx");
	&mov	($Coff,"ecx");
	&mov	($Doff,"edi");
	&mov	(&DWP(0,"esp"),"ebx");	# magic
	&mov	($E,&DWP(16,"esi"));
	&mov	("ebx",&DWP(20,"esi"));
	&mov	("ecx",&DWP(24,"esi"));
	&mov	("edi",&DWP(28,"esi"));
	# &mov	($Eoff,$E);
	&mov	($Foff,"ebx");
	&mov	($Goff,"ecx");
	&mov	($Hoff,"edi");

&set_label("00_15$suffix",16);

	&BODY_00_15();

	&cmp	("esi",0xc19bf174);
	&jne	(&label("00_15$suffix"));

	&mov	("ecx",&DWP(4*(9+15+16-1),"esp"));	# preloaded in BODY_00_15(1)
	&jmp	(&label("16_63$suffix"));

&set_label("16_63$suffix",16);

	&BODY_16_63();

	&cmp	("esi",0xc67178f2);
	&jne	(&label("16_63$suffix"));

	&mov	("esi",&DWP(4*(9+16+64)+0,"esp"));#ctx
	# &mov	($A,$Aoff);
	&mov	("ebx",$Boff);
	# &mov	("edi",$Coff);
	&mov	("ecx",$Doff);
	&add	($A,&DWP(0,"esi"));
	&add	("ebx",&DWP(4,"esi"));
	&add	("edi",&DWP(8,"esi"));
	&add	("ecx",&DWP(12,"esi"));
	&mov	(&DWP(0,"esi"),$A);
	&mov	(&DWP(4,"esi"),"ebx");
	&mov	(&DWP(8,"esi"),"edi");
	&mov	(&DWP(12,"esi"),"ecx");
	# &mov	($E,$Eoff);
	&mov	("eax",$Foff);
	&mov	("ebx",$Goff);
	&mov	("ecx",$Hoff);
	&mov	("edi",&DWP(4*(9+16+64)+4,"esp"));#inp
	&add	($E,&DWP(16,"esi"));
	&add	("eax",&DWP(20,"esi"));
	&add	("ebx",&DWP(24,"esi"));
	&add	("ecx",&DWP(28,"esi"));
	&mov	(&DWP(16,"esi"),$E);
	&mov	(&DWP(20,"esi"),"eax");
	&mov	(&DWP(24,"esi"),"ebx");
	&mov	(&DWP(28,"esi"),"ecx");

	&lea	("esp",&DWP(4*(9+16+64),"esp"));# destroy frame
	&sub	($K256,4*64);			# rewind K

	&cmp	("edi",&DWP(8,"esp"));		# are we done yet?
	&jb	(&label("loop$suffix"));
}
	&COMPACT_LOOP();
	&mov	("esp",&DWP(12,"esp"));		# restore sp
&function_end_A();
						if (!$i386 && !$xmm) {
	# ~20% improvement on Sandy Bridge
	local *ror = sub { &shrd(@_[0],@_) };
	&COMPACT_LOOP("_shrd");
	&mov	("esp",&DWP(12,"esp"));		# restore sp
&function_end_A();
						}

&set_label("K256",64);	# Yes! I keep it in the code segment!
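# (First 32 bits of the fractional parts of the cube roots of the
# first 64 primes 2..311, per FIPS 180-4.)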
@K256=(	0x428a2f98,0x71374491,0xb5c0fbcf,0xe9b5dba5,
	0x3956c25b,0x59f111f1,0x923f82a4,0xab1c5ed5,
	0xd807aa98,0x12835b01,0x243185be,0x550c7dc3,
	0x72be5d74,0x80deb1fe,0x9bdc06a7,0xc19bf174,
	0xe49b69c1,0xefbe4786,0x0fc19dc6,0x240ca1cc,
	0x2de92c6f,0x4a7484aa,0x5cb0a9dc,0x76f988da,
	0x983e5152,0xa831c66d,0xb00327c8,0xbf597fc7,
	0xc6e00bf3,0xd5a79147,0x06ca6351,0x14292967,
	0x27b70a85,0x2e1b2138,0x4d2c6dfc,0x53380d13,
	0x650a7354,0x766a0abb,0x81c2c92e,0x92722c85,
	0xa2bfe8a1,0xa81a664b,0xc24b8b70,0xc76c51a3,
	0xd192e819,0xd6990624,0xf40e3585,0x106aa070,
	0x19a4c116,0x1e376c08,0x2748774c,0x34b0bcb5,
	0x391c0cb3,0x4ed8aa4a,0x5b9cca4f,0x682e6ff3,
	0x748f82ee,0x78a5636f,0x84c87814,0x8cc70208,
	0x90befffa,0xa4506ceb,0xbef9a3f7,0xc67178f2	);
&data_word(@K256);
&data_word(0x00010203,0x04050607,0x08090a0b,0x0c0d0e0f);	# byte swap mask
&asciz("SHA256 block transform for x86, CRYPTOGAMS by <appro\@openssl.org>");

($a,$b,$c,$d,$e,$f,$g,$h)=(0..7);	# offsets
sub off { &DWP(4*(((shift)-$i)&7),"esp"); }
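
# An example of the rotation &off() implements: the eight working
# variables sit in fixed stack slots and the name->slot mapping shifts
# with $i instead of the data moving.  At round $i, &off($b) =
# &DWP(4*((1-$i)&7),"esp"), which is exactly &off($a) of round $i-1,
# so last round's "a" slot is simply read as this round's "b".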

if (!$i386 && $unroll_after) {
my @AH=($A,$K256);

&set_label("unrolled",16);
	&lea	("esp",&DWP(-96,"esp"));
	# copy ctx->h[0-7] to A,B,C,D,E,F,G,H on stack
	&mov	($AH[0],&DWP(0,"esi"));
	&mov	($AH[1],&DWP(4,"esi"));
	&mov	("ecx",&DWP(8,"esi"));
	&mov	("ebx",&DWP(12,"esi"));
	#&mov	(&DWP(0,"esp"),$AH[0]);
	&mov	(&DWP(4,"esp"),$AH[1]);
	&xor	($AH[1],"ecx");		# magic
	&mov	(&DWP(8,"esp"),"ecx");
	&mov	(&DWP(12,"esp"),"ebx");
	&mov	($E,&DWP(16,"esi"));
	&mov	("ebx",&DWP(20,"esi"));
	&mov	("ecx",&DWP(24,"esi"));
	&mov	("esi",&DWP(28,"esi"));
	#&mov	(&DWP(16,"esp"),$E);
	&mov	(&DWP(20,"esp"),"ebx");
	&mov	(&DWP(24,"esp"),"ecx");
	&mov	(&DWP(28,"esp"),"esi");
	&jmp	(&label("grand_loop"));

&set_label("grand_loop",16);
    # copy input block to stack reversing byte order
    for($i=0;$i<5;$i++) {
	&mov	("ebx",&DWP(12*$i+0,"edi"));
	&mov	("ecx",&DWP(12*$i+4,"edi"));
	&bswap	("ebx");
	&mov	("esi",&DWP(12*$i+8,"edi"));
	&bswap	("ecx");
	&mov	(&DWP(32+12*$i+0,"esp"),"ebx");
	&bswap	("esi");
	&mov	(&DWP(32+12*$i+4,"esp"),"ecx");
	&mov	(&DWP(32+12*$i+8,"esp"),"esi");
    }
	&mov	("ebx",&DWP($i*12,"edi"));
	&add	("edi",64);
	&bswap	("ebx");
	&mov	(&DWP(96+4,"esp"),"edi");
	&mov	(&DWP(32+12*$i,"esp"),"ebx");

    my ($t1,$t2) = ("ecx","esi");

    for ($i=0;$i<64;$i++) {

      if ($i>=16) {
	&mov	($T,$t1);			# $t1 is preloaded
	# &mov	($t2,&DWP(32+4*(($i+14)&15),"esp"));
	&ror	($t1,18-7);
	 &mov	("edi",$t2);
	&ror	($t2,19-17);
	 &xor	($t1,$T);
	 &shr	($T,3);
	&ror	($t1,7);
	 &xor	($t2,"edi");
	 &xor	($T,$t1);			# T = sigma0(X[-15])
	&ror	($t2,17);
	 &add	($T,&DWP(32+4*($i&15),"esp"));	# T += X[-16]
	&shr	("edi",10);
	 &add	($T,&DWP(32+4*(($i+9)&15),"esp"));	# T += X[-7]
	#&xor	("edi",$t2)			# sigma1(X[-2])
	# &add	($T,"edi");			# T += sigma1(X[-2])
	# &mov	(&DWP(4*(9+15),"esp"),$T);	# save X[0]
      }
	&mov	($t1,$E);
	 &xor	("edi",$t2)			if ($i>=16);	# sigma1(X[-2])
	 &mov	($t2,&off($f));
	&ror	($E,25-11);
	 &add	($T,"edi")			if ($i>=16);	# T += sigma1(X[-2])
	 &mov	("edi",&off($g));
	&xor	($E,$t1);
	 &mov	($T,&DWP(32+4*($i&15),"esp"))	if ($i<16);	# X[i]
	 &mov	(&DWP(32+4*($i&15),"esp"),$T)	if ($i>=16 && $i<62);	# save X[0]
	 &xor	($t2,"edi");
	&ror	($E,11-6);
	 &and	($t2,$t1);
	 &mov	(&off($e),$t1);		# save $E, modulo-scheduled
	&xor	($E,$t1);
	 &add	($T,&off($h));		# T += h
	 &xor	("edi",$t2);		# Ch(e,f,g)
	&ror	($E,6);			# Sigma1(e)
	 &mov	($t1,$AH[0]);
	 &add	($T,"edi");		# T += Ch(e,f,g)

	&ror	($t1,22-13);
	 &mov	($t2,$AH[0]);
	 &mov	("edi",&off($b));
	&xor	($t1,$AH[0]);
	 &mov	(&off($a),$AH[0]);	# save $A, modulo-scheduled
	 &xor	($AH[0],"edi");		# a ^= b, (b^c) in next round
	&ror	($t1,13-2);
	 &and	($AH[1],$AH[0]);	# (b^c) &= (a^b)
	 &lea	($E,&DWP(@K256[$i],$T,$E));	# T += Sigma1(e)+K[i]
	&xor	($t1,$t2);
	 &xor	($AH[1],"edi");		# h = Maj(a,b,c) = Ch(a^b,c,b)
	 &mov	($t2,&DWP(32+4*(($i+2)&15),"esp"))	if ($i>=15 && $i<63);
	&ror	($t1,2);		# Sigma0(a)

	 &add	($AH[1],$E);		# h += T
	 &add	($E,&off($d));		# d += T
	&add	($AH[1],$t1);		# h += Sigma0(a)
	 &mov	($t1,&DWP(32+4*(($i+15)&15),"esp"))	if ($i>=15 && $i<63);

	@AH = reverse(@AH);		# rotate(a,h)
	($t1,$t2) = ($t2,$t1);		# rotate(t1,t2)
    }
	&mov	("esi",&DWP(96,"esp"));	#ctx
					#&mov	($AH[0],&DWP(0,"esp"));
	&xor	($AH[1],"edi");		#&mov	($AH[1],&DWP(4,"esp"));
					#&mov	("edi", &DWP(8,"esp"));
	&mov	("ecx",&DWP(12,"esp"));
	&add	($AH[0],&DWP(0,"esi"));
	&add	($AH[1],&DWP(4,"esi"));
	&add	("edi",&DWP(8,"esi"));
	&add	("ecx",&DWP(12,"esi"));
	&mov	(&DWP(0,"esi"),$AH[0]);
	&mov	(&DWP(4,"esi"),$AH[1]);
	&mov	(&DWP(8,"esi"),"edi");
	&mov	(&DWP(12,"esi"),"ecx");
	 #&mov	(&DWP(0,"esp"),$AH[0]);
	 &mov	(&DWP(4,"esp"),$AH[1]);
	 &xor	($AH[1],"edi");		# magic
	 &mov	(&DWP(8,"esp"),"edi");
	 &mov	(&DWP(12,"esp"),"ecx");
	#&mov	($E,&DWP(16,"esp"));
	&mov	("edi",&DWP(20,"esp"));
	&mov	("ebx",&DWP(24,"esp"));
	&mov	("ecx",&DWP(28,"esp"));
	&add	($E,&DWP(16,"esi"));
	&add	("edi",&DWP(20,"esi"));
	&add	("ebx",&DWP(24,"esi"));
	&add	("ecx",&DWP(28,"esi"));
	&mov	(&DWP(16,"esi"),$E);
	&mov	(&DWP(20,"esi"),"edi");
	&mov	(&DWP(24,"esi"),"ebx");
	&mov	(&DWP(28,"esi"),"ecx");
	 #&mov	(&DWP(16,"esp"),$E);
	 &mov	(&DWP(20,"esp"),"edi");
	&mov	("edi",&DWP(96+4,"esp"));	# inp
	 &mov	(&DWP(24,"esp"),"ebx");
	 &mov	(&DWP(28,"esp"),"ecx");

	&cmp	("edi",&DWP(96+8,"esp"));	# are we done yet?
	&jb	(&label("grand_loop"));

	&mov	("esp",&DWP(96+12,"esp"));	# restore sp
&function_end_A();
}
						if (!$i386 && $xmm) {{{
if ($shaext) {
######################################################################
# Intel SHA Extensions implementation of SHA256 update function.
#
my ($ctx,$inp,$end)=("esi","edi","eax");
my ($Wi,$ABEF,$CDGH,$TMP)=map("xmm$_",(0..2,7));
my @MSG=map("xmm$_",(3..6));

sub sha256op38 {
 my ($opcodelet,$dst,$src)=@_;
    if ("$dst:$src" =~ /xmm([0-7]):xmm([0-7])/)
    {	&data_byte(0x0f,0x38,$opcodelet,0xc0|($1<<3)|$2);	}
}
sub sha256rnds2	{ sha256op38(0xcb,@_); }
sub sha256msg1	{ sha256op38(0xcc,@_); }
sub sha256msg2	{ sha256op38(0xcd,@_); }
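
# Example of the hand-assembled encoding: &sha256rnds2("xmm2","xmm1")
# emits 0f 38 cb d1, i.e. sha256rnds2 %xmm1,%xmm2 with the round
# constant/message words taken implicitly from %xmm0; the ModR/M byte
# is 0xc0|(dst<<3)|src = 0xd1.  Emitting raw bytes keeps the module
# building with assemblers that predate the SHA extensions.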

&set_label("shaext",32);
	&sub		("esp",32);

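	# The SHA instructions operate on the state as two halves, ABEF
	# and CDGH, with sha256rnds2 performing two rounds per invocation
	# and taking its message/constant words implicitly from xmm0;
	# the pshufd/palignr/punpcklqdq below permute h[0..7] into that
	# layout (and back again after the loop).
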
	&movdqu		($ABEF,&QWP(0,$ctx));		# DCBA
	&lea		($K256,&DWP(0x80,$K256));
	&movdqu		($CDGH,&QWP(16,$ctx));		# HGFE
	&movdqa		($TMP,&QWP(0x100-0x80,$K256));	# byte swap mask

	&pshufd		($Wi,$ABEF,0x1b);		# ABCD
	&pshufd		($ABEF,$ABEF,0xb1);		# CDAB
	&pshufd		($CDGH,$CDGH,0x1b);		# EFGH
	&palignr	($ABEF,$CDGH,8);		# ABEF
	&punpcklqdq	($CDGH,$Wi);			# CDGH
	&jmp		(&label("loop_shaext"));

&set_label("loop_shaext",16);
	&movdqu		(@MSG[0],&QWP(0,$inp));
	&movdqu		(@MSG[1],&QWP(0x10,$inp));
	&movdqu		(@MSG[2],&QWP(0x20,$inp));
	&pshufb		(@MSG[0],$TMP);
	&movdqu		(@MSG[3],&QWP(0x30,$inp));
	&movdqa		(&QWP(16,"esp"),$CDGH);		# offload

	&movdqa		($Wi,&QWP(0*16-0x80,$K256));
	&paddd		($Wi,@MSG[0]);
	&pshufb		(@MSG[1],$TMP);
	&sha256rnds2	($CDGH,$ABEF);			# 0-3
	&pshufd		($Wi,$Wi,0x0e);
	&nop		();
	&movdqa		(&QWP(0,"esp"),$ABEF);		# offload
	&sha256rnds2	($ABEF,$CDGH);

	&movdqa		($Wi,&QWP(1*16-0x80,$K256));
	&paddd		($Wi,@MSG[1]);
	&pshufb		(@MSG[2],$TMP);
	&sha256rnds2	($CDGH,$ABEF);			# 4-7
	&pshufd		($Wi,$Wi,0x0e);
	&lea		($inp,&DWP(0x40,$inp));
	&sha256msg1	(@MSG[0],@MSG[1]);
	&sha256rnds2	($ABEF,$CDGH);

	&movdqa		($Wi,&QWP(2*16-0x80,$K256));
	&paddd		($Wi,@MSG[2]);
	&pshufb		(@MSG[3],$TMP);
	&sha256rnds2	($CDGH,$ABEF);			# 8-11
	&pshufd		($Wi,$Wi,0x0e);
	&movdqa		($TMP,@MSG[3]);
	&palignr	($TMP,@MSG[2],4);
	&nop		();
	&paddd		(@MSG[0],$TMP);
	&sha256msg1	(@MSG[1],@MSG[2]);
	&sha256rnds2	($ABEF,$CDGH);

	&movdqa		($Wi,&QWP(3*16-0x80,$K256));
	&paddd		($Wi,@MSG[3]);
	&sha256msg2	(@MSG[0],@MSG[3]);
	&sha256rnds2	($CDGH,$ABEF);			# 12-15
	&pshufd		($Wi,$Wi,0x0e);
	&movdqa		($TMP,@MSG[0]);
	&palignr	($TMP,@MSG[3],4);
	&nop		();
	&paddd		(@MSG[1],$TMP);
	&sha256msg1	(@MSG[2],@MSG[3]);
	&sha256rnds2	($ABEF,$CDGH);

for($i=4;$i<16-3;$i++) {
	&movdqa		($Wi,&QWP($i*16-0x80,$K256));
	&paddd		($Wi,@MSG[0]);
	&sha256msg2	(@MSG[1],@MSG[0]);
	&sha256rnds2	($CDGH,$ABEF);			# 16-19...
	&pshufd		($Wi,$Wi,0x0e);
	&movdqa		($TMP,@MSG[1]);
	&palignr	($TMP,@MSG[0],4);
	&nop		();
	&paddd		(@MSG[2],$TMP);
	&sha256msg1	(@MSG[3],@MSG[0]);
	&sha256rnds2	($ABEF,$CDGH);

	push(@MSG,shift(@MSG));
}
	&movdqa		($Wi,&QWP(13*16-0x80,$K256));
	&paddd		($Wi,@MSG[0]);
	&sha256msg2	(@MSG[1],@MSG[0]);
	&sha256rnds2	($CDGH,$ABEF);			# 52-55
	&pshufd		($Wi,$Wi,0x0e);
	&movdqa		($TMP,@MSG[1]);
	&palignr	($TMP,@MSG[0],4);
	&sha256rnds2	($ABEF,$CDGH);
	&paddd		(@MSG[2],$TMP);

	&movdqa		($Wi,&QWP(14*16-0x80,$K256));
	&paddd		($Wi,@MSG[1]);
	&sha256rnds2	($CDGH,$ABEF);			# 56-59
	&pshufd		($Wi,$Wi,0x0e);
	&sha256msg2	(@MSG[2],@MSG[1]);
	&movdqa		($TMP,&QWP(0x100-0x80,$K256));	# byte swap mask
	&sha256rnds2	($ABEF,$CDGH);

	&movdqa		($Wi,&QWP(15*16-0x80,$K256));
	&paddd		($Wi,@MSG[2]);
	&nop		();
	&sha256rnds2	($CDGH,$ABEF);			# 60-63
	&pshufd		($Wi,$Wi,0x0e);
	&cmp		($end,$inp);
	&nop		();
	&sha256rnds2	($ABEF,$CDGH);

	&paddd		($CDGH,&QWP(16,"esp"));
	&paddd		($ABEF,&QWP(0,"esp"));
	&jnz		(&label("loop_shaext"));

	&pshufd		($CDGH,$CDGH,0xb1);		# DCHG
	&pshufd		($TMP,$ABEF,0x1b);		# FEBA
	&pshufd		($ABEF,$ABEF,0xb1);		# BAFE
	&punpckhqdq	($ABEF,$CDGH);			# DCBA
	&palignr	($CDGH,$TMP,8);			# HGFE

	&mov		("esp",&DWP(32+12,"esp"));
	&movdqu		(&QWP(0,$ctx),$ABEF);
	&movdqu		(&QWP(16,$ctx),$CDGH);
&function_end_A();
}

my @X = map("xmm$_",(0..3));
my ($t0,$t1,$t2,$t3) = map("xmm$_",(4..7));
my @AH = ($A,$T);

&set_label("SSSE3",32);
	&lea	("esp",&DWP(-96,"esp"));
	# copy ctx->h[0-7] to A,B,C,D,E,F,G,H on stack
	&mov	($AH[0],&DWP(0,"esi"));
	&mov	($AH[1],&DWP(4,"esi"));
	&mov	("ecx",&DWP(8,"esi"));
	&mov	("edi",&DWP(12,"esi"));
	#&mov	(&DWP(0,"esp"),$AH[0]);
	&mov	(&DWP(4,"esp"),$AH[1]);
	&xor	($AH[1],"ecx");			# magic
	&mov	(&DWP(8,"esp"),"ecx");
	&mov	(&DWP(12,"esp"),"edi");
	&mov	($E,&DWP(16,"esi"));
	&mov	("edi",&DWP(20,"esi"));
	&mov	("ecx",&DWP(24,"esi"));
	&mov	("esi",&DWP(28,"esi"));
	#&mov	(&DWP(16,"esp"),$E);
	&mov	(&DWP(20,"esp"),"edi");
	&mov	("edi",&DWP(96+4,"esp"));	# inp
	&mov	(&DWP(24,"esp"),"ecx");
	&mov	(&DWP(28,"esp"),"esi");
	&movdqa	($t3,&QWP(256,$K256));
	&jmp	(&label("grand_ssse3"));

&set_label("grand_ssse3",16);
	# load input, reverse byte order, add K256[0..15], save to stack
	&movdqu	(@X[0],&QWP(0,"edi"));
	&movdqu	(@X[1],&QWP(16,"edi"));
	&movdqu	(@X[2],&QWP(32,"edi"));
	&movdqu	(@X[3],&QWP(48,"edi"));
	&add	("edi",64);
	&pshufb	(@X[0],$t3);
	&mov	(&DWP(96+4,"esp"),"edi");
	&pshufb	(@X[1],$t3);
	&movdqa	($t0,&QWP(0,$K256));
	&pshufb	(@X[2],$t3);
	&movdqa	($t1,&QWP(16,$K256));
	&paddd	($t0,@X[0]);
	&pshufb	(@X[3],$t3);
	&movdqa	($t2,&QWP(32,$K256));
	&paddd	($t1,@X[1]);
	&movdqa	($t3,&QWP(48,$K256));
	&movdqa	(&QWP(32+0,"esp"),$t0);
	&paddd	($t2,@X[2]);
	&movdqa	(&QWP(32+16,"esp"),$t1);
	&paddd	($t3,@X[3]);
	&movdqa	(&QWP(32+32,"esp"),$t2);
	&movdqa	(&QWP(32+48,"esp"),$t3);
	&jmp	(&label("ssse3_00_47"));

&set_label("ssse3_00_47",16);
	&add		($K256,64);

sub SSSE3_00_47 () {
my $j = shift;
my $body = shift;
my @X = @_;
my @insns = (&$body,&$body,&$body,&$body);	# 120 instructions

	  eval(shift(@insns));
	&movdqa		($t0,@X[1]);
	  eval(shift(@insns));			# @
	  eval(shift(@insns));
	&movdqa		($t3,@X[3]);
	  eval(shift(@insns));
	  eval(shift(@insns));
	&palignr	($t0,@X[0],4);		# X[1..4]
	  eval(shift(@insns));
	  eval(shift(@insns));			# @
	  eval(shift(@insns));
	 &palignr	($t3,@X[2],4);		# X[9..12]
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	&movdqa		($t1,$t0);
	  eval(shift(@insns));			# @
	  eval(shift(@insns));
	&movdqa		($t2,$t0);
	  eval(shift(@insns));
	  eval(shift(@insns));
	&psrld		($t0,3);
	  eval(shift(@insns));
	  eval(shift(@insns));			# @
	 &paddd		(@X[0],$t3);		# X[0..3] += X[9..12]
	  eval(shift(@insns));
	  eval(shift(@insns));
	&psrld		($t2,7);
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));			# @
	  eval(shift(@insns));
	 &pshufd	($t3,@X[3],0b11111010);	# X[14..15]
	  eval(shift(@insns));
	  eval(shift(@insns));
	&pslld		($t1,32-18);
	  eval(shift(@insns));
	  eval(shift(@insns));			# @
	&pxor		($t0,$t2);
	  eval(shift(@insns));
	  eval(shift(@insns));
	&psrld		($t2,18-7);
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));			# @
	&pxor		($t0,$t1);
	  eval(shift(@insns));
	  eval(shift(@insns));
	&pslld		($t1,18-7);
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));			# @
	&pxor		($t0,$t2);
	  eval(shift(@insns));
	  eval(shift(@insns));
	 &movdqa	($t2,$t3);
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));			# @
	&pxor		($t0,$t1);		# sigma0(X[1..4])
	  eval(shift(@insns));
	  eval(shift(@insns));
	 &psrld		($t3,10);
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));			# @
	&paddd		(@X[0],$t0);		# X[0..3] += sigma0(X[1..4])
	  eval(shift(@insns));
	  eval(shift(@insns));
	 &psrlq		($t2,17);
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));			# @
	 &pxor		($t3,$t2);
	  eval(shift(@insns));
	  eval(shift(@insns));
	 &psrlq		($t2,19-17);
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));			# @
	 &pxor		($t3,$t2);
	  eval(shift(@insns));
	  eval(shift(@insns));
	 &pshufd	($t3,$t3,0b10000000);
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));			# @
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));			# @
	  eval(shift(@insns));
	 &psrldq	($t3,8);
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	&paddd		(@X[0],$t3);		# X[0..1] += sigma1(X[14..15])
	  eval(shift(@insns));			# @
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));			# @
	  eval(shift(@insns));
	 &pshufd	($t3,@X[0],0b01010000);	# X[16..17]
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	 &movdqa	($t2,$t3);
	  eval(shift(@insns));			# @
	 &psrld		($t3,10);
	  eval(shift(@insns));
	 &psrlq		($t2,17);
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));			# @
	 &pxor		($t3,$t2);
	  eval(shift(@insns));
	  eval(shift(@insns));
	 &psrlq		($t2,19-17);
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));			# @
	 &pxor		($t3,$t2);
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	 &pshufd	($t3,$t3,0b00001000);
	  eval(shift(@insns));
	  eval(shift(@insns));			# @
	&movdqa		($t2,&QWP(16*$j,$K256));
	  eval(shift(@insns));
	  eval(shift(@insns));
	 &pslldq	($t3,8);
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));			# @
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));			# @
	&paddd		(@X[0],$t3);		# X[2..3] += sigma1(X[16..17])
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	  eval(shift(@insns));
	&paddd		($t2,@X[0]);
	  eval(shift(@insns));			# @

	foreach (@insns) { eval; }		# remaining instructions

	&movdqa		(&QWP(32+16*$j,"esp"),$t2);
}
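
# SSE carries no 32-bit vector rotate, so sigma0/sigma1 above are
# composed from shifts: x>>>n becomes (x>>n)^(x<<(32-n)), built up
# incrementally (e.g. psrld 7 followed by psrld 18-7 reuses the first
# shift for the rotate by 18).  For sigma1 only two lanes are live at
# a time, so pshufd first duplicates them as X[14],X[14],X[15],X[15],
# letting 64-bit psrlq by 17 and 19 stand in for the 32-bit rotates
# before pshufd compacts the result again.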

sub body_00_15 () {
	(
	'&mov	("ecx",$E);',
	'&ror	($E,25-11);',
	 '&mov	("esi",&off($f));',
	'&xor	($E,"ecx");',
	 '&mov	("edi",&off($g));',
	 '&xor	("esi","edi");',
	'&ror	($E,11-6);',
	 '&and	("esi","ecx");',
	 '&mov	(&off($e),"ecx");',	# save $E, modulo-scheduled
	'&xor	($E,"ecx");',
	 '&xor	("edi","esi");',	# Ch(e,f,g)
	'&ror	($E,6);',		# T = Sigma1(e)
	 '&mov	("ecx",$AH[0]);',
	 '&add	($E,"edi");',		# T += Ch(e,f,g)
	 '&mov	("edi",&off($b));',
	'&mov	("esi",$AH[0]);',

	'&ror	("ecx",22-13);',
	 '&mov	(&off($a),$AH[0]);',	# save $A, modulo-scheduled
	'&xor	("ecx",$AH[0]);',
	 '&xor	($AH[0],"edi");',	# a ^= b, (b^c) in next round
	 '&add	($E,&off($h));',	# T += h
	'&ror	("ecx",13-2);',
	 '&and	($AH[1],$AH[0]);',	# (b^c) &= (a^b)
	'&xor	("ecx","esi");',
	 '&add	($E,&DWP(32+4*($i&15),"esp"));',	# T += K[i]+X[i]
	 '&xor	($AH[1],"edi");',	# h = Maj(a,b,c) = Ch(a^b,c,b)
	'&ror	("ecx",2);',		# Sigma0(a)

	 '&add	($AH[1],$E);',		# h += T
	 '&add	($E,&off($d));',	# d += T
	'&add	($AH[1],"ecx");'.	# h += Sigma0(a)

	'@AH = reverse(@AH); $i++;'	# rotate(a,h)
	);
}
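
# SSSE3_00_47 above evals its ~31 SIMD message-schedule instructions
# interleaved a few at a time with the 120 scalar strings of four
# body_00_15 copies, so the scalar rounds hide the SIMD latencies
# rather than waiting on them.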

    for ($i=0,$j=0; $j<4; $j++) {
	&SSSE3_00_47($j,\&body_00_15,@X);
	push(@X,shift(@X));		# rotate(@X)
    }
	&cmp	(&DWP(16*$j,$K256),0x00010203);
	&jne	(&label("ssse3_00_47"));

    for ($i=0; $i<16; ) {
	foreach(body_00_15()) { eval; }
    }

	&mov	("esi",&DWP(96,"esp"));	#ctx
					#&mov	($AH[0],&DWP(0,"esp"));
	&xor	($AH[1],"edi");		#&mov	($AH[1],&DWP(4,"esp"));
					#&mov	("edi", &DWP(8,"esp"));
	&mov	("ecx",&DWP(12,"esp"));
	&add	($AH[0],&DWP(0,"esi"));
	&add	($AH[1],&DWP(4,"esi"));
	&add	("edi",&DWP(8,"esi"));
	&add	("ecx",&DWP(12,"esi"));
	&mov	(&DWP(0,"esi"),$AH[0]);
	&mov	(&DWP(4,"esi"),$AH[1]);
	&mov	(&DWP(8,"esi"),"edi");
	&mov	(&DWP(12,"esi"),"ecx");
	 #&mov	(&DWP(0,"esp"),$AH[0]);
	 &mov	(&DWP(4,"esp"),$AH[1]);
	 &xor	($AH[1],"edi");			# magic
	 &mov	(&DWP(8,"esp"),"edi");
	 &mov	(&DWP(12,"esp"),"ecx");
	#&mov	($E,&DWP(16,"esp"));
	&mov	("edi",&DWP(20,"esp"));
	&mov	("ecx",&DWP(24,"esp"));
	&add	($E,&DWP(16,"esi"));
	&add	("edi",&DWP(20,"esi"));
	&add	("ecx",&DWP(24,"esi"));
	&mov	(&DWP(16,"esi"),$E);
	&mov	(&DWP(20,"esi"),"edi");
	 &mov	(&DWP(20,"esp"),"edi");
	&mov	("edi",&DWP(28,"esp"));
	&mov	(&DWP(24,"esi"),"ecx");
	 #&mov	(&DWP(16,"esp"),$E);
	&add	("edi",&DWP(28,"esi"));
	 &mov	(&DWP(24,"esp"),"ecx");
	&mov	(&DWP(28,"esi"),"edi");
	 &mov	(&DWP(28,"esp"),"edi");
	&mov	("edi",&DWP(96+4,"esp"));	# inp

	&movdqa	($t3,&QWP(64,$K256));
	&sub	($K256,3*64);			# rewind K
	&cmp	("edi",&DWP(96+8,"esp"));	# are we done yet?
	&jb	(&label("grand_ssse3"));

	&mov	("esp",&DWP(96+12,"esp"));	# restore sp
&function_end_A();
						if ($avx) {
&set_label("AVX",32);
						if ($avx>1) {
	&and	("edx",1<<8|1<<3);		# check for BMI2+BMI1
	&cmp	("edx",1<<8|1<<3);
	&je	(&label("AVX_BMI"));
						}
	&lea	("esp",&DWP(-96,"esp"));
	&vzeroall	();
	# copy ctx->h[0-7] to A,B,C,D,E,F,G,H on stack
	&mov	($AH[0],&DWP(0,"esi"));
	&mov	($AH[1],&DWP(4,"esi"));
	&mov	("ecx",&DWP(8,"esi"));
	&mov	("edi",&DWP(12,"esi"));
	#&mov	(&DWP(0,"esp"),$AH[0]);
	&mov	(&DWP(4,"esp"),$AH[1]);
	&xor	($AH[1],"ecx");			# magic
	&mov	(&DWP(8,"esp"),"ecx");
	&mov	(&DWP(12,"esp"),"edi");
	&mov	($E,&DWP(16,"esi"));
	&mov	("edi",&DWP(20,"esi"));
	&mov	("ecx",&DWP(24,"esi"));
	&mov	("esi",&DWP(28,"esi"));
	#&mov	(&DWP(16,"esp"),$E);
	&mov	(&DWP(20,"esp"),"edi");
	&mov	("edi",&DWP(96+4,"esp"));	# inp
	&mov	(&DWP(24,"esp"),"ecx");
	&mov	(&DWP(28,"esp"),"esi");
	&vmovdqa	($t3,&QWP(256,$K256));
	&jmp	(&label("grand_avx"));

&set_label("grand_avx",32);
	# load input, reverse byte order, add K256[0..15], save to stack
	&vmovdqu	(@X[0],&QWP(0,"edi"));
	&vmovdqu	(@X[1],&QWP(16,"edi"));
	&vmovdqu	(@X[2],&QWP(32,"edi"));
	&vmovdqu	(@X[3],&QWP(48,"edi"));
	&add		("edi",64);
	&vpshufb	(@X[0],@X[0],$t3);
	&mov		(&DWP(96+4,"esp"),"edi");
	&vpshufb	(@X[1],@X[1],$t3);
	&vpshufb	(@X[2],@X[2],$t3);
	&vpaddd		($t0,@X[0],&QWP(0,$K256));
	&vpshufb	(@X[3],@X[3],$t3);
	&vpaddd		($t1,@X[1],&QWP(16,$K256));
	&vpaddd		($t2,@X[2],&QWP(32,$K256));
	&vpaddd		($t3,@X[3],&QWP(48,$K256));
	&vmovdqa	(&QWP(32+0,"esp"),$t0);
	&vmovdqa	(&QWP(32+16,"esp"),$t1);
	&vmovdqa	(&QWP(32+32,"esp"),$t2);
	&vmovdqa	(&QWP(32+48,"esp"),$t3);
	&jmp		(&label("avx_00_47"));

&set_label("avx_00_47",16);
	&add		($K256,64);

sub Xupdate_AVX () {
	(
	'&vpalignr	($t0,@X[1],@X[0],4);',	# X[1..4]
	 '&vpalignr	($t3,@X[3],@X[2],4);',	# X[9..12]
	'&vpsrld	($t2,$t0,7);',
	 '&vpaddd	(@X[0],@X[0],$t3);',	# X[0..3] += X[9..12]
	'&vpsrld	($t3,$t0,3);',
	'&vpslld	($t1,$t0,14);',
	'&vpxor		($t0,$t3,$t2);',
	 '&vpshufd	($t3,@X[3],0b11111010)',# X[14..15]
	'&vpsrld	($t2,$t2,18-7);',
	'&vpxor		($t0,$t0,$t1);',
	'&vpslld	($t1,$t1,25-14);',
	'&vpxor		($t0,$t0,$t2);',
	 '&vpsrld	($t2,$t3,10);',
	'&vpxor		($t0,$t0,$t1);',	# sigma0(X[1..4])
	 '&vpsrlq	($t1,$t3,17);',
	'&vpaddd	(@X[0],@X[0],$t0);',	# X[0..3] += sigma0(X[1..4])
	 '&vpxor	($t2,$t2,$t1);',
	 '&vpsrlq	($t3,$t3,19);',
	 '&vpxor	($t2,$t2,$t3);',	# sigma1(X[14..15])
	 '&vpshufd	($t3,$t2,0b10000100);',
	'&vpsrldq	($t3,$t3,8);',
	'&vpaddd	(@X[0],@X[0],$t3);',	# X[0..1] += sigma1(X[14..15])
	 '&vpshufd	($t3,@X[0],0b01010000)',# X[16..17]
	 '&vpsrld	($t2,$t3,10);',
	 '&vpsrlq	($t1,$t3,17);',
	 '&vpxor	($t2,$t2,$t1);',
	 '&vpsrlq	($t3,$t3,19);',
	 '&vpxor	($t2,$t2,$t3);',	# sigma1(X[16..17])
	 '&vpshufd	($t3,$t2,0b11101000);',
	'&vpslldq	($t3,$t3,8);',
	'&vpaddd	(@X[0],@X[0],$t3);'	# X[2..3] += sigma1(X[16..17])
	);
}

local *ror = sub { &shrd(@_[0],@_) };
sub AVX_00_47 () {
my $j = shift;
my $body = shift;
my @X = @_;
my @insns = (&$body,&$body,&$body,&$body);	# 120 instructions
my $insn;

	foreach (Xupdate_AVX()) {		# 31 instructions
	    eval;
	    eval(shift(@insns));
	    eval(shift(@insns));
	    eval($insn = shift(@insns));
	    eval(shift(@insns)) if ($insn =~ /rorx/ && @insns[0] =~ /rorx/);
	}
	&vpaddd		($t2,@X[0],&QWP(16*$j,$K256));
	foreach (@insns) { eval; }		# remaining instructions
	&vmovdqa	(&QWP(32+16*$j,"esp"),$t2);
}

    for ($i=0,$j=0; $j<4; $j++) {
	&AVX_00_47($j,\&body_00_15,@X);
	push(@X,shift(@X));		# rotate(@X)
    }
	&cmp	(&DWP(16*$j,$K256),0x00010203);
	&jne	(&label("avx_00_47"));

    for ($i=0; $i<16; ) {
	foreach(body_00_15()) { eval; }
    }

	&mov	("esi",&DWP(96,"esp"));	#ctx
					#&mov	($AH[0],&DWP(0,"esp"));
	&xor	($AH[1],"edi");		#&mov	($AH[1],&DWP(4,"esp"));
					#&mov	("edi", &DWP(8,"esp"));
	&mov	("ecx",&DWP(12,"esp"));
	&add	($AH[0],&DWP(0,"esi"));
	&add	($AH[1],&DWP(4,"esi"));
	&add	("edi",&DWP(8,"esi"));
	&add	("ecx",&DWP(12,"esi"));
	&mov	(&DWP(0,"esi"),$AH[0]);
	&mov	(&DWP(4,"esi"),$AH[1]);
	&mov	(&DWP(8,"esi"),"edi");
	&mov	(&DWP(12,"esi"),"ecx");
	 #&mov	(&DWP(0,"esp"),$AH[0]);
	 &mov	(&DWP(4,"esp"),$AH[1]);
	 &xor	($AH[1],"edi");			# magic
	 &mov	(&DWP(8,"esp"),"edi");
	 &mov	(&DWP(12,"esp"),"ecx");
	#&mov	($E,&DWP(16,"esp"));
	&mov	("edi",&DWP(20,"esp"));
	&mov	("ecx",&DWP(24,"esp"));
	&add	($E,&DWP(16,"esi"));
	&add	("edi",&DWP(20,"esi"));
	&add	("ecx",&DWP(24,"esi"));
	&mov	(&DWP(16,"esi"),$E);
	&mov	(&DWP(20,"esi"),"edi");
	 &mov	(&DWP(20,"esp"),"edi");
	&mov	("edi",&DWP(28,"esp"));
	&mov	(&DWP(24,"esi"),"ecx");
	 #&mov	(&DWP(16,"esp"),$E);
	&add	("edi",&DWP(28,"esi"));
	 &mov	(&DWP(24,"esp"),"ecx");
	&mov	(&DWP(28,"esi"),"edi");
	 &mov	(&DWP(28,"esp"),"edi");
	&mov	("edi",&DWP(96+4,"esp"));	# inp

	&vmovdqa	($t3,&QWP(64,$K256));
	&sub	($K256,3*64);			# rewind K
	&cmp	("edi",&DWP(96+8,"esp"));	# are we done yet?
	&jb	(&label("grand_avx"));

	&mov	("esp",&DWP(96+12,"esp"));	# restore sp
	&vzeroall	();
&function_end_A();
						if ($avx>1) {
sub bodyx_00_15 () {			# +10%
	(
	'&rorx	("ecx",$E,6)',
	'&rorx	("esi",$E,11)',
	 '&mov	(&off($e),$E)',		# save $E, modulo-scheduled
	'&rorx	("edi",$E,25)',
	'&xor	("ecx","esi")',
	 '&andn	("esi",$E,&off($g))',
	'&xor	("ecx","edi")',		# Sigma1(e)
	 '&and	($E,&off($f))',
	 '&mov	(&off($a),$AH[0]);',	# save $A, modulo-scheduled
	 '&or	($E,"esi")',		# T = Ch(e,f,g)

	'&rorx	("edi",$AH[0],2)',
	'&rorx	("esi",$AH[0],13)',
	 '&lea	($E,&DWP(0,$E,"ecx"))',	# T += Sigma1(e)
	'&rorx	("ecx",$AH[0],22)',
	'&xor	("esi","edi")',
	 '&mov	("edi",&off($b))',
	'&xor	("ecx","esi")',		# Sigma0(a)

	 '&xor	($AH[0],"edi")',	# a ^= b, (b^c) in next round
	 '&add	($E,&off($h))',		# T += h
	 '&and	($AH[1],$AH[0])',	# (b^c) &= (a^b)
	 '&add	($E,&DWP(32+4*($i&15),"esp"))',	# T += K[i]+X[i]
	 '&xor	($AH[1],"edi")',	# h = Maj(a,b,c) = Ch(a^b,c,b)

	 '&add	("ecx",$E)',		# h += T
	 '&add	($E,&off($d))',		# d += T
	'&lea	($AH[1],&DWP(0,$AH[1],"ecx"));'.	# h += Sigma0(a)

	'@AH = reverse(@AH); $i++;'	# rotate(a,h)
	);
}
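
# Relative to body_00_15, bodyx_00_15 leans on BMI: rorx computes each
# rotate into its own register without consuming the source or the
# flags, breaking up the mov+ror dependency chains, while
# andn("esi",$E,&off($g)) yields ~e&g directly, so Ch(e,f,g) =
# (e&f)|(~e&g) costs just andn+and+or.  The three-operand lea adds
# are non-destructive for the same reason.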

&set_label("AVX_BMI",32);
	&lea	("esp",&DWP(-96,"esp"));
	&vzeroall	();
	# copy ctx->h[0-7] to A,B,C,D,E,F,G,H on stack
	&mov	($AH[0],&DWP(0,"esi"));
	&mov	($AH[1],&DWP(4,"esi"));
	&mov	("ecx",&DWP(8,"esi"));
	&mov	("edi",&DWP(12,"esi"));
	#&mov	(&DWP(0,"esp"),$AH[0]);
	&mov	(&DWP(4,"esp"),$AH[1]);
	&xor	($AH[1],"ecx");			# magic
	&mov	(&DWP(8,"esp"),"ecx");
	&mov	(&DWP(12,"esp"),"edi");
	&mov	($E,&DWP(16,"esi"));
	&mov	("edi",&DWP(20,"esi"));
	&mov	("ecx",&DWP(24,"esi"));
	&mov	("esi",&DWP(28,"esi"));
	#&mov	(&DWP(16,"esp"),$E);
	&mov	(&DWP(20,"esp"),"edi");
	&mov	("edi",&DWP(96+4,"esp"));	# inp
	&mov	(&DWP(24,"esp"),"ecx");
	&mov	(&DWP(28,"esp"),"esi");
	&vmovdqa	($t3,&QWP(256,$K256));
	&jmp	(&label("grand_avx_bmi"));

&set_label("grand_avx_bmi",32);
	# load input, reverse byte order, add K256[0..15], save to stack
	&vmovdqu	(@X[0],&QWP(0,"edi"));
	&vmovdqu	(@X[1],&QWP(16,"edi"));
	&vmovdqu	(@X[2],&QWP(32,"edi"));
	&vmovdqu	(@X[3],&QWP(48,"edi"));
	&add		("edi",64);
	&vpshufb	(@X[0],@X[0],$t3);
	&mov		(&DWP(96+4,"esp"),"edi");
	&vpshufb	(@X[1],@X[1],$t3);
	&vpshufb	(@X[2],@X[2],$t3);
	&vpaddd		($t0,@X[0],&QWP(0,$K256));
	&vpshufb	(@X[3],@X[3],$t3);
	&vpaddd		($t1,@X[1],&QWP(16,$K256));
	&vpaddd		($t2,@X[2],&QWP(32,$K256));
	&vpaddd		($t3,@X[3],&QWP(48,$K256));
	&vmovdqa	(&QWP(32+0,"esp"),$t0);
	&vmovdqa	(&QWP(32+16,"esp"),$t1);
	&vmovdqa	(&QWP(32+32,"esp"),$t2);
	&vmovdqa	(&QWP(32+48,"esp"),$t3);
	&jmp		(&label("avx_bmi_00_47"));

&set_label("avx_bmi_00_47",16);
	&add		($K256,64);

    for ($i=0,$j=0; $j<4; $j++) {
	&AVX_00_47($j,\&bodyx_00_15,@X);
	push(@X,shift(@X));		# rotate(@X)
    }
	&cmp	(&DWP(16*$j,$K256),0x00010203);
	&jne	(&label("avx_bmi_00_47"));

    for ($i=0; $i<16; ) {
	foreach(bodyx_00_15()) { eval; }
    }

	&mov	("esi",&DWP(96,"esp"));	#ctx
					#&mov	($AH[0],&DWP(0,"esp"));
	&xor	($AH[1],"edi");		#&mov	($AH[1],&DWP(4,"esp"));
					#&mov	("edi", &DWP(8,"esp"));
	&mov	("ecx",&DWP(12,"esp"));
	&add	($AH[0],&DWP(0,"esi"));
	&add	($AH[1],&DWP(4,"esi"));
	&add	("edi",&DWP(8,"esi"));
	&add	("ecx",&DWP(12,"esi"));
	&mov	(&DWP(0,"esi"),$AH[0]);
	&mov	(&DWP(4,"esi"),$AH[1]);
	&mov	(&DWP(8,"esi"),"edi");
	&mov	(&DWP(12,"esi"),"ecx");
	 #&mov	(&DWP(0,"esp"),$AH[0]);
	 &mov	(&DWP(4,"esp"),$AH[1]);
	 &xor	($AH[1],"edi");			# magic
	 &mov	(&DWP(8,"esp"),"edi");
	 &mov	(&DWP(12,"esp"),"ecx");
	#&mov	($E,&DWP(16,"esp"));
	&mov	("edi",&DWP(20,"esp"));
	&mov	("ecx",&DWP(24,"esp"));
	&add	($E,&DWP(16,"esi"));
	&add	("edi",&DWP(20,"esi"));
	&add	("ecx",&DWP(24,"esi"));
	&mov	(&DWP(16,"esi"),$E);
	&mov	(&DWP(20,"esi"),"edi");
	 &mov	(&DWP(20,"esp"),"edi");
	&mov	("edi",&DWP(28,"esp"));
	&mov	(&DWP(24,"esi"),"ecx");
	 #&mov	(&DWP(16,"esp"),$E);
	&add	("edi",&DWP(28,"esi"));
	 &mov	(&DWP(24,"esp"),"ecx");
	&mov	(&DWP(28,"esi"),"edi");
	 &mov	(&DWP(28,"esp"),"edi");
	&mov	("edi",&DWP(96+4,"esp"));	# inp

	&vmovdqa	($t3,&QWP(64,$K256));
	&sub	($K256,3*64);			# rewind K
	&cmp	("edi",&DWP(96+8,"esp"));	# are we done yet?
	&jb	(&label("grand_avx_bmi"));

	&mov	("esp",&DWP(96+12,"esp"));	# restore sp
	&vzeroall	();
&function_end_A();
						}
						}
						}}}
&function_end_B("sha256_block_data_order");

&asm_finish();

close STDOUT or die "error closing STDOUT: $!";