/*
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; Copyright (c) 2012, Intel Corporation
;
; All rights reserved.
;
; Redistribution and use in source and binary forms, with or without
; modification, are permitted provided that the following conditions are
; met:
;
; * Redistributions of source code must retain the above copyright
;   notice, this list of conditions and the following disclaimer.
;
; * Redistributions in binary form must reproduce the above copyright
;   notice, this list of conditions and the following disclaimer in the
;   documentation and/or other materials provided with the
;   distribution.
;
; * Neither the name of the Intel Corporation nor the names of its
;   contributors may be used to endorse or promote products derived from
;   this software without specific prior written permission.
;
;
; THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS" AND ANY
; EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
; PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION OR
; CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
; EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
; PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
; LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
; NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
; SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
*/
/*
 * Conversion to GAS assembly and integration to libgcrypt
 *  by Jussi Kivilinna <jussi.kivilinna@iki.fi>
 */

#ifdef __x86_64
#include <config.h>
#if (defined(HAVE_COMPATIBLE_GCC_AMD64_PLATFORM_AS) || \
     defined(HAVE_COMPATIBLE_GCC_WIN64_PLATFORM_AS)) && \
    defined(HAVE_INTEL_SYNTAX_PLATFORM_AS) && \
    defined(HAVE_GCC_INLINE_ASM_AVX) && defined(USE_SHA512)

#include "asm-common-amd64.h"

.intel_syntax noprefix

.text

/* Virtual Registers */
#define msg rdi /* ARG1 */
#define digest rsi /* ARG2 */
#define msglen rdx /* ARG3 */
#define T1 rcx
#define T2 r8
#define a_64 r9
#define b_64 r10
#define c_64 r11
#define d_64 r12
#define e_64 r13
#define f_64 r14
#define g_64 r15
#define h_64 rbx
#define tmp0 rax

/*
; Local variables (stack frame)
; Note: frame_size must be an odd multiple of 8 bytes to XMM align RSP
*/
#define frame_W 0 /* Message Schedule */
#define frame_W_size (80 * 8)
#define frame_WK ((frame_W) + (frame_W_size)) /* W[t] + K[t] | W[t+1] + K[t+1] */
#define frame_WK_size (2 * 8)
#define frame_GPRSAVE ((frame_WK) + (frame_WK_size))
#define frame_GPRSAVE_size (5 * 8)
#define frame_size ((frame_GPRSAVE) + (frame_GPRSAVE_size))

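/*
; Resulting stack layout (derived from the defines above):
;   [rsp +   0 .. 639]   frame_W        80 qwords of message schedule
;   [rsp + 640 .. 655]   frame_WK       W[t]+K[t] qword pair for the rounds
;   [rsp + 656 .. 695]   frame_GPRSAVE  5 saved callee-saved GPRs
; frame_size = 696 = 87*8, an odd multiple of 8, so RSP becomes 16-byte
; aligned after the "sub rsp, frame_size" below (RSP is 8 mod 16 on entry).
*/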

/* Useful QWORD "arrays" for simpler memory references */
#define MSG(i)    msg    + 8*(i)               /* Input message (arg1) */
#define DIGEST(i) digest + 8*(i)               /* Output Digest (arg2) */
#define K_t(i)    .LK512   + 8*(i) ADD_RIP     /* SHA Constants (static mem) */
#define W_t(i)    rsp + frame_W  + 8*(i)       /* Message Schedule (stack frame) */
#define WK_2(i)   rsp + frame_WK + 8*((i) % 2) /* W[t]+K[t] (stack frame) */
/* MSG, DIGEST, K_t, W_t are arrays */
/* WK_2(t) points to 1 of 2 qwords at frame.WK depending on t being odd/even */

#define RORQ(p1, p2) \
	/* shld is faster than ror on Intel Sandybridge */ \
	shld	p1, p1, (64 - p2)

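/*
; Why shld works as a rotate: "shld r, r, c" with both operands the same
; register is a rotate left by c, and a rotate left by (64 - n) equals a
; rotate right by n, i.e. ror64(x, n) == (x >> n) | (x << (64 - n)).
*/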
#define SHA512_Round(t, a, b, c, d, e, f, g, h) \
	/* Compute Round %%t */; \
	mov	T1,   f        /* T1 = f */; \
	mov	tmp0, e        /* tmp = e */; \
	xor	T1,   g        /* T1 = f ^ g */; \
	RORQ(	tmp0, 23) /* 41     ; tmp = e ror 23 */; \
	and	T1,   e        /* T1 = (f ^ g) & e */; \
	xor	tmp0, e        /* tmp = (e ror 23) ^ e */; \
	xor	T1,   g        /* T1 = ((f ^ g) & e) ^ g = CH(e,f,g) */; \
	add	T1,   [WK_2(t)] /* W[t] + K[t] from message scheduler */; \
	RORQ(	tmp0, 4) /* 18      ; tmp = ((e ror 23) ^ e) ror 4 */; \
	xor	tmp0, e        /* tmp = (((e ror 23) ^ e) ror 4) ^ e */; \
	mov	T2,   a        /* T2 = a */; \
	add	T1,   h        /* T1 = CH(e,f,g) + W[t] + K[t] + h */; \
	RORQ(	tmp0, 14) /* 14     ; tmp = ((((e ror23)^e)ror4)^e)ror14 = S1(e) */; \
	add	T1,   tmp0        /* T1 = CH(e,f,g) + W[t] + K[t] + S1(e) */; \
	mov	tmp0, a        /* tmp = a */; \
	xor	T2,   c        /* T2 = a ^ c */; \
	and	tmp0, c        /* tmp = a & c */; \
	and	T2,   b        /* T2 = (a ^ c) & b */; \
	xor	T2,   tmp0        /* T2 = ((a ^ c) & b) ^ (a & c) = Maj(a,b,c) */; \
	mov	tmp0, a        /* tmp = a */; \
	RORQ(	tmp0, 5) /* 39      ; tmp = a ror 5 */; \
	xor	tmp0, a        /* tmp = (a ror 5) ^ a */; \
	add	d, T1          /* e(next_state) = d + T1  */; \
	RORQ(	tmp0, 6) /* 34      ; tmp = ((a ror 5) ^ a) ror 6 */; \
	xor	tmp0, a        /* tmp = (((a ror 5) ^ a) ror 6) ^ a */; \
	lea	h, [T1 + T2]   /* a(next_state) = T1 + Maj(a,b,c) */; \
	RORQ(	tmp0, 28) /* 28     ; tmp = ((((a ror5)^a)ror6)^a)ror28 = S0(a) */; \
	add	h, tmp0        /* a(next_state) = T1 + Maj(a,b,c) + S0(a) */

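/*
; A plain C sketch of what one SHA512_Round invocation computes (rotr64 is a
; hypothetical helper; the variable names follow the macro arguments):
;
;   uint64_t rotr64(uint64_t x, unsigned n) { return (x >> n) | (x << (64 - n)); }
;
;   T1 = (((f ^ g) & e) ^ g)                             ; Ch(e,f,g)
;      + wk + h                                          ; W[t] + K[t] + h
;      + (rotr64(e,14) ^ rotr64(e,18) ^ rotr64(e,41));   ; S1(e)
;   T2 = ((a ^ c) & b) ^ (a & c);                        ; Maj(a,b,c)
;   d += T1;                                             ; e of the next state
;   h  = T1 + T2
;      + (rotr64(a,28) ^ rotr64(a,34) ^ rotr64(a,39));   ; a of the next state
;
; The chained RORQ/xor steps above fold the three rotations of S1 and S0 into
; nested rotate-and-xor operations (23+4+14 = 41, 4+14 = 18; 5+6+28 = 39,
; 6+28 = 34).
*/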
#define SHA512_2Sched_2Round_avx_PART1(t, a, b, c, d, e, f, g, h) \
	/* \
	; Compute rounds %%t-2 and %%t-1 \
	; Compute message schedule QWORDS %%t and %%t+1 \
	; \
	;   Two rounds are computed based on the values for K[t-2]+W[t-2] and \
	; K[t-1]+W[t-1] which were previously stored at WK_2 by the message \
	; scheduler. \
	;   The two new schedule QWORDS are stored at [W_t(%%t)] and [W_t(%%t+1)]. \
	; They are then added to their respective SHA512 constants at \
	; [K_t(%%t)] and [K_t(%%t+1)] and stored at dqword [WK_2(%%t)] \
	;   For brevity, the comments following vectored instructions only refer to \
	; the first of a pair of QWORDS. \
	; Eg. XMM4=W[t-2] really means XMM4={W[t-2]|W[t-1]} \
	;   The computation of the message schedule and the rounds are tightly \
	; stitched to take advantage of instruction-level parallelism. \
	; For clarity, integer instructions (for the rounds calculation) are indented \
	; by one tab. Vectored instructions (for the message scheduler) are indented \
	; by two tabs. \
	*/ \
	\
		vmovdqa	xmm4, [W_t(t-2)]   /* XMM4 = W[t-2] */; \
		vmovdqu	xmm5, [W_t(t-15)]  /* XMM5 = W[t-15] */; \
	mov	T1,   f; \
		vpsrlq	xmm0, xmm4, 61       /* XMM0 = W[t-2]>>61 */; \
	mov	tmp0, e; \
		vpsrlq	xmm6, xmm5, 1        /* XMM6 = W[t-15]>>1 */; \
	xor	T1,   g; \
	RORQ(	tmp0, 23) /* 41 */; \
		vpsrlq	xmm1, xmm4, 19       /* XMM1 = W[t-2]>>19 */; \
	and	T1,   e; \
	xor	tmp0, e; \
		vpxor	xmm0, xmm0, xmm1           /* XMM0 = W[t-2]>>61 ^ W[t-2]>>19 */; \
	xor	T1,   g; \
	add	T1,   [WK_2(t)]; \
		vpsrlq	xmm7, xmm5, 8        /* XMM7 = W[t-15]>>8 */; \
	RORQ(	tmp0, 4) /* 18 */; \
		vpsrlq	xmm2, xmm4, 6        /* XMM2 = W[t-2]>>6 */; \
	xor	tmp0, e; \
	mov	T2,   a; \
	add	T1,   h; \
		vpxor	xmm6, xmm6, xmm7           /* XMM6 = W[t-15]>>1 ^ W[t-15]>>8 */; \
	RORQ(	tmp0, 14) /* 14 */; \
	add	T1,   tmp0; \
		vpsrlq	xmm8, xmm5, 7        /* XMM8 = W[t-15]>>7 */; \
	mov	tmp0, a; \
	xor	T2,   c; \
		vpsllq	xmm3, xmm4, (64-61)  /* XMM3 = W[t-2]<<3 */; \
	and	tmp0, c; \
	and	T2,   b; \
		vpxor	xmm2, xmm2, xmm3           /* XMM2 = W[t-2]>>6 ^ W[t-2]<<3 */; \
	xor	T2,   tmp0; \
	mov	tmp0, a; \
		vpsllq	xmm9, xmm5, (64-1)   /* XMM9 = W[t-15]<<63 */; \
	RORQ(	tmp0, 5) /* 39 */; \
		vpxor	xmm8, xmm8, xmm9           /* XMM8 = W[t-15]>>7 ^ W[t-15]<<63 */; \
	xor	tmp0, a; \
	add	d, T1; \
	RORQ(	tmp0, 6) /* 34 */; \
	xor	tmp0, a; \
		vpxor	xmm6, xmm6, xmm8           /* XMM6 = W[t-15]>>1 ^ W[t-15]>>8 ^ W[t-15]>>7 ^ W[t-15]<<63 */; \
	lea	h, [T1 + T2]; \
	RORQ(	tmp0, 28) /* 28 */; \
		vpsllq	xmm4, xmm4, (64-19)        /* XMM4 = W[t-2]<<45 */; \
	add	h, tmp0

#define SHA512_2Sched_2Round_avx_PART2(t, a, b, c, d, e, f, g, h) \
		vpxor	xmm0, xmm0, xmm4           /* XMM0 = W[t-2]>>61 ^ W[t-2]>>19 ^ W[t-2]<<45 */; \
	mov	T1, f; \
		vpxor	xmm0, xmm0, xmm2           /* XMM0 = s1(W[t-2]) */; \
	mov	tmp0, e; \
	xor	T1,   g; \
		vpaddq	xmm0, xmm0, [W_t(t-16)]  /* XMM0 = s1(W[t-2]) + W[t-16] */; \
		vmovdqu	xmm1, [W_t(t- 7)]  /* XMM1 = W[t-7] */; \
	RORQ(	tmp0, 23) /* 41 */; \
	and	T1,   e; \
	xor	tmp0, e; \
	xor	T1,   g; \
		vpsllq	xmm5, xmm5, (64-8)         /* XMM5 = W[t-15]<<56 */; \
	add	T1,   [WK_2(t+1)]; \
		vpxor	xmm6, xmm6, xmm5           /* XMM6 = s0(W[t-15]) */; \
	RORQ(	tmp0, 4) /* 18 */; \
		vpaddq	xmm0, xmm0, xmm6           /* XMM0 = s1(W[t-2]) + W[t-16] + s0(W[t-15]) */; \
	xor	tmp0, e; \
		vpaddq	xmm0, xmm0, xmm1           /* XMM0 = W[t] = s1(W[t-2]) + W[t-7] + s0(W[t-15]) + W[t-16] */; \
	mov	T2,   a; \
	add	T1,   h; \
	RORQ(	tmp0, 14) /* 14 */; \
	add	T1,   tmp0; \
		vmovdqa	[W_t(t)], xmm0      /* Store W[t] */; \
		vpaddq	xmm0, xmm0, [K_t(t)]        /* Compute W[t]+K[t] */; \
		vmovdqa	[WK_2(t)], xmm0       /* Store W[t]+K[t] for next rounds */; \
	mov	tmp0, a; \
	xor	T2,   c; \
	and	tmp0, c; \
	and	T2,   b; \
	xor	T2,   tmp0; \
	mov	tmp0, a; \
	RORQ(	tmp0, 5) /* 39 */; \
	xor	tmp0, a; \
	add	d, T1; \
	RORQ(	tmp0, 6) /* 34 */; \
	xor	tmp0, a; \
	lea	h, [T1 + T2]; \
	RORQ(	tmp0, 28) /* 28 */; \
	add	h, tmp0

#define SHA512_2Sched_2Round_avx(t, a, b, c, d, e, f, g, h) \
	SHA512_2Sched_2Round_avx_PART1(t, a, b, c, d, e, f, g, h); \
	SHA512_2Sched_2Round_avx_PART2(t, h, a, b, c, d, e, f, g)

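/*
; What each SHA512_2Sched_2Round_avx invocation computes for the schedule, as
; a C sketch (rotr64 as above; s0/s1 are the small sigma functions of
; FIPS 180-4), done for the qword pair t and t+1:
;
;   s0(x) = rotr64(x, 1) ^ rotr64(x, 8) ^ (x >> 7);
;   s1(x) = rotr64(x,19) ^ rotr64(x,61) ^ (x >> 6);
;   W[t]  = s1(W[t-2]) + W[t-7] + s0(W[t-15]) + W[t-16];
;   WK[t % 2] = W[t] + K[t];
;
; while the two interleaved rounds (t-2 and t-1) consume the W+K pair stored
; at WK_2 by the previous invocation.
*/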
/*
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; void sha512_avx(const void* M, void* D, uint64_t L);
; Purpose: Updates the SHA512 digest stored at D with the message stored in M.
; The size of the message pointed to by M must be an integer multiple of SHA512
;   message blocks.
; L is the message length in SHA512 blocks
*/
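/*
; The exported symbol below corresponds roughly to this C prototype (a sketch;
; the actual declaration lives on the C side of libgcrypt and is an assumption
; here):
;
;   unsigned int _gcry_sha512_transform_amd64_avx(const void *msg,
;                                                 void *digest,   ; 8 x uint64_t state
;                                                 uint64_t nblks);
;
; msg points to nblks consecutive 128-byte message blocks and digest to the
; eight 64-bit state words. eax is returned as 0; the routine erases its own
; stack frame before returning, so, assuming libgcrypt's usual convention,
; the caller has no additional stack to burn.
*/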
.globl _gcry_sha512_transform_amd64_avx
ELF(.type _gcry_sha512_transform_amd64_avx,@function;)
.align 16
_gcry_sha512_transform_amd64_avx:
	CFI_STARTPROC()
	xor eax, eax

	cmp	msglen, 0
	je	.Lnowork

	vzeroupper

	/* Allocate Stack Space */
	sub	rsp, frame_size
	CFI_ADJUST_CFA_OFFSET(frame_size);

	/* Save GPRs */
	mov	[rsp + frame_GPRSAVE + 8 * 0], rbx
	mov	[rsp + frame_GPRSAVE + 8 * 1], r12
	mov	[rsp + frame_GPRSAVE + 8 * 2], r13
	mov	[rsp + frame_GPRSAVE + 8 * 3], r14
	mov	[rsp + frame_GPRSAVE + 8 * 4], r15
	CFI_REL_OFFSET(rbx, frame_GPRSAVE + 8 * 0);
	CFI_REL_OFFSET(r12, frame_GPRSAVE + 8 * 1);
	CFI_REL_OFFSET(r13, frame_GPRSAVE + 8 * 2);
	CFI_REL_OFFSET(r14, frame_GPRSAVE + 8 * 3);
	CFI_REL_OFFSET(r15, frame_GPRSAVE + 8 * 4);

.Lupdateblock:

	/* Load state variables */
	mov	a_64, [DIGEST(0)]
	mov	b_64, [DIGEST(1)]
	mov	c_64, [DIGEST(2)]
	mov	d_64, [DIGEST(3)]
	mov	e_64, [DIGEST(4)]
	mov	f_64, [DIGEST(5)]
	mov	g_64, [DIGEST(6)]
	mov	h_64, [DIGEST(7)]

	/* BSWAP 2 QWORDS */
	vmovdqa	xmm1, [.LXMM_QWORD_BSWAP ADD_RIP]
	vmovdqu	xmm0, [MSG(0)]
	vpshufb	xmm0, xmm0, xmm1     /* BSWAP */
	vmovdqa	[W_t(0)], xmm0       /* Store Scheduled Pair */
	vpaddq	xmm0, xmm0, [K_t(0)] /* Compute W[t]+K[t] */
	vmovdqa	[WK_2(0)], xmm0      /* Store into WK for rounds */

	#define T_2_14(t, a, b, c, d, e, f, g, h) \
		/* BSWAP 2 QWORDS, Compute 2 Rounds */; \
		vmovdqu	xmm0, [MSG(t)]; \
		vpshufb	xmm0, xmm0, xmm1     /* BSWAP */; \
		SHA512_Round(((t) - 2), a##_64, b##_64, c##_64, d##_64, \
				        e##_64, f##_64, g##_64, h##_64); \
		vmovdqa	[W_t(t)], xmm0       /* Store Scheduled Pair */; \
		vpaddq	xmm0, xmm0, [K_t(t)] /* Compute W[t]+K[t] */; \
		SHA512_Round(((t) - 1), h##_64, a##_64, b##_64, c##_64, \
				        d##_64, e##_64, f##_64, g##_64); \
		vmovdqa	[WK_2(t)], xmm0      /* W[t]+K[t] into WK */

	#define T_16_78(t, a, b, c, d, e, f, g, h) \
		SHA512_2Sched_2Round_avx((t), a##_64, b##_64, c##_64, d##_64, \
					      e##_64, f##_64, g##_64, h##_64)

	#define T_80(t, a, b, c, d, e, f, g, h) \
		/* Compute 2 Rounds */; \
		SHA512_Round((t - 2), a##_64, b##_64, c##_64, d##_64, \
				      e##_64, f##_64, g##_64, h##_64); \
		SHA512_Round((t - 1), h##_64, a##_64, b##_64, c##_64, \
				      d##_64, e##_64, f##_64, g##_64)

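	/*
	; Each T_* invocation below runs two rounds; the eight working-variable
	; arguments are rotated by two positions between calls
	; (a,b,...,h -> g,h,a,...,f), so the round results never have to be
	; moved between registers.
	*/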
	T_2_14(2, a, b, c, d, e, f, g, h)
	T_2_14(4, g, h, a, b, c, d, e, f)
	T_2_14(6, e, f, g, h, a, b, c, d)
	T_2_14(8, c, d, e, f, g, h, a, b)
	T_2_14(10, a, b, c, d, e, f, g, h)
	T_2_14(12, g, h, a, b, c, d, e, f)
	T_2_14(14, e, f, g, h, a, b, c, d)
	T_16_78(16, c, d, e, f, g, h, a, b)
	T_16_78(18, a, b, c, d, e, f, g, h)
	T_16_78(20, g, h, a, b, c, d, e, f)
	T_16_78(22, e, f, g, h, a, b, c, d)
	T_16_78(24, c, d, e, f, g, h, a, b)
	T_16_78(26, a, b, c, d, e, f, g, h)
	T_16_78(28, g, h, a, b, c, d, e, f)
	T_16_78(30, e, f, g, h, a, b, c, d)
	T_16_78(32, c, d, e, f, g, h, a, b)
	T_16_78(34, a, b, c, d, e, f, g, h)
	T_16_78(36, g, h, a, b, c, d, e, f)
	T_16_78(38, e, f, g, h, a, b, c, d)
	T_16_78(40, c, d, e, f, g, h, a, b)
	T_16_78(42, a, b, c, d, e, f, g, h)
	T_16_78(44, g, h, a, b, c, d, e, f)
	T_16_78(46, e, f, g, h, a, b, c, d)
	T_16_78(48, c, d, e, f, g, h, a, b)
	T_16_78(50, a, b, c, d, e, f, g, h)
	T_16_78(52, g, h, a, b, c, d, e, f)
	T_16_78(54, e, f, g, h, a, b, c, d)
	T_16_78(56, c, d, e, f, g, h, a, b)
	T_16_78(58, a, b, c, d, e, f, g, h)
	T_16_78(60, g, h, a, b, c, d, e, f)
	T_16_78(62, e, f, g, h, a, b, c, d)
	T_16_78(64, c, d, e, f, g, h, a, b)
	T_16_78(66, a, b, c, d, e, f, g, h)
	T_16_78(68, g, h, a, b, c, d, e, f)
	T_16_78(70, e, f, g, h, a, b, c, d)
	T_16_78(72, c, d, e, f, g, h, a, b)
	T_16_78(74, a, b, c, d, e, f, g, h)
	T_16_78(76, g, h, a, b, c, d, e, f)
	T_16_78(78, e, f, g, h, a, b, c, d)
	T_80(80, c, d, e, f, g, h, a, b)

	/* Update digest */
	add	[DIGEST(0)], a_64
	add	[DIGEST(1)], b_64
	add	[DIGEST(2)], c_64
	add	[DIGEST(3)], d_64
	add	[DIGEST(4)], e_64
	add	[DIGEST(5)], f_64
	add	[DIGEST(6)], g_64
	add	[DIGEST(7)], h_64

	/* Advance to next message block */
	add	msg, 16*8
	dec	msglen
	jnz	.Lupdateblock

	/* Restore GPRs */
	mov	rbx, [rsp + frame_GPRSAVE + 8 * 0]
	mov	r12, [rsp + frame_GPRSAVE + 8 * 1]
	mov	r13, [rsp + frame_GPRSAVE + 8 * 2]
	mov	r14, [rsp + frame_GPRSAVE + 8 * 3]
	mov	r15, [rsp + frame_GPRSAVE + 8 * 4]
	CFI_RESTORE(rbx)
	CFI_RESTORE(r12)
	CFI_RESTORE(r13)
	CFI_RESTORE(r14)
	CFI_RESTORE(r15)

	vzeroall

	/* Burn stack */
	mov eax, 0
.Lerase_stack:
	vmovdqu [rsp + rax], ymm0
	add eax, 32
	cmp eax, frame_W_size
	jne .Lerase_stack
	vmovdqu [rsp + frame_WK], xmm0
	xor     eax, eax

	/* Restore Stack Pointer */
	add	rsp, frame_size
	CFI_ADJUST_CFA_OFFSET(-frame_size);

.Lnowork:
	ret
	CFI_ENDPROC()

/*
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;; Binary Data
*/

.align 16

/* Mask for byte-swapping a couple of qwords in an XMM register using (v)pshufb. */
.LXMM_QWORD_BSWAP:
	.octa 0x08090a0b0c0d0e0f0001020304050607
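	/* Each mask byte selects source byte 7..0 in the low lane and 15..8 in
	 * the high lane, so vpshufb with this mask byte-reverses each of the
	 * two qwords, converting big-endian message words to host order. */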

/* K[t] used in SHA512 hashing */
.LK512:
	.quad 0x428a2f98d728ae22,0x7137449123ef65cd
	.quad 0xb5c0fbcfec4d3b2f,0xe9b5dba58189dbbc
	.quad 0x3956c25bf348b538,0x59f111f1b605d019
	.quad 0x923f82a4af194f9b,0xab1c5ed5da6d8118
	.quad 0xd807aa98a3030242,0x12835b0145706fbe
	.quad 0x243185be4ee4b28c,0x550c7dc3d5ffb4e2
	.quad 0x72be5d74f27b896f,0x80deb1fe3b1696b1
	.quad 0x9bdc06a725c71235,0xc19bf174cf692694
	.quad 0xe49b69c19ef14ad2,0xefbe4786384f25e3
	.quad 0x0fc19dc68b8cd5b5,0x240ca1cc77ac9c65
	.quad 0x2de92c6f592b0275,0x4a7484aa6ea6e483
	.quad 0x5cb0a9dcbd41fbd4,0x76f988da831153b5
	.quad 0x983e5152ee66dfab,0xa831c66d2db43210
	.quad 0xb00327c898fb213f,0xbf597fc7beef0ee4
	.quad 0xc6e00bf33da88fc2,0xd5a79147930aa725
	.quad 0x06ca6351e003826f,0x142929670a0e6e70
	.quad 0x27b70a8546d22ffc,0x2e1b21385c26c926
	.quad 0x4d2c6dfc5ac42aed,0x53380d139d95b3df
	.quad 0x650a73548baf63de,0x766a0abb3c77b2a8
	.quad 0x81c2c92e47edaee6,0x92722c851482353b
	.quad 0xa2bfe8a14cf10364,0xa81a664bbc423001
	.quad 0xc24b8b70d0f89791,0xc76c51a30654be30
	.quad 0xd192e819d6ef5218,0xd69906245565a910
	.quad 0xf40e35855771202a,0x106aa07032bbd1b8
	.quad 0x19a4c116b8d2d0c8,0x1e376c085141ab53
	.quad 0x2748774cdf8eeb99,0x34b0bcb5e19b48a8
	.quad 0x391c0cb3c5c95a63,0x4ed8aa4ae3418acb
	.quad 0x5b9cca4f7763e373,0x682e6ff3d6b2b8a3
	.quad 0x748f82ee5defb2fc,0x78a5636f43172f60
	.quad 0x84c87814a1f0ab72,0x8cc702081a6439ec
	.quad 0x90befffa23631e28,0xa4506cebde82bde9
	.quad 0xbef9a3f7b2c67915,0xc67178f2e372532b
	.quad 0xca273eceea26619c,0xd186b8c721c0c207
	.quad 0xeada7dd6cde0eb1e,0xf57d4f7fee6ed178
	.quad 0x06f067aa72176fba,0x0a637dc5a2c898a6
	.quad 0x113f9804bef90dae,0x1b710b35131c471b
	.quad 0x28db77f523047d84,0x32caab7b40c72493
	.quad 0x3c9ebe0a15c9bebc,0x431d67c49c100d4c
	.quad 0x4cc5d4becb3e42b6,0x597f299cfc657e2a
	.quad 0x5fcb6fab3ad6faec,0x6c44198c4a475817

#endif
#endif
