// This file is generated from a similarly-named Perl script in the BoringSSL
// source tree. Do not edit by hand.

#if defined(__has_feature)
#if __has_feature(memory_sanitizer) && !defined(OPENSSL_NO_ASM)
#define OPENSSL_NO_ASM
#endif
#endif

#if !defined(OPENSSL_NO_ASM)
#if defined(__arm__)
#include <GFp/arm_arch.h>

@ Silence ARMv8 deprecated IT instruction warnings. This file is used by both
@ ARMv7 and ARMv8 processors and does not use ARMv8 instructions. (ARMv8 PMULL
@ instructions are in aesv8-armx.pl.)
.arch	armv7-a

.text
#if defined(__thumb2__) || defined(__clang__)
.syntax	unified
#endif
#if defined(__thumb2__)
.thumb
#else
.code	32
#endif

#ifdef  __clang__
#define ldrplb  ldrbpl
#define ldrneb  ldrbne
#endif

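@ rem_4bit[] holds the 16 reduction constants used by the nibble-at-a-time
@ (4-bit) GHASH code below; entry n is the GF(2) multiple n*0x1C20, i.e. the
@ contribution of the four bits shifted out of the low end of Xi, which is
@ folded back in at the top via "eor ...,lsl#16".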
.type	rem_4bit,%object
.align	5
rem_4bit:
.short	0x0000,0x1C20,0x3840,0x2460
.short	0x7080,0x6CA0,0x48C0,0x54E0
.short	0xE100,0xFD20,0xD940,0xC560
.short	0x9180,0x8DA0,0xA9C0,0xB5E0
.size	rem_4bit,.-rem_4bit

.type	rem_4bit_get,%function
rem_4bit_get:
#if defined(__thumb2__)
	adr	r2,rem_4bit
#else
	sub	r2,pc,#8+32	@ &rem_4bit
#endif
	b	.Lrem_4bit_got
	nop
	nop
.size	rem_4bit_get,.-rem_4bit_get

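@ GFp_gcm_ghash_4bit(Xi, Htbl, inp, len): per AAPCS the arguments presumably
@ arrive as r0=Xi (128-bit accumulator), r1=Htbl (16-entry table), r2=inp and
@ r3=len (a multiple of 16 bytes); the register usage below is consistent with
@ that assumption.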
.globl	GFp_gcm_ghash_4bit
.hidden	GFp_gcm_ghash_4bit
.type	GFp_gcm_ghash_4bit,%function
.align	4
GFp_gcm_ghash_4bit:
#if defined(__thumb2__)
	adr	r12,rem_4bit
#else
	sub	r12,pc,#8+48		@ &rem_4bit
#endif
	add	r3,r2,r3		@ r3 to point at the end
	stmdb	sp!,{r3,r4,r5,r6,r7,r8,r9,r10,r11,lr}		@ save r3/end too

	ldmia	r12,{r4,r5,r6,r7,r8,r9,r10,r11}		@ copy rem_4bit ...
	stmdb	sp!,{r4,r5,r6,r7,r8,r9,r10,r11}		@ ... to stack

	ldrb	r12,[r2,#15]
	ldrb	r14,[r0,#15]
.Louter:
	eor	r12,r12,r14
	and	r14,r12,#0xf0
	and	r12,r12,#0x0f
	mov	r3,#14

	add	r7,r1,r12,lsl#4
	ldmia	r7,{r4,r5,r6,r7}	@ load Htbl[nlo]
	add	r11,r1,r14
	ldrb	r12,[r2,#14]

	and	r14,r4,#0xf		@ rem
	ldmia	r11,{r8,r9,r10,r11}	@ load Htbl[nhi]
	add	r14,r14,r14
	eor	r4,r8,r4,lsr#4
	ldrh	r8,[sp,r14]		@ rem_4bit[rem]
	eor	r4,r4,r5,lsl#28
	ldrb	r14,[r0,#14]
	eor	r5,r9,r5,lsr#4
	eor	r5,r5,r6,lsl#28
	eor	r6,r10,r6,lsr#4
	eor	r6,r6,r7,lsl#28
	eor	r7,r11,r7,lsr#4
	eor	r12,r12,r14
	and	r14,r12,#0xf0
	and	r12,r12,#0x0f
	eor	r7,r7,r8,lsl#16

.Linner:
	add	r11,r1,r12,lsl#4
	and	r12,r4,#0xf		@ rem
	subs	r3,r3,#1
	add	r12,r12,r12
	ldmia	r11,{r8,r9,r10,r11}	@ load Htbl[nlo]
	eor	r4,r8,r4,lsr#4
	eor	r4,r4,r5,lsl#28
	eor	r5,r9,r5,lsr#4
	eor	r5,r5,r6,lsl#28
	ldrh	r8,[sp,r12]		@ rem_4bit[rem]
	eor	r6,r10,r6,lsr#4
#ifdef	__thumb2__
	it	pl
#endif
	ldrplb	r12,[r2,r3]
	eor	r6,r6,r7,lsl#28
	eor	r7,r11,r7,lsr#4

	add	r11,r1,r14
	and	r14,r4,#0xf		@ rem
	eor	r7,r7,r8,lsl#16	@ ^= rem_4bit[rem]
	add	r14,r14,r14
	ldmia	r11,{r8,r9,r10,r11}	@ load Htbl[nhi]
	eor	r4,r8,r4,lsr#4
#ifdef	__thumb2__
	it	pl
#endif
	ldrplb	r8,[r0,r3]
	eor	r4,r4,r5,lsl#28
	eor	r5,r9,r5,lsr#4
	ldrh	r9,[sp,r14]
	eor	r5,r5,r6,lsl#28
	eor	r6,r10,r6,lsr#4
	eor	r6,r6,r7,lsl#28
#ifdef	__thumb2__
	it	pl
#endif
	eorpl	r12,r12,r8
	eor	r7,r11,r7,lsr#4
#ifdef	__thumb2__
	itt	pl
#endif
	andpl	r14,r12,#0xf0
	andpl	r12,r12,#0x0f
	eor	r7,r7,r9,lsl#16	@ ^= rem_4bit[rem]
	bpl	.Linner

	ldr	r3,[sp,#32]		@ re-load r3/end
	add	r2,r2,#16
	mov	r14,r4
#if __ARM_ARCH__>=7 && defined(__ARMEL__)
	rev	r4,r4
	str	r4,[r0,#12]
#elif defined(__ARMEB__)
	str	r4,[r0,#12]
#else
	mov	r9,r4,lsr#8
	strb	r4,[r0,#12+3]
	mov	r10,r4,lsr#16
	strb	r9,[r0,#12+2]
	mov	r11,r4,lsr#24
	strb	r10,[r0,#12+1]
	strb	r11,[r0,#12]
#endif
	cmp	r2,r3
#if __ARM_ARCH__>=7 && defined(__ARMEL__)
	rev	r5,r5
	str	r5,[r0,#8]
#elif defined(__ARMEB__)
	str	r5,[r0,#8]
#else
	mov	r9,r5,lsr#8
	strb	r5,[r0,#8+3]
	mov	r10,r5,lsr#16
	strb	r9,[r0,#8+2]
	mov	r11,r5,lsr#24
	strb	r10,[r0,#8+1]
	strb	r11,[r0,#8]
#endif

#ifdef __thumb2__
	it	ne
#endif
	ldrneb	r12,[r2,#15]
#if __ARM_ARCH__>=7 && defined(__ARMEL__)
	rev	r6,r6
	str	r6,[r0,#4]
#elif defined(__ARMEB__)
	str	r6,[r0,#4]
#else
	mov	r9,r6,lsr#8
	strb	r6,[r0,#4+3]
	mov	r10,r6,lsr#16
	strb	r9,[r0,#4+2]
	mov	r11,r6,lsr#24
	strb	r10,[r0,#4+1]
	strb	r11,[r0,#4]
#endif

#if __ARM_ARCH__>=7 && defined(__ARMEL__)
	rev	r7,r7
	str	r7,[r0,#0]
#elif defined(__ARMEB__)
	str	r7,[r0,#0]
#else
	mov	r9,r7,lsr#8
	strb	r7,[r0,#0+3]
	mov	r10,r7,lsr#16
	strb	r9,[r0,#0+2]
	mov	r11,r7,lsr#24
	strb	r10,[r0,#0+1]
	strb	r11,[r0,#0]
#endif

	bne	.Louter

	add	sp,sp,#36
#if __ARM_ARCH__>=5
	ldmia	sp!,{r4,r5,r6,r7,r8,r9,r10,r11,pc}
#else
	ldmia	sp!,{r4,r5,r6,r7,r8,r9,r10,r11,lr}
	tst	lr,#1
	moveq	pc,lr			@ be binary compatible with V4, yet
.word	0xe12fff1e			@ interoperable with Thumb ISA:-)
#endif
.size	GFp_gcm_ghash_4bit,.-GFp_gcm_ghash_4bit

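@ GFp_gcm_gmult_4bit(Xi, Htbl): presumably r0=Xi and r1=Htbl per AAPCS. The
@ branch to rem_4bit_get leaves &rem_4bit in r2 before execution continues at
@ .Lrem_4bit_got below.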
.globl	GFp_gcm_gmult_4bit
.hidden	GFp_gcm_gmult_4bit
.type	GFp_gcm_gmult_4bit,%function
GFp_gcm_gmult_4bit:
	stmdb	sp!,{r4,r5,r6,r7,r8,r9,r10,r11,lr}
	ldrb	r12,[r0,#15]
	b	rem_4bit_get
.Lrem_4bit_got:
	and	r14,r12,#0xf0
	and	r12,r12,#0x0f
	mov	r3,#14

	add	r7,r1,r12,lsl#4
	ldmia	r7,{r4,r5,r6,r7}	@ load Htbl[nlo]
	ldrb	r12,[r0,#14]

	add	r11,r1,r14
	and	r14,r4,#0xf		@ rem
	ldmia	r11,{r8,r9,r10,r11}	@ load Htbl[nhi]
	add	r14,r14,r14
	eor	r4,r8,r4,lsr#4
	ldrh	r8,[r2,r14]	@ rem_4bit[rem]
	eor	r4,r4,r5,lsl#28
	eor	r5,r9,r5,lsr#4
	eor	r5,r5,r6,lsl#28
	eor	r6,r10,r6,lsr#4
	eor	r6,r6,r7,lsl#28
	eor	r7,r11,r7,lsr#4
	and	r14,r12,#0xf0
	eor	r7,r7,r8,lsl#16
	and	r12,r12,#0x0f

.Loop:
	add	r11,r1,r12,lsl#4
	and	r12,r4,#0xf		@ rem
	subs	r3,r3,#1
	add	r12,r12,r12
	ldmia	r11,{r8,r9,r10,r11}	@ load Htbl[nlo]
	eor	r4,r8,r4,lsr#4
	eor	r4,r4,r5,lsl#28
	eor	r5,r9,r5,lsr#4
	eor	r5,r5,r6,lsl#28
	ldrh	r8,[r2,r12]	@ rem_4bit[rem]
	eor	r6,r10,r6,lsr#4
#ifdef	__thumb2__
	it	pl
#endif
	ldrplb	r12,[r0,r3]
	eor	r6,r6,r7,lsl#28
	eor	r7,r11,r7,lsr#4

	add	r11,r1,r14
	and	r14,r4,#0xf		@ rem
	eor	r7,r7,r8,lsl#16	@ ^= rem_4bit[rem]
	add	r14,r14,r14
	ldmia	r11,{r8,r9,r10,r11}	@ load Htbl[nhi]
	eor	r4,r8,r4,lsr#4
	eor	r4,r4,r5,lsl#28
	eor	r5,r9,r5,lsr#4
	ldrh	r8,[r2,r14]	@ rem_4bit[rem]
	eor	r5,r5,r6,lsl#28
	eor	r6,r10,r6,lsr#4
	eor	r6,r6,r7,lsl#28
	eor	r7,r11,r7,lsr#4
#ifdef	__thumb2__
	itt	pl
#endif
	andpl	r14,r12,#0xf0
	andpl	r12,r12,#0x0f
	eor	r7,r7,r8,lsl#16	@ ^= rem_4bit[rem]
	bpl	.Loop
#if __ARM_ARCH__>=7 && defined(__ARMEL__)
	rev	r4,r4
	str	r4,[r0,#12]
#elif defined(__ARMEB__)
	str	r4,[r0,#12]
#else
	mov	r9,r4,lsr#8
	strb	r4,[r0,#12+3]
	mov	r10,r4,lsr#16
	strb	r9,[r0,#12+2]
	mov	r11,r4,lsr#24
	strb	r10,[r0,#12+1]
	strb	r11,[r0,#12]
#endif

#if __ARM_ARCH__>=7 && defined(__ARMEL__)
	rev	r5,r5
	str	r5,[r0,#8]
#elif defined(__ARMEB__)
	str	r5,[r0,#8]
#else
	mov	r9,r5,lsr#8
	strb	r5,[r0,#8+3]
	mov	r10,r5,lsr#16
	strb	r9,[r0,#8+2]
	mov	r11,r5,lsr#24
	strb	r10,[r0,#8+1]
	strb	r11,[r0,#8]
#endif

#if __ARM_ARCH__>=7 && defined(__ARMEL__)
	rev	r6,r6
	str	r6,[r0,#4]
#elif defined(__ARMEB__)
	str	r6,[r0,#4]
#else
	mov	r9,r6,lsr#8
	strb	r6,[r0,#4+3]
	mov	r10,r6,lsr#16
	strb	r9,[r0,#4+2]
	mov	r11,r6,lsr#24
	strb	r10,[r0,#4+1]
	strb	r11,[r0,#4]
#endif

#if __ARM_ARCH__>=7 && defined(__ARMEL__)
	rev	r7,r7
	str	r7,[r0,#0]
#elif defined(__ARMEB__)
	str	r7,[r0,#0]
#else
	mov	r9,r7,lsr#8
	strb	r7,[r0,#0+3]
	mov	r10,r7,lsr#16
	strb	r9,[r0,#0+2]
	mov	r11,r7,lsr#24
	strb	r10,[r0,#0+1]
	strb	r11,[r0,#0]
#endif

#if __ARM_ARCH__>=5
	ldmia	sp!,{r4,r5,r6,r7,r8,r9,r10,r11,pc}
#else
	ldmia	sp!,{r4,r5,r6,r7,r8,r9,r10,r11,lr}
	tst	lr,#1
	moveq	pc,lr			@ be binary compatible with V4, yet
.word	0xe12fff1e			@ interoperable with Thumb ISA:-)
#endif
.size	GFp_gcm_gmult_4bit,.-GFp_gcm_gmult_4bit
#if __ARM_MAX_ARCH__>=7
.arch	armv7-a
.fpu	neon

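@ GFp_gcm_init_neon(Htable, H): presumably r0=output table and r1=H per AAPCS.
@ H is shifted left by one bit, the carried-out top bit is folded back in, and
@ the 0xc2...01 constant is conditionally XORed to form the "twisted" H that
@ the NEON multiply below expects; the result is stored at r0.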
.globl	GFp_gcm_init_neon
.hidden	GFp_gcm_init_neon
.type	GFp_gcm_init_neon,%function
.align	4
GFp_gcm_init_neon:
	vld1.64	d7,[r1]!		@ load H
	vmov.i8	q8,#0xe1
	vld1.64	d6,[r1]
	vshl.i64	d17,#57
	vshr.u64	d16,#63		@ t0=0xc2....01
	vdup.8	q9,d7[7]
	vshr.u64	d26,d6,#63
	vshr.s8	q9,#7			@ broadcast carry bit
	vshl.i64	q3,q3,#1
	vand	q8,q8,q9
	vorr	d7,d26		@ H<<<=1
	veor	q3,q3,q8		@ twisted H
	vstmia	r0,{q3}

	bx	lr					@ bx lr
.size	GFp_gcm_init_neon,.-GFp_gcm_init_neon

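@ GFp_gcm_gmult_neon(Xi, Htable): presumably r0=Xi and r1=the twisted-H table
@ per AAPCS. It sets r3=16 so the shared .Lgmult_neon body runs exactly once.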
.globl	GFp_gcm_gmult_neon
.hidden	GFp_gcm_gmult_neon
.type	GFp_gcm_gmult_neon,%function
.align	4
GFp_gcm_gmult_neon:
	vld1.64	d7,[r0]!		@ load Xi
	vld1.64	d6,[r0]!
	vmov.i64	d29,#0x0000ffffffffffff
	vldmia	r1,{d26,d27}	@ load twisted H
	vmov.i64	d30,#0x00000000ffffffff
#ifdef __ARMEL__
	vrev64.8	q3,q3
#endif
	vmov.i64	d31,#0x000000000000ffff
	veor	d28,d26,d27		@ Karatsuba pre-processing
	mov	r3,#16
	b	.Lgmult_neon
.size	GFp_gcm_gmult_neon,.-GFp_gcm_gmult_neon

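@ GFp_gcm_ghash_neon(Xi, Htable, inp, len): presumably r0=Xi, r1=the twisted-H
@ table, r2=inp and r3=len (a multiple of 16) per AAPCS; each pass through
@ .Loop_neon folds one 16-byte block into Xi.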
.globl	GFp_gcm_ghash_neon
.hidden	GFp_gcm_ghash_neon
.type	GFp_gcm_ghash_neon,%function
.align	4
GFp_gcm_ghash_neon:
	vld1.64	d1,[r0]!		@ load Xi
	vld1.64	d0,[r0]!
	vmov.i64	d29,#0x0000ffffffffffff
	vldmia	r1,{d26,d27}	@ load twisted H
	vmov.i64	d30,#0x00000000ffffffff
#ifdef __ARMEL__
	vrev64.8	q0,q0
#endif
	vmov.i64	d31,#0x000000000000ffff
	veor	d28,d26,d27		@ Karatsuba pre-processing

.Loop_neon:
	vld1.64	d7,[r2]!		@ load inp
	vld1.64	d6,[r2]!
#ifdef __ARMEL__
	vrev64.8	q3,q3
#endif
	veor	q3,q0			@ inp^=Xi
.Lgmult_neon:
	vext.8	d16, d26, d26, #1	@ A1
	vmull.p8	q8, d16, d6		@ F = A1*B
	vext.8	d0, d6, d6, #1	@ B1
	vmull.p8	q0, d26, d0		@ E = A*B1
	vext.8	d18, d26, d26, #2	@ A2
	vmull.p8	q9, d18, d6		@ H = A2*B
	vext.8	d22, d6, d6, #2	@ B2
	vmull.p8	q11, d26, d22		@ G = A*B2
	vext.8	d20, d26, d26, #3	@ A3
	veor	q8, q8, q0		@ L = E + F
	vmull.p8	q10, d20, d6		@ J = A3*B
	vext.8	d0, d6, d6, #3	@ B3
	veor	q9, q9, q11		@ M = G + H
	vmull.p8	q0, d26, d0		@ I = A*B3
	veor	d16, d16, d17	@ t0 = (L) (P0 + P1) << 8
	vand	d17, d17, d29
	vext.8	d22, d6, d6, #4	@ B4
	veor	d18, d18, d19	@ t1 = (M) (P2 + P3) << 16
	vand	d19, d19, d30
	vmull.p8	q11, d26, d22		@ K = A*B4
	veor	q10, q10, q0		@ N = I + J
	veor	d16, d16, d17
	veor	d18, d18, d19
	veor	d20, d20, d21	@ t2 = (N) (P4 + P5) << 24
	vand	d21, d21, d31
	vext.8	q8, q8, q8, #15
	veor	d22, d22, d23	@ t3 = (K) (P6 + P7) << 32
	vmov.i64	d23, #0
	vext.8	q9, q9, q9, #14
	veor	d20, d20, d21
	vmull.p8	q0, d26, d6		@ D = A*B
	vext.8	q11, q11, q11, #12
	vext.8	q10, q10, q10, #13
	veor	q8, q8, q9
	veor	q10, q10, q11
	veor	q0, q0, q8
	veor	q0, q0, q10
	veor	d6,d6,d7	@ Karatsuba pre-processing
	vext.8	d16, d28, d28, #1	@ A1
	vmull.p8	q8, d16, d6		@ F = A1*B
	vext.8	d2, d6, d6, #1	@ B1
	vmull.p8	q1, d28, d2		@ E = A*B1
	vext.8	d18, d28, d28, #2	@ A2
	vmull.p8	q9, d18, d6		@ H = A2*B
	vext.8	d22, d6, d6, #2	@ B2
	vmull.p8	q11, d28, d22		@ G = A*B2
	vext.8	d20, d28, d28, #3	@ A3
	veor	q8, q8, q1		@ L = E + F
	vmull.p8	q10, d20, d6		@ J = A3*B
	vext.8	d2, d6, d6, #3	@ B3
	veor	q9, q9, q11		@ M = G + H
	vmull.p8	q1, d28, d2		@ I = A*B3
	veor	d16, d16, d17	@ t0 = (L) (P0 + P1) << 8
	vand	d17, d17, d29
	vext.8	d22, d6, d6, #4	@ B4
	veor	d18, d18, d19	@ t1 = (M) (P2 + P3) << 16
	vand	d19, d19, d30
	vmull.p8	q11, d28, d22		@ K = A*B4
	veor	q10, q10, q1		@ N = I + J
	veor	d16, d16, d17
	veor	d18, d18, d19
	veor	d20, d20, d21	@ t2 = (N) (P4 + P5) << 24
	vand	d21, d21, d31
	vext.8	q8, q8, q8, #15
	veor	d22, d22, d23	@ t3 = (K) (P6 + P7) << 32
	vmov.i64	d23, #0
	vext.8	q9, q9, q9, #14
	veor	d20, d20, d21
	vmull.p8	q1, d28, d6		@ D = A*B
	vext.8	q11, q11, q11, #12
	vext.8	q10, q10, q10, #13
	veor	q8, q8, q9
	veor	q10, q10, q11
	veor	q1, q1, q8
	veor	q1, q1, q10
	vext.8	d16, d27, d27, #1	@ A1
	vmull.p8	q8, d16, d7		@ F = A1*B
	vext.8	d4, d7, d7, #1	@ B1
	vmull.p8	q2, d27, d4		@ E = A*B1
	vext.8	d18, d27, d27, #2	@ A2
	vmull.p8	q9, d18, d7		@ H = A2*B
	vext.8	d22, d7, d7, #2	@ B2
	vmull.p8	q11, d27, d22		@ G = A*B2
	vext.8	d20, d27, d27, #3	@ A3
	veor	q8, q8, q2		@ L = E + F
	vmull.p8	q10, d20, d7		@ J = A3*B
	vext.8	d4, d7, d7, #3	@ B3
	veor	q9, q9, q11		@ M = G + H
	vmull.p8	q2, d27, d4		@ I = A*B3
	veor	d16, d16, d17	@ t0 = (L) (P0 + P1) << 8
	vand	d17, d17, d29
	vext.8	d22, d7, d7, #4	@ B4
	veor	d18, d18, d19	@ t1 = (M) (P2 + P3) << 16
	vand	d19, d19, d30
	vmull.p8	q11, d27, d22		@ K = A*B4
	veor	q10, q10, q2		@ N = I + J
	veor	d16, d16, d17
	veor	d18, d18, d19
	veor	d20, d20, d21	@ t2 = (N) (P4 + P5) << 24
	vand	d21, d21, d31
	vext.8	q8, q8, q8, #15
	veor	d22, d22, d23	@ t3 = (K) (P6 + P7) << 32
	vmov.i64	d23, #0
	vext.8	q9, q9, q9, #14
	veor	d20, d20, d21
	vmull.p8	q2, d27, d7		@ D = A*B
	vext.8	q11, q11, q11, #12
	vext.8	q10, q10, q10, #13
	veor	q8, q8, q9
	veor	q10, q10, q11
	veor	q2, q2, q8
	veor	q2, q2, q10
	veor	q1,q1,q0		@ Karatsuba post-processing
	veor	q1,q1,q2
	veor	d1,d1,d2
	veor	d4,d4,d3	@ Xh|Xl - 256-bit result

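	@ The shifts below (57/62/63, then 1/6/1) appear to implement the usual
	@ two-phase reflected reduction of the 256-bit product modulo the GHASH
	@ polynomial x^128+x^7+x^2+x+1, matching the reduction_avx reference.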
	@ equivalent of reduction_avx from ghash-x86_64.pl
	vshl.i64	q9,q0,#57		@ 1st phase
	vshl.i64	q10,q0,#62
	veor	q10,q10,q9		@
	vshl.i64	q9,q0,#63
	veor	q10, q10, q9		@
	veor	d1,d1,d20	@
	veor	d4,d4,d21

	vshr.u64	q10,q0,#1		@ 2nd phase
	veor	q2,q2,q0
	veor	q0,q0,q10		@
	vshr.u64	q10,q10,#6
	vshr.u64	q0,q0,#1		@
	veor	q0,q0,q2		@
	veor	q0,q0,q10		@

	subs	r3,#16
	bne	.Loop_neon

#ifdef __ARMEL__
	vrev64.8	q0,q0
#endif
	sub	r0,#16
	vst1.64	d1,[r0]!		@ write out Xi
	vst1.64	d0,[r0]

	bx	lr					@ bx lr
.size	GFp_gcm_ghash_neon,.-GFp_gcm_ghash_neon
#endif
.byte	71,72,65,83,72,32,102,111,114,32,65,82,77,118,52,47,78,69,79,78,44,32,67,82,89,80,84,79,71,65,77,83,32,98,121,32,60,97,112,112,114,111,64,111,112,101,110,115,115,108,46,111,114,103,62,0
.align	2
.align	2
#endif
#endif  // !OPENSSL_NO_ASM