/* $OpenBSD: sha256.c,v 1.32 2024/06/01 07:36:16 tb Exp $ */
/* ====================================================================
 * Copyright (c) 1998-2011 The OpenSSL Project.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * 3. All advertising materials mentioning features or use of this
 *    software must display the following acknowledgment:
 *    "This product includes software developed by the OpenSSL Project
 *    for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
 *
 * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
 *    endorse or promote products derived from this software without
 *    prior written permission. For written permission, please contact
 *    openssl-core@openssl.org.
 *
 * 5. Products derived from this software may not be called "OpenSSL"
 *    nor may "OpenSSL" appear in their names without prior written
 *    permission of the OpenSSL Project.
 *
 * 6. Redistributions of any form whatsoever must retain the following
 *    acknowledgment:
 *    "This product includes software developed by the OpenSSL Project
 *    for use in the OpenSSL Toolkit (http://www.openssl.org/)"
 *
 * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
 * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE OpenSSL PROJECT OR
 * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 * ====================================================================
 *
 * This product includes cryptographic software written by Eric Young
 * (eay@cryptsoft.com).  This product includes software written by Tim
 * Hudson (tjh@cryptsoft.com).
 */

#include <endian.h>
#include <stdlib.h>
#include <string.h>

#include <openssl/opensslconf.h>

#include <openssl/crypto.h>
#include <openssl/sha.h>

#include "crypto_internal.h"

#if !defined(OPENSSL_NO_SHA) && !defined(OPENSSL_NO_SHA256)

/* Ensure that SHA_LONG and uint32_t are equivalent. */
CTASSERT(sizeof(SHA_LONG) == sizeof(uint32_t));

#ifdef SHA256_ASM
void sha256_block_data_order(SHA256_CTX *ctx, const void *_in, size_t num);
#endif

#ifndef SHA256_ASM
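/*
 * SHA-256 round constants (FIPS 180-4, section 4.2.2): the first 32 bits
 * of the fractional parts of the cube roots of the first 64 primes.
 */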
static const SHA_LONG K256[64] = {
	0x428a2f98UL, 0x71374491UL, 0xb5c0fbcfUL, 0xe9b5dba5UL,
	0x3956c25bUL, 0x59f111f1UL, 0x923f82a4UL, 0xab1c5ed5UL,
	0xd807aa98UL, 0x12835b01UL, 0x243185beUL, 0x550c7dc3UL,
	0x72be5d74UL, 0x80deb1feUL, 0x9bdc06a7UL, 0xc19bf174UL,
	0xe49b69c1UL, 0xefbe4786UL, 0x0fc19dc6UL, 0x240ca1ccUL,
	0x2de92c6fUL, 0x4a7484aaUL, 0x5cb0a9dcUL, 0x76f988daUL,
	0x983e5152UL, 0xa831c66dUL, 0xb00327c8UL, 0xbf597fc7UL,
	0xc6e00bf3UL, 0xd5a79147UL, 0x06ca6351UL, 0x14292967UL,
	0x27b70a85UL, 0x2e1b2138UL, 0x4d2c6dfcUL, 0x53380d13UL,
	0x650a7354UL, 0x766a0abbUL, 0x81c2c92eUL, 0x92722c85UL,
	0xa2bfe8a1UL, 0xa81a664bUL, 0xc24b8b70UL, 0xc76c51a3UL,
	0xd192e819UL, 0xd6990624UL, 0xf40e3585UL, 0x106aa070UL,
	0x19a4c116UL, 0x1e376c08UL, 0x2748774cUL, 0x34b0bcb5UL,
	0x391c0cb3UL, 0x4ed8aa4aUL, 0x5b9cca4fUL, 0x682e6ff3UL,
	0x748f82eeUL, 0x78a5636fUL, 0x84c87814UL, 0x8cc70208UL,
	0x90befffaUL, 0xa4506cebUL, 0xbef9a3f7UL, 0xc67178f2UL,
};

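/*
 * The six SHA-256 logical functions from FIPS 180-4, section 4.1.2,
 * expressed with 32-bit right-rotates and right-shifts.
 */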
static inline SHA_LONG
Sigma0(SHA_LONG x)
{
	return crypto_ror_u32(x, 2) ^ crypto_ror_u32(x, 13) ^
	    crypto_ror_u32(x, 22);
}

static inline SHA_LONG
Sigma1(SHA_LONG x)
{
	return crypto_ror_u32(x, 6) ^ crypto_ror_u32(x, 11) ^
	    crypto_ror_u32(x, 25);
}

static inline SHA_LONG
sigma0(SHA_LONG x)
{
	return crypto_ror_u32(x, 7) ^ crypto_ror_u32(x, 18) ^ (x >> 3);
}

static inline SHA_LONG
sigma1(SHA_LONG x)
{
	return crypto_ror_u32(x, 17) ^ crypto_ror_u32(x, 19) ^ (x >> 10);
}

static inline SHA_LONG
Ch(SHA_LONG x, SHA_LONG y, SHA_LONG z)
{
	return (x & y) ^ (~x & z);
}

static inline SHA_LONG
Maj(SHA_LONG x, SHA_LONG y, SHA_LONG z)
{
	return (x & y) ^ (x & z) ^ (y & z);
}

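/*
 * Message schedule expansion (FIPS 180-4, section 6.2.2, step 1):
 * W[t] = sigma1(W[t-2]) + W[t-7] + sigma0(W[t-15]) + W[t-16] mod 2^32,
 * computed in place on a 16-word window.
 */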
static inline void
sha256_msg_schedule_update(SHA_LONG *W0, SHA_LONG W1, SHA_LONG W9, SHA_LONG W14)
{
	*W0 = sigma1(W14) + W9 + sigma0(W1) + *W0;
}

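/*
 * One compression round (FIPS 180-4, section 6.2.2, step 3): compute the
 * temporaries T1 and T2, then rotate the eight working variables.
 */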
static inline void
sha256_round(SHA_LONG *a, SHA_LONG *b, SHA_LONG *c, SHA_LONG *d, SHA_LONG *e,
    SHA_LONG *f, SHA_LONG *g, SHA_LONG *h, SHA_LONG Kt, SHA_LONG Wt)
{
	SHA_LONG T1, T2;

	T1 = *h + Sigma1(*e) + Ch(*e, *f, *g) + Kt + Wt;
	T2 = Sigma0(*a) + Maj(*a, *b, *c);

	*h = *g;
	*g = *f;
	*f = *e;
	*e = *d + T1;
	*d = *c;
	*c = *b;
	*b = *a;
	*a = T1 + T2;
}

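/*
 * Portable C compression function: processes num complete 64-byte blocks,
 * loading each block as big-endian 32-bit words into the message schedule
 * and adding the resulting working variables back into the chaining state.
 */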
static void
sha256_block_data_order(SHA256_CTX *ctx, const void *_in, size_t num)
{
	const uint8_t *in = _in;
	const SHA_LONG *in32;
	SHA_LONG a, b, c, d, e, f, g, h;
	SHA_LONG X[16];
	int i;

	while (num--) {
		a = ctx->h[0];
		b = ctx->h[1];
		c = ctx->h[2];
		d = ctx->h[3];
		e = ctx->h[4];
		f = ctx->h[5];
		g = ctx->h[6];
		h = ctx->h[7];

		if ((size_t)in % 4 == 0) {
			/* Input is 32 bit aligned. */
			in32 = (const SHA_LONG *)in;
			X[0] = be32toh(in32[0]);
			X[1] = be32toh(in32[1]);
			X[2] = be32toh(in32[2]);
			X[3] = be32toh(in32[3]);
			X[4] = be32toh(in32[4]);
			X[5] = be32toh(in32[5]);
			X[6] = be32toh(in32[6]);
			X[7] = be32toh(in32[7]);
			X[8] = be32toh(in32[8]);
			X[9] = be32toh(in32[9]);
			X[10] = be32toh(in32[10]);
			X[11] = be32toh(in32[11]);
			X[12] = be32toh(in32[12]);
			X[13] = be32toh(in32[13]);
			X[14] = be32toh(in32[14]);
			X[15] = be32toh(in32[15]);
		} else {
			/* Input is not 32 bit aligned. */
			X[0] = crypto_load_be32toh(&in[0 * 4]);
			X[1] = crypto_load_be32toh(&in[1 * 4]);
			X[2] = crypto_load_be32toh(&in[2 * 4]);
			X[3] = crypto_load_be32toh(&in[3 * 4]);
			X[4] = crypto_load_be32toh(&in[4 * 4]);
			X[5] = crypto_load_be32toh(&in[5 * 4]);
			X[6] = crypto_load_be32toh(&in[6 * 4]);
			X[7] = crypto_load_be32toh(&in[7 * 4]);
			X[8] = crypto_load_be32toh(&in[8 * 4]);
			X[9] = crypto_load_be32toh(&in[9 * 4]);
			X[10] = crypto_load_be32toh(&in[10 * 4]);
			X[11] = crypto_load_be32toh(&in[11 * 4]);
			X[12] = crypto_load_be32toh(&in[12 * 4]);
			X[13] = crypto_load_be32toh(&in[13 * 4]);
			X[14] = crypto_load_be32toh(&in[14 * 4]);
			X[15] = crypto_load_be32toh(&in[15 * 4]);
		}
		in += SHA256_CBLOCK;

		sha256_round(&a, &b, &c, &d, &e, &f, &g, &h, K256[0], X[0]);
		sha256_round(&a, &b, &c, &d, &e, &f, &g, &h, K256[1], X[1]);
		sha256_round(&a, &b, &c, &d, &e, &f, &g, &h, K256[2], X[2]);
		sha256_round(&a, &b, &c, &d, &e, &f, &g, &h, K256[3], X[3]);
		sha256_round(&a, &b, &c, &d, &e, &f, &g, &h, K256[4], X[4]);
		sha256_round(&a, &b, &c, &d, &e, &f, &g, &h, K256[5], X[5]);
		sha256_round(&a, &b, &c, &d, &e, &f, &g, &h, K256[6], X[6]);
		sha256_round(&a, &b, &c, &d, &e, &f, &g, &h, K256[7], X[7]);
		sha256_round(&a, &b, &c, &d, &e, &f, &g, &h, K256[8], X[8]);
		sha256_round(&a, &b, &c, &d, &e, &f, &g, &h, K256[9], X[9]);
		sha256_round(&a, &b, &c, &d, &e, &f, &g, &h, K256[10], X[10]);
		sha256_round(&a, &b, &c, &d, &e, &f, &g, &h, K256[11], X[11]);
		sha256_round(&a, &b, &c, &d, &e, &f, &g, &h, K256[12], X[12]);
		sha256_round(&a, &b, &c, &d, &e, &f, &g, &h, K256[13], X[13]);
		sha256_round(&a, &b, &c, &d, &e, &f, &g, &h, K256[14], X[14]);
		sha256_round(&a, &b, &c, &d, &e, &f, &g, &h, K256[15], X[15]);

		for (i = 16; i < 64; i += 16) {
			sha256_msg_schedule_update(&X[0], X[1], X[9], X[14]);
			sha256_msg_schedule_update(&X[1], X[2], X[10], X[15]);
			sha256_msg_schedule_update(&X[2], X[3], X[11], X[0]);
			sha256_msg_schedule_update(&X[3], X[4], X[12], X[1]);
			sha256_msg_schedule_update(&X[4], X[5], X[13], X[2]);
			sha256_msg_schedule_update(&X[5], X[6], X[14], X[3]);
			sha256_msg_schedule_update(&X[6], X[7], X[15], X[4]);
			sha256_msg_schedule_update(&X[7], X[8], X[0], X[5]);
			sha256_msg_schedule_update(&X[8], X[9], X[1], X[6]);
			sha256_msg_schedule_update(&X[9], X[10], X[2], X[7]);
			sha256_msg_schedule_update(&X[10], X[11], X[3], X[8]);
			sha256_msg_schedule_update(&X[11], X[12], X[4], X[9]);
			sha256_msg_schedule_update(&X[12], X[13], X[5], X[10]);
			sha256_msg_schedule_update(&X[13], X[14], X[6], X[11]);
			sha256_msg_schedule_update(&X[14], X[15], X[7], X[12]);
			sha256_msg_schedule_update(&X[15], X[0], X[8], X[13]);

			sha256_round(&a, &b, &c, &d, &e, &f, &g, &h, K256[i + 0], X[0]);
			sha256_round(&a, &b, &c, &d, &e, &f, &g, &h, K256[i + 1], X[1]);
			sha256_round(&a, &b, &c, &d, &e, &f, &g, &h, K256[i + 2], X[2]);
			sha256_round(&a, &b, &c, &d, &e, &f, &g, &h, K256[i + 3], X[3]);
			sha256_round(&a, &b, &c, &d, &e, &f, &g, &h, K256[i + 4], X[4]);
			sha256_round(&a, &b, &c, &d, &e, &f, &g, &h, K256[i + 5], X[5]);
			sha256_round(&a, &b, &c, &d, &e, &f, &g, &h, K256[i + 6], X[6]);
			sha256_round(&a, &b, &c, &d, &e, &f, &g, &h, K256[i + 7], X[7]);
			sha256_round(&a, &b, &c, &d, &e, &f, &g, &h, K256[i + 8], X[8]);
			sha256_round(&a, &b, &c, &d, &e, &f, &g, &h, K256[i + 9], X[9]);
			sha256_round(&a, &b, &c, &d, &e, &f, &g, &h, K256[i + 10], X[10]);
			sha256_round(&a, &b, &c, &d, &e, &f, &g, &h, K256[i + 11], X[11]);
			sha256_round(&a, &b, &c, &d, &e, &f, &g, &h, K256[i + 12], X[12]);
			sha256_round(&a, &b, &c, &d, &e, &f, &g, &h, K256[i + 13], X[13]);
			sha256_round(&a, &b, &c, &d, &e, &f, &g, &h, K256[i + 14], X[14]);
			sha256_round(&a, &b, &c, &d, &e, &f, &g, &h, K256[i + 15], X[15]);
		}

		ctx->h[0] += a;
		ctx->h[1] += b;
		ctx->h[2] += c;
		ctx->h[3] += d;
		ctx->h[4] += e;
		ctx->h[5] += f;
		ctx->h[6] += g;
		ctx->h[7] += h;
	}
}
#endif /* SHA256_ASM */

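/*
 * SHA-224 (FIPS 180-4, sections 5.3.2 and 6.3) reuses the SHA-256
 * compression function; only the initial hash values and the 28-byte
 * output length differ.
 */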
int
SHA224_Init(SHA256_CTX *c)
{
	memset(c, 0, sizeof(*c));

	c->h[0] = 0xc1059ed8UL;
	c->h[1] = 0x367cd507UL;
	c->h[2] = 0x3070dd17UL;
	c->h[3] = 0xf70e5939UL;
	c->h[4] = 0xffc00b31UL;
	c->h[5] = 0x68581511UL;
	c->h[6] = 0x64f98fa7UL;
	c->h[7] = 0xbefa4fa4UL;

	c->md_len = SHA224_DIGEST_LENGTH;

	return 1;
}
LCRYPTO_ALIAS(SHA224_Init);

int
SHA224_Update(SHA256_CTX *c, const void *data, size_t len)
{
	return SHA256_Update(c, data, len);
}
LCRYPTO_ALIAS(SHA224_Update);

int
SHA224_Final(unsigned char *md, SHA256_CTX *c)
{
	return SHA256_Final(md, c);
}
LCRYPTO_ALIAS(SHA224_Final);

unsigned char *
SHA224(const unsigned char *d, size_t n, unsigned char *md)
{
	SHA256_CTX c;

	SHA224_Init(&c);
	SHA256_Update(&c, d, n);
	SHA256_Final(md, &c);

	explicit_bzero(&c, sizeof(c));

	return (md);
}
LCRYPTO_ALIAS(SHA224);

int
SHA256_Init(SHA256_CTX *c)
{
	memset(c, 0, sizeof(*c));

	c->h[0] = 0x6a09e667UL;
	c->h[1] = 0xbb67ae85UL;
	c->h[2] = 0x3c6ef372UL;
	c->h[3] = 0xa54ff53aUL;
	c->h[4] = 0x510e527fUL;
	c->h[5] = 0x9b05688cUL;
	c->h[6] = 0x1f83d9abUL;
	c->h[7] = 0x5be0cd19UL;

	c->md_len = SHA256_DIGEST_LENGTH;

	return 1;
}
LCRYPTO_ALIAS(SHA256_Init);

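/*
 * Nl/Nh together hold the 64-bit message length in bits; partial blocks
 * are buffered in c->data until a full 64-byte block is available.
 */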
int
SHA256_Update(SHA256_CTX *c, const void *data_, size_t len)
{
	const unsigned char *data = data_;
	unsigned char *p;
	SHA_LONG l;
	size_t n;

	if (len == 0)
		return 1;

	l = (c->Nl + (((SHA_LONG)len) << 3)) & 0xffffffffUL;
	/* 95-05-24 eay Fixed a bug with the overflow handling, thanks to
	 * Wei Dai <weidai@eskimo.com> for pointing it out. */
	if (l < c->Nl) /* overflow */
		c->Nh++;
	c->Nh += (SHA_LONG)(len >> 29);	/* might cause compiler warning on 16-bit */
	c->Nl = l;

	n = c->num;
	if (n != 0) {
		p = (unsigned char *)c->data;

		if (len >= SHA_CBLOCK || len + n >= SHA_CBLOCK) {
			memcpy(p + n, data, SHA_CBLOCK - n);
			sha256_block_data_order(c, p, 1);
			n = SHA_CBLOCK - n;
			data += n;
			len -= n;
			c->num = 0;
			memset(p, 0, SHA_CBLOCK);	/* keep it zeroed */
		} else {
			memcpy(p + n, data, len);
			c->num += (unsigned int)len;
			return 1;
		}
	}

	n = len/SHA_CBLOCK;
	if (n > 0) {
		sha256_block_data_order(c, data, n);
		n *= SHA_CBLOCK;
		data += n;
		len -= n;
	}

	if (len != 0) {
		p = (unsigned char *)c->data;
		c->num = (unsigned int)len;
		memcpy(p, data, len);
	}
	return 1;
}
LCRYPTO_ALIAS(SHA256_Update);

void
SHA256_Transform(SHA256_CTX *c, const unsigned char *data)
{
	sha256_block_data_order(c, data, 1);
}
LCRYPTO_ALIAS(SHA256_Transform);

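/*
 * Finalization appends the 0x80 padding byte, zero-fills up to the last
 * eight bytes of the block (spilling into an extra block if needed) and
 * writes the 64-bit big-endian bit count before the final compression.
 */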
int
SHA256_Final(unsigned char *md, SHA256_CTX *c)
{
	unsigned char *p = (unsigned char *)c->data;
	size_t n = c->num;
	unsigned int nn;

	p[n] = 0x80; /* there is always room for one */
	n++;

	if (n > (SHA_CBLOCK - 8)) {
		memset(p + n, 0, SHA_CBLOCK - n);
		n = 0;
		sha256_block_data_order(c, p, 1);
	}

	memset(p + n, 0, SHA_CBLOCK - 8 - n);
	c->data[SHA_LBLOCK - 2] = htobe32(c->Nh);
	c->data[SHA_LBLOCK - 1] = htobe32(c->Nl);

	sha256_block_data_order(c, p, 1);
	c->num = 0;
	memset(p, 0, SHA_CBLOCK);

	/*
	 * FIPS 180-2 discusses "Truncation of the Hash Function Output";
	 * the default: case below handles it. It is unclear whether
	 * truncation to a length that is not a multiple of 4 bytes is
	 * permitted; most likely it is not, but if it is, the default:
	 * case would need to be extended. The separate cases for the
	 * predefined lengths let the compiler decide whether to unroll
	 * the small loops.
	 */
	switch (c->md_len) {
	case SHA224_DIGEST_LENGTH:
		for (nn = 0; nn < SHA224_DIGEST_LENGTH / 4; nn++) {
			crypto_store_htobe32(md, c->h[nn]);
			md += 4;
		}
		break;

	case SHA256_DIGEST_LENGTH:
		for (nn = 0; nn < SHA256_DIGEST_LENGTH / 4; nn++) {
			crypto_store_htobe32(md, c->h[nn]);
			md += 4;
		}
		break;

	default:
		if (c->md_len > SHA256_DIGEST_LENGTH)
			return 0;
		for (nn = 0; nn < c->md_len / 4; nn++) {
			crypto_store_htobe32(md, c->h[nn]);
			md += 4;
		}
		break;
	}

	return 1;
}
LCRYPTO_ALIAS(SHA256_Final);

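/*
 * Illustrative caller usage of the one-shot and incremental interfaces
 * (buf and buflen are placeholder names, not part of this file):
 *
 *	unsigned char digest[SHA256_DIGEST_LENGTH];
 *
 *	SHA256(buf, buflen, digest);
 *
 * or, equivalently:
 *
 *	SHA256_CTX ctx;
 *
 *	SHA256_Init(&ctx);
 *	SHA256_Update(&ctx, buf, buflen);
 *	SHA256_Final(digest, &ctx);
 */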
unsigned char *
SHA256(const unsigned char *d, size_t n, unsigned char *md)
{
	SHA256_CTX c;

	SHA256_Init(&c);
	SHA256_Update(&c, d, n);
	SHA256_Final(md, &c);

	explicit_bzero(&c, sizeof(c));

	return (md);
}
LCRYPTO_ALIAS(SHA256);

#endif /* OPENSSL_NO_SHA256 */