/* $OpenBSD: ripemd.c,v 1.19 2024/06/01 07:36:16 tb Exp $ */
/* Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
 * All rights reserved.
 *
 * This package is an SSL implementation written
 * by Eric Young (eay@cryptsoft.com).
 * The implementation was written so as to conform with Netscapes SSL.
 *
 * This library is free for commercial and non-commercial use as long as
 * the following conditions are aheared to.  The following conditions
 * apply to all code found in this distribution, be it the RC4, RSA,
 * lhash, DES, etc., code; not just the SSL code.  The SSL documentation
 * included with this distribution is covered by the same copyright terms
 * except that the holder is Tim Hudson (tjh@cryptsoft.com).
 *
 * Copyright remains Eric Young's, and as such any Copyright notices in
 * the code are not to be removed.
 * If this package is used in a product, Eric Young should be given attribution
 * as the author of the parts of the library used.
 * This can be in the form of a textual message at program startup or
 * in documentation (online or textual) provided with the package.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *    "This product includes cryptographic software written by
 *     Eric Young (eay@cryptsoft.com)"
 *    The word 'cryptographic' can be left out if the rouines from the library
 *    being used are not cryptographic related :-).
 * 4. If you include any Windows specific code (or a derivative thereof) from
 *    the apps directory (application code) you must include an acknowledgement:
 *    "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
 *
 * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * The licence and distribution terms for any publically available version or
 * derivative of this code cannot be changed.  i.e. this code cannot simply be
 * copied and put under another distribution licence
 * [including the GNU Public Licence.]
 */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <openssl/opensslconf.h>

#include <openssl/crypto.h>
#include <openssl/ripemd.h>

#include "crypto_internal.h"

/* Ensure that RIPEMD160_LONG and uint32_t are equivalent sizes. */
CTASSERT(sizeof(RIPEMD160_LONG) == sizeof(uint32_t));

#if 0
#define F1(x,y,z)	((x)^(y)^(z))
#define F2(x,y,z)	(((x)&(y))|((~x)&z))
#define F3(x,y,z)	(((x)|(~y))^(z))
#define F4(x,y,z)	(((x)&(z))|((y)&(~(z))))
#define F5(x,y,z)	((x)^((y)|(~(z))))
#else
/*
 * Transformed F2 and F4 are courtesy of Wei Dai <weidai@eskimo.com>
 */
#define F1(x,y,z)	((x) ^ (y) ^ (z))
#define F2(x,y,z)	((((y) ^ (z)) & (x)) ^ (z))
#define F3(x,y,z)	(((~(y)) | (x)) ^ (z))
#define F4(x,y,z)	((((x) ^ (y)) & (z)) ^ (y))
#define F5(x,y,z)	(((~(z)) | (y)) ^ (x))
#endif

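/*
 * Round constants from the RIPEMD-160 specification.  The non-zero
 * left-line constants are the integer parts of 2^30 times the square
 * roots of 2, 3, 5 and 7; the right-line ones use the cube roots.
 */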
#define KL0 0x00000000L
#define KL1 0x5A827999L
#define KL2 0x6ED9EBA1L
#define KL3 0x8F1BBCDCL
#define KL4 0xA953FD4EL

#define KR0 0x50A28BE6L
#define KR1 0x5C4DD124L
#define KR2 0x6D703EF3L
#define KR3 0x7A6D76E9L
#define KR4 0x00000000L

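/*
 * One step of a RIPEMD-160 round: a += Fn(b,c,d) + X + K, then
 * a = rol(a,s) + e and c = rol(c,10).  RIP1 takes no constant, since
 * the two rounds that use F1 (left round 1, right round 5) have a
 * zero constant (KL0 and KR4).
 */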
#define RIP1(a,b,c,d,e,w,s) { \
	a+=F1(b,c,d)+w; \
	a=crypto_rol_u32(a,s)+e; \
	c=crypto_rol_u32(c,10); }

#define RIP2(a,b,c,d,e,w,s,K) { \
	a+=F2(b,c,d)+w+K; \
	a=crypto_rol_u32(a,s)+e; \
	c=crypto_rol_u32(c,10); }

#define RIP3(a,b,c,d,e,w,s,K) { \
	a+=F3(b,c,d)+w+K; \
	a=crypto_rol_u32(a,s)+e; \
	c=crypto_rol_u32(c,10); }

#define RIP4(a,b,c,d,e,w,s,K) { \
	a+=F4(b,c,d)+w+K; \
	a=crypto_rol_u32(a,s)+e; \
	c=crypto_rol_u32(c,10); }

#define RIP5(a,b,c,d,e,w,s,K) { \
	a+=F5(b,c,d)+w+K; \
	a=crypto_rol_u32(a,s)+e; \
	c=crypto_rol_u32(c,10); }

static void
ripemd160_block_data_order(RIPEMD160_CTX *ctx, const void *_in, size_t num)
{
	const uint8_t *in = _in;
	const RIPEMD160_LONG *in32;
	unsigned int A, B, C, D, E;
	unsigned int a, b, c, d, e;
	unsigned int X0, X1, X2, X3, X4, X5, X6, X7,
	    X8, X9, X10, X11, X12, X13, X14, X15;

	for (; num--; ) {
		A = ctx->A;
		B = ctx->B;
		C = ctx->C;
		D = ctx->D;
		E = ctx->E;

		if ((uintptr_t)in % 4 == 0) {
			/* Input is 32 bit aligned. */
			in32 = (const RIPEMD160_LONG *)in;
			X0 = le32toh(in32[0]);
			X1 = le32toh(in32[1]);
			X2 = le32toh(in32[2]);
			X3 = le32toh(in32[3]);
			X4 = le32toh(in32[4]);
			X5 = le32toh(in32[5]);
			X6 = le32toh(in32[6]);
			X7 = le32toh(in32[7]);
			X8 = le32toh(in32[8]);
			X9 = le32toh(in32[9]);
			X10 = le32toh(in32[10]);
			X11 = le32toh(in32[11]);
			X12 = le32toh(in32[12]);
			X13 = le32toh(in32[13]);
			X14 = le32toh(in32[14]);
			X15 = le32toh(in32[15]);
		} else {
			/* Input is not 32 bit aligned. */
			X0 = crypto_load_le32toh(&in[0 * 4]);
			X1 = crypto_load_le32toh(&in[1 * 4]);
			X2 = crypto_load_le32toh(&in[2 * 4]);
			X3 = crypto_load_le32toh(&in[3 * 4]);
			X4 = crypto_load_le32toh(&in[4 * 4]);
			X5 = crypto_load_le32toh(&in[5 * 4]);
			X6 = crypto_load_le32toh(&in[6 * 4]);
			X7 = crypto_load_le32toh(&in[7 * 4]);
			X8 = crypto_load_le32toh(&in[8 * 4]);
			X9 = crypto_load_le32toh(&in[9 * 4]);
			X10 = crypto_load_le32toh(&in[10 * 4]);
			X11 = crypto_load_le32toh(&in[11 * 4]);
			X12 = crypto_load_le32toh(&in[12 * 4]);
			X13 = crypto_load_le32toh(&in[13 * 4]);
			X14 = crypto_load_le32toh(&in[14 * 4]);
			X15 = crypto_load_le32toh(&in[15 * 4]);
		}
		in += RIPEMD160_CBLOCK;

		RIP1(A, B, C, D, E, X0, 11);
		RIP1(E, A, B, C, D, X1, 14);
		RIP1(D, E, A, B, C, X2, 15);
		RIP1(C, D, E, A, B, X3, 12);
		RIP1(B, C, D, E, A, X4, 5);
		RIP1(A, B, C, D, E, X5, 8);
		RIP1(E, A, B, C, D, X6, 7);
		RIP1(D, E, A, B, C, X7, 9);
		RIP1(C, D, E, A, B, X8, 11);
		RIP1(B, C, D, E, A, X9, 13);
		RIP1(A, B, C, D, E, X10, 14);
		RIP1(E, A, B, C, D, X11, 15);
		RIP1(D, E, A, B, C, X12, 6);
		RIP1(C, D, E, A, B, X13, 7);
		RIP1(B, C, D, E, A, X14, 9);
		RIP1(A, B, C, D, E, X15, 8);

		RIP2(E, A, B, C, D, X7, 7, KL1);
		RIP2(D, E, A, B, C, X4, 6, KL1);
		RIP2(C, D, E, A, B, X13, 8, KL1);
		RIP2(B, C, D, E, A, X1, 13, KL1);
		RIP2(A, B, C, D, E, X10, 11, KL1);
		RIP2(E, A, B, C, D, X6, 9, KL1);
		RIP2(D, E, A, B, C, X15, 7, KL1);
		RIP2(C, D, E, A, B, X3, 15, KL1);
		RIP2(B, C, D, E, A, X12, 7, KL1);
		RIP2(A, B, C, D, E, X0, 12, KL1);
		RIP2(E, A, B, C, D, X9, 15, KL1);
		RIP2(D, E, A, B, C, X5, 9, KL1);
		RIP2(C, D, E, A, B, X2, 11, KL1);
		RIP2(B, C, D, E, A, X14, 7, KL1);
		RIP2(A, B, C, D, E, X11, 13, KL1);
		RIP2(E, A, B, C, D, X8, 12, KL1);

		RIP3(D, E, A, B, C, X3, 11, KL2);
		RIP3(C, D, E, A, B, X10, 13, KL2);
		RIP3(B, C, D, E, A, X14, 6, KL2);
		RIP3(A, B, C, D, E, X4, 7, KL2);
		RIP3(E, A, B, C, D, X9, 14, KL2);
		RIP3(D, E, A, B, C, X15, 9, KL2);
		RIP3(C, D, E, A, B, X8, 13, KL2);
		RIP3(B, C, D, E, A, X1, 15, KL2);
		RIP3(A, B, C, D, E, X2, 14, KL2);
		RIP3(E, A, B, C, D, X7, 8, KL2);
		RIP3(D, E, A, B, C, X0, 13, KL2);
		RIP3(C, D, E, A, B, X6, 6, KL2);
		RIP3(B, C, D, E, A, X13, 5, KL2);
		RIP3(A, B, C, D, E, X11, 12, KL2);
		RIP3(E, A, B, C, D, X5, 7, KL2);
		RIP3(D, E, A, B, C, X12, 5, KL2);

		RIP4(C, D, E, A, B, X1, 11, KL3);
		RIP4(B, C, D, E, A, X9, 12, KL3);
		RIP4(A, B, C, D, E, X11, 14, KL3);
		RIP4(E, A, B, C, D, X10, 15, KL3);
		RIP4(D, E, A, B, C, X0, 14, KL3);
		RIP4(C, D, E, A, B, X8, 15, KL3);
		RIP4(B, C, D, E, A, X12, 9, KL3);
		RIP4(A, B, C, D, E, X4, 8, KL3);
		RIP4(E, A, B, C, D, X13, 9, KL3);
		RIP4(D, E, A, B, C, X3, 14, KL3);
		RIP4(C, D, E, A, B, X7, 5, KL3);
		RIP4(B, C, D, E, A, X15, 6, KL3);
		RIP4(A, B, C, D, E, X14, 8, KL3);
		RIP4(E, A, B, C, D, X5, 6, KL3);
		RIP4(D, E, A, B, C, X6, 5, KL3);
		RIP4(C, D, E, A, B, X2, 12, KL3);

		RIP5(B, C, D, E, A, X4, 9, KL4);
		RIP5(A, B, C, D, E, X0, 15, KL4);
		RIP5(E, A, B, C, D, X5, 5, KL4);
		RIP5(D, E, A, B, C, X9, 11, KL4);
		RIP5(C, D, E, A, B, X7, 6, KL4);
		RIP5(B, C, D, E, A, X12, 8, KL4);
		RIP5(A, B, C, D, E, X2, 13, KL4);
		RIP5(E, A, B, C, D, X10, 12, KL4);
		RIP5(D, E, A, B, C, X14, 5, KL4);
		RIP5(C, D, E, A, B, X1, 12, KL4);
		RIP5(B, C, D, E, A, X3, 13, KL4);
		RIP5(A, B, C, D, E, X8, 14, KL4);
		RIP5(E, A, B, C, D, X11, 11, KL4);
		RIP5(D, E, A, B, C, X6, 8, KL4);
		RIP5(C, D, E, A, B, X15, 5, KL4);
		RIP5(B, C, D, E, A, X13, 6, KL4);

		a = A;
		b = B;
		c = C;
		d = D;
		e = E;
		/* Do other half */
		A = ctx->A;
		B = ctx->B;
		C = ctx->C;
		D = ctx->D;
		E = ctx->E;

		RIP5(A, B, C, D, E, X5, 8, KR0);
		RIP5(E, A, B, C, D, X14, 9, KR0);
		RIP5(D, E, A, B, C, X7, 9, KR0);
		RIP5(C, D, E, A, B, X0, 11, KR0);
		RIP5(B, C, D, E, A, X9, 13, KR0);
		RIP5(A, B, C, D, E, X2, 15, KR0);
		RIP5(E, A, B, C, D, X11, 15, KR0);
		RIP5(D, E, A, B, C, X4, 5, KR0);
		RIP5(C, D, E, A, B, X13, 7, KR0);
		RIP5(B, C, D, E, A, X6, 7, KR0);
		RIP5(A, B, C, D, E, X15, 8, KR0);
		RIP5(E, A, B, C, D, X8, 11, KR0);
		RIP5(D, E, A, B, C, X1, 14, KR0);
		RIP5(C, D, E, A, B, X10, 14, KR0);
		RIP5(B, C, D, E, A, X3, 12, KR0);
		RIP5(A, B, C, D, E, X12, 6, KR0);

		RIP4(E, A, B, C, D, X6, 9, KR1);
		RIP4(D, E, A, B, C, X11, 13, KR1);
		RIP4(C, D, E, A, B, X3, 15, KR1);
		RIP4(B, C, D, E, A, X7, 7, KR1);
		RIP4(A, B, C, D, E, X0, 12, KR1);
		RIP4(E, A, B, C, D, X13, 8, KR1);
		RIP4(D, E, A, B, C, X5, 9, KR1);
		RIP4(C, D, E, A, B, X10, 11, KR1);
		RIP4(B, C, D, E, A, X14, 7, KR1);
		RIP4(A, B, C, D, E, X15, 7, KR1);
		RIP4(E, A, B, C, D, X8, 12, KR1);
		RIP4(D, E, A, B, C, X12, 7, KR1);
		RIP4(C, D, E, A, B, X4, 6, KR1);
		RIP4(B, C, D, E, A, X9, 15, KR1);
		RIP4(A, B, C, D, E, X1, 13, KR1);
		RIP4(E, A, B, C, D, X2, 11, KR1);

		RIP3(D, E, A, B, C, X15, 9, KR2);
		RIP3(C, D, E, A, B, X5, 7, KR2);
		RIP3(B, C, D, E, A, X1, 15, KR2);
		RIP3(A, B, C, D, E, X3, 11, KR2);
		RIP3(E, A, B, C, D, X7, 8, KR2);
		RIP3(D, E, A, B, C, X14, 6, KR2);
		RIP3(C, D, E, A, B, X6, 6, KR2);
		RIP3(B, C, D, E, A, X9, 14, KR2);
		RIP3(A, B, C, D, E, X11, 12, KR2);
		RIP3(E, A, B, C, D, X8, 13, KR2);
		RIP3(D, E, A, B, C, X12, 5, KR2);
		RIP3(C, D, E, A, B, X2, 14, KR2);
		RIP3(B, C, D, E, A, X10, 13, KR2);
		RIP3(A, B, C, D, E, X0, 13, KR2);
		RIP3(E, A, B, C, D, X4, 7, KR2);
		RIP3(D, E, A, B, C, X13, 5, KR2);

		RIP2(C, D, E, A, B, X8, 15, KR3);
		RIP2(B, C, D, E, A, X6, 5, KR3);
		RIP2(A, B, C, D, E, X4, 8, KR3);
		RIP2(E, A, B, C, D, X1, 11, KR3);
		RIP2(D, E, A, B, C, X3, 14, KR3);
		RIP2(C, D, E, A, B, X11, 14, KR3);
		RIP2(B, C, D, E, A, X15, 6, KR3);
		RIP2(A, B, C, D, E, X0, 14, KR3);
		RIP2(E, A, B, C, D, X5, 6, KR3);
		RIP2(D, E, A, B, C, X12, 9, KR3);
		RIP2(C, D, E, A, B, X2, 12, KR3);
		RIP2(B, C, D, E, A, X13, 9, KR3);
		RIP2(A, B, C, D, E, X9, 12, KR3);
		RIP2(E, A, B, C, D, X7, 5, KR3);
		RIP2(D, E, A, B, C, X10, 15, KR3);
		RIP2(C, D, E, A, B, X14, 8, KR3);

		RIP1(B, C, D, E, A, X12, 8);
		RIP1(A, B, C, D, E, X15, 5);
		RIP1(E, A, B, C, D, X10, 12);
		RIP1(D, E, A, B, C, X4, 9);
		RIP1(C, D, E, A, B, X1, 12);
		RIP1(B, C, D, E, A, X5, 5);
		RIP1(A, B, C, D, E, X8, 14);
		RIP1(E, A, B, C, D, X7, 6);
		RIP1(D, E, A, B, C, X6, 8);
		RIP1(C, D, E, A, B, X2, 13);
		RIP1(B, C, D, E, A, X13, 6);
		RIP1(A, B, C, D, E, X14, 5);
		RIP1(E, A, B, C, D, X0, 15);
		RIP1(D, E, A, B, C, X3, 13);
		RIP1(C, D, E, A, B, X9, 11);
		RIP1(B, C, D, E, A, X11, 11);

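		/*
		 * Combine both lines with the previous chaining value:
		 * a..e hold the left-line result, A..E the right-line
		 * result.  The new ctx->A is staged in D and stored
		 * last, as ctx->A is still needed to compute ctx->E.
		 */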
		D = ctx->B + c + D;
		ctx->B = ctx->C + d + E;
		ctx->C = ctx->D + e + A;
		ctx->D = ctx->E + a + B;
		ctx->E = ctx->A + b + C;
		ctx->A = D;
	}
}

int
RIPEMD160_Init(RIPEMD160_CTX *c)
{
	memset(c, 0, sizeof(*c));

	c->A = 0x67452301UL;
	c->B = 0xEFCDAB89UL;
	c->C = 0x98BADCFEUL;
	c->D = 0x10325476UL;
	c->E = 0xC3D2E1F0UL;

	return 1;
}
LCRYPTO_ALIAS(RIPEMD160_Init);

int
RIPEMD160_Update(RIPEMD160_CTX *c, const void *data_, size_t len)
{
	const unsigned char *data = data_;
	unsigned char *p;
	RIPEMD160_LONG l;
	size_t n;

	if (len == 0)
		return 1;

	l = (c->Nl + (((RIPEMD160_LONG)len) << 3)) & 0xffffffffUL;
	/* 95-05-24 eay Fixed a bug with the overflow handling, thanks to
	 * Wei Dai <weidai@eskimo.com> for pointing it out. */
	if (l < c->Nl) /* overflow */
		c->Nh++;
	c->Nh += (RIPEMD160_LONG)(len >> 29); /* might cause compiler warning on 16-bit */
	c->Nl = l;

	n = c->num;
	if (n != 0) {
		p = (unsigned char *)c->data;

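		/*
		 * If the new data fills the buffered partial block,
		 * complete it and hash it before handling the rest.
		 */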
		if (len >= RIPEMD160_CBLOCK || len + n >= RIPEMD160_CBLOCK) {
			memcpy(p + n, data, RIPEMD160_CBLOCK - n);
			ripemd160_block_data_order(c, p, 1);
			n = RIPEMD160_CBLOCK - n;
			data += n;
			len -= n;
			c->num = 0;
			memset(p, 0, RIPEMD160_CBLOCK);	/* keep it zeroed */
		} else {
			memcpy(p + n, data, len);
			c->num += (unsigned int)len;
			return 1;
		}
	}

	n = len / RIPEMD160_CBLOCK;
	if (n > 0) {
		ripemd160_block_data_order(c, data, n);
		n *= RIPEMD160_CBLOCK;
		data += n;
		len -= n;
	}

	if (len != 0) {
		p = (unsigned char *)c->data;
		c->num = (unsigned int)len;
		memcpy(p, data, len);
	}
	return 1;
}
LCRYPTO_ALIAS(RIPEMD160_Update);

void
RIPEMD160_Transform(RIPEMD160_CTX *c, const unsigned char *data)
{
	ripemd160_block_data_order(c, data, 1);
}
LCRYPTO_ALIAS(RIPEMD160_Transform);

int
RIPEMD160_Final(unsigned char *md, RIPEMD160_CTX *c)
{
	unsigned char *p = (unsigned char *)c->data;
	size_t n = c->num;

	p[n] = 0x80; /* there is always room for one */
	n++;

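	/*
	 * If the 0x80 pad byte leaves no room for the 64-bit bit count,
	 * hash this block and put the count in a fresh, zeroed block.
	 */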
	if (n > (RIPEMD160_CBLOCK - 8)) {
		memset(p + n, 0, RIPEMD160_CBLOCK - n);
		n = 0;
		ripemd160_block_data_order(c, p, 1);
	}

	memset(p + n, 0, RIPEMD160_CBLOCK - 8 - n);
	c->data[RIPEMD160_LBLOCK - 2] = htole32(c->Nl);
	c->data[RIPEMD160_LBLOCK - 1] = htole32(c->Nh);

	ripemd160_block_data_order(c, p, 1);
	c->num = 0;
	memset(p, 0, RIPEMD160_CBLOCK);

	crypto_store_htole32(&md[0 * 4], c->A);
	crypto_store_htole32(&md[1 * 4], c->B);
	crypto_store_htole32(&md[2 * 4], c->C);
	crypto_store_htole32(&md[3 * 4], c->D);
	crypto_store_htole32(&md[4 * 4], c->E);

	return 1;
}
LCRYPTO_ALIAS(RIPEMD160_Final);

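/*
 * One-shot convenience wrapper.  A minimal usage sketch, where buf and
 * buflen stand in for the caller's data and md must provide
 * RIPEMD160_DIGEST_LENGTH (20) bytes:
 *
 *	unsigned char md[RIPEMD160_DIGEST_LENGTH];
 *
 *	if (RIPEMD160(buf, buflen, md) == NULL)
 *		return -1;
 */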
unsigned char *
RIPEMD160(const unsigned char *d, size_t n, unsigned char *md)
{
	RIPEMD160_CTX c;

	if (!RIPEMD160_Init(&c))
		return NULL;
	RIPEMD160_Update(&c, d, n);
	RIPEMD160_Final(md, &c);
	explicit_bzero(&c, sizeof(c));
	return (md);
}
LCRYPTO_ALIAS(RIPEMD160);
