/* ====================================================================
 * Copyright (c) 1999-2002 The OpenSSL Project.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * 3. All advertising materials mentioning features or use of this
 *    software must display the following acknowledgment:
 *    "This product includes software developed by the OpenSSL Project
 *    for use in the OpenSSL Toolkit. (http://www.OpenSSL.org/)"
 *
 * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
 *    endorse or promote products derived from this software without
 *    prior written permission. For written permission, please contact
 *    licensing@OpenSSL.org.
 *
 * 5. Products derived from this software may not be called "OpenSSL"
 *    nor may "OpenSSL" appear in their names without prior written
 *    permission of the OpenSSL Project.
 *
 * 6. Redistributions of any form whatsoever must retain the following
 *    acknowledgment:
 *    "This product includes software developed by the OpenSSL Project
 *    for use in the OpenSSL Toolkit (http://www.OpenSSL.org/)"
 *
 * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
 * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE OpenSSL PROJECT OR
 * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 * ====================================================================
 *
 * This product includes cryptographic software written by Eric Young
 * (eay@cryptsoft.com).  This product includes software written by Tim
 * Hudson (tjh@cryptsoft.com).
 *
 */

/*
 * This is a generic 32-bit "collector" for message digest algorithms.
 * Whenever needed, it collects the input byte stream into chunks of
 * 32-bit values and invokes a block function that performs the actual
 * hash calculations.
 *
 * Porting guide.
 *
 * Obligatory macros:
 *
 * DATA_ORDER_IS_BIG_ENDIAN or DATA_ORDER_IS_LITTLE_ENDIAN
 *  this macro defines the byte order of the input stream.
 * HASH_CBLOCK
 *  size of a unit chunk HASH_BLOCK operates on.
 * HASH_LONG
 *  has to be at least 32 bits wide; if it's wider, then
 *  HASH_LONG_LOG2 *has to* be defined along with it.
 * HASH_CTX
 *  context structure that at least contains the following
 *  members:
 *      typedef struct {
 *          ...
 *          HASH_LONG       Nl,Nh;
 *          HASH_LONG       data[HASH_LBLOCK];
 *          unsigned int    num;
 *          ...
 *          } HASH_CTX;
 * HASH_UPDATE
 *  name of "Update" function, implemented here.
 * HASH_TRANSFORM
 *  name of "Transform" function, implemented here.
 * HASH_FINAL
 *  name of "Final" function, implemented here.
 * HASH_BLOCK_HOST_ORDER
 *  name of "block" function treating *aligned* input message
 *  in host byte order, implemented externally.
 * HASH_BLOCK_DATA_ORDER
 *  name of "block" function treating *unaligned* input message
 *  in original (data) byte order, implemented externally (it
 *  actually is optional if data and host are of the same
 *  "endianness").
 * HASH_MAKE_STRING
 *  macro converting context variables to the output hash string.
 *
 * Optional macros:
 *
 * B_ENDIAN or L_ENDIAN
 *  defines host byte order.
 * HASH_LONG_LOG2
 *  defaults to 2 if not stated otherwise.
 * HASH_LBLOCK
 *  assumed to be HASH_CBLOCK/4 if not stated otherwise.
 * HASH_BLOCK_DATA_ORDER_ALIGNED
 *  alternative "block" function capable of treating
 *  aligned input message in original (data) order,
 *  implemented externally.
 *
 * MD5 example:
 *
 *  #define DATA_ORDER_IS_LITTLE_ENDIAN
 *
 *  #define HASH_LONG       MD5_LONG
 *  #define HASH_LONG_LOG2  MD5_LONG_LOG2
 *  #define HASH_CTX        MD5_CTX
 *  #define HASH_CBLOCK     MD5_CBLOCK
 *  #define HASH_LBLOCK     MD5_LBLOCK
 *  #define HASH_UPDATE     MD5_Update
 *  #define HASH_TRANSFORM  MD5_Transform
 *  #define HASH_FINAL      MD5_Final
 *  #define HASH_BLOCK_HOST_ORDER   md5_block_host_order
 *  #define HASH_BLOCK_DATA_ORDER   md5_block_data_order
 *
 *              <appro@fy.chalmers.se>
 */
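
/*
 * For instance, with the MD5 defines above this file emits
 * MD5_Update(), MD5_Transform() and MD5_Final(). A caller sketch
 * (assuming the usual MD5_Init() declared in <openssl/md5.h>):
 *
 *  MD5_CTX ctx;
 *  unsigned char md[MD5_DIGEST_LENGTH];
 *
 *  MD5_Init(&ctx);
 *  MD5_Update(&ctx, "abc", 3);
 *  MD5_Final(md, &ctx);
 */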


#ifndef _MD32_COMMON__H
#define _MD32_COMMON__H


#if !defined(DATA_ORDER_IS_BIG_ENDIAN) && !defined(DATA_ORDER_IS_LITTLE_ENDIAN)
#error "DATA_ORDER must be defined!"
#endif

#ifndef HASH_CBLOCK
#error "HASH_CBLOCK must be defined!"
#endif
#ifndef HASH_LONG
#error "HASH_LONG must be defined!"
#endif
#ifndef HASH_CTX
#error "HASH_CTX must be defined!"
#endif

#ifndef HASH_UPDATE
#error "HASH_UPDATE must be defined!"
#endif
#ifndef HASH_TRANSFORM
#error "HASH_TRANSFORM must be defined!"
#endif
#ifndef HASH_FINAL
#error "HASH_FINAL must be defined!"
#endif

#ifndef HASH_BLOCK_HOST_ORDER
#error "HASH_BLOCK_HOST_ORDER must be defined!"
#endif

#if 0
/*
 * Moved below as it's required only if HASH_BLOCK_DATA_ORDER_ALIGNED
 * isn't defined.
 */
#ifndef HASH_BLOCK_DATA_ORDER
#error "HASH_BLOCK_DATA_ORDER must be defined!"
#endif
#endif

#ifndef HASH_LBLOCK
#define HASH_LBLOCK (HASH_CBLOCK/4)
#endif

#ifndef HASH_LONG_LOG2
#define HASH_LONG_LOG2  2
#endif

/*
 * Engage compiler specific rotate intrinsic function if available.
 */
#undef ROTATE
#ifndef PEDANTIC
# if defined(_MSC_VER) || defined(__ICC)
#  define ROTATE(a,n)   _lrotl(a,n)
# elif defined(__MWERKS__)
#  if defined(__POWERPC__)
#   define ROTATE(a,n)  __rlwinm(a,n,0,31)
#  elif defined(__MC68K__)
/* Motorola specific tweak. <appro@fy.chalmers.se> */
#   define ROTATE(a,n)  ( n<24 ? __rol(a,n) : __ror(a,32-n) )
#  else
#   define ROTATE(a,n)  __rol(a,n)
#  endif
# elif defined(__GNUC__) && __GNUC__>=2 && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
/*
 * Some GNU C inline assembler templates. Note that these are
 * rotates by *constant* number of bits! But that's exactly
 * what we need here...
 *              <appro@fy.chalmers.se>
 */
#  if defined(__i386) || defined(__i386__) || defined(__x86_64) || defined(__x86_64__)
#   define ROTATE(a,n)  ({ register unsigned int ret;  \
                asm (                                   \
                "roll %1,%0"                            \
                : "=r"(ret)                             \
                : "I"(n), "0"(a)                        \
                : "cc");                                \
                ret;                                    \
            })
#  elif defined(__powerpc) || defined(__ppc__) || defined(__powerpc64__)
#   define ROTATE(a,n)  ({ register unsigned int ret;  \
                asm (                                   \
                "rlwinm %0,%1,%2,0,31"                  \
                : "=r"(ret)                             \
                : "r"(a), "I"(n));                      \
                ret;                                    \
            })
#  endif
# endif
#endif /* PEDANTIC */

#if HASH_LONG_LOG2==2   /* Engage only if sizeof(HASH_LONG)== 4 */
/* A nice byte order reversal from Wei Dai <weidai@eskimo.com> */
#ifdef ROTATE
/* 5 instructions with rotate instruction, else 9 */
#define REVERSE_FETCH32(a,l)    (                                       \
        l=*(const HASH_LONG *)(a),                                      \
        ((ROTATE(l,8)&0x00FF00FF)|(ROTATE((l&0x00FF00FF),24)))          \
                )
#else
/* 6 instructions with rotate instruction, else 8 */
#define REVERSE_FETCH32(a,l)    (                                       \
        l=*(const HASH_LONG *)(a),                                      \
        l=(((l>>8)&0x00FF00FF)|((l&0x00FF00FF)<<8)),                    \
        ROTATE(l,16)                                                    \
                )
/*
 * Originally the middle line started with l=(((l&0xFF00FF00)>>8)|...
 * It's rewritten as above for two reasons:
 *  - RISCs aren't good at long constants and have to explicitly
 *    compose 'em with several (well, usually 2) instructions in a
 *    register before performing the actual operation and (as you
 *    already realized:-) having the same constant should inspire the
 *    compiler to permanently allocate the only register for it;
 *  - most modern CPUs have two ALUs, but usually only one has
 *    circuitry for shifts:-( this minor tweak inspires the compiler
 *    to schedule shift instructions in a better way...
 *
 *              <appro@fy.chalmers.se>
 */
#endif
#endif
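
/*
 * Illustration (a sketch, not compiled): on a little-endian host, if
 * the four bytes at 'a' are 44 33 22 11, the fetched word l is
 * 0x11223344 and REVERSE_FETCH32 evaluates to 0x44332211, i.e. the
 * byte order of the fetched word is reversed.
 */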

#ifndef ROTATE
#define ROTATE(a,n)     (((a)<<(n))|(((a)&0xffffffff)>>(32-(n))))
#endif
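
/*
 * ROTATE(a,n) is a left rotate of a 32-bit value by n bits, e.g.
 * (illustrative values):
 *
 *  ROTATE(0x12345678, 8) == 0x34567812
 */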

/*
 * Make some obvious choices. E.g., HASH_BLOCK_DATA_ORDER_ALIGNED
 * and HASH_BLOCK_HOST_ORDER ought to be the same if input data
 * and host are of the same "endianness". It's possible to mask
 * this with a blank #define HASH_BLOCK_DATA_ORDER though...
 *
 *              <appro@fy.chalmers.se>
 */
#if defined(B_ENDIAN)
#  if defined(DATA_ORDER_IS_BIG_ENDIAN)
#    if !defined(HASH_BLOCK_DATA_ORDER_ALIGNED) && HASH_LONG_LOG2==2
#      define HASH_BLOCK_DATA_ORDER_ALIGNED HASH_BLOCK_HOST_ORDER
#    endif
#  endif
#elif defined(L_ENDIAN)
#  if defined(DATA_ORDER_IS_LITTLE_ENDIAN)
#    if !defined(HASH_BLOCK_DATA_ORDER_ALIGNED) && HASH_LONG_LOG2==2
#      define HASH_BLOCK_DATA_ORDER_ALIGNED HASH_BLOCK_HOST_ORDER
#    endif
#  endif
#endif

#if !defined(HASH_BLOCK_DATA_ORDER_ALIGNED)
#ifndef HASH_BLOCK_DATA_ORDER
#error "HASH_BLOCK_DATA_ORDER must be defined!"
#endif
#endif

#if defined(DATA_ORDER_IS_BIG_ENDIAN)

#ifndef PEDANTIC
# if defined(__GNUC__) && __GNUC__>=2 && !defined(OPENSSL_NO_ASM) && !defined(OPENSSL_NO_INLINE_ASM)
#  if ((defined(__i386) || defined(__i386__)) && !defined(I386_ONLY)) || \
      (defined(__x86_64) || defined(__x86_64__))
/*
 * This gives ~30-40% performance improvement in SHA-256 compiled
 * with gcc [on P4]. Well, the first macro, to be frank. We can pull
 * this trick on x86* platforms only, because these CPUs can fetch
 * unaligned data without raising an exception.
 */
#   define HOST_c2l(c,l)    ({ unsigned int r=*((const unsigned int *)(c)); \
                   asm ("bswapl %0":"=r"(r):"0"(r));        \
                   (c)+=4; (l)=r;           })
#   define HOST_l2c(l,c)    ({ unsigned int r=(l);          \
                   asm ("bswapl %0":"=r"(r):"0"(r));        \
                   *((unsigned int *)(c))=r; (c)+=4; r;     })
#  endif
# endif
#endif

#ifndef HOST_c2l
#define HOST_c2l(c,l)   (l =(((unsigned long)(*((c)++)))<<24),      \
             l|=(((unsigned long)(*((c)++)))<<16),      \
             l|=(((unsigned long)(*((c)++)))<< 8),      \
             l|=(((unsigned long)(*((c)++)))    ),      \
             l)
#endif
#define HOST_p_c2l(c,l,n)   {                           \
            switch (n) {    /* cases fall through */    \
            case 0: l =((unsigned long)(*((c)++)))<<24; \
            case 1: l|=((unsigned long)(*((c)++)))<<16; \
            case 2: l|=((unsigned long)(*((c)++)))<< 8; \
            case 3: l|=((unsigned long)(*((c)++)));     \
                } }
#define HOST_p_c2l_p(c,l,sc,len) {                      \
            switch (sc) {   /* cases fall through */    \
            case 0: l =((unsigned long)(*((c)++)))<<24; \
                if (--len == 0) break;                  \
            case 1: l|=((unsigned long)(*((c)++)))<<16; \
                if (--len == 0) break;                  \
            case 2: l|=((unsigned long)(*((c)++)))<< 8; \
                } }
/* NOTE the pointer is not incremented at the end of this */
#define HOST_c2l_p(c,l,n)   {                           \
            l=0; (c)+=n;                                \
            switch (n) {    /* cases fall through */    \
            case 3: l =((unsigned long)(*(--(c))))<< 8; \
            case 2: l|=((unsigned long)(*(--(c))))<<16; \
            case 1: l|=((unsigned long)(*(--(c))))<<24; \
                } }
#ifndef HOST_l2c
#define HOST_l2c(l,c)   (*((c)++)=(unsigned char)(((l)>>24)&0xff), \
             *((c)++)=(unsigned char)(((l)>>16)&0xff),  \
             *((c)++)=(unsigned char)(((l)>> 8)&0xff),  \
             *((c)++)=(unsigned char)(((l)    )&0xff),  \
             l)
#endif
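
/*
 * Illustration (a sketch with made-up values): given the input bytes
 * 01 02 03 04, the generic HOST_c2l above yields l == 0x01020304 and
 * advances c by 4; HOST_l2c is the exact inverse and emits the bytes
 * back in big-endian order.
 */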

#elif defined(DATA_ORDER_IS_LITTLE_ENDIAN)

#if defined(__i386) || defined(__i386__) || defined(__x86_64) || defined(__x86_64__)
# ifndef B_ENDIAN
/* See comment in DATA_ORDER_IS_BIG_ENDIAN section. */
#  define HOST_c2l(c,l) ((l)=*((const unsigned int *)(c)), (c)+=4, l)
#  define HOST_l2c(l,c) (*((unsigned int *)(c))=(l), (c)+=4, l)
# endif
#endif

#ifndef HOST_c2l
#define HOST_c2l(c,l)   (l =(((unsigned long)(*((c)++)))    ),      \
             l|=(((unsigned long)(*((c)++)))<< 8),      \
             l|=(((unsigned long)(*((c)++)))<<16),      \
             l|=(((unsigned long)(*((c)++)))<<24),      \
             l)
#endif
#define HOST_p_c2l(c,l,n)   {                           \
            switch (n) {    /* cases fall through */    \
            case 0: l =((unsigned long)(*((c)++)));     \
            case 1: l|=((unsigned long)(*((c)++)))<< 8; \
            case 2: l|=((unsigned long)(*((c)++)))<<16; \
            case 3: l|=((unsigned long)(*((c)++)))<<24; \
                } }
#define HOST_p_c2l_p(c,l,sc,len) {                      \
            switch (sc) {   /* cases fall through */    \
            case 0: l =((unsigned long)(*((c)++)));     \
                if (--len == 0) break;                  \
            case 1: l|=((unsigned long)(*((c)++)))<< 8; \
                if (--len == 0) break;                  \
            case 2: l|=((unsigned long)(*((c)++)))<<16; \
                } }
/* NOTE the pointer is not incremented at the end of this */
#define HOST_c2l_p(c,l,n)   {                           \
            l=0; (c)+=n;                                \
            switch (n) {    /* cases fall through */    \
            case 3: l =((unsigned long)(*(--(c))))<<16; \
            case 2: l|=((unsigned long)(*(--(c))))<< 8; \
            case 1: l|=((unsigned long)(*(--(c))));     \
                } }
#ifndef HOST_l2c
#define HOST_l2c(l,c)   (*((c)++)=(unsigned char)(((l)    )&0xff), \
             *((c)++)=(unsigned char)(((l)>> 8)&0xff),  \
             *((c)++)=(unsigned char)(((l)>>16)&0xff),  \
             *((c)++)=(unsigned char)(((l)>>24)&0xff),  \
             l)
#endif
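
/*
 * Illustration (a sketch with made-up values): given the input bytes
 * 01 02 03 04, the generic HOST_c2l above yields l == 0x04030201,
 * and HOST_l2c stores the least significant byte first.
 */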

#endif

/*
 * Time for some action:-)
 */

int HASH_UPDATE (HASH_CTX *c, const void *data_, size_t len)
{
    const unsigned char *data = (const unsigned char *)data_;
    register HASH_LONG *p;
    register HASH_LONG l;
    size_t sw, sc, ew, ec;

    if (len == 0) {
        return 1;
    }

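    /*
     * Nh:Nl together hold the 64-bit count of message bits hashed so
     * far: the (len << 3) term below updates the low word Nl (with an
     * explicit carry into Nh on overflow), while (len >> 29) adds the
     * high bits of len*8 into Nh. E.g. len == 0x30000000 contributes
     * 0x180000000 bits: 0x80000000 to Nl and 1 to Nh.
     */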
    l = (c->Nl + (((HASH_LONG)len) << 3)) & 0xffffffffUL;
    /* 95-05-24 eay Fixed a bug with the overflow handling, thanks to
     * Wei Dai <weidai@eskimo.com> for pointing it out. */
    if (l < c->Nl) { /* overflow */
        c->Nh++;
    }
    c->Nh += (len >> 29);    /* might cause compiler warning on 16-bit */
    c->Nl = l;

    if (c->num != 0) {
        p = c->data;
        sw = c->num >> 2;
        sc = c->num & 0x03;

        if ((c->num + len) >= HASH_CBLOCK) {
            l = p[sw];
            HOST_p_c2l(data, l, sc);
            p[sw++] = l;
            for (; sw < HASH_LBLOCK; sw++) {
                HOST_c2l(data, l);
                p[sw] = l;
            }
            HASH_BLOCK_HOST_ORDER (c, p, 1);
            len -= (HASH_CBLOCK - c->num);
            c->num = 0;
            /* drop through and do the rest */
        } else {
            c->num += (unsigned int)len;
            if ((sc + len) < 4) { /* ugly, add chars to a word */
                l = p[sw];
                HOST_p_c2l_p(data, l, sc, len);
                p[sw] = l;
            } else {
                ew = (c->num >> 2);
                ec = (c->num & 0x03);
                if (sc) {
                    l = p[sw];
                }
                HOST_p_c2l(data, l, sc);
                p[sw++] = l;
                for (; sw < ew; sw++) {
                    HOST_c2l(data, l);
                    p[sw] = l;
                }
                if (ec) {
                    HOST_c2l_p(data, l, ec);
                    p[sw] = l;
                }
            }
            return 1;
        }
    }

    sw = len / HASH_CBLOCK;
    if (sw > 0) {
#if defined(HASH_BLOCK_DATA_ORDER_ALIGNED)
        /*
         * Note that HASH_BLOCK_DATA_ORDER_ALIGNED gets defined
         * only if sizeof(HASH_LONG)==4.
         */
        if ((((size_t)data) % 4) == 0) {
            /* data is properly aligned so that we can cast it: */
            HASH_BLOCK_DATA_ORDER_ALIGNED (c, (const HASH_LONG *)data, sw);
            sw *= HASH_CBLOCK;
            data += sw;
            len -= sw;
        } else
#if !defined(HASH_BLOCK_DATA_ORDER)
            while (sw--) {
                memcpy (p = c->data, data, HASH_CBLOCK);
                HASH_BLOCK_DATA_ORDER_ALIGNED(c, p, 1);
                data += HASH_CBLOCK;
                len -= HASH_CBLOCK;
            }
#endif
#endif
#if defined(HASH_BLOCK_DATA_ORDER)
        {
            HASH_BLOCK_DATA_ORDER(c, data, sw);
            sw *= HASH_CBLOCK;
            data += sw;
            len -= sw;
        }
#endif
    }

    if (len != 0) {
        p = c->data;
        c->num = (unsigned int)len;
        ew = len >> 2;    /* words to copy */
        ec = len & 0x03;
        for (; ew; ew--, p++) {
            HOST_c2l(data, l);
            *p = l;
        }
        HOST_c2l_p(data, l, ec);
        *p = l;
    }
    return 1;
}


void HASH_TRANSFORM (HASH_CTX *c, const unsigned char *data)
{
#if defined(HASH_BLOCK_DATA_ORDER_ALIGNED)
    if ((((size_t)data) % 4) == 0) {
        /* data is properly aligned so that we can cast it: */
        HASH_BLOCK_DATA_ORDER_ALIGNED (c, (const HASH_LONG *)data, 1);
    } else
#if !defined(HASH_BLOCK_DATA_ORDER)
    {
        memcpy (c->data, data, HASH_CBLOCK);
        HASH_BLOCK_DATA_ORDER_ALIGNED (c, c->data, 1);
    }
#endif
#endif
#if defined(HASH_BLOCK_DATA_ORDER)
    HASH_BLOCK_DATA_ORDER (c, data, 1);
#endif
}


int HASH_FINAL (unsigned char *md, HASH_CTX *c)
{
    register HASH_LONG *p;
    register unsigned long l;
    register int i, j;
    static const unsigned char end[4] = {0x80, 0x00, 0x00, 0x00};
    const unsigned char *cp = end;

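    /*
     * Standard MD-style padding: a single 0x80 byte terminates the
     * message, the block is zero-filled up to the last two words, and
     * the 64-bit bit count Nh:Nl is stored in those words. E.g. after
     * a 3-byte message c->num == 3, so 0x80 lands at byte offset 3 of
     * the block and the final words hold Nh == 0 and Nl == 24.
     */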
    /* c->num should definitely have room for at least one more byte. */
    p = c->data;
    i = c->num >> 2;
    j = c->num & 0x03;

#if 0
    /* purify often complains about the following line as an
     * Uninitialized Memory Read.  While this can be true, the
     * following p_c2l macro will reset l when that case is true.
     * This is because j&0x03 contains the number of 'valid' bytes
     * already in p[i].  If and only if j&0x03 == 0, the UMR will
     * occur but this is also the only time p_c2l will do
     * l= *(cp++) instead of l|= *(cp++)
     * Many thanks to Alex Tang <altitude@cic.net> for picking up this
     * 'potential bug' */
#ifdef PURIFY
    if (j == 0) {
        p[i] = 0;    /* Yeah, but that's not the way to fix it:-) */
    }
#endif
    l = p[i];
#else
    l = (j == 0) ? 0 : p[i];
#endif
    HOST_p_c2l(cp, l, j);
    p[i++] = l; /* i is the next 'undefined word' */

    if (i > (HASH_LBLOCK - 2)) { /* save room for Nl and Nh */
        if (i < HASH_LBLOCK) {
            p[i] = 0;
        }
        HASH_BLOCK_HOST_ORDER (c, p, 1);
        i = 0;
    }
    for (; i < (HASH_LBLOCK - 2); i++) {
        p[i] = 0;
    }

#if   defined(DATA_ORDER_IS_BIG_ENDIAN)
    p[HASH_LBLOCK - 2] = c->Nh;
    p[HASH_LBLOCK - 1] = c->Nl;
#elif defined(DATA_ORDER_IS_LITTLE_ENDIAN)
    p[HASH_LBLOCK - 2] = c->Nl;
    p[HASH_LBLOCK - 1] = c->Nh;
#endif
    HASH_BLOCK_HOST_ORDER (c, p, 1);

#ifndef HASH_MAKE_STRING
#error "HASH_MAKE_STRING must be defined!"
#else
    HASH_MAKE_STRING(c, md);
#endif

    c->num = 0;
    /* clear stuff, HASH_BLOCK may be leaving some stuff on the stack
     * but I'm not worried :-)
    OPENSSL_cleanse((void *)c,sizeof(HASH_CTX));
     */
    return 1;
}

#ifndef MD32_REG_T
#define MD32_REG_T int
/*
 * This comment was originally written for MD5, which is why it
 * discusses A-D. But it basically applies to all 32-bit digests,
 * which is why it was moved to this common header file.
 *
 * In case you wonder why A-D are declared as long and not
 * as MD5_LONG: doing so results in a slight performance
 * boost on LP64 architectures. The catch is that we don't
 * really care if the 32 MSBs of a 64-bit register get polluted
 * with eventual overflows, as we *save* only the 32 LSBs in
 * *either* case. Now, declaring 'em long excuses the compiler
 * from keeping the 32 MSBs zeroed, resulting in a 13% performance
 * improvement under SPARC Solaris7/64 and 5% under AlphaLinux.
 * Well, to be honest, it should say that this *prevents*
 * performance degradation.
 *                <appro@fy.chalmers.se>
 * Apparently there're LP64 compilers that generate better
 * code if A-D are declared int. Most notably GCC-x86_64
 * generates better code.
 *                <appro@fy.chalmers.se>
 */
#endif


#endif /* _MD32_COMMON__H */