/*
 * sha.c: routines to compute SHA-1/224/256/384/512 digests
 *
 * Ref: NIST FIPS PUB 180-4 Secure Hash Standard
 *
 * Copyright (C) 2003-2023 Mark Shelor, All Rights Reserved
 *
 * Version: 6.04
 * Sat Feb 25 12:00:50 PM MST 2023
 *
 */

#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include <string.h>
#include <ctype.h>
#include "sha.h"
#include "sha64bit.h"

#define W32	SHA32			/* useful abbreviations */
#define C32	SHA32_CONST
#define SR32	SHA32_SHR
#define SL32	SHA32_SHL
#define LO32	SHA_LO32
#define UCHR	unsigned char
#define UINT	unsigned int
#define ULNG	unsigned long
#define VP	void *

#define ROTR(x, n)	(SR32(x, n) | SL32(x, 32-(n)))
#define ROTL(x, n)	(SL32(x, n) | SR32(x, 32-(n)))

#define Ch(x, y, z)	((z) ^ ((x) & ((y) ^ (z))))
#define Pa(x, y, z)	((x) ^ (y) ^ (z))
#define Ma(x, y, z)	(((x) & (y)) | ((z) & ((x) | (y))))

#define SIGMA0(x)	(ROTR(x,  2) ^ ROTR(x, 13) ^ ROTR(x, 22))
#define SIGMA1(x)	(ROTR(x,  6) ^ ROTR(x, 11) ^ ROTR(x, 25))
#define sigma0(x)	(ROTR(x,  7) ^ ROTR(x, 18) ^ SR32(x,  3))
#define sigma1(x)	(ROTR(x, 17) ^ ROTR(x, 19) ^ SR32(x, 10))

#define K1	C32(0x5a827999)		/* SHA-1 constants */
#define K2	C32(0x6ed9eba1)
#define K3	C32(0x8f1bbcdc)
#define K4	C32(0xca62c1d6)

static const W32 K256[64] =		/* SHA-224/256 constants */
{
	C32(0x428a2f98), C32(0x71374491), C32(0xb5c0fbcf), C32(0xe9b5dba5),
	C32(0x3956c25b), C32(0x59f111f1), C32(0x923f82a4), C32(0xab1c5ed5),
	C32(0xd807aa98), C32(0x12835b01), C32(0x243185be), C32(0x550c7dc3),
	C32(0x72be5d74), C32(0x80deb1fe), C32(0x9bdc06a7), C32(0xc19bf174),
	C32(0xe49b69c1), C32(0xefbe4786), C32(0x0fc19dc6), C32(0x240ca1cc),
	C32(0x2de92c6f), C32(0x4a7484aa), C32(0x5cb0a9dc), C32(0x76f988da),
	C32(0x983e5152), C32(0xa831c66d), C32(0xb00327c8), C32(0xbf597fc7),
	C32(0xc6e00bf3), C32(0xd5a79147), C32(0x06ca6351), C32(0x14292967),
	C32(0x27b70a85), C32(0x2e1b2138), C32(0x4d2c6dfc), C32(0x53380d13),
	C32(0x650a7354), C32(0x766a0abb), C32(0x81c2c92e), C32(0x92722c85),
	C32(0xa2bfe8a1), C32(0xa81a664b), C32(0xc24b8b70), C32(0xc76c51a3),
	C32(0xd192e819), C32(0xd6990624), C32(0xf40e3585), C32(0x106aa070),
	C32(0x19a4c116), C32(0x1e376c08), C32(0x2748774c), C32(0x34b0bcb5),
	C32(0x391c0cb3), C32(0x4ed8aa4a), C32(0x5b9cca4f), C32(0x682e6ff3),
	C32(0x748f82ee), C32(0x78a5636f), C32(0x84c87814), C32(0x8cc70208),
	C32(0x90befffa), C32(0xa4506ceb), C32(0xbef9a3f7), C32(0xc67178f2)
};

static const W32 H01[8] =		/* SHA-1 initial hash value */
{
	C32(0x67452301), C32(0xefcdab89), C32(0x98badcfe), C32(0x10325476),
	C32(0xc3d2e1f0), C32(0x00000000), C32(0x00000000), C32(0x00000000)
};

static const W32 H0224[8] =		/* SHA-224 initial hash value */
{
	C32(0xc1059ed8), C32(0x367cd507), C32(0x3070dd17), C32(0xf70e5939),
	C32(0xffc00b31), C32(0x68581511), C32(0x64f98fa7), C32(0xbefa4fa4)
};

static const W32 H0256[8] =		/* SHA-256 initial hash value */
{
	C32(0x6a09e667), C32(0xbb67ae85), C32(0x3c6ef372), C32(0xa54ff53a),
	C32(0x510e527f), C32(0x9b05688c), C32(0x1f83d9ab), C32(0x5be0cd19)
};

static void sha1(SHA *s, UCHR *block)		/* SHA-1 transform */
{
	W32 a, b, c, d, e;
	W32 W[16];
	W32 *wp = W;
	W32 *H = s->H32;

	SHA32_SCHED(W, block);

	/*
	 * Use SHA-1 alternate method from FIPS PUB 180-4 (ref. 6.1.3)
	 *
	 * To improve performance, unroll the loop and consolidate assignments
	 * by changing the roles of variables "a" through "e" at each step.
	 * Note that the variable "T" is no longer needed.
	 */
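
	/*
	 * Illustrative sketch (not compiled): one step of the standard
	 * SHA-1 round,
	 *
	 *	T = ROTL(a, 5) + f(b, c, d) + e + k + w;
	 *	e = d; d = c; c = ROTL(b, 30); b = a; a = T;
	 *
	 * becomes, once the five working variables are renamed at each
	 * step instead of being shifted,
	 *
	 *	e += ROTL(a, 5) + f(b, c, d) + k + w;  b = ROTL(b, 30);
	 *
	 * which is exactly what the M1 macro below performs.
	 */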

#define M1(a, b, c, d, e, f, k, w)		\
	e += ROTL(a, 5) + f(b, c, d) + k + w;	\
	b = ROTL(b, 30)

#define M11(f, k, w)	M1(a, b, c, d, e, f, k, w);
#define M12(f, k, w)	M1(e, a, b, c, d, f, k, w);
#define M13(f, k, w)	M1(d, e, a, b, c, f, k, w);
#define M14(f, k, w)	M1(c, d, e, a, b, f, k, w);
#define M15(f, k, w)	M1(b, c, d, e, a, f, k, w);

#define W11(s)	W[(s+ 0) & 0xf]
#define W12(s)	W[(s+13) & 0xf]
#define W13(s)	W[(s+ 8) & 0xf]
#define W14(s)	W[(s+ 2) & 0xf]

#define A1(s)	(W11(s) = ROTL(W11(s) ^ W12(s) ^ W13(s) ^ W14(s), 1))

	a = H[0]; b = H[1]; c = H[2]; d = H[3]; e = H[4];

	M11(Ch, K1, *wp++); M12(Ch, K1, *wp++); M13(Ch, K1, *wp++);
	M14(Ch, K1, *wp++); M15(Ch, K1, *wp++); M11(Ch, K1, *wp++);
	M12(Ch, K1, *wp++); M13(Ch, K1, *wp++); M14(Ch, K1, *wp++);
	M15(Ch, K1, *wp++); M11(Ch, K1, *wp++); M12(Ch, K1, *wp++);
	M13(Ch, K1, *wp++); M14(Ch, K1, *wp++); M15(Ch, K1, *wp++);
	M11(Ch, K1, *wp ); M12(Ch, K1, A1( 0)); M13(Ch, K1, A1( 1));
	M14(Ch, K1, A1( 2)); M15(Ch, K1, A1( 3)); M11(Pa, K2, A1( 4));
	M12(Pa, K2, A1( 5)); M13(Pa, K2, A1( 6)); M14(Pa, K2, A1( 7));
	M15(Pa, K2, A1( 8)); M11(Pa, K2, A1( 9)); M12(Pa, K2, A1(10));
	M13(Pa, K2, A1(11)); M14(Pa, K2, A1(12)); M15(Pa, K2, A1(13));
	M11(Pa, K2, A1(14)); M12(Pa, K2, A1(15)); M13(Pa, K2, A1( 0));
	M14(Pa, K2, A1( 1)); M15(Pa, K2, A1( 2)); M11(Pa, K2, A1( 3));
	M12(Pa, K2, A1( 4)); M13(Pa, K2, A1( 5)); M14(Pa, K2, A1( 6));
	M15(Pa, K2, A1( 7)); M11(Ma, K3, A1( 8)); M12(Ma, K3, A1( 9));
	M13(Ma, K3, A1(10)); M14(Ma, K3, A1(11)); M15(Ma, K3, A1(12));
	M11(Ma, K3, A1(13)); M12(Ma, K3, A1(14)); M13(Ma, K3, A1(15));
	M14(Ma, K3, A1( 0)); M15(Ma, K3, A1( 1)); M11(Ma, K3, A1( 2));
	M12(Ma, K3, A1( 3)); M13(Ma, K3, A1( 4)); M14(Ma, K3, A1( 5));
	M15(Ma, K3, A1( 6)); M11(Ma, K3, A1( 7)); M12(Ma, K3, A1( 8));
	M13(Ma, K3, A1( 9)); M14(Ma, K3, A1(10)); M15(Ma, K3, A1(11));
	M11(Pa, K4, A1(12)); M12(Pa, K4, A1(13)); M13(Pa, K4, A1(14));
	M14(Pa, K4, A1(15)); M15(Pa, K4, A1( 0)); M11(Pa, K4, A1( 1));
	M12(Pa, K4, A1( 2)); M13(Pa, K4, A1( 3)); M14(Pa, K4, A1( 4));
	M15(Pa, K4, A1( 5)); M11(Pa, K4, A1( 6)); M12(Pa, K4, A1( 7));
	M13(Pa, K4, A1( 8)); M14(Pa, K4, A1( 9)); M15(Pa, K4, A1(10));
	M11(Pa, K4, A1(11)); M12(Pa, K4, A1(12)); M13(Pa, K4, A1(13));
	M14(Pa, K4, A1(14)); M15(Pa, K4, A1(15));

	H[0] += a; H[1] += b; H[2] += c; H[3] += d; H[4] += e;
}

static void sha256(SHA *s, UCHR *block)		/* SHA-224/256 transform */
{
	W32 a, b, c, d, e, f, g, h, T1;
	W32 W[16];
	const W32 *kp = K256;
	W32 *wp = W;
	W32 *H = s->H32;

	SHA32_SCHED(W, block);

	/*
	 * Use same technique as in sha1()
	 *
	 * To improve performance, unroll the loop and consolidate assignments
	 * by changing the roles of variables "a" through "h" at each step.
	 * Note that the variable "T2" is no longer needed.
	 */
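
	/*
	 * Illustrative sketch (not compiled): the standard SHA-256 round
	 *
	 *	T1 = h + SIGMA1(e) + Ch(e, f, g) + K[t] + W[t];
	 *	T2 = SIGMA0(a) + Ma(a, b, c);
	 *	h = g; g = f; f = e; e = d + T1;
	 *	d = c; c = b; b = a; a = T1 + T2;
	 *
	 * collapses to the two assignments in the M2 macro below once the
	 * eight working variables are renamed at each step.
	 */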

#define M2(a, b, c, d, e, f, g, h, w)				\
	T1 = h + SIGMA1(e) + Ch(e, f, g) + (*kp++) + w;		\
	h = T1 + SIGMA0(a) + Ma(a, b, c); d += T1;

#define W21(s)	W[(s+ 0) & 0xf]
#define W22(s)	W[(s+14) & 0xf]
#define W23(s)	W[(s+ 9) & 0xf]
#define W24(s)	W[(s+ 1) & 0xf]

#define A2(s)	(W21(s) += sigma1(W22(s)) + W23(s) + sigma0(W24(s)))

#define M21(w)	M2(a, b, c, d, e, f, g, h, w)
#define M22(w)	M2(h, a, b, c, d, e, f, g, w)
#define M23(w)	M2(g, h, a, b, c, d, e, f, w)
#define M24(w)	M2(f, g, h, a, b, c, d, e, w)
#define M25(w)	M2(e, f, g, h, a, b, c, d, w)
#define M26(w)	M2(d, e, f, g, h, a, b, c, w)
#define M27(w)	M2(c, d, e, f, g, h, a, b, w)
#define M28(w)	M2(b, c, d, e, f, g, h, a, w)

	a = H[0]; b = H[1]; c = H[2]; d = H[3];
	e = H[4]; f = H[5]; g = H[6]; h = H[7];

	M21( *wp++); M22( *wp++); M23( *wp++); M24( *wp++);
	M25( *wp++); M26( *wp++); M27( *wp++); M28( *wp++);
	M21( *wp++); M22( *wp++); M23( *wp++); M24( *wp++);
	M25( *wp++); M26( *wp++); M27( *wp++); M28( *wp );
	M21(A2( 0)); M22(A2( 1)); M23(A2( 2)); M24(A2( 3));
	M25(A2( 4)); M26(A2( 5)); M27(A2( 6)); M28(A2( 7));
	M21(A2( 8)); M22(A2( 9)); M23(A2(10)); M24(A2(11));
	M25(A2(12)); M26(A2(13)); M27(A2(14)); M28(A2(15));
	M21(A2( 0)); M22(A2( 1)); M23(A2( 2)); M24(A2( 3));
	M25(A2( 4)); M26(A2( 5)); M27(A2( 6)); M28(A2( 7));
	M21(A2( 8)); M22(A2( 9)); M23(A2(10)); M24(A2(11));
	M25(A2(12)); M26(A2(13)); M27(A2(14)); M28(A2(15));
	M21(A2( 0)); M22(A2( 1)); M23(A2( 2)); M24(A2( 3));
	M25(A2( 4)); M26(A2( 5)); M27(A2( 6)); M28(A2( 7));
	M21(A2( 8)); M22(A2( 9)); M23(A2(10)); M24(A2(11));
	M25(A2(12)); M26(A2(13)); M27(A2(14)); M28(A2(15));

	H[0] += a; H[1] += b; H[2] += c; H[3] += d;
	H[4] += e; H[5] += f; H[6] += g; H[7] += h;
}

#include "sha64bit.c"

#define BITSET(s, pos)	s[(pos) >> 3] &  (UCHR) (0x01 << (7 - (pos) % 8))
#define SETBIT(s, pos)	s[(pos) >> 3] |= (UCHR) (0x01 << (7 - (pos) % 8))
#define CLRBIT(s, pos)	s[(pos) >> 3] &= (UCHR) ~(0x01 << (7 - (pos) % 8))
#define NBYTES(nbits)	(((nbits) + 7) >> 3)
#define HEXLEN(nbytes)	((nbytes) << 1)
#define B64LEN(nbytes)	(((nbytes) % 3 == 0) ? ((nbytes) / 3) * 4	\
			: ((nbytes) / 3) * 4 + ((nbytes) % 3) + 1)
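
/*
 * Illustrative examples of the sizing macros above: NBYTES(20) is 3,
 * HEXLEN(28) is 56, and B64LEN(28) is 38 -- nine full 3-byte groups of
 * four characters plus two characters for the single leftover byte,
 * since no Base 64 '=' padding is emitted.
 */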

/* w32mem: writes 32-bit word to memory in big-endian order */
static UCHR *w32mem(UCHR *mem, W32 w32)
{
	int i;

	for (i = 0; i < 4; i++)
		*mem++ = (UCHR) (SR32(w32, 24-i*8) & 0xff);
	return(mem);
}

/* memw32: returns 32-bit word from memory written in big-endian order */
static W32 memw32(UCHR *mem)
{
	int i;
	W32 w = 0;

	for (i = 0; i < 4; i++)
		w = (w << 8) + *mem++;
	return(w);
}

/* digcpy: writes current state to digest buffer */
static UCHR *digcpy(SHA *s)
{
	int i;
	UCHR *d = s->digest;
	W32 *p32 = s->H32;
	W64 *p64 = s->H64;

	if (s->alg <= SHA256)
		for (i = 0; i < 8; i++, d += 4)
			w32mem(d, *p32++);
	else
		for (i = 0; i < 8; i++, d += 8) {
			w32mem(d, (W32) ((*p64 >> 16) >> 16));
			w32mem(d+4, (W32) (*p64++ & SHA32_MAX));
		}
	return(s->digest);
}

/* statecpy: writes buffer to current state (opposite of digcpy) */
static UCHR *statecpy(SHA *s, UCHR *buf)
{
	int i;
	W32 *p32 = s->H32;
	W64 *p64 = s->H64;

	if (s->alg <= SHA256)
		for (i = 0; i < 8; i++, buf += 4)
			*p32++ = memw32(buf);
	else
		for (i = 0; i < 8; i++, buf += 8)
			*p64++ = (((W64)memw32(buf) << 16) << 16) +
					memw32(buf+4);
	return(buf);
}

#define SHA_INIT(s, algo, transform, state, state_t)			\
	do {								\
		Zero(s, 1, SHA);					\
		s->alg = algo; s->sha = sha ## transform;		\
		Copy(H0 ## algo, s->state, 8, state_t);			\
		s->blocksize = SHA ## algo ## _BLOCK_BITS;		\
		s->digestlen = SHA ## algo ## _DIGEST_BITS >> 3;	\
	} while (0)
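
/*
 * For example (illustrative), SHA_INIT(s, 256, 256, H32, SHA32)
 * expands roughly to:
 *
 *	Zero(s, 1, SHA);
 *	s->alg = 256; s->sha = sha256;
 *	Copy(H0256, s->H32, 8, SHA32);
 *	s->blocksize = SHA256_BLOCK_BITS;
 *	s->digestlen = SHA256_DIGEST_BITS >> 3;
 *
 * relying on token pasting to select the transform, the initial hash
 * value, and the per-algorithm size constants.
 */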

/* sharewind: resets digest object */
static void sharewind(SHA *s)
{
	if (s->alg == SHA1) SHA_INIT(s, 1, 1, H32, SHA32);
	else if (s->alg == SHA224) SHA_INIT(s, 224, 256, H32, SHA32);
	else if (s->alg == SHA256) SHA_INIT(s, 256, 256, H32, SHA32);
	else if (s->alg == SHA384) SHA_INIT(s, 384, 512, H64, SHA64);
	else if (s->alg == SHA512) SHA_INIT(s, 512, 512, H64, SHA64);
	else if (s->alg == SHA512224) SHA_INIT(s, 512224, 512, H64, SHA64);
	else if (s->alg == SHA512256) SHA_INIT(s, 512256, 512, H64, SHA64);
}

/* shainit: initializes digest object */
static int shainit(SHA *s, int alg)
{
	if (alg >= SHA384 && !sha_384_512)
		return 0;
	if (alg != SHA1 && alg != SHA224 && alg != SHA256 &&
		alg != SHA384 && alg != SHA512 &&
		alg != SHA512224 && alg != SHA512256)
		return 0;
	s->alg = alg;
	sharewind(s);
	return 1;
}

/* shadirect: updates state directly (w/o going through s->block) */
static ULNG shadirect(UCHR *bitstr, ULNG bitcnt, SHA *s)
{
	ULNG savecnt = bitcnt;

	while (bitcnt >= s->blocksize) {
		s->sha(s, bitstr);
		bitstr += (s->blocksize >> 3);
		bitcnt -= s->blocksize;
	}
	if (bitcnt > 0) {
		Copy(bitstr, s->block, NBYTES(bitcnt), char);
		s->blockcnt = bitcnt;
	}
	return(savecnt);
}

/* shabytes: updates state for byte-aligned data in s->block */
static ULNG shabytes(UCHR *bitstr, ULNG bitcnt, SHA *s)
{
	UINT offset;
	UINT nbits;
	ULNG savecnt = bitcnt;

	offset = s->blockcnt >> 3;
	if (s->blockcnt + bitcnt >= s->blocksize) {
		nbits = s->blocksize - s->blockcnt;
		Copy(bitstr, s->block+offset, nbits>>3, char);
		bitcnt -= nbits;
		bitstr += (nbits >> 3);
		s->sha(s, s->block), s->blockcnt = 0;
		shadirect(bitstr, bitcnt, s);
	}
	else {
		Copy(bitstr, s->block+offset, NBYTES(bitcnt), char);
		s->blockcnt += bitcnt;
	}
	return(savecnt);
}

/* shabits: updates state for bit-aligned data in s->block */
static ULNG shabits(UCHR *bitstr, ULNG bitcnt, SHA *s)
{
	ULNG i;

	for (i = 0UL; i < bitcnt; i++) {
		if (BITSET(bitstr, i))
			SETBIT(s->block, s->blockcnt);
		else
			CLRBIT(s->block, s->blockcnt);
		if (++s->blockcnt == s->blocksize)
			s->sha(s, s->block), s->blockcnt = 0;
	}
	return(bitcnt);
}

/* shawrite: triggers a state update using data in bitstr/bitcnt */
static ULNG shawrite(UCHR *bitstr, ULNG bitcnt, SHA *s)
{
	if (!bitcnt)
		return(0);
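
	/*
	 * The total number of message bits processed so far is kept as a
	 * 128-bit counter in four 32-bit words (lenhh:lenhl:lenlh:lenll);
	 * the nested tests below ripple the carry upward whenever a
	 * lower-order word wraps around.
	 */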
	if (SHA_LO32(s->lenll += bitcnt) < bitcnt)
		if (SHA_LO32(++s->lenlh) == 0)
			if (SHA_LO32(++s->lenhl) == 0)
				s->lenhh++;
	if (s->blockcnt == 0)
		return(shadirect(bitstr, bitcnt, s));
	else if (s->blockcnt % 8 == 0)
		return(shabytes(bitstr, bitcnt, s));
	else
		return(shabits(bitstr, bitcnt, s));
}

/* shafinish: pads remaining block(s) and computes final digest state */
static void shafinish(SHA *s)
{
	UINT lenpos, lhpos, llpos;

	lenpos = s->blocksize == SHA1_BLOCK_BITS ? 448 : 896;
	lhpos  = s->blocksize == SHA1_BLOCK_BITS ?  56 : 120;
	llpos  = s->blocksize == SHA1_BLOCK_BITS ?  60 : 124;
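
	/*
	 * lenpos is the bit offset at which the encoded message length
	 * begins (448 in a 512-bit block, 896 in a 1024-bit block);
	 * lhpos/llpos are the byte offsets of the low-order 64 bits of
	 * that length.  The block is padded with a single 1 bit followed
	 * by 0 bits up to lenpos, spilling into an extra block when the
	 * current one is too full, per FIPS 180-4 section 5.1.
	 */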
	SETBIT(s->block, s->blockcnt), s->blockcnt++;
	while (s->blockcnt > lenpos)
		if (s->blockcnt < s->blocksize)
			CLRBIT(s->block, s->blockcnt), s->blockcnt++;
		else
			s->sha(s, s->block), s->blockcnt = 0;
	while (s->blockcnt < lenpos)
		CLRBIT(s->block, s->blockcnt), s->blockcnt++;
	if (s->blocksize > SHA1_BLOCK_BITS) {
		w32mem(s->block + 112, s->lenhh);
		w32mem(s->block + 116, s->lenhl);
	}
	w32mem(s->block + lhpos, s->lenlh);
	w32mem(s->block + llpos, s->lenll);
	s->sha(s, s->block);
}

#define shadigest(state)	digcpy(state)

/* xmap: translation map for hexadecimal encoding */
static const char xmap[] =
	"0123456789abcdef";

/* shahex: returns pointer to current digest (hexadecimal) */
static char *shahex(SHA *s)
{
	UINT i;
	char *h;
	UCHR *d;

	d = digcpy(s);
	s->hex[0] = '\0';
	if (HEXLEN((size_t) s->digestlen) >= sizeof(s->hex))
		return(s->hex);
	for (i = 0, h = s->hex; i < s->digestlen; i++) {
		*h++ = xmap[(*d >> 4) & 0x0f];
		*h++ = xmap[(*d++) & 0x0f];
	}
	*h = '\0';
	return(s->hex);
}

/* bmap: translation map for Base 64 encoding */
static const char bmap[] =
	"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/";

/* encbase64: encodes input (0 to 3 bytes) into Base 64 */
static void encbase64(UCHR *in, UINT n, char *out)
{
	UCHR byte[3] = {0, 0, 0};

	out[0] = '\0';
	if (n < 1 || n > 3)
		return;
	Copy(in, byte, n, UCHR);
	out[0] = bmap[byte[0] >> 2];
	out[1] = bmap[((byte[0] & 0x03) << 4) | (byte[1] >> 4)];
	out[2] = bmap[((byte[1] & 0x0f) << 2) | (byte[2] >> 6)];
	out[3] = bmap[byte[2] & 0x3f];
	out[n+1] = '\0';
}
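
/*
 * Illustrative example: encbase64((UCHR *) "ab", 2, out) stores "YWI"
 * in out -- two input bytes yield three Base 64 characters, and no '='
 * padding is appended.
 */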

/* shabase64: returns pointer to current digest (Base 64) */
static char *shabase64(SHA *s)
{
	UINT n;
	UCHR *q;
	char out[5];

	q = digcpy(s);
	s->base64[0] = '\0';
	if (B64LEN((size_t) s->digestlen) >= sizeof(s->base64))
		return(s->base64);
	for (n = s->digestlen; n > 3; n -= 3, q += 3) {
		encbase64(q, 3, out);
		strcat(s->base64, out);
	}
	encbase64(q, n, out);
	strcat(s->base64, out);
	return(s->base64);
}
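
/*
 * Typical use of the routines above (illustrative sketch only, not
 * part of this file's interface); note that shawrite() takes a length
 * in bits, so hashing the 3-byte string "abc" passes 24:
 *
 *	SHA s;
 *
 *	if (shainit(&s, SHA256)) {
 *		shawrite((UCHR *) "abc", 24, &s);
 *		shafinish(&s);
 *		printf("%s\n", shahex(&s));
 *	}
 */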

/* hmacinit: initializes HMAC-SHA digest object */
static HMAC *hmacinit(HMAC *h, int alg, UCHR *key, UINT keylen)
{
	UINT i;
	SHA ksha;

	Zero(h, 1, HMAC);
	if (!shainit(&h->isha, alg))
		return(NULL);
	if (!shainit(&h->osha, alg))
		return(NULL);
	if (keylen <= h->osha.blocksize / 8)
		Copy(key, h->key, keylen, char);
	else {
		if (!shainit(&ksha, alg))
			return(NULL);
		shawrite(key, keylen * 8, &ksha);
		shafinish(&ksha);
		Copy(digcpy(&ksha), h->key, ksha.digestlen, char);
	}
	h->digestlen = h->osha.digestlen;
	for (i = 0; i < h->osha.blocksize / 8; i++)
		h->key[i] ^= 0x5c;
	shawrite(h->key, h->osha.blocksize, &h->osha);
	for (i = 0; i < h->isha.blocksize / 8; i++)
		h->key[i] ^= (0x5c ^ 0x36);
	shawrite(h->key, h->isha.blocksize, &h->isha);
	Zero(h->key, sizeof(h->key), char);
	return(h);
}
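
/*
 * Note: the key-mixing loops above implement the standard HMAC
 * construction
 *
 *	HMAC(K, m) = H((K ^ opad) || H((K ^ ipad) || m))
 *
 * with opad = 0x5c and ipad = 0x36 repeated to the block size.  XORing
 * the stored key first with 0x5c and then with (0x5c ^ 0x36) produces
 * K ^ ipad without keeping two padded copies of the key around.
 */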

/* hmacwrite: triggers a state update using data in bitstr/bitcnt */
static ULNG hmacwrite(UCHR *bitstr, ULNG bitcnt, HMAC *h)
{
	return(shawrite(bitstr, bitcnt, &h->isha));
}

/* hmacfinish: computes final digest state */
static void hmacfinish(HMAC *h)
{
	shafinish(&h->isha);
	shawrite(digcpy(&h->isha), h->isha.digestlen * 8, &h->osha);
	shafinish(&h->osha);
}

#define hmacdigest(h)	digcpy(&(h)->osha)

/* hmachex: returns pointer to digest (hexadecimal) */
static char *hmachex(HMAC *h)
{
	return(shahex(&h->osha));
}

/* hmacbase64: returns pointer to digest (Base 64) */
static char *hmacbase64(HMAC *h)
{
	return(shabase64(&h->osha));
}
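
/*
 * Typical use of the HMAC routines (illustrative sketch only); as with
 * shawrite(), hmacwrite() takes a length in bits:
 *
 *	HMAC h;
 *
 *	if (hmacinit(&h, SHA256, (UCHR *) "key", 3) != NULL) {
 *		hmacwrite((UCHR *) "data", 32, &h);
 *		hmacfinish(&h);
 *		printf("%s\n", hmachex(&h));
 *	}
 */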