/* $OpenBSD: s3_cbc.c,v 1.25 2021/12/09 17:45:49 tb Exp $ */
/* ====================================================================
 * Copyright (c) 2012 The OpenSSL Project.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * 3. All advertising materials mentioning features or use of this
 *    software must display the following acknowledgment:
 *    "This product includes software developed by the OpenSSL Project
 *    for use in the OpenSSL Toolkit. (http://www.openssl.org/)"
 *
 * 4. The names "OpenSSL Toolkit" and "OpenSSL Project" must not be used to
 *    endorse or promote products derived from this software without
 *    prior written permission. For written permission, please contact
 *    openssl-core@openssl.org.
 *
 * 5. Products derived from this software may not be called "OpenSSL"
 *    nor may "OpenSSL" appear in their names without prior written
 *    permission of the OpenSSL Project.
 *
 * 6. Redistributions of any form whatsoever must retain the following
 *    acknowledgment:
 *    "This product includes software developed by the OpenSSL Project
 *    for use in the OpenSSL Toolkit (http://www.openssl.org/)"
 *
 * THIS SOFTWARE IS PROVIDED BY THE OpenSSL PROJECT ``AS IS'' AND ANY
 * EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE OpenSSL PROJECT OR
 * ITS CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 * ====================================================================
 *
 * This product includes cryptographic software written by Eric Young
 * (eay@cryptsoft.com).  This product includes software written by Tim
 * Hudson (tjh@cryptsoft.com).
 *
 */

#include <openssl/md5.h>
#include <openssl/sha.h>

#include "ssl_locl.h"

/* MAX_HASH_BIT_COUNT_BYTES is the maximum number of bytes in the hash's length
 * field. (SHA-384/512 have 128-bit length fields.) */
#define MAX_HASH_BIT_COUNT_BYTES 16

/* MAX_HASH_BLOCK_SIZE is the maximum hash block size that we'll support.
 * Currently SHA-384/512 has a 128-byte block size and that's the largest
 * supported by TLS. */
#define MAX_HASH_BLOCK_SIZE 128

/* Some utility functions are needed:
 *
 * These macros return the given value with the MSB copied to all the other
 * bits. They use the fact that arithmetic shift shifts in the sign bit.
 * However, this is not ensured by the C standard so you may need to replace
 * them with something else on odd CPUs. */
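/* As a rough sketch of such a replacement (not used here), the same mask can
 * be derived without relying on implementation-defined signed shifts, e.g.
 *
 *	#define DUPLICATE_MSB_TO_ALL_PORTABLE(x) \
 *		(0U - ((unsigned int)(x) >> (sizeof(unsigned int) * 8 - 1)))
 *
 * which yields all ones when the MSB of |x| is set and zero otherwise, since
 * unsigned arithmetic wraps modulo 2^N. The macro name above is hypothetical
 * and purely illustrative. */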
#define DUPLICATE_MSB_TO_ALL(x) ((unsigned int)((int)(x) >> (sizeof(int) * 8 - 1)))
#define DUPLICATE_MSB_TO_ALL_8(x) ((unsigned char)(DUPLICATE_MSB_TO_ALL(x)))

/* constant_time_lt returns all ones (0xffffffff) if a < b and 0 otherwise. */
static unsigned int
constant_time_lt(unsigned int a, unsigned int b)
{
	a -= b;
	return DUPLICATE_MSB_TO_ALL(a);
}

/* constant_time_ge returns all ones (0xffffffff) if a >= b and 0 otherwise. */
static unsigned int
constant_time_ge(unsigned int a, unsigned int b)
{
	a -= b;
	return DUPLICATE_MSB_TO_ALL(~a);
}

/* constant_time_eq_8 returns 0xff if a == b and 0x00 otherwise. */
static unsigned char
constant_time_eq_8(unsigned int a, unsigned int b)
{
	unsigned int c = a ^ b;
	c--;
	return DUPLICATE_MSB_TO_ALL_8(c);
}

/* ssl3_cbc_remove_padding removes the CBC padding from the decrypted TLS CBC
 * record in |rec| in constant time and returns 1 if the padding is valid and
 * -1 otherwise. It also removes any explicit IV from the start of the record
 * without leaking any timing about whether there was enough space after the
 * padding was removed.
 *
 * eiv_len: the length of any explicit IV at the start of |rec|.
 * mac_size: the size of the MAC in bytes.
 * returns:
 *   0: (in non-constant time) if the record is publicly invalid.
 *   1: if the padding was valid
 *  -1: otherwise. */
int
ssl3_cbc_remove_padding(SSL3_RECORD_INTERNAL *rec, unsigned int eiv_len,
    unsigned int mac_size)
{
	unsigned int padding_length, good, to_check, i;
	const unsigned int overhead = 1 /* padding length byte */ + mac_size;

	/*
	 * These lengths are all public so we can test them in
	 * non-constant time.
	 */
	if (overhead + eiv_len > rec->length)
		return 0;

	/* We can now safely skip the explicit IV, if any. */
	rec->data += eiv_len;
	rec->input += eiv_len;
	rec->length -= eiv_len;

	padding_length = rec->data[rec->length - 1];

	good = constant_time_ge(rec->length, overhead + padding_length);
	/* The padding consists of a length byte at the end of the record and
	 * then that many bytes of padding, all with the same value as the
	 * length byte. Thus, with the length byte included, there are
	 * |padding_length|+1 bytes of padding.
	 *
	 * We can't check just |padding_length+1| bytes because that leaks
	 * decrypted information. Therefore we always have to check the maximum
	 * amount of padding possible. (Again, the length of the record is
	 * public information so we can use it.) */
	to_check = 256; /* maximum amount of padding, inc length byte. */
	if (to_check > rec->length)
		to_check = rec->length;

	for (i = 0; i < to_check; i++) {
		unsigned char mask = constant_time_ge(padding_length, i);
		unsigned char b = rec->data[rec->length - 1 - i];
		/* The final |padding_length+1| bytes should all have the value
		 * |padding_length|. Therefore the XOR should be zero. */
		good &= ~(mask&(padding_length ^ b));
	}

	/* If any of the final |padding_length+1| bytes had the wrong value,
	 * one or more of the lower eight bits of |good| will be cleared. We
	 * AND the bottom 8 bits together and duplicate the result to all the
	 * bits. */
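	/* (Concretely: after the three shifts below, bit 0 of |good| is the
	 * AND of its original bits 0..7; shifting that bit up to the MSB and
	 * duplicating it then yields an all-ones mask only if every one of
	 * the bottom eight bits was set.) */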
	good &= good >> 4;
	good &= good >> 2;
	good &= good >> 1;
	good <<= sizeof(good)*8 - 1;
	good = DUPLICATE_MSB_TO_ALL(good);

	padding_length = good & (padding_length + 1);
	rec->length -= padding_length;
	rec->padding_length = padding_length;

	return (int)((good & 1) | (~good & -1));
}

/* ssl3_cbc_copy_mac copies |md_size| bytes from the end of |rec| to |out| in
 * constant time (independent of the concrete value of rec->length, which may
 * vary within a 256-byte window).
 *
 * ssl3_cbc_remove_padding or tls1_cbc_remove_padding must be called prior to
 * this function.
 *
 * On entry:
 *   rec->orig_len >= md_size
 *   md_size <= EVP_MAX_MD_SIZE
 *
 * If CBC_MAC_ROTATE_IN_PLACE is defined then the rotation is performed with
 * variable accesses in a 64-byte-aligned buffer. Assuming that this fits into
 * a single cache line or a pair of cache lines, the variable memory accesses
 * don't actually affect the timing. CPUs with smaller cache-lines [if any] are
 * not multi-core and are not considered vulnerable to cache-timing attacks.
 */
#define CBC_MAC_ROTATE_IN_PLACE

void
ssl3_cbc_copy_mac(unsigned char* out, const SSL3_RECORD_INTERNAL *rec,
    unsigned int md_size, unsigned int orig_len)
{
#if defined(CBC_MAC_ROTATE_IN_PLACE)
	unsigned char rotated_mac_buf[64 + EVP_MAX_MD_SIZE];
	unsigned char *rotated_mac;
#else
	unsigned char rotated_mac[EVP_MAX_MD_SIZE];
#endif

	/* mac_end is the index of |rec->data| just after the end of the MAC. */
	unsigned int mac_end = rec->length;
	unsigned int mac_start = mac_end - md_size;
	/* scan_start contains the number of bytes that we can ignore because
	 * the MAC's position can only vary by 255 bytes. */
	unsigned int scan_start = 0;
	unsigned int i, j;
	unsigned int div_spoiler;
	unsigned int rotate_offset;

	OPENSSL_assert(orig_len >= md_size);
	OPENSSL_assert(md_size <= EVP_MAX_MD_SIZE);

#if defined(CBC_MAC_ROTATE_IN_PLACE)
	rotated_mac = rotated_mac_buf + ((0 - (size_t)rotated_mac_buf)&63);
#endif

	/* This information is public so it's safe to branch based on it. */
	if (orig_len > md_size + 255 + 1)
		scan_start = orig_len - (md_size + 255 + 1);
	/* div_spoiler contains a multiple of md_size that is used to cause the
	 * modulo operation to be constant time. Without this, the time varies
	 * based on the amount of padding when running on Intel chips at least.
	 *
	 * The aim of right-shifting md_size is so that the compiler doesn't
	 * figure out that it can remove div_spoiler as that would require it
	 * to prove that md_size is always even, which I hope is beyond it. */
	div_spoiler = md_size >> 1;
	div_spoiler <<= (sizeof(div_spoiler) - 1) * 8;
	rotate_offset = (div_spoiler + mac_start - scan_start) % md_size;

	memset(rotated_mac, 0, md_size);
	for (i = scan_start, j = 0; i < orig_len; i++) {
		unsigned char mac_started = constant_time_ge(i, mac_start);
		unsigned char mac_ended = constant_time_ge(i, mac_end);
		unsigned char b = rec->data[i];
		rotated_mac[j++] |= b & mac_started & ~mac_ended;
		j &= constant_time_lt(j, md_size);
	}

	/* Now rotate the MAC */
#if defined(CBC_MAC_ROTATE_IN_PLACE)
	j = 0;
	for (i = 0; i < md_size; i++) {
		/* in case cache-line is 32 bytes, touch second line */
		((volatile unsigned char *)rotated_mac)[rotate_offset^32];
		out[j++] = rotated_mac[rotate_offset++];
		rotate_offset &= constant_time_lt(rotate_offset, md_size);
	}
#else
	memset(out, 0, md_size);
	rotate_offset = md_size - rotate_offset;
	rotate_offset &= constant_time_lt(rotate_offset, md_size);
	for (i = 0; i < md_size; i++) {
		for (j = 0; j < md_size; j++)
			out[j] |= rotated_mac[i] & constant_time_eq_8(j, rotate_offset);
		rotate_offset++;
		rotate_offset &= constant_time_lt(rotate_offset, md_size);
	}
#endif
}

#define l2n(l,c)	(*((c)++)=(unsigned char)(((l)>>24)&0xff), \
			 *((c)++)=(unsigned char)(((l)>>16)&0xff), \
			 *((c)++)=(unsigned char)(((l)>> 8)&0xff), \
			 *((c)++)=(unsigned char)(((l)    )&0xff))

#define l2n8(l,c)	(*((c)++)=(unsigned char)(((l)>>56)&0xff), \
			 *((c)++)=(unsigned char)(((l)>>48)&0xff), \
			 *((c)++)=(unsigned char)(((l)>>40)&0xff), \
			 *((c)++)=(unsigned char)(((l)>>32)&0xff), \
			 *((c)++)=(unsigned char)(((l)>>24)&0xff), \
			 *((c)++)=(unsigned char)(((l)>>16)&0xff), \
			 *((c)++)=(unsigned char)(((l)>> 8)&0xff), \
			 *((c)++)=(unsigned char)(((l)    )&0xff))

/* u32toLE serialises an unsigned, 32-bit number (n) as four bytes at (p) in
 * little-endian order. The value of p is advanced by four. */
#define u32toLE(n, p) \
	(*((p)++)=(unsigned char)(n), \
	 *((p)++)=(unsigned char)(n>>8), \
	 *((p)++)=(unsigned char)(n>>16), \
	 *((p)++)=(unsigned char)(n>>24))

/* These functions serialize the state of a hash and thus perform the standard
 * "final" operation without adding the padding and length that such a function
 * typically does. */
static void
tls1_md5_final_raw(void* ctx, unsigned char *md_out)
{
	MD5_CTX *md5 = ctx;
	u32toLE(md5->A, md_out);
	u32toLE(md5->B, md_out);
	u32toLE(md5->C, md_out);
	u32toLE(md5->D, md_out);
}

static void
tls1_sha1_final_raw(void* ctx, unsigned char *md_out)
{
	SHA_CTX *sha1 = ctx;
	l2n(sha1->h0, md_out);
	l2n(sha1->h1, md_out);
	l2n(sha1->h2, md_out);
	l2n(sha1->h3, md_out);
	l2n(sha1->h4, md_out);
}

static void
tls1_sha256_final_raw(void* ctx, unsigned char *md_out)
{
	SHA256_CTX *sha256 = ctx;
	unsigned int i;

	for (i = 0; i < 8; i++) {
		l2n(sha256->h[i], md_out);
	}
}

static void
tls1_sha512_final_raw(void* ctx, unsigned char *md_out)
{
	SHA512_CTX *sha512 = ctx;
	unsigned int i;

	for (i = 0; i < 8; i++) {
		l2n8(sha512->h[i], md_out);
	}
}

/* Largest hash context ever used by the functions above. */
#define LARGEST_DIGEST_CTX SHA512_CTX

/* Type giving the alignment needed by the above. */
#define LARGEST_DIGEST_CTX_ALIGNMENT SHA_LONG64

/* ssl3_cbc_record_digest_supported returns 1 iff |ctx| uses a hash function
 * which ssl3_cbc_digest_record supports. */
char
ssl3_cbc_record_digest_supported(const EVP_MD_CTX *ctx)
{
	switch (EVP_MD_CTX_type(ctx)) {
	case NID_md5:
	case NID_sha1:
	case NID_sha224:
	case NID_sha256:
	case NID_sha384:
	case NID_sha512:
		return 1;
	default:
		return 0;
	}
}

/* ssl3_cbc_digest_record computes the MAC of a decrypted, padded TLS
 * record.
 *
 *   ctx: the EVP_MD_CTX from which we take the hash function.
 *     ssl3_cbc_record_digest_supported must return true for this EVP_MD_CTX.
 *   md_out: the digest output. At most EVP_MAX_MD_SIZE bytes will be written.
 *   md_out_size: if non-NULL, the number of output bytes is written here.
 *   header: the 13-byte, TLS record header.
 *   data: the record data itself, less any preceding explicit IV.
 *   data_plus_mac_size: the secret, reported length of the data and MAC
 *     once the padding has been removed.
 *   data_plus_mac_plus_padding_size: the public length of the whole
 *     record, including padding.
 *
 * On entry: by virtue of having been through one of the remove_padding
 * functions, above, we know that data_plus_mac_size is large enough to contain
 * a padding byte and MAC. (If the padding was invalid, it might contain the
 * padding too.)
 */
int
ssl3_cbc_digest_record(const EVP_MD_CTX *ctx, unsigned char* md_out,
    size_t* md_out_size, const unsigned char header[13],
    const unsigned char *data, size_t data_plus_mac_size,
    size_t data_plus_mac_plus_padding_size, const unsigned char *mac_secret,
    unsigned int mac_secret_length)
{
	union {
		/*
		 * Alignment here is to allow this to be cast as SHA512_CTX
		 * without losing the alignment required by the 64-bit
		 * SHA_LONG64 integers it contains.
		 */
		LARGEST_DIGEST_CTX_ALIGNMENT align;
		unsigned char c[sizeof(LARGEST_DIGEST_CTX)];
	} md_state;
	void (*md_final_raw)(void *ctx, unsigned char *md_out);
	void (*md_transform)(void *ctx, const unsigned char *block);
	unsigned int md_size, md_block_size = 64;
	unsigned int header_length, variance_blocks,
	    len, max_mac_bytes, num_blocks,
	    num_starting_blocks, k, mac_end_offset, c, index_a, index_b;
	unsigned int bits; /* at most 18 bits */
	unsigned char length_bytes[MAX_HASH_BIT_COUNT_BYTES];
	/* hmac_pad is the masked HMAC key. */
	unsigned char hmac_pad[MAX_HASH_BLOCK_SIZE];
	unsigned char first_block[MAX_HASH_BLOCK_SIZE];
	unsigned char mac_out[EVP_MAX_MD_SIZE];
	unsigned int i, j, md_out_size_u;
	EVP_MD_CTX *md_ctx;
	/* md_length_size is the number of bytes in the length field that
	 * terminates the hash. */
	unsigned int md_length_size = 8;
	char length_is_big_endian = 1;

	/* This is a, hopefully redundant, check that allows us to forget about
	 * many possible overflows later in this function. */
	OPENSSL_assert(data_plus_mac_plus_padding_size < 1024*1024);

	switch (EVP_MD_CTX_type(ctx)) {
	case NID_md5:
		MD5_Init((MD5_CTX*)md_state.c);
		md_final_raw = tls1_md5_final_raw;
		md_transform = (void(*)(void *ctx, const unsigned char *block)) MD5_Transform;
		md_size = 16;
		length_is_big_endian = 0;
		break;
	case NID_sha1:
		SHA1_Init((SHA_CTX*)md_state.c);
		md_final_raw = tls1_sha1_final_raw;
		md_transform = (void(*)(void *ctx, const unsigned char *block)) SHA1_Transform;
		md_size = 20;
		break;
	case NID_sha224:
		SHA224_Init((SHA256_CTX*)md_state.c);
		md_final_raw = tls1_sha256_final_raw;
		md_transform = (void(*)(void *ctx, const unsigned char *block)) SHA256_Transform;
		md_size = 224/8;
		break;
	case NID_sha256:
		SHA256_Init((SHA256_CTX*)md_state.c);
		md_final_raw = tls1_sha256_final_raw;
		md_transform = (void(*)(void *ctx, const unsigned char *block)) SHA256_Transform;
		md_size = 32;
		break;
	case NID_sha384:
		SHA384_Init((SHA512_CTX*)md_state.c);
		md_final_raw = tls1_sha512_final_raw;
		md_transform = (void(*)(void *ctx, const unsigned char *block)) SHA512_Transform;
		md_size = 384/8;
		md_block_size = 128;
		md_length_size = 16;
		break;
	case NID_sha512:
		SHA512_Init((SHA512_CTX*)md_state.c);
		md_final_raw = tls1_sha512_final_raw;
		md_transform = (void(*)(void *ctx, const unsigned char *block)) SHA512_Transform;
		md_size = 64;
		md_block_size = 128;
		md_length_size = 16;
		break;
	default:
		/* ssl3_cbc_record_digest_supported should have been
		 * called first to check that the hash function is
		 * supported. */
		OPENSSL_assert(0);
		if (md_out_size)
			*md_out_size = 0;
		return 0;
	}

	OPENSSL_assert(md_length_size <= MAX_HASH_BIT_COUNT_BYTES);
	OPENSSL_assert(md_block_size <= MAX_HASH_BLOCK_SIZE);
	OPENSSL_assert(md_size <= EVP_MAX_MD_SIZE);

	header_length = 13;

	/* variance_blocks is the number of blocks of the hash that we have to
	 * calculate in constant time because they could be altered by the
	 * padding value.
	 *
	 * TLSv1 has MACs up to 48 bytes long (SHA-384) and the padding is not
	 * required to be minimal. Therefore we say that the final six blocks
	 * can vary based on the padding.
	 *
	 * Later in the function, if the message is short and there obviously
	 * cannot be this many blocks then variance_blocks can be reduced. */
	variance_blocks = 6;
	/* From now on we're dealing with the MAC, which conceptually has 13
	 * bytes of `header' before the start of the data (TLS). */
	len = data_plus_mac_plus_padding_size + header_length;
	/* max_mac_bytes contains the maximum number of bytes of MACed data,
	 * including |header|, assuming that there's no padding. */
	max_mac_bytes = len - md_size - 1;
	/* num_blocks is the maximum number of hash blocks. */
	num_blocks = (max_mac_bytes + 1 + md_length_size + md_block_size - 1) / md_block_size;
	/* In order to calculate the MAC in constant time we have to handle
	 * the final blocks specially because the padding value could cause the
	 * end to appear somewhere in the final |variance_blocks| blocks and we
	 * can't leak where. However, |num_starting_blocks| worth of data can
	 * be hashed right away because no padding value can affect whether
	 * they are plaintext. */
	num_starting_blocks = 0;
	/* k is the starting byte offset into the conceptual header||data where
	 * we start processing. */
	k = 0;
	/* mac_end_offset is the index just past the end of the data to be
	 * MACed. */
	mac_end_offset = data_plus_mac_size + header_length - md_size;
	/* c is the index of the 0x80 byte in the final hash block that
	 * contains application data. */
	c = mac_end_offset % md_block_size;
	/* index_a is the hash block number that contains the 0x80 terminating
	 * value. */
	index_a = mac_end_offset / md_block_size;
	/* index_b is the hash block number that contains the 64-bit hash
	 * length, in bits. */
	index_b = (mac_end_offset + md_length_size) / md_block_size;
	/* bits is the hash-length in bits. It includes the additional hash
	 * block for the masked HMAC key. */

	if (num_blocks > variance_blocks) {
		num_starting_blocks = num_blocks - variance_blocks;
		k = md_block_size*num_starting_blocks;
	}

	bits = 8*mac_end_offset;
	/* Compute the initial HMAC block. */
	bits += 8*md_block_size;
	memset(hmac_pad, 0, md_block_size);
	OPENSSL_assert(mac_secret_length <= sizeof(hmac_pad));
	memcpy(hmac_pad, mac_secret, mac_secret_length);
	for (i = 0; i < md_block_size; i++)
		hmac_pad[i] ^= 0x36;

	md_transform(md_state.c, hmac_pad);

	if (length_is_big_endian) {
		memset(length_bytes, 0, md_length_size - 4);
		length_bytes[md_length_size - 4] = (unsigned char)(bits >> 24);
		length_bytes[md_length_size - 3] = (unsigned char)(bits >> 16);
		length_bytes[md_length_size - 2] = (unsigned char)(bits >> 8);
		length_bytes[md_length_size - 1] = (unsigned char)bits;
	} else {
		memset(length_bytes, 0, md_length_size);
		length_bytes[md_length_size - 5] = (unsigned char)(bits >> 24);
		length_bytes[md_length_size - 6] = (unsigned char)(bits >> 16);
		length_bytes[md_length_size - 7] = (unsigned char)(bits >> 8);
		length_bytes[md_length_size - 8] = (unsigned char)bits;
	}

	if (k > 0) {
		/* k is a multiple of md_block_size. */
		memcpy(first_block, header, 13);
		memcpy(first_block + 13, data, md_block_size - 13);
		md_transform(md_state.c, first_block);
		for (i = 1; i < k/md_block_size; i++)
			md_transform(md_state.c, data + md_block_size*i - 13);
	}

	memset(mac_out, 0, sizeof(mac_out));

	/* We now process the final hash blocks. For each block, we construct
	 * it in constant time. If |i == index_a| then we'll include the 0x80
	 * byte and zero pad etc. For each block we selectively copy it, in
	 * constant time, to |mac_out|. */
	for (i = num_starting_blocks; i <= num_starting_blocks + variance_blocks; i++) {
		unsigned char block[MAX_HASH_BLOCK_SIZE];
		unsigned char is_block_a = constant_time_eq_8(i, index_a);
		unsigned char is_block_b = constant_time_eq_8(i, index_b);
		for (j = 0; j < md_block_size; j++) {
			unsigned char b = 0, is_past_c, is_past_cp1;
			if (k < header_length)
				b = header[k];
			else if (k < data_plus_mac_plus_padding_size + header_length)
				b = data[k - header_length];
			k++;

			is_past_c = is_block_a & constant_time_ge(j, c);
			is_past_cp1 = is_block_a & constant_time_ge(j, c + 1);
			/* If this is the block containing the end of the
			 * application data, and we are at the offset for the
			 * 0x80 value, then overwrite b with 0x80. */
			b = (b&~is_past_c) | (0x80&is_past_c);
			/* If this is the block containing the end of the
			 * application data and we're past the 0x80 value then
			 * just write zero. */
			b = b&~is_past_cp1;
			/* If this is index_b (the final block), but not
			 * index_a (the end of the data), then the 64-bit
			 * length didn't fit into index_a and we're having to
			 * add an extra block of zeros. */
			b &= ~is_block_b | is_block_a;

			/* The final bytes of one of the blocks contain the
			 * length. */
			if (j >= md_block_size - md_length_size) {
				/* If this is index_b, write a length byte. */
				b = (b&~is_block_b) | (is_block_b&length_bytes[j - (md_block_size - md_length_size)]);
			}
			block[j] = b;
		}

		md_transform(md_state.c, block);
		md_final_raw(md_state.c, block);
		/* If this is index_b, copy the hash value to |mac_out|. */
		for (j = 0; j < md_size; j++)
			mac_out[j] |= block[j]&is_block_b;
	}

	if ((md_ctx = EVP_MD_CTX_new()) == NULL)
		return 0;
	if (!EVP_DigestInit_ex(md_ctx, EVP_MD_CTX_md(ctx), NULL /* engine */)) {
		EVP_MD_CTX_free(md_ctx);
		return 0;
	}

	/* Complete the HMAC in the standard manner: XORing the already
	 * 0x36-masked (ipad) key with 0x6a converts it into the 0x5c-masked
	 * (opad) key, since 0x36 ^ 0x5c == 0x6a. */
	for (i = 0; i < md_block_size; i++)
		hmac_pad[i] ^= 0x6a;

	EVP_DigestUpdate(md_ctx, hmac_pad, md_block_size);
	EVP_DigestUpdate(md_ctx, mac_out, md_size);

	EVP_DigestFinal(md_ctx, md_out, &md_out_size_u);
	if (md_out_size)
		*md_out_size = md_out_size_u;
	EVP_MD_CTX_free(md_ctx);

	return 1;
}