/* $OpenBSD: tls12_record_layer.c,v 1.4 2020/09/16 17:15:01 jsing Exp $ */
/*
 * Copyright (c) 2020 Joel Sing <jsing@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <stdlib.h>

#include <openssl/evp.h>

#include "ssl_locl.h"

struct tls12_record_layer {
	uint16_t version;
	int dtls;

	uint16_t read_epoch;
	uint16_t write_epoch;

	int read_stream_mac;
	int write_stream_mac;

	/*
	 * XXX - for now these are just pointers to externally managed
	 * structs/memory. These should eventually be owned by the record layer.
	 */
	SSL_AEAD_CTX *read_aead_ctx;
	SSL_AEAD_CTX *write_aead_ctx;

	EVP_CIPHER_CTX *read_cipher_ctx;
	EVP_MD_CTX *read_hash_ctx;
	EVP_CIPHER_CTX *write_cipher_ctx;
	EVP_MD_CTX *write_hash_ctx;

	uint8_t *read_seq_num;
	uint8_t *write_seq_num;
};

struct tls12_record_layer *
tls12_record_layer_new(void)
{
	struct tls12_record_layer *rl;

	if ((rl = calloc(1, sizeof(struct tls12_record_layer))) == NULL)
		return NULL;

	return rl;
}

void
tls12_record_layer_free(struct tls12_record_layer *rl)
{
	freezero(rl, sizeof(struct tls12_record_layer));
}

void
tls12_record_layer_set_version(struct tls12_record_layer *rl, uint16_t version)
{
	rl->version = version;
	rl->dtls = (version == DTLS1_VERSION);
}

void
tls12_record_layer_set_read_epoch(struct tls12_record_layer *rl, uint16_t epoch)
{
	rl->read_epoch = epoch;
}

void
tls12_record_layer_set_write_epoch(struct tls12_record_layer *rl, uint16_t epoch)
{
	rl->write_epoch = epoch;
}

static void
tls12_record_layer_set_read_state(struct tls12_record_layer *rl,
    SSL_AEAD_CTX *aead_ctx, EVP_CIPHER_CTX *cipher_ctx, EVP_MD_CTX *hash_ctx,
    int stream_mac)
{
	rl->read_aead_ctx = aead_ctx;

	rl->read_cipher_ctx = cipher_ctx;
	rl->read_hash_ctx = hash_ctx;
	rl->read_stream_mac = stream_mac;
}

static void
tls12_record_layer_set_write_state(struct tls12_record_layer *rl,
    SSL_AEAD_CTX *aead_ctx, EVP_CIPHER_CTX *cipher_ctx, EVP_MD_CTX *hash_ctx,
    int stream_mac)
{
	rl->write_aead_ctx = aead_ctx;

	rl->write_cipher_ctx = cipher_ctx;
	rl->write_hash_ctx = hash_ctx;
	rl->write_stream_mac = stream_mac;
}

void
tls12_record_layer_clear_read_state(struct tls12_record_layer *rl)
{
	tls12_record_layer_set_read_state(rl, NULL, NULL, NULL, 0);
	rl->read_seq_num = NULL;
}

void
tls12_record_layer_clear_write_state(struct tls12_record_layer *rl)
{
	tls12_record_layer_set_write_state(rl, NULL, NULL, NULL, 0);
	rl->write_seq_num = NULL;
}

void
tls12_record_layer_set_read_seq_num(struct tls12_record_layer *rl,
    uint8_t *seq_num)
{
	rl->read_seq_num = seq_num;
}

void
tls12_record_layer_set_write_seq_num(struct tls12_record_layer *rl,
    uint8_t *seq_num)
{
	rl->write_seq_num = seq_num;
}

int
tls12_record_layer_set_read_aead(struct tls12_record_layer *rl,
    SSL_AEAD_CTX *aead_ctx)
{
	tls12_record_layer_set_read_state(rl, aead_ctx, NULL, NULL, 0);

	return 1;
}

int
tls12_record_layer_set_write_aead(struct tls12_record_layer *rl,
    SSL_AEAD_CTX *aead_ctx)
{
	tls12_record_layer_set_write_state(rl, aead_ctx, NULL, NULL, 0);

	return 1;
}

int
tls12_record_layer_set_read_cipher_hash(struct tls12_record_layer *rl,
    EVP_CIPHER_CTX *cipher_ctx, EVP_MD_CTX *hash_ctx, int stream_mac)
{
	tls12_record_layer_set_read_state(rl, NULL, cipher_ctx, hash_ctx,
	    stream_mac);

	return 1;
}

int
tls12_record_layer_set_write_cipher_hash(struct tls12_record_layer *rl,
    EVP_CIPHER_CTX *cipher_ctx, EVP_MD_CTX *hash_ctx, int stream_mac)
{
	tls12_record_layer_set_write_state(rl, NULL, cipher_ctx, hash_ctx,
	    stream_mac);

	return 1;
}

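/*
 * Write out the record sequence number - for DTLS this is the 16-bit epoch
 * followed by the low 48 bits of the sequence number, while for TLS it is
 * the full 64-bit sequence number.
 */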
static int
tls12_record_layer_build_seq_num(struct tls12_record_layer *rl, CBB *cbb,
    uint16_t epoch, uint8_t *seq_num, size_t seq_num_len)
{
	CBS seq;

	CBS_init(&seq, seq_num, seq_num_len);

	if (rl->dtls) {
		if (!CBB_add_u16(cbb, epoch))
			return 0;
		if (!CBS_skip(&seq, 2))
			return 0;
	}

	return CBB_add_bytes(cbb, CBS_data(&seq), CBS_len(&seq));
}

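/*
 * The pseudo-header used as MAC input and as AEAD additional data consists
 * of the sequence number (with epoch for DTLS), the content type, the
 * protocol version and the record length - 13 bytes in total.
 */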
static int
tls12_record_layer_pseudo_header(struct tls12_record_layer *rl,
    uint8_t content_type, uint16_t record_len, uint16_t epoch, uint8_t *seq_num,
    size_t seq_num_len, uint8_t **out, size_t *out_len)
{
	CBB cbb;

	*out = NULL;
	*out_len = 0;

	/* Build the pseudo-header used for MAC/AEAD. */
	if (!CBB_init(&cbb, 13))
		goto err;

	if (!tls12_record_layer_build_seq_num(rl, &cbb, epoch,
	    seq_num, seq_num_len))
		goto err;
	if (!CBB_add_u8(&cbb, content_type))
		goto err;
	if (!CBB_add_u16(&cbb, rl->version))
		goto err;
	if (!CBB_add_u16(&cbb, record_len))
		goto err;

	if (!CBB_finish(&cbb, out, out_len))
		goto err;

	return 1;

 err:
	CBB_cleanup(&cbb);

	return 0;
}

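/*
 * Compute the record MAC over the pseudo-header and content, using a copy
 * of the current hash context. For stream MACs the updated context is
 * copied back so that the MAC state carries over to the next record.
 */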
static int
tls12_record_layer_mac(struct tls12_record_layer *rl, CBB *cbb,
    EVP_MD_CTX *hash_ctx, int stream_mac, uint16_t epoch, uint8_t *seq_num,
    size_t seq_num_len, uint8_t content_type, const uint8_t *content,
    size_t content_len, size_t *out_len)
{
	EVP_MD_CTX *mac_ctx = NULL;
	uint8_t *header = NULL;
	size_t header_len;
	size_t mac_len;
	uint8_t *mac;
	int ret = 0;

	if ((mac_ctx = EVP_MD_CTX_new()) == NULL)
		goto err;
	if (!EVP_MD_CTX_copy(mac_ctx, hash_ctx))
		goto err;

	if (!tls12_record_layer_pseudo_header(rl, content_type, content_len,
	    epoch, seq_num, seq_num_len, &header, &header_len))
		goto err;

	if (EVP_DigestSignUpdate(mac_ctx, header, header_len) <= 0)
		goto err;
	if (EVP_DigestSignUpdate(mac_ctx, content, content_len) <= 0)
		goto err;
	if (EVP_DigestSignFinal(mac_ctx, NULL, &mac_len) <= 0)
		goto err;
	if (!CBB_add_space(cbb, &mac, mac_len))
		goto err;
	if (EVP_DigestSignFinal(mac_ctx, mac, &mac_len) <= 0)
		goto err;

	if (stream_mac) {
		if (!EVP_MD_CTX_copy(hash_ctx, mac_ctx))
			goto err;
	}

	*out_len = mac_len;
	ret = 1;

 err:
	EVP_MD_CTX_free(mac_ctx);
	free(header);

	return ret;
}

static int
tls12_record_layer_write_mac(struct tls12_record_layer *rl, CBB *cbb,
    uint8_t content_type, const uint8_t *content, size_t content_len,
    size_t *out_len)
{
	return tls12_record_layer_mac(rl, cbb, rl->write_hash_ctx,
	    rl->write_stream_mac, rl->write_epoch, rl->write_seq_num,
	    SSL3_SEQUENCE_SIZE, content_type, content, content_len, out_len);
}

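/*
 * Construct the nonce for AEADs that concatenate the implicit (fixed)
 * nonce with an explicit part taken from the sequence number, as the
 * AES-GCM cipher suites do.
 */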
static int
tls12_record_layer_aead_concat_nonce(struct tls12_record_layer *rl,
    const SSL_AEAD_CTX *aead, uint8_t *seq_num, uint8_t **out, size_t *out_len)
{
	CBB cbb;

	if (aead->variable_nonce_len > SSL3_SEQUENCE_SIZE)
		return 0;

	/* Fixed nonce and variable nonce (sequence number) are concatenated. */
	if (!CBB_init(&cbb, 16))
		goto err;
	if (!CBB_add_bytes(&cbb, aead->fixed_nonce,
	    aead->fixed_nonce_len))
		goto err;
	if (!CBB_add_bytes(&cbb, seq_num, aead->variable_nonce_len))
		goto err;
	if (!CBB_finish(&cbb, out, out_len))
		goto err;

	return 1;

 err:
	CBB_cleanup(&cbb);

	return 0;
}

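/*
 * Construct the nonce for AEADs that XOR the sequence number into the
 * fixed nonce, as ChaCha20-Poly1305 does (and as TLSv1.3 does for all
 * AEADs).
 */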
static int
tls12_record_layer_aead_xored_nonce(struct tls12_record_layer *rl,
    const SSL_AEAD_CTX *aead, uint8_t *seq_num, uint8_t **out, size_t *out_len)
{
	uint8_t *nonce = NULL;
	size_t nonce_len = 0;
	uint8_t *pad;
	CBB cbb;
	int i;

	if (aead->variable_nonce_len > SSL3_SEQUENCE_SIZE)
		return 0;
	if (aead->fixed_nonce_len < aead->variable_nonce_len)
		return 0;

	/*
	 * The variable nonce (sequence number) is zero padded on the left to
	 * the fixed nonce length, before the fixed nonce is XOR'd in.
	 */
	if (!CBB_init(&cbb, 16))
		goto err;
	if (!CBB_add_space(&cbb, &pad,
	    aead->fixed_nonce_len - aead->variable_nonce_len))
		goto err;
	if (!CBB_add_bytes(&cbb, seq_num, aead->variable_nonce_len))
		goto err;
	if (!CBB_finish(&cbb, &nonce, &nonce_len))
		goto err;

	for (i = 0; i < aead->fixed_nonce_len; i++)
		nonce[i] ^= aead->fixed_nonce[i];

	*out = nonce;
	*out_len = nonce_len;

	return 1;

 err:
	CBB_cleanup(&cbb);
	freezero(nonce, nonce_len);

	return 0;
}

static int
tls12_record_layer_seal_record_plaintext(struct tls12_record_layer *rl,
    uint8_t content_type, const uint8_t *content, size_t content_len, CBB *out)
{
	if (rl->write_aead_ctx != NULL || rl->write_cipher_ctx != NULL)
		return 0;

	return CBB_add_bytes(out, content, content_len);
}

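/*
 * Seal a record using an AEAD - build the per-record nonce, optionally
 * write the explicit nonce into the record, then encrypt the content with
 * the pseudo-header as additional authenticated data, appending the tag.
 */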
static int
tls12_record_layer_seal_record_protected_aead(struct tls12_record_layer *rl,
    uint8_t content_type, const uint8_t *content, size_t content_len, CBB *out)
{
	const SSL_AEAD_CTX *aead = rl->write_aead_ctx;
	uint8_t *header = NULL, *nonce = NULL;
	size_t header_len = 0, nonce_len = 0;
	size_t enc_record_len, out_len;
	uint16_t epoch = 0;
	uint8_t *enc_data;
	int ret = 0;

	/* XXX - move to nonce allocated in record layer, matching TLSv1.3 */
	if (aead->xor_fixed_nonce) {
		if (!tls12_record_layer_aead_xored_nonce(rl, aead,
		    rl->write_seq_num, &nonce, &nonce_len))
			goto err;
	} else {
		if (!tls12_record_layer_aead_concat_nonce(rl, aead,
		    rl->write_seq_num, &nonce, &nonce_len))
			goto err;
	}

	if (aead->variable_nonce_in_record) {
		/* XXX - length check? */
		if (!CBB_add_bytes(out, rl->write_seq_num, aead->variable_nonce_len))
			goto err;
	}

	if (!tls12_record_layer_pseudo_header(rl, content_type, content_len,
	    epoch, rl->write_seq_num, SSL3_SEQUENCE_SIZE, &header, &header_len))
		goto err;

	/* XXX EVP_AEAD_max_tag_len vs EVP_AEAD_CTX_tag_len. */
	enc_record_len = content_len + aead->tag_len;
	if (enc_record_len > SSL3_RT_MAX_ENCRYPTED_LENGTH)
		goto err;
	if (!CBB_add_space(out, &enc_data, enc_record_len))
		goto err;

	if (!EVP_AEAD_CTX_seal(&aead->ctx, enc_data, &out_len, enc_record_len,
	    nonce, nonce_len, content, content_len, header, header_len))
		goto err;

	if (out_len != enc_record_len)
		goto err;

	ret = 1;

 err:
	freezero(header, header_len);
	freezero(nonce, nonce_len);

	return ret;
}

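/*
 * Seal a record using a cipher and MAC - the explicit IV (CBC mode, except
 * for TLSv1.0), content, MAC and any block cipher padding are concatenated
 * and then encrypted into the record fragment.
 */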
static int
tls12_record_layer_seal_record_protected_cipher(struct tls12_record_layer *rl,
    uint8_t content_type, const uint8_t *content, size_t content_len, CBB *out)
{
	EVP_CIPHER_CTX *enc = rl->write_cipher_ctx;
	size_t mac_len, pad_len;
	int block_size, eiv_len;
	uint8_t *enc_data, *eiv, *pad, pad_val;
	uint8_t *plain = NULL;
	size_t plain_len = 0;
	int ret = 0;
	CBB cbb;

	if (!CBB_init(&cbb, SSL3_RT_MAX_PLAIN_LENGTH))
		goto err;

	/* Add explicit IV if necessary. */
	eiv_len = 0;
	if (rl->version != TLS1_VERSION &&
	    EVP_CIPHER_CTX_mode(enc) == EVP_CIPH_CBC_MODE)
		eiv_len = EVP_CIPHER_CTX_iv_length(enc);
	if (eiv_len < 0 || eiv_len > EVP_MAX_IV_LENGTH)
		goto err;
	if (eiv_len > 0) {
		if (!CBB_add_space(&cbb, &eiv, eiv_len))
			goto err;
		arc4random_buf(eiv, eiv_len);
	}

	if (!CBB_add_bytes(&cbb, content, content_len))
		goto err;

	mac_len = 0;
	if (rl->write_hash_ctx != NULL) {
		if (!tls12_record_layer_write_mac(rl, &cbb, content_type,
		    content, content_len, &mac_len))
			goto err;
	}

	plain_len = (size_t)eiv_len + content_len + mac_len;

	/* Add padding to block size, if necessary. */
	block_size = EVP_CIPHER_CTX_block_size(enc);
	if (block_size < 0 || block_size > EVP_MAX_BLOCK_LENGTH)
		goto err;
	if (block_size > 1) {
		pad_len = block_size - (plain_len % block_size);
		pad_val = pad_len - 1;

		if (pad_len > 255)
			goto err;
		if (!CBB_add_space(&cbb, &pad, pad_len))
			goto err;
		memset(pad, pad_val, pad_len);
	}

	if (!CBB_finish(&cbb, &plain, &plain_len))
		goto err;

	if (plain_len % block_size != 0)
		goto err;
	if (plain_len > SSL3_RT_MAX_ENCRYPTED_LENGTH)
		goto err;

	if (!CBB_add_space(out, &enc_data, plain_len))
		goto err;
	if (!EVP_Cipher(enc, enc_data, plain, plain_len))
		goto err;

	ret = 1;

 err:
	CBB_cleanup(&cbb);
	freezero(plain, plain_len);

	return ret;
}

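/*
 * Seal a TLS/DTLS record - write the record header (content type, version,
 * epoch and sequence number for DTLS, length), protect the fragment using
 * the current write state and increment the write sequence number.
 */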
int
tls12_record_layer_seal_record(struct tls12_record_layer *rl,
    uint8_t content_type, const uint8_t *content, size_t content_len, CBB *cbb)
{
	CBB fragment;

	if (!CBB_add_u8(cbb, content_type))
		return 0;
	if (!CBB_add_u16(cbb, rl->version))
		return 0;
	if (rl->dtls) {
		if (!tls12_record_layer_build_seq_num(rl, cbb,
		    rl->write_epoch, rl->write_seq_num,
		    SSL3_SEQUENCE_SIZE))
			return 0;
	}
	if (!CBB_add_u16_length_prefixed(cbb, &fragment))
		return 0;

	if (rl->write_aead_ctx != NULL) {
		if (!tls12_record_layer_seal_record_protected_aead(rl,
		    content_type, content, content_len, &fragment))
			return 0;
	} else if (rl->write_cipher_ctx != NULL) {
		if (!tls12_record_layer_seal_record_protected_cipher(rl,
		    content_type, content, content_len, &fragment))
			return 0;
	} else {
		if (!tls12_record_layer_seal_record_plaintext(rl,
		    content_type, content, content_len, &fragment))
			return 0;
	}

	if (!CBB_flush(cbb))
		return 0;

	tls1_record_sequence_increment(rl->write_seq_num);

	return 1;
}