1 /*	$NetBSD: cfx.c,v 1.1.1.2 2014/04/24 12:45:29 pettai Exp $	*/
2 
3 /*
4  * Copyright (c) 2003, PADL Software Pty Ltd.
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  *
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * 3. Neither the name of PADL Software nor the names of its contributors
19  *    may be used to endorse or promote products derived from this software
20  *    without specific prior written permission.
21  *
22  * THIS SOFTWARE IS PROVIDED BY PADL SOFTWARE AND CONTRIBUTORS ``AS IS'' AND
23  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25  * ARE DISCLAIMED.  IN NO EVENT SHALL PADL SOFTWARE OR CONTRIBUTORS BE LIABLE
26  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  */
34 
35 #include "gsskrb5_locl.h"
36 
37 /*
38  * Implementation of RFC 4121
39  */
40 
41 #define CFXSentByAcceptor	(1 << 0)
42 #define CFXSealed		(1 << 1)
43 #define CFXAcceptorSubkey	(1 << 2)
44 
45 krb5_error_code
_gsskrb5cfx_wrap_length_cfx(krb5_context context,krb5_crypto crypto,int conf_req_flag,int dce_style,size_t input_length,size_t * output_length,size_t * cksumsize,uint16_t * padlength)46 _gsskrb5cfx_wrap_length_cfx(krb5_context context,
47 			    krb5_crypto crypto,
48 			    int conf_req_flag,
49 			    int dce_style,
50 			    size_t input_length,
51 			    size_t *output_length,
52 			    size_t *cksumsize,
53 			    uint16_t *padlength)
54 {
55     krb5_error_code ret;
56     krb5_cksumtype type;
57 
58     /* 16-byte header is always first */
59     *output_length = sizeof(gss_cfx_wrap_token_desc);
60     *padlength = 0;
61 
62     ret = krb5_crypto_get_checksum_type(context, crypto, &type);
63     if (ret)
64 	return ret;
65 
66     ret = krb5_checksumsize(context, type, cksumsize);
67     if (ret)
68 	return ret;
69 
70     if (conf_req_flag) {
71 	size_t padsize;
72 
73 	/* Header is concatenated with data before encryption */
74 	input_length += sizeof(gss_cfx_wrap_token_desc);
75 
76 	if (dce_style) {
77 		ret = krb5_crypto_getblocksize(context, crypto, &padsize);
78 	} else {
79 		ret = krb5_crypto_getpadsize(context, crypto, &padsize);
80 	}
81 	if (ret) {
82 	    return ret;
83 	}
84 	if (padsize > 1) {
85 	    /* XXX check this */
86 	    *padlength = padsize - (input_length % padsize);
87 
88 	    /* We add the pad ourselves (noted here for completeness only) */
89 	    input_length += *padlength;
90 	}
91 
92 	*output_length += krb5_get_wrapped_length(context,
93 						  crypto, input_length);
94     } else {
95 	/* Checksum is concatenated with data */
96 	*output_length += input_length + *cksumsize;
97     }
98 
99     assert(*output_length > input_length);
100 
101     return 0;
102 }
103 
104 OM_uint32
_gssapi_wrap_size_cfx(OM_uint32 * minor_status,const gsskrb5_ctx ctx,krb5_context context,int conf_req_flag,gss_qop_t qop_req,OM_uint32 req_output_size,OM_uint32 * max_input_size)105 _gssapi_wrap_size_cfx(OM_uint32 *minor_status,
106 		      const gsskrb5_ctx ctx,
107 		      krb5_context context,
108 		      int conf_req_flag,
109 		      gss_qop_t qop_req,
110 		      OM_uint32 req_output_size,
111 		      OM_uint32 *max_input_size)
112 {
113     krb5_error_code ret;
114 
115     *max_input_size = 0;
116 
117     /* 16-byte header is always first */
118     if (req_output_size < 16)
119 	return 0;
120     req_output_size -= 16;
121 
122     if (conf_req_flag) {
123 	size_t wrapped_size, sz;
124 
125 	wrapped_size = req_output_size + 1;
126 	do {
127 	    wrapped_size--;
128 	    sz = krb5_get_wrapped_length(context,
129 					 ctx->crypto, wrapped_size);
130 	} while (wrapped_size && sz > req_output_size);
131 	if (wrapped_size == 0)
132 	    return 0;
133 
134 	/* inner header */
135 	if (wrapped_size < 16)
136 	    return 0;
137 
138 	wrapped_size -= 16;
139 
140 	*max_input_size = wrapped_size;
141     } else {
142 	krb5_cksumtype type;
143 	size_t cksumsize;
144 
145 	ret = krb5_crypto_get_checksum_type(context, ctx->crypto, &type);
146 	if (ret)
147 	    return ret;
148 
149 	ret = krb5_checksumsize(context, type, &cksumsize);
150 	if (ret)
151 	    return ret;
152 
153 	if (req_output_size < cksumsize)
154 	    return 0;
155 
156 	/* Checksum is concatenated with data */
157 	*max_input_size = req_output_size - cksumsize;
158     }
159 
160     return 0;
161 }
162 
163 /*
164  * Rotate "rrc" bytes to the front or back
165  */
166 
167 static krb5_error_code
rrc_rotate(void * data,size_t len,uint16_t rrc,krb5_boolean unrotate)168 rrc_rotate(void *data, size_t len, uint16_t rrc, krb5_boolean unrotate)
169 {
170     u_char *tmp, buf[256];
171     size_t left;
172 
173     if (len == 0)
174 	return 0;
175 
176     rrc %= len;
177 
178     if (rrc == 0)
179 	return 0;
180 
181     left = len - rrc;
182 
183     if (rrc <= sizeof(buf)) {
184 	tmp = buf;
185     } else {
186 	tmp = malloc(rrc);
187 	if (tmp == NULL)
188 	    return ENOMEM;
189     }
190 
191     if (unrotate) {
192 	memcpy(tmp, data, rrc);
193 	memmove(data, (u_char *)data + rrc, left);
194 	memcpy((u_char *)data + left, tmp, rrc);
195     } else {
196 	memcpy(tmp, (u_char *)data + left, rrc);
197 	memmove((u_char *)data + rrc, data, left);
198 	memcpy(data, tmp, rrc);
199     }
200 
201     if (rrc > sizeof(buf))
202 	free(tmp);
203 
204     return 0;
205 }
206 
207 gss_iov_buffer_desc *
_gk_find_buffer(gss_iov_buffer_desc * iov,int iov_count,OM_uint32 type)208 _gk_find_buffer(gss_iov_buffer_desc *iov, int iov_count, OM_uint32 type)
209 {
210     int i;
211 
212     for (i = 0; i < iov_count; i++)
213 	if (type == GSS_IOV_BUFFER_TYPE(iov[i].type))
214 	    return &iov[i];
215     return NULL;
216 }
217 
218 OM_uint32
_gk_allocate_buffer(OM_uint32 * minor_status,gss_iov_buffer_desc * buffer,size_t size)219 _gk_allocate_buffer(OM_uint32 *minor_status, gss_iov_buffer_desc *buffer, size_t size)
220 {
221     if (buffer->type & GSS_IOV_BUFFER_FLAG_ALLOCATED) {
222 	if (buffer->buffer.length == size)
223 	    return GSS_S_COMPLETE;
224 	free(buffer->buffer.value);
225     }
226 
227     buffer->buffer.value = malloc(size);
228     buffer->buffer.length = size;
229     if (buffer->buffer.value == NULL) {
230 	*minor_status = ENOMEM;
231 	return GSS_S_FAILURE;
232     }
233     buffer->type |= GSS_IOV_BUFFER_FLAG_ALLOCATED;
234 
235     return GSS_S_COMPLETE;
236 }
237 
238 
239 OM_uint32
_gk_verify_buffers(OM_uint32 * minor_status,const gsskrb5_ctx ctx,const gss_iov_buffer_desc * header,const gss_iov_buffer_desc * padding,const gss_iov_buffer_desc * trailer)240 _gk_verify_buffers(OM_uint32 *minor_status,
241 		   const gsskrb5_ctx ctx,
242 		   const gss_iov_buffer_desc *header,
243 		   const gss_iov_buffer_desc *padding,
244 		   const gss_iov_buffer_desc *trailer)
245 {
246     if (header == NULL) {
247 	*minor_status = EINVAL;
248 	return GSS_S_FAILURE;
249     }
250 
251     if (IS_DCE_STYLE(ctx)) {
252 	/*
253 	 * In DCE style mode we reject having a padding or trailer buffer
254 	 */
255 	if (padding) {
256 	    *minor_status = EINVAL;
257 	    return GSS_S_FAILURE;
258 	}
259 	if (trailer) {
260 	    *minor_status = EINVAL;
261 	    return GSS_S_FAILURE;
262 	}
263     } else {
264 	/*
265 	 * In non-DCE style mode we require having a padding buffer
266 	 */
267 	if (padding == NULL) {
268 	    *minor_status = EINVAL;
269 	    return GSS_S_FAILURE;
270 	}
271     }
272 
273     *minor_status = 0;
274     return GSS_S_COMPLETE;
275 }
276 
/*
 * Wrap (seal or just sign) a message supplied as an IOV buffer list,
 * producing an RFC 4121 (CFX) Wrap token in place.
 *
 * The HEADER buffer receives the 16-byte CFX token header plus any
 * krb5 crypto header; DATA buffers are encrypted (when conf_req_flag)
 * or checksummed; SIGN_ONLY buffers are integrity-protected only.
 * With no TRAILER buffer (DCE style), the trailer material is rotated
 * into the header via the token's RRC field instead.
 *
 * minor_status  - mech-specific error code on failure
 * ctx           - established gsskrb5 context (crypto + sequence state)
 * conf_req_flag - non-zero requests confidentiality (encryption)
 * conf_state    - out: whether confidentiality was applied (may be NULL)
 */
OM_uint32
_gssapi_wrap_cfx_iov(OM_uint32 *minor_status,
		     gsskrb5_ctx ctx,
		     krb5_context context,
		     int conf_req_flag,
		     int *conf_state,
		     gss_iov_buffer_desc *iov,
		     int iov_count)
{
    OM_uint32 major_status, junk;
    gss_iov_buffer_desc *header, *trailer, *padding;
    size_t gsshsize, k5hsize;	/* GSS header size, krb5 crypto header size */
    size_t gsstsize, k5tsize;	/* GSS trailer size, krb5 crypto trailer size */
    size_t rrc = 0, ec = 0;	/* RFC 4121 RRC and EC field values */
    int i;
    gss_cfx_wrap_token token;
    krb5_error_code ret;
    int32_t seq_number;
    unsigned usage;
    krb5_crypto_iov *data = NULL;

    header = _gk_find_buffer(iov, iov_count, GSS_IOV_BUFFER_TYPE_HEADER);
    if (header == NULL) {
	*minor_status = EINVAL;
	return GSS_S_FAILURE;
    }

    /* CFX never emits explicit pad bytes; report zero padding. */
    padding = _gk_find_buffer(iov, iov_count, GSS_IOV_BUFFER_TYPE_PADDING);
    if (padding != NULL) {
	padding->buffer.length = 0;
    }

    trailer = _gk_find_buffer(iov, iov_count, GSS_IOV_BUFFER_TYPE_TRAILER);

    major_status = _gk_verify_buffers(minor_status, ctx, header, padding, trailer);
    if (major_status != GSS_S_COMPLETE) {
	    return major_status;
    }

    if (conf_req_flag) {
	size_t k5psize = 0;	/* krb5 pad needed for this message */
	size_t k5pbase = 0;	/* krb5 pad granularity */
	size_t k5bsize = 0;	/* cipher block size (DCE padding) */
	size_t size = 0;	/* total plaintext length */

	/* Total the DATA buffers; SIGN_ONLY data is not encrypted. */
	for (i = 0; i < iov_count; i++) {
	    switch (GSS_IOV_BUFFER_TYPE(iov[i].type)) {
	    case GSS_IOV_BUFFER_TYPE_DATA:
		size += iov[i].buffer.length;
		break;
	    default:
		break;
	    }
	}

	/* The encrypted copy of the token header is part of the plaintext. */
	size += sizeof(gss_cfx_wrap_token_desc);

	*minor_status = krb5_crypto_length(context, ctx->crypto,
					   KRB5_CRYPTO_TYPE_HEADER,
					   &k5hsize);
	if (*minor_status)
	    return GSS_S_FAILURE;

	*minor_status = krb5_crypto_length(context, ctx->crypto,
					   KRB5_CRYPTO_TYPE_TRAILER,
					   &k5tsize);
	if (*minor_status)
	    return GSS_S_FAILURE;

	*minor_status = krb5_crypto_length(context, ctx->crypto,
					   KRB5_CRYPTO_TYPE_PADDING,
					   &k5pbase);
	if (*minor_status)
	    return GSS_S_FAILURE;

	if (k5pbase > 1) {
	    k5psize = k5pbase - (size % k5pbase);
	} else {
	    k5psize = 0;
	}

	/* EC (extra count) is the number of filler bytes: a full cipher
	 * block in DCE mode when no pad is otherwise needed, else the
	 * krb5 pad amount. */
	if (k5psize == 0 && IS_DCE_STYLE(ctx)) {
	    *minor_status = krb5_crypto_getblocksize(context, ctx->crypto,
						     &k5bsize);
	    if (*minor_status)
		return GSS_S_FAILURE;
	    ec = k5bsize;
	} else {
	    ec = k5psize;
	}

	gsshsize = sizeof(gss_cfx_wrap_token_desc) + k5hsize;
	gsstsize = sizeof(gss_cfx_wrap_token_desc) + ec + k5tsize;
    } else {
	/* Integrity-only tokens are not defined for DCE style. */
	if (IS_DCE_STYLE(ctx)) {
	    *minor_status = EINVAL;
	    return GSS_S_FAILURE;
	}

	k5hsize = 0;
	*minor_status = krb5_crypto_length(context, ctx->crypto,
					   KRB5_CRYPTO_TYPE_CHECKSUM,
					   &k5tsize);
	if (*minor_status)
	    return GSS_S_FAILURE;

	gsshsize = sizeof(gss_cfx_wrap_token_desc);
	gsstsize = k5tsize;
    }

    /*
     * Size (or allocate) the trailer buffer.  With no trailer buffer
     * the trailer material lives at the end of the header instead, and
     * RRC records how far it was rotated (RFC 4121 section 4.2.5).
     */

    if (trailer == NULL) {
	rrc = gsstsize;
	if (IS_DCE_STYLE(ctx))
	    rrc -= ec;
	gsshsize += gsstsize;
	gsstsize = 0;
    } else if (GSS_IOV_BUFFER_FLAGS(trailer->type) & GSS_IOV_BUFFER_FLAG_ALLOCATE) {
	major_status = _gk_allocate_buffer(minor_status, trailer, gsstsize);
	if (major_status)
	    goto failure;
    } else if (trailer->buffer.length < gsstsize) {
	*minor_status = KRB5_BAD_MSIZE;
	major_status = GSS_S_FAILURE;
	goto failure;
    } else
	trailer->buffer.length = gsstsize;

    /*
     * Size (or allocate) the header buffer.
     */

    if (GSS_IOV_BUFFER_FLAGS(header->type) & GSS_IOV_BUFFER_FLAG_ALLOCATE) {
	major_status = _gk_allocate_buffer(minor_status, header, gsshsize);
	if (major_status != GSS_S_COMPLETE)
	    goto failure;
    } else if (header->buffer.length < gsshsize) {
	*minor_status = KRB5_BAD_MSIZE;
	major_status = GSS_S_FAILURE;
	goto failure;
    } else
	header->buffer.length = gsshsize;

    /* Fill in the 16-byte CFX token header (RFC 4121 section 4.2.6.2). */
    token = (gss_cfx_wrap_token)header->buffer.value;

    token->TOK_ID[0] = 0x05;
    token->TOK_ID[1] = 0x04;
    token->Flags     = 0;
    token->Filler    = 0xFF;

    if ((ctx->more_flags & LOCAL) == 0)
	token->Flags |= CFXSentByAcceptor;

    if (ctx->more_flags & ACCEPTOR_SUBKEY)
	token->Flags |= CFXAcceptorSubkey;

    /* Key usage depends on which side of the context we are. */
    if (ctx->more_flags & LOCAL)
	usage = KRB5_KU_USAGE_INITIATOR_SEAL;
    else
	usage = KRB5_KU_USAGE_ACCEPTOR_SEAL;

    if (conf_req_flag) {
	/*
	 * In Wrap tokens with confidentiality, the EC field is
	 * used to encode the size (in bytes) of the random filler.
	 */
	token->Flags |= CFXSealed;
	token->EC[0] = (ec >> 8) & 0xFF;
	token->EC[1] = (ec >> 0) & 0xFF;

    } else {
	/*
	 * In Wrap tokens without confidentiality, the EC field is
	 * used to encode the size (in bytes) of the trailing
	 * checksum.
	 *
	 * This is not used in the checksum calculation itself,
	 * because the checksum length could potentially vary
	 * depending on the data length.
	 */
	token->EC[0] = 0;
	token->EC[1] = 0;
    }

    /*
     * In Wrap tokens that provide for confidentiality, the RRC
     * field in the header contains the hex value 00 00 before
     * encryption.
     *
     * In Wrap tokens that do not provide for confidentiality,
     * both the EC and RRC fields in the appended checksum
     * contain the hex value 00 00 for the purpose of calculating
     * the checksum.
     */
    token->RRC[0] = 0;
    token->RRC[1] = 0;

    /* Stamp and advance the 64-bit sequence number (high half always 0). */
    HEIMDAL_MUTEX_lock(&ctx->ctx_id_mutex);
    krb5_auth_con_getlocalseqnumber(context,
				    ctx->auth_context,
				    &seq_number);
    _gsskrb5_encode_be_om_uint32(0,          &token->SND_SEQ[0]);
    _gsskrb5_encode_be_om_uint32(seq_number, &token->SND_SEQ[4]);
    krb5_auth_con_setlocalseqnumber(context,
				    ctx->auth_context,
				    ++seq_number);
    HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);

    /* One krb5 IOV slot per GSS buffer plus header, E"header", trailer. */
    data = calloc(iov_count + 3, sizeof(data[0]));
    if (data == NULL) {
	*minor_status = ENOMEM;
	major_status = GSS_S_FAILURE;
	goto failure;
    }

    if (conf_req_flag) {
	/*
	  plain packet:

	  {"header" | encrypt(plaintext-data | ec-padding | E"header")}

	  Expanded, this is with with RRC = 0:

	  {"header" | krb5-header | plaintext-data | ec-padding | E"header" | krb5-trailer }

	  In DCE-RPC mode == no trailer: RRC = gss "trailer" == length(ec-padding | E"header" | krb5-trailer)

	  {"header" | ec-padding | E"header" | krb5-trailer | krb5-header | plaintext-data  }
	 */

	/* krb5 crypto header lives at the very end of the GSS header. */
	i = 0;
	data[i].flags = KRB5_CRYPTO_TYPE_HEADER;
	data[i].data.data = ((uint8_t *)header->buffer.value) + header->buffer.length - k5hsize;
	data[i].data.length = k5hsize;

	/* Map each caller buffer onto a krb5 crypto IOV slot. */
	for (i = 1; i < iov_count + 1; i++) {
	    switch (GSS_IOV_BUFFER_TYPE(iov[i - 1].type)) {
	    case GSS_IOV_BUFFER_TYPE_DATA:
		data[i].flags = KRB5_CRYPTO_TYPE_DATA;
		break;
	    case GSS_IOV_BUFFER_TYPE_SIGN_ONLY:
		data[i].flags = KRB5_CRYPTO_TYPE_SIGN_ONLY;
		break;
	    default:
		data[i].flags = KRB5_CRYPTO_TYPE_EMPTY;
		break;
	    }
	    data[i].data.length = iov[i - 1].buffer.length;
	    data[i].data.data = iov[i - 1].buffer.value;
	}

	/*
	 * Any necessary padding is added here to ensure that the
	 * encrypted token header is always at the end of the
	 * ciphertext.
	 */

	/* encrypted CFX header in trailer (or after the header if in
	   DCE mode). Copy in header into E"header"
	*/
	data[i].flags = KRB5_CRYPTO_TYPE_DATA;
	if (trailer)
	    data[i].data.data = trailer->buffer.value;
	else
	    data[i].data.data = ((uint8_t *)header->buffer.value) + sizeof(*token);

	/* ec bytes of 0xFF filler, then the copy of the token header. */
	data[i].data.length = ec + sizeof(*token);
	memset(data[i].data.data, 0xFF, ec);
	memcpy(((uint8_t *)data[i].data.data) + ec, token, sizeof(*token));
	i++;

	/* Kerberos trailer comes after the gss trailer */
	data[i].flags = KRB5_CRYPTO_TYPE_TRAILER;
	data[i].data.data = ((uint8_t *)data[i-1].data.data) + ec + sizeof(*token);
	data[i].data.length = k5tsize;
	i++;

	ret = krb5_encrypt_iov_ivec(context, ctx->crypto, usage, data, i, NULL);
	if (ret != 0) {
	    *minor_status = ret;
	    major_status = GSS_S_FAILURE;
	    goto failure;
	}

	/* Patch RRC into the (unencrypted) header after encryption. */
	if (rrc) {
	    token->RRC[0] = (rrc >> 8) & 0xFF;
	    token->RRC[1] = (rrc >> 0) & 0xFF;
	}

    } else {
	/*
	  plain packet:

	  {data | "header" | gss-trailer (krb5 checksum)

	  don't do RRC != 0

	 */

	for (i = 0; i < iov_count; i++) {
	    switch (GSS_IOV_BUFFER_TYPE(iov[i].type)) {
	    case GSS_IOV_BUFFER_TYPE_DATA:
		data[i].flags = KRB5_CRYPTO_TYPE_DATA;
		break;
	    case GSS_IOV_BUFFER_TYPE_SIGN_ONLY:
		data[i].flags = KRB5_CRYPTO_TYPE_SIGN_ONLY;
		break;
	    default:
		data[i].flags = KRB5_CRYPTO_TYPE_EMPTY;
		break;
	    }
	    data[i].data.length = iov[i].buffer.length;
	    data[i].data.data = iov[i].buffer.value;
	}

	/* The token header itself is covered by the checksum. */
	data[i].flags = KRB5_CRYPTO_TYPE_DATA;
	data[i].data.data = header->buffer.value;
	data[i].data.length = sizeof(gss_cfx_wrap_token_desc);
	i++;

	data[i].flags = KRB5_CRYPTO_TYPE_CHECKSUM;
	if (trailer) {
		data[i].data.data = trailer->buffer.value;
	} else {
		data[i].data.data = (uint8_t *)header->buffer.value +
				     sizeof(gss_cfx_wrap_token_desc);
	}
	data[i].data.length = k5tsize;
	i++;

	ret = krb5_create_checksum_iov(context, ctx->crypto, usage, data, i, NULL);
	if (ret) {
	    *minor_status = ret;
	    major_status = GSS_S_FAILURE;
	    goto failure;
	}

	if (rrc) {
	    token->RRC[0] = (rrc >> 8) & 0xFF;
	    token->RRC[1] = (rrc >> 0) & 0xFF;
	}

	/* EC carries the checksum length for unsealed tokens. */
	token->EC[0] =  (k5tsize >> 8) & 0xFF;
	token->EC[1] =  (k5tsize >> 0) & 0xFF;
    }

    if (conf_state != NULL)
	*conf_state = conf_req_flag;

    free(data);

    *minor_status = 0;
    return GSS_S_COMPLETE;

 failure:
    if (data)
	free(data);

    gss_release_iov_buffer(&junk, iov, iov_count);

    return major_status;
}
642 
643 /* This is slowpath */
644 static OM_uint32
unrotate_iov(OM_uint32 * minor_status,size_t rrc,gss_iov_buffer_desc * iov,int iov_count)645 unrotate_iov(OM_uint32 *minor_status, size_t rrc, gss_iov_buffer_desc *iov, int iov_count)
646 {
647     uint8_t *p, *q;
648     size_t len = 0, skip;
649     int i;
650 
651     for (i = 0; i < iov_count; i++)
652 	if (GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_DATA ||
653 	    GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_PADDING ||
654 	    GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_TRAILER)
655 	    len += iov[i].buffer.length;
656 
657     p = malloc(len);
658     if (p == NULL) {
659 	*minor_status = ENOMEM;
660 	return GSS_S_FAILURE;
661     }
662     q = p;
663 
664     /* copy up */
665 
666     for (i = 0; i < iov_count; i++) {
667 	if (GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_DATA ||
668 	    GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_PADDING ||
669 	    GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_TRAILER)
670 	{
671 	    memcpy(q, iov[i].buffer.value, iov[i].buffer.length);
672 	    q += iov[i].buffer.length;
673 	}
674     }
675     assert((size_t)(q - p) == len);
676 
677     /* unrotate first part */
678     q = p + rrc;
679     skip = rrc;
680     for (i = 0; i < iov_count; i++) {
681 	if (GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_DATA ||
682 	    GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_PADDING ||
683 	    GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_TRAILER)
684 	{
685 	    if (iov[i].buffer.length <= skip) {
686 		skip -= iov[i].buffer.length;
687 	    } else {
688 		memcpy(((uint8_t *)iov[i].buffer.value) + skip, q, iov[i].buffer.length - skip);
689 		q += iov[i].buffer.length - skip;
690 		skip = 0;
691 	    }
692 	}
693     }
694     /* copy trailer */
695     q = p;
696     skip = rrc;
697     for (i = 0; i < iov_count; i++) {
698 	if (GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_DATA ||
699 	    GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_PADDING ||
700 	    GSS_IOV_BUFFER_TYPE(iov[i].type) == GSS_IOV_BUFFER_TYPE_TRAILER)
701 	{
702 	    memcpy(q, iov[i].buffer.value, min(iov[i].buffer.length, skip));
703 	    if (iov[i].buffer.length > skip)
704 		break;
705 	    skip -= iov[i].buffer.length;
706 	    q += iov[i].buffer.length;
707 	}
708     }
709     return GSS_S_COMPLETE;
710 }
711 
712 
/*
 * Unwrap (decrypt/verify) an RFC 4121 (CFX) Wrap token supplied as an
 * IOV buffer list, in place.  The HEADER buffer must hold the CFX
 * token header; DATA/SIGN_ONLY buffers are decrypted or verified
 * against the token checksum.  With no TRAILER buffer the trailer
 * material is expected rotated into the header (RRC != 0, DCE style).
 *
 * Returns GSS_S_DEFECTIVE_TOKEN on malformed tokens, GSS_S_BAD_MIC on
 * integrity failure, GSS_S_UNSEQ_TOKEN for 64-bit sequence numbers.
 */
OM_uint32
_gssapi_unwrap_cfx_iov(OM_uint32 *minor_status,
		       gsskrb5_ctx ctx,
		       krb5_context context,
		       int *conf_state,
		       gss_qop_t *qop_state,
		       gss_iov_buffer_desc *iov,
		       int iov_count)
{
    OM_uint32 seq_number_lo, seq_number_hi, major_status, junk;
    gss_iov_buffer_desc *header, *trailer, *padding;
    gss_cfx_wrap_token token, ttoken;
    u_char token_flags;
    krb5_error_code ret;
    unsigned usage;
    uint16_t ec, rrc;
    krb5_crypto_iov *data = NULL;
    int i, j;

    *minor_status = 0;

    header = _gk_find_buffer(iov, iov_count, GSS_IOV_BUFFER_TYPE_HEADER);
    if (header == NULL) {
	*minor_status = EINVAL;
	return GSS_S_FAILURE;
    }

    if (header->buffer.length < sizeof(*token)) /* we check exact below */
	return GSS_S_DEFECTIVE_TOKEN;

    /* CFX tokens carry no explicit padding. */
    padding = _gk_find_buffer(iov, iov_count, GSS_IOV_BUFFER_TYPE_PADDING);
    if (padding != NULL && padding->buffer.length != 0) {
	*minor_status = EINVAL;
	return GSS_S_FAILURE;
    }

    trailer = _gk_find_buffer(iov, iov_count, GSS_IOV_BUFFER_TYPE_TRAILER);

    major_status = _gk_verify_buffers(minor_status, ctx, header, padding, trailer);
    if (major_status != GSS_S_COMPLETE) {
	    return major_status;
    }

    token = (gss_cfx_wrap_token)header->buffer.value;

    if (token->TOK_ID[0] != 0x05 || token->TOK_ID[1] != 0x04)
	return GSS_S_DEFECTIVE_TOKEN;

    /* Ignore unknown flags */
    token_flags = token->Flags &
	(CFXSentByAcceptor | CFXSealed | CFXAcceptorSubkey);

    /* A token flagged as sent by the acceptor must come from our peer. */
    if (token_flags & CFXSentByAcceptor) {
	if ((ctx->more_flags & LOCAL) == 0)
	    return GSS_S_DEFECTIVE_TOKEN;
    }

    /* The acceptor-subkey flag must match the negotiated context state. */
    if (ctx->more_flags & ACCEPTOR_SUBKEY) {
	if ((token_flags & CFXAcceptorSubkey) == 0)
	    return GSS_S_DEFECTIVE_TOKEN;
    } else {
	if (token_flags & CFXAcceptorSubkey)
	    return GSS_S_DEFECTIVE_TOKEN;
    }

    if (token->Filler != 0xFF)
	return GSS_S_DEFECTIVE_TOKEN;

    if (conf_state != NULL)
	*conf_state = (token_flags & CFXSealed) ? 1 : 0;

    /* EC = extra count (filler or checksum size), RRC = rotation count. */
    ec  = (token->EC[0]  << 8) | token->EC[1];
    rrc = (token->RRC[0] << 8) | token->RRC[1];

    /*
     * Check sequence number
     */
    _gsskrb5_decode_be_om_uint32(&token->SND_SEQ[0], &seq_number_hi);
    _gsskrb5_decode_be_om_uint32(&token->SND_SEQ[4], &seq_number_lo);
    if (seq_number_hi) {
	/* no support for 64-bit sequence numbers */
	*minor_status = ERANGE;
	return GSS_S_UNSEQ_TOKEN;
    }

    HEIMDAL_MUTEX_lock(&ctx->ctx_id_mutex);
    ret = _gssapi_msg_order_check(ctx->order, seq_number_lo);
    if (ret != 0) {
	*minor_status = 0;
	HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);
	return ret;
    }
    HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);

    /*
     * Decrypt and/or verify checksum
     */

    /* Key usage mirrors the peer's sealing direction. */
    if (ctx->more_flags & LOCAL) {
	usage = KRB5_KU_USAGE_ACCEPTOR_SEAL;
    } else {
	usage = KRB5_KU_USAGE_INITIATOR_SEAL;
    }

    /* One krb5 IOV slot per GSS buffer plus header, E"header", trailer. */
    data = calloc(iov_count + 3, sizeof(data[0]));
    if (data == NULL) {
	*minor_status = ENOMEM;
	major_status = GSS_S_FAILURE;
	goto failure;
    }

    if (token_flags & CFXSealed) {
	size_t k5tsize, k5hsize;

	krb5_crypto_length(context, ctx->crypto, KRB5_CRYPTO_TYPE_HEADER, &k5hsize);
	krb5_crypto_length(context, ctx->crypto, KRB5_CRYPTO_TYPE_TRAILER, &k5tsize);

	/* Rotate by RRC; bogus to do this in-place XXX */
	/* Check RRC */

	if (trailer == NULL) {
	    /* DCE style: trailer material is rotated into the header. */
	    size_t gsstsize = k5tsize + sizeof(*token);
	    size_t gsshsize = k5hsize + sizeof(*token);

	    if (rrc != gsstsize) {
		major_status = GSS_S_DEFECTIVE_TOKEN;
		goto failure;
	    }

	    if (IS_DCE_STYLE(ctx))
		gsstsize += ec;

	    gsshsize += gsstsize;

	    if (header->buffer.length != gsshsize) {
		major_status = GSS_S_DEFECTIVE_TOKEN;
		goto failure;
	    }
	} else if (trailer->buffer.length != sizeof(*token) + k5tsize) {
	    major_status = GSS_S_DEFECTIVE_TOKEN;
	    goto failure;
	} else if (header->buffer.length != sizeof(*token) + k5hsize) {
	    major_status = GSS_S_DEFECTIVE_TOKEN;
	    goto failure;
	} else if (rrc != 0) {
	    /* go though slowpath */
	    major_status = unrotate_iov(minor_status, rrc, iov, iov_count);
	    if (major_status)
		goto failure;
	}

	/* krb5 crypto header sits at the very end of the GSS header. */
	i = 0;
	data[i].flags = KRB5_CRYPTO_TYPE_HEADER;
	data[i].data.data = ((uint8_t *)header->buffer.value) + header->buffer.length - k5hsize;
	data[i].data.length = k5hsize;
	i++;

	/* Map each caller buffer onto a krb5 crypto IOV slot. */
	for (j = 0; j < iov_count; i++, j++) {
	    switch (GSS_IOV_BUFFER_TYPE(iov[j].type)) {
	    case GSS_IOV_BUFFER_TYPE_DATA:
		data[i].flags = KRB5_CRYPTO_TYPE_DATA;
		break;
	    case GSS_IOV_BUFFER_TYPE_SIGN_ONLY:
		data[i].flags = KRB5_CRYPTO_TYPE_SIGN_ONLY;
		break;
	    default:
		data[i].flags = KRB5_CRYPTO_TYPE_EMPTY;
		break;
	    }
	    data[i].data.length = iov[j].buffer.length;
	    data[i].data.data = iov[j].buffer.value;
	}

	/* encrypted CFX header in trailer (or after the header if in
	   DCE mode). Copy in header into E"header"
	*/
	data[i].flags = KRB5_CRYPTO_TYPE_DATA;
	if (trailer) {
	    data[i].data.data = trailer->buffer.value;
	} else {
	    data[i].data.data = ((uint8_t *)header->buffer.value) +
		header->buffer.length - k5hsize - k5tsize - ec- sizeof(*token);
	}

	data[i].data.length = ec + sizeof(*token);
	/* ttoken: the decrypted copy of the token header, after ec filler. */
	ttoken = (gss_cfx_wrap_token)(((uint8_t *)data[i].data.data) + ec);
	i++;

	/* Kerberos trailer comes after the gss trailer */
	data[i].flags = KRB5_CRYPTO_TYPE_TRAILER;
	data[i].data.data = ((uint8_t *)data[i-1].data.data) + ec + sizeof(*token);
	data[i].data.length = k5tsize;
	i++;

	ret = krb5_decrypt_iov_ivec(context, ctx->crypto, usage, data, i, NULL);
	if (ret != 0) {
	    *minor_status = ret;
	    major_status = GSS_S_FAILURE;
	    goto failure;
	}

	/* The inner copy was encrypted with RRC = 0; patch in the outer
	 * RRC so the constant-time compare below can match. */
	ttoken->RRC[0] = token->RRC[0];
	ttoken->RRC[1] = token->RRC[1];

	/* Check the integrity of the header */
	if (ct_memcmp(ttoken, token, sizeof(*token)) != 0) {
	    major_status = GSS_S_BAD_MIC;
	    goto failure;
	}
    } else {
	/* Unsealed token: EC holds the checksum length. */
	size_t gsstsize = ec;
	size_t gsshsize = sizeof(*token);

	if (trailer == NULL) {
	    /* Check RRC */
	    if (rrc != gsstsize) {
	       *minor_status = EINVAL;
	       major_status = GSS_S_FAILURE;
	       goto failure;
	    }

	    gsshsize += gsstsize;
	    gsstsize = 0;
	} else if (trailer->buffer.length != gsstsize) {
	    major_status = GSS_S_DEFECTIVE_TOKEN;
	    goto failure;
	} else if (rrc != 0) {
	    /* Check RRC */
	    *minor_status = EINVAL;
	    major_status = GSS_S_FAILURE;
	    goto failure;
	}

	if (header->buffer.length != gsshsize) {
	    major_status = GSS_S_DEFECTIVE_TOKEN;
	    goto failure;
	}

	for (i = 0; i < iov_count; i++) {
	    switch (GSS_IOV_BUFFER_TYPE(iov[i].type)) {
	    case GSS_IOV_BUFFER_TYPE_DATA:
		data[i].flags = KRB5_CRYPTO_TYPE_DATA;
		break;
	    case GSS_IOV_BUFFER_TYPE_SIGN_ONLY:
		data[i].flags = KRB5_CRYPTO_TYPE_SIGN_ONLY;
		break;
	    default:
		data[i].flags = KRB5_CRYPTO_TYPE_EMPTY;
		break;
	    }
	    data[i].data.length = iov[i].buffer.length;
	    data[i].data.data = iov[i].buffer.value;
	}

	/* The token header itself is covered by the checksum. */
	data[i].flags = KRB5_CRYPTO_TYPE_DATA;
	data[i].data.data = header->buffer.value;
	data[i].data.length = sizeof(*token);
	i++;

	data[i].flags = KRB5_CRYPTO_TYPE_CHECKSUM;
	if (trailer) {
		data[i].data.data = trailer->buffer.value;
	} else {
		data[i].data.data = (uint8_t *)header->buffer.value +
				     sizeof(*token);
	}
	data[i].data.length = ec;
	i++;

	/* The checksum was computed with EC and RRC zeroed (RFC 4121
	 * section 4.2.4); zero them in place before verifying. */
	token = (gss_cfx_wrap_token)header->buffer.value;
	token->EC[0]  = 0;
	token->EC[1]  = 0;
	token->RRC[0] = 0;
	token->RRC[1] = 0;

	ret = krb5_verify_checksum_iov(context, ctx->crypto, usage, data, i, NULL);
	if (ret) {
	    *minor_status = ret;
	    major_status = GSS_S_FAILURE;
	    goto failure;
	}
    }

    if (qop_state != NULL) {
	*qop_state = GSS_C_QOP_DEFAULT;
    }

    free(data);

    *minor_status = 0;
    return GSS_S_COMPLETE;

 failure:
    if (data)
	free(data);

    gss_release_iov_buffer(&junk, iov, iov_count);

    return major_status;
}
1013 
1014 OM_uint32
_gssapi_wrap_iov_length_cfx(OM_uint32 * minor_status,gsskrb5_ctx ctx,krb5_context context,int conf_req_flag,gss_qop_t qop_req,int * conf_state,gss_iov_buffer_desc * iov,int iov_count)1015 _gssapi_wrap_iov_length_cfx(OM_uint32 *minor_status,
1016 			    gsskrb5_ctx ctx,
1017 			    krb5_context context,
1018 			    int conf_req_flag,
1019 			    gss_qop_t qop_req,
1020 			    int *conf_state,
1021 			    gss_iov_buffer_desc *iov,
1022 			    int iov_count)
1023 {
1024     OM_uint32 major_status;
1025     size_t size;
1026     int i;
1027     gss_iov_buffer_desc *header = NULL;
1028     gss_iov_buffer_desc *padding = NULL;
1029     gss_iov_buffer_desc *trailer = NULL;
1030     size_t gsshsize = 0;
1031     size_t gsstsize = 0;
1032     size_t k5hsize = 0;
1033     size_t k5tsize = 0;
1034 
1035     GSSAPI_KRB5_INIT (&context);
1036     *minor_status = 0;
1037 
1038     for (size = 0, i = 0; i < iov_count; i++) {
1039 	switch(GSS_IOV_BUFFER_TYPE(iov[i].type)) {
1040 	case GSS_IOV_BUFFER_TYPE_EMPTY:
1041 	    break;
1042 	case GSS_IOV_BUFFER_TYPE_DATA:
1043 	    size += iov[i].buffer.length;
1044 	    break;
1045 	case GSS_IOV_BUFFER_TYPE_HEADER:
1046 	    if (header != NULL) {
1047 		*minor_status = 0;
1048 		return GSS_S_FAILURE;
1049 	    }
1050 	    header = &iov[i];
1051 	    break;
1052 	case GSS_IOV_BUFFER_TYPE_TRAILER:
1053 	    if (trailer != NULL) {
1054 		*minor_status = 0;
1055 		return GSS_S_FAILURE;
1056 	    }
1057 	    trailer = &iov[i];
1058 	    break;
1059 	case GSS_IOV_BUFFER_TYPE_PADDING:
1060 	    if (padding != NULL) {
1061 		*minor_status = 0;
1062 		return GSS_S_FAILURE;
1063 	    }
1064 	    padding = &iov[i];
1065 	    break;
1066 	case GSS_IOV_BUFFER_TYPE_SIGN_ONLY:
1067 	    break;
1068 	default:
1069 	    *minor_status = EINVAL;
1070 	    return GSS_S_FAILURE;
1071 	}
1072     }
1073 
1074     major_status = _gk_verify_buffers(minor_status, ctx, header, padding, trailer);
1075     if (major_status != GSS_S_COMPLETE) {
1076 	    return major_status;
1077     }
1078 
1079     if (conf_req_flag) {
1080 	size_t k5psize = 0;
1081 	size_t k5pbase = 0;
1082 	size_t k5bsize = 0;
1083 	size_t ec = 0;
1084 
1085 	size += sizeof(gss_cfx_wrap_token_desc);
1086 
1087 	*minor_status = krb5_crypto_length(context, ctx->crypto,
1088 					   KRB5_CRYPTO_TYPE_HEADER,
1089 					   &k5hsize);
1090 	if (*minor_status)
1091 	    return GSS_S_FAILURE;
1092 
1093 	*minor_status = krb5_crypto_length(context, ctx->crypto,
1094 					   KRB5_CRYPTO_TYPE_TRAILER,
1095 					   &k5tsize);
1096 	if (*minor_status)
1097 	    return GSS_S_FAILURE;
1098 
1099 	*minor_status = krb5_crypto_length(context, ctx->crypto,
1100 					   KRB5_CRYPTO_TYPE_PADDING,
1101 					   &k5pbase);
1102 	if (*minor_status)
1103 	    return GSS_S_FAILURE;
1104 
1105 	if (k5pbase > 1) {
1106 	    k5psize = k5pbase - (size % k5pbase);
1107 	} else {
1108 	    k5psize = 0;
1109 	}
1110 
1111 	if (k5psize == 0 && IS_DCE_STYLE(ctx)) {
1112 	    *minor_status = krb5_crypto_getblocksize(context, ctx->crypto,
1113 						     &k5bsize);
1114 	    if (*minor_status)
1115 		return GSS_S_FAILURE;
1116 
1117 	    ec = k5bsize;
1118 	} else {
1119 	    ec = k5psize;
1120 	}
1121 
1122 	gsshsize = sizeof(gss_cfx_wrap_token_desc) + k5hsize;
1123 	gsstsize = sizeof(gss_cfx_wrap_token_desc) + ec + k5tsize;
1124     } else {
1125 	*minor_status = krb5_crypto_length(context, ctx->crypto,
1126 					   KRB5_CRYPTO_TYPE_CHECKSUM,
1127 					   &k5tsize);
1128 	if (*minor_status)
1129 	    return GSS_S_FAILURE;
1130 
1131 	gsshsize = sizeof(gss_cfx_wrap_token_desc);
1132 	gsstsize = k5tsize;
1133     }
1134 
1135     if (trailer != NULL) {
1136 	trailer->buffer.length = gsstsize;
1137     } else {
1138 	gsshsize += gsstsize;
1139     }
1140 
1141     header->buffer.length = gsshsize;
1142 
1143     if (padding) {
1144 	/* padding is done via EC and is contained in the header or trailer */
1145 	padding->buffer.length = 0;
1146     }
1147 
1148     if (conf_state) {
1149 	*conf_state = conf_req_flag;
1150     }
1151 
1152     return GSS_S_COMPLETE;
1153 }
1154 
1155 
1156 
1157 
/*
 * Produce an RFC 4121 Wrap token for input_message_buffer.
 *
 * With conf_req_flag the output is { header | encrypt(plaintext | pad |
 * header) } and EC encodes the pad length; without it the output is
 * { header | plaintext | checksum } and EC encodes the checksum length.
 * In both cases the encrypted blob / checksum is rotated into the
 * header via the RRC field (DCE-style peers rotate by EC+RRC — see the
 * Windows interop note below).
 *
 * output_message_buffer is allocated here and owned by the caller on
 * success (release with gss_release_buffer).
 */
1158 OM_uint32 _gssapi_wrap_cfx(OM_uint32 *minor_status,
1159 			   const gsskrb5_ctx ctx,
1160 			   krb5_context context,
1161 			   int conf_req_flag,
1162 			   const gss_buffer_t input_message_buffer,
1163 			   int *conf_state,
1164 			   gss_buffer_t output_message_buffer)
1165 {
1166     gss_cfx_wrap_token token;
1167     krb5_error_code ret;
1168     unsigned usage;
1169     krb5_data cipher;
1170     size_t wrapped_len, cksumsize;
1171     uint16_t padlength, rrc = 0;
1172     int32_t seq_number;
1173     u_char *p;
1174 
1175     ret = _gsskrb5cfx_wrap_length_cfx(context,
1176 				      ctx->crypto, conf_req_flag,
1177 				      IS_DCE_STYLE(ctx),
1178 				      input_message_buffer->length,
1179 				      &wrapped_len, &cksumsize, &padlength);
1180     if (ret != 0) {
1181 	*minor_status = ret;
1182 	return GSS_S_FAILURE;
1183     }
1184 
1185     /* Always rotate encrypted token (if any) and checksum to header */
1186     rrc = (conf_req_flag ? sizeof(*token) : 0) + (uint16_t)cksumsize;
1187 
1188     output_message_buffer->length = wrapped_len;
1189     output_message_buffer->value = malloc(output_message_buffer->length);
1190     if (output_message_buffer->value == NULL) {
1191 	*minor_status = ENOMEM;
1192 	return GSS_S_FAILURE;
1193     }
1194 
     /* Build the 16-byte token header (RFC 4121 section 4.2.6.2)
      * at the front of the output buffer. */
1195     p = output_message_buffer->value;
1196     token = (gss_cfx_wrap_token)p;
1197     token->TOK_ID[0] = 0x05;
1198     token->TOK_ID[1] = 0x04;
1199     token->Flags     = 0;
1200     token->Filler    = 0xFF;
1201     if ((ctx->more_flags & LOCAL) == 0)
1202 	token->Flags |= CFXSentByAcceptor;
1203     if (ctx->more_flags & ACCEPTOR_SUBKEY)
1204 	token->Flags |= CFXAcceptorSubkey;
1205     if (conf_req_flag) {
1206 	/*
1207 	 * In Wrap tokens with confidentiality, the EC field is
1208 	 * used to encode the size (in bytes) of the random filler.
1209 	 */
1210 	token->Flags |= CFXSealed;
1211 	token->EC[0] = (padlength >> 8) & 0xFF;
1212 	token->EC[1] = (padlength >> 0) & 0xFF;
1213     } else {
1214 	/*
1215 	 * In Wrap tokens without confidentiality, the EC field is
1216 	 * used to encode the size (in bytes) of the trailing
1217 	 * checksum.
1218 	 *
1219 	 * This is not used in the checksum calcuation itself,
1220 	 * because the checksum length could potentially vary
1221 	 * depending on the data length.
1222 	 */
1223 	token->EC[0] = 0;
1224 	token->EC[1] = 0;
1225     }
1226 
1227     /*
1228      * In Wrap tokens that provide for confidentiality, the RRC
1229      * field in the header contains the hex value 00 00 before
1230      * encryption.
1231      *
1232      * In Wrap tokens that do not provide for confidentiality,
1233      * both the EC and RRC fields in the appended checksum
1234      * contain the hex value 00 00 for the purpose of calculating
1235      * the checksum.
1236      */
1237     token->RRC[0] = 0;
1238     token->RRC[1] = 0;
1239 
     /* Fetch-and-bump the 32-bit send sequence number under the
      * context mutex; the upper 32 bits of SND_SEQ are always 0. */
1240     HEIMDAL_MUTEX_lock(&ctx->ctx_id_mutex);
1241     krb5_auth_con_getlocalseqnumber(context,
1242 				    ctx->auth_context,
1243 				    &seq_number);
1244     _gsskrb5_encode_be_om_uint32(0,          &token->SND_SEQ[0]);
1245     _gsskrb5_encode_be_om_uint32(seq_number, &token->SND_SEQ[4]);
1246     krb5_auth_con_setlocalseqnumber(context,
1247 				    ctx->auth_context,
1248 				    ++seq_number);
1249     HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);
1250 
1251     /*
1252      * If confidentiality is requested, the token header is
1253      * appended to the plaintext before encryption; the resulting
1254      * token is {"header" | encrypt(plaintext | pad | "header")}.
1255      *
1256      * If no confidentiality is requested, the checksum is
1257      * calculated over the plaintext concatenated with the
1258      * token header.
1259      */
1260     if (ctx->more_flags & LOCAL) {
1261 	usage = KRB5_KU_USAGE_INITIATOR_SEAL;
1262     } else {
1263 	usage = KRB5_KU_USAGE_ACCEPTOR_SEAL;
1264     }
1265 
1266     if (conf_req_flag) {
1267 	/*
1268 	 * Any necessary padding is added here to ensure that the
1269 	 * encrypted token header is always at the end of the
1270 	 * ciphertext.
1271 	 *
1272 	 * The specification does not require that the padding
1273 	 * bytes are initialized.
1274 	 */
1275 	p += sizeof(*token);
1276 	memcpy(p, input_message_buffer->value, input_message_buffer->length);
1277 	memset(p + input_message_buffer->length, 0xFF, padlength);
1278 	memcpy(p + input_message_buffer->length + padlength,
1279 	       token, sizeof(*token));
1280 
1281 	ret = krb5_encrypt(context, ctx->crypto,
1282 			   usage, p,
1283 			   input_message_buffer->length + padlength +
1284 				sizeof(*token),
1285 			   &cipher);
1286 	if (ret != 0) {
1287 	    *minor_status = ret;
1288 	    _gsskrb5_release_buffer(minor_status, output_message_buffer);
1289 	    return GSS_S_FAILURE;
1290 	}
1291 	assert(sizeof(*token) + cipher.length == wrapped_len);
     /* RRC is written to the (unencrypted) header only after the
      * encryption above, per RFC 4121. */
1292 	token->RRC[0] = (rrc >> 8) & 0xFF;
1293 	token->RRC[1] = (rrc >> 0) & 0xFF;
1294 
1295 	/*
1296 	 * this is really ugly, but needed against windows
1297 	 * for DCERPC, as windows rotates by EC+RRC.
1298 	 */
1299 	if (IS_DCE_STYLE(ctx)) {
1300 		ret = rrc_rotate(cipher.data, cipher.length, rrc+padlength, FALSE);
1301 	} else {
1302 		ret = rrc_rotate(cipher.data, cipher.length, rrc, FALSE);
1303 	}
1304 	if (ret != 0) {
1305 	    *minor_status = ret;
1306 	    _gsskrb5_release_buffer(minor_status, output_message_buffer);
1307 	    return GSS_S_FAILURE;
1308 	}
1309 	memcpy(p, cipher.data, cipher.length);
1310 	krb5_data_free(&cipher);
1311     } else {
1312 	char *buf;
1313 	Checksum cksum;
1314 
     /* Checksum is computed over (plaintext | header) in a scratch
      * buffer, with EC and RRC still zero in the header. */
1315 	buf = malloc(input_message_buffer->length + sizeof(*token));
1316 	if (buf == NULL) {
1317 	    *minor_status = ENOMEM;
1318 	    _gsskrb5_release_buffer(minor_status, output_message_buffer);
1319 	    return GSS_S_FAILURE;
1320 	}
1321 	memcpy(buf, input_message_buffer->value, input_message_buffer->length);
1322 	memcpy(buf + input_message_buffer->length, token, sizeof(*token));
1323 
1324 	ret = krb5_create_checksum(context, ctx->crypto,
1325 				   usage, 0, buf,
1326 				   input_message_buffer->length +
1327 					sizeof(*token),
1328 				   &cksum);
1329 	if (ret != 0) {
1330 	    *minor_status = ret;
1331 	    _gsskrb5_release_buffer(minor_status, output_message_buffer);
1332 	    free(buf);
1333 	    return GSS_S_FAILURE;
1334 	}
1335 
1336 	free(buf);
1337 
     /* Now record the real EC (checksum length) and RRC in the
      * transmitted header. */
1338 	assert(cksum.checksum.length == cksumsize);
1339 	token->EC[0] =  (cksum.checksum.length >> 8) & 0xFF;
1340 	token->EC[1] =  (cksum.checksum.length >> 0) & 0xFF;
1341 	token->RRC[0] = (rrc >> 8) & 0xFF;
1342 	token->RRC[1] = (rrc >> 0) & 0xFF;
1343 
1344 	p += sizeof(*token);
1345 	memcpy(p, input_message_buffer->value, input_message_buffer->length);
1346 	memcpy(p + input_message_buffer->length,
1347 	       cksum.checksum.data, cksum.checksum.length);
1348 
1349 	ret = rrc_rotate(p,
1350 	    input_message_buffer->length + cksum.checksum.length, rrc, FALSE);
1351 	if (ret != 0) {
1352 	    *minor_status = ret;
1353 	    _gsskrb5_release_buffer(minor_status, output_message_buffer);
1354 	    free_Checksum(&cksum);
1355 	    return GSS_S_FAILURE;
1356 	}
1357 	free_Checksum(&cksum);
1358     }
1359 
1360     if (conf_state != NULL) {
1361 	*conf_state = conf_req_flag;
1362     }
1363 
1364     *minor_status = 0;
1365     return GSS_S_COMPLETE;
1366 }
1367 
/*
 * Verify and strip an RFC 4121 Wrap token, yielding the plaintext in
 * output_message_buffer (allocated here; caller releases on success).
 *
 * Validates TOK_ID/Filler/flags against the context role, rejects
 * 64-bit sequence numbers, checks message ordering, then either
 * un-rotates+decrypts (CFXSealed) and compares the embedded header
 * copy in constant time, or un-rotates and verifies the trailing
 * checksum over (plaintext | header-with-EC/RRC-zeroed).
 *
 * NOTE(review): the rrc_rotate() calls below operate in place on the
 * caller's input buffer (see the "bogus to do this in-place" comment),
 * so input_message_buffer is modified during unwrap.
 */
1368 OM_uint32 _gssapi_unwrap_cfx(OM_uint32 *minor_status,
1369 			     const gsskrb5_ctx ctx,
1370 			     krb5_context context,
1371 			     const gss_buffer_t input_message_buffer,
1372 			     gss_buffer_t output_message_buffer,
1373 			     int *conf_state,
1374 			     gss_qop_t *qop_state)
1375 {
1376     gss_cfx_wrap_token token;
1377     u_char token_flags;
1378     krb5_error_code ret;
1379     unsigned usage;
1380     krb5_data data;
1381     uint16_t ec, rrc;
1382     OM_uint32 seq_number_lo, seq_number_hi;
1383     size_t len;
1384     u_char *p;
1385 
1386     *minor_status = 0;
1387 
1388     if (input_message_buffer->length < sizeof(*token)) {
1389 	return GSS_S_DEFECTIVE_TOKEN;
1390     }
1391 
1392     p = input_message_buffer->value;
1393 
1394     token = (gss_cfx_wrap_token)p;
1395 
1396     if (token->TOK_ID[0] != 0x05 || token->TOK_ID[1] != 0x04) {
1397 	return GSS_S_DEFECTIVE_TOKEN;
1398     }
1399 
1400     /* Ignore unknown flags */
1401     token_flags = token->Flags &
1402 	(CFXSentByAcceptor | CFXSealed | CFXAcceptorSubkey);
1403 
     /* Direction and subkey flags must match what this context
      * expects from its peer. */
1404     if (token_flags & CFXSentByAcceptor) {
1405 	if ((ctx->more_flags & LOCAL) == 0)
1406 	    return GSS_S_DEFECTIVE_TOKEN;
1407     }
1408 
1409     if (ctx->more_flags & ACCEPTOR_SUBKEY) {
1410 	if ((token_flags & CFXAcceptorSubkey) == 0)
1411 	    return GSS_S_DEFECTIVE_TOKEN;
1412     } else {
1413 	if (token_flags & CFXAcceptorSubkey)
1414 	    return GSS_S_DEFECTIVE_TOKEN;
1415     }
1416 
1417     if (token->Filler != 0xFF) {
1418 	return GSS_S_DEFECTIVE_TOKEN;
1419     }
1420 
1421     if (conf_state != NULL) {
1422 	*conf_state = (token_flags & CFXSealed) ? 1 : 0;
1423     }
1424 
1425     ec  = (token->EC[0]  << 8) | token->EC[1];
1426     rrc = (token->RRC[0] << 8) | token->RRC[1];
1427 
1428     /*
1429      * Check sequence number
1430      */
1431     _gsskrb5_decode_be_om_uint32(&token->SND_SEQ[0], &seq_number_hi);
1432     _gsskrb5_decode_be_om_uint32(&token->SND_SEQ[4], &seq_number_lo);
1433     if (seq_number_hi) {
1434 	/* no support for 64-bit sequence numbers */
1435 	*minor_status = ERANGE;
1436 	return GSS_S_UNSEQ_TOKEN;
1437     }
1438 
1439     HEIMDAL_MUTEX_lock(&ctx->ctx_id_mutex);
1440     ret = _gssapi_msg_order_check(ctx->order, seq_number_lo);
1441     if (ret != 0) {
1442 	*minor_status = 0;
1443 	HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);
1444 	_gsskrb5_release_buffer(minor_status, output_message_buffer);
1445 	return ret;
1446     }
1447     HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);
1448 
1449     /*
1450      * Decrypt and/or verify checksum
1451      */
1452 
     /* Mirror of the wrap side: a LOCAL (initiator) context verifies
      * tokens the acceptor sealed, and vice versa. */
1453     if (ctx->more_flags & LOCAL) {
1454 	usage = KRB5_KU_USAGE_ACCEPTOR_SEAL;
1455     } else {
1456 	usage = KRB5_KU_USAGE_INITIATOR_SEAL;
1457     }
1458 
1459     p += sizeof(*token);
1460     len = input_message_buffer->length;
1461     len -= (p - (u_char *)input_message_buffer->value);
1462 
1463     if (token_flags & CFXSealed) {
1464 	/*
1465 	 * this is really ugly, but needed against windows
1466 	 * for DCERPC, as windows rotates by EC+RRC.
1467 	 */
1468 	if (IS_DCE_STYLE(ctx)) {
1469 		*minor_status = rrc_rotate(p, len, rrc+ec, TRUE);
1470 	} else {
1471 		*minor_status = rrc_rotate(p, len, rrc, TRUE);
1472 	}
1473 	if (*minor_status != 0) {
1474 	    return GSS_S_FAILURE;
1475 	}
1476 
1477 	ret = krb5_decrypt(context, ctx->crypto, usage,
1478 	    p, len, &data);
1479 	if (ret != 0) {
1480 	    *minor_status = ret;
1481 	    return GSS_S_BAD_MIC;
1482 	}
1483 
1484 	/* Check that there is room for the pad and token header */
1485 	if (data.length < ec + sizeof(*token)) {
1486 	    krb5_data_free(&data);
1487 	    return GSS_S_DEFECTIVE_TOKEN;
1488 	}
1489 	p = data.data;
1490 	p += data.length - sizeof(*token);
1490 
1492 	/* RRC is unprotected; don't modify input buffer */
1493 	((gss_cfx_wrap_token)p)->RRC[0] = token->RRC[0];
1494 	((gss_cfx_wrap_token)p)->RRC[1] = token->RRC[1];
1495 
     /* Constant-time compare of the decrypted header copy against the
      * outer header to detect tampering. */
1496 	/* Check the integrity of the header */
1497 	if (ct_memcmp(p, token, sizeof(*token)) != 0) {
1498 	    krb5_data_free(&data);
1499 	    return GSS_S_BAD_MIC;
1500 	}
1501 
     /* Hand the decrypted krb5_data straight to the caller, minus the
      * EC filler and embedded header at its tail. */
1502 	output_message_buffer->value = data.data;
1503 	output_message_buffer->length = data.length - ec - sizeof(*token);
1504     } else {
1505 	Checksum cksum;
1506 
1507 	/* Rotate by RRC; bogus to do this in-place XXX */
1508 	*minor_status = rrc_rotate(p, len, rrc, TRUE);
1509 	if (*minor_status != 0) {
1510 	    return GSS_S_FAILURE;
1511 	}
1512 
1513 	/* Determine checksum type */
1514 	ret = krb5_crypto_get_checksum_type(context,
1515 					    ctx->crypto,
1516 					    &cksum.cksumtype);
1517 	if (ret != 0) {
1518 	    *minor_status = ret;
1519 	    return GSS_S_FAILURE;
1520 	}
1521 
     /* In the unsealed case EC carries the checksum length. */
1522 	cksum.checksum.length = ec;
1523 
1524 	/* Check we have at least as much data as the checksum */
1525 	if (len < cksum.checksum.length) {
1526 	    *minor_status = ERANGE;
1527 	    return GSS_S_BAD_MIC;
1528 	}
1529 
1530 	/* Length now is of the plaintext only, no checksum */
1531 	len -= cksum.checksum.length;
1532 	cksum.checksum.data = p + len;
1533 
1534 	output_message_buffer->length = len; /* for later */
1535 	output_message_buffer->value = malloc(len + sizeof(*token));
1536 	if (output_message_buffer->value == NULL) {
1537 	    *minor_status = ENOMEM;
1538 	    return GSS_S_FAILURE;
1539 	}
1540 
1541 	/* Checksum is over (plaintext-data | "header") */
1542 	memcpy(output_message_buffer->value, p, len);
1543 	memcpy((u_char *)output_message_buffer->value + len,
1544 	       token, sizeof(*token));
1545 
1546 	/* EC is not included in checksum calculation */
1547 	token = (gss_cfx_wrap_token)((u_char *)output_message_buffer->value +
1548 				     len);
1549 	token->EC[0]  = 0;
1550 	token->EC[1]  = 0;
1551 	token->RRC[0] = 0;
1552 	token->RRC[1] = 0;
1553 
1554 	ret = krb5_verify_checksum(context, ctx->crypto,
1555 				   usage,
1556 				   output_message_buffer->value,
1557 				   len + sizeof(*token),
1558 				   &cksum);
1559 	if (ret != 0) {
1560 	    *minor_status = ret;
1561 	    _gsskrb5_release_buffer(minor_status, output_message_buffer);
1562 	    return GSS_S_BAD_MIC;
1563 	}
1564     }
1565 
1566     if (qop_state != NULL) {
1567 	*qop_state = GSS_C_QOP_DEFAULT;
1568     }
1569 
1570     *minor_status = 0;
1571     return GSS_S_COMPLETE;
1572 }
1573 
/*
 * Produce an RFC 4121 MIC token for message_buffer.
 *
 * The checksum is computed over (plaintext-data | token-header) in a
 * scratch buffer, but the emitted token is
 * { "header" | get_mic("header" | plaintext-data) }.
 * message_token is allocated here and owned by the caller on success.
 */
1574 OM_uint32 _gssapi_mic_cfx(OM_uint32 *minor_status,
1575 			  const gsskrb5_ctx ctx,
1576 			  krb5_context context,
1577 			  gss_qop_t qop_req,
1578 			  const gss_buffer_t message_buffer,
1579 			  gss_buffer_t message_token)
1580 {
1581     gss_cfx_mic_token token;
1582     krb5_error_code ret;
1583     unsigned usage;
1584     Checksum cksum;
1585     u_char *buf;
1586     size_t len;
1587     int32_t seq_number;
1588 
     /* Scratch buffer: plaintext followed by the 16-byte MIC header,
      * which is what the checksum is calculated over. */
1589     len = message_buffer->length + sizeof(*token);
1590     buf = malloc(len);
1591     if (buf == NULL) {
1592 	*minor_status = ENOMEM;
1593 	return GSS_S_FAILURE;
1594     }
1595 
1596     memcpy(buf, message_buffer->value, message_buffer->length);
1597 
     /* Build the MIC token header (TOK_ID 04 04, RFC 4121 4.2.6.1)
      * directly after the copied plaintext. */
1598     token = (gss_cfx_mic_token)(buf + message_buffer->length);
1599     token->TOK_ID[0] = 0x04;
1600     token->TOK_ID[1] = 0x04;
1601     token->Flags = 0;
1602     if ((ctx->more_flags & LOCAL) == 0)
1603 	token->Flags |= CFXSentByAcceptor;
1604     if (ctx->more_flags & ACCEPTOR_SUBKEY)
1605 	token->Flags |= CFXAcceptorSubkey;
1606     memset(token->Filler, 0xFF, 5);
1607 
     /* Fetch-and-bump the send sequence number under the context
      * mutex, same as in _gssapi_wrap_cfx(). */
1608     HEIMDAL_MUTEX_lock(&ctx->ctx_id_mutex);
1609     krb5_auth_con_getlocalseqnumber(context,
1610 				    ctx->auth_context,
1611 				    &seq_number);
1612     _gsskrb5_encode_be_om_uint32(0,          &token->SND_SEQ[0]);
1613     _gsskrb5_encode_be_om_uint32(seq_number, &token->SND_SEQ[4]);
1614     krb5_auth_con_setlocalseqnumber(context,
1615 				    ctx->auth_context,
1616 				    ++seq_number);
1617     HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);
1618 
1619     if (ctx->more_flags & LOCAL) {
1620 	usage = KRB5_KU_USAGE_INITIATOR_SIGN;
1621     } else {
1622 	usage = KRB5_KU_USAGE_ACCEPTOR_SIGN;
1623     }
1624 
1625     ret = krb5_create_checksum(context, ctx->crypto,
1626 	usage, 0, buf, len, &cksum);
1627     if (ret != 0) {
1628 	*minor_status = ret;
1629 	free(buf);
1630 	return GSS_S_FAILURE;
1631     }
1632 
1633     /* Determine MIC length */
1634     message_token->length = sizeof(*token) + cksum.checksum.length;
1635     message_token->value = malloc(message_token->length);
1636     if (message_token->value == NULL) {
1637 	*minor_status = ENOMEM;
1638 	free_Checksum(&cksum);
1639 	free(buf);
1640 	return GSS_S_FAILURE;
1641     }
1642 
1643     /* Token is { "header" | get_mic("header" | plaintext-data) } */
1644     memcpy(message_token->value, token, sizeof(*token));
1645     memcpy((u_char *)message_token->value + sizeof(*token),
1646 	   cksum.checksum.data, cksum.checksum.length);
1647 
1648     free_Checksum(&cksum);
1649     free(buf);
1650 
1651     *minor_status = 0;
1652     return GSS_S_COMPLETE;
1653 }
1654 
/*
 * Verify an RFC 4121 MIC token (token_buffer) against message_buffer.
 *
 * Validates the 16-byte MIC header (TOK_ID 04 04, five 0xFF filler
 * bytes, direction/subkey flags appropriate for this context), checks
 * the sequence number and message order, then recomputes the checksum
 * over (plaintext-data | header) and verifies it against the checksum
 * carried after the header in token_buffer.
 */
1655 OM_uint32 _gssapi_verify_mic_cfx(OM_uint32 *minor_status,
1656 				 const gsskrb5_ctx ctx,
1657 				 krb5_context context,
1658 				 const gss_buffer_t message_buffer,
1659 				 const gss_buffer_t token_buffer,
1660 				 gss_qop_t *qop_state)
1661 {
1662     gss_cfx_mic_token token;
1663     u_char token_flags;
1664     krb5_error_code ret;
1665     unsigned usage;
1666     OM_uint32 seq_number_lo, seq_number_hi;
1667     u_char *buf, *p;
1668     Checksum cksum;
1669 
1670     *minor_status = 0;
1671 
1672     if (token_buffer->length < sizeof(*token)) {
1673 	return GSS_S_DEFECTIVE_TOKEN;
1674     }
1675 
1676     p = token_buffer->value;
1677 
1678     token = (gss_cfx_mic_token)p;
1679 
1680     if (token->TOK_ID[0] != 0x04 || token->TOK_ID[1] != 0x04) {
1681 	return GSS_S_DEFECTIVE_TOKEN;
1682     }
1683 
1684     /* Ignore unknown flags */
1685     token_flags = token->Flags & (CFXSentByAcceptor | CFXAcceptorSubkey);
1686 
     /* Direction and subkey flags must match this context's role. */
1687     if (token_flags & CFXSentByAcceptor) {
1688 	if ((ctx->more_flags & LOCAL) == 0)
1689 	    return GSS_S_DEFECTIVE_TOKEN;
1690     }
1691     if (ctx->more_flags & ACCEPTOR_SUBKEY) {
1692 	if ((token_flags & CFXAcceptorSubkey) == 0)
1693 	    return GSS_S_DEFECTIVE_TOKEN;
1694     } else {
1695 	if (token_flags & CFXAcceptorSubkey)
1696 	    return GSS_S_DEFECTIVE_TOKEN;
1697     }
1698 
     /* Constant-time filler check to avoid a timing side channel. */
1699     if (ct_memcmp(token->Filler, "\xff\xff\xff\xff\xff", 5) != 0) {
1700 	return GSS_S_DEFECTIVE_TOKEN;
1701     }
1702 
1703     /*
1704      * Check sequence number
1705      */
1706     _gsskrb5_decode_be_om_uint32(&token->SND_SEQ[0], &seq_number_hi);
1707     _gsskrb5_decode_be_om_uint32(&token->SND_SEQ[4], &seq_number_lo);
1708     if (seq_number_hi) {
     /* no support for 64-bit sequence numbers */
1709 	*minor_status = ERANGE;
1710 	return GSS_S_UNSEQ_TOKEN;
1711     }
1712 
1713     HEIMDAL_MUTEX_lock(&ctx->ctx_id_mutex);
1714     ret = _gssapi_msg_order_check(ctx->order, seq_number_lo);
1715     if (ret != 0) {
1716 	*minor_status = 0;
1717 	HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);
1718 	return ret;
1719     }
1720     HEIMDAL_MUTEX_unlock(&ctx->ctx_id_mutex);
1721 
1722     /*
1723      * Verify checksum
1724      */
1725     ret = krb5_crypto_get_checksum_type(context, ctx->crypto,
1726 					&cksum.cksumtype);
1727     if (ret != 0) {
1728 	*minor_status = ret;
1729 	return GSS_S_FAILURE;
1730     }
1731 
     /* The received checksum immediately follows the MIC header. */
1732     cksum.checksum.data = p + sizeof(*token);
1733     cksum.checksum.length = token_buffer->length - sizeof(*token);
1734 
     /* Mirror of the MIC side: a LOCAL (initiator) context verifies
      * tokens the acceptor signed, and vice versa. */
1735     if (ctx->more_flags & LOCAL) {
1736 	usage = KRB5_KU_USAGE_ACCEPTOR_SIGN;
1737     } else {
1738 	usage = KRB5_KU_USAGE_INITIATOR_SIGN;
1739     }
1740 
     /* Rebuild (plaintext-data | header) — the exact bytes the peer
      * checksummed — in a scratch buffer. */
1741     buf = malloc(message_buffer->length + sizeof(*token));
1742     if (buf == NULL) {
1743 	*minor_status = ENOMEM;
1744 	return GSS_S_FAILURE;
1745     }
1746     memcpy(buf, message_buffer->value, message_buffer->length);
1747     memcpy(buf + message_buffer->length, token, sizeof(*token));
1748 
1749     ret = krb5_verify_checksum(context, ctx->crypto,
1750 			       usage,
1751 			       buf,
1752 			       sizeof(*token) + message_buffer->length,
1753 			       &cksum);
1754     if (ret != 0) {
1755 	*minor_status = ret;
1756 	free(buf);
1757 	return GSS_S_BAD_MIC;
1758     }
1759 
1760     free(buf);
1761 
1762     if (qop_state != NULL) {
1763 	*qop_state = GSS_C_QOP_DEFAULT;
1764     }
1765 
1766     return GSS_S_COMPLETE;
1767 }
1768