/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2014 Nexenta Systems, Inc. All rights reserved.
 * Copyright 2019 Joyent, Inc.
 * Copyright 2017 Jason King.
 */

#include <pthread.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>
#include <sys/debug.h>
#include <sys/types.h>
#include <security/cryptoki.h>
#include <aes_impl.h>
#include <cryptoutil.h>
#include "softSession.h"
#include "softObject.h"
#include "softCrypt.h"
#include "softOps.h"

/*
 * Check that the mechanism parameter is present and the correct size if
 * required and allocate an AES context.
 */
static CK_RV
soft_aes_check_mech_param(CK_MECHANISM_PTR mech, aes_ctx_t **ctxp)
{
	void *(*allocf)(int) = NULL;
	size_t param_len = 0;
	boolean_t param_req = B_TRUE;

	switch (mech->mechanism) {
	case CKM_AES_ECB:
		param_req = B_FALSE;
		allocf = ecb_alloc_ctx;
		break;
	case CKM_AES_CMAC:
		param_req = B_FALSE;
		allocf = cmac_alloc_ctx;
		break;
	case CKM_AES_CMAC_GENERAL:
		param_len = sizeof (CK_MAC_GENERAL_PARAMS);
		allocf = cmac_alloc_ctx;
		break;
	case CKM_AES_CBC:
	case CKM_AES_CBC_PAD:
		param_len = AES_BLOCK_LEN;
		allocf = cbc_alloc_ctx;
		break;
	case CKM_AES_CTR:
		param_len = sizeof (CK_AES_CTR_PARAMS);
		allocf = ctr_alloc_ctx;
		break;
	case CKM_AES_CCM:
		param_len = sizeof (CK_CCM_PARAMS);
		allocf = ccm_alloc_ctx;
		break;
	case CKM_AES_GCM:
		param_len = sizeof (CK_GCM_PARAMS);
		allocf = gcm_alloc_ctx;
		break;
	default:
		return (CKR_MECHANISM_INVALID);
	}

	if (param_req && (mech->pParameter == NULL ||
	    mech->ulParameterLen != param_len)) {
		return (CKR_MECHANISM_PARAM_INVALID);
	}

	*ctxp = allocf(0);
	if (*ctxp == NULL) {
		return (CKR_HOST_MEMORY);
	}

	return (CKR_OK);
}
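
/*
 * An illustrative (hypothetical) caller: for CKM_AES_CBC the mechanism
 * parameter is the AES_BLOCK_LEN-byte IV, so a caller would set up:
 *
 *	CK_BYTE iv[AES_BLOCK_LEN] = { 0 };
 *	CK_MECHANISM mech = {
 *		.mechanism = CKM_AES_CBC,
 *		.pParameter = iv,
 *		.ulParameterLen = sizeof (iv)
 *	};
 *	aes_ctx_t *aes_ctx = NULL;
 *	CK_RV rv = soft_aes_check_mech_param(&mech, &aes_ctx);
 *
 * On CKR_OK, aes_ctx points to a freshly allocated (not yet keyed) context.
 */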

/*
 * Create an AES key schedule for the given AES context from the given key.
 * If the key is not sensitive, cache a copy of the key schedule in the
 * key object and/or use the cached copy of the key schedule.
 *
 * Must be called before the init function for a given mode is called.
 */
static CK_RV
soft_aes_init_key(aes_ctx_t *aes_ctx, soft_object_t *key_p)
{
	void *ks = NULL;
	size_t size = 0;
	CK_RV rv = CKR_OK;

	(void) pthread_mutex_lock(&key_p->object_mutex);

	/*
	 * AES keys should be either 128, 192, or 256 bits long.
	 * soft_object_t stores the key size in bytes, so we check those sizes
	 * in bytes.
	 *
	 * While soft_build_secret_key_object() does these same validations for
	 * keys created by the user, it may be possible that a key loaded from
	 * disk could be invalid or corrupt.  We err on the side of caution
	 * and check again that it's the correct size before performing any
	 * AES operations.
	 */
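	/*
	 * Concretely (values from aes_impl.h): AES_MIN_KEY_BYTES (16)
	 * is a 128-bit key, AES_192_KEY_BYTES (24) a 192-bit key, and
	 * AES_MAX_KEY_BYTES (32) a 256-bit key.
	 */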
	switch (OBJ_SEC_VALUE_LEN(key_p)) {
	case AES_MIN_KEY_BYTES:
	case AES_MAX_KEY_BYTES:
	case AES_192_KEY_BYTES:
		break;
	default:
		rv = CKR_KEY_SIZE_RANGE;
		goto done;
	}

	ks = aes_alloc_keysched(&size, 0);
	if (ks == NULL) {
		rv = CKR_HOST_MEMORY;
		goto done;
	}

	/* If this is a sensitive key, always expand the key schedule */
	if (key_p->bool_attr_mask & SENSITIVE_BOOL_ON) {
		/* aes_init_keysched() requires key length in bits. */
#ifdef	__sparcv9
		/* LINTED */
		aes_init_keysched(OBJ_SEC_VALUE(key_p), (uint_t)
		    (OBJ_SEC_VALUE_LEN(key_p) * NBBY), ks);
#else	/* !__sparcv9 */
		aes_init_keysched(OBJ_SEC_VALUE(key_p),
		    (OBJ_SEC_VALUE_LEN(key_p) * NBBY), ks);
#endif	/* __sparcv9 */

		goto done;
	}
	/* If a non-sensitive key doesn't have a key schedule yet, create it */
	if (OBJ_KEY_SCHED(key_p) == NULL) {
		void *obj_ks = NULL;

		obj_ks = aes_alloc_keysched(&size, 0);
		if (obj_ks == NULL) {
			rv = CKR_HOST_MEMORY;
			goto done;
		}

#ifdef	__sparcv9
		/* LINTED */
		aes_init_keysched(OBJ_SEC_VALUE(key_p),
		    (uint_t)(OBJ_SEC_VALUE_LEN(key_p) * 8), obj_ks);
#else	/* !__sparcv9 */
		aes_init_keysched(OBJ_SEC_VALUE(key_p),
		    (OBJ_SEC_VALUE_LEN(key_p) * 8), obj_ks);
#endif	/* __sparcv9 */

		OBJ_KEY_SCHED_LEN(key_p) = size;
		OBJ_KEY_SCHED(key_p) = obj_ks;
	}

	(void) memcpy(ks, OBJ_KEY_SCHED(key_p), OBJ_KEY_SCHED_LEN(key_p));

done:
	(void) pthread_mutex_unlock(&key_p->object_mutex);

	if (rv == CKR_OK) {
		aes_ctx->ac_keysched = ks;
		aes_ctx->ac_keysched_len = size;
	} else {
		freezero(ks, size);
	}

	return (rv);
}

/*
 * Initialize the AES context for the given mode, including allocating and
 * expanding the key schedule if required.
 */
static CK_RV
soft_aes_init_ctx(aes_ctx_t *aes_ctx, CK_MECHANISM_PTR mech_p,
    boolean_t encrypt)
{
	int rc = CRYPTO_SUCCESS;

	switch (mech_p->mechanism) {
	case CKM_AES_ECB:
		aes_ctx->ac_flags |= ECB_MODE;
		break;
	case CKM_AES_CMAC:
	case CKM_AES_CMAC_GENERAL:
		rc = cmac_init_ctx((cbc_ctx_t *)aes_ctx, AES_BLOCK_LEN);
		break;
	case CKM_AES_CBC:
	case CKM_AES_CBC_PAD:
		rc = cbc_init_ctx((cbc_ctx_t *)aes_ctx, mech_p->pParameter,
		    mech_p->ulParameterLen, AES_BLOCK_LEN, aes_copy_block64);
		break;
	case CKM_AES_CTR:
	{
		/*
		 * soft_aes_check_mech_param() verifies this is !NULL and is
		 * the correct size.
		 */
		CK_AES_CTR_PARAMS *pp = (CK_AES_CTR_PARAMS *)mech_p->pParameter;

		rc = ctr_init_ctx((ctr_ctx_t *)aes_ctx, pp->ulCounterBits,
		    pp->cb, aes_encrypt_block, aes_copy_block);
		break;
	}
	case CKM_AES_CCM: {
		CK_CCM_PARAMS *pp = (CK_CCM_PARAMS *)mech_p->pParameter;

		/*
		 * The illumos ccm mode implementation predates the PKCS#11
		 * version that specifies CK_CCM_PARAMS.  As a result, the
		 * order and names of the struct members are different, so we
		 * must translate.  ccm_init_ctx() does not store a reference
		 * to ccm_params, so it is safe to allocate on the stack.
		 */
		CK_AES_CCM_PARAMS ccm_params = {
			.ulMACSize = pp->ulMACLen,
			.ulNonceSize = pp->ulNonceLen,
			.ulAuthDataSize = pp->ulAADLen,
			.ulDataSize = pp->ulDataLen,
			.nonce = pp->pNonce,
			.authData = pp->pAAD
		};

		rc = ccm_init_ctx((ccm_ctx_t *)aes_ctx, (char *)&ccm_params, 0,
		    encrypt, AES_BLOCK_LEN, aes_encrypt_block, aes_xor_block);
		break;
	}
	case CKM_AES_GCM:
		/*
		 * Similar to the ccm mode implementation, the gcm mode also
		 * predates PKCS#11 2.40; however, in this instance
		 * CK_AES_GCM_PARAMS and CK_GCM_PARAMS are identical except
		 * for the member names, so we can just pass it along.
		 */
		rc = gcm_init_ctx((gcm_ctx_t *)aes_ctx, mech_p->pParameter,
		    AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block,
		    aes_xor_block);
		break;
	}

	return (crypto2pkcs11_error_number(rc));
}

/*
 * Allocate context for the active encryption or decryption operation, and
 * generate AES key schedule to speed up the operation.
 */
CK_RV
soft_aes_crypt_init_common(soft_session_t *session_p,
    CK_MECHANISM_PTR pMechanism, soft_object_t *key_p,
    boolean_t encrypt)
{
	aes_ctx_t *aes_ctx = NULL;
	CK_RV rv = CKR_OK;

	if (key_p->key_type != CKK_AES)
		return (CKR_KEY_TYPE_INCONSISTENT);

	/* C_{Encrypt,Decrypt}Init() validate pMechanism != NULL */
	rv = soft_aes_check_mech_param(pMechanism, &aes_ctx);
	if (rv != CKR_OK) {
		goto done;
	}

	rv = soft_aes_init_key(aes_ctx, key_p);
	if (rv != CKR_OK) {
		goto done;
	}

	rv = soft_aes_init_ctx(aes_ctx, pMechanism, encrypt);
	if (rv != CKR_OK) {
		goto done;
	}

	(void) pthread_mutex_lock(&session_p->session_mutex);
	if (encrypt) {
		/* Called by C_EncryptInit. */
		session_p->encrypt.context = aes_ctx;
		session_p->encrypt.mech.mechanism = pMechanism->mechanism;
	} else {
		/* Called by C_DecryptInit. */
		session_p->decrypt.context = aes_ctx;
		session_p->decrypt.mech.mechanism = pMechanism->mechanism;
	}
	(void) pthread_mutex_unlock(&session_p->session_mutex);

done:
	if (rv != CKR_OK) {
		soft_aes_free_ctx(aes_ctx);
	}

	return (rv);
}


CK_RV
soft_aes_encrypt(soft_session_t *session_p, CK_BYTE_PTR pData,
    CK_ULONG ulDataLen, CK_BYTE_PTR pEncryptedData,
    CK_ULONG_PTR pulEncryptedDataLen)
{
	aes_ctx_t *aes_ctx = session_p->encrypt.context;
	CK_MECHANISM_TYPE mech = session_p->encrypt.mech.mechanism;
	size_t length_needed;
	size_t remainder;
	int rc = CRYPTO_SUCCESS;
	CK_RV rv = CKR_OK;
	crypto_data_t out = {
		.cd_format = CRYPTO_DATA_RAW,
		.cd_offset = 0,
		.cd_length = *pulEncryptedDataLen,
		.cd_raw.iov_base = (char *)pEncryptedData,
		.cd_raw.iov_len = *pulEncryptedDataLen
	};

	/*
	 * A bit unusual, but it's permissible for ccm and gcm modes to not
	 * encrypt any data.  This ends up being equivalent to CKM_AES_CMAC
	 * or CKM_AES_GMAC of the additional authenticated data (AAD).
	 */
	if ((pData == NULL || ulDataLen == 0) &&
	    !(aes_ctx->ac_flags & (CCM_MODE|GCM_MODE|CMAC_MODE))) {
		return (CKR_ARGUMENTS_BAD);
	}

	remainder = ulDataLen % AES_BLOCK_LEN;

	/*
	 * CTR, CCM, CMAC, and GCM modes do not require the plaintext
	 * to be a multiple of the AES block size.  CKM_AES_CBC_PAD, as
	 * the name suggests, pads its output, so it can also accept any
	 * size plaintext.
	 */
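	/*
	 * A quick worked example (illustrative numbers): with
	 * ulDataLen == 100, remainder == 100 % 16 == 4, so plain
	 * CKM_AES_CBC or CKM_AES_ECB must fail with CKR_DATA_LEN_RANGE,
	 * while CKM_AES_CBC_PAD pads the data out to 112 bytes of
	 * ciphertext.
	 */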
	switch (mech) {
	case CKM_AES_CBC_PAD:
	case CKM_AES_CMAC:
	case CKM_AES_CMAC_GENERAL:
	case CKM_AES_CTR:
	case CKM_AES_CCM:
	case CKM_AES_GCM:
		break;
	default:
		if (remainder != 0) {
			rv = CKR_DATA_LEN_RANGE;
			goto cleanup;
		}
	}

	switch (mech) {
	case CKM_AES_CCM:
		length_needed = ulDataLen + aes_ctx->ac_mac_len;
		break;
	case CKM_AES_GCM:
		length_needed = ulDataLen + aes_ctx->ac_tag_len;
		break;
	case CKM_AES_CMAC:
	case CKM_AES_CMAC_GENERAL:
		length_needed = AES_BLOCK_LEN;
		break;
	case CKM_AES_CBC_PAD:
		/* CKM_AES_CBC_PAD always adds 1..AES_BLOCK_LEN of padding */
		length_needed = ulDataLen + AES_BLOCK_LEN - remainder;
		break;
	default:
		length_needed = ulDataLen;
		break;
	}
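
	/*
	 * E.g. (hypothetical sizes): CKM_AES_GCM with ulDataLen == 20 and a
	 * 16-byte tag needs 36 bytes of output, while CKM_AES_CBC_PAD with
	 * ulDataLen == 32 (remainder == 0) still needs 48, since a full
	 * block of padding is appended.
	 */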

	if (pEncryptedData == NULL) {
		/*
		 * The application can ask for the size of the output buffer
		 * with a NULL output buffer (pEncryptedData).
		 * C_Encrypt() guarantees pulEncryptedDataLen != NULL.
		 */
		*pulEncryptedDataLen = length_needed;
		return (CKR_OK);
	}

	if (*pulEncryptedDataLen < length_needed) {
		*pulEncryptedDataLen = length_needed;
		return (CKR_BUFFER_TOO_SMALL);
	}

	if (ulDataLen > 0) {
		rv = soft_aes_encrypt_update(session_p, pData, ulDataLen,
		    pEncryptedData, pulEncryptedDataLen);

		if (rv != CKR_OK) {
			rv = CKR_FUNCTION_FAILED;
			goto cleanup;
		}

		/*
		 * Some modes (e.g. CCM and GCM) will append data such as a MAC
		 * to the ciphertext after the plaintext has been encrypted.
		 * Update out to reflect the amount of data in pEncryptedData
		 * after encryption.
		 */
		out.cd_offset = *pulEncryptedDataLen;
	}

	switch (mech) {
	case CKM_AES_CBC_PAD: {
		/*
		 * aes_encrypt_contiguous_blocks() accumulates plaintext
		 * in aes_ctx until it has at least one full block of
		 * plaintext.  Any partial blocks of data remaining after
		 * encrypting are left for subsequent calls to
		 * aes_encrypt_contiguous_blocks().  If the input happened
		 * to be an exact multiple of AES_BLOCK_LEN, we must still
		 * append a block of padding (a full block in that case) so
		 * that the correct amount of padding to remove is known
		 * during decryption.
		 *
		 * soft_add_pkcs7_padding() is a bit overkill -- we just
		 * create a block filled with the pad amount using memset(),
		 * and encrypt 'amt' bytes of the block to pad out the input.
		 */
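		/*
		 * E.g. (illustration): with remainder == 5, amt == 11, and
		 * the final block encrypted below is the 5 plaintext bytes
		 * already buffered in aes_ctx followed by 11 pad bytes of
		 * 0x0b:
		 *
		 *	p p p p p 0b 0b 0b 0b 0b 0b 0b 0b 0b 0b 0b
		 */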
		char block[AES_BLOCK_LEN];
		size_t amt = AES_BLOCK_LEN - remainder;

		VERIFY3U(remainder, ==, aes_ctx->ac_remainder_len);

		(void) memset(block, amt & 0xff, sizeof (block));
		rc = aes_encrypt_contiguous_blocks(aes_ctx, block, amt, &out);
		rv = crypto2pkcs11_error_number(rc);
		explicit_bzero(block, sizeof (block));
		break;
	}
	case CKM_AES_CCM:
		rc = ccm_encrypt_final((ccm_ctx_t *)aes_ctx, &out,
		    AES_BLOCK_LEN, aes_encrypt_block, aes_xor_block);
		rv = crypto2pkcs11_error_number(rc);
		break;
	case CKM_AES_GCM:
		rc = gcm_encrypt_final((gcm_ctx_t *)aes_ctx, &out,
		    AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block,
		    aes_xor_block);
		rv = crypto2pkcs11_error_number(rc);
		break;
	case CKM_AES_CMAC:
	case CKM_AES_CMAC_GENERAL:
		rc = cmac_mode_final((cbc_ctx_t *)aes_ctx, &out,
		    aes_encrypt_block, aes_xor_block);
		rv = crypto2pkcs11_error_number(rc);
		aes_ctx->ac_remainder_len = 0;
		break;
	case CKM_AES_CTR:
		/*
		 * As CKM_AES_CTR is a stream cipher, ctr_mode_final is always
		 * invoked in the xx_update() functions, so we do not need to
		 * call it again here.
		 */
		break;
	case CKM_AES_ECB:
	case CKM_AES_CBC:
		/*
		 * These mechanisms do not have nor require a xx_final
		 * function.
		 */
		break;
	default:
		rv = CKR_MECHANISM_INVALID;
		break;
	}

cleanup:
	switch (rv) {
	case CKR_OK:
		*pulEncryptedDataLen = out.cd_offset;
		break;
	case CKR_BUFFER_TOO_SMALL:
		/* *pulEncryptedDataLen was set earlier */
		break;
	default:
		/* something else failed */
		*pulEncryptedDataLen = 0;
		break;
	}

	(void) pthread_mutex_lock(&session_p->session_mutex);
	soft_aes_free_ctx(aes_ctx);
	session_p->encrypt.context = NULL;
	(void) pthread_mutex_unlock(&session_p->session_mutex);

	return (rv);
}

static CK_RV
soft_aes_cbc_pad_decrypt(aes_ctx_t *aes_ctx, CK_BYTE_PTR pEncryptedData,
    CK_ULONG ulEncryptedDataLen, crypto_data_t *out_orig)
{
	aes_ctx_t *ctx = aes_ctx;
	uint8_t *buf = NULL;
	uint8_t *outbuf = (uint8_t *)out_orig->cd_raw.iov_base;
	crypto_data_t out = *out_orig;
	size_t i;
	int rc;
	CK_RV rv = CKR_OK;
	uint8_t pad_len;
	boolean_t speculate = B_FALSE;

	/*
	 * Just a query for the output size.  When the output buffer is
	 * NULL, we are allowed to return a size slightly larger than
	 * necessary.  We know the output will never be larger than the
	 * input ciphertext, so we use that as an estimate.
	 */
	if (out_orig->cd_raw.iov_base == NULL) {
		out_orig->cd_length = ulEncryptedDataLen;
		return (CKR_OK);
	}

	/*
	 * The output plaintext size will be 1..AES_BLOCK_LEN bytes
	 * smaller than the input ciphertext.  However we cannot know
	 * exactly how much smaller until we decrypt the entire
	 * input ciphertext.  If we are unsure we have enough output buffer
	 * space, we have to allocate our own memory to hold the output,
	 * then see if we have enough room to hold the result.
	 *
	 * Unfortunately, having an output buffer that's too small does
	 * not terminate the operation, nor are we allowed to return
	 * partial results.  Therefore we must also duplicate the initial
	 * aes_ctx so that this can potentially be run again.
	 */
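	/*
	 * E.g. (hypothetical sizes): with 32 bytes of ciphertext and a
	 * 20-byte output buffer, the plaintext could be anywhere from 16
	 * to 31 bytes, so we must decrypt into scratch space first
	 * (speculate) and copy out only if the result actually fits.
	 */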
	if (out_orig->cd_length < ulEncryptedDataLen) {
		void *ks = malloc(aes_ctx->ac_keysched_len);

		ctx = malloc(sizeof (*aes_ctx));
		buf = malloc(ulEncryptedDataLen);
		if (ks == NULL || ctx == NULL || buf == NULL) {
			free(ks);
			free(ctx);
			free(buf);
			return (CKR_HOST_MEMORY);
		}

		bcopy(aes_ctx, ctx, sizeof (*ctx));
		bcopy(aes_ctx->ac_keysched, ks, aes_ctx->ac_keysched_len);
		ctx->ac_keysched = ks;

		out.cd_length = ulEncryptedDataLen;
		out.cd_raw.iov_base = (char *)buf;
		out.cd_raw.iov_len = ulEncryptedDataLen;
		outbuf = buf;

		speculate = B_TRUE;
	}

	rc = aes_decrypt_contiguous_blocks(ctx, (char *)pEncryptedData,
	    ulEncryptedDataLen, &out);
	if (rc != CRYPTO_SUCCESS) {
		out_orig->cd_offset = 0;
		rv = CKR_FUNCTION_FAILED;
		goto done;
	}

	/*
	 * RFC5652 6.3 The amount of padding must be
	 * block_sz - (len mod block_size).  This means
	 * the amount of padding must always be in the
	 * range [1..block_size].
	 */
	pad_len = outbuf[ulEncryptedDataLen - 1];
	if (pad_len == 0 || pad_len > AES_BLOCK_LEN) {
		rv = CKR_ENCRYPTED_DATA_INVALID;
		goto done;
	}
	out.cd_offset -= pad_len;

	/*
	 * Verify pad values, trying to do so in as close to constant
	 * time as possible.
	 */
	for (i = ulEncryptedDataLen - pad_len; i < ulEncryptedDataLen; i++) {
		if (outbuf[i] != pad_len) {
			rv = CKR_ENCRYPTED_DATA_INVALID;
		}
	}
	if (rv != CKR_OK) {
		goto done;
	}

	if (speculate) {
		if (out.cd_offset <= out_orig->cd_length) {
			bcopy(out.cd_raw.iov_base, out_orig->cd_raw.iov_base,
			    out.cd_offset);
		} else {
			rv = CKR_BUFFER_TOO_SMALL;
		}
	}

	/*
	 * No matter what, we report the exact size required.
	 */
	out_orig->cd_offset = out.cd_offset;

done:
	freezero(buf, ulEncryptedDataLen);
	if (ctx != aes_ctx) {
		VERIFY(speculate);
		soft_aes_free_ctx(ctx);
	}

	return (rv);
}

CK_RV
soft_aes_decrypt(soft_session_t *session_p, CK_BYTE_PTR pEncryptedData,
    CK_ULONG ulEncryptedDataLen, CK_BYTE_PTR pData, CK_ULONG_PTR pulDataLen)
{
	aes_ctx_t *aes_ctx = session_p->decrypt.context;
	CK_MECHANISM_TYPE mech = session_p->decrypt.mech.mechanism;
	size_t length_needed;
	size_t remainder;
	int rc = CRYPTO_SUCCESS;
	CK_RV rv = CKR_OK;
	crypto_data_t out = {
		.cd_format = CRYPTO_DATA_RAW,
		.cd_offset = 0,
		.cd_length = *pulDataLen,
		.cd_raw.iov_base = (char *)pData,
		.cd_raw.iov_len = *pulDataLen
	};

	/*
	 * A bit unusual, but it's permissible for ccm and gcm modes to not
	 * decrypt any data.  This ends up being equivalent to CKM_AES_CMAC
	 * or CKM_AES_GMAC of the additional authenticated data (AAD).
	 */
	if ((pEncryptedData == NULL || ulEncryptedDataLen == 0) &&
	    !(aes_ctx->ac_flags & (CCM_MODE|GCM_MODE))) {
		return (CKR_ARGUMENTS_BAD);
	}

	remainder = ulEncryptedDataLen % AES_BLOCK_LEN;

	/*
	 * CTR, CCM, CMAC, and GCM modes do not require the ciphertext
	 * to be a multiple of the AES block size.  Note that while
	 * CKM_AES_CBC_PAD accepts an arbitrarily sized plaintext, the
	 * ciphertext is always a multiple of the AES block size.
	 */
	switch (mech) {
	case CKM_AES_CMAC:
	case CKM_AES_CMAC_GENERAL:
	case CKM_AES_CTR:
	case CKM_AES_CCM:
	case CKM_AES_GCM:
		break;
	default:
		if (remainder != 0) {
			rv = CKR_DATA_LEN_RANGE;
			goto cleanup;
		}
	}

	if (mech == CKM_AES_CBC_PAD) {
		rv = soft_aes_cbc_pad_decrypt(aes_ctx, pEncryptedData,
		    ulEncryptedDataLen, &out);
		if (pData == NULL || rv == CKR_BUFFER_TOO_SMALL) {
			*pulDataLen = out.cd_offset;
			return (rv);
		}
		goto cleanup;
	}

	switch (aes_ctx->ac_flags & (CCM_MODE|GCM_MODE)) {
	case CCM_MODE:
		length_needed = aes_ctx->ac_processed_data_len;
		break;
	case GCM_MODE:
		length_needed = ulEncryptedDataLen - aes_ctx->ac_tag_len;
		break;
	default:
		/*
		 * Note: for CKM_AES_CBC_PAD, we cannot know exactly how much
		 * space is needed for the plaintext until after we decrypt it.
		 * However, it is permissible to return a value 'somewhat'
		 * larger than necessary (PKCS#11 Base Specification, sec 5.2).
		 *
		 * Since CKM_AES_CBC_PAD adds at most AES_BLOCK_LEN bytes to
		 * the plaintext, we report the ciphertext length as the
		 * required plaintext length.  This means we specify at most
		 * AES_BLOCK_LEN additional bytes of memory for the plaintext.
		 *
		 * This behavior is slightly different from the earlier
		 * version of this code which returned the value of
		 * (ulEncryptedDataLen - AES_BLOCK_LEN), which was only ever
		 * correct when the original plaintext was already a multiple
		 * of AES_BLOCK_LEN (i.e. when AES_BLOCK_LEN of padding was
		 * added).  This should not be a concern for existing
		 * consumers -- if they were previously using the value of
		 * *pulDataLen to size the output buffer, the resulting
		 * plaintext would be truncated anytime the original plaintext
		 * wasn't a multiple of AES_BLOCK_LEN.  No consumer should
		 * be relying on such wrong behavior.  More likely they are
		 * using the size of the ciphertext or larger for the
		 * buffer to hold the decrypted plaintext (which is always
		 * acceptable).
		 */
		length_needed = ulEncryptedDataLen;
	}
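
	/*
	 * E.g. (hypothetical numbers): for CKM_AES_GCM, 36 bytes of
	 * ciphertext carrying a 16-byte tag yields length_needed == 20;
	 * for plain CKM_AES_CBC or CKM_AES_ECB the plaintext is always
	 * exactly the size of the ciphertext.
	 */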

	if (pData == NULL) {
		/*
		 * The application can ask for the size of the output buffer
		 * with a NULL output buffer (pData).
		 * C_Decrypt() guarantees pulDataLen != NULL.
		 */
		*pulDataLen = length_needed;
		return (CKR_OK);
	}

	if (*pulDataLen < length_needed) {
		*pulDataLen = length_needed;
		return (CKR_BUFFER_TOO_SMALL);
	}

	if (ulEncryptedDataLen > 0) {
		rv = soft_aes_decrypt_update(session_p, pEncryptedData,
		    ulEncryptedDataLen, pData, pulDataLen);
	}

	if (rv != CKR_OK) {
		rv = CKR_FUNCTION_FAILED;
		goto cleanup;
	}

	/*
	 * Some modes (e.g. CCM and GCM) will output additional data
	 * after the plaintext (such as the MAC).  Update out to
	 * reflect the amount of data in pData for the _final() functions.
	 */
	out.cd_offset = *pulDataLen;

	/*
	 * As CKM_AES_CTR is a stream cipher, ctr_mode_final is always
	 * invoked in the _update() functions, so we do not need to call it
	 * here.
	 */
	if (aes_ctx->ac_flags & CCM_MODE) {
		ASSERT3U(aes_ctx->ac_processed_data_len, ==,
		    aes_ctx->ac_data_len);
		ASSERT3U(aes_ctx->ac_processed_mac_len, ==,
		    aes_ctx->ac_mac_len);

		rc = ccm_decrypt_final((ccm_ctx_t *)aes_ctx, &out,
		    AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block,
		    aes_xor_block);
		rv = crypto2pkcs11_error_number(rc);
	} else if (aes_ctx->ac_flags & GCM_MODE) {
		rc = gcm_decrypt_final((gcm_ctx_t *)aes_ctx, &out,
		    AES_BLOCK_LEN, aes_encrypt_block, aes_xor_block);
		rv = crypto2pkcs11_error_number(rc);
	}

cleanup:
	if (rv == CKR_OK) {
		*pulDataLen = out.cd_offset;
	} else {
		*pulDataLen = 0;
	}

	(void) pthread_mutex_lock(&session_p->session_mutex);
	soft_aes_free_ctx(aes_ctx);
	session_p->decrypt.context = NULL;
	(void) pthread_mutex_unlock(&session_p->session_mutex);

	return (rv);
}

CK_RV
soft_aes_encrypt_update(soft_session_t *session_p, CK_BYTE_PTR pData,
    CK_ULONG ulDataLen, CK_BYTE_PTR pEncryptedData,
    CK_ULONG_PTR pulEncryptedDataLen)
{
	aes_ctx_t *aes_ctx = session_p->encrypt.context;
	crypto_data_t out = {
		.cd_format = CRYPTO_DATA_RAW,
		.cd_offset = 0,
		.cd_length = *pulEncryptedDataLen,
		.cd_raw.iov_base = (char *)pEncryptedData,
		.cd_raw.iov_len = *pulEncryptedDataLen
	};
	CK_MECHANISM_TYPE mech = session_p->encrypt.mech.mechanism;
	CK_RV rv = CKR_OK;
	size_t out_len;
	int rc;

	/*
	 * If pData is NULL, we should have zero bytes to process, and
	 * the aes_encrypt_contiguous_blocks() call will be an effective no-op.
	 */
	IMPLY(pData == NULL, ulDataLen == 0);

	/* Check size of the output buffer */
	switch (mech) {
	case CKM_AES_CMAC:
		/*
		 * The underlying CMAC implementation handles the storing of
		 * extra bytes and does not output any data until *_final,
		 * so do not bother looking at the size of the output
		 * buffer at this time.
		 */
		out_len = 0;
		break;
	case CKM_AES_CTR:
		/*
		 * CTR mode is a stream cipher, so we always output exactly as
		 * much ciphertext as input plaintext
		 */
		out_len = ulDataLen;
		break;
	default:
		out_len = aes_ctx->ac_remainder_len + ulDataLen;

		/*
		 * The number of complete blocks we can encrypt right now.
		 * The underlying implementation will buffer any remaining data
		 * until the next *_update call.
		 */
		out_len &= ~(AES_BLOCK_LEN - 1);
		break;
	}

	if (pEncryptedData == NULL) {
		*pulEncryptedDataLen = out_len;
		return (CKR_OK);
	}

	if (*pulEncryptedDataLen < out_len) {
		*pulEncryptedDataLen = out_len;
		return (CKR_BUFFER_TOO_SMALL);
	}

	rc = aes_encrypt_contiguous_blocks(aes_ctx, (char *)pData, ulDataLen,
	    &out);

	/*
	 * out.cd_offset is set to 0 initially, and the underlying
	 * implementation increments it by the amount of output written,
	 * so we can use its final value as the amount written.
	 */
	*pulEncryptedDataLen = out.cd_offset;

	if (rc != CRYPTO_SUCCESS) {
		return (CKR_FUNCTION_FAILED);
	}

	rv = crypto2pkcs11_error_number(rc);

	return (rv);
}

CK_RV
soft_aes_decrypt_update(soft_session_t *session_p, CK_BYTE_PTR pEncryptedData,
    CK_ULONG ulEncryptedDataLen, CK_BYTE_PTR pData, CK_ULONG_PTR pulDataLen)
{
	aes_ctx_t *aes_ctx = session_p->decrypt.context;
	uint8_t *buffer_block = NULL;
	crypto_data_t out = {
		.cd_format = CRYPTO_DATA_RAW,
		.cd_offset = 0,
		.cd_length = *pulDataLen,
		.cd_raw.iov_base = (char *)pData,
		.cd_raw.iov_len = *pulDataLen
	};
	CK_MECHANISM_TYPE mech = session_p->decrypt.mech.mechanism;
	CK_RV rv = CKR_OK;
	size_t in_len = ulEncryptedDataLen;
	size_t out_len;
	int rc = CRYPTO_SUCCESS;

	switch (mech) {
	case CKM_AES_CCM:
	case CKM_AES_GCM:
		out_len = 0;
		break;
	case CKM_AES_CBC_PAD:
		/*
		 * For CKM_AES_CBC_PAD, we use the existing code for CBC
		 * mode in libsoftcrypto (which itself uses the code in
		 * usr/src/common/crypto/modes for CBC mode).  For
		 * non-padding AES CBC mode, aes_decrypt_contiguous_blocks()
		 * will accumulate ciphertext in aes_ctx->ac_remainder until
		 * there is at least AES_BLOCK_LEN bytes of ciphertext
		 * available to decrypt.  At that point, as many
		 * AES_BLOCK_LEN sized ciphertext blocks as are available
		 * are decrypted.  Any remainder is copied into
		 * aes_ctx->ac_remainder for decryption in subsequent calls
		 * to aes_decrypt_contiguous_blocks().
		 *
		 * When PKCS#7 padding is used, the buffering
		 * aes_decrypt_contiguous_blocks() performs is insufficient.
		 * PKCS#7 padding always adds [1..AES_BLOCK_LEN] bytes of
		 * padding to plaintext, so the resulting ciphertext is always
		 * larger than the input plaintext.  However we cannot know
		 * which block is the final block (and needs its padding
		 * stripped) until C_DecryptFinal() is called.  Additionally,
		 * it is permissible for a caller to use buffers sized to the
		 * output plaintext -- i.e. smaller than the input ciphertext.
		 * This leads to a more complicated buffering/accumulation
		 * strategy than what aes_decrypt_contiguous_blocks() provides
		 * us.
		 *
		 * Our buffering strategy works as follows:
		 * For each call to C_DecryptUpdate, we calculate the
		 * total amount of ciphertext available (buffered plus what's
		 * passed in) as the initial output size (out_len).  Based
		 * on the value of out_len, there are three possibilities:
		 *
		 * 1.  We have less than AES_BLOCK_LEN + 1 bytes of
		 * ciphertext available.  Accumulate the ciphertext in
		 * aes_ctx->ac_remainder.  Note that while we could let
		 * aes_decrypt_contiguous_blocks() buffer the input for us
		 * when we have less than AES_BLOCK_LEN bytes, we would still
		 * need to buffer when we have exactly AES_BLOCK_LEN
		 * bytes available, so we just handle both situations with
		 * one if clause.
		 *
		 * 2.  We have at least AES_BLOCK_LEN + 1 bytes of
		 * ciphertext, and the total amount available is also an
		 * exact multiple of AES_BLOCK_LEN.  We cannot know if the
		 * last block of input is the final block (yet), but we
		 * are an exact multiple of AES_BLOCK_LEN, and we have
		 * at least AES_BLOCK_LEN + 1 bytes available, therefore
		 * there must be at least 2 * AES_BLOCK_LEN bytes of input
		 * ciphertext available.  It also means there's at least one
		 * full block of input ciphertext that can be decrypted.  We
		 * reduce the size of the input (in_len) given to
		 * aes_decrypt_contiguous_blocks() by AES_BLOCK_LEN to prevent
		 * it from decrypting the last full block of data.
		 * aes_decrypt_contiguous_blocks() will then decrypt any
		 * buffered data in aes_ctx->ac_remainder, and then any
		 * input data passed.  Since we have an exact multiple of
		 * AES_BLOCK_LEN, aes_ctx->ac_remainder will be empty
		 * (aes_ctx->ac_remainder_len == 0) once
		 * aes_decrypt_contiguous_blocks() completes, and we can
		 * copy the last block of data into aes_ctx->ac_remainder.
		 *
		 * 3.  We have at least AES_BLOCK_LEN + 1 bytes of
		 * ciphertext, but the total amount available is not an
		 * exact multiple of AES_BLOCK_LEN.  We decrypt all of the
		 * full blocks of data we have.  The remainder will be
		 * less than AES_BLOCK_LEN bytes.  We let
		 * aes_decrypt_contiguous_blocks() buffer the remainder
		 * for us since it would normally do this anyway.  Since there
		 * is a remainder, the full blocks that are present cannot
		 * be the last block, so we can safely decrypt all of them.
		 *
		 * Some things to note:
		 * - The above semantics will cause aes_ctx->ac_remainder to
		 * never accumulate more than AES_BLOCK_LEN bytes of
		 * ciphertext.  Once we reach at least AES_BLOCK_LEN + 1 bytes,
		 * we will decrypt the contents of aes_ctx->ac_remainder by one
		 * of the last two scenarios described above.
		 *
		 * - We must always end up with AES_BLOCK_LEN bytes of data
		 * in aes_ctx->ac_remainder when C_DecryptFinal() is called.
		 * The first and third scenarios above may leave
		 * aes_ctx->ac_remainder with less than AES_BLOCK_LEN bytes,
		 * however the total size of the input ciphertext that's
		 * been decrypted must end up a multiple of AES_BLOCK_LEN.
		 * Therefore, we can always assume when there is a
		 * remainder that more data is coming.  If we do end up
		 * with a remainder that's not AES_BLOCK_LEN bytes long
		 * when C_DecryptFinal() is called, the input is assumed
		 * invalid and we return CKR_DATA_LEN_RANGE (see
		 * soft_aes_decrypt_final()).
		 */
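
		/*
		 * A concrete walk-through (illustrative): suppose
		 * C_DecryptUpdate() is first called with 24 bytes of
		 * ciphertext.  0 buffered + 24 == 24 is not a multiple
		 * of AES_BLOCK_LEN, so one 16-byte block is decrypted
		 * and 8 bytes are buffered (scenario 3).  A second call
		 * with 24 more bytes gives 8 + 24 == 32, an exact
		 * multiple, so the final 16-byte block is held back in
		 * ac_remainder and only the first 16 bytes are output
		 * (scenario 2).
		 */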

		VERIFY3U(aes_ctx->ac_remainder_len, <=, AES_BLOCK_LEN);
		if (in_len >= SIZE_MAX - AES_BLOCK_LEN)
			return (CKR_ENCRYPTED_DATA_LEN_RANGE);

		out_len = aes_ctx->ac_remainder_len + in_len;

		if (out_len <= AES_BLOCK_LEN) {
			/*
			 * The first scenario detailed above, accumulate
			 * ciphertext in ac_remainder_len and return.
			 */
			uint8_t *dest = (uint8_t *)aes_ctx->ac_remainder +
			    aes_ctx->ac_remainder_len;

			bcopy(pEncryptedData, dest, in_len);
			aes_ctx->ac_remainder_len += in_len;
			*pulDataLen = 0;

			/*
			 * Since we aren't writing an output, and are returning
			 * here, we don't need to adjust out_len -- we never
			 * reach the output buffer size checks after the
			 * switch statement.
			 */
			return (CKR_OK);
		} else if (out_len % AES_BLOCK_LEN == 0) {
			/*
			 * The second scenario described above.  The total
			 * amount available is a multiple of AES_BLOCK_LEN, and
			 * we have more than one block.  We reduce the
			 * input size (in_len) by AES_BLOCK_LEN.  We also
			 * reduce the output size (out_len) by AES_BLOCK_LEN
			 * for the output buffer size checks that follow
			 * the switch statement.  In certain situations,
			 * PKCS#11 requires this to be an exact value, so
			 * the size check cannot occur for CKM_AES_CBC_PAD
			 * until after we've determined which scenario we
			 * have.
			 *
			 * Because we never accumulate more than AES_BLOCK_LEN
			 * bytes in aes_ctx->ac_remainder, when we are in
			 * this scenario, the following VERIFYs should always
			 * be true (and serve as a final safeguard against
			 * underflow).
			 */
			VERIFY3U(in_len, >=, AES_BLOCK_LEN);

			buffer_block = pEncryptedData + in_len - AES_BLOCK_LEN;

			in_len -= AES_BLOCK_LEN;

			/*
			 * This else clause explicitly checks
			 * out_len > AES_BLOCK_LEN, so this is also safe.
			 */
			out_len -= AES_BLOCK_LEN;
		} else {
			/*
			 * The third scenario above.  We have at least
			 * AES_BLOCK_LEN + 1 bytes, but the total amount of
			 * input ciphertext available is not an exact
			 * multiple of AES_BLOCK_LEN.  Let
			 * aes_decrypt_contiguous_blocks() handle the
			 * buffering of the remainder.  Update the
			 * output size to reflect the actual amount of output
			 * we want to emit for the checks after the switch
			 * statement.
			 */
			out_len &= ~(AES_BLOCK_LEN - 1);
		}
		break;
	case CKM_AES_CTR:
		/*
		 * CKM_AES_CTR is a stream cipher, so we always output
		 * exactly as much output plaintext as input ciphertext
		 */
		out_len = in_len;
		break;
	default:
		out_len = aes_ctx->ac_remainder_len + in_len;
		out_len &= ~(AES_BLOCK_LEN - 1);
		break;
	}

	/*
	 * C_DecryptUpdate() verifies that pulDataLen is not NULL prior
	 * to calling soft_decrypt_common() (which calls us).
	 */

	if (pData == NULL) {
		/*
		 * If the output buffer (pData) is NULL, that means the
		 * caller is inquiring about the size buffer needed to
		 * complete the C_DecryptUpdate() request.  While we are
		 * permitted to set *pulDataLen to an estimated value that can
		 * be 'slightly' larger than the actual value required,
		 * since we know the exact size we need, we stick with the
		 * exact size.
		 */
		*pulDataLen = out_len;
		return (CKR_OK);
	}

	if (*pulDataLen < out_len) {
		/*
		 * Not an inquiry, but the output buffer isn't large enough.
		 * PKCS#11 requires that this scenario not fail fatally (as
		 * well as return a different error value).  This situation
		 * also requires us to set *pulDataLen to the _exact_ size
		 * required.
		 */
		*pulDataLen = out_len;
		return (CKR_BUFFER_TOO_SMALL);
	}

	rc = aes_decrypt_contiguous_blocks(aes_ctx, (char *)pEncryptedData,
	    in_len, &out);

	if (rc != CRYPTO_SUCCESS) {
		rv = CKR_FUNCTION_FAILED;
		goto done;
	}

	*pulDataLen = out.cd_offset;

	switch (mech) {
	case CKM_AES_CBC_PAD:
		if (buffer_block == NULL) {
			break;
		}

		VERIFY0(aes_ctx->ac_remainder_len);

		/*
		 * We had multiple blocks of data to decrypt with nothing
		 * left over and deferred decrypting the last block of data.
		 * Copy it into aes_ctx->ac_remainder to decrypt on the
		 * next update call (or final).
		 */
		bcopy(buffer_block, aes_ctx->ac_remainder, AES_BLOCK_LEN);
		aes_ctx->ac_remainder_len = AES_BLOCK_LEN;
		break;
	}

done:
	return (rv);
}

CK_RV
soft_aes_encrypt_final(soft_session_t *session_p,
    CK_BYTE_PTR pLastEncryptedPart, CK_ULONG_PTR pulLastEncryptedPartLen)
{
	aes_ctx_t *aes_ctx = session_p->encrypt.context;
	crypto_data_t data = {
		.cd_format = CRYPTO_DATA_RAW,
		.cd_offset = 0,
		.cd_length = *pulLastEncryptedPartLen,
		.cd_raw.iov_base = (char *)pLastEncryptedPart,
		.cd_raw.iov_len = *pulLastEncryptedPartLen
	};
	CK_MECHANISM_TYPE mech = session_p->encrypt.mech.mechanism;
	CK_RV rv = CKR_OK;
	size_t out_len;
	int rc = CRYPTO_SUCCESS;

	switch (mech) {
	case CKM_AES_CBC_PAD:
		/*
		 * We always add 1..AES_BLOCK_LEN of padding to the input
		 * plaintext to round up to a multiple of AES_BLOCK_LEN.
		 * During encryption, we never output a partially encrypted
		 * block (that is, the amount encrypted by each call of
		 * C_EncryptUpdate() is always either 0 or n * AES_BLOCK_LEN).
		 * As a result, at the end of the encryption operation, we
		 * output AES_BLOCK_LEN bytes of data -- this could be a full
		 * block of padding, or a combination of data + padding.
		 */
		out_len = AES_BLOCK_LEN;
		break;
	case CKM_AES_CTR:
		/*
		 * Since CKM_AES_CTR is a stream cipher, we never buffer any
		 * input, so we always have 0 remaining bytes of output.
		 */
		out_len = 0;
		break;
	case CKM_AES_CCM:
		out_len = aes_ctx->ac_remainder_len +
		    aes_ctx->acu.acu_ccm.ccm_mac_len;
		break;
	case CKM_AES_GCM:
		out_len = aes_ctx->ac_remainder_len +
		    aes_ctx->acu.acu_gcm.gcm_tag_len;
		break;
	case CKM_AES_CMAC:
	case CKM_AES_CMAC_GENERAL:
		out_len = AES_BLOCK_LEN;
		break;
	default:
		/*
		 * Every other AES mechanism requires full blocks of
		 * input.  If the input was not an exact multiple of
		 * AES_BLOCK_LEN, it is a fatal error.
		 */
		if (aes_ctx->ac_remainder_len > 0) {
			rv = CKR_DATA_LEN_RANGE;
			goto done;
		}
		out_len = 0;
	}
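
	/*
	 * For example (hypothetical state): a CKM_AES_CCM operation with 5
	 * unprocessed plaintext bytes and a 16-byte MAC reports
	 * out_len == 21, while CKM_AES_CBC_PAD always reports exactly
	 * AES_BLOCK_LEN bytes here.
	 */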

	if (*pulLastEncryptedPartLen < out_len || pLastEncryptedPart == NULL) {
		*pulLastEncryptedPartLen = out_len;
		return ((pLastEncryptedPart == NULL) ?
		    CKR_OK : CKR_BUFFER_TOO_SMALL);
	}

	switch (mech) {
	case CKM_AES_CBC_PAD: {
		char block[AES_BLOCK_LEN] = { 0 };
		size_t padlen = AES_BLOCK_LEN - aes_ctx->ac_remainder_len;

		if (padlen == 0) {
			padlen = AES_BLOCK_LEN;
		}

		(void) memset(block, padlen & 0xff, sizeof (block));
		rc = aes_encrypt_contiguous_blocks(aes_ctx, block,
		    padlen, &data);
		explicit_bzero(block, sizeof (block));
		break;
	}
	case CKM_AES_CTR:
		/*
		 * Since CKM_AES_CTR is a stream cipher, we never
		 * buffer any data, and thus have no remaining data
		 * to output at the end
		 */
		break;
	case CKM_AES_CCM:
		rc = ccm_encrypt_final((ccm_ctx_t *)aes_ctx, &data,
		    AES_BLOCK_LEN, aes_encrypt_block, aes_xor_block);
		break;
	case CKM_AES_GCM:
		rc = gcm_encrypt_final((gcm_ctx_t *)aes_ctx, &data,
		    AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block,
		    aes_xor_block);
		break;
	case CKM_AES_CMAC:
	case CKM_AES_CMAC_GENERAL:
		rc = cmac_mode_final((cbc_ctx_t *)aes_ctx, &data,
		    aes_encrypt_block, aes_xor_block);
		break;
	default:
		break;
	}
	rv = crypto2pkcs11_error_number(rc);

done:
	if (rv == CKR_OK) {
		*pulLastEncryptedPartLen = data.cd_offset;
	}

	soft_aes_free_ctx(aes_ctx);
	session_p->encrypt.context = NULL;
	return (rv);
}

CK_RV
soft_aes_decrypt_final(soft_session_t *session_p, CK_BYTE_PTR pLastPart,
    CK_ULONG_PTR pulLastPartLen)
{
	aes_ctx_t *aes_ctx = session_p->decrypt.context;
	CK_MECHANISM_TYPE mech = session_p->decrypt.mech.mechanism;
	CK_RV rv = CKR_OK;
	int rc = CRYPTO_SUCCESS;
	size_t out_len;
	crypto_data_t out = {
		.cd_format = CRYPTO_DATA_RAW,
		.cd_offset = 0,
		.cd_length = *pulLastPartLen,
		.cd_raw.iov_base = (char *)pLastPart,
		.cd_raw.iov_len = *pulLastPartLen
	};

	switch (mech) {
	case CKM_AES_CBC_PAD:
		/*
		 * PKCS#11 requires that a caller can discover the size of
		 * the output buffer required by calling
		 * C_DecryptFinal(hSession, NULL, &len), which sets
		 * *pulLastPartLen to the size required.  However, it also
		 * allows a call to C_DecryptFinal with a buffer (i.e.
		 * pLastPart != NULL) that is too small to return
		 * CKR_BUFFER_TOO_SMALL with *pulLastPartLen set to the
		 * _exact_ size required (when pLastPart is NULL, the
		 * implementation is allowed to set a 'slightly' larger
		 * value than is strictly necessary).  In either case, the
		 * caller is allowed to retry the operation (the operation
		 * is not terminated).
		 *
		 * With PKCS#7 padding, we cannot determine the exact size of
		 * the output until we decrypt the final block.  As such, the
		 * first time for a given decrypt operation we are called,
		 * we decrypt the final block and stash it in the aes_ctx
		 * remainder block.  On any subsequent calls in the
		 * current decrypt operation, we then can use the decrypted
		 * block as necessary to provide the correct semantics.
		 *
		 * The cleanup of aes_ctx when the operation terminates
		 * will take care of clearing out aes_ctx->ac_remainder_len.
		 */
		if ((aes_ctx->ac_flags & P11_DECRYPTED) == 0) {
			uint8_t block[AES_BLOCK_LEN] = { 0 };
			crypto_data_t block_out = {
				.cd_format = CRYPTO_DATA_RAW,
				.cd_offset = 0,
				.cd_length = sizeof (block),
				.cd_raw.iov_base = (char *)block,
				.cd_raw.iov_len = sizeof (block)
			};
			size_t amt, i;
			uint8_t pad_len;

			if (aes_ctx->ac_remainder_len != AES_BLOCK_LEN) {
				return (CKR_DATA_LEN_RANGE);
			}

			rc = aes_decrypt_contiguous_blocks(aes_ctx,
			    (char *)block, 0, &block_out);
			if (rc != CRYPTO_SUCCESS) {
				explicit_bzero(block, sizeof (block));
				return (CKR_FUNCTION_FAILED);
			}

			pad_len = block[AES_BLOCK_LEN - 1];

			/*
			 * RFC5652 6.3 The amount of padding must be
			 * block_sz - (len mod block_size).  This means
			 * the amount of padding must always be in the
			 * range [1..block_size].
			 */
			if (pad_len == 0 || pad_len > AES_BLOCK_LEN) {
				rv = CKR_ENCRYPTED_DATA_INVALID;
				explicit_bzero(block, sizeof (block));
				goto done;
			}
			amt = AES_BLOCK_LEN - pad_len;

			/*
			 * Verify the padding is correct.  Try to do so
			 * in as constant a time as possible.
			 */
			for (i = amt; i < AES_BLOCK_LEN; i++) {
				if (block[i] != pad_len) {
					rv = CKR_ENCRYPTED_DATA_INVALID;
				}
			}
			if (rv != CKR_OK) {
				explicit_bzero(block, sizeof (block));
				goto done;
			}

			bcopy(block, aes_ctx->ac_remainder, amt);
			explicit_bzero(block, sizeof (block));

			aes_ctx->ac_flags |= P11_DECRYPTED;
			aes_ctx->ac_remainder_len = amt;
		}

		out_len = aes_ctx->ac_remainder_len;
		break;
	case CKM_AES_CTR:
		/*
		 * Since CKM_AES_CTR is a stream cipher, we never have
		 * any remaining bytes to output.
		 */
		out_len = 0;
		break;
	case CKM_AES_CCM:
		out_len = aes_ctx->ac_data_len;
		break;
	case CKM_AES_GCM:
		out_len = aes_ctx->acu.acu_gcm.gcm_processed_data_len -
		    aes_ctx->acu.acu_gcm.gcm_tag_len;
		break;
	default:
		/*
		 * The remaining mechanisms require an exact multiple of
		 * AES_BLOCK_LEN of ciphertext.  Any other value is an error.
		 */
		if (aes_ctx->ac_remainder_len > 0) {
			rv = CKR_DATA_LEN_RANGE;
			goto done;
		}
		out_len = 0;
		break;
	}

	if (*pulLastPartLen < out_len || pLastPart == NULL) {
		*pulLastPartLen = out_len;
		return ((pLastPart == NULL) ? CKR_OK : CKR_BUFFER_TOO_SMALL);
	}

	switch (mech) {
	case CKM_AES_CBC_PAD:
		*pulLastPartLen = out_len;
		if (out_len == 0) {
			break;
		}
		bcopy(aes_ctx->ac_remainder, pLastPart, out_len);
		out.cd_offset += out_len;
		break;
	case CKM_AES_CCM:
		ASSERT3U(aes_ctx->ac_processed_data_len, ==, out_len);
		ASSERT3U(aes_ctx->ac_processed_mac_len, ==,
		    aes_ctx->ac_mac_len);

		rc = ccm_decrypt_final((ccm_ctx_t *)aes_ctx, &out,
		    AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block,
		    aes_xor_block);
		break;
	case CKM_AES_GCM:
		rc = gcm_decrypt_final((gcm_ctx_t *)aes_ctx, &out,
		    AES_BLOCK_LEN, aes_encrypt_block, aes_xor_block);
		break;
	default:
		break;
	}

	VERIFY3U(out.cd_offset, ==, out_len);
	rv = crypto2pkcs11_error_number(rc);

done:
	if (rv == CKR_OK) {
		*pulLastPartLen = out.cd_offset;
	}

	soft_aes_free_ctx(aes_ctx);
	session_p->decrypt.context = NULL;

	return (rv);
}

/*
 * Allocate and initialize AES contexts for sign and verify operations
 * (including the underlying encryption context needed to sign or verify) --
 * called by C_SignInit() and C_VerifyInit() to perform the CKM_AES_* MAC
 * mechanisms.  For general-length AES MAC, also validate the MAC length.
 */
CK_RV
soft_aes_sign_verify_init_common(soft_session_t *session_p,
    CK_MECHANISM_PTR pMechanism, soft_object_t *key_p, boolean_t sign_op)
{
	soft_aes_sign_ctx_t *ctx = NULL;
	/* For AES CMAC (the only AES MAC currently), iv is always 0 */
	CK_BYTE iv[AES_BLOCK_LEN] = { 0 };
	CK_MECHANISM encrypt_mech = {
		.mechanism = CKM_AES_CMAC,
		.pParameter = iv,
		.ulParameterLen = sizeof (iv)
	};
	CK_RV rv;
	size_t mac_len = AES_BLOCK_LEN;

	if (key_p->key_type != CKK_AES)
		return (CKR_KEY_TYPE_INCONSISTENT);

	/* C_{Sign,Verify}Init() validate pMechanism != NULL */
	if (pMechanism->mechanism == CKM_AES_CMAC_GENERAL) {
		if (pMechanism->pParameter == NULL) {
			return (CKR_MECHANISM_PARAM_INVALID);
		}

		mac_len = *(CK_MAC_GENERAL_PARAMS *)pMechanism->pParameter;

		if (mac_len > AES_BLOCK_LEN) {
			return (CKR_MECHANISM_PARAM_INVALID);
		}
	}
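
	/*
	 * E.g. (illustrative): a caller wanting a 64-bit MAC would pass
	 * CKM_AES_CMAC_GENERAL with:
	 *
	 *	CK_MAC_GENERAL_PARAMS len = 8;
	 *	CK_MECHANISM mech = {
	 *		.mechanism = CKM_AES_CMAC_GENERAL,
	 *		.pParameter = &len,
	 *		.ulParameterLen = sizeof (len)
	 *	};
	 *
	 * which sets mac_len to 8, so only the leftmost 8 bytes of the
	 * final CMAC block are returned.
	 */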

	ctx = calloc(1, sizeof (*ctx));
	if (ctx == NULL) {
		return (CKR_HOST_MEMORY);
	}

	rv = soft_aes_check_mech_param(pMechanism, &ctx->aes_ctx);
	if (rv != CKR_OK) {
		/* the error path at done: frees ctx->aes_ctx for us */
		goto done;
	}

	if ((rv = soft_encrypt_init_internal(session_p, &encrypt_mech,
	    key_p)) != CKR_OK) {
		goto done;
	}

	ctx->mac_len = mac_len;

	(void) pthread_mutex_lock(&session_p->session_mutex);

	if (sign_op) {
		session_p->sign.context = ctx;
		session_p->sign.mech.mechanism = pMechanism->mechanism;
	} else {
		session_p->verify.context = ctx;
		session_p->verify.mech.mechanism = pMechanism->mechanism;
	}

	(void) pthread_mutex_unlock(&session_p->session_mutex);

done:
	if (rv != CKR_OK) {
		soft_aes_free_ctx(ctx->aes_ctx);
		free(ctx);
	}

	return (rv);
}

CK_RV
soft_aes_sign_verify_common(soft_session_t *session_p, CK_BYTE_PTR pData,
    CK_ULONG ulDataLen, CK_BYTE_PTR pSigned, CK_ULONG_PTR pulSignedLen,
    boolean_t sign_op, boolean_t Final)
{
	soft_aes_sign_ctx_t *soft_aes_ctx_sign_verify;
	CK_RV rv = CKR_OK;	/* initialized for the mac_len == 0 early exit */
	CK_BYTE *pEncrypted = NULL;
	CK_ULONG ulEncryptedLen = AES_BLOCK_LEN;
	CK_BYTE last_block[AES_BLOCK_LEN];

	if (sign_op) {
		soft_aes_ctx_sign_verify =
		    (soft_aes_sign_ctx_t *)session_p->sign.context;

		if (soft_aes_ctx_sign_verify->mac_len == 0) {
			*pulSignedLen = 0;
			goto clean_exit;
		}

		/* Application asks for the length of the output buffer. */
		if (pSigned == NULL) {
			*pulSignedLen = soft_aes_ctx_sign_verify->mac_len;
			return (CKR_OK);
		}

		/* Is the application-supplied buffer large enough? */
		if (*pulSignedLen < soft_aes_ctx_sign_verify->mac_len) {
			*pulSignedLen = soft_aes_ctx_sign_verify->mac_len;
			return (CKR_BUFFER_TOO_SMALL);
		}
	} else {
		soft_aes_ctx_sign_verify =
		    (soft_aes_sign_ctx_t *)session_p->verify.context;
	}

	if (Final) {
		rv = soft_encrypt_final(session_p, last_block,
		    &ulEncryptedLen);
	} else {
		rv = soft_encrypt(session_p, pData, ulDataLen,
		    last_block, &ulEncryptedLen);
	}

	if (rv == CKR_OK) {
		*pulSignedLen = soft_aes_ctx_sign_verify->mac_len;

		/* the leftmost mac_len bytes of last_block are our MAC */
		(void) memcpy(pSigned, last_block, *pulSignedLen);
	}

clean_exit:

	(void) pthread_mutex_lock(&session_p->session_mutex);

	/* soft_encrypt_common() has freed the encrypt context */
	if (sign_op) {
		free(session_p->sign.context);
		session_p->sign.context = NULL;
	} else {
		free(session_p->verify.context);
		session_p->verify.context = NULL;
	}
	session_p->encrypt.flags = 0;

	(void) pthread_mutex_unlock(&session_p->session_mutex);

	if (pEncrypted) {
		free(pEncrypted);
	}

	return (rv);
}

/*
 * Called by soft_sign_update()
 */
CK_RV
soft_aes_mac_sign_verify_update(soft_session_t *session_p, CK_BYTE_PTR pPart,
    CK_ULONG ulPartLen)
{
	CK_BYTE buf[AES_BLOCK_LEN];
	CK_ULONG ulEncryptedLen = AES_BLOCK_LEN;
	CK_RV rv;

	rv = soft_encrypt_update(session_p, pPart, ulPartLen,
	    buf, &ulEncryptedLen);
	explicit_bzero(buf, sizeof (buf));

	return (rv);
}
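
/*
 * A sketch of the multi-part flow this supports (caller side, illustrative):
 * C_SignInit(hSession, &mech, hKey), then one
 * C_SignUpdate(hSession, part, partlen) per fragment (each arriving here
 * via soft_sign_update()), then C_SignFinal(hSession, sig, &siglen), which
 * returns the leftmost mac_len bytes of the CMAC.
 */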

void
soft_aes_free_ctx(aes_ctx_t *ctx)
{
	size_t len = 0;

	if (ctx == NULL)
		return;

	if (ctx->ac_flags & ECB_MODE) {
		len = sizeof (ecb_ctx_t);
	} else if (ctx->ac_flags & (CBC_MODE|CMAC_MODE)) {
		len = sizeof (cbc_ctx_t);
	} else if (ctx->ac_flags & CTR_MODE) {
		len = sizeof (ctr_ctx_t);
	} else if (ctx->ac_flags & CCM_MODE) {
		len = sizeof (ccm_ctx_t);
	} else if (ctx->ac_flags & GCM_MODE) {
		len = sizeof (gcm_ctx_t);
	}

	freezero(ctx->ac_keysched, ctx->ac_keysched_len);
	freezero(ctx, len);
}