/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * This file contains the core framework routines for the
 * kernel cryptographic framework. These routines are at the
 * middle layer, between the kernel API/ioctls and the SPI.
 */

#include <sys/types.h>
#include <sys/errno.h>
#include <sys/kmem.h>
#include <sys/proc.h>
#include <sys/cpuvar.h>
#include <sys/cpupart.h>
#include <sys/ksynch.h>
#include <sys/callb.h>
#include <sys/cmn_err.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/kstat.h>
#include <sys/crypto/common.h>
#include <sys/crypto/impl.h>
#include <sys/crypto/sched_impl.h>
#include <sys/crypto/api.h>
#include <sys/crypto/spi.h>
#include <sys/taskq_impl.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>


kcf_global_swq_t *gswq;	/* Global software queue */

/* Thread pool related variables */
static kcf_pool_t *kcfpool;	/* Thread pool of kcfd LWPs */
int kcf_maxthreads;
int kcf_minthreads;
int kcf_thr_multiple = 2;	/* Boot-time tunable for experimentation */
static ulong_t kcf_idlethr_timeout;
static boolean_t kcf_sched_running = B_FALSE;
#define	KCF_DEFAULT_THRTIMEOUT	60000000	/* 60 seconds */

/* kmem caches used by the scheduler */
static struct kmem_cache *kcf_sreq_cache;
static struct kmem_cache *kcf_areq_cache;
static struct kmem_cache *kcf_context_cache;

/* Global request ID table */
static kcf_reqid_table_t *kcf_reqid_table[REQID_TABLES];

/* KCF stats. Not protected. */
static kcf_stats_t kcf_ksdata = {
	{ "total threads in pool",	KSTAT_DATA_UINT32},
	{ "idle threads in pool",	KSTAT_DATA_UINT32},
	{ "min threads in pool",	KSTAT_DATA_UINT32},
	{ "max threads in pool",	KSTAT_DATA_UINT32},
	{ "requests in gswq",		KSTAT_DATA_UINT32},
	{ "max requests in gswq",	KSTAT_DATA_UINT32},
	{ "threads for HW taskq",	KSTAT_DATA_UINT32},
	{ "minalloc for HW taskq",	KSTAT_DATA_UINT32},
	{ "maxalloc for HW taskq",	KSTAT_DATA_UINT32}
};

static kstat_t *kcf_misc_kstat = NULL;
ulong_t kcf_swprov_hndl = 0;

static kcf_areq_node_t *kcf_areqnode_alloc(kcf_provider_desc_t *,
    kcf_context_t *, crypto_call_req_t *, kcf_req_params_t *, boolean_t);
static int kcf_disp_sw_request(kcf_areq_node_t *);
static void process_req_hwp(void *);
static kcf_areq_node_t *kcf_dequeue();
static int kcf_enqueue(kcf_areq_node_t *);
static void kcf_failover_thread();
static void kcfpool_alloc();
static void kcf_reqid_delete(kcf_areq_node_t *areq);
static crypto_req_id_t kcf_reqid_insert(kcf_areq_node_t *areq);
static int kcf_misc_kstat_update(kstat_t *ksp, int rw);
static void compute_min_max_threads();


/*
 * Create a new context.
 */
crypto_ctx_t *
kcf_new_ctx(crypto_call_req_t *crq, kcf_provider_desc_t *pd,
    crypto_session_id_t sid)
{
	crypto_ctx_t *ctx;
	kcf_context_t *kcf_ctx;

	kcf_ctx = kmem_cache_alloc(kcf_context_cache,
	    (crq == NULL) ? KM_SLEEP : KM_NOSLEEP);
	if (kcf_ctx == NULL)
		return (NULL);

	/* initialize the context for the consumer */
	kcf_ctx->kc_refcnt = 1;
	kcf_ctx->kc_req_chain_first = NULL;
	kcf_ctx->kc_req_chain_last = NULL;
	kcf_ctx->kc_secondctx = NULL;
	KCF_PROV_REFHOLD(pd);
	kcf_ctx->kc_prov_desc = pd;
	kcf_ctx->kc_sw_prov_desc = NULL;
	kcf_ctx->kc_mech = NULL;

	ctx = &kcf_ctx->kc_glbl_ctx;
	ctx->cc_provider = pd->pd_prov_handle;
	ctx->cc_session = sid;
	ctx->cc_provider_private = NULL;
	ctx->cc_framework_private = (void *)kcf_ctx;
	ctx->cc_flags = 0;
	ctx->cc_opstate = NULL;

	return (ctx);
}
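
/*
 * Illustration only (not part of the framework code): a caller would
 * typically pair kcf_new_ctx() with KCF_CONTEXT_REFRELE() on the
 * embedded kcf_context_t if a subsequent init call fails. A minimal
 * sketch, assuming pd, sid and crq are already in hand:
 *
 *	crypto_ctx_t *ctx;
 *
 *	if ((ctx = kcf_new_ctx(crq, pd, sid)) == NULL)
 *		return (CRYPTO_HOST_MEMORY);
 *	...
 *	KCF_CONTEXT_REFRELE((kcf_context_t *)ctx->cc_framework_private);
 */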

/*
 * Allocate a new async request node.
 *
 * ictx - Framework private context pointer
 * crq - Has callback function and argument. Must be non-NULL.
 * req - The parameters to pass to the SPI
 */
static kcf_areq_node_t *
kcf_areqnode_alloc(kcf_provider_desc_t *pd, kcf_context_t *ictx,
    crypto_call_req_t *crq, kcf_req_params_t *req, boolean_t isdual)
{
	kcf_areq_node_t	*arptr, *areq;

	ASSERT(crq != NULL);
	arptr = kmem_cache_alloc(kcf_areq_cache, KM_NOSLEEP);
	if (arptr == NULL)
		return (NULL);

	arptr->an_state = REQ_ALLOCATED;
	arptr->an_reqarg = *crq;
	arptr->an_params = *req;
	arptr->an_context = ictx;
	arptr->an_isdual = isdual;

	arptr->an_next = arptr->an_prev = NULL;
	KCF_PROV_REFHOLD(pd);
	arptr->an_provider = pd;
	arptr->an_tried_plist = NULL;
	arptr->an_refcnt = 1;
	arptr->an_idnext = arptr->an_idprev = NULL;

	/*
	 * Requests for context-less operations do not use the
	 * an_is_my_turn and an_ctxchain_next fields.
	 */
	if (ictx == NULL)
		return (arptr);

	KCF_CONTEXT_REFHOLD(ictx);
	/*
	 * Chain this request to the context.
	 */
	mutex_enter(&ictx->kc_in_use_lock);
	arptr->an_ctxchain_next = NULL;
	if ((areq = ictx->kc_req_chain_last) == NULL) {
		arptr->an_is_my_turn = B_TRUE;
		ictx->kc_req_chain_last =
		    ictx->kc_req_chain_first = arptr;
	} else {
		ASSERT(ictx->kc_req_chain_first != NULL);
		arptr->an_is_my_turn = B_FALSE;
		/* Insert the new request at the end of the chain. */
		areq->an_ctxchain_next = arptr;
		ictx->kc_req_chain_last = arptr;
	}
	mutex_exit(&ictx->kc_in_use_lock);

	return (arptr);
}

/*
 * Queue the request node and do one of the following:
 *	- If there is an idle thread, signal it to run.
 *	- If there is no idle thread and max running threads is not
 *	  reached, signal the creator thread for more threads.
 *
 * If the two conditions above are not met, we don't need to do
 * anything. The request will be picked up by one of the
 * worker threads when it becomes available.
 */
static int
kcf_disp_sw_request(kcf_areq_node_t *areq)
{
	int err;
	int cnt = 0;

	if ((err = kcf_enqueue(areq)) != 0)
		return (err);

	if (kcfpool->kp_idlethreads > 0) {
		/* Signal an idle thread to run */
		mutex_enter(&gswq->gs_lock);
		cv_signal(&gswq->gs_cv);
		mutex_exit(&gswq->gs_lock);

		return (CRYPTO_QUEUED);
	}

	/*
	 * We keep the number of running threads to be at
	 * kcf_minthreads to reduce gs_lock contention.
	 */
	cnt = kcf_minthreads -
	    (kcfpool->kp_threads - kcfpool->kp_blockedthreads);
	if (cnt > 0) {
		/*
		 * The following ensures the number of threads in pool
		 * does not exceed kcf_maxthreads.
		 */
		cnt = min(cnt, kcf_maxthreads - kcfpool->kp_threads);
		if (cnt > 0) {
			/* Signal the creator thread for more threads */
			mutex_enter(&kcfpool->kp_user_lock);
			if (!kcfpool->kp_signal_create_thread) {
				kcfpool->kp_signal_create_thread = B_TRUE;
				kcfpool->kp_nthrs = cnt;
				cv_signal(&kcfpool->kp_user_cv);
			}
			mutex_exit(&kcfpool->kp_user_lock);
		}
	}

	return (CRYPTO_QUEUED);
}
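
/*
 * Worked example (illustrative) for the arithmetic above: with
 * kcf_minthreads == 4, kp_threads == 5 and kp_blockedthreads == 2,
 * only three threads can make progress, so cnt = 4 - (5 - 2) = 1 and
 * one more thread is requested from the creator thread, subject to
 * the kcf_maxthreads cap.
 */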

/*
 * This routine is called by the taskq associated with
 * each hardware provider. We notify the kernel consumer
 * via the callback routine in case of CRYPTO_SUCCESS or
 * a failure.
 *
 * A request can be of type kcf_areq_node_t or of type
 * kcf_sreq_node_t.
 */
static void
process_req_hwp(void *ireq)
{
	int error = 0;
	crypto_ctx_t *ctx;
	kcf_call_type_t ctype;
	kcf_provider_desc_t *pd;
	kcf_areq_node_t *areq = (kcf_areq_node_t *)ireq;
	kcf_sreq_node_t *sreq = (kcf_sreq_node_t *)ireq;

	pd = ((ctype = GET_REQ_TYPE(ireq)) == CRYPTO_SYNCH) ?
	    sreq->sn_provider : areq->an_provider;

	mutex_enter(&pd->pd_lock);

	/*
	 * Wait if flow control is in effect for the provider. A
	 * CRYPTO_PROVIDER_READY or CRYPTO_PROVIDER_FAILED
	 * notification will signal us. We also get signaled if
	 * the provider is unregistering.
	 */
	while (pd->pd_state == KCF_PROV_BUSY)
		cv_wait(&pd->pd_resume_cv, &pd->pd_lock);

	/*
	 * Bump the internal reference count while the request is being
	 * processed. This is how we know when it's safe to unregister
	 * a provider. This step must precede the pd_state check below.
	 */
	KCF_PROV_IREFHOLD(pd);

	/*
	 * Fail the request if the provider has failed. We return a
	 * recoverable error and the notified clients attempt any
	 * recovery. For async clients this is done in kcf_aop_done()
	 * and for sync clients it is done in the k-api routines.
	 */
	if (pd->pd_state >= KCF_PROV_FAILED) {
		mutex_exit(&pd->pd_lock);
		error = CRYPTO_DEVICE_ERROR;
		goto bail;
	}

	mutex_exit(&pd->pd_lock);

	if (ctype == CRYPTO_SYNCH) {
		mutex_enter(&sreq->sn_lock);
		sreq->sn_state = REQ_INPROGRESS;
		mutex_exit(&sreq->sn_lock);

		ctx = sreq->sn_context ? &sreq->sn_context->kc_glbl_ctx : NULL;
		error = common_submit_request(sreq->sn_provider, ctx,
		    sreq->sn_params, sreq);
	} else {
		kcf_context_t *ictx;
		ASSERT(ctype == CRYPTO_ASYNCH);

		/*
		 * We are in the per-hardware provider thread context and
		 * hence can sleep. Note that the caller would have done
		 * a taskq_dispatch(..., TQ_NOSLEEP) and would have returned.
		 */
		ctx = (ictx = areq->an_context) ? &ictx->kc_glbl_ctx : NULL;

		mutex_enter(&areq->an_lock);
		/*
		 * We need to maintain ordering for multi-part requests.
		 * an_is_my_turn is set to B_TRUE initially for a request
		 * when it is enqueued and there are no other requests
		 * for that context. It is set later from kcf_aop_done() when
		 * the request before us in the chain of requests for the
		 * context completes. We get signaled at that point.
		 */
		if (ictx != NULL) {
			ASSERT(ictx->kc_prov_desc == areq->an_provider);

			while (areq->an_is_my_turn == B_FALSE) {
				cv_wait(&areq->an_turn_cv, &areq->an_lock);
			}
		}
		areq->an_state = REQ_INPROGRESS;
		mutex_exit(&areq->an_lock);

		error = common_submit_request(areq->an_provider, ctx,
		    &areq->an_params, areq);
	}

bail:
	if (error == CRYPTO_QUEUED) {
		/*
		 * The request is queued by the provider and we should
		 * get a crypto_op_notification() from the provider later.
		 * We notify the consumer at that time.
		 */
		return;
	} else {	/* CRYPTO_SUCCESS or other failure */
		KCF_PROV_IREFRELE(pd);
		if (ctype == CRYPTO_SYNCH)
			kcf_sop_done(sreq, error);
		else
			kcf_aop_done(areq, error);
	}
}
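
/*
 * Illustrative note on process_req_hwp(): casting ireq to both request
 * types up front is safe because GET_REQ_TYPE() only inspects the
 * request-type field (sn_type/an_type, set to CRYPTO_SYNCH or
 * CRYPTO_ASYNCH by the kmem cache constructors later in this file)
 * before either pointer is used any further, e.g.
 *
 *	if (GET_REQ_TYPE(ireq) == CRYPTO_SYNCH)
 *		pd = sreq->sn_provider;
 *	else
 *		pd = areq->an_provider;
 */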

/*
 * This routine checks if a request can be retried on another
 * provider. If true, mech1 is initialized to point to the mechanism
 * structure. mech2 is also initialized in case of a dual operation. fg
 * is initialized to the correct crypto_func_group_t bit flag. They are
 * initialized by this routine, so that the caller can pass them to a
 * kcf_get_mech_provider() or kcf_get_dual_provider() with no further change.
 *
 * We check that the request is for an init or atomic routine and that
 * it is for one of the operation groups used from the k-api.
 */
static boolean_t
can_resubmit(kcf_areq_node_t *areq, crypto_mechanism_t **mech1,
    crypto_mechanism_t **mech2, crypto_func_group_t *fg)
{
	kcf_req_params_t *params;
	kcf_op_type_t optype;

	params = &areq->an_params;
	optype = params->rp_optype;

	if (!(IS_INIT_OP(optype) || IS_ATOMIC_OP(optype)))
		return (B_FALSE);

	switch (params->rp_opgrp) {
	case KCF_OG_DIGEST: {
		kcf_digest_ops_params_t *dops = &params->rp_u.digest_params;

		dops->do_mech.cm_type = dops->do_framework_mechtype;
		*mech1 = &dops->do_mech;
		*fg = (optype == KCF_OP_INIT) ? CRYPTO_FG_DIGEST :
		    CRYPTO_FG_DIGEST_ATOMIC;
		break;
	}

	case KCF_OG_MAC: {
		kcf_mac_ops_params_t *mops = &params->rp_u.mac_params;

		mops->mo_mech.cm_type = mops->mo_framework_mechtype;
		*mech1 = &mops->mo_mech;
		*fg = (optype == KCF_OP_INIT) ? CRYPTO_FG_MAC :
		    CRYPTO_FG_MAC_ATOMIC;
		break;
	}

	case KCF_OG_SIGN: {
		kcf_sign_ops_params_t *sops = &params->rp_u.sign_params;

		sops->so_mech.cm_type = sops->so_framework_mechtype;
		*mech1 = &sops->so_mech;
		switch (optype) {
		case KCF_OP_INIT:
			*fg = CRYPTO_FG_SIGN;
			break;
		case KCF_OP_ATOMIC:
			*fg = CRYPTO_FG_SIGN_ATOMIC;
			break;
		default:
			ASSERT(optype == KCF_OP_SIGN_RECOVER_ATOMIC);
			*fg = CRYPTO_FG_SIGN_RECOVER_ATOMIC;
		}
		break;
	}

	case KCF_OG_VERIFY: {
		kcf_verify_ops_params_t *vops = &params->rp_u.verify_params;

		vops->vo_mech.cm_type = vops->vo_framework_mechtype;
		*mech1 = &vops->vo_mech;
		switch (optype) {
		case KCF_OP_INIT:
			*fg = CRYPTO_FG_VERIFY;
			break;
		case KCF_OP_ATOMIC:
			*fg = CRYPTO_FG_VERIFY_ATOMIC;
			break;
		default:
			ASSERT(optype == KCF_OP_VERIFY_RECOVER_ATOMIC);
			*fg = CRYPTO_FG_VERIFY_RECOVER_ATOMIC;
		}
		break;
	}

	case KCF_OG_ENCRYPT: {
		kcf_encrypt_ops_params_t *eops = &params->rp_u.encrypt_params;

		eops->eo_mech.cm_type = eops->eo_framework_mechtype;
		*mech1 = &eops->eo_mech;
		*fg = (optype == KCF_OP_INIT) ? CRYPTO_FG_ENCRYPT :
		    CRYPTO_FG_ENCRYPT_ATOMIC;
		break;
	}

	case KCF_OG_DECRYPT: {
		kcf_decrypt_ops_params_t *dcrops = &params->rp_u.decrypt_params;

		dcrops->dop_mech.cm_type = dcrops->dop_framework_mechtype;
		*mech1 = &dcrops->dop_mech;
		*fg = (optype == KCF_OP_INIT) ? CRYPTO_FG_DECRYPT :
		    CRYPTO_FG_DECRYPT_ATOMIC;
		break;
	}

	case KCF_OG_ENCRYPT_MAC: {
		kcf_encrypt_mac_ops_params_t *eops =
		    &params->rp_u.encrypt_mac_params;

		eops->em_encr_mech.cm_type = eops->em_framework_encr_mechtype;
		*mech1 = &eops->em_encr_mech;
		eops->em_mac_mech.cm_type = eops->em_framework_mac_mechtype;
		*mech2 = &eops->em_mac_mech;
		*fg = (optype == KCF_OP_INIT) ? CRYPTO_FG_ENCRYPT_MAC :
		    CRYPTO_FG_ENCRYPT_MAC_ATOMIC;
		break;
	}

	case KCF_OG_MAC_DECRYPT: {
		kcf_mac_decrypt_ops_params_t *dops =
		    &params->rp_u.mac_decrypt_params;

		dops->md_mac_mech.cm_type = dops->md_framework_mac_mechtype;
		*mech1 = &dops->md_mac_mech;
		dops->md_decr_mech.cm_type = dops->md_framework_decr_mechtype;
		*mech2 = &dops->md_decr_mech;
		*fg = (optype == KCF_OP_INIT) ? CRYPTO_FG_MAC_DECRYPT :
		    CRYPTO_FG_MAC_DECRYPT_ATOMIC;
		break;
	}

	default:
		return (B_FALSE);
	}

	return (B_TRUE);
}
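
/*
 * Example (illustrative): for a KCF_OG_DIGEST request with
 * optype == KCF_OP_INIT, can_resubmit() returns B_TRUE with *mech1
 * pointing at dops->do_mech and *fg set to CRYPTO_FG_DIGEST, ready to
 * be handed to kcf_get_mech_provider() unchanged. *mech2 is left
 * untouched for such single-mechanism operations.
 */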

/*
 * This routine is called when a request to a provider has failed
 * with a recoverable error. This routine tries to find another provider
 * and dispatches the request to the new provider, if one is available.
 * We reuse the request structure.
 *
 * A return value of NULL from kcf_get_mech_provider() indicates
 * we have tried the last provider.
 */
static int
kcf_resubmit_request(kcf_areq_node_t *areq)
{
	int error = CRYPTO_FAILED;
	kcf_context_t *ictx;
	kcf_provider_desc_t *old_pd;
	kcf_provider_desc_t *new_pd;
	crypto_mechanism_t *mech1 = NULL, *mech2 = NULL;
	crypto_mech_type_t prov_mt1, prov_mt2;
	crypto_func_group_t fg;

	if (!can_resubmit(areq, &mech1, &mech2, &fg))
		return (error);

	old_pd = areq->an_provider;
	/*
	 * Add old_pd to the list of providers already tried. We release
	 * the hold on old_pd (from the earlier kcf_get_mech_provider()) in
	 * kcf_free_triedlist().
	 */
	if (kcf_insert_triedlist(&areq->an_tried_plist, old_pd,
	    KM_NOSLEEP) == NULL)
		return (error);

	if (mech1 && !mech2) {
		new_pd = kcf_get_mech_provider(mech1->cm_type, NULL, &error,
		    areq->an_tried_plist, fg,
		    (areq->an_reqarg.cr_flag & CRYPTO_RESTRICTED), 0);
	} else {
		ASSERT(mech1 != NULL && mech2 != NULL);

		new_pd = kcf_get_dual_provider(mech1, mech2, NULL, &prov_mt1,
		    &prov_mt2, &error, areq->an_tried_plist, fg, fg,
		    (areq->an_reqarg.cr_flag & CRYPTO_RESTRICTED), 0);
	}

	if (new_pd == NULL)
		return (error);

	/*
	 * We reuse the old context by resetting provider specific
	 * fields in it.
	 */
	if ((ictx = areq->an_context) != NULL) {
		crypto_ctx_t *ctx;

		ASSERT(old_pd == ictx->kc_prov_desc);
		KCF_PROV_REFRELE(ictx->kc_prov_desc);
		KCF_PROV_REFHOLD(new_pd);
		ictx->kc_prov_desc = new_pd;

		ctx = &ictx->kc_glbl_ctx;
		ctx->cc_provider = new_pd->pd_prov_handle;
		ctx->cc_session = new_pd->pd_sid;
		ctx->cc_provider_private = NULL;
	}

	/* We reuse areq by resetting the provider and context fields. */
	KCF_PROV_REFRELE(old_pd);
	KCF_PROV_REFHOLD(new_pd);
	areq->an_provider = new_pd;
	mutex_enter(&areq->an_lock);
	areq->an_state = REQ_WAITING;
	mutex_exit(&areq->an_lock);

	switch (new_pd->pd_prov_type) {
	case CRYPTO_SW_PROVIDER:
		error = kcf_disp_sw_request(areq);
		break;

	case CRYPTO_HW_PROVIDER: {
		taskq_t *taskq = new_pd->pd_sched_info.ks_taskq;

		if (taskq_dispatch(taskq, process_req_hwp, areq, TQ_NOSLEEP) ==
		    (taskqid_t)0) {
			error = CRYPTO_HOST_MEMORY;
		} else {
			error = CRYPTO_QUEUED;
		}

		break;
	}
	}

	return (error);
}

#define	EMPTY_TASKQ(tq)	((tq)->tq_task.tqent_next == &(tq)->tq_task)

/*
 * Routine called by both ioctl and k-api. The consumer should
 * bundle the parameters into a kcf_req_params_t structure. A bunch
 * of macros are available in ops_impl.h for this bundling. They are:
 *
 *	KCF_WRAP_DIGEST_OPS_PARAMS()
 *	KCF_WRAP_MAC_OPS_PARAMS()
 *	KCF_WRAP_ENCRYPT_OPS_PARAMS()
 *	KCF_WRAP_DECRYPT_OPS_PARAMS() ... etc.
 *
 * It is the caller's responsibility to free the ctx argument when
 * appropriate. See the KCF_CONTEXT_COND_RELEASE macro for details.
 */
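/*
 * A hypothetical call sequence (illustration only; in-kernel consumers
 * normally reach this path through the crypto_*(9F) entry points, and
 * the macro argument list is elided here):
 *
 *	kcf_req_params_t params;
 *
 *	KCF_WRAP_DIGEST_OPS_PARAMS(&params, KCF_OP_ATOMIC, ...);
 *	error = kcf_submit_request(pd, NULL, crq, &params, B_FALSE);
 *
 * A NULL crq selects the synchronous path below; a non-NULL crq makes
 * the submission asynchronous.
 */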
int
kcf_submit_request(kcf_provider_desc_t *pd, crypto_ctx_t *ctx,
    crypto_call_req_t *crq, kcf_req_params_t *params, boolean_t cont)
{
	int error = CRYPTO_SUCCESS;
	kcf_areq_node_t *areq;
	kcf_sreq_node_t *sreq;
	kcf_context_t *kcf_ctx;
	taskq_t *taskq = pd->pd_sched_info.ks_taskq;

	kcf_ctx = ctx ? (kcf_context_t *)ctx->cc_framework_private : NULL;

	/* Synchronous cases */
	if (crq == NULL) {
		switch (pd->pd_prov_type) {
		case CRYPTO_SW_PROVIDER:
			error = common_submit_request(pd, ctx, params,
			    KCF_RHNDL(KM_SLEEP));
			break;

		case CRYPTO_HW_PROVIDER:
			sreq = kmem_cache_alloc(kcf_sreq_cache, KM_SLEEP);
			sreq->sn_state = REQ_ALLOCATED;
			sreq->sn_rv = CRYPTO_FAILED;

			sreq->sn_params = params;
			KCF_PROV_REFHOLD(pd);
			sreq->sn_provider = pd;

			/*
			 * Note that we do not need to hold the context
			 * for the synchronous case, as the context will
			 * never become invalid underneath us here.
			 */
			sreq->sn_context = kcf_ctx;

			ASSERT(taskq != NULL);
			/*
			 * Call the SPI directly if the taskq is empty and the
			 * provider is not busy, else dispatch to the taskq.
			 * Calling directly is fine as this is the synchronous
			 * case. This is unlike the asynchronous case where we
			 * must always dispatch to the taskq.
			 */
			if (EMPTY_TASKQ(taskq) &&
			    pd->pd_state == KCF_PROV_READY) {
				process_req_hwp(sreq);
			} else {
				/*
				 * We cannot tell from the taskq_dispatch()
				 * return value whether we exceeded maxalloc.
				 * Hence the check here. Since we are allowed
				 * to wait in the synchronous case, we wait
				 * for the taskq to become empty.
				 */
				if (taskq->tq_nalloc >= crypto_taskq_maxalloc) {
					taskq_wait(taskq);
				}

				(void) taskq_dispatch(taskq, process_req_hwp,
				    sreq, TQ_SLEEP);
			}

			/*
			 * Wait for the notification to arrive,
			 * if the operation is not done yet.
			 * Bug# 4722589 will make the wait a cv_wait_sig().
			 */
			mutex_enter(&sreq->sn_lock);
			while (sreq->sn_state < REQ_DONE)
				cv_wait(&sreq->sn_cv, &sreq->sn_lock);
			mutex_exit(&sreq->sn_lock);

			error = sreq->sn_rv;
			KCF_PROV_REFRELE(sreq->sn_provider);
			kmem_cache_free(kcf_sreq_cache, sreq);

			break;

		default:
			error = CRYPTO_FAILED;
			break;
		}

	} else {	/* Asynchronous cases */
		switch (pd->pd_prov_type) {
		case CRYPTO_SW_PROVIDER:
			if (!(crq->cr_flag & CRYPTO_ALWAYS_QUEUE)) {
				/*
				 * This case has less overhead since there is
				 * no switching of context.
				 */
				error = common_submit_request(pd, ctx, params,
				    KCF_RHNDL(KM_NOSLEEP));
			} else {
				/*
				 * CRYPTO_ALWAYS_QUEUE is set. We need to
				 * queue the request and return.
				 */
				areq = kcf_areqnode_alloc(pd, kcf_ctx, crq,
				    params, cont);
				if (areq == NULL)
					error = CRYPTO_HOST_MEMORY;
				else {
					if (!(crq->cr_flag
					    & CRYPTO_SKIP_REQID)) {
						/*
						 * Set the request handle. This
						 * handle is used for any
						 * crypto_cancel_req(9f) calls
						 * from the consumer. We have
						 * to do this before
						 * dispatching the request.
						 */
						crq->cr_reqid =
						    kcf_reqid_insert(areq);
					}

					error = kcf_disp_sw_request(areq);
					/*
					 * There is an error processing this
					 * request. Remove the handle and
					 * release the request structure.
					 */
					if (error != CRYPTO_QUEUED) {
						if (!(crq->cr_flag
						    & CRYPTO_SKIP_REQID))
							kcf_reqid_delete(areq);
						KCF_AREQ_REFRELE(areq);
					}
				}
			}
			break;

		case CRYPTO_HW_PROVIDER:
			/*
			 * We need to queue the request and return.
			 */
			areq = kcf_areqnode_alloc(pd, kcf_ctx, crq, params,
			    cont);
			if (areq == NULL) {
				error = CRYPTO_HOST_MEMORY;
				goto done;
			}

			ASSERT(taskq != NULL);
			/*
			 * We cannot tell from the taskq_dispatch() return
			 * value whether we exceeded maxalloc. Hence the
			 * check here.
			 */
			if (taskq->tq_nalloc >= crypto_taskq_maxalloc) {
				error = CRYPTO_BUSY;
				KCF_AREQ_REFRELE(areq);
				goto done;
			}

			if (!(crq->cr_flag & CRYPTO_SKIP_REQID)) {
				/*
				 * Set the request handle. This handle is used
				 * for any crypto_cancel_req(9f) calls from the
				 * consumer. We have to do this before
				 * dispatching the request.
				 */
				crq->cr_reqid = kcf_reqid_insert(areq);
			}

			if (taskq_dispatch(taskq,
			    process_req_hwp, areq, TQ_NOSLEEP) ==
			    (taskqid_t)0) {
				error = CRYPTO_HOST_MEMORY;
				if (!(crq->cr_flag & CRYPTO_SKIP_REQID))
					kcf_reqid_delete(areq);
				KCF_AREQ_REFRELE(areq);
			} else {
				error = CRYPTO_QUEUED;
			}
			break;

		default:
			error = CRYPTO_FAILED;
			break;
		}
	}

done:
	return (error);
}
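
/*
 * Summary of the return values above (derived from the code, for
 * reference): CRYPTO_SUCCESS or an SPI error for the synchronous and
 * inline paths, CRYPTO_QUEUED when the request was handed to gswq or
 * a taskq, CRYPTO_BUSY when a queue limit was hit, and
 * CRYPTO_HOST_MEMORY when an allocation or dispatch failed.
 */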

/*
 * We're done with this framework context, so free it. Note that freeing
 * the framework context (kcf_context) frees the global context (crypto_ctx).
 *
 * The provider is responsible for freeing provider private context after a
 * final or single operation and resetting the cc_provider_private field
 * to NULL. It should do this before it notifies the framework of the
 * completion. We still need to call KCF_PROV_FREE_CONTEXT to handle cases
 * like crypto_cancel_ctx(9f).
 */
void
kcf_free_context(kcf_context_t *kcf_ctx)
{
	kcf_provider_desc_t *pd = kcf_ctx->kc_prov_desc;
	crypto_ctx_t *gctx = &kcf_ctx->kc_glbl_ctx;
	kcf_context_t *kcf_secondctx = kcf_ctx->kc_secondctx;

	/* Release the second context, if any */

	if (kcf_secondctx != NULL)
		KCF_CONTEXT_REFRELE(kcf_secondctx);

	if (gctx->cc_provider_private != NULL) {
		mutex_enter(&pd->pd_lock);
		if (!KCF_IS_PROV_REMOVED(pd)) {
			/*
			 * Increment the provider's internal refcnt so it
			 * doesn't unregister from the framework while
			 * we're calling the entry point.
			 */
			KCF_PROV_IREFHOLD(pd);
			mutex_exit(&pd->pd_lock);
			(void) KCF_PROV_FREE_CONTEXT(pd, gctx);
			KCF_PROV_IREFRELE(pd);
		} else {
			mutex_exit(&pd->pd_lock);
		}
	}

	/* kcf_ctx->kc_prov_desc has a hold on pd */
	KCF_PROV_REFRELE(kcf_ctx->kc_prov_desc);

	/* check if this context is shared with a software provider */
	if ((gctx->cc_flags & CRYPTO_INIT_OPSTATE) &&
	    kcf_ctx->kc_sw_prov_desc != NULL) {
		KCF_PROV_REFRELE(kcf_ctx->kc_sw_prov_desc);
	}

	kmem_cache_free(kcf_context_cache, kcf_ctx);
}

/*
 * Free the request after releasing all the holds.
 */
void
kcf_free_req(kcf_areq_node_t *areq)
{
	KCF_PROV_REFRELE(areq->an_provider);
	if (areq->an_context != NULL)
		KCF_CONTEXT_REFRELE(areq->an_context);

	if (areq->an_tried_plist != NULL)
		kcf_free_triedlist(areq->an_tried_plist);
	kmem_cache_free(kcf_areq_cache, areq);
}

/*
 * Utility routine to remove a request from the chain of requests
 * hanging off a context.
 */
void
kcf_removereq_in_ctxchain(kcf_context_t *ictx, kcf_areq_node_t *areq)
{
	kcf_areq_node_t *cur, *prev;

	/*
	 * Get the context lock, search for areq in the chain and remove it.
	 */
	ASSERT(ictx != NULL);
	mutex_enter(&ictx->kc_in_use_lock);
	prev = cur = ictx->kc_req_chain_first;

	while (cur != NULL) {
		if (cur == areq) {
			if (prev == cur) {
				if ((ictx->kc_req_chain_first =
				    cur->an_ctxchain_next) == NULL)
					ictx->kc_req_chain_last = NULL;
			} else {
				if (cur == ictx->kc_req_chain_last)
					ictx->kc_req_chain_last = prev;
				prev->an_ctxchain_next = cur->an_ctxchain_next;
			}

			break;
		}
		prev = cur;
		cur = cur->an_ctxchain_next;
	}
	mutex_exit(&ictx->kc_in_use_lock);
}

/*
 * Remove the specified node from the global software queue.
 *
 * The caller must hold the queue lock and request lock (an_lock).
 */
void
kcf_remove_node(kcf_areq_node_t *node)
{
	kcf_areq_node_t *nextp = node->an_next;
	kcf_areq_node_t *prevp = node->an_prev;

	ASSERT(mutex_owned(&gswq->gs_lock));

	if (nextp != NULL)
		nextp->an_prev = prevp;
	else
		gswq->gs_last = prevp;

	if (prevp != NULL)
		prevp->an_next = nextp;
	else
		gswq->gs_first = nextp;

	ASSERT(mutex_owned(&node->an_lock));
	node->an_state = REQ_CANCELED;
}

/*
 * Remove and return the first node in the global software queue.
 *
 * The caller must hold the queue lock.
 */
static kcf_areq_node_t *
kcf_dequeue()
{
	kcf_areq_node_t *tnode = NULL;

	ASSERT(mutex_owned(&gswq->gs_lock));
	if ((tnode = gswq->gs_first) == NULL) {
		return (NULL);
	} else {
		ASSERT(gswq->gs_first->an_prev == NULL);
		gswq->gs_first = tnode->an_next;
		if (tnode->an_next == NULL)
			gswq->gs_last = NULL;
		else
			tnode->an_next->an_prev = NULL;
	}

	gswq->gs_njobs--;
	return (tnode);
}

/*
 * Add the request node to the end of the global software queue.
 *
 * The caller should not hold the queue lock. Returns 0 if the
 * request is successfully queued. Returns CRYPTO_BUSY if the limit
 * on the number of jobs is exceeded.
 */
static int
kcf_enqueue(kcf_areq_node_t *node)
{
	kcf_areq_node_t *tnode;

	mutex_enter(&gswq->gs_lock);

	if (gswq->gs_njobs >= gswq->gs_maxjobs) {
		mutex_exit(&gswq->gs_lock);
		return (CRYPTO_BUSY);
	}

	if (gswq->gs_last == NULL) {
		gswq->gs_first = gswq->gs_last = node;
	} else {
		ASSERT(gswq->gs_last->an_next == NULL);
		tnode = gswq->gs_last;
		tnode->an_next = node;
		gswq->gs_last = node;
		node->an_prev = tnode;
	}

	gswq->gs_njobs++;

	/* an_lock not needed here as we hold gs_lock */
	node->an_state = REQ_WAITING;

	mutex_exit(&gswq->gs_lock);

	return (0);
}

/*
 * Decrement the thread pool count and signal the failover
 * thread if we are the last one out.
 */
static void
kcf_decrcnt_andsignal()
{
	KCF_ATOMIC_DECR(kcfpool->kp_threads);

	mutex_enter(&kcfpool->kp_thread_lock);
	if (kcfpool->kp_threads == 0)
		cv_signal(&kcfpool->kp_nothr_cv);
	mutex_exit(&kcfpool->kp_thread_lock);
}

/*
 * Function run by a thread from kcfpool to work on the global software
 * queue. It is called from ioctl(CRYPTO_POOL_RUN, ...).
 */
int
kcf_svc_do_run(void)
{
	int error = 0;
	clock_t rv;
	clock_t timeout_val;
	kcf_areq_node_t *req;
	kcf_context_t *ictx;
	kcf_provider_desc_t *pd;

	KCF_ATOMIC_INCR(kcfpool->kp_threads);

	for (;;) {
		mutex_enter(&gswq->gs_lock);

		while ((req = kcf_dequeue()) == NULL) {
			timeout_val = ddi_get_lbolt() +
			    drv_usectohz(kcf_idlethr_timeout);

			KCF_ATOMIC_INCR(kcfpool->kp_idlethreads);
			rv = cv_timedwait_sig(&gswq->gs_cv, &gswq->gs_lock,
			    timeout_val);
			KCF_ATOMIC_DECR(kcfpool->kp_idlethreads);

			switch (rv) {
			case 0:
				/*
				 * A signal (as in kill(2)) is pending. We did
				 * not get any cv_signal().
				 */
				kcf_decrcnt_andsignal();
				mutex_exit(&gswq->gs_lock);
				return (EINTR);

			case -1:
				/*
				 * Timed out and we are not signaled. Let us
				 * see if this thread should exit. We should
				 * keep at least kcf_minthreads.
				 */
				if (kcfpool->kp_threads > kcf_minthreads) {
					kcf_decrcnt_andsignal();
					mutex_exit(&gswq->gs_lock);
					return (0);
				}

				/* Resume the wait for work */
				break;

			default:
				/*
				 * We are signaled to work on the queue.
				 */
				break;
			}
		}

		mutex_exit(&gswq->gs_lock);

		ictx = req->an_context;
		if (ictx == NULL) {	/* Context-less operation */
			pd = req->an_provider;
			error = common_submit_request(pd, NULL,
			    &req->an_params, req);
			kcf_aop_done(req, error);
			continue;
		}

		/*
		 * We check if we can work on the request now.
		 * Solaris does not guarantee any order on how the threads
		 * are scheduled or how the waiters on a mutex are chosen.
		 * So, we need to maintain our own order.
		 *
		 * is_my_turn is set to B_TRUE initially for a request when
		 * it is enqueued and there are no other requests
		 * for that context. Note that a thread sleeping on
		 * an_turn_cv is not counted as an idle thread. This is
		 * because we define an idle thread as one that sleeps on the
		 * global queue waiting for new requests.
		 */
		mutex_enter(&req->an_lock);
		while (req->an_is_my_turn == B_FALSE) {
			KCF_ATOMIC_INCR(kcfpool->kp_blockedthreads);
			cv_wait(&req->an_turn_cv, &req->an_lock);
			KCF_ATOMIC_DECR(kcfpool->kp_blockedthreads);
		}

		req->an_state = REQ_INPROGRESS;
		mutex_exit(&req->an_lock);

		pd = ictx->kc_prov_desc;
		ASSERT(pd == req->an_provider);
		error = common_submit_request(pd, &ictx->kc_glbl_ctx,
		    &req->an_params, req);

		kcf_aop_done(req, error);
	}
}
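
/*
 * For reference (illustrative): cv_timedwait_sig() returns 0 when
 * interrupted by a signal, -1 when the timeout expires, and a positive
 * value when woken by cv_signal()/cv_broadcast(). The switch above
 * relies on exactly that contract, as does kcf_svc_wait() below.
 */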

/*
 * kmem_cache_alloc constructor for sync request structure.
 */
/* ARGSUSED */
static int
kcf_sreq_cache_constructor(void *buf, void *cdrarg, int kmflags)
{
	kcf_sreq_node_t *sreq = (kcf_sreq_node_t *)buf;

	sreq->sn_type = CRYPTO_SYNCH;
	cv_init(&sreq->sn_cv, NULL, CV_DEFAULT, NULL);
	mutex_init(&sreq->sn_lock, NULL, MUTEX_DEFAULT, NULL);

	return (0);
}

/* ARGSUSED */
static void
kcf_sreq_cache_destructor(void *buf, void *cdrarg)
{
	kcf_sreq_node_t *sreq = (kcf_sreq_node_t *)buf;

	mutex_destroy(&sreq->sn_lock);
	cv_destroy(&sreq->sn_cv);
}

/*
 * kmem_cache_alloc constructor for async request structure.
 */
/* ARGSUSED */
static int
kcf_areq_cache_constructor(void *buf, void *cdrarg, int kmflags)
{
	kcf_areq_node_t *areq = (kcf_areq_node_t *)buf;

	areq->an_type = CRYPTO_ASYNCH;
	mutex_init(&areq->an_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&areq->an_done, NULL, CV_DEFAULT, NULL);
	cv_init(&areq->an_turn_cv, NULL, CV_DEFAULT, NULL);

	return (0);
}

/* ARGSUSED */
static void
kcf_areq_cache_destructor(void *buf, void *cdrarg)
{
	kcf_areq_node_t *areq = (kcf_areq_node_t *)buf;

	ASSERT(areq->an_refcnt == 0);
	mutex_destroy(&areq->an_lock);
	cv_destroy(&areq->an_done);
	cv_destroy(&areq->an_turn_cv);
}

/*
 * kmem_cache_alloc constructor for kcf_context structure.
 */
/* ARGSUSED */
static int
kcf_context_cache_constructor(void *buf, void *cdrarg, int kmflags)
{
	kcf_context_t *kctx = (kcf_context_t *)buf;

	mutex_init(&kctx->kc_in_use_lock, NULL, MUTEX_DEFAULT, NULL);

	return (0);
}

/* ARGSUSED */
static void
kcf_context_cache_destructor(void *buf, void *cdrarg)
{
	kcf_context_t *kctx = (kcf_context_t *)buf;

	ASSERT(kctx->kc_refcnt == 0);
	mutex_destroy(&kctx->kc_in_use_lock);
}

/*
 * Creates and initializes all the structures needed by the framework.
 */
void
kcf_sched_init(void)
{
	int i;
	kcf_reqid_table_t *rt;

	/*
	 * Create all the kmem caches needed by the framework. We set the
	 * align argument to 64 to get slabs aligned on a 64-byte boundary
	 * and objects (cache_chunksize) that are a multiple of 64 bytes.
	 * This helps to avoid false sharing, as this is the size of the
	 * CPU cache line.
	 */
	kcf_sreq_cache = kmem_cache_create("kcf_sreq_cache",
	    sizeof (struct kcf_sreq_node), 64, kcf_sreq_cache_constructor,
	    kcf_sreq_cache_destructor, NULL, NULL, NULL, 0);

	kcf_areq_cache = kmem_cache_create("kcf_areq_cache",
	    sizeof (struct kcf_areq_node), 64, kcf_areq_cache_constructor,
	    kcf_areq_cache_destructor, NULL, NULL, NULL, 0);

	kcf_context_cache = kmem_cache_create("kcf_context_cache",
	    sizeof (struct kcf_context), 64, kcf_context_cache_constructor,
	    kcf_context_cache_destructor, NULL, NULL, NULL, 0);

	mutex_init(&kcf_dh_lock, NULL, MUTEX_DEFAULT, NULL);

	gswq = kmem_alloc(sizeof (kcf_global_swq_t), KM_SLEEP);

	mutex_init(&gswq->gs_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&gswq->gs_cv, NULL, CV_DEFAULT, NULL);
	gswq->gs_njobs = 0;
	compute_min_max_threads();	/* Computes gs_maxjobs also. */
	gswq->gs_first = gswq->gs_last = NULL;

	/* Initialize the global reqid table */
	for (i = 0; i < REQID_TABLES; i++) {
		rt = kmem_zalloc(sizeof (kcf_reqid_table_t), KM_SLEEP);
		kcf_reqid_table[i] = rt;
		mutex_init(&rt->rt_lock, NULL, MUTEX_DEFAULT, NULL);
		rt->rt_curid = i;
	}

	/* Allocate and initialize the thread pool */
	kcfpool_alloc();

	/* Initialize the event notification list variables */
	mutex_init(&ntfy_list_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&ntfy_list_cv, NULL, CV_DEFAULT, NULL);

	/* Initialize the crypto_bufcall list variables */
	mutex_init(&cbuf_list_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&cbuf_list_cv, NULL, CV_DEFAULT, NULL);

	/* Create the kcf kstat */
	kcf_misc_kstat = kstat_create("kcf", 0, "framework_stats", "crypto",
	    KSTAT_TYPE_NAMED, sizeof (kcf_stats_t) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);

	if (kcf_misc_kstat != NULL) {
		kcf_misc_kstat->ks_data = &kcf_ksdata;
		kcf_misc_kstat->ks_update = kcf_misc_kstat_update;
		kstat_install(kcf_misc_kstat);
	}
}

/*
 * This routine should only be called by drv/cryptoadm.
 *
 * The kcf_sched_running flag isn't protected by a lock. But, we are safe
 * because the first thread ("cryptoadm refresh") calling this routine during
 * boot time completes before any other thread that can call this routine.
 */
void
kcf_sched_start(void)
{
	if (kcf_sched_running)
		return;

	/* Start the failover kernel thread for now */
	(void) thread_create(NULL, 0, &kcf_failover_thread, 0, 0, &p0,
	    TS_RUN, minclsyspri);

	/* Start the background processing thread. */
	(void) thread_create(NULL, 0, &crypto_bufcall_service, 0, 0, &p0,
	    TS_RUN, minclsyspri);

	kcf_sched_running = B_TRUE;
}

/*
 * Signal the waiting sync client.
 */
void
kcf_sop_done(kcf_sreq_node_t *sreq, int error)
{
	mutex_enter(&sreq->sn_lock);
	sreq->sn_state = REQ_DONE;
	sreq->sn_rv = error;
	cv_signal(&sreq->sn_cv);
	mutex_exit(&sreq->sn_lock);
}
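
/*
 * Illustrative pairing: the cv_signal() above wakes the synchronous
 * waiter in kcf_submit_request(), which blocks as
 *
 *	while (sreq->sn_state < REQ_DONE)
 *		cv_wait(&sreq->sn_cv, &sreq->sn_lock);
 */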

/*
 * Callback the async client with the operation status.
 * We free the async request node and possibly the context.
 * We also handle any chain of requests hanging off of
 * the context.
 */
void
kcf_aop_done(kcf_areq_node_t *areq, int error)
{
	kcf_op_type_t optype;
	boolean_t skip_notify = B_FALSE;
	kcf_context_t *ictx;
	kcf_areq_node_t *nextreq;

	/*
	 * Handle recoverable errors. This has to be done first
	 * before doing anything else in this routine so that
	 * we do not change the state of the request.
	 */
	if (error != CRYPTO_SUCCESS && IS_RECOVERABLE(error)) {
		/*
		 * We try another provider, if one is available. Else
		 * we continue with the failure notification to the
		 * client.
		 */
		if (kcf_resubmit_request(areq) == CRYPTO_QUEUED)
			return;
	}

	mutex_enter(&areq->an_lock);
	areq->an_state = REQ_DONE;
	mutex_exit(&areq->an_lock);

	optype = (&areq->an_params)->rp_optype;
	if ((ictx = areq->an_context) != NULL) {
		/*
		 * After a request is removed from the request queue, it
		 * still stays on the chain of requests hanging off its
		 * context structure. It needs to be removed from this
		 * chain at this point.
		 */
		mutex_enter(&ictx->kc_in_use_lock);
		nextreq = areq->an_ctxchain_next;
		if (nextreq != NULL) {
			mutex_enter(&nextreq->an_lock);
			nextreq->an_is_my_turn = B_TRUE;
			cv_signal(&nextreq->an_turn_cv);
			mutex_exit(&nextreq->an_lock);
		}

		ictx->kc_req_chain_first = nextreq;
		if (nextreq == NULL)
			ictx->kc_req_chain_last = NULL;
		mutex_exit(&ictx->kc_in_use_lock);

		if (IS_SINGLE_OP(optype) || IS_FINAL_OP(optype)) {
			ASSERT(nextreq == NULL);
			KCF_CONTEXT_REFRELE(ictx);
		} else if (error != CRYPTO_SUCCESS && IS_INIT_OP(optype)) {
			/*
			 * NOTE - We do not release the context in case of
			 * update operations. We require the consumer to free
			 * it explicitly, in case it wants to abandon an
			 * update operation. This is done as there may be
			 * mechanisms in ECB mode that can continue even if
			 * an operation on a block fails.
			 */
			KCF_CONTEXT_REFRELE(ictx);
		}
	}

	/* Deal with the internal continuation to this request first */

	if (areq->an_isdual) {
		kcf_dual_req_t *next_arg;
		next_arg = (kcf_dual_req_t *)areq->an_reqarg.cr_callback_arg;
		next_arg->kr_areq = areq;
		KCF_AREQ_REFHOLD(areq);
		areq->an_isdual = B_FALSE;

		NOTIFY_CLIENT(areq, error);
		return;
	}

	/*
	 * If the CRYPTO_NOTIFY_OPDONE flag is set, we should always notify.
	 * If this flag is clear, we skip the notification provided there
	 * are no errors. We check this flag only for init or update
	 * operations. It is ignored for single, final or atomic operations.
	 */
	skip_notify = (IS_UPDATE_OP(optype) || IS_INIT_OP(optype)) &&
	    (!(areq->an_reqarg.cr_flag & CRYPTO_NOTIFY_OPDONE)) &&
	    (error == CRYPTO_SUCCESS);

	if (!skip_notify) {
		NOTIFY_CLIENT(areq, error);
	}

	if (!(areq->an_reqarg.cr_flag & CRYPTO_SKIP_REQID))
		kcf_reqid_delete(areq);

	KCF_AREQ_REFRELE(areq);
}

/*
 * Allocate the thread pool and initialize all the fields.
 */
static void
kcfpool_alloc()
{
	kcfpool = kmem_alloc(sizeof (kcf_pool_t), KM_SLEEP);

	kcfpool->kp_threads = kcfpool->kp_idlethreads = 0;
	kcfpool->kp_blockedthreads = 0;
	kcfpool->kp_signal_create_thread = B_FALSE;
	kcfpool->kp_nthrs = 0;
	kcfpool->kp_user_waiting = B_FALSE;

	mutex_init(&kcfpool->kp_thread_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&kcfpool->kp_nothr_cv, NULL, CV_DEFAULT, NULL);

	mutex_init(&kcfpool->kp_user_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&kcfpool->kp_user_cv, NULL, CV_DEFAULT, NULL);

	kcf_idlethr_timeout = KCF_DEFAULT_THRTIMEOUT;
}
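
/*
 * Illustrative flow (assuming kcfd is the user-level daemon backing
 * this pool): kcfd calls ioctl(CRYPTO_POOL_WAIT), i.e. kcf_svc_wait()
 * below, to learn how many threads to create, and each thread it
 * creates enters ioctl(CRYPTO_POOL_RUN), i.e. kcf_svc_do_run() above,
 * to service the global software queue.
 */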

/*
 * This function is run by the 'creator' thread in the pool.
 * It is called from ioctl(CRYPTO_POOL_WAIT, ...).
 */
int
kcf_svc_wait(int *nthrs)
{
	clock_t rv;
	clock_t timeout_val;

	if (kcfpool == NULL)
		return (ENOENT);

	mutex_enter(&kcfpool->kp_user_lock);
	/* Check if there's already a user thread waiting on this kcfpool */
	if (kcfpool->kp_user_waiting) {
		mutex_exit(&kcfpool->kp_user_lock);
		*nthrs = 0;
		return (EBUSY);
	}

	kcfpool->kp_user_waiting = B_TRUE;

	/* Go to sleep, waiting for the signaled flag. */
	while (!kcfpool->kp_signal_create_thread) {
		timeout_val = ddi_get_lbolt() +
		    drv_usectohz(kcf_idlethr_timeout);

		rv = cv_timedwait_sig(&kcfpool->kp_user_cv,
		    &kcfpool->kp_user_lock, timeout_val);
		switch (rv) {
		case 0:
			/* Interrupted, return to handle exit or signal */
			kcfpool->kp_user_waiting = B_FALSE;
			kcfpool->kp_signal_create_thread = B_FALSE;
			mutex_exit(&kcfpool->kp_user_lock);
			/*
			 * kcfd is exiting. Release the door and
			 * invalidate it.
			 */
			mutex_enter(&kcf_dh_lock);
			if (kcf_dh != NULL) {
				door_ki_rele(kcf_dh);
				kcf_dh = NULL;
			}
			mutex_exit(&kcf_dh_lock);
			return (EINTR);

		case -1:
			/* Timed out. Recalculate the min/max threads */
			compute_min_max_threads();
			break;

		default:
			/* Worker thread did a cv_signal() */
			break;
		}
	}

	kcfpool->kp_signal_create_thread = B_FALSE;
	kcfpool->kp_user_waiting = B_FALSE;

	*nthrs = kcfpool->kp_nthrs;
	mutex_exit(&kcfpool->kp_user_lock);

	/* Return to userland for possible thread creation. */
	return (0);
}


/*
 * This routine introduces a locking order for gswq->gs_lock followed
 * by cpu_lock.
 * This means that no consumer of the k-api should hold cpu_lock when calling
 * k-api routines.
 */
static void
compute_min_max_threads()
{
	psetid_t psid = PS_MYID;

	mutex_enter(&gswq->gs_lock);
	if (cpupart_get_cpus(&psid, NULL, (uint_t *)&kcf_minthreads) != 0) {
		cmn_err(CE_WARN, "kcf:compute_min_max_threads cpupart_get_cpus:"
		    " failed, setting kcf_minthreads to 1");
		kcf_minthreads = 1;
	}
	kcf_maxthreads = kcf_thr_multiple * kcf_minthreads;
	gswq->gs_maxjobs = kcf_maxthreads * crypto_taskq_maxalloc;
	mutex_exit(&gswq->gs_lock);
}
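
/*
 * Worked example (illustrative): on a processor set with 8 CPUs and
 * the default kcf_thr_multiple of 2, this computes kcf_minthreads = 8,
 * kcf_maxthreads = 16 and gs_maxjobs = 16 * crypto_taskq_maxalloc.
 */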

/*
 * This is the main routine of the failover kernel thread.
 * If there are any threads in the pool we sleep. The last thread in the
 * pool to exit will signal us to get to work. We get back to sleep
 * once we detect that the pool has threads.
 *
 * Note that in the hand-off from us to a pool thread we get to run once.
 * Since this hand-off is a rare event this should be fine.
 */
static void
kcf_failover_thread()
{
	int error = 0;
	kcf_context_t *ictx;
	kcf_areq_node_t *req;
	callb_cpr_t cpr_info;
	kmutex_t cpr_lock;
	static boolean_t is_logged = B_FALSE;

	mutex_init(&cpr_lock, NULL, MUTEX_DEFAULT, NULL);
	CALLB_CPR_INIT(&cpr_info, &cpr_lock, callb_generic_cpr,
	    "kcf_failover_thread");

	for (;;) {
		/*
		 * Wait if there are any threads in the pool.
		 */
		if (kcfpool->kp_threads > 0) {
			mutex_enter(&cpr_lock);
			CALLB_CPR_SAFE_BEGIN(&cpr_info);
			mutex_exit(&cpr_lock);

			mutex_enter(&kcfpool->kp_thread_lock);
			cv_wait(&kcfpool->kp_nothr_cv,
			    &kcfpool->kp_thread_lock);
			mutex_exit(&kcfpool->kp_thread_lock);

			mutex_enter(&cpr_lock);
			CALLB_CPR_SAFE_END(&cpr_info, &cpr_lock);
			mutex_exit(&cpr_lock);
			is_logged = B_FALSE;
		}

		/*
		 * Get the requests from the queue and wait if needed.
		 */
		mutex_enter(&gswq->gs_lock);

		while ((req = kcf_dequeue()) == NULL) {
			mutex_enter(&cpr_lock);
			CALLB_CPR_SAFE_BEGIN(&cpr_info);
			mutex_exit(&cpr_lock);

			KCF_ATOMIC_INCR(kcfpool->kp_idlethreads);
			cv_wait(&gswq->gs_cv, &gswq->gs_lock);
			KCF_ATOMIC_DECR(kcfpool->kp_idlethreads);

			mutex_enter(&cpr_lock);
			CALLB_CPR_SAFE_END(&cpr_info, &cpr_lock);
			mutex_exit(&cpr_lock);
		}

		mutex_exit(&gswq->gs_lock);

		/*
		 * We check the kp_threads since kcfd could have started
		 * while we are waiting on the global software queue.
		 */
		if (kcfpool->kp_threads <= 0 && !is_logged) {
			cmn_err(CE_WARN, "kcfd is not running. Please check "
			    "and restart kcfd. Using the failover kernel "
			    "thread for now.\n");
			is_logged = B_TRUE;
		}

		/*
		 * Get to work on the request.
		 */
		ictx = req->an_context;
		mutex_enter(&req->an_lock);
		req->an_state = REQ_INPROGRESS;
		mutex_exit(&req->an_lock);

		error = common_submit_request(req->an_provider, ictx ?
		    &ictx->kc_glbl_ctx : NULL, &req->an_params, req);

		kcf_aop_done(req, error);
	}
}

/*
 * Insert the async request in the hash table after assigning it
 * an ID. Returns the ID.
 *
 * The ID is used by the caller to pass as an argument to a
 * cancel_req() routine later.
 */
static crypto_req_id_t
kcf_reqid_insert(kcf_areq_node_t *areq)
{
	int indx;
	crypto_req_id_t id;
	kcf_areq_node_t *headp;
	kcf_reqid_table_t *rt =
	    kcf_reqid_table[CPU->cpu_seqid & REQID_TABLE_MASK];

	mutex_enter(&rt->rt_lock);

	rt->rt_curid = id =
	    (rt->rt_curid - REQID_COUNTER_LOW) | REQID_COUNTER_HIGH;
	SET_REQID(areq, id);
	indx = REQID_HASH(id);
	headp = areq->an_idnext = rt->rt_idhash[indx];
	areq->an_idprev = NULL;
	if (headp != NULL)
		headp->an_idprev = areq;

	rt->rt_idhash[indx] = areq;
	mutex_exit(&rt->rt_lock);

	return (id);
}

/*
 * Delete the async request from the hash table.
 */
static void
kcf_reqid_delete(kcf_areq_node_t *areq)
{
	int indx;
	kcf_areq_node_t *nextp, *prevp;
	crypto_req_id_t id = GET_REQID(areq);
	kcf_reqid_table_t *rt;

	rt = kcf_reqid_table[id & REQID_TABLE_MASK];
	indx = REQID_HASH(id);

	mutex_enter(&rt->rt_lock);

	nextp = areq->an_idnext;
	prevp = areq->an_idprev;
	if (nextp != NULL)
		nextp->an_idprev = prevp;
	if (prevp != NULL)
		prevp->an_idnext = nextp;
	else
		rt->rt_idhash[indx] = nextp;

	SET_REQID(areq, 0);
	cv_broadcast(&areq->an_done);

	mutex_exit(&rt->rt_lock);
}
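
/*
 * Illustrative note: the low bits of a request ID select its table, so
 * kcf_reqid_delete() and crypto_cancel_req() can recover the owning
 * table from the ID alone:
 *
 *	rt = kcf_reqid_table[id & REQID_TABLE_MASK];
 *
 * This round-trips because rt_curid is seeded with the table index in
 * kcf_sched_init() and the REQID_COUNTER_LOW/HIGH arithmetic is
 * assumed (see their definitions in sched_impl.h) to leave the table
 * bits intact.
 */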

/*
 * Cancel a single asynchronous request.
 *
 * We guarantee that no problems will result from calling
 * crypto_cancel_req() for a request which is either running, or
 * has already completed. We remove the request from any queues
 * if possible. We wait for request completion if the
 * request is dispatched to a provider.
 *
 * Calling context:
 *	Can be called from user context only.
 *
 * NOTE: We acquire the following locks in this routine (in order):
 *	- rt_lock (kcf_reqid_table_t)
 *	- gswq->gs_lock
 *	- areq->an_lock
 *	- ictx->kc_in_use_lock (from kcf_removereq_in_ctxchain())
 *
 * This locking order MUST be maintained in code everywhere else.
 */
void
crypto_cancel_req(crypto_req_id_t id)
{
	int indx;
	kcf_areq_node_t *areq;
	kcf_provider_desc_t *pd;
	kcf_context_t *ictx;
	kcf_reqid_table_t *rt;

	rt = kcf_reqid_table[id & REQID_TABLE_MASK];
	indx = REQID_HASH(id);

	mutex_enter(&rt->rt_lock);
	for (areq = rt->rt_idhash[indx]; areq; areq = areq->an_idnext) {
		if (GET_REQID(areq) == id) {
			/*
			 * We found the request. It is either still waiting
			 * in the framework queues or running at the provider.
			 */
			pd = areq->an_provider;
			ASSERT(pd != NULL);

			switch (pd->pd_prov_type) {
			case CRYPTO_SW_PROVIDER:
				mutex_enter(&gswq->gs_lock);
				mutex_enter(&areq->an_lock);

				/* This request can be safely canceled. */
				if (areq->an_state <= REQ_WAITING) {
					/* Remove from the global software
					 * queue, gswq. */
					kcf_remove_node(areq);
					if ((ictx = areq->an_context) != NULL)
						kcf_removereq_in_ctxchain(
						    ictx, areq);

					mutex_exit(&areq->an_lock);
					mutex_exit(&gswq->gs_lock);
					mutex_exit(&rt->rt_lock);

					/*
					 * Remove areq from the hash table
					 * and free it.
					 */
					kcf_reqid_delete(areq);
					KCF_AREQ_REFRELE(areq);
					return;
				}

				mutex_exit(&areq->an_lock);
				mutex_exit(&gswq->gs_lock);
				break;

			case CRYPTO_HW_PROVIDER:
				/*
				 * There is no interface to remove an entry
				 * once it is on the taskq. So, we do not do
				 * anything for a hardware provider.
				 */
				break;
			}

			/*
			 * The request is running. Wait for the request
			 * completion to notify us.
			 */
			KCF_AREQ_REFHOLD(areq);
			while (GET_REQID(areq) == id)
				cv_wait(&areq->an_done, &rt->rt_lock);
			KCF_AREQ_REFRELE(areq);
			break;
		}
	}

	mutex_exit(&rt->rt_lock);
}

/*
 * Cancel all asynchronous requests associated with the
 * passed in crypto context and free it.
 *
 * A client SHOULD NOT call this routine after calling a crypto_*_final
 * routine. This routine is called only during intermediate operations.
 * The client should not use the crypto context after this function returns
 * since we destroy it.
 *
 * Calling context:
 *	Can be called from user context only.
 */
void
crypto_cancel_ctx(crypto_context_t ctx)
{
	kcf_context_t *ictx;
	kcf_areq_node_t *areq;

	if (ctx == NULL)
		return;

	ictx = (kcf_context_t *)((crypto_ctx_t *)ctx)->cc_framework_private;

	mutex_enter(&ictx->kc_in_use_lock);

	/* Walk the chain and cancel each request */
	while ((areq = ictx->kc_req_chain_first) != NULL) {
		/*
		 * We have to drop the lock here as we may have
		 * to wait for request completion. We hold the
		 * request before dropping the lock though, so that it
		 * won't be freed underneath us.
		 */
		KCF_AREQ_REFHOLD(areq);
		mutex_exit(&ictx->kc_in_use_lock);

		crypto_cancel_req(GET_REQID(areq));
		KCF_AREQ_REFRELE(areq);

		mutex_enter(&ictx->kc_in_use_lock);
	}

	mutex_exit(&ictx->kc_in_use_lock);
	KCF_CONTEXT_REFRELE(ictx);
}
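
/*
 * Hypothetical consumer usage (illustration only; argument lists
 * elided): the request ID stored in cr_reqid by kcf_submit_request()
 * is what a client later hands to crypto_cancel_req(9f):
 *
 *	crypto_call_req_t cr;
 *	...
 *	(void) crypto_digest(..., &cr);
 *	crypto_cancel_req(cr.cr_reqid);
 */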

/*
 * Update kstats.
 */
static int
kcf_misc_kstat_update(kstat_t *ksp, int rw)
{
	uint_t tcnt;
	kcf_stats_t *ks_data;

	if (rw == KSTAT_WRITE)
		return (EACCES);

	ks_data = ksp->ks_data;

	ks_data->ks_thrs_in_pool.value.ui32 = kcfpool->kp_threads;
	/*
	 * The failover thread is counted in kp_idlethreads in
	 * some corner cases. This is done to avoid doing more checks
	 * when submitting a request. We account for those cases below.
	 */
	if ((tcnt = kcfpool->kp_idlethreads) == (kcfpool->kp_threads + 1))
		tcnt--;
	ks_data->ks_idle_thrs.value.ui32 = tcnt;
	ks_data->ks_minthrs.value.ui32 = kcf_minthreads;
	ks_data->ks_maxthrs.value.ui32 = kcf_maxthreads;
	ks_data->ks_swq_njobs.value.ui32 = gswq->gs_njobs;
	ks_data->ks_swq_maxjobs.value.ui32 = gswq->gs_maxjobs;
	ks_data->ks_taskq_threads.value.ui32 = crypto_taskq_threads;
	ks_data->ks_taskq_minalloc.value.ui32 = crypto_taskq_minalloc;
	ks_data->ks_taskq_maxalloc.value.ui32 = crypto_taskq_maxalloc;

	return (0);
}

/*
 * Allocate and initialize a kcf_dual_req, used for saving the arguments of
 * a dual operation or an atomic operation that has to be internally
 * simulated with multiple single steps.
 * crq determines the memory allocation flags.
 */
kcf_dual_req_t *
kcf_alloc_req(crypto_call_req_t *crq)
{
	kcf_dual_req_t *kcr;

	kcr = kmem_alloc(sizeof (kcf_dual_req_t), KCF_KMFLAG(crq));

	if (kcr == NULL)
		return (NULL);

	/* Copy the whole crypto_call_req struct, as it isn't persistent */
	if (crq != NULL)
		kcr->kr_callreq = *crq;
	else
		bzero(&(kcr->kr_callreq), sizeof (crypto_call_req_t));
	kcr->kr_areq = NULL;
	kcr->kr_saveoffset = 0;
	kcr->kr_savelen = 0;

	return (kcr);
}
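
/*
 * Note (illustrative): KCF_KMFLAG(crq) is assumed (see sched_impl.h)
 * to mirror the convention used in kcf_new_ctx() above, i.e. sleep
 * only when no callback was supplied:
 *
 *	(crq == NULL) ? KM_SLEEP : KM_NOSLEEP
 */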

/*
 * Callback routine for the next part of a simulated dual part.
 * Schedules the next step.
 *
 * This routine can be called from interrupt context.
 */
void
kcf_next_req(void *next_req_arg, int status)
{
	kcf_dual_req_t *next_req = (kcf_dual_req_t *)next_req_arg;
	kcf_req_params_t *params = &(next_req->kr_params);
	kcf_areq_node_t *areq = next_req->kr_areq;
	int error = status;
	kcf_provider_desc_t *pd;
	crypto_dual_data_t *ct;

	/* Stop the processing if an error occurred at this step */
	if (error != CRYPTO_SUCCESS) {
out:
		areq->an_reqarg = next_req->kr_callreq;
		KCF_AREQ_REFRELE(areq);
		kmem_free(next_req, sizeof (kcf_dual_req_t));
		areq->an_isdual = B_FALSE;
		kcf_aop_done(areq, error);
		return;
	}

	switch (params->rp_opgrp) {
	case KCF_OG_MAC: {

		/*
		 * The next req is submitted with the same reqid as the
		 * first part. The consumer only got back that reqid, and
		 * should still be able to cancel the operation during its
		 * second step.
		 */
		kcf_mac_ops_params_t *mops = &(params->rp_u.mac_params);
		crypto_ctx_template_t mac_tmpl;
		kcf_mech_entry_t *me;

		ct = (crypto_dual_data_t *)mops->mo_data;
		mac_tmpl = (crypto_ctx_template_t)mops->mo_templ;

		/* No expected recoverable failures, so no retry list */
		pd = kcf_get_mech_provider(mops->mo_framework_mechtype,
		    &me, &error, NULL, CRYPTO_FG_MAC_ATOMIC,
		    (areq->an_reqarg.cr_flag & CRYPTO_RESTRICTED), ct->dd_len2);

		if (pd == NULL) {
			error = CRYPTO_MECH_NOT_SUPPORTED;
			goto out;
		}
		/* Validate the MAC context template here */
		if ((pd->pd_prov_type == CRYPTO_SW_PROVIDER) &&
		    (mac_tmpl != NULL)) {
			kcf_ctx_template_t *ctx_mac_tmpl;

			ctx_mac_tmpl = (kcf_ctx_template_t *)mac_tmpl;

			if (ctx_mac_tmpl->ct_generation != me->me_gen_swprov) {
				KCF_PROV_REFRELE(pd);
				error = CRYPTO_OLD_CTX_TEMPLATE;
				goto out;
			}
			mops->mo_templ = ctx_mac_tmpl->ct_prov_tmpl;
		}

		break;
	}
	case KCF_OG_DECRYPT: {
		kcf_decrypt_ops_params_t *dcrops =
		    &(params->rp_u.decrypt_params);

		ct = (crypto_dual_data_t *)dcrops->dop_ciphertext;
		/* No expected recoverable failures, so no retry list */
		pd = kcf_get_mech_provider(dcrops->dop_framework_mechtype,
		    NULL, &error, NULL, CRYPTO_FG_DECRYPT_ATOMIC,
		    (areq->an_reqarg.cr_flag & CRYPTO_RESTRICTED), ct->dd_len1);

		if (pd == NULL) {
			error = CRYPTO_MECH_NOT_SUPPORTED;
			goto out;
		}
		break;
	}
	}

	/* The second step uses len2 and offset2 of the dual_data */
	next_req->kr_saveoffset = ct->dd_offset1;
	next_req->kr_savelen = ct->dd_len1;
	ct->dd_offset1 = ct->dd_offset2;
	ct->dd_len1 = ct->dd_len2;

	/* preserve if the caller is restricted */
	if (areq->an_reqarg.cr_flag & CRYPTO_RESTRICTED) {
		areq->an_reqarg.cr_flag = CRYPTO_RESTRICTED;
	} else {
		areq->an_reqarg.cr_flag = 0;
	}

	areq->an_reqarg.cr_callback_func = kcf_last_req;
	areq->an_reqarg.cr_callback_arg = next_req;
	areq->an_isdual = B_TRUE;

	/*
	 * We would like to call kcf_submit_request() here. But, that is
	 * not possible as that routine allocates a new kcf_areq_node_t
	 * request structure, while we need to reuse the existing request
	 * structure.
	 */
	switch (pd->pd_prov_type) {
	case CRYPTO_SW_PROVIDER:
		error = common_submit_request(pd, NULL, params,
		    KCF_RHNDL(KM_NOSLEEP));
		break;

	case CRYPTO_HW_PROVIDER: {
		kcf_provider_desc_t *old_pd;
		taskq_t *taskq = pd->pd_sched_info.ks_taskq;

		/*
		 * Set the params for the second step in the
		 * dual-ops.
		 */
		areq->an_params = *params;
		old_pd = areq->an_provider;
		KCF_PROV_REFRELE(old_pd);
		KCF_PROV_REFHOLD(pd);
		areq->an_provider = pd;

		/*
		 * Note that we have to do a taskq_dispatch()
		 * here as we may be in interrupt context.
		 */
		if (taskq_dispatch(taskq, process_req_hwp, areq,
		    TQ_NOSLEEP) == (taskqid_t)0) {
			error = CRYPTO_HOST_MEMORY;
		} else {
			error = CRYPTO_QUEUED;
		}
		break;
	}
	}

	/*
	 * We have to release the holds on the request and the provider
	 * in all cases.
	 */
	KCF_AREQ_REFRELE(areq);
	KCF_PROV_REFRELE(pd);

	if (error != CRYPTO_QUEUED) {
		/* restore, clean up, and invoke the client's callback */

		ct->dd_offset1 = next_req->kr_saveoffset;
		ct->dd_len1 = next_req->kr_savelen;
		areq->an_reqarg = next_req->kr_callreq;
		kmem_free(next_req, sizeof (kcf_dual_req_t));
		areq->an_isdual = B_FALSE;
		kcf_aop_done(areq, error);
	}
}

/*
 * Last part of an emulated dual operation.
 * Clean up and restore ...
 */
void
kcf_last_req(void *last_req_arg, int status)
{
	kcf_dual_req_t *last_req = (kcf_dual_req_t *)last_req_arg;

	kcf_req_params_t *params = &(last_req->kr_params);
	kcf_areq_node_t *areq = last_req->kr_areq;
	crypto_dual_data_t *ct;

	switch (params->rp_opgrp) {
	case KCF_OG_MAC: {
		kcf_mac_ops_params_t *mops = &(params->rp_u.mac_params);

		ct = (crypto_dual_data_t *)mops->mo_data;
		break;
	}
	case KCF_OG_DECRYPT: {
		kcf_decrypt_ops_params_t *dcrops =
		    &(params->rp_u.decrypt_params);

		ct = (crypto_dual_data_t *)dcrops->dop_ciphertext;
		break;
	}
	}
	ct->dd_offset1 = last_req->kr_saveoffset;
	ct->dd_len1 = last_req->kr_savelen;

	/* The submitter used kcf_last_req as its callback */

	if (areq == NULL) {
		crypto_call_req_t *cr = &last_req->kr_callreq;

		(*(cr->cr_callback_func))(cr->cr_callback_arg, status);
		kmem_free(last_req, sizeof (kcf_dual_req_t));
		return;
	}
	areq->an_reqarg = last_req->kr_callreq;
	KCF_AREQ_REFRELE(areq);
	kmem_free(last_req, sizeof (kcf_dual_req_t));
	areq->an_isdual = B_FALSE;
	kcf_aop_done(areq, status);
}