1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 /*
27  * This file is part of the core Kernel Cryptographic Framework.
28  * It implements the SPI functions exported to cryptographic
29  * providers.
30  */
31 
32 #include <sys/ksynch.h>
33 #include <sys/cmn_err.h>
34 #include <sys/ddi.h>
35 #include <sys/sunddi.h>
36 #include <sys/modctl.h>
37 #include <sys/crypto/common.h>
38 #include <sys/crypto/impl.h>
39 #include <sys/crypto/sched_impl.h>
40 #include <sys/crypto/spi.h>
41 #include <sys/crypto/ioctladmin.h>
42 #include <sys/taskq.h>
43 #include <sys/disp.h>
44 #include <sys/kstat.h>
45 #include <sys/policy.h>
46 #include <sys/cpuvar.h>
47 
48 /*
 * Thread count, minalloc and maxalloc values to be used for taskq_create().
50  */
51 int crypto_taskq_threads = CRYPTO_TASKQ_THREADS;
int crypto_taskq_minalloc = CRYPTO_TASKQ_MIN;
53 int crypto_taskq_maxalloc = CRYPTO_TASKQ_MAX;
54 
55 static void remove_provider(kcf_provider_desc_t *);
56 static void process_logical_providers(crypto_provider_info_t *,
57     kcf_provider_desc_t *);
58 static int init_prov_mechs(crypto_provider_info_t *, kcf_provider_desc_t *);
59 static int kcf_prov_kstat_update(kstat_t *, int);
60 static void undo_register_provider_extra(kcf_provider_desc_t *);
61 static void delete_kstat(kcf_provider_desc_t *);
62 
63 static kcf_prov_stats_t kcf_stats_ks_data_template = {
64 	{ "kcf_ops_total",		KSTAT_DATA_UINT64 },
65 	{ "kcf_ops_passed",		KSTAT_DATA_UINT64 },
66 	{ "kcf_ops_failed",		KSTAT_DATA_UINT64 },
67 	{ "kcf_ops_returned_busy",	KSTAT_DATA_UINT64 }
68 };
69 
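/*
 * If the provider supplied the given ops sub-vector, copy its contents
 * into the corresponding sub-vector of the KCF-owned copy.
 */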
#define	KCF_SPI_COPY_OPS(src, dst, ops)	do {				\
	if ((src)->ops != NULL)						\
		*((dst)->ops) = *((src)->ops);				\
} while (0)
72 
73 /*
74  * Copy an ops vector from src to dst. Used during provider registration
75  * to copy the ops vector from the provider info structure to the
76  * provider descriptor maintained by KCF.
77  * Copying the ops vector specified by the provider is needed since the
78  * framework does not require the provider info structure to be
79  * persistent.
80  */
81 static void
82 copy_ops_vector_v1(crypto_ops_t *src_ops, crypto_ops_t *dst_ops)
83 {
84 	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_control_ops);
85 	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_digest_ops);
86 	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_cipher_ops);
87 	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_mac_ops);
88 	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_sign_ops);
89 	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_verify_ops);
90 	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_dual_ops);
91 	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_dual_cipher_mac_ops);
92 	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_random_ops);
93 	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_session_ops);
94 	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_object_ops);
95 	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_key_ops);
96 	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_provider_ops);
97 	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_ctx_ops);
98 }
99 
100 static void
101 copy_ops_vector_v2(crypto_ops_t *src_ops, crypto_ops_t *dst_ops)
102 {
103 	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_mech_ops);
104 }
105 
106 static void
107 copy_ops_vector_v3(crypto_ops_t *src_ops, crypto_ops_t *dst_ops)
108 {
109 	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_nostore_key_ops);
110 }
111 
112 static void
113 copy_ops_vector_v4(crypto_ops_t *src_ops, crypto_ops_t *dst_ops)
114 {
115 	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_fips140_ops);
116 }
117 
118 /*
 * This routine is used to add cryptographic providers to the KCF framework.
 * Providers pass a crypto_provider_info structure to
 * crypto_register_provider() and get back a handle.  The crypto_provider_info
 * structure contains a
122  * list of mechanisms supported by the provider and an ops vector containing
123  * provider entry points.  Hardware providers call this routine in their attach
124  * routines.  Software providers call this routine in their _init() routine.
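 *
 * Illustrative sketch only: how a software provider's _init() might fill in
 * the info structure and register. The ops vector, mechanism table, names
 * and the pi_mech_list_count field name are assumptions made for the
 * example, not taken from this file.
 *
 *	static crypto_kcf_provider_handle_t example_prov_handle;
 *
 *	int
 *	_init(void)
 *	{
 *		crypto_provider_info_t info;
 *		int ret;
 *
 *		if ((ret = mod_install(&modlinkage)) != 0)
 *			return (ret);
 *
 *		bzero(&info, sizeof (info));
 *		info.pi_interface_version = CRYPTO_SPI_VERSION_1;
 *		info.pi_provider_type = CRYPTO_SW_PROVIDER;
 *		info.pi_provider_description = "example software provider";
 *		info.pi_ops_vector = &example_crypto_ops;
 *		info.pi_mech_list_count = EXAMPLE_MECH_COUNT;
 *		info.pi_mechanisms = example_mech_info_tab;
 *		info.pi_provider_dev.pd_sw = &modlinkage;
 *
 *		if (crypto_register_provider(&info, &example_prov_handle) !=
 *		    CRYPTO_SUCCESS)
 *			cmn_err(CE_WARN,
 *			    "example: crypto_register_provider() failed");
 *
 *		return (0);
 *	}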
125  */
126 int
127 crypto_register_provider(crypto_provider_info_t *info,
128     crypto_kcf_provider_handle_t *handle)
129 {
130 	int need_fips140_verify, need_verify = 1;
131 	struct modctl *mcp;
132 	char *name;
133 	char ks_name[KSTAT_STRLEN];
134 	kcf_provider_desc_t *prov_desc = NULL;
135 	int ret = CRYPTO_ARGUMENTS_BAD;
136 
137 	if (info->pi_interface_version > CRYPTO_SPI_VERSION_4)
138 		return (CRYPTO_VERSION_MISMATCH);
139 
140 	/*
141 	 * Check provider type, must be software, hardware, or logical.
142 	 */
143 	if (info->pi_provider_type != CRYPTO_HW_PROVIDER &&
144 	    info->pi_provider_type != CRYPTO_SW_PROVIDER &&
145 	    info->pi_provider_type != CRYPTO_LOGICAL_PROVIDER)
146 		return (CRYPTO_ARGUMENTS_BAD);
147 
148 	/*
149 	 * Allocate and initialize a new provider descriptor. We also
150 	 * hold it and release it when done.
151 	 */
152 	prov_desc = kcf_alloc_provider_desc(info);
153 	KCF_PROV_REFHOLD(prov_desc);
154 
155 	prov_desc->pd_prov_type = info->pi_provider_type;
156 
157 	/* provider-private handle, opaque to KCF */
158 	prov_desc->pd_prov_handle = info->pi_provider_handle;
159 
160 	/* copy provider description string */
161 	if (info->pi_provider_description != NULL) {
162 		/*
		 * pi_provider_description is a string that can contain
164 		 * up to CRYPTO_PROVIDER_DESCR_MAX_LEN + 1 characters
165 		 * INCLUDING the terminating null character. A bcopy()
166 		 * is necessary here as pd_description should not have
167 		 * a null character. See comments in kcf_alloc_provider_desc()
168 		 * for details on pd_description field.
169 		 */
170 		bcopy(info->pi_provider_description, prov_desc->pd_description,
171 		    min(strlen(info->pi_provider_description),
172 		    CRYPTO_PROVIDER_DESCR_MAX_LEN));
173 	}
174 
175 	if (info->pi_provider_type != CRYPTO_LOGICAL_PROVIDER) {
176 		if (info->pi_ops_vector == NULL) {
177 			goto bail;
178 		}
179 		copy_ops_vector_v1(info->pi_ops_vector,
180 		    prov_desc->pd_ops_vector);
181 		if (info->pi_interface_version >= CRYPTO_SPI_VERSION_2) {
182 			copy_ops_vector_v2(info->pi_ops_vector,
183 			    prov_desc->pd_ops_vector);
184 			prov_desc->pd_flags = info->pi_flags;
185 		}
186 		if (info->pi_interface_version >= CRYPTO_SPI_VERSION_3) {
187 			copy_ops_vector_v3(info->pi_ops_vector,
188 			    prov_desc->pd_ops_vector);
189 		}
190 		if (info->pi_interface_version == CRYPTO_SPI_VERSION_4) {
191 			copy_ops_vector_v4(info->pi_ops_vector,
192 			    prov_desc->pd_ops_vector);
193 		}
194 	}
195 
196 	/* object_ops and nostore_key_ops are mutually exclusive */
197 	if (prov_desc->pd_ops_vector->co_object_ops &&
198 	    prov_desc->pd_ops_vector->co_nostore_key_ops) {
199 		goto bail;
200 	}
201 	/*
202 	 * For software providers, copy the module name and module ID.
203 	 * For hardware providers, copy the driver name and instance.
204 	 */
205 	switch (info->pi_provider_type) {
206 	case  CRYPTO_SW_PROVIDER:
207 		if (info->pi_provider_dev.pd_sw == NULL)
208 			goto bail;
209 
210 		if ((mcp = mod_getctl(info->pi_provider_dev.pd_sw)) == NULL)
211 			goto bail;
212 
213 		prov_desc->pd_module_id = mcp->mod_id;
214 		name = mcp->mod_modname;
215 		break;
216 
217 	case CRYPTO_HW_PROVIDER:
218 	case CRYPTO_LOGICAL_PROVIDER:
219 		if (info->pi_provider_dev.pd_hw == NULL)
220 			goto bail;
221 
222 		prov_desc->pd_instance =
223 		    ddi_get_instance(info->pi_provider_dev.pd_hw);
224 		name = (char *)ddi_driver_name(info->pi_provider_dev.pd_hw);
225 		break;
226 	}
227 	if (name == NULL)
228 		goto bail;
229 
230 	prov_desc->pd_name = kmem_alloc(strlen(name) + 1, KM_SLEEP);
231 	(void) strcpy(prov_desc->pd_name, name);
232 
233 	if ((prov_desc->pd_mctlp = kcf_get_modctl(info)) == NULL)
234 		goto bail;
235 
236 	/* process the mechanisms supported by the provider */
237 	if ((ret = init_prov_mechs(info, prov_desc)) != CRYPTO_SUCCESS)
238 		goto bail;
239 
240 	/*
241 	 * Add provider to providers tables, also sets the descriptor
242 	 * pd_prov_id field.
243 	 */
244 	if ((ret = kcf_prov_tab_add_provider(prov_desc)) != CRYPTO_SUCCESS) {
245 		undo_register_provider(prov_desc, B_FALSE);
246 		goto bail;
247 	}
248 
249 	if ((need_verify = kcf_need_signature_verification(prov_desc)) == -1) {
250 		undo_register_provider(prov_desc, B_TRUE);
251 		ret = CRYPTO_MODVERIFICATION_FAILED;
252 		goto bail;
253 	}
254 
255 	if ((need_fips140_verify =
256 	    kcf_need_fips140_verification(prov_desc)) == -1) {
257 		mutex_enter(&prov_desc->pd_lock);
258 		prov_desc->pd_state = KCF_PROV_VERIFICATION_FAILED;
259 		mutex_exit(&prov_desc->pd_lock);
260 		ret = CRYPTO_FIPS140_ERROR;
261 		goto bail;
262 	}
263 
264 	/*
265 	 * We create a taskq only for a hardware provider. The global
266 	 * software queue is used for software providers. We handle ordering
267 	 * of multi-part requests in the taskq routine. So, it is safe to
268 	 * have multiple threads for the taskq. We pass TASKQ_PREPOPULATE flag
269 	 * to keep some entries cached to improve performance.
270 	 */
271 	if (prov_desc->pd_prov_type == CRYPTO_HW_PROVIDER)
272 		prov_desc->pd_taskq = taskq_create("kcf_taskq",
273 		    crypto_taskq_threads, minclsyspri,
274 		    crypto_taskq_minalloc, crypto_taskq_maxalloc,
275 		    TASKQ_PREPOPULATE);
276 	else
277 		prov_desc->pd_taskq = NULL;
278 
	/* Logical providers have no kernel session and no pd_flags. */
280 	if (prov_desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER) {
281 		/*
282 		 * Open a session for session-oriented providers. This session
283 		 * is used for all kernel consumers. This is fine as a provider
284 		 * is required to support multiple thread access to a session.
285 		 * We can do this only after the taskq has been created as we
286 		 * do a kcf_submit_request() to open the session.
287 		 */
288 		if (KCF_PROV_SESSION_OPS(prov_desc) != NULL) {
289 			kcf_req_params_t params;
290 
291 			KCF_WRAP_SESSION_OPS_PARAMS(&params,
292 			    KCF_OP_SESSION_OPEN, &prov_desc->pd_sid, 0,
293 			    CRYPTO_USER, NULL, 0, prov_desc);
294 			ret = kcf_submit_request(prov_desc, NULL, NULL, &params,
295 			    B_FALSE);
296 			if (ret != CRYPTO_SUCCESS)
297 				goto undo_then_bail;
298 		}
299 
300 		/*
301 		 * Get the value for the maximum input length allowed if
	 * CRYPTO_HASH_NO_UPDATE or CRYPTO_HMAC_NO_UPDATE is specified.
303 		 */
304 		if (prov_desc->pd_flags &
305 		    (CRYPTO_HASH_NO_UPDATE | CRYPTO_HMAC_NO_UPDATE)) {
306 			kcf_req_params_t params;
307 			crypto_provider_ext_info_t ext_info;
308 
309 			if (KCF_PROV_PROVMGMT_OPS(prov_desc) == NULL)
310 				goto undo_then_bail;
311 
312 			bzero(&ext_info, sizeof (ext_info));
313 			KCF_WRAP_PROVMGMT_OPS_PARAMS(&params,
314 			    KCF_OP_MGMT_EXTINFO,
315 			    0, NULL, 0, NULL, 0, NULL, &ext_info, prov_desc);
316 			ret = kcf_submit_request(prov_desc, NULL, NULL,
317 			    &params, B_FALSE);
318 			if (ret != CRYPTO_SUCCESS)
319 				goto undo_then_bail;
320 
321 			if (prov_desc->pd_flags & CRYPTO_HASH_NO_UPDATE) {
322 				prov_desc->pd_hash_limit =
323 				    ext_info.ei_hash_max_input_len;
324 			}
325 			if (prov_desc->pd_flags & CRYPTO_HMAC_NO_UPDATE) {
326 				prov_desc->pd_hmac_limit =
327 				    ext_info.ei_hmac_max_input_len;
328 			}
329 		}
330 	}
331 
332 	if (prov_desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER) {
333 		/*
334 		 * Create the kstat for this provider. There is a kstat
335 		 * installed for each successfully registered provider.
		 * This kstat is deleted when the provider unregisters.
337 		 */
338 		if (prov_desc->pd_prov_type == CRYPTO_SW_PROVIDER) {
339 			(void) snprintf(ks_name, KSTAT_STRLEN, "%s_%s",
340 			    prov_desc->pd_name, "provider_stats");
341 		} else {
342 			(void) snprintf(ks_name, KSTAT_STRLEN, "%s_%d_%u_%s",
343 			    prov_desc->pd_name, prov_desc->pd_instance,
344 			    prov_desc->pd_prov_id, "provider_stats");
345 		}
346 
347 		prov_desc->pd_kstat = kstat_create("kcf", 0, ks_name, "crypto",
348 		    KSTAT_TYPE_NAMED, sizeof (kcf_prov_stats_t) /
349 		    sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
350 
351 		if (prov_desc->pd_kstat != NULL) {
352 			bcopy(&kcf_stats_ks_data_template,
353 			    &prov_desc->pd_ks_data,
354 			    sizeof (kcf_stats_ks_data_template));
355 			prov_desc->pd_kstat->ks_data = &prov_desc->pd_ks_data;
356 			KCF_PROV_REFHOLD(prov_desc);
357 			prov_desc->pd_kstat->ks_private = prov_desc;
358 			prov_desc->pd_kstat->ks_update = kcf_prov_kstat_update;
359 			kstat_install(prov_desc->pd_kstat);
360 		}
361 	}
362 
363 	if (prov_desc->pd_prov_type == CRYPTO_HW_PROVIDER)
364 		process_logical_providers(info, prov_desc);
365 
366 	/* This provider needs to wait until we know the FIPS 140 status */
367 	if (need_fips140_verify == 1) {
368 		mutex_enter(&prov_desc->pd_lock);
369 		prov_desc->pd_state = KCF_PROV_UNVERIFIED_FIPS140;
370 		mutex_exit(&prov_desc->pd_lock);
371 		goto exit;
372 	}
373 
374 	/* This provider needs to have the signature verified */
375 	if (need_verify == 1) {
376 		mutex_enter(&prov_desc->pd_lock);
377 		prov_desc->pd_state = KCF_PROV_UNVERIFIED;
378 		mutex_exit(&prov_desc->pd_lock);
379 
380 		/* kcf_verify_signature routine will release this hold */
381 		KCF_PROV_REFHOLD(prov_desc);
382 
383 		if (prov_desc->pd_prov_type == CRYPTO_HW_PROVIDER) {
384 			/*
385 			 * It is not safe to make the door upcall to kcfd from
386 			 * this context since the kcfd thread could reenter
387 			 * devfs. So, we dispatch a taskq job to do the
388 			 * verification and return to the provider.
389 			 */
390 			(void) taskq_dispatch(system_taskq,
391 			    kcf_verify_signature, (void *)prov_desc, TQ_SLEEP);
392 		} else if (prov_desc->pd_prov_type == CRYPTO_SW_PROVIDER) {
393 			kcf_verify_signature(prov_desc);
394 			if (prov_desc->pd_state ==
395 			    KCF_PROV_VERIFICATION_FAILED) {
396 				undo_register_provider_extra(prov_desc);
397 				ret = CRYPTO_MODVERIFICATION_FAILED;
398 				goto bail;
399 			}
400 		}
401 	} else {
402 		mutex_enter(&prov_desc->pd_lock);
403 		prov_desc->pd_state = KCF_PROV_READY;
404 		mutex_exit(&prov_desc->pd_lock);
405 		kcf_do_notify(prov_desc, B_TRUE);
406 	}
407 
408 exit:
409 	*handle = prov_desc->pd_kcf_prov_handle;
410 	KCF_PROV_REFRELE(prov_desc);
411 	return (CRYPTO_SUCCESS);
412 
413 undo_then_bail:
414 	undo_register_provider(prov_desc, B_TRUE);
415 	ret = CRYPTO_FAILED;
416 bail:
417 	KCF_PROV_REFRELE(prov_desc);
418 	return (ret);
419 }
420 
421 /* Return the number of holds on a provider. */
422 int
423 kcf_get_refcnt(kcf_provider_desc_t *pd, boolean_t do_lock)
424 {
425 	int i;
426 	int refcnt = 0;
427 
428 	if (do_lock)
429 		for (i = 0; i < pd->pd_nbins; i++)
430 			mutex_enter(&(pd->pd_percpu_bins[i].kp_lock));
431 
432 	for (i = 0; i < pd->pd_nbins; i++)
433 		refcnt += pd->pd_percpu_bins[i].kp_holdcnt;
434 
435 	if (do_lock)
436 		for (i = 0; i < pd->pd_nbins; i++)
437 			mutex_exit(&(pd->pd_percpu_bins[i].kp_lock));
438 
439 	return (refcnt);
440 }
441 
442 /*
443  * This routine is used to notify the framework when a provider is being
444  * removed.  Hardware providers call this routine in their detach routines.
445  * Software providers call this routine in their _fini() routine.
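 *
 * Illustrative sketch only (names are hypothetical): a software provider's
 * _fini() typically refuses to unload until unregistration succeeds, since
 * CRYPTO_BUSY indicates the framework still has outstanding work or holds:
 *
 *	int
 *	_fini(void)
 *	{
 *		if (crypto_unregister_provider(example_prov_handle) !=
 *		    CRYPTO_SUCCESS)
 *			return (EBUSY);
 *
 *		return (mod_remove(&modlinkage));
 *	}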
446  */
447 int
448 crypto_unregister_provider(crypto_kcf_provider_handle_t handle)
449 {
450 	uint_t mech_idx;
451 	kcf_provider_desc_t *desc;
452 	kcf_prov_state_t saved_state;
453 
454 	/* lookup provider descriptor */
455 	if ((desc = kcf_prov_tab_lookup((crypto_provider_id_t)handle)) == NULL)
456 		return (CRYPTO_UNKNOWN_PROVIDER);
457 
458 	mutex_enter(&desc->pd_lock);
459 	/*
460 	 * Check if any other thread is disabling or removing
461 	 * this provider. We return if this is the case.
462 	 */
463 	if (desc->pd_state >= KCF_PROV_DISABLED) {
464 		mutex_exit(&desc->pd_lock);
465 		/* Release reference held by kcf_prov_tab_lookup(). */
466 		KCF_PROV_REFRELE(desc);
467 		return (CRYPTO_BUSY);
468 	}
469 
470 	saved_state = desc->pd_state;
471 	desc->pd_state = KCF_PROV_UNREGISTERING;
472 
473 	if (saved_state == KCF_PROV_BUSY) {
474 		/*
475 		 * The per-provider taskq threads may be waiting. We
476 		 * signal them so that they can start failing requests.
477 		 */
478 		cv_broadcast(&desc->pd_resume_cv);
479 	}
480 
481 	mutex_exit(&desc->pd_lock);
482 
483 	if (desc->pd_prov_type != CRYPTO_SW_PROVIDER) {
484 		remove_provider(desc);
485 	}
486 
487 	if (desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER) {
488 		/* remove the provider from the mechanisms tables */
489 		for (mech_idx = 0; mech_idx < desc->pd_mech_list_count;
490 		    mech_idx++) {
491 			kcf_remove_mech_provider(
492 			    desc->pd_mechanisms[mech_idx].cm_mech_name, desc);
493 		}
494 	}
495 
496 	/* remove provider from providers table */
497 	if (kcf_prov_tab_rem_provider((crypto_provider_id_t)handle) !=
498 	    CRYPTO_SUCCESS) {
499 		/* Release reference held by kcf_prov_tab_lookup(). */
500 		KCF_PROV_REFRELE(desc);
501 		return (CRYPTO_UNKNOWN_PROVIDER);
502 	}
503 
504 	delete_kstat(desc);
505 
506 	if (desc->pd_prov_type == CRYPTO_SW_PROVIDER) {
507 		/*
508 		 * Wait till the existing requests with the provider complete
509 		 * and all the holds are released. All the holds on a software
510 		 * provider are from kernel clients and the hold time
511 		 * is expected to be short. So, we won't be stuck here forever.
512 		 */
513 		while (kcf_get_refcnt(desc, B_TRUE) > 1) {
514 			/* wait 1 second and try again. */
515 			delay(1 * drv_usectohz(1000000));
516 		}
517 	} else {
518 		int i;
519 		kcf_prov_cpu_t *mp;
520 
521 		/*
522 		 * Wait until requests that have been sent to the provider
523 		 * complete.
524 		 */
525 		for (i = 0; i < desc->pd_nbins; i++) {
526 			mp = &(desc->pd_percpu_bins[i]);
527 
528 			mutex_enter(&mp->kp_lock);
529 			while (mp->kp_jobcnt > 0) {
530 				cv_wait(&mp->kp_cv, &mp->kp_lock);
531 			}
532 			mutex_exit(&mp->kp_lock);
533 		}
534 	}
535 
536 	mutex_enter(&desc->pd_lock);
537 	desc->pd_state = KCF_PROV_UNREGISTERED;
538 	mutex_exit(&desc->pd_lock);
539 
540 	kcf_do_notify(desc, B_FALSE);
541 
542 	mutex_enter(&prov_tab_mutex);
543 	/* Release reference held by kcf_prov_tab_lookup(). */
544 	KCF_PROV_REFRELE(desc);
545 
546 	if (kcf_get_refcnt(desc, B_TRUE) == 0) {
547 		/* kcf_free_provider_desc drops prov_tab_mutex */
548 		kcf_free_provider_desc(desc);
549 	} else {
550 		ASSERT(desc->pd_prov_type != CRYPTO_SW_PROVIDER);
551 		/*
552 		 * We could avoid this if /dev/crypto can proactively
553 		 * remove any holds on us from a dormant PKCS #11 app.
554 		 * For now, we check the provider table for
555 		 * KCF_PROV_UNREGISTERED entries when a provider is
556 		 * added to the table or when a provider is removed from it
557 		 * and free them when refcnt reaches zero.
558 		 */
559 		kcf_need_provtab_walk = B_TRUE;
560 		mutex_exit(&prov_tab_mutex);
561 	}
562 
563 	return (CRYPTO_SUCCESS);
564 }
565 
566 /*
567  * This routine is used to notify the framework that the state of
568  * a cryptographic provider has changed. Valid state codes are:
569  *
570  * CRYPTO_PROVIDER_READY
 *	The provider indicates that it can process more requests. A provider
 *	sends this event if it has previously notified the framework with
 *	CRYPTO_PROVIDER_BUSY.
 *
 * CRYPTO_PROVIDER_BUSY
 *	The provider cannot accept any more requests.
 *
 * CRYPTO_PROVIDER_FAILED
 *	The provider encountered an internal error. The framework will not
 *	send any more requests to the provider. The provider may notify
 *	with CRYPTO_PROVIDER_READY if it is able to recover from the error.
582  *
583  * This routine can be called from user or interrupt context.
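 *
 * For example (illustrative only), a hardware provider whose command queue
 * has filled up might throttle the framework and later resume with:
 *
 *	crypto_provider_notification(hdl, CRYPTO_PROVIDER_BUSY);
 *	...
 *	crypto_provider_notification(hdl, CRYPTO_PROVIDER_READY);
 *
 * where hdl is the handle the provider obtained from
 * crypto_register_provider().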
584  */
585 void
586 crypto_provider_notification(crypto_kcf_provider_handle_t handle, uint_t state)
587 {
588 	kcf_provider_desc_t *pd;
589 
590 	/* lookup the provider from the given handle */
591 	if ((pd = kcf_prov_tab_lookup((crypto_provider_id_t)handle)) == NULL)
592 		return;
593 
594 	mutex_enter(&pd->pd_lock);
595 
596 	if (pd->pd_state <= KCF_PROV_VERIFICATION_FAILED)
597 		goto out;
598 
599 	if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
600 		cmn_err(CE_WARN, "crypto_provider_notification: "
601 		    "logical provider (%x) ignored\n", handle);
602 		goto out;
603 	}
604 	switch (state) {
605 	case CRYPTO_PROVIDER_READY:
606 		switch (pd->pd_state) {
607 		case KCF_PROV_BUSY:
608 			pd->pd_state = KCF_PROV_READY;
609 			/*
610 			 * Signal the per-provider taskq threads that they
611 			 * can start submitting requests.
612 			 */
613 			cv_broadcast(&pd->pd_resume_cv);
614 			break;
615 
616 		case KCF_PROV_FAILED:
617 			/*
618 			 * The provider recovered from the error. Let us
619 			 * use it now.
620 			 */
621 			pd->pd_state = KCF_PROV_READY;
622 			break;
623 		}
624 		break;
625 
626 	case CRYPTO_PROVIDER_BUSY:
627 		switch (pd->pd_state) {
628 		case KCF_PROV_READY:
629 			pd->pd_state = KCF_PROV_BUSY;
630 			break;
631 		}
632 		break;
633 
634 	case CRYPTO_PROVIDER_FAILED:
635 		/*
636 		 * We note the failure and return. The per-provider taskq
637 		 * threads check this flag and start failing the
638 		 * requests, if it is set. See process_req_hwp() for details.
639 		 */
640 		switch (pd->pd_state) {
641 		case KCF_PROV_READY:
642 			pd->pd_state = KCF_PROV_FAILED;
643 			break;
644 
645 		case KCF_PROV_BUSY:
646 			pd->pd_state = KCF_PROV_FAILED;
647 			/*
648 			 * The per-provider taskq threads may be waiting. We
649 			 * signal them so that they can start failing requests.
650 			 */
651 			cv_broadcast(&pd->pd_resume_cv);
652 			break;
653 		}
654 		break;
655 	}
656 out:
657 	mutex_exit(&pd->pd_lock);
658 	KCF_PROV_REFRELE(pd);
659 }
660 
661 /*
 * This routine is used to notify the framework of the result of
663  * an asynchronous request handled by a provider. Valid error
664  * codes are the same as the CRYPTO_* errors defined in common.h.
665  *
666  * This routine can be called from user or interrupt context.
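 *
 * For example (illustrative only), a hardware provider's interrupt handler
 * might complete an asynchronous request with:
 *
 *	crypto_op_notification(reqp->dr_kcf_req,
 *	    (hw_status == 0) ? CRYPTO_SUCCESS : CRYPTO_DEVICE_ERROR);
 *
 * where reqp->dr_kcf_req is a hypothetical field holding the
 * crypto_req_handle_t that the framework passed to the provider entry point.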
667  */
668 void
669 crypto_op_notification(crypto_req_handle_t handle, int error)
670 {
671 	kcf_call_type_t ctype;
672 
673 	if (handle == NULL)
674 		return;
675 
676 	if ((ctype = GET_REQ_TYPE(handle)) == CRYPTO_SYNCH) {
677 		kcf_sreq_node_t *sreq = (kcf_sreq_node_t *)handle;
678 
679 		KCF_PROV_JOB_RELE_STAT(sreq->sn_mp, (error != CRYPTO_SUCCESS));
680 		kcf_sop_done(sreq, error);
681 	} else {
682 		kcf_areq_node_t *areq = (kcf_areq_node_t *)handle;
683 
684 		ASSERT(ctype == CRYPTO_ASYNCH);
685 		KCF_PROV_JOB_RELE_STAT(areq->an_mp, (error != CRYPTO_SUCCESS));
686 		kcf_aop_done(areq, error);
687 	}
688 }
689 
690 /*
691  * This routine is used by software providers to determine
692  * whether to use KM_SLEEP or KM_NOSLEEP during memory allocation.
693  * Note that hardware providers can always use KM_SLEEP. So,
694  * they do not need to call this routine.
695  *
696  * This routine can be called from user or interrupt context.
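 *
 * For example (illustrative only), a software provider entry point might
 * allocate a per-request context with:
 *
 *	ctx_p = kmem_zalloc(sizeof (*ctx_p), crypto_kmflag(req));
 *	if (ctx_p == NULL)
 *		return (CRYPTO_HOST_MEMORY);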
697  */
698 int
699 crypto_kmflag(crypto_req_handle_t handle)
700 {
701 	return (REQHNDL2_KMFLAG(handle));
702 }
703 
704 /*
705  * Process the mechanism info structures specified by the provider
706  * during registration. A NULL crypto_provider_info_t indicates
707  * an already initialized provider descriptor.
708  *
709  * Mechanisms are not added to the kernel's mechanism table if the
710  * provider is a logical provider.
711  *
 * Returns CRYPTO_SUCCESS on success, CRYPTO_ARGUMENTS_BAD if one
713  * of the specified mechanisms was malformed, or CRYPTO_HOST_MEMORY
714  * if the table of mechanisms is full.
715  */
716 static int
717 init_prov_mechs(crypto_provider_info_t *info, kcf_provider_desc_t *desc)
718 {
719 	uint_t mech_idx;
720 	uint_t cleanup_idx;
721 	int err = CRYPTO_SUCCESS;
722 	kcf_prov_mech_desc_t *pmd;
723 	int desc_use_count = 0;
724 	int mcount = desc->pd_mech_list_count;
725 
726 	if (desc->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
727 		if (info != NULL) {
728 			ASSERT(info->pi_mechanisms != NULL);
729 			bcopy(info->pi_mechanisms, desc->pd_mechanisms,
730 			    sizeof (crypto_mech_info_t) * mcount);
731 		}
732 		return (CRYPTO_SUCCESS);
733 	}
734 
735 	/*
736 	 * Copy the mechanism list from the provider info to the provider
737 	 * descriptor. desc->pd_mechanisms has an extra crypto_mech_info_t
738 	 * element if the provider has random_ops since we keep an internal
739 	 * mechanism, SUN_RANDOM, in this case.
740 	 */
741 	if (info != NULL) {
742 		if (info->pi_ops_vector->co_random_ops != NULL) {
743 			crypto_mech_info_t *rand_mi;
744 
745 			/*
746 			 * Need the following check as it is possible to have
747 			 * a provider that implements just random_ops and has
748 			 * pi_mechanisms == NULL.
749 			 */
750 			if (info->pi_mechanisms != NULL) {
751 				bcopy(info->pi_mechanisms, desc->pd_mechanisms,
752 				    sizeof (crypto_mech_info_t) * (mcount - 1));
753 			}
754 			rand_mi = &desc->pd_mechanisms[mcount - 1];
755 
756 			bzero(rand_mi, sizeof (crypto_mech_info_t));
757 			(void) strncpy(rand_mi->cm_mech_name, SUN_RANDOM,
758 			    CRYPTO_MAX_MECH_NAME);
759 			rand_mi->cm_func_group_mask = CRYPTO_FG_RANDOM;
760 		} else {
761 			ASSERT(info->pi_mechanisms != NULL);
762 			bcopy(info->pi_mechanisms, desc->pd_mechanisms,
763 			    sizeof (crypto_mech_info_t) * mcount);
764 		}
765 	}
766 
767 	/*
 * For each mechanism supported by the provider, add the provider
769 	 * to the corresponding KCF mechanism mech_entry chain.
770 	 */
771 	for (mech_idx = 0; mech_idx < desc->pd_mech_list_count; mech_idx++) {
772 		crypto_mech_info_t *mi = &desc->pd_mechanisms[mech_idx];
773 
774 		if ((mi->cm_mech_flags & CRYPTO_KEYSIZE_UNIT_IN_BITS) &&
775 		    (mi->cm_mech_flags & CRYPTO_KEYSIZE_UNIT_IN_BYTES)) {
776 			err = CRYPTO_ARGUMENTS_BAD;
777 			break;
778 		}
779 
780 		if ((err = kcf_add_mech_provider(mech_idx, desc, &pmd)) !=
781 		    KCF_SUCCESS)
782 			break;
783 
784 		if (pmd == NULL)
785 			continue;
786 
787 		/* The provider will be used for this mechanism */
788 		desc_use_count++;
789 	}
790 
791 	/*
792 	 * Don't allow multiple software providers with disabled mechanisms
793 	 * to register. Subsequent enabling of mechanisms will result in
794 	 * an unsupported configuration, i.e. multiple software providers
795 	 * per mechanism.
796 	 */
797 	if (desc_use_count == 0 && desc->pd_prov_type == CRYPTO_SW_PROVIDER)
798 		return (CRYPTO_ARGUMENTS_BAD);
799 
800 	if (err == KCF_SUCCESS)
801 		return (CRYPTO_SUCCESS);
802 
803 	/*
	 * An error occurred while adding a mechanism; clean up
	 * and bail.
806 	 */
807 	for (cleanup_idx = 0; cleanup_idx < mech_idx; cleanup_idx++) {
808 		kcf_remove_mech_provider(
809 		    desc->pd_mechanisms[cleanup_idx].cm_mech_name, desc);
810 	}
811 
812 	if (err == KCF_MECH_TAB_FULL)
813 		return (CRYPTO_HOST_MEMORY);
814 
815 	return (CRYPTO_ARGUMENTS_BAD);
816 }
817 
818 /*
 * Update routine for the provider kstat. Only privileged users are allowed
 * to access this information, since it is sensitive: some cryptographic
 * attacks (e.g. traffic analysis) can make use of it.
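 *
 * The per-provider kstats created below can be read from userland with
 * kstat(1M), e.g. (assuming a registered software provider named "md5"):
 *
 *	# kstat -m kcf -n md5_provider_stats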
823  */
824 static int
825 kcf_prov_kstat_update(kstat_t *ksp, int rw)
826 {
827 	kcf_prov_stats_t *ks_data;
828 	kcf_provider_desc_t *pd = (kcf_provider_desc_t *)ksp->ks_private;
829 	int i;
830 
831 	if (rw == KSTAT_WRITE)
832 		return (EACCES);
833 
834 	ks_data = ksp->ks_data;
835 
836 	if (secpolicy_sys_config(CRED(), B_TRUE) != 0) {
837 		ks_data->ps_ops_total.value.ui64 = 0;
838 		ks_data->ps_ops_passed.value.ui64 = 0;
839 		ks_data->ps_ops_failed.value.ui64 = 0;
840 		ks_data->ps_ops_busy_rval.value.ui64 = 0;
841 	} else {
842 		uint64_t dtotal, ftotal, btotal;
843 
844 		dtotal = ftotal = btotal = 0;
845 		/* No locking done since an exact count is not required. */
846 		for (i = 0; i < pd->pd_nbins; i++) {
847 			dtotal += pd->pd_percpu_bins[i].kp_ndispatches;
848 			ftotal += pd->pd_percpu_bins[i].kp_nfails;
849 			btotal += pd->pd_percpu_bins[i].kp_nbusy_rval;
850 		}
851 
852 		ks_data->ps_ops_total.value.ui64 = dtotal;
853 		ks_data->ps_ops_failed.value.ui64 = ftotal;
854 		ks_data->ps_ops_busy_rval.value.ui64 = btotal;
855 		ks_data->ps_ops_passed.value.ui64 = dtotal - ftotal - btotal;
856 	}
857 
858 	return (0);
859 }
860 
861 
862 /*
863  * Utility routine called from failure paths in crypto_register_provider()
864  * and from crypto_load_soft_disabled().
865  */
866 void
867 undo_register_provider(kcf_provider_desc_t *desc, boolean_t remove_prov)
868 {
869 	uint_t mech_idx;
870 
871 	/* remove the provider from the mechanisms tables */
872 	for (mech_idx = 0; mech_idx < desc->pd_mech_list_count;
873 	    mech_idx++) {
874 		kcf_remove_mech_provider(
875 		    desc->pd_mechanisms[mech_idx].cm_mech_name, desc);
876 	}
877 
878 	/* remove provider from providers table */
879 	if (remove_prov)
880 		(void) kcf_prov_tab_rem_provider(desc->pd_prov_id);
881 }
882 
883 static void
884 undo_register_provider_extra(kcf_provider_desc_t *desc)
885 {
886 	delete_kstat(desc);
887 	undo_register_provider(desc, B_TRUE);
888 }
889 
890 /*
891  * Utility routine called from crypto_load_soft_disabled(). Callers
892  * should have done a prior undo_register_provider().
893  */
894 void
895 redo_register_provider(kcf_provider_desc_t *pd)
896 {
897 	/* process the mechanisms supported by the provider */
898 	(void) init_prov_mechs(NULL, pd);
899 
900 	/*
901 	 * Hold provider in providers table. We should not call
902 	 * kcf_prov_tab_add_provider() here as the provider descriptor
	 * is still valid, which means it has an entry in the provider
904 	 * table.
905 	 */
906 	KCF_PROV_REFHOLD(pd);
907 }
908 
909 /*
 * Add provider (p1) to another provider's list of providers (p2).
 * Hardware and logical providers use this list to cross-reference
912  * each other.
913  */
914 static void
915 add_provider_to_array(kcf_provider_desc_t *p1, kcf_provider_desc_t *p2)
916 {
917 	kcf_provider_list_t *new;
918 
919 	new = kmem_alloc(sizeof (kcf_provider_list_t), KM_SLEEP);
920 	mutex_enter(&p2->pd_lock);
921 	new->pl_next = p2->pd_provider_list;
922 	p2->pd_provider_list = new;
923 	new->pl_provider = p1;
924 	mutex_exit(&p2->pd_lock);
925 }
926 
927 /*
 * Remove provider (p1) from another provider's list of providers (p2).
 * Hardware and logical providers use this list to cross-reference
930  * each other.
931  */
932 static void
933 remove_provider_from_array(kcf_provider_desc_t *p1, kcf_provider_desc_t *p2)
934 {
936 	kcf_provider_list_t *pl = NULL, **prev;
937 
938 	mutex_enter(&p2->pd_lock);
939 	for (pl = p2->pd_provider_list, prev = &p2->pd_provider_list;
940 	    pl != NULL; prev = &pl->pl_next, pl = pl->pl_next) {
941 		if (pl->pl_provider == p1) {
942 			break;
943 		}
944 	}
945 
	if (pl == NULL) {
947 		mutex_exit(&p2->pd_lock);
948 		return;
949 	}
950 
951 	/* detach and free kcf_provider_list structure */
952 	*prev = pl->pl_next;
953 	kmem_free(pl, sizeof (*pl));
954 	mutex_exit(&p2->pd_lock);
955 }
956 
957 /*
 * Walk the array of logical provider handles (crypto_provider_id) stored
 * in a crypto_provider_info structure and cross-link the registering
 * hardware provider with the kcf_provider_desc_t of each logical provider
 * it belongs to.
961  */
962 static void
963 process_logical_providers(crypto_provider_info_t *info, kcf_provider_desc_t *hp)
964 {
965 	kcf_provider_desc_t *lp;
966 	crypto_provider_id_t handle;
967 	int count = info->pi_logical_provider_count;
968 	int i;
969 
970 	/* add hardware provider to each logical provider */
971 	for (i = 0; i < count; i++) {
972 		handle = info->pi_logical_providers[i];
973 		lp = kcf_prov_tab_lookup((crypto_provider_id_t)handle);
974 		if (lp == NULL) {
975 			continue;
976 		}
977 		add_provider_to_array(hp, lp);
978 		hp->pd_flags |= KCF_LPROV_MEMBER;
979 
980 		/*
		 * A hardware provider has to keep the provider descriptor of
		 * every logical provider it belongs to, so that it can be
		 * removed from those logical providers when the hardware
		 * provider unregisters from the framework.
985 		 */
986 		add_provider_to_array(lp, hp);
987 		KCF_PROV_REFRELE(lp);
988 	}
989 }
990 
991 /*
992  * This routine removes a provider from all of the logical or
993  * hardware providers it belongs to, and frees the provider's
 * list of pointers to providers.
995  */
996 static void
997 remove_provider(kcf_provider_desc_t *pp)
998 {
999 	kcf_provider_desc_t *p;
1000 	kcf_provider_list_t *e, *next;
1001 
1002 	mutex_enter(&pp->pd_lock);
1003 	for (e = pp->pd_provider_list; e != NULL; e = next) {
1004 		p = e->pl_provider;
1005 		remove_provider_from_array(pp, p);
1006 		if (p->pd_prov_type == CRYPTO_HW_PROVIDER &&
1007 		    p->pd_provider_list == NULL)
1008 			p->pd_flags &= ~KCF_LPROV_MEMBER;
1009 		next = e->pl_next;
1010 		kmem_free(e, sizeof (*e));
1011 	}
1012 	pp->pd_provider_list = NULL;
1013 	mutex_exit(&pp->pd_lock);
1014 }
1015 
1016 /*
 * Dispatch events as needed for a provider. The is_added flag tells
1018  * whether the provider is registering or unregistering.
1019  */
1020 void
1021 kcf_do_notify(kcf_provider_desc_t *prov_desc, boolean_t is_added)
1022 {
1023 	int i;
1024 	crypto_notify_event_change_t ec;
1025 
1026 	ASSERT(prov_desc->pd_state > KCF_PROV_VERIFICATION_FAILED);
1027 
1028 	/*
1029 	 * Inform interested clients of the mechanisms becoming
1030 	 * available/unavailable. We skip this for logical providers
1031 	 * as they do not affect mechanisms.
1032 	 */
1033 	if (prov_desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER) {
1034 		ec.ec_provider_type = prov_desc->pd_prov_type;
1035 		ec.ec_change = is_added ? CRYPTO_MECH_ADDED :
1036 		    CRYPTO_MECH_REMOVED;
1037 		for (i = 0; i < prov_desc->pd_mech_list_count; i++) {
1038 			/* Skip any mechanisms not allowed by the policy */
1039 			if (is_mech_disabled(prov_desc,
1040 			    prov_desc->pd_mechanisms[i].cm_mech_name))
1041 				continue;
1042 
1043 			(void) strncpy(ec.ec_mech_name,
1044 			    prov_desc->pd_mechanisms[i].cm_mech_name,
1045 			    CRYPTO_MAX_MECH_NAME);
1046 			kcf_walk_ntfylist(CRYPTO_EVENT_MECHS_CHANGED, &ec);
1047 		}
1048 
1049 	}
1050 
1051 	/*
1052 	 * Inform interested clients about the new or departing provider.
 * In the case of a logical provider, we send the event only for the
 * logical provider and not for the underlying providers, which are
 * identified by the KCF_LPROV_MEMBER bit.
1056 	 */
1057 	if (prov_desc->pd_prov_type == CRYPTO_LOGICAL_PROVIDER ||
1058 	    (prov_desc->pd_flags & KCF_LPROV_MEMBER) == 0) {
1059 		kcf_walk_ntfylist(is_added ? CRYPTO_EVENT_PROVIDER_REGISTERED :
1060 		    CRYPTO_EVENT_PROVIDER_UNREGISTERED, prov_desc);
1061 	}
1062 }
1063 
1064 static void
1065 delete_kstat(kcf_provider_desc_t *desc)
1066 {
1067 	/* destroy the kstat created for this provider */
1068 	if (desc->pd_kstat != NULL) {
1069 		kcf_provider_desc_t *kspd = desc->pd_kstat->ks_private;
1070 
1071 		/* release reference held by desc->pd_kstat->ks_private */
1072 		ASSERT(desc == kspd);
1073 		kstat_delete(kspd->pd_kstat);
1074 		desc->pd_kstat = NULL;
1075 		KCF_PROV_REFRELE(kspd);
1076 	}
1077 }
1078