1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 /*
30  * This file is part of the core Kernel Cryptographic Framework.
31  * It implements the SPI functions exported to cryptographic
32  * providers.
33  */
34 
35 #include <sys/ksynch.h>
36 #include <sys/cmn_err.h>
37 #include <sys/ddi.h>
38 #include <sys/sunddi.h>
39 #include <sys/modctl.h>
40 #include <sys/crypto/common.h>
41 #include <sys/crypto/impl.h>
42 #include <sys/crypto/sched_impl.h>
43 #include <sys/crypto/spi.h>
44 #include <sys/taskq.h>
45 #include <sys/disp.h>
46 #include <sys/kstat.h>
47 #include <sys/policy.h>
48 
49 /*
50  * minalloc and maxalloc values to be used for taskq_create().
51  */
int crypto_taskq_minalloc = CRYPTO_TASKQ_MIN;
53 int crypto_taskq_maxalloc = CRYPTO_TASKQ_MAX;
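
/*
 * These globals are tunables. For example, an administrator could
 * override the defaults at boot time from /etc/system (assuming this
 * file is built into the "kcf" module; the values shown are purely
 * illustrative):
 *
 *	set kcf:crypto_taskq_minalloc = 64
 *	set kcf:crypto_taskq_maxalloc = 16384
 */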
54 
55 static void free_provider_list(kcf_provider_list_t *);
56 static void remove_provider(kcf_provider_desc_t *);
57 static void process_logical_providers(crypto_provider_info_t *,
58     kcf_provider_desc_t *);
59 static void copy_ops_vector_v1(crypto_ops_t *, crypto_ops_t *);
60 static void copy_ops_vector_v2(crypto_ops_t *, crypto_ops_t *);
61 static int init_prov_mechs(crypto_provider_info_t *, kcf_provider_desc_t *);
62 static int kcf_prov_kstat_update(kstat_t *, int);
63 
64 static kcf_prov_stats_t kcf_stats_ks_data_template = {
65 	{ "kcf_ops_total",		KSTAT_DATA_UINT64 },
66 	{ "kcf_ops_passed",		KSTAT_DATA_UINT64 },
67 	{ "kcf_ops_failed",		KSTAT_DATA_UINT64 },
68 	{ "kcf_ops_returned_busy",	KSTAT_DATA_UINT64 }
69 };
70 
#define	KCF_SPI_COPY_OPS(src, dst, ops)	do {			\
	if ((src)->ops != NULL)					\
		*((dst)->ops) = *((src)->ops);			\
} while (0)
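
/*
 * For illustration, a call such as
 *
 *	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_digest_ops);
 *
 * copies the provider-supplied digest ops sub-vector into the KCF-owned
 * descriptor only when the provider actually supplies one; a NULL
 * sub-vector in the source leaves the destination sub-vector untouched.
 */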
73 
74 /*
 * This routine is used to add cryptographic providers to the KCF framework.
76  * Providers pass a crypto_provider_info structure to crypto_register_provider()
77  * and get back a handle.  The crypto_provider_info structure contains a
78  * list of mechanisms supported by the provider and an ops vector containing
79  * provider entry points.  Hardware providers call this routine in their attach
80  * routines.  Software providers call this routine in their _init() routine.
81  */
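/*
 * Illustrative sketch (not part of this file): a software provider's
 * _init() routine would typically register itself along the following
 * lines. All names used below (mysoft_prov_info, mysoft_prov_handle,
 * mysoft_modlinkage) are hypothetical, and the initialization of the
 * crypto_provider_info_t (provider type, description, ops vector,
 * mechanism list, ...) is elided.
 *
 *	static crypto_provider_info_t mysoft_prov_info;
 *	static crypto_kcf_provider_handle_t mysoft_prov_handle;
 *
 *	int
 *	_init(void)
 *	{
 *		int ret;
 *
 *		if ((ret = mod_install(&mysoft_modlinkage)) != 0)
 *			return (ret);
 *
 *		if (crypto_register_provider(&mysoft_prov_info,
 *		    &mysoft_prov_handle) != CRYPTO_SUCCESS)
 *			cmn_err(CE_WARN,
 *			    "mysoft: provider registration failed");
 *
 *		return (0);
 *	}
 */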
82 int
83 crypto_register_provider(crypto_provider_info_t *info,
84     crypto_kcf_provider_handle_t *handle)
85 {
86 	int i;
87 	int vstatus = 0;
88 	struct modctl *mcp;
89 	char *name;
90 	char ks_name[KSTAT_STRLEN];
91 	crypto_notify_event_change_t ec;
92 
93 	kcf_provider_desc_t *prov_desc = NULL;
94 	int ret = CRYPTO_ARGUMENTS_BAD;
95 
96 	if (info->pi_interface_version > CRYPTO_SPI_VERSION_2)
97 		return (CRYPTO_VERSION_MISMATCH);
98 
	/*
	 * Check the provider type: it must be software, hardware, or logical.
	 */
102 	if (info->pi_provider_type != CRYPTO_HW_PROVIDER &&
103 	    info->pi_provider_type != CRYPTO_SW_PROVIDER &&
104 	    info->pi_provider_type != CRYPTO_LOGICAL_PROVIDER)
105 		return (CRYPTO_ARGUMENTS_BAD);
106 
107 	/*
108 	 * Allocate and initialize a new provider descriptor. We also
109 	 * hold it and release it when done.
110 	 */
111 	prov_desc = kcf_alloc_provider_desc(info);
112 	KCF_PROV_REFHOLD(prov_desc);
113 
114 	prov_desc->pd_prov_type = info->pi_provider_type;
115 
116 	/* provider-private handle, opaque to KCF */
117 	prov_desc->pd_prov_handle = info->pi_provider_handle;
118 
119 	/* copy provider description string */
120 	if (info->pi_provider_description != NULL) {
		/*
		 * pi_provider_description is a string that can contain
		 * up to CRYPTO_PROVIDER_DESCR_MAX_LEN + 1 characters
		 * INCLUDING the terminating null character. A bcopy()
		 * is necessary here as pd_description should not have
		 * a null character. See comments in kcf_alloc_provider_desc()
		 * for details on the pd_description field.
		 */
129 		bcopy(info->pi_provider_description, prov_desc->pd_description,
130 		    min(strlen(info->pi_provider_description),
131 		    CRYPTO_PROVIDER_DESCR_MAX_LEN));
132 	}
133 
134 	if (info->pi_provider_type != CRYPTO_LOGICAL_PROVIDER) {
		if (info->pi_ops_vector == NULL) {
			goto bail;
		}
138 		copy_ops_vector_v1(info->pi_ops_vector,
139 		    prov_desc->pd_ops_vector);
140 		if (info->pi_interface_version == CRYPTO_SPI_VERSION_2) {
141 			copy_ops_vector_v2(info->pi_ops_vector,
142 			    prov_desc->pd_ops_vector);
143 			prov_desc->pd_flags = info->pi_flags;
144 		}
145 	}
146 
	/*
	 * For software providers, copy the module name and module ID.
	 * For hardware and logical providers, copy the driver name and
	 * instance.
	 */
151 	switch (info->pi_provider_type) {
	case CRYPTO_SW_PROVIDER:
153 		if (info->pi_provider_dev.pd_sw == NULL)
154 			goto bail;
155 
156 		if ((mcp = mod_getctl(info->pi_provider_dev.pd_sw)) == NULL)
157 			goto bail;
158 
159 		prov_desc->pd_module_id = mcp->mod_id;
160 		name = mcp->mod_modname;
161 		break;
162 
163 	case CRYPTO_HW_PROVIDER:
164 	case CRYPTO_LOGICAL_PROVIDER:
165 		if (info->pi_provider_dev.pd_hw == NULL)
166 			goto bail;
167 
168 		prov_desc->pd_instance =
169 		    ddi_get_instance(info->pi_provider_dev.pd_hw);
170 		name = (char *)ddi_driver_name(info->pi_provider_dev.pd_hw);
171 		break;
172 	}
173 	if (name == NULL)
174 		goto bail;
175 
176 	prov_desc->pd_name = kmem_alloc(strlen(name) + 1, KM_SLEEP);
177 	(void) strcpy(prov_desc->pd_name, name);
178 
179 	if ((prov_desc->pd_mctlp = kcf_get_modctl(info)) == NULL)
180 		goto bail;
181 
182 	/* process the mechanisms supported by the provider */
183 	if ((ret = init_prov_mechs(info, prov_desc)) != CRYPTO_SUCCESS)
184 		goto bail;
185 
	/*
	 * Add the provider to the providers table. This also sets the
	 * descriptor's pd_prov_id field.
	 */
190 	if ((ret = kcf_prov_tab_add_provider(prov_desc)) != CRYPTO_SUCCESS) {
191 		undo_register_provider(prov_desc, B_FALSE);
192 		goto bail;
193 	}
194 
195 	if (info->pi_provider_type != CRYPTO_LOGICAL_PROVIDER) {
196 		if ((vstatus = kcf_verify_signature(prov_desc)) ==
197 		    CRYPTO_MODVERIFICATION_FAILED) {
198 			undo_register_provider(prov_desc, B_TRUE);
199 			ret = CRYPTO_MODVERIFICATION_FAILED;
200 			goto bail;
201 		}
202 	}
203 
	/*
	 * We create a taskq only for a hardware provider. The global
	 * software queue is used for software providers. The taskq is
	 * limited to a single thread since tasks are guaranteed to be
	 * executed in the order they are scheduled when nthreads == 1. We
	 * pass the TASKQ_PREPOPULATE flag to keep some entries cached to
	 * improve performance.
	 */
212 	if (prov_desc->pd_prov_type == CRYPTO_HW_PROVIDER)
213 		prov_desc->pd_sched_info.ks_taskq = taskq_create("kcf_taskq",
214 		    1, minclsyspri, crypto_taskq_minalloc,
215 		    crypto_taskq_maxalloc, TASKQ_PREPOPULATE);
216 	else
217 		prov_desc->pd_sched_info.ks_taskq = NULL;
218 
	/* no kernel session is opened for logical providers */
220 	if (prov_desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER) {
221 		/*
222 		 * Open a session for session-oriented providers. This session
223 		 * is used for all kernel consumers. This is fine as a provider
224 		 * is required to support multiple thread access to a session.
225 		 * We can do this only after the taskq has been created as we
226 		 * do a kcf_submit_request() to open the session.
227 		 */
228 		if (KCF_PROV_SESSION_OPS(prov_desc) != NULL) {
229 			kcf_req_params_t params;
230 
231 			KCF_WRAP_SESSION_OPS_PARAMS(&params,
232 			    KCF_OP_SESSION_OPEN, &prov_desc->pd_sid, 0,
233 			    CRYPTO_USER, NULL, 0, prov_desc);
234 			ret = kcf_submit_request(prov_desc, NULL, NULL, &params,
235 			    B_FALSE);
236 
237 			if (ret != CRYPTO_SUCCESS) {
238 				undo_register_provider(prov_desc, B_TRUE);
239 				ret = CRYPTO_FAILED;
240 				goto bail;
241 			}
242 		}
243 	}
244 
245 	if (prov_desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER) {
		/*
		 * Create the kstat for this provider. There is a kstat
		 * installed for each successfully registered provider.
		 * This kstat is deleted when the provider unregisters.
		 */
251 		if (prov_desc->pd_prov_type == CRYPTO_SW_PROVIDER) {
252 			(void) snprintf(ks_name, KSTAT_STRLEN, "%s_%s",
253 			    prov_desc->pd_name, "provider_stats");
254 		} else {
255 			(void) snprintf(ks_name, KSTAT_STRLEN, "%s_%d_%u_%s",
256 			    prov_desc->pd_name, prov_desc->pd_instance,
257 			    prov_desc->pd_prov_id, "provider_stats");
258 		}
259 
260 		prov_desc->pd_kstat = kstat_create("kcf", 0, ks_name, "crypto",
261 		    KSTAT_TYPE_NAMED, sizeof (kcf_prov_stats_t) /
262 		    sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
263 
264 		if (prov_desc->pd_kstat != NULL) {
265 			bcopy(&kcf_stats_ks_data_template,
266 			    &prov_desc->pd_ks_data,
267 			    sizeof (kcf_stats_ks_data_template));
268 			prov_desc->pd_kstat->ks_data = &prov_desc->pd_ks_data;
269 			KCF_PROV_REFHOLD(prov_desc);
270 			KCF_PROV_IREFHOLD(prov_desc);
271 			prov_desc->pd_kstat->ks_private = prov_desc;
272 			prov_desc->pd_kstat->ks_update = kcf_prov_kstat_update;
273 			kstat_install(prov_desc->pd_kstat);
274 		}
275 	}
276 
277 	if (prov_desc->pd_prov_type == CRYPTO_HW_PROVIDER)
278 		process_logical_providers(info, prov_desc);
279 
280 	/*
281 	 * Inform interested kernel clients of the event.
282 	 * Logical providers are not visible to kernel clients.
283 	 */
284 	if (prov_desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER) {
285 		ec.ec_provider_type = prov_desc->pd_prov_type;
286 		ec.ec_change = CRYPTO_EVENT_CHANGE_ADDED;
287 		for (i = 0; i < prov_desc->pd_mech_list_count; i++) {
288 			/* Skip any mechanisms not allowed by the policy */
289 			if (is_mech_disabled(prov_desc,
290 			    prov_desc->pd_mechanisms[i].cm_mech_name))
291 				continue;
292 
293 			(void) strncpy(ec.ec_mech_name,
294 			    prov_desc->pd_mechanisms[i].cm_mech_name,
295 			    CRYPTO_MAX_MECH_NAME);
296 			kcf_walk_ntfylist(CRYPTO_EVENT_PROVIDERS_CHANGE, &ec);
297 		}
298 	}
299 
300 	mutex_enter(&prov_desc->pd_lock);
301 	prov_desc->pd_state = (vstatus == 0) ? KCF_PROV_READY :
302 	    KCF_PROV_UNVERIFIED;
303 	mutex_exit(&prov_desc->pd_lock);
304 
305 	*handle = prov_desc->pd_kcf_prov_handle;
306 	KCF_PROV_REFRELE(prov_desc);
307 	return (CRYPTO_SUCCESS);
308 
309 bail:
310 	KCF_PROV_REFRELE(prov_desc);
311 	return (ret);
312 }
313 
314 /*
315  * This routine is used to notify the framework when a provider is being
316  * removed.  Hardware providers call this routine in their detach routines.
317  * Software providers call this routine in their _fini() routine.
318  */
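/*
 * Illustrative sketch (hypothetical names, matching the sketch above
 * crypto_register_provider()): a software provider's _fini() routine
 * would typically unregister first and back off if the framework reports
 * that the provider is still in use.
 *
 *	int
 *	_fini(void)
 *	{
 *		if (crypto_unregister_provider(mysoft_prov_handle) !=
 *		    CRYPTO_SUCCESS)
 *			return (EBUSY);
 *
 *		return (mod_remove(&mysoft_modlinkage));
 *	}
 */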
319 int
320 crypto_unregister_provider(crypto_kcf_provider_handle_t handle)
321 {
322 	int i;
323 	uint_t mech_idx;
324 	kcf_provider_desc_t *desc;
325 	crypto_notify_event_change_t ec;
326 	kcf_prov_state_t saved_state;
327 
328 	/* lookup provider descriptor */
329 	if ((desc = kcf_prov_tab_lookup((crypto_provider_id_t)handle)) == NULL)
330 		return (CRYPTO_UNKNOWN_PROVIDER);
331 
332 	mutex_enter(&desc->pd_lock);
333 	/*
334 	 * Check if any other thread is disabling or removing
335 	 * this provider. We return if this is the case.
336 	 */
337 	if (desc->pd_state >= KCF_PROV_DISABLED) {
338 		mutex_exit(&desc->pd_lock);
339 		/* Release reference held by kcf_prov_tab_lookup(). */
340 		KCF_PROV_REFRELE(desc);
341 		return (CRYPTO_BUSY);
342 	}
343 
344 	saved_state = desc->pd_state;
345 	desc->pd_state = KCF_PROV_REMOVED;
346 
347 	if (saved_state == KCF_PROV_BUSY) {
348 		/*
349 		 * The per-provider taskq thread may be waiting. We
350 		 * signal it so that it can start failing requests.
351 		 * Note that we do not need a cv_broadcast() as we keep
352 		 * only a single thread per taskq.
353 		 */
354 		cv_signal(&desc->pd_resume_cv);
355 	}
356 
357 	if (desc->pd_prov_type == CRYPTO_SW_PROVIDER) {
358 		/*
359 		 * Check if this provider is currently being used.
360 		 * pd_irefcnt is the number of holds from the internal
361 		 * structures. We add one to account for the above lookup.
362 		 */
363 		if (desc->pd_refcnt > desc->pd_irefcnt + 1) {
364 			desc->pd_state = saved_state;
365 			mutex_exit(&desc->pd_lock);
366 			/* Release reference held by kcf_prov_tab_lookup(). */
367 			KCF_PROV_REFRELE(desc);
			/*
			 * When the administrator gets the busy return
			 * value, they will presumably stop the clients,
			 * thus removing the holds. Any retry will then
			 * succeed.
			 */
373 			return (CRYPTO_BUSY);
374 		}
375 	}
376 	mutex_exit(&desc->pd_lock);
377 
378 	if (desc->pd_prov_type != CRYPTO_SW_PROVIDER) {
379 		remove_provider(desc);
380 	}
381 
382 	if (desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER) {
383 		/* remove the provider from the mechanisms tables */
384 		for (mech_idx = 0; mech_idx < desc->pd_mech_list_count;
385 		    mech_idx++) {
386 			kcf_remove_mech_provider(
387 			    desc->pd_mechanisms[mech_idx].cm_mech_name, desc);
388 		}
389 	}
390 
391 	/* remove provider from providers table */
392 	if (kcf_prov_tab_rem_provider((crypto_provider_id_t)handle) !=
393 	    CRYPTO_SUCCESS) {
394 		/* Release reference held by kcf_prov_tab_lookup(). */
395 		KCF_PROV_REFRELE(desc);
396 		return (CRYPTO_UNKNOWN_PROVIDER);
397 	}
398 
399 	/* destroy the kstat created for this provider */
400 	if (desc->pd_kstat != NULL) {
401 		kcf_provider_desc_t *kspd = desc->pd_kstat->ks_private;
402 
403 		/* release reference held by desc->pd_kstat->ks_private */
404 		ASSERT(desc == kspd);
405 		kstat_delete(kspd->pd_kstat);
406 		KCF_PROV_REFRELE(kspd);
407 		KCF_PROV_IREFRELE(kspd);
408 	}
409 
410 	if (desc->pd_prov_type == CRYPTO_SW_PROVIDER) {
411 		/* Release reference held by kcf_prov_tab_lookup(). */
412 		KCF_PROV_REFRELE(desc);
413 
414 		/*
415 		 * Wait till the existing requests complete.
416 		 */
417 		mutex_enter(&desc->pd_lock);
418 		while (desc->pd_state != KCF_PROV_FREED)
419 			cv_wait(&desc->pd_remove_cv, &desc->pd_lock);
420 		mutex_exit(&desc->pd_lock);
421 	} else {
422 		/*
423 		 * Wait until requests that have been sent to the provider
424 		 * complete.
425 		 */
426 		mutex_enter(&desc->pd_lock);
427 		while (desc->pd_irefcnt > 0)
428 			cv_wait(&desc->pd_remove_cv, &desc->pd_lock);
429 		mutex_exit(&desc->pd_lock);
430 	}
431 
432 	/*
433 	 * Inform interested kernel clients of the event.
434 	 * Logical providers are not visible to kernel clients.
435 	 */
436 	if (desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER) {
437 		ec.ec_provider_type = desc->pd_prov_type;
438 		ec.ec_change = CRYPTO_EVENT_CHANGE_REMOVED;
439 		for (i = 0; i < desc->pd_mech_list_count; i++) {
440 			/* Skip any mechanisms not allowed by the policy */
441 			if (is_mech_disabled(desc,
442 			    desc->pd_mechanisms[i].cm_mech_name))
443 				continue;
444 
445 			(void) strncpy(ec.ec_mech_name,
446 			    desc->pd_mechanisms[i].cm_mech_name,
447 			    CRYPTO_MAX_MECH_NAME);
448 			kcf_walk_ntfylist(CRYPTO_EVENT_PROVIDERS_CHANGE, &ec);
449 		}
450 	}
451 
452 	if (desc->pd_prov_type == CRYPTO_SW_PROVIDER) {
453 		/*
454 		 * This is the only place where kcf_free_provider_desc()
455 		 * is called directly. KCF_PROV_REFRELE() should free the
456 		 * structure in all other places.
457 		 */
458 		ASSERT(desc->pd_state == KCF_PROV_FREED &&
459 		    desc->pd_refcnt == 0);
460 		kcf_free_provider_desc(desc);
461 	} else {
462 		KCF_PROV_REFRELE(desc);
463 	}
464 
465 	return (CRYPTO_SUCCESS);
466 }
467 
468 /*
469  * This routine is used to notify the framework that the state of
470  * a cryptographic provider has changed. Valid state codes are:
471  *
 * CRYPTO_PROVIDER_READY
 * 	The provider indicates that it can process more requests. A provider
 *	sends this notification if it has previously notified us with a
 *	CRYPTO_PROVIDER_BUSY.
 *
 * CRYPTO_PROVIDER_BUSY
 * 	The provider cannot accept more requests.
 *
 * CRYPTO_PROVIDER_FAILED
 *	The provider encountered an internal error. The framework will not
 * 	send any more requests to the provider. The provider may notify
 *	with a CRYPTO_PROVIDER_READY if it is able to recover from the error.
484  *
485  * This routine can be called from user or interrupt context.
486  */
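/*
 * Illustrative sketch (hypothetical handle name): a hardware provider
 * whose command queue fills up could throttle the framework and later
 * resume with
 *
 *	crypto_provider_notification(myhw_prov_handle, CRYPTO_PROVIDER_BUSY);
 *	...
 *	crypto_provider_notification(myhw_prov_handle, CRYPTO_PROVIDER_READY);
 */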
487 void
488 crypto_provider_notification(crypto_kcf_provider_handle_t handle, uint_t state)
489 {
490 	kcf_provider_desc_t *pd;
491 
492 	/* lookup the provider from the given handle */
493 	if ((pd = kcf_prov_tab_lookup((crypto_provider_id_t)handle)) == NULL)
494 		return;
495 
496 	mutex_enter(&pd->pd_lock);
497 
498 	if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
499 		cmn_err(CE_WARN, "crypto_provider_notification: "
500 		    "logical provider (%x) ignored\n", handle);
501 		goto out;
502 	}
503 	switch (state) {
504 	case CRYPTO_PROVIDER_READY:
505 		switch (pd->pd_state) {
506 		case KCF_PROV_BUSY:
507 			pd->pd_state = KCF_PROV_READY;
508 			/*
509 			 * Signal the per-provider taskq thread that it
510 			 * can start submitting requests. Note that we do
511 			 * not need a cv_broadcast() as we keep only a
512 			 * single thread per taskq.
513 			 */
514 			cv_signal(&pd->pd_resume_cv);
515 			break;
516 
517 		case KCF_PROV_FAILED:
518 			/*
519 			 * The provider recovered from the error. Let us
520 			 * use it now.
521 			 */
522 			pd->pd_state = KCF_PROV_READY;
523 			break;
524 		}
525 		break;
526 
527 	case CRYPTO_PROVIDER_BUSY:
528 		switch (pd->pd_state) {
529 		case KCF_PROV_READY:
530 			pd->pd_state = KCF_PROV_BUSY;
531 			break;
532 		}
533 		break;
534 
535 	case CRYPTO_PROVIDER_FAILED:
		/*
		 * We note the failure and return. The per-provider taskq
		 * thread checks this state and starts failing requests
		 * if it is set. See process_req_hwp() for details.
		 */
541 		switch (pd->pd_state) {
542 		case KCF_PROV_READY:
543 			pd->pd_state = KCF_PROV_FAILED;
544 			break;
545 
546 		case KCF_PROV_BUSY:
547 			pd->pd_state = KCF_PROV_FAILED;
548 			/*
549 			 * The per-provider taskq thread may be waiting. We
550 			 * signal it so that it can start failing requests.
551 			 */
552 			cv_signal(&pd->pd_resume_cv);
553 			break;
554 		}
555 		break;
556 	}
557 out:
558 	mutex_exit(&pd->pd_lock);
559 	KCF_PROV_REFRELE(pd);
560 }
561 
562 /*
 * This routine is used to notify the framework of the result of
 * an asynchronous request handled by a provider. Valid error
565  * codes are the same as the CRYPTO_* errors defined in common.h.
566  *
567  * This routine can be called from user or interrupt context.
568  */
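/*
 * Illustrative sketch (hypothetical names): a hardware provider's
 * interrupt handler would typically complete an outstanding request with
 *
 *	crypto_op_notification(reqp->mr_kcf_req,
 *	    hw_error ? CRYPTO_DEVICE_ERROR : CRYPTO_SUCCESS);
 *
 * where mr_kcf_req is the crypto_req_handle_t that the framework passed
 * to the provider's entry point when the request was submitted.
 */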
569 void
570 crypto_op_notification(crypto_req_handle_t handle, int error)
571 {
572 	kcf_call_type_t ctype;
573 
574 	if ((ctype = GET_REQ_TYPE(handle)) == CRYPTO_SYNCH) {
575 		kcf_sreq_node_t *sreq = (kcf_sreq_node_t *)handle;
576 
577 		if (error != CRYPTO_SUCCESS)
578 			sreq->sn_provider->pd_sched_info.ks_nfails++;
579 		KCF_PROV_IREFRELE(sreq->sn_provider);
580 		kcf_sop_done(sreq, error);
581 	} else {
582 		kcf_areq_node_t *areq = (kcf_areq_node_t *)handle;
583 
584 		ASSERT(ctype == CRYPTO_ASYNCH);
585 		if (error != CRYPTO_SUCCESS)
586 			areq->an_provider->pd_sched_info.ks_nfails++;
587 		KCF_PROV_IREFRELE(areq->an_provider);
588 		kcf_aop_done(areq, error);
589 	}
590 }
591 
592 /*
593  * This routine is used by software providers to determine
594  * whether to use KM_SLEEP or KM_NOSLEEP during memory allocation.
595  * Note that hardware providers can always use KM_SLEEP. So,
596  * they do not need to call this routine.
597  *
598  * This routine can be called from user or interrupt context.
599  */
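/*
 * Illustrative use in a software provider entry point (hypothetical
 * variable names):
 *
 *	buf = kmem_alloc(len, crypto_kmflag(req));
 *
 * where req is the crypto_req_handle_t passed in by the framework.
 */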
600 int
601 crypto_kmflag(crypto_req_handle_t handle)
602 {
603 	return (REQHNDL2_KMFLAG(handle));
604 }
605 
606 
607 /*
608  * Copy an ops vector from src to dst. Used during provider registration
609  * to copy the ops vector from the provider info structure to the
610  * provider descriptor maintained by KCF.
611  * Copying the ops vector specified by the provider is needed since the
612  * framework does not require the provider info structure to be
613  * persistent.
614  */
615 static void
616 copy_ops_vector_v1(crypto_ops_t *src_ops, crypto_ops_t *dst_ops)
617 {
618 	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_control_ops);
619 	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_digest_ops);
620 	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_cipher_ops);
621 	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_mac_ops);
622 	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_sign_ops);
623 	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_verify_ops);
624 	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_dual_ops);
625 	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_dual_cipher_mac_ops);
626 	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_random_ops);
627 	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_session_ops);
628 	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_object_ops);
629 	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_key_ops);
630 	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_provider_ops);
631 	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_ctx_ops);
632 }
633 
634 static void
635 copy_ops_vector_v2(crypto_ops_t *src_ops, crypto_ops_t *dst_ops)
636 {
637 	KCF_SPI_COPY_OPS(src_ops, dst_ops, co_mech_ops);
638 }
639 
640 /*
641  * Process the mechanism info structures specified by the provider
642  * during registration. A NULL crypto_provider_info_t indicates
643  * an already initialized provider descriptor.
644  *
645  * Mechanisms are not added to the kernel's mechanism table if the
646  * provider is a logical provider.
647  *
 * Returns CRYPTO_SUCCESS on success, CRYPTO_ARGUMENTS_BAD if one
 * of the specified mechanisms was malformed, or CRYPTO_HOST_MEMORY
 * if the table of mechanisms is full.
651  */
652 static int
653 init_prov_mechs(crypto_provider_info_t *info, kcf_provider_desc_t *desc)
654 {
655 	uint_t mech_idx;
656 	uint_t cleanup_idx;
657 	int err = CRYPTO_SUCCESS;
658 	kcf_prov_mech_desc_t *pmd;
659 	int desc_use_count = 0;
660 	int mcount = desc->pd_mech_list_count;
661 
662 	if (desc->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
663 		if (info != NULL) {
664 			ASSERT(info->pi_mechanisms != NULL);
665 			bcopy(info->pi_mechanisms, desc->pd_mechanisms,
666 			    sizeof (crypto_mech_info_t) * mcount);
667 		}
668 		return (CRYPTO_SUCCESS);
669 	}
670 
671 	/*
672 	 * Copy the mechanism list from the provider info to the provider
673 	 * descriptor. desc->pd_mechanisms has an extra crypto_mech_info_t
674 	 * element if the provider has random_ops since we keep an internal
675 	 * mechanism, SUN_RANDOM, in this case.
676 	 */
677 	if (info != NULL) {
678 		if (info->pi_ops_vector->co_random_ops != NULL) {
679 			crypto_mech_info_t *rand_mi;
680 
681 			/*
682 			 * Need the following check as it is possible to have
683 			 * a provider that implements just random_ops and has
684 			 * pi_mechanisms == NULL.
685 			 */
686 			if (info->pi_mechanisms != NULL) {
687 				bcopy(info->pi_mechanisms, desc->pd_mechanisms,
688 				    sizeof (crypto_mech_info_t) * (mcount - 1));
689 			}
690 			rand_mi = &desc->pd_mechanisms[mcount - 1];
691 
692 			bzero(rand_mi, sizeof (crypto_mech_info_t));
693 			(void) strncpy(rand_mi->cm_mech_name, SUN_RANDOM,
694 			    CRYPTO_MAX_MECH_NAME);
695 			rand_mi->cm_func_group_mask = CRYPTO_FG_RANDOM;
696 			/*
697 			 * What we really need here is a
698 			 * CRYPTO_KEYSIZE_NOT_APPLICABLE. We make do with the
699 			 * following for now.
700 			 */
701 			rand_mi->cm_keysize_unit = CRYPTO_KEYSIZE_UNIT_IN_BITS;
702 		} else {
703 			ASSERT(info->pi_mechanisms != NULL);
704 			bcopy(info->pi_mechanisms, desc->pd_mechanisms,
705 			    sizeof (crypto_mech_info_t) * mcount);
706 		}
707 	}
708 
	/*
	 * For each mechanism supported by the provider, add the provider
	 * to the corresponding KCF mechanism mech_entry chain.
	 */
713 	for (mech_idx = 0; mech_idx < desc->pd_mech_list_count; mech_idx++) {
714 		crypto_mech_info_t *mi = &desc->pd_mechanisms[mech_idx];
715 
716 		if (mi->cm_keysize_unit != CRYPTO_KEYSIZE_UNIT_IN_BITS &&
717 		    mi->cm_keysize_unit != CRYPTO_KEYSIZE_UNIT_IN_BYTES) {
718 			err = CRYPTO_ARGUMENTS_BAD;
719 			break;
720 		}
721 
		if ((err = kcf_add_mech_provider(mi, desc, &pmd)) !=
		    KCF_SUCCESS)
			break;
724 
725 		if (pmd == NULL)
726 			continue;
727 
728 		/* The provider will be used for this mechanism */
729 		desc_use_count++;
730 	}
731 
	/*
	 * The provider will not be used for any mechanism. So, we fail its
	 * registration. Note that if at least one mechanism from the
	 * provider can be used, we go ahead with the registration. This
	 * means there can be an overlap between the mechanisms offered by
	 * providers; the first one to register is used. Also, a policy that
	 * disables mechanisms of a provider prevents the provider from
	 * being used for those mechanisms.
	 */
740 	if (desc_use_count == 0)
741 		return (CRYPTO_ARGUMENTS_BAD);
742 
743 	if (err == KCF_SUCCESS)
744 		return (CRYPTO_SUCCESS);
745 
746 	/*
747 	 * An error occurred while adding the mechanism, cleanup
748 	 * and bail.
749 	 */
750 	for (cleanup_idx = 0; cleanup_idx < mech_idx; cleanup_idx++) {
751 		kcf_remove_mech_provider(
752 		    desc->pd_mechanisms[cleanup_idx].cm_mech_name, desc);
753 	}
754 
755 	if (err == KCF_MECH_TAB_FULL)
756 		return (CRYPTO_HOST_MEMORY);
757 
758 	return (CRYPTO_ARGUMENTS_BAD);
759 }
760 
/*
 * Update routine for kstat. Only privileged users are allowed to
 * access this information, since it is sensitive. There are some
 * cryptographic attacks (e.g. traffic analysis) which can make use
 * of this information.
 */
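/*
 * The statistics can be read with kstat(1M), e.g. (hypothetical provider
 * name, following the ks_name format built in crypto_register_provider()):
 *
 *	# kstat -m kcf -n mysoft_provider_stats
 */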
767 static int
768 kcf_prov_kstat_update(kstat_t *ksp, int rw)
769 {
770 	kcf_prov_stats_t *ks_data;
771 	kcf_provider_desc_t *pd = (kcf_provider_desc_t *)ksp->ks_private;
772 
773 	if (rw == KSTAT_WRITE)
774 		return (EACCES);
775 
776 	ks_data = ksp->ks_data;
777 
778 	if (secpolicy_sys_config(CRED(), B_TRUE) != 0) {
779 		ks_data->ps_ops_total.value.ui64 = 0;
780 		ks_data->ps_ops_passed.value.ui64 = 0;
781 		ks_data->ps_ops_failed.value.ui64 = 0;
782 		ks_data->ps_ops_busy_rval.value.ui64 = 0;
783 	} else {
784 		ks_data->ps_ops_total.value.ui64 =
785 		    pd->pd_sched_info.ks_ndispatches;
786 		ks_data->ps_ops_failed.value.ui64 =
787 		    pd->pd_sched_info.ks_nfails;
788 		ks_data->ps_ops_busy_rval.value.ui64 =
789 		    pd->pd_sched_info.ks_nbusy_rval;
790 		ks_data->ps_ops_passed.value.ui64 =
791 		    pd->pd_sched_info.ks_ndispatches -
792 		    pd->pd_sched_info.ks_nfails -
793 		    pd->pd_sched_info.ks_nbusy_rval;
794 	}
795 
796 	return (0);
797 }
798 
799 
800 /*
801  * Utility routine called from failure paths in crypto_register_provider()
802  * and from crypto_load_soft_disabled().
803  */
804 void
805 undo_register_provider(kcf_provider_desc_t *desc, boolean_t remove_prov)
806 {
807 	uint_t mech_idx;
808 
809 	/* remove the provider from the mechanisms tables */
810 	for (mech_idx = 0; mech_idx < desc->pd_mech_list_count;
811 	    mech_idx++) {
812 		kcf_remove_mech_provider(
813 		    desc->pd_mechanisms[mech_idx].cm_mech_name, desc);
814 	}
815 
816 	/* remove provider from providers table */
817 	if (remove_prov)
818 		(void) kcf_prov_tab_rem_provider(desc->pd_prov_id);
819 }
820 
821 /*
822  * Utility routine called from crypto_load_soft_disabled(). Callers
823  * should have done a prior undo_register_provider().
824  */
825 void
826 redo_register_provider(kcf_provider_desc_t *pd)
827 {
828 	/* process the mechanisms supported by the provider */
829 	(void) init_prov_mechs(NULL, pd);
830 
	/*
	 * Hold the provider in the providers table. We should not call
	 * kcf_prov_tab_add_provider() here as the provider descriptor
	 * is still valid, which means it already has an entry in the
	 * provider table.
	 */
837 	KCF_PROV_REFHOLD(pd);
838 	KCF_PROV_IREFHOLD(pd);
839 }
840 
/*
 * Add provider (p1) to another provider's list of providers (p2).
 * Hardware and logical providers use this list to cross-reference
 * each other.
 */
846 static void
847 add_provider_to_array(kcf_provider_desc_t *p1, kcf_provider_desc_t *p2)
848 {
849 	kcf_provider_list_t *new;
850 
851 	new = kmem_alloc(sizeof (kcf_provider_list_t), KM_SLEEP);
852 	mutex_enter(&p2->pd_lock);
853 	new->pl_next = p2->pd_provider_list;
854 	p2->pd_provider_list = new;
855 	KCF_PROV_IREFHOLD(p1);
856 	new->pl_provider = p1;
857 	mutex_exit(&p2->pd_lock);
858 }
859 
/*
 * Remove provider (p1) from another provider's list of providers (p2).
 * Hardware and logical providers use this list to cross-reference
 * each other.
 */
865 static void
866 remove_provider_from_array(kcf_provider_desc_t *p1, kcf_provider_desc_t *p2)
867 {
	kcf_provider_list_t *pl = NULL, **prev;
870 
871 	mutex_enter(&p2->pd_lock);
872 	for (pl = p2->pd_provider_list, prev = &p2->pd_provider_list;
873 	    pl != NULL; prev = &pl->pl_next, pl = pl->pl_next) {
874 		if (pl->pl_provider == p1) {
875 			break;
876 		}
877 	}
878 
	if (pl == NULL) {
880 		mutex_exit(&p2->pd_lock);
881 		return;
882 	}
883 
884 	/* detach and free kcf_provider_list structure */
885 	KCF_PROV_IREFRELE(p1);
886 	*prev = pl->pl_next;
887 	kmem_free(pl, sizeof (*pl));
888 	mutex_exit(&p2->pd_lock);
889 }
890 
/*
 * Walk the array of logical provider handles (crypto_provider_id)
 * supplied in the crypto_provider_info structure and cross-link the
 * registering hardware provider with each of the corresponding logical
 * providers' descriptors (kcf_provider_desc_t).
 */
896 static void
897 process_logical_providers(crypto_provider_info_t *info, kcf_provider_desc_t *hp)
898 {
899 	kcf_provider_desc_t *lp;
900 	crypto_provider_id_t handle;
901 	int count = info->pi_logical_provider_count;
902 	int i;
903 
904 	/* add hardware provider to each logical provider */
905 	for (i = 0; i < count; i++) {
906 		handle = info->pi_logical_providers[i];
907 		lp = kcf_prov_tab_lookup((crypto_provider_id_t)handle);
908 		if (lp == NULL) {
909 			continue;
910 		}
911 		add_provider_to_array(hp, lp);
912 
913 		/*
914 		 * A hardware provider has to have the provider descriptor of
915 		 * every logical provider it belongs to, so it can be removed
916 		 * from the logical provider if the hardware provider
917 		 * unregisters from the framework.
918 		 */
919 		add_provider_to_array(lp, hp);
920 		KCF_PROV_REFRELE(lp);
921 	}
922 }
923 
/*
 * This routine removes a provider from all of the logical or
 * hardware providers it belongs to, and frees the provider's
 * list of cross-referenced providers.
 */
929 static void
930 remove_provider(kcf_provider_desc_t *pp)
931 {
932 	kcf_provider_desc_t *p;
933 	kcf_provider_list_t *e, *next;
934 
935 	mutex_enter(&pp->pd_lock);
936 	for (e = pp->pd_provider_list; e != NULL; e = next) {
937 		p = e->pl_provider;
938 		remove_provider_from_array(pp, p);
939 		KCF_PROV_IREFRELE(p);
940 		next = e->pl_next;
941 		kmem_free(e, sizeof (*e));
942 	}
943 	pp->pd_provider_list = NULL;
944 	mutex_exit(&pp->pd_lock);
945 }
946