1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 #pragma ident	"%Z%%M%	%I%	%E% SMI"
27 
28 /*
29  * This file contains routines which call into a provider's
30  * entry points and do other related work.
31  */
32 
33 #include <sys/types.h>
34 #include <sys/systm.h>
35 #include <sys/taskq_impl.h>
36 #include <sys/cmn_err.h>
37 
38 #include <sys/crypto/common.h>
39 #include <sys/crypto/impl.h>
40 #include <sys/crypto/sched_impl.h>
41 
42 /*
43  * Return B_TRUE if the specified entry point is NULL. The caller passes,
44  * in o1 and o2, the offsets needed to locate the entry point: o1 is the
45  * offset of the ops group within pd_ops_vector, o2 the offset of the
46  * function within that group. ops is a caller-declared caddr_t * local.
47  */
48 #define	KCF_PROV_NULL_ENTRY_POINT(pd, o1, o2, ops)			\
49 	(ops = (caddr_t *)((caddr_t)(pd)->pd_ops_vector + (o1)),	\
50 	(*ops == NULL || *(caddr_t *)((caddr_t)(*ops) + (o2)) == NULL))
51 
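/*
 * For illustration (not taken from this file): a caller checking whether a
 * provider implements the digest_update(9E) entry point could pass
 *	offsetof(crypto_ops_t, co_digest_ops) as offset_1, and
 *	offsetof(crypto_digest_ops_t, digest_update) as offset_2.
 * The structure and member names above are the standard SPI ops vectors
 * from sys/crypto/spi.h; callers typically use offset macros that expand
 * to equivalent offsetof() expressions.
 */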
52 
53 static int kcf_emulate_dual(kcf_provider_desc_t *, crypto_ctx_t *,
54     kcf_req_params_t *);
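
/*
 * Free a list of providers tried for a request, releasing the reference
 * held on each provider descriptor.
 */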
55 void
56 kcf_free_triedlist(kcf_prov_tried_t *list)
57 {
58 	kcf_prov_tried_t *l;
59 
60 	while ((l = list) != NULL) {
61 		list = list->pt_next;
62 		KCF_PROV_REFRELE(l->pt_pd);
63 		kmem_free(l, sizeof (kcf_prov_tried_t));
64 	}
65 }
66 
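/*
 * Add a provider to the head of the tried list. No reference is taken
 * here; the caller is expected to already hold one (e.g. from
 * kcf_get_mech_provider()), and kcf_free_triedlist() releases it.
 */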
67 kcf_prov_tried_t *
68 kcf_insert_triedlist(kcf_prov_tried_t **list, kcf_provider_desc_t *pd,
69     int kmflag)
70 {
71 	kcf_prov_tried_t *l;
72 
73 	l = kmem_alloc(sizeof (kcf_prov_tried_t), kmflag);
74 	if (l == NULL)
75 		return (NULL);
76 
77 	l->pt_pd = pd;
78 	l->pt_next = *list;
79 	*list = l;
80 
81 	return (l);
82 }
83 
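/* Return B_TRUE if the given provider is already on the tried list. */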
84 static boolean_t
85 is_in_triedlist(kcf_provider_desc_t *pd, kcf_prov_tried_t *triedl)
86 {
87 	while (triedl != NULL) {
88 		if (triedl->pt_pd == pd)
89 			return (B_TRUE);
90 		triedl = triedl->pt_next;
91 	}
92 
93 	return (B_FALSE);
94 }
95 
96 /*
97  * Search a mech entry's hardware provider list for the specified
98  * provider. Return true if found.
99  */
100 static boolean_t
101 is_valid_provider_for_mech(kcf_provider_desc_t *pd, kcf_mech_entry_t *me)
102 {
103 	kcf_prov_mech_desc_t *prov_chain;
104 
105 	prov_chain = me->me_hw_prov_chain;
106 	if (prov_chain != NULL) {
107 		ASSERT(me->me_num_hwprov > 0);
108 		for (; prov_chain != NULL; prov_chain = prov_chain->pm_next) {
109 			if (prov_chain->pm_prov_desc == pd) {
110 				return (B_TRUE);
111 			}
112 		}
113 	}
114 	return (B_FALSE);
115 }
116 
117 /*
118  * This routine, given a logical provider, returns the least loaded
119  * provider belonging to the logical provider. The provider must be
120  * able to do the specified mechanism, i.e. the mechanism must not
121  * have been disabled for that provider. In addition, just in case
122  * providers are not entirely equivalent, the provider's entry point
123  * is checked for non-nullness. This is accomplished by having the
124  * caller pass the offset of the function group (offset_1) and the
125  * offset of the function within the function group (offset_2).
126  * Returns CRYPTO_SUCCESS and a held provider in *new, else an error.
127  */
128 int
129 kcf_get_hardware_provider(crypto_mech_type_t mech_type_1,
130     crypto_mech_type_t mech_type_2, offset_t offset_1, offset_t offset_2,
131     boolean_t call_restrict, kcf_provider_desc_t *old,
132     kcf_provider_desc_t **new)
133 {
134 	kcf_provider_desc_t *provider, *gpd = NULL, *real_pd = old;
135 	kcf_provider_list_t *p;
136 	kcf_ops_class_t class;
137 	kcf_mech_entry_t *me;
138 	kcf_mech_entry_tab_t *me_tab;
139 	caddr_t *ops;
140 	int index, len, gqlen = INT_MAX, rv = CRYPTO_SUCCESS;
141 
142 	/* get the mech entry for the specified mechanism */
143 	class = KCF_MECH2CLASS(mech_type_1);
144 	if ((class < KCF_FIRST_OPSCLASS) || (class > KCF_LAST_OPSCLASS)) {
145 		return (CRYPTO_MECHANISM_INVALID);
146 	}
147 
148 	me_tab = &kcf_mech_tabs_tab[class];
149 	index = KCF_MECH2INDEX(mech_type_1);
150 	if ((index < 0) || (index >= me_tab->met_size)) {
151 		return (CRYPTO_MECHANISM_INVALID);
152 	}
153 
154 	me = &((me_tab->met_tab)[index]);
155 	mutex_enter(&me->me_mutex);
156 
157 	/*
158 	 * We assume the provider descriptor will not go away because
159 	 * it is being held somewhere, i.e. its reference count has been
160 	 * incremented. In the case of the crypto module, the provider
161 	 * descriptor is held by the session structure.
162 	 */
163 	if (old->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
164 		if (old->pd_provider_list == NULL) {
165 			real_pd = NULL;
166 			rv = CRYPTO_DEVICE_ERROR;
167 			goto out;
168 		}
169 		/*
170 		 * Find the least loaded real provider. tq_nalloc gives
171 		 * the number of task entries in the task queue. We do
172 		 * not acquire tq_lock here as it is not critical to
173 		 * get the exact number and the lock contention may be
174 		 * too costly for this code path.
175 		 */
176 		mutex_enter(&old->pd_lock);
177 		p = old->pd_provider_list;
178 		while (p != NULL) {
179 			provider = p->pl_provider;
180 
181 			ASSERT(provider->pd_prov_type !=
182 			    CRYPTO_LOGICAL_PROVIDER);
183 
184 			if (!KCF_IS_PROV_USABLE(provider) ||
185 			    (call_restrict && provider->pd_restricted)) {
186 				p = p->pl_next;
187 				continue;
188 			}
189 
190 			if (!is_valid_provider_for_mech(provider, me)) {
191 				p = p->pl_next;
192 				continue;
193 			}
194 
195 			/* does the provider support the second mech too? */
196 			if (mech_type_2 != CRYPTO_MECH_INVALID) {
197 				crypto_mech_type_t mech_type;
198 				int i;
199 
200 				/* convert from kef to provider's number */
201 				mech_type = provider->pd_map_mechnums
202 				    [KCF_MECH2CLASS(mech_type_2)]
203 				    [KCF_MECH2INDEX(mech_type_2)];
204 
205 				for (i = 0; i < provider->pd_mech_list_count;
206 				    i++) {
207 					if (provider->pd_mechanisms[i]
208 					    .cm_mech_number == mech_type)
209 						break;
210 				}
211 				if (i == provider->pd_mech_list_count) {
212 					p = p->pl_next;
213 					continue;
214 				}
215 			}
216 
217 			if (KCF_PROV_NULL_ENTRY_POINT(provider, offset_1,
218 			    offset_2, ops)) {
219 				p = p->pl_next;
220 				continue;
221 			}
222 
223 			len = provider->pd_sched_info.ks_taskq->tq_nalloc;
224 			if (len < gqlen) {
225 				gqlen = len;
226 				gpd = provider;
227 			}
228 
229 			p = p->pl_next;
230 		}
231 
232 		if (gpd != NULL) {
233 			real_pd = gpd;
234 			KCF_PROV_REFHOLD(real_pd);
235 		} else {
236 			/* can't find provider */
237 			real_pd = NULL;
238 			rv = CRYPTO_MECHANISM_INVALID;
239 		}
240 		mutex_exit(&old->pd_lock);
241 
242 	} else {
243 		if (!KCF_IS_PROV_USABLE(old) ||
244 		    (call_restrict && old->pd_restricted)) {
245 			real_pd = NULL;
246 			rv = CRYPTO_DEVICE_ERROR;
247 			goto out;
248 		}
249 
250 		if (!is_valid_provider_for_mech(old, me)) {
251 			real_pd = NULL;
252 			rv = CRYPTO_MECHANISM_INVALID;
253 			goto out;
254 		}
255 
256 		if (KCF_PROV_NULL_ENTRY_POINT(old, offset_1, offset_2, ops)) {
257 			real_pd = NULL;
258 			rv = CRYPTO_NOT_SUPPORTED;
259 			goto out;
260 		}
261 		KCF_PROV_REFHOLD(real_pd);
262 	}
263 out:
264 	mutex_exit(&me->me_mutex);
265 	*new = real_pd;
266 	return (rv);
267 }
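
/*
 * Illustrative usage sketch (the caller and locals below are assumptions,
 * not taken from this file): a dispatch routine handed a logical provider
 * could locate a real member provider for an encrypt update with
 *
 *	rv = kcf_get_hardware_provider(mech->cm_type, CRYPTO_MECH_INVALID,
 *	    offsetof(crypto_ops_t, co_cipher_ops),
 *	    offsetof(crypto_cipher_ops_t, encrypt_update),
 *	    B_FALSE, pd, &real_pd);
 *
 * and, on CRYPTO_SUCCESS, submit the request to real_pd and later drop
 * the reference with KCF_PROV_REFRELE(real_pd).
 */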
268 
269 /*
270  * This routine, given a logical provider, returns the least loaded
271  * provider belonging to the logical provider. Just in case providers
272  * are not entirely equivalent, the provider's entry point is checked
273  * for non-nullness. This is accomplished by having the caller pass, as
274  * arguments, the offset of the function group (offset_1), and the
275  * offset of the function within the function group (offset_2).
276  * Returns CRYPTO_SUCCESS and a held provider in *new, else an error.
277  */
278 int
279 kcf_get_hardware_provider_nomech(offset_t offset_1, offset_t offset_2,
280     boolean_t call_restrict, kcf_provider_desc_t *old,
281     kcf_provider_desc_t **new)
282 {
283 	kcf_provider_desc_t *provider, *gpd = NULL, *real_pd = old;
284 	kcf_provider_list_t *p;
285 	caddr_t *ops;
286 	int len, gqlen = INT_MAX, rv = CRYPTO_SUCCESS;
287 
288 	/*
289 	 * We assume the provider descriptor will not go away because
290 	 * it is being held somewhere, i.e. its reference count has been
291 	 * incremented. In the case of the crypto module, the provider
292 	 * descriptor is held by the session structure.
293 	 */
294 	if (old->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
295 		if (old->pd_provider_list == NULL) {
296 			real_pd = NULL;
297 			rv = CRYPTO_DEVICE_ERROR;
298 			goto out;
299 		}
300 		/*
301 		 * Find the least loaded real provider. tq_nalloc gives
302 		 * the number of task entries in the task queue. We do
303 		 * not acquire tq_lock here as it is not critical to
304 		 * get the exact number and the lock contention may be
305 		 * too costly for this code path.
306 		 */
307 		mutex_enter(&old->pd_lock);
308 		p = old->pd_provider_list;
309 		while (p != NULL) {
310 			provider = p->pl_provider;
311 
312 			ASSERT(provider->pd_prov_type !=
313 			    CRYPTO_LOGICAL_PROVIDER);
314 
315 			if (!KCF_IS_PROV_USABLE(provider) ||
316 			    (call_restrict && provider->pd_restricted)) {
317 				p = p->pl_next;
318 				continue;
319 			}
320 
321 			if (KCF_PROV_NULL_ENTRY_POINT(provider, offset_1,
322 			    offset_2, ops)) {
323 				p = p->pl_next;
324 				continue;
325 			}
326 
327 			len = provider->pd_sched_info.ks_taskq->tq_nalloc;
328 			if (len < gqlen) {
329 				gqlen = len;
330 				gpd = provider;
331 			}
332 
333 			p = p->pl_next;
334 		}
335 		mutex_exit(&old->pd_lock);
336 
337 		if (gpd != NULL) {
338 			real_pd = gpd;
339 			KCF_PROV_REFHOLD(real_pd);
340 		} else {
341 			/* can't find provider */
342 			real_pd = NULL;
343 			rv = CRYPTO_DEVICE_ERROR;
344 		}
345 
346 	} else {
347 		if (!KCF_IS_PROV_USABLE(old) ||
348 		    (call_restrict && old->pd_restricted)) {
349 			real_pd = NULL;
350 			rv = CRYPTO_DEVICE_ERROR;
351 			goto out;
352 		}
353 
354 		if (KCF_PROV_NULL_ENTRY_POINT(old, offset_1, offset_2, ops)) {
355 			real_pd = NULL;
356 			rv = CRYPTO_NOT_SUPPORTED;
357 			goto out;
358 		}
359 		KCF_PROV_REFHOLD(real_pd);
360 	}
361 out:
362 	*new = real_pd;
363 	return (rv);
364 }
365 
366 /*
367  * Return the next member of a logical provider, given the previous
368  * member. The function returns true if the next member is found and
369  * bumps its refcnt before returning.
370  */
371 boolean_t
372 kcf_get_next_logical_provider_member(kcf_provider_desc_t *logical_provider,
373     kcf_provider_desc_t *prev, kcf_provider_desc_t **pd)
374 {
375 	kcf_provider_list_t *p;
376 	kcf_provider_desc_t *next;
377 
378 	ASSERT(MUTEX_HELD(&logical_provider->pd_lock));
379 	p = logical_provider->pd_provider_list;
380 	while (p != NULL) {
381 		/* start the search */
382 		if (prev == NULL) {
383 			next = p->pl_provider;
384 			goto found;
385 		} else {
386 			/* find where we were before */
387 			if (p->pl_provider == prev) {
388 				if (p->pl_next != NULL) {
389 					next = p->pl_next->pl_provider;
390 					goto found;
391 				}
392 			}
393 		}
394 		p = p->pl_next;
395 	}
396 	return (B_FALSE);
397 
398 found:
399 	KCF_PROV_REFHOLD(next);
400 	*pd = next;
401 	return (B_TRUE);
402 }
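
/*
 * Sketch of the expected calling pattern (assumed, not from this file):
 * with the logical provider's pd_lock held, start with prev == NULL and
 * feed the previous member back in until B_FALSE is returned, releasing
 * each member's reference once done with it:
 *
 *	prev = NULL;
 *	while (kcf_get_next_logical_provider_member(lp, prev, &member)) {
 *		if (prev != NULL)
 *			KCF_PROV_REFRELE(prev);
 *		(void) do_something_with(member);	\* hypothetical *\
 *		prev = member;
 *	}
 *	if (prev != NULL)
 *		KCF_PROV_REFRELE(prev);
 */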
403 
404 /*
405  * Return the best provider for the specified mechanism. The provider
406  * is held and it is the caller's responsibility to release it when done.
407  * The fg input argument is used as a search criterion to pick a provider.
408  * A provider has to support this function group to be picked.
409  *
410  * Find the least loaded provider in the list of providers. We do a linear
411  * search to find one. This is fine as we assume there are only a
412  * small number of providers in this list. If this assumption ever
413  * changes, we should revisit this.
414  *
415  * When call_restrict is B_TRUE, providers flagged as restricted are
416  * not considered for this call.
417  */
418 kcf_provider_desc_t *
419 kcf_get_mech_provider(crypto_mech_type_t mech_type, kcf_mech_entry_t **mepp,
420     int *error, kcf_prov_tried_t *triedl, crypto_func_group_t fg,
421     boolean_t call_restrict, size_t data_size)
422 {
423 	kcf_provider_desc_t *pd = NULL, *gpd = NULL;
424 	kcf_prov_mech_desc_t *prov_chain, *mdesc;
425 	int len, gqlen = INT_MAX;
426 	kcf_ops_class_t class;
427 	int index;
428 	kcf_mech_entry_t *me;
429 	kcf_mech_entry_tab_t *me_tab;
430 
431 	class = KCF_MECH2CLASS(mech_type);
432 	if ((class < KCF_FIRST_OPSCLASS) || (class > KCF_LAST_OPSCLASS)) {
433 		*error = CRYPTO_MECHANISM_INVALID;
434 		return (NULL);
435 	}
436 
437 	me_tab = &kcf_mech_tabs_tab[class];
438 	index = KCF_MECH2INDEX(mech_type);
439 	if ((index < 0) || (index >= me_tab->met_size)) {
440 		*error = CRYPTO_MECHANISM_INVALID;
441 		return (NULL);
442 	}
443 
444 	me = &((me_tab->met_tab)[index]);
445 	if (mepp != NULL)
446 		*mepp = me;
447 
448 	mutex_enter(&me->me_mutex);
449 
450 	prov_chain = me->me_hw_prov_chain;
451 
452 	/*
453 	 * We check the threshold for using a hardware provider for
454 	 * this amount of data. If there is no software provider available
455 	 * for the mechanism, then the threshold is ignored.
456 	 */
457 	if ((prov_chain != NULL) &&
458 	    ((data_size == 0) || (me->me_threshold == 0) ||
459 	    (data_size > me->me_threshold) ||
460 	    ((mdesc = me->me_sw_prov) == NULL) ||
461 	    (!IS_FG_SUPPORTED(mdesc, fg)) ||
462 	    (!KCF_IS_PROV_USABLE(mdesc->pm_prov_desc)))) {
463 		ASSERT(me->me_num_hwprov > 0);
464 		/* there is at least one provider */
465 
466 		/*
467 		 * Find the least loaded provider. tq_nalloc gives
468 		 * the number of task entries in the task queue. We do
469 		 * not acquire tq_lock here as it is not critical to
470 		 * get the exact number and the lock contention may be
471 		 * too costly for this code path.
472 		 */
473 		while (prov_chain != NULL) {
474 			pd = prov_chain->pm_prov_desc;
475 
476 			if (!IS_FG_SUPPORTED(prov_chain, fg) ||
477 			    !KCF_IS_PROV_USABLE(pd) ||
478 			    IS_PROVIDER_TRIED(pd, triedl) ||
479 			    (call_restrict && pd->pd_restricted)) {
480 				prov_chain = prov_chain->pm_next;
481 				continue;
482 			}
483 
484 			if ((len = pd->pd_sched_info.ks_taskq->tq_nalloc)
485 			    < gqlen) {
486 				gqlen = len;
487 				gpd = pd;
488 			}
489 
490 			prov_chain = prov_chain->pm_next;
491 		}
492 
493 		pd = gpd;
494 	}
495 
496 	/* No HW provider for this mech, is there a SW provider? */
497 	if (pd == NULL && (mdesc = me->me_sw_prov) != NULL) {
498 		pd = mdesc->pm_prov_desc;
499 		if (!IS_FG_SUPPORTED(mdesc, fg) ||
500 		    !KCF_IS_PROV_USABLE(pd) ||
501 		    IS_PROVIDER_TRIED(pd, triedl) ||
502 		    (call_restrict && pd->pd_restricted))
503 			pd = NULL;
504 	}
505 
506 	if (pd == NULL) {
507 		/*
508 		 * We do not want to report CRYPTO_MECH_NOT_SUPPORTED when
509 		 * we are in the "fallback to the next provider" case. Rather,
510 		 * we preserve the error, so that the client gets the right
511 		 * error code.
512 		 */
513 		if (triedl == NULL)
514 			*error = CRYPTO_MECH_NOT_SUPPORTED;
515 	} else
516 		KCF_PROV_REFHOLD(pd);
517 
518 	mutex_exit(&me->me_mutex);
519 	return (pd);
520 }
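
/*
 * Illustrative call (the surrounding locals are assumptions, not from this
 * file): choose a provider for a single-part encrypt of 'len' bytes,
 * skipping providers already tried on earlier attempts:
 *
 *	pd = kcf_get_mech_provider(mech->cm_type, &me, &error, tried_list,
 *	    CRYPTO_FG_ENCRYPT_ATOMIC, B_FALSE, len);
 *	if (pd == NULL)
 *		return (error);
 *
 * The returned provider is held and must be released with
 * KCF_PROV_REFRELE() when the request completes.
 */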
521 
522 /*
523  * Very similar to kcf_get_mech_provider(). Finds the best provider capable of
524  * a dual operation with both me1 and me2.
525  * When no provider capable of the dual operation is available, the best
526  * provider for me1 only is returned, and *prov_mt2 is set to
527  * CRYPTO_MECH_INVALID. We assume/expect that even a slower hardware
528  * provider capable of the dual operation is still faster than the two
529  * fastest providers capable of the individual operations separately.
530  */
531 kcf_provider_desc_t *
532 kcf_get_dual_provider(crypto_mechanism_t *mech1, crypto_mechanism_t *mech2,
533     kcf_mech_entry_t **mepp, crypto_mech_type_t *prov_mt1,
534     crypto_mech_type_t *prov_mt2, int *error, kcf_prov_tried_t *triedl,
535     crypto_func_group_t fg1, crypto_func_group_t fg2, boolean_t call_restrict,
536     size_t data_size)
537 {
538 	kcf_provider_desc_t *pd = NULL, *pdm1 = NULL, *pdm1m2 = NULL;
539 	kcf_prov_mech_desc_t *prov_chain, *mdesc;
540 	int len, gqlen = INT_MAX, dgqlen = INT_MAX;
541 	crypto_mech_info_list_t *mil;
542 	crypto_mech_type_t m2id =  mech2->cm_type;
543 	kcf_mech_entry_t *me;
544 
545 	/* when mech1 is a valid mechanism, me will be its mech_entry */
546 	if (kcf_get_mech_entry(mech1->cm_type, &me) != KCF_SUCCESS) {
547 		*error = CRYPTO_MECHANISM_INVALID;
548 		return (NULL);
549 	}
550 
551 	*prov_mt2 = CRYPTO_MECH_INVALID;
552 
553 	if (mepp != NULL)
554 		*mepp = me;
555 	mutex_enter(&me->me_mutex);
556 
557 	prov_chain = me->me_hw_prov_chain;
558 	/*
559 	 * We check the threshold for using a hardware provider for
560 	 * this amount of data. If there is no software provider available
561 	 * for the first mechanism, then the threshold is ignored.
562 	 */
563 	if ((prov_chain != NULL) &&
564 	    ((data_size == 0) || (me->me_threshold == 0) ||
565 	    (data_size > me->me_threshold) ||
566 	    ((mdesc = me->me_sw_prov) == NULL) ||
567 	    (!IS_FG_SUPPORTED(mdesc, fg1)) ||
568 	    (!KCF_IS_PROV_USABLE(mdesc->pm_prov_desc)))) {
569 		/* there is at least one provider */
570 		ASSERT(me->me_num_hwprov > 0);
571 
572 		/*
573 		 * Find the least loaded provider capable of the combo
574 		 * me1 + me2, and save a pointer to the least loaded
575 		 * provider capable of me1 only.
576 		 */
577 		while (prov_chain != NULL) {
578 			pd = prov_chain->pm_prov_desc;
579 			len = pd->pd_sched_info.ks_taskq->tq_nalloc;
580 
581 			if (!IS_FG_SUPPORTED(prov_chain, fg1) ||
582 			    !KCF_IS_PROV_USABLE(pd) ||
583 			    IS_PROVIDER_TRIED(pd, triedl) ||
584 			    (call_restrict && pd->pd_restricted)) {
585 				prov_chain = prov_chain->pm_next;
586 				continue;
587 			}
588 
589 			/* Save the best provider capable of m1 */
590 			if (len < gqlen) {
591 				*prov_mt1 =
592 				    prov_chain->pm_mech_info.cm_mech_number;
593 				gqlen = len;
594 				pdm1 = pd;
595 			}
596 
597 			/* See if pd can do me2 too */
598 			for (mil = prov_chain->pm_mi_list;
599 			    mil != NULL; mil = mil->ml_next) {
600 				if ((mil->ml_mech_info.cm_func_group_mask &
601 				    fg2) == 0)
602 					continue;
603 
604 				if ((mil->ml_kcf_mechid == m2id) &&
605 				    (len < dgqlen)) {
606 					/* Bingo! */
607 					dgqlen = len;
608 					pdm1m2 = pd;
609 					*prov_mt2 =
610 					    mil->ml_mech_info.cm_mech_number;
611 					*prov_mt1 = prov_chain->
612 					    pm_mech_info.cm_mech_number;
613 					break;
614 				}
615 			}
616 
617 			prov_chain = prov_chain->pm_next;
618 		}
619 
620 		pd =  (pdm1m2 != NULL) ? pdm1m2 : pdm1;
621 	}
622 
623 	/* no HW provider for this mech, is there a SW provider? */
624 	if (pd == NULL && (mdesc = me->me_sw_prov) != NULL) {
625 		pd = mdesc->pm_prov_desc;
626 		if (!IS_FG_SUPPORTED(mdesc, fg1) ||
627 		    !KCF_IS_PROV_USABLE(pd) ||
628 		    IS_PROVIDER_TRIED(pd, triedl) ||
629 		    (call_restrict && pd->pd_restricted))
630 			pd = NULL;
631 		else {
632 			/* See if pd can do me2 too */
633 			for (mil = me->me_sw_prov->pm_mi_list;
634 			    mil != NULL; mil = mil->ml_next) {
635 				if ((mil->ml_mech_info.cm_func_group_mask &
636 				    fg2) == 0)
637 					continue;
638 
639 				if (mil->ml_kcf_mechid == m2id) {
640 					/* Bingo! */
641 					*prov_mt2 =
642 					    mil->ml_mech_info.cm_mech_number;
643 					break;
644 				}
645 			}
646 			*prov_mt1 = me->me_sw_prov->pm_mech_info.cm_mech_number;
647 		}
648 	}
649 
650 	if (pd == NULL)
651 		*error = CRYPTO_MECH_NOT_SUPPORTED;
652 	else
653 		KCF_PROV_REFHOLD(pd);
654 
655 	mutex_exit(&me->me_mutex);
656 	return (pd);
657 }
658 
659 /*
660  * Do the actual work of calling the provider routines.
661  *
662  * pd - Provider structure
663  * ctx - Context for this operation
664  * params - Parameters for this operation
665  * rhndl - Request handle to use for notification
666  *
667  * The return values are the same as that of the respective SPI.
668  */
669 int
670 common_submit_request(kcf_provider_desc_t *pd, crypto_ctx_t *ctx,
671     kcf_req_params_t *params, crypto_req_handle_t rhndl)
672 {
673 	int err = CRYPTO_ARGUMENTS_BAD;
674 	kcf_op_type_t optype;
675 
676 	optype = params->rp_optype;
677 
678 	switch (params->rp_opgrp) {
679 	case KCF_OG_DIGEST: {
680 		kcf_digest_ops_params_t *dops = &params->rp_u.digest_params;
681 
682 		switch (optype) {
683 		case KCF_OP_INIT:
684 			/*
685 			 * We should do this only here and not in KCF_WRAP_*
686 			 * macros. This is because we may want to try other
687 			 * providers to recover from a failure.
688 			 */
689 			KCF_SET_PROVIDER_MECHNUM(dops->do_framework_mechtype,
690 			    pd, &dops->do_mech);
691 
692 			err = KCF_PROV_DIGEST_INIT(pd, ctx, &dops->do_mech,
693 			    rhndl);
694 			break;
695 
696 		case KCF_OP_SINGLE:
697 			err = KCF_PROV_DIGEST(pd, ctx, dops->do_data,
698 			    dops->do_digest, rhndl);
699 			break;
700 
701 		case KCF_OP_UPDATE:
702 			err = KCF_PROV_DIGEST_UPDATE(pd, ctx,
703 			    dops->do_data, rhndl);
704 			break;
705 
706 		case KCF_OP_FINAL:
707 			err = KCF_PROV_DIGEST_FINAL(pd, ctx,
708 			    dops->do_digest, rhndl);
709 			break;
710 
711 		case KCF_OP_ATOMIC:
712 			ASSERT(ctx == NULL);
713 			KCF_SET_PROVIDER_MECHNUM(dops->do_framework_mechtype,
714 			    pd, &dops->do_mech);
715 			err = KCF_PROV_DIGEST_ATOMIC(pd, dops->do_sid,
716 			    &dops->do_mech, dops->do_data, dops->do_digest,
717 			    rhndl);
718 			break;
719 
720 		case KCF_OP_DIGEST_KEY:
721 			err = KCF_PROV_DIGEST_KEY(pd, ctx, dops->do_digest_key,
722 			    rhndl);
723 			break;
724 
725 		default:
726 			break;
727 		}
728 		break;
729 	}
730 
731 	case KCF_OG_MAC: {
732 		kcf_mac_ops_params_t *mops = &params->rp_u.mac_params;
733 
734 		switch (optype) {
735 		case KCF_OP_INIT:
736 			KCF_SET_PROVIDER_MECHNUM(mops->mo_framework_mechtype,
737 			    pd, &mops->mo_mech);
738 
739 			err = KCF_PROV_MAC_INIT(pd, ctx, &mops->mo_mech,
740 			    mops->mo_key, mops->mo_templ, rhndl);
741 			break;
742 
743 		case KCF_OP_SINGLE:
744 			err = KCF_PROV_MAC(pd, ctx, mops->mo_data,
745 			    mops->mo_mac, rhndl);
746 			break;
747 
748 		case KCF_OP_UPDATE:
749 			err = KCF_PROV_MAC_UPDATE(pd, ctx, mops->mo_data,
750 			    rhndl);
751 			break;
752 
753 		case KCF_OP_FINAL:
754 			err = KCF_PROV_MAC_FINAL(pd, ctx, mops->mo_mac, rhndl);
755 			break;
756 
757 		case KCF_OP_ATOMIC:
758 			ASSERT(ctx == NULL);
759 			KCF_SET_PROVIDER_MECHNUM(mops->mo_framework_mechtype,
760 			    pd, &mops->mo_mech);
761 
762 			err = KCF_PROV_MAC_ATOMIC(pd, mops->mo_sid,
763 			    &mops->mo_mech, mops->mo_key, mops->mo_data,
764 			    mops->mo_mac, mops->mo_templ, rhndl);
765 			break;
766 
767 		case KCF_OP_MAC_VERIFY_ATOMIC:
768 			ASSERT(ctx == NULL);
769 			KCF_SET_PROVIDER_MECHNUM(mops->mo_framework_mechtype,
770 			    pd, &mops->mo_mech);
771 
772 			err = KCF_PROV_MAC_VERIFY_ATOMIC(pd, mops->mo_sid,
773 			    &mops->mo_mech, mops->mo_key, mops->mo_data,
774 			    mops->mo_mac, mops->mo_templ, rhndl);
775 			break;
776 
777 		default:
778 			break;
779 		}
780 		break;
781 	}
782 
783 	case KCF_OG_ENCRYPT: {
784 		kcf_encrypt_ops_params_t *eops = &params->rp_u.encrypt_params;
785 
786 		switch (optype) {
787 		case KCF_OP_INIT:
788 			KCF_SET_PROVIDER_MECHNUM(eops->eo_framework_mechtype,
789 			    pd, &eops->eo_mech);
790 
791 			err = KCF_PROV_ENCRYPT_INIT(pd, ctx, &eops->eo_mech,
792 			    eops->eo_key, eops->eo_templ, rhndl);
793 			break;
794 
795 		case KCF_OP_SINGLE:
796 			err = KCF_PROV_ENCRYPT(pd, ctx, eops->eo_plaintext,
797 			    eops->eo_ciphertext, rhndl);
798 			break;
799 
800 		case KCF_OP_UPDATE:
801 			err = KCF_PROV_ENCRYPT_UPDATE(pd, ctx,
802 			    eops->eo_plaintext, eops->eo_ciphertext, rhndl);
803 			break;
804 
805 		case KCF_OP_FINAL:
806 			err = KCF_PROV_ENCRYPT_FINAL(pd, ctx,
807 			    eops->eo_ciphertext, rhndl);
808 			break;
809 
810 		case KCF_OP_ATOMIC:
811 			ASSERT(ctx == NULL);
812 			KCF_SET_PROVIDER_MECHNUM(eops->eo_framework_mechtype,
813 			    pd, &eops->eo_mech);
814 
815 			err = KCF_PROV_ENCRYPT_ATOMIC(pd, eops->eo_sid,
816 			    &eops->eo_mech, eops->eo_key, eops->eo_plaintext,
817 			    eops->eo_ciphertext, eops->eo_templ, rhndl);
818 			break;
819 
820 		default:
821 			break;
822 		}
823 		break;
824 	}
825 
826 	case KCF_OG_DECRYPT: {
827 		kcf_decrypt_ops_params_t *dcrops = &params->rp_u.decrypt_params;
828 
829 		switch (optype) {
830 		case KCF_OP_INIT:
831 			KCF_SET_PROVIDER_MECHNUM(dcrops->dop_framework_mechtype,
832 			    pd, &dcrops->dop_mech);
833 
834 			err = KCF_PROV_DECRYPT_INIT(pd, ctx, &dcrops->dop_mech,
835 			    dcrops->dop_key, dcrops->dop_templ, rhndl);
836 			break;
837 
838 		case KCF_OP_SINGLE:
839 			err = KCF_PROV_DECRYPT(pd, ctx, dcrops->dop_ciphertext,
840 			    dcrops->dop_plaintext, rhndl);
841 			break;
842 
843 		case KCF_OP_UPDATE:
844 			err = KCF_PROV_DECRYPT_UPDATE(pd, ctx,
845 			    dcrops->dop_ciphertext, dcrops->dop_plaintext,
846 			    rhndl);
847 			break;
848 
849 		case KCF_OP_FINAL:
850 			err = KCF_PROV_DECRYPT_FINAL(pd, ctx,
851 			    dcrops->dop_plaintext, rhndl);
852 			break;
853 
854 		case KCF_OP_ATOMIC:
855 			ASSERT(ctx == NULL);
856 			KCF_SET_PROVIDER_MECHNUM(dcrops->dop_framework_mechtype,
857 			    pd, &dcrops->dop_mech);
858 
859 			err = KCF_PROV_DECRYPT_ATOMIC(pd, dcrops->dop_sid,
860 			    &dcrops->dop_mech, dcrops->dop_key,
861 			    dcrops->dop_ciphertext, dcrops->dop_plaintext,
862 			    dcrops->dop_templ, rhndl);
863 			break;
864 
865 		default:
866 			break;
867 		}
868 		break;
869 	}
870 
871 	case KCF_OG_SIGN: {
872 		kcf_sign_ops_params_t *sops = &params->rp_u.sign_params;
873 
874 		switch (optype) {
875 		case KCF_OP_INIT:
876 			KCF_SET_PROVIDER_MECHNUM(sops->so_framework_mechtype,
877 			    pd, &sops->so_mech);
878 
879 			err = KCF_PROV_SIGN_INIT(pd, ctx, &sops->so_mech,
880 			    sops->so_key, sops->so_templ, rhndl);
881 			break;
882 
883 		case KCF_OP_SIGN_RECOVER_INIT:
884 			KCF_SET_PROVIDER_MECHNUM(sops->so_framework_mechtype,
885 			    pd, &sops->so_mech);
886 
887 			err = KCF_PROV_SIGN_RECOVER_INIT(pd, ctx,
888 			    &sops->so_mech, sops->so_key, sops->so_templ,
889 			    rhndl);
890 			break;
891 
892 		case KCF_OP_SINGLE:
893 			err = KCF_PROV_SIGN(pd, ctx, sops->so_data,
894 			    sops->so_signature, rhndl);
895 			break;
896 
897 		case KCF_OP_SIGN_RECOVER:
898 			err = KCF_PROV_SIGN_RECOVER(pd, ctx,
899 			    sops->so_data, sops->so_signature, rhndl);
900 			break;
901 
902 		case KCF_OP_UPDATE:
903 			err = KCF_PROV_SIGN_UPDATE(pd, ctx, sops->so_data,
904 			    rhndl);
905 			break;
906 
907 		case KCF_OP_FINAL:
908 			err = KCF_PROV_SIGN_FINAL(pd, ctx, sops->so_signature,
909 			    rhndl);
910 			break;
911 
912 		case KCF_OP_ATOMIC:
913 			ASSERT(ctx == NULL);
914 			KCF_SET_PROVIDER_MECHNUM(sops->so_framework_mechtype,
915 			    pd, &sops->so_mech);
916 
917 			err = KCF_PROV_SIGN_ATOMIC(pd, sops->so_sid,
918 			    &sops->so_mech, sops->so_key, sops->so_data,
919 			    sops->so_templ, sops->so_signature, rhndl);
920 			break;
921 
922 		case KCF_OP_SIGN_RECOVER_ATOMIC:
923 			ASSERT(ctx == NULL);
924 			KCF_SET_PROVIDER_MECHNUM(sops->so_framework_mechtype,
925 			    pd, &sops->so_mech);
926 
927 			err = KCF_PROV_SIGN_RECOVER_ATOMIC(pd, sops->so_sid,
928 			    &sops->so_mech, sops->so_key, sops->so_data,
929 			    sops->so_templ, sops->so_signature, rhndl);
930 			break;
931 
932 		default:
933 			break;
934 		}
935 		break;
936 	}
937 
938 	case KCF_OG_VERIFY: {
939 		kcf_verify_ops_params_t *vops = &params->rp_u.verify_params;
940 
941 		switch (optype) {
942 		case KCF_OP_INIT:
943 			KCF_SET_PROVIDER_MECHNUM(vops->vo_framework_mechtype,
944 			    pd, &vops->vo_mech);
945 
946 			err = KCF_PROV_VERIFY_INIT(pd, ctx, &vops->vo_mech,
947 			    vops->vo_key, vops->vo_templ, rhndl);
948 			break;
949 
950 		case KCF_OP_VERIFY_RECOVER_INIT:
951 			KCF_SET_PROVIDER_MECHNUM(vops->vo_framework_mechtype,
952 			    pd, &vops->vo_mech);
953 
954 			err = KCF_PROV_VERIFY_RECOVER_INIT(pd, ctx,
955 			    &vops->vo_mech, vops->vo_key, vops->vo_templ,
956 			    rhndl);
957 			break;
958 
959 		case KCF_OP_SINGLE:
960 			err = KCF_PROV_VERIFY(pd, ctx, vops->vo_data,
961 			    vops->vo_signature, rhndl);
962 			break;
963 
964 		case KCF_OP_VERIFY_RECOVER:
965 			err = KCF_PROV_VERIFY_RECOVER(pd, ctx,
966 			    vops->vo_signature, vops->vo_data, rhndl);
967 			break;
968 
969 		case KCF_OP_UPDATE:
970 			err = KCF_PROV_VERIFY_UPDATE(pd, ctx, vops->vo_data,
971 			    rhndl);
972 			break;
973 
974 		case KCF_OP_FINAL:
975 			err = KCF_PROV_VERIFY_FINAL(pd, ctx, vops->vo_signature,
976 			    rhndl);
977 			break;
978 
979 		case KCF_OP_ATOMIC:
980 			ASSERT(ctx == NULL);
981 			KCF_SET_PROVIDER_MECHNUM(vops->vo_framework_mechtype,
982 			    pd, &vops->vo_mech);
983 
984 			err = KCF_PROV_VERIFY_ATOMIC(pd, vops->vo_sid,
985 			    &vops->vo_mech, vops->vo_key, vops->vo_data,
986 			    vops->vo_templ, vops->vo_signature, rhndl);
987 			break;
988 
989 		case KCF_OP_VERIFY_RECOVER_ATOMIC:
990 			ASSERT(ctx == NULL);
991 			KCF_SET_PROVIDER_MECHNUM(vops->vo_framework_mechtype,
992 			    pd, &vops->vo_mech);
993 
994 			err = KCF_PROV_VERIFY_RECOVER_ATOMIC(pd, vops->vo_sid,
995 			    &vops->vo_mech, vops->vo_key, vops->vo_signature,
996 			    vops->vo_templ, vops->vo_data, rhndl);
997 			break;
998 
999 		default:
1000 			break;
1001 		}
1002 		break;
1003 	}
1004 
1005 	case KCF_OG_ENCRYPT_MAC: {
1006 		kcf_encrypt_mac_ops_params_t *eops =
1007 		    &params->rp_u.encrypt_mac_params;
1008 		kcf_context_t *kcf_secondctx;
1009 
1010 		switch (optype) {
1011 		case KCF_OP_INIT:
1012 			kcf_secondctx = ((kcf_context_t *)
1013 			    (ctx->cc_framework_private))->kc_secondctx;
1014 
1015 			if (kcf_secondctx != NULL) {
1016 				err = kcf_emulate_dual(pd, ctx, params);
1017 				break;
1018 			}
1019 			KCF_SET_PROVIDER_MECHNUM(
1020 			    eops->em_framework_encr_mechtype,
1021 			    pd, &eops->em_encr_mech);
1022 
1023 			KCF_SET_PROVIDER_MECHNUM(
1024 			    eops->em_framework_mac_mechtype,
1025 			    pd, &eops->em_mac_mech);
1026 
1027 			err = KCF_PROV_ENCRYPT_MAC_INIT(pd, ctx,
1028 			    &eops->em_encr_mech, eops->em_encr_key,
1029 			    &eops->em_mac_mech, eops->em_mac_key,
1030 			    eops->em_encr_templ, eops->em_mac_templ,
1031 			    rhndl);
1032 
1033 			break;
1034 
1035 		case KCF_OP_SINGLE:
1036 			err = KCF_PROV_ENCRYPT_MAC(pd, ctx,
1037 			    eops->em_plaintext, eops->em_ciphertext,
1038 			    eops->em_mac, rhndl);
1039 			break;
1040 
1041 		case KCF_OP_UPDATE:
1042 			kcf_secondctx = ((kcf_context_t *)
1043 			    (ctx->cc_framework_private))->kc_secondctx;
1044 			if (kcf_secondctx != NULL) {
1045 				err = kcf_emulate_dual(pd, ctx, params);
1046 				break;
1047 			}
1048 			err = KCF_PROV_ENCRYPT_MAC_UPDATE(pd, ctx,
1049 			    eops->em_plaintext, eops->em_ciphertext, rhndl);
1050 			break;
1051 
1052 		case KCF_OP_FINAL:
1053 			kcf_secondctx = ((kcf_context_t *)
1054 			    (ctx->cc_framework_private))->kc_secondctx;
1055 			if (kcf_secondctx != NULL) {
1056 				err = kcf_emulate_dual(pd, ctx, params);
1057 				break;
1058 			}
1059 			err = KCF_PROV_ENCRYPT_MAC_FINAL(pd, ctx,
1060 			    eops->em_ciphertext, eops->em_mac, rhndl);
1061 			break;
1062 
1063 		case KCF_OP_ATOMIC:
1064 			ASSERT(ctx == NULL);
1065 
1066 			KCF_SET_PROVIDER_MECHNUM(
1067 			    eops->em_framework_encr_mechtype,
1068 			    pd, &eops->em_encr_mech);
1069 
1070 			KCF_SET_PROVIDER_MECHNUM(
1071 			    eops->em_framework_mac_mechtype,
1072 			    pd, &eops->em_mac_mech);
1073 
1074 			err = KCF_PROV_ENCRYPT_MAC_ATOMIC(pd, eops->em_sid,
1075 			    &eops->em_encr_mech, eops->em_encr_key,
1076 			    &eops->em_mac_mech, eops->em_mac_key,
1077 			    eops->em_plaintext, eops->em_ciphertext,
1078 			    eops->em_mac,
1079 			    eops->em_encr_templ, eops->em_mac_templ,
1080 			    rhndl);
1081 
1082 			break;
1083 
1084 		default:
1085 			break;
1086 		}
1087 		break;
1088 	}
1089 
1090 	case KCF_OG_MAC_DECRYPT: {
1091 		kcf_mac_decrypt_ops_params_t *dops =
1092 		    &params->rp_u.mac_decrypt_params;
1093 		kcf_context_t *kcf_secondctx;
1094 
1095 		switch (optype) {
1096 		case KCF_OP_INIT:
1097 			kcf_secondctx = ((kcf_context_t *)
1098 			    (ctx->cc_framework_private))->kc_secondctx;
1099 
1100 			if (kcf_secondctx != NULL) {
1101 				err = kcf_emulate_dual(pd, ctx, params);
1102 				break;
1103 			}
1104 			KCF_SET_PROVIDER_MECHNUM(
1105 			    dops->md_framework_mac_mechtype,
1106 			    pd, &dops->md_mac_mech);
1107 
1108 			KCF_SET_PROVIDER_MECHNUM(
1109 			    dops->md_framework_decr_mechtype,
1110 			    pd, &dops->md_decr_mech);
1111 
1112 			err = KCF_PROV_MAC_DECRYPT_INIT(pd, ctx,
1113 			    &dops->md_mac_mech, dops->md_mac_key,
1114 			    &dops->md_decr_mech, dops->md_decr_key,
1115 			    dops->md_mac_templ, dops->md_decr_templ,
1116 			    rhndl);
1117 
1118 			break;
1119 
1120 		case KCF_OP_SINGLE:
1121 			err = KCF_PROV_MAC_DECRYPT(pd, ctx,
1122 			    dops->md_ciphertext, dops->md_mac,
1123 			    dops->md_plaintext, rhndl);
1124 			break;
1125 
1126 		case KCF_OP_UPDATE:
1127 			kcf_secondctx = ((kcf_context_t *)
1128 			    (ctx->cc_framework_private))->kc_secondctx;
1129 			if (kcf_secondctx != NULL) {
1130 				err = kcf_emulate_dual(pd, ctx, params);
1131 				break;
1132 			}
1133 			err = KCF_PROV_MAC_DECRYPT_UPDATE(pd, ctx,
1134 			    dops->md_ciphertext, dops->md_plaintext, rhndl);
1135 			break;
1136 
1137 		case KCF_OP_FINAL:
1138 			kcf_secondctx = ((kcf_context_t *)
1139 			    (ctx->cc_framework_private))->kc_secondctx;
1140 			if (kcf_secondctx != NULL) {
1141 				err = kcf_emulate_dual(pd, ctx, params);
1142 				break;
1143 			}
1144 			err = KCF_PROV_MAC_DECRYPT_FINAL(pd, ctx,
1145 			    dops->md_mac, dops->md_plaintext, rhndl);
1146 			break;
1147 
1148 		case KCF_OP_ATOMIC:
1149 			ASSERT(ctx == NULL);
1150 
1151 			KCF_SET_PROVIDER_MECHNUM(
1152 			    dops->md_framework_mac_mechtype,
1153 			    pd, &dops->md_mac_mech);
1154 
1155 			KCF_SET_PROVIDER_MECHNUM(
1156 			    dops->md_framework_decr_mechtype,
1157 			    pd, &dops->md_decr_mech);
1158 
1159 			err = KCF_PROV_MAC_DECRYPT_ATOMIC(pd, dops->md_sid,
1160 			    &dops->md_mac_mech, dops->md_mac_key,
1161 			    &dops->md_decr_mech, dops->md_decr_key,
1162 			    dops->md_ciphertext, dops->md_mac,
1163 			    dops->md_plaintext,
1164 			    dops->md_mac_templ, dops->md_decr_templ,
1165 			    rhndl);
1166 
1167 			break;
1168 
1169 		case KCF_OP_MAC_VERIFY_DECRYPT_ATOMIC:
1170 			ASSERT(ctx == NULL);
1171 
1172 			KCF_SET_PROVIDER_MECHNUM(
1173 			    dops->md_framework_mac_mechtype,
1174 			    pd, &dops->md_mac_mech);
1175 
1176 			KCF_SET_PROVIDER_MECHNUM(
1177 			    dops->md_framework_decr_mechtype,
1178 			    pd, &dops->md_decr_mech);
1179 
1180 			err = KCF_PROV_MAC_VERIFY_DECRYPT_ATOMIC(pd,
1181 			    dops->md_sid, &dops->md_mac_mech, dops->md_mac_key,
1182 			    &dops->md_decr_mech, dops->md_decr_key,
1183 			    dops->md_ciphertext, dops->md_mac,
1184 			    dops->md_plaintext,
1185 			    dops->md_mac_templ, dops->md_decr_templ,
1186 			    rhndl);
1187 
1188 			break;
1189 
1190 		default:
1191 			break;
1192 		}
1193 		break;
1194 	}
1195 
1196 	case KCF_OG_KEY: {
1197 		kcf_key_ops_params_t *kops = &params->rp_u.key_params;
1198 
1199 		ASSERT(ctx == NULL);
1200 		KCF_SET_PROVIDER_MECHNUM(kops->ko_framework_mechtype, pd,
1201 		    &kops->ko_mech);
1202 
1203 		switch (optype) {
1204 		case KCF_OP_KEY_GENERATE:
1205 			err = KCF_PROV_KEY_GENERATE(pd, kops->ko_sid,
1206 			    &kops->ko_mech,
1207 			    kops->ko_key_template, kops->ko_key_attribute_count,
1208 			    kops->ko_key_object_id_ptr, rhndl);
1209 			break;
1210 
1211 		case KCF_OP_KEY_GENERATE_PAIR:
1212 			err = KCF_PROV_KEY_GENERATE_PAIR(pd, kops->ko_sid,
1213 			    &kops->ko_mech,
1214 			    kops->ko_key_template, kops->ko_key_attribute_count,
1215 			    kops->ko_private_key_template,
1216 			    kops->ko_private_key_attribute_count,
1217 			    kops->ko_key_object_id_ptr,
1218 			    kops->ko_private_key_object_id_ptr, rhndl);
1219 			break;
1220 
1221 		case KCF_OP_KEY_WRAP:
1222 			err = KCF_PROV_KEY_WRAP(pd, kops->ko_sid,
1223 			    &kops->ko_mech,
1224 			    kops->ko_key, kops->ko_key_object_id_ptr,
1225 			    kops->ko_wrapped_key, kops->ko_wrapped_key_len_ptr,
1226 			    rhndl);
1227 			break;
1228 
1229 		case KCF_OP_KEY_UNWRAP:
1230 			err = KCF_PROV_KEY_UNWRAP(pd, kops->ko_sid,
1231 			    &kops->ko_mech,
1232 			    kops->ko_key, kops->ko_wrapped_key,
1233 			    kops->ko_wrapped_key_len_ptr,
1234 			    kops->ko_key_template, kops->ko_key_attribute_count,
1235 			    kops->ko_key_object_id_ptr, rhndl);
1236 			break;
1237 
1238 		case KCF_OP_KEY_DERIVE:
1239 			err = KCF_PROV_KEY_DERIVE(pd, kops->ko_sid,
1240 			    &kops->ko_mech,
1241 			    kops->ko_key, kops->ko_key_template,
1242 			    kops->ko_key_attribute_count,
1243 			    kops->ko_key_object_id_ptr, rhndl);
1244 			break;
1245 
1246 		default:
1247 			break;
1248 		}
1249 		break;
1250 	}
1251 
1252 	case KCF_OG_RANDOM: {
1253 		kcf_random_number_ops_params_t *rops =
1254 		    &params->rp_u.random_number_params;
1255 
1256 		ASSERT(ctx == NULL);
1257 
1258 		switch (optype) {
1259 		case KCF_OP_RANDOM_SEED:
1260 			err = KCF_PROV_SEED_RANDOM(pd, rops->rn_sid,
1261 			    rops->rn_buf, rops->rn_buflen, rhndl);
1262 			break;
1263 
1264 		case KCF_OP_RANDOM_GENERATE:
1265 			err = KCF_PROV_GENERATE_RANDOM(pd, rops->rn_sid,
1266 			    rops->rn_buf, rops->rn_buflen, rhndl);
1267 			break;
1268 
1269 		default:
1270 			break;
1271 		}
1272 		break;
1273 	}
1274 
1275 	case KCF_OG_SESSION: {
1276 		kcf_session_ops_params_t *sops = &params->rp_u.session_params;
1277 
1278 		ASSERT(ctx == NULL);
1279 		switch (optype) {
1280 		case KCF_OP_SESSION_OPEN:
1281 			/*
1282 			 * so_pd may be a logical provider, in which case
1283 			 * we need to check whether it has been removed.
1284 			 */
1285 			if (KCF_IS_PROV_REMOVED(sops->so_pd)) {
1286 				err = CRYPTO_DEVICE_ERROR;
1287 				break;
1288 			}
1289 			err = KCF_PROV_SESSION_OPEN(pd, sops->so_sid_ptr,
1290 			    rhndl, sops->so_pd);
1291 			break;
1292 
1293 		case KCF_OP_SESSION_CLOSE:
1294 			/*
1295 			 * so_pd may be a logical provider, in which case
1296 			 * we need to check whether it has been removed.
1297 			 */
1298 			if (KCF_IS_PROV_REMOVED(sops->so_pd)) {
1299 				err = CRYPTO_DEVICE_ERROR;
1300 				break;
1301 			}
1302 			err = KCF_PROV_SESSION_CLOSE(pd, sops->so_sid,
1303 			    rhndl, sops->so_pd);
1304 			break;
1305 
1306 		case KCF_OP_SESSION_LOGIN:
1307 			err = KCF_PROV_SESSION_LOGIN(pd, sops->so_sid,
1308 			    sops->so_user_type, sops->so_pin,
1309 			    sops->so_pin_len, rhndl);
1310 			break;
1311 
1312 		case KCF_OP_SESSION_LOGOUT:
1313 			err = KCF_PROV_SESSION_LOGOUT(pd, sops->so_sid, rhndl);
1314 			break;
1315 
1316 		default:
1317 			break;
1318 		}
1319 		break;
1320 	}
1321 
1322 	case KCF_OG_OBJECT: {
1323 		kcf_object_ops_params_t *jops = &params->rp_u.object_params;
1324 
1325 		ASSERT(ctx == NULL);
1326 		switch (optype) {
1327 		case KCF_OP_OBJECT_CREATE:
1328 			err = KCF_PROV_OBJECT_CREATE(pd, jops->oo_sid,
1329 			    jops->oo_template, jops->oo_attribute_count,
1330 			    jops->oo_object_id_ptr, rhndl);
1331 			break;
1332 
1333 		case KCF_OP_OBJECT_COPY:
1334 			err = KCF_PROV_OBJECT_COPY(pd, jops->oo_sid,
1335 			    jops->oo_object_id,
1336 			    jops->oo_template, jops->oo_attribute_count,
1337 			    jops->oo_object_id_ptr, rhndl);
1338 			break;
1339 
1340 		case KCF_OP_OBJECT_DESTROY:
1341 			err = KCF_PROV_OBJECT_DESTROY(pd, jops->oo_sid,
1342 			    jops->oo_object_id, rhndl);
1343 			break;
1344 
1345 		case KCF_OP_OBJECT_GET_SIZE:
1346 			err = KCF_PROV_OBJECT_GET_SIZE(pd, jops->oo_sid,
1347 			    jops->oo_object_id, jops->oo_object_size, rhndl);
1348 			break;
1349 
1350 		case KCF_OP_OBJECT_GET_ATTRIBUTE_VALUE:
1351 			err = KCF_PROV_OBJECT_GET_ATTRIBUTE_VALUE(pd,
1352 			    jops->oo_sid, jops->oo_object_id,
1353 			    jops->oo_template, jops->oo_attribute_count, rhndl);
1354 			break;
1355 
1356 		case KCF_OP_OBJECT_SET_ATTRIBUTE_VALUE:
1357 			err = KCF_PROV_OBJECT_SET_ATTRIBUTE_VALUE(pd,
1358 			    jops->oo_sid, jops->oo_object_id,
1359 			    jops->oo_template, jops->oo_attribute_count, rhndl);
1360 			break;
1361 
1362 		case KCF_OP_OBJECT_FIND_INIT:
1363 			err = KCF_PROV_OBJECT_FIND_INIT(pd, jops->oo_sid,
1364 			    jops->oo_template, jops->oo_attribute_count,
1365 			    jops->oo_find_init_pp_ptr, rhndl);
1366 			break;
1367 
1368 		case KCF_OP_OBJECT_FIND:
1369 			err = KCF_PROV_OBJECT_FIND(pd, jops->oo_find_pp,
1370 			    jops->oo_object_id_ptr, jops->oo_max_object_count,
1371 			    jops->oo_object_count_ptr, rhndl);
1372 			break;
1373 
1374 		case KCF_OP_OBJECT_FIND_FINAL:
1375 			err = KCF_PROV_OBJECT_FIND_FINAL(pd, jops->oo_find_pp,
1376 			    rhndl);
1377 			break;
1378 
1379 		default:
1380 			break;
1381 		}
1382 		break;
1383 	}
1384 
1385 	case KCF_OG_PROVMGMT: {
1386 		kcf_provmgmt_ops_params_t *pops = &params->rp_u.provmgmt_params;
1387 
1388 		ASSERT(ctx == NULL);
1389 		switch (optype) {
1390 		case KCF_OP_MGMT_EXTINFO:
1391 			/*
1392 			 * po_pd may be a logical provider, in which case
1393 			 * we need to check whether it has been removed.
1394 			 */
1395 			if (KCF_IS_PROV_REMOVED(pops->po_pd)) {
1396 				err = CRYPTO_DEVICE_ERROR;
1397 				break;
1398 			}
1399 			err = KCF_PROV_EXT_INFO(pd, pops->po_ext_info, rhndl,
1400 			    pops->po_pd);
1401 			break;
1402 
1403 		case KCF_OP_MGMT_INITTOKEN:
1404 			err = KCF_PROV_INIT_TOKEN(pd, pops->po_pin,
1405 			    pops->po_pin_len, pops->po_label, rhndl);
1406 			break;
1407 
1408 		case KCF_OP_MGMT_INITPIN:
1409 			err = KCF_PROV_INIT_PIN(pd, pops->po_sid, pops->po_pin,
1410 			    pops->po_pin_len, rhndl);
1411 			break;
1412 
1413 		case KCF_OP_MGMT_SETPIN:
1414 			err = KCF_PROV_SET_PIN(pd, pops->po_sid,
1415 			    pops->po_old_pin, pops->po_old_pin_len,
1416 			    pops->po_pin, pops->po_pin_len, rhndl);
1417 			break;
1418 
1419 		default:
1420 			break;
1421 		}
1422 		break;
1423 	}
1424 
1425 	default:
1426 		break;
1427 	}		/* end of switch(params->rp_opgrp) */
1428 
1429 	KCF_PROV_INCRSTATS(pd, err);
1430 	return (err);
1431 }
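
/*
 * Minimal sketch of how a request reaches this routine (assumed flow, not
 * from this file): a framework caller packages the arguments with one of
 * the KCF_WRAP_*_OPS_PARAMS macros and hands them to the scheduler, which
 * eventually calls common_submit_request() against the chosen provider:
 *
 *	KCF_WRAP_ENCRYPT_OPS_PARAMS(&params, KCF_OP_SINGLE, sid,
 *	    &mech, key, plaintext, ciphertext, tmpl);
 *	error = kcf_submit_request(pd, ctx, crq, &params, B_FALSE);
 */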
1432 
1433 /*
1434  * Emulate the call for a multipart dual operation with two single steps.
1435  * This routine is always called in the context of a working thread
1436  * running kcf_svc_do_run().
1437  * The single steps are submitted in a pure synchronous way (blocking).
1438  * When this routine returns, kcf_svc_do_run() will call kcf_aop_done()
1439  * so the originating consumer's callback gets invoked. kcf_aop_done()
1440  * takes care of freeing the operation context. So, this routine does
1441  * not free the operation context.
1442  *
1443  * The provider descriptor is assumed held by the callers.
1444  */
1445 static int
1446 kcf_emulate_dual(kcf_provider_desc_t *pd, crypto_ctx_t *ctx,
1447     kcf_req_params_t *params)
1448 {
1449 	int err = CRYPTO_ARGUMENTS_BAD;
1450 	kcf_op_type_t optype;
1451 	size_t save_len;
1452 	off_t save_offset;
1453 
1454 	optype = params->rp_optype;
1455 
1456 	switch (params->rp_opgrp) {
1457 	case KCF_OG_ENCRYPT_MAC: {
1458 		kcf_encrypt_mac_ops_params_t *cmops =
1459 		    &params->rp_u.encrypt_mac_params;
1460 		kcf_context_t *encr_kcf_ctx;
1461 		crypto_ctx_t *mac_ctx;
1462 		kcf_req_params_t encr_params;
1463 
1464 		encr_kcf_ctx = (kcf_context_t *)(ctx->cc_framework_private);
1465 
1466 		switch (optype) {
1467 		case KCF_OP_INIT: {
1468 			encr_kcf_ctx->kc_secondctx = NULL;
1469 
1470 			KCF_WRAP_ENCRYPT_OPS_PARAMS(&encr_params, KCF_OP_INIT,
1471 			    pd->pd_sid, &cmops->em_encr_mech,
1472 			    cmops->em_encr_key, NULL, NULL,
1473 			    cmops->em_encr_templ);
1474 
1475 			err = kcf_submit_request(pd, ctx, NULL, &encr_params,
1476 			    B_FALSE);
1477 
1478 			/* It can't be CRYPTO_QUEUED */
1479 			if (err != CRYPTO_SUCCESS) {
1480 				break;
1481 			}
1482 
1483 			err = crypto_mac_init(&cmops->em_mac_mech,
1484 			    cmops->em_mac_key, cmops->em_mac_templ,
1485 			    (crypto_context_t *)&mac_ctx, NULL);
1486 
1487 			if (err == CRYPTO_SUCCESS) {
1488 				encr_kcf_ctx->kc_secondctx = (kcf_context_t *)
1489 				    mac_ctx->cc_framework_private;
1490 				KCF_CONTEXT_REFHOLD((kcf_context_t *)
1491 				    mac_ctx->cc_framework_private);
1492 			}
1493 
1494 			break;
1495 
1496 		}
1497 		case KCF_OP_UPDATE: {
1498 			crypto_dual_data_t *ct = cmops->em_ciphertext;
1499 			crypto_data_t *pt = cmops->em_plaintext;
1500 			kcf_context_t *mac_kcf_ctx = encr_kcf_ctx->kc_secondctx;
1501 			crypto_ctx_t *mac_ctx = &mac_kcf_ctx->kc_glbl_ctx;
1502 
1503 			KCF_WRAP_ENCRYPT_OPS_PARAMS(&encr_params, KCF_OP_UPDATE,
1504 			    pd->pd_sid, NULL, NULL, pt, (crypto_data_t *)ct,
1505 			    NULL);
1506 
1507 			err = kcf_submit_request(pd, ctx, NULL, &encr_params,
1508 			    B_FALSE);
1509 
1510 			/* It can't be CRYPTO_QUEUED */
1511 			if (err != CRYPTO_SUCCESS) {
1512 				break;
1513 			}
1514 
1515 			save_offset = ct->dd_offset1;
1516 			save_len = ct->dd_len1;
1517 			if (ct->dd_len2 == 0) {
1518 				/*
1519 				 * The previous encrypt step was an
1520 				 * accumulation only and didn't produce any
1521 				 * partial output
1522 				 */
1523 				if (ct->dd_len1 == 0)
1524 					break;
1525 
1526 			} else {
1527 				ct->dd_offset1 = ct->dd_offset2;
1528 				ct->dd_len1 = ct->dd_len2;
1529 			}
1530 			err = crypto_mac_update((crypto_context_t)mac_ctx,
1531 			    (crypto_data_t *)ct, NULL);
1532 
1533 			ct->dd_offset1 = save_offset;
1534 			ct->dd_len1 = save_len;
1535 
1536 			break;
1537 		}
1538 		case KCF_OP_FINAL: {
1539 			crypto_dual_data_t *ct = cmops->em_ciphertext;
1540 			crypto_data_t *mac = cmops->em_mac;
1541 			kcf_context_t *mac_kcf_ctx = encr_kcf_ctx->kc_secondctx;
1542 			crypto_ctx_t *mac_ctx = &mac_kcf_ctx->kc_glbl_ctx;
1543 			crypto_context_t mac_context = mac_ctx;
1544 
1545 			KCF_WRAP_ENCRYPT_OPS_PARAMS(&encr_params, KCF_OP_FINAL,
1546 			    pd->pd_sid, NULL, NULL, NULL, (crypto_data_t *)ct,
1547 			    NULL);
1548 
1549 			err = kcf_submit_request(pd, ctx, NULL, &encr_params,
1550 			    B_FALSE);
1551 
1552 			/* It can't be CRYPTO_QUEUED */
1553 			if (err != CRYPTO_SUCCESS) {
1554 				crypto_cancel_ctx(mac_context);
1555 				break;
1556 			}
1557 
1558 			if (ct->dd_len2 > 0) {
1559 				save_offset = ct->dd_offset1;
1560 				save_len = ct->dd_len1;
1561 				ct->dd_offset1 = ct->dd_offset2;
1562 				ct->dd_len1 = ct->dd_len2;
1563 
1564 				err = crypto_mac_update(mac_context,
1565 				    (crypto_data_t *)ct, NULL);
1566 
1567 				ct->dd_offset1 = save_offset;
1568 				ct->dd_len1 = save_len;
1569 
1570 				if (err != CRYPTO_SUCCESS)  {
1571 					crypto_cancel_ctx(mac_context);
1572 					return (err);
1573 				}
1574 			}
1575 
1576 			/* and finally, collect the MAC */
1577 			err = crypto_mac_final(mac_context, mac, NULL);
1578 			break;
1579 		}
1580 
1581 		default:
1582 			break;
1583 		}
1584 		KCF_PROV_INCRSTATS(pd, err);
1585 		break;
1586 	}
1587 	case KCF_OG_MAC_DECRYPT: {
1588 		kcf_mac_decrypt_ops_params_t *mdops =
1589 		    &params->rp_u.mac_decrypt_params;
1590 		kcf_context_t *decr_kcf_ctx;
1591 		crypto_ctx_t *mac_ctx;
1592 		kcf_req_params_t decr_params;
1593 
1594 		decr_kcf_ctx = (kcf_context_t *)(ctx->cc_framework_private);
1595 
1596 		switch (optype) {
1597 		case KCF_OP_INIT: {
1598 			decr_kcf_ctx->kc_secondctx = NULL;
1599 
1600 			err = crypto_mac_init(&mdops->md_mac_mech,
1601 			    mdops->md_mac_key, mdops->md_mac_templ,
1602 			    (crypto_context_t *)&mac_ctx, NULL);
1603 
1604 			/* It can't be CRYPTO_QUEUED */
1605 			if (err != CRYPTO_SUCCESS) {
1606 				break;
1607 			}
1608 
1609 			KCF_WRAP_DECRYPT_OPS_PARAMS(&decr_params, KCF_OP_INIT,
1610 			    pd->pd_sid, &mdops->md_decr_mech,
1611 			    mdops->md_decr_key, NULL, NULL,
1612 			    mdops->md_decr_templ);
1613 
1614 			err = kcf_submit_request(pd, ctx, NULL, &decr_params,
1615 			    B_FALSE);
1616 
1617 			/* It can't be CRYPTO_QUEUED */
1618 			if (err != CRYPTO_SUCCESS) {
1619 				crypto_cancel_ctx((crypto_context_t)mac_ctx);
1620 				break;
1621 			}
1622 
1623 			decr_kcf_ctx->kc_secondctx = (kcf_context_t *)
1624 			    mac_ctx->cc_framework_private;
1625 			KCF_CONTEXT_REFHOLD((kcf_context_t *)
1626 			    mac_ctx->cc_framework_private);
1627 
1628 			break;
1629 
1630 		}
1631 		case KCF_OP_UPDATE: {
1632 			crypto_dual_data_t *ct = mdops->md_ciphertext;
1633 			crypto_data_t *pt = mdops->md_plaintext;
1634 			kcf_context_t *mac_kcf_ctx = decr_kcf_ctx->kc_secondctx;
1635 			crypto_ctx_t *mac_ctx = &mac_kcf_ctx->kc_glbl_ctx;
1636 
1637 			err = crypto_mac_update((crypto_context_t)mac_ctx,
1638 			    (crypto_data_t *)ct, NULL);
1639 
1640 			if (err != CRYPTO_SUCCESS)
1641 				break;
1642 
1643 			save_offset = ct->dd_offset1;
1644 			save_len = ct->dd_len1;
1645 
1646 			/* zero ct->dd_len2 means decrypt everything */
1647 			if (ct->dd_len2 > 0) {
1648 				ct->dd_offset1 = ct->dd_offset2;
1649 				ct->dd_len1 = ct->dd_len2;
1650 			}
1651 
1652 			err = crypto_decrypt_update((crypto_context_t)ctx,
1653 			    (crypto_data_t *)ct, pt, NULL);
1654 
1655 			ct->dd_offset1 = save_offset;
1656 			ct->dd_len1 = save_len;
1657 
1658 			break;
1659 		}
1660 		case KCF_OP_FINAL: {
1661 			crypto_data_t *pt = mdops->md_plaintext;
1662 			crypto_data_t *mac = mdops->md_mac;
1663 			kcf_context_t *mac_kcf_ctx = decr_kcf_ctx->kc_secondctx;
1664 			crypto_ctx_t *mac_ctx = &mac_kcf_ctx->kc_glbl_ctx;
1665 
1666 			err = crypto_mac_final((crypto_context_t)mac_ctx,
1667 			    mac, NULL);
1668 
1669 			if (err != CRYPTO_SUCCESS) {
1670 				crypto_cancel_ctx(ctx);
1671 				break;
1672 			}
1673 
1674 			/* Get the last chunk of plaintext */
1675 			KCF_CONTEXT_REFHOLD(decr_kcf_ctx);
1676 			err = crypto_decrypt_final((crypto_context_t)ctx, pt,
1677 			    NULL);
1678 
1679 			break;
1680 		}
1681 		}
1682 		break;
1683 	}
1684 	default:
1685 
1686 		break;
1687 	}		/* end of switch(params->rp_opgrp) */
1688 
1689 	return (err);
1690 }
1691