1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 /*
30  * This file contains routines which call into a provider's
31  * entry points and do other related work.
32  */
33 
34 #include <sys/types.h>
35 #include <sys/systm.h>
36 #include <sys/taskq_impl.h>
37 #include <sys/cmn_err.h>
38 
39 #include <sys/crypto/common.h>
40 #include <sys/crypto/impl.h>
41 #include <sys/crypto/sched_impl.h>
42 
/*
 * Return B_TRUE if the specified entry point is NULL. We rely on the
 * caller to provide, via the offset_1 (o1) and offset_2 (o2) arguments,
 * the information needed to locate the entry point. The ops argument is
 * a temporary local variable defined as caddr_t *.
 */
49 #define	KCF_PROV_NULL_ENTRY_POINT(pd, o1, o2, ops)			\
50 	(ops = (caddr_t *)((caddr_t)(pd)->pd_ops_vector + (o1)),	\
51 	(*ops == NULL || *(caddr_t *)((caddr_t)(*ops) + (o2)) == NULL))
52 
53 
54 static int kcf_emulate_dual(kcf_provider_desc_t *, crypto_ctx_t *,
55     kcf_req_params_t *);
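
/*
 * Free a list of providers tried for a request. The reference held on
 * each provider is released before the list entry is freed.
 */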
56 void
57 kcf_free_triedlist(kcf_prov_tried_t *list)
58 {
59 	kcf_prov_tried_t *l;
60 
61 	while ((l = list) != NULL) {
62 		list = list->pt_next;
63 		KCF_PROV_REFRELE(l->pt_pd);
64 		kmem_free(l, sizeof (kcf_prov_tried_t));
65 	}
66 }
67 
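/*
 * Add a provider to the list of providers tried for a request.
 * Returns the new list entry, or NULL if the allocation fails. The
 * hold on pd that kcf_free_triedlist() later releases is assumed to
 * have been taken by the caller.
 */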
68 kcf_prov_tried_t *
69 kcf_insert_triedlist(kcf_prov_tried_t **list, kcf_provider_desc_t *pd,
70     int kmflag)
71 {
72 	kcf_prov_tried_t *l;
73 
74 	l = kmem_alloc(sizeof (kcf_prov_tried_t), kmflag);
75 	if (l == NULL)
76 		return (NULL);
77 
78 	l->pt_pd = pd;
79 	l->pt_next = *list;
80 	*list = l;
81 
82 	return (l);
83 }
84 
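/*
 * Return B_TRUE if the given provider is already in the list of
 * providers tried for this request.
 */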
85 static boolean_t
86 is_in_triedlist(kcf_provider_desc_t *pd, kcf_prov_tried_t *triedl)
87 {
88 	while (triedl != NULL) {
89 		if (triedl->pt_pd == pd)
90 			return (B_TRUE);
91 		triedl = triedl->pt_next;
	}
93 
94 	return (B_FALSE);
95 }
96 
97 /*
98  * Search a mech entry's hardware provider list for the specified
99  * provider. Return true if found.
100  */
101 static boolean_t
102 is_valid_provider_for_mech(kcf_provider_desc_t *pd, kcf_mech_entry_t *me)
103 {
104 	kcf_prov_mech_desc_t *prov_chain;
105 
106 	prov_chain = me->me_hw_prov_chain;
107 	if (prov_chain != NULL) {
108 		ASSERT(me->me_num_hwprov > 0);
109 		for (; prov_chain != NULL; prov_chain = prov_chain->pm_next) {
110 			if (prov_chain->pm_prov_desc == pd) {
111 				return (B_TRUE);
112 			}
113 		}
114 	}
115 	return (B_FALSE);
116 }
117 
/*
 * This routine, given a logical provider, returns the least loaded
 * member provider that is capable of the specified mechanism, i.e. a
 * member for which the mechanism has not been disabled. If mech_type_2
 * is a valid mechanism, the member must be capable of it as well. In
 * addition, just in case member providers are not entirely equivalent,
 * the provider's entry point is checked for non-nullness. This is
 * accomplished by having the caller pass, as arguments, the offset of
 * the function group (offset_1) and the offset of the function within
 * the function group (offset_2). If old is not a logical provider, it
 * is simply validated and held. On success, the selected provider is
 * held and returned in *new; otherwise *new is set to NULL and an
 * error code is returned.
 */
129 int
130 kcf_get_hardware_provider(crypto_mech_type_t mech_type_1,
131     crypto_mech_type_t mech_type_2, offset_t offset_1, offset_t offset_2,
132     boolean_t call_restrict, kcf_provider_desc_t *old,
133     kcf_provider_desc_t **new)
134 {
135 	kcf_provider_desc_t *provider, *gpd = NULL, *real_pd = old;
136 	kcf_provider_list_t *p;
137 	kcf_ops_class_t class;
138 	kcf_mech_entry_t *me;
139 	kcf_mech_entry_tab_t *me_tab;
140 	caddr_t *ops;
141 	int index, len, gqlen = INT_MAX, rv = CRYPTO_SUCCESS;
142 
143 	/* get the mech entry for the specified mechanism */
144 	class = KCF_MECH2CLASS(mech_type_1);
145 	if ((class < KCF_FIRST_OPSCLASS) || (class > KCF_LAST_OPSCLASS)) {
146 		return (CRYPTO_MECHANISM_INVALID);
147 	}
148 
149 	me_tab = &kcf_mech_tabs_tab[class];
150 	index = KCF_MECH2INDEX(mech_type_1);
151 	if ((index < 0) || (index >= me_tab->met_size)) {
152 		return (CRYPTO_MECHANISM_INVALID);
153 	}
154 
155 	me = &((me_tab->met_tab)[index]);
156 	mutex_enter(&me->me_mutex);
157 
158 	/*
159 	 * We assume the provider descriptor will not go away because
160 	 * it is being held somewhere, i.e. its reference count has been
161 	 * incremented. In the case of the crypto module, the provider
162 	 * descriptor is held by the session structure.
163 	 */
164 	if (old->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
165 		if (old->pd_provider_list == NULL) {
166 			real_pd = NULL;
167 			rv = CRYPTO_DEVICE_ERROR;
168 			goto out;
169 		}
170 		/*
171 		 * Find the least loaded real provider. tq_nalloc gives
172 		 * the number of task entries in the task queue. We do
173 		 * not acquire tq_lock here as it is not critical to
174 		 * get the exact number and the lock contention may be
175 		 * too costly for this code path.
176 		 */
177 		mutex_enter(&old->pd_lock);
178 		p = old->pd_provider_list;
179 		while (p != NULL) {
180 			provider = p->pl_provider;
181 
182 			ASSERT(provider->pd_prov_type !=
183 			    CRYPTO_LOGICAL_PROVIDER);
184 
185 			if (!KCF_IS_PROV_USABLE(provider) ||
186 			    (call_restrict && provider->pd_restricted)) {
187 				p = p->pl_next;
188 				continue;
189 			}
190 
191 			if (!is_valid_provider_for_mech(provider, me)) {
192 				p = p->pl_next;
193 				continue;
194 			}
195 
			/* can the provider do the second mech too? */
197 			if (mech_type_2 != CRYPTO_MECH_INVALID) {
198 				crypto_mech_type_t mech_type;
199 				int i;
200 
201 				/* convert from kef to provider's number */
202 				mech_type = provider->pd_map_mechnums
203 				    [KCF_MECH2CLASS(mech_type_2)]
204 				    [KCF_MECH2INDEX(mech_type_2)];
205 
206 				for (i = 0; i < provider->pd_mech_list_count;
207 				    i++) {
208 					if (provider->pd_mechanisms[i]
209 					    .cm_mech_number == mech_type)
210 						break;
211 				}
212 				if (i == provider->pd_mech_list_count) {
213 					p = p->pl_next;
214 					continue;
215 				}
216 			}
217 
218 			if (KCF_PROV_NULL_ENTRY_POINT(provider, offset_1,
219 			    offset_2, ops)) {
220 				p = p->pl_next;
221 				continue;
222 			}
223 
224 			len = provider->pd_sched_info.ks_taskq->tq_nalloc;
225 			if (len < gqlen) {
226 				gqlen = len;
227 				gpd = provider;
228 			}
229 
230 			p = p->pl_next;
231 		}
232 
233 		if (gpd != NULL) {
234 			real_pd = gpd;
235 			KCF_PROV_REFHOLD(real_pd);
236 		} else {
237 			/* can't find provider */
238 			real_pd = NULL;
239 			rv = CRYPTO_MECHANISM_INVALID;
240 		}
241 		mutex_exit(&old->pd_lock);
242 
243 	} else {
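		/* old is a real provider; just validate it and take a hold. */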
244 		if (!KCF_IS_PROV_USABLE(old) ||
245 		    (call_restrict && old->pd_restricted)) {
246 			real_pd = NULL;
247 			rv = CRYPTO_DEVICE_ERROR;
248 			goto out;
249 		}
250 
251 		if (!is_valid_provider_for_mech(old, me)) {
252 			real_pd = NULL;
253 			rv = CRYPTO_MECHANISM_INVALID;
254 			goto out;
255 		}
256 
257 		if (KCF_PROV_NULL_ENTRY_POINT(old, offset_1, offset_2, ops)) {
258 			real_pd = NULL;
259 			rv = CRYPTO_NOT_SUPPORTED;
260 			goto out;
261 		}
262 		KCF_PROV_REFHOLD(real_pd);
263 	}
264 out:
265 	mutex_exit(&me->me_mutex);
266 	*new = real_pd;
267 	return (rv);
268 }
269 
/*
 * This routine, given a logical provider, returns the least loaded
 * member provider. Just in case member providers are not entirely
 * equivalent, the provider's entry point is checked for non-nullness.
 * This is accomplished by having the caller pass, as arguments, the
 * offset of the function group (offset_1) and the offset of the
 * function within the function group (offset_2). If old is not a
 * logical provider, it is simply validated and held. On success, the
 * selected provider is held and returned in *new; otherwise *new is
 * set to NULL and an error code is returned.
 */
279 int
280 kcf_get_hardware_provider_nomech(offset_t offset_1, offset_t offset_2,
281     boolean_t call_restrict, kcf_provider_desc_t *old,
282     kcf_provider_desc_t **new)
283 {
284 	kcf_provider_desc_t *provider, *gpd = NULL, *real_pd = old;
285 	kcf_provider_list_t *p;
286 	caddr_t *ops;
287 	int len, gqlen = INT_MAX, rv = CRYPTO_SUCCESS;
288 
289 	/*
290 	 * We assume the provider descriptor will not go away because
291 	 * it is being held somewhere, i.e. its reference count has been
292 	 * incremented. In the case of the crypto module, the provider
293 	 * descriptor is held by the session structure.
294 	 */
295 	if (old->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
296 		if (old->pd_provider_list == NULL) {
297 			real_pd = NULL;
298 			rv = CRYPTO_DEVICE_ERROR;
299 			goto out;
300 		}
301 		/*
302 		 * Find the least loaded real provider. tq_nalloc gives
303 		 * the number of task entries in the task queue. We do
304 		 * not acquire tq_lock here as it is not critical to
305 		 * get the exact number and the lock contention may be
306 		 * too costly for this code path.
307 		 */
308 		mutex_enter(&old->pd_lock);
309 		p = old->pd_provider_list;
310 		while (p != NULL) {
311 			provider = p->pl_provider;
312 
313 			ASSERT(provider->pd_prov_type !=
314 			    CRYPTO_LOGICAL_PROVIDER);
315 
316 			if (!KCF_IS_PROV_USABLE(provider) ||
317 			    (call_restrict && provider->pd_restricted)) {
318 				p = p->pl_next;
319 				continue;
320 			}
321 
322 			if (KCF_PROV_NULL_ENTRY_POINT(provider, offset_1,
323 			    offset_2, ops)) {
324 				p = p->pl_next;
325 				continue;
326 			}
327 
328 			len = provider->pd_sched_info.ks_taskq->tq_nalloc;
329 			if (len < gqlen) {
330 				gqlen = len;
331 				gpd = provider;
332 			}
333 
334 			p = p->pl_next;
335 		}
336 		mutex_exit(&old->pd_lock);
337 
338 		if (gpd != NULL) {
339 			real_pd = gpd;
340 			KCF_PROV_REFHOLD(real_pd);
341 		} else {
342 			/* can't find provider */
343 			real_pd = NULL;
344 			rv = CRYPTO_DEVICE_ERROR;
345 		}
346 
347 	} else {
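		/* old is a real provider; just validate it and take a hold. */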
348 		if (!KCF_IS_PROV_USABLE(old) ||
349 		    (call_restrict && old->pd_restricted)) {
350 			real_pd = NULL;
351 			rv = CRYPTO_DEVICE_ERROR;
352 			goto out;
353 		}
354 
355 		if (KCF_PROV_NULL_ENTRY_POINT(old, offset_1, offset_2, ops)) {
356 			real_pd = NULL;
357 			rv = CRYPTO_NOT_SUPPORTED;
358 			goto out;
359 		}
360 		KCF_PROV_REFHOLD(real_pd);
361 	}
362 out:
363 	*new = real_pd;
364 	return (rv);
365 }
366 
/*
 * Return the next member of a logical provider, given the previous
 * member (a NULL prev starts at the head of the list). If a next
 * member is found, its refcnt is bumped, it is returned in *pd, and
 * B_TRUE is returned. The caller must hold the logical provider's
 * pd_lock.
 */
372 boolean_t
373 kcf_get_next_logical_provider_member(kcf_provider_desc_t *logical_provider,
374     kcf_provider_desc_t *prev, kcf_provider_desc_t **pd)
375 {
376 	kcf_provider_list_t *p;
377 	kcf_provider_desc_t *next;
378 
379 	ASSERT(MUTEX_HELD(&logical_provider->pd_lock));
380 	p = logical_provider->pd_provider_list;
381 	while (p != NULL) {
382 		/* start the search */
383 		if (prev == NULL) {
384 			next = p->pl_provider;
385 			goto found;
386 		} else {
387 			/* find where we were before */
388 			if (p->pl_provider == prev) {
389 				if (p->pl_next != NULL) {
390 					next = p->pl_next->pl_provider;
391 					goto found;
392 				}
393 			}
394 		}
395 		p = p->pl_next;
396 	}
397 	return (B_FALSE);
398 
399 found:
400 	KCF_PROV_REFHOLD(next);
401 	*pd = next;
402 	return (B_TRUE);
403 }
404 
/*
 * Return the best provider for the specified mechanism. The provider
 * is held and it is the caller's responsibility to release it when done.
 * The fg input argument is used as a search criterion to pick a provider.
 * A provider has to support this function group to be picked.
 *
 * Find the least loaded provider in the list of providers. We do a
 * linear search to find one. This is fine as we assume there are only
 * a small number of providers in this list. If this assumption ever
 * changes, we should revisit this.
 *
 * call_restrict indicates whether the caller is not allowed to use
 * restricted providers.
 */
419 kcf_provider_desc_t *
420 kcf_get_mech_provider(crypto_mech_type_t mech_type, kcf_mech_entry_t **mepp,
421     int *error, kcf_prov_tried_t *triedl, crypto_func_group_t fg,
422     boolean_t call_restrict, size_t data_size)
423 {
424 	kcf_provider_desc_t *pd = NULL, *gpd = NULL;
425 	kcf_prov_mech_desc_t *prov_chain, *mdesc;
426 	int len, gqlen = INT_MAX;
427 	kcf_ops_class_t class;
428 	int index;
429 	kcf_mech_entry_t *me;
430 	kcf_mech_entry_tab_t *me_tab;
431 
432 	class = KCF_MECH2CLASS(mech_type);
433 	if ((class < KCF_FIRST_OPSCLASS) || (class > KCF_LAST_OPSCLASS)) {
434 		*error = CRYPTO_MECHANISM_INVALID;
435 		return (NULL);
436 	}
437 
438 	me_tab = &kcf_mech_tabs_tab[class];
439 	index = KCF_MECH2INDEX(mech_type);
440 	if ((index < 0) || (index >= me_tab->met_size)) {
441 		*error = CRYPTO_MECHANISM_INVALID;
442 		return (NULL);
443 	}
444 
445 	me = &((me_tab->met_tab)[index]);
446 	if (mepp != NULL)
447 		*mepp = me;
448 
449 	mutex_enter(&me->me_mutex);
450 
451 	prov_chain = me->me_hw_prov_chain;
452 
	/*
	 * We check whether data_size is above the threshold for using a
	 * hardware provider. If there is no software provider available
	 * for the mechanism, the threshold is ignored.
	 */
458 	if ((prov_chain != NULL) &&
459 	    ((data_size == 0) || (me->me_threshold == 0) ||
460 	    (data_size > me->me_threshold) ||
461 	    ((mdesc = me->me_sw_prov) == NULL) ||
462 	    (!IS_FG_SUPPORTED(mdesc, fg)) ||
463 	    (!KCF_IS_PROV_USABLE(mdesc->pm_prov_desc)))) {
464 		ASSERT(me->me_num_hwprov > 0);
465 		/* there is at least one provider */
466 
467 		/*
468 		 * Find the least loaded provider. tq_nalloc gives
469 		 * the number of task entries in the task queue. We do
470 		 * not acquire tq_lock here as it is not critical to
471 		 * get the exact number and the lock contention may be
472 		 * too costly for this code path.
473 		 */
474 		while (prov_chain != NULL) {
475 			pd = prov_chain->pm_prov_desc;
476 
477 			if (!IS_FG_SUPPORTED(prov_chain, fg) ||
478 			    !KCF_IS_PROV_USABLE(pd) ||
479 			    IS_PROVIDER_TRIED(pd, triedl) ||
480 			    (call_restrict && pd->pd_restricted)) {
481 				prov_chain = prov_chain->pm_next;
482 				continue;
483 			}
484 
485 			if ((len = pd->pd_sched_info.ks_taskq->tq_nalloc)
486 			    < gqlen) {
487 				gqlen = len;
488 				gpd = pd;
489 			}
490 
491 			prov_chain = prov_chain->pm_next;
492 		}
493 
494 		pd = gpd;
495 	}
496 
497 	/* No HW provider for this mech, is there a SW provider? */
498 	if (pd == NULL && (mdesc = me->me_sw_prov) != NULL) {
499 		pd = mdesc->pm_prov_desc;
500 		if (!IS_FG_SUPPORTED(mdesc, fg) ||
501 		    !KCF_IS_PROV_USABLE(pd) ||
502 		    IS_PROVIDER_TRIED(pd, triedl) ||
503 		    (call_restrict && pd->pd_restricted))
504 			pd = NULL;
505 	}
506 
507 	if (pd == NULL) {
		/*
		 * We do not want to report CRYPTO_MECH_NOT_SUPPORTED when
		 * we are in the "fallback to the next provider" case.
		 * Rather, we preserve the error, so that the client gets
		 * the right error code.
		 */
514 		if (triedl == NULL)
515 			*error = CRYPTO_MECH_NOT_SUPPORTED;
516 	} else
517 		KCF_PROV_REFHOLD(pd);
518 
519 	mutex_exit(&me->me_mutex);
520 	return (pd);
521 }
522 
/*
 * Very similar to kcf_get_mech_provider(). Finds the best provider
 * capable of a dual operation with both me1 and me2.
 * When no provider capable of the dual operation is available, returns
 * the best provider for me1 only and sets *prov_mt2 to
 * CRYPTO_MECH_INVALID.
 * We assume/expect that a slower HW provider capable of the dual
 * operation is still faster than the two fastest providers capable of
 * the individual operations separately.
 */
532 kcf_provider_desc_t *
533 kcf_get_dual_provider(crypto_mechanism_t *mech1, crypto_mechanism_t *mech2,
534     kcf_mech_entry_t **mepp, crypto_mech_type_t *prov_mt1,
535     crypto_mech_type_t *prov_mt2, int *error, kcf_prov_tried_t *triedl,
536     crypto_func_group_t fg1, crypto_func_group_t fg2, boolean_t call_restrict,
537     size_t data_size)
538 {
539 	kcf_provider_desc_t *pd = NULL, *pdm1 = NULL, *pdm1m2 = NULL;
540 	kcf_prov_mech_desc_t *prov_chain, *mdesc;
541 	int len, gqlen = INT_MAX, dgqlen = INT_MAX;
542 	crypto_mech_info_list_t *mil;
543 	crypto_mech_type_t m2id =  mech2->cm_type;
544 	kcf_mech_entry_t *me;
545 
	/* when mech1 is a valid mechanism, me will be its mech_entry */
547 	if (kcf_get_mech_entry(mech1->cm_type, &me) != KCF_SUCCESS) {
548 		*error = CRYPTO_MECHANISM_INVALID;
549 		return (NULL);
550 	}
551 
552 	*prov_mt2 = CRYPTO_MECH_INVALID;
553 
554 	if (mepp != NULL)
555 		*mepp = me;
556 	mutex_enter(&me->me_mutex);
557 
558 	prov_chain = me->me_hw_prov_chain;
559 	/*
560 	 * We check the threshold for using a hardware provider for
561 	 * this amount of data. If there is no software provider available
562 	 * for the first mechanism, then the threshold is ignored.
563 	 */
564 	if ((prov_chain != NULL) &&
565 	    ((data_size == 0) || (me->me_threshold == 0) ||
566 	    (data_size > me->me_threshold) ||
567 	    ((mdesc = me->me_sw_prov) == NULL) ||
568 	    (!IS_FG_SUPPORTED(mdesc, fg1)) ||
569 	    (!KCF_IS_PROV_USABLE(mdesc->pm_prov_desc)))) {
570 		/* there is at least one provider */
571 		ASSERT(me->me_num_hwprov > 0);
572 
573 		/*
574 		 * Find the least loaded provider capable of the combo
575 		 * me1 + me2, and save a pointer to the least loaded
576 		 * provider capable of me1 only.
577 		 */
578 		while (prov_chain != NULL) {
579 			pd = prov_chain->pm_prov_desc;
580 			len = pd->pd_sched_info.ks_taskq->tq_nalloc;
581 
582 			if (!IS_FG_SUPPORTED(prov_chain, fg1) ||
583 			    !KCF_IS_PROV_USABLE(pd) ||
584 			    IS_PROVIDER_TRIED(pd, triedl) ||
585 			    (call_restrict && pd->pd_restricted)) {
586 				prov_chain = prov_chain->pm_next;
587 				continue;
588 			}
589 
590 			/* Save the best provider capable of m1 */
591 			if (len < gqlen) {
592 				*prov_mt1 =
593 				    prov_chain->pm_mech_info.cm_mech_number;
594 				gqlen = len;
595 				pdm1 = pd;
596 			}
597 
598 			/* See if pd can do me2 too */
599 			for (mil = prov_chain->pm_mi_list;
600 			    mil != NULL; mil = mil->ml_next) {
601 				if ((mil->ml_mech_info.cm_func_group_mask &
602 				    fg2) == 0)
603 					continue;
604 
605 				if ((mil->ml_kcf_mechid == m2id) &&
606 				    (len < dgqlen)) {
607 					/* Bingo! */
608 					dgqlen = len;
609 					pdm1m2 = pd;
610 					*prov_mt2 =
611 					    mil->ml_mech_info.cm_mech_number;
612 					*prov_mt1 = prov_chain->
613 					    pm_mech_info.cm_mech_number;
614 					break;
615 				}
616 			}
617 
618 			prov_chain = prov_chain->pm_next;
619 		}
620 
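		/*
		 * Prefer the provider capable of both operations; fall back
		 * to the best provider capable of me1 only.
		 */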
621 		pd =  (pdm1m2 != NULL) ? pdm1m2 : pdm1;
622 	}
623 
624 	/* no HW provider for this mech, is there a SW provider? */
625 	if (pd == NULL && (mdesc = me->me_sw_prov) != NULL) {
626 		pd = mdesc->pm_prov_desc;
627 		if (!IS_FG_SUPPORTED(mdesc, fg1) ||
628 		    !KCF_IS_PROV_USABLE(pd) ||
629 		    IS_PROVIDER_TRIED(pd, triedl) ||
630 		    (call_restrict && pd->pd_restricted))
631 			pd = NULL;
632 		else {
633 			/* See if pd can do me2 too */
634 			for (mil = me->me_sw_prov->pm_mi_list;
635 			    mil != NULL; mil = mil->ml_next) {
636 				if ((mil->ml_mech_info.cm_func_group_mask &
637 				    fg2) == 0)
638 					continue;
639 
640 				if (mil->ml_kcf_mechid == m2id) {
641 					/* Bingo! */
642 					*prov_mt2 =
643 					    mil->ml_mech_info.cm_mech_number;
644 					break;
645 				}
646 			}
647 			*prov_mt1 = me->me_sw_prov->pm_mech_info.cm_mech_number;
648 		}
649 	}
650 
651 	if (pd == NULL)
652 		*error = CRYPTO_MECH_NOT_SUPPORTED;
653 	else
654 		KCF_PROV_REFHOLD(pd);
655 
656 	mutex_exit(&me->me_mutex);
657 	return (pd);
658 }
659 
660 /*
661  * Do the actual work of calling the provider routines.
662  *
663  * pd - Provider structure
664  * ctx - Context for this operation
665  * params - Parameters for this operation
666  * rhndl - Request handle to use for notification
667  *
668  * The return values are the same as that of the respective SPI.
669  */
670 int
671 common_submit_request(kcf_provider_desc_t *pd, crypto_ctx_t *ctx,
672     kcf_req_params_t *params, crypto_req_handle_t rhndl)
673 {
674 	int err = CRYPTO_ARGUMENTS_BAD;
675 	kcf_op_type_t optype;
676 
677 	optype = params->rp_optype;
678 
679 	switch (params->rp_opgrp) {
680 	case KCF_OG_DIGEST: {
681 		kcf_digest_ops_params_t *dops = &params->rp_u.digest_params;
682 
683 		switch (optype) {
684 		case KCF_OP_INIT:
			/*
			 * We should do this only here and not in KCF_WRAP_*
			 * macros. This is because we may want to try other
			 * providers to recover from a failure.
			 */
690 			KCF_SET_PROVIDER_MECHNUM(dops->do_framework_mechtype,
691 			    pd, &dops->do_mech);
692 
693 			err = KCF_PROV_DIGEST_INIT(pd, ctx, &dops->do_mech,
694 			    rhndl);
695 			break;
696 
697 		case KCF_OP_SINGLE:
698 			err = KCF_PROV_DIGEST(pd, ctx, dops->do_data,
699 			    dops->do_digest, rhndl);
700 			break;
701 
702 		case KCF_OP_UPDATE:
703 			err = KCF_PROV_DIGEST_UPDATE(pd, ctx,
704 			    dops->do_data, rhndl);
705 			break;
706 
707 		case KCF_OP_FINAL:
708 			err = KCF_PROV_DIGEST_FINAL(pd, ctx,
709 			    dops->do_digest, rhndl);
710 			break;
711 
712 		case KCF_OP_ATOMIC:
713 			ASSERT(ctx == NULL);
714 			KCF_SET_PROVIDER_MECHNUM(dops->do_framework_mechtype,
715 			    pd, &dops->do_mech);
716 			err = KCF_PROV_DIGEST_ATOMIC(pd, dops->do_sid,
717 			    &dops->do_mech, dops->do_data, dops->do_digest,
718 			    rhndl);
719 			break;
720 
721 		case KCF_OP_DIGEST_KEY:
722 			err = KCF_PROV_DIGEST_KEY(pd, ctx, dops->do_digest_key,
723 			    rhndl);
724 			break;
725 
726 		default:
727 			break;
728 		}
729 		break;
730 	}
731 
732 	case KCF_OG_MAC: {
733 		kcf_mac_ops_params_t *mops = &params->rp_u.mac_params;
734 
735 		switch (optype) {
736 		case KCF_OP_INIT:
737 			KCF_SET_PROVIDER_MECHNUM(mops->mo_framework_mechtype,
738 			    pd, &mops->mo_mech);
739 
740 			err = KCF_PROV_MAC_INIT(pd, ctx, &mops->mo_mech,
741 			    mops->mo_key, mops->mo_templ, rhndl);
742 			break;
743 
744 		case KCF_OP_SINGLE:
745 			err = KCF_PROV_MAC(pd, ctx, mops->mo_data,
746 			    mops->mo_mac, rhndl);
747 			break;
748 
749 		case KCF_OP_UPDATE:
750 			err = KCF_PROV_MAC_UPDATE(pd, ctx, mops->mo_data,
751 			    rhndl);
752 			break;
753 
754 		case KCF_OP_FINAL:
755 			err = KCF_PROV_MAC_FINAL(pd, ctx, mops->mo_mac, rhndl);
756 			break;
757 
758 		case KCF_OP_ATOMIC:
759 			ASSERT(ctx == NULL);
760 			KCF_SET_PROVIDER_MECHNUM(mops->mo_framework_mechtype,
761 			    pd, &mops->mo_mech);
762 
763 			err = KCF_PROV_MAC_ATOMIC(pd, mops->mo_sid,
764 			    &mops->mo_mech, mops->mo_key, mops->mo_data,
765 			    mops->mo_mac, mops->mo_templ, rhndl);
766 			break;
767 
768 		case KCF_OP_MAC_VERIFY_ATOMIC:
769 			ASSERT(ctx == NULL);
770 			KCF_SET_PROVIDER_MECHNUM(mops->mo_framework_mechtype,
771 			    pd, &mops->mo_mech);
772 
773 			err = KCF_PROV_MAC_VERIFY_ATOMIC(pd, mops->mo_sid,
774 			    &mops->mo_mech, mops->mo_key, mops->mo_data,
775 			    mops->mo_mac, mops->mo_templ, rhndl);
776 			break;
777 
778 		default:
779 			break;
780 		}
781 		break;
782 	}
783 
784 	case KCF_OG_ENCRYPT: {
785 		kcf_encrypt_ops_params_t *eops = &params->rp_u.encrypt_params;
786 
787 		switch (optype) {
788 		case KCF_OP_INIT:
789 			KCF_SET_PROVIDER_MECHNUM(eops->eo_framework_mechtype,
790 			    pd, &eops->eo_mech);
791 
792 			err = KCF_PROV_ENCRYPT_INIT(pd, ctx, &eops->eo_mech,
793 			    eops->eo_key, eops->eo_templ, rhndl);
794 			break;
795 
796 		case KCF_OP_SINGLE:
797 			err = KCF_PROV_ENCRYPT(pd, ctx, eops->eo_plaintext,
798 			    eops->eo_ciphertext, rhndl);
799 			break;
800 
801 		case KCF_OP_UPDATE:
802 			err = KCF_PROV_ENCRYPT_UPDATE(pd, ctx,
803 			    eops->eo_plaintext, eops->eo_ciphertext, rhndl);
804 			break;
805 
806 		case KCF_OP_FINAL:
807 			err = KCF_PROV_ENCRYPT_FINAL(pd, ctx,
808 			    eops->eo_ciphertext, rhndl);
809 			break;
810 
811 		case KCF_OP_ATOMIC:
812 			ASSERT(ctx == NULL);
813 			KCF_SET_PROVIDER_MECHNUM(eops->eo_framework_mechtype,
814 			    pd, &eops->eo_mech);
815 
816 			err = KCF_PROV_ENCRYPT_ATOMIC(pd, eops->eo_sid,
817 			    &eops->eo_mech, eops->eo_key, eops->eo_plaintext,
818 			    eops->eo_ciphertext, eops->eo_templ, rhndl);
819 			break;
820 
821 		default:
822 			break;
823 		}
824 		break;
825 	}
826 
827 	case KCF_OG_DECRYPT: {
828 		kcf_decrypt_ops_params_t *dcrops = &params->rp_u.decrypt_params;
829 
830 		switch (optype) {
831 		case KCF_OP_INIT:
832 			KCF_SET_PROVIDER_MECHNUM(dcrops->dop_framework_mechtype,
833 			    pd, &dcrops->dop_mech);
834 
835 			err = KCF_PROV_DECRYPT_INIT(pd, ctx, &dcrops->dop_mech,
836 			    dcrops->dop_key, dcrops->dop_templ, rhndl);
837 			break;
838 
839 		case KCF_OP_SINGLE:
840 			err = KCF_PROV_DECRYPT(pd, ctx, dcrops->dop_ciphertext,
841 			    dcrops->dop_plaintext, rhndl);
842 			break;
843 
844 		case KCF_OP_UPDATE:
845 			err = KCF_PROV_DECRYPT_UPDATE(pd, ctx,
846 			    dcrops->dop_ciphertext, dcrops->dop_plaintext,
847 			    rhndl);
848 			break;
849 
850 		case KCF_OP_FINAL:
851 			err = KCF_PROV_DECRYPT_FINAL(pd, ctx,
852 			    dcrops->dop_plaintext, rhndl);
853 			break;
854 
855 		case KCF_OP_ATOMIC:
856 			ASSERT(ctx == NULL);
857 			KCF_SET_PROVIDER_MECHNUM(dcrops->dop_framework_mechtype,
858 			    pd, &dcrops->dop_mech);
859 
860 			err = KCF_PROV_DECRYPT_ATOMIC(pd, dcrops->dop_sid,
861 			    &dcrops->dop_mech, dcrops->dop_key,
862 			    dcrops->dop_ciphertext, dcrops->dop_plaintext,
863 			    dcrops->dop_templ, rhndl);
864 			break;
865 
866 		default:
867 			break;
868 		}
869 		break;
870 	}
871 
872 	case KCF_OG_SIGN: {
873 		kcf_sign_ops_params_t *sops = &params->rp_u.sign_params;
874 
875 		switch (optype) {
876 		case KCF_OP_INIT:
877 			KCF_SET_PROVIDER_MECHNUM(sops->so_framework_mechtype,
878 			    pd, &sops->so_mech);
879 
880 			err = KCF_PROV_SIGN_INIT(pd, ctx, &sops->so_mech,
881 			    sops->so_key, sops->so_templ, rhndl);
882 			break;
883 
884 		case KCF_OP_SIGN_RECOVER_INIT:
885 			KCF_SET_PROVIDER_MECHNUM(sops->so_framework_mechtype,
886 			    pd, &sops->so_mech);
887 
888 			err = KCF_PROV_SIGN_RECOVER_INIT(pd, ctx,
889 			    &sops->so_mech, sops->so_key, sops->so_templ,
890 			    rhndl);
891 			break;
892 
893 		case KCF_OP_SINGLE:
894 			err = KCF_PROV_SIGN(pd, ctx, sops->so_data,
895 			    sops->so_signature, rhndl);
896 			break;
897 
898 		case KCF_OP_SIGN_RECOVER:
899 			err = KCF_PROV_SIGN_RECOVER(pd, ctx,
900 			    sops->so_data, sops->so_signature, rhndl);
901 			break;
902 
903 		case KCF_OP_UPDATE:
904 			err = KCF_PROV_SIGN_UPDATE(pd, ctx, sops->so_data,
905 			    rhndl);
906 			break;
907 
908 		case KCF_OP_FINAL:
909 			err = KCF_PROV_SIGN_FINAL(pd, ctx, sops->so_signature,
910 			    rhndl);
911 			break;
912 
913 		case KCF_OP_ATOMIC:
914 			ASSERT(ctx == NULL);
915 			KCF_SET_PROVIDER_MECHNUM(sops->so_framework_mechtype,
916 			    pd, &sops->so_mech);
917 
918 			err = KCF_PROV_SIGN_ATOMIC(pd, sops->so_sid,
919 			    &sops->so_mech, sops->so_key, sops->so_data,
920 			    sops->so_templ, sops->so_signature, rhndl);
921 			break;
922 
923 		case KCF_OP_SIGN_RECOVER_ATOMIC:
924 			ASSERT(ctx == NULL);
925 			KCF_SET_PROVIDER_MECHNUM(sops->so_framework_mechtype,
926 			    pd, &sops->so_mech);
927 
928 			err = KCF_PROV_SIGN_RECOVER_ATOMIC(pd, sops->so_sid,
929 			    &sops->so_mech, sops->so_key, sops->so_data,
930 			    sops->so_templ, sops->so_signature, rhndl);
931 			break;
932 
933 		default:
934 			break;
935 		}
936 		break;
937 	}
938 
939 	case KCF_OG_VERIFY: {
940 		kcf_verify_ops_params_t *vops = &params->rp_u.verify_params;
941 
942 		switch (optype) {
943 		case KCF_OP_INIT:
944 			KCF_SET_PROVIDER_MECHNUM(vops->vo_framework_mechtype,
945 			    pd, &vops->vo_mech);
946 
947 			err = KCF_PROV_VERIFY_INIT(pd, ctx, &vops->vo_mech,
948 			    vops->vo_key, vops->vo_templ, rhndl);
949 			break;
950 
951 		case KCF_OP_VERIFY_RECOVER_INIT:
952 			KCF_SET_PROVIDER_MECHNUM(vops->vo_framework_mechtype,
953 			    pd, &vops->vo_mech);
954 
955 			err = KCF_PROV_VERIFY_RECOVER_INIT(pd, ctx,
956 			    &vops->vo_mech, vops->vo_key, vops->vo_templ,
957 			    rhndl);
958 			break;
959 
960 		case KCF_OP_SINGLE:
961 			err = KCF_PROV_VERIFY(pd, ctx, vops->vo_data,
962 			    vops->vo_signature, rhndl);
963 			break;
964 
965 		case KCF_OP_VERIFY_RECOVER:
966 			err = KCF_PROV_VERIFY_RECOVER(pd, ctx,
967 			    vops->vo_signature, vops->vo_data, rhndl);
968 			break;
969 
970 		case KCF_OP_UPDATE:
971 			err = KCF_PROV_VERIFY_UPDATE(pd, ctx, vops->vo_data,
972 			    rhndl);
973 			break;
974 
975 		case KCF_OP_FINAL:
976 			err = KCF_PROV_VERIFY_FINAL(pd, ctx, vops->vo_signature,
977 			    rhndl);
978 			break;
979 
980 		case KCF_OP_ATOMIC:
981 			ASSERT(ctx == NULL);
982 			KCF_SET_PROVIDER_MECHNUM(vops->vo_framework_mechtype,
983 			    pd, &vops->vo_mech);
984 
985 			err = KCF_PROV_VERIFY_ATOMIC(pd, vops->vo_sid,
986 			    &vops->vo_mech, vops->vo_key, vops->vo_data,
987 			    vops->vo_templ, vops->vo_signature, rhndl);
988 			break;
989 
990 		case KCF_OP_VERIFY_RECOVER_ATOMIC:
991 			ASSERT(ctx == NULL);
992 			KCF_SET_PROVIDER_MECHNUM(vops->vo_framework_mechtype,
993 			    pd, &vops->vo_mech);
994 
995 			err = KCF_PROV_VERIFY_RECOVER_ATOMIC(pd, vops->vo_sid,
996 			    &vops->vo_mech, vops->vo_key, vops->vo_signature,
997 			    vops->vo_templ, vops->vo_data, rhndl);
998 			break;
999 
1000 		default:
1001 			break;
1002 		}
1003 		break;
1004 	}
1005 
1006 	case KCF_OG_ENCRYPT_MAC: {
1007 		kcf_encrypt_mac_ops_params_t *eops =
1008 		    &params->rp_u.encrypt_mac_params;
1009 		kcf_context_t *kcf_secondctx;
1010 
1011 		switch (optype) {
1012 		case KCF_OP_INIT:
1013 			kcf_secondctx = ((kcf_context_t *)
1014 			    (ctx->cc_framework_private))->kc_secondctx;
1015 
1016 			if (kcf_secondctx != NULL) {
1017 				err = kcf_emulate_dual(pd, ctx, params);
1018 				break;
1019 			}
1020 			KCF_SET_PROVIDER_MECHNUM(
1021 			    eops->em_framework_encr_mechtype,
1022 			    pd, &eops->em_encr_mech);
1023 
1024 			KCF_SET_PROVIDER_MECHNUM(
1025 			    eops->em_framework_mac_mechtype,
1026 			    pd, &eops->em_mac_mech);
1027 
1028 			err = KCF_PROV_ENCRYPT_MAC_INIT(pd, ctx,
1029 			    &eops->em_encr_mech, eops->em_encr_key,
1030 			    &eops->em_mac_mech, eops->em_mac_key,
1031 			    eops->em_encr_templ, eops->em_mac_templ,
1032 			    rhndl);
1033 
1034 			break;
1035 
1036 		case KCF_OP_SINGLE:
1037 			err = KCF_PROV_ENCRYPT_MAC(pd, ctx,
1038 			    eops->em_plaintext, eops->em_ciphertext,
1039 			    eops->em_mac, rhndl);
1040 			break;
1041 
1042 		case KCF_OP_UPDATE:
1043 			kcf_secondctx = ((kcf_context_t *)
1044 			    (ctx->cc_framework_private))->kc_secondctx;
1045 			if (kcf_secondctx != NULL) {
1046 				err = kcf_emulate_dual(pd, ctx, params);
1047 				break;
1048 			}
1049 			err = KCF_PROV_ENCRYPT_MAC_UPDATE(pd, ctx,
1050 			    eops->em_plaintext, eops->em_ciphertext, rhndl);
1051 			break;
1052 
1053 		case KCF_OP_FINAL:
1054 			kcf_secondctx = ((kcf_context_t *)
1055 			    (ctx->cc_framework_private))->kc_secondctx;
1056 			if (kcf_secondctx != NULL) {
1057 				err = kcf_emulate_dual(pd, ctx, params);
1058 				break;
1059 			}
1060 			err = KCF_PROV_ENCRYPT_MAC_FINAL(pd, ctx,
1061 			    eops->em_ciphertext, eops->em_mac, rhndl);
1062 			break;
1063 
1064 		case KCF_OP_ATOMIC:
1065 			ASSERT(ctx == NULL);
1066 
1067 			KCF_SET_PROVIDER_MECHNUM(
1068 			    eops->em_framework_encr_mechtype,
1069 			    pd, &eops->em_encr_mech);
1070 
1071 			KCF_SET_PROVIDER_MECHNUM(
1072 			    eops->em_framework_mac_mechtype,
1073 			    pd, &eops->em_mac_mech);
1074 
1075 			err = KCF_PROV_ENCRYPT_MAC_ATOMIC(pd, eops->em_sid,
1076 			    &eops->em_encr_mech, eops->em_encr_key,
1077 			    &eops->em_mac_mech, eops->em_mac_key,
1078 			    eops->em_plaintext, eops->em_ciphertext,
1079 			    eops->em_mac,
1080 			    eops->em_encr_templ, eops->em_mac_templ,
1081 			    rhndl);
1082 
1083 			break;
1084 
1085 		default:
1086 			break;
1087 		}
1088 		break;
1089 	}
1090 
1091 	case KCF_OG_MAC_DECRYPT: {
1092 		kcf_mac_decrypt_ops_params_t *dops =
1093 		    &params->rp_u.mac_decrypt_params;
1094 		kcf_context_t *kcf_secondctx;
1095 
1096 		switch (optype) {
1097 		case KCF_OP_INIT:
1098 			kcf_secondctx = ((kcf_context_t *)
1099 			    (ctx->cc_framework_private))->kc_secondctx;
1100 
1101 			if (kcf_secondctx != NULL) {
1102 				err = kcf_emulate_dual(pd, ctx, params);
1103 				break;
1104 			}
1105 			KCF_SET_PROVIDER_MECHNUM(
1106 			    dops->md_framework_mac_mechtype,
1107 			    pd, &dops->md_mac_mech);
1108 
1109 			KCF_SET_PROVIDER_MECHNUM(
1110 			    dops->md_framework_decr_mechtype,
1111 			    pd, &dops->md_decr_mech);
1112 
1113 			err = KCF_PROV_MAC_DECRYPT_INIT(pd, ctx,
1114 			    &dops->md_mac_mech, dops->md_mac_key,
1115 			    &dops->md_decr_mech, dops->md_decr_key,
1116 			    dops->md_mac_templ, dops->md_decr_templ,
1117 			    rhndl);
1118 
1119 			break;
1120 
1121 		case KCF_OP_SINGLE:
1122 			err = KCF_PROV_MAC_DECRYPT(pd, ctx,
1123 			    dops->md_ciphertext, dops->md_mac,
1124 			    dops->md_plaintext, rhndl);
1125 			break;
1126 
1127 		case KCF_OP_UPDATE:
1128 			kcf_secondctx = ((kcf_context_t *)
1129 			    (ctx->cc_framework_private))->kc_secondctx;
1130 			if (kcf_secondctx != NULL) {
1131 				err = kcf_emulate_dual(pd, ctx, params);
1132 				break;
1133 			}
1134 			err = KCF_PROV_MAC_DECRYPT_UPDATE(pd, ctx,
1135 			    dops->md_ciphertext, dops->md_plaintext, rhndl);
1136 			break;
1137 
1138 		case KCF_OP_FINAL:
1139 			kcf_secondctx = ((kcf_context_t *)
1140 			    (ctx->cc_framework_private))->kc_secondctx;
1141 			if (kcf_secondctx != NULL) {
1142 				err = kcf_emulate_dual(pd, ctx, params);
1143 				break;
1144 			}
1145 			err = KCF_PROV_MAC_DECRYPT_FINAL(pd, ctx,
1146 			    dops->md_mac, dops->md_plaintext, rhndl);
1147 			break;
1148 
1149 		case KCF_OP_ATOMIC:
1150 			ASSERT(ctx == NULL);
1151 
1152 			KCF_SET_PROVIDER_MECHNUM(
1153 			    dops->md_framework_mac_mechtype,
1154 			    pd, &dops->md_mac_mech);
1155 
1156 			KCF_SET_PROVIDER_MECHNUM(
1157 			    dops->md_framework_decr_mechtype,
1158 			    pd, &dops->md_decr_mech);
1159 
1160 			err = KCF_PROV_MAC_DECRYPT_ATOMIC(pd, dops->md_sid,
1161 			    &dops->md_mac_mech, dops->md_mac_key,
1162 			    &dops->md_decr_mech, dops->md_decr_key,
1163 			    dops->md_ciphertext, dops->md_mac,
1164 			    dops->md_plaintext,
1165 			    dops->md_mac_templ, dops->md_decr_templ,
1166 			    rhndl);
1167 
1168 			break;
1169 
1170 		case KCF_OP_MAC_VERIFY_DECRYPT_ATOMIC:
1171 			ASSERT(ctx == NULL);
1172 
1173 			KCF_SET_PROVIDER_MECHNUM(
1174 			    dops->md_framework_mac_mechtype,
1175 			    pd, &dops->md_mac_mech);
1176 
1177 			KCF_SET_PROVIDER_MECHNUM(
1178 			    dops->md_framework_decr_mechtype,
1179 			    pd, &dops->md_decr_mech);
1180 
1181 			err = KCF_PROV_MAC_VERIFY_DECRYPT_ATOMIC(pd,
1182 			    dops->md_sid, &dops->md_mac_mech, dops->md_mac_key,
1183 			    &dops->md_decr_mech, dops->md_decr_key,
1184 			    dops->md_ciphertext, dops->md_mac,
1185 			    dops->md_plaintext,
1186 			    dops->md_mac_templ, dops->md_decr_templ,
1187 			    rhndl);
1188 
1189 			break;
1190 
1191 		default:
1192 			break;
1193 		}
1194 		break;
1195 	}
1196 
1197 	case KCF_OG_KEY: {
1198 		kcf_key_ops_params_t *kops = &params->rp_u.key_params;
1199 
1200 		ASSERT(ctx == NULL);
1201 		KCF_SET_PROVIDER_MECHNUM(kops->ko_framework_mechtype, pd,
1202 		    &kops->ko_mech);
1203 
1204 		switch (optype) {
1205 		case KCF_OP_KEY_GENERATE:
1206 			err = KCF_PROV_KEY_GENERATE(pd, kops->ko_sid,
1207 			    &kops->ko_mech,
1208 			    kops->ko_key_template, kops->ko_key_attribute_count,
1209 			    kops->ko_key_object_id_ptr, rhndl);
1210 			break;
1211 
1212 		case KCF_OP_KEY_GENERATE_PAIR:
1213 			err = KCF_PROV_KEY_GENERATE_PAIR(pd, kops->ko_sid,
1214 			    &kops->ko_mech,
1215 			    kops->ko_key_template, kops->ko_key_attribute_count,
1216 			    kops->ko_private_key_template,
1217 			    kops->ko_private_key_attribute_count,
1218 			    kops->ko_key_object_id_ptr,
1219 			    kops->ko_private_key_object_id_ptr, rhndl);
1220 			break;
1221 
1222 		case KCF_OP_KEY_WRAP:
1223 			err = KCF_PROV_KEY_WRAP(pd, kops->ko_sid,
1224 			    &kops->ko_mech,
1225 			    kops->ko_key, kops->ko_key_object_id_ptr,
1226 			    kops->ko_wrapped_key, kops->ko_wrapped_key_len_ptr,
1227 			    rhndl);
1228 			break;
1229 
1230 		case KCF_OP_KEY_UNWRAP:
1231 			err = KCF_PROV_KEY_UNWRAP(pd, kops->ko_sid,
1232 			    &kops->ko_mech,
1233 			    kops->ko_key, kops->ko_wrapped_key,
1234 			    kops->ko_wrapped_key_len_ptr,
1235 			    kops->ko_key_template, kops->ko_key_attribute_count,
1236 			    kops->ko_key_object_id_ptr, rhndl);
1237 			break;
1238 
1239 		case KCF_OP_KEY_DERIVE:
1240 			err = KCF_PROV_KEY_DERIVE(pd, kops->ko_sid,
1241 			    &kops->ko_mech,
1242 			    kops->ko_key, kops->ko_key_template,
1243 			    kops->ko_key_attribute_count,
1244 			    kops->ko_key_object_id_ptr, rhndl);
1245 			break;
1246 
1247 		default:
1248 			break;
1249 		}
1250 		break;
1251 	}
1252 
1253 	case KCF_OG_RANDOM: {
1254 		kcf_random_number_ops_params_t *rops =
1255 		    &params->rp_u.random_number_params;
1256 
1257 		ASSERT(ctx == NULL);
1258 
1259 		switch (optype) {
1260 		case KCF_OP_RANDOM_SEED:
1261 			err = KCF_PROV_SEED_RANDOM(pd, rops->rn_sid,
1262 			    rops->rn_buf, rops->rn_buflen, rhndl);
1263 			break;
1264 
1265 		case KCF_OP_RANDOM_GENERATE:
1266 			err = KCF_PROV_GENERATE_RANDOM(pd, rops->rn_sid,
1267 			    rops->rn_buf, rops->rn_buflen, rhndl);
1268 			break;
1269 
1270 		default:
1271 			break;
1272 		}
1273 		break;
1274 	}
1275 
1276 	case KCF_OG_SESSION: {
1277 		kcf_session_ops_params_t *sops = &params->rp_u.session_params;
1278 
1279 		ASSERT(ctx == NULL);
1280 		switch (optype) {
1281 		case KCF_OP_SESSION_OPEN:
1282 			err = KCF_PROV_SESSION_OPEN(pd, sops->so_sid_ptr,
1283 			    rhndl, sops->so_pd);
1284 			break;
1285 
1286 		case KCF_OP_SESSION_CLOSE:
1287 			err = KCF_PROV_SESSION_CLOSE(pd, sops->so_sid,
1288 			    rhndl, sops->so_pd);
1289 			break;
1290 
1291 		case KCF_OP_SESSION_LOGIN:
1292 			err = KCF_PROV_SESSION_LOGIN(pd, sops->so_sid,
1293 			    sops->so_user_type, sops->so_pin,
1294 			    sops->so_pin_len, rhndl);
1295 			break;
1296 
1297 		case KCF_OP_SESSION_LOGOUT:
1298 			err = KCF_PROV_SESSION_LOGOUT(pd, sops->so_sid, rhndl);
1299 			break;
1300 
1301 		default:
1302 			break;
1303 		}
1304 		break;
1305 	}
1306 
1307 	case KCF_OG_OBJECT: {
1308 		kcf_object_ops_params_t *jops = &params->rp_u.object_params;
1309 
1310 		ASSERT(ctx == NULL);
1311 		switch (optype) {
1312 		case KCF_OP_OBJECT_CREATE:
1313 			err = KCF_PROV_OBJECT_CREATE(pd, jops->oo_sid,
1314 			    jops->oo_template, jops->oo_attribute_count,
1315 			    jops->oo_object_id_ptr, rhndl);
1316 			break;
1317 
1318 		case KCF_OP_OBJECT_COPY:
1319 			err = KCF_PROV_OBJECT_COPY(pd, jops->oo_sid,
1320 			    jops->oo_object_id,
1321 			    jops->oo_template, jops->oo_attribute_count,
1322 			    jops->oo_object_id_ptr, rhndl);
1323 			break;
1324 
1325 		case KCF_OP_OBJECT_DESTROY:
1326 			err = KCF_PROV_OBJECT_DESTROY(pd, jops->oo_sid,
1327 			    jops->oo_object_id, rhndl);
1328 			break;
1329 
1330 		case KCF_OP_OBJECT_GET_SIZE:
1331 			err = KCF_PROV_OBJECT_GET_SIZE(pd, jops->oo_sid,
1332 			    jops->oo_object_id, jops->oo_object_size, rhndl);
1333 			break;
1334 
1335 		case KCF_OP_OBJECT_GET_ATTRIBUTE_VALUE:
1336 			err = KCF_PROV_OBJECT_GET_ATTRIBUTE_VALUE(pd,
1337 			    jops->oo_sid, jops->oo_object_id,
1338 			    jops->oo_template, jops->oo_attribute_count, rhndl);
1339 			break;
1340 
1341 		case KCF_OP_OBJECT_SET_ATTRIBUTE_VALUE:
1342 			err = KCF_PROV_OBJECT_SET_ATTRIBUTE_VALUE(pd,
1343 			    jops->oo_sid, jops->oo_object_id,
1344 			    jops->oo_template, jops->oo_attribute_count, rhndl);
1345 			break;
1346 
1347 		case KCF_OP_OBJECT_FIND_INIT:
1348 			err = KCF_PROV_OBJECT_FIND_INIT(pd, jops->oo_sid,
1349 			    jops->oo_template, jops->oo_attribute_count,
1350 			    jops->oo_find_init_pp_ptr, rhndl);
1351 			break;
1352 
1353 		case KCF_OP_OBJECT_FIND:
1354 			err = KCF_PROV_OBJECT_FIND(pd, jops->oo_find_pp,
1355 			    jops->oo_object_id_ptr, jops->oo_max_object_count,
1356 			    jops->oo_object_count_ptr, rhndl);
1357 			break;
1358 
1359 		case KCF_OP_OBJECT_FIND_FINAL:
1360 			err = KCF_PROV_OBJECT_FIND_FINAL(pd, jops->oo_find_pp,
1361 			    rhndl);
1362 			break;
1363 
1364 		default:
1365 			break;
1366 		}
1367 		break;
1368 	}
1369 
1370 	case KCF_OG_PROVMGMT: {
1371 		kcf_provmgmt_ops_params_t *pops = &params->rp_u.provmgmt_params;
1372 
1373 		ASSERT(ctx == NULL);
1374 		switch (optype) {
1375 		case KCF_OP_MGMT_EXTINFO:
1376 			err = KCF_PROV_EXT_INFO(pd, pops->po_ext_info, rhndl,
1377 			    pops->po_pd);
1378 			break;
1379 
1380 		case KCF_OP_MGMT_INITTOKEN:
1381 			err = KCF_PROV_INIT_TOKEN(pd, pops->po_pin,
1382 			    pops->po_pin_len, pops->po_label, rhndl);
1383 			break;
1384 
1385 		case KCF_OP_MGMT_INITPIN:
1386 			err = KCF_PROV_INIT_PIN(pd, pops->po_sid, pops->po_pin,
1387 			    pops->po_pin_len, rhndl);
1388 			break;
1389 
1390 		case KCF_OP_MGMT_SETPIN:
1391 			err = KCF_PROV_SET_PIN(pd, pops->po_sid,
1392 			    pops->po_old_pin, pops->po_old_pin_len,
1393 			    pops->po_pin, pops->po_pin_len, rhndl);
1394 			break;
1395 
1396 		default:
1397 			break;
1398 		}
1399 		break;
1400 	}
1401 
1402 	default:
1403 		break;
1404 	}		/* end of switch(params->rp_opgrp) */
1405 
1406 	KCF_PROV_INCRSTATS(pd, err);
1407 	return (err);
1408 }
1409 
/*
 * Emulate a multipart dual operation with two single-operation steps.
 * This routine is always called in the context of a worker thread
 * running kcf_svc_do_run().
 * The single steps are submitted synchronously (blocking).
 * When this routine returns, kcf_svc_do_run() will call kcf_aop_done()
 * so the originating consumer's callback gets invoked. kcf_aop_done()
 * takes care of freeing the operation context, so this routine does
 * not free it.
 *
 * The provider descriptor is assumed to be held by the caller.
 */
1422 static int
1423 kcf_emulate_dual(kcf_provider_desc_t *pd, crypto_ctx_t *ctx,
1424     kcf_req_params_t *params)
1425 {
1426 	int err = CRYPTO_ARGUMENTS_BAD;
1427 	kcf_op_type_t optype;
1428 	size_t save_len;
1429 	off_t save_offset;
1430 
1431 	optype = params->rp_optype;
1432 
1433 	switch (params->rp_opgrp) {
1434 	case KCF_OG_ENCRYPT_MAC: {
1435 		kcf_encrypt_mac_ops_params_t *cmops =
1436 		    &params->rp_u.encrypt_mac_params;
1437 		kcf_context_t *encr_kcf_ctx;
1438 		crypto_ctx_t *mac_ctx;
1439 		kcf_req_params_t encr_params;
1440 
1441 		encr_kcf_ctx = (kcf_context_t *)(ctx->cc_framework_private);
1442 
1443 		switch (optype) {
1444 		case KCF_OP_INIT: {
1445 			encr_kcf_ctx->kc_secondctx = NULL;
1446 
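			/*
			 * The encryption part is initialized on this
			 * provider first. A separate MAC context is then
			 * created through the framework and linked to this
			 * context via kc_secondctx.
			 */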
1447 			KCF_WRAP_ENCRYPT_OPS_PARAMS(&encr_params, KCF_OP_INIT,
1448 			    pd->pd_sid, &cmops->em_encr_mech,
1449 			    cmops->em_encr_key, NULL, NULL,
1450 			    cmops->em_encr_templ);
1451 
1452 			err = kcf_submit_request(pd, ctx, NULL, &encr_params,
1453 			    B_FALSE);
1454 
1455 			/* It can't be CRYPTO_QUEUED */
1456 			if (err != CRYPTO_SUCCESS) {
1457 				break;
1458 			}
1459 
1460 			err = crypto_mac_init(&cmops->em_mac_mech,
1461 			    cmops->em_mac_key, cmops->em_mac_templ,
1462 			    (crypto_context_t *)&mac_ctx, NULL);
1463 
1464 			if (err == CRYPTO_SUCCESS) {
1465 				encr_kcf_ctx->kc_secondctx = (kcf_context_t *)
1466 				    mac_ctx->cc_framework_private;
1467 				KCF_CONTEXT_REFHOLD((kcf_context_t *)
1468 				    mac_ctx->cc_framework_private);
1469 			}
1470 
1471 			break;
1472 
1473 		}
1474 		case KCF_OP_UPDATE: {
1475 			crypto_dual_data_t *ct = cmops->em_ciphertext;
1476 			crypto_data_t *pt = cmops->em_plaintext;
1477 			kcf_context_t *mac_kcf_ctx = encr_kcf_ctx->kc_secondctx;
1478 			crypto_ctx_t *mac_ctx = &mac_kcf_ctx->kc_glbl_ctx;
1479 
1480 			KCF_WRAP_ENCRYPT_OPS_PARAMS(&encr_params, KCF_OP_UPDATE,
1481 			    pd->pd_sid, NULL, NULL, pt, (crypto_data_t *)ct,
1482 			    NULL);
1483 
1484 			err = kcf_submit_request(pd, ctx, NULL, &encr_params,
1485 			    B_FALSE);
1486 
1487 			/* It can't be CRYPTO_QUEUED */
1488 			if (err != CRYPTO_SUCCESS) {
1489 				break;
1490 			}
1491 
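			/*
			 * crypto_mac_update() operates on the region that
			 * dd_offset1/dd_len1 describe; save them so they can
			 * be restored after the MAC update.
			 */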
1492 			save_offset = ct->dd_offset1;
1493 			save_len = ct->dd_len1;
1494 			if (ct->dd_len2 == 0) {
1495 				/*
1496 				 * The previous encrypt step was an
1497 				 * accumulation only and didn't produce any
1498 				 * partial output
1499 				 */
1500 				if (ct->dd_len1 == 0)
1501 					break;
1502 
1503 			} else {
1504 				ct->dd_offset1 = ct->dd_offset2;
1505 				ct->dd_len1 = ct->dd_len2;
1506 			}
1507 			err = crypto_mac_update((crypto_context_t)mac_ctx,
1508 			    (crypto_data_t *)ct, NULL);
1509 
1510 			ct->dd_offset1 = save_offset;
1511 			ct->dd_len1 = save_len;
1512 
1513 			break;
1514 		}
1515 		case KCF_OP_FINAL: {
1516 			crypto_dual_data_t *ct = cmops->em_ciphertext;
1517 			crypto_data_t *mac = cmops->em_mac;
1518 			kcf_context_t *mac_kcf_ctx = encr_kcf_ctx->kc_secondctx;
1519 			crypto_ctx_t *mac_ctx = &mac_kcf_ctx->kc_glbl_ctx;
1520 			crypto_context_t mac_context = mac_ctx;
1521 
1522 			KCF_WRAP_ENCRYPT_OPS_PARAMS(&encr_params, KCF_OP_FINAL,
1523 			    pd->pd_sid, NULL, NULL, NULL, (crypto_data_t *)ct,
1524 			    NULL);
1525 
1526 			err = kcf_submit_request(pd, ctx, NULL, &encr_params,
1527 			    B_FALSE);
1528 
1529 			/* It can't be CRYPTO_QUEUED */
1530 			if (err != CRYPTO_SUCCESS) {
1531 				crypto_cancel_ctx(mac_context);
1532 				break;
1533 			}
1534 
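			/*
			 * If the final encrypt step produced more ciphertext,
			 * feed it to the MAC before computing the final MAC.
			 */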
1535 			if (ct->dd_len2 > 0) {
1536 				save_offset = ct->dd_offset1;
1537 				save_len = ct->dd_len1;
1538 				ct->dd_offset1 = ct->dd_offset2;
1539 				ct->dd_len1 = ct->dd_len2;
1540 
1541 				err = crypto_mac_update(mac_context,
1542 				    (crypto_data_t *)ct, NULL);
1543 
1544 				ct->dd_offset1 = save_offset;
1545 				ct->dd_len1 = save_len;
1546 
1547 				if (err != CRYPTO_SUCCESS)  {
1548 					crypto_cancel_ctx(mac_context);
1549 					return (err);
1550 				}
1551 			}
1552 
1553 			/* and finally, collect the MAC */
1554 			err = crypto_mac_final(mac_context, mac, NULL);
1555 			break;
1556 		}
1557 
1558 		default:
1559 			break;
1560 		}
1561 		KCF_PROV_INCRSTATS(pd, err);
1562 		break;
1563 	}
1564 	case KCF_OG_MAC_DECRYPT: {
1565 		kcf_mac_decrypt_ops_params_t *mdops =
1566 		    &params->rp_u.mac_decrypt_params;
1567 		kcf_context_t *decr_kcf_ctx;
1568 		crypto_ctx_t *mac_ctx;
1569 		kcf_req_params_t decr_params;
1570 
1571 		decr_kcf_ctx = (kcf_context_t *)(ctx->cc_framework_private);
1572 
1573 		switch (optype) {
1574 		case KCF_OP_INIT: {
1575 			decr_kcf_ctx->kc_secondctx = NULL;
1576 
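			/*
			 * For mac_decrypt, the MAC context is created first
			 * through the framework; the decryption part is then
			 * initialized on this provider and the MAC context is
			 * linked to it via kc_secondctx.
			 */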
1577 			err = crypto_mac_init(&mdops->md_mac_mech,
1578 			    mdops->md_mac_key, mdops->md_mac_templ,
1579 			    (crypto_context_t *)&mac_ctx, NULL);
1580 
1581 			/* It can't be CRYPTO_QUEUED */
1582 			if (err != CRYPTO_SUCCESS) {
1583 				break;
1584 			}
1585 
1586 			KCF_WRAP_DECRYPT_OPS_PARAMS(&decr_params, KCF_OP_INIT,
1587 			    pd->pd_sid, &mdops->md_decr_mech,
1588 			    mdops->md_decr_key, NULL, NULL,
1589 			    mdops->md_decr_templ);
1590 
1591 			err = kcf_submit_request(pd, ctx, NULL, &decr_params,
1592 			    B_FALSE);
1593 
1594 			/* It can't be CRYPTO_QUEUED */
1595 			if (err != CRYPTO_SUCCESS) {
1596 				crypto_cancel_ctx((crypto_context_t)mac_ctx);
1597 				break;
1598 			}
1599 
1600 			decr_kcf_ctx->kc_secondctx = (kcf_context_t *)
1601 			    mac_ctx->cc_framework_private;
1602 			KCF_CONTEXT_REFHOLD((kcf_context_t *)
1603 			    mac_ctx->cc_framework_private);
1604 
1605 			break;
1606 
1607 		}
1608 		case KCF_OP_UPDATE: {
1609 			crypto_dual_data_t *ct = mdops->md_ciphertext;
1610 			crypto_data_t *pt = mdops->md_plaintext;
1611 			kcf_context_t *mac_kcf_ctx = decr_kcf_ctx->kc_secondctx;
1612 			crypto_ctx_t *mac_ctx = &mac_kcf_ctx->kc_glbl_ctx;
1613 
1614 			err = crypto_mac_update((crypto_context_t)mac_ctx,
1615 			    (crypto_data_t *)ct, NULL);
1616 
1617 			if (err != CRYPTO_SUCCESS)
1618 				break;
1619 
1620 			save_offset = ct->dd_offset1;
1621 			save_len = ct->dd_len1;
1622 
1623 			/* zero ct->dd_len2 means decrypt everything */
1624 			if (ct->dd_len2 > 0) {
1625 				ct->dd_offset1 = ct->dd_offset2;
1626 				ct->dd_len1 = ct->dd_len2;
1627 			}
1628 
1629 			err = crypto_decrypt_update((crypto_context_t)ctx,
1630 			    (crypto_data_t *)ct, pt, NULL);
1631 
1632 			ct->dd_offset1 = save_offset;
1633 			ct->dd_len1 = save_len;
1634 
1635 			break;
1636 		}
1637 		case KCF_OP_FINAL: {
1638 			crypto_data_t *pt = mdops->md_plaintext;
1639 			crypto_data_t *mac = mdops->md_mac;
1640 			kcf_context_t *mac_kcf_ctx = decr_kcf_ctx->kc_secondctx;
1641 			crypto_ctx_t *mac_ctx = &mac_kcf_ctx->kc_glbl_ctx;
1642 
1643 			err = crypto_mac_final((crypto_context_t)mac_ctx,
1644 			    mac, NULL);
1645 
1646 			if (err != CRYPTO_SUCCESS) {
1647 				crypto_cancel_ctx(ctx);
1648 				break;
1649 			}
1650 
1651 			/* Get the last chunk of plaintext */
1652 			KCF_CONTEXT_REFHOLD(decr_kcf_ctx);
1653 			err = crypto_decrypt_final((crypto_context_t)ctx, pt,
1654 			    NULL);
1655 
1656 			break;
1657 		}
1658 		}
1659 		break;
1660 	}
1661 	default:
1662 
1663 		break;
1664 	}		/* end of switch(params->rp_opgrp) */
1665 
1666 	return (err);
1667 }
1668