1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 /*
30  * This file contains routines which call into a provider's
31  * entry points and do other related work.
32  */
33 
34 #include <sys/types.h>
35 #include <sys/systm.h>
36 #include <sys/taskq_impl.h>
37 #include <sys/cmn_err.h>
38 
39 #include <sys/crypto/common.h>
40 #include <sys/crypto/impl.h>
41 #include <sys/crypto/sched_impl.h>
42 
43 /*
44  * Return B_TRUE if the specified entry point is NULL. We rely on the
45  * caller to provide, with offset_1 and offset_2, information to calculate
46  * the location of the entry point. The ops argument is a temporary local
47  * variable defined as caddr_t *.
48  */
49 #define	KCF_PROV_NULL_ENTRY_POINT(pd, o1, o2, ops)			\
50 	(ops = (caddr_t *)((caddr_t)(pd)->pd_ops_vector + (o1)),	\
51 	(*ops == NULL || *(caddr_t *)((caddr_t)(*ops) + (o2)) == NULL))
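/*
 * Illustrative sketch (comment only, not compiled): a caller typically
 * derives the two offsets with offsetof() so that the macro can locate
 * the entry point in the provider's ops vector. The structure and field
 * names below are assumptions for illustration only; the real offsets
 * are supplied by the callers of kcf_get_hardware_provider() and friends.
 *
 *	caddr_t *ops;
 *
 *	if (KCF_PROV_NULL_ENTRY_POINT(pd,
 *	    offsetof(crypto_ops_t, co_cipher_ops),
 *	    offsetof(crypto_cipher_ops_t, encrypt_init), ops))
 *		return (CRYPTO_NOT_SUPPORTED);
 */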
52 
53 
54 static int kcf_emulate_dual(kcf_provider_desc_t *, crypto_ctx_t *,
55     kcf_req_params_t *);

/*
 * Free the list of providers tried, releasing the reference held on
 * each provider descriptor in the list.
 */
void
57 kcf_free_triedlist(kcf_prov_tried_t *list)
58 {
59 	kcf_prov_tried_t *l;
60 
61 	while ((l = list) != NULL) {
62 		list = list->pt_next;
63 		KCF_PROV_REFRELE(l->pt_pd);
64 		kmem_free(l, sizeof (kcf_prov_tried_t));
65 	}
66 }
67 
/*
 * Prepend the given provider to the list of providers already tried for
 * this request. Returns the new list entry, or NULL if the allocation
 * fails.
 */
kcf_prov_tried_t *
69 kcf_insert_triedlist(kcf_prov_tried_t **list, kcf_provider_desc_t *pd,
70     int kmflag)
71 {
72 	kcf_prov_tried_t *l;
73 
74 	l = kmem_alloc(sizeof (kcf_prov_tried_t), kmflag);
75 	if (l == NULL)
76 		return (NULL);
77 
78 	l->pt_pd = pd;
79 	l->pt_next = *list;
80 	*list = l;
81 
82 	return (l);
83 }
84 
/*
 * Return B_TRUE if the specified provider is already in the tried list.
 */
static boolean_t
86 is_in_triedlist(kcf_provider_desc_t *pd, kcf_prov_tried_t *triedl)
87 {
88 	while (triedl != NULL) {
89 		if (triedl->pt_pd == pd)
90 			return (B_TRUE);
91 		triedl = triedl->pt_next;
92 	};
93 
94 	return (B_FALSE);
95 }
96 
97 /*
98  * Search a mech entry's hardware provider list for the specified
99  * provider. Return true if found.
100  */
101 static boolean_t
102 is_valid_provider_for_mech(kcf_provider_desc_t *pd, kcf_mech_entry_t *me)
103 {
104 	kcf_prov_mech_desc_t *prov_chain;
105 
106 	prov_chain = me->me_hw_prov_chain;
107 	if (prov_chain != NULL) {
108 		ASSERT(me->me_num_hwprov > 0);
109 		for (; prov_chain != NULL; prov_chain = prov_chain->pm_next) {
110 			if (prov_chain->pm_prov_desc == pd) {
111 				return (B_TRUE);
112 			}
113 		}
114 	}
115 	return (B_FALSE);
116 }
117 
118 /*
119  * This routine, given a logical provider, returns the least loaded
120  * provider belonging to the logical provider. The provider must be
121  * able to do the specified mechanism, i.e. check that the mechanism
122  * hasn't been disabled. In addition, just in case providers are not
123  * entirely equivalent, the provider's entry point is checked for
124  * non-nullness. This is accomplished by having the caller pass, as
125  * arguments, the offset of the function group (offset_1), and the
126  * offset of the function within the function group (offset_2).
127  * Returns NULL if no provider can be found.
128  */
129 int
130 kcf_get_hardware_provider(crypto_mech_type_t mech_type, offset_t offset_1,
    offset_t offset_2, kcf_provider_desc_t *old, kcf_provider_desc_t **new)
132 {
133 	kcf_provider_desc_t *provider, *gpd = NULL, *real_pd = old;
134 	kcf_provider_list_t *p;
135 	kcf_ops_class_t class;
136 	kcf_mech_entry_t *me;
137 	kcf_mech_entry_tab_t *me_tab;
138 	caddr_t *ops;
139 	int index, len, gqlen = INT_MAX, rv = CRYPTO_SUCCESS;
140 
141 	/* get the mech entry for the specified mechanism */
142 	class = KCF_MECH2CLASS(mech_type);
143 	if ((class < KCF_FIRST_OPSCLASS) || (class > KCF_LAST_OPSCLASS)) {
144 		return (CRYPTO_MECHANISM_INVALID);
145 	}
146 
147 	me_tab = &kcf_mech_tabs_tab[class];
148 	index = KCF_MECH2INDEX(mech_type);
149 	if ((index < 0) || (index >= me_tab->met_size)) {
150 		return (CRYPTO_MECHANISM_INVALID);
151 	}
152 
153 	me = &((me_tab->met_tab)[index]);
154 	mutex_enter(&me->me_mutex);
155 
156 	/*
157 	 * We assume the provider descriptor will not go away because
158 	 * it is being held somewhere, i.e. its reference count has been
159 	 * incremented. In the case of the crypto module, the provider
160 	 * descriptor is held by the session structure.
161 	 */
162 	if (old->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
163 		if (old->pd_provider_list == NULL) {
164 			real_pd = NULL;
165 			rv = CRYPTO_DEVICE_ERROR;
166 			goto out;
167 		}
168 		/*
169 		 * Find the least loaded real provider. tq_nalloc gives
170 		 * the number of task entries in the task queue. We do
171 		 * not acquire tq_lock here as it is not critical to
172 		 * get the exact number and the lock contention may be
173 		 * too costly for this code path.
174 		 */
175 		mutex_enter(&old->pd_lock);
176 		p = old->pd_provider_list;
177 		while (p != NULL) {
178 			provider = p->pl_provider;
179 
180 			ASSERT(provider->pd_prov_type !=
181 			    CRYPTO_LOGICAL_PROVIDER);
182 
183 			if (!KCF_IS_PROV_USABLE(provider)) {
184 				p = p->pl_next;
185 				continue;
186 			}
187 
188 			if (!is_valid_provider_for_mech(provider, me)) {
189 				p = p->pl_next;
190 				continue;
191 			}
192 
193 			if (KCF_PROV_NULL_ENTRY_POINT(provider, offset_1,
194 			    offset_2, ops)) {
195 				p = p->pl_next;
196 				continue;
197 			}
198 
199 			len = provider->pd_sched_info.ks_taskq->tq_nalloc;
200 			if (len < gqlen) {
201 				gqlen = len;
202 				gpd = provider;
203 			}
204 
205 			p = p->pl_next;
206 		}
207 		mutex_exit(&old->pd_lock);
208 
209 		if (gpd != NULL)
210 			real_pd = gpd;
211 		else {
212 			/* can't find provider */
213 			real_pd = NULL;
214 			rv = CRYPTO_MECHANISM_INVALID;
215 		}
216 
217 	} else {
218 		if (!KCF_IS_PROV_USABLE(old)) {
219 			real_pd = NULL;
220 			rv = CRYPTO_DEVICE_ERROR;
221 			goto out;
222 		}
223 
224 		if (!is_valid_provider_for_mech(old, me)) {
225 			real_pd = NULL;
226 			rv = CRYPTO_MECHANISM_INVALID;
227 			goto out;
228 		}
229 
230 		if (KCF_PROV_NULL_ENTRY_POINT(old, offset_1, offset_2, ops)) {
231 			real_pd = NULL;
232 			rv = CRYPTO_NOT_SUPPORTED;
233 		}
234 	}
235 out:
236 	mutex_exit(&me->me_mutex);
237 	*new = real_pd;
238 	return (rv);
239 }
240 
241 /*
242  * This routine, given a logical provider, returns the least loaded
243  * provider belonging to the logical provider. Just in case providers
244  * are not entirely equivalent, the provider's entry point is checked
245  * for non-nullness. This is accomplished by having the caller pass, as
246  * arguments, the offset of the function group (offset_1), and the
247  * offset of the function within the function group (offset_2).
248  * Returns NULL if no provider can be found.
249  */
250 int
251 kcf_get_hardware_provider_nomech(offset_t offset_1, offset_t offset_2,
252     kcf_provider_desc_t *old, kcf_provider_desc_t **new)
253 {
254 	kcf_provider_desc_t *provider, *gpd = NULL, *real_pd = old;
255 	kcf_provider_list_t *p;
256 	caddr_t *ops;
257 	int len, gqlen = INT_MAX, rv = CRYPTO_SUCCESS;
258 
259 	/*
260 	 * We assume the provider descriptor will not go away because
261 	 * it is being held somewhere, i.e. its reference count has been
262 	 * incremented. In the case of the crypto module, the provider
263 	 * descriptor is held by the session structure.
264 	 */
265 	if (old->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
266 		if (old->pd_provider_list == NULL) {
267 			real_pd = NULL;
268 			rv = CRYPTO_DEVICE_ERROR;
269 			goto out;
270 		}
271 		/*
272 		 * Find the least loaded real provider. tq_nalloc gives
273 		 * the number of task entries in the task queue. We do
274 		 * not acquire tq_lock here as it is not critical to
275 		 * get the exact number and the lock contention may be
276 		 * too costly for this code path.
277 		 */
278 		mutex_enter(&old->pd_lock);
279 		p = old->pd_provider_list;
280 		while (p != NULL) {
281 			provider = p->pl_provider;
282 
283 			ASSERT(provider->pd_prov_type !=
284 			    CRYPTO_LOGICAL_PROVIDER);
285 
286 			if (!KCF_IS_PROV_USABLE(provider)) {
287 				p = p->pl_next;
288 				continue;
289 			}
290 
291 			if (KCF_PROV_NULL_ENTRY_POINT(provider, offset_1,
292 			    offset_2, ops)) {
293 				p = p->pl_next;
294 				continue;
295 			}
296 
297 			len = provider->pd_sched_info.ks_taskq->tq_nalloc;
298 			if (len < gqlen) {
299 				gqlen = len;
300 				gpd = provider;
301 			}
302 
303 			p = p->pl_next;
304 		}
305 		mutex_exit(&old->pd_lock);
306 
307 		if (gpd != NULL)
308 			real_pd = gpd;
309 		else {
310 			/* can't find provider */
311 			real_pd = NULL;
312 			rv = CRYPTO_DEVICE_ERROR;
313 		}
314 
315 	} else {
316 		if (!KCF_IS_PROV_USABLE(old)) {
317 			real_pd = NULL;
318 			rv = CRYPTO_DEVICE_ERROR;
319 			goto out;
320 		}
321 
322 		if (KCF_PROV_NULL_ENTRY_POINT(old, offset_1, offset_2, ops)) {
323 			real_pd = NULL;
324 			rv = CRYPTO_NOT_SUPPORTED;
325 		}
326 	}
327 out:
328 	*new = real_pd;
329 	return (rv);
330 }
331 
332 /*
333  * Return the next member of a logical provider, given the previous
 * member. The function returns B_TRUE if the next member is found, and
 * bumps the refcnt of that member before returning it through pd.
336  */
337 boolean_t
338 kcf_get_next_logical_provider_member(kcf_provider_desc_t *logical_provider,
339     kcf_provider_desc_t *prev, kcf_provider_desc_t **pd)
340 {
341 	kcf_provider_list_t *p;
342 	kcf_provider_desc_t *next;
343 
344 	ASSERT(MUTEX_HELD(&logical_provider->pd_lock));
345 	p = logical_provider->pd_provider_list;
346 	while (p != NULL) {
347 		/* start the search */
348 		if (prev == NULL) {
349 			next = p->pl_provider;
350 			goto found;
351 		} else {
352 			/* find where we were before */
353 			if (p->pl_provider == prev) {
354 				if (p->pl_next != NULL) {
355 					next = p->pl_next->pl_provider;
356 					goto found;
357 				}
358 			}
359 		}
360 		p = p->pl_next;
361 	}
362 	return (B_FALSE);
363 
364 found:
365 	KCF_PROV_REFHOLD(next);
366 	*pd = next;
367 	return (B_TRUE);
368 }
369 
370 /*
371  * Return the best provider for the specified mechanism. The provider
372  * is held and it is the caller's responsibility to release it when done.
373  * The fg input argument is used as a search criterion to pick a provider.
374  * A provider has to support this function group to be picked.
375  *
376  * Find the least loaded provider in the list of providers. We do a linear
 * search to find one. This is fine as we assume there is only a small
 * number of providers in this list. If this assumption ever changes,
 * we should revisit this.
 *
 * call_restrict indicates whether the caller is not allowed to use
 * restricted providers.
383  */
384 kcf_provider_desc_t *
385 kcf_get_mech_provider(crypto_mech_type_t mech_type, kcf_mech_entry_t **mepp,
386     int *error, kcf_prov_tried_t *triedl, crypto_func_group_t fg,
387     boolean_t call_restrict, size_t data_size)
388 {
389 	kcf_provider_desc_t *pd = NULL, *gpd = NULL;
390 	kcf_prov_mech_desc_t *prov_chain, *mdesc;
391 	int len, gqlen = INT_MAX;
392 	kcf_ops_class_t class;
393 	int index;
394 	kcf_mech_entry_t *me;
395 	kcf_mech_entry_tab_t *me_tab;
396 
397 	class = KCF_MECH2CLASS(mech_type);
398 	if ((class < KCF_FIRST_OPSCLASS) || (class > KCF_LAST_OPSCLASS)) {
399 		*error = CRYPTO_MECHANISM_INVALID;
400 		return (NULL);
401 	}
402 
403 	me_tab = &kcf_mech_tabs_tab[class];
404 	index = KCF_MECH2INDEX(mech_type);
405 	if ((index < 0) || (index >= me_tab->met_size)) {
406 		*error = CRYPTO_MECHANISM_INVALID;
407 		return (NULL);
408 	}
409 
410 	me = &((me_tab->met_tab)[index]);
411 	if (mepp != NULL)
412 		*mepp = me;
413 
414 	mutex_enter(&me->me_mutex);
415 
416 	prov_chain = me->me_hw_prov_chain;
417 
418 	/*
	 * We check the threshold for using a hardware provider for
420 	 * this amount of data. If there is no software provider available
421 	 * for the mechanism, then the threshold is ignored.
422 	 */
423 	if ((prov_chain != NULL) &&
424 	    ((data_size == 0) || (me->me_threshold == 0) ||
425 	    (data_size > me->me_threshold) ||
426 	    ((mdesc = me->me_sw_prov) == NULL) ||
427 	    (!IS_FG_SUPPORTED(mdesc, fg)) ||
428 	    (!KCF_IS_PROV_USABLE(mdesc->pm_prov_desc)))) {
429 		ASSERT(me->me_num_hwprov > 0);
430 		/* there is at least one provider */
431 
432 		/*
433 		 * Find the least loaded provider. tq_nalloc gives
434 		 * the number of task entries in the task queue. We do
435 		 * not acquire tq_lock here as it is not critical to
436 		 * get the exact number and the lock contention may be
437 		 * too costly for this code path.
438 		 */
439 		while (prov_chain != NULL) {
440 			pd = prov_chain->pm_prov_desc;
441 
442 			if (!IS_FG_SUPPORTED(prov_chain, fg) ||
443 			    !KCF_IS_PROV_USABLE(pd) ||
444 			    IS_PROVIDER_TRIED(pd, triedl) ||
445 			    (call_restrict && pd->pd_restricted)) {
446 				prov_chain = prov_chain->pm_next;
447 				continue;
448 			}
449 
450 			if ((len = pd->pd_sched_info.ks_taskq->tq_nalloc)
451 			    < gqlen) {
452 				gqlen = len;
453 				gpd = pd;
454 			}
455 
456 			prov_chain = prov_chain->pm_next;
457 		}
458 
459 		pd = gpd;
460 	}
461 
462 	/* No HW provider for this mech, is there a SW provider? */
463 	if (pd == NULL && (mdesc = me->me_sw_prov) != NULL) {
464 		pd = mdesc->pm_prov_desc;
465 		if (!IS_FG_SUPPORTED(mdesc, fg) ||
466 		    !KCF_IS_PROV_USABLE(pd) ||
467 		    IS_PROVIDER_TRIED(pd, triedl) ||
468 		    (call_restrict && pd->pd_restricted))
469 			pd = NULL;
470 	}
471 
472 	if (pd == NULL) {
473 		/*
		 * We do not want to report CRYPTO_MECH_NOT_SUPPORTED when we
		 * are in the "fallback to the next provider" case. Rather, we
		 * preserve the error so that the client gets the right error
		 * code.
478 		 */
479 		if (triedl == NULL)
480 			*error = CRYPTO_MECH_NOT_SUPPORTED;
481 	} else
482 		KCF_PROV_REFHOLD(pd);
483 
484 	mutex_exit(&me->me_mutex);
485 	return (pd);
486 }
487 
488 /*
489  * Very similar to kcf_get_mech_provider(). Finds the best provider capable of
490  * a dual operation with both me1 and me2.
491  * When no dual-ops capable providers are available, return the best provider
492  * for me1 only, and sets *prov_mt2 to CRYPTO_INVALID_MECHID;
493  * We assume/expect that a slower HW capable of the dual is still
494  * faster than the 2 fastest providers capable of the individual ops
495  * separately.
496  */
497 kcf_provider_desc_t *
498 kcf_get_dual_provider(crypto_mechanism_t *mech1, crypto_mechanism_t *mech2,
499     kcf_mech_entry_t **mepp, crypto_mech_type_t *prov_mt1,
500     crypto_mech_type_t *prov_mt2, int *error, kcf_prov_tried_t *triedl,
501     crypto_func_group_t fg1, crypto_func_group_t fg2, boolean_t call_restrict,
502     size_t data_size)
503 {
504 	kcf_provider_desc_t *pd = NULL, *pdm1 = NULL, *pdm1m2 = NULL;
505 	kcf_prov_mech_desc_t *prov_chain, *mdesc;
506 	int len, gqlen = INT_MAX, dgqlen = INT_MAX;
507 	crypto_mech_info_list_t *mil;
508 	crypto_mech_type_t m2id =  mech2->cm_type;
509 	kcf_mech_entry_t *me;
510 
	/* when mech1 is a valid mechanism, me will be its mech_entry */
512 	if (kcf_get_mech_entry(mech1->cm_type, &me) != KCF_SUCCESS) {
513 		*error = CRYPTO_MECHANISM_INVALID;
514 		return (NULL);
515 	}
516 
517 	*prov_mt2 = CRYPTO_MECH_INVALID;
518 
519 	if (mepp != NULL)
520 		*mepp = me;
521 	mutex_enter(&me->me_mutex);
522 
523 	prov_chain = me->me_hw_prov_chain;
524 	/*
525 	 * We check the threshold for using a hardware provider for
526 	 * this amount of data. If there is no software provider available
527 	 * for the first mechanism, then the threshold is ignored.
528 	 */
529 	if ((prov_chain != NULL) &&
530 	    ((data_size == 0) || (me->me_threshold == 0) ||
531 	    (data_size > me->me_threshold) ||
532 	    ((mdesc = me->me_sw_prov) == NULL) ||
533 	    (!IS_FG_SUPPORTED(mdesc, fg1)) ||
534 	    (!KCF_IS_PROV_USABLE(mdesc->pm_prov_desc)))) {
535 		/* there is at least one provider */
536 		ASSERT(me->me_num_hwprov > 0);
537 
538 		/*
539 		 * Find the least loaded provider capable of the combo
540 		 * me1 + me2, and save a pointer to the least loaded
541 		 * provider capable of me1 only.
542 		 */
543 		while (prov_chain != NULL) {
544 			pd = prov_chain->pm_prov_desc;
545 			len = pd->pd_sched_info.ks_taskq->tq_nalloc;
546 
547 			if (!IS_FG_SUPPORTED(prov_chain, fg1) ||
548 			    !KCF_IS_PROV_USABLE(pd) ||
549 			    IS_PROVIDER_TRIED(pd, triedl) ||
550 			    (call_restrict && pd->pd_restricted)) {
551 				prov_chain = prov_chain->pm_next;
552 				continue;
553 			}
554 
555 			/* Save the best provider capable of m1 */
556 			if (len < gqlen) {
557 				*prov_mt1 =
558 				    prov_chain->pm_mech_info.cm_mech_number;
559 				gqlen = len;
560 				pdm1 = pd;
561 			}
562 
563 			/* See if pd can do me2 too */
564 			for (mil = prov_chain->pm_mi_list;
565 			    mil != NULL; mil = mil->ml_next) {
566 				if ((mil->ml_mech_info.cm_func_group_mask &
567 				    fg2) == 0)
568 					continue;
569 
570 				if ((mil->ml_kcf_mechid == m2id) &&
571 				    (len < dgqlen)) {
572 					/* Bingo! */
573 					dgqlen = len;
574 					pdm1m2 = pd;
575 					*prov_mt2 =
576 					    mil->ml_mech_info.cm_mech_number;
577 					*prov_mt1 = prov_chain->
578 					    pm_mech_info.cm_mech_number;
579 					break;
580 				}
581 			}
582 
583 			prov_chain = prov_chain->pm_next;
584 		}
585 
	pd = (pdm1m2 != NULL) ? pdm1m2 : pdm1;
587 	}
588 
589 	/* no HW provider for this mech, is there a SW provider? */
590 	if (pd == NULL && (mdesc = me->me_sw_prov) != NULL) {
591 		pd = mdesc->pm_prov_desc;
592 		if (!IS_FG_SUPPORTED(mdesc, fg1) ||
593 		    !KCF_IS_PROV_USABLE(pd) ||
594 		    IS_PROVIDER_TRIED(pd, triedl) ||
595 		    (call_restrict && pd->pd_restricted))
596 			pd = NULL;
597 		else {
598 			/* See if pd can do me2 too */
599 			for (mil = me->me_sw_prov->pm_mi_list;
600 			    mil != NULL; mil = mil->ml_next) {
601 				if ((mil->ml_mech_info.cm_func_group_mask &
602 				    fg2) == 0)
603 					continue;
604 
605 				if (mil->ml_kcf_mechid == m2id) {
606 					/* Bingo! */
607 					*prov_mt2 =
608 					    mil->ml_mech_info.cm_mech_number;
609 					break;
610 				}
611 			}
612 			*prov_mt1 = me->me_sw_prov->pm_mech_info.cm_mech_number;
613 		}
614 	}
615 
616 	if (pd == NULL)
617 		*error = CRYPTO_MECH_NOT_SUPPORTED;
618 	else
619 		KCF_PROV_REFHOLD(pd);
620 
621 	mutex_exit(&me->me_mutex);
622 	return (pd);
623 }
624 
625 /*
626  * Do the actual work of calling the provider routines.
627  *
628  * pd - Provider structure
629  * ctx - Context for this operation
630  * params - Parameters for this operation
631  * rhndl - Request handle to use for notification
632  *
633  * The return values are the same as that of the respective SPI.
634  */
635 int
636 common_submit_request(kcf_provider_desc_t *pd, crypto_ctx_t *ctx,
637     kcf_req_params_t *params, crypto_req_handle_t rhndl)
638 {
639 	int err = CRYPTO_ARGUMENTS_BAD;
640 	kcf_op_type_t optype;
641 
642 	optype = params->rp_optype;
643 
644 	switch (params->rp_opgrp) {
645 	case KCF_OG_DIGEST: {
646 		kcf_digest_ops_params_t *dops = &params->rp_u.digest_params;
647 
648 		switch (optype) {
649 		case KCF_OP_INIT:
650 			/*
651 			 * We should do this only here and not in KCF_WRAP_*
652 			 * macros. This is because we may want to try other
653 			 * providers, in case we recover from a failure.
654 			 */
655 			KCF_SET_PROVIDER_MECHNUM(dops->do_framework_mechtype,
656 			    pd, &dops->do_mech);
657 
658 			err = KCF_PROV_DIGEST_INIT(pd, ctx, &dops->do_mech,
659 			    rhndl);
660 			break;
661 
662 		case KCF_OP_SINGLE:
663 			err = KCF_PROV_DIGEST(pd, ctx, dops->do_data,
664 			    dops->do_digest, rhndl);
665 			break;
666 
667 		case KCF_OP_UPDATE:
668 			err = KCF_PROV_DIGEST_UPDATE(pd, ctx,
669 			    dops->do_data, rhndl);
670 			break;
671 
672 		case KCF_OP_FINAL:
673 			err = KCF_PROV_DIGEST_FINAL(pd, ctx,
674 			    dops->do_digest, rhndl);
675 			break;
676 
677 		case KCF_OP_ATOMIC:
678 			ASSERT(ctx == NULL);
679 			KCF_SET_PROVIDER_MECHNUM(dops->do_framework_mechtype,
680 			    pd, &dops->do_mech);
681 			err = KCF_PROV_DIGEST_ATOMIC(pd, dops->do_sid,
682 			    &dops->do_mech, dops->do_data, dops->do_digest,
683 			    rhndl);
684 			break;
685 
686 		case KCF_OP_DIGEST_KEY:
687 			err = KCF_PROV_DIGEST_KEY(pd, ctx, dops->do_digest_key,
688 			    rhndl);
689 			break;
690 
691 		default:
692 			break;
693 		}
694 		break;
695 	}
696 
697 	case KCF_OG_MAC: {
698 		kcf_mac_ops_params_t *mops = &params->rp_u.mac_params;
699 
700 		switch (optype) {
701 		case KCF_OP_INIT:
702 			KCF_SET_PROVIDER_MECHNUM(mops->mo_framework_mechtype,
703 			    pd, &mops->mo_mech);
704 
705 			err = KCF_PROV_MAC_INIT(pd, ctx, &mops->mo_mech,
706 			    mops->mo_key, mops->mo_templ, rhndl);
707 			break;
708 
709 		case KCF_OP_SINGLE:
710 			err = KCF_PROV_MAC(pd, ctx, mops->mo_data,
711 			    mops->mo_mac, rhndl);
712 			break;
713 
714 		case KCF_OP_UPDATE:
715 			err = KCF_PROV_MAC_UPDATE(pd, ctx, mops->mo_data,
716 			    rhndl);
717 			break;
718 
719 		case KCF_OP_FINAL:
720 			err = KCF_PROV_MAC_FINAL(pd, ctx, mops->mo_mac, rhndl);
721 			break;
722 
723 		case KCF_OP_ATOMIC:
724 			ASSERT(ctx == NULL);
725 			KCF_SET_PROVIDER_MECHNUM(mops->mo_framework_mechtype,
726 			    pd, &mops->mo_mech);
727 
728 			err = KCF_PROV_MAC_ATOMIC(pd, mops->mo_sid,
729 			    &mops->mo_mech, mops->mo_key, mops->mo_data,
730 			    mops->mo_mac, mops->mo_templ, rhndl);
731 			break;
732 
733 		case KCF_OP_MAC_VERIFY_ATOMIC:
734 			ASSERT(ctx == NULL);
735 			KCF_SET_PROVIDER_MECHNUM(mops->mo_framework_mechtype,
736 			    pd, &mops->mo_mech);
737 
738 			err = KCF_PROV_MAC_VERIFY_ATOMIC(pd, mops->mo_sid,
739 			    &mops->mo_mech, mops->mo_key, mops->mo_data,
740 			    mops->mo_mac, mops->mo_templ, rhndl);
741 			break;
742 
743 		default:
744 			break;
745 		}
746 		break;
747 	}
748 
749 	case KCF_OG_ENCRYPT: {
750 		kcf_encrypt_ops_params_t *eops = &params->rp_u.encrypt_params;
751 
752 		switch (optype) {
753 		case KCF_OP_INIT:
754 			KCF_SET_PROVIDER_MECHNUM(eops->eo_framework_mechtype,
755 			    pd, &eops->eo_mech);
756 
757 			err = KCF_PROV_ENCRYPT_INIT(pd, ctx, &eops->eo_mech,
758 			    eops->eo_key, eops->eo_templ, rhndl);
759 			break;
760 
761 		case KCF_OP_SINGLE:
762 			err = KCF_PROV_ENCRYPT(pd, ctx, eops->eo_plaintext,
763 			    eops->eo_ciphertext, rhndl);
764 			break;
765 
766 		case KCF_OP_UPDATE:
767 			err = KCF_PROV_ENCRYPT_UPDATE(pd, ctx,
768 			    eops->eo_plaintext, eops->eo_ciphertext, rhndl);
769 			break;
770 
771 		case KCF_OP_FINAL:
772 			err = KCF_PROV_ENCRYPT_FINAL(pd, ctx,
773 			    eops->eo_ciphertext, rhndl);
774 			break;
775 
776 		case KCF_OP_ATOMIC:
777 			ASSERT(ctx == NULL);
778 			KCF_SET_PROVIDER_MECHNUM(eops->eo_framework_mechtype,
779 			    pd, &eops->eo_mech);
780 
781 			err = KCF_PROV_ENCRYPT_ATOMIC(pd, eops->eo_sid,
782 			    &eops->eo_mech, eops->eo_key, eops->eo_plaintext,
783 			    eops->eo_ciphertext, eops->eo_templ, rhndl);
784 			break;
785 
786 		default:
787 			break;
788 		}
789 		break;
790 	}
791 
792 	case KCF_OG_DECRYPT: {
793 		kcf_decrypt_ops_params_t *dcrops = &params->rp_u.decrypt_params;
794 
795 		switch (optype) {
796 		case KCF_OP_INIT:
797 			KCF_SET_PROVIDER_MECHNUM(dcrops->dop_framework_mechtype,
798 			    pd, &dcrops->dop_mech);
799 
800 			err = KCF_PROV_DECRYPT_INIT(pd, ctx, &dcrops->dop_mech,
801 			    dcrops->dop_key, dcrops->dop_templ, rhndl);
802 			break;
803 
804 		case KCF_OP_SINGLE:
805 			err = KCF_PROV_DECRYPT(pd, ctx, dcrops->dop_ciphertext,
806 			    dcrops->dop_plaintext, rhndl);
807 			break;
808 
809 		case KCF_OP_UPDATE:
810 			err = KCF_PROV_DECRYPT_UPDATE(pd, ctx,
811 			    dcrops->dop_ciphertext, dcrops->dop_plaintext,
812 			    rhndl);
813 			break;
814 
815 		case KCF_OP_FINAL:
816 			err = KCF_PROV_DECRYPT_FINAL(pd, ctx,
817 			    dcrops->dop_plaintext, rhndl);
818 			break;
819 
820 		case KCF_OP_ATOMIC:
821 			ASSERT(ctx == NULL);
822 			KCF_SET_PROVIDER_MECHNUM(dcrops->dop_framework_mechtype,
823 			    pd, &dcrops->dop_mech);
824 
825 			err = KCF_PROV_DECRYPT_ATOMIC(pd, dcrops->dop_sid,
826 			    &dcrops->dop_mech, dcrops->dop_key,
827 			    dcrops->dop_ciphertext, dcrops->dop_plaintext,
828 			    dcrops->dop_templ, rhndl);
829 			break;
830 
831 		default:
832 			break;
833 		}
834 		break;
835 	}
836 
837 	case KCF_OG_SIGN: {
838 		kcf_sign_ops_params_t *sops = &params->rp_u.sign_params;
839 
840 		switch (optype) {
841 		case KCF_OP_INIT:
842 			KCF_SET_PROVIDER_MECHNUM(sops->so_framework_mechtype,
843 			    pd, &sops->so_mech);
844 
845 			err = KCF_PROV_SIGN_INIT(pd, ctx, &sops->so_mech,
846 			    sops->so_key, sops->so_templ, rhndl);
847 			break;
848 
849 		case KCF_OP_SIGN_RECOVER_INIT:
850 			KCF_SET_PROVIDER_MECHNUM(sops->so_framework_mechtype,
851 			    pd, &sops->so_mech);
852 
853 			err = KCF_PROV_SIGN_RECOVER_INIT(pd, ctx,
854 			    &sops->so_mech, sops->so_key, sops->so_templ,
855 			    rhndl);
856 			break;
857 
858 		case KCF_OP_SINGLE:
859 			err = KCF_PROV_SIGN(pd, ctx, sops->so_data,
860 			    sops->so_signature, rhndl);
861 			break;
862 
863 		case KCF_OP_SIGN_RECOVER:
864 			err = KCF_PROV_SIGN_RECOVER(pd, ctx,
865 			    sops->so_data, sops->so_signature, rhndl);
866 			break;
867 
868 		case KCF_OP_UPDATE:
869 			err = KCF_PROV_SIGN_UPDATE(pd, ctx, sops->so_data,
870 			    rhndl);
871 			break;
872 
873 		case KCF_OP_FINAL:
874 			err = KCF_PROV_SIGN_FINAL(pd, ctx, sops->so_signature,
875 			    rhndl);
876 			break;
877 
878 		case KCF_OP_ATOMIC:
879 			ASSERT(ctx == NULL);
880 			KCF_SET_PROVIDER_MECHNUM(sops->so_framework_mechtype,
881 			    pd, &sops->so_mech);
882 
883 			err = KCF_PROV_SIGN_ATOMIC(pd, sops->so_sid,
884 			    &sops->so_mech, sops->so_key, sops->so_data,
885 			    sops->so_templ, sops->so_signature, rhndl);
886 			break;
887 
888 		case KCF_OP_SIGN_RECOVER_ATOMIC:
889 			ASSERT(ctx == NULL);
890 			KCF_SET_PROVIDER_MECHNUM(sops->so_framework_mechtype,
891 			    pd, &sops->so_mech);
892 
893 			err = KCF_PROV_SIGN_RECOVER_ATOMIC(pd, sops->so_sid,
894 			    &sops->so_mech, sops->so_key, sops->so_data,
895 			    sops->so_templ, sops->so_signature, rhndl);
896 			break;
897 
898 		default:
899 			break;
900 		}
901 		break;
902 	}
903 
904 	case KCF_OG_VERIFY: {
905 		kcf_verify_ops_params_t *vops = &params->rp_u.verify_params;
906 
907 		switch (optype) {
908 		case KCF_OP_INIT:
909 			KCF_SET_PROVIDER_MECHNUM(vops->vo_framework_mechtype,
910 			    pd, &vops->vo_mech);
911 
912 			err = KCF_PROV_VERIFY_INIT(pd, ctx, &vops->vo_mech,
913 			    vops->vo_key, vops->vo_templ, rhndl);
914 			break;
915 
916 		case KCF_OP_VERIFY_RECOVER_INIT:
917 			KCF_SET_PROVIDER_MECHNUM(vops->vo_framework_mechtype,
918 			    pd, &vops->vo_mech);
919 
920 			err = KCF_PROV_VERIFY_RECOVER_INIT(pd, ctx,
921 			    &vops->vo_mech, vops->vo_key, vops->vo_templ,
922 			    rhndl);
923 			break;
924 
925 		case KCF_OP_SINGLE:
926 			err = KCF_PROV_VERIFY(pd, ctx, vops->vo_data,
927 			    vops->vo_signature, rhndl);
928 			break;
929 
930 		case KCF_OP_VERIFY_RECOVER:
931 			err = KCF_PROV_VERIFY_RECOVER(pd, ctx,
932 			    vops->vo_signature, vops->vo_data, rhndl);
933 			break;
934 
935 		case KCF_OP_UPDATE:
936 			err = KCF_PROV_VERIFY_UPDATE(pd, ctx, vops->vo_data,
937 			    rhndl);
938 			break;
939 
940 		case KCF_OP_FINAL:
941 			err = KCF_PROV_VERIFY_FINAL(pd, ctx, vops->vo_signature,
942 			    rhndl);
943 			break;
944 
945 		case KCF_OP_ATOMIC:
946 			ASSERT(ctx == NULL);
947 			KCF_SET_PROVIDER_MECHNUM(vops->vo_framework_mechtype,
948 			    pd, &vops->vo_mech);
949 
950 			err = KCF_PROV_VERIFY_ATOMIC(pd, vops->vo_sid,
951 			    &vops->vo_mech, vops->vo_key, vops->vo_data,
952 			    vops->vo_templ, vops->vo_signature, rhndl);
953 			break;
954 
955 		case KCF_OP_VERIFY_RECOVER_ATOMIC:
956 			ASSERT(ctx == NULL);
957 			KCF_SET_PROVIDER_MECHNUM(vops->vo_framework_mechtype,
958 			    pd, &vops->vo_mech);
959 
960 			err = KCF_PROV_VERIFY_RECOVER_ATOMIC(pd, vops->vo_sid,
961 			    &vops->vo_mech, vops->vo_key, vops->vo_signature,
962 			    vops->vo_templ, vops->vo_data, rhndl);
963 			break;
964 
965 		default:
966 			break;
967 		}
968 		break;
969 	}
970 
971 	case KCF_OG_ENCRYPT_MAC: {
972 		kcf_encrypt_mac_ops_params_t *eops =
973 		    &params->rp_u.encrypt_mac_params;
974 		kcf_context_t *kcf_secondctx;
975 
976 		switch (optype) {
977 		case KCF_OP_INIT:
978 			kcf_secondctx = ((kcf_context_t *)
979 			    (ctx->cc_framework_private))->kc_secondctx;
980 
981 			if (kcf_secondctx != NULL) {
982 				err = kcf_emulate_dual(pd, ctx, params);
983 				break;
984 			}
985 			KCF_SET_PROVIDER_MECHNUM(
986 			    eops->em_framework_encr_mechtype,
987 			    pd, &eops->em_encr_mech);
988 
989 			KCF_SET_PROVIDER_MECHNUM(
990 			    eops->em_framework_mac_mechtype,
991 			    pd, &eops->em_mac_mech);
992 
993 			err = KCF_PROV_ENCRYPT_MAC_INIT(pd, ctx,
994 			    &eops->em_encr_mech, eops->em_encr_key,
995 			    &eops->em_mac_mech, eops->em_mac_key,
996 			    eops->em_encr_templ, eops->em_mac_templ,
997 			    rhndl);
998 
999 			break;
1000 
1001 		case KCF_OP_SINGLE:
1002 			err = KCF_PROV_ENCRYPT_MAC(pd, ctx,
1003 			    eops->em_plaintext, eops->em_ciphertext,
1004 			    eops->em_mac, rhndl);
1005 			break;
1006 
1007 		case KCF_OP_UPDATE:
1008 			kcf_secondctx = ((kcf_context_t *)
1009 			    (ctx->cc_framework_private))->kc_secondctx;
1010 			if (kcf_secondctx != NULL) {
1011 				err = kcf_emulate_dual(pd, ctx, params);
1012 				break;
1013 			}
1014 			err = KCF_PROV_ENCRYPT_MAC_UPDATE(pd, ctx,
1015 			    eops->em_plaintext, eops->em_ciphertext, rhndl);
1016 			break;
1017 
1018 		case KCF_OP_FINAL:
1019 			kcf_secondctx = ((kcf_context_t *)
1020 			    (ctx->cc_framework_private))->kc_secondctx;
1021 			if (kcf_secondctx != NULL) {
1022 				err = kcf_emulate_dual(pd, ctx, params);
1023 				break;
1024 			}
1025 			err = KCF_PROV_ENCRYPT_MAC_FINAL(pd, ctx,
1026 			    eops->em_ciphertext, eops->em_mac, rhndl);
1027 			break;
1028 
1029 		case KCF_OP_ATOMIC:
1030 			ASSERT(ctx == NULL);
1031 
1032 			KCF_SET_PROVIDER_MECHNUM(
1033 			    eops->em_framework_encr_mechtype,
1034 			    pd, &eops->em_encr_mech);
1035 
1036 			KCF_SET_PROVIDER_MECHNUM(
1037 			    eops->em_framework_mac_mechtype,
1038 			    pd, &eops->em_mac_mech);
1039 
1040 			err = KCF_PROV_ENCRYPT_MAC_ATOMIC(pd, eops->em_sid,
1041 			    &eops->em_encr_mech, eops->em_encr_key,
1042 			    &eops->em_mac_mech, eops->em_mac_key,
1043 			    eops->em_plaintext, eops->em_ciphertext,
1044 			    eops->em_mac,
1045 			    eops->em_encr_templ, eops->em_mac_templ,
1046 			    rhndl);
1047 
1048 			break;
1049 
1050 		default:
1051 			break;
1052 		}
1053 		break;
1054 	}
1055 
1056 	case KCF_OG_MAC_DECRYPT: {
1057 		kcf_mac_decrypt_ops_params_t *dops =
1058 		    &params->rp_u.mac_decrypt_params;
1059 		kcf_context_t *kcf_secondctx;
1060 
1061 		switch (optype) {
1062 		case KCF_OP_INIT:
1063 			kcf_secondctx = ((kcf_context_t *)
1064 			    (ctx->cc_framework_private))->kc_secondctx;
1065 
1066 			if (kcf_secondctx != NULL) {
1067 				err = kcf_emulate_dual(pd, ctx, params);
1068 				break;
1069 			}
1070 			KCF_SET_PROVIDER_MECHNUM(
1071 			    dops->md_framework_mac_mechtype,
1072 			    pd, &dops->md_mac_mech);
1073 
1074 			KCF_SET_PROVIDER_MECHNUM(
1075 			    dops->md_framework_decr_mechtype,
1076 			    pd, &dops->md_decr_mech);
1077 
1078 			err = KCF_PROV_MAC_DECRYPT_INIT(pd, ctx,
1079 			    &dops->md_mac_mech, dops->md_mac_key,
1080 			    &dops->md_decr_mech, dops->md_decr_key,
1081 			    dops->md_mac_templ, dops->md_decr_templ,
1082 			    rhndl);
1083 
1084 			break;
1085 
1086 		case KCF_OP_SINGLE:
1087 			err = KCF_PROV_MAC_DECRYPT(pd, ctx,
1088 			    dops->md_ciphertext, dops->md_mac,
1089 			    dops->md_plaintext, rhndl);
1090 			break;
1091 
1092 		case KCF_OP_UPDATE:
1093 			kcf_secondctx = ((kcf_context_t *)
1094 			    (ctx->cc_framework_private))->kc_secondctx;
1095 			if (kcf_secondctx != NULL) {
1096 				err = kcf_emulate_dual(pd, ctx, params);
1097 				break;
1098 			}
1099 			err = KCF_PROV_MAC_DECRYPT_UPDATE(pd, ctx,
1100 			    dops->md_ciphertext, dops->md_plaintext, rhndl);
1101 			break;
1102 
1103 		case KCF_OP_FINAL:
1104 			kcf_secondctx = ((kcf_context_t *)
1105 			    (ctx->cc_framework_private))->kc_secondctx;
1106 			if (kcf_secondctx != NULL) {
1107 				err = kcf_emulate_dual(pd, ctx, params);
1108 				break;
1109 			}
1110 			err = KCF_PROV_MAC_DECRYPT_FINAL(pd, ctx,
1111 			    dops->md_mac, dops->md_plaintext, rhndl);
1112 			break;
1113 
1114 		case KCF_OP_ATOMIC:
1115 			ASSERT(ctx == NULL);
1116 
1117 			KCF_SET_PROVIDER_MECHNUM(
1118 			    dops->md_framework_mac_mechtype,
1119 			    pd, &dops->md_mac_mech);
1120 
1121 			KCF_SET_PROVIDER_MECHNUM(
1122 			    dops->md_framework_decr_mechtype,
1123 			    pd, &dops->md_decr_mech);
1124 
1125 			err = KCF_PROV_MAC_DECRYPT_ATOMIC(pd, dops->md_sid,
1126 			    &dops->md_mac_mech, dops->md_mac_key,
1127 			    &dops->md_decr_mech, dops->md_decr_key,
1128 			    dops->md_ciphertext, dops->md_mac,
1129 			    dops->md_plaintext,
1130 			    dops->md_mac_templ, dops->md_decr_templ,
1131 			    rhndl);
1132 
1133 			break;
1134 
1135 		case KCF_OP_MAC_VERIFY_DECRYPT_ATOMIC:
1136 			ASSERT(ctx == NULL);
1137 
1138 			KCF_SET_PROVIDER_MECHNUM(
1139 			    dops->md_framework_mac_mechtype,
1140 			    pd, &dops->md_mac_mech);
1141 
1142 			KCF_SET_PROVIDER_MECHNUM(
1143 			    dops->md_framework_decr_mechtype,
1144 			    pd, &dops->md_decr_mech);
1145 
1146 			err = KCF_PROV_MAC_VERIFY_DECRYPT_ATOMIC(pd,
1147 			    dops->md_sid, &dops->md_mac_mech, dops->md_mac_key,
1148 			    &dops->md_decr_mech, dops->md_decr_key,
1149 			    dops->md_ciphertext, dops->md_mac,
1150 			    dops->md_plaintext,
1151 			    dops->md_mac_templ, dops->md_decr_templ,
1152 			    rhndl);
1153 
1154 			break;
1155 
1156 		default:
1157 			break;
1158 		}
1159 		break;
1160 	}
1161 
1162 	case KCF_OG_KEY: {
1163 		kcf_key_ops_params_t *kops = &params->rp_u.key_params;
1164 
1165 		ASSERT(ctx == NULL);
1166 		KCF_SET_PROVIDER_MECHNUM(kops->ko_framework_mechtype, pd,
1167 		    &kops->ko_mech);
1168 
1169 		switch (optype) {
1170 		case KCF_OP_KEY_GENERATE:
1171 			err = KCF_PROV_KEY_GENERATE(pd, kops->ko_sid,
1172 			    &kops->ko_mech,
1173 			    kops->ko_key_template, kops->ko_key_attribute_count,
1174 			    kops->ko_key_object_id_ptr, rhndl);
1175 			break;
1176 
1177 		case KCF_OP_KEY_GENERATE_PAIR:
1178 			err = KCF_PROV_KEY_GENERATE_PAIR(pd, kops->ko_sid,
1179 			    &kops->ko_mech,
1180 			    kops->ko_key_template, kops->ko_key_attribute_count,
1181 			    kops->ko_private_key_template,
1182 			    kops->ko_private_key_attribute_count,
1183 			    kops->ko_key_object_id_ptr,
1184 			    kops->ko_private_key_object_id_ptr, rhndl);
1185 			break;
1186 
1187 		case KCF_OP_KEY_WRAP:
1188 			err = KCF_PROV_KEY_WRAP(pd, kops->ko_sid,
1189 			    &kops->ko_mech,
1190 			    kops->ko_key, kops->ko_key_object_id_ptr,
1191 			    kops->ko_wrapped_key, kops->ko_wrapped_key_len_ptr,
1192 			    rhndl);
1193 			break;
1194 
1195 		case KCF_OP_KEY_UNWRAP:
1196 			err = KCF_PROV_KEY_UNWRAP(pd, kops->ko_sid,
1197 			    &kops->ko_mech,
1198 			    kops->ko_key, kops->ko_wrapped_key,
1199 			    kops->ko_wrapped_key_len_ptr,
1200 			    kops->ko_key_template, kops->ko_key_attribute_count,
1201 			    kops->ko_key_object_id_ptr, rhndl);
1202 			break;
1203 
1204 		case KCF_OP_KEY_DERIVE:
1205 			err = KCF_PROV_KEY_DERIVE(pd, kops->ko_sid,
1206 			    &kops->ko_mech,
1207 			    kops->ko_key, kops->ko_key_template,
1208 			    kops->ko_key_attribute_count,
1209 			    kops->ko_key_object_id_ptr, rhndl);
1210 			break;
1211 
1212 		default:
1213 			break;
1214 		}
1215 		break;
1216 	}
1217 
1218 	case KCF_OG_RANDOM: {
1219 		kcf_random_number_ops_params_t *rops =
1220 		    &params->rp_u.random_number_params;
1221 
1222 		ASSERT(ctx == NULL);
1223 
1224 		switch (optype) {
1225 		case KCF_OP_RANDOM_SEED:
1226 			err = KCF_PROV_SEED_RANDOM(pd, rops->rn_sid,
1227 			    rops->rn_buf, rops->rn_buflen, rhndl);
1228 			break;
1229 
1230 		case KCF_OP_RANDOM_GENERATE:
1231 			err = KCF_PROV_GENERATE_RANDOM(pd, rops->rn_sid,
1232 			    rops->rn_buf, rops->rn_buflen, rhndl);
1233 			break;
1234 
1235 		default:
1236 			break;
1237 		}
1238 		break;
1239 	}
1240 
1241 	case KCF_OG_SESSION: {
1242 		kcf_session_ops_params_t *sops = &params->rp_u.session_params;
1243 
1244 		ASSERT(ctx == NULL);
1245 		switch (optype) {
1246 		case KCF_OP_SESSION_OPEN:
1247 			err = KCF_PROV_SESSION_OPEN(pd, sops->so_sid_ptr,
1248 			    rhndl, sops->so_pd);
1249 			break;
1250 
1251 		case KCF_OP_SESSION_CLOSE:
1252 			err = KCF_PROV_SESSION_CLOSE(pd, sops->so_sid,
1253 			    rhndl, sops->so_pd);
1254 			break;
1255 
1256 		case KCF_OP_SESSION_LOGIN:
1257 			err = KCF_PROV_SESSION_LOGIN(pd, sops->so_sid,
1258 			    sops->so_user_type, sops->so_pin,
1259 			    sops->so_pin_len, rhndl);
1260 			break;
1261 
1262 		case KCF_OP_SESSION_LOGOUT:
1263 			err = KCF_PROV_SESSION_LOGOUT(pd, sops->so_sid, rhndl);
1264 			break;
1265 
1266 		default:
1267 			break;
1268 		}
1269 		break;
1270 	}
1271 
1272 	case KCF_OG_OBJECT: {
1273 		kcf_object_ops_params_t *jops = &params->rp_u.object_params;
1274 
1275 		ASSERT(ctx == NULL);
1276 		switch (optype) {
1277 		case KCF_OP_OBJECT_CREATE:
1278 			err = KCF_PROV_OBJECT_CREATE(pd, jops->oo_sid,
1279 			    jops->oo_template, jops->oo_attribute_count,
1280 			    jops->oo_object_id_ptr, rhndl);
1281 			break;
1282 
1283 		case KCF_OP_OBJECT_COPY:
1284 			err = KCF_PROV_OBJECT_COPY(pd, jops->oo_sid,
1285 			    jops->oo_object_id,
1286 			    jops->oo_template, jops->oo_attribute_count,
1287 			    jops->oo_object_id_ptr, rhndl);
1288 			break;
1289 
1290 		case KCF_OP_OBJECT_DESTROY:
1291 			err = KCF_PROV_OBJECT_DESTROY(pd, jops->oo_sid,
1292 			    jops->oo_object_id, rhndl);
1293 			break;
1294 
1295 		case KCF_OP_OBJECT_GET_SIZE:
1296 			err = KCF_PROV_OBJECT_GET_SIZE(pd, jops->oo_sid,
1297 			    jops->oo_object_id, jops->oo_object_size, rhndl);
1298 			break;
1299 
1300 		case KCF_OP_OBJECT_GET_ATTRIBUTE_VALUE:
1301 			err = KCF_PROV_OBJECT_GET_ATTRIBUTE_VALUE(pd,
1302 			    jops->oo_sid, jops->oo_object_id,
1303 			    jops->oo_template, jops->oo_attribute_count, rhndl);
1304 			break;
1305 
1306 		case KCF_OP_OBJECT_SET_ATTRIBUTE_VALUE:
1307 			err = KCF_PROV_OBJECT_SET_ATTRIBUTE_VALUE(pd,
1308 			    jops->oo_sid, jops->oo_object_id,
1309 			    jops->oo_template, jops->oo_attribute_count, rhndl);
1310 			break;
1311 
1312 		case KCF_OP_OBJECT_FIND_INIT:
1313 			err = KCF_PROV_OBJECT_FIND_INIT(pd, jops->oo_sid,
1314 			    jops->oo_template, jops->oo_attribute_count,
1315 			    jops->oo_find_init_pp_ptr, rhndl);
1316 			break;
1317 
1318 		case KCF_OP_OBJECT_FIND:
1319 			err = KCF_PROV_OBJECT_FIND(pd, jops->oo_find_pp,
1320 			    jops->oo_object_id_ptr, jops->oo_max_object_count,
1321 			    jops->oo_object_count_ptr, rhndl);
1322 			break;
1323 
1324 		case KCF_OP_OBJECT_FIND_FINAL:
1325 			err = KCF_PROV_OBJECT_FIND_FINAL(pd, jops->oo_find_pp,
1326 			    rhndl);
1327 			break;
1328 
1329 		default:
1330 			break;
1331 		}
1332 		break;
1333 	}
1334 
1335 	case KCF_OG_PROVMGMT: {
1336 		kcf_provmgmt_ops_params_t *pops = &params->rp_u.provmgmt_params;
1337 
1338 		ASSERT(ctx == NULL);
1339 		switch (optype) {
1340 		case KCF_OP_MGMT_EXTINFO:
1341 			err = KCF_PROV_EXT_INFO(pd, pops->po_ext_info, rhndl,
1342 			    pops->po_pd);
1343 			break;
1344 
1345 		case KCF_OP_MGMT_INITTOKEN:
1346 			err = KCF_PROV_INIT_TOKEN(pd, pops->po_pin,
1347 			    pops->po_pin_len, pops->po_label, rhndl);
1348 			break;
1349 
1350 		case KCF_OP_MGMT_INITPIN:
1351 			err = KCF_PROV_INIT_PIN(pd, pops->po_sid, pops->po_pin,
1352 			    pops->po_pin_len, rhndl);
1353 			break;
1354 
1355 		case KCF_OP_MGMT_SETPIN:
1356 			err = KCF_PROV_SET_PIN(pd, pops->po_sid,
1357 			    pops->po_old_pin, pops->po_old_pin_len,
1358 			    pops->po_pin, pops->po_pin_len, rhndl);
1359 			break;
1360 
1361 		default:
1362 			break;
1363 		}
1364 		break;
1365 	}
1366 
1367 	default:
1368 		break;
1369 	}		/* end of switch(params->rp_opgrp) */
1370 
1371 	KCF_PROV_INCRSTATS(pd, err);
1372 	return (err);
1373 }
1374 
1375 /*
 * Emulate the call for a multipart dual operation with two single steps.
 * This routine is always called in the context of a worker thread
 * running kcf_svc_do_run().
 * The single steps are submitted in a purely synchronous way (blocking).
 * When this routine returns, kcf_svc_do_run() will call kcf_aop_done()
 * so the originating consumer's callback gets invoked. kcf_aop_done()
 * takes care of freeing the operation context, so this routine does
 * not free it.
 *
 * The provider descriptor is assumed to be held by the caller.
1386  */
1387 static int
1388 kcf_emulate_dual(kcf_provider_desc_t *pd, crypto_ctx_t *ctx,
1389     kcf_req_params_t *params)
1390 {
1391 	int err = CRYPTO_ARGUMENTS_BAD;
1392 	kcf_op_type_t optype;
1393 	size_t save_len;
1394 	off_t save_offset;
1395 
1396 	optype = params->rp_optype;
1397 
1398 	switch (params->rp_opgrp) {
1399 	case KCF_OG_ENCRYPT_MAC: {
1400 		kcf_encrypt_mac_ops_params_t *cmops =
1401 		    &params->rp_u.encrypt_mac_params;
1402 		kcf_context_t *encr_kcf_ctx;
1403 		crypto_ctx_t *mac_ctx;
1404 		kcf_req_params_t encr_params;
1405 
1406 		encr_kcf_ctx = (kcf_context_t *)(ctx->cc_framework_private);
1407 
1408 		switch (optype) {
1409 		case KCF_OP_INIT: {
1410 			encr_kcf_ctx->kc_secondctx = NULL;
1411 
1412 			KCF_WRAP_ENCRYPT_OPS_PARAMS(&encr_params, KCF_OP_INIT,
1413 			    pd->pd_sid, &cmops->em_encr_mech,
1414 			    cmops->em_encr_key, NULL, NULL,
1415 			    cmops->em_encr_templ);
1416 
1417 			err = kcf_submit_request(pd, ctx, NULL, &encr_params,
1418 			    B_FALSE);
1419 
1420 			/* It can't be CRYPTO_QUEUED */
1421 			if (err != CRYPTO_SUCCESS) {
1422 				break;
1423 			}
1424 
1425 			err = crypto_mac_init(&cmops->em_mac_mech,
1426 			    cmops->em_mac_key, cmops->em_mac_templ,
1427 			    (crypto_context_t *)&mac_ctx, NULL);
1428 
1429 			if (err == CRYPTO_SUCCESS) {
1430 				encr_kcf_ctx->kc_secondctx = (kcf_context_t *)
1431 				    mac_ctx->cc_framework_private;
1432 				KCF_CONTEXT_REFHOLD((kcf_context_t *)
1433 				    mac_ctx->cc_framework_private);
1434 			}
1435 
			break;
		}
1439 		case KCF_OP_UPDATE: {
1440 			crypto_dual_data_t *ct = cmops->em_ciphertext;
1441 			crypto_data_t *pt = cmops->em_plaintext;
1442 			kcf_context_t *mac_kcf_ctx = encr_kcf_ctx->kc_secondctx;
1443 			crypto_ctx_t *mac_ctx = &mac_kcf_ctx->kc_glbl_ctx;
1444 
1445 			KCF_WRAP_ENCRYPT_OPS_PARAMS(&encr_params, KCF_OP_UPDATE,
1446 			    pd->pd_sid, NULL, NULL, pt, (crypto_data_t *)ct,
1447 			    NULL);
1448 
1449 			err = kcf_submit_request(pd, ctx, NULL, &encr_params,
1450 			    B_FALSE);
1451 
1452 			/* It can't be CRYPTO_QUEUED */
1453 			if (err != CRYPTO_SUCCESS) {
1454 				break;
1455 			}
1456 
1457 			save_offset = ct->dd_offset1;
1458 			save_len = ct->dd_len1;
1459 			if (ct->dd_len2 == 0) {
1460 				/*
1461 				 * The previous encrypt step was an
1462 				 * accumulation only and didn't produce any
1463 				 * partial output
1464 				 */
1465 				if (ct->dd_len1 == 0)
1466 					break;
1467 
1468 			} else {
1469 				ct->dd_offset1 = ct->dd_offset2;
1470 				ct->dd_len1 = ct->dd_len2;
1471 			}
1472 			err = crypto_mac_update((crypto_context_t)mac_ctx,
1473 			    (crypto_data_t *)ct, NULL);
1474 
1475 			ct->dd_offset1 = save_offset;
1476 			ct->dd_len1 = save_len;
1477 
1478 			break;
1479 		}
1480 		case KCF_OP_FINAL: {
1481 			crypto_dual_data_t *ct = cmops->em_ciphertext;
1482 			crypto_data_t *mac = cmops->em_mac;
1483 			kcf_context_t *mac_kcf_ctx = encr_kcf_ctx->kc_secondctx;
1484 			crypto_ctx_t *mac_ctx = &mac_kcf_ctx->kc_glbl_ctx;
1485 			crypto_context_t mac_context = mac_ctx;
1486 
1487 			KCF_WRAP_ENCRYPT_OPS_PARAMS(&encr_params, KCF_OP_FINAL,
1488 			    pd->pd_sid, NULL, NULL, NULL, (crypto_data_t *)ct,
1489 			    NULL);
1490 
1491 			err = kcf_submit_request(pd, ctx, NULL, &encr_params,
1492 			    B_FALSE);
1493 
1494 			/* It can't be CRYPTO_QUEUED */
1495 			if (err != CRYPTO_SUCCESS) {
1496 				crypto_cancel_ctx(mac_context);
1497 				break;
1498 			}
1499 
1500 			if (ct->dd_len2 > 0) {
1501 				save_offset = ct->dd_offset1;
1502 				save_len = ct->dd_len1;
1503 				ct->dd_offset1 = ct->dd_offset2;
1504 				ct->dd_len1 = ct->dd_len2;
1505 
1506 				err = crypto_mac_update(mac_context,
1507 				    (crypto_data_t *)ct, NULL);
1508 
1509 				ct->dd_offset1 = save_offset;
1510 				ct->dd_len1 = save_len;
1511 
1512 				if (err != CRYPTO_SUCCESS)  {
1513 					crypto_cancel_ctx(mac_context);
1514 					return (err);
1515 				}
1516 			}
1517 
1518 			/* and finally, collect the MAC */
1519 			err = crypto_mac_final(mac_context, mac, NULL);
1520 			break;
1521 		}
1522 
1523 		default:
1524 			break;
1525 		}
1526 		KCF_PROV_INCRSTATS(pd, err);
1527 		break;
1528 	}
1529 	case KCF_OG_MAC_DECRYPT: {
1530 		kcf_mac_decrypt_ops_params_t *mdops =
1531 		    &params->rp_u.mac_decrypt_params;
1532 		kcf_context_t *decr_kcf_ctx;
1533 		crypto_ctx_t *mac_ctx;
1534 		kcf_req_params_t decr_params;
1535 
1536 		decr_kcf_ctx = (kcf_context_t *)(ctx->cc_framework_private);
1537 
1538 		switch (optype) {
1539 		case KCF_OP_INIT: {
1540 			decr_kcf_ctx->kc_secondctx = NULL;
1541 
1542 			err = crypto_mac_init(&mdops->md_mac_mech,
1543 			    mdops->md_mac_key, mdops->md_mac_templ,
1544 			    (crypto_context_t *)&mac_ctx, NULL);
1545 
1546 			/* It can't be CRYPTO_QUEUED */
1547 			if (err != CRYPTO_SUCCESS) {
1548 				break;
1549 			}
1550 
1551 			KCF_WRAP_DECRYPT_OPS_PARAMS(&decr_params, KCF_OP_INIT,
1552 			    pd->pd_sid, &mdops->md_decr_mech,
1553 			    mdops->md_decr_key, NULL, NULL,
1554 			    mdops->md_decr_templ);
1555 
1556 			err = kcf_submit_request(pd, ctx, NULL, &decr_params,
1557 			    B_FALSE);
1558 
1559 			/* It can't be CRYPTO_QUEUED */
1560 			if (err != CRYPTO_SUCCESS) {
1561 				crypto_cancel_ctx((crypto_context_t)mac_ctx);
1562 				break;
1563 			}
1564 
1565 			decr_kcf_ctx->kc_secondctx = (kcf_context_t *)
1566 			    mac_ctx->cc_framework_private;
1567 			KCF_CONTEXT_REFHOLD((kcf_context_t *)
1568 			    mac_ctx->cc_framework_private);
1569 
			break;
		}
1573 		case KCF_OP_UPDATE: {
1574 			crypto_dual_data_t *ct = mdops->md_ciphertext;
1575 			crypto_data_t *pt = mdops->md_plaintext;
1576 			kcf_context_t *mac_kcf_ctx = decr_kcf_ctx->kc_secondctx;
1577 			crypto_ctx_t *mac_ctx = &mac_kcf_ctx->kc_glbl_ctx;
1578 
1579 			err = crypto_mac_update((crypto_context_t)mac_ctx,
1580 			    (crypto_data_t *)ct, NULL);
1581 
1582 			if (err != CRYPTO_SUCCESS)
1583 				break;
1584 
1585 			save_offset = ct->dd_offset1;
1586 			save_len = ct->dd_len1;
1587 
1588 			/* zero ct->dd_len2 means decrypt everything */
1589 			if (ct->dd_len2 > 0) {
1590 				ct->dd_offset1 = ct->dd_offset2;
1591 				ct->dd_len1 = ct->dd_len2;
1592 			}
1593 
1594 			err = crypto_decrypt_update((crypto_context_t)ctx,
1595 			    (crypto_data_t *)ct, pt, NULL);
1596 
1597 			ct->dd_offset1 = save_offset;
1598 			ct->dd_len1 = save_len;
1599 
1600 			break;
1601 		}
1602 		case KCF_OP_FINAL: {
1603 			crypto_data_t *pt = mdops->md_plaintext;
1604 			crypto_data_t *mac = mdops->md_mac;
1605 			kcf_context_t *mac_kcf_ctx = decr_kcf_ctx->kc_secondctx;
1606 			crypto_ctx_t *mac_ctx = &mac_kcf_ctx->kc_glbl_ctx;
1607 
1608 			err = crypto_mac_final((crypto_context_t)mac_ctx,
1609 			    mac, NULL);
1610 
1611 			if (err != CRYPTO_SUCCESS) {
1612 				crypto_cancel_ctx(ctx);
1613 				break;
1614 			}
1615 
1616 			/* Get the last chunk of plaintext */
1617 			KCF_CONTEXT_REFHOLD(decr_kcf_ctx);
1618 			err = crypto_decrypt_final((crypto_context_t)ctx, pt,
1619 			    NULL);
1620 
1621 			break;
1622 		}
1623 		}
1624 		break;
1625 	}
1626 	default:
1627 
1628 		break;
1629 	}		/* end of switch(params->rp_opgrp) */
1630 
1631 	return (err);
1632 }
1633