/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#ifndef _SYS_CRYPTO_SCHED_IMPL_H
#define	_SYS_CRYPTO_SCHED_IMPL_H

/*
 * Scheduler internal structures.
 */

#ifdef __cplusplus
extern "C" {
#endif

#include <sys/zfs_context.h>
#include <sys/crypto/api.h>
#include <sys/crypto/spi.h>
#include <sys/crypto/impl.h>
#include <sys/crypto/common.h>
#include <sys/crypto/ops_impl.h>

typedef void (kcf_func_t)(void *, int);

typedef enum kcf_req_status {
	REQ_ALLOCATED = 1,
	REQ_WAITING,		/* At the framework level */
	REQ_INPROGRESS,		/* At the provider level */
	REQ_DONE,
	REQ_CANCELED
} kcf_req_status_t;

typedef enum kcf_call_type {
	CRYPTO_SYNCH = 1,
	CRYPTO_ASYNCH
} kcf_call_type_t;

#define	CHECK_RESTRICT(crq) ((crq) != NULL &&	\
	((crq)->cr_flag & CRYPTO_RESTRICTED))

#define	CHECK_RESTRICT_FALSE	B_FALSE

#define	CHECK_FASTPATH(crq, pd) (((crq) == NULL ||	\
	!((crq)->cr_flag & CRYPTO_ALWAYS_QUEUE)) &&	\
	((pd)->pd_prov_type == CRYPTO_SW_PROVIDER))

#define	KCF_KMFLAG(crq)	(((crq) == NULL) ? KM_SLEEP : KM_NOSLEEP)
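
/*
 * Illustrative sketch (hypothetical caller, not part of this file):
 * how the two macros above combine in a dispatch path. A NULL crq
 * means a synchronous caller that may block, so KM_SLEEP is safe;
 * a non-NULL crq with CRYPTO_ALWAYS_QUEUE clear is completed in the
 * caller's context and must not block.
 *
 *	if (CHECK_FASTPATH(crq, pd)) {
 *		// Complete in the caller's context. Any allocation
 *		// here must use KCF_KMFLAG(crq), which yields
 *		// KM_NOSLEEP whenever crq != NULL.
 *		buf = kmem_alloc(len, KCF_KMFLAG(crq));
 *	}
 */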

/*
 * The framework keeps an internal handle to use in the adaptive
 * asynchronous case. This is the case when a client has the
 * CRYPTO_ALWAYS_QUEUE bit clear and a software provider is used for
 * the request. The request is completed in the context of the calling
 * thread and kernel memory must be allocated with KM_NOSLEEP.
 *
 * The framework passes a pointer to the handle in the crypto_req_handle_t
 * argument when it calls the SPI of the software provider. The macros
 * KCF_RHNDL() and KCF_SWFP_RHNDL() are used to do this.
 *
 * When a provider asks the framework for the kmflag value via
 * crypto_kmflag(9S), we use the REQHNDL2_KMFLAG() macro.
 */
extern ulong_t kcf_swprov_hndl;
#define	KCF_RHNDL(kmflag) (((kmflag) == KM_SLEEP) ? NULL : &kcf_swprov_hndl)
#define	KCF_SWFP_RHNDL(crq) (((crq) == NULL) ? NULL : &kcf_swprov_hndl)
#define	REQHNDL2_KMFLAG(rhndl) \
	(((rhndl) == &kcf_swprov_hndl) ? KM_NOSLEEP : KM_SLEEP)
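
/*
 * Handle round trip (illustrative): for a synchronous request the
 * framework passes a NULL handle, so the provider may block; in the
 * adaptive asynchronous case it passes &kcf_swprov_hndl and the
 * provider must not block.
 *
 *	crypto_req_handle_t rhndl = KCF_SWFP_RHNDL(crq);
 *	// ... SPI call made with rhndl ...
 *	// Later, inside the provider, via crypto_kmflag(9S):
 *	int kmflag = REQHNDL2_KMFLAG(rhndl);
 *	ptr = kmem_alloc(size, kmflag);
 */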

/* Internal call_req flags. They start after the public ones in api.h */

#define	CRYPTO_SETDUAL	0x00001000	/* Set the 'cont' boolean before */
					/* submitting the request */
#define	KCF_ISDUALREQ(crq)	\
	(((crq) == NULL) ? B_FALSE : ((crq)->cr_flag & CRYPTO_SETDUAL))

typedef struct kcf_prov_tried {
	kcf_provider_desc_t	*pt_pd;
	struct kcf_prov_tried	*pt_next;
} kcf_prov_tried_t;

#define	IS_FG_SUPPORTED(mdesc, fg)		\
	(((mdesc)->pm_mech_info.cm_func_group_mask & (fg)) != 0)

#define	IS_PROVIDER_TRIED(pd, tlist)		\
	((tlist) != NULL && is_in_triedlist(pd, tlist))

#define	IS_RECOVERABLE(error)			\
	((error) == CRYPTO_BUFFER_TOO_BIG ||	\
	(error) == CRYPTO_BUSY ||		\
	(error) == CRYPTO_DEVICE_ERROR ||	\
	(error) == CRYPTO_DEVICE_MEMORY ||	\
	(error) == CRYPTO_KEY_SIZE_RANGE ||	\
	(error) == CRYPTO_NO_PERMISSION)
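
/*
 * These macros support the scheduler's provider-retry pattern. A
 * hypothetical, simplified sketch of that flow (the real dispatch
 * code also handles logical providers and reference counts):
 *
 *	kcf_prov_tried_t *tried = NULL;
 *
 *	while ((pd = kcf_get_mech_provider(mech_type, &me, &error,
 *	    tried, fg, CHECK_RESTRICT(crq), 0)) != NULL) {
 *		error = common_submit_request(pd, ctx, params, rhndl);
 *		if (!IS_RECOVERABLE(error))
 *			break;
 *		// Remember the failed provider so it is skipped on
 *		// the next selection pass.
 *		(void) kcf_insert_triedlist(&tried, pd, KCF_KMFLAG(crq));
 *	}
 *	if (tried != NULL)
 *		kcf_free_triedlist(tried);
 */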

#define	KCF_ATOMIC_INCR(x)	atomic_add_32(&(x), 1)
#define	KCF_ATOMIC_DECR(x)	atomic_add_32(&(x), -1)

/*
 * Node structure for synchronous requests.
 */
typedef struct kcf_sreq_node {
	/* Should always be the first field in this structure */
	kcf_call_type_t		sn_type;
	/*
	 * sn_cv and sn_lock are used to wait for the
	 * operation to complete. sn_lock also protects
	 * the sn_state field.
	 */
	kcondvar_t		sn_cv;
	kmutex_t		sn_lock;
	kcf_req_status_t	sn_state;

	/*
	 * Return value from the operation. This will be
	 * one of the CRYPTO_* errors defined in common.h.
	 */
	int			sn_rv;

	/*
	 * Parameters to call the SPI with. This can be
	 * a pointer since we know the caller's context/stack stays intact.
	 */
	struct kcf_req_params	*sn_params;

	/* Internal context for this request */
	struct kcf_context	*sn_context;

	/* Provider handling this request */
	kcf_provider_desc_t	*sn_provider;
} kcf_sreq_node_t;

/*
 * Node structure for asynchronous requests. A node can be on
 * a chain of requests hanging off the internal context
 * structure and can be in the global software provider queue.
 */
typedef struct kcf_areq_node {
	/* Should always be the first field in this structure */
	kcf_call_type_t		an_type;

	/* an_lock protects the an_state field */
	kmutex_t		an_lock;
	kcf_req_status_t	an_state;
	crypto_call_req_t	an_reqarg;

	/*
	 * Parameters to call the SPI with. We need to
	 * save the params since the caller's stack can go away.
	 */
	struct kcf_req_params	an_params;

	/*
	 * The next two fields should be NULL for operations that
	 * don't need a context.
	 */
	/* Internal context for this request */
	struct kcf_context	*an_context;

	/* next in chain of requests for context */
	struct kcf_areq_node	*an_ctxchain_next;

	kcondvar_t		an_turn_cv;
	boolean_t		an_is_my_turn;
	boolean_t		an_isdual;	/* for internal reuse */

	/*
	 * Next and previous nodes in the global software
	 * queue. These fields are NULL for a hardware
	 * provider since we use a taskq there.
	 */
	struct kcf_areq_node	*an_next;
	struct kcf_areq_node	*an_prev;

	/* Provider handling this request */
	kcf_provider_desc_t	*an_provider;
	kcf_prov_tried_t	*an_tried_plist;

	struct kcf_areq_node	*an_idnext;	/* Next in ID hash */
	struct kcf_areq_node	*an_idprev;	/* Prev in ID hash */
	kcondvar_t		an_done;	/* Signal request completion */
	uint_t			an_refcnt;
} kcf_areq_node_t;

#define	KCF_AREQ_REFHOLD(areq) {		\
	atomic_add_32(&(areq)->an_refcnt, 1);	\
	ASSERT((areq)->an_refcnt != 0);		\
}

#define	KCF_AREQ_REFRELE(areq) {				\
	ASSERT((areq)->an_refcnt != 0);				\
	membar_exit();						\
	if (atomic_add_32_nv(&(areq)->an_refcnt, -1) == 0)	\
		kcf_free_req(areq);				\
}

#define	GET_REQ_TYPE(arg) *((kcf_call_type_t *)(arg))

#define	NOTIFY_CLIENT(areq, err) (*(areq)->an_reqarg.cr_callback_func)(\
	(areq)->an_reqarg.cr_callback_arg, err);
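
/*
 * Typical hold/release pairing (an illustrative sketch): any code
 * path that publishes an areq (e.g. on the ID hash) or looks one up
 * holds a reference across its use.
 *
 *	KCF_AREQ_REFHOLD(areq);
 *	// ... request is processed ...
 *	mutex_enter(&areq->an_lock);
 *	areq->an_state = REQ_DONE;
 *	mutex_exit(&areq->an_lock);
 *	NOTIFY_CLIENT(areq, error);
 *	KCF_AREQ_REFRELE(areq);		// last release calls kcf_free_req()
 */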

/* For internally generated call requests for dual operations */
typedef	struct kcf_call_req {
	crypto_call_req_t	kr_callreq;	/* external client call req */
	kcf_req_params_t	kr_params;	/* Params saved for next call */
	kcf_areq_node_t		*kr_areq;	/* Use this areq */
	off_t			kr_saveoffset;
	size_t			kr_savelen;
} kcf_dual_req_t;

/*
 * The following are somewhat similar to macros in callo.h, which implement
 * callout tables.
 *
 * The lower four bits of the ID are used to encode the table to
 * index into. The REQID_COUNTER_HIGH bit is used to avoid any check for
 * wrap around when generating an ID. We assume that there won't be a request
 * which takes more time than 2^(8 * sizeof (long) - 5) other requests
 * submitted after it. This ensures there won't be any ID collision.
 */
#define	REQID_COUNTER_HIGH	(1UL << (8 * sizeof (long) - 1))
#define	REQID_COUNTER_SHIFT	4
#define	REQID_COUNTER_LOW	(1 << REQID_COUNTER_SHIFT)
#define	REQID_TABLES		16
#define	REQID_TABLE_MASK	(REQID_TABLES - 1)

#define	REQID_BUCKETS		512
#define	REQID_BUCKET_MASK	(REQID_BUCKETS - 1)
#define	REQID_HASH(id)	(((id) >> REQID_COUNTER_SHIFT) & REQID_BUCKET_MASK)

#define	GET_REQID(areq) (areq)->an_reqarg.cr_reqid
#define	SET_REQID(areq, val)	GET_REQID(areq) = (val)
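
/*
 * ID layout (illustrative): bits 0-3 select one of the REQID_TABLES
 * reqid tables, the counter occupies the bits above them (advancing
 * in steps of REQID_COUNTER_LOW), and REQID_COUNTER_HIGH keeps the
 * top bit set so no wrap-around check is needed. For example:
 *
 *	id = REQID_COUNTER_HIGH | (counter << REQID_COUNTER_SHIFT) | tid;
 *	tid == (id & REQID_TABLE_MASK);		// which kcf_reqid_table
 *	bucket == REQID_HASH(id);		// index into rt_idhash[]
 */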

/*
 * Hash table for async requests.
 */
typedef struct kcf_reqid_table {
	kmutex_t		rt_lock;
	crypto_req_id_t		rt_curid;
	kcf_areq_node_t		*rt_idhash[REQID_BUCKETS];
} kcf_reqid_table_t;

/*
 * Global software provider queue structure. Requests that are to be
 * handled by a SW provider and have the CRYPTO_ALWAYS_QUEUE flag set
 * are queued here.
 */
typedef struct kcf_global_swq {
	/*
	 * gs_cv and gs_lock are used to wait for new requests.
	 * gs_lock protects the changes to the queue.
	 */
	kcondvar_t		gs_cv;
	kmutex_t		gs_lock;
	uint_t			gs_njobs;
	uint_t			gs_maxjobs;
	kcf_areq_node_t		*gs_first;
	kcf_areq_node_t		*gs_last;
} kcf_global_swq_t;
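
/*
 * A pool thread drains this queue roughly as follows (a sketch only,
 * not the actual service-loop code):
 *
 *	mutex_enter(&gswq->gs_lock);
 *	while (gswq->gs_first == NULL)
 *		cv_wait(&gswq->gs_cv, &gswq->gs_lock);
 *	req = gswq->gs_first;
 *	if ((gswq->gs_first = req->an_next) == NULL)
 *		gswq->gs_last = NULL;
 *	gswq->gs_njobs--;
 *	mutex_exit(&gswq->gs_lock);
 */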


/*
 * Internal representation of a canonical context. We embed the
 * crypto_ctx_t structure in order to have just one memory allocation.
 * The SPI's ((crypto_ctx_t *)ctx)->cc_framework_private pointer maps
 * to this structure.
 */
typedef struct kcf_context {
	crypto_ctx_t		kc_glbl_ctx;
	uint_t			kc_refcnt;
	kmutex_t		kc_in_use_lock;
	/*
	 * kc_req_chain_first and kc_req_chain_last are used to chain
	 * multiple async requests using the same context. They should be
	 * NULL for sync requests.
	 */
	kcf_areq_node_t		*kc_req_chain_first;
	kcf_areq_node_t		*kc_req_chain_last;
	kcf_provider_desc_t	*kc_prov_desc;	/* Prov. descriptor */
	kcf_provider_desc_t	*kc_sw_prov_desc;	/* SW prov. descriptor */
	kcf_mech_entry_t	*kc_mech;
	struct kcf_context	*kc_secondctx;	/* for dual contexts */
} kcf_context_t;
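
/*
 * The mapping between the two views of a context (illustrative,
 * assuming the single-allocation layout described above):
 *
 *	crypto_ctx_t *ctx;		// what SPI calls operate on
 *	kcf_context_t *kcf_ctx =
 *	    (kcf_context_t *)ctx->cc_framework_private;
 *	ASSERT(&kcf_ctx->kc_glbl_ctx == ctx);
 */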

/*
 * Bump up the reference count on the framework private context. A
 * global context or a request that references this structure should
 * do a hold.
 */
#define	KCF_CONTEXT_REFHOLD(ictx) {		\
	atomic_add_32(&(ictx)->kc_refcnt, 1);	\
	ASSERT((ictx)->kc_refcnt != 0);		\
}

/*
 * Decrement the reference count on the framework private context.
 * When the last reference is released, the framework private
 * context structure is freed along with the global context.
 */
#define	KCF_CONTEXT_REFRELE(ictx) {				\
	ASSERT((ictx)->kc_refcnt != 0);				\
	membar_exit();						\
	if (atomic_add_32_nv(&(ictx)->kc_refcnt, -1) == 0)	\
		kcf_free_context(ictx);				\
}

/*
 * Check if we can release the context now. In the CRYPTO_QUEUED case,
 * we do not release it, as we can do so only after the provider notifies
 * us. In the CRYPTO_BUSY case, the client can retry the request using
 * the context, so we do not release the context either.
 *
 * This macro should be called only from the final routine in
 * an init/update/final sequence. We do not release the context for
 * update operations. We require the consumer to free the context
 * explicitly, in case it wants to abandon the operation. This is done
 * because there may be mechanisms in ECB mode that can continue even if
 * an operation on a block fails.
 */
#define	KCF_CONTEXT_COND_RELEASE(rv, kcf_ctx) {			\
	if (KCF_CONTEXT_DONE(rv))				\
		KCF_CONTEXT_REFRELE(kcf_ctx);			\
}

/*
 * This macro determines whether we're done with a context.
 */
#define	KCF_CONTEXT_DONE(rv)					\
	((rv) != CRYPTO_QUEUED && (rv) != CRYPTO_BUSY &&	\
	    (rv) != CRYPTO_BUFFER_TOO_SMALL)
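
/*
 * Typical use in a final routine (a hedged sketch; the KCF_PROV_*_FINAL
 * dispatch macro is assumed to come from <sys/crypto/impl.h>): after
 * the SPI call returns, drop the context reference unless the return
 * value says the operation is still queued or retryable.
 *
 *	error = KCF_PROV_ENCRYPT_FINAL(pd, ctx, ciphertext, rhndl);
 *	// ...
 *	KCF_CONTEXT_COND_RELEASE(error, kcf_ctx);
 */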

/*
 * A crypto_ctx_template_t is internally a pointer to this struct
 */
typedef	struct kcf_ctx_template {
	crypto_kcf_provider_handle_t	ct_prov_handle;	/* provider handle */
	uint_t				ct_generation;	/* generation # */
	size_t				ct_size;	/* for freeing */
	crypto_spi_ctx_template_t	ct_prov_tmpl;	/* context template */
							/* from the SW prov */
} kcf_ctx_template_t;

/*
 * Structure for pool of threads working on global software queue.
 */
typedef struct kcf_pool {
	uint32_t	kp_threads;		/* Number of threads in pool */
	uint32_t	kp_idlethreads;		/* Idle threads in pool */
	uint32_t	kp_blockedthreads;	/* Blocked threads in pool */

	/*
	 * cv & lock to monitor the condition when no threads
	 * are around. In this case the failover thread kicks in.
	 */
	kcondvar_t	kp_nothr_cv;
	kmutex_t	kp_thread_lock;

	/* Userspace thread creator variables. */
	boolean_t	kp_signal_create_thread; /* Create requested flag */
	int		kp_nthrs;		/* # of threads to create */
	boolean_t	kp_user_waiting;	/* Thread waiting for work */

	/*
	 * cv & lock for the condition where more threads need to be
	 * created. kp_user_lock also protects the three fields above.
	 */
	kcondvar_t	kp_user_cv;		/* Creator cond. variable */
	kmutex_t	kp_user_lock;		/* Creator lock */
} kcf_pool_t;


/*
 * State of a crypto bufcall element.
 */
typedef enum cbuf_state {
	CBUF_FREE = 1,
	CBUF_WAITING,
	CBUF_RUNNING
} cbuf_state_t;

/*
 * Structure of a crypto bufcall element.
 */
typedef struct kcf_cbuf_elem {
	/*
	 * lock and cv to wait for CBUF_RUNNING to be done.
	 * kc_lock also protects kc_state.
	 */
	kmutex_t		kc_lock;
	kcondvar_t		kc_cv;
	cbuf_state_t		kc_state;

	struct kcf_cbuf_elem	*kc_next;
	struct kcf_cbuf_elem	*kc_prev;

	void			(*kc_func)(void *arg);
	void			*kc_arg;
} kcf_cbuf_elem_t;

/*
 * State of a notify element.
 */
typedef enum ntfy_elem_state {
	NTFY_WAITING = 1,
	NTFY_RUNNING
} ntfy_elem_state_t;

/*
 * Structure of a notify list element.
 */
typedef struct kcf_ntfy_elem {
	/*
	 * lock and cv to wait for NTFY_RUNNING to be done.
	 * kn_lock also protects kn_state.
	 */
	kmutex_t			kn_lock;
	kcondvar_t			kn_cv;
	ntfy_elem_state_t		kn_state;

	struct kcf_ntfy_elem		*kn_next;
	struct kcf_ntfy_elem		*kn_prev;

	crypto_notify_callback_t	kn_func;
	uint32_t			kn_event_mask;
} kcf_ntfy_elem_t;


/*
 * The following values are based on the assumption that it would
 * take around eight CPUs to load a hardware provider (this is true for
 * at least one product) and that a kernel client may come from different
 * low-priority interrupt levels. We will have CRYPTO_TASKQ_MIN number
 * of cached taskq entries. The CRYPTO_TASKQ_MAX number is based on
 * a throughput of 1 GB/s using 512-byte buffers. These are just
 * reasonable estimates and might need to change in the future.
 */
#define	CRYPTO_TASKQ_THREADS	8
#define	CRYPTO_TASKQ_MIN	64
#define	CRYPTO_TASKQ_MAX	(2 * 1024 * 1024)
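
/*
 * The CRYPTO_TASKQ_MAX estimate is simple arithmetic: 1 GB/s of
 * 512-byte buffers is 2^30 / 2^9 = 2^21 = 2 * 1024 * 1024 requests
 * per second, i.e. about one second's worth of requests may be
 * outstanding before taskq entry allocation fails.
 */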

extern int crypto_taskq_threads;
extern int crypto_taskq_minalloc;
extern int crypto_taskq_maxalloc;
extern kcf_global_swq_t *gswq;
extern int kcf_maxthreads;
extern int kcf_minthreads;

/*
 * All pending crypto bufcalls are put on a list. cbuf_list_lock
 * protects changes to this list.
 */
extern kmutex_t cbuf_list_lock;
extern kcondvar_t cbuf_list_cv;

/*
 * All event subscribers are put on a list. kcf_notify_list_lock
 * protects changes to this list.
 */
extern kmutex_t ntfy_list_lock;
extern kcondvar_t ntfy_list_cv;

boolean_t kcf_get_next_logical_provider_member(kcf_provider_desc_t *,
    kcf_provider_desc_t *, kcf_provider_desc_t **);
extern int kcf_get_hardware_provider(crypto_mech_type_t, crypto_mech_type_t,
    boolean_t, kcf_provider_desc_t *, kcf_provider_desc_t **,
    crypto_func_group_t);
extern int kcf_get_hardware_provider_nomech(offset_t, offset_t,
    boolean_t, kcf_provider_desc_t *, kcf_provider_desc_t **);
extern void kcf_free_triedlist(kcf_prov_tried_t *);
extern kcf_prov_tried_t *kcf_insert_triedlist(kcf_prov_tried_t **,
    kcf_provider_desc_t *, int);
extern kcf_provider_desc_t *kcf_get_mech_provider(crypto_mech_type_t,
    kcf_mech_entry_t **, int *, kcf_prov_tried_t *, crypto_func_group_t,
    boolean_t, size_t);
extern kcf_provider_desc_t *kcf_get_dual_provider(crypto_mechanism_t *,
    crypto_mechanism_t *, kcf_mech_entry_t **, crypto_mech_type_t *,
    crypto_mech_type_t *, int *, kcf_prov_tried_t *,
    crypto_func_group_t, crypto_func_group_t, boolean_t, size_t);
extern crypto_ctx_t *kcf_new_ctx(crypto_call_req_t *, kcf_provider_desc_t *,
    crypto_session_id_t);
extern int kcf_submit_request(kcf_provider_desc_t *, crypto_ctx_t *,
    crypto_call_req_t *, kcf_req_params_t *, boolean_t);
extern void kcf_sched_destroy(void);
extern void kcf_sched_init(void);
extern void kcf_sched_start(void);
extern void kcf_sop_done(kcf_sreq_node_t *, int);
extern void kcf_aop_done(kcf_areq_node_t *, int);
extern int common_submit_request(kcf_provider_desc_t *,
    crypto_ctx_t *, kcf_req_params_t *, crypto_req_handle_t);
extern void kcf_free_context(kcf_context_t *);

extern int kcf_svc_wait(int *);
extern int kcf_svc_do_run(void);
extern int kcf_need_signature_verification(kcf_provider_desc_t *);
extern void kcf_verify_signature(void *);
extern struct modctl *kcf_get_modctl(crypto_provider_info_t *);
extern void verify_unverified_providers(void);
extern void kcf_free_req(kcf_areq_node_t *areq);
extern void crypto_bufcall_service(void);

extern void kcf_walk_ntfylist(uint32_t, void *);
extern void kcf_do_notify(kcf_provider_desc_t *, boolean_t);

extern kcf_dual_req_t *kcf_alloc_req(crypto_call_req_t *);
extern void kcf_next_req(void *, int);
extern void kcf_last_req(void *, int);

#ifdef __cplusplus
}
#endif

#endif /* _SYS_CRYPTO_SCHED_IMPL_H */