/*	$OpenBSD: crypto.c,v 1.15 2000/12/13 08:34:05 provos Exp $	*/

/*
 * The author of this code is Angelos D. Keromytis (angelos@cis.upenn.edu)
 *
 * This code was written by Angelos D. Keromytis in Athens, Greece, in
 * February 2000. Network Security Technologies Inc. (NSTI) kindly
 * supported the development of this code.
 *
 * Copyright (c) 2000 Angelos D. Keromytis
 *
 * Permission to use, copy, and modify this software without fee
 * is hereby granted, provided that this entire notice is included in
 * all source code copies of any software which is or includes a copy or
 * modification of this software.
 *
 * THIS SOFTWARE IS BEING PROVIDED "AS IS", WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTY. IN PARTICULAR, NONE OF THE AUTHORS MAKES ANY
 * REPRESENTATION OR WARRANTY OF ANY KIND CONCERNING THE
 * MERCHANTABILITY OF THIS SOFTWARE OR ITS FITNESS FOR ANY PARTICULAR
 * PURPOSE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/sysctl.h>
#include <sys/errno.h>
#include <sys/md5k.h>
#include <dev/rndvar.h>
#include <crypto/sha1.h>
#include <crypto/rmd160.h>
#include <crypto/cast.h>
#include <crypto/skipjack.h>
#include <crypto/blf.h>
#include <crypto/crypto.h>
#include <crypto/xform.h>

struct cryptocap *crypto_drivers = NULL;
int crypto_drivers_num = 0;

struct cryptop *cryptop_queue = NULL;
struct cryptodesc *cryptodesc_queue = NULL;

int crypto_queue_num = 0;
int crypto_queue_max = CRYPTO_MAX_CACHED;

struct cryptop *crp_req_queue = NULL;
struct cryptop **crp_req_queue_tail = NULL;

/*
 * Create a new session.
 */
int
crypto_newsession(u_int64_t *sid, struct cryptoini *cri)
{
    struct cryptoini *cr;
    u_int32_t hid, lid;
    int err;

    if (crypto_drivers == NULL)
      return EINVAL;

    /*
     * The algorithm we use here is pretty stupid; just use the
     * first driver that supports all the algorithms we need.
     *
     * XXX We need more smarts here (in real life too, but that's
     * XXX another story altogether).
     */

    for (hid = 0; hid < crypto_drivers_num; hid++)
    {
        /*
         * If it's not initialized or has remaining sessions referencing
         * it, skip.
         */
        if ((crypto_drivers[hid].cc_newsession == NULL) ||
            (crypto_drivers[hid].cc_flags & CRYPTOCAP_F_CLEANUP))
          continue;

        /* See if all the algorithms are supported */
        for (cr = cri; cr; cr = cr->cri_next)
          if (crypto_drivers[hid].cc_alg[cr->cri_alg] == 0)
            break;

        /* Ok, all algorithms are supported */
        if (cr == NULL)
          break;
    }

    /*
     * Can't do everything in one session.
     *
     * XXX Fix this. We need to inject a "virtual" session layer right
     * XXX about here.
     */

    if (hid == crypto_drivers_num)
      return EINVAL;

    /* Call the driver initialization routine */
    lid = hid; /* Pass the driver ID */
    err = crypto_drivers[hid].cc_newsession(&lid, cri);
    if (err == 0)
    {
        (*sid) = hid;
        (*sid) <<= 32;
        (*sid) |= (lid & 0xffffffff);
        crypto_drivers[hid].cc_sessions++;
    }

    return err;
}
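
/*
 * Illustrative sketch (not part of the original file): how a consumer
 * such as the IPsec code might chain two cryptoini structures -- one for
 * encryption, one for authentication -- and create a session.  Only
 * cri_alg and cri_next are relied on by crypto_newsession() above; the
 * algorithm constants are just examples and any key-related fields from
 * crypto.h are left out here.  The upper 32 bits of the returned SID
 * select the driver, the lower 32 bits carry the driver's own session ID.
 *
 *      struct cryptoini cria, crie;
 *      u_int64_t sid;
 *      int error;
 *
 *      bzero(&crie, sizeof(crie));
 *      bzero(&cria, sizeof(cria));
 *      crie.cri_alg = CRYPTO_3DES_CBC;
 *      cria.cri_alg = CRYPTO_SHA1_HMAC;
 *      crie.cri_next = &cria;
 *
 *      error = crypto_newsession(&sid, &crie);
 */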

/*
 * Delete an existing session (or a reserved session on an unregistered
 * driver).
 */
int
crypto_freesession(u_int64_t sid)
{
    u_int32_t hid;
    int err = 0;

    if (crypto_drivers == NULL)
      return EINVAL;

    /* Determine the driver from the upper 32 bits of the SID */
    hid = (sid >> 32) & 0xffffffff;

    if (hid >= crypto_drivers_num)
      return ENOENT;

    if (crypto_drivers[hid].cc_sessions)
      crypto_drivers[hid].cc_sessions--;

    /* Call the driver cleanup routine, if available */
    if (crypto_drivers[hid].cc_freesession)
      err = crypto_drivers[hid].cc_freesession(sid);

    /*
     * If this was the last session of a driver marked as invalid, make
     * the entry available for reuse.
     */
    if ((crypto_drivers[hid].cc_flags & CRYPTOCAP_F_CLEANUP) &&
        (crypto_drivers[hid].cc_sessions == 0))
      bzero(&crypto_drivers[hid], sizeof(struct cryptocap));

    return err;
}

/*
 * Find an empty slot.
 */
int32_t
crypto_get_driverid(void)
{
    struct cryptocap *newdrv;
    int i;

    if (crypto_drivers_num == 0)
    {
        crypto_drivers_num = CRYPTO_DRIVERS_INITIAL;
        crypto_drivers = malloc(crypto_drivers_num * sizeof(struct cryptocap),
                                M_XDATA, M_NOWAIT);
        if (crypto_drivers == NULL)
        {
            crypto_drivers_num = 0;
            return -1;
        }

        bzero(crypto_drivers, crypto_drivers_num * sizeof(struct cryptocap));
    }

    for (i = 0; i < crypto_drivers_num; i++)
      if ((crypto_drivers[i].cc_process == NULL) &&
          !(crypto_drivers[i].cc_flags & CRYPTOCAP_F_CLEANUP) &&
          (crypto_drivers[i].cc_sessions == 0))
        return i;

    /* Out of entries, allocate some more */
    if (i == crypto_drivers_num)
    {
        /* Be careful about wrap-around */
        if (2 * crypto_drivers_num <= crypto_drivers_num)
          return -1;

        newdrv = malloc(2 * crypto_drivers_num * sizeof(struct cryptocap),
                        M_XDATA, M_NOWAIT);
        if (newdrv == NULL)
          return -1;

        bcopy(crypto_drivers, newdrv,
              crypto_drivers_num * sizeof(struct cryptocap));
        bzero(&newdrv[crypto_drivers_num],
              crypto_drivers_num * sizeof(struct cryptocap));

        /* Switch over to the enlarged table and release the old one */
        free(crypto_drivers, M_XDATA);
        crypto_drivers = newdrv;
        crypto_drivers_num *= 2;
        return i;
    }

    /* Shouldn't really get here... */
    return -1;
}

/*
 * Register a crypto driver. It should be called once for each algorithm
 * supported by the driver.
 */
int
crypto_register(u_int32_t driverid, int alg,
    int (*newses)(u_int32_t *, struct cryptoini *),
    int (*freeses)(u_int64_t), int (*process)(struct cryptop *))
{
    if ((driverid >= crypto_drivers_num) || (alg <= 0) ||
        (alg > CRYPTO_ALGORITHM_MAX) || (crypto_drivers == NULL))
      return EINVAL;

    /*
     * XXX Do some performance testing to determine placing.
     * XXX We probably need an auxiliary data structure that describes
     * XXX relative performances.
     */

    crypto_drivers[driverid].cc_alg[alg] = 1;

    if (crypto_drivers[driverid].cc_process == NULL)
    {
        crypto_drivers[driverid].cc_newsession = newses;
        crypto_drivers[driverid].cc_process = process;
        crypto_drivers[driverid].cc_freesession = freeses;
    }

    return 0;
}
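
/*
 * Illustrative sketch (not part of the original file): a hardware driver
 * would typically claim a slot at attach time with crypto_get_driverid()
 * and then call crypto_register() once per algorithm it supports, passing
 * its session and processing entry points.  The xxx_* callbacks and the
 * algorithm constants below are hypothetical examples.
 *
 *      int32_t id;
 *
 *      id = crypto_get_driverid();
 *      if (id >= 0) {
 *              crypto_register(id, CRYPTO_3DES_CBC, xxx_newsession,
 *                              xxx_freesession, xxx_process);
 *              crypto_register(id, CRYPTO_SHA1_HMAC, xxx_newsession,
 *                              xxx_freesession, xxx_process);
 *      }
 */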

/*
 * Unregister a crypto driver. If there are pending sessions using it,
 * leave enough information around so that subsequent calls using those
 * sessions will correctly detect the driver being unregistered and reroute
 * the request.
 */
int
crypto_unregister(u_int32_t driverid, int alg)
{
    u_int32_t ses;
    int i;

    /* Sanity checks */
    if ((driverid >= crypto_drivers_num) || (alg <= 0) ||
        (alg > CRYPTO_ALGORITHM_MAX) || (crypto_drivers == NULL) ||
        (crypto_drivers[driverid].cc_alg[alg] == 0))
      return EINVAL;

    crypto_drivers[driverid].cc_alg[alg] = 0;

    /* Was this the last algorithm? */
    for (i = 1; i <= CRYPTO_ALGORITHM_MAX; i++)
      if (crypto_drivers[driverid].cc_alg[i] != 0)
        break;

    if (i == CRYPTO_ALGORITHM_MAX + 1)
    {
        ses = crypto_drivers[driverid].cc_sessions;
        bzero(&crypto_drivers[driverid], sizeof(struct cryptocap));

        if (ses != 0)
        {
            /* If there are pending sessions, just mark as invalid */
            crypto_drivers[driverid].cc_flags |= CRYPTOCAP_F_CLEANUP;
            crypto_drivers[driverid].cc_sessions = ses;
        }
    }

    return 0;
}

/*
 * Add crypto request to a queue, to be processed by a kernel thread.
 */
int
crypto_dispatch(struct cryptop *crp)
{
    int s = splhigh();

    if (crp_req_queue == NULL) {
        crp_req_queue = crp;
        crp_req_queue_tail = &(crp->crp_next);
        wakeup((caddr_t) &crp_req_queue);
    } else {
        *crp_req_queue_tail = crp;
        crp_req_queue_tail = &(crp->crp_next);
    }
    splx(s);
    return 0;
}

/*
 * Dispatch a crypto request to the appropriate crypto devices.
 */
int
crypto_invoke(struct cryptop *crp)
{
    struct cryptodesc *crd;
    u_int64_t nid;
    u_int32_t hid;

    /* Sanity checks */
    if ((crp == NULL) || (crp->crp_callback == NULL))
      return EINVAL;

    if ((crp->crp_desc == NULL) || (crypto_drivers == NULL))
    {
        crp->crp_etype = EINVAL;
        crypto_done(crp);
        return 0;
    }

    hid = (crp->crp_sid >> 32) & 0xffffffff;

    if (hid >= crypto_drivers_num)
    {
        /* Migrate session */
        for (crd = crp->crp_desc; crd->crd_next; crd = crd->crd_next)
          crd->CRD_INI.cri_next = &(crd->crd_next->CRD_INI);

        if (crypto_newsession(&nid, &(crp->crp_desc->CRD_INI)) == 0)
          crp->crp_sid = nid;

        crp->crp_etype = EAGAIN;
        crypto_done(crp);
        return 0;
    }

    if (crypto_drivers[hid].cc_flags & CRYPTOCAP_F_CLEANUP)
      crypto_freesession(crp->crp_sid);

    if (crypto_drivers[hid].cc_process == NULL)
    {
        /* Migrate session */
        for (crd = crp->crp_desc; crd->crd_next; crd = crd->crd_next)
          crd->CRD_INI.cri_next = &(crd->crd_next->CRD_INI);

        if (crypto_newsession(&nid, &(crp->crp_desc->CRD_INI)) == 0)
          crp->crp_sid = nid;

        crp->crp_etype = EAGAIN;
        crypto_done(crp);
        return 0;
    }

    crypto_drivers[hid].cc_process(crp);
    return 0;
}
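
/*
 * Illustrative sketch (not part of the original file): when crypto_invoke()
 * cannot reach the driver a request was created on, it migrates the session,
 * stores the new SID in crp_sid and completes the request with crp_etype set
 * to EAGAIN.  A consumer's callback can therefore save the new SID and simply
 * resubmit; the callback name below is hypothetical.
 *
 *      int
 *      consumer_cb(struct cryptop *crp)
 *      {
 *              if (crp->crp_etype == EAGAIN) {
 *                      (the session moved; crp->crp_sid is the new SID)
 *                      crp->crp_etype = 0;
 *                      return crypto_dispatch(crp);
 *              }
 *              (otherwise consume the result and release the request)
 *              crypto_freereq(crp);
 *              return 0;
 *      }
 */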

/*
 * Release a set of crypto descriptors.
 */
void
crypto_freereq(struct cryptop *crp)
{
    struct cryptodesc *crd;
    int s;

    if (crp == NULL)
      return;

    s = splhigh();

    while ((crd = crp->crp_desc) != NULL)
    {
        crp->crp_desc = crd->crd_next;

        if (crypto_queue_num + 1 > crypto_queue_max)
          FREE(crd, M_XDATA);
        else
        {
            crd->crd_next = cryptodesc_queue;
            cryptodesc_queue = crd;
            crypto_queue_num++;
        }
    }

    if (crypto_queue_num + 1 > crypto_queue_max)
      FREE(crp, M_XDATA);
    else
    {
        crp->crp_next = cryptop_queue;
        cryptop_queue = crp;
        crypto_queue_num++;
    }

    splx(s);
}

/*
 * Acquire a set of crypto descriptors.
 */
struct cryptop *
crypto_getreq(int num)
{
    struct cryptodesc *crd;
    struct cryptop *crp;
    int s = splhigh();

    if (cryptop_queue == NULL)
    {
        MALLOC(crp, struct cryptop *, sizeof(struct cryptop), M_XDATA,
               M_NOWAIT);
        if (crp == NULL)
        {
            splx(s);
            return NULL;
        }
    }
    else
    {
        crp = cryptop_queue;
        cryptop_queue = crp->crp_next;
        crypto_queue_num--;
    }

    bzero(crp, sizeof(struct cryptop));

    while (num--)
    {
        if (cryptodesc_queue == NULL)
        {
            MALLOC(crd, struct cryptodesc *, sizeof(struct cryptodesc),
                   M_XDATA, M_NOWAIT);
            if (crd == NULL)
            {
                splx(s);
                crypto_freereq(crp);
                return NULL;
            }
        }
        else
        {
            crd = cryptodesc_queue;
            cryptodesc_queue = crd->crd_next;
            crypto_queue_num--;
        }

        bzero(crd, sizeof(struct cryptodesc));
        crd->crd_next = crp->crp_desc;
        crp->crp_desc = crd;
    }

    splx(s);
    return crp;
}
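
/*
 * Illustrative sketch (not part of the original file): the usual request
 * life cycle from the consumer's side.  A cryptop is taken from the pool
 * with one descriptor per operation, filled in, tied to a session and a
 * callback (such as the consumer_cb sketched above), and queued with
 * crypto_dispatch(); the callback eventually hands the structure back via
 * crypto_freereq().  Descriptor fields other than crd_next (offsets,
 * lengths, buffer pointers) live in crypto.h and are not shown here.
 *
 *      struct cryptop *crp;
 *
 *      crp = crypto_getreq(2);         (two descriptors: cipher + MAC)
 *      if (crp == NULL)
 *              return ENOBUFS;
 *
 *      (fill in crp->crp_desc and crp->crp_desc->crd_next here)
 *      crp->crp_sid = sid;             (from crypto_newsession())
 *      crp->crp_callback = consumer_cb;
 *
 *      crypto_dispatch(crp);
 */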

/*
 * Crypto thread, runs as a kernel thread to process crypto requests.
 */
void
crypto_thread(void)
{
    struct cryptop *crp;
    int s;

    s = splhigh();

    for (;;)
    {
        crp = crp_req_queue;
        if (crp == NULL) /* No work to do */
        {
            (void) tsleep(&crp_req_queue, PLOCK, "crypto_wait", 0);
            continue;
        }

        /* Remove from the queue */
        crp_req_queue = crp->crp_next;
        splx(s);

        crypto_invoke(crp);

        s = splhigh();
    }
}

/*
 * Invoke the callback on behalf of the driver.
 */
void
crypto_done(struct cryptop *crp)
{
    crp->crp_callback(crp);
}

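/*
 * Illustrative sketch (not part of the original file): the driver side of
 * the interface.  A registered cc_process routine carries out (or queues)
 * the work described by the descriptor chain and, when finished, reports
 * the outcome by setting crp_etype and handing the request back through
 * crypto_done(), which runs the consumer's callback.  The routine name is
 * hypothetical.
 *
 *      int
 *      xxx_process(struct cryptop *crp)
 *      {
 *              struct cryptodesc *crd;
 *              int error = 0;
 *
 *              for (crd = crp->crp_desc; crd; crd = crd->crd_next) {
 *                      (perform the operation, set error on failure)
 *              }
 *
 *              crp->crp_etype = error;
 *              crypto_done(crp);
 *              return 0;
 *      }
 */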