/*-
 * Copyright (c) 2005-2008 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * Copyright (c) 2010 Konstantin Belousov <kib@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/crypto/aesni/aesni.c,v 1.3 2010/09/23 11:57:25 pjd Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kobj.h>
#include <sys/libkern.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/spinlock.h>
#include <sys/spinlock2.h>
#include <sys/bus.h>
#include <sys/uio.h>
#include <dev/crypto/aesni/aesni.h>
#include "cryptodev_if.h"

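/*
 * Per-device softc: the opencrypto driver id, the next session id to
 * hand out, and the session list.  Unused (free) sessions are kept at
 * the head of the list and in-use sessions at the tail; see
 * aesni_newsession() and aesni_freesession_locked().
 */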
struct aesni_softc {
	int32_t cid;
	uint32_t sid;
	TAILQ_HEAD(aesni_sessions_head, aesni_session) sessions;
	struct spinlock lock;
};

static int aesni_newsession(device_t, uint32_t *sidp, struct cryptoini *cri);
static int aesni_freesession(device_t, uint64_t tid);
static void aesni_freesession_locked(struct aesni_softc *sc,
    struct aesni_session *ses);

MALLOC_DEFINE(M_AESNI, "aesni_data", "AESNI Data");

static void
aesni_identify(driver_t *drv, device_t parent)
{

	/* NB: order 10 is so we get attached after h/w devices */
	if (device_find_child(parent, "aesni", -1) == NULL &&
	    BUS_ADD_CHILD(parent, parent, 10, "aesni", -1) == 0)
		panic("aesni: could not attach");
}

static int
aesni_probe(device_t dev)
{

	if ((cpu_feature2 & CPUID2_AESNI) == 0) {
		device_printf(dev, "No AESNI support.\n");
		return (EINVAL);
	}
	device_set_desc_copy(dev, "AES-CBC,AES-XTS");
	return (0);
}

static int
aesni_attach(device_t dev)
{
	struct aesni_softc *sc;

	sc = device_get_softc(dev);
	TAILQ_INIT(&sc->sessions);
	sc->sid = 1;
	sc->cid = crypto_get_driverid(dev, CRYPTOCAP_F_HARDWARE);
	if (sc->cid < 0) {
		device_printf(dev, "Could not get crypto driver id.\n");
		return (ENOMEM);
	}

	spin_init(&sc->lock);
	crypto_register(sc->cid, CRYPTO_AES_CBC, 0, 0);
	crypto_register(sc->cid, CRYPTO_AES_XTS, 0, 0);
	return (0);
}

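/*
 * Refuse to detach while any session is still in use; otherwise free
 * the (all-unused) session list and unregister from opencrypto.
 */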
static int
aesni_detach(device_t dev)
{
	struct aesni_softc *sc;
	struct aesni_session *ses;

	sc = device_get_softc(dev);
	spin_lock(&sc->lock);
	TAILQ_FOREACH(ses, &sc->sessions, next) {
		if (ses->used) {
			spin_unlock(&sc->lock);
			device_printf(dev,
			    "Cannot detach, sessions still active.\n");
			return (EBUSY);
		}
	}
	while ((ses = TAILQ_FIRST(&sc->sessions)) != NULL) {
		TAILQ_REMOVE(&sc->sessions, ses, next);
		kfree(ses, M_AESNI);
	}
	spin_unlock(&sc->lock);
	spin_uninit(&sc->lock);
	crypto_unregister_all(sc->cid);
	return (0);
}

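/*
 * Create a session for exactly one AES-CBC or AES-XTS transform.
 * Reuse a free session from the head of the list if one is available,
 * otherwise allocate a new, AESNI_ALIGN-aligned one.
 */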
static int
aesni_newsession(device_t dev, uint32_t *sidp, struct cryptoini *cri)
{
	struct aesni_softc *sc;
	struct aesni_session *ses;
	struct cryptoini *encini;
	int error;

	if (sidp == NULL || cri == NULL)
		return (EINVAL);

	sc = device_get_softc(dev);
	ses = NULL;
	encini = NULL;
	for (; cri != NULL; cri = cri->cri_next) {
		switch (cri->cri_alg) {
		case CRYPTO_AES_CBC:
		case CRYPTO_AES_XTS:
			if (encini != NULL)
				return (EINVAL);
			encini = cri;
			break;
		default:
			return (EINVAL);
		}
	}
	if (encini == NULL)
		return (EINVAL);

	spin_lock(&sc->lock);
	/*
	 * Free sessions go first, so if the first session is in use, we
	 * need to allocate a new one.
	 */
	ses = TAILQ_FIRST(&sc->sessions);
	if (ses == NULL || ses->used) {
		size_t size;

		/*
		 * Release the spinlock here, since the following
		 * kmalloc(M_WAITOK) may block.  kmalloc(M_NOWAIT)
		 * is not acceptable on this code path.
		 */
		spin_unlock(&sc->lock);

		/*
		 * NOTE:
		 * If the allocation size is 2^n, then the memory returned
		 * by kmalloc(9) will be 2^n aligned.
		 */
		for (size = AESNI_ALIGN; size < sizeof(*ses); size <<= 1)
			;
		ses = kmalloc(size, M_AESNI, M_WAITOK | M_ZERO);
		if ((uintptr_t)ses & (AESNI_ALIGN - 1)) {
			panic("aesni: ses %p is not %d aligned\n",
			    ses, AESNI_ALIGN);
		}

		spin_lock(&sc->lock);
		ses->id = sc->sid++;
	} else {
		TAILQ_REMOVE(&sc->sessions, ses, next);
	}
	ses->used = 1;
	TAILQ_INSERT_TAIL(&sc->sessions, ses, next);
	spin_unlock(&sc->lock);
	ses->algo = encini->cri_alg;

	error = aesni_cipher_setup(ses, encini);
	if (error != 0) {
		spin_lock(&sc->lock);
		aesni_freesession_locked(sc, ses);
		spin_unlock(&sc->lock);
		return (error);
	}

	*sidp = ses->id;
	return (0);
}

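/*
 * Scrub the session state and put the session back at the head of the
 * list (the free end), keeping only its id.  Called with sc->lock held.
 */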
static void
aesni_freesession_locked(struct aesni_softc *sc, struct aesni_session *ses)
{
	uint32_t sid;

	sid = ses->id;
	TAILQ_REMOVE(&sc->sessions, ses, next);

	bzero(ses, sizeof(*ses));
	ses->id = sid;
	TAILQ_INSERT_HEAD(&sc->sessions, ses, next);
}

static int
aesni_freesession(device_t dev, uint64_t tid)
{
	struct aesni_softc *sc;
	struct aesni_session *ses;
	uint32_t sid;

	sc = device_get_softc(dev);
	sid = ((uint32_t)tid) & 0xffffffff;
	spin_lock(&sc->lock);
	TAILQ_FOREACH_REVERSE(ses, &sc->sessions, aesni_sessions_head, next) {
		if (ses->id == sid)
			break;
	}
	if (ses == NULL) {
		spin_unlock(&sc->lock);
		return (EINVAL);
	}
	aesni_freesession_locked(sc, ses);
	spin_unlock(&sc->lock);
	return (0);
}

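/*
 * Handle a crypto request: exactly one AES-CBC or AES-XTS descriptor
 * whose payload length is a multiple of AES_BLOCK_LEN, looked up by
 * the session id stored in the low 32 bits of crp_sid.
 */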
static int
aesni_process(device_t dev, struct cryptop *crp, int hint __unused)
{
	struct aesni_softc *sc = device_get_softc(dev);
	struct aesni_session *ses = NULL;
	struct cryptodesc *crd, *enccrd;
	int error;

	error = 0;
	enccrd = NULL;

	/* Sanity check. */
	if (crp == NULL)
		return (EINVAL);

	if (crp->crp_callback == NULL || crp->crp_desc == NULL) {
		error = EINVAL;
		goto out;
	}

	for (crd = crp->crp_desc; crd != NULL; crd = crd->crd_next) {
		switch (crd->crd_alg) {
		case CRYPTO_AES_CBC:
		case CRYPTO_AES_XTS:
			if (enccrd != NULL) {
				error = EINVAL;
				goto out;
			}
			enccrd = crd;
			break;
		default:
			return (EINVAL);
		}
	}
	if (enccrd == NULL || (enccrd->crd_len % AES_BLOCK_LEN) != 0) {
		error = EINVAL;
		goto out;
	}

	spin_lock(&sc->lock); /* XXX: was rd lock */
	TAILQ_FOREACH_REVERSE(ses, &sc->sessions, aesni_sessions_head, next) {
		if (ses->id == (crp->crp_sid & 0xffffffff))
			break;
	}
	spin_unlock(&sc->lock); /* XXX: was rd lock */
	if (ses == NULL) {
		error = EINVAL;
		goto out;
	}

	error = aesni_cipher_process(ses, enccrd, crp);
	if (error != 0)
		goto out;

out:
	crp->crp_etype = error;
	crypto_done(crp);
	return (error);
}

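/*
 * Return a pointer to contiguous data for the descriptor.  A plain
 * buffer or a single-iovec uio is used in place; otherwise a temporary
 * buffer is allocated (M_NOWAIT) and the data copied into it, with
 * *allocated telling the caller whether a copy was made.
 */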
uint8_t *
aesni_cipher_alloc(struct cryptodesc *enccrd, struct cryptop *crp,
    int *allocated)
{
	struct uio *uio;
	struct iovec *iov;
	uint8_t *addr;

	if (crp->crp_flags & CRYPTO_F_IMBUF)
		goto alloc;
	else if (crp->crp_flags & CRYPTO_F_IOV) {
		uio = (struct uio *)crp->crp_buf;
		if (uio->uio_iovcnt != 1)
			goto alloc;
		iov = uio->uio_iov;
		addr = (u_char *)iov->iov_base + enccrd->crd_skip;
	} else
		addr = (u_char *)crp->crp_buf;
	*allocated = 0;
	return (addr);

alloc:
	addr = kmalloc(enccrd->crd_len, M_AESNI, M_NOWAIT);
	if (addr != NULL) {
		*allocated = 1;
		crypto_copydata(crp->crp_flags, crp->crp_buf, enccrd->crd_skip,
		    enccrd->crd_len, addr);
	} else
		*allocated = 0;
	return (addr);
}

static device_method_t aesni_methods[] = {
	DEVMETHOD(device_identify, aesni_identify),
	DEVMETHOD(device_probe, aesni_probe),
	DEVMETHOD(device_attach, aesni_attach),
	DEVMETHOD(device_detach, aesni_detach),

	DEVMETHOD(cryptodev_newsession, aesni_newsession),
	DEVMETHOD(cryptodev_freesession, aesni_freesession),
	DEVMETHOD(cryptodev_process, aesni_process),

	{0, 0},
};

static driver_t aesni_driver = {
	"aesni",
	aesni_methods,
	sizeof(struct aesni_softc),
};
static devclass_t aesni_devclass;

DRIVER_MODULE(aesni, nexus, aesni_driver, aesni_devclass, NULL, NULL);
MODULE_VERSION(aesni, 1);
MODULE_DEPEND(aesni, crypto, 1, 1, 1);