/*-
 * Copyright (c) 2005-2008 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * Copyright (c) 2010 Konstantin Belousov <kib@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/crypto/aesni/aesni.c,v 1.3 2010/09/23 11:57:25 pjd Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kobj.h>
#include <sys/libkern.h>
#include <sys/lock.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/spinlock.h>
#include <sys/spinlock2.h>
#include <sys/bus.h>
#include <sys/uio.h>
#include <dev/crypto/aesni/aesni.h>
#include "cryptodev_if.h"

struct aesni_softc {
	int32_t cid;
	uint32_t sid;
	TAILQ_HEAD(aesni_sessions_head, aesni_session) sessions;
	struct spinlock lock;
};

static int aesni_newsession(device_t, uint32_t *sidp, struct cryptoini *cri);
static int aesni_freesession(device_t, uint64_t tid);
static void aesni_freesession_locked(struct aesni_softc *sc,
    struct aesni_session *ses);

MALLOC_DEFINE(M_AESNI, "aesni_data", "AESNI Data");

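/*
 * Identify: create the "aesni" pseudo-device under the parent bus if one
 * does not already exist.
 */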
static void
aesni_identify(driver_t *drv, device_t parent)
{

	/* NB: order 10 is so we get attached after h/w devices */
	if (device_find_child(parent, "aesni", -1) == NULL &&
	    BUS_ADD_CHILD(parent, parent, 10, "aesni", -1) == 0)
		panic("aesni: could not attach");
}

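/*
 * Probe: fail unless the CPU advertises the AESNI instruction set
 * (CPUID2_AESNI in cpu_feature2).
 */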
static int
aesni_probe(device_t dev)
{

	if ((cpu_feature2 & CPUID2_AESNI) == 0) {
		device_printf(dev, "No AESNI support.\n");
		return (EINVAL);
	}
	device_set_desc_copy(dev, "AES-CBC,AES-XTS");
	return (0);
}

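/*
 * Attach: register with the opencrypto framework as a hardware driver and
 * advertise the AES-CBC and AES-XTS transforms.
 */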
static int
aesni_attach(device_t dev)
{
	struct aesni_softc *sc;

	sc = device_get_softc(dev);
	TAILQ_INIT(&sc->sessions);
	sc->sid = 1;
	sc->cid = crypto_get_driverid(dev, CRYPTOCAP_F_HARDWARE);
	if (sc->cid < 0) {
		device_printf(dev, "Could not get crypto driver id.\n");
		return (ENOMEM);
	}

	spin_init(&sc->lock, "aesniattach");
	crypto_register(sc->cid, CRYPTO_AES_CBC, 0, 0);
	crypto_register(sc->cid, CRYPTO_AES_XTS, 0, 0);
	return (0);
}

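/*
 * Detach: refuse while any session is still in use; otherwise free all
 * cached sessions and unregister from the crypto framework.
 */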
static int
aesni_detach(device_t dev)
{
	struct aesni_softc *sc;
	struct aesni_session *ses;

	sc = device_get_softc(dev);
	spin_lock(&sc->lock);
	TAILQ_FOREACH(ses, &sc->sessions, next) {
		if (ses->used) {
			spin_unlock(&sc->lock);
			device_printf(dev,
			    "Cannot detach, sessions still active.\n");
			return (EBUSY);
		}
	}
	while ((ses = TAILQ_FIRST(&sc->sessions)) != NULL) {
		TAILQ_REMOVE(&sc->sessions, ses, next);
		kfree(ses, M_AESNI);
	}
	spin_unlock(&sc->lock);
	spin_uninit(&sc->lock);
	crypto_unregister_all(sc->cid);
	return (0);
}

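/*
 * cryptodev newsession entry point: set up a session for exactly one
 * AES-CBC or AES-XTS transform, reusing a free session structure from the
 * list when one is available.
 */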
static int
aesni_newsession(device_t dev, uint32_t *sidp, struct cryptoini *cri)
{
	struct aesni_softc *sc;
	struct aesni_session *ses;
	struct cryptoini *encini;
	int error;

	if (sidp == NULL || cri == NULL)
		return (EINVAL);

	sc = device_get_softc(dev);
	ses = NULL;
	encini = NULL;
	for (; cri != NULL; cri = cri->cri_next) {
		switch (cri->cri_alg) {
		case CRYPTO_AES_CBC:
		case CRYPTO_AES_XTS:
			if (encini != NULL)
				return (EINVAL);
			encini = cri;
			break;
		default:
			return (EINVAL);
		}
	}
	if (encini == NULL)
		return (EINVAL);

	spin_lock(&sc->lock);
	/*
	 * Free sessions come first in the list, so if the first session is
	 * in use, we have to allocate a new one.
	 */
	ses = TAILQ_FIRST(&sc->sessions);
	if (ses == NULL || ses->used) {
		/*
		 * Release the spinlock here, since the following
		 * kmalloc(M_WAITOK) may block.  kmalloc(M_NOWAIT)
		 * is not acceptable on this code path.
		 */
		spin_unlock(&sc->lock);

		/*
		 * The aesni_session must be at least AESNI_ALIGN aligned.
		 * To guarantee this, the allocation is always a power of 2.
		 */
		ses = kmalloc(sizeof(*ses), M_AESNI,
		    M_WAITOK | M_ZERO | M_POWEROF2);
		if ((uintptr_t)ses & (AESNI_ALIGN - 1)) {
			panic("aesni: ses %p is not %d aligned",
			    ses, AESNI_ALIGN);
		}

		spin_lock(&sc->lock);
		ses->id = sc->sid++;
	} else {
		TAILQ_REMOVE(&sc->sessions, ses, next);
	}
	ses->used = 1;
	TAILQ_INSERT_TAIL(&sc->sessions, ses, next);
	spin_unlock(&sc->lock);
	ses->algo = encini->cri_alg;

	error = aesni_cipher_setup(ses, encini);
	if (error != 0) {
		spin_lock(&sc->lock);
		aesni_freesession_locked(sc, ses);
		spin_unlock(&sc->lock);
		return (error);
	}

	*sidp = ses->id;
	return (0);
}

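/*
 * Scrub the session state and return it to the head of the free list.
 * The caller must hold sc->lock.
 */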
static void
aesni_freesession_locked(struct aesni_softc *sc, struct aesni_session *ses)
{
	uint32_t sid;

	sid = ses->id;
	TAILQ_REMOVE(&sc->sessions, ses, next);

	bzero(ses, sizeof(*ses));
	ses->id = sid;
	TAILQ_INSERT_HEAD(&sc->sessions, ses, next);
}

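/*
 * cryptodev freesession entry point: look up the session by id and release
 * it back to the free list.
 */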
static int
aesni_freesession(device_t dev, uint64_t tid)
{
	struct aesni_softc *sc;
	struct aesni_session *ses;
	uint32_t sid;

	sc = device_get_softc(dev);
	sid = ((uint32_t)tid) & 0xffffffff;
	spin_lock(&sc->lock);
	TAILQ_FOREACH_REVERSE(ses, &sc->sessions, aesni_sessions_head, next) {
		if (ses->id == sid)
			break;
	}
	if (ses == NULL) {
		spin_unlock(&sc->lock);
		return (EINVAL);
	}
	aesni_freesession_locked(sc, ses);
	spin_unlock(&sc->lock);
	return (0);
}

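/*
 * cryptodev process entry point: validate the request descriptors, find the
 * session matching the request's sid and hand the work to
 * aesni_cipher_process().
 */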
static int
aesni_process(device_t dev, struct cryptop *crp, int hint __unused)
{
	struct aesni_softc *sc = device_get_softc(dev);
	struct aesni_session *ses = NULL;
	struct cryptodesc *crd, *enccrd;
	int error;

	error = 0;
	enccrd = NULL;

	/* Sanity check. */
	if (crp == NULL)
		return (EINVAL);

	if (crp->crp_callback == NULL || crp->crp_desc == NULL) {
		error = EINVAL;
		goto out;
	}

	for (crd = crp->crp_desc; crd != NULL; crd = crd->crd_next) {
		switch (crd->crd_alg) {
		case CRYPTO_AES_CBC:
		case CRYPTO_AES_XTS:
			if (enccrd != NULL) {
				error = EINVAL;
				goto out;
			}
			enccrd = crd;
			break;
		default:
			return (EINVAL);
		}
	}
	if (enccrd == NULL || (enccrd->crd_len % AES_BLOCK_LEN) != 0) {
		error = EINVAL;
		goto out;
	}

	spin_lock(&sc->lock); /* XXX: was rd lock */
	TAILQ_FOREACH_REVERSE(ses, &sc->sessions, aesni_sessions_head, next) {
		if (ses->id == (crp->crp_sid & 0xffffffff))
			break;
	}
	spin_unlock(&sc->lock); /* XXX: was rd lock */
	if (ses == NULL) {
		error = EINVAL;
		goto out;
	}

	error = aesni_cipher_process(ses, enccrd, crp);
	if (error != 0)
		goto out;

out:
	crp->crp_etype = error;
	crypto_done(crp);
	return (error);
}

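/*
 * Return a pointer to contiguous payload data for the request.  For mbuf
 * chains and multi-segment uios a bounce buffer is allocated and the data
 * copied into it; *allocated tells the caller whether that happened.
 */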
uint8_t *
aesni_cipher_alloc(struct cryptodesc *enccrd, struct cryptop *crp,
    int *allocated)
{
	struct uio *uio;
	struct iovec *iov;
	uint8_t *addr;

	if (crp->crp_flags & CRYPTO_F_IMBUF)
		goto alloc;
	else if (crp->crp_flags & CRYPTO_F_IOV) {
		uio = (struct uio *)crp->crp_buf;
		if (uio->uio_iovcnt != 1)
			goto alloc;
		iov = uio->uio_iov;
		addr = (u_char *)iov->iov_base + enccrd->crd_skip;
	} else
		addr = (u_char *)crp->crp_buf;
	*allocated = 0;
	return (addr);

alloc:
	addr = kmalloc(enccrd->crd_len, M_AESNI, M_NOWAIT);
	if (addr != NULL) {
		*allocated = 1;
		crypto_copydata(crp->crp_flags, crp->crp_buf, enccrd->crd_skip,
		    enccrd->crd_len, addr);
	} else
		*allocated = 0;
	return (addr);
}

static device_method_t aesni_methods[] = {
	DEVMETHOD(device_identify, aesni_identify),
	DEVMETHOD(device_probe, aesni_probe),
	DEVMETHOD(device_attach, aesni_attach),
	DEVMETHOD(device_detach, aesni_detach),

	DEVMETHOD(cryptodev_newsession, aesni_newsession),
	DEVMETHOD(cryptodev_freesession, aesni_freesession),
	DEVMETHOD(cryptodev_process, aesni_process),

	DEVMETHOD_END
};

static driver_t aesni_driver = {
	"aesni",
	aesni_methods,
	sizeof(struct aesni_softc),
};
static devclass_t aesni_devclass;

DRIVER_MODULE(aesni, nexus, aesni_driver, aesni_devclass, NULL, NULL);
MODULE_VERSION(aesni, 1);
MODULE_DEPEND(aesni, crypto, 1, 1, 1);