xref: /freebsd/sys/crypto/via/padlock_cipher.c (revision f05cddf9)
/*-
 * Copyright (c) 2005-2006 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * Copyright (c) 2004 Mark R V Murray
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*	$OpenBSD: via.c,v 1.3 2004/06/15 23:36:55 deraadt Exp $	*/
/*-
 * Copyright (c) 2003 Jason Wright
 * Copyright (c) 2003, 2004 Theo de Raadt
 * All rights reserved.
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/libkern.h>
#include <sys/pcpu.h>
#include <sys/uio.h>

#include <opencrypto/cryptodev.h>
#include <crypto/rijndael/rijndael.h>

#include <crypto/via/padlock.h>

#define	PADLOCK_ROUND_COUNT_AES128	10
#define	PADLOCK_ROUND_COUNT_AES192	12
#define	PADLOCK_ROUND_COUNT_AES256	14

#define	PADLOCK_ALGORITHM_TYPE_AES	0

#define	PADLOCK_KEY_GENERATION_HW	0
#define	PADLOCK_KEY_GENERATION_SW	1

#define	PADLOCK_DIRECTION_ENCRYPT	0
#define	PADLOCK_DIRECTION_DECRYPT	1

#define	PADLOCK_KEY_SIZE_128	0
#define	PADLOCK_KEY_SIZE_192	1
#define	PADLOCK_KEY_SIZE_256	2

MALLOC_DECLARE(M_PADLOCK);

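/*
 * padlock_cbc() drives a single "rep xcrypt-cbc" operation: it encrypts or
 * decrypts "count" 16-byte blocks from "in" to "out" (in-place use with
 * in == out is how this driver calls it), using the expanded key at "key",
 * the control word "cw" and the initialization vector "iv".  The data
 * buffer has to be 16-byte aligned, which the callers below take care of.
 * A typical call, mirroring padlock_cipher_process(), looks like:
 *
 *	padlock_cbc(abuf, abuf, enccrd->crd_len / AES_BLOCK_LEN,
 *	    ses->ses_ekey, &ses->ses_cw, ses->ses_iv);
 */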
static __inline void
padlock_cbc(void *in, void *out, size_t count, void *key, union padlock_cw *cw,
    void *iv)
{
#ifdef __GNUCLIKE_ASM
	/* The .byte sequence is really the VIA C3 "xcrypt-cbc" instruction. */
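	/*
	 * Operand mapping for xcrypt-cbc, as encoded in the constraints
	 * below: ESI = source, EDI = destination, EBX = expanded key,
	 * EDX = control word, EAX = IV and ECX = number of 16-byte blocks.
	 * The pushf/popf pair is believed to work around a PadLock quirk:
	 * touching EFLAGS forces the engine to reload the key material
	 * before the rep xcrypt executes.
	 */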
	__asm __volatile(
		"pushf				\n\t"
		"popf				\n\t"
		"rep				\n\t"
		".byte	0x0f, 0xa7, 0xd0"
			: "+a" (iv), "+c" (count), "+D" (out), "+S" (in)
			: "b" (key), "d" (cw)
			: "cc", "memory"
		);
#endif
}

static void
padlock_cipher_key_setup(struct padlock_session *ses, caddr_t key, int klen)
{
	union padlock_cw *cw;
	int i;

	cw = &ses->ses_cw;
	if (cw->cw_key_generation == PADLOCK_KEY_GENERATION_SW) {
		/* Build expanded keys for both directions */
		rijndaelKeySetupEnc(ses->ses_ekey, key, klen);
		rijndaelKeySetupDec(ses->ses_dkey, key, klen);
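		/*
		 * The PadLock engine expects the expanded key words in the
		 * opposite byte order from what rijndaelKeySetup*() leaves
		 * in memory on little-endian x86, hence the ntohl() pass
		 * over both key schedules below.
		 */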
		for (i = 0; i < 4 * (RIJNDAEL_MAXNR + 1); i++) {
			ses->ses_ekey[i] = ntohl(ses->ses_ekey[i]);
			ses->ses_dkey[i] = ntohl(ses->ses_dkey[i]);
		}
	} else {
		/* Hardware key generation: hand over the raw key (klen is in bits). */
		bcopy(key, ses->ses_ekey, klen / 8);
		bcopy(key, ses->ses_dkey, klen / 8);
	}
}

int
padlock_cipher_setup(struct padlock_session *ses, struct cryptoini *encini)
{
	union padlock_cw *cw;

	if (encini->cri_klen != 128 && encini->cri_klen != 192 &&
	    encini->cri_klen != 256) {
		return (EINVAL);
	}

	cw = &ses->ses_cw;
	bzero(cw, sizeof(*cw));
	cw->cw_algorithm_type = PADLOCK_ALGORITHM_TYPE_AES;
	cw->cw_key_generation = PADLOCK_KEY_GENERATION_SW;
	cw->cw_intermediate = 0;
	switch (encini->cri_klen) {
	case 128:
		cw->cw_round_count = PADLOCK_ROUND_COUNT_AES128;
		cw->cw_key_size = PADLOCK_KEY_SIZE_128;
#ifdef HW_KEY_GENERATION
		/*
		 * Hardware key generation buys us little, which is why
		 * it is not compiled in by default.
		 */
		cw->cw_key_generation = PADLOCK_KEY_GENERATION_HW;
#endif
		break;
	case 192:
		cw->cw_round_count = PADLOCK_ROUND_COUNT_AES192;
		cw->cw_key_size = PADLOCK_KEY_SIZE_192;
		break;
	case 256:
		cw->cw_round_count = PADLOCK_ROUND_COUNT_AES256;
		cw->cw_key_size = PADLOCK_KEY_SIZE_256;
		break;
	}
	if (encini->cri_key != NULL) {
		padlock_cipher_key_setup(ses, encini->cri_key,
		    encini->cri_klen);
	}

	arc4rand(ses->ses_iv, sizeof(ses->ses_iv), 0);
	return (0);
}

/*
 * Check whether the given buffer is already 16-byte aligned.  If it is,
 * there is no need to allocate a new buffer.  If it isn't, a new buffer
 * is allocated.
 */
static u_char *
padlock_cipher_alloc(struct cryptodesc *enccrd, struct cryptop *crp,
    int *allocated)
{
	u_char *addr;

	if (crp->crp_flags & CRYPTO_F_IMBUF)
		goto alloc;
	else {
		if (crp->crp_flags & CRYPTO_F_IOV) {
			struct uio *uio;
			struct iovec *iov;

			uio = (struct uio *)crp->crp_buf;
			if (uio->uio_iovcnt != 1)
				goto alloc;
			iov = uio->uio_iov;
			addr = (u_char *)iov->iov_base + enccrd->crd_skip;
		} else {
			addr = (u_char *)crp->crp_buf;
		}
		if (((uintptr_t)addr & 0xf) != 0) /* 16-byte aligned? */
			goto alloc;
		*allocated = 0;
		return (addr);
	}
alloc:
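	/*
	 * Allocate crd_len plus 16 spare bytes so that the caller can round
	 * the returned pointer up to the next 16-byte boundary with
	 * PADLOCK_ALIGN() and still have room for crd_len bytes of data.
	 */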
	*allocated = 1;
	addr = malloc(enccrd->crd_len + 16, M_PADLOCK, M_NOWAIT);
	return (addr);
}

int
padlock_cipher_process(struct padlock_session *ses, struct cryptodesc *enccrd,
    struct cryptop *crp)
{
	union padlock_cw *cw;
	struct thread *td;
	u_char *buf, *abuf;
	uint32_t *key;
	int allocated, error, saved_ctx;

	buf = padlock_cipher_alloc(enccrd, crp, &allocated);
	if (buf == NULL)
		return (ENOMEM);
	/* The buffer has to be 16-byte aligned. */
	abuf = PADLOCK_ALIGN(buf);

	if ((enccrd->crd_flags & CRD_F_KEY_EXPLICIT) != 0) {
		padlock_cipher_key_setup(ses, enccrd->crd_key,
		    enccrd->crd_klen);
	}

	cw = &ses->ses_cw;
	cw->cw_filler0 = 0;
	cw->cw_filler1 = 0;
	cw->cw_filler2 = 0;
	cw->cw_filler3 = 0;
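	/*
	 * Pick the direction, the matching key schedule and the IV.  An
	 * explicit IV (CRD_F_IV_EXPLICIT) always comes from crd_iv.  When
	 * encrypting, the IV is also written into the request buffer at
	 * crd_inject unless the caller says it is already there
	 * (CRD_F_IV_PRESENT); when decrypting without an explicit IV, the
	 * IV is read from the buffer at crd_inject instead.
	 */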
	if ((enccrd->crd_flags & CRD_F_ENCRYPT) != 0) {
		cw->cw_direction = PADLOCK_DIRECTION_ENCRYPT;
		key = ses->ses_ekey;
		if ((enccrd->crd_flags & CRD_F_IV_EXPLICIT) != 0)
			bcopy(enccrd->crd_iv, ses->ses_iv, AES_BLOCK_LEN);

		if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0) {
			crypto_copyback(crp->crp_flags, crp->crp_buf,
			    enccrd->crd_inject, AES_BLOCK_LEN, ses->ses_iv);
		}
	} else {
		cw->cw_direction = PADLOCK_DIRECTION_DECRYPT;
		key = ses->ses_dkey;
		if ((enccrd->crd_flags & CRD_F_IV_EXPLICIT) != 0)
			bcopy(enccrd->crd_iv, ses->ses_iv, AES_BLOCK_LEN);
		else {
			crypto_copydata(crp->crp_flags, crp->crp_buf,
			    enccrd->crd_inject, AES_BLOCK_LEN, ses->ses_iv);
		}
	}

	if (allocated) {
		crypto_copydata(crp->crp_flags, crp->crp_buf, enccrd->crd_skip,
		    enccrd->crd_len, abuf);
	}

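	/*
	 * The xcrypt instructions reportedly fault with a device-not-
	 * available exception if CR0.TS is set, so make sure this thread
	 * owns the FPU context (unless it already runs as an FPU kernel
	 * thread) before touching the PadLock unit.
	 */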
	td = curthread;
	if (!is_fpu_kern_thread(0)) {
		error = fpu_kern_enter(td, ses->ses_fpu_ctx, FPU_KERN_NORMAL);
		saved_ctx = 1;
	} else {
		error = 0;
		saved_ctx = 0;
	}
	if (error != 0)
		goto out;

	padlock_cbc(abuf, abuf, enccrd->crd_len / AES_BLOCK_LEN, key, cw,
	    ses->ses_iv);

	if (saved_ctx)
		fpu_kern_leave(td, ses->ses_fpu_ctx);

	if (allocated) {
		crypto_copyback(crp->crp_flags, crp->crp_buf, enccrd->crd_skip,
		    enccrd->crd_len, abuf);
	}

	/* Copy out the last block for use as the next session IV. */
	if ((enccrd->crd_flags & CRD_F_ENCRYPT) != 0) {
		crypto_copydata(crp->crp_flags, crp->crp_buf,
		    enccrd->crd_skip + enccrd->crd_len - AES_BLOCK_LEN,
		    AES_BLOCK_LEN, ses->ses_iv);
	}

 out:
	if (allocated) {
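		/*
		 * Zero the bounce buffer before freeing it so that no
		 * plaintext or ciphertext is left behind in kernel memory.
		 */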
		bzero(buf, enccrd->crd_len + 16);
		free(buf, M_PADLOCK);
	}
	return (error);
}
288