xref: /netbsd/sys/dev/cgd.c (revision e86caeae)
1 /* $NetBSD: cgd.c,v 1.146 2022/04/02 09:53:20 riastradh Exp $ */
2 
3 /*-
4  * Copyright (c) 2002 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Roland C. Dowdeswell.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  *
19  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29  * POSSIBILITY OF SUCH DAMAGE.
30  */
31 
32 #include <sys/cdefs.h>
33 __KERNEL_RCSID(0, "$NetBSD: cgd.c,v 1.146 2022/04/02 09:53:20 riastradh Exp $");
34 
35 #include <sys/types.h>
36 #include <sys/param.h>
37 #include <sys/buf.h>
38 #include <sys/bufq.h>
39 #include <sys/conf.h>
40 #include <sys/cpu.h>
41 #include <sys/device.h>
42 #include <sys/disk.h>
43 #include <sys/disklabel.h>
44 #include <sys/errno.h>
45 #include <sys/fcntl.h>
46 #include <sys/ioctl.h>
47 #include <sys/kmem.h>
48 #include <sys/module.h>
49 #include <sys/namei.h> /* for pathbuf */
50 #include <sys/pool.h>
51 #include <sys/proc.h>
52 #include <sys/syslog.h>
53 #include <sys/systm.h>
54 #include <sys/vnode.h>
55 #include <sys/workqueue.h>
56 
57 #include <dev/cgd_crypto.h>
58 #include <dev/cgdvar.h>
59 #include <dev/dkvar.h>
60 
61 #include <miscfs/specfs/specdev.h> /* for v_rdev */
62 
63 #include "ioconf.h"
64 
/*
 * One cipher self-test case: encrypting ptxt with key at block blkno
 * is expected to yield ctxt (compared over txtlen bytes).
 */
struct selftest_params {
	const char *alg;	/* cipher name, e.g. "aes-xts" */
	int encblkno8;		/* nonzero: exercise the encblkno8 IV mode */
	int blocksize;	/* number of bytes */
	int secsize;		/* sector size in bytes */
	daddr_t blkno;		/* sector number fed into the IV */
	int keylen;	/* number of bits */
	int txtlen;	/* number of bytes */
	const uint8_t *key;	/* key material */
	const uint8_t *ptxt;	/* plaintext input */
	const uint8_t *ctxt;	/* expected ciphertext */
};
77 
78 /* Entry Point Functions */
79 
80 static dev_type_open(cgdopen);
81 static dev_type_close(cgdclose);
82 static dev_type_read(cgdread);
83 static dev_type_write(cgdwrite);
84 static dev_type_ioctl(cgdioctl);
85 static dev_type_strategy(cgdstrategy);
86 static dev_type_dump(cgddump);
87 static dev_type_size(cgdsize);
88 
/* Block device switch: cgd presents itself as an MP-safe disk. */
const struct bdevsw cgd_bdevsw = {
	.d_open = cgdopen,
	.d_close = cgdclose,
	.d_strategy = cgdstrategy,
	.d_ioctl = cgdioctl,
	.d_dump = cgddump,
	.d_psize = cgdsize,
	.d_discard = nodiscard,
	.d_flag = D_DISK | D_MPSAFE
};
99 
/* Character (raw) device switch; read/write go through physio. */
const struct cdevsw cgd_cdevsw = {
	.d_open = cgdopen,
	.d_close = cgdclose,
	.d_read = cgdread,
	.d_write = cgdwrite,
	.d_ioctl = cgdioctl,
	.d_stop = nostop,
	.d_tty = notty,
	.d_poll = nopoll,
	.d_mmap = nommap,
	.d_kqfilter = nokqfilter,
	.d_discard = nodiscard,
	.d_flag = D_DISK | D_MPSAFE
};
114 
115 /*
116  * Vector 5 from IEEE 1619/D16 truncated to 64 bytes, blkno 1.
117  */
118 static const uint8_t selftest_aes_xts_256_ptxt[64] = {
119 	0x27, 0xa7, 0x47, 0x9b, 0xef, 0xa1, 0xd4, 0x76,
120 	0x48, 0x9f, 0x30, 0x8c, 0xd4, 0xcf, 0xa6, 0xe2,
121 	0xa9, 0x6e, 0x4b, 0xbe, 0x32, 0x08, 0xff, 0x25,
122 	0x28, 0x7d, 0xd3, 0x81, 0x96, 0x16, 0xe8, 0x9c,
123 	0xc7, 0x8c, 0xf7, 0xf5, 0xe5, 0x43, 0x44, 0x5f,
124 	0x83, 0x33, 0xd8, 0xfa, 0x7f, 0x56, 0x00, 0x00,
125 	0x05, 0x27, 0x9f, 0xa5, 0xd8, 0xb5, 0xe4, 0xad,
126 	0x40, 0xe7, 0x36, 0xdd, 0xb4, 0xd3, 0x54, 0x12,
127 };
128 
/*
 * Expected ciphertext for the aes-xts 256-bit self-test.  Sized to
 * match the 64-byte plaintext: the previous [512] declaration left
 * 448 trailing zero bytes that were never referenced, since only
 * txtlen (64) bytes are ever compared; every sibling vector in this
 * file is declared [64].
 */
static const uint8_t selftest_aes_xts_256_ctxt[64] = {
	0x26, 0x4d, 0x3c, 0xa8, 0x51, 0x21, 0x94, 0xfe,
	0xc3, 0x12, 0xc8, 0xc9, 0x89, 0x1f, 0x27, 0x9f,
	0xef, 0xdd, 0x60, 0x8d, 0x0c, 0x02, 0x7b, 0x60,
	0x48, 0x3a, 0x3f, 0xa8, 0x11, 0xd6, 0x5e, 0xe5,
	0x9d, 0x52, 0xd9, 0xe4, 0x0e, 0xc5, 0x67, 0x2d,
	0x81, 0x53, 0x2b, 0x38, 0xb6, 0xb0, 0x89, 0xce,
	0x95, 0x1f, 0x0f, 0x9c, 0x35, 0x59, 0x0b, 0x8b,
	0x97, 0x8d, 0x17, 0x52, 0x13, 0xf3, 0x29, 0xbb,
};
139 
/*
 * 256-bit key for the aes-xts self-test.  The array is one byte
 * longer than the key and NUL-terminated -- presumably so it can be
 * handled like the user-supplied key blobs; confirm against
 * cgd_selftest() (not visible in this chunk).
 */
static const uint8_t selftest_aes_xts_256_key[33] = {
	0x27, 0x18, 0x28, 0x18, 0x28, 0x45, 0x90, 0x45,
	0x23, 0x53, 0x60, 0x28, 0x74, 0x71, 0x35, 0x26,
	0x31, 0x41, 0x59, 0x26, 0x53, 0x58, 0x97, 0x93,
	0x23, 0x84, 0x62, 0x64, 0x33, 0x83, 0x27, 0x95,
	0
};
147 
148 /*
149  * Vector 11 from IEEE 1619/D16 truncated to 64 bytes, blkno 0xffff.
150  */
151 static const uint8_t selftest_aes_xts_512_ptxt[64] = {
152 	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
153 	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
154 	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
155 	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
156 	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
157 	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
158 	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
159 	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
160 };
161 
162 static const uint8_t selftest_aes_xts_512_ctxt[64] = {
163 	0x77, 0xa3, 0x12, 0x51, 0x61, 0x8a, 0x15, 0xe6,
164 	0xb9, 0x2d, 0x1d, 0x66, 0xdf, 0xfe, 0x7b, 0x50,
165 	0xb5, 0x0b, 0xad, 0x55, 0x23, 0x05, 0xba, 0x02,
166 	0x17, 0xa6, 0x10, 0x68, 0x8e, 0xff, 0x7e, 0x11,
167 	0xe1, 0xd0, 0x22, 0x54, 0x38, 0xe0, 0x93, 0x24,
168 	0x2d, 0x6d, 0xb2, 0x74, 0xfd, 0xe8, 0x01, 0xd4,
169 	0xca, 0xe0, 0x6f, 0x20, 0x92, 0xc7, 0x28, 0xb2,
170 	0x47, 0x85, 0x59, 0xdf, 0x58, 0xe8, 0x37, 0xc2,
171 };
172 
173 static const uint8_t selftest_aes_xts_512_key[65] = {
174 	0x27, 0x18, 0x28, 0x18, 0x28, 0x45, 0x90, 0x45,
175 	0x23, 0x53, 0x60, 0x28, 0x74, 0x71, 0x35, 0x26,
176 	0x62, 0x49, 0x77, 0x57, 0x24, 0x70, 0x93, 0x69,
177 	0x99, 0x59, 0x57, 0x49, 0x66, 0x96, 0x76, 0x27,
178 	0x31, 0x41, 0x59, 0x26, 0x53, 0x58, 0x97, 0x93,
179 	0x23, 0x84, 0x62, 0x64, 0x33, 0x83, 0x27, 0x95,
180 	0x02, 0x88, 0x41, 0x97, 0x16, 0x93, 0x99, 0x37,
181 	0x51, 0x05, 0x82, 0x09, 0x74, 0x94, 0x45, 0x92,
182 	0
183 };
184 
/*
 * aes-cbc self-test vectors.  The same 32-byte key buffer serves both
 * the 128-bit test (first 16 bytes used) and the 256-bit test.
 */
static const uint8_t selftest_aes_cbc_key[32] = {
	0x27, 0x18, 0x28, 0x18, 0x28, 0x45, 0x90, 0x45,
	0x23, 0x53, 0x60, 0x28, 0x74, 0x71, 0x35, 0x26,
	0x62, 0x49, 0x77, 0x57, 0x24, 0x70, 0x93, 0x69,
	0x99, 0x59, 0x57, 0x49, 0x66, 0x96, 0x76, 0x27,
};

/* Plaintext for the aes-cbc 128-bit self-test. */
static const uint8_t selftest_aes_cbc_128_ptxt[64] = {
	0x27, 0xa7, 0x47, 0x9b, 0xef, 0xa1, 0xd4, 0x76,
	0x48, 0x9f, 0x30, 0x8c, 0xd4, 0xcf, 0xa6, 0xe2,
	0xa9, 0x6e, 0x4b, 0xbe, 0x32, 0x08, 0xff, 0x25,
	0x28, 0x7d, 0xd3, 0x81, 0x96, 0x16, 0xe8, 0x9c,
	0xc7, 0x8c, 0xf7, 0xf5, 0xe5, 0x43, 0x44, 0x5f,
	0x83, 0x33, 0xd8, 0xfa, 0x7f, 0x56, 0x00, 0x00,
	0x05, 0x27, 0x9f, 0xa5, 0xd8, 0xb5, 0xe4, 0xad,
	0x40, 0xe7, 0x36, 0xdd, 0xb4, 0xd3, 0x54, 0x12,
};

static const uint8_t selftest_aes_cbc_128_ctxt[64] = { /* blkno=1 */
	0x93, 0x94, 0x56, 0x36, 0x83, 0xbc, 0xff, 0xa4,
	0xe0, 0x24, 0x34, 0x12, 0xbe, 0xfa, 0xb0, 0x7d,
	0x88, 0x1e, 0xc5, 0x57, 0x55, 0x23, 0x05, 0x0c,
	0x69, 0xa5, 0xc1, 0xda, 0x64, 0xee, 0x74, 0x10,
	0xc2, 0xc5, 0xe6, 0x66, 0xd6, 0xa7, 0x49, 0x1c,
	0x9d, 0x40, 0xb5, 0x0c, 0x9b, 0x6e, 0x1c, 0xe6,
	0xb1, 0x7a, 0x1c, 0xe7, 0x5a, 0xfe, 0xf9, 0x2a,
	0x78, 0xfa, 0xb7, 0x7b, 0x08, 0xdf, 0x8e, 0x51,
};

/* Plaintext for the aes-cbc 256-bit self-test. */
static const uint8_t selftest_aes_cbc_256_ptxt[64] = {
	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
	0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f,
};

static const uint8_t selftest_aes_cbc_256_ctxt[64] = { /* blkno=0xffff */
	0x6c, 0xa3, 0x15, 0x17, 0x51, 0x90, 0xe9, 0x69,
	0x08, 0x36, 0x7b, 0xa6, 0xbb, 0xd1, 0x0b, 0x9e,
	0xcd, 0x6b, 0x1e, 0xaf, 0xb6, 0x2e, 0x62, 0x7d,
	0x8e, 0xde, 0xf0, 0xed, 0x0d, 0x44, 0xe7, 0x31,
	0x26, 0xcf, 0xd5, 0x0b, 0x3e, 0x95, 0x59, 0x89,
	0xdf, 0x5d, 0xd6, 0x9a, 0x00, 0x66, 0xcc, 0x7f,
	0x45, 0xd3, 0x06, 0x58, 0xed, 0xef, 0x49, 0x47,
	0x87, 0x89, 0x17, 0x7d, 0x08, 0x56, 0x50, 0xe1,
};
235 
/* 3des-cbc self-test vectors (192-bit key including parity bits). */
static const uint8_t selftest_3des_cbc_key[24] = {
	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
};

static const uint8_t selftest_3des_cbc_ptxt[64] = {
	0x27, 0xa7, 0x47, 0x9b, 0xef, 0xa1, 0xd4, 0x76,
	0x48, 0x9f, 0x30, 0x8c, 0xd4, 0xcf, 0xa6, 0xe2,
	0xa9, 0x6e, 0x4b, 0xbe, 0x32, 0x08, 0xff, 0x25,
	0x28, 0x7d, 0xd3, 0x81, 0x96, 0x16, 0xe8, 0x9c,
	0xc7, 0x8c, 0xf7, 0xf5, 0xe5, 0x43, 0x44, 0x5f,
	0x83, 0x33, 0xd8, 0xfa, 0x7f, 0x56, 0x00, 0x00,
	0x05, 0x27, 0x9f, 0xa5, 0xd8, 0xb5, 0xe4, 0xad,
	0x40, 0xe7, 0x36, 0xdd, 0xb4, 0xd3, 0x54, 0x12,
};

/* Expected ciphertext at blkno=1. */
static const uint8_t selftest_3des_cbc_ctxt[64] = {
	0xa2, 0xfe, 0x81, 0xaa, 0x10, 0x6c, 0xea, 0xb9,
	0x11, 0x58, 0x1f, 0x29, 0xb5, 0x86, 0x71, 0x56,
	0xe9, 0x25, 0x1d, 0x07, 0xb1, 0x69, 0x59, 0x6c,
	0x96, 0x80, 0xf7, 0x54, 0x38, 0xaa, 0xa7, 0xe4,
	0xe8, 0x81, 0xf5, 0x00, 0xbb, 0x1c, 0x00, 0x3c,
	0xba, 0x38, 0x45, 0x97, 0x4c, 0xcf, 0x84, 0x14,
	0x46, 0x86, 0xd9, 0xf4, 0xc5, 0xe2, 0xf0, 0x54,
	0xde, 0x41, 0xf6, 0xa1, 0xef, 0x1b, 0x0a, 0xea,
};
263 
/* blowfish-cbc self-test vectors (maximum 448-bit key). */
static const uint8_t selftest_bf_cbc_key[56] = {
	0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
	0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
	0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17,
	0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f,
	0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27,
	0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f,
	0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37,
};

static const uint8_t selftest_bf_cbc_ptxt[64] = {
	0x27, 0xa7, 0x47, 0x9b, 0xef, 0xa1, 0xd4, 0x76,
	0x48, 0x9f, 0x30, 0x8c, 0xd4, 0xcf, 0xa6, 0xe2,
	0xa9, 0x6e, 0x4b, 0xbe, 0x32, 0x08, 0xff, 0x25,
	0x28, 0x7d, 0xd3, 0x81, 0x96, 0x16, 0xe8, 0x9c,
	0xc7, 0x8c, 0xf7, 0xf5, 0xe5, 0x43, 0x44, 0x5f,
	0x83, 0x33, 0xd8, 0xfa, 0x7f, 0x56, 0x00, 0x00,
	0x05, 0x27, 0x9f, 0xa5, 0xd8, 0xb5, 0xe4, 0xad,
	0x40, 0xe7, 0x36, 0xdd, 0xb4, 0xd3, 0x54, 0x12,
};

/* Expected ciphertext at blkno=1. */
static const uint8_t selftest_bf_cbc_ctxt[64] = {
	0xec, 0xa2, 0xc0, 0x0e, 0xa9, 0x7f, 0x04, 0x1e,
	0x2e, 0x4f, 0x64, 0x07, 0x67, 0x3e, 0xf4, 0x58,
	0x61, 0x5f, 0xd3, 0x50, 0x5e, 0xd3, 0x4d, 0x34,
	0xa0, 0x53, 0xbe, 0x47, 0x75, 0x69, 0x3b, 0x1f,
	0x86, 0xf2, 0xae, 0x8b, 0xb7, 0x91, 0xda, 0xd4,
	0x2b, 0xa5, 0x47, 0x9b, 0x7d, 0x13, 0x30, 0xdd,
	0x7b, 0xad, 0x86, 0x57, 0x51, 0x11, 0x74, 0x42,
	0xb8, 0xbf, 0x69, 0x17, 0x20, 0x0a, 0xf7, 0xda,
};
295 
/*
 * encblkno8 self-test: 64 zero bytes serve as both key material
 * (first 16 bytes of a 128-bit key) and plaintext.
 */
static const uint8_t selftest_aes_cbc_encblkno8_zero64[64];
static const uint8_t selftest_aes_cbc_encblkno8_ctxt[64] = {
	0xa2, 0x06, 0x26, 0x26, 0xac, 0xdc, 0xe7, 0xcf,
	0x47, 0x68, 0x24, 0x0e, 0xfa, 0x40, 0x44, 0x83,
	0x07, 0xe1, 0xf4, 0x5d, 0x53, 0x47, 0xa0, 0xfe,
	0xc0, 0x6e, 0x4e, 0xf8, 0x9d, 0x98, 0x63, 0xb8,
	0x2c, 0x27, 0xfa, 0x3a, 0xd5, 0x40, 0xda, 0xdb,
	0xe6, 0xc3, 0xe4, 0xfb, 0x85, 0x53, 0xfb, 0x78,
	0x5d, 0xbd, 0x8f, 0x4c, 0x1a, 0x04, 0x9c, 0x88,
	0x85, 0xec, 0x3c, 0x56, 0x46, 0x1a, 0x6e, 0xf5,
};
307 
/*
 * Table of cipher self-test cases run by cgd_selftest() at attach
 * time; one entry per supported cipher/key-length configuration.
 */
const struct selftest_params selftests[] = {
	{
		.alg = "aes-xts",
		.blocksize = 16,
		.secsize = 512,
		.blkno = 1,
		.keylen = 256,
		.txtlen = sizeof(selftest_aes_xts_256_ptxt),
		.key  = selftest_aes_xts_256_key,
		.ptxt = selftest_aes_xts_256_ptxt,
		.ctxt = selftest_aes_xts_256_ctxt
	},
	{
		.alg = "aes-xts",
		.blocksize = 16,
		.secsize = 512,
		.blkno = 0xffff,
		.keylen = 512,
		.txtlen = sizeof(selftest_aes_xts_512_ptxt),
		.key  = selftest_aes_xts_512_key,
		.ptxt = selftest_aes_xts_512_ptxt,
		.ctxt = selftest_aes_xts_512_ctxt
	},
	{
		.alg = "aes-cbc",
		.blocksize = 16,
		.secsize = 512,
		.blkno = 1,
		.keylen = 128,
		.txtlen = sizeof(selftest_aes_cbc_128_ptxt),
		.key  = selftest_aes_cbc_key,
		.ptxt = selftest_aes_cbc_128_ptxt,
		.ctxt = selftest_aes_cbc_128_ctxt,
	},
	{
		.alg = "aes-cbc",
		.blocksize = 16,
		.secsize = 512,
		.blkno = 0xffff,
		.keylen = 256,
		.txtlen = sizeof(selftest_aes_cbc_256_ptxt),
		.key  = selftest_aes_cbc_key,
		.ptxt = selftest_aes_cbc_256_ptxt,
		.ctxt = selftest_aes_cbc_256_ctxt,
	},
	{
		.alg = "3des-cbc",
		.blocksize = 8,
		.secsize = 512,
		.blkno = 1,
		.keylen = 192,	/* 168 + 3*8 parity bits */
		.txtlen = sizeof(selftest_3des_cbc_ptxt),
		.key  = selftest_3des_cbc_key,
		.ptxt = selftest_3des_cbc_ptxt,
		.ctxt = selftest_3des_cbc_ctxt,
	},
	{
		.alg = "blowfish-cbc",
		.blocksize = 8,
		.secsize = 512,
		.blkno = 1,
		.keylen = 448,
		.txtlen = sizeof(selftest_bf_cbc_ptxt),
		.key  = selftest_bf_cbc_key,
		.ptxt = selftest_bf_cbc_ptxt,
		.ctxt = selftest_bf_cbc_ctxt,
	},
	{
		/* Exercises the encblkno8 IV method with an all-zero input. */
		.alg = "aes-cbc",
		.encblkno8 = 1,
		.blocksize = 16,
		.secsize = 512,
		.blkno = 0,
		.keylen = 128,
		.txtlen = sizeof(selftest_aes_cbc_encblkno8_zero64),
		.key = selftest_aes_cbc_encblkno8_zero64,
		.ptxt = selftest_aes_cbc_encblkno8_zero64,
		.ctxt = selftest_aes_cbc_encblkno8_ctxt,
	},
};
388 
389 static int cgd_match(device_t, cfdata_t, void *);
390 static void cgd_attach(device_t, device_t, void *);
391 static int cgd_detach(device_t, int);
392 static struct cgd_softc	*cgd_spawn(int);
393 static struct cgd_worker *cgd_create_one_worker(void);
394 static void cgd_destroy_one_worker(struct cgd_worker *);
395 static struct cgd_worker *cgd_create_worker(void);
396 static void cgd_destroy_worker(struct cgd_worker *);
397 static int cgd_destroy(device_t);
398 
399 /* Internal Functions */
400 
401 static int	cgd_diskstart(device_t, struct buf *);
402 static void	cgd_diskstart2(struct cgd_softc *, struct cgd_xfer *);
403 static void	cgdiodone(struct buf *);
404 static void	cgd_iodone2(struct cgd_softc *, struct cgd_xfer *);
405 static void	cgd_enqueue(struct cgd_softc *, struct cgd_xfer *);
406 static void	cgd_process(struct work *, void *);
407 static int	cgd_dumpblocks(device_t, void *, daddr_t, int);
408 
409 static int	cgd_ioctl_set(struct cgd_softc *, void *, struct lwp *);
410 static int	cgd_ioctl_clr(struct cgd_softc *, struct lwp *);
411 static int	cgd_ioctl_get(dev_t, void *, struct lwp *);
412 static int	cgdinit(struct cgd_softc *, const char *, struct vnode *,
413 			struct lwp *);
414 static void	cgd_cipher(struct cgd_softc *, void *, const void *,
415 			   size_t, daddr_t, size_t, int);
416 
417 static void	cgd_selftest(void);
418 
/* dk(4) driver glue: hooks cgd into the common disk framework. */
static const struct dkdriver cgddkdriver = {
        .d_minphys  = minphys,
        .d_open = cgdopen,
        .d_close = cgdclose,
        .d_strategy = cgdstrategy,
        .d_iosize = NULL,
        .d_diskstart = cgd_diskstart,
        .d_dumpblocks = cgd_dumpblocks,
        .d_lastclose = NULL
};
429 
/* Autoconf attachment for cgd pseudo-device instances. */
CFATTACH_DECL3_NEW(cgd, sizeof(struct cgd_softc),
    cgd_match, cgd_attach, cgd_detach, NULL, NULL, NULL, DVF_DETACH_SHUTDOWN);
432 
/* DIAGNOSTIC and DEBUG definitions */

#if defined(CGDDEBUG) && !defined(DEBUG)
#define DEBUG
#endif

#ifdef DEBUG
int cgddebug = 0;		/* bitmask of CGDB_* tracing categories */

#define CGDB_FOLLOW	0x1	/* trace function entry */
#define CGDB_IO	0x2	/* trace I/O requests */
#define CGDB_CRYPTO	0x4	/* trace crypto operations */

/* Execute y only when debug category x is enabled. */
#define IFDEBUG(x,y)		if (cgddebug & (x)) y
#define DPRINTF(x,y)		IFDEBUG(x, printf y)
#define DPRINTF_FOLLOW(y)	DPRINTF(CGDB_FOLLOW, y)

static void	hexprint(const char *, void *, int);

#else
#define IFDEBUG(x,y)
#define DPRINTF(x,y)
#define DPRINTF_FOLLOW(y)
#endif
457 
/* Global variables */

static kmutex_t cgd_spawning_mtx;	/* protects cgd_spawning */
static kcondvar_t cgd_spawning_cv;	/* signalled when cgd_spawning clears */
static bool cgd_spawning;		/* true while a unit is spawning/destroying */
static struct cgd_worker *cgd_worker;	/* single worker shared by all units */
static u_int cgd_refcnt;	/* number of users of cgd_worker */

/* Utility Functions */

/* The unit number is encoded in the dev_t exactly as for other disks. */
#define CGDUNIT(x)		DISKUNIT(x)
469 
470 /* The code */
471 
/*
 * Acquire the global spawning lock: wait until no other thread is
 * spawning/destroying a unit, then claim the flag.  If intr is true
 * the wait is interruptible and an error from cv_wait_sig() is
 * returned to the caller, in which case the flag is not claimed.
 * NOTE(review): an interrupted wait re-enters the loop if
 * cgd_spawning is still set, so only the last wait's error matters.
 */
static int
cgd_lock(bool intr)
{
	int error = 0;

	mutex_enter(&cgd_spawning_mtx);
	while (cgd_spawning) {
		if (intr)
			error = cv_wait_sig(&cgd_spawning_cv, &cgd_spawning_mtx);
		else
			cv_wait(&cgd_spawning_cv, &cgd_spawning_mtx);
	}
	if (error == 0)
		cgd_spawning = true;
	mutex_exit(&cgd_spawning_mtx);
	return error;
}
489 
/*
 * Release the global spawning lock and wake all waiters in
 * cgd_lock().
 */
static void
cgd_unlock(void)
{
	mutex_enter(&cgd_spawning_mtx);
	cgd_spawning = false;
	cv_broadcast(&cgd_spawning_cv);
	mutex_exit(&cgd_spawning_mtx);
}
498 
/*
 * Translate a dev_t into the unit's softc, or NULL if no such unit
 * is attached.
 */
static struct cgd_softc *
getcgd_softc(dev_t dev)
{
	return device_lookup_private(&cgd_cd, CGDUNIT(dev));
}
504 
/* Autoconf match: cgd is a pseudo-device, so always match. */
static int
cgd_match(device_t self, cfdata_t cfdata, void *aux)
{

	return 1;
}
511 
/*
 * Autoconf attach: initialize per-instance lock, condvar, and dk(4)
 * state.  No underlying device is bound until the instance is
 * configured via ioctl.
 */
static void
cgd_attach(device_t parent, device_t self, void *aux)
{
	struct cgd_softc *sc = device_private(self);

	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_BIO);
	cv_init(&sc->sc_cv, "cgdcv");
	dk_init(&sc->sc_dksc, self, DKTYPE_CGD);
	disk_init(&sc->sc_dksc.sc_dkdev, sc->sc_dksc.sc_xname, &cgddkdriver);

	/* Power management: nothing to do on suspend/resume. */
	if (!pmf_device_register(self, NULL, NULL))
		aprint_error_dev(self,
		    "unable to register power management hooks\n");
}
526 
527 
/*
 * Autoconf detach: refuse while the disk is open (EBUSY), clear the
 * configuration if one is still attached, then tear down the
 * per-instance state created in cgd_attach().
 */
static int
cgd_detach(device_t self, int flags)
{
	int ret;
	struct cgd_softc *sc = device_private(self);
	struct dk_softc *dksc = &sc->sc_dksc;

	if (DK_BUSY(dksc, 0))
		return EBUSY;

	if (DK_ATTACHED(dksc) &&
	    (ret = cgd_ioctl_clr(sc, curlwp)) != 0)
		return ret;

	disk_destroy(&dksc->sc_dkdev);
	cv_destroy(&sc->sc_cv);
	mutex_destroy(&sc->sc_lock);

	return 0;
}
548 
/*
 * Pseudo-device attach hook, called once at boot.  The global lock
 * and condvar are only initialized in the non-module case here --
 * presumably the module init path does it elsewhere; confirm in the
 * module glue (not visible in this chunk).  Always runs the cipher
 * self-tests.
 */
void
cgdattach(int num)
{
#ifndef _MODULE
	int error;

	mutex_init(&cgd_spawning_mtx, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&cgd_spawning_cv, "cgspwn");

	error = config_cfattach_attach(cgd_cd.cd_name, &cgd_ca);
	if (error != 0)
		aprint_error("%s: unable to register cfattach\n",
		    cgd_cd.cd_name);
#endif

	cgd_selftest();
}
566 
/*
 * Create a new cgd instance for `unit' by attaching a pseudo-device
 * with a hand-built cfdata.  Caller must hold the spawning lock.
 * Returns NULL on failure.
 * NOTE(review): cf is freed on the worker-allocation failure path
 * but not if config_attach_pseudo() fails -- confirm whether the
 * config layer consumes it in that case.
 */
static struct cgd_softc *
cgd_spawn(int unit)
{
	cfdata_t cf;
	struct cgd_worker *cw;
	struct cgd_softc *sc;

	cf = kmem_alloc(sizeof(*cf), KM_SLEEP);
	cf->cf_name = cgd_cd.cd_name;
	cf->cf_atname = cgd_cd.cd_name;
	cf->cf_unit = unit;
	cf->cf_fstate = FSTATE_STAR;

	/* Take a reference on the shared worker before attaching. */
	cw = cgd_create_one_worker();
	if (cw == NULL) {
		kmem_free(cf, sizeof(*cf));
		return NULL;
	}

	sc = device_private(config_attach_pseudo(cf));
	if (sc == NULL) {
		cgd_destroy_one_worker(cw);
		return NULL;
	}

	sc->sc_worker = cw;

	return sc;
}
596 
/*
 * Detach and free a spawned instance (reverse of cgd_spawn()),
 * dropping its reference to the shared worker and freeing the
 * hand-built cfdata.  Caller must hold the spawning lock.
 */
static int
cgd_destroy(device_t dev)
{
	struct cgd_softc *sc = device_private(dev);
	struct cgd_worker *cw = sc->sc_worker;
	cfdata_t cf;
	int error;

	/* Grab cf before detach; the softc is gone afterwards. */
	cf = device_cfdata(dev);
	error = config_detach(dev, DETACH_QUIET);
	if (error)
		return error;

	cgd_destroy_one_worker(cw);

	kmem_free(cf, sizeof(*cf));
	return 0;
}
615 
/*
 * Mark the instance busy, sleeping until any current holder calls
 * cgd_unbusy().  Serializes sections that must not run concurrently
 * on one unit.
 */
static void
cgd_busy(struct cgd_softc *sc)
{

	mutex_enter(&sc->sc_lock);
	while (sc->sc_busy)
		cv_wait(&sc->sc_cv, &sc->sc_lock);
	sc->sc_busy = true;
	mutex_exit(&sc->sc_lock);
}
626 
/* Clear the busy flag set by cgd_busy() and wake all waiters. */
static void
cgd_unbusy(struct cgd_softc *sc)
{

	mutex_enter(&sc->sc_lock);
	sc->sc_busy = false;
	cv_broadcast(&sc->sc_cv);
	mutex_exit(&sc->sc_lock);
}
636 
/*
 * Return the shared worker, creating it on first use.  The spawning
 * lock (asserted) serializes callers, so the plain refcount needs no
 * atomics.
 * NOTE(review): if cgd_create_worker() fails the subsequent KASSERT
 * fires rather than returning NULL cleanly -- confirm intent.
 */
static struct cgd_worker *
cgd_create_one_worker(void)
{
	KASSERT(cgd_spawning);

	if (cgd_refcnt++ == 0) {
		KASSERT(cgd_worker == NULL);
		cgd_worker = cgd_create_worker();
	}

	KASSERT(cgd_worker != NULL);
	return cgd_worker;
}
650 
/*
 * Drop a reference to the shared worker; destroy it when the last
 * user goes away.  Serialized by the spawning lock (asserted).
 */
static void
cgd_destroy_one_worker(struct cgd_worker *cw)
{
	KASSERT(cgd_spawning);
	KASSERT(cw == cgd_worker);

	if (--cgd_refcnt == 0) {
		cgd_destroy_worker(cgd_worker);
		cgd_worker = NULL;
	}
}
662 
/*
 * Allocate and initialize the shared worker: a per-CPU MP-safe
 * workqueue running cgd_process() at PRI_BIO with FPU state enabled
 * (WQ_FPU, for vectorized crypto), plus a pool of cgd_xfer transfer
 * contexts.  Returns NULL if the workqueue cannot be created.
 */
static struct cgd_worker *
cgd_create_worker(void)
{
	struct cgd_worker *cw;
	struct workqueue *wq;
	struct pool *cp;
	int error;

	cw = kmem_alloc(sizeof(struct cgd_worker), KM_SLEEP);
	cp = kmem_alloc(sizeof(struct pool), KM_SLEEP);

	error = workqueue_create(&wq, "cgd", cgd_process, NULL,
	    PRI_BIO, IPL_BIO, WQ_FPU|WQ_MPSAFE|WQ_PERCPU);
	if (error) {
		/* Unwind the KM_SLEEP allocations; nothing else exists yet. */
		kmem_free(cp, sizeof(struct pool));
		kmem_free(cw, sizeof(struct cgd_worker));
		return NULL;
	}

	cw->cw_cpool = cp;
	cw->cw_wq = wq;
	pool_init(cw->cw_cpool, sizeof(struct cgd_xfer), 0,
	    0, 0, "cgdcpl", NULL, IPL_BIO);
	mutex_init(&cw->cw_lock, MUTEX_DEFAULT, IPL_BIO);

	return cw;
}
690 
/*
 * Tear down a worker created by cgd_create_worker().  The NULL
 * checks are defensive; a fully constructed worker has all fields
 * set.
 */
static void
cgd_destroy_worker(struct cgd_worker *cw)
{

	/*
	 * Wait for all worker threads to complete before destroying
	 * the rest of the cgd_worker.
	 */
	if (cw->cw_wq)
		workqueue_destroy(cw->cw_wq);

	mutex_destroy(&cw->cw_lock);

	if (cw->cw_cpool) {
		pool_destroy(cw->cw_cpool);
		kmem_free(cw->cw_cpool, sizeof(struct pool));
	}

	kmem_free(cw, sizeof(struct cgd_worker));
}
711 
/*
 * Open entry point (block and character).  Spawns the instance on
 * first open of a unit (device cloning), then defers to dk_open().
 * The wait for the spawning lock is interruptible so an open can be
 * aborted by a signal.
 */
static int
cgdopen(dev_t dev, int flags, int fmt, struct lwp *l)
{
	struct	cgd_softc *sc;
	int error;

	DPRINTF_FOLLOW(("cgdopen(0x%"PRIx64", %d)\n", dev, flags));

	error = cgd_lock(true);
	if (error)
		return error;
	sc = getcgd_softc(dev);
	if (sc == NULL)
		sc = cgd_spawn(CGDUNIT(dev));
	cgd_unlock();
	if (sc == NULL)
		return ENXIO;

	return dk_open(&sc->sc_dksc, dev, flags, fmt, l);
}
732 
/*
 * Close entry point.  After the last close, if the unit is no longer
 * configured, the instance spawned by cgdopen() is destroyed.  The
 * spawning lock is taken uninterruptibly so a close cannot fail
 * spuriously.
 */
static int
cgdclose(dev_t dev, int flags, int fmt, struct lwp *l)
{
	struct	cgd_softc *sc;
	struct	dk_softc *dksc;
	int error;

	DPRINTF_FOLLOW(("cgdclose(0x%"PRIx64", %d)\n", dev, flags));

	error = cgd_lock(false);
	if (error)
		return error;
	sc = getcgd_softc(dev);
	if (sc == NULL) {
		error = ENXIO;
		goto done;
	}

	dksc = &sc->sc_dksc;
	if ((error =  dk_close(dksc, dev, flags, fmt, l)) != 0)
		goto done;

	/* Last close of an unconfigured unit: reap the instance. */
	if (!DK_ATTACHED(dksc)) {
		if ((error = cgd_destroy(sc->sc_dksc.sc_dev)) != 0) {
			device_printf(dksc->sc_dev,
			    "unable to detach instance\n");
			goto done;
		}
	}

done:
	cgd_unlock();

	return error;
}
768 
769 static void
cgdstrategy(struct buf * bp)770 cgdstrategy(struct buf *bp)
771 {
772 	struct	cgd_softc *sc = getcgd_softc(bp->b_dev);
773 
774 	DPRINTF_FOLLOW(("cgdstrategy(%p): b_bcount = %ld\n", bp,
775 	    (long)bp->b_bcount));
776 
777 	/*
778 	 * Reject unaligned writes.
779 	 */
780 	if (((uintptr_t)bp->b_data & 3) != 0) {
781 		bp->b_error = EINVAL;
782 		goto bail;
783 	}
784 
785 	dk_strategy(&sc->sc_dksc, bp);
786 	return;
787 
788 bail:
789 	bp->b_resid = bp->b_bcount;
790 	biodone(bp);
791 	return;
792 }
793 
794 static int
cgdsize(dev_t dev)795 cgdsize(dev_t dev)
796 {
797 	struct cgd_softc *sc = getcgd_softc(dev);
798 
799 	DPRINTF_FOLLOW(("cgdsize(0x%"PRIx64")\n", dev));
800 	if (!sc)
801 		return -1;
802 	return dk_size(&sc->sc_dksc, dev);
803 }
804 
805 /*
806  * cgd_{get,put}data are functions that deal with getting a buffer
807  * for the new encrypted data.
808  * We can no longer have a buffer per device, we need a buffer per
809  * work queue...
810  */
811 
812 static void *
cgd_getdata(struct cgd_softc * sc,unsigned long size)813 cgd_getdata(struct cgd_softc *sc, unsigned long size)
814 {
815 	void *data = NULL;
816 
817 	mutex_enter(&sc->sc_lock);
818 	if (!sc->sc_data_used) {
819 		sc->sc_data_used = true;
820 		data = sc->sc_data;
821 	}
822 	mutex_exit(&sc->sc_lock);
823 
824 	if (data)
825 		return data;
826 
827 	return kmem_intr_alloc(size, KM_NOSLEEP);
828 }
829 
830 static void
cgd_putdata(struct cgd_softc * sc,void * data,unsigned long size)831 cgd_putdata(struct cgd_softc *sc, void *data, unsigned long size)
832 {
833 
834 	if (data == sc->sc_data) {
835 		mutex_enter(&sc->sc_lock);
836 		sc->sc_data_used = false;
837 		mutex_exit(&sc->sc_lock);
838 	} else
839 		kmem_intr_free(data, size);
840 }
841 
/*
 * dk(4) start routine: begin one transfer.  All resources (nested
 * iobuf, cgd_xfer context, scratch buffer for writes) are acquired
 * without sleeping, so EAGAIN can be returned and the dk layer will
 * retry later.  Writes are encrypted asynchronously by the worker
 * before being issued; reads are issued directly and decrypted in
 * cgdiodone().
 */
static int
cgd_diskstart(device_t dev, struct buf *bp)
{
	struct	cgd_softc *sc = device_private(dev);
	struct	cgd_worker *cw = sc->sc_worker;
	struct	dk_softc *dksc = &sc->sc_dksc;
	struct	disk_geom *dg = &dksc->sc_dkdev.dk_geom;
	struct	cgd_xfer *cx;
	struct	buf *nbp;
	void *	newaddr;
	daddr_t	bn;

	DPRINTF_FOLLOW(("cgd_diskstart(%p, %p)\n", dksc, bp));

	bn = bp->b_rawblkno;

	/*
	 * We attempt to allocate all of our resources up front, so that
	 * we can fail quickly if they are unavailable.
	 */
	nbp = getiobuf(sc->sc_tvn, false);
	if (nbp == NULL)
		return EAGAIN;

	cx = pool_get(cw->cw_cpool, PR_NOWAIT);
	if (cx == NULL) {
		putiobuf(nbp);
		return EAGAIN;
	}

	cx->cx_sc = sc;
	cx->cx_obp = bp;
	cx->cx_nbp = nbp;
	cx->cx_srcv = cx->cx_dstv = bp->b_data;
	cx->cx_blkno = bn;
	cx->cx_secsize = dg->dg_secsize;

	/*
	 * If we are writing, then we need to encrypt the outgoing
	 * block into a new block of memory.
	 */
	if ((bp->b_flags & B_READ) == 0) {
		newaddr = cgd_getdata(sc, bp->b_bcount);
		if (!newaddr) {
			pool_put(cw->cw_cpool, cx);
			putiobuf(nbp);
			return EAGAIN;
		}

		/* Worker encrypts, then cgd_diskstart2() issues the I/O. */
		cx->cx_dstv = newaddr;
		cx->cx_len = bp->b_bcount;
		cx->cx_dir = CGD_CIPHER_ENCRYPT;

		cgd_enqueue(sc, cx);
		return 0;
	}

	/* Read: issue immediately; decryption happens at completion. */
	cgd_diskstart2(sc, cx);
	return 0;
}
902 
/*
 * Second half of starting a transfer: fill in the nested buffer and
 * issue it to the underlying device.  For writes this runs after the
 * worker has finished encrypting into cx_dstv.
 */
static void
cgd_diskstart2(struct cgd_softc *sc, struct cgd_xfer *cx)
{
	struct	vnode *vp;
	struct	buf *bp;
	struct	buf *nbp;

	bp = cx->cx_obp;
	nbp = cx->cx_nbp;

	nbp->b_data = cx->cx_dstv;
	nbp->b_flags = bp->b_flags;
	nbp->b_oflags = bp->b_oflags;
	nbp->b_cflags = bp->b_cflags;
	nbp->b_iodone = cgdiodone;
	nbp->b_proc = bp->b_proc;
	nbp->b_blkno = btodb(cx->cx_blkno * cx->cx_secsize);
	nbp->b_bcount = bp->b_bcount;
	nbp->b_private = cx;

	BIO_COPYPRIO(nbp, bp);

	/* Writes must account for the pending output on the vnode. */
	if ((nbp->b_flags & B_READ) == 0) {
		vp = nbp->b_vp;
		mutex_enter(vp->v_interlock);
		vp->v_numoutput++;
		mutex_exit(vp->v_interlock);
	}
	VOP_STRATEGY(sc->sc_tvn, nbp);
}
933 
/*
 * Completion handler for I/O issued to the underlying device.
 * Reads are queued to the worker for decryption in place; writes
 * (already encrypted) are finished immediately via cgd_iodone2().
 */
static void
cgdiodone(struct buf *nbp)
{
	struct	cgd_xfer *cx = nbp->b_private;
	struct	buf *obp = cx->cx_obp;
	struct	cgd_softc *sc = getcgd_softc(obp->b_dev);
	struct	dk_softc *dksc = &sc->sc_dksc;
	struct	disk_geom *dg = &dksc->sc_dkdev.dk_geom;
	daddr_t	bn;

	KDASSERT(sc);

	DPRINTF_FOLLOW(("cgdiodone(%p)\n", nbp));
	DPRINTF(CGDB_IO, ("cgdiodone: bp %p bcount %d resid %d\n",
	    obp, obp->b_bcount, obp->b_resid));
	DPRINTF(CGDB_IO, (" dev 0x%"PRIx64", nbp %p bn %" PRId64
	    " addr %p bcnt %d\n", nbp->b_dev, nbp, nbp->b_blkno, nbp->b_data,
		nbp->b_bcount));
	if (nbp->b_error != 0) {
		obp->b_error = nbp->b_error;
		DPRINTF(CGDB_IO, ("%s: error %d\n", dksc->sc_xname,
		    obp->b_error));
	}

	/* Perform the decryption if we are reading.
	 *
	 * Note: use the blocknumber from nbp, since it is what
	 *       we used to encrypt the blocks.
	 */

	if (nbp->b_flags & B_READ) {
		bn = dbtob(nbp->b_blkno) / dg->dg_secsize;

		/* Decrypt in place into the caller's buffer. */
		cx->cx_obp     = obp;
		cx->cx_nbp     = nbp;
		cx->cx_dstv    = obp->b_data;
		cx->cx_srcv    = obp->b_data;
		cx->cx_len     = obp->b_bcount;
		cx->cx_blkno   = bn;
		cx->cx_secsize = dg->dg_secsize;
		cx->cx_dir     = CGD_CIPHER_DECRYPT;

		cgd_enqueue(sc, cx);
		return;
	}

	cgd_iodone2(sc, cx);
}
981 }
982 
/*
 * Final completion: release the transfer context, the nested buffer,
 * and any scratch ciphertext buffer; account the result to the
 * original buffer; then kick the dk queue for more work.
 */
static void
cgd_iodone2(struct cgd_softc *sc, struct cgd_xfer *cx)
{
	struct cgd_worker *cw = sc->sc_worker;
	struct buf *obp = cx->cx_obp;
	struct buf *nbp = cx->cx_nbp;
	struct dk_softc *dksc = &sc->sc_dksc;

	pool_put(cw->cw_cpool, cx);

	/* If we allocated memory, free it now... */
	if (nbp->b_data != obp->b_data)
		cgd_putdata(sc, nbp->b_data, nbp->b_bcount);

	putiobuf(nbp);

	/* Request is complete for whatever reason */
	obp->b_resid = 0;
	if (obp->b_error != 0)
		obp->b_resid = obp->b_bcount;

	dk_done(dksc, obp);
	dk_start(dksc, NULL);
}
1007 
/*
 * Crash-dump support: encrypt the caller's memory into a scratch
 * buffer and pass the ciphertext to the underlying disk's dump
 * routine.
 */
static int
cgd_dumpblocks(device_t dev, void *va, daddr_t blkno, int nblk)
{
	struct cgd_softc *sc = device_private(dev);
	struct dk_softc *dksc = &sc->sc_dksc;
	struct disk_geom *dg = &dksc->sc_dkdev.dk_geom;
	size_t nbytes, blksize;
	void *buf;
	int error;

	/*
	 * dk_dump gives us units of disklabel sectors.  Everything
	 * else in cgd uses units of diskgeom sectors.  These had
	 * better agree; otherwise we need to figure out how to convert
	 * between them.
	 */
	KASSERTMSG((dg->dg_secsize == dksc->sc_dkdev.dk_label->d_secsize),
	    "diskgeom secsize %"PRIu32" != disklabel secsize %"PRIu32,
	    dg->dg_secsize, dksc->sc_dkdev.dk_label->d_secsize);
	blksize = dg->dg_secsize;

	/*
	 * Compute the number of bytes in this request, which dk_dump
	 * has `helpfully' converted to a number of blocks for us.
	 */
	nbytes = nblk*blksize;

	/* Try to acquire a buffer to store the ciphertext.  */
	buf = cgd_getdata(sc, nbytes);
	if (buf == NULL)
		/* Out of memory: give up.  */
		return ENOMEM;

	/* Encrypt the caller's data into the temporary buffer.  */
	cgd_cipher(sc, buf, va, nbytes, blkno, blksize, CGD_CIPHER_ENCRYPT);

	/* Pass it on to the underlying disk device.  */
	error = bdev_dump(sc->sc_tdev, blkno, buf, nbytes);

	/* Release the buffer.  */
	cgd_putdata(sc, buf, nbytes);

	/* Return any error from the underlying disk device.  */
	return error;
}
1053 
1054 /* XXX: we should probably put these into dksubr.c, mostly */
1055 static int
cgdread(dev_t dev,struct uio * uio,int flags)1056 cgdread(dev_t dev, struct uio *uio, int flags)
1057 {
1058 	struct	cgd_softc *sc;
1059 	struct	dk_softc *dksc;
1060 
1061 	DPRINTF_FOLLOW(("cgdread(0x%llx, %p, %d)\n",
1062 	    (unsigned long long)dev, uio, flags));
1063 	sc = getcgd_softc(dev);
1064 	if (sc == NULL)
1065 		return ENXIO;
1066 	dksc = &sc->sc_dksc;
1067 	if (!DK_ATTACHED(dksc))
1068 		return ENXIO;
1069 	return physio(cgdstrategy, NULL, dev, B_READ, minphys, uio);
1070 }
1071 
1072 /* XXX: we should probably put these into dksubr.c, mostly */
1073 static int
cgdwrite(dev_t dev,struct uio * uio,int flags)1074 cgdwrite(dev_t dev, struct uio *uio, int flags)
1075 {
1076 	struct	cgd_softc *sc;
1077 	struct	dk_softc *dksc;
1078 
1079 	DPRINTF_FOLLOW(("cgdwrite(0x%"PRIx64", %p, %d)\n", dev, uio, flags));
1080 	sc = getcgd_softc(dev);
1081 	if (sc == NULL)
1082 		return ENXIO;
1083 	dksc = &sc->sc_dksc;
1084 	if (!DK_ATTACHED(dksc))
1085 		return ENXIO;
1086 	return physio(cgdstrategy, NULL, dev, B_WRITE, minphys, uio);
1087 }
1088 
1089 static int
cgdioctl(dev_t dev,u_long cmd,void * data,int flag,struct lwp * l)1090 cgdioctl(dev_t dev, u_long cmd, void *data, int flag, struct lwp *l)
1091 {
1092 	struct	cgd_softc *sc;
1093 	struct	dk_softc *dksc;
1094 	int	part = DISKPART(dev);
1095 	int	pmask = 1 << part;
1096 	int	error;
1097 
1098 	DPRINTF_FOLLOW(("cgdioctl(0x%"PRIx64", %ld, %p, %d, %p)\n",
1099 	    dev, cmd, data, flag, l));
1100 
1101 	switch (cmd) {
1102 	case CGDIOCGET:
1103 		return cgd_ioctl_get(dev, data, l);
1104 	case CGDIOCSET:
1105 	case CGDIOCCLR:
1106 		if ((flag & FWRITE) == 0)
1107 			return EBADF;
1108 		/* FALLTHROUGH */
1109 	default:
1110 		sc = getcgd_softc(dev);
1111 		if (sc == NULL)
1112 			return ENXIO;
1113 		dksc = &sc->sc_dksc;
1114 		break;
1115 	}
1116 
1117 	switch (cmd) {
1118 	case CGDIOCSET:
1119 		cgd_busy(sc);
1120 		if (DK_ATTACHED(dksc))
1121 			error = EBUSY;
1122 		else
1123 			error = cgd_ioctl_set(sc, data, l);
1124 		cgd_unbusy(sc);
1125 		break;
1126 	case CGDIOCCLR:
1127 		cgd_busy(sc);
1128 		if (DK_BUSY(&sc->sc_dksc, pmask))
1129 			error = EBUSY;
1130 		else
1131 			error = cgd_ioctl_clr(sc, l);
1132 		cgd_unbusy(sc);
1133 		break;
1134 	case DIOCGCACHE:
1135 	case DIOCCACHESYNC:
1136 		cgd_busy(sc);
1137 		if (!DK_ATTACHED(dksc)) {
1138 			cgd_unbusy(sc);
1139 			error = ENOENT;
1140 			break;
1141 		}
1142 		/*
1143 		 * We pass this call down to the underlying disk.
1144 		 */
1145 		error = VOP_IOCTL(sc->sc_tvn, cmd, data, flag, l->l_cred);
1146 		cgd_unbusy(sc);
1147 		break;
1148 	case DIOCGSECTORALIGN: {
1149 		struct disk_sectoralign *dsa = data;
1150 
1151 		cgd_busy(sc);
1152 		if (!DK_ATTACHED(dksc)) {
1153 			cgd_unbusy(sc);
1154 			error = ENOENT;
1155 			break;
1156 		}
1157 
1158 		/* Get the underlying disk's sector alignment.  */
1159 		error = VOP_IOCTL(sc->sc_tvn, cmd, data, flag, l->l_cred);
1160 		if (error) {
1161 			cgd_unbusy(sc);
1162 			break;
1163 		}
1164 
1165 		/* Adjust for the disklabel partition if necessary.  */
1166 		if (part != RAW_PART) {
1167 			struct disklabel *lp = dksc->sc_dkdev.dk_label;
1168 			daddr_t offset = lp->d_partitions[part].p_offset;
1169 			uint32_t r = offset % dsa->dsa_alignment;
1170 
1171 			if (r < dsa->dsa_firstaligned)
1172 				dsa->dsa_firstaligned = dsa->dsa_firstaligned
1173 				    - r;
1174 			else
1175 				dsa->dsa_firstaligned = (dsa->dsa_firstaligned
1176 				    + dsa->dsa_alignment) - r;
1177 		}
1178 		cgd_unbusy(sc);
1179 		break;
1180 	}
1181 	case DIOCGSTRATEGY:
1182 	case DIOCSSTRATEGY:
1183 		if (!DK_ATTACHED(dksc)) {
1184 			error = ENOENT;
1185 			break;
1186 		}
1187 		/*FALLTHROUGH*/
1188 	default:
1189 		error = dk_ioctl(dksc, dev, cmd, data, flag, l);
1190 		break;
1191 	case CGDIOCGET:
1192 		KASSERT(0);
1193 		error = EINVAL;
1194 	}
1195 
1196 	return error;
1197 }
1198 
1199 static int
cgddump(dev_t dev,daddr_t blkno,void * va,size_t size)1200 cgddump(dev_t dev, daddr_t blkno, void *va, size_t size)
1201 {
1202 	struct	cgd_softc *sc;
1203 
1204 	DPRINTF_FOLLOW(("cgddump(0x%"PRIx64", %" PRId64 ", %p, %lu)\n",
1205 	    dev, blkno, va, (unsigned long)size));
1206 	sc = getcgd_softc(dev);
1207 	if (sc == NULL)
1208 		return ENXIO;
1209 	return dk_dump(&sc->sc_dksc, dev, blkno, va, size, DK_DUMP_RECURSIVE);
1210 }
1211 
/*
 * XXXrcd:
 *  for now we hardcode the maximum key length.
 */
#define MAX_KEYSIZE	1024

/*
 * User-visible IV-method names accepted by CGDIOCSET.  `v' is the
 * cipher mode to configure; `d' is the divisor later applied to the
 * blocksize for bit/byte compatibility (see cgd_ioctl_set()).
 */
static const struct {
	const char *n;	/* IV-method name from userland */
	int v;		/* CGD_CIPHER_CBC_* mode value */
	int d;		/* blocksize divisor for compat */
} encblkno[] = {
	{ "encblkno",  CGD_CIPHER_CBC_ENCBLKNO8, 1 },
	{ "encblkno8", CGD_CIPHER_CBC_ENCBLKNO8, 1 },
	{ "encblkno1", CGD_CIPHER_CBC_ENCBLKNO1, 8 },
};
1227 
/*
 * CGDIOCSET handler: configure the cgd.  Opens the backing device
 * named by ci_disk, copies the algorithm name, IV method and key in
 * from userland, initializes the cipher and attaches the disk.
 * Called from cgdioctl() with the softc marked busy.  Returns 0 or
 * an errno; on failure the scratch key buffer is freed and the
 * backing vnode closed.
 */
/* ARGSUSED */
static int
cgd_ioctl_set(struct cgd_softc *sc, void *data, struct lwp *l)
{
	struct	 cgd_ioctl *ci = data;
	struct	 vnode *vp;
	int	 ret;
	size_t	 i;
	size_t	 keybytes;			/* key length in bytes */
	const char *cp;
	struct pathbuf *pb;
	char	 *inbuf;			/* scratch for copied-in strings/key */
	struct dk_softc *dksc = &sc->sc_dksc;

	cp = ci->ci_disk;

	/* Open the underlying block device by its user-supplied path. */
	ret = pathbuf_copyin(ci->ci_disk, &pb);
	if (ret != 0) {
		return ret;
	}
	ret = vn_bdev_openpath(pb, &vp, l);
	pathbuf_destroy(pb);
	if (ret != 0) {
		return ret;
	}

	inbuf = kmem_alloc(MAX_KEYSIZE, KM_SLEEP);

	if ((ret = cgdinit(sc, cp, vp, l)) != 0)
		goto bail;

	/* Look up the cipher by name (e.g. "aes-cbc"). */
	(void)memset(inbuf, 0, MAX_KEYSIZE);
	ret = copyinstr(ci->ci_alg, inbuf, 256, NULL);
	if (ret)
		goto bail;
	sc->sc_cfuncs = cryptfuncs_find(inbuf);
	if (!sc->sc_cfuncs) {
		ret = EINVAL;
		goto bail;
	}

	/* Look up the IV method; only the encblkno family is accepted. */
	(void)memset(inbuf, 0, MAX_KEYSIZE);
	ret = copyinstr(ci->ci_ivmethod, inbuf, MAX_KEYSIZE, NULL);
	if (ret)
		goto bail;

	for (i = 0; i < __arraycount(encblkno); i++)
		if (strcmp(encblkno[i].n, inbuf) == 0)
			break;

	if (i == __arraycount(encblkno)) {
		ret = EINVAL;
		goto bail;
	}

	/* ci_keylen is in bits; convert to bytes (+1 covers a partial byte). */
	keybytes = ci->ci_keylen / 8 + 1;
	if (keybytes > MAX_KEYSIZE) {
		ret = EINVAL;
		goto bail;
	}

	/* Copy the key itself into the scratch buffer. */
	(void)memset(inbuf, 0, MAX_KEYSIZE);
	ret = copyin(ci->ci_key, inbuf, keybytes);
	if (ret)
		goto bail;

	sc->sc_cdata.cf_blocksize = ci->ci_blocksize;
	sc->sc_cdata.cf_mode = encblkno[i].v;

	/*
	 * Print a warning if the user selected the legacy encblkno8
	 * mistake, and reject it altogether for ciphers that it
	 * doesn't apply to.
	 */
	if (encblkno[i].v != CGD_CIPHER_CBC_ENCBLKNO1) {
		if (strcmp(sc->sc_cfuncs->cf_name, "aes-cbc") &&
		    strcmp(sc->sc_cfuncs->cf_name, "3des-cbc") &&
		    strcmp(sc->sc_cfuncs->cf_name, "blowfish-cbc")) {
			log(LOG_WARNING, "cgd: %s only makes sense for cbc,"
			    " not for %s; ignoring\n",
			    encblkno[i].n, sc->sc_cfuncs->cf_name);
			sc->sc_cdata.cf_mode = CGD_CIPHER_CBC_ENCBLKNO1;
		} else {
			log(LOG_WARNING, "cgd: enabling legacy encblkno8\n");
		}
	}

	sc->sc_cdata.cf_keylen = ci->ci_keylen;
	sc->sc_cdata.cf_priv = sc->sc_cfuncs->cf_init(ci->ci_keylen, inbuf,
	    &sc->sc_cdata.cf_blocksize);
	if (sc->sc_cdata.cf_blocksize > CGD_MAXBLOCKSIZE) {
	    /*
	     * NOTE(review): cf_priv is dropped here without a
	     * cf_destroy() call -- looks like a leak of the cipher
	     * state; confirm against cf_init's contract.
	     */
	    log(LOG_WARNING, "cgd: Disallowed cipher with blocksize %zu > %u\n",
		sc->sc_cdata.cf_blocksize, CGD_MAXBLOCKSIZE);
	    sc->sc_cdata.cf_priv = NULL;
	}

	/*
	 * The blocksize is supposed to be in bytes. Unfortunately originally
	 * it was expressed in bits. For compatibility we maintain encblkno
	 * and encblkno8.
	 */
	sc->sc_cdata.cf_blocksize /= encblkno[i].d;
	/* Scrub the key material; explicit_memset won't be optimized away. */
	(void)explicit_memset(inbuf, 0, MAX_KEYSIZE);
	if (!sc->sc_cdata.cf_priv) {
		ret = EINVAL;		/* XXX is this the right error? */
		goto bail;
	}
	kmem_free(inbuf, MAX_KEYSIZE);

	bufq_alloc(&dksc->sc_bufq, "fcfs", 0);

	sc->sc_data = kmem_alloc(MAXPHYS, KM_SLEEP);
	sc->sc_data_used = false;

	/* Attach the disk. */
	dk_attach(dksc);
	disk_attach(&dksc->sc_dkdev);

	disk_set_info(dksc->sc_dev, &dksc->sc_dkdev, NULL);

	/* Discover wedges on this disk. */
	dkwedge_discover(&dksc->sc_dkdev);

	return 0;

bail:
	kmem_free(inbuf, MAX_KEYSIZE);
	(void)vn_close(vp, FREAD|FWRITE, l->l_cred);
	return ret;
}
1358 
/*
 * CGDIOCCLR handler: unconfigure the cgd.  Deletes wedges, drains
 * and frees the buffer queue, closes the backing vnode, destroys the
 * cipher state and detaches the disk.  Called from cgdioctl() with
 * the softc busy and no partitions open.  Returns ENXIO if the unit
 * is not configured.
 */
/* ARGSUSED */
static int
cgd_ioctl_clr(struct cgd_softc *sc, struct lwp *l)
{
	struct	dk_softc *dksc = &sc->sc_dksc;

	if (!DK_ATTACHED(dksc))
		return ENXIO;

	/* Delete all of our wedges. */
	dkwedge_delall(&dksc->sc_dkdev);

	/* Kill off any queued buffers. */
	dk_drain(dksc);
	bufq_free(dksc->sc_bufq);

	/* Release the backing device and all per-unit resources. */
	(void)vn_close(sc->sc_tvn, FREAD|FWRITE, l->l_cred);
	sc->sc_cfuncs->cf_destroy(sc->sc_cdata.cf_priv);
	kmem_free(sc->sc_tpath, sc->sc_tpathlen);
	kmem_free(sc->sc_data, MAXPHYS);
	sc->sc_data_used = false;
	dk_detach(dksc);
	disk_detach(&dksc->sc_dkdev);

	return 0;
}
1385 
/*
 * CGDIOCGET handler: report the configuration of unit CGDUNIT(dev)
 * into the user-supplied struct cgd_user.  Unconfigured units get
 * zeroed fields rather than an error.  Returns 0, EINVAL for a
 * negative unit, or an error from cgd_lock().
 */
static int
cgd_ioctl_get(dev_t dev, void *data, struct lwp *l)
{
	struct cgd_softc *sc;
	struct cgd_user *cgu;
	int unit, error;

	unit = CGDUNIT(dev);
	cgu = (struct cgd_user *)data;

	DPRINTF_FOLLOW(("cgd_ioctl_get(0x%"PRIx64", %d, %p, %p)\n",
			   dev, unit, data, l));

	/* XXX, we always return this units data, so if cgu_unit is
	 * not -1, that field doesn't match the rest
	 */
	if (cgu->cgu_unit == -1)
		cgu->cgu_unit = unit;

	if (cgu->cgu_unit < 0)
		return EINVAL;	/* XXX: should this be ENXIO? */

	/* Serialize against concurrent spawning/configuration. */
	error = cgd_lock(false);
	if (error)
		return error;

	sc = device_lookup_private(&cgd_cd, unit);
	if (sc == NULL || !DK_ATTACHED(&sc->sc_dksc)) {
		/* Not configured: report empty fields. */
		cgu->cgu_dev = 0;
		cgu->cgu_alg[0] = '\0';
		cgu->cgu_blocksize = 0;
		cgu->cgu_mode = 0;
		cgu->cgu_keylen = 0;
	}
	else {
		/* Snapshot the configuration under the softc lock. */
		mutex_enter(&sc->sc_lock);
		cgu->cgu_dev = sc->sc_tdev;
		strncpy(cgu->cgu_alg, sc->sc_cfuncs->cf_name,
		    sizeof(cgu->cgu_alg));
		cgu->cgu_blocksize = sc->sc_cdata.cf_blocksize;
		cgu->cgu_mode = sc->sc_cdata.cf_mode;
		cgu->cgu_keylen = sc->sc_cdata.cf_keylen;
		mutex_exit(&sc->sc_lock);
	}

	cgd_unlock();
	return 0;
}
1434 
/*
 * Common configuration: record the backing device's path (copied in
 * from the userland pointer `cpath') and dev_t, and synthesize an
 * initial geometry from the backing disk's size and sector size.
 * Returns 0 or an errno; on failure sc_tpath is freed.
 */
static int
cgdinit(struct cgd_softc *sc, const char *cpath, struct vnode *vp,
	struct lwp *l)
{
	struct	disk_geom *dg;
	int	ret;
	char	*tmppath;
	uint64_t psize;		/* backing device size, in sectors */
	unsigned secsize;
	struct dk_softc *dksc = &sc->sc_dksc;

	sc->sc_tvn = vp;
	sc->sc_tpath = NULL;

	/* Keep a kernel copy of the path for later reporting/freeing. */
	tmppath = kmem_alloc(MAXPATHLEN, KM_SLEEP);
	ret = copyinstr(cpath, tmppath, MAXPATHLEN, &sc->sc_tpathlen);
	if (ret)
		goto bail;
	sc->sc_tpath = kmem_alloc(sc->sc_tpathlen, KM_SLEEP);
	memcpy(sc->sc_tpath, tmppath, sc->sc_tpathlen);

	sc->sc_tdev = vp->v_rdev;

	if ((ret = getdisksize(vp, &psize, &secsize)) != 0)
		goto bail;

	if (psize == 0) {
		ret = ENODEV;
		goto bail;
	}

	/*
	 * XXX here we should probe the underlying device.  If we
	 *     are accessing a partition of type RAW_PART, then
	 *     we should populate our initial geometry with the
	 *     geometry that we discover from the device.
	 */
	dg = &dksc->sc_dkdev.dk_geom;
	memset(dg, 0, sizeof(*dg));
	dg->dg_secperunit = psize;
	dg->dg_secsize = secsize;
	dg->dg_ntracks = 1;
	/* Fake geometry: one track holds 1 MB worth of sectors. */
	dg->dg_nsectors = 1024 * 1024 / dg->dg_secsize;
	dg->dg_ncylinders = dg->dg_secperunit / dg->dg_nsectors;

bail:
	kmem_free(tmppath, MAXPATHLEN);
	if (ret && sc->sc_tpath)
		kmem_free(sc->sc_tpath, sc->sc_tpathlen);
	return ret;
}
1486 
1487 /*
1488  * Our generic cipher entry point.  This takes care of the
1489  * IV mode and passes off the work to the specific cipher.
1490  * We implement here the IV method ``encrypted block
1491  * number''.
1492  *
1493  * XXXrcd: for now we rely on our own crypto framework defined
1494  *         in dev/cgd_crypto.c.  This will change when we
1495  *         get a generic kernel crypto framework.
1496  */
1497 
1498 static void
blkno2blkno_buf(char * sbuf,daddr_t blkno)1499 blkno2blkno_buf(char *sbuf, daddr_t blkno)
1500 {
1501 	int	i;
1502 
1503 	/* Set up the blkno in blkno_buf, here we do not care much
1504 	 * about the final layout of the information as long as we
1505 	 * can guarantee that each sector will have a different IV
1506 	 * and that the endianness of the machine will not affect
1507 	 * the representation that we have chosen.
1508 	 *
1509 	 * We choose this representation, because it does not rely
1510 	 * on the size of buf (which is the blocksize of the cipher),
1511 	 * but allows daddr_t to grow without breaking existing
1512 	 * disks.
1513 	 *
1514 	 * Note that blkno2blkno_buf does not take a size as input,
1515 	 * and hence must be called on a pre-zeroed buffer of length
1516 	 * greater than or equal to sizeof(daddr_t).
1517 	 */
1518 	for (i=0; i < sizeof(daddr_t); i++) {
1519 		*sbuf++ = blkno & 0xff;
1520 		blkno >>= 8;
1521 	}
1522 }
1523 
/*
 * Pick a CPU for the next work item.  When the worker is idle,
 * remember the current CPU in cw_last and return NULL (no explicit
 * binding); otherwise round-robin to the next existing CPU after
 * cw_last.  Called with cw_lock held (see cgd_enqueue()).
 */
static struct cpu_info *
cgd_cpu(struct cgd_softc *sc)
{
	struct cgd_worker *cw = sc->sc_worker;
	struct cpu_info *ci = NULL;
	u_int cidx, i;

	if (cw->cw_busy == 0) {
		cw->cw_last = cpu_index(curcpu());
		return NULL;
	}

	/* Scan at most maxcpus indices, wrapping, for an existing CPU. */
	for (i=0, cidx = cw->cw_last+1; i<maxcpus; ++i, ++cidx) {
		if (cidx >= maxcpus)
			cidx = 0;
		ci = cpu_lookup(cidx);
		if (ci) {
			cw->cw_last = cidx;
			break;
		}
	}

	return ci;
}
1548 
/*
 * Hand a transfer to the worker pool: pick a target CPU and bump the
 * busy count under cw_lock, then enqueue the work item (processed by
 * cgd_process()).
 */
static void
cgd_enqueue(struct cgd_softc *sc, struct cgd_xfer *cx)
{
	struct cgd_worker *cw = sc->sc_worker;
	struct cpu_info *ci;

	mutex_enter(&cw->cw_lock);
	ci = cgd_cpu(sc);
	cw->cw_busy++;
	mutex_exit(&cw->cw_lock);

	workqueue_enqueue(cw->cw_wq, &cx->cx_work, ci);
}
1562 
/*
 * Workqueue handler: run the cipher operation recorded in the
 * transfer, then continue the request -- encryption (write path)
 * proceeds to cgd_diskstart2(), decryption (read path) completes via
 * cgd_iodone2().  Finally drop the worker's busy count.
 */
static void
cgd_process(struct work *wk, void *arg)
{
	struct cgd_xfer *cx = (struct cgd_xfer *)wk;
	struct cgd_softc *sc = cx->cx_sc;
	struct cgd_worker *cw = sc->sc_worker;

	cgd_cipher(sc, cx->cx_dstv, cx->cx_srcv, cx->cx_len,
	    cx->cx_blkno, cx->cx_secsize, cx->cx_dir);

	if (cx->cx_dir == CGD_CIPHER_ENCRYPT) {
		cgd_diskstart2(sc, cx);
	} else {
		cgd_iodone2(sc, cx);
	}

	mutex_enter(&cw->cw_lock);
	if (cw->cw_busy > 0)
		cw->cw_busy--;
	mutex_exit(&cw->cw_lock);
}
1584 
/*
 * Encrypt (dir == CGD_CIPHER_ENCRYPT) or decrypt `len' bytes from
 * srcv into dstv, one secsize-sized sector at a time, deriving each
 * sector's IV from its block number (the encblkno scheme).  len must
 * be a multiple of the cipher blocksize.
 */
static void
cgd_cipher(struct cgd_softc *sc, void *dstv, const void *srcv,
    size_t len, daddr_t blkno, size_t secsize, int dir)
{
	char		*dst = dstv;
	const char	*src = srcv;
	cfunc_cipher	*cipher = sc->sc_cfuncs->cf_cipher;
	size_t		blocksize = sc->sc_cdata.cf_blocksize;
	size_t		todo;
	char		blkno_buf[CGD_MAXBLOCKSIZE] __aligned(CGD_BLOCKALIGN);

	DPRINTF_FOLLOW(("cgd_cipher() dir=%d\n", dir));

	/* encblkno8 stores the blocksize pre-multiplied by 8; undo that. */
	if (sc->sc_cdata.cf_mode == CGD_CIPHER_CBC_ENCBLKNO8)
		blocksize /= 8;

	KASSERT(len % blocksize == 0);
	/* ensure that sizeof(daddr_t) <= blocksize (for encblkno IVing) */
	KASSERT(sizeof(daddr_t) <= blocksize);
	KASSERT(blocksize <= CGD_MAXBLOCKSIZE);

	for (; len > 0; len -= todo) {
		todo = MIN(len, secsize);

		/* Build this sector's IV seed from its block number. */
		memset(blkno_buf, 0x0, blocksize);
		blkno2blkno_buf(blkno_buf, blkno);
		IFDEBUG(CGDB_CRYPTO, hexprint("step 1: blkno_buf",
		    blkno_buf, blocksize));

		/*
		 * Handle bollocksed up encblkno8 mistake.  We used to
		 * compute the encryption of a zero block with blkno as
		 * the CBC IV -- except in an early mistake arising
		 * from bit/byte confusion, we actually computed the
		 * encryption of the last of _eight_ zero blocks under
		 * CBC as the CBC IV.
		 *
		 * Encrypting the block number is handled inside the
		 * cipher dispatch now (even though in practice, both
		 * CBC and XTS will do the same thing), so we have to
		 * simulate the block number that would yield the same
		 * result.  So we encrypt _six_ zero blocks -- the
		 * first one and the last one are handled inside the
		 * cipher dispatch.
		 */
		if (sc->sc_cdata.cf_mode == CGD_CIPHER_CBC_ENCBLKNO8) {
			static const uint8_t zero[CGD_MAXBLOCKSIZE];
			uint8_t iv[CGD_MAXBLOCKSIZE];

			memcpy(iv, blkno_buf, blocksize);
			cipher(sc->sc_cdata.cf_priv, blkno_buf, zero,
			    6*blocksize, iv, CGD_CIPHER_ENCRYPT);
			memmove(blkno_buf, blkno_buf + 5*blocksize, blocksize);
		}

		cipher(sc->sc_cdata.cf_priv, dst, src, todo, blkno_buf, dir);

		dst += todo;
		src += todo;
		blkno++;
	}
}
1647 
#ifdef DEBUG
/*
 * Debug helper: print `len' bytes of buf as hex, prefixed by `start'
 * and the length.  No trailing newline is emitted.
 */
static void
hexprint(const char *start, void *buf, int len)
{
	const unsigned char *p = buf;

	KASSERTMSG(len >= 0, "hexprint: called with len < 0");
	printf("%s: len=%06d 0x", start, len);
	for (; len > 0; len--)
		printf("%02x", *p++);
}
#endif
1660 
/*
 * Run the compiled-in known-answer tests (selftests[]): for each
 * entry, set up a throwaway softc, encrypt the plaintext and compare
 * against the expected ciphertext, then decrypt and compare against
 * the plaintext.  Panics on any mismatch or setup failure.
 */
static void
cgd_selftest(void)
{
	struct cgd_softc sc;
	void *buf;

	for (size_t i = 0; i < __arraycount(selftests); i++) {
		const char *alg = selftests[i].alg;
		int encblkno8 = selftests[i].encblkno8;
		const uint8_t *key = selftests[i].key;
		int keylen = selftests[i].keylen;
		int txtlen = selftests[i].txtlen;

		aprint_debug("cgd: self-test %s-%d%s\n", alg, keylen,
		    encblkno8 ? " (encblkno8)" : "");

		memset(&sc, 0, sizeof(sc));

		sc.sc_cfuncs = cryptfuncs_find(alg);
		if (sc.sc_cfuncs == NULL)
			panic("%s not implemented", alg);

		/* Mirror cgd_ioctl_set's bit/byte blocksize handling. */
		sc.sc_cdata.cf_blocksize = 8 * selftests[i].blocksize;
		sc.sc_cdata.cf_mode = encblkno8 ? CGD_CIPHER_CBC_ENCBLKNO8 :
		    CGD_CIPHER_CBC_ENCBLKNO1;
		sc.sc_cdata.cf_keylen = keylen;

		sc.sc_cdata.cf_priv = sc.sc_cfuncs->cf_init(keylen,
		    key, &sc.sc_cdata.cf_blocksize);
		if (sc.sc_cdata.cf_priv == NULL)
			panic("cf_priv is NULL");
		if (sc.sc_cdata.cf_blocksize > CGD_MAXBLOCKSIZE)
			panic("bad block size %zu", sc.sc_cdata.cf_blocksize);

		if (!encblkno8)
			sc.sc_cdata.cf_blocksize /= 8;

		buf = kmem_alloc(txtlen, KM_SLEEP);
		memcpy(buf, selftests[i].ptxt, txtlen);

		/* Known-answer check: encrypt then compare. */
		cgd_cipher(&sc, buf, buf, txtlen, selftests[i].blkno,
				selftests[i].secsize, CGD_CIPHER_ENCRYPT);
		if (memcmp(buf, selftests[i].ctxt, txtlen) != 0) {
			hexdump(printf, "was", buf, txtlen);
			hexdump(printf, "exp", selftests[i].ctxt, txtlen);
			panic("cgd %s-%d encryption is broken [%zu]",
			    selftests[i].alg, keylen, i);
		}

		/* Round-trip check: decrypt and expect the plaintext back. */
		cgd_cipher(&sc, buf, buf, txtlen, selftests[i].blkno,
				selftests[i].secsize, CGD_CIPHER_DECRYPT);
		if (memcmp(buf, selftests[i].ptxt, txtlen) != 0) {
			hexdump(printf, "was", buf, txtlen);
			hexdump(printf, "exp", selftests[i].ptxt, txtlen);
			panic("cgd %s-%d decryption is broken [%zu]",
			    selftests[i].alg, keylen, i);
		}

		kmem_free(buf, txtlen);
		sc.sc_cfuncs->cf_destroy(sc.sc_cdata.cf_priv);
	}

	aprint_debug("cgd: self-tests passed\n");
}
1725 
MODULE(MODULE_CLASS_DRIVER, cgd, "blowfish,des,dk_subr,bufq_fcfs");

#ifdef _MODULE
CFDRIVER_DECL(cgd, DV_DISK, NULL);

/* Device majors filled in by devsw_attach(); -1 means not attached. */
devmajor_t cgd_bmajor = -1, cgd_cmajor = -1;
#endif
1733 
/*
 * Module control: on INIT attach the devsw and the autoconf driver
 * and cfattach entries (unwinding in reverse on failure); on FINI
 * detach them in the opposite order.  All real work only happens
 * when built as a module (_MODULE).
 */
static int
cgd_modcmd(modcmd_t cmd, void *arg)
{
	int error = 0;

	switch (cmd) {
	case MODULE_CMD_INIT:
#ifdef _MODULE
		mutex_init(&cgd_spawning_mtx, MUTEX_DEFAULT, IPL_NONE);
		cv_init(&cgd_spawning_cv, "cgspwn");

		/*
		 * Attach the {b,c}devsw's
		 */
		error = devsw_attach("cgd", &cgd_bdevsw, &cgd_bmajor,
		    &cgd_cdevsw, &cgd_cmajor);
		if (error) {
			aprint_error("%s: unable to attach %s devsw, "
			    "error %d", __func__, cgd_cd.cd_name, error);
			break;
		}

		/*
		 * Attach to autoconf database
		 */
		error = config_cfdriver_attach(&cgd_cd);
		if (error) {
			devsw_detach(&cgd_bdevsw, &cgd_cdevsw);
			aprint_error("%s: unable to register cfdriver for"
			    "%s, error %d\n", __func__, cgd_cd.cd_name, error);
			break;
		}

		error = config_cfattach_attach(cgd_cd.cd_name, &cgd_ca);
	        if (error) {
			config_cfdriver_detach(&cgd_cd);
			devsw_detach(&cgd_bdevsw, &cgd_cdevsw);
			aprint_error("%s: unable to register cfattach for"
			    "%s, error %d\n", __func__, cgd_cd.cd_name, error);
			break;
		}
#endif
		break;

	case MODULE_CMD_FINI:
#ifdef _MODULE
		/*
		 * Remove device from autoconf database
		 */
		error = config_cfattach_detach(cgd_cd.cd_name, &cgd_ca);
		if (error) {
			aprint_error("%s: failed to detach %s cfattach, "
			    "error %d\n", __func__, cgd_cd.cd_name, error);
 			break;
		}
		error = config_cfdriver_detach(&cgd_cd);
		if (error) {
			/* Re-attach what we already detached before failing. */
			(void)config_cfattach_attach(cgd_cd.cd_name, &cgd_ca);
			aprint_error("%s: failed to detach %s cfdriver, "
			    "error %d\n", __func__, cgd_cd.cd_name, error);
			break;
		}

		/*
		 * Remove {b,c}devsw's
		 */
		devsw_detach(&cgd_bdevsw, &cgd_cdevsw);

		cv_destroy(&cgd_spawning_cv);
		mutex_destroy(&cgd_spawning_mtx);
#endif
		break;

	case MODULE_CMD_STAT:
		error = ENOTTY;
		break;
	default:
		error = ENOTTY;
		break;
	}

	return error;
}
1817