/*
 * Copyright (c) 2010 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Alex Hornung <ahornung@gmail.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * This file implements the initial version of the device-mapper crypt target.
 */
#include <sys/endian.h>

#include <sys/bio.h>
#include <sys/globaldata.h>
#include <sys/kerneldump.h>
#include <sys/malloc.h>
#include <sys/mpipe.h>
#include <sys/md5.h>
#include <sys/mutex2.h>
#include <crypto/sha1.h>
#include <crypto/sha2/sha2.h>
#include <opencrypto/cryptodev.h>
#include <opencrypto/rmd160.h>
#include <machine/cpufunc.h>
#include <cpu/atomic.h>

#include <sys/ktr.h>

#include <dev/disk/dm/dm.h>
MALLOC_DEFINE(M_DMCRYPT, "dm_crypt", "Device Mapper Target Crypt");

KTR_INFO_MASTER(dmcrypt);

#if !defined(KTR_DMCRYPT)
#define KTR_DMCRYPT	KTR_ALL
#endif

KTR_INFO(KTR_DMCRYPT, dmcrypt, crypto_dispatch, 0,
    "crypto_dispatch(%p)", struct cryptop *crp);
KTR_INFO(KTR_DMCRYPT, dmcrypt, crypt_strategy, 0,
    "crypt_strategy(b_cmd = %d, bp = %p)", int cmd, struct buf *bp);
KTR_INFO(KTR_DMCRYPT, dmcrypt, crypto_write_start, 1,
    "crypto_write_start(crp = %p, bp = %p, sector = %d/%d)",
    struct cryptop *crp, struct buf *bp, int i, int sectors);
KTR_INFO(KTR_DMCRYPT, dmcrypt, crypto_cb_write_done, 1,
    "crypto_cb_write_done(crp = %p, bp = %p, n = %d)",
    struct cryptop *crp, struct buf *bp, int n);
KTR_INFO(KTR_DMCRYPT, dmcrypt, bio_write_done, 1,
    "bio_write_done(bp = %p)", struct buf *bp);
KTR_INFO(KTR_DMCRYPT, dmcrypt, crypto_write_retry, 1,
    "crypto_write_retry(bp = %p)", struct buf *bp);
KTR_INFO(KTR_DMCRYPT, dmcrypt, bio_read_done, 2,
    "bio_read_done(bp = %p)", struct buf *bp);
KTR_INFO(KTR_DMCRYPT, dmcrypt, crypto_read_start, 2,
    "crypto_read_start(crp = %p, bp = %p, sector = %d/%d)",
    struct cryptop *crp, struct buf *bp, int i, int sectors);
KTR_INFO(KTR_DMCRYPT, dmcrypt, crypto_cb_read_done, 2,
    "crypto_cb_read_done(crp = %p, bp = %p, n = %d)",
    struct cryptop *crp, struct buf *bp, int n);

struct target_crypt_config;

typedef void dispatch_t(void *);
typedef void ivgen_t(struct target_crypt_config *, u_int8_t *, size_t, off_t,
    void *);

typedef int ivgen_ctor_t(struct target_crypt_config *, char *, void **);
typedef int ivgen_dtor_t(struct target_crypt_config *, void *);

struct iv_generator {
	const char	*name;
	ivgen_ctor_t	*ctor;
	ivgen_dtor_t	*dtor;
	ivgen_t		*gen_iv;
};

struct essiv_ivgen_priv {
	struct cryptoini	crypto_session;
	struct objcache	*crp_crd_cache;
	u_int64_t	crypto_sid;
	size_t		keyhash_len;
	u_int8_t	crypto_keyhash[SHA512_DIGEST_LENGTH];
};

typedef struct target_crypt_config {
	size_t	params_len;
	dm_pdev_t *pdev;
	char	*status_str;
	int	crypto_alg;
	int	crypto_klen;
	u_int8_t	crypto_key[512>>3];

	u_int64_t	crypto_sid;
	u_int64_t	block_offset;
	int64_t		iv_offset;
	SHA512_CTX	essivsha512_ctx;

	struct cryptoini	crypto_session;

	struct iv_generator	*ivgen;
	void	*ivgen_priv;

	struct malloc_pipe	read_mpipe;
	struct malloc_pipe	write_mpipe;
} dm_target_crypt_config_t;

struct dmtc_helper {
	dm_target_crypt_config_t *priv;
	caddr_t	free_addr;
	caddr_t	orig_buf;
	caddr_t data_buf;
};

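/*
 * Per-dump state for dm_target_crypt_dump().  The crp/crd arrays and the
 * copy space are statically sized (128 sector slots, 64KB), presumably
 * because the kernel dump path cannot allocate memory;
 * dmtc_crypto_dump_start() asserts that a single dump chunk fits.
 */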
struct dmtc_dump_helper {
	dm_target_crypt_config_t *priv;
	void *data;
	size_t length;
	off_t offset;

	int sectors;
	int *ident;

	struct cryptodesc crd[128];
	struct cryptop crp[128];
	u_char space[65536];
};

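/*
 * Sizes of the mpipe buffers used per I/O.  Both hold the dmtc_helper
 * bookkeeping plus one cryptop/cryptodesc pair per DEV_BSIZE sector of a
 * MAXPHYS-sized request; the write buffer additionally reserves MAXPHYS
 * bytes because writes are encrypted into a copy of the data rather than
 * in place.
 */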
#define DMTC_BUF_SIZE_WRITE \
    MAXPHYS + sizeof(struct dmtc_helper) + \
    MAXPHYS/DEV_BSIZE*(sizeof(struct cryptop) + sizeof(struct cryptodesc))
#define DMTC_BUF_SIZE_READ \
    sizeof(struct dmtc_helper) + \
    MAXPHYS/DEV_BSIZE*(sizeof(struct cryptop) + sizeof(struct cryptodesc))

static void dmtc_crypto_dispatch(void *arg);
static void dmtc_crypto_dump_start(dm_target_crypt_config_t *priv,
				struct dmtc_dump_helper *dump_helper);
static void dmtc_crypto_read_start(dm_target_crypt_config_t *priv,
				struct bio *bio);
static void dmtc_crypto_write_start(dm_target_crypt_config_t *priv,
				struct bio *bio);
static void dmtc_bio_read_done(struct bio *bio);
static void dmtc_bio_write_done(struct bio *bio);
static int dmtc_crypto_cb_dump_done(struct cryptop *crp);
static int dmtc_crypto_cb_read_done(struct cryptop *crp);
static int dmtc_crypto_cb_write_done(struct cryptop *crp);

static ivgen_ctor_t	essiv_ivgen_ctor;
static ivgen_dtor_t	essiv_ivgen_dtor;
static ivgen_t		essiv_ivgen;
static ivgen_t		plain_ivgen;
static ivgen_t		plain64_ivgen;

static struct iv_generator ivgens[] = {
	{ .name = "essiv", .ctor = essiv_ivgen_ctor, .dtor = essiv_ivgen_dtor,
	    .gen_iv = essiv_ivgen },
	{ .name = "plain", .ctor = NULL, .dtor = NULL, .gen_iv = plain_ivgen },
	{ .name = "plain64", .ctor = NULL, .dtor = NULL, .gen_iv = plain64_ivgen },
	{ NULL, NULL, NULL, NULL }
};

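/*
 * Backing allocation for the ESSIV per-request objcache: two pointers
 * (ivpriv and the opaque cookie) followed by a cryptop and a cryptodesc,
 * matching the in-memory layout documented in essiv_ivgen() below.
 */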
struct objcache_malloc_args essiv_ivgen_malloc_args = {
		2*sizeof(void *) + (sizeof(struct cryptodesc) +
		sizeof(struct cryptop)), M_DMCRYPT };

static void
dmtc_init_mpipe(struct target_crypt_config *priv)
{
	int nmax;

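	/*
	 * Cap the mpipes at roughly 0.2% of physical memory worth of
	 * read/write buffer pairs, but always allow at least two of each.
	 */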
	nmax = (physmem*2/1000*PAGE_SIZE)/(DMTC_BUF_SIZE_WRITE + DMTC_BUF_SIZE_READ) + 1;

	if (nmax < 2)
		nmax = 2;

	kprintf("dm_target_crypt: Setting min/max mpipe buffers: %d/%d\n", 2, nmax);

	mpipe_init(&priv->write_mpipe, M_DMCRYPT, DMTC_BUF_SIZE_WRITE,
		   2, nmax, MPF_NOZERO | MPF_CALLBACK, NULL, NULL, NULL);
	mpipe_init(&priv->read_mpipe, M_DMCRYPT, DMTC_BUF_SIZE_READ,
		   2, nmax, MPF_NOZERO | MPF_CALLBACK, NULL, NULL, NULL);
}

static void
dmtc_destroy_mpipe(struct target_crypt_config *priv)
{
	mpipe_done(&priv->write_mpipe);
	mpipe_done(&priv->read_mpipe);
}

/*
 * Overwrite private information (in buf) to avoid leaking it
 */
static void
dmtc_crypto_clear(void *buf, size_t len)
{
	memset(buf, 0xFF, len);
	bzero(buf, len);
}
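
#if 0
/*
 * Unused illustrative sketch: an alternative clear routine written with a
 * volatile pointer so the stores cannot be optimized away as dead writes.
 * dmtc_crypto_clear() above relies on memset()/bzero() instead.
 */
static void
dmtc_crypto_clear_volatile(void *buf, size_t len)
{
	volatile u_char *p = buf;

	while (len--)
		*p++ = 0;
}
#endif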

/*
 * ESSIV IV Generator Routines
 */
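/*
 * ESSIV (Encrypted Salt-Sector IV) derives the per-sector IV by encrypting
 * the 64-bit little-endian sector number with a secondary key that is a
 * hash of the volume key, i.e. IV = E_{H(key)}(sector).  essiv_ivgen_ctor()
 * computes the key hash and opens the secondary crypto session,
 * essiv_ivgen() issues the per-sector encryption, and essiv_ivgen_done()
 * hands the finished request back to dmtc_crypto_dispatch().
 */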
static int
essiv_ivgen_ctor(struct target_crypt_config *priv, char *iv_hash, void **p_ivpriv)
{
	struct essiv_ivgen_priv *ivpriv;
	u_int8_t crypto_keyhash[SHA512_DIGEST_LENGTH];
	unsigned int klen, hashlen;
	int error;

	klen = (priv->crypto_klen >> 3);

	if (iv_hash == NULL)
		return EINVAL;

	if (!strcmp(iv_hash, "sha1")) {
		SHA1_CTX ctx;

		hashlen = SHA1_RESULTLEN;
		SHA1Init(&ctx);
		SHA1Update(&ctx, priv->crypto_key, klen);
		SHA1Final(crypto_keyhash, &ctx);
	} else if (!strcmp(iv_hash, "sha256")) {
		SHA256_CTX ctx;

		hashlen = SHA256_DIGEST_LENGTH;
		SHA256_Init(&ctx);
		SHA256_Update(&ctx, priv->crypto_key, klen);
		SHA256_Final(crypto_keyhash, &ctx);
	} else if (!strcmp(iv_hash, "sha384")) {
		SHA384_CTX ctx;

		hashlen = SHA384_DIGEST_LENGTH;
		SHA384_Init(&ctx);
		SHA384_Update(&ctx, priv->crypto_key, klen);
		SHA384_Final(crypto_keyhash, &ctx);
	} else if (!strcmp(iv_hash, "sha512")) {
		SHA512_CTX ctx;

		hashlen = SHA512_DIGEST_LENGTH;
		SHA512_Init(&ctx);
		SHA512_Update(&ctx, priv->crypto_key, klen);
		SHA512_Final(crypto_keyhash, &ctx);
	} else if (!strcmp(iv_hash, "md5")) {
		MD5_CTX ctx;

		hashlen = MD5_DIGEST_LENGTH;
		MD5Init(&ctx);
		MD5Update(&ctx, priv->crypto_key, klen);
		MD5Final(crypto_keyhash, &ctx);
	} else if (!strcmp(iv_hash, "rmd160") ||
		   !strcmp(iv_hash, "ripemd160")) {
		RMD160_CTX ctx;

		hashlen = 160/8;
		RMD160Init(&ctx);
		RMD160Update(&ctx, priv->crypto_key, klen);
		RMD160Final(crypto_keyhash, &ctx);
	} else {
		return EINVAL;
	}

	/* Convert hashlen to bits */
	hashlen <<= 3;

	ivpriv = kmalloc(sizeof(struct essiv_ivgen_priv), M_DMCRYPT,
	    M_WAITOK | M_ZERO);
	memcpy(ivpriv->crypto_keyhash, crypto_keyhash, sizeof(crypto_keyhash));
	ivpriv->keyhash_len = sizeof(crypto_keyhash);
	dmtc_crypto_clear(crypto_keyhash, sizeof(crypto_keyhash));

	ivpriv->crypto_session.cri_alg = priv->crypto_alg;
	ivpriv->crypto_session.cri_key = (u_int8_t *)ivpriv->crypto_keyhash;
	ivpriv->crypto_session.cri_klen = hashlen;
	ivpriv->crypto_session.cri_mlen = 0;
	ivpriv->crypto_session.cri_next = NULL;

	/*
	 * XXX: in principle we also need to check if the block size of the
	 *	cipher is a valid iv size for the block cipher.
	 */

	error = crypto_newsession(&ivpriv->crypto_sid,
				  &ivpriv->crypto_session,
				  CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_HARDWARE);
	if (error) {
		kprintf("dm_target_crypt: Error during crypto_newsession "
			"for essiv_ivgen, error = %d\n",
			error);
		dmtc_crypto_clear(ivpriv->crypto_keyhash, ivpriv->keyhash_len);
		kfree(ivpriv, M_DMCRYPT);
		return ENOTSUP;
	}

	ivpriv->crp_crd_cache = objcache_create(
	    "dmcrypt-essiv-cache", 0, 0,
	    NULL, NULL, NULL,
	    objcache_malloc_alloc,
	    objcache_malloc_free,
	    &essiv_ivgen_malloc_args);

	*p_ivpriv = ivpriv;
	return 0;
}

static int
essiv_ivgen_dtor(struct target_crypt_config *priv, void *arg)
{
	struct essiv_ivgen_priv *ivpriv;

	ivpriv = (struct essiv_ivgen_priv *)arg;
	KKASSERT(ivpriv != NULL);

	crypto_freesession(ivpriv->crypto_sid);

	objcache_destroy(ivpriv->crp_crd_cache);

	dmtc_crypto_clear(ivpriv->crypto_keyhash, ivpriv->keyhash_len);
	kfree(ivpriv, M_DMCRYPT);

	return 0;
}

static int
essiv_ivgen_done(struct cryptop *crp)
{
	struct essiv_ivgen_priv *ivpriv;
	void *free_addr;
	void *opaque;

	if (crp->crp_etype == EAGAIN)
		return crypto_dispatch(crp);

	if (crp->crp_etype != 0) {
		kprintf("dm_target_crypt: essiv_ivgen_done, "
			"crp->crp_etype = %d\n", crp->crp_etype);
	}

	free_addr = crp->crp_opaque;
	/*
	 * In-memory structure is:
	 * |  ivpriv  |  opaque  |     crp     |      crd      |
	 * | (void *) | (void *) |   (cryptop) |  (cryptodesc) |
	 */
	ivpriv = *((struct essiv_ivgen_priv **)crp->crp_opaque);
	crp->crp_opaque += sizeof(void *);
	opaque = *((void **)crp->crp_opaque);

	objcache_put(ivpriv->crp_crd_cache, free_addr);
	dmtc_crypto_dispatch(opaque);
	return 0;
}

static void
essiv_ivgen(dm_target_crypt_config_t *priv, u_int8_t *iv,
	    size_t iv_len, off_t sector, void *opaque)
{
	struct essiv_ivgen_priv *ivpriv;
	struct cryptodesc *crd;
	struct cryptop *crp;
	caddr_t space, alloc_addr;
	int error;

	ivpriv = priv->ivgen_priv;
	KKASSERT(ivpriv != NULL);

	/*
	 * In-memory structure is:
	 * |  ivpriv  |  opaque  |     crp     |      crd      |
	 * | (void *) | (void *) |   (cryptop) |  (cryptodesc) |
	 */
	alloc_addr = space = objcache_get(ivpriv->crp_crd_cache, M_WAITOK);
	*((struct essiv_ivgen_priv **)space) = ivpriv;
	space += sizeof(void *);
	*((void **)space) = opaque;
	space += sizeof(void *);
	crp = (struct cryptop *)space;
	space += sizeof(struct cryptop);
	crd = (struct cryptodesc *)space;

	bzero(iv, iv_len);
	bzero(crd, sizeof(struct cryptodesc));
	bzero(crp, sizeof(struct cryptop));
	*((off_t *)iv) = htole64(sector + priv->iv_offset);
	crp->crp_buf = (caddr_t)iv;

	crp->crp_sid = ivpriv->crypto_sid;
	crp->crp_ilen = crp->crp_olen = iv_len;

	crp->crp_opaque = alloc_addr;

	crp->crp_callback = essiv_ivgen_done;

	crp->crp_desc = crd;
	crp->crp_etype = 0;
	crp->crp_flags = CRYPTO_F_CBIFSYNC | CRYPTO_F_REL | CRYPTO_F_BATCH;

	crd->crd_alg = priv->crypto_alg;
#if 0
	crd->crd_key = (caddr_t)priv->crypto_keyhash;
	crd->crd_klen = priv->crypto_klen;
#endif

	bzero(crd->crd_iv, sizeof(crd->crd_iv));

	crd->crd_skip = 0;
	crd->crd_len = iv_len;
	crd->crd_flags = CRD_F_IV_EXPLICIT | CRD_F_IV_PRESENT;
	crd->crd_flags |= CRD_F_ENCRYPT;
	crd->crd_next = NULL;

	error = crypto_dispatch(crp);
	if (error)
		kprintf("dm_target_crypt: essiv_ivgen, error = %d\n", error);
}


static void
plain_ivgen(dm_target_crypt_config_t *priv, u_int8_t *iv,
	    size_t iv_len, off_t sector, void *opaque)
{
	bzero(iv, iv_len);
	*((uint32_t *)iv) = htole32((uint32_t)(sector + priv->iv_offset));
	dmtc_crypto_dispatch(opaque);
}

static void
plain64_ivgen(dm_target_crypt_config_t *priv, u_int8_t *iv,
    size_t iv_len, off_t sector, void *opaque)
{
	bzero(iv, iv_len);
	*((uint64_t *)iv) = htole64((uint64_t)(sector + priv->iv_offset));
	dmtc_crypto_dispatch(opaque);
}

#if 0
static void
geli_ivgen(dm_target_crypt_config_t *priv, u_int8_t *iv,
	   size_t iv_len, off_t sector, void *opaque)
{

	SHA512_CTX	ctx512;
	u_int8_t	md[SHA512_DIGEST_LENGTH]; /* Max. Digest Size */

	memcpy(&ctx512, &priv->essivsha512_ctx, sizeof(SHA512_CTX));
	SHA512_Update(&ctx512, (u_int8_t*)&sector, sizeof(off_t));
	SHA512_Final(md, &ctx512);

	memcpy(iv, md, iv_len);
	dmtc_crypto_dispatch(opaque);
}
#endif

/*
 * Init function called from dm_table_load_ioctl.
 * cryptsetup actually passes us something like this:
 * aes-cbc-essiv:sha256 7997f8af... 0 /dev/ad0s0a 8
 * i.e. <cipher>-<mode>-<ivgen>[:<ivhash>] <hexkey> <iv_offset> <device> <block_offset>
 */
static int
hex2key(char *hex, size_t key_len, u_int8_t *key)
{
	char hex_buf[3];
	size_t key_idx;

	hex_buf[2] = 0;
	for (key_idx = 0; key_idx < key_len; ++key_idx) {
		hex_buf[0] = *hex++;
		hex_buf[1] = *hex++;
		key[key_idx] = (u_int8_t)strtoul(hex_buf, NULL, 16);
	}
	hex_buf[0] = 0;
	hex_buf[1] = 0;

	return 0;
}
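
#if 0
/*
 * Illustrative sketch only (not compiled): how dm_target_crypt_init()
 * below takes a table line such as
 * "aes-cbc-essiv:sha256 <hexkey> 0 /dev/ad0s0a 8" apart.  argv[0] is
 * split on '-' into cipher, chaining mode and IV specifier, the IV
 * specifier is split again on ':' into iv_mode/iv_opt, and the hex key
 * in argv[1] is decoded with hex2key().  The helper name and the fixed
 * 256-bit key size are hypothetical.
 */
static void
dmtc_parse_example(char *arg0, char *hexkey)
{
	u_int8_t key[256 >> 3];
	char *crypto_alg, *crypto_mode, *iv_mode, *iv_opt;

	crypto_alg = strsep(&arg0, "-");	/* "aes" */
	crypto_mode = strsep(&arg0, "-");	/* "cbc" */
	iv_opt = strsep(&arg0, "-");		/* "essiv:sha256" */
	iv_mode = strsep(&iv_opt, ":");		/* "essiv" / "sha256" */

	/* 64 hex characters -> 256-bit key */
	hex2key(hexkey, sizeof(key), key);
}
#endif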

static int
dm_target_crypt_init(dm_table_entry_t *table_en, int argc, char **argv)
{
	dm_target_crypt_config_t *priv;
	size_t len;
	char *crypto_alg, *crypto_mode, *iv_mode, *iv_opt, *key, *dev;
	char *status_str;
	int i, klen, error;
	uint64_t iv_offset, block_offset;

	if (argc != 5) {
		kprintf("dm_target_crypt: invalid number of arguments, "
			"need exactly 5\n");
		return EINVAL;
	}

	len = 0;
	for (i = 0; i < argc; i++) {
		len += strlen(argv[i]);
		len++;
	}
	/* len is strlen() of input string +1 */
	status_str = kmalloc(len, M_DMCRYPT, M_WAITOK);

	crypto_alg = strsep(&argv[0], "-");
	crypto_mode = strsep(&argv[0], "-");
	iv_opt = strsep(&argv[0], "-");
	iv_mode = strsep(&iv_opt, ":");
	key = argv[1];
	iv_offset = strtouq(argv[2], NULL, 0);
	dev = argv[3];
	block_offset = strtouq(argv[4], NULL, 0);
	/* 1 hex char encodes 4 key bits, so klen (in bits) = strlen(key) * 4 */
	klen = strlen(key) << 2;

#if 0
	kprintf("dm_target_crypt - new: dev=%s, crypto_alg=%s, crypto_mode=%s, "
		"iv_mode=%s, iv_opt=%s, key=%s, iv_offset=%ju, "
		"block_offset=%ju\n",
		dev, crypto_alg, crypto_mode, iv_mode, iv_opt, key, iv_offset,
		block_offset);
#endif

	priv = kmalloc(sizeof(dm_target_crypt_config_t), M_DMCRYPT, M_WAITOK);

	/* Insert the physical device into the global pdev list */
	if ((priv->pdev = dm_pdev_insert(dev)) == NULL) {
		kprintf("dm_target_crypt: dm_pdev_insert failed\n");
		kfree(priv, M_DMCRYPT);
		kfree(status_str, M_DMCRYPT);
		return ENOENT;
	}

	/*
	 * This code checks for valid combinations of algorithm and mode.
	 * Currently supported options are:
	 *
	 * *-cbc
	 * aes-xts
	 * twofish-xts
	 * serpent-xts
	 */
	if ((strcmp(crypto_mode, "cbc") != 0) &&
	    !((strcmp(crypto_mode, "xts") == 0) &&
	    ((strcmp(crypto_alg, "aes") == 0) ||
	    (strcmp(crypto_alg, "twofish") == 0) ||
	    (strcmp(crypto_alg, "serpent") == 0))))
	{
		kprintf("dm_target_crypt: only the 'cbc' chaining mode and "
		    "the aes-xts, twofish-xts and serpent-xts modes are "
		    "supported, invalid mode '%s-%s'\n",
		    crypto_alg, crypto_mode);
		goto notsup;
	}

	if (!strcmp(crypto_alg, "aes")) {
		if (!strcmp(crypto_mode, "xts")) {
			priv->crypto_alg = CRYPTO_AES_XTS;
			if (klen != 256 && klen != 512)
				goto notsup;
		} else if (!strcmp(crypto_mode, "cbc")) {
			priv->crypto_alg = CRYPTO_AES_CBC;
			if (klen != 128 && klen != 192 && klen != 256)
				goto notsup;
		} else {
			goto notsup;
		}
		priv->crypto_klen = klen;
	} else if (!strcmp(crypto_alg, "twofish")) {
		if (!strcmp(crypto_mode, "xts")) {
			priv->crypto_alg = CRYPTO_TWOFISH_XTS;
			if (klen != 256 && klen != 512)
				goto notsup;
		} else if (!strcmp(crypto_mode, "cbc")) {
			priv->crypto_alg = CRYPTO_TWOFISH_CBC;
			if (klen != 128 && klen != 192 && klen != 256)
				goto notsup;
		} else {
			goto notsup;
		}
		priv->crypto_klen = klen;
	} else if (!strcmp(crypto_alg, "serpent")) {
		if (!strcmp(crypto_mode, "xts")) {
			priv->crypto_alg = CRYPTO_SERPENT_XTS;
			if (klen != 256 && klen != 512)
				goto notsup;
		} else if (!strcmp(crypto_mode, "cbc")) {
			priv->crypto_alg = CRYPTO_SERPENT_CBC;
			if (klen != 128 && klen != 192 && klen != 256)
				goto notsup;
		} else {
			goto notsup;
		}
		priv->crypto_klen = klen;
	} else if (!strcmp(crypto_alg, "blowfish")) {
		priv->crypto_alg = CRYPTO_BLF_CBC;
		if (klen < 128 || klen > 448 || (klen % 8) != 0)
			goto notsup;
		priv->crypto_klen = klen;
	} else if (!strcmp(crypto_alg, "3des") ||
		   !strncmp(crypto_alg, "des3", 4)) {
		priv->crypto_alg = CRYPTO_3DES_CBC;
		if (klen != 168)
			goto notsup;
		priv->crypto_klen = 168;
	} else if (!strcmp(crypto_alg, "camellia")) {
		priv->crypto_alg = CRYPTO_CAMELLIA_CBC;
		if (klen != 128 && klen != 192 && klen != 256)
			goto notsup;
		priv->crypto_klen = klen;
	} else if (!strcmp(crypto_alg, "skipjack")) {
		priv->crypto_alg = CRYPTO_SKIPJACK_CBC;
		if (klen != 80)
			goto notsup;
		priv->crypto_klen = 80;
	} else if (!strcmp(crypto_alg, "cast5")) {
		priv->crypto_alg = CRYPTO_CAST_CBC;
		if (klen != 128)
			goto notsup;
		priv->crypto_klen = 128;
	} else if (!strcmp(crypto_alg, "null")) {
		priv->crypto_alg = CRYPTO_NULL_CBC;
		if (klen != 128)
			goto notsup;
		priv->crypto_klen = 128;
	} else {
		kprintf("dm_target_crypt: Unsupported crypto algorithm: %s\n",
			crypto_alg);
		goto notsup;
	}

	/* Save length of param string */
	priv->params_len = len;
	priv->block_offset = block_offset;
	priv->iv_offset = iv_offset - block_offset;

	dm_table_add_deps(table_en, priv->pdev);

	dm_table_init_target(table_en, priv);

	error = hex2key(key, priv->crypto_klen >> 3,
			(u_int8_t *)priv->crypto_key);

	if (error) {
		kprintf("dm_target_crypt: hex2key failed, "
			"invalid key format\n");
		goto notsup;
	}

	/* Find the requested IV generator */
	for (i = 0; ivgens[i].name != NULL; i++) {
		if (!strcmp(iv_mode, ivgens[i].name))
			break;
	}

	if (ivgens[i].name == NULL) {
		kprintf("dm_target_crypt: iv_mode='%s' unsupported\n",
			iv_mode);
		goto notsup;
	}

	/* Call our ivgen constructor */
	if (ivgens[i].ctor != NULL) {
		error = ivgens[i].ctor(priv, iv_opt,
		    &priv->ivgen_priv);
		if (error) {
			kprintf("dm_target_crypt: ctor for '%s' failed\n",
			    ivgens[i].name);
			goto notsup;
		}
	}

	priv->ivgen = &ivgens[i];

	priv->crypto_session.cri_alg = priv->crypto_alg;
	priv->crypto_session.cri_key = (u_int8_t *)priv->crypto_key;
	priv->crypto_session.cri_klen = priv->crypto_klen;
	priv->crypto_session.cri_mlen = 0;
	priv->crypto_session.cri_next = NULL;

	error = crypto_newsession(&priv->crypto_sid,
				  &priv->crypto_session,
				  CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_HARDWARE);
	if (error) {
		kprintf("dm_target_crypt: Error during crypto_newsession, "
			"error = %d\n",
			error);
		goto notsup;
	}

	memset(key, '0', strlen(key));
	if (iv_opt) {
		ksprintf(status_str, "%s-%s-%s:%s %s %ju %s %ju",
		    crypto_alg, crypto_mode, iv_mode, iv_opt,
		    key, iv_offset, dev, block_offset);
	} else {
		ksprintf(status_str, "%s-%s-%s %s %ju %s %ju",
		    crypto_alg, crypto_mode, iv_mode,
		    key, iv_offset, dev, block_offset);
	}
	priv->status_str = status_str;

	/* Initialize mpipes */
	dmtc_init_mpipe(priv);

	return 0;

notsup:
	kprintf("dm_target_crypt: ENOTSUP\n");
	kfree(status_str, M_DMCRYPT);
	return ENOTSUP;
}

/* Table routine called to get params string. */
static char *
dm_target_crypt_table(void *target_config)
{
	dm_target_crypt_config_t *priv;
	char *params;

	priv = target_config;

	params = dm_alloc_string(DM_MAX_PARAMS_SIZE);

	ksnprintf(params, DM_MAX_PARAMS_SIZE, "%s",
	    priv->status_str);

	return params;
}

static int
dm_target_crypt_destroy(dm_table_entry_t *table_en)
{
	dm_target_crypt_config_t *priv;

	/*
	 * Disconnect the crypt config before unbusying the target.
	 */
	priv = table_en->target_config;
	if (priv == NULL)
		return 0;
	dm_pdev_decr(priv->pdev);

	/*
	 * Clean up the crypt config
	 *
	 * Overwrite the private information before freeing memory to
	 * avoid leaking it.
	 */
	if (priv->status_str) {
		dmtc_crypto_clear(priv->status_str, strlen(priv->status_str));
		kfree(priv->status_str, M_DMCRYPT);
		crypto_freesession(priv->crypto_sid);
	}

	if ((priv->ivgen) && (priv->ivgen->dtor != NULL)) {
		priv->ivgen->dtor(priv, priv->ivgen_priv);
	}

	/* Destroy mpipes */
	dmtc_destroy_mpipe(priv);

	dmtc_crypto_clear(priv, sizeof(dm_target_crypt_config_t));
	kfree(priv, M_DMCRYPT);

	return 0;
}

/************************************************************************
 *			STRATEGY SUPPORT FUNCTIONS			*
 ************************************************************************
 *
 * READ PATH:	doio -> bio_read_done -> crypto_work -> crypto_cb_read_done
 * WRITE PATH:	crypto_work -> crypto_cb_write_done -> doio -> bio_write_done
 */
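/*
 * Both paths split each BIO into DEV_BSIZE-sized crypto requests, one
 * cryptop/cryptodesc pair per sector, carved out of an mpipe buffer.  The
 * number of outstanding requests is tracked in bio_caller_info3.value;
 * each crypto callback decrements it and the callback that takes it to
 * zero (atomic_fetchadd_int() returning 1) completes the I/O.
 */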

/*
 * Wrapper around crypto_dispatch() to match dispatch_t type
 */
static void
dmtc_crypto_dispatch(void *arg)
{
	struct cryptop *crp;

	crp = (struct cryptop *)arg;
	KKASSERT(crp != NULL);
	KTR_LOG(dmcrypt_crypto_dispatch, crp);
	crypto_dispatch(crp);
}

/*
 * Start IO operation, called from dmstrategy routine.
 */
static int
dm_target_crypt_strategy(dm_table_entry_t *table_en, struct buf *bp)
{
	struct bio *bio;

	dm_target_crypt_config_t *priv;
	priv = table_en->target_config;

	/* Get rid of stuff we can't really handle */
	if ((bp->b_cmd == BUF_CMD_READ) || (bp->b_cmd == BUF_CMD_WRITE)) {
		if (((bp->b_bcount % DEV_BSIZE) != 0) || (bp->b_bcount == 0)) {
			kprintf("dm_target_crypt_strategy: can't really "
				"handle bp->b_bcount = %d\n",
				bp->b_bcount);
			bp->b_error = EINVAL;
			bp->b_flags |= B_ERROR | B_INVAL;
			biodone(&bp->b_bio1);
			return 0;
		}
	}

	KTR_LOG(dmcrypt_crypt_strategy, bp->b_cmd, bp);

	switch (bp->b_cmd) {
	case BUF_CMD_READ:
		bio = push_bio(&bp->b_bio1);
		bio->bio_offset = bp->b_bio1.bio_offset +
				  priv->block_offset * DEV_BSIZE;
		bio->bio_caller_info1.ptr = priv;
		bio->bio_done = dmtc_bio_read_done;
		vn_strategy(priv->pdev->pdev_vnode, bio);
		break;
	case BUF_CMD_WRITE:
		bio = push_bio(&bp->b_bio1);
		bio->bio_offset = bp->b_bio1.bio_offset +
				  priv->block_offset * DEV_BSIZE;
		bio->bio_caller_info1.ptr = priv;
		dmtc_crypto_write_start(priv, bio);
		break;
	default:
		vn_strategy(priv->pdev->pdev_vnode, &bp->b_bio1);
		break;
	}
	return 0;
}

/*
 * STRATEGY READ PATH PART 1/3 (after read BIO completes)
 */
static void
dmtc_bio_read_done(struct bio *bio)
{
	struct bio *obio;

	dm_target_crypt_config_t *priv;

	KTR_LOG(dmcrypt_bio_read_done, bio->bio_buf);

	/*
	 * If a read error occurs we shortcut the operation, otherwise
	 * go on to stage 2.
	 */
	if (bio->bio_buf->b_flags & B_ERROR) {
		obio = pop_bio(bio);
		biodone(obio);
	} else {
		priv = bio->bio_caller_info1.ptr;
		dmtc_crypto_read_start(priv, bio);
	}
}

/*
 * STRATEGY READ PATH PART 2/3
 */
static void
dmtc_crypto_read_retry(void *arg1, void *arg2)
{
	dm_target_crypt_config_t *priv = arg1;
	struct bio *bio = arg2;

	dmtc_crypto_read_start(priv, bio);
}

static void
dmtc_crypto_read_start(dm_target_crypt_config_t *priv, struct bio *bio)
{
	struct dmtc_helper *dmtc;
	struct cryptodesc *crd;
	struct cryptop *crp;
	int i, bytes, sectors, sz;
	off_t isector;
	u_char *ptr, *space;

	/*
	 * Note: b_resid no good after read I/O, it will be 0, use
	 *	 b_bcount.
	 */
	bytes = bio->bio_buf->b_bcount;
	isector = bio->bio_offset / DEV_BSIZE;	/* ivgen salt base? */
	sectors = bytes / DEV_BSIZE;		/* Number of sectors */
	sz = sectors * (sizeof(*crp) + sizeof(*crd));

	/*
	 * For reads with bogus page we can't decrypt in place as stuff
	 * can get ripped out from under us.
	 *
	 * XXX actually it looks like we can, and in any case the initial
	 * read already completed and threw crypted data into the buffer
	 * cache buffer.  Disable for now.
	 */
	space = mpipe_alloc_callback(&priv->read_mpipe,
				     dmtc_crypto_read_retry, priv, bio);
	if (space == NULL)
		return;

	dmtc = (struct dmtc_helper *)space;
	dmtc->free_addr = space;
	space += sizeof(struct dmtc_helper);
	dmtc->orig_buf = NULL;
	dmtc->data_buf = bio->bio_buf->b_data;
	dmtc->priv = priv;
	bio->bio_caller_info2.ptr = dmtc;
	bio->bio_buf->b_error = 0;

	/*
	 * Load crypto descriptors (crp/crd loop)
	 */
	bzero(space, sz);
	ptr = space;
	bio->bio_caller_info3.value = sectors;
	cpu_sfence();
#if 0
	kprintf("Read, bytes = %d (b_bcount), "
		"sectors = %d (bio = %p, b_cmd = %d)\n",
		bytes, sectors, bio, bio->bio_buf->b_cmd);
#endif
	for (i = 0; i < sectors; i++) {
		crp = (struct cryptop *)ptr;
		ptr += sizeof(*crp);
		crd = (struct cryptodesc *)ptr;
		ptr += sizeof(*crd);

		crp->crp_buf = dmtc->data_buf + i * DEV_BSIZE;

		crp->crp_sid = priv->crypto_sid;
		crp->crp_ilen = crp->crp_olen = DEV_BSIZE;

		crp->crp_opaque = (void *)bio;

		crp->crp_callback = dmtc_crypto_cb_read_done;
		crp->crp_desc = crd;
		crp->crp_etype = 0;
		crp->crp_flags = CRYPTO_F_CBIFSYNC | CRYPTO_F_REL |
				 CRYPTO_F_BATCH;

		crd->crd_alg = priv->crypto_alg;
#if 0
		crd->crd_key = (caddr_t)priv->crypto_key;
		crd->crd_klen = priv->crypto_klen;
#endif

		crd->crd_skip = 0;
		crd->crd_len = DEV_BSIZE /* XXX */;
		crd->crd_flags = CRD_F_IV_EXPLICIT | CRD_F_IV_PRESENT;
		crd->crd_next = NULL;

		crd->crd_flags &= ~CRD_F_ENCRYPT;

		KTR_LOG(dmcrypt_crypto_read_start, crp, bio->bio_buf, i,
		    sectors);

		/*
		 * Note: last argument is used to generate salt(?) and is
		 *	 a 64 bit value, but the original code passed an
		 *	 int.  Changing it now will break pre-existing
		 *	 crypt volumes.
		 */
		priv->ivgen->gen_iv(priv, crd->crd_iv, sizeof(crd->crd_iv),
				    isector + i, crp);
	}
}

/*
 * STRATEGY READ PATH PART 3/3
 */
static int
dmtc_crypto_cb_read_done(struct cryptop *crp)
{
	struct dmtc_helper *dmtc;
	struct bio *bio, *obio;
	int n;

	if (crp->crp_etype == EAGAIN)
		return crypto_dispatch(crp);

	bio = (struct bio *)crp->crp_opaque;
	KKASSERT(bio != NULL);

	/*
	 * Cumulative error
	 */
	if (crp->crp_etype) {
		kprintf("dm_target_crypt: dmtc_crypto_cb_read_done "
			"crp_etype = %d\n",
			crp->crp_etype);
		bio->bio_buf->b_error = crp->crp_etype;
	}

	/*
	 * On the last chunk of the decryption we do any required copybacks
	 * and complete the I/O.
	 */
	n = atomic_fetchadd_int(&bio->bio_caller_info3.value, -1);
#if 0
	kprintf("dmtc_crypto_cb_read_done %p, n = %d\n", bio, n);
#endif

	KTR_LOG(dmcrypt_crypto_cb_read_done, crp, bio->bio_buf, n);

	if (n == 1) {
		/*
		 * For the B_HASBOGUS case we didn't decrypt in place,
		 * so we need to copy stuff back into the buf.
		 *
		 * (disabled for now).
		 */
		dmtc = bio->bio_caller_info2.ptr;
		if (bio->bio_buf->b_error) {
			bio->bio_buf->b_flags |= B_ERROR;
		}
#if 0
		else if (bio->bio_buf->b_flags & B_HASBOGUS) {
			memcpy(bio->bio_buf->b_data, dmtc->data_buf,
			       bio->bio_buf->b_bcount);
		}
#endif
		mpipe_free(&dmtc->priv->read_mpipe, dmtc->free_addr);
		obio = pop_bio(bio);
		biodone(obio);
	}
	return 0;
}
/* END OF STRATEGY READ SECTION */

/*
 * STRATEGY WRITE PATH PART 1/3
 */

static void
dmtc_crypto_write_retry(void *arg1, void *arg2)
{
	dm_target_crypt_config_t *priv = arg1;
	struct bio *bio = arg2;

	KTR_LOG(dmcrypt_crypto_write_retry, bio->bio_buf);

	dmtc_crypto_write_start(priv, bio);
}

static void
dmtc_crypto_write_start(dm_target_crypt_config_t *priv, struct bio *bio)
{
	struct dmtc_helper *dmtc;
	struct cryptodesc *crd;
	struct cryptop *crp;
	int i, bytes, sectors, sz;
	off_t isector;
	u_char *ptr, *space;

	/*
	 * Use b_bcount for consistency
	 */
	bytes = bio->bio_buf->b_bcount;

	isector = bio->bio_offset / DEV_BSIZE;	/* ivgen salt base? */
	sectors = bytes / DEV_BSIZE;		/* Number of sectors */
	sz = sectors * (sizeof(*crp) + sizeof(*crd));

	/*
	 * For writes (and reads with a bogus page) don't encrypt in place;
	 * work on a copy of the data in the mpipe buffer instead.
	 */
	space = mpipe_alloc_callback(&priv->write_mpipe,
				     dmtc_crypto_write_retry, priv, bio);
	if (space == NULL)
		return;

	dmtc = (struct dmtc_helper *)space;
	dmtc->free_addr = space;
	space += sizeof(struct dmtc_helper);
	memcpy(space + sz, bio->bio_buf->b_data, bytes);

	bio->bio_caller_info2.ptr = dmtc;
	bio->bio_buf->b_error = 0;

	dmtc->orig_buf = bio->bio_buf->b_data;
	dmtc->data_buf = space + sz;
	dmtc->priv = priv;

	/*
	 * Load crypto descriptors (crp/crd loop)
	 */
	bzero(space, sz);
	ptr = space;
	bio->bio_caller_info3.value = sectors;
	cpu_sfence();
#if 0
	kprintf("Write, bytes = %d (b_bcount), "
		"sectors = %d (bio = %p, b_cmd = %d)\n",
		bytes, sectors, bio, bio->bio_buf->b_cmd);
#endif
	for (i = 0; i < sectors; i++) {
		crp = (struct cryptop *)ptr;
		ptr += sizeof(*crp);
		crd = (struct cryptodesc *)ptr;
		ptr += sizeof(*crd);

		crp->crp_buf = dmtc->data_buf + i * DEV_BSIZE;

		crp->crp_sid = priv->crypto_sid;
		crp->crp_ilen = crp->crp_olen = DEV_BSIZE;

		crp->crp_opaque = (void *)bio;

		crp->crp_callback = dmtc_crypto_cb_write_done;
		crp->crp_desc = crd;
		crp->crp_etype = 0;
		crp->crp_flags = CRYPTO_F_CBIFSYNC | CRYPTO_F_REL |
				 CRYPTO_F_BATCH;

		crd->crd_alg = priv->crypto_alg;
#if 0
		crd->crd_key = (caddr_t)priv->crypto_key;
		crd->crd_klen = priv->crypto_klen;
#endif

		crd->crd_skip = 0;
		crd->crd_len = DEV_BSIZE /* XXX */;
		crd->crd_flags = CRD_F_IV_EXPLICIT | CRD_F_IV_PRESENT;
		crd->crd_next = NULL;

		crd->crd_flags |= CRD_F_ENCRYPT;

		/*
		 * Note: last argument is used to generate salt(?) and is
		 *	 a 64 bit value, but the original code passed an
		 *	 int.  Changing it now will break pre-existing
		 *	 crypt volumes.
		 */

		KTR_LOG(dmcrypt_crypto_write_start, crp, bio->bio_buf,
		    i, sectors);

		priv->ivgen->gen_iv(priv, crd->crd_iv, sizeof(crd->crd_iv),
				    isector + i, crp);
	}
}

/*
 * STRATEGY WRITE PATH PART 2/3
 */
static int
dmtc_crypto_cb_write_done(struct cryptop *crp)
{
	struct dmtc_helper *dmtc;
	dm_target_crypt_config_t *priv;
	struct bio *bio, *obio;
	int n;

	if (crp->crp_etype == EAGAIN)
		return crypto_dispatch(crp);

	bio = (struct bio *)crp->crp_opaque;
	KKASSERT(bio != NULL);

	/*
	 * Cumulative error
	 */
	if (crp->crp_etype != 0) {
		kprintf("dm_target_crypt: dmtc_crypto_cb_write_done "
			"crp_etype = %d\n",
			crp->crp_etype);
		bio->bio_buf->b_error = crp->crp_etype;
	}

	/*
	 * On the last chunk of the encryption we issue the write
	 */
	n = atomic_fetchadd_int(&bio->bio_caller_info3.value, -1);
#if 0
	kprintf("dmtc_crypto_cb_write_done %p, n = %d\n", bio, n);
#endif

	KTR_LOG(dmcrypt_crypto_cb_write_done, crp, bio->bio_buf, n);

	if (n == 1) {
		dmtc = bio->bio_caller_info2.ptr;
		priv = (dm_target_crypt_config_t *)bio->bio_caller_info1.ptr;

		if (bio->bio_buf->b_error) {
			bio->bio_buf->b_flags |= B_ERROR;
			mpipe_free(&dmtc->priv->write_mpipe, dmtc->free_addr);
			obio = pop_bio(bio);
			biodone(obio);
		} else {
			dmtc->orig_buf = bio->bio_buf->b_data;
			bio->bio_buf->b_data = dmtc->data_buf;
			bio->bio_done = dmtc_bio_write_done;
			vn_strategy(priv->pdev->pdev_vnode, bio);
		}
	}
	return 0;
}

/*
 * STRATEGY WRITE PATH PART 3/3
 */
static void
dmtc_bio_write_done(struct bio *bio)
{
	struct dmtc_helper *dmtc;
	struct bio *obio;

	dmtc = bio->bio_caller_info2.ptr;
	bio->bio_buf->b_data = dmtc->orig_buf;
	mpipe_free(&dmtc->priv->write_mpipe, dmtc->free_addr);

	KTR_LOG(dmcrypt_bio_write_done, bio->bio_buf);

	obio = pop_bio(bio);
	biodone(obio);
}
/* END OF STRATEGY WRITE SECTION */



/* DUMPING MAGIC */

extern int tsleep_crypto_dump;

static int
dm_target_crypt_dump(dm_table_entry_t *table_en, void *data, size_t length, off_t offset)
{
	static struct dmtc_dump_helper dump_helper;
	dm_target_crypt_config_t *priv;
	int id;
	static int first_call = 1;

	priv = table_en->target_config;

	if (first_call) {
		first_call = 0;
		dump_reactivate_cpus();
	}

	/* Magically enable tsleep */
	tsleep_crypto_dump = 1;
	id = 0;

	/*
	 * 0 length means flush buffers and return
	 */
	if (length == 0) {
		if (priv->pdev->pdev_vnode->v_rdev == NULL) {
			tsleep_crypto_dump = 0;
			return ENXIO;
		}
		dev_ddump(priv->pdev->pdev_vnode->v_rdev,
		    data, 0, offset, 0);
		tsleep_crypto_dump = 0;
		return 0;
	}

	bzero(&dump_helper, sizeof(dump_helper));
	dump_helper.priv = priv;
	dump_helper.data = data;
	dump_helper.length = length;
	dump_helper.offset = offset +
	    priv->block_offset * DEV_BSIZE;
	dump_helper.ident = &id;
	dmtc_crypto_dump_start(priv, &dump_helper);

	/*
	 * Hackery to make stuff appear synchronous. The crypto callback will
	 * set id to 1 and call wakeup on it. If the request completed
	 * synchronously, id will be 1 and we won't bother to sleep. If not,
	 * the crypto request will complete asynchronously and we sleep until
	 * it's done.
	 */
	if (id == 0)
		tsleep(&dump_helper, 0, "cryptdump", 0);

	dump_helper.offset = dm_pdev_correct_dump_offset(priv->pdev,
	    dump_helper.offset);

	dev_ddump(priv->pdev->pdev_vnode->v_rdev,
	    dump_helper.space, 0, dump_helper.offset,
	    dump_helper.length);

	tsleep_crypto_dump = 0;
	return 0;
}

static void
dmtc_crypto_dump_start(dm_target_crypt_config_t *priv, struct dmtc_dump_helper *dump_helper)
{
	struct cryptodesc *crd;
	struct cryptop *crp;
	int i, bytes, sectors;
	off_t isector;

	bytes = dump_helper->length;

	isector = dump_helper->offset / DEV_BSIZE;	/* ivgen salt base? */
	sectors = bytes / DEV_BSIZE;		/* Number of sectors */
	dump_helper->sectors = sectors;
#if 0
	kprintf("Dump, bytes = %d, "
		"sectors = %d, LENGTH=%zu\n", bytes, sectors, dump_helper->length);
#endif
	KKASSERT(dump_helper->length <= 65536);

	memcpy(dump_helper->space, dump_helper->data, bytes);

	cpu_sfence();

	for (i = 0; i < sectors; i++) {
		crp = &dump_helper->crp[i];
		crd = &dump_helper->crd[i];

		crp->crp_buf = dump_helper->space + i * DEV_BSIZE;

		crp->crp_sid = priv->crypto_sid;
		crp->crp_ilen = crp->crp_olen = DEV_BSIZE;

		crp->crp_opaque = (void *)dump_helper;

		crp->crp_callback = dmtc_crypto_cb_dump_done;
		crp->crp_desc = crd;
		crp->crp_etype = 0;
		crp->crp_flags = CRYPTO_F_CBIFSYNC | CRYPTO_F_REL |
				 CRYPTO_F_BATCH;

		crd->crd_alg = priv->crypto_alg;

		crd->crd_skip = 0;
		crd->crd_len = DEV_BSIZE /* XXX */;
		crd->crd_flags = CRD_F_IV_EXPLICIT | CRD_F_IV_PRESENT;
		crd->crd_next = NULL;

		crd->crd_flags |= CRD_F_ENCRYPT;

		/*
		 * Note: last argument is used to generate salt(?) and is
		 *	 a 64 bit value, but the original code passed an
		 *	 int.  Changing it now will break pre-existing
		 *	 crypt volumes.
		 */
		priv->ivgen->gen_iv(priv, crd->crd_iv, sizeof(crd->crd_iv),
				    isector + i, crp);
	}
}

static int
dmtc_crypto_cb_dump_done(struct cryptop *crp)
{
	struct dmtc_dump_helper *dump_helper;
	int n;

	if (crp->crp_etype == EAGAIN)
		return crypto_dispatch(crp);

	dump_helper = (struct dmtc_dump_helper *)crp->crp_opaque;
	KKASSERT(dump_helper != NULL);

	if (crp->crp_etype != 0) {
		kprintf("dm_target_crypt: dmtc_crypto_cb_dump_done "
			"crp_etype = %d\n",
			crp->crp_etype);
		return crp->crp_etype;
	}

	/*
	 * On the last chunk of the encryption we return control
	 */
	n = atomic_fetchadd_int(&dump_helper->sectors, -1);

	if (n == 1) {
		atomic_add_int(dump_helper->ident, 1);
		wakeup(dump_helper);
	}

	return 0;
}

static int
dmtc_mod_handler(module_t mod, int type, void *unused)
{
	dm_target_t *dmt = NULL;
	int err = 0;

	switch (type) {
	case MOD_LOAD:
		if ((dmt = dm_target_lookup("crypt")) != NULL) {
			dm_target_unbusy(dmt);
			return EEXIST;
		}
		dmt = dm_target_alloc("crypt");
		dmt->version[0] = 1;
		dmt->version[1] = 6;
		dmt->version[2] = 0;
		dmt->init = &dm_target_crypt_init;
		dmt->destroy = &dm_target_crypt_destroy;
		dmt->strategy = &dm_target_crypt_strategy;
		dmt->table = &dm_target_crypt_table;
		dmt->dump = &dm_target_crypt_dump;

		err = dm_target_insert(dmt);
		if (!err)
			kprintf("dm_target_crypt: Successfully initialized\n");
		break;

	case MOD_UNLOAD:
		err = dm_target_remove("crypt");
		if (err == 0) {
			kprintf("dm_target_crypt: unloaded\n");
		}
		break;
	}

	return err;
}

DM_TARGET_MODULE(dm_target_crypt, dmtc_mod_handler);