1 /*
2  * Copyright (c) 2010 The DragonFly Project.  All rights reserved.
3  *
4  * This code is derived from software contributed to The DragonFly Project
5  * by Alex Hornung <ahornung@gmail.com>
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  *
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in
15  *    the documentation and/or other materials provided with the
16  *    distribution.
17  * 3. Neither the name of The DragonFly Project nor the names of its
18  *    contributors may be used to endorse or promote products derived
19  *    from this software without specific, prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
24  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
25  * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
26  * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
27  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
28  * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
29  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
30  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
31  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32  * SUCH DAMAGE.
33  */
34 
35 /*
36  * This file implements initial version of device-mapper crypt target.
37  */
38 #include <sys/endian.h>
39 
40 #include <sys/bio.h>
41 #include <sys/globaldata.h>
42 #include <sys/kerneldump.h>
43 #include <sys/malloc.h>
44 #include <sys/mpipe.h>
45 #include <sys/md5.h>
46 #include <crypto/sha1.h>
47 #include <crypto/sha2/sha2.h>
48 #include <opencrypto/cryptodev.h>
49 #include <opencrypto/rmd160.h>
50 #include <machine/cpufunc.h>
51 #include <cpu/atomic.h>
52 
53 #include <sys/ktr.h>
54 #include <sys/spinlock2.h>
55 
56 #include <dev/disk/dm/dm.h>
57 MALLOC_DEFINE(M_DMCRYPT, "dm_crypt", "Device Mapper Target Crypt");
58 
59 KTR_INFO_MASTER(dmcrypt);
60 
61 #if !defined(KTR_DMCRYPT)
62 #define KTR_DMCRYPT	KTR_ALL
63 #endif
64 
65 KTR_INFO(KTR_DMCRYPT, dmcrypt, crypto_dispatch, 0,
66     "crypto_dispatch(%p)", struct cryptop *crp);
67 KTR_INFO(KTR_DMCRYPT, dmcrypt, crypt_strategy, 0,
68     "crypt_strategy(b_cmd = %d, bp = %p)", int cmd, struct buf *bp);
69 KTR_INFO(KTR_DMCRYPT, dmcrypt, crypto_write_start, 1,
70     "crypto_write_start(crp = %p, bp = %p, sector = %d/%d)",
71     struct cryptop *crp, struct buf *bp, int i, int sectors);
72 KTR_INFO(KTR_DMCRYPT, dmcrypt, crypto_cb_write_done, 1,
73     "crypto_cb_write_done(crp = %p, bp = %p, n = %d)",
74     struct cryptop *crp, struct buf *bp, int n);
75 KTR_INFO(KTR_DMCRYPT, dmcrypt, bio_write_done, 1,
76     "bio_write_done(bp = %p)", struct buf *bp);
77 KTR_INFO(KTR_DMCRYPT, dmcrypt, crypto_write_retry, 1,
78     "crypto_write_retry(crp = %p)", struct buf *bp);
79 KTR_INFO(KTR_DMCRYPT, dmcrypt, bio_read_done, 2,
80     "bio_read_done(bp = %p)", struct buf *bp);
81 KTR_INFO(KTR_DMCRYPT, dmcrypt, crypto_read_start, 2,
82     "crypto_read_start(crp = %p, bp = %p, sector = %d/%d)",
83     struct cryptop *crp, struct buf *bp, int i, int sectors);
84 KTR_INFO(KTR_DMCRYPT, dmcrypt, crypto_cb_read_done, 2,
85     "crypto_cb_read_done(crp = %p, bp = %p, n = %d)",
86     struct cryptop *crp, struct buf *bp, int n);
87 
struct target_crypt_config;

/* Deferred-work callback type (matches dmtc_crypto_dispatch()) */
typedef void dispatch_t(void *);
/* IV generator: fill the IV buffer for a sector, then dispatch opaque */
typedef void ivgen_t(struct target_crypt_config *, u_int8_t *, size_t, off_t,
    void *);

/* Optional constructor/destructor for IV generators with private state */
typedef int ivgen_ctor_t(struct target_crypt_config *, char *, void **);
typedef int ivgen_dtor_t(struct target_crypt_config *, void *);

/* One entry of the ivgens[] lookup table; ctor/dtor may be NULL */
struct iv_generator {
	const char	*name;
	ivgen_ctor_t	*ctor;
	ivgen_dtor_t	*dtor;
	ivgen_t		*gen_iv;
};

/*
 * Preallocated per-request context for the ESSIV generator.  Contexts
 * live on a singly-linked free list hanging off essiv_ivgen_priv and
 * embed the cryptop/cryptodesc used to encrypt the IV.
 */
struct essiv_ivgen_data {
	struct essiv_ivgen_data *next;	/* free-list linkage */
	void		*ivpriv;	/* owning essiv_ivgen_priv */
	void		*opaque;	/* caller's cryptop, dispatched on completion */
	struct cryptop	crp;		/* IV-encryption request */
	struct cryptodesc crd;		/* IV-encryption descriptor */
};

/* Private state of the ESSIV generator, created by essiv_ivgen_ctor() */
struct essiv_ivgen_priv {
	struct cryptoini	crypto_session;	/* session template (hashed key) */
	struct spinlock		ivdata_spin;	/* protects ivdata_base */
	struct essiv_ivgen_data	*ivdata_base;	/* free list of contexts */
	u_int64_t		crypto_sid;	/* opencrypto session id */
	size_t			keyhash_len;	/* bytes of crypto_keyhash in use */
	u_int8_t		crypto_keyhash[SHA512_DIGEST_LENGTH];
};
120 
/*
 * Per-target instance state, created by dm_target_crypt_init() and
 * destroyed by dm_target_crypt_destroy().
 */
typedef struct target_crypt_config {
	size_t	params_len;	/* length of the parameter string */
	dm_pdev_t *pdev;	/* underlying physical device */
	char	*status_str;	/* params string reported by _table() */
	int	crypto_alg;	/* CRYPTO_* cipher id */
	int	crypto_klen;	/* key length in bits */
	u_int8_t	crypto_key[512>>3];	/* raw key, up to 512 bits */

	u_int64_t	crypto_sid;	/* opencrypto session id */
	u_int64_t	block_offset;	/* start offset on pdev, in sectors */
	int64_t		iv_offset;	/* IV salt offset (iv_offset - block_offset) */
	SHA512_CTX	essivsha512_ctx;	/* only referenced by the disabled geli_ivgen() */

	struct cryptoini	crypto_session;

	struct iv_generator	*ivgen;	/* selected entry of ivgens[] */
	void	*ivgen_priv;		/* state allocated by ivgen->ctor */

	struct malloc_pipe	read_mpipe;	/* preallocated read crypto buffers */
	struct malloc_pipe	write_mpipe;	/* preallocated write crypto buffers */
} dm_target_crypt_config_t;

/*
 * Per-I/O bookkeeping placed at the head of an mpipe buffer; free_addr
 * is what gets handed back to mpipe_free().
 */
struct dmtc_helper {
	dm_target_crypt_config_t *priv;
	caddr_t	free_addr;	/* base of the mpipe allocation */
	caddr_t	orig_buf;	/* original data buffer (NULL on reads) */
	caddr_t data_buf;	/* buffer the crypto ops operate on */
};

/*
 * Helper for the crash-dump path; statically sized for up to 128
 * sectors (64KB) per dump chunk.
 */
struct dmtc_dump_helper {
	dm_target_crypt_config_t *priv;
	void *data;
	size_t length;
	off_t offset;

	int sectors;
	int *ident;

	struct cryptodesc crd[128];
	struct cryptop crp[128];
	u_char space[65536];
};

/*
 * mpipe buffer sizes: a dmtc_helper plus one cryptop/cryptodesc pair
 * per sector; writes additionally carry a MAXPHYS bounce buffer.
 */
#define DMTC_BUF_SIZE_WRITE \
    (MAXPHYS + sizeof(struct dmtc_helper) + \
     MAXPHYS/DEV_BSIZE*(sizeof(struct cryptop) + sizeof(struct cryptodesc)))
#define DMTC_BUF_SIZE_READ \
    (sizeof(struct dmtc_helper) + \
     MAXPHYS/DEV_BSIZE*(sizeof(struct cryptop) + sizeof(struct cryptodesc)))
170 
/* Forward declarations for the strategy read/write/dump pipelines */
static void dmtc_crypto_dispatch(void *arg);
static void dmtc_crypto_dump_start(dm_target_crypt_config_t *priv,
				struct dmtc_dump_helper *dump_helper);
static void dmtc_crypto_read_start(dm_target_crypt_config_t *priv,
				struct bio *bio);
static void dmtc_crypto_write_start(dm_target_crypt_config_t *priv,
				struct bio *bio);
static void dmtc_bio_read_done(struct bio *bio);
static void dmtc_bio_write_done(struct bio *bio);
static int dmtc_crypto_cb_dump_done(struct cryptop *crp);
static int dmtc_crypto_cb_read_done(struct cryptop *crp);
static int dmtc_crypto_cb_write_done(struct cryptop *crp);

static ivgen_ctor_t	essiv_ivgen_ctor;
static ivgen_dtor_t	essiv_ivgen_dtor;
static ivgen_t		essiv_ivgen;
static ivgen_t		plain_ivgen;
static ivgen_t		plain64_ivgen;

/*
 * Supported IV generation schemes, matched by name against the iv_mode
 * part of the target parameters; NULL-terminated sentinel entry.
 */
static struct iv_generator ivgens[] = {
	{ .name = "essiv", .ctor = essiv_ivgen_ctor, .dtor = essiv_ivgen_dtor,
	    .gen_iv = essiv_ivgen },
	{ .name = "plain", .ctor = NULL, .dtor = NULL, .gen_iv = plain_ivgen },
	{ .name = "plain64", .ctor = NULL, .dtor = NULL, .gen_iv = plain64_ivgen },
	{ NULL, NULL, NULL, NULL }
};
197 
198 /*
199  * Number of crypto buffers.  All crypto buffers will be preallocated
200  * in order to avoid kmalloc() deadlocks in critical low-memory paging
201  * paths.
202  */
203 static __inline int
204 dmtc_get_nmax(void)
205 {
206 	int nmax;
207 
208 	nmax = (physmem * 2 / 1000 * PAGE_SIZE) /
209 	       (DMTC_BUF_SIZE_WRITE + DMTC_BUF_SIZE_READ) + 1;
210 
211 	if (nmax < 2)
212 		nmax = 2;
213 	if (nmax > 8 + ncpus * 2)
214 		nmax = 8 + ncpus * 2;
215 
216 	return nmax;
217 }
218 
219 /*
220  * Initialize the crypto buffer mpipe.  Preallocate all crypto buffers
221  * to avoid making any kmalloc()s in the critical path.
222  */
223 static void
224 dmtc_init_mpipe(struct target_crypt_config *priv)
225 {
226 	int nmax;
227 
228 	nmax = dmtc_get_nmax();
229 
230 	kprintf("dm_target_crypt: Setting %d mpipe buffers\n", nmax);
231 
232 	mpipe_init(&priv->write_mpipe, M_DMCRYPT, DMTC_BUF_SIZE_WRITE,
233 		   nmax, nmax, MPF_NOZERO | MPF_CALLBACK, NULL, NULL, NULL);
234 	mpipe_init(&priv->read_mpipe, M_DMCRYPT, DMTC_BUF_SIZE_READ,
235 		   nmax, nmax, MPF_NOZERO | MPF_CALLBACK, NULL, NULL, NULL);
236 }
237 
/* Release the preallocated crypto buffer pools */
static void
dmtc_destroy_mpipe(struct target_crypt_config *priv)
{
	mpipe_done(&priv->write_mpipe);
	mpipe_done(&priv->read_mpipe);
}
244 
245 /*
246  * Overwrite private information (in buf) to avoid leaking it
247  */
/*
 * Scrub sensitive data before it is freed or reused: one all-ones
 * pass followed by a zeroing pass.
 */
static void
dmtc_crypto_clear(void *buf, size_t len)
{
	memset(buf, 0xFF, len);
	memset(buf, 0x00, len);
}
254 
255 /*
256  * ESSIV IV Generator Routines
257  */
/*
 * Construct the ESSIV generator state: hash the volume key with the
 * digest named by iv_hash ("essiv:<hash>"), open a crypto session
 * keyed with that digest, and preallocate the per-request ivdata
 * contexts.  On success *p_ivpriv receives the state; returns
 * EINVAL for an unknown hash, ENOTSUP if the session cannot be
 * created.
 */
static int
essiv_ivgen_ctor(struct target_crypt_config *priv, char *iv_hash, void **p_ivpriv)
{
	struct essiv_ivgen_priv *ivpriv;
	u_int8_t crypto_keyhash[SHA512_DIGEST_LENGTH];
	unsigned int klen, hashlen;
	int error;
	int nmax;

	klen = (priv->crypto_klen >> 3);	/* key length in bytes */

	if (iv_hash == NULL)
		return EINVAL;

	/* Digest the volume key with the requested hash */
	if (!strcmp(iv_hash, "sha1")) {
		SHA1_CTX ctx;

		hashlen = SHA1_RESULTLEN;
		SHA1Init(&ctx);
		SHA1Update(&ctx, priv->crypto_key, klen);
		SHA1Final(crypto_keyhash, &ctx);
	} else if (!strcmp(iv_hash, "sha256")) {
		SHA256_CTX ctx;

		hashlen = SHA256_DIGEST_LENGTH;
		SHA256_Init(&ctx);
		SHA256_Update(&ctx, priv->crypto_key, klen);
		SHA256_Final(crypto_keyhash, &ctx);
	} else if (!strcmp(iv_hash, "sha384")) {
		SHA384_CTX ctx;

		hashlen = SHA384_DIGEST_LENGTH;
		SHA384_Init(&ctx);
		SHA384_Update(&ctx, priv->crypto_key, klen);
		SHA384_Final(crypto_keyhash, &ctx);
	} else if (!strcmp(iv_hash, "sha512")) {
		SHA512_CTX ctx;

		hashlen = SHA512_DIGEST_LENGTH;
		SHA512_Init(&ctx);
		SHA512_Update(&ctx, priv->crypto_key, klen);
		SHA512_Final(crypto_keyhash, &ctx);
	} else if (!strcmp(iv_hash, "md5")) {
		MD5_CTX ctx;

		hashlen = MD5_DIGEST_LENGTH;
		MD5Init(&ctx);
		MD5Update(&ctx, priv->crypto_key, klen);
		MD5Final(crypto_keyhash, &ctx);
	} else if (!strcmp(iv_hash, "rmd160") ||
		   !strcmp(iv_hash, "ripemd160")) {
		RMD160_CTX ctx;

		hashlen = 160/8;
		RMD160Init(&ctx);
		RMD160Update(&ctx, priv->crypto_key, klen);
		RMD160Final(crypto_keyhash, &ctx);
	} else {
		return EINVAL;
	}

	/* Convert hashlen to bits */
	hashlen <<= 3;

	ivpriv = kmalloc(sizeof(struct essiv_ivgen_priv), M_DMCRYPT,
			 M_WAITOK | M_ZERO);
	/*
	 * NOTE: the full SHA512-sized buffer is copied/cleared even for
	 * shorter digests; cri_klen (= hashlen) limits what is used.
	 */
	memcpy(ivpriv->crypto_keyhash, crypto_keyhash, sizeof(crypto_keyhash));
	ivpriv->keyhash_len = sizeof(crypto_keyhash);
	dmtc_crypto_clear(crypto_keyhash, sizeof(crypto_keyhash));

	/* IV-encryption session: same cipher as the data, hashed key */
	ivpriv->crypto_session.cri_alg = priv->crypto_alg;
	ivpriv->crypto_session.cri_key = (u_int8_t *)ivpriv->crypto_keyhash;
	ivpriv->crypto_session.cri_klen = hashlen;
	ivpriv->crypto_session.cri_mlen = 0;
	ivpriv->crypto_session.cri_next = NULL;

	/*
	 * XXX: in principle we also need to check if the block size of the
	 *	cipher is a valid iv size for the block cipher.
	 */

	error = crypto_newsession(&ivpriv->crypto_sid,
				  &ivpriv->crypto_session,
				  CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_HARDWARE);
	if (error) {
		kprintf("dm_target_crypt: Error during crypto_newsession "
			"for essiv_ivgen, error = %d\n",
			error);
		dmtc_crypto_clear(ivpriv->crypto_keyhash, ivpriv->keyhash_len);
		kfree(ivpriv, M_DMCRYPT);
		return ENOTSUP;
	}

	/*
	 * mpipe for 512-byte ivgen elements, make sure there are enough
	 * to cover all in-flight read and write buffers.
	 */
	nmax = dmtc_get_nmax() * (int)(MAXPHYS / DEV_BSIZE) * 2;

	spin_init(&ivpriv->ivdata_spin, "ivdata");

	/* Prebuild the free list; one ivdata per possible in-flight sector */
	while (nmax) {
		struct essiv_ivgen_data *ivdata;

		ivdata = kmalloc(sizeof(*ivdata), M_DMCRYPT, M_WAITOK|M_ZERO);
		ivdata->next = ivpriv->ivdata_base;
		ivpriv->ivdata_base = ivdata;
		--nmax;
	}
	*p_ivpriv = ivpriv;

	return 0;
}
371 
372 static int
373 essiv_ivgen_dtor(struct target_crypt_config *priv, void *arg)
374 {
375 	struct essiv_ivgen_priv *ivpriv;
376 	struct essiv_ivgen_data *ivdata;
377 
378 	ivpriv = (struct essiv_ivgen_priv *)arg;
379 	KKASSERT(ivpriv != NULL);
380 
381 	crypto_freesession(ivpriv->crypto_sid);
382 
383 	while ((ivdata = ivpriv->ivdata_base) != NULL) {
384 		ivpriv->ivdata_base = ivdata->next;
385 		kfree(ivdata, M_DMCRYPT);
386 	}
387 	spin_uninit(&ivpriv->ivdata_spin);
388 
389 	dmtc_crypto_clear(ivpriv->crypto_keyhash, ivpriv->keyhash_len);
390 	kfree(ivpriv, M_DMCRYPT);
391 
392 	return 0;
393 }
394 
/*
 * Completion callback for the IV-encryption cryptop issued by
 * essiv_ivgen().  Returns the ivdata context to the free list and
 * dispatches the caller's (sector data) cryptop.
 */
static int
essiv_ivgen_done(struct cryptop *crp)
{
	struct essiv_ivgen_priv *ivpriv;
	struct essiv_ivgen_data *ivdata;
	void *opaque;


	/* Driver asked for a retry; resubmit the same request */
	if (crp->crp_etype == EAGAIN)
		return crypto_dispatch(crp);

	/* Log but otherwise press on; the data crypto op still runs */
	if (crp->crp_etype != 0) {
		kprintf("dm_target_crypt: essiv_ivgen_done, "
			"crp->crp_etype = %d\n", crp->crp_etype);
	}

	ivdata = (void *)crp->crp_opaque;

	/*
	 * crp_opaque points at the essiv_ivgen_data that embeds this
	 * crp/crd pair.  Pull out the owning generator state and the
	 * caller's cryptop, then put the context back on the free list
	 * before dispatching.
	 */
	ivpriv = ivdata->ivpriv;
	opaque = ivdata->opaque;

	spin_lock(&ivpriv->ivdata_spin);
	ivdata->next = ivpriv->ivdata_base;
	ivpriv->ivdata_base = ivdata;
	spin_unlock(&ivpriv->ivdata_spin);

	dmtc_crypto_dispatch(opaque);

	return 0;
}
430 
431 static void
432 essiv_ivgen(dm_target_crypt_config_t *priv, u_int8_t *iv,
433 	    size_t iv_len, off_t sector, void *opaque)
434 {
435 	struct essiv_ivgen_priv *ivpriv;
436 	struct essiv_ivgen_data *ivdata;
437 	struct cryptodesc *crd;
438 	struct cryptop *crp;
439 	int error;
440 
441 	ivpriv = priv->ivgen_priv;
442 	KKASSERT(ivpriv != NULL);
443 
444 	/*
445 	 * We preallocated all necessary ivdata's, so pull one off and use
446 	 * it.
447 	 */
448 	spin_lock(&ivpriv->ivdata_spin);
449 	ivdata = ivpriv->ivdata_base;
450 	ivpriv->ivdata_base = ivdata->next;
451 	spin_unlock(&ivpriv->ivdata_spin);
452 
453 	KKASSERT(ivdata != NULL);
454 
455 	ivdata->ivpriv = ivpriv;
456 	ivdata->opaque = opaque;
457 	crp = &ivdata->crp;
458 	crd = &ivdata->crd;
459 
460 	bzero(iv, iv_len);
461 	bzero(crd, sizeof(struct cryptodesc));
462 	bzero(crp, sizeof(struct cryptop));
463 	*((off_t *)iv) = htole64(sector + priv->iv_offset);
464 	crp->crp_buf = (caddr_t)iv;
465 
466 	crp->crp_sid = ivpriv->crypto_sid;
467 	crp->crp_ilen = crp->crp_olen = iv_len;
468 
469 	crp->crp_opaque =  (caddr_t)ivdata;
470 
471 	crp->crp_callback = essiv_ivgen_done;
472 
473 	crp->crp_desc = crd;
474 	crp->crp_etype = 0;
475 	crp->crp_flags = CRYPTO_F_CBIFSYNC | CRYPTO_F_REL | CRYPTO_F_BATCH;
476 
477 	crd->crd_alg = priv->crypto_alg;
478 #if 0
479 	crd->crd_key = (caddr_t)priv->crypto_keyhash;
480 	crd->crd_klen = priv->crypto_klen;
481 #endif
482 
483 	bzero(crd->crd_iv, sizeof(crd->crd_iv));
484 
485 	crd->crd_skip = 0;
486 	crd->crd_len = iv_len;
487 	crd->crd_flags = CRD_F_IV_EXPLICIT | CRD_F_IV_PRESENT;
488 	crd->crd_flags |= CRD_F_ENCRYPT;
489 	crd->crd_next = NULL;
490 
491 	error = crypto_dispatch(crp);
492 	if (error)
493 		kprintf("dm_target_crypt: essiv_ivgen, error = %d\n", error);
494 }
495 
496 
497 static void
498 plain_ivgen(dm_target_crypt_config_t *priv, u_int8_t *iv,
499 	    size_t iv_len, off_t sector, void *opaque)
500 {
501 	bzero(iv, iv_len);
502 	*((uint32_t *)iv) = htole32((uint32_t)(sector + priv->iv_offset));
503 	dmtc_crypto_dispatch(opaque);
504 }
505 
506 static void
507 plain64_ivgen(dm_target_crypt_config_t *priv, u_int8_t *iv,
508     size_t iv_len, off_t sector, void *opaque)
509 {
510 	bzero(iv, iv_len);
511 	*((uint64_t *)iv) = htole64((uint64_t)(sector + priv->iv_offset));
512 	dmtc_crypto_dispatch(opaque);
513 }
514 
515 #if 0
516 static void
517 geli_ivgen(dm_target_crypt_config_t *priv, u_int8_t *iv,
518 	   size_t iv_len, off_t sector, void *opaque)
519 {
520 
521 	SHA512_CTX	ctx512;
522 	u_int8_t	md[SHA512_DIGEST_LENGTH]; /* Max. Digest Size */
523 
524 	memcpy(&ctx512, &priv->essivsha512_ctx, sizeof(SHA512_CTX));
525 	SHA512_Update(&ctx512, (u_int8_t*)&sector, sizeof(off_t));
526 	SHA512_Final(md, &ctx512);
527 
528 	memcpy(iv, md, iv_len);
529 	dmtc_crypto_dispatch(opaque);
530 }
531 #endif
532 
533 /*
534  * Init function called from dm_table_load_ioctl.
535  * cryptsetup actually passes us this:
536  * aes-cbc-essiv:sha256 7997f8af... 0 /dev/ad0s0a 8
537  */
/*
 * Convert a hex string into key_len raw key bytes.
 *
 * Fix: the original always returned 0, although the caller checks the
 * return value and reports "invalid key format" -- and a hex string
 * shorter than 2 * key_len characters would be read past its
 * terminator.  Now returns EINVAL for short input or non-hex digits.
 */
static int
hex2key(char *hex, size_t key_len, u_int8_t *key)
{
	char hex_buf[3];
	char *endp;
	size_t key_idx;

	hex_buf[2] = 0;
	for (key_idx = 0; key_idx < key_len; ++key_idx) {
		/* Reject strings too short for the requested key length */
		if (hex[0] == '\0' || hex[1] == '\0')
			return EINVAL;
		hex_buf[0] = *hex++;
		hex_buf[1] = *hex++;
		key[key_idx] = (u_int8_t)strtoul(hex_buf, &endp, 16);
		/* Both characters must have been consumed as hex digits */
		if (*endp != '\0')
			return EINVAL;
	}
	/* Scrub the conversion scratch space */
	hex_buf[0] = 0;
	hex_buf[1] = 0;

	return 0;
}
555 
/*
 * Target constructor, called from dm_table_load_ioctl with 5 args:
 *   <alg>-<mode>[-<ivmode>[:<ivopt>]] <hexkey> <iv_offset> <dev> <block_offset>
 * Parses and validates the cipher/mode/key, resolves the IV generator,
 * opens the crypto session and preallocates the buffer mpipes.
 */
static int
dm_target_crypt_init(dm_table_entry_t *table_en, int argc, char **argv)
{
	dm_target_crypt_config_t *priv;
	size_t len;
	char *crypto_alg, *crypto_mode, *iv_mode, *iv_opt, *key, *dev;
	char *status_str;
	int i, klen, error;
	uint64_t iv_offset, block_offset;

	if (argc != 5) {
		kprintf("dm_target_crypt: not enough arguments, "
			"need exactly 5\n");
		return EINVAL;
	}

	len = 0;
	for (i = 0; i < argc; i++) {
		len += strlen(argv[i]);
		len++;
	}
	/* len is strlen() of input string +1 */
	status_str = kmalloc(len, M_DMCRYPT, M_WAITOK);

	/* Split "<alg>-<mode>-<ivmode>:<ivopt>"; strsep mutates argv[0] */
	crypto_alg = strsep(&argv[0], "-");
	crypto_mode = strsep(&argv[0], "-");
	iv_opt = strsep(&argv[0], "-");
	iv_mode = strsep(&iv_opt, ":");
	key = argv[1];
	iv_offset = strtouq(argv[2], NULL, 0);
	dev = argv[3];
	block_offset = strtouq(argv[4], NULL, 0);
	/* bits / 8 = bytes, 1 byte = 2 hexa chars, so << 2 */
	klen = strlen(key) << 2;

#if 0
	kprintf("dm_target_crypt - new: dev=%s, crypto_alg=%s, crypto_mode=%s, "
		"iv_mode=%s, iv_opt=%s, key=%s, iv_offset=%ju, "
		"block_offset=%ju\n",
		dev, crypto_alg, crypto_mode, iv_mode, iv_opt, key, iv_offset,
		block_offset);
#endif

	/* Not M_ZERO: every used field is assigned below before use */
	priv = kmalloc(sizeof(dm_target_crypt_config_t), M_DMCRYPT, M_WAITOK);

	/* Insert dmp to global pdev list */
	if ((priv->pdev = dm_pdev_insert(dev)) == NULL) {
		kprintf("dm_target_crypt: dm_pdev_insert failed\n");
		/* NOTE(review): priv is leaked on this path -- only
		 * status_str is freed; confirm and kfree(priv) too. */
		kfree(status_str, M_DMCRYPT);
		return ENOENT;
	}

	/*
	 * This code checks for valid combinations of algorithm and mode.
	 * Currently supported options are:
	 *
	 * *-cbc
	 * aes-xts
	 * twofish-xts
	 * serpent-xts
	 */
	if ((strcmp(crypto_mode, "cbc") != 0) &&
	    !((strcmp(crypto_mode, "xts") == 0) &&
	    ((strcmp(crypto_alg, "aes") == 0) ||
	    (strcmp(crypto_alg, "twofish") == 0) ||
	    (strcmp(crypto_alg, "serpent") == 0))))
	{
		kprintf("dm_target_crypt: only support 'cbc' chaining mode,"
		    " aes-xts, twofish-xts and serpent-xts, "
		    "invalid mode '%s-%s'\n",
		    crypto_alg, crypto_mode);
		goto notsup;
	}

	/* Map (alg, mode) to a CRYPTO_* id and validate klen (in bits) */
	if (!strcmp(crypto_alg, "aes")) {
		if (!strcmp(crypto_mode, "xts")) {
			priv->crypto_alg = CRYPTO_AES_XTS;
			if (klen != 256 && klen != 512)
				goto notsup;
		} else if (!strcmp(crypto_mode, "cbc")) {
			priv->crypto_alg = CRYPTO_AES_CBC;
			if (klen != 128 && klen != 192 && klen != 256)
				goto notsup;
		} else {
			goto notsup;
		}
		priv->crypto_klen = klen;
	} else if (!strcmp(crypto_alg, "twofish")) {
		if (!strcmp(crypto_mode, "xts")) {
			priv->crypto_alg = CRYPTO_TWOFISH_XTS;
			if (klen != 256 && klen != 512)
				goto notsup;
		} else if (!strcmp(crypto_mode, "cbc")) {
			priv->crypto_alg = CRYPTO_TWOFISH_CBC;
			if (klen != 128 && klen != 192 && klen != 256)
				goto notsup;
		} else {
			goto notsup;
		}
		priv->crypto_klen = klen;
	} else if (!strcmp(crypto_alg, "serpent")) {
		if (!strcmp(crypto_mode, "xts")) {
			priv->crypto_alg = CRYPTO_SERPENT_XTS;
			if (klen != 256 && klen != 512)
				goto notsup;
		} else if (!strcmp(crypto_mode, "cbc")) {
			priv->crypto_alg = CRYPTO_SERPENT_CBC;
			if (klen != 128 && klen != 192 && klen != 256)
				goto notsup;
		} else {
			goto notsup;
		}
		priv->crypto_klen = klen;
	} else if (!strcmp(crypto_alg, "blowfish")) {
		priv->crypto_alg = CRYPTO_BLF_CBC;
		if (klen < 128 || klen > 448 || (klen % 8) != 0)
			goto notsup;
		priv->crypto_klen = klen;
	} else if (!strcmp(crypto_alg, "3des") ||
		   !strncmp(crypto_alg, "des3", 4)) {
		priv->crypto_alg = CRYPTO_3DES_CBC;
		if (klen != 168)
			goto notsup;
		priv->crypto_klen = 168;
	} else if (!strcmp(crypto_alg, "camellia")) {
		priv->crypto_alg = CRYPTO_CAMELLIA_CBC;
		if (klen != 128 && klen != 192 && klen != 256)
			goto notsup;
		priv->crypto_klen = klen;
	} else if (!strcmp(crypto_alg, "skipjack")) {
		priv->crypto_alg = CRYPTO_SKIPJACK_CBC;
		if (klen != 80)
			goto notsup;
		priv->crypto_klen = 80;
	} else if (!strcmp(crypto_alg, "cast5")) {
		priv->crypto_alg = CRYPTO_CAST_CBC;
		if (klen != 128)
			goto notsup;
		priv->crypto_klen = 128;
	} else if (!strcmp(crypto_alg, "null")) {
		priv->crypto_alg = CRYPTO_NULL_CBC;
		if (klen != 128)
			goto notsup;
		priv->crypto_klen = 128;
	} else {
		kprintf("dm_target_crypt: Unsupported crypto algorithm: %s\n",
			crypto_alg);
		goto notsup;
	}

	/* Save length of param string */
	priv->params_len = len;
	priv->block_offset = block_offset;
	/* gen_iv routines add block-relative sectors, so pre-subtract */
	priv->iv_offset = iv_offset - block_offset;

	dm_table_add_deps(table_en, priv->pdev);

	dm_table_init_target(table_en, priv);

	error = hex2key(key, priv->crypto_klen >> 3,
			(u_int8_t *)priv->crypto_key);

	if (error) {
		kprintf("dm_target_crypt: hex2key failed, "
			"invalid key format\n");
		goto notsup;
	}

	/* Handle cmd */
	for(i = 0; ivgens[i].name != NULL; i++) {
		if (!strcmp(iv_mode, ivgens[i].name))
			break;
	}

	if (ivgens[i].name == NULL) {
		kprintf("dm_target_crypt: iv_mode='%s' unsupported\n",
			iv_mode);
		goto notsup;
	}

	/* Call our ivgen constructor */
	if (ivgens[i].ctor != NULL) {
		error = ivgens[i].ctor(priv, iv_opt,
		    &priv->ivgen_priv);
		if (error) {
			kprintf("dm_target_crypt: ctor for '%s' failed\n",
			    ivgens[i].name);
			goto notsup;
		}
	}

	priv->ivgen = &ivgens[i];

	/* Data crypto session over the parsed cipher and raw key */
	priv->crypto_session.cri_alg = priv->crypto_alg;
	priv->crypto_session.cri_key = (u_int8_t *)priv->crypto_key;
	priv->crypto_session.cri_klen = priv->crypto_klen;
	priv->crypto_session.cri_mlen = 0;
	priv->crypto_session.cri_next = NULL;

	error = crypto_newsession(&priv->crypto_sid,
				  &priv->crypto_session,
				  CRYPTOCAP_F_SOFTWARE | CRYPTOCAP_F_HARDWARE);
	if (error) {
		kprintf("dm_target_crypt: Error during crypto_newsession, "
			"error = %d\n",
			error);
		goto notsup;
	}

	/* Blank out the key hex in argv so the status string hides it */
	memset(key, '0', strlen(key));
	if (iv_opt) {
		ksprintf(status_str, "%s-%s-%s:%s %s %ju %s %ju",
		    crypto_alg, crypto_mode, iv_mode, iv_opt,
		    key, iv_offset, dev, block_offset);
	} else {
		ksprintf(status_str, "%s-%s-%s %s %ju %s %ju",
		    crypto_alg, crypto_mode, iv_mode,
		    key, iv_offset, dev, block_offset);
	}
	priv->status_str = status_str;

	/* Initialize mpipes */
	dmtc_init_mpipe(priv);

	return 0;

notsup:
	/*
	 * NOTE(review): this path frees only status_str; priv, the pdev
	 * reference taken above, and any ivgen ctor state appear to be
	 * leaked -- confirm against the dm table teardown path.
	 */
	kprintf("dm_target_crypt: ENOTSUP\n");
	kfree(status_str, M_DMCRYPT);
	return ENOTSUP;
}
787 
788 /* Table routine called to get params string. */
789 static char *
790 dm_target_crypt_table(void *target_config)
791 {
792 	dm_target_crypt_config_t *priv;
793 	char *params;
794 
795 	priv = target_config;
796 
797 	params = dm_alloc_string(DM_MAX_PARAMS_SIZE);
798 
799 	ksnprintf(params, DM_MAX_PARAMS_SIZE, "%s",
800 	    priv->status_str);
801 
802 	return params;
803 }
804 
/* Target destructor: release the pdev and scrub/free all private state */
static int
dm_target_crypt_destroy(dm_table_entry_t *table_en)
{
	dm_target_crypt_config_t *priv;

	/*
	 * Disconnect the crypt config before unbusying the target.
	 */
	priv = table_en->target_config;
	if (priv == NULL)
		return 0;
	dm_pdev_decr(priv->pdev);

	/*
	 * Clean up the crypt config
	 *
	 * Overwrite the private information before freeing memory to
	 * avoid leaking it.
	 *
	 * NOTE(review): the data crypto session is freed only when
	 * status_str is non-NULL.  status_str is set on every successful
	 * init, but the coupling is fragile -- consider freeing the
	 * session unconditionally.
	 */
	if (priv->status_str) {
		dmtc_crypto_clear(priv->status_str, strlen(priv->status_str));
		kfree(priv->status_str, M_DMCRYPT);
		crypto_freesession(priv->crypto_sid);
	}

	/* IV generators with a ctor (essiv) also have state to destroy */
	if ((priv->ivgen) && (priv->ivgen->dtor != NULL)) {
		priv->ivgen->dtor(priv, priv->ivgen_priv);
	}

	/* Destroy mpipes */
	dmtc_destroy_mpipe(priv);

	dmtc_crypto_clear(priv, sizeof(dm_target_crypt_config_t));
	kfree(priv, M_DMCRYPT);

	return 0;
}
842 
843 /************************************************************************
844  *			STRATEGY SUPPORT FUNCTIONS			*
845  ************************************************************************
846  *
847  * READ PATH:	doio -> bio_read_done -> crypto_work -> crypto_cb_read_done
848  * WRITE PATH:	crypto_work -> crypto_cb_write_done -> doio -> bio_write_done
849  */
850 
851 /*
852  * Wrapper around crypto_dispatch() to match dispatch_t type
853  */
854 static void
855 dmtc_crypto_dispatch(void *arg)
856 {
857 	struct cryptop *crp;
858 
859 	crp = (struct cryptop *)arg;
860 	KKASSERT(crp != NULL);
861 	KTR_LOG(dmcrypt_crypto_dispatch, crp);
862 	crypto_dispatch(crp);
863 }
864 
865 /*
866  * Start IO operation, called from dmstrategy routine.
867  */
/*
 * Start IO operation, called from dmstrategy routine.
 *
 * Reads go to the device first and are decrypted in the bio_done
 * callback; writes are encrypted first and the device I/O is issued
 * from the crypto callback (see the path overview above).  Other
 * commands pass straight through.
 */
static int
dm_target_crypt_strategy(dm_table_entry_t *table_en, struct buf *bp)
{
	struct bio *bio;

	dm_target_crypt_config_t *priv;
	priv = table_en->target_config;

	/* Get rid of stuff we can't really handle */
	if ((bp->b_cmd == BUF_CMD_READ) || (bp->b_cmd == BUF_CMD_WRITE)) {
		/* Crypto runs per DEV_BSIZE sector; reject partial sectors */
		if (((bp->b_bcount % DEV_BSIZE) != 0) || (bp->b_bcount == 0)) {
			kprintf("dm_target_crypt_strategy: can't really "
				"handle bp->b_bcount = %d\n",
				bp->b_bcount);
			bp->b_error = EINVAL;
			bp->b_flags |= B_ERROR | B_INVAL;
			biodone(&bp->b_bio1);
			return 0;
		}
	}

	KTR_LOG(dmcrypt_crypt_strategy, bp->b_cmd, bp);

	switch (bp->b_cmd) {
	case BUF_CMD_READ:
		/* Issue the device read; decrypt when it completes */
		bio = push_bio(&bp->b_bio1);
		bio->bio_offset = bp->b_bio1.bio_offset +
				  priv->block_offset * DEV_BSIZE;
		bio->bio_caller_info1.ptr = priv;
		bio->bio_done = dmtc_bio_read_done;
		vn_strategy(priv->pdev->pdev_vnode, bio);
		break;
	case BUF_CMD_WRITE:
		/* Hand off to the crypto write path; it issues the I/O */
		bio = push_bio(&bp->b_bio1);
		bio->bio_offset = bp->b_bio1.bio_offset +
				  priv->block_offset * DEV_BSIZE;
		bio->bio_caller_info1.ptr = priv;
		dmtc_crypto_write_start(priv, bio);
		break;
	default:
		/* Pass other commands through untouched */
		vn_strategy(priv->pdev->pdev_vnode, &bp->b_bio1);
		break;
	}
	return 0;
}
913 
914 /*
915  * STRATEGY READ PATH PART 1/3 (after read BIO completes)
916  */
917 static void
918 dmtc_bio_read_done(struct bio *bio)
919 {
920 	struct bio *obio;
921 
922 	dm_target_crypt_config_t *priv;
923 
924 	KTR_LOG(dmcrypt_bio_read_done, bio->bio_buf);
925 
926 	/*
927 	 * If a read error occurs we shortcut the operation, otherwise
928 	 * go on to stage 2.
929 	 */
930 	if (bio->bio_buf->b_flags & B_ERROR) {
931 		obio = pop_bio(bio);
932 		biodone(obio);
933 	} else {
934 		priv = bio->bio_caller_info1.ptr;
935 		dmtc_crypto_read_start(priv, bio);
936 	}
937 }
938 
939 /*
940  * STRATEGY READ PATH PART 2/3
941  */
942 static void
943 dmtc_crypto_read_retry(void *arg1, void *arg2)
944 {
945 	dm_target_crypt_config_t *priv = arg1;
946 	struct bio *bio = arg2;
947 
948 	dmtc_crypto_read_start(priv, bio);
949 }
950 
/*
 * STRATEGY READ PATH PART 2/3: carve a preallocated mpipe buffer into
 * a dmtc_helper plus one cryptop/cryptodesc pair per sector, and issue
 * an in-place decryption request for each sector.  If no buffer is
 * available, mpipe re-invokes us via dmtc_crypto_read_retry.
 */
static void
dmtc_crypto_read_start(dm_target_crypt_config_t *priv, struct bio *bio)
{
	struct dmtc_helper *dmtc;
	struct cryptodesc *crd;
	struct cryptop *crp;
	int i, bytes, sectors, sz;
	off_t isector;
	u_char *ptr, *space;

	/*
	 * Note: b_resid no good after read I/O, it will be 0, use
	 *	 b_bcount.
	 */
	bytes = bio->bio_buf->b_bcount;
	isector = bio->bio_offset / DEV_BSIZE;	/* ivgen salt base? */
	sectors = bytes / DEV_BSIZE;		/* Number of sectors */
	sz = sectors * (sizeof(*crp) + sizeof(*crd));

	/*
	 * For reads with bogus page we can't decrypt in place as stuff
	 * can get ripped out from under us.
	 *
	 * XXX actually it looks like we can, and in any case the initial
	 * read already completed and threw crypted data into the buffer
	 * cache buffer.  Disable for now.
	 */
	space = mpipe_alloc_callback(&priv->read_mpipe,
				     dmtc_crypto_read_retry, priv, bio);
	if (space == NULL)
		return;		/* retry callback will re-enter */

	/* Helper header at the front of the mpipe buffer */
	dmtc = (struct dmtc_helper *)space;
	dmtc->free_addr = space;
	space += sizeof(struct dmtc_helper);
	dmtc->orig_buf = NULL;
	dmtc->data_buf = bio->bio_buf->b_data;	/* decrypt in place */
	dmtc->priv = priv;
	bio->bio_caller_info2.ptr = dmtc;
	bio->bio_buf->b_error = 0;

	/*
	 * Load crypto descriptors (crp/crd loop)
	 */
	bzero(space, sz);
	ptr = space;
	/* Completion counter for dmtc_crypto_cb_read_done() */
	bio->bio_caller_info3.value = sectors;
	/* Publish the fields above before any crypto callback can run */
	cpu_sfence();
#if 0
	kprintf("Read, bytes = %d (b_bcount), "
		"sectors = %d (bio = %p, b_cmd = %d)\n",
		bytes, sectors, bio, bio->bio_buf->b_cmd);
#endif
	for (i = 0; i < sectors; i++) {
		crp = (struct cryptop *)ptr;
		ptr += sizeof(*crp);
		crd = (struct cryptodesc *)ptr;
		ptr += sizeof (*crd);

		crp->crp_buf = dmtc->data_buf + i * DEV_BSIZE;

		crp->crp_sid = priv->crypto_sid;
		crp->crp_ilen = crp->crp_olen = DEV_BSIZE;

		crp->crp_opaque = (void *)bio;

		crp->crp_callback = dmtc_crypto_cb_read_done;
		crp->crp_desc = crd;
		crp->crp_etype = 0;
		crp->crp_flags = CRYPTO_F_CBIFSYNC | CRYPTO_F_REL |
				 CRYPTO_F_BATCH;

		crd->crd_alg = priv->crypto_alg;
#if 0
		crd->crd_key = (caddr_t)priv->crypto_key;
		crd->crd_klen = priv->crypto_klen;
#endif

		crd->crd_skip = 0;
		crd->crd_len = DEV_BSIZE /* XXX */;
		crd->crd_flags = CRD_F_IV_EXPLICIT | CRD_F_IV_PRESENT;
		crd->crd_next = NULL;

		crd->crd_flags &= ~CRD_F_ENCRYPT;	/* decrypt */

		KTR_LOG(dmcrypt_crypto_read_start, crp, bio->bio_buf, i,
		    sectors);

		/*
		 * Note: last argument is used to generate salt(?) and is
		 *	 a 64 bit value, but the original code passed an
		 *	 int.  Changing it now will break pre-existing
		 *	 crypt volumes.
		 */
		priv->ivgen->gen_iv(priv, crd->crd_iv, sizeof(crd->crd_iv),
				    isector + i, crp);
	}
}
1049 
1050 /*
1051  * STRATEGY READ PATH PART 3/3
1052  */
/*
 * STRATEGY READ PATH PART 3/3: per-sector decryption callback.  Errors
 * accumulate in b_error; the final callback (counter reaching 1)
 * releases the mpipe buffer and completes the original bio.
 */
static int
dmtc_crypto_cb_read_done(struct cryptop *crp)
{
	struct dmtc_helper *dmtc;
	struct bio *bio, *obio;
	int n;

	/* Driver asked for a retry; resubmit the same request */
	if (crp->crp_etype == EAGAIN)
		return crypto_dispatch(crp);

	bio = (struct bio *)crp->crp_opaque;
	KKASSERT(bio != NULL);

	/*
	 * Cumulative error
	 */
	if (crp->crp_etype) {
		kprintf("dm_target_crypt: dmtc_crypto_cb_read_done "
			"crp_etype = %d\n",
			crp->crp_etype);
		bio->bio_buf->b_error = crp->crp_etype;
	}

	/*
	 * On the last chunk of the decryption we do any required copybacks
	 * and complete the I/O.
	 */
	n = atomic_fetchadd_int(&bio->bio_caller_info3.value, -1);
#if 0
	kprintf("dmtc_crypto_cb_read_done %p, n = %d\n", bio, n);
#endif

	KTR_LOG(dmcrypt_crypto_cb_read_done, crp, bio->bio_buf, n);

	/* fetchadd returns the pre-decrement value: 1 means we are last */
	if (n == 1) {
		/*
		 * For the B_HASBOGUS case we didn't decrypt in place,
		 * so we need to copy stuff back into the buf.
		 *
		 * (disabled for now).
		 */
		dmtc = bio->bio_caller_info2.ptr;
		if (bio->bio_buf->b_error) {
			bio->bio_buf->b_flags |= B_ERROR;
		}
#if 0
		else if (bio->bio_buf->b_flags & B_HASBOGUS) {
			memcpy(bio->bio_buf->b_data, dmtc->data_buf,
			       bio->bio_buf->b_bcount);
		}
#endif
		mpipe_free(&dmtc->priv->read_mpipe, dmtc->free_addr);
		obio = pop_bio(bio);
		biodone(obio);
	}
	return 0;
}
1110 /* END OF STRATEGY READ SECTION */
1111 
1112 /*
1113  * STRATEGY WRITE PATH PART 1/3
1114  */
1115 
1116 static void
1117 dmtc_crypto_write_retry(void *arg1, void *arg2)
1118 {
1119 	dm_target_crypt_config_t *priv = arg1;
1120 	struct bio *bio = arg2;
1121 
1122 	KTR_LOG(dmcrypt_crypto_write_retry, bio->bio_buf);
1123 
1124 	dmtc_crypto_write_start(priv, bio);
1125 }
1126 
/*
 * Start encryption of a write BIO.
 *
 * Allocates one contiguous mpipe buffer laid out as:
 *	[struct dmtc_helper][crp/crd pair per sector][copy of the data]
 * Writes are never encrypted in place, so the caller's data is copied
 * into the tail of the buffer first.  One crypto request per DEV_BSIZE
 * sector is then dispatched through the ivgen; completions are counted
 * down in dmtc_crypto_cb_write_done, which issues the physical write.
 *
 * If the mpipe is exhausted the allocation registers
 * dmtc_crypto_write_retry and we simply return; the request is
 * restarted later.
 */
static void
dmtc_crypto_write_start(dm_target_crypt_config_t *priv, struct bio *bio)
{
	struct dmtc_helper *dmtc;
	struct cryptodesc *crd;
	struct cryptop *crp;
	int i, bytes, sectors, sz;
	off_t isector;
	u_char *ptr, *space;

	/*
	 * Use b_bcount for consistency
	 */
	bytes = bio->bio_buf->b_bcount;

	isector = bio->bio_offset / DEV_BSIZE;	/* ivgen salt base? */
	sectors = bytes / DEV_BSIZE;		/* Number of sectors */
	sz = sectors * (sizeof(*crp) + sizeof(*crd));

	/*
	 * For writes and reads with bogus page don't decrypt in place.
	 */
	space = mpipe_alloc_callback(&priv->write_mpipe,
				     dmtc_crypto_write_retry, priv, bio);
	if (space == NULL)
		return;		/* retry scheduled by the mpipe callback */

	/* Helper header lives at the front of the allocation. */
	dmtc = (struct dmtc_helper *)space;
	dmtc->free_addr = space;
	space += sizeof(struct dmtc_helper);
	/* Copy the plaintext past the descriptor area (encrypt there). */
	memcpy(space + sz, bio->bio_buf->b_data, bytes);

	bio->bio_caller_info2.ptr = dmtc;
	bio->bio_buf->b_error = 0;

	dmtc->orig_buf = bio->bio_buf->b_data;
	dmtc->data_buf = space + sz;
	dmtc->priv = priv;

	/*
	 * Load crypto descriptors (crp/crd loop)
	 */
	bzero(space, sz);
	ptr = space;
	/* Completion countdown consumed by dmtc_crypto_cb_write_done. */
	bio->bio_caller_info3.value = sectors;
	/* Publish the fields above before async callbacks can run. */
	cpu_sfence();
#if 0
	kprintf("Write, bytes = %d (b_bcount), "
		"sectors = %d (bio = %p, b_cmd = %d)\n",
		bytes, sectors, bio, bio->bio_buf->b_cmd);
#endif
	for (i = 0; i < sectors; i++) {
		/* Carve a crp/crd pair for this sector out of the buffer. */
		crp = (struct cryptop *)ptr;
		ptr += sizeof(*crp);
		crd = (struct cryptodesc *)ptr;
		ptr += sizeof (*crd);

		crp->crp_buf = dmtc->data_buf + i * DEV_BSIZE;

		crp->crp_sid = priv->crypto_sid;
		crp->crp_ilen = crp->crp_olen = DEV_BSIZE;

		crp->crp_opaque = (void *)bio;

		crp->crp_callback = dmtc_crypto_cb_write_done;
		crp->crp_desc = crd;
		crp->crp_etype = 0;
		crp->crp_flags = CRYPTO_F_CBIFSYNC | CRYPTO_F_REL |
				 CRYPTO_F_BATCH;

		crd->crd_alg = priv->crypto_alg;
#if 0
		crd->crd_key = (caddr_t)priv->crypto_key;
		crd->crd_klen = priv->crypto_klen;
#endif

		crd->crd_skip = 0;
		crd->crd_len = DEV_BSIZE /* XXX */;
		crd->crd_flags = CRD_F_IV_EXPLICIT | CRD_F_IV_PRESENT;
		crd->crd_next = NULL;

		/* Write path: encrypt (read path clears this flag). */
		crd->crd_flags |= CRD_F_ENCRYPT;

		/*
		 * Note: last argument is used to generate salt(?) and is
		 *	 a 64 bit value, but the original code passed an
		 *	 int.  Changing it now will break pre-existing
		 *	 crypt volumes.
		 */

		KTR_LOG(dmcrypt_crypto_write_start, crp, bio->bio_buf,
		    i, sectors);

		/* ivgen dispatches the crypto request once the IV is set. */
		priv->ivgen->gen_iv(priv, crd->crd_iv, sizeof(crd->crd_iv),
				    isector + i, crp);
	}
}
1224 
1225 /*
1226  * STRATEGY WRITE PATH PART 2/3
1227  */
1228 static int
1229 dmtc_crypto_cb_write_done(struct cryptop *crp)
1230 {
1231 	struct dmtc_helper *dmtc;
1232 	dm_target_crypt_config_t *priv;
1233 	struct bio *bio, *obio;
1234 	int n;
1235 
1236 	if (crp->crp_etype == EAGAIN)
1237 		return crypto_dispatch(crp);
1238 
1239 	bio = (struct bio *)crp->crp_opaque;
1240 	KKASSERT(bio != NULL);
1241 
1242 	/*
1243 	 * Cumulative error
1244 	 */
1245 	if (crp->crp_etype != 0) {
1246 		kprintf("dm_target_crypt: dmtc_crypto_cb_write_done "
1247 			"crp_etype = %d\n",
1248 		crp->crp_etype);
1249 		bio->bio_buf->b_error = crp->crp_etype;
1250 	}
1251 
1252 	/*
1253 	 * On the last chunk of the encryption we issue the write
1254 	 */
1255 	n = atomic_fetchadd_int(&bio->bio_caller_info3.value, -1);
1256 #if 0
1257 	kprintf("dmtc_crypto_cb_write_done %p, n = %d\n", bio, n);
1258 #endif
1259 
1260 	KTR_LOG(dmcrypt_crypto_cb_write_done, crp, bio->bio_buf, n);
1261 
1262 	if (n == 1) {
1263 		dmtc = bio->bio_caller_info2.ptr;
1264 		priv = (dm_target_crypt_config_t *)bio->bio_caller_info1.ptr;
1265 
1266 		if (bio->bio_buf->b_error) {
1267 			bio->bio_buf->b_flags |= B_ERROR;
1268 			mpipe_free(&dmtc->priv->write_mpipe, dmtc->free_addr);
1269 			obio = pop_bio(bio);
1270 			biodone(obio);
1271 		} else {
1272 			dmtc->orig_buf = bio->bio_buf->b_data;
1273 			bio->bio_buf->b_data = dmtc->data_buf;
1274 			bio->bio_done = dmtc_bio_write_done;
1275 			vn_strategy(priv->pdev->pdev_vnode, bio);
1276 		}
1277 	}
1278 	return 0;
1279 }
1280 
1281 /*
1282  * STRATEGY WRITE PATH PART 3/3
1283  */
1284 static void
1285 dmtc_bio_write_done(struct bio *bio)
1286 {
1287 	struct dmtc_helper *dmtc;
1288 	struct bio *obio;
1289 
1290 	dmtc = bio->bio_caller_info2.ptr;
1291 	bio->bio_buf->b_data = dmtc->orig_buf;
1292 	mpipe_free(&dmtc->priv->write_mpipe, dmtc->free_addr);
1293 
1294 	KTR_LOG(dmcrypt_bio_write_done, bio->bio_buf);
1295 
1296 	obio = pop_bio(bio);
1297 	biodone(obio);
1298 }
1299 /* END OF STRATEGY WRITE SECTION */
1300 
1301 
1302 
/* DUMPING MAGIC */

extern int tsleep_crypto_dump;

/*
 * Kernel core-dump entry point for the crypt target.
 *
 * Encrypts one chunk of dump data, waits (pseudo-synchronously) for the
 * async crypto completion, then writes the ciphertext to the backing
 * device with dev_ddump().  A zero length means "flush": pass straight
 * through to the device.  A single static helper is used because the
 * dump path is single-threaded and must not allocate memory.
 */
static int
dm_target_crypt_dump(dm_table_entry_t *table_en, void *data, size_t length, off_t offset)
{
	static struct dmtc_dump_helper dump_helper;
	dm_target_crypt_config_t *priv;
	int id;
	static int first_call = 1;

	priv = table_en->target_config;

	/* One-time: bring the other CPUs back up so crypto can run. */
	if (first_call) {
		first_call = 0;
		dump_reactivate_cpus();
	}

	/* Magically enable tsleep */
	tsleep_crypto_dump = 1;
	id = 0;

	/*
	 * 0 length means flush buffers and return
	 */
	if (length == 0) {
		if (priv->pdev->pdev_vnode->v_rdev == NULL) {
			tsleep_crypto_dump = 0;
			return ENXIO;
		}
		dev_ddump(priv->pdev->pdev_vnode->v_rdev,
		    data, 0, offset, 0);
		tsleep_crypto_dump = 0;
		return 0;
	}

	/* Describe the chunk and kick off async encryption into .space. */
	bzero(&dump_helper, sizeof(dump_helper));
	dump_helper.priv = priv;
	dump_helper.data = data;
	dump_helper.length = length;
	dump_helper.offset = offset +
	    priv->block_offset * DEV_BSIZE;
	dump_helper.ident = &id;
	dmtc_crypto_dump_start(priv, &dump_helper);

	/*
	 * Hackery to make stuff appear synchronous. The crypto callback will
	 * set id to 1 and call wakeup on it. If the request completed
	 * synchronously, id will be 1 and we won't bother to sleep. If not,
	 * the crypto request will complete asynchronously and we sleep until
	 * it's done.
	 */
	if (id == 0)
		tsleep(&dump_helper, 0, "cryptdump", 0);

	dump_helper.offset = dm_pdev_correct_dump_offset(priv->pdev,
	    dump_helper.offset);

	/* Write the encrypted copy, not the caller's plaintext. */
	dev_ddump(priv->pdev->pdev_vnode->v_rdev,
	    dump_helper.space, 0, dump_helper.offset,
	    dump_helper.length);

	tsleep_crypto_dump = 0;
	return 0;
}
1369 
/*
 * Queue encryption of a dump chunk.
 *
 * Copies the chunk into the helper's static bounce area and dispatches
 * one crypto request per DEV_BSIZE sector using the helper's
 * preallocated crp/crd arrays (the dump path must not allocate).
 * Completions are counted down in dmtc_crypto_cb_dump_done, which
 * wakes the dumping thread.  The chunk must fit the static space
 * (<= 64KB, asserted below).
 */
static void
dmtc_crypto_dump_start(dm_target_crypt_config_t *priv, struct dmtc_dump_helper *dump_helper)
{
	struct cryptodesc *crd;
	struct cryptop *crp;
	int i, bytes, sectors;
	off_t isector;

	bytes = dump_helper->length;

	isector = dump_helper->offset / DEV_BSIZE;	/* ivgen salt base? */
	sectors = bytes / DEV_BSIZE;		/* Number of sectors */
	/* Completion countdown consumed by dmtc_crypto_cb_dump_done. */
	dump_helper->sectors = sectors;
#if 0
	kprintf("Dump, bytes = %d, "
		"sectors = %d, LENGTH=%zu\n", bytes, sectors, dump_helper->length);
#endif
	KKASSERT(dump_helper->length <= 65536);

	/* Encrypt a copy; never touch the caller's dump data in place. */
	memcpy(dump_helper->space, dump_helper->data, bytes);

	/* Publish the helper fields before async callbacks can run. */
	cpu_sfence();

	for (i = 0; i < sectors; i++) {
		/* Static per-sector descriptors -- no allocation here. */
		crp = &dump_helper->crp[i];
		crd = &dump_helper->crd[i];

		crp->crp_buf = dump_helper->space + i * DEV_BSIZE;

		crp->crp_sid = priv->crypto_sid;
		crp->crp_ilen = crp->crp_olen = DEV_BSIZE;

		crp->crp_opaque = (void *)dump_helper;

		crp->crp_callback = dmtc_crypto_cb_dump_done;
		crp->crp_desc = crd;
		crp->crp_etype = 0;
		crp->crp_flags = CRYPTO_F_CBIFSYNC | CRYPTO_F_REL |
				 CRYPTO_F_BATCH;

		crd->crd_alg = priv->crypto_alg;

		crd->crd_skip = 0;
		crd->crd_len = DEV_BSIZE /* XXX */;
		crd->crd_flags = CRD_F_IV_EXPLICIT | CRD_F_IV_PRESENT;
		crd->crd_next = NULL;

		/* Dump path always encrypts. */
		crd->crd_flags |= CRD_F_ENCRYPT;

		/*
		 * Note: last argument is used to generate salt(?) and is
		 *	 a 64 bit value, but the original code passed an
		 *	 int.  Changing it now will break pre-existing
		 *	 crypt volumes.
		 */
		priv->ivgen->gen_iv(priv, crd->crd_iv, sizeof(crd->crd_iv),
				    isector + i, crp);
	}
}
1429 
1430 static int
1431 dmtc_crypto_cb_dump_done(struct cryptop *crp)
1432 {
1433 	struct dmtc_dump_helper *dump_helper;
1434 	int n;
1435 
1436 	if (crp->crp_etype == EAGAIN)
1437 		return crypto_dispatch(crp);
1438 
1439 	dump_helper = (struct dmtc_dump_helper *)crp->crp_opaque;
1440 	KKASSERT(dump_helper != NULL);
1441 
1442 	if (crp->crp_etype != 0) {
1443 		kprintf("dm_target_crypt: dmtc_crypto_cb_dump_done "
1444 			"crp_etype = %d\n",
1445 		crp->crp_etype);
1446 		return crp->crp_etype;
1447 	}
1448 
1449 	/*
1450 	 * On the last chunk of the encryption we return control
1451 	 */
1452 	n = atomic_fetchadd_int(&dump_helper->sectors, -1);
1453 
1454 	if (n == 1) {
1455 		atomic_add_int(dump_helper->ident, 1);
1456 		wakeup(dump_helper);
1457 	}
1458 
1459 	return 0;
1460 }
1461 
1462 static int
1463 dmtc_mod_handler(module_t mod, int type, void *unused)
1464 {
1465 	dm_target_t *dmt = NULL;
1466 	int err = 0;
1467 
1468 	switch (type) {
1469 	case MOD_LOAD:
1470 		if ((dmt = dm_target_lookup("crypt")) != NULL) {
1471 			dm_target_unbusy(dmt);
1472 			return EEXIST;
1473 		}
1474 		dmt = dm_target_alloc("crypt");
1475 		dmt->version[0] = 1;
1476 		dmt->version[1] = 6;
1477 		dmt->version[2] = 0;
1478 		dmt->init = &dm_target_crypt_init;
1479 		dmt->destroy = &dm_target_crypt_destroy;
1480 		dmt->strategy = &dm_target_crypt_strategy;
1481 		dmt->table = &dm_target_crypt_table;
1482 		dmt->dump = &dm_target_crypt_dump;
1483 
1484 		err = dm_target_insert(dmt);
1485 		if (!err)
1486 			kprintf("dm_target_crypt: Successfully initialized\n");
1487 		break;
1488 
1489 	case MOD_UNLOAD:
1490 		err = dm_target_remove("crypt");
1491 		if (err == 0) {
1492 			kprintf("dm_target_crypt: unloaded\n");
1493 		}
1494 		break;
1495 	}
1496 
1497 	return err;
1498 }
1499 
1500 DM_TARGET_MODULE(dm_target_crypt, dmtc_mod_handler);
1501