/* $OpenBSD: softraid_crypto.c,v 1.91 2013/03/31 15:44:52 jsing Exp $ */
/*
 * Copyright (c) 2007 Marco Peereboom <marco@peereboom.us>
 * Copyright (c) 2008 Hans-Joerg Hoexer <hshoexer@openbsd.org>
 * Copyright (c) 2008 Damien Miller <djm@mindrot.org>
 * Copyright (c) 2009 Joel Sing <jsing@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "bio.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/buf.h>
#include <sys/device.h>
#include <sys/ioctl.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/kernel.h>
#include <sys/disk.h>
#include <sys/rwlock.h>
#include <sys/queue.h>
#include <sys/fcntl.h>
#include <sys/disklabel.h>
#include <sys/mount.h>
#include <sys/sensors.h>
#include <sys/stat.h>
#include <sys/conf.h>
#include <sys/uio.h>
#include <sys/dkio.h>

#include <crypto/cryptodev.h>
#include <crypto/cryptosoft.h>
#include <crypto/rijndael.h>
#include <crypto/md5.h>
#include <crypto/sha1.h>
#include <crypto/sha2.h>
#include <crypto/hmac.h>

#include <scsi/scsi_all.h>
#include <scsi/scsiconf.h>
#include <scsi/scsi_disk.h>

#include <dev/softraidvar.h>
#include <dev/rndvar.h>

/*
 * The per-I/O data that we need to preallocate. We cannot afford to allow I/O
 * to start failing when memory pressure kicks in. We can store this in the WU
 * because we assert that only one ccb per WU will ever be active.
 */
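/*
 * Note: cr_crp and its descriptor chain (cr_descs) are preallocated in
 * sr_crypto_alloc_resources(); sr_crypto_wu_get() reattaches just enough
 * of that chain for the current transfer and sr_crypto_wu_put() detaches
 * it again, so no crypto allocations happen in the I/O path.
 */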
struct sr_crypto_wu {
	TAILQ_ENTRY(sr_crypto_wu)	 cr_link;
	struct uio			 cr_uio;
	struct iovec			 cr_iov;
	struct cryptop			*cr_crp;
	struct cryptodesc		*cr_descs;
	struct sr_workunit		*cr_wu;
	void				*cr_dmabuf;
};


struct sr_crypto_wu *sr_crypto_wu_get(struct sr_workunit *, int);
void		sr_crypto_wu_put(struct sr_crypto_wu *);
int		sr_crypto_create_keys(struct sr_discipline *);
int		sr_crypto_get_kdf(struct bioc_createraid *,
		    struct sr_discipline *);
int		sr_crypto_decrypt(u_char *, u_char *, u_char *, size_t, int);
int		sr_crypto_encrypt(u_char *, u_char *, u_char *, size_t, int);
int		sr_crypto_decrypt_key(struct sr_discipline *);
int		sr_crypto_change_maskkey(struct sr_discipline *,
		    struct sr_crypto_kdfinfo *, struct sr_crypto_kdfinfo *);
int		sr_crypto_create(struct sr_discipline *,
		    struct bioc_createraid *, int, int64_t);
int		sr_crypto_assemble(struct sr_discipline *,
		    struct bioc_createraid *, int, void *);
int		sr_crypto_alloc_resources(struct sr_discipline *);
void		sr_crypto_free_resources(struct sr_discipline *);
int		sr_crypto_ioctl(struct sr_discipline *,
		    struct bioc_discipline *);
int		sr_crypto_meta_opt_handler(struct sr_discipline *,
		    struct sr_meta_opt_hdr *);
int		sr_crypto_write(struct cryptop *);
int		sr_crypto_rw(struct sr_workunit *);
int		sr_crypto_rw2(struct sr_workunit *, struct sr_crypto_wu *);
void		sr_crypto_done(struct sr_workunit *);
int		sr_crypto_read(struct cryptop *);
void		sr_crypto_finish_io(struct sr_workunit *);
void		sr_crypto_calculate_check_hmac_sha1(u_int8_t *, int,
		    u_int8_t *, int, u_char *);
void		sr_crypto_hotplug(struct sr_discipline *, struct disk *, int);
struct sr_chunk *sr_crypto_create_key_disk(struct sr_discipline *, dev_t);
struct sr_chunk *sr_crypto_read_key_disk(struct sr_discipline *, dev_t);

#ifdef SR_DEBUG0
void		 sr_crypto_dumpkeys(struct sr_discipline *);
#endif

/* Discipline initialisation. */
void
sr_crypto_discipline_init(struct sr_discipline *sd)
{
	int i;

	/* Fill out discipline members. */
	sd->sd_type = SR_MD_CRYPTO;
	strlcpy(sd->sd_name, "CRYPTO", sizeof(sd->sd_name));
	sd->sd_capabilities = SR_CAP_SYSTEM_DISK | SR_CAP_AUTO_ASSEMBLE;
	sd->sd_max_wu = SR_CRYPTO_NOWU;

	for (i = 0; i < SR_CRYPTO_MAXKEYS; i++)
		sd->mds.mdd_crypto.scr_sid[i] = (u_int64_t)-1;

	/* Setup discipline specific function pointers. */
	sd->sd_alloc_resources = sr_crypto_alloc_resources;
	sd->sd_assemble = sr_crypto_assemble;
	sd->sd_create = sr_crypto_create;
	sd->sd_free_resources = sr_crypto_free_resources;
	sd->sd_ioctl_handler = sr_crypto_ioctl;
	sd->sd_meta_opt_handler = sr_crypto_meta_opt_handler;
	sd->sd_scsi_rw = sr_crypto_rw;
	sd->sd_scsi_done = sr_crypto_done;
}

int
sr_crypto_create(struct sr_discipline *sd, struct bioc_createraid *bc,
    int no_chunk, int64_t coerced_size)
{
	struct sr_meta_opt_item	*omi;
	int			rv = EINVAL;

	if (no_chunk != 1) {
		sr_error(sd->sd_sc, "%s requires exactly one chunk",
		    sd->sd_name);
		goto done;
	}

	/* Create crypto optional metadata. */
	omi = malloc(sizeof(struct sr_meta_opt_item), M_DEVBUF,
	    M_WAITOK | M_ZERO);
	omi->omi_som = malloc(sizeof(struct sr_meta_crypto), M_DEVBUF,
	    M_WAITOK | M_ZERO);
	omi->omi_som->som_type = SR_OPT_CRYPTO;
	omi->omi_som->som_length = sizeof(struct sr_meta_crypto);
	SLIST_INSERT_HEAD(&sd->sd_meta_opt, omi, omi_link);
	sd->mds.mdd_crypto.scr_meta = (struct sr_meta_crypto *)omi->omi_som;
	sd->sd_meta->ssdi.ssd_opt_no++;

	sd->mds.mdd_crypto.key_disk = NULL;

	if (bc->bc_key_disk != NODEV) {

		/* Create a key disk. */
		if (sr_crypto_get_kdf(bc, sd))
			goto done;
		sd->mds.mdd_crypto.key_disk =
		    sr_crypto_create_key_disk(sd, bc->bc_key_disk);
		if (sd->mds.mdd_crypto.key_disk == NULL)
			goto done;
		sd->sd_capabilities |= SR_CAP_AUTO_ASSEMBLE;

	} else if (bc->bc_opaque_flags & BIOC_SOOUT) {

		/* No hint available yet. */
		bc->bc_opaque_status = BIOC_SOINOUT_FAILED;
		rv = EAGAIN;
		goto done;

	} else if (sr_crypto_get_kdf(bc, sd))
		goto done;

	/* Passphrase volumes cannot be automatically assembled. */
	if (!(bc->bc_flags & BIOC_SCNOAUTOASSEMBLE) && bc->bc_key_disk == NODEV)
		goto done;

	sd->sd_meta->ssdi.ssd_size = coerced_size;

	sr_crypto_create_keys(sd);

	sd->sd_max_ccb_per_wu = no_chunk;

	rv = 0;
done:
	return (rv);
}

int
sr_crypto_assemble(struct sr_discipline *sd, struct bioc_createraid *bc,
    int no_chunk, void *data)
{
	int	rv = EINVAL;

	sd->mds.mdd_crypto.key_disk = NULL;

	/* Crypto optional metadata must already exist... */
	if (sd->mds.mdd_crypto.scr_meta == NULL)
		goto done;

	if (data != NULL) {
		/* Kernel already has mask key. */
		bcopy(data, sd->mds.mdd_crypto.scr_maskkey,
		    sizeof(sd->mds.mdd_crypto.scr_maskkey));
	} else if (bc->bc_key_disk != NODEV) {
		/* Read the mask key from the key disk. */
		sd->mds.mdd_crypto.key_disk =
		    sr_crypto_read_key_disk(sd, bc->bc_key_disk);
		if (sd->mds.mdd_crypto.key_disk == NULL)
			goto done;
	} else if (bc->bc_opaque_flags & BIOC_SOOUT) {
		/* provide userland with kdf hint */
		if (bc->bc_opaque == NULL)
			goto done;

		if (sizeof(sd->mds.mdd_crypto.scr_meta->scm_kdfhint) <
		    bc->bc_opaque_size)
			goto done;

		if (copyout(sd->mds.mdd_crypto.scr_meta->scm_kdfhint,
		    bc->bc_opaque, bc->bc_opaque_size))
			goto done;

		/* we're done */
		bc->bc_opaque_status = BIOC_SOINOUT_OK;
		rv = EAGAIN;
		goto done;
	} else if (bc->bc_opaque_flags & BIOC_SOIN) {
		/* get kdf with maskkey from userland */
		if (sr_crypto_get_kdf(bc, sd))
			goto done;
	} else
		goto done;

	sd->sd_max_ccb_per_wu = sd->sd_meta->ssdi.ssd_chunk_no;

	rv = 0;
done:
	return (rv);
}

struct sr_crypto_wu *
sr_crypto_wu_get(struct sr_workunit *wu, int encrypt)
{
	struct scsi_xfer	*xs = wu->swu_xs;
	struct sr_discipline	*sd = wu->swu_dis;
	struct sr_crypto_wu	*crwu;
	struct cryptodesc	*crd;
	int			flags, i, n;
	daddr64_t		blk = 0;
	u_int			keyndx;

	DNPRINTF(SR_D_DIS, "%s: sr_crypto_wu_get wu: %p encrypt: %d\n",
	    DEVNAME(sd->sd_sc), wu, encrypt);

	mtx_enter(&sd->mds.mdd_crypto.scr_mutex);
	if ((crwu = TAILQ_FIRST(&sd->mds.mdd_crypto.scr_wus)) != NULL)
		TAILQ_REMOVE(&sd->mds.mdd_crypto.scr_wus, crwu, cr_link);
	mtx_leave(&sd->mds.mdd_crypto.scr_mutex);
	if (crwu == NULL)
		panic("sr_crypto_wu_get: out of wus");

	crwu->cr_uio.uio_iovcnt = 1;
	crwu->cr_uio.uio_iov->iov_len = xs->datalen;
	if (xs->flags & SCSI_DATA_OUT) {
		crwu->cr_uio.uio_iov->iov_base = crwu->cr_dmabuf;
		bcopy(xs->data, crwu->cr_uio.uio_iov->iov_base, xs->datalen);
	} else
		crwu->cr_uio.uio_iov->iov_base = xs->data;

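	/*
	 * Extract the starting block number from the CDB; READ/WRITE
	 * commands come in 6-, 10- and 16-byte variants that carry the
	 * LBA at different widths.
	 */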
	if (xs->cmdlen == 10)
		blk = _4btol(((struct scsi_rw_big *)xs->cmd)->addr);
	else if (xs->cmdlen == 16)
		blk = _8btol(((struct scsi_rw_16 *)xs->cmd)->addr);
	else if (xs->cmdlen == 6)
		blk = _3btol(((struct scsi_rw *)xs->cmd)->addr);

	n = xs->datalen >> DEV_BSHIFT;

	/*
	 * We preallocated enough crypto descs for up to MAXPHYS of I/O.
	 * Since there may be less than that we need to tweak the linked list
	 * of crypto desc structures to be just long enough for our needs.
	 */
	crd = crwu->cr_descs;
	for (i = 0; i < ((MAXPHYS >> DEV_BSHIFT) - n); i++) {
		crd = crd->crd_next;
		KASSERT(crd);
	}
	crwu->cr_crp->crp_desc = crd;
	flags = (encrypt ? CRD_F_ENCRYPT : 0) |
	    CRD_F_IV_PRESENT | CRD_F_IV_EXPLICIT;

	/* Select crypto session based on block number */
	keyndx = blk >> SR_CRYPTO_KEY_BLKSHIFT;
	if (keyndx >= SR_CRYPTO_MAXKEYS)
		goto unwind;
	crwu->cr_crp->crp_sid = sd->mds.mdd_crypto.scr_sid[keyndx];
	if (crwu->cr_crp->crp_sid == (u_int64_t)-1)
		goto unwind;

	crwu->cr_crp->crp_ilen = xs->datalen;
	crwu->cr_crp->crp_alloctype = M_DEVBUF;
	crwu->cr_crp->crp_buf = &crwu->cr_uio;
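	/*
	 * One descriptor per DEV_BSIZE sector: each sector is encrypted or
	 * decrypted with AES-XTS using its disk block number as the
	 * explicit IV, so sectors can be processed independently.
	 */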
	for (i = 0, crd = crwu->cr_crp->crp_desc; crd;
	    i++, blk++, crd = crd->crd_next) {
		crd->crd_skip = i << DEV_BSHIFT;
		crd->crd_len = DEV_BSIZE;
		crd->crd_inject = 0;
		crd->crd_flags = flags;
		crd->crd_alg = CRYPTO_AES_XTS;

		switch (sd->mds.mdd_crypto.scr_meta->scm_alg) {
		case SR_CRYPTOA_AES_XTS_128:
			crd->crd_klen = 256;
			break;
		case SR_CRYPTOA_AES_XTS_256:
			crd->crd_klen = 512;
			break;
		default:
			goto unwind;
		}
		crd->crd_key = sd->mds.mdd_crypto.scr_key[0];
		bcopy(&blk, crd->crd_iv, sizeof(blk));
	}
	crwu->cr_wu = wu;
	crwu->cr_crp->crp_opaque = crwu;

	return (crwu);

unwind:
	/* steal the descriptors back from the cryptop */
	crwu->cr_crp->crp_desc = NULL;

	return (NULL);
}

void
sr_crypto_wu_put(struct sr_crypto_wu *crwu)
{
	struct cryptop		*crp = crwu->cr_crp;
	struct sr_workunit	*wu = crwu->cr_wu;
	struct sr_discipline	*sd = wu->swu_dis;

	DNPRINTF(SR_D_DIS, "%s: sr_crypto_wu_put crwu: %p\n",
	    DEVNAME(wu->swu_dis->sd_sc), crwu);

	/* steal the descriptors back from the cryptop */
	crp->crp_desc = NULL;

	mtx_enter(&sd->mds.mdd_crypto.scr_mutex);
	TAILQ_INSERT_TAIL(&sd->mds.mdd_crypto.scr_wus, crwu, cr_link);
	mtx_leave(&sd->mds.mdd_crypto.scr_mutex);
}

int
sr_crypto_get_kdf(struct bioc_createraid *bc, struct sr_discipline *sd)
{
	int			rv = EINVAL;
	struct sr_crypto_kdfinfo *kdfinfo;

	if (!(bc->bc_opaque_flags & BIOC_SOIN))
		return (rv);
	if (bc->bc_opaque == NULL)
		return (rv);
	if (bc->bc_opaque_size != sizeof(*kdfinfo))
		return (rv);

	kdfinfo = malloc(bc->bc_opaque_size, M_DEVBUF, M_WAITOK | M_ZERO);
	if (copyin(bc->bc_opaque, kdfinfo, bc->bc_opaque_size))
		goto out;

	if (kdfinfo->len != bc->bc_opaque_size)
		goto out;

	/* copy KDF hint to disk meta data */
	if (kdfinfo->flags & SR_CRYPTOKDF_HINT) {
		if (sizeof(sd->mds.mdd_crypto.scr_meta->scm_kdfhint) <
		    kdfinfo->genkdf.len)
			goto out;
		bcopy(&kdfinfo->genkdf,
		    sd->mds.mdd_crypto.scr_meta->scm_kdfhint,
		    kdfinfo->genkdf.len);
	}

	/* copy mask key to run-time meta data */
	if ((kdfinfo->flags & SR_CRYPTOKDF_KEY)) {
		if (sizeof(sd->mds.mdd_crypto.scr_maskkey) <
		    sizeof(kdfinfo->maskkey))
			goto out;
		bcopy(&kdfinfo->maskkey, sd->mds.mdd_crypto.scr_maskkey,
		    sizeof(kdfinfo->maskkey));
	}

	bc->bc_opaque_status = BIOC_SOINOUT_OK;
	rv = 0;
out:
	explicit_bzero(kdfinfo, bc->bc_opaque_size);
	free(kdfinfo, M_DEVBUF);

	return (rv);
}

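/*
 * The AES-XTS disk keys never reach the media in the clear: they are stored
 * in the metadata encrypted ("masked") with AES-256-ECB under the mask key,
 * which in turn comes from the user's passphrase/KDF or from a key disk.
 * The helpers below implement that masking step.
 */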
int
sr_crypto_encrypt(u_char *p, u_char *c, u_char *key, size_t size, int alg)
{
	rijndael_ctx		ctx;
	int			i, rv = 1;

	switch (alg) {
	case SR_CRYPTOM_AES_ECB_256:
		if (rijndael_set_key_enc_only(&ctx, key, 256) != 0)
			goto out;
		for (i = 0; i < size; i += RIJNDAEL128_BLOCK_LEN)
			rijndael_encrypt(&ctx, &p[i], &c[i]);
		rv = 0;
		break;
	default:
		DNPRINTF(SR_D_DIS, "%s: unsupported encryption algorithm %u\n",
		    "softraid", alg);
		rv = -1;
		goto out;
	}

out:
	explicit_bzero(&ctx, sizeof(ctx));
	return (rv);
}

int
sr_crypto_decrypt(u_char *c, u_char *p, u_char *key, size_t size, int alg)
{
	rijndael_ctx		ctx;
	int			i, rv = 1;

	switch (alg) {
	case SR_CRYPTOM_AES_ECB_256:
		if (rijndael_set_key(&ctx, key, 256) != 0)
			goto out;
		for (i = 0; i < size; i += RIJNDAEL128_BLOCK_LEN)
			rijndael_decrypt(&ctx, &c[i], &p[i]);
		rv = 0;
		break;
	default:
		DNPRINTF(SR_D_DIS, "%s: unsupported encryption algorithm %u\n",
		    "softraid", alg);
		rv = -1;
		goto out;
	}

out:
	explicit_bzero(&ctx, sizeof(ctx));
	return (rv);
}

void
sr_crypto_calculate_check_hmac_sha1(u_int8_t *maskkey, int maskkey_size,
    u_int8_t *key, int key_size, u_char *check_digest)
{
	u_char			check_key[SHA1_DIGEST_LENGTH];
	HMAC_SHA1_CTX		hmacctx;
	SHA1_CTX		shactx;

	bzero(check_key, sizeof(check_key));
	bzero(&hmacctx, sizeof(hmacctx));
	bzero(&shactx, sizeof(shactx));

	/* k = SHA1(mask_key) */
	SHA1Init(&shactx);
	SHA1Update(&shactx, maskkey, maskkey_size);
	SHA1Final(check_key, &shactx);

	/* mac = HMAC_SHA1_k(unencrypted key) */
	HMAC_SHA1_Init(&hmacctx, check_key, sizeof(check_key));
	HMAC_SHA1_Update(&hmacctx, key, key_size);
	HMAC_SHA1_Final(check_digest, &hmacctx);

	explicit_bzero(check_key, sizeof(check_key));
	explicit_bzero(&hmacctx, sizeof(hmacctx));
	explicit_bzero(&shactx, sizeof(shactx));
}

int
sr_crypto_decrypt_key(struct sr_discipline *sd)
{
	u_char			check_digest[SHA1_DIGEST_LENGTH];
	int			rv = 1;

	DNPRINTF(SR_D_DIS, "%s: sr_crypto_decrypt_key\n", DEVNAME(sd->sd_sc));

	if (sd->mds.mdd_crypto.scr_meta->scm_check_alg != SR_CRYPTOC_HMAC_SHA1)
		goto out;

	if (sr_crypto_decrypt((u_char *)sd->mds.mdd_crypto.scr_meta->scm_key,
	    (u_char *)sd->mds.mdd_crypto.scr_key,
	    sd->mds.mdd_crypto.scr_maskkey, sizeof(sd->mds.mdd_crypto.scr_key),
	    sd->mds.mdd_crypto.scr_meta->scm_mask_alg) == -1)
		goto out;

#ifdef SR_DEBUG0
	sr_crypto_dumpkeys(sd);
#endif

	/* Check that the key decrypted properly. */
	sr_crypto_calculate_check_hmac_sha1(sd->mds.mdd_crypto.scr_maskkey,
	    sizeof(sd->mds.mdd_crypto.scr_maskkey),
	    (u_int8_t *)sd->mds.mdd_crypto.scr_key,
	    sizeof(sd->mds.mdd_crypto.scr_key),
	    check_digest);
	if (memcmp(sd->mds.mdd_crypto.scr_meta->chk_hmac_sha1.sch_mac,
	    check_digest, sizeof(check_digest)) != 0) {
		explicit_bzero(sd->mds.mdd_crypto.scr_key,
		    sizeof(sd->mds.mdd_crypto.scr_key));
		goto out;
	}

	rv = 0; /* Success */
out:
	/* we don't need the mask key anymore */
	explicit_bzero(&sd->mds.mdd_crypto.scr_maskkey,
	    sizeof(sd->mds.mdd_crypto.scr_maskkey));

	explicit_bzero(check_digest, sizeof(check_digest));

	return rv;
}

int
sr_crypto_create_keys(struct sr_discipline *sd)
{

	DNPRINTF(SR_D_DIS, "%s: sr_crypto_create_keys\n",
	    DEVNAME(sd->sd_sc));

	if (AES_MAXKEYBYTES < sizeof(sd->mds.mdd_crypto.scr_maskkey))
		return (1);

	/* XXX allow user to specify */
	sd->mds.mdd_crypto.scr_meta->scm_alg = SR_CRYPTOA_AES_XTS_256;

	/* generate crypto keys */
	arc4random_buf(sd->mds.mdd_crypto.scr_key,
	    sizeof(sd->mds.mdd_crypto.scr_key));

	/* Mask the disk keys. */
	sd->mds.mdd_crypto.scr_meta->scm_mask_alg = SR_CRYPTOM_AES_ECB_256;
	sr_crypto_encrypt((u_char *)sd->mds.mdd_crypto.scr_key,
	    (u_char *)sd->mds.mdd_crypto.scr_meta->scm_key,
	    sd->mds.mdd_crypto.scr_maskkey, sizeof(sd->mds.mdd_crypto.scr_key),
	    sd->mds.mdd_crypto.scr_meta->scm_mask_alg);

	/* Prepare key decryption check code. */
	sd->mds.mdd_crypto.scr_meta->scm_check_alg = SR_CRYPTOC_HMAC_SHA1;
	sr_crypto_calculate_check_hmac_sha1(sd->mds.mdd_crypto.scr_maskkey,
	    sizeof(sd->mds.mdd_crypto.scr_maskkey),
	    (u_int8_t *)sd->mds.mdd_crypto.scr_key,
	    sizeof(sd->mds.mdd_crypto.scr_key),
	    sd->mds.mdd_crypto.scr_meta->chk_hmac_sha1.sch_mac);

	/* Erase the plaintext disk keys */
	explicit_bzero(sd->mds.mdd_crypto.scr_key,
	    sizeof(sd->mds.mdd_crypto.scr_key));

#ifdef SR_DEBUG0
	sr_crypto_dumpkeys(sd);
#endif

	sd->mds.mdd_crypto.scr_meta->scm_flags = SR_CRYPTOF_KEY |
	    SR_CRYPTOF_KDFHINT;

	return (0);
}

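/*
 * Re-keying: decrypt the disk keys with the current mask key (kdfinfo1),
 * verify them against the stored HMAC, then re-encrypt them with the new
 * mask key (kdfinfo2) and write a fresh HMAC into the metadata.
 */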
int
sr_crypto_change_maskkey(struct sr_discipline *sd,
    struct sr_crypto_kdfinfo *kdfinfo1, struct sr_crypto_kdfinfo *kdfinfo2)
{
	u_char			check_digest[SHA1_DIGEST_LENGTH];
	u_char			*c, *p = NULL;
	size_t			ksz;
	int			rv = 1;

	DNPRINTF(SR_D_DIS, "%s: sr_crypto_change_maskkey\n",
	    DEVNAME(sd->sd_sc));

	if (sd->mds.mdd_crypto.scr_meta->scm_check_alg != SR_CRYPTOC_HMAC_SHA1)
		goto out;

	c = (u_char *)sd->mds.mdd_crypto.scr_meta->scm_key;
	ksz = sizeof(sd->mds.mdd_crypto.scr_key);
	p = malloc(ksz, M_DEVBUF, M_WAITOK | M_CANFAIL | M_ZERO);
	if (p == NULL)
		goto out;

	if (sr_crypto_decrypt(c, p, kdfinfo1->maskkey, ksz,
	    sd->mds.mdd_crypto.scr_meta->scm_mask_alg) == -1)
		goto out;

#ifdef SR_DEBUG0
	sr_crypto_dumpkeys(sd);
#endif

	sr_crypto_calculate_check_hmac_sha1(kdfinfo1->maskkey,
	    sizeof(kdfinfo1->maskkey), p, ksz, check_digest);
	if (memcmp(sd->mds.mdd_crypto.scr_meta->chk_hmac_sha1.sch_mac,
	    check_digest, sizeof(check_digest)) != 0) {
		sr_error(sd->sd_sc, "incorrect key or passphrase");
		rv = EPERM;
		goto out;
	}

	/* Mask the disk keys. */
	c = (u_char *)sd->mds.mdd_crypto.scr_meta->scm_key;
	if (sr_crypto_encrypt(p, c, kdfinfo2->maskkey, ksz,
	    sd->mds.mdd_crypto.scr_meta->scm_mask_alg) == -1)
		goto out;

	/* Prepare key decryption check code. */
	sd->mds.mdd_crypto.scr_meta->scm_check_alg = SR_CRYPTOC_HMAC_SHA1;
	sr_crypto_calculate_check_hmac_sha1(kdfinfo2->maskkey,
	    sizeof(kdfinfo2->maskkey), (u_int8_t *)sd->mds.mdd_crypto.scr_key,
	    sizeof(sd->mds.mdd_crypto.scr_key), check_digest);

	/* Copy new encrypted key and HMAC to metadata. */
	bcopy(check_digest, sd->mds.mdd_crypto.scr_meta->chk_hmac_sha1.sch_mac,
	    sizeof(sd->mds.mdd_crypto.scr_meta->chk_hmac_sha1.sch_mac));

	rv = 0; /* Success */

out:
	if (p) {
		explicit_bzero(p, ksz);
		free(p, M_DEVBUF);
	}

	explicit_bzero(check_digest, sizeof(check_digest));
	explicit_bzero(&kdfinfo1->maskkey, sizeof(kdfinfo1->maskkey));
	explicit_bzero(&kdfinfo2->maskkey, sizeof(kdfinfo2->maskkey));

	return (rv);
}

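/*
 * A key disk is a separate RAID-type partition whose softraid metadata
 * carries the volume's mask key in an optional metadata item
 * (SR_OPT_KEYDISK).  With a key disk present the volume can be
 * auto-assembled without prompting for a passphrase.
 */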
struct sr_chunk *
sr_crypto_create_key_disk(struct sr_discipline *sd, dev_t dev)
{
	struct sr_softc		*sc = sd->sd_sc;
	struct sr_discipline	*fakesd = NULL;
	struct sr_metadata	*sm = NULL;
	struct sr_meta_chunk	*km;
	struct sr_meta_opt_item *omi = NULL;
	struct sr_meta_keydisk	*skm;
	struct sr_chunk		*key_disk = NULL;
	struct disklabel	label;
	struct vnode		*vn;
	char			devname[32];
	int			c, part, open = 0;

	/*
	 * Create a metadata structure on the key disk and store
	 * keying material in the optional metadata.
	 */

	sr_meta_getdevname(sc, dev, devname, sizeof(devname));

	/* Make sure chunk is not already in use. */
	c = sr_chunk_in_use(sc, dev);
	if (c != BIOC_SDINVALID && c != BIOC_SDOFFLINE) {
		sr_error(sc, "%s is already in use", devname);
		goto done;
	}

	/* Open device. */
	if (bdevvp(dev, &vn)) {
		sr_error(sc, "cannot open key disk %s", devname);
		goto done;
	}
	if (VOP_OPEN(vn, FREAD | FWRITE, NOCRED, curproc)) {
		DNPRINTF(SR_D_META,"%s: sr_crypto_create_key_disk cannot "
		    "open %s\n", DEVNAME(sc), devname);
		vput(vn);
		goto fail;
	}
	open = 1; /* close dev on error */

	/* Get partition details. */
	part = DISKPART(dev);
	if (VOP_IOCTL(vn, DIOCGDINFO, (caddr_t)&label,
	    FREAD, NOCRED, curproc)) {
		DNPRINTF(SR_D_META, "%s: sr_crypto_create_key_disk ioctl "
		    "failed\n", DEVNAME(sc));
		VOP_CLOSE(vn, FREAD | FWRITE, NOCRED, curproc);
		vput(vn);
		goto fail;
	}
	if (label.d_secsize != DEV_BSIZE) {
		sr_error(sc, "%s has unsupported sector size (%d)",
		    devname, label.d_secsize);
		goto fail;
	}
	if (label.d_partitions[part].p_fstype != FS_RAID) {
		sr_error(sc, "%s partition not of type RAID (%d)\n",
		    devname, label.d_partitions[part].p_fstype);
		goto fail;
	}

	/*
	 * Create and populate chunk metadata.
	 */

	key_disk = malloc(sizeof(struct sr_chunk), M_DEVBUF, M_WAITOK | M_ZERO);
	km = &key_disk->src_meta;

	key_disk->src_dev_mm = dev;
	key_disk->src_vn = vn;
	strlcpy(key_disk->src_devname, devname, sizeof(km->scmi.scm_devname));
	key_disk->src_size = 0;

	km->scmi.scm_volid = sd->sd_meta->ssdi.ssd_level;
	km->scmi.scm_chunk_id = 0;
	km->scmi.scm_size = 0;
	km->scmi.scm_coerced_size = 0;
	strlcpy(km->scmi.scm_devname, devname, sizeof(km->scmi.scm_devname));
	bcopy(&sd->sd_meta->ssdi.ssd_uuid, &km->scmi.scm_uuid,
	    sizeof(struct sr_uuid));

	sr_checksum(sc, km, &km->scm_checksum,
	    sizeof(struct sr_meta_chunk_invariant));

	km->scm_status = BIOC_SDONLINE;

	/*
	 * Create and populate our own discipline and metadata.
	 */

	sm = malloc(sizeof(struct sr_metadata), M_DEVBUF, M_WAITOK | M_ZERO);
	sm->ssdi.ssd_magic = SR_MAGIC;
	sm->ssdi.ssd_version = SR_META_VERSION;
	sm->ssd_ondisk = 0;
	sm->ssdi.ssd_vol_flags = 0;
	bcopy(&sd->sd_meta->ssdi.ssd_uuid, &sm->ssdi.ssd_uuid,
	    sizeof(struct sr_uuid));
	sm->ssdi.ssd_chunk_no = 1;
	sm->ssdi.ssd_volid = SR_KEYDISK_VOLID;
	sm->ssdi.ssd_level = SR_KEYDISK_LEVEL;
	sm->ssdi.ssd_size = 0;
	strlcpy(sm->ssdi.ssd_vendor, "OPENBSD", sizeof(sm->ssdi.ssd_vendor));
	snprintf(sm->ssdi.ssd_product, sizeof(sm->ssdi.ssd_product),
	    "SR %s", "KEYDISK");
	snprintf(sm->ssdi.ssd_revision, sizeof(sm->ssdi.ssd_revision),
	    "%03d", SR_META_VERSION);

	fakesd = malloc(sizeof(struct sr_discipline), M_DEVBUF,
	    M_WAITOK | M_ZERO);
	fakesd->sd_sc = sd->sd_sc;
	fakesd->sd_meta = sm;
	fakesd->sd_meta_type = SR_META_F_NATIVE;
	fakesd->sd_vol_status = BIOC_SVONLINE;
	strlcpy(fakesd->sd_name, "KEYDISK", sizeof(fakesd->sd_name));
	SLIST_INIT(&fakesd->sd_meta_opt);

	/* Add chunk to volume. */
	fakesd->sd_vol.sv_chunks = malloc(sizeof(struct sr_chunk *), M_DEVBUF,
	    M_WAITOK | M_ZERO);
	fakesd->sd_vol.sv_chunks[0] = key_disk;
	SLIST_INIT(&fakesd->sd_vol.sv_chunk_list);
	SLIST_INSERT_HEAD(&fakesd->sd_vol.sv_chunk_list, key_disk, src_link);

	/* Generate mask key. */
	arc4random_buf(sd->mds.mdd_crypto.scr_maskkey,
	    sizeof(sd->mds.mdd_crypto.scr_maskkey));

	/* Copy mask key to optional metadata area. */
	omi = malloc(sizeof(struct sr_meta_opt_item), M_DEVBUF,
	    M_WAITOK | M_ZERO);
	omi->omi_som = malloc(sizeof(struct sr_meta_keydisk), M_DEVBUF,
	    M_WAITOK | M_ZERO);
	omi->omi_som->som_type = SR_OPT_KEYDISK;
	omi->omi_som->som_length = sizeof(struct sr_meta_keydisk);
	skm = (struct sr_meta_keydisk *)omi->omi_som;
	bcopy(sd->mds.mdd_crypto.scr_maskkey, &skm->skm_maskkey,
	    sizeof(skm->skm_maskkey));
	SLIST_INSERT_HEAD(&fakesd->sd_meta_opt, omi, omi_link);
	fakesd->sd_meta->ssdi.ssd_opt_no++;

	/* Save metadata. */
	if (sr_meta_save(fakesd, SR_META_DIRTY)) {
		sr_error(sc, "could not save metadata to %s", devname);
		goto fail;
	}

	goto done;

fail:
	if (key_disk)
		free(key_disk, M_DEVBUF);
	key_disk = NULL;

done:
	if (omi)
		free(omi, M_DEVBUF);
	if (fakesd && fakesd->sd_vol.sv_chunks)
		free(fakesd->sd_vol.sv_chunks, M_DEVBUF);
	if (fakesd)
		free(fakesd, M_DEVBUF);
	if (sm)
		free(sm, M_DEVBUF);
	if (open) {
		VOP_CLOSE(vn, FREAD | FWRITE, NOCRED, curproc);
		vput(vn);
	}

	return key_disk;
}

struct sr_chunk *
sr_crypto_read_key_disk(struct sr_discipline *sd, dev_t dev)
{
	struct sr_softc		*sc = sd->sd_sc;
	struct sr_metadata	*sm = NULL;
	struct sr_meta_opt_item *omi, *omi_next;
	struct sr_meta_opt_hdr	*omh;
	struct sr_meta_keydisk	*skm;
	struct sr_meta_opt_head som;
	struct sr_chunk		*key_disk = NULL;
	struct disklabel	label;
	struct vnode		*vn = NULL;
	char			devname[32];
	int			c, part, open = 0;

	/*
	 * Load a key disk and load keying material into memory.
	 */

	SLIST_INIT(&som);

	sr_meta_getdevname(sc, dev, devname, sizeof(devname));

	/* Make sure chunk is not already in use. */
	c = sr_chunk_in_use(sc, dev);
	if (c != BIOC_SDINVALID && c != BIOC_SDOFFLINE) {
		sr_error(sc, "%s is already in use", devname);
		goto done;
	}

	/* Open device. */
	if (bdevvp(dev, &vn)) {
		sr_error(sc, "cannot open key disk %s", devname);
		goto done;
	}
	if (VOP_OPEN(vn, FREAD | FWRITE, NOCRED, curproc)) {
		DNPRINTF(SR_D_META,"%s: sr_crypto_read_key_disk cannot "
		    "open %s\n", DEVNAME(sc), devname);
		vput(vn);
		goto done;
	}
	open = 1; /* close dev on error */

	/* Get partition details. */
	part = DISKPART(dev);
	if (VOP_IOCTL(vn, DIOCGDINFO, (caddr_t)&label, FREAD,
	    NOCRED, curproc)) {
		DNPRINTF(SR_D_META, "%s: sr_crypto_read_key_disk ioctl "
		    "failed\n", DEVNAME(sc));
		VOP_CLOSE(vn, FREAD | FWRITE, NOCRED, curproc);
		vput(vn);
		goto done;
	}
	if (label.d_secsize != DEV_BSIZE) {
		sr_error(sc, "%s has unsupported sector size (%d)",
		    devname, label.d_secsize);
		goto done;
	}
	if (label.d_partitions[part].p_fstype != FS_RAID) {
		sr_error(sc, "%s partition not of type RAID (%d)\n",
		    devname, label.d_partitions[part].p_fstype);
		goto done;
	}

	/*
	 * Read and validate key disk metadata.
	 */
	sm = malloc(SR_META_SIZE * 512, M_DEVBUF, M_WAITOK | M_ZERO);
	if (sr_meta_native_read(sd, dev, sm, NULL)) {
		sr_error(sc, "native bootprobe could not read native metadata");
		goto done;
	}

	if (sr_meta_validate(sd, dev, sm, NULL)) {
		DNPRINTF(SR_D_META, "%s: invalid metadata\n",
		    DEVNAME(sc));
		goto done;
	}

	/* Make sure this is a key disk. */
	if (sm->ssdi.ssd_level != SR_KEYDISK_LEVEL) {
		sr_error(sc, "%s is not a key disk", devname);
		goto done;
	}

	/* Construct key disk chunk. */
	key_disk = malloc(sizeof(struct sr_chunk), M_DEVBUF, M_WAITOK | M_ZERO);
	key_disk->src_dev_mm = dev;
	key_disk->src_vn = vn;
	key_disk->src_size = 0;

	bcopy((struct sr_meta_chunk *)(sm + 1), &key_disk->src_meta,
	    sizeof(key_disk->src_meta));

	/* Read mask key from optional metadata. */
	sr_meta_opt_load(sc, sm, &som);
	SLIST_FOREACH(omi, &som, omi_link) {
		omh = omi->omi_som;
		if (omh->som_type == SR_OPT_KEYDISK) {
			skm = (struct sr_meta_keydisk *)omh;
			bcopy(&skm->skm_maskkey,
			    sd->mds.mdd_crypto.scr_maskkey,
			    sizeof(sd->mds.mdd_crypto.scr_maskkey));
		} else if (omh->som_type == SR_OPT_CRYPTO) {
			/* Original keydisk format with key in crypto area. */
			bcopy(omh + sizeof(struct sr_meta_opt_hdr),
			    sd->mds.mdd_crypto.scr_maskkey,
			    sizeof(sd->mds.mdd_crypto.scr_maskkey));
		}
	}

	open = 0;

done:
	for (omi = SLIST_FIRST(&som); omi != SLIST_END(&som); omi = omi_next) {
		omi_next = SLIST_NEXT(omi, omi_link);
		if (omi->omi_som)
			free(omi->omi_som, M_DEVBUF);
		free(omi, M_DEVBUF);
	}

	if (sm)
		free(sm, M_DEVBUF);

	if (vn && open) {
		VOP_CLOSE(vn, FREAD | FWRITE, NOCRED, curproc);
		vput(vn);
	}

	return key_disk;
}

int
sr_crypto_alloc_resources(struct sr_discipline *sd)
{
	struct cryptoini	cri;
	struct sr_crypto_wu	*crwu;
	u_int			num_keys, i;

	DNPRINTF(SR_D_DIS, "%s: sr_crypto_alloc_resources\n",
	    DEVNAME(sd->sd_sc));

	for (i = 0; i < SR_CRYPTO_MAXKEYS; i++)
		sd->mds.mdd_crypto.scr_sid[i] = (u_int64_t)-1;

	if (sr_wu_alloc(sd)) {
		sr_error(sd->sd_sc, "unable to allocate work units");
		return (ENOMEM);
	}
	if (sr_ccb_alloc(sd)) {
		sr_error(sd->sd_sc, "unable to allocate CCBs");
		return (ENOMEM);
	}
	if (sr_crypto_decrypt_key(sd)) {
		sr_error(sd->sd_sc, "incorrect key or passphrase");
		return (EPERM);
	}

	/*
	 * For each wu allocate the uio, iovec and crypto structures.
	 * These have to be allocated now because during runtime we cannot
	 * fail an allocation without failing the I/O (which can cause real
	 * problems).
	 */
	mtx_init(&sd->mds.mdd_crypto.scr_mutex, IPL_BIO);
	TAILQ_INIT(&sd->mds.mdd_crypto.scr_wus);
	for (i = 0; i < sd->sd_max_wu; i++) {
		crwu = malloc(sizeof(*crwu), M_DEVBUF,
		    M_WAITOK | M_ZERO | M_CANFAIL);
		if (crwu == NULL)
			return (ENOMEM);
		/* put it on the list now so if we fail it'll be freed */
		mtx_enter(&sd->mds.mdd_crypto.scr_mutex);
		TAILQ_INSERT_TAIL(&sd->mds.mdd_crypto.scr_wus, crwu, cr_link);
		mtx_leave(&sd->mds.mdd_crypto.scr_mutex);

		crwu->cr_uio.uio_iov = &crwu->cr_iov;
		crwu->cr_dmabuf = dma_alloc(MAXPHYS, PR_WAITOK);
		crwu->cr_crp = crypto_getreq(MAXPHYS >> DEV_BSHIFT);
		if (crwu->cr_crp == NULL)
			return (ENOMEM);
		/* steal the list of cryptodescs */
		crwu->cr_descs = crwu->cr_crp->crp_desc;
		crwu->cr_crp->crp_desc = NULL;
	}

	bzero(&cri, sizeof(cri));
	cri.cri_alg = CRYPTO_AES_XTS;
	switch (sd->mds.mdd_crypto.scr_meta->scm_alg) {
	case SR_CRYPTOA_AES_XTS_128:
		cri.cri_klen = 256;
		break;
	case SR_CRYPTOA_AES_XTS_256:
		cri.cri_klen = 512;
		break;
	default:
		return (EINVAL);
	}

	/* Allocate a session for every 2^SR_CRYPTO_KEY_BLKSHIFT blocks */
	num_keys = sd->sd_meta->ssdi.ssd_size >> SR_CRYPTO_KEY_BLKSHIFT;
	if (num_keys >= SR_CRYPTO_MAXKEYS)
		return (EFBIG);
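	/*
	 * scr_key[i] covers disk blocks [i << SR_CRYPTO_KEY_BLKSHIFT,
	 * (i + 1) << SR_CRYPTO_KEY_BLKSHIFT); the last data block falls in
	 * zone (ssd_size - 1) >> SR_CRYPTO_KEY_BLKSHIFT, which is at most
	 * num_keys, hence the inclusive loop below.
	 */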
	for (i = 0; i <= num_keys; i++) {
		cri.cri_key = sd->mds.mdd_crypto.scr_key[i];
		if (crypto_newsession(&sd->mds.mdd_crypto.scr_sid[i],
		    &cri, 0) != 0) {
			for (i = 0;
			     sd->mds.mdd_crypto.scr_sid[i] != (u_int64_t)-1;
			     i++) {
				crypto_freesession(
				    sd->mds.mdd_crypto.scr_sid[i]);
				sd->mds.mdd_crypto.scr_sid[i] = (u_int64_t)-1;
			}
			return (EINVAL);
		}
	}

	sr_hotplug_register(sd, sr_crypto_hotplug);

	return (0);
}

void
sr_crypto_free_resources(struct sr_discipline *sd)
{
	struct sr_crypto_wu	*crwu;
	u_int			i;

	DNPRINTF(SR_D_DIS, "%s: sr_crypto_free_resources\n",
	    DEVNAME(sd->sd_sc));

	if (sd->mds.mdd_crypto.key_disk != NULL) {
		explicit_bzero(sd->mds.mdd_crypto.key_disk,
		    sizeof(*sd->mds.mdd_crypto.key_disk));
		free(sd->mds.mdd_crypto.key_disk, M_DEVBUF);
	}

	sr_hotplug_unregister(sd, sr_crypto_hotplug);

	for (i = 0; sd->mds.mdd_crypto.scr_sid[i] != (u_int64_t)-1; i++) {
		crypto_freesession(sd->mds.mdd_crypto.scr_sid[i]);
		sd->mds.mdd_crypto.scr_sid[i] = (u_int64_t)-1;
	}

	mtx_enter(&sd->mds.mdd_crypto.scr_mutex);
	while ((crwu = TAILQ_FIRST(&sd->mds.mdd_crypto.scr_wus)) != NULL) {
		TAILQ_REMOVE(&sd->mds.mdd_crypto.scr_wus, crwu, cr_link);

		if (crwu->cr_dmabuf != NULL)
			dma_free(crwu->cr_dmabuf, MAXPHYS);
		if (crwu->cr_crp) {
			/* twiddle cryptoreq back */
			crwu->cr_crp->crp_desc = crwu->cr_descs;
			crypto_freereq(crwu->cr_crp);
		}
		free(crwu, M_DEVBUF);
	}
	mtx_leave(&sd->mds.mdd_crypto.scr_mutex);

	sr_wu_free(sd);
	sr_ccb_free(sd);
}

int
sr_crypto_ioctl(struct sr_discipline *sd, struct bioc_discipline *bd)
{
	struct sr_crypto_kdfpair kdfpair;
	struct sr_crypto_kdfinfo kdfinfo1, kdfinfo2;
	int			size, rv = 1;

	DNPRINTF(SR_D_IOCTL, "%s: sr_crypto_ioctl %u\n",
	    DEVNAME(sd->sd_sc), bd->bd_cmd);

	switch (bd->bd_cmd) {
	case SR_IOCTL_GET_KDFHINT:

		/* Get KDF hint for userland. */
		size = sizeof(sd->mds.mdd_crypto.scr_meta->scm_kdfhint);
		if (bd->bd_data == NULL || bd->bd_size > size)
			goto bad;
		if (copyout(sd->mds.mdd_crypto.scr_meta->scm_kdfhint,
		    bd->bd_data, bd->bd_size))
			goto bad;

		rv = 0;

		break;

	case SR_IOCTL_CHANGE_PASSPHRASE:

		/* Attempt to change passphrase. */

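		/*
		 * kdfinfo1 carries the KDF data for the current mask key,
		 * kdfinfo2 the replacement; both are validated and copied
		 * in from userland before re-keying.
		 */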
		size = sizeof(kdfpair);
		if (bd->bd_data == NULL || bd->bd_size > size)
			goto bad;
		if (copyin(bd->bd_data, &kdfpair, size))
			goto bad;

		size = sizeof(kdfinfo1);
		if (kdfpair.kdfinfo1 == NULL || kdfpair.kdfsize1 > size)
			goto bad;
		if (copyin(kdfpair.kdfinfo1, &kdfinfo1, size))
			goto bad;

		size = sizeof(kdfinfo2);
		if (kdfpair.kdfinfo2 == NULL || kdfpair.kdfsize2 > size)
			goto bad;
		if (copyin(kdfpair.kdfinfo2, &kdfinfo2, size))
			goto bad;

		if (sr_crypto_change_maskkey(sd, &kdfinfo1, &kdfinfo2))
			goto bad;

		/* Save metadata to disk. */
		rv = sr_meta_save(sd, SR_META_DIRTY);

		break;
	}

bad:
	explicit_bzero(&kdfpair, sizeof(kdfpair));
	explicit_bzero(&kdfinfo1, sizeof(kdfinfo1));
	explicit_bzero(&kdfinfo2, sizeof(kdfinfo2));

	return (rv);
}

int
sr_crypto_meta_opt_handler(struct sr_discipline *sd, struct sr_meta_opt_hdr *om)
{
	int rv = EINVAL;

	if (om->som_type == SR_OPT_CRYPTO) {
		sd->mds.mdd_crypto.scr_meta = (struct sr_meta_crypto *)om;
		rv = 0;
	}

	return (rv);
}

int
sr_crypto_rw(struct sr_workunit *wu)
{
	struct sr_crypto_wu	*crwu;
	int			s, rv = 0;

	DNPRINTF(SR_D_DIS, "%s: sr_crypto_rw wu: %p\n",
	    DEVNAME(wu->swu_dis->sd_sc), wu);

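	/*
	 * Writes are encrypted first: crypto_invoke() runs the request and
	 * sr_crypto_write() then issues the disk I/O on the ciphertext.
	 * Reads go straight to disk; sr_crypto_done() kicks off decryption
	 * once the data has arrived.
	 */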
	if (wu->swu_xs->flags & SCSI_DATA_OUT) {
		crwu = sr_crypto_wu_get(wu, 1);
		if (crwu == NULL)
			return (1);
		crwu->cr_crp->crp_callback = sr_crypto_write;
		s = splvm();
		if (crypto_invoke(crwu->cr_crp))
			rv = 1;
		else
			rv = crwu->cr_crp->crp_etype;
		splx(s);
	} else
		rv = sr_crypto_rw2(wu, NULL);

	return (rv);
}

int
sr_crypto_write(struct cryptop *crp)
{
	struct sr_crypto_wu	*crwu = crp->crp_opaque;
	struct sr_workunit	*wu = crwu->cr_wu;
	int			s;

	DNPRINTF(SR_D_INTR, "%s: sr_crypto_write: wu %x xs: %x\n",
	    DEVNAME(wu->swu_dis->sd_sc), wu, wu->swu_xs);

	if (crp->crp_etype) {
		/* fail io */
		wu->swu_xs->error = XS_DRIVER_STUFFUP;
		s = splbio();
		sr_crypto_finish_io(wu);
		splx(s);
	}

	return (sr_crypto_rw2(wu, crwu));
}

int
sr_crypto_rw2(struct sr_workunit *wu, struct sr_crypto_wu *crwu)
{
	struct sr_discipline	*sd = wu->swu_dis;
	struct scsi_xfer	*xs = wu->swu_xs;
	struct sr_ccb		*ccb;
	struct uio		*uio;
	int			s;
	daddr64_t		blk;

	if (sr_validate_io(wu, &blk, "sr_crypto_rw2"))
		goto bad;

	blk += sd->sd_meta->ssd_data_offset;

	ccb = sr_ccb_rw(sd, 0, blk, xs->datalen, xs->data, xs->flags, 0);
	if (!ccb) {
		/* should never happen but handle more gracefully */
		printf("%s: %s: too many ccbs queued\n",
		    DEVNAME(sd->sd_sc), sd->sd_meta->ssd_devname);
		goto bad;
	}
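	/*
	 * For writes, hand the CCB the encrypted copy in the crypto work
	 * unit's DMA buffer rather than the caller's plaintext buffer.
	 */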
	if (!ISSET(xs->flags, SCSI_DATA_IN)) {
		uio = crwu->cr_crp->crp_buf;
		ccb->ccb_buf.b_data = uio->uio_iov->iov_base;
		ccb->ccb_opaque = crwu;
	}
	sr_wu_enqueue_ccb(wu, ccb);

	s = splbio();

	if (sr_check_io_collision(wu))
		goto queued;

	sr_raid_startwu(wu);

queued:
	splx(s);
	return (0);
bad:
	/* wu is unwound by sr_wu_put */
	if (crwu)
		crwu->cr_crp->crp_etype = EINVAL;
	return (1);
}

void
sr_crypto_done(struct sr_workunit *wu)
{
	struct scsi_xfer	*xs = wu->swu_xs;
	struct sr_crypto_wu	*crwu;
	struct sr_ccb		*ccb;
	int			s;

	/* If this was a successful read, initiate decryption of the data. */
	if (ISSET(xs->flags, SCSI_DATA_IN) && xs->error == XS_NOERROR) {
		/* only fails on implementation error */
		crwu = sr_crypto_wu_get(wu, 0);
		if (crwu == NULL)
			panic("sr_crypto_intr: no wu");
		crwu->cr_crp->crp_callback = sr_crypto_read;
		ccb = TAILQ_FIRST(&wu->swu_ccb);
		if (ccb == NULL)
			panic("sr_crypto_done: no ccbs on workunit");
		ccb->ccb_opaque = crwu;
		DNPRINTF(SR_D_INTR, "%s: sr_crypto_intr: crypto_invoke %p\n",
		    DEVNAME(wu->swu_dis->sd_sc), crwu->cr_crp);
		s = splvm();
		crypto_invoke(crwu->cr_crp);
		splx(s);
		return;
	}

	s = splbio();
	sr_crypto_finish_io(wu);
	splx(s);
}

void
sr_crypto_finish_io(struct sr_workunit *wu)
{
	struct sr_discipline	*sd = wu->swu_dis;
	struct scsi_xfer	*xs = wu->swu_xs;
	struct sr_ccb		*ccb;
#ifdef SR_DEBUG
	struct sr_softc		*sc = sd->sd_sc;
#endif /* SR_DEBUG */

	splassert(IPL_BIO);

	DNPRINTF(SR_D_INTR, "%s: sr_crypto_finish_io: wu %x xs: %x\n",
	    DEVNAME(sc), wu, xs);

	if (wu->swu_cb_active == 1)
		panic("%s: sr_crypto_finish_io", DEVNAME(sd->sd_sc));
	TAILQ_FOREACH(ccb, &wu->swu_ccb, ccb_link) {
		if (ccb->ccb_opaque == NULL)
			continue;
		sr_crypto_wu_put(ccb->ccb_opaque);
	}

	sr_scsi_done(sd, xs);
}

int
sr_crypto_read(struct cryptop *crp)
{
	struct sr_crypto_wu	*crwu = crp->crp_opaque;
	struct sr_workunit	*wu = crwu->cr_wu;
	int			s;

	DNPRINTF(SR_D_INTR, "%s: sr_crypto_read: wu %x xs: %x\n",
	    DEVNAME(wu->swu_dis->sd_sc), wu, wu->swu_xs);

	if (crp->crp_etype)
		wu->swu_xs->error = XS_DRIVER_STUFFUP;

	s = splbio();
	sr_crypto_finish_io(wu);
	splx(s);

	return (0);
}

void
sr_crypto_hotplug(struct sr_discipline *sd, struct disk *diskp, int action)
{
	DNPRINTF(SR_D_MISC, "%s: sr_crypto_hotplug: %s %d\n",
	    DEVNAME(sd->sd_sc), diskp->dk_name, action);
}

#ifdef SR_DEBUG0
void
sr_crypto_dumpkeys(struct sr_discipline *sd)
{
	int			i, j;

	printf("sr_crypto_dumpkeys:\n");
	for (i = 0; i < SR_CRYPTO_MAXKEYS; i++) {
		printf("\tscm_key[%d]: 0x", i);
		for (j = 0; j < SR_CRYPTO_KEYBYTES; j++) {
			printf("%02x",
			    sd->mds.mdd_crypto.scr_meta->scm_key[i][j]);
		}
		printf("\n");
	}
	printf("sr_crypto_dumpkeys: runtime data keys:\n");
	for (i = 0; i < SR_CRYPTO_MAXKEYS; i++) {
		printf("\tscr_key[%d]: 0x", i);
		for (j = 0; j < SR_CRYPTO_KEYBYTES; j++) {
			printf("%02x",
			    sd->mds.mdd_crypto.scr_key[i][j]);
		}
		printf("\n");
	}
}
#endif	/* SR_DEBUG0 */