/* $OpenBSD: softraidvar.h,v 1.176 2022/12/19 15:27:06 kn Exp $ */
/*
 * Copyright (c) 2006 Marco Peereboom <marco@peereboom.us>
 * Copyright (c) 2008 Chris Kuethe <ckuethe@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#ifndef SOFTRAIDVAR_H
#define SOFTRAIDVAR_H

#define SR_META_VERSION		6	/* bump when sr_metadata changes */
#define SR_META_SIZE		64	/* save space at chunk beginning */
#define SR_META_OFFSET		16	/* skip 8192 bytes at chunk beginning */

#define SR_BOOT_OFFSET		(SR_META_OFFSET + SR_META_SIZE)
#define SR_BOOT_LOADER_SIZE	320	/* Size of boot loader storage. */
#define SR_BOOT_LOADER_OFFSET	SR_BOOT_OFFSET
#define SR_BOOT_BLOCKS_SIZE	128	/* Size of boot block storage. */
#define SR_BOOT_BLOCKS_OFFSET	(SR_BOOT_LOADER_OFFSET + SR_BOOT_LOADER_SIZE)
#define SR_BOOT_SIZE		(SR_BOOT_LOADER_SIZE + SR_BOOT_BLOCKS_SIZE)

#define SR_CRYPTO_MAXKEYBYTES	32	/* max bytes in a key (AES-XTS-256) */
#define SR_CRYPTO_MAXKEYS	32	/* max keys per volume */
#define SR_CRYPTO_KEYBITS	512	/* AES-XTS with 2 * 256 bit keys */
#define SR_CRYPTO_KEYBYTES	(SR_CRYPTO_KEYBITS >> 3)
#define SR_CRYPTO_KDFHINTBYTES	256	/* size of opaque KDF hint */
#define SR_CRYPTO_CHECKBYTES	64	/* size of generic key chksum struct */
#define SR_CRYPTO_KEY_BLKSHIFT	30	/* 0.5TB per key */
#define SR_CRYPTO_KEY_BLKSIZE	(1ULL << SR_CRYPTO_KEY_BLKSHIFT)
#define SR_CRYPTO_MAXSIZE	(SR_CRYPTO_KEY_BLKSIZE * SR_CRYPTO_MAXKEYS)
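
/*
 * Worked example: with 512-byte sectors one key covers
 * 2^30 blocks * 512 bytes = 512 GiB ("0.5TB"), so SR_CRYPTO_MAXKEYS keys
 * give SR_CRYPTO_MAXSIZE = 32 * 512 GiB = 16 TiB per volume.  The crypto
 * discipline selects a key per block range of that size; the helper below
 * is only an illustrative sketch of such a mapping, not the actual
 * kernel code.
 */
static inline u_int32_t
sr_crypto_example_key_index(daddr_t blkno)
{
	/* Index into the per-volume key array (0 .. SR_CRYPTO_MAXKEYS - 1). */
	return ((u_int32_t)(blkno >> SR_CRYPTO_KEY_BLKSHIFT));
}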

/*
 * sr_crypto_genkdf is a generic hint for the KDF performed in userland and
 * is not interpreted by the kernel.
 */
struct sr_crypto_genkdf {
	u_int32_t	len;
	u_int32_t	type;
#define SR_CRYPTOKDFT_INVALID		0
#define SR_CRYPTOKDFT_PKCS5_PBKDF2	1
#define SR_CRYPTOKDFT_KEYDISK		2
#define SR_CRYPTOKDFT_BCRYPT_PBKDF	3
};

/*
 * sr_crypto_pbkdf is a hint for a PBKDF performed in userland and is not
 * interpreted by the kernel.
 */
struct sr_crypto_pbkdf {
	struct sr_crypto_genkdf generic;
	u_int32_t	rounds;
	u_int8_t	salt[128];
};

/*
 * sr_crypto_kdfinfo is used to copy masking keys and KDF hints from/to
 * userland. The embedded hint structures are not interpreted by the kernel.
 */
struct sr_crypto_kdfinfo {
	u_int32_t	len;
	u_int32_t	flags;
#define SR_CRYPTOKDF_INVALID	(0)
#define SR_CRYPTOKDF_KEY	(1<<0)
#define SR_CRYPTOKDF_HINT	(1<<1)
	u_int8_t	maskkey[SR_CRYPTO_MAXKEYBYTES];
	union {
		struct sr_crypto_genkdf	generic;
		struct sr_crypto_pbkdf	pbkdf;
	}		_kdfhint;
#define genkdf		_kdfhint.generic
#define pbkdf		_kdfhint.pbkdf
};

#define SR_IOCTL_GET_KDFHINT		0x01	/* Get KDF hint. */
#define SR_IOCTL_CHANGE_PASSPHRASE	0x02	/* Change passphrase. */

struct sr_crypto_kdfpair {
	struct sr_crypto_kdfinfo *kdfinfo1;
	u_int32_t	kdfsize1;
	struct sr_crypto_kdfinfo *kdfinfo2;
	u_int32_t	kdfsize2;
};
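
/*
 * Illustrative sketch only: how a userland tool such as bioctl(8) might
 * fill in an sr_crypto_kdfinfo before passing it to the kernel.  The
 * helper name and the origin of the mask key, rounds and salt are
 * hypothetical, and memset/memcpy are assumed to be available:
 *
 *	void
 *	example_fill_kdfinfo(struct sr_crypto_kdfinfo *ki, u_int32_t rounds,
 *	    const u_int8_t *salt, const u_int8_t *mkey)
 *	{
 *		memset(ki, 0, sizeof(*ki));
 *		ki->len = sizeof(*ki);
 *		ki->flags = SR_CRYPTOKDF_KEY | SR_CRYPTOKDF_HINT;
 *
 *		// Mask key derived in userland, e.g. via bcrypt_pbkdf(3).
 *		memcpy(ki->maskkey, mkey, sizeof(ki->maskkey));
 *
 *		// Opaque hint; stored, but never interpreted, by the kernel.
 *		ki->pbkdf.generic.len = sizeof(ki->pbkdf);
 *		ki->pbkdf.generic.type = SR_CRYPTOKDFT_BCRYPT_PBKDF;
 *		ki->pbkdf.rounds = rounds;
 *		memcpy(ki->pbkdf.salt, salt, sizeof(ki->pbkdf.salt));
 *	}
 */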

#if defined(_KERNEL) || defined(_STANDALONE)

#include <crypto/md5.h>

#define SR_META_V3_SIZE		64
#define SR_META_V3_OFFSET	16
#define SR_META_V3_DATA_OFFSET	(SR_META_V3_OFFSET + SR_META_V3_SIZE)

#define SR_META_F_NATIVE	0	/* Native metadata format. */
#define SR_META_F_INVALID	-1

#define SR_HEADER_SIZE		(SR_META_SIZE + SR_BOOT_SIZE)
#define SR_DATA_OFFSET		(SR_META_OFFSET + SR_HEADER_SIZE)
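
/*
 * The defines above give the following chunk layout, in 512-byte blocks
 * (a worked example; offsets follow directly from the arithmetic):
 *
 *	  0 ..  15	skipped (8192 bytes at chunk beginning)
 *	 16 ..  79	metadata	(SR_META_OFFSET, SR_META_SIZE)
 *	 80 .. 399	boot loader	(SR_BOOT_LOADER_OFFSET, _SIZE)
 *	400 .. 527	boot blocks	(SR_BOOT_BLOCKS_OFFSET, _SIZE)
 *	528 .. 		volume data	(SR_DATA_OFFSET)
 */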

#define SR_HOTSPARE_LEVEL	0xffffffff
#define SR_HOTSPARE_VOLID	0xffffffff
#define SR_KEYDISK_LEVEL	0xfffffffe
#define SR_KEYDISK_VOLID	0xfffffffe

#define SR_UUID_MAX		16
struct sr_uuid {
	u_int8_t		sui_id[SR_UUID_MAX];
} __packed;

struct sr_disk {
	dev_t			sdk_devno;
	SLIST_ENTRY(sr_disk)	sdk_link;
};
SLIST_HEAD(sr_disk_head, sr_disk);

struct sr_metadata {
	struct sr_meta_invariant {
		/* do not change order of ssd_magic, ssd_version */
		u_int64_t	ssd_magic;	/* magic id */
#define	SR_MAGIC		0x4d4152436372616dLLU
		u_int32_t	ssd_version;	/* meta data version */
		u_int32_t	ssd_vol_flags;	/* volume specific flags. */
		struct sr_uuid	ssd_uuid;	/* unique identifier */

		/* chunks */
		u_int32_t	ssd_chunk_no;	/* number of chunks */
		u_int32_t	ssd_chunk_id;	/* chunk identifier */

		/* optional */
		u_int32_t	ssd_opt_no;	/* nr of optional md elements */
		u_int32_t	ssd_secsize;

		/* volume metadata */
		u_int32_t	ssd_volid;	/* volume id */
		u_int32_t	ssd_level;	/* raid level */
		int64_t		ssd_size;	/* virt disk size in blocks */
		char		ssd_vendor[8];	/* scsi vendor */
		char		ssd_product[16];/* scsi product */
		char		ssd_revision[4];/* scsi revision */
		/* optional volume members */
		u_int32_t	ssd_strip_size;	/* strip size */
	} _sdd_invariant;
#define ssdi			_sdd_invariant
	/* MD5 of invariant metadata */
	u_int8_t		ssd_checksum[MD5_DIGEST_LENGTH];
	char			ssd_devname[32];/* /dev/XXXXX */
	u_int32_t		ssd_meta_flags;
#define	SR_META_DIRTY		0x1
	u_int32_t		ssd_data_blkno;
	u_int64_t		ssd_ondisk;	/* on disk version counter */
	int64_t			ssd_rebuild;	/* last block of rebuild */
} __packed;

struct sr_meta_chunk {
	struct sr_meta_chunk_invariant {
		u_int32_t	scm_volid;	/* vd we belong to */
		u_int32_t	scm_chunk_id;	/* chunk id */
		char		scm_devname[32];/* /dev/XXXXX */
		int64_t		scm_size;	/* size of partition in blocks*/
		int64_t		scm_coerced_size; /* coerced sz of part in blk*/
		struct sr_uuid	scm_uuid;	/* unique identifier */
	} _scm_invariant;
#define scmi			_scm_invariant
	/* MD5 of invariant chunk metadata */
	u_int8_t		scm_checksum[MD5_DIGEST_LENGTH];
	u_int32_t		scm_status;	/* use bio bioc_disk status */
} __packed;

/*
 * Check that HMAC-SHA1_k(decrypted scm_key) == sch_mac, where
 * k = SHA1(masking key)
 */
struct sr_crypto_chk_hmac_sha1 {
	u_int8_t	sch_mac[20];
} __packed;
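
/*
 * Sketch of the check described above (illustrative only; sha1() and
 * hmac_sha1() stand in for whatever one-shot helpers the implementation
 * actually uses):
 *
 *	u_int8_t k[20], mac[20];
 *
 *	sha1(maskkey, masking_key_len, k);
 *	hmac_sha1(k, sizeof(k), decrypted_scm_key, key_len, mac);
 *	ok = (memcmp(mac, sch_mac, sizeof(mac)) == 0);
 */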

#define SR_OPT_INVALID		0x00
#define SR_OPT_CRYPTO		0x01
#define SR_OPT_BOOT		0x02
#define SR_OPT_KEYDISK		0x03

struct sr_meta_opt_hdr {
	u_int32_t	som_type;	/* optional metadata type. */
	u_int32_t	som_length;	/* optional metadata length. */
	u_int8_t	som_checksum[MD5_DIGEST_LENGTH];
} __packed;

struct sr_meta_crypto {
	struct sr_meta_opt_hdr	scm_hdr;
	u_int32_t		scm_alg;	/* vol crypto algorithm */
#define SR_CRYPTOA_AES_XTS_128	1
#define SR_CRYPTOA_AES_XTS_256	2
	u_int32_t		scm_flags;	/* key & kdfhint valid */
#define SR_CRYPTOF_INVALID	(0)
#define SR_CRYPTOF_KEY		(1<<0)
#define SR_CRYPTOF_KDFHINT	(1<<1)
	u_int32_t		scm_mask_alg;	/* disk key masking crypt alg */
#define SR_CRYPTOM_AES_ECB_256	1
	u_int32_t		scm_pad1;
	u_int8_t		scm_reserved[64];

	/* symmetric keys used for disk encryption */
	u_int8_t		scm_key[SR_CRYPTO_MAXKEYS][SR_CRYPTO_KEYBYTES];
	/* hint to kdf algorithm (opaque to kernel) */
	u_int8_t		scm_kdfhint[SR_CRYPTO_KDFHINTBYTES];

	u_int32_t		scm_check_alg;	/* key chksum algorithm */
#define SR_CRYPTOC_HMAC_SHA1		1
	u_int32_t		scm_pad2;
	union {
		struct sr_crypto_chk_hmac_sha1	chk_hmac_sha1;
		u_int8_t			chk_reserved2[64];
	}			_scm_chk;
#define	chk_hmac_sha1	_scm_chk.chk_hmac_sha1
} __packed;

#define SR_MAX_BOOT_DISKS 16
struct sr_meta_boot {
	struct sr_meta_opt_hdr	sbm_hdr;
	u_int32_t		sbm_bootblk_size;
	u_int32_t		sbm_bootldr_size;
	u_char			sbm_root_duid[8];
	u_char			sbm_boot_duid[SR_MAX_BOOT_DISKS][8];
} __packed;

struct sr_meta_keydisk {
	struct sr_meta_opt_hdr	skm_hdr;
	u_int8_t		skm_maskkey[SR_CRYPTO_MAXKEYBYTES];
} __packed;

#define SR_OLD_META_OPT_SIZE	2480
#define SR_OLD_META_OPT_OFFSET	8
#define SR_OLD_META_OPT_MD5	(SR_OLD_META_OPT_SIZE - MD5_DIGEST_LENGTH)

struct sr_meta_opt_item {
	struct sr_meta_opt_hdr	*omi_som;
	SLIST_ENTRY(sr_meta_opt_item) omi_link;
};

SLIST_HEAD(sr_meta_opt_head, sr_meta_opt_item);

struct sr_boot_chunk {
	struct sr_metadata *sbc_metadata;
	dev_t		sbc_mm;			/* Device major/minor. */

	u_int32_t	sbc_chunk_id;		/* Chunk ID. */
	u_int32_t	sbc_state;		/* Chunk state. */
	u_int32_t	sbc_disk;		/* Disk number. */
	int		sbc_part;		/* Partition number. */
	u_int64_t	sbc_ondisk;		/* Ondisk version. */

	void		*sbc_diskinfo;		/* MD disk information. */

	SLIST_ENTRY(sr_boot_chunk) sbc_link;
};

SLIST_HEAD(sr_boot_chunk_head, sr_boot_chunk);

struct sr_boot_volume {
	struct sr_uuid	sbv_uuid;		/* Volume UUID. */
	u_int32_t	sbv_level;		/* RAID Level. */
	u_int32_t	sbv_volid;		/* Volume ID. */
	u_int32_t	sbv_chunk_no;		/* Number of chunks. */
	u_int32_t	sbv_flags;		/* Volume specific flags. */
	u_int32_t	sbv_state;		/* Volume state. */
	int64_t		sbv_size;		/* Virtual disk size. */
	u_int32_t	sbv_secsize;		/* Sector size */
	u_int32_t	sbv_data_blkno;		/* Data offset. */
	u_int64_t	sbv_ondisk;		/* Ondisk version. */

	u_int32_t	sbv_chunks_found;	/* Number of chunks found. */
	u_int32_t	sbv_unit;		/* Disk unit number. */
	char		sbv_part;		/* Partition opened. */
	void		*sbv_diskinfo;		/* MD disk information. */

	u_int8_t	*sbv_keys;		/* Disk keys for volume. */
	u_int8_t	*sbv_maskkey;		/* Mask key for disk keys. */

	struct sr_boot_chunk_head sbv_chunks;	/* List of chunks. */
	struct sr_meta_opt_head sbv_meta_opt;	/* List of optional metadata. */

	SLIST_ENTRY(sr_boot_volume)	sbv_link;
};

SLIST_HEAD(sr_boot_volume_head, sr_boot_volume);

#endif /* _KERNEL | _STANDALONE */

#ifdef _KERNEL

#include <dev/biovar.h>

#include <sys/buf.h>
#include <sys/queue.h>
#include <sys/rwlock.h>
#include <sys/task.h>

#include <scsi/scsi_all.h>
#include <scsi/scsi_disk.h>
#include <scsi/scsiconf.h>

#define DEVNAME(_s)     ((_s)->sc_dev.dv_xname)

/* #define SR_DEBUG */
#ifdef SR_DEBUG
extern u_int32_t		sr_debug;
#define DNPRINTF(n,x...)	do { if (sr_debug & n) printf(x); } while(0)
#define	SR_D_CMD		0x0001
#define	SR_D_INTR		0x0002
#define	SR_D_MISC		0x0004
#define	SR_D_IOCTL		0x0008
#define	SR_D_CCB		0x0010
#define	SR_D_WU			0x0020
#define	SR_D_META		0x0040
#define	SR_D_DIS		0x0080
#define	SR_D_STATE		0x0100
#define	SR_D_REBUILD		0x0200
#else
#define DNPRINTF(n,x...)
#endif
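
/*
 * Example use of the debug macro (illustrative): with SR_DEBUG enabled,
 * a routine might log via
 *
 *	DNPRINTF(SR_D_META, "%s: saving metadata\n", DEVNAME(sc));
 *
 * and the message only appears when SR_D_META is set in sr_debug.
 */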

#define	SR_MAX_LD		256
#define	SR_MAX_CMDS		16
#define	SR_MAX_STATES		7
#define SR_VM_IGNORE_DIRTY	1
#define SR_REBUILD_IO_SIZE	128 /* blocks */

extern struct sr_uuid	sr_bootuuid;
extern u_int8_t		sr_bootkey[SR_CRYPTO_MAXKEYBYTES];

/* forward define to prevent dependency goo */
struct sr_softc;

struct sr_ccb {
	struct buf		ccb_buf;	/* MUST BE FIRST!! */

	struct sr_workunit	*ccb_wu;
	struct sr_discipline	*ccb_dis;

	int			ccb_target;
	int			ccb_state;
#define SR_CCB_FREE		0
#define SR_CCB_INPROGRESS	1
#define SR_CCB_OK		2
#define SR_CCB_FAILED		3

	int			ccb_flags;
#define SR_CCBF_FREEBUF		(1<<0)		/* free ccb_buf.b_data */

	void			*ccb_opaque; /* discipline usable pointer */

	TAILQ_ENTRY(sr_ccb)	ccb_link;
};

TAILQ_HEAD(sr_ccb_list, sr_ccb);

struct sr_workunit {
	struct scsi_xfer	*swu_xs;
	struct sr_discipline	*swu_dis;

	int			swu_state;
#define SR_WU_FREE		0
#define SR_WU_INPROGRESS	1
#define SR_WU_OK		2
#define SR_WU_FAILED		3
#define SR_WU_PARTIALLYFAILED	4
#define SR_WU_DEFERRED		5
#define SR_WU_PENDING		6
#define SR_WU_RESTART		7
#define SR_WU_REQUEUE		8
#define SR_WU_CONSTRUCT		9

	int			swu_flags;	/* additional hints */
#define SR_WUF_REBUILD		(1<<0)		/* rebuild io */
#define SR_WUF_REBUILDIOCOMP	(1<<1)		/* rebuild io complete */
#define SR_WUF_FAIL		(1<<2)		/* RAID6: failure */
#define SR_WUF_FAILIOCOMP	(1<<3)
#define SR_WUF_WAKEUP		(1<<4)		/* Wakeup on I/O completion. */
#define SR_WUF_DISCIPLINE	(1<<5)		/* Discipline specific I/O. */
#define SR_WUF_FAKE		(1<<6)		/* Faked workunit. */

	/* workunit io range */
	daddr_t			swu_blk_start;
	daddr_t			swu_blk_end;

	/* number of ios that makes up the whole work unit */
	u_int32_t		swu_io_count;

	/* in flight totals */
	u_int32_t		swu_ios_complete;
	u_int32_t		swu_ios_failed;
	u_int32_t		swu_ios_succeeded;

	/* colliding wu */
	struct sr_workunit	*swu_collider;

	/* all ios that make up this workunit */
	struct sr_ccb_list	swu_ccb;

	/* task memory */
	struct task		swu_task;
	int			swu_cb_active;	/* in callback */

	TAILQ_ENTRY(sr_workunit) swu_link;	/* Link in processing queue. */
	TAILQ_ENTRY(sr_workunit) swu_next;	/* Next work unit in chain. */
};

TAILQ_HEAD(sr_wu_list, sr_workunit);

/* RAID 0 */
#define SR_RAID0_NOWU		16
struct sr_raid0 {
	int32_t			sr0_strip_bits;
};

/* RAID 1 */
#define SR_RAID1_NOWU		16
struct sr_raid1 {
	u_int32_t		sr1_counter;
};

/* RAID 5 */
#define SR_RAID5_NOWU		16
struct sr_raid5 {
	int32_t			sr5_strip_bits;
};

/* RAID 6 */
#define SR_RAID6_NOWU		16
struct sr_raid6 {
	int32_t			sr6_strip_bits;
};

/* CRYPTO */
TAILQ_HEAD(sr_crypto_wu_head, sr_crypto_wu);
#define SR_CRYPTO_NOWU		16

/*
 * The per-I/O data that we need to preallocate. We cannot afford to allow I/O
 * to start failing when memory pressure kicks in. We can store this in the WU
 * because we assert that only one ccb per WU will ever be active during crypto.
 */
struct sr_crypto_wu {
	struct sr_workunit		 cr_wu;		/* Must be first. */
	struct uio			 cr_uio;
	struct iovec			 cr_iov;
	struct cryptop			*cr_crp;
	void				*cr_dmabuf;
};

struct sr_crypto {
	struct sr_meta_crypto	*scr_meta;
	struct sr_chunk		*key_disk;

	int			scr_alg;
	int			scr_klen;

	/* XXX only keep scr_sid over time */
	u_int8_t		scr_key[SR_CRYPTO_MAXKEYS][SR_CRYPTO_KEYBYTES];
	u_int8_t		scr_maskkey[SR_CRYPTO_MAXKEYBYTES];
	u_int64_t		scr_sid[SR_CRYPTO_MAXKEYS];
};

#define SR_CONCAT_NOWU		16
struct sr_concat {
};

/* RAID 1C */
#define SR_RAID1C_NOWU		16
struct sr_raid1c {
	struct sr_crypto	sr1c_crypto;
	struct sr_raid1		sr1c_raid1;
};

struct sr_chunk {
	struct sr_meta_chunk	src_meta;	/* chunk meta data */

	/* runtime data */
	dev_t			src_dev_mm;	/* major/minor */
	struct vnode		*src_vn;	/* vnode */

	/* helper members before metadata makes it onto the chunk  */
	int			src_meta_ondisk;/* set when meta is on disk */
	char			src_devname[32];
	u_char			src_duid[8];	/* Chunk disklabel UID. */
	int64_t			src_size;	/* in blocks */
	u_int32_t		src_secsize;

	SLIST_ENTRY(sr_chunk)	src_link;
};

SLIST_HEAD(sr_chunk_head, sr_chunk);

struct sr_volume {
	/* runtime data */
	struct sr_chunk_head	sv_chunk_list;	/* linked list of all chunks */
	struct sr_chunk		**sv_chunks;	/* array to same chunks */
	int64_t			sv_chunk_minsz; /* Size of smallest chunk. */
	int64_t			sv_chunk_maxsz; /* Size of largest chunk. */

	/* sensors */
	struct ksensor		sv_sensor;
	int			sv_sensor_attached;
};

struct sr_discipline {
	struct sr_softc		*sd_sc;		/* link back to sr softc */
	size_t			sd_wu_size;	/* alloc and free size */
	u_int8_t		sd_type;	/* type of discipline */
#define	SR_MD_RAID0		0
#define	SR_MD_RAID1		1
#define	SR_MD_RAID5		2
#define	SR_MD_CACHE		3
#define	SR_MD_CRYPTO		4
	/* AOE was 5 and 6. */
	/* SR_MD_RAID4 was 7. */
#define	SR_MD_RAID6		8
#define	SR_MD_CONCAT		9
#define	SR_MD_RAID1C		10
	char			sd_name[10];	/* human readable discipline name */
	u_int16_t		sd_target;	/* scsibus target discipline uses */

	u_int32_t		sd_capabilities;
#define SR_CAP_SYSTEM_DISK	0x00000001	/* Attaches as a system disk. */
#define SR_CAP_AUTO_ASSEMBLE	0x00000002	/* Can auto assemble. */
#define SR_CAP_REBUILD		0x00000004	/* Supports rebuild. */
#define SR_CAP_NON_COERCED	0x00000008	/* Uses non-coerced size. */
#define SR_CAP_REDUNDANT	0x00000010	/* Redundant copies of data. */

	union {
	    struct sr_raid0	mdd_raid0;
	    struct sr_raid1	mdd_raid1;
	    struct sr_raid5	mdd_raid5;
	    struct sr_raid6	mdd_raid6;
	    struct sr_concat	mdd_concat;
#ifdef CRYPTO
	    struct sr_crypto	mdd_crypto;
	    struct sr_raid1c	mdd_raid1c;
#endif /* CRYPTO */
	}			sd_dis_specific;/* dis specific members */
#define mds			sd_dis_specific

	struct taskq		*sd_taskq;

	/* discipline metadata */
	struct sr_metadata	*sd_meta;	/* in memory copy of metadata */
	void			*sd_meta_foreign; /* non native metadata */
	u_int32_t		sd_meta_flags;
	int			sd_meta_type;	/* metadata functions */
	struct sr_meta_opt_head sd_meta_opt; /* optional metadata. */

	int			sd_sync;
	int			sd_must_flush;

	int			sd_deleted;

	/* discipline volume */
	struct sr_volume	sd_vol;		/* volume associated */
	int			sd_vol_status;	/* runtime vol status */
	/* discipline resources */
	struct sr_ccb		*sd_ccb;
	struct sr_ccb_list	sd_ccb_freeq;
	u_int32_t		sd_max_ccb_per_wu;

	struct sr_wu_list	sd_wu;		/* all workunits */
	u_int32_t		sd_max_wu;
	int			sd_reb_active;	/* rebuild in progress */
	int			sd_reb_abort;	/* abort rebuild */
	int			sd_ready;	/* fully operational */

	struct sr_wu_list	sd_wu_freeq;	/* free wu queue */
	struct sr_wu_list	sd_wu_pendq;	/* pending wu queue */
	struct sr_wu_list	sd_wu_defq;	/* deferred wu queue */

	struct mutex		sd_wu_mtx;
	struct scsi_iopool	sd_iopool;

	/* discipline stats */
	int			sd_wu_pending;
	u_int64_t		sd_wu_collisions;

	/* discipline functions */
	int			(*sd_create)(struct sr_discipline *,
				    struct bioc_createraid *, int, int64_t);
	int			(*sd_assemble)(struct sr_discipline *,
				    struct bioc_createraid *, int, void *);
	int			(*sd_alloc_resources)(struct sr_discipline *);
	void			(*sd_free_resources)(struct sr_discipline *);
	int			(*sd_ioctl_handler)(struct sr_discipline *,
				    struct bioc_discipline *);
	int			(*sd_start_discipline)(struct sr_discipline *);
	void			(*sd_set_chunk_state)(struct sr_discipline *,
				    int, int);
	void			(*sd_set_vol_state)(struct sr_discipline *);
	int			(*sd_openings)(struct sr_discipline *);
	int			(*sd_meta_opt_handler)(struct sr_discipline *,
				    struct sr_meta_opt_hdr *);
	void			(*sd_rebuild)(struct sr_discipline *);

	/* SCSI emulation */
	struct scsi_sense_data	sd_scsi_sense;
	int			(*sd_scsi_rw)(struct sr_workunit *);
	void			(*sd_scsi_intr)(struct buf *);
	int			(*sd_scsi_wu_done)(struct sr_workunit *);
	void			(*sd_scsi_done)(struct sr_workunit *);
	int			(*sd_scsi_sync)(struct sr_workunit *);
	int			(*sd_scsi_tur)(struct sr_workunit *);
	int			(*sd_scsi_start_stop)(struct sr_workunit *);
	int			(*sd_scsi_inquiry)(struct sr_workunit *);
	int			(*sd_scsi_read_cap)(struct sr_workunit *);
	int			(*sd_scsi_req_sense)(struct sr_workunit *);

	/* background operation */
	struct proc		*sd_background_proc;

	/* Tasks. */
	struct task		sd_meta_save_task;
	struct task		sd_hotspare_rebuild_task;

	TAILQ_ENTRY(sr_discipline) sd_link;
};

TAILQ_HEAD(sr_discipline_list, sr_discipline);
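
/*
 * Illustrative sketch (not actual driver code): a discipline advertises
 * itself by filling in the hooks above from its *_discipline_init()
 * routine.  The shape follows the real disciplines, but this "example"
 * discipline and its helper functions are hypothetical:
 *
 *	void
 *	sr_example_discipline_init(struct sr_discipline *sd)
 *	{
 *		strlcpy(sd->sd_name, "Example", sizeof(sd->sd_name));
 *		sd->sd_capabilities = SR_CAP_SYSTEM_DISK | SR_CAP_AUTO_ASSEMBLE;
 *		sd->sd_max_wu = 16;
 *
 *		sd->sd_create = sr_example_create;
 *		sd->sd_assemble = sr_example_assemble;
 *		sd->sd_scsi_rw = sr_example_rw;
 *		sd->sd_scsi_wu_done = sr_example_wu_done;
 *		// ...remaining hooks as needed; the generic sr_raid_*()
 *		// helpers declared below cover inquiry, TUR, sync, etc.
 *	}
 */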

struct sr_softc {
	struct device		sc_dev;

	struct rwlock		sc_lock;

	struct bio_status	sc_status;	/* Status and messages. */

	struct sr_chunk_head	sc_hotspare_list;	/* List of hotspares. */
	struct rwlock		sc_hs_lock;	/* Lock for hotspares list. */
	int			sc_hotspare_no; /* Number of hotspares. */

	struct ksensordev	sc_sensordev;
	struct sensor_task	*sc_sensor_task;

	struct scsibus_softc	*sc_scsibus;

	/* The target lookup has to be cheap since it happens for each I/O. */
	struct sr_discipline	*sc_targets[SR_MAX_LD];
	struct sr_discipline_list sc_dis_list;
};
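
/*
 * The cheap lookup referred to above is a direct array index by SCSI
 * target, along the lines of (illustrative):
 *
 *	sd = sc->sc_targets[link->target];
 */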

/* hotplug */
void			sr_hotplug_register(struct sr_discipline *, void *);
void			sr_hotplug_unregister(struct sr_discipline *, void *);

/* Hotspare and rebuild. */
void			sr_hotspare_rebuild_callback(void *);

/* work units & ccbs */
int			sr_ccb_alloc(struct sr_discipline *);
void			sr_ccb_free(struct sr_discipline *);
struct sr_ccb		*sr_ccb_get(struct sr_discipline *);
void			sr_ccb_put(struct sr_ccb *);
struct sr_ccb		*sr_ccb_rw(struct sr_discipline *, int, daddr_t,
			    long, u_int8_t *, int, int);
void			sr_ccb_done(struct sr_ccb *);
int			sr_wu_alloc(struct sr_discipline *);
void			sr_wu_free(struct sr_discipline *);
void			*sr_wu_get(void *);
void			sr_wu_put(void *, void *);
void			sr_wu_init(struct sr_discipline *,
			    struct sr_workunit *);
void			sr_wu_enqueue_ccb(struct sr_workunit *,
			    struct sr_ccb *);
void			sr_wu_release_ccbs(struct sr_workunit *);
void			sr_wu_done(struct sr_workunit *);
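
/*
 * Typical use of the ccb/wu helpers from a discipline's sd_scsi_rw hook
 * (illustrative sketch; chunk selection and block arithmetic are
 * discipline specific and omitted):
 *
 *	ccb = sr_ccb_rw(sd, chunk, blkno, xs->datalen, xs->data,
 *	    xs->flags, 0);
 *	if (ccb == NULL)
 *		return (1);
 *	sr_wu_enqueue_ccb(wu, ccb);
 *	sr_schedule_wu(wu);
 *	return (0);
 */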

/* misc functions */
void			sr_info(struct sr_softc *, const char *, ...);
void			sr_warn(struct sr_softc *, const char *, ...);
void			sr_error(struct sr_softc *, const char *, ...);
int32_t			sr_validate_stripsize(u_int32_t);
int			sr_meta_read(struct sr_discipline *);
int			sr_meta_native_read(struct sr_discipline *, dev_t,
			    struct sr_metadata *, void *);
int			sr_meta_validate(struct sr_discipline *, dev_t,
			    struct sr_metadata *, void *);
void			sr_meta_save_callback(void *);
int			sr_meta_save(struct sr_discipline *, u_int32_t);
void			sr_meta_getdevname(struct sr_softc *, dev_t, char *,
			    int);
void			sr_meta_opt_load(struct sr_softc *,
			    struct sr_metadata *, struct sr_meta_opt_head *);
void			*sr_block_get(struct sr_discipline *, long);
void			sr_block_put(struct sr_discipline *, void *, int);
void			sr_checksum(struct sr_softc *, void *, void *,
			    u_int32_t);
int			sr_validate_io(struct sr_workunit *, daddr_t *,
			    char *);
void			sr_schedule_wu(struct sr_workunit *);
void			sr_scsi_done(struct sr_discipline *,
			    struct scsi_xfer *);
struct sr_workunit	*sr_scsi_wu_get(struct sr_discipline *, int);
void			sr_scsi_wu_put(struct sr_discipline *,
			    struct sr_workunit *);
int			sr_chunk_in_use(struct sr_softc *, dev_t);
int			sr_rebuild_percent(struct sr_discipline *);

/* discipline functions */
int			sr_raid_inquiry(struct sr_workunit *);
int			sr_raid_read_cap(struct sr_workunit *);
int			sr_raid_tur(struct sr_workunit *);
int			sr_raid_request_sense(struct sr_workunit *);
int			sr_raid_start_stop(struct sr_workunit *);
int			sr_raid_sync(struct sr_workunit *);
void			sr_raid_intr(struct buf *);
void			sr_raid_startwu(struct sr_workunit *);
void			sr_raid_recreate_wu(struct sr_workunit *);

/* Discipline specific initialisation. */
void			sr_raid0_discipline_init(struct sr_discipline *);
void			sr_raid1_discipline_init(struct sr_discipline *);
void			sr_raid5_discipline_init(struct sr_discipline *);
void			sr_raid6_discipline_init(struct sr_discipline *);
void			sr_crypto_discipline_init(struct sr_discipline *);
void			sr_concat_discipline_init(struct sr_discipline *);
void			sr_raid1c_discipline_init(struct sr_discipline *);

/* Crypto discipline hooks. */
int			sr_crypto_get_kdf(struct bioc_createraid *,
			    struct sr_discipline *, struct sr_crypto *);
int			sr_crypto_create_keys(struct sr_discipline *,
			    struct sr_crypto *);
struct sr_chunk *	sr_crypto_create_key_disk(struct sr_discipline *,
			    struct sr_crypto *, dev_t);
struct sr_chunk *	sr_crypto_read_key_disk(struct sr_discipline *,
			    struct sr_crypto *, dev_t);

/* Hibernate I/O function */
int			sr_hibernate_io(dev_t dev, daddr_t blkno, vaddr_t addr,
			    size_t size, int op, void *page);

#ifdef SR_DEBUG
void			sr_dump_block(void *, int);
void			sr_dump_mem(u_int8_t *, int);
#endif

#endif /* _KERNEL */

#endif /* SOFTRAIDVAR_H */