xref: /openbsd/sys/dev/pci/mfii.c (revision 097a140d)
1 /* $OpenBSD: mfii.c,v 1.83 2020/12/15 03:05:31 dlg Exp $ */
2 
3 /*
4  * Copyright (c) 2012 David Gwynne <dlg@openbsd.org>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "bio.h"
20 
21 #include <sys/param.h>
22 #include <sys/systm.h>
23 #include <sys/malloc.h>
24 #include <sys/device.h>
25 #include <sys/dkio.h>
26 #include <sys/pool.h>
27 #include <sys/task.h>
28 #include <sys/atomic.h>
29 #include <sys/sensors.h>
30 #include <sys/rwlock.h>
31 #include <sys/syslog.h>
32 #include <sys/smr.h>
33 
34 #include <dev/biovar.h>
35 #include <dev/pci/pcidevs.h>
36 #include <dev/pci/pcivar.h>
37 
38 #include <machine/bus.h>
39 
40 #include <scsi/scsi_all.h>
41 #include <scsi/scsi_disk.h>
42 #include <scsi/scsiconf.h>
43 
44 #include <dev/ic/mfireg.h>
45 #include <dev/pci/mpiireg.h>
46 
47 #define	MFII_BAR		0x14
48 #define MFII_BAR_35		0x10
49 #define	MFII_PCI_MEMSIZE	0x2000 /* 8k */
50 
51 #define MFII_OSTS_INTR_VALID	0x00000009
52 #define MFII_RPI		0x6c /* reply post host index */
53 #define MFII_OSP2		0xb4 /* outbound scratch pad 2 */
54 #define MFII_OSP3		0xb8 /* outbound scratch pad 3 */
55 
56 #define MFII_REQ_TYPE_SCSI	MPII_REQ_DESCR_SCSI_IO
57 #define MFII_REQ_TYPE_LDIO	(0x7 << 1)
58 #define MFII_REQ_TYPE_MFA	(0x1 << 1)
59 #define MFII_REQ_TYPE_NO_LOCK	(0x2 << 1)
60 #define MFII_REQ_TYPE_HI_PRI	(0x6 << 1)
61 
62 #define MFII_REQ_MFA(_a)	htole64((_a) | MFII_REQ_TYPE_MFA)
63 
64 #define MFII_FUNCTION_PASSTHRU_IO			(0xf0)
65 #define MFII_FUNCTION_LDIO_REQUEST			(0xf1)
66 
67 #define MFII_MAX_CHAIN_UNIT	0x00400000
68 #define MFII_MAX_CHAIN_MASK	0x000003E0
69 #define MFII_MAX_CHAIN_SHIFT	5
70 
71 #define MFII_256K_IO		128
72 #define MFII_1MB_IO		(MFII_256K_IO * 4)
73 
74 #define MFII_CHAIN_FRAME_MIN	1024
75 
/*
 * Request descriptor posted to the controller to kick off a command.
 * The low bits of "flags" select the descriptor type (MFII_REQ_TYPE_*)
 * and "smid" identifies the request frame slot the command occupies.
 * Hardware layout: must stay packed and field order must not change.
 */
struct mfii_request_descr {
	u_int8_t	flags;
	u_int8_t	msix_index;
	u_int16_t	smid;

	u_int16_t	lmid;
	u_int16_t	dev_handle;
} __packed;
84 
85 #define MFII_RAID_CTX_IO_TYPE_SYSPD	(0x1 << 4)
86 #define MFII_RAID_CTX_TYPE_CUDA		(0x2 << 4)
87 
/*
 * RAID context appended to the MPII SCSI IO request frame for LDIO
 * requests.  The reg_lock/routing flag and type_nseg values filled in
 * here come from the per-controller mfii_iop descriptor.  Hardware
 * layout: packed, field order fixed by firmware.
 */
struct mfii_raid_context {
	u_int8_t	type_nseg;
	u_int8_t	_reserved1;
	u_int16_t	timeout_value;

	u_int16_t	reg_lock_flags;
#define MFII_RAID_CTX_RL_FLAGS_SEQNO_EN	(0x08)
#define MFII_RAID_CTX_RL_FLAGS_CPU0	(0x00)
#define MFII_RAID_CTX_RL_FLAGS_CPU1	(0x10)
#define MFII_RAID_CTX_RL_FLAGS_CUDA	(0x80)

#define MFII_RAID_CTX_ROUTING_FLAGS_SQN	(1 << 4)
#define MFII_RAID_CTX_ROUTING_FLAGS_CPU0 0
	u_int16_t	virtual_disk_target_id;

	u_int64_t	reg_lock_row_lba;

	u_int32_t	reg_lock_length;

	u_int16_t	next_lm_id;
	u_int8_t	ex_status;
	u_int8_t	status;

	u_int8_t	raid_flags;
	u_int8_t	num_sge;
	u_int16_t	config_seq_num;

	u_int8_t	span_arm;
	u_int8_t	_reserved3[3];
} __packed;
118 
/*
 * IEEE-style scatter/gather element.  sg_flags carries the
 * MFII_SGE_* bits; sg_next_chain_offset is only meaningful on a
 * chain element.  Hardware layout: packed.
 */
struct mfii_sge {
	u_int64_t	sg_addr;
	u_int32_t	sg_len;
	u_int16_t	_reserved;
	u_int8_t	sg_next_chain_offset;
	u_int8_t	sg_flags;
} __packed;
126 
127 #define MFII_SGE_ADDR_MASK		(0x03)
128 #define MFII_SGE_ADDR_SYSTEM		(0x00)
129 #define MFII_SGE_ADDR_IOCDDR		(0x01)
130 #define MFII_SGE_ADDR_IOCPLB		(0x02)
131 #define MFII_SGE_ADDR_IOCPLBNTA		(0x03)
132 #define MFII_SGE_END_OF_LIST		(0x40)
133 #define MFII_SGE_CHAIN_ELEMENT		(0x80)
134 
135 #define MFII_REQUEST_SIZE	256
136 
137 #define MR_DCMD_LD_MAP_GET_INFO			0x0300e101
138 
139 #define MFII_MAX_ROW		32
140 #define MFII_MAX_ARRAY		128
141 
/* per-array map of physical device ids, one entry per row */
struct mfii_array_map {
	uint16_t		mam_pd[MFII_MAX_ROW];
} __packed;
145 
/*
 * Firmware-supplied device handle entry for one physical disk;
 * mdh_cur_handle is the handle used to address the disk in MPII
 * passthru requests.
 */
struct mfii_dev_handle {
	uint16_t		mdh_cur_handle;
	uint8_t			mdh_valid;
	uint8_t			mdh_reserved;
	uint16_t		mdh_handle[2];
} __packed;
152 
/*
 * Layout of the MR_DCMD_LD_MAP_GET_INFO reply: logical disk map plus
 * the per-physical-disk device handle table consumed by
 * mfii_dev_handles_update().  Firmware-defined layout: packed.
 */
struct mfii_ld_map {
	uint32_t		mlm_total_size;
	uint32_t		mlm_reserved1[5];
	uint32_t		mlm_num_lds;
	uint32_t		mlm_reserved2;
	uint8_t			mlm_tgtid_to_ld[2 * MFI_MAX_LD];
	uint8_t			mlm_pd_timeout;
	uint8_t			mlm_reserved3[7];
	struct mfii_array_map	mlm_am[MFII_MAX_ARRAY];
	struct mfii_dev_handle	mlm_dev_handle[MFI_MAX_PD];
} __packed;
164 
/*
 * Task management (abort) request/reply pair; request and reply each
 * live in a fixed 128-byte region so the union members overlay the
 * raw buffers.  Used by mfii_abort().
 */
struct mfii_task_mgmt {
	union {
		uint8_t			request[128];
		struct mpii_msg_scsi_task_request
					mpii_request;
	} __packed __aligned(8);

	union {
		uint8_t			reply[128];
		uint32_t		flags;
#define MFII_TASK_MGMT_FLAGS_LD				(1 << 0)
#define MFII_TASK_MGMT_FLAGS_PD				(1 << 1)
		struct mpii_msg_scsi_task_reply
					mpii_reply;
	} __packed __aligned(8);
} __packed __aligned(8);
181 
/*
 * Wrapper for a single contiguous bus_dma allocation; created by
 * mfii_dmamem_alloc() and torn down by mfii_dmamem_free().  Access
 * via the MFII_DMA_* macros below.
 */
struct mfii_dmamem {
	bus_dmamap_t		mdm_map;
	bus_dma_segment_t	mdm_seg;
	size_t			mdm_size;
	caddr_t			mdm_kva;
};
188 #define MFII_DMA_MAP(_mdm)	((_mdm)->mdm_map)
189 #define MFII_DMA_LEN(_mdm)	((_mdm)->mdm_size)
190 #define MFII_DMA_DVA(_mdm)	((u_int64_t)(_mdm)->mdm_map->dm_segs[0].ds_addr)
191 #define MFII_DMA_KVA(_mdm)	((void *)(_mdm)->mdm_kva)
192 
193 struct mfii_softc;
194 
/*
 * Per-command control block.  Each ccb owns a slice of the shared
 * request, mfi, sense and sgl DMA areas (kva pointer, device address
 * and offset for each), plus a dmamap for the data transfer itself.
 */
struct mfii_ccb {
	void			*ccb_request;		/* MPII request frame */
	u_int64_t		ccb_request_dva;
	bus_addr_t		ccb_request_offset;

	void			*ccb_mfi;		/* MFI command frame */
	u_int64_t		ccb_mfi_dva;
	bus_addr_t		ccb_mfi_offset;

	struct mfi_sense	*ccb_sense;		/* also reused for dcmds */
	u_int64_t		ccb_sense_dva;
	bus_addr_t		ccb_sense_offset;

	struct mfii_sge		*ccb_sgl;		/* chain sgl space */
	u_int64_t		ccb_sgl_dva;
	bus_addr_t		ccb_sgl_offset;
	u_int			ccb_sgl_len;

	struct mfii_request_descr ccb_req;

	bus_dmamap_t		ccb_dmamap;

	/* data for sgl */
	void			*ccb_data;
	size_t			ccb_len;

	int			ccb_direction;
#define MFII_DATA_NONE			0
#define MFII_DATA_IN			1
#define MFII_DATA_OUT			2

	void			*ccb_cookie;
	void			(*ccb_done)(struct mfii_softc *,
				    struct mfii_ccb *);

	u_int32_t		ccb_flags;
#define MFI_CCB_F_ERR			(1<<0)
	u_int			ccb_smid;	/* 1-based request frame slot */
	u_int			ccb_refcnt;
	SIMPLEQ_ENTRY(mfii_ccb)	ccb_link;
};
236 SIMPLEQ_HEAD(mfii_ccb_list, mfii_ccb);
237 
/*
 * Snapshot of physical disk device handles, published via SMR so the
 * hot I/O path (mfii_dev_handle) can read it without locks; retired
 * copies are freed from the smr_call callback.
 */
struct mfii_pd_dev_handles {
	struct smr_entry	pd_smr;
	uint16_t		pd_handles[MFI_MAX_PD];
};
242 
/* state for the physical disk (passthru) scsibus attached by mfii_syspd() */
struct mfii_pd_softc {
	struct scsibus_softc	*pd_scsibus;
	struct mfii_pd_dev_handles *pd_dev_handles;	/* SMR-protected */
	uint8_t			pd_timeout;
};
248 
/*
 * Per-controller-generation personality: which BAR to map and which
 * request/SGE flag values the firmware generation expects.  Instances
 * are mfii_iop_thunderbolt, mfii_iop_25 and mfii_iop_35 below.
 */
struct mfii_iop {
	int bar;
	int num_sge_loc;
#define MFII_IOP_NUM_SGE_LOC_ORIG	0
#define MFII_IOP_NUM_SGE_LOC_35		1
	u_int16_t ldio_ctx_reg_lock_flags;
	u_int8_t ldio_req_type;
	u_int8_t ldio_ctx_type_nseg;
	u_int8_t sge_flag_chain;
	u_int8_t sge_flag_eol;
};
260 
/* per-controller softc */
struct mfii_softc {
	struct device		sc_dev;
	const struct mfii_iop	*sc_iop;	/* controller personality */

	pci_chipset_tag_t	sc_pc;
	pcitag_t		sc_tag;

	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	bus_size_t		sc_ios;
	bus_dma_tag_t		sc_dmat;

	void			*sc_ih;		/* interrupt handle */

	struct mutex		sc_ccb_mtx;
	struct mutex		sc_post_mtx;

	/* command counts derived from firmware scratchpad registers */
	u_int			sc_max_fw_cmds;
	u_int			sc_max_cmds;
	u_int			sc_max_sgl;

	/* reply post queue ring */
	u_int			sc_reply_postq_depth;
	u_int			sc_reply_postq_index;
	struct mutex		sc_reply_postq_mtx;
	struct mfii_dmamem	*sc_reply_postq;

	/* shared DMA areas sliced up between the ccbs */
	struct mfii_dmamem	*sc_requests;
	struct mfii_dmamem	*sc_mfi;
	struct mfii_dmamem	*sc_sense;
	struct mfii_dmamem	*sc_sgl;

	struct mfii_ccb		*sc_ccb;
	struct mfii_ccb_list	sc_ccb_freeq;

	struct mfii_ccb		*sc_aen_ccb;
	struct task		sc_aen_task;

	struct mutex		sc_abort_mtx;
	struct mfii_ccb_list	sc_abort_list;
	struct task		sc_abort_task;

	struct scsibus_softc	*sc_scsibus;
	struct mfii_pd_softc	*sc_pd;		/* physical disk bus state */
	struct scsi_iopool	sc_iopool;

	/* save some useful information for logical drives that is missing
	 * in sc_ld_list
	 */
	struct {
		char		ld_dev[16];	/* device name sd? */
	}			sc_ld[MFI_MAX_LD];
	int			sc_target_lds[MFI_MAX_LD];

	/* scsi ioctl from sd device */
	int			(*sc_ioctl)(struct device *, u_long, caddr_t);

	/* bio */
	struct mfi_conf		*sc_cfg;
	struct mfi_ctrl_info	sc_info;
	struct mfi_ld_list	sc_ld_list;
	struct mfi_ld_details	*sc_ld_details; /* array to all logical disks */
	int			sc_no_pd; /* used physical disks */
	int			sc_ld_sz; /* sizeof sc_ld_details */

	/* mgmt lock */
	struct rwlock		sc_lock;

	/* sensors */
	struct ksensordev	sc_sensordev;
	struct ksensor		*sc_bbu;
	struct ksensor		*sc_bbu_status;
	struct ksensor		*sc_sensors;
};
334 
335 #ifdef MFII_DEBUG
336 #define DPRINTF(x...)		do { if (mfii_debug) printf(x); } while(0)
337 #define DNPRINTF(n,x...)	do { if (mfii_debug & n) printf(x); } while(0)
338 #define	MFII_D_CMD		0x0001
339 #define	MFII_D_INTR		0x0002
340 #define	MFII_D_MISC		0x0004
341 #define	MFII_D_DMA		0x0008
342 #define	MFII_D_IOCTL		0x0010
343 #define	MFII_D_RW		0x0020
344 #define	MFII_D_MEM		0x0040
345 #define	MFII_D_CCB		0x0080
346 uint32_t	mfii_debug = 0
347 /*		    | MFII_D_CMD */
348 /*		    | MFII_D_INTR */
349 		    | MFII_D_MISC
350 /*		    | MFII_D_DMA */
351 /*		    | MFII_D_IOCTL */
352 /*		    | MFII_D_RW */
353 /*		    | MFII_D_MEM */
354 /*		    | MFII_D_CCB */
355 		;
356 #else
357 #define DPRINTF(x...)
358 #define DNPRINTF(n,x...)
359 #endif
360 
361 int		mfii_match(struct device *, void *, void *);
362 void		mfii_attach(struct device *, struct device *, void *);
363 int		mfii_detach(struct device *, int);
364 int		mfii_activate(struct device *, int);
365 
366 struct cfattach mfii_ca = {
367 	sizeof(struct mfii_softc),
368 	mfii_match,
369 	mfii_attach,
370 	mfii_detach,
371 	mfii_activate,
372 };
373 
374 struct cfdriver mfii_cd = {
375 	NULL,
376 	"mfii",
377 	DV_DULL
378 };
379 
380 void		mfii_scsi_cmd(struct scsi_xfer *);
381 void		mfii_scsi_cmd_done(struct mfii_softc *, struct mfii_ccb *);
382 int		mfii_scsi_ioctl(struct scsi_link *, u_long, caddr_t, int);
383 int		mfii_ioctl_cache(struct scsi_link *, u_long, struct dk_cache *);
384 
/* adapter entry points for the logical disk (RAID volume) scsibus */
struct scsi_adapter mfii_switch = {
	mfii_scsi_cmd, NULL, NULL, NULL, mfii_scsi_ioctl
};
388 
389 void		mfii_pd_scsi_cmd(struct scsi_xfer *);
390 int		mfii_pd_scsi_probe(struct scsi_link *);
391 
/* adapter entry points for the physical disk passthru scsibus */
struct scsi_adapter mfii_pd_switch = {
	mfii_pd_scsi_cmd, NULL, mfii_pd_scsi_probe, NULL, NULL,
};
395 
396 #define DEVNAME(_sc)		((_sc)->sc_dev.dv_xname)
397 
398 u_int32_t		mfii_read(struct mfii_softc *, bus_size_t);
399 void			mfii_write(struct mfii_softc *, bus_size_t, u_int32_t);
400 
401 struct mfii_dmamem *	mfii_dmamem_alloc(struct mfii_softc *, size_t);
402 void			mfii_dmamem_free(struct mfii_softc *,
403 			    struct mfii_dmamem *);
404 
405 void *			mfii_get_ccb(void *);
406 void			mfii_put_ccb(void *, void *);
407 int			mfii_init_ccb(struct mfii_softc *);
408 void			mfii_scrub_ccb(struct mfii_ccb *);
409 
410 int			mfii_transition_firmware(struct mfii_softc *);
411 int			mfii_initialise_firmware(struct mfii_softc *);
412 int			mfii_get_info(struct mfii_softc *);
413 int			mfii_syspd(struct mfii_softc *);
414 
415 void			mfii_start(struct mfii_softc *, struct mfii_ccb *);
416 void			mfii_done(struct mfii_softc *, struct mfii_ccb *);
417 int			mfii_poll(struct mfii_softc *, struct mfii_ccb *);
418 void			mfii_poll_done(struct mfii_softc *, struct mfii_ccb *);
419 int			mfii_exec(struct mfii_softc *, struct mfii_ccb *);
420 void			mfii_exec_done(struct mfii_softc *, struct mfii_ccb *);
421 int			mfii_my_intr(struct mfii_softc *);
422 int			mfii_intr(void *);
423 void			mfii_postq(struct mfii_softc *);
424 
425 int			mfii_load_ccb(struct mfii_softc *, struct mfii_ccb *,
426 			    void *, int);
427 int			mfii_load_mfa(struct mfii_softc *, struct mfii_ccb *,
428 			    void *, int);
429 
430 int			mfii_mfa_poll(struct mfii_softc *, struct mfii_ccb *);
431 
432 int			mfii_mgmt(struct mfii_softc *, uint32_t,
433 			    const union mfi_mbox *, void *, size_t, int);
434 int			mfii_do_mgmt(struct mfii_softc *, struct mfii_ccb *,
435 			    uint32_t, const union mfi_mbox *, void *, size_t,
436 			    int);
437 void			mfii_empty_done(struct mfii_softc *, struct mfii_ccb *);
438 
439 int			mfii_scsi_cmd_io(struct mfii_softc *,
440 			    struct scsi_xfer *);
441 int			mfii_scsi_cmd_cdb(struct mfii_softc *,
442 			    struct scsi_xfer *);
443 int			mfii_pd_scsi_cmd_cdb(struct mfii_softc *,
444 			    struct scsi_xfer *);
445 void			mfii_scsi_cmd_tmo(void *);
446 
447 int			mfii_dev_handles_update(struct mfii_softc *sc);
448 void			mfii_dev_handles_smr(void *pd_arg);
449 
450 void			mfii_abort_task(void *);
451 void			mfii_abort(struct mfii_softc *, struct mfii_ccb *,
452 			    uint16_t, uint16_t, uint8_t, uint32_t);
453 void			mfii_scsi_cmd_abort_done(struct mfii_softc *,
454 			    struct mfii_ccb *);
455 
456 int			mfii_aen_register(struct mfii_softc *);
457 void			mfii_aen_start(struct mfii_softc *, struct mfii_ccb *,
458 			    struct mfii_dmamem *, uint32_t);
459 void			mfii_aen_done(struct mfii_softc *, struct mfii_ccb *);
460 void			mfii_aen(void *);
461 void			mfii_aen_unregister(struct mfii_softc *);
462 
463 void			mfii_aen_pd_insert(struct mfii_softc *,
464 			    const struct mfi_evtarg_pd_address *);
465 void			mfii_aen_pd_remove(struct mfii_softc *,
466 			    const struct mfi_evtarg_pd_address *);
467 void			mfii_aen_pd_state_change(struct mfii_softc *,
468 			    const struct mfi_evtarg_pd_state *);
469 void			mfii_aen_ld_update(struct mfii_softc *);
470 
471 #if NBIO > 0
472 int		mfii_ioctl(struct device *, u_long, caddr_t);
473 int		mfii_bio_getitall(struct mfii_softc *);
474 int		mfii_ioctl_inq(struct mfii_softc *, struct bioc_inq *);
475 int		mfii_ioctl_vol(struct mfii_softc *, struct bioc_vol *);
476 int		mfii_ioctl_disk(struct mfii_softc *, struct bioc_disk *);
477 int		mfii_ioctl_alarm(struct mfii_softc *, struct bioc_alarm *);
478 int		mfii_ioctl_blink(struct mfii_softc *sc, struct bioc_blink *);
479 int		mfii_ioctl_setstate(struct mfii_softc *,
480 		    struct bioc_setstate *);
481 int		mfii_ioctl_patrol(struct mfii_softc *sc, struct bioc_patrol *);
482 int		mfii_bio_hs(struct mfii_softc *, int, int, void *);
483 
484 #ifndef SMALL_KERNEL
/* human readable names for the battery backup unit status bits, in bit order */
static const char *mfi_bbu_indicators[] = {
	"pack missing",
	"voltage low",
	"temp high",
	"charge active",
	"discharge active",
	"learn cycle req'd",
	"learn cycle active",
	"learn cycle failed",
	"learn cycle timeout",
	"I2C errors",
	"replace pack",
	"low capacity",
	"periodic learn req'd"
};
500 
501 void		mfii_init_ld_sensor(struct mfii_softc *, int);
502 void		mfii_refresh_ld_sensor(struct mfii_softc *, int);
503 int		mfii_create_sensors(struct mfii_softc *);
504 void		mfii_refresh_sensors(void *);
505 void		mfii_bbu(struct mfii_softc *);
506 #endif /* SMALL_KERNEL */
507 #endif /* NBIO > 0 */
508 
509 /*
510  * mfii boards support asynchronous (and non-polled) completion of
511  * dcmds by proxying them through a passthru mpii command that points
512  * at a dcmd frame. since the passthru command is submitted like
513  * the scsi commands using an SMID in the request descriptor,
514  * ccb_request memory * must contain the passthru command because
515  * that is what the SMID refers to. this means ccb_request cannot
516  * contain the dcmd. rather than allocating separate dma memory to
517  * hold the dcmd, we reuse the sense memory buffer for it.
518  */
519 
520 void			mfii_dcmd_start(struct mfii_softc *,
521 			    struct mfii_ccb *);
522 
523 static inline void
524 mfii_dcmd_scrub(struct mfii_ccb *ccb)
525 {
526 	memset(ccb->ccb_sense, 0, sizeof(*ccb->ccb_sense));
527 }
528 
529 static inline struct mfi_dcmd_frame *
530 mfii_dcmd_frame(struct mfii_ccb *ccb)
531 {
532 	CTASSERT(sizeof(struct mfi_dcmd_frame) <= sizeof(*ccb->ccb_sense));
533 	return ((struct mfi_dcmd_frame *)ccb->ccb_sense);
534 }
535 
/*
 * Sync the slice of the shared sense DMA area that holds this ccb's
 * dcmd frame (see comment above: dcmds are built in the sense buffer).
 * flags are BUS_DMASYNC_* values chosen by the caller.
 */
static inline void
mfii_dcmd_sync(struct mfii_softc *sc, struct mfii_ccb *ccb, int flags)
{
	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_sense),
	    ccb->ccb_sense_offset, sizeof(*ccb->ccb_sense), flags);
}
542 
543 #define mfii_fw_state(_sc) mfii_read((_sc), MFI_OSP)
544 
545 const struct mfii_iop mfii_iop_thunderbolt = {
546 	MFII_BAR,
547 	MFII_IOP_NUM_SGE_LOC_ORIG,
548 	0,
549 	MFII_REQ_TYPE_LDIO,
550 	0,
551 	MFII_SGE_CHAIN_ELEMENT | MFII_SGE_ADDR_IOCPLBNTA,
552 	0
553 };
554 
555 /*
556  * a lot of these values depend on us not implementing fastpath yet.
557  */
558 const struct mfii_iop mfii_iop_25 = {
559 	MFII_BAR,
560 	MFII_IOP_NUM_SGE_LOC_ORIG,
561 	MFII_RAID_CTX_RL_FLAGS_CPU0, /* | MFII_RAID_CTX_RL_FLAGS_SEQNO_EN */
562 	MFII_REQ_TYPE_NO_LOCK,
563 	MFII_RAID_CTX_TYPE_CUDA | 0x1,
564 	MFII_SGE_CHAIN_ELEMENT,
565 	MFII_SGE_END_OF_LIST
566 };
567 
568 const struct mfii_iop mfii_iop_35 = {
569 	MFII_BAR_35,
570 	MFII_IOP_NUM_SGE_LOC_35,
571 	MFII_RAID_CTX_ROUTING_FLAGS_CPU0, /* | MFII_RAID_CTX_ROUTING_FLAGS_SQN */
572 	MFII_REQ_TYPE_NO_LOCK,
573 	MFII_RAID_CTX_TYPE_CUDA | 0x1,
574 	MFII_SGE_CHAIN_ELEMENT,
575 	MFII_SGE_END_OF_LIST
576 };
577 
/* pci id to controller personality mapping entry */
struct mfii_device {
	pcireg_t		mpd_vendor;
	pcireg_t		mpd_product;
	const struct mfii_iop	*mpd_iop;
};
583 
/* supported controllers, searched linearly by mfii_find_iop() */
const struct mfii_device mfii_devices[] = {
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_2208,
	    &mfii_iop_thunderbolt },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3008,
	    &mfii_iop_25 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3108,
	    &mfii_iop_25 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3404,
	    &mfii_iop_35 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3504,
	    &mfii_iop_35 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3408,
	    &mfii_iop_35 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3508,
	    &mfii_iop_35 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3416,
	    &mfii_iop_35 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3516,
	    &mfii_iop_35 }
};
604 
605 const struct mfii_iop *mfii_find_iop(struct pci_attach_args *);
606 
607 const struct mfii_iop *
608 mfii_find_iop(struct pci_attach_args *pa)
609 {
610 	const struct mfii_device *mpd;
611 	int i;
612 
613 	for (i = 0; i < nitems(mfii_devices); i++) {
614 		mpd = &mfii_devices[i];
615 
616 		if (mpd->mpd_vendor == PCI_VENDOR(pa->pa_id) &&
617 		    mpd->mpd_product == PCI_PRODUCT(pa->pa_id))
618 			return (mpd->mpd_iop);
619 	}
620 
621 	return (NULL);
622 }
623 
624 int
625 mfii_match(struct device *parent, void *match, void *aux)
626 {
627 	return ((mfii_find_iop(aux) != NULL) ? 1 : 0);
628 }
629 
630 void
631 mfii_attach(struct device *parent, struct device *self, void *aux)
632 {
633 	struct mfii_softc *sc = (struct mfii_softc *)self;
634 	struct pci_attach_args *pa = aux;
635 	pcireg_t memtype;
636 	pci_intr_handle_t ih;
637 	struct scsibus_attach_args saa;
638 	u_int32_t status, scpad2, scpad3;
639 	int chain_frame_sz, nsge_in_io, nsge_in_chain, i;
640 
641 	/* init sc */
642 	sc->sc_iop = mfii_find_iop(aux);
643 	sc->sc_dmat = pa->pa_dmat;
644 	SIMPLEQ_INIT(&sc->sc_ccb_freeq);
645 	mtx_init(&sc->sc_ccb_mtx, IPL_BIO);
646 	mtx_init(&sc->sc_post_mtx, IPL_BIO);
647 	mtx_init(&sc->sc_reply_postq_mtx, IPL_BIO);
648 	scsi_iopool_init(&sc->sc_iopool, sc, mfii_get_ccb, mfii_put_ccb);
649 
650 	rw_init(&sc->sc_lock, "mfii_lock");
651 
652 	sc->sc_aen_ccb = NULL;
653 	task_set(&sc->sc_aen_task, mfii_aen, sc);
654 
655 	mtx_init(&sc->sc_abort_mtx, IPL_BIO);
656 	SIMPLEQ_INIT(&sc->sc_abort_list);
657 	task_set(&sc->sc_abort_task, mfii_abort_task, sc);
658 
659 	/* wire up the bus shizz */
660 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, sc->sc_iop->bar);
661 	if (pci_mapreg_map(pa, sc->sc_iop->bar, memtype, 0,
662 	    &sc->sc_iot, &sc->sc_ioh, NULL, &sc->sc_ios, MFII_PCI_MEMSIZE)) {
663 		printf(": unable to map registers\n");
664 		return;
665 	}
666 
667 	/* disable interrupts */
668 	mfii_write(sc, MFI_OMSK, 0xffffffff);
669 
670 	if (pci_intr_map_msi(pa, &ih) != 0 && pci_intr_map(pa, &ih) != 0) {
671 		printf(": unable to map interrupt\n");
672 		goto pci_unmap;
673 	}
674 	printf(": %s\n", pci_intr_string(pa->pa_pc, ih));
675 
676 	/* lets get started */
677 	if (mfii_transition_firmware(sc))
678 		goto pci_unmap;
679 
680 	/* determine max_cmds (refer to the Linux megaraid_sas driver) */
681 	scpad3 = mfii_read(sc, MFII_OSP3);
682 	status = mfii_fw_state(sc);
683 	sc->sc_max_fw_cmds = scpad3 & MFI_STATE_MAXCMD_MASK;
684 	if (sc->sc_max_fw_cmds == 0)
685 		sc->sc_max_fw_cmds = status & MFI_STATE_MAXCMD_MASK;
686 	/*
687 	 * reduce max_cmds by 1 to ensure that the reply queue depth does not
688 	 * exceed FW supplied max_fw_cmds.
689 	 */
690 	sc->sc_max_cmds = min(sc->sc_max_fw_cmds, 1024) - 1;
691 
692 	/* determine max_sgl (refer to the Linux megaraid_sas driver) */
693 	scpad2 = mfii_read(sc, MFII_OSP2);
694 	chain_frame_sz =
695 		((scpad2 & MFII_MAX_CHAIN_MASK) >> MFII_MAX_CHAIN_SHIFT) *
696 		((scpad2 & MFII_MAX_CHAIN_UNIT) ? MFII_1MB_IO : MFII_256K_IO);
697 	if (chain_frame_sz < MFII_CHAIN_FRAME_MIN)
698 		chain_frame_sz = MFII_CHAIN_FRAME_MIN;
699 
700 	nsge_in_io = (MFII_REQUEST_SIZE -
701 		sizeof(struct mpii_msg_scsi_io) -
702 		sizeof(struct mfii_raid_context)) / sizeof(struct mfii_sge);
703 	nsge_in_chain = chain_frame_sz / sizeof(struct mfii_sge);
704 
705 	/* round down to nearest power of two */
706 	sc->sc_max_sgl = 1;
707 	while ((sc->sc_max_sgl << 1) <= (nsge_in_io + nsge_in_chain))
708 		sc->sc_max_sgl <<= 1;
709 
710 	DNPRINTF(MFII_D_MISC, "%s: OSP 0x%08x, OSP2 0x%08x, OSP3 0x%08x\n",
711 	    DEVNAME(sc), status, scpad2, scpad3);
712 	DNPRINTF(MFII_D_MISC, "%s: max_fw_cmds %d, max_cmds %d\n",
713 	    DEVNAME(sc), sc->sc_max_fw_cmds, sc->sc_max_cmds);
714 	DNPRINTF(MFII_D_MISC, "%s: nsge_in_io %d, nsge_in_chain %d, "
715 	    "max_sgl %d\n", DEVNAME(sc), nsge_in_io, nsge_in_chain,
716 	    sc->sc_max_sgl);
717 
718 	/* sense memory */
719 	CTASSERT(sizeof(struct mfi_sense) == MFI_SENSE_SIZE);
720 	sc->sc_sense = mfii_dmamem_alloc(sc, sc->sc_max_cmds * MFI_SENSE_SIZE);
721 	if (sc->sc_sense == NULL) {
722 		printf("%s: unable to allocate sense memory\n", DEVNAME(sc));
723 		goto pci_unmap;
724 	}
725 
726 	/* reply post queue */
727 	sc->sc_reply_postq_depth = roundup(sc->sc_max_fw_cmds, 16);
728 
729 	sc->sc_reply_postq = mfii_dmamem_alloc(sc,
730 	    sc->sc_reply_postq_depth * sizeof(struct mpii_reply_descr));
731 	if (sc->sc_reply_postq == NULL)
732 		goto free_sense;
733 
734 	memset(MFII_DMA_KVA(sc->sc_reply_postq), 0xff,
735 	    MFII_DMA_LEN(sc->sc_reply_postq));
736 
737 	/* MPII request frame array */
738 	sc->sc_requests = mfii_dmamem_alloc(sc,
739 	    MFII_REQUEST_SIZE * (sc->sc_max_cmds + 1));
740 	if (sc->sc_requests == NULL)
741 		goto free_reply_postq;
742 
743 	/* MFI command frame array */
744 	sc->sc_mfi = mfii_dmamem_alloc(sc, sc->sc_max_cmds * MFI_FRAME_SIZE);
745 	if (sc->sc_mfi == NULL)
746 		goto free_requests;
747 
748 	/* MPII SGL array */
749 	sc->sc_sgl = mfii_dmamem_alloc(sc, sc->sc_max_cmds *
750 	    sizeof(struct mfii_sge) * sc->sc_max_sgl);
751 	if (sc->sc_sgl == NULL)
752 		goto free_mfi;
753 
754 	if (mfii_init_ccb(sc) != 0) {
755 		printf("%s: could not init ccb list\n", DEVNAME(sc));
756 		goto free_sgl;
757 	}
758 
759 	/* kickstart firmware with all addresses and pointers */
760 	if (mfii_initialise_firmware(sc) != 0) {
761 		printf("%s: could not initialize firmware\n", DEVNAME(sc));
762 		goto free_sgl;
763 	}
764 
765 	if (mfii_get_info(sc) != 0) {
766 		printf("%s: could not retrieve controller information\n",
767 		    DEVNAME(sc));
768 		goto free_sgl;
769 	}
770 
771 	printf("%s: \"%s\", firmware %s", DEVNAME(sc),
772 	    sc->sc_info.mci_product_name, sc->sc_info.mci_package_version);
773 	if (letoh16(sc->sc_info.mci_memory_size) > 0)
774 		printf(", %uMB cache", letoh16(sc->sc_info.mci_memory_size));
775 	printf("\n");
776 
777 	sc->sc_ih = pci_intr_establish(sc->sc_pc, ih, IPL_BIO,
778 	    mfii_intr, sc, DEVNAME(sc));
779 	if (sc->sc_ih == NULL)
780 		goto free_sgl;
781 
782 	saa.saa_adapter_softc = sc;
783 	saa.saa_adapter = &mfii_switch;
784 	saa.saa_adapter_target = SDEV_NO_ADAPTER_TARGET;
785 	saa.saa_adapter_buswidth = sc->sc_info.mci_max_lds;
786 	saa.saa_luns = 8;
787 	saa.saa_openings = sc->sc_max_cmds;
788 	saa.saa_pool = &sc->sc_iopool;
789 	saa.saa_quirks = saa.saa_flags = 0;
790 	saa.saa_wwpn = saa.saa_wwnn = 0;
791 
792 	sc->sc_scsibus = (struct scsibus_softc *)config_found(&sc->sc_dev, &saa,
793 	    scsiprint);
794 
795 	mfii_syspd(sc);
796 
797 	if (mfii_aen_register(sc) != 0) {
798 		/* error printed by mfii_aen_register */
799 		goto intr_disestablish;
800 	}
801 
802 	if (mfii_mgmt(sc, MR_DCMD_LD_GET_LIST, NULL, &sc->sc_ld_list,
803 	    sizeof(sc->sc_ld_list), SCSI_DATA_IN) != 0) {
804 		printf("%s: getting list of logical disks failed\n", DEVNAME(sc));
805 		goto intr_disestablish;
806 	}
807 	memset(sc->sc_target_lds, -1, sizeof(sc->sc_target_lds));
808 	for (i = 0; i < sc->sc_ld_list.mll_no_ld; i++) {
809 		int target = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
810 		sc->sc_target_lds[target] = i;
811 	}
812 
813 	/* enable interrupts */
814 	mfii_write(sc, MFI_OSTS, 0xffffffff);
815 	mfii_write(sc, MFI_OMSK, ~MFII_OSTS_INTR_VALID);
816 
817 #if NBIO > 0
818 	if (bio_register(&sc->sc_dev, mfii_ioctl) != 0)
819 		panic("%s: controller registration failed", DEVNAME(sc));
820 	else
821 		sc->sc_ioctl = mfii_ioctl;
822 
823 #ifndef SMALL_KERNEL
824 	if (mfii_create_sensors(sc) != 0)
825 		printf("%s: unable to create sensors\n", DEVNAME(sc));
826 #endif
827 #endif /* NBIO > 0 */
828 
829 	return;
830 intr_disestablish:
831 	pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
832 free_sgl:
833 	mfii_dmamem_free(sc, sc->sc_sgl);
834 free_mfi:
835 	mfii_dmamem_free(sc, sc->sc_mfi);
836 free_requests:
837 	mfii_dmamem_free(sc, sc->sc_requests);
838 free_reply_postq:
839 	mfii_dmamem_free(sc, sc->sc_reply_postq);
840 free_sense:
841 	mfii_dmamem_free(sc, sc->sc_sense);
842 pci_unmap:
843 	bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
844 }
845 
/*
 * Translate a physical disk target id to the firmware device handle.
 * Runs on the hot I/O path, so the handle table is read lock-free
 * inside an SMR read section; the table itself is only replaced via
 * SMR_PTR_SET_LOCKED in mfii_dev_handles_update().
 */
static inline uint16_t
mfii_dev_handle(struct mfii_softc *sc, uint16_t target)
{
	struct mfii_pd_dev_handles *handles;
	uint16_t handle;

	smr_read_enter();
	handles = SMR_PTR_GET(&sc->sc_pd->pd_dev_handles);
	handle = handles->pd_handles[target];
	smr_read_leave();

	return (handle);
}
859 
860 void
861 mfii_dev_handles_smr(void *pd_arg)
862 {
863 	struct mfii_pd_dev_handles *handles = pd_arg;
864 
865 	free(handles, M_DEVBUF, sizeof(*handles));
866 }
867 
/*
 * Fetch the current logical disk map from the firmware and publish a
 * fresh physical disk handle table for mfii_dev_handle() to read.
 * The old table is retired via smr_call() so in-flight lock-free
 * readers finish before it is freed.  Returns 0 on success, EIO if
 * the firmware query fails.
 */
int
mfii_dev_handles_update(struct mfii_softc *sc)
{
	struct mfii_ld_map *lm;
	struct mfii_pd_dev_handles *handles, *old_handles;
	int i;
	int rv = 0;

	lm = malloc(sizeof(*lm), M_TEMP, M_WAITOK|M_ZERO);

	rv = mfii_mgmt(sc, MR_DCMD_LD_MAP_GET_INFO, NULL, lm, sizeof(*lm),
	    SCSI_DATA_IN|SCSI_NOSLEEP);

	if (rv != 0) {
		rv = EIO;
		goto free_lm;
	}

	/* build the replacement table from the firmware reply */
	handles = malloc(sizeof(*handles), M_DEVBUF, M_WAITOK);
	smr_init(&handles->pd_smr);
	for (i = 0; i < MFI_MAX_PD; i++)
		handles->pd_handles[i] = lm->mlm_dev_handle[i].mdh_cur_handle;

	/* commit the updated info */
	sc->sc_pd->pd_timeout = lm->mlm_pd_timeout;
	old_handles = SMR_PTR_GET_LOCKED(&sc->sc_pd->pd_dev_handles);
	SMR_PTR_SET_LOCKED(&sc->sc_pd->pd_dev_handles, handles);

	/* NULL on the very first update from mfii_syspd() */
	if (old_handles != NULL)
		smr_call(&old_handles->pd_smr, mfii_dev_handles_smr, old_handles);

free_lm:
	free(lm, M_TEMP, sizeof(*lm));

	return (rv);
}
904 
/*
 * Attach the physical disk passthru scsibus.  Allocates the pd state,
 * primes the device handle table, and attaches a second scsibus with
 * the passthru adapter entry points.  Returns 0 on success, 1 on
 * failure (attach continues without the pd bus).
 */
int
mfii_syspd(struct mfii_softc *sc)
{
	struct scsibus_attach_args saa;

	/* M_WAITOK cannot fail, but keep the check for safety */
	sc->sc_pd = malloc(sizeof(*sc->sc_pd), M_DEVBUF, M_WAITOK|M_ZERO);
	if (sc->sc_pd == NULL)
		return (1);

	if (mfii_dev_handles_update(sc) != 0)
		goto free_pdsc;

	saa.saa_adapter =  &mfii_pd_switch;
	saa.saa_adapter_softc = sc;
	saa.saa_adapter_buswidth = MFI_MAX_PD;
	saa.saa_adapter_target = SDEV_NO_ADAPTER_TARGET;
	saa.saa_luns = 8;
	saa.saa_openings = sc->sc_max_cmds - 1;
	saa.saa_pool = &sc->sc_iopool;
	saa.saa_quirks = saa.saa_flags = 0;
	saa.saa_wwpn = saa.saa_wwnn = 0;

	sc->sc_pd->pd_scsibus = (struct scsibus_softc *)
	    config_found(&sc->sc_dev, &saa, scsiprint);

	return (0);

free_pdsc:
	free(sc->sc_pd, M_DEVBUF, sizeof(*sc->sc_pd));
	return (1);
}
936 
/*
 * Autoconf detach: tear down sensors, the AEN machinery, the interrupt
 * handler and all shared DMA memory.  A NULL sc_ih means attach never
 * completed, so there is nothing to undo.
 */
int
mfii_detach(struct device *self, int flags)
{
	struct mfii_softc *sc = (struct mfii_softc *)self;

	if (sc->sc_ih == NULL)
		return (0);

#ifndef SMALL_KERNEL
	if (sc->sc_sensors) {
		sensordev_deinstall(&sc->sc_sensordev);
		free(sc->sc_sensors, M_DEVBUF,
		    MFI_MAX_LD * sizeof(struct ksensor));
	}

	if (sc->sc_bbu) {
		free(sc->sc_bbu, M_DEVBUF, 4 * sizeof(*sc->sc_bbu));
	}

	if (sc->sc_bbu_status) {
		/*
		 * NOTE(review): size uses sizeof(mfi_bbu_indicators) (array
		 * byte size) rather than nitems() -- presumably this matches
		 * the allocation in mfii_create_sensors(); verify there.
		 */
		free(sc->sc_bbu_status, M_DEVBUF,
		    sizeof(*sc->sc_bbu_status) * sizeof(mfi_bbu_indicators));
	}
#endif /* SMALL_KERNEL */

	mfii_aen_unregister(sc);
	pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
	/* free DMA areas in reverse order of allocation */
	mfii_dmamem_free(sc, sc->sc_sgl);
	mfii_dmamem_free(sc, sc->sc_mfi);
	mfii_dmamem_free(sc, sc->sc_requests);
	mfii_dmamem_free(sc, sc->sc_reply_postq);
	mfii_dmamem_free(sc, sc->sc_sense);
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);

	return (0);
}
973 
/*
 * Ask the controller to flush its caches before shutdown.
 * Currently compiled out (#if 0) -- the body is kept for reference;
 * when enabled it issues MR_DCMD_CTRL_CACHE_FLUSH synchronously.
 */
static void
mfii_flush_cache(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
#if 0
	union mfi_mbox mbox = {
		.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE,
	};
	int rv;

	mfii_scrub_ccb(ccb);
	rv = mfii_do_mgmt(sc, ccb, MR_DCMD_CTRL_CACHE_FLUSH, &mbox,
	    NULL, 0, SCSI_NOSLEEP);
	if (rv != 0) {
		printf("%s: unable to flush cache\n", DEVNAME(sc));
		return;
	}
#endif
}
992 
/*
 * Tell the controller to shut down cleanly.  Currently compiled out
 * (#if 0); when enabled it issues MR_DCMD_CTRL_SHUTDOWN via polled
 * mgmt.
 */
static void
mfii_shutdown(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
#if 0
	int rv;

	mfii_scrub_ccb(ccb);
	rv = mfii_do_mgmt(sc, ccb, MR_DCMD_CTRL_SHUTDOWN, NULL,
	    NULL, 0, SCSI_POLL);
	if (rv != 0) {
		printf("%s: unable to shutdown controller\n", DEVNAME(sc));
		return;
	}
#endif
}
1008 
1009 static void
1010 mfii_powerdown(struct mfii_softc *sc)
1011 {
1012 	struct mfii_ccb *ccb;
1013 
1014 	ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP);
1015 	if (ccb == NULL) {
1016 		printf("%s: unable to allocate ccb for shutdown\n",
1017 		    DEVNAME(sc));
1018 		return;
1019 	}
1020 
1021 	mfii_flush_cache(sc, ccb);
1022 	mfii_shutdown(sc, ccb);
1023 	scsi_io_put(&sc->sc_iopool, ccb);
1024 }
1025 
1026 int
1027 mfii_activate(struct device *self, int act)
1028 {
1029 	struct mfii_softc *sc = (struct mfii_softc *)self;
1030 	int rv;
1031 
1032 	switch (act) {
1033 	case DVACT_POWERDOWN:
1034 		rv = config_activate_children(&sc->sc_dev, act);
1035 		mfii_powerdown(sc);
1036 		break;
1037 	default:
1038 		rv = config_activate_children(&sc->sc_dev, act);
1039 		break;
1040 	}
1041 
1042 	return (rv);
1043 }
1044 
/*
 * Read a 32-bit controller register, with a read barrier first so the
 * access is ordered with respect to earlier register accesses.
 */
u_int32_t
mfii_read(struct mfii_softc *sc, bus_size_t r)
{
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, r));
}
1052 
/*
 * Write a 32bit controller register, with a write barrier so the store
 * is pushed out before any later accesses to the register window.
 */
void
mfii_write(struct mfii_softc *sc, bus_size_t r, u_int32_t v)
{
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_WRITE);
}
1060 
/*
 * Allocate a single physically contiguous, page aligned, zeroed DMA
 * buffer of the given size, mapped into kernel memory and loaded into a
 * dmamap.  Returns NULL on failure; on success the buffer is released
 * with mfii_dmamem_free().
 */
struct mfii_dmamem *
mfii_dmamem_alloc(struct mfii_softc *sc, size_t size)
{
	struct mfii_dmamem *m;
	int nsegs;

	m = malloc(sizeof(*m), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (m == NULL)
		return (NULL);

	m->mdm_size = size;

	/* one segment only: hardware gets a single contiguous region */
	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &m->mdm_map) != 0)
		goto mdmfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &m->mdm_seg, 1,
	    &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &m->mdm_seg, nsegs, size, &m->mdm_kva,
	    BUS_DMA_NOWAIT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, m->mdm_map, m->mdm_kva, size, NULL,
	    BUS_DMA_NOWAIT) != 0)
		goto unmap;

	return (m);

	/* unwind in reverse order of acquisition */
unmap:
	bus_dmamem_unmap(sc->sc_dmat, m->mdm_kva, m->mdm_size);
free:
	bus_dmamem_free(sc->sc_dmat, &m->mdm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, m->mdm_map);
mdmfree:
	free(m, M_DEVBUF, sizeof *m);

	return (NULL);
}
1102 
/*
 * Release a buffer obtained from mfii_dmamem_alloc(): unload, unmap and
 * free the DMA memory, destroy the map and free the bookkeeping struct.
 */
void
mfii_dmamem_free(struct mfii_softc *sc, struct mfii_dmamem *m)
{
	bus_dmamap_unload(sc->sc_dmat, m->mdm_map);
	bus_dmamem_unmap(sc->sc_dmat, m->mdm_kva, m->mdm_size);
	bus_dmamem_free(sc->sc_dmat, &m->mdm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, m->mdm_map);
	free(m, M_DEVBUF, sizeof *m);
}
1112 
/*
 * Submit a ccb whose MFI DCMD frame is carried via the passthrough
 * interface.  The request buffer is laid out as scsi_io header, raid
 * context, then a single chain SGE pointing at the ccb's sense/mfi DMA
 * area; offsets in the header are expressed in 32bit words as the
 * hardware expects.
 */
void
mfii_dcmd_start(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
	struct mfii_sge *sge = (struct mfii_sge *)(ctx + 1);

	io->function = MFII_FUNCTION_PASSTHRU_IO;
	io->sgl_offset0 = (uint32_t *)sge - (uint32_t *)io;
	io->chain_offset = io->sgl_offset0 / 4;

	htolem64(&sge->sg_addr, ccb->ccb_sense_dva);
	htolem32(&sge->sg_len, sizeof(*ccb->ccb_sense));
	sge->sg_flags = MFII_SGE_CHAIN_ELEMENT | MFII_SGE_ADDR_IOCPLBNTA;

	ccb->ccb_req.flags = MFII_REQ_TYPE_SCSI;
	ccb->ccb_req.smid = letoh16(ccb->ccb_smid);

	mfii_start(sc, ccb);
}
1133 
/*
 * Kick off asynchronous event notification (AEN) handling at attach
 * time.  Fetches the firmware's event log info to learn the boot
 * sequence number, then arms the first EVENT_WAIT command via
 * mfii_aen_start().  On success the ccb and the event detail DMA buffer
 * are deliberately kept for the lifetime of the AEN machinery (they are
 * recycled by mfii_aen() for each subsequent event).
 */
int
mfii_aen_register(struct mfii_softc *sc)
{
	struct mfi_evt_log_info mel;
	struct mfii_ccb *ccb;
	struct mfii_dmamem *mdm;
	int rv;

	ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP);
	if (ccb == NULL) {
		printf("%s: unable to allocate ccb for aen\n", DEVNAME(sc));
		return (ENOMEM);
	}

	memset(&mel, 0, sizeof(mel));
	mfii_scrub_ccb(ccb);

	rv = mfii_do_mgmt(sc, ccb, MR_DCMD_CTRL_EVENT_GET_INFO, NULL,
	    &mel, sizeof(mel), SCSI_DATA_IN|SCSI_NOSLEEP);
	if (rv != 0) {
		scsi_io_put(&sc->sc_iopool, ccb);
		printf("%s: unable to get event info\n", DEVNAME(sc));
		return (EIO);
	}

	mdm = mfii_dmamem_alloc(sc, sizeof(struct mfi_evt_detail));
	if (mdm == NULL) {
		scsi_io_put(&sc->sc_iopool, ccb);
		printf("%s: unable to allocate event data\n", DEVNAME(sc));
		return (ENOMEM);
	}

	/* replay all the events from boot */
	mfii_aen_start(sc, ccb, mdm, lemtoh32(&mel.mel_boot_seq_num));

	return (0);
}
1171 
/*
 * Arm (or re-arm) the firmware EVENT_WAIT command: build an MFI DCMD
 * frame asking for the next event at or after sequence number "seq",
 * with the event detail delivered by DMA into "mdm".  Completion is
 * routed through mfii_aen_done().  The DMA syncs here are the PRE side;
 * mfii_aen() performs the matching POST syncs when the event arrives.
 */
void
mfii_aen_start(struct mfii_softc *sc, struct mfii_ccb *ccb,
    struct mfii_dmamem *mdm, uint32_t seq)
{
	struct mfi_dcmd_frame *dcmd = mfii_dcmd_frame(ccb);
	struct mfi_frame_header *hdr = &dcmd->mdf_header;
	union mfi_sgl *sgl = &dcmd->mdf_sgl;
	union mfi_evt_class_locale mec;

	mfii_scrub_ccb(ccb);
	mfii_dcmd_scrub(ccb);
	memset(MFII_DMA_KVA(mdm), 0, MFII_DMA_LEN(mdm));

	ccb->ccb_cookie = mdm;
	ccb->ccb_done = mfii_aen_done;
	sc->sc_aen_ccb = ccb;

	/* subscribe to everything: all locales, debug class and up */
	mec.mec_members.class = MFI_EVT_CLASS_DEBUG;
	mec.mec_members.reserved = 0;
	mec.mec_members.locale = htole16(MFI_EVT_LOCALE_ALL);

	hdr->mfh_cmd = MFI_CMD_DCMD;
	hdr->mfh_sg_count = 1;
	hdr->mfh_flags = htole16(MFI_FRAME_DIR_READ | MFI_FRAME_SGL64);
	htolem32(&hdr->mfh_data_len, MFII_DMA_LEN(mdm));
	dcmd->mdf_opcode = htole32(MR_DCMD_CTRL_EVENT_WAIT);
	htolem32(&dcmd->mdf_mbox.w[0], seq);
	htolem32(&dcmd->mdf_mbox.w[1], mec.mec_word);
	htolem64(&sgl->sg64[0].addr, MFII_DMA_DVA(mdm));
	htolem32(&sgl->sg64[0].len, MFII_DMA_LEN(mdm));

	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(mdm),
	    0, MFII_DMA_LEN(mdm), BUS_DMASYNC_PREREAD);

	mfii_dcmd_sync(sc, ccb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	mfii_dcmd_start(sc, ccb);
}
1209 
/*
 * Interrupt-side completion of the EVENT_WAIT command.  The actual
 * event processing (which may probe or detach devices) is punted to
 * the systq task so it runs in a thread with the KERNEL_LOCK.
 */
void
mfii_aen_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	KASSERT(sc->sc_aen_ccb == ccb);

	/* defer to a thread with KERNEL_LOCK so we can run autoconf */
	task_add(systq, &sc->sc_aen_task);
}
1218 
/*
 * Task context handler for an asynchronous event.  Syncs the completed
 * EVENT_WAIT DMA buffers, dispatches on the event code (physical disk
 * insert/remove/state change, logical disk create/delete), then re-arms
 * the EVENT_WAIT command at the next sequence number.
 */
void
mfii_aen(void *arg)
{
	struct mfii_softc *sc = arg;
	struct mfii_ccb *ccb = sc->sc_aen_ccb;
	struct mfii_dmamem *mdm = ccb->ccb_cookie;
	const struct mfi_evt_detail *med = MFII_DMA_KVA(mdm);
	uint32_t code;

	mfii_dcmd_sync(sc, ccb,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(mdm),
	    0, MFII_DMA_LEN(mdm), BUS_DMASYNC_POSTREAD);

	code = lemtoh32(&med->med_code);

#if 0
	log(LOG_DEBUG, "%s (seq %u, code %08x) %s\n", DEVNAME(sc),
	    lemtoh32(&med->med_seq_num), code, med->med_description);
#endif

	switch (code) {
	case MFI_EVT_PD_INSERTED_EXT:
		/* only trust events that carry the expected argument type */
		if (med->med_arg_type != MFI_EVT_ARGS_PD_ADDRESS)
			break;

		mfii_aen_pd_insert(sc, &med->args.pd_address);
		break;
 	case MFI_EVT_PD_REMOVED_EXT:
		if (med->med_arg_type != MFI_EVT_ARGS_PD_ADDRESS)
			break;

		mfii_aen_pd_remove(sc, &med->args.pd_address);
		break;

	case MFI_EVT_PD_STATE_CHANGE:
		if (med->med_arg_type != MFI_EVT_ARGS_PD_STATE)
			break;

		mfii_aen_pd_state_change(sc, &med->args.pd_state);
		break;

	case MFI_EVT_LD_CREATED:
	case MFI_EVT_LD_DELETED:
		mfii_aen_ld_update(sc);
		break;

	default:
		break;
	}

	/* wait for the next event */
	mfii_aen_start(sc, ccb, mdm, lemtoh32(&med->med_seq_num) + 1);
}
1272 
/*
 * A physical disk appeared: refresh the device handle map and let the
 * passthrough scsibus probe the new target.
 */
void
mfii_aen_pd_insert(struct mfii_softc *sc,
    const struct mfi_evtarg_pd_address *pd)
{
#if 0
	printf("%s: pd inserted ext\n", DEVNAME(sc));
	printf("%s:  device_id %04x encl_id: %04x type %x\n", DEVNAME(sc),
	    lemtoh16(&pd->device_id), lemtoh16(&pd->encl_id),
	    pd->scsi_dev_type);
	printf("%s:  connected %02x addrs %016llx %016llx\n", DEVNAME(sc),
	    pd->connected.port_bitmap, lemtoh64(&pd->sas_addr[0]),
	    lemtoh64(&pd->sas_addr[1]));
#endif

	if (mfii_dev_handles_update(sc) != 0) /* refresh map */
		return;

	scsi_probe_target(sc->sc_pd->pd_scsibus, lemtoh16(&pd->device_id));
}
1292 
/*
 * A physical disk went away: deactivate and then force-detach the
 * corresponding target on the passthrough scsibus.
 */
void
mfii_aen_pd_remove(struct mfii_softc *sc,
    const struct mfi_evtarg_pd_address *pd)
{
#if 0
	printf("%s: pd removed ext\n", DEVNAME(sc));
	printf("%s:  device_id %04x encl_id: %04x type %u\n", DEVNAME(sc),
	    lemtoh16(&pd->device_id), lemtoh16(&pd->encl_id),
	    pd->scsi_dev_type);
	printf("%s:  connected %02x addrs %016llx %016llx\n", DEVNAME(sc),
	    pd->connected.port_bitmap, lemtoh64(&pd->sas_addr[0]),
	    lemtoh64(&pd->sas_addr[1]));
#endif
	uint16_t target = lemtoh16(&pd->device_id);

	scsi_activate(sc->sc_pd->pd_scsibus, target, -1, DVACT_DEACTIVATE);

	/* the firmware will abort outstanding commands for us */

	scsi_detach_target(sc->sc_pd->pd_scsibus, target, DETACH_FORCE);
}
1314 
/*
 * A physical disk changed state.  Disks leaving MFI_PD_SYSTEM were
 * pulled or claimed for raid, so detach their passthrough target;
 * disks moving UNCONFIG_GOOD -> SYSTEM are being handed to the host,
 * so probe them.  All other transitions are ignored.
 */
void
mfii_aen_pd_state_change(struct mfii_softc *sc,
    const struct mfi_evtarg_pd_state *state)
{
	uint16_t target = lemtoh16(&state->pd.mep_device_id);

	if (state->prev_state == htole32(MFI_PD_SYSTEM) &&
	    state->new_state != htole32(MFI_PD_SYSTEM)) {
		/* it's been pulled or configured for raid */

		scsi_activate(sc->sc_pd->pd_scsibus, target, -1,
		    DVACT_DEACTIVATE);
		/* outstanding commands will simply complete or get aborted */
		scsi_detach_target(sc->sc_pd->pd_scsibus, target,
		    DETACH_FORCE);

	} else if (state->prev_state == htole32(MFI_PD_UNCONFIG_GOOD) &&
	    state->new_state == htole32(MFI_PD_SYSTEM)) {
		/* the firmware is handing the disk over */

		scsi_probe_target(sc->sc_pd->pd_scsibus, target);
	}
}
1338 
1339 void
1340 mfii_aen_ld_update(struct mfii_softc *sc)
1341 {
1342 	int i, state, target, old, nld;
1343 	int newlds[MFI_MAX_LD];
1344 
1345 	if (mfii_mgmt(sc, MR_DCMD_LD_GET_LIST, NULL, &sc->sc_ld_list,
1346 	    sizeof(sc->sc_ld_list), SCSI_DATA_IN) != 0) {
1347 		DNPRINTF(MFII_D_MISC, "%s: getting list of logical disks failed\n",
1348 		    DEVNAME(sc));
1349 		return;
1350 	}
1351 
1352 	memset(newlds, -1, sizeof(newlds));
1353 
1354 	for (i = 0; i < sc->sc_ld_list.mll_no_ld; i++) {
1355 		state = sc->sc_ld_list.mll_list[i].mll_state;
1356 		target = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
1357 		DNPRINTF(MFII_D_MISC, "%s: target %d: state %d\n",
1358 		    DEVNAME(sc), target, state);
1359 		newlds[target] = i;
1360 	}
1361 
1362 	for (i = 0; i < MFI_MAX_LD; i++) {
1363 		old = sc->sc_target_lds[i];
1364 		nld = newlds[i];
1365 
1366 		if (old == -1 && nld != -1) {
1367 			DNPRINTF(MFII_D_MISC, "%s: attaching target %d\n",
1368 			    DEVNAME(sc), i);
1369 
1370 			scsi_probe_target(sc->sc_scsibus, i);
1371 
1372 #ifndef SMALL_KERNEL
1373 			mfii_init_ld_sensor(sc, nld);
1374 			sensor_attach(&sc->sc_sensordev, &sc->sc_sensors[i]);
1375 #endif
1376 		} else if (nld == -1 && old != -1) {
1377 			DNPRINTF(MFII_D_MISC, "%s: detaching target %d\n",
1378 			    DEVNAME(sc), i);
1379 
1380 			scsi_activate(sc->sc_scsibus, i, -1,
1381 			    DVACT_DEACTIVATE);
1382 			scsi_detach_target(sc->sc_scsibus, i,
1383 			    DETACH_FORCE);
1384 #ifndef SMALL_KERNEL
1385 			sensor_detach(&sc->sc_sensordev, &sc->sc_sensors[i]);
1386 #endif
1387 		}
1388 	}
1389 
1390 	memcpy(sc->sc_target_lds, newlds, sizeof(sc->sc_target_lds));
1391 }
1392 
/*
 * Tear down the AEN machinery on detach.  Not implemented: the armed
 * EVENT_WAIT ccb and its DMA buffer are currently left outstanding.
 */
void
mfii_aen_unregister(struct mfii_softc *sc)
{
	/* XXX */
}
1398 
/*
 * Drive the firmware to MFI_STATE_READY, nudging it out of whichever
 * intermediate state it is in (clearing handshakes, acking OPERATIONAL)
 * and polling for the transition.  Each state gets its own timeout
 * budget in seconds (max_wait), polled in 100ms steps.  Returns 0 once
 * READY, 1 on fault, an unknown state, or a stuck transition.
 */
int
mfii_transition_firmware(struct mfii_softc *sc)
{
	int32_t			fw_state, cur_state;
	int			max_wait, i;

	fw_state = mfii_fw_state(sc) & MFI_STATE_MASK;

	while (fw_state != MFI_STATE_READY) {
		cur_state = fw_state;
		switch (fw_state) {
		case MFI_STATE_FAULT:
			printf("%s: firmware fault\n", DEVNAME(sc));
			return (1);
		case MFI_STATE_WAIT_HANDSHAKE:
			mfii_write(sc, MFI_SKINNY_IDB,
			    MFI_INIT_CLEAR_HANDSHAKE);
			max_wait = 2;
			break;
		case MFI_STATE_OPERATIONAL:
			mfii_write(sc, MFI_SKINNY_IDB, MFI_INIT_READY);
			max_wait = 10;
			break;
		case MFI_STATE_UNDEFINED:
		case MFI_STATE_BB_INIT:
			max_wait = 2;
			break;
		case MFI_STATE_FW_INIT:
		case MFI_STATE_DEVICE_SCAN:
		case MFI_STATE_FLUSH_CACHE:
			max_wait = 20;
			break;
		default:
			printf("%s: unknown firmware state %d\n",
			    DEVNAME(sc), fw_state);
			return (1);
		}
		/* poll every 100ms for up to max_wait seconds */
		for (i = 0; i < (max_wait * 10); i++) {
			fw_state = mfii_fw_state(sc) & MFI_STATE_MASK;
			if (fw_state == cur_state)
				DELAY(100000);
			else
				break;
		}
		if (fw_state == cur_state) {
			printf("%s: firmware stuck in state %#x\n",
			    DEVNAME(sc), fw_state);
			return (1);
		}
	}

	return (0);
}
1452 
/*
 * Fetch the controller info page (MR_DCMD_CTRL_GET_INFO) into
 * sc->sc_info and, in debug kernels, dump the interesting fields via
 * DPRINTF.  Returns the mfii_mgmt() error, or 0 on success.
 */
int
mfii_get_info(struct mfii_softc *sc)
{
	int i, rv;

	rv = mfii_mgmt(sc, MR_DCMD_CTRL_GET_INFO, NULL, &sc->sc_info,
	    sizeof(sc->sc_info), SCSI_DATA_IN|SCSI_NOSLEEP);

	if (rv != 0)
		return (rv);

	/* everything below is debug output only */
	for (i = 0; i < sc->sc_info.mci_image_component_count; i++) {
		DPRINTF("%s: active FW %s Version %s date %s time %s\n",
		    DEVNAME(sc),
		    sc->sc_info.mci_image_component[i].mic_name,
		    sc->sc_info.mci_image_component[i].mic_version,
		    sc->sc_info.mci_image_component[i].mic_build_date,
		    sc->sc_info.mci_image_component[i].mic_build_time);
	}

	for (i = 0; i < sc->sc_info.mci_pending_image_component_count; i++) {
		DPRINTF("%s: pending FW %s Version %s date %s time %s\n",
		    DEVNAME(sc),
		    sc->sc_info.mci_pending_image_component[i].mic_name,
		    sc->sc_info.mci_pending_image_component[i].mic_version,
		    sc->sc_info.mci_pending_image_component[i].mic_build_date,
		    sc->sc_info.mci_pending_image_component[i].mic_build_time);
	}

	DPRINTF("%s: max_arms %d max_spans %d max_arrs %d max_lds %d name %s\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_max_arms,
	    sc->sc_info.mci_max_spans,
	    sc->sc_info.mci_max_arrays,
	    sc->sc_info.mci_max_lds,
	    sc->sc_info.mci_product_name);

	DPRINTF("%s: serial %s present %#x fw time %d max_cmds %d max_sg %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_serial_number,
	    sc->sc_info.mci_hw_present,
	    sc->sc_info.mci_current_fw_time,
	    sc->sc_info.mci_max_cmds,
	    sc->sc_info.mci_max_sg_elements);

	DPRINTF("%s: max_rq %d lds_pres %d lds_deg %d lds_off %d pd_pres %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_max_request_size,
	    sc->sc_info.mci_lds_present,
	    sc->sc_info.mci_lds_degraded,
	    sc->sc_info.mci_lds_offline,
	    sc->sc_info.mci_pd_present);

	DPRINTF("%s: pd_dsk_prs %d pd_dsk_pred_fail %d pd_dsk_fail %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_pd_disks_present,
	    sc->sc_info.mci_pd_disks_pred_failure,
	    sc->sc_info.mci_pd_disks_failed);

	DPRINTF("%s: nvram %d mem %d flash %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_nvram_size,
	    sc->sc_info.mci_memory_size,
	    sc->sc_info.mci_flash_size);

	DPRINTF("%s: ram_cor %d ram_uncor %d clus_all %d clus_act %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_ram_correctable_errors,
	    sc->sc_info.mci_ram_uncorrectable_errors,
	    sc->sc_info.mci_cluster_allowed,
	    sc->sc_info.mci_cluster_active);

	DPRINTF("%s: max_strps_io %d raid_lvl %#x adapt_ops %#x ld_ops %#x\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_max_strips_per_io,
	    sc->sc_info.mci_raid_levels,
	    sc->sc_info.mci_adapter_ops,
	    sc->sc_info.mci_ld_ops);

	DPRINTF("%s: strp_sz_min %d strp_sz_max %d pd_ops %#x pd_mix %#x\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_stripe_sz_ops.min,
	    sc->sc_info.mci_stripe_sz_ops.max,
	    sc->sc_info.mci_pd_ops,
	    sc->sc_info.mci_pd_mix_support);

	DPRINTF("%s: ecc_bucket %d pckg_prop %s\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_ecc_bucket_count,
	    sc->sc_info.mci_package_version);

	DPRINTF("%s: sq_nm %d prd_fail_poll %d intr_thrtl %d intr_thrtl_to %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_seq_num,
	    sc->sc_info.mci_properties.mcp_pred_fail_poll_interval,
	    sc->sc_info.mci_properties.mcp_intr_throttle_cnt,
	    sc->sc_info.mci_properties.mcp_intr_throttle_timeout);

	DPRINTF("%s: rbld_rate %d patr_rd_rate %d bgi_rate %d cc_rate %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_rebuild_rate,
	    sc->sc_info.mci_properties.mcp_patrol_read_rate,
	    sc->sc_info.mci_properties.mcp_bgi_rate,
	    sc->sc_info.mci_properties.mcp_cc_rate);

	DPRINTF("%s: rc_rate %d ch_flsh %d spin_cnt %d spin_dly %d clus_en %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_recon_rate,
	    sc->sc_info.mci_properties.mcp_cache_flush_interval,
	    sc->sc_info.mci_properties.mcp_spinup_drv_cnt,
	    sc->sc_info.mci_properties.mcp_spinup_delay,
	    sc->sc_info.mci_properties.mcp_cluster_enable);

	DPRINTF("%s: coerc %d alarm %d dis_auto_rbld %d dis_bat_wrn %d ecc %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_coercion_mode,
	    sc->sc_info.mci_properties.mcp_alarm_enable,
	    sc->sc_info.mci_properties.mcp_disable_auto_rebuild,
	    sc->sc_info.mci_properties.mcp_disable_battery_warn,
	    sc->sc_info.mci_properties.mcp_ecc_bucket_size);

	DPRINTF("%s: ecc_leak %d rest_hs %d exp_encl_dev %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_ecc_bucket_leak_rate,
	    sc->sc_info.mci_properties.mcp_restore_hotspare_on_insertion,
	    sc->sc_info.mci_properties.mcp_expose_encl_devices);

	DPRINTF("%s: vendor %#x device %#x subvendor %#x subdevice %#x\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_pci.mip_vendor,
	    sc->sc_info.mci_pci.mip_device,
	    sc->sc_info.mci_pci.mip_subvendor,
	    sc->sc_info.mci_pci.mip_subdevice);

	DPRINTF("%s: type %#x port_count %d port_addr ",
	    DEVNAME(sc),
	    sc->sc_info.mci_host.mih_type,
	    sc->sc_info.mci_host.mih_port_count);

	for (i = 0; i < 8; i++)
		DPRINTF("%.0llx ", sc->sc_info.mci_host.mih_port_addr[i]);
	DPRINTF("\n");

	DPRINTF("%s: type %.x port_count %d port_addr ",
	    DEVNAME(sc),
	    sc->sc_info.mci_device.mid_type,
	    sc->sc_info.mci_device.mid_port_count);

	for (i = 0; i < 8; i++)
		DPRINTF("%.0llx ", sc->sc_info.mci_device.mid_port_addr[i]);
	DPRINTF("\n");

	return (0);
}
1607 
/*
 * Submit a legacy MFI frame via the MFA request path and busy-wait for
 * its completion by polling the frame's status byte, which the firmware
 * overwrites in place (we ask it not to post to the reply queue).  Used
 * where interrupts are not available, e.g. during IOC init.  Returns 0
 * on completion, 1 on timeout (~5s).
 */
int
mfii_mfa_poll(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	struct mfi_frame_header	*hdr = ccb->ccb_request;
	u_int64_t r;
	int to = 0, rv = 0;

#ifdef DIAGNOSTIC
	if (ccb->ccb_cookie != NULL || ccb->ccb_done != NULL)
		panic("mfii_mfa_poll called with cookie or done set");
#endif

	hdr->mfh_context = ccb->ccb_smid;
	hdr->mfh_cmd_status = MFI_STAT_INVALID_STATUS;
	hdr->mfh_flags |= htole16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);

	/* post the frame as a raw MFA request descriptor */
	r = MFII_REQ_MFA(ccb->ccb_request_dva);
	memcpy(&ccb->ccb_req, &r, sizeof(ccb->ccb_req));

	mfii_start(sc, ccb);

	for (;;) {
		/* re-sync so we observe the firmware's status update */
		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
		    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		if (hdr->mfh_cmd_status != MFI_STAT_INVALID_STATUS)
			break;

		if (to++ > 5000) { /* XXX 5 seconds busywait sucks */
			printf("%s: timeout on ccb %d\n", DEVNAME(sc),
			    ccb->ccb_smid);
			ccb->ccb_flags |= MFI_CCB_F_ERR;
			rv = 1;
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
		    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		delay(1000);
	}

	if (ccb->ccb_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap,
		    0, ccb->ccb_dmamap->dm_mapsize,
		    (ccb->ccb_direction == MFII_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
	}

	return (rv);
}
1663 
/*
 * Run a ccb to completion by polling the reply post queue instead of
 * waiting for an interrupt.  The ccb's done/cookie are temporarily
 * hijacked so mfii_poll_done() can flag completion via the local rv;
 * the original done callback is then invoked with its cookie restored.
 */
int
mfii_poll(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	void (*done)(struct mfii_softc *, struct mfii_ccb *);
	void *cookie;
	int rv = 1;

	done = ccb->ccb_done;
	cookie = ccb->ccb_cookie;

	ccb->ccb_done = mfii_poll_done;
	ccb->ccb_cookie = &rv;

	mfii_start(sc, ccb);

	do {
		delay(10);
		mfii_postq(sc);
	} while (rv == 1);

	ccb->ccb_cookie = cookie;
	done(sc, ccb);

	return (0);
}
1689 
1690 void
1691 mfii_poll_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
1692 {
1693 	int *rv = ccb->ccb_cookie;
1694 
1695 	*rv = 0;
1696 }
1697 
/*
 * Submit a ccb and sleep until it completes.  A stack-local mutex is
 * handed to mfii_exec_done() through ccb_cookie; completion is signalled
 * by clearing the cookie under that mutex and waking us.  Must be called
 * from a context that may sleep.
 */
int
mfii_exec(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	struct mutex m = MUTEX_INITIALIZER(IPL_BIO);

#ifdef DIAGNOSTIC
	if (ccb->ccb_cookie != NULL || ccb->ccb_done != NULL)
		panic("mfii_exec called with cookie or done set");
#endif

	ccb->ccb_cookie = &m;
	ccb->ccb_done = mfii_exec_done;

	mfii_start(sc, ccb);

	mtx_enter(&m);
	/* guard against spurious wakeups */
	while (ccb->ccb_cookie != NULL)
		msleep_nsec(ccb, &m, PRIBIO, "mfiiexec", INFSLP);
	mtx_leave(&m);

	return (0);
}
1720 
/*
 * Completion callback for mfii_exec(): under the submitter's mutex,
 * clear the cookie (the condition mfii_exec() sleeps on) and wake it.
 */
void
mfii_exec_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	struct mutex *m = ccb->ccb_cookie;

	mtx_enter(m);
	ccb->ccb_cookie = NULL;
	wakeup_one(ccb);
	mtx_leave(m);
}
1731 
1732 int
1733 mfii_mgmt(struct mfii_softc *sc, uint32_t opc, const union mfi_mbox *mbox,
1734     void *buf, size_t len, int flags)
1735 {
1736 	struct mfii_ccb *ccb;
1737 	int rv;
1738 
1739 	ccb = scsi_io_get(&sc->sc_iopool, flags);
1740 	if (ccb == NULL)
1741 		return (ENOMEM);
1742 
1743 	mfii_scrub_ccb(ccb);
1744 	rv = mfii_do_mgmt(sc, ccb, opc, mbox, buf, len, flags);
1745 	scsi_io_put(&sc->sc_iopool, ccb);
1746 
1747 	return (rv);
1748 }
1749 
1750 int
1751 mfii_do_mgmt(struct mfii_softc *sc, struct mfii_ccb *ccb, uint32_t opc,
1752     const union mfi_mbox *mbox, void *buf, size_t len, int flags)
1753 {
1754 	struct mpii_msg_scsi_io *io = ccb->ccb_request;
1755 	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
1756 	struct mfii_sge *sge = (struct mfii_sge *)(ctx + 1);
1757 	struct mfi_dcmd_frame *dcmd = ccb->ccb_mfi;
1758 	struct mfi_frame_header *hdr = &dcmd->mdf_header;
1759 	u_int8_t *dma_buf = NULL;
1760 	int rv = EIO;
1761 
1762 	if (cold)
1763 		flags |= SCSI_NOSLEEP;
1764 
1765 	if (buf != NULL) {
1766 		dma_buf = dma_alloc(len, PR_WAITOK);
1767 		if (dma_buf == NULL)
1768 			return (ENOMEM);
1769 	}
1770 
1771 	ccb->ccb_data = dma_buf;
1772 	ccb->ccb_len = len;
1773 	switch (flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
1774 	case SCSI_DATA_IN:
1775 		ccb->ccb_direction = MFII_DATA_IN;
1776 		hdr->mfh_flags = htole16(MFI_FRAME_DIR_READ);
1777 		break;
1778 	case SCSI_DATA_OUT:
1779 		ccb->ccb_direction = MFII_DATA_OUT;
1780 		hdr->mfh_flags = htole16(MFI_FRAME_DIR_WRITE);
1781 		memcpy(dma_buf, buf, len);
1782 		break;
1783 	case 0:
1784 		ccb->ccb_direction = MFII_DATA_NONE;
1785 		hdr->mfh_flags = htole16(MFI_FRAME_DIR_NONE);
1786 		break;
1787 	}
1788 
1789 	if (mfii_load_mfa(sc, ccb, &dcmd->mdf_sgl,
1790 	    ISSET(flags, SCSI_NOSLEEP)) != 0) {
1791 		rv = ENOMEM;
1792 		goto done;
1793 	}
1794 
1795 	hdr->mfh_cmd = MFI_CMD_DCMD;
1796 	hdr->mfh_context = ccb->ccb_smid;
1797 	hdr->mfh_data_len = htole32(len);
1798 	hdr->mfh_sg_count = len ? ccb->ccb_dmamap->dm_nsegs : 0;
1799 
1800 	dcmd->mdf_opcode = opc;
1801 	/* handle special opcodes */
1802 	if (mbox != NULL)
1803 		memcpy(&dcmd->mdf_mbox, mbox, sizeof(dcmd->mdf_mbox));
1804 
1805 	io->function = MFII_FUNCTION_PASSTHRU_IO;
1806 
1807 	if (len) {
1808 		io->sgl_offset0 = ((u_int8_t *)sge - (u_int8_t *)io) / 4;
1809 		io->chain_offset = ((u_int8_t *)sge - (u_int8_t *)io) / 16;
1810 		htolem64(&sge->sg_addr, ccb->ccb_mfi_dva);
1811 		htolem32(&sge->sg_len, MFI_FRAME_SIZE);
1812 		sge->sg_flags =
1813 		    MFII_SGE_CHAIN_ELEMENT | MFII_SGE_ADDR_IOCPLBNTA;
1814 	}
1815 
1816 	ccb->ccb_req.flags = MFII_REQ_TYPE_SCSI;
1817 	ccb->ccb_req.smid = letoh16(ccb->ccb_smid);
1818 
1819 	if (ISSET(flags, SCSI_NOSLEEP)) {
1820 		ccb->ccb_done = mfii_empty_done;
1821 		mfii_poll(sc, ccb);
1822 	} else
1823 		mfii_exec(sc, ccb);
1824 
1825 	if (hdr->mfh_cmd_status == MFI_STAT_OK) {
1826 		rv = 0;
1827 
1828 		if (ccb->ccb_direction == MFII_DATA_IN)
1829 			memcpy(buf, dma_buf, len);
1830 	}
1831 
1832 done:
1833 	if (buf != NULL)
1834 		dma_free(dma_buf, len);
1835 
1836 	return (rv);
1837 }
1838 
/*
 * No-op completion callback used for polled management commands; the
 * result is read from the MFI frame status by the submitter instead.
 */
void
mfii_empty_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	/* nothing to do */
}
1844 
/*
 * Load the ccb's data buffer into its dmamap and fill in the legacy
 * MFI 32bit scatter/gather list at "sglp".  No-op for zero-length
 * transfers.  Returns 0 on success, 1 if the dmamap load fails.
 * NOTE(review): only sg32 entries are written here — assumes the MFI
 * frame is not flagged MFI_FRAME_SGL64; confirm against callers.
 */
int
mfii_load_mfa(struct mfii_softc *sc, struct mfii_ccb *ccb,
    void *sglp, int nosleep)
{
	union mfi_sgl *sgl = sglp;
	bus_dmamap_t dmap = ccb->ccb_dmamap;
	int error;
	int i;

	if (ccb->ccb_len == 0)
		return (0);

	error = bus_dmamap_load(sc->sc_dmat, dmap,
	    ccb->ccb_data, ccb->ccb_len, NULL,
	    nosleep ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error) {
		printf("%s: error %d loading dmamap\n", DEVNAME(sc), error);
		return (1);
	}

	for (i = 0; i < dmap->dm_nsegs; i++) {
		sgl->sg32[i].addr = htole32(dmap->dm_segs[i].ds_addr);
		sgl->sg32[i].len = htole32(dmap->dm_segs[i].ds_len);
	}

	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
	    ccb->ccb_direction == MFII_DATA_OUT ?
	    BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);

	return (0);
}
1876 
/*
 * Hand a prepared ccb to the firmware by writing its 64bit request
 * descriptor to the inbound queue port.  On LP64 this is one atomic
 * 8-byte store; on 32bit systems the two halves are written low then
 * high under sc_post_mtx, with barriers, since the hardware latches
 * the descriptor on the high-word write.
 */
void
mfii_start(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	u_long *r = (u_long *)&ccb->ccb_req;

	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
	    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

#if defined(__LP64__)
	bus_space_write_raw_8(sc->sc_iot, sc->sc_ioh, MFI_IQPL, *r);
#else
	mtx_enter(&sc->sc_post_mtx);
	bus_space_write_raw_4(sc->sc_iot, sc->sc_ioh, MFI_IQPL, r[0]);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh,
	    MFI_IQPL, 8, BUS_SPACE_BARRIER_WRITE);

	bus_space_write_raw_4(sc->sc_iot, sc->sc_ioh, MFI_IQPH, r[1]);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh,
	    MFI_IQPH, 8, BUS_SPACE_BARRIER_WRITE);
	mtx_leave(&sc->sc_post_mtx);
#endif
}
1900 
/*
 * Common completion path for a ccb taken off the reply post queue:
 * sync the request frame, its SGL area and any data buffer (unloading
 * the latter), then invoke the ccb's done callback.
 */
void
mfii_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
	    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	if (ccb->ccb_sgl_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_sgl),
		    ccb->ccb_sgl_offset, ccb->ccb_sgl_len,
		    BUS_DMASYNC_POSTWRITE);
	}

	if (ccb->ccb_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap,
		    0, ccb->ccb_dmamap->dm_mapsize,
		    (ccb->ccb_direction == MFII_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
	}

	ccb->ccb_done(sc, ccb);
}
1925 
1926 int
1927 mfii_initialise_firmware(struct mfii_softc *sc)
1928 {
1929 	struct mpii_msg_iocinit_request *iiq;
1930 	struct mfii_dmamem *m;
1931 	struct mfii_ccb *ccb;
1932 	struct mfi_init_frame *init;
1933 	int rv;
1934 
1935 	m = mfii_dmamem_alloc(sc, sizeof(*iiq));
1936 	if (m == NULL)
1937 		return (1);
1938 
1939 	iiq = MFII_DMA_KVA(m);
1940 	memset(iiq, 0, sizeof(*iiq));
1941 
1942 	iiq->function = MPII_FUNCTION_IOC_INIT;
1943 	iiq->whoinit = MPII_WHOINIT_HOST_DRIVER;
1944 
1945 	iiq->msg_version_maj = 0x02;
1946 	iiq->msg_version_min = 0x00;
1947 	iiq->hdr_version_unit = 0x10;
1948 	iiq->hdr_version_dev = 0x0;
1949 
1950 	iiq->system_request_frame_size = htole16(MFII_REQUEST_SIZE / 4);
1951 
1952 	iiq->reply_descriptor_post_queue_depth =
1953 	    htole16(sc->sc_reply_postq_depth);
1954 	iiq->reply_free_queue_depth = htole16(0);
1955 
1956 	htolem32(&iiq->sense_buffer_address_high,
1957 	    MFII_DMA_DVA(sc->sc_sense) >> 32);
1958 
1959 	htolem32(&iiq->reply_descriptor_post_queue_address_lo,
1960 	    MFII_DMA_DVA(sc->sc_reply_postq));
1961 	htolem32(&iiq->reply_descriptor_post_queue_address_hi,
1962 	    MFII_DMA_DVA(sc->sc_reply_postq) >> 32);
1963 
1964 	htolem32(&iiq->system_request_frame_base_address_lo,
1965 	    MFII_DMA_DVA(sc->sc_requests));
1966 	htolem32(&iiq->system_request_frame_base_address_hi,
1967 	    MFII_DMA_DVA(sc->sc_requests) >> 32);
1968 
1969 	iiq->timestamp = htole64(getuptime());
1970 
1971 	ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP);
1972 	if (ccb == NULL) {
1973 		/* shouldn't ever run out of ccbs during attach */
1974 		return (1);
1975 	}
1976 	mfii_scrub_ccb(ccb);
1977 	init = ccb->ccb_request;
1978 
1979 	init->mif_header.mfh_cmd = MFI_CMD_INIT;
1980 	init->mif_header.mfh_data_len = htole32(sizeof(*iiq));
1981 	init->mif_qinfo_new_addr = htole64(MFII_DMA_DVA(m));
1982 
1983 	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_reply_postq),
1984 	    0, MFII_DMA_LEN(sc->sc_reply_postq),
1985 	    BUS_DMASYNC_PREREAD);
1986 
1987 	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(m),
1988 	    0, sizeof(*iiq), BUS_DMASYNC_PREREAD);
1989 
1990 	rv = mfii_mfa_poll(sc, ccb);
1991 
1992 	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(m),
1993 	    0, sizeof(*iiq), BUS_DMASYNC_POSTREAD);
1994 
1995 	scsi_io_put(&sc->sc_iopool, ccb);
1996 	mfii_dmamem_free(sc, m);
1997 
1998 	return (rv);
1999 }
2000 
/*
 * Check whether the controller asserted an interrupt for us.  Bit 0 of
 * the outbound status register is acknowledged by writing the status
 * back; otherwise the MFII_OSTS_INTR_VALID bits decide if the interrupt
 * is ours.  Returns nonzero when we should service the reply queue.
 */
int
mfii_my_intr(struct mfii_softc *sc)
{
	u_int32_t status;

	status = mfii_read(sc, MFI_OSTS);
	if (ISSET(status, 0x1)) {
		mfii_write(sc, MFI_OSTS, status);
		return (1);
	}

	return (ISSET(status, MFII_OSTS_INTR_VALID) ? 1 : 0);
}
2014 
/*
 * PCI interrupt handler: if the interrupt is ours, drain the reply post
 * queue.  Returns 1 when handled, 0 otherwise.
 */
int
mfii_intr(void *arg)
{
	struct mfii_softc *sc = arg;

	if (mfii_my_intr(sc)) {
		mfii_postq(sc);
		return (1);
	}

	return (0);
}
2027 
/*
 * Drain the reply post queue.  Under sc_reply_postq_mtx, walk the ring
 * from the current index collecting completed ccbs onto a local list,
 * resetting each consumed descriptor to the 0xff "unused" pattern, then
 * tell the hardware how far we got via the MFII_RPI register.  The
 * ccbs' done paths are run after the mutex is dropped so they may take
 * other locks or resubmit commands.
 */
void
mfii_postq(struct mfii_softc *sc)
{
	struct mfii_ccb_list ccbs = SIMPLEQ_HEAD_INITIALIZER(ccbs);
	struct mpii_reply_descr *postq = MFII_DMA_KVA(sc->sc_reply_postq);
	struct mpii_reply_descr *rdp;
	struct mfii_ccb *ccb;
	int rpi = 0;

	mtx_enter(&sc->sc_reply_postq_mtx);

	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_reply_postq),
	    0, MFII_DMA_LEN(sc->sc_reply_postq),
	    BUS_DMASYNC_POSTREAD);

	for (;;) {
		rdp = &postq[sc->sc_reply_postq_index];
		if ((rdp->reply_flags & MPII_REPLY_DESCR_TYPE_MASK) ==
		    MPII_REPLY_DESCR_UNUSED)
			break;
		if (rdp->data == 0xffffffff) {
			/*
			 * ioc is still writing to the reply post queue
			 * race condition - bail!
			 */
			break;
		}

		/* smid is 1-based; 0 means unused */
		ccb = &sc->sc_ccb[letoh16(rdp->smid) - 1];
		SIMPLEQ_INSERT_TAIL(&ccbs, ccb, ccb_link);
		memset(rdp, 0xff, sizeof(*rdp));

		sc->sc_reply_postq_index++;
		sc->sc_reply_postq_index %= sc->sc_reply_postq_depth;
		rpi = 1;
	}

	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_reply_postq),
	    0, MFII_DMA_LEN(sc->sc_reply_postq),
	    BUS_DMASYNC_PREREAD);

	if (rpi)
		mfii_write(sc, MFII_RPI, sc->sc_reply_postq_index);

	mtx_leave(&sc->sc_reply_postq_mtx);

	while ((ccb = SIMPLEQ_FIRST(&ccbs)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&ccbs, ccb_link);
		mfii_done(sc, ccb);
	}
}
2079 
/*
 * Queue a SCSI command for a logical disk.  Reads and writes go through
 * the LD I/O request path; all other opcodes are passed through as raw
 * CDBs.
 */
void
mfii_scsi_cmd(struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct mfii_softc *sc = link->bus->sb_adapter_softc;
	struct mfii_ccb *ccb = xs->io;

	mfii_scrub_ccb(ccb);
	ccb->ccb_cookie = xs;
	ccb->ccb_done = mfii_scsi_cmd_done;
	ccb->ccb_data = xs->data;
	ccb->ccb_len = xs->datalen;

	timeout_set(&xs->stimeout, mfii_scsi_cmd_tmo, xs);

	switch (xs->cmd.opcode) {
	case READ_COMMAND:
	case READ_10:
	case READ_12:
	case READ_16:
	case WRITE_COMMAND:
	case WRITE_10:
	case WRITE_12:
	case WRITE_16:
		/* i/o fast path */
		if (mfii_scsi_cmd_io(sc, xs) != 0)
			goto stuffup;

		break;

	default:
		/* everything else goes out as a plain cdb */
		if (mfii_scsi_cmd_cdb(sc, xs) != 0)
			goto stuffup;
		break;
	}

	xs->error = XS_NOERROR;
	xs->resid = 0;

	if (ISSET(xs->flags, SCSI_POLL)) {
		/* polled commands complete synchronously, no timeout */
		if (mfii_poll(sc, ccb) != 0)
			goto stuffup;
		return;
	}

	ccb->ccb_refcnt = 2; /* one for the chip, one for the timeout */
	timeout_add_msec(&xs->stimeout, xs->timeout);
	mfii_start(sc, ccb);

	return;

stuffup:
	xs->error = XS_DRIVER_STUFFUP;
	scsi_done(xs);
}
2134 
/*
 * Completion handler for logical disk and passthrough commands.
 * Translates the RAID context status into a scsi_xfer error and
 * finishes the xfer once the outstanding references are dropped.
 */
void
mfii_scsi_cmd_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	struct scsi_xfer *xs = ccb->ccb_cookie;
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
	u_int refs = 1;

	/*
	 * if the timeout was still pending we cancelled it here, so drop
	 * its reference along with the chip's.
	 */
	if (timeout_del(&xs->stimeout))
		refs = 2;

	switch (ctx->status) {
	case MFI_STAT_OK:
		break;

	case MFI_STAT_SCSI_DONE_WITH_ERROR:
		/* firmware left sense data in the per-ccb sense buffer */
		xs->error = XS_SENSE;
		memset(&xs->sense, 0, sizeof(xs->sense));
		memcpy(&xs->sense, ccb->ccb_sense, sizeof(xs->sense));
		break;

	case MFI_STAT_LD_OFFLINE:
	case MFI_STAT_DEVICE_NOT_FOUND:
		xs->error = XS_SELTIMEOUT;
		break;

	default:
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}

	/* last reference completes the xfer */
	if (atomic_sub_int_nv(&ccb->ccb_refcnt, refs) == 0)
		scsi_done(xs);
}
2169 
2170 int
2171 mfii_scsi_ioctl(struct scsi_link *link, u_long cmd, caddr_t addr, int flag)
2172 {
2173 	struct mfii_softc	*sc = link->bus->sb_adapter_softc;
2174 
2175 	DNPRINTF(MFII_D_IOCTL, "%s: mfii_scsi_ioctl\n", DEVNAME(sc));
2176 
2177 	switch (cmd) {
2178 	case DIOCGCACHE:
2179 	case DIOCSCACHE:
2180 		return (mfii_ioctl_cache(link, cmd, (struct dk_cache *)addr));
2181 		break;
2182 
2183 	default:
2184 		if (sc->sc_ioctl)
2185 			return (sc->sc_ioctl(&sc->sc_dev, cmd, addr));
2186 		break;
2187 	}
2188 
2189 	return (ENOTTY);
2190 }
2191 
/*
 * Get or set the cache policy for the logical disk behind `link`.
 * DIOCGCACHE reports the current write/read cache state; DIOCSCACHE
 * updates it via MR_DCMD_LD_SET_PROPERTIES.  Controllers without
 * onboard cache memory only expose the per-disk write cache.
 */
int
mfii_ioctl_cache(struct scsi_link *link, u_long cmd,  struct dk_cache *dc)
{
	struct mfii_softc	*sc = link->bus->sb_adapter_softc;
	int			 rv, wrenable, rdenable;
	struct mfi_ld_prop	 ldp;
	union mfi_mbox		 mbox;

	if (mfii_get_info(sc)) {
		rv = EIO;
		goto done;
	}

	/* no logical disk behind this target */
	if (sc->sc_target_lds[link->target] == -1) {
		rv = EIO;
		goto done;
	}

	memset(&mbox, 0, sizeof(mbox));
	mbox.b[0] = link->target;
	rv = mfii_mgmt(sc, MR_DCMD_LD_GET_PROPERTIES, &mbox, &ldp, sizeof(ldp),
	    SCSI_DATA_IN);
	if (rv != 0)
		goto done;

	if (sc->sc_info.mci_memory_size > 0) {
		/* controller has cache memory: use the LD cache policy */
		wrenable = ISSET(ldp.mlp_cur_cache_policy,
		    MR_LD_CACHE_ALLOW_WRITE_CACHE)? 1 : 0;
		rdenable = ISSET(ldp.mlp_cur_cache_policy,
		    MR_LD_CACHE_ALLOW_READ_CACHE)? 1 : 0;
	} else {
		/* no controller cache: only the disk write cache applies */
		wrenable = ISSET(ldp.mlp_diskcache_policy,
		    MR_LD_DISK_CACHE_ENABLE)? 1 : 0;
		rdenable = 0;
	}

	if (cmd == DIOCGCACHE) {
		dc->wrcache = wrenable;
		dc->rdcache = rdenable;
		goto done;
	} /* else DIOCSCACHE */

	/* nothing to do if the requested state matches the current one */
	if (((dc->wrcache) ? 1 : 0) == wrenable &&
	    ((dc->rdcache) ? 1 : 0) == rdenable)
		goto done;

	/* identify the logical disk for the set-properties command */
	memset(&mbox, 0, sizeof(mbox));
	mbox.b[0] = ldp.mlp_ld.mld_target;
	mbox.b[1] = ldp.mlp_ld.mld_res;
	mbox.s[1] = ldp.mlp_ld.mld_seq;

	if (sc->sc_info.mci_memory_size > 0) {
		if (dc->rdcache)
			SET(ldp.mlp_cur_cache_policy,
			    MR_LD_CACHE_ALLOW_READ_CACHE);
		else
			CLR(ldp.mlp_cur_cache_policy,
			    MR_LD_CACHE_ALLOW_READ_CACHE);
		if (dc->wrcache)
			SET(ldp.mlp_cur_cache_policy,
			    MR_LD_CACHE_ALLOW_WRITE_CACHE);
		else
			CLR(ldp.mlp_cur_cache_policy,
			    MR_LD_CACHE_ALLOW_WRITE_CACHE);
	} else {
		/* read cache cannot be enabled without controller memory */
		if (dc->rdcache) {
			rv = EOPNOTSUPP;
			goto done;
		}
		if (dc->wrcache)
			ldp.mlp_diskcache_policy = MR_LD_DISK_CACHE_ENABLE;
		else
			ldp.mlp_diskcache_policy = MR_LD_DISK_CACHE_DISABLE;
	}

	rv = mfii_mgmt(sc, MR_DCMD_LD_SET_PROPERTIES, &mbox, &ldp, sizeof(ldp),
	    SCSI_DATA_OUT);
done:
	return (rv);
}
2272 
/*
 * Build an LD I/O request (read/write fast path) for a logical disk.
 * Returns 0 on success, non-zero if the data buffer could not be
 * mapped for DMA.
 */
int
mfii_scsi_cmd_io(struct mfii_softc *sc, struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct mfii_ccb *ccb = xs->io;
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	/* the RAID context sits immediately after the MPII io frame */
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
	int segs;

	io->dev_handle = htole16(link->target);
	io->function = MFII_FUNCTION_LDIO_REQUEST;
	io->sense_buffer_low_address = htole32(ccb->ccb_sense_dva);
	io->sgl_flags = htole16(0x02); /* XXX */
	io->sense_buffer_length = sizeof(xs->sense);
	/* sgl offset is counted in 32-bit words */
	io->sgl_offset0 = (sizeof(*io) + sizeof(*ctx)) / 4;
	io->data_length = htole32(xs->datalen);
	io->io_flags = htole16(xs->cmdlen);
	switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
	case SCSI_DATA_IN:
		ccb->ccb_direction = MFII_DATA_IN;
		io->direction = MPII_SCSIIO_DIR_READ;
		break;
	case SCSI_DATA_OUT:
		ccb->ccb_direction = MFII_DATA_OUT;
		io->direction = MPII_SCSIIO_DIR_WRITE;
		break;
	default:
		ccb->ccb_direction = MFII_DATA_NONE;
		io->direction = MPII_SCSIIO_DIR_NONE;
		break;
	}
	memcpy(io->cdb, &xs->cmd, xs->cmdlen);

	ctx->type_nseg = sc->sc_iop->ldio_ctx_type_nseg;
	ctx->timeout_value = htole16(0x14); /* XXX */
	ctx->reg_lock_flags = htole16(sc->sc_iop->ldio_ctx_reg_lock_flags);
	ctx->virtual_disk_target_id = htole16(link->target);

	/* the SGL starts right after the RAID context */
	if (mfii_load_ccb(sc, ccb, ctx + 1,
	    ISSET(xs->flags, SCSI_NOSLEEP)) != 0)
		return (1);

	segs = (ccb->ccb_len == 0) ? 0 : ccb->ccb_dmamap->dm_nsegs;
	/* where the segment count lives depends on the controller gen */
	switch (sc->sc_iop->num_sge_loc) {
	case MFII_IOP_NUM_SGE_LOC_ORIG:
		ctx->num_sge = segs;
		break;
	case MFII_IOP_NUM_SGE_LOC_35:
		/* 12 bit field, but we're only using the lower 8 */
		ctx->span_arm = segs;
		break;
	}

	ccb->ccb_req.flags = sc->sc_iop->ldio_req_type;
	ccb->ccb_req.smid = letoh16(ccb->ccb_smid);

	return (0);
}
2331 
/*
 * Build a passthrough CDB request for a logical disk (any opcode that
 * is not on the read/write fast path).  Returns 0 on success, non-zero
 * if the data buffer could not be mapped for DMA.
 */
int
mfii_scsi_cmd_cdb(struct mfii_softc *sc, struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct mfii_ccb *ccb = xs->io;
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	/* the RAID context sits immediately after the MPII io frame */
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);

	io->dev_handle = htole16(link->target);
	io->function = MFII_FUNCTION_LDIO_REQUEST;
	io->sense_buffer_low_address = htole32(ccb->ccb_sense_dva);
	io->sgl_flags = htole16(0x02); /* XXX */
	io->sense_buffer_length = sizeof(xs->sense);
	/* sgl offset is counted in 32-bit words */
	io->sgl_offset0 = (sizeof(*io) + sizeof(*ctx)) / 4;
	io->data_length = htole32(xs->datalen);
	io->io_flags = htole16(xs->cmdlen);
	io->lun[0] = htobe16(link->lun);
	switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
	case SCSI_DATA_IN:
		ccb->ccb_direction = MFII_DATA_IN;
		io->direction = MPII_SCSIIO_DIR_READ;
		break;
	case SCSI_DATA_OUT:
		ccb->ccb_direction = MFII_DATA_OUT;
		io->direction = MPII_SCSIIO_DIR_WRITE;
		break;
	default:
		ccb->ccb_direction = MFII_DATA_NONE;
		io->direction = MPII_SCSIIO_DIR_NONE;
		break;
	}
	memcpy(io->cdb, &xs->cmd, xs->cmdlen);

	ctx->virtual_disk_target_id = htole16(link->target);

	/* the SGL starts right after the RAID context */
	if (mfii_load_ccb(sc, ccb, ctx + 1,
	    ISSET(xs->flags, SCSI_NOSLEEP)) != 0)
		return (1);

	ctx->num_sge = (ccb->ccb_len == 0) ? 0 : ccb->ccb_dmamap->dm_nsegs;

	ccb->ccb_req.flags = MFII_REQ_TYPE_SCSI;
	ccb->ccb_req.smid = letoh16(ccb->ccb_smid);

	return (0);
}
2378 
/*
 * Queue a SCSI command for a physical (passthrough/JBOD) disk.  All
 * opcodes are sent as raw CDBs via mfii_pd_scsi_cmd_cdb().
 */
void
mfii_pd_scsi_cmd(struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct mfii_softc *sc = link->bus->sb_adapter_softc;
	struct mfii_ccb *ccb = xs->io;

	mfii_scrub_ccb(ccb);
	ccb->ccb_cookie = xs;
	ccb->ccb_done = mfii_scsi_cmd_done;
	ccb->ccb_data = xs->data;
	ccb->ccb_len = xs->datalen;

	timeout_set(&xs->stimeout, mfii_scsi_cmd_tmo, xs);

	/* the cdb builder returns an XS_* error code directly */
	xs->error = mfii_pd_scsi_cmd_cdb(sc, xs);
	if (xs->error != XS_NOERROR)
		goto done;

	xs->resid = 0;

	if (ISSET(xs->flags, SCSI_POLL)) {
		/* polled commands complete synchronously, no timeout */
		if (mfii_poll(sc, ccb) != 0)
			goto stuffup;
		return;
	}

	ccb->ccb_refcnt = 2; /* one for the chip, one for the timeout */
	timeout_add_msec(&xs->stimeout, xs->timeout);
	mfii_start(sc, ccb);

	return;

stuffup:
	xs->error = XS_DRIVER_STUFFUP;
done:
	scsi_done(xs);
}
2417 
2418 int
2419 mfii_pd_scsi_probe(struct scsi_link *link)
2420 {
2421 	struct mfii_softc *sc = link->bus->sb_adapter_softc;
2422 	struct mfi_pd_details mpd;
2423 	union mfi_mbox mbox;
2424 	int rv;
2425 
2426 	if (link->lun > 0)
2427 		return (0);
2428 
2429 	memset(&mbox, 0, sizeof(mbox));
2430 	mbox.s[0] = htole16(link->target);
2431 
2432 	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, &mpd, sizeof(mpd),
2433 	    SCSI_DATA_IN|SCSI_NOSLEEP);
2434 	if (rv != 0)
2435 		return (EIO);
2436 
2437 	if (mpd.mpd_fw_state != htole16(MFI_PD_SYSTEM))
2438 		return (ENXIO);
2439 
2440 	return (0);
2441 }
2442 
/*
 * Build a passthrough CDB request for a physical disk.  Returns an
 * XS_* code: XS_SELTIMEOUT if the device handle is gone,
 * XS_DRIVER_STUFFUP if the data buffer could not be mapped, otherwise
 * XS_NOERROR.
 */
int
mfii_pd_scsi_cmd_cdb(struct mfii_softc *sc, struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct mfii_ccb *ccb = xs->io;
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	/* the RAID context sits immediately after the MPII io frame */
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
	uint16_t dev_handle;

	dev_handle = mfii_dev_handle(sc, link->target);
	if (dev_handle == htole16(0xffff))
		return (XS_SELTIMEOUT);

	io->dev_handle = dev_handle;
	io->function = 0;
	io->sense_buffer_low_address = htole32(ccb->ccb_sense_dva);
	io->sgl_flags = htole16(0x02); /* XXX */
	io->sense_buffer_length = sizeof(xs->sense);
	/* sgl offset is counted in 32-bit words */
	io->sgl_offset0 = (sizeof(*io) + sizeof(*ctx)) / 4;
	io->data_length = htole32(xs->datalen);
	io->io_flags = htole16(xs->cmdlen);
	io->lun[0] = htobe16(link->lun);
	switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
	case SCSI_DATA_IN:
		ccb->ccb_direction = MFII_DATA_IN;
		io->direction = MPII_SCSIIO_DIR_READ;
		break;
	case SCSI_DATA_OUT:
		ccb->ccb_direction = MFII_DATA_OUT;
		io->direction = MPII_SCSIIO_DIR_WRITE;
		break;
	default:
		ccb->ccb_direction = MFII_DATA_NONE;
		io->direction = MPII_SCSIIO_DIR_NONE;
		break;
	}
	memcpy(io->cdb, &xs->cmd, xs->cmdlen);

	ctx->virtual_disk_target_id = htole16(link->target);
	/* mark the request as system physical disk i/o */
	ctx->raid_flags = MFII_RAID_CTX_IO_TYPE_SYSPD;
	ctx->timeout_value = sc->sc_pd->pd_timeout;

	/* the SGL starts right after the RAID context */
	if (mfii_load_ccb(sc, ccb, ctx + 1,
	    ISSET(xs->flags, SCSI_NOSLEEP)) != 0)
		return (XS_DRIVER_STUFFUP);

	ctx->num_sge = (ccb->ccb_len == 0) ? 0 : ccb->ccb_dmamap->dm_nsegs;

	ccb->ccb_req.flags = MFII_REQ_TYPE_HI_PRI;
	ccb->ccb_req.smid = letoh16(ccb->ccb_smid);
	ccb->ccb_req.dev_handle = dev_handle;

	return (XS_NOERROR);
}
2497 
/*
 * Map the ccb's data buffer for DMA and build its scatter/gather list
 * at `sglp` (inside the request frame).  If the segments do not all fit
 * in the frame, the tail of the list is spilled into the ccb's
 * preallocated external SGL and linked with a chain element.  Returns
 * 0 on success, 1 if the dmamap load failed.
 */
int
mfii_load_ccb(struct mfii_softc *sc, struct mfii_ccb *ccb, void *sglp,
    int nosleep)
{
	struct mpii_msg_request *req = ccb->ccb_request;
	struct mfii_sge *sge = NULL, *nsge = sglp;
	struct mfii_sge *ce = NULL;
	bus_dmamap_t dmap = ccb->ccb_dmamap;
	u_int space;
	int i;

	int error;

	/* nothing to map for zero-length transfers */
	if (ccb->ccb_len == 0)
		return (0);

	error = bus_dmamap_load(sc->sc_dmat, dmap,
	    ccb->ccb_data, ccb->ccb_len, NULL,
	    nosleep ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error) {
		printf("%s: error %d loading dmamap\n", DEVNAME(sc), error);
		return (1);
	}

	/* how many sge slots are left inside the request frame */
	space = (MFII_REQUEST_SIZE - ((u_int8_t *)nsge - (u_int8_t *)req)) /
	    sizeof(*nsge);
	if (dmap->dm_nsegs > space) {
		/* reserve the last in-frame slot for the chain element */
		space--;

		ccb->ccb_sgl_len = (dmap->dm_nsegs - space) * sizeof(*nsge);
		memset(ccb->ccb_sgl, 0, ccb->ccb_sgl_len);

		/* chain element points at the external sgl */
		ce = nsge + space;
		ce->sg_addr = htole64(ccb->ccb_sgl_dva);
		ce->sg_len = htole32(ccb->ccb_sgl_len);
		ce->sg_flags = sc->sc_iop->sge_flag_chain;

		/* chain offset is in units of 16 bytes */
		req->chain_offset = ((u_int8_t *)ce - (u_int8_t *)req) / 16;
	}

	for (i = 0; i < dmap->dm_nsegs; i++) {
		/* hit the chain element: continue in the external sgl */
		if (nsge == ce)
			nsge = ccb->ccb_sgl;

		sge = nsge;

		sge->sg_addr = htole64(dmap->dm_segs[i].ds_addr);
		sge->sg_len = htole32(dmap->dm_segs[i].ds_len);
		sge->sg_flags = MFII_SGE_ADDR_SYSTEM;

		nsge = sge + 1;
	}
	/* mark the final sge as end-of-list */
	sge->sg_flags |= sc->sc_iop->sge_flag_eol;

	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
	    ccb->ccb_direction == MFII_DATA_OUT ?
	    BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);

	/* flush the external sgl if we used it */
	if (ccb->ccb_sgl_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_sgl),
		    ccb->ccb_sgl_offset, ccb->ccb_sgl_len,
		    BUS_DMASYNC_PREWRITE);
	}

	return (0);
}
2564 
2565 void
2566 mfii_scsi_cmd_tmo(void *xsp)
2567 {
2568 	struct scsi_xfer *xs = xsp;
2569 	struct scsi_link *link = xs->sc_link;
2570 	struct mfii_softc *sc = link->bus->sb_adapter_softc;
2571 	struct mfii_ccb *ccb = xs->io;
2572 
2573 	mtx_enter(&sc->sc_abort_mtx);
2574 	SIMPLEQ_INSERT_TAIL(&sc->sc_abort_list, ccb, ccb_link);
2575 	mtx_leave(&sc->sc_abort_mtx);
2576 
2577 	task_add(systqmp, &sc->sc_abort_task);
2578 }
2579 
/*
 * Task context handler for timed out commands: splice the abort list
 * off under the mutex, then issue a task management abort for each
 * queued ccb.
 */
void
mfii_abort_task(void *scp)
{
	struct mfii_softc *sc = scp;
	struct mfii_ccb *list;

	/* take the whole list in one go so the mutex is held briefly */
	mtx_enter(&sc->sc_abort_mtx);
	list = SIMPLEQ_FIRST(&sc->sc_abort_list);
	SIMPLEQ_INIT(&sc->sc_abort_list);
	mtx_leave(&sc->sc_abort_mtx);

	while (list != NULL) {
		struct mfii_ccb *ccb = list;
		struct scsi_xfer *xs = ccb->ccb_cookie;
		struct scsi_link *link = xs->sc_link;

		uint16_t dev_handle;
		struct mfii_ccb *accb;

		list = SIMPLEQ_NEXT(ccb, ccb_link);

		dev_handle = mfii_dev_handle(sc, link->target);
		if (dev_handle == htole16(0xffff)) {
			/* device is gone */
			if (atomic_dec_int_nv(&ccb->ccb_refcnt) == 0)
				scsi_done(xs);
			continue;
		}

		/* get a ccb for the abort request itself */
		accb = scsi_io_get(&sc->sc_iopool, 0);
		mfii_scrub_ccb(accb);
		mfii_abort(sc, accb, dev_handle, ccb->ccb_smid,
		    MPII_SCSI_TASK_ABORT_TASK,
		    htole32(MFII_TASK_MGMT_FLAGS_PD));

		accb->ccb_cookie = ccb;
		accb->ccb_done = mfii_scsi_cmd_abort_done;

		mfii_start(sc, accb);
	}
}
2621 
2622 void
2623 mfii_abort(struct mfii_softc *sc, struct mfii_ccb *accb, uint16_t dev_handle,
2624     uint16_t smid, uint8_t type, uint32_t flags)
2625 {
2626 	struct mfii_task_mgmt *msg;
2627 	struct mpii_msg_scsi_task_request *req;
2628 
2629 	msg = accb->ccb_request;
2630 	req = &msg->mpii_request;
2631 	req->dev_handle = dev_handle;
2632 	req->function = MPII_FUNCTION_SCSI_TASK_MGMT;
2633 	req->task_type = type;
2634 	htolem16(&req->task_mid, smid);
2635 	msg->flags = flags;
2636 
2637 	accb->ccb_req.flags = MFII_REQ_TYPE_HI_PRI;
2638 	accb->ccb_req.smid = letoh16(accb->ccb_smid);
2639 }
2640 
/*
 * Completion handler for an abort request.  The abort ccb's cookie is
 * the ccb of the original (timed out) command, whose cookie in turn is
 * the scsi_xfer.
 */
void
mfii_scsi_cmd_abort_done(struct mfii_softc *sc, struct mfii_ccb *accb)
{
	struct mfii_ccb *ccb = accb->ccb_cookie;
	struct scsi_xfer *xs = ccb->ccb_cookie;

	/* XXX check accb completion? */

	/* give the abort ccb back before finishing the original command */
	scsi_io_put(&sc->sc_iopool, accb);

	/* drop the timeout's reference; last one completes the xfer */
	if (atomic_dec_int_nv(&ccb->ccb_refcnt) == 0)
		scsi_done(xs);
}
2654 
2655 void *
2656 mfii_get_ccb(void *cookie)
2657 {
2658 	struct mfii_softc *sc = cookie;
2659 	struct mfii_ccb *ccb;
2660 
2661 	mtx_enter(&sc->sc_ccb_mtx);
2662 	ccb = SIMPLEQ_FIRST(&sc->sc_ccb_freeq);
2663 	if (ccb != NULL)
2664 		SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_freeq, ccb_link);
2665 	mtx_leave(&sc->sc_ccb_mtx);
2666 
2667 	return (ccb);
2668 }
2669 
/*
 * Reset all per-command state in a ccb before it is reused, and clear
 * the request descriptor and the hardware-visible request/MFI frames.
 */
void
mfii_scrub_ccb(struct mfii_ccb *ccb)
{
	ccb->ccb_cookie = NULL;
	ccb->ccb_done = NULL;
	ccb->ccb_flags = 0;
	ccb->ccb_data = NULL;
	ccb->ccb_direction = 0;
	ccb->ccb_len = 0;
	ccb->ccb_sgl_len = 0;
	/* a fresh ccb starts with a single reference */
	ccb->ccb_refcnt = 1;

	memset(&ccb->ccb_req, 0, sizeof(ccb->ccb_req));
	memset(ccb->ccb_request, 0, MFII_REQUEST_SIZE);
	memset(ccb->ccb_mfi, 0, MFI_FRAME_SIZE);
}
2686 
/*
 * Return a ccb to the free list.  Used as the iopool put routine.
 */
void
mfii_put_ccb(void *cookie, void *io)
{
	struct mfii_softc *sc = cookie;
	struct mfii_ccb *ccb = io;

	mtx_enter(&sc->sc_ccb_mtx);
	SIMPLEQ_INSERT_HEAD(&sc->sc_ccb_freeq, ccb, ccb_link);
	mtx_leave(&sc->sc_ccb_mtx);
}
2697 
/*
 * Allocate the ccb array and carve each ccb's slice out of the shared
 * DMA-able request, MFI frame, sense and SGL areas.  Returns 0 on
 * success, 1 if a dmamap could not be created (all previously created
 * maps and the ccb array are freed).
 */
int
mfii_init_ccb(struct mfii_softc *sc)
{
	struct mfii_ccb *ccb;
	u_int8_t *request = MFII_DMA_KVA(sc->sc_requests);
	u_int8_t *mfi = MFII_DMA_KVA(sc->sc_mfi);
	u_int8_t *sense = MFII_DMA_KVA(sc->sc_sense);
	u_int8_t *sgl = MFII_DMA_KVA(sc->sc_sgl);
	u_int i;
	int error;

	sc->sc_ccb = mallocarray(sc->sc_max_cmds, sizeof(struct mfii_ccb),
	    M_DEVBUF, M_WAITOK|M_ZERO);

	for (i = 0; i < sc->sc_max_cmds; i++) {
		ccb = &sc->sc_ccb[i];

		/* create a dma map for transfer */
		error = bus_dmamap_create(sc->sc_dmat,
		    MAXPHYS, sc->sc_max_sgl, MAXPHYS, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->ccb_dmamap);
		if (error) {
			printf("%s: cannot create ccb dmamap (%d)\n",
			    DEVNAME(sc), error);
			goto destroy;
		}

		/* select i + 1'th request. 0 is reserved for events */
		ccb->ccb_smid = i + 1;
		ccb->ccb_request_offset = MFII_REQUEST_SIZE * (i + 1);
		ccb->ccb_request = request + ccb->ccb_request_offset;
		ccb->ccb_request_dva = MFII_DMA_DVA(sc->sc_requests) +
		    ccb->ccb_request_offset;

		/* select i'th MFI command frame */
		ccb->ccb_mfi_offset = MFI_FRAME_SIZE * i;
		ccb->ccb_mfi = mfi + ccb->ccb_mfi_offset;
		ccb->ccb_mfi_dva = MFII_DMA_DVA(sc->sc_mfi) +
		    ccb->ccb_mfi_offset;

		/* select i'th sense */
		ccb->ccb_sense_offset = MFI_SENSE_SIZE * i;
		ccb->ccb_sense = (struct mfi_sense *)(sense +
		    ccb->ccb_sense_offset);
		ccb->ccb_sense_dva = MFII_DMA_DVA(sc->sc_sense) +
		    ccb->ccb_sense_offset;

		/* select i'th sgl */
		ccb->ccb_sgl_offset = sizeof(struct mfii_sge) *
		    sc->sc_max_sgl * i;
		ccb->ccb_sgl = (struct mfii_sge *)(sgl + ccb->ccb_sgl_offset);
		ccb->ccb_sgl_dva = MFII_DMA_DVA(sc->sc_sgl) +
		    ccb->ccb_sgl_offset;

		/* add ccb to queue */
		mfii_put_ccb(sc, ccb);
	}

	return (0);

destroy:
	/* free dma maps and ccb memory */
	while ((ccb = mfii_get_ccb(sc)) != NULL)
		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);

	free(sc->sc_ccb, M_DEVBUF, 0);

	return (1);
}
2767 
2768 #if NBIO > 0
/*
 * bio(4) ioctl entry point.  Dispatches to the per-command handlers
 * while holding the softc lock so concurrent bio requests are
 * serialized.
 */
int
mfii_ioctl(struct device *dev, u_long cmd, caddr_t addr)
{
	struct mfii_softc	*sc = (struct mfii_softc *)dev;
	int error = 0;

	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl ", DEVNAME(sc));

	rw_enter_write(&sc->sc_lock);

	switch (cmd) {
	case BIOCINQ:
		DNPRINTF(MFII_D_IOCTL, "inq\n");
		error = mfii_ioctl_inq(sc, (struct bioc_inq *)addr);
		break;

	case BIOCVOL:
		DNPRINTF(MFII_D_IOCTL, "vol\n");
		error = mfii_ioctl_vol(sc, (struct bioc_vol *)addr);
		break;

	case BIOCDISK:
		DNPRINTF(MFII_D_IOCTL, "disk\n");
		error = mfii_ioctl_disk(sc, (struct bioc_disk *)addr);
		break;

	case BIOCALARM:
		DNPRINTF(MFII_D_IOCTL, "alarm\n");
		error = mfii_ioctl_alarm(sc, (struct bioc_alarm *)addr);
		break;

	case BIOCBLINK:
		DNPRINTF(MFII_D_IOCTL, "blink\n");
		error = mfii_ioctl_blink(sc, (struct bioc_blink *)addr);
		break;

	case BIOCSETSTATE:
		DNPRINTF(MFII_D_IOCTL, "setstate\n");
		error = mfii_ioctl_setstate(sc, (struct bioc_setstate *)addr);
		break;

	case BIOCPATROL:
		DNPRINTF(MFII_D_IOCTL, "patrol\n");
		error = mfii_ioctl_patrol(sc, (struct bioc_patrol *)addr);
		break;

	default:
		DNPRINTF(MFII_D_IOCTL, " invalid ioctl\n");
		error = ENOTTY;
	}

	rw_exit_write(&sc->sc_lock);

	return (error);
}
2824 
/*
 * Refresh the cached controller state used by the bio(4) handlers:
 * controller info, the firmware config (fetched twice - once to learn
 * its real size), the logical disk list and per-LD details, and the
 * count of physical disks in use.
 */
int
mfii_bio_getitall(struct mfii_softc *sc)
{
	int			i, d, rv = EINVAL;
	size_t			size;
	union mfi_mbox		mbox;
	struct mfi_conf		*cfg = NULL;
	struct mfi_ld_details	*ld_det = NULL;

	/* get info */
	if (mfii_get_info(sc)) {
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_get_info failed\n",
		    DEVNAME(sc));
		goto done;
	}

	/* send single element command to retrieve size for full structure */
	cfg = malloc(sizeof *cfg, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (cfg == NULL)
		goto done;
	if (mfii_mgmt(sc, MR_DCMD_CONF_GET, NULL, cfg, sizeof(*cfg),
	    SCSI_DATA_IN)) {
		free(cfg, M_DEVBUF, sizeof *cfg);
		goto done;
	}

	size = cfg->mfc_size;
	free(cfg, M_DEVBUF, sizeof *cfg);

	/* memory for read config */
	cfg = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (cfg == NULL)
		goto done;
	if (mfii_mgmt(sc, MR_DCMD_CONF_GET, NULL, cfg, size, SCSI_DATA_IN)) {
		free(cfg, M_DEVBUF, size);
		goto done;
	}

	/* replace current pointer with new one */
	if (sc->sc_cfg)
		free(sc->sc_cfg, M_DEVBUF, 0);
	sc->sc_cfg = cfg;

	/* get all ld info */
	if (mfii_mgmt(sc, MR_DCMD_LD_GET_LIST, NULL, &sc->sc_ld_list,
	    sizeof(sc->sc_ld_list), SCSI_DATA_IN))
		goto done;

	/* get memory for all ld structures */
	size = cfg->mfc_no_ld * sizeof(struct mfi_ld_details);
	if (sc->sc_ld_sz != size) {
		/* ld count changed: reallocate the details array */
		if (sc->sc_ld_details)
			free(sc->sc_ld_details, M_DEVBUF, 0);

		ld_det = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
		if (ld_det == NULL)
			goto done;
		sc->sc_ld_sz = size;
		sc->sc_ld_details = ld_det;
	}

	/* find used physical disks */
	size = sizeof(struct mfi_ld_details);
	for (i = 0, d = 0; i < cfg->mfc_no_ld; i++) {
		memset(&mbox, 0, sizeof(mbox));
		mbox.b[0] = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
		if (mfii_mgmt(sc, MR_DCMD_LD_GET_INFO, &mbox, &sc->sc_ld_details[i], size,
		    SCSI_DATA_IN))
			goto done;

		/* disks per ld = drives per span * number of spans */
		d += sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_no_drv_per_span *
		    sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_span_depth;
	}
	sc->sc_no_pd = d;

	rv = 0;
done:
	return (rv);
}
2904 
/*
 * BIOCINQ handler: report the number of volumes and disks and the
 * device name to bioctl(8).
 */
int
mfii_ioctl_inq(struct mfii_softc *sc, struct bioc_inq *bi)
{
	int			rv = EINVAL;
	struct mfi_conf		*cfg = NULL;

	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_inq\n", DEVNAME(sc));

	if (mfii_bio_getitall(sc)) {
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n",
		    DEVNAME(sc));
		goto done;
	}

	/* count unused disks as volumes */
	if (sc->sc_cfg == NULL)
		goto done;
	cfg = sc->sc_cfg;

	bi->bi_nodisk = sc->sc_info.mci_pd_disks_present;
	/* hotspares are reported as extra volumes */
	bi->bi_novol = cfg->mfc_no_ld + cfg->mfc_no_hs;
#if notyet
	bi->bi_novol = cfg->mfc_no_ld + cfg->mfc_no_hs +
	    (bi->bi_nodisk - sc->sc_no_pd);
#endif
	/* tell bio who we are */
	strlcpy(bi->bi_dev, DEVNAME(sc), sizeof(bi->bi_dev));

	rv = 0;
done:
	return (rv);
}
2937 
/*
 * BIOCVOL handler: fill in the state, progress, cache mode, RAID level
 * and size of the requested volume.  Volume ids beyond the logical
 * disk list are treated as hotspares/unused disks.
 */
int
mfii_ioctl_vol(struct mfii_softc *sc, struct bioc_vol *bv)
{
	int			i, per, target, rv = EINVAL;
	struct scsi_link	*link;
	struct device		*dev;

	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_vol %#x\n",
	    DEVNAME(sc), bv->bv_volid);

	/* we really could skip and expect that inq took care of it */
	if (mfii_bio_getitall(sc)) {
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n",
		    DEVNAME(sc));
		goto done;
	}

	if (bv->bv_volid >= sc->sc_ld_list.mll_no_ld) {
		/* go do hotspares & unused disks */
		rv = mfii_bio_hs(sc, bv->bv_volid, MFI_MGMT_VD, bv);
		goto done;
	}

	i = bv->bv_volid;
	target = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
	link = scsi_get_link(sc->sc_scsibus, target, 0);
	if (link == NULL) {
		/* volume exists but no sd(4) is attached to it */
		strlcpy(bv->bv_dev, "cache", sizeof(bv->bv_dev));
	} else {
		dev = link->device_softc;
		if (dev == NULL)
			goto done;

		strlcpy(bv->bv_dev, dev->dv_xname, sizeof(bv->bv_dev));
	}

	switch(sc->sc_ld_list.mll_list[i].mll_state) {
	case MFI_LD_OFFLINE:
		bv->bv_status = BIOC_SVOFFLINE;
		break;

	case MFI_LD_PART_DEGRADED:
	case MFI_LD_DEGRADED:
		bv->bv_status = BIOC_SVDEGRADED;
		break;

	case MFI_LD_ONLINE:
		bv->bv_status = BIOC_SVONLINE;
		break;

	default:
		bv->bv_status = BIOC_SVINVALID;
		DNPRINTF(MFII_D_IOCTL, "%s: invalid logical disk state %#x\n",
		    DEVNAME(sc),
		    sc->sc_ld_list.mll_list[i].mll_state);
	}

	/* additional status can modify MFI status */
	switch (sc->sc_ld_details[i].mld_progress.mlp_in_prog) {
	case MFI_LD_PROG_CC:
		/* consistency check in progress */
		bv->bv_status = BIOC_SVSCRUB;
		per = (int)sc->sc_ld_details[i].mld_progress.mlp_cc.mp_progress;
		bv->bv_percent = (per * 100) / 0xffff;
		bv->bv_seconds =
		    sc->sc_ld_details[i].mld_progress.mlp_cc.mp_elapsed_seconds;
		break;

	case MFI_LD_PROG_BGI:
		/* background initialization in progress */
		bv->bv_status = BIOC_SVSCRUB;
		per = (int)sc->sc_ld_details[i].mld_progress.mlp_bgi.mp_progress;
		bv->bv_percent = (per * 100) / 0xffff;
		bv->bv_seconds =
		    sc->sc_ld_details[i].mld_progress.mlp_bgi.mp_elapsed_seconds;
		break;

	case MFI_LD_PROG_FGI:
	case MFI_LD_PROG_RECONSTRUCT:
		/* nothing yet */
		break;
	}

	if (sc->sc_ld_details[i].mld_cfg.mlc_prop.mlp_cur_cache_policy & 0x01)
		bv->bv_cache = BIOC_CVWRITEBACK;
	else
		bv->bv_cache = BIOC_CVWRITETHROUGH;

	/*
	 * The RAID levels are determined per the SNIA DDF spec, this is only
	 * a subset that is valid for the MFI controller.
	 */
	bv->bv_level = sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_pri_raid;
	/* spanned raid groups are reported as e.g. raid 10/50/60 */
	if (sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_span_depth > 1)
		bv->bv_level *= 10;

	bv->bv_nodisk = sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_no_drv_per_span *
	    sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_span_depth;

	bv->bv_size = sc->sc_ld_details[i].mld_size * 512; /* bytes per block */

	rv = 0;
done:
	return (rv);
}
3041 
/*
 * BIOCDISK handler: locate the physical disk for the given volume and
 * disk index via the firmware config's array/span tables and report
 * its enclosure position, state, size, vendor and patrol progress.
 */
int
mfii_ioctl_disk(struct mfii_softc *sc, struct bioc_disk *bd)
{
	struct mfi_conf		*cfg;
	struct mfi_array	*ar;
	struct mfi_ld_cfg	*ld;
	struct mfi_pd_details	*pd;
	struct mfi_pd_list	*pl;
	struct mfi_pd_progress	*mfp;
	struct mfi_progress	*mp;
	struct scsi_inquiry_data *inqbuf;
	char			vend[8+16+4+1], *vendp;
	int			i, rv = EINVAL;
	int			arr, vol, disk, span;
	union mfi_mbox		mbox;

	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_disk %#x\n",
	    DEVNAME(sc), bd->bd_diskid);

	/* we really could skip and expect that inq took care of it */
	if (mfii_bio_getitall(sc)) {
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n",
		    DEVNAME(sc));
		return (rv);
	}
	cfg = sc->sc_cfg;

	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);
	pl = malloc(sizeof *pl, M_DEVBUF, M_WAITOK);

	ar = cfg->mfc_array;
	vol = bd->bd_volid;
	if (vol >= cfg->mfc_no_ld) {
		/* do hotspares */
		rv = mfii_bio_hs(sc, bd->bd_volid, MFI_MGMT_SD, bd);
		goto freeme;
	}

	/* calculate offset to ld structure */
	ld = (struct mfi_ld_cfg *)(
	    ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
	    cfg->mfc_array_size * cfg->mfc_no_array);

	/* use span 0 only when raid group is not spanned */
	if (ld[vol].mlc_parm.mpa_span_depth > 1)
		span = bd->bd_diskid / ld[vol].mlc_parm.mpa_no_drv_per_span;
	else
		span = 0;
	arr = ld[vol].mlc_span[span].mls_index;

	/* offset disk into pd list */
	disk = bd->bd_diskid % ld[vol].mlc_parm.mpa_no_drv_per_span;

	if (ar[arr].pd[disk].mar_pd.mfp_id == 0xffffU) {
		/* disk is missing but succeed command */
		bd->bd_status = BIOC_SDFAILED;
		rv = 0;

		/* try to find an unused disk for the target to rebuild */
		if (mfii_mgmt(sc, MR_DCMD_PD_GET_LIST, NULL, pl, sizeof(*pl),
		    SCSI_DATA_IN))
			goto freeme;

		for (i = 0; i < pl->mpl_no_pd; i++) {
			if (pl->mpl_address[i].mpa_scsi_type != 0)
				continue;

			memset(&mbox, 0, sizeof(mbox));
			mbox.s[0] = pl->mpl_address[i].mpa_pd_id;
			if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
			    SCSI_DATA_IN))
				continue;

			if (pd->mpd_fw_state == MFI_PD_UNCONFIG_GOOD ||
			    pd->mpd_fw_state == MFI_PD_UNCONFIG_BAD)
				break;
		}

		/* no candidate found */
		if (i == pl->mpl_no_pd)
			goto freeme;
	} else {
		memset(&mbox, 0, sizeof(mbox));
		mbox.s[0] = ar[arr].pd[disk].mar_pd.mfp_id;
		if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
		    SCSI_DATA_IN)) {
			bd->bd_status = BIOC_SDINVALID;
			goto freeme;
		}
	}

	/* get the remaining fields */
	bd->bd_channel = pd->mpd_enc_idx;
	bd->bd_target = pd->mpd_enc_slot;

	/* get status */
	switch (pd->mpd_fw_state){
	case MFI_PD_UNCONFIG_GOOD:
	case MFI_PD_UNCONFIG_BAD:
		bd->bd_status = BIOC_SDUNUSED;
		break;

	case MFI_PD_HOTSPARE: /* XXX dedicated hotspare part of array? */
		bd->bd_status = BIOC_SDHOTSPARE;
		break;

	case MFI_PD_OFFLINE:
		bd->bd_status = BIOC_SDOFFLINE;
		break;

	case MFI_PD_FAILED:
		bd->bd_status = BIOC_SDFAILED;
		break;

	case MFI_PD_REBUILD:
		bd->bd_status = BIOC_SDREBUILD;
		break;

	case MFI_PD_ONLINE:
		bd->bd_status = BIOC_SDONLINE;
		break;

	case MFI_PD_COPYBACK:
	case MFI_PD_SYSTEM:
	default:
		bd->bd_status = BIOC_SDINVALID;
		break;
	}

	bd->bd_size = pd->mpd_size * 512; /* bytes per block */

	/* vendor string is the inquiry vendor + product + revision fields */
	inqbuf = (struct scsi_inquiry_data *)&pd->mpd_inq_data;
	vendp = inqbuf->vendor;
	memcpy(vend, vendp, sizeof vend - 1);
	vend[sizeof vend - 1] = '\0';
	strlcpy(bd->bd_vendor, vend, sizeof(bd->bd_vendor));

	/* XXX find a way to retrieve serial nr from drive */
	/* XXX find a way to get bd_procdev */

	mfp = &pd->mpd_progress;
	if (mfp->mfp_in_prog & MFI_PD_PROG_PR) {
		/* patrol read in progress */
		mp = &mfp->mfp_patrol_read;
		bd->bd_patrol.bdp_percent = (mp->mp_progress * 100) / 0xffff;
		bd->bd_patrol.bdp_seconds = mp->mp_elapsed_seconds;
	}

	rv = 0;
freeme:
	free(pd, M_DEVBUF, sizeof *pd);
	free(pl, M_DEVBUF, sizeof *pl);

	return (rv);
}
3195 
3196 int
3197 mfii_ioctl_alarm(struct mfii_softc *sc, struct bioc_alarm *ba)
3198 {
3199 	uint32_t		opc, flags = 0;
3200 	int			rv = 0;
3201 	int8_t			ret;
3202 
3203 	switch(ba->ba_opcode) {
3204 	case BIOC_SADISABLE:
3205 		opc = MR_DCMD_SPEAKER_DISABLE;
3206 		break;
3207 
3208 	case BIOC_SAENABLE:
3209 		opc = MR_DCMD_SPEAKER_ENABLE;
3210 		break;
3211 
3212 	case BIOC_SASILENCE:
3213 		opc = MR_DCMD_SPEAKER_SILENCE;
3214 		break;
3215 
3216 	case BIOC_GASTATUS:
3217 		opc = MR_DCMD_SPEAKER_GET;
3218 		flags = SCSI_DATA_IN;
3219 		break;
3220 
3221 	case BIOC_SATEST:
3222 		opc = MR_DCMD_SPEAKER_TEST;
3223 		break;
3224 
3225 	default:
3226 		DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_alarm biocalarm invalid "
3227 		    "opcode %x\n", DEVNAME(sc), ba->ba_opcode);
3228 		return (EINVAL);
3229 	}
3230 
3231 	if (mfii_mgmt(sc, opc, NULL, &ret, sizeof(ret), flags))
3232 		rv = EINVAL;
3233 	else
3234 		if (ba->ba_opcode == BIOC_GASTATUS)
3235 			ba->ba_status = ret;
3236 		else
3237 			ba->ba_status = 0;
3238 
3239 	return (rv);
3240 }
3241 
3242 int
3243 mfii_ioctl_blink(struct mfii_softc *sc, struct bioc_blink *bb)
3244 {
3245 	int			i, found, rv = EINVAL;
3246 	union mfi_mbox		mbox;
3247 	uint32_t		cmd;
3248 	struct mfi_pd_list	*pd;
3249 
3250 	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_blink %x\n", DEVNAME(sc),
3251 	    bb->bb_status);
3252 
3253 	/* channel 0 means not in an enclosure so can't be blinked */
3254 	if (bb->bb_channel == 0)
3255 		return (EINVAL);
3256 
3257 	pd = malloc(sizeof(*pd), M_DEVBUF, M_WAITOK);
3258 
3259 	if (mfii_mgmt(sc, MR_DCMD_PD_GET_LIST, NULL, pd, sizeof(*pd), SCSI_DATA_IN))
3260 		goto done;
3261 
3262 	for (i = 0, found = 0; i < pd->mpl_no_pd; i++)
3263 		if (bb->bb_channel == pd->mpl_address[i].mpa_enc_index &&
3264 		    bb->bb_target == pd->mpl_address[i].mpa_enc_slot) {
3265 			found = 1;
3266 			break;
3267 		}
3268 
3269 	if (!found)
3270 		goto done;
3271 
3272 	memset(&mbox, 0, sizeof(mbox));
3273 	mbox.s[0] = pd->mpl_address[i].mpa_pd_id;
3274 
3275 	switch (bb->bb_status) {
3276 	case BIOC_SBUNBLINK:
3277 		cmd = MR_DCMD_PD_UNBLINK;
3278 		break;
3279 
3280 	case BIOC_SBBLINK:
3281 		cmd = MR_DCMD_PD_BLINK;
3282 		break;
3283 
3284 	case BIOC_SBALARM:
3285 	default:
3286 		DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_blink biocblink invalid "
3287 		    "opcode %x\n", DEVNAME(sc), bb->bb_status);
3288 		goto done;
3289 	}
3290 
3291 
3292 	if (mfii_mgmt(sc, cmd, &mbox, NULL, 0, 0) == 0)
3293 		rv = 0;
3294 
3295 done:
3296 	free(pd, M_DEVBUF, sizeof *pd);
3297 	return (rv);
3298 }
3299 
/*
 * Prepare a physical drive for use as a rebuild target: promote it from
 * UNCONFIG_BAD to UNCONFIG_GOOD if needed, and clear any foreign (imported
 * from another controller) configuration it carries.  Each step re-reads
 * the drive's details so it acts on current firmware state.  Returns 0 on
 * success, a mfii_mgmt() error, or ENXIO if the drive still isn't clean.
 */
static int
mfii_makegood(struct mfii_softc *sc, uint16_t pd_id)
{
	struct mfii_foreign_scan_info *fsi;
	struct mfi_pd_details	*pd;
	union mfi_mbox		mbox;
	int			rv;

	fsi = malloc(sizeof *fsi, M_DEVBUF, M_WAITOK);
	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);

	memset(&mbox, 0, sizeof mbox);
	mbox.s[0] = pd_id;
	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd), SCSI_DATA_IN);
	if (rv != 0)
		goto done;

	if (pd->mpd_fw_state == MFI_PD_UNCONFIG_BAD) {
		/*
		 * SET_STATE takes the drive id, its current sequence number
		 * and the requested state.  mbox is not re-zeroed here; only
		 * the fields the command uses are (re)written.
		 */
		mbox.s[0] = pd_id;
		mbox.s[1] = pd->mpd_pd.mfp_seq;
		mbox.b[4] = MFI_PD_UNCONFIG_GOOD;
		rv = mfii_mgmt(sc, MR_DCMD_PD_SET_STATE, &mbox, NULL, 0, 0);
		if (rv != 0)
			goto done;
	}

	/* re-read the drive details to pick up the state change */
	memset(&mbox, 0, sizeof mbox);
	mbox.s[0] = pd_id;
	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd), SCSI_DATA_IN);
	if (rv != 0)
		goto done;

	if (pd->mpd_ddf_state & MFI_DDF_FOREIGN) {
		/* the drive carries another controller's metadata; scan and,
		 * if anything foreign is present, clear it */
		rv = mfii_mgmt(sc, MR_DCMD_CFG_FOREIGN_SCAN, NULL, fsi, sizeof(*fsi),
		    SCSI_DATA_IN);
		if (rv != 0)
			goto done;

		if (fsi->count > 0) {
			rv = mfii_mgmt(sc, MR_DCMD_CFG_FOREIGN_CLEAR, NULL, NULL, 0, 0);
			if (rv != 0)
				goto done;
		}
	}

	/* final check: the drive must now be unconfigured-good and local */
	memset(&mbox, 0, sizeof mbox);
	mbox.s[0] = pd_id;
	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd), SCSI_DATA_IN);
	if (rv != 0)
		goto done;

	if (pd->mpd_fw_state != MFI_PD_UNCONFIG_GOOD ||
	    pd->mpd_ddf_state & MFI_DDF_FOREIGN)
		rv = ENXIO;

done:
	free(fsi, M_DEVBUF, sizeof *fsi);
	free(pd, M_DEVBUF, sizeof *pd);

	return (rv);
}
3361 
3362 static int
3363 mfii_makespare(struct mfii_softc *sc, uint16_t pd_id)
3364 {
3365 	struct mfi_hotspare	*hs;
3366 	struct mfi_pd_details	*pd;
3367 	union mfi_mbox		mbox;
3368 	size_t			size;
3369 	int			rv = EINVAL;
3370 
3371 	/* we really could skip and expect that inq took care of it */
3372 	if (mfii_bio_getitall(sc)) {
3373 		DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n",
3374 		    DEVNAME(sc));
3375 		return (rv);
3376 	}
3377 	size = sizeof *hs + sizeof(uint16_t) * sc->sc_cfg->mfc_no_array;
3378 
3379 	hs = malloc(size, M_DEVBUF, M_WAITOK);
3380 	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);
3381 
3382 	memset(&mbox, 0, sizeof mbox);
3383 	mbox.s[0] = pd_id;
3384 	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
3385 	    SCSI_DATA_IN);
3386 	if (rv != 0)
3387 		goto done;
3388 
3389 	memset(hs, 0, size);
3390 	hs->mhs_pd.mfp_id = pd->mpd_pd.mfp_id;
3391 	hs->mhs_pd.mfp_seq = pd->mpd_pd.mfp_seq;
3392 	rv = mfii_mgmt(sc, MR_DCMD_CFG_MAKE_SPARE, NULL, hs, size, SCSI_DATA_OUT);
3393 
3394 done:
3395 	free(hs, M_DEVBUF, size);
3396 	free(pd, M_DEVBUF, sizeof *pd);
3397 
3398 	return (rv);
3399 }
3400 
/*
 * Handle a bioctl(8) setstate request: change a physical drive's state
 * (online/offline/hotspare/rebuild).  The drive is located by its
 * enclosure index and slot in the firmware's drive list.  Returns 0 on
 * success, EINVAL or a mfii_mgmt() error otherwise.
 */
int
mfii_ioctl_setstate(struct mfii_softc *sc, struct bioc_setstate *bs)
{
	struct mfi_pd_details	*pd;
	struct mfi_pd_list	*pl;
	int			i, found, rv = EINVAL;
	union mfi_mbox		mbox;

	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_setstate %x\n", DEVNAME(sc),
	    bs->bs_status);

	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);
	pl = malloc(sizeof *pl, M_DEVBUF, M_WAITOK);

	if (mfii_mgmt(sc, MR_DCMD_PD_GET_LIST, NULL, pl, sizeof(*pl), SCSI_DATA_IN))
		goto done;

	/* map the bio channel/target address to a physical drive */
	for (i = 0, found = 0; i < pl->mpl_no_pd; i++)
		if (bs->bs_channel == pl->mpl_address[i].mpa_enc_index &&
		    bs->bs_target == pl->mpl_address[i].mpa_enc_slot) {
			found = 1;
			break;
		}

	if (!found)
		goto done;

	memset(&mbox, 0, sizeof(mbox));
	mbox.s[0] = pl->mpl_address[i].mpa_pd_id;

	if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd), SCSI_DATA_IN))
		goto done;

	/*
	 * SET_STATE takes the drive id, its current sequence number and
	 * the requested state (in mbox.b[4], filled in below).
	 */
	mbox.s[0] = pl->mpl_address[i].mpa_pd_id;
	mbox.s[1] = pd->mpd_pd.mfp_seq;

	switch (bs->bs_status) {
	case BIOC_SSONLINE:
		mbox.b[4] = MFI_PD_ONLINE;
		break;

	case BIOC_SSOFFLINE:
		mbox.b[4] = MFI_PD_OFFLINE;
		break;

	case BIOC_SSHOTSPARE:
		mbox.b[4] = MFI_PD_HOTSPARE;
		break;

	case BIOC_SSREBUILD:
		if (pd->mpd_fw_state != MFI_PD_OFFLINE) {
			/* the drive isn't part of the array yet: clean it up
			 * and make it a spare before asking for a rebuild */
			if ((rv = mfii_makegood(sc,
			    pl->mpl_address[i].mpa_pd_id)))
				goto done;

			if ((rv = mfii_makespare(sc,
			    pl->mpl_address[i].mpa_pd_id)))
				goto done;

			/* re-read the drive; the steps above changed it */
			memset(&mbox, 0, sizeof(mbox));
			mbox.s[0] = pl->mpl_address[i].mpa_pd_id;
			rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
			    SCSI_DATA_IN);
			if (rv != 0)
				goto done;

			/* rebuilding might be started by mfii_makespare() */
			if (pd->mpd_fw_state == MFI_PD_REBUILD) {
				rv = 0;
				goto done;
			}

			/* refill the SET_STATE arguments with the new seq */
			mbox.s[0] = pl->mpl_address[i].mpa_pd_id;
			mbox.s[1] = pd->mpd_pd.mfp_seq;
		}
		mbox.b[4] = MFI_PD_REBUILD;
		break;

	default:
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_setstate invalid "
		    "opcode %x\n", DEVNAME(sc), bs->bs_status);
		goto done;
	}


	rv = mfii_mgmt(sc, MR_DCMD_PD_SET_STATE, &mbox, NULL, 0, 0);
done:
	free(pd, M_DEVBUF, sizeof *pd);
	free(pl, M_DEVBUF, sizeof *pl);
	return (rv);
}
3492 
/*
 * Handle a bioctl(8) patrol-read request: start/stop patrol reads, switch
 * between manual/disabled/automatic scheduling, or report the current
 * patrol mode and state.  Returns 0 on success, EINVAL otherwise.
 */
int
mfii_ioctl_patrol(struct mfii_softc *sc, struct bioc_patrol *bp)
{
	uint32_t		opc;
	int			rv = 0;
	struct mfi_pr_properties prop;
	struct mfi_pr_status	status;
	uint32_t		time, exec_freq;

	switch (bp->bp_opcode) {
	case BIOC_SPSTOP:
	case BIOC_SPSTART:
		if (bp->bp_opcode == BIOC_SPSTART)
			opc = MR_DCMD_PR_START;
		else
			opc = MR_DCMD_PR_STOP;
		/* NOTE(review): SCSI_DATA_IN with a NULL/zero-length buffer
		 * looks odd but mirrors the historic mfi(4) usage — confirm
		 * against mfii_mgmt() before changing */
		if (mfii_mgmt(sc, opc, NULL, NULL, 0, SCSI_DATA_IN))
			return (EINVAL);
		break;

	case BIOC_SPMANUAL:
	case BIOC_SPDISABLE:
	case BIOC_SPAUTO:
		/* Get device's time. */
		opc = MR_DCMD_TIME_SECS_GET;
		if (mfii_mgmt(sc, opc, NULL, &time, sizeof(time), SCSI_DATA_IN))
			return (EINVAL);

		/* read-modify-write the patrol properties */
		opc = MR_DCMD_PR_GET_PROPERTIES;
		if (mfii_mgmt(sc, opc, NULL, &prop, sizeof(prop), SCSI_DATA_IN))
			return (EINVAL);

		switch (bp->bp_opcode) {
		case BIOC_SPMANUAL:
			prop.op_mode = MFI_PR_OPMODE_MANUAL;
			break;
		case BIOC_SPDISABLE:
			prop.op_mode = MFI_PR_OPMODE_DISABLED;
			break;
		case BIOC_SPAUTO:
			/* optional interval between runs; 0 keeps the old */
			if (bp->bp_autoival != 0) {
				if (bp->bp_autoival == -1)
					/* continuously */
					exec_freq = 0xffffffffU;
				else if (bp->bp_autoival > 0)
					exec_freq = bp->bp_autoival;
				else
					return (EINVAL);
				prop.exec_freq = exec_freq;
			}
			/* optional delay until the next run, relative to the
			 * controller's clock read above */
			if (bp->bp_autonext != 0) {
				if (bp->bp_autonext < 0)
					return (EINVAL);
				else
					prop.next_exec = time + bp->bp_autonext;
			}
			prop.op_mode = MFI_PR_OPMODE_AUTO;
			break;
		}

		opc = MR_DCMD_PR_SET_PROPERTIES;
		if (mfii_mgmt(sc, opc, NULL, &prop, sizeof(prop), SCSI_DATA_OUT))
			return (EINVAL);

		break;

	case BIOC_GPSTATUS:
		opc = MR_DCMD_PR_GET_PROPERTIES;
		if (mfii_mgmt(sc, opc, NULL, &prop, sizeof(prop), SCSI_DATA_IN))
			return (EINVAL);

		opc = MR_DCMD_PR_GET_STATUS;
		if (mfii_mgmt(sc, opc, NULL, &status, sizeof(status), SCSI_DATA_IN))
			return (EINVAL);

		/* Get device's time. */
		opc = MR_DCMD_TIME_SECS_GET;
		if (mfii_mgmt(sc, opc, NULL, &time, sizeof(time), SCSI_DATA_IN))
			return (EINVAL);

		/* translate firmware mode/state to the bio constants */
		switch (prop.op_mode) {
		case MFI_PR_OPMODE_AUTO:
			bp->bp_mode = BIOC_SPMAUTO;
			bp->bp_autoival = prop.exec_freq;
			bp->bp_autonext = prop.next_exec;
			bp->bp_autonow = time;
			break;
		case MFI_PR_OPMODE_MANUAL:
			bp->bp_mode = BIOC_SPMMANUAL;
			break;
		case MFI_PR_OPMODE_DISABLED:
			bp->bp_mode = BIOC_SPMDISABLED;
			break;
		default:
			printf("%s: unknown patrol mode %d\n",
			    DEVNAME(sc), prop.op_mode);
			break;
		}

		switch (status.state) {
		case MFI_PR_STATE_STOPPED:
			bp->bp_status = BIOC_SPSSTOPPED;
			break;
		case MFI_PR_STATE_READY:
			bp->bp_status = BIOC_SPSREADY;
			break;
		case MFI_PR_STATE_ACTIVE:
			bp->bp_status = BIOC_SPSACTIVE;
			break;
		case MFI_PR_STATE_ABORTED:
			bp->bp_status = BIOC_SPSABORTED;
			break;
		default:
			printf("%s: unknown patrol state %d\n",
			    DEVNAME(sc), status.state);
			break;
		}

		break;

	default:
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_patrol biocpatrol invalid "
		    "opcode %x\n", DEVNAME(sc), bp->bp_opcode);
		return (EINVAL);
	}

	return (rv);
}
3621 
3622 int
3623 mfii_bio_hs(struct mfii_softc *sc, int volid, int type, void *bio_hs)
3624 {
3625 	struct mfi_conf		*cfg;
3626 	struct mfi_hotspare	*hs;
3627 	struct mfi_pd_details	*pd;
3628 	struct bioc_disk	*sdhs;
3629 	struct bioc_vol		*vdhs;
3630 	struct scsi_inquiry_data *inqbuf;
3631 	char			vend[8+16+4+1], *vendp;
3632 	int			i, rv = EINVAL;
3633 	uint32_t		size;
3634 	union mfi_mbox		mbox;
3635 
3636 	DNPRINTF(MFII_D_IOCTL, "%s: mfii_vol_hs %d\n", DEVNAME(sc), volid);
3637 
3638 	if (!bio_hs)
3639 		return (EINVAL);
3640 
3641 	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);
3642 
3643 	/* send single element command to retrieve size for full structure */
3644 	cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK);
3645 	if (mfii_mgmt(sc, MR_DCMD_CONF_GET, NULL, cfg, sizeof(*cfg), SCSI_DATA_IN))
3646 		goto freeme;
3647 
3648 	size = cfg->mfc_size;
3649 	free(cfg, M_DEVBUF, sizeof *cfg);
3650 
3651 	/* memory for read config */
3652 	cfg = malloc(size, M_DEVBUF, M_WAITOK|M_ZERO);
3653 	if (mfii_mgmt(sc, MR_DCMD_CONF_GET, NULL, cfg, size, SCSI_DATA_IN))
3654 		goto freeme;
3655 
3656 	/* calculate offset to hs structure */
3657 	hs = (struct mfi_hotspare *)(
3658 	    ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
3659 	    cfg->mfc_array_size * cfg->mfc_no_array +
3660 	    cfg->mfc_ld_size * cfg->mfc_no_ld);
3661 
3662 	if (volid < cfg->mfc_no_ld)
3663 		goto freeme; /* not a hotspare */
3664 
3665 	if (volid > (cfg->mfc_no_ld + cfg->mfc_no_hs))
3666 		goto freeme; /* not a hotspare */
3667 
3668 	/* offset into hotspare structure */
3669 	i = volid - cfg->mfc_no_ld;
3670 
3671 	DNPRINTF(MFII_D_IOCTL, "%s: mfii_vol_hs i %d volid %d no_ld %d no_hs %d "
3672 	    "hs %p cfg %p id %02x\n", DEVNAME(sc), i, volid, cfg->mfc_no_ld,
3673 	    cfg->mfc_no_hs, hs, cfg, hs[i].mhs_pd.mfp_id);
3674 
3675 	/* get pd fields */
3676 	memset(&mbox, 0, sizeof(mbox));
3677 	mbox.s[0] = hs[i].mhs_pd.mfp_id;
3678 	if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
3679 	    SCSI_DATA_IN)) {
3680 		DNPRINTF(MFII_D_IOCTL, "%s: mfii_vol_hs illegal PD\n",
3681 		    DEVNAME(sc));
3682 		goto freeme;
3683 	}
3684 
3685 	switch (type) {
3686 	case MFI_MGMT_VD:
3687 		vdhs = bio_hs;
3688 		vdhs->bv_status = BIOC_SVONLINE;
3689 		vdhs->bv_size = pd->mpd_size / 2 * 1024; /* XXX why? */
3690 		vdhs->bv_level = -1; /* hotspare */
3691 		vdhs->bv_nodisk = 1;
3692 		break;
3693 
3694 	case MFI_MGMT_SD:
3695 		sdhs = bio_hs;
3696 		sdhs->bd_status = BIOC_SDHOTSPARE;
3697 		sdhs->bd_size = pd->mpd_size / 2 * 1024; /* XXX why? */
3698 		sdhs->bd_channel = pd->mpd_enc_idx;
3699 		sdhs->bd_target = pd->mpd_enc_slot;
3700 		inqbuf = (struct scsi_inquiry_data *)&pd->mpd_inq_data;
3701 		vendp = inqbuf->vendor;
3702 		memcpy(vend, vendp, sizeof vend - 1);
3703 		vend[sizeof vend - 1] = '\0';
3704 		strlcpy(sdhs->bd_vendor, vend, sizeof(sdhs->bd_vendor));
3705 		break;
3706 
3707 	default:
3708 		goto freeme;
3709 	}
3710 
3711 	DNPRINTF(MFII_D_IOCTL, "%s: mfii_vol_hs 6\n", DEVNAME(sc));
3712 	rv = 0;
3713 freeme:
3714 	free(pd, M_DEVBUF, sizeof *pd);
3715 	free(cfg, M_DEVBUF, 0);
3716 
3717 	return (rv);
3718 }
3719 
3720 #ifndef SMALL_KERNEL
3721 
3722 #define MFI_BBU_SENSORS 4
3723 
3724 void
3725 mfii_bbu(struct mfii_softc *sc)
3726 {
3727 	struct mfi_bbu_status bbu;
3728 	u_int32_t status;
3729 	u_int32_t mask;
3730 	u_int32_t soh_bad;
3731 	int i;
3732 
3733 	if (mfii_mgmt(sc, MR_DCMD_BBU_GET_STATUS, NULL, &bbu,
3734 	    sizeof(bbu), SCSI_DATA_IN) != 0) {
3735 		for (i = 0; i < MFI_BBU_SENSORS; i++) {
3736 			sc->sc_bbu[i].value = 0;
3737 			sc->sc_bbu[i].status = SENSOR_S_UNKNOWN;
3738 		}
3739 		for (i = 0; i < nitems(mfi_bbu_indicators); i++) {
3740 			sc->sc_bbu_status[i].value = 0;
3741 			sc->sc_bbu_status[i].status = SENSOR_S_UNKNOWN;
3742 		}
3743 		return;
3744 	}
3745 
3746 	switch (bbu.battery_type) {
3747 	case MFI_BBU_TYPE_IBBU:
3748 		mask = MFI_BBU_STATE_BAD_IBBU;
3749 		soh_bad = 0;
3750 		break;
3751 	case MFI_BBU_TYPE_BBU:
3752 		mask = MFI_BBU_STATE_BAD_BBU;
3753 		soh_bad = (bbu.detail.bbu.is_SOH_good == 0);
3754 		break;
3755 
3756 	case MFI_BBU_TYPE_NONE:
3757 	default:
3758 		sc->sc_bbu[0].value = 0;
3759 		sc->sc_bbu[0].status = SENSOR_S_CRIT;
3760 		for (i = 1; i < MFI_BBU_SENSORS; i++) {
3761 			sc->sc_bbu[i].value = 0;
3762 			sc->sc_bbu[i].status = SENSOR_S_UNKNOWN;
3763 		}
3764 		for (i = 0; i < nitems(mfi_bbu_indicators); i++) {
3765 			sc->sc_bbu_status[i].value = 0;
3766 			sc->sc_bbu_status[i].status = SENSOR_S_UNKNOWN;
3767 		}
3768 		return;
3769 	}
3770 
3771 	status = letoh32(bbu.fw_status);
3772 
3773 	sc->sc_bbu[0].value = ((status & mask) || soh_bad) ? 0 : 1;
3774 	sc->sc_bbu[0].status = ((status & mask) || soh_bad) ? SENSOR_S_CRIT :
3775 	    SENSOR_S_OK;
3776 
3777 	sc->sc_bbu[1].value = letoh16(bbu.voltage) * 1000;
3778 	sc->sc_bbu[2].value = (int16_t)letoh16(bbu.current) * 1000;
3779 	sc->sc_bbu[3].value = letoh16(bbu.temperature) * 1000000 + 273150000;
3780 	for (i = 1; i < MFI_BBU_SENSORS; i++)
3781 		sc->sc_bbu[i].status = SENSOR_S_UNSPEC;
3782 
3783 	for (i = 0; i < nitems(mfi_bbu_indicators); i++) {
3784 		sc->sc_bbu_status[i].value = (status & (1 << i)) ? 1 : 0;
3785 		sc->sc_bbu_status[i].status = SENSOR_S_UNSPEC;
3786 	}
3787 }
3788 
3789 void
3790 mfii_refresh_ld_sensor(struct mfii_softc *sc, int ld)
3791 {
3792 	struct ksensor *sensor;
3793 	int target;
3794 
3795 	target = sc->sc_ld_list.mll_list[ld].mll_ld.mld_target;
3796 	sensor = &sc->sc_sensors[target];
3797 
3798 	switch(sc->sc_ld_list.mll_list[ld].mll_state) {
3799 	case MFI_LD_OFFLINE:
3800 		sensor->value = SENSOR_DRIVE_FAIL;
3801 		sensor->status = SENSOR_S_CRIT;
3802 		break;
3803 
3804 	case MFI_LD_PART_DEGRADED:
3805 	case MFI_LD_DEGRADED:
3806 		sensor->value = SENSOR_DRIVE_PFAIL;
3807 		sensor->status = SENSOR_S_WARN;
3808 		break;
3809 
3810 	case MFI_LD_ONLINE:
3811 		sensor->value = SENSOR_DRIVE_ONLINE;
3812 		sensor->status = SENSOR_S_OK;
3813 		break;
3814 
3815 	default:
3816 		sensor->value = 0; /* unknown */
3817 		sensor->status = SENSOR_S_UNKNOWN;
3818 		break;
3819 	}
3820 }
3821 
3822 void
3823 mfii_init_ld_sensor(struct mfii_softc *sc, int ld)
3824 {
3825 	struct device		*dev;
3826 	struct scsi_link	*link;
3827 	struct ksensor		*sensor;
3828 	int			target;
3829 
3830 	target = sc->sc_ld_list.mll_list[ld].mll_ld.mld_target;
3831 	sensor = &sc->sc_sensors[target];
3832 
3833 	link = scsi_get_link(sc->sc_scsibus, target, 0);
3834 	if (link == NULL) {
3835 		strlcpy(sensor->desc, "cache", sizeof(sensor->desc));
3836 	} else {
3837 		dev = link->device_softc;
3838 		if (dev != NULL)
3839 			strlcpy(sensor->desc, dev->dv_xname,
3840 			    sizeof(sensor->desc));
3841 	}
3842 	sensor->type = SENSOR_DRIVE;
3843 	mfii_refresh_ld_sensor(sc, ld);
3844 }
3845 
3846 int
3847 mfii_create_sensors(struct mfii_softc *sc)
3848 {
3849 	int			i, target;
3850 
3851 	strlcpy(sc->sc_sensordev.xname, DEVNAME(sc),
3852 	    sizeof(sc->sc_sensordev.xname));
3853 
3854 	if (ISSET(letoh32(sc->sc_info.mci_hw_present), MFI_INFO_HW_BBU)) {
3855 		sc->sc_bbu = mallocarray(4, sizeof(*sc->sc_bbu),
3856 		    M_DEVBUF, M_WAITOK | M_ZERO);
3857 
3858 		sc->sc_bbu[0].type = SENSOR_INDICATOR;
3859 		sc->sc_bbu[0].status = SENSOR_S_UNKNOWN;
3860 		strlcpy(sc->sc_bbu[0].desc, "bbu ok",
3861 		    sizeof(sc->sc_bbu[0].desc));
3862 		sensor_attach(&sc->sc_sensordev, &sc->sc_bbu[0]);
3863 
3864 		sc->sc_bbu[1].type = SENSOR_VOLTS_DC;
3865 		sc->sc_bbu[1].status = SENSOR_S_UNSPEC;
3866 		sc->sc_bbu[2].type = SENSOR_AMPS;
3867 		sc->sc_bbu[2].status = SENSOR_S_UNSPEC;
3868 		sc->sc_bbu[3].type = SENSOR_TEMP;
3869 		sc->sc_bbu[3].status = SENSOR_S_UNSPEC;
3870 		for (i = 1; i < MFI_BBU_SENSORS; i++) {
3871 			strlcpy(sc->sc_bbu[i].desc, "bbu",
3872 			    sizeof(sc->sc_bbu[i].desc));
3873 			sensor_attach(&sc->sc_sensordev, &sc->sc_bbu[i]);
3874 		}
3875 
3876 		sc->sc_bbu_status = malloc(sizeof(*sc->sc_bbu_status) *
3877 		    sizeof(mfi_bbu_indicators), M_DEVBUF, M_WAITOK | M_ZERO);
3878 
3879 		for (i = 0; i < nitems(mfi_bbu_indicators); i++) {
3880 			sc->sc_bbu_status[i].type = SENSOR_INDICATOR;
3881 			sc->sc_bbu_status[i].status = SENSOR_S_UNSPEC;
3882 			strlcpy(sc->sc_bbu_status[i].desc,
3883 			    mfi_bbu_indicators[i],
3884 			    sizeof(sc->sc_bbu_status[i].desc));
3885 
3886 			sensor_attach(&sc->sc_sensordev, &sc->sc_bbu_status[i]);
3887 		}
3888 	}
3889 
3890 	sc->sc_sensors = mallocarray(MFI_MAX_LD, sizeof(struct ksensor),
3891 	    M_DEVBUF, M_NOWAIT | M_ZERO);
3892 	if (sc->sc_sensors == NULL)
3893 		return (1);
3894 
3895 	for (i = 0; i < sc->sc_ld_list.mll_no_ld; i++) {
3896 		mfii_init_ld_sensor(sc, i);
3897 		target = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
3898 		sensor_attach(&sc->sc_sensordev, &sc->sc_sensors[target]);
3899 	}
3900 
3901 	if (sensor_task_register(sc, mfii_refresh_sensors, 10) == NULL)
3902 		goto bad;
3903 
3904 	sensordev_install(&sc->sc_sensordev);
3905 
3906 	return (0);
3907 
3908 bad:
3909 	free(sc->sc_sensors, M_DEVBUF,
3910 	    MFI_MAX_LD * sizeof(struct ksensor));
3911 
3912 	return (1);
3913 }
3914 
3915 void
3916 mfii_refresh_sensors(void *arg)
3917 {
3918 	struct mfii_softc	*sc = arg;
3919 	int			i;
3920 
3921 	rw_enter_write(&sc->sc_lock);
3922 	if (sc->sc_bbu != NULL)
3923 		mfii_bbu(sc);
3924 
3925 	mfii_bio_getitall(sc);
3926 	rw_exit_write(&sc->sc_lock);
3927 
3928 	for (i = 0; i < sc->sc_ld_list.mll_no_ld; i++)
3929 		mfii_refresh_ld_sensor(sc, i);
3930 }
3931 #endif /* SMALL_KERNEL */
3932 #endif /* NBIO > 0 */
3933