xref: /openbsd/sys/dev/pci/mfii.c (revision e5dd7070)
1 /* $OpenBSD: mfii.c,v 1.80 2020/07/22 13:16:04 krw Exp $ */
2 
3 /*
4  * Copyright (c) 2012 David Gwynne <dlg@openbsd.org>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "bio.h"
20 
21 #include <sys/param.h>
22 #include <sys/systm.h>
23 #include <sys/malloc.h>
24 #include <sys/device.h>
25 #include <sys/dkio.h>
26 #include <sys/pool.h>
27 #include <sys/task.h>
28 #include <sys/atomic.h>
29 #include <sys/sensors.h>
30 #include <sys/rwlock.h>
31 #include <sys/syslog.h>
32 #include <sys/smr.h>
33 
34 #include <dev/biovar.h>
35 #include <dev/pci/pcidevs.h>
36 #include <dev/pci/pcivar.h>
37 
38 #include <machine/bus.h>
39 
40 #include <scsi/scsi_all.h>
41 #include <scsi/scsi_disk.h>
42 #include <scsi/scsiconf.h>
43 
44 #include <dev/ic/mfireg.h>
45 #include <dev/pci/mpiireg.h>
46 
47 #define	MFII_BAR		0x14
48 #define MFII_BAR_35		0x10
49 #define	MFII_PCI_MEMSIZE	0x2000 /* 8k */
50 
51 #define MFII_OSTS_INTR_VALID	0x00000009
52 #define MFII_RPI		0x6c /* reply post host index */
53 #define MFII_OSP2		0xb4 /* outbound scratch pad 2 */
54 #define MFII_OSP3		0xb8 /* outbound scratch pad 3 */
55 
56 #define MFII_REQ_TYPE_SCSI	MPII_REQ_DESCR_SCSI_IO
57 #define MFII_REQ_TYPE_LDIO	(0x7 << 1)
58 #define MFII_REQ_TYPE_MFA	(0x1 << 1)
59 #define MFII_REQ_TYPE_NO_LOCK	(0x2 << 1)
60 #define MFII_REQ_TYPE_HI_PRI	(0x6 << 1)
61 
62 #define MFII_REQ_MFA(_a)	htole64((_a) | MFII_REQ_TYPE_MFA)
63 
64 #define MFII_FUNCTION_PASSTHRU_IO			(0xf0)
65 #define MFII_FUNCTION_LDIO_REQUEST			(0xf1)
66 
67 #define MFII_MAX_CHAIN_UNIT	0x00400000
68 #define MFII_MAX_CHAIN_MASK	0x000003E0
69 #define MFII_MAX_CHAIN_SHIFT	5
70 
71 #define MFII_256K_IO		128
72 #define MFII_1MB_IO		(MFII_256K_IO * 4)
73 
74 #define MFII_CHAIN_FRAME_MIN	1024
75 
/*
 * Request descriptor written to the hardware to kick off a command.
 * flags selects the MFII_REQ_TYPE_* queue/locking behaviour and smid
 * names the request frame slot the descriptor refers to.
 */
struct mfii_request_descr {
	u_int8_t	flags;
	u_int8_t	msix_index;
	u_int16_t	smid;

	u_int16_t	lmid;
	u_int16_t	dev_handle;
} __packed;
84 
85 #define MFII_RAID_CTX_IO_TYPE_SYSPD	(0x1 << 4)
86 #define MFII_RAID_CTX_TYPE_CUDA		(0x2 << 4)
87 
/*
 * Per-command RAID context appended to the MPII SCSI IO request frame.
 * Layout is dictated by the firmware interface; fields are little-endian
 * on the wire.
 */
struct mfii_raid_context {
	u_int8_t	type_nseg;
	u_int8_t	_reserved1;
	u_int16_t	timeout_value;

	/* region-lock flags for gen2.5 (CUDA) controllers */
	u_int16_t	reg_lock_flags;
#define MFII_RAID_CTX_RL_FLAGS_SEQNO_EN	(0x08)
#define MFII_RAID_CTX_RL_FLAGS_CPU0	(0x00)
#define MFII_RAID_CTX_RL_FLAGS_CPU1	(0x10)
#define MFII_RAID_CTX_RL_FLAGS_CUDA	(0x80)

/* gen3.5 controllers reinterpret the same field as routing flags */
#define MFII_RAID_CTX_ROUTING_FLAGS_SQN	(1 << 4)
#define MFII_RAID_CTX_ROUTING_FLAGS_CPU0 0
	u_int16_t	virtual_disk_target_id;

	u_int64_t	reg_lock_row_lba;

	u_int32_t	reg_lock_length;

	u_int16_t	next_lm_id;
	u_int8_t	ex_status;	/* extended status from the firmware */
	u_int8_t	status;		/* MFI completion status */

	u_int8_t	raid_flags;
	u_int8_t	num_sge;
	u_int16_t	config_seq_num;

	u_int8_t	span_arm;
	u_int8_t	_reserved3[3];
} __packed;
118 
/*
 * IEEE-style scatter/gather element; sg_flags takes the MFII_SGE_*
 * address-space and chain/end-of-list bits defined below.
 */
struct mfii_sge {
	u_int64_t	sg_addr;
	u_int32_t	sg_len;
	u_int16_t	_reserved;
	u_int8_t	sg_next_chain_offset;
	u_int8_t	sg_flags;
} __packed;
126 
127 #define MFII_SGE_ADDR_MASK		(0x03)
128 #define MFII_SGE_ADDR_SYSTEM		(0x00)
129 #define MFII_SGE_ADDR_IOCDDR		(0x01)
130 #define MFII_SGE_ADDR_IOCPLB		(0x02)
131 #define MFII_SGE_ADDR_IOCPLBNTA		(0x03)
132 #define MFII_SGE_END_OF_LIST		(0x40)
133 #define MFII_SGE_CHAIN_ELEMENT		(0x80)
134 
135 #define MFII_REQUEST_SIZE	256
136 
137 #define MR_DCMD_LD_MAP_GET_INFO			0x0300e101
138 
139 #define MFII_MAX_ROW		32
140 #define MFII_MAX_ARRAY		128
141 
/* per-array row of physical disk references in the LD map */
struct mfii_array_map {
	uint16_t		mam_pd[MFII_MAX_ROW];
} __packed;

/* current/alternate MPII device handles for one physical disk */
struct mfii_dev_handle {
	uint16_t		mdh_cur_handle;
	uint8_t			mdh_valid;
	uint8_t			mdh_reserved;
	uint16_t		mdh_handle[2];
} __packed;

/*
 * Logical drive map returned by MR_DCMD_LD_MAP_GET_INFO; the driver
 * uses it for the target-to-dev-handle table and the PD timeout.
 * NOTE(review): field layout mirrors the firmware structure -- confirm
 * against the megaraid_sas interface before changing.
 */
struct mfii_ld_map {
	uint32_t		mlm_total_size;
	uint32_t		mlm_reserved1[5];
	uint32_t		mlm_num_lds;
	uint32_t		mlm_reserved2;
	uint8_t			mlm_tgtid_to_ld[2 * MFI_MAX_LD];
	uint8_t			mlm_pd_timeout;
	uint8_t			mlm_reserved3[7];
	struct mfii_array_map	mlm_am[MFII_MAX_ARRAY];
	struct mfii_dev_handle	mlm_dev_handle[MFI_MAX_PD];
} __packed;
164 
/*
 * Task management (abort) request/reply pair; the raw byte arrays fix
 * the size of each half of the union at 128 bytes regardless of the
 * mpii message layout.
 */
struct mfii_task_mgmt {
	union {
		uint8_t			request[128];
		struct mpii_msg_scsi_task_request
					mpii_request;
	} __packed __aligned(8);

	union {
		uint8_t			reply[128];
		uint32_t		flags;
#define MFII_TASK_MGMT_FLAGS_LD				(1 << 0)
#define MFII_TASK_MGMT_FLAGS_PD				(1 << 1)
		struct mpii_msg_scsi_task_reply
					mpii_reply;
	} __packed __aligned(8);
} __packed __aligned(8);
181 
/*
 * A single-segment DMA-safe memory allocation; created and destroyed
 * by mfii_dmamem_alloc()/mfii_dmamem_free().  The accessor macros
 * below return the map, length, device address and kernel va.
 */
struct mfii_dmamem {
	bus_dmamap_t		mdm_map;
	bus_dma_segment_t	mdm_seg;
	size_t			mdm_size;
	caddr_t			mdm_kva;
};
#define MFII_DMA_MAP(_mdm)	((_mdm)->mdm_map)
#define MFII_DMA_LEN(_mdm)	((_mdm)->mdm_size)
#define MFII_DMA_DVA(_mdm)	((u_int64_t)(_mdm)->mdm_map->dm_segs[0].ds_addr)
#define MFII_DMA_KVA(_mdm)	((void *)(_mdm)->mdm_kva)
192 
193 struct mfii_softc;
194 
/*
 * Per-command control block.  Each ccb owns one slot in each of the
 * request, mfi, sense and sgl DMA arrays; the ccb_*_dva/offset pairs
 * locate that slot within the shared allocations.
 */
struct mfii_ccb {
	void			*ccb_request;
	u_int64_t		ccb_request_dva;
	bus_addr_t		ccb_request_offset;

	void			*ccb_mfi;
	u_int64_t		ccb_mfi_dva;
	bus_addr_t		ccb_mfi_offset;

	struct mfi_sense	*ccb_sense;
	u_int64_t		ccb_sense_dva;
	bus_addr_t		ccb_sense_offset;

	struct mfii_sge		*ccb_sgl;
	u_int64_t		ccb_sgl_dva;
	bus_addr_t		ccb_sgl_offset;
	u_int			ccb_sgl_len;

	/* descriptor written to the hardware to submit this ccb */
	struct mfii_request_descr ccb_req;

	bus_dmamap_t		ccb_dmamap;

	/* data for sgl */
	void			*ccb_data;
	size_t			ccb_len;

	int			ccb_direction;
#define MFII_DATA_NONE			0
#define MFII_DATA_IN			1
#define MFII_DATA_OUT			2

	/* completion callback and its argument */
	void			*ccb_cookie;
	void			(*ccb_done)(struct mfii_softc *,
				    struct mfii_ccb *);

	u_int32_t		ccb_flags;
#define MFI_CCB_F_ERR			(1<<0)
	u_int			ccb_smid;	/* 1-based request frame id */
	u_int			ccb_refcnt;
	SIMPLEQ_ENTRY(mfii_ccb)	ccb_link;
};
SIMPLEQ_HEAD(mfii_ccb_list, mfii_ccb);
237 
/*
 * Snapshot of physical disk target to MPII dev handle mappings.
 * Published with SMR_PTR_SET_LOCKED() and read under smr_read_enter();
 * retired snapshots are freed via smr_call() (see
 * mfii_dev_handles_update()).
 */
struct mfii_pd_dev_handles {
	struct smr_entry	pd_smr;
	uint16_t		pd_handles[MFI_MAX_PD];
};

/* state for the pass-through physical disk scsibus */
struct mfii_pd_softc {
	struct scsibus_softc	*pd_scsibus;
	struct mfii_pd_dev_handles *pd_dev_handles;
	uint8_t			pd_timeout;
};
248 
/*
 * Per-controller-generation parameters: which BAR to map, where the
 * firmware reports the sge count, and the flag values used when
 * building LD I/O requests and sgls.
 */
struct mfii_iop {
	int bar;
	int num_sge_loc;
#define MFII_IOP_NUM_SGE_LOC_ORIG	0
#define MFII_IOP_NUM_SGE_LOC_35		1
	u_int16_t ldio_ctx_reg_lock_flags;
	u_int8_t ldio_req_type;
	u_int8_t ldio_ctx_type_nseg;
	u_int8_t sge_flag_chain;
	u_int8_t sge_flag_eol;
};
260 
/* per-controller driver state */
struct mfii_softc {
	struct device		sc_dev;
	const struct mfii_iop	*sc_iop;	/* generation parameters */

	/* pci glue, saved at attach for interrupt (dis)establish */
	pci_chipset_tag_t	sc_pc;
	pcitag_t		sc_tag;

	/* register window and dma tag */
	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	bus_size_t		sc_ios;
	bus_dma_tag_t		sc_dmat;

	void			*sc_ih;		/* interrupt handle */

	struct mutex		sc_ccb_mtx;	/* protects sc_ccb_freeq */
	struct mutex		sc_post_mtx;	/* serializes hw submission */

	/* command limits derived from firmware scratchpad registers */
	u_int			sc_max_fw_cmds;
	u_int			sc_max_cmds;
	u_int			sc_max_sgl;

	/* reply post queue ring */
	u_int			sc_reply_postq_depth;
	u_int			sc_reply_postq_index;
	struct mutex		sc_reply_postq_mtx;
	struct mfii_dmamem	*sc_reply_postq;

	/* shared DMA arrays, sliced per-ccb by mfii_init_ccb() */
	struct mfii_dmamem	*sc_requests;
	struct mfii_dmamem	*sc_mfi;
	struct mfii_dmamem	*sc_sense;
	struct mfii_dmamem	*sc_sgl;

	struct mfii_ccb		*sc_ccb;
	struct mfii_ccb_list	sc_ccb_freeq;

	/* async event notification */
	struct mfii_ccb		*sc_aen_ccb;
	struct task		sc_aen_task;

	/* timed-out commands queued for abort processing */
	struct mutex		sc_abort_mtx;
	struct mfii_ccb_list	sc_abort_list;
	struct task		sc_abort_task;

	struct scsibus_softc	*sc_scsibus;
	struct mfii_pd_softc	*sc_pd;		/* pass-through pd bus */
	struct scsi_iopool	sc_iopool;

	/* save some useful information for logical drives that is missing
	 * in sc_ld_list
	 */
	struct {
		char		ld_dev[16];	/* device name sd? */
	}			sc_ld[MFI_MAX_LD];
	int			sc_target_lds[MFI_MAX_LD];

	/* scsi ioctl from sd device */
	int			(*sc_ioctl)(struct device *, u_long, caddr_t);

	/* bio */
	struct mfi_conf		*sc_cfg;
	struct mfi_ctrl_info	sc_info;
	struct mfi_ld_list	sc_ld_list;
	struct mfi_ld_details	*sc_ld_details; /* array to all logical disks */
	int			sc_no_pd; /* used physical disks */
	int			sc_ld_sz; /* sizeof sc_ld_details */

	/* mgmt lock */
	struct rwlock		sc_lock;

	/* sensors */
	struct ksensordev	sc_sensordev;
	struct ksensor		*sc_bbu;
	struct ksensor		*sc_bbu_status;
	struct ksensor		*sc_sensors;
};
334 
335 #ifdef MFII_DEBUG
336 #define DPRINTF(x...)		do { if (mfii_debug) printf(x); } while(0)
337 #define DNPRINTF(n,x...)	do { if (mfii_debug & n) printf(x); } while(0)
338 #define	MFII_D_CMD		0x0001
339 #define	MFII_D_INTR		0x0002
340 #define	MFII_D_MISC		0x0004
341 #define	MFII_D_DMA		0x0008
342 #define	MFII_D_IOCTL		0x0010
343 #define	MFII_D_RW		0x0020
344 #define	MFII_D_MEM		0x0040
345 #define	MFII_D_CCB		0x0080
346 uint32_t	mfii_debug = 0
347 /*		    | MFII_D_CMD */
348 /*		    | MFII_D_INTR */
349 		    | MFII_D_MISC
350 /*		    | MFII_D_DMA */
351 /*		    | MFII_D_IOCTL */
352 /*		    | MFII_D_RW */
353 /*		    | MFII_D_MEM */
354 /*		    | MFII_D_CCB */
355 		;
356 #else
357 #define DPRINTF(x...)
358 #define DNPRINTF(n,x...)
359 #endif
360 
361 int		mfii_match(struct device *, void *, void *);
362 void		mfii_attach(struct device *, struct device *, void *);
363 int		mfii_detach(struct device *, int);
364 int		mfii_activate(struct device *, int);
365 
/* autoconf(9) attachment glue */
struct cfattach mfii_ca = {
	sizeof(struct mfii_softc),
	mfii_match,
	mfii_attach,
	mfii_detach,
	mfii_activate,
};
373 
/* autoconf(9) driver definition */
struct cfdriver mfii_cd = {
	NULL,
	"mfii",
	DV_DULL
};
379 
380 void		mfii_scsi_cmd(struct scsi_xfer *);
381 void		mfii_scsi_cmd_done(struct mfii_softc *, struct mfii_ccb *);
382 int		mfii_scsi_ioctl(struct scsi_link *, u_long, caddr_t, int);
383 int		mfii_ioctl_cache(struct scsi_link *, u_long, struct dk_cache *);
384 
/* adapter entry points for the logical drive scsibus */
struct scsi_adapter mfii_switch = {
	mfii_scsi_cmd, NULL, NULL, NULL, mfii_scsi_ioctl
};
388 
389 void		mfii_pd_scsi_cmd(struct scsi_xfer *);
390 int		mfii_pd_scsi_probe(struct scsi_link *);
391 
/* adapter entry points for the pass-through physical disk scsibus */
struct scsi_adapter mfii_pd_switch = {
	mfii_pd_scsi_cmd, NULL, mfii_pd_scsi_probe, NULL, NULL,
};
395 
396 #define DEVNAME(_sc)		((_sc)->sc_dev.dv_xname)
397 
398 u_int32_t		mfii_read(struct mfii_softc *, bus_size_t);
399 void			mfii_write(struct mfii_softc *, bus_size_t, u_int32_t);
400 
401 struct mfii_dmamem *	mfii_dmamem_alloc(struct mfii_softc *, size_t);
402 void			mfii_dmamem_free(struct mfii_softc *,
403 			    struct mfii_dmamem *);
404 
405 void *			mfii_get_ccb(void *);
406 void			mfii_put_ccb(void *, void *);
407 int			mfii_init_ccb(struct mfii_softc *);
408 void			mfii_scrub_ccb(struct mfii_ccb *);
409 
410 int			mfii_transition_firmware(struct mfii_softc *);
411 int			mfii_initialise_firmware(struct mfii_softc *);
412 int			mfii_get_info(struct mfii_softc *);
413 int			mfii_syspd(struct mfii_softc *);
414 
415 void			mfii_start(struct mfii_softc *, struct mfii_ccb *);
416 void			mfii_done(struct mfii_softc *, struct mfii_ccb *);
417 int			mfii_poll(struct mfii_softc *, struct mfii_ccb *);
418 void			mfii_poll_done(struct mfii_softc *, struct mfii_ccb *);
419 int			mfii_exec(struct mfii_softc *, struct mfii_ccb *);
420 void			mfii_exec_done(struct mfii_softc *, struct mfii_ccb *);
421 int			mfii_my_intr(struct mfii_softc *);
422 int			mfii_intr(void *);
423 void			mfii_postq(struct mfii_softc *);
424 
425 int			mfii_load_ccb(struct mfii_softc *, struct mfii_ccb *,
426 			    void *, int);
427 int			mfii_load_mfa(struct mfii_softc *, struct mfii_ccb *,
428 			    void *, int);
429 
430 int			mfii_mfa_poll(struct mfii_softc *, struct mfii_ccb *);
431 
432 int			mfii_mgmt(struct mfii_softc *, uint32_t,
433 			    const union mfi_mbox *, void *, size_t, int);
434 int			mfii_do_mgmt(struct mfii_softc *, struct mfii_ccb *,
435 			    uint32_t, const union mfi_mbox *, void *, size_t,
436 			    int);
437 void			mfii_empty_done(struct mfii_softc *, struct mfii_ccb *);
438 
439 int			mfii_scsi_cmd_io(struct mfii_softc *,
440 			    struct scsi_xfer *);
441 int			mfii_scsi_cmd_cdb(struct mfii_softc *,
442 			    struct scsi_xfer *);
443 int			mfii_pd_scsi_cmd_cdb(struct mfii_softc *,
444 			    struct scsi_xfer *);
445 void			mfii_scsi_cmd_tmo(void *);
446 
447 int			mfii_dev_handles_update(struct mfii_softc *sc);
448 void			mfii_dev_handles_smr(void *pd_arg);
449 
450 void			mfii_abort_task(void *);
451 void			mfii_abort(struct mfii_softc *, struct mfii_ccb *,
452 			    uint16_t, uint16_t, uint8_t, uint32_t);
453 void			mfii_scsi_cmd_abort_done(struct mfii_softc *,
454 			    struct mfii_ccb *);
455 
456 int			mfii_aen_register(struct mfii_softc *);
457 void			mfii_aen_start(struct mfii_softc *, struct mfii_ccb *,
458 			    struct mfii_dmamem *, uint32_t);
459 void			mfii_aen_done(struct mfii_softc *, struct mfii_ccb *);
460 void			mfii_aen(void *);
461 void			mfii_aen_unregister(struct mfii_softc *);
462 
463 void			mfii_aen_pd_insert(struct mfii_softc *,
464 			    const struct mfi_evtarg_pd_address *);
465 void			mfii_aen_pd_remove(struct mfii_softc *,
466 			    const struct mfi_evtarg_pd_address *);
467 void			mfii_aen_pd_state_change(struct mfii_softc *,
468 			    const struct mfi_evtarg_pd_state *);
469 void			mfii_aen_ld_update(struct mfii_softc *);
470 
471 #if NBIO > 0
472 int		mfii_ioctl(struct device *, u_long, caddr_t);
473 int		mfii_bio_getitall(struct mfii_softc *);
474 int		mfii_ioctl_inq(struct mfii_softc *, struct bioc_inq *);
475 int		mfii_ioctl_vol(struct mfii_softc *, struct bioc_vol *);
476 int		mfii_ioctl_disk(struct mfii_softc *, struct bioc_disk *);
477 int		mfii_ioctl_alarm(struct mfii_softc *, struct bioc_alarm *);
478 int		mfii_ioctl_blink(struct mfii_softc *sc, struct bioc_blink *);
479 int		mfii_ioctl_setstate(struct mfii_softc *,
480 		    struct bioc_setstate *);
481 int		mfii_ioctl_patrol(struct mfii_softc *sc, struct bioc_patrol *);
482 int		mfii_bio_hs(struct mfii_softc *, int, int, void *);
483 
484 #ifndef SMALL_KERNEL
/*
 * Human-readable labels for battery backup unit status indicator bits.
 * NOTE(review): assumed to be indexed by bit position of the BBU status
 * word -- confirm against mfii_bbu() (not visible in this chunk).
 */
static const char *mfi_bbu_indicators[] = {
	"pack missing",
	"voltage low",
	"temp high",
	"charge active",
	"discharge active",
	"learn cycle req'd",
	"learn cycle active",
	"learn cycle failed",
	"learn cycle timeout",
	"I2C errors",
	"replace pack",
	"low capacity",
	"periodic learn req'd"
};
500 
501 void		mfii_init_ld_sensor(struct mfii_softc *, int);
502 void		mfii_refresh_ld_sensor(struct mfii_softc *, int);
503 int		mfii_create_sensors(struct mfii_softc *);
504 void		mfii_refresh_sensors(void *);
505 void		mfii_bbu(struct mfii_softc *);
506 #endif /* SMALL_KERNEL */
507 #endif /* NBIO > 0 */
508 
509 /*
510  * mfii boards support asynchronous (and non-polled) completion of
511  * dcmds by proxying them through a passthru mpii command that points
512  * at a dcmd frame. since the passthru command is submitted like
513  * the scsi commands using an SMID in the request descriptor,
514  * ccb_request memory * must contain the passthru command because
515  * that is what the SMID refers to. this means ccb_request cannot
516  * contain the dcmd. rather than allocating separate dma memory to
517  * hold the dcmd, we reuse the sense memory buffer for it.
518  */
519 
520 void			mfii_dcmd_start(struct mfii_softc *,
521 			    struct mfii_ccb *);
522 
static inline void
mfii_dcmd_scrub(struct mfii_ccb *ccb)
{
	/* zero the sense buffer, which doubles as the dcmd frame */
	memset(ccb->ccb_sense, 0, sizeof(*ccb->ccb_sense));
}
528 
static inline struct mfi_dcmd_frame *
mfii_dcmd_frame(struct mfii_ccb *ccb)
{
	/* the dcmd frame is overlaid on the sense buffer; ensure it fits */
	CTASSERT(sizeof(struct mfi_dcmd_frame) <= sizeof(*ccb->ccb_sense));
	return ((struct mfi_dcmd_frame *)ccb->ccb_sense);
}
535 
static inline void
mfii_dcmd_sync(struct mfii_softc *sc, struct mfii_ccb *ccb, int flags)
{
	/* sync this ccb's slice of the shared sense/dcmd DMA memory */
	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_sense),
	    ccb->ccb_sense_offset, sizeof(*ccb->ccb_sense), flags);
}
542 
543 #define mfii_fw_state(_sc) mfii_read((_sc), MFI_OSP)
544 
/* gen2 (Thunderbolt, SAS2208) parameters */
const struct mfii_iop mfii_iop_thunderbolt = {
	MFII_BAR,
	MFII_IOP_NUM_SGE_LOC_ORIG,
	0,
	MFII_REQ_TYPE_LDIO,
	0,
	MFII_SGE_CHAIN_ELEMENT | MFII_SGE_ADDR_IOCPLBNTA,
	0
};

/*
 * a lot of these values depend on us not implementing fastpath yet.
 */
const struct mfii_iop mfii_iop_25 = {
	MFII_BAR,
	MFII_IOP_NUM_SGE_LOC_ORIG,
	MFII_RAID_CTX_RL_FLAGS_CPU0, /* | MFII_RAID_CTX_RL_FLAGS_SEQNO_EN */
	MFII_REQ_TYPE_NO_LOCK,
	MFII_RAID_CTX_TYPE_CUDA | 0x1,
	MFII_SGE_CHAIN_ELEMENT,
	MFII_SGE_END_OF_LIST
};

/* gen3.5 (SAS34xx/35xx) parameters; note the different BAR */
const struct mfii_iop mfii_iop_35 = {
	MFII_BAR_35,
	MFII_IOP_NUM_SGE_LOC_35,
	MFII_RAID_CTX_ROUTING_FLAGS_CPU0, /* | MFII_RAID_CTX_ROUTING_FLAGS_SQN */
	MFII_REQ_TYPE_NO_LOCK,
	MFII_RAID_CTX_TYPE_CUDA | 0x1,
	MFII_SGE_CHAIN_ELEMENT,
	MFII_SGE_END_OF_LIST
};
577 
/* maps a pci vendor/product pair to its iop parameter set */
struct mfii_device {
	pcireg_t		mpd_vendor;
	pcireg_t		mpd_product;
	const struct mfii_iop	*mpd_iop;
};

/* supported controllers; searched linearly by mfii_find_iop() */
const struct mfii_device mfii_devices[] = {
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_2208,
	    &mfii_iop_thunderbolt },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3008,
	    &mfii_iop_25 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3108,
	    &mfii_iop_25 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3404,
	    &mfii_iop_35 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3504,
	    &mfii_iop_35 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3408,
	    &mfii_iop_35 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3508,
	    &mfii_iop_35 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3416,
	    &mfii_iop_35 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3516,
	    &mfii_iop_35 }
};
604 
605 const struct mfii_iop *mfii_find_iop(struct pci_attach_args *);
606 
607 const struct mfii_iop *
608 mfii_find_iop(struct pci_attach_args *pa)
609 {
610 	const struct mfii_device *mpd;
611 	int i;
612 
613 	for (i = 0; i < nitems(mfii_devices); i++) {
614 		mpd = &mfii_devices[i];
615 
616 		if (mpd->mpd_vendor == PCI_VENDOR(pa->pa_id) &&
617 		    mpd->mpd_product == PCI_PRODUCT(pa->pa_id))
618 			return (mpd->mpd_iop);
619 	}
620 
621 	return (NULL);
622 }
623 
624 int
625 mfii_match(struct device *parent, void *match, void *aux)
626 {
627 	return ((mfii_find_iop(aux) != NULL) ? 1 : 0);
628 }
629 
630 void
631 mfii_attach(struct device *parent, struct device *self, void *aux)
632 {
633 	struct mfii_softc *sc = (struct mfii_softc *)self;
634 	struct pci_attach_args *pa = aux;
635 	pcireg_t memtype;
636 	pci_intr_handle_t ih;
637 	struct scsibus_attach_args saa;
638 	u_int32_t status, scpad2, scpad3;
639 	int chain_frame_sz, nsge_in_io, nsge_in_chain, i;
640 
641 	/* init sc */
642 	sc->sc_iop = mfii_find_iop(aux);
643 	sc->sc_dmat = pa->pa_dmat;
644 	SIMPLEQ_INIT(&sc->sc_ccb_freeq);
645 	mtx_init(&sc->sc_ccb_mtx, IPL_BIO);
646 	mtx_init(&sc->sc_post_mtx, IPL_BIO);
647 	mtx_init(&sc->sc_reply_postq_mtx, IPL_BIO);
648 	scsi_iopool_init(&sc->sc_iopool, sc, mfii_get_ccb, mfii_put_ccb);
649 
650 	rw_init(&sc->sc_lock, "mfii_lock");
651 
652 	sc->sc_aen_ccb = NULL;
653 	task_set(&sc->sc_aen_task, mfii_aen, sc);
654 
655 	mtx_init(&sc->sc_abort_mtx, IPL_BIO);
656 	SIMPLEQ_INIT(&sc->sc_abort_list);
657 	task_set(&sc->sc_abort_task, mfii_abort_task, sc);
658 
659 	/* wire up the bus shizz */
660 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, sc->sc_iop->bar);
661 	if (pci_mapreg_map(pa, sc->sc_iop->bar, memtype, 0,
662 	    &sc->sc_iot, &sc->sc_ioh, NULL, &sc->sc_ios, MFII_PCI_MEMSIZE)) {
663 		printf(": unable to map registers\n");
664 		return;
665 	}
666 
667 	/* disable interrupts */
668 	mfii_write(sc, MFI_OMSK, 0xffffffff);
669 
670 	if (pci_intr_map_msi(pa, &ih) != 0 && pci_intr_map(pa, &ih) != 0) {
671 		printf(": unable to map interrupt\n");
672 		goto pci_unmap;
673 	}
674 	printf(": %s\n", pci_intr_string(pa->pa_pc, ih));
675 
676 	/* lets get started */
677 	if (mfii_transition_firmware(sc))
678 		goto pci_unmap;
679 
680 	/* determine max_cmds (refer to the Linux megaraid_sas driver) */
681 	scpad3 = mfii_read(sc, MFII_OSP3);
682 	status = mfii_fw_state(sc);
683 	sc->sc_max_fw_cmds = scpad3 & MFI_STATE_MAXCMD_MASK;
684 	if (sc->sc_max_fw_cmds == 0)
685 		sc->sc_max_fw_cmds = status & MFI_STATE_MAXCMD_MASK;
686 	/*
687 	 * reduce max_cmds by 1 to ensure that the reply queue depth does not
688 	 * exceed FW supplied max_fw_cmds.
689 	 */
690 	sc->sc_max_cmds = min(sc->sc_max_fw_cmds, 1024) - 1;
691 
692 	/* determine max_sgl (refer to the Linux megaraid_sas driver) */
693 	scpad2 = mfii_read(sc, MFII_OSP2);
694 	chain_frame_sz =
695 		((scpad2 & MFII_MAX_CHAIN_MASK) >> MFII_MAX_CHAIN_SHIFT) *
696 		((scpad2 & MFII_MAX_CHAIN_UNIT) ? MFII_1MB_IO : MFII_256K_IO);
697 	if (chain_frame_sz < MFII_CHAIN_FRAME_MIN)
698 		chain_frame_sz = MFII_CHAIN_FRAME_MIN;
699 
700 	nsge_in_io = (MFII_REQUEST_SIZE -
701 		sizeof(struct mpii_msg_scsi_io) -
702 		sizeof(struct mfii_raid_context)) / sizeof(struct mfii_sge);
703 	nsge_in_chain = chain_frame_sz / sizeof(struct mfii_sge);
704 
705 	/* round down to nearest power of two */
706 	sc->sc_max_sgl = 1;
707 	while ((sc->sc_max_sgl << 1) <= (nsge_in_io + nsge_in_chain))
708 		sc->sc_max_sgl <<= 1;
709 
710 	DNPRINTF(MFII_D_MISC, "%s: OSP 0x%08x, OSP2 0x%08x, OSP3 0x%08x\n",
711 	    DEVNAME(sc), status, scpad2, scpad3);
712 	DNPRINTF(MFII_D_MISC, "%s: max_fw_cmds %d, max_cmds %d\n",
713 	    DEVNAME(sc), sc->sc_max_fw_cmds, sc->sc_max_cmds);
714 	DNPRINTF(MFII_D_MISC, "%s: nsge_in_io %d, nsge_in_chain %d, "
715 	    "max_sgl %d\n", DEVNAME(sc), nsge_in_io, nsge_in_chain,
716 	    sc->sc_max_sgl);
717 
718 	/* sense memory */
719 	CTASSERT(sizeof(struct mfi_sense) == MFI_SENSE_SIZE);
720 	sc->sc_sense = mfii_dmamem_alloc(sc, sc->sc_max_cmds * MFI_SENSE_SIZE);
721 	if (sc->sc_sense == NULL) {
722 		printf("%s: unable to allocate sense memory\n", DEVNAME(sc));
723 		goto pci_unmap;
724 	}
725 
726 	/* reply post queue */
727 	sc->sc_reply_postq_depth = roundup(sc->sc_max_fw_cmds, 16);
728 
729 	sc->sc_reply_postq = mfii_dmamem_alloc(sc,
730 	    sc->sc_reply_postq_depth * sizeof(struct mpii_reply_descr));
731 	if (sc->sc_reply_postq == NULL)
732 		goto free_sense;
733 
734 	memset(MFII_DMA_KVA(sc->sc_reply_postq), 0xff,
735 	    MFII_DMA_LEN(sc->sc_reply_postq));
736 
737 	/* MPII request frame array */
738 	sc->sc_requests = mfii_dmamem_alloc(sc,
739 	    MFII_REQUEST_SIZE * (sc->sc_max_cmds + 1));
740 	if (sc->sc_requests == NULL)
741 		goto free_reply_postq;
742 
743 	/* MFI command frame array */
744 	sc->sc_mfi = mfii_dmamem_alloc(sc, sc->sc_max_cmds * MFI_FRAME_SIZE);
745 	if (sc->sc_mfi == NULL)
746 		goto free_requests;
747 
748 	/* MPII SGL array */
749 	sc->sc_sgl = mfii_dmamem_alloc(sc, sc->sc_max_cmds *
750 	    sizeof(struct mfii_sge) * sc->sc_max_sgl);
751 	if (sc->sc_sgl == NULL)
752 		goto free_mfi;
753 
754 	if (mfii_init_ccb(sc) != 0) {
755 		printf("%s: could not init ccb list\n", DEVNAME(sc));
756 		goto free_sgl;
757 	}
758 
759 	/* kickstart firmware with all addresses and pointers */
760 	if (mfii_initialise_firmware(sc) != 0) {
761 		printf("%s: could not initialize firmware\n", DEVNAME(sc));
762 		goto free_sgl;
763 	}
764 
765 	if (mfii_get_info(sc) != 0) {
766 		printf("%s: could not retrieve controller information\n",
767 		    DEVNAME(sc));
768 		goto free_sgl;
769 	}
770 
771 	printf("%s: \"%s\", firmware %s", DEVNAME(sc),
772 	    sc->sc_info.mci_product_name, sc->sc_info.mci_package_version);
773 	if (letoh16(sc->sc_info.mci_memory_size) > 0)
774 		printf(", %uMB cache", letoh16(sc->sc_info.mci_memory_size));
775 	printf("\n");
776 
777 	sc->sc_ih = pci_intr_establish(sc->sc_pc, ih, IPL_BIO,
778 	    mfii_intr, sc, DEVNAME(sc));
779 	if (sc->sc_ih == NULL)
780 		goto free_sgl;
781 
782 	saa.saa_adapter_softc = sc;
783 	saa.saa_adapter = &mfii_switch;
784 	saa.saa_adapter_target = SDEV_NO_ADAPTER_TARGET;
785 	saa.saa_adapter_buswidth = sc->sc_info.mci_max_lds;
786 	saa.saa_luns = 8;
787 	saa.saa_openings = sc->sc_max_cmds;
788 	saa.saa_pool = &sc->sc_iopool;
789 	saa.saa_quirks = saa.saa_flags = 0;
790 	saa.saa_wwpn = saa.saa_wwnn = 0;
791 
792 	sc->sc_scsibus = (struct scsibus_softc *)config_found(&sc->sc_dev, &saa,
793 	    scsiprint);
794 
795 	mfii_syspd(sc);
796 
797 	if (mfii_aen_register(sc) != 0) {
798 		/* error printed by mfii_aen_register */
799 		goto intr_disestablish;
800 	}
801 
802 	if (mfii_mgmt(sc, MR_DCMD_LD_GET_LIST, NULL, &sc->sc_ld_list,
803 	    sizeof(sc->sc_ld_list), SCSI_DATA_IN) != 0) {
804 		printf("%s: getting list of logical disks failed\n", DEVNAME(sc));
805 		goto intr_disestablish;
806 	}
807 	memset(sc->sc_target_lds, -1, sizeof(sc->sc_target_lds));
808 	for (i = 0; i < sc->sc_ld_list.mll_no_ld; i++) {
809 		int target = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
810 		sc->sc_target_lds[target] = i;
811 	}
812 
813 	/* enable interrupts */
814 	mfii_write(sc, MFI_OSTS, 0xffffffff);
815 	mfii_write(sc, MFI_OMSK, ~MFII_OSTS_INTR_VALID);
816 
817 #if NBIO > 0
818 	if (bio_register(&sc->sc_dev, mfii_ioctl) != 0)
819 		panic("%s: controller registration failed", DEVNAME(sc));
820 	else
821 		sc->sc_ioctl = mfii_ioctl;
822 
823 #ifndef SMALL_KERNEL
824 	if (mfii_create_sensors(sc) != 0)
825 		printf("%s: unable to create sensors\n", DEVNAME(sc));
826 #endif
827 #endif /* NBIO > 0 */
828 
829 	return;
830 intr_disestablish:
831 	pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
832 free_sgl:
833 	mfii_dmamem_free(sc, sc->sc_sgl);
834 free_mfi:
835 	mfii_dmamem_free(sc, sc->sc_mfi);
836 free_requests:
837 	mfii_dmamem_free(sc, sc->sc_requests);
838 free_reply_postq:
839 	mfii_dmamem_free(sc, sc->sc_reply_postq);
840 free_sense:
841 	mfii_dmamem_free(sc, sc->sc_sense);
842 pci_unmap:
843 	bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
844 }
845 
846 static inline uint16_t
847 mfii_dev_handle(struct mfii_softc *sc, uint16_t target)
848 {
849 	struct mfii_pd_dev_handles *handles;
850 	uint16_t handle;
851 
852 	smr_read_enter();
853 	handles = SMR_PTR_GET(&sc->sc_pd->pd_dev_handles);
854 	handle = handles->pd_handles[target];
855 	smr_read_leave();
856 
857 	return (handle);
858 }
859 
860 void
861 mfii_dev_handles_smr(void *pd_arg)
862 {
863 	struct mfii_pd_dev_handles *handles = pd_arg;
864 
865 	free(handles, M_DEVBUF, sizeof(*handles));
866 }
867 
/*
 * Fetch the current LD map from the firmware and republish the table
 * mapping physical disk targets to MPII dev handles.  Readers
 * (mfii_dev_handle()) access the table under SMR, so the previous
 * table must stay valid until all readers drain; it is released via
 * smr_call().  Returns 0 on success or EIO if the dcmd fails.
 */
int
mfii_dev_handles_update(struct mfii_softc *sc)
{
	struct mfii_ld_map *lm;
	struct mfii_pd_dev_handles *handles, *old_handles;
	int i;
	int rv = 0;

	lm = malloc(sizeof(*lm), M_TEMP, M_WAITOK|M_ZERO);

	rv = mfii_mgmt(sc, MR_DCMD_LD_MAP_GET_INFO, NULL, lm, sizeof(*lm),
	    SCSI_DATA_IN|SCSI_NOSLEEP);

	if (rv != 0) {
		rv = EIO;
		goto free_lm;
	}

	/* build the replacement table before publishing it */
	handles = malloc(sizeof(*handles), M_DEVBUF, M_WAITOK);
	smr_init(&handles->pd_smr);
	for (i = 0; i < MFI_MAX_PD; i++)
		handles->pd_handles[i] = lm->mlm_dev_handle[i].mdh_cur_handle;

	/* commit the updated info */
	sc->sc_pd->pd_timeout = lm->mlm_pd_timeout;
	old_handles = SMR_PTR_GET_LOCKED(&sc->sc_pd->pd_dev_handles);
	SMR_PTR_SET_LOCKED(&sc->sc_pd->pd_dev_handles, handles);

	/* old_handles is NULL only on the first call from mfii_syspd() */
	if (old_handles != NULL)
		smr_call(&old_handles->pd_smr, mfii_dev_handles_smr, old_handles);

free_lm:
	free(lm, M_TEMP, sizeof(*lm));

	return (rv);
}
904 
905 int
906 mfii_syspd(struct mfii_softc *sc)
907 {
908 	struct scsibus_attach_args saa;
909 
910 	sc->sc_pd = malloc(sizeof(*sc->sc_pd), M_DEVBUF, M_WAITOK|M_ZERO);
911 	if (sc->sc_pd == NULL)
912 		return (1);
913 
914 	if (mfii_dev_handles_update(sc) != 0)
915 		goto free_pdsc;
916 
917 	saa.saa_adapter =  &mfii_pd_switch;
918 	saa.saa_adapter_softc = sc;
919 	saa.saa_adapter_buswidth = MFI_MAX_PD;
920 	saa.saa_adapter_target = SDEV_NO_ADAPTER_TARGET;
921 	saa.saa_luns = 8;
922 	saa.saa_openings = sc->sc_max_cmds - 1;
923 	saa.saa_pool = &sc->sc_iopool;
924 	saa.saa_quirks = saa.saa_flags = 0;
925 	saa.saa_wwpn = saa.saa_wwnn = 0;
926 
927 	sc->sc_pd->pd_scsibus = (struct scsibus_softc *)
928 	    config_found(&sc->sc_dev, &saa, scsiprint);
929 
930 	return (0);
931 
932 free_pdsc:
933 	free(sc->sc_pd, M_DEVBUF, sizeof(*sc->sc_pd));
934 	return (1);
935 }
936 
/*
 * Detach the controller: tear down sensors, unregister AEN, remove the
 * interrupt handler and release all DMA resources in reverse order of
 * allocation.  A NULL sc_ih means attach never completed interrupt
 * setup, so there is nothing to undo.
 */
int
mfii_detach(struct device *self, int flags)
{
	struct mfii_softc *sc = (struct mfii_softc *)self;

	if (sc->sc_ih == NULL)
		return (0);

#ifndef SMALL_KERNEL
	if (sc->sc_sensors) {
		sensordev_deinstall(&sc->sc_sensordev);
		free(sc->sc_sensors, M_DEVBUF,
		    MFI_MAX_LD * sizeof(struct ksensor));
	}

	if (sc->sc_bbu) {
		free(sc->sc_bbu, M_DEVBUF, 4 * sizeof(*sc->sc_bbu));
	}

	if (sc->sc_bbu_status) {
		/*
		 * NOTE(review): the size multiplies by
		 * sizeof(mfi_bbu_indicators) (bytes of the pointer array),
		 * not nitems() -- confirm this matches the allocation in
		 * mfii_create_sensors() (not visible in this chunk).
		 */
		free(sc->sc_bbu_status, M_DEVBUF,
		    sizeof(*sc->sc_bbu_status) * sizeof(mfi_bbu_indicators));
	}
#endif /* SMALL_KERNEL */

	mfii_aen_unregister(sc);
	pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
	mfii_dmamem_free(sc, sc->sc_sgl);
	mfii_dmamem_free(sc, sc->sc_mfi);
	mfii_dmamem_free(sc, sc->sc_requests);
	mfii_dmamem_free(sc, sc->sc_reply_postq);
	mfii_dmamem_free(sc, sc->sc_sense);
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);

	return (0);
}
973 
974 static void
975 mfii_flush_cache(struct mfii_softc *sc, struct mfii_ccb *ccb)
976 {
977 	union mfi_mbox mbox = {
978 		.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE,
979 	};
980 	int rv;
981 
982 	mfii_scrub_ccb(ccb);
983 	rv = mfii_do_mgmt(sc, ccb, MR_DCMD_CTRL_CACHE_FLUSH, &mbox,
984 	    NULL, 0, SCSI_NOSLEEP);
985 	if (rv != 0) {
986 		printf("%s: unable to flush cache\n", DEVNAME(sc));
987 		return;
988 	}
989 }
990 
991 static void
992 mfii_shutdown(struct mfii_softc *sc, struct mfii_ccb *ccb)
993 {
994 	int rv;
995 
996 	mfii_scrub_ccb(ccb);
997 	rv = mfii_do_mgmt(sc, ccb, MR_DCMD_CTRL_SHUTDOWN, NULL,
998 	    NULL, 0, SCSI_POLL);
999 	if (rv != 0) {
1000 		printf("%s: unable to shutdown controller\n", DEVNAME(sc));
1001 		return;
1002 	}
1003 }
1004 
1005 static void
1006 mfii_powerdown(struct mfii_softc *sc)
1007 {
1008 	struct mfii_ccb *ccb;
1009 
1010 	ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP);
1011 	if (ccb == NULL) {
1012 		printf("%s: unable to allocate ccb for shutdown\n",
1013 		    DEVNAME(sc));
1014 		return;
1015 	}
1016 
1017 	mfii_flush_cache(sc, ccb);
1018 	mfii_shutdown(sc, ccb);
1019 	scsi_io_put(&sc->sc_iopool, ccb);
1020 }
1021 
1022 int
1023 mfii_activate(struct device *self, int act)
1024 {
1025 	struct mfii_softc *sc = (struct mfii_softc *)self;
1026 	int rv;
1027 
1028 	switch (act) {
1029 	case DVACT_POWERDOWN:
1030 		rv = config_activate_children(&sc->sc_dev, act);
1031 		mfii_powerdown(sc);
1032 		break;
1033 	default:
1034 		rv = config_activate_children(&sc->sc_dev, act);
1035 		break;
1036 	}
1037 
1038 	return (rv);
1039 }
1040 
/*
 * Read a 32bit controller register.  A read barrier is issued first so
 * the access observes the device's current state rather than anything
 * buffered/prefetched by the bus.
 */
u_int32_t
mfii_read(struct mfii_softc *sc, bus_size_t r)
{
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, r));
}
1048 
/*
 * Write a 32bit controller register, followed by a write barrier to
 * push the store out to the device before the caller proceeds.
 */
void
mfii_write(struct mfii_softc *sc, bus_size_t r, u_int32_t v)
{
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_WRITE);
}
1056 
/*
 * Allocate a chunk of bus-dma memory shared with the controller: the
 * descriptor itself, a dma map, backing pages (zeroed, page aligned,
 * constrained to a single segment), a kernel mapping and the loaded
 * map.  Returns NULL on any failure, unwinding whatever had been set
 * up so far.  The result is released with mfii_dmamem_free().
 */
struct mfii_dmamem *
mfii_dmamem_alloc(struct mfii_softc *sc, size_t size)
{
	struct mfii_dmamem *m;
	int nsegs;

	m = malloc(sizeof(*m), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (m == NULL)
		return (NULL);

	m->mdm_size = size;

	/* a single segment keeps the device-visible address contiguous */
	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &m->mdm_map) != 0)
		goto mdmfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &m->mdm_seg, 1,
	    &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &m->mdm_seg, nsegs, size, &m->mdm_kva,
	    BUS_DMA_NOWAIT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, m->mdm_map, m->mdm_kva, size, NULL,
	    BUS_DMA_NOWAIT) != 0)
		goto unmap;

	return (m);

	/* error unwind, in reverse order of construction */
unmap:
	bus_dmamem_unmap(sc->sc_dmat, m->mdm_kva, m->mdm_size);
free:
	bus_dmamem_free(sc->sc_dmat, &m->mdm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, m->mdm_map);
mdmfree:
	free(m, M_DEVBUF, sizeof *m);

	return (NULL);
}
1098 
/*
 * Release everything mfii_dmamem_alloc() set up, in the reverse order
 * it was constructed: unload the map, unmap the kva, free the pages,
 * destroy the map, then free the descriptor.
 */
void
mfii_dmamem_free(struct mfii_softc *sc, struct mfii_dmamem *m)
{
	bus_dmamap_unload(sc->sc_dmat, m->mdm_map);
	bus_dmamem_unmap(sc->sc_dmat, m->mdm_kva, m->mdm_size);
	bus_dmamem_free(sc->sc_dmat, &m->mdm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, m->mdm_map);
	free(m, M_DEVBUF, sizeof *m);
}
1108 
/*
 * Fire off an MFI DCMD wrapped in an MPI passthru request.  The
 * request frame is laid out as: MPI SCSI IO header, raid context,
 * then a single chain SGE.  The chain element points at the ccb's
 * sense buffer dva, which holds the MFI frame built by the caller
 * (presumably via mfii_dcmd_frame() - TODO confirm that mapping).
 */
void
mfii_dcmd_start(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
	struct mfii_sge *sge = (struct mfii_sge *)(ctx + 1);

	io->function = MFII_FUNCTION_PASSTHRU_IO;
	/* offsets are expressed in 32bit words and 16 byte chunks resp. */
	io->sgl_offset0 = (uint32_t *)sge - (uint32_t *)io;
	io->chain_offset = io->sgl_offset0 / 4;

	htolem64(&sge->sg_addr, ccb->ccb_sense_dva);
	htolem32(&sge->sg_len, sizeof(*ccb->ccb_sense));
	sge->sg_flags = MFII_SGE_CHAIN_ELEMENT | MFII_SGE_ADDR_IOCPLBNTA;

	ccb->ccb_req.flags = MFII_REQ_TYPE_SCSI;
	ccb->ccb_req.smid = letoh16(ccb->ccb_smid);

	mfii_start(sc, ccb);
}
1129 
/*
 * Set up asynchronous event notification: fetch the firmware's event
 * log info to learn the boot sequence number, allocate a buffer for
 * event details, and arm the first AEN request from that sequence
 * number.  On success the ccb and the dma buffer are handed to the
 * AEN machinery permanently (mfii_aen_start() re-arms them forever),
 * so they are deliberately not released here.
 */
int
mfii_aen_register(struct mfii_softc *sc)
{
	struct mfi_evt_log_info mel;
	struct mfii_ccb *ccb;
	struct mfii_dmamem *mdm;
	int rv;

	ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP);
	if (ccb == NULL) {
		printf("%s: unable to allocate ccb for aen\n", DEVNAME(sc));
		return (ENOMEM);
	}

	memset(&mel, 0, sizeof(mel));
	mfii_scrub_ccb(ccb);

	rv = mfii_do_mgmt(sc, ccb, MR_DCMD_CTRL_EVENT_GET_INFO, NULL,
	    &mel, sizeof(mel), SCSI_DATA_IN|SCSI_NOSLEEP);
	if (rv != 0) {
		scsi_io_put(&sc->sc_iopool, ccb);
		printf("%s: unable to get event info\n", DEVNAME(sc));
		return (EIO);
	}

	mdm = mfii_dmamem_alloc(sc, sizeof(struct mfi_evt_detail));
	if (mdm == NULL) {
		scsi_io_put(&sc->sc_iopool, ccb);
		printf("%s: unable to allocate event data\n", DEVNAME(sc));
		return (ENOMEM);
	}

	/* replay all the events from boot */
	mfii_aen_start(sc, ccb, mdm, lemtoh32(&mel.mel_boot_seq_num));

	return (0);
}
1167 
/*
 * (Re)arm the asynchronous event wait.  Builds an MFI
 * MR_DCMD_CTRL_EVENT_WAIT frame asking for all events with sequence
 * number >= seq, with a 64bit SGL pointing at the event detail dma
 * buffer, and submits it via mfii_dcmd_start().  Completion is
 * delivered through mfii_aen_done().
 */
void
mfii_aen_start(struct mfii_softc *sc, struct mfii_ccb *ccb,
    struct mfii_dmamem *mdm, uint32_t seq)
{
	struct mfi_dcmd_frame *dcmd = mfii_dcmd_frame(ccb);
	struct mfi_frame_header *hdr = &dcmd->mdf_header;
	union mfi_sgl *sgl = &dcmd->mdf_sgl;
	union mfi_evt_class_locale mec;

	/* start from a clean ccb, dcmd frame and event buffer */
	mfii_scrub_ccb(ccb);
	mfii_dcmd_scrub(ccb);
	memset(MFII_DMA_KVA(mdm), 0, MFII_DMA_LEN(mdm));

	ccb->ccb_cookie = mdm;
	ccb->ccb_done = mfii_aen_done;
	sc->sc_aen_ccb = ccb;

	/* subscribe to everything: debug class and up, all locales */
	mec.mec_members.class = MFI_EVT_CLASS_DEBUG;
	mec.mec_members.reserved = 0;
	mec.mec_members.locale = htole16(MFI_EVT_LOCALE_ALL);

	hdr->mfh_cmd = MFI_CMD_DCMD;
	hdr->mfh_sg_count = 1;
	hdr->mfh_flags = htole16(MFI_FRAME_DIR_READ | MFI_FRAME_SGL64);
	htolem32(&hdr->mfh_data_len, MFII_DMA_LEN(mdm));
	dcmd->mdf_opcode = htole32(MR_DCMD_CTRL_EVENT_WAIT);
	htolem32(&dcmd->mdf_mbox.w[0], seq);
	htolem32(&dcmd->mdf_mbox.w[1], mec.mec_word);
	htolem64(&sgl->sg64[0].addr, MFII_DMA_DVA(mdm));
	htolem32(&sgl->sg64[0].len, MFII_DMA_LEN(mdm));

	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(mdm),
	    0, MFII_DMA_LEN(mdm), BUS_DMASYNC_PREREAD);

	mfii_dcmd_sync(sc, ccb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	mfii_dcmd_start(sc, ccb);
}
1205 
/*
 * Completion handler for the AEN ccb.  Runs from interrupt context, so
 * the real work is punted to the systq task.
 */
void
mfii_aen_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	KASSERT(sc->sc_aen_ccb == ccb);

	/* defer to a thread with KERNEL_LOCK so we can run autoconf */
	task_add(systq, &sc->sc_aen_task);
}
1214 
/*
 * AEN task: runs in process context after mfii_aen_done().  Syncs the
 * completed event detail out of dma memory, dispatches on the event
 * code (physical disk insert/remove/state change, logical disk
 * create/delete), then re-arms the wait at the next sequence number.
 */
void
mfii_aen(void *arg)
{
	struct mfii_softc *sc = arg;
	struct mfii_ccb *ccb = sc->sc_aen_ccb;
	struct mfii_dmamem *mdm = ccb->ccb_cookie;
	const struct mfi_evt_detail *med = MFII_DMA_KVA(mdm);
	uint32_t code;

	mfii_dcmd_sync(sc, ccb,
	    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);
	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(mdm),
	    0, MFII_DMA_LEN(mdm), BUS_DMASYNC_POSTREAD);

	code = lemtoh32(&med->med_code);

#if 0
	log(LOG_DEBUG, "%s (seq %u, code %08x) %s\n", DEVNAME(sc),
	    lemtoh32(&med->med_seq_num), code, med->med_description);
#endif

	switch (code) {
	case MFI_EVT_PD_INSERTED_EXT:
		/* only act if the payload really is a pd address */
		if (med->med_arg_type != MFI_EVT_ARGS_PD_ADDRESS)
			break;

		mfii_aen_pd_insert(sc, &med->args.pd_address);
		break;
	case MFI_EVT_PD_REMOVED_EXT:
		if (med->med_arg_type != MFI_EVT_ARGS_PD_ADDRESS)
			break;

		mfii_aen_pd_remove(sc, &med->args.pd_address);
		break;

	case MFI_EVT_PD_STATE_CHANGE:
		if (med->med_arg_type != MFI_EVT_ARGS_PD_STATE)
			break;

		mfii_aen_pd_state_change(sc, &med->args.pd_state);
		break;

	case MFI_EVT_LD_CREATED:
	case MFI_EVT_LD_DELETED:
		mfii_aen_ld_update(sc);
		break;

	default:
		/* uninteresting events are ignored */
		break;
	}

	/* re-arm for the event after this one */
	mfii_aen_start(sc, ccb, mdm, lemtoh32(&med->med_seq_num) + 1);
}
1268 
/*
 * A physical disk appeared.  Refresh the device handle map so the new
 * disk can be addressed, then have scsi probe the corresponding target
 * on the passthrough (pd) bus.  If the map refresh fails the probe is
 * skipped.
 */
void
mfii_aen_pd_insert(struct mfii_softc *sc,
    const struct mfi_evtarg_pd_address *pd)
{
#if 0
	printf("%s: pd inserted ext\n", DEVNAME(sc));
	printf("%s:  device_id %04x encl_id: %04x type %x\n", DEVNAME(sc),
	    lemtoh16(&pd->device_id), lemtoh16(&pd->encl_id),
	    pd->scsi_dev_type);
	printf("%s:  connected %02x addrs %016llx %016llx\n", DEVNAME(sc),
	    pd->connected.port_bitmap, lemtoh64(&pd->sas_addr[0]),
	    lemtoh64(&pd->sas_addr[1]));
#endif

	if (mfii_dev_handles_update(sc) != 0) /* refresh map */
		return;

	scsi_probe_target(sc->sc_pd->pd_scsibus, lemtoh16(&pd->device_id));
}
1288 
/*
 * A physical disk went away.  Deactivate and force-detach the target
 * on the passthrough (pd) bus.
 */
void
mfii_aen_pd_remove(struct mfii_softc *sc,
    const struct mfi_evtarg_pd_address *pd)
{
#if 0
	printf("%s: pd removed ext\n", DEVNAME(sc));
	printf("%s:  device_id %04x encl_id: %04x type %u\n", DEVNAME(sc),
	    lemtoh16(&pd->device_id), lemtoh16(&pd->encl_id),
	    pd->scsi_dev_type);
	printf("%s:  connected %02x addrs %016llx %016llx\n", DEVNAME(sc),
	    pd->connected.port_bitmap, lemtoh64(&pd->sas_addr[0]),
	    lemtoh64(&pd->sas_addr[1]));
#endif
	uint16_t target = lemtoh16(&pd->device_id);

	scsi_activate(sc->sc_pd->pd_scsibus, target, -1, DVACT_DEACTIVATE);

	/* the firmware will abort outstanding commands for us */

	scsi_detach_target(sc->sc_pd->pd_scsibus, target, DETACH_FORCE);
}
1310 
/*
 * A physical disk changed state.  Only transitions in and out of the
 * SYSTEM (JBOD/passthrough) state matter to us: leaving SYSTEM means
 * the disk was pulled or claimed for raid, so detach the target;
 * moving from UNCONFIG_GOOD to SYSTEM means the firmware is handing
 * the disk to the host, so probe it.
 */
void
mfii_aen_pd_state_change(struct mfii_softc *sc,
    const struct mfi_evtarg_pd_state *state)
{
	uint16_t target = lemtoh16(&state->pd.mep_device_id);

	if (state->prev_state == htole32(MFI_PD_SYSTEM) &&
	    state->new_state != htole32(MFI_PD_SYSTEM)) {
		/* it's been pulled or configured for raid */

		scsi_activate(sc->sc_pd->pd_scsibus, target, -1,
		    DVACT_DEACTIVATE);
		/* outstanding commands will simply complete or get aborted */
		scsi_detach_target(sc->sc_pd->pd_scsibus, target,
		    DETACH_FORCE);

	} else if (state->prev_state == htole32(MFI_PD_UNCONFIG_GOOD) &&
	    state->new_state == htole32(MFI_PD_SYSTEM)) {
		/* the firmware is handing the disk over */

		scsi_probe_target(sc->sc_pd->pd_scsibus, target);
	}
}
1334 
/*
 * A logical disk was created or deleted.  Re-fetch the ld list from
 * the firmware, diff it against the cached per-target state and
 * attach/detach scsi targets (and their sensors) accordingly, then
 * remember the new mapping in sc_target_lds.
 */
void
mfii_aen_ld_update(struct mfii_softc *sc)
{
	int i, state, target, old, nld;
	int newlds[MFI_MAX_LD];

	if (mfii_mgmt(sc, MR_DCMD_LD_GET_LIST, NULL, &sc->sc_ld_list,
	    sizeof(sc->sc_ld_list), SCSI_DATA_IN) != 0) {
		DNPRINTF(MFII_D_MISC, "%s: getting list of logical disks failed\n",
		    DEVNAME(sc));
		return;
	}

	/* -1 means "no ld at this target" */
	memset(newlds, -1, sizeof(newlds));

	/* build target -> ld-list-index map from the fresh list */
	for (i = 0; i < sc->sc_ld_list.mll_no_ld; i++) {
		state = sc->sc_ld_list.mll_list[i].mll_state;
		target = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
		DNPRINTF(MFII_D_MISC, "%s: target %d: state %d\n",
		    DEVNAME(sc), target, state);
		newlds[target] = i;
	}

	/* diff against the previous map and attach/detach as needed */
	for (i = 0; i < MFI_MAX_LD; i++) {
		old = sc->sc_target_lds[i];
		nld = newlds[i];

		if (old == -1 && nld != -1) {
			DNPRINTF(MFII_D_MISC, "%s: attaching target %d\n",
			    DEVNAME(sc), i);

			scsi_probe_target(sc->sc_scsibus, i);

#ifndef SMALL_KERNEL
			mfii_init_ld_sensor(sc, nld);
			sensor_attach(&sc->sc_sensordev, &sc->sc_sensors[i]);
#endif
		} else if (nld == -1 && old != -1) {
			DNPRINTF(MFII_D_MISC, "%s: detaching target %d\n",
			    DEVNAME(sc), i);

			scsi_activate(sc->sc_scsibus, i, -1,
			    DVACT_DEACTIVATE);
			scsi_detach_target(sc->sc_scsibus, i,
			    DETACH_FORCE);
#ifndef SMALL_KERNEL
			sensor_detach(&sc->sc_sensordev, &sc->sc_sensors[i]);
#endif
		}
	}

	memcpy(sc->sc_target_lds, newlds, sizeof(sc->sc_target_lds));
}
1388 
/*
 * Tear down async event notification.  Not implemented: the AEN ccb
 * and its dma buffer stay registered for the controller's lifetime.
 */
void
mfii_aen_unregister(struct mfii_softc *sc)
{
	/* XXX */
}
1394 
/*
 * Walk the firmware through its state machine until it reports READY.
 * Some states need a kick through the inbound doorbell; all states get
 * polled in 100ms steps for up to max_wait seconds before we declare
 * the firmware stuck.  Returns 0 when READY, 1 on fault/timeout/
 * unknown state.
 */
int
mfii_transition_firmware(struct mfii_softc *sc)
{
	int32_t			fw_state, cur_state;
	int			max_wait, i;

	fw_state = mfii_fw_state(sc) & MFI_STATE_MASK;

	while (fw_state != MFI_STATE_READY) {
		cur_state = fw_state;
		switch (fw_state) {
		case MFI_STATE_FAULT:
			printf("%s: firmware fault\n", DEVNAME(sc));
			return (1);
		case MFI_STATE_WAIT_HANDSHAKE:
			mfii_write(sc, MFI_SKINNY_IDB,
			    MFI_INIT_CLEAR_HANDSHAKE);
			max_wait = 2;
			break;
		case MFI_STATE_OPERATIONAL:
			mfii_write(sc, MFI_SKINNY_IDB, MFI_INIT_READY);
			max_wait = 10;
			break;
		case MFI_STATE_UNDEFINED:
		case MFI_STATE_BB_INIT:
			max_wait = 2;
			break;
		case MFI_STATE_FW_INIT:
		case MFI_STATE_DEVICE_SCAN:
		case MFI_STATE_FLUSH_CACHE:
			max_wait = 20;
			break;
		default:
			printf("%s: unknown firmware state %d\n",
			    DEVNAME(sc), fw_state);
			return (1);
		}
		/* poll every 100ms for up to max_wait seconds */
		for (i = 0; i < (max_wait * 10); i++) {
			fw_state = mfii_fw_state(sc) & MFI_STATE_MASK;
			if (fw_state == cur_state)
				DELAY(100000);
			else
				break;
		}
		if (fw_state == cur_state) {
			printf("%s: firmware stuck in state %#x\n",
			    DEVNAME(sc), fw_state);
			return (1);
		}
	}

	return (0);
}
1448 
/*
 * Fetch the controller information block (MR_DCMD_CTRL_GET_INFO) into
 * sc_info.  Everything after the mgmt call is debug-only output of the
 * retrieved fields (DPRINTF compiles away unless enabled).  Returns
 * the mgmt command's error, or 0 on success.
 */
int
mfii_get_info(struct mfii_softc *sc)
{
	int i, rv;

	rv = mfii_mgmt(sc, MR_DCMD_CTRL_GET_INFO, NULL, &sc->sc_info,
	    sizeof(sc->sc_info), SCSI_DATA_IN|SCSI_NOSLEEP);

	if (rv != 0)
		return (rv);

	/* firmware image inventory */
	for (i = 0; i < sc->sc_info.mci_image_component_count; i++) {
		DPRINTF("%s: active FW %s Version %s date %s time %s\n",
		    DEVNAME(sc),
		    sc->sc_info.mci_image_component[i].mic_name,
		    sc->sc_info.mci_image_component[i].mic_version,
		    sc->sc_info.mci_image_component[i].mic_build_date,
		    sc->sc_info.mci_image_component[i].mic_build_time);
	}

	for (i = 0; i < sc->sc_info.mci_pending_image_component_count; i++) {
		DPRINTF("%s: pending FW %s Version %s date %s time %s\n",
		    DEVNAME(sc),
		    sc->sc_info.mci_pending_image_component[i].mic_name,
		    sc->sc_info.mci_pending_image_component[i].mic_version,
		    sc->sc_info.mci_pending_image_component[i].mic_build_date,
		    sc->sc_info.mci_pending_image_component[i].mic_build_time);
	}

	/* capacities and limits */
	DPRINTF("%s: max_arms %d max_spans %d max_arrs %d max_lds %d name %s\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_max_arms,
	    sc->sc_info.mci_max_spans,
	    sc->sc_info.mci_max_arrays,
	    sc->sc_info.mci_max_lds,
	    sc->sc_info.mci_product_name);

	DPRINTF("%s: serial %s present %#x fw time %d max_cmds %d max_sg %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_serial_number,
	    sc->sc_info.mci_hw_present,
	    sc->sc_info.mci_current_fw_time,
	    sc->sc_info.mci_max_cmds,
	    sc->sc_info.mci_max_sg_elements);

	DPRINTF("%s: max_rq %d lds_pres %d lds_deg %d lds_off %d pd_pres %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_max_request_size,
	    sc->sc_info.mci_lds_present,
	    sc->sc_info.mci_lds_degraded,
	    sc->sc_info.mci_lds_offline,
	    sc->sc_info.mci_pd_present);

	DPRINTF("%s: pd_dsk_prs %d pd_dsk_pred_fail %d pd_dsk_fail %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_pd_disks_present,
	    sc->sc_info.mci_pd_disks_pred_failure,
	    sc->sc_info.mci_pd_disks_failed);

	DPRINTF("%s: nvram %d mem %d flash %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_nvram_size,
	    sc->sc_info.mci_memory_size,
	    sc->sc_info.mci_flash_size);

	DPRINTF("%s: ram_cor %d ram_uncor %d clus_all %d clus_act %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_ram_correctable_errors,
	    sc->sc_info.mci_ram_uncorrectable_errors,
	    sc->sc_info.mci_cluster_allowed,
	    sc->sc_info.mci_cluster_active);

	DPRINTF("%s: max_strps_io %d raid_lvl %#x adapt_ops %#x ld_ops %#x\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_max_strips_per_io,
	    sc->sc_info.mci_raid_levels,
	    sc->sc_info.mci_adapter_ops,
	    sc->sc_info.mci_ld_ops);

	DPRINTF("%s: strp_sz_min %d strp_sz_max %d pd_ops %#x pd_mix %#x\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_stripe_sz_ops.min,
	    sc->sc_info.mci_stripe_sz_ops.max,
	    sc->sc_info.mci_pd_ops,
	    sc->sc_info.mci_pd_mix_support);

	DPRINTF("%s: ecc_bucket %d pckg_prop %s\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_ecc_bucket_count,
	    sc->sc_info.mci_package_version);

	/* adapter properties */
	DPRINTF("%s: sq_nm %d prd_fail_poll %d intr_thrtl %d intr_thrtl_to %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_seq_num,
	    sc->sc_info.mci_properties.mcp_pred_fail_poll_interval,
	    sc->sc_info.mci_properties.mcp_intr_throttle_cnt,
	    sc->sc_info.mci_properties.mcp_intr_throttle_timeout);

	DPRINTF("%s: rbld_rate %d patr_rd_rate %d bgi_rate %d cc_rate %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_rebuild_rate,
	    sc->sc_info.mci_properties.mcp_patrol_read_rate,
	    sc->sc_info.mci_properties.mcp_bgi_rate,
	    sc->sc_info.mci_properties.mcp_cc_rate);

	DPRINTF("%s: rc_rate %d ch_flsh %d spin_cnt %d spin_dly %d clus_en %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_recon_rate,
	    sc->sc_info.mci_properties.mcp_cache_flush_interval,
	    sc->sc_info.mci_properties.mcp_spinup_drv_cnt,
	    sc->sc_info.mci_properties.mcp_spinup_delay,
	    sc->sc_info.mci_properties.mcp_cluster_enable);

	DPRINTF("%s: coerc %d alarm %d dis_auto_rbld %d dis_bat_wrn %d ecc %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_coercion_mode,
	    sc->sc_info.mci_properties.mcp_alarm_enable,
	    sc->sc_info.mci_properties.mcp_disable_auto_rebuild,
	    sc->sc_info.mci_properties.mcp_disable_battery_warn,
	    sc->sc_info.mci_properties.mcp_ecc_bucket_size);

	DPRINTF("%s: ecc_leak %d rest_hs %d exp_encl_dev %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_ecc_bucket_leak_rate,
	    sc->sc_info.mci_properties.mcp_restore_hotspare_on_insertion,
	    sc->sc_info.mci_properties.mcp_expose_encl_devices);

	/* pci identity and port addresses */
	DPRINTF("%s: vendor %#x device %#x subvendor %#x subdevice %#x\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_pci.mip_vendor,
	    sc->sc_info.mci_pci.mip_device,
	    sc->sc_info.mci_pci.mip_subvendor,
	    sc->sc_info.mci_pci.mip_subdevice);

	DPRINTF("%s: type %#x port_count %d port_addr ",
	    DEVNAME(sc),
	    sc->sc_info.mci_host.mih_type,
	    sc->sc_info.mci_host.mih_port_count);

	for (i = 0; i < 8; i++)
		DPRINTF("%.0llx ", sc->sc_info.mci_host.mih_port_addr[i]);
	DPRINTF("\n");

	DPRINTF("%s: type %.x port_count %d port_addr ",
	    DEVNAME(sc),
	    sc->sc_info.mci_device.mid_type,
	    sc->sc_info.mci_device.mid_port_count);

	for (i = 0; i < 8; i++)
		DPRINTF("%.0llx ", sc->sc_info.mci_device.mid_port_addr[i]);
	DPRINTF("\n");

	return (0);
}
1603 
/*
 * Submit a raw MFI frame (MFA path) and busy-wait for the firmware to
 * fill in its completion status.  Used before interrupts are usable,
 * e.g. during IOC init.  Polls the frame's mfh_cmd_status field in
 * 1ms steps for up to ~5 seconds; on timeout the ccb is flagged with
 * MFI_CCB_F_ERR and 1 is returned, otherwise 0.  Any data dmamap is
 * synced and unloaded before returning.
 */
int
mfii_mfa_poll(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	struct mfi_frame_header	*hdr = ccb->ccb_request;
	u_int64_t r;
	int to = 0, rv = 0;

#ifdef DIAGNOSTIC
	if (ccb->ccb_cookie != NULL || ccb->ccb_done != NULL)
		panic("mfii_mfa_poll called with cookie or done set");
#endif

	hdr->mfh_context = ccb->ccb_smid;
	/* sentinel: firmware overwrites this when the command completes */
	hdr->mfh_cmd_status = MFI_STAT_INVALID_STATUS;
	hdr->mfh_flags |= htole16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);

	r = MFII_REQ_MFA(ccb->ccb_request_dva);
	memcpy(&ccb->ccb_req, &r, sizeof(ccb->ccb_req));

	mfii_start(sc, ccb);

	for (;;) {
		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
		    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		if (hdr->mfh_cmd_status != MFI_STAT_INVALID_STATUS)
			break;

		if (to++ > 5000) { /* XXX 5 seconds busywait sucks */
			printf("%s: timeout on ccb %d\n", DEVNAME(sc),
			    ccb->ccb_smid);
			ccb->ccb_flags |= MFI_CCB_F_ERR;
			rv = 1;
			break;
		}

		/* give the frame back to the device for the next check */
		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
		    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		delay(1000);
	}

	if (ccb->ccb_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap,
		    0, ccb->ccb_dmamap->dm_mapsize,
		    (ccb->ccb_direction == MFII_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
	}

	return (rv);
}
1659 
/*
 * Run a ccb to completion by polling the reply post queue instead of
 * waiting for an interrupt.  The ccb's done/cookie are temporarily
 * hijacked so mfii_poll_done() can clear the local rv flag; the
 * original done handler is invoked once the command has completed.
 * Always returns 0.
 */
int
mfii_poll(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	void (*done)(struct mfii_softc *, struct mfii_ccb *);
	void *cookie;
	int rv = 1;

	done = ccb->ccb_done;
	cookie = ccb->ccb_cookie;

	ccb->ccb_done = mfii_poll_done;
	ccb->ccb_cookie = &rv;

	mfii_start(sc, ccb);

	do {
		delay(10);
		mfii_postq(sc);
	} while (rv == 1);

	ccb->ccb_cookie = cookie;
	done(sc, ccb);

	return (0);
}
1685 
/*
 * Completion hook installed by mfii_poll(): clears the poll loop's
 * "still running" flag via the ccb cookie.
 */
void
mfii_poll_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	int *rv = ccb->ccb_cookie;

	*rv = 0;
}
1693 
/*
 * Run a ccb to completion, sleeping until mfii_exec_done() signals it.
 * A stack-local mutex is passed through the ccb cookie; the done
 * handler nulls the cookie under that mutex and wakes us.  Must be
 * called from a context that may sleep.  Always returns 0.
 */
int
mfii_exec(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	struct mutex m = MUTEX_INITIALIZER(IPL_BIO);

#ifdef DIAGNOSTIC
	if (ccb->ccb_cookie != NULL || ccb->ccb_done != NULL)
		panic("mfii_exec called with cookie or done set");
#endif

	ccb->ccb_cookie = &m;
	ccb->ccb_done = mfii_exec_done;

	mfii_start(sc, ccb);

	mtx_enter(&m);
	while (ccb->ccb_cookie != NULL)
		msleep_nsec(ccb, &m, PRIBIO, "mfiiexec", INFSLP);
	mtx_leave(&m);

	return (0);
}
1716 
/*
 * Completion hook installed by mfii_exec(): under the sleeper's mutex,
 * clear the cookie (the sleeper's wakeup condition) and wake it.
 */
void
mfii_exec_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	struct mutex *m = ccb->ccb_cookie;

	mtx_enter(m);
	ccb->ccb_cookie = NULL;
	wakeup_one(ccb);
	mtx_leave(m);
}
1727 
1728 int
1729 mfii_mgmt(struct mfii_softc *sc, uint32_t opc, const union mfi_mbox *mbox,
1730     void *buf, size_t len, int flags)
1731 {
1732 	struct mfii_ccb *ccb;
1733 	int rv;
1734 
1735 	ccb = scsi_io_get(&sc->sc_iopool, flags);
1736 	if (ccb == NULL)
1737 		return (ENOMEM);
1738 
1739 	mfii_scrub_ccb(ccb);
1740 	rv = mfii_do_mgmt(sc, ccb, opc, mbox, buf, len, flags);
1741 	scsi_io_put(&sc->sc_iopool, ccb);
1742 
1743 	return (rv);
1744 }
1745 
/*
 * Issue an MFI management command (DCMD) over the MPI passthru
 * interface and wait for it to finish, polling when SCSI_NOSLEEP is
 * set (or while cold), sleeping otherwise.  The caller's buffer, if
 * any, is bounced through dma-able memory in the direction given by
 * SCSI_DATA_IN/SCSI_DATA_OUT.  Returns 0 on MFI_STAT_OK, ENOMEM on
 * allocation/map failure, EIO otherwise.
 */
int
mfii_do_mgmt(struct mfii_softc *sc, struct mfii_ccb *ccb, uint32_t opc,
    const union mfi_mbox *mbox, void *buf, size_t len, int flags)
{
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
	struct mfii_sge *sge = (struct mfii_sge *)(ctx + 1);
	struct mfi_dcmd_frame *dcmd = ccb->ccb_mfi;
	struct mfi_frame_header *hdr = &dcmd->mdf_header;
	u_int8_t *dma_buf = NULL;
	int rv = EIO;

	/* before interrupts/scheduling are up we can only poll */
	if (cold)
		flags |= SCSI_NOSLEEP;

	if (buf != NULL) {
		/*
		 * NOTE(review): dma_alloc() is always called with
		 * PR_WAITOK even when SCSI_NOSLEEP is set - confirm
		 * callers on the nosleep path can tolerate sleeping
		 * here.
		 */
		dma_buf = dma_alloc(len, PR_WAITOK);
		if (dma_buf == NULL)
			return (ENOMEM);
	}

	ccb->ccb_data = dma_buf;
	ccb->ccb_len = len;
	switch (flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
	case SCSI_DATA_IN:
		ccb->ccb_direction = MFII_DATA_IN;
		hdr->mfh_flags = htole16(MFI_FRAME_DIR_READ);
		break;
	case SCSI_DATA_OUT:
		ccb->ccb_direction = MFII_DATA_OUT;
		hdr->mfh_flags = htole16(MFI_FRAME_DIR_WRITE);
		memcpy(dma_buf, buf, len);
		break;
	case 0:
		ccb->ccb_direction = MFII_DATA_NONE;
		hdr->mfh_flags = htole16(MFI_FRAME_DIR_NONE);
		break;
	}

	/* build the MFI SGL pointing at the bounce buffer */
	if (mfii_load_mfa(sc, ccb, &dcmd->mdf_sgl,
	    ISSET(flags, SCSI_NOSLEEP)) != 0) {
		rv = ENOMEM;
		goto done;
	}

	hdr->mfh_cmd = MFI_CMD_DCMD;
	hdr->mfh_context = ccb->ccb_smid;
	hdr->mfh_data_len = htole32(len);
	hdr->mfh_sg_count = len ? ccb->ccb_dmamap->dm_nsegs : 0;

	/*
	 * NOTE(review): opc is stored without htole32(), unlike
	 * mfii_aen_start() - harmless on little-endian, verify for
	 * big-endian targets.
	 */
	dcmd->mdf_opcode = opc;
	/* handle special opcodes */
	if (mbox != NULL)
		memcpy(&dcmd->mdf_mbox, mbox, sizeof(dcmd->mdf_mbox));

	/* wrap the MFI frame in an MPI passthru request */
	io->function = MFII_FUNCTION_PASSTHRU_IO;

	if (len) {
		/* chain SGE points at the MFI frame itself */
		io->sgl_offset0 = ((u_int8_t *)sge - (u_int8_t *)io) / 4;
		io->chain_offset = ((u_int8_t *)sge - (u_int8_t *)io) / 16;
		htolem64(&sge->sg_addr, ccb->ccb_mfi_dva);
		htolem32(&sge->sg_len, MFI_FRAME_SIZE);
		sge->sg_flags =
		    MFII_SGE_CHAIN_ELEMENT | MFII_SGE_ADDR_IOCPLBNTA;
	}

	ccb->ccb_req.flags = MFII_REQ_TYPE_SCSI;
	ccb->ccb_req.smid = letoh16(ccb->ccb_smid);

	if (ISSET(flags, SCSI_NOSLEEP)) {
		ccb->ccb_done = mfii_empty_done;
		mfii_poll(sc, ccb);
	} else
		mfii_exec(sc, ccb);

	if (hdr->mfh_cmd_status == MFI_STAT_OK) {
		rv = 0;

		if (ccb->ccb_direction == MFII_DATA_IN)
			memcpy(buf, dma_buf, len);
	}

done:
	if (buf != NULL)
		dma_free(dma_buf, len);

	return (rv);
}
1834 
/*
 * No-op completion handler, used when mfii_poll() itself provides the
 * synchronisation and nothing needs doing at completion time.
 */
void
mfii_empty_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	return;
}
1840 
/*
 * Load the ccb's data buffer into its dmamap and fill in the MFI SGL
 * at sglp.  Uses 32bit SGL entries, so the dma addresses must fit in
 * 32 bits (presumably guaranteed by the dma tag - TODO confirm).
 * A zero-length ccb succeeds trivially.  Returns 0 on success, 1 if
 * the map could not be loaded.
 */
int
mfii_load_mfa(struct mfii_softc *sc, struct mfii_ccb *ccb,
    void *sglp, int nosleep)
{
	union mfi_sgl *sgl = sglp;
	bus_dmamap_t dmap = ccb->ccb_dmamap;
	int error;
	int i;

	if (ccb->ccb_len == 0)
		return (0);

	error = bus_dmamap_load(sc->sc_dmat, dmap,
	    ccb->ccb_data, ccb->ccb_len, NULL,
	    nosleep ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error) {
		printf("%s: error %d loading dmamap\n", DEVNAME(sc), error);
		return (1);
	}

	/* one 32bit SGL entry per dma segment */
	for (i = 0; i < dmap->dm_nsegs; i++) {
		sgl->sg32[i].addr = htole32(dmap->dm_segs[i].ds_addr);
		sgl->sg32[i].len = htole32(dmap->dm_segs[i].ds_len);
	}

	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
	    ccb->ccb_direction == MFII_DATA_OUT ?
	    BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);

	return (0);
}
1872 
/*
 * Post a request descriptor to the controller.  The 64bit descriptor
 * in ccb_req is written to the inbound queue port: as a single raw
 * 8 byte store on LP64, or as two ordered 4 byte stores (low word
 * first) under sc_post_mtx elsewhere, since the two halves must not
 * interleave with another cpu's post.
 */
void
mfii_start(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	u_long *r = (u_long *)&ccb->ccb_req;

	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
	    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

#if defined(__LP64__)
	bus_space_write_raw_8(sc->sc_iot, sc->sc_ioh, MFI_IQPL, *r);
#else
	mtx_enter(&sc->sc_post_mtx);
	bus_space_write_raw_4(sc->sc_iot, sc->sc_ioh, MFI_IQPL, r[0]);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh,
	    MFI_IQPL, 8, BUS_SPACE_BARRIER_WRITE);

	bus_space_write_raw_4(sc->sc_iot, sc->sc_ioh, MFI_IQPH, r[1]);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh,
	    MFI_IQPH, 8, BUS_SPACE_BARRIER_WRITE);
	mtx_leave(&sc->sc_post_mtx);
#endif
}
1896 
/*
 * Finish a completed ccb: sync the request frame, the ccb's chunk of
 * the shared SGL area (if used) and the data dmamap (unloading it),
 * then hand the ccb to its done callback.
 */
void
mfii_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
	    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	if (ccb->ccb_sgl_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_sgl),
		    ccb->ccb_sgl_offset, ccb->ccb_sgl_len,
		    BUS_DMASYNC_POSTWRITE);
	}

	if (ccb->ccb_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap,
		    0, ccb->ccb_dmamap->dm_mapsize,
		    (ccb->ccb_direction == MFII_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
	}

	ccb->ccb_done(sc, ccb);
}
1921 
1922 int
1923 mfii_initialise_firmware(struct mfii_softc *sc)
1924 {
1925 	struct mpii_msg_iocinit_request *iiq;
1926 	struct mfii_dmamem *m;
1927 	struct mfii_ccb *ccb;
1928 	struct mfi_init_frame *init;
1929 	int rv;
1930 
1931 	m = mfii_dmamem_alloc(sc, sizeof(*iiq));
1932 	if (m == NULL)
1933 		return (1);
1934 
1935 	iiq = MFII_DMA_KVA(m);
1936 	memset(iiq, 0, sizeof(*iiq));
1937 
1938 	iiq->function = MPII_FUNCTION_IOC_INIT;
1939 	iiq->whoinit = MPII_WHOINIT_HOST_DRIVER;
1940 
1941 	iiq->msg_version_maj = 0x02;
1942 	iiq->msg_version_min = 0x00;
1943 	iiq->hdr_version_unit = 0x10;
1944 	iiq->hdr_version_dev = 0x0;
1945 
1946 	iiq->system_request_frame_size = htole16(MFII_REQUEST_SIZE / 4);
1947 
1948 	iiq->reply_descriptor_post_queue_depth =
1949 	    htole16(sc->sc_reply_postq_depth);
1950 	iiq->reply_free_queue_depth = htole16(0);
1951 
1952 	htolem32(&iiq->sense_buffer_address_high,
1953 	    MFII_DMA_DVA(sc->sc_sense) >> 32);
1954 
1955 	htolem32(&iiq->reply_descriptor_post_queue_address_lo,
1956 	    MFII_DMA_DVA(sc->sc_reply_postq));
1957 	htolem32(&iiq->reply_descriptor_post_queue_address_hi,
1958 	    MFII_DMA_DVA(sc->sc_reply_postq) >> 32);
1959 
1960 	htolem32(&iiq->system_request_frame_base_address_lo,
1961 	    MFII_DMA_DVA(sc->sc_requests));
1962 	htolem32(&iiq->system_request_frame_base_address_hi,
1963 	    MFII_DMA_DVA(sc->sc_requests) >> 32);
1964 
1965 	iiq->timestamp = htole64(getuptime());
1966 
1967 	ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP);
1968 	if (ccb == NULL) {
1969 		/* shouldn't ever run out of ccbs during attach */
1970 		return (1);
1971 	}
1972 	mfii_scrub_ccb(ccb);
1973 	init = ccb->ccb_request;
1974 
1975 	init->mif_header.mfh_cmd = MFI_CMD_INIT;
1976 	init->mif_header.mfh_data_len = htole32(sizeof(*iiq));
1977 	init->mif_qinfo_new_addr = htole64(MFII_DMA_DVA(m));
1978 
1979 	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_reply_postq),
1980 	    0, MFII_DMA_LEN(sc->sc_reply_postq),
1981 	    BUS_DMASYNC_PREREAD);
1982 
1983 	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(m),
1984 	    0, sizeof(*iiq), BUS_DMASYNC_PREREAD);
1985 
1986 	rv = mfii_mfa_poll(sc, ccb);
1987 
1988 	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(m),
1989 	    0, sizeof(*iiq), BUS_DMASYNC_POSTREAD);
1990 
1991 	scsi_io_put(&sc->sc_iopool, ccb);
1992 	mfii_dmamem_free(sc, m);
1993 
1994 	return (rv);
1995 }
1996 
1997 int
1998 mfii_my_intr(struct mfii_softc *sc)
1999 {
2000 	u_int32_t status;
2001 
2002 	status = mfii_read(sc, MFI_OSTS);
2003 	if (ISSET(status, 0x1)) {
2004 		mfii_write(sc, MFI_OSTS, status);
2005 		return (1);
2006 	}
2007 
2008 	return (ISSET(status, MFII_OSTS_INTR_VALID) ? 1 : 0);
2009 }
2010 
/*
 * Interrupt handler.  If the interrupt was raised by our controller,
 * drain the reply post queue and claim it; otherwise report it as not
 * ours.
 */
int
mfii_intr(void *arg)
{
	struct mfii_softc *sc = arg;

	if (mfii_my_intr(sc)) {
		mfii_postq(sc);
		return (1);
	}

	return (0);
}
2023 
/*
 * Drain the reply post queue.  Under sc_reply_postq_mtx, walk the ring
 * from the current index collecting completed ccbs onto a local list,
 * resetting each consumed descriptor to all-ones (the "unused"
 * pattern) as we go.  A descriptor whose data word is still 0xffffffff
 * is being written by the ioc, so we bail and pick it up next time.
 * The reply post host index register is updated once, and the
 * completions are run outside the mutex.
 */
void
mfii_postq(struct mfii_softc *sc)
{
	struct mfii_ccb_list ccbs = SIMPLEQ_HEAD_INITIALIZER(ccbs);
	struct mpii_reply_descr *postq = MFII_DMA_KVA(sc->sc_reply_postq);
	struct mpii_reply_descr *rdp;
	struct mfii_ccb *ccb;
	int rpi = 0;

	mtx_enter(&sc->sc_reply_postq_mtx);

	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_reply_postq),
	    0, MFII_DMA_LEN(sc->sc_reply_postq),
	    BUS_DMASYNC_POSTREAD);

	for (;;) {
		rdp = &postq[sc->sc_reply_postq_index];
		if ((rdp->reply_flags & MPII_REPLY_DESCR_TYPE_MASK) ==
		    MPII_REPLY_DESCR_UNUSED)
			break;
		if (rdp->data == 0xffffffff) {
			/*
			 * ioc is still writing to the reply post queue
			 * race condition - bail!
			 */
			break;
		}

		/* smid is 1 based, the ccb array is 0 based */
		ccb = &sc->sc_ccb[letoh16(rdp->smid) - 1];
		SIMPLEQ_INSERT_TAIL(&ccbs, ccb, ccb_link);
		memset(rdp, 0xff, sizeof(*rdp));

		sc->sc_reply_postq_index++;
		sc->sc_reply_postq_index %= sc->sc_reply_postq_depth;
		rpi = 1;
	}

	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_reply_postq),
	    0, MFII_DMA_LEN(sc->sc_reply_postq),
	    BUS_DMASYNC_PREREAD);

	/* only poke the host index register if we consumed anything */
	if (rpi)
		mfii_write(sc, MFII_RPI, sc->sc_reply_postq_index);

	mtx_leave(&sc->sc_reply_postq_mtx);

	while ((ccb = SIMPLEQ_FIRST(&ccbs)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&ccbs, ccb_link);
		mfii_done(sc, ccb);
	}
}
2075 
/*
 * Midlayer entry point for commands to logical disks (RAID volumes).
 * Reads and writes go through the LDIO fast path; everything else is
 * sent as a plain CDB request.
 */
void
mfii_scsi_cmd(struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct mfii_softc *sc = link->bus->sb_adapter_softc;
	struct mfii_ccb *ccb = xs->io;

	mfii_scrub_ccb(ccb);
	ccb->ccb_cookie = xs;
	ccb->ccb_done = mfii_scsi_cmd_done;
	ccb->ccb_data = xs->data;
	ccb->ccb_len = xs->datalen;

	timeout_set(&xs->stimeout, mfii_scsi_cmd_tmo, xs);

	switch (xs->cmd->opcode) {
	case READ_COMMAND:
	case READ_BIG:
	case READ_12:
	case READ_16:
	case WRITE_COMMAND:
	case WRITE_BIG:
	case WRITE_12:
	case WRITE_16:
		if (mfii_scsi_cmd_io(sc, xs) != 0)
			goto stuffup;

		break;

	default:
		if (mfii_scsi_cmd_cdb(sc, xs) != 0)
			goto stuffup;
		break;
	}

	/* assume success; mfii_scsi_cmd_done() overrides on error */
	xs->error = XS_NOERROR;
	xs->resid = 0;

	if (ISSET(xs->flags, SCSI_POLL)) {
		if (mfii_poll(sc, ccb) != 0)
			goto stuffup;
		return;
	}

	ccb->ccb_refcnt = 2; /* one for the chip, one for the timeout */
	timeout_add_msec(&xs->stimeout, xs->timeout);
	mfii_start(sc, ccb);

	return;

stuffup:
	xs->error = XS_DRIVER_STUFFUP;
	scsi_done(xs);
}
2130 
/*
 * Completion handler for logical disk commands.  Translates the RAID
 * context status from the firmware into a scsi_xfer error code.
 */
void
mfii_scsi_cmd_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	struct scsi_xfer *xs = ccb->ccb_cookie;
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
	u_int refs = 1;

	/*
	 * If the timeout was still pending it can no longer fire, so
	 * drop its reference along with the chip's.  If timeout_del()
	 * fails, the timeout handler owns the second reference.
	 */
	if (timeout_del(&xs->stimeout))
		refs = 2;

	switch (ctx->status) {
	case MFI_STAT_OK:
		break;

	case MFI_STAT_SCSI_DONE_WITH_ERROR:
		/* command completed with sense data; hand it to the midlayer */
		xs->error = XS_SENSE;
		memset(&xs->sense, 0, sizeof(xs->sense));
		memcpy(&xs->sense, ccb->ccb_sense, sizeof(xs->sense));
		break;

	case MFI_STAT_LD_OFFLINE:
	case MFI_STAT_DEVICE_NOT_FOUND:
		xs->error = XS_SELTIMEOUT;
		break;

	default:
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}

	/* complete the xfer once the last reference is dropped */
	if (atomic_sub_int_nv(&ccb->ccb_refcnt, refs) == 0)
		scsi_done(xs);
}
2165 
2166 int
2167 mfii_scsi_ioctl(struct scsi_link *link, u_long cmd, caddr_t addr, int flag)
2168 {
2169 	struct mfii_softc	*sc = link->bus->sb_adapter_softc;
2170 
2171 	DNPRINTF(MFII_D_IOCTL, "%s: mfii_scsi_ioctl\n", DEVNAME(sc));
2172 
2173 	switch (cmd) {
2174 	case DIOCGCACHE:
2175 	case DIOCSCACHE:
2176 		return (mfii_ioctl_cache(link, cmd, (struct dk_cache *)addr));
2177 		break;
2178 
2179 	default:
2180 		if (sc->sc_ioctl)
2181 			return (sc->sc_ioctl(&sc->sc_dev, cmd, addr));
2182 		break;
2183 	}
2184 
2185 	return (ENOTTY);
2186 }
2187 
/*
 * Get (DIOCGCACHE) or set (DIOCSCACHE) the cache settings for the
 * logical disk behind link.  When the controller reports cache memory
 * the controller cache policy is used, otherwise the per-disk cache
 * setting is used and read caching is unavailable.
 */
int
mfii_ioctl_cache(struct scsi_link *link, u_long cmd,  struct dk_cache *dc)
{
	struct mfii_softc	*sc = link->bus->sb_adapter_softc;
	int			 rv, wrenable, rdenable;
	struct mfi_ld_prop	 ldp;
	union mfi_mbox		 mbox;

	if (mfii_get_info(sc)) {
		rv = EIO;
		goto done;
	}

	/* make sure the target actually maps to a logical disk */
	if (sc->sc_target_lds[link->target] == -1) {
		rv = EIO;
		goto done;
	}

	/* fetch the current properties of this logical disk */
	memset(&mbox, 0, sizeof(mbox));
	mbox.b[0] = link->target;
	rv = mfii_mgmt(sc, MR_DCMD_LD_GET_PROPERTIES, &mbox, &ldp, sizeof(ldp),
	    SCSI_DATA_IN);
	if (rv != 0)
		goto done;

	/* mci_memory_size > 0 presumably means controller cache is present */
	if (sc->sc_info.mci_memory_size > 0) {
		wrenable = ISSET(ldp.mlp_cur_cache_policy,
		    MR_LD_CACHE_ALLOW_WRITE_CACHE)? 1 : 0;
		rdenable = ISSET(ldp.mlp_cur_cache_policy,
		    MR_LD_CACHE_ALLOW_READ_CACHE)? 1 : 0;
	} else {
		wrenable = ISSET(ldp.mlp_diskcache_policy,
		    MR_LD_DISK_CACHE_ENABLE)? 1 : 0;
		rdenable = 0;
	}

	if (cmd == DIOCGCACHE) {
		dc->wrcache = wrenable;
		dc->rdcache = rdenable;
		goto done;
	} /* else DIOCSCACHE */

	/* nothing to do if the requested state matches the current one */
	if (((dc->wrcache) ? 1 : 0) == wrenable &&
	    ((dc->rdcache) ? 1 : 0) == rdenable)
		goto done;

	/* the set command wants target, reserved byte and sequence number */
	memset(&mbox, 0, sizeof(mbox));
	mbox.b[0] = ldp.mlp_ld.mld_target;
	mbox.b[1] = ldp.mlp_ld.mld_res;
	mbox.s[1] = ldp.mlp_ld.mld_seq;

	if (sc->sc_info.mci_memory_size > 0) {
		if (dc->rdcache)
			SET(ldp.mlp_cur_cache_policy,
			    MR_LD_CACHE_ALLOW_READ_CACHE);
		else
			CLR(ldp.mlp_cur_cache_policy,
			    MR_LD_CACHE_ALLOW_READ_CACHE);
		if (dc->wrcache)
			SET(ldp.mlp_cur_cache_policy,
			    MR_LD_CACHE_ALLOW_WRITE_CACHE);
		else
			CLR(ldp.mlp_cur_cache_policy,
			    MR_LD_CACHE_ALLOW_WRITE_CACHE);
	} else {
		/* no controller cache: read caching cannot be enabled */
		if (dc->rdcache) {
			rv = EOPNOTSUPP;
			goto done;
		}
		if (dc->wrcache)
			ldp.mlp_diskcache_policy = MR_LD_DISK_CACHE_ENABLE;
		else
			ldp.mlp_diskcache_policy = MR_LD_DISK_CACHE_DISABLE;
	}

	rv = mfii_mgmt(sc, MR_DCMD_LD_SET_PROPERTIES, &mbox, &ldp, sizeof(ldp),
	    SCSI_DATA_OUT);
done:
	return (rv);
}
2268 
/*
 * Build an LDIO (fast path logical disk I/O) request frame for xs.
 * Returns 0 on success, nonzero if the dma map could not be loaded.
 */
int
mfii_scsi_cmd_io(struct mfii_softc *sc, struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct mfii_ccb *ccb = xs->io;
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	/* the RAID context lives directly after the MPII I/O message */
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
	int segs;

	io->dev_handle = htole16(link->target);
	io->function = MFII_FUNCTION_LDIO_REQUEST;
	io->sense_buffer_low_address = htole32(ccb->ccb_sense_dva);
	io->sgl_flags = htole16(0x02); /* XXX */
	io->sense_buffer_length = sizeof(xs->sense);
	/* SGL follows the message and context, in 4-byte units */
	io->sgl_offset0 = (sizeof(*io) + sizeof(*ctx)) / 4;
	io->data_length = htole32(xs->datalen);
	io->io_flags = htole16(xs->cmdlen);
	switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
	case SCSI_DATA_IN:
		ccb->ccb_direction = MFII_DATA_IN;
		io->direction = MPII_SCSIIO_DIR_READ;
		break;
	case SCSI_DATA_OUT:
		ccb->ccb_direction = MFII_DATA_OUT;
		io->direction = MPII_SCSIIO_DIR_WRITE;
		break;
	default:
		ccb->ccb_direction = MFII_DATA_NONE;
		io->direction = MPII_SCSIIO_DIR_NONE;
		break;
	}
	memcpy(io->cdb, xs->cmd, xs->cmdlen);

	ctx->type_nseg = sc->sc_iop->ldio_ctx_type_nseg;
	ctx->timeout_value = htole16(0x14); /* XXX */
	ctx->reg_lock_flags = htole16(sc->sc_iop->ldio_ctx_reg_lock_flags);
	ctx->virtual_disk_target_id = htole16(link->target);

	if (mfii_load_ccb(sc, ccb, ctx + 1,
	    ISSET(xs->flags, SCSI_NOSLEEP)) != 0)
		return (1);

	/* where the segment count goes depends on the controller generation */
	segs = (ccb->ccb_len == 0) ? 0 : ccb->ccb_dmamap->dm_nsegs;
	switch (sc->sc_iop->num_sge_loc) {
	case MFII_IOP_NUM_SGE_LOC_ORIG:
		ctx->num_sge = segs;
		break;
	case MFII_IOP_NUM_SGE_LOC_35:
		/* 12 bit field, but we're only using the lower 8 */
		ctx->span_arm = segs;
		break;
	}

	ccb->ccb_req.flags = sc->sc_iop->ldio_req_type;
	ccb->ccb_req.smid = letoh16(ccb->ccb_smid);

	return (0);
}
2327 
/*
 * Build a plain CDB request for a logical disk, used for everything
 * that does not take the LDIO read/write fast path.  Returns 0 on
 * success, nonzero if the dma map could not be loaded.
 */
int
mfii_scsi_cmd_cdb(struct mfii_softc *sc, struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct mfii_ccb *ccb = xs->io;
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	/* the RAID context lives directly after the MPII I/O message */
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);

	io->dev_handle = htole16(link->target);
	io->function = MFII_FUNCTION_LDIO_REQUEST;
	io->sense_buffer_low_address = htole32(ccb->ccb_sense_dva);
	io->sgl_flags = htole16(0x02); /* XXX */
	io->sense_buffer_length = sizeof(xs->sense);
	/* SGL follows the message and context, in 4-byte units */
	io->sgl_offset0 = (sizeof(*io) + sizeof(*ctx)) / 4;
	io->data_length = htole32(xs->datalen);
	io->io_flags = htole16(xs->cmdlen);
	io->lun[0] = htobe16(link->lun);
	switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
	case SCSI_DATA_IN:
		ccb->ccb_direction = MFII_DATA_IN;
		io->direction = MPII_SCSIIO_DIR_READ;
		break;
	case SCSI_DATA_OUT:
		ccb->ccb_direction = MFII_DATA_OUT;
		io->direction = MPII_SCSIIO_DIR_WRITE;
		break;
	default:
		ccb->ccb_direction = MFII_DATA_NONE;
		io->direction = MPII_SCSIIO_DIR_NONE;
		break;
	}
	memcpy(io->cdb, xs->cmd, xs->cmdlen);

	ctx->virtual_disk_target_id = htole16(link->target);

	if (mfii_load_ccb(sc, ccb, ctx + 1,
	    ISSET(xs->flags, SCSI_NOSLEEP)) != 0)
		return (1);

	ctx->num_sge = (ccb->ccb_len == 0) ? 0 : ccb->ccb_dmamap->dm_nsegs;

	ccb->ccb_req.flags = MFII_REQ_TYPE_SCSI;
	ccb->ccb_req.smid = letoh16(ccb->ccb_smid);

	return (0);
}
2374 
/*
 * Midlayer entry point for commands to passthrough physical disks
 * (JBOD).  All commands are sent as plain CDB requests.
 */
void
mfii_pd_scsi_cmd(struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct mfii_softc *sc = link->bus->sb_adapter_softc;
	struct mfii_ccb *ccb = xs->io;

	mfii_scrub_ccb(ccb);
	ccb->ccb_cookie = xs;
	ccb->ccb_done = mfii_scsi_cmd_done;
	ccb->ccb_data = xs->data;
	ccb->ccb_len = xs->datalen;

	timeout_set(&xs->stimeout, mfii_scsi_cmd_tmo, xs);

	xs->error = mfii_pd_scsi_cmd_cdb(sc, xs);
	if (xs->error != XS_NOERROR)
		goto done;

	xs->resid = 0;

	if (ISSET(xs->flags, SCSI_POLL)) {
		if (mfii_poll(sc, ccb) != 0)
			goto stuffup;
		return;
	}

	ccb->ccb_refcnt = 2; /* one for the chip, one for the timeout */
	timeout_add_msec(&xs->stimeout, xs->timeout);
	mfii_start(sc, ccb);

	return;

stuffup:
	xs->error = XS_DRIVER_STUFFUP;
done:
	scsi_done(xs);
}
2413 
2414 int
2415 mfii_pd_scsi_probe(struct scsi_link *link)
2416 {
2417 	struct mfii_softc *sc = link->bus->sb_adapter_softc;
2418 	struct mfi_pd_details mpd;
2419 	union mfi_mbox mbox;
2420 	int rv;
2421 
2422 	if (link->lun > 0)
2423 		return (0);
2424 
2425 	memset(&mbox, 0, sizeof(mbox));
2426 	mbox.s[0] = htole16(link->target);
2427 
2428 	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, &mpd, sizeof(mpd),
2429 	    SCSI_DATA_IN|SCSI_NOSLEEP);
2430 	if (rv != 0)
2431 		return (EIO);
2432 
2433 	if (mpd.mpd_fw_state != htole16(MFI_PD_SYSTEM))
2434 		return (ENXIO);
2435 
2436 	return (0);
2437 }
2438 
/*
 * Build a CDB request for a passthrough physical disk.  Returns an
 * XS_* code: XS_SELTIMEOUT if the device handle lookup fails,
 * XS_DRIVER_STUFFUP if the dma map cannot be loaded, XS_NOERROR on
 * success.
 */
int
mfii_pd_scsi_cmd_cdb(struct mfii_softc *sc, struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct mfii_ccb *ccb = xs->io;
	struct mpii_msg_scsi_io *io = ccb->ccb_request;
	/* the RAID context lives directly after the MPII I/O message */
	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
	uint16_t dev_handle;

	/* 0xffff means the firmware has no handle for this target */
	dev_handle = mfii_dev_handle(sc, link->target);
	if (dev_handle == htole16(0xffff))
		return (XS_SELTIMEOUT);

	io->dev_handle = dev_handle;
	io->function = 0;
	io->sense_buffer_low_address = htole32(ccb->ccb_sense_dva);
	io->sgl_flags = htole16(0x02); /* XXX */
	io->sense_buffer_length = sizeof(xs->sense);
	/* SGL follows the message and context, in 4-byte units */
	io->sgl_offset0 = (sizeof(*io) + sizeof(*ctx)) / 4;
	io->data_length = htole32(xs->datalen);
	io->io_flags = htole16(xs->cmdlen);
	io->lun[0] = htobe16(link->lun);
	switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
	case SCSI_DATA_IN:
		ccb->ccb_direction = MFII_DATA_IN;
		io->direction = MPII_SCSIIO_DIR_READ;
		break;
	case SCSI_DATA_OUT:
		ccb->ccb_direction = MFII_DATA_OUT;
		io->direction = MPII_SCSIIO_DIR_WRITE;
		break;
	default:
		ccb->ccb_direction = MFII_DATA_NONE;
		io->direction = MPII_SCSIIO_DIR_NONE;
		break;
	}
	memcpy(io->cdb, xs->cmd, xs->cmdlen);

	ctx->virtual_disk_target_id = htole16(link->target);
	/* mark this as system physical disk (JBOD) i/o */
	ctx->raid_flags = MFII_RAID_CTX_IO_TYPE_SYSPD;
	ctx->timeout_value = sc->sc_pd->pd_timeout;

	if (mfii_load_ccb(sc, ccb, ctx + 1,
	    ISSET(xs->flags, SCSI_NOSLEEP)) != 0)
		return (XS_DRIVER_STUFFUP);

	ctx->num_sge = (ccb->ccb_len == 0) ? 0 : ccb->ccb_dmamap->dm_nsegs;

	ccb->ccb_req.flags = MFII_REQ_TYPE_HI_PRI;
	ccb->ccb_req.smid = letoh16(ccb->ccb_smid);
	ccb->ccb_req.dev_handle = dev_handle;

	return (XS_NOERROR);
}
2493 
/*
 * Load the ccb's data buffer into its dma map and build the SGL
 * starting at sglp inside the request frame.  If not all segments fit
 * in the frame, the last in-frame slot is turned into a chain entry
 * pointing at the ccb's external SGL and the remaining segments are
 * written there.  Returns 0 on success, 1 if the dma load fails.
 */
int
mfii_load_ccb(struct mfii_softc *sc, struct mfii_ccb *ccb, void *sglp,
    int nosleep)
{
	struct mpii_msg_request *req = ccb->ccb_request;
	struct mfii_sge *sge = NULL, *nsge = sglp;
	struct mfii_sge *ce = NULL;
	bus_dmamap_t dmap = ccb->ccb_dmamap;
	u_int space;
	int i;

	int error;

	/* no data, no SGL to build */
	if (ccb->ccb_len == 0)
		return (0);

	error = bus_dmamap_load(sc->sc_dmat, dmap,
	    ccb->ccb_data, ccb->ccb_len, NULL,
	    nosleep ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error) {
		printf("%s: error %d loading dmamap\n", DEVNAME(sc), error);
		return (1);
	}

	/* number of SGEs that fit in the rest of the request frame */
	space = (MFII_REQUEST_SIZE - ((u_int8_t *)nsge - (u_int8_t *)req)) /
	    sizeof(*nsge);
	if (dmap->dm_nsegs > space) {
		/* reserve the last in-frame slot for the chain entry */
		space--;

		ccb->ccb_sgl_len = (dmap->dm_nsegs - space) * sizeof(*nsge);
		memset(ccb->ccb_sgl, 0, ccb->ccb_sgl_len);

		ce = nsge + space;
		ce->sg_addr = htole64(ccb->ccb_sgl_dva);
		ce->sg_len = htole32(ccb->ccb_sgl_len);
		ce->sg_flags = sc->sc_iop->sge_flag_chain;

		/* chain offset is in 16-byte units from the request start */
		req->chain_offset = ((u_int8_t *)ce - (u_int8_t *)req) / 16;
	}

	for (i = 0; i < dmap->dm_nsegs; i++) {
		/* when we hit the chain entry, continue in the external SGL */
		if (nsge == ce)
			nsge = ccb->ccb_sgl;

		sge = nsge;

		sge->sg_addr = htole64(dmap->dm_segs[i].ds_addr);
		sge->sg_len = htole32(dmap->dm_segs[i].ds_len);
		sge->sg_flags = MFII_SGE_ADDR_SYSTEM;

		nsge = sge + 1;
	}
	/* flag the last entry written as end-of-list */
	sge->sg_flags |= sc->sc_iop->sge_flag_eol;

	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
	    ccb->ccb_direction == MFII_DATA_OUT ?
	    BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);

	/* flush the external SGL out to the controller if one was used */
	if (ccb->ccb_sgl_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_sgl),
		    ccb->ccb_sgl_offset, ccb->ccb_sgl_len,
		    BUS_DMASYNC_PREWRITE);
	}

	return (0);
}
2560 
/*
 * Per-command timeout handler.  The actual abort may sleep, so the
 * timed-out ccb is queued for the abort task instead of being handled
 * here.
 */
void
mfii_scsi_cmd_tmo(void *xsp)
{
	struct scsi_xfer *xs = xsp;
	struct scsi_link *link = xs->sc_link;
	struct mfii_softc *sc = link->bus->sb_adapter_softc;
	struct mfii_ccb *ccb = xs->io;

	mtx_enter(&sc->sc_abort_mtx);
	SIMPLEQ_INSERT_TAIL(&sc->sc_abort_list, ccb, ccb_link);
	mtx_leave(&sc->sc_abort_mtx);

	task_add(systqmp, &sc->sc_abort_task);
}
2575 
/*
 * Task context handler for timed-out commands.  Takes the whole abort
 * list in one go and issues a task management abort for each ccb; if
 * the device has disappeared the xfer is completed directly.
 */
void
mfii_abort_task(void *scp)
{
	struct mfii_softc *sc = scp;
	struct mfii_ccb *list;

	/* detach the current abort list under the mutex */
	mtx_enter(&sc->sc_abort_mtx);
	list = SIMPLEQ_FIRST(&sc->sc_abort_list);
	SIMPLEQ_INIT(&sc->sc_abort_list);
	mtx_leave(&sc->sc_abort_mtx);

	while (list != NULL) {
		struct mfii_ccb *ccb = list;
		struct scsi_xfer *xs = ccb->ccb_cookie;
		struct scsi_link *link = xs->sc_link;

		uint16_t dev_handle;
		struct mfii_ccb *accb;

		list = SIMPLEQ_NEXT(ccb, ccb_link);

		dev_handle = mfii_dev_handle(sc, link->target);
		if (dev_handle == htole16(0xffff)) {
			/* device is gone */
			if (atomic_dec_int_nv(&ccb->ccb_refcnt) == 0)
				scsi_done(xs);
			continue;
		}

		/* flags 0: may sleep until a ccb is available */
		accb = scsi_io_get(&sc->sc_iopool, 0);
		mfii_scrub_ccb(accb);
		mfii_abort(sc, accb, dev_handle, ccb->ccb_smid,
		    MPII_SCSI_TASK_ABORT_TASK,
		    htole32(MFII_TASK_MGMT_FLAGS_PD));

		accb->ccb_cookie = ccb;
		accb->ccb_done = mfii_scsi_cmd_abort_done;

		mfii_start(sc, accb);
	}
}
2617 
2618 void
2619 mfii_abort(struct mfii_softc *sc, struct mfii_ccb *accb, uint16_t dev_handle,
2620     uint16_t smid, uint8_t type, uint32_t flags)
2621 {
2622 	struct mfii_task_mgmt *msg;
2623 	struct mpii_msg_scsi_task_request *req;
2624 
2625 	msg = accb->ccb_request;
2626 	req = &msg->mpii_request;
2627 	req->dev_handle = dev_handle;
2628 	req->function = MPII_FUNCTION_SCSI_TASK_MGMT;
2629 	req->task_type = type;
2630 	htolem16(&req->task_mid, smid);
2631 	msg->flags = flags;
2632 
2633 	accb->ccb_req.flags = MFII_REQ_TYPE_HI_PRI;
2634 	accb->ccb_req.smid = letoh16(accb->ccb_smid);
2635 }
2636 
/*
 * Completion handler for the abort request itself.  accb's cookie is
 * the aborted ccb, whose cookie in turn is the original scsi_xfer.
 */
void
mfii_scsi_cmd_abort_done(struct mfii_softc *sc, struct mfii_ccb *accb)
{
	struct mfii_ccb *ccb = accb->ccb_cookie;
	struct scsi_xfer *xs = ccb->ccb_cookie;

	/* XXX check accb completion? */

	scsi_io_put(&sc->sc_iopool, accb);

	/* drop the timeout's reference to the aborted command */
	if (atomic_dec_int_nv(&ccb->ccb_refcnt) == 0)
		scsi_done(xs);
}
2650 
/*
 * iopool backend: take a ccb off the free list, or return NULL if
 * none are available.
 */
void *
mfii_get_ccb(void *cookie)
{
	struct mfii_softc *sc = cookie;
	struct mfii_ccb *ccb;

	mtx_enter(&sc->sc_ccb_mtx);
	ccb = SIMPLEQ_FIRST(&sc->sc_ccb_freeq);
	if (ccb != NULL)
		SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_freeq, ccb_link);
	mtx_leave(&sc->sc_ccb_mtx);

	return (ccb);
}
2665 
2666 void
2667 mfii_scrub_ccb(struct mfii_ccb *ccb)
2668 {
2669 	ccb->ccb_cookie = NULL;
2670 	ccb->ccb_done = NULL;
2671 	ccb->ccb_flags = 0;
2672 	ccb->ccb_data = NULL;
2673 	ccb->ccb_direction = 0;
2674 	ccb->ccb_len = 0;
2675 	ccb->ccb_sgl_len = 0;
2676 	ccb->ccb_refcnt = 1;
2677 
2678 	memset(&ccb->ccb_req, 0, sizeof(ccb->ccb_req));
2679 	memset(ccb->ccb_request, 0, MFII_REQUEST_SIZE);
2680 	memset(ccb->ccb_mfi, 0, MFI_FRAME_SIZE);
2681 }
2682 
/*
 * iopool backend: return a ccb to the free list.
 */
void
mfii_put_ccb(void *cookie, void *io)
{
	struct mfii_softc *sc = cookie;
	struct mfii_ccb *ccb = io;

	mtx_enter(&sc->sc_ccb_mtx);
	SIMPLEQ_INSERT_HEAD(&sc->sc_ccb_freeq, ccb, ccb_link);
	mtx_leave(&sc->sc_ccb_mtx);
}
2693 
2694 int
2695 mfii_init_ccb(struct mfii_softc *sc)
2696 {
2697 	struct mfii_ccb *ccb;
2698 	u_int8_t *request = MFII_DMA_KVA(sc->sc_requests);
2699 	u_int8_t *mfi = MFII_DMA_KVA(sc->sc_mfi);
2700 	u_int8_t *sense = MFII_DMA_KVA(sc->sc_sense);
2701 	u_int8_t *sgl = MFII_DMA_KVA(sc->sc_sgl);
2702 	u_int i;
2703 	int error;
2704 
2705 	sc->sc_ccb = mallocarray(sc->sc_max_cmds, sizeof(struct mfii_ccb),
2706 	    M_DEVBUF, M_WAITOK|M_ZERO);
2707 
2708 	for (i = 0; i < sc->sc_max_cmds; i++) {
2709 		ccb = &sc->sc_ccb[i];
2710 
2711 		/* create a dma map for transfer */
2712 		error = bus_dmamap_create(sc->sc_dmat,
2713 		    MAXPHYS, sc->sc_max_sgl, MAXPHYS, 0,
2714 		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->ccb_dmamap);
2715 		if (error) {
2716 			printf("%s: cannot create ccb dmamap (%d)\n",
2717 			    DEVNAME(sc), error);
2718 			goto destroy;
2719 		}
2720 
2721 		/* select i + 1'th request. 0 is reserved for events */
2722 		ccb->ccb_smid = i + 1;
2723 		ccb->ccb_request_offset = MFII_REQUEST_SIZE * (i + 1);
2724 		ccb->ccb_request = request + ccb->ccb_request_offset;
2725 		ccb->ccb_request_dva = MFII_DMA_DVA(sc->sc_requests) +
2726 		    ccb->ccb_request_offset;
2727 
2728 		/* select i'th MFI command frame */
2729 		ccb->ccb_mfi_offset = MFI_FRAME_SIZE * i;
2730 		ccb->ccb_mfi = mfi + ccb->ccb_mfi_offset;
2731 		ccb->ccb_mfi_dva = MFII_DMA_DVA(sc->sc_mfi) +
2732 		    ccb->ccb_mfi_offset;
2733 
2734 		/* select i'th sense */
2735 		ccb->ccb_sense_offset = MFI_SENSE_SIZE * i;
2736 		ccb->ccb_sense = (struct mfi_sense *)(sense +
2737 		    ccb->ccb_sense_offset);
2738 		ccb->ccb_sense_dva = MFII_DMA_DVA(sc->sc_sense) +
2739 		    ccb->ccb_sense_offset;
2740 
2741 		/* select i'th sgl */
2742 		ccb->ccb_sgl_offset = sizeof(struct mfii_sge) *
2743 		    sc->sc_max_sgl * i;
2744 		ccb->ccb_sgl = (struct mfii_sge *)(sgl + ccb->ccb_sgl_offset);
2745 		ccb->ccb_sgl_dva = MFII_DMA_DVA(sc->sc_sgl) +
2746 		    ccb->ccb_sgl_offset;
2747 
2748 		/* add ccb to queue */
2749 		mfii_put_ccb(sc, ccb);
2750 	}
2751 
2752 	return (0);
2753 
2754 destroy:
2755 	/* free dma maps and ccb memory */
2756 	while ((ccb = mfii_get_ccb(sc)) != NULL)
2757 		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
2758 
2759 	free(sc->sc_ccb, M_DEVBUF, 0);
2760 
2761 	return (1);
2762 }
2763 
2764 #if NBIO > 0
2765 int
2766 mfii_ioctl(struct device *dev, u_long cmd, caddr_t addr)
2767 {
2768 	struct mfii_softc	*sc = (struct mfii_softc *)dev;
2769 	int error = 0;
2770 
2771 	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl ", DEVNAME(sc));
2772 
2773 	rw_enter_write(&sc->sc_lock);
2774 
2775 	switch (cmd) {
2776 	case BIOCINQ:
2777 		DNPRINTF(MFII_D_IOCTL, "inq\n");
2778 		error = mfii_ioctl_inq(sc, (struct bioc_inq *)addr);
2779 		break;
2780 
2781 	case BIOCVOL:
2782 		DNPRINTF(MFII_D_IOCTL, "vol\n");
2783 		error = mfii_ioctl_vol(sc, (struct bioc_vol *)addr);
2784 		break;
2785 
2786 	case BIOCDISK:
2787 		DNPRINTF(MFII_D_IOCTL, "disk\n");
2788 		error = mfii_ioctl_disk(sc, (struct bioc_disk *)addr);
2789 		break;
2790 
2791 	case BIOCALARM:
2792 		DNPRINTF(MFII_D_IOCTL, "alarm\n");
2793 		error = mfii_ioctl_alarm(sc, (struct bioc_alarm *)addr);
2794 		break;
2795 
2796 	case BIOCBLINK:
2797 		DNPRINTF(MFII_D_IOCTL, "blink\n");
2798 		error = mfii_ioctl_blink(sc, (struct bioc_blink *)addr);
2799 		break;
2800 
2801 	case BIOCSETSTATE:
2802 		DNPRINTF(MFII_D_IOCTL, "setstate\n");
2803 		error = mfii_ioctl_setstate(sc, (struct bioc_setstate *)addr);
2804 		break;
2805 
2806 	case BIOCPATROL:
2807 		DNPRINTF(MFII_D_IOCTL, "patrol\n");
2808 		error = mfii_ioctl_patrol(sc, (struct bioc_patrol *)addr);
2809 		break;
2810 
2811 	default:
2812 		DNPRINTF(MFII_D_IOCTL, " invalid ioctl\n");
2813 		error = ENOTTY;
2814 	}
2815 
2816 	rw_exit_write(&sc->sc_lock);
2817 
2818 	return (error);
2819 }
2820 
/*
 * Refresh the cached controller state used by the bio ioctls:
 * controller info, the full firmware configuration (sc_cfg), the
 * logical disk list (sc_ld_list) and per-LD details (sc_ld_details),
 * plus the total number of configured physical disks (sc_no_pd).
 */
int
mfii_bio_getitall(struct mfii_softc *sc)
{
	int			i, d, rv = EINVAL;
	size_t			size;
	union mfi_mbox		mbox;
	struct mfi_conf		*cfg = NULL;
	struct mfi_ld_details	*ld_det = NULL;

	/* get info */
	if (mfii_get_info(sc)) {
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_get_info failed\n",
		    DEVNAME(sc));
		goto done;
	}

	/* send single element command to retrieve size for full structure */
	cfg = malloc(sizeof *cfg, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (cfg == NULL)
		goto done;
	if (mfii_mgmt(sc, MR_DCMD_CONF_GET, NULL, cfg, sizeof(*cfg),
	    SCSI_DATA_IN)) {
		free(cfg, M_DEVBUF, sizeof *cfg);
		goto done;
	}

	/* mfc_size is the full configuration size reported by the firmware */
	size = cfg->mfc_size;
	free(cfg, M_DEVBUF, sizeof *cfg);

	/* memory for read config */
	cfg = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (cfg == NULL)
		goto done;
	if (mfii_mgmt(sc, MR_DCMD_CONF_GET, NULL, cfg, size, SCSI_DATA_IN)) {
		free(cfg, M_DEVBUF, size);
		goto done;
	}

	/* replace current pointer with new one */
	if (sc->sc_cfg)
		free(sc->sc_cfg, M_DEVBUF, 0);
	sc->sc_cfg = cfg;

	/* get all ld info */
	if (mfii_mgmt(sc, MR_DCMD_LD_GET_LIST, NULL, &sc->sc_ld_list,
	    sizeof(sc->sc_ld_list), SCSI_DATA_IN))
		goto done;

	/* get memory for all ld structures */
	size = cfg->mfc_no_ld * sizeof(struct mfi_ld_details);
	if (sc->sc_ld_sz != size) {
		/* LD count changed: reallocate the details array */
		if (sc->sc_ld_details)
			free(sc->sc_ld_details, M_DEVBUF, 0);

		ld_det = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
		if (ld_det == NULL)
			goto done;
		sc->sc_ld_sz = size;
		sc->sc_ld_details = ld_det;
	}

	/* find used physical disks */
	size = sizeof(struct mfi_ld_details);
	for (i = 0, d = 0; i < cfg->mfc_no_ld; i++) {
		memset(&mbox, 0, sizeof(mbox));
		mbox.b[0] = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
		if (mfii_mgmt(sc, MR_DCMD_LD_GET_INFO, &mbox, &sc->sc_ld_details[i], size,
		    SCSI_DATA_IN))
			goto done;

		/* disks per span times span count gives disks in this LD */
		d += sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_no_drv_per_span *
		    sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_span_depth;
	}
	sc->sc_no_pd = d;

	rv = 0;
done:
	return (rv);
}
2900 
/*
 * BIOCINQ handler: report device name, number of volumes and number
 * of disks to bio(4).
 */
int
mfii_ioctl_inq(struct mfii_softc *sc, struct bioc_inq *bi)
{
	int			rv = EINVAL;
	struct mfi_conf		*cfg = NULL;

	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_inq\n", DEVNAME(sc));

	if (mfii_bio_getitall(sc)) {
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n",
		    DEVNAME(sc));
		goto done;
	}

	/* count unused disks as volumes */
	if (sc->sc_cfg == NULL)
		goto done;
	cfg = sc->sc_cfg;

	bi->bi_nodisk = sc->sc_info.mci_pd_disks_present;
	/* hotspares are reported as extra volumes */
	bi->bi_novol = cfg->mfc_no_ld + cfg->mfc_no_hs;
#if notyet
	bi->bi_novol = cfg->mfc_no_ld + cfg->mfc_no_hs +
	    (bi->bi_nodisk - sc->sc_no_pd);
#endif
	/* tell bio who we are */
	strlcpy(bi->bi_dev, DEVNAME(sc), sizeof(bi->bi_dev));

	rv = 0;
done:
	return (rv);
}
2933 
/*
 * BIOCVOL handler: report the state, geometry and cache mode of a
 * volume.  Volume ids beyond the logical disk list are treated as
 * hotspares/unused disks and handed to mfii_bio_hs().
 */
int
mfii_ioctl_vol(struct mfii_softc *sc, struct bioc_vol *bv)
{
	int			i, per, target, rv = EINVAL;
	struct scsi_link	*link;
	struct device		*dev;

	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_vol %#x\n",
	    DEVNAME(sc), bv->bv_volid);

	/* we really could skip and expect that inq took care of it */
	if (mfii_bio_getitall(sc)) {
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n",
		    DEVNAME(sc));
		goto done;
	}

	if (bv->bv_volid >= sc->sc_ld_list.mll_no_ld) {
		/* go do hotspares & unused disks */
		rv = mfii_bio_hs(sc, bv->bv_volid, MFI_MGMT_VD, bv);
		goto done;
	}

	i = bv->bv_volid;
	target = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
	link = scsi_get_link(sc->sc_scsibus, target, 0);
	if (link == NULL) {
		/* no attached sd(4) device; report the LD as "cache" */
		strlcpy(bv->bv_dev, "cache", sizeof(bv->bv_dev));
	} else {
		dev = link->device_softc;
		if (dev == NULL)
			goto done;

		strlcpy(bv->bv_dev, dev->dv_xname, sizeof(bv->bv_dev));
	}

	switch(sc->sc_ld_list.mll_list[i].mll_state) {
	case MFI_LD_OFFLINE:
		bv->bv_status = BIOC_SVOFFLINE;
		break;

	case MFI_LD_PART_DEGRADED:
	case MFI_LD_DEGRADED:
		bv->bv_status = BIOC_SVDEGRADED;
		break;

	case MFI_LD_ONLINE:
		bv->bv_status = BIOC_SVONLINE;
		break;

	default:
		bv->bv_status = BIOC_SVINVALID;
		DNPRINTF(MFII_D_IOCTL, "%s: invalid logical disk state %#x\n",
		    DEVNAME(sc),
		    sc->sc_ld_list.mll_list[i].mll_state);
	}

	/* additional status can modify MFI status */
	switch (sc->sc_ld_details[i].mld_progress.mlp_in_prog) {
	case MFI_LD_PROG_CC:
		/* consistency check running; progress is 0..0xffff */
		bv->bv_status = BIOC_SVSCRUB;
		per = (int)sc->sc_ld_details[i].mld_progress.mlp_cc.mp_progress;
		bv->bv_percent = (per * 100) / 0xffff;
		bv->bv_seconds =
		    sc->sc_ld_details[i].mld_progress.mlp_cc.mp_elapsed_seconds;
		break;

	case MFI_LD_PROG_BGI:
		/* background init running; progress is 0..0xffff */
		bv->bv_status = BIOC_SVSCRUB;
		per = (int)sc->sc_ld_details[i].mld_progress.mlp_bgi.mp_progress;
		bv->bv_percent = (per * 100) / 0xffff;
		bv->bv_seconds =
		    sc->sc_ld_details[i].mld_progress.mlp_bgi.mp_elapsed_seconds;
		break;

	case MFI_LD_PROG_FGI:
	case MFI_LD_PROG_RECONSTRUCT:
		/* nothing yet */
		break;
	}

	if (sc->sc_ld_details[i].mld_cfg.mlc_prop.mlp_cur_cache_policy & 0x01)
		bv->bv_cache = BIOC_CVWRITEBACK;
	else
		bv->bv_cache = BIOC_CVWRITETHROUGH;

	/*
	 * The RAID levels are determined per the SNIA DDF spec, this is only
	 * a subset that is valid for the MFI controller.
	 */
	bv->bv_level = sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_pri_raid;
	/* spanned raid groups are reported as 10/50/60 */
	if (sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_span_depth > 1)
		bv->bv_level *= 10;

	bv->bv_nodisk = sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_no_drv_per_span *
	    sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_span_depth;

	bv->bv_size = sc->sc_ld_details[i].mld_size * 512; /* bytes per block */

	rv = 0;
done:
	return (rv);
}
3037 
/*
 * BIOCDISK handler: report the state of one disk within a volume.
 * Walks the firmware configuration to map (volume id, disk index) to
 * a physical disk, then queries that disk's details.
 */
int
mfii_ioctl_disk(struct mfii_softc *sc, struct bioc_disk *bd)
{
	struct mfi_conf		*cfg;
	struct mfi_array	*ar;
	struct mfi_ld_cfg	*ld;
	struct mfi_pd_details	*pd;
	struct mfi_pd_list	*pl;
	struct mfi_pd_progress	*mfp;
	struct mfi_progress	*mp;
	struct scsi_inquiry_data *inqbuf;
	char			vend[8+16+4+1], *vendp;
	int			i, rv = EINVAL;
	int			arr, vol, disk, span;
	union mfi_mbox		mbox;

	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_disk %#x\n",
	    DEVNAME(sc), bd->bd_diskid);

	/* we really could skip and expect that inq took care of it */
	if (mfii_bio_getitall(sc)) {
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n",
		    DEVNAME(sc));
		return (rv);
	}
	cfg = sc->sc_cfg;

	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);
	pl = malloc(sizeof *pl, M_DEVBUF, M_WAITOK);

	ar = cfg->mfc_array;
	vol = bd->bd_volid;
	if (vol >= cfg->mfc_no_ld) {
		/* do hotspares */
		rv = mfii_bio_hs(sc, bd->bd_volid, MFI_MGMT_SD, bd);
		goto freeme;
	}

	/* calculate offset to ld structure */
	ld = (struct mfi_ld_cfg *)(
	    ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
	    cfg->mfc_array_size * cfg->mfc_no_array);

	/* use span 0 only when raid group is not spanned */
	if (ld[vol].mlc_parm.mpa_span_depth > 1)
		span = bd->bd_diskid / ld[vol].mlc_parm.mpa_no_drv_per_span;
	else
		span = 0;
	arr = ld[vol].mlc_span[span].mls_index;

	/* offset disk into pd list */
	disk = bd->bd_diskid % ld[vol].mlc_parm.mpa_no_drv_per_span;

	if (ar[arr].pd[disk].mar_pd.mfp_id == 0xffffU) {
		/* disk is missing but succeed command */
		bd->bd_status = BIOC_SDFAILED;
		rv = 0;

		/* try to find an unused disk for the target to rebuild */
		if (mfii_mgmt(sc, MR_DCMD_PD_GET_LIST, NULL, pl, sizeof(*pl),
		    SCSI_DATA_IN))
			goto freeme;

		for (i = 0; i < pl->mpl_no_pd; i++) {
			if (pl->mpl_address[i].mpa_scsi_type != 0)
				continue;

			memset(&mbox, 0, sizeof(mbox));
			mbox.s[0] = pl->mpl_address[i].mpa_pd_id;
			if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
			    SCSI_DATA_IN))
				continue;

			if (pd->mpd_fw_state == MFI_PD_UNCONFIG_GOOD ||
			    pd->mpd_fw_state == MFI_PD_UNCONFIG_BAD)
				break;
		}

		/* no rebuild candidate found; report the missing disk as-is */
		if (i == pl->mpl_no_pd)
			goto freeme;
	} else {
		/* disk is present: look it up by its firmware id */
		memset(&mbox, 0, sizeof(mbox));
		mbox.s[0] = ar[arr].pd[disk].mar_pd.mfp_id;
		if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
		    SCSI_DATA_IN)) {
			bd->bd_status = BIOC_SDINVALID;
			goto freeme;
		}
	}

	/* get the remaining fields */
	bd->bd_channel = pd->mpd_enc_idx;
	bd->bd_target = pd->mpd_enc_slot;

	/* get status */
	switch (pd->mpd_fw_state){
	case MFI_PD_UNCONFIG_GOOD:
	case MFI_PD_UNCONFIG_BAD:
		bd->bd_status = BIOC_SDUNUSED;
		break;

	case MFI_PD_HOTSPARE: /* XXX dedicated hotspare part of array? */
		bd->bd_status = BIOC_SDHOTSPARE;
		break;

	case MFI_PD_OFFLINE:
		bd->bd_status = BIOC_SDOFFLINE;
		break;

	case MFI_PD_FAILED:
		bd->bd_status = BIOC_SDFAILED;
		break;

	case MFI_PD_REBUILD:
		bd->bd_status = BIOC_SDREBUILD;
		break;

	case MFI_PD_ONLINE:
		bd->bd_status = BIOC_SDONLINE;
		break;

	case MFI_PD_COPYBACK:
	case MFI_PD_SYSTEM:
	default:
		bd->bd_status = BIOC_SDINVALID;
		break;
	}

	bd->bd_size = pd->mpd_size * 512; /* bytes per block */

	/* vendor string is built from the inquiry vendor+product fields */
	inqbuf = (struct scsi_inquiry_data *)&pd->mpd_inq_data;
	vendp = inqbuf->vendor;
	memcpy(vend, vendp, sizeof vend - 1);
	vend[sizeof vend - 1] = '\0';
	strlcpy(bd->bd_vendor, vend, sizeof(bd->bd_vendor));

	/* XXX find a way to retrieve serial nr from drive */
	/* XXX find a way to get bd_procdev */

	mfp = &pd->mpd_progress;
	if (mfp->mfp_in_prog & MFI_PD_PROG_PR) {
		/* patrol read in progress; progress is 0..0xffff */
		mp = &mfp->mfp_patrol_read;
		bd->bd_patrol.bdp_percent = (mp->mp_progress * 100) / 0xffff;
		bd->bd_patrol.bdp_seconds = mp->mp_elapsed_seconds;
	}

	rv = 0;
freeme:
	free(pd, M_DEVBUF, sizeof *pd);
	free(pl, M_DEVBUF, sizeof *pl);

	return (rv);
}
3191 
3192 int
3193 mfii_ioctl_alarm(struct mfii_softc *sc, struct bioc_alarm *ba)
3194 {
3195 	uint32_t		opc, flags = 0;
3196 	int			rv = 0;
3197 	int8_t			ret;
3198 
3199 	switch(ba->ba_opcode) {
3200 	case BIOC_SADISABLE:
3201 		opc = MR_DCMD_SPEAKER_DISABLE;
3202 		break;
3203 
3204 	case BIOC_SAENABLE:
3205 		opc = MR_DCMD_SPEAKER_ENABLE;
3206 		break;
3207 
3208 	case BIOC_SASILENCE:
3209 		opc = MR_DCMD_SPEAKER_SILENCE;
3210 		break;
3211 
3212 	case BIOC_GASTATUS:
3213 		opc = MR_DCMD_SPEAKER_GET;
3214 		flags = SCSI_DATA_IN;
3215 		break;
3216 
3217 	case BIOC_SATEST:
3218 		opc = MR_DCMD_SPEAKER_TEST;
3219 		break;
3220 
3221 	default:
3222 		DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_alarm biocalarm invalid "
3223 		    "opcode %x\n", DEVNAME(sc), ba->ba_opcode);
3224 		return (EINVAL);
3225 	}
3226 
3227 	if (mfii_mgmt(sc, opc, NULL, &ret, sizeof(ret), flags))
3228 		rv = EINVAL;
3229 	else
3230 		if (ba->ba_opcode == BIOC_GASTATUS)
3231 			ba->ba_status = ret;
3232 		else
3233 			ba->ba_status = 0;
3234 
3235 	return (rv);
3236 }
3237 
3238 int
3239 mfii_ioctl_blink(struct mfii_softc *sc, struct bioc_blink *bb)
3240 {
3241 	int			i, found, rv = EINVAL;
3242 	union mfi_mbox		mbox;
3243 	uint32_t		cmd;
3244 	struct mfi_pd_list	*pd;
3245 
3246 	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_blink %x\n", DEVNAME(sc),
3247 	    bb->bb_status);
3248 
3249 	/* channel 0 means not in an enclosure so can't be blinked */
3250 	if (bb->bb_channel == 0)
3251 		return (EINVAL);
3252 
3253 	pd = malloc(sizeof(*pd), M_DEVBUF, M_WAITOK);
3254 
3255 	if (mfii_mgmt(sc, MR_DCMD_PD_GET_LIST, NULL, pd, sizeof(*pd), SCSI_DATA_IN))
3256 		goto done;
3257 
3258 	for (i = 0, found = 0; i < pd->mpl_no_pd; i++)
3259 		if (bb->bb_channel == pd->mpl_address[i].mpa_enc_index &&
3260 		    bb->bb_target == pd->mpl_address[i].mpa_enc_slot) {
3261 			found = 1;
3262 			break;
3263 		}
3264 
3265 	if (!found)
3266 		goto done;
3267 
3268 	memset(&mbox, 0, sizeof(mbox));
3269 	mbox.s[0] = pd->mpl_address[i].mpa_pd_id;
3270 
3271 	switch (bb->bb_status) {
3272 	case BIOC_SBUNBLINK:
3273 		cmd = MR_DCMD_PD_UNBLINK;
3274 		break;
3275 
3276 	case BIOC_SBBLINK:
3277 		cmd = MR_DCMD_PD_BLINK;
3278 		break;
3279 
3280 	case BIOC_SBALARM:
3281 	default:
3282 		DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_blink biocblink invalid "
3283 		    "opcode %x\n", DEVNAME(sc), bb->bb_status);
3284 		goto done;
3285 	}
3286 
3287 
3288 	if (mfii_mgmt(sc, cmd, &mbox, NULL, 0, 0) == 0)
3289 		rv = 0;
3290 
3291 done:
3292 	free(pd, M_DEVBUF, sizeof *pd);
3293 	return (rv);
3294 }
3295 
/*
 * Bring physical disk pd_id into a state where it can be used as a
 * rebuild target: promote it from UNCONFIG_BAD to UNCONFIG_GOOD and
 * wipe any foreign (previous controller) configuration found on it.
 * Returns 0 on success, a mfii_mgmt() error, or ENXIO when the disk
 * still is not clean after the attempt.
 */
static int
mfii_makegood(struct mfii_softc *sc, uint16_t pd_id)
{
	struct mfii_foreign_scan_info *fsi;
	struct mfi_pd_details	*pd;
	union mfi_mbox		mbox;
	int			rv;

	fsi = malloc(sizeof *fsi, M_DEVBUF, M_WAITOK);
	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);

	/* fetch current state/sequence number for the disk */
	memset(&mbox, 0, sizeof mbox);
	mbox.s[0] = pd_id;
	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd), SCSI_DATA_IN);
	if (rv != 0)
		goto done;

	if (pd->mpd_fw_state == MFI_PD_UNCONFIG_BAD) {
		/* SET_STATE mbox layout: s[0] pd id, s[1] seq, b[4] state */
		mbox.s[0] = pd_id;
		mbox.s[1] = pd->mpd_pd.mfp_seq;
		mbox.b[4] = MFI_PD_UNCONFIG_GOOD;
		rv = mfii_mgmt(sc, MR_DCMD_PD_SET_STATE, &mbox, NULL, 0, 0);
		if (rv != 0)
			goto done;
	}

	/* re-read the details; the state change may have altered them */
	memset(&mbox, 0, sizeof mbox);
	mbox.s[0] = pd_id;
	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd), SCSI_DATA_IN);
	if (rv != 0)
		goto done;

	if (pd->mpd_ddf_state & MFI_DDF_FOREIGN) {
		/* scan for and then clear leftover foreign configuration */
		rv = mfii_mgmt(sc, MR_DCMD_CFG_FOREIGN_SCAN, NULL, fsi, sizeof(*fsi),
		    SCSI_DATA_IN);
		if (rv != 0)
			goto done;

		if (fsi->count > 0) {
			rv = mfii_mgmt(sc, MR_DCMD_CFG_FOREIGN_CLEAR, NULL, NULL, 0, 0);
			if (rv != 0)
				goto done;
		}
	}

	/* verify the disk ended up clean and usable */
	memset(&mbox, 0, sizeof mbox);
	mbox.s[0] = pd_id;
	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd), SCSI_DATA_IN);
	if (rv != 0)
		goto done;

	if (pd->mpd_fw_state != MFI_PD_UNCONFIG_GOOD ||
	    pd->mpd_ddf_state & MFI_DDF_FOREIGN)
		rv = ENXIO;

done:
	free(fsi, M_DEVBUF, sizeof *fsi);
	free(pd, M_DEVBUF, sizeof *pd);

	return (rv);
}
3357 
3358 static int
3359 mfii_makespare(struct mfii_softc *sc, uint16_t pd_id)
3360 {
3361 	struct mfi_hotspare	*hs;
3362 	struct mfi_pd_details	*pd;
3363 	union mfi_mbox		mbox;
3364 	size_t			size;
3365 	int			rv = EINVAL;
3366 
3367 	/* we really could skip and expect that inq took care of it */
3368 	if (mfii_bio_getitall(sc)) {
3369 		DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n",
3370 		    DEVNAME(sc));
3371 		return (rv);
3372 	}
3373 	size = sizeof *hs + sizeof(uint16_t) * sc->sc_cfg->mfc_no_array;
3374 
3375 	hs = malloc(size, M_DEVBUF, M_WAITOK);
3376 	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);
3377 
3378 	memset(&mbox, 0, sizeof mbox);
3379 	mbox.s[0] = pd_id;
3380 	rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
3381 	    SCSI_DATA_IN);
3382 	if (rv != 0)
3383 		goto done;
3384 
3385 	memset(hs, 0, size);
3386 	hs->mhs_pd.mfp_id = pd->mpd_pd.mfp_id;
3387 	hs->mhs_pd.mfp_seq = pd->mpd_pd.mfp_seq;
3388 	rv = mfii_mgmt(sc, MR_DCMD_CFG_MAKE_SPARE, NULL, hs, size, SCSI_DATA_OUT);
3389 
3390 done:
3391 	free(hs, M_DEVBUF, size);
3392 	free(pd, M_DEVBUF, sizeof *pd);
3393 
3394 	return (rv);
3395 }
3396 
/*
 * BIOCSETSTATE ioctl backend: change the firmware state of the
 * physical disk addressed by bs->bs_channel/bs->bs_target (online,
 * offline, hotspare or rebuild).  Returns 0 on success, otherwise
 * EINVAL or a mfii_mgmt() error.
 */
int
mfii_ioctl_setstate(struct mfii_softc *sc, struct bioc_setstate *bs)
{
	struct mfi_pd_details	*pd;
	struct mfi_pd_list	*pl;
	int			i, found, rv = EINVAL;
	union mfi_mbox		mbox;

	DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_setstate %x\n", DEVNAME(sc),
	    bs->bs_status);

	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);
	pl = malloc(sizeof *pl, M_DEVBUF, M_WAITOK);

	/* map the enclosure/slot address to a firmware pd id */
	if (mfii_mgmt(sc, MR_DCMD_PD_GET_LIST, NULL, pl, sizeof(*pl), SCSI_DATA_IN))
		goto done;

	for (i = 0, found = 0; i < pl->mpl_no_pd; i++)
		if (bs->bs_channel == pl->mpl_address[i].mpa_enc_index &&
		    bs->bs_target == pl->mpl_address[i].mpa_enc_slot) {
			found = 1;
			break;
		}

	if (!found)
		goto done;

	memset(&mbox, 0, sizeof(mbox));
	mbox.s[0] = pl->mpl_address[i].mpa_pd_id;

	/* the state change below needs the disk's current seq number */
	if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd), SCSI_DATA_IN))
		goto done;

	/* SET_STATE mbox layout: s[0] pd id, s[1] seq, b[4] new state */
	mbox.s[0] = pl->mpl_address[i].mpa_pd_id;
	mbox.s[1] = pd->mpd_pd.mfp_seq;

	switch (bs->bs_status) {
	case BIOC_SSONLINE:
		mbox.b[4] = MFI_PD_ONLINE;
		break;

	case BIOC_SSOFFLINE:
		mbox.b[4] = MFI_PD_OFFLINE;
		break;

	case BIOC_SSHOTSPARE:
		mbox.b[4] = MFI_PD_HOTSPARE;
		break;

	case BIOC_SSREBUILD:
		if (pd->mpd_fw_state != MFI_PD_OFFLINE) {
			/*
			 * Disk is not offline: clean it up and make it a
			 * spare so the firmware can pull it into the
			 * degraded volume.
			 */
			if ((rv = mfii_makegood(sc,
			    pl->mpl_address[i].mpa_pd_id)))
				goto done;

			if ((rv = mfii_makespare(sc,
			    pl->mpl_address[i].mpa_pd_id)))
				goto done;

			/* re-read details; state and seq may have changed */
			memset(&mbox, 0, sizeof(mbox));
			mbox.s[0] = pl->mpl_address[i].mpa_pd_id;
			rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
			    SCSI_DATA_IN);
			if (rv != 0)
				goto done;

			/* rebuilding might be started by mfii_makespare() */
			if (pd->mpd_fw_state == MFI_PD_REBUILD) {
				rv = 0;
				goto done;
			}

			mbox.s[0] = pl->mpl_address[i].mpa_pd_id;
			mbox.s[1] = pd->mpd_pd.mfp_seq;
		}
		mbox.b[4] = MFI_PD_REBUILD;
		break;

	default:
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_setstate invalid "
		    "opcode %x\n", DEVNAME(sc), bs->bs_status);
		goto done;
	}


	rv = mfii_mgmt(sc, MR_DCMD_PD_SET_STATE, &mbox, NULL, 0, 0);
done:
	free(pd, M_DEVBUF, sizeof *pd);
	free(pl, M_DEVBUF, sizeof *pl);
	return (rv);
}
3488 
/*
 * BIOCPATROL ioctl backend: start/stop a patrol read, configure its
 * schedule (manual/disabled/auto with interval and next start time),
 * or report the current patrol mode and state.  Returns 0 on success
 * or EINVAL on any firmware command failure or bad argument.
 */
int
mfii_ioctl_patrol(struct mfii_softc *sc, struct bioc_patrol *bp)
{
	uint32_t		opc;
	int			rv = 0;
	struct mfi_pr_properties prop;
	struct mfi_pr_status	status;
	uint32_t		time, exec_freq;

	switch (bp->bp_opcode) {
	case BIOC_SPSTOP:
	case BIOC_SPSTART:
		if (bp->bp_opcode == BIOC_SPSTART)
			opc = MR_DCMD_PR_START;
		else
			opc = MR_DCMD_PR_STOP;
		if (mfii_mgmt(sc, opc, NULL, NULL, 0, SCSI_DATA_IN))
			return (EINVAL);
		break;

	case BIOC_SPMANUAL:
	case BIOC_SPDISABLE:
	case BIOC_SPAUTO:
		/* Get device's time. */
		opc = MR_DCMD_TIME_SECS_GET;
		if (mfii_mgmt(sc, opc, NULL, &time, sizeof(time), SCSI_DATA_IN))
			return (EINVAL);

		/* read-modify-write the patrol properties */
		opc = MR_DCMD_PR_GET_PROPERTIES;
		if (mfii_mgmt(sc, opc, NULL, &prop, sizeof(prop), SCSI_DATA_IN))
			return (EINVAL);

		switch (bp->bp_opcode) {
		case BIOC_SPMANUAL:
			prop.op_mode = MFI_PR_OPMODE_MANUAL;
			break;
		case BIOC_SPDISABLE:
			prop.op_mode = MFI_PR_OPMODE_DISABLED;
			break;
		case BIOC_SPAUTO:
			/* 0 means "leave the current interval unchanged" */
			if (bp->bp_autoival != 0) {
				if (bp->bp_autoival == -1)
					/* continuously */
					exec_freq = 0xffffffffU;
				else if (bp->bp_autoival > 0)
					exec_freq = bp->bp_autoival;
				else
					return (EINVAL);
				prop.exec_freq = exec_freq;
			}
			/* next start time is relative to the device clock */
			if (bp->bp_autonext != 0) {
				if (bp->bp_autonext < 0)
					return (EINVAL);
				else
					prop.next_exec = time + bp->bp_autonext;
			}
			prop.op_mode = MFI_PR_OPMODE_AUTO;
			break;
		}

		opc = MR_DCMD_PR_SET_PROPERTIES;
		if (mfii_mgmt(sc, opc, NULL, &prop, sizeof(prop), SCSI_DATA_OUT))
			return (EINVAL);

		break;

	case BIOC_GPSTATUS:
		opc = MR_DCMD_PR_GET_PROPERTIES;
		if (mfii_mgmt(sc, opc, NULL, &prop, sizeof(prop), SCSI_DATA_IN))
			return (EINVAL);

		opc = MR_DCMD_PR_GET_STATUS;
		if (mfii_mgmt(sc, opc, NULL, &status, sizeof(status), SCSI_DATA_IN))
			return (EINVAL);

		/* Get device's time. */
		opc = MR_DCMD_TIME_SECS_GET;
		if (mfii_mgmt(sc, opc, NULL, &time, sizeof(time), SCSI_DATA_IN))
			return (EINVAL);

		/* translate firmware mode/state into bio(4) values */
		switch (prop.op_mode) {
		case MFI_PR_OPMODE_AUTO:
			bp->bp_mode = BIOC_SPMAUTO;
			bp->bp_autoival = prop.exec_freq;
			bp->bp_autonext = prop.next_exec;
			bp->bp_autonow = time;
			break;
		case MFI_PR_OPMODE_MANUAL:
			bp->bp_mode = BIOC_SPMMANUAL;
			break;
		case MFI_PR_OPMODE_DISABLED:
			bp->bp_mode = BIOC_SPMDISABLED;
			break;
		default:
			printf("%s: unknown patrol mode %d\n",
			    DEVNAME(sc), prop.op_mode);
			break;
		}

		switch (status.state) {
		case MFI_PR_STATE_STOPPED:
			bp->bp_status = BIOC_SPSSTOPPED;
			break;
		case MFI_PR_STATE_READY:
			bp->bp_status = BIOC_SPSREADY;
			break;
		case MFI_PR_STATE_ACTIVE:
			bp->bp_status = BIOC_SPSACTIVE;
			break;
		case MFI_PR_STATE_ABORTED:
			bp->bp_status = BIOC_SPSABORTED;
			break;
		default:
			printf("%s: unknown patrol state %d\n",
			    DEVNAME(sc), status.state);
			break;
		}

		break;

	default:
		DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_patrol biocpatrol invalid "
		    "opcode %x\n", DEVNAME(sc), bp->bp_opcode);
		return (EINVAL);
	}

	return (rv);
}
3617 
3618 int
3619 mfii_bio_hs(struct mfii_softc *sc, int volid, int type, void *bio_hs)
3620 {
3621 	struct mfi_conf		*cfg;
3622 	struct mfi_hotspare	*hs;
3623 	struct mfi_pd_details	*pd;
3624 	struct bioc_disk	*sdhs;
3625 	struct bioc_vol		*vdhs;
3626 	struct scsi_inquiry_data *inqbuf;
3627 	char			vend[8+16+4+1], *vendp;
3628 	int			i, rv = EINVAL;
3629 	uint32_t		size;
3630 	union mfi_mbox		mbox;
3631 
3632 	DNPRINTF(MFII_D_IOCTL, "%s: mfii_vol_hs %d\n", DEVNAME(sc), volid);
3633 
3634 	if (!bio_hs)
3635 		return (EINVAL);
3636 
3637 	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);
3638 
3639 	/* send single element command to retrieve size for full structure */
3640 	cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK);
3641 	if (mfii_mgmt(sc, MR_DCMD_CONF_GET, NULL, cfg, sizeof(*cfg), SCSI_DATA_IN))
3642 		goto freeme;
3643 
3644 	size = cfg->mfc_size;
3645 	free(cfg, M_DEVBUF, sizeof *cfg);
3646 
3647 	/* memory for read config */
3648 	cfg = malloc(size, M_DEVBUF, M_WAITOK|M_ZERO);
3649 	if (mfii_mgmt(sc, MR_DCMD_CONF_GET, NULL, cfg, size, SCSI_DATA_IN))
3650 		goto freeme;
3651 
3652 	/* calculate offset to hs structure */
3653 	hs = (struct mfi_hotspare *)(
3654 	    ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
3655 	    cfg->mfc_array_size * cfg->mfc_no_array +
3656 	    cfg->mfc_ld_size * cfg->mfc_no_ld);
3657 
3658 	if (volid < cfg->mfc_no_ld)
3659 		goto freeme; /* not a hotspare */
3660 
3661 	if (volid > (cfg->mfc_no_ld + cfg->mfc_no_hs))
3662 		goto freeme; /* not a hotspare */
3663 
3664 	/* offset into hotspare structure */
3665 	i = volid - cfg->mfc_no_ld;
3666 
3667 	DNPRINTF(MFII_D_IOCTL, "%s: mfii_vol_hs i %d volid %d no_ld %d no_hs %d "
3668 	    "hs %p cfg %p id %02x\n", DEVNAME(sc), i, volid, cfg->mfc_no_ld,
3669 	    cfg->mfc_no_hs, hs, cfg, hs[i].mhs_pd.mfp_id);
3670 
3671 	/* get pd fields */
3672 	memset(&mbox, 0, sizeof(mbox));
3673 	mbox.s[0] = hs[i].mhs_pd.mfp_id;
3674 	if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO, &mbox, pd, sizeof(*pd),
3675 	    SCSI_DATA_IN)) {
3676 		DNPRINTF(MFII_D_IOCTL, "%s: mfii_vol_hs illegal PD\n",
3677 		    DEVNAME(sc));
3678 		goto freeme;
3679 	}
3680 
3681 	switch (type) {
3682 	case MFI_MGMT_VD:
3683 		vdhs = bio_hs;
3684 		vdhs->bv_status = BIOC_SVONLINE;
3685 		vdhs->bv_size = pd->mpd_size / 2 * 1024; /* XXX why? */
3686 		vdhs->bv_level = -1; /* hotspare */
3687 		vdhs->bv_nodisk = 1;
3688 		break;
3689 
3690 	case MFI_MGMT_SD:
3691 		sdhs = bio_hs;
3692 		sdhs->bd_status = BIOC_SDHOTSPARE;
3693 		sdhs->bd_size = pd->mpd_size / 2 * 1024; /* XXX why? */
3694 		sdhs->bd_channel = pd->mpd_enc_idx;
3695 		sdhs->bd_target = pd->mpd_enc_slot;
3696 		inqbuf = (struct scsi_inquiry_data *)&pd->mpd_inq_data;
3697 		vendp = inqbuf->vendor;
3698 		memcpy(vend, vendp, sizeof vend - 1);
3699 		vend[sizeof vend - 1] = '\0';
3700 		strlcpy(sdhs->bd_vendor, vend, sizeof(sdhs->bd_vendor));
3701 		break;
3702 
3703 	default:
3704 		goto freeme;
3705 	}
3706 
3707 	DNPRINTF(MFII_D_IOCTL, "%s: mfii_vol_hs 6\n", DEVNAME(sc));
3708 	rv = 0;
3709 freeme:
3710 	free(pd, M_DEVBUF, sizeof *pd);
3711 	free(cfg, M_DEVBUF, 0);
3712 
3713 	return (rv);
3714 }
3715 
3716 #ifndef SMALL_KERNEL
3717 
3718 #define MFI_BBU_SENSORS 4
3719 
3720 void
3721 mfii_bbu(struct mfii_softc *sc)
3722 {
3723 	struct mfi_bbu_status bbu;
3724 	u_int32_t status;
3725 	u_int32_t mask;
3726 	u_int32_t soh_bad;
3727 	int i;
3728 
3729 	if (mfii_mgmt(sc, MR_DCMD_BBU_GET_STATUS, NULL, &bbu,
3730 	    sizeof(bbu), SCSI_DATA_IN) != 0) {
3731 		for (i = 0; i < MFI_BBU_SENSORS; i++) {
3732 			sc->sc_bbu[i].value = 0;
3733 			sc->sc_bbu[i].status = SENSOR_S_UNKNOWN;
3734 		}
3735 		for (i = 0; i < nitems(mfi_bbu_indicators); i++) {
3736 			sc->sc_bbu_status[i].value = 0;
3737 			sc->sc_bbu_status[i].status = SENSOR_S_UNKNOWN;
3738 		}
3739 		return;
3740 	}
3741 
3742 	switch (bbu.battery_type) {
3743 	case MFI_BBU_TYPE_IBBU:
3744 		mask = MFI_BBU_STATE_BAD_IBBU;
3745 		soh_bad = 0;
3746 		break;
3747 	case MFI_BBU_TYPE_BBU:
3748 		mask = MFI_BBU_STATE_BAD_BBU;
3749 		soh_bad = (bbu.detail.bbu.is_SOH_good == 0);
3750 		break;
3751 
3752 	case MFI_BBU_TYPE_NONE:
3753 	default:
3754 		sc->sc_bbu[0].value = 0;
3755 		sc->sc_bbu[0].status = SENSOR_S_CRIT;
3756 		for (i = 1; i < MFI_BBU_SENSORS; i++) {
3757 			sc->sc_bbu[i].value = 0;
3758 			sc->sc_bbu[i].status = SENSOR_S_UNKNOWN;
3759 		}
3760 		for (i = 0; i < nitems(mfi_bbu_indicators); i++) {
3761 			sc->sc_bbu_status[i].value = 0;
3762 			sc->sc_bbu_status[i].status = SENSOR_S_UNKNOWN;
3763 		}
3764 		return;
3765 	}
3766 
3767 	status = letoh32(bbu.fw_status);
3768 
3769 	sc->sc_bbu[0].value = ((status & mask) || soh_bad) ? 0 : 1;
3770 	sc->sc_bbu[0].status = ((status & mask) || soh_bad) ? SENSOR_S_CRIT :
3771 	    SENSOR_S_OK;
3772 
3773 	sc->sc_bbu[1].value = letoh16(bbu.voltage) * 1000;
3774 	sc->sc_bbu[2].value = (int16_t)letoh16(bbu.current) * 1000;
3775 	sc->sc_bbu[3].value = letoh16(bbu.temperature) * 1000000 + 273150000;
3776 	for (i = 1; i < MFI_BBU_SENSORS; i++)
3777 		sc->sc_bbu[i].status = SENSOR_S_UNSPEC;
3778 
3779 	for (i = 0; i < nitems(mfi_bbu_indicators); i++) {
3780 		sc->sc_bbu_status[i].value = (status & (1 << i)) ? 1 : 0;
3781 		sc->sc_bbu_status[i].status = SENSOR_S_UNSPEC;
3782 	}
3783 }
3784 
3785 void
3786 mfii_refresh_ld_sensor(struct mfii_softc *sc, int ld)
3787 {
3788 	struct ksensor *sensor;
3789 	int target;
3790 
3791 	target = sc->sc_ld_list.mll_list[ld].mll_ld.mld_target;
3792 	sensor = &sc->sc_sensors[target];
3793 
3794 	switch(sc->sc_ld_list.mll_list[ld].mll_state) {
3795 	case MFI_LD_OFFLINE:
3796 		sensor->value = SENSOR_DRIVE_FAIL;
3797 		sensor->status = SENSOR_S_CRIT;
3798 		break;
3799 
3800 	case MFI_LD_PART_DEGRADED:
3801 	case MFI_LD_DEGRADED:
3802 		sensor->value = SENSOR_DRIVE_PFAIL;
3803 		sensor->status = SENSOR_S_WARN;
3804 		break;
3805 
3806 	case MFI_LD_ONLINE:
3807 		sensor->value = SENSOR_DRIVE_ONLINE;
3808 		sensor->status = SENSOR_S_OK;
3809 		break;
3810 
3811 	default:
3812 		sensor->value = 0; /* unknown */
3813 		sensor->status = SENSOR_S_UNKNOWN;
3814 		break;
3815 	}
3816 }
3817 
3818 void
3819 mfii_init_ld_sensor(struct mfii_softc *sc, int ld)
3820 {
3821 	struct device		*dev;
3822 	struct scsi_link	*link;
3823 	struct ksensor		*sensor;
3824 	int			target;
3825 
3826 	target = sc->sc_ld_list.mll_list[ld].mll_ld.mld_target;
3827 	sensor = &sc->sc_sensors[target];
3828 
3829 	link = scsi_get_link(sc->sc_scsibus, target, 0);
3830 	if (link == NULL) {
3831 		strlcpy(sensor->desc, "cache", sizeof(sensor->desc));
3832 	} else {
3833 		dev = link->device_softc;
3834 		if (dev != NULL)
3835 			strlcpy(sensor->desc, dev->dv_xname,
3836 			    sizeof(sensor->desc));
3837 	}
3838 	sensor->type = SENSOR_DRIVE;
3839 	mfii_refresh_ld_sensor(sc, ld);
3840 }
3841 
3842 int
3843 mfii_create_sensors(struct mfii_softc *sc)
3844 {
3845 	int			i, target;
3846 
3847 	strlcpy(sc->sc_sensordev.xname, DEVNAME(sc),
3848 	    sizeof(sc->sc_sensordev.xname));
3849 
3850 	if (ISSET(letoh32(sc->sc_info.mci_hw_present), MFI_INFO_HW_BBU)) {
3851 		sc->sc_bbu = mallocarray(4, sizeof(*sc->sc_bbu),
3852 		    M_DEVBUF, M_WAITOK | M_ZERO);
3853 
3854 		sc->sc_bbu[0].type = SENSOR_INDICATOR;
3855 		sc->sc_bbu[0].status = SENSOR_S_UNKNOWN;
3856 		strlcpy(sc->sc_bbu[0].desc, "bbu ok",
3857 		    sizeof(sc->sc_bbu[0].desc));
3858 		sensor_attach(&sc->sc_sensordev, &sc->sc_bbu[0]);
3859 
3860 		sc->sc_bbu[1].type = SENSOR_VOLTS_DC;
3861 		sc->sc_bbu[1].status = SENSOR_S_UNSPEC;
3862 		sc->sc_bbu[2].type = SENSOR_AMPS;
3863 		sc->sc_bbu[2].status = SENSOR_S_UNSPEC;
3864 		sc->sc_bbu[3].type = SENSOR_TEMP;
3865 		sc->sc_bbu[3].status = SENSOR_S_UNSPEC;
3866 		for (i = 1; i < MFI_BBU_SENSORS; i++) {
3867 			strlcpy(sc->sc_bbu[i].desc, "bbu",
3868 			    sizeof(sc->sc_bbu[i].desc));
3869 			sensor_attach(&sc->sc_sensordev, &sc->sc_bbu[i]);
3870 		}
3871 
3872 		sc->sc_bbu_status = malloc(sizeof(*sc->sc_bbu_status) *
3873 		    sizeof(mfi_bbu_indicators), M_DEVBUF, M_WAITOK | M_ZERO);
3874 
3875 		for (i = 0; i < nitems(mfi_bbu_indicators); i++) {
3876 			sc->sc_bbu_status[i].type = SENSOR_INDICATOR;
3877 			sc->sc_bbu_status[i].status = SENSOR_S_UNSPEC;
3878 			strlcpy(sc->sc_bbu_status[i].desc,
3879 			    mfi_bbu_indicators[i],
3880 			    sizeof(sc->sc_bbu_status[i].desc));
3881 
3882 			sensor_attach(&sc->sc_sensordev, &sc->sc_bbu_status[i]);
3883 		}
3884 	}
3885 
3886 	sc->sc_sensors = mallocarray(MFI_MAX_LD, sizeof(struct ksensor),
3887 	    M_DEVBUF, M_NOWAIT | M_ZERO);
3888 	if (sc->sc_sensors == NULL)
3889 		return (1);
3890 
3891 	for (i = 0; i < sc->sc_ld_list.mll_no_ld; i++) {
3892 		mfii_init_ld_sensor(sc, i);
3893 		target = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
3894 		sensor_attach(&sc->sc_sensordev, &sc->sc_sensors[target]);
3895 	}
3896 
3897 	if (sensor_task_register(sc, mfii_refresh_sensors, 10) == NULL)
3898 		goto bad;
3899 
3900 	sensordev_install(&sc->sc_sensordev);
3901 
3902 	return (0);
3903 
3904 bad:
3905 	free(sc->sc_sensors, M_DEVBUF,
3906 	    MFI_MAX_LD * sizeof(struct ksensor));
3907 
3908 	return (1);
3909 }
3910 
3911 void
3912 mfii_refresh_sensors(void *arg)
3913 {
3914 	struct mfii_softc	*sc = arg;
3915 	int			i;
3916 
3917 	rw_enter_write(&sc->sc_lock);
3918 	if (sc->sc_bbu != NULL)
3919 		mfii_bbu(sc);
3920 
3921 	mfii_bio_getitall(sc);
3922 	rw_exit_write(&sc->sc_lock);
3923 
3924 	for (i = 0; i < sc->sc_ld_list.mll_no_ld; i++)
3925 		mfii_refresh_ld_sensor(sc, i);
3926 }
3927 #endif /* SMALL_KERNEL */
3928 #endif /* NBIO > 0 */
3929