xref: /openbsd/sys/dev/pci/mpii.c (revision d415bd75)
1 /*	$OpenBSD: mpii.c,v 1.146 2023/07/06 10:17:43 visa Exp $	*/
2 /*
3  * Copyright (c) 2010, 2012 Mike Belopuhov
4  * Copyright (c) 2009 James Giannoules
5  * Copyright (c) 2005 - 2010 David Gwynne <dlg@openbsd.org>
6  * Copyright (c) 2005 - 2010 Marco Peereboom <marco@openbsd.org>
7  *
8  * Permission to use, copy, modify, and distribute this software for any
9  * purpose with or without fee is hereby granted, provided that the above
10  * copyright notice and this permission notice appear in all copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19  */
20 
21 #include "bio.h"
22 
23 #include <sys/param.h>
24 #include <sys/systm.h>
25 #include <sys/device.h>
26 #include <sys/ioctl.h>
27 #include <sys/malloc.h>
28 #include <sys/kernel.h>
29 #include <sys/rwlock.h>
30 #include <sys/sensors.h>
31 #include <sys/dkio.h>
32 #include <sys/tree.h>
33 #include <sys/task.h>
34 
35 #include <machine/bus.h>
36 
37 #include <dev/pci/pcireg.h>
38 #include <dev/pci/pcivar.h>
39 #include <dev/pci/pcidevs.h>
40 
41 #include <scsi/scsi_all.h>
42 #include <scsi/scsiconf.h>
43 
44 #include <dev/biovar.h>
45 
46 #include <dev/pci/mpiireg.h>
47 
48 /* #define MPII_DEBUG */
49 #ifdef MPII_DEBUG
50 #define DPRINTF(x...)		do { if (mpii_debug) printf(x); } while(0)
51 #define DNPRINTF(n,x...)	do { if (mpii_debug & (n)) printf(x); } while(0)
52 #define	MPII_D_CMD		(0x0001)
53 #define	MPII_D_INTR		(0x0002)
54 #define	MPII_D_MISC		(0x0004)
55 #define	MPII_D_DMA		(0x0008)
56 #define	MPII_D_IOCTL		(0x0010)
57 #define	MPII_D_RW		(0x0020)
58 #define	MPII_D_MEM		(0x0040)
59 #define	MPII_D_CCB		(0x0080)
60 #define	MPII_D_PPR		(0x0100)
61 #define	MPII_D_RAID		(0x0200)
62 #define	MPII_D_EVT		(0x0400)
63 #define MPII_D_CFG		(0x0800)
64 #define MPII_D_MAP		(0x1000)
65 
/*
 * Runtime debug mask consumed by DPRINTF/DNPRINTF; all trace
 * categories are enabled by default when MPII_DEBUG is defined.
 */
u_int32_t  mpii_debug = 0
		| MPII_D_CMD
		| MPII_D_INTR
		| MPII_D_MISC
		| MPII_D_DMA
		| MPII_D_IOCTL
		| MPII_D_RW
		| MPII_D_MEM
		| MPII_D_CCB
		| MPII_D_PPR
		| MPII_D_RAID
		| MPII_D_EVT
		| MPII_D_CFG
		| MPII_D_MAP
	;
81 #else
82 #define DPRINTF(x...)
83 #define DNPRINTF(n,x...)
84 #endif
85 
86 #define MPII_REQUEST_SIZE		(512)
87 #define MPII_REQUEST_CREDIT		(128)
88 
/*
 * A single contiguous bus_dma(9) allocation: the map, its one segment,
 * the allocated size and the kernel virtual mapping.  The macros below
 * give access to the map, the device address of the first (only)
 * segment and the kva.
 */
struct mpii_dmamem {
	bus_dmamap_t		mdm_map;
	bus_dma_segment_t	mdm_seg;
	size_t			mdm_size;
	caddr_t			mdm_kva;
};
#define MPII_DMA_MAP(_mdm) ((_mdm)->mdm_map)
#define MPII_DMA_DVA(_mdm) ((u_int64_t)(_mdm)->mdm_map->dm_segs[0].ds_addr)
#define MPII_DMA_KVA(_mdm) ((void *)(_mdm)->mdm_kva)
98 
struct mpii_softc;

/*
 * Reply control block: pairs the kva of one reply frame with the
 * 32-bit device-visible address the frame is posted at.
 */
struct mpii_rcb {
	SIMPLEQ_ENTRY(mpii_rcb)	rcb_link;
	void			*rcb_reply;	/* kva of the reply frame */
	u_int32_t		rcb_reply_dva;	/* dva of the reply frame */
};

SIMPLEQ_HEAD(mpii_rcb_list, mpii_rcb);
108 
/*
 * Per-target bookkeeping for a device (physical disk, RAID volume or
 * hot spare) indexed by its slot in sc_devs.
 */
struct mpii_device {
	int			flags;
#define MPII_DF_ATTACH		(0x0001)
#define MPII_DF_DETACH		(0x0002)
#define MPII_DF_HIDDEN		(0x0004)
#define MPII_DF_UNUSED		(0x0008)
#define MPII_DF_VOLUME		(0x0010)
#define MPII_DF_VOLUME_DISK	(0x0020)
#define MPII_DF_HOT_SPARE	(0x0040)
	short			slot;
	short			percent;	/* e.g. resync progress */
	u_int16_t		dev_handle;	/* firmware device handle */
	u_int16_t		enclosure;
	u_int16_t		expander;
	u_int8_t		phy_num;
	u_int8_t		physical_port;
};
126 
/*
 * Command control block.  Each ccb owns one request frame slot
 * (ccb_cmd/ccb_cmd_dva at ccb_offset within sc_requests) and is
 * addressed by the firmware via its system message id (ccb_smid).
 */
struct mpii_ccb {
	struct mpii_softc	*ccb_sc;

	void *			ccb_cookie;	/* caller context, e.g. xs */
	bus_dmamap_t		ccb_dmamap;	/* data transfer map */

	bus_addr_t		ccb_offset;
	void			*ccb_cmd;	/* kva of request frame */
	bus_addr_t		ccb_cmd_dva;	/* dva of request frame */
	u_int16_t		ccb_dev_handle;
	u_int16_t		ccb_smid;

	volatile enum {
		MPII_CCB_FREE,
		MPII_CCB_READY,
		MPII_CCB_QUEUED,
		MPII_CCB_TIMEOUT
	}			ccb_state;

	/* completion callback; ccb_rcb holds the reply, if any */
	void			(*ccb_done)(struct mpii_ccb *);
	struct mpii_rcb		*ccb_rcb;

	SIMPLEQ_ENTRY(mpii_ccb)	ccb_link;
};

SIMPLEQ_HEAD(mpii_ccb_list, mpii_ccb);
153 
/*
 * Per-controller state.
 */
struct mpii_softc {
	struct device		sc_dev;

	/* pci glue */
	pci_chipset_tag_t	sc_pc;
	pcitag_t		sc_tag;

	void			*sc_ih;

	int			sc_flags;
#define MPII_F_RAID		(1<<1)
#define MPII_F_SAS3		(1<<2)

	struct scsibus_softc	*sc_scsibus;
	unsigned int		sc_pending;

	/* array of sc_max_devices entries, indexed by target */
	struct mpii_device	**sc_devs;

	/* register and dma access */
	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	bus_size_t		sc_ios;
	bus_dma_tag_t		sc_dmat;

	struct mutex		sc_req_mtx;
	struct mutex		sc_rep_mtx;

	/* sizes and depths negotiated via iocfacts */
	ushort			sc_reply_size;
	ushort			sc_request_size;

	ushort			sc_max_cmds;
	ushort			sc_num_reply_frames;
	u_int			sc_reply_free_qdepth;
	u_int			sc_reply_post_qdepth;

	/* scatter-gather layout within a request frame */
	ushort			sc_chain_sge;
	ushort			sc_max_sgl;
	int			sc_max_chain;

	u_int8_t		sc_ioc_event_replay;

	/* controller identity from portfacts/iocfacts */
	u_int8_t		sc_porttype;
	u_int8_t		sc_max_volumes;
	u_int16_t		sc_max_devices;
	u_int16_t		sc_vd_count;
	u_int16_t		sc_vd_id_low;
	u_int16_t		sc_pd_id_start;
	int			sc_ioc_number;
	u_int8_t		sc_vf_id;

	struct mpii_ccb		*sc_ccbs;
	struct mpii_ccb_list	sc_ccb_free;
	struct mutex		sc_ccb_free_mtx;

	struct mutex		sc_ccb_mtx;
				/*
				 * this protects the ccb state and list entry
				 * between mpii_scsi_cmd and scsidone.
				 */

	struct mpii_ccb_list	sc_ccb_tmos;
	struct scsi_iohandler	sc_ccb_tmo_handler;

	struct scsi_iopool	sc_iopool;

	/* request frames, one slot per ccb */
	struct mpii_dmamem	*sc_requests;

	/* reply frames and their tracking rcbs */
	struct mpii_dmamem	*sc_replies;
	struct mpii_rcb		*sc_rcbs;

	struct mpii_dmamem	*sc_reply_postq;
	struct mpii_reply_descr	*sc_reply_postq_kva;
	u_int			sc_reply_post_host_index;

	struct mpii_dmamem	*sc_reply_freeq;
	u_int			sc_reply_free_host_index;

	/* async event handling, deferred to task/iohandler context */
	struct mpii_rcb_list	sc_evt_sas_queue;
	struct mutex		sc_evt_sas_mtx;
	struct task		sc_evt_sas_task;

	struct mpii_rcb_list	sc_evt_ack_queue;
	struct mutex		sc_evt_ack_mtx;
	struct scsi_iohandler	sc_evt_ack_handler;

	/* scsi ioctl from sd device */
	int			(*sc_ioctl)(struct device *, u_long, caddr_t);

	int			sc_nsensors;
	struct ksensor		*sc_sensors;
	struct ksensordev	sc_sensordev;
};
244 
245 int	mpii_match(struct device *, void *, void *);
246 void	mpii_attach(struct device *, struct device *, void *);
247 int	mpii_detach(struct device *, int);
248 
249 int	mpii_intr(void *);
250 
/* autoconf(9) glue */
const struct cfattach mpii_ca = {
	sizeof(struct mpii_softc),
	mpii_match,
	mpii_attach,
	mpii_detach
};

struct cfdriver mpii_cd = {
	NULL,
	"mpii",
	DV_DULL
};
263 
264 void		mpii_scsi_cmd(struct scsi_xfer *);
265 void		mpii_scsi_cmd_done(struct mpii_ccb *);
266 int		mpii_scsi_probe(struct scsi_link *);
267 int		mpii_scsi_ioctl(struct scsi_link *, u_long, caddr_t, int);
268 
/* entry points handed to the SCSI midlayer */
const struct scsi_adapter mpii_switch = {
	mpii_scsi_cmd, NULL, mpii_scsi_probe, NULL, mpii_scsi_ioctl
};
272 
273 struct mpii_dmamem *
274 		mpii_dmamem_alloc(struct mpii_softc *, size_t);
275 void		mpii_dmamem_free(struct mpii_softc *,
276 		    struct mpii_dmamem *);
277 int		mpii_alloc_ccbs(struct mpii_softc *);
278 void *		mpii_get_ccb(void *);
279 void		mpii_put_ccb(void *, void *);
280 int		mpii_alloc_replies(struct mpii_softc *);
281 int		mpii_alloc_queues(struct mpii_softc *);
282 void		mpii_push_reply(struct mpii_softc *, struct mpii_rcb *);
283 void		mpii_push_replies(struct mpii_softc *);
284 
285 void		mpii_scsi_cmd_tmo(void *);
286 void		mpii_scsi_cmd_tmo_handler(void *, void *);
287 void		mpii_scsi_cmd_tmo_done(struct mpii_ccb *);
288 
289 int		mpii_insert_dev(struct mpii_softc *, struct mpii_device *);
290 int		mpii_remove_dev(struct mpii_softc *, struct mpii_device *);
291 struct mpii_device *
292 		mpii_find_dev(struct mpii_softc *, u_int16_t);
293 
294 void		mpii_start(struct mpii_softc *, struct mpii_ccb *);
295 int		mpii_poll(struct mpii_softc *, struct mpii_ccb *);
296 void		mpii_poll_done(struct mpii_ccb *);
297 struct mpii_rcb *
298 		mpii_reply(struct mpii_softc *, struct mpii_reply_descr *);
299 
300 void		mpii_wait(struct mpii_softc *, struct mpii_ccb *);
301 void		mpii_wait_done(struct mpii_ccb *);
302 
303 void		mpii_init_queues(struct mpii_softc *);
304 
305 int		mpii_load_xs(struct mpii_ccb *);
306 int		mpii_load_xs_sas3(struct mpii_ccb *);
307 
308 u_int32_t	mpii_read(struct mpii_softc *, bus_size_t);
309 void		mpii_write(struct mpii_softc *, bus_size_t, u_int32_t);
310 int		mpii_wait_eq(struct mpii_softc *, bus_size_t, u_int32_t,
311 		    u_int32_t);
312 int		mpii_wait_ne(struct mpii_softc *, bus_size_t, u_int32_t,
313 		    u_int32_t);
314 
315 int		mpii_init(struct mpii_softc *);
316 int		mpii_reset_soft(struct mpii_softc *);
317 int		mpii_reset_hard(struct mpii_softc *);
318 
319 int		mpii_handshake_send(struct mpii_softc *, void *, size_t);
320 int		mpii_handshake_recv_dword(struct mpii_softc *,
321 		    u_int32_t *);
322 int		mpii_handshake_recv(struct mpii_softc *, void *, size_t);
323 
324 void		mpii_empty_done(struct mpii_ccb *);
325 
326 int		mpii_iocinit(struct mpii_softc *);
327 int		mpii_iocfacts(struct mpii_softc *);
328 int		mpii_portfacts(struct mpii_softc *);
329 int		mpii_portenable(struct mpii_softc *);
330 int		mpii_cfg_coalescing(struct mpii_softc *);
331 int		mpii_board_info(struct mpii_softc *);
332 int		mpii_target_map(struct mpii_softc *);
333 
334 int		mpii_eventnotify(struct mpii_softc *);
335 void		mpii_eventnotify_done(struct mpii_ccb *);
336 void		mpii_eventack(void *, void *);
337 void		mpii_eventack_done(struct mpii_ccb *);
338 void		mpii_event_process(struct mpii_softc *, struct mpii_rcb *);
339 void		mpii_event_done(struct mpii_softc *, struct mpii_rcb *);
340 void		mpii_event_sas(void *);
341 void		mpii_event_raid(struct mpii_softc *,
342 		    struct mpii_msg_event_reply *);
343 void		mpii_event_discovery(struct mpii_softc *,
344 		    struct mpii_msg_event_reply *);
345 
346 void		mpii_sas_remove_device(struct mpii_softc *, u_int16_t);
347 
348 int		mpii_req_cfg_header(struct mpii_softc *, u_int8_t,
349 		    u_int8_t, u_int32_t, int, void *);
350 int		mpii_req_cfg_page(struct mpii_softc *, u_int32_t, int,
351 		    void *, int, void *, size_t);
352 
353 int		mpii_ioctl_cache(struct scsi_link *, u_long, struct dk_cache *);
354 
355 #if NBIO > 0
356 int		mpii_ioctl(struct device *, u_long, caddr_t);
357 int		mpii_ioctl_inq(struct mpii_softc *, struct bioc_inq *);
358 int		mpii_ioctl_vol(struct mpii_softc *, struct bioc_vol *);
359 int		mpii_ioctl_disk(struct mpii_softc *, struct bioc_disk *);
360 int		mpii_bio_hs(struct mpii_softc *, struct bioc_disk *, int,
361 		    int, int *);
362 int		mpii_bio_disk(struct mpii_softc *, struct bioc_disk *,
363 		    u_int8_t);
364 struct mpii_device *
365 		mpii_find_vol(struct mpii_softc *, int);
366 #ifndef SMALL_KERNEL
367  int		mpii_bio_volstate(struct mpii_softc *, struct bioc_vol *);
368 int		mpii_create_sensors(struct mpii_softc *);
369 void		mpii_refresh_sensors(void *);
370 #endif /* SMALL_KERNEL */
371 #endif /* NBIO > 0 */
372 
373 #define DEVNAME(s)		((s)->sc_dev.dv_xname)
374 
375 #define dwordsof(s)		(sizeof(s) / sizeof(u_int32_t))
376 
377 #define mpii_read_db(s)		mpii_read((s), MPII_DOORBELL)
378 #define mpii_write_db(s, v)	mpii_write((s), MPII_DOORBELL, (v))
379 #define mpii_read_intr(s)	mpii_read((s), MPII_INTR_STATUS)
380 #define mpii_write_intr(s, v)	mpii_write((s), MPII_INTR_STATUS, (v))
381 #define mpii_reply_waiting(s)	((mpii_read_intr((s)) & MPII_INTR_STATUS_REPLY)\
382 				    == MPII_INTR_STATUS_REPLY)
383 
384 #define mpii_write_reply_free(s, v) \
385     bus_space_write_4((s)->sc_iot, (s)->sc_ioh, \
386     MPII_REPLY_FREE_HOST_INDEX, (v))
387 #define mpii_write_reply_post(s, v) \
388     bus_space_write_4((s)->sc_iot, (s)->sc_ioh, \
389     MPII_REPLY_POST_HOST_INDEX, (v))
390 
391 #define mpii_wait_db_int(s)	mpii_wait_ne((s), MPII_INTR_STATUS, \
392 				    MPII_INTR_STATUS_IOC2SYSDB, 0)
393 #define mpii_wait_db_ack(s)	mpii_wait_eq((s), MPII_INTR_STATUS, \
394 				    MPII_INTR_STATUS_SYS2IOCDB, 0)
395 
396 static inline void
397 mpii_dvatosge(struct mpii_sge *sge, u_int64_t dva)
398 {
399 	htolem32(&sge->sg_addr_lo, dva);
400 	htolem32(&sge->sg_addr_hi, dva >> 32);
401 }
402 
403 #define MPII_PG_EXTENDED	(1<<0)
404 #define MPII_PG_POLL		(1<<1)
405 #define MPII_PG_FMT		"\020" "\002POLL" "\001EXTENDED"
406 
/* PCI ids of all supported LSI/Broadcom SAS2 and SAS3 controllers */
static const struct pci_matchid mpii_devices[] = {
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2004 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2008 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SSS6200 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2108_3 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2108_4 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2108_5 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2116_1 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2116_2 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2208_1 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2208_2 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2208_3 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2208_4 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2208_5 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2208_6 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2308_1 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2308_2 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS2308_3 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3004 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3008 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3108_1 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3108_2 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3108_3 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3108_4 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3408 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3416 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3508 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3508_1 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3516 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_SAS3516_1 }
};
438 
439 int
440 mpii_match(struct device *parent, void *match, void *aux)
441 {
442 	return (pci_matchbyid(aux, mpii_devices, nitems(mpii_devices)));
443 }
444 
445 void
446 mpii_attach(struct device *parent, struct device *self, void *aux)
447 {
448 	struct mpii_softc		*sc = (struct mpii_softc *)self;
449 	struct pci_attach_args		*pa = aux;
450 	pcireg_t			memtype;
451 	int				r;
452 	pci_intr_handle_t		ih;
453 	struct scsibus_attach_args	saa;
454 	struct mpii_ccb			*ccb;
455 
456 	sc->sc_pc = pa->pa_pc;
457 	sc->sc_tag = pa->pa_tag;
458 	sc->sc_dmat = pa->pa_dmat;
459 
460 	mtx_init(&sc->sc_req_mtx, IPL_BIO);
461 	mtx_init(&sc->sc_rep_mtx, IPL_BIO);
462 
463 	/* find the appropriate memory base */
464 	for (r = PCI_MAPREG_START; r < PCI_MAPREG_END; r += sizeof(memtype)) {
465 		memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, r);
466 		if ((memtype & PCI_MAPREG_TYPE_MASK) == PCI_MAPREG_TYPE_MEM)
467 			break;
468 	}
469 	if (r >= PCI_MAPREG_END) {
470 		printf(": unable to locate system interface registers\n");
471 		return;
472 	}
473 
474 	if (pci_mapreg_map(pa, r, memtype, 0, &sc->sc_iot, &sc->sc_ioh,
475 	    NULL, &sc->sc_ios, 0xFF) != 0) {
476 		printf(": unable to map system interface registers\n");
477 		return;
478 	}
479 
480 	/* disable the expansion rom */
481 	pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_ROM_REG,
482 	    pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_ROM_REG) &
483 	    ~PCI_ROM_ENABLE);
484 
485 	/* disable interrupts */
486 	mpii_write(sc, MPII_INTR_MASK,
487 	    MPII_INTR_MASK_RESET | MPII_INTR_MASK_REPLY |
488 	    MPII_INTR_MASK_DOORBELL);
489 
490 	/* hook up the interrupt */
491 	if (pci_intr_map_msi(pa, &ih) != 0 && pci_intr_map(pa, &ih) != 0) {
492 		printf(": unable to map interrupt\n");
493 		goto unmap;
494 	}
495 	printf(": %s\n", pci_intr_string(sc->sc_pc, ih));
496 
497 	if (mpii_iocfacts(sc) != 0) {
498 		printf("%s: unable to get iocfacts\n", DEVNAME(sc));
499 		goto unmap;
500 	}
501 
502 	if (mpii_init(sc) != 0) {
503 		printf("%s: unable to initialize ioc\n", DEVNAME(sc));
504 		goto unmap;
505 	}
506 
507 	if (mpii_alloc_ccbs(sc) != 0) {
508 		/* error already printed */
509 		goto unmap;
510 	}
511 
512 	if (mpii_alloc_replies(sc) != 0) {
513 		printf("%s: unable to allocated reply space\n", DEVNAME(sc));
514 		goto free_ccbs;
515 	}
516 
517 	if (mpii_alloc_queues(sc) != 0) {
518 		printf("%s: unable to allocate reply queues\n", DEVNAME(sc));
519 		goto free_replies;
520 	}
521 
522 	if (mpii_iocinit(sc) != 0) {
523 		printf("%s: unable to send iocinit\n", DEVNAME(sc));
524 		goto free_queues;
525 	}
526 
527 	if (mpii_wait_eq(sc, MPII_DOORBELL, MPII_DOORBELL_STATE,
528 	    MPII_DOORBELL_STATE_OPER) != 0) {
529 		printf("%s: state: 0x%08x\n", DEVNAME(sc),
530 			mpii_read_db(sc) & MPII_DOORBELL_STATE);
531 		printf("%s: operational state timeout\n", DEVNAME(sc));
532 		goto free_queues;
533 	}
534 
535 	mpii_push_replies(sc);
536 	mpii_init_queues(sc);
537 
538 	if (mpii_board_info(sc) != 0) {
539 		printf("%s: unable to get manufacturing page 0\n",
540 		    DEVNAME(sc));
541 		goto free_queues;
542 	}
543 
544 	if (mpii_portfacts(sc) != 0) {
545 		printf("%s: unable to get portfacts\n", DEVNAME(sc));
546 		goto free_queues;
547 	}
548 
549 	if (mpii_target_map(sc) != 0) {
550 		printf("%s: unable to setup target mappings\n", DEVNAME(sc));
551 		goto free_queues;
552 	}
553 
554 	if (mpii_cfg_coalescing(sc) != 0) {
555 		printf("%s: unable to configure coalescing\n", DEVNAME(sc));
556 		goto free_queues;
557 	}
558 
559 	/* XXX bail on unsupported porttype? */
560 	if ((sc->sc_porttype == MPII_PORTFACTS_PORTTYPE_SAS_PHYSICAL) ||
561 	    (sc->sc_porttype == MPII_PORTFACTS_PORTTYPE_SAS_VIRTUAL) ||
562 	    (sc->sc_porttype == MPII_PORTFACTS_PORTTYPE_TRI_MODE)) {
563 		if (mpii_eventnotify(sc) != 0) {
564 			printf("%s: unable to enable events\n", DEVNAME(sc));
565 			goto free_queues;
566 		}
567 	}
568 
569 	sc->sc_devs = mallocarray(sc->sc_max_devices,
570 	    sizeof(struct mpii_device *), M_DEVBUF, M_NOWAIT | M_ZERO);
571 	if (sc->sc_devs == NULL) {
572 		printf("%s: unable to allocate memory for mpii_device\n",
573 		    DEVNAME(sc));
574 		goto free_queues;
575 	}
576 
577 	if (mpii_portenable(sc) != 0) {
578 		printf("%s: unable to enable port\n", DEVNAME(sc));
579 		goto free_devs;
580 	}
581 
582 	sc->sc_ih = pci_intr_establish(sc->sc_pc, ih, IPL_BIO,
583 	    mpii_intr, sc, sc->sc_dev.dv_xname);
584 	if (sc->sc_ih == NULL)
585 		goto free_devs;
586 
587 	/* force autoconf to wait for the first sas discovery to complete */
588 	sc->sc_pending = 1;
589 	config_pending_incr();
590 
591 	saa.saa_adapter = &mpii_switch;
592 	saa.saa_adapter_softc = sc;
593 	saa.saa_adapter_target = SDEV_NO_ADAPTER_TARGET;
594 	saa.saa_adapter_buswidth = sc->sc_max_devices;
595 	saa.saa_luns = 1;
596 	saa.saa_openings = sc->sc_max_cmds - 1;
597 	saa.saa_pool = &sc->sc_iopool;
598 	saa.saa_quirks = saa.saa_flags = 0;
599 	saa.saa_wwpn = saa.saa_wwnn = 0;
600 
601 	sc->sc_scsibus = (struct scsibus_softc *) config_found(&sc->sc_dev,
602 	    &saa, scsiprint);
603 
604 	/* enable interrupts */
605 	mpii_write(sc, MPII_INTR_MASK, MPII_INTR_MASK_DOORBELL
606 	    | MPII_INTR_MASK_RESET);
607 
608 #if NBIO > 0
609 	if (ISSET(sc->sc_flags, MPII_F_RAID)) {
610 		if (bio_register(&sc->sc_dev, mpii_ioctl) != 0)
611 			panic("%s: controller registration failed",
612 			    DEVNAME(sc));
613 		else
614 			sc->sc_ioctl = mpii_ioctl;
615 
616 #ifndef SMALL_KERNEL
617 		if (mpii_create_sensors(sc) != 0)
618 			printf("%s: unable to create sensors\n", DEVNAME(sc));
619 #endif
620 	}
621 #endif
622 
623 	return;
624 
625 free_devs:
626 	free(sc->sc_devs, M_DEVBUF, 0);
627 	sc->sc_devs = NULL;
628 
629 free_queues:
630 	bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_reply_freeq),
631 	    0, sc->sc_reply_free_qdepth * 4, BUS_DMASYNC_POSTREAD);
632 	mpii_dmamem_free(sc, sc->sc_reply_freeq);
633 
634 	bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_reply_postq),
635 	    0, sc->sc_reply_post_qdepth * 8, BUS_DMASYNC_POSTREAD);
636 	mpii_dmamem_free(sc, sc->sc_reply_postq);
637 
638 free_replies:
639 	bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_replies),
640 		0, PAGE_SIZE, BUS_DMASYNC_POSTREAD);
641 	mpii_dmamem_free(sc, sc->sc_replies);
642 
643 free_ccbs:
644 	while ((ccb = mpii_get_ccb(sc)) != NULL)
645 		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
646 	mpii_dmamem_free(sc, sc->sc_requests);
647 	free(sc->sc_ccbs, M_DEVBUF, 0);
648 
649 unmap:
650 	bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
651 	sc->sc_ios = 0;
652 }
653 
654 int
655 mpii_detach(struct device *self, int flags)
656 {
657 	struct mpii_softc		*sc = (struct mpii_softc *)self;
658 
659 	if (sc->sc_ih != NULL) {
660 		pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
661 		sc->sc_ih = NULL;
662 	}
663 	if (sc->sc_ios != 0) {
664 		bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
665 		sc->sc_ios = 0;
666 	}
667 
668 	return (0);
669 }
670 
/*
 * Interrupt handler.  Under sc_rep_mtx, walk the reply post queue from
 * the saved host index, sorting each descriptor onto a local list:
 * descriptors with a non-zero smid complete a ccb, the rest carry
 * async event replies.  Completions and event processing run after the
 * mutex is dropped.  Returns 1 if any descriptor was consumed.
 */
int
mpii_intr(void *arg)
{
	struct mpii_rcb_list		evts = SIMPLEQ_HEAD_INITIALIZER(evts);
	struct mpii_ccb_list		ccbs = SIMPLEQ_HEAD_INITIALIZER(ccbs);
	struct mpii_softc		*sc = arg;
	struct mpii_reply_descr		*postq = sc->sc_reply_postq_kva, *rdp;
	struct mpii_ccb			*ccb;
	struct mpii_rcb			*rcb;
	int				smid;
	u_int				idx;
	int				rv = 0;

	mtx_enter(&sc->sc_rep_mtx);
	bus_dmamap_sync(sc->sc_dmat,
	    MPII_DMA_MAP(sc->sc_reply_postq),
	    0, sc->sc_reply_post_qdepth * sizeof(*rdp),
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	idx = sc->sc_reply_post_host_index;
	for (;;) {
		rdp = &postq[idx];
		/* an unused descriptor marks the end of the new replies */
		if ((rdp->reply_flags & MPII_REPLY_DESCR_TYPE_MASK) ==
		    MPII_REPLY_DESCR_UNUSED)
			break;
		if (rdp->data == 0xffffffff) {
			/*
			 * ioc is still writing to the reply post queue
			 * race condition - bail!
			 */
			break;
		}

		smid = lemtoh16(&rdp->smid);
		rcb = mpii_reply(sc, rdp);

		/* smid != 0 identifies the ccb; smid == 0 is an event */
		if (smid) {
			ccb = &sc->sc_ccbs[smid - 1];
			ccb->ccb_state = MPII_CCB_READY;
			ccb->ccb_rcb = rcb;
			SIMPLEQ_INSERT_TAIL(&ccbs, ccb, ccb_link);
		} else
			SIMPLEQ_INSERT_TAIL(&evts, rcb, rcb_link);

		/* the post queue wraps around */
		if (++idx >= sc->sc_reply_post_qdepth)
			idx = 0;

		rv = 1;
	}

	bus_dmamap_sync(sc->sc_dmat,
	    MPII_DMA_MAP(sc->sc_reply_postq),
	    0, sc->sc_reply_post_qdepth * sizeof(*rdp),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	/* tell the ioc how far we got so it can reuse the descriptors */
	if (rv)
		mpii_write_reply_post(sc, sc->sc_reply_post_host_index = idx);

	mtx_leave(&sc->sc_rep_mtx);

	if (rv == 0)
		return (0);

	/* run completions and events without holding sc_rep_mtx */
	while ((ccb = SIMPLEQ_FIRST(&ccbs)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&ccbs, ccb_link);
		ccb->ccb_done(ccb);
	}
	while ((rcb = SIMPLEQ_FIRST(&evts)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&evts, rcb_link);
		mpii_event_process(sc, rcb);
	}

	return (1);
}
745 
/*
 * Build the IEEE scatter-gather list (SAS3 controllers) for an xfer.
 * The SGEs live directly after the scsi io request frame; when the
 * transfer needs more SGEs than fit before sc_chain_sge, a chain
 * element is inserted that points further into the same request frame.
 * Returns 0 on success, 1 if the dmamap could not be loaded.
 */
int
mpii_load_xs_sas3(struct mpii_ccb *ccb)
{
	struct mpii_softc	*sc = ccb->ccb_sc;
	struct scsi_xfer	*xs = ccb->ccb_cookie;
	struct mpii_msg_scsi_io	*io = ccb->ccb_cmd;
	struct mpii_ieee_sge	*csge, *nsge, *sge;
	bus_dmamap_t		dmap = ccb->ccb_dmamap;
	int			i, error;

	/* Request frame structure is described in the mpii_iocfacts */
	nsge = (struct mpii_ieee_sge *)(io + 1);

	/* zero length transfer still requires an SGE */
	if (xs->datalen == 0) {
		nsge->sg_flags = MPII_IEEE_SGE_END_OF_LIST;
		return (0);
	}

	error = bus_dmamap_load(sc->sc_dmat, dmap, xs->data, xs->datalen, NULL,
	    (xs->flags & SCSI_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error) {
		printf("%s: error %d loading dmamap\n", DEVNAME(sc), error);
		return (1);
	}

	/* only chain if the sgl will not fit before sc_chain_sge */
	csge = NULL;
	if (dmap->dm_nsegs > sc->sc_chain_sge) {
		csge = nsge + sc->sc_chain_sge;

		/* offset to the chain sge from the beginning, in SGE units */
		io->chain_offset = ((caddr_t)csge - (caddr_t)io) / sizeof(*sge);
	}

	for (i = 0; i < dmap->dm_nsegs; i++, nsge++) {
		/* fill in the chain element when we reach its slot */
		if (nsge == csge) {
			nsge++;

			/* address of the next sge */
			htolem64(&csge->sg_addr, ccb->ccb_cmd_dva +
			    ((caddr_t)nsge - (caddr_t)io));
			htolem32(&csge->sg_len, (dmap->dm_nsegs - i) *
			    sizeof(*sge));
			csge->sg_next_chain_offset = 0;
			csge->sg_flags = MPII_IEEE_SGE_CHAIN_ELEMENT |
			    MPII_IEEE_SGE_ADDR_SYSTEM;

			/* still too long? chain again further into the frame */
			if ((dmap->dm_nsegs - i) > sc->sc_max_chain) {
				csge->sg_next_chain_offset = sc->sc_max_chain;
				csge += sc->sc_max_chain;
			}
		}

		sge = nsge;
		sge->sg_flags = MPII_IEEE_SGE_ADDR_SYSTEM;
		sge->sg_next_chain_offset = 0;
		htolem32(&sge->sg_len, dmap->dm_segs[i].ds_len);
		htolem64(&sge->sg_addr, dmap->dm_segs[i].ds_addr);
	}

	/* terminate list */
	sge->sg_flags |= MPII_IEEE_SGE_END_OF_LIST;

	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
	    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_PREREAD :
	    BUS_DMASYNC_PREWRITE);

	return (0);
}
815 
/*
 * Build the MPI2-style scatter-gather list (SAS2 controllers) for an
 * xfer.  SGEs follow the scsi io request frame; a chain SGE at
 * sc_chain_sge points at the remainder of the list when the transfer
 * does not fit before it.  Returns 0 on success, 1 if the dmamap could
 * not be loaded.
 */
int
mpii_load_xs(struct mpii_ccb *ccb)
{
	struct mpii_softc	*sc = ccb->ccb_sc;
	struct scsi_xfer	*xs = ccb->ccb_cookie;
	struct mpii_msg_scsi_io	*io = ccb->ccb_cmd;
	struct mpii_sge		*csge, *nsge, *sge;
	bus_dmamap_t		dmap = ccb->ccb_dmamap;
	u_int32_t		flags;
	u_int16_t		len;
	int			i, error;

	/* Request frame structure is described in the mpii_iocfacts */
	nsge = (struct mpii_sge *)(io + 1);
	csge = nsge + sc->sc_chain_sge;

	/* zero length transfer still requires an SGE */
	if (xs->datalen == 0) {
		nsge->sg_hdr = htole32(MPII_SGE_FL_TYPE_SIMPLE |
		    MPII_SGE_FL_LAST | MPII_SGE_FL_EOB | MPII_SGE_FL_EOL);
		return (0);
	}

	error = bus_dmamap_load(sc->sc_dmat, dmap, xs->data, xs->datalen, NULL,
	    (xs->flags & SCSI_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error) {
		printf("%s: error %d loading dmamap\n", DEVNAME(sc), error);
		return (1);
	}

	/* safe default starting flags */
	flags = MPII_SGE_FL_TYPE_SIMPLE | MPII_SGE_FL_SIZE_64;
	if (xs->flags & SCSI_DATA_OUT)
		flags |= MPII_SGE_FL_DIR_OUT;

	for (i = 0; i < dmap->dm_nsegs; i++, nsge++) {
		/* fill in the chain SGE once we reach its slot */
		if (nsge == csge) {
			nsge++;
			/* offset to the chain sge from the beginning (dwords) */
			io->chain_offset = ((caddr_t)csge - (caddr_t)io) / 4;
			/* length of the sgl segment we're pointing to */
			len = (dmap->dm_nsegs - i) * sizeof(*sge);
			htolem32(&csge->sg_hdr, MPII_SGE_FL_TYPE_CHAIN |
			    MPII_SGE_FL_SIZE_64 | len);
			/* address of the next sge */
			mpii_dvatosge(csge, ccb->ccb_cmd_dva +
			    ((caddr_t)nsge - (caddr_t)io));
		}

		sge = nsge;
		htolem32(&sge->sg_hdr, flags | dmap->dm_segs[i].ds_len);
		mpii_dvatosge(sge, dmap->dm_segs[i].ds_addr);
	}

	/* terminate list */
	sge->sg_hdr |= htole32(MPII_SGE_FL_LAST | MPII_SGE_FL_EOB |
	    MPII_SGE_FL_EOL);

	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
	    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_PREREAD :
	    BUS_DMASYNC_PREWRITE);

	return (0);
}
880 
/*
 * Midlayer probe hook: decide whether a target exists and fill in its
 * WWNs from the firmware's config pages.  RAID volumes get their WWID
 * from RAID volume page 1; bare devices get SAS address/device name
 * from SAS device page 0.  Returns 0 on success, non-zero to skip the
 * target.
 */
int
mpii_scsi_probe(struct scsi_link *link)
{
	struct mpii_softc *sc = link->bus->sb_adapter_softc;
	struct mpii_cfg_sas_dev_pg0 pg0;
	struct mpii_ecfg_hdr ehdr;
	struct mpii_device *dev;
	uint32_t address;
	int flags;

	/* only SAS-style ports are supported */
	if ((sc->sc_porttype != MPII_PORTFACTS_PORTTYPE_SAS_PHYSICAL) &&
	    (sc->sc_porttype != MPII_PORTFACTS_PORTTYPE_SAS_VIRTUAL) &&
	    (sc->sc_porttype != MPII_PORTFACTS_PORTTYPE_TRI_MODE))
		return (ENXIO);

	dev = sc->sc_devs[link->target];
	if (dev == NULL)
		return (1);

	/* hidden or unused devices are not exposed to the midlayer */
	flags = dev->flags;
	if (ISSET(flags, MPII_DF_HIDDEN) || ISSET(flags, MPII_DF_UNUSED))
		return (1);

	if (ISSET(flags, MPII_DF_VOLUME)) {
		struct mpii_cfg_hdr hdr;
		struct mpii_cfg_raid_vol_pg1 vpg;
		size_t pagelen;

		address = MPII_CFG_RAID_VOL_ADDR_HANDLE | dev->dev_handle;

		if (mpii_req_cfg_header(sc, MPII_CONFIG_REQ_PAGE_TYPE_RAID_VOL,
		    1, address, MPII_PG_POLL, &hdr) != 0)
			return (EINVAL);

		memset(&vpg, 0, sizeof(vpg));
		/* avoid stack trash on future page growth */
		pagelen = min(sizeof(vpg), hdr.page_length * 4);

		if (mpii_req_cfg_page(sc, address, MPII_PG_POLL, &hdr, 1,
		    &vpg, pagelen) != 0)
			return (EINVAL);

		link->port_wwn = letoh64(vpg.wwid);
		/*
		 * WWIDs generated by LSI firmware are not IEEE NAA compliant
		 * and historical practise in OBP on sparc64 is to set the top
		 * nibble to 3 to indicate that this is a RAID volume.
		 */
		link->port_wwn &= 0x0fffffffffffffff;
		link->port_wwn |= 0x3000000000000000;

		return (0);
	}

	/* fetch SAS device page 0 for a bare device */
	memset(&ehdr, 0, sizeof(ehdr));
	ehdr.page_type = MPII_CONFIG_REQ_PAGE_TYPE_EXTENDED;
	ehdr.page_number = 0;
	ehdr.page_version = 0;
	ehdr.ext_page_type = MPII_CONFIG_REQ_EXTPAGE_TYPE_SAS_DEVICE;
	ehdr.ext_page_length = htole16(sizeof(pg0) / 4); /* dwords */

	address = MPII_PGAD_SAS_DEVICE_FORM_HANDLE | (uint32_t)dev->dev_handle;
	if (mpii_req_cfg_page(sc, address, MPII_PG_EXTENDED,
	    &ehdr, 1, &pg0, sizeof(pg0)) != 0) {
		printf("%s: unable to fetch SAS device page 0 for target %u\n",
		    DEVNAME(sc), link->target);

		return (0); /* the handle should still work */
	}

	link->port_wwn = letoh64(pg0.sas_addr);
	link->node_wwn = letoh64(pg0.device_name);

	if (ISSET(lemtoh32(&pg0.device_info),
	    MPII_CFG_SAS_DEV_0_DEVINFO_ATAPI_DEVICE)) {
		link->flags |= SDEV_ATAPI;
	}

	return (0);
}
961 
/*
 * Read a 32-bit controller register.  The read barrier keeps this
 * access ordered against preceding register accesses; do not reorder.
 */
u_int32_t
mpii_read(struct mpii_softc *sc, bus_size_t r)
{
	u_int32_t			rv;

	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_READ);
	rv = bus_space_read_4(sc->sc_iot, sc->sc_ioh, r);

	DNPRINTF(MPII_D_RW, "%s: mpii_read %#lx %#x\n", DEVNAME(sc), r, rv);

	return (rv);
}
975 
/*
 * Write a 32-bit controller register, then issue a write barrier so
 * the store is pushed out before subsequent register accesses.
 */
void
mpii_write(struct mpii_softc *sc, bus_size_t r, u_int32_t v)
{
	DNPRINTF(MPII_D_RW, "%s: mpii_write %#lx %#x\n", DEVNAME(sc), r, v);

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_WRITE);
}
985 
986 
987 int
988 mpii_wait_eq(struct mpii_softc *sc, bus_size_t r, u_int32_t mask,
989     u_int32_t target)
990 {
991 	int			i;
992 
993 	DNPRINTF(MPII_D_RW, "%s: mpii_wait_eq %#lx %#x %#x\n", DEVNAME(sc), r,
994 	    mask, target);
995 
996 	for (i = 0; i < 15000; i++) {
997 		if ((mpii_read(sc, r) & mask) == target)
998 			return (0);
999 		delay(1000);
1000 	}
1001 
1002 	return (1);
1003 }
1004 
1005 int
1006 mpii_wait_ne(struct mpii_softc *sc, bus_size_t r, u_int32_t mask,
1007     u_int32_t target)
1008 {
1009 	int			i;
1010 
1011 	DNPRINTF(MPII_D_RW, "%s: mpii_wait_ne %#lx %#x %#x\n", DEVNAME(sc), r,
1012 	    mask, target);
1013 
1014 	for (i = 0; i < 15000; i++) {
1015 		if ((mpii_read(sc, r) & mask) != target)
1016 			return (0);
1017 		delay(1000);
1018 	}
1019 
1020 	return (1);
1021 }
1022 
/*
 * Drive the IOC to the READY state via the doorbell state machine.
 * Waits for the chip to leave RESET, then, for up to 5 iterations:
 * READY means done; OPER gets a soft reset (or a hard reset if the
 * IOC cannot replay events); FAULT gets a hard reset.
 * Returns 0 when the IOC is ready (or owned by a PCI peer), 1 on
 * timeout/failure.
 */
int
mpii_init(struct mpii_softc *sc)
{
	u_int32_t		db;
	int			i;

	/* spin until the ioc leaves the reset state */
	if (mpii_wait_ne(sc, MPII_DOORBELL, MPII_DOORBELL_STATE,
	    MPII_DOORBELL_STATE_RESET) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_init timeout waiting to leave "
		    "reset state\n", DEVNAME(sc));
		return (1);
	}

	/* check current ownership */
	db = mpii_read_db(sc);
	if ((db & MPII_DOORBELL_WHOINIT) == MPII_DOORBELL_WHOINIT_PCIPEER) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_init initialised by pci peer\n",
		    DEVNAME(sc));
		return (0);
	}

	for (i = 0; i < 5; i++) {
		switch (db & MPII_DOORBELL_STATE) {
		case MPII_DOORBELL_STATE_READY:
			DNPRINTF(MPII_D_MISC, "%s: mpii_init ioc is ready\n",
			    DEVNAME(sc));
			return (0);

		case MPII_DOORBELL_STATE_OPER:
			DNPRINTF(MPII_D_MISC, "%s: mpii_init ioc is oper\n",
			    DEVNAME(sc));
			/* soft reset preserves less state; only usable if the
			 * IOC can replay events afterwards */
			if (sc->sc_ioc_event_replay)
				mpii_reset_soft(sc);
			else
				mpii_reset_hard(sc);
			break;

		case MPII_DOORBELL_STATE_FAULT:
			DNPRINTF(MPII_D_MISC, "%s: mpii_init ioc is being "
			    "reset hard\n" , DEVNAME(sc));
			mpii_reset_hard(sc);
			break;

		case MPII_DOORBELL_STATE_RESET:
			DNPRINTF(MPII_D_MISC, "%s: mpii_init waiting to come "
			    "out of reset\n", DEVNAME(sc));
			if (mpii_wait_ne(sc, MPII_DOORBELL, MPII_DOORBELL_STATE,
			    MPII_DOORBELL_STATE_RESET) != 0)
				return (1);
			break;
		}
		db = mpii_read_db(sc);
	}

	return (1);
}
1080 
/*
 * Request a message unit reset through the doorbell and wait for the
 * IOC to acknowledge it and return to the READY state.
 * Returns 0 on success, 1 if the doorbell is busy or a wait times out.
 */
int
mpii_reset_soft(struct mpii_softc *sc)
{
	DNPRINTF(MPII_D_MISC, "%s: mpii_reset_soft\n", DEVNAME(sc));

	if (mpii_read_db(sc) & MPII_DOORBELL_INUSE) {
		return (1);
	}

	mpii_write_db(sc,
	    MPII_DOORBELL_FUNCTION(MPII_FUNCTION_IOC_MESSAGE_UNIT_RESET));

	/* XXX LSI waits 15 sec */
	if (mpii_wait_db_ack(sc) != 0)
		return (1);

	/* XXX LSI waits 15 sec */
	if (mpii_wait_eq(sc, MPII_DOORBELL, MPII_DOORBELL_STATE,
	    MPII_DOORBELL_STATE_READY) != 0)
		return (1);

	/* XXX wait for Sys2IOCDB bit to clear in HIS?? */

	return (0);
}
1106 
/*
 * Hard-reset the adapter through the host diagnostic register.
 * The diagnostic register is write-protected and must first be
 * unlocked with the fixed WRITESEQ key sequence; after the reset is
 * triggered we poll for the reset bit to clear, then re-lock the
 * register.  Returns 0 on success, 1 if the unlock did not take.
 */
int
mpii_reset_hard(struct mpii_softc *sc)
{
	u_int16_t		i;

	DNPRINTF(MPII_D_MISC, "%s: mpii_reset_hard\n", DEVNAME(sc));

	mpii_write_intr(sc, 0);

	/* enable diagnostic register */
	mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_FLUSH);
	mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_1);
	mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_2);
	mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_3);
	mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_4);
	mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_5);
	mpii_write(sc, MPII_WRITESEQ, MPII_WRITESEQ_6);

	delay(100);

	if ((mpii_read(sc, MPII_HOSTDIAG) & MPII_HOSTDIAG_DWRE) == 0) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_reset_hard failure to enable "
		    "diagnostic read/write\n", DEVNAME(sc));
		return(1);
	}

	/* reset ioc */
	mpii_write(sc, MPII_HOSTDIAG, MPII_HOSTDIAG_RESET_ADAPTER);

	/* 240 milliseconds */
	delay(240000);


	/* XXX this whole function should be more robust */

	/* XXX  read the host diagnostic reg until reset adapter bit clears ? */
	/* poll up to 300 seconds (30000 * 10ms) for the reset bit to clear */
	for (i = 0; i < 30000; i++) {
		if ((mpii_read(sc, MPII_HOSTDIAG) &
		    MPII_HOSTDIAG_RESET_ADAPTER) == 0)
			break;
		delay(10000);
	}

	/* disable diagnostic register */
	mpii_write(sc, MPII_WRITESEQ, 0xff);

	/* XXX what else? */

	DNPRINTF(MPII_D_MISC, "%s: done with mpii_reset_hard\n", DEVNAME(sc));

	return(0);
}
1159 
/*
 * Send a request to the IOC through the doorbell handshake protocol:
 * announce the handshake function with the dword count, wait for the
 * IOC to raise and for us to ack the doorbell interrupt, then feed the
 * request one dword at a time, waiting for an ack after each write.
 * buf must hold at least `dwords` 32-bit words in host byte order.
 * Returns 0 on success, 1 if the doorbell is busy or a wait times out.
 */
int
mpii_handshake_send(struct mpii_softc *sc, void *buf, size_t dwords)
{
	u_int32_t		*query = buf;
	int			i;

	/* make sure the doorbell is not in use. */
	if (mpii_read_db(sc) & MPII_DOORBELL_INUSE)
		return (1);

	/* clear pending doorbell interrupts */
	if (mpii_read_intr(sc) & MPII_INTR_STATUS_IOC2SYSDB)
		mpii_write_intr(sc, 0);

	/*
	 * first write the doorbell with the handshake function and the
	 * dword count.
	 */
	mpii_write_db(sc, MPII_DOORBELL_FUNCTION(MPII_FUNCTION_HANDSHAKE) |
	    MPII_DOORBELL_DWORDS(dwords));

	/*
	 * the doorbell used bit will be set because a doorbell function has
	 * started. wait for the interrupt and then ack it.
	 */
	if (mpii_wait_db_int(sc) != 0)
		return (1);
	mpii_write_intr(sc, 0);

	/* poll for the acknowledgement. */
	if (mpii_wait_db_ack(sc) != 0)
		return (1);

	/* write the query through the doorbell. */
	for (i = 0; i < dwords; i++) {
		mpii_write_db(sc, htole32(query[i]));
		if (mpii_wait_db_ack(sc) != 0)
			return (1);
	}

	return (0);
}
1202 
/*
 * Receive one 32-bit word of a handshake reply.  The IOC presents each
 * dword as two 16-bit halves in the doorbell data field, low half
 * first; each half is announced by a doorbell interrupt that we ack.
 * Returns 0 on success, 1 if a doorbell interrupt wait times out.
 */
int
mpii_handshake_recv_dword(struct mpii_softc *sc, u_int32_t *dword)
{
	u_int16_t		*words = (u_int16_t *)dword;
	int			i;

	for (i = 0; i < 2; i++) {
		if (mpii_wait_db_int(sc) != 0)
			return (1);
		words[i] = letoh16(mpii_read_db(sc) & MPII_DOORBELL_DATA_MASK);
		mpii_write_intr(sc, 0);
	}

	return (0);
}
1218 
/*
 * Receive a handshake reply into buf (up to `dwords` 32-bit words).
 * The reply's own msg_length field (in dwords, read from the first
 * dword) determines how much the IOC actually sends; any excess beyond
 * the caller's buffer is drained and discarded.  Finishes by waiting
 * for the doorbell in-use bit to clear and acking the interrupt.
 * Returns 0 on success, 1 on any timeout.
 */
int
mpii_handshake_recv(struct mpii_softc *sc, void *buf, size_t dwords)
{
	struct mpii_msg_reply	*reply = buf;
	u_int32_t		*dbuf = buf, dummy;
	int			i;

	/* get the first dword so we can read the length out of the header. */
	if (mpii_handshake_recv_dword(sc, &dbuf[0]) != 0)
		return (1);

	DNPRINTF(MPII_D_CMD, "%s: mpii_handshake_recv dwords: %lu reply: %d\n",
	    DEVNAME(sc), dwords, reply->msg_length);

	/*
	 * the total length, in dwords, is in the message length field of the
	 * reply header.
	 */
	for (i = 1; i < MIN(dwords, reply->msg_length); i++) {
		if (mpii_handshake_recv_dword(sc, &dbuf[i]) != 0)
			return (1);
	}

	/* if there's extra stuff to come off the ioc, discard it */
	while (i++ < reply->msg_length) {
		if (mpii_handshake_recv_dword(sc, &dummy) != 0)
			return (1);
		DNPRINTF(MPII_D_CMD, "%s: mpii_handshake_recv dummy read: "
		    "0x%08x\n", DEVNAME(sc), dummy);
	}

	/* wait for the doorbell used bit to be reset and clear the intr */
	if (mpii_wait_db_int(sc) != 0)
		return (1);

	if (mpii_wait_eq(sc, MPII_DOORBELL, MPII_DOORBELL_INUSE, 0) != 0)
		return (1);

	mpii_write_intr(sc, 0);

	return (0);
}
1261 
/*
 * No-op ccb completion callback, used when the submitter polls or
 * waits on the ccb itself and needs no work done at completion time.
 */
void
mpii_empty_done(struct mpii_ccb *ccb)
{
	/* nothing to do */
}
1267 
/*
 * Fetch IOC FACTS via the doorbell handshake and derive the driver's
 * sizing parameters from the reply: command/reply counts, reply post
 * and free queue depths, request frame size, chain SGE offset and the
 * maximum scatter-gather list length.
 * Returns 0 on success, 1 on handshake failure or if the controller's
 * reply post queue is unusably shallow.
 */
int
mpii_iocfacts(struct mpii_softc *sc)
{
	struct mpii_msg_iocfacts_request	ifq;
	struct mpii_msg_iocfacts_reply		ifp;
	int					irs;
	int					sge_size;
	u_int					qdepth;

	DNPRINTF(MPII_D_MISC, "%s: mpii_iocfacts\n", DEVNAME(sc));

	memset(&ifq, 0, sizeof(ifq));
	memset(&ifp, 0, sizeof(ifp));

	ifq.function = MPII_FUNCTION_IOC_FACTS;

	if (mpii_handshake_send(sc, &ifq, dwordsof(ifq)) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_iocfacts send failed\n",
		    DEVNAME(sc));
		return (1);
	}

	if (mpii_handshake_recv(sc, &ifp, dwordsof(ifp)) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_iocfacts recv failed\n",
		    DEVNAME(sc));
		return (1);
	}

	sc->sc_ioc_number = ifp.ioc_number;
	sc->sc_vf_id = ifp.vf_id;

	sc->sc_max_volumes = ifp.max_volumes;
	sc->sc_max_devices = ifp.max_volumes + lemtoh16(&ifp.max_targets);

	if (ISSET(lemtoh32(&ifp.ioc_capabilities),
	    MPII_IOCFACTS_CAPABILITY_INTEGRATED_RAID))
		SET(sc->sc_flags, MPII_F_RAID);
	if (ISSET(lemtoh32(&ifp.ioc_capabilities),
	    MPII_IOCFACTS_CAPABILITY_EVENT_REPLAY))
		sc->sc_ioc_event_replay = 1;

	/* never use more commands than the IOC grants us credit for */
	sc->sc_max_cmds = MIN(lemtoh16(&ifp.request_credit),
	    MPII_REQUEST_CREDIT);

	/* SAS3 and 3.5 controllers have different sgl layouts */
	if (ifp.msg_version_maj == 2 && ((ifp.msg_version_min == 5)
	    || (ifp.msg_version_min == 6)))
		SET(sc->sc_flags, MPII_F_SAS3);

	/*
	 * The host driver must ensure that there is at least one
	 * unused entry in the Reply Free Queue. One way to ensure
	 * that this requirement is met is to never allocate a number
	 * of reply frames that is a multiple of 16.
	 */
	sc->sc_num_reply_frames = sc->sc_max_cmds + 32;
	if (!(sc->sc_num_reply_frames % 16))
		sc->sc_num_reply_frames--;

	/* must be multiple of 16 */
	sc->sc_reply_post_qdepth = sc->sc_max_cmds +
	    sc->sc_num_reply_frames;
	sc->sc_reply_post_qdepth += 16 - (sc->sc_reply_post_qdepth % 16);

	/* clamp to the hardware's advertised post queue depth and scale
	 * the command/reply counts back down to fit */
	qdepth = lemtoh16(&ifp.max_reply_descriptor_post_queue_depth);
	if (sc->sc_reply_post_qdepth > qdepth) {
		sc->sc_reply_post_qdepth = qdepth;
		if (sc->sc_reply_post_qdepth < 16) {
			printf("%s: RDPQ is too shallow\n", DEVNAME(sc));
			return (1);
		}
		sc->sc_max_cmds = sc->sc_reply_post_qdepth / 2 - 4;
		sc->sc_num_reply_frames = sc->sc_max_cmds + 4;
	}

	sc->sc_reply_free_qdepth = sc->sc_num_reply_frames +
	    16 - (sc->sc_num_reply_frames % 16);

	/*
	 * Our request frame for an I/O operation looks like this:
	 *
	 * +-------------------+ -.
	 * | mpii_msg_scsi_io  |  |
	 * +-------------------|  |
	 * | mpii_sge          |  |
	 * + - - - - - - - - - +  |
	 * | ...               |  > ioc_request_frame_size
	 * + - - - - - - - - - +  |
	 * | mpii_sge (tail)   |  |
	 * + - - - - - - - - - +  |
	 * | mpii_sge (csge)   |  | --.
	 * + - - - - - - - - - + -'   | chain sge points to the next sge
	 * | mpii_sge          |<-----'
	 * + - - - - - - - - - +
	 * | ...               |
	 * + - - - - - - - - - +
	 * | mpii_sge (tail)   |
	 * +-------------------+
	 * |                   |
	 * ~~~~~~~~~~~~~~~~~~~~~
	 * |                   |
	 * +-------------------+ <- sc_request_size - sizeof(scsi_sense_data)
	 * | scsi_sense_data   |
	 * +-------------------+
	 *
	 * If the controller gives us a maximum chain size, there can be
	 * multiple chain sges, each of which points to the sge following it.
	 * Otherwise, there will only be one chain sge.
	 */

	/* both sizes are in 32-bit words */
	sc->sc_reply_size = ifp.reply_frame_size * 4;
	irs = lemtoh16(&ifp.ioc_request_frame_size) * 4;
	sc->sc_request_size = MPII_REQUEST_SIZE;
	/* make sure we have enough space for scsi sense data */
	if (irs > sc->sc_request_size) {
		sc->sc_request_size = irs + sizeof(struct scsi_sense_data);
		sc->sc_request_size += 16 - (sc->sc_request_size % 16);
	}

	if (ISSET(sc->sc_flags, MPII_F_SAS3)) {
		sge_size = sizeof(struct mpii_ieee_sge);
	} else {
		sge_size = sizeof(struct mpii_sge);
	}

	/* offset to the chain sge */
	sc->sc_chain_sge = (irs - sizeof(struct mpii_msg_scsi_io)) /
	    sge_size - 1;

	sc->sc_max_chain = lemtoh16(&ifp.ioc_max_chain_seg_size);

	/*
	 * A number of simple scatter-gather elements we can fit into the
	 * request buffer after the I/O command minus the chain element(s).
	 */
	sc->sc_max_sgl = (sc->sc_request_size -
 	    sizeof(struct mpii_msg_scsi_io) - sizeof(struct scsi_sense_data)) /
	    sge_size - 1;
	if (sc->sc_max_chain > 0) {
		sc->sc_max_sgl -= (sc->sc_max_sgl - sc->sc_chain_sge) /
		    sc->sc_max_chain;
	}

	return (0);
}
1414 
/*
 * Send IOC INIT via the doorbell handshake, programming the controller
 * with the request frame size, queue depths and the DMA addresses of
 * the request, reply, reply post and reply free regions computed
 * earlier.  Returns 0 when the IOC reports success with no loginfo,
 * 1 otherwise.
 */
int
mpii_iocinit(struct mpii_softc *sc)
{
	struct mpii_msg_iocinit_request		iiq;
	struct mpii_msg_iocinit_reply		iip;

	DNPRINTF(MPII_D_MISC, "%s: mpii_iocinit\n", DEVNAME(sc));

	memset(&iiq, 0, sizeof(iiq));
	memset(&iip, 0, sizeof(iip));

	iiq.function = MPII_FUNCTION_IOC_INIT;
	iiq.whoinit = MPII_WHOINIT_HOST_DRIVER;

	/* XXX JPG do something about vf_id */
	iiq.vf_id = 0;

	iiq.msg_version_maj = 0x02;
	iiq.msg_version_min = 0x00;

	/* XXX JPG ensure compliance with some level and hard-code? */
	iiq.hdr_version_unit = 0x00;
	iiq.hdr_version_dev = 0x00;

	/* frame size is expressed in 32-bit words */
	htolem16(&iiq.system_request_frame_size, sc->sc_request_size / 4);

	htolem16(&iiq.reply_descriptor_post_queue_depth,
	    sc->sc_reply_post_qdepth);

	htolem16(&iiq.reply_free_queue_depth, sc->sc_reply_free_qdepth);

	/* sense buffers live inside the request region, so they share
	 * its high address bits */
	htolem32(&iiq.sense_buffer_address_high,
	    MPII_DMA_DVA(sc->sc_requests) >> 32);

	htolem32(&iiq.system_reply_address_high,
	    MPII_DMA_DVA(sc->sc_replies) >> 32);

	htolem32(&iiq.system_request_frame_base_address_lo,
	    MPII_DMA_DVA(sc->sc_requests));
	htolem32(&iiq.system_request_frame_base_address_hi,
	    MPII_DMA_DVA(sc->sc_requests) >> 32);

	htolem32(&iiq.reply_descriptor_post_queue_address_lo,
	    MPII_DMA_DVA(sc->sc_reply_postq));
	htolem32(&iiq.reply_descriptor_post_queue_address_hi,
	    MPII_DMA_DVA(sc->sc_reply_postq) >> 32);

	htolem32(&iiq.reply_free_queue_address_lo,
	    MPII_DMA_DVA(sc->sc_reply_freeq));
	htolem32(&iiq.reply_free_queue_address_hi,
	    MPII_DMA_DVA(sc->sc_reply_freeq) >> 32);

	if (mpii_handshake_send(sc, &iiq, dwordsof(iiq)) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_iocinit send failed\n",
		    DEVNAME(sc));
		return (1);
	}

	if (mpii_handshake_recv(sc, &iip, dwordsof(iip)) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_iocinit recv failed\n",
		    DEVNAME(sc));
		return (1);
	}

	DNPRINTF(MPII_D_MISC, "%s:  function: 0x%02x msg_length: %d "
	    "whoinit: 0x%02x\n", DEVNAME(sc), iip.function,
	    iip.msg_length, iip.whoinit);
	DNPRINTF(MPII_D_MISC, "%s:  msg_flags: 0x%02x\n", DEVNAME(sc),
	    iip.msg_flags);
	DNPRINTF(MPII_D_MISC, "%s:  vf_id: 0x%02x vp_id: 0x%02x\n", DEVNAME(sc),
	    iip.vf_id, iip.vp_id);
	DNPRINTF(MPII_D_MISC, "%s:  ioc_status: 0x%04x\n", DEVNAME(sc),
	    lemtoh16(&iip.ioc_status));
	DNPRINTF(MPII_D_MISC, "%s:  ioc_loginfo: 0x%08x\n", DEVNAME(sc),
	    lemtoh32(&iip.ioc_loginfo));

	if (lemtoh16(&iip.ioc_status) != MPII_IOCSTATUS_SUCCESS ||
	    lemtoh32(&iip.ioc_loginfo))
		return (1);

	return (0);
}
1497 
/*
 * Return a reply frame to the IOC: write its DMA address into the next
 * reply free queue slot (wrapping at sc_reply_free_qdepth) and update
 * the reply free host index register so the hardware can reuse it.
 * A NULL rcb is ignored.
 */
void
mpii_push_reply(struct mpii_softc *sc, struct mpii_rcb *rcb)
{
	u_int32_t		*rfp;
	u_int			idx;

	if (rcb == NULL)
		return;

	idx = sc->sc_reply_free_host_index;

	rfp = MPII_DMA_KVA(sc->sc_reply_freeq);
	htolem32(&rfp[idx], rcb->rcb_reply_dva);

	if (++idx >= sc->sc_reply_free_qdepth)
		idx = 0;

	/* publish the new index to the hardware and remember it */
	mpii_write_reply_free(sc, sc->sc_reply_free_host_index = idx);
}
1517 
/*
 * Issue a PORT FACTS request for port 0 by polling a ccb, and record
 * the reported port type in sc_porttype.
 * Returns 0 on success, 1 on ccb allocation, poll or reply failure.
 */
int
mpii_portfacts(struct mpii_softc *sc)
{
	struct mpii_msg_portfacts_request	*pfq;
	struct mpii_msg_portfacts_reply		*pfp;
	struct mpii_ccb				*ccb;
	int					rv = 1;

	DNPRINTF(MPII_D_MISC, "%s: mpii_portfacts\n", DEVNAME(sc));

	ccb = scsi_io_get(&sc->sc_iopool, 0);
	if (ccb == NULL) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_portfacts mpii_get_ccb fail\n",
		    DEVNAME(sc));
		return (rv);
	}

	ccb->ccb_done = mpii_empty_done;
	pfq = ccb->ccb_cmd;

	memset(pfq, 0, sizeof(*pfq));

	pfq->function = MPII_FUNCTION_PORT_FACTS;
	pfq->chain_offset = 0;
	pfq->msg_flags = 0;
	pfq->port_number = 0;
	pfq->vp_id = 0;
	pfq->vf_id = 0;

	if (mpii_poll(sc, ccb) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_portfacts poll\n",
		    DEVNAME(sc));
		goto err;
	}

	if (ccb->ccb_rcb == NULL) {
		DNPRINTF(MPII_D_MISC, "%s: empty portfacts reply\n",
		    DEVNAME(sc));
		goto err;
	}

	pfp = ccb->ccb_rcb->rcb_reply;
	sc->sc_porttype = pfp->port_type;

	/* hand the reply frame back to the IOC */
	mpii_push_reply(sc, ccb->ccb_rcb);
	rv = 0;
err:
	scsi_io_put(&sc->sc_iopool, ccb);

	return (rv);
}
1569 
/*
 * scsi_ioh handler: acknowledge one queued event to the IOC.  Pops the
 * next rcb off sc_evt_ack_queue, builds an EVENT ACK request echoing
 * the event code and context from the reply, returns the reply frame
 * to the IOC and fires the request.  If more events remain queued the
 * handler reschedules itself for another ccb.
 */
void
mpii_eventack(void *cookie, void *io)
{
	struct mpii_softc			*sc = cookie;
	struct mpii_ccb				*ccb = io;
	struct mpii_rcb				*rcb, *next;
	struct mpii_msg_event_reply		*enp;
	struct mpii_msg_eventack_request	*eaq;

	mtx_enter(&sc->sc_evt_ack_mtx);
	rcb = SIMPLEQ_FIRST(&sc->sc_evt_ack_queue);
	if (rcb != NULL) {
		next = SIMPLEQ_NEXT(rcb, rcb_link);
		SIMPLEQ_REMOVE_HEAD(&sc->sc_evt_ack_queue, rcb_link);
	}
	mtx_leave(&sc->sc_evt_ack_mtx);

	/* queue drained while we waited for a ccb; give it back */
	if (rcb == NULL) {
		scsi_io_put(&sc->sc_iopool, ccb);
		return;
	}

	enp = (struct mpii_msg_event_reply *)rcb->rcb_reply;

	ccb->ccb_done = mpii_eventack_done;
	eaq = ccb->ccb_cmd;

	eaq->function = MPII_FUNCTION_EVENT_ACK;

	/* already little-endian; copied through untranslated */
	eaq->event = enp->event;
	eaq->event_context = enp->event_context;

	mpii_push_reply(sc, rcb);

	mpii_start(sc, ccb);

	if (next != NULL)
		scsi_ioh_add(&sc->sc_evt_ack_handler);
}
1609 
/*
 * Completion for an EVENT ACK request: recycle the reply frame (if
 * any) and release the ccb back to the iopool.
 */
void
mpii_eventack_done(struct mpii_ccb *ccb)
{
	struct mpii_softc			*sc = ccb->ccb_sc;

	DNPRINTF(MPII_D_EVT, "%s: event ack done\n", DEVNAME(sc));

	mpii_push_reply(sc, ccb->ccb_rcb);
	scsi_io_put(&sc->sc_iopool, ccb);
}
1620 
1621 int
1622 mpii_portenable(struct mpii_softc *sc)
1623 {
1624 	struct mpii_msg_portenable_request	*peq;
1625 	struct mpii_ccb				*ccb;
1626 
1627 	DNPRINTF(MPII_D_MISC, "%s: mpii_portenable\n", DEVNAME(sc));
1628 
1629 	ccb = scsi_io_get(&sc->sc_iopool, 0);
1630 	if (ccb == NULL) {
1631 		DNPRINTF(MPII_D_MISC, "%s: mpii_portenable ccb_get\n",
1632 		    DEVNAME(sc));
1633 		return (1);
1634 	}
1635 
1636 	ccb->ccb_done = mpii_empty_done;
1637 	peq = ccb->ccb_cmd;
1638 
1639 	peq->function = MPII_FUNCTION_PORT_ENABLE;
1640 	peq->vf_id = sc->sc_vf_id;
1641 
1642 	if (mpii_poll(sc, ccb) != 0) {
1643 		DNPRINTF(MPII_D_MISC, "%s: mpii_portenable poll\n",
1644 		    DEVNAME(sc));
1645 		return (1);
1646 	}
1647 
1648 	if (ccb->ccb_rcb == NULL) {
1649 		DNPRINTF(MPII_D_MISC, "%s: empty portenable reply\n",
1650 		    DEVNAME(sc));
1651 		return (1);
1652 	}
1653 
1654 	mpii_push_reply(sc, ccb->ccb_rcb);
1655 	scsi_io_put(&sc->sc_iopool, ccb);
1656 
1657 	return (0);
1658 }
1659 
1660 int
1661 mpii_cfg_coalescing(struct mpii_softc *sc)
1662 {
1663 	struct mpii_cfg_hdr			hdr;
1664 	struct mpii_cfg_ioc_pg1			ipg;
1665 
1666 	hdr.page_version = 0;
1667 	hdr.page_length = sizeof(ipg) / 4;
1668 	hdr.page_number = 1;
1669 	hdr.page_type = MPII_CONFIG_REQ_PAGE_TYPE_IOC;
1670 	memset(&ipg, 0, sizeof(ipg));
1671 	if (mpii_req_cfg_page(sc, 0, MPII_PG_POLL, &hdr, 1, &ipg,
1672 	    sizeof(ipg)) != 0) {
1673 		DNPRINTF(MPII_D_MISC, "%s: unable to fetch IOC page 1\n"
1674 		    "page 1\n", DEVNAME(sc));
1675 		return (1);
1676 	}
1677 
1678 	if (!ISSET(lemtoh32(&ipg.flags), MPII_CFG_IOC_1_REPLY_COALESCING))
1679 		return (0);
1680 
1681 	/* Disable coalescing */
1682 	CLR(ipg.flags, htole32(MPII_CFG_IOC_1_REPLY_COALESCING));
1683 	if (mpii_req_cfg_page(sc, 0, MPII_PG_POLL, &hdr, 0, &ipg,
1684 	    sizeof(ipg)) != 0) {
1685 		DNPRINTF(MPII_D_MISC, "%s: unable to clear coalescing\n",
1686 		    DEVNAME(sc));
1687 		return (1);
1688 	}
1689 
1690 	return (0);
1691 }
1692 
/*
 * Mask (suppress) every event notification: the four 32-bit mask words
 * cover event codes 0-127 and a set bit disables the matching event.
 * All-ones is byte-order neutral, so no htole32() is needed here.
 */
#define MPII_EVENT_MASKALL(enq)		do {			\
		(enq)->event_masks[0] = 0xffffffff;		\
		(enq)->event_masks[1] = 0xffffffff;		\
		(enq)->event_masks[2] = 0xffffffff;		\
		(enq)->event_masks[3] = 0xffffffff;		\
	} while (0)

/*
 * Clear the mask bit for one event code so the IOC reports it.
 * Arguments are parenthesized so expression arguments stay safe, and
 * 1U avoids undefined signed overflow when (evt) % 32 == 31.
 */
#define MPII_EVENT_UNMASK(enq, evt)	do {			\
		(enq)->event_masks[(evt) / 32] &=		\
		    htole32(~(1U << ((evt) % 32)));		\
	} while (0)
1704 
/*
 * Initialise the event machinery (SAS event queue/task, event ack
 * queue/handler) and send an asynchronous EVENT NOTIFICATION request
 * unmasking the SAS and IR events the driver handles.  Completion is
 * handled by mpii_eventnotify_done().
 * Returns 0 on success, 1 if no ccb could be allocated.
 */
int
mpii_eventnotify(struct mpii_softc *sc)
{
	struct mpii_msg_event_request		*enq;
	struct mpii_ccb				*ccb;

	ccb = scsi_io_get(&sc->sc_iopool, 0);
	if (ccb == NULL) {
		DNPRINTF(MPII_D_MISC, "%s: mpii_eventnotify ccb_get\n",
		    DEVNAME(sc));
		return (1);
	}

	SIMPLEQ_INIT(&sc->sc_evt_sas_queue);
	mtx_init(&sc->sc_evt_sas_mtx, IPL_BIO);
	task_set(&sc->sc_evt_sas_task, mpii_event_sas, sc);

	SIMPLEQ_INIT(&sc->sc_evt_ack_queue);
	mtx_init(&sc->sc_evt_ack_mtx, IPL_BIO);
	scsi_ioh_set(&sc->sc_evt_ack_handler, &sc->sc_iopool,
	    mpii_eventack, sc);

	ccb->ccb_done = mpii_eventnotify_done;
	enq = ccb->ccb_cmd;

	enq->function = MPII_FUNCTION_EVENT_NOTIFICATION;

	/*
	 * Enable reporting of the following events:
	 *
	 * MPII_EVENT_SAS_DISCOVERY
	 * MPII_EVENT_SAS_TOPOLOGY_CHANGE_LIST
	 * MPII_EVENT_SAS_DEVICE_STATUS_CHANGE
	 * MPII_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE
	 * MPII_EVENT_IR_CONFIGURATION_CHANGE_LIST
	 * MPII_EVENT_IR_VOLUME
	 * MPII_EVENT_IR_PHYSICAL_DISK
	 * MPII_EVENT_IR_OPERATION_STATUS
	 */

	MPII_EVENT_MASKALL(enq);
	MPII_EVENT_UNMASK(enq, MPII_EVENT_SAS_DISCOVERY);
	MPII_EVENT_UNMASK(enq, MPII_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
	MPII_EVENT_UNMASK(enq, MPII_EVENT_SAS_DEVICE_STATUS_CHANGE);
	MPII_EVENT_UNMASK(enq, MPII_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
	MPII_EVENT_UNMASK(enq, MPII_EVENT_IR_CONFIGURATION_CHANGE_LIST);
	MPII_EVENT_UNMASK(enq, MPII_EVENT_IR_VOLUME);
	MPII_EVENT_UNMASK(enq, MPII_EVENT_IR_PHYSICAL_DISK);
	MPII_EVENT_UNMASK(enq, MPII_EVENT_IR_OPERATION_STATUS);

	mpii_start(sc, ccb);

	return (0);
}
1759 
/*
 * Completion for the EVENT NOTIFICATION request: release the ccb and
 * dispatch the event reply to mpii_event_process().  The rcb is saved
 * before the put because ownership passes to event processing.
 */
void
mpii_eventnotify_done(struct mpii_ccb *ccb)
{
	struct mpii_softc			*sc = ccb->ccb_sc;
	struct mpii_rcb				*rcb = ccb->ccb_rcb;

	DNPRINTF(MPII_D_EVT, "%s: mpii_eventnotify_done\n", DEVNAME(sc));

	scsi_io_put(&sc->sc_iopool, ccb);
	mpii_event_process(sc, rcb);
}
1771 
/*
 * Handle an IR CONFIGURATION CHANGE LIST event: walk the change list
 * elements and update the driver's device table.  Volume creation adds
 * a mpii_device entry; volume deletion removes it; physical disks that
 * join a volume or become hot spares are flagged hidden on their
 * underlying SAS device entry.  Foreign configurations are ignored.
 */
void
mpii_event_raid(struct mpii_softc *sc, struct mpii_msg_event_reply *enp)
{
	struct mpii_evt_ir_cfg_change_list	*ccl;
	struct mpii_evt_ir_cfg_element		*ce;
	struct mpii_device			*dev;
	u_int16_t				type;
	int					i;

	/* the change list payload immediately follows the event reply */
	ccl = (struct mpii_evt_ir_cfg_change_list *)(enp + 1);
	if (ccl->num_elements == 0)
		return;

	if (ISSET(lemtoh32(&ccl->flags), MPII_EVT_IR_CFG_CHANGE_LIST_FOREIGN)) {
		/* bail on foreign configurations */
		return;
	}

	ce = (struct mpii_evt_ir_cfg_element *)(ccl + 1);

	for (i = 0; i < ccl->num_elements; i++, ce++) {
		type = (lemtoh16(&ce->element_flags) &
		    MPII_EVT_IR_CFG_ELEMENT_TYPE_MASK);

		switch (type) {
		case MPII_EVT_IR_CFG_ELEMENT_TYPE_VOLUME:
			switch (ce->reason_code) {
			case MPII_EVT_IR_CFG_ELEMENT_RC_ADDED:
			case MPII_EVT_IR_CFG_ELEMENT_RC_VOLUME_CREATED:
				if (mpii_find_dev(sc,
				    lemtoh16(&ce->vol_dev_handle))) {
					printf("%s: device %#x is already "
					    "configured\n", DEVNAME(sc),
					    lemtoh16(&ce->vol_dev_handle));
					break;
				}
				dev = malloc(sizeof(*dev), M_DEVBUF,
				    M_NOWAIT | M_ZERO);
				if (!dev) {
					printf("%s: failed to allocate a "
					    "device structure\n", DEVNAME(sc));
					break;
				}
				SET(dev->flags, MPII_DF_VOLUME);
				dev->slot = sc->sc_vd_id_low;
				dev->dev_handle = lemtoh16(&ce->vol_dev_handle);
				if (mpii_insert_dev(sc, dev)) {
					free(dev, M_DEVBUF, sizeof *dev);
					break;
				}
				sc->sc_vd_count++;
				break;
			case MPII_EVT_IR_CFG_ELEMENT_RC_REMOVED:
			case MPII_EVT_IR_CFG_ELEMENT_RC_VOLUME_DELETED:
				if (!(dev = mpii_find_dev(sc,
				    lemtoh16(&ce->vol_dev_handle))))
					break;
				mpii_remove_dev(sc, dev);
				sc->sc_vd_count--;
				break;
			}
			break;
		case MPII_EVT_IR_CFG_ELEMENT_TYPE_VOLUME_DISK:
			if (ce->reason_code ==
			    MPII_EVT_IR_CFG_ELEMENT_RC_PD_CREATED ||
			    ce->reason_code ==
			    MPII_EVT_IR_CFG_ELEMENT_RC_HIDE) {
				/* there should be an underlying sas drive */
				if (!(dev = mpii_find_dev(sc,
				    lemtoh16(&ce->phys_disk_dev_handle))))
					break;
				/* promoted from a hot spare? */
				CLR(dev->flags, MPII_DF_HOT_SPARE);
				SET(dev->flags, MPII_DF_VOLUME_DISK |
				    MPII_DF_HIDDEN);
			}
			break;
		case MPII_EVT_IR_CFG_ELEMENT_TYPE_HOT_SPARE:
			if (ce->reason_code ==
			    MPII_EVT_IR_CFG_ELEMENT_RC_HIDE) {
				/* there should be an underlying sas drive */
				if (!(dev = mpii_find_dev(sc,
				    lemtoh16(&ce->phys_disk_dev_handle))))
					break;
				SET(dev->flags, MPII_DF_HOT_SPARE |
				    MPII_DF_HIDDEN);
			}
			break;
		}
	}
}
1863 
/*
 * Task handler for queued SAS events.  Pops one rcb off
 * sc_evt_sas_queue (re-queueing the task if more follow) and handles
 * it: SAS_DISCOVERY goes to mpii_event_discovery(); a TOPOLOGY CHANGE
 * LIST has its phy entries walked, attaching new devices (and probing
 * the scsibus target) or detaching missing ones.  Any other event code
 * in this queue is a driver bug, hence the panic.
 */
void
mpii_event_sas(void *xsc)
{
	struct mpii_softc *sc = xsc;
	struct mpii_rcb *rcb, *next;
	struct mpii_msg_event_reply *enp;
	struct mpii_evt_sas_tcl		*tcl;
	struct mpii_evt_phy_entry	*pe;
	struct mpii_device		*dev;
	int				i;
	u_int16_t			handle;

	mtx_enter(&sc->sc_evt_sas_mtx);
	rcb = SIMPLEQ_FIRST(&sc->sc_evt_sas_queue);
	if (rcb != NULL) {
		next = SIMPLEQ_NEXT(rcb, rcb_link);
		SIMPLEQ_REMOVE_HEAD(&sc->sc_evt_sas_queue, rcb_link);
	}
	mtx_leave(&sc->sc_evt_sas_mtx);

	if (rcb == NULL)
		return;
	if (next != NULL)
		task_add(systq, &sc->sc_evt_sas_task);

	enp = (struct mpii_msg_event_reply *)rcb->rcb_reply;
	switch (lemtoh16(&enp->event)) {
	case MPII_EVENT_SAS_DISCOVERY:
		mpii_event_discovery(sc, enp);
		goto done;
	case MPII_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
		/* handle below */
		break;
	default:
		panic("%s: unexpected event %#x in sas event queue",
		    DEVNAME(sc), lemtoh16(&enp->event));
		/* NOTREACHED */
	}

	/* the topology change list and its phy entries follow the reply */
	tcl = (struct mpii_evt_sas_tcl *)(enp + 1);
	pe = (struct mpii_evt_phy_entry *)(tcl + 1);

	for (i = 0; i < tcl->num_entries; i++, pe++) {
		switch (pe->phy_status & MPII_EVENT_SAS_TOPO_PS_RC_MASK) {
		case MPII_EVENT_SAS_TOPO_PS_RC_ADDED:
			handle = lemtoh16(&pe->dev_handle);
			if (mpii_find_dev(sc, handle)) {
				printf("%s: device %#x is already "
				    "configured\n", DEVNAME(sc), handle);
				break;
			}

			dev = malloc(sizeof(*dev), M_DEVBUF, M_WAITOK | M_ZERO);
			dev->slot = sc->sc_pd_id_start + tcl->start_phy_num + i;
			dev->dev_handle = handle;
			dev->phy_num = tcl->start_phy_num + i;
			if (tcl->enclosure_handle)
				dev->physical_port = tcl->physical_port;
			dev->enclosure = lemtoh16(&tcl->enclosure_handle);
			dev->expander = lemtoh16(&tcl->expander_handle);

			if (mpii_insert_dev(sc, dev)) {
				free(dev, M_DEVBUF, sizeof *dev);
				break;
			}

			if (sc->sc_scsibus != NULL)
				scsi_probe_target(sc->sc_scsibus, dev->slot);
			break;

		case MPII_EVENT_SAS_TOPO_PS_RC_MISSING:
			dev = mpii_find_dev(sc, lemtoh16(&pe->dev_handle));
			if (dev == NULL)
				break;

			mpii_remove_dev(sc, dev);
			mpii_sas_remove_device(sc, dev->dev_handle);
			/* hidden devices (RAID members) have no scsibus
			 * attachment to tear down */
			if (sc->sc_scsibus != NULL &&
			    !ISSET(dev->flags, MPII_DF_HIDDEN)) {
				scsi_activate(sc->sc_scsibus, dev->slot, -1,
				    DVACT_DEACTIVATE);
				scsi_detach_target(sc->sc_scsibus, dev->slot,
				    DETACH_FORCE);
			}

			free(dev, M_DEVBUF, sizeof *dev);
			break;
		}
	}

done:
	mpii_event_done(sc, rcb);
}
1957 
/*
 * Track boot-time SAS discovery progress.  sc_pending counts
 * outstanding discovery runs; when the last COMPLETED event drops the
 * count back to its base value, sc_pending is cleared and the pending
 * config hold taken at attach time is released so boot can proceed.
 * NOTE(review): the base value appears to be 1 (set at attach, not
 * visible in this chunk) — confirm against mpii_attach().
 */
void
mpii_event_discovery(struct mpii_softc *sc, struct mpii_msg_event_reply *enp)
{
	struct mpii_evt_sas_discovery *esd =
	    (struct mpii_evt_sas_discovery *)(enp + 1);

	/* zero means we are no longer holding up boot; nothing to do */
	if (sc->sc_pending == 0)
		return;

	switch (esd->reason_code) {
	case MPII_EVENT_SAS_DISC_REASON_CODE_STARTED:
		++sc->sc_pending;
		break;
	case MPII_EVENT_SAS_DISC_REASON_CODE_COMPLETED:
		if (--sc->sc_pending == 1) {
			sc->sc_pending = 0;
			config_pending_decr();
		}
		break;
	}
}
1979 
/*
 * Dispatch an asynchronous event reply.  SAS discovery/topology events
 * are queued for the sc_evt_sas_task (which takes over ownership of
 * the rcb and returns early here); IR events are handled inline.  All
 * inline paths fall through to mpii_event_done(), which acks and/or
 * recycles the reply frame.
 */
void
mpii_event_process(struct mpii_softc *sc, struct mpii_rcb *rcb)
{
	struct mpii_msg_event_reply		*enp;

	enp = (struct mpii_msg_event_reply *)rcb->rcb_reply;

	DNPRINTF(MPII_D_EVT, "%s: mpii_event_process: %#x\n", DEVNAME(sc),
	    lemtoh16(&enp->event));

	switch (lemtoh16(&enp->event)) {
	case MPII_EVENT_EVENT_CHANGE:
		/* should be properly ignored */
		break;
	case MPII_EVENT_SAS_DISCOVERY:
	case MPII_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
		/* defer to the systq task; it now owns this rcb */
		mtx_enter(&sc->sc_evt_sas_mtx);
		SIMPLEQ_INSERT_TAIL(&sc->sc_evt_sas_queue, rcb, rcb_link);
		mtx_leave(&sc->sc_evt_sas_mtx);
		task_add(systq, &sc->sc_evt_sas_task);
		return;
	case MPII_EVENT_SAS_DEVICE_STATUS_CHANGE:
		break;
	case MPII_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
		break;
	case MPII_EVENT_IR_VOLUME: {
		struct mpii_evt_ir_volume	*evd =
		    (struct mpii_evt_ir_volume *)(enp + 1);
		struct mpii_device		*dev;
#if NBIO > 0
		const char *vol_states[] = {
			BIOC_SVINVALID_S,
			BIOC_SVOFFLINE_S,
			BIOC_SVBUILDING_S,
			BIOC_SVONLINE_S,
			BIOC_SVDEGRADED_S,
			BIOC_SVONLINE_S,
		};
#endif

		if (cold)
			break;
		KERNEL_LOCK();
		dev = mpii_find_dev(sc, lemtoh16(&evd->vol_dev_handle));
		KERNEL_UNLOCK();
		if (dev == NULL)
			break;
#if NBIO > 0
		if (evd->reason_code == MPII_EVENT_IR_VOL_RC_STATE_CHANGED)
			printf("%s: volume %d state changed from %s to %s\n",
			    DEVNAME(sc), dev->slot - sc->sc_vd_id_low,
			    vol_states[evd->prev_value],
			    vol_states[evd->new_value]);
#endif
		if (evd->reason_code == MPII_EVENT_IR_VOL_RC_STATUS_CHANGED &&
		    ISSET(evd->new_value, MPII_CFG_RAID_VOL_0_STATUS_RESYNC) &&
		    !ISSET(evd->prev_value, MPII_CFG_RAID_VOL_0_STATUS_RESYNC))
			printf("%s: started resync on a volume %d\n",
			    DEVNAME(sc), dev->slot - sc->sc_vd_id_low);
		}
		break;
	case MPII_EVENT_IR_PHYSICAL_DISK:
		break;
	case MPII_EVENT_IR_CONFIGURATION_CHANGE_LIST:
		mpii_event_raid(sc, enp);
		break;
	case MPII_EVENT_IR_OPERATION_STATUS: {
		struct mpii_evt_ir_status	*evs =
		    (struct mpii_evt_ir_status *)(enp + 1);
		struct mpii_device		*dev;

		KERNEL_LOCK();
		dev = mpii_find_dev(sc, lemtoh16(&evs->vol_dev_handle));
		KERNEL_UNLOCK();
		if (dev != NULL &&
		    evs->operation == MPII_EVENT_IR_RAIDOP_RESYNC)
			dev->percent = evs->percent;
		break;
		}
	default:
		DNPRINTF(MPII_D_EVT, "%s:  unhandled event 0x%02x\n",
		    DEVNAME(sc), lemtoh16(&enp->event));
	}

	mpii_event_done(sc, rcb);
}
2066 
2067 void
2068 mpii_event_done(struct mpii_softc *sc, struct mpii_rcb *rcb)
2069 {
2070 	struct mpii_msg_event_reply *enp = rcb->rcb_reply;
2071 
2072 	if (enp->ack_required) {
2073 		mtx_enter(&sc->sc_evt_ack_mtx);
2074 		SIMPLEQ_INSERT_TAIL(&sc->sc_evt_ack_queue, rcb, rcb_link);
2075 		mtx_leave(&sc->sc_evt_ack_mtx);
2076 		scsi_ioh_add(&sc->sc_evt_ack_handler);
2077 	} else
2078 		mpii_push_reply(sc, rcb);
2079 }
2080 
/*
 * Tell the IOC to forget about a device: first send a target reset
 * task management request for the handle, then ask the SAS IO unit to
 * remove the device.  Both requests are sent synchronously through a
 * single ccb which is reused for the second request.
 */
void
mpii_sas_remove_device(struct mpii_softc *sc, u_int16_t handle)
{
	struct mpii_msg_scsi_task_request	*stq;
	struct mpii_msg_sas_oper_request	*soq;
	struct mpii_ccb				*ccb;

	/* best effort: if no ccb is available just give up */
	ccb = scsi_io_get(&sc->sc_iopool, 0);
	if (ccb == NULL)
		return;

	/* step 1: target reset for the device handle */
	stq = ccb->ccb_cmd;
	stq->function = MPII_FUNCTION_SCSI_TASK_MGMT;
	stq->task_type = MPII_SCSI_TASK_TARGET_RESET;
	htolem16(&stq->dev_handle, handle);

	ccb->ccb_done = mpii_empty_done;
	mpii_wait(sc, ccb);

	if (ccb->ccb_rcb != NULL)
		mpii_push_reply(sc, ccb->ccb_rcb);

	/* reuse a ccb */
	ccb->ccb_state = MPII_CCB_READY;
	ccb->ccb_rcb = NULL;

	/* step 2: SAS IO unit control op to drop the device */
	soq = ccb->ccb_cmd;
	memset(soq, 0, sizeof(*soq));
	soq->function = MPII_FUNCTION_SAS_IO_UNIT_CONTROL;
	soq->operation = MPII_SAS_OP_REMOVE_DEVICE;
	htolem16(&soq->dev_handle, handle);

	ccb->ccb_done = mpii_empty_done;
	mpii_wait(sc, ccb);
	if (ccb->ccb_rcb != NULL)
		mpii_push_reply(sc, ccb->ccb_rcb);

	scsi_io_put(&sc->sc_iopool, ccb);
}
2120 
2121 int
2122 mpii_board_info(struct mpii_softc *sc)
2123 {
2124 	struct mpii_msg_iocfacts_request	ifq;
2125 	struct mpii_msg_iocfacts_reply		ifp;
2126 	struct mpii_cfg_manufacturing_pg0	mpg;
2127 	struct mpii_cfg_hdr			hdr;
2128 
2129 	memset(&ifq, 0, sizeof(ifq));
2130 	memset(&ifp, 0, sizeof(ifp));
2131 
2132 	ifq.function = MPII_FUNCTION_IOC_FACTS;
2133 
2134 	if (mpii_handshake_send(sc, &ifq, dwordsof(ifq)) != 0) {
2135 		DNPRINTF(MPII_D_MISC, "%s: failed to request ioc facts\n",
2136 		    DEVNAME(sc));
2137 		return (1);
2138 	}
2139 
2140 	if (mpii_handshake_recv(sc, &ifp, dwordsof(ifp)) != 0) {
2141 		DNPRINTF(MPII_D_MISC, "%s: failed to receive ioc facts\n",
2142 		    DEVNAME(sc));
2143 		return (1);
2144 	}
2145 
2146 	hdr.page_version = 0;
2147 	hdr.page_length = sizeof(mpg) / 4;
2148 	hdr.page_number = 0;
2149 	hdr.page_type = MPII_CONFIG_REQ_PAGE_TYPE_MANUFACTURING;
2150 	memset(&mpg, 0, sizeof(mpg));
2151 	if (mpii_req_cfg_page(sc, 0, MPII_PG_POLL, &hdr, 1, &mpg,
2152 	    sizeof(mpg)) != 0) {
2153 		printf("%s: unable to fetch manufacturing page 0\n",
2154 		    DEVNAME(sc));
2155 		return (EINVAL);
2156 	}
2157 
2158 	printf("%s: %s, firmware %u.%u.%u.%u%s, MPI %u.%u\n", DEVNAME(sc),
2159 	    mpg.board_name, ifp.fw_version_maj, ifp.fw_version_min,
2160 	    ifp.fw_version_unit, ifp.fw_version_dev,
2161 	    ISSET(sc->sc_flags, MPII_F_RAID) ? " IR" : "",
2162 	    ifp.msg_version_maj, ifp.msg_version_min);
2163 
2164 	return (0);
2165 }
2166 
/*
 * Work out how volume and physical disk target ids are laid out by
 * reading IOC page 8, and adjust sc_vd_id_low and sc_pd_id_start to
 * match.  Returns non-zero if the page cannot be fetched.
 */
int
mpii_target_map(struct mpii_softc *sc)
{
	struct mpii_cfg_hdr			hdr;
	struct mpii_cfg_ioc_pg8			ipg;
	int					flags, pad = 0;

	hdr.page_version = 0;
	hdr.page_length = sizeof(ipg) / 4;	/* page length is in dwords */
	hdr.page_number = 8;
	hdr.page_type = MPII_CONFIG_REQ_PAGE_TYPE_IOC;
	memset(&ipg, 0, sizeof(ipg));
	if (mpii_req_cfg_page(sc, 0, MPII_PG_POLL, &hdr, 1, &ipg,
	    sizeof(ipg)) != 0) {
		printf("%s: unable to fetch ioc page 8\n",
		    DEVNAME(sc));
		return (EINVAL);
	}

	/* skip target id 0 when the IOC reserves it */
	if (lemtoh16(&ipg.flags) & MPII_IOC_PG8_FLAGS_RESERVED_TARGETID_0)
		pad = 1;

	flags = lemtoh16(&ipg.ir_volume_mapping_flags) &
	    MPII_IOC_PG8_IRFLAGS_VOLUME_MAPPING_MODE_MASK;
	if (ISSET(sc->sc_flags, MPII_F_RAID)) {
		if (flags == MPII_IOC_PG8_IRFLAGS_LOW_VOLUME_MAPPING) {
			/* volumes start at the low end of the id range */
			sc->sc_vd_id_low += pad;
			pad = sc->sc_max_volumes; /* for sc_pd_id_start */
		} else
			/* volumes sit at the top of the id range */
			sc->sc_vd_id_low = sc->sc_max_devices -
			    sc->sc_max_volumes;
	}

	sc->sc_pd_id_start += pad;

	return (0);
}
2204 
2205 int
2206 mpii_req_cfg_header(struct mpii_softc *sc, u_int8_t type, u_int8_t number,
2207     u_int32_t address, int flags, void *p)
2208 {
2209 	struct mpii_msg_config_request		*cq;
2210 	struct mpii_msg_config_reply		*cp;
2211 	struct mpii_ccb				*ccb;
2212 	struct mpii_cfg_hdr			*hdr = p;
2213 	struct mpii_ecfg_hdr			*ehdr = p;
2214 	int					etype = 0;
2215 	int					rv = 0;
2216 
2217 	DNPRINTF(MPII_D_MISC, "%s: mpii_req_cfg_header type: %#x number: %x "
2218 	    "address: 0x%08x flags: 0x%b\n", DEVNAME(sc), type, number,
2219 	    address, flags, MPII_PG_FMT);
2220 
2221 	ccb = scsi_io_get(&sc->sc_iopool,
2222 	    ISSET(flags, MPII_PG_POLL) ? SCSI_NOSLEEP : 0);
2223 	if (ccb == NULL) {
2224 		DNPRINTF(MPII_D_MISC, "%s: mpii_cfg_header ccb_get\n",
2225 		    DEVNAME(sc));
2226 		return (1);
2227 	}
2228 
2229 	if (ISSET(flags, MPII_PG_EXTENDED)) {
2230 		etype = type;
2231 		type = MPII_CONFIG_REQ_PAGE_TYPE_EXTENDED;
2232 	}
2233 
2234 	cq = ccb->ccb_cmd;
2235 
2236 	cq->function = MPII_FUNCTION_CONFIG;
2237 
2238 	cq->action = MPII_CONFIG_REQ_ACTION_PAGE_HEADER;
2239 
2240 	cq->config_header.page_number = number;
2241 	cq->config_header.page_type = type;
2242 	cq->ext_page_type = etype;
2243 	htolem32(&cq->page_address, address);
2244 	htolem32(&cq->page_buffer.sg_hdr, MPII_SGE_FL_TYPE_SIMPLE |
2245 	    MPII_SGE_FL_LAST | MPII_SGE_FL_EOB | MPII_SGE_FL_EOL);
2246 
2247 	ccb->ccb_done = mpii_empty_done;
2248 	if (ISSET(flags, MPII_PG_POLL)) {
2249 		if (mpii_poll(sc, ccb) != 0) {
2250 			DNPRINTF(MPII_D_MISC, "%s: mpii_cfg_header poll\n",
2251 			    DEVNAME(sc));
2252 			return (1);
2253 		}
2254 	} else
2255 		mpii_wait(sc, ccb);
2256 
2257 	if (ccb->ccb_rcb == NULL) {
2258 		scsi_io_put(&sc->sc_iopool, ccb);
2259 		return (1);
2260 	}
2261 	cp = ccb->ccb_rcb->rcb_reply;
2262 
2263 	DNPRINTF(MPII_D_MISC, "%s:  action: 0x%02x sgl_flags: 0x%02x "
2264 	    "msg_length: %d function: 0x%02x\n", DEVNAME(sc), cp->action,
2265 	    cp->sgl_flags, cp->msg_length, cp->function);
2266 	DNPRINTF(MPII_D_MISC, "%s:  ext_page_length: %d ext_page_type: 0x%02x "
2267 	    "msg_flags: 0x%02x\n", DEVNAME(sc),
2268 	    lemtoh16(&cp->ext_page_length), cp->ext_page_type,
2269 	    cp->msg_flags);
2270 	DNPRINTF(MPII_D_MISC, "%s:  vp_id: 0x%02x vf_id: 0x%02x\n", DEVNAME(sc),
2271 	    cp->vp_id, cp->vf_id);
2272 	DNPRINTF(MPII_D_MISC, "%s:  ioc_status: 0x%04x\n", DEVNAME(sc),
2273 	    lemtoh16(&cp->ioc_status));
2274 	DNPRINTF(MPII_D_MISC, "%s:  ioc_loginfo: 0x%08x\n", DEVNAME(sc),
2275 	    lemtoh32(&cp->ioc_loginfo));
2276 	DNPRINTF(MPII_D_MISC, "%s:  page_version: 0x%02x page_length: %d "
2277 	    "page_number: 0x%02x page_type: 0x%02x\n", DEVNAME(sc),
2278 	    cp->config_header.page_version,
2279 	    cp->config_header.page_length,
2280 	    cp->config_header.page_number,
2281 	    cp->config_header.page_type);
2282 
2283 	if (lemtoh16(&cp->ioc_status) != MPII_IOCSTATUS_SUCCESS)
2284 		rv = 1;
2285 	else if (ISSET(flags, MPII_PG_EXTENDED)) {
2286 		memset(ehdr, 0, sizeof(*ehdr));
2287 		ehdr->page_version = cp->config_header.page_version;
2288 		ehdr->page_number = cp->config_header.page_number;
2289 		ehdr->page_type = cp->config_header.page_type;
2290 		ehdr->ext_page_length = cp->ext_page_length;
2291 		ehdr->ext_page_type = cp->ext_page_type;
2292 	} else
2293 		*hdr = cp->config_header;
2294 
2295 	mpii_push_reply(sc, ccb->ccb_rcb);
2296 	scsi_io_put(&sc->sc_iopool, ccb);
2297 
2298 	return (rv);
2299 }
2300 
2301 int
2302 mpii_req_cfg_page(struct mpii_softc *sc, u_int32_t address, int flags,
2303     void *p, int read, void *page, size_t len)
2304 {
2305 	struct mpii_msg_config_request		*cq;
2306 	struct mpii_msg_config_reply		*cp;
2307 	struct mpii_ccb				*ccb;
2308 	struct mpii_cfg_hdr			*hdr = p;
2309 	struct mpii_ecfg_hdr			*ehdr = p;
2310 	caddr_t					kva;
2311 	int					page_length;
2312 	int					rv = 0;
2313 
2314 	DNPRINTF(MPII_D_MISC, "%s: mpii_cfg_page address: %d read: %d "
2315 	    "type: %x\n", DEVNAME(sc), address, read, hdr->page_type);
2316 
2317 	page_length = ISSET(flags, MPII_PG_EXTENDED) ?
2318 	    lemtoh16(&ehdr->ext_page_length) : hdr->page_length;
2319 
2320 	if (len > sc->sc_request_size - sizeof(*cq) || len < page_length * 4)
2321 		return (1);
2322 
2323 	ccb = scsi_io_get(&sc->sc_iopool,
2324 	    ISSET(flags, MPII_PG_POLL) ? SCSI_NOSLEEP : 0);
2325 	if (ccb == NULL) {
2326 		DNPRINTF(MPII_D_MISC, "%s: mpii_cfg_page ccb_get\n",
2327 		    DEVNAME(sc));
2328 		return (1);
2329 	}
2330 
2331 	cq = ccb->ccb_cmd;
2332 
2333 	cq->function = MPII_FUNCTION_CONFIG;
2334 
2335 	cq->action = (read ? MPII_CONFIG_REQ_ACTION_PAGE_READ_CURRENT :
2336 	    MPII_CONFIG_REQ_ACTION_PAGE_WRITE_CURRENT);
2337 
2338 	if (ISSET(flags, MPII_PG_EXTENDED)) {
2339 		cq->config_header.page_version = ehdr->page_version;
2340 		cq->config_header.page_number = ehdr->page_number;
2341 		cq->config_header.page_type = ehdr->page_type;
2342 		cq->ext_page_len = ehdr->ext_page_length;
2343 		cq->ext_page_type = ehdr->ext_page_type;
2344 	} else
2345 		cq->config_header = *hdr;
2346 	cq->config_header.page_type &= MPII_CONFIG_REQ_PAGE_TYPE_MASK;
2347 	htolem32(&cq->page_address, address);
2348 	htolem32(&cq->page_buffer.sg_hdr, MPII_SGE_FL_TYPE_SIMPLE |
2349 	    MPII_SGE_FL_LAST | MPII_SGE_FL_EOB | MPII_SGE_FL_EOL |
2350 	    MPII_SGE_FL_SIZE_64 | (page_length * 4) |
2351 	    (read ? MPII_SGE_FL_DIR_IN : MPII_SGE_FL_DIR_OUT));
2352 
2353 	/* bounce the page via the request space to avoid more bus_dma games */
2354 	mpii_dvatosge(&cq->page_buffer, ccb->ccb_cmd_dva +
2355 	    sizeof(struct mpii_msg_config_request));
2356 
2357 	kva = ccb->ccb_cmd;
2358 	kva += sizeof(struct mpii_msg_config_request);
2359 
2360 	if (!read)
2361 		memcpy(kva, page, len);
2362 
2363 	ccb->ccb_done = mpii_empty_done;
2364 	if (ISSET(flags, MPII_PG_POLL)) {
2365 		if (mpii_poll(sc, ccb) != 0) {
2366 			DNPRINTF(MPII_D_MISC, "%s: mpii_cfg_header poll\n",
2367 			    DEVNAME(sc));
2368 			return (1);
2369 		}
2370 	} else
2371 		mpii_wait(sc, ccb);
2372 
2373 	if (ccb->ccb_rcb == NULL) {
2374 		scsi_io_put(&sc->sc_iopool, ccb);
2375 		return (1);
2376 	}
2377 	cp = ccb->ccb_rcb->rcb_reply;
2378 
2379 	DNPRINTF(MPII_D_MISC, "%s:  action: 0x%02x msg_length: %d "
2380 	    "function: 0x%02x\n", DEVNAME(sc), cp->action, cp->msg_length,
2381 	    cp->function);
2382 	DNPRINTF(MPII_D_MISC, "%s:  ext_page_length: %d ext_page_type: 0x%02x "
2383 	    "msg_flags: 0x%02x\n", DEVNAME(sc),
2384 	    lemtoh16(&cp->ext_page_length), cp->ext_page_type,
2385 	    cp->msg_flags);
2386 	DNPRINTF(MPII_D_MISC, "%s:  vp_id: 0x%02x vf_id: 0x%02x\n", DEVNAME(sc),
2387 	    cp->vp_id, cp->vf_id);
2388 	DNPRINTF(MPII_D_MISC, "%s:  ioc_status: 0x%04x\n", DEVNAME(sc),
2389 	    lemtoh16(&cp->ioc_status));
2390 	DNPRINTF(MPII_D_MISC, "%s:  ioc_loginfo: 0x%08x\n", DEVNAME(sc),
2391 	    lemtoh32(&cp->ioc_loginfo));
2392 	DNPRINTF(MPII_D_MISC, "%s:  page_version: 0x%02x page_length: %d "
2393 	    "page_number: 0x%02x page_type: 0x%02x\n", DEVNAME(sc),
2394 	    cp->config_header.page_version,
2395 	    cp->config_header.page_length,
2396 	    cp->config_header.page_number,
2397 	    cp->config_header.page_type);
2398 
2399 	if (lemtoh16(&cp->ioc_status) != MPII_IOCSTATUS_SUCCESS)
2400 		rv = 1;
2401 	else if (read)
2402 		memcpy(page, kva, len);
2403 
2404 	mpii_push_reply(sc, ccb->ccb_rcb);
2405 	scsi_io_put(&sc->sc_iopool, ccb);
2406 
2407 	return (rv);
2408 }
2409 
/*
 * Turn a reply post queue descriptor into the rcb tracking the reply
 * frame it refers to.  Returns NULL for descriptor types that do not
 * carry a reply frame address.  The descriptor slot is repainted with
 * 0xff (the "unused" pattern) so it can be consumed again.
 */
struct mpii_rcb *
mpii_reply(struct mpii_softc *sc, struct mpii_reply_descr *rdp)
{
	struct mpii_rcb		*rcb = NULL;
	u_int32_t		rfid;

	DNPRINTF(MPII_D_INTR, "%s: mpii_reply\n", DEVNAME(sc));

	if ((rdp->reply_flags & MPII_REPLY_DESCR_TYPE_MASK) ==
	    MPII_REPLY_DESCR_ADDRESS_REPLY) {
		/* convert the frame dva back into an index into sc_rcbs */
		rfid = (lemtoh32(&rdp->frame_addr) -
		    (u_int32_t)MPII_DMA_DVA(sc->sc_replies)) /
		    sc->sc_reply_size;

		/* make the IOC's writes to this frame visible to the cpu */
		bus_dmamap_sync(sc->sc_dmat,
		    MPII_DMA_MAP(sc->sc_replies), sc->sc_reply_size * rfid,
		    sc->sc_reply_size, BUS_DMASYNC_POSTREAD);

		rcb = &sc->sc_rcbs[rfid];
	}

	/* mark the post queue slot as empty again */
	memset(rdp, 0xff, sizeof(*rdp));

	/* 8 is the size of a reply descriptor on the post queue */
	bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_reply_postq),
	    8 * sc->sc_reply_post_host_index, 8,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	return (rcb);
}
2439 
/*
 * Allocate a chunk of zeroed, contiguous (single segment) dma-able
 * memory of the given size, create and load a map for it, and wrap it
 * all up in an mpii_dmamem.  Returns NULL on failure; the goto chain
 * unwinds whatever was set up before the failing step.
 */
struct mpii_dmamem *
mpii_dmamem_alloc(struct mpii_softc *sc, size_t size)
{
	struct mpii_dmamem	*mdm;
	int			nsegs;

	mdm = malloc(sizeof(*mdm), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (mdm == NULL)
		return (NULL);

	mdm->mdm_size = size;

	/* one segment so the device sees the chunk as one dva range */
	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &mdm->mdm_map) != 0)
		goto mdmfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &mdm->mdm_seg,
	    1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &mdm->mdm_seg, nsegs, size,
	    &mdm->mdm_kva, BUS_DMA_NOWAIT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, mdm->mdm_map, mdm->mdm_kva, size,
	    NULL, BUS_DMA_NOWAIT) != 0)
		goto unmap;

	return (mdm);

unmap:
	bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, size);
free:
	bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
mdmfree:
	free(mdm, M_DEVBUF, sizeof *mdm);

	return (NULL);
}
2481 
/*
 * Tear down an mpii_dmamem allocated with mpii_dmamem_alloc().
 * The teardown order mirrors the setup order in reverse: unload,
 * unmap, free the segment, destroy the map, then free the wrapper.
 */
void
mpii_dmamem_free(struct mpii_softc *sc, struct mpii_dmamem *mdm)
{
	DNPRINTF(MPII_D_MEM, "%s: mpii_dmamem_free %p\n", DEVNAME(sc), mdm);

	bus_dmamap_unload(sc->sc_dmat, mdm->mdm_map);
	bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, mdm->mdm_size);
	bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
	free(mdm, M_DEVBUF, sizeof *mdm);
}
2493 
2494 int
2495 mpii_insert_dev(struct mpii_softc *sc, struct mpii_device *dev)
2496 {
2497 	int		slot;	/* initial hint */
2498 
2499 	if (dev == NULL || dev->slot < 0)
2500 		return (1);
2501 	slot = dev->slot;
2502 
2503 	while (slot < sc->sc_max_devices && sc->sc_devs[slot] != NULL)
2504 		slot++;
2505 
2506 	if (slot >= sc->sc_max_devices)
2507 		return (1);
2508 
2509 	dev->slot = slot;
2510 	sc->sc_devs[slot] = dev;
2511 
2512 	return (0);
2513 }
2514 
2515 int
2516 mpii_remove_dev(struct mpii_softc *sc, struct mpii_device *dev)
2517 {
2518 	int			i;
2519 
2520 	if (dev == NULL)
2521 		return (1);
2522 
2523 	for (i = 0; i < sc->sc_max_devices; i++) {
2524 		if (sc->sc_devs[i] == NULL)
2525 			continue;
2526 
2527 		if (sc->sc_devs[i]->dev_handle == dev->dev_handle) {
2528 			sc->sc_devs[i] = NULL;
2529 			return (0);
2530 		}
2531 	}
2532 
2533 	return (1);
2534 }
2535 
2536 struct mpii_device *
2537 mpii_find_dev(struct mpii_softc *sc, u_int16_t handle)
2538 {
2539 	int			i;
2540 
2541 	for (i = 0; i < sc->sc_max_devices; i++) {
2542 		if (sc->sc_devs[i] == NULL)
2543 			continue;
2544 
2545 		if (sc->sc_devs[i]->dev_handle == handle)
2546 			return (sc->sc_devs[i]);
2547 	}
2548 
2549 	return (NULL);
2550 }
2551 
/*
 * Allocate the ccb array and the dma-able request frames, carve the
 * request memory up between the ccbs, and seed the iopool free list.
 * smid 0 is reserved, so sc_max_cmds frames yield sc_max_cmds - 1
 * usable ccbs.  Returns non-zero on failure after unwinding whatever
 * was already set up.
 */
int
mpii_alloc_ccbs(struct mpii_softc *sc)
{
	struct mpii_ccb		*ccb;
	u_int8_t		*cmd;
	int			i;

	SIMPLEQ_INIT(&sc->sc_ccb_free);
	SIMPLEQ_INIT(&sc->sc_ccb_tmos);
	mtx_init(&sc->sc_ccb_free_mtx, IPL_BIO);
	mtx_init(&sc->sc_ccb_mtx, IPL_BIO);
	scsi_ioh_set(&sc->sc_ccb_tmo_handler, &sc->sc_iopool,
	    mpii_scsi_cmd_tmo_handler, sc);

	sc->sc_ccbs = mallocarray((sc->sc_max_cmds-1), sizeof(*ccb),
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (sc->sc_ccbs == NULL) {
		printf("%s: unable to allocate ccbs\n", DEVNAME(sc));
		return (1);
	}

	sc->sc_requests = mpii_dmamem_alloc(sc,
	    sc->sc_request_size * sc->sc_max_cmds);
	if (sc->sc_requests == NULL) {
		printf("%s: unable to allocate ccb dmamem\n", DEVNAME(sc));
		goto free_ccbs;
	}
	cmd = MPII_DMA_KVA(sc->sc_requests);

	/*
	 * we have sc->sc_max_cmds system request message
	 * frames, but smid zero cannot be used. so we then
	 * have (sc->sc_max_cmds - 1) number of ccbs
	 */
	for (i = 1; i < sc->sc_max_cmds; i++) {
		ccb = &sc->sc_ccbs[i - 1];

		if (bus_dmamap_create(sc->sc_dmat, MAXPHYS, sc->sc_max_sgl,
		    MAXPHYS, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,
		    &ccb->ccb_dmamap) != 0) {
			printf("%s: unable to create dma map\n", DEVNAME(sc));
			goto free_maps;
		}

		/* the smid doubles as the index of the request frame */
		ccb->ccb_sc = sc;
		htolem16(&ccb->ccb_smid, i);
		ccb->ccb_offset = sc->sc_request_size * i;

		ccb->ccb_cmd = &cmd[ccb->ccb_offset];
		ccb->ccb_cmd_dva = (u_int32_t)MPII_DMA_DVA(sc->sc_requests) +
		    ccb->ccb_offset;

		DNPRINTF(MPII_D_CCB, "%s: mpii_alloc_ccbs(%d) ccb: %p map: %p "
		    "sc: %p smid: %#x offs: %#lx cmd: %p dva: %#lx\n",
		    DEVNAME(sc), i, ccb, ccb->ccb_dmamap, ccb->ccb_sc,
		    ccb->ccb_smid, ccb->ccb_offset, ccb->ccb_cmd,
		    ccb->ccb_cmd_dva);

		mpii_put_ccb(sc, ccb);
	}

	scsi_iopool_init(&sc->sc_iopool, sc, mpii_get_ccb, mpii_put_ccb);

	return (0);

free_maps:
	/* only ccbs already on the free list have maps to destroy */
	while ((ccb = mpii_get_ccb(sc)) != NULL)
		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);

	mpii_dmamem_free(sc, sc->sc_requests);
free_ccbs:
	free(sc->sc_ccbs, M_DEVBUF, (sc->sc_max_cmds-1) * sizeof(*ccb));

	return (1);
}
2628 
/*
 * iopool put callback: scrub a ccb and return it to the free list.
 * The request frame is zeroed here so code pulling a ccb out of the
 * pool can rely on a clean frame.
 */
void
mpii_put_ccb(void *cookie, void *io)
{
	struct mpii_softc	*sc = cookie;
	struct mpii_ccb		*ccb = io;

	DNPRINTF(MPII_D_CCB, "%s: mpii_put_ccb %p\n", DEVNAME(sc), ccb);

	ccb->ccb_state = MPII_CCB_FREE;
	ccb->ccb_cookie = NULL;
	ccb->ccb_done = NULL;
	ccb->ccb_rcb = NULL;
	memset(ccb->ccb_cmd, 0, sc->sc_request_size);

	/*
	 * NOTE(review): the kernel lock is dropped around the free list
	 * mutex, presumably to avoid holding both locks at once —
	 * confirm against the iopool calling conventions.
	 */
	KERNEL_UNLOCK();
	mtx_enter(&sc->sc_ccb_free_mtx);
	SIMPLEQ_INSERT_HEAD(&sc->sc_ccb_free, ccb, ccb_link);
	mtx_leave(&sc->sc_ccb_free_mtx);
	KERNEL_LOCK();
}
2649 
/*
 * iopool get callback: pop a ccb off the free list, or return NULL if
 * the pool is empty.  Mirrors the locking pattern of mpii_put_ccb():
 * the kernel lock is dropped around the free list mutex.
 */
void *
mpii_get_ccb(void *cookie)
{
	struct mpii_softc	*sc = cookie;
	struct mpii_ccb		*ccb;

	KERNEL_UNLOCK();

	mtx_enter(&sc->sc_ccb_free_mtx);
	ccb = SIMPLEQ_FIRST(&sc->sc_ccb_free);
	if (ccb != NULL) {
		SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_free, ccb_link);
		ccb->ccb_state = MPII_CCB_READY;
	}
	mtx_leave(&sc->sc_ccb_free_mtx);

	KERNEL_LOCK();

	DNPRINTF(MPII_D_CCB, "%s: mpii_get_ccb %p\n", DEVNAME(sc), ccb);

	return (ccb);
}
2672 
2673 int
2674 mpii_alloc_replies(struct mpii_softc *sc)
2675 {
2676 	DNPRINTF(MPII_D_MISC, "%s: mpii_alloc_replies\n", DEVNAME(sc));
2677 
2678 	sc->sc_rcbs = mallocarray(sc->sc_num_reply_frames,
2679 	    sizeof(struct mpii_rcb), M_DEVBUF, M_NOWAIT);
2680 	if (sc->sc_rcbs == NULL)
2681 		return (1);
2682 
2683 	sc->sc_replies = mpii_dmamem_alloc(sc, sc->sc_reply_size *
2684 	    sc->sc_num_reply_frames);
2685 	if (sc->sc_replies == NULL) {
2686 		free(sc->sc_rcbs, M_DEVBUF,
2687 		    sc->sc_num_reply_frames * sizeof(struct mpii_rcb));
2688 		return (1);
2689 	}
2690 
2691 	return (0);
2692 }
2693 
/*
 * Point each rcb at its slice of the reply dma memory and hand every
 * reply frame to the IOC via the reply free queue.
 */
void
mpii_push_replies(struct mpii_softc *sc)
{
	struct mpii_rcb		*rcb;
	caddr_t			kva = MPII_DMA_KVA(sc->sc_replies);
	int			i;

	/* prepare the whole reply area for writes by the IOC */
	bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_replies),
	    0, sc->sc_reply_size * sc->sc_num_reply_frames,
	    BUS_DMASYNC_PREREAD);

	for (i = 0; i < sc->sc_num_reply_frames; i++) {
		rcb = &sc->sc_rcbs[i];

		/* each rcb tracks both the kva and dva of its frame */
		rcb->rcb_reply = kva + sc->sc_reply_size * i;
		rcb->rcb_reply_dva = (u_int32_t)MPII_DMA_DVA(sc->sc_replies) +
		    sc->sc_reply_size * i;
		mpii_push_reply(sc, rcb);
	}
}
2714 
/*
 * Post a ccb's request to the IOC by writing its request descriptor to
 * the request descriptor post registers.  On LP64 the 64-bit
 * descriptor is written in one go; otherwise the two 32-bit halves are
 * written back to back under sc_req_mtx so posts from different
 * contexts cannot interleave.
 */
void
mpii_start(struct mpii_softc *sc, struct mpii_ccb *ccb)
{
	struct mpii_request_header	*rhp;
	struct mpii_request_descr	descr;
	u_long				 *rdp = (u_long *)&descr;

	DNPRINTF(MPII_D_RW, "%s: mpii_start %#lx\n", DEVNAME(sc),
	    ccb->ccb_cmd_dva);

	/* flush the request frame out to memory before the doorbell */
	bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_requests),
	    ccb->ccb_offset, sc->sc_request_size,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	ccb->ccb_state = MPII_CCB_QUEUED;

	rhp = ccb->ccb_cmd;

	memset(&descr, 0, sizeof(descr));

	/* pick the descriptor type matching the request's function */
	switch (rhp->function) {
	case MPII_FUNCTION_SCSI_IO_REQUEST:
		descr.request_flags = MPII_REQ_DESCR_SCSI_IO;
		descr.dev_handle = htole16(ccb->ccb_dev_handle);
		break;
	case MPII_FUNCTION_SCSI_TASK_MGMT:
		descr.request_flags = MPII_REQ_DESCR_HIGH_PRIORITY;
		break;
	default:
		descr.request_flags = MPII_REQ_DESCR_DEFAULT;
	}

	descr.vf_id = sc->sc_vf_id;
	descr.smid = ccb->ccb_smid;

	DNPRINTF(MPII_D_RW, "%s:   MPII_REQ_DESCR_POST_LOW (0x%08x) write "
	    "0x%08lx\n", DEVNAME(sc), MPII_REQ_DESCR_POST_LOW, *rdp);

	DNPRINTF(MPII_D_RW, "%s:   MPII_REQ_DESCR_POST_HIGH (0x%08x) write "
	    "0x%08lx\n", DEVNAME(sc), MPII_REQ_DESCR_POST_HIGH, *(rdp+1));

#if defined(__LP64__)
	bus_space_write_raw_8(sc->sc_iot, sc->sc_ioh,
	    MPII_REQ_DESCR_POST_LOW, *rdp);
#else
	mtx_enter(&sc->sc_req_mtx);
	bus_space_write_raw_4(sc->sc_iot, sc->sc_ioh,
	    MPII_REQ_DESCR_POST_LOW, rdp[0]);
	/* keep the low write ordered before the high write */
	bus_space_barrier(sc->sc_iot, sc->sc_ioh,
	    MPII_REQ_DESCR_POST_LOW, 8, BUS_SPACE_BARRIER_WRITE);

	bus_space_write_raw_4(sc->sc_iot, sc->sc_ioh,
	    MPII_REQ_DESCR_POST_HIGH, rdp[1]);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh,
	    MPII_REQ_DESCR_POST_LOW, 8, BUS_SPACE_BARRIER_WRITE);
	mtx_leave(&sc->sc_req_mtx);
#endif
}
2773 
/*
 * Submit a ccb and busy-wait for its completion by driving the
 * interrupt handler by hand.  The ccb's done handler and cookie are
 * temporarily replaced: mpii_poll_done() clears the rv flag we spin
 * on, after which the original done handler is invoked.  Always
 * returns 0 (it spins until the command completes).
 */
int
mpii_poll(struct mpii_softc *sc, struct mpii_ccb *ccb)
{
	void				(*done)(struct mpii_ccb *);
	void				*cookie;
	int				rv = 1;

	DNPRINTF(MPII_D_INTR, "%s: mpii_poll\n", DEVNAME(sc));

	/* borrow the done handler and cookie for the poll protocol */
	done = ccb->ccb_done;
	cookie = ccb->ccb_cookie;

	ccb->ccb_done = mpii_poll_done;
	ccb->ccb_cookie = &rv;

	mpii_start(sc, ccb);

	while (rv == 1) {
		/* avoid excessive polling */
		if (mpii_reply_waiting(sc))
			mpii_intr(sc);
		else
			delay(10);
	}

	/* restore the caller's cookie and fire its done handler */
	ccb->ccb_cookie = cookie;
	done(ccb);

	return (0);
}
2804 
2805 void
2806 mpii_poll_done(struct mpii_ccb *ccb)
2807 {
2808 	int				*rv = ccb->ccb_cookie;
2809 
2810 	*rv = 0;
2811 }
2812 
/*
 * Allocate the reply free queue and the reply post queue.  The free
 * queue is seeded with the dva of every reply frame; entries beyond
 * sc_num_reply_frames stay zero (the dma memory is allocated zeroed).
 * The post queue is painted with 0xff, the "empty descriptor" pattern.
 * Returns non-zero on failure.
 */
int
mpii_alloc_queues(struct mpii_softc *sc)
{
	u_int32_t		*rfp;
	int			i;

	DNPRINTF(MPII_D_MISC, "%s: mpii_alloc_queues\n", DEVNAME(sc));

	sc->sc_reply_freeq = mpii_dmamem_alloc(sc,
	    sc->sc_reply_free_qdepth * sizeof(*rfp));
	if (sc->sc_reply_freeq == NULL)
		return (1);
	rfp = MPII_DMA_KVA(sc->sc_reply_freeq);
	for (i = 0; i < sc->sc_num_reply_frames; i++) {
		rfp[i] = (u_int32_t)MPII_DMA_DVA(sc->sc_replies) +
		    sc->sc_reply_size * i;
	}

	sc->sc_reply_postq = mpii_dmamem_alloc(sc,
	    sc->sc_reply_post_qdepth * sizeof(struct mpii_reply_descr));
	if (sc->sc_reply_postq == NULL)
		goto free_reply_freeq;
	sc->sc_reply_postq_kva = MPII_DMA_KVA(sc->sc_reply_postq);
	memset(sc->sc_reply_postq_kva, 0xff, sc->sc_reply_post_qdepth *
	    sizeof(struct mpii_reply_descr));

	return (0);

free_reply_freeq:
	mpii_dmamem_free(sc, sc->sc_reply_freeq);
	return (1);
}
2845 
/*
 * Prime the host-side queue indices and publish them to the IOC.
 * The free queue index starts at the last slot (the queue was filled
 * by mpii_alloc_queues()); the post queue index starts at zero.
 */
void
mpii_init_queues(struct mpii_softc *sc)
{
	DNPRINTF(MPII_D_MISC, "%s:  mpii_init_queues\n", DEVNAME(sc));

	sc->sc_reply_free_host_index = sc->sc_reply_free_qdepth - 1;
	sc->sc_reply_post_host_index = 0;
	mpii_write_reply_free(sc, sc->sc_reply_free_host_index);
	mpii_write_reply_post(sc, sc->sc_reply_post_host_index);
}
2856 
/*
 * Submit a ccb and sleep until it completes.  The ccb's done handler
 * and cookie are borrowed for the duration: ccb_cookie points at a
 * local mutex, and mpii_wait_done() sets it back to NULL under that
 * mutex before waking us, which is the condition we sleep on.
 */
void
mpii_wait(struct mpii_softc *sc, struct mpii_ccb *ccb)
{
	struct mutex		mtx;
	void			(*done)(struct mpii_ccb *);
	void			*cookie;

	mtx_init(&mtx, IPL_BIO);

	done = ccb->ccb_done;
	cookie = ccb->ccb_cookie;

	ccb->ccb_done = mpii_wait_done;
	ccb->ccb_cookie = &mtx;

	/* XXX this will wait forever for the ccb to complete */

	mpii_start(sc, ccb);

	mtx_enter(&mtx);
	while (ccb->ccb_cookie != NULL)
		msleep_nsec(ccb, &mtx, PRIBIO, "mpiiwait", INFSLP);
	mtx_leave(&mtx);

	/* restore the caller's cookie and fire its done handler */
	ccb->ccb_cookie = cookie;
	done(ccb);
}
2884 
/*
 * Completion handler used by mpii_wait(): clear ccb_cookie under the
 * waiter's mutex (that is the "done" condition mpii_wait() checks)
 * and wake the waiter sleeping on the ccb.
 */
void
mpii_wait_done(struct mpii_ccb *ccb)
{
	struct mutex		*mtx = ccb->ccb_cookie;

	mtx_enter(mtx);
	ccb->ccb_cookie = NULL;
	mtx_leave(mtx);

	wakeup_one(ccb);
}
2896 
/*
 * scsi midlayer entry point: build a SCSI IO request from the xfer and
 * submit it.  Entered with the kernel lock held; the lock is dropped
 * while the request is set up and posted, and retaken before return.
 */
void
mpii_scsi_cmd(struct scsi_xfer *xs)
{
	struct scsi_link	*link = xs->sc_link;
	struct mpii_softc	*sc = link->bus->sb_adapter_softc;
	struct mpii_ccb		*ccb = xs->io;
	struct mpii_msg_scsi_io	*io;
	struct mpii_device	*dev;
	int			 ret;

	DNPRINTF(MPII_D_CMD, "%s: mpii_scsi_cmd\n", DEVNAME(sc));

	if (xs->cmdlen > MPII_CDB_LEN) {
		/* fake up an ILLEGAL REQUEST sense for an oversized cdb */
		DNPRINTF(MPII_D_CMD, "%s: CDB too big %d\n",
		    DEVNAME(sc), xs->cmdlen);
		memset(&xs->sense, 0, sizeof(xs->sense));
		xs->sense.error_code = SSD_ERRCODE_VALID | 0x70;
		xs->sense.flags = SKEY_ILLEGAL_REQUEST;
		xs->sense.add_sense_code = 0x20;
		xs->error = XS_SENSE;
		scsi_done(xs);
		return;
	}

	if ((dev = sc->sc_devs[link->target]) == NULL) {
		/* device no longer exists */
		xs->error = XS_SELTIMEOUT;
		scsi_done(xs);
		return;
	}

	KERNEL_UNLOCK();

	DNPRINTF(MPII_D_CMD, "%s: ccb_smid: %d xs->flags: 0x%x\n",
	    DEVNAME(sc), ccb->ccb_smid, xs->flags);

	ccb->ccb_cookie = xs;
	ccb->ccb_done = mpii_scsi_cmd_done;
	ccb->ccb_dev_handle = dev->dev_handle;

	io = ccb->ccb_cmd;
	memset(io, 0, sizeof(*io));
	io->function = MPII_FUNCTION_SCSI_IO_REQUEST;
	io->sense_buffer_length = sizeof(xs->sense);
	io->sgl_offset0 = sizeof(struct mpii_msg_scsi_io) / 4;	/* in dwords */
	htolem16(&io->io_flags, xs->cmdlen);
	htolem16(&io->dev_handle, ccb->ccb_dev_handle);
	htobem16(&io->lun[0], link->lun);

	switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
	case SCSI_DATA_IN:
		io->direction = MPII_SCSIIO_DIR_READ;
		break;
	case SCSI_DATA_OUT:
		io->direction = MPII_SCSIIO_DIR_WRITE;
		break;
	default:
		io->direction = MPII_SCSIIO_DIR_NONE;
		break;
	}

	io->tagging = MPII_SCSIIO_ATTR_SIMPLE_Q;

	memcpy(io->cdb, &xs->cmd, xs->cmdlen);

	htolem32(&io->data_length, xs->datalen);

	/* sense data is at the end of a request */
	htolem32(&io->sense_buffer_low_address, ccb->ccb_cmd_dva +
	    sc->sc_request_size - sizeof(struct scsi_sense_data));

	/* map the data buffer into the request's sgl */
	if (ISSET(sc->sc_flags, MPII_F_SAS3))
		ret = mpii_load_xs_sas3(ccb);
	else
		ret = mpii_load_xs(ccb);

	if (ret != 0) {
		xs->error = XS_DRIVER_STUFFUP;
		goto done;
	}

	timeout_set(&xs->stimeout, mpii_scsi_cmd_tmo, ccb);
	if (xs->flags & SCSI_POLL) {
		if (mpii_poll(sc, ccb) != 0) {
			xs->error = XS_DRIVER_STUFFUP;
			goto done;
		}
	} else {
		/* arm the command timeout before posting the request */
		timeout_add_msec(&xs->stimeout, xs->timeout);
		mpii_start(sc, ccb);
	}

	KERNEL_LOCK();
	return;

done:
	KERNEL_LOCK();
	scsi_done(xs);
}
2996 
/*
 * Per-command timeout.  If the ccb is still queued, move it onto the
 * timed-out list and schedule the timeout handler, which will issue a
 * target reset for it from iopool context.
 */
void
mpii_scsi_cmd_tmo(void *xccb)
{
	struct mpii_ccb		*ccb = xccb;
	struct mpii_softc	*sc = ccb->ccb_sc;

	printf("%s: mpii_scsi_cmd_tmo (0x%08x)\n", DEVNAME(sc),
	    mpii_read_db(sc));

	/* only ccbs that are still queued get queued for recovery */
	mtx_enter(&sc->sc_ccb_mtx);
	if (ccb->ccb_state == MPII_CCB_QUEUED) {
		ccb->ccb_state = MPII_CCB_TIMEOUT;
		SIMPLEQ_INSERT_HEAD(&sc->sc_ccb_tmos, ccb, ccb_link);
	}
	mtx_leave(&sc->sc_ccb_mtx);

	scsi_ioh_add(&sc->sc_ccb_tmo_handler);
}
3015 
/*
 * iopool handler for command timeouts.  tccb is a fresh ccb supplied
 * by the iopool; it is used to send a target reset for the first ccb
 * on the timed-out list.  When the reset completes,
 * mpii_scsi_cmd_tmo_done() re-enters this function with the same tccb
 * to service any further timed-out ccbs, and the tccb is returned to
 * the pool once the list is empty.
 */
void
mpii_scsi_cmd_tmo_handler(void *cookie, void *io)
{
	struct mpii_softc			*sc = cookie;
	struct mpii_ccb				*tccb = io;
	struct mpii_ccb				*ccb;
	struct mpii_msg_scsi_task_request	*stq;

	mtx_enter(&sc->sc_ccb_mtx);
	ccb = SIMPLEQ_FIRST(&sc->sc_ccb_tmos);
	if (ccb != NULL) {
		SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_tmos, ccb_link);
		ccb->ccb_state = MPII_CCB_QUEUED;
	}
	/* should remove any other ccbs for the same dev handle */
	mtx_leave(&sc->sc_ccb_mtx);

	if (ccb == NULL) {
		/* nothing left to recover; release the task mgmt ccb */
		scsi_io_put(&sc->sc_iopool, tccb);
		return;
	}

	stq = tccb->ccb_cmd;
	stq->function = MPII_FUNCTION_SCSI_TASK_MGMT;
	stq->task_type = MPII_SCSI_TASK_TARGET_RESET;
	htolem16(&stq->dev_handle, ccb->ccb_dev_handle);

	tccb->ccb_done = mpii_scsi_cmd_tmo_done;
	mpii_start(sc, tccb);
}
3046 
/*
 * Target reset completion: loop back into the timeout handler with
 * the same tccb so any remaining timed-out ccbs are serviced (or the
 * tccb is released if the list is empty).
 */
void
mpii_scsi_cmd_tmo_done(struct mpii_ccb *tccb)
{
	mpii_scsi_cmd_tmo_handler(tccb->ccb_sc, tccb);
}
3052 
/*
 * Completion path for a SCSI I/O ccb: unlink the ccb from the timeout
 * queue if a timeout had been recorded for it, sync and unload its DMA
 * map, translate the IOC/SCSI status from the reply frame into
 * scsi_xfer error codes and hand the xfer back to the SCSI midlayer.
 */
void
mpii_scsi_cmd_done(struct mpii_ccb *ccb)
{
	struct mpii_ccb		*tccb;
	struct mpii_msg_scsi_io_error	*sie;
	struct mpii_softc	*sc = ccb->ccb_sc;
	struct scsi_xfer	*xs = ccb->ccb_cookie;
	struct scsi_sense_data	*sense;
	bus_dmamap_t		dmap = ccb->ccb_dmamap;

	/* The command has completed, stop its timeout. */
	timeout_del(&xs->stimeout);
	mtx_enter(&sc->sc_ccb_mtx);
	if (ccb->ccb_state == MPII_CCB_TIMEOUT) {
		/* ENOSIMPLEQ_REMOVE :( */
		/*
		 * SIMPLEQ has no generic remove operation, so walk the
		 * timeout queue by hand to find the predecessor and
		 * unlink this ccb.
		 */
		if (ccb == SIMPLEQ_FIRST(&sc->sc_ccb_tmos))
			SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_tmos, ccb_link);
		else {
			SIMPLEQ_FOREACH(tccb, &sc->sc_ccb_tmos, ccb_link) {
				if (SIMPLEQ_NEXT(tccb, ccb_link) == ccb) {
					SIMPLEQ_REMOVE_AFTER(&sc->sc_ccb_tmos,
					    tccb, ccb_link);
					break;
				}
			}
		}
	}

	ccb->ccb_state = MPII_CCB_READY;
	mtx_leave(&sc->sc_ccb_mtx);

	if (xs->datalen != 0) {
		bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
		    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_POSTREAD :
		    BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat, dmap);
	}

	xs->error = XS_NOERROR;
	xs->resid = 0;

	if (ccb->ccb_rcb == NULL) {
		/* no scsi error, we're ok so drop out early */
		xs->status = SCSI_OK;
		goto done;
	}

	/* A reply frame was posted, so the IOC reported an error. */
	sie = ccb->ccb_rcb->rcb_reply;

	DNPRINTF(MPII_D_CMD, "%s: mpii_scsi_cmd_done xs cmd: 0x%02x len: %d "
	    "flags 0x%x\n", DEVNAME(sc), xs->cmd.opcode, xs->datalen,
	    xs->flags);
	DNPRINTF(MPII_D_CMD, "%s:  dev_handle: %d msg_length: %d "
	    "function: 0x%02x\n", DEVNAME(sc), lemtoh16(&sie->dev_handle),
	    sie->msg_length, sie->function);
	DNPRINTF(MPII_D_CMD, "%s:  vp_id: 0x%02x vf_id: 0x%02x\n", DEVNAME(sc),
	    sie->vp_id, sie->vf_id);
	DNPRINTF(MPII_D_CMD, "%s:  scsi_status: 0x%02x scsi_state: 0x%02x "
	    "ioc_status: 0x%04x\n", DEVNAME(sc), sie->scsi_status,
	    sie->scsi_state, lemtoh16(&sie->ioc_status));
	DNPRINTF(MPII_D_CMD, "%s:  ioc_loginfo: 0x%08x\n", DEVNAME(sc),
	    lemtoh32(&sie->ioc_loginfo));
	DNPRINTF(MPII_D_CMD, "%s:  transfer_count: %d\n", DEVNAME(sc),
	    lemtoh32(&sie->transfer_count));
	DNPRINTF(MPII_D_CMD, "%s:  sense_count: %d\n", DEVNAME(sc),
	    lemtoh32(&sie->sense_count));
	DNPRINTF(MPII_D_CMD, "%s:  response_info: 0x%08x\n", DEVNAME(sc),
	    lemtoh32(&sie->response_info));
	DNPRINTF(MPII_D_CMD, "%s:  task_tag: 0x%04x\n", DEVNAME(sc),
	    lemtoh16(&sie->task_tag));
	DNPRINTF(MPII_D_CMD, "%s:  bidirectional_transfer_count: 0x%08x\n",
	    DEVNAME(sc), lemtoh32(&sie->bidirectional_transfer_count));

	if (sie->scsi_state & MPII_SCSIIO_STATE_NO_SCSI_STATUS)
		xs->status = SCSI_TERMINATED;
	else
		xs->status = sie->scsi_status;
	xs->resid = 0;

	/* Map the IOC completion status onto scsi_xfer error codes. */
	switch (lemtoh16(&sie->ioc_status) & MPII_IOCSTATUS_MASK) {
	case MPII_IOCSTATUS_SCSI_DATA_UNDERRUN:
		/* Short transfer: report how much data is missing. */
		xs->resid = xs->datalen - lemtoh32(&sie->transfer_count);
		/* FALLTHROUGH */

	case MPII_IOCSTATUS_SUCCESS:
	case MPII_IOCSTATUS_SCSI_RECOVERED_ERROR:
		switch (xs->status) {
		case SCSI_OK:
			xs->error = XS_NOERROR;
			break;

		case SCSI_CHECK:
			xs->error = XS_SENSE;
			break;

		case SCSI_BUSY:
		case SCSI_QUEUE_FULL:
			xs->error = XS_BUSY;
			break;

		default:
			xs->error = XS_DRIVER_STUFFUP;
		}
		break;

	case MPII_IOCSTATUS_BUSY:
	case MPII_IOCSTATUS_INSUFFICIENT_RESOURCES:
		xs->error = XS_BUSY;
		break;

	case MPII_IOCSTATUS_SCSI_IOC_TERMINATED:
	case MPII_IOCSTATUS_SCSI_TASK_TERMINATED:
		xs->error = XS_RESET;
		break;

	case MPII_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
	case MPII_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
		xs->error = XS_SELTIMEOUT;
		break;

	default:
		xs->error = XS_DRIVER_STUFFUP;
		break;
	}

	/* Autosense data is deposited at the tail of the request frame. */
	sense = (struct scsi_sense_data *)((caddr_t)ccb->ccb_cmd +
	    sc->sc_request_size - sizeof(*sense));
	if (sie->scsi_state & MPII_SCSIIO_STATE_AUTOSENSE_VALID)
		memcpy(&xs->sense, sense, sizeof(xs->sense));

	DNPRINTF(MPII_D_CMD, "%s:  xs err: %d status: %#x\n", DEVNAME(sc),
	    xs->error, xs->status);

	/* Return the reply frame to the IOC's free queue. */
	mpii_push_reply(sc, ccb->ccb_rcb);
done:
	KERNEL_LOCK();
	scsi_done(xs);
	KERNEL_UNLOCK();
}
3192 
3193 int
3194 mpii_scsi_ioctl(struct scsi_link *link, u_long cmd, caddr_t addr, int flag)
3195 {
3196 	struct mpii_softc	*sc = link->bus->sb_adapter_softc;
3197 	struct mpii_device	*dev = sc->sc_devs[link->target];
3198 
3199 	DNPRINTF(MPII_D_IOCTL, "%s: mpii_scsi_ioctl\n", DEVNAME(sc));
3200 
3201 	switch (cmd) {
3202 	case DIOCGCACHE:
3203 	case DIOCSCACHE:
3204 		if (dev != NULL && ISSET(dev->flags, MPII_DF_VOLUME)) {
3205 			return (mpii_ioctl_cache(link, cmd,
3206 			    (struct dk_cache *)addr));
3207 		}
3208 		break;
3209 
3210 	default:
3211 		if (sc->sc_ioctl)
3212 			return (sc->sc_ioctl(&sc->sc_dev, cmd, addr));
3213 
3214 		break;
3215 	}
3216 
3217 	return (ENOTTY);
3218 }
3219 
3220 int
3221 mpii_ioctl_cache(struct scsi_link *link, u_long cmd, struct dk_cache *dc)
3222 {
3223 	struct mpii_softc *sc = link->bus->sb_adapter_softc;
3224 	struct mpii_device *dev = sc->sc_devs[link->target];
3225 	struct mpii_cfg_raid_vol_pg0 *vpg;
3226 	struct mpii_msg_raid_action_request *req;
3227 	struct mpii_msg_raid_action_reply *rep;
3228 	struct mpii_cfg_hdr hdr;
3229 	struct mpii_ccb	*ccb;
3230 	u_int32_t addr = MPII_CFG_RAID_VOL_ADDR_HANDLE | dev->dev_handle;
3231 	size_t pagelen;
3232 	int rv = 0;
3233 	int enabled;
3234 
3235 	if (mpii_req_cfg_header(sc, MPII_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 0,
3236 	    addr, MPII_PG_POLL, &hdr) != 0)
3237 		return (EINVAL);
3238 
3239 	pagelen = hdr.page_length * 4;
3240 	vpg = malloc(pagelen, M_TEMP, M_WAITOK | M_CANFAIL | M_ZERO);
3241 	if (vpg == NULL)
3242 		return (ENOMEM);
3243 
3244 	if (mpii_req_cfg_page(sc, addr, MPII_PG_POLL, &hdr, 1,
3245 	    vpg, pagelen) != 0) {
3246 		rv = EINVAL;
3247 		goto done;
3248 	}
3249 
3250 	enabled = ((lemtoh16(&vpg->volume_settings) &
3251 	    MPII_CFG_RAID_VOL_0_SETTINGS_CACHE_MASK) ==
3252 	    MPII_CFG_RAID_VOL_0_SETTINGS_CACHE_ENABLED) ? 1 : 0;
3253 
3254 	if (cmd == DIOCGCACHE) {
3255 		dc->wrcache = enabled;
3256 		dc->rdcache = 0;
3257 		goto done;
3258 	} /* else DIOCSCACHE */
3259 
3260 	if (dc->rdcache) {
3261 		rv = EOPNOTSUPP;
3262 		goto done;
3263 	}
3264 
3265 	if (((dc->wrcache) ? 1 : 0) == enabled)
3266 		goto done;
3267 
3268 	ccb = scsi_io_get(&sc->sc_iopool, SCSI_POLL);
3269 	if (ccb == NULL) {
3270 		rv = ENOMEM;
3271 		goto done;
3272 	}
3273 
3274 	ccb->ccb_done = mpii_empty_done;
3275 
3276 	req = ccb->ccb_cmd;
3277 	memset(req, 0, sizeof(*req));
3278 	req->function = MPII_FUNCTION_RAID_ACTION;
3279 	req->action = MPII_RAID_ACTION_CHANGE_VOL_WRITE_CACHE;
3280 	htolem16(&req->vol_dev_handle, dev->dev_handle);
3281 	htolem32(&req->action_data, dc->wrcache ?
3282 	    MPII_RAID_VOL_WRITE_CACHE_ENABLE :
3283 	    MPII_RAID_VOL_WRITE_CACHE_DISABLE);
3284 
3285 	if (mpii_poll(sc, ccb) != 0) {
3286 		rv = EIO;
3287 		goto done;
3288 	}
3289 
3290 	if (ccb->ccb_rcb != NULL) {
3291 		rep = ccb->ccb_rcb->rcb_reply;
3292 		if ((rep->ioc_status != MPII_IOCSTATUS_SUCCESS) ||
3293 		    ((rep->action_data[0] &
3294 		     MPII_RAID_VOL_WRITE_CACHE_MASK) !=
3295 		    (dc->wrcache ? MPII_RAID_VOL_WRITE_CACHE_ENABLE :
3296 		     MPII_RAID_VOL_WRITE_CACHE_DISABLE)))
3297 			rv = EINVAL;
3298 		mpii_push_reply(sc, ccb->ccb_rcb);
3299 	}
3300 
3301 	scsi_io_put(&sc->sc_iopool, ccb);
3302 
3303 done:
3304 	free(vpg, M_TEMP, pagelen);
3305 	return (rv);
3306 }
3307 
3308 #if NBIO > 0
3309 int
3310 mpii_ioctl(struct device *dev, u_long cmd, caddr_t addr)
3311 {
3312 	struct mpii_softc	*sc = (struct mpii_softc *)dev;
3313 	int			error = 0;
3314 
3315 	DNPRINTF(MPII_D_IOCTL, "%s: mpii_ioctl ", DEVNAME(sc));
3316 
3317 	switch (cmd) {
3318 	case BIOCINQ:
3319 		DNPRINTF(MPII_D_IOCTL, "inq\n");
3320 		error = mpii_ioctl_inq(sc, (struct bioc_inq *)addr);
3321 		break;
3322 	case BIOCVOL:
3323 		DNPRINTF(MPII_D_IOCTL, "vol\n");
3324 		error = mpii_ioctl_vol(sc, (struct bioc_vol *)addr);
3325 		break;
3326 	case BIOCDISK:
3327 		DNPRINTF(MPII_D_IOCTL, "disk\n");
3328 		error = mpii_ioctl_disk(sc, (struct bioc_disk *)addr);
3329 		break;
3330 	default:
3331 		DNPRINTF(MPII_D_IOCTL, " invalid ioctl\n");
3332 		error = ENOTTY;
3333 	}
3334 
3335 	return (error);
3336 }
3337 
3338 int
3339 mpii_ioctl_inq(struct mpii_softc *sc, struct bioc_inq *bi)
3340 {
3341 	int			i;
3342 
3343 	DNPRINTF(MPII_D_IOCTL, "%s: mpii_ioctl_inq\n", DEVNAME(sc));
3344 
3345 	strlcpy(bi->bi_dev, DEVNAME(sc), sizeof(bi->bi_dev));
3346 	for (i = 0; i < sc->sc_max_devices; i++)
3347 		if (sc->sc_devs[i] &&
3348 		    ISSET(sc->sc_devs[i]->flags, MPII_DF_VOLUME))
3349 			bi->bi_novol++;
3350 	return (0);
3351 }
3352 
/*
 * BIOCVOL: fill in a bioc_vol structure for the RAID volume given by
 * bv->bv_volid.  Volume status, RAID level, disk count (physical disks
 * plus hot spares), size and the attached scsi device name are taken
 * from RAID volume page 0 and the config walk in mpii_bio_hs().
 */
int
mpii_ioctl_vol(struct mpii_softc *sc, struct bioc_vol *bv)
{
	struct mpii_cfg_raid_vol_pg0	*vpg;
	struct mpii_cfg_hdr		hdr;
	struct mpii_device		*dev;
	struct scsi_link		*lnk;
	struct device			*scdev;
	size_t				pagelen;
	u_int16_t			volh;
	int				rv, hcnt = 0;

	DNPRINTF(MPII_D_IOCTL, "%s: mpii_ioctl_vol %d\n",
	    DEVNAME(sc), bv->bv_volid);

	/* Resolve the bio volume id to the firmware device handle. */
	if ((dev = mpii_find_vol(sc, bv->bv_volid)) == NULL)
		return (ENODEV);
	volh = dev->dev_handle;

	if (mpii_req_cfg_header(sc, MPII_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 0,
	    MPII_CFG_RAID_VOL_ADDR_HANDLE | volh, 0, &hdr) != 0) {
		printf("%s: unable to fetch header for raid volume page 0\n",
		    DEVNAME(sc));
		return (EINVAL);
	}

	/* page_length is in 32-bit dwords. */
	pagelen = hdr.page_length * 4;
	vpg = malloc(pagelen, M_TEMP, M_WAITOK | M_CANFAIL | M_ZERO);
	if (vpg == NULL) {
		printf("%s: unable to allocate space for raid "
		    "volume page 0\n", DEVNAME(sc));
		return (ENOMEM);
	}

	if (mpii_req_cfg_page(sc, MPII_CFG_RAID_VOL_ADDR_HANDLE | volh, 0,
	    &hdr, 1, vpg, pagelen) != 0) {
		printf("%s: unable to fetch raid volume page 0\n",
		    DEVNAME(sc));
		free(vpg, M_TEMP, pagelen);
		return (EINVAL);
	}

	/* Map the firmware volume state onto bio(4) status values. */
	switch (vpg->volume_state) {
	case MPII_CFG_RAID_VOL_0_STATE_ONLINE:
	case MPII_CFG_RAID_VOL_0_STATE_OPTIMAL:
		bv->bv_status = BIOC_SVONLINE;
		break;
	case MPII_CFG_RAID_VOL_0_STATE_DEGRADED:
		/* A degraded volume that is resyncing counts as rebuilding. */
		if (ISSET(lemtoh32(&vpg->volume_status),
		    MPII_CFG_RAID_VOL_0_STATUS_RESYNC)) {
			bv->bv_status = BIOC_SVREBUILD;
			bv->bv_percent = dev->percent;
		} else
			bv->bv_status = BIOC_SVDEGRADED;
		break;
	case MPII_CFG_RAID_VOL_0_STATE_FAILED:
		bv->bv_status = BIOC_SVOFFLINE;
		break;
	case MPII_CFG_RAID_VOL_0_STATE_INITIALIZING:
		bv->bv_status = BIOC_SVBUILDING;
		break;
	case MPII_CFG_RAID_VOL_0_STATE_MISSING:
	default:
		bv->bv_status = BIOC_SVINVALID;
		break;
	}

	/* Report the RAID level. */
	switch (vpg->volume_type) {
	case MPII_CFG_RAID_VOL_0_TYPE_RAID0:
		bv->bv_level = 0;
		break;
	case MPII_CFG_RAID_VOL_0_TYPE_RAID1:
		bv->bv_level = 1;
		break;
	case MPII_CFG_RAID_VOL_0_TYPE_RAID1E:
		bv->bv_level = 0x1E;
		break;
	case MPII_CFG_RAID_VOL_0_TYPE_RAID10:
		bv->bv_level = 10;
		break;
	default:
		bv->bv_level = -1;
	}

	/* Count the hot spares assigned to this volume's pool. */
	if ((rv = mpii_bio_hs(sc, NULL, 0, vpg->hot_spare_pool, &hcnt)) != 0) {
		free(vpg, M_TEMP, pagelen);
		return (rv);
	}

	bv->bv_nodisk = vpg->num_phys_disks + hcnt;

	bv->bv_size = letoh64(vpg->max_lba) * lemtoh16(&vpg->block_size);

	/* Report the name of the attached scsi device, if any. */
	lnk = scsi_get_link(sc->sc_scsibus, dev->slot, 0);
	if (lnk != NULL) {
		scdev = lnk->device_softc;
		strlcpy(bv->bv_dev, scdev->dv_xname, sizeof(bv->bv_dev));
	}

	free(vpg, M_TEMP, pagelen);
	return (0);
}
3455 
/*
 * BIOCDISK: fill in a bioc_disk structure for disk bd->bd_diskid of
 * volume bd->bd_volid.  Disk ids beyond the volume's physical disks
 * address hot spares and are delegated to mpii_bio_hs().
 */
int
mpii_ioctl_disk(struct mpii_softc *sc, struct bioc_disk *bd)
{
	struct mpii_cfg_raid_vol_pg0		*vpg;
	struct mpii_cfg_raid_vol_pg0_physdisk	*pd;
	struct mpii_cfg_hdr			hdr;
	struct mpii_device			*dev;
	size_t					pagelen;
	u_int16_t				volh;
	u_int8_t				dn;

	DNPRINTF(MPII_D_IOCTL, "%s: mpii_ioctl_disk %d/%d\n",
	    DEVNAME(sc), bd->bd_volid, bd->bd_diskid);

	/* Resolve the bio volume id to the firmware device handle. */
	if ((dev = mpii_find_vol(sc, bd->bd_volid)) == NULL)
		return (ENODEV);
	volh = dev->dev_handle;

	if (mpii_req_cfg_header(sc, MPII_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 0,
	    MPII_CFG_RAID_VOL_ADDR_HANDLE | volh, 0, &hdr) != 0) {
		printf("%s: unable to fetch header for raid volume page 0\n",
		    DEVNAME(sc));
		return (EINVAL);
	}

	/* page_length is in 32-bit dwords. */
	pagelen = hdr.page_length * 4;
	vpg = malloc(pagelen, M_TEMP, M_WAITOK | M_CANFAIL | M_ZERO);
	if (vpg == NULL) {
		printf("%s: unable to allocate space for raid "
		    "volume page 0\n", DEVNAME(sc));
		return (ENOMEM);
	}

	if (mpii_req_cfg_page(sc, MPII_CFG_RAID_VOL_ADDR_HANDLE | volh, 0,
	    &hdr, 1, vpg, pagelen) != 0) {
		printf("%s: unable to fetch raid volume page 0\n",
		    DEVNAME(sc));
		free(vpg, M_TEMP, pagelen);
		return (EINVAL);
	}

	/* Ids past the volume's physical disks refer to hot spares. */
	if (bd->bd_diskid >= vpg->num_phys_disks) {
		int		nvdsk = vpg->num_phys_disks;
		int		hsmap = vpg->hot_spare_pool;

		free(vpg, M_TEMP, pagelen);
		return (mpii_bio_hs(sc, bd, nvdsk, hsmap, NULL));
	}

	/* The physdisk entries follow the fixed part of the page. */
	pd = (struct mpii_cfg_raid_vol_pg0_physdisk *)(vpg + 1) +
	    bd->bd_diskid;
	dn = pd->phys_disk_num;

	free(vpg, M_TEMP, pagelen);
	return (mpii_bio_disk(sc, bd, dn));
}
3512 
/*
 * Walk the active RAID configuration page looking for hot spare disks
 * in the pool given by hsmap.  If bd is not NULL and its diskid
 * addresses a hot spare (ids past the volume's nvdsk physical disks),
 * fill it in via mpii_bio_disk().  If hscnt is not NULL, the number
 * of hot spares found is returned through it.
 */
int
mpii_bio_hs(struct mpii_softc *sc, struct bioc_disk *bd, int nvdsk,
     int hsmap, int *hscnt)
{
	struct mpii_cfg_raid_config_pg0	*cpg;
	struct mpii_raid_config_element	*el;
	struct mpii_ecfg_hdr		ehdr;
	size_t				pagelen;
	int				i, nhs = 0;

	if (bd)
		DNPRINTF(MPII_D_IOCTL, "%s: mpii_bio_hs %d\n", DEVNAME(sc),
		    bd->bd_diskid - nvdsk);
	else
		DNPRINTF(MPII_D_IOCTL, "%s: mpii_bio_hs\n", DEVNAME(sc));

	/* This is an extended config page, hence the MPII_PG_EXTENDED. */
	if (mpii_req_cfg_header(sc, MPII_CONFIG_REQ_PAGE_TYPE_RAID_CONFIG,
	    0, MPII_CFG_RAID_CONFIG_ACTIVE_CONFIG, MPII_PG_EXTENDED,
	    &ehdr) != 0) {
		printf("%s: unable to fetch header for raid config page 0\n",
		    DEVNAME(sc));
		return (EINVAL);
	}

	/* ext_page_length is in 32-bit dwords. */
	pagelen = lemtoh16(&ehdr.ext_page_length) * 4;
	cpg = malloc(pagelen, M_TEMP, M_WAITOK | M_CANFAIL | M_ZERO);
	if (cpg == NULL) {
		printf("%s: unable to allocate space for raid config page 0\n",
		    DEVNAME(sc));
		return (ENOMEM);
	}

	if (mpii_req_cfg_page(sc, MPII_CFG_RAID_CONFIG_ACTIVE_CONFIG,
	    MPII_PG_EXTENDED, &ehdr, 1, cpg, pagelen) != 0) {
		printf("%s: unable to fetch raid config page 0\n",
		    DEVNAME(sc));
		free(cpg, M_TEMP, pagelen);
		return (EINVAL);
	}

	/* The config elements follow the fixed part of the page. */
	el = (struct mpii_raid_config_element *)(cpg + 1);
	for (i = 0; i < cpg->num_elements; i++, el++) {
		if (ISSET(lemtoh16(&el->element_flags),
		    MPII_RAID_CONFIG_ELEMENT_FLAG_HSP_PHYS_DISK) &&
		    el->hot_spare_pool == hsmap) {
			/*
			 * diskid comparison is based on the idea that all
			 * disks are counted by the bio(4) in sequence, thus
			 * subtracting the number of disks in the volume
			 * from the diskid yields us a "relative" hotspare
			 * number, which is good enough for us.
			 */
			if (bd != NULL && bd->bd_diskid == nhs + nvdsk) {
				u_int8_t dn = el->phys_disk_num;

				free(cpg, M_TEMP, pagelen);
				return (mpii_bio_disk(sc, bd, dn));
			}
			nhs++;
		}
	}

	if (hscnt)
		*hscnt = nhs;

	free(cpg, M_TEMP, pagelen);
	return (0);
}
3581 
/*
 * Fill in a bioc_disk structure for physical disk number dn using RAID
 * physical disk page 0: bio(4) status, size, vendor/product string and
 * serial number.
 */
int
mpii_bio_disk(struct mpii_softc *sc, struct bioc_disk *bd, u_int8_t dn)
{
	struct mpii_cfg_raid_physdisk_pg0	*ppg;
	struct mpii_cfg_hdr			hdr;
	struct mpii_device			*dev;
	int					len;

	DNPRINTF(MPII_D_IOCTL, "%s: mpii_bio_disk %d\n", DEVNAME(sc),
	    bd->bd_diskid);

	ppg = malloc(sizeof(*ppg), M_TEMP, M_WAITOK | M_CANFAIL | M_ZERO);
	if (ppg == NULL) {
		printf("%s: unable to allocate space for raid physical disk "
		    "page 0\n", DEVNAME(sc));
		return (ENOMEM);
	}

	/* Build the config header by hand; the page has a fixed layout. */
	hdr.page_version = 0;
	hdr.page_length = sizeof(*ppg) / 4;
	hdr.page_number = 0;
	hdr.page_type = MPII_CONFIG_REQ_PAGE_TYPE_RAID_PD;

	if (mpii_req_cfg_page(sc, MPII_CFG_RAID_PHYS_DISK_ADDR_NUMBER | dn, 0,
	    &hdr, 1, ppg, sizeof(*ppg)) != 0) {
		printf("%s: unable to fetch raid drive page 0\n",
		    DEVNAME(sc));
		free(ppg, M_TEMP, sizeof(*ppg));
		return (EINVAL);
	}

	bd->bd_target = ppg->phys_disk_num;

	/* A disk without a device entry is reported but marked invalid. */
	if ((dev = mpii_find_dev(sc, lemtoh16(&ppg->dev_handle))) == NULL) {
		bd->bd_status = BIOC_SDINVALID;
		free(ppg, M_TEMP, sizeof(*ppg));
		return (0);
	}

	/* Map the firmware physical disk state onto bio(4) status values. */
	switch (ppg->phys_disk_state) {
	case MPII_CFG_RAID_PHYDISK_0_STATE_ONLINE:
	case MPII_CFG_RAID_PHYDISK_0_STATE_OPTIMAL:
		bd->bd_status = BIOC_SDONLINE;
		break;
	case MPII_CFG_RAID_PHYDISK_0_STATE_OFFLINE:
		if (ppg->offline_reason ==
		    MPII_CFG_RAID_PHYDISK_0_OFFLINE_FAILED ||
		    ppg->offline_reason ==
		    MPII_CFG_RAID_PHYDISK_0_OFFLINE_FAILEDREQ)
			bd->bd_status = BIOC_SDFAILED;
		else
			bd->bd_status = BIOC_SDOFFLINE;
		break;
	case MPII_CFG_RAID_PHYDISK_0_STATE_DEGRADED:
		bd->bd_status = BIOC_SDFAILED;
		break;
	case MPII_CFG_RAID_PHYDISK_0_STATE_REBUILDING:
		bd->bd_status = BIOC_SDREBUILD;
		break;
	case MPII_CFG_RAID_PHYDISK_0_STATE_HOTSPARE:
		bd->bd_status = BIOC_SDHOTSPARE;
		break;
	case MPII_CFG_RAID_PHYDISK_0_STATE_NOTCONFIGURED:
		bd->bd_status = BIOC_SDUNUSED;
		break;
	case MPII_CFG_RAID_PHYDISK_0_STATE_NOTCOMPATIBLE:
	default:
		bd->bd_status = BIOC_SDINVALID;
		break;
	}

	bd->bd_size = letoh64(ppg->dev_max_lba) * lemtoh16(&ppg->block_size);

	/*
	 * Join the vendor and product ids with a space into bd_vendor.
	 * NOTE(review): assumes bd_vendor is large enough for both
	 * strings plus the separator -- verify against bioc.h sizing.
	 */
	scsi_strvis(bd->bd_vendor, ppg->vendor_id, sizeof(ppg->vendor_id));
	len = strlen(bd->bd_vendor);
	bd->bd_vendor[len] = ' ';
	scsi_strvis(&bd->bd_vendor[len + 1], ppg->product_id,
	    sizeof(ppg->product_id));
	scsi_strvis(bd->bd_serial, ppg->serial, sizeof(ppg->serial));

	free(ppg, M_TEMP, sizeof(*ppg));
	return (0);
}
3665 
3666 struct mpii_device *
3667 mpii_find_vol(struct mpii_softc *sc, int volid)
3668 {
3669 	struct mpii_device	*dev = NULL;
3670 
3671 	if (sc->sc_vd_id_low + volid >= sc->sc_max_devices)
3672 		return (NULL);
3673 	dev = sc->sc_devs[sc->sc_vd_id_low + volid];
3674 	if (dev && ISSET(dev->flags, MPII_DF_VOLUME))
3675 		return (dev);
3676 	return (NULL);
3677 }
3678 
3679 #ifndef SMALL_KERNEL
3680 /*
3681  * Non-sleeping lightweight version of the mpii_ioctl_vol
3682  */
/*
 * Fetch the status of volume bv->bv_volid into bv->bv_status.  Uses
 * polled config requests (MPII_PG_POLL) and M_NOWAIT allocation so it
 * is safe to call from the sensor refresh context.
 */
int
mpii_bio_volstate(struct mpii_softc *sc, struct bioc_vol *bv)
{
	struct mpii_cfg_raid_vol_pg0	*vpg;
	struct mpii_cfg_hdr		hdr;
	struct mpii_device		*dev = NULL;
	size_t				pagelen;
	u_int16_t			volh;

	/* Resolve the bio volume id to the firmware device handle. */
	if ((dev = mpii_find_vol(sc, bv->bv_volid)) == NULL)
		return (ENODEV);
	volh = dev->dev_handle;

	if (mpii_req_cfg_header(sc, MPII_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 0,
	    MPII_CFG_RAID_VOL_ADDR_HANDLE | volh, MPII_PG_POLL, &hdr) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: unable to fetch header for raid "
		    "volume page 0\n", DEVNAME(sc));
		return (EINVAL);
	}

	/* page_length is in 32-bit dwords. */
	pagelen = hdr.page_length * 4;
	vpg = malloc(pagelen, M_TEMP, M_NOWAIT | M_ZERO);
	if (vpg == NULL) {
		DNPRINTF(MPII_D_MISC, "%s: unable to allocate space for raid "
		    "volume page 0\n", DEVNAME(sc));
		return (ENOMEM);
	}

	if (mpii_req_cfg_page(sc, MPII_CFG_RAID_VOL_ADDR_HANDLE | volh,
	    MPII_PG_POLL, &hdr, 1, vpg, pagelen) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: unable to fetch raid volume "
		    "page 0\n", DEVNAME(sc));
		free(vpg, M_TEMP, pagelen);
		return (EINVAL);
	}

	/* Map the firmware volume state onto bio(4) status values. */
	switch (vpg->volume_state) {
	case MPII_CFG_RAID_VOL_0_STATE_ONLINE:
	case MPII_CFG_RAID_VOL_0_STATE_OPTIMAL:
		bv->bv_status = BIOC_SVONLINE;
		break;
	case MPII_CFG_RAID_VOL_0_STATE_DEGRADED:
		if (ISSET(lemtoh32(&vpg->volume_status),
		    MPII_CFG_RAID_VOL_0_STATUS_RESYNC))
			bv->bv_status = BIOC_SVREBUILD;
		else
			bv->bv_status = BIOC_SVDEGRADED;
		break;
	case MPII_CFG_RAID_VOL_0_STATE_FAILED:
		bv->bv_status = BIOC_SVOFFLINE;
		break;
	case MPII_CFG_RAID_VOL_0_STATE_INITIALIZING:
		bv->bv_status = BIOC_SVBUILDING;
		break;
	case MPII_CFG_RAID_VOL_0_STATE_MISSING:
	default:
		bv->bv_status = BIOC_SVINVALID;
		break;
	}

	free(vpg, M_TEMP, pagelen);
	return (0);
}
3746 
/*
 * Create one drive sensor per RAID volume, register the periodic
 * refresh task and install the sensor device.  Returns 0 on success,
 * 1 on failure.
 */
int
mpii_create_sensors(struct mpii_softc *sc)
{
	struct scsibus_softc	*ssc = sc->sc_scsibus;
	struct device		*dev;
	struct scsi_link	*link;
	int			i;

	sc->sc_sensors = mallocarray(sc->sc_vd_count, sizeof(struct ksensor),
	    M_DEVBUF, M_NOWAIT | M_ZERO);
	if (sc->sc_sensors == NULL)
		return (1);
	sc->sc_nsensors = sc->sc_vd_count;

	strlcpy(sc->sc_sensordev.xname, DEVNAME(sc),
	    sizeof(sc->sc_sensordev.xname));

	for (i = 0; i < sc->sc_vd_count; i++) {
		/* Volumes occupy the target range starting at sc_vd_id_low. */
		link = scsi_get_link(ssc, i + sc->sc_vd_id_low, 0);
		if (link == NULL)
			goto bad;

		dev = link->device_softc;

		sc->sc_sensors[i].type = SENSOR_DRIVE;
		sc->sc_sensors[i].status = SENSOR_S_UNKNOWN;

		/* Name the sensor after the attached scsi device. */
		strlcpy(sc->sc_sensors[i].desc, dev->dv_xname,
		    sizeof(sc->sc_sensors[i].desc));

		sensor_attach(&sc->sc_sensordev, &sc->sc_sensors[i]);
	}

	/* Refresh volume status every 10 seconds. */
	if (sensor_task_register(sc, mpii_refresh_sensors, 10) == NULL)
		goto bad;

	sensordev_install(&sc->sc_sensordev);

	return (0);

bad:
	/*
	 * NOTE(review): sensors already attached above are not detached
	 * before the array is freed, and sc_sensors is not reset to
	 * NULL; size 0 tells free(9) the allocation size is unknown.
	 */
	free(sc->sc_sensors, M_DEVBUF, 0);

	return (1);
}
3792 
3793 void
3794 mpii_refresh_sensors(void *arg)
3795 {
3796 	struct mpii_softc	*sc = arg;
3797 	struct bioc_vol		bv;
3798 	int			i;
3799 
3800 	for (i = 0; i < sc->sc_nsensors; i++) {
3801 		memset(&bv, 0, sizeof(bv));
3802 		bv.bv_volid = i;
3803 		if (mpii_bio_volstate(sc, &bv))
3804 			return;
3805 		switch(bv.bv_status) {
3806 		case BIOC_SVOFFLINE:
3807 			sc->sc_sensors[i].value = SENSOR_DRIVE_FAIL;
3808 			sc->sc_sensors[i].status = SENSOR_S_CRIT;
3809 			break;
3810 		case BIOC_SVDEGRADED:
3811 			sc->sc_sensors[i].value = SENSOR_DRIVE_PFAIL;
3812 			sc->sc_sensors[i].status = SENSOR_S_WARN;
3813 			break;
3814 		case BIOC_SVREBUILD:
3815 			sc->sc_sensors[i].value = SENSOR_DRIVE_REBUILD;
3816 			sc->sc_sensors[i].status = SENSOR_S_WARN;
3817 			break;
3818 		case BIOC_SVONLINE:
3819 			sc->sc_sensors[i].value = SENSOR_DRIVE_ONLINE;
3820 			sc->sc_sensors[i].status = SENSOR_S_OK;
3821 			break;
3822 		case BIOC_SVINVALID:
3823 			/* FALLTHROUGH */
3824 		default:
3825 			sc->sc_sensors[i].value = 0; /* unknown */
3826 			sc->sc_sensors[i].status = SENSOR_S_UNKNOWN;
3827 		}
3828 	}
3829 }
3830 #endif /* SMALL_KERNEL */
3831 #endif /* NBIO > 0 */
3832