xref: /openbsd/sys/dev/ic/mpi.c (revision 274d7c50)
1 /*	$OpenBSD: mpi.c,v 1.207 2019/11/14 21:13:58 krw Exp $ */
2 
3 /*
4  * Copyright (c) 2005, 2006, 2009 David Gwynne <dlg@openbsd.org>
5  * Copyright (c) 2005, 2008, 2009 Marco Peereboom <marco@openbsd.org>
6  *
7  * Permission to use, copy, modify, and distribute this software for any
8  * purpose with or without fee is hereby granted, provided that the above
9  * copyright notice and this permission notice appear in all copies.
10  *
11  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18  */
19 
20 #include "bio.h"
21 
22 #include <sys/param.h>
23 #include <sys/systm.h>
24 #include <sys/buf.h>
25 #include <sys/device.h>
26 #include <sys/malloc.h>
27 #include <sys/kernel.h>
28 #include <sys/mutex.h>
29 #include <sys/rwlock.h>
30 #include <sys/sensors.h>
31 #include <sys/dkio.h>
32 #include <sys/task.h>
33 
34 #include <machine/bus.h>
35 
36 #include <scsi/scsi_all.h>
37 #include <scsi/scsiconf.h>
38 
39 #include <dev/biovar.h>
40 #include <dev/ic/mpireg.h>
41 #include <dev/ic/mpivar.h>
42 
43 #ifdef MPI_DEBUG
44 uint32_t	mpi_debug = 0
45 /*		    | MPI_D_CMD */
46 /*		    | MPI_D_INTR */
47 /*		    | MPI_D_MISC */
48 /*		    | MPI_D_DMA */
49 /*		    | MPI_D_IOCTL */
50 /*		    | MPI_D_RW */
51 /*		    | MPI_D_MEM */
52 /*		    | MPI_D_CCB */
53 /*		    | MPI_D_PPR */
54 /*		    | MPI_D_RAID */
55 /*		    | MPI_D_EVT */
56 		;
57 #endif
58 
59 struct cfdriver mpi_cd = {
60 	NULL,
61 	"mpi",
62 	DV_DULL
63 };
64 
65 void			mpi_scsi_cmd(struct scsi_xfer *);
66 void			mpi_scsi_cmd_done(struct mpi_ccb *);
67 void			mpi_minphys(struct buf *bp, struct scsi_link *sl);
68 int			mpi_scsi_probe(struct scsi_link *);
69 int			mpi_scsi_ioctl(struct scsi_link *, u_long, caddr_t,
70 			    int);
71 
72 struct scsi_adapter mpi_switch = {
73 	mpi_scsi_cmd,
74 	mpi_minphys,
75 	mpi_scsi_probe,
76 	NULL,
77 	mpi_scsi_ioctl
78 };
79 
80 struct mpi_dmamem	*mpi_dmamem_alloc(struct mpi_softc *, size_t);
81 void			mpi_dmamem_free(struct mpi_softc *,
82 			    struct mpi_dmamem *);
83 int			mpi_alloc_ccbs(struct mpi_softc *);
84 void			*mpi_get_ccb(void *);
85 void			mpi_put_ccb(void *, void *);
86 int			mpi_alloc_replies(struct mpi_softc *);
87 void			mpi_push_replies(struct mpi_softc *);
88 void			mpi_push_reply(struct mpi_softc *, struct mpi_rcb *);
89 
90 void			mpi_start(struct mpi_softc *, struct mpi_ccb *);
91 int			mpi_poll(struct mpi_softc *, struct mpi_ccb *, int);
92 void			mpi_poll_done(struct mpi_ccb *);
93 void			mpi_reply(struct mpi_softc *, u_int32_t);
94 
95 void			mpi_wait(struct mpi_softc *sc, struct mpi_ccb *);
96 void			mpi_wait_done(struct mpi_ccb *);
97 
98 int			mpi_cfg_spi_port(struct mpi_softc *);
99 void			mpi_squash_ppr(struct mpi_softc *);
100 void			mpi_run_ppr(struct mpi_softc *);
101 int			mpi_ppr(struct mpi_softc *, struct scsi_link *,
102 			    struct mpi_cfg_raid_physdisk *, int, int, int);
103 int			mpi_inq(struct mpi_softc *, u_int16_t, int);
104 
105 int			mpi_cfg_sas(struct mpi_softc *);
106 int			mpi_cfg_fc(struct mpi_softc *);
107 
108 void			mpi_timeout_xs(void *);
109 int			mpi_load_xs(struct mpi_ccb *);
110 
111 u_int32_t		mpi_read(struct mpi_softc *, bus_size_t);
112 void			mpi_write(struct mpi_softc *, bus_size_t, u_int32_t);
113 int			mpi_wait_eq(struct mpi_softc *, bus_size_t, u_int32_t,
114 			    u_int32_t);
115 int			mpi_wait_ne(struct mpi_softc *, bus_size_t, u_int32_t,
116 			    u_int32_t);
117 
118 int			mpi_init(struct mpi_softc *);
119 int			mpi_reset_soft(struct mpi_softc *);
120 int			mpi_reset_hard(struct mpi_softc *);
121 
122 int			mpi_handshake_send(struct mpi_softc *, void *, size_t);
123 int			mpi_handshake_recv_dword(struct mpi_softc *,
124 			    u_int32_t *);
125 int			mpi_handshake_recv(struct mpi_softc *, void *, size_t);
126 
127 void			mpi_empty_done(struct mpi_ccb *);
128 
129 int			mpi_iocinit(struct mpi_softc *);
130 int			mpi_iocfacts(struct mpi_softc *);
131 int			mpi_portfacts(struct mpi_softc *);
132 int			mpi_portenable(struct mpi_softc *);
133 int			mpi_cfg_coalescing(struct mpi_softc *);
134 void			mpi_get_raid(struct mpi_softc *);
135 int			mpi_fwupload(struct mpi_softc *);
136 int			mpi_manufacturing(struct mpi_softc *);
137 int			mpi_scsi_probe_virtual(struct scsi_link *);
138 
139 int			mpi_eventnotify(struct mpi_softc *);
140 void			mpi_eventnotify_done(struct mpi_ccb *);
141 void			mpi_eventnotify_free(struct mpi_softc *,
142 			    struct mpi_rcb *);
143 void			mpi_eventack(void *, void *);
144 void			mpi_eventack_done(struct mpi_ccb *);
145 int			mpi_evt_sas(struct mpi_softc *, struct mpi_rcb *);
146 void			mpi_evt_sas_detach(void *, void *);
147 void			mpi_evt_sas_detach_done(struct mpi_ccb *);
148 void			mpi_fc_rescan(void *);
149 
150 int			mpi_req_cfg_header(struct mpi_softc *, u_int8_t,
151 			    u_int8_t, u_int32_t, int, void *);
152 int			mpi_req_cfg_page(struct mpi_softc *, u_int32_t, int,
153 			    void *, int, void *, size_t);
154 
155 int			mpi_ioctl_cache(struct scsi_link *, u_long,
156 			    struct dk_cache *);
157 
158 #if NBIO > 0
159 int		mpi_bio_get_pg0_raid(struct mpi_softc *, int);
160 int		mpi_ioctl(struct device *, u_long, caddr_t);
161 int		mpi_ioctl_inq(struct mpi_softc *, struct bioc_inq *);
162 int		mpi_ioctl_vol(struct mpi_softc *, struct bioc_vol *);
163 int		mpi_ioctl_disk(struct mpi_softc *, struct bioc_disk *);
164 int		mpi_ioctl_setstate(struct mpi_softc *, struct bioc_setstate *);
165 #ifndef SMALL_KERNEL
166 int		mpi_create_sensors(struct mpi_softc *);
167 void		mpi_refresh_sensors(void *);
168 #endif /* SMALL_KERNEL */
169 #endif /* NBIO > 0 */
170 
171 #define DEVNAME(s)		((s)->sc_dev.dv_xname)
172 
173 #define	dwordsof(s)		(sizeof(s) / sizeof(u_int32_t))
174 
175 #define mpi_read_db(s)		mpi_read((s), MPI_DOORBELL)
176 #define mpi_write_db(s, v)	mpi_write((s), MPI_DOORBELL, (v))
177 #define mpi_read_intr(s)	bus_space_read_4((s)->sc_iot, (s)->sc_ioh, \
178 				    MPI_INTR_STATUS)
179 #define mpi_write_intr(s, v)	mpi_write((s), MPI_INTR_STATUS, (v))
180 #define mpi_pop_reply(s)	bus_space_read_4((s)->sc_iot, (s)->sc_ioh, \
181 				    MPI_REPLY_QUEUE)
182 #define mpi_push_reply_db(s, v) bus_space_write_4((s)->sc_iot, (s)->sc_ioh, \
183 				    MPI_REPLY_QUEUE, (v))
184 
185 #define mpi_wait_db_int(s)	mpi_wait_ne((s), MPI_INTR_STATUS, \
186 				    MPI_INTR_STATUS_DOORBELL, 0)
187 #define mpi_wait_db_ack(s)	mpi_wait_eq((s), MPI_INTR_STATUS, \
188 				    MPI_INTR_STATUS_IOCDOORBELL, 0)
189 
190 #define MPI_PG_EXTENDED		(1<<0)
191 #define MPI_PG_POLL		(1<<1)
192 #define MPI_PG_FMT		"\020" "\002POLL" "\001EXTENDED"
193 
194 #define mpi_cfg_header(_s, _t, _n, _a, _h) \
195 	mpi_req_cfg_header((_s), (_t), (_n), (_a), \
196 	    MPI_PG_POLL, (_h))
197 #define mpi_ecfg_header(_s, _t, _n, _a, _h) \
198 	mpi_req_cfg_header((_s), (_t), (_n), (_a), \
199 	    MPI_PG_POLL|MPI_PG_EXTENDED, (_h))
200 
201 #define mpi_cfg_page(_s, _a, _h, _r, _p, _l) \
202 	mpi_req_cfg_page((_s), (_a), MPI_PG_POLL, \
203 	    (_h), (_r), (_p), (_l))
204 #define mpi_ecfg_page(_s, _a, _h, _r, _p, _l) \
205 	mpi_req_cfg_page((_s), (_a), MPI_PG_POLL|MPI_PG_EXTENDED, \
206 	    (_h), (_r), (_p), (_l))
207 
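/*
 * Load a 64-bit DMA address into the low/high little-endian words of a
 * scatter-gather element.
 */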
208 static inline void
209 mpi_dvatosge(struct mpi_sge *sge, u_int64_t dva)
210 {
211 	htolem32(&sge->sg_addr_lo, dva);
212 	htolem32(&sge->sg_addr_hi, dva >> 32);
213 }
214 
215 int
216 mpi_attach(struct mpi_softc *sc)
217 {
218 	struct scsibus_attach_args	saa;
219 	struct mpi_ccb			*ccb;
220 
221 	printf("\n");
222 
223 	rw_init(&sc->sc_lock, "mpi_lock");
224 	task_set(&sc->sc_evt_rescan, mpi_fc_rescan, sc);
225 
226 	/* disable interrupts */
227 	mpi_write(sc, MPI_INTR_MASK,
228 	    MPI_INTR_MASK_REPLY | MPI_INTR_MASK_DOORBELL);
229 
230 	if (mpi_init(sc) != 0) {
231 		printf("%s: unable to initialise\n", DEVNAME(sc));
232 		return (1);
233 	}
234 
235 	if (mpi_iocfacts(sc) != 0) {
236 		printf("%s: unable to get iocfacts\n", DEVNAME(sc));
237 		return (1);
238 	}
239 
240 	if (mpi_alloc_ccbs(sc) != 0) {
241 		/* error already printed */
242 		return (1);
243 	}
244 
245 	if (mpi_alloc_replies(sc) != 0) {
246 		printf("%s: unable to allocate reply space\n", DEVNAME(sc));
247 		goto free_ccbs;
248 	}
249 
250 	if (mpi_iocinit(sc) != 0) {
251 		printf("%s: unable to send iocinit\n", DEVNAME(sc));
252 		goto free_ccbs;
253 	}
254 
255 	/* spin until we're operational */
256 	if (mpi_wait_eq(sc, MPI_DOORBELL, MPI_DOORBELL_STATE,
257 	    MPI_DOORBELL_STATE_OPER) != 0) {
258 		printf("%s: state: 0x%08x\n", DEVNAME(sc),
259 		    mpi_read_db(sc) & MPI_DOORBELL_STATE);
260 		printf("%s: operational state timeout\n", DEVNAME(sc));
261 		goto free_ccbs;
262 	}
263 
264 	mpi_push_replies(sc);
265 
266 	if (mpi_portfacts(sc) != 0) {
267 		printf("%s: unable to get portfacts\n", DEVNAME(sc));
268 		goto free_replies;
269 	}
270 
271 	if (mpi_cfg_coalescing(sc) != 0) {
272 		printf("%s: unable to configure coalescing\n", DEVNAME(sc));
273 		goto free_replies;
274 	}
275 
276 	switch (sc->sc_porttype) {
277 	case MPI_PORTFACTS_PORTTYPE_SAS:
278 		SIMPLEQ_INIT(&sc->sc_evt_scan_queue);
279 		mtx_init(&sc->sc_evt_scan_mtx, IPL_BIO);
280 		scsi_ioh_set(&sc->sc_evt_scan_handler, &sc->sc_iopool,
281 		    mpi_evt_sas_detach, sc);
282 		/* FALLTHROUGH */
283 	case MPI_PORTFACTS_PORTTYPE_FC:
284 		if (mpi_eventnotify(sc) != 0) {
285 			printf("%s: unable to enable events\n", DEVNAME(sc));
286 			goto free_replies;
287 		}
288 		break;
289 	}
290 
291 	if (mpi_portenable(sc) != 0) {
292 		printf("%s: unable to enable port\n", DEVNAME(sc));
293 		goto free_replies;
294 	}
295 
296 	if (mpi_fwupload(sc) != 0) {
297 		printf("%s: unable to upload firmware\n", DEVNAME(sc));
298 		goto free_replies;
299 	}
300 
301 	if (mpi_manufacturing(sc) != 0) {
302 		printf("%s: unable to fetch manufacturing info\n", DEVNAME(sc));
		goto free_replies;
303 	}
304 
305 	switch (sc->sc_porttype) {
306 	case MPI_PORTFACTS_PORTTYPE_SCSI:
307 		if (mpi_cfg_spi_port(sc) != 0) {
308 			printf("%s: unable to configure spi\n", DEVNAME(sc));
309 			goto free_replies;
310 		}
311 		mpi_squash_ppr(sc);
312 		break;
313 	case MPI_PORTFACTS_PORTTYPE_SAS:
314 		if (mpi_cfg_sas(sc) != 0) {
315 			printf("%s: unable to configure sas\n", DEVNAME(sc));
316 			goto free_replies;
317 		}
318 		break;
319 	case MPI_PORTFACTS_PORTTYPE_FC:
320 		if (mpi_cfg_fc(sc) != 0) {
321 			printf("%s: unable to configure fc\n", DEVNAME(sc));
322 			goto free_replies;
323 		}
324 		break;
325 	}
326 
327 	/* get raid pages */
328 	mpi_get_raid(sc);
329 #if NBIO > 0
330 	if (sc->sc_flags & MPI_F_RAID) {
331 		if (bio_register(&sc->sc_dev, mpi_ioctl) != 0)
332 			panic("%s: controller registration failed",
333 			    DEVNAME(sc));
334 		else {
335 			if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_IOC,
336 			    2, 0, &sc->sc_cfg_hdr) != 0) {
337 				panic("%s: can't get IOC page 2 hdr",
338 				    DEVNAME(sc));
339 			}
340 
341 			sc->sc_vol_page = mallocarray(sc->sc_cfg_hdr.page_length,
342 			    4, M_TEMP, M_WAITOK | M_CANFAIL);
343 			if (sc->sc_vol_page == NULL) {
344 				panic("%s: can't get memory for IOC page 2, "
345 				    "bio disabled", DEVNAME(sc));
346 			}
347 
348 			if (mpi_cfg_page(sc, 0, &sc->sc_cfg_hdr, 1,
349 			    sc->sc_vol_page,
350 			    sc->sc_cfg_hdr.page_length * 4) != 0) {
351 				panic("%s: can't get IOC page 2", DEVNAME(sc));
352 			}
353 
354 			sc->sc_vol_list = (struct mpi_cfg_raid_vol *)
355 			    (sc->sc_vol_page + 1);
356 
357 			sc->sc_ioctl = mpi_ioctl;
358 		}
359 	}
360 #endif /* NBIO > 0 */
361 
362 	/* we should be good to go now, attach scsibus */
363 	sc->sc_link.adapter = &mpi_switch;
364 	sc->sc_link.adapter_softc = sc;
365 	sc->sc_link.adapter_target = sc->sc_target;
366 	sc->sc_link.adapter_buswidth = sc->sc_buswidth;
367 	sc->sc_link.openings = sc->sc_maxcmds - 1;
368 	sc->sc_link.pool = &sc->sc_iopool;
369 
370 	memset(&saa, 0, sizeof(saa));
371 	saa.saa_sc_link = &sc->sc_link;
372 
373 	/* config_found() returns the scsibus attached to us */
374 	sc->sc_scsibus = (struct scsibus_softc *) config_found(&sc->sc_dev,
375 	    &saa, scsiprint);
376 
377 	/* do domain validation */
378 	if (sc->sc_porttype == MPI_PORTFACTS_PORTTYPE_SCSI)
379 		mpi_run_ppr(sc);
380 
381 	/* enable interrupts */
382 	mpi_write(sc, MPI_INTR_MASK, MPI_INTR_MASK_DOORBELL);
383 
384 #if NBIO > 0
385 #ifndef SMALL_KERNEL
386 	mpi_create_sensors(sc);
387 #endif /* SMALL_KERNEL */
388 #endif /* NBIO > 0 */
389 
390 	return (0);
391 
392 free_replies:
393 	bus_dmamap_sync(sc->sc_dmat, MPI_DMA_MAP(sc->sc_replies), 0,
394 	    sc->sc_repq * MPI_REPLY_SIZE, BUS_DMASYNC_POSTREAD);
395 	mpi_dmamem_free(sc, sc->sc_replies);
396 free_ccbs:
397 	while ((ccb = mpi_get_ccb(sc)) != NULL)
398 		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
399 	mpi_dmamem_free(sc, sc->sc_requests);
400 	free(sc->sc_ccbs, M_DEVBUF, 0);
401 
402 	return (1);
403 }
404 
405 int
406 mpi_cfg_spi_port(struct mpi_softc *sc)
407 {
408 	struct mpi_cfg_hdr		hdr;
409 	struct mpi_cfg_spi_port_pg1	port;
410 
411 	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_SCSI_SPI_PORT, 1, 0x0,
412 	    &hdr) != 0)
413 		return (1);
414 
415 	if (mpi_cfg_page(sc, 0x0, &hdr, 1, &port, sizeof(port)) != 0)
416 		return (1);
417 
418 	DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_spi_port_pg1\n", DEVNAME(sc));
419 	DNPRINTF(MPI_D_MISC, "%s:  port_scsi_id: %d port_resp_ids: 0x%04x\n",
420 	    DEVNAME(sc), port.port_scsi_id, letoh16(port.port_resp_ids));
421 	DNPRINTF(MPI_D_MISC, "%s:  on_bus_timer_value: 0x%08x\n", DEVNAME(sc),
422 	    letoh32(port.on_bus_timer_value));
423 	DNPRINTF(MPI_D_MISC, "%s:  target_config: 0x%02x id_config: 0x%04x\n",
424 	    DEVNAME(sc), port.target_config, letoh16(port.id_config));
425 
426 	if (port.port_scsi_id == sc->sc_target &&
427 	    port.port_resp_ids == htole16(1 << sc->sc_target) &&
428 	    port.on_bus_timer_value != htole32(0x0))
429 		return (0);
430 
431 	DNPRINTF(MPI_D_MISC, "%s: setting port scsi id to %d\n", DEVNAME(sc),
432 	    sc->sc_target);
433 	port.port_scsi_id = sc->sc_target;
434 	port.port_resp_ids = htole16(1 << sc->sc_target);
435 	port.on_bus_timer_value = htole32(0x07000000); /* XXX magic */
436 
437 	if (mpi_cfg_page(sc, 0x0, &hdr, 0, &port, sizeof(port)) != 0) {
438 		printf("%s: unable to configure port scsi id\n", DEVNAME(sc));
439 		return (1);
440 	}
441 
442 	return (0);
443 }
444 
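/*
 * Walk every target on the SPI bus and zero its negotiated transfer
 * parameters (SPI device page 1), so the later domain validation in
 * mpi_run_ppr() starts from a clean, un-negotiated state.
 */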
445 void
446 mpi_squash_ppr(struct mpi_softc *sc)
447 {
448 	struct mpi_cfg_hdr		hdr;
449 	struct mpi_cfg_spi_dev_pg1	page;
450 	int				i;
451 
452 	DNPRINTF(MPI_D_PPR, "%s: mpi_squash_ppr\n", DEVNAME(sc));
453 
454 	for (i = 0; i < sc->sc_buswidth; i++) {
455 		if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_SCSI_SPI_DEV,
456 		    1, i, &hdr) != 0)
457 			return;
458 
459 		if (mpi_cfg_page(sc, i, &hdr, 1, &page, sizeof(page)) != 0)
460 			return;
461 
462 		DNPRINTF(MPI_D_PPR, "%s:  target: %d req_params1: 0x%02x "
463 		    "req_offset: 0x%02x req_period: 0x%02x "
464 		    "req_params2: 0x%02x conf: 0x%08x\n", DEVNAME(sc), i,
465 		    page.req_params1, page.req_offset, page.req_period,
466 		    page.req_params2, letoh32(page.configuration));
467 
468 		page.req_params1 = 0x0;
469 		page.req_offset = 0x0;
470 		page.req_period = 0x0;
471 		page.req_params2 = 0x0;
472 		page.configuration = htole32(0x0);
473 
474 		if (mpi_cfg_page(sc, i, &hdr, 0, &page, sizeof(page)) != 0)
475 			return;
476 	}
477 }
478 
479 void
480 mpi_run_ppr(struct mpi_softc *sc)
481 {
482 	struct mpi_cfg_hdr		hdr;
483 	struct mpi_cfg_spi_port_pg0	port_pg;
484 	struct mpi_cfg_ioc_pg3		*physdisk_pg;
485 	struct mpi_cfg_raid_physdisk	*physdisk_list, *physdisk;
486 	size_t				pagelen;
487 	struct scsi_link		*link;
488 	int				i, tries;
489 
490 	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_SCSI_SPI_PORT, 0, 0x0,
491 	    &hdr) != 0) {
492 		DNPRINTF(MPI_D_PPR, "%s: mpi_run_ppr unable to fetch header\n",
493 		    DEVNAME(sc));
494 		return;
495 	}
496 
497 	if (mpi_cfg_page(sc, 0x0, &hdr, 1, &port_pg, sizeof(port_pg)) != 0) {
498 		DNPRINTF(MPI_D_PPR, "%s: mpi_run_ppr unable to fetch page\n",
499 		    DEVNAME(sc));
500 		return;
501 	}
502 
503 	for (i = 0; i < sc->sc_buswidth; i++) {
504 		link = scsi_get_link(sc->sc_scsibus, i, 0);
505 		if (link == NULL)
506 			continue;
507 
508 		/* do not ppr volumes */
509 		if (link->flags & SDEV_VIRTUAL)
510 			continue;
511 
512 		tries = 0;
513 		while (mpi_ppr(sc, link, NULL, port_pg.min_period,
514 		    port_pg.max_offset, tries) == EAGAIN)
515 			tries++;
516 	}
517 
518 	if ((sc->sc_flags & MPI_F_RAID) == 0)
519 		return;
520 
521 	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_IOC, 3, 0x0,
522 	    &hdr) != 0) {
523 		DNPRINTF(MPI_D_RAID|MPI_D_PPR, "%s: mpi_run_ppr unable to "
524 		    "fetch ioc pg 3 header\n", DEVNAME(sc));
525 		return;
526 	}
527 
528 	pagelen = hdr.page_length * 4; /* dwords to bytes */
529 	physdisk_pg = malloc(pagelen, M_TEMP, M_WAITOK|M_CANFAIL);
530 	if (physdisk_pg == NULL) {
531 		DNPRINTF(MPI_D_RAID|MPI_D_PPR, "%s: mpi_run_ppr unable to "
532 		    "allocate ioc pg 3\n", DEVNAME(sc));
533 		return;
534 	}
535 	physdisk_list = (struct mpi_cfg_raid_physdisk *)(physdisk_pg + 1);
536 
537 	if (mpi_cfg_page(sc, 0, &hdr, 1, physdisk_pg, pagelen) != 0) {
538 		DNPRINTF(MPI_D_RAID|MPI_D_PPR, "%s: mpi_run_ppr unable to "
539 		    "fetch ioc page 3\n", DEVNAME(sc));
540 		goto out;
541 	}
542 
543 	DNPRINTF(MPI_D_RAID|MPI_D_PPR, "%s:  no_phys_disks: %d\n", DEVNAME(sc),
544 	    physdisk_pg->no_phys_disks);
545 
546 	for (i = 0; i < physdisk_pg->no_phys_disks; i++) {
547 		physdisk = &physdisk_list[i];
548 
549 		DNPRINTF(MPI_D_RAID|MPI_D_PPR, "%s:  id: %d bus: %d ioc: %d "
550 		    "num: %d\n", DEVNAME(sc), physdisk->phys_disk_id,
551 		    physdisk->phys_disk_bus, physdisk->phys_disk_ioc,
552 		    physdisk->phys_disk_num);
553 
554 		if (physdisk->phys_disk_ioc != sc->sc_ioc_number)
555 			continue;
556 
557 		tries = 0;
558 		while (mpi_ppr(sc, NULL, physdisk, port_pg.min_period,
559 		    port_pg.max_offset, tries) == EAGAIN)
560 			tries++;
561 	}
562 
563 out:
564 	free(physdisk_pg, M_TEMP, pagelen);
565 }
566 
567 int
568 mpi_ppr(struct mpi_softc *sc, struct scsi_link *link,
569     struct mpi_cfg_raid_physdisk *physdisk, int period, int offset, int try)
570 {
571 	struct mpi_cfg_hdr		hdr0, hdr1;
572 	struct mpi_cfg_spi_dev_pg0	pg0;
573 	struct mpi_cfg_spi_dev_pg1	pg1;
574 	u_int32_t			address;
575 	int				id;
576 	int				raid = 0;
577 
578 	DNPRINTF(MPI_D_PPR, "%s: mpi_ppr period: %d offset: %d try: %d "
579 	    "link quirks: 0x%x\n", DEVNAME(sc), period, offset, try,
580 	    link->quirks);
581 
582 	if (try >= 3)
583 		return (EIO);
584 
585 	if (physdisk == NULL) {
586 		if ((link->inqdata.device & SID_TYPE) == T_PROCESSOR)
587 			return (EIO);
588 
589 		address = link->target;
590 		id = link->target;
591 	} else {
592 		raid = 1;
593 		address = (physdisk->phys_disk_bus << 8) |
594 		    (physdisk->phys_disk_id);
595 		id = physdisk->phys_disk_num;
596 	}
597 
598 	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_SCSI_SPI_DEV, 0,
599 	    address, &hdr0) != 0) {
600 		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to fetch header 0\n",
601 		    DEVNAME(sc));
602 		return (EIO);
603 	}
604 
605 	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_SCSI_SPI_DEV, 1,
606 	    address, &hdr1) != 0) {
607 		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to fetch header 1\n",
608 		    DEVNAME(sc));
609 		return (EIO);
610 	}
611 
612 #ifdef MPI_DEBUG
613 	if (mpi_cfg_page(sc, address, &hdr0, 1, &pg0, sizeof(pg0)) != 0) {
614 		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to fetch page 0\n",
615 		    DEVNAME(sc));
616 		return (EIO);
617 	}
618 
619 	DNPRINTF(MPI_D_PPR, "%s: mpi_ppr dev pg 0 neg_params1: 0x%02x "
620 	    "neg_offset: %d neg_period: 0x%02x neg_params2: 0x%02x "
621 	    "info: 0x%08x\n", DEVNAME(sc), pg0.neg_params1, pg0.neg_offset,
622 	    pg0.neg_period, pg0.neg_params2, letoh32(pg0.information));
623 #endif
624 
625 	if (mpi_cfg_page(sc, address, &hdr1, 1, &pg1, sizeof(pg1)) != 0) {
626 		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to fetch page 1\n",
627 		    DEVNAME(sc));
628 		return (EIO);
629 	}
630 
631 	DNPRINTF(MPI_D_PPR, "%s: mpi_ppr dev pg 1 req_params1: 0x%02x "
632 	    "req_offset: 0x%02x req_period: 0x%02x req_params2: 0x%02x "
633 	    "conf: 0x%08x\n", DEVNAME(sc), pg1.req_params1, pg1.req_offset,
634 	    pg1.req_period, pg1.req_params2, letoh32(pg1.configuration));
635 
636 	pg1.req_params1 = 0;
637 	pg1.req_offset = offset;
638 	pg1.req_period = period;
639 	pg1.req_params2 &= ~MPI_CFG_SPI_DEV_1_REQPARAMS_WIDTH;
640 
641 	if (raid || !(link->quirks & SDEV_NOSYNC)) {
642 		pg1.req_params2 |= MPI_CFG_SPI_DEV_1_REQPARAMS_WIDTH_WIDE;
643 
644 		switch (try) {
645 		case 0: /* U320 */
646 			break;
647 		case 1: /* U160 */
648 			pg1.req_period = 0x09;
649 			break;
650 		case 2: /* U80 */
651 			pg1.req_period = 0x0a;
652 			break;
653 		}
654 
655 		if (pg1.req_period < 0x09) {
656 			/* Ultra320: enable QAS & PACKETIZED */
657 			pg1.req_params1 |= MPI_CFG_SPI_DEV_1_REQPARAMS_QAS |
658 			    MPI_CFG_SPI_DEV_1_REQPARAMS_PACKETIZED;
659 		}
660 		if (pg1.req_period < 0xa) {
661 			/* >= Ultra160: enable dual xfers */
662 			pg1.req_params1 |=
663 			    MPI_CFG_SPI_DEV_1_REQPARAMS_DUALXFERS;
664 		}
665 	}
666 
667 	DNPRINTF(MPI_D_PPR, "%s: mpi_ppr dev pg 1 req_params1: 0x%02x "
668 	    "req_offset: 0x%02x req_period: 0x%02x req_params2: 0x%02x "
669 	    "conf: 0x%08x\n", DEVNAME(sc), pg1.req_params1, pg1.req_offset,
670 	    pg1.req_period, pg1.req_params2, letoh32(pg1.configuration));
671 
672 	if (mpi_cfg_page(sc, address, &hdr1, 0, &pg1, sizeof(pg1)) != 0) {
673 		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to write page 1\n",
674 		    DEVNAME(sc));
675 		return (EIO);
676 	}
677 
678 	if (mpi_cfg_page(sc, address, &hdr1, 1, &pg1, sizeof(pg1)) != 0) {
679 		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to read page 1\n",
680 		    DEVNAME(sc));
681 		return (EIO);
682 	}
683 
684 	DNPRINTF(MPI_D_PPR, "%s: mpi_ppr dev pg 1 req_params1: 0x%02x "
685 	    "req_offset: 0x%02x req_period: 0x%02x req_params2: 0x%02x "
686 	    "conf: 0x%08x\n", DEVNAME(sc), pg1.req_params1, pg1.req_offset,
687 	    pg1.req_period, pg1.req_params2, letoh32(pg1.configuration));
688 
689 	if (mpi_inq(sc, id, raid) != 0) {
690 		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to do inquiry against "
691 		    "target %d\n", DEVNAME(sc), id);
692 		return (EIO);
693 	}
694 
695 	if (mpi_cfg_page(sc, address, &hdr0, 1, &pg0, sizeof(pg0)) != 0) {
696 		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to read page 0 after "
697 		    "inquiry\n", DEVNAME(sc));
698 		return (EIO);
699 	}
700 
701 	DNPRINTF(MPI_D_PPR, "%s: mpi_ppr dev pg 0 neg_params1: 0x%02x "
702 	    "neg_offset: %d neg_period: 0x%02x neg_params2: 0x%02x "
703 	    "info: 0x%08x\n", DEVNAME(sc), pg0.neg_params1, pg0.neg_offset,
704 	    pg0.neg_period, pg0.neg_params2, letoh32(pg0.information));
705 
706 	if (!(lemtoh32(&pg0.information) & 0x07) && (try == 0)) {
707 		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr U320 ppr rejected\n",
708 		    DEVNAME(sc));
709 		return (EAGAIN);
710 	}
711 
712 	if ((((lemtoh32(&pg0.information) >> 8) & 0xff) > 0x09) && (try == 1)) {
713 		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr U160 ppr rejected\n",
714 		    DEVNAME(sc));
715 		return (EAGAIN);
716 	}
717 
718 	if (lemtoh32(&pg0.information) & 0x0e) {
719 		DNPRINTF(MPI_D_PPR, "%s: mpi_ppr ppr rejected: %0x\n",
720 		    DEVNAME(sc), lemtoh32(&pg0.information));
721 		return (EAGAIN);
722 	}
723 
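	/* map the negotiated period factor onto a sync transfer rate in MHz */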
724 	switch (pg0.neg_period) {
725 	case 0x08:
726 		period = 160;
727 		break;
728 	case 0x09:
729 		period = 80;
730 		break;
731 	case 0x0a:
732 		period = 40;
733 		break;
734 	case 0x0b:
735 		period = 20;
736 		break;
737 	case 0x0c:
738 		period = 10;
739 		break;
740 	default:
741 		period = 0;
742 		break;
743 	}
744 
745 	printf("%s: %s %d %s at %dMHz width %dbit offset %d "
746 	    "QAS %d DT %d IU %d\n", DEVNAME(sc), raid ? "phys disk" : "target",
747 	    id, period ? "Sync" : "Async", period,
748 	    (pg0.neg_params2 & MPI_CFG_SPI_DEV_0_NEGPARAMS_WIDTH_WIDE) ? 16 : 8,
749 	    pg0.neg_offset,
750 	    (pg0.neg_params1 & MPI_CFG_SPI_DEV_0_NEGPARAMS_QAS) ? 1 : 0,
751 	    (pg0.neg_params1 & MPI_CFG_SPI_DEV_0_NEGPARAMS_DUALXFERS) ? 1 : 0,
752 	    (pg0.neg_params1 & MPI_CFG_SPI_DEV_0_NEGPARAMS_PACKETIZED) ? 1 : 0);
753 
754 	return (0);
755 }
756 
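/*
 * Issue a bare INQUIRY through a hand-built SCSI IO request, bypassing the
 * midlayer, so the IOC renegotiates transfer parameters with the target
 * after device page 1 has been rewritten by mpi_ppr().
 */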
757 int
758 mpi_inq(struct mpi_softc *sc, u_int16_t target, int physdisk)
759 {
760 	struct mpi_ccb			*ccb;
761 	struct scsi_inquiry		inq;
762 	struct inq_bundle {
763 		struct mpi_msg_scsi_io		io;
764 		struct mpi_sge			sge;
765 		struct scsi_inquiry_data	inqbuf;
766 		struct scsi_sense_data		sense;
767 	} __packed			*bundle;
768 	struct mpi_msg_scsi_io		*io;
769 	struct mpi_sge			*sge;
770 
771 	DNPRINTF(MPI_D_PPR, "%s: mpi_inq\n", DEVNAME(sc));
772 
773 	memset(&inq, 0, sizeof(inq));
774 	inq.opcode = INQUIRY;
775 	_lto2b(sizeof(struct scsi_inquiry_data), inq.length);
776 
777 	ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP);
778 	if (ccb == NULL)
779 		return (1);
780 
781 	ccb->ccb_done = mpi_empty_done;
782 
783 	bundle = ccb->ccb_cmd;
784 	io = &bundle->io;
785 	sge = &bundle->sge;
786 
787 	io->function = physdisk ? MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH :
788 	    MPI_FUNCTION_SCSI_IO_REQUEST;
789 	/*
790 	 * bus is always 0
791 	 * io->bus = htole16(sc->sc_bus);
792 	 */
793 	io->target_id = target;
794 
795 	io->cdb_length = sizeof(inq);
796 	io->sense_buf_len = sizeof(struct scsi_sense_data);
797 	io->msg_flags = MPI_SCSIIO_SENSE_BUF_ADDR_WIDTH_64;
798 
799 	/*
800 	 * always lun 0
801 	 * io->lun[0] = htobe16(link->lun);
802 	 */
803 
804 	io->direction = MPI_SCSIIO_DIR_READ;
805 	io->tagging = MPI_SCSIIO_ATTR_NO_DISCONNECT;
806 
807 	memcpy(io->cdb, &inq, sizeof(inq));
808 
809 	htolem32(&io->data_length, sizeof(struct scsi_inquiry_data));
810 
811 	htolem32(&io->sense_buf_low_addr, ccb->ccb_cmd_dva +
812 	    offsetof(struct inq_bundle, sense));
813 
814 	htolem32(&sge->sg_hdr, MPI_SGE_FL_TYPE_SIMPLE | MPI_SGE_FL_SIZE_64 |
815 	    MPI_SGE_FL_LAST | MPI_SGE_FL_EOB | MPI_SGE_FL_EOL |
816 	    (u_int32_t)sizeof(inq));
817 
818 	mpi_dvatosge(sge, ccb->ccb_cmd_dva +
819 	    offsetof(struct inq_bundle, inqbuf));
820 
821 	if (mpi_poll(sc, ccb, 5000) != 0)
822 		return (1);
823 
824 	if (ccb->ccb_rcb != NULL)
825 		mpi_push_reply(sc, ccb->ccb_rcb);
826 
827 	scsi_io_put(&sc->sc_iopool, ccb);
828 
829 	return (0);
830 }
831 
832 int
833 mpi_cfg_sas(struct mpi_softc *sc)
834 {
835 	struct mpi_ecfg_hdr		ehdr;
836 	struct mpi_cfg_sas_iou_pg1	*pg;
837 	size_t				pagelen;
838 	int				rv = 0;
839 
840 	if (mpi_ecfg_header(sc, MPI_CONFIG_REQ_EXTPAGE_TYPE_SAS_IO_UNIT, 1, 0,
841 	    &ehdr) != 0)
842 		return (0);
843 
844 	pagelen = lemtoh16(&ehdr.ext_page_length) * 4;
845 	pg = malloc(pagelen, M_TEMP, M_NOWAIT | M_ZERO);
846 	if (pg == NULL)
847 		return (ENOMEM);
848 
849 	if (mpi_ecfg_page(sc, 0, &ehdr, 1, pg, pagelen) != 0)
850 		goto out;
851 
852 	if (pg->max_sata_q_depth != 32) {
853 		pg->max_sata_q_depth = 32;
854 
855 		if (mpi_ecfg_page(sc, 0, &ehdr, 0, pg, pagelen) != 0)
856 			goto out;
857 	}
858 
859 out:
860 	free(pg, M_TEMP, pagelen);
861 	return (rv);
862 }
863 
864 int
865 mpi_cfg_fc(struct mpi_softc *sc)
866 {
867 	struct mpi_cfg_hdr		hdr;
868 	struct mpi_cfg_fc_port_pg0	pg0;
869 	struct mpi_cfg_fc_port_pg1	pg1;
870 
871 	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_FC_PORT, 0, 0,
872 	    &hdr) != 0) {
873 		printf("%s: unable to fetch FC port header 0\n", DEVNAME(sc));
874 		return (1);
875 	}
876 
877 	if (mpi_cfg_page(sc, 0, &hdr, 1, &pg0, sizeof(pg0)) != 0) {
878 		printf("%s: unable to fetch FC port page 0\n", DEVNAME(sc));
879 		return (1);
880 	}
881 
882 	sc->sc_link.port_wwn = letoh64(pg0.wwpn);
883 	sc->sc_link.node_wwn = letoh64(pg0.wwnn);
884 
885 	/* configure port config more to our liking */
886 	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_FC_PORT, 1, 0,
887 	    &hdr) != 0) {
888 		printf("%s: unable to fetch FC port header 1\n", DEVNAME(sc));
889 		return (1);
890 	}
891 
892 	if (mpi_cfg_page(sc, 0, &hdr, 1, &pg1, sizeof(pg1)) != 0) {
893 		printf("%s: unable to fetch FC port page 1\n", DEVNAME(sc));
894 		return (1);
895 	}
896 
897 	SET(pg1.flags, htole32(MPI_CFG_FC_PORT_0_FLAGS_IMMEDIATE_ERROR |
898 	    MPI_CFG_FC_PORT_0_FLAGS_VERBOSE_RESCAN));
899 
900 	if (mpi_cfg_page(sc, 0, &hdr, 0, &pg1, sizeof(pg1)) != 0) {
901 		printf("%s: unable to set FC port page 1\n", DEVNAME(sc));
902 		return (1);
903 	}
904 
905 	return (0);
906 }
907 
908 void
909 mpi_detach(struct mpi_softc *sc)
910 {
911 
912 }
913 
914 int
915 mpi_intr(void *arg)
916 {
917 	struct mpi_softc		*sc = arg;
918 	u_int32_t			reg;
919 	int				rv = 0;
920 
921 	if ((mpi_read_intr(sc) & MPI_INTR_STATUS_REPLY) == 0)
922 		return (rv);
923 
924 	while ((reg = mpi_pop_reply(sc)) != 0xffffffff) {
925 		mpi_reply(sc, reg);
926 		rv = 1;
927 	}
928 
929 	return (rv);
930 }
931 
932 void
933 mpi_reply(struct mpi_softc *sc, u_int32_t reg)
934 {
935 	struct mpi_ccb			*ccb;
936 	struct mpi_rcb			*rcb = NULL;
937 	struct mpi_msg_reply		*reply = NULL;
938 	u_int32_t			reply_dva;
939 	int				id;
940 	int				i;
941 
942 	DNPRINTF(MPI_D_INTR, "%s: mpi_reply reg: 0x%08x\n", DEVNAME(sc), reg);
943 
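	/*
	 * A reply with the address bit set points at a full reply frame in
	 * the reply dmamem (the register carries the frame address shifted
	 * right by one); otherwise it is a context-only reply and the low
	 * bits hold the ccb id directly.
	 */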
944 	if (reg & MPI_REPLY_QUEUE_ADDRESS) {
945 		reply_dva = (reg & MPI_REPLY_QUEUE_ADDRESS_MASK) << 1;
946 		i = (reply_dva - (u_int32_t)MPI_DMA_DVA(sc->sc_replies)) /
947 		    MPI_REPLY_SIZE;
948 		rcb = &sc->sc_rcbs[i];
949 
950 		bus_dmamap_sync(sc->sc_dmat,
951 		    MPI_DMA_MAP(sc->sc_replies), rcb->rcb_offset,
952 		    MPI_REPLY_SIZE, BUS_DMASYNC_POSTREAD);
953 
954 		reply = rcb->rcb_reply;
955 
956 		id = lemtoh32(&reply->msg_context);
957 	} else {
958 		switch (reg & MPI_REPLY_QUEUE_TYPE_MASK) {
959 		case MPI_REPLY_QUEUE_TYPE_INIT:
960 			id = reg & MPI_REPLY_QUEUE_CONTEXT;
961 			break;
962 
963 		default:
964 			panic("%s: unsupported context reply",
965 			    DEVNAME(sc));
966 		}
967 	}
968 
969 	DNPRINTF(MPI_D_INTR, "%s: mpi_reply id: %d reply: %p\n",
970 	    DEVNAME(sc), id, reply);
971 
972 	ccb = &sc->sc_ccbs[id];
973 
974 	bus_dmamap_sync(sc->sc_dmat, MPI_DMA_MAP(sc->sc_requests),
975 	    ccb->ccb_offset, MPI_REQUEST_SIZE,
976 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
977 	ccb->ccb_state = MPI_CCB_READY;
978 	ccb->ccb_rcb = rcb;
979 
980 	ccb->ccb_done(ccb);
981 }
982 
983 struct mpi_dmamem *
984 mpi_dmamem_alloc(struct mpi_softc *sc, size_t size)
985 {
986 	struct mpi_dmamem		*mdm;
987 	int				nsegs;
988 
989 	mdm = malloc(sizeof(struct mpi_dmamem), M_DEVBUF, M_NOWAIT | M_ZERO);
990 	if (mdm == NULL)
991 		return (NULL);
992 
993 	mdm->mdm_size = size;
994 
995 	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
996 	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &mdm->mdm_map) != 0)
997 		goto mdmfree;
998 
999 	if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &mdm->mdm_seg,
1000 	    1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0)
1001 		goto destroy;
1002 
1003 	if (bus_dmamem_map(sc->sc_dmat, &mdm->mdm_seg, nsegs, size,
1004 	    &mdm->mdm_kva, BUS_DMA_NOWAIT) != 0)
1005 		goto free;
1006 
1007 	if (bus_dmamap_load(sc->sc_dmat, mdm->mdm_map, mdm->mdm_kva, size,
1008 	    NULL, BUS_DMA_NOWAIT) != 0)
1009 		goto unmap;
1010 
1011 	DNPRINTF(MPI_D_MEM, "%s: mpi_dmamem_alloc size: %d mdm: %#x "
1012 	    "map: %#x nsegs: %d segs: %#x kva: %x\n",
1013 	    DEVNAME(sc), size, mdm, mdm->mdm_map, nsegs, mdm->mdm_seg,
	    mdm->mdm_kva);
1014 
1015 	return (mdm);
1016 
1017 unmap:
1018 	bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, size);
1019 free:
1020 	bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
1021 destroy:
1022 	bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
1023 mdmfree:
1024 	free(mdm, M_DEVBUF, sizeof *mdm);
1025 
1026 	return (NULL);
1027 }
1028 
1029 void
1030 mpi_dmamem_free(struct mpi_softc *sc, struct mpi_dmamem *mdm)
1031 {
1032 	DNPRINTF(MPI_D_MEM, "%s: mpi_dmamem_free %#x\n", DEVNAME(sc), mdm);
1033 
1034 	bus_dmamap_unload(sc->sc_dmat, mdm->mdm_map);
1035 	bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, mdm->mdm_size);
1036 	bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1);
1037 	bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map);
1038 	free(mdm, M_DEVBUF, sizeof *mdm);
1039 }
1040 
1041 int
1042 mpi_alloc_ccbs(struct mpi_softc *sc)
1043 {
1044 	struct mpi_ccb			*ccb;
1045 	u_int8_t			*cmd;
1046 	int				i;
1047 
1048 	SLIST_INIT(&sc->sc_ccb_free);
1049 	mtx_init(&sc->sc_ccb_mtx, IPL_BIO);
1050 
1051 	sc->sc_ccbs = mallocarray(sc->sc_maxcmds, sizeof(struct mpi_ccb),
1052 	    M_DEVBUF, M_WAITOK | M_CANFAIL | M_ZERO);
1053 	if (sc->sc_ccbs == NULL) {
1054 		printf("%s: unable to allocate ccbs\n", DEVNAME(sc));
1055 		return (1);
1056 	}
1057 
1058 	sc->sc_requests = mpi_dmamem_alloc(sc,
1059 	    MPI_REQUEST_SIZE * sc->sc_maxcmds);
1060 	if (sc->sc_requests == NULL) {
1061 		printf("%s: unable to allocate ccb dmamem\n", DEVNAME(sc));
1062 		goto free_ccbs;
1063 	}
1064 	cmd = MPI_DMA_KVA(sc->sc_requests);
1065 	memset(cmd, 0, MPI_REQUEST_SIZE * sc->sc_maxcmds);
1066 
1067 	for (i = 0; i < sc->sc_maxcmds; i++) {
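	/*
	 * Each ccb owns a fixed slot in the request dmamem; its index is
	 * also used as the message context that matches replies back to
	 * the originating ccb.
	 */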
1068 		ccb = &sc->sc_ccbs[i];
1069 
1070 		if (bus_dmamap_create(sc->sc_dmat, MAXPHYS,
1071 		    sc->sc_max_sgl_len, MAXPHYS, 0,
1072 		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
1073 		    &ccb->ccb_dmamap) != 0) {
1074 			printf("%s: unable to create dma map\n", DEVNAME(sc));
1075 			goto free_maps;
1076 		}
1077 
1078 		ccb->ccb_sc = sc;
1079 		ccb->ccb_id = i;
1080 		ccb->ccb_offset = MPI_REQUEST_SIZE * i;
1081 		ccb->ccb_state = MPI_CCB_READY;
1082 
1083 		ccb->ccb_cmd = &cmd[ccb->ccb_offset];
1084 		ccb->ccb_cmd_dva = (u_int32_t)MPI_DMA_DVA(sc->sc_requests) +
1085 		    ccb->ccb_offset;
1086 
1087 		DNPRINTF(MPI_D_CCB, "%s: mpi_alloc_ccbs(%d) ccb: %#x map: %#x "
1088 		    "sc: %#x id: %#x offs: %#x cmd: %#x dva: %#x\n",
1089 		    DEVNAME(sc), i, ccb, ccb->ccb_dmamap, ccb->ccb_sc,
1090 		    ccb->ccb_id, ccb->ccb_offset, ccb->ccb_cmd,
1091 		    ccb->ccb_cmd_dva);
1092 
1093 		mpi_put_ccb(sc, ccb);
1094 	}
1095 
1096 	scsi_iopool_init(&sc->sc_iopool, sc, mpi_get_ccb, mpi_put_ccb);
1097 
1098 	return (0);
1099 
1100 free_maps:
1101 	while ((ccb = mpi_get_ccb(sc)) != NULL)
1102 		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
1103 
1104 	mpi_dmamem_free(sc, sc->sc_requests);
1105 free_ccbs:
1106 	free(sc->sc_ccbs, M_DEVBUF, 0);
1107 
1108 	return (1);
1109 }
1110 
1111 void *
1112 mpi_get_ccb(void *xsc)
1113 {
1114 	struct mpi_softc		*sc = xsc;
1115 	struct mpi_ccb			*ccb;
1116 
1117 	mtx_enter(&sc->sc_ccb_mtx);
1118 	ccb = SLIST_FIRST(&sc->sc_ccb_free);
1119 	if (ccb != NULL) {
1120 		SLIST_REMOVE_HEAD(&sc->sc_ccb_free, ccb_link);
1121 		ccb->ccb_state = MPI_CCB_READY;
1122 	}
1123 	mtx_leave(&sc->sc_ccb_mtx);
1124 
1125 	DNPRINTF(MPI_D_CCB, "%s: mpi_get_ccb %p\n", DEVNAME(sc), ccb);
1126 
1127 	return (ccb);
1128 }
1129 
1130 void
1131 mpi_put_ccb(void *xsc, void *io)
1132 {
1133 	struct mpi_softc		*sc = xsc;
1134 	struct mpi_ccb			*ccb = io;
1135 
1136 	DNPRINTF(MPI_D_CCB, "%s: mpi_put_ccb %p\n", DEVNAME(sc), ccb);
1137 
1138 #ifdef DIAGNOSTIC
1139 	if (ccb->ccb_state == MPI_CCB_FREE)
1140 		panic("mpi_put_ccb: double free");
1141 #endif
1142 
1143 	ccb->ccb_state = MPI_CCB_FREE;
1144 	ccb->ccb_cookie = NULL;
1145 	ccb->ccb_done = NULL;
1146 	memset(ccb->ccb_cmd, 0, MPI_REQUEST_SIZE);
1147 	mtx_enter(&sc->sc_ccb_mtx);
1148 	SLIST_INSERT_HEAD(&sc->sc_ccb_free, ccb, ccb_link);
1149 	mtx_leave(&sc->sc_ccb_mtx);
1150 }
1151 
1152 int
1153 mpi_alloc_replies(struct mpi_softc *sc)
1154 {
1155 	DNPRINTF(MPI_D_MISC, "%s: mpi_alloc_replies\n", DEVNAME(sc));
1156 
1157 	sc->sc_rcbs = mallocarray(sc->sc_repq, sizeof(struct mpi_rcb), M_DEVBUF,
1158 	    M_WAITOK|M_CANFAIL);
1159 	if (sc->sc_rcbs == NULL)
1160 		return (1);
1161 
1162 	sc->sc_replies = mpi_dmamem_alloc(sc, sc->sc_repq * MPI_REPLY_SIZE);
1163 	if (sc->sc_replies == NULL) {
1164 		free(sc->sc_rcbs, M_DEVBUF, 0);
1165 		return (1);
1166 	}
1167 
1168 	return (0);
1169 }
1170 
1171 void
1172 mpi_push_reply(struct mpi_softc *sc, struct mpi_rcb *rcb)
1173 {
1174 	bus_dmamap_sync(sc->sc_dmat, MPI_DMA_MAP(sc->sc_replies),
1175 	    rcb->rcb_offset, MPI_REPLY_SIZE, BUS_DMASYNC_PREREAD);
1176 	mpi_push_reply_db(sc, rcb->rcb_reply_dva);
1177 }
1178 
1179 void
1180 mpi_push_replies(struct mpi_softc *sc)
1181 {
1182 	struct mpi_rcb			*rcb;
1183 	char				*kva = MPI_DMA_KVA(sc->sc_replies);
1184 	int				i;
1185 
1186 	bus_dmamap_sync(sc->sc_dmat, MPI_DMA_MAP(sc->sc_replies), 0,
1187 	    sc->sc_repq * MPI_REPLY_SIZE, BUS_DMASYNC_PREREAD);
1188 
1189 	for (i = 0; i < sc->sc_repq; i++) {
1190 		rcb = &sc->sc_rcbs[i];
1191 
1192 		rcb->rcb_reply = kva + MPI_REPLY_SIZE * i;
1193 		rcb->rcb_offset = MPI_REPLY_SIZE * i;
1194 		rcb->rcb_reply_dva = (u_int32_t)MPI_DMA_DVA(sc->sc_replies) +
1195 		    MPI_REPLY_SIZE * i;
1196 		mpi_push_reply_db(sc, rcb->rcb_reply_dva);
1197 	}
1198 }
1199 
1200 void
1201 mpi_start(struct mpi_softc *sc, struct mpi_ccb *ccb)
1202 {
1203 	struct mpi_msg_request *msg;
1204 
1205 	DNPRINTF(MPI_D_RW, "%s: mpi_start %#x\n", DEVNAME(sc),
1206 	    ccb->ccb_cmd_dva);
1207 
1208 	msg = ccb->ccb_cmd;
1209 	htolem32(&msg->msg_context, ccb->ccb_id);
1210 
1211 	bus_dmamap_sync(sc->sc_dmat, MPI_DMA_MAP(sc->sc_requests),
1212 	    ccb->ccb_offset, MPI_REQUEST_SIZE,
1213 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1214 
1215 	ccb->ccb_state = MPI_CCB_QUEUED;
1216 	bus_space_write_4(sc->sc_iot, sc->sc_ioh,
1217 	    MPI_REQ_QUEUE, ccb->ccb_cmd_dva);
1218 }
1219 
1220 int
1221 mpi_poll(struct mpi_softc *sc, struct mpi_ccb *ccb, int timeout)
1222 {
1223 	void				(*done)(struct mpi_ccb *);
1224 	void				*cookie;
1225 	int				rv = 1;
1226 	u_int32_t			reg;
1227 
1228 	DNPRINTF(MPI_D_INTR, "%s: mpi_poll timeout %d\n", DEVNAME(sc),
1229 	    timeout);
1230 
1231 	done = ccb->ccb_done;
1232 	cookie = ccb->ccb_cookie;
1233 
1234 	ccb->ccb_done = mpi_poll_done;
1235 	ccb->ccb_cookie = &rv;
1236 
1237 	mpi_start(sc, ccb);
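	/*
	 * The timeout argument is in milliseconds; every pass through the
	 * loop that finds the reply queue empty sleeps for 1ms and
	 * decrements it.
	 */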
1238 	while (rv == 1) {
1239 		reg = mpi_pop_reply(sc);
1240 		if (reg == 0xffffffff) {
1241 			if (timeout-- == 0) {
1242 				printf("%s: timeout\n", DEVNAME(sc));
1243 				goto timeout;
1244 			}
1245 
1246 			delay(1000);
1247 			continue;
1248 		}
1249 
1250 		mpi_reply(sc, reg);
1251 	}
1252 
1253 	ccb->ccb_cookie = cookie;
1254 	done(ccb);
1255 
1256 timeout:
1257 	return (rv);
1258 }
1259 
1260 void
1261 mpi_poll_done(struct mpi_ccb *ccb)
1262 {
1263 	int				*rv = ccb->ccb_cookie;
1264 
1265 	*rv = 0;
1266 }
1267 
1268 void
1269 mpi_wait(struct mpi_softc *sc, struct mpi_ccb *ccb)
1270 {
1271 	struct mutex			cookie = MUTEX_INITIALIZER(IPL_BIO);
1272 	void				(*done)(struct mpi_ccb *);
1273 
1274 	done = ccb->ccb_done;
1275 	ccb->ccb_done = mpi_wait_done;
1276 	ccb->ccb_cookie = &cookie;
1277 
1278 	/* XXX this will wait forever for the ccb to complete */
1279 
1280 	mpi_start(sc, ccb);
1281 
1282 	mtx_enter(&cookie);
1283 	while (ccb->ccb_cookie != NULL)
1284 		msleep(ccb, &cookie, PRIBIO, "mpiwait", 0);
1285 	mtx_leave(&cookie);
1286 
1287 	done(ccb);
1288 }
1289 
1290 void
1291 mpi_wait_done(struct mpi_ccb *ccb)
1292 {
1293 	struct mutex			*cookie = ccb->ccb_cookie;
1294 
1295 	mtx_enter(cookie);
1296 	ccb->ccb_cookie = NULL;
1297 	wakeup_one(ccb);
1298 	mtx_leave(cookie);
1299 }
1300 
1301 void
1302 mpi_scsi_cmd(struct scsi_xfer *xs)
1303 {
1304 	struct scsi_link		*link = xs->sc_link;
1305 	struct mpi_softc		*sc = link->adapter_softc;
1306 	struct mpi_ccb			*ccb;
1307 	struct mpi_ccb_bundle		*mcb;
1308 	struct mpi_msg_scsi_io		*io;
1309 
1310 	DNPRINTF(MPI_D_CMD, "%s: mpi_scsi_cmd\n", DEVNAME(sc));
1311 
1312 	KERNEL_UNLOCK();
1313 
1314 	if (xs->cmdlen > MPI_CDB_LEN) {
1315 		DNPRINTF(MPI_D_CMD, "%s: CDB too big %d\n",
1316 		    DEVNAME(sc), xs->cmdlen);
1317 		memset(&xs->sense, 0, sizeof(xs->sense));
1318 		xs->sense.error_code = SSD_ERRCODE_VALID | SSD_ERRCODE_CURRENT;
1319 		xs->sense.flags = SKEY_ILLEGAL_REQUEST;
1320 		xs->sense.add_sense_code = 0x20;
1321 		xs->error = XS_SENSE;
1322 		goto done;
1323 	}
1324 
1325 	ccb = xs->io;
1326 
1327 	DNPRINTF(MPI_D_CMD, "%s: ccb_id: %d xs->flags: 0x%x\n",
1328 	    DEVNAME(sc), ccb->ccb_id, xs->flags);
1329 
1330 	ccb->ccb_cookie = xs;
1331 	ccb->ccb_done = mpi_scsi_cmd_done;
1332 
1333 	mcb = ccb->ccb_cmd;
1334 	io = &mcb->mcb_io;
1335 
1336 	io->function = MPI_FUNCTION_SCSI_IO_REQUEST;
1337 	/*
1338 	 * bus is always 0
1339 	 * io->bus = htole16(sc->sc_bus);
1340 	 */
1341 	io->target_id = link->target;
1342 
1343 	io->cdb_length = xs->cmdlen;
1344 	io->sense_buf_len = sizeof(xs->sense);
1345 	io->msg_flags = MPI_SCSIIO_SENSE_BUF_ADDR_WIDTH_64;
1346 
1347 	htobem16(&io->lun[0], link->lun);
1348 
1349 	switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
1350 	case SCSI_DATA_IN:
1351 		io->direction = MPI_SCSIIO_DIR_READ;
1352 		break;
1353 	case SCSI_DATA_OUT:
1354 		io->direction = MPI_SCSIIO_DIR_WRITE;
1355 		break;
1356 	default:
1357 		io->direction = MPI_SCSIIO_DIR_NONE;
1358 		break;
1359 	}
1360 
1361 	if (sc->sc_porttype != MPI_PORTFACTS_PORTTYPE_SCSI &&
1362 	    (link->quirks & SDEV_NOTAGS))
1363 		io->tagging = MPI_SCSIIO_ATTR_UNTAGGED;
1364 	else
1365 		io->tagging = MPI_SCSIIO_ATTR_SIMPLE_Q;
1366 
1367 	memcpy(io->cdb, xs->cmd, xs->cmdlen);
1368 
1369 	htolem32(&io->data_length, xs->datalen);
1370 
1371 	htolem32(&io->sense_buf_low_addr, ccb->ccb_cmd_dva +
1372 	    offsetof(struct mpi_ccb_bundle, mcb_sense));
1373 
1374 	if (mpi_load_xs(ccb) != 0)
1375 		goto stuffup;
1376 
1377 	timeout_set(&xs->stimeout, mpi_timeout_xs, ccb);
1378 
1379 	if (xs->flags & SCSI_POLL) {
1380 		if (mpi_poll(sc, ccb, xs->timeout) != 0)
1381 			goto stuffup;
1382 	} else
1383 		mpi_start(sc, ccb);
1384 
1385 	KERNEL_LOCK();
1386 	return;
1387 
1388 stuffup:
1389 	xs->error = XS_DRIVER_STUFFUP;
1390 done:
1391 	KERNEL_LOCK();
1392 	scsi_done(xs);
1393 }
1394 
1395 void
1396 mpi_scsi_cmd_done(struct mpi_ccb *ccb)
1397 {
1398 	struct mpi_softc		*sc = ccb->ccb_sc;
1399 	struct scsi_xfer		*xs = ccb->ccb_cookie;
1400 	struct mpi_ccb_bundle		*mcb = ccb->ccb_cmd;
1401 	bus_dmamap_t			dmap = ccb->ccb_dmamap;
1402 	struct mpi_msg_scsi_io_error	*sie;
1403 
1404 	if (xs->datalen != 0) {
1405 		bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
1406 		    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_POSTREAD :
1407 		    BUS_DMASYNC_POSTWRITE);
1408 
1409 		bus_dmamap_unload(sc->sc_dmat, dmap);
1410 	}
1411 
1412 	/* timeout_del */
1413 	xs->error = XS_NOERROR;
1414 	xs->resid = 0;
1415 
1416 	if (ccb->ccb_rcb == NULL) {
1417 		/* no scsi error, we're ok so drop out early */
1418 		xs->status = SCSI_OK;
1419 		KERNEL_LOCK();
1420 		scsi_done(xs);
1421 		KERNEL_UNLOCK();
1422 		return;
1423 	}
1424 
1425 	sie = ccb->ccb_rcb->rcb_reply;
1426 
1427 	DNPRINTF(MPI_D_CMD, "%s: mpi_scsi_cmd_done xs cmd: 0x%02x len: %d "
1428 	    "flags 0x%x\n", DEVNAME(sc), xs->cmd->opcode, xs->datalen,
1429 	    xs->flags);
1430 	DNPRINTF(MPI_D_CMD, "%s:  target_id: %d bus: %d msg_length: %d "
1431 	    "function: 0x%02x\n", DEVNAME(sc), sie->target_id, sie->bus,
1432 	    sie->msg_length, sie->function);
1433 	DNPRINTF(MPI_D_CMD, "%s:  cdb_length: %d sense_buf_length: %d "
1434 	    "msg_flags: 0x%02x\n", DEVNAME(sc), sie->cdb_length,
1435 	    sie->sense_buf_len, sie->msg_flags);
1436 	DNPRINTF(MPI_D_CMD, "%s:  msg_context: 0x%08x\n", DEVNAME(sc),
1437 	    letoh32(sie->msg_context));
1438 	DNPRINTF(MPI_D_CMD, "%s:  scsi_status: 0x%02x scsi_state: 0x%02x "
1439 	    "ioc_status: 0x%04x\n", DEVNAME(sc), sie->scsi_status,
1440 	    sie->scsi_state, letoh16(sie->ioc_status));
1441 	DNPRINTF(MPI_D_CMD, "%s:  ioc_loginfo: 0x%08x\n", DEVNAME(sc),
1442 	    letoh32(sie->ioc_loginfo));
1443 	DNPRINTF(MPI_D_CMD, "%s:  transfer_count: %d\n", DEVNAME(sc),
1444 	    letoh32(sie->transfer_count));
1445 	DNPRINTF(MPI_D_CMD, "%s:  sense_count: %d\n", DEVNAME(sc),
1446 	    letoh32(sie->sense_count));
1447 	DNPRINTF(MPI_D_CMD, "%s:  response_info: 0x%08x\n", DEVNAME(sc),
1448 	    letoh32(sie->response_info));
1449 	DNPRINTF(MPI_D_CMD, "%s:  tag: 0x%04x\n", DEVNAME(sc),
1450 	    letoh16(sie->tag));
1451 
1452 	if (sie->scsi_state & MPI_SCSIIO_ERR_STATE_NO_SCSI_STATUS)
1453 		xs->status = SCSI_TERMINATED;
1454 	else
1455 		xs->status = sie->scsi_status;
1456 	xs->resid = 0;
1457 
1458 	switch (lemtoh16(&sie->ioc_status)) {
1459 	case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:
1460 		xs->resid = xs->datalen - lemtoh32(&sie->transfer_count);
1461 		/* FALLTHROUGH */
1462 	case MPI_IOCSTATUS_SUCCESS:
1463 	case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:
1464 		switch (xs->status) {
1465 		case SCSI_OK:
1466 			xs->error = XS_NOERROR;
1467 			break;
1468 
1469 		case SCSI_CHECK:
1470 			xs->error = XS_SENSE;
1471 			break;
1472 
1473 		case SCSI_BUSY:
1474 		case SCSI_QUEUE_FULL:
1475 			xs->error = XS_BUSY;
1476 			break;
1477 
1478 		default:
1479 			xs->error = XS_DRIVER_STUFFUP;
1480 			break;
1481 		}
1482 		break;
1483 
1484 	case MPI_IOCSTATUS_BUSY:
1485 	case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
1486 		xs->error = XS_BUSY;
1487 		break;
1488 
1489 	case MPI_IOCSTATUS_SCSI_INVALID_BUS:
1490 	case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
1491 	case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
1492 		xs->error = XS_SELTIMEOUT;
1493 		break;
1494 
1495 	case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:
1496 	case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:
1497 		xs->error = XS_RESET;
1498 		break;
1499 
1500 	default:
1501 		xs->error = XS_DRIVER_STUFFUP;
1502 		break;
1503 	}
1504 
1505 	if (sie->scsi_state & MPI_SCSIIO_ERR_STATE_AUTOSENSE_VALID)
1506 		memcpy(&xs->sense, &mcb->mcb_sense, sizeof(xs->sense));
1507 
1508 	DNPRINTF(MPI_D_CMD, "%s:  xs err: 0x%02x status: %d\n", DEVNAME(sc),
1509 	    xs->error, xs->status);
1510 
1511 	mpi_push_reply(sc, ccb->ccb_rcb);
1512 	KERNEL_LOCK();
1513 	scsi_done(xs);
1514 	KERNEL_UNLOCK();
1515 }
1516 
1517 void
1518 mpi_timeout_xs(void *arg)
1519 {
1520 	/* XXX */
1521 }
1522 
1523 int
1524 mpi_load_xs(struct mpi_ccb *ccb)
1525 {
1526 	struct mpi_softc		*sc = ccb->ccb_sc;
1527 	struct scsi_xfer		*xs = ccb->ccb_cookie;
1528 	struct mpi_ccb_bundle		*mcb = ccb->ccb_cmd;
1529 	struct mpi_msg_scsi_io		*io = &mcb->mcb_io;
1530 	struct mpi_sge			*sge = NULL;
1531 	struct mpi_sge			*nsge = &mcb->mcb_sgl[0];
1532 	struct mpi_sge			*ce = NULL, *nce;
1533 	bus_dmamap_t			dmap = ccb->ccb_dmamap;
1534 	u_int32_t			addr, flags;
1535 	int				i, error;
1536 
1537 	if (xs->datalen == 0) {
1538 		htolem32(&nsge->sg_hdr, MPI_SGE_FL_TYPE_SIMPLE |
1539 		    MPI_SGE_FL_LAST | MPI_SGE_FL_EOB | MPI_SGE_FL_EOL);
1540 		return (0);
1541 	}
1542 
1543 	error = bus_dmamap_load(sc->sc_dmat, dmap,
1544 	    xs->data, xs->datalen, NULL, BUS_DMA_STREAMING |
1545 	    ((xs->flags & SCSI_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK));
1546 	if (error) {
1547 		printf("%s: error %d loading dmamap\n", DEVNAME(sc), error);
1548 		return (1);
1549 	}
1550 
1551 	flags = MPI_SGE_FL_TYPE_SIMPLE | MPI_SGE_FL_SIZE_64;
1552 	if (xs->flags & SCSI_DATA_OUT)
1553 		flags |= MPI_SGE_FL_DIR_OUT;
1554 
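	/*
	 * If the scatter-gather list does not fit in the request frame,
	 * the last in-frame slot is turned into a chain element pointing
	 * at the remaining SGEs laid out further into the same request
	 * buffer.
	 */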
1555 	if (dmap->dm_nsegs > sc->sc_first_sgl_len) {
1556 		ce = &mcb->mcb_sgl[sc->sc_first_sgl_len - 1];
1557 		io->chain_offset = (u_int32_t *)ce - (u_int32_t *)io;
1558 	}
1559 
1560 	for (i = 0; i < dmap->dm_nsegs; i++) {
1561 
1562 		if (nsge == ce) {
1563 			nsge++;
1564 			sge->sg_hdr |= htole32(MPI_SGE_FL_LAST);
1565 
1566 			if ((dmap->dm_nsegs - i) > sc->sc_chain_len) {
1567 				nce = &nsge[sc->sc_chain_len - 1];
1568 				addr = (u_int32_t *)nce - (u_int32_t *)nsge;
1569 				addr = addr << 16 |
1570 				    sizeof(struct mpi_sge) * sc->sc_chain_len;
1571 			} else {
1572 				nce = NULL;
1573 				addr = sizeof(struct mpi_sge) *
1574 				    (dmap->dm_nsegs - i);
1575 			}
1576 
1577 			ce->sg_hdr = htole32(MPI_SGE_FL_TYPE_CHAIN |
1578 			    MPI_SGE_FL_SIZE_64 | addr);
1579 
1580 			mpi_dvatosge(ce, ccb->ccb_cmd_dva +
1581 			    ((u_int8_t *)nsge - (u_int8_t *)mcb));
1582 
1583 			ce = nce;
1584 		}
1585 
1586 		DNPRINTF(MPI_D_DMA, "%s:  %d: %d 0x%016llx\n", DEVNAME(sc),
1587 		    i, dmap->dm_segs[i].ds_len,
1588 		    (u_int64_t)dmap->dm_segs[i].ds_addr);
1589 
1590 		sge = nsge++;
1591 
1592 		sge->sg_hdr = htole32(flags | dmap->dm_segs[i].ds_len);
1593 		mpi_dvatosge(sge, dmap->dm_segs[i].ds_addr);
1594 	}
1595 
1596 	/* terminate list */
1597 	sge->sg_hdr |= htole32(MPI_SGE_FL_LAST | MPI_SGE_FL_EOB |
1598 	    MPI_SGE_FL_EOL);
1599 
1600 	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
1601 	    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_PREREAD :
1602 	    BUS_DMASYNC_PREWRITE);
1603 
1604 	return (0);
1605 }
1606 
1607 void
1608 mpi_minphys(struct buf *bp, struct scsi_link *sl)
1609 {
1610 	/* XXX */
1611 	if (bp->b_bcount > MAXPHYS)
1612 		bp->b_bcount = MAXPHYS;
1613 	minphys(bp);
1614 }
1615 
1616 int
1617 mpi_scsi_probe_virtual(struct scsi_link *link)
1618 {
1619 	struct mpi_softc		*sc = link->adapter_softc;
1620 	struct mpi_cfg_hdr		hdr;
1621 	struct mpi_cfg_raid_vol_pg0	*rp0;
1622 	int				len;
1623 	int				rv;
1624 
1625 	if (!ISSET(sc->sc_flags, MPI_F_RAID))
1626 		return (0);
1627 
1628 	if (link->lun > 0)
1629 		return (0);
1630 
1631 	rv = mpi_req_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_RAID_VOL,
1632 	    0, link->target, MPI_PG_POLL, &hdr);
1633 	if (rv != 0)
1634 		return (0);
1635 
1636 	len = hdr.page_length * 4;
1637 	rp0 = malloc(len, M_TEMP, M_NOWAIT);
1638 	if (rp0 == NULL)
1639 		return (ENOMEM);
1640 
1641 	rv = mpi_req_cfg_page(sc, link->target, MPI_PG_POLL, &hdr, 1, rp0, len);
1642 	if (rv == 0)
1643 		SET(link->flags, SDEV_VIRTUAL);
1644 
1645 	free(rp0, M_TEMP, len);
1646 	return (0);
1647 }
1648 
1649 int
1650 mpi_scsi_probe(struct scsi_link *link)
1651 {
1652 	struct mpi_softc		*sc = link->adapter_softc;
1653 	struct mpi_ecfg_hdr		ehdr;
1654 	struct mpi_cfg_sas_dev_pg0	pg0;
1655 	u_int32_t			address;
1656 	int				rv;
1657 
1658 	rv = mpi_scsi_probe_virtual(link);
1659 	if (rv != 0)
1660 		return (rv);
1661 
1662 	if (ISSET(link->flags, SDEV_VIRTUAL))
1663 		return (0);
1664 
1665 	if (sc->sc_porttype != MPI_PORTFACTS_PORTTYPE_SAS)
1666 		return (0);
1667 
1668 	address = MPI_CFG_SAS_DEV_ADDR_BUS | link->target;
1669 
1670 	if (mpi_ecfg_header(sc, MPI_CONFIG_REQ_EXTPAGE_TYPE_SAS_DEVICE, 0,
1671 	    address, &ehdr) != 0)
1672 		return (EIO);
1673 
1674 	if (mpi_ecfg_page(sc, address, &ehdr, 1, &pg0, sizeof(pg0)) != 0)
1675 		return (0);
1676 
1677 	DNPRINTF(MPI_D_MISC, "%s: mpi_scsi_probe sas dev pg 0 for target %d:\n",
1678 	    DEVNAME(sc), link->target);
1679 	DNPRINTF(MPI_D_MISC, "%s:  slot: 0x%04x enc_handle: 0x%04x\n",
1680 	    DEVNAME(sc), letoh16(pg0.slot), letoh16(pg0.enc_handle));
1681 	DNPRINTF(MPI_D_MISC, "%s:  sas_addr: 0x%016llx\n", DEVNAME(sc),
1682 	    letoh64(pg0.sas_addr));
1683 	DNPRINTF(MPI_D_MISC, "%s:  parent_dev_handle: 0x%04x phy_num: 0x%02x "
1684 	    "access_status: 0x%02x\n", DEVNAME(sc),
1685 	    letoh16(pg0.parent_dev_handle), pg0.phy_num, pg0.access_status);
1686 	DNPRINTF(MPI_D_MISC, "%s:  dev_handle: 0x%04x "
1687 	    "bus: 0x%02x target: 0x%02x\n", DEVNAME(sc),
1688 	    letoh16(pg0.dev_handle), pg0.bus, pg0.target);
1689 	DNPRINTF(MPI_D_MISC, "%s:  device_info: 0x%08x\n", DEVNAME(sc),
1690 	    letoh32(pg0.device_info));
1691 	DNPRINTF(MPI_D_MISC, "%s:  flags: 0x%04x physical_port: 0x%02x\n",
1692 	    DEVNAME(sc), letoh16(pg0.flags), pg0.physical_port);
1693 
1694 	if (ISSET(lemtoh32(&pg0.device_info),
1695 	    MPI_CFG_SAS_DEV_0_DEVINFO_ATAPI_DEVICE)) {
1696 		DNPRINTF(MPI_D_MISC, "%s: target %d is an ATAPI device\n",
1697 		    DEVNAME(sc), link->target);
1698 		link->flags |= SDEV_ATAPI;
1699 		link->quirks |= SDEV_ONLYBIG;
1700 	}
1701 
1702 	return (0);
1703 }
1704 
1705 u_int32_t
1706 mpi_read(struct mpi_softc *sc, bus_size_t r)
1707 {
1708 	u_int32_t			rv;
1709 
1710 	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
1711 	    BUS_SPACE_BARRIER_READ);
1712 	rv = bus_space_read_4(sc->sc_iot, sc->sc_ioh, r);
1713 
1714 	DNPRINTF(MPI_D_RW, "%s: mpi_read %#x %#x\n", DEVNAME(sc), r, rv);
1715 
1716 	return (rv);
1717 }
1718 
1719 void
1720 mpi_write(struct mpi_softc *sc, bus_size_t r, u_int32_t v)
1721 {
1722 	DNPRINTF(MPI_D_RW, "%s: mpi_write %#x %#x\n", DEVNAME(sc), r, v);
1723 
1724 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
1725 	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
1726 	    BUS_SPACE_BARRIER_WRITE);
1727 }
1728 
1729 int
1730 mpi_wait_eq(struct mpi_softc *sc, bus_size_t r, u_int32_t mask,
1731     u_int32_t target)
1732 {
1733 	int				i;
1734 
1735 	DNPRINTF(MPI_D_RW, "%s: mpi_wait_eq %#x %#x %#x\n", DEVNAME(sc), r,
1736 	    mask, target);
1737 
1738 	for (i = 0; i < 10000; i++) {
1739 		if ((mpi_read(sc, r) & mask) == target)
1740 			return (0);
1741 		delay(1000);
1742 	}
1743 
1744 	return (1);
1745 }
1746 
1747 int
1748 mpi_wait_ne(struct mpi_softc *sc, bus_size_t r, u_int32_t mask,
1749     u_int32_t target)
1750 {
1751 	int				i;
1752 
1753 	DNPRINTF(MPI_D_RW, "%s: mpi_wait_ne %#x %#x %#x\n", DEVNAME(sc), r,
1754 	    mask, target);
1755 
1756 	for (i = 0; i < 10000; i++) {
1757 		if ((mpi_read(sc, r) & mask) != target)
1758 			return (0);
1759 		delay(1000);
1760 	}
1761 
1762 	return (1);
1763 }
1764 
1765 int
1766 mpi_init(struct mpi_softc *sc)
1767 {
1768 	u_int32_t			db;
1769 	int				i;
1770 
1771 	/* spin until the IOC leaves the RESET state */
1772 	if (mpi_wait_ne(sc, MPI_DOORBELL, MPI_DOORBELL_STATE,
1773 	    MPI_DOORBELL_STATE_RESET) != 0) {
1774 		DNPRINTF(MPI_D_MISC, "%s: mpi_init timeout waiting to leave "
1775 		    "reset state\n", DEVNAME(sc));
1776 		return (1);
1777 	}
1778 
1779 	/* check current ownership */
1780 	db = mpi_read_db(sc);
1781 	if ((db & MPI_DOORBELL_WHOINIT) == MPI_DOORBELL_WHOINIT_PCIPEER) {
1782 		DNPRINTF(MPI_D_MISC, "%s: mpi_init initialised by pci peer\n",
1783 		    DEVNAME(sc));
1784 		return (0);
1785 	}
1786 
1787 	for (i = 0; i < 5; i++) {
1788 		switch (db & MPI_DOORBELL_STATE) {
1789 		case MPI_DOORBELL_STATE_READY:
1790 			DNPRINTF(MPI_D_MISC, "%s: mpi_init ioc is ready\n",
1791 			    DEVNAME(sc));
1792 			return (0);
1793 
1794 		case MPI_DOORBELL_STATE_OPER:
1795 		case MPI_DOORBELL_STATE_FAULT:
1796 			DNPRINTF(MPI_D_MISC, "%s: mpi_init ioc is being "
1797 			    "reset\n" , DEVNAME(sc));
1798 			if (mpi_reset_soft(sc) != 0)
1799 				mpi_reset_hard(sc);
1800 			break;
1801 
1802 		case MPI_DOORBELL_STATE_RESET:
1803 			DNPRINTF(MPI_D_MISC, "%s: mpi_init waiting to come "
1804 			    "out of reset\n", DEVNAME(sc));
1805 			if (mpi_wait_ne(sc, MPI_DOORBELL, MPI_DOORBELL_STATE,
1806 			    MPI_DOORBELL_STATE_RESET) != 0)
1807 				return (1);
1808 			break;
1809 		}
1810 		db = mpi_read_db(sc);
1811 	}
1812 
1813 	return (1);
1814 }
1815 
1816 int
1817 mpi_reset_soft(struct mpi_softc *sc)
1818 {
1819 	DNPRINTF(MPI_D_MISC, "%s: mpi_reset_soft\n", DEVNAME(sc));
1820 
1821 	if (mpi_read_db(sc) & MPI_DOORBELL_INUSE)
1822 		return (1);
1823 
1824 	mpi_write_db(sc,
1825 	    MPI_DOORBELL_FUNCTION(MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET));
1826 	if (mpi_wait_eq(sc, MPI_INTR_STATUS,
1827 	    MPI_INTR_STATUS_IOCDOORBELL, 0) != 0)
1828 		return (1);
1829 
1830 	if (mpi_wait_eq(sc, MPI_DOORBELL, MPI_DOORBELL_STATE,
1831 	    MPI_DOORBELL_STATE_READY) != 0)
1832 		return (1);
1833 
1834 	return (0);
1835 }
1836 
1837 int
1838 mpi_reset_hard(struct mpi_softc *sc)
1839 {
1840 	DNPRINTF(MPI_D_MISC, "%s: mpi_reset_hard\n", DEVNAME(sc));
1841 
1842 	/* enable diagnostic register */
1843 	mpi_write(sc, MPI_WRITESEQ, 0xff);
1844 	mpi_write(sc, MPI_WRITESEQ, MPI_WRITESEQ_1);
1845 	mpi_write(sc, MPI_WRITESEQ, MPI_WRITESEQ_2);
1846 	mpi_write(sc, MPI_WRITESEQ, MPI_WRITESEQ_3);
1847 	mpi_write(sc, MPI_WRITESEQ, MPI_WRITESEQ_4);
1848 	mpi_write(sc, MPI_WRITESEQ, MPI_WRITESEQ_5);
1849 
1850 	/* reset ioc */
1851 	mpi_write(sc, MPI_HOSTDIAG, MPI_HOSTDIAG_RESET_ADAPTER);
1852 
1853 	delay(10000);
1854 
1855 	/* disable diagnostic register */
1856 	mpi_write(sc, MPI_WRITESEQ, 0xff);
1857 
1858 	/* restore pci bits? */
1859 
1860 	/* firmware bits? */
1861 	return (0);
1862 }
1863 
1864 int
1865 mpi_handshake_send(struct mpi_softc *sc, void *buf, size_t dwords)
1866 {
1867 	u_int32_t				*query = buf;
1868 	int					i;
1869 
1870 	/* make sure the doorbell is not in use. */
1871 	if (mpi_read_db(sc) & MPI_DOORBELL_INUSE)
1872 		return (1);
1873 
1874 	/* clear pending doorbell interrupts */
1875 	if (mpi_read_intr(sc) & MPI_INTR_STATUS_DOORBELL)
1876 		mpi_write_intr(sc, 0);
1877 
1878 	/*
1879 	 * first write the doorbell with the handshake function and the
1880 	 * dword count.
1881 	 */
1882 	mpi_write_db(sc, MPI_DOORBELL_FUNCTION(MPI_FUNCTION_HANDSHAKE) |
1883 	    MPI_DOORBELL_DWORDS(dwords));
1884 
1885 	/*
1886 	 * the doorbell used bit will be set because a doorbell function has
1887 	 * started. Wait for the interrupt and then ack it.
1888 	 */
1889 	if (mpi_wait_db_int(sc) != 0)
1890 		return (1);
1891 	mpi_write_intr(sc, 0);
1892 
1893 	/* poll for the acknowledgement. */
1894 	if (mpi_wait_db_ack(sc) != 0)
1895 		return (1);
1896 
1897 	/* write the query through the doorbell. */
1898 	for (i = 0; i < dwords; i++) {
1899 		mpi_write_db(sc, htole32(query[i]));
1900 		if (mpi_wait_db_ack(sc) != 0)
1901 			return (1);
1902 	}
1903 
1904 	return (0);
1905 }
1906 
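/* a handshake reply dword arrives as two 16 bit doorbell reads */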
1907 int
1908 mpi_handshake_recv_dword(struct mpi_softc *sc, u_int32_t *dword)
1909 {
1910 	u_int16_t				*words = (u_int16_t *)dword;
1911 	int					i;
1912 
1913 	for (i = 0; i < 2; i++) {
1914 		if (mpi_wait_db_int(sc) != 0)
1915 			return (1);
1916 		words[i] = letoh16(mpi_read_db(sc) & MPI_DOORBELL_DATA_MASK);
1917 		mpi_write_intr(sc, 0);
1918 	}
1919 
1920 	return (0);
1921 }
1922 
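/*
 * pull a handshake reply out of the doorbell, discarding anything beyond
 * what the caller asked for.
 */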
1923 int
1924 mpi_handshake_recv(struct mpi_softc *sc, void *buf, size_t dwords)
1925 {
1926 	struct mpi_msg_reply			*reply = buf;
1927 	u_int32_t				*dbuf = buf, dummy;
1928 	int					i;
1929 
1930 	/* get the first dword so we can read the length out of the header. */
1931 	if (mpi_handshake_recv_dword(sc, &dbuf[0]) != 0)
1932 		return (1);
1933 
1934 	DNPRINTF(MPI_D_CMD, "%s: mpi_handshake_recv dwords: %zu reply: %d\n",
1935 	    DEVNAME(sc), dwords, reply->msg_length);
1936 
1937 	/*
1938 	 * the total length, in dwords, is in the message length field of the
1939 	 * reply header.
1940 	 */
1941 	for (i = 1; i < MIN(dwords, reply->msg_length); i++) {
1942 		if (mpi_handshake_recv_dword(sc, &dbuf[i]) != 0)
1943 			return (1);
1944 	}
1945 
1946 	/* if there's extra stuff to come off the ioc, discard it */
1947 	while (i++ < reply->msg_length) {
1948 		if (mpi_handshake_recv_dword(sc, &dummy) != 0)
1949 			return (1);
1950 		DNPRINTF(MPI_D_CMD, "%s: mpi_handshake_recv dummy read: "
1951 		    "0x%08x\n", DEVNAME(sc), dummy);
1952 	}
1953 
1954 	/* wait for the doorbell used bit to be reset and clear the intr */
1955 	if (mpi_wait_db_int(sc) != 0)
1956 		return (1);
1957 	mpi_write_intr(sc, 0);
1958 
1959 	return (0);
1960 }
1961 
1962 void
1963 mpi_empty_done(struct mpi_ccb *ccb)
1964 {
1965 	/* nothing to do */
1966 }
1967 
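/*
 * fetch the ioc facts via the doorbell handshake and size the command
 * queue, reply queue and sgls from the reply.
 */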
1968 int
1969 mpi_iocfacts(struct mpi_softc *sc)
1970 {
1971 	struct mpi_msg_iocfacts_request		ifq;
1972 	struct mpi_msg_iocfacts_reply		ifp;
1973 
1974 	DNPRINTF(MPI_D_MISC, "%s: mpi_iocfacts\n", DEVNAME(sc));
1975 
1976 	memset(&ifq, 0, sizeof(ifq));
1977 	memset(&ifp, 0, sizeof(ifp));
1978 
1979 	ifq.function = MPI_FUNCTION_IOC_FACTS;
1980 	ifq.chain_offset = 0;
1981 	ifq.msg_flags = 0;
1982 	ifq.msg_context = htole32(0xdeadbeef);
1983 
1984 	if (mpi_handshake_send(sc, &ifq, dwordsof(ifq)) != 0) {
1985 		DNPRINTF(MPI_D_MISC, "%s: mpi_iocfacts send failed\n",
1986 		    DEVNAME(sc));
1987 		return (1);
1988 	}
1989 
1990 	if (mpi_handshake_recv(sc, &ifp, dwordsof(ifp)) != 0) {
1991 		DNPRINTF(MPI_D_MISC, "%s: mpi_iocfacts recv failed\n",
1992 		    DEVNAME(sc));
1993 		return (1);
1994 	}
1995 
1996 	DNPRINTF(MPI_D_MISC, "%s:  func: 0x%02x len: %d msgver: %d.%d\n",
1997 	    DEVNAME(sc), ifp.function, ifp.msg_length,
1998 	    ifp.msg_version_maj, ifp.msg_version_min);
1999 	DNPRINTF(MPI_D_MISC, "%s:  msgflags: 0x%02x iocnumber: 0x%02x "
2000 	    "hdrver: %d.%d\n", DEVNAME(sc), ifp.msg_flags,
2001 	    ifp.ioc_number, ifp.header_version_maj,
2002 	    ifp.header_version_min);
2003 	DNPRINTF(MPI_D_MISC, "%s:  message context: 0x%08x\n", DEVNAME(sc),
2004 	    letoh32(ifp.msg_context));
2005 	DNPRINTF(MPI_D_MISC, "%s:  iocstatus: 0x%04x ioexcept: 0x%04x\n",
2006 	    DEVNAME(sc), letoh16(ifp.ioc_status),
2007 	    letoh16(ifp.ioc_exceptions));
2008 	DNPRINTF(MPI_D_MISC, "%s:  iocloginfo: 0x%08x\n", DEVNAME(sc),
2009 	    letoh32(ifp.ioc_loginfo));
2010 	DNPRINTF(MPI_D_MISC, "%s:  flags: 0x%02x blocksize: %d whoinit: 0x%02x "
2011 	    "maxchdepth: %d\n", DEVNAME(sc), ifp.flags,
2012 	    ifp.block_size, ifp.whoinit, ifp.max_chain_depth);
2013 	DNPRINTF(MPI_D_MISC, "%s:  reqfrsize: %d replyqdepth: %d\n",
2014 	    DEVNAME(sc), letoh16(ifp.request_frame_size),
2015 	    letoh16(ifp.reply_queue_depth));
2016 	DNPRINTF(MPI_D_MISC, "%s:  productid: 0x%04x\n", DEVNAME(sc),
2017 	    letoh16(ifp.product_id));
2018 	DNPRINTF(MPI_D_MISC, "%s:  hostmfahiaddr: 0x%08x\n", DEVNAME(sc),
2019 	    letoh32(ifp.current_host_mfa_hi_addr));
2020 	DNPRINTF(MPI_D_MISC, "%s:  event_state: 0x%02x number_of_ports: %d "
2021 	    "global_credits: %d\n",
2022 	    DEVNAME(sc), ifp.event_state, ifp.number_of_ports,
2023 	    letoh16(ifp.global_credits));
2024 	DNPRINTF(MPI_D_MISC, "%s:  sensebufhiaddr: 0x%08x\n", DEVNAME(sc),
2025 	    letoh32(ifp.current_sense_buffer_hi_addr));
2026 	DNPRINTF(MPI_D_MISC, "%s:  maxbus: %d maxdev: %d replyfrsize: %d\n",
2027 	    DEVNAME(sc), ifp.max_buses, ifp.max_devices,
2028 	    letoh16(ifp.current_reply_frame_size));
2029 	DNPRINTF(MPI_D_MISC, "%s:  fw_image_size: %d\n", DEVNAME(sc),
2030 	    letoh32(ifp.fw_image_size));
2031 	DNPRINTF(MPI_D_MISC, "%s:  ioc_capabilities: 0x%08x\n", DEVNAME(sc),
2032 	    letoh32(ifp.ioc_capabilities));
2033 	DNPRINTF(MPI_D_MISC, "%s:  fw_version: %d.%d fw_version_unit: 0x%02x "
2034 	    "fw_version_dev: 0x%02x\n", DEVNAME(sc),
2035 	    ifp.fw_version_maj, ifp.fw_version_min,
2036 	    ifp.fw_version_unit, ifp.fw_version_dev);
2037 	DNPRINTF(MPI_D_MISC, "%s:  hi_priority_queue_depth: 0x%04x\n",
2038 	    DEVNAME(sc), letoh16(ifp.hi_priority_queue_depth));
2039 	DNPRINTF(MPI_D_MISC, "%s:  host_page_buffer_sge: hdr: 0x%08x "
2040 	    "addr 0x%08x%08x\n", DEVNAME(sc),
2041 	    letoh32(ifp.host_page_buffer_sge.sg_hdr),
2042 	    letoh32(ifp.host_page_buffer_sge.sg_addr_hi),
2043 	    letoh32(ifp.host_page_buffer_sge.sg_addr_lo));
2044 
2045 	sc->sc_fw_maj = ifp.fw_version_maj;
2046 	sc->sc_fw_min = ifp.fw_version_min;
2047 	sc->sc_fw_unit = ifp.fw_version_unit;
2048 	sc->sc_fw_dev = ifp.fw_version_dev;
2049 
2050 	sc->sc_maxcmds = lemtoh16(&ifp.global_credits);
2051 	sc->sc_maxchdepth = ifp.max_chain_depth;
2052 	sc->sc_ioc_number = ifp.ioc_number;
2053 	if (sc->sc_flags & MPI_F_SPI)
2054 		sc->sc_buswidth = 16;
2055 	else
2056 		sc->sc_buswidth =
2057 		    (ifp.max_devices == 0) ? 256 : ifp.max_devices;
2058 	if (ifp.flags & MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT)
2059 		sc->sc_fw_len = lemtoh32(&ifp.fw_image_size);
2060 
2061 	sc->sc_repq = MIN(MPI_REPLYQ_DEPTH, lemtoh16(&ifp.reply_queue_depth));
2062 
2063 	/*
2064 	 * you can fit sg elements on the end of the io cmd if they fit in the
2065 	 * request frame size.
2066 	 */
2067 	sc->sc_first_sgl_len = ((lemtoh16(&ifp.request_frame_size) * 4) -
2068 	    sizeof(struct mpi_msg_scsi_io)) / sizeof(struct mpi_sge);
2069 	DNPRINTF(MPI_D_MISC, "%s:   first sgl len: %d\n", DEVNAME(sc),
2070 	    sc->sc_first_sgl_len);
2071 
2072 	sc->sc_chain_len = (lemtoh16(&ifp.request_frame_size) * 4) /
2073 	    sizeof(struct mpi_sge);
2074 	DNPRINTF(MPI_D_MISC, "%s:   chain len: %d\n", DEVNAME(sc),
2075 	    sc->sc_chain_len);
2076 
2077 	/* the sgl trailing the io cmd loses an entry to the chain element. */
2078 	sc->sc_max_sgl_len = MPI_MAX_SGL - 1;
2079 	/* the sgl chains lose an entry for each chain element */
2080 	sc->sc_max_sgl_len -= (MPI_MAX_SGL - sc->sc_first_sgl_len) /
2081 	    sc->sc_chain_len;
2082 	DNPRINTF(MPI_D_MISC, "%s:   max sgl len: %d\n", DEVNAME(sc),
2083 	    sc->sc_max_sgl_len);
2084 
2085 	/* XXX we're ignoring the max chain depth */
2086 
2087 	return (0);
2088 }
2089 
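/*
 * send an ioc init via the doorbell handshake to claim ownership of the
 * ioc and tell it where the high bits of the request and sense buffers
 * live.
 */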
2090 int
2091 mpi_iocinit(struct mpi_softc *sc)
2092 {
2093 	struct mpi_msg_iocinit_request		iiq;
2094 	struct mpi_msg_iocinit_reply		iip;
2095 	u_int32_t				hi_addr;
2096 
2097 	DNPRINTF(MPI_D_MISC, "%s: mpi_iocinit\n", DEVNAME(sc));
2098 
2099 	memset(&iiq, 0, sizeof(iiq));
2100 	memset(&iip, 0, sizeof(iip));
2101 
2102 	iiq.function = MPI_FUNCTION_IOC_INIT;
2103 	iiq.whoinit = MPI_WHOINIT_HOST_DRIVER;
2104 
2105 	iiq.max_devices = (sc->sc_buswidth == 256) ? 0 : sc->sc_buswidth;
2106 	iiq.max_buses = 1;
2107 
2108 	iiq.msg_context = htole32(0xd00fd00f);
2109 
2110 	iiq.reply_frame_size = htole16(MPI_REPLY_SIZE);
2111 
2112 	hi_addr = (u_int32_t)(MPI_DMA_DVA(sc->sc_requests) >> 32);
2113 	htolem32(&iiq.host_mfa_hi_addr, hi_addr);
2114 	htolem32(&iiq.sense_buffer_hi_addr, hi_addr);
2115 
2116 	iiq.msg_version_maj = 0x01;
2117 	iiq.msg_version_min = 0x02;
2118 
2119 	iiq.hdr_version_unit = 0x0d;
2120 	iiq.hdr_version_dev = 0x00;
2121 
2122 	if (mpi_handshake_send(sc, &iiq, dwordsof(iiq)) != 0) {
2123 		DNPRINTF(MPI_D_MISC, "%s: mpi_iocinit send failed\n",
2124 		    DEVNAME(sc));
2125 		return (1);
2126 	}
2127 
2128 	if (mpi_handshake_recv(sc, &iip, dwordsof(iip)) != 0) {
2129 		DNPRINTF(MPI_D_MISC, "%s: mpi_iocinit recv failed\n",
2130 		    DEVNAME(sc));
2131 		return (1);
2132 	}
2133 
2134 	DNPRINTF(MPI_D_MISC, "%s:  function: 0x%02x msg_length: %d "
2135 	    "whoinit: 0x%02x\n", DEVNAME(sc), iip.function,
2136 	    iip.msg_length, iip.whoinit);
2137 	DNPRINTF(MPI_D_MISC, "%s:  msg_flags: 0x%02x max_buses: %d "
2138 	    "max_devices: %d flags: 0x%02x\n", DEVNAME(sc), iip.msg_flags,
2139 	    iip.max_buses, iip.max_devices, iip.flags);
2140 	DNPRINTF(MPI_D_MISC, "%s:  msg_context: 0x%08x\n", DEVNAME(sc),
2141 	    letoh32(iip.msg_context));
2142 	DNPRINTF(MPI_D_MISC, "%s:  ioc_status: 0x%04x\n", DEVNAME(sc),
2143 	    letoh16(iip.ioc_status));
2144 	DNPRINTF(MPI_D_MISC, "%s:  ioc_loginfo: 0x%08x\n", DEVNAME(sc),
2145 	    letoh32(iip.ioc_loginfo));
2146 
2147 	return (0);
2148 }
2149 
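/* query port facts to learn the port type and the ioc's own scsi id */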
2150 int
2151 mpi_portfacts(struct mpi_softc *sc)
2152 {
2153 	struct mpi_ccb				*ccb;
2154 	struct mpi_msg_portfacts_request	*pfq;
2155 	volatile struct mpi_msg_portfacts_reply	*pfp;
2156 	int					rv = 1;
2157 
2158 	DNPRINTF(MPI_D_MISC, "%s: mpi_portfacts\n", DEVNAME(sc));
2159 
2160 	ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP);
2161 	if (ccb == NULL) {
2162 		DNPRINTF(MPI_D_MISC, "%s: mpi_portfacts ccb_get\n",
2163 		    DEVNAME(sc));
2164 		return (rv);
2165 	}
2166 
2167 	ccb->ccb_done = mpi_empty_done;
2168 	pfq = ccb->ccb_cmd;
2169 
2170 	pfq->function = MPI_FUNCTION_PORT_FACTS;
2171 	pfq->chain_offset = 0;
2172 	pfq->msg_flags = 0;
2173 	pfq->port_number = 0;
2174 
2175 	if (mpi_poll(sc, ccb, 50000) != 0) {
2176 		DNPRINTF(MPI_D_MISC, "%s: mpi_portfacts poll\n", DEVNAME(sc));
2177 		goto err;
2178 	}
2179 
2180 	if (ccb->ccb_rcb == NULL) {
2181 		DNPRINTF(MPI_D_MISC, "%s: empty portfacts reply\n",
2182 		    DEVNAME(sc));
2183 		goto err;
2184 	}
2185 	pfp = ccb->ccb_rcb->rcb_reply;
2186 
2187 	DNPRINTF(MPI_D_MISC, "%s:  function: 0x%02x msg_length: %d\n",
2188 	    DEVNAME(sc), pfp->function, pfp->msg_length);
2189 	DNPRINTF(MPI_D_MISC, "%s:  msg_flags: 0x%02x port_number: %d\n",
2190 	    DEVNAME(sc), pfp->msg_flags, pfp->port_number);
2191 	DNPRINTF(MPI_D_MISC, "%s:  msg_context: 0x%08x\n", DEVNAME(sc),
2192 	    letoh32(pfp->msg_context));
2193 	DNPRINTF(MPI_D_MISC, "%s:  ioc_status: 0x%04x\n", DEVNAME(sc),
2194 	    letoh16(pfp->ioc_status));
2195 	DNPRINTF(MPI_D_MISC, "%s:  ioc_loginfo: 0x%08x\n", DEVNAME(sc),
2196 	    letoh32(pfp->ioc_loginfo));
2197 	DNPRINTF(MPI_D_MISC, "%s:  max_devices: %d port_type: 0x%02x\n",
2198 	    DEVNAME(sc), letoh16(pfp->max_devices), pfp->port_type);
2199 	DNPRINTF(MPI_D_MISC, "%s:  protocol_flags: 0x%04x port_scsi_id: %d\n",
2200 	    DEVNAME(sc), letoh16(pfp->protocol_flags),
2201 	    letoh16(pfp->port_scsi_id));
2202 	DNPRINTF(MPI_D_MISC, "%s:  max_persistent_ids: %d "
2203 	    "max_posted_cmd_buffers: %d\n", DEVNAME(sc),
2204 	    letoh16(pfp->max_persistent_ids),
2205 	    letoh16(pfp->max_posted_cmd_buffers));
2206 	DNPRINTF(MPI_D_MISC, "%s:  max_lan_buckets: %d\n", DEVNAME(sc),
2207 	    letoh16(pfp->max_lan_buckets));
2208 
2209 	sc->sc_porttype = pfp->port_type;
2210 	if (sc->sc_target == -1)
2211 		sc->sc_target = lemtoh16(&pfp->port_scsi_id);
2212 
2213 	mpi_push_reply(sc, ccb->ccb_rcb);
2214 	rv = 0;
2215 err:
2216 	scsi_io_put(&sc->sc_iopool, ccb);
2217 
2218 	return (rv);
2219 }
2220 
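/* read ioc page 1 and turn off reply coalescing if the firmware has it on */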
2221 int
2222 mpi_cfg_coalescing(struct mpi_softc *sc)
2223 {
2224 	struct mpi_cfg_hdr		hdr;
2225 	struct mpi_cfg_ioc_pg1		pg;
2226 	u_int32_t			flags;
2227 
2228 	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_IOC, 1, 0, &hdr) != 0) {
2229 		DNPRINTF(MPI_D_MISC, "%s: unable to fetch IOC page 1 header\n",
2230 		    DEVNAME(sc));
2231 		return (1);
2232 	}
2233 
2234 	if (mpi_cfg_page(sc, 0, &hdr, 1, &pg, sizeof(pg)) != 0) {
2235 		DNPRINTF(MPI_D_MISC, "%s: unable to fetch IOC page 1\n",
2236 		    DEVNAME(sc));
2237 		return (1);
2238 	}
2239 
2240 	DNPRINTF(MPI_D_MISC, "%s: IOC page 1\n", DEVNAME(sc));
2241 	DNPRINTF(MPI_D_MISC, "%s:  flags: 0x%08x\n", DEVNAME(sc),
2242 	    letoh32(pg.flags));
2243 	DNPRINTF(MPI_D_MISC, "%s:  coalescing_timeout: %d\n", DEVNAME(sc),
2244 	    letoh32(pg.coalescing_timeout));
2245 	DNPRINTF(MPI_D_MISC, "%s:  coalescing_depth: %d pci_slot_num: %d\n",
2246 	    DEVNAME(sc), pg.coalescing_depth, pg.pci_slot_num);
2247 
2248 	flags = lemtoh32(&pg.flags);
2249 	if (!ISSET(flags, MPI_CFG_IOC_1_REPLY_COALESCING))
2250 		return (0);
2251 
2252 	CLR(pg.flags, htole32(MPI_CFG_IOC_1_REPLY_COALESCING));
2253 	if (mpi_cfg_page(sc, 0, &hdr, 0, &pg, sizeof(pg)) != 0) {
2254 		DNPRINTF(MPI_D_MISC, "%s: unable to clear coalescing\n",
2255 		    DEVNAME(sc));
2256 		return (1);
2257 	}
2258 
2259 	return (0);
2260 }
2261 
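/*
 * post a long-lived command asking the ioc for event notifications and
 * set up the queue and handler used to ack them.
 */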
2262 int
2263 mpi_eventnotify(struct mpi_softc *sc)
2264 {
2265 	struct mpi_ccb				*ccb;
2266 	struct mpi_msg_event_request		*enq;
2267 
2268 	ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP);
2269 	if (ccb == NULL) {
2270 		DNPRINTF(MPI_D_MISC, "%s: mpi_eventnotify ccb_get\n",
2271 		    DEVNAME(sc));
2272 		return (1);
2273 	}
2274 
2275 	sc->sc_evt_ccb = ccb;
2276 	SIMPLEQ_INIT(&sc->sc_evt_ack_queue);
2277 	mtx_init(&sc->sc_evt_ack_mtx, IPL_BIO);
2278 	scsi_ioh_set(&sc->sc_evt_ack_handler, &sc->sc_iopool,
2279 	    mpi_eventack, sc);
2280 
2281 	ccb->ccb_done = mpi_eventnotify_done;
2282 	enq = ccb->ccb_cmd;
2283 
2284 	enq->function = MPI_FUNCTION_EVENT_NOTIFICATION;
2285 	enq->chain_offset = 0;
2286 	enq->event_switch = MPI_EVENT_SWITCH_ON;
2287 
2288 	mpi_start(sc, ccb);
2289 	return (0);
2290 }
2291 
2292 void
2293 mpi_eventnotify_done(struct mpi_ccb *ccb)
2294 {
2295 	struct mpi_softc			*sc = ccb->ccb_sc;
2296 	struct mpi_rcb				*rcb = ccb->ccb_rcb;
2297 	struct mpi_msg_event_reply		*enp = rcb->rcb_reply;
2298 
2299 	DNPRINTF(MPI_D_EVT, "%s: mpi_eventnotify_done\n", DEVNAME(sc));
2300 
2301 	DNPRINTF(MPI_D_EVT, "%s:  function: 0x%02x msg_length: %d "
2302 	    "data_length: %d\n", DEVNAME(sc), enp->function, enp->msg_length,
2303 	    letoh16(enp->data_length));
2304 	DNPRINTF(MPI_D_EVT, "%s:  ack_required: %d msg_flags 0x%02x\n",
2305 	    DEVNAME(sc), enp->ack_required, enp->msg_flags);
2306 	DNPRINTF(MPI_D_EVT, "%s:  msg_context: 0x%08x\n", DEVNAME(sc),
2307 	    letoh32(enp->msg_context));
2308 	DNPRINTF(MPI_D_EVT, "%s:  ioc_status: 0x%04x\n", DEVNAME(sc),
2309 	    letoh16(enp->ioc_status));
2310 	DNPRINTF(MPI_D_EVT, "%s:  ioc_loginfo: 0x%08x\n", DEVNAME(sc),
2311 	    letoh32(enp->ioc_loginfo));
2312 	DNPRINTF(MPI_D_EVT, "%s:  event: 0x%08x\n", DEVNAME(sc),
2313 	    letoh32(enp->event));
2314 	DNPRINTF(MPI_D_EVT, "%s:  event_context: 0x%08x\n", DEVNAME(sc),
2315 	    letoh32(enp->event_context));
2316 
2317 	switch (lemtoh32(&enp->event)) {
2318 	/* ignore these */
2319 	case MPI_EVENT_EVENT_CHANGE:
2320 	case MPI_EVENT_SAS_PHY_LINK_STATUS:
2321 		break;
2322 
2323 	case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
2324 		if (sc->sc_scsibus == NULL)
2325 			break;
2326 
2327 		if (mpi_evt_sas(sc, rcb) != 0) {
2328 			/* reply is freed later on */
2329 			return;
2330 		}
2331 		break;
2332 
2333 	case MPI_EVENT_RESCAN:
2334 		if (sc->sc_scsibus != NULL &&
2335 		    sc->sc_porttype == MPI_PORTFACTS_PORTTYPE_FC)
2336 			task_add(systq, &sc->sc_evt_rescan);
2337 		break;
2338 
2339 	default:
2340 		DNPRINTF(MPI_D_EVT, "%s:  unhandled event 0x%02x\n",
2341 		    DEVNAME(sc), lemtoh32(&enp->event));
2342 		break;
2343 	}
2344 
2345 	mpi_eventnotify_free(sc, rcb);
2346 }
2347 
2348 void
2349 mpi_eventnotify_free(struct mpi_softc *sc, struct mpi_rcb *rcb)
2350 {
2351 	struct mpi_msg_event_reply		*enp = rcb->rcb_reply;
2352 
2353 	if (enp->ack_required) {
2354 		mtx_enter(&sc->sc_evt_ack_mtx);
2355 		SIMPLEQ_INSERT_TAIL(&sc->sc_evt_ack_queue, rcb, rcb_link);
2356 		mtx_leave(&sc->sc_evt_ack_mtx);
2357 		scsi_ioh_add(&sc->sc_evt_ack_handler);
2358 	} else
2359 		mpi_push_reply(sc, rcb);
2360 }
2361 
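/*
 * handle a sas device status change: probe targets that appeared and
 * queue a target reset/detach for ones that stopped responding.
 */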
2362 int
2363 mpi_evt_sas(struct mpi_softc *sc, struct mpi_rcb *rcb)
2364 {
2365 	struct mpi_evt_sas_change		*ch;
2366 	u_int8_t				*data;
2367 
2368 	data = rcb->rcb_reply;
2369 	data += sizeof(struct mpi_msg_event_reply);
2370 	ch = (struct mpi_evt_sas_change *)data;
2371 
2372 	if (ch->bus != 0)
2373 		return (0);
2374 
2375 	switch (ch->reason) {
2376 	case MPI_EVT_SASCH_REASON_ADDED:
2377 	case MPI_EVT_SASCH_REASON_NO_PERSIST_ADDED:
2378 		KERNEL_LOCK();
2379 		if (scsi_req_probe(sc->sc_scsibus, ch->target, -1) != 0) {
2380 			printf("%s: unable to request attach of %d\n",
2381 			    DEVNAME(sc), ch->target);
2382 		}
2383 		KERNEL_UNLOCK();
2384 		break;
2385 
2386 	case MPI_EVT_SASCH_REASON_NOT_RESPONDING:
2387 		KERNEL_LOCK();
2388 		scsi_activate(sc->sc_scsibus, ch->target, -1, DVACT_DEACTIVATE);
2389 		KERNEL_UNLOCK();
2390 
2391 		mtx_enter(&sc->sc_evt_scan_mtx);
2392 		SIMPLEQ_INSERT_TAIL(&sc->sc_evt_scan_queue, rcb, rcb_link);
2393 		mtx_leave(&sc->sc_evt_scan_mtx);
2394 		scsi_ioh_add(&sc->sc_evt_scan_handler);
2395 
2396 		/* we'll handle event ack later on */
2397 		return (1);
2398 
2399 	case MPI_EVT_SASCH_REASON_SMART_DATA:
2400 	case MPI_EVT_SASCH_REASON_UNSUPPORTED:
2401 	case MPI_EVT_SASCH_REASON_INTERNAL_RESET:
2402 		break;
2403 	default:
2404 		printf("%s: unknown reason for SAS device status change: "
2405 		    "0x%02x\n", DEVNAME(sc), ch->reason);
2406 		break;
2407 	}
2408 
2409 	return (0);
2410 }
2411 
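/*
 * take a "not responding" event off the scan queue and issue a target
 * reset for the device before it gets detached.
 */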
2412 void
2413 mpi_evt_sas_detach(void *cookie, void *io)
2414 {
2415 	struct mpi_softc			*sc = cookie;
2416 	struct mpi_ccb				*ccb = io;
2417 	struct mpi_rcb				*rcb, *next;
2418 	struct mpi_msg_event_reply		*enp;
2419 	struct mpi_evt_sas_change		*ch;
2420 	struct mpi_msg_scsi_task_request	*str;
2421 
2422 	DNPRINTF(MPI_D_EVT, "%s: event sas detach handler\n", DEVNAME(sc));
2423 
2424 	mtx_enter(&sc->sc_evt_scan_mtx);
2425 	rcb = SIMPLEQ_FIRST(&sc->sc_evt_scan_queue);
2426 	if (rcb != NULL) {
2427 		next = SIMPLEQ_NEXT(rcb, rcb_link);
2428 		SIMPLEQ_REMOVE_HEAD(&sc->sc_evt_scan_queue, rcb_link);
2429 	}
2430 	mtx_leave(&sc->sc_evt_scan_mtx);
2431 
2432 	if (rcb == NULL) {
2433 		scsi_io_put(&sc->sc_iopool, ccb);
2434 		return;
2435 	}
2436 
2437 	enp = rcb->rcb_reply;
2438 	ch = (struct mpi_evt_sas_change *)(enp + 1);
2439 
2440 	ccb->ccb_done = mpi_evt_sas_detach_done;
2441 	str = ccb->ccb_cmd;
2442 
2443 	str->target_id = ch->target;
2444 	str->bus = 0;
2445 	str->function = MPI_FUNCTION_SCSI_TASK_MGMT;
2446 
2447 	str->task_type = MPI_MSG_SCSI_TASK_TYPE_TARGET_RESET;
2448 
2449 	mpi_eventnotify_free(sc, rcb);
2450 
2451 	mpi_start(sc, ccb);
2452 
2453 	if (next != NULL)
2454 		scsi_ioh_add(&sc->sc_evt_scan_handler);
2455 }
2456 
2457 void
2458 mpi_evt_sas_detach_done(struct mpi_ccb *ccb)
2459 {
2460 	struct mpi_softc			*sc = ccb->ccb_sc;
2461 	struct mpi_msg_scsi_task_reply		*r = ccb->ccb_rcb->rcb_reply;
2462 
2463 	KERNEL_LOCK();
2464 	if (scsi_req_detach(sc->sc_scsibus, r->target_id, -1,
2465 	    DETACH_FORCE) != 0) {
2466 		printf("%s: unable to request detach of %d\n",
2467 		    DEVNAME(sc), r->target_id);
2468 	}
2469 	KERNEL_UNLOCK();
2470 
2471 	mpi_push_reply(sc, ccb->ccb_rcb);
2472 	scsi_io_put(&sc->sc_iopool, ccb);
2473 }
2474 
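/*
 * walk the fc device config pages to build a bitmap of live targets,
 * then probe or detach scsi links so the bus matches.
 */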
2475 void
2476 mpi_fc_rescan(void *xsc)
2477 {
2478 	struct mpi_softc			*sc = xsc;
2479 	struct mpi_cfg_hdr			hdr;
2480 	struct mpi_cfg_fc_device_pg0		pg;
2481 	struct scsi_link			*link;
2482 	u_int8_t				devmap[256 / NBBY];
2483 	u_int32_t				id = 0xffffff;
2484 	int					i;
2485 
2486 	memset(devmap, 0, sizeof(devmap));
2487 
2488 	do {
2489 		if (mpi_req_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_FC_DEV, 0,
2490 		    id, 0, &hdr) != 0) {
2491 			printf("%s: header get for rescan of 0x%08x failed\n",
2492 			    DEVNAME(sc), id);
2493 			return;
2494 		}
2495 
2496 		memset(&pg, 0, sizeof(pg));
2497 		if (mpi_req_cfg_page(sc, id, 0, &hdr, 1, &pg, sizeof(pg)) != 0)
2498 			break;
2499 
2500 		if (ISSET(pg.flags, MPI_CFG_FC_DEV_0_FLAGS_BUSADDR_VALID) &&
2501 		    pg.current_bus == 0)
2502 			setbit(devmap, pg.current_target_id);
2503 
2504 		id = lemtoh32(&pg.port_id);
2505 	} while (id <= 0xff0000);
2506 
2507 	for (i = 0; i < sc->sc_buswidth; i++) {
2508 		link = scsi_get_link(sc->sc_scsibus, i, 0);
2509 
2510 		if (isset(devmap, i)) {
2511 			if (link == NULL)
2512 				scsi_probe_target(sc->sc_scsibus, i);
2513 		} else {
2514 			if (link != NULL) {
2515 				scsi_activate(sc->sc_scsibus, i, -1,
2516 				    DVACT_DEACTIVATE);
2517 				scsi_detach_target(sc->sc_scsibus, i,
2518 				    DETACH_FORCE);
2519 			}
2520 		}
2521 	}
2522 }
2523 
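/* take a reply off the ack queue and send an event ack for it */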
2524 void
2525 mpi_eventack(void *cookie, void *io)
2526 {
2527 	struct mpi_softc			*sc = cookie;
2528 	struct mpi_ccb				*ccb = io;
2529 	struct mpi_rcb				*rcb, *next;
2530 	struct mpi_msg_event_reply		*enp;
2531 	struct mpi_msg_eventack_request		*eaq;
2532 
2533 	DNPRINTF(MPI_D_EVT, "%s: event ack\n", DEVNAME(sc));
2534 
2535 	mtx_enter(&sc->sc_evt_ack_mtx);
2536 	rcb = SIMPLEQ_FIRST(&sc->sc_evt_ack_queue);
2537 	if (rcb != NULL) {
2538 		next = SIMPLEQ_NEXT(rcb, rcb_link);
2539 		SIMPLEQ_REMOVE_HEAD(&sc->sc_evt_ack_queue, rcb_link);
2540 	}
2541 	mtx_leave(&sc->sc_evt_ack_mtx);
2542 
2543 	if (rcb == NULL) {
2544 		scsi_io_put(&sc->sc_iopool, ccb);
2545 		return;
2546 	}
2547 
2548 	enp = rcb->rcb_reply;
2549 
2550 	ccb->ccb_done = mpi_eventack_done;
2551 	eaq = ccb->ccb_cmd;
2552 
2553 	eaq->function = MPI_FUNCTION_EVENT_ACK;
2554 
2555 	eaq->event = enp->event;
2556 	eaq->event_context = enp->event_context;
2557 
2558 	mpi_push_reply(sc, rcb);
2559 	mpi_start(sc, ccb);
2560 
2561 	if (next != NULL)
2562 		scsi_ioh_add(&sc->sc_evt_ack_handler);
2563 }
2564 
2565 void
2566 mpi_eventack_done(struct mpi_ccb *ccb)
2567 {
2568 	struct mpi_softc			*sc = ccb->ccb_sc;
2569 
2570 	DNPRINTF(MPI_D_EVT, "%s: event ack done\n", DEVNAME(sc));
2571 
2572 	mpi_push_reply(sc, ccb->ccb_rcb);
2573 	scsi_io_put(&sc->sc_iopool, ccb);
2574 }
2575 
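/* enable the port so the ioc starts handling io for it */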
2576 int
2577 mpi_portenable(struct mpi_softc *sc)
2578 {
2579 	struct mpi_ccb				*ccb;
2580 	struct mpi_msg_portenable_request	*peq;
2581 	int					rv = 0;
2582 
2583 	DNPRINTF(MPI_D_MISC, "%s: mpi_portenable\n", DEVNAME(sc));
2584 
2585 	ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP);
2586 	if (ccb == NULL) {
2587 		DNPRINTF(MPI_D_MISC, "%s: mpi_portenable ccb_get\n",
2588 		    DEVNAME(sc));
2589 		return (1);
2590 	}
2591 
2592 	ccb->ccb_done = mpi_empty_done;
2593 	peq = ccb->ccb_cmd;
2594 
2595 	peq->function = MPI_FUNCTION_PORT_ENABLE;
2596 	peq->port_number = 0;
2597 
2598 	if (mpi_poll(sc, ccb, 50000) != 0) {
2599 		DNPRINTF(MPI_D_MISC, "%s: mpi_portenable poll\n", DEVNAME(sc));
2600 		return (1);
2601 	}
2602 
2603 	if (ccb->ccb_rcb == NULL) {
2604 		DNPRINTF(MPI_D_MISC, "%s: empty portenable reply\n",
2605 		    DEVNAME(sc));
2606 		rv = 1;
2607 	} else
2608 		mpi_push_reply(sc, ccb->ccb_rcb);
2609 
2610 	scsi_io_put(&sc->sc_iopool, ccb);
2611 
2612 	return (rv);
2613 }
2614 
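/*
 * have the ioc upload its firmware image into host memory; only done when
 * the ioc facts flag firmware download boot.
 */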
2615 int
2616 mpi_fwupload(struct mpi_softc *sc)
2617 {
2618 	struct mpi_ccb				*ccb;
2619 	struct {
2620 		struct mpi_msg_fwupload_request		req;
2621 		struct mpi_sge				sge;
2622 	} __packed				*bundle;
2623 	struct mpi_msg_fwupload_reply		*upp;
2624 	int					rv = 0;
2625 
2626 	if (sc->sc_fw_len == 0)
2627 		return (0);
2628 
2629 	DNPRINTF(MPI_D_MISC, "%s: mpi_fwupload\n", DEVNAME(sc));
2630 
2631 	sc->sc_fw = mpi_dmamem_alloc(sc, sc->sc_fw_len);
2632 	if (sc->sc_fw == NULL) {
2633 		DNPRINTF(MPI_D_MISC, "%s: mpi_fwupload unable to allocate %d\n",
2634 		    DEVNAME(sc), sc->sc_fw_len);
2635 		return (1);
2636 	}
2637 
2638 	ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP);
2639 	if (ccb == NULL) {
2640 		DNPRINTF(MPI_D_MISC, "%s: mpi_fwupload ccb_get\n",
2641 		    DEVNAME(sc));
2642 		goto err;
2643 	}
2644 
2645 	ccb->ccb_done = mpi_empty_done;
2646 	bundle = ccb->ccb_cmd;
2647 
2648 	bundle->req.function = MPI_FUNCTION_FW_UPLOAD;
2649 
2650 	bundle->req.image_type = MPI_FWUPLOAD_IMAGETYPE_IOC_FW;
2651 
2652 	bundle->req.tce.details_length = 12;
2653 	htolem32(&bundle->req.tce.image_size, sc->sc_fw_len);
2654 
2655 	htolem32(&bundle->sge.sg_hdr, MPI_SGE_FL_TYPE_SIMPLE |
2656 	    MPI_SGE_FL_SIZE_64 | MPI_SGE_FL_LAST | MPI_SGE_FL_EOB |
2657 	    MPI_SGE_FL_EOL | (u_int32_t)sc->sc_fw_len);
2658 	mpi_dvatosge(&bundle->sge, MPI_DMA_DVA(sc->sc_fw));
2659 
2660 	if (mpi_poll(sc, ccb, 50000) != 0) {
2661 		DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_header poll\n", DEVNAME(sc));
2662 		goto err;
2663 	}
2664 
2665 	if (ccb->ccb_rcb == NULL)
2666 		panic("%s: unable to do fw upload", DEVNAME(sc));
2667 	upp = ccb->ccb_rcb->rcb_reply;
2668 
2669 	if (lemtoh16(&upp->ioc_status) != MPI_IOCSTATUS_SUCCESS)
2670 		rv = 1;
2671 
2672 	mpi_push_reply(sc, ccb->ccb_rcb);
2673 	scsi_io_put(&sc->sc_iopool, ccb);
2674 
2675 	return (rv);
2676 
2677 err:
2678 	mpi_dmamem_free(sc, sc->sc_fw);
2679 	return (1);
2680 }
2681 
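/* read manufacturing page 0 and print the board name and firmware version */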
2682 int
2683 mpi_manufacturing(struct mpi_softc *sc)
2684 {
2685 	char board_name[33];
2686 	struct mpi_cfg_hdr hdr;
2687 	struct mpi_cfg_manufacturing_pg0 *pg;
2688 	size_t pagelen;
2689 	int rv = 1;
2690 
2691 	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_MANUFACTURING,
2692 	    0, 0, &hdr) != 0)
2693 		return (1);
2694 
2695 	pagelen = hdr.page_length * 4; /* dwords to bytes */
2696 	if (pagelen < sizeof(*pg))
2697 		return (1);
2698 
2699 	pg = malloc(pagelen, M_TEMP, M_WAITOK|M_CANFAIL);
2700 	if (pg == NULL)
2701 		return (1);
2702 
2703 	if (mpi_cfg_page(sc, 0, &hdr, 1, pg, pagelen) != 0)
2704 		goto out;
2705 
2706 	scsi_strvis(board_name, pg->board_name, sizeof(pg->board_name));
2707 
2708 	printf("%s: %s, firmware %d.%d.%d.%d\n", DEVNAME(sc), board_name,
2709 	    sc->sc_fw_maj, sc->sc_fw_min, sc->sc_fw_unit, sc->sc_fw_dev);
2710 
2711 	rv = 0;
2712 
2713 out:
2714 	free(pg, M_TEMP, pagelen);
2715 	return (rv);
2716 }
2717 
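/* read ioc page 2 and note whether the ioc is raid capable */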
2718 void
2719 mpi_get_raid(struct mpi_softc *sc)
2720 {
2721 	struct mpi_cfg_hdr		hdr;
2722 	struct mpi_cfg_ioc_pg2		*vol_page;
2723 	size_t				pagelen;
2724 	u_int32_t			capabilities;
2725 
2726 	DNPRINTF(MPI_D_RAID, "%s: mpi_get_raid\n", DEVNAME(sc));
2727 
2728 	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_IOC, 2, 0, &hdr) != 0) {
2729 		DNPRINTF(MPI_D_RAID, "%s: mpi_get_raid unable to fetch header "
2730 		    "for IOC page 2\n", DEVNAME(sc));
2731 		return;
2732 	}
2733 
2734 	pagelen = hdr.page_length * 4; /* dwords to bytes */
2735 	vol_page = malloc(pagelen, M_TEMP, M_WAITOK|M_CANFAIL);
2736 	if (vol_page == NULL) {
2737 		DNPRINTF(MPI_D_RAID, "%s: mpi_get_raid unable to allocate "
2738 		    "space for ioc config page 2\n", DEVNAME(sc));
2739 		return;
2740 	}
2741 
2742 	if (mpi_cfg_page(sc, 0, &hdr, 1, vol_page, pagelen) != 0) {
2743 		DNPRINTF(MPI_D_RAID, "%s: mpi_get_raid unable to fetch IOC "
2744 		    "page 2\n", DEVNAME(sc));
2745 		goto out;
2746 	}
2747 
2748 	capabilities = lemtoh32(&vol_page->capabilities);
2749 
2750 	DNPRINTF(MPI_D_RAID, "%s:  capabilities: 0x%08x\n", DEVNAME(sc),
2751 	    letoh32(vol_page->capabilities));
2752 	DNPRINTF(MPI_D_RAID, "%s:  active_vols: %d max_vols: %d "
2753 	    "active_physdisks: %d max_physdisks: %d\n", DEVNAME(sc),
2754 	    vol_page->active_vols, vol_page->max_vols,
2755 	    vol_page->active_physdisks, vol_page->max_physdisks);
2756 
2757 	/* don't walk the list if there is no RAID capability */
2758 	if (capabilities == 0xdeadbeef) {
2759 		printf("%s: deadbeef in raid configuration\n", DEVNAME(sc));
2760 		goto out;
2761 	}
2762 
2763 	if (ISSET(capabilities, MPI_CFG_IOC_2_CAPABILITIES_RAID))
2764 		sc->sc_flags |= MPI_F_RAID;
2765 
2766 out:
2767 	free(vol_page, M_TEMP, pagelen);
2768 }
2769 
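/*
 * fetch a configuration page header (or extended header) so the caller
 * can read or write the page itself.
 */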
2770 int
2771 mpi_req_cfg_header(struct mpi_softc *sc, u_int8_t type, u_int8_t number,
2772     u_int32_t address, int flags, void *p)
2773 {
2774 	struct mpi_ccb				*ccb;
2775 	struct mpi_msg_config_request		*cq;
2776 	struct mpi_msg_config_reply		*cp;
2777 	struct mpi_cfg_hdr			*hdr = p;
2778 	struct mpi_ecfg_hdr			*ehdr = p;
2779 	int					etype = 0;
2780 	int					rv = 0;
2781 
2782 	DNPRINTF(MPI_D_MISC, "%s: mpi_req_cfg_header type: %#x number: %x "
2783 	    "address: 0x%08x flags: 0x%b\n", DEVNAME(sc), type, number,
2784 	    address, flags, MPI_PG_FMT);
2785 
2786 	ccb = scsi_io_get(&sc->sc_iopool,
2787 	    ISSET(flags, MPI_PG_POLL) ? SCSI_NOSLEEP : 0);
2788 	if (ccb == NULL) {
2789 		DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_header ccb_get\n",
2790 		    DEVNAME(sc));
2791 		return (1);
2792 	}
2793 
2794 	if (ISSET(flags, MPI_PG_EXTENDED)) {
2795 		etype = type;
2796 		type = MPI_CONFIG_REQ_PAGE_TYPE_EXTENDED;
2797 	}
2798 
2799 	cq = ccb->ccb_cmd;
2800 
2801 	cq->function = MPI_FUNCTION_CONFIG;
2802 
2803 	cq->action = MPI_CONFIG_REQ_ACTION_PAGE_HEADER;
2804 
2805 	cq->config_header.page_number = number;
2806 	cq->config_header.page_type = type;
2807 	cq->ext_page_type = etype;
2808 	htolem32(&cq->page_address, address);
2809 	htolem32(&cq->page_buffer.sg_hdr, MPI_SGE_FL_TYPE_SIMPLE |
2810 	    MPI_SGE_FL_LAST | MPI_SGE_FL_EOB | MPI_SGE_FL_EOL);
2811 
2812 	ccb->ccb_done = mpi_empty_done;
2813 	if (ISSET(flags, MPI_PG_POLL)) {
2814 		if (mpi_poll(sc, ccb, 50000) != 0) {
2815 			DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_header poll\n",
2816 			    DEVNAME(sc));
2817 			return (1);
2818 		}
2819 	} else
2820 		mpi_wait(sc, ccb);
2821 
2822 	if (ccb->ccb_rcb == NULL)
2823 		panic("%s: unable to fetch config header", DEVNAME(sc));
2824 	cp = ccb->ccb_rcb->rcb_reply;
2825 
2826 	DNPRINTF(MPI_D_MISC, "%s:  action: 0x%02x msg_length: %d function: "
2827 	    "0x%02x\n", DEVNAME(sc), cp->action, cp->msg_length, cp->function);
2828 	DNPRINTF(MPI_D_MISC, "%s:  ext_page_length: %d ext_page_type: 0x%02x "
2829 	    "msg_flags: 0x%02x\n", DEVNAME(sc),
2830 	    letoh16(cp->ext_page_length), cp->ext_page_type,
2831 	    cp->msg_flags);
2832 	DNPRINTF(MPI_D_MISC, "%s:  msg_context: 0x%08x\n", DEVNAME(sc),
2833 	    letoh32(cp->msg_context));
2834 	DNPRINTF(MPI_D_MISC, "%s:  ioc_status: 0x%04x\n", DEVNAME(sc),
2835 	    letoh16(cp->ioc_status));
2836 	DNPRINTF(MPI_D_MISC, "%s:  ioc_loginfo: 0x%08x\n", DEVNAME(sc),
2837 	    letoh32(cp->ioc_loginfo));
2838 	DNPRINTF(MPI_D_MISC, "%s:  page_version: 0x%02x page_length: %d "
2839 	    "page_number: 0x%02x page_type: 0x%02x\n", DEVNAME(sc),
2840 	    cp->config_header.page_version,
2841 	    cp->config_header.page_length,
2842 	    cp->config_header.page_number,
2843 	    cp->config_header.page_type);
2844 
2845 	if (lemtoh16(&cp->ioc_status) != MPI_IOCSTATUS_SUCCESS)
2846 		rv = 1;
2847 	else if (ISSET(flags, MPI_PG_EXTENDED)) {
2848 		memset(ehdr, 0, sizeof(*ehdr));
2849 		ehdr->page_version = cp->config_header.page_version;
2850 		ehdr->page_number = cp->config_header.page_number;
2851 		ehdr->page_type = cp->config_header.page_type;
2852 		ehdr->ext_page_length = cp->ext_page_length;
2853 		ehdr->ext_page_type = cp->ext_page_type;
2854 	} else
2855 		*hdr = cp->config_header;
2856 
2857 	mpi_push_reply(sc, ccb->ccb_rcb);
2858 	scsi_io_put(&sc->sc_iopool, ccb);
2859 
2860 	return (rv);
2861 }
2862 
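/*
 * read or write a configuration page, bouncing the data through the tail
 * of the request frame.
 */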
2863 int
2864 mpi_req_cfg_page(struct mpi_softc *sc, u_int32_t address, int flags,
2865     void *p, int read, void *page, size_t len)
2866 {
2867 	struct mpi_ccb				*ccb;
2868 	struct mpi_msg_config_request		*cq;
2869 	struct mpi_msg_config_reply		*cp;
2870 	struct mpi_cfg_hdr			*hdr = p;
2871 	struct mpi_ecfg_hdr			*ehdr = p;
2872 	char					*kva;
2873 	int					page_length;
2874 	int					rv = 0;
2875 
2876 	DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_page address: %d read: %d type: %x\n",
2877 	    DEVNAME(sc), address, read, hdr->page_type);
2878 
2879 	page_length = ISSET(flags, MPI_PG_EXTENDED) ?
2880 	    lemtoh16(&ehdr->ext_page_length) : hdr->page_length;
2881 
2882 	if (len > MPI_REQUEST_SIZE - sizeof(struct mpi_msg_config_request) ||
2883 	    len < page_length * 4)
2884 		return (1);
2885 
2886 	ccb = scsi_io_get(&sc->sc_iopool,
2887 	    ISSET(flags, MPI_PG_POLL) ? SCSI_NOSLEEP : 0);
2888 	if (ccb == NULL) {
2889 		DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_page ccb_get\n", DEVNAME(sc));
2890 		return (1);
2891 	}
2892 
2893 	cq = ccb->ccb_cmd;
2894 
2895 	cq->function = MPI_FUNCTION_CONFIG;
2896 
2897 	cq->action = (read ? MPI_CONFIG_REQ_ACTION_PAGE_READ_CURRENT :
2898 	    MPI_CONFIG_REQ_ACTION_PAGE_WRITE_CURRENT);
2899 
2900 	if (ISSET(flags, MPI_PG_EXTENDED)) {
2901 		cq->config_header.page_version = ehdr->page_version;
2902 		cq->config_header.page_number = ehdr->page_number;
2903 		cq->config_header.page_type = ehdr->page_type;
2904 		cq->ext_page_len = ehdr->ext_page_length;
2905 		cq->ext_page_type = ehdr->ext_page_type;
2906 	} else
2907 		cq->config_header = *hdr;
2908 	cq->config_header.page_type &= MPI_CONFIG_REQ_PAGE_TYPE_MASK;
2909 	htolem32(&cq->page_address, address);
2910 	htolem32(&cq->page_buffer.sg_hdr, MPI_SGE_FL_TYPE_SIMPLE |
2911 	    MPI_SGE_FL_LAST | MPI_SGE_FL_EOB | MPI_SGE_FL_EOL |
2912 	    (page_length * 4) |
2913 	    (read ? MPI_SGE_FL_DIR_IN : MPI_SGE_FL_DIR_OUT));
2914 
2915 	/* bounce the page via the request space to avoid more bus_dma games */
2916 	mpi_dvatosge(&cq->page_buffer, ccb->ccb_cmd_dva +
2917 	    sizeof(struct mpi_msg_config_request));
2918 
2919 	kva = ccb->ccb_cmd;
2920 	kva += sizeof(struct mpi_msg_config_request);
2921 	if (!read)
2922 		memcpy(kva, page, len);
2923 
2924 	ccb->ccb_done = mpi_empty_done;
2925 	if (ISSET(flags, MPI_PG_POLL)) {
2926 		if (mpi_poll(sc, ccb, 50000) != 0) {
2927 			DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_header poll\n",
2928 			    DEVNAME(sc));
2929 			return (1);
2930 		}
2931 	} else
2932 		mpi_wait(sc, ccb);
2933 
2934 	if (ccb->ccb_rcb == NULL) {
2935 		scsi_io_put(&sc->sc_iopool, ccb);
2936 		return (1);
2937 	}
2938 	cp = ccb->ccb_rcb->rcb_reply;
2939 
2940 	DNPRINTF(MPI_D_MISC, "%s:  action: 0x%02x msg_length: %d function: "
2941 	    "0x%02x\n", DEVNAME(sc), cp->action, cp->msg_length, cp->function);
2942 	DNPRINTF(MPI_D_MISC, "%s:  ext_page_length: %d ext_page_type: 0x%02x "
2943 	    "msg_flags: 0x%02x\n", DEVNAME(sc),
2944 	    letoh16(cp->ext_page_length), cp->ext_page_type,
2945 	    cp->msg_flags);
2946 	DNPRINTF(MPI_D_MISC, "%s:  msg_context: 0x%08x\n", DEVNAME(sc),
2947 	    letoh32(cp->msg_context));
2948 	DNPRINTF(MPI_D_MISC, "%s:  ioc_status: 0x%04x\n", DEVNAME(sc),
2949 	    letoh16(cp->ioc_status));
2950 	DNPRINTF(MPI_D_MISC, "%s:  ioc_loginfo: 0x%08x\n", DEVNAME(sc),
2951 	    letoh32(cp->ioc_loginfo));
2952 	DNPRINTF(MPI_D_MISC, "%s:  page_version: 0x%02x page_length: %d "
2953 	    "page_number: 0x%02x page_type: 0x%02x\n", DEVNAME(sc),
2954 	    cp->config_header.page_version,
2955 	    cp->config_header.page_length,
2956 	    cp->config_header.page_number,
2957 	    cp->config_header.page_type);
2958 
2959 	if (lemtoh16(&cp->ioc_status) != MPI_IOCSTATUS_SUCCESS)
2960 		rv = 1;
2961 	else if (read)
2962 		memcpy(page, kva, len);
2963 
2964 	mpi_push_reply(sc, ccb->ccb_rcb);
2965 	scsi_io_put(&sc->sc_iopool, ccb);
2966 
2967 	return (rv);
2968 }
2969 
2970 int
2971 mpi_scsi_ioctl(struct scsi_link *link, u_long cmd, caddr_t addr, int flag)
2972 {
2973 	struct mpi_softc	*sc = (struct mpi_softc *)link->adapter_softc;
2974 
2975 	DNPRINTF(MPI_D_IOCTL, "%s: mpi_scsi_ioctl\n", DEVNAME(sc));
2976 
2977 	switch (cmd) {
2978 	case DIOCGCACHE:
2979 	case DIOCSCACHE:
2980 		if (ISSET(link->flags, SDEV_VIRTUAL)) {
2981 			return (mpi_ioctl_cache(link, cmd,
2982 			    (struct dk_cache *)addr));
2983 		}
2984 		break;
2985 
2986 	default:
2987 		if (sc->sc_ioctl)
2988 			return (sc->sc_ioctl(link->adapter_softc, cmd, addr));
2989 
2990 		break;
2991 	}
2992 
2993 	return (ENOTTY);
2994 }
2995 
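/*
 * get or set the write cache setting of a raid volume via raid volume
 * page 0 and a raid action request.
 */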
2996 int
2997 mpi_ioctl_cache(struct scsi_link *link, u_long cmd, struct dk_cache *dc)
2998 {
2999 	struct mpi_softc	*sc = (struct mpi_softc *)link->adapter_softc;
3000 	struct mpi_ccb		*ccb;
3001 	int			len, rv;
3002 	struct mpi_cfg_hdr	hdr;
3003 	struct mpi_cfg_raid_vol_pg0 *rpg0;
3004 	int			enabled;
3005 	struct mpi_msg_raid_action_request *req;
3006 	struct mpi_msg_raid_action_reply *rep;
3007 	struct mpi_raid_settings settings;
3008 
3009 	rv = mpi_req_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 0,
3010 	    link->target, MPI_PG_POLL, &hdr);
3011 	if (rv != 0)
3012 		return (EIO);
3013 
3014 	len = sizeof(*rpg0) + sc->sc_vol_page->max_physdisks *
3015 	    sizeof(struct mpi_cfg_raid_vol_pg0_physdisk);
3016 	rpg0 = malloc(len, M_TEMP, M_NOWAIT);
3017 	if (rpg0 == NULL)
3018 		return (ENOMEM);
3019 
3020 	if (mpi_req_cfg_page(sc, link->target, MPI_PG_POLL, &hdr, 1,
3021 	    rpg0, len) != 0) {
3022 		DNPRINTF(MPI_D_RAID, "%s: can't get RAID vol cfg page 0\n",
3023 		    DEVNAME(sc));
3024 		rv = EIO;
3025 		goto done;
3026 	}
3027 
3028 	enabled = ISSET(lemtoh16(&rpg0->settings.volume_settings),
3029 	    MPI_CFG_RAID_VOL_0_SETTINGS_WRITE_CACHE_EN) ? 1 : 0;
3030 
3031 	if (cmd == DIOCGCACHE) {
3032 		dc->wrcache = enabled;
3033 		dc->rdcache = 0;
3034 		goto done;
3035 	} /* else DIOCSCACHE */
3036 
3037 	if (dc->rdcache) {
3038 		rv = EOPNOTSUPP;
3039 		goto done;
3040 	}
3041 
3042 	if (((dc->wrcache) ? 1 : 0) == enabled)
3043 		goto done;
3044 
3045 	settings = rpg0->settings;
3046 	if (dc->wrcache) {
3047 		SET(settings.volume_settings,
3048 		    htole16(MPI_CFG_RAID_VOL_0_SETTINGS_WRITE_CACHE_EN));
3049 	} else {
3050 		CLR(settings.volume_settings,
3051 		    htole16(MPI_CFG_RAID_VOL_0_SETTINGS_WRITE_CACHE_EN));
3052 	}
3053 
3054 	ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP);
3055 	if (ccb == NULL) {
3056 		rv = ENOMEM;
3057 		goto done;
3058 	}
3059 
3060 	req = ccb->ccb_cmd;
3061 	req->function = MPI_FUNCTION_RAID_ACTION;
3062 	req->action = MPI_MSG_RAID_ACTION_CH_VOL_SETTINGS;
3063 	req->vol_id = rpg0->volume_id;
3064 	req->vol_bus = rpg0->volume_bus;
3065 
3066 	memcpy(&req->data_word, &settings, sizeof(req->data_word));
3067 	ccb->ccb_done = mpi_empty_done;
3068 	if (mpi_poll(sc, ccb, 50000) != 0) {
3069 		rv = EIO;
3070 		goto done;
3071 	}
3072 
3073 	if (ccb->ccb_rcb == NULL)
3074 		panic("%s: raid volume settings change failed", DEVNAME(sc));
3075 	rep = ccb->ccb_rcb->rcb_reply;
3076 
3077 	switch (lemtoh16(&rep->action_status)) {
3078 	case MPI_RAID_ACTION_STATUS_OK:
3079 		rv = 0;
3080 		break;
3081 	default:
3082 		rv = EIO;
3083 		break;
3084 	}
3085 
3086 	mpi_push_reply(sc, ccb->ccb_rcb);
3087 	scsi_io_put(&sc->sc_iopool, ccb);
3088 
3089 done:
3090 	free(rpg0, M_TEMP, len);
3091 	return (rv);
3092 }
3093 
3094 #if NBIO > 0
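/* refresh ioc page 2 and cache raid volume page 0 for the given volume */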
3095 int
3096 mpi_bio_get_pg0_raid(struct mpi_softc *sc, int id)
3097 {
3098 	int			len, rv = EINVAL;
3099 	u_int32_t		address;
3100 	struct mpi_cfg_hdr	hdr;
3101 	struct mpi_cfg_raid_vol_pg0 *rpg0;
3102 
3103 	/* get IOC page 2 */
3104 	if (mpi_req_cfg_page(sc, 0, 0, &sc->sc_cfg_hdr, 1, sc->sc_vol_page,
3105 	    sc->sc_cfg_hdr.page_length * 4) != 0) {
3106 		DNPRINTF(MPI_D_IOCTL, "%s: mpi_bio_get_pg0_raid unable to "
3107 		    "fetch IOC page 2\n", DEVNAME(sc));
3108 		goto done;
3109 	}
3110 
3111 	/* XXX return something other than EINVAL to indicate hot spare range */
3112 	if (id > sc->sc_vol_page->active_vols) {
3113 		DNPRINTF(MPI_D_IOCTL, "%s: mpi_bio_get_pg0_raid invalid vol "
3114 		    "id: %d\n", DEVNAME(sc), id);
3115 		goto done;
3116 	}
3117 
3118 	/* replace current buffer with new one */
3119 	len = sizeof *rpg0 + sc->sc_vol_page->max_physdisks *
3120 	    sizeof(struct mpi_cfg_raid_vol_pg0_physdisk);
3121 	rpg0 = malloc(len, M_DEVBUF, M_WAITOK | M_CANFAIL);
3122 	if (rpg0 == NULL) {
3123 		printf("%s: can't get memory for RAID page 0, "
3124 		    "bio disabled\n", DEVNAME(sc));
3125 		goto done;
3126 	}
3127 	if (sc->sc_rpg0)
3128 		free(sc->sc_rpg0, M_DEVBUF, 0);
3129 	sc->sc_rpg0 = rpg0;
3130 
3131 	/* get raid vol page 0 */
3132 	address = sc->sc_vol_list[id].vol_id |
3133 	    (sc->sc_vol_list[id].vol_bus << 8);
3134 	if (mpi_req_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 0,
3135 	    address, 0, &hdr) != 0)
3136 		goto done;
3137 	if (mpi_req_cfg_page(sc, address, 0, &hdr, 1, rpg0, len)) {
3138 		DNPRINTF(MPI_D_RAID, "%s: can't get RAID vol cfg page 0\n",
3139 		    DEVNAME(sc));
3140 		goto done;
3141 	}
3142 
3143 	rv = 0;
3144 done:
3145 	return (rv);
3146 }
3147 
3148 int
3149 mpi_ioctl(struct device *dev, u_long cmd, caddr_t addr)
3150 {
3151 	struct mpi_softc	*sc = (struct mpi_softc *)dev;
3152 	int error = 0;
3153 
3154 	DNPRINTF(MPI_D_IOCTL, "%s: mpi_ioctl ", DEVNAME(sc));
3155 
3156 	/* make sure we have bio enabled */
3157 	if (sc->sc_ioctl != mpi_ioctl)
3158 		return (EINVAL);
3159 
3160 	rw_enter_write(&sc->sc_lock);
3161 
3162 	switch (cmd) {
3163 	case BIOCINQ:
3164 		DNPRINTF(MPI_D_IOCTL, "inq\n");
3165 		error = mpi_ioctl_inq(sc, (struct bioc_inq *)addr);
3166 		break;
3167 
3168 	case BIOCVOL:
3169 		DNPRINTF(MPI_D_IOCTL, "vol\n");
3170 		error = mpi_ioctl_vol(sc, (struct bioc_vol *)addr);
3171 		break;
3172 
3173 	case BIOCDISK:
3174 		DNPRINTF(MPI_D_IOCTL, "disk\n");
3175 		error = mpi_ioctl_disk(sc, (struct bioc_disk *)addr);
3176 		break;
3177 
3178 	case BIOCALARM:
3179 		DNPRINTF(MPI_D_IOCTL, "alarm\n");
3180 		break;
3181 
3182 	case BIOCBLINK:
3183 		DNPRINTF(MPI_D_IOCTL, "blink\n");
3184 		break;
3185 
3186 	case BIOCSETSTATE:
3187 		DNPRINTF(MPI_D_IOCTL, "setstate\n");
3188 		error = mpi_ioctl_setstate(sc, (struct bioc_setstate *)addr);
3189 		break;
3190 
3191 	default:
3192 		DNPRINTF(MPI_D_IOCTL, " invalid ioctl\n");
3193 		error = ENOTTY;
3194 	}
3195 
3196 	rw_exit_write(&sc->sc_lock);
3197 
3198 	return (error);
3199 }
3200 
3201 int
3202 mpi_ioctl_inq(struct mpi_softc *sc, struct bioc_inq *bi)
3203 {
3204 	if (!(sc->sc_flags & MPI_F_RAID)) {
3205 		bi->bi_novol = 0;
3206 		bi->bi_nodisk = 0;
3207 	}
3208 
3209 	if (mpi_cfg_page(sc, 0, &sc->sc_cfg_hdr, 1, sc->sc_vol_page,
3210 	    sc->sc_cfg_hdr.page_length * 4) != 0) {
3211 		DNPRINTF(MPI_D_IOCTL, "%s: mpi_get_raid unable to fetch IOC "
3212 		    "page 2\n", DEVNAME(sc));
3213 		return (EINVAL);
3214 	}
3215 
3216 	DNPRINTF(MPI_D_IOCTL, "%s:  active_vols: %d max_vols: %d "
3217 	    "active_physdisks: %d max_physdisks: %d\n", DEVNAME(sc),
3218 	    sc->sc_vol_page->active_vols, sc->sc_vol_page->max_vols,
3219 	    sc->sc_vol_page->active_physdisks, sc->sc_vol_page->max_physdisks);
3220 
3221 	bi->bi_novol = sc->sc_vol_page->active_vols;
3222 	bi->bi_nodisk = sc->sc_vol_page->active_physdisks;
3223 	strlcpy(bi->bi_dev, DEVNAME(sc), sizeof(bi->bi_dev));
3224 
3225 	return (0);
3226 }
3227 
3228 int
3229 mpi_ioctl_vol(struct mpi_softc *sc, struct bioc_vol *bv)
3230 {
3231 	int			i, vol, id, rv = EINVAL;
3232 	struct device		*dev;
3233 	struct scsi_link	*link;
3234 	struct mpi_cfg_raid_vol_pg0 *rpg0;
3235 	char			*vendp;
3236 
3237 	id = bv->bv_volid;
3238 	if (mpi_bio_get_pg0_raid(sc, id))
3239 		goto done;
3240 
3241 	if (id > sc->sc_vol_page->active_vols)
3242 		return (EINVAL); /* XXX deal with hot spares */
3243 
3244 	rpg0 = sc->sc_rpg0;
3245 	if (rpg0 == NULL)
3246 		goto done;
3247 
3248 	/* determine status */
3249 	switch (rpg0->volume_state) {
3250 	case MPI_CFG_RAID_VOL_0_STATE_OPTIMAL:
3251 		bv->bv_status = BIOC_SVONLINE;
3252 		break;
3253 	case MPI_CFG_RAID_VOL_0_STATE_DEGRADED:
3254 		bv->bv_status = BIOC_SVDEGRADED;
3255 		break;
3256 	case MPI_CFG_RAID_VOL_0_STATE_FAILED:
3257 	case MPI_CFG_RAID_VOL_0_STATE_MISSING:
3258 		bv->bv_status = BIOC_SVOFFLINE;
3259 		break;
3260 	default:
3261 		bv->bv_status = BIOC_SVINVALID;
3262 	}
3263 
3264 	/* override status if scrubbing or something */
3265 	if (rpg0->volume_status & MPI_CFG_RAID_VOL_0_STATUS_RESYNCING)
3266 		bv->bv_status = BIOC_SVREBUILD;
3267 
3268 	bv->bv_size = (uint64_t)lemtoh32(&rpg0->max_lba) * 512;
3269 
3270 	switch (sc->sc_vol_list[id].vol_type) {
3271 	case MPI_CFG_RAID_TYPE_RAID_IS:
3272 		bv->bv_level = 0;
3273 		break;
3274 	case MPI_CFG_RAID_TYPE_RAID_IME:
3275 	case MPI_CFG_RAID_TYPE_RAID_IM:
3276 		bv->bv_level = 1;
3277 		break;
3278 	case MPI_CFG_RAID_TYPE_RAID_5:
3279 		bv->bv_level = 5;
3280 		break;
3281 	case MPI_CFG_RAID_TYPE_RAID_6:
3282 		bv->bv_level = 6;
3283 		break;
3284 	case MPI_CFG_RAID_TYPE_RAID_10:
3285 		bv->bv_level = 10;
3286 		break;
3287 	case MPI_CFG_RAID_TYPE_RAID_50:
3288 		bv->bv_level = 50;
3289 		break;
3290 	default:
3291 		bv->bv_level = -1;
3292 	}
3293 
3294 	bv->bv_nodisk = rpg0->num_phys_disks;
3295 
3296 	for (i = 0, vol = -1; i < sc->sc_buswidth; i++) {
3297 		link = scsi_get_link(sc->sc_scsibus, i, 0);
3298 		if (link == NULL)
3299 			continue;
3300 
3301 		/* skip if not a virtual disk */
3302 		if (!(link->flags & SDEV_VIRTUAL))
3303 			continue;
3304 
3305 		vol++;
3306 		/* are we it? */
3307 		if (vol == bv->bv_volid) {
3308 			dev = link->device_softc;
3309 			vendp = link->inqdata.vendor;
3310 			memcpy(bv->bv_vendor, vendp, sizeof bv->bv_vendor);
3311 			bv->bv_vendor[sizeof(bv->bv_vendor) - 1] = '\0';
3312 			strlcpy(bv->bv_dev, dev->dv_xname, sizeof bv->bv_dev);
3313 			break;
3314 		}
3315 	}
3316 	rv = 0;
3317 done:
3318 	return (rv);
3319 }
3320 
3321 int
3322 mpi_ioctl_disk(struct mpi_softc *sc, struct bioc_disk *bd)
3323 {
3324 	int			pdid, id, rv = EINVAL;
3325 	u_int32_t		address;
3326 	struct mpi_cfg_hdr	hdr;
3327 	struct mpi_cfg_raid_vol_pg0 *rpg0;
3328 	struct mpi_cfg_raid_vol_pg0_physdisk *physdisk;
3329 	struct mpi_cfg_raid_physdisk_pg0 pdpg0;
3330 
3331 	id = bd->bd_volid;
3332 	if (mpi_bio_get_pg0_raid(sc, id))
3333 		goto done;
3334 
3335 	if (id > sc->sc_vol_page->active_vols)
3336 		return (EINVAL); /* XXX deal with hot spares */
3337 
3338 	rpg0 = sc->sc_rpg0;
3339 	if (rpg0 == NULL)
3340 		goto done;
3341 
3342 	pdid = bd->bd_diskid;
3343 	if (pdid > rpg0->num_phys_disks)
3344 		goto done;
3345 	physdisk = (struct mpi_cfg_raid_vol_pg0_physdisk *)(rpg0 + 1);
3346 	physdisk += pdid;
3347 
3348 	/* get raid phys disk page 0 */
3349 	address = physdisk->phys_disk_num;
3350 	if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_RAID_PD, 0, address,
3351 	    &hdr) != 0)
3352 		goto done;
3353 	if (mpi_cfg_page(sc, address, &hdr, 1, &pdpg0, sizeof pdpg0)) {
3354 		bd->bd_status = BIOC_SDFAILED;
3355 		return (0);
3356 	}
3357 	bd->bd_channel = pdpg0.phys_disk_bus;
3358 	bd->bd_target = pdpg0.phys_disk_id;
3359 	bd->bd_lun = 0;
3360 	bd->bd_size = (uint64_t)lemtoh32(&pdpg0.max_lba) * 512;
3361 	strlcpy(bd->bd_vendor, (char *)pdpg0.vendor_id, sizeof(bd->bd_vendor));
3362 
3363 	switch (pdpg0.phys_disk_state) {
3364 	case MPI_CFG_RAID_PHYDISK_0_STATE_ONLINE:
3365 		bd->bd_status = BIOC_SDONLINE;
3366 		break;
3367 	case MPI_CFG_RAID_PHYDISK_0_STATE_MISSING:
3368 	case MPI_CFG_RAID_PHYDISK_0_STATE_FAILED:
3369 		bd->bd_status = BIOC_SDFAILED;
3370 		break;
3371 	case MPI_CFG_RAID_PHYDISK_0_STATE_HOSTFAIL:
3372 	case MPI_CFG_RAID_PHYDISK_0_STATE_OTHER:
3373 	case MPI_CFG_RAID_PHYDISK_0_STATE_OFFLINE:
3374 		bd->bd_status = BIOC_SDOFFLINE;
3375 		break;
3376 	case MPI_CFG_RAID_PHYDISK_0_STATE_INIT:
3377 		bd->bd_status = BIOC_SDSCRUB;
3378 		break;
3379 	case MPI_CFG_RAID_PHYDISK_0_STATE_INCOMPAT:
3380 	default:
3381 		bd->bd_status = BIOC_SDINVALID;
3382 		break;
3383 	}
3384 
3385 	/* XXX figure this out */
3386 	/* bd_serial[32]; */
3387 	/* bd_procdev[16]; */
3388 
3389 	rv = 0;
3390 done:
3391 	return (rv);
3392 }
3393 
3394 int
3395 mpi_ioctl_setstate(struct mpi_softc *sc, struct bioc_setstate *bs)
3396 {
3397 	return (ENOTTY);
3398 }
3399 
3400 #ifndef SMALL_KERNEL
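/* attach a drive sensor for each raid volume on the bus */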
3401 int
3402 mpi_create_sensors(struct mpi_softc *sc)
3403 {
3404 	struct device		*dev;
3405 	struct scsi_link	*link;
3406 	int			i, vol, nsensors;
3407 
3408 	/* count volumes */
3409 	for (i = 0, vol = 0; i < sc->sc_buswidth; i++) {
3410 		link = scsi_get_link(sc->sc_scsibus, i, 0);
3411 		if (link == NULL)
3412 			continue;
3413 		/* skip if not a virtual disk */
3414 		if (!(link->flags & SDEV_VIRTUAL))
3415 			continue;
3416 
3417 		vol++;
3418 	}
3419 	if (vol == 0)
3420 		return (0);
3421 
3422 	sc->sc_sensors = mallocarray(vol, sizeof(struct ksensor),
3423 	    M_DEVBUF, M_NOWAIT | M_ZERO);
3424 	if (sc->sc_sensors == NULL)
3425 		return (1);
3426 	nsensors = vol;
3427 
3428 	strlcpy(sc->sc_sensordev.xname, DEVNAME(sc),
3429 	    sizeof(sc->sc_sensordev.xname));
3430 
3431 	for (i = 0, vol = 0; i < sc->sc_buswidth; i++) {
3432 		link = scsi_get_link(sc->sc_scsibus, i, 0);
3433 		if (link == NULL)
3434 			continue;
3435 		/* skip if not a virtual disk */
3436 		if (!(link->flags & SDEV_VIRTUAL))
3437 			continue;
3438 
3439 		dev = link->device_softc;
3440 		strlcpy(sc->sc_sensors[vol].desc, dev->dv_xname,
3441 		    sizeof(sc->sc_sensors[vol].desc));
3442 		sc->sc_sensors[vol].type = SENSOR_DRIVE;
3443 		sc->sc_sensors[vol].status = SENSOR_S_UNKNOWN;
3444 		sensor_attach(&sc->sc_sensordev, &sc->sc_sensors[vol]);
3445 
3446 		vol++;
3447 	}
3448 
3449 	if (sensor_task_register(sc, mpi_refresh_sensors, 10) == NULL)
3450 		goto bad;
3451 
3452 	sensordev_install(&sc->sc_sensordev);
3453 
3454 	return (0);
3455 
3456 bad:
3457 	free(sc->sc_sensors, M_DEVBUF, nsensors * sizeof(struct ksensor));
3458 	return (1);
3459 }
3460 
3461 void
3462 mpi_refresh_sensors(void *arg)
3463 {
3464 	int			i, vol;
3465 	struct scsi_link	*link;
3466 	struct mpi_softc	*sc = arg;
3467 	struct mpi_cfg_raid_vol_pg0 *rpg0;
3468 
3469 	rw_enter_write(&sc->sc_lock);
3470 
3471 	for (i = 0, vol = 0; i < sc->sc_buswidth; i++) {
3472 		link = scsi_get_link(sc->sc_scsibus, i, 0);
3473 		if (link == NULL)
3474 			continue;
3475 		/* skip if not a virtual disk */
3476 		if (!(link->flags & SDEV_VIRTUAL))
3477 			continue;
3478 
3479 		if (mpi_bio_get_pg0_raid(sc, vol))
3480 			continue;
3481 
3482 		rpg0 = sc->sc_rpg0;
3483 		if (rpg0 == NULL)
3484 			goto done;
3485 
3486 		/* determine status */
3487 		switch (rpg0->volume_state) {
3488 		case MPI_CFG_RAID_VOL_0_STATE_OPTIMAL:
3489 			sc->sc_sensors[vol].value = SENSOR_DRIVE_ONLINE;
3490 			sc->sc_sensors[vol].status = SENSOR_S_OK;
3491 			break;
3492 		case MPI_CFG_RAID_VOL_0_STATE_DEGRADED:
3493 			sc->sc_sensors[vol].value = SENSOR_DRIVE_PFAIL;
3494 			sc->sc_sensors[vol].status = SENSOR_S_WARN;
3495 			break;
3496 		case MPI_CFG_RAID_VOL_0_STATE_FAILED:
3497 		case MPI_CFG_RAID_VOL_0_STATE_MISSING:
3498 			sc->sc_sensors[vol].value = SENSOR_DRIVE_FAIL;
3499 			sc->sc_sensors[vol].status = SENSOR_S_CRIT;
3500 			break;
3501 		default:
3502 			sc->sc_sensors[vol].value = 0; /* unknown */
3503 			sc->sc_sensors[vol].status = SENSOR_S_UNKNOWN;
3504 		}
3505 
3506 		/* override status if scrubbing or something */
3507 		if (rpg0->volume_status & MPI_CFG_RAID_VOL_0_STATUS_RESYNCING) {
3508 			sc->sc_sensors[vol].value = SENSOR_DRIVE_REBUILD;
3509 			sc->sc_sensors[vol].status = SENSOR_S_WARN;
3510 		}
3511 
3512 		vol++;
3513 	}
3514 done:
3515 	rw_exit_write(&sc->sc_lock);
3516 }
3517 #endif /* SMALL_KERNEL */
3518 #endif /* NBIO > 0 */
3519