xref: /openbsd/sys/dev/ic/mfi.c (revision 8529ddd3)
1 /* $OpenBSD: mfi.c,v 1.163 2015/05/18 12:21:04 mikeb Exp $ */
2 /*
3  * Copyright (c) 2006 Marco Peereboom <marco@peereboom.us>
4  *
5  * Permission to use, copy, modify, and distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  */
17 
18 #include "bio.h"
19 
20 #include <sys/types.h>
21 #include <sys/param.h>
22 #include <sys/systm.h>
23 #include <sys/buf.h>
24 #include <sys/device.h>
25 #include <sys/kernel.h>
26 #include <sys/malloc.h>
27 #include <sys/rwlock.h>
28 #include <sys/sensors.h>
29 #include <sys/dkio.h>
30 #include <sys/pool.h>
31 
32 #include <machine/bus.h>
33 
34 #include <scsi/scsi_all.h>
35 #include <scsi/scsi_disk.h>
36 #include <scsi/scsiconf.h>
37 
38 #include <dev/biovar.h>
39 #include <dev/ic/mfireg.h>
40 #include <dev/ic/mfivar.h>
41 
#ifdef MFI_DEBUG
/*
 * Debug print mask: OR in the MFI_D_* bits below to enable the
 * corresponding DNPRINTF() categories.  All bits are commented out
 * by default, so a debug kernel is silent until this is edited.
 */
uint32_t	mfi_debug = 0
/*		    | MFI_D_CMD */
/*		    | MFI_D_INTR */
/*		    | MFI_D_MISC */
/*		    | MFI_D_DMA */
/*		    | MFI_D_IOCTL */
/*		    | MFI_D_RW */
/*		    | MFI_D_MEM */
/*		    | MFI_D_CCB */
		;
#endif
54 
/* Autoconf glue: device name "mfi", class DV_DULL, no static softc list. */
struct cfdriver mfi_cd = {
	NULL, "mfi", DV_DULL
};
58 
59 void	mfi_scsi_cmd(struct scsi_xfer *);
60 int	mfi_scsi_ioctl(struct scsi_link *, u_long, caddr_t, int);
61 int	mfi_ioctl_cache(struct scsi_link *, u_long,  struct dk_cache *);
62 void	mfiminphys(struct buf *bp, struct scsi_link *sl);
63 
64 void	mfi_pd_scsi_cmd(struct scsi_xfer *);
65 int	mfi_pd_scsi_probe(struct scsi_link *);
66 
67 struct scsi_adapter mfi_switch = {
68 	mfi_scsi_cmd, mfiminphys, 0, 0, mfi_scsi_ioctl
69 };
70 
71 struct scsi_adapter mfi_pd_switch = {
72 	mfi_pd_scsi_cmd,
73 	mfiminphys,
74 	mfi_pd_scsi_probe,
75 	0,
76 	mfi_scsi_ioctl
77 };
78 
79 void *		mfi_get_ccb(void *);
80 void		mfi_put_ccb(void *, void *);
81 void		mfi_scrub_ccb(struct mfi_ccb *);
82 int		mfi_init_ccb(struct mfi_softc *);
83 
84 struct mfi_mem	*mfi_allocmem(struct mfi_softc *, size_t);
85 void		mfi_freemem(struct mfi_softc *, struct mfi_mem *);
86 
87 int		mfi_transition_firmware(struct mfi_softc *);
88 int		mfi_initialize_firmware(struct mfi_softc *);
89 int		mfi_get_info(struct mfi_softc *);
90 uint32_t	mfi_read(struct mfi_softc *, bus_size_t);
91 void		mfi_write(struct mfi_softc *, bus_size_t, uint32_t);
92 void		mfi_poll(struct mfi_softc *, struct mfi_ccb *);
93 void		mfi_exec(struct mfi_softc *, struct mfi_ccb *);
94 void		mfi_exec_done(struct mfi_softc *, struct mfi_ccb *);
95 int		mfi_create_sgl(struct mfi_softc *, struct mfi_ccb *, int);
96 u_int		mfi_default_sgd_load(struct mfi_softc *, struct mfi_ccb *);
97 int		mfi_syspd(struct mfi_softc *);
98 
99 /* commands */
100 int		mfi_scsi_ld(struct mfi_softc *sc, struct mfi_ccb *,
101 		    struct scsi_xfer *);
102 int		mfi_scsi_io(struct mfi_softc *sc, struct mfi_ccb *,
103 		    struct scsi_xfer *, uint64_t, uint32_t);
104 void		mfi_scsi_xs_done(struct mfi_softc *sc, struct mfi_ccb *);
105 int		mfi_mgmt(struct mfi_softc *, uint32_t, uint32_t, uint32_t,
106 		    void *, uint8_t *);
107 int		mfi_do_mgmt(struct mfi_softc *, struct mfi_ccb * , uint32_t,
108 		    uint32_t, uint32_t, void *, uint8_t *);
109 void		mfi_empty_done(struct mfi_softc *, struct mfi_ccb *);
110 
111 #if NBIO > 0
112 int		mfi_ioctl(struct device *, u_long, caddr_t);
113 int		mfi_bio_getitall(struct mfi_softc *);
114 int		mfi_ioctl_inq(struct mfi_softc *, struct bioc_inq *);
115 int		mfi_ioctl_vol(struct mfi_softc *, struct bioc_vol *);
116 int		mfi_ioctl_disk(struct mfi_softc *, struct bioc_disk *);
117 int		mfi_ioctl_alarm(struct mfi_softc *, struct bioc_alarm *);
118 int		mfi_ioctl_blink(struct mfi_softc *sc, struct bioc_blink *);
119 int		mfi_ioctl_setstate(struct mfi_softc *, struct bioc_setstate *);
120 int		mfi_bio_hs(struct mfi_softc *, int, int, void *);
121 #ifndef SMALL_KERNEL
122 int		mfi_create_sensors(struct mfi_softc *);
123 void		mfi_refresh_sensors(void *);
124 int		mfi_bbu(struct mfi_softc *);
125 #endif /* SMALL_KERNEL */
126 #endif /* NBIO > 0 */
127 
128 void		mfi_start(struct mfi_softc *, struct mfi_ccb *);
129 void		mfi_done(struct mfi_softc *, struct mfi_ccb *);
130 u_int32_t	mfi_xscale_fw_state(struct mfi_softc *);
131 void		mfi_xscale_intr_ena(struct mfi_softc *);
132 int		mfi_xscale_intr(struct mfi_softc *);
133 void		mfi_xscale_post(struct mfi_softc *, struct mfi_ccb *);
134 
/*
 * Per-chipset operation tables.  Each table supplies the register
 * accessors dispatched through the mfi_fw_state()/mfi_intr_enable()/
 * mfi_my_intr()/mfi_post()/mfi_sgd_load() macros below, plus the
 * inbound doorbell register offset (mio_idb) and feature flags
 * (mio_flags) read directly by mfi_transition_firmware()/mfi_attach().
 */
static const struct mfi_iop_ops mfi_iop_xscale = {
	mfi_xscale_fw_state,
	mfi_xscale_intr_ena,
	mfi_xscale_intr,
	mfi_xscale_post,
	mfi_default_sgd_load,
	0,	/* mio_idb; mio_flags implicitly 0 */
};

u_int32_t	mfi_ppc_fw_state(struct mfi_softc *);
void		mfi_ppc_intr_ena(struct mfi_softc *);
int		mfi_ppc_intr(struct mfi_softc *);
void		mfi_ppc_post(struct mfi_softc *, struct mfi_ccb *);

static const struct mfi_iop_ops mfi_iop_ppc = {
	mfi_ppc_fw_state,
	mfi_ppc_intr_ena,
	mfi_ppc_intr,
	mfi_ppc_post,
	mfi_default_sgd_load,
	MFI_IDB,	/* mio_idb */
	0		/* mio_flags */
};

u_int32_t	mfi_gen2_fw_state(struct mfi_softc *);
void		mfi_gen2_intr_ena(struct mfi_softc *);
int		mfi_gen2_intr(struct mfi_softc *);
void		mfi_gen2_post(struct mfi_softc *, struct mfi_ccb *);

static const struct mfi_iop_ops mfi_iop_gen2 = {
	mfi_gen2_fw_state,
	mfi_gen2_intr_ena,
	mfi_gen2_intr,
	mfi_gen2_post,
	mfi_default_sgd_load,
	MFI_IDB,	/* mio_idb */
	0		/* mio_flags */
};

u_int32_t	mfi_skinny_fw_state(struct mfi_softc *);
void		mfi_skinny_intr_ena(struct mfi_softc *);
int		mfi_skinny_intr(struct mfi_softc *);
void		mfi_skinny_post(struct mfi_softc *, struct mfi_ccb *);
u_int		mfi_skinny_sgd_load(struct mfi_softc *, struct mfi_ccb *);

/* "skinny" controllers use their own SG loader, doorbell and syspd support */
static const struct mfi_iop_ops mfi_iop_skinny = {
	mfi_skinny_fw_state,
	mfi_skinny_intr_ena,
	mfi_skinny_intr,
	mfi_skinny_post,
	mfi_skinny_sgd_load,
	MFI_SKINNY_IDB,	/* mio_idb */
	MFI_IOP_F_SYSPD	/* mio_flags */
};

/* Convenience wrappers dispatching through the active iop ops table. */
#define mfi_fw_state(_s)	((_s)->sc_iop->mio_fw_state(_s))
#define mfi_intr_enable(_s)	((_s)->sc_iop->mio_intr_ena(_s))
#define mfi_my_intr(_s)		((_s)->sc_iop->mio_intr(_s))
#define mfi_post(_s, _c)	((_s)->sc_iop->mio_post((_s), (_c)))
#define mfi_sgd_load(_s, _c)	((_s)->sc_iop->mio_sgd_load((_s), (_c)))
195 
196 void *
197 mfi_get_ccb(void *cookie)
198 {
199 	struct mfi_softc	*sc = cookie;
200 	struct mfi_ccb		*ccb;
201 
202 	KERNEL_UNLOCK();
203 
204 	mtx_enter(&sc->sc_ccb_mtx);
205 	ccb = SLIST_FIRST(&sc->sc_ccb_freeq);
206 	if (ccb != NULL) {
207 		SLIST_REMOVE_HEAD(&sc->sc_ccb_freeq, ccb_link);
208 		ccb->ccb_state = MFI_CCB_READY;
209 	}
210 	mtx_leave(&sc->sc_ccb_mtx);
211 
212 	DNPRINTF(MFI_D_CCB, "%s: mfi_get_ccb: %p\n", DEVNAME(sc), ccb);
213 	KERNEL_LOCK();
214 
215 	return (ccb);
216 }
217 
/*
 * scsi_iopool "put" hook: return a ccb to the free list.  Mirrors
 * mfi_get_ccb(): the kernel lock is dropped around the ccb mutex.
 */
void
mfi_put_ccb(void *cookie, void *io)
{
	struct mfi_softc	*sc = cookie;
	struct mfi_ccb		*ccb = io;

	DNPRINTF(MFI_D_CCB, "%s: mfi_put_ccb: %p\n", DEVNAME(sc), ccb);

	KERNEL_UNLOCK();
	mtx_enter(&sc->sc_ccb_mtx);
	SLIST_INSERT_HEAD(&sc->sc_ccb_freeq, ccb, ccb_link);
	mtx_leave(&sc->sc_ccb_mtx);
	KERNEL_LOCK();
}
232 
233 void
234 mfi_scrub_ccb(struct mfi_ccb *ccb)
235 {
236 	struct mfi_frame_header	*hdr = &ccb->ccb_frame->mfr_header;
237 
238 	hdr->mfh_cmd_status = 0x0;
239 	hdr->mfh_flags = 0x0;
240 	ccb->ccb_state = MFI_CCB_FREE;
241 	ccb->ccb_cookie = NULL;
242 	ccb->ccb_flags = 0;
243 	ccb->ccb_done = NULL;
244 	ccb->ccb_direction = 0;
245 	ccb->ccb_frame_size = 0;
246 	ccb->ccb_extra_frames = 0;
247 	ccb->ccb_sgl = NULL;
248 	ccb->ccb_data = NULL;
249 	ccb->ccb_len = 0;
250 }
251 
/*
 * Allocate and initialize the ccb array.  Each ccb is wired to its
 * slice of the preallocated frame and sense DMA memory (indexed by i,
 * which is also stored as the frame context so completions can find
 * the ccb again) and gets its own transfer dmamap.  All ccbs end up
 * on the free queue via mfi_put_ccb().  Returns 0 on success, 1 on
 * failure (dmamap creation), with previously created maps destroyed.
 */
int
mfi_init_ccb(struct mfi_softc *sc)
{
	struct mfi_ccb		*ccb;
	uint32_t		i;
	int			error;

	DNPRINTF(MFI_D_CCB, "%s: mfi_init_ccb\n", DEVNAME(sc));

	/* M_WAITOK: sleeps until memory is available, cannot return NULL */
	sc->sc_ccb = mallocarray(sc->sc_max_cmds, sizeof(struct mfi_ccb),
	    M_DEVBUF, M_WAITOK|M_ZERO);

	for (i = 0; i < sc->sc_max_cmds; i++) {
		ccb = &sc->sc_ccb[i];

		/* select i'th frame */
		ccb->ccb_frame = (union mfi_frame *)
		    (MFIMEM_KVA(sc->sc_frames) + sc->sc_frames_size * i);
		ccb->ccb_pframe =
		    MFIMEM_DVA(sc->sc_frames) + sc->sc_frames_size * i;
		ccb->ccb_pframe_offset = sc->sc_frames_size * i;
		/* context identifies this ccb in interrupt completions */
		ccb->ccb_frame->mfr_header.mfh_context = i;

		/* select i'th sense */
		ccb->ccb_sense = (struct mfi_sense *)
		    (MFIMEM_KVA(sc->sc_sense) + MFI_SENSE_SIZE * i);
		ccb->ccb_psense =
		    (MFIMEM_DVA(sc->sc_sense) + MFI_SENSE_SIZE * i);

		/* create a dma map for transfer */
		error = bus_dmamap_create(sc->sc_dmat,
		    MAXPHYS, sc->sc_max_sgl, MAXPHYS, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->ccb_dmamap);
		if (error) {
			printf("%s: cannot create ccb dmamap (%d)\n",
			    DEVNAME(sc), error);
			goto destroy;
		}

		DNPRINTF(MFI_D_CCB,
		    "ccb(%d): %p frame: %#x (%#x) sense: %#x (%#x) map: %#x\n",
		    ccb->ccb_frame->mfr_header.mfh_context, ccb,
		    ccb->ccb_frame, ccb->ccb_pframe,
		    ccb->ccb_sense, ccb->ccb_psense,
		    ccb->ccb_dmamap);

		/* add ccb to queue */
		mfi_put_ccb(sc, ccb);
	}

	return (0);
destroy:
	/* free dma maps and ccb memory */
	/* drain the free queue: only fully set up ccbs were enqueued */
	while ((ccb = mfi_get_ccb(sc)) != NULL)
		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);

	free(sc->sc_ccb, M_DEVBUF, 0);

	return (1);
}
312 
313 uint32_t
314 mfi_read(struct mfi_softc *sc, bus_size_t r)
315 {
316 	uint32_t rv;
317 
318 	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
319 	    BUS_SPACE_BARRIER_READ);
320 	rv = bus_space_read_4(sc->sc_iot, sc->sc_ioh, r);
321 
322 	DNPRINTF(MFI_D_RW, "%s: mr 0x%x 0x08%x ", DEVNAME(sc), r, rv);
323 	return (rv);
324 }
325 
/*
 * Write a 32-bit controller register at offset r, followed by a write
 * barrier so the store is posted before subsequent accesses.
 */
void
mfi_write(struct mfi_softc *sc, bus_size_t r, uint32_t v)
{
	DNPRINTF(MFI_D_RW, "%s: mw 0x%x 0x%08x", DEVNAME(sc), r, v);

	bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_WRITE);
}
335 
/*
 * Allocate a contiguous, zeroed, DMA-able memory chunk of the given
 * size and map it into kernel virtual memory.  Returns NULL on any
 * failure; the goto chain unwinds exactly the steps already completed,
 * in reverse order.
 */
struct mfi_mem *
mfi_allocmem(struct mfi_softc *sc, size_t size)
{
	struct mfi_mem		*mm;
	int			nsegs;

	DNPRINTF(MFI_D_MEM, "%s: mfi_allocmem: %d\n", DEVNAME(sc),
	    size);

	mm = malloc(sizeof(struct mfi_mem), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (mm == NULL)
		return (NULL);

	mm->am_size = size;

	/* single segment: the chunk must be physically contiguous */
	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &mm->am_map) != 0)
		goto amfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &mm->am_seg, 1,
	    &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &mm->am_seg, nsegs, size, &mm->am_kva,
	    BUS_DMA_NOWAIT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, mm->am_map, mm->am_kva, size, NULL,
	    BUS_DMA_NOWAIT) != 0)
		goto unmap;

	DNPRINTF(MFI_D_MEM, "  kva: %p  dva: %p  map: %p\n",
	    mm->am_kva, mm->am_map->dm_segs[0].ds_addr, mm->am_map);

	return (mm);

unmap:
	bus_dmamem_unmap(sc->sc_dmat, mm->am_kva, size);
free:
	bus_dmamem_free(sc->sc_dmat, &mm->am_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, mm->am_map);
amfree:
	free(mm, M_DEVBUF, 0);

	return (NULL);
}
383 
/*
 * Release a chunk obtained from mfi_allocmem(): unload, unmap, free
 * the DMA segment, destroy the map, then free the descriptor itself —
 * the exact reverse of the allocation order.
 */
void
mfi_freemem(struct mfi_softc *sc, struct mfi_mem *mm)
{
	DNPRINTF(MFI_D_MEM, "%s: mfi_freemem: %p\n", DEVNAME(sc), mm);

	bus_dmamap_unload(sc->sc_dmat, mm->am_map);
	bus_dmamem_unmap(sc->sc_dmat, mm->am_kva, mm->am_size);
	bus_dmamem_free(sc->sc_dmat, &mm->am_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, mm->am_map);
	free(mm, M_DEVBUF, 0);
}
395 
/*
 * Drive the firmware state machine until it reports MFI_STATE_READY.
 * Some states need a doorbell poke (handshake clear, transition from
 * operational); others are just waited out.  Each state is polled in
 * 100ms steps for at most max_wait seconds.  Returns 0 once ready,
 * 1 on firmware fault, an unknown state, or a stuck state.
 */
int
mfi_transition_firmware(struct mfi_softc *sc)
{
	int32_t			fw_state, cur_state;
	u_int32_t		idb = sc->sc_iop->mio_idb;
	int			max_wait, i;

	fw_state = mfi_fw_state(sc) & MFI_STATE_MASK;

	DNPRINTF(MFI_D_CMD, "%s: mfi_transition_firmware: %#x\n", DEVNAME(sc),
	    fw_state);

	while (fw_state != MFI_STATE_READY) {
		DNPRINTF(MFI_D_MISC,
		    "%s: waiting for firmware to become ready\n",
		    DEVNAME(sc));
		cur_state = fw_state;
		switch (fw_state) {
		case MFI_STATE_FAULT:
			/* unrecoverable; give up immediately */
			printf("%s: firmware fault\n", DEVNAME(sc));
			return (1);
		case MFI_STATE_WAIT_HANDSHAKE:
			mfi_write(sc, idb, MFI_INIT_CLEAR_HANDSHAKE);
			max_wait = 2;
			break;
		case MFI_STATE_OPERATIONAL:
			mfi_write(sc, idb, MFI_INIT_READY);
			max_wait = 10;
			break;
		case MFI_STATE_UNDEFINED:
		case MFI_STATE_BB_INIT:
			max_wait = 2;
			break;
		case MFI_STATE_FW_INIT:
		case MFI_STATE_DEVICE_SCAN:
		case MFI_STATE_FLUSH_CACHE:
			max_wait = 20;
			break;
		default:
			printf("%s: unknown firmware state %d\n",
			    DEVNAME(sc), fw_state);
			return (1);
		}
		/* poll in 100ms steps, up to max_wait seconds */
		for (i = 0; i < (max_wait * 10); i++) {
			fw_state = mfi_fw_state(sc) & MFI_STATE_MASK;
			if (fw_state == cur_state)
				DELAY(100000);
			else
				break;
		}
		if (fw_state == cur_state) {
			printf("%s: firmware stuck in state %#x\n",
			    DEVNAME(sc), fw_state);
			return (1);
		}
	}

	return (0);
}
455 
/*
 * Hand the firmware the DMA addresses of the reply queue and the
 * producer/consumer indices via an INIT frame.  The queue info
 * structure is built in the frame slot directly after the INIT frame
 * (ccb_pframe + MFI_FRAME_SIZE).  Polled; returns 0 on MFI_STAT_OK,
 * 1 otherwise.
 */
int
mfi_initialize_firmware(struct mfi_softc *sc)
{
	struct mfi_ccb		*ccb;
	struct mfi_init_frame	*init;
	struct mfi_init_qinfo	*qinfo;
	int			rv = 0;

	DNPRINTF(MFI_D_MISC, "%s: mfi_initialize_firmware\n", DEVNAME(sc));

	ccb = scsi_io_get(&sc->sc_iopool, 0);
	mfi_scrub_ccb(ccb);

	init = &ccb->ccb_frame->mfr_init;
	qinfo = (struct mfi_init_qinfo *)((uint8_t *)init + MFI_FRAME_SIZE);

	memset(qinfo, 0, sizeof(*qinfo));
	/* reply queue has one more entry than outstanding commands */
	qinfo->miq_rq_entries = htole32(sc->sc_max_cmds + 1);

	qinfo->miq_rq_addr = htole64(MFIMEM_DVA(sc->sc_pcq) +
	    offsetof(struct mfi_prod_cons, mpc_reply_q));

	qinfo->miq_pi_addr = htole64(MFIMEM_DVA(sc->sc_pcq) +
	    offsetof(struct mfi_prod_cons, mpc_producer));

	qinfo->miq_ci_addr = htole64(MFIMEM_DVA(sc->sc_pcq) +
	    offsetof(struct mfi_prod_cons, mpc_consumer));

	init->mif_header.mfh_cmd = MFI_CMD_INIT;
	init->mif_header.mfh_data_len = htole32(sizeof(*qinfo));
	init->mif_qinfo_new_addr = htole64(ccb->ccb_pframe + MFI_FRAME_SIZE);

	/* make the queue memory visible to the device before starting */
	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_pcq),
	    0, MFIMEM_LEN(sc->sc_pcq),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	ccb->ccb_done = mfi_empty_done;
	mfi_poll(sc, ccb);
	if (init->mif_header.mfh_cmd_status != MFI_STAT_OK)
		rv = 1;

	mfi_put_ccb(sc, ccb);

	return (rv);
}
501 
/*
 * No-op completion callback for commands whose status is examined
 * inline by the submitter (e.g. mfi_initialize_firmware()).
 */
void
mfi_empty_done(struct mfi_softc *sc, struct mfi_ccb *ccb)
{
	/* intentionally empty */
}
507 
/*
 * Fetch controller information into sc->sc_info via the GET_INFO
 * DCMD.  Returns 0 on success, 1 on failure.  With MFI_DEBUG the
 * retrieved data is dumped in full to the console.
 */
int
mfi_get_info(struct mfi_softc *sc)
{
#ifdef MFI_DEBUG
	int i;
#endif
	DNPRINTF(MFI_D_MISC, "%s: mfi_get_info\n", DEVNAME(sc));

	if (mfi_mgmt(sc, MR_DCMD_CTRL_GET_INFO, MFI_DATA_IN,
	    sizeof(sc->sc_info), &sc->sc_info, NULL))
		return (1);

#ifdef MFI_DEBUG
	/* dump everything the controller told us */
	for (i = 0; i < sc->sc_info.mci_image_component_count; i++) {
		printf("%s: active FW %s Version %s date %s time %s\n",
		    DEVNAME(sc),
		    sc->sc_info.mci_image_component[i].mic_name,
		    sc->sc_info.mci_image_component[i].mic_version,
		    sc->sc_info.mci_image_component[i].mic_build_date,
		    sc->sc_info.mci_image_component[i].mic_build_time);
	}

	for (i = 0; i < sc->sc_info.mci_pending_image_component_count; i++) {
		printf("%s: pending FW %s Version %s date %s time %s\n",
		    DEVNAME(sc),
		    sc->sc_info.mci_pending_image_component[i].mic_name,
		    sc->sc_info.mci_pending_image_component[i].mic_version,
		    sc->sc_info.mci_pending_image_component[i].mic_build_date,
		    sc->sc_info.mci_pending_image_component[i].mic_build_time);
	}

	printf("%s: max_arms %d max_spans %d max_arrs %d max_lds %d name %s\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_max_arms,
	    sc->sc_info.mci_max_spans,
	    sc->sc_info.mci_max_arrays,
	    sc->sc_info.mci_max_lds,
	    sc->sc_info.mci_product_name);

	printf("%s: serial %s present %#x fw time %d max_cmds %d max_sg %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_serial_number,
	    sc->sc_info.mci_hw_present,
	    sc->sc_info.mci_current_fw_time,
	    sc->sc_info.mci_max_cmds,
	    sc->sc_info.mci_max_sg_elements);

	printf("%s: max_rq %d lds_pres %d lds_deg %d lds_off %d pd_pres %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_max_request_size,
	    sc->sc_info.mci_lds_present,
	    sc->sc_info.mci_lds_degraded,
	    sc->sc_info.mci_lds_offline,
	    sc->sc_info.mci_pd_present);

	printf("%s: pd_dsk_prs %d pd_dsk_pred_fail %d pd_dsk_fail %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_pd_disks_present,
	    sc->sc_info.mci_pd_disks_pred_failure,
	    sc->sc_info.mci_pd_disks_failed);

	printf("%s: nvram %d mem %d flash %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_nvram_size,
	    sc->sc_info.mci_memory_size,
	    sc->sc_info.mci_flash_size);

	printf("%s: ram_cor %d ram_uncor %d clus_all %d clus_act %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_ram_correctable_errors,
	    sc->sc_info.mci_ram_uncorrectable_errors,
	    sc->sc_info.mci_cluster_allowed,
	    sc->sc_info.mci_cluster_active);

	printf("%s: max_strps_io %d raid_lvl %#x adapt_ops %#x ld_ops %#x\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_max_strips_per_io,
	    sc->sc_info.mci_raid_levels,
	    sc->sc_info.mci_adapter_ops,
	    sc->sc_info.mci_ld_ops);

	printf("%s: strp_sz_min %d strp_sz_max %d pd_ops %#x pd_mix %#x\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_stripe_sz_ops.min,
	    sc->sc_info.mci_stripe_sz_ops.max,
	    sc->sc_info.mci_pd_ops,
	    sc->sc_info.mci_pd_mix_support);

	printf("%s: ecc_bucket %d pckg_prop %s\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_ecc_bucket_count,
	    sc->sc_info.mci_package_version);

	printf("%s: sq_nm %d prd_fail_poll %d intr_thrtl %d intr_thrtl_to %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_seq_num,
	    sc->sc_info.mci_properties.mcp_pred_fail_poll_interval,
	    sc->sc_info.mci_properties.mcp_intr_throttle_cnt,
	    sc->sc_info.mci_properties.mcp_intr_throttle_timeout);

	printf("%s: rbld_rate %d patr_rd_rate %d bgi_rate %d cc_rate %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_rebuild_rate,
	    sc->sc_info.mci_properties.mcp_patrol_read_rate,
	    sc->sc_info.mci_properties.mcp_bgi_rate,
	    sc->sc_info.mci_properties.mcp_cc_rate);

	printf("%s: rc_rate %d ch_flsh %d spin_cnt %d spin_dly %d clus_en %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_recon_rate,
	    sc->sc_info.mci_properties.mcp_cache_flush_interval,
	    sc->sc_info.mci_properties.mcp_spinup_drv_cnt,
	    sc->sc_info.mci_properties.mcp_spinup_delay,
	    sc->sc_info.mci_properties.mcp_cluster_enable);

	printf("%s: coerc %d alarm %d dis_auto_rbld %d dis_bat_wrn %d ecc %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_coercion_mode,
	    sc->sc_info.mci_properties.mcp_alarm_enable,
	    sc->sc_info.mci_properties.mcp_disable_auto_rebuild,
	    sc->sc_info.mci_properties.mcp_disable_battery_warn,
	    sc->sc_info.mci_properties.mcp_ecc_bucket_size);

	printf("%s: ecc_leak %d rest_hs %d exp_encl_dev %d\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_properties.mcp_ecc_bucket_leak_rate,
	    sc->sc_info.mci_properties.mcp_restore_hotspare_on_insertion,
	    sc->sc_info.mci_properties.mcp_expose_encl_devices);

	printf("%s: vendor %#x device %#x subvendor %#x subdevice %#x\n",
	    DEVNAME(sc),
	    sc->sc_info.mci_pci.mip_vendor,
	    sc->sc_info.mci_pci.mip_device,
	    sc->sc_info.mci_pci.mip_subvendor,
	    sc->sc_info.mci_pci.mip_subdevice);

	printf("%s: type %#x port_count %d port_addr ",
	    DEVNAME(sc),
	    sc->sc_info.mci_host.mih_type,
	    sc->sc_info.mci_host.mih_port_count);

	for (i = 0; i < 8; i++)
		printf("%.0llx ", sc->sc_info.mci_host.mih_port_addr[i]);
	printf("\n");

	printf("%s: type %.x port_count %d port_addr ",
	    DEVNAME(sc),
	    sc->sc_info.mci_device.mid_type,
	    sc->sc_info.mci_device.mid_port_count);

	for (i = 0; i < 8; i++)
		printf("%.0llx ", sc->sc_info.mci_device.mid_port_addr[i]);
	printf("\n");
#endif /* MFI_DEBUG */

	return (0);
}
665 
666 void
667 mfiminphys(struct buf *bp, struct scsi_link *sl)
668 {
669 	DNPRINTF(MFI_D_MISC, "mfiminphys: %d\n", bp->b_bcount);
670 
671 	/* XXX currently using MFI_MAXFER = MAXPHYS */
672 	if (bp->b_bcount > MFI_MAXFER)
673 		bp->b_bcount = MFI_MAXFER;
674 	minphys(bp);
675 }
676 
677 int
678 mfi_attach(struct mfi_softc *sc, enum mfi_iop iop)
679 {
680 	struct scsibus_attach_args saa;
681 	uint32_t		status, frames, max_sgl;
682 	int			i;
683 
684 	switch (iop) {
685 	case MFI_IOP_XSCALE:
686 		sc->sc_iop = &mfi_iop_xscale;
687 		break;
688 	case MFI_IOP_PPC:
689 		sc->sc_iop = &mfi_iop_ppc;
690 		break;
691 	case MFI_IOP_GEN2:
692 		sc->sc_iop = &mfi_iop_gen2;
693 		break;
694 	case MFI_IOP_SKINNY:
695 		sc->sc_iop = &mfi_iop_skinny;
696 		break;
697 	default:
698 		panic("%s: unknown iop %d", DEVNAME(sc), iop);
699 	}
700 
701 	DNPRINTF(MFI_D_MISC, "%s: mfi_attach\n", DEVNAME(sc));
702 
703 	if (mfi_transition_firmware(sc))
704 		return (1);
705 
706 	SLIST_INIT(&sc->sc_ccb_freeq);
707 	mtx_init(&sc->sc_ccb_mtx, IPL_BIO);
708 	scsi_iopool_init(&sc->sc_iopool, sc, mfi_get_ccb, mfi_put_ccb);
709 
710 	rw_init(&sc->sc_lock, "mfi_lock");
711 
712 	status = mfi_fw_state(sc);
713 	sc->sc_max_cmds = status & MFI_STATE_MAXCMD_MASK;
714 	max_sgl = (status & MFI_STATE_MAXSGL_MASK) >> 16;
715 	if (sc->sc_64bit_dma) {
716 		sc->sc_max_sgl = min(max_sgl, (128 * 1024) / PAGE_SIZE + 1);
717 		sc->sc_sgl_size = sizeof(struct mfi_sg64);
718 		sc->sc_sgl_flags = MFI_FRAME_SGL64;
719 	} else {
720 		sc->sc_max_sgl = max_sgl;
721 		sc->sc_sgl_size = sizeof(struct mfi_sg32);
722 		sc->sc_sgl_flags = MFI_FRAME_SGL32;
723 	}
724 	if (iop == MFI_IOP_SKINNY)
725 		sc->sc_sgl_size = sizeof(struct mfi_sg_skinny);
726 	DNPRINTF(MFI_D_MISC, "%s: 64bit: %d max commands: %u, max sgl: %u\n",
727 	    DEVNAME(sc), sc->sc_64bit_dma, sc->sc_max_cmds, sc->sc_max_sgl);
728 
729 	/* consumer/producer and reply queue memory */
730 	sc->sc_pcq = mfi_allocmem(sc, (sizeof(uint32_t) * sc->sc_max_cmds) +
731 	    sizeof(struct mfi_prod_cons));
732 	if (sc->sc_pcq == NULL) {
733 		printf("%s: unable to allocate reply queue memory\n",
734 		    DEVNAME(sc));
735 		goto nopcq;
736 	}
737 
738 	/* frame memory */
739 	/* we are not doing 64 bit IO so only calculate # of 32 bit frames */
740 	frames = (sc->sc_sgl_size * sc->sc_max_sgl + MFI_FRAME_SIZE - 1) /
741 	    MFI_FRAME_SIZE + 1;
742 	sc->sc_frames_size = frames * MFI_FRAME_SIZE;
743 	sc->sc_frames = mfi_allocmem(sc, sc->sc_frames_size * sc->sc_max_cmds);
744 	if (sc->sc_frames == NULL) {
745 		printf("%s: unable to allocate frame memory\n", DEVNAME(sc));
746 		goto noframe;
747 	}
748 	/* XXX hack, fix this */
749 	if (MFIMEM_DVA(sc->sc_frames) & 0x3f) {
750 		printf("%s: improper frame alignment (%#lx) FIXME\n",
751 		    DEVNAME(sc), MFIMEM_DVA(sc->sc_frames));
752 		goto noframe;
753 	}
754 
755 	/* sense memory */
756 	sc->sc_sense = mfi_allocmem(sc, sc->sc_max_cmds * MFI_SENSE_SIZE);
757 	if (sc->sc_sense == NULL) {
758 		printf("%s: unable to allocate sense memory\n", DEVNAME(sc));
759 		goto nosense;
760 	}
761 
762 	/* now that we have all memory bits go initialize ccbs */
763 	if (mfi_init_ccb(sc)) {
764 		printf("%s: could not init ccb list\n", DEVNAME(sc));
765 		goto noinit;
766 	}
767 
768 	/* kickstart firmware with all addresses and pointers */
769 	if (mfi_initialize_firmware(sc)) {
770 		printf("%s: could not initialize firmware\n", DEVNAME(sc));
771 		goto noinit;
772 	}
773 
774 	if (mfi_get_info(sc)) {
775 		printf("%s: could not retrieve controller information\n",
776 		    DEVNAME(sc));
777 		goto noinit;
778 	}
779 
780 	printf("%s: \"%s\", firmware %s", DEVNAME(sc),
781 	    sc->sc_info.mci_product_name, sc->sc_info.mci_package_version);
782 	if (letoh16(sc->sc_info.mci_memory_size) > 0)
783 		printf(", %uMB cache", letoh16(sc->sc_info.mci_memory_size));
784 	printf("\n");
785 
786 	sc->sc_ld_cnt = sc->sc_info.mci_lds_present;
787 	for (i = 0; i < sc->sc_ld_cnt; i++)
788 		sc->sc_ld[i].ld_present = 1;
789 
790 	sc->sc_link.adapter = &mfi_switch;
791 	sc->sc_link.adapter_softc = sc;
792 	sc->sc_link.adapter_buswidth = sc->sc_info.mci_max_lds;
793 	sc->sc_link.adapter_target = -1;
794 	sc->sc_link.luns = 1;
795 	sc->sc_link.openings = sc->sc_max_cmds - 1;
796 	sc->sc_link.pool = &sc->sc_iopool;
797 
798 	bzero(&saa, sizeof(saa));
799 	saa.saa_sc_link = &sc->sc_link;
800 
801 	sc->sc_scsibus = (struct scsibus_softc *)
802 	    config_found(&sc->sc_dev, &saa, scsiprint);
803 
804 	if (ISSET(sc->sc_iop->mio_flags, MFI_IOP_F_SYSPD))
805 		mfi_syspd(sc);
806 
807 	/* enable interrupts */
808 	mfi_intr_enable(sc);
809 
810 #if NBIO > 0
811 	if (bio_register(&sc->sc_dev, mfi_ioctl) != 0)
812 		panic("%s: controller registration failed", DEVNAME(sc));
813 	else
814 		sc->sc_ioctl = mfi_ioctl;
815 
816 #ifndef SMALL_KERNEL
817 	if (mfi_create_sensors(sc) != 0)
818 		printf("%s: unable to create sensors\n", DEVNAME(sc));
819 #endif
820 #endif /* NBIO > 0 */
821 
822 	return (0);
823 noinit:
824 	mfi_freemem(sc, sc->sc_sense);
825 nosense:
826 	mfi_freemem(sc, sc->sc_frames);
827 noframe:
828 	mfi_freemem(sc, sc->sc_pcq);
829 nopcq:
830 	return (1);
831 }
832 
/*
 * Attach a second scsibus exposing the physical disks ("system pd")
 * on controllers with MFI_IOP_F_SYSPD.  Queries the PD list from the
 * firmware, records each pd's id in sc->sc_pd->pd_links[], and
 * config_found()s the bus.  Returns 0 on success, 1 on failure with
 * everything allocated here freed again.
 */
int
mfi_syspd(struct mfi_softc *sc)
{
	struct scsibus_attach_args saa;
	struct scsi_link *link;
	struct mfi_pd_link *pl;
	struct mfi_pd_list *pd;
	u_int npds, i;

	sc->sc_pd = malloc(sizeof(*sc->sc_pd), M_DEVBUF, M_WAITOK|M_ZERO);
	if (sc->sc_pd == NULL)
		return (1);

	/* temporary buffer for the firmware's PD list */
	pd = malloc(sizeof(*pd), M_TEMP, M_WAITOK|M_ZERO);
	if (pd == NULL)
		goto nopdsc;

	if (mfi_mgmt(sc, MR_DCMD_PD_GET_LIST, MFI_DATA_IN,
	    sizeof(*pd), pd, NULL) != 0)
		goto nopd;

	npds = letoh32(pd->mpl_no_pd);
	for (i = 0; i < npds; i++) {
		pl = malloc(sizeof(*pl), M_DEVBUF, M_WAITOK|M_ZERO);
		if (pl == NULL)
			goto nopl;

		pl->pd_id = pd->mpl_address[i].mpa_pd_id;
		sc->sc_pd->pd_links[i] = pl;
	}

	free(pd, M_TEMP, 0);

	link = &sc->sc_pd->pd_link;
	link->adapter = &mfi_pd_switch;
	link->adapter_softc = sc;
	link->adapter_buswidth = MFI_MAX_PD;
	link->adapter_target = -1;
	link->openings = sc->sc_max_cmds - 1;
	link->pool = &sc->sc_iopool;

	bzero(&saa, sizeof(saa));
	saa.saa_sc_link = link;

	sc->sc_pd->pd_scsibus = (struct scsibus_softc *)
	    config_found(&sc->sc_dev, &saa, scsiprint);

	return (0);
nopl:
	/* free any pd links allocated before the failure */
	for (i = 0; i < npds; i++) {
		pl = sc->sc_pd->pd_links[i];
		if (pl == NULL)
			break;

		free(pl, M_DEVBUF, 0);
	}
nopd:
	free(pd, M_TEMP, 0);
nopdsc:
	free(sc->sc_pd, M_DEVBUF, 0);
	return (1);
}
895 
/*
 * Submit a command and busy-wait for its completion (no interrupts).
 * The frame's command status is preset to 0xff as a "not done yet"
 * sentinel; the firmware overwrites it on completion.  Polls in 1ms
 * steps for up to ~5 seconds, then marks the ccb MFI_CCB_F_ERR.
 * Always invokes ccb_done at the end.
 */
void
mfi_poll(struct mfi_softc *sc, struct mfi_ccb *ccb)
{
	struct mfi_frame_header *hdr;
	int to = 0;

	DNPRINTF(MFI_D_CMD, "%s: mfi_poll\n", DEVNAME(sc));

	hdr = &ccb->ccb_frame->mfr_header;
	hdr->mfh_cmd_status = 0xff;
	/* completion is detected by polling, not via the reply queue */
	hdr->mfh_flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;

	mfi_start(sc, ccb);

	for (;;) {
		delay(1000);

		/* pull the frame back from the device to check the status */
		bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
		    ccb->ccb_pframe_offset, sc->sc_frames_size,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		if (hdr->mfh_cmd_status != 0xff)
			break;

		if (to++ > 5000) {
			printf("%s: timeout on ccb %d\n", DEVNAME(sc),
			    hdr->mfh_context);
			ccb->ccb_flags |= MFI_CCB_F_ERR;
			break;
		}

		/* hand the frame back to the device for the next check */
		bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
		    ccb->ccb_pframe_offset, sc->sc_frames_size,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	}

	if (ccb->ccb_len > 0) {
		/* complete and unload the data transfer mapping */
		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
		    ccb->ccb_dmamap->dm_mapsize,
		    (ccb->ccb_direction & MFI_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
	}

	ccb->ccb_done(sc, ccb);
}
943 
/*
 * Submit a command and sleep until it completes.  An on-stack mutex
 * is stashed in ccb_cookie; mfi_exec_done() clears the cookie and
 * wakes us, so "cookie != NULL" doubles as the wait condition.
 */
void
mfi_exec(struct mfi_softc *sc, struct mfi_ccb *ccb)
{
	struct mutex m = MUTEX_INITIALIZER(IPL_BIO);

#ifdef DIAGNOSTIC
	if (ccb->ccb_cookie != NULL || ccb->ccb_done != NULL)
		panic("mfi_exec called with cookie or done set");
#endif

	ccb->ccb_cookie = &m;
	ccb->ccb_done = mfi_exec_done;

	mfi_start(sc, ccb);

	mtx_enter(&m);
	while (ccb->ccb_cookie != NULL)
		msleep(ccb, &m, PRIBIO, "mfiexec", 0);
	mtx_leave(&m);
}
964 
965 void
966 mfi_exec_done(struct mfi_softc *sc, struct mfi_ccb *ccb)
967 {
968 	struct mutex *m = ccb->ccb_cookie;
969 
970 	mtx_enter(m);
971 	ccb->ccb_cookie = NULL;
972 	wakeup_one(ccb);
973 	mtx_leave(m);
974 }
975 
/*
 * Interrupt handler: walk the reply queue from the consumer index to
 * the firmware's producer index, completing each posted ccb (the ring
 * entry holds the frame context, i.e. the ccb array index).  Consumed
 * slots are poisoned with MFI_INVALID_CTX.  The ring has
 * sc_max_cmds + 1 entries.  Returns 1 if any command was claimed.
 */
int
mfi_intr(void *arg)
{
	struct mfi_softc	*sc = arg;
	struct mfi_prod_cons	*pcq = MFIMEM_KVA(sc->sc_pcq);
	struct mfi_ccb		*ccb;
	uint32_t		producer, consumer, ctx;
	int			claimed = 0;

	if (!mfi_my_intr(sc))
		return (0);

	/* pull the queue memory in from the device */
	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_pcq),
	    0, MFIMEM_LEN(sc->sc_pcq),
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	producer = letoh32(pcq->mpc_producer);
	consumer = letoh32(pcq->mpc_consumer);

	DNPRINTF(MFI_D_INTR, "%s: mfi_intr %#x %#x\n", DEVNAME(sc), sc, pcq);

	while (consumer != producer) {
		DNPRINTF(MFI_D_INTR, "%s: mfi_intr pi %#x ci %#x\n",
		    DEVNAME(sc), producer, consumer);

		ctx = pcq->mpc_reply_q[consumer];
		pcq->mpc_reply_q[consumer] = MFI_INVALID_CTX;
		if (ctx == MFI_INVALID_CTX)
			printf("%s: invalid context, p: %d c: %d\n",
			    DEVNAME(sc), producer, consumer);
		else {
			/* XXX remove from queue and call scsi_done */
			ccb = &sc->sc_ccb[ctx];
			DNPRINTF(MFI_D_INTR, "%s: mfi_intr context %#x\n",
			    DEVNAME(sc), ctx);
			mfi_done(sc, ccb);

			claimed = 1;
		}
		consumer++;
		if (consumer == (sc->sc_max_cmds + 1))
			consumer = 0;
	}

	/* publish the new consumer index back to the firmware */
	pcq->mpc_consumer = htole32(consumer);

	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_pcq),
	    0, MFIMEM_LEN(sc->sc_pcq),
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	return (claimed);
}
1028 
/*
 * Build a fast-path logical-disk READ/WRITE I/O frame for xs.  The
 * caller has already decoded blockno/blockcnt from the CDB.  Returns 0
 * on success, 1 on failure (no data buffer or SGL setup error).
 */
int
mfi_scsi_io(struct mfi_softc *sc, struct mfi_ccb *ccb,
    struct scsi_xfer *xs, uint64_t blockno, uint32_t blockcnt)
{
	struct scsi_link	*link = xs->sc_link;
	struct mfi_io_frame	*io;

	DNPRINTF(MFI_D_CMD, "%s: mfi_scsi_io: %d\n",
	    DEVNAME((struct mfi_softc *)link->adapter_softc), link->target);

	/* An I/O frame without a data buffer makes no sense. */
	if (!xs->data)
		return (1);

	io = &ccb->ccb_frame->mfr_io;
	if (xs->flags & SCSI_DATA_IN) {
		io->mif_header.mfh_cmd = MFI_CMD_LD_READ;
		ccb->ccb_direction = MFI_DATA_IN;
	} else {
		io->mif_header.mfh_cmd = MFI_CMD_LD_WRITE;
		ccb->ccb_direction = MFI_DATA_OUT;
	}
	io->mif_header.mfh_target_id = link->target;
	io->mif_header.mfh_timeout = 0;
	io->mif_header.mfh_flags = 0;
	io->mif_header.mfh_sense_len = MFI_SENSE_SIZE;
	/* For this frame type mfh_data_len carries the CDB block count. */
	io->mif_header.mfh_data_len = htole32(blockcnt);
	io->mif_lba = htole64(blockno);
	io->mif_sense_addr = htole64(ccb->ccb_psense);

	ccb->ccb_done = mfi_scsi_xs_done;
	ccb->ccb_cookie = xs;
	ccb->ccb_frame_size = MFI_IO_FRAME_SIZE;
	ccb->ccb_sgl = &io->mif_sgl;
	ccb->ccb_data = xs->data;
	ccb->ccb_len = xs->datalen;

	/* Map the data buffer and append the scatter/gather list. */
	if (mfi_create_sgl(sc, ccb, (xs->flags & SCSI_NOSLEEP) ?
	    BUS_DMA_NOWAIT : BUS_DMA_WAITOK))
		return (1);

	return (0);
}
1071 
1072 void
1073 mfi_scsi_xs_done(struct mfi_softc *sc, struct mfi_ccb *ccb)
1074 {
1075 	struct scsi_xfer	*xs = ccb->ccb_cookie;
1076 	struct mfi_frame_header	*hdr = &ccb->ccb_frame->mfr_header;
1077 
1078 	DNPRINTF(MFI_D_INTR, "%s: mfi_scsi_xs_done %#x %#x\n",
1079 	    DEVNAME(sc), ccb, ccb->ccb_frame);
1080 
1081 	switch (hdr->mfh_cmd_status) {
1082 	case MFI_STAT_OK:
1083 		xs->resid = 0;
1084 		break;
1085 
1086 	case MFI_STAT_SCSI_DONE_WITH_ERROR:
1087 		xs->error = XS_SENSE;
1088 		xs->resid = 0;
1089 		memset(&xs->sense, 0, sizeof(xs->sense));
1090 		memcpy(&xs->sense, ccb->ccb_sense, sizeof(xs->sense));
1091 		break;
1092 
1093 	case MFI_STAT_DEVICE_NOT_FOUND:
1094 		xs->error = XS_SELTIMEOUT;
1095 		break;
1096 
1097 	default:
1098 		xs->error = XS_DRIVER_STUFFUP;
1099 		DPRINTF(MFI_D_CMD,
1100 		    "%s: mfi_scsi_xs_done stuffup %02x on %02x\n",
1101 		    DEVNAME(sc), hdr->mfh_cmd_status, xs->cmd->opcode);
1102 
1103 		if (hdr->mfh_scsi_status != 0) {
1104 			DNPRINTF(MFI_D_INTR,
1105 			    "%s: mfi_scsi_xs_done sense %#x %x %x\n",
1106 			    DEVNAME(sc), hdr->mfh_scsi_status,
1107 			    &xs->sense, ccb->ccb_sense);
1108 			memset(&xs->sense, 0, sizeof(xs->sense));
1109 			memcpy(&xs->sense, ccb->ccb_sense,
1110 			    sizeof(struct scsi_sense_data));
1111 			xs->error = XS_SENSE;
1112 		}
1113 		break;
1114 	}
1115 
1116 	KERNEL_LOCK();
1117 	scsi_done(xs);
1118 	KERNEL_UNLOCK();
1119 }
1120 
/*
 * Build a logical-disk SCSI pass-through frame: the CDB is copied
 * verbatim and the xs data buffer (if any) is attached as a
 * scatter/gather list.  Returns 0 on success, 1 on SGL setup failure.
 */
int
mfi_scsi_ld(struct mfi_softc *sc, struct mfi_ccb *ccb, struct scsi_xfer *xs)
{
	struct scsi_link	*link = xs->sc_link;
	struct mfi_pass_frame	*pf;

	DNPRINTF(MFI_D_CMD, "%s: mfi_scsi_ld: %d\n",
	    DEVNAME((struct mfi_softc *)link->adapter_softc), link->target);

	pf = &ccb->ccb_frame->mfr_pass;
	pf->mpf_header.mfh_cmd = MFI_CMD_LD_SCSI_IO;
	pf->mpf_header.mfh_target_id = link->target;
	pf->mpf_header.mfh_lun_id = 0;
	pf->mpf_header.mfh_cdb_len = xs->cmdlen;
	pf->mpf_header.mfh_timeout = 0;
	pf->mpf_header.mfh_data_len = htole32(xs->datalen); /* XXX */
	pf->mpf_header.mfh_sense_len = MFI_SENSE_SIZE;

	pf->mpf_sense_addr = htole64(ccb->ccb_psense);

	/* The frame's CDB field is 16 bytes; zero-pad shorter CDBs. */
	memset(pf->mpf_cdb, 0, 16);
	memcpy(pf->mpf_cdb, xs->cmd, xs->cmdlen);

	ccb->ccb_done = mfi_scsi_xs_done;
	ccb->ccb_cookie = xs;
	ccb->ccb_frame_size = MFI_PASS_FRAME_SIZE;
	ccb->ccb_sgl = &pf->mpf_sgl;

	/* Derive the DMA direction from the xfer flags. */
	if (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT))
		ccb->ccb_direction = xs->flags & SCSI_DATA_IN ?
		    MFI_DATA_IN : MFI_DATA_OUT;
	else
		ccb->ccb_direction = MFI_DATA_NONE;

	if (xs->data) {
		ccb->ccb_data = xs->data;
		ccb->ccb_len = xs->datalen;

		if (mfi_create_sgl(sc, ccb, (xs->flags & SCSI_NOSLEEP) ?
		    BUS_DMA_NOWAIT : BUS_DMA_WAITOK))
			return (1);
	}

	return (0);
}
1166 
/*
 * scsi_adapter entry point: translate a scsi_xfer into an MFI frame
 * and submit it.  READ/WRITE opcodes use the fast LD I/O frame,
 * SYNCHRONIZE_CACHE becomes a DCMD cache flush, and everything else
 * goes out as an LD SCSI pass-through frame.
 */
void
mfi_scsi_cmd(struct scsi_xfer *xs)
{
	struct scsi_link	*link = xs->sc_link;
	struct mfi_softc	*sc = link->adapter_softc;
	struct mfi_ccb		*ccb = xs->io;
	struct scsi_rw		*rw;
	struct scsi_rw_big	*rwb;
	struct scsi_rw_16	*rw16;
	uint64_t		blockno;
	uint32_t		blockcnt;
	uint8_t			target = link->target;
	uint8_t			mbox[MFI_MBOX_SIZE];

	DNPRINTF(MFI_D_CMD, "%s: mfi_scsi_cmd opcode: %#x\n",
	    DEVNAME(sc), xs->cmd->opcode);

	/* The submission path below runs without the kernel lock. */
	KERNEL_UNLOCK();

	if (!sc->sc_ld[target].ld_present) {
		DNPRINTF(MFI_D_CMD, "%s: invalid target %d\n",
		    DEVNAME(sc), target);
		goto stuffup;
	}

	mfi_scrub_ccb(ccb);

	xs->error = XS_NOERROR;

	switch (xs->cmd->opcode) {
	/* IO path */
	case READ_BIG:
	case WRITE_BIG:
		/* 10-byte CDB: 32-bit LBA, 16-bit block count. */
		rwb = (struct scsi_rw_big *)xs->cmd;
		blockno = (uint64_t)_4btol(rwb->addr);
		blockcnt = _2btol(rwb->length);
		if (mfi_scsi_io(sc, ccb, xs, blockno, blockcnt))
			goto stuffup;
		break;

	case READ_COMMAND:
	case WRITE_COMMAND:
		/* 6-byte CDB: masked LBA; a length of 0 means 256 blocks. */
		rw = (struct scsi_rw *)xs->cmd;
		blockno =
		    (uint64_t)(_3btol(rw->addr) & (SRW_TOPADDR << 16 | 0xffff));
		blockcnt = rw->length ? rw->length : 0x100;
		if (mfi_scsi_io(sc, ccb, xs, blockno, blockcnt))
			goto stuffup;
		break;

	case READ_16:
	case WRITE_16:
		/* 16-byte CDB: 64-bit LBA, 32-bit block count. */
		rw16 = (struct scsi_rw_16 *)xs->cmd;
		blockno = _8btol(rw16->addr);
		blockcnt = _4btol(rw16->length);
		if (mfi_scsi_io(sc, ccb, xs, blockno, blockcnt))
			goto stuffup;
		break;

	case SYNCHRONIZE_CACHE:
		/* Flush both the controller and the disk caches. */
		mbox[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
		if (mfi_do_mgmt(sc, ccb, MR_DCMD_CTRL_CACHE_FLUSH,
		    MFI_DATA_NONE, 0, NULL, mbox))
			goto stuffup;

		goto complete;
		/* NOTREACHED */

	default:
		/* Everything else is sent as an LD SCSI pass-through. */
		if (mfi_scsi_ld(sc, ccb, xs))
			goto stuffup;
		break;
	}

	DNPRINTF(MFI_D_CMD, "%s: start io %d\n", DEVNAME(sc), target);

	if (xs->flags & SCSI_POLL)
		mfi_poll(sc, ccb);
	else
		mfi_start(sc, ccb);

	KERNEL_LOCK();
	return;

stuffup:
	xs->error = XS_DRIVER_STUFFUP;
complete:
	/* scsi_done() is called with the kernel lock re-taken. */
	KERNEL_LOCK();
	scsi_done(xs);
}
1257 
1258 u_int
1259 mfi_default_sgd_load(struct mfi_softc *sc, struct mfi_ccb *ccb)
1260 {
1261 	struct mfi_frame_header	*hdr = &ccb->ccb_frame->mfr_header;
1262 	union mfi_sgl		*sgl = ccb->ccb_sgl;
1263 	bus_dma_segment_t	*sgd = ccb->ccb_dmamap->dm_segs;
1264 	int			 i;
1265 
1266 	hdr->mfh_flags |= sc->sc_sgl_flags;
1267 
1268 	for (i = 0; i < ccb->ccb_dmamap->dm_nsegs; i++) {
1269 		if (sc->sc_64bit_dma) {
1270 			sgl->sg64[i].addr = htole64(sgd[i].ds_addr);
1271 			sgl->sg64[i].len = htole32(sgd[i].ds_len);
1272 			DNPRINTF(MFI_D_DMA, "%s: addr: %#x  len: %#x\n",
1273 			    DEVNAME(sc), sgl->sg64[i].addr, sgl->sg64[i].len);
1274 		} else {
1275 			sgl->sg32[i].addr = htole32(sgd[i].ds_addr);
1276 			sgl->sg32[i].len = htole32(sgd[i].ds_len);
1277 			DNPRINTF(MFI_D_DMA, "%s: addr: %#x  len: %#x\n",
1278 			    DEVNAME(sc), sgl->sg32[i].addr, sgl->sg32[i].len);
1279 		}
1280 	}
1281 
1282 	return (ccb->ccb_dmamap->dm_nsegs *
1283 	    (sc->sc_64bit_dma ? sizeof(sgl->sg64) : sizeof(sgl->sg32)));
1284 }
1285 
/*
 * Load ccb_data into the ccb's DMA map and fill in the frame's
 * scatter/gather list.  Returns 0 on success, 1 on failure (no data
 * buffer or DMA map load error).
 */
int
mfi_create_sgl(struct mfi_softc *sc, struct mfi_ccb *ccb, int flags)
{
	struct mfi_frame_header	*hdr = &ccb->ccb_frame->mfr_header;
	int			error;

	DNPRINTF(MFI_D_DMA, "%s: mfi_create_sgl %#x\n", DEVNAME(sc),
	    ccb->ccb_data);

	if (!ccb->ccb_data) {
		hdr->mfh_sg_count = 0;
		return (1);
	}

	error = bus_dmamap_load(sc->sc_dmat, ccb->ccb_dmamap,
	    ccb->ccb_data, ccb->ccb_len, NULL, flags);
	if (error) {
		if (error == EFBIG)
			printf("more than %d dma segs\n",
			    sc->sc_max_sgl);
		else
			printf("error %d loading dma map\n", error);
		return (1);
	}

	/* Append the SG entries; this grows the frame past the header. */
	ccb->ccb_frame_size += mfi_sgd_load(sc, ccb);

	/* Pre-sync the data buffer for the upcoming device access. */
	if (ccb->ccb_direction == MFI_DATA_IN) {
		hdr->mfh_flags |= MFI_FRAME_DIR_READ;
		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
		    ccb->ccb_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
	} else {
		hdr->mfh_flags |= MFI_FRAME_DIR_WRITE;
		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
		    ccb->ccb_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);
	}

	hdr->mfh_sg_count = ccb->ccb_dmamap->dm_nsegs;
	/* Frames are fixed-size; count the extra frames the SGL occupies. */
	ccb->ccb_extra_frames = (ccb->ccb_frame_size - 1) / MFI_FRAME_SIZE;

	DNPRINTF(MFI_D_DMA, "%s: sg_count: %d  frame_size: %d  frames_size: %d"
	    "  dm_nsegs: %d  extra_frames: %d\n",
	    DEVNAME(sc),
	    hdr->mfh_sg_count,
	    ccb->ccb_frame_size,
	    sc->sc_frames_size,
	    ccb->ccb_dmamap->dm_nsegs,
	    ccb->ccb_extra_frames);

	return (0);
}
1337 
1338 int
1339 mfi_mgmt(struct mfi_softc *sc, uint32_t opc, uint32_t dir, uint32_t len,
1340     void *buf, uint8_t *mbox)
1341 {
1342 	struct mfi_ccb *ccb;
1343 	int rv;
1344 
1345 	ccb = scsi_io_get(&sc->sc_iopool, 0);
1346 	mfi_scrub_ccb(ccb);
1347 	rv = mfi_do_mgmt(sc, ccb, opc, dir, len, buf, mbox);
1348 	scsi_io_put(&sc->sc_iopool, ccb);
1349 
1350 	return (rv);
1351 }
1352 
/*
 * Issue a DCMD (management command) on the caller-supplied ccb.  Data,
 * if any, is staged through a bounce buffer from the dma pools.  The
 * command is polled when the system is cold, otherwise executed
 * synchronously via mfi_exec().  Returns 0 or an errno.
 */
int
mfi_do_mgmt(struct mfi_softc *sc, struct mfi_ccb *ccb, uint32_t opc,
    uint32_t dir, uint32_t len, void *buf, uint8_t *mbox)
{
	struct mfi_dcmd_frame *dcmd;
	uint8_t *dma_buf = NULL;
	int rv = EINVAL;

	DNPRINTF(MFI_D_MISC, "%s: mfi_do_mgmt %#x\n", DEVNAME(sc), opc);

	/*
	 * NOTE(review): len can be 0 for MFI_DATA_NONE commands (e.g. the
	 * cache flush issued from mfi_scsi_cmd()); confirm dma_alloc(0, ...)
	 * returns a usable pointer, otherwise zero-length DCMDs fail here.
	 */
	dma_buf = dma_alloc(len, cold ? PR_NOWAIT : PR_WAITOK);
	if (dma_buf == NULL)
		goto done;

	dcmd = &ccb->ccb_frame->mfr_dcmd;
	memset(dcmd->mdf_mbox, 0, MFI_MBOX_SIZE);
	dcmd->mdf_header.mfh_cmd = MFI_CMD_DCMD;
	dcmd->mdf_header.mfh_timeout = 0;

	dcmd->mdf_opcode = opc;
	dcmd->mdf_header.mfh_data_len = 0;
	ccb->ccb_direction = dir;

	ccb->ccb_frame_size = MFI_DCMD_FRAME_SIZE;

	/* handle special opcodes */
	if (mbox)
		memcpy(dcmd->mdf_mbox, mbox, MFI_MBOX_SIZE);

	if (dir != MFI_DATA_NONE) {
		/* Stage outbound data through the bounce buffer. */
		if (dir == MFI_DATA_OUT)
			memcpy(dma_buf, buf, len);
		dcmd->mdf_header.mfh_data_len = len;
		ccb->ccb_data = dma_buf;
		ccb->ccb_len = len;
		ccb->ccb_sgl = &dcmd->mdf_sgl;

		if (mfi_create_sgl(sc, ccb, cold ? BUS_DMA_NOWAIT :
		    BUS_DMA_WAITOK)) {
			rv = EINVAL;
			goto done;
		}
	}

	/* When cold we cannot sleep, so poll for completion instead. */
	if (cold) {
		ccb->ccb_done = mfi_empty_done;
		mfi_poll(sc, ccb);
	} else
		mfi_exec(sc, ccb);

	if (dcmd->mdf_header.mfh_cmd_status != MFI_STAT_OK) {
		if (dcmd->mdf_header.mfh_cmd_status == MFI_STAT_WRONG_STATE)
			rv = ENXIO;
		else
			rv = EIO;
		goto done;
	}

	/* Copy inbound data back out of the bounce buffer. */
	if (dir == MFI_DATA_IN)
		memcpy(buf, dma_buf, len);

	rv = 0;
done:
	if (dma_buf)
		dma_free(dma_buf, len);

	return (rv);
}
1421 
1422 int
1423 mfi_scsi_ioctl(struct scsi_link *link, u_long cmd, caddr_t addr, int flag)
1424 {
1425 	struct mfi_softc	*sc = (struct mfi_softc *)link->adapter_softc;
1426 
1427 	DNPRINTF(MFI_D_IOCTL, "%s: mfi_scsi_ioctl\n", DEVNAME(sc));
1428 
1429 	switch (cmd) {
1430 	case DIOCGCACHE:
1431 	case DIOCSCACHE:
1432 		return (mfi_ioctl_cache(link, cmd, (struct dk_cache *)addr));
1433 		break;
1434 
1435 	default:
1436 		if (sc->sc_ioctl)
1437 			return (sc->sc_ioctl(link->adapter_softc, cmd, addr));
1438 		break;
1439 	}
1440 
1441 	return (ENOTTY);
1442 }
1443 
/*
 * DIOCGCACHE/DIOCSCACHE backend: report or update the write cache
 * policy of a logical disk.  With controller cache memory present the
 * LD cache policy (read and write) is used; without it only the
 * disks' own write cache can be toggled.
 */
int
mfi_ioctl_cache(struct scsi_link *link, u_long cmd,  struct dk_cache *dc)
{
	struct mfi_softc	*sc = (struct mfi_softc *)link->adapter_softc;
	int			 rv, wrenable, rdenable;
	struct mfi_ld_prop	 ldp;
	uint8_t			 mbox[MFI_MBOX_SIZE];

	if (mfi_get_info(sc)) {
		rv = EIO;
		goto done;
	}

	if (!sc->sc_ld[link->target].ld_present) {
		rv = EIO;
		goto done;
	}

	/* Fetch the current LD properties for this target. */
	mbox[0] = link->target;
	if ((rv = mfi_mgmt(sc, MR_DCMD_LD_GET_PROPERTIES, MFI_DATA_IN,
	    sizeof(ldp), &ldp, mbox)) != 0)
		goto done;

	if (sc->sc_info.mci_memory_size > 0) {
		wrenable = ISSET(ldp.mlp_cur_cache_policy,
		    MR_LD_CACHE_ALLOW_WRITE_CACHE)? 1 : 0;
		rdenable = ISSET(ldp.mlp_cur_cache_policy,
		    MR_LD_CACHE_ALLOW_READ_CACHE)? 1 : 0;
	} else {
		wrenable = ISSET(ldp.mlp_diskcache_policy,
		    MR_LD_DISK_CACHE_ENABLE)? 1 : 0;
		rdenable = 0;
	}

	if (cmd == DIOCGCACHE) {
		dc->wrcache = wrenable;
		dc->rdcache = rdenable;
		goto done;
	} /* else DIOCSCACHE */

	/* Nothing to do if the requested state matches the current one. */
	if (((dc->wrcache) ? 1 : 0) == wrenable &&
	    ((dc->rdcache) ? 1 : 0) == rdenable)
		goto done;

	/* Identify the LD to the firmware via the mailbox. */
	mbox[0] = ldp.mlp_ld.mld_target;
	mbox[1] = ldp.mlp_ld.mld_res;
	*(uint16_t *)&mbox[2] = ldp.mlp_ld.mld_seq;

	if (sc->sc_info.mci_memory_size > 0) {
		if (dc->rdcache)
			SET(ldp.mlp_cur_cache_policy,
			    MR_LD_CACHE_ALLOW_READ_CACHE);
		else
			CLR(ldp.mlp_cur_cache_policy,
			    MR_LD_CACHE_ALLOW_READ_CACHE);
		if (dc->wrcache)
			SET(ldp.mlp_cur_cache_policy,
			    MR_LD_CACHE_ALLOW_WRITE_CACHE);
		else
			CLR(ldp.mlp_cur_cache_policy,
			    MR_LD_CACHE_ALLOW_WRITE_CACHE);
	} else {
		/* No controller cache: read caching cannot be offered. */
		if (dc->rdcache) {
			rv = EOPNOTSUPP;
			goto done;
		}
		if (dc->wrcache)
			ldp.mlp_diskcache_policy = MR_LD_DISK_CACHE_ENABLE;
		else
			ldp.mlp_diskcache_policy = MR_LD_DISK_CACHE_DISABLE;
	}

	if ((rv = mfi_mgmt(sc, MR_DCMD_LD_SET_PROPERTIES, MFI_DATA_OUT,
	    sizeof(ldp), &ldp, mbox)) != 0)
		goto done;
done:
	return (rv);
}
1522 
1523 #if NBIO > 0
1524 int
1525 mfi_ioctl(struct device *dev, u_long cmd, caddr_t addr)
1526 {
1527 	struct mfi_softc	*sc = (struct mfi_softc *)dev;
1528 	int error = 0;
1529 
1530 	DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl ", DEVNAME(sc));
1531 
1532 	rw_enter_write(&sc->sc_lock);
1533 
1534 	switch (cmd) {
1535 	case BIOCINQ:
1536 		DNPRINTF(MFI_D_IOCTL, "inq\n");
1537 		error = mfi_ioctl_inq(sc, (struct bioc_inq *)addr);
1538 		break;
1539 
1540 	case BIOCVOL:
1541 		DNPRINTF(MFI_D_IOCTL, "vol\n");
1542 		error = mfi_ioctl_vol(sc, (struct bioc_vol *)addr);
1543 		break;
1544 
1545 	case BIOCDISK:
1546 		DNPRINTF(MFI_D_IOCTL, "disk\n");
1547 		error = mfi_ioctl_disk(sc, (struct bioc_disk *)addr);
1548 		break;
1549 
1550 	case BIOCALARM:
1551 		DNPRINTF(MFI_D_IOCTL, "alarm\n");
1552 		error = mfi_ioctl_alarm(sc, (struct bioc_alarm *)addr);
1553 		break;
1554 
1555 	case BIOCBLINK:
1556 		DNPRINTF(MFI_D_IOCTL, "blink\n");
1557 		error = mfi_ioctl_blink(sc, (struct bioc_blink *)addr);
1558 		break;
1559 
1560 	case BIOCSETSTATE:
1561 		DNPRINTF(MFI_D_IOCTL, "setstate\n");
1562 		error = mfi_ioctl_setstate(sc, (struct bioc_setstate *)addr);
1563 		break;
1564 
1565 	default:
1566 		DNPRINTF(MFI_D_IOCTL, " invalid ioctl\n");
1567 		error = EINVAL;
1568 	}
1569 
1570 	rw_exit_write(&sc->sc_lock);
1571 
1572 	return (error);
1573 }
1574 
/*
 * Refresh the cached controller state used by the bio(4) ioctls:
 * adapter info, the RAID configuration (sc_cfg), the LD list and the
 * per-LD details.  Also recomputes sc_no_pd, the number of drives in
 * use by arrays.  Returns 0 on success, EINVAL otherwise.
 */
int
mfi_bio_getitall(struct mfi_softc *sc)
{
	int			i, d, size, rv = EINVAL;
	uint8_t			mbox[MFI_MBOX_SIZE];
	struct mfi_conf		*cfg = NULL;
	struct mfi_ld_details	*ld_det = NULL;

	/* get info */
	if (mfi_get_info(sc)) {
		DNPRINTF(MFI_D_IOCTL, "%s: mfi_get_info failed\n",
		    DEVNAME(sc));
		goto done;
	}

	/* send single element command to retrieve size for full structure */
	cfg = malloc(sizeof *cfg, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (cfg == NULL)
		goto done;
	if (mfi_mgmt(sc, MR_DCMD_CONF_GET, MFI_DATA_IN, sizeof *cfg, cfg,
	    NULL)) {
		free(cfg, M_DEVBUF, 0);
		goto done;
	}

	size = cfg->mfc_size;
	free(cfg, M_DEVBUF, 0);

	/* memory for read config */
	cfg = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
	if (cfg == NULL)
		goto done;
	if (mfi_mgmt(sc, MR_DCMD_CONF_GET, MFI_DATA_IN, size, cfg, NULL)) {
		free(cfg, M_DEVBUF, 0);
		goto done;
	}

	/* replace current pointer with new one */
	if (sc->sc_cfg)
		free(sc->sc_cfg, M_DEVBUF, 0);
	sc->sc_cfg = cfg;

	/* get all ld info */
	if (mfi_mgmt(sc, MR_DCMD_LD_GET_LIST, MFI_DATA_IN,
	    sizeof(sc->sc_ld_list), &sc->sc_ld_list, NULL))
		goto done;

	/* get memory for all ld structures */
	size = cfg->mfc_no_ld * sizeof(struct mfi_ld_details);
	if (sc->sc_ld_sz != size) {
		/* LD count changed: reallocate the details array. */
		if (sc->sc_ld_details)
			free(sc->sc_ld_details, M_DEVBUF, 0);

		ld_det = malloc( size, M_DEVBUF, M_NOWAIT | M_ZERO);
		if (ld_det == NULL)
			goto done;
		sc->sc_ld_sz = size;
		sc->sc_ld_details = ld_det;
	}

	/* find used physical disks */
	size = sizeof(struct mfi_ld_details);
	for (i = 0, d = 0; i < cfg->mfc_no_ld; i++) {
		/* Mailbox selects the LD by its target id. */
		mbox[0] = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
		if (mfi_mgmt(sc, MR_DCMD_LD_GET_INFO, MFI_DATA_IN, size,
		    &sc->sc_ld_details[i], mbox))
			goto done;

		/* drives per span times span depth gives drives per LD */
		d += sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_no_drv_per_span *
		    sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_span_depth;
	}
	sc->sc_no_pd = d;

	rv = 0;
done:
	return (rv);
}
1652 
1653 int
1654 mfi_ioctl_inq(struct mfi_softc *sc, struct bioc_inq *bi)
1655 {
1656 	int			rv = EINVAL;
1657 	struct mfi_conf		*cfg = NULL;
1658 
1659 	DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_inq\n", DEVNAME(sc));
1660 
1661 	if (mfi_bio_getitall(sc)) {
1662 		DNPRINTF(MFI_D_IOCTL, "%s: mfi_bio_getitall failed\n",
1663 		    DEVNAME(sc));
1664 		goto done;
1665 	}
1666 
1667 	/* count unused disks as volumes */
1668 	if (sc->sc_cfg == NULL)
1669 		goto done;
1670 	cfg = sc->sc_cfg;
1671 
1672 	bi->bi_nodisk = sc->sc_info.mci_pd_disks_present;
1673 	bi->bi_novol = cfg->mfc_no_ld + cfg->mfc_no_hs;
1674 #if notyet
1675 	bi->bi_novol = cfg->mfc_no_ld + cfg->mfc_no_hs +
1676 	    (bi->bi_nodisk - sc->sc_no_pd);
1677 #endif
1678 	/* tell bio who we are */
1679 	strlcpy(bi->bi_dev, DEVNAME(sc), sizeof(bi->bi_dev));
1680 
1681 	rv = 0;
1682 done:
1683 	return (rv);
1684 }
1685 
/*
 * BIOCVOL backend: report state, progress, cache mode, RAID level and
 * size for one logical disk; volume ids past the LD list are routed to
 * mfi_bio_hs() for hotspare/unused-disk reporting.
 */
int
mfi_ioctl_vol(struct mfi_softc *sc, struct bioc_vol *bv)
{
	int			i, per, rv = EINVAL;
	struct scsi_link	*link;
	struct device		*dev;

	DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_vol %#x\n",
	    DEVNAME(sc), bv->bv_volid);

	/* we really could skip and expect that inq took care of it */
	if (mfi_bio_getitall(sc)) {
		DNPRINTF(MFI_D_IOCTL, "%s: mfi_bio_getitall failed\n",
		    DEVNAME(sc));
		goto done;
	}

	if (bv->bv_volid >= sc->sc_ld_list.mll_no_ld) {
		/* go do hotspares & unused disks */
		rv = mfi_bio_hs(sc, bv->bv_volid, MFI_MGMT_VD, bv);
		goto done;
	}

	/* Report the attached disk device's name, if one attached. */
	i = bv->bv_volid;
	link = scsi_get_link(sc->sc_scsibus, i, 0);
	if (link != NULL && link->device_softc != NULL) {
		dev = link->device_softc;
		strlcpy(bv->bv_dev, dev->dv_xname, sizeof(bv->bv_dev));
	}

	/* Translate the firmware LD state into a bio status. */
	switch(sc->sc_ld_list.mll_list[i].mll_state) {
	case MFI_LD_OFFLINE:
		bv->bv_status = BIOC_SVOFFLINE;
		break;

	case MFI_LD_PART_DEGRADED:
	case MFI_LD_DEGRADED:
		bv->bv_status = BIOC_SVDEGRADED;
		break;

	case MFI_LD_ONLINE:
		bv->bv_status = BIOC_SVONLINE;
		break;

	default:
		bv->bv_status = BIOC_SVINVALID;
		DNPRINTF(MFI_D_IOCTL, "%s: invalid logical disk state %#x\n",
		    DEVNAME(sc),
		    sc->sc_ld_list.mll_list[i].mll_state);
	}

	/* additional status can modify MFI status */
	switch (sc->sc_ld_details[i].mld_progress.mlp_in_prog) {
	case MFI_LD_PROG_CC:
	case MFI_LD_PROG_BGI:
		bv->bv_status = BIOC_SVSCRUB;
		/* Progress is a fraction of 0xffff; scale to percent. */
		per = (int)sc->sc_ld_details[i].mld_progress.mlp_cc.mp_progress;
		bv->bv_percent = (per * 100) / 0xffff;
		bv->bv_seconds =
		    sc->sc_ld_details[i].mld_progress.mlp_cc.mp_elapsed_seconds;
		break;

	case MFI_LD_PROG_FGI:
	case MFI_LD_PROG_RECONSTRUCT:
		/* nothing yet */
		break;
	}

	if (sc->sc_ld_details[i].mld_cfg.mlc_prop.mlp_cur_cache_policy & 0x01)
		bv->bv_cache = BIOC_CVWRITEBACK;
	else
		bv->bv_cache = BIOC_CVWRITETHROUGH;

	/*
	 * The RAID levels are determined per the SNIA DDF spec, this is only
	 * a subset that is valid for the MFI controller.
	 */
	bv->bv_level = sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_pri_raid;
	if (sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_sec_raid ==
	    MFI_DDF_SRL_SPANNED)
		bv->bv_level *= 10;

	bv->bv_nodisk = sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_no_drv_per_span *
	    sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_span_depth;

	bv->bv_size = sc->sc_ld_details[i].mld_size * 512; /* bytes per block */

	rv = 0;
done:
	return (rv);
}
1777 
/*
 * BIOCDISK backend: locate the physical disk backing slot bd_diskid of
 * volume bd_volid in the firmware configuration and report its state,
 * size, enclosure and vendor string.  Volume ids past the LD count are
 * routed to mfi_bio_hs() for hotspare reporting.
 */
int
mfi_ioctl_disk(struct mfi_softc *sc, struct bioc_disk *bd)
{
	struct mfi_conf		*cfg;
	struct mfi_array	*ar;
	struct mfi_ld_cfg	*ld;
	struct mfi_pd_details	*pd;
	struct scsi_inquiry_data *inqbuf;
	char			vend[8+16+4+1], *vendp;
	int			rv = EINVAL;
	int			arr, vol, disk, span;
	uint8_t			mbox[MFI_MBOX_SIZE];

	DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_disk %#x\n",
	    DEVNAME(sc), bd->bd_diskid);

	/* we really could skip and expect that inq took care of it */
	if (mfi_bio_getitall(sc)) {
		DNPRINTF(MFI_D_IOCTL, "%s: mfi_bio_getitall failed\n",
		    DEVNAME(sc));
		return (rv);
	}
	cfg = sc->sc_cfg;

	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);

	ar = cfg->mfc_array;
	vol = bd->bd_volid;
	if (vol >= cfg->mfc_no_ld) {
		/* do hotspares */
		rv = mfi_bio_hs(sc, bd->bd_volid, MFI_MGMT_SD, bd);
		goto freeme;
	}

	/* calculate offset to ld structure */
	ld = (struct mfi_ld_cfg *)(
	    ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
	    cfg->mfc_array_size * cfg->mfc_no_array);

	/* use span 0 only when raid group is not spanned */
	if (ld[vol].mlc_parm.mpa_span_depth > 1)
		span = bd->bd_diskid / ld[vol].mlc_parm.mpa_no_drv_per_span;
	else
		span = 0;
	arr = ld[vol].mlc_span[span].mls_index;

	/* offset disk into pd list */
	disk = bd->bd_diskid % ld[vol].mlc_parm.mpa_no_drv_per_span;
	bd->bd_target = ar[arr].pd[disk].mar_enc_slot;

	/* get status */
	switch (ar[arr].pd[disk].mar_pd_state){
	case MFI_PD_UNCONFIG_GOOD:
	case MFI_PD_FAILED:
		bd->bd_status = BIOC_SDFAILED;
		break;

	case MFI_PD_HOTSPARE: /* XXX dedicated hotspare part of array? */
		bd->bd_status = BIOC_SDHOTSPARE;
		break;

	case MFI_PD_OFFLINE:
		bd->bd_status = BIOC_SDOFFLINE;
		break;

	case MFI_PD_REBUILD:
		bd->bd_status = BIOC_SDREBUILD;
		break;

	case MFI_PD_ONLINE:
		bd->bd_status = BIOC_SDONLINE;
		break;

	case MFI_PD_UNCONFIG_BAD: /* XXX define new state in bio */
	default:
		bd->bd_status = BIOC_SDINVALID;
		break;
	}

	/* get the remaining fields */
	*((uint16_t *)&mbox) = ar[arr].pd[disk].mar_pd.mfp_id;
	if (mfi_mgmt(sc, MR_DCMD_PD_GET_INFO, MFI_DATA_IN,
	    sizeof *pd, pd, mbox)) {
		/* disk is missing but succeed command */
		rv = 0;
		goto freeme;
	}

	bd->bd_size = pd->mpd_size * 512; /* bytes per block */

	/* if pd->mpd_enc_idx is 0 then it is not in an enclosure */
	bd->bd_channel = pd->mpd_enc_idx;

	/*
	 * Grab vendor, product and revision (8+16+4 bytes, presumably
	 * adjacent in the inquiry data) as a single string.
	 */
	inqbuf = (struct scsi_inquiry_data *)&pd->mpd_inq_data;
	vendp = inqbuf->vendor;
	memcpy(vend, vendp, sizeof vend - 1);
	vend[sizeof vend - 1] = '\0';
	strlcpy(bd->bd_vendor, vend, sizeof(bd->bd_vendor));

	/* XXX find a way to retrieve serial nr from drive */
	/* XXX find a way to get bd_procdev */

	rv = 0;
freeme:
	free(pd, M_DEVBUF, 0);

	return (rv);
}
1886 
1887 int
1888 mfi_ioctl_alarm(struct mfi_softc *sc, struct bioc_alarm *ba)
1889 {
1890 	uint32_t		opc, dir = MFI_DATA_NONE;
1891 	int			rv = 0;
1892 	int8_t			ret;
1893 
1894 	switch(ba->ba_opcode) {
1895 	case BIOC_SADISABLE:
1896 		opc = MR_DCMD_SPEAKER_DISABLE;
1897 		break;
1898 
1899 	case BIOC_SAENABLE:
1900 		opc = MR_DCMD_SPEAKER_ENABLE;
1901 		break;
1902 
1903 	case BIOC_SASILENCE:
1904 		opc = MR_DCMD_SPEAKER_SILENCE;
1905 		break;
1906 
1907 	case BIOC_GASTATUS:
1908 		opc = MR_DCMD_SPEAKER_GET;
1909 		dir = MFI_DATA_IN;
1910 		break;
1911 
1912 	case BIOC_SATEST:
1913 		opc = MR_DCMD_SPEAKER_TEST;
1914 		break;
1915 
1916 	default:
1917 		DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_alarm biocalarm invalid "
1918 		    "opcode %x\n", DEVNAME(sc), ba->ba_opcode);
1919 		return (EINVAL);
1920 	}
1921 
1922 	if (mfi_mgmt(sc, opc, dir, sizeof(ret), &ret, NULL))
1923 		rv = EINVAL;
1924 	else
1925 		if (ba->ba_opcode == BIOC_GASTATUS)
1926 			ba->ba_status = ret;
1927 		else
1928 			ba->ba_status = 0;
1929 
1930 	return (rv);
1931 }
1932 
/*
 * BIOCBLINK backend: find the physical disk addressed by the bio
 * channel/target pair in the controller's PD list and issue a blink
 * or unblink locator command for it.
 */
int
mfi_ioctl_blink(struct mfi_softc *sc, struct bioc_blink *bb)
{
	int			i, found, rv = EINVAL;
	uint8_t			mbox[MFI_MBOX_SIZE];
	uint32_t		cmd;
	struct mfi_pd_list	*pd;

	DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_blink %x\n", DEVNAME(sc),
	    bb->bb_status);

	/* channel 0 means not in an enclosure so can't be blinked */
	if (bb->bb_channel == 0)
		return (EINVAL);

	pd = malloc(sizeof(*pd), M_DEVBUF, M_WAITOK);

	if (mfi_mgmt(sc, MR_DCMD_PD_GET_LIST, MFI_DATA_IN,
	    sizeof(*pd), pd, NULL))
		goto done;

	/* Match channel/target against enclosure index/slot. */
	for (i = 0, found = 0; i < pd->mpl_no_pd; i++)
		if (bb->bb_channel == pd->mpl_address[i].mpa_enc_index &&
		    bb->bb_target == pd->mpl_address[i].mpa_enc_slot) {
			found = 1;
			break;
		}

	if (!found)
		goto done;

	memset(mbox, 0, sizeof mbox);

	/* The device id goes in the first two mailbox bytes. */
	*((uint16_t *)&mbox) = pd->mpl_address[i].mpa_pd_id;

	switch (bb->bb_status) {
	case BIOC_SBUNBLINK:
		cmd = MR_DCMD_PD_UNBLINK;
		break;

	case BIOC_SBBLINK:
		cmd = MR_DCMD_PD_BLINK;
		break;

	case BIOC_SBALARM:
	default:
		DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_blink biocblink invalid "
		    "opcode %x\n", DEVNAME(sc), bb->bb_status);
		goto done;
	}


	if (mfi_mgmt(sc, cmd, MFI_DATA_NONE, 0, NULL, mbox))
		goto done;

	rv = 0;
done:
	free(pd, M_DEVBUF, 0);
	return (rv);
}
1993 
/*
 * BIOCSETSTATE backend: find the physical disk addressed by the bio
 * channel/target pair, then ask the firmware to move it to the
 * requested state (online/offline/hotspare/rebuild).
 */
int
mfi_ioctl_setstate(struct mfi_softc *sc, struct bioc_setstate *bs)
{
	struct mfi_pd_list	*pd;
	struct mfi_pd_details	*info;
	int			i, found, rv = EINVAL;
	uint8_t			mbox[MFI_MBOX_SIZE];

	DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_setstate %x\n", DEVNAME(sc),
	    bs->bs_status);

	pd = malloc(sizeof(*pd), M_DEVBUF, M_WAITOK);
	info = malloc(sizeof *info, M_DEVBUF, M_WAITOK);

	if (mfi_mgmt(sc, MR_DCMD_PD_GET_LIST, MFI_DATA_IN,
	    sizeof(*pd), pd, NULL))
		goto done;

	/* Match channel/target against enclosure index/slot. */
	for (i = 0, found = 0; i < pd->mpl_no_pd; i++)
		if (bs->bs_channel == pd->mpl_address[i].mpa_enc_index &&
		    bs->bs_target == pd->mpl_address[i].mpa_enc_slot) {
			found = 1;
			break;
		}

	if (!found)
		goto done;

	memset(mbox, 0, sizeof mbox);

	/* Fetch the PD details to learn the current sequence number. */
	*((uint16_t *)&mbox) = pd->mpl_address[i].mpa_pd_id;
	if (mfi_mgmt(sc, MR_DCMD_PD_GET_INFO, MFI_DATA_IN,
	    sizeof *info, info, mbox))
		goto done;

	/* Mailbox layout: device id, sequence number, new state. */
	*((uint16_t *)&mbox[0]) = pd->mpl_address[i].mpa_pd_id;
	*((uint16_t *)&mbox[2]) = info->mpd_pd.mfp_seq;

	switch (bs->bs_status) {
	case BIOC_SSONLINE:
		mbox[4] = MFI_PD_ONLINE;
		break;

	case BIOC_SSOFFLINE:
		mbox[4] = MFI_PD_OFFLINE;
		break;

	case BIOC_SSHOTSPARE:
		mbox[4] = MFI_PD_HOTSPARE;
		break;

	case BIOC_SSREBUILD:
		mbox[4] = MFI_PD_REBUILD;
		break;

	default:
		DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_setstate invalid "
		    "opcode %x\n", DEVNAME(sc), bs->bs_status);
		goto done;
	}


	if ((rv = mfi_mgmt(sc, MR_DCMD_PD_SET_STATE, MFI_DATA_NONE, 0, NULL,
	    mbox)))
		goto done;

	rv = 0;
done:
	free(pd, M_DEVBUF, 0);
	free(info, M_DEVBUF, 0);
	return (rv);
}
2066 
/*
 * Report a hotspare as either a volume (MFI_MGMT_VD filling a
 * bioc_vol) or a disk (MFI_MGMT_SD filling a bioc_disk).  volid is the
 * bio volume id, which indexes past the logical disks into the
 * hotspare table of the firmware configuration.
 */
int
mfi_bio_hs(struct mfi_softc *sc, int volid, int type, void *bio_hs)
{
	struct mfi_conf		*cfg;
	struct mfi_hotspare	*hs;
	struct mfi_pd_details	*pd;
	struct bioc_disk	*sdhs;
	struct bioc_vol		*vdhs;
	struct scsi_inquiry_data *inqbuf;
	char			vend[8+16+4+1], *vendp;
	int			i, rv = EINVAL;
	uint32_t		size;
	uint8_t			mbox[MFI_MBOX_SIZE];

	DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs %d\n", DEVNAME(sc), volid);

	if (!bio_hs)
		return (EINVAL);

	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);

	/* send single element command to retrieve size for full structure */
	cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK);
	if (mfi_mgmt(sc, MR_DCMD_CONF_GET, MFI_DATA_IN, sizeof *cfg, cfg, NULL))
		goto freeme;

	size = cfg->mfc_size;
	free(cfg, M_DEVBUF, 0);

	/* memory for read config */
	cfg = malloc(size, M_DEVBUF, M_WAITOK|M_ZERO);
	if (mfi_mgmt(sc, MR_DCMD_CONF_GET, MFI_DATA_IN, size, cfg, NULL))
		goto freeme;

	/* calculate offset to hs structure */
	hs = (struct mfi_hotspare *)(
	    ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
	    cfg->mfc_array_size * cfg->mfc_no_array +
	    cfg->mfc_ld_size * cfg->mfc_no_ld);

	if (volid < cfg->mfc_no_ld)
		goto freeme; /* not a hotspare */

	/*
	 * NOTE(review): ">=" would be the tight bound here; volid equal to
	 * no_ld + no_hs makes i below index one past the last hotspare —
	 * confirm callers never pass that value.
	 */
	if (volid > (cfg->mfc_no_ld + cfg->mfc_no_hs))
		goto freeme; /* not a hotspare */

	/* offset into hotspare structure */
	i = volid - cfg->mfc_no_ld;

	DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs i %d volid %d no_ld %d no_hs %d "
	    "hs %p cfg %p id %02x\n", DEVNAME(sc), i, volid, cfg->mfc_no_ld,
	    cfg->mfc_no_hs, hs, cfg, hs[i].mhs_pd.mfp_id);

	/* get pd fields */
	memset(mbox, 0, sizeof mbox);
	*((uint16_t *)&mbox) = hs[i].mhs_pd.mfp_id;
	if (mfi_mgmt(sc, MR_DCMD_PD_GET_INFO, MFI_DATA_IN,
	    sizeof *pd, pd, mbox)) {
		DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs illegal PD\n",
		    DEVNAME(sc));
		goto freeme;
	}

	switch (type) {
	case MFI_MGMT_VD:
		/* Present the hotspare as a pseudo volume. */
		vdhs = bio_hs;
		vdhs->bv_status = BIOC_SVONLINE;
		vdhs->bv_size = pd->mpd_size / 2 * 1024; /* XXX why? */
		vdhs->bv_level = -1; /* hotspare */
		vdhs->bv_nodisk = 1;
		break;

	case MFI_MGMT_SD:
		/* Present the hotspare as a disk. */
		sdhs = bio_hs;
		sdhs->bd_status = BIOC_SDHOTSPARE;
		sdhs->bd_size = pd->mpd_size / 2 * 1024; /* XXX why? */
		sdhs->bd_channel = pd->mpd_enc_idx;
		sdhs->bd_target = pd->mpd_enc_slot;
		inqbuf = (struct scsi_inquiry_data *)&pd->mpd_inq_data;
		vendp = inqbuf->vendor;
		memcpy(vend, vendp, sizeof vend - 1);
		vend[sizeof vend - 1] = '\0';
		strlcpy(sdhs->bd_vendor, vend, sizeof(sdhs->bd_vendor));
		break;

	default:
		goto freeme;
	}

	DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs 6\n", DEVNAME(sc));
	rv = 0;
freeme:
	free(pd, M_DEVBUF, 0);
	free(cfg, M_DEVBUF, 0);

	return (rv);
}
2164 
2165 #ifndef SMALL_KERNEL
2166 
/*
 * Human-readable labels for the per-bit BBU (battery backup unit)
 * status indicators.  Bit i of the little-endian fw_status word
 * returned by MR_DCMD_BBU_GET_STATUS maps to entry i of this table;
 * mfi_bbu() below exports one indicator sensor per entry.
 */
static const char *mfi_bbu_indicators[] = {
	"pack missing",
	"voltage low",
	"temp high",
	"charge active",
	"discharge active",
	"learn cycle req'd",
	"learn cycle active",
	"learn cycle failed",
	"learn cycle timeout",
	"I2C errors",
	"replace pack",
	"low capacity",
	"periodic learn req'd"
};

/* number of ksensors in sc_bbu: ok-indicator, volts, amps, temperature */
#define MFI_BBU_SENSORS 4
2184 
2185 int
2186 mfi_bbu(struct mfi_softc *sc)
2187 {
2188 	struct mfi_bbu_status bbu;
2189 	u_int32_t status;
2190 	u_int32_t mask;
2191 	u_int32_t soh_bad;
2192 	int i;
2193 
2194 	if (mfi_mgmt(sc, MR_DCMD_BBU_GET_STATUS, MFI_DATA_IN,
2195 	    sizeof(bbu), &bbu, NULL) != 0) {
2196 		for (i = 0; i < MFI_BBU_SENSORS; i++) {
2197 			sc->sc_bbu[i].value = 0;
2198 			sc->sc_bbu[i].status = SENSOR_S_UNKNOWN;
2199 		}
2200 		for (i = 0; i < nitems(mfi_bbu_indicators); i++) {
2201 			sc->sc_bbu_status[i].value = 0;
2202 			sc->sc_bbu_status[i].status = SENSOR_S_UNKNOWN;
2203 		}
2204 		return (-1);
2205 	}
2206 
2207 	switch (bbu.battery_type) {
2208 	case MFI_BBU_TYPE_IBBU:
2209 		mask = MFI_BBU_STATE_BAD_IBBU;
2210 		soh_bad = 0;
2211 		break;
2212 	case MFI_BBU_TYPE_BBU:
2213 		mask = MFI_BBU_STATE_BAD_BBU;
2214 		soh_bad = (bbu.detail.bbu.is_SOH_good == 0);
2215 		break;
2216 
2217 	case MFI_BBU_TYPE_NONE:
2218 	default:
2219 		sc->sc_bbu[0].value = 0;
2220 		sc->sc_bbu[0].status = SENSOR_S_CRIT;
2221 		for (i = 1; i < MFI_BBU_SENSORS; i++) {
2222 			sc->sc_bbu[i].value = 0;
2223 			sc->sc_bbu[i].status = SENSOR_S_UNKNOWN;
2224 		}
2225 		for (i = 0; i < nitems(mfi_bbu_indicators); i++) {
2226 			sc->sc_bbu_status[i].value = 0;
2227 			sc->sc_bbu_status[i].status = SENSOR_S_UNKNOWN;
2228 		}
2229 		return (0);
2230 	}
2231 
2232 	status = letoh32(bbu.fw_status);
2233 
2234 	sc->sc_bbu[0].value = ((status & mask) || soh_bad) ? 0 : 1;
2235 	sc->sc_bbu[0].status = ((status & mask) || soh_bad) ? SENSOR_S_CRIT :
2236 	    SENSOR_S_OK;
2237 
2238 	sc->sc_bbu[1].value = letoh16(bbu.voltage) * 1000;
2239 	sc->sc_bbu[2].value = (int16_t)letoh16(bbu.current) * 1000;
2240 	sc->sc_bbu[3].value = letoh16(bbu.temperature) * 1000000 + 273150000;
2241 	for (i = 1; i < MFI_BBU_SENSORS; i++)
2242 		sc->sc_bbu[i].status = SENSOR_S_UNSPEC;
2243 
2244 	for (i = 0; i < nitems(mfi_bbu_indicators); i++) {
2245 		sc->sc_bbu_status[i].value = (status & (1 << i)) ? 1 : 0;
2246 		sc->sc_bbu_status[i].status = SENSOR_S_UNSPEC;
2247 	}
2248 
2249 	return (0);
2250 }
2251 
2252 int
2253 mfi_create_sensors(struct mfi_softc *sc)
2254 {
2255 	struct device		*dev;
2256 	struct scsi_link	*link;
2257 	int			i;
2258 
2259 	strlcpy(sc->sc_sensordev.xname, DEVNAME(sc),
2260 	    sizeof(sc->sc_sensordev.xname));
2261 
2262 	if (ISSET(letoh32(sc->sc_info.mci_adapter_ops ), MFI_INFO_AOPS_BBU)) {
2263 		sc->sc_bbu = mallocarray(4, sizeof(*sc->sc_bbu),
2264 		    M_DEVBUF, M_WAITOK | M_ZERO);
2265 
2266 		sc->sc_bbu[0].type = SENSOR_INDICATOR;
2267 		sc->sc_bbu[0].status = SENSOR_S_UNKNOWN;
2268 		strlcpy(sc->sc_bbu[0].desc, "bbu ok",
2269 		    sizeof(sc->sc_bbu[0].desc));
2270 		sensor_attach(&sc->sc_sensordev, &sc->sc_bbu[0]);
2271 
2272 		sc->sc_bbu[1].type = SENSOR_VOLTS_DC;
2273 		sc->sc_bbu[1].status = SENSOR_S_UNSPEC;
2274 		sc->sc_bbu[2].type = SENSOR_AMPS;
2275 		sc->sc_bbu[2].status = SENSOR_S_UNSPEC;
2276 		sc->sc_bbu[3].type = SENSOR_TEMP;
2277 		sc->sc_bbu[3].status = SENSOR_S_UNSPEC;
2278 		for (i = 1; i < MFI_BBU_SENSORS; i++) {
2279 			strlcpy(sc->sc_bbu[i].desc, "bbu",
2280 			    sizeof(sc->sc_bbu[i].desc));
2281 			sensor_attach(&sc->sc_sensordev, &sc->sc_bbu[i]);
2282 		}
2283 
2284 		sc->sc_bbu_status = malloc(sizeof(*sc->sc_bbu_status) *
2285 		    sizeof(mfi_bbu_indicators), M_DEVBUF, M_WAITOK | M_ZERO);
2286 
2287 		for (i = 0; i < nitems(mfi_bbu_indicators); i++) {
2288 			sc->sc_bbu_status[i].type = SENSOR_INDICATOR;
2289 			sc->sc_bbu_status[i].status = SENSOR_S_UNSPEC;
2290 			strlcpy(sc->sc_bbu_status[i].desc,
2291 			    mfi_bbu_indicators[i],
2292 			    sizeof(sc->sc_bbu_status[i].desc));
2293 
2294 			sensor_attach(&sc->sc_sensordev, &sc->sc_bbu_status[i]);
2295 		}
2296 	}
2297 
2298 	sc->sc_sensors = mallocarray(sc->sc_ld_cnt, sizeof(struct ksensor),
2299 	    M_DEVBUF, M_NOWAIT | M_ZERO);
2300 	if (sc->sc_sensors == NULL)
2301 		return (1);
2302 
2303 	for (i = 0; i < sc->sc_ld_cnt; i++) {
2304 		link = scsi_get_link(sc->sc_scsibus, i, 0);
2305 		if (link == NULL)
2306 			goto bad;
2307 
2308 		dev = link->device_softc;
2309 
2310 		sc->sc_sensors[i].type = SENSOR_DRIVE;
2311 		sc->sc_sensors[i].status = SENSOR_S_UNKNOWN;
2312 
2313 		strlcpy(sc->sc_sensors[i].desc, dev->dv_xname,
2314 		    sizeof(sc->sc_sensors[i].desc));
2315 
2316 		sensor_attach(&sc->sc_sensordev, &sc->sc_sensors[i]);
2317 	}
2318 
2319 	if (sensor_task_register(sc, mfi_refresh_sensors, 10) == NULL)
2320 		goto bad;
2321 
2322 	sensordev_install(&sc->sc_sensordev);
2323 
2324 	return (0);
2325 
2326 bad:
2327 	free(sc->sc_sensors, M_DEVBUF, 0);
2328 
2329 	return (1);
2330 }
2331 
2332 void
2333 mfi_refresh_sensors(void *arg)
2334 {
2335 	struct mfi_softc	*sc = arg;
2336 	int			i, rv;
2337 	struct bioc_vol		bv;
2338 
2339 	if (sc->sc_bbu != NULL && mfi_bbu(sc) != 0)
2340 		return;
2341 
2342 	for (i = 0; i < sc->sc_ld_cnt; i++) {
2343 		bzero(&bv, sizeof(bv));
2344 		bv.bv_volid = i;
2345 
2346 		rw_enter_write(&sc->sc_lock);
2347 		rv = mfi_ioctl_vol(sc, &bv);
2348 		rw_exit_write(&sc->sc_lock);
2349 
2350 		if (rv != 0)
2351 			return;
2352 
2353 		switch(bv.bv_status) {
2354 		case BIOC_SVOFFLINE:
2355 			sc->sc_sensors[i].value = SENSOR_DRIVE_FAIL;
2356 			sc->sc_sensors[i].status = SENSOR_S_CRIT;
2357 			break;
2358 
2359 		case BIOC_SVDEGRADED:
2360 			sc->sc_sensors[i].value = SENSOR_DRIVE_PFAIL;
2361 			sc->sc_sensors[i].status = SENSOR_S_WARN;
2362 			break;
2363 
2364 		case BIOC_SVSCRUB:
2365 		case BIOC_SVONLINE:
2366 			sc->sc_sensors[i].value = SENSOR_DRIVE_ONLINE;
2367 			sc->sc_sensors[i].status = SENSOR_S_OK;
2368 			break;
2369 
2370 		case BIOC_SVINVALID:
2371 			/* FALLTRHOUGH */
2372 		default:
2373 			sc->sc_sensors[i].value = 0; /* unknown */
2374 			sc->sc_sensors[i].status = SENSOR_S_UNKNOWN;
2375 			break;
2376 		}
2377 	}
2378 }
2379 #endif /* SMALL_KERNEL */
2380 #endif /* NBIO > 0 */
2381 
2382 void
2383 mfi_start(struct mfi_softc *sc, struct mfi_ccb *ccb)
2384 {
2385 	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
2386 	    ccb->ccb_pframe_offset, sc->sc_frames_size,
2387 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2388 
2389 	mfi_post(sc, ccb);
2390 }
2391 
2392 void
2393 mfi_done(struct mfi_softc *sc, struct mfi_ccb *ccb)
2394 {
2395 	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
2396 	    ccb->ccb_pframe_offset, sc->sc_frames_size,
2397 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2398 
2399 	if (ccb->ccb_len > 0) {
2400 		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap,
2401 		    0, ccb->ccb_dmamap->dm_mapsize,
2402 		    (ccb->ccb_direction == MFI_DATA_IN) ?
2403 		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
2404 
2405 		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
2406 	}
2407 
2408 	ccb->ccb_done(sc, ccb);
2409 }
2410 
/* Return the raw firmware state word on xscale chips (MFI_OMSG0). */
u_int32_t
mfi_xscale_fw_state(struct mfi_softc *sc)
{
	return (mfi_read(sc, MFI_OMSG0));
}
2416 
/*
 * Enable interrupt delivery on xscale chips by writing MFI_ENABLE_INTR
 * to the outbound interrupt mask register.
 */
void
mfi_xscale_intr_ena(struct mfi_softc *sc)
{
	mfi_write(sc, MFI_OMSK, MFI_ENABLE_INTR);
}
2422 
2423 int
2424 mfi_xscale_intr(struct mfi_softc *sc)
2425 {
2426 	u_int32_t status;
2427 
2428 	status = mfi_read(sc, MFI_OSTS);
2429 	if (!ISSET(status, MFI_OSTS_INTR_VALID))
2430 		return (0);
2431 
2432 	/* write status back to acknowledge interrupt */
2433 	mfi_write(sc, MFI_OSTS, status);
2434 
2435 	return (1);
2436 }
2437 
/*
 * Post a command frame to xscale firmware via the inbound queue port.
 * The physical frame address is shifted right by 3 and OR'd with the
 * extra frame count -- presumably frames are at least 8-byte aligned
 * so the count rides in the low bits; confirm against the xscale
 * interface documentation.
 */
void
mfi_xscale_post(struct mfi_softc *sc, struct mfi_ccb *ccb)
{
	mfi_write(sc, MFI_IQP, (ccb->ccb_pframe >> 3) |
	    ccb->ccb_extra_frames);
}
2444 
/* Return the raw firmware state word on ppc chips (MFI_OSP). */
u_int32_t
mfi_ppc_fw_state(struct mfi_softc *sc)
{
	return (mfi_read(sc, MFI_OSP));
}
2450 
/*
 * Enable interrupts on ppc chips: clear all pending bits in the
 * outbound doorbell clear register, then write the interrupt mask.
 * NOTE(review): 0x80000004 is a magic constant (bits 31 and 2 of the
 * mask are left clear by the complement) -- confirm the bit meanings
 * against the ppc register documentation.
 */
void
mfi_ppc_intr_ena(struct mfi_softc *sc)
{
	mfi_write(sc, MFI_ODC, 0xffffffff);
	mfi_write(sc, MFI_OMSK, ~0x80000004);
}
2457 
2458 int
2459 mfi_ppc_intr(struct mfi_softc *sc)
2460 {
2461 	u_int32_t status;
2462 
2463 	status = mfi_read(sc, MFI_OSTS);
2464 	if (!ISSET(status, MFI_OSTS_PPC_INTR_VALID))
2465 		return (0);
2466 
2467 	/* write status back to acknowledge interrupt */
2468 	mfi_write(sc, MFI_ODC, status);
2469 
2470 	return (1);
2471 }
2472 
/*
 * Post a command frame to ppc firmware: the inbound queue port write
 * is the frame's physical address OR'd with 0x1 and the extra frame
 * count shifted left by one (the count occupies low address bits,
 * which presumably are zero due to frame alignment -- verify).
 */
void
mfi_ppc_post(struct mfi_softc *sc, struct mfi_ccb *ccb)
{
	mfi_write(sc, MFI_IQP, 0x1 | ccb->ccb_pframe |
	    (ccb->ccb_extra_frames << 1));
}
2479 
/* Return the raw firmware state word on gen2 chips (MFI_OSP). */
u_int32_t
mfi_gen2_fw_state(struct mfi_softc *sc)
{
	return (mfi_read(sc, MFI_OSP));
}
2485 
/*
 * Enable interrupts on gen2 chips: clear any pending doorbell state,
 * then write the complement of the gen2 valid-interrupt bit to the
 * interrupt mask register.
 */
void
mfi_gen2_intr_ena(struct mfi_softc *sc)
{
	mfi_write(sc, MFI_ODC, 0xffffffff);
	mfi_write(sc, MFI_OMSK, ~MFI_OSTS_GEN2_INTR_VALID);
}
2492 
2493 int
2494 mfi_gen2_intr(struct mfi_softc *sc)
2495 {
2496 	u_int32_t status;
2497 
2498 	status = mfi_read(sc, MFI_OSTS);
2499 	if (!ISSET(status, MFI_OSTS_GEN2_INTR_VALID))
2500 		return (0);
2501 
2502 	/* write status back to acknowledge interrupt */
2503 	mfi_write(sc, MFI_ODC, status);
2504 
2505 	return (1);
2506 }
2507 
/*
 * Post a command frame to gen2 firmware.  Same encoding as ppc: the
 * physical frame address OR'd with 0x1 and the extra frame count
 * shifted left by one.
 */
void
mfi_gen2_post(struct mfi_softc *sc, struct mfi_ccb *ccb)
{
	mfi_write(sc, MFI_IQP, 0x1 | ccb->ccb_pframe |
	    (ccb->ccb_extra_frames << 1));
}
2514 
/* Return the raw firmware state word on skinny chips (MFI_OSP). */
u_int32_t
mfi_skinny_fw_state(struct mfi_softc *sc)
{
	return (mfi_read(sc, MFI_OSP));
}
2520 
/*
 * Enable interrupts on skinny chips by writing the complement of bit 0
 * to the interrupt mask register (only bit 0 is left unmasked).
 */
void
mfi_skinny_intr_ena(struct mfi_softc *sc)
{
	mfi_write(sc, MFI_OMSK, ~0x00000001);
}
2526 
2527 int
2528 mfi_skinny_intr(struct mfi_softc *sc)
2529 {
2530 	u_int32_t status;
2531 
2532 	status = mfi_read(sc, MFI_OSTS);
2533 	if (!ISSET(status, MFI_OSTS_SKINNY_INTR_VALID))
2534 		return (0);
2535 
2536 	/* write status back to acknowledge interrupt */
2537 	mfi_write(sc, MFI_OSTS, status);
2538 
2539 	return (1);
2540 }
2541 
/*
 * Post a command frame to skinny firmware via the 64-bit inbound queue
 * port: low word carries the frame address OR'd with 0x1 and the extra
 * frame count shifted left by one; the high word is always zero.
 */
void
mfi_skinny_post(struct mfi_softc *sc, struct mfi_ccb *ccb)
{
	mfi_write(sc, MFI_IQPL, 0x1 | ccb->ccb_pframe |
	    (ccb->ccb_extra_frames << 1));
	mfi_write(sc, MFI_IQPH, 0x00000000);
}
2549 
2550 u_int
2551 mfi_skinny_sgd_load(struct mfi_softc *sc, struct mfi_ccb *ccb)
2552 {
2553 	struct mfi_frame_header	*hdr = &ccb->ccb_frame->mfr_header;
2554 	union mfi_sgl		*sgl = ccb->ccb_sgl;
2555 	bus_dma_segment_t	*sgd = ccb->ccb_dmamap->dm_segs;
2556 	int			 i;
2557 
2558 	switch (hdr->mfh_cmd) {
2559 	case MFI_CMD_LD_READ:
2560 	case MFI_CMD_LD_WRITE:
2561 	case MFI_CMD_PD_SCSI_IO:
2562 		/* Use MF_FRAME_IEEE for some IO commands on skinny adapters */
2563 		for (i = 0; i < ccb->ccb_dmamap->dm_nsegs; i++) {
2564 			sgl->sg_skinny[i].addr = htole64(sgd[i].ds_addr);
2565 			sgl->sg_skinny[i].len = htole32(sgd[i].ds_len);
2566 			sgl->sg_skinny[i].flag = 0;
2567 		}
2568 		hdr->mfh_flags |= MFI_FRAME_IEEE | MFI_FRAME_SGL64;
2569 
2570 		return (ccb->ccb_dmamap->dm_nsegs * sizeof(sgl->sg_skinny));
2571 	default:
2572 		return (mfi_default_sgd_load(sc, ccb));
2573 	}
2574 }
2575 
2576 int
2577 mfi_pd_scsi_probe(struct scsi_link *link)
2578 {
2579 	uint8_t mbox[MFI_MBOX_SIZE];
2580 	struct mfi_softc *sc = link->adapter_softc;
2581 	struct mfi_pd_link *pl = sc->sc_pd->pd_links[link->target];
2582 
2583 	if (link->lun > 0)
2584 		return (0);
2585 
2586 	if (pl == NULL)
2587 		return (ENXIO);
2588 
2589 	bzero(mbox, sizeof(mbox));
2590 	memcpy(&mbox[0], &pl->pd_id, sizeof(pl->pd_id));
2591 
2592 	if (mfi_mgmt(sc, MR_DCMD_PD_GET_INFO, MFI_DATA_IN,
2593 	    sizeof(pl->pd_info), &pl->pd_info, mbox))
2594 		return (EIO);
2595 
2596 	if (letoh16(pl->pd_info.mpd_fw_state) != MFI_PD_SYSTEM)
2597 		return (ENXIO);
2598 
2599 	return (0);
2600 }
2601 
2602 void
2603 mfi_pd_scsi_cmd(struct scsi_xfer *xs)
2604 {
2605 	struct scsi_link *link = xs->sc_link;
2606 	struct mfi_softc *sc = link->adapter_softc;
2607 	struct mfi_ccb *ccb = xs->io;
2608 	struct mfi_pass_frame *pf = &ccb->ccb_frame->mfr_pass;
2609 	struct mfi_pd_link *pl = sc->sc_pd->pd_links[link->target];
2610 
2611 	KERNEL_UNLOCK();
2612 
2613 	mfi_scrub_ccb(ccb);
2614 	xs->error = XS_NOERROR;
2615 
2616 	pf->mpf_header.mfh_cmd = MFI_CMD_PD_SCSI_IO;
2617 	pf->mpf_header.mfh_target_id = pl->pd_id;
2618 	pf->mpf_header.mfh_lun_id = link->lun;
2619 	pf->mpf_header.mfh_cdb_len = xs->cmdlen;
2620 	pf->mpf_header.mfh_timeout = 0;
2621 	pf->mpf_header.mfh_data_len = htole32(xs->datalen); /* XXX */
2622 	pf->mpf_header.mfh_sense_len = MFI_SENSE_SIZE;
2623 	pf->mpf_sense_addr = htole64(ccb->ccb_psense);
2624 
2625 	memset(pf->mpf_cdb, 0, sizeof(pf->mpf_cdb));
2626 	memcpy(pf->mpf_cdb, xs->cmd, xs->cmdlen);
2627 
2628 	ccb->ccb_done = mfi_scsi_xs_done;
2629 	ccb->ccb_cookie = xs;
2630 	ccb->ccb_frame_size = MFI_PASS_FRAME_SIZE;
2631 	ccb->ccb_sgl = &pf->mpf_sgl;
2632 
2633 	if (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT))
2634 		ccb->ccb_direction = xs->flags & SCSI_DATA_IN ?
2635 		    MFI_DATA_IN : MFI_DATA_OUT;
2636 	else
2637 		ccb->ccb_direction = MFI_DATA_NONE;
2638 
2639 	if (xs->data) {
2640 		ccb->ccb_data = xs->data;
2641 		ccb->ccb_len = xs->datalen;
2642 
2643 		if (mfi_create_sgl(sc, ccb, (xs->flags & SCSI_NOSLEEP) ?
2644 		    BUS_DMA_NOWAIT : BUS_DMA_WAITOK))
2645 			goto stuffup;
2646 	}
2647 
2648 	if (xs->flags & SCSI_POLL)
2649 		mfi_poll(sc, ccb);
2650 	else
2651 		mfi_start(sc, ccb);
2652 
2653 	KERNEL_LOCK();
2654 	return;
2655 
2656 stuffup:
2657 	xs->error = XS_DRIVER_STUFFUP;
2658 	KERNEL_LOCK();
2659 	scsi_done(xs);
2660 }
2661