xref: /openbsd/sys/dev/ic/mfi.c (revision beb6838f)
1 /* $OpenBSD: mfi.c,v 1.191 2023/11/28 09:29:20 jsg Exp $ */
2 /*
3  * Copyright (c) 2006 Marco Peereboom <marco@peereboom.us>
4  *
5  * Permission to use, copy, modify, and distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  */
17 
18 #include "bio.h"
19 
20 #include <sys/param.h>
21 #include <sys/systm.h>
22 #include <sys/buf.h>
23 #include <sys/device.h>
24 #include <sys/kernel.h>
25 #include <sys/malloc.h>
26 #include <sys/rwlock.h>
27 #include <sys/sensors.h>
28 #include <sys/dkio.h>
29 #include <sys/pool.h>
30 
31 #include <machine/bus.h>
32 
33 #include <scsi/scsi_all.h>
34 #include <scsi/scsi_disk.h>
35 #include <scsi/scsiconf.h>
36 
37 #include <dev/biovar.h>
38 #include <dev/ic/mfireg.h>
39 #include <dev/ic/mfivar.h>
40 
41 #ifdef MFI_DEBUG
42 uint32_t	mfi_debug = 0
43 /*		    | MFI_D_CMD */
44 /*		    | MFI_D_INTR */
45 /*		    | MFI_D_MISC */
46 /*		    | MFI_D_DMA */
47 /*		    | MFI_D_IOCTL */
48 /*		    | MFI_D_RW */
49 /*		    | MFI_D_MEM */
50 /*		    | MFI_D_CCB */
51 		;
52 #endif
53 
54 struct cfdriver mfi_cd = {
55 	NULL, "mfi", DV_DULL
56 };
57 
58 void	mfi_scsi_cmd(struct scsi_xfer *);
59 int	mfi_scsi_ioctl(struct scsi_link *, u_long, caddr_t, int);
60 int	mfi_ioctl_cache(struct scsi_link *, u_long,  struct dk_cache *);
61 
62 void	mfi_pd_scsi_cmd(struct scsi_xfer *);
63 int	mfi_pd_scsi_probe(struct scsi_link *);
64 
65 const struct scsi_adapter mfi_switch = {
66 	mfi_scsi_cmd, NULL, NULL, NULL, mfi_scsi_ioctl
67 };
68 
69 const struct scsi_adapter mfi_pd_switch = {
70 	mfi_pd_scsi_cmd, NULL, mfi_pd_scsi_probe, NULL, mfi_scsi_ioctl
71 };
72 
73 void *		mfi_get_ccb(void *);
74 void		mfi_put_ccb(void *, void *);
75 void		mfi_scrub_ccb(struct mfi_ccb *);
76 int		mfi_init_ccb(struct mfi_softc *);
77 
78 struct mfi_mem	*mfi_allocmem(struct mfi_softc *, size_t);
79 void		mfi_freemem(struct mfi_softc *, struct mfi_mem *);
80 
81 int		mfi_transition_firmware(struct mfi_softc *);
82 int		mfi_initialize_firmware(struct mfi_softc *);
83 int		mfi_get_info(struct mfi_softc *);
84 uint32_t	mfi_read(struct mfi_softc *, bus_size_t);
85 void		mfi_write(struct mfi_softc *, bus_size_t, uint32_t);
86 void		mfi_poll(struct mfi_softc *, struct mfi_ccb *);
87 void		mfi_exec(struct mfi_softc *, struct mfi_ccb *);
88 void		mfi_exec_done(struct mfi_softc *, struct mfi_ccb *);
89 int		mfi_create_sgl(struct mfi_softc *, struct mfi_ccb *, int);
90 u_int		mfi_default_sgd_load(struct mfi_softc *, struct mfi_ccb *);
91 int		mfi_syspd(struct mfi_softc *);
92 
93 /* commands */
94 int		mfi_scsi_ld(struct mfi_softc *sc, struct mfi_ccb *,
95 		    struct scsi_xfer *);
96 int		mfi_scsi_io(struct mfi_softc *sc, struct mfi_ccb *,
97 		    struct scsi_xfer *, uint64_t, uint32_t);
98 void		mfi_scsi_xs_done(struct mfi_softc *sc, struct mfi_ccb *);
99 int		mfi_mgmt(struct mfi_softc *, uint32_t, uint32_t, uint32_t,
100 		    void *, const union mfi_mbox *);
101 int		mfi_do_mgmt(struct mfi_softc *, struct mfi_ccb * , uint32_t,
102 		    uint32_t, uint32_t, void *, const union mfi_mbox *);
103 void		mfi_empty_done(struct mfi_softc *, struct mfi_ccb *);
104 
105 #if NBIO > 0
106 int		mfi_ioctl(struct device *, u_long, caddr_t);
107 int		mfi_bio_getitall(struct mfi_softc *);
108 int		mfi_ioctl_inq(struct mfi_softc *, struct bioc_inq *);
109 int		mfi_ioctl_vol(struct mfi_softc *, struct bioc_vol *);
110 int		mfi_ioctl_disk(struct mfi_softc *, struct bioc_disk *);
111 int		mfi_ioctl_alarm(struct mfi_softc *, struct bioc_alarm *);
112 int		mfi_ioctl_blink(struct mfi_softc *sc, struct bioc_blink *);
113 int		mfi_ioctl_setstate(struct mfi_softc *, struct bioc_setstate *);
114 int		mfi_ioctl_patrol(struct mfi_softc *sc, struct bioc_patrol *);
115 int		mfi_bio_hs(struct mfi_softc *, int, int, void *);
116 #ifndef SMALL_KERNEL
117 int		mfi_create_sensors(struct mfi_softc *);
118 void		mfi_refresh_sensors(void *);
119 int		mfi_bbu(struct mfi_softc *);
120 #endif /* SMALL_KERNEL */
121 #endif /* NBIO > 0 */
122 
123 void		mfi_start(struct mfi_softc *, struct mfi_ccb *);
124 void		mfi_done(struct mfi_softc *, struct mfi_ccb *);
125 u_int32_t	mfi_xscale_fw_state(struct mfi_softc *);
126 void		mfi_xscale_intr_ena(struct mfi_softc *);
127 int		mfi_xscale_intr(struct mfi_softc *);
128 void		mfi_xscale_post(struct mfi_softc *, struct mfi_ccb *);
129 
130 static const struct mfi_iop_ops mfi_iop_xscale = {
131 	mfi_xscale_fw_state,
132 	mfi_xscale_intr_ena,
133 	mfi_xscale_intr,
134 	mfi_xscale_post,
135 	mfi_default_sgd_load,
136 	0,
137 };
138 
139 u_int32_t	mfi_ppc_fw_state(struct mfi_softc *);
140 void		mfi_ppc_intr_ena(struct mfi_softc *);
141 int		mfi_ppc_intr(struct mfi_softc *);
142 void		mfi_ppc_post(struct mfi_softc *, struct mfi_ccb *);
143 
144 static const struct mfi_iop_ops mfi_iop_ppc = {
145 	mfi_ppc_fw_state,
146 	mfi_ppc_intr_ena,
147 	mfi_ppc_intr,
148 	mfi_ppc_post,
149 	mfi_default_sgd_load,
150 	MFI_IDB,
151 	0
152 };
153 
154 u_int32_t	mfi_gen2_fw_state(struct mfi_softc *);
155 void		mfi_gen2_intr_ena(struct mfi_softc *);
156 int		mfi_gen2_intr(struct mfi_softc *);
157 void		mfi_gen2_post(struct mfi_softc *, struct mfi_ccb *);
158 
159 static const struct mfi_iop_ops mfi_iop_gen2 = {
160 	mfi_gen2_fw_state,
161 	mfi_gen2_intr_ena,
162 	mfi_gen2_intr,
163 	mfi_gen2_post,
164 	mfi_default_sgd_load,
165 	MFI_IDB,
166 	0
167 };
168 
169 u_int32_t	mfi_skinny_fw_state(struct mfi_softc *);
170 void		mfi_skinny_intr_ena(struct mfi_softc *);
171 int		mfi_skinny_intr(struct mfi_softc *);
172 void		mfi_skinny_post(struct mfi_softc *, struct mfi_ccb *);
173 u_int		mfi_skinny_sgd_load(struct mfi_softc *, struct mfi_ccb *);
174 
175 static const struct mfi_iop_ops mfi_iop_skinny = {
176 	mfi_skinny_fw_state,
177 	mfi_skinny_intr_ena,
178 	mfi_skinny_intr,
179 	mfi_skinny_post,
180 	mfi_skinny_sgd_load,
181 	MFI_SKINNY_IDB,
182 	MFI_IOP_F_SYSPD
183 };
184 
185 #define mfi_fw_state(_s)	((_s)->sc_iop->mio_fw_state(_s))
186 #define mfi_intr_enable(_s)	((_s)->sc_iop->mio_intr_ena(_s))
187 #define mfi_my_intr(_s)		((_s)->sc_iop->mio_intr(_s))
188 #define mfi_post(_s, _c)	((_s)->sc_iop->mio_post((_s), (_c)))
189 #define mfi_sgd_load(_s, _c)	((_s)->sc_iop->mio_sgd_load((_s), (_c)))
190 
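/*
 * ccb allocation hooks backing the scsi_iopool.  The free list is
 * protected by sc_ccb_mtx; the kernel lock is dropped around the
 * mutex since these hooks are entered with it held.
 */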
191 void *
192 mfi_get_ccb(void *cookie)
193 {
194 	struct mfi_softc	*sc = cookie;
195 	struct mfi_ccb		*ccb;
196 
197 	KERNEL_UNLOCK();
198 
199 	mtx_enter(&sc->sc_ccb_mtx);
200 	ccb = SLIST_FIRST(&sc->sc_ccb_freeq);
201 	if (ccb != NULL) {
202 		SLIST_REMOVE_HEAD(&sc->sc_ccb_freeq, ccb_link);
203 		ccb->ccb_state = MFI_CCB_READY;
204 	}
205 	mtx_leave(&sc->sc_ccb_mtx);
206 
207 	DNPRINTF(MFI_D_CCB, "%s: mfi_get_ccb: %p\n", DEVNAME(sc), ccb);
208 	KERNEL_LOCK();
209 
210 	return (ccb);
211 }
212 
213 void
214 mfi_put_ccb(void *cookie, void *io)
215 {
216 	struct mfi_softc	*sc = cookie;
217 	struct mfi_ccb		*ccb = io;
218 
219 	DNPRINTF(MFI_D_CCB, "%s: mfi_put_ccb: %p\n", DEVNAME(sc), ccb);
220 
221 	KERNEL_UNLOCK();
222 	mtx_enter(&sc->sc_ccb_mtx);
223 	SLIST_INSERT_HEAD(&sc->sc_ccb_freeq, ccb, ccb_link);
224 	mtx_leave(&sc->sc_ccb_mtx);
225 	KERNEL_LOCK();
226 }
227 
228 void
229 mfi_scrub_ccb(struct mfi_ccb *ccb)
230 {
231 	struct mfi_frame_header	*hdr = &ccb->ccb_frame->mfr_header;
232 
233 	hdr->mfh_cmd_status = 0x0;
234 	hdr->mfh_flags = 0x0;
235 	ccb->ccb_state = MFI_CCB_FREE;
236 	ccb->ccb_cookie = NULL;
237 	ccb->ccb_flags = 0;
238 	ccb->ccb_done = NULL;
239 	ccb->ccb_direction = 0;
240 	ccb->ccb_frame_size = 0;
241 	ccb->ccb_extra_frames = 0;
242 	ccb->ccb_sgl = NULL;
243 	ccb->ccb_data = NULL;
244 	ccb->ccb_len = 0;
245 }
246 
247 int
248 mfi_init_ccb(struct mfi_softc *sc)
249 {
250 	struct mfi_ccb		*ccb;
251 	uint32_t		i;
252 	int			error;
253 
254 	DNPRINTF(MFI_D_CCB, "%s: mfi_init_ccb\n", DEVNAME(sc));
255 
256 	sc->sc_ccb = mallocarray(sc->sc_max_cmds, sizeof(struct mfi_ccb),
257 	    M_DEVBUF, M_WAITOK|M_ZERO);
258 
259 	for (i = 0; i < sc->sc_max_cmds; i++) {
260 		ccb = &sc->sc_ccb[i];
261 
262 		/* select i'th frame */
263 		ccb->ccb_frame = (union mfi_frame *)
264 		    (MFIMEM_KVA(sc->sc_frames) + sc->sc_frames_size * i);
265 		ccb->ccb_pframe =
266 		    MFIMEM_DVA(sc->sc_frames) + sc->sc_frames_size * i;
267 		ccb->ccb_pframe_offset = sc->sc_frames_size * i;
268 		ccb->ccb_frame->mfr_header.mfh_context = i;
269 
270 		/* select i'th sense */
271 		ccb->ccb_sense = (struct mfi_sense *)
272 		    (MFIMEM_KVA(sc->sc_sense) + MFI_SENSE_SIZE * i);
273 		ccb->ccb_psense =
274 		    (MFIMEM_DVA(sc->sc_sense) + MFI_SENSE_SIZE * i);
275 
276 		/* create a dma map for transfer */
277 		error = bus_dmamap_create(sc->sc_dmat,
278 		    MAXPHYS, sc->sc_max_sgl, MAXPHYS, 0,
279 		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->ccb_dmamap);
280 		if (error) {
281 			printf("%s: cannot create ccb dmamap (%d)\n",
282 			    DEVNAME(sc), error);
283 			goto destroy;
284 		}
285 
286 		DNPRINTF(MFI_D_CCB,
287 		    "ccb(%d): %p frame: %p (%#lx) sense: %p (%#lx) map: %p\n",
288 		    ccb->ccb_frame->mfr_header.mfh_context, ccb,
289 		    ccb->ccb_frame, ccb->ccb_pframe,
290 		    ccb->ccb_sense, ccb->ccb_psense,
291 		    ccb->ccb_dmamap);
292 
293 		/* add ccb to queue */
294 		mfi_put_ccb(sc, ccb);
295 	}
296 
297 	return (0);
298 destroy:
299 	/* free dma maps and ccb memory */
300 	while ((ccb = mfi_get_ccb(sc)) != NULL)
301 		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
302 
303 	free(sc->sc_ccb, M_DEVBUF, 0);
304 
305 	return (1);
306 }
307 
308 uint32_t
309 mfi_read(struct mfi_softc *sc, bus_size_t r)
310 {
311 	uint32_t rv;
312 
313 	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
314 	    BUS_SPACE_BARRIER_READ);
315 	rv = bus_space_read_4(sc->sc_iot, sc->sc_ioh, r);
316 
317 	DNPRINTF(MFI_D_RW, "%s: mr 0x%lx 0x%08x ", DEVNAME(sc), r, rv);
318 	return (rv);
319 }
320 
321 void
322 mfi_write(struct mfi_softc *sc, bus_size_t r, uint32_t v)
323 {
324 	DNPRINTF(MFI_D_RW, "%s: mw 0x%lx 0x%08x", DEVNAME(sc), r, v);
325 
326 	bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
327 	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
328 	    BUS_SPACE_BARRIER_WRITE);
329 }
330 
331 struct mfi_mem *
332 mfi_allocmem(struct mfi_softc *sc, size_t size)
333 {
334 	struct mfi_mem		*mm;
335 	int			nsegs;
336 
337 	DNPRINTF(MFI_D_MEM, "%s: mfi_allocmem: %zu\n", DEVNAME(sc),
338 	    size);
339 
340 	mm = malloc(sizeof(struct mfi_mem), M_DEVBUF, M_NOWAIT|M_ZERO);
341 	if (mm == NULL)
342 		return (NULL);
343 
344 	mm->am_size = size;
345 
346 	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
347 	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &mm->am_map) != 0)
348 		goto amfree;
349 
350 	if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &mm->am_seg, 1,
351 	    &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0)
352 		goto destroy;
353 
354 	if (bus_dmamem_map(sc->sc_dmat, &mm->am_seg, nsegs, size, &mm->am_kva,
355 	    BUS_DMA_NOWAIT) != 0)
356 		goto free;
357 
358 	if (bus_dmamap_load(sc->sc_dmat, mm->am_map, mm->am_kva, size, NULL,
359 	    BUS_DMA_NOWAIT) != 0)
360 		goto unmap;
361 
362 	DNPRINTF(MFI_D_MEM, "  kva: %p  dva: %lx  map: %p\n",
363 	    mm->am_kva, mm->am_map->dm_segs[0].ds_addr, mm->am_map);
364 
365 	return (mm);
366 
367 unmap:
368 	bus_dmamem_unmap(sc->sc_dmat, mm->am_kva, size);
369 free:
370 	bus_dmamem_free(sc->sc_dmat, &mm->am_seg, 1);
371 destroy:
372 	bus_dmamap_destroy(sc->sc_dmat, mm->am_map);
373 amfree:
374 	free(mm, M_DEVBUF, sizeof *mm);
375 
376 	return (NULL);
377 }
378 
379 void
380 mfi_freemem(struct mfi_softc *sc, struct mfi_mem *mm)
381 {
382 	DNPRINTF(MFI_D_MEM, "%s: mfi_freemem: %p\n", DEVNAME(sc), mm);
383 
384 	bus_dmamap_unload(sc->sc_dmat, mm->am_map);
385 	bus_dmamem_unmap(sc->sc_dmat, mm->am_kva, mm->am_size);
386 	bus_dmamem_free(sc->sc_dmat, &mm->am_seg, 1);
387 	bus_dmamap_destroy(sc->sc_dmat, mm->am_map);
388 	free(mm, M_DEVBUF, sizeof *mm);
389 }
390 
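/*
 * Walk the firmware through its boot states towards MFI_STATE_READY,
 * kicking the inbound doorbell for the handshake and operational
 * states and allowing each transition up to max_wait seconds.
 */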
391 int
392 mfi_transition_firmware(struct mfi_softc *sc)
393 {
394 	int32_t			fw_state, cur_state;
395 	u_int32_t		idb = sc->sc_iop->mio_idb;
396 	int			max_wait, i;
397 
398 	fw_state = mfi_fw_state(sc) & MFI_STATE_MASK;
399 
400 	DNPRINTF(MFI_D_CMD, "%s: mfi_transition_firmware: %#x\n", DEVNAME(sc),
401 	    fw_state);
402 
403 	while (fw_state != MFI_STATE_READY) {
404 		DNPRINTF(MFI_D_MISC,
405 		    "%s: waiting for firmware to become ready\n",
406 		    DEVNAME(sc));
407 		cur_state = fw_state;
408 		switch (fw_state) {
409 		case MFI_STATE_FAULT:
410 			printf("%s: firmware fault\n", DEVNAME(sc));
411 			return (1);
412 		case MFI_STATE_WAIT_HANDSHAKE:
413 			mfi_write(sc, idb, MFI_INIT_CLEAR_HANDSHAKE);
414 			max_wait = 2;
415 			break;
416 		case MFI_STATE_OPERATIONAL:
417 			mfi_write(sc, idb, MFI_INIT_READY);
418 			max_wait = 10;
419 			break;
420 		case MFI_STATE_UNDEFINED:
421 		case MFI_STATE_BB_INIT:
422 			max_wait = 2;
423 			break;
424 		case MFI_STATE_FW_INIT:
425 		case MFI_STATE_DEVICE_SCAN:
426 		case MFI_STATE_FLUSH_CACHE:
427 			max_wait = 20;
428 			break;
429 		default:
430 			printf("%s: unknown firmware state %d\n",
431 			    DEVNAME(sc), fw_state);
432 			return (1);
433 		}
434 		for (i = 0; i < (max_wait * 10); i++) {
435 			fw_state = mfi_fw_state(sc) & MFI_STATE_MASK;
436 			if (fw_state == cur_state)
437 				DELAY(100000);
438 			else
439 				break;
440 		}
441 		if (fw_state == cur_state) {
442 			printf("%s: firmware stuck in state %#x\n",
443 			    DEVNAME(sc), fw_state);
444 			return (1);
445 		}
446 	}
447 
448 	return (0);
449 }
450 
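/*
 * Post the MFI INIT frame that hands the firmware the bus addresses
 * of the reply queue and the producer/consumer indexes.  Runs polled
 * since interrupts are not enabled yet at attach time.
 */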
451 int
452 mfi_initialize_firmware(struct mfi_softc *sc)
453 {
454 	struct mfi_ccb		*ccb;
455 	struct mfi_init_frame	*init;
456 	struct mfi_init_qinfo	*qinfo;
457 	int			rv = 0;
458 
459 	DNPRINTF(MFI_D_MISC, "%s: mfi_initialize_firmware\n", DEVNAME(sc));
460 
461 	ccb = scsi_io_get(&sc->sc_iopool, 0);
462 	mfi_scrub_ccb(ccb);
463 
464 	init = &ccb->ccb_frame->mfr_init;
465 	qinfo = (struct mfi_init_qinfo *)((uint8_t *)init + MFI_FRAME_SIZE);
466 
467 	memset(qinfo, 0, sizeof(*qinfo));
468 	qinfo->miq_rq_entries = htole32(sc->sc_max_cmds + 1);
469 
470 	qinfo->miq_rq_addr = htole64(MFIMEM_DVA(sc->sc_pcq) +
471 	    offsetof(struct mfi_prod_cons, mpc_reply_q));
472 
473 	qinfo->miq_pi_addr = htole64(MFIMEM_DVA(sc->sc_pcq) +
474 	    offsetof(struct mfi_prod_cons, mpc_producer));
475 
476 	qinfo->miq_ci_addr = htole64(MFIMEM_DVA(sc->sc_pcq) +
477 	    offsetof(struct mfi_prod_cons, mpc_consumer));
478 
479 	init->mif_header.mfh_cmd = MFI_CMD_INIT;
480 	init->mif_header.mfh_data_len = htole32(sizeof(*qinfo));
481 	init->mif_qinfo_new_addr = htole64(ccb->ccb_pframe + MFI_FRAME_SIZE);
482 
483 	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_pcq),
484 	    0, MFIMEM_LEN(sc->sc_pcq),
485 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
486 
487 	ccb->ccb_done = mfi_empty_done;
488 	mfi_poll(sc, ccb);
489 	if (init->mif_header.mfh_cmd_status != MFI_STAT_OK)
490 		rv = 1;
491 
492 	mfi_put_ccb(sc, ccb);
493 
494 	return (rv);
495 }
496 
497 void
498 mfi_empty_done(struct mfi_softc *sc, struct mfi_ccb *ccb)
499 {
500 	/* nop */
501 }
502 
503 int
504 mfi_get_info(struct mfi_softc *sc)
505 {
506 #ifdef MFI_DEBUG
507 	int i;
508 #endif
509 	DNPRINTF(MFI_D_MISC, "%s: mfi_get_info\n", DEVNAME(sc));
510 
511 	if (mfi_mgmt(sc, MR_DCMD_CTRL_GET_INFO, MFI_DATA_IN,
512 	    sizeof(sc->sc_info), &sc->sc_info, NULL))
513 		return (1);
514 
515 #ifdef MFI_DEBUG
516 	for (i = 0; i < sc->sc_info.mci_image_component_count; i++) {
517 		printf("%s: active FW %s Version %s date %s time %s\n",
518 		    DEVNAME(sc),
519 		    sc->sc_info.mci_image_component[i].mic_name,
520 		    sc->sc_info.mci_image_component[i].mic_version,
521 		    sc->sc_info.mci_image_component[i].mic_build_date,
522 		    sc->sc_info.mci_image_component[i].mic_build_time);
523 	}
524 
525 	for (i = 0; i < sc->sc_info.mci_pending_image_component_count; i++) {
526 		printf("%s: pending FW %s Version %s date %s time %s\n",
527 		    DEVNAME(sc),
528 		    sc->sc_info.mci_pending_image_component[i].mic_name,
529 		    sc->sc_info.mci_pending_image_component[i].mic_version,
530 		    sc->sc_info.mci_pending_image_component[i].mic_build_date,
531 		    sc->sc_info.mci_pending_image_component[i].mic_build_time);
532 	}
533 
534 	printf("%s: max_arms %d max_spans %d max_arrs %d max_lds %d name %s\n",
535 	    DEVNAME(sc),
536 	    sc->sc_info.mci_max_arms,
537 	    sc->sc_info.mci_max_spans,
538 	    sc->sc_info.mci_max_arrays,
539 	    sc->sc_info.mci_max_lds,
540 	    sc->sc_info.mci_product_name);
541 
542 	printf("%s: serial %s present %#x fw time %d max_cmds %d max_sg %d\n",
543 	    DEVNAME(sc),
544 	    sc->sc_info.mci_serial_number,
545 	    sc->sc_info.mci_hw_present,
546 	    sc->sc_info.mci_current_fw_time,
547 	    sc->sc_info.mci_max_cmds,
548 	    sc->sc_info.mci_max_sg_elements);
549 
550 	printf("%s: max_rq %d lds_pres %d lds_deg %d lds_off %d pd_pres %d\n",
551 	    DEVNAME(sc),
552 	    sc->sc_info.mci_max_request_size,
553 	    sc->sc_info.mci_lds_present,
554 	    sc->sc_info.mci_lds_degraded,
555 	    sc->sc_info.mci_lds_offline,
556 	    sc->sc_info.mci_pd_present);
557 
558 	printf("%s: pd_dsk_prs %d pd_dsk_pred_fail %d pd_dsk_fail %d\n",
559 	    DEVNAME(sc),
560 	    sc->sc_info.mci_pd_disks_present,
561 	    sc->sc_info.mci_pd_disks_pred_failure,
562 	    sc->sc_info.mci_pd_disks_failed);
563 
564 	printf("%s: nvram %d mem %d flash %d\n",
565 	    DEVNAME(sc),
566 	    sc->sc_info.mci_nvram_size,
567 	    sc->sc_info.mci_memory_size,
568 	    sc->sc_info.mci_flash_size);
569 
570 	printf("%s: ram_cor %d ram_uncor %d clus_all %d clus_act %d\n",
571 	    DEVNAME(sc),
572 	    sc->sc_info.mci_ram_correctable_errors,
573 	    sc->sc_info.mci_ram_uncorrectable_errors,
574 	    sc->sc_info.mci_cluster_allowed,
575 	    sc->sc_info.mci_cluster_active);
576 
577 	printf("%s: max_strps_io %d raid_lvl %#x adapt_ops %#x ld_ops %#x\n",
578 	    DEVNAME(sc),
579 	    sc->sc_info.mci_max_strips_per_io,
580 	    sc->sc_info.mci_raid_levels,
581 	    sc->sc_info.mci_adapter_ops,
582 	    sc->sc_info.mci_ld_ops);
583 
584 	printf("%s: strp_sz_min %d strp_sz_max %d pd_ops %#x pd_mix %#x\n",
585 	    DEVNAME(sc),
586 	    sc->sc_info.mci_stripe_sz_ops.min,
587 	    sc->sc_info.mci_stripe_sz_ops.max,
588 	    sc->sc_info.mci_pd_ops,
589 	    sc->sc_info.mci_pd_mix_support);
590 
591 	printf("%s: ecc_bucket %d pckg_prop %s\n",
592 	    DEVNAME(sc),
593 	    sc->sc_info.mci_ecc_bucket_count,
594 	    sc->sc_info.mci_package_version);
595 
596 	printf("%s: sq_nm %d prd_fail_poll %d intr_thrtl %d intr_thrtl_to %d\n",
597 	    DEVNAME(sc),
598 	    sc->sc_info.mci_properties.mcp_seq_num,
599 	    sc->sc_info.mci_properties.mcp_pred_fail_poll_interval,
600 	    sc->sc_info.mci_properties.mcp_intr_throttle_cnt,
601 	    sc->sc_info.mci_properties.mcp_intr_throttle_timeout);
602 
603 	printf("%s: rbld_rate %d patr_rd_rate %d bgi_rate %d cc_rate %d\n",
604 	    DEVNAME(sc),
605 	    sc->sc_info.mci_properties.mcp_rebuild_rate,
606 	    sc->sc_info.mci_properties.mcp_patrol_read_rate,
607 	    sc->sc_info.mci_properties.mcp_bgi_rate,
608 	    sc->sc_info.mci_properties.mcp_cc_rate);
609 
610 	printf("%s: rc_rate %d ch_flsh %d spin_cnt %d spin_dly %d clus_en %d\n",
611 	    DEVNAME(sc),
612 	    sc->sc_info.mci_properties.mcp_recon_rate,
613 	    sc->sc_info.mci_properties.mcp_cache_flush_interval,
614 	    sc->sc_info.mci_properties.mcp_spinup_drv_cnt,
615 	    sc->sc_info.mci_properties.mcp_spinup_delay,
616 	    sc->sc_info.mci_properties.mcp_cluster_enable);
617 
618 	printf("%s: coerc %d alarm %d dis_auto_rbld %d dis_bat_wrn %d ecc %d\n",
619 	    DEVNAME(sc),
620 	    sc->sc_info.mci_properties.mcp_coercion_mode,
621 	    sc->sc_info.mci_properties.mcp_alarm_enable,
622 	    sc->sc_info.mci_properties.mcp_disable_auto_rebuild,
623 	    sc->sc_info.mci_properties.mcp_disable_battery_warn,
624 	    sc->sc_info.mci_properties.mcp_ecc_bucket_size);
625 
626 	printf("%s: ecc_leak %d rest_hs %d exp_encl_dev %d\n",
627 	    DEVNAME(sc),
628 	    sc->sc_info.mci_properties.mcp_ecc_bucket_leak_rate,
629 	    sc->sc_info.mci_properties.mcp_restore_hotspare_on_insertion,
630 	    sc->sc_info.mci_properties.mcp_expose_encl_devices);
631 
632 	printf("%s: vendor %#x device %#x subvendor %#x subdevice %#x\n",
633 	    DEVNAME(sc),
634 	    sc->sc_info.mci_pci.mip_vendor,
635 	    sc->sc_info.mci_pci.mip_device,
636 	    sc->sc_info.mci_pci.mip_subvendor,
637 	    sc->sc_info.mci_pci.mip_subdevice);
638 
639 	printf("%s: type %#x port_count %d port_addr ",
640 	    DEVNAME(sc),
641 	    sc->sc_info.mci_host.mih_type,
642 	    sc->sc_info.mci_host.mih_port_count);
643 
644 	for (i = 0; i < 8; i++)
645 		printf("%.0llx ", sc->sc_info.mci_host.mih_port_addr[i]);
646 	printf("\n");
647 
648 	printf("%s: type %.x port_count %d port_addr ",
649 	    DEVNAME(sc),
650 	    sc->sc_info.mci_device.mid_type,
651 	    sc->sc_info.mci_device.mid_port_count);
652 
653 	for (i = 0; i < 8; i++)
654 		printf("%.0llx ", sc->sc_info.mci_device.mid_port_addr[i]);
655 	printf("\n");
656 #endif /* MFI_DEBUG */
657 
658 	return (0);
659 }
660 
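/*
 * Common attach path: pick the IOP-specific register ops, bring the
 * firmware to READY, size the command/SG limits from the firmware
 * status register, allocate the reply queue, frame and sense DMA
 * memory, initialize the ccbs and firmware queues, then attach the
 * logical disk bus (and the pass-through bus on IOPs that expose
 * system physical disks), enable interrupts and register with bio.
 */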
661 int
662 mfi_attach(struct mfi_softc *sc, enum mfi_iop iop)
663 {
664 	struct scsibus_attach_args saa;
665 	uint32_t		status, frames, max_sgl;
666 	int			i;
667 
668 	switch (iop) {
669 	case MFI_IOP_XSCALE:
670 		sc->sc_iop = &mfi_iop_xscale;
671 		break;
672 	case MFI_IOP_PPC:
673 		sc->sc_iop = &mfi_iop_ppc;
674 		break;
675 	case MFI_IOP_GEN2:
676 		sc->sc_iop = &mfi_iop_gen2;
677 		break;
678 	case MFI_IOP_SKINNY:
679 		sc->sc_iop = &mfi_iop_skinny;
680 		break;
681 	default:
682 		panic("%s: unknown iop %d", DEVNAME(sc), iop);
683 	}
684 
685 	DNPRINTF(MFI_D_MISC, "%s: mfi_attach\n", DEVNAME(sc));
686 
687 	if (mfi_transition_firmware(sc))
688 		return (1);
689 
690 	SLIST_INIT(&sc->sc_ccb_freeq);
691 	mtx_init(&sc->sc_ccb_mtx, IPL_BIO);
692 	scsi_iopool_init(&sc->sc_iopool, sc, mfi_get_ccb, mfi_put_ccb);
693 
694 	rw_init(&sc->sc_lock, "mfi_lock");
695 
696 	status = mfi_fw_state(sc);
697 	sc->sc_max_cmds = status & MFI_STATE_MAXCMD_MASK;
698 	max_sgl = (status & MFI_STATE_MAXSGL_MASK) >> 16;
699 	if (sc->sc_64bit_dma) {
700 		sc->sc_max_sgl = min(max_sgl, (128 * 1024) / PAGE_SIZE + 1);
701 		sc->sc_sgl_size = sizeof(struct mfi_sg64);
702 		sc->sc_sgl_flags = MFI_FRAME_SGL64;
703 	} else {
704 		sc->sc_max_sgl = max_sgl;
705 		sc->sc_sgl_size = sizeof(struct mfi_sg32);
706 		sc->sc_sgl_flags = MFI_FRAME_SGL32;
707 	}
708 	if (iop == MFI_IOP_SKINNY)
709 		sc->sc_sgl_size = sizeof(struct mfi_sg_skinny);
710 	DNPRINTF(MFI_D_MISC, "%s: 64bit: %d max commands: %u, max sgl: %u\n",
711 	    DEVNAME(sc), sc->sc_64bit_dma, sc->sc_max_cmds, sc->sc_max_sgl);
712 
713 	/* consumer/producer and reply queue memory */
714 	sc->sc_pcq = mfi_allocmem(sc, (sizeof(uint32_t) * sc->sc_max_cmds) +
715 	    sizeof(struct mfi_prod_cons));
716 	if (sc->sc_pcq == NULL) {
717 		printf("%s: unable to allocate reply queue memory\n",
718 		    DEVNAME(sc));
719 		goto nopcq;
720 	}
721 
722 	/* frame memory */
723 	/* we are not doing 64 bit IO so only calculate # of 32 bit frames */
724 	frames = (sc->sc_sgl_size * sc->sc_max_sgl + MFI_FRAME_SIZE - 1) /
725 	    MFI_FRAME_SIZE + 1;
726 	sc->sc_frames_size = frames * MFI_FRAME_SIZE;
727 	sc->sc_frames = mfi_allocmem(sc, sc->sc_frames_size * sc->sc_max_cmds);
728 	if (sc->sc_frames == NULL) {
729 		printf("%s: unable to allocate frame memory\n", DEVNAME(sc));
730 		goto noframe;
731 	}
732 	/* XXX hack, fix this */
733 	if (MFIMEM_DVA(sc->sc_frames) & 0x3f) {
734 		printf("%s: improper frame alignment (%#lx) FIXME\n",
735 		    DEVNAME(sc), MFIMEM_DVA(sc->sc_frames));
736 		goto noframe;
737 	}
738 
739 	/* sense memory */
740 	sc->sc_sense = mfi_allocmem(sc, sc->sc_max_cmds * MFI_SENSE_SIZE);
741 	if (sc->sc_sense == NULL) {
742 		printf("%s: unable to allocate sense memory\n", DEVNAME(sc));
743 		goto nosense;
744 	}
745 
746 	/* now that we have all memory bits go initialize ccbs */
747 	if (mfi_init_ccb(sc)) {
748 		printf("%s: could not init ccb list\n", DEVNAME(sc));
749 		goto noinit;
750 	}
751 
752 	/* kickstart firmware with all addresses and pointers */
753 	if (mfi_initialize_firmware(sc)) {
754 		printf("%s: could not initialize firmware\n", DEVNAME(sc));
755 		goto noinit;
756 	}
757 
758 	if (mfi_get_info(sc)) {
759 		printf("%s: could not retrieve controller information\n",
760 		    DEVNAME(sc));
761 		goto noinit;
762 	}
763 
764 	printf("%s: \"%s\", firmware %s", DEVNAME(sc),
765 	    sc->sc_info.mci_product_name, sc->sc_info.mci_package_version);
766 	if (letoh16(sc->sc_info.mci_memory_size) > 0)
767 		printf(", %uMB cache", letoh16(sc->sc_info.mci_memory_size));
768 	printf("\n");
769 
770 	sc->sc_ld_cnt = sc->sc_info.mci_lds_present;
771 	for (i = 0; i < sc->sc_ld_cnt; i++)
772 		sc->sc_ld[i].ld_present = 1;
773 
774 	saa.saa_adapter = &mfi_switch;
775 	saa.saa_adapter_softc = sc;
776 	saa.saa_adapter_buswidth = sc->sc_info.mci_max_lds;
777 	saa.saa_adapter_target = SDEV_NO_ADAPTER_TARGET;
778 	saa.saa_luns = 1;
779 	saa.saa_openings = sc->sc_max_cmds - 1;
780 	saa.saa_pool = &sc->sc_iopool;
781 	saa.saa_quirks = saa.saa_flags = 0;
782 	saa.saa_wwpn = saa.saa_wwnn = 0;
783 
784 	sc->sc_scsibus = (struct scsibus_softc *)
785 	    config_found(&sc->sc_dev, &saa, scsiprint);
786 
787 	if (ISSET(sc->sc_iop->mio_flags, MFI_IOP_F_SYSPD))
788 		mfi_syspd(sc);
789 
790 	/* enable interrupts */
791 	mfi_intr_enable(sc);
792 
793 #if NBIO > 0
794 	if (bio_register(&sc->sc_dev, mfi_ioctl) != 0)
795 		panic("%s: controller registration failed", DEVNAME(sc));
796 	else
797 		sc->sc_ioctl = mfi_ioctl;
798 
799 #ifndef SMALL_KERNEL
800 	if (mfi_create_sensors(sc) != 0)
801 		printf("%s: unable to create sensors\n", DEVNAME(sc));
802 #endif
803 #endif /* NBIO > 0 */
804 
805 	return (0);
806 noinit:
807 	mfi_freemem(sc, sc->sc_sense);
808 nosense:
809 	mfi_freemem(sc, sc->sc_frames);
810 noframe:
811 	mfi_freemem(sc, sc->sc_pcq);
812 nopcq:
813 	return (1);
814 }
815 
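/*
 * Attach a second scsibus for directly exposed physical disks:
 * fetch the PD list from the firmware and record one pd_link per
 * device before calling config_found().
 */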
816 int
817 mfi_syspd(struct mfi_softc *sc)
818 {
819 	struct scsibus_attach_args saa;
820 	struct mfi_pd_link *pl;
821 	struct mfi_pd_list *pd;
822 	u_int npds, i;
823 
824 	sc->sc_pd = malloc(sizeof(*sc->sc_pd), M_DEVBUF, M_WAITOK|M_ZERO);
825 	if (sc->sc_pd == NULL)
826 		return (1);
827 
828 	pd = malloc(sizeof(*pd), M_TEMP, M_WAITOK|M_ZERO);
829 	if (pd == NULL)
830 		goto nopdsc;
831 
832 	if (mfi_mgmt(sc, MR_DCMD_PD_GET_LIST, MFI_DATA_IN,
833 	    sizeof(*pd), pd, NULL) != 0)
834 		goto nopd;
835 
836 	npds = letoh32(pd->mpl_no_pd);
837 	for (i = 0; i < npds; i++) {
838 		pl = malloc(sizeof(*pl), M_DEVBUF, M_WAITOK|M_ZERO);
839 		if (pl == NULL)
840 			goto nopl;
841 
842 		pl->pd_id = pd->mpl_address[i].mpa_pd_id;
843 		sc->sc_pd->pd_links[i] = pl;
844 	}
845 
846 	free(pd, M_TEMP, sizeof *pd);
847 
848 	saa.saa_adapter = &mfi_pd_switch;
849 	saa.saa_adapter_softc = sc;
850 	saa.saa_adapter_buswidth = MFI_MAX_PD;
851 	saa.saa_adapter_target = SDEV_NO_ADAPTER_TARGET;
852 	saa.saa_luns = 8;
853 	saa.saa_openings = sc->sc_max_cmds - 1;
854 	saa.saa_pool = &sc->sc_iopool;
855 	saa.saa_quirks = saa.saa_flags = 0;
856 	saa.saa_wwpn = saa.saa_wwnn = 0;
857 
858 	sc->sc_pd->pd_scsibus = (struct scsibus_softc *)
859 	    config_found(&sc->sc_dev, &saa, scsiprint);
860 
861 	return (0);
862 nopl:
863 	for (i = 0; i < npds; i++) {
864 		pl = sc->sc_pd->pd_links[i];
865 		if (pl == NULL)
866 			break;
867 
868 		free(pl, M_DEVBUF, sizeof *pl);
869 	}
870 nopd:
871 	free(pd, M_TEMP, sizeof *pd);
872 nopdsc:
873 	free(sc->sc_pd, M_DEVBUF, sizeof *sc->sc_pd);
874 	return (1);
875 }
876 
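/*
 * Polled command execution: the frame is flagged so the firmware will
 * not post it to the reply queue, then cmd_status is busy-waited on in
 * 1ms steps (roughly 5 seconds) before the data map is synced and
 * unloaded and the done callback is run.
 */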
877 void
878 mfi_poll(struct mfi_softc *sc, struct mfi_ccb *ccb)
879 {
880 	struct mfi_frame_header *hdr;
881 	int to = 0;
882 
883 	DNPRINTF(MFI_D_CMD, "%s: mfi_poll\n", DEVNAME(sc));
884 
885 	hdr = &ccb->ccb_frame->mfr_header;
886 	hdr->mfh_cmd_status = 0xff;
887 	hdr->mfh_flags |= MFI_FRAME_DONT_POST_IN_REPLY_QUEUE;
888 
889 	mfi_start(sc, ccb);
890 
891 	for (;;) {
892 		delay(1000);
893 
894 		bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
895 		    ccb->ccb_pframe_offset, sc->sc_frames_size,
896 		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
897 
898 		if (hdr->mfh_cmd_status != 0xff)
899 			break;
900 
901 		if (to++ > 5000) {
902 			printf("%s: timeout on ccb %d\n", DEVNAME(sc),
903 			    hdr->mfh_context);
904 			ccb->ccb_flags |= MFI_CCB_F_ERR;
905 			break;
906 		}
907 
908 		bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
909 		    ccb->ccb_pframe_offset, sc->sc_frames_size,
910 		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
911 	}
912 
913 	if (ccb->ccb_len > 0) {
914 		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
915 		    ccb->ccb_dmamap->dm_mapsize,
916 		    (ccb->ccb_direction & MFI_DATA_IN) ?
917 		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
918 
919 		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
920 	}
921 
922 	ccb->ccb_done(sc, ccb);
923 }
924 
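/*
 * Sleeping command execution: an on-stack mutex serves as the ccb
 * cookie, and mfi_exec_done() clears it and wakes the waiter once the
 * command completes.
 */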
925 void
926 mfi_exec(struct mfi_softc *sc, struct mfi_ccb *ccb)
927 {
928 	struct mutex m;
929 
930 	mtx_init(&m, IPL_BIO);
931 
932 #ifdef DIAGNOSTIC
933 	if (ccb->ccb_cookie != NULL || ccb->ccb_done != NULL)
934 		panic("mfi_exec called with cookie or done set");
935 #endif
936 
937 	ccb->ccb_cookie = &m;
938 	ccb->ccb_done = mfi_exec_done;
939 
940 	mfi_start(sc, ccb);
941 
942 	mtx_enter(&m);
943 	while (ccb->ccb_cookie != NULL)
944 		msleep_nsec(ccb, &m, PRIBIO, "mfiexec", INFSLP);
945 	mtx_leave(&m);
946 }
947 
948 void
949 mfi_exec_done(struct mfi_softc *sc, struct mfi_ccb *ccb)
950 {
951 	struct mutex *m = ccb->ccb_cookie;
952 
953 	mtx_enter(m);
954 	ccb->ccb_cookie = NULL;
955 	wakeup_one(ccb);
956 	mtx_leave(m);
957 }
958 
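/*
 * Interrupt handler: drain the reply queue between our consumer index
 * and the firmware's producer index, map each returned context back to
 * its ccb and complete it, then publish the updated consumer index.
 */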
959 int
960 mfi_intr(void *arg)
961 {
962 	struct mfi_softc	*sc = arg;
963 	struct mfi_prod_cons	*pcq = MFIMEM_KVA(sc->sc_pcq);
964 	struct mfi_ccb		*ccb;
965 	uint32_t		producer, consumer, ctx;
966 	int			claimed = 0;
967 
968 	if (!mfi_my_intr(sc))
969 		return (0);
970 
971 	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_pcq),
972 	    0, MFIMEM_LEN(sc->sc_pcq),
973 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
974 
975 	producer = letoh32(pcq->mpc_producer);
976 	consumer = letoh32(pcq->mpc_consumer);
977 
978 	DNPRINTF(MFI_D_INTR, "%s: mfi_intr %p %p\n", DEVNAME(sc), sc, pcq);
979 
980 	while (consumer != producer) {
981 		DNPRINTF(MFI_D_INTR, "%s: mfi_intr pi %#x ci %#x\n",
982 		    DEVNAME(sc), producer, consumer);
983 
984 		ctx = pcq->mpc_reply_q[consumer];
985 		pcq->mpc_reply_q[consumer] = MFI_INVALID_CTX;
986 		if (ctx == MFI_INVALID_CTX)
987 			printf("%s: invalid context, p: %d c: %d\n",
988 			    DEVNAME(sc), producer, consumer);
989 		else {
990 			/* XXX remove from queue and call scsi_done */
991 			ccb = &sc->sc_ccb[ctx];
992 			DNPRINTF(MFI_D_INTR, "%s: mfi_intr context %#x\n",
993 			    DEVNAME(sc), ctx);
994 			mfi_done(sc, ccb);
995 
996 			claimed = 1;
997 		}
998 		consumer++;
999 		if (consumer == (sc->sc_max_cmds + 1))
1000 			consumer = 0;
1001 	}
1002 
1003 	pcq->mpc_consumer = htole32(consumer);
1004 
1005 	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_pcq),
1006 	    0, MFIMEM_LEN(sc->sc_pcq),
1007 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1008 
1009 	return (claimed);
1010 }
1011 
1012 int
1013 mfi_scsi_io(struct mfi_softc *sc, struct mfi_ccb *ccb,
1014     struct scsi_xfer *xs, uint64_t blockno, uint32_t blockcnt)
1015 {
1016 	struct scsi_link	*link = xs->sc_link;
1017 	struct mfi_io_frame	*io;
1018 
1019 	DNPRINTF(MFI_D_CMD, "%s: mfi_scsi_io: %d\n",
1020 	    DEVNAME((struct mfi_softc *)link->bus->sb_adapter_softc), link->target);
1021 
1022 	if (!xs->data)
1023 		return (1);
1024 
1025 	io = &ccb->ccb_frame->mfr_io;
1026 	if (xs->flags & SCSI_DATA_IN) {
1027 		io->mif_header.mfh_cmd = MFI_CMD_LD_READ;
1028 		ccb->ccb_direction = MFI_DATA_IN;
1029 	} else {
1030 		io->mif_header.mfh_cmd = MFI_CMD_LD_WRITE;
1031 		ccb->ccb_direction = MFI_DATA_OUT;
1032 	}
1033 	io->mif_header.mfh_target_id = link->target;
1034 	io->mif_header.mfh_timeout = 0;
1035 	io->mif_header.mfh_flags = 0;
1036 	io->mif_header.mfh_sense_len = MFI_SENSE_SIZE;
1037 	io->mif_header.mfh_data_len = htole32(blockcnt);
1038 	io->mif_lba = htole64(blockno);
1039 	io->mif_sense_addr = htole64(ccb->ccb_psense);
1040 
1041 	ccb->ccb_done = mfi_scsi_xs_done;
1042 	ccb->ccb_cookie = xs;
1043 	ccb->ccb_frame_size = MFI_IO_FRAME_SIZE;
1044 	ccb->ccb_sgl = &io->mif_sgl;
1045 	ccb->ccb_data = xs->data;
1046 	ccb->ccb_len = xs->datalen;
1047 
1048 	if (mfi_create_sgl(sc, ccb, (xs->flags & SCSI_NOSLEEP) ?
1049 	    BUS_DMA_NOWAIT : BUS_DMA_WAITOK))
1050 		return (1);
1051 
1052 	return (0);
1053 }
1054 
1055 void
1056 mfi_scsi_xs_done(struct mfi_softc *sc, struct mfi_ccb *ccb)
1057 {
1058 	struct scsi_xfer	*xs = ccb->ccb_cookie;
1059 	struct mfi_frame_header	*hdr = &ccb->ccb_frame->mfr_header;
1060 
1061 	DNPRINTF(MFI_D_INTR, "%s: mfi_scsi_xs_done %p %p\n",
1062 	    DEVNAME(sc), ccb, ccb->ccb_frame);
1063 
1064 	switch (hdr->mfh_cmd_status) {
1065 	case MFI_STAT_OK:
1066 		xs->resid = 0;
1067 		break;
1068 
1069 	case MFI_STAT_SCSI_DONE_WITH_ERROR:
1070 		xs->error = XS_SENSE;
1071 		xs->resid = 0;
1072 		memset(&xs->sense, 0, sizeof(xs->sense));
1073 		memcpy(&xs->sense, ccb->ccb_sense, sizeof(xs->sense));
1074 		break;
1075 
1076 	case MFI_STAT_DEVICE_NOT_FOUND:
1077 		xs->error = XS_SELTIMEOUT;
1078 		break;
1079 
1080 	default:
1081 		xs->error = XS_DRIVER_STUFFUP;
1082 		DNPRINTF(MFI_D_CMD,
1083 		    "%s: mfi_scsi_xs_done stuffup %02x on %02x\n",
1084 		    DEVNAME(sc), hdr->mfh_cmd_status, xs->cmd.opcode);
1085 
1086 		if (hdr->mfh_scsi_status != 0) {
1087 			DNPRINTF(MFI_D_INTR,
1088 			    "%s: mfi_scsi_xs_done sense %#x %p %p\n",
1089 			    DEVNAME(sc), hdr->mfh_scsi_status,
1090 			    &xs->sense, ccb->ccb_sense);
1091 			memset(&xs->sense, 0, sizeof(xs->sense));
1092 			memcpy(&xs->sense, ccb->ccb_sense,
1093 			    sizeof(struct scsi_sense_data));
1094 			xs->error = XS_SENSE;
1095 		}
1096 		break;
1097 	}
1098 
1099 	KERNEL_LOCK();
1100 	scsi_done(xs);
1101 	KERNEL_UNLOCK();
1102 }
1103 
1104 int
1105 mfi_scsi_ld(struct mfi_softc *sc, struct mfi_ccb *ccb, struct scsi_xfer *xs)
1106 {
1107 	struct scsi_link	*link = xs->sc_link;
1108 	struct mfi_pass_frame	*pf;
1109 
1110 	DNPRINTF(MFI_D_CMD, "%s: mfi_scsi_ld: %d\n",
1111 	    DEVNAME((struct mfi_softc *)link->bus->sb_adapter_softc), link->target);
1112 
1113 	pf = &ccb->ccb_frame->mfr_pass;
1114 	pf->mpf_header.mfh_cmd = MFI_CMD_LD_SCSI_IO;
1115 	pf->mpf_header.mfh_target_id = link->target;
1116 	pf->mpf_header.mfh_lun_id = 0;
1117 	pf->mpf_header.mfh_cdb_len = xs->cmdlen;
1118 	pf->mpf_header.mfh_timeout = 0;
1119 	pf->mpf_header.mfh_data_len = htole32(xs->datalen); /* XXX */
1120 	pf->mpf_header.mfh_sense_len = MFI_SENSE_SIZE;
1121 
1122 	pf->mpf_sense_addr = htole64(ccb->ccb_psense);
1123 
1124 	memset(pf->mpf_cdb, 0, 16);
1125 	memcpy(pf->mpf_cdb, &xs->cmd, xs->cmdlen);
1126 
1127 	ccb->ccb_done = mfi_scsi_xs_done;
1128 	ccb->ccb_cookie = xs;
1129 	ccb->ccb_frame_size = MFI_PASS_FRAME_SIZE;
1130 	ccb->ccb_sgl = &pf->mpf_sgl;
1131 
1132 	if (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT))
1133 		ccb->ccb_direction = xs->flags & SCSI_DATA_IN ?
1134 		    MFI_DATA_IN : MFI_DATA_OUT;
1135 	else
1136 		ccb->ccb_direction = MFI_DATA_NONE;
1137 
1138 	if (xs->data) {
1139 		ccb->ccb_data = xs->data;
1140 		ccb->ccb_len = xs->datalen;
1141 
1142 		if (mfi_create_sgl(sc, ccb, (xs->flags & SCSI_NOSLEEP) ?
1143 		    BUS_DMA_NOWAIT : BUS_DMA_WAITOK))
1144 			return (1);
1145 	}
1146 
1147 	return (0);
1148 }
1149 
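/*
 * SCSI command entry point for logical disks: READ/WRITE CDBs use the
 * fast LD I/O frame path, SYNCHRONIZE CACHE becomes a cache flush
 * DCMD, and everything else is sent as an LD SCSI pass-through frame.
 * SCSI_POLL requests are polled, all others are posted asynchronously.
 */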
1150 void
1151 mfi_scsi_cmd(struct scsi_xfer *xs)
1152 {
1153 	struct scsi_link	*link = xs->sc_link;
1154 	struct mfi_softc	*sc = link->bus->sb_adapter_softc;
1155 	struct mfi_ccb		*ccb = xs->io;
1156 	struct scsi_rw		*rw;
1157 	struct scsi_rw_10	*rw10;
1158 	struct scsi_rw_16	*rw16;
1159 	uint64_t		blockno;
1160 	uint32_t		blockcnt;
1161 	uint8_t			target = link->target;
1162 	union mfi_mbox		mbox;
1163 
1164 	DNPRINTF(MFI_D_CMD, "%s: mfi_scsi_cmd opcode: %#x\n",
1165 	    DEVNAME(sc), xs->cmd.opcode);
1166 
1167 	KERNEL_UNLOCK();
1168 
1169 	if (!sc->sc_ld[target].ld_present) {
1170 		DNPRINTF(MFI_D_CMD, "%s: invalid target %d\n",
1171 		    DEVNAME(sc), target);
1172 		goto stuffup;
1173 	}
1174 
1175 	mfi_scrub_ccb(ccb);
1176 
1177 	xs->error = XS_NOERROR;
1178 
1179 	switch (xs->cmd.opcode) {
1180 	/* IO path */
1181 	case READ_10:
1182 	case WRITE_10:
1183 		rw10 = (struct scsi_rw_10 *)&xs->cmd;
1184 		blockno = (uint64_t)_4btol(rw10->addr);
1185 		blockcnt = _2btol(rw10->length);
1186 		if (mfi_scsi_io(sc, ccb, xs, blockno, blockcnt))
1187 			goto stuffup;
1188 		break;
1189 
1190 	case READ_COMMAND:
1191 	case WRITE_COMMAND:
1192 		rw = (struct scsi_rw *)&xs->cmd;
1193 		blockno =
1194 		    (uint64_t)(_3btol(rw->addr) & (SRW_TOPADDR << 16 | 0xffff));
1195 		blockcnt = rw->length ? rw->length : 0x100;
1196 		if (mfi_scsi_io(sc, ccb, xs, blockno, blockcnt))
1197 			goto stuffup;
1198 		break;
1199 
1200 	case READ_16:
1201 	case WRITE_16:
1202 		rw16 = (struct scsi_rw_16 *)&xs->cmd;
1203 		blockno = _8btol(rw16->addr);
1204 		blockcnt = _4btol(rw16->length);
1205 		if (mfi_scsi_io(sc, ccb, xs, blockno, blockcnt))
1206 			goto stuffup;
1207 		break;
1208 
1209 	case SYNCHRONIZE_CACHE:
1210 		mbox.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE;
1211 		if (mfi_do_mgmt(sc, ccb, MR_DCMD_CTRL_CACHE_FLUSH,
1212 		    MFI_DATA_NONE, 0, NULL, &mbox))
1213 			goto stuffup;
1214 
1215 		goto complete;
1216 		/* NOTREACHED */
1217 
1218 	default:
1219 		if (mfi_scsi_ld(sc, ccb, xs))
1220 			goto stuffup;
1221 		break;
1222 	}
1223 
1224 	DNPRINTF(MFI_D_CMD, "%s: start io %d\n", DEVNAME(sc), target);
1225 
1226 	if (xs->flags & SCSI_POLL)
1227 		mfi_poll(sc, ccb);
1228 	else
1229 		mfi_start(sc, ccb);
1230 
1231 	KERNEL_LOCK();
1232 	return;
1233 
1234 stuffup:
1235 	xs->error = XS_DRIVER_STUFFUP;
1236 complete:
1237 	KERNEL_LOCK();
1238 	scsi_done(xs);
1239 }
1240 
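/*
 * Default SG list writer: copy the dmamap segments into the frame's
 * 32-bit or 64-bit SG format and return the number of SGL bytes used.
 */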
1241 u_int
1242 mfi_default_sgd_load(struct mfi_softc *sc, struct mfi_ccb *ccb)
1243 {
1244 	struct mfi_frame_header	*hdr = &ccb->ccb_frame->mfr_header;
1245 	union mfi_sgl		*sgl = ccb->ccb_sgl;
1246 	bus_dma_segment_t	*sgd = ccb->ccb_dmamap->dm_segs;
1247 	int			 i;
1248 
1249 	hdr->mfh_flags |= sc->sc_sgl_flags;
1250 
1251 	for (i = 0; i < ccb->ccb_dmamap->dm_nsegs; i++) {
1252 		if (sc->sc_64bit_dma) {
1253 			sgl->sg64[i].addr = htole64(sgd[i].ds_addr);
1254 			sgl->sg64[i].len = htole32(sgd[i].ds_len);
1255 			DNPRINTF(MFI_D_DMA, "%s: addr: %#llx  len: %#x\n",
1256 			    DEVNAME(sc), sgl->sg64[i].addr, sgl->sg64[i].len);
1257 		} else {
1258 			sgl->sg32[i].addr = htole32(sgd[i].ds_addr);
1259 			sgl->sg32[i].len = htole32(sgd[i].ds_len);
1260 			DNPRINTF(MFI_D_DMA, "%s: addr: %#x  len: %#x\n",
1261 			    DEVNAME(sc), sgl->sg32[i].addr, sgl->sg32[i].len);
1262 		}
1263 	}
1264 
1265 	return (ccb->ccb_dmamap->dm_nsegs *
1266 	    (sc->sc_64bit_dma ? sizeof(sgl->sg64) : sizeof(sgl->sg32)));
1267 }
1268 
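/*
 * Load the ccb data buffer into its dmamap, emit the SG entries via
 * the IOP-specific sgd_load hook, set the frame direction flag, sync
 * the buffer and work out how many extra frames the SGL spills into.
 */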
1269 int
1270 mfi_create_sgl(struct mfi_softc *sc, struct mfi_ccb *ccb, int flags)
1271 {
1272 	struct mfi_frame_header	*hdr = &ccb->ccb_frame->mfr_header;
1273 	int			error;
1274 
1275 	DNPRINTF(MFI_D_DMA, "%s: mfi_create_sgl %p\n", DEVNAME(sc),
1276 	    ccb->ccb_data);
1277 
1278 	if (!ccb->ccb_data) {
1279 		hdr->mfh_sg_count = 0;
1280 		return (1);
1281 	}
1282 
1283 	error = bus_dmamap_load(sc->sc_dmat, ccb->ccb_dmamap,
1284 	    ccb->ccb_data, ccb->ccb_len, NULL, flags);
1285 	if (error) {
1286 		if (error == EFBIG)
1287 			printf("more than %d dma segs\n",
1288 			    sc->sc_max_sgl);
1289 		else
1290 			printf("error %d loading dma map\n", error);
1291 		return (1);
1292 	}
1293 
1294 	ccb->ccb_frame_size += mfi_sgd_load(sc, ccb);
1295 
1296 	if (ccb->ccb_direction == MFI_DATA_IN) {
1297 		hdr->mfh_flags |= MFI_FRAME_DIR_READ;
1298 		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
1299 		    ccb->ccb_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
1300 	} else {
1301 		hdr->mfh_flags |= MFI_FRAME_DIR_WRITE;
1302 		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
1303 		    ccb->ccb_dmamap->dm_mapsize, BUS_DMASYNC_PREWRITE);
1304 	}
1305 
1306 	hdr->mfh_sg_count = ccb->ccb_dmamap->dm_nsegs;
1307 	ccb->ccb_extra_frames = (ccb->ccb_frame_size - 1) / MFI_FRAME_SIZE;
1308 
1309 	DNPRINTF(MFI_D_DMA, "%s: sg_count: %d  frame_size: %d  frames_size: %d"
1310 	    "  dm_nsegs: %d  extra_frames: %d\n",
1311 	    DEVNAME(sc),
1312 	    hdr->mfh_sg_count,
1313 	    ccb->ccb_frame_size,
1314 	    sc->sc_frames_size,
1315 	    ccb->ccb_dmamap->dm_nsegs,
1316 	    ccb->ccb_extra_frames);
1317 
1318 	return (0);
1319 }
1320 
1321 int
1322 mfi_mgmt(struct mfi_softc *sc, uint32_t opc, uint32_t dir, uint32_t len,
1323     void *buf, const union mfi_mbox *mbox)
1324 {
1325 	struct mfi_ccb *ccb;
1326 	int rv;
1327 
1328 	ccb = scsi_io_get(&sc->sc_iopool, 0);
1329 	mfi_scrub_ccb(ccb);
1330 	rv = mfi_do_mgmt(sc, ccb, opc, dir, len, buf, mbox);
1331 	scsi_io_put(&sc->sc_iopool, ccb);
1332 
1333 	return (rv);
1334 }
1335 
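/*
 * Issue a DCMD management command.  Data is staged through a
 * dma_alloc()'d bounce buffer that is copied in or out around the
 * command; when the kernel is cold the command is polled, otherwise
 * mfi_exec() sleeps until it completes.
 */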
1336 int
1337 mfi_do_mgmt(struct mfi_softc *sc, struct mfi_ccb *ccb, uint32_t opc,
1338     uint32_t dir, uint32_t len, void *buf, const union mfi_mbox *mbox)
1339 {
1340 	struct mfi_dcmd_frame *dcmd;
1341 	uint8_t *dma_buf = NULL;
1342 	int rv = EINVAL;
1343 
1344 	DNPRINTF(MFI_D_MISC, "%s: mfi_do_mgmt %#x\n", DEVNAME(sc), opc);
1345 
1346 	dma_buf = dma_alloc(len, cold ? PR_NOWAIT : PR_WAITOK);
1347 	if (dma_buf == NULL)
1348 		goto done;
1349 
1350 	dcmd = &ccb->ccb_frame->mfr_dcmd;
1351 	memset(&dcmd->mdf_mbox, 0, sizeof(dcmd->mdf_mbox));
1352 	dcmd->mdf_header.mfh_cmd = MFI_CMD_DCMD;
1353 	dcmd->mdf_header.mfh_timeout = 0;
1354 
1355 	dcmd->mdf_opcode = opc;
1356 	dcmd->mdf_header.mfh_data_len = 0;
1357 	ccb->ccb_direction = dir;
1358 
1359 	ccb->ccb_frame_size = MFI_DCMD_FRAME_SIZE;
1360 
1361 	/* handle special opcodes */
1362 	if (mbox != NULL)
1363 		memcpy(&dcmd->mdf_mbox, mbox, sizeof(dcmd->mdf_mbox));
1364 
1365 	if (dir != MFI_DATA_NONE) {
1366 		if (dir == MFI_DATA_OUT)
1367 			memcpy(dma_buf, buf, len);
1368 		dcmd->mdf_header.mfh_data_len = len;
1369 		ccb->ccb_data = dma_buf;
1370 		ccb->ccb_len = len;
1371 		ccb->ccb_sgl = &dcmd->mdf_sgl;
1372 
1373 		if (mfi_create_sgl(sc, ccb, cold ? BUS_DMA_NOWAIT :
1374 		    BUS_DMA_WAITOK)) {
1375 			rv = EINVAL;
1376 			goto done;
1377 		}
1378 	}
1379 
1380 	if (cold) {
1381 		ccb->ccb_done = mfi_empty_done;
1382 		mfi_poll(sc, ccb);
1383 	} else
1384 		mfi_exec(sc, ccb);
1385 
1386 	if (dcmd->mdf_header.mfh_cmd_status != MFI_STAT_OK) {
1387 		if (dcmd->mdf_header.mfh_cmd_status == MFI_STAT_WRONG_STATE)
1388 			rv = ENXIO;
1389 		else
1390 			rv = EIO;
1391 		goto done;
1392 	}
1393 
1394 	if (dir == MFI_DATA_IN)
1395 		memcpy(buf, dma_buf, len);
1396 
1397 	rv = 0;
1398 done:
1399 	if (dma_buf)
1400 		dma_free(dma_buf, len);
1401 
1402 	return (rv);
1403 }
1404 
1405 int
1406 mfi_scsi_ioctl(struct scsi_link *link, u_long cmd, caddr_t addr, int flag)
1407 {
1408 	struct mfi_softc	*sc = link->bus->sb_adapter_softc;
1409 
1410 	DNPRINTF(MFI_D_IOCTL, "%s: mfi_scsi_ioctl\n", DEVNAME(sc));
1411 
1412 	switch (cmd) {
1413 	case DIOCGCACHE:
1414 	case DIOCSCACHE:
1415 		return (mfi_ioctl_cache(link, cmd, (struct dk_cache *)addr));
1416 		break;
1417 
1418 	default:
1419 		if (sc->sc_ioctl)
1420 			return (sc->sc_ioctl(&sc->sc_dev, cmd, addr));
1421 		break;
1422 	}
1423 
1424 	return (ENOTTY);
1425 }
1426 
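/*
 * Back the DIOCGCACHE/DIOCSCACHE ioctls with the logical disk
 * properties: controllers with cache memory report and toggle the
 * read/write cache policy, otherwise the physical disk cache is used
 * and read caching is not supported.
 */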
1427 int
1428 mfi_ioctl_cache(struct scsi_link *link, u_long cmd,  struct dk_cache *dc)
1429 {
1430 	struct mfi_softc	*sc = link->bus->sb_adapter_softc;
1431 	int			 rv, wrenable, rdenable;
1432 	struct mfi_ld_prop	 ldp;
1433 	union mfi_mbox		 mbox;
1434 
1435 	if (mfi_get_info(sc)) {
1436 		rv = EIO;
1437 		goto done;
1438 	}
1439 
1440 	if (!sc->sc_ld[link->target].ld_present) {
1441 		rv = EIO;
1442 		goto done;
1443 	}
1444 
1445 	memset(&mbox, 0, sizeof(mbox));
1446 	mbox.b[0] = link->target;
1447 	if ((rv = mfi_mgmt(sc, MR_DCMD_LD_GET_PROPERTIES, MFI_DATA_IN,
1448 	    sizeof(ldp), &ldp, &mbox)) != 0)
1449 		goto done;
1450 
1451 	if (sc->sc_info.mci_memory_size > 0) {
1452 		wrenable = ISSET(ldp.mlp_cur_cache_policy,
1453 		    MR_LD_CACHE_ALLOW_WRITE_CACHE)? 1 : 0;
1454 		rdenable = ISSET(ldp.mlp_cur_cache_policy,
1455 		    MR_LD_CACHE_ALLOW_READ_CACHE)? 1 : 0;
1456 	} else {
1457 		wrenable = ISSET(ldp.mlp_diskcache_policy,
1458 		    MR_LD_DISK_CACHE_ENABLE)? 1 : 0;
1459 		rdenable = 0;
1460 	}
1461 
1462 	if (cmd == DIOCGCACHE) {
1463 		dc->wrcache = wrenable;
1464 		dc->rdcache = rdenable;
1465 		goto done;
1466 	} /* else DIOCSCACHE */
1467 
1468 	if (((dc->wrcache) ? 1 : 0) == wrenable &&
1469 	    ((dc->rdcache) ? 1 : 0) == rdenable)
1470 		goto done;
1471 
1472 	memset(&mbox, 0, sizeof(mbox));
1473 	mbox.b[0] = ldp.mlp_ld.mld_target;
1474 	mbox.b[1] = ldp.mlp_ld.mld_res;
1475 	mbox.s[1] = ldp.mlp_ld.mld_seq;
1476 
1477 	if (sc->sc_info.mci_memory_size > 0) {
1478 		if (dc->rdcache)
1479 			SET(ldp.mlp_cur_cache_policy,
1480 			    MR_LD_CACHE_ALLOW_READ_CACHE);
1481 		else
1482 			CLR(ldp.mlp_cur_cache_policy,
1483 			    MR_LD_CACHE_ALLOW_READ_CACHE);
1484 		if (dc->wrcache)
1485 			SET(ldp.mlp_cur_cache_policy,
1486 			    MR_LD_CACHE_ALLOW_WRITE_CACHE);
1487 		else
1488 			CLR(ldp.mlp_cur_cache_policy,
1489 			    MR_LD_CACHE_ALLOW_WRITE_CACHE);
1490 	} else {
1491 		if (dc->rdcache) {
1492 			rv = EOPNOTSUPP;
1493 			goto done;
1494 		}
1495 		if (dc->wrcache)
1496 			ldp.mlp_diskcache_policy = MR_LD_DISK_CACHE_ENABLE;
1497 		else
1498 			ldp.mlp_diskcache_policy = MR_LD_DISK_CACHE_DISABLE;
1499 	}
1500 
1501 	rv = mfi_mgmt(sc, MR_DCMD_LD_SET_PROPERTIES, MFI_DATA_OUT, sizeof(ldp),
1502 	    &ldp, &mbox);
1503 
1504 done:
1505 	return (rv);
1506 }
1507 
1508 #if NBIO > 0
1509 int
1510 mfi_ioctl(struct device *dev, u_long cmd, caddr_t addr)
1511 {
1512 	struct mfi_softc	*sc = (struct mfi_softc *)dev;
1513 	int error = 0;
1514 
1515 	DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl ", DEVNAME(sc));
1516 
1517 	rw_enter_write(&sc->sc_lock);
1518 
1519 	switch (cmd) {
1520 	case BIOCINQ:
1521 		DNPRINTF(MFI_D_IOCTL, "inq\n");
1522 		error = mfi_ioctl_inq(sc, (struct bioc_inq *)addr);
1523 		break;
1524 
1525 	case BIOCVOL:
1526 		DNPRINTF(MFI_D_IOCTL, "vol\n");
1527 		error = mfi_ioctl_vol(sc, (struct bioc_vol *)addr);
1528 		break;
1529 
1530 	case BIOCDISK:
1531 		DNPRINTF(MFI_D_IOCTL, "disk\n");
1532 		error = mfi_ioctl_disk(sc, (struct bioc_disk *)addr);
1533 		break;
1534 
1535 	case BIOCALARM:
1536 		DNPRINTF(MFI_D_IOCTL, "alarm\n");
1537 		error = mfi_ioctl_alarm(sc, (struct bioc_alarm *)addr);
1538 		break;
1539 
1540 	case BIOCBLINK:
1541 		DNPRINTF(MFI_D_IOCTL, "blink\n");
1542 		error = mfi_ioctl_blink(sc, (struct bioc_blink *)addr);
1543 		break;
1544 
1545 	case BIOCSETSTATE:
1546 		DNPRINTF(MFI_D_IOCTL, "setstate\n");
1547 		error = mfi_ioctl_setstate(sc, (struct bioc_setstate *)addr);
1548 		break;
1549 
1550 	case BIOCPATROL:
1551 		DNPRINTF(MFI_D_IOCTL, "patrol\n");
1552 		error = mfi_ioctl_patrol(sc, (struct bioc_patrol *)addr);
1553 		break;
1554 
1555 	default:
1556 		DNPRINTF(MFI_D_IOCTL, " invalid ioctl\n");
1557 		error = ENOTTY;
1558 	}
1559 
1560 	rw_exit_write(&sc->sc_lock);
1561 
1562 	return (error);
1563 }
1564 
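/*
 * Refresh the cached controller state used by the bio ioctls:
 * controller info, the RAID configuration (fetched twice, first to
 * learn its full size), the LD list and per-LD details, and a count
 * of configured physical disks.
 */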
1565 int
1566 mfi_bio_getitall(struct mfi_softc *sc)
1567 {
1568 	int			i, d, size, rv = EINVAL;
1569 	union mfi_mbox		mbox;
1570 	struct mfi_conf		*cfg = NULL;
1571 	struct mfi_ld_details	*ld_det = NULL;
1572 
1573 	/* get info */
1574 	if (mfi_get_info(sc)) {
1575 		DNPRINTF(MFI_D_IOCTL, "%s: mfi_get_info failed\n",
1576 		    DEVNAME(sc));
1577 		goto done;
1578 	}
1579 
1580 	/* send single element command to retrieve size for full structure */
1581 	cfg = malloc(sizeof *cfg, M_DEVBUF, M_NOWAIT | M_ZERO);
1582 	if (cfg == NULL)
1583 		goto done;
1584 	if (mfi_mgmt(sc, MR_DCMD_CONF_GET, MFI_DATA_IN, sizeof *cfg, cfg,
1585 	    NULL)) {
1586 		free(cfg, M_DEVBUF, sizeof *cfg);
1587 		goto done;
1588 	}
1589 
1590 	size = cfg->mfc_size;
1591 	free(cfg, M_DEVBUF, sizeof *cfg);
1592 
1593 	/* memory for read config */
1594 	cfg = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
1595 	if (cfg == NULL)
1596 		goto done;
1597 	if (mfi_mgmt(sc, MR_DCMD_CONF_GET, MFI_DATA_IN, size, cfg, NULL)) {
1598 		free(cfg, M_DEVBUF, size);
1599 		goto done;
1600 	}
1601 
1602 	/* replace current pointer with new one */
1603 	if (sc->sc_cfg)
1604 		free(sc->sc_cfg, M_DEVBUF, 0);
1605 	sc->sc_cfg = cfg;
1606 
1607 	/* get all ld info */
1608 	if (mfi_mgmt(sc, MR_DCMD_LD_GET_LIST, MFI_DATA_IN,
1609 	    sizeof(sc->sc_ld_list), &sc->sc_ld_list, NULL))
1610 		goto done;
1611 
1612 	/* get memory for all ld structures */
1613 	size = cfg->mfc_no_ld * sizeof(struct mfi_ld_details);
1614 	if (sc->sc_ld_sz != size) {
1615 		if (sc->sc_ld_details)
1616 			free(sc->sc_ld_details, M_DEVBUF, 0);
1617 
1618 		ld_det = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
1619 		if (ld_det == NULL)
1620 			goto done;
1621 		sc->sc_ld_sz = size;
1622 		sc->sc_ld_details = ld_det;
1623 	}
1624 
1625 	/* find used physical disks */
1626 	size = sizeof(struct mfi_ld_details);
1627 	for (i = 0, d = 0; i < cfg->mfc_no_ld; i++) {
1628 		memset(&mbox, 0, sizeof(mbox));
1629 		mbox.b[0] = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
1630 		if (mfi_mgmt(sc, MR_DCMD_LD_GET_INFO, MFI_DATA_IN, size,
1631 		    &sc->sc_ld_details[i], &mbox))
1632 			goto done;
1633 
1634 		d += sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_no_drv_per_span *
1635 		    sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_span_depth;
1636 	}
1637 	sc->sc_no_pd = d;
1638 
1639 	rv = 0;
1640 done:
1641 	return (rv);
1642 }
1643 
1644 int
1645 mfi_ioctl_inq(struct mfi_softc *sc, struct bioc_inq *bi)
1646 {
1647 	int			rv = EINVAL;
1648 	struct mfi_conf		*cfg = NULL;
1649 
1650 	DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_inq\n", DEVNAME(sc));
1651 
1652 	if (mfi_bio_getitall(sc)) {
1653 		DNPRINTF(MFI_D_IOCTL, "%s: mfi_bio_getitall failed\n",
1654 		    DEVNAME(sc));
1655 		goto done;
1656 	}
1657 
1658 	/* count unused disks as volumes */
1659 	if (sc->sc_cfg == NULL)
1660 		goto done;
1661 	cfg = sc->sc_cfg;
1662 
1663 	bi->bi_nodisk = sc->sc_info.mci_pd_disks_present;
1664 	bi->bi_novol = cfg->mfc_no_ld + cfg->mfc_no_hs;
1665 #if notyet
1666 	bi->bi_novol = cfg->mfc_no_ld + cfg->mfc_no_hs +
1667 	    (bi->bi_nodisk - sc->sc_no_pd);
1668 #endif
1669 	/* tell bio who we are */
1670 	strlcpy(bi->bi_dev, DEVNAME(sc), sizeof(bi->bi_dev));
1671 
1672 	rv = 0;
1673 done:
1674 	return (rv);
1675 }
1676 
1677 int
1678 mfi_ioctl_vol(struct mfi_softc *sc, struct bioc_vol *bv)
1679 {
1680 	int			i, per, rv = EINVAL;
1681 	struct scsi_link	*link;
1682 	struct device		*dev;
1683 
1684 	DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_vol %#x\n",
1685 	    DEVNAME(sc), bv->bv_volid);
1686 
1687 	/* we really could skip and expect that inq took care of it */
1688 	if (mfi_bio_getitall(sc)) {
1689 		DNPRINTF(MFI_D_IOCTL, "%s: mfi_bio_getitall failed\n",
1690 		    DEVNAME(sc));
1691 		goto done;
1692 	}
1693 
1694 	if (bv->bv_volid >= sc->sc_ld_list.mll_no_ld) {
1695 		/* go do hotspares & unused disks */
1696 		rv = mfi_bio_hs(sc, bv->bv_volid, MFI_MGMT_VD, bv);
1697 		goto done;
1698 	}
1699 
1700 	i = bv->bv_volid;
1701 	link = scsi_get_link(sc->sc_scsibus, i, 0);
1702 	if (link != NULL && link->device_softc != NULL) {
1703 		dev = link->device_softc;
1704 		strlcpy(bv->bv_dev, dev->dv_xname, sizeof(bv->bv_dev));
1705 	}
1706 
1707 	switch(sc->sc_ld_list.mll_list[i].mll_state) {
1708 	case MFI_LD_OFFLINE:
1709 		bv->bv_status = BIOC_SVOFFLINE;
1710 		break;
1711 
1712 	case MFI_LD_PART_DEGRADED:
1713 	case MFI_LD_DEGRADED:
1714 		bv->bv_status = BIOC_SVDEGRADED;
1715 		break;
1716 
1717 	case MFI_LD_ONLINE:
1718 		bv->bv_status = BIOC_SVONLINE;
1719 		break;
1720 
1721 	default:
1722 		bv->bv_status = BIOC_SVINVALID;
1723 		DNPRINTF(MFI_D_IOCTL, "%s: invalid logical disk state %#x\n",
1724 		    DEVNAME(sc),
1725 		    sc->sc_ld_list.mll_list[i].mll_state);
1726 	}
1727 
1728 	/* additional status can modify MFI status */
1729 	switch (sc->sc_ld_details[i].mld_progress.mlp_in_prog) {
1730 	case MFI_LD_PROG_CC:
1731 		bv->bv_status = BIOC_SVSCRUB;
1732 		per = (int)sc->sc_ld_details[i].mld_progress.mlp_cc.mp_progress;
1733 		bv->bv_percent = (per * 100) / 0xffff;
1734 		bv->bv_seconds =
1735 		    sc->sc_ld_details[i].mld_progress.mlp_cc.mp_elapsed_seconds;
1736 		break;
1737 
1738 	case MFI_LD_PROG_BGI:
1739 		bv->bv_status = BIOC_SVSCRUB;
1740 		per = (int)sc->sc_ld_details[i].mld_progress.mlp_bgi.mp_progress;
1741 		bv->bv_percent = (per * 100) / 0xffff;
1742 		bv->bv_seconds =
1743 		    sc->sc_ld_details[i].mld_progress.mlp_bgi.mp_elapsed_seconds;
1744 		break;
1745 
1746 	case MFI_LD_PROG_FGI:
1747 	case MFI_LD_PROG_RECONSTRUCT:
1748 		/* nothing yet */
1749 		break;
1750 	}
1751 
1752 	if (sc->sc_ld_details[i].mld_cfg.mlc_prop.mlp_cur_cache_policy & 0x01)
1753 		bv->bv_cache = BIOC_CVWRITEBACK;
1754 	else
1755 		bv->bv_cache = BIOC_CVWRITETHROUGH;
1756 
1757 	/*
1758 	 * The RAID levels are determined per the SNIA DDF spec, this is only
1759 	 * a subset that is valid for the MFI controller.
1760 	 */
1761 	bv->bv_level = sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_pri_raid;
1762 	if (sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_sec_raid ==
1763 	    MFI_DDF_SRL_SPANNED)
1764 		bv->bv_level *= 10;
1765 
1766 	bv->bv_nodisk = sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_no_drv_per_span *
1767 	    sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_span_depth;
1768 
1769 	bv->bv_size = sc->sc_ld_details[i].mld_size * 512; /* bytes per block */
1770 
1771 	rv = 0;
1772 done:
1773 	return (rv);
1774 }
1775 
1776 int
1777 mfi_ioctl_disk(struct mfi_softc *sc, struct bioc_disk *bd)
1778 {
1779 	struct mfi_conf		*cfg;
1780 	struct mfi_array	*ar;
1781 	struct mfi_ld_cfg	*ld;
1782 	struct mfi_pd_details	*pd;
1783 	struct mfi_pd_progress	*mfp;
1784 	struct mfi_progress	*mp;
1785 	struct scsi_inquiry_data *inqbuf;
1786 	char			vend[8+16+4+1], *vendp;
1787 	int			rv = EINVAL;
1788 	int			arr, vol, disk, span;
1789 	union mfi_mbox		mbox;
1790 
1791 	DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_disk %#x\n",
1792 	    DEVNAME(sc), bd->bd_diskid);
1793 
1794 	/* we really could skip and expect that inq took care of it */
1795 	if (mfi_bio_getitall(sc)) {
1796 		DNPRINTF(MFI_D_IOCTL, "%s: mfi_bio_getitall failed\n",
1797 		    DEVNAME(sc));
1798 		return (rv);
1799 	}
1800 	cfg = sc->sc_cfg;
1801 
1802 	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);
1803 
1804 	ar = cfg->mfc_array;
1805 	vol = bd->bd_volid;
1806 	if (vol >= cfg->mfc_no_ld) {
1807 		/* do hotspares */
1808 		rv = mfi_bio_hs(sc, bd->bd_volid, MFI_MGMT_SD, bd);
1809 		goto freeme;
1810 	}
1811 
1812 	/* calculate offset to ld structure */
1813 	ld = (struct mfi_ld_cfg *)(
1814 	    ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
1815 	    cfg->mfc_array_size * cfg->mfc_no_array);
1816 
1817 	/* use span 0 only when raid group is not spanned */
1818 	if (ld[vol].mlc_parm.mpa_span_depth > 1)
1819 		span = bd->bd_diskid / ld[vol].mlc_parm.mpa_no_drv_per_span;
1820 	else
1821 		span = 0;
1822 	arr = ld[vol].mlc_span[span].mls_index;
1823 
1824 	/* offset disk into pd list */
1825 	disk = bd->bd_diskid % ld[vol].mlc_parm.mpa_no_drv_per_span;
1826 	bd->bd_target = ar[arr].pd[disk].mar_enc_slot;
1827 
1828 	/* get status */
1829 	switch (ar[arr].pd[disk].mar_pd_state) {
1830 	case MFI_PD_UNCONFIG_GOOD:
1831 	case MFI_PD_FAILED:
1832 		bd->bd_status = BIOC_SDFAILED;
1833 		break;
1834 
1835 	case MFI_PD_HOTSPARE: /* XXX dedicated hotspare part of array? */
1836 		bd->bd_status = BIOC_SDHOTSPARE;
1837 		break;
1838 
1839 	case MFI_PD_OFFLINE:
1840 		bd->bd_status = BIOC_SDOFFLINE;
1841 		break;
1842 
1843 	case MFI_PD_REBUILD:
1844 		bd->bd_status = BIOC_SDREBUILD;
1845 		break;
1846 
1847 	case MFI_PD_ONLINE:
1848 		bd->bd_status = BIOC_SDONLINE;
1849 		break;
1850 
1851 	case MFI_PD_UNCONFIG_BAD: /* XXX define new state in bio */
1852 	default:
1853 		bd->bd_status = BIOC_SDINVALID;
1854 		break;
1855 	}
1856 
1857 	/* get the remaining fields */
1858 	memset(&mbox, 0, sizeof(mbox));
1859 	mbox.s[0] = ar[arr].pd[disk].mar_pd.mfp_id;
1860 	if (mfi_mgmt(sc, MR_DCMD_PD_GET_INFO, MFI_DATA_IN,
1861 	    sizeof *pd, pd, &mbox)) {
1862 		/* disk is missing but succeed the command anyway */
1863 		rv = 0;
1864 		goto freeme;
1865 	}
1866 
1867 	bd->bd_size = pd->mpd_size * 512; /* bytes per block */
1868 
1869 	/* if pd->mpd_enc_idx is 0 then it is not in an enclosure */
1870 	bd->bd_channel = pd->mpd_enc_idx;
1871 
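	/* vendor (8), product (16) and revision (4) are contiguous in the
	 * INQUIRY data, so copy them into vend[] as a single string */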
1872 	inqbuf = (struct scsi_inquiry_data *)&pd->mpd_inq_data;
1873 	vendp = inqbuf->vendor;
1874 	memcpy(vend, vendp, sizeof vend - 1);
1875 	vend[sizeof vend - 1] = '\0';
1876 	strlcpy(bd->bd_vendor, vend, sizeof(bd->bd_vendor));
1877 
1878 	/* XXX find a way to retrieve serial nr from drive */
1879 	/* XXX find a way to get bd_procdev */
1880 
1881 	mfp = &pd->mpd_progress;
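	/* patrol read progress is scaled 0..0xffff as well */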
1882 	if (mfp->mfp_in_prog & MFI_PD_PROG_PR) {
1883 		mp = &mfp->mfp_patrol_read;
1884 		bd->bd_patrol.bdp_percent = (mp->mp_progress * 100) / 0xffff;
1885 		bd->bd_patrol.bdp_seconds = mp->mp_elapsed_seconds;
1886 	}
1887 
1888 	rv = 0;
1889 freeme:
1890 	free(pd, M_DEVBUF, sizeof *pd);
1891 
1892 	return (rv);
1893 }
1894 
1895 int
1896 mfi_ioctl_alarm(struct mfi_softc *sc, struct bioc_alarm *ba)
1897 {
1898 	uint32_t		opc, dir = MFI_DATA_NONE;
1899 	int			rv = 0;
1900 	int8_t			ret;
1901 
1902 	switch (ba->ba_opcode) {
1903 	case BIOC_SADISABLE:
1904 		opc = MR_DCMD_SPEAKER_DISABLE;
1905 		break;
1906 
1907 	case BIOC_SAENABLE:
1908 		opc = MR_DCMD_SPEAKER_ENABLE;
1909 		break;
1910 
1911 	case BIOC_SASILENCE:
1912 		opc = MR_DCMD_SPEAKER_SILENCE;
1913 		break;
1914 
1915 	case BIOC_GASTATUS:
1916 		opc = MR_DCMD_SPEAKER_GET;
1917 		dir = MFI_DATA_IN;
1918 		break;
1919 
1920 	case BIOC_SATEST:
1921 		opc = MR_DCMD_SPEAKER_TEST;
1922 		break;
1923 
1924 	default:
1925 		DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_alarm biocalarm invalid "
1926 		    "opcode %x\n", DEVNAME(sc), ba->ba_opcode);
1927 		return (EINVAL);
1928 	}
1929 
1930 	if (mfi_mgmt(sc, opc, dir, sizeof(ret), &ret, NULL))
1931 		rv = EINVAL;
1932 	else
1933 		if (ba->ba_opcode == BIOC_GASTATUS)
1934 			ba->ba_status = ret;
1935 		else
1936 			ba->ba_status = 0;
1937 
1938 	return (rv);
1939 }
1940 
1941 int
1942 mfi_ioctl_blink(struct mfi_softc *sc, struct bioc_blink *bb)
1943 {
1944 	int			i, found, rv = EINVAL;
1945 	union mfi_mbox		mbox;
1946 	uint32_t		cmd;
1947 	struct mfi_pd_list	*pd;
1948 
1949 	DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_blink %x\n", DEVNAME(sc),
1950 	    bb->bb_status);
1951 
1952 	/* channel 0 means the disk is not in an enclosure, so it can't be blinked */
1953 	if (bb->bb_channel == 0)
1954 		return (EINVAL);
1955 
1956 	pd = malloc(sizeof(*pd), M_DEVBUF, M_WAITOK);
1957 
1958 	if (mfi_mgmt(sc, MR_DCMD_PD_GET_LIST, MFI_DATA_IN,
1959 	    sizeof(*pd), pd, NULL))
1960 		goto done;
1961 
1962 	for (i = 0, found = 0; i < pd->mpl_no_pd; i++)
1963 		if (bb->bb_channel == pd->mpl_address[i].mpa_enc_index &&
1964 		    bb->bb_target == pd->mpl_address[i].mpa_enc_slot) {
1965 			found = 1;
1966 			break;
1967 		}
1968 
1969 	if (!found)
1970 		goto done;
1971 
1972 	memset(&mbox, 0, sizeof(mbox));
1973 	mbox.s[0] = pd->mpl_address[i].mpa_pd_id;
1974 
1975 	switch (bb->bb_status) {
1976 	case BIOC_SBUNBLINK:
1977 		cmd = MR_DCMD_PD_UNBLINK;
1978 		break;
1979 
1980 	case BIOC_SBBLINK:
1981 		cmd = MR_DCMD_PD_BLINK;
1982 		break;
1983 
1984 	case BIOC_SBALARM:
1985 	default:
1986 		DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_blink biocblink invalid "
1987 		    "opcode %x\n", DEVNAME(sc), bb->bb_status);
1988 		goto done;
1989 	}
1990 
1991 
1992 	rv = mfi_mgmt(sc, cmd, MFI_DATA_NONE, 0, NULL, &mbox);
1993 
1994 done:
1995 	free(pd, M_DEVBUF, sizeof *pd);
1996 	return (rv);
1997 }
1998 
1999 int
2000 mfi_ioctl_setstate(struct mfi_softc *sc, struct bioc_setstate *bs)
2001 {
2002 	struct mfi_pd_list	*pd;
2003 	struct mfi_pd_details	*info;
2004 	int			i, found, rv = EINVAL;
2005 	union mfi_mbox		mbox;
2006 
2007 	DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_setstate %x\n", DEVNAME(sc),
2008 	    bs->bs_status);
2009 
2010 	pd = malloc(sizeof(*pd), M_DEVBUF, M_WAITOK);
2011 	info = malloc(sizeof *info, M_DEVBUF, M_WAITOK);
2012 
2013 	if (mfi_mgmt(sc, MR_DCMD_PD_GET_LIST, MFI_DATA_IN,
2014 	    sizeof(*pd), pd, NULL))
2015 		goto done;
2016 
2017 	for (i = 0, found = 0; i < pd->mpl_no_pd; i++)
2018 		if (bs->bs_channel == pd->mpl_address[i].mpa_enc_index &&
2019 		    bs->bs_target == pd->mpl_address[i].mpa_enc_slot) {
2020 			found = 1;
2021 			break;
2022 		}
2023 
2024 	if (!found)
2025 		goto done;
2026 
2027 	memset(&mbox, 0, sizeof(mbox));
2028 	mbox.s[0] = pd->mpl_address[i].mpa_pd_id;
2029 
2030 	if (mfi_mgmt(sc, MR_DCMD_PD_GET_INFO, MFI_DATA_IN,
2031 	    sizeof *info, info, &mbox))
2032 		goto done;
2033 
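	/* PD_SET_STATE mailbox: s[0] = device id, s[1] = sequence number,
	 * b[4] = requested state (filled in below) */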
2034 	mbox.s[0] = pd->mpl_address[i].mpa_pd_id;
2035 	mbox.s[1] = info->mpd_pd.mfp_seq;
2036 
2037 	switch (bs->bs_status) {
2038 	case BIOC_SSONLINE:
2039 		mbox.b[4] = MFI_PD_ONLINE;
2040 		break;
2041 
2042 	case BIOC_SSOFFLINE:
2043 		mbox.b[4] = MFI_PD_OFFLINE;
2044 		break;
2045 
2046 	case BIOC_SSHOTSPARE:
2047 		mbox.b[4] = MFI_PD_HOTSPARE;
2048 		break;
2049 
2050 	case BIOC_SSREBUILD:
2051 		mbox.b[4] = MFI_PD_REBUILD;
2052 		break;
2053 
2054 	default:
2055 		DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_setstate invalid "
2056 		    "opcode %x\n", DEVNAME(sc), bs->bs_status);
2057 		goto done;
2058 	}
2059 
2060 	rv = mfi_mgmt(sc, MR_DCMD_PD_SET_STATE, MFI_DATA_NONE, 0, NULL, &mbox);
2061 
2062 done:
2063 	free(pd, M_DEVBUF, sizeof *pd);
2064 	free(info, M_DEVBUF, sizeof *info);
2065 	return (rv);
2066 }
2067 
2068 int
2069 mfi_ioctl_patrol(struct mfi_softc *sc, struct bioc_patrol *bp)
2070 {
2071 	uint32_t		opc, dir = MFI_DATA_NONE;
2072 	int			rv = 0;
2073 	struct mfi_pr_properties prop;
2074 	struct mfi_pr_status	status;
2075 	uint32_t		time, exec_freq;
2076 
2077 	switch (bp->bp_opcode) {
2078 	case BIOC_SPSTOP:
2079 	case BIOC_SPSTART:
2080 		if (bp->bp_opcode == BIOC_SPSTART)
2081 			opc = MR_DCMD_PR_START;
2082 		else
2083 			opc = MR_DCMD_PR_STOP;
2084 		dir = MFI_DATA_IN;
2085 		if (mfi_mgmt(sc, opc, dir, 0, NULL, NULL))
2086 			return (EINVAL);
2087 		break;
2088 
2089 	case BIOC_SPMANUAL:
2090 	case BIOC_SPDISABLE:
2091 	case BIOC_SPAUTO:
2092 		/* Get device's time. */
2093 		opc = MR_DCMD_TIME_SECS_GET;
2094 		dir = MFI_DATA_IN;
2095 		if (mfi_mgmt(sc, opc, dir, sizeof(time), &time, NULL))
2096 			return (EINVAL);
2097 
2098 		opc = MR_DCMD_PR_GET_PROPERTIES;
2099 		dir = MFI_DATA_IN;
2100 		if (mfi_mgmt(sc, opc, dir, sizeof(prop), &prop, NULL))
2101 			return (EINVAL);
2102 
2103 		switch (bp->bp_opcode) {
2104 		case BIOC_SPMANUAL:
2105 			prop.op_mode = MFI_PR_OPMODE_MANUAL;
2106 			break;
2107 		case BIOC_SPDISABLE:
2108 			prop.op_mode = MFI_PR_OPMODE_DISABLED;
2109 			break;
2110 		case BIOC_SPAUTO:
2111 			if (bp->bp_autoival != 0) {
2112 				if (bp->bp_autoival == -1)
2113 					/* continuously */
2114 					exec_freq = 0xffffffffU;
2115 				else if (bp->bp_autoival > 0)
2116 					exec_freq = bp->bp_autoival;
2117 				else
2118 					return (EINVAL);
2119 				prop.exec_freq = exec_freq;
2120 			}
2121 			if (bp->bp_autonext != 0) {
2122 				if (bp->bp_autonext < 0)
2123 					return (EINVAL);
2124 				else
2125 					prop.next_exec = time + bp->bp_autonext;
2126 			}
2127 			prop.op_mode = MFI_PR_OPMODE_AUTO;
2128 			break;
2129 		}
2130 
2131 		opc = MR_DCMD_PR_SET_PROPERTIES;
2132 		dir = MFI_DATA_OUT;
2133 		if (mfi_mgmt(sc, opc, dir, sizeof(prop), &prop, NULL))
2134 			return (EINVAL);
2135 
2136 		break;
2137 
2138 	case BIOC_GPSTATUS:
2139 		opc = MR_DCMD_PR_GET_PROPERTIES;
2140 		dir = MFI_DATA_IN;
2141 		if (mfi_mgmt(sc, opc, dir, sizeof(prop), &prop, NULL))
2142 			return (EINVAL);
2143 
2144 		opc = MR_DCMD_PR_GET_STATUS;
2145 		dir = MFI_DATA_IN;
2146 		if (mfi_mgmt(sc, opc, dir, sizeof(status), &status, NULL))
2147 			return (EINVAL);
2148 
2149 		/* Get device's time. */
2150 		opc = MR_DCMD_TIME_SECS_GET;
2151 		dir = MFI_DATA_IN;
2152 		if (mfi_mgmt(sc, opc, dir, sizeof(time), &time, NULL))
2153 			return (EINVAL);
2154 
2155 		switch (prop.op_mode) {
2156 		case MFI_PR_OPMODE_AUTO:
2157 			bp->bp_mode = BIOC_SPMAUTO;
2158 			bp->bp_autoival = prop.exec_freq;
2159 			bp->bp_autonext = prop.next_exec;
2160 			bp->bp_autonow = time;
2161 			break;
2162 		case MFI_PR_OPMODE_MANUAL:
2163 			bp->bp_mode = BIOC_SPMMANUAL;
2164 			break;
2165 		case MFI_PR_OPMODE_DISABLED:
2166 			bp->bp_mode = BIOC_SPMDISABLED;
2167 			break;
2168 		default:
2169 			printf("%s: unknown patrol mode %d\n",
2170 			    DEVNAME(sc), prop.op_mode);
2171 			break;
2172 		}
2173 
2174 		switch (status.state) {
2175 		case MFI_PR_STATE_STOPPED:
2176 			bp->bp_status = BIOC_SPSSTOPPED;
2177 			break;
2178 		case MFI_PR_STATE_READY:
2179 			bp->bp_status = BIOC_SPSREADY;
2180 			break;
2181 		case MFI_PR_STATE_ACTIVE:
2182 			bp->bp_status = BIOC_SPSACTIVE;
2183 			break;
2184 		case MFI_PR_STATE_ABORTED:
2185 			bp->bp_status = BIOC_SPSABORTED;
2186 			break;
2187 		default:
2188 			printf("%s: unknown patrol state %d\n",
2189 			    DEVNAME(sc), status.state);
2190 			break;
2191 		}
2192 
2193 		break;
2194 
2195 	default:
2196 		DNPRINTF(MFI_D_IOCTL, "%s: mfi_ioctl_patrol biocpatrol invalid "
2197 		    "opcode %x\n", DEVNAME(sc), bp->bp_opcode);
2198 		return (EINVAL);
2199 	}
2200 
2201 	return (rv);
2202 }
2203 
2204 int
2205 mfi_bio_hs(struct mfi_softc *sc, int volid, int type, void *bio_hs)
2206 {
2207 	struct mfi_conf		*cfg;
2208 	struct mfi_hotspare	*hs;
2209 	struct mfi_pd_details	*pd;
2210 	struct bioc_disk	*sdhs;
2211 	struct bioc_vol		*vdhs;
2212 	struct scsi_inquiry_data *inqbuf;
2213 	char			vend[8+16+4+1], *vendp;
2214 	int			i, rv = EINVAL;
2215 	uint32_t		size;
2216 	union mfi_mbox		mbox;
2217 
2218 	DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs %d\n", DEVNAME(sc), volid);
2219 
2220 	if (!bio_hs)
2221 		return (EINVAL);
2222 
2223 	pd = malloc(sizeof *pd, M_DEVBUF, M_WAITOK);
2224 
2225 	/* send single element command to retrieve size for full structure */
2226 	cfg = malloc(sizeof *cfg, M_DEVBUF, M_WAITOK);
2227 	if (mfi_mgmt(sc, MR_DCMD_CONF_GET, MFI_DATA_IN, sizeof *cfg, cfg, NULL))
2228 		goto freeme;
2229 
2230 	size = cfg->mfc_size;
2231 	free(cfg, M_DEVBUF, sizeof *cfg);
2232 
2233 	/* memory for read config */
2234 	cfg = malloc(size, M_DEVBUF, M_WAITOK|M_ZERO);
2235 	if (mfi_mgmt(sc, MR_DCMD_CONF_GET, MFI_DATA_IN, size, cfg, NULL))
2236 		goto freeme;
2237 
2238 	/* calculate offset to hs structure */
2239 	hs = (struct mfi_hotspare *)(
2240 	    ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array) +
2241 	    cfg->mfc_array_size * cfg->mfc_no_array +
2242 	    cfg->mfc_ld_size * cfg->mfc_no_ld);
2243 
2244 	if (volid < cfg->mfc_no_ld)
2245 		goto freeme; /* not a hotspare */
2246 
2247 	if (volid >= (cfg->mfc_no_ld + cfg->mfc_no_hs))
2248 		goto freeme; /* not a hotspare */
2249 
2250 	/* offset into hotspare structure */
2251 	i = volid - cfg->mfc_no_ld;
2252 
2253 	DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs i %d volid %d no_ld %d no_hs %d "
2254 	    "hs %p cfg %p id %02x\n", DEVNAME(sc), i, volid, cfg->mfc_no_ld,
2255 	    cfg->mfc_no_hs, hs, cfg, hs[i].mhs_pd.mfp_id);
2256 
2257 	/* get pd fields */
2258 	memset(&mbox, 0, sizeof(mbox));
2259 	mbox.s[0] = hs[i].mhs_pd.mfp_id;
2260 	if (mfi_mgmt(sc, MR_DCMD_PD_GET_INFO, MFI_DATA_IN,
2261 	    sizeof *pd, pd, &mbox)) {
2262 		DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs illegal PD\n",
2263 		    DEVNAME(sc));
2264 		goto freeme;
2265 	}
2266 
2267 	switch (type) {
2268 	case MFI_MGMT_VD:
2269 		vdhs = bio_hs;
2270 		vdhs->bv_status = BIOC_SVONLINE;
2271 		vdhs->bv_size = pd->mpd_size / 2 * 1024; /* XXX why? */
2272 		vdhs->bv_level = -1; /* hotspare */
2273 		vdhs->bv_nodisk = 1;
2274 		break;
2275 
2276 	case MFI_MGMT_SD:
2277 		sdhs = bio_hs;
2278 		sdhs->bd_status = BIOC_SDHOTSPARE;
2279 		sdhs->bd_size = pd->mpd_size / 2 * 1024; /* XXX why? */
2280 		sdhs->bd_channel = pd->mpd_enc_idx;
2281 		sdhs->bd_target = pd->mpd_enc_slot;
2282 		inqbuf = (struct scsi_inquiry_data *)&pd->mpd_inq_data;
2283 		vendp = inqbuf->vendor;
2284 		memcpy(vend, vendp, sizeof vend - 1);
2285 		vend[sizeof vend - 1] = '\0';
2286 		strlcpy(sdhs->bd_vendor, vend, sizeof(sdhs->bd_vendor));
2287 		break;
2288 
2289 	default:
2290 		goto freeme;
2291 	}
2292 
2293 	DNPRINTF(MFI_D_IOCTL, "%s: mfi_vol_hs 6\n", DEVNAME(sc));
2294 	rv = 0;
2295 freeme:
2296 	free(pd, M_DEVBUF, sizeof *pd);
2297 	free(cfg, M_DEVBUF, 0);
2298 
2299 	return (rv);
2300 }
2301 
2302 #ifndef SMALL_KERNEL
2303 
2304 static const char *mfi_bbu_indicators[] = {
2305 	"pack missing",
2306 	"voltage low",
2307 	"temp high",
2308 	"charge active",
2309 	"discharge active",
2310 	"learn cycle req'd",
2311 	"learn cycle active",
2312 	"learn cycle failed",
2313 	"learn cycle timeout",
2314 	"I2C errors",
2315 	"replace pack",
2316 	"low capacity",
2317 	"periodic learn req'd"
2318 };
2319 
2320 #define MFI_BBU_SENSORS 4
2321 
2322 int
2323 mfi_bbu(struct mfi_softc *sc)
2324 {
2325 	struct mfi_bbu_status bbu;
2326 	u_int32_t status;
2327 	u_int32_t mask;
2328 	u_int32_t soh_bad;
2329 	int i;
2330 
2331 	if (mfi_mgmt(sc, MR_DCMD_BBU_GET_STATUS, MFI_DATA_IN,
2332 	    sizeof(bbu), &bbu, NULL) != 0) {
2333 		for (i = 0; i < MFI_BBU_SENSORS; i++) {
2334 			sc->sc_bbu[i].value = 0;
2335 			sc->sc_bbu[i].status = SENSOR_S_UNKNOWN;
2336 		}
2337 		for (i = 0; i < nitems(mfi_bbu_indicators); i++) {
2338 			sc->sc_bbu_status[i].value = 0;
2339 			sc->sc_bbu_status[i].status = SENSOR_S_UNKNOWN;
2340 		}
2341 		return (-1);
2342 	}
2343 
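	/* only the BBU type reports a state-of-health flag; for iBBU the
	 * firmware status mask alone decides whether the pack is bad */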
2344 	switch (bbu.battery_type) {
2345 	case MFI_BBU_TYPE_IBBU:
2346 		mask = MFI_BBU_STATE_BAD_IBBU;
2347 		soh_bad = 0;
2348 		break;
2349 	case MFI_BBU_TYPE_BBU:
2350 		mask = MFI_BBU_STATE_BAD_BBU;
2351 		soh_bad = (bbu.detail.bbu.is_SOH_good == 0);
2352 		break;
2353 
2354 	case MFI_BBU_TYPE_NONE:
2355 	default:
2356 		sc->sc_bbu[0].value = 0;
2357 		sc->sc_bbu[0].status = SENSOR_S_CRIT;
2358 		for (i = 1; i < MFI_BBU_SENSORS; i++) {
2359 			sc->sc_bbu[i].value = 0;
2360 			sc->sc_bbu[i].status = SENSOR_S_UNKNOWN;
2361 		}
2362 		for (i = 0; i < nitems(mfi_bbu_indicators); i++) {
2363 			sc->sc_bbu_status[i].value = 0;
2364 			sc->sc_bbu_status[i].status = SENSOR_S_UNKNOWN;
2365 		}
2366 		return (0);
2367 	}
2368 
2369 	status = letoh32(bbu.fw_status);
2370 
2371 	sc->sc_bbu[0].value = ((status & mask) || soh_bad) ? 0 : 1;
2372 	sc->sc_bbu[0].status = ((status & mask) || soh_bad) ? SENSOR_S_CRIT :
2373 	    SENSOR_S_OK;
2374 
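	/* scale to the sensors framework's micro-units; temperature is
	 * also converted from degC to uK (hence the 273.15 K offset) */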
2375 	sc->sc_bbu[1].value = letoh16(bbu.voltage) * 1000;
2376 	sc->sc_bbu[2].value = (int16_t)letoh16(bbu.current) * 1000;
2377 	sc->sc_bbu[3].value = letoh16(bbu.temperature) * 1000000 + 273150000;
2378 	for (i = 1; i < MFI_BBU_SENSORS; i++)
2379 		sc->sc_bbu[i].status = SENSOR_S_UNSPEC;
2380 
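	/* each fw_status bit maps 1:1 onto an mfi_bbu_indicators[] entry */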
2381 	for (i = 0; i < nitems(mfi_bbu_indicators); i++) {
2382 		sc->sc_bbu_status[i].value = (status & (1 << i)) ? 1 : 0;
2383 		sc->sc_bbu_status[i].status = SENSOR_S_UNSPEC;
2384 	}
2385 
2386 	return (0);
2387 }
2388 
2389 int
2390 mfi_create_sensors(struct mfi_softc *sc)
2391 {
2392 	struct device		*dev;
2393 	struct scsi_link	*link;
2394 	int			i;
2395 
2396 	strlcpy(sc->sc_sensordev.xname, DEVNAME(sc),
2397 	    sizeof(sc->sc_sensordev.xname));
2398 
2399 	if (ISSET(letoh32(sc->sc_info.mci_adapter_ops), MFI_INFO_AOPS_BBU)) {
2400 		sc->sc_bbu = mallocarray(MFI_BBU_SENSORS, sizeof(*sc->sc_bbu),
2401 		    M_DEVBUF, M_WAITOK | M_ZERO);
2402 
2403 		sc->sc_bbu[0].type = SENSOR_INDICATOR;
2404 		sc->sc_bbu[0].status = SENSOR_S_UNKNOWN;
2405 		strlcpy(sc->sc_bbu[0].desc, "bbu ok",
2406 		    sizeof(sc->sc_bbu[0].desc));
2407 		sensor_attach(&sc->sc_sensordev, &sc->sc_bbu[0]);
2408 
2409 		sc->sc_bbu[1].type = SENSOR_VOLTS_DC;
2410 		sc->sc_bbu[1].status = SENSOR_S_UNSPEC;
2411 		sc->sc_bbu[2].type = SENSOR_AMPS;
2412 		sc->sc_bbu[2].status = SENSOR_S_UNSPEC;
2413 		sc->sc_bbu[3].type = SENSOR_TEMP;
2414 		sc->sc_bbu[3].status = SENSOR_S_UNSPEC;
2415 		for (i = 1; i < MFI_BBU_SENSORS; i++) {
2416 			strlcpy(sc->sc_bbu[i].desc, "bbu",
2417 			    sizeof(sc->sc_bbu[i].desc));
2418 			sensor_attach(&sc->sc_sensordev, &sc->sc_bbu[i]);
2419 		}
2420 
2421 		sc->sc_bbu_status = mallocarray(nitems(mfi_bbu_indicators),
2422 		    sizeof(*sc->sc_bbu_status), M_DEVBUF, M_WAITOK | M_ZERO);
2423 
2424 		for (i = 0; i < nitems(mfi_bbu_indicators); i++) {
2425 			sc->sc_bbu_status[i].type = SENSOR_INDICATOR;
2426 			sc->sc_bbu_status[i].status = SENSOR_S_UNSPEC;
2427 			strlcpy(sc->sc_bbu_status[i].desc,
2428 			    mfi_bbu_indicators[i],
2429 			    sizeof(sc->sc_bbu_status[i].desc));
2430 
2431 			sensor_attach(&sc->sc_sensordev, &sc->sc_bbu_status[i]);
2432 		}
2433 	}
2434 
2435 	sc->sc_sensors = mallocarray(sc->sc_ld_cnt, sizeof(struct ksensor),
2436 	    M_DEVBUF, M_NOWAIT | M_ZERO);
2437 	if (sc->sc_sensors == NULL)
2438 		return (1);
2439 
2440 	for (i = 0; i < sc->sc_ld_cnt; i++) {
2441 		link = scsi_get_link(sc->sc_scsibus, i, 0);
2442 		if (link == NULL)
2443 			goto bad;
2444 
2445 		dev = link->device_softc;
2446 
2447 		sc->sc_sensors[i].type = SENSOR_DRIVE;
2448 		sc->sc_sensors[i].status = SENSOR_S_UNKNOWN;
2449 
2450 		strlcpy(sc->sc_sensors[i].desc, dev->dv_xname,
2451 		    sizeof(sc->sc_sensors[i].desc));
2452 
2453 		sensor_attach(&sc->sc_sensordev, &sc->sc_sensors[i]);
2454 	}
2455 
2456 	if (sensor_task_register(sc, mfi_refresh_sensors, 10) == NULL)
2457 		goto bad;
2458 
2459 	sensordev_install(&sc->sc_sensordev);
2460 
2461 	return (0);
2462 
2463 bad:
2464 	free(sc->sc_sensors, M_DEVBUF,
2465 	    sc->sc_ld_cnt * sizeof(struct ksensor));
2466 
2467 	return (1);
2468 }
2469 
2470 void
2471 mfi_refresh_sensors(void *arg)
2472 {
2473 	struct mfi_softc	*sc = arg;
2474 	int			i, rv;
2475 	struct bioc_vol		bv;
2476 
2477 	if (sc->sc_bbu != NULL && mfi_bbu(sc) != 0)
2478 		return;
2479 
2480 	for (i = 0; i < sc->sc_ld_cnt; i++) {
2481 		bzero(&bv, sizeof(bv));
2482 		bv.bv_volid = i;
2483 
2484 		rw_enter_write(&sc->sc_lock);
2485 		rv = mfi_ioctl_vol(sc, &bv);
2486 		rw_exit_write(&sc->sc_lock);
2487 
2488 		if (rv != 0)
2489 			return;
2490 
2491 		switch (bv.bv_status) {
2492 		case BIOC_SVOFFLINE:
2493 			sc->sc_sensors[i].value = SENSOR_DRIVE_FAIL;
2494 			sc->sc_sensors[i].status = SENSOR_S_CRIT;
2495 			break;
2496 
2497 		case BIOC_SVDEGRADED:
2498 			sc->sc_sensors[i].value = SENSOR_DRIVE_PFAIL;
2499 			sc->sc_sensors[i].status = SENSOR_S_WARN;
2500 			break;
2501 
2502 		case BIOC_SVSCRUB:
2503 		case BIOC_SVONLINE:
2504 			sc->sc_sensors[i].value = SENSOR_DRIVE_ONLINE;
2505 			sc->sc_sensors[i].status = SENSOR_S_OK;
2506 			break;
2507 
2508 		case BIOC_SVINVALID:
2509 			/* FALLTHROUGH */
2510 		default:
2511 			sc->sc_sensors[i].value = 0; /* unknown */
2512 			sc->sc_sensors[i].status = SENSOR_S_UNKNOWN;
2513 			break;
2514 		}
2515 	}
2516 }
2517 #endif /* SMALL_KERNEL */
2518 #endif /* NBIO > 0 */
2519 
2520 void
2521 mfi_start(struct mfi_softc *sc, struct mfi_ccb *ccb)
2522 {
2523 	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
2524 	    ccb->ccb_pframe_offset, sc->sc_frames_size,
2525 	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
2526 
2527 	mfi_post(sc, ccb);
2528 }
2529 
2530 void
2531 mfi_done(struct mfi_softc *sc, struct mfi_ccb *ccb)
2532 {
2533 	bus_dmamap_sync(sc->sc_dmat, MFIMEM_MAP(sc->sc_frames),
2534 	    ccb->ccb_pframe_offset, sc->sc_frames_size,
2535 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
2536 
2537 	if (ccb->ccb_len > 0) {
2538 		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap,
2539 		    0, ccb->ccb_dmamap->dm_mapsize,
2540 		    (ccb->ccb_direction == MFI_DATA_IN) ?
2541 		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
2542 
2543 		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
2544 	}
2545 
2546 	ccb->ccb_done(sc, ccb);
2547 }
2548 
2549 u_int32_t
2550 mfi_xscale_fw_state(struct mfi_softc *sc)
2551 {
2552 	return (mfi_read(sc, MFI_OMSG0));
2553 }
2554 
2555 void
2556 mfi_xscale_intr_ena(struct mfi_softc *sc)
2557 {
2558 	mfi_write(sc, MFI_OMSK, MFI_ENABLE_INTR);
2559 }
2560 
2561 int
2562 mfi_xscale_intr(struct mfi_softc *sc)
2563 {
2564 	u_int32_t status;
2565 
2566 	status = mfi_read(sc, MFI_OSTS);
2567 	if (!ISSET(status, MFI_OSTS_INTR_VALID))
2568 		return (0);
2569 
2570 	/* write status back to acknowledge interrupt */
2571 	mfi_write(sc, MFI_OSTS, status);
2572 
2573 	return (1);
2574 }
2575 
2576 void
2577 mfi_xscale_post(struct mfi_softc *sc, struct mfi_ccb *ccb)
2578 {
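	/* the inbound queue port takes the frame address shifted right by
	 * three with the extra frame count packed into the freed-up bits */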
2579 	mfi_write(sc, MFI_IQP, (ccb->ccb_pframe >> 3) |
2580 	    ccb->ccb_extra_frames);
2581 }
2582 
2583 u_int32_t
2584 mfi_ppc_fw_state(struct mfi_softc *sc)
2585 {
2586 	return (mfi_read(sc, MFI_OSP));
2587 }
2588 
2589 void
2590 mfi_ppc_intr_ena(struct mfi_softc *sc)
2591 {
2592 	mfi_write(sc, MFI_ODC, 0xffffffff);
2593 	mfi_write(sc, MFI_OMSK, ~0x80000004);
2594 }
2595 
2596 int
2597 mfi_ppc_intr(struct mfi_softc *sc)
2598 {
2599 	u_int32_t status;
2600 
2601 	status = mfi_read(sc, MFI_OSTS);
2602 	if (!ISSET(status, MFI_OSTS_PPC_INTR_VALID))
2603 		return (0);
2604 
2605 	/* write status back to acknowledge interrupt */
2606 	mfi_write(sc, MFI_ODC, status);
2607 
2608 	return (1);
2609 }
2610 
2611 void
2612 mfi_ppc_post(struct mfi_softc *sc, struct mfi_ccb *ccb)
2613 {
2614 	mfi_write(sc, MFI_IQP, 0x1 | ccb->ccb_pframe |
2615 	    (ccb->ccb_extra_frames << 1));
2616 }
2617 
2618 u_int32_t
2619 mfi_gen2_fw_state(struct mfi_softc *sc)
2620 {
2621 	return (mfi_read(sc, MFI_OSP));
2622 }
2623 
2624 void
2625 mfi_gen2_intr_ena(struct mfi_softc *sc)
2626 {
2627 	mfi_write(sc, MFI_ODC, 0xffffffff);
2628 	mfi_write(sc, MFI_OMSK, ~MFI_OSTS_GEN2_INTR_VALID);
2629 }
2630 
2631 int
2632 mfi_gen2_intr(struct mfi_softc *sc)
2633 {
2634 	u_int32_t status;
2635 
2636 	status = mfi_read(sc, MFI_OSTS);
2637 	if (!ISSET(status, MFI_OSTS_GEN2_INTR_VALID))
2638 		return (0);
2639 
2640 	/* write status back to acknowledge interrupt */
2641 	mfi_write(sc, MFI_ODC, status);
2642 
2643 	return (1);
2644 }
2645 
2646 void
2647 mfi_gen2_post(struct mfi_softc *sc, struct mfi_ccb *ccb)
2648 {
2649 	mfi_write(sc, MFI_IQP, 0x1 | ccb->ccb_pframe |
2650 	    (ccb->ccb_extra_frames << 1));
2651 }
2652 
2653 u_int32_t
2654 mfi_skinny_fw_state(struct mfi_softc *sc)
2655 {
2656 	return (mfi_read(sc, MFI_OSP));
2657 }
2658 
2659 void
2660 mfi_skinny_intr_ena(struct mfi_softc *sc)
2661 {
2662 	mfi_write(sc, MFI_OMSK, ~0x00000001);
2663 }
2664 
2665 int
2666 mfi_skinny_intr(struct mfi_softc *sc)
2667 {
2668 	u_int32_t status;
2669 
2670 	status = mfi_read(sc, MFI_OSTS);
2671 	if (!ISSET(status, MFI_OSTS_SKINNY_INTR_VALID))
2672 		return (0);
2673 
2674 	/* write status back to acknowledge interrupt */
2675 	mfi_write(sc, MFI_OSTS, status);
2676 
2677 	return (1);
2678 }
2679 
2680 void
2681 mfi_skinny_post(struct mfi_softc *sc, struct mfi_ccb *ccb)
2682 {
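	/* skinny controllers expose a 64-bit inbound queue port; post the
	 * frame via the low word and write zero to the high word */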
2683 	mfi_write(sc, MFI_IQPL, 0x1 | ccb->ccb_pframe |
2684 	    (ccb->ccb_extra_frames << 1));
2685 	mfi_write(sc, MFI_IQPH, 0x00000000);
2686 }
2687 
2688 u_int
2689 mfi_skinny_sgd_load(struct mfi_softc *sc, struct mfi_ccb *ccb)
2690 {
2691 	struct mfi_frame_header	*hdr = &ccb->ccb_frame->mfr_header;
2692 	union mfi_sgl		*sgl = ccb->ccb_sgl;
2693 	bus_dma_segment_t	*sgd = ccb->ccb_dmamap->dm_segs;
2694 	int			 i;
2695 
2696 	switch (hdr->mfh_cmd) {
2697 	case MFI_CMD_LD_READ:
2698 	case MFI_CMD_LD_WRITE:
2699 	case MFI_CMD_PD_SCSI_IO:
2700 		/* Use MFI_FRAME_IEEE for some IO commands on skinny adapters */
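		/* each skinny SG entry carries a 64-bit address, a 32-bit
		 * length and a flags word; the IEEE/SGL64 flags below match */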
2701 		for (i = 0; i < ccb->ccb_dmamap->dm_nsegs; i++) {
2702 			sgl->sg_skinny[i].addr = htole64(sgd[i].ds_addr);
2703 			sgl->sg_skinny[i].len = htole32(sgd[i].ds_len);
2704 			sgl->sg_skinny[i].flag = 0;
2705 		}
2706 		hdr->mfh_flags |= MFI_FRAME_IEEE | MFI_FRAME_SGL64;
2707 
2708 		return (ccb->ccb_dmamap->dm_nsegs * sizeof(sgl->sg_skinny));
2709 	default:
2710 		return (mfi_default_sgd_load(sc, ccb));
2711 	}
2712 }
2713 
2714 int
2715 mfi_pd_scsi_probe(struct scsi_link *link)
2716 {
2717 	union mfi_mbox mbox;
2718 	struct mfi_softc *sc = link->bus->sb_adapter_softc;
2719 	struct mfi_pd_link *pl = sc->sc_pd->pd_links[link->target];
2720 
2721 	if (link->lun > 0)
2722 		return (0);
2723 
2724 	if (pl == NULL)
2725 		return (ENXIO);
2726 
2727 	memset(&mbox, 0, sizeof(mbox));
2728 	mbox.s[0] = pl->pd_id;
2729 
2730 	if (mfi_mgmt(sc, MR_DCMD_PD_GET_INFO, MFI_DATA_IN,
2731 	    sizeof(pl->pd_info), &pl->pd_info, &mbox))
2732 		return (EIO);
2733 
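	/* only drives the firmware reports in the system (JBOD) state are
	 * attached as physical disks here */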
2734 	if (letoh16(pl->pd_info.mpd_fw_state) != MFI_PD_SYSTEM)
2735 		return (ENXIO);
2736 
2737 	return (0);
2738 }
2739 
2740 void
2741 mfi_pd_scsi_cmd(struct scsi_xfer *xs)
2742 {
2743 	struct scsi_link *link = xs->sc_link;
2744 	struct mfi_softc *sc = link->bus->sb_adapter_softc;
2745 	struct mfi_ccb *ccb = xs->io;
2746 	struct mfi_pass_frame *pf = &ccb->ccb_frame->mfr_pass;
2747 	struct mfi_pd_link *pl = sc->sc_pd->pd_links[link->target];
2748 
2749 	KERNEL_UNLOCK();
2750 
2751 	mfi_scrub_ccb(ccb);
2752 	xs->error = XS_NOERROR;
2753 
2754 	pf->mpf_header.mfh_cmd = MFI_CMD_PD_SCSI_IO;
2755 	pf->mpf_header.mfh_target_id = pl->pd_id;
2756 	pf->mpf_header.mfh_lun_id = link->lun;
2757 	pf->mpf_header.mfh_cdb_len = xs->cmdlen;
2758 	pf->mpf_header.mfh_timeout = 0;
2759 	pf->mpf_header.mfh_data_len = htole32(xs->datalen); /* XXX */
2760 	pf->mpf_header.mfh_sense_len = MFI_SENSE_SIZE;
2761 	pf->mpf_sense_addr = htole64(ccb->ccb_psense);
2762 
2763 	memset(pf->mpf_cdb, 0, sizeof(pf->mpf_cdb));
2764 	memcpy(pf->mpf_cdb, &xs->cmd, xs->cmdlen);
2765 
2766 	ccb->ccb_done = mfi_scsi_xs_done;
2767 	ccb->ccb_cookie = xs;
2768 	ccb->ccb_frame_size = MFI_PASS_FRAME_SIZE;
2769 	ccb->ccb_sgl = &pf->mpf_sgl;
2770 
2771 	if (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT))
2772 		ccb->ccb_direction = xs->flags & SCSI_DATA_IN ?
2773 		    MFI_DATA_IN : MFI_DATA_OUT;
2774 	else
2775 		ccb->ccb_direction = MFI_DATA_NONE;
2776 
2777 	if (xs->data) {
2778 		ccb->ccb_data = xs->data;
2779 		ccb->ccb_len = xs->datalen;
2780 
2781 		if (mfi_create_sgl(sc, ccb, (xs->flags & SCSI_NOSLEEP) ?
2782 		    BUS_DMA_NOWAIT : BUS_DMA_WAITOK))
2783 			goto stuffup;
2784 	}
2785 
2786 	if (xs->flags & SCSI_POLL)
2787 		mfi_poll(sc, ccb);
2788 	else
2789 		mfi_start(sc, ccb);
2790 
2791 	KERNEL_LOCK();
2792 	return;
2793 
2794 stuffup:
2795 	xs->error = XS_DRIVER_STUFFUP;
2796 	KERNEL_LOCK();
2797 	scsi_done(xs);
2798 }
2799