xref: /openbsd/sys/dev/pci/mfii.c (revision a6445c1d)
1 /* $OpenBSD: mfii.c,v 1.19 2014/10/08 14:44:39 dlg Exp $ */
2 
3 /*
4  * Copyright (c) 2012 David Gwynne <dlg@openbsd.org>
5  *
6  * Permission to use, copy, modify, and distribute this software for any
7  * purpose with or without fee is hereby granted, provided that the above
8  * copyright notice and this permission notice appear in all copies.
9  *
10  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17  */
18 
19 #include "bio.h"
20 
21 #include <sys/param.h>
22 #include <sys/systm.h>
23 #include <sys/kernel.h>
24 #include <sys/malloc.h>
25 #include <sys/device.h>
26 #include <sys/types.h>
27 #include <sys/pool.h>
28 
29 #include <dev/pci/pcidevs.h>
30 #include <dev/pci/pcivar.h>
31 
32 #include <machine/bus.h>
33 
34 #include <scsi/scsi_all.h>
35 #include <scsi/scsi_disk.h>
36 #include <scsi/scsiconf.h>
37 
38 #include <dev/ic/mfireg.h>
39 #include <dev/pci/mpiireg.h>
40 
/* PCI BAR used for the memory-mapped register window and its size. */
#define	MFII_BAR		0x14
#define	MFII_PCI_MEMSIZE	0x2000 /* 8k */

/* outbound interrupt status bits that belong to us (see mfii_intr path) */
#define MFII_OSTS_INTR_VALID	0x00000009
#define MFII_RPI		0x6c /* reply post host index */

/*
 * Request descriptor types, encoded in the low byte of the descriptor
 * (bit 0 reserved, type in bits 1-3 — hence the << 1 shifts).
 */
#define MFII_REQ_TYPE_SCSI	MPII_REQ_DESCR_SCSI_IO
#define MFII_REQ_TYPE_LDIO	(0x7 << 1)
#define MFII_REQ_TYPE_MFA	(0x1 << 1)
#define MFII_REQ_TYPE_NO_LOCK	(0x2 << 1)
#define MFII_REQ_TYPE_HI_PRI	(0x6 << 1)

/* build a little-endian MFA (legacy MFI frame) request descriptor */
#define MFII_REQ_MFA(_a)	htole64((_a) | MFII_REQ_TYPE_MFA)

#define MFII_FUNCTION_LDIO_REQUEST			(0xf1)
56 
/*
 * Request descriptor posted to the controller: mfii_start() writes it
 * as two 32-bit words to the MFI_IQPL/MFI_IQPH inbound queue ports.
 * Layout is dictated by the hardware interface; do not reorder.
 */
struct mfii_request_descr {
	u_int8_t	flags;		/* MFII_REQ_TYPE_* */
	u_int8_t	msix_index;
	u_int16_t	smid;		/* system message id of the frame */

	u_int16_t	lmid;
	u_int16_t	dev_handle;
} __packed;
65 
/* raid_flags bit: address the request at a system physical disk */
#define MFII_RAID_CTX_IO_TYPE_SYSPD	(0x1 << 4)

/*
 * Per-request RAID context embedded in the I/O frame.  Firmware-defined
 * layout (MegaRAID Fusion interface); fields we do not fill in are left
 * zero.  NOTE(review): field semantics taken from names — confirm
 * against the MegaRAID Fusion-MPT documentation before relying on them.
 */
struct mfii_raid_context {
	u_int8_t	type_nseg;
	u_int8_t	_reserved1;
	u_int16_t	timeout_value;

	u_int8_t	reg_lock_flags;
	u_int8_t	_reserved2;
	u_int16_t	virtual_disk_target_id;

	u_int64_t	reg_lock_row_lba;

	u_int32_t	reg_lock_length;

	u_int16_t	next_lm_id;
	u_int8_t	ex_status;
	u_int8_t	status;		/* completion status from firmware */

	u_int8_t	raid_flags;	/* MFII_RAID_CTX_IO_TYPE_* */
	u_int8_t	num_sge;
	u_int16_t	config_seq_num;

	u_int8_t	span_arm;
	u_int8_t	_reserved3[3];
} __packed;
92 
/*
 * Scatter/gather element in the MPI2 "IEEE" format: 64-bit address,
 * 32-bit length, plus chain/flag bytes (MFII_SGE_* below).
 */
struct mfii_sge {
	u_int64_t	sg_addr;
	u_int32_t	sg_len;
	u_int16_t	_reserved;
	u_int8_t	sg_next_chain_offset;
	u_int8_t	sg_flags;
} __packed;
100 
/* sg_flags values: address space selector, end-of-list and chain marks */
#define MFII_SGE_ADDR_MASK		(0x03)
#define MFII_SGE_ADDR_SYSTEM		(0x00)
#define MFII_SGE_ADDR_IOCDDR		(0x01)
#define MFII_SGE_ADDR_IOCPLB		(0x02)
#define MFII_SGE_ADDR_IOCPLBNTA		(0x03)
#define MFII_SGE_END_OF_LIST		(0x40)
#define MFII_SGE_CHAIN_ELEMENT		(0x80)

/* fixed size of one request frame slot in sc_requests */
#define MFII_REQUEST_SIZE	256

/* DCMD opcode to fetch the logical disk map (see mfii_syspd()) */
#define MR_DCMD_LD_MAP_GET_INFO			0x0300e101

#define MFII_MAX_ROW		32
#define MFII_MAX_ARRAY		128
115 
/* one array's row of physical disk ids, part of struct mfii_ld_map */
struct mfii_array_map {
	uint16_t		mam_pd[MFII_MAX_ROW];
} __packed;
119 
/* firmware device handle entry for a physical disk (from the LD map) */
struct mfii_dev_handle {
	uint16_t		mdh_cur_handle;	/* handle to use for I/O */
	uint8_t			mdh_valid;
	uint8_t			mdh_reserved;
	uint16_t		mdh_handle[2];
} __packed;
126 
/*
 * Logical disk map returned by MR_DCMD_LD_MAP_GET_INFO.  Fetched in
 * mfii_syspd() to learn the per-pd timeout and device handles.
 * Firmware-defined layout.
 */
struct mfii_ld_map {
	uint32_t		mlm_total_size;
	uint32_t		mlm_reserved1[5];
	uint32_t		mlm_num_lds;
	uint32_t		mlm_reserved2;
	uint8_t			mlm_tgtid_to_ld[2 * MFI_MAX_LD];
	uint8_t			mlm_pd_timeout;
	uint8_t			mlm_reserved3[7];
	struct mfii_array_map	mlm_am[MFII_MAX_ARRAY];
	struct mfii_dev_handle	mlm_dev_handle[MFI_MAX_PD];
} __packed;
138 
/*
 * A single-segment chunk of DMA-able memory: map, backing segment,
 * size and kernel mapping.  Allocated by mfii_dmamem_alloc(), released
 * by mfii_dmamem_free().  The accessor macros below are the only way
 * the rest of the driver touches the internals.
 */
struct mfii_dmamem {
	bus_dmamap_t		mdm_map;
	bus_dma_segment_t	mdm_seg;
	size_t			mdm_size;
	caddr_t			mdm_kva;
};
#define MFII_DMA_MAP(_mdm)	((_mdm)->mdm_map)
#define MFII_DMA_LEN(_mdm)	((_mdm)->mdm_size)
#define MFII_DMA_DVA(_mdm)	((u_int64_t)(_mdm)->mdm_map->dm_segs[0].ds_addr)
#define MFII_DMA_KVA(_mdm)	((void *)(_mdm)->mdm_kva)
149 
150 struct mfii_softc;
151 
/*
 * Per-command control block.  Each ccb owns one MFII_REQUEST_SIZE slot
 * in sc_requests, one sense buffer slot in sc_sense and one SGL slot in
 * sc_sgl; the *_dva/*_offset pairs cache the device address and offset
 * of those slots (set up in mfii_init_ccb()).
 */
struct mfii_ccb {
	void			*ccb_request;		/* kva of request slot */
	u_int64_t		ccb_request_dva;	/* dva of request slot */
	bus_addr_t		ccb_request_offset;	/* offset in sc_requests */

	struct mfi_sense	*ccb_sense;
	u_int32_t		ccb_sense_dva;
	bus_addr_t		ccb_sense_offset;

	struct mfii_sge		*ccb_sgl;
	u_int64_t		ccb_sgl_dva;
	bus_addr_t		ccb_sgl_offset;
	u_int			ccb_sgl_len;

	/* descriptor posted to the hardware by mfii_start() */
	struct mfii_request_descr ccb_req;

	bus_dmamap_t		ccb_dmamap;

	/* data for sgl */
	void			*ccb_data;
	size_t			ccb_len;

	int			ccb_direction;
#define MFII_DATA_NONE			0
#define MFII_DATA_IN			1
#define MFII_DATA_OUT			2

	/* completion: ccb_done(sc, ccb) is called with ccb_cookie intact */
	void			*ccb_cookie;
	void			(*ccb_done)(struct mfii_softc *,
				    struct mfii_ccb *);

	u_int32_t		ccb_flags;
#define MFI_CCB_F_ERR			(1<<0)
	u_int			ccb_smid;	/* 1-based system message id */
	SIMPLEQ_ENTRY(mfii_ccb)	ccb_link;
};
SIMPLEQ_HEAD(mfii_ccb_list, mfii_ccb);
189 
/* per physical disk state: id, cached details and firmware handle */
struct mfii_pd_link {
	u_int16_t		pd_id;
	struct mfi_pd_details	pd_info;
	u_int16_t		pd_handle;
};
195 
/* state for the second scsibus that exposes the physical disks */
struct mfii_pd_softc {
	struct scsi_link	pd_link;
	struct scsibus_softc	*pd_scsibus;
	struct mfii_pd_link	*pd_links[MFI_MAX_PD];
	uint8_t			pd_timeout;	/* from the LD map */
};
202 
/* per-controller softc */
struct mfii_softc {
	struct device		sc_dev;

	pci_chipset_tag_t	sc_pc;
	pcitag_t		sc_tag;

	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	bus_size_t		sc_ios;
	bus_dma_tag_t		sc_dmat;

	void			*sc_ih;		/* interrupt handler cookie */

	struct mutex		sc_ccb_mtx;	/* protects sc_ccb_freeq */
	struct mutex		sc_post_mtx;	/* serializes IQP writes */

	/* limits read from the firmware state register at attach */
	u_int			sc_max_cmds;
	u_int			sc_max_sgl;

	u_int			sc_reply_postq_depth;
	u_int			sc_reply_postq_index;
	struct mutex		sc_reply_postq_mtx;
	struct mfii_dmamem	*sc_reply_postq;

	/* DMA-able memory shared with the firmware */
	struct mfii_dmamem	*sc_requests;
	struct mfii_dmamem	*sc_sense;
	struct mfii_dmamem	*sc_sgl;

	struct mfii_ccb		*sc_ccb;	/* array of sc_max_cmds ccbs */
	struct mfii_ccb_list	sc_ccb_freeq;

	struct scsi_link	sc_link;	/* logical disk bus */
	struct scsibus_softc	*sc_scsibus;
	struct mfii_pd_softc	*sc_pd;		/* physical disk bus state */
	struct scsi_iopool	sc_iopool;

	struct mfi_ctrl_info	sc_info;	/* cached controller info */
};
241 
242 int		mfii_match(struct device *, void *, void *);
243 void		mfii_attach(struct device *, struct device *, void *);
244 int		mfii_detach(struct device *, int);
245 
/* autoconf glue */
struct cfattach mfii_ca = {
	sizeof(struct mfii_softc),
	mfii_match,
	mfii_attach,
	mfii_detach
};
252 
struct cfdriver mfii_cd = {
	NULL,
	"mfii",
	DV_DULL
};
258 
259 void		mfii_scsi_cmd(struct scsi_xfer *);
260 void		mfii_scsi_cmd_done(struct mfii_softc *, struct mfii_ccb *);
261 
/* adapter entry points for the logical disk bus */
struct scsi_adapter mfii_switch = {
	mfii_scsi_cmd,
	scsi_minphys,
	NULL, /* probe */
	NULL, /* unprobe */
	NULL  /* ioctl */
};
269 
270 void		mfii_pd_scsi_cmd(struct scsi_xfer *);
271 int		mfii_pd_scsi_probe(struct scsi_link *);
272 
/*
 * adapter entry points for the physical disk bus; the trailing
 * unprobe/ioctl members are implicitly NULL.
 */
struct scsi_adapter mfii_pd_switch = {
	mfii_pd_scsi_cmd,
	scsi_minphys,
	mfii_pd_scsi_probe
};
278 
279 #define DEVNAME(_sc)		((_sc)->sc_dev.dv_xname)
280 
281 u_int32_t		mfii_read(struct mfii_softc *, bus_size_t);
282 void			mfii_write(struct mfii_softc *, bus_size_t, u_int32_t);
283 
284 struct mfii_dmamem *	mfii_dmamem_alloc(struct mfii_softc *, size_t);
285 void			mfii_dmamem_free(struct mfii_softc *,
286 			    struct mfii_dmamem *);
287 
288 void *			mfii_get_ccb(void *);
289 void			mfii_put_ccb(void *, void *);
290 int			mfii_init_ccb(struct mfii_softc *);
291 void			mfii_scrub_ccb(struct mfii_ccb *);
292 
293 int			mfii_transition_firmware(struct mfii_softc *);
294 int			mfii_initialise_firmware(struct mfii_softc *);
295 int			mfii_get_info(struct mfii_softc *);
296 int			mfii_syspd(struct mfii_softc *);
297 
298 void			mfii_start(struct mfii_softc *, struct mfii_ccb *);
299 void			mfii_done(struct mfii_softc *, struct mfii_ccb *);
300 int			mfii_poll(struct mfii_softc *, struct mfii_ccb *);
301 void			mfii_poll_done(struct mfii_softc *, struct mfii_ccb *);
302 int			mfii_exec(struct mfii_softc *, struct mfii_ccb *);
303 void			mfii_exec_done(struct mfii_softc *, struct mfii_ccb *);
304 int			mfii_my_intr(struct mfii_softc *);
305 int			mfii_intr(void *);
306 void			mfii_postq(struct mfii_softc *);
307 
308 int			mfii_load_ccb(struct mfii_softc *, struct mfii_ccb *,
309 			    void *, int);
310 int			mfii_load_mfa(struct mfii_softc *, struct mfii_ccb *,
311 			    void *, int);
312 
313 int			mfii_mfa_poll(struct mfii_softc *, struct mfii_ccb *);
314 
315 int			mfii_mgmt(struct mfii_softc *, struct mfii_ccb *,
316 			    u_int32_t, u_int8_t *, void *, size_t, int);
317 
318 int			mfii_scsi_cmd_io(struct mfii_softc *,
319 			    struct scsi_xfer *);
320 int			mfii_scsi_cmd_cdb(struct mfii_softc *,
321 			    struct scsi_xfer *);
322 int			mfii_pd_scsi_cmd_cdb(struct mfii_softc *,
323 			    struct scsi_xfer *);
324 
325 
/* read the firmware state/status register (MFI_OSP) */
#define mfii_fw_state(_sc) mfii_read((_sc), MFI_OSP)
327 
/* PCI ids this driver attaches to (SAS2208/3008/3108 MegaRAID) */
static const struct pci_matchid mfii_devices[] = {
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_2208 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3008 },
	{ PCI_VENDOR_SYMBIOS,	PCI_PRODUCT_SYMBIOS_MEGARAID_3108 }
};
333 
334 int
335 mfii_match(struct device *parent, void *match, void *aux)
336 {
337 	return (pci_matchbyid(aux, mfii_devices, nitems(mfii_devices)));
338 }
339 
340 void
341 mfii_attach(struct device *parent, struct device *self, void *aux)
342 {
343 	struct mfii_softc *sc = (struct mfii_softc *)self;
344 	struct pci_attach_args *pa = aux;
345 	pcireg_t memtype;
346 	pci_intr_handle_t ih;
347 	struct scsibus_attach_args saa;
348 	u_int32_t status;
349 
350 	/* init sc */
351 	sc->sc_dmat = pa->pa_dmat;
352 	SIMPLEQ_INIT(&sc->sc_ccb_freeq);
353 	mtx_init(&sc->sc_ccb_mtx, IPL_BIO);
354 	mtx_init(&sc->sc_post_mtx, IPL_BIO);
355 	mtx_init(&sc->sc_reply_postq_mtx, IPL_BIO);
356 	scsi_iopool_init(&sc->sc_iopool, sc, mfii_get_ccb, mfii_put_ccb);
357 
358 	/* wire up the bus shizz */
359 	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, MFII_BAR);
360 	if (pci_mapreg_map(pa, MFII_BAR, memtype, 0,
361 	    &sc->sc_iot, &sc->sc_ioh, NULL, &sc->sc_ios, MFII_PCI_MEMSIZE)) {
362 		printf(": unable to map registers\n");
363 		return;
364 	}
365 
366 	/* disable interrupts */
367 	mfii_write(sc, MFI_OMSK, 0xffffffff);
368 
369 	if (pci_intr_map_msi(pa, &ih) != 0 && pci_intr_map(pa, &ih) != 0) {
370 		printf(": unable to map interrupt\n");
371 		goto pci_unmap;
372 	}
373 	printf(": %s\n", pci_intr_string(pa->pa_pc, ih));
374 
375 	/* lets get started */
376 	if (mfii_transition_firmware(sc))
377 		goto pci_unmap;
378 
379 	status = mfii_fw_state(sc);
380 	sc->sc_max_cmds = status & MFI_STATE_MAXCMD_MASK;
381 	sc->sc_max_sgl = (status & MFI_STATE_MAXSGL_MASK) >> 16;
382 
383 	/* sense memory */
384 	sc->sc_sense = mfii_dmamem_alloc(sc, sc->sc_max_cmds * MFI_SENSE_SIZE);
385 	if (sc->sc_sense == NULL) {
386 		printf("%s: unable to allocate sense memory\n", DEVNAME(sc));
387 		goto pci_unmap;
388 	}
389 
390 	sc->sc_reply_postq_depth = roundup(sc->sc_max_cmds, 16);
391 
392 	sc->sc_reply_postq = mfii_dmamem_alloc(sc,
393 	    sc->sc_reply_postq_depth * sizeof(struct mpii_reply_descr));
394 	if (sc->sc_reply_postq == NULL)
395 		goto free_sense;
396 
397 	memset(MFII_DMA_KVA(sc->sc_reply_postq), 0xff,
398 	    MFII_DMA_LEN(sc->sc_reply_postq));
399 
400 	sc->sc_requests = mfii_dmamem_alloc(sc,
401 	    MFII_REQUEST_SIZE * (sc->sc_max_cmds + 1));
402 	if (sc->sc_requests == NULL)
403 		goto free_reply_postq;
404 
405 	sc->sc_sgl = mfii_dmamem_alloc(sc, sc->sc_max_cmds *
406 	    sizeof(struct mfii_sge) * sc->sc_max_sgl);
407 	if (sc->sc_sgl == NULL)
408 		goto free_requests;
409 
410 	if (mfii_init_ccb(sc) != 0) {
411 		printf("%s: could not init ccb list\n", DEVNAME(sc));
412 		goto free_sgl;
413 	}
414 
415 	/* kickstart firmware with all addresses and pointers */
416 	if (mfii_initialise_firmware(sc) != 0) {
417 		printf("%s: could not initialize firmware\n", DEVNAME(sc));
418 		goto free_sgl;
419 	}
420 
421 	if (mfii_get_info(sc) != 0) {
422 		printf("%s: could not retrieve controller information\n",
423 		    DEVNAME(sc));
424 		goto free_sgl;
425 	}
426 
427 	printf("%s: \"%s\", firmware %s", DEVNAME(sc),
428 	    sc->sc_info.mci_product_name, sc->sc_info.mci_package_version);
429 	if (letoh16(sc->sc_info.mci_memory_size) > 0)
430 		printf(", %uMB cache", letoh16(sc->sc_info.mci_memory_size));
431 	printf("\n");
432 
433 	sc->sc_ih = pci_intr_establish(sc->sc_pc, ih, IPL_BIO,
434 	    mfii_intr, sc, DEVNAME(sc));
435 	if (sc->sc_ih == NULL)
436 		goto free_sgl;
437 
438 	sc->sc_link.openings = sc->sc_max_cmds;
439 	sc->sc_link.adapter_softc = sc;
440 	sc->sc_link.adapter = &mfii_switch;
441 	sc->sc_link.adapter_target = sc->sc_info.mci_max_lds;
442 	sc->sc_link.adapter_buswidth = sc->sc_info.mci_max_lds;
443 	sc->sc_link.pool = &sc->sc_iopool;
444 
445 	bzero(&saa, sizeof(saa));
446 	saa.saa_sc_link = &sc->sc_link;
447 
448 	config_found(&sc->sc_dev, &saa, scsiprint);
449 
450 	mfii_syspd(sc);
451 
452 	/* enable interrupts */
453 	mfii_write(sc, MFI_OSTS, 0xffffffff);
454 	mfii_write(sc, MFI_OMSK, ~MFII_OSTS_INTR_VALID);
455 
456 	return;
457 free_sgl:
458 	mfii_dmamem_free(sc, sc->sc_sgl);
459 free_requests:
460 	mfii_dmamem_free(sc, sc->sc_requests);
461 free_reply_postq:
462 	mfii_dmamem_free(sc, sc->sc_reply_postq);
463 free_sense:
464 	mfii_dmamem_free(sc, sc->sc_sense);
465 pci_unmap:
466 	bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
467 }
468 
/*
 * Attach a second scsibus that exposes the physical disks behind the
 * controller.  Fetches the LD map (for the pd timeout and firmware
 * device handles) and the physical disk list, builds a mfii_pd_link
 * for each disk, then attaches the bus.  Returns 0 on success, 1 on
 * failure (everything allocated here is unwound via the labels).
 */
int
mfii_syspd(struct mfii_softc *sc)
{
	struct scsibus_attach_args saa;
	struct scsi_link *link;
	struct mfii_ld_map *lm;
	struct mfii_pd_link *pl;
	struct mfi_pd_list *pd;
	struct mfii_ccb *ccb;
	u_int npds, i;
	int rv;

	/* M_WAITOK cannot fail; the NULL check is belt-and-braces */
	sc->sc_pd = malloc(sizeof(*sc->sc_pd), M_DEVBUF, M_WAITOK|M_ZERO);
	if (sc->sc_pd == NULL)
		return (1);

	lm = malloc(sizeof(*lm), M_TEMP, M_WAITOK|M_ZERO);
	if (lm == NULL)
		goto free_pdsc;

	ccb = scsi_io_get(&sc->sc_iopool, 0);
	rv = mfii_mgmt(sc, ccb, MR_DCMD_LD_MAP_GET_INFO, NULL,
	    lm, sizeof(*lm), SCSI_DATA_IN|SCSI_NOSLEEP);
	scsi_io_put(&sc->sc_iopool, ccb);
	if (rv != 0)
		goto free_lm;

	sc->sc_pd->pd_timeout = lm->mlm_pd_timeout;

	pd = malloc(sizeof(*pd), M_TEMP, M_WAITOK|M_ZERO);
	if (pd == NULL)
		goto free_lm;

	ccb = scsi_io_get(&sc->sc_iopool, 0);
	rv = mfii_mgmt(sc, ccb, MR_DCMD_PD_GET_LIST, NULL,
	    pd, sizeof(*pd), SCSI_DATA_IN|SCSI_NOSLEEP);
	scsi_io_put(&sc->sc_iopool, ccb);
	if (rv != 0)
		goto free_pd;

	npds = letoh32(pd->mpl_no_pd);
	for (i = 0; i < npds; i++) {
		pl = malloc(sizeof(*pl), M_DEVBUF, M_WAITOK|M_ZERO);
		if (pl == NULL)
			goto free_pl;

		/*
		 * NOTE(review): mlm_dev_handle[] is indexed by the pd
		 * list's enumeration order here, not by pd_id — confirm
		 * the firmware keeps these two orderings in sync.
		 */
		pl->pd_id = pd->mpl_address[i].mpa_pd_id;
		pl->pd_handle = lm->mlm_dev_handle[i].mdh_cur_handle;
		sc->sc_pd->pd_links[i] = pl;
	}

	free(pd, M_TEMP, 0);
	free(lm, M_TEMP, 0);

	link = &sc->sc_pd->pd_link;
	link->adapter = &mfii_pd_switch;
	link->adapter_softc = sc;
	link->adapter_buswidth = MFI_MAX_PD;
	link->adapter_target = -1;
	link->openings = sc->sc_max_cmds - 1;
	link->pool = &sc->sc_iopool;

	bzero(&saa, sizeof(saa));
	saa.saa_sc_link = link;

	sc->sc_pd->pd_scsibus = (struct scsibus_softc *)
	    config_found(&sc->sc_dev, &saa, scsiprint);

	return (0);
free_pl:
	/* pd_links[] is filled front to back, so stop at the first gap */
	for (i = 0; i < npds; i++) {
		pl = sc->sc_pd->pd_links[i];
		if (pl == NULL)
			break;

		free(pl, M_DEVBUF, 0);
	}
free_pd:
	free(pd, M_TEMP, 0);
free_lm:
	free(lm, M_TEMP, 0);
free_pdsc:
	free(sc->sc_pd, M_DEVBUF, 0);
	return (1);
}
554 
/*
 * Detach: tear down in reverse order of attach.  If attach never got as
 * far as establishing the interrupt handler it already unwound its own
 * allocations, so sc_ih == NULL means there is nothing to do.
 */
int
mfii_detach(struct device *self, int flags)
{
	struct mfii_softc *sc = (struct mfii_softc *)self;

	if (sc->sc_ih == NULL)
		return (0);

	pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
	mfii_dmamem_free(sc, sc->sc_sgl);
	mfii_dmamem_free(sc, sc->sc_requests);
	mfii_dmamem_free(sc, sc->sc_reply_postq);
	mfii_dmamem_free(sc, sc->sc_sense);
	bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);

	return (0);
}
572 
/*
 * Read a 32-bit controller register.  The read barrier first ensures
 * the access observes prior device writes rather than a stale value.
 */
u_int32_t
mfii_read(struct mfii_softc *sc, bus_size_t r)
{
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, r));
}
580 
/*
 * Write a 32-bit controller register, followed by a write barrier so
 * the store is pushed out before any subsequent register access.
 */
void
mfii_write(struct mfii_softc *sc, bus_size_t r, u_int32_t v)
{
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
	    BUS_SPACE_BARRIER_WRITE);
}
588 
/*
 * Allocate a physically contiguous (single segment), zeroed, mapped
 * and loaded chunk of DMA-able memory.  Returns NULL on failure; the
 * goto chain unwinds exactly the steps that succeeded.
 */
struct mfii_dmamem *
mfii_dmamem_alloc(struct mfii_softc *sc, size_t size)
{
	struct mfii_dmamem *m;
	int nsegs;

	m = malloc(sizeof(*m), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (m == NULL)
		return (NULL);

	m->mdm_size = size;

	if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &m->mdm_map) != 0)
		goto mdmfree;

	if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &m->mdm_seg, 1,
	    &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0)
		goto destroy;

	if (bus_dmamem_map(sc->sc_dmat, &m->mdm_seg, nsegs, size, &m->mdm_kva,
	    BUS_DMA_NOWAIT) != 0)
		goto free;

	if (bus_dmamap_load(sc->sc_dmat, m->mdm_map, m->mdm_kva, size, NULL,
	    BUS_DMA_NOWAIT) != 0)
		goto unmap;

	return (m);

unmap:
	bus_dmamem_unmap(sc->sc_dmat, m->mdm_kva, m->mdm_size);
free:
	bus_dmamem_free(sc->sc_dmat, &m->mdm_seg, 1);
destroy:
	bus_dmamap_destroy(sc->sc_dmat, m->mdm_map);
mdmfree:
	free(m, M_DEVBUF, 0);

	return (NULL);
}
630 
/* release everything mfii_dmamem_alloc() set up, in reverse order */
void
mfii_dmamem_free(struct mfii_softc *sc, struct mfii_dmamem *m)
{
	bus_dmamap_unload(sc->sc_dmat, m->mdm_map);
	bus_dmamem_unmap(sc->sc_dmat, m->mdm_kva, m->mdm_size);
	bus_dmamem_free(sc->sc_dmat, &m->mdm_seg, 1);
	bus_dmamap_destroy(sc->sc_dmat, m->mdm_map);
	free(m, M_DEVBUF, 0);
}
640 
641 
642 
643 
/*
 * Step the firmware state machine until it reaches MFI_STATE_READY,
 * issuing the transition command each intermediate state requires and
 * busy-waiting up to max_wait seconds (polled at 100ms) per state.
 * Returns 0 once READY, 1 on fault, unknown state or timeout.
 */
int
mfii_transition_firmware(struct mfii_softc *sc)
{
	int32_t			fw_state, cur_state;
	int			max_wait, i;

	fw_state = mfii_fw_state(sc) & MFI_STATE_MASK;

	while (fw_state != MFI_STATE_READY) {
		cur_state = fw_state;
		switch (fw_state) {
		case MFI_STATE_FAULT:
			printf("%s: firmware fault\n", DEVNAME(sc));
			return (1);
		case MFI_STATE_WAIT_HANDSHAKE:
			mfii_write(sc, MFI_SKINNY_IDB,
			    MFI_INIT_CLEAR_HANDSHAKE);
			max_wait = 2;
			break;
		case MFI_STATE_OPERATIONAL:
			mfii_write(sc, MFI_SKINNY_IDB, MFI_INIT_READY);
			max_wait = 10;
			break;
		case MFI_STATE_UNDEFINED:
		case MFI_STATE_BB_INIT:
			max_wait = 2;
			break;
		case MFI_STATE_FW_INIT:
		case MFI_STATE_DEVICE_SCAN:
		case MFI_STATE_FLUSH_CACHE:
			max_wait = 20;
			break;
		default:
			printf("%s: unknown firmware state %d\n",
			    DEVNAME(sc), fw_state);
			return (1);
		}
		/* poll for up to max_wait seconds for the state to change */
		for (i = 0; i < (max_wait * 10); i++) {
			fw_state = mfii_fw_state(sc) & MFI_STATE_MASK;
			if (fw_state == cur_state)
				DELAY(100000);
			else
				break;
		}
		if (fw_state == cur_state) {
			printf("%s: firmware stuck in state %#x\n",
			    DEVNAME(sc), fw_state);
			return (1);
		}
	}

	return (0);
}
697 
698 int
699 mfii_get_info(struct mfii_softc *sc)
700 {
701 	struct mfii_ccb *ccb;
702 	int rv;
703 
704 	ccb = scsi_io_get(&sc->sc_iopool, 0);
705 	rv = mfii_mgmt(sc, ccb, MR_DCMD_CTRL_GET_INFO, NULL,
706 	    &sc->sc_info, sizeof(sc->sc_info), SCSI_DATA_IN|SCSI_NOSLEEP);
707 	scsi_io_put(&sc->sc_iopool, ccb);
708 
709 	if (rv != 0)
710 		return (rv);
711 
712 #ifdef MFI_DEBUG
713 	for (i = 0; i < sc->sc_info.mci_image_component_count; i++) {
714 		printf("%s: active FW %s Version %s date %s time %s\n",
715 		    DEVNAME(sc),
716 		    sc->sc_info.mci_image_component[i].mic_name,
717 		    sc->sc_info.mci_image_component[i].mic_version,
718 		    sc->sc_info.mci_image_component[i].mic_build_date,
719 		    sc->sc_info.mci_image_component[i].mic_build_time);
720 	}
721 
722 	for (i = 0; i < sc->sc_info.mci_pending_image_component_count; i++) {
723 		printf("%s: pending FW %s Version %s date %s time %s\n",
724 		    DEVNAME(sc),
725 		    sc->sc_info.mci_pending_image_component[i].mic_name,
726 		    sc->sc_info.mci_pending_image_component[i].mic_version,
727 		    sc->sc_info.mci_pending_image_component[i].mic_build_date,
728 		    sc->sc_info.mci_pending_image_component[i].mic_build_time);
729 	}
730 
731 	printf("%s: max_arms %d max_spans %d max_arrs %d max_lds %d name %s\n",
732 	    DEVNAME(sc),
733 	    sc->sc_info.mci_max_arms,
734 	    sc->sc_info.mci_max_spans,
735 	    sc->sc_info.mci_max_arrays,
736 	    sc->sc_info.mci_max_lds,
737 	    sc->sc_info.mci_product_name);
738 
739 	printf("%s: serial %s present %#x fw time %d max_cmds %d max_sg %d\n",
740 	    DEVNAME(sc),
741 	    sc->sc_info.mci_serial_number,
742 	    sc->sc_info.mci_hw_present,
743 	    sc->sc_info.mci_current_fw_time,
744 	    sc->sc_info.mci_max_cmds,
745 	    sc->sc_info.mci_max_sg_elements);
746 
747 	printf("%s: max_rq %d lds_pres %d lds_deg %d lds_off %d pd_pres %d\n",
748 	    DEVNAME(sc),
749 	    sc->sc_info.mci_max_request_size,
750 	    sc->sc_info.mci_lds_present,
751 	    sc->sc_info.mci_lds_degraded,
752 	    sc->sc_info.mci_lds_offline,
753 	    sc->sc_info.mci_pd_present);
754 
755 	printf("%s: pd_dsk_prs %d pd_dsk_pred_fail %d pd_dsk_fail %d\n",
756 	    DEVNAME(sc),
757 	    sc->sc_info.mci_pd_disks_present,
758 	    sc->sc_info.mci_pd_disks_pred_failure,
759 	    sc->sc_info.mci_pd_disks_failed);
760 
761 	printf("%s: nvram %d mem %d flash %d\n",
762 	    DEVNAME(sc),
763 	    sc->sc_info.mci_nvram_size,
764 	    sc->sc_info.mci_memory_size,
765 	    sc->sc_info.mci_flash_size);
766 
767 	printf("%s: ram_cor %d ram_uncor %d clus_all %d clus_act %d\n",
768 	    DEVNAME(sc),
769 	    sc->sc_info.mci_ram_correctable_errors,
770 	    sc->sc_info.mci_ram_uncorrectable_errors,
771 	    sc->sc_info.mci_cluster_allowed,
772 	    sc->sc_info.mci_cluster_active);
773 
774 	printf("%s: max_strps_io %d raid_lvl %#x adapt_ops %#x ld_ops %#x\n",
775 	    DEVNAME(sc),
776 	    sc->sc_info.mci_max_strips_per_io,
777 	    sc->sc_info.mci_raid_levels,
778 	    sc->sc_info.mci_adapter_ops,
779 	    sc->sc_info.mci_ld_ops);
780 
781 	printf("%s: strp_sz_min %d strp_sz_max %d pd_ops %#x pd_mix %#x\n",
782 	    DEVNAME(sc),
783 	    sc->sc_info.mci_stripe_sz_ops.min,
784 	    sc->sc_info.mci_stripe_sz_ops.max,
785 	    sc->sc_info.mci_pd_ops,
786 	    sc->sc_info.mci_pd_mix_support);
787 
788 	printf("%s: ecc_bucket %d pckg_prop %s\n",
789 	    DEVNAME(sc),
790 	    sc->sc_info.mci_ecc_bucket_count,
791 	    sc->sc_info.mci_package_version);
792 
793 	printf("%s: sq_nm %d prd_fail_poll %d intr_thrtl %d intr_thrtl_to %d\n",
794 	    DEVNAME(sc),
795 	    sc->sc_info.mci_properties.mcp_seq_num,
796 	    sc->sc_info.mci_properties.mcp_pred_fail_poll_interval,
797 	    sc->sc_info.mci_properties.mcp_intr_throttle_cnt,
798 	    sc->sc_info.mci_properties.mcp_intr_throttle_timeout);
799 
800 	printf("%s: rbld_rate %d patr_rd_rate %d bgi_rate %d cc_rate %d\n",
801 	    DEVNAME(sc),
802 	    sc->sc_info.mci_properties.mcp_rebuild_rate,
803 	    sc->sc_info.mci_properties.mcp_patrol_read_rate,
804 	    sc->sc_info.mci_properties.mcp_bgi_rate,
805 	    sc->sc_info.mci_properties.mcp_cc_rate);
806 
807 	printf("%s: rc_rate %d ch_flsh %d spin_cnt %d spin_dly %d clus_en %d\n",
808 	    DEVNAME(sc),
809 	    sc->sc_info.mci_properties.mcp_recon_rate,
810 	    sc->sc_info.mci_properties.mcp_cache_flush_interval,
811 	    sc->sc_info.mci_properties.mcp_spinup_drv_cnt,
812 	    sc->sc_info.mci_properties.mcp_spinup_delay,
813 	    sc->sc_info.mci_properties.mcp_cluster_enable);
814 
815 	printf("%s: coerc %d alarm %d dis_auto_rbld %d dis_bat_wrn %d ecc %d\n",
816 	    DEVNAME(sc),
817 	    sc->sc_info.mci_properties.mcp_coercion_mode,
818 	    sc->sc_info.mci_properties.mcp_alarm_enable,
819 	    sc->sc_info.mci_properties.mcp_disable_auto_rebuild,
820 	    sc->sc_info.mci_properties.mcp_disable_battery_warn,
821 	    sc->sc_info.mci_properties.mcp_ecc_bucket_size);
822 
823 	printf("%s: ecc_leak %d rest_hs %d exp_encl_dev %d\n",
824 	    DEVNAME(sc),
825 	    sc->sc_info.mci_properties.mcp_ecc_bucket_leak_rate,
826 	    sc->sc_info.mci_properties.mcp_restore_hotspare_on_insertion,
827 	    sc->sc_info.mci_properties.mcp_expose_encl_devices);
828 
829 	printf("%s: vendor %#x device %#x subvendor %#x subdevice %#x\n",
830 	    DEVNAME(sc),
831 	    sc->sc_info.mci_pci.mip_vendor,
832 	    sc->sc_info.mci_pci.mip_device,
833 	    sc->sc_info.mci_pci.mip_subvendor,
834 	    sc->sc_info.mci_pci.mip_subdevice);
835 
836 	printf("%s: type %#x port_count %d port_addr ",
837 	    DEVNAME(sc),
838 	    sc->sc_info.mci_host.mih_type,
839 	    sc->sc_info.mci_host.mih_port_count);
840 
841 	for (i = 0; i < 8; i++)
842 		printf("%.0llx ", sc->sc_info.mci_host.mih_port_addr[i]);
843 	printf("\n");
844 
845 	printf("%s: type %.x port_count %d port_addr ",
846 	    DEVNAME(sc),
847 	    sc->sc_info.mci_device.mid_type,
848 	    sc->sc_info.mci_device.mid_port_count);
849 
850 	for (i = 0; i < 8; i++)
851 		printf("%.0llx ", sc->sc_info.mci_device.mid_port_addr[i]);
852 	printf("\n");
853 #endif /* MFI_DEBUG */
854 
855 	return (0);
856 }
857 
/*
 * Issue a legacy MFI (MFA) frame and busy-wait for its completion by
 * watching the frame's status byte (0xff means still outstanding).
 * Times out after ~5 seconds, marking the ccb with MFI_CCB_F_ERR.
 * Returns 0 on completion, 1 on timeout.
 */
int
mfii_mfa_poll(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	struct mfi_frame_header	*hdr = ccb->ccb_request;
	u_int64_t r;
	int to = 0, rv = 0;

#ifdef DIAGNOSTIC
	if (ccb->ccb_cookie != NULL || ccb->ccb_done != NULL)
		panic("mfii_mfa_poll called with cookie or done set");
#endif

	hdr->mfh_context = ccb->ccb_smid;
	hdr->mfh_cmd_status = 0xff;	/* sentinel: not yet completed */
	hdr->mfh_flags |= htole16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE);

	/* post as an MFA descriptor carrying the frame's device address */
	r = MFII_REQ_MFA(ccb->ccb_request_dva);
	memcpy(&ccb->ccb_req, &r, sizeof(ccb->ccb_req));

	mfii_start(sc, ccb);

	for (;;) {
		/* resync so we see the firmware's update of the status */
		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
		    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		if (hdr->mfh_cmd_status != 0xff)
			break;

		if (to++ > 5000) { /* XXX 5 seconds busywait sucks */
			printf("%s: timeout on ccb %d\n", DEVNAME(sc),
			    ccb->ccb_smid);
			ccb->ccb_flags |= MFI_CCB_F_ERR;
			rv = 1;
			break;
		}

		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
		    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		delay(1000);
	}

	/* sync and unload any data buffer attached to the command */
	if (ccb->ccb_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap,
		    0, ccb->ccb_dmamap->dm_mapsize,
		    (ccb->ccb_direction == MFII_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
	}

	return (rv);
}
913 
/*
 * Run a command to completion by polling the reply post queue instead
 * of relying on interrupts.  The ccb's done/cookie are temporarily
 * hijacked by mfii_poll_done() to flag completion, then the caller's
 * original completion handler is invoked.  Always returns 0.
 */
int
mfii_poll(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	void (*done)(struct mfii_softc *, struct mfii_ccb *);
	void *cookie;
	int rv = 1;

	done = ccb->ccb_done;
	cookie = ccb->ccb_cookie;

	ccb->ccb_done = mfii_poll_done;
	ccb->ccb_cookie = &rv;

	mfii_start(sc, ccb);

	/* mfii_postq() fires ccb_done for completed commands */
	do {
		delay(10);
		mfii_postq(sc);
	} while (rv == 1);

	ccb->ccb_cookie = cookie;
	done(sc, ccb);

	return (0);
}
939 
940 void
941 mfii_poll_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
942 {
943 	int *rv = ccb->ccb_cookie;
944 
945 	*rv = 0;
946 }
947 
948 int
949 mfii_exec(struct mfii_softc *sc, struct mfii_ccb *ccb)
950 {
951 	struct mutex m = MUTEX_INITIALIZER(IPL_BIO);
952 
953 #ifdef DIAGNOSTIC
954 	if (ccb->ccb_cookie != NULL || ccb->ccb_done != NULL)
955 		panic("mfii_exec called with cookie or done set");
956 #endif
957 
958 	ccb->ccb_cookie = &m;
959 	ccb->ccb_done = mfii_exec_done;
960 
961 	mtx_enter(&m);
962 	while (ccb->ccb_cookie != NULL)
963 		msleep(ccb, &m, PRIBIO, "mfiiexec", 0);
964 	mtx_leave(&m);
965 
966 	return (0);
967 }
968 
/*
 * Completion hook for mfii_exec(): under the sleeper's mutex, clear
 * ccb_cookie (the "still running" marker) and wake the sleeping thread.
 */
void
mfii_exec_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	struct mutex *m = ccb->ccb_cookie;

	mtx_enter(m);
	ccb->ccb_cookie = NULL;
	wakeup_one(ccb);
	mtx_leave(m);
}
979 
/*
 * Issue a DCMD management command.  `buf'/`len' are bounced through a
 * freshly dma_alloc()ed buffer: copied in before a SCSI_DATA_OUT
 * command, copied back out after a successful SCSI_DATA_IN one.
 * SCSI_NOSLEEP selects busy-wait completion (mfii_mfa_poll), otherwise
 * we sleep in mfii_exec().  Returns 0, EIO or ENOMEM.
 *
 * NOTE(review): len == 0 (a dataless DCMD) would hand 0 to dma_alloc()
 * — confirm all callers pass a real buffer, or guard it.
 */
int
mfii_mgmt(struct mfii_softc *sc, struct mfii_ccb *ccb,
    u_int32_t opc, u_int8_t *mbox, void *buf, size_t len, int flags)
{
	struct mfi_dcmd_frame *dcmd = ccb->ccb_request;
	struct mfi_frame_header	*hdr = &dcmd->mdf_header;
	u_int64_t r;
	u_int8_t *dma_buf;
	int rv = EIO;

	dma_buf = dma_alloc(len, PR_WAITOK);
	if (dma_buf == NULL)
		return (ENOMEM);

	mfii_scrub_ccb(ccb);
	ccb->ccb_data = dma_buf;
	ccb->ccb_len = len;
	switch (flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
	case SCSI_DATA_IN:
		ccb->ccb_direction = MFII_DATA_IN;
		hdr->mfh_flags = htole16(MFI_FRAME_DIR_READ);
		break;
	case SCSI_DATA_OUT:
		ccb->ccb_direction = MFII_DATA_OUT;
		hdr->mfh_flags = htole16(MFI_FRAME_DIR_WRITE);
		bcopy(buf, dma_buf, len);
		break;
	}

	/* build the frame's scatter/gather list for the bounce buffer */
	if (mfii_load_mfa(sc, ccb, &dcmd->mdf_sgl,
	    ISSET(flags, SCSI_NOSLEEP)) != 0) {
		rv = ENOMEM;
		goto done;
	}

	hdr->mfh_cmd = MFI_CMD_DCMD;
	hdr->mfh_context = ccb->ccb_smid;
	hdr->mfh_data_len = htole32(len);
	hdr->mfh_sg_count = ccb->ccb_dmamap->dm_nsegs;

	dcmd->mdf_opcode = opc;
	/* handle special opcodes */
	if (mbox != NULL)
		memcpy(dcmd->mdf_mbox, mbox, MFI_MBOX_SIZE);

	if (ISSET(flags, SCSI_NOSLEEP))
		mfii_mfa_poll(sc, ccb);
	else {
		/* post as a legacy MFA descriptor and sleep for completion */
		r = MFII_REQ_MFA(ccb->ccb_request_dva);
		memcpy(&ccb->ccb_req, &r, sizeof(ccb->ccb_req));
		mfii_exec(sc, ccb);
	}

	if (hdr->mfh_cmd_status == MFI_STAT_OK) {
		rv = 0;

		if (ccb->ccb_direction == MFII_DATA_IN)
			bcopy(dma_buf, buf, len);
	}

done:
	dma_free(dma_buf, len);

	return (rv);
}
1045 
/*
 * Load ccb_data into the ccb's dmamap and fill the legacy MFI SGL at
 * `sglp'.  A zero-length command is a successful no-op.  Returns 0 on
 * success, 1 if the dmamap load fails.
 *
 * NOTE(review): the 32-bit sg32 SGL form is used, so every DMA segment
 * address is assumed to fit in 32 bits — confirm the dma tag enforces
 * this on machines with >4GB physical memory.
 */
int
mfii_load_mfa(struct mfii_softc *sc, struct mfii_ccb *ccb,
    void *sglp, int nosleep)
{
	union mfi_sgl *sgl = sglp;
	bus_dmamap_t dmap = ccb->ccb_dmamap;
	int error;
	int i;

	if (ccb->ccb_len == 0)
		return (0);

	error = bus_dmamap_load(sc->sc_dmat, dmap,
	    ccb->ccb_data, ccb->ccb_len, NULL,
	    nosleep ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error) {
		printf("%s: error %d loading dmamap\n", DEVNAME(sc), error);
		return (1);
	}

	for (i = 0; i < dmap->dm_nsegs; i++) {
		sgl->sg32[i].addr = htole32(dmap->dm_segs[i].ds_addr);
		sgl->sg32[i].len = htole32(dmap->dm_segs[i].ds_len);
	}

	/* make the data visible to the device before the command is posted */
	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
	    ccb->ccb_direction == MFII_DATA_OUT ?
	    BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);

	return (0);
}
1077 
/*
 * Post a command to the hardware: sync the request frame, then write
 * the 64-bit request descriptor as two 32-bit halves to the inbound
 * queue ports.  sc_post_mtx keeps the low/high pair atomic with
 * respect to other posters; order (IQPL then IQPH) matters —
 * presumably the hardware latches on the high-word write; confirm
 * against the controller documentation.
 */
void
mfii_start(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	u_int32_t *r = (u_int32_t *)&ccb->ccb_req;

	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
	    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

	mtx_enter(&sc->sc_post_mtx);
	mfii_write(sc, MFI_IQPL, r[0]);
	mfii_write(sc, MFI_IQPH, r[1]);
	mtx_leave(&sc->sc_post_mtx);
}
1092 
/*
 * Common completion path for a finished command: undo the DMA syncs
 * done at submit time (request frame, external sgl, data buffer),
 * unload the data dmamap, then hand the ccb to its completion
 * callback.
 */
void
mfii_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),
	    ccb->ccb_request_offset, MFII_REQUEST_SIZE,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

	/* ccb_sgl_len is only non-zero if a chain sgl was built */
	if (ccb->ccb_sgl_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_sgl),
		    ccb->ccb_sgl_offset, ccb->ccb_sgl_len,
		    BUS_DMASYNC_POSTWRITE);
	}

	/* ccb_len is only non-zero if a data buffer was mapped */
	if (ccb->ccb_len > 0) {
		bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap,
		    0, ccb->ccb_dmamap->dm_mapsize,
		    (ccb->ccb_direction == MFII_DATA_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
	}

	ccb->ccb_done(sc, ccb);
}
1117 
/*
 * Bring the firmware up by sending an MPII IOC_INIT request wrapped
 * in an MFI INIT frame.  The IOC_INIT message describes the reply
 * post queue and the request frame pool to the controller.  Returns
 * 0 on success, non-zero on failure.
 */
int
mfii_initialise_firmware(struct mfii_softc *sc)
{
	struct mpii_msg_iocinit_request *iiq;
	struct mfii_dmamem *m;
	struct mfii_ccb *ccb;
	struct mfi_init_frame *init;
	int rv;

	/* the IOC_INIT payload must be in DMA-able memory */
	m = mfii_dmamem_alloc(sc, sizeof(*iiq));
	if (m == NULL)
		return (1);

	iiq = MFII_DMA_KVA(m);
	bzero(iiq, sizeof(*iiq));

	iiq->function = MPII_FUNCTION_IOC_INIT;
	iiq->whoinit = MPII_WHOINIT_HOST_DRIVER;

	/* message version 2.0, header unit/dev versions */
	iiq->msg_version_maj = 0x02;
	iiq->msg_version_min = 0x00;
	iiq->hdr_version_unit = 0x10;
	iiq->hdr_version_dev = 0x0;

	/* frame size is expressed in 32-bit words */
	iiq->system_request_frame_size = htole16(MFII_REQUEST_SIZE / 4);

	iiq->reply_descriptor_post_queue_depth =
	    htole16(sc->sc_reply_postq_depth);
	iiq->reply_free_queue_depth = htole16(0);

	htolem32(&iiq->sense_buffer_address_high,
	    MFII_DMA_DVA(sc->sc_sense) >> 32);

	htolem32(&iiq->reply_descriptor_post_queue_address_lo,
	    MFII_DMA_DVA(sc->sc_reply_postq));
	htolem32(&iiq->reply_descriptor_post_queue_address_hi,
	    MFII_DMA_DVA(sc->sc_reply_postq) >> 32);

	htolem32(&iiq->system_request_frame_base_address_lo,
	    MFII_DMA_DVA(sc->sc_requests));
	htolem32(&iiq->system_request_frame_base_address_hi,
	    MFII_DMA_DVA(sc->sc_requests) >> 32);

	iiq->timestamp = htole64(time_uptime);

	/* wrap the IOC_INIT message in an MFI INIT frame */
	ccb = scsi_io_get(&sc->sc_iopool, 0);
	mfii_scrub_ccb(ccb);
	init = ccb->ccb_request;

	init->mif_header.mfh_cmd = MFI_CMD_INIT;
	init->mif_header.mfh_data_len = htole32(sizeof(*iiq));
	init->mif_qinfo_new_addr = htole64(MFII_DMA_DVA(m));

	/* prepare the reply post queue for device writes */
	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_reply_postq),
	    0, MFII_DMA_LEN(sc->sc_reply_postq),
	    BUS_DMASYNC_PREREAD);

	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(m),
	    0, sizeof(*iiq), BUS_DMASYNC_PREREAD);

	/* this runs before interrupts are set up, so poll for completion */
	rv = mfii_mfa_poll(sc, ccb);

	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(m),
	    0, sizeof(*iiq), BUS_DMASYNC_POSTREAD);

	scsi_io_put(&sc->sc_iopool, ccb);
	mfii_dmamem_free(sc, m);

	return (rv);
}
1188 
1189 int
1190 mfii_my_intr(struct mfii_softc *sc)
1191 {
1192 	u_int32_t status;
1193 
1194 	status = mfii_read(sc, MFI_OSTS);
1195 	if (ISSET(status, 0x1)) {
1196 		mfii_write(sc, MFI_OSTS, status);
1197 		return (1);
1198 	}
1199 
1200 	return (ISSET(status, MFII_OSTS_INTR_VALID) ? 1 : 0);
1201 }
1202 
/*
 * Interrupt handler entry point.  Returns 1 if the interrupt was
 * ours (and the reply post queue has been drained), 0 otherwise.
 */
int
mfii_intr(void *arg)
{
	struct mfii_softc *sc = arg;
	int claimed;

	claimed = mfii_my_intr(sc);
	if (claimed)
		mfii_postq(sc);

	return (claimed);
}
1215 
/*
 * Drain the reply post queue: gather completed ccbs onto a local
 * list under the queue mutex, tell the hardware how far we have
 * consumed, then run each completion outside the lock.
 */
void
mfii_postq(struct mfii_softc *sc)
{
	struct mfii_ccb_list ccbs = SIMPLEQ_HEAD_INITIALIZER(ccbs);
	struct mpii_reply_descr *postq = MFII_DMA_KVA(sc->sc_reply_postq);
	struct mpii_reply_descr *rdp;
	struct mfii_ccb *ccb;
	int rpi = 0;	/* set when any descriptor was consumed */

	mtx_enter(&sc->sc_reply_postq_mtx);

	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_reply_postq),
	    0, MFII_DMA_LEN(sc->sc_reply_postq),
	    BUS_DMASYNC_POSTREAD);

	for (;;) {
		rdp = &postq[sc->sc_reply_postq_index];
		/* an unused descriptor means we have caught up */
		if ((rdp->reply_flags & MPII_REPLY_DESCR_TYPE_MASK) ==
		    MPII_REPLY_DESCR_UNUSED)
			break;
		if (rdp->data == 0xffffffff) {
			/*
			 * ioc is still writing to the reply post queue
			 * race condition - bail!
			 */
			break;
		}

		/* smid is 1 based; 0 is reserved (see mfii_init_ccb) */
		ccb = &sc->sc_ccb[letoh16(rdp->smid) - 1];
		SIMPLEQ_INSERT_TAIL(&ccbs, ccb, ccb_link);
		/* mark the descriptor unused for the next wrap around */
		memset(rdp, 0xff, sizeof(*rdp));

		sc->sc_reply_postq_index++;
		sc->sc_reply_postq_index %= sc->sc_reply_postq_depth;
		rpi = 1;
	}

	bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_reply_postq),
	    0, MFII_DMA_LEN(sc->sc_reply_postq),
	    BUS_DMASYNC_PREREAD);

	/* advance the reply post host index register */
	if (rpi)
		mfii_write(sc, MFII_RPI, sc->sc_reply_postq_index);

	mtx_leave(&sc->sc_reply_postq_mtx);

	/* run completions without holding the queue mutex */
	while ((ccb = SIMPLEQ_FIRST(&ccbs)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&ccbs, ccb_link);
		mfii_done(sc, ccb);
	}
}
1267 
1268 void
1269 mfii_scsi_cmd(struct scsi_xfer *xs)
1270 {
1271 	struct scsi_link *link = xs->sc_link;
1272 	struct mfii_softc *sc = link->adapter_softc;
1273 	struct mfii_ccb *ccb = xs->io;
1274 
1275 	mfii_scrub_ccb(ccb);
1276 	ccb->ccb_cookie = xs;
1277 	ccb->ccb_done = mfii_scsi_cmd_done;
1278 	ccb->ccb_data = xs->data;
1279 	ccb->ccb_len = xs->datalen;
1280 
1281 #if 0
1282 	switch (xs->cmd->opcode) {
1283 	case READ_COMMAND:
1284 	case READ_BIG:
1285 	case READ_12:
1286 	case READ_16:
1287 	case WRITE_COMMAND:
1288 	case WRITE_BIG:
1289 	case WRITE_12:
1290 	case WRITE_16:
1291 		if (mfii_scsi_cmd_io(sc, xs) != 0)
1292 			goto stuffup;
1293 
1294 		break;
1295 
1296 	default:
1297 #endif
1298 		if (mfii_scsi_cmd_cdb(sc, xs) != 0)
1299 			goto stuffup;
1300 #if 0
1301 		break;
1302 	}
1303 #endif
1304 
1305 	xs->error = XS_NOERROR;
1306 	xs->resid = 0;
1307 
1308 	if (ISSET(xs->flags, SCSI_POLL)) {
1309 		if (mfii_poll(sc, ccb) != 0)
1310 			goto stuffup;
1311 		return;
1312 	}
1313 
1314 	mfii_start(sc, ccb);
1315 	return;
1316 
1317 stuffup:
1318 	xs->error = XS_DRIVER_STUFFUP;
1319 	scsi_done(xs);
1320 }
1321 
1322 void
1323 mfii_scsi_cmd_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
1324 {
1325 	struct scsi_xfer *xs = ccb->ccb_cookie;
1326 	struct mpii_msg_scsi_io *io = ccb->ccb_request;
1327 	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
1328 
1329 	switch (ctx->status) {
1330 	case MFI_STAT_OK:
1331 		break;
1332 
1333 	case MFI_STAT_SCSI_DONE_WITH_ERROR:
1334 		xs->error = XS_SENSE;
1335 		memset(&xs->sense, 0, sizeof(xs->sense));
1336 		memcpy(&xs->sense, ccb->ccb_sense, sizeof(xs->sense));
1337 		break;
1338 
1339 	case MFI_STAT_LD_OFFLINE:
1340 	case MFI_STAT_DEVICE_NOT_FOUND:
1341 		xs->error = XS_SELTIMEOUT;
1342 		break;
1343 
1344 	default:
1345 		xs->error = XS_DRIVER_STUFFUP;
1346 		break;
1347 	}
1348 
1349 	scsi_done(xs);
1350 }
1351 
/*
 * Fast-path LDIO translation for read/write commands.  This is
 * unfinished: it decodes the block range but never builds a request,
 * and unconditionally returns failure.  Its only call site (in
 * mfii_scsi_cmd) is currently under #if 0.
 */
int
mfii_scsi_cmd_io(struct mfii_softc *sc, struct scsi_xfer *xs)
{
	struct mfii_ccb *ccb = xs->io;
	u_int64_t blkno;
	u_int32_t nblks;

	ccb->ccb_req.flags = MFII_REQ_TYPE_LDIO;
	ccb->ccb_req.smid = letoh16(ccb->ccb_smid);

	scsi_cmd_rw_decode(xs->cmd, &blkno, &nblks);

	/* XXX request construction not implemented yet */
	return (1);
}
1366 
1367 int
1368 mfii_scsi_cmd_cdb(struct mfii_softc *sc, struct scsi_xfer *xs)
1369 {
1370 	struct scsi_link *link = xs->sc_link;
1371 	struct mfii_ccb *ccb = xs->io;
1372 	struct mpii_msg_scsi_io *io = ccb->ccb_request;
1373 	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
1374 
1375 	io->dev_handle = htole16(link->target);
1376 	io->function = MFII_FUNCTION_LDIO_REQUEST;
1377 	io->sense_buffer_low_address = htole32(ccb->ccb_sense_dva);
1378 	io->sgl_flags = htole16(0x02); /* XXX */
1379 	io->sense_buffer_length = sizeof(xs->sense);
1380 	io->sgl_offset0 = (sizeof(*io) + sizeof(*ctx)) / 4;
1381 	io->data_length = htole32(xs->datalen);
1382 	io->io_flags = htole16(xs->cmdlen);
1383 	io->lun[0] = htobe16(link->lun);
1384 	switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
1385 	case SCSI_DATA_IN:
1386 		ccb->ccb_direction = MFII_DATA_IN;
1387 		io->direction = MPII_SCSIIO_DIR_READ;
1388 		break;
1389 	case SCSI_DATA_OUT:
1390 		ccb->ccb_direction = MFII_DATA_OUT;
1391 		io->direction = MPII_SCSIIO_DIR_WRITE;
1392 		break;
1393 	default:
1394 		ccb->ccb_direction = MFII_DATA_NONE;
1395 		io->direction = MPII_SCSIIO_DIR_NONE;
1396 		break;
1397 	}
1398 	bcopy(xs->cmd, io->cdb, xs->cmdlen);
1399 
1400 	ctx->virtual_disk_target_id = htole16(link->target);
1401 
1402 	if (mfii_load_ccb(sc, ccb, ctx + 1,
1403 	    ISSET(xs->flags, SCSI_NOSLEEP)) != 0)
1404 		return (1);
1405 
1406 	ctx->num_sge = (ccb->ccb_len == 0) ? 0 : ccb->ccb_dmamap->dm_nsegs;
1407 
1408 	ccb->ccb_req.flags = MFII_REQ_TYPE_SCSI;
1409 	ccb->ccb_req.smid = letoh16(ccb->ccb_smid);
1410 
1411 	return (0);
1412 }
1413 
1414 void
1415 mfii_pd_scsi_cmd(struct scsi_xfer *xs)
1416 {
1417 	struct scsi_link *link = xs->sc_link;
1418 	struct mfii_softc *sc = link->adapter_softc;
1419 	struct mfii_ccb *ccb = xs->io;
1420 
1421 	mfii_scrub_ccb(ccb);
1422 	ccb->ccb_cookie = xs;
1423 	ccb->ccb_done = mfii_scsi_cmd_done;
1424 	ccb->ccb_data = xs->data;
1425 	ccb->ccb_len = xs->datalen;
1426 
1427 	if (mfii_pd_scsi_cmd_cdb(sc, xs) != 0)
1428 		goto stuffup;
1429 
1430 	xs->error = XS_NOERROR;
1431 	xs->resid = 0;
1432 
1433 	if (ISSET(xs->flags, SCSI_POLL)) {
1434 		if (mfii_poll(sc, ccb) != 0)
1435 			goto stuffup;
1436 		return;
1437 	}
1438 
1439 	mfii_start(sc, ccb);
1440 	return;
1441 
1442 stuffup:
1443 	xs->error = XS_DRIVER_STUFFUP;
1444 	scsi_done(xs);
1445 }
1446 
1447 int
1448 mfii_pd_scsi_probe(struct scsi_link *link)
1449 {
1450 	struct mfii_ccb *ccb;
1451 	uint8_t mbox[MFI_MBOX_SIZE];
1452 	struct mfii_softc *sc = link->adapter_softc;
1453 	struct mfii_pd_link *pl = sc->sc_pd->pd_links[link->target];
1454 	int rv;
1455 
1456 	if (link->lun > 0)
1457 		return (0);
1458 
1459 	if (pl == NULL)
1460 		return (ENXIO);
1461 
1462 	memset(mbox, 0, sizeof(mbox));
1463 	memcpy(&mbox[0], &pl->pd_id, sizeof(pl->pd_id));
1464 
1465 	ccb = scsi_io_get(&sc->sc_iopool, 0);
1466 	rv = mfii_mgmt(sc, ccb, MR_DCMD_PD_GET_INFO, mbox, &pl->pd_info,
1467 	    sizeof(pl->pd_info), SCSI_DATA_IN|SCSI_NOSLEEP);
1468 	scsi_io_put(&sc->sc_iopool, ccb);
1469 	if (rv != 0)
1470 		return (EIO);
1471 
1472 	if (letoh16(pl->pd_info.mpd_fw_state) != MFI_PD_SYSTEM)
1473 		return (ENXIO);
1474 
1475 	return (0);
1476 }
1477 
1478 int
1479 mfii_pd_scsi_cmd_cdb(struct mfii_softc *sc, struct scsi_xfer *xs)
1480 {
1481 	struct scsi_link *link = xs->sc_link;
1482 	struct mfii_ccb *ccb = xs->io;
1483 	struct mpii_msg_scsi_io *io = ccb->ccb_request;
1484 	struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
1485 
1486 	io->dev_handle = sc->sc_pd->pd_links[link->target]->pd_handle;
1487 	io->function = 0;
1488 	io->sense_buffer_low_address = htole32(ccb->ccb_sense_dva);
1489 	io->sgl_flags = htole16(0x02); /* XXX */
1490 	io->sense_buffer_length = sizeof(xs->sense);
1491 	io->sgl_offset0 = (sizeof(*io) + sizeof(*ctx)) / 4;
1492 	io->data_length = htole32(xs->datalen);
1493 	io->io_flags = htole16(xs->cmdlen);
1494 	io->lun[0] = htobe16(link->lun);
1495 	switch (xs->flags & (SCSI_DATA_IN | SCSI_DATA_OUT)) {
1496 	case SCSI_DATA_IN:
1497 		ccb->ccb_direction = MFII_DATA_IN;
1498 		io->direction = MPII_SCSIIO_DIR_READ;
1499 		break;
1500 	case SCSI_DATA_OUT:
1501 		ccb->ccb_direction = MFII_DATA_OUT;
1502 		io->direction = MPII_SCSIIO_DIR_WRITE;
1503 		break;
1504 	default:
1505 		ccb->ccb_direction = MFII_DATA_NONE;
1506 		io->direction = MPII_SCSIIO_DIR_NONE;
1507 		break;
1508 	}
1509 	bcopy(xs->cmd, io->cdb, xs->cmdlen);
1510 
1511 	ctx->virtual_disk_target_id = htole16(link->target);
1512 	ctx->raid_flags = MFII_RAID_CTX_IO_TYPE_SYSPD;
1513 	ctx->timeout_value = sc->sc_pd->pd_timeout;
1514 
1515 	if (mfii_load_ccb(sc, ccb, ctx + 1,
1516 	    ISSET(xs->flags, SCSI_NOSLEEP)) != 0)
1517 		return (1);
1518 
1519 	ctx->num_sge = (ccb->ccb_len == 0) ? 0 : ccb->ccb_dmamap->dm_nsegs;
1520 
1521 	ccb->ccb_req.flags = MFII_REQ_TYPE_HI_PRI;
1522 	ccb->ccb_req.smid = letoh16(ccb->ccb_smid);
1523 	ccb->ccb_req.dev_handle = sc->sc_pd->pd_links[link->target]->pd_handle;
1524 
1525 	return (0);
1526 }
1527 
/*
 * Map the ccb's data buffer and build the MPI-style scatter-gather
 * list directly after the request frame at sglp.  If the segments do
 * not all fit inside the frame, the last in-frame slot becomes a
 * chain element pointing at the ccb's external sgl and the remaining
 * segments are written there.  Returns 0 on success, 1 if the dmamap
 * load fails.
 */
int
mfii_load_ccb(struct mfii_softc *sc, struct mfii_ccb *ccb, void *sglp,
    int nosleep)
{
	struct mpii_msg_request *req = ccb->ccb_request;
	struct mfii_sge *sge = NULL, *nsge = sglp;
	struct mfii_sge *ce = NULL;	/* chain element, if one is needed */
	bus_dmamap_t dmap = ccb->ccb_dmamap;
	u_int space;
	int i;

	int error;

	/* dataless commands need no sgl */
	if (ccb->ccb_len == 0)
		return (0);

	error = bus_dmamap_load(sc->sc_dmat, dmap,
	    ccb->ccb_data, ccb->ccb_len, NULL,
	    nosleep ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
	if (error) {
		printf("%s: error %d loading dmamap\n", DEVNAME(sc), error);
		return (1);
	}

	/* number of sge slots left between sglp and the end of the frame */
	space = (MFII_REQUEST_SIZE - ((u_int8_t *)nsge - (u_int8_t *)req)) /
	    sizeof(*nsge);
	if (dmap->dm_nsegs > space) {
		/* reserve the last in-frame slot for the chain element */
		space--;

		ccb->ccb_sgl_len = (dmap->dm_nsegs - space) * sizeof(*nsge);
		bzero(ccb->ccb_sgl, ccb->ccb_sgl_len);

		ce = nsge + space;
		ce->sg_addr = htole64(ccb->ccb_sgl_dva);
		ce->sg_len = htole32(ccb->ccb_sgl_len);
		ce->sg_flags = MFII_SGE_CHAIN_ELEMENT |
		    MFII_SGE_ADDR_IOCPLBNTA;

		/* chain_offset is expressed in 16 byte units */
		req->chain_offset = ((u_int8_t *)ce - (u_int8_t *)req) / 16;
	}

	for (i = 0; i < dmap->dm_nsegs; i++) {
		/* switch to the external sgl when we reach the chain slot */
		if (nsge == ce)
			nsge = ccb->ccb_sgl;

		sge = nsge;

		sge->sg_addr = htole64(dmap->dm_segs[i].ds_addr);
		sge->sg_len = htole32(dmap->dm_segs[i].ds_len);
		sge->sg_flags = MFII_SGE_ADDR_SYSTEM;

		nsge = sge + 1;
	}

	bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
	    ccb->ccb_direction == MFII_DATA_OUT ?
	    BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD);

	if (ccb->ccb_sgl_len > 0) {
		/* flush the external sgl so the device can read it */
		bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_sgl),
		    ccb->ccb_sgl_offset, ccb->ccb_sgl_len,
		    BUS_DMASYNC_PREWRITE);
	}

	return (0);
}
1594 
1595 void *
1596 mfii_get_ccb(void *cookie)
1597 {
1598 	struct mfii_softc *sc = cookie;
1599 	struct mfii_ccb *ccb;
1600 
1601 	mtx_enter(&sc->sc_ccb_mtx);
1602 	ccb = SIMPLEQ_FIRST(&sc->sc_ccb_freeq);
1603 	if (ccb != NULL)
1604 		SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_freeq, ccb_link);
1605 	mtx_leave(&sc->sc_ccb_mtx);
1606 
1607 	return (ccb);
1608 }
1609 
1610 void
1611 mfii_scrub_ccb(struct mfii_ccb *ccb)
1612 {
1613 	ccb->ccb_cookie = NULL;
1614 	ccb->ccb_done = NULL;
1615 	ccb->ccb_flags = 0;
1616 	ccb->ccb_data = NULL;
1617 	ccb->ccb_direction = 0;
1618 	ccb->ccb_len = 0;
1619 	ccb->ccb_sgl_len = 0;
1620 
1621 	bzero(&ccb->ccb_req, sizeof(ccb->ccb_req));
1622 	bzero(ccb->ccb_request, MFII_REQUEST_SIZE);
1623 }
1624 
1625 void
1626 mfii_put_ccb(void *cookie, void *io)
1627 {
1628 	struct mfii_softc *sc = cookie;
1629 	struct mfii_ccb *ccb = io;
1630 
1631 	mtx_enter(&sc->sc_ccb_mtx);
1632 	SIMPLEQ_INSERT_HEAD(&sc->sc_ccb_freeq, ccb, ccb_link);
1633 	mtx_leave(&sc->sc_ccb_mtx);
1634 }
1635 
/*
 * Allocate the ccb array and wire each ccb to its slice of the
 * preallocated request, sense and sgl DMA regions.  Returns 0 on
 * success; on failure all dmamaps created so far are destroyed and
 * 1 is returned.
 */
int
mfii_init_ccb(struct mfii_softc *sc)
{
	struct mfii_ccb *ccb;
	u_int8_t *request = MFII_DMA_KVA(sc->sc_requests);
	u_int8_t *sense = MFII_DMA_KVA(sc->sc_sense);
	u_int8_t *sgl = MFII_DMA_KVA(sc->sc_sgl);
	u_int i;
	int error;

	/* M_WAITOK: this cannot fail, it sleeps until memory is available */
	sc->sc_ccb = mallocarray(sc->sc_max_cmds, sizeof(struct mfii_ccb),
	    M_DEVBUF, M_WAITOK|M_ZERO);

	for (i = 0; i < sc->sc_max_cmds; i++) {
		ccb = &sc->sc_ccb[i];

		/* create a dma map for transfer */
		error = bus_dmamap_create(sc->sc_dmat,
		    MAXPHYS, sc->sc_max_sgl, MAXPHYS, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->ccb_dmamap);
		if (error) {
			printf("%s: cannot create ccb dmamap (%d)\n",
			    DEVNAME(sc), error);
			goto destroy;
		}

		/* select i + 1'th request. 0 is reserved for events */
		ccb->ccb_smid = i + 1;
		ccb->ccb_request_offset = MFII_REQUEST_SIZE * (i + 1);
		ccb->ccb_request = request + ccb->ccb_request_offset;
		ccb->ccb_request_dva = MFII_DMA_DVA(sc->sc_requests) +
		    ccb->ccb_request_offset;

		/* select i'th sense */
		ccb->ccb_sense_offset = MFI_SENSE_SIZE * i;
		ccb->ccb_sense = (struct mfi_sense *)(sense +
		    ccb->ccb_sense_offset);
		/* only the low 32 bits of the sense dva go in the frame */
		ccb->ccb_sense_dva = (u_int32_t)(MFII_DMA_DVA(sc->sc_sense) +
		    ccb->ccb_sense_offset);

		/* select i'th sgl */
		ccb->ccb_sgl_offset = sizeof(struct mfii_sge) *
		    sc->sc_max_sgl * i;
		ccb->ccb_sgl = (struct mfii_sge *)(sgl + ccb->ccb_sgl_offset);
		ccb->ccb_sgl_dva = MFII_DMA_DVA(sc->sc_sgl) +
		    ccb->ccb_sgl_offset;

		/* add ccb to queue */
		mfii_put_ccb(sc, ccb);
	}

	return (0);

destroy:
	/* free dma maps and ccb memory */
	while ((ccb = mfii_get_ccb(sc)) != NULL)
		bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);

	free(sc->sc_ccb, M_DEVBUF, 0);

	return (1);
}
1698 
1699