xref: /freebsd/sys/dev/isp/isp_pci.c (revision 0957b409)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2009-2018 Alexander Motin <mav@FreeBSD.org>
5  * Copyright (c) 1997-2008 by Matthew Jacob
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice immediately at the beginning of the file, without modification,
13  *    this list of conditions, and the following disclaimer.
14  * 2. The name of the author may not be used to endorse or promote products
15  *    derived from this software without specific prior written permission.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
21  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  */
29 /*
30  * PCI specific probe and attach routines for Qlogic ISP SCSI adapters.
31  * FreeBSD Version.
32  */
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35 
36 #include <sys/param.h>
37 #include <sys/systm.h>
38 #include <sys/kernel.h>
39 #include <sys/module.h>
40 #include <sys/linker.h>
41 #include <sys/firmware.h>
42 #include <sys/bus.h>
43 #include <sys/stdint.h>
44 #include <dev/pci/pcireg.h>
45 #include <dev/pci/pcivar.h>
46 #include <machine/bus.h>
47 #include <machine/resource.h>
48 #include <sys/rman.h>
49 #include <sys/malloc.h>
50 #include <sys/uio.h>
51 
52 #ifdef __sparc64__
53 #include <dev/ofw/openfirm.h>
54 #include <machine/ofw_machdep.h>
55 #endif
56 
57 #include <dev/isp/isp_freebsd.h>
58 
59 static uint32_t isp_pci_rd_reg(ispsoftc_t *, int);
60 static void isp_pci_wr_reg(ispsoftc_t *, int, uint32_t);
61 static uint32_t isp_pci_rd_reg_1080(ispsoftc_t *, int);
62 static void isp_pci_wr_reg_1080(ispsoftc_t *, int, uint32_t);
63 static uint32_t isp_pci_rd_reg_2400(ispsoftc_t *, int);
64 static void isp_pci_wr_reg_2400(ispsoftc_t *, int, uint32_t);
65 static uint32_t isp_pci_rd_reg_2600(ispsoftc_t *, int);
66 static void isp_pci_wr_reg_2600(ispsoftc_t *, int, uint32_t);
67 static void isp_pci_run_isr(ispsoftc_t *);
68 static void isp_pci_run_isr_2300(ispsoftc_t *);
69 static void isp_pci_run_isr_2400(ispsoftc_t *);
70 static int isp_pci_mbxdma(ispsoftc_t *);
71 static void isp_pci_mbxdmafree(ispsoftc_t *);
72 static int isp_pci_dmasetup(ispsoftc_t *, XS_T *, void *);
73 static int isp_pci_irqsetup(ispsoftc_t *);
74 static void isp_pci_dumpregs(ispsoftc_t *, const char *);
75 
76 static struct ispmdvec mdvec = {
77 	isp_pci_run_isr,
78 	isp_pci_rd_reg,
79 	isp_pci_wr_reg,
80 	isp_pci_mbxdma,
81 	isp_pci_dmasetup,
82 	isp_common_dmateardown,
83 	isp_pci_irqsetup,
84 	isp_pci_dumpregs,
85 	NULL,
86 	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
87 };
88 
89 static struct ispmdvec mdvec_1080 = {
90 	isp_pci_run_isr,
91 	isp_pci_rd_reg_1080,
92 	isp_pci_wr_reg_1080,
93 	isp_pci_mbxdma,
94 	isp_pci_dmasetup,
95 	isp_common_dmateardown,
96 	isp_pci_irqsetup,
97 	isp_pci_dumpregs,
98 	NULL,
99 	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
100 };
101 
102 static struct ispmdvec mdvec_12160 = {
103 	isp_pci_run_isr,
104 	isp_pci_rd_reg_1080,
105 	isp_pci_wr_reg_1080,
106 	isp_pci_mbxdma,
107 	isp_pci_dmasetup,
108 	isp_common_dmateardown,
109 	isp_pci_irqsetup,
110 	isp_pci_dumpregs,
111 	NULL,
112 	BIU_BURST_ENABLE|BIU_PCI_CONF1_FIFO_64
113 };
114 
115 static struct ispmdvec mdvec_2100 = {
116 	isp_pci_run_isr,
117 	isp_pci_rd_reg,
118 	isp_pci_wr_reg,
119 	isp_pci_mbxdma,
120 	isp_pci_dmasetup,
121 	isp_common_dmateardown,
122 	isp_pci_irqsetup,
123 	isp_pci_dumpregs
124 };
125 
126 static struct ispmdvec mdvec_2200 = {
127 	isp_pci_run_isr,
128 	isp_pci_rd_reg,
129 	isp_pci_wr_reg,
130 	isp_pci_mbxdma,
131 	isp_pci_dmasetup,
132 	isp_common_dmateardown,
133 	isp_pci_irqsetup,
134 	isp_pci_dumpregs
135 };
136 
137 static struct ispmdvec mdvec_2300 = {
138 	isp_pci_run_isr_2300,
139 	isp_pci_rd_reg,
140 	isp_pci_wr_reg,
141 	isp_pci_mbxdma,
142 	isp_pci_dmasetup,
143 	isp_common_dmateardown,
144 	isp_pci_irqsetup,
145 	isp_pci_dumpregs
146 };
147 
148 static struct ispmdvec mdvec_2400 = {
149 	isp_pci_run_isr_2400,
150 	isp_pci_rd_reg_2400,
151 	isp_pci_wr_reg_2400,
152 	isp_pci_mbxdma,
153 	isp_pci_dmasetup,
154 	isp_common_dmateardown,
155 	isp_pci_irqsetup,
156 	NULL
157 };
158 
159 static struct ispmdvec mdvec_2500 = {
160 	isp_pci_run_isr_2400,
161 	isp_pci_rd_reg_2400,
162 	isp_pci_wr_reg_2400,
163 	isp_pci_mbxdma,
164 	isp_pci_dmasetup,
165 	isp_common_dmateardown,
166 	isp_pci_irqsetup,
167 	NULL
168 };
169 
170 static struct ispmdvec mdvec_2600 = {
171 	isp_pci_run_isr_2400,
172 	isp_pci_rd_reg_2600,
173 	isp_pci_wr_reg_2600,
174 	isp_pci_mbxdma,
175 	isp_pci_dmasetup,
176 	isp_common_dmateardown,
177 	isp_pci_irqsetup,
178 	NULL
179 };
180 
181 static struct ispmdvec mdvec_2700 = {
182 	isp_pci_run_isr_2400,
183 	isp_pci_rd_reg_2600,
184 	isp_pci_wr_reg_2600,
185 	isp_pci_mbxdma,
186 	isp_pci_dmasetup,
187 	isp_common_dmateardown,
188 	isp_pci_irqsetup,
189 	NULL
190 };
191 
192 #ifndef	PCIM_CMD_INVEN
193 #define	PCIM_CMD_INVEN			0x10
194 #endif
195 #ifndef	PCIM_CMD_BUSMASTEREN
196 #define	PCIM_CMD_BUSMASTEREN		0x0004
197 #endif
198 #ifndef	PCIM_CMD_PERRESPEN
199 #define	PCIM_CMD_PERRESPEN		0x0040
200 #endif
201 #ifndef	PCIM_CMD_SEREN
202 #define	PCIM_CMD_SEREN			0x0100
203 #endif
204 #ifndef	PCIM_CMD_INTX_DISABLE
205 #define	PCIM_CMD_INTX_DISABLE		0x0400
206 #endif
207 
208 #ifndef	PCIR_COMMAND
209 #define	PCIR_COMMAND			0x04
210 #endif
211 
212 #ifndef	PCIR_CACHELNSZ
213 #define	PCIR_CACHELNSZ			0x0c
214 #endif
215 
216 #ifndef	PCIR_LATTIMER
217 #define	PCIR_LATTIMER			0x0d
218 #endif
219 
220 #ifndef	PCIR_ROMADDR
221 #define	PCIR_ROMADDR			0x30
222 #endif
223 
224 #define	PCI_VENDOR_QLOGIC		0x1077
225 
226 #define	PCI_PRODUCT_QLOGIC_ISP1020	0x1020
227 #define	PCI_PRODUCT_QLOGIC_ISP1080	0x1080
228 #define	PCI_PRODUCT_QLOGIC_ISP10160	0x1016
229 #define	PCI_PRODUCT_QLOGIC_ISP12160	0x1216
230 #define	PCI_PRODUCT_QLOGIC_ISP1240	0x1240
231 #define	PCI_PRODUCT_QLOGIC_ISP1280	0x1280
232 
233 #define	PCI_PRODUCT_QLOGIC_ISP2100	0x2100
234 #define	PCI_PRODUCT_QLOGIC_ISP2200	0x2200
235 #define	PCI_PRODUCT_QLOGIC_ISP2300	0x2300
236 #define	PCI_PRODUCT_QLOGIC_ISP2312	0x2312
237 #define	PCI_PRODUCT_QLOGIC_ISP2322	0x2322
238 #define	PCI_PRODUCT_QLOGIC_ISP2422	0x2422
239 #define	PCI_PRODUCT_QLOGIC_ISP2432	0x2432
240 #define	PCI_PRODUCT_QLOGIC_ISP2532	0x2532
241 #define	PCI_PRODUCT_QLOGIC_ISP5432	0x5432
242 #define	PCI_PRODUCT_QLOGIC_ISP6312	0x6312
243 #define	PCI_PRODUCT_QLOGIC_ISP6322	0x6322
244 #define	PCI_PRODUCT_QLOGIC_ISP2031	0x2031
245 #define	PCI_PRODUCT_QLOGIC_ISP8031	0x8031
246 #define	PCI_PRODUCT_QLOGIC_ISP2684	0x2171
247 #define	PCI_PRODUCT_QLOGIC_ISP2692	0x2b61
248 #define	PCI_PRODUCT_QLOGIC_ISP2714	0x2071
249 #define	PCI_PRODUCT_QLOGIC_ISP2722	0x2261
250 
251 #define	PCI_QLOGIC_ISP1020	\
252 	((PCI_PRODUCT_QLOGIC_ISP1020 << 16) | PCI_VENDOR_QLOGIC)
253 #define	PCI_QLOGIC_ISP1080	\
254 	((PCI_PRODUCT_QLOGIC_ISP1080 << 16) | PCI_VENDOR_QLOGIC)
255 #define	PCI_QLOGIC_ISP10160	\
256 	((PCI_PRODUCT_QLOGIC_ISP10160 << 16) | PCI_VENDOR_QLOGIC)
257 #define	PCI_QLOGIC_ISP12160	\
258 	((PCI_PRODUCT_QLOGIC_ISP12160 << 16) | PCI_VENDOR_QLOGIC)
259 #define	PCI_QLOGIC_ISP1240	\
260 	((PCI_PRODUCT_QLOGIC_ISP1240 << 16) | PCI_VENDOR_QLOGIC)
261 #define	PCI_QLOGIC_ISP1280	\
262 	((PCI_PRODUCT_QLOGIC_ISP1280 << 16) | PCI_VENDOR_QLOGIC)
263 
264 #define	PCI_QLOGIC_ISP2100	\
265 	((PCI_PRODUCT_QLOGIC_ISP2100 << 16) | PCI_VENDOR_QLOGIC)
266 #define	PCI_QLOGIC_ISP2200	\
267 	((PCI_PRODUCT_QLOGIC_ISP2200 << 16) | PCI_VENDOR_QLOGIC)
268 #define	PCI_QLOGIC_ISP2300	\
269 	((PCI_PRODUCT_QLOGIC_ISP2300 << 16) | PCI_VENDOR_QLOGIC)
270 #define	PCI_QLOGIC_ISP2312	\
271 	((PCI_PRODUCT_QLOGIC_ISP2312 << 16) | PCI_VENDOR_QLOGIC)
272 #define	PCI_QLOGIC_ISP2322	\
273 	((PCI_PRODUCT_QLOGIC_ISP2322 << 16) | PCI_VENDOR_QLOGIC)
274 #define	PCI_QLOGIC_ISP2422	\
275 	((PCI_PRODUCT_QLOGIC_ISP2422 << 16) | PCI_VENDOR_QLOGIC)
276 #define	PCI_QLOGIC_ISP2432	\
277 	((PCI_PRODUCT_QLOGIC_ISP2432 << 16) | PCI_VENDOR_QLOGIC)
278 #define	PCI_QLOGIC_ISP2532	\
279 	((PCI_PRODUCT_QLOGIC_ISP2532 << 16) | PCI_VENDOR_QLOGIC)
280 #define	PCI_QLOGIC_ISP5432	\
281 	((PCI_PRODUCT_QLOGIC_ISP5432 << 16) | PCI_VENDOR_QLOGIC)
282 #define	PCI_QLOGIC_ISP6312	\
283 	((PCI_PRODUCT_QLOGIC_ISP6312 << 16) | PCI_VENDOR_QLOGIC)
284 #define	PCI_QLOGIC_ISP6322	\
285 	((PCI_PRODUCT_QLOGIC_ISP6322 << 16) | PCI_VENDOR_QLOGIC)
286 #define	PCI_QLOGIC_ISP2031	\
287 	((PCI_PRODUCT_QLOGIC_ISP2031 << 16) | PCI_VENDOR_QLOGIC)
288 #define	PCI_QLOGIC_ISP8031	\
289 	((PCI_PRODUCT_QLOGIC_ISP8031 << 16) | PCI_VENDOR_QLOGIC)
290 #define	PCI_QLOGIC_ISP2684	\
291 	((PCI_PRODUCT_QLOGIC_ISP2684 << 16) | PCI_VENDOR_QLOGIC)
292 #define	PCI_QLOGIC_ISP2692	\
293 	((PCI_PRODUCT_QLOGIC_ISP2692 << 16) | PCI_VENDOR_QLOGIC)
294 #define	PCI_QLOGIC_ISP2714	\
295 	((PCI_PRODUCT_QLOGIC_ISP2714 << 16) | PCI_VENDOR_QLOGIC)
296 #define	PCI_QLOGIC_ISP2722	\
297 	((PCI_PRODUCT_QLOGIC_ISP2722 << 16) | PCI_VENDOR_QLOGIC)
298 
299 /*
300  * Odd case for some AMI raid cards... We need to *not* attach to this.
301  */
302 #define	AMI_RAID_SUBVENDOR_ID	0x101e
303 
304 #define	PCI_DFLT_LTNCY	0x40
305 #define	PCI_DFLT_LNSZ	0x10
306 
307 static int isp_pci_probe (device_t);
308 static int isp_pci_attach (device_t);
309 static int isp_pci_detach (device_t);
310 
311 
312 #define	ISP_PCD(isp)	((struct isp_pcisoftc *)isp)->pci_dev
313 struct isp_pcisoftc {
314 	ispsoftc_t			pci_isp;
315 	device_t			pci_dev;
316 	struct resource *		regs;
317 	struct resource *		regs1;
318 	struct resource *		regs2;
319 	struct {
320 		int				iqd;
321 		struct resource *		irq;
322 		void *				ih;
323 	} irq[ISP_MAX_IRQS];
324 	int				rtp;
325 	int				rgd;
326 	int				rtp1;
327 	int				rgd1;
328 	int				rtp2;
329 	int				rgd2;
330 	int16_t				pci_poff[_NREG_BLKS];
331 	bus_dma_tag_t			dmat;
332 	int				msicount;
333 };
334 
335 
336 static device_method_t isp_pci_methods[] = {
337 	/* Device interface */
338 	DEVMETHOD(device_probe,		isp_pci_probe),
339 	DEVMETHOD(device_attach,	isp_pci_attach),
340 	DEVMETHOD(device_detach,	isp_pci_detach),
341 	{ 0, 0 }
342 };
343 
344 static driver_t isp_pci_driver = {
345 	"isp", isp_pci_methods, sizeof (struct isp_pcisoftc)
346 };
347 static devclass_t isp_devclass;
348 DRIVER_MODULE(isp, pci, isp_pci_driver, isp_devclass, 0, 0);
349 MODULE_DEPEND(isp, cam, 1, 1, 1);
350 MODULE_DEPEND(isp, firmware, 1, 1, 1);
351 static int isp_nvports = 0;
352 
353 static int
354 isp_pci_probe(device_t dev)
355 {
356 	switch ((pci_get_device(dev) << 16) | (pci_get_vendor(dev))) {
357 	case PCI_QLOGIC_ISP1020:
358 		device_set_desc(dev, "Qlogic ISP 1020/1040 PCI SCSI Adapter");
359 		break;
360 	case PCI_QLOGIC_ISP1080:
361 		device_set_desc(dev, "Qlogic ISP 1080 PCI SCSI Adapter");
362 		break;
363 	case PCI_QLOGIC_ISP1240:
364 		device_set_desc(dev, "Qlogic ISP 1240 PCI SCSI Adapter");
365 		break;
366 	case PCI_QLOGIC_ISP1280:
367 		device_set_desc(dev, "Qlogic ISP 1280 PCI SCSI Adapter");
368 		break;
369 	case PCI_QLOGIC_ISP10160:
370 		device_set_desc(dev, "Qlogic ISP 10160 PCI SCSI Adapter");
371 		break;
372 	case PCI_QLOGIC_ISP12160:
373 		if (pci_get_subvendor(dev) == AMI_RAID_SUBVENDOR_ID) {
374 			return (ENXIO);
375 		}
376 		device_set_desc(dev, "Qlogic ISP 12160 PCI SCSI Adapter");
377 		break;
378 	case PCI_QLOGIC_ISP2100:
379 		device_set_desc(dev, "Qlogic ISP 2100 PCI FC-AL Adapter");
380 		break;
381 	case PCI_QLOGIC_ISP2200:
382 		device_set_desc(dev, "Qlogic ISP 2200 PCI FC-AL Adapter");
383 		break;
384 	case PCI_QLOGIC_ISP2300:
385 		device_set_desc(dev, "Qlogic ISP 2300 PCI FC-AL Adapter");
386 		break;
387 	case PCI_QLOGIC_ISP2312:
388 		device_set_desc(dev, "Qlogic ISP 2312 PCI FC-AL Adapter");
389 		break;
390 	case PCI_QLOGIC_ISP2322:
391 		device_set_desc(dev, "Qlogic ISP 2322 PCI FC-AL Adapter");
392 		break;
393 	case PCI_QLOGIC_ISP2422:
394 		device_set_desc(dev, "Qlogic ISP 2422 PCI FC-AL Adapter");
395 		break;
396 	case PCI_QLOGIC_ISP2432:
397 		device_set_desc(dev, "Qlogic ISP 2432 PCI FC-AL Adapter");
398 		break;
399 	case PCI_QLOGIC_ISP2532:
400 		device_set_desc(dev, "Qlogic ISP 2532 PCI FC-AL Adapter");
401 		break;
402 	case PCI_QLOGIC_ISP5432:
403 		device_set_desc(dev, "Qlogic ISP 5432 PCI FC-AL Adapter");
404 		break;
405 	case PCI_QLOGIC_ISP6312:
406 		device_set_desc(dev, "Qlogic ISP 6312 PCI FC-AL Adapter");
407 		break;
408 	case PCI_QLOGIC_ISP6322:
409 		device_set_desc(dev, "Qlogic ISP 6322 PCI FC-AL Adapter");
410 		break;
411 	case PCI_QLOGIC_ISP2031:
412 		device_set_desc(dev, "Qlogic ISP 2031 PCI FC-AL Adapter");
413 		break;
414 	case PCI_QLOGIC_ISP8031:
415 		device_set_desc(dev, "Qlogic ISP 8031 PCI FCoE Adapter");
416 		break;
417 	case PCI_QLOGIC_ISP2684:
418 		device_set_desc(dev, "Qlogic ISP 2684 PCI FC Adapter");
419 		break;
420 	case PCI_QLOGIC_ISP2692:
421 		device_set_desc(dev, "Qlogic ISP 2692 PCI FC Adapter");
422 		break;
423 	case PCI_QLOGIC_ISP2714:
424 		device_set_desc(dev, "Qlogic ISP 2714 PCI FC Adapter");
425 		break;
426 	case PCI_QLOGIC_ISP2722:
427 		device_set_desc(dev, "Qlogic ISP 2722 PCI FC Adapter");
428 		break;
429 	default:
430 		return (ENXIO);
431 	}
432 	if (isp_announced == 0 && bootverbose) {
433 		printf("Qlogic ISP Driver, FreeBSD Version %d.%d, "
434 		    "Core Version %d.%d\n",
435 		    ISP_PLATFORM_VERSION_MAJOR, ISP_PLATFORM_VERSION_MINOR,
436 		    ISP_CORE_VERSION_MAJOR, ISP_CORE_VERSION_MINOR);
437 		isp_announced++;
438 	}
439 	/*
440 	 * XXXX: Here is where we might load the f/w module
441 	 * XXXX: (or increase a reference count to it).
442 	 */
443 	return (BUS_PROBE_DEFAULT);
444 }
445 
446 static void
447 isp_get_generic_options(device_t dev, ispsoftc_t *isp)
448 {
449 	int tval;
450 
451 	tval = 0;
452 	if (resource_int_value(device_get_name(dev), device_get_unit(dev), "fwload_disable", &tval) == 0 && tval != 0) {
453 		isp->isp_confopts |= ISP_CFG_NORELOAD;
454 	}
455 	tval = 0;
456 	if (resource_int_value(device_get_name(dev), device_get_unit(dev), "ignore_nvram", &tval) == 0 && tval != 0) {
457 		isp->isp_confopts |= ISP_CFG_NONVRAM;
458 	}
459 	tval = 0;
460 	(void) resource_int_value(device_get_name(dev), device_get_unit(dev), "debug", &tval);
461 	if (tval) {
462 		isp->isp_dblev = tval;
463 	} else {
464 		isp->isp_dblev = ISP_LOGWARN|ISP_LOGERR;
465 	}
466 	if (bootverbose) {
467 		isp->isp_dblev |= ISP_LOGCONFIG|ISP_LOGINFO;
468 	}
469 	tval = -1;
470 	(void) resource_int_value(device_get_name(dev), device_get_unit(dev), "vports", &tval);
471 	if (tval > 0 && tval <= 254) {
472 		isp_nvports = tval;
473 	}
474 	tval = 7;
475 	(void) resource_int_value(device_get_name(dev), device_get_unit(dev), "quickboot_time", &tval);
476 	isp_quickboot_time = tval;
477 }
478 
479 static void
480 isp_get_specific_options(device_t dev, int chan, ispsoftc_t *isp)
481 {
482 	const char *sptr;
483 	int tval = 0;
484 	char prefix[12], name[16];
485 
486 	if (chan == 0)
487 		prefix[0] = 0;
488 	else
489 		snprintf(prefix, sizeof(prefix), "chan%d.", chan);
490 	snprintf(name, sizeof(name), "%siid", prefix);
491 	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
492 	    name, &tval)) {
493 		if (IS_FC(isp)) {
494 			ISP_FC_PC(isp, chan)->default_id = 109 - chan;
495 		} else {
496 #ifdef __sparc64__
497 			ISP_SPI_PC(isp, chan)->iid = OF_getscsinitid(dev);
498 #else
499 			ISP_SPI_PC(isp, chan)->iid = 7;
500 #endif
501 		}
502 	} else {
503 		if (IS_FC(isp)) {
504 			ISP_FC_PC(isp, chan)->default_id = tval - chan;
505 		} else {
506 			ISP_SPI_PC(isp, chan)->iid = tval;
507 		}
508 		isp->isp_confopts |= ISP_CFG_OWNLOOPID;
509 	}
510 
511 	if (IS_SCSI(isp))
512 		return;
513 
514 	tval = -1;
515 	snprintf(name, sizeof(name), "%srole", prefix);
516 	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
517 	    name, &tval) == 0) {
518 		switch (tval) {
519 		case ISP_ROLE_NONE:
520 		case ISP_ROLE_INITIATOR:
521 		case ISP_ROLE_TARGET:
522 		case ISP_ROLE_BOTH:
523 			device_printf(dev, "Chan %d setting role to 0x%x\n", chan, tval);
524 			break;
525 		default:
526 			tval = -1;
527 			break;
528 		}
529 	}
530 	if (tval == -1) {
531 		tval = ISP_DEFAULT_ROLES;
532 	}
533 	ISP_FC_PC(isp, chan)->def_role = tval;
534 
535 	tval = 0;
536 	snprintf(name, sizeof(name), "%sfullduplex", prefix);
537 	if (resource_int_value(device_get_name(dev), device_get_unit(dev),
538 	    name, &tval) == 0 && tval != 0) {
539 		isp->isp_confopts |= ISP_CFG_FULL_DUPLEX;
540 	}
541 	sptr = NULL;
542 	snprintf(name, sizeof(name), "%stopology", prefix);
543 	if (resource_string_value(device_get_name(dev), device_get_unit(dev),
544 	    name, (const char **) &sptr) == 0 && sptr != NULL) {
545 		if (strcmp(sptr, "lport") == 0) {
546 			isp->isp_confopts |= ISP_CFG_LPORT;
547 		} else if (strcmp(sptr, "nport") == 0) {
548 			isp->isp_confopts |= ISP_CFG_NPORT;
549 		} else if (strcmp(sptr, "lport-only") == 0) {
550 			isp->isp_confopts |= ISP_CFG_LPORT_ONLY;
551 		} else if (strcmp(sptr, "nport-only") == 0) {
552 			isp->isp_confopts |= ISP_CFG_NPORT_ONLY;
553 		}
554 	}
555 
556 #ifdef ISP_FCTAPE_OFF
557 	isp->isp_confopts |= ISP_CFG_NOFCTAPE;
558 #else
559 	isp->isp_confopts |= ISP_CFG_FCTAPE;
560 #endif
561 
562 	tval = 0;
563 	snprintf(name, sizeof(name), "%snofctape", prefix);
564 	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
565 	    name, &tval);
566 	if (tval) {
567 		isp->isp_confopts &= ~ISP_CFG_FCTAPE;
568 		isp->isp_confopts |= ISP_CFG_NOFCTAPE;
569 	}
570 
571 	tval = 0;
572 	snprintf(name, sizeof(name), "%sfctape", prefix);
573 	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
574 	    name, &tval);
575 	if (tval) {
576 		isp->isp_confopts &= ~ISP_CFG_NOFCTAPE;
577 		isp->isp_confopts |= ISP_CFG_FCTAPE;
578 	}
579 
580 
581 	/*
582 	 * Because the resource_*_value functions can neither return
583 	 * 64 bit integer values, nor can they be directly coerced
584 	 * to interpret the right hand side of the assignment as
585 	 * you want them to interpret it, we have to force WWN
586 	 * hint replacement to specify WWN strings with a leading
587 	 * 'w' (e.g. w50000000aaaa0001). Sigh.
588 	 */
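	/*
	 * Illustrative hint syntax only (the conventional
	 * hint.<driver>.<unit>.<name> form), e.g. in /boot/device.hints:
	 *   hint.isp.0.portwwn="w50000000aaaa0001"
	 */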
589 	sptr = NULL;
590 	snprintf(name, sizeof(name), "%sportwwn", prefix);
591 	tval = resource_string_value(device_get_name(dev), device_get_unit(dev),
592 	    name, (const char **) &sptr);
593 	if (tval == 0 && sptr != NULL && *sptr++ == 'w') {
594 		char *eptr = NULL;
595 		ISP_FC_PC(isp, chan)->def_wwpn = strtouq(sptr, &eptr, 16);
596 		if (eptr < sptr + 16 || ISP_FC_PC(isp, chan)->def_wwpn == -1) {
597 			device_printf(dev, "mangled portwwn hint '%s'\n", sptr);
598 			ISP_FC_PC(isp, chan)->def_wwpn = 0;
599 		}
600 	}
601 
602 	sptr = NULL;
603 	snprintf(name, sizeof(name), "%snodewwn", prefix);
604 	tval = resource_string_value(device_get_name(dev), device_get_unit(dev),
605 	    name, (const char **) &sptr);
606 	if (tval == 0 && sptr != NULL && *sptr++ == 'w') {
607 		char *eptr = NULL;
608 		ISP_FC_PC(isp, chan)->def_wwnn = strtouq(sptr, &eptr, 16);
609 		if (eptr < sptr + 16 || ISP_FC_PC(isp, chan)->def_wwnn == 0) {
610 			device_printf(dev, "mangled nodewwn hint '%s'\n", sptr);
611 			ISP_FC_PC(isp, chan)->def_wwnn = 0;
612 		}
613 	}
614 
615 	tval = -1;
616 	snprintf(name, sizeof(name), "%sloop_down_limit", prefix);
617 	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
618 	    name, &tval);
619 	if (tval >= 0 && tval < 0xffff) {
620 		ISP_FC_PC(isp, chan)->loop_down_limit = tval;
621 	} else {
622 		ISP_FC_PC(isp, chan)->loop_down_limit = isp_loop_down_limit;
623 	}
624 
625 	tval = -1;
626 	snprintf(name, sizeof(name), "%sgone_device_time", prefix);
627 	(void) resource_int_value(device_get_name(dev), device_get_unit(dev),
628 	    name, &tval);
629 	if (tval >= 0 && tval < 0xffff) {
630 		ISP_FC_PC(isp, chan)->gone_device_time = tval;
631 	} else {
632 		ISP_FC_PC(isp, chan)->gone_device_time = isp_gone_device_time;
633 	}
634 }
635 
636 static int
637 isp_pci_attach(device_t dev)
638 {
639 	struct isp_pcisoftc *pcs = device_get_softc(dev);
640 	ispsoftc_t *isp = &pcs->pci_isp;
641 	int i;
642 	uint32_t data, cmd, linesz, did;
643 	size_t psize, xsize;
644 	char fwname[32];
645 
646 	pcs->pci_dev = dev;
647 	isp->isp_dev = dev;
648 	isp->isp_nchan = 1;
649 	mtx_init(&isp->isp_lock, "isp", NULL, MTX_DEF);
650 
651 	/*
652 	 * Get Generic Options
653 	 */
654 	isp_nvports = 0;
655 	isp_get_generic_options(dev, isp);
656 
657 	linesz = PCI_DFLT_LNSZ;
658 	pcs->regs = pcs->regs2 = NULL;
659 	pcs->rgd = pcs->rtp = 0;
660 
661 	pcs->pci_dev = dev;
662 	pcs->pci_poff[BIU_BLOCK >> _BLK_REG_SHFT] = BIU_REGS_OFF;
663 	pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS_OFF;
664 	pcs->pci_poff[SXP_BLOCK >> _BLK_REG_SHFT] = PCI_SXP_REGS_OFF;
665 	pcs->pci_poff[RISC_BLOCK >> _BLK_REG_SHFT] = PCI_RISC_REGS_OFF;
666 	pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = DMA_REGS_OFF;
667 
668 	switch (pci_get_devid(dev)) {
669 	case PCI_QLOGIC_ISP1020:
670 		did = 0x1040;
671 		isp->isp_mdvec = &mdvec;
672 		isp->isp_type = ISP_HA_SCSI_UNKNOWN;
673 		break;
674 	case PCI_QLOGIC_ISP1080:
675 		did = 0x1080;
676 		isp->isp_mdvec = &mdvec_1080;
677 		isp->isp_type = ISP_HA_SCSI_1080;
678 		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF;
679 		break;
680 	case PCI_QLOGIC_ISP1240:
681 		did = 0x1080;
682 		isp->isp_mdvec = &mdvec_1080;
683 		isp->isp_type = ISP_HA_SCSI_1240;
684 		isp->isp_nchan = 2;
685 		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF;
686 		break;
687 	case PCI_QLOGIC_ISP1280:
688 		did = 0x1080;
689 		isp->isp_mdvec = &mdvec_1080;
690 		isp->isp_type = ISP_HA_SCSI_1280;
691 		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF;
692 		break;
693 	case PCI_QLOGIC_ISP10160:
694 		did = 0x12160;
695 		isp->isp_mdvec = &mdvec_12160;
696 		isp->isp_type = ISP_HA_SCSI_10160;
697 		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF;
698 		break;
699 	case PCI_QLOGIC_ISP12160:
700 		did = 0x12160;
701 		isp->isp_nchan = 2;
702 		isp->isp_mdvec = &mdvec_12160;
703 		isp->isp_type = ISP_HA_SCSI_12160;
704 		pcs->pci_poff[DMA_BLOCK >> _BLK_REG_SHFT] = ISP1080_DMA_REGS_OFF;
705 		break;
706 	case PCI_QLOGIC_ISP2100:
707 		did = 0x2100;
708 		isp->isp_mdvec = &mdvec_2100;
709 		isp->isp_type = ISP_HA_FC_2100;
710 		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2100_OFF;
711 		if (pci_get_revid(dev) < 3) {
712 			/*
713 			 * XXX: Need to get the actual revision
714 			 * XXX: number of the 2100 FB. At any rate,
715 			 * XXX: lower cache line size for early revision
716 			 * XXX: boards.
717 			 */
718 			linesz = 1;
719 		}
720 		break;
721 	case PCI_QLOGIC_ISP2200:
722 		did = 0x2200;
723 		isp->isp_mdvec = &mdvec_2200;
724 		isp->isp_type = ISP_HA_FC_2200;
725 		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2100_OFF;
726 		break;
727 	case PCI_QLOGIC_ISP2300:
728 		did = 0x2300;
729 		isp->isp_mdvec = &mdvec_2300;
730 		isp->isp_type = ISP_HA_FC_2300;
731 		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2300_OFF;
732 		break;
733 	case PCI_QLOGIC_ISP2312:
734 	case PCI_QLOGIC_ISP6312:
735 		did = 0x2300;
736 		isp->isp_mdvec = &mdvec_2300;
737 		isp->isp_type = ISP_HA_FC_2312;
738 		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2300_OFF;
739 		break;
740 	case PCI_QLOGIC_ISP2322:
741 	case PCI_QLOGIC_ISP6322:
742 		did = 0x2322;
743 		isp->isp_mdvec = &mdvec_2300;
744 		isp->isp_type = ISP_HA_FC_2322;
745 		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2300_OFF;
746 		break;
747 	case PCI_QLOGIC_ISP2422:
748 	case PCI_QLOGIC_ISP2432:
749 		did = 0x2400;
750 		isp->isp_nchan += isp_nvports;
751 		isp->isp_mdvec = &mdvec_2400;
752 		isp->isp_type = ISP_HA_FC_2400;
753 		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2400_OFF;
754 		break;
755 	case PCI_QLOGIC_ISP2532:
756 		did = 0x2500;
757 		isp->isp_nchan += isp_nvports;
758 		isp->isp_mdvec = &mdvec_2500;
759 		isp->isp_type = ISP_HA_FC_2500;
760 		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2400_OFF;
761 		break;
762 	case PCI_QLOGIC_ISP5432:
763 		did = 0x2500;
764 		isp->isp_mdvec = &mdvec_2500;
765 		isp->isp_type = ISP_HA_FC_2500;
766 		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2400_OFF;
767 		break;
768 	case PCI_QLOGIC_ISP2031:
769 	case PCI_QLOGIC_ISP8031:
770 		did = 0x2600;
771 		isp->isp_nchan += isp_nvports;
772 		isp->isp_mdvec = &mdvec_2600;
773 		isp->isp_type = ISP_HA_FC_2600;
774 		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2400_OFF;
775 		break;
776 	case PCI_QLOGIC_ISP2684:
777 	case PCI_QLOGIC_ISP2692:
778 	case PCI_QLOGIC_ISP2714:
779 	case PCI_QLOGIC_ISP2722:
780 		did = 0x2700;
781 		isp->isp_nchan += isp_nvports;
782 		isp->isp_mdvec = &mdvec_2700;
783 		isp->isp_type = ISP_HA_FC_2700;
784 		pcs->pci_poff[MBOX_BLOCK >> _BLK_REG_SHFT] = PCI_MBOX_REGS2400_OFF;
785 		break;
786 	default:
787 		device_printf(dev, "unknown device type\n");
788 		goto bad;
789 		break;
790 	}
791 	isp->isp_revision = pci_get_revid(dev);
792 
793 	if (IS_26XX(isp)) {
794 		pcs->rtp = SYS_RES_MEMORY;
795 		pcs->rgd = PCIR_BAR(0);
796 		pcs->regs = bus_alloc_resource_any(dev, pcs->rtp, &pcs->rgd,
797 		    RF_ACTIVE);
798 		pcs->rtp1 = SYS_RES_MEMORY;
799 		pcs->rgd1 = PCIR_BAR(2);
800 		pcs->regs1 = bus_alloc_resource_any(dev, pcs->rtp1, &pcs->rgd1,
801 		    RF_ACTIVE);
802 		pcs->rtp2 = SYS_RES_MEMORY;
803 		pcs->rgd2 = PCIR_BAR(4);
804 		pcs->regs2 = bus_alloc_resource_any(dev, pcs->rtp2, &pcs->rgd2,
805 		    RF_ACTIVE);
806 	} else {
807 		pcs->rtp = SYS_RES_MEMORY;
808 		pcs->rgd = PCIR_BAR(1);
809 		pcs->regs = bus_alloc_resource_any(dev, pcs->rtp, &pcs->rgd,
810 		    RF_ACTIVE);
811 		if (pcs->regs == NULL) {
812 			pcs->rtp = SYS_RES_IOPORT;
813 			pcs->rgd = PCIR_BAR(0);
814 			pcs->regs = bus_alloc_resource_any(dev, pcs->rtp,
815 			    &pcs->rgd, RF_ACTIVE);
816 		}
817 	}
818 	if (pcs->regs == NULL) {
819 		device_printf(dev, "Unable to map any ports\n");
820 		goto bad;
821 	}
822 	if (bootverbose) {
823 		device_printf(dev, "Using %s space register mapping\n",
824 		    (pcs->rtp == SYS_RES_IOPORT)? "I/O" : "Memory");
825 	}
826 	isp->isp_regs = pcs->regs;
827 	isp->isp_regs2 = pcs->regs2;
828 
829 	if (IS_FC(isp)) {
830 		psize = sizeof (fcparam);
831 		xsize = sizeof (struct isp_fc);
832 	} else {
833 		psize = sizeof (sdparam);
834 		xsize = sizeof (struct isp_spi);
835 	}
836 	psize *= isp->isp_nchan;
837 	xsize *= isp->isp_nchan;
838 	isp->isp_param = malloc(psize, M_DEVBUF, M_NOWAIT | M_ZERO);
839 	if (isp->isp_param == NULL) {
840 		device_printf(dev, "cannot allocate parameter data\n");
841 		goto bad;
842 	}
843 	isp->isp_osinfo.pc.ptr = malloc(xsize, M_DEVBUF, M_NOWAIT | M_ZERO);
844 	if (isp->isp_osinfo.pc.ptr == NULL) {
845 		device_printf(dev, "cannot allocate parameter data\n");
846 		goto bad;
847 	}
848 
849 	/*
850 	 * Now that we know who we are (roughly) get/set specific options
851 	 */
852 	for (i = 0; i < isp->isp_nchan; i++) {
853 		isp_get_specific_options(dev, i, isp);
854 	}
855 
856 	isp->isp_osinfo.fw = NULL;
857 	if (isp->isp_osinfo.fw == NULL) {
858 		snprintf(fwname, sizeof (fwname), "isp_%04x", did);
859 		isp->isp_osinfo.fw = firmware_get(fwname);
860 	}
861 	if (isp->isp_osinfo.fw != NULL) {
862 		isp_prt(isp, ISP_LOGCONFIG, "loaded firmware %s", fwname);
863 		isp->isp_mdvec->dv_ispfw = isp->isp_osinfo.fw->data;
864 	}
865 
866 	/*
867 	 * Make sure that SERR, PERR, WRITE INVALIDATE and BUSMASTER are set.
868 	 */
869 	cmd = pci_read_config(dev, PCIR_COMMAND, 2);
870 	cmd |= PCIM_CMD_SEREN | PCIM_CMD_PERRESPEN | PCIM_CMD_BUSMASTEREN | PCIM_CMD_INVEN;
871 	if (IS_2300(isp)) {	/* per QLogic errata */
872 		cmd &= ~PCIM_CMD_INVEN;
873 	}
874 	if (IS_2322(isp) || pci_get_devid(dev) == PCI_QLOGIC_ISP6312) {
875 		cmd &= ~PCIM_CMD_INTX_DISABLE;
876 	}
877 	if (IS_24XX(isp)) {
878 		cmd &= ~PCIM_CMD_INTX_DISABLE;
879 	}
880 	pci_write_config(dev, PCIR_COMMAND, cmd, 2);
881 
882 	/*
883 	 * Make sure the Cache Line Size register is set sensibly.
884 	 */
885 	data = pci_read_config(dev, PCIR_CACHELNSZ, 1);
886 	if (data == 0 || (linesz != PCI_DFLT_LNSZ && data != linesz)) {
887 		isp_prt(isp, ISP_LOGDEBUG0, "set PCI line size to %d from %d", linesz, data);
888 		data = linesz;
889 		pci_write_config(dev, PCIR_CACHELNSZ, data, 1);
890 	}
891 
892 	/*
893 	 * Make sure the Latency Timer is sane.
894 	 */
895 	data = pci_read_config(dev, PCIR_LATTIMER, 1);
896 	if (data < PCI_DFLT_LTNCY) {
897 		data = PCI_DFLT_LTNCY;
898 		isp_prt(isp, ISP_LOGDEBUG0, "set PCI latency to %d", data);
899 		pci_write_config(dev, PCIR_LATTIMER, data, 1);
900 	}
901 
902 	/*
903 	 * Make sure we've disabled the ROM.
904 	 */
905 	data = pci_read_config(dev, PCIR_ROMADDR, 4);
906 	data &= ~1;
907 	pci_write_config(dev, PCIR_ROMADDR, data, 4);
908 
909 	/*
910 	 * Last minute checks...
911 	 */
912 	if (IS_23XX(isp) || IS_24XX(isp)) {
913 		isp->isp_port = pci_get_function(dev);
914 	}
915 
916 	/*
917 	 * Make sure we're in reset state.
918 	 */
919 	ISP_LOCK(isp);
920 	if (isp_reinit(isp, 1) != 0) {
921 		ISP_UNLOCK(isp);
922 		goto bad;
923 	}
924 	ISP_UNLOCK(isp);
925 	if (isp_attach(isp)) {
926 		ISP_LOCK(isp);
927 		isp_shutdown(isp);
928 		ISP_UNLOCK(isp);
929 		goto bad;
930 	}
931 	return (0);
932 
933 bad:
934 	for (i = 0; i < isp->isp_nirq; i++) {
935 		(void) bus_teardown_intr(dev, pcs->irq[i].irq, pcs->irq[i].ih);
936 		(void) bus_release_resource(dev, SYS_RES_IRQ, pcs->irq[i].iqd,
937 		    pcs->irq[i].irq);
938 	}
939 	if (pcs->msicount) {
940 		pci_release_msi(dev);
941 	}
942 	if (pcs->regs)
943 		(void) bus_release_resource(dev, pcs->rtp, pcs->rgd, pcs->regs);
944 	if (pcs->regs1)
945 		(void) bus_release_resource(dev, pcs->rtp1, pcs->rgd1, pcs->regs1);
946 	if (pcs->regs2)
947 		(void) bus_release_resource(dev, pcs->rtp2, pcs->rgd2, pcs->regs2);
948 	if (pcs->pci_isp.isp_param) {
949 		free(pcs->pci_isp.isp_param, M_DEVBUF);
950 		pcs->pci_isp.isp_param = NULL;
951 	}
952 	if (pcs->pci_isp.isp_osinfo.pc.ptr) {
953 		free(pcs->pci_isp.isp_osinfo.pc.ptr, M_DEVBUF);
954 		pcs->pci_isp.isp_osinfo.pc.ptr = NULL;
955 	}
956 	mtx_destroy(&isp->isp_lock);
957 	return (ENXIO);
958 }
959 
960 static int
961 isp_pci_detach(device_t dev)
962 {
963 	struct isp_pcisoftc *pcs = device_get_softc(dev);
964 	ispsoftc_t *isp = &pcs->pci_isp;
965 	int i, status;
966 
967 	status = isp_detach(isp);
968 	if (status)
969 		return (status);
970 	ISP_LOCK(isp);
971 	isp_shutdown(isp);
972 	ISP_UNLOCK(isp);
973 	for (i = 0; i < isp->isp_nirq; i++) {
974 		(void) bus_teardown_intr(dev, pcs->irq[i].irq, pcs->irq[i].ih);
975 		(void) bus_release_resource(dev, SYS_RES_IRQ, pcs->irq[i].iqd,
976 		    pcs->irq[i].irq);
977 	}
978 	if (pcs->msicount)
979 		pci_release_msi(dev);
980 	(void) bus_release_resource(dev, pcs->rtp, pcs->rgd, pcs->regs);
981 	if (pcs->regs1)
982 		(void) bus_release_resource(dev, pcs->rtp1, pcs->rgd1, pcs->regs1);
983 	if (pcs->regs2)
984 		(void) bus_release_resource(dev, pcs->rtp2, pcs->rgd2, pcs->regs2);
985 	isp_pci_mbxdmafree(isp);
986 	if (pcs->pci_isp.isp_param) {
987 		free(pcs->pci_isp.isp_param, M_DEVBUF);
988 		pcs->pci_isp.isp_param = NULL;
989 	}
990 	if (pcs->pci_isp.isp_osinfo.pc.ptr) {
991 		free(pcs->pci_isp.isp_osinfo.pc.ptr, M_DEVBUF);
992 		pcs->pci_isp.isp_osinfo.pc.ptr = NULL;
993 	}
994 	mtx_destroy(&isp->isp_lock);
995 	return (0);
996 }
997 
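/*
 * Translate a chip register offset (block-select bits plus the offset
 * within that block) into an offset in the mapped BAR, using the
 * per-block offsets recorded in pci_poff[] at attach time.
 */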
998 #define	IspVirt2Off(a, x)	\
999 	(((struct isp_pcisoftc *)a)->pci_poff[((x) & _BLK_REG_MASK) >> \
1000 	_BLK_REG_SHFT] + ((x) & 0xfff))
1001 
1002 #define	BXR2(isp, off)		bus_read_2((isp)->isp_regs, (off))
1003 #define	BXW2(isp, off, v)	bus_write_2((isp)->isp_regs, (off), (v))
1004 #define	BXR4(isp, off)		bus_read_4((isp)->isp_regs, (off))
1005 #define	BXW4(isp, off, v)	bus_write_4((isp)->isp_regs, (off), (v))
1006 #define	B2R4(isp, off)		bus_read_4((isp)->isp_regs2, (off))
1007 #define	B2W4(isp, off, v)	bus_write_4((isp)->isp_regs2, (off), (v))
1008 
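/*
 * Re-read a 16-bit register until two consecutive reads agree; used on
 * the 2100, whose registers may not read back consistently on a single
 * access.
 */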
1009 static ISP_INLINE uint16_t
1010 isp_pci_rd_debounced(ispsoftc_t *isp, int off)
1011 {
1012 	uint16_t val, prev;
1013 
1014 	val = BXR2(isp, IspVirt2Off(isp, off));
1015 	do {
1016 		prev = val;
1017 		val = BXR2(isp, IspVirt2Off(isp, off));
1018 	} while (val != prev);
1019 	return (val);
1020 }
1021 
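/*
 * Legacy interrupt handler (1020/1080/2100/2200 style): sample BIU_ISR
 * and BIU_SEMA, dispatch mailbox/async events and the response queue,
 * then clear the RISC interrupt and semaphore.
 */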
1022 static void
1023 isp_pci_run_isr(ispsoftc_t *isp)
1024 {
1025 	uint16_t isr, sema, info;
1026 
1027 	if (IS_2100(isp)) {
1028 		isr = isp_pci_rd_debounced(isp, BIU_ISR);
1029 		sema = isp_pci_rd_debounced(isp, BIU_SEMA);
1030 	} else {
1031 		isr = BXR2(isp, IspVirt2Off(isp, BIU_ISR));
1032 		sema = BXR2(isp, IspVirt2Off(isp, BIU_SEMA));
1033 	}
1034 	isp_prt(isp, ISP_LOGDEBUG3, "ISR 0x%x SEMA 0x%x", isr, sema);
1035 	isr &= INT_PENDING_MASK(isp);
1036 	sema &= BIU_SEMA_LOCK;
1037 	if (isr == 0 && sema == 0)
1038 		return;
1039 	if (sema != 0) {
1040 		if (IS_2100(isp))
1041 			info = isp_pci_rd_debounced(isp, OUTMAILBOX0);
1042 		else
1043 			info = BXR2(isp, IspVirt2Off(isp, OUTMAILBOX0));
1044 		if (info & MBOX_COMMAND_COMPLETE)
1045 			isp_intr_mbox(isp, info);
1046 		else
1047 			isp_intr_async(isp, info);
1048 		if (!IS_FC(isp) && isp->isp_state == ISP_RUNSTATE)
1049 			isp_intr_respq(isp);
1050 	} else
1051 		isp_intr_respq(isp);
1052 	ISP_WRITE(isp, HCCR, HCCR_CMD_CLEAR_RISC_INT);
1053 	if (sema)
1054 		ISP_WRITE(isp, BIU_SEMA, 0);
1055 }
1056 
1057 static void
1058 isp_pci_run_isr_2300(ispsoftc_t *isp)
1059 {
1060 	uint32_t hccr, r2hisr;
1061 	uint16_t isr, info;
1062 
1063 	if ((BXR2(isp, IspVirt2Off(isp, BIU_ISR)) & BIU2100_ISR_RISC_INT) == 0)
1064 		return;
1065 	r2hisr = BXR4(isp, IspVirt2Off(isp, BIU_R2HSTSLO));
1066 	isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr);
1067 	if ((r2hisr & BIU_R2HST_INTR) == 0)
1068 		return;
1069 	isr = r2hisr & BIU_R2HST_ISTAT_MASK;
1070 	info = r2hisr >> 16;
1071 	switch (isr) {
1072 	case ISPR2HST_ROM_MBX_OK:
1073 	case ISPR2HST_ROM_MBX_FAIL:
1074 	case ISPR2HST_MBX_OK:
1075 	case ISPR2HST_MBX_FAIL:
1076 		isp_intr_mbox(isp, info);
1077 		break;
1078 	case ISPR2HST_ASYNC_EVENT:
1079 		isp_intr_async(isp, info);
1080 		break;
1081 	case ISPR2HST_RIO_16:
1082 		isp_intr_async(isp, ASYNC_RIO16_1);
1083 		break;
1084 	case ISPR2HST_FPOST:
1085 		isp_intr_async(isp, ASYNC_CMD_CMPLT);
1086 		break;
1087 	case ISPR2HST_FPOST_CTIO:
1088 		isp_intr_async(isp, ASYNC_CTIO_DONE);
1089 		break;
1090 	case ISPR2HST_RSPQ_UPDATE:
1091 		isp_intr_respq(isp);
1092 		break;
1093 	default:
1094 		hccr = ISP_READ(isp, HCCR);
1095 		if (hccr & HCCR_PAUSE) {
1096 			ISP_WRITE(isp, HCCR, HCCR_RESET);
1097 			isp_prt(isp, ISP_LOGERR, "RISC paused at interrupt (%x->%x)", hccr, ISP_READ(isp, HCCR));
1098 			ISP_WRITE(isp, BIU_ICR, 0);
1099 		} else {
1100 			isp_prt(isp, ISP_LOGERR, "unknown interrupt 0x%x\n", r2hisr);
1101 		}
1102 	}
1103 	ISP_WRITE(isp, HCCR, HCCR_CMD_CLEAR_RISC_INT);
1104 	ISP_WRITE(isp, BIU_SEMA, 0);
1105 }
1106 
1107 static void
1108 isp_pci_run_isr_2400(ispsoftc_t *isp)
1109 {
1110 	uint32_t r2hisr;
1111 	uint16_t isr, info;
1112 
1113 	r2hisr = BXR4(isp, IspVirt2Off(isp, BIU2400_R2HSTSLO));
1114 	isp_prt(isp, ISP_LOGDEBUG3, "RISC2HOST ISR 0x%x", r2hisr);
1115 	if ((r2hisr & BIU_R2HST_INTR) == 0)
1116 		return;
1117 	isr = r2hisr & BIU_R2HST_ISTAT_MASK;
1118 	info = (r2hisr >> 16);
1119 	switch (isr) {
1120 	case ISPR2HST_ROM_MBX_OK:
1121 	case ISPR2HST_ROM_MBX_FAIL:
1122 	case ISPR2HST_MBX_OK:
1123 	case ISPR2HST_MBX_FAIL:
1124 		isp_intr_mbox(isp, info);
1125 		break;
1126 	case ISPR2HST_ASYNC_EVENT:
1127 		isp_intr_async(isp, info);
1128 		break;
1129 	case ISPR2HST_RSPQ_UPDATE:
1130 		isp_intr_respq(isp);
1131 		break;
1132 	case ISPR2HST_RSPQ_UPDATE2:
1133 #ifdef	ISP_TARGET_MODE
1134 	case ISPR2HST_ATIO_RSPQ_UPDATE:
1135 #endif
1136 		isp_intr_respq(isp);
1137 		/* FALLTHROUGH */
1138 #ifdef	ISP_TARGET_MODE
1139 	case ISPR2HST_ATIO_UPDATE:
1140 	case ISPR2HST_ATIO_UPDATE2:
1141 		isp_intr_atioq(isp);
1142 #endif
1143 		break;
1144 	default:
1145 		isp_prt(isp, ISP_LOGERR, "unknown interrupt 0x%x\n", r2hisr);
1146 	}
1147 	ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_CLEAR_RISC_INT);
1148 }
1149 
1150 static uint32_t
1151 isp_pci_rd_reg(ispsoftc_t *isp, int regoff)
1152 {
1153 	uint16_t rv;
1154 	int oldconf = 0;
1155 
1156 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
1157 		/*
1158 		 * We will assume that someone has paused the RISC processor.
1159 		 */
1160 		oldconf = BXR2(isp, IspVirt2Off(isp, BIU_CONF1));
1161 		BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oldconf | BIU_PCI_CONF1_SXP);
1162 		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
1163 	}
1164 	rv = BXR2(isp, IspVirt2Off(isp, regoff));
1165 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
1166 		BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oldconf);
1167 		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
1168 	}
1169 	return (rv);
1170 }
1171 
1172 static void
1173 isp_pci_wr_reg(ispsoftc_t *isp, int regoff, uint32_t val)
1174 {
1175 	int oldconf = 0;
1176 
1177 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
1178 		/*
1179 		 * We will assume that someone has paused the RISC processor.
1180 		 */
1181 		oldconf = BXR2(isp, IspVirt2Off(isp, BIU_CONF1));
1182 		BXW2(isp, IspVirt2Off(isp, BIU_CONF1),
1183 		    oldconf | BIU_PCI_CONF1_SXP);
1184 		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
1185 	}
1186 	BXW2(isp, IspVirt2Off(isp, regoff), val);
1187 	MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 2, -1);
1188 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
1189 		BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oldconf);
1190 		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
1191 	}
1192 
1193 }
1194 
1195 static uint32_t
1196 isp_pci_rd_reg_1080(ispsoftc_t *isp, int regoff)
1197 {
1198 	uint32_t rv, oc = 0;
1199 
1200 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
1201 		uint32_t tc;
1202 		/*
1203 		 * We will assume that someone has paused the RISC processor.
1204 		 */
1205 		oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1));
1206 		tc = oc & ~BIU_PCI1080_CONF1_DMA;
1207 		if (regoff & SXP_BANK1_SELECT)
1208 			tc |= BIU_PCI1080_CONF1_SXP1;
1209 		else
1210 			tc |= BIU_PCI1080_CONF1_SXP0;
1211 		BXW2(isp, IspVirt2Off(isp, BIU_CONF1), tc);
1212 		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
1213 	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
1214 		oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1));
1215 		BXW2(isp, IspVirt2Off(isp, BIU_CONF1),
1216 		    oc | BIU_PCI1080_CONF1_DMA);
1217 		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
1218 	}
1219 	rv = BXR2(isp, IspVirt2Off(isp, regoff));
1220 	if (oc) {
1221 		BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oc);
1222 		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
1223 	}
1224 	return (rv);
1225 }
1226 
1227 static void
1228 isp_pci_wr_reg_1080(ispsoftc_t *isp, int regoff, uint32_t val)
1229 {
1230 	int oc = 0;
1231 
1232 	if ((regoff & _BLK_REG_MASK) == SXP_BLOCK) {
1233 		uint32_t tc;
1234 		/*
1235 		 * We will assume that someone has paused the RISC processor.
1236 		 */
1237 		oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1));
1238 		tc = oc & ~BIU_PCI1080_CONF1_DMA;
1239 		if (regoff & SXP_BANK1_SELECT)
1240 			tc |= BIU_PCI1080_CONF1_SXP1;
1241 		else
1242 			tc |= BIU_PCI1080_CONF1_SXP0;
1243 		BXW2(isp, IspVirt2Off(isp, BIU_CONF1), tc);
1244 		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
1245 	} else if ((regoff & _BLK_REG_MASK) == DMA_BLOCK) {
1246 		oc = BXR2(isp, IspVirt2Off(isp, BIU_CONF1));
1247 		BXW2(isp, IspVirt2Off(isp, BIU_CONF1),
1248 		    oc | BIU_PCI1080_CONF1_DMA);
1249 		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
1250 	}
1251 	BXW2(isp, IspVirt2Off(isp, regoff), val);
1252 	MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 2, -1);
1253 	if (oc) {
1254 		BXW2(isp, IspVirt2Off(isp, BIU_CONF1), oc);
1255 		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, BIU_CONF1), 2, -1);
1256 	}
1257 }
1258 
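/*
 * The 24xx and later expose a flat register file: only BIU and mailbox
 * registers are directly readable here, so any SXP/RISC/DMA block access
 * is treated as a coding error and rejected loudly.
 */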
1259 static uint32_t
1260 isp_pci_rd_reg_2400(ispsoftc_t *isp, int regoff)
1261 {
1262 	uint32_t rv;
1263 	int block = regoff & _BLK_REG_MASK;
1264 
1265 	switch (block) {
1266 	case BIU_BLOCK:
1267 		break;
1268 	case MBOX_BLOCK:
1269 		return (BXR2(isp, IspVirt2Off(isp, regoff)));
1270 	case SXP_BLOCK:
1271 		isp_prt(isp, ISP_LOGERR, "SXP_BLOCK read at 0x%x", regoff);
1272 		return (0xffffffff);
1273 	case RISC_BLOCK:
1274 		isp_prt(isp, ISP_LOGERR, "RISC_BLOCK read at 0x%x", regoff);
1275 		return (0xffffffff);
1276 	case DMA_BLOCK:
1277 		isp_prt(isp, ISP_LOGERR, "DMA_BLOCK read at 0x%x", regoff);
1278 		return (0xffffffff);
1279 	default:
1280 		isp_prt(isp, ISP_LOGERR, "unknown block read at 0x%x", regoff);
1281 		return (0xffffffff);
1282 	}
1283 
1284 	switch (regoff) {
1285 	case BIU2400_FLASH_ADDR:
1286 	case BIU2400_FLASH_DATA:
1287 	case BIU2400_ICR:
1288 	case BIU2400_ISR:
1289 	case BIU2400_CSR:
1290 	case BIU2400_REQINP:
1291 	case BIU2400_REQOUTP:
1292 	case BIU2400_RSPINP:
1293 	case BIU2400_RSPOUTP:
1294 	case BIU2400_PRI_REQINP:
1295 	case BIU2400_PRI_REQOUTP:
1296 	case BIU2400_ATIO_RSPINP:
1297 	case BIU2400_ATIO_RSPOUTP:
1298 	case BIU2400_HCCR:
1299 	case BIU2400_GPIOD:
1300 	case BIU2400_GPIOE:
1301 	case BIU2400_HSEMA:
1302 		rv = BXR4(isp, IspVirt2Off(isp, regoff));
1303 		break;
1304 	case BIU2400_R2HSTSLO:
1305 		rv = BXR4(isp, IspVirt2Off(isp, regoff));
1306 		break;
1307 	case BIU2400_R2HSTSHI:
1308 		rv = BXR4(isp, IspVirt2Off(isp, regoff)) >> 16;
1309 		break;
1310 	default:
1311 		isp_prt(isp, ISP_LOGERR, "unknown register read at 0x%x",
1312 		    regoff);
1313 		rv = 0xffffffff;
1314 		break;
1315 	}
1316 	return (rv);
1317 }
1318 
1319 static void
1320 isp_pci_wr_reg_2400(ispsoftc_t *isp, int regoff, uint32_t val)
1321 {
1322 	int block = regoff & _BLK_REG_MASK;
1323 
1324 	switch (block) {
1325 	case BIU_BLOCK:
1326 		break;
1327 	case MBOX_BLOCK:
1328 		BXW2(isp, IspVirt2Off(isp, regoff), val);
1329 		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 2, -1);
1330 		return;
1331 	case SXP_BLOCK:
1332 		isp_prt(isp, ISP_LOGERR, "SXP_BLOCK write at 0x%x", regoff);
1333 		return;
1334 	case RISC_BLOCK:
1335 		isp_prt(isp, ISP_LOGERR, "RISC_BLOCK write at 0x%x", regoff);
1336 		return;
1337 	case DMA_BLOCK:
1338 		isp_prt(isp, ISP_LOGERR, "DMA_BLOCK write at 0x%x", regoff);
1339 		return;
1340 	default:
1341 		isp_prt(isp, ISP_LOGERR, "unknown block write at 0x%x", regoff);
1342 		break;
1343 	}
1344 
1345 	switch (regoff) {
1346 	case BIU2400_FLASH_ADDR:
1347 	case BIU2400_FLASH_DATA:
1348 	case BIU2400_ICR:
1349 	case BIU2400_ISR:
1350 	case BIU2400_CSR:
1351 	case BIU2400_REQINP:
1352 	case BIU2400_REQOUTP:
1353 	case BIU2400_RSPINP:
1354 	case BIU2400_RSPOUTP:
1355 	case BIU2400_PRI_REQINP:
1356 	case BIU2400_PRI_REQOUTP:
1357 	case BIU2400_ATIO_RSPINP:
1358 	case BIU2400_ATIO_RSPOUTP:
1359 	case BIU2400_HCCR:
1360 	case BIU2400_GPIOD:
1361 	case BIU2400_GPIOE:
1362 	case BIU2400_HSEMA:
1363 		BXW4(isp, IspVirt2Off(isp, regoff), val);
1364 #ifdef MEMORYBARRIERW
1365 		if (regoff == BIU2400_REQINP ||
1366 		    regoff == BIU2400_RSPOUTP ||
1367 		    regoff == BIU2400_PRI_REQINP ||
1368 		    regoff == BIU2400_ATIO_RSPOUTP)
1369 			MEMORYBARRIERW(isp, SYNC_REG,
1370 			    IspVirt2Off(isp, regoff), 4, -1)
1371 		else
1372 #endif
1373 		MEMORYBARRIER(isp, SYNC_REG, IspVirt2Off(isp, regoff), 4, -1);
1374 		break;
1375 	default:
1376 		isp_prt(isp, ISP_LOGERR, "unknown register write at 0x%x",
1377 		    regoff);
1378 		break;
1379 	}
1380 }
1381 
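/*
 * On the 26xx/27xx the request/response/ATIO queue in/out pointers live
 * in a second register window (isp_regs2); redirect those accesses there
 * and fall back to the 2400 handlers for everything else.
 */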
1382 static uint32_t
1383 isp_pci_rd_reg_2600(ispsoftc_t *isp, int regoff)
1384 {
1385 	uint32_t rv;
1386 
1387 	switch (regoff) {
1388 	case BIU2400_PRI_REQINP:
1389 	case BIU2400_PRI_REQOUTP:
1390 		isp_prt(isp, ISP_LOGERR, "unknown register read at 0x%x",
1391 		    regoff);
1392 		rv = 0xffffffff;
1393 		break;
1394 	case BIU2400_REQINP:
1395 		rv = B2R4(isp, 0x00);
1396 		break;
1397 	case BIU2400_REQOUTP:
1398 		rv = B2R4(isp, 0x04);
1399 		break;
1400 	case BIU2400_RSPINP:
1401 		rv = B2R4(isp, 0x08);
1402 		break;
1403 	case BIU2400_RSPOUTP:
1404 		rv = B2R4(isp, 0x0c);
1405 		break;
1406 	case BIU2400_ATIO_RSPINP:
1407 		rv = B2R4(isp, 0x10);
1408 		break;
1409 	case BIU2400_ATIO_RSPOUTP:
1410 		rv = B2R4(isp, 0x14);
1411 		break;
1412 	default:
1413 		rv = isp_pci_rd_reg_2400(isp, regoff);
1414 		break;
1415 	}
1416 	return (rv);
1417 }
1418 
1419 static void
1420 isp_pci_wr_reg_2600(ispsoftc_t *isp, int regoff, uint32_t val)
1421 {
1422 	int off;
1423 
1424 	switch (regoff) {
1425 	case BIU2400_PRI_REQINP:
1426 	case BIU2400_PRI_REQOUTP:
1427 		isp_prt(isp, ISP_LOGERR, "unknown register write at 0x%x",
1428 		    regoff);
1429 		return;
1430 	case BIU2400_REQINP:
1431 		off = 0x00;
1432 		break;
1433 	case BIU2400_REQOUTP:
1434 		off = 0x04;
1435 		break;
1436 	case BIU2400_RSPINP:
1437 		off = 0x08;
1438 		break;
1439 	case BIU2400_RSPOUTP:
1440 		off = 0x0c;
1441 		break;
1442 	case BIU2400_ATIO_RSPINP:
1443 		off = 0x10;
1444 		break;
1445 	case BIU2400_ATIO_RSPOUTP:
1446 		off = 0x14;
1447 		break;
1448 	default:
1449 		isp_pci_wr_reg_2400(isp, regoff, val);
1450 		return;
1451 	}
1452 	B2W4(isp, off, val);
1453 }
1454 
1455 
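/*
 * busdma load callback for single-segment mappings: record the bus
 * address of the lone segment (or the error) for the caller to inspect.
 */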
1456 struct imush {
1457 	bus_addr_t maddr;
1458 	int error;
1459 };
1460 
1461 static void
1462 imc(void *arg, bus_dma_segment_t *segs, int nseg, int error)
1463 {
1464 	struct imush *imushp = (struct imush *) arg;
1465 
1466 	if (!(imushp->error = error))
1467 		imushp->maddr = segs[0].ds_addr;
1468 }
1469 
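/*
 * Allocate and map the fixed DMA objects for this instance: the request
 * and response queues, the external command area and ATIO queue where
 * applicable, per-channel FC scratch space, and the per-command DMA maps
 * and handle list.
 */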
1470 static int
1471 isp_pci_mbxdma(ispsoftc_t *isp)
1472 {
1473 	caddr_t base;
1474 	uint32_t len, nsegs;
1475 	int i, error, cmap = 0;
1476 	bus_size_t slim;	/* segment size */
1477 	bus_addr_t llim;	/* low limit of unavailable dma */
1478 	bus_addr_t hlim;	/* high limit of unavailable dma */
1479 	struct imush im;
1480 	isp_ecmd_t *ecmd;
1481 
1482 	/* Already been here? If so, leave... */
1483 	if (isp->isp_xflist != NULL)
1484 		return (0);
1485 	if (isp->isp_rquest != NULL && isp->isp_maxcmds == 0)
1486 		return (0);
1487 	ISP_UNLOCK(isp);
1488 	if (isp->isp_rquest != NULL)
1489 		goto gotmaxcmds;
1490 
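	/*
	 * Choose busdma limits: Ultra2, FC and 1240 parts take full-width
	 * addresses with a 4GB (2GB on 32-bit systems) segment boundary,
	 * while older parts are limited to 32-bit addresses and a 16MB
	 * boundary.
	 */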
1491 	hlim = BUS_SPACE_MAXADDR;
1492 	if (IS_ULTRA2(isp) || IS_FC(isp) || IS_1240(isp)) {
1493 		if (sizeof (bus_size_t) > 4)
1494 			slim = (bus_size_t) (1ULL << 32);
1495 		else
1496 			slim = (bus_size_t) (1UL << 31);
1497 		llim = BUS_SPACE_MAXADDR;
1498 	} else {
1499 		slim = (1UL << 24);
1500 		llim = BUS_SPACE_MAXADDR_32BIT;
1501 	}
1502 	if (sizeof (bus_size_t) > 4)
1503 		nsegs = ISP_NSEG64_MAX;
1504 	else
1505 		nsegs = ISP_NSEG_MAX;
1506 
1507 	if (bus_dma_tag_create(bus_get_dma_tag(ISP_PCD(isp)), 1,
1508 	    slim, llim, hlim, NULL, NULL, BUS_SPACE_MAXSIZE, nsegs, slim, 0,
1509 	    busdma_lock_mutex, &isp->isp_lock, &isp->isp_osinfo.dmat)) {
1510 		ISP_LOCK(isp);
1511 		isp_prt(isp, ISP_LOGERR, "could not create master dma tag");
1512 		return (1);
1513 	}
1514 
1515 	/*
1516 	 * Allocate and map the request queue and a region for external
1517 	 * DMA addressable command/status structures (22XX and later).
1518 	 */
1519 	len = ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
1520 	if (isp->isp_type >= ISP_HA_FC_2200)
1521 		len += (N_XCMDS * XCMD_SIZE);
1522 	if (bus_dma_tag_create(isp->isp_osinfo.dmat, QENTRY_LEN, slim,
1523 	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
1524 	    len, 1, len, 0, busdma_lock_mutex, &isp->isp_lock,
1525 	    &isp->isp_osinfo.reqdmat)) {
1526 		isp_prt(isp, ISP_LOGERR, "cannot create request DMA tag");
1527 		goto bad;
1528 	}
1529 	if (bus_dmamem_alloc(isp->isp_osinfo.reqdmat, (void **)&base,
1530 	    BUS_DMA_COHERENT, &isp->isp_osinfo.reqmap) != 0) {
1531 		isp_prt(isp, ISP_LOGERR, "cannot allocate request DMA memory");
1532 		bus_dma_tag_destroy(isp->isp_osinfo.reqdmat);
1533 		goto bad;
1534 	}
1535 	isp->isp_rquest = base;
1536 	im.error = 0;
1537 	if (bus_dmamap_load(isp->isp_osinfo.reqdmat, isp->isp_osinfo.reqmap,
1538 	    base, len, imc, &im, 0) || im.error) {
1539 		isp_prt(isp, ISP_LOGERR, "error loading request DMA map %d", im.error);
1540 		goto bad;
1541 	}
1542 	isp_prt(isp, ISP_LOGDEBUG0, "request area @ 0x%jx/0x%jx",
1543 	    (uintmax_t)im.maddr, (uintmax_t)len);
1544 	isp->isp_rquest_dma = im.maddr;
1545 	base += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
1546 	im.maddr += ISP_QUEUE_SIZE(RQUEST_QUEUE_LEN(isp));
1547 	if (isp->isp_type >= ISP_HA_FC_2200) {
1548 		isp->isp_osinfo.ecmd_dma = im.maddr;
1549 		isp->isp_osinfo.ecmd_free = (isp_ecmd_t *)base;
1550 		isp->isp_osinfo.ecmd_base = isp->isp_osinfo.ecmd_free;
1551 		for (ecmd = isp->isp_osinfo.ecmd_free;
1552 		    ecmd < &isp->isp_osinfo.ecmd_free[N_XCMDS]; ecmd++) {
1553 			if (ecmd == &isp->isp_osinfo.ecmd_free[N_XCMDS - 1])
1554 				ecmd->next = NULL;
1555 			else
1556 				ecmd->next = ecmd + 1;
1557 		}
1558 	}
1559 
1560 	/*
1561 	 * Allocate and map the result queue.
1562 	 */
1563 	len = ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
1564 	if (bus_dma_tag_create(isp->isp_osinfo.dmat, QENTRY_LEN, slim,
1565 	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
1566 	    len, 1, len, 0, busdma_lock_mutex, &isp->isp_lock,
1567 	    &isp->isp_osinfo.respdmat)) {
1568 		isp_prt(isp, ISP_LOGERR, "cannot create response DMA tag");
1569 		goto bad;
1570 	}
1571 	if (bus_dmamem_alloc(isp->isp_osinfo.respdmat, (void **)&base,
1572 	    BUS_DMA_COHERENT, &isp->isp_osinfo.respmap) != 0) {
1573 		isp_prt(isp, ISP_LOGERR, "cannot allocate response DMA memory");
1574 		bus_dma_tag_destroy(isp->isp_osinfo.respdmat);
1575 		goto bad;
1576 	}
1577 	isp->isp_result = base;
1578 	im.error = 0;
1579 	if (bus_dmamap_load(isp->isp_osinfo.respdmat, isp->isp_osinfo.respmap,
1580 	    base, len, imc, &im, 0) || im.error) {
1581 		isp_prt(isp, ISP_LOGERR, "error loading response DMA map %d", im.error);
1582 		goto bad;
1583 	}
1584 	isp_prt(isp, ISP_LOGDEBUG0, "response area @ 0x%jx/0x%jx",
1585 	    (uintmax_t)im.maddr, (uintmax_t)len);
1586 	isp->isp_result_dma = im.maddr;
1587 
1588 #ifdef	ISP_TARGET_MODE
1589 	/*
1590 	 * Allocate and map ATIO queue on 24xx with target mode.
1591 	 */
1592 	if (IS_24XX(isp)) {
1593 		len = ISP_QUEUE_SIZE(RESULT_QUEUE_LEN(isp));
1594 		if (bus_dma_tag_create(isp->isp_osinfo.dmat, QENTRY_LEN, slim,
1595 		    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
1596 		    len, 1, len, 0, busdma_lock_mutex, &isp->isp_lock,
1597 		    &isp->isp_osinfo.atiodmat)) {
1598 			isp_prt(isp, ISP_LOGERR, "cannot create ATIO DMA tag");
1599 			goto bad;
1600 		}
1601 		if (bus_dmamem_alloc(isp->isp_osinfo.atiodmat, (void **)&base,
1602 		    BUS_DMA_COHERENT, &isp->isp_osinfo.atiomap) != 0) {
1603 			isp_prt(isp, ISP_LOGERR, "cannot allocate ATIO DMA memory");
1604 			bus_dma_tag_destroy(isp->isp_osinfo.atiodmat);
1605 			goto bad;
1606 		}
1607 		isp->isp_atioq = base;
1608 		im.error = 0;
1609 		if (bus_dmamap_load(isp->isp_osinfo.atiodmat, isp->isp_osinfo.atiomap,
1610 		    base, len, imc, &im, 0) || im.error) {
1611 			isp_prt(isp, ISP_LOGERR, "error loading ATIO DMA map %d", im.error);
1612 			goto bad;
1613 		}
1614 		isp_prt(isp, ISP_LOGDEBUG0, "ATIO area @ 0x%jx/0x%jx",
1615 		    (uintmax_t)im.maddr, (uintmax_t)len);
1616 		isp->isp_atioq_dma = im.maddr;
1617 	}
1618 #endif
1619 
1620 	if (IS_FC(isp)) {
1621 		if (bus_dma_tag_create(isp->isp_osinfo.dmat, 64, slim,
1622 		    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
1623 		    2*QENTRY_LEN, 1, 2*QENTRY_LEN, 0, busdma_lock_mutex,
1624 		    &isp->isp_lock, &isp->isp_osinfo.iocbdmat)) {
1625 			goto bad;
1626 		}
1627 		if (bus_dmamem_alloc(isp->isp_osinfo.iocbdmat,
1628 		    (void **)&base, BUS_DMA_COHERENT, &isp->isp_osinfo.iocbmap) != 0)
1629 			goto bad;
1630 		isp->isp_iocb = base;
1631 		im.error = 0;
1632 		if (bus_dmamap_load(isp->isp_osinfo.iocbdmat, isp->isp_osinfo.iocbmap,
1633 		    base, 2*QENTRY_LEN, imc, &im, 0) || im.error)
1634 			goto bad;
1635 		isp->isp_iocb_dma = im.maddr;
1636 
1637 		if (bus_dma_tag_create(isp->isp_osinfo.dmat, 64, slim,
1638 		    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
1639 		    ISP_FC_SCRLEN, 1, ISP_FC_SCRLEN, 0, busdma_lock_mutex,
1640 		    &isp->isp_lock, &isp->isp_osinfo.scdmat))
1641 			goto bad;
1642 		for (cmap = 0; cmap < isp->isp_nchan; cmap++) {
1643 			struct isp_fc *fc = ISP_FC_PC(isp, cmap);
1644 			if (bus_dmamem_alloc(isp->isp_osinfo.scdmat,
1645 			    (void **)&base, BUS_DMA_COHERENT, &fc->scmap) != 0)
1646 				goto bad;
1647 			FCPARAM(isp, cmap)->isp_scratch = base;
1648 			im.error = 0;
1649 			if (bus_dmamap_load(isp->isp_osinfo.scdmat, fc->scmap,
1650 			    base, ISP_FC_SCRLEN, imc, &im, 0) || im.error) {
1651 				bus_dmamem_free(isp->isp_osinfo.scdmat,
1652 				    base, fc->scmap);
1653 				FCPARAM(isp, cmap)->isp_scratch = NULL;
1654 				goto bad;
1655 			}
1656 			FCPARAM(isp, cmap)->isp_scdma = im.maddr;
1657 			if (!IS_2100(isp)) {
1658 				for (i = 0; i < INITIAL_NEXUS_COUNT; i++) {
1659 					struct isp_nexus *n = malloc(sizeof (struct isp_nexus), M_DEVBUF, M_NOWAIT | M_ZERO);
1660 					if (n == NULL) {
1661 						while (fc->nexus_free_list) {
1662 							n = fc->nexus_free_list;
1663 							fc->nexus_free_list = n->next;
1664 							free(n, M_DEVBUF);
1665 						}
1666 						goto bad;
1667 					}
1668 					n->next = fc->nexus_free_list;
1669 					fc->nexus_free_list = n;
1670 				}
1671 			}
1672 		}
1673 	}
1674 
1675 	if (isp->isp_maxcmds == 0) {
1676 		ISP_LOCK(isp);
1677 		return (0);
1678 	}
1679 
1680 gotmaxcmds:
1681 	len = isp->isp_maxcmds * sizeof (struct isp_pcmd);
1682 	isp->isp_osinfo.pcmd_pool = (struct isp_pcmd *)
1683 	    malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
1684 	for (i = 0; i < isp->isp_maxcmds; i++) {
1685 		struct isp_pcmd *pcmd = &isp->isp_osinfo.pcmd_pool[i];
1686 		error = bus_dmamap_create(isp->isp_osinfo.dmat, 0, &pcmd->dmap);
1687 		if (error) {
1688 			isp_prt(isp, ISP_LOGERR, "error %d creating per-cmd DMA maps", error);
1689 			while (--i >= 0) {
1690 				bus_dmamap_destroy(isp->isp_osinfo.dmat,
1691 				    isp->isp_osinfo.pcmd_pool[i].dmap);
1692 			}
1693 			goto bad;
1694 		}
1695 		callout_init_mtx(&pcmd->wdog, &isp->isp_lock, 0);
1696 		if (i == isp->isp_maxcmds-1)
1697 			pcmd->next = NULL;
1698 		else
1699 			pcmd->next = &isp->isp_osinfo.pcmd_pool[i+1];
1700 	}
1701 	isp->isp_osinfo.pcmd_free = &isp->isp_osinfo.pcmd_pool[0];
1702 
1703 	len = sizeof (isp_hdl_t) * isp->isp_maxcmds;
1704 	isp->isp_xflist = (isp_hdl_t *) malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
1705 	for (len = 0; len < isp->isp_maxcmds - 1; len++)
1706 		isp->isp_xflist[len].cmd = &isp->isp_xflist[len+1];
1707 	isp->isp_xffree = isp->isp_xflist;
1708 
1709 	ISP_LOCK(isp);
1710 	return (0);
1711 
1712 bad:
1713 	isp_pci_mbxdmafree(isp);
1714 	ISP_LOCK(isp);
1715 	return (1);
1716 }
1717 
1718 static void
1719 isp_pci_mbxdmafree(ispsoftc_t *isp)
1720 {
1721 	int i;
1722 
1723 	if (isp->isp_xflist != NULL) {
1724 		free(isp->isp_xflist, M_DEVBUF);
1725 		isp->isp_xflist = NULL;
1726 	}
1727 	if (isp->isp_osinfo.pcmd_pool != NULL) {
1728 		for (i = 0; i < isp->isp_maxcmds; i++) {
1729 			bus_dmamap_destroy(isp->isp_osinfo.dmat,
1730 			    isp->isp_osinfo.pcmd_pool[i].dmap);
1731 		}
1732 		free(isp->isp_osinfo.pcmd_pool, M_DEVBUF);
1733 		isp->isp_osinfo.pcmd_pool = NULL;
1734 	}
1735 	if (IS_FC(isp)) {
1736 		for (i = 0; i < isp->isp_nchan; i++) {
1737 			struct isp_fc *fc = ISP_FC_PC(isp, i);
1738 			if (FCPARAM(isp, i)->isp_scdma != 0) {
1739 				bus_dmamap_unload(isp->isp_osinfo.scdmat,
1740 				    fc->scmap);
1741 				FCPARAM(isp, i)->isp_scdma = 0;
1742 			}
1743 			if (FCPARAM(isp, i)->isp_scratch != NULL) {
1744 				bus_dmamem_free(isp->isp_osinfo.scdmat,
1745 				    FCPARAM(isp, i)->isp_scratch, fc->scmap);
1746 				FCPARAM(isp, i)->isp_scratch = NULL;
1747 			}
1748 			while (fc->nexus_free_list) {
1749 				struct isp_nexus *n = fc->nexus_free_list;
1750 				fc->nexus_free_list = n->next;
1751 				free(n, M_DEVBUF);
1752 			}
1753 		}
1754 		if (isp->isp_iocb_dma != 0) {
1755 			bus_dma_tag_destroy(isp->isp_osinfo.scdmat);
1756 			bus_dmamap_unload(isp->isp_osinfo.iocbdmat,
1757 			    isp->isp_osinfo.iocbmap);
1758 			isp->isp_iocb_dma = 0;
1759 		}
1760 		if (isp->isp_iocb != NULL) {
1761 			bus_dmamem_free(isp->isp_osinfo.iocbdmat,
1762 			    isp->isp_iocb, isp->isp_osinfo.iocbmap);
1763 			bus_dma_tag_destroy(isp->isp_osinfo.iocbdmat);
1764 		}
1765 	}
1766 #ifdef	ISP_TARGET_MODE
1767 	if (IS_24XX(isp)) {
1768 		if (isp->isp_atioq_dma != 0) {
1769 			bus_dmamap_unload(isp->isp_osinfo.atiodmat,
1770 			    isp->isp_osinfo.atiomap);
1771 			isp->isp_atioq_dma = 0;
1772 		}
1773 		if (isp->isp_atioq != NULL) {
1774 			bus_dmamem_free(isp->isp_osinfo.atiodmat, isp->isp_atioq,
1775 			    isp->isp_osinfo.atiomap);
1776 			bus_dma_tag_destroy(isp->isp_osinfo.atiodmat);
1777 			isp->isp_atioq = NULL;
1778 		}
1779 	}
1780 #endif
1781 	if (isp->isp_result_dma != 0) {
1782 		bus_dmamap_unload(isp->isp_osinfo.respdmat,
1783 		    isp->isp_osinfo.respmap);
1784 		isp->isp_result_dma = 0;
1785 	}
1786 	if (isp->isp_result != NULL) {
1787 		bus_dmamem_free(isp->isp_osinfo.respdmat, isp->isp_result,
1788 		    isp->isp_osinfo.respmap);
1789 		bus_dma_tag_destroy(isp->isp_osinfo.respdmat);
1790 		isp->isp_result = NULL;
1791 	}
1792 	if (isp->isp_rquest_dma != 0) {
1793 		bus_dmamap_unload(isp->isp_osinfo.reqdmat,
1794 		    isp->isp_osinfo.reqmap);
1795 		isp->isp_rquest_dma = 0;
1796 	}
1797 	if (isp->isp_rquest != NULL) {
1798 		bus_dmamem_free(isp->isp_osinfo.reqdmat, isp->isp_rquest,
1799 		    isp->isp_osinfo.reqmap);
1800 		bus_dma_tag_destroy(isp->isp_osinfo.reqdmat);
1801 		isp->isp_rquest = NULL;
1802 	}
1803 }
1804 
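/*
 * Glue for bus_dmamap_load_ccb().  The busdma callback cannot return a
 * value, so this small on-stack structure carries the softc, the CCB and
 * the request queue entry into dma2() and carries any error back out.
 * MUSHERR_NOQENTRIES is a private sentinel meaning "request queue full,
 * try again later"; the caller maps it to CMD_EAGAIN.
 */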
1805 typedef struct {
1806 	ispsoftc_t *isp;
1807 	void *cmd_token;
1808 	void *rq;	/* original request */
1809 	int error;
1810 } mush_t;
1811 
1812 #define	MUSHERR_NOQENTRIES	-2
1813 
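/*
 * busdma load callback for initiator and target mode I/O.  The chip
 * transfer direction is taken from the CAM direction flags; the cache
 * sync direction is additionally inverted for target mode
 * continue-target-I/O requests, where the data moves the opposite way
 * relative to host memory.
 */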
1814 static void
1815 dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1816 {
1817 	mush_t *mp = (mush_t *) arg;
1818 	ispsoftc_t *isp = mp->isp;
1819 	struct ccb_scsiio *csio = mp->cmd_token;
1820 	isp_ddir_t ddir;
1821 	int sdir;
1822 
1823 	if (error) {
1824 		mp->error = error;
1825 		return;
1826 	}
1827 	if (nseg == 0) {
1828 		ddir = ISP_NOXFR;
1829 	} else {
1830 		if ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1831 			ddir = ISP_FROM_DEVICE;
1832 		} else {
1833 			ddir = ISP_TO_DEVICE;
1834 		}
1835 		if ((csio->ccb_h.func_code == XPT_CONT_TARGET_IO) ^
1836 		    ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)) {
1837 			sdir = BUS_DMASYNC_PREREAD;
1838 		} else {
1839 			sdir = BUS_DMASYNC_PREWRITE;
1840 		}
1841 		bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap,
1842 		    sdir);
1843 	}
1844 
1845 	error = isp_send_cmd(isp, mp->rq, dm_segs, nseg, XS_XFRLEN(csio),
1846 	    ddir, (ispds64_t *)csio->req_map);
1847 	switch (error) {
1848 	case CMD_EAGAIN:
1849 		mp->error = MUSHERR_NOQENTRIES;
1850 		break;
1851 	case CMD_QUEUED:
1852 		break;
1853 	default:
1854 		mp->error = EIO;
1855 		break;
1856 	}
1857 }
1858 
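/*
 * Map the data buffer of a CCB for DMA and hand the request to the core
 * code via dma2().  The load is expected to complete synchronously; a
 * deferred callback (EINPROGRESS) is treated as an error since deferred
 * loading is not supported here.  Mapping failures are translated into
 * CAM status values (EFBIG -> CAM_REQ_TOO_BIG, EINVAL -> CAM_REQ_INVALID,
 * anything else -> CAM_UNREC_HBA_ERROR), while a full request queue is
 * reported as CMD_EAGAIN so the command can be retried.
 */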
1859 static int
1860 isp_pci_dmasetup(ispsoftc_t *isp, struct ccb_scsiio *csio, void *ff)
1861 {
1862 	mush_t mush, *mp;
1863 	int error;
1864 
1865 	mp = &mush;
1866 	mp->isp = isp;
1867 	mp->cmd_token = csio;
1868 	mp->rq = ff;
1869 	mp->error = 0;
1870 
1871 	error = bus_dmamap_load_ccb(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap,
1872 	    (union ccb *)csio, dma2, mp, 0);
1873 	if (error == EINPROGRESS) {
1874 		bus_dmamap_unload(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap);
1875 		mp->error = EINVAL;
1876 		isp_prt(isp, ISP_LOGERR, "deferred dma allocation not supported");
1877 	} else if (error && mp->error == 0) {
1878 #ifdef	DIAGNOSTIC
1879 		isp_prt(isp, ISP_LOGERR, "error %d in dma mapping code", error);
1880 #endif
1881 		mp->error = error;
1882 	}
1883 	if (mp->error) {
1884 		int retval = CMD_COMPLETE;
1885 		if (mp->error == MUSHERR_NOQENTRIES) {
1886 			retval = CMD_EAGAIN;
1887 		} else if (mp->error == EFBIG) {
1888 			csio->ccb_h.status = CAM_REQ_TOO_BIG;
1889 		} else if (mp->error == EINVAL) {
1890 			csio->ccb_h.status = CAM_REQ_INVALID;
1891 		} else {
1892 			csio->ccb_h.status = CAM_UNREC_HBA_ERROR;
1893 		}
1894 		return (retval);
1895 	}
1896 	return (CMD_QUEUED);
1897 }
1898 
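/*
 * Interrupt allocation and setup, done only once.  MSI-X is tried first
 * when the firmware advertises support (up to three vectors on 26xx
 * chips: general, response queue and ATIO queue; two otherwise), then
 * single-message MSI, then a legacy INTx line.  The resource id is offset
 * by one when message interrupts are in use, since message vectors are
 * numbered from 1 while the legacy interrupt is rid 0.  isp_nirq records
 * how many vectors were actually hooked up.
 */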
1899 static int
1900 isp_pci_irqsetup(ispsoftc_t *isp)
1901 {
1902 	device_t dev = isp->isp_osinfo.dev;
1903 	struct isp_pcisoftc *pcs = device_get_softc(dev);
1904 	driver_intr_t *f;
1905 	int i, max_irq;
1906 
1907 	/* Allocate IRQs only once. */
1908 	if (isp->isp_nirq > 0)
1909 		return (0);
1910 
1911 	ISP_UNLOCK(isp);
1912 	if (ISP_CAP_MSIX(isp)) {
1913 		max_irq = min(ISP_MAX_IRQS, IS_26XX(isp) ? 3 : 2);
1914 		pcs->msicount = imin(pci_msix_count(dev), max_irq);
1915 		if (pcs->msicount > 0 &&
1916 		    pci_alloc_msix(dev, &pcs->msicount) != 0)
1917 			pcs->msicount = 0;
1918 	}
1919 	if (pcs->msicount == 0) {
1920 		pcs->msicount = imin(pci_msi_count(dev), 1);
1921 		if (pcs->msicount > 0 &&
1922 		    pci_alloc_msi(dev, &pcs->msicount) != 0)
1923 			pcs->msicount = 0;
1924 	}
1925 	for (i = 0; i < MAX(1, pcs->msicount); i++) {
1926 		pcs->irq[i].iqd = i + (pcs->msicount > 0);
1927 		pcs->irq[i].irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
1928 		    &pcs->irq[i].iqd, RF_ACTIVE | RF_SHAREABLE);
1929 		if (pcs->irq[i].irq == NULL) {
1930 			device_printf(dev, "could not allocate interrupt\n");
1931 			break;
1932 		}
1933 		if (i == 0)
1934 			f = isp_platform_intr;
1935 		else if (i == 1)
1936 			f = isp_platform_intr_resp;
1937 		else
1938 			f = isp_platform_intr_atio;
1939 		if (bus_setup_intr(dev, pcs->irq[i].irq, ISP_IFLAGS, NULL,
1940 		    f, isp, &pcs->irq[i].ih)) {
1941 			device_printf(dev, "could not setup interrupt\n");
1942 			(void) bus_release_resource(dev, SYS_RES_IRQ,
1943 			    pcs->irq[i].iqd, pcs->irq[i].irq);
1944 			break;
1945 		}
1946 		if (pcs->msicount > 1) {
1947 			bus_describe_intr(dev, pcs->irq[i].irq, pcs->irq[i].ih,
1948 			    "%d", i);
1949 		}
1950 		isp->isp_nirq = i + 1;
1951 	}
1952 	ISP_LOCK(isp);
1953 
1954 	return (isp->isp_nirq == 0);
1955 }
1956 
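/*
 * Debug helper: dump the bus interface, DMA and mailbox registers.  On
 * the parallel SCSI chips the RISC processor is paused around the
 * CDMA/DDMA/SXP reads and released afterwards, presumably because those
 * registers are only stable while the processor is stopped.
 */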
1957 static void
1958 isp_pci_dumpregs(ispsoftc_t *isp, const char *msg)
1959 {
1960 	struct isp_pcisoftc *pcs = (struct isp_pcisoftc *)isp;
1961 	if (msg)
1962 		printf("%s: %s\n", device_get_nameunit(isp->isp_dev), msg);
1963 	else
1964 		printf("%s:\n", device_get_nameunit(isp->isp_dev));
1965 	if (IS_SCSI(isp))
1966 		printf("    biu_conf1=%x", ISP_READ(isp, BIU_CONF1));
1967 	else
1968 		printf("    biu_csr=%x", ISP_READ(isp, BIU2100_CSR));
1969 	printf(" biu_icr=%x biu_isr=%x biu_sema=%x ", ISP_READ(isp, BIU_ICR),
1970 	    ISP_READ(isp, BIU_ISR), ISP_READ(isp, BIU_SEMA));
1971 	printf("risc_hccr=%x\n", ISP_READ(isp, HCCR));
1972 
1973 
1974 	if (IS_SCSI(isp)) {
1975 		ISP_WRITE(isp, HCCR, HCCR_CMD_PAUSE);
1976 		printf("    cdma_conf=%x cdma_sts=%x cdma_fifostat=%x\n",
1977 			ISP_READ(isp, CDMA_CONF), ISP_READ(isp, CDMA_STATUS),
1978 			ISP_READ(isp, CDMA_FIFO_STS));
1979 		printf("    ddma_conf=%x ddma_sts=%x ddma_fifostat=%x\n",
1980 			ISP_READ(isp, DDMA_CONF), ISP_READ(isp, DDMA_STATUS),
1981 			ISP_READ(isp, DDMA_FIFO_STS));
1982 		printf("    sxp_int=%x sxp_gross=%x sxp(scsi_ctrl)=%x\n",
1983 			ISP_READ(isp, SXP_INTERRUPT),
1984 			ISP_READ(isp, SXP_GROSS_ERR),
1985 			ISP_READ(isp, SXP_PINS_CTRL));
1986 		ISP_WRITE(isp, HCCR, HCCR_CMD_RELEASE);
1987 	}
1988 	printf("    mbox regs: %x %x %x %x %x\n",
1989 	    ISP_READ(isp, OUTMAILBOX0), ISP_READ(isp, OUTMAILBOX1),
1990 	    ISP_READ(isp, OUTMAILBOX2), ISP_READ(isp, OUTMAILBOX3),
1991 	    ISP_READ(isp, OUTMAILBOX4));
1992 	printf("    PCI Status Command/Status=%x\n",
1993 	    pci_read_config(pcs->pci_dev, PCIR_COMMAND, 1));
1994 }
1995