xref: /netbsd/sys/dev/ic/isp_netbsd.c (revision bf9ec67e)
1 /* $NetBSD: isp_netbsd.c,v 1.53 2002/04/04 23:38:46 mjacob Exp $ */
2 /*
3  * This driver, which is contained in NetBSD in the files:
4  *
5  *	sys/dev/ic/isp.c
6  *	sys/dev/ic/isp_inline.h
7  *	sys/dev/ic/isp_netbsd.c
8  *	sys/dev/ic/isp_netbsd.h
9  *	sys/dev/ic/isp_target.c
10  *	sys/dev/ic/isp_target.h
11  *	sys/dev/ic/isp_tpublic.h
12  *	sys/dev/ic/ispmbox.h
13  *	sys/dev/ic/ispreg.h
14  *	sys/dev/ic/ispvar.h
15  *	sys/microcode/isp/asm_sbus.h
16  *	sys/microcode/isp/asm_1040.h
17  *	sys/microcode/isp/asm_1080.h
18  *	sys/microcode/isp/asm_12160.h
19  *	sys/microcode/isp/asm_2100.h
20  *	sys/microcode/isp/asm_2200.h
21  *	sys/pci/isp_pci.c
22  *	sys/sbus/isp_sbus.c
23  *
24  * Is being actively maintained by Matthew Jacob (mjacob@netbsd.org).
25  * This driver also is shared source with FreeBSD, OpenBSD, Linux and
26  * Solaris versions. This tends to be an interesting maintenance problem.
27  *
28  * Please coordinate with Matthew Jacob on changes you wish to make here.
29  */
30 /*
31  * Platform (NetBSD) dependent common attachment code for Qlogic adapters.
32  * Matthew Jacob <mjacob@nas.nasa.gov>
33  */
34 /*
35  * Copyright (C) 1997, 1998, 1999 National Aeronautics & Space Administration
36  * All rights reserved.
37  *
38  * Redistribution and use in source and binary forms, with or without
39  * modification, are permitted provided that the following conditions
40  * are met:
41  * 1. Redistributions of source code must retain the above copyright
42  *    notice, this list of conditions and the following disclaimer.
43  * 2. Redistributions in binary form must reproduce the above copyright
44  *    notice, this list of conditions and the following disclaimer in the
45  *    documentation and/or other materials provided with the distribution.
46  * 3. The name of the author may not be used to endorse or promote products
47  *    derived from this software without specific prior written permission
48  *
49  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
50  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
51  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
52  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
53  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
54  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
55  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
56  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
57  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
58  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
59  */
60 
61 #include <sys/cdefs.h>
62 __KERNEL_RCSID(0, "$NetBSD: isp_netbsd.c,v 1.53 2002/04/04 23:38:46 mjacob Exp $");
63 
64 #include <dev/ic/isp_netbsd.h>
65 #include <sys/scsiio.h>
66 
67 
68 /*
69  * Set a timeout for the watchdogging of a command.
70  *
71  * The dimensional analysis is
72  *
73  *	milliseconds * (seconds/millisecond) * (ticks/second) = ticks
74  *
75  *			=
76  *
77  *	(milliseconds / 1000) * hz = ticks
78  *
79  *
80  * For timeouts less than 1 second, we'll get zero. Because of this, and
81  * because we want to establish *our* timeout to be longer than what the
82  * firmware might do, we just add 3 seconds at the back end.
83  */
/* Milliseconds-to-ticks conversion per the note above, plus 3s slack. */
#define	_XT(xs)	((((xs)->timeout/1000) * hz) + (3 * hz))

/* Forward declarations for this file's local (static) functions. */
static void isp_config_interrupts(struct device *);
static void ispminphys_1020(struct buf *);
static void ispminphys(struct buf *);
static INLINE void ispcmd(struct ispsoftc *, XS_T *);
static void isprequest(struct scsipi_channel *, scsipi_adapter_req_t, void *);
static int
ispioctl(struct scsipi_channel *, u_long, caddr_t, int, struct proc *);

static void isp_polled_cmd(struct ispsoftc *, XS_T *);
static void isp_dog(void *);
static void isp_create_fc_worker(void *);
static void isp_fc_worker(void *);
98 
/*
 * Complete attachment of hardware, include subdevices.
 *
 * Fills in the scsipi adapter/channel structures hung off the softc,
 * starts the FC helper thread (FC cards only), resets the parallel
 * SCSI bus(ses), defers interrupt enablement, and attaches children.
 */
void
isp_attach(struct ispsoftc *isp)
{
	isp->isp_state = ISP_RUNSTATE;

	isp->isp_osinfo._adapter.adapt_dev = &isp->isp_osinfo._dev;
	isp->isp_osinfo._adapter.adapt_nchannels = IS_DUALBUS(isp) ? 2 : 1;
	isp->isp_osinfo._adapter.adapt_openings = isp->isp_maxcmds;
	/*
	 * It's not stated whether max_periph is limited by SPI
	 * tag usage, but let's assume that it is.
	 */
	isp->isp_osinfo._adapter.adapt_max_periph = min(isp->isp_maxcmds, 255);
	isp->isp_osinfo._adapter.adapt_ioctl = ispioctl;
	isp->isp_osinfo._adapter.adapt_request = isprequest;
	/* Older 1020/1020A parts get the tighter transfer-size clamp. */
	if (isp->isp_type <= ISP_HA_SCSI_1020A) {
		isp->isp_osinfo._adapter.adapt_minphys = ispminphys_1020;
	} else {
		isp->isp_osinfo._adapter.adapt_minphys = ispminphys;
	}

	isp->isp_osinfo._chan.chan_adapter = &isp->isp_osinfo._adapter;
	isp->isp_osinfo._chan.chan_bustype = &scsi_bustype;
	isp->isp_osinfo._chan.chan_channel = 0;

	/*
	 * Until the midlayer is fixed to use REPORT LUNS, limit to 8 luns.
	 */
	isp->isp_osinfo._chan.chan_nluns = min(isp->isp_maxluns, 8);

	if (IS_FC(isp)) {
		isp->isp_osinfo._chan.chan_ntargets = MAX_FC_TARG;
		isp->isp_osinfo._chan.chan_id = MAX_FC_TARG;
		/* Post initial work for the FC thread (loop bring-up). */
		isp->isp_osinfo.threadwork = 1;
		/*
		 * Note that isp_create_fc_worker won't get called
		 * until much much later (after proc0 is created).
		 */
		kthread_create(isp_create_fc_worker, isp);
#ifdef	ISP_FW_CRASH_DUMP
		/* Pre-allocate a buffer large enough for a RISC core dump. */
		if (IS_2200(isp)) {
			FCPARAM(isp)->isp_dump_data =
			    malloc(QLA2200_RISC_IMAGE_DUMP_SIZE, M_DEVBUF,
				M_NOWAIT);
		} else if (IS_23XX(isp)) {
			FCPARAM(isp)->isp_dump_data =
			    malloc(QLA2300_RISC_IMAGE_DUMP_SIZE, M_DEVBUF,
				M_NOWAIT);
		}
		/* First word zero marks "no dump captured yet". */
		if (FCPARAM(isp)->isp_dump_data)
			FCPARAM(isp)->isp_dump_data[0] = 0;
#endif
	} else {
		int bus = 0;
		sdparam *sdp = isp->isp_param;

		isp->isp_osinfo._chan.chan_ntargets = MAX_TARGETS;
		isp->isp_osinfo._chan.chan_id = sdp->isp_initiator_id;
		isp->isp_osinfo.discovered[0] = 1 << sdp->isp_initiator_id;
		if (IS_DUALBUS(isp)) {
			/* Clone channel A's setup, then adjust for bus 1. */
			isp->isp_osinfo._chan_b = isp->isp_osinfo._chan;
			sdp++;
			isp->isp_osinfo.discovered[1] =
			    1 << sdp->isp_initiator_id;
			isp->isp_osinfo._chan_b.chan_id = sdp->isp_initiator_id;
			isp->isp_osinfo._chan_b.chan_channel = 1;
		}
		ISP_LOCK(isp);
		(void) isp_control(isp, ISPCTL_RESET_BUS, &bus);
		if (IS_DUALBUS(isp)) {
			bus++;
			(void) isp_control(isp, ISPCTL_RESET_BUS, &bus);
		}
		ISP_UNLOCK(isp);
	}


	/*
         * Defer enabling mailbox interrupts until later.
         */
        config_interrupts((struct device *) isp, isp_config_interrupts);

	/*
	 * And attach children (if any).
	 */
	config_found((void *)isp, &isp->isp_chanA, scsiprint);
	if (IS_DUALBUS(isp)) {
		config_found((void *)isp, &isp->isp_chanB, scsiprint);
	}
}
192 
193 
194 static void
195 isp_config_interrupts(struct device *self)
196 {
197         struct ispsoftc *isp = (struct ispsoftc *) self;
198 
199 	/*
200 	 * After this point, we'll be doing the new configuration
201 	 * schema which allows interrups, so we can do tsleep/wakeup
202 	 * for mailbox stuff at that point.
203 	 */
204 	isp->isp_osinfo.no_mbox_ints = 0;
205 }
206 
207 
208 /*
209  * minphys our xfers
210  */
211 
212 static void
213 ispminphys_1020(struct buf *bp)
214 {
215 	if (bp->b_bcount >= (1 << 24)) {
216 		bp->b_bcount = (1 << 24);
217 	}
218 	minphys(bp);
219 }
220 
221 static void
222 ispminphys(struct buf *bp)
223 {
224 	if (bp->b_bcount >= (1 << 30)) {
225 		bp->b_bcount = (1 << 30);
226 	}
227 	minphys(bp);
228 }
229 
/*
 * Adapter ioctl entry point. Unrecognized commands return ENOTTY.
 */
static int
ispioctl(struct scsipi_channel *chan, u_long cmd, caddr_t addr, int flag,
	struct proc *p)
{
	struct ispsoftc *isp = (void *)chan->chan_adapter->adapt_dev;
	int retval = ENOTTY;

	switch (cmd) {
#ifdef	ISP_FW_CRASH_DUMP
	case ISP_GET_FW_CRASH_DUMP:
	{
		/*
		 * Copy a previously captured firmware crash dump to the
		 * user buffer, then mark the dump area consumed (*ptr = 0).
		 */
		u_int16_t *ptr = FCPARAM(isp)->isp_dump_data;
		size_t sz;

		retval = 0;
		if (IS_2200(isp))
			sz = QLA2200_RISC_IMAGE_DUMP_SIZE;
		else
			sz = QLA2300_RISC_IMAGE_DUMP_SIZE;
		ISP_LOCK(isp);
		if (ptr && *ptr) {
			/* addr holds a user-space pointer to copy into. */
			void *uaddr = *((void **) addr);
			if (copyout(ptr, uaddr, sz)) {
				retval = EFAULT;
			} else {
				*ptr = 0;
			}
		} else {
			retval = ENXIO;
		}
		ISP_UNLOCK(isp);
		break;
	}

	case ISP_FORCE_CRASH_DUMP:
		/* Freeze the channel, force a dump, then reinit the chip. */
		ISP_LOCK(isp);
		if (isp->isp_osinfo.blocked == 0) {
                        isp->isp_osinfo.blocked = 1;
                        scsipi_channel_freeze(&isp->isp_chanA, 1);
                }
		isp_fw_dump(isp);
		isp_reinit(isp);
		ISP_UNLOCK(isp);
		retval = 0;
		break;
#endif
	case ISP_GET_STATS:
	{
		/* Snapshot interrupt and completion counters. */
		isp_stats_t *sp = (isp_stats_t *) addr;

		MEMZERO(sp, sizeof (*sp));
		sp->isp_stat_version = ISP_STATS_VERSION;
		sp->isp_type = isp->isp_type;
		sp->isp_revision = isp->isp_revision;
		ISP_LOCK(isp);
		sp->isp_stats[ISP_INTCNT] = isp->isp_intcnt;
		sp->isp_stats[ISP_INTBOGUS] = isp->isp_intbogus;
		sp->isp_stats[ISP_INTMBOXC] = isp->isp_intmboxc;
		sp->isp_stats[ISP_INGOASYNC] = isp->isp_intoasync;
		sp->isp_stats[ISP_RSLTCCMPLT] = isp->isp_rsltccmplt;
		sp->isp_stats[ISP_FPHCCMCPLT] = isp->isp_fphccmplt;
		sp->isp_stats[ISP_RSCCHIWAT] = isp->isp_rscchiwater;
		sp->isp_stats[ISP_FPCCHIWAT] = isp->isp_fpcchiwater;
		ISP_UNLOCK(isp);
		retval = 0;
		break;
	}
	case ISP_CLR_STATS:
		ISP_LOCK(isp);
		isp->isp_intcnt = 0;
		isp->isp_intbogus = 0;
		isp->isp_intmboxc = 0;
		isp->isp_intoasync = 0;
		isp->isp_rsltccmplt = 0;
		isp->isp_fphccmplt = 0;
		isp->isp_rscchiwater = 0;
		isp->isp_fpcchiwater = 0;
		ISP_UNLOCK(isp);
		retval = 0;
		break;
	case ISP_SDBLEV:
	{
		/* Swap in a new debug level; return the old one. */
		int olddblev = isp->isp_dblev;
		isp->isp_dblev = *(int *)addr;
		*(int *)addr = olddblev;
		retval = 0;
		break;
	}
	case ISP_RESETHBA:
		ISP_LOCK(isp);
		isp_reinit(isp);
		ISP_UNLOCK(isp);
		retval = 0;
		break;
	case ISP_RESCAN:
		/* FC only: re-evaluate loop state (5 second limit). */
		if (IS_FC(isp)) {
			ISP_LOCK(isp);
			if (isp_fc_runstate(isp, 5 * 1000000)) {
				retval = EIO;
			} else {
				retval = 0;
			}
			ISP_UNLOCK(isp);
		}
		break;
	case ISP_FC_LIP:
		/* FC only: force a LIP on the loop. */
		if (IS_FC(isp)) {
			ISP_LOCK(isp);
			if (isp_control(isp, ISPCTL_SEND_LIP, 0)) {
				retval = EIO;
			} else {
				retval = 0;
			}
			ISP_UNLOCK(isp);
		}
		break;
	case ISP_FC_GETDINFO:
	{
		/* Return the port database entry for a given loop id. */
		struct isp_fc_device *ifc = (struct isp_fc_device *) addr;
		struct lportdb *lp;

		if (ifc->loopid < 0 || ifc->loopid >= MAX_FC_TARG) {
			retval = EINVAL;
			break;
		}
		ISP_LOCK(isp);
		lp = &FCPARAM(isp)->portdb[ifc->loopid];
		if (lp->valid) {
			ifc->loopid = lp->loopid;
			ifc->portid = lp->portid;
			ifc->node_wwn = lp->node_wwn;
			ifc->port_wwn = lp->port_wwn;
			retval = 0;
		} else {
			retval = ENODEV;
		}
		ISP_UNLOCK(isp);
		break;
	}
	case SCBUSIORESET:
		ISP_LOCK(isp);
		if (isp_control(isp, ISPCTL_RESET_BUS, &chan->chan_channel))
			retval = EIO;
		else
			retval = 0;
		ISP_UNLOCK(isp);
		break;
	default:
		break;
	}
	return (retval);
}
382 
/*
 * Start a command. Takes and releases the ISP lock itself; errors are
 * reported synchronously to the midlayer via scsipi_done.
 */
static INLINE void
ispcmd(struct ispsoftc *isp, XS_T *xs)
{
	ISP_LOCK(isp);
	if (isp->isp_state < ISP_RUNSTATE) {
		/* First command: bring the chip up to run state. */
		DISABLE_INTS(isp);
		isp_init(isp);
		if (isp->isp_state != ISP_INITSTATE) {
			ENABLE_INTS(isp);
			ISP_UNLOCK(isp);
			XS_SETERR(xs, HBA_BOTCH);
			scsipi_done(xs);
			return;
		}
		isp->isp_state = ISP_RUNSTATE;
		ENABLE_INTS(isp);
	}
	/*
	 * Handle the case of a FC card where the FC thread hasn't
	 * fired up yet and we have loop state to clean up. If we
	 * can't clear things up and we've never seen loop up, bounce
	 * the command.
	 */
	if (IS_FC(isp) && isp->isp_osinfo.threadwork &&
	    isp->isp_osinfo.thread == 0) {
		/* Remember mailbox-interrupt state so we can restore it. */
		volatile u_int8_t ombi = isp->isp_osinfo.no_mbox_ints;
		int delay_time;

		if (xs->xs_control & XS_CTL_POLL) {
			isp->isp_osinfo.no_mbox_ints = 1;
		}

		/* First attempt gets the full 10s; retries only 250ms. */
		if (isp->isp_osinfo.loop_checked == 0) {
			delay_time = 10 * 1000000;
			isp->isp_osinfo.loop_checked = 1;
		} else {
			delay_time = 250000;
		}

		if (isp_fc_runstate(isp, delay_time) != 0) {
			if (xs->xs_control & XS_CTL_POLL) {
				isp->isp_osinfo.no_mbox_ints = ombi;
			}
			if (FCPARAM(isp)->loop_seen_once == 0) {
				XS_SETERR(xs, HBA_SELTIMEOUT);
				scsipi_done(xs);
				ISP_UNLOCK(isp);
				return;
			}
			/*
			 * Otherwise, fall thru to be queued up for later.
			 */
		} else {
			/* Loop is up; clear pause/block and thaw queues. */
			int wasblocked =
			    (isp->isp_osinfo.blocked || isp->isp_osinfo.paused);
			isp->isp_osinfo.threadwork = 0;
			isp->isp_osinfo.blocked =
			    isp->isp_osinfo.paused = 0;
			if (wasblocked) {
				scsipi_channel_thaw(&isp->isp_chanA, 1);
			}
		}
		if (xs->xs_control & XS_CTL_POLL) {
			isp->isp_osinfo.no_mbox_ints = ombi;
		}
	}

	if (isp->isp_osinfo.paused) {
		/* Resource pause in effect; have the midlayer retry. */
		isp_prt(isp, ISP_LOGWARN, "I/O while paused");
		xs->error = XS_RESOURCE_SHORTAGE;
		scsipi_done(xs);
		ISP_UNLOCK(isp);
		return;
	}
	if (isp->isp_osinfo.blocked) {
		/* Loop transition in progress; requeue the command. */
		isp_prt(isp, ISP_LOGWARN, "I/O while blocked");
		xs->error = XS_REQUEUE;
		scsipi_done(xs);
		ISP_UNLOCK(isp);
		return;
	}

	if (xs->xs_control & XS_CTL_POLL) {
		/* Polled commands run with mailbox interrupts masked. */
		volatile u_int8_t ombi = isp->isp_osinfo.no_mbox_ints;
		isp->isp_osinfo.no_mbox_ints = 1;
		isp_polled_cmd(isp, xs);
		isp->isp_osinfo.no_mbox_ints = ombi;
		ISP_UNLOCK(isp);
		return;
	}

	switch (isp_start(xs)) {
	case CMD_QUEUED:
		/* Arm the per-command watchdog (see _XT for slack). */
		if (xs->timeout) {
			callout_reset(&xs->xs_callout, _XT(xs), isp_dog, xs);
		}
		break;
	case CMD_EAGAIN:
		/* Chip out of resources: pause until a command completes. */
		isp->isp_osinfo.paused = 1;
		xs->error = XS_RESOURCE_SHORTAGE;
		scsipi_channel_freeze(&isp->isp_chanA, 1);
		if (IS_DUALBUS(isp)) {
			scsipi_channel_freeze(&isp->isp_chanB, 1);
		}
		scsipi_done(xs);
		break;
	case CMD_RQLATER:
		/*
		 * We can only get RQLATER from FC devices (1 channel only)
		 *
		 * Also, if we've never seen loop up, bounce the command
		 * (somebody has booted with no FC cable connected)
		 */
		if (FCPARAM(isp)->loop_seen_once == 0) {
			XS_SETERR(xs, HBA_SELTIMEOUT);
			scsipi_done(xs);
			break;
		}
		if (isp->isp_osinfo.blocked == 0) {
			isp->isp_osinfo.blocked = 1;
			scsipi_channel_freeze(&isp->isp_chanA, 1);
		}
		xs->error = XS_REQUEUE;
		scsipi_done(xs);
		break;
	case CMD_COMPLETE:
		scsipi_done(xs);
		break;
	}
	ISP_UNLOCK(isp);
}
514 
515 static void
516 isprequest(struct scsipi_channel *chan, scsipi_adapter_req_t req, void *arg)
517 {
518 	struct ispsoftc *isp = (void *)chan->chan_adapter->adapt_dev;
519 
520 	switch (req) {
521 	case ADAPTER_REQ_RUN_XFER:
522 		ispcmd(isp, (XS_T *) arg);
523 		break;
524 
525 	case ADAPTER_REQ_GROW_RESOURCES:
526 		/* Not supported. */
527 		break;
528 
529 	case ADAPTER_REQ_SET_XFER_MODE:
530 	if (IS_SCSI(isp)) {
531 		struct scsipi_xfer_mode *xm = arg;
532 		int dflags = 0;
533 		sdparam *sdp = SDPARAM(isp);
534 
535 		sdp += chan->chan_channel;
536 		if (xm->xm_mode & PERIPH_CAP_TQING)
537 			dflags |= DPARM_TQING;
538 		if (xm->xm_mode & PERIPH_CAP_WIDE16)
539 			dflags |= DPARM_WIDE;
540 		if (xm->xm_mode & PERIPH_CAP_SYNC)
541 			dflags |= DPARM_SYNC;
542 		ISP_LOCK(isp);
543 		sdp->isp_devparam[xm->xm_target].goal_flags |= dflags;
544 		dflags = sdp->isp_devparam[xm->xm_target].goal_flags;
545 		sdp->isp_devparam[xm->xm_target].dev_update = 1;
546 		isp->isp_update |= (1 << chan->chan_channel);
547 		ISP_UNLOCK(isp);
548 		isp_prt(isp, ISP_LOGDEBUG1,
549 		    "ispioctl: device flags 0x%x for %d.%d.X",
550 		    dflags, chan->chan_channel, xm->xm_target);
551 		break;
552 	}
553 	default:
554 		break;
555 	}
556 }
557 
/*
 * Run a command to completion by polling (mailbox interrupts masked
 * by the caller). Called with the ISP lock held.
 */
static void
isp_polled_cmd(struct ispsoftc *isp, XS_T *xs)
{
	int result;
	int infinite = 0, mswait;

	result = isp_start(xs);

	switch (result) {
	case CMD_QUEUED:
		break;
	case CMD_RQLATER:
		if (XS_NOERR(xs)) {
			xs->error = XS_REQUEUE;
		}
		/* FALLTHROUGH */
	case CMD_EAGAIN:
		if (XS_NOERR(xs)) {
			xs->error = XS_RESOURCE_SHORTAGE;
		}
		/* FALLTHROUGH */
	case CMD_COMPLETE:
		scsipi_done(xs);
		return;

	}

	/*
	 * If we can't use interrupts, poll on completion.
	 */
	if ((mswait = XS_TIME(xs)) == 0)
		infinite = 1;

	/* Poll in 1ms steps until done or the timeout is exhausted. */
	while (mswait || infinite) {
		u_int16_t isr, sema, mbox;
		if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
			isp_intr(isp, isr, sema, mbox);
			if (XS_CMD_DONE_P(xs)) {
				break;
			}
		}
		USEC_DELAY(1000);
		mswait -= 1;
	}

	/*
	 * If no other error occurred but we didn't finish,
	 * something bad happened.
	 */
	if (XS_CMD_DONE_P(xs) == 0) {
		/* Abort the straggler; reinit the chip if even that fails. */
		if (isp_control(isp, ISPCTL_ABORT_CMD, xs)) {
			isp_reinit(isp);
		}
		if (XS_NOERR(xs)) {
			XS_SETERR(xs, HBA_BOTCH);
		}
	}
	scsipi_done(xs);
}
616 
/*
 * Platform completion callback, invoked by the common code when a
 * command finishes. Skipped entirely while the watchdog owns the
 * command (the watchdog calls back in here itself).
 */
void
isp_done(XS_T *xs)
{
	XS_CMD_S_DONE(xs);
	if (XS_CMD_WDOG_P(xs) == 0) {
		struct ispsoftc *isp = XS_ISP(xs);
		callout_stop(&xs->xs_callout);
		if (XS_CMD_GRACE_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG1,
			    "finished command on borrowed time");
		}
		XS_CMD_S_CLEAR(xs);
		/*
		 * Fixup- if we get a QFULL, we need
		 * to set XS_BUSY as the error.
		 */
		if (xs->status == SCSI_QUEUE_FULL) {
			xs->error = XS_BUSY;
		}
		/* A completion frees chip resources; lift any EAGAIN pause. */
		if (isp->isp_osinfo.paused) {
			isp->isp_osinfo.paused = 0;
			scsipi_channel_timed_thaw(&isp->isp_chanA);
			if (IS_DUALBUS(isp)) {
				scsipi_channel_timed_thaw(&isp->isp_chanB);
			}
		}
		scsipi_done(xs);
	}
}
646 
/*
 * Per-command watchdog, armed by ispcmd via callout_reset. On first
 * firing it grants the command a one-second grace period (posting a
 * SYNC_ALL marker); on second firing it aborts the command for real.
 */
static void
isp_dog(void *arg)
{
	XS_T *xs = arg;
	struct ispsoftc *isp = XS_ISP(xs);
	u_int16_t handle;

	ISP_ILOCK(isp);
	/*
	 * We've decided this command is dead. Make sure we're not trying
	 * to kill a command that's already dead by getting its handle and
	 * seeing whether it's still alive.
	 */
	handle = isp_find_handle(isp, xs);
	if (handle) {
		u_int16_t isr, mbox, sema;

		if (XS_CMD_DONE_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG1,
			    "watchdog found done cmd (handle 0x%x)", handle);
			ISP_IUNLOCK(isp);
			return;
		}

		if (XS_CMD_WDOG_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG1,
			    "recursive watchdog (handle 0x%x)", handle);
			ISP_IUNLOCK(isp);
			return;
		}

		XS_CMD_S_WDOG(xs);

		/* Give the chip one last chance to post the completion. */
		if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
			isp_intr(isp, isr, sema, mbox);

		}
		if (XS_CMD_DONE_P(xs)) {
			isp_prt(isp, ISP_LOGDEBUG1,
			    "watchdog cleanup for handle 0x%x", handle);
			XS_CMD_C_WDOG(xs);
			isp_done(xs);
		} else if (XS_CMD_GRACE_P(xs)) {
			/* Second firing: grace period expired. */
			isp_prt(isp, ISP_LOGDEBUG1,
			    "watchdog timeout for handle 0x%x", handle);
			/*
			 * Make sure the command is *really* dead before we
			 * release the handle (and DMA resources) for reuse.
			 */
			(void) isp_control(isp, ISPCTL_ABORT_CMD, arg);

			/*
			 * After this point, the command is really dead.
			 */
			if (XS_XFRLEN(xs)) {
				ISP_DMAFREE(isp, xs, handle);
			}
			isp_destroy_handle(isp, handle);
			XS_SETERR(xs, XS_TIMEOUT);
			XS_CMD_S_CLEAR(xs);
			isp_done(xs);
		} else {
			/* First firing: re-arm for 1s and post a marker. */
			u_int16_t nxti, optr;
			ispreq_t local, *mp = &local, *qe;
			isp_prt(isp, ISP_LOGDEBUG2,
			    "possible command timeout on handle %x", handle);
			XS_CMD_C_WDOG(xs);
			callout_reset(&xs->xs_callout, hz, isp_dog, xs);
			if (isp_getrqentry(isp, &nxti, &optr, (void **) &qe)) {
				/*
				 * NOTE(review): this path releases with
				 * ISP_UNLOCK although the lock was taken
				 * with ISP_ILOCK — confirm against
				 * isp_netbsd.h that the macros pair up.
				 */
				ISP_UNLOCK(isp);
				return;
			}
			XS_CMD_S_GRACE(xs);
			MEMZERO((void *) mp, sizeof (*mp));
			mp->req_header.rqs_entry_count = 1;
			mp->req_header.rqs_entry_type = RQSTYPE_MARKER;
			mp->req_modifier = SYNC_ALL;
			mp->req_target = XS_CHANNEL(xs) << 7;
			isp_put_request(isp, mp, qe);
			ISP_ADD_REQUEST(isp, nxti);
		}
	} else {
		isp_prt(isp, ISP_LOGDEBUG0, "watchdog with no command");
	}
	ISP_IUNLOCK(isp);
}
733 
734 /*
735  * Fibre Channel state cleanup thread
736  */
737 static void
738 isp_create_fc_worker(void *arg)
739 {
740 	struct ispsoftc *isp = arg;
741 
742 	if (kthread_create1(isp_fc_worker, isp, &isp->isp_osinfo.thread,
743 	    "%s:fc_thrd", isp->isp_name)) {
744 		isp_prt(isp, ISP_LOGERR, "unable to create FC worker thread");
745 		panic("isp_create_fc_worker");
746 	}
747 
748 }
749 
/*
 * FC state cleanup thread body. Sleeps until ISPASYNC_CHANGE_NOTIFY
 * posts work (threadwork) and wakes us; then drives isp_fc_runstate
 * until the loop is ready and thaws the frozen channel.
 */
static void
isp_fc_worker(void *arg)
{
	void scsipi_run_queue(struct scsipi_channel *);
	struct ispsoftc *isp = arg;

	for (;;) {
		int s;

		/*
		 * Note we do *not* use the ISP_LOCK/ISP_UNLOCK macros here.
		 */
		s = splbio();
		/* Retry loop bring-up while work remains posted. */
		while (isp->isp_osinfo.threadwork) {
			isp->isp_osinfo.threadwork = 0;
			if (isp_fc_runstate(isp, 10 * 1000000) == 0) {
				break;
			}
			if  (isp->isp_osinfo.loop_checked &&
			     FCPARAM(isp)->loop_seen_once == 0) {
				/*
				 * NOTE(review): this path calls splx here
				 * and again after the tsleep at "skip:";
				 * the second splx restores the same saved
				 * level — verify this is intended.
				 */
				splx(s);
				goto skip;
			}
			isp->isp_osinfo.threadwork = 1;
			splx(s);
			delay(500 * 1000);
			s = splbio();
		}
		if (FCPARAM(isp)->isp_fwstate != FW_READY ||
		    FCPARAM(isp)->isp_loopstate != LOOP_READY) {
			/* Not ready yet: repost the work and try again. */
			isp_prt(isp, ISP_LOGINFO, "isp_fc_runstate in vain");
			isp->isp_osinfo.threadwork = 1;
			splx(s);
			continue;
		}

		if (isp->isp_osinfo.blocked) {
			/* Loop is ready: release the frozen channel. */
			isp->isp_osinfo.blocked = 0;
			isp_prt(isp, ISP_LOGDEBUG0,
			    "restarting queues (freeze count %d)",
			    isp->isp_chanA.chan_qfreeze);
			scsipi_channel_thaw(&isp->isp_chanA, 1);
		}

		/* A cleared thread pointer is the request to terminate. */
		if (isp->isp_osinfo.thread == NULL)
			break;

skip:
		(void) tsleep(&isp->isp_osinfo.thread, PRIBIO, "fcclnup", 0);

		splx(s);
	}

	/* In case parent is waiting for us to exit. */
	wakeup(&isp->isp_osinfo.thread);

	kthread_exit(0);
}
808 
809 /*
810  * Free any associated resources prior to decommissioning and
811  * set the card to a known state (so it doesn't wake up and kick
812  * us when we aren't expecting it to).
813  *
814  * Locks are held before coming here.
815  */
void
isp_uninit(struct ispsoftc *isp)
{
	/*
	 * NOTE(review): uses isp_lock()/isp_unlock() rather than the
	 * ISP_LOCK/ISP_UNLOCK macros used elsewhere in this file, and
	 * the block comment above says locks are already held on entry —
	 * presumably equivalent/nested; confirm against isp_netbsd.h.
	 */
	isp_lock(isp);
	/*
	 * Leave with interrupts disabled.
	 */
	DISABLE_INTS(isp);
	isp_unlock(isp);
}
826 
/*
 * Platform callback for asynchronous events reported by the common
 * code (transfer-mode changes, bus resets, FC loop transitions,
 * port-database changes, firmware crashes). Always returns 0.
 */
int
isp_async(struct ispsoftc *isp, ispasync_t cmd, void *arg)
{
	int bus, tgt;

	switch (cmd) {
	case ISPASYNC_NEW_TGT_PARAMS:
	/*
	 * NOTE(review): when the guard below is false this case falls
	 * through into ISPASYNC_BUS_RESET — presumably unreachable for
	 * non-SCSI cards / dblev 0, but confirm that is intended.
	 */
	if (IS_SCSI(isp) && isp->isp_dblev) {
		sdparam *sdp = isp->isp_param;
		int flags;
		struct scsipi_xfer_mode xm;

		/* arg encodes bus in the high 16 bits, target in the low. */
		tgt = *((int *) arg);
		bus = (tgt >> 16) & 0xffff;
		tgt &= 0xffff;
		sdp += bus;
		flags = sdp->isp_devparam[tgt].actv_flags;

		xm.xm_mode = 0;
		xm.xm_period = sdp->isp_devparam[tgt].actv_period;
		xm.xm_offset = sdp->isp_devparam[tgt].actv_offset;
		xm.xm_target = tgt;

		if ((flags & DPARM_SYNC) && xm.xm_period && xm.xm_offset)
			xm.xm_mode |= PERIPH_CAP_SYNC;
		if (flags & DPARM_WIDE)
			xm.xm_mode |= PERIPH_CAP_WIDE16;
		if (flags & DPARM_TQING)
			xm.xm_mode |= PERIPH_CAP_TQING;
		/* Report the negotiated mode up to the midlayer. */
		scsipi_async_event(bus? &isp->isp_chanB : &isp->isp_chanA,
		    ASYNC_EVENT_XFER_MODE, &xm);
		break;
	}
	case ISPASYNC_BUS_RESET:
		bus = *((int *) arg);
		scsipi_async_event(bus? &isp->isp_chanB : &isp->isp_chanA,
		    ASYNC_EVENT_RESET, NULL);
		isp_prt(isp, ISP_LOGINFO, "SCSI bus %d reset detected", bus);
		break;
	case ISPASYNC_LIP:
		/*
		 * Don't do queue freezes or blockage until we have the
		 * thread running that can unfreeze/unblock us.
		 */
		if (isp->isp_osinfo.blocked == 0)  {
			if (isp->isp_osinfo.thread) {
				isp->isp_osinfo.blocked = 1;
				scsipi_channel_freeze(&isp->isp_chanA, 1);
			}
		}
		isp_prt(isp, ISP_LOGINFO, "LIP Received");
		break;
	case ISPASYNC_LOOP_RESET:
		/*
		 * Don't do queue freezes or blockage until we have the
		 * thread running that can unfreeze/unblock us.
		 */
		if (isp->isp_osinfo.blocked == 0) {
			if (isp->isp_osinfo.thread) {
				isp->isp_osinfo.blocked = 1;
				scsipi_channel_freeze(&isp->isp_chanA, 1);
			}
		}
		isp_prt(isp, ISP_LOGINFO, "Loop Reset Received");
		break;
	case ISPASYNC_LOOP_DOWN:
		/*
		 * Don't do queue freezes or blockage until we have the
		 * thread running that can unfreeze/unblock us.
		 */
		if (isp->isp_osinfo.blocked == 0) {
			if (isp->isp_osinfo.thread) {
				isp->isp_osinfo.blocked = 1;
				scsipi_channel_freeze(&isp->isp_chanA, 1);
			}
		}
		isp_prt(isp, ISP_LOGINFO, "Loop DOWN");
		break;
        case ISPASYNC_LOOP_UP:
		/*
		 * Let the subsequent ISPASYNC_CHANGE_NOTIFY invoke
		 * the FC worker thread. When the FC worker thread
		 * is done, let *it* call scsipi_channel_thaw...
		 */
		isp_prt(isp, ISP_LOGINFO, "Loop UP");
		break;
	case ISPASYNC_PROMENADE:
	/*
	 * NOTE(review): like NEW_TGT_PARAMS above, a false guard here
	 * falls through into ISPASYNC_CHANGE_NOTIFY — confirm intended.
	 */
	if (IS_FC(isp) && isp->isp_dblev) {
		const char fmt[] = "Target %d (Loop 0x%x) Port ID 0x%x "
		    "(role %s) %s\n Port WWN 0x%08x%08x\n Node WWN 0x%08x%08x";
		const static char *roles[4] = {
		    "None", "Target", "Initiator", "Target/Initiator"
		};
		fcparam *fcp = isp->isp_param;
		int tgt = *((int *) arg);
		struct lportdb *lp = &fcp->portdb[tgt];

		/* Announce a device arriving on or departing the loop. */
		isp_prt(isp, ISP_LOGINFO, fmt, tgt, lp->loopid, lp->portid,
		    roles[lp->roles & 0x3],
		    (lp->valid)? "Arrived" : "Departed",
		    (u_int32_t) (lp->port_wwn >> 32),
		    (u_int32_t) (lp->port_wwn & 0xffffffffLL),
		    (u_int32_t) (lp->node_wwn >> 32),
		    (u_int32_t) (lp->node_wwn & 0xffffffffLL));
		break;
	}
	case ISPASYNC_CHANGE_NOTIFY:
		if (arg == ISPASYNC_CHANGE_PDB) {
			isp_prt(isp, ISP_LOGINFO, "Port Database Changed");
		} else if (arg == ISPASYNC_CHANGE_SNS) {
			isp_prt(isp, ISP_LOGINFO,
			    "Name Server Database Changed");
		}

		/*
		 * We can set blocked here because we know it's now okay
		 * to try and run isp_fc_runstate (in order to build loop
		 * state). But we don't try and freeze the midlayer's queue
		 * if we have no thread that we can wake to later unfreeze
		 * it.
		 */
		if (isp->isp_osinfo.blocked == 0) {
			isp->isp_osinfo.blocked = 1;
			if (isp->isp_osinfo.thread) {
				scsipi_channel_freeze(&isp->isp_chanA, 1);
			}
		}
		/*
		 * Note that we have work for the thread to do, and
		 * if the thread is here already, wake it up.
		 */
		isp->isp_osinfo.threadwork++;
		if (isp->isp_osinfo.thread) {
			wakeup(&isp->isp_osinfo.thread);
		} else {
			isp_prt(isp, ISP_LOGDEBUG1, "no FC thread yet");
		}
		break;
	case ISPASYNC_FABRIC_DEV:
	{
		int target, base, lim;
		fcparam *fcp = isp->isp_param;
		struct lportdb *lp = NULL;
		struct lportdb *clp = (struct lportdb *) arg;
		char *pt;

		/* Decode the FC-GS port type for the announcement. */
		switch (clp->port_type) {
		case 1:
			pt = "   N_Port";
			break;
		case 2:
			pt = "  NL_Port";
			break;
		case 3:
			pt = "F/NL_Port";
			break;
		case 0x7f:
			pt = "  Nx_Port";
			break;
		case 0x81:
			pt = "  F_port";
			break;
		case 0x82:
			pt = "  FL_Port";
			break;
		case 0x84:
			pt = "   E_port";
			break;
		default:
			pt = " ";
			break;
		}

		isp_prt(isp, ISP_LOGINFO,
		    "%s Fabric Device @ PortID 0x%x", pt, clp->portid);

		/*
		 * If we don't have an initiator role we bail.
		 *
		 * We just use ISPASYNC_FABRIC_DEV for announcement purposes.
		 */

		if ((isp->isp_role & ISP_ROLE_INITIATOR) == 0) {
			break;
		}

		/*
		 * Is this entry for us? If so, we bail.
		 */

		if (fcp->isp_portid == clp->portid) {
			break;
		}

		/*
		 * Else, the default policy is to find room for it in
		 * our local port database. Later, when we execute
		 * the call to isp_pdb_sync either this newly arrived
		 * or already logged in device will be (re)announced.
		 */

		if (fcp->isp_topo == TOPO_FL_PORT)
			base = FC_SNS_ID+1;
		else
			base = 0;

		if (fcp->isp_topo == TOPO_N_PORT)
			lim = 1;
		else
			lim = MAX_FC_TARG;

		/*
		 * Is it already in our list?
		 */
		for (target = base; target < lim; target++) {
			if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
				continue;
			}
			lp = &fcp->portdb[target];
			if (lp->port_wwn == clp->port_wwn &&
			    lp->node_wwn == clp->node_wwn) {
				lp->fabric_dev = 1;
				break;
			}
		}
		if (target < lim) {
			break;
		}
		/* Not found: look for a free slot to record it in. */
		for (target = base; target < lim; target++) {
			if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
				continue;
			}
			lp = &fcp->portdb[target];
			if (lp->port_wwn == 0) {
				break;
			}
		}
		if (target == lim) {
			isp_prt(isp, ISP_LOGWARN,
			    "out of space for fabric devices");
			break;
		}
		lp->port_type = clp->port_type;
		lp->fc4_type = clp->fc4_type;
		lp->node_wwn = clp->node_wwn;
		lp->port_wwn = clp->port_wwn;
		lp->portid = clp->portid;
		lp->fabric_dev = 1;
		break;
	}
	case ISPASYNC_FW_CRASH:
	{
		/* Report the crash address and reinitialize the chip. */
		u_int16_t mbox1, mbox6;
		mbox1 = ISP_READ(isp, OUTMAILBOX1);
		if (IS_DUALBUS(isp)) {
			mbox6 = ISP_READ(isp, OUTMAILBOX6);
		} else {
			mbox6 = 0;
		}
                isp_prt(isp, ISP_LOGERR,
                    "Internal Firmware Error on bus %d @ RISC Address 0x%x",
                    mbox6, mbox1);
		isp_reinit(isp);
		break;
	}
	default:
		break;
	}
	return (0);
}
1097 
1098 #include <machine/stdarg.h>
1099 void
1100 isp_prt(struct ispsoftc *isp, int level, const char *fmt, ...)
1101 {
1102 	va_list ap;
1103 	if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) {
1104 		return;
1105 	}
1106 	printf("%s: ", isp->isp_name);
1107 	va_start(ap, fmt);
1108 	vprintf(fmt, ap);
1109 	va_end(ap);
1110 	printf("\n");
1111 }
1112