xref: /dragonfly/sys/dev/disk/isp/isp_freebsd.c (revision 5de36205)
1 /* $FreeBSD: src/sys/dev/isp/isp_freebsd.c,v 1.32.2.20 2002/10/11 18:49:25 mjacob Exp $ */
2 /* $DragonFly: src/sys/dev/disk/isp/isp_freebsd.c,v 1.13 2005/06/06 22:51:54 corecode Exp $ */
3 /*
4  * Platform (FreeBSD) dependent common attachment code for Qlogic adapters.
5  *
6  * Copyright (c) 1997, 1998, 1999, 2000, 2001 by Matthew Jacob
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice immediately at the beginning of the file, without modification,
13  *    this list of conditions, and the following disclaimer.
14  * 2. The name of the author may not be used to endorse or promote products
15  *    derived from this software without specific prior written permission.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
21  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  */
29 #include "isp_freebsd.h"
30 #include <sys/unistd.h>
31 #include <sys/kthread.h>
32 #include <machine/stdarg.h>	/* for use by isp_prt below */
33 #include <sys/conf.h>
34 #include <sys/ioccom.h>
35 #include "isp_ioctl.h"
36 
37 
38 static d_ioctl_t ispioctl;
39 static void isp_intr_enable(void *);
40 static void isp_cam_async(void *, u_int32_t, struct cam_path *, void *);
41 static void isp_poll(struct cam_sim *);
42 static timeout_t isp_watchdog;
43 static void isp_kthread(void *);
44 static void isp_action(struct cam_sim *, union ccb *);
45 
46 
47 #define ISP_CDEV_MAJOR	248
48 static struct cdevsw isp_cdevsw = {
49 	/* name */	"isp",
50 	/* maj */	ISP_CDEV_MAJOR,
51 	/* flags */	D_TAPE,
52 	/* port */	NULL,
53 	/* clone */	NULL,
54 
55 	/* open */	nullopen,
56 	/* close */	nullclose,
57 	/* read */	noread,
58 	/* write */	nowrite,
59 	/* ioctl */	ispioctl,
60 	/* poll */	nopoll,
61 	/* mmap */	nommap,
62 	/* strategy */	nostrategy,
63 	/* dump */	nodump,
64 	/* psize */	nopsize
65 };
66 
67 static struct ispsoftc *isplist = NULL;
68 
69 void
70 isp_attach(struct ispsoftc *isp)
71 {
72 	int primary, secondary;
73 	struct ccb_setasync csa;
74 	struct cam_devq *devq;
75 	struct cam_sim *sim;
76 	struct cam_path *path;
77 
78 	/*
79 	 * Establish (in case of 12X0) which bus is the primary.
80 	 */
81 
82 	primary = 0;
83 	secondary = 1;
84 
85 	/*
86 	 * Create the device queue for our SIM(s).
87 	 */
88 	devq = cam_simq_alloc(isp->isp_maxcmds);
89 	if (devq == NULL) {
90 		return;
91 	}
92 
93 	/*
94 	 * Construct our SIM entry.
95 	 */
96 	ISPLOCK_2_CAMLOCK(isp);
97 	sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp,
98 	    device_get_unit(isp->isp_dev), 1, isp->isp_maxcmds, devq);
99 	cam_simq_release(devq);		/* leaves 1 ref due to cam_sim_alloc */
100 	if (sim == NULL) {
101 		CAMLOCK_2_ISPLOCK(isp);
102 		return;
103 	}
104 	CAMLOCK_2_ISPLOCK(isp);
105 
106 	isp->isp_osinfo.ehook.ich_func = isp_intr_enable;
107 	isp->isp_osinfo.ehook.ich_arg = isp;
108 	isp->isp_osinfo.ehook.ich_desc = "isp";
109 	ISPLOCK_2_CAMLOCK(isp);
110 	if (config_intrhook_establish(&isp->isp_osinfo.ehook) != 0) {
111 		cam_sim_free(sim);
112 		CAMLOCK_2_ISPLOCK(isp);
113 		isp_prt(isp, ISP_LOGERR,
114 		    "could not establish interrupt enable hook");
115 		return;
116 	}
117 
118 	if (xpt_bus_register(sim, primary) != CAM_SUCCESS) {
119 		cam_sim_free(sim);
120 		CAMLOCK_2_ISPLOCK(isp);
121 		return;
122 	}
123 
124 	if (xpt_create_path(&path, NULL, cam_sim_path(sim),
125 	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
126 		xpt_bus_deregister(cam_sim_path(sim));
127 		cam_sim_free(sim);
128 		config_intrhook_disestablish(&isp->isp_osinfo.ehook);
129 		CAMLOCK_2_ISPLOCK(isp);
130 		return;
131 	}
132 
133 	xpt_setup_ccb(&csa.ccb_h, path, 5);
134 	csa.ccb_h.func_code = XPT_SASYNC_CB;
135 	csa.event_enable = AC_LOST_DEVICE;
136 	csa.callback = isp_cam_async;
137 	csa.callback_arg = sim;
138 	xpt_action((union ccb *)&csa);
139 	CAMLOCK_2_ISPLOCK(isp);
140 	isp->isp_sim = sim;
141 	isp->isp_path = path;
142 	/*
143 	 * Create a kernel thread for fibre channel instances. We
144 	 * don't have dual channel FC cards.
145 	 */
146 	if (IS_FC(isp)) {
147 		ISPLOCK_2_CAMLOCK(isp);
148 		if (kthread_create(isp_kthread, isp, &isp->isp_osinfo.kthread,
149 		    "%s: fc_thrd", device_get_nameunit(isp->isp_dev))) {
150 			xpt_bus_deregister(cam_sim_path(sim));
151 			cam_sim_free(sim);
152 			config_intrhook_disestablish(&isp->isp_osinfo.ehook);
153 			CAMLOCK_2_ISPLOCK(isp);
154 			isp_prt(isp, ISP_LOGERR, "could not create kthread");
155 			return;
156 		}
157 		CAMLOCK_2_ISPLOCK(isp);
158 	}
159 
160 
161 	/*
162 	 * If we have a second channel, construct SIM entry for that.
163 	 */
164 	if (IS_DUALBUS(isp)) {
165 		ISPLOCK_2_CAMLOCK(isp);
166 		sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp,
167 		    device_get_unit(isp->isp_dev), 1, isp->isp_maxcmds, devq);
168 		if (sim == NULL) {
169 			xpt_bus_deregister(cam_sim_path(isp->isp_sim));
170 			xpt_free_path(isp->isp_path);
171 			config_intrhook_disestablish(&isp->isp_osinfo.ehook);
172 			return;
173 		}
174 		if (xpt_bus_register(sim, secondary) != CAM_SUCCESS) {
175 			xpt_bus_deregister(cam_sim_path(isp->isp_sim));
176 			xpt_free_path(isp->isp_path);
177 			cam_sim_free(sim);
178 			config_intrhook_disestablish(&isp->isp_osinfo.ehook);
179 			CAMLOCK_2_ISPLOCK(isp);
180 			return;
181 		}
182 
183 		if (xpt_create_path(&path, NULL, cam_sim_path(sim),
184 		    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
185 			xpt_bus_deregister(cam_sim_path(isp->isp_sim));
186 			xpt_free_path(isp->isp_path);
187 			xpt_bus_deregister(cam_sim_path(sim));
188 			cam_sim_free(sim);
189 			config_intrhook_disestablish(&isp->isp_osinfo.ehook);
190 			CAMLOCK_2_ISPLOCK(isp);
191 			return;
192 		}
193 
194 		xpt_setup_ccb(&csa.ccb_h, path, 5);
195 		csa.ccb_h.func_code = XPT_SASYNC_CB;
196 		csa.event_enable = AC_LOST_DEVICE;
197 		csa.callback = isp_cam_async;
198 		csa.callback_arg = sim;
199 		xpt_action((union ccb *)&csa);
200 		CAMLOCK_2_ISPLOCK(isp);
201 		isp->isp_sim2 = sim;
202 		isp->isp_path2 = path;
203 	}
204 	/*
205 	 * Create device nodes
206 	 */
207 	cdevsw_add(&isp_cdevsw, -1, device_get_unit(isp->isp_dev));
208 	make_dev(&isp_cdevsw, device_get_unit(isp->isp_dev), UID_ROOT,
209 	    GID_OPERATOR, 0600, "%s", device_get_nameunit(isp->isp_dev));
210 
211 	if (isp->isp_role != ISP_ROLE_NONE) {
212 		isp->isp_state = ISP_RUNSTATE;
213 	}
214 	if (isplist == NULL) {
215 		isplist = isp;
216 	} else {
217 		struct ispsoftc *tmp = isplist;
218 		while (tmp->isp_osinfo.next) {
219 			tmp = tmp->isp_osinfo.next;
220 		}
221 		tmp->isp_osinfo.next = isp;
222 	}
223 
224 }
225 
226 static INLINE void
227 isp_freeze_loopdown(struct ispsoftc *isp, char *msg)
228 {
229 	if (isp->isp_osinfo.simqfrozen == 0) {
230 		isp_prt(isp, ISP_LOGDEBUG0, "%s: freeze simq (loopdown)", msg);
231 		isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN;
232 		ISPLOCK_2_CAMLOCK(isp);
233 		xpt_freeze_simq(isp->isp_sim, 1);
234 		CAMLOCK_2_ISPLOCK(isp);
235 	} else {
236 		isp_prt(isp, ISP_LOGDEBUG0, "%s: mark frozen (loopdown)", msg);
237 		isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN;
238 	}
239 }
240 
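/*
 * Character device ioctl entry point. The softc is located by matching
 * the minor number against each instance's unit number; management
 * ioctls (debug level, reset, rescan, LIP, statistics, FC parameters)
 * are then handled, most of them under ISP_LOCK.
 *
 * A hypothetical userland sketch (error handling omitted) of fetching
 * HBA information, assuming the device node is /dev/isp0:
 *
 *	struct isp_hba_device hba;
 *	int fd = open("/dev/isp0", O_RDONLY);
 *	if (ioctl(fd, ISP_FC_GETHINFO, (caddr_t) &hba) == 0)
 *		printf("loopid %d\n", hba.fc_loopid);
 */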
241 static int
242 ispioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, d_thread_t *td)
243 {
244 	struct ispsoftc *isp;
245 	int retval = ENOTTY;
246 
247 	isp = isplist;
248 	while (isp) {
249 		if (minor(dev) == device_get_unit(isp->isp_dev)) {
250 			break;
251 		}
252 		isp = isp->isp_osinfo.next;
253 	}
254 	if (isp == NULL)
255 		return (ENXIO);
256 
257 	switch (cmd) {
258 #ifdef	ISP_FW_CRASH_DUMP
259 	case ISP_GET_FW_CRASH_DUMP:
260 	{
261 		u_int16_t *ptr = FCPARAM(isp)->isp_dump_data;
262 		size_t sz;
263 
264 		retval = 0;
265 		if (IS_2200(isp))
266 			sz = QLA2200_RISC_IMAGE_DUMP_SIZE;
267 		else
268 			sz = QLA2300_RISC_IMAGE_DUMP_SIZE;
269 		ISP_LOCK(isp);
270 		if (ptr && *ptr) {
271 			void *uaddr = *((void **) addr);
272 			if (copyout(ptr, uaddr, sz)) {
273 				retval = EFAULT;
274 			} else {
275 				*ptr = 0;
276 			}
277 		} else {
278 			retval = ENXIO;
279 		}
280 		ISP_UNLOCK(isp);
281 		break;
282 	}
283 
284 	case ISP_FORCE_CRASH_DUMP:
285 		ISP_LOCK(isp);
286 		isp_freeze_loopdown(isp, "ispioctl(ISP_FORCE_CRASH_DUMP)");
287 		isp_fw_dump(isp);
288 		isp_reinit(isp);
289 		ISP_UNLOCK(isp);
290 		retval = 0;
291 		break;
292 #endif
293 	case ISP_SDBLEV:
294 	{
295 		int olddblev = isp->isp_dblev;
296 		isp->isp_dblev = *(int *)addr;
297 		*(int *)addr = olddblev;
298 		retval = 0;
299 		break;
300 	}
301 	case ISP_RESETHBA:
302 		ISP_LOCK(isp);
303 		isp_reinit(isp);
304 		ISP_UNLOCK(isp);
305 		retval = 0;
306 		break;
307 	case ISP_RESCAN:
308 		if (IS_FC(isp)) {
309 			ISP_LOCK(isp);
310 			if (isp_fc_runstate(isp, 5 * 1000000)) {
311 				retval = EIO;
312 			} else {
313 				retval = 0;
314 			}
315 			ISP_UNLOCK(isp);
316 		}
317 		break;
318 	case ISP_FC_LIP:
319 		if (IS_FC(isp)) {
320 			ISP_LOCK(isp);
321 			if (isp_control(isp, ISPCTL_SEND_LIP, 0)) {
322 				retval = EIO;
323 			} else {
324 				retval = 0;
325 			}
326 			ISP_UNLOCK(isp);
327 		}
328 		break;
329 	case ISP_FC_GETDINFO:
330 	{
331 		struct isp_fc_device *ifc = (struct isp_fc_device *) addr;
332 		struct lportdb *lp;
333 
334 		if (ifc->loopid < 0 || ifc->loopid >= MAX_FC_TARG) {
335 			retval = EINVAL;
336 			break;
337 		}
338 		ISP_LOCK(isp);
339 		lp = &FCPARAM(isp)->portdb[ifc->loopid];
340 		if (lp->valid) {
341 			ifc->loopid = lp->loopid;
342 			ifc->portid = lp->portid;
343 			ifc->node_wwn = lp->node_wwn;
344 			ifc->port_wwn = lp->port_wwn;
345 			retval = 0;
346 		} else {
347 			retval = ENODEV;
348 		}
349 		ISP_UNLOCK(isp);
350 		break;
351 	}
352 	case ISP_GET_STATS:
353 	{
354 		isp_stats_t *sp = (isp_stats_t *) addr;
355 
356 		MEMZERO(sp, sizeof (*sp));
357 		sp->isp_stat_version = ISP_STATS_VERSION;
358 		sp->isp_type = isp->isp_type;
359 		sp->isp_revision = isp->isp_revision;
360 		ISP_LOCK(isp);
361 		sp->isp_stats[ISP_INTCNT] = isp->isp_intcnt;
362 		sp->isp_stats[ISP_INTBOGUS] = isp->isp_intbogus;
363 		sp->isp_stats[ISP_INTMBOXC] = isp->isp_intmboxc;
364 		sp->isp_stats[ISP_INGOASYNC] = isp->isp_intoasync;
365 		sp->isp_stats[ISP_RSLTCCMPLT] = isp->isp_rsltccmplt;
366 		sp->isp_stats[ISP_FPHCCMCPLT] = isp->isp_fphccmplt;
367 		sp->isp_stats[ISP_RSCCHIWAT] = isp->isp_rscchiwater;
368 		sp->isp_stats[ISP_FPCCHIWAT] = isp->isp_fpcchiwater;
369 		ISP_UNLOCK(isp);
370 		retval = 0;
371 		break;
372 	}
373 	case ISP_CLR_STATS:
374 		ISP_LOCK(isp);
375 		isp->isp_intcnt = 0;
376 		isp->isp_intbogus = 0;
377 		isp->isp_intmboxc = 0;
378 		isp->isp_intoasync = 0;
379 		isp->isp_rsltccmplt = 0;
380 		isp->isp_fphccmplt = 0;
381 		isp->isp_rscchiwater = 0;
382 		isp->isp_fpcchiwater = 0;
383 		ISP_UNLOCK(isp);
384 		retval = 0;
385 		break;
386 	case ISP_FC_GETHINFO:
387 	{
388 		struct isp_hba_device *hba = (struct isp_hba_device *) addr;
389 		MEMZERO(hba, sizeof (*hba));
390 		ISP_LOCK(isp);
391 		hba->fc_speed = FCPARAM(isp)->isp_gbspeed;
392 		hba->fc_scsi_supported = 1;
393 		hba->fc_topology = FCPARAM(isp)->isp_topo + 1;
394 		hba->fc_loopid = FCPARAM(isp)->isp_loopid;
395 		hba->active_node_wwn = FCPARAM(isp)->isp_nodewwn;
396 		hba->active_port_wwn = FCPARAM(isp)->isp_portwwn;
397 		ISP_UNLOCK(isp);
398 		retval = 0;
399 		break;
400 	}
401 	case ISP_GET_FC_PARAM:
402 	{
403 		struct isp_fc_param *f = (struct isp_fc_param *) addr;
404 
405 		if (!IS_FC(isp)) {
406 			retval = EINVAL;
407 			break;
408 		}
409 		f->parameter = 0;
410 		if (strcmp(f->param_name, "framelength") == 0) {
411 			f->parameter = FCPARAM(isp)->isp_maxfrmlen;
412 			retval = 0;
413 			break;
414 		}
415 		if (strcmp(f->param_name, "exec_throttle") == 0) {
416 			f->parameter = FCPARAM(isp)->isp_execthrottle;
417 			retval = 0;
418 			break;
419 		}
420 		if (strcmp(f->param_name, "fullduplex") == 0) {
421 			if (FCPARAM(isp)->isp_fwoptions & ICBOPT_FULL_DUPLEX)
422 				f->parameter = 1;
423 			retval = 0;
424 			break;
425 		}
426 		if (strcmp(f->param_name, "loopid") == 0) {
427 			f->parameter = FCPARAM(isp)->isp_loopid;
428 			retval = 0;
429 			break;
430 		}
431 		retval = EINVAL;
432 		break;
433 	}
434 	case ISP_SET_FC_PARAM:
435 	{
436 		struct isp_fc_param *f = (struct isp_fc_param *) addr;
437 		u_int32_t param = f->parameter;
438 
439 		if (!IS_FC(isp)) {
440 			retval = EINVAL;
441 			break;
442 		}
443 		f->parameter = 0;
444 		if (strcmp(f->param_name, "framelength") == 0) {
445 			if (param != 512 && param != 1024 && param != 2048) {
446 				retval = EINVAL;
447 				break;
448 			}
449 			FCPARAM(isp)->isp_maxfrmlen = param;
450 			retval = 0;
451 			break;
452 		}
453 		if (strcmp(f->param_name, "exec_throttle") == 0) {
454 			if (param < 16 || param > 255) {
455 				retval = EINVAL;
456 				break;
457 			}
458 			FCPARAM(isp)->isp_execthrottle = param;
459 			retval = 0;
460 			break;
461 		}
462 		if (strcmp(f->param_name, "fullduplex") == 0) {
463 			if (param != 0 && param != 1) {
464 				retval = EINVAL;
465 				break;
466 			}
467 			if (param) {
468 				FCPARAM(isp)->isp_fwoptions |=
469 				    ICBOPT_FULL_DUPLEX;
470 			} else {
471 				FCPARAM(isp)->isp_fwoptions &=
472 				    ~ICBOPT_FULL_DUPLEX;
473 			}
474 			retval = 0;
475 			break;
476 		}
477 		if (strcmp(f->param_name, "loopid") == 0) {
478 			if (param < 0 || param > 125) {
479 				retval = EINVAL;
480 				break;
481 			}
482 			FCPARAM(isp)->isp_loopid = param;
483 			retval = 0;
484 			break;
485 		}
486 		retval = EINVAL;
487 		break;
488 	}
489 	default:
490 		break;
491 	}
492 	return (retval);
493 }
494 
495 static void
496 isp_intr_enable(void *arg)
497 {
498 	struct ispsoftc *isp = arg;
499 	if (isp->isp_role != ISP_ROLE_NONE) {
500 		ENABLE_INTS(isp);
501 	}
502 	/* Release our hook so that the boot can continue. */
503 	config_intrhook_disestablish(&isp->isp_osinfo.ehook);
504 }
505 
506 /*
507  * Put the target mode functions here, because some are inlines
508  */
509 
510 #ifdef	ISP_TARGET_MODE
511 
512 static INLINE int is_lun_enabled(struct ispsoftc *, int, lun_id_t);
513 static INLINE int are_any_luns_enabled(struct ispsoftc *, int);
514 static INLINE tstate_t *get_lun_statep(struct ispsoftc *, int, lun_id_t);
515 static INLINE void rls_lun_statep(struct ispsoftc *, tstate_t *);
516 static INLINE int isp_psema_sig_rqe(struct ispsoftc *, int);
517 static INLINE int isp_cv_wait_timed_rqe(struct ispsoftc *, int, int);
518 static INLINE void isp_cv_signal_rqe(struct ispsoftc *, int, int);
519 static INLINE void isp_vsema_rqe(struct ispsoftc *, int);
520 static INLINE atio_private_data_t *isp_get_atpd(struct ispsoftc *, int);
521 static cam_status
522 create_lun_state(struct ispsoftc *, int, struct cam_path *, tstate_t **);
523 static void destroy_lun_state(struct ispsoftc *, tstate_t *);
524 static void isp_en_lun(struct ispsoftc *, union ccb *);
525 static cam_status isp_abort_tgt_ccb(struct ispsoftc *, union ccb *);
526 static timeout_t isp_refire_putback_atio;
527 static void isp_complete_ctio(union ccb *);
528 static void isp_target_putback_atio(union ccb *);
529 static cam_status isp_target_start_ctio(struct ispsoftc *, union ccb *);
530 static int isp_handle_platform_atio(struct ispsoftc *, at_entry_t *);
531 static int isp_handle_platform_atio2(struct ispsoftc *, at2_entry_t *);
532 static int isp_handle_platform_ctio(struct ispsoftc *, void *);
533 static int isp_handle_platform_notify_scsi(struct ispsoftc *, in_entry_t *);
534 static int isp_handle_platform_notify_fc(struct ispsoftc *, in_fcentry_t *);
535 
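/*
 * Lun state bookkeeping: tstate_t entries live in a small hash table
 * (lun_hash) of singly linked chains keyed by LUN_HASH_FUNC(bus, lun).
 * The 'hold' count pins an entry while it is in use; destroy_lun_state
 * will not free a held entry.
 */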
536 static INLINE int
537 is_lun_enabled(struct ispsoftc *isp, int bus, lun_id_t lun)
538 {
539 	tstate_t *tptr;
540 	tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(isp, bus, lun)];
541 	if (tptr == NULL) {
542 		return (0);
543 	}
544 	do {
545 		if (tptr->lun == (lun_id_t) lun && tptr->bus == bus) {
546 			return (1);
547 		}
548 	} while ((tptr = tptr->next) != NULL);
549 	return (0);
550 }
551 
552 static INLINE int
553 are_any_luns_enabled(struct ispsoftc *isp, int port)
554 {
555 	int lo, hi;
556 	if (IS_DUALBUS(isp)) {
557 		lo = (port * (LUN_HASH_SIZE >> 1));
558 		hi = lo + (LUN_HASH_SIZE >> 1);
559 	} else {
560 		lo = 0;
561 		hi = LUN_HASH_SIZE;
562 	}
563 	for (; lo < hi; lo++) {
564 		if (isp->isp_osinfo.lun_hash[lo]) {
565 			return (1);
566 		}
567 	}
568 	return (0);
569 }
570 
571 static INLINE tstate_t *
572 get_lun_statep(struct ispsoftc *isp, int bus, lun_id_t lun)
573 {
574 	tstate_t *tptr = NULL;
575 
576 	if (lun == CAM_LUN_WILDCARD) {
577 		if (isp->isp_osinfo.tmflags[bus] & TM_WILDCARD_ENABLED) {
578 			tptr = &isp->isp_osinfo.tsdflt[bus];
579 			tptr->hold++;
580 			return (tptr);
581 		}
582 	} else {
583 		tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(isp, bus, lun)];
584 		if (tptr == NULL) {
585 			return (NULL);
586 		}
587 	}
588 
589 	do {
590 		if (tptr->lun == lun && tptr->bus == bus) {
591 			tptr->hold++;
592 			return (tptr);
593 		}
594 	} while ((tptr = tptr->next) != NULL);
595 	return (tptr);
596 }
597 
598 static __inline void
599 rls_lun_statep(struct ispsoftc *isp, tstate_t *tptr)
600 {
601 	if (tptr->hold)
602 		tptr->hold--;
603 }
604 
605 static __inline int
606 isp_psema_sig_rqe(struct ispsoftc *isp, int bus)
607 {
608 	while (isp->isp_osinfo.tmflags[bus] & TM_BUSY) {
609 		isp->isp_osinfo.tmflags[bus] |= TM_WANTED;
610 		if (tsleep(&isp->isp_osinfo.tmflags[bus], PCATCH, "i0", 0)) {
611 			return (-1);
612 		}
613 		isp->isp_osinfo.tmflags[bus] |= TM_BUSY;
614 	}
615 	return (0);
616 }
617 
618 static __inline int
619 isp_cv_wait_timed_rqe(struct ispsoftc *isp, int bus, int timo)
620 {
621 	if (tsleep(&isp->isp_osinfo.rstatus[bus], 0, "qt1", timo)) {
622 		return (-1);
623 	}
624 	return (0);
625 }
626 
627 static __inline void
628 isp_cv_signal_rqe(struct ispsoftc *isp, int bus, int status)
629 {
630 	isp->isp_osinfo.rstatus[bus] = status;
631 	wakeup(&isp->isp_osinfo.rstatus[bus]);
632 }
633 
634 static __inline void
635 isp_vsema_rqe(struct ispsoftc *isp, int bus)
636 {
637 	if (isp->isp_osinfo.tmflags[bus] & TM_WANTED) {
638 		isp->isp_osinfo.tmflags[bus] &= ~TM_WANTED;
639 		wakeup(&isp->isp_osinfo.tmflags[bus]);
640 	}
641 	isp->isp_osinfo.tmflags[bus] &= ~TM_BUSY;
642 }
643 
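/*
 * Find the ATIO private data adjunct for a tag by linear search of the
 * fixed-size atpdp array (ATPDPSIZE entries); returns NULL on no match.
 */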
644 static __inline atio_private_data_t *
645 isp_get_atpd(struct ispsoftc *isp, int tag)
646 {
647 	atio_private_data_t *atp;
648 	for (atp = isp->isp_osinfo.atpdp;
649 	    atp < &isp->isp_osinfo.atpdp[ATPDPSIZE]; atp++) {
650 		if (atp->tag == tag)
651 			return (atp);
652 	}
653 	return (NULL);
654 }
655 
656 static cam_status
657 create_lun_state(struct ispsoftc *isp, int bus,
658     struct cam_path *path, tstate_t **rslt)
659 {
660 	cam_status status;
661 	lun_id_t lun;
662 	int hfx;
663 	tstate_t *tptr, *new;
664 
665 	lun = xpt_path_lun_id(path);
666 	if (lun < 0) {
667 		return (CAM_LUN_INVALID);
668 	}
669 	if (is_lun_enabled(isp, bus, lun)) {
670 		return (CAM_LUN_ALRDY_ENA);
671 	}
672 	new = malloc(sizeof (tstate_t), M_DEVBUF, M_WAITOK | M_ZERO);
673 	status = xpt_create_path(&new->owner, NULL, xpt_path_path_id(path),
674 	    xpt_path_target_id(path), xpt_path_lun_id(path));
675 	if (status != CAM_REQ_CMP) {
676 		free(new, M_DEVBUF);
677 		return (status);
678 	}
679 	new->bus = bus;
680 	new->lun = lun;
681 	SLIST_INIT(&new->atios);
682 	SLIST_INIT(&new->inots);
683 	new->hold = 1;
684 
685 	hfx = LUN_HASH_FUNC(isp, new->bus, new->lun);
686 	tptr = isp->isp_osinfo.lun_hash[hfx];
687 	if (tptr == NULL) {
688 		isp->isp_osinfo.lun_hash[hfx] = new;
689 	} else {
690 		while (tptr->next)
691 			tptr = tptr->next;
692 		tptr->next = new;
693 	}
694 	*rslt = new;
695 	return (CAM_REQ_CMP);
696 }
697 
698 static INLINE void
699 destroy_lun_state(struct ispsoftc *isp, tstate_t *tptr)
700 {
701 	int hfx;
702 	tstate_t *lw, *pw;
703 
704 	hfx = LUN_HASH_FUNC(isp, tptr->bus, tptr->lun);
705 	if (tptr->hold) {
706 		return;
707 	}
708 	pw = isp->isp_osinfo.lun_hash[hfx];
709 	if (pw == NULL) {
710 		return;
711 	} else if (pw->lun == tptr->lun && pw->bus == tptr->bus) {
712 		isp->isp_osinfo.lun_hash[hfx] = pw->next;
713 	} else {
714 		lw = pw;
715 		pw = lw->next;
716 		while (pw) {
717 			if (pw->lun == tptr->lun && pw->bus == tptr->bus) {
718 				lw->next = pw->next;
719 				break;
720 			}
721 			lw = pw;
722 			pw = pw->next;
723 		}
724 		if (pw == NULL) {
725 			return;
726 		}
727 	}
728 	free(tptr, M_DEVBUF);
729 }
730 
731 /*
732  * we enter with our locks held.
733  */
734 static void
735 isp_en_lun(struct ispsoftc *isp, union ccb *ccb)
736 {
737 	const char lfmt[] = "Lun now %sabled for target mode on channel %d";
738 	struct ccb_en_lun *cel = &ccb->cel;
739 	tstate_t *tptr;
740 	u_int16_t rstat;
741 	int bus, cmd, av, wildcard;
742 	lun_id_t lun;
743 	target_id_t tgt;
744 
745 
746 	bus = XS_CHANNEL(ccb) & 0x1;
747 	tgt = ccb->ccb_h.target_id;
748 	lun = ccb->ccb_h.target_lun;
749 
750 	/*
751 	 * Do some sanity checking first.
752 	 */
753 
754 	if ((lun != CAM_LUN_WILDCARD) &&
755 	    (lun < 0 || lun >= (lun_id_t) isp->isp_maxluns)) {
756 		ccb->ccb_h.status = CAM_LUN_INVALID;
757 		return;
758 	}
759 
760 	if (IS_SCSI(isp)) {
761 		sdparam *sdp = isp->isp_param;
762 		sdp += bus;
763 		if (tgt != CAM_TARGET_WILDCARD &&
764 		    tgt != sdp->isp_initiator_id) {
765 			ccb->ccb_h.status = CAM_TID_INVALID;
766 			return;
767 		}
768 	} else {
769 		if (tgt != CAM_TARGET_WILDCARD &&
770 		    tgt != FCPARAM(isp)->isp_iid) {
771 			ccb->ccb_h.status = CAM_TID_INVALID;
772 			return;
773 		}
774 		/*
775 		 * This is as good a place as any to check f/w capabilities.
776 		 */
777 		if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_TMODE) == 0) {
778 			isp_prt(isp, ISP_LOGERR,
779 			    "firmware does not support target mode");
780 			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
781 			return;
782 		}
783 		/*
784 		 * XXX: We *could* handle non-SCCLUN f/w, but we'd have to
785 		 * XXX: dork with our already fragile enable/disable code.
786 		 */
787 		if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) == 0) {
788 			isp_prt(isp, ISP_LOGERR,
789 			    "firmware not SCCLUN capable");
790 		}
791 	}
792 
793 	if (tgt == CAM_TARGET_WILDCARD) {
794 		if (lun == CAM_LUN_WILDCARD) {
795 			wildcard = 1;
796 		} else {
797 			ccb->ccb_h.status = CAM_LUN_INVALID;
798 			return;
799 		}
800 	} else {
801 		wildcard = 0;
802 	}
803 
804 	/*
805 	 * Next check to see whether this is a target/lun wildcard action.
806 	 *
807 	 * If so, we know that we can accept commands for luns that haven't
808 	 * been enabled yet and send them upstream. Otherwise, we have to
809 	 * handle them locally (if we see them at all).
810 	 */
811 
812 	if (wildcard) {
813 		tptr = &isp->isp_osinfo.tsdflt[bus];
814 		if (cel->enable) {
815 			if (isp->isp_osinfo.tmflags[bus] &
816 			    TM_WILDCARD_ENABLED) {
817 				ccb->ccb_h.status = CAM_LUN_ALRDY_ENA;
818 				return;
819 			}
820 			ccb->ccb_h.status =
821 			    xpt_create_path(&tptr->owner, NULL,
822 			    xpt_path_path_id(ccb->ccb_h.path),
823 			    xpt_path_target_id(ccb->ccb_h.path),
824 			    xpt_path_lun_id(ccb->ccb_h.path));
825 			if (ccb->ccb_h.status != CAM_REQ_CMP) {
826 				return;
827 			}
828 			SLIST_INIT(&tptr->atios);
829 			SLIST_INIT(&tptr->inots);
830 			isp->isp_osinfo.tmflags[bus] |= TM_WILDCARD_ENABLED;
831 		} else {
832 			if ((isp->isp_osinfo.tmflags[bus] &
833 			    TM_WILDCARD_ENABLED) == 0) {
834 				ccb->ccb_h.status = CAM_REQ_CMP;
835 				return;
836 			}
837 			if (tptr->hold) {
838 				ccb->ccb_h.status = CAM_SCSI_BUSY;
839 				return;
840 			}
841 			xpt_free_path(tptr->owner);
842 			isp->isp_osinfo.tmflags[bus] &= ~TM_WILDCARD_ENABLED;
843 		}
844 	}
845 
846 	/*
847 	 * Now check to see whether this bus needs to be
848 	 * enabled/disabled with respect to target mode.
849 	 */
850 	av = bus << 31;
851 	if (cel->enable && !(isp->isp_osinfo.tmflags[bus] & TM_TMODE_ENABLED)) {
852 		av |= ENABLE_TARGET_FLAG;
853 		av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
854 		if (av) {
855 			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
856 			if (wildcard) {
857 				isp->isp_osinfo.tmflags[bus] &=
858 				    ~TM_WILDCARD_ENABLED;
859 				xpt_free_path(tptr->owner);
860 			}
861 			return;
862 		}
863 		isp->isp_osinfo.tmflags[bus] |= TM_TMODE_ENABLED;
864 		isp_prt(isp, ISP_LOGINFO,
865 		    "Target Mode enabled on channel %d", bus);
866 	} else if (cel->enable == 0 &&
867 	    (isp->isp_osinfo.tmflags[bus] & TM_TMODE_ENABLED) && wildcard) {
868 		if (are_any_luns_enabled(isp, bus)) {
869 			ccb->ccb_h.status = CAM_SCSI_BUSY;
870 			return;
871 		}
872 		av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
873 		if (av) {
874 			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
875 			return;
876 		}
877 		isp->isp_osinfo.tmflags[bus] &= ~TM_TMODE_ENABLED;
878 		isp_prt(isp, ISP_LOGINFO,
879 		    "Target Mode disabled on channel %d", bus);
880 	}
881 
882 	if (wildcard) {
883 		ccb->ccb_h.status = CAM_REQ_CMP;
884 		return;
885 	}
886 
887 	if (cel->enable) {
888 		ccb->ccb_h.status =
889 		    create_lun_state(isp, bus, ccb->ccb_h.path, &tptr);
890 		if (ccb->ccb_h.status != CAM_REQ_CMP) {
891 			return;
892 		}
893 	} else {
894 		tptr = get_lun_statep(isp, bus, lun);
895 		if (tptr == NULL) {
896 			ccb->ccb_h.status = CAM_LUN_INVALID;
897 			return;
898 		}
899 	}
900 
901 	if (isp_psema_sig_rqe(isp, bus)) {
902 		rls_lun_statep(isp, tptr);
903 		if (cel->enable)
904 			destroy_lun_state(isp, tptr);
905 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
906 		return;
907 	}
908 
909 	if (cel->enable) {
910 		u_int32_t seq = isp->isp_osinfo.rollinfo++;
911 		int c, n, ulun = lun;
912 
913 		cmd = RQSTYPE_ENABLE_LUN;
914 		c = DFLT_CMND_CNT;
915 		n = DFLT_INOT_CNT;
916 		if (IS_FC(isp) && lun != 0) {
917 			cmd = RQSTYPE_MODIFY_LUN;
918 			n = 0;
919 			/*
920 			 * For SCC firmware, we only deal with setting
921 			 * (enabling or modifying) lun 0.
922 			 */
923 			ulun = 0;
924 		}
925 		rstat = LUN_ERR;
926 		if (isp_lun_cmd(isp, cmd, bus, tgt, ulun, c, n, seq)) {
927 			xpt_print_path(ccb->ccb_h.path);
928 			isp_prt(isp, ISP_LOGWARN, "isp_lun_cmd failed");
929 			goto out;
930 		}
931 		if (isp_cv_wait_timed_rqe(isp, bus, 30 * hz)) {
932 			xpt_print_path(ccb->ccb_h.path);
933 			isp_prt(isp, ISP_LOGERR,
934 			    "wait for ENABLE/MODIFY LUN timed out");
935 			goto out;
936 		}
937 		rstat = isp->isp_osinfo.rstatus[bus];
938 		if (rstat != LUN_OK) {
939 			xpt_print_path(ccb->ccb_h.path);
940 			isp_prt(isp, ISP_LOGERR,
941 			    "ENABLE/MODIFY LUN returned 0x%x", rstat);
942 			goto out;
943 		}
944 	} else {
945 		int c, n, ulun = lun;
946 		u_int32_t seq;
947 
948 		rstat = LUN_ERR;
949 		seq = isp->isp_osinfo.rollinfo++;
950 		cmd = -RQSTYPE_MODIFY_LUN;
951 
952 		c = DFLT_CMND_CNT;
953 		n = DFLT_INOT_CNT;
954 		if (IS_FC(isp) && lun != 0) {
955 			n = 0;
956 			/*
957 			 * For SCC firmware, we only deal with setting
958 			 * (enabling or modifying) lun 0.
959 			 */
960 			ulun = 0;
961 		}
962 		if (isp_lun_cmd(isp, cmd, bus, tgt, ulun, c, n, seq)) {
963 			xpt_print_path(ccb->ccb_h.path);
964 			isp_prt(isp, ISP_LOGERR, "isp_lun_cmd failed");
965 			goto out;
966 		}
967 		if (isp_cv_wait_timed_rqe(isp, bus, 30 * hz)) {
968 			xpt_print_path(ccb->ccb_h.path);
969 			isp_prt(isp, ISP_LOGERR,
970 			    "wait for MODIFY LUN timed out");
971 			goto out;
972 		}
973 		rstat = isp->isp_osinfo.rstatus[bus];
974 		if (rstat != LUN_OK) {
975 			xpt_print_path(ccb->ccb_h.path);
976 			isp_prt(isp, ISP_LOGERR,
977 			    "MODIFY LUN returned 0x%x", rstat);
978 			goto out;
979 		}
980 		if (IS_FC(isp) && lun) {
981 			goto out;
982 		}
983 
984 		seq = isp->isp_osinfo.rollinfo++;
985 
986 		rstat = LUN_ERR;
987 		cmd = -RQSTYPE_ENABLE_LUN;
988 		if (isp_lun_cmd(isp, cmd, bus, tgt, lun, 0, 0, seq)) {
989 			xpt_print_path(ccb->ccb_h.path);
990 			isp_prt(isp, ISP_LOGERR, "isp_lun_cmd failed");
991 			goto out;
992 		}
993 		if (isp_cv_wait_timed_rqe(isp, bus, 30 * hz)) {
994 			xpt_print_path(ccb->ccb_h.path);
995 			isp_prt(isp, ISP_LOGERR,
996 			     "wait for DISABLE LUN timed out");
997 			goto out;
998 		}
999 		rstat = isp->isp_osinfo.rstatus[bus];
1000 		if (rstat != LUN_OK) {
1001 			xpt_print_path(ccb->ccb_h.path);
1002 			isp_prt(isp, ISP_LOGWARN,
1003 			    "DISABLE LUN returned 0x%x", rstat);
1004 			goto out;
1005 		}
1006 		if (are_any_luns_enabled(isp, bus) == 0) {
1007 			av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
1008 			if (av) {
1009 				isp_prt(isp, ISP_LOGWARN,
1010 				    "disable target mode on channel %d failed",
1011 				    bus);
1012 				goto out;
1013 			}
1014 			isp->isp_osinfo.tmflags[bus] &= ~TM_TMODE_ENABLED;
1015 			xpt_print_path(ccb->ccb_h.path);
1016 			isp_prt(isp, ISP_LOGINFO,
1017 			    "Target Mode disabled on channel %d", bus);
1018 		}
1019 	}
1020 
1021 out:
1022 	isp_vsema_rqe(isp, bus);
1023 
1024 	if (rstat != LUN_OK) {
1025 		xpt_print_path(ccb->ccb_h.path);
1026 		isp_prt(isp, ISP_LOGWARN,
1027 		    "lun %sable failed", (cel->enable) ? "en" : "dis");
1028 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1029 		rls_lun_statep(isp, tptr);
1030 		if (cel->enable)
1031 			destroy_lun_state(isp, tptr);
1032 	} else {
1033 		xpt_print_path(ccb->ccb_h.path);
1034 		isp_prt(isp, ISP_LOGINFO, lfmt,
1035 		    (cel->enable) ? "en" : "dis", bus);
1036 		rls_lun_statep(isp, tptr);
1037 		if (cel->enable == 0) {
1038 			destroy_lun_state(isp, tptr);
1039 		}
1040 		ccb->ccb_h.status = CAM_REQ_CMP;
1041 	}
1042 }
1043 
1044 static cam_status
1045 isp_abort_tgt_ccb(struct ispsoftc *isp, union ccb *ccb)
1046 {
1047 	tstate_t *tptr;
1048 	struct ccb_hdr_slist *lp;
1049 	struct ccb_hdr *curelm;
1050 	int found;
1051 	union ccb *accb = ccb->cab.abort_ccb;
1052 
1053 	if (accb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
1054 		if (IS_FC(isp) && (accb->ccb_h.target_id !=
1055 		    ((fcparam *) isp->isp_param)->isp_loopid)) {
1056 			return (CAM_PATH_INVALID);
1057 		} else if (IS_SCSI(isp) && (accb->ccb_h.target_id !=
1058 		    ((sdparam *) isp->isp_param)->isp_initiator_id)) {
1059 			return (CAM_PATH_INVALID);
1060 		}
1061 	}
1062 	tptr = get_lun_statep(isp, XS_CHANNEL(ccb), accb->ccb_h.target_lun);
1063 	if (tptr == NULL) {
1064 		return (CAM_PATH_INVALID);
1065 	}
1066 	if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
1067 		lp = &tptr->atios;
1068 	} else if (accb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
1069 		lp = &tptr->inots;
1070 	} else {
1071 		rls_lun_statep(isp, tptr);
1072 		return (CAM_UA_ABORT);
1073 	}
1074 	curelm = SLIST_FIRST(lp);
1075 	found = 0;
1076 	if (curelm == &accb->ccb_h) {
1077 		found = 1;
1078 		SLIST_REMOVE_HEAD(lp, sim_links.sle);
1079 	} else {
1080 		while (curelm != NULL) {
1081 			struct ccb_hdr *nextelm;
1082 
1083 			nextelm = SLIST_NEXT(curelm, sim_links.sle);
1084 			if (nextelm == &accb->ccb_h) {
1085 				found = 1;
1086 				SLIST_NEXT(curelm, sim_links.sle) =
1087 				    SLIST_NEXT(nextelm, sim_links.sle);
1088 				break;
1089 			}
1090 			curelm = nextelm;
1091 		}
1092 	}
1093 	rls_lun_statep(isp, tptr);
1094 	if (found) {
1095 		accb->ccb_h.status = CAM_REQ_ABORTED;
1096 		return (CAM_REQ_CMP);
1097 	}
1098 	return(CAM_PATH_INVALID);
1099 }
1100 
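/*
 * Build and queue a CTIO (parallel SCSI) or CTIO2 (fibre channel) for a
 * CAM continue-target-I/O request: fill in a local queue entry, stash
 * the ccb in the handle table, and hand the entry to the DMA setup code.
 */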
1101 static cam_status
1102 isp_target_start_ctio(struct ispsoftc *isp, union ccb *ccb)
1103 {
1104 	void *qe;
1105 	struct ccb_scsiio *cso = &ccb->csio;
1106 	u_int16_t *hp, save_handle;
1107 	u_int16_t nxti, optr;
1108 	u_int8_t local[QENTRY_LEN];
1109 
1110 
1111 	if (isp_getrqentry(isp, &nxti, &optr, &qe)) {
1112 		xpt_print_path(ccb->ccb_h.path);
1113 		printf("Request Queue Overflow in isp_target_start_ctio\n");
1114 		return (CAM_RESRC_UNAVAIL);
1115 	}
1116 	bzero(local, QENTRY_LEN);
1117 
1118 	/*
1119 	 * We're either moving data or completing a command here.
1120 	 */
1121 
1122 	if (IS_FC(isp)) {
1123 		atio_private_data_t *atp;
1124 		ct2_entry_t *cto = (ct2_entry_t *) local;
1125 
1126 		cto->ct_header.rqs_entry_type = RQSTYPE_CTIO2;
1127 		cto->ct_header.rqs_entry_count = 1;
1128 		cto->ct_iid = cso->init_id;
1129 		if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) == 0) {
1130 			cto->ct_lun = ccb->ccb_h.target_lun;
1131 		}
1132 
1133 		atp = isp_get_atpd(isp, cso->tag_id);
1134 		if (atp == NULL) {
1135 			isp_prt(isp, ISP_LOGERR,
1136 			    "cannot find private data adjunct for tag %x",
1137 			    cso->tag_id);
1138 			return (-1);
1139 		}
1140 
1141 		cto->ct_rxid = cso->tag_id;
1142 		if (cso->dxfer_len == 0) {
1143 			cto->ct_flags |= CT2_FLAG_MODE1 | CT2_NO_DATA;
1144 			if (ccb->ccb_h.flags & CAM_SEND_STATUS) {
1145 				cto->ct_flags |= CT2_SENDSTATUS;
1146 				cto->rsp.m1.ct_scsi_status = cso->scsi_status;
1147 				cto->ct_resid =
1148 				    atp->orig_datalen - atp->bytes_xfered;
1149 				if (cto->ct_resid < 0) {
1150 					cto->rsp.m1.ct_scsi_status |=
1151 					    CT2_DATA_OVER;
1152 				} else if (cto->ct_resid > 0) {
1153 					cto->rsp.m1.ct_scsi_status |=
1154 					    CT2_DATA_UNDER;
1155 				}
1156 			}
1157 			if ((ccb->ccb_h.flags & CAM_SEND_SENSE) != 0) {
1158 				int m = min(cso->sense_len, MAXRESPLEN);
1159 				bcopy(&cso->sense_data, cto->rsp.m1.ct_resp, m);
1160 				cto->rsp.m1.ct_senselen = m;
1161 				cto->rsp.m1.ct_scsi_status |= CT2_SNSLEN_VALID;
1162 			}
1163 		} else {
1164 			cto->ct_flags |= CT2_FLAG_MODE0;
1165 			if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1166 				cto->ct_flags |= CT2_DATA_IN;
1167 			} else {
1168 				cto->ct_flags |= CT2_DATA_OUT;
1169 			}
1170 			cto->ct_reloff = atp->bytes_xfered;
1171 			if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) {
1172 				cto->ct_flags |= CT2_SENDSTATUS;
1173 				cto->rsp.m0.ct_scsi_status = cso->scsi_status;
1174 				cto->ct_resid =
1175 				    atp->orig_datalen -
1176 				    (atp->bytes_xfered + cso->dxfer_len);
1177 				if (cto->ct_resid < 0) {
1178 					cto->rsp.m0.ct_scsi_status |=
1179 					    CT2_DATA_OVER;
1180 				} else if (cto->ct_resid > 0) {
1181 					cto->rsp.m0.ct_scsi_status |=
1182 					    CT2_DATA_UNDER;
1183 				}
1184 			} else {
1185 				atp->last_xframt = cso->dxfer_len;
1186 			}
1187 			/*
1188 			 * If we're sending data and status back together,
1189 			 * we can't also send back sense data.
1190 			 */
1191 			ccb->ccb_h.flags &= ~CAM_SEND_SENSE;
1192 		}
1193 
1194 		if (cto->ct_flags & CT2_SENDSTATUS) {
1195 			isp_prt(isp, ISP_LOGTDEBUG0,
1196 			    "CTIO2[%x] STATUS %x origd %u curd %u resid %u",
1197 			    cto->ct_rxid, cso->scsi_status, atp->orig_datalen,
1198 			    cso->dxfer_len, cto->ct_resid);
1199 			cto->ct_flags |= CT2_CCINCR;
1200 			atp->state = ATPD_STATE_LAST_CTIO;
1201 		} else
1202 			atp->state = ATPD_STATE_CTIO;
1203 		cto->ct_timeout = 10;
1204 		hp = &cto->ct_syshandle;
1205 	} else {
1206 		ct_entry_t *cto = (ct_entry_t *) local;
1207 
1208 		cto->ct_header.rqs_entry_type = RQSTYPE_CTIO;
1209 		cto->ct_header.rqs_entry_count = 1;
1210 		cto->ct_iid = cso->init_id;
1211 		cto->ct_iid |= XS_CHANNEL(ccb) << 7;
1212 		cto->ct_tgt = ccb->ccb_h.target_id;
1213 		cto->ct_lun = ccb->ccb_h.target_lun;
1214 		cto->ct_fwhandle = AT_GET_HANDLE(cso->tag_id);
1215 		if (AT_HAS_TAG(cso->tag_id)) {
1216 			cto->ct_tag_val = (u_int8_t) AT_GET_TAG(cso->tag_id);
1217 			cto->ct_flags |= CT_TQAE;
1218 		}
1219 		if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) {
1220 			cto->ct_flags |= CT_NODISC;
1221 		}
1222 		if (cso->dxfer_len == 0) {
1223 			cto->ct_flags |= CT_NO_DATA;
1224 		} else if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1225 			cto->ct_flags |= CT_DATA_IN;
1226 		} else {
1227 			cto->ct_flags |= CT_DATA_OUT;
1228 		}
1229 		if (ccb->ccb_h.flags & CAM_SEND_STATUS) {
1230 			cto->ct_flags |= CT_SENDSTATUS|CT_CCINCR;
1231 			cto->ct_scsi_status = cso->scsi_status;
1232 			cto->ct_resid = cso->resid;
1233 			isp_prt(isp, ISP_LOGTDEBUG0,
1234 			    "CTIO[%x] SCSI STATUS 0x%x resid %d tag_id %x",
1235 			    cto->ct_fwhandle, cso->scsi_status, cso->resid,
1236 			    cso->tag_id);
1237 		}
1238 		ccb->ccb_h.flags &= ~CAM_SEND_SENSE;
1239 		cto->ct_timeout = 10;
1240 		hp = &cto->ct_syshandle;
1241 	}
1242 
1243 	if (isp_save_xs(isp, (XS_T *)ccb, hp)) {
1244 		xpt_print_path(ccb->ccb_h.path);
1245 		printf("No XFLIST pointers for isp_target_start_ctio\n");
1246 		return (CAM_RESRC_UNAVAIL);
1247 	}
1248 
1249 
1250 	/*
1251 	 * Call the dma setup routines for this entry (and any subsequent
1252 	 * CTIOs) if there's data to move, and then tell the f/w it's got
1253 	 * new things to play with. As with isp_start's usage of DMA setup,
1254 	 * any swizzling is done in the machine dependent layer. Because
1255 	 * of this, we put the request onto the queue area first in native
1256 	 * format.
1257 	 */
1258 
1259 	save_handle = *hp;
1260 
1261 	switch (ISP_DMASETUP(isp, cso, (ispreq_t *) local, &nxti, optr)) {
1262 	case CMD_QUEUED:
1263 		ISP_ADD_REQUEST(isp, nxti);
1264 		return (CAM_REQ_INPROG);
1265 
1266 	case CMD_EAGAIN:
1267 		ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
1268 		isp_destroy_handle(isp, save_handle);
1269 		return (CAM_RESRC_UNAVAIL);
1270 
1271 	default:
1272 		isp_destroy_handle(isp, save_handle);
1273 		return (XS_ERR(ccb));
1274 	}
1275 }
1276 
1277 static void
1278 isp_refire_putback_atio(void *arg)
1279 {
1280 	crit_enter();
1281 	isp_target_putback_atio(arg);
1282 	crit_exit();
1283 }
1284 
1285 static void
1286 isp_target_putback_atio(union ccb *ccb)
1287 {
1288 	struct ispsoftc *isp;
1289 	struct ccb_scsiio *cso;
1290 	u_int16_t nxti, optr;
1291 	void *qe;
1292 
1293 	isp = XS_ISP(ccb);
1294 
1295 	if (isp_getrqentry(isp, &nxti, &optr, &qe)) {
1296 		(void) timeout(isp_refire_putback_atio, ccb, 10);
1297 		isp_prt(isp, ISP_LOGWARN,
1298 		    "isp_target_putback_atio: Request Queue Overflow");
1299 		return;
1300 	}
1301 	bzero(qe, QENTRY_LEN);
1302 	cso = &ccb->csio;
1303 	if (IS_FC(isp)) {
1304 		at2_entry_t local, *at = &local;
1305 		MEMZERO(at, sizeof (at2_entry_t));
1306 		at->at_header.rqs_entry_type = RQSTYPE_ATIO2;
1307 		at->at_header.rqs_entry_count = 1;
1308 		if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) != 0) {
1309 			at->at_scclun = (uint16_t) ccb->ccb_h.target_lun;
1310 		} else {
1311 			at->at_lun = (uint8_t) ccb->ccb_h.target_lun;
1312 		}
1313 		at->at_status = CT_OK;
1314 		at->at_rxid = cso->tag_id;
1315 		at->at_iid = cso->ccb_h.target_id;
1316 		isp_put_atio2(isp, at, qe);
1317 	} else {
1318 		at_entry_t local, *at = &local;
1319 		MEMZERO(at, sizeof (at_entry_t));
1320 		at->at_header.rqs_entry_type = RQSTYPE_ATIO;
1321 		at->at_header.rqs_entry_count = 1;
1322 		at->at_iid = cso->init_id;
1323 		at->at_iid |= XS_CHANNEL(ccb) << 7;
1324 		at->at_tgt = cso->ccb_h.target_id;
1325 		at->at_lun = cso->ccb_h.target_lun;
1326 		at->at_status = CT_OK;
1327 		at->at_tag_val = AT_GET_TAG(cso->tag_id);
1328 		at->at_handle = AT_GET_HANDLE(cso->tag_id);
1329 		isp_put_atio(isp, at, qe);
1330 	}
1331 	ISP_TDQE(isp, "isp_target_putback_atio", (int) optr, qe);
1332 	ISP_ADD_REQUEST(isp, nxti);
1333 	isp_complete_ctio(ccb);
1334 }
1335 
1336 static void
1337 isp_complete_ctio(union ccb *ccb)
1338 {
1339 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
1340 		ccb->ccb_h.status |= CAM_REQ_CMP;
1341 	}
1342 	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1343 	xpt_done(ccb);
1344 }
1345 
1346 /*
1347  * Handle ATIO stuff that the generic code can't.
1348  * This means handling CDBs.
1349  */
1350 
1351 static int
1352 isp_handle_platform_atio(struct ispsoftc *isp, at_entry_t *aep)
1353 {
1354 	tstate_t *tptr;
1355 	int status, bus, iswildcard;
1356 	struct ccb_accept_tio *atiop;
1357 
1358 	/*
1359 	 * The firmware status (except for the QLTM_SVALID bit)
1360 	 * indicates why this ATIO was sent to us.
1361 	 *
1362 	 * If QLTM_SVALID is set, the firmware has recommended Sense Data.
1363 	 *
1364 	 * If the DISCONNECTS DISABLED bit is set in the flags field,
1365 	 * we're still connected on the SCSI bus.
1366 	 */
1367 	status = aep->at_status;
1368 	if ((status & ~QLTM_SVALID) == AT_PHASE_ERROR) {
1369 		/*
1370 		 * Bus Phase Sequence error. We should have sense data
1371 		 * suggested by the f/w. I'm not sure quite yet what
1372 		 * to do about this for CAM.
1373 		 */
1374 		isp_prt(isp, ISP_LOGWARN, "PHASE ERROR");
1375 		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1376 		return (0);
1377 	}
1378 	if ((status & ~QLTM_SVALID) != AT_CDB) {
1379 		isp_prt(isp, ISP_LOGWARN, "bad atio (0x%x) leaked to platform",
1380 		    status);
1381 		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1382 		return (0);
1383 	}
1384 
1385 	bus = GET_BUS_VAL(aep->at_iid);
1386 	tptr = get_lun_statep(isp, bus, aep->at_lun);
1387 	if (tptr == NULL) {
1388 		tptr = get_lun_statep(isp, bus, CAM_LUN_WILDCARD);
1389 		iswildcard = 1;
1390 	} else {
1391 		iswildcard = 0;
1392 	}
1393 
1394 	if (tptr == NULL) {
1395 		/*
1396 		 * Because we can't autofeed sense data back with
1397 		 * a command for parallel SCSI, we can't give back
1398 		 * a CHECK CONDITION. We'll give back a BUSY status
1399 		 * instead. This works out okay because the only
1400 		 * time we should, in fact, get this, is in the
1401 		 * case that somebody configured us without the
1402 		 * blackhole driver, so they get what they deserve.
1403 		 */
1404 		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1405 		return (0);
1406 	}
1407 
1408 	atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios);
1409 	if (atiop == NULL) {
1410 		/*
1411 		 * Because we can't autofeed sense data back with
1412 		 * a command for parallel SCSI, we can't give back
1413 		 * a CHECK CONDITION. We'll give back a QUEUE FULL status
1414 		 * instead. This works out okay because the only time we
1415 		 * should, in fact, get this, is in the case that we've
1416 		 * run out of ATIOS.
1417 		 */
1418 		xpt_print_path(tptr->owner);
1419 		isp_prt(isp, ISP_LOGWARN,
1420 		    "no ATIOS for lun %d from initiator %d on channel %d",
1421 		    aep->at_lun, GET_IID_VAL(aep->at_iid), bus);
1422 		if (aep->at_flags & AT_TQAE)
1423 			isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0);
1424 		else
1425 			isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1426 		rls_lun_statep(isp, tptr);
1427 		return (0);
1428 	}
1429 	SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle);
1430 	if (iswildcard) {
1431 		atiop->ccb_h.target_id = aep->at_tgt;
1432 		atiop->ccb_h.target_lun = aep->at_lun;
1433 	}
1434 	if (aep->at_flags & AT_NODISC) {
1435 		atiop->ccb_h.flags = CAM_DIS_DISCONNECT;
1436 	} else {
1437 		atiop->ccb_h.flags = 0;
1438 	}
1439 
1440 	if (status & QLTM_SVALID) {
1441 		size_t amt = imin(QLTM_SENSELEN, sizeof (atiop->sense_data));
1442 		atiop->sense_len = amt;
1443 		MEMCPY(&atiop->sense_data, aep->at_sense, amt);
1444 	} else {
1445 		atiop->sense_len = 0;
1446 	}
1447 
1448 	atiop->init_id = GET_IID_VAL(aep->at_iid);
1449 	atiop->cdb_len = aep->at_cdblen;
1450 	MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, aep->at_cdblen);
1451 	atiop->ccb_h.status = CAM_CDB_RECVD;
1452 	/*
1453 	 * Construct a tag 'id' based upon tag value (which may be 0..255)
1454 	 * and the handle (which we have to preserve).
1455 	 */
1456 	AT_MAKE_TAGID(atiop->tag_id, aep);
1457 	if (aep->at_flags & AT_TQAE) {
1458 		atiop->tag_action = aep->at_tag_type;
1459 		atiop->ccb_h.status |= CAM_TAG_ACTION_VALID;
1460 	}
1461 	xpt_done((union ccb*)atiop);
1462 	isp_prt(isp, ISP_LOGTDEBUG0,
1463 	    "ATIO[%x] CDB=0x%x bus %d iid%d->lun%d tag 0x%x ttype 0x%x %s",
1464 	    aep->at_handle, aep->at_cdb[0] & 0xff, GET_BUS_VAL(aep->at_iid),
1465 	    GET_IID_VAL(aep->at_iid), aep->at_lun, aep->at_tag_val & 0xff,
1466 	    aep->at_tag_type, (aep->at_flags & AT_NODISC)?
1467 	    "nondisc" : "disconnecting");
1468 	rls_lun_statep(isp, tptr);
1469 	return (0);
1470 }
1471 
1472 static int
1473 isp_handle_platform_atio2(struct ispsoftc *isp, at2_entry_t *aep)
1474 {
1475 	lun_id_t lun;
1476 	tstate_t *tptr;
1477 	struct ccb_accept_tio *atiop;
1478 	atio_private_data_t *atp;
1479 
1480 	/*
1481 	 * The firmware status (except for the QLTM_SVALID bit)
1482 	 * indicates why this ATIO was sent to us.
1483 	 *
1484 	 * If QLTM_SVALID is set, the firmware has recommended Sense Data.
1485 	 */
1486 	if ((aep->at_status & ~QLTM_SVALID) != AT_CDB) {
1487 		isp_prt(isp, ISP_LOGWARN,
1488 		    "bogus atio (0x%x) leaked to platform", aep->at_status);
1489 		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1490 		return (0);
1491 	}
1492 
1493 	if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) != 0) {
1494 		lun = aep->at_scclun;
1495 	} else {
1496 		lun = aep->at_lun;
1497 	}
1498 	tptr = get_lun_statep(isp, 0, lun);
1499 	if (tptr == NULL) {
1500 		isp_prt(isp, ISP_LOGWARN, "no state pointer for lun %d", lun);
1501 		tptr = get_lun_statep(isp, 0, CAM_LUN_WILDCARD);
1502 	}
1503 
1504 	if (tptr == NULL) {
1505 		/*
1506 		 * What we'd like to know is whether or not we have a listener
1507 		 * upstream that really hasn't configured yet. If we do, then
1508 		 * we can give a more sensible reply here. If not, then we can
1509 		 * reject this out of hand.
1510 		 *
1511 		 * Choices for what to send were
1512 		 *
1513 		 *	Not Ready, Unit Not Self-Configured Yet
1514 		 *	(0x2,0x3e,0x00)
1515 		 *
1516 		 * for the former and
1517 		 *
1518 		 *	Illegal Request, Logical Unit Not Supported
1519 		 *	(0x5,0x25,0x00)
1520 		 *
1521 		 * for the latter.
1522 		 *
1523 		 * We used to decide whether there was at least one listener
1524 		 * based upon whether the black hole driver was configured.
1525 		 * However, recent config(8) changes have made this hard to do
1526 		 * at this time.
1527 		 *
1528 		 */
1529 		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1530 		return (0);
1531 	}
1532 
1533 	atp = isp_get_atpd(isp, 0);
1534 	atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios);
1535 	if (atiop == NULL || atp == NULL) {
1536 		/*
1537 		 * Because we can't autofeed sense data back with
1538 		 * a command for parallel SCSI, we can't give back
1539 		 * a CHECK CONDITION. We'll give back a QUEUE FULL status
1540 		 * instead. This works out okay because the only time we
1541 		 * should, in fact, get this, is in the case that we've
1542 		 * run out of ATIOS.
1543 		 */
1544 		xpt_print_path(tptr->owner);
1545 		isp_prt(isp, ISP_LOGWARN,
1546 		    "no %s for lun %d from initiator %d",
1547 		    (atp == NULL && atiop == NULL)? "ATIO2s *or* ATPS" :
1548 		    ((atp == NULL)? "ATPs" : "ATIO2s"), lun, aep->at_iid);
1549 		rls_lun_statep(isp, tptr);
1550 		isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0);
1551 		return (0);
1552 	}
1553 	atp->state = ATPD_STATE_ATIO;
1554 	SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle);
1555 	tptr->atio_count--;
1556 	isp_prt(isp, ISP_LOGTDEBUG0, "Take FREE ATIO2 lun %d, count now %d",
1557 	    lun, tptr->atio_count);
1558 
1559 	if (tptr == &isp->isp_osinfo.tsdflt[0]) {
1560 		atiop->ccb_h.target_id =
1561 		    ((fcparam *)isp->isp_param)->isp_loopid;
1562 		atiop->ccb_h.target_lun = lun;
1563 	}
1564 	/*
1565 	 * We don't get 'suggested' sense data as we do with SCSI cards.
1566 	 */
1567 	atiop->sense_len = 0;
1568 
1569 	atiop->init_id = aep->at_iid;
1570 	atiop->cdb_len = ATIO2_CDBLEN;
1571 	MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, ATIO2_CDBLEN);
1572 	atiop->ccb_h.status = CAM_CDB_RECVD;
1573 	atiop->tag_id = aep->at_rxid;
1574 	switch (aep->at_taskflags & ATIO2_TC_ATTR_MASK) {
1575 	case ATIO2_TC_ATTR_SIMPLEQ:
1576 		atiop->tag_action = MSG_SIMPLE_Q_TAG;
1577 		break;
1578 	case ATIO2_TC_ATTR_HEADOFQ:
1579 		atiop->tag_action = MSG_HEAD_OF_Q_TAG;
1580 		break;
1581 	case ATIO2_TC_ATTR_ORDERED:
1582 		atiop->tag_action = MSG_ORDERED_Q_TAG;
1583 		break;
1584 	case ATIO2_TC_ATTR_ACAQ:		/* ?? */
1585 	case ATIO2_TC_ATTR_UNTAGGED:
1586 	default:
1587 		atiop->tag_action = 0;
1588 		break;
1589 	}
1590 	atiop->ccb_h.flags = CAM_TAG_ACTION_VALID;
1591 
1592 	atp->tag = atiop->tag_id;
1593 	atp->lun = lun;
1594 	atp->orig_datalen = aep->at_datalen;
1595 	atp->last_xframt = 0;
1596 	atp->bytes_xfered = 0;
1597 	atp->state = ATPD_STATE_CAM;
1598 	xpt_done((union ccb*)atiop);
1599 
1600 	isp_prt(isp, ISP_LOGTDEBUG0,
1601 	    "ATIO2[%x] CDB=0x%x iid%d->lun%d tattr 0x%x datalen %u",
1602 	    aep->at_rxid, aep->at_cdb[0] & 0xff, aep->at_iid,
1603 	    lun, aep->at_taskflags, aep->at_datalen);
1604 	rls_lun_statep(isp, tptr);
1605 	return (0);
1606 }
1607 
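/*
 * Completion of a CTIO/CTIO2 queue entry: recover the original ccb from
 * its handle, account for any data residual, and, when the final CTIO of
 * an exchange is seen, either complete the ccb or (on error) put the
 * ATIO back to the firmware.
 */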
1608 static int
1609 isp_handle_platform_ctio(struct ispsoftc *isp, void *arg)
1610 {
1611 	union ccb *ccb;
1612 	int sentstatus, ok, notify_cam, resid = 0;
1613 	u_int16_t tval;
1614 
1615 	/*
1616 	 * CTIO and CTIO2 are close enough....
1617 	 */
1618 
1619 	ccb = (union ccb *) isp_find_xs(isp, ((ct_entry_t *)arg)->ct_syshandle);
1620 	KASSERT((ccb != NULL), ("null ccb in isp_handle_platform_ctio"));
1621 	isp_destroy_handle(isp, ((ct_entry_t *)arg)->ct_syshandle);
1622 
1623 	if (IS_FC(isp)) {
1624 		ct2_entry_t *ct = arg;
1625 		atio_private_data_t *atp = isp_get_atpd(isp, ct->ct_rxid);
1626 		if (atp == NULL) {
1627 			isp_prt(isp, ISP_LOGERR,
1628 			    "cannot find adjunct for %x after I/O",
1629 			    ct->ct_rxid);
1630 			return (0);
1631 		}
1632 		sentstatus = ct->ct_flags & CT2_SENDSTATUS;
1633 		ok = (ct->ct_status & ~QLTM_SVALID) == CT_OK;
1634 		if (ok && sentstatus && (ccb->ccb_h.flags & CAM_SEND_SENSE)) {
1635 			ccb->ccb_h.status |= CAM_SENT_SENSE;
1636 		}
1637 		notify_cam = ct->ct_header.rqs_seqno & 0x1;
1638 		if ((ct->ct_flags & CT2_DATAMASK) != CT2_NO_DATA) {
1639 			resid = ct->ct_resid;
1640 			atp->bytes_xfered += (atp->last_xframt - resid);
1641 			atp->last_xframt = 0;
1642 		}
1643 		if (sentstatus || !ok) {
1644 			atp->tag = 0;
1645 		}
1646 		isp_prt(isp, ok? ISP_LOGTDEBUG0 : ISP_LOGWARN,
1647 		    "CTIO2[%x] sts 0x%x flg 0x%x sns %d resid %d %s",
1648 		    ct->ct_rxid, ct->ct_status, ct->ct_flags,
1649 		    (ccb->ccb_h.status & CAM_SENT_SENSE) != 0,
1650 		    resid, sentstatus? "FIN" : "MID");
1651 		tval = ct->ct_rxid;
1652 
1653 		/* XXX: should really come after isp_complete_ctio */
1654 		atp->state = ATPD_STATE_PDON;
1655 	} else {
1656 		ct_entry_t *ct = arg;
1657 		sentstatus = ct->ct_flags & CT_SENDSTATUS;
1658 		ok = (ct->ct_status  & ~QLTM_SVALID) == CT_OK;
1659 		/*
1660 		 * We *ought* to be able to get back to the original ATIO
1661 		 * here, but for some reason this gets lost. It's just as
1662 		 * well because it's squirrelled away as part of periph
1663 		 * private data.
1664 		 *
1665 		 * We can live without it as long as we continue to use
1666 		 * the auto-replenish feature for CTIOs.
1667 		 */
1668 		notify_cam = ct->ct_header.rqs_seqno & 0x1;
1669 		if (ct->ct_status & QLTM_SVALID) {
1670 			char *sp = (char *)ct;
1671 			sp += CTIO_SENSE_OFFSET;
1672 			ccb->csio.sense_len =
1673 			    min(sizeof (ccb->csio.sense_data), QLTM_SENSELEN);
1674 			MEMCPY(&ccb->csio.sense_data, sp, ccb->csio.sense_len);
1675 			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
1676 		}
1677 		if ((ct->ct_flags & CT_DATAMASK) != CT_NO_DATA) {
1678 			resid = ct->ct_resid;
1679 		}
1680 		isp_prt(isp, ISP_LOGTDEBUG0,
1681 		    "CTIO[%x] tag %x iid %d lun %d sts %x flg %x resid %d %s",
1682 		    ct->ct_fwhandle, ct->ct_tag_val, ct->ct_iid, ct->ct_lun,
1683 		    ct->ct_status, ct->ct_flags, resid,
1684 		    sentstatus? "FIN" : "MID");
1685 		tval = ct->ct_fwhandle;
1686 	}
1687 	ccb->csio.resid += resid;
1688 
1689 	/*
1690 	 * We're here either because intermediate data transfers are done
1691 	 * and/or the final status CTIO (which may have joined with a
1692 	 * Data Transfer) is done.
1693 	 *
1694 	 * In any case, for this platform, the upper layers figure out
1695 	 * what to do next, so all we do here is collect status and
1696 	 * pass information along. Any DMA handles have already been
1697 	 * freed.
1698 	 */
1699 	if (notify_cam == 0) {
1700 		isp_prt(isp, ISP_LOGTDEBUG0, "  INTER CTIO[0x%x] done", tval);
1701 		return (0);
1702 	}
1703 
1704 	isp_prt(isp, ISP_LOGTDEBUG0, "%s CTIO[0x%x] done",
1705 	    (sentstatus)? "  FINAL " : "MIDTERM ", tval);
1706 
1707 	if (!ok) {
1708 		isp_target_putback_atio(ccb);
1709 	} else {
1710 		isp_complete_ctio(ccb);
1711 
1712 	}
1713 	return (0);
1714 }
1715 
1716 static int
1717 isp_handle_platform_notify_scsi(struct ispsoftc *isp, in_entry_t *inp)
1718 {
1719 	return (0);	/* XXXX */
1720 }
1721 
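/*
 * Fibre channel immediate notify handling: port logout/changed and
 * global logout are just logged; for ABORT TASK, a queued immediate
 * notify ccb (if any) is completed back to CAM with an ABORT TAG
 * message.
 */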
1722 static int
1723 isp_handle_platform_notify_fc(struct ispsoftc *isp, in_fcentry_t *inp)
1724 {
1725 
1726 	switch (inp->in_status) {
1727 	case IN_PORT_LOGOUT:
1728 		isp_prt(isp, ISP_LOGWARN, "port logout of iid %d",
1729 		   inp->in_iid);
1730 		break;
1731 	case IN_PORT_CHANGED:
1732 		isp_prt(isp, ISP_LOGWARN, "port changed for iid %d",
1733 		   inp->in_iid);
1734 		break;
1735 	case IN_GLOBAL_LOGO:
1736 		isp_prt(isp, ISP_LOGINFO, "all ports logged out");
1737 		break;
1738 	case IN_ABORT_TASK:
1739 	{
1740 		atio_private_data_t *atp = isp_get_atpd(isp, inp->in_seqid);
1741 		struct ccb_immed_notify *inot = NULL;
1742 
1743 		if (atp) {
1744 			tstate_t *tptr = get_lun_statep(isp, 0, atp->lun);
1745 			if (tptr) {
1746 				inot = (struct ccb_immed_notify *)
1747 				    SLIST_FIRST(&tptr->inots);
1748 				if (inot) {
1749 					SLIST_REMOVE_HEAD(&tptr->inots,
1750 					    sim_links.sle);
1751 				}
1752 			}
1753 			isp_prt(isp, ISP_LOGWARN,
1754 			   "abort task RX_ID %x IID %d state %d",
1755 			   inp->in_seqid, inp->in_iid, atp->state);
1756 		} else {
1757 			isp_prt(isp, ISP_LOGWARN,
1758 			   "abort task RX_ID %x from iid %d, state unknown",
1759 			   inp->in_seqid, inp->in_iid);
1760 		}
1761 		if (inot) {
1762 			inot->initiator_id = inp->in_iid;
1763 			inot->sense_len = 0;
1764 			inot->message_args[0] = MSG_ABORT_TAG;
1765 			inot->message_args[1] = inp->in_seqid & 0xff;
1766 			inot->message_args[2] = (inp->in_seqid >> 8) & 0xff;
1767 			inot->ccb_h.status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN;
1768 			xpt_done((union ccb *)inot);
1769 		}
1770 		break;
1771 	}
1772 	default:
1773 		break;
1774 	}
1775 	return (0);
1776 }
1777 #endif
1778 
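/*
 * CAM async callback. On AC_LOST_DEVICE for parallel SCSI, force one
 * parameter update pass with safe (or default) negotiation flags for
 * the target, then restore the previous goal flags.
 */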
1779 static void
1780 isp_cam_async(void *cbarg, u_int32_t code, struct cam_path *path, void *arg)
1781 {
1782 	struct cam_sim *sim;
1783 	struct ispsoftc *isp;
1784 
1785 	sim = (struct cam_sim *)cbarg;
1786 	isp = (struct ispsoftc *) cam_sim_softc(sim);
1787 	switch (code) {
1788 	case AC_LOST_DEVICE:
1789 		if (IS_SCSI(isp)) {
1790 			u_int16_t oflags, nflags;
1791 			sdparam *sdp = isp->isp_param;
1792 			int tgt;
1793 
1794 			tgt = xpt_path_target_id(path);
1795 			if (tgt >= 0) {
1796 				sdp += cam_sim_bus(sim);
1797 				ISP_LOCK(isp);
1798 				nflags = sdp->isp_devparam[tgt].nvrm_flags;
1799 #ifndef	ISP_TARGET_MODE
1800 				nflags &= DPARM_SAFE_DFLT;
1801 				if (isp->isp_loaded_fw) {
1802 					nflags |= DPARM_NARROW | DPARM_ASYNC;
1803 				}
1804 #else
1805 				nflags = DPARM_DEFAULT;
1806 #endif
1807 				oflags = sdp->isp_devparam[tgt].goal_flags;
1808 				sdp->isp_devparam[tgt].goal_flags = nflags;
1809 				sdp->isp_devparam[tgt].dev_update = 1;
1810 				isp->isp_update |= (1 << cam_sim_bus(sim));
1811 				(void) isp_control(isp,
1812 				    ISPCTL_UPDATE_PARAMS, NULL);
1813 				sdp->isp_devparam[tgt].goal_flags = oflags;
1814 				ISP_UNLOCK(isp);
1815 			}
1816 		}
1817 		break;
1818 	default:
1819 		isp_prt(isp, ISP_LOGWARN, "isp_cam_async: Code 0x%x", code);
1820 		break;
1821 	}
1822 }
1823 
1824 static void
1825 isp_poll(struct cam_sim *sim)
1826 {
1827 	struct ispsoftc *isp = cam_sim_softc(sim);
1828 	u_int16_t isr, sema, mbox;
1829 
1830 	ISP_LOCK(isp);
1831 	if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
1832 		isp_intr(isp, isr, sema, mbox);
1833 	}
1834 	ISP_UNLOCK(isp);
1835 }
1836 
1837 
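/*
 * Per-command watchdog. If the command is still outstanding, give the
 * interrupt handler one more look; if still not done, push a SYNC_ALL
 * marker and re-arm once (the grace pass) before finally aborting the
 * command and completing it with CAM_CMD_TIMEOUT.
 */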
1838 static void
1839 isp_watchdog(void *arg)
1840 {
1841 	XS_T *xs = arg;
1842 	struct ispsoftc *isp = XS_ISP(xs);
1843 	u_int32_t handle;
1844 	int iok;
1845 
1846 	/*
1847 	 * We've decided this command is dead. Make sure we're not trying
1848 	 * to kill a command that's already dead by getting its handle and
1849 	 * seeing whether it's still alive.
1850 	 */
1851 	ISP_LOCK(isp);
1852 	iok = isp->isp_osinfo.intsok;
1853 	isp->isp_osinfo.intsok = 0;
1854 	handle = isp_find_handle(isp, xs);
1855 	if (handle) {
1856 		u_int16_t isr, sema, mbox;
1857 
1858 		if (XS_CMD_DONE_P(xs)) {
1859 			isp_prt(isp, ISP_LOGDEBUG1,
1860 			    "watchdog found done cmd (handle 0x%x)", handle);
1861 			ISP_UNLOCK(isp);
1862 			return;
1863 		}
1864 
1865 		if (XS_CMD_WDOG_P(xs)) {
1866 			isp_prt(isp, ISP_LOGDEBUG2,
1867 			    "recursive watchdog (handle 0x%x)", handle);
1868 			ISP_UNLOCK(isp);
1869 			return;
1870 		}
1871 
1872 		XS_CMD_S_WDOG(xs);
1873 		if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
1874 			isp_intr(isp, isr, sema, mbox);
1875 		}
1876 		if (XS_CMD_DONE_P(xs)) {
1877 			isp_prt(isp, ISP_LOGDEBUG2,
1878 			    "watchdog cleanup for handle 0x%x", handle);
1879 			xpt_done((union ccb *) xs);
1880 		} else if (XS_CMD_GRACE_P(xs)) {
1881 			/*
1882 			 * Make sure the command is *really* dead before we
1883 			 * release the handle (and DMA resources) for reuse.
1884 			 */
1885 			(void) isp_control(isp, ISPCTL_ABORT_CMD, arg);
1886 
1887 			/*
1888 			 * After this point, the comamnd is really dead.
1889 			 */
1890 			if (XS_XFRLEN(xs)) {
1891 				ISP_DMAFREE(isp, xs, handle);
1892 			}
1893 			isp_destroy_handle(isp, handle);
1894 			xpt_print_path(xs->ccb_h.path);
1895 			isp_prt(isp, ISP_LOGWARN,
1896 			    "watchdog timeout for handle 0x%x", handle);
1897 			XS_SETERR(xs, CAM_CMD_TIMEOUT);
1898 			XS_CMD_C_WDOG(xs);
1899 			isp_done(xs);
1900 		} else {
1901 			u_int16_t nxti, optr;
1902 			ispreq_t local, *mp= &local, *qe;
1903 
1904 			XS_CMD_C_WDOG(xs);
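			/*
			 * First expiration: clear the watchdog flag, re-arm
			 * the callout for one second of grace time and queue
			 * a SYNC_ALL marker so the firmware flushes out any
			 * completions it may still be holding.
			 */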
1905 			callout_reset(&xs->ccb_h.timeout_ch, hz,
1906 				      isp_watchdog, xs);
1907 			if (isp_getrqentry(isp, &nxti, &optr, (void **) &qe)) {
1908 				ISP_UNLOCK(isp);
1909 				return;
1910 			}
1911 			XS_CMD_S_GRACE(xs);
1912 			MEMZERO((void *) mp, sizeof (*mp));
1913 			mp->req_header.rqs_entry_count = 1;
1914 			mp->req_header.rqs_entry_type = RQSTYPE_MARKER;
1915 			mp->req_modifier = SYNC_ALL;
1916 			mp->req_target = XS_CHANNEL(xs) << 7;
1917 			isp_put_request(isp, mp, qe);
1918 			ISP_ADD_REQUEST(isp, nxti);
1919 		}
1920 	} else {
1921 		isp_prt(isp, ISP_LOGDEBUG2, "watchdog with no command");
1922 	}
1923 	isp->isp_osinfo.intsok = iok;
1924 	ISP_UNLOCK(isp);
1925 }
1926 
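/*
 * Fibre Channel support thread. It sleeps until woken (by a Change
 * Notify or a CMD_RQLATER return from isp_start), then loops in
 * isp_fc_runstate() trying to bring the loop to a good state; once
 * that settles (or is known to be hopeless), any SIMQ freeze taken
 * for a loop-down condition is released.
 */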
1927 static void
1928 isp_kthread(void *arg)
1929 {
1930 	struct ispsoftc *isp = arg;
1931 
1932 	crit_enter();
1933 	isp->isp_osinfo.intsok = 1;
1934 
1935 	/*
1936 	 * The first (inner) loop handles the case where we have yet to
1937 	 * get good Fibre Channel state.
1938 	 */
1939 	for (;;) {
1940 		int wasfrozen;
1941 
1942 		isp_prt(isp, ISP_LOGDEBUG0, "kthread: checking FC state");
1943 		while (isp_fc_runstate(isp, 2 * 1000000) != 0) {
1944 			isp_prt(isp, ISP_LOGDEBUG0, "kthread: FC state ungood");
1945 			if (FCPARAM(isp)->isp_fwstate != FW_READY ||
1946 			    FCPARAM(isp)->isp_loopstate < LOOP_PDB_RCVD) {
1947 				if (FCPARAM(isp)->loop_seen_once == 0 ||
1948 				    isp->isp_osinfo.ktmature == 0) {
1949 					break;
1950 				}
1951 			}
1952 			tsleep(isp_kthread, 0, "isp_fcthrd", hz);
1953 
1954 		}
1955 
1956 		/*
1957 		 * Even if we didn't get good loop state we may be
1958 		 * unfreezing the SIMQ so that we can kill off
1959 		 * commands (if we've never seen loop before, for example).
1960 		 */
1961 		isp->isp_osinfo.ktmature = 1;
1962 		wasfrozen = isp->isp_osinfo.simqfrozen & SIMQFRZ_LOOPDOWN;
1963 		isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_LOOPDOWN;
1964 		if (wasfrozen && isp->isp_osinfo.simqfrozen == 0) {
1965 			isp_prt(isp, ISP_LOGDEBUG0, "kthread: releasing simq");
1966 			ISPLOCK_2_CAMLOCK(isp);
1967 			xpt_release_simq(isp->isp_sim, 1);
1968 			CAMLOCK_2_ISPLOCK(isp);
1969 		}
1970 		tsleep(&isp->isp_osinfo.kthread, 0, "isp_fc_worker", 0);
1971 		isp_prt(isp, ISP_LOGDEBUG0, "kthread: waiting until called");
1972 	}
1973 }
1974 
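/*
 * Main CAM action routine- dispatch on the CCB function code. The
 * interesting cases are XPT_SCSI_IO (hand the command to isp_start()
 * and arm the watchdog), the target mode resource CCBs, transfer
 * negotiation get/set, bus/device reset and path inquiry.
 */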
1975 static void
1976 isp_action(struct cam_sim *sim, union ccb *ccb)
1977 {
1978 	int bus, tgt, error;
1979 	struct ispsoftc *isp;
1980 	struct ccb_trans_settings *cts;
1981 
1982 	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("isp_action\n"));
1983 
1984 	isp = (struct ispsoftc *)cam_sim_softc(sim);
1985 	ccb->ccb_h.sim_priv.entries[0].field = 0;
1986 	ccb->ccb_h.sim_priv.entries[1].ptr = isp;
1987 	if (isp->isp_state != ISP_RUNSTATE &&
1988 	    ccb->ccb_h.func_code == XPT_SCSI_IO) {
1989 		CAMLOCK_2_ISPLOCK(isp);
1990 		isp_init(isp);
1991 		if (isp->isp_state != ISP_INITSTATE) {
1992 			ISP_UNLOCK(isp);
1993 			/*
1994 			 * Lie. Say it was a selection timeout.
1995 			 */
1996 			ccb->ccb_h.status = CAM_SEL_TIMEOUT | CAM_DEV_QFRZN;
1997 			xpt_freeze_devq(ccb->ccb_h.path, 1);
1998 			xpt_done(ccb);
1999 			return;
2000 		}
2001 		isp->isp_state = ISP_RUNSTATE;
2002 		ISPLOCK_2_CAMLOCK(isp);
2003 	}
2004 	isp_prt(isp, ISP_LOGDEBUG2, "isp_action code %x", ccb->ccb_h.func_code);
2005 
2006 
2007 	switch (ccb->ccb_h.func_code) {
2008 	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
2009 		/*
2010 		 * Do a couple of preliminary checks...
2011 		 */
2012 		if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
2013 			if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) {
2014 				ccb->ccb_h.status = CAM_REQ_INVALID;
2015 				xpt_done(ccb);
2016 				break;
2017 			}
2018 		}
2019 #ifdef	DIAGNOSTIC
2020 		if (ccb->ccb_h.target_id > (ISP_MAX_TARGETS(isp) - 1)) {
2021 			ccb->ccb_h.status = CAM_PATH_INVALID;
2022 		} else if (ccb->ccb_h.target_lun > (ISP_MAX_LUNS(isp) - 1)) {
2023 			ccb->ccb_h.status = CAM_PATH_INVALID;
2024 		}
2025 		if (ccb->ccb_h.status == CAM_PATH_INVALID) {
2026 			isp_prt(isp, ISP_LOGERR,
2027 			    "invalid tgt/lun (%d.%d) in XPT_SCSI_IO",
2028 			    ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
2029 			xpt_done(ccb);
2030 			break;
2031 		}
2032 #endif
2033 		((struct ccb_scsiio *) ccb)->scsi_status = SCSI_STATUS_OK;
2034 		CAMLOCK_2_ISPLOCK(isp);
2035 		error = isp_start((XS_T *) ccb);
2036 		switch (error) {
2037 		case CMD_QUEUED:
2038 			ccb->ccb_h.status |= CAM_SIM_QUEUED;
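			/*
			 * CCB timeouts are in milliseconds and
			 * CAM_TIME_DEFAULT means one minute. Convert to
			 * ticks, pad with two seconds of slop and clamp to
			 * a positive 32-bit value before arming the
			 * watchdog; e.g. with hz == 100 a 10000ms timeout
			 * becomes (1000000 + 999) / 1000 + 200 = 1200 ticks.
			 */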
2039 			if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
2040 				u_int64_t ticks = (u_int64_t) hz;
2041 				if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT)
2042 					ticks = 60 * 1000 * ticks;
2043 				else
2044 					ticks = ccb->ccb_h.timeout * hz;
2045 				ticks = ((ticks + 999) / 1000) + hz + hz;
2046 				if (ticks >= 0x80000000) {
2047 					isp_prt(isp, ISP_LOGERR,
2048 					    "timeout overflow");
2049 					ticks = 0x7fffffff;
2050 				}
2051 				callout_reset(&ccb->ccb_h.timeout_ch, ticks,
2052 				    isp_watchdog, ccb);
2053 			}
2054 			ISPLOCK_2_CAMLOCK(isp);
2055 			break;
2056 		case CMD_RQLATER:
2057 			/*
2058 			 * This can only happen for Fibre Channel
2059 			 */
2060 			KASSERT((IS_FC(isp)), ("CMD_RQLATER for FC only"));
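			/*
			 * If the loop has never been seen and the kthread
			 * has already had its chance, fail the command as a
			 * selection timeout; otherwise wake the kthread,
			 * freeze the SIMQ for loop-down and have CAM
			 * requeue the command.
			 */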
2061 			if (FCPARAM(isp)->loop_seen_once == 0 &&
2062 			    isp->isp_osinfo.ktmature) {
2063 				ISPLOCK_2_CAMLOCK(isp);
2064 				XS_SETERR(ccb, CAM_SEL_TIMEOUT);
2065 				xpt_done(ccb);
2066 				break;
2067 			}
2068 			wakeup(&isp->isp_osinfo.kthread);
2069 			isp_freeze_loopdown(isp, "isp_action(RQLATER)");
2070 			isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN;
2071 			XS_SETERR(ccb, CAM_REQUEUE_REQ);
2072 			ISPLOCK_2_CAMLOCK(isp);
2073 			xpt_done(ccb);
2074 			break;
2075 		case CMD_EAGAIN:
2076 			XS_SETERR(ccb, CAM_REQUEUE_REQ);
2077 			ISPLOCK_2_CAMLOCK(isp);
2078 			xpt_done(ccb);
2079 			break;
2080 		case CMD_COMPLETE:
2081 			isp_done((struct ccb_scsiio *) ccb);
2082 			ISPLOCK_2_CAMLOCK(isp);
2083 			break;
2084 		default:
2085 			isp_prt(isp, ISP_LOGERR,
2086 			    "What's this? 0x%x at %d in file %s",
2087 			    error, __LINE__, __FILE__);
2088 			XS_SETERR(ccb, CAM_REQ_CMP_ERR);
2089 			xpt_done(ccb);
2090 			ISPLOCK_2_CAMLOCK(isp);
2091 		}
2092 		break;
2093 
2094 #ifdef	ISP_TARGET_MODE
2095 	case XPT_EN_LUN:		/* Enable LUN as a target */
2096 	{
2097 		int iok;
2098 		CAMLOCK_2_ISPLOCK(isp);
2099 		iok = isp->isp_osinfo.intsok;
2100 		isp->isp_osinfo.intsok = 0;
2101 		isp_en_lun(isp, ccb);
2102 		isp->isp_osinfo.intsok = iok;
2103 		ISPLOCK_2_CAMLOCK(isp);
2104 		xpt_done(ccb);
2105 		break;
2106 	}
2107 	case XPT_NOTIFY_ACK:		/* recycle notify ack */
2108 	case XPT_IMMED_NOTIFY:		/* Add Immediate Notify Resource */
2109 	case XPT_ACCEPT_TARGET_IO:	/* Add Accept Target IO Resource */
2110 	{
2111 		tstate_t *tptr =
2112 		    get_lun_statep(isp, XS_CHANNEL(ccb), ccb->ccb_h.target_lun);
2113 		if (tptr == NULL) {
2114 			ccb->ccb_h.status = CAM_LUN_INVALID;
2115 			xpt_done(ccb);
2116 			break;
2117 		}
2118 		ccb->ccb_h.sim_priv.entries[0].field = 0;
2119 		ccb->ccb_h.sim_priv.entries[1].ptr = isp;
2120 		ccb->ccb_h.flags = 0;
2121 
2122 		CAMLOCK_2_ISPLOCK(isp);
2123 		if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
2124 			/*
2125 			 * Note that the command itself may not be done yet;
2126 			 * it may not even have had the first CTIO sent.
2127 			 */
2128 			tptr->atio_count++;
2129 			isp_prt(isp, ISP_LOGTDEBUG0,
2130 			    "Put FREE ATIO2, lun %d, count now %d",
2131 			    ccb->ccb_h.target_lun, tptr->atio_count);
2132 			SLIST_INSERT_HEAD(&tptr->atios, &ccb->ccb_h,
2133 			    sim_links.sle);
2134 		} else if (ccb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
2135 			SLIST_INSERT_HEAD(&tptr->inots, &ccb->ccb_h,
2136 			    sim_links.sle);
2137 		} else {
2138 			;
2139 		}
2140 		rls_lun_statep(isp, tptr);
2141 		ccb->ccb_h.status = CAM_REQ_INPROG;
2142 		ISPLOCK_2_CAMLOCK(isp);
2143 		break;
2144 	}
2145 	case XPT_CONT_TARGET_IO:
2146 	{
2147 		CAMLOCK_2_ISPLOCK(isp);
2148 		ccb->ccb_h.status = isp_target_start_ctio(isp, ccb);
2149 		if (ccb->ccb_h.status != CAM_REQ_INPROG) {
2150 			isp_prt(isp, ISP_LOGWARN,
2151 			    "XPT_CONT_TARGET_IO: status 0x%x",
2152 			    ccb->ccb_h.status);
2153 			XS_SETERR(ccb, CAM_REQUEUE_REQ);
2154 			ISPLOCK_2_CAMLOCK(isp);
2155 			xpt_done(ccb);
2156 		} else {
2157 			ISPLOCK_2_CAMLOCK(isp);
2158 			ccb->ccb_h.status |= CAM_SIM_QUEUED;
2159 		}
2160 		break;
2161 	}
2162 #endif
2163 	case XPT_RESET_DEV:		/* BDR the specified SCSI device */
2164 
2165 		bus = cam_sim_bus(xpt_path_sim(ccb->ccb_h.path));
2166 		tgt = ccb->ccb_h.target_id;
2167 		tgt |= (bus << 16);
2168 
2169 		CAMLOCK_2_ISPLOCK(isp);
2170 		error = isp_control(isp, ISPCTL_RESET_DEV, &tgt);
2171 		ISPLOCK_2_CAMLOCK(isp);
2172 		if (error) {
2173 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2174 		} else {
2175 			ccb->ccb_h.status = CAM_REQ_CMP;
2176 		}
2177 		xpt_done(ccb);
2178 		break;
2179 	case XPT_ABORT:			/* Abort the specified CCB */
2180 	{
2181 		union ccb *accb = ccb->cab.abort_ccb;
2182 		CAMLOCK_2_ISPLOCK(isp);
2183 		switch (accb->ccb_h.func_code) {
2184 #ifdef	ISP_TARGET_MODE
2185 		case XPT_ACCEPT_TARGET_IO:
2186 		case XPT_IMMED_NOTIFY:
2187         		ccb->ccb_h.status = isp_abort_tgt_ccb(isp, ccb);
2188 			break;
2189 		case XPT_CONT_TARGET_IO:
2190 			isp_prt(isp, ISP_LOGERR, "cannot abort CTIOs yet");
2191 			ccb->ccb_h.status = CAM_UA_ABORT;
2192 			break;
2193 #endif
2194 		case XPT_SCSI_IO:
2195 			error = isp_control(isp, ISPCTL_ABORT_CMD, ccb);
2196 			if (error) {
2197 				ccb->ccb_h.status = CAM_UA_ABORT;
2198 			} else {
2199 				ccb->ccb_h.status = CAM_REQ_CMP;
2200 			}
2201 			break;
2202 		default:
2203 			ccb->ccb_h.status = CAM_REQ_INVALID;
2204 			break;
2205 		}
2206 		ISPLOCK_2_CAMLOCK(isp);
2207 		xpt_done(ccb);
2208 		break;
2209 	}
2210 #define	IS_CURRENT_SETTINGS(c)	((c)->flags & CCB_TRANS_CURRENT_SETTINGS)
2211 	case XPT_SET_TRAN_SETTINGS:	/* Nexus Settings */
2212 		cts = &ccb->cts;
2213 		if (!IS_CURRENT_SETTINGS(cts)) {
2214 			ccb->ccb_h.status = CAM_REQ_INVALID;
2215 			xpt_done(ccb);
2216 			break;
2217 		}
2218 		tgt = cts->ccb_h.target_id;
2219 		CAMLOCK_2_ISPLOCK(isp);
2220 		if (IS_SCSI(isp)) {
2221 			sdparam *sdp = isp->isp_param;
2222 			u_int16_t *dptr;
2223 
2224 			bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path));
2225 
2226 			sdp += bus;
2227 			/*
2228 			 * We always update (internally) from goal_flags
2229 			 * so any request to change settings just gets
2230 			 * vectored to that location.
2231 			 */
2232 			dptr = &sdp->isp_devparam[tgt].goal_flags;
2233 
2234 			/*
2235 			 * Note that these operations affect the
2236 			 * goal flags (goal_flags), not
2237 			 * the current state flags. Then we mark
2238 			 * things so that the next operation to
2239 			 * this HBA will cause the update to occur.
2240 			 */
2241 			if (cts->valid & CCB_TRANS_DISC_VALID) {
2242 				if ((cts->flags & CCB_TRANS_DISC_ENB) != 0) {
2243 					*dptr |= DPARM_DISC;
2244 				} else {
2245 					*dptr &= ~DPARM_DISC;
2246 				}
2247 			}
2248 			if (cts->valid & CCB_TRANS_TQ_VALID) {
2249 				if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) {
2250 					*dptr |= DPARM_TQING;
2251 				} else {
2252 					*dptr &= ~DPARM_TQING;
2253 				}
2254 			}
2255 			if (cts->valid & CCB_TRANS_BUS_WIDTH_VALID) {
2256 				switch (cts->bus_width) {
2257 				case MSG_EXT_WDTR_BUS_16_BIT:
2258 					*dptr |= DPARM_WIDE;
2259 					break;
2260 				default:
2261 					*dptr &= ~DPARM_WIDE;
2262 				}
2263 			}
2264 			/*
2265 			 * Any SYNC RATE of nonzero and SYNC_OFFSET
2266 			 * of nonzero will cause us to go to the
2267 			 * selected (from NVRAM) maximum value for
2268 			 * this device. At a later point, we'll
2269 			 * allow finer control.
2270 			 */
2271 			if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) &&
2272 			    (cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) &&
2273 			    (cts->sync_offset > 0)) {
2274 				*dptr |= DPARM_SYNC;
2275 			} else {
2276 				*dptr &= ~DPARM_SYNC;
2277 			}
2278 			*dptr |= DPARM_SAFE_DFLT;
2279 			isp_prt(isp, ISP_LOGDEBUG0,
2280 			    "SET bus %d targ %d to flags %x off %x per %x",
2281 			    bus, tgt, sdp->isp_devparam[tgt].goal_flags,
2282 			    sdp->isp_devparam[tgt].goal_offset,
2283 			    sdp->isp_devparam[tgt].goal_period);
2284 			sdp->isp_devparam[tgt].dev_update = 1;
2285 			isp->isp_update |= (1 << bus);
2286 		}
2287 		ISPLOCK_2_CAMLOCK(isp);
2288 		ccb->ccb_h.status = CAM_REQ_CMP;
2289 		xpt_done(ccb);
2290 		break;
2291 	case XPT_GET_TRAN_SETTINGS:
2292 		cts = &ccb->cts;
2293 		tgt = cts->ccb_h.target_id;
2294 		CAMLOCK_2_ISPLOCK(isp);
2295 		if (IS_FC(isp)) {
2296 			/*
2297 			 * A lot of normal parallel SCSI settings don't apply here.
2298 			 */
2299 			cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB;
2300 			cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
2301 			/*
2302 			 * How do you measure the width of a high
2303 			 * speed serial bus? Well, in bytes.
2304 			 *
2305 			 * Offset and period make no sense, though; the 'base'
2306 			 * transfer speed is reported as gigabit in XPT_PATH_INQ.
2307 			 */
2308 			cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2309 		} else {
2310 			sdparam *sdp = isp->isp_param;
2311 			int bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path));
2312 			u_int16_t dval, pval, oval;
2313 
2314 			sdp += bus;
2315 
2316 			if (IS_CURRENT_SETTINGS(cts)) {
2317 				sdp->isp_devparam[tgt].dev_refresh = 1;
2318 				isp->isp_update |= (1 << bus);
2319 				(void) isp_control(isp, ISPCTL_UPDATE_PARAMS,
2320 				    NULL);
2321 				dval = sdp->isp_devparam[tgt].actv_flags;
2322 				oval = sdp->isp_devparam[tgt].actv_offset;
2323 				pval = sdp->isp_devparam[tgt].actv_period;
2324 			} else {
2325 				dval = sdp->isp_devparam[tgt].nvrm_flags;
2326 				oval = sdp->isp_devparam[tgt].nvrm_offset;
2327 				pval = sdp->isp_devparam[tgt].nvrm_period;
2328 			}
2329 
2330 			cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB);
2331 
2332 			if (dval & DPARM_DISC) {
2333 				cts->flags |= CCB_TRANS_DISC_ENB;
2334 			}
2335 			if (dval & DPARM_TQING) {
2336 				cts->flags |= CCB_TRANS_TAG_ENB;
2337 			}
2338 			if (dval & DPARM_WIDE) {
2339 				cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
2340 			} else {
2341 				cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2342 			}
2343 			cts->valid = CCB_TRANS_BUS_WIDTH_VALID |
2344 			    CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
2345 
2346 			if ((dval & DPARM_SYNC) && oval != 0) {
2347 				cts->sync_period = pval;
2348 				cts->sync_offset = oval;
2349 				cts->valid |=
2350 				    CCB_TRANS_SYNC_RATE_VALID |
2351 				    CCB_TRANS_SYNC_OFFSET_VALID;
2352 			}
2353 			isp_prt(isp, ISP_LOGDEBUG0,
2354 			    "GET %s bus %d targ %d to flags %x off %x per %x",
2355 			    IS_CURRENT_SETTINGS(cts)? "ACTIVE" : "NVRAM",
2356 			    bus, tgt, dval, oval, pval);
2357 		}
2358 		ISPLOCK_2_CAMLOCK(isp);
2359 		ccb->ccb_h.status = CAM_REQ_CMP;
2360 		xpt_done(ccb);
2361 		break;
2362 
2363 	case XPT_CALC_GEOMETRY:
2364 	{
2365 		struct ccb_calc_geometry *ccg;
2366 		u_int32_t secs_per_cylinder;
2367 		u_int32_t size_mb;
2368 
2369 		ccg = &ccb->ccg;
2370 		if (ccg->block_size == 0) {
2371 			isp_prt(isp, ISP_LOGERR,
2372 			    "%d.%d XPT_CALC_GEOMETRY block size 0?",
2373 			    ccg->ccb_h.target_id, ccg->ccb_h.target_lun);
2374 			ccb->ccb_h.status = CAM_REQ_INVALID;
2375 			xpt_done(ccb);
2376 			break;
2377 		}
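		/*
		 * Synthesize a CHS geometry from the volume size: the
		 * traditional 255/63 translation for volumes over 1GB and
		 * 64/32 for smaller ones, with the cylinder count derived
		 * from that. For example, a 4GB volume of 512-byte blocks
		 * has 8388608 blocks, so size_mb = 8388608 / 2048 = 4096,
		 * giving 255 heads, 63 sectors and 8388608 / 16065 = 522
		 * cylinders.
		 */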
2378 		size_mb = ccg->volume_size /((1024L * 1024L) / ccg->block_size);
2379 		if (size_mb > 1024) {
2380 			ccg->heads = 255;
2381 			ccg->secs_per_track = 63;
2382 		} else {
2383 			ccg->heads = 64;
2384 			ccg->secs_per_track = 32;
2385 		}
2386 		secs_per_cylinder = ccg->heads * ccg->secs_per_track;
2387 		ccg->cylinders = ccg->volume_size / secs_per_cylinder;
2388 		ccb->ccb_h.status = CAM_REQ_CMP;
2389 		xpt_done(ccb);
2390 		break;
2391 	}
2392 	case XPT_RESET_BUS:		/* Reset the specified bus */
2393 		bus = cam_sim_bus(sim);
2394 		CAMLOCK_2_ISPLOCK(isp);
2395 		error = isp_control(isp, ISPCTL_RESET_BUS, &bus);
2396 		ISPLOCK_2_CAMLOCK(isp);
2397 		if (error)
2398 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2399 		else {
2400 			if (cam_sim_bus(sim) && isp->isp_path2 != NULL)
2401 				xpt_async(AC_BUS_RESET, isp->isp_path2, NULL);
2402 			else if (isp->isp_path != NULL)
2403 				xpt_async(AC_BUS_RESET, isp->isp_path, NULL);
2404 			ccb->ccb_h.status = CAM_REQ_CMP;
2405 		}
2406 		xpt_done(ccb);
2407 		break;
2408 
2409 	case XPT_TERM_IO:		/* Terminate the I/O process */
2410 		ccb->ccb_h.status = CAM_REQ_INVALID;
2411 		xpt_done(ccb);
2412 		break;
2413 
2414 	case XPT_PATH_INQ:		/* Path routing inquiry */
2415 	{
2416 		struct ccb_pathinq *cpi = &ccb->cpi;
2417 
2418 		cpi->version_num = 1;
2419 #ifdef	ISP_TARGET_MODE
2420 		cpi->target_sprt = PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO;
2421 #else
2422 		cpi->target_sprt = 0;
2423 #endif
2424 		cpi->hba_eng_cnt = 0;
2425 		cpi->max_target = ISP_MAX_TARGETS(isp) - 1;
2426 		cpi->max_lun = ISP_MAX_LUNS(isp) - 1;
2427 		cpi->bus_id = cam_sim_bus(sim);
2428 		if (IS_FC(isp)) {
2429 			cpi->hba_misc = PIM_NOBUSRESET;
2430 			/*
2431 			 * Because our loop ID can shift from time to time,
2432 			 * make our initiator ID out of range of our bus.
2433 			 */
2434 			cpi->initiator_id = cpi->max_target + 1;
2435 
2436 			/*
2437 			 * Set base transfer capabilities for Fibre Channel.
2438 			 * Technically not correct because we don't know
2439 			 * what media we're running on top of, but we'll
2440 			 * look good if we always say at least 100MB/s.
2441 			 */
2442 			if (FCPARAM(isp)->isp_gbspeed == 2)
2443 				cpi->base_transfer_speed = 200000;
2444 			else
2445 				cpi->base_transfer_speed = 100000;
2446 			cpi->hba_inquiry = PI_TAG_ABLE;
2447 		} else {
2448 			sdparam *sdp = isp->isp_param;
2449 			sdp += cam_sim_bus(xpt_path_sim(cpi->ccb_h.path));
2450 			cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
2451 			cpi->hba_misc = 0;
2452 			cpi->initiator_id = sdp->isp_initiator_id;
2453 			cpi->base_transfer_speed = 3300;
2454 		}
2455 		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
2456 		strncpy(cpi->hba_vid, "Qlogic", HBA_IDLEN);
2457 		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
2458 		cpi->unit_number = cam_sim_unit(sim);
2459 		cpi->ccb_h.status = CAM_REQ_CMP;
2460 		xpt_done(ccb);
2461 		break;
2462 	}
2463 	default:
2464 		ccb->ccb_h.status = CAM_REQ_INVALID;
2465 		xpt_done(ccb);
2466 		break;
2467 	}
2468 }
2469 
2470 #define	ISPDDB	(CAM_DEBUG_INFO|CAM_DEBUG_TRACE|CAM_DEBUG_CDB)
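/*
 * Common command completion path. Translate SCSI status into CAM
 * status, freeze the device queue on any error so the peripheral
 * drivers see it, and, unless the watchdog currently owns the
 * command, stop the timeout callout and hand the CCB back to CAM.
 */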
2471 void
2472 isp_done(struct ccb_scsiio *sccb)
2473 {
2474 	struct ispsoftc *isp = XS_ISP(sccb);
2475 
2476 	if (XS_NOERR(sccb))
2477 		XS_SETERR(sccb, CAM_REQ_CMP);
2478 
2479 	if ((sccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP &&
2480 	    (sccb->scsi_status != SCSI_STATUS_OK)) {
2481 		sccb->ccb_h.status &= ~CAM_STATUS_MASK;
2482 		if ((sccb->scsi_status == SCSI_STATUS_CHECK_COND) &&
2483 		    (sccb->ccb_h.status & CAM_AUTOSNS_VALID) == 0) {
2484 			sccb->ccb_h.status |= CAM_AUTOSENSE_FAIL;
2485 		} else {
2486 			sccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
2487 		}
2488 	}
2489 
2490 	sccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2491 	if ((sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2492 		if ((sccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
2493 			sccb->ccb_h.status |= CAM_DEV_QFRZN;
2494 			xpt_freeze_devq(sccb->ccb_h.path, 1);
2495 			isp_prt(isp, ISP_LOGDEBUG0,
2496 			    "freeze devq %d.%d cam sts %x scsi sts %x",
2497 			    sccb->ccb_h.target_id, sccb->ccb_h.target_lun,
2498 			    sccb->ccb_h.status, sccb->scsi_status);
2499 		}
2500 	}
2501 
2502 	if ((CAM_DEBUGGED(sccb->ccb_h.path, ISPDDB)) &&
2503 	    (sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2504 		xpt_print_path(sccb->ccb_h.path);
2505 		isp_prt(isp, ISP_LOGINFO,
2506 		    "cam completion status 0x%x", sccb->ccb_h.status);
2507 	}
2508 
2509 	XS_CMD_S_DONE(sccb);
2510 	if (XS_CMD_WDOG_P(sccb) == 0) {
2511 		callout_stop(&sccb->ccb_h.timeout_ch);
2512 		if (XS_CMD_GRACE_P(sccb)) {
2513 			isp_prt(isp, ISP_LOGDEBUG2,
2514 			    "finished command on borrowed time");
2515 		}
2516 		XS_CMD_S_CLEAR(sccb);
2517 		ISPLOCK_2_CAMLOCK(isp);
2518 		xpt_done((union ccb *) sccb);
2519 		CAMLOCK_2_ISPLOCK(isp);
2520 	}
2521 }
2522 
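/*
 * Core-to-platform async event handler. Negotiation updates become
 * AC_TRANSFER_NEG notifications, bus resets and loop state changes
 * are propagated to CAM, fabric device announcements are folded into
 * the local port database, and firmware crashes are logged (and, if
 * ISP_FW_CRASH_DUMP is configured, dumped before reinitializing).
 */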
2523 int
2524 isp_async(struct ispsoftc *isp, ispasync_t cmd, void *arg)
2525 {
2526 	int bus, rv = 0;
2527 	switch (cmd) {
2528 	case ISPASYNC_NEW_TGT_PARAMS:
2529 	{
2530 		int flags, tgt;
2531 		sdparam *sdp = isp->isp_param;
2532 		struct ccb_trans_settings cts;
2533 		struct cam_path *tmppath;
2534 
2535 		bzero(&cts, sizeof (struct ccb_trans_settings));
2536 
2537 		tgt = *((int *)arg);
2538 		bus = (tgt >> 16) & 0xffff;
2539 		tgt &= 0xffff;
2540 		sdp += bus;
2541 		ISPLOCK_2_CAMLOCK(isp);
2542 		if (xpt_create_path(&tmppath, NULL,
2543 		    cam_sim_path(bus? isp->isp_sim2 : isp->isp_sim),
2544 		    tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2545 			CAMLOCK_2_ISPLOCK(isp);
2546 			isp_prt(isp, ISP_LOGWARN,
2547 			    "isp_async cannot make temp path for %d.%d",
2548 			    tgt, bus);
2549 			rv = -1;
2550 			break;
2551 		}
2552 		CAMLOCK_2_ISPLOCK(isp);
2553 		flags = sdp->isp_devparam[tgt].actv_flags;
2554 		cts.flags = CCB_TRANS_CURRENT_SETTINGS;
2555 		cts.valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
2556 		if (flags & DPARM_DISC) {
2557 			cts.flags |= CCB_TRANS_DISC_ENB;
2558 		}
2559 		if (flags & DPARM_TQING) {
2560 			cts.flags |= CCB_TRANS_TAG_ENB;
2561 		}
2562 		cts.valid |= CCB_TRANS_BUS_WIDTH_VALID;
2563 		cts.bus_width = (flags & DPARM_WIDE)?
2564 		    MSG_EXT_WDTR_BUS_16_BIT : MSG_EXT_WDTR_BUS_8_BIT;
2565 		cts.sync_period = sdp->isp_devparam[tgt].actv_period;
2566 		cts.sync_offset = sdp->isp_devparam[tgt].actv_offset;
2567 		if (flags & DPARM_SYNC) {
2568 			cts.valid |=
2569 			    CCB_TRANS_SYNC_RATE_VALID |
2570 			    CCB_TRANS_SYNC_OFFSET_VALID;
2571 		}
2572 		isp_prt(isp, ISP_LOGDEBUG2,
2573 		    "NEW_TGT_PARAMS bus %d tgt %d period %x offset %x flags %x",
2574 		    bus, tgt, sdp->isp_devparam[tgt].actv_period,
2575 		    sdp->isp_devparam[tgt].actv_offset, flags);
2576 		xpt_setup_ccb(&cts.ccb_h, tmppath, 1);
2577 		ISPLOCK_2_CAMLOCK(isp);
2578 		xpt_async(AC_TRANSFER_NEG, tmppath, &cts);
2579 		xpt_free_path(tmppath);
2580 		CAMLOCK_2_ISPLOCK(isp);
2581 		break;
2582 	}
2583 	case ISPASYNC_BUS_RESET:
2584 		bus = *((int *)arg);
2585 		isp_prt(isp, ISP_LOGINFO, "SCSI bus reset on bus %d detected",
2586 		    bus);
2587 		if (bus > 0 && isp->isp_path2) {
2588 			ISPLOCK_2_CAMLOCK(isp);
2589 			xpt_async(AC_BUS_RESET, isp->isp_path2, NULL);
2590 			CAMLOCK_2_ISPLOCK(isp);
2591 		} else if (isp->isp_path) {
2592 			ISPLOCK_2_CAMLOCK(isp);
2593 			xpt_async(AC_BUS_RESET, isp->isp_path, NULL);
2594 			CAMLOCK_2_ISPLOCK(isp);
2595 		}
2596 		break;
2597 	case ISPASYNC_LIP:
2598 		if (isp->isp_path) {
2599 			isp_freeze_loopdown(isp, "ISPASYNC_LIP");
2600 		}
2601 		isp_prt(isp, ISP_LOGINFO, "LIP Received");
2602 		break;
2603 	case ISPASYNC_LOOP_RESET:
2604 		if (isp->isp_path) {
2605 			isp_freeze_loopdown(isp, "ISPASYNC_LOOP_RESET");
2606 		}
2607 		isp_prt(isp, ISP_LOGINFO, "Loop Reset Received");
2608 		break;
2609 	case ISPASYNC_LOOP_DOWN:
2610 		if (isp->isp_path) {
2611 			isp_freeze_loopdown(isp, "ISPASYNC_LOOP_DOWN");
2612 		}
2613 		isp_prt(isp, ISP_LOGINFO, "Loop DOWN");
2614 		break;
2615 	case ISPASYNC_LOOP_UP:
2616 		/*
2617 		 * Now we just note that Loop has come up. We don't
2618 		 * actually do anything because we're waiting for a
2619 		 * Change Notify before activating the FC cleanup
2620 		 * thread to look at the state of the loop again.
2621 		 */
2622 		isp_prt(isp, ISP_LOGINFO, "Loop UP");
2623 		break;
2624 	case ISPASYNC_PROMENADE:
2625 	{
2626 		const char *fmt = "Target %d (Loop 0x%x) Port ID 0x%x "
2627 		    "(role %s) %s\n Port WWN 0x%08x%08x\n Node WWN 0x%08x%08x";
2628 		static const char *roles[4] = {
2629 		    "(none)", "Target", "Initiator", "Target/Initiator"
2630 		};
2631 		fcparam *fcp = isp->isp_param;
2632 		int tgt = *((int *) arg);
2633 		struct lportdb *lp = &fcp->portdb[tgt];
2634 
2635 		isp_prt(isp, ISP_LOGINFO, fmt, tgt, lp->loopid, lp->portid,
2636 		    roles[lp->roles & 0x3],
2637 		    (lp->valid)? "Arrived" : "Departed",
2638 		    (u_int32_t) (lp->port_wwn >> 32),
2639 		    (u_int32_t) (lp->port_wwn & 0xffffffffLL),
2640 		    (u_int32_t) (lp->node_wwn >> 32),
2641 		    (u_int32_t) (lp->node_wwn & 0xffffffffLL));
2642 
2643 		break;
2644 	}
2645 	case ISPASYNC_CHANGE_NOTIFY:
2646 		if (arg == ISPASYNC_CHANGE_PDB) {
2647 			isp_prt(isp, ISP_LOGINFO,
2648 			    "Port Database Changed");
2649 		} else if (arg == ISPASYNC_CHANGE_SNS) {
2650 			isp_prt(isp, ISP_LOGINFO,
2651 			    "Name Server Database Changed");
2652 		}
2653 		wakeup(&isp->isp_osinfo.kthread);
2654 		break;
2655 	case ISPASYNC_FABRIC_DEV:
2656 	{
2657 		int target, base, lim;
2658 		fcparam *fcp = isp->isp_param;
2659 		struct lportdb *lp = NULL;
2660 		struct lportdb *clp = (struct lportdb *) arg;
2661 		const char *pt;
2662 
2663 		switch (clp->port_type) {
2664 		case 1:
2665 			pt = "   N_Port";
2666 			break;
2667 		case 2:
2668 			pt = "  NL_Port";
2669 			break;
2670 		case 3:
2671 			pt = "F/NL_Port";
2672 			break;
2673 		case 0x7f:
2674 			pt = "  Nx_Port";
2675 			break;
2676 		case 0x81:
2677 			pt = "  F_port";
2678 			break;
2679 		case 0x82:
2680 			pt = "  FL_Port";
2681 			break;
2682 		case 0x84:
2683 			pt = "   E_port";
2684 			break;
2685 		default:
2686 			pt = " ";
2687 			break;
2688 		}
2689 
2690 		isp_prt(isp, ISP_LOGINFO,
2691 		    "%s Fabric Device @ PortID 0x%x", pt, clp->portid);
2692 
2693 		/*
2694 		 * If we don't have an initiator role we bail.
2695 		 *
2696 		 * We just use ISPASYNC_FABRIC_DEV for announcement purposes.
2697 		 */
2698 
2699 		if ((isp->isp_role & ISP_ROLE_INITIATOR) == 0) {
2700 			break;
2701 		}
2702 
2703 		/*
2704 		 * Is this entry for us? If so, we bail.
2705 		 */
2706 
2707 		if (fcp->isp_portid == clp->portid) {
2708 			break;
2709 		}
2710 
2711 		/*
2712 		 * Else, the default policy is to find room for it in
2713 		 * our local port database. Later, when we execute
2714 		 * the call to isp_pdb_sync, either this newly arrived
2715 		 * or already logged-in device will be (re)announced.
2716 		 */
2717 
2718 		if (fcp->isp_topo == TOPO_FL_PORT)
2719 			base = FC_SNS_ID+1;
2720 		else
2721 			base = 0;
2722 
2723 		if (fcp->isp_topo == TOPO_N_PORT)
2724 			lim = 1;
2725 		else
2726 			lim = MAX_FC_TARG;
2727 
2728 		/*
2729 		 * Is it already in our list?
2730 		 */
2731 		for (target = base; target < lim; target++) {
2732 			if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
2733 				continue;
2734 			}
2735 			lp = &fcp->portdb[target];
2736 			if (lp->port_wwn == clp->port_wwn &&
2737 			    lp->node_wwn == clp->node_wwn) {
2738 				lp->fabric_dev = 1;
2739 				break;
2740 			}
2741 		}
2742 		if (target < lim) {
2743 			break;
2744 		}
2745 		for (target = base; target < lim; target++) {
2746 			if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
2747 				continue;
2748 			}
2749 			lp = &fcp->portdb[target];
2750 			if (lp->port_wwn == 0) {
2751 				break;
2752 			}
2753 		}
2754 		if (target == lim) {
2755 			isp_prt(isp, ISP_LOGWARN,
2756 			    "out of space for fabric devices");
2757 			break;
2758 		}
2759 		lp->port_type = clp->port_type;
2760 		lp->fc4_type = clp->fc4_type;
2761 		lp->node_wwn = clp->node_wwn;
2762 		lp->port_wwn = clp->port_wwn;
2763 		lp->portid = clp->portid;
2764 		lp->fabric_dev = 1;
2765 		break;
2766 	}
2767 #ifdef	ISP_TARGET_MODE
2768 	case ISPASYNC_TARGET_MESSAGE:
2769 	{
2770 		tmd_msg_t *mp = arg;
2771 		isp_prt(isp, ISP_LOGALL,
2772 		    "bus %d iid %d tgt %d lun %d ttype %x tval %x msg[0]=%x",
2773 		    mp->nt_bus, (int) mp->nt_iid, (int) mp->nt_tgt,
2774 		    (int) mp->nt_lun, mp->nt_tagtype, mp->nt_tagval,
2775 		    mp->nt_msg[0]);
2776 		break;
2777 	}
2778 	case ISPASYNC_TARGET_EVENT:
2779 	{
2780 		tmd_event_t *ep = arg;
2781 		isp_prt(isp, ISP_LOGALL,
2782 		    "bus %d event code 0x%x", ep->ev_bus, ep->ev_event);
2783 		break;
2784 	}
2785 	case ISPASYNC_TARGET_ACTION:
2786 		switch (((isphdr_t *)arg)->rqs_entry_type) {
2787 		default:
2788 			isp_prt(isp, ISP_LOGWARN,
2789 			   "event 0x%x for unhandled target action",
2790 			    ((isphdr_t *)arg)->rqs_entry_type);
2791 			break;
2792 		case RQSTYPE_NOTIFY:
2793 			if (IS_SCSI(isp)) {
2794 				rv = isp_handle_platform_notify_scsi(isp,
2795 				    (in_entry_t *) arg);
2796 			} else {
2797 				rv = isp_handle_platform_notify_fc(isp,
2798 				    (in_fcentry_t *) arg);
2799 			}
2800 			break;
2801 		case RQSTYPE_ATIO:
2802 			rv = isp_handle_platform_atio(isp, (at_entry_t *) arg);
2803 			break;
2804 		case RQSTYPE_ATIO2:
2805 			rv = isp_handle_platform_atio2(isp, (at2_entry_t *)arg);
2806 			break;
2807 		case RQSTYPE_CTIO2:
2808 		case RQSTYPE_CTIO:
2809 			rv = isp_handle_platform_ctio(isp, arg);
2810 			break;
2811 		case RQSTYPE_ENABLE_LUN:
2812 		case RQSTYPE_MODIFY_LUN:
2813 			if (IS_DUALBUS(isp)) {
2814 				bus =
2815 				    GET_BUS_VAL(((lun_entry_t *)arg)->le_rsvd);
2816 			} else {
2817 				bus = 0;
2818 			}
2819 			isp_cv_signal_rqe(isp, bus,
2820 			    ((lun_entry_t *)arg)->le_status);
2821 			break;
2822 		}
2823 		break;
2824 #endif
2825 	case ISPASYNC_FW_CRASH:
2826 	{
2827 		u_int16_t mbox1, mbox6;
2828 		mbox1 = ISP_READ(isp, OUTMAILBOX1);
2829 		if (IS_DUALBUS(isp)) {
2830 			mbox6 = ISP_READ(isp, OUTMAILBOX6);
2831 		} else {
2832 			mbox6 = 0;
2833 		}
2834 		isp_prt(isp, ISP_LOGERR,
2835 		    "Internal Firmware Error on bus %d @ RISC Address 0x%x",
2836 		    mbox6, mbox1);
2837 #ifdef	ISP_FW_CRASH_DUMP
2838 		/*
2839 		 * XXX: really need a thread to do this right.
2840 		 */
2841 		if (IS_FC(isp)) {
2842 			FCPARAM(isp)->isp_fwstate = FW_CONFIG_WAIT;
2843 			FCPARAM(isp)->isp_loopstate = LOOP_NIL;
2844 			isp_freeze_loopdown(isp, "f/w crash");
2845 			isp_fw_dump(isp);
2846 		}
2847 		isp_reinit(isp);
2848 		isp_async(isp, ISPASYNC_FW_RESTARTED, NULL);
2849 #endif
2850 		break;
2851 	}
2852 	case ISPASYNC_UNHANDLED_RESPONSE:
2853 		break;
2854 	default:
2855 		isp_prt(isp, ISP_LOGERR, "unknown isp_async event %d", cmd);
2856 		break;
2857 	}
2858 	return (rv);
2859 }
2860 
2861 
2862 /*
2863  * Locks are held before coming here.
2864  */
2865 void
2866 isp_uninit(struct ispsoftc *isp)
2867 {
2868 	ISP_WRITE(isp, HCCR, HCCR_CMD_RESET);
2869 	DISABLE_INTS(isp);
2870 }
2871 
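/*
 * Platform logging vector for the core driver- filter on the
 * configured debug level and prefix each message with the unit name.
 */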
2872 void
2873 isp_prt(struct ispsoftc *isp, int level, const char *fmt, ...)
2874 {
2875 	__va_list ap;
2876 	if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) {
2877 		return;
2878 	}
2879 	printf("%s: ", device_get_nameunit(isp->isp_dev));
2880 	__va_start(ap, fmt);
2881 	vprintf(fmt, ap);
2882 	__va_end(ap);
2883 	printf("\n");
2884 }
2885