1 /* $FreeBSD: src/sys/dev/isp/isp_freebsd.c,v 1.32.2.20 2002/10/11 18:49:25 mjacob Exp $ */
2 /*
3  * Platform (FreeBSD) dependent common attachment code for Qlogic adapters.
4  *
5  * Copyright (c) 1997, 1998, 1999, 2000, 2001 by Matthew Jacob
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice immediately at the beginning of the file, without modification,
12  *    this list of conditions, and the following disclaimer.
13  * 2. The name of the author may not be used to endorse or promote products
14  *    derived from this software without specific prior written permission.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
20  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 #include <sys/unistd.h>
29 #include <sys/kthread.h>
30 #include <sys/conf.h>
31 #include <sys/device.h>
32 #include <machine/stdarg.h>	/* for use by isp_prt below */
33 
34 #include "isp_ioctl.h"
35 #include "isp_freebsd.h"
36 
37 static d_ioctl_t ispioctl;
38 static void isp_intr_enable(void *);
39 static void isp_cam_async(void *, u_int32_t, struct cam_path *, void *);
40 static void isp_poll(struct cam_sim *);
41 static timeout_t isp_watchdog;
42 static void isp_kthread(void *);
43 static void isp_action(struct cam_sim *, union ccb *);
44 
45 static struct dev_ops isp_ops = {
46 	{ "isp", 0, D_TAPE },
47 	.d_open =	nullopen,
48 	.d_close =	nullclose,
49 	.d_ioctl =	ispioctl,
50 };
51 
52 static struct ispsoftc *isplist = NULL;
53 
54 void
55 isp_attach(struct ispsoftc *isp)
56 {
57 	int primary, secondary;
58 	struct ccb_setasync csa;
59 	struct cam_devq *devq;
60 	struct cam_sim *sim;
61 	struct cam_path *path;
62 
63 	/*
64 	 * Establish (in case of 12X0) which bus is the primary.
65 	 */
66 
67 	primary = 0;
68 	secondary = 1;
69 
70 	/*
71 	 * Create the device queue for our SIM(s).
72 	 */
73 	devq = cam_simq_alloc(isp->isp_maxcmds);
74 	if (devq == NULL) {
75 		return;
76 	}
77 
78 	/*
79 	 * Construct our SIM entry.
80 	 */
81 	ISPLOCK_2_CAMLOCK(isp);
82 	sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp,
83 	    device_get_unit(isp->isp_dev), &sim_mplock, 1, isp->isp_maxcmds, devq);
84 	cam_simq_release(devq);		/* leaves 1 ref due to cam_sim_alloc */
85 	if (sim == NULL) {
86 		CAMLOCK_2_ISPLOCK(isp);
87 		return;
88 	}
89 	CAMLOCK_2_ISPLOCK(isp);
90 
91 	isp->isp_osinfo.ehook.ich_func = isp_intr_enable;
92 	isp->isp_osinfo.ehook.ich_arg = isp;
93 	isp->isp_osinfo.ehook.ich_desc = "isp";
94 	ISPLOCK_2_CAMLOCK(isp);
95 	if (config_intrhook_establish(&isp->isp_osinfo.ehook) != 0) {
96 		cam_sim_free(sim);
97 		CAMLOCK_2_ISPLOCK(isp);
98 		isp_prt(isp, ISP_LOGERR,
99 		    "could not establish interrupt enable hook");
100 		return;
101 	}
102 
103 	if (xpt_bus_register(sim, primary) != CAM_SUCCESS) {
104 		cam_sim_free(sim);
105 		CAMLOCK_2_ISPLOCK(isp);
106 		return;
107 	}
108 
109 	if (xpt_create_path(&path, NULL, cam_sim_path(sim),
110 	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
111 		xpt_bus_deregister(cam_sim_path(sim));
112 		cam_sim_free(sim);
113 		config_intrhook_disestablish(&isp->isp_osinfo.ehook);
114 		CAMLOCK_2_ISPLOCK(isp);
115 		return;
116 	}
117 
118 	xpt_setup_ccb(&csa.ccb_h, path, 5);
119 	csa.ccb_h.func_code = XPT_SASYNC_CB;
120 	csa.event_enable = AC_LOST_DEVICE;
121 	csa.callback = isp_cam_async;
122 	csa.callback_arg = sim;
123 	xpt_action((union ccb *)&csa);
124 	CAMLOCK_2_ISPLOCK(isp);
125 	isp->isp_sim = sim;
126 	isp->isp_path = path;
127 	/*
128 	 * Create a kernel thread for fibre channel instances. We
129 	 * don't have dual channel FC cards.
130 	 */
131 	if (IS_FC(isp)) {
132 		ISPLOCK_2_CAMLOCK(isp);
133 		if (kthread_create(isp_kthread, isp, &isp->isp_osinfo.kthread,
134 		    "%s: fc_thrd", device_get_nameunit(isp->isp_dev))) {
135 			xpt_bus_deregister(cam_sim_path(sim));
136 			cam_sim_free(sim);
137 			config_intrhook_disestablish(&isp->isp_osinfo.ehook);
138 			CAMLOCK_2_ISPLOCK(isp);
139 			isp_prt(isp, ISP_LOGERR, "could not create kthread");
140 			return;
141 		}
142 		CAMLOCK_2_ISPLOCK(isp);
143 	}
144 
145 
146 	/*
147 	 * If we have a second channel, construct SIM entry for that.
148 	 */
149 	if (IS_DUALBUS(isp)) {
150 		ISPLOCK_2_CAMLOCK(isp);
151 		sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp,
152 		    device_get_unit(isp->isp_dev), &sim_mplock, 1, isp->isp_maxcmds, devq);
153 		if (sim == NULL) {
154 			xpt_bus_deregister(cam_sim_path(isp->isp_sim));
155 			xpt_free_path(isp->isp_path);
156 			config_intrhook_disestablish(&isp->isp_osinfo.ehook);
157 			return;
158 		}
159 		if (xpt_bus_register(sim, secondary) != CAM_SUCCESS) {
160 			xpt_bus_deregister(cam_sim_path(isp->isp_sim));
161 			xpt_free_path(isp->isp_path);
162 			cam_sim_free(sim);
163 			config_intrhook_disestablish(&isp->isp_osinfo.ehook);
164 			CAMLOCK_2_ISPLOCK(isp);
165 			return;
166 		}
167 
168 		if (xpt_create_path(&path, NULL, cam_sim_path(sim),
169 		    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
170 			xpt_bus_deregister(cam_sim_path(isp->isp_sim));
171 			xpt_free_path(isp->isp_path);
172 			xpt_bus_deregister(cam_sim_path(sim));
173 			cam_sim_free(sim);
174 			config_intrhook_disestablish(&isp->isp_osinfo.ehook);
175 			CAMLOCK_2_ISPLOCK(isp);
176 			return;
177 		}
178 
179 		xpt_setup_ccb(&csa.ccb_h, path, 5);
180 		csa.ccb_h.func_code = XPT_SASYNC_CB;
181 		csa.event_enable = AC_LOST_DEVICE;
182 		csa.callback = isp_cam_async;
183 		csa.callback_arg = sim;
184 		xpt_action((union ccb *)&csa);
185 		CAMLOCK_2_ISPLOCK(isp);
186 		isp->isp_sim2 = sim;
187 		isp->isp_path2 = path;
188 	}
189 	/*
190 	 * Create device nodes
191 	 */
192 	make_dev(&isp_ops, device_get_unit(isp->isp_dev), UID_ROOT,
193 		 GID_OPERATOR, 0600, "%s", device_get_nameunit(isp->isp_dev));
194 
195 	if (isp->isp_role != ISP_ROLE_NONE) {
196 		isp->isp_state = ISP_RUNSTATE;
197 	}
198 	if (isplist == NULL) {
199 		isplist = isp;
200 	} else {
201 		struct ispsoftc *tmp = isplist;
202 		while (tmp->isp_osinfo.next) {
203 			tmp = tmp->isp_osinfo.next;
204 		}
205 		tmp->isp_osinfo.next = isp;
206 	}
207 
208 }
209 
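/*
 * Helper used while the fibre channel loop is down: the first caller
 * freezes the CAM SIM queue so no new commands are started, and any
 * later caller just records the additional SIMQFRZ_LOOPDOWN reason.
 * The fibre channel kthread below releases the queue again once loop
 * state is good.
 */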
210 static INLINE void
211 isp_freeze_loopdown(struct ispsoftc *isp, char *msg)
212 {
213 	if (isp->isp_osinfo.simqfrozen == 0) {
214 		isp_prt(isp, ISP_LOGDEBUG0, "%s: freeze simq (loopdown)", msg);
215 		isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN;
216 		ISPLOCK_2_CAMLOCK(isp);
217 		xpt_freeze_simq(isp->isp_sim, 1);
218 		CAMLOCK_2_ISPLOCK(isp);
219 	} else {
220 		isp_prt(isp, ISP_LOGDEBUG0, "%s: mark frozen (loopdown)", msg);
221 		isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN;
222 	}
223 }
224 
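/*
 * Character device ioctl entry point. The unit number encoded in the
 * minor selects which HBA instance on the isplist chain is addressed.
 *
 * Illustrative userland sketch only (not part of the driver): it
 * assumes the node created in isp_attach() shows up as /dev/isp0 and
 * that isp_ioctl.h is available to user code.
 *
 *	#include <sys/ioctl.h>
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include "isp_ioctl.h"
 *
 *	static int
 *	show_portid(int loopid)
 *	{
 *		struct isp_fc_device ifc;
 *		int fd = open("/dev/isp0", O_RDONLY);
 *
 *		if (fd < 0)
 *			return (-1);
 *		memset(&ifc, 0, sizeof (ifc));
 *		ifc.loopid = loopid;
 *		if (ioctl(fd, ISP_FC_GETDINFO, &ifc) == 0)
 *			printf("portid 0x%x\n", (unsigned int) ifc.portid);
 *		close(fd);
 *		return (0);
 *	}
 */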
225 static int
226 ispioctl(struct dev_ioctl_args *ap)
227 {
228 	cdev_t dev = ap->a_head.a_dev;
229 	struct ispsoftc *isp;
230 	int retval = ENOTTY;
231 
232 	isp = isplist;
233 	while (isp) {
234 		if (minor(dev) == device_get_unit(isp->isp_dev)) {
235 			break;
236 		}
237 		isp = isp->isp_osinfo.next;
238 	}
239 	if (isp == NULL)
240 		return (ENXIO);
241 
242 	switch (ap->a_cmd) {
243 #ifdef	ISP_FW_CRASH_DUMP
244 	case ISP_GET_FW_CRASH_DUMP:
245 	{
246 		u_int16_t *ptr = FCPARAM(isp)->isp_dump_data;
247 		size_t sz;
248 
249 		retval = 0;
250 		if (IS_2200(isp))
251 			sz = QLA2200_RISC_IMAGE_DUMP_SIZE;
252 		else
253 			sz = QLA2300_RISC_IMAGE_DUMP_SIZE;
254 		ISP_LOCK(isp);
255 		if (ptr && *ptr) {
256 			void *uaddr = *((void **) ap->a_data);
257 			if (copyout(ptr, uaddr, sz)) {
258 				retval = EFAULT;
259 			} else {
260 				*ptr = 0;
261 			}
262 		} else {
263 			retval = ENXIO;
264 		}
265 		ISP_UNLOCK(isp);
266 		break;
267 	}
268 
269 	case ISP_FORCE_CRASH_DUMP:
270 		ISP_LOCK(isp);
271 		isp_freeze_loopdown(isp, "ispioctl(ISP_FORCE_CRASH_DUMP)");
272 		isp_fw_dump(isp);
273 		isp_reinit(isp);
274 		ISP_UNLOCK(isp);
275 		retval = 0;
276 		break;
277 #endif
278 	case ISP_SDBLEV:
279 	{
280 		int olddblev = isp->isp_dblev;
281 		isp->isp_dblev = *(int *)ap->a_data;
282 		*(int *)ap->a_data = olddblev;
283 		retval = 0;
284 		break;
285 	}
286 	case ISP_RESETHBA:
287 		ISP_LOCK(isp);
288 		isp_reinit(isp);
289 		ISP_UNLOCK(isp);
290 		retval = 0;
291 		break;
292 	case ISP_RESCAN:
293 		if (IS_FC(isp)) {
294 			ISP_LOCK(isp);
295 			if (isp_fc_runstate(isp, 5 * 1000000)) {
296 				retval = EIO;
297 			} else {
298 				retval = 0;
299 			}
300 			ISP_UNLOCK(isp);
301 		}
302 		break;
303 	case ISP_FC_LIP:
304 		if (IS_FC(isp)) {
305 			ISP_LOCK(isp);
306 			if (isp_control(isp, ISPCTL_SEND_LIP, 0)) {
307 				retval = EIO;
308 			} else {
309 				retval = 0;
310 			}
311 			ISP_UNLOCK(isp);
312 		}
313 		break;
314 	case ISP_FC_GETDINFO:
315 	{
316 		struct isp_fc_device *ifc = (struct isp_fc_device *) ap->a_data;
317 		struct lportdb *lp;
318 
319 		if (ifc->loopid < 0 || ifc->loopid >= MAX_FC_TARG) {
320 			retval = EINVAL;
321 			break;
322 		}
323 		ISP_LOCK(isp);
324 		lp = &FCPARAM(isp)->portdb[ifc->loopid];
325 		if (lp->valid) {
326 			ifc->loopid = lp->loopid;
327 			ifc->portid = lp->portid;
328 			ifc->node_wwn = lp->node_wwn;
329 			ifc->port_wwn = lp->port_wwn;
330 			retval = 0;
331 		} else {
332 			retval = ENODEV;
333 		}
334 		ISP_UNLOCK(isp);
335 		break;
336 	}
337 	case ISP_GET_STATS:
338 	{
339 		isp_stats_t *sp = (isp_stats_t *) ap->a_data;
340 
341 		MEMZERO(sp, sizeof (*sp));
342 		sp->isp_stat_version = ISP_STATS_VERSION;
343 		sp->isp_type = isp->isp_type;
344 		sp->isp_revision = isp->isp_revision;
345 		ISP_LOCK(isp);
346 		sp->isp_stats[ISP_INTCNT] = isp->isp_intcnt;
347 		sp->isp_stats[ISP_INTBOGUS] = isp->isp_intbogus;
348 		sp->isp_stats[ISP_INTMBOXC] = isp->isp_intmboxc;
349 		sp->isp_stats[ISP_INGOASYNC] = isp->isp_intoasync;
350 		sp->isp_stats[ISP_RSLTCCMPLT] = isp->isp_rsltccmplt;
351 		sp->isp_stats[ISP_FPHCCMCPLT] = isp->isp_fphccmplt;
352 		sp->isp_stats[ISP_RSCCHIWAT] = isp->isp_rscchiwater;
353 		sp->isp_stats[ISP_FPCCHIWAT] = isp->isp_fpcchiwater;
354 		ISP_UNLOCK(isp);
355 		retval = 0;
356 		break;
357 	}
358 	case ISP_CLR_STATS:
359 		ISP_LOCK(isp);
360 		isp->isp_intcnt = 0;
361 		isp->isp_intbogus = 0;
362 		isp->isp_intmboxc = 0;
363 		isp->isp_intoasync = 0;
364 		isp->isp_rsltccmplt = 0;
365 		isp->isp_fphccmplt = 0;
366 		isp->isp_rscchiwater = 0;
367 		isp->isp_fpcchiwater = 0;
368 		ISP_UNLOCK(isp);
369 		retval = 0;
370 		break;
371 	case ISP_FC_GETHINFO:
372 	{
373 		struct isp_hba_device *hba = (struct isp_hba_device *) ap->a_data;
374 		MEMZERO(hba, sizeof (*hba));
375 		ISP_LOCK(isp);
376 		hba->fc_speed = FCPARAM(isp)->isp_gbspeed;
377 		hba->fc_scsi_supported = 1;
378 		hba->fc_topology = FCPARAM(isp)->isp_topo + 1;
379 		hba->fc_loopid = FCPARAM(isp)->isp_loopid;
380 		hba->active_node_wwn = FCPARAM(isp)->isp_nodewwn;
381 		hba->active_port_wwn = FCPARAM(isp)->isp_portwwn;
382 		ISP_UNLOCK(isp);
383 		retval = 0;
384 		break;
385 	}
386 	case ISP_GET_FC_PARAM:
387 	{
388 		struct isp_fc_param *f = (struct isp_fc_param *) ap->a_data;
389 
390 		if (!IS_FC(isp)) {
391 			retval = EINVAL;
392 			break;
393 		}
394 		f->parameter = 0;
395 		if (strcmp(f->param_name, "framelength") == 0) {
396 			f->parameter = FCPARAM(isp)->isp_maxfrmlen;
397 			retval = 0;
398 			break;
399 		}
400 		if (strcmp(f->param_name, "exec_throttle") == 0) {
401 			f->parameter = FCPARAM(isp)->isp_execthrottle;
402 			retval = 0;
403 			break;
404 		}
405 		if (strcmp(f->param_name, "fullduplex") == 0) {
406 			if (FCPARAM(isp)->isp_fwoptions & ICBOPT_FULL_DUPLEX)
407 				f->parameter = 1;
408 			retval = 0;
409 			break;
410 		}
411 		if (strcmp(f->param_name, "loopid") == 0) {
412 			f->parameter = FCPARAM(isp)->isp_loopid;
413 			retval = 0;
414 			break;
415 		}
416 		retval = EINVAL;
417 		break;
418 	}
419 	case ISP_SET_FC_PARAM:
420 	{
421 		struct isp_fc_param *f = (struct isp_fc_param *) ap->a_data;
422 		u_int32_t param = f->parameter;
423 
424 		if (!IS_FC(isp)) {
425 			retval = EINVAL;
426 			break;
427 		}
428 		f->parameter = 0;
429 		if (strcmp(f->param_name, "framelength") == 0) {
430 			if (param != 512 && param != 1024 && param != 2048) {
431 				retval = EINVAL;
432 				break;
433 			}
434 			FCPARAM(isp)->isp_maxfrmlen = param;
435 			retval = 0;
436 			break;
437 		}
438 		if (strcmp(f->param_name, "exec_throttle") == 0) {
439 			if (param < 16 || param > 255) {
440 				retval = EINVAL;
441 				break;
442 			}
443 			FCPARAM(isp)->isp_execthrottle = param;
444 			retval = 0;
445 			break;
446 		}
447 		if (strcmp(f->param_name, "fullduplex") == 0) {
448 			if (param != 0 && param != 1) {
449 				retval = EINVAL;
450 				break;
451 			}
452 			if (param) {
453 				FCPARAM(isp)->isp_fwoptions |=
454 				    ICBOPT_FULL_DUPLEX;
455 			} else {
456 				FCPARAM(isp)->isp_fwoptions &=
457 				    ~ICBOPT_FULL_DUPLEX;
458 			}
459 			retval = 0;
460 			break;
461 		}
462 		if (strcmp(f->param_name, "loopid") == 0) {
463 			if (param < 0 || param > 125) {
464 				retval = EINVAL;
465 				break;
466 			}
467 			FCPARAM(isp)->isp_loopid = param;
468 			retval = 0;
469 			break;
470 		}
471 		retval = EINVAL;
472 		break;
473 	}
474 	default:
475 		break;
476 	}
477 	return (retval);
478 }
479 
480 static void
481 isp_intr_enable(void *arg)
482 {
483 	struct ispsoftc *isp = arg;
484 	if (isp->isp_role != ISP_ROLE_NONE) {
485 		ENABLE_INTS(isp);
486 	}
487 	/* Release our hook so that the boot can continue. */
488 	config_intrhook_disestablish(&isp->isp_osinfo.ehook);
489 }
490 
491 /*
492  * Put the target mode functions here, because some are inlines
493  */
494 
495 #ifdef	ISP_TARGET_MODE
496 
497 static INLINE int is_lun_enabled(struct ispsoftc *, int, lun_id_t);
498 static INLINE int are_any_luns_enabled(struct ispsoftc *, int);
499 static INLINE tstate_t *get_lun_statep(struct ispsoftc *, int, lun_id_t);
500 static INLINE void rls_lun_statep(struct ispsoftc *, tstate_t *);
501 static INLINE int isp_psema_sig_rqe(struct ispsoftc *, int);
502 static INLINE int isp_cv_wait_timed_rqe(struct ispsoftc *, int, int);
503 static INLINE void isp_cv_signal_rqe(struct ispsoftc *, int, int);
504 static INLINE void isp_vsema_rqe(struct ispsoftc *, int);
505 static INLINE atio_private_data_t *isp_get_atpd(struct ispsoftc *, int);
506 static cam_status
507 create_lun_state(struct ispsoftc *, int, struct cam_path *, tstate_t **);
508 static void destroy_lun_state(struct ispsoftc *, tstate_t *);
509 static void isp_en_lun(struct ispsoftc *, union ccb *);
510 static cam_status isp_abort_tgt_ccb(struct ispsoftc *, union ccb *);
511 static timeout_t isp_refire_putback_atio;
512 static void isp_complete_ctio(union ccb *);
513 static void isp_target_putback_atio(union ccb *);
514 static cam_status isp_target_start_ctio(struct ispsoftc *, union ccb *);
515 static int isp_handle_platform_atio(struct ispsoftc *, at_entry_t *);
516 static int isp_handle_platform_atio2(struct ispsoftc *, at2_entry_t *);
517 static int isp_handle_platform_ctio(struct ispsoftc *, void *);
518 static int isp_handle_platform_notify_scsi(struct ispsoftc *, in_entry_t *);
519 static int isp_handle_platform_notify_fc(struct ispsoftc *, in_fcentry_t *);
520 
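/*
 * The target mode code tracks enabled luns in a small hash table of
 * singly linked tstate_t chains, indexed by LUN_HASH_FUNC(isp, bus, lun).
 * The helpers below walk those chains; get_lun_statep() also bumps a
 * hold count that rls_lun_statep() drops again.
 */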
521 static INLINE int
522 is_lun_enabled(struct ispsoftc *isp, int bus, lun_id_t lun)
523 {
524 	tstate_t *tptr;
525 	tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(isp, bus, lun)];
526 	if (tptr == NULL) {
527 		return (0);
528 	}
529 	do {
530 		if (tptr->lun == (lun_id_t) lun && tptr->bus == bus) {
531 			return (1);
532 		}
533 	} while ((tptr = tptr->next) != NULL);
534 	return (0);
535 }
536 
537 static INLINE int
538 are_any_luns_enabled(struct ispsoftc *isp, int port)
539 {
540 	int lo, hi;
541 	if (IS_DUALBUS(isp)) {
542 		lo = (port * (LUN_HASH_SIZE >> 1));
543 		hi = lo + (LUN_HASH_SIZE >> 1);
544 	} else {
545 		lo = 0;
546 		hi = LUN_HASH_SIZE;
547 	}
548 	for (; lo < hi; lo++) {
549 		if (isp->isp_osinfo.lun_hash[lo]) {
550 			return (1);
551 		}
552 	}
553 	return (0);
554 }
555 
556 static INLINE tstate_t *
557 get_lun_statep(struct ispsoftc *isp, int bus, lun_id_t lun)
558 {
559 	tstate_t *tptr = NULL;
560 
561 	if (lun == CAM_LUN_WILDCARD) {
562 		if (isp->isp_osinfo.tmflags[bus] & TM_WILDCARD_ENABLED) {
563 			tptr = &isp->isp_osinfo.tsdflt[bus];
564 			tptr->hold++;
565 			return (tptr);
566 		}
567 	} else {
568 		tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(isp, bus, lun)];
569 		if (tptr == NULL) {
570 			return (NULL);
571 		}
572 	}
573 
574 	do {
575 		if (tptr->lun == lun && tptr->bus == bus) {
576 			tptr->hold++;
577 			return (tptr);
578 		}
579 	} while ((tptr = tptr->next) != NULL);
580 	return (tptr);
581 }
582 
583 static __inline void
584 rls_lun_statep(struct ispsoftc *isp, tstate_t *tptr)
585 {
586 	if (tptr->hold)
587 		tptr->hold--;
588 }
589 
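/*
 * TM_BUSY/TM_WANTED implement a simple per-bus sleep lock around the
 * firmware ENABLE/MODIFY LUN requests, and rstatus[] carries the
 * completion status back from the response handler: the requester
 * sleeps in isp_cv_wait_timed_rqe() until isp_cv_signal_rqe() is
 * called with the firmware's result.
 */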
590 static __inline int
591 isp_psema_sig_rqe(struct ispsoftc *isp, int bus)
592 {
593 	while (isp->isp_osinfo.tmflags[bus] & TM_BUSY) {
594 		isp->isp_osinfo.tmflags[bus] |= TM_WANTED;
595 		if (tsleep(&isp->isp_osinfo.tmflags[bus], PCATCH, "i0", 0)) {
596 			return (-1);
597 		}
598 		isp->isp_osinfo.tmflags[bus] |= TM_BUSY;
599 	}
600 	return (0);
601 }
602 
603 static __inline int
604 isp_cv_wait_timed_rqe(struct ispsoftc *isp, int bus, int timo)
605 {
606 	if (tsleep(&isp->isp_osinfo.rstatus[bus], 0, "qt1", timo)) {
607 		return (-1);
608 	}
609 	return (0);
610 }
611 
612 static __inline void
613 isp_cv_signal_rqe(struct ispsoftc *isp, int bus, int status)
614 {
615 	isp->isp_osinfo.rstatus[bus] = status;
616 	wakeup(&isp->isp_osinfo.rstatus[bus]);
617 }
618 
619 static __inline void
620 isp_vsema_rqe(struct ispsoftc *isp, int bus)
621 {
622 	if (isp->isp_osinfo.tmflags[bus] & TM_WANTED) {
623 		isp->isp_osinfo.tmflags[bus] &= ~TM_WANTED;
624 		wakeup(&isp->isp_osinfo.tmflags[bus]);
625 	}
626 	isp->isp_osinfo.tmflags[bus] &= ~TM_BUSY;
627 }
628 
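/*
 * Each active fibre channel command gets an atio_private_data_t
 * adjunct, looked up here by tag (RX_ID); it carries the original data
 * length and the number of bytes moved so far, so residuals can be
 * computed across a series of CTIO2s.
 */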
629 static __inline atio_private_data_t *
630 isp_get_atpd(struct ispsoftc *isp, int tag)
631 {
632 	atio_private_data_t *atp;
633 	for (atp = isp->isp_osinfo.atpdp;
634 	    atp < &isp->isp_osinfo.atpdp[ATPDPSIZE]; atp++) {
635 		if (atp->tag == tag)
636 			return (atp);
637 	}
638 	return (NULL);
639 }
640 
641 static cam_status
642 create_lun_state(struct ispsoftc *isp, int bus,
643     struct cam_path *path, tstate_t **rslt)
644 {
645 	cam_status status;
646 	lun_id_t lun;
647 	int hfx;
648 	tstate_t *tptr, *new;
649 
650 	lun = xpt_path_lun_id(path);
651 	if (lun < 0) {
652 		return (CAM_LUN_INVALID);
653 	}
654 	if (is_lun_enabled(isp, bus, lun)) {
655 		return (CAM_LUN_ALRDY_ENA);
656 	}
657 	new = kmalloc(sizeof (tstate_t), M_DEVBUF, M_WAITOK | M_ZERO);
658 	status = xpt_create_path(&new->owner, NULL, xpt_path_path_id(path),
659 	    xpt_path_target_id(path), xpt_path_lun_id(path));
660 	if (status != CAM_REQ_CMP) {
661 		kfree(new, M_DEVBUF);
662 		return (status);
663 	}
664 	new->bus = bus;
665 	new->lun = lun;
666 	SLIST_INIT(&new->atios);
667 	SLIST_INIT(&new->inots);
668 	new->hold = 1;
669 
670 	hfx = LUN_HASH_FUNC(isp, new->bus, new->lun);
671 	tptr = isp->isp_osinfo.lun_hash[hfx];
672 	if (tptr == NULL) {
673 		isp->isp_osinfo.lun_hash[hfx] = new;
674 	} else {
675 		while (tptr->next)
676 			tptr = tptr->next;
677 		tptr->next = new;
678 	}
679 	*rslt = new;
680 	return (CAM_REQ_CMP);
681 }
682 
683 static INLINE void
684 destroy_lun_state(struct ispsoftc *isp, tstate_t *tptr)
685 {
686 	int hfx;
687 	tstate_t *lw, *pw;
688 
689 	hfx = LUN_HASH_FUNC(isp, tptr->bus, tptr->lun);
690 	if (tptr->hold) {
691 		return;
692 	}
693 	pw = isp->isp_osinfo.lun_hash[hfx];
694 	if (pw == NULL) {
695 		return;
696 	} else if (pw->lun == tptr->lun && pw->bus == tptr->bus) {
697 		isp->isp_osinfo.lun_hash[hfx] = pw->next;
698 	} else {
699 		lw = pw;
700 		pw = lw->next;
701 		while (pw) {
702 			if (pw->lun == tptr->lun && pw->bus == tptr->bus) {
703 				lw->next = pw->next;
704 				break;
705 			}
706 			lw = pw;
707 			pw = pw->next;
708 		}
709 		if (pw == NULL) {
710 			return;
711 		}
712 	}
713 	kfree(tptr, M_DEVBUF);
714 }
715 
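/*
 * Enable or disable a lun for target mode (XPT_EN_LUN). The flow is:
 * sanity check the target/lun, handle the wildcard (target and lun
 * both wildcards) case, toggle target mode on the channel as needed,
 * and then issue ENABLE/MODIFY/DISABLE LUN requests to the firmware,
 * waiting for each response via the rqe handshake above.
 */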
716 /*
717  * we enter with our locks held.
718  */
719 static void
720 isp_en_lun(struct ispsoftc *isp, union ccb *ccb)
721 {
722 	const char lfmt[] = "Lun now %sabled for target mode on channel %d";
723 	struct ccb_en_lun *cel = &ccb->cel;
724 	tstate_t *tptr;
725 	u_int16_t rstat;
726 	int bus, cmd, av, wildcard;
727 	lun_id_t lun;
728 	target_id_t tgt;
729 
730 
731 	bus = XS_CHANNEL(ccb) & 0x1;
732 	tgt = ccb->ccb_h.target_id;
733 	lun = ccb->ccb_h.target_lun;
734 
735 	/*
736 	 * Do some sanity checking first.
737 	 */
738 
739 	if ((lun != CAM_LUN_WILDCARD) &&
740 	    (lun < 0 || lun >= (lun_id_t) isp->isp_maxluns)) {
741 		ccb->ccb_h.status = CAM_LUN_INVALID;
742 		return;
743 	}
744 
745 	if (IS_SCSI(isp)) {
746 		sdparam *sdp = isp->isp_param;
747 		sdp += bus;
748 		if (tgt != CAM_TARGET_WILDCARD &&
749 		    tgt != sdp->isp_initiator_id) {
750 			ccb->ccb_h.status = CAM_TID_INVALID;
751 			return;
752 		}
753 	} else {
754 		if (tgt != CAM_TARGET_WILDCARD &&
755 		    tgt != FCPARAM(isp)->isp_iid) {
756 			ccb->ccb_h.status = CAM_TID_INVALID;
757 			return;
758 		}
759 		/*
760 		 * This is as good a place as any to check f/w capabilities.
761 		 */
762 		if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_TMODE) == 0) {
763 			isp_prt(isp, ISP_LOGERR,
764 			    "firmware does not support target mode");
765 			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
766 			return;
767 		}
768 		/*
769 		 * XXX: We *could* handle non-SCCLUN f/w, but we'd have to
770 		 * XXX: dork with our already fragile enable/disable code.
771 		 */
772 		if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) == 0) {
773 			isp_prt(isp, ISP_LOGERR,
774 			    "firmware not SCCLUN capable");
775 		}
776 	}
777 
778 	if (tgt == CAM_TARGET_WILDCARD) {
779 		if (lun == CAM_LUN_WILDCARD) {
780 			wildcard = 1;
781 		} else {
782 			ccb->ccb_h.status = CAM_LUN_INVALID;
783 			return;
784 		}
785 	} else {
786 		wildcard = 0;
787 	}
788 
789 	/*
790 	 * Next check to see whether this is a target/lun wildcard action.
791 	 *
792 	 * If so, we know that we can accept commands for luns that haven't
793 	 * been enabled yet and send them upstream. Otherwise, we have to
794 	 * handle them locally (if we see them at all).
795 	 */
796 
797 	if (wildcard) {
798 		tptr = &isp->isp_osinfo.tsdflt[bus];
799 		if (cel->enable) {
800 			if (isp->isp_osinfo.tmflags[bus] &
801 			    TM_WILDCARD_ENABLED) {
802 				ccb->ccb_h.status = CAM_LUN_ALRDY_ENA;
803 				return;
804 			}
805 			ccb->ccb_h.status =
806 			    xpt_create_path(&tptr->owner, NULL,
807 			    xpt_path_path_id(ccb->ccb_h.path),
808 			    xpt_path_target_id(ccb->ccb_h.path),
809 			    xpt_path_lun_id(ccb->ccb_h.path));
810 			if (ccb->ccb_h.status != CAM_REQ_CMP) {
811 				return;
812 			}
813 			SLIST_INIT(&tptr->atios);
814 			SLIST_INIT(&tptr->inots);
815 			isp->isp_osinfo.tmflags[bus] |= TM_WILDCARD_ENABLED;
816 		} else {
817 			if ((isp->isp_osinfo.tmflags[bus] &
818 			    TM_WILDCARD_ENABLED) == 0) {
819 				ccb->ccb_h.status = CAM_REQ_CMP;
820 				return;
821 			}
822 			if (tptr->hold) {
823 				ccb->ccb_h.status = CAM_SCSI_BUSY;
824 				return;
825 			}
826 			xpt_free_path(tptr->owner);
827 			isp->isp_osinfo.tmflags[bus] &= ~TM_WILDCARD_ENABLED;
828 		}
829 	}
830 
831 	/*
832 	 * Now check to see whether this bus needs to be
833 	 * enabled/disabled with respect to target mode.
834 	 */
835 	av = bus << 31;
836 	if (cel->enable && !(isp->isp_osinfo.tmflags[bus] & TM_TMODE_ENABLED)) {
837 		av |= ENABLE_TARGET_FLAG;
838 		av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
839 		if (av) {
840 			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
841 			if (wildcard) {
842 				isp->isp_osinfo.tmflags[bus] &=
843 				    ~TM_WILDCARD_ENABLED;
844 				xpt_free_path(tptr->owner);
845 			}
846 			return;
847 		}
848 		isp->isp_osinfo.tmflags[bus] |= TM_TMODE_ENABLED;
849 		isp_prt(isp, ISP_LOGINFO,
850 		    "Target Mode enabled on channel %d", bus);
851 	} else if (cel->enable == 0 &&
852 	    (isp->isp_osinfo.tmflags[bus] & TM_TMODE_ENABLED) && wildcard) {
853 		if (are_any_luns_enabled(isp, bus)) {
854 			ccb->ccb_h.status = CAM_SCSI_BUSY;
855 			return;
856 		}
857 		av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
858 		if (av) {
859 			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
860 			return;
861 		}
862 		isp->isp_osinfo.tmflags[bus] &= ~TM_TMODE_ENABLED;
863 		isp_prt(isp, ISP_LOGINFO,
864 		    "Target Mode disabled on channel %d", bus);
865 	}
866 
867 	if (wildcard) {
868 		ccb->ccb_h.status = CAM_REQ_CMP;
869 		return;
870 	}
871 
872 	if (cel->enable) {
873 		ccb->ccb_h.status =
874 		    create_lun_state(isp, bus, ccb->ccb_h.path, &tptr);
875 		if (ccb->ccb_h.status != CAM_REQ_CMP) {
876 			return;
877 		}
878 	} else {
879 		tptr = get_lun_statep(isp, bus, lun);
880 		if (tptr == NULL) {
881 			ccb->ccb_h.status = CAM_LUN_INVALID;
882 			return;
883 		}
884 	}
885 
886 	if (isp_psema_sig_rqe(isp, bus)) {
887 		rls_lun_statep(isp, tptr);
888 		if (cel->enable)
889 			destroy_lun_state(isp, tptr);
890 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
891 		return;
892 	}
893 
894 	if (cel->enable) {
895 		u_int32_t seq = isp->isp_osinfo.rollinfo++;
896 		int c, n, ulun = lun;
897 
898 		cmd = RQSTYPE_ENABLE_LUN;
899 		c = DFLT_CMND_CNT;
900 		n = DFLT_INOT_CNT;
901 		if (IS_FC(isp) && lun != 0) {
902 			cmd = RQSTYPE_MODIFY_LUN;
903 			n = 0;
904 			/*
905 		 	 * For SCC firmware, we only deal with setting
906 			 * (enabling or modifying) lun 0.
907 			 */
908 			ulun = 0;
909 		}
910 		rstat = LUN_ERR;
911 		if (isp_lun_cmd(isp, cmd, bus, tgt, ulun, c, n, seq)) {
912 			xpt_print_path(ccb->ccb_h.path);
913 			isp_prt(isp, ISP_LOGWARN, "isp_lun_cmd failed");
914 			goto out;
915 		}
916 		if (isp_cv_wait_timed_rqe(isp, bus, 30 * hz)) {
917 			xpt_print_path(ccb->ccb_h.path);
918 			isp_prt(isp, ISP_LOGERR,
919 			    "wait for ENABLE/MODIFY LUN timed out");
920 			goto out;
921 		}
922 		rstat = isp->isp_osinfo.rstatus[bus];
923 		if (rstat != LUN_OK) {
924 			xpt_print_path(ccb->ccb_h.path);
925 			isp_prt(isp, ISP_LOGERR,
926 			    "ENABLE/MODIFY LUN returned 0x%x", rstat);
927 			goto out;
928 		}
929 	} else {
930 		int c, n, ulun = lun;
931 		u_int32_t seq;
932 
933 		rstat = LUN_ERR;
934 		seq = isp->isp_osinfo.rollinfo++;
935 		cmd = -RQSTYPE_MODIFY_LUN;
936 
937 		c = DFLT_CMND_CNT;
938 		n = DFLT_INOT_CNT;
939 		if (IS_FC(isp) && lun != 0) {
940 			n = 0;
941 			/*
942 		 	 * For SCC firmware, we only deal with setting
943 			 * (enabling or modifying) lun 0.
944 			 */
945 			ulun = 0;
946 		}
947 		if (isp_lun_cmd(isp, cmd, bus, tgt, ulun, c, n, seq)) {
948 			xpt_print_path(ccb->ccb_h.path);
949 			isp_prt(isp, ISP_LOGERR, "isp_lun_cmd failed");
950 			goto out;
951 		}
952 		if (isp_cv_wait_timed_rqe(isp, bus, 30 * hz)) {
953 			xpt_print_path(ccb->ccb_h.path);
954 			isp_prt(isp, ISP_LOGERR,
955 			    "wait for MODIFY LUN timed out");
956 			goto out;
957 		}
958 		rstat = isp->isp_osinfo.rstatus[bus];
959 		if (rstat != LUN_OK) {
960 			xpt_print_path(ccb->ccb_h.path);
961 			isp_prt(isp, ISP_LOGERR,
962 			    "MODIFY LUN returned 0x%x", rstat);
963 			goto out;
964 		}
965 		if (IS_FC(isp) && lun) {
966 			goto out;
967 		}
968 
969 		seq = isp->isp_osinfo.rollinfo++;
970 
971 		rstat = LUN_ERR;
972 		cmd = -RQSTYPE_ENABLE_LUN;
973 		if (isp_lun_cmd(isp, cmd, bus, tgt, lun, 0, 0, seq)) {
974 			xpt_print_path(ccb->ccb_h.path);
975 			isp_prt(isp, ISP_LOGERR, "isp_lun_cmd failed");
976 			goto out;
977 		}
978 		if (isp_cv_wait_timed_rqe(isp, bus, 30 * hz)) {
979 			xpt_print_path(ccb->ccb_h.path);
980 			isp_prt(isp, ISP_LOGERR,
981 			     "wait for DISABLE LUN timed out");
982 			goto out;
983 		}
984 		rstat = isp->isp_osinfo.rstatus[bus];
985 		if (rstat != LUN_OK) {
986 			xpt_print_path(ccb->ccb_h.path);
987 			isp_prt(isp, ISP_LOGWARN,
988 			    "DISABLE LUN returned 0x%x", rstat);
989 			goto out;
990 		}
991 		if (are_any_luns_enabled(isp, bus) == 0) {
992 			av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
993 			if (av) {
994 				isp_prt(isp, ISP_LOGWARN,
995 				    "disable target mode on channel %d failed",
996 				    bus);
997 				goto out;
998 			}
999 			isp->isp_osinfo.tmflags[bus] &= ~TM_TMODE_ENABLED;
1000 			xpt_print_path(ccb->ccb_h.path);
1001 			isp_prt(isp, ISP_LOGINFO,
1002 			    "Target Mode disabled on channel %d", bus);
1003 		}
1004 	}
1005 
1006 out:
1007 	isp_vsema_rqe(isp, bus);
1008 
1009 	if (rstat != LUN_OK) {
1010 		xpt_print_path(ccb->ccb_h.path);
1011 		isp_prt(isp, ISP_LOGWARN,
1012 		    "lun %sable failed", (cel->enable) ? "en" : "dis");
1013 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1014 		rls_lun_statep(isp, tptr);
1015 		if (cel->enable)
1016 			destroy_lun_state(isp, tptr);
1017 	} else {
1018 		xpt_print_path(ccb->ccb_h.path);
1019 		isp_prt(isp, ISP_LOGINFO, lfmt,
1020 		    (cel->enable) ? "en" : "dis", bus);
1021 		rls_lun_statep(isp, tptr);
1022 		if (cel->enable == 0) {
1023 			destroy_lun_state(isp, tptr);
1024 		}
1025 		ccb->ccb_h.status = CAM_REQ_CMP;
1026 	}
1027 }
1028 
1029 static cam_status
1030 isp_abort_tgt_ccb(struct ispsoftc *isp, union ccb *ccb)
1031 {
1032 	tstate_t *tptr;
1033 	struct ccb_hdr_slist *lp;
1034 	struct ccb_hdr *curelm;
1035 	int found;
1036 	union ccb *accb = ccb->cab.abort_ccb;
1037 
1038 	if (accb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
1039 		if (IS_FC(isp) && (accb->ccb_h.target_id !=
1040 		    ((fcparam *) isp->isp_param)->isp_loopid)) {
1041 			return (CAM_PATH_INVALID);
1042 		} else if (IS_SCSI(isp) && (accb->ccb_h.target_id !=
1043 		    ((sdparam *) isp->isp_param)->isp_initiator_id)) {
1044 			return (CAM_PATH_INVALID);
1045 		}
1046 	}
1047 	tptr = get_lun_statep(isp, XS_CHANNEL(ccb), accb->ccb_h.target_lun);
1048 	if (tptr == NULL) {
1049 		return (CAM_PATH_INVALID);
1050 	}
1051 	if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
1052 		lp = &tptr->atios;
1053 	} else if (accb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
1054 		lp = &tptr->inots;
1055 	} else {
1056 		rls_lun_statep(isp, tptr);
1057 		return (CAM_UA_ABORT);
1058 	}
1059 	curelm = SLIST_FIRST(lp);
1060 	found = 0;
1061 	if (curelm == &accb->ccb_h) {
1062 		found = 1;
1063 		SLIST_REMOVE_HEAD(lp, sim_links.sle);
1064 	} else {
1065 		while(curelm != NULL) {
1066 			struct ccb_hdr *nextelm;
1067 
1068 			nextelm = SLIST_NEXT(curelm, sim_links.sle);
1069 			if (nextelm == &accb->ccb_h) {
1070 				found = 1;
1071 				SLIST_NEXT(curelm, sim_links.sle) =
1072 				    SLIST_NEXT(nextelm, sim_links.sle);
1073 				break;
1074 			}
1075 			curelm = nextelm;
1076 		}
1077 	}
1078 	rls_lun_statep(isp, tptr);
1079 	if (found) {
1080 		accb->ccb_h.status = CAM_REQ_ABORTED;
1081 		return (CAM_REQ_CMP);
1082 	}
1083 	return(CAM_PATH_INVALID);
1084 }
1085 
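/*
 * Build and queue a CTIO (parallel SCSI) or CTIO2 (fibre channel) for
 * an XPT_CONT_TARGET_IO ccb. For FC, mode 0 moves data (optionally
 * with status) while mode 1 sends status and sense only; residuals
 * are computed from the adjunct's original data length.
 */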
1086 static cam_status
1087 isp_target_start_ctio(struct ispsoftc *isp, union ccb *ccb)
1088 {
1089 	void *qe;
1090 	struct ccb_scsiio *cso = &ccb->csio;
1091 	u_int16_t *hp, save_handle;
1092 	u_int16_t nxti, optr;
1093 	u_int8_t local[QENTRY_LEN];
1094 
1095 
1096 	if (isp_getrqentry(isp, &nxti, &optr, &qe)) {
1097 		xpt_print_path(ccb->ccb_h.path);
1098 		kprintf("Request Queue Overflow in isp_target_start_ctio\n");
1099 		return (CAM_RESRC_UNAVAIL);
1100 	}
1101 	bzero(local, QENTRY_LEN);
1102 
1103 	/*
1104 	 * We're either moving data or completing a command here.
1105 	 */
1106 
1107 	if (IS_FC(isp)) {
1108 		atio_private_data_t *atp;
1109 		ct2_entry_t *cto = (ct2_entry_t *) local;
1110 
1111 		cto->ct_header.rqs_entry_type = RQSTYPE_CTIO2;
1112 		cto->ct_header.rqs_entry_count = 1;
1113 		cto->ct_iid = cso->init_id;
1114 		if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) == 0) {
1115 			cto->ct_lun = ccb->ccb_h.target_lun;
1116 		}
1117 
1118 		atp = isp_get_atpd(isp, cso->tag_id);
1119 		if (atp == NULL) {
1120 			isp_prt(isp, ISP_LOGERR,
1121 			    "cannot find private data adjunct for tag %x",
1122 			    cso->tag_id);
1123 			return (-1);
1124 		}
1125 
1126 		cto->ct_rxid = cso->tag_id;
1127 		if (cso->dxfer_len == 0) {
1128 			cto->ct_flags |= CT2_FLAG_MODE1 | CT2_NO_DATA;
1129 			if (ccb->ccb_h.flags & CAM_SEND_STATUS) {
1130 				cto->ct_flags |= CT2_SENDSTATUS;
1131 				cto->rsp.m1.ct_scsi_status = cso->scsi_status;
1132 				cto->ct_resid =
1133 				    atp->orig_datalen - atp->bytes_xfered;
1134 				if (cto->ct_resid < 0) {
1135 					cto->rsp.m1.ct_scsi_status |=
1136 					    CT2_DATA_OVER;
1137 				} else if (cto->ct_resid > 0) {
1138 					cto->rsp.m1.ct_scsi_status |=
1139 					    CT2_DATA_UNDER;
1140 				}
1141 			}
1142 			if ((ccb->ccb_h.flags & CAM_SEND_SENSE) != 0) {
1143 				int m = min(cso->sense_len, MAXRESPLEN);
1144 				bcopy(&cso->sense_data, cto->rsp.m1.ct_resp, m);
1145 				cto->rsp.m1.ct_senselen = m;
1146 				cto->rsp.m1.ct_scsi_status |= CT2_SNSLEN_VALID;
1147 			}
1148 		} else {
1149 			cto->ct_flags |= CT2_FLAG_MODE0;
1150 			if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1151 				cto->ct_flags |= CT2_DATA_IN;
1152 			} else {
1153 				cto->ct_flags |= CT2_DATA_OUT;
1154 			}
1155 			cto->ct_reloff = atp->bytes_xfered;
1156 			if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) {
1157 				cto->ct_flags |= CT2_SENDSTATUS;
1158 				cto->rsp.m0.ct_scsi_status = cso->scsi_status;
1159 				cto->ct_resid =
1160 				    atp->orig_datalen -
1161 				    (atp->bytes_xfered + cso->dxfer_len);
1162 				if (cto->ct_resid < 0) {
1163 					cto->rsp.m0.ct_scsi_status |=
1164 					    CT2_DATA_OVER;
1165 				} else if (cto->ct_resid > 0) {
1166 					cto->rsp.m0.ct_scsi_status |=
1167 					    CT2_DATA_UNDER;
1168 				}
1169 			} else {
1170 				atp->last_xframt = cso->dxfer_len;
1171 			}
1172 			/*
1173 			 * If we're sending data and status back together,
1174 			 * we can't send back sense data as well.
1175 			 */
1176 			ccb->ccb_h.flags &= ~CAM_SEND_SENSE;
1177 		}
1178 
1179 		if (cto->ct_flags & CT2_SENDSTATUS) {
1180 			isp_prt(isp, ISP_LOGTDEBUG0,
1181 			    "CTIO2[%x] STATUS %x origd %u curd %u resid %u",
1182 			    cto->ct_rxid, cso->scsi_status, atp->orig_datalen,
1183 			    cso->dxfer_len, cto->ct_resid);
1184 			cto->ct_flags |= CT2_CCINCR;
1185 			atp->state = ATPD_STATE_LAST_CTIO;
1186 		} else
1187 			atp->state = ATPD_STATE_CTIO;
1188 		cto->ct_timeout = 10;
1189 		hp = &cto->ct_syshandle;
1190 	} else {
1191 		ct_entry_t *cto = (ct_entry_t *) local;
1192 
1193 		cto->ct_header.rqs_entry_type = RQSTYPE_CTIO;
1194 		cto->ct_header.rqs_entry_count = 1;
1195 		cto->ct_iid = cso->init_id;
1196 		cto->ct_iid |= XS_CHANNEL(ccb) << 7;
1197 		cto->ct_tgt = ccb->ccb_h.target_id;
1198 		cto->ct_lun = ccb->ccb_h.target_lun;
1199 		cto->ct_fwhandle = AT_GET_HANDLE(cso->tag_id);
1200 		if (AT_HAS_TAG(cso->tag_id)) {
1201 			cto->ct_tag_val = (u_int8_t) AT_GET_TAG(cso->tag_id);
1202 			cto->ct_flags |= CT_TQAE;
1203 		}
1204 		if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) {
1205 			cto->ct_flags |= CT_NODISC;
1206 		}
1207 		if (cso->dxfer_len == 0) {
1208 			cto->ct_flags |= CT_NO_DATA;
1209 		} else if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1210 			cto->ct_flags |= CT_DATA_IN;
1211 		} else {
1212 			cto->ct_flags |= CT_DATA_OUT;
1213 		}
1214 		if (ccb->ccb_h.flags & CAM_SEND_STATUS) {
1215 			cto->ct_flags |= CT_SENDSTATUS|CT_CCINCR;
1216 			cto->ct_scsi_status = cso->scsi_status;
1217 			cto->ct_resid = cso->resid;
1218 			isp_prt(isp, ISP_LOGTDEBUG0,
1219 			    "CTIO[%x] SCSI STATUS 0x%x resid %d tag_id %x",
1220 			    cto->ct_fwhandle, cso->scsi_status, cso->resid,
1221 			    cso->tag_id);
1222 		}
1223 		ccb->ccb_h.flags &= ~CAM_SEND_SENSE;
1224 		cto->ct_timeout = 10;
1225 		hp = &cto->ct_syshandle;
1226 	}
1227 
1228 	if (isp_save_xs(isp, (XS_T *)ccb, hp)) {
1229 		xpt_print_path(ccb->ccb_h.path);
1230 		kprintf("No XFLIST pointers for isp_target_start_ctio\n");
1231 		return (CAM_RESRC_UNAVAIL);
1232 	}
1233 
1234 
1235 	/*
1236 	 * Call the dma setup routines for this entry (and any subsequent
1237 	 * CTIOs) if there's data to move, and then tell the f/w it's got
1238 	 * new things to play with. As with isp_start's usage of DMA setup,
1239 	 * any swizzling is done in the machine dependent layer. Because
1240 	 * of this, we put the request onto the queue area first in native
1241 	 * format.
1242 	 */
1243 
1244 	save_handle = *hp;
1245 
1246 	switch (ISP_DMASETUP(isp, cso, (ispreq_t *) local, &nxti, optr)) {
1247 	case CMD_QUEUED:
1248 		ISP_ADD_REQUEST(isp, nxti);
1249 		return (CAM_REQ_INPROG);
1250 
1251 	case CMD_EAGAIN:
1252 		ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
1253 		isp_destroy_handle(isp, save_handle);
1254 		return (CAM_RESRC_UNAVAIL);
1255 
1256 	default:
1257 		isp_destroy_handle(isp, save_handle);
1258 		return (XS_ERR(ccb));
1259 	}
1260 }
1261 
1262 static void
1263 isp_refire_putback_atio(void *arg)
1264 {
1265 	crit_enter();
1266 	isp_target_putback_atio(arg);
1267 	crit_exit();
1268 }
1269 
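/*
 * Hand an ATIO back to the firmware so it can be reused for the next
 * incoming command; if the request queue is full, retry from a timeout.
 */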
1270 static void
1271 isp_target_putback_atio(union ccb *ccb)
1272 {
1273 	struct ispsoftc *isp;
1274 	struct ccb_scsiio *cso;
1275 	u_int16_t nxti, optr;
1276 	void *qe;
1277 
1278 	isp = XS_ISP(ccb);
1279 
1280 	if (isp_getrqentry(isp, &nxti, &optr, &qe)) {
1281 		(void) timeout(isp_refire_putback_atio, ccb, 10);
1282 		isp_prt(isp, ISP_LOGWARN,
1283 		    "isp_target_putback_atio: Request Queue Overflow");
1284 		return;
1285 	}
1286 	bzero(qe, QENTRY_LEN);
1287 	cso = &ccb->csio;
1288 	if (IS_FC(isp)) {
1289 		at2_entry_t local, *at = &local;
1290 		MEMZERO(at, sizeof (at2_entry_t));
1291 		at->at_header.rqs_entry_type = RQSTYPE_ATIO2;
1292 		at->at_header.rqs_entry_count = 1;
1293 		if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) != 0) {
1294 			at->at_scclun = (uint16_t) ccb->ccb_h.target_lun;
1295 		} else {
1296 			at->at_lun = (uint8_t) ccb->ccb_h.target_lun;
1297 		}
1298 		at->at_status = CT_OK;
1299 		at->at_rxid = cso->tag_id;
1300 		at->at_iid = cso->ccb_h.target_id;
1301 		isp_put_atio2(isp, at, qe);
1302 	} else {
1303 		at_entry_t local, *at = &local;
1304 		MEMZERO(at, sizeof (at_entry_t));
1305 		at->at_header.rqs_entry_type = RQSTYPE_ATIO;
1306 		at->at_header.rqs_entry_count = 1;
1307 		at->at_iid = cso->init_id;
1308 		at->at_iid |= XS_CHANNEL(ccb) << 7;
1309 		at->at_tgt = cso->ccb_h.target_id;
1310 		at->at_lun = cso->ccb_h.target_lun;
1311 		at->at_status = CT_OK;
1312 		at->at_tag_val = AT_GET_TAG(cso->tag_id);
1313 		at->at_handle = AT_GET_HANDLE(cso->tag_id);
1314 		isp_put_atio(isp, at, qe);
1315 	}
1316 	ISP_TDQE(isp, "isp_target_putback_atio", (int) optr, qe);
1317 	ISP_ADD_REQUEST(isp, nxti);
1318 	isp_complete_ctio(ccb);
1319 }
1320 
1321 static void
1322 isp_complete_ctio(union ccb *ccb)
1323 {
1324 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
1325 		ccb->ccb_h.status |= CAM_REQ_CMP;
1326 	}
1327 	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1328 	xpt_done(ccb);
1329 }
1330 
1331 /*
1332  * Handle ATIO stuff that the generic code can't.
1333  * This means handling CDBs.
1334  */
1335 
1336 static int
1337 isp_handle_platform_atio(struct ispsoftc *isp, at_entry_t *aep)
1338 {
1339 	tstate_t *tptr;
1340 	int status, bus, iswildcard;
1341 	struct ccb_accept_tio *atiop;
1342 
1343 	/*
1344 	 * The firmware status (except for the QLTM_SVALID bit)
1345 	 * indicates why this ATIO was sent to us.
1346 	 *
1347 	 * If QLTM_SVALID is set, the firmware has recommended Sense Data.
1348 	 *
1349 	 * If the DISCONNECTS DISABLED bit is set in the flags field,
1350 	 * we're still connected on the SCSI bus.
1351 	 */
1352 	status = aep->at_status;
1353 	if ((status & ~QLTM_SVALID) == AT_PHASE_ERROR) {
1354 		/*
1355 		 * Bus Phase Sequence error. We should have sense data
1356 		 * suggested by the f/w. I'm not quite sure yet what
1357 		 * to do about this for CAM.
1358 		 */
1359 		isp_prt(isp, ISP_LOGWARN, "PHASE ERROR");
1360 		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1361 		return (0);
1362 	}
1363 	if ((status & ~QLTM_SVALID) != AT_CDB) {
1364 		isp_prt(isp, ISP_LOGWARN, "bad atio (0x%x) leaked to platform",
1365 		    status);
1366 		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1367 		return (0);
1368 	}
1369 
1370 	bus = GET_BUS_VAL(aep->at_iid);
1371 	tptr = get_lun_statep(isp, bus, aep->at_lun);
1372 	if (tptr == NULL) {
1373 		tptr = get_lun_statep(isp, bus, CAM_LUN_WILDCARD);
1374 		iswildcard = 1;
1375 	} else {
1376 		iswildcard = 0;
1377 	}
1378 
1379 	if (tptr == NULL) {
1380 		/*
1381 		 * Because we can't autofeed sense data back with
1382 		 * a command for parallel SCSI, we can't give back
1383 		 * a CHECK CONDITION. We'll give back a BUSY status
1384 		 * instead. This works out okay because the only
1385 		 * time we should, in fact, get this, is in the
1386 		 * case that somebody configured us without the
1387 		 * blackhole driver, so they get what they deserve.
1388 		 */
1389 		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1390 		return (0);
1391 	}
1392 
1393 	atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios);
1394 	if (atiop == NULL) {
1395 		/*
1396 		 * Because we can't autofeed sense data back with
1397 		 * a command for parallel SCSI, we can't give back
1398 		 * a CHECK CONDITION. We'll give back a QUEUE FULL status
1399 		 * instead. This works out okay because the only time we
1400 		 * should, in fact, get this, is in the case that we've
1401 		 * run out of ATIOS.
1402 		 */
1403 		xpt_print_path(tptr->owner);
1404 		isp_prt(isp, ISP_LOGWARN,
1405 		    "no ATIOS for lun %d from initiator %d on channel %d",
1406 		    aep->at_lun, GET_IID_VAL(aep->at_iid), bus);
1407 		if (aep->at_flags & AT_TQAE)
1408 			isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0);
1409 		else
1410 			isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1411 		rls_lun_statep(isp, tptr);
1412 		return (0);
1413 	}
1414 	SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle);
1415 	if (iswildcard) {
1416 		atiop->ccb_h.target_id = aep->at_tgt;
1417 		atiop->ccb_h.target_lun = aep->at_lun;
1418 	}
1419 	if (aep->at_flags & AT_NODISC) {
1420 		atiop->ccb_h.flags = CAM_DIS_DISCONNECT;
1421 	} else {
1422 		atiop->ccb_h.flags = 0;
1423 	}
1424 
1425 	if (status & QLTM_SVALID) {
1426 		size_t amt = imin(QLTM_SENSELEN, sizeof (atiop->sense_data));
1427 		atiop->sense_len = amt;
1428 		MEMCPY(&atiop->sense_data, aep->at_sense, amt);
1429 	} else {
1430 		atiop->sense_len = 0;
1431 	}
1432 
1433 	atiop->init_id = GET_IID_VAL(aep->at_iid);
1434 	atiop->cdb_len = aep->at_cdblen;
1435 	MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, aep->at_cdblen);
1436 	atiop->ccb_h.status = CAM_CDB_RECVD;
1437 	/*
1438 	 * Construct a tag 'id' based upon tag value (which may be 0..255)
1439 	 * and the handle (which we have to preserve).
1440 	 */
1441 	AT_MAKE_TAGID(atiop->tag_id, aep);
1442 	if (aep->at_flags & AT_TQAE) {
1443 		atiop->tag_action = aep->at_tag_type;
1444 		atiop->ccb_h.status |= CAM_TAG_ACTION_VALID;
1445 	}
1446 	xpt_done((union ccb*)atiop);
1447 	isp_prt(isp, ISP_LOGTDEBUG0,
1448 	    "ATIO[%x] CDB=0x%x bus %d iid%d->lun%d tag 0x%x ttype 0x%x %s",
1449 	    aep->at_handle, aep->at_cdb[0] & 0xff, GET_BUS_VAL(aep->at_iid),
1450 	    GET_IID_VAL(aep->at_iid), aep->at_lun, aep->at_tag_val & 0xff,
1451 	    aep->at_tag_type, (aep->at_flags & AT_NODISC)?
1452 	    "nondisc" : "disconnecting");
1453 	rls_lun_statep(isp, tptr);
1454 	return (0);
1455 }
1456 
1457 static int
1458 isp_handle_platform_atio2(struct ispsoftc *isp, at2_entry_t *aep)
1459 {
1460 	lun_id_t lun;
1461 	tstate_t *tptr;
1462 	struct ccb_accept_tio *atiop;
1463 	atio_private_data_t *atp;
1464 
1465 	/*
1466 	 * The firmware status (except for the QLTM_SVALID bit)
1467 	 * indicates why this ATIO was sent to us.
1468 	 *
1469 	 * If QLTM_SVALID is set, the firmware has recommended Sense Data.
1470 	 */
1471 	if ((aep->at_status & ~QLTM_SVALID) != AT_CDB) {
1472 		isp_prt(isp, ISP_LOGWARN,
1473 		    "bogus atio (0x%x) leaked to platform", aep->at_status);
1474 		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1475 		return (0);
1476 	}
1477 
1478 	if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) != 0) {
1479 		lun = aep->at_scclun;
1480 	} else {
1481 		lun = aep->at_lun;
1482 	}
1483 	tptr = get_lun_statep(isp, 0, lun);
1484 	if (tptr == NULL) {
1485 		isp_prt(isp, ISP_LOGWARN, "no state pointer for lun %d", lun);
1486 		tptr = get_lun_statep(isp, 0, CAM_LUN_WILDCARD);
1487 	}
1488 
1489 	if (tptr == NULL) {
1490 		/*
1491 		 * What we'd like to know is whether or not we have a listener
1492 		 * upstream that really hasn't configured yet. If we do, then
1493 		 * we can give a more sensible reply here. If not, then we can
1494 		 * reject this out of hand.
1495 		 *
1496 		 * Choices for what to send were
1497 		 *
1498 		 *	Not Ready, Unit Not Self-Configured Yet
1499 		 *	(0x2,0x3e,0x00)
1500 		 *
1501 		 * for the former and
1502 		 *
1503 		 *	Illegal Request, Logical Unit Not Supported
1504 		 *	(0x5,0x25,0x00)
1505 		 *
1506 		 * for the latter.
1507 		 *
1508 		 * We used to decide whether there was at least one listener
1509 		 * based upon whether the black hole driver was configured.
1510 		 * However, recent config(8) changes have made this hard to do
1511 		 * at this time.
1512 		 *
1513 		 */
1514 		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1515 		return (0);
1516 	}
1517 
1518 	atp = isp_get_atpd(isp, 0);
1519 	atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios);
1520 	if (atiop == NULL || atp == NULL) {
1521 		/*
1522 		 * Because we can't autofeed sense data back with
1523 		 * a command for parallel SCSI, we can't give back
1524 		 * a CHECK CONDITION. We'll give back a QUEUE FULL status
1525 		 * instead. This works out okay because the only time we
1526 		 * should, in fact, get this, is in the case that we've
1527 		 * run out of ATIOS.
1528 		 */
1529 		xpt_print_path(tptr->owner);
1530 		isp_prt(isp, ISP_LOGWARN,
1531 		    "no %s for lun %d from initiator %d",
1532 		    (atp == NULL && atiop == NULL)? "ATIO2s *or* ATPS" :
1533 		    ((atp == NULL)? "ATPs" : "ATIO2s"), lun, aep->at_iid);
1534 		rls_lun_statep(isp, tptr);
1535 		isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0);
1536 		return (0);
1537 	}
1538 	atp->state = ATPD_STATE_ATIO;
1539 	SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle);
1540 	tptr->atio_count--;
1541 	isp_prt(isp, ISP_LOGTDEBUG0, "Take FREE ATIO2 lun %d, count now %d",
1542 	    lun, tptr->atio_count);
1543 
1544 	if (tptr == &isp->isp_osinfo.tsdflt[0]) {
1545 		atiop->ccb_h.target_id =
1546 		    ((fcparam *)isp->isp_param)->isp_loopid;
1547 		atiop->ccb_h.target_lun = lun;
1548 	}
1549 	/*
1550 	 * We don't get 'suggested' sense data as we do with SCSI cards.
1551 	 */
1552 	atiop->sense_len = 0;
1553 
1554 	atiop->init_id = aep->at_iid;
1555 	atiop->cdb_len = ATIO2_CDBLEN;
1556 	MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, ATIO2_CDBLEN);
1557 	atiop->ccb_h.status = CAM_CDB_RECVD;
1558 	atiop->tag_id = aep->at_rxid;
1559 	switch (aep->at_taskflags & ATIO2_TC_ATTR_MASK) {
1560 	case ATIO2_TC_ATTR_SIMPLEQ:
1561 		atiop->tag_action = MSG_SIMPLE_Q_TAG;
1562 		break;
1563 	case ATIO2_TC_ATTR_HEADOFQ:
1564 		atiop->tag_action = MSG_HEAD_OF_Q_TAG;
1565 		break;
1566 	case ATIO2_TC_ATTR_ORDERED:
1567 		atiop->tag_action = MSG_ORDERED_Q_TAG;
1568 		break;
1569 	case ATIO2_TC_ATTR_ACAQ:		/* ?? */
1570 	case ATIO2_TC_ATTR_UNTAGGED:
1571 	default:
1572 		atiop->tag_action = 0;
1573 		break;
1574 	}
1575 	atiop->ccb_h.flags = CAM_TAG_ACTION_VALID;
1576 
1577 	atp->tag = atiop->tag_id;
1578 	atp->lun = lun;
1579 	atp->orig_datalen = aep->at_datalen;
1580 	atp->last_xframt = 0;
1581 	atp->bytes_xfered = 0;
1582 	atp->state = ATPD_STATE_CAM;
1583 	xpt_done((union ccb*)atiop);
1584 
1585 	isp_prt(isp, ISP_LOGTDEBUG0,
1586 	    "ATIO2[%x] CDB=0x%x iid%d->lun%d tattr 0x%x datalen %u",
1587 	    aep->at_rxid, aep->at_cdb[0] & 0xff, aep->at_iid,
1588 	    lun, aep->at_taskflags, aep->at_datalen);
1589 	rls_lun_statep(isp, tptr);
1590 	return (0);
1591 }
1592 
1593 static int
1594 isp_handle_platform_ctio(struct ispsoftc *isp, void *arg)
1595 {
1596 	union ccb *ccb;
1597 	int sentstatus, ok, notify_cam, resid = 0;
1598 	u_int16_t tval;
1599 
1600 	/*
1601 	 * CTIO and CTIO2 are close enough....
1602 	 */
1603 
1604 	ccb = (union ccb *) isp_find_xs(isp, ((ct_entry_t *)arg)->ct_syshandle);
1605 	KASSERT((ccb != NULL), ("null ccb in isp_handle_platform_ctio"));
1606 	isp_destroy_handle(isp, ((ct_entry_t *)arg)->ct_syshandle);
1607 
1608 	if (IS_FC(isp)) {
1609 		ct2_entry_t *ct = arg;
1610 		atio_private_data_t *atp = isp_get_atpd(isp, ct->ct_rxid);
1611 		if (atp == NULL) {
1612 			isp_prt(isp, ISP_LOGERR,
1613 			    "cannot find adjunct for %x after I/O",
1614 			    ct->ct_rxid);
1615 			return (0);
1616 		}
1617 		sentstatus = ct->ct_flags & CT2_SENDSTATUS;
1618 		ok = (ct->ct_status & ~QLTM_SVALID) == CT_OK;
1619 		if (ok && sentstatus && (ccb->ccb_h.flags & CAM_SEND_SENSE)) {
1620 			ccb->ccb_h.status |= CAM_SENT_SENSE;
1621 		}
1622 		notify_cam = ct->ct_header.rqs_seqno & 0x1;
1623 		if ((ct->ct_flags & CT2_DATAMASK) != CT2_NO_DATA) {
1624 			resid = ct->ct_resid;
1625 			atp->bytes_xfered += (atp->last_xframt - resid);
1626 			atp->last_xframt = 0;
1627 		}
1628 		if (sentstatus || !ok) {
1629 			atp->tag = 0;
1630 		}
1631 		isp_prt(isp, ok? ISP_LOGTDEBUG0 : ISP_LOGWARN,
1632 		    "CTIO2[%x] sts 0x%x flg 0x%x sns %d resid %d %s",
1633 		    ct->ct_rxid, ct->ct_status, ct->ct_flags,
1634 		    (ccb->ccb_h.status & CAM_SENT_SENSE) != 0,
1635 		    resid, sentstatus? "FIN" : "MID");
1636 		tval = ct->ct_rxid;
1637 
1638 		/* XXX: should really come after isp_complete_ctio */
1639 		atp->state = ATPD_STATE_PDON;
1640 	} else {
1641 		ct_entry_t *ct = arg;
1642 		sentstatus = ct->ct_flags & CT_SENDSTATUS;
1643 		ok = (ct->ct_status  & ~QLTM_SVALID) == CT_OK;
1644 		/*
1645 		 * We *ought* to be able to get back to the original ATIO
1646 		 * here, but for some reason this gets lost. It's just as
1647 		 * well because it's squirrelled away as part of periph
1648 		 * private data.
1649 		 *
1650 		 * We can live without it as long as we continue to use
1651 		 * the auto-replenish feature for CTIOs.
1652 		 */
1653 		notify_cam = ct->ct_header.rqs_seqno & 0x1;
1654 		if (ct->ct_status & QLTM_SVALID) {
1655 			char *sp = (char *)ct;
1656 			sp += CTIO_SENSE_OFFSET;
1657 			ccb->csio.sense_len =
1658 			    min(sizeof (ccb->csio.sense_data), QLTM_SENSELEN);
1659 			MEMCPY(&ccb->csio.sense_data, sp, ccb->csio.sense_len);
1660 			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
1661 		}
1662 		if ((ct->ct_flags & CT_DATAMASK) != CT_NO_DATA) {
1663 			resid = ct->ct_resid;
1664 		}
1665 		isp_prt(isp, ISP_LOGTDEBUG0,
1666 		    "CTIO[%x] tag %x iid %d lun %d sts %x flg %x resid %d %s",
1667 		    ct->ct_fwhandle, ct->ct_tag_val, ct->ct_iid, ct->ct_lun,
1668 		    ct->ct_status, ct->ct_flags, resid,
1669 		    sentstatus? "FIN" : "MID");
1670 		tval = ct->ct_fwhandle;
1671 	}
1672 	ccb->csio.resid += resid;
1673 
1674 	/*
1675 	 * We're here either because intermediate data transfers are done
1676 	 * and/or the final status CTIO (which may have joined with a
1677 	 * Data Transfer) is done.
1678 	 *
1679 	 * In any case, for this platform, the upper layers figure out
1680 	 * what to do next, so all we do here is collect status and
1681 	 * pass information along. Any DMA handles have already been
1682 	 * freed.
1683 	 */
1684 	if (notify_cam == 0) {
1685 		isp_prt(isp, ISP_LOGTDEBUG0, "  INTER CTIO[0x%x] done", tval);
1686 		return (0);
1687 	}
1688 
1689 	isp_prt(isp, ISP_LOGTDEBUG0, "%s CTIO[0x%x] done",
1690 	    (sentstatus)? "  FINAL " : "MIDTERM ", tval);
1691 
1692 	if (!ok) {
1693 		isp_target_putback_atio(ccb);
1694 	} else {
1695 		isp_complete_ctio(ccb);
1696 
1697 	}
1698 	return (0);
1699 }
1700 
1701 static int
1702 isp_handle_platform_notify_scsi(struct ispsoftc *isp, in_entry_t *inp)
1703 {
1704 	return (0);	/* XXXX */
1705 }
1706 
1707 static int
1708 isp_handle_platform_notify_fc(struct ispsoftc *isp, in_fcentry_t *inp)
1709 {
1710 
1711 	switch (inp->in_status) {
1712 	case IN_PORT_LOGOUT:
1713 		isp_prt(isp, ISP_LOGWARN, "port logout of iid %d",
1714 		   inp->in_iid);
1715 		break;
1716 	case IN_PORT_CHANGED:
1717 		isp_prt(isp, ISP_LOGWARN, "port changed for iid %d",
1718 		   inp->in_iid);
1719 		break;
1720 	case IN_GLOBAL_LOGO:
1721 		isp_prt(isp, ISP_LOGINFO, "all ports logged out");
1722 		break;
1723 	case IN_ABORT_TASK:
1724 	{
1725 		atio_private_data_t *atp = isp_get_atpd(isp, inp->in_seqid);
1726 		struct ccb_immed_notify *inot = NULL;
1727 
1728 		if (atp) {
1729 			tstate_t *tptr = get_lun_statep(isp, 0, atp->lun);
1730 			if (tptr) {
1731 				inot = (struct ccb_immed_notify *)
1732 				    SLIST_FIRST(&tptr->inots);
1733 				if (inot) {
1734 					SLIST_REMOVE_HEAD(&tptr->inots,
1735 					    sim_links.sle);
1736 				}
1737 			}
1738 			isp_prt(isp, ISP_LOGWARN,
1739 			   "abort task RX_ID %x IID %d state %d",
1740 			   inp->in_seqid, inp->in_iid, atp->state);
1741 		} else {
1742 			isp_prt(isp, ISP_LOGWARN,
1743 			   "abort task RX_ID %x from iid %d, state unknown",
1744 			   inp->in_seqid, inp->in_iid);
1745 		}
1746 		if (inot) {
1747 			inot->initiator_id = inp->in_iid;
1748 			inot->sense_len = 0;
1749 			inot->message_args[0] = MSG_ABORT_TAG;
1750 			inot->message_args[1] = inp->in_seqid & 0xff;
1751 			inot->message_args[2] = (inp->in_seqid >> 8) & 0xff;
1752 			inot->ccb_h.status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN;
1753 			xpt_done((union ccb *)inot);
1754 		}
1755 		break;
1756 	}
1757 	default:
1758 		break;
1759 	}
1760 	return (0);
1761 }
1762 #endif
1763 
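/*
 * CAM async callback: on AC_LOST_DEVICE for parallel SCSI we push
 * safe (nvram or default) negotiation goals back at the target so the
 * next device at that ID starts from sane settings.
 */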
1764 static void
1765 isp_cam_async(void *cbarg, u_int32_t code, struct cam_path *path, void *arg)
1766 {
1767 	struct cam_sim *sim;
1768 	struct ispsoftc *isp;
1769 
1770 	sim = (struct cam_sim *)cbarg;
1771 	isp = (struct ispsoftc *) cam_sim_softc(sim);
1772 	switch (code) {
1773 	case AC_LOST_DEVICE:
1774 		if (IS_SCSI(isp)) {
1775 			u_int16_t oflags, nflags;
1776 			sdparam *sdp = isp->isp_param;
1777 			int tgt;
1778 
1779 			tgt = xpt_path_target_id(path);
1780 			if (tgt >= 0) {
1781 				sdp += cam_sim_bus(sim);
1782 				ISP_LOCK(isp);
1783 				nflags = sdp->isp_devparam[tgt].nvrm_flags;
1784 #ifndef	ISP_TARGET_MODE
1785 				nflags &= DPARM_SAFE_DFLT;
1786 				if (isp->isp_loaded_fw) {
1787 					nflags |= DPARM_NARROW | DPARM_ASYNC;
1788 				}
1789 #else
1790 				nflags = DPARM_DEFAULT;
1791 #endif
1792 				oflags = sdp->isp_devparam[tgt].goal_flags;
1793 				sdp->isp_devparam[tgt].goal_flags = nflags;
1794 				sdp->isp_devparam[tgt].dev_update = 1;
1795 				isp->isp_update |= (1 << cam_sim_bus(sim));
1796 				(void) isp_control(isp,
1797 				    ISPCTL_UPDATE_PARAMS, NULL);
1798 				sdp->isp_devparam[tgt].goal_flags = oflags;
1799 				ISP_UNLOCK(isp);
1800 			}
1801 		}
1802 		break;
1803 	default:
1804 		isp_prt(isp, ISP_LOGWARN, "isp_cam_async: Code 0x%x", code);
1805 		break;
1806 	}
1807 }
1808 
1809 static void
1810 isp_poll(struct cam_sim *sim)
1811 {
1812 	struct ispsoftc *isp = cam_sim_softc(sim);
1813 	u_int16_t isr, sema, mbox;
1814 
1815 	ISP_LOCK(isp);
1816 	if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
1817 		isp_intr(isp, isr, sema, mbox);
1818 	}
1819 	ISP_UNLOCK(isp);
1820 }
1821 
1822 
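/*
 * Per-command timeout handler. It first polls the chip in case the
 * completion is merely late; if the command is still outstanding it
 * queues a SYNC_ALL marker and grants one more grace period before
 * declaring the command dead and completing it with CAM_CMD_TIMEOUT.
 */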
1823 static void
1824 isp_watchdog(void *arg)
1825 {
1826 	XS_T *xs = arg;
1827 	struct ispsoftc *isp = XS_ISP(xs);
1828 	u_int32_t handle;
1829 	int iok;
1830 
1831 	/*
1832 	 * We've decided this command is dead. Make sure we're not trying
1833 	 * to kill a command that's already dead by getting its handle and
1834 	 * seeing whether it's still alive.
1835 	 */
1836 	ISP_LOCK(isp);
1837 	iok = isp->isp_osinfo.intsok;
1838 	isp->isp_osinfo.intsok = 0;
1839 	handle = isp_find_handle(isp, xs);
1840 	if (handle) {
1841 		u_int16_t isr, sema, mbox;
1842 
1843 		if (XS_CMD_DONE_P(xs)) {
1844 			isp_prt(isp, ISP_LOGDEBUG1,
1845 			    "watchdog found done cmd (handle 0x%x)", handle);
1846 			ISP_UNLOCK(isp);
1847 			return;
1848 		}
1849 
1850 		if (XS_CMD_WDOG_P(xs)) {
1851 			isp_prt(isp, ISP_LOGDEBUG2,
1852 			    "recursive watchdog (handle 0x%x)", handle);
1853 			ISP_UNLOCK(isp);
1854 			return;
1855 		}
1856 
1857 		XS_CMD_S_WDOG(xs);
1858 		if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
1859 			isp_intr(isp, isr, sema, mbox);
1860 		}
1861 		if (XS_CMD_DONE_P(xs)) {
1862 			isp_prt(isp, ISP_LOGDEBUG2,
1863 			    "watchdog cleanup for handle 0x%x", handle);
1864 			xpt_done((union ccb *) xs);
1865 		} else if (XS_CMD_GRACE_P(xs)) {
1866 			/*
1867 			 * Make sure the command is *really* dead before we
1868 			 * release the handle (and DMA resources) for reuse.
1869 			 */
1870 			(void) isp_control(isp, ISPCTL_ABORT_CMD, arg);
1871 
1872 			/*
1873 			 * After this point, the command is really dead.
1874 			 */
1875 			if (XS_XFRLEN(xs)) {
1876 				ISP_DMAFREE(isp, xs, handle);
1877 			}
1878 			isp_destroy_handle(isp, handle);
1879 			xpt_print_path(xs->ccb_h.path);
1880 			isp_prt(isp, ISP_LOGWARN,
1881 			    "watchdog timeout for handle 0x%x", handle);
1882 			XS_SETERR(xs, CAM_CMD_TIMEOUT);
1883 			XS_CMD_C_WDOG(xs);
1884 			isp_done(xs);
1885 		} else {
1886 			u_int16_t nxti, optr;
1887 			ispreq_t local, *mp = &local, *qe;
1888 
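			/*
			 * Not complete and no grace period used yet: clear
			 * the watchdog flag, re-arm ourselves for another
			 * second and, if we can get a request queue entry,
			 * mark the grace period and push a SYNC_ALL marker
			 * for this channel.
			 */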
1889 			XS_CMD_C_WDOG(xs);
1890 			callout_reset(&xs->ccb_h.timeout_ch, hz,
1891 				      isp_watchdog, xs);
1892 			if (isp_getrqentry(isp, &nxti, &optr, (void *) &qe)) {
1893 				ISP_UNLOCK(isp);
1894 				return;
1895 			}
1896 			XS_CMD_S_GRACE(xs);
1897 			MEMZERO((void *) mp, sizeof (*mp));
1898 			mp->req_header.rqs_entry_count = 1;
1899 			mp->req_header.rqs_entry_type = RQSTYPE_MARKER;
1900 			mp->req_modifier = SYNC_ALL;
1901 			mp->req_target = XS_CHANNEL(xs) << 7;
1902 			isp_put_request(isp, mp, qe);
1903 			ISP_ADD_REQUEST(isp, nxti);
1904 		}
1905 	} else {
1906 		isp_prt(isp, ISP_LOGDEBUG2, "watchdog with no command");
1907 	}
1908 	isp->isp_osinfo.intsok = iok;
1909 	ISP_UNLOCK(isp);
1910 }
1911 
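/*
 * Fibre Channel worker thread. It waits for loop/fabric state to become
 * good (giving up early if the loop has never been seen), releases a
 * SIMQ that was frozen for loopdown, and then sleeps until a change
 * notification or a CMD_RQLATER return from isp_start wakes it again.
 */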
1912 static void
1913 isp_kthread(void *arg)
1914 {
1915 	struct ispsoftc *isp = arg;
1916 
1917 	get_mplock();
1918 	crit_enter();
1919 	isp->isp_osinfo.intsok = 1;
1920 
1921 	/*
1922 	 * The first loop runs until we have gotten good Fibre Channel
1923 	 * state (or until we decide to give up waiting on this pass).
1924 	 */
1925 	for (;;) {
1926 		int wasfrozen;
1927 
1928 		isp_prt(isp, ISP_LOGDEBUG0, "kthread: checking FC state");
1929 		while (isp_fc_runstate(isp, 2 * 1000000) != 0) {
1930 			isp_prt(isp, ISP_LOGDEBUG0, "kthread: FC state ungood");
1931 			if (FCPARAM(isp)->isp_fwstate != FW_READY ||
1932 			    FCPARAM(isp)->isp_loopstate < LOOP_PDB_RCVD) {
1933 				if (FCPARAM(isp)->loop_seen_once == 0 ||
1934 				    isp->isp_osinfo.ktmature == 0) {
1935 					break;
1936 				}
1937 			}
1938 			tsleep(isp_kthread, 0, "isp_fcthrd", hz);
1939 
1940 		}
1941 
1942 		/*
1943 		 * Even if we didn't get good loop state we may be
1944 		 * unfreezing the SIMQ so that we can kill off
1945 		 * commands (if we've never seen loop before, for example).
1946 		 */
1947 		isp->isp_osinfo.ktmature = 1;
1948 		wasfrozen = isp->isp_osinfo.simqfrozen & SIMQFRZ_LOOPDOWN;
1949 		isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_LOOPDOWN;
1950 		if (wasfrozen && isp->isp_osinfo.simqfrozen == 0) {
1951 			isp_prt(isp, ISP_LOGDEBUG0, "kthread: releasing simq");
1952 			ISPLOCK_2_CAMLOCK(isp);
1953 			xpt_release_simq(isp->isp_sim, 1);
1954 			CAMLOCK_2_ISPLOCK(isp);
1955 		}
1956 		tsleep(&isp->isp_osinfo.kthread, 0, "isp_fc_worker", 0);
1957 		isp_prt(isp, ISP_LOGDEBUG0, "kthread: waiting until called");
1958 	}
1959 	rel_mplock();
1960 }
1961 
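/*
 * Main CAM action entry point: dispatch on the CCB function code. New
 * SCSI I/O, target mode resources, resets, transfer setting changes and
 * path inquiries all enter the driver here.
 */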
1962 static void
1963 isp_action(struct cam_sim *sim, union ccb *ccb)
1964 {
1965 	int bus, tgt, error;
1966 	struct ispsoftc *isp;
1967 	struct ccb_trans_settings *cts;
1968 
1969 	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("isp_action\n"));
1970 
1971 	isp = (struct ispsoftc *)cam_sim_softc(sim);
1972 	ccb->ccb_h.sim_priv.entries[0].field = 0;
1973 	ccb->ccb_h.sim_priv.entries[1].ptr = isp;
1974 	if (isp->isp_state != ISP_RUNSTATE &&
1975 	    ccb->ccb_h.func_code == XPT_SCSI_IO) {
1976 		CAMLOCK_2_ISPLOCK(isp);
1977 		isp_init(isp);
1978 		if (isp->isp_state != ISP_INITSTATE) {
1979 			ISP_UNLOCK(isp);
1980 			/*
1981 			 * Lie. Say it was a selection timeout.
1982 			 */
1983 			ccb->ccb_h.status = CAM_SEL_TIMEOUT | CAM_DEV_QFRZN;
1984 			xpt_freeze_devq(ccb->ccb_h.path, 1);
1985 			xpt_done(ccb);
1986 			return;
1987 		}
1988 		isp->isp_state = ISP_RUNSTATE;
1989 		ISPLOCK_2_CAMLOCK(isp);
1990 	}
1991 	isp_prt(isp, ISP_LOGDEBUG2, "isp_action code %x", ccb->ccb_h.func_code);
1992 
1993 
1994 	switch (ccb->ccb_h.func_code) {
1995 	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
1996 		/*
1997 		 * Do a couple of preliminary checks...
1998 		 */
1999 		if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
2000 			if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) {
2001 				ccb->ccb_h.status = CAM_REQ_INVALID;
2002 				xpt_done(ccb);
2003 				break;
2004 			}
2005 		}
2006 #ifdef	DIAGNOSTIC
2007 		if (ccb->ccb_h.target_id > (ISP_MAX_TARGETS(isp) - 1)) {
2008 			ccb->ccb_h.status = CAM_PATH_INVALID;
2009 		} else if (ccb->ccb_h.target_lun > (ISP_MAX_LUNS(isp) - 1)) {
2010 			ccb->ccb_h.status = CAM_PATH_INVALID;
2011 		}
2012 		if (ccb->ccb_h.status == CAM_PATH_INVALID) {
2013 			isp_prt(isp, ISP_LOGERR,
2014 			    "invalid tgt/lun (%d.%d) in XPT_SCSI_IO",
2015 			    ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
2016 			xpt_done(ccb);
2017 			break;
2018 		}
2019 #endif
2020 		((struct ccb_scsiio *) ccb)->scsi_status = SCSI_STATUS_OK;
2021 		CAMLOCK_2_ISPLOCK(isp);
2022 		error = isp_start((XS_T *) ccb);
2023 		switch (error) {
2024 		case CMD_QUEUED:
2025 			ccb->ccb_h.status |= CAM_SIM_QUEUED;
2026 			if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
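				/*
				 * Convert the CCB timeout (milliseconds, or
				 * the 60 second default) to ticks, pad it by
				 * a couple of seconds of our own slop, and
				 * clamp it to a positive 32 bit tick count
				 * before arming the watchdog.
				 */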
2027 				u_int64_t ticks = (u_int64_t) hz;
2028 				if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT)
2029 					ticks = 60 * 1000 * ticks;
2030 				else
2031 					ticks = ccb->ccb_h.timeout * hz;
2032 				ticks = ((ticks + 999) / 1000) + hz + hz;
2033 				if (ticks >= 0x80000000) {
2034 					isp_prt(isp, ISP_LOGERR,
2035 					    "timeout overflow");
2036 					ticks = 0x7fffffff;
2037 				}
2038 				callout_reset(&ccb->ccb_h.timeout_ch, ticks,
2039 				    isp_watchdog, ccb);
2040 			}
2041 			ISPLOCK_2_CAMLOCK(isp);
2042 			break;
2043 		case CMD_RQLATER:
2044 			/*
2045 			 * This can only happen for Fibre Channel
2046 			 */
2047 			KASSERT((IS_FC(isp)), ("CMD_RQLATER for FC only"));
2048 			if (FCPARAM(isp)->loop_seen_once == 0 &&
2049 			    isp->isp_osinfo.ktmature) {
2050 				ISPLOCK_2_CAMLOCK(isp);
2051 				XS_SETERR(ccb, CAM_SEL_TIMEOUT);
2052 				xpt_done(ccb);
2053 				break;
2054 			}
2055 			wakeup(&isp->isp_osinfo.kthread);
2056 			isp_freeze_loopdown(isp, "isp_action(RQLATER)");
2057 			isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN;
2058 			XS_SETERR(ccb, CAM_REQUEUE_REQ);
2059 			ISPLOCK_2_CAMLOCK(isp);
2060 			xpt_done(ccb);
2061 			break;
2062 		case CMD_EAGAIN:
2063 			XS_SETERR(ccb, CAM_REQUEUE_REQ);
2064 			ISPLOCK_2_CAMLOCK(isp);
2065 			xpt_done(ccb);
2066 			break;
2067 		case CMD_COMPLETE:
2068 			isp_done((struct ccb_scsiio *) ccb);
2069 			ISPLOCK_2_CAMLOCK(isp);
2070 			break;
2071 		default:
2072 			isp_prt(isp, ISP_LOGERR,
2073 			    "What's this? 0x%x at %d in file %s",
2074 			    error, __LINE__, __FILE__);
2075 			XS_SETERR(ccb, CAM_REQ_CMP_ERR);
2076 			xpt_done(ccb);
2077 			ISPLOCK_2_CAMLOCK(isp);
2078 		}
2079 		break;
2080 
2081 #ifdef	ISP_TARGET_MODE
2082 	case XPT_EN_LUN:		/* Enable LUN as a target */
2083 	{
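		/*
		 * Temporarily clear intsok (as the watchdog does) so that
		 * nothing tries to sleep on our behalf while the lun is
		 * being enabled or disabled.
		 */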
2084 		int iok;
2085 		CAMLOCK_2_ISPLOCK(isp);
2086 		iok = isp->isp_osinfo.intsok;
2087 		isp->isp_osinfo.intsok = 0;
2088 		isp_en_lun(isp, ccb);
2089 		isp->isp_osinfo.intsok = iok;
2090 		ISPLOCK_2_CAMLOCK(isp);
2091 		xpt_done(ccb);
2092 		break;
2093 	}
2094 	case XPT_NOTIFY_ACK:		/* recycle notify ack */
2095 	case XPT_IMMED_NOTIFY:		/* Add Immediate Notify Resource */
2096 	case XPT_ACCEPT_TARGET_IO:	/* Add Accept Target IO Resource */
2097 	{
2098 		tstate_t *tptr =
2099 		    get_lun_statep(isp, XS_CHANNEL(ccb), ccb->ccb_h.target_lun);
2100 		if (tptr == NULL) {
2101 			ccb->ccb_h.status = CAM_LUN_INVALID;
2102 			xpt_done(ccb);
2103 			break;
2104 		}
2105 		ccb->ccb_h.sim_priv.entries[0].field = 0;
2106 		ccb->ccb_h.sim_priv.entries[1].ptr = isp;
2107 		ccb->ccb_h.flags = 0;
2108 
2109 		CAMLOCK_2_ISPLOCK(isp);
2110 		if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
2111 			/*
2112 			 * Note that the command itself may not be done;
2113 			 * it may not even have had the first CTIO sent.
2114 			 */
2115 			tptr->atio_count++;
2116 			isp_prt(isp, ISP_LOGTDEBUG0,
2117 			    "Put FREE ATIO2, lun %d, count now %d",
2118 			    ccb->ccb_h.target_lun, tptr->atio_count);
2119 			SLIST_INSERT_HEAD(&tptr->atios, &ccb->ccb_h,
2120 			    sim_links.sle);
2121 		} else if (ccb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
2122 			SLIST_INSERT_HEAD(&tptr->inots, &ccb->ccb_h,
2123 			    sim_links.sle);
2124 		} else {
2125 			;
2126 		}
2127 		rls_lun_statep(isp, tptr);
2128 		ccb->ccb_h.status = CAM_REQ_INPROG;
2129 		ISPLOCK_2_CAMLOCK(isp);
2130 		break;
2131 	}
2132 	case XPT_CONT_TARGET_IO:
2133 	{
2134 		CAMLOCK_2_ISPLOCK(isp);
2135 		ccb->ccb_h.status = isp_target_start_ctio(isp, ccb);
2136 		if (ccb->ccb_h.status != CAM_REQ_INPROG) {
2137 			isp_prt(isp, ISP_LOGWARN,
2138 			    "XPT_CONT_TARGET_IO: status 0x%x",
2139 			    ccb->ccb_h.status);
2140 			XS_SETERR(ccb, CAM_REQUEUE_REQ);
2141 			ISPLOCK_2_CAMLOCK(isp);
2142 			xpt_done(ccb);
2143 		} else {
2144 			ISPLOCK_2_CAMLOCK(isp);
2145 			ccb->ccb_h.status |= CAM_SIM_QUEUED;
2146 		}
2147 		break;
2148 	}
2149 #endif
2150 	case XPT_RESET_DEV:		/* BDR the specified SCSI device */
2151 
2152 		bus = cam_sim_bus(xpt_path_sim(ccb->ccb_h.path));
2153 		tgt = ccb->ccb_h.target_id;
2154 		tgt |= (bus << 16);
2155 
2156 		CAMLOCK_2_ISPLOCK(isp);
2157 		error = isp_control(isp, ISPCTL_RESET_DEV, &tgt);
2158 		ISPLOCK_2_CAMLOCK(isp);
2159 		if (error) {
2160 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2161 		} else {
2162 			ccb->ccb_h.status = CAM_REQ_CMP;
2163 		}
2164 		xpt_done(ccb);
2165 		break;
2166 	case XPT_ABORT:			/* Abort the specified CCB */
2167 	{
2168 		union ccb *accb = ccb->cab.abort_ccb;
2169 		CAMLOCK_2_ISPLOCK(isp);
2170 		switch (accb->ccb_h.func_code) {
2171 #ifdef	ISP_TARGET_MODE
2172 		case XPT_ACCEPT_TARGET_IO:
2173 		case XPT_IMMED_NOTIFY:
2174 			ccb->ccb_h.status = isp_abort_tgt_ccb(isp, ccb);
2175 			break;
2176 		case XPT_CONT_TARGET_IO:
2177 			isp_prt(isp, ISP_LOGERR, "cannot abort CTIOs yet");
2178 			ccb->ccb_h.status = CAM_UA_ABORT;
2179 			break;
2180 #endif
2181 		case XPT_SCSI_IO:
2182 			error = isp_control(isp, ISPCTL_ABORT_CMD, ccb);
2183 			if (error) {
2184 				ccb->ccb_h.status = CAM_UA_ABORT;
2185 			} else {
2186 				ccb->ccb_h.status = CAM_REQ_CMP;
2187 			}
2188 			break;
2189 		default:
2190 			ccb->ccb_h.status = CAM_REQ_INVALID;
2191 			break;
2192 		}
2193 		ISPLOCK_2_CAMLOCK(isp);
2194 		xpt_done(ccb);
2195 		break;
2196 	}
2197 #define	IS_CURRENT_SETTINGS(c)	(c->type == CTS_TYPE_CURRENT_SETTINGS)
2198 	case XPT_SET_TRAN_SETTINGS:	/* Nexus Settings */
2199 		cts = &ccb->cts;
2200 		if (!IS_CURRENT_SETTINGS(cts)) {
2201 			ccb->ccb_h.status = CAM_REQ_INVALID;
2202 			xpt_done(ccb);
2203 			break;
2204 		}
2205 		tgt = cts->ccb_h.target_id;
2206 		CAMLOCK_2_ISPLOCK(isp);
2207 		if (IS_SCSI(isp)) {
2208 			struct ccb_trans_settings_scsi *scsi =
2209 			    &cts->proto_specific.scsi;
2210 			struct ccb_trans_settings_spi *spi =
2211 			    &cts->xport_specific.spi;
2212 			sdparam *sdp = isp->isp_param;
2213 			u_int16_t *dptr;
2214 
2215 			bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path));
2216 			sdp += bus;
2217 			/*
2218 			 * We always update (internally) from goal_flags
2219 			 * so any request to change settings just gets
2220 			 * vectored to that location.
2221 			 */
2222 			dptr = &sdp->isp_devparam[tgt].goal_flags;
2223 
2224 			if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
2225 				if ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0)
2226 					*dptr |= DPARM_DISC;
2227 				else
2228 					*dptr &= ~DPARM_DISC;
2229 			}
2230 
2231 			if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
2232 				if ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0)
2233 					*dptr |= DPARM_TQING;
2234 				else
2235 					*dptr &= ~DPARM_TQING;
2236 			}
2237 
2238 			if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
2239 				if (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT)
2240 					*dptr |= DPARM_WIDE;
2241 				else
2242 					*dptr &= ~DPARM_WIDE;
2243 			}
2244 
2245 			/*
2246 			 * XXX: FIX ME
2247 			 */
2248 			if ((spi->valid & CTS_SPI_VALID_SYNC_OFFSET) &&
2249 			    (spi->valid & CTS_SPI_VALID_SYNC_RATE)) {
2250 				*dptr |= DPARM_SYNC;
2251 				isp_prt(isp, ISP_LOGDEBUG0,
2252 				   "enabling synchronous mode, but ignoring "
2253 				   "setting to period 0x%x offset 0x%x",
2254 				   spi->sync_period, spi->sync_offset);
2255 			} else if (spi->sync_period && spi->sync_offset) {
2256 				*dptr |= DPARM_SYNC;
2257 				isp_prt(isp, ISP_LOGDEBUG0,
2258 				   "enabling synchronous mode (1), but ignoring"
2259 				   " setting to period 0x%x offset 0x%x",
2260 				   spi->sync_period, spi->sync_offset);
2261 			} else {
2262 				*dptr &= ~DPARM_SYNC;
2263 			}
2264 			isp_prt(isp, ISP_LOGDEBUG0,
2265 			    "SET bus %d targ %d to flags %x off %x per %x",
2266 			    bus, tgt, sdp->isp_devparam[tgt].goal_flags,
2267 			    sdp->isp_devparam[tgt].goal_offset,
2268 			    sdp->isp_devparam[tgt].goal_period);
2269 			sdp->isp_devparam[tgt].dev_update = 1;
2270 			isp->isp_update |= (1 << bus);
2271 		}
2272 		ISPLOCK_2_CAMLOCK(isp);
2273 		ccb->ccb_h.status = CAM_REQ_CMP;
2274 		xpt_done(ccb);
2275 		break;
2276 	case XPT_GET_TRAN_SETTINGS:
2277 		cts = &ccb->cts;
2278 		tgt = cts->ccb_h.target_id;
2279 		CAMLOCK_2_ISPLOCK(isp);
2280 		if (IS_FC(isp)) {
2281 			fcparam *fcp = isp->isp_param;
2282 			struct ccb_trans_settings_fc *fc =
2283 			    &cts->xport_specific.fc;
2284 
2285 			cts->protocol = PROTO_SCSI;
2286 			cts->protocol_version = SCSI_REV_2;
2287 			cts->transport = XPORT_FC;
2288 			cts->transport_version = 0;
2289 
2290 			fc->valid = CTS_FC_VALID_SPEED;
2291 			fc->bitrate = 100000;
2292 			if (tgt > 0 && tgt < MAX_FC_TARG) {
2293 				struct lportdb *lp = &fcp->portdb[tgt];
2294 				fc->wwnn = lp->node_wwn;
2295 				fc->wwpn = lp->port_wwn;
2296 				fc->port = lp->portid;
2297 				fc->valid |= CTS_FC_VALID_WWNN |
2298 				    CTS_FC_VALID_WWPN | CTS_FC_VALID_PORT;
2299 			}
2300 		} else {
2301 			struct ccb_trans_settings_scsi *scsi =
2302 			    &cts->proto_specific.scsi;
2303 			struct ccb_trans_settings_spi *spi =
2304 			    &cts->xport_specific.spi;
2305 			sdparam *sdp = isp->isp_param;
2306 			int bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path));
2307 			u_int16_t dval, pval, oval;
2308 
2309 			sdp += bus;
2310 
2311 			if (IS_CURRENT_SETTINGS(cts)) {
2312 				sdp->isp_devparam[tgt].dev_refresh = 1;
2313 				isp->isp_update |= (1 << bus);
2314 				(void) isp_control(isp, ISPCTL_UPDATE_PARAMS,
2315 				    NULL);
2316 				dval = sdp->isp_devparam[tgt].actv_flags;
2317 				oval = sdp->isp_devparam[tgt].actv_offset;
2318 				pval = sdp->isp_devparam[tgt].actv_period;
2319 			} else {
2320 				dval = sdp->isp_devparam[tgt].nvrm_flags;
2321 				oval = sdp->isp_devparam[tgt].nvrm_offset;
2322 				pval = sdp->isp_devparam[tgt].nvrm_period;
2323 			}
2324 
2325 			cts->protocol = PROTO_SCSI;
2326 			cts->protocol_version = SCSI_REV_2;
2327 			cts->transport = XPORT_SPI;
2328 			cts->transport_version = 2;
2329 
2330 			scsi->flags &= ~CTS_SCSI_FLAGS_TAG_ENB;
2331 			spi->flags &= ~CTS_SPI_FLAGS_DISC_ENB;
2332 			if (dval & DPARM_DISC) {
2333 				spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
2334 			}
2335 			if (dval & DPARM_TQING) {
2336 				scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
2337 			}
2338 			if ((dval & DPARM_SYNC) && oval != 0) {
2339 				spi->sync_offset = oval;
2340 				spi->sync_period = pval;
2341 				spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
2342 				spi->valid |= CTS_SPI_VALID_SYNC_RATE;
2343 			}
2344 			spi->valid |= CTS_SPI_VALID_BUS_WIDTH;
2345 			if (dval & DPARM_WIDE) {
2346 				spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
2347 			} else {
2348 				spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2349 			}
2350 			if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
2351 				scsi->valid = CTS_SCSI_VALID_TQ;
2352 				spi->valid |= CTS_SPI_VALID_DISC;
2353 			} else {
2354 				scsi->valid = 0;
2355 			}
2356 			isp_prt(isp, ISP_LOGDEBUG0,
2357 			    "GET %s bus %d targ %d to flags %x off %x per %x",
2358 			    IS_CURRENT_SETTINGS(cts)? "ACTIVE" : "NVRAM",
2359 			    bus, tgt, dval, oval, pval);
2360 		}
2361 		ISPLOCK_2_CAMLOCK(isp);
2362 		ccb->ccb_h.status = CAM_REQ_CMP;
2363 		xpt_done(ccb);
2364 		break;
2365 
2366 	case XPT_CALC_GEOMETRY:
2367 	{
2368 		struct ccb_calc_geometry *ccg;
2369 		u_int32_t secs_per_cylinder;
2370 		u_int32_t size_mb;
2371 
2372 		ccg = &ccb->ccg;
2373 		if (ccg->block_size == 0) {
2374 			isp_prt(isp, ISP_LOGERR,
2375 			    "%d.%d XPT_CALC_GEOMETRY block size 0?",
2376 			    ccg->ccb_h.target_id, ccg->ccb_h.target_lun);
2377 			ccb->ccb_h.status = CAM_REQ_INVALID;
2378 			xpt_done(ccb);
2379 			break;
2380 		}
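		/*
		 * No real geometry to report, so synthesize the usual CAM
		 * fake: 255 heads and 63 sectors per track for volumes over
		 * 1GB, 64 and 32 otherwise, and derive cylinders from that.
		 */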
2381 		size_mb = ccg->volume_size /((1024L * 1024L) / ccg->block_size);
2382 		if (size_mb > 1024) {
2383 			ccg->heads = 255;
2384 			ccg->secs_per_track = 63;
2385 		} else {
2386 			ccg->heads = 64;
2387 			ccg->secs_per_track = 32;
2388 		}
2389 		secs_per_cylinder = ccg->heads * ccg->secs_per_track;
2390 		ccg->cylinders = ccg->volume_size / secs_per_cylinder;
2391 		ccb->ccb_h.status = CAM_REQ_CMP;
2392 		xpt_done(ccb);
2393 		break;
2394 	}
2395 	case XPT_RESET_BUS:		/* Reset the specified bus */
2396 		bus = cam_sim_bus(sim);
2397 		CAMLOCK_2_ISPLOCK(isp);
2398 		error = isp_control(isp, ISPCTL_RESET_BUS, &bus);
2399 		ISPLOCK_2_CAMLOCK(isp);
2400 		if (error)
2401 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2402 		else {
2403 			if (cam_sim_bus(sim) && isp->isp_path2 != NULL)
2404 				xpt_async(AC_BUS_RESET, isp->isp_path2, NULL);
2405 			else if (isp->isp_path != NULL)
2406 				xpt_async(AC_BUS_RESET, isp->isp_path, NULL);
2407 			ccb->ccb_h.status = CAM_REQ_CMP;
2408 		}
2409 		xpt_done(ccb);
2410 		break;
2411 
2412 	case XPT_TERM_IO:		/* Terminate the I/O process */
2413 		ccb->ccb_h.status = CAM_REQ_INVALID;
2414 		xpt_done(ccb);
2415 		break;
2416 
2417 	case XPT_PATH_INQ:		/* Path routing inquiry */
2418 	{
2419 		struct ccb_pathinq *cpi = &ccb->cpi;
2420 
2421 		cpi->version_num = 1;
2422 #ifdef	ISP_TARGET_MODE
2423 		cpi->target_sprt = PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO;
2424 #else
2425 		cpi->target_sprt = 0;
2426 #endif
2427 		cpi->hba_eng_cnt = 0;
2428 		cpi->max_target = ISP_MAX_TARGETS(isp) - 1;
2429 		cpi->max_lun = ISP_MAX_LUNS(isp) - 1;
2430 		cpi->bus_id = cam_sim_bus(sim);
2431 		if (IS_FC(isp)) {
2432 			cpi->hba_misc = PIM_NOBUSRESET;
2433 			/*
2434 			 * Because our loop ID can shift from time to time,
2435 			 * make our initiator ID out of range of our bus.
2436 			 */
2437 			cpi->initiator_id = cpi->max_target + 1;
2438 
2439 			/*
2440 			 * Set base transfer capabilities for Fibre Channel.
2441 			 * Technically not correct because we don't know
2442 			 * what media we're running on top of, but we'll
2443 			 * look good saying 100MB/s (200MB/s for 2Gb links).
2444 			 */
2445 			if (FCPARAM(isp)->isp_gbspeed == 2)
2446 				cpi->base_transfer_speed = 200000;
2447 			else
2448 				cpi->base_transfer_speed = 100000;
2449 			cpi->hba_inquiry = PI_TAG_ABLE;
2450 			cpi->transport = XPORT_FC;
2451 			cpi->transport_version = 0;	/* WHAT'S THIS FOR? */
2452 		} else {
2453 			sdparam *sdp = isp->isp_param;
2454 			sdp += cam_sim_bus(xpt_path_sim(cpi->ccb_h.path));
2455 			cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
2456 			cpi->hba_misc = 0;
2457 			cpi->initiator_id = sdp->isp_initiator_id;
2458 			cpi->base_transfer_speed = 3300;
2459 			cpi->transport = XPORT_SPI;
2460 			cpi->transport_version = 2;	/* WHAT'S THIS FOR? */
2461 		}
2462 		cpi->protocol = PROTO_SCSI;
2463 		cpi->protocol_version = SCSI_REV_2;
2464 		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
2465 		strncpy(cpi->hba_vid, "Qlogic", HBA_IDLEN);
2466 		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
2467 		cpi->unit_number = cam_sim_unit(sim);
2468 		cpi->ccb_h.status = CAM_REQ_CMP;
2469 		xpt_done(ccb);
2470 		break;
2471 	}
2472 	default:
2473 		ccb->ccb_h.status = CAM_REQ_INVALID;
2474 		xpt_done(ccb);
2475 		break;
2476 	}
2477 }
2478 
2479 #define	ISPDDB	(CAM_DEBUG_INFO|CAM_DEBUG_TRACE|CAM_DEBUG_CDB)
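/*
 * Command completion from the core driver: map the result onto CAM
 * status bits, freeze the device queue on errors that have not already
 * frozen it, then stop the watchdog callout and hand the CCB back to
 * CAM unless the watchdog currently owns the command.
 */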
2480 void
2481 isp_done(struct ccb_scsiio *sccb)
2482 {
2483 	struct ispsoftc *isp = XS_ISP(sccb);
2484 
2485 	if (XS_NOERR(sccb))
2486 		XS_SETERR(sccb, CAM_REQ_CMP);
2487 
2488 	if ((sccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP &&
2489 	    (sccb->scsi_status != SCSI_STATUS_OK)) {
2490 		sccb->ccb_h.status &= ~CAM_STATUS_MASK;
2491 		if ((sccb->scsi_status == SCSI_STATUS_CHECK_COND) &&
2492 		    (sccb->ccb_h.status & CAM_AUTOSNS_VALID) == 0) {
2493 			sccb->ccb_h.status |= CAM_AUTOSENSE_FAIL;
2494 		} else {
2495 			sccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
2496 		}
2497 	}
2498 
2499 	sccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2500 	if ((sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2501 		if ((sccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
2502 			sccb->ccb_h.status |= CAM_DEV_QFRZN;
2503 			xpt_freeze_devq(sccb->ccb_h.path, 1);
2504 			isp_prt(isp, ISP_LOGDEBUG0,
2505 			    "freeze devq %d.%d cam sts %x scsi sts %x",
2506 			    sccb->ccb_h.target_id, sccb->ccb_h.target_lun,
2507 			    sccb->ccb_h.status, sccb->scsi_status);
2508 		}
2509 	}
2510 
2511 	if ((CAM_DEBUGGED(sccb->ccb_h.path, ISPDDB)) &&
2512 	    (sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2513 		xpt_print_path(sccb->ccb_h.path);
2514 		isp_prt(isp, ISP_LOGINFO,
2515 		    "cam completion status 0x%x", sccb->ccb_h.status);
2516 	}
2517 
2518 	XS_CMD_S_DONE(sccb);
2519 	if (XS_CMD_WDOG_P(sccb) == 0) {
2520 		callout_stop(&sccb->ccb_h.timeout_ch);
2521 		if (XS_CMD_GRACE_P(sccb)) {
2522 			isp_prt(isp, ISP_LOGDEBUG2,
2523 			    "finished command on borrowed time");
2524 		}
2525 		XS_CMD_S_CLEAR(sccb);
2526 		ISPLOCK_2_CAMLOCK(isp);
2527 		xpt_done((union ccb *) sccb);
2528 		CAMLOCK_2_ISPLOCK(isp);
2529 	}
2530 }
2531 
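/*
 * Asynchronous event handler called from the platform independent code.
 * Target parameter changes, bus resets, loop state transitions, port
 * database changes, fabric device announcements and firmware crashes
 * are turned into CAM notifications or FC kthread wakeups here.
 */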
2532 int
2533 isp_async(struct ispsoftc *isp, ispasync_t cmd, void *arg)
2534 {
2535 	int bus, rv = 0;
2536 	switch (cmd) {
2537 	case ISPASYNC_NEW_TGT_PARAMS:
2538 	{
2539 		struct ccb_trans_settings_scsi *scsi;
2540 		struct ccb_trans_settings_spi *spi;
2541 		int flags, tgt;
2542 		sdparam *sdp = isp->isp_param;
2543 		struct ccb_trans_settings cts;
2544 		struct cam_path *tmppath;
2545 
2546 		bzero(&cts, sizeof (struct ccb_trans_settings));
2547 
2548 		tgt = *((int *)arg);
2549 		bus = (tgt >> 16) & 0xffff;
2550 		tgt &= 0xffff;
2551 		sdp += bus;
2552 		ISPLOCK_2_CAMLOCK(isp);
2553 		if (xpt_create_path(&tmppath, NULL,
2554 		    cam_sim_path(bus? isp->isp_sim2 : isp->isp_sim),
2555 		    tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2556 			CAMLOCK_2_ISPLOCK(isp);
2557 			isp_prt(isp, ISP_LOGWARN,
2558 			    "isp_async cannot make temp path for %d.%d",
2559 			    tgt, bus);
2560 			rv = -1;
2561 			break;
2562 		}
2563 		CAMLOCK_2_ISPLOCK(isp);
2564 		flags = sdp->isp_devparam[tgt].actv_flags;
2565 		cts.type = CTS_TYPE_CURRENT_SETTINGS;
2566 		cts.protocol = PROTO_SCSI;
2567 		cts.transport = XPORT_SPI;
2568 
2569 		scsi = &cts.proto_specific.scsi;
2570 		spi = &cts.xport_specific.spi;
2571 
2572 		if (flags & DPARM_TQING) {
2573 			scsi->valid |= CTS_SCSI_VALID_TQ;
2574 			scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
2575 		}
2576 
2577 		if (flags & DPARM_DISC) {
2578 			spi->valid |= CTS_SPI_VALID_DISC;
2579 			spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
2580 		}
2581 		spi->valid |= CTS_SPI_VALID_BUS_WIDTH;
2582 		if (flags & DPARM_WIDE) {
2583 			spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
2584 		} else {
2585 			spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2586 		}
2587 		if (flags & DPARM_SYNC) {
2588 			spi->valid |= CTS_SPI_VALID_SYNC_RATE;
2589 			spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
2590 			spi->sync_period = sdp->isp_devparam[tgt].actv_period;
2591 			spi->sync_offset = sdp->isp_devparam[tgt].actv_offset;
2592 		}
2593 		isp_prt(isp, ISP_LOGDEBUG2,
2594 		    "NEW_TGT_PARAMS bus %d tgt %d period %x offset %x flags %x",
2595 		    bus, tgt, sdp->isp_devparam[tgt].actv_period,
2596 		    sdp->isp_devparam[tgt].actv_offset, flags);
2597 		xpt_setup_ccb(&cts.ccb_h, tmppath, 1);
2598 		ISPLOCK_2_CAMLOCK(isp);
2599 		xpt_async(AC_TRANSFER_NEG, tmppath, &cts);
2600 		xpt_free_path(tmppath);
2601 		CAMLOCK_2_ISPLOCK(isp);
2602 		break;
2603 	}
2604 	case ISPASYNC_BUS_RESET:
2605 		bus = *((int *)arg);
2606 		isp_prt(isp, ISP_LOGINFO, "SCSI bus reset on bus %d detected",
2607 		    bus);
2608 		if (bus > 0 && isp->isp_path2) {
2609 			ISPLOCK_2_CAMLOCK(isp);
2610 			xpt_async(AC_BUS_RESET, isp->isp_path2, NULL);
2611 			CAMLOCK_2_ISPLOCK(isp);
2612 		} else if (isp->isp_path) {
2613 			ISPLOCK_2_CAMLOCK(isp);
2614 			xpt_async(AC_BUS_RESET, isp->isp_path, NULL);
2615 			CAMLOCK_2_ISPLOCK(isp);
2616 		}
2617 		break;
2618 	case ISPASYNC_LIP:
2619 		if (isp->isp_path) {
2620 			isp_freeze_loopdown(isp, "ISPASYNC_LIP");
2621 		}
2622 		isp_prt(isp, ISP_LOGINFO, "LIP Received");
2623 		break;
2624 	case ISPASYNC_LOOP_RESET:
2625 		if (isp->isp_path) {
2626 			isp_freeze_loopdown(isp, "ISPASYNC_LOOP_RESET");
2627 		}
2628 		isp_prt(isp, ISP_LOGINFO, "Loop Reset Received");
2629 		break;
2630 	case ISPASYNC_LOOP_DOWN:
2631 		if (isp->isp_path) {
2632 			isp_freeze_loopdown(isp, "ISPASYNC_LOOP_DOWN");
2633 		}
2634 		isp_prt(isp, ISP_LOGINFO, "Loop DOWN");
2635 		break;
2636 	case ISPASYNC_LOOP_UP:
2637 		/*
2638 		 * Now we just note that Loop has come up. We don't
2639 		 * actually do anything because we're waiting for a
2640 		 * Change Notify before activating the FC cleanup
2641 		 * thread to look at the state of the loop again.
2642 		 */
2643 		isp_prt(isp, ISP_LOGINFO, "Loop UP");
2644 		break;
2645 	case ISPASYNC_PROMENADE:
2646 	{
2647 		struct cam_path *tmppath;
2648 		const char *fmt = "Target %d (Loop 0x%x) Port ID 0x%x "
2649 		    "(role %s) %s\n Port WWN 0x%08x%08x\n Node WWN 0x%08x%08x";
2650 		static const char *roles[4] = {
2651 		    "(none)", "Target", "Initiator", "Target/Initiator"
2652 		};
2653 		fcparam *fcp = isp->isp_param;
2654 		int tgt = *((int *) arg);
2655 		struct lportdb *lp = &fcp->portdb[tgt];
2656 
2657 		isp_prt(isp, ISP_LOGINFO, fmt, tgt, lp->loopid, lp->portid,
2658 		    roles[lp->roles & 0x3],
2659 		    (lp->valid)? "Arrived" : "Departed",
2660 		    (u_int32_t) (lp->port_wwn >> 32),
2661 		    (u_int32_t) (lp->port_wwn & 0xffffffffLL),
2662 		    (u_int32_t) (lp->node_wwn >> 32),
2663 		    (u_int32_t) (lp->node_wwn & 0xffffffffLL));
2664 
2665 		if (xpt_create_path(&tmppath, NULL, cam_sim_path(isp->isp_sim),
2666 		    (target_id_t)tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2667 			break;
2668 		}
2669 		if (lp->valid && (lp->roles &
2670 		    (SVC3_INI_ROLE >> SVC3_ROLE_SHIFT))) {
2671 			ISPLOCK_2_CAMLOCK(isp);
2672 			xpt_async(AC_FOUND_DEVICE, tmppath, NULL);
2673 		} else {
2674 			ISPLOCK_2_CAMLOCK(isp);
2675 			xpt_async(AC_LOST_DEVICE, tmppath, NULL);
2676 		}
2677 		CAMLOCK_2_ISPLOCK(isp);
2678 		xpt_free_path(tmppath);
2679 		break;
2680 	}
2681 	case ISPASYNC_CHANGE_NOTIFY:
2682 		if (arg == ISPASYNC_CHANGE_PDB) {
2683 			isp_prt(isp, ISP_LOGINFO,
2684 			    "Port Database Changed");
2685 		} else if (arg == ISPASYNC_CHANGE_SNS) {
2686 			isp_prt(isp, ISP_LOGINFO,
2687 			    "Name Server Database Changed");
2688 		}
2689 		wakeup(&isp->isp_osinfo.kthread);
2690 		break;
2691 	case ISPASYNC_FABRIC_DEV:
2692 	{
2693 		int target, base, lim;
2694 		fcparam *fcp = isp->isp_param;
2695 		struct lportdb *lp = NULL;
2696 		struct lportdb *clp = (struct lportdb *) arg;
2697 		char *pt;
2698 
2699 		switch (clp->port_type) {
2700 		case 1:
2701 			pt = "   N_Port";
2702 			break;
2703 		case 2:
2704 			pt = "  NL_Port";
2705 			break;
2706 		case 3:
2707 			pt = "F/NL_Port";
2708 			break;
2709 		case 0x7f:
2710 			pt = "  Nx_Port";
2711 			break;
2712 		case 0x81:
2713 			pt = "  F_port";
2714 			break;
2715 		case 0x82:
2716 			pt = "  FL_Port";
2717 			break;
2718 		case 0x84:
2719 			pt = "   E_port";
2720 			break;
2721 		default:
2722 			pt = " ";
2723 			break;
2724 		}
2725 
2726 		isp_prt(isp, ISP_LOGINFO,
2727 		    "%s Fabric Device @ PortID 0x%x", pt, clp->portid);
2728 
2729 		/*
2730 		 * If we don't have an initiator role we bail.
2731 		 *
2732 		 * We just use ISPASYNC_FABRIC_DEV for announcement purposes.
2733 		 */
2734 
2735 		if ((isp->isp_role & ISP_ROLE_INITIATOR) == 0) {
2736 			break;
2737 		}
2738 
2739 		/*
2740 		 * Is this entry for us? If so, we bail.
2741 		 */
2742 
2743 		if (fcp->isp_portid == clp->portid) {
2744 			break;
2745 		}
2746 
2747 		/*
2748 		 * Else, the default policy is to find room for it in
2749 		 * our local port database. Later, when we execute
2750 		 * the call to isp_pdb_sync, either this newly arrived
2751 		 * or already logged in device will be (re)announced.
2752 		 */
2753 
2754 		if (fcp->isp_topo == TOPO_FL_PORT)
2755 			base = FC_SNS_ID+1;
2756 		else
2757 			base = 0;
2758 
2759 		if (fcp->isp_topo == TOPO_N_PORT)
2760 			lim = 1;
2761 		else
2762 			lim = MAX_FC_TARG;
2763 
2764 		/*
2765 		 * Is it already in our list?
2766 		 */
2767 		for (target = base; target < lim; target++) {
2768 			if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
2769 				continue;
2770 			}
2771 			lp = &fcp->portdb[target];
2772 			if (lp->port_wwn == clp->port_wwn &&
2773 			    lp->node_wwn == clp->node_wwn) {
2774 				lp->fabric_dev = 1;
2775 				break;
2776 			}
2777 		}
2778 		if (target < lim) {
2779 			break;
2780 		}
2781 		for (target = base; target < lim; target++) {
2782 			if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
2783 				continue;
2784 			}
2785 			lp = &fcp->portdb[target];
2786 			if (lp->port_wwn == 0) {
2787 				break;
2788 			}
2789 		}
2790 		if (target == lim) {
2791 			isp_prt(isp, ISP_LOGWARN,
2792 			    "out of space for fabric devices");
2793 			break;
2794 		}
2795 		lp->port_type = clp->port_type;
2796 		lp->fc4_type = clp->fc4_type;
2797 		lp->node_wwn = clp->node_wwn;
2798 		lp->port_wwn = clp->port_wwn;
2799 		lp->portid = clp->portid;
2800 		lp->fabric_dev = 1;
2801 		break;
2802 	}
2803 #ifdef	ISP_TARGET_MODE
2804 	case ISPASYNC_TARGET_MESSAGE:
2805 	{
2806 		tmd_msg_t *mp = arg;
2807 		isp_prt(isp, ISP_LOGALL,
2808 		    "bus %d iid %d tgt %d lun %d ttype %x tval %x msg[0]=%x",
2809 		    mp->nt_bus, (int) mp->nt_iid, (int) mp->nt_tgt,
2810 		    (int) mp->nt_lun, mp->nt_tagtype, mp->nt_tagval,
2811 		    mp->nt_msg[0]);
2812 		break;
2813 	}
2814 	case ISPASYNC_TARGET_EVENT:
2815 	{
2816 		tmd_event_t *ep = arg;
2817 		isp_prt(isp, ISP_LOGALL,
2818 		    "bus %d event code 0x%x", ep->ev_bus, ep->ev_event);
2819 		break;
2820 	}
2821 	case ISPASYNC_TARGET_ACTION:
2822 		switch (((isphdr_t *)arg)->rqs_entry_type) {
2823 		default:
2824 			isp_prt(isp, ISP_LOGWARN,
2825 			   "event 0x%x for unhandled target action",
2826 			    ((isphdr_t *)arg)->rqs_entry_type);
2827 			break;
2828 		case RQSTYPE_NOTIFY:
2829 			if (IS_SCSI(isp)) {
2830 				rv = isp_handle_platform_notify_scsi(isp,
2831 				    (in_entry_t *) arg);
2832 			} else {
2833 				rv = isp_handle_platform_notify_fc(isp,
2834 				    (in_fcentry_t *) arg);
2835 			}
2836 			break;
2837 		case RQSTYPE_ATIO:
2838 			rv = isp_handle_platform_atio(isp, (at_entry_t *) arg);
2839 			break;
2840 		case RQSTYPE_ATIO2:
2841 			rv = isp_handle_platform_atio2(isp, (at2_entry_t *)arg);
2842 			break;
2843 		case RQSTYPE_CTIO2:
2844 		case RQSTYPE_CTIO:
2845 			rv = isp_handle_platform_ctio(isp, arg);
2846 			break;
2847 		case RQSTYPE_ENABLE_LUN:
2848 		case RQSTYPE_MODIFY_LUN:
2849 			if (IS_DUALBUS(isp)) {
2850 				bus =
2851 				    GET_BUS_VAL(((lun_entry_t *)arg)->le_rsvd);
2852 			} else {
2853 				bus = 0;
2854 			}
2855 			isp_cv_signal_rqe(isp, bus,
2856 			    ((lun_entry_t *)arg)->le_status);
2857 			break;
2858 		}
2859 		break;
2860 #endif
2861 	case ISPASYNC_FW_CRASH:
2862 	{
2863 		u_int16_t mbox1, mbox6;
2864 		mbox1 = ISP_READ(isp, OUTMAILBOX1);
2865 		if (IS_DUALBUS(isp)) {
2866 			mbox6 = ISP_READ(isp, OUTMAILBOX6);
2867 		} else {
2868 			mbox6 = 0;
2869 		}
2870 		isp_prt(isp, ISP_LOGERR,
2871 		    "Internal Firmware Error on bus %d @ RISC Address 0x%x",
2872 		    mbox6, mbox1);
2873 #ifdef	ISP_FW_CRASH_DUMP
2874 		/*
2875 		 * XXX: really need a thread to do this right.
2876 		 */
2877 		if (IS_FC(isp)) {
2878 			FCPARAM(isp)->isp_fwstate = FW_CONFIG_WAIT;
2879 			FCPARAM(isp)->isp_loopstate = LOOP_NIL;
2880 			isp_freeze_loopdown(isp, "f/w crash");
2881 			isp_fw_dump(isp);
2882 		}
2883 		isp_reinit(isp);
2884 		isp_async(isp, ISPASYNC_FW_RESTARTED, NULL);
2885 #endif
2886 		break;
2887 	}
2888 	case ISPASYNC_UNHANDLED_RESPONSE:
2889 		break;
2890 	default:
2891 		isp_prt(isp, ISP_LOGERR, "unknown isp_async event %d", cmd);
2892 		break;
2893 	}
2894 	return (rv);
2895 }
2896 
2897 
2898 /*
2899  * Locks are held before coming here.
2900  */
2901 void
2902 isp_uninit(struct ispsoftc *isp)
2903 {
2904 	ISP_WRITE(isp, HCCR, HCCR_CMD_RESET);
2905 	DISABLE_INTS(isp);
2906 }
2907 
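/*
 * Driver message printer: honor the log level mask and prefix each
 * message with the device name and unit.
 */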
2908 void
2909 isp_prt(struct ispsoftc *isp, int level, const char *fmt, ...)
2910 {
2911 	__va_list ap;
2912 	if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) {
2913 		return;
2914 	}
2915 	kprintf("%s: ", device_get_nameunit(isp->isp_dev));
2916 	__va_start(ap, fmt);
2917 	kvprintf(fmt, ap);
2918 	__va_end(ap);
2919 	kprintf("\n");
2920 }
2921