1 /* $FreeBSD: src/sys/dev/isp/isp_freebsd.c,v 1.32.2.20 2002/10/11 18:49:25 mjacob Exp $ */
2 /* $DragonFly: src/sys/dev/disk/isp/isp_freebsd.c,v 1.7 2003/11/09 02:22:34 dillon Exp $ */
3 /*
4  * Platform (FreeBSD) dependent common attachment code for Qlogic adapters.
5  *
6  * Copyright (c) 1997, 1998, 1999, 2000, 2001 by Matthew Jacob
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice immediately at the beginning of the file, without modification,
13  *    this list of conditions, and the following disclaimer.
14  * 2. The name of the author may not be used to endorse or promote products
15  *    derived from this software without specific prior written permission.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
21  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  */
29 #include "isp_freebsd.h"
30 #include <sys/unistd.h>
31 #include <sys/kthread.h>
32 #include <machine/stdarg.h>	/* for use by isp_prt below */
33 #include <sys/conf.h>
34 #include <sys/ioccom.h>
35 #include "isp_ioctl.h"
36 
37 
38 static d_ioctl_t ispioctl;
39 static void isp_intr_enable(void *);
40 static void isp_cam_async(void *, u_int32_t, struct cam_path *, void *);
41 static void isp_poll(struct cam_sim *);
42 static timeout_t isp_watchdog;
43 static void isp_kthread(void *);
44 static void isp_action(struct cam_sim *, union ccb *);
45 
46 
47 #define ISP_CDEV_MAJOR	248
48 static struct cdevsw isp_cdevsw = {
49 	/* name */	"isp",
50 	/* maj */	ISP_CDEV_MAJOR,
51 	/* flags */	D_TAPE,
52 	/* port */	NULL,
53 	/* autoq */	0,
54 
55 	/* open */	nullopen,
56 	/* close */	nullclose,
57 	/* read */	noread,
58 	/* write */	nowrite,
59 	/* ioctl */	ispioctl,
60 	/* poll */	nopoll,
61 	/* mmap */	nommap,
62 	/* strategy */	nostrategy,
63 	/* dump */	nodump,
64 	/* psize */	nopsize
65 };
66 
67 static struct ispsoftc *isplist = NULL;
68 
69 void
70 isp_attach(struct ispsoftc *isp)
71 {
72 	int primary, secondary;
73 	struct ccb_setasync csa;
74 	struct cam_devq *devq;
75 	struct cam_sim *sim;
76 	struct cam_path *path;
77 
78 	/*
79 	 * Establish (in the case of the 12X0) which bus is the primary.
80 	 */
81 
82 	primary = 0;
83 	secondary = 1;
84 
85 	/*
86 	 * Create the device queue for our SIM(s).
87 	 */
88 	devq = cam_simq_alloc(isp->isp_maxcmds);
89 	if (devq == NULL) {
90 		return;
91 	}
92 
93 	/*
94 	 * Construct our SIM entry.
95 	 */
96 	ISPLOCK_2_CAMLOCK(isp);
97 	sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp,
98 	    device_get_unit(isp->isp_dev), 1, isp->isp_maxcmds, devq);
99 	if (sim == NULL) {
100 		cam_simq_free(devq);
101 		CAMLOCK_2_ISPLOCK(isp);
102 		return;
103 	}
104 	CAMLOCK_2_ISPLOCK(isp);
105 
106 	isp->isp_osinfo.ehook.ich_func = isp_intr_enable;
107 	isp->isp_osinfo.ehook.ich_arg = isp;
108 	ISPLOCK_2_CAMLOCK(isp);
109 	if (config_intrhook_establish(&isp->isp_osinfo.ehook) != 0) {
110 		cam_sim_free(sim, TRUE);
111 		CAMLOCK_2_ISPLOCK(isp);
112 		isp_prt(isp, ISP_LOGERR,
113 		    "could not establish interrupt enable hook");
114 		return;
115 	}
116 
117 	if (xpt_bus_register(sim, primary) != CAM_SUCCESS) {
118 		cam_sim_free(sim, TRUE);
119 		CAMLOCK_2_ISPLOCK(isp);
120 		return;
121 	}
122 
123 	if (xpt_create_path(&path, NULL, cam_sim_path(sim),
124 	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
125 		xpt_bus_deregister(cam_sim_path(sim));
126 		cam_sim_free(sim, TRUE);
127 		config_intrhook_disestablish(&isp->isp_osinfo.ehook);
128 		CAMLOCK_2_ISPLOCK(isp);
129 		return;
130 	}
131 
132 	xpt_setup_ccb(&csa.ccb_h, path, 5);
133 	csa.ccb_h.func_code = XPT_SASYNC_CB;
134 	csa.event_enable = AC_LOST_DEVICE;
135 	csa.callback = isp_cam_async;
136 	csa.callback_arg = sim;
137 	xpt_action((union ccb *)&csa);
138 	CAMLOCK_2_ISPLOCK(isp);
139 	isp->isp_sim = sim;
140 	isp->isp_path = path;
141 	/*
142 	 * Create a kernel thread for fibre channel instances. We
143 	 * don't have dual channel FC cards.
144 	 */
145 	if (IS_FC(isp)) {
146 		ISPLOCK_2_CAMLOCK(isp);
147 		if (kthread_create(isp_kthread, isp, &isp->isp_osinfo.kthread,
148 		    "%s: fc_thrd", device_get_nameunit(isp->isp_dev))) {
149 			xpt_bus_deregister(cam_sim_path(sim));
150 			cam_sim_free(sim, TRUE);
151 			config_intrhook_disestablish(&isp->isp_osinfo.ehook);
152 			CAMLOCK_2_ISPLOCK(isp);
153 			isp_prt(isp, ISP_LOGERR, "could not create kthread");
154 			return;
155 		}
156 		CAMLOCK_2_ISPLOCK(isp);
157 	}
158 
159 
160 	/*
161 	 * If we have a second channel, construct SIM entry for that.
162 	 */
163 	if (IS_DUALBUS(isp)) {
164 		ISPLOCK_2_CAMLOCK(isp);
165 		sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp,
166 		    device_get_unit(isp->isp_dev), 1, isp->isp_maxcmds, devq);
167 		if (sim == NULL) {
168 			xpt_bus_deregister(cam_sim_path(isp->isp_sim));
169 			xpt_free_path(isp->isp_path);
170 			cam_simq_free(devq);
171 			config_intrhook_disestablish(&isp->isp_osinfo.ehook);
172 			return;
173 		}
174 		if (xpt_bus_register(sim, secondary) != CAM_SUCCESS) {
175 			xpt_bus_deregister(cam_sim_path(isp->isp_sim));
176 			xpt_free_path(isp->isp_path);
177 			cam_sim_free(sim, TRUE);
178 			config_intrhook_disestablish(&isp->isp_osinfo.ehook);
179 			CAMLOCK_2_ISPLOCK(isp);
180 			return;
181 		}
182 
183 		if (xpt_create_path(&path, NULL, cam_sim_path(sim),
184 		    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
185 			xpt_bus_deregister(cam_sim_path(isp->isp_sim));
186 			xpt_free_path(isp->isp_path);
187 			xpt_bus_deregister(cam_sim_path(sim));
188 			cam_sim_free(sim, TRUE);
189 			config_intrhook_disestablish(&isp->isp_osinfo.ehook);
190 			CAMLOCK_2_ISPLOCK(isp);
191 			return;
192 		}
193 
194 		xpt_setup_ccb(&csa.ccb_h, path, 5);
195 		csa.ccb_h.func_code = XPT_SASYNC_CB;
196 		csa.event_enable = AC_LOST_DEVICE;
197 		csa.callback = isp_cam_async;
198 		csa.callback_arg = sim;
199 		xpt_action((union ccb *)&csa);
200 		CAMLOCK_2_ISPLOCK(isp);
201 		isp->isp_sim2 = sim;
202 		isp->isp_path2 = path;
203 	}
204 	/*
205 	 * Create device nodes
206 	 */
207 	(void) make_dev(&isp_cdevsw, device_get_unit(isp->isp_dev), UID_ROOT,
208 	    GID_OPERATOR, 0600, "%s", device_get_nameunit(isp->isp_dev));
209 
210 	if (isp->isp_role != ISP_ROLE_NONE) {
211 		isp->isp_state = ISP_RUNSTATE;
212 	}
213 	if (isplist == NULL) {
214 		isplist = isp;
215 	} else {
216 		struct ispsoftc *tmp = isplist;
217 		while (tmp->isp_osinfo.next) {
218 			tmp = tmp->isp_osinfo.next;
219 		}
220 		tmp->isp_osinfo.next = isp;
221 	}
222 
223 }
224 
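/*
 * Freeze the SIM queue while the FC loop is down so that new commands
 * stay queued in CAM instead of failing outright; isp_kthread() clears
 * SIMQFRZ_LOOPDOWN and releases the queue again once loop state comes
 * good (or once it gives up waiting for it).
 */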
225 static INLINE void
226 isp_freeze_loopdown(struct ispsoftc *isp, char *msg)
227 {
228 	if (isp->isp_osinfo.simqfrozen == 0) {
229 		isp_prt(isp, ISP_LOGDEBUG0, "%s: freeze simq (loopdown)", msg);
230 		isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN;
231 		ISPLOCK_2_CAMLOCK(isp);
232 		xpt_freeze_simq(isp->isp_sim, 1);
233 		CAMLOCK_2_ISPLOCK(isp);
234 	} else {
235 		isp_prt(isp, ISP_LOGDEBUG0, "%s: mark frozen (loopdown)", msg);
236 		isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN;
237 	}
238 }
239 
240 static int
241 ispioctl(dev_t dev, u_long cmd, caddr_t addr, int flags, d_thread_t *td)
242 {
243 	struct ispsoftc *isp;
244 	int retval = ENOTTY;
245 
246 	isp = isplist;
247 	while (isp) {
248 		if (minor(dev) == device_get_unit(isp->isp_dev)) {
249 			break;
250 		}
251 		isp = isp->isp_osinfo.next;
252 	}
253 	if (isp == NULL)
254 		return (ENXIO);
255 
256 	switch (cmd) {
257 #ifdef	ISP_FW_CRASH_DUMP
258 	case ISP_GET_FW_CRASH_DUMP:
259 	{
260 		u_int16_t *ptr = FCPARAM(isp)->isp_dump_data;
261 		size_t sz;
262 
263 		retval = 0;
264 		if (IS_2200(isp))
265 			sz = QLA2200_RISC_IMAGE_DUMP_SIZE;
266 		else
267 			sz = QLA2300_RISC_IMAGE_DUMP_SIZE;
268 		ISP_LOCK(isp);
269 		if (ptr && *ptr) {
270 			void *uaddr = *((void **) addr);
271 			if (copyout(ptr, uaddr, sz)) {
272 				retval = EFAULT;
273 			} else {
274 				*ptr = 0;
275 			}
276 		} else {
277 			retval = ENXIO;
278 		}
279 		ISP_UNLOCK(isp);
280 		break;
281 	}
282 
283 	case ISP_FORCE_CRASH_DUMP:
284 		ISP_LOCK(isp);
285 		isp_freeze_loopdown(isp, "ispioctl(ISP_FORCE_CRASH_DUMP)");
286 		isp_fw_dump(isp);
287 		isp_reinit(isp);
288 		ISP_UNLOCK(isp);
289 		retval = 0;
290 		break;
291 #endif
292 	case ISP_SDBLEV:
293 	{
294 		int olddblev = isp->isp_dblev;
295 		isp->isp_dblev = *(int *)addr;
296 		*(int *)addr = olddblev;
297 		retval = 0;
298 		break;
299 	}
300 	case ISP_RESETHBA:
301 		ISP_LOCK(isp);
302 		isp_reinit(isp);
303 		ISP_UNLOCK(isp);
304 		retval = 0;
305 		break;
306 	case ISP_RESCAN:
307 		if (IS_FC(isp)) {
308 			ISP_LOCK(isp);
309 			if (isp_fc_runstate(isp, 5 * 1000000)) {
310 				retval = EIO;
311 			} else {
312 				retval = 0;
313 			}
314 			ISP_UNLOCK(isp);
315 		}
316 		break;
317 	case ISP_FC_LIP:
318 		if (IS_FC(isp)) {
319 			ISP_LOCK(isp);
320 			if (isp_control(isp, ISPCTL_SEND_LIP, 0)) {
321 				retval = EIO;
322 			} else {
323 				retval = 0;
324 			}
325 			ISP_UNLOCK(isp);
326 		}
327 		break;
328 	case ISP_FC_GETDINFO:
329 	{
330 		struct isp_fc_device *ifc = (struct isp_fc_device *) addr;
331 		struct lportdb *lp;
332 
333 		if (ifc->loopid < 0 || ifc->loopid >= MAX_FC_TARG) {
334 			retval = EINVAL;
335 			break;
336 		}
337 		ISP_LOCK(isp);
338 		lp = &FCPARAM(isp)->portdb[ifc->loopid];
339 		if (lp->valid) {
340 			ifc->loopid = lp->loopid;
341 			ifc->portid = lp->portid;
342 			ifc->node_wwn = lp->node_wwn;
343 			ifc->port_wwn = lp->port_wwn;
344 			retval = 0;
345 		} else {
346 			retval = ENODEV;
347 		}
348 		ISP_UNLOCK(isp);
349 		break;
350 	}
351 	case ISP_GET_STATS:
352 	{
353 		isp_stats_t *sp = (isp_stats_t *) addr;
354 
355 		MEMZERO(sp, sizeof (*sp));
356 		sp->isp_stat_version = ISP_STATS_VERSION;
357 		sp->isp_type = isp->isp_type;
358 		sp->isp_revision = isp->isp_revision;
359 		ISP_LOCK(isp);
360 		sp->isp_stats[ISP_INTCNT] = isp->isp_intcnt;
361 		sp->isp_stats[ISP_INTBOGUS] = isp->isp_intbogus;
362 		sp->isp_stats[ISP_INTMBOXC] = isp->isp_intmboxc;
363 		sp->isp_stats[ISP_INGOASYNC] = isp->isp_intoasync;
364 		sp->isp_stats[ISP_RSLTCCMPLT] = isp->isp_rsltccmplt;
365 		sp->isp_stats[ISP_FPHCCMCPLT] = isp->isp_fphccmplt;
366 		sp->isp_stats[ISP_RSCCHIWAT] = isp->isp_rscchiwater;
367 		sp->isp_stats[ISP_FPCCHIWAT] = isp->isp_fpcchiwater;
368 		ISP_UNLOCK(isp);
369 		retval = 0;
370 		break;
371 	}
372 	case ISP_CLR_STATS:
373 		ISP_LOCK(isp);
374 		isp->isp_intcnt = 0;
375 		isp->isp_intbogus = 0;
376 		isp->isp_intmboxc = 0;
377 		isp->isp_intoasync = 0;
378 		isp->isp_rsltccmplt = 0;
379 		isp->isp_fphccmplt = 0;
380 		isp->isp_rscchiwater = 0;
381 		isp->isp_fpcchiwater = 0;
382 		ISP_UNLOCK(isp);
383 		retval = 0;
384 		break;
385 	case ISP_FC_GETHINFO:
386 	{
387 		struct isp_hba_device *hba = (struct isp_hba_device *) addr;
388 		MEMZERO(hba, sizeof (*hba));
389 		ISP_LOCK(isp);
390 		hba->fc_speed = FCPARAM(isp)->isp_gbspeed;
391 		hba->fc_scsi_supported = 1;
392 		hba->fc_topology = FCPARAM(isp)->isp_topo + 1;
393 		hba->fc_loopid = FCPARAM(isp)->isp_loopid;
394 		hba->active_node_wwn = FCPARAM(isp)->isp_nodewwn;
395 		hba->active_port_wwn = FCPARAM(isp)->isp_portwwn;
396 		ISP_UNLOCK(isp);
397 		retval = 0;
398 		break;
399 	}
400 	case ISP_GET_FC_PARAM:
401 	{
402 		struct isp_fc_param *f = (struct isp_fc_param *) addr;
403 
404 		if (!IS_FC(isp)) {
405 			retval = EINVAL;
406 			break;
407 		}
408 		f->parameter = 0;
409 		if (strcmp(f->param_name, "framelength") == 0) {
410 			f->parameter = FCPARAM(isp)->isp_maxfrmlen;
411 			retval = 0;
412 			break;
413 		}
414 		if (strcmp(f->param_name, "exec_throttle") == 0) {
415 			f->parameter = FCPARAM(isp)->isp_execthrottle;
416 			retval = 0;
417 			break;
418 		}
419 		if (strcmp(f->param_name, "fullduplex") == 0) {
420 			if (FCPARAM(isp)->isp_fwoptions & ICBOPT_FULL_DUPLEX)
421 				f->parameter = 1;
422 			retval = 0;
423 			break;
424 		}
425 		if (strcmp(f->param_name, "loopid") == 0) {
426 			f->parameter = FCPARAM(isp)->isp_loopid;
427 			retval = 0;
428 			break;
429 		}
430 		retval = EINVAL;
431 		break;
432 	}
433 	case ISP_SET_FC_PARAM:
434 	{
435 		struct isp_fc_param *f = (struct isp_fc_param *) addr;
436 		u_int32_t param = f->parameter;
437 
438 		if (!IS_FC(isp)) {
439 			retval = EINVAL;
440 			break;
441 		}
442 		f->parameter = 0;
443 		if (strcmp(f->param_name, "framelength") == 0) {
444 			if (param != 512 && param != 1024 && param != 2048) {
445 				retval = EINVAL;
446 				break;
447 			}
448 			FCPARAM(isp)->isp_maxfrmlen = param;
449 			retval = 0;
450 			break;
451 		}
452 		if (strcmp(f->param_name, "exec_throttle") == 0) {
453 			if (param < 16 || param > 255) {
454 				retval = EINVAL;
455 				break;
456 			}
457 			FCPARAM(isp)->isp_execthrottle = param;
458 			retval = 0;
459 			break;
460 		}
461 		if (strcmp(f->param_name, "fullduplex") == 0) {
462 			if (param != 0 && param != 1) {
463 				retval = EINVAL;
464 				break;
465 			}
466 			if (param) {
467 				FCPARAM(isp)->isp_fwoptions |=
468 				    ICBOPT_FULL_DUPLEX;
469 			} else {
470 				FCPARAM(isp)->isp_fwoptions &=
471 				    ~ICBOPT_FULL_DUPLEX;
472 			}
473 			retval = 0;
474 			break;
475 		}
476 		if (strcmp(f->param_name, "loopid") == 0) {
477 			if (param < 0 || param > 125) {
478 				retval = EINVAL;
479 				break;
480 			}
481 			FCPARAM(isp)->isp_loopid = param;
482 			retval = 0;
483 			break;
484 		}
485 		retval = EINVAL;
486 		break;
487 	}
488 	default:
489 		break;
490 	}
491 	return (retval);
492 }
493 
494 static void
495 isp_intr_enable(void *arg)
496 {
497 	struct ispsoftc *isp = arg;
498 	if (isp->isp_role != ISP_ROLE_NONE) {
499 		ENABLE_INTS(isp);
500 	}
501 	/* Release our hook so that the boot can continue. */
502 	config_intrhook_disestablish(&isp->isp_osinfo.ehook);
503 }
504 
505 /*
506  * Put the target mode functions here, because some are inlines
507  */
508 
509 #ifdef	ISP_TARGET_MODE
510 
511 static INLINE int is_lun_enabled(struct ispsoftc *, int, lun_id_t);
512 static INLINE int are_any_luns_enabled(struct ispsoftc *, int);
513 static INLINE tstate_t *get_lun_statep(struct ispsoftc *, int, lun_id_t);
514 static INLINE void rls_lun_statep(struct ispsoftc *, tstate_t *);
515 static INLINE int isp_psema_sig_rqe(struct ispsoftc *, int);
516 static INLINE int isp_cv_wait_timed_rqe(struct ispsoftc *, int, int);
517 static INLINE void isp_cv_signal_rqe(struct ispsoftc *, int, int);
518 static INLINE void isp_vsema_rqe(struct ispsoftc *, int);
519 static INLINE atio_private_data_t *isp_get_atpd(struct ispsoftc *, int);
520 static cam_status
521 create_lun_state(struct ispsoftc *, int, struct cam_path *, tstate_t **);
522 static void destroy_lun_state(struct ispsoftc *, tstate_t *);
523 static void isp_en_lun(struct ispsoftc *, union ccb *);
524 static cam_status isp_abort_tgt_ccb(struct ispsoftc *, union ccb *);
525 static timeout_t isp_refire_putback_atio;
526 static void isp_complete_ctio(union ccb *);
527 static void isp_target_putback_atio(union ccb *);
528 static cam_status isp_target_start_ctio(struct ispsoftc *, union ccb *);
529 static int isp_handle_platform_atio(struct ispsoftc *, at_entry_t *);
530 static int isp_handle_platform_atio2(struct ispsoftc *, at2_entry_t *);
531 static int isp_handle_platform_ctio(struct ispsoftc *, void *);
532 static int isp_handle_platform_notify_scsi(struct ispsoftc *, in_entry_t *);
533 static int isp_handle_platform_notify_fc(struct ispsoftc *, in_fcentry_t *);
534 
535 static INLINE int
536 is_lun_enabled(struct ispsoftc *isp, int bus, lun_id_t lun)
537 {
538 	tstate_t *tptr;
539 	tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(isp, bus, lun)];
540 	if (tptr == NULL) {
541 		return (0);
542 	}
543 	do {
544 		if (tptr->lun == (lun_id_t) lun && tptr->bus == bus) {
545 			return (1);
546 		}
547 	} while ((tptr = tptr->next) != NULL);
548 	return (0);
549 }
550 
551 static INLINE int
552 are_any_luns_enabled(struct ispsoftc *isp, int port)
553 {
554 	int lo, hi;
555 	if (IS_DUALBUS(isp)) {
556 		lo = (port * (LUN_HASH_SIZE >> 1));
557 		hi = lo + (LUN_HASH_SIZE >> 1);
558 	} else {
559 		lo = 0;
560 		hi = LUN_HASH_SIZE;
561 	}
562 	for (; lo < hi; lo++) {
563 		if (isp->isp_osinfo.lun_hash[lo]) {
564 			return (1);
565 		}
566 	}
567 	return (0);
568 }
569 
570 static INLINE tstate_t *
571 get_lun_statep(struct ispsoftc *isp, int bus, lun_id_t lun)
572 {
573 	tstate_t *tptr = NULL;
574 
575 	if (lun == CAM_LUN_WILDCARD) {
576 		if (isp->isp_osinfo.tmflags[bus] & TM_WILDCARD_ENABLED) {
577 			tptr = &isp->isp_osinfo.tsdflt[bus];
578 			tptr->hold++;
579 			return (tptr);
580 		}
581 	} else {
582 		tptr = isp->isp_osinfo.lun_hash[LUN_HASH_FUNC(isp, bus, lun)];
583 		if (tptr == NULL) {
584 			return (NULL);
585 		}
586 	}
587 
588 	do {
589 		if (tptr->lun == lun && tptr->bus == bus) {
590 			tptr->hold++;
591 			return (tptr);
592 		}
593 	} while ((tptr = tptr->next) != NULL);
594 	return (tptr);
595 }
596 
597 static __inline void
598 rls_lun_statep(struct ispsoftc *isp, tstate_t *tptr)
599 {
600 	if (tptr->hold)
601 		tptr->hold--;
602 }
603 
604 static __inline int
605 isp_psema_sig_rqe(struct ispsoftc *isp, int bus)
606 {
607 	while (isp->isp_osinfo.tmflags[bus] & TM_BUSY) {
608 		isp->isp_osinfo.tmflags[bus] |= TM_WANTED;
609 		if (tsleep(&isp->isp_osinfo.tmflags[bus], PCATCH, "i0", 0)) {
610 			return (-1);
611 		}
612 		isp->isp_osinfo.tmflags[bus] |= TM_BUSY;
613 	}
614 	return (0);
615 }
616 
617 static __inline int
618 isp_cv_wait_timed_rqe(struct ispsoftc *isp, int bus, int timo)
619 {
620 	if (tsleep(&isp->isp_osinfo.rstatus[bus], 0, "qt1", timo)) {
621 		return (-1);
622 	}
623 	return (0);
624 }
625 
626 static __inline void
627 isp_cv_signal_rqe(struct ispsoftc *isp, int bus, int status)
628 {
629 	isp->isp_osinfo.rstatus[bus] = status;
630 	wakeup(&isp->isp_osinfo.rstatus[bus]);
631 }
632 
633 static __inline void
634 isp_vsema_rqe(struct ispsoftc *isp, int bus)
635 {
636 	if (isp->isp_osinfo.tmflags[bus] & TM_WANTED) {
637 		isp->isp_osinfo.tmflags[bus] &= ~TM_WANTED;
638 		wakeup(&isp->isp_osinfo.tmflags[bus]);
639 	}
640 	isp->isp_osinfo.tmflags[bus] &= ~TM_BUSY;
641 }
642 
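/*
 * ATIO private data slots are keyed by the firmware tag (RX_ID); a slot
 * whose tag is zero is free.  A lookup with tag 0 therefore returns an
 * unused slot, and a completed command gives its slot back by zeroing
 * the tag.
 */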
643 static __inline atio_private_data_t *
644 isp_get_atpd(struct ispsoftc *isp, int tag)
645 {
646 	atio_private_data_t *atp;
647 	for (atp = isp->isp_osinfo.atpdp;
648 	    atp < &isp->isp_osinfo.atpdp[ATPDPSIZE]; atp++) {
649 		if (atp->tag == tag)
650 			return (atp);
651 	}
652 	return (NULL);
653 }
654 
655 static cam_status
656 create_lun_state(struct ispsoftc *isp, int bus,
657     struct cam_path *path, tstate_t **rslt)
658 {
659 	cam_status status;
660 	lun_id_t lun;
661 	int hfx;
662 	tstate_t *tptr, *new;
663 
664 	lun = xpt_path_lun_id(path);
665 	if (lun < 0) {
666 		return (CAM_LUN_INVALID);
667 	}
668 	if (is_lun_enabled(isp, bus, lun)) {
669 		return (CAM_LUN_ALRDY_ENA);
670 	}
671 	new = (tstate_t *) malloc(sizeof (tstate_t), M_DEVBUF, M_NOWAIT|M_ZERO);
672 	if (new == NULL) {
673 		return (CAM_RESRC_UNAVAIL);
674 	}
675 
676 	status = xpt_create_path(&new->owner, NULL, xpt_path_path_id(path),
677 	    xpt_path_target_id(path), xpt_path_lun_id(path));
678 	if (status != CAM_REQ_CMP) {
679 		free(new, M_DEVBUF);
680 		return (status);
681 	}
682 	new->bus = bus;
683 	new->lun = lun;
684 	SLIST_INIT(&new->atios);
685 	SLIST_INIT(&new->inots);
686 	new->hold = 1;
687 
688 	hfx = LUN_HASH_FUNC(isp, new->bus, new->lun);
689 	tptr = isp->isp_osinfo.lun_hash[hfx];
690 	if (tptr == NULL) {
691 		isp->isp_osinfo.lun_hash[hfx] = new;
692 	} else {
693 		while (tptr->next)
694 			tptr = tptr->next;
695 		tptr->next = new;
696 	}
697 	*rslt = new;
698 	return (CAM_REQ_CMP);
699 }
700 
701 static INLINE void
702 destroy_lun_state(struct ispsoftc *isp, tstate_t *tptr)
703 {
704 	int hfx;
705 	tstate_t *lw, *pw;
706 
707 	hfx = LUN_HASH_FUNC(isp, tptr->bus, tptr->lun);
708 	if (tptr->hold) {
709 		return;
710 	}
711 	pw = isp->isp_osinfo.lun_hash[hfx];
712 	if (pw == NULL) {
713 		return;
714 	} else if (pw->lun == tptr->lun && pw->bus == tptr->bus) {
715 		isp->isp_osinfo.lun_hash[hfx] = pw->next;
716 	} else {
717 		lw = pw;
718 		pw = lw->next;
719 		while (pw) {
720 			if (pw->lun == tptr->lun && pw->bus == tptr->bus) {
721 				lw->next = pw->next;
722 				break;
723 			}
724 			lw = pw;
725 			pw = pw->next;
726 		}
727 		if (pw == NULL) {
728 			return;
729 		}
730 	}
731 	free(tptr, M_DEVBUF);
732 }
733 
734 /*
735  * we enter with our locks held.
736  */
737 static void
738 isp_en_lun(struct ispsoftc *isp, union ccb *ccb)
739 {
740 	const char lfmt[] = "Lun now %sabled for target mode on channel %d";
741 	struct ccb_en_lun *cel = &ccb->cel;
742 	tstate_t *tptr;
743 	u_int16_t rstat;
744 	int bus, cmd, av, wildcard;
745 	lun_id_t lun;
746 	target_id_t tgt;
747 
748 
749 	bus = XS_CHANNEL(ccb) & 0x1;
750 	tgt = ccb->ccb_h.target_id;
751 	lun = ccb->ccb_h.target_lun;
752 
753 	/*
754 	 * Do some sanity checking first.
755 	 */
756 
757 	if ((lun != CAM_LUN_WILDCARD) &&
758 	    (lun < 0 || lun >= (lun_id_t) isp->isp_maxluns)) {
759 		ccb->ccb_h.status = CAM_LUN_INVALID;
760 		return;
761 	}
762 
763 	if (IS_SCSI(isp)) {
764 		sdparam *sdp = isp->isp_param;
765 		sdp += bus;
766 		if (tgt != CAM_TARGET_WILDCARD &&
767 		    tgt != sdp->isp_initiator_id) {
768 			ccb->ccb_h.status = CAM_TID_INVALID;
769 			return;
770 		}
771 	} else {
772 		if (tgt != CAM_TARGET_WILDCARD &&
773 		    tgt != FCPARAM(isp)->isp_iid) {
774 			ccb->ccb_h.status = CAM_TID_INVALID;
775 			return;
776 		}
777 		/*
778 		 * This is as good a place as any to check f/w capabilities.
779 		 */
780 		if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_TMODE) == 0) {
781 			isp_prt(isp, ISP_LOGERR,
782 			    "firmware does not support target mode");
783 			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
784 			return;
785 		}
786 		/*
787 		 * XXX: We *could* handle non-SCCLUN f/w, but we'd have to
788 		 * XXX: dork with our already fragile enable/disable code.
789 		 */
790 		if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) == 0) {
791 			isp_prt(isp, ISP_LOGERR,
792 			    "firmware not SCCLUN capable");
793 		}
794 	}
795 
796 	if (tgt == CAM_TARGET_WILDCARD) {
797 		if (lun == CAM_LUN_WILDCARD) {
798 			wildcard = 1;
799 		} else {
800 			ccb->ccb_h.status = CAM_LUN_INVALID;
801 			return;
802 		}
803 	} else {
804 		wildcard = 0;
805 	}
806 
807 	/*
808 	 * Next check to see whether this is a target/lun wildcard action.
809 	 *
810 	 * If so, we know that we can accept commands for luns that haven't
811 	 * been enabled yet and send them upstream. Otherwise, we have to
812 	 * handle them locally (if we see them at all).
813 	 */
814 
815 	if (wildcard) {
816 		tptr = &isp->isp_osinfo.tsdflt[bus];
817 		if (cel->enable) {
818 			if (isp->isp_osinfo.tmflags[bus] &
819 			    TM_WILDCARD_ENABLED) {
820 				ccb->ccb_h.status = CAM_LUN_ALRDY_ENA;
821 				return;
822 			}
823 			ccb->ccb_h.status =
824 			    xpt_create_path(&tptr->owner, NULL,
825 			    xpt_path_path_id(ccb->ccb_h.path),
826 			    xpt_path_target_id(ccb->ccb_h.path),
827 			    xpt_path_lun_id(ccb->ccb_h.path));
828 			if (ccb->ccb_h.status != CAM_REQ_CMP) {
829 				return;
830 			}
831 			SLIST_INIT(&tptr->atios);
832 			SLIST_INIT(&tptr->inots);
833 			isp->isp_osinfo.tmflags[bus] |= TM_WILDCARD_ENABLED;
834 		} else {
835 			if ((isp->isp_osinfo.tmflags[bus] &
836 			    TM_WILDCARD_ENABLED) == 0) {
837 				ccb->ccb_h.status = CAM_REQ_CMP;
838 				return;
839 			}
840 			if (tptr->hold) {
841 				ccb->ccb_h.status = CAM_SCSI_BUSY;
842 				return;
843 			}
844 			xpt_free_path(tptr->owner);
845 			isp->isp_osinfo.tmflags[bus] &= ~TM_WILDCARD_ENABLED;
846 		}
847 	}
848 
849 	/*
850 	 * Now check to see whether this bus needs to be
851 	 * enabled/disabled with respect to target mode.
852 	 */
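	/*
	 * The word handed to ISPCTL_TOGGLE_TMODE carries the channel
	 * number in its high bit, with ENABLE_TARGET_FLAG or'd in when
	 * enabling; isp_control() returns nonzero on failure.
	 */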
853 	av = bus << 31;
854 	if (cel->enable && !(isp->isp_osinfo.tmflags[bus] & TM_TMODE_ENABLED)) {
855 		av |= ENABLE_TARGET_FLAG;
856 		av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
857 		if (av) {
858 			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
859 			if (wildcard) {
860 				isp->isp_osinfo.tmflags[bus] &=
861 				    ~TM_WILDCARD_ENABLED;
862 				xpt_free_path(tptr->owner);
863 			}
864 			return;
865 		}
866 		isp->isp_osinfo.tmflags[bus] |= TM_TMODE_ENABLED;
867 		isp_prt(isp, ISP_LOGINFO,
868 		    "Target Mode enabled on channel %d", bus);
869 	} else if (cel->enable == 0 &&
870 	    (isp->isp_osinfo.tmflags[bus] & TM_TMODE_ENABLED) && wildcard) {
871 		if (are_any_luns_enabled(isp, bus)) {
872 			ccb->ccb_h.status = CAM_SCSI_BUSY;
873 			return;
874 		}
875 		av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
876 		if (av) {
877 			ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
878 			return;
879 		}
880 		isp->isp_osinfo.tmflags[bus] &= ~TM_TMODE_ENABLED;
881 		isp_prt(isp, ISP_LOGINFO,
882 		    "Target Mode disabled on channel %d", bus);
883 	}
884 
885 	if (wildcard) {
886 		ccb->ccb_h.status = CAM_REQ_CMP;
887 		return;
888 	}
889 
890 	if (cel->enable) {
891 		ccb->ccb_h.status =
892 		    create_lun_state(isp, bus, ccb->ccb_h.path, &tptr);
893 		if (ccb->ccb_h.status != CAM_REQ_CMP) {
894 			return;
895 		}
896 	} else {
897 		tptr = get_lun_statep(isp, bus, lun);
898 		if (tptr == NULL) {
899 			ccb->ccb_h.status = CAM_LUN_INVALID;
900 			return;
901 		}
902 	}
903 
904 	if (isp_psema_sig_rqe(isp, bus)) {
905 		rls_lun_statep(isp, tptr);
906 		if (cel->enable)
907 			destroy_lun_state(isp, tptr);
908 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
909 		return;
910 	}
911 
912 	if (cel->enable) {
913 		u_int32_t seq = isp->isp_osinfo.rollinfo++;
914 		int c, n, ulun = lun;
915 
916 		cmd = RQSTYPE_ENABLE_LUN;
917 		c = DFLT_CMND_CNT;
918 		n = DFLT_INOT_CNT;
919 		if (IS_FC(isp) && lun != 0) {
920 			cmd = RQSTYPE_MODIFY_LUN;
921 			n = 0;
922 			/*
923 		 	 * For SCC firmware, we only deal with setting
924 			 * (enabling or modifying) lun 0.
925 			 */
926 			ulun = 0;
927 		}
928 		rstat = LUN_ERR;
929 		if (isp_lun_cmd(isp, cmd, bus, tgt, ulun, c, n, seq)) {
930 			xpt_print_path(ccb->ccb_h.path);
931 			isp_prt(isp, ISP_LOGWARN, "isp_lun_cmd failed");
932 			goto out;
933 		}
934 		if (isp_cv_wait_timed_rqe(isp, bus, 30 * hz)) {
935 			xpt_print_path(ccb->ccb_h.path);
936 			isp_prt(isp, ISP_LOGERR,
937 			    "wait for ENABLE/MODIFY LUN timed out");
938 			goto out;
939 		}
940 		rstat = isp->isp_osinfo.rstatus[bus];
941 		if (rstat != LUN_OK) {
942 			xpt_print_path(ccb->ccb_h.path);
943 			isp_prt(isp, ISP_LOGERR,
944 			    "ENABLE/MODIFY LUN returned 0x%x", rstat);
945 			goto out;
946 		}
947 	} else {
948 		int c, n, ulun = lun;
949 		u_int32_t seq;
950 
951 		rstat = LUN_ERR;
952 		seq = isp->isp_osinfo.rollinfo++;
953 		cmd = -RQSTYPE_MODIFY_LUN;
954 
955 		c = DFLT_CMND_CNT;
956 		n = DFLT_INOT_CNT;
957 		if (IS_FC(isp) && lun != 0) {
958 			n = 0;
959 			/*
960 		 	 * For SCC firmware, we only deal with setting
961 			 * (enabling or modifying) lun 0.
962 			 */
963 			ulun = 0;
964 		}
965 		if (isp_lun_cmd(isp, cmd, bus, tgt, ulun, c, n, seq)) {
966 			xpt_print_path(ccb->ccb_h.path);
967 			isp_prt(isp, ISP_LOGERR, "isp_lun_cmd failed");
968 			goto out;
969 		}
970 		if (isp_cv_wait_timed_rqe(isp, bus, 30 * hz)) {
971 			xpt_print_path(ccb->ccb_h.path);
972 			isp_prt(isp, ISP_LOGERR,
973 			    "wait for MODIFY LUN timed out");
974 			goto out;
975 		}
976 		rstat = isp->isp_osinfo.rstatus[bus];
977 		if (rstat != LUN_OK) {
978 			xpt_print_path(ccb->ccb_h.path);
979 			isp_prt(isp, ISP_LOGERR,
980 			    "MODIFY LUN returned 0x%x", rstat);
981 			goto out;
982 		}
983 		if (IS_FC(isp) && lun) {
984 			goto out;
985 		}
986 
987 		seq = isp->isp_osinfo.rollinfo++;
988 
989 		rstat = LUN_ERR;
990 		cmd = -RQSTYPE_ENABLE_LUN;
991 		if (isp_lun_cmd(isp, cmd, bus, tgt, lun, 0, 0, seq)) {
992 			xpt_print_path(ccb->ccb_h.path);
993 			isp_prt(isp, ISP_LOGERR, "isp_lun_cmd failed");
994 			goto out;
995 		}
996 		if (isp_cv_wait_timed_rqe(isp, bus, 30 * hz)) {
997 			xpt_print_path(ccb->ccb_h.path);
998 			isp_prt(isp, ISP_LOGERR,
999 			     "wait for DISABLE LUN timed out");
1000 			goto out;
1001 		}
1002 		rstat = isp->isp_osinfo.rstatus[bus];
1003 		if (rstat != LUN_OK) {
1004 			xpt_print_path(ccb->ccb_h.path);
1005 			isp_prt(isp, ISP_LOGWARN,
1006 			    "DISABLE LUN returned 0x%x", rstat);
1007 			goto out;
1008 		}
1009 		if (are_any_luns_enabled(isp, bus) == 0) {
1010 			av = isp_control(isp, ISPCTL_TOGGLE_TMODE, &av);
1011 			if (av) {
1012 				isp_prt(isp, ISP_LOGWARN,
1013 				    "disable target mode on channel %d failed",
1014 				    bus);
1015 				goto out;
1016 			}
1017 			isp->isp_osinfo.tmflags[bus] &= ~TM_TMODE_ENABLED;
1018 			xpt_print_path(ccb->ccb_h.path);
1019 			isp_prt(isp, ISP_LOGINFO,
1020 			    "Target Mode disabled on channel %d", bus);
1021 		}
1022 	}
1023 
1024 out:
1025 	isp_vsema_rqe(isp, bus);
1026 
1027 	if (rstat != LUN_OK) {
1028 		xpt_print_path(ccb->ccb_h.path);
1029 		isp_prt(isp, ISP_LOGWARN,
1030 		    "lun %sable failed", (cel->enable) ? "en" : "dis");
1031 		ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1032 		rls_lun_statep(isp, tptr);
1033 		if (cel->enable)
1034 			destroy_lun_state(isp, tptr);
1035 	} else {
1036 		xpt_print_path(ccb->ccb_h.path);
1037 		isp_prt(isp, ISP_LOGINFO, lfmt,
1038 		    (cel->enable) ? "en" : "dis", bus);
1039 		rls_lun_statep(isp, tptr);
1040 		if (cel->enable == 0) {
1041 			destroy_lun_state(isp, tptr);
1042 		}
1043 		ccb->ccb_h.status = CAM_REQ_CMP;
1044 	}
1045 }
1046 
1047 static cam_status
1048 isp_abort_tgt_ccb(struct ispsoftc *isp, union ccb *ccb)
1049 {
1050 	tstate_t *tptr;
1051 	struct ccb_hdr_slist *lp;
1052 	struct ccb_hdr *curelm;
1053 	int found;
1054 	union ccb *accb = ccb->cab.abort_ccb;
1055 
1056 	if (accb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
1057 		if (IS_FC(isp) && (accb->ccb_h.target_id !=
1058 		    ((fcparam *) isp->isp_param)->isp_loopid)) {
1059 			return (CAM_PATH_INVALID);
1060 		} else if (IS_SCSI(isp) && (accb->ccb_h.target_id !=
1061 		    ((sdparam *) isp->isp_param)->isp_initiator_id)) {
1062 			return (CAM_PATH_INVALID);
1063 		}
1064 	}
1065 	tptr = get_lun_statep(isp, XS_CHANNEL(ccb), accb->ccb_h.target_lun);
1066 	if (tptr == NULL) {
1067 		return (CAM_PATH_INVALID);
1068 	}
1069 	if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
1070 		lp = &tptr->atios;
1071 	} else if (accb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
1072 		lp = &tptr->inots;
1073 	} else {
1074 		rls_lun_statep(isp, tptr);
1075 		return (CAM_UA_ABORT);
1076 	}
1077 	curelm = SLIST_FIRST(lp);
1078 	found = 0;
1079 	if (curelm == &accb->ccb_h) {
1080 		found = 1;
1081 		SLIST_REMOVE_HEAD(lp, sim_links.sle);
1082 	} else {
1083 		while(curelm != NULL) {
1084 			struct ccb_hdr *nextelm;
1085 
1086 			nextelm = SLIST_NEXT(curelm, sim_links.sle);
1087 			if (nextelm == &accb->ccb_h) {
1088 				found = 1;
1089 				SLIST_NEXT(curelm, sim_links.sle) =
1090 				    SLIST_NEXT(nextelm, sim_links.sle);
1091 				break;
1092 			}
1093 			curelm = nextelm;
1094 		}
1095 	}
1096 	rls_lun_statep(isp, tptr);
1097 	if (found) {
1098 		accb->ccb_h.status = CAM_REQ_ABORTED;
1099 		return (CAM_REQ_CMP);
1100 	}
1101 	return(CAM_PATH_INVALID);
1102 }
1103 
1104 static cam_status
1105 isp_target_start_ctio(struct ispsoftc *isp, union ccb *ccb)
1106 {
1107 	void *qe;
1108 	struct ccb_scsiio *cso = &ccb->csio;
1109 	u_int16_t *hp, save_handle;
1110 	u_int16_t nxti, optr;
1111 	u_int8_t local[QENTRY_LEN];
1112 
1113 
1114 	if (isp_getrqentry(isp, &nxti, &optr, &qe)) {
1115 		xpt_print_path(ccb->ccb_h.path);
1116 		printf("Request Queue Overflow in isp_target_start_ctio\n");
1117 		return (CAM_RESRC_UNAVAIL);
1118 	}
1119 	bzero(local, QENTRY_LEN);
1120 
1121 	/*
1122 	 * We're either moving data or completing a command here.
1123 	 */
1124 
1125 	if (IS_FC(isp)) {
1126 		atio_private_data_t *atp;
1127 		ct2_entry_t *cto = (ct2_entry_t *) local;
1128 
1129 		cto->ct_header.rqs_entry_type = RQSTYPE_CTIO2;
1130 		cto->ct_header.rqs_entry_count = 1;
1131 		cto->ct_iid = cso->init_id;
1132 		if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) == 0) {
1133 			cto->ct_lun = ccb->ccb_h.target_lun;
1134 		}
1135 
1136 		atp = isp_get_atpd(isp, cso->tag_id);
1137 		if (atp == NULL) {
1138 			isp_prt(isp, ISP_LOGERR,
1139 			    "cannot find private data adjunct for tag %x",
1140 			    cso->tag_id);
1141 			return (-1);
1142 		}
1143 
1144 		cto->ct_rxid = cso->tag_id;
1145 		if (cso->dxfer_len == 0) {
1146 			cto->ct_flags |= CT2_FLAG_MODE1 | CT2_NO_DATA;
1147 			if (ccb->ccb_h.flags & CAM_SEND_STATUS) {
1148 				cto->ct_flags |= CT2_SENDSTATUS;
1149 				cto->rsp.m1.ct_scsi_status = cso->scsi_status;
1150 				cto->ct_resid =
1151 				    atp->orig_datalen - atp->bytes_xfered;
1152 				if (cto->ct_resid < 0) {
1153 					cto->rsp.m1.ct_scsi_status |=
1154 					    CT2_DATA_OVER;
1155 				} else if (cto->ct_resid > 0) {
1156 					cto->rsp.m1.ct_scsi_status |=
1157 					    CT2_DATA_UNDER;
1158 				}
1159 			}
1160 			if ((ccb->ccb_h.flags & CAM_SEND_SENSE) != 0) {
1161 				int m = min(cso->sense_len, MAXRESPLEN);
1162 				bcopy(&cso->sense_data, cto->rsp.m1.ct_resp, m);
1163 				cto->rsp.m1.ct_senselen = m;
1164 				cto->rsp.m1.ct_scsi_status |= CT2_SNSLEN_VALID;
1165 			}
1166 		} else {
1167 			cto->ct_flags |= CT2_FLAG_MODE0;
1168 			if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1169 				cto->ct_flags |= CT2_DATA_IN;
1170 			} else {
1171 				cto->ct_flags |= CT2_DATA_OUT;
1172 			}
1173 			cto->ct_reloff = atp->bytes_xfered;
1174 			if ((ccb->ccb_h.flags & CAM_SEND_STATUS) != 0) {
1175 				cto->ct_flags |= CT2_SENDSTATUS;
1176 				cto->rsp.m0.ct_scsi_status = cso->scsi_status;
1177 				cto->ct_resid =
1178 				    atp->orig_datalen -
1179 				    (atp->bytes_xfered + cso->dxfer_len);
1180 				if (cto->ct_resid < 0) {
1181 					cto->rsp.m0.ct_scsi_status |=
1182 					    CT2_DATA_OVER;
1183 				} else if (cto->ct_resid > 0) {
1184 					cto->rsp.m0.ct_scsi_status |=
1185 					    CT2_DATA_UNDER;
1186 				}
1187 			} else {
1188 				atp->last_xframt = cso->dxfer_len;
1189 			}
1190 			/*
1191 			 * If we're sending data and status back together,
1192 			 * we can't also send back sense data as well.
1193 			 */
1194 			ccb->ccb_h.flags &= ~CAM_SEND_SENSE;
1195 		}
1196 
1197 		if (cto->ct_flags & CT2_SENDSTATUS) {
1198 			isp_prt(isp, ISP_LOGTDEBUG0,
1199 			    "CTIO2[%x] STATUS %x origd %u curd %u resid %u",
1200 			    cto->ct_rxid, cso->scsi_status, atp->orig_datalen,
1201 			    cso->dxfer_len, cto->ct_resid);
1202 			cto->ct_flags |= CT2_CCINCR;
1203 			atp->state = ATPD_STATE_LAST_CTIO;
1204 		} else
1205 			atp->state = ATPD_STATE_CTIO;
1206 		cto->ct_timeout = 10;
1207 		hp = &cto->ct_syshandle;
1208 	} else {
1209 		ct_entry_t *cto = (ct_entry_t *) local;
1210 
1211 		cto->ct_header.rqs_entry_type = RQSTYPE_CTIO;
1212 		cto->ct_header.rqs_entry_count = 1;
1213 		cto->ct_iid = cso->init_id;
1214 		cto->ct_iid |= XS_CHANNEL(ccb) << 7;
1215 		cto->ct_tgt = ccb->ccb_h.target_id;
1216 		cto->ct_lun = ccb->ccb_h.target_lun;
1217 		cto->ct_fwhandle = AT_GET_HANDLE(cso->tag_id);
1218 		if (AT_HAS_TAG(cso->tag_id)) {
1219 			cto->ct_tag_val = (u_int8_t) AT_GET_TAG(cso->tag_id);
1220 			cto->ct_flags |= CT_TQAE;
1221 		}
1222 		if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) {
1223 			cto->ct_flags |= CT_NODISC;
1224 		}
1225 		if (cso->dxfer_len == 0) {
1226 			cto->ct_flags |= CT_NO_DATA;
1227 		} else if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1228 			cto->ct_flags |= CT_DATA_IN;
1229 		} else {
1230 			cto->ct_flags |= CT_DATA_OUT;
1231 		}
1232 		if (ccb->ccb_h.flags & CAM_SEND_STATUS) {
1233 			cto->ct_flags |= CT_SENDSTATUS|CT_CCINCR;
1234 			cto->ct_scsi_status = cso->scsi_status;
1235 			cto->ct_resid = cso->resid;
1236 			isp_prt(isp, ISP_LOGTDEBUG0,
1237 			    "CTIO[%x] SCSI STATUS 0x%x resid %d tag_id %x",
1238 			    cto->ct_fwhandle, cso->scsi_status, cso->resid,
1239 			    cso->tag_id);
1240 		}
1241 		ccb->ccb_h.flags &= ~CAM_SEND_SENSE;
1242 		cto->ct_timeout = 10;
1243 		hp = &cto->ct_syshandle;
1244 	}
1245 
1246 	if (isp_save_xs(isp, (XS_T *)ccb, hp)) {
1247 		xpt_print_path(ccb->ccb_h.path);
1248 		printf("No XFLIST pointers for isp_target_start_ctio\n");
1249 		return (CAM_RESRC_UNAVAIL);
1250 	}
1251 
1252 
1253 	/*
1254 	 * Call the dma setup routines for this entry (and any subsequent
1255 	 * CTIOs) if there's data to move, and then tell the f/w it's got
1256 	 * new things to play with. As with isp_start's usage of DMA setup,
1257 	 * any swizzling is done in the machine dependent layer. Because
1258 	 * of this, we put the request onto the queue area first in native
1259 	 * format.
1260 	 */
1261 
1262 	save_handle = *hp;
1263 
1264 	switch (ISP_DMASETUP(isp, cso, (ispreq_t *) local, &nxti, optr)) {
1265 	case CMD_QUEUED:
1266 		ISP_ADD_REQUEST(isp, nxti);
1267 		return (CAM_REQ_INPROG);
1268 
1269 	case CMD_EAGAIN:
1270 		ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
1271 		isp_destroy_handle(isp, save_handle);
1272 		return (CAM_RESRC_UNAVAIL);
1273 
1274 	default:
1275 		isp_destroy_handle(isp, save_handle);
1276 		return (XS_ERR(ccb));
1277 	}
1278 }
1279 
1280 static void
1281 isp_refire_putback_atio(void *arg)
1282 {
1283 	int s = splcam();
1284 	isp_target_putback_atio(arg);
1285 	splx(s);
1286 }
1287 
1288 static void
1289 isp_target_putback_atio(union ccb *ccb)
1290 {
1291 	struct ispsoftc *isp;
1292 	struct ccb_scsiio *cso;
1293 	u_int16_t nxti, optr;
1294 	void *qe;
1295 
1296 	isp = XS_ISP(ccb);
1297 
1298 	if (isp_getrqentry(isp, &nxti, &optr, &qe)) {
1299 		(void) timeout(isp_refire_putback_atio, ccb, 10);
1300 		isp_prt(isp, ISP_LOGWARN,
1301 		    "isp_target_putback_atio: Request Queue Overflow");
1302 		return;
1303 	}
1304 	bzero(qe, QENTRY_LEN);
1305 	cso = &ccb->csio;
1306 	if (IS_FC(isp)) {
1307 		at2_entry_t local, *at = &local;
1308 		MEMZERO(at, sizeof (at2_entry_t));
1309 		at->at_header.rqs_entry_type = RQSTYPE_ATIO2;
1310 		at->at_header.rqs_entry_count = 1;
1311 		if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) != 0) {
1312 			at->at_scclun = (uint16_t) ccb->ccb_h.target_lun;
1313 		} else {
1314 			at->at_lun = (uint8_t) ccb->ccb_h.target_lun;
1315 		}
1316 		at->at_status = CT_OK;
1317 		at->at_rxid = cso->tag_id;
1318 		at->at_iid = cso->ccb_h.target_id;
1319 		isp_put_atio2(isp, at, qe);
1320 	} else {
1321 		at_entry_t local, *at = &local;
1322 		MEMZERO(at, sizeof (at_entry_t));
1323 		at->at_header.rqs_entry_type = RQSTYPE_ATIO;
1324 		at->at_header.rqs_entry_count = 1;
1325 		at->at_iid = cso->init_id;
1326 		at->at_iid |= XS_CHANNEL(ccb) << 7;
1327 		at->at_tgt = cso->ccb_h.target_id;
1328 		at->at_lun = cso->ccb_h.target_lun;
1329 		at->at_status = CT_OK;
1330 		at->at_tag_val = AT_GET_TAG(cso->tag_id);
1331 		at->at_handle = AT_GET_HANDLE(cso->tag_id);
1332 		isp_put_atio(isp, at, qe);
1333 	}
1334 	ISP_TDQE(isp, "isp_target_putback_atio", (int) optr, qe);
1335 	ISP_ADD_REQUEST(isp, nxti);
1336 	isp_complete_ctio(ccb);
1337 }
1338 
1339 static void
1340 isp_complete_ctio(union ccb *ccb)
1341 {
1342 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
1343 		ccb->ccb_h.status |= CAM_REQ_CMP;
1344 	}
1345 	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1346 	xpt_done(ccb);
1347 }
1348 
1349 /*
1350  * Handle ATIO stuff that the generic code can't.
1351  * This means handling CDBs.
1352  */
1353 
1354 static int
1355 isp_handle_platform_atio(struct ispsoftc *isp, at_entry_t *aep)
1356 {
1357 	tstate_t *tptr;
1358 	int status, bus, iswildcard;
1359 	struct ccb_accept_tio *atiop;
1360 
1361 	/*
1362 	 * The firmware status (except for the QLTM_SVALID bit)
1363 	 * indicates why this ATIO was sent to us.
1364 	 *
1365 	 * If QLTM_SVALID is set, the firmware has recommended Sense Data.
1366 	 *
1367 	 * If the DISCONNECTS DISABLED bit is set in the flags field,
1368 	 * we're still connected on the SCSI bus.
1369 	 */
1370 	status = aep->at_status;
1371 	if ((status & ~QLTM_SVALID) == AT_PHASE_ERROR) {
1372 		/*
1373 		 * Bus Phase Sequence error. We should have sense data
1374 		 * suggested by the f/w. I'm not sure quite yet what
1375 		 * to do about this for CAM.
1376 		 */
1377 		isp_prt(isp, ISP_LOGWARN, "PHASE ERROR");
1378 		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1379 		return (0);
1380 	}
1381 	if ((status & ~QLTM_SVALID) != AT_CDB) {
1382 		isp_prt(isp, ISP_LOGWARN, "bad atio (0x%x) leaked to platform",
1383 		    status);
1384 		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1385 		return (0);
1386 	}
1387 
1388 	bus = GET_BUS_VAL(aep->at_iid);
1389 	tptr = get_lun_statep(isp, bus, aep->at_lun);
1390 	if (tptr == NULL) {
1391 		tptr = get_lun_statep(isp, bus, CAM_LUN_WILDCARD);
1392 		iswildcard = 1;
1393 	} else {
1394 		iswildcard = 0;
1395 	}
1396 
1397 	if (tptr == NULL) {
1398 		/*
1399 		 * Because we can't autofeed sense data back with
1400 		 * a command for parallel SCSI, we can't give back
1401 		 * a CHECK CONDITION. We'll give back a BUSY status
1402 		 * instead. This works out okay because the only
1403 		 * time we should, in fact, get this, is in the
1404 		 * case that somebody configured us without the
1405 		 * blackhole driver, so they get what they deserve.
1406 		 */
1407 		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1408 		return (0);
1409 	}
1410 
1411 	atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios);
1412 	if (atiop == NULL) {
1413 		/*
1414 		 * Because we can't autofeed sense data back with
1415 		 * a command for parallel SCSI, we can't give back
1416 		 * a CHECK CONDITION. We'll give back a QUEUE FULL status
1417 		 * instead. This works out okay because the only time we
1418 		 * should, in fact, get this, is in the case that we've
1419 		 * run out of ATIOS.
1420 		 */
1421 		xpt_print_path(tptr->owner);
1422 		isp_prt(isp, ISP_LOGWARN,
1423 		    "no ATIOS for lun %d from initiator %d on channel %d",
1424 		    aep->at_lun, GET_IID_VAL(aep->at_iid), bus);
1425 		if (aep->at_flags & AT_TQAE)
1426 			isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0);
1427 		else
1428 			isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1429 		rls_lun_statep(isp, tptr);
1430 		return (0);
1431 	}
1432 	SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle);
1433 	if (iswildcard) {
1434 		atiop->ccb_h.target_id = aep->at_tgt;
1435 		atiop->ccb_h.target_lun = aep->at_lun;
1436 	}
1437 	if (aep->at_flags & AT_NODISC) {
1438 		atiop->ccb_h.flags = CAM_DIS_DISCONNECT;
1439 	} else {
1440 		atiop->ccb_h.flags = 0;
1441 	}
1442 
1443 	if (status & QLTM_SVALID) {
1444 		size_t amt = imin(QLTM_SENSELEN, sizeof (atiop->sense_data));
1445 		atiop->sense_len = amt;
1446 		MEMCPY(&atiop->sense_data, aep->at_sense, amt);
1447 	} else {
1448 		atiop->sense_len = 0;
1449 	}
1450 
1451 	atiop->init_id = GET_IID_VAL(aep->at_iid);
1452 	atiop->cdb_len = aep->at_cdblen;
1453 	MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, aep->at_cdblen);
1454 	atiop->ccb_h.status = CAM_CDB_RECVD;
1455 	/*
1456 	 * Construct a tag 'id' based upon tag value (which may be 0..255)
1457 	 * and the handle (which we have to preserve).
1458 	 */
1459 	AT_MAKE_TAGID(atiop->tag_id, aep);
1460 	if (aep->at_flags & AT_TQAE) {
1461 		atiop->tag_action = aep->at_tag_type;
1462 		atiop->ccb_h.status |= CAM_TAG_ACTION_VALID;
1463 	}
1464 	xpt_done((union ccb*)atiop);
1465 	isp_prt(isp, ISP_LOGTDEBUG0,
1466 	    "ATIO[%x] CDB=0x%x bus %d iid%d->lun%d tag 0x%x ttype 0x%x %s",
1467 	    aep->at_handle, aep->at_cdb[0] & 0xff, GET_BUS_VAL(aep->at_iid),
1468 	    GET_IID_VAL(aep->at_iid), aep->at_lun, aep->at_tag_val & 0xff,
1469 	    aep->at_tag_type, (aep->at_flags & AT_NODISC)?
1470 	    "nondisc" : "disconnecting");
1471 	rls_lun_statep(isp, tptr);
1472 	return (0);
1473 }
1474 
1475 static int
1476 isp_handle_platform_atio2(struct ispsoftc *isp, at2_entry_t *aep)
1477 {
1478 	lun_id_t lun;
1479 	tstate_t *tptr;
1480 	struct ccb_accept_tio *atiop;
1481 	atio_private_data_t *atp;
1482 
1483 	/*
1484 	 * The firmware status (except for the QLTM_SVALID bit)
1485 	 * indicates why this ATIO was sent to us.
1486 	 *
1487 	 * If QLTM_SVALID is set, the firmware has recommended Sense Data.
1488 	 */
1489 	if ((aep->at_status & ~QLTM_SVALID) != AT_CDB) {
1490 		isp_prt(isp, ISP_LOGWARN,
1491 		    "bogus atio (0x%x) leaked to platform", aep->at_status);
1492 		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1493 		return (0);
1494 	}
1495 
1496 	if ((FCPARAM(isp)->isp_fwattr & ISP_FW_ATTR_SCCLUN) != 0) {
1497 		lun = aep->at_scclun;
1498 	} else {
1499 		lun = aep->at_lun;
1500 	}
1501 	tptr = get_lun_statep(isp, 0, lun);
1502 	if (tptr == NULL) {
1503 		isp_prt(isp, ISP_LOGWARN, "no state pointer for lun %d", lun);
1504 		tptr = get_lun_statep(isp, 0, CAM_LUN_WILDCARD);
1505 	}
1506 
1507 	if (tptr == NULL) {
1508 		/*
1509 		 * What we'd like to know is whether or not we have a listener
1510 		 * upstream that really hasn't configured yet. If we do, then
1511 		 * we can give a more sensible reply here. If not, then we can
1512 		 * reject this out of hand.
1513 		 *
1514 		 * Choices for what to send were
1515 		 *
1516 		 *	Not Ready, Unit Not Self-Configured Yet
1517 		 *	(0x2,0x3e,0x00)
1518 		 *
1519 		 * for the former and
1520 		 *
1521 		 *	Illegal Request, Logical Unit Not Supported
1522 		 *	(0x5,0x25,0x00)
1523 		 *
1524 		 * for the latter.
1525 		 *
1526 		 * We used to decide whether there was at least one listener
1527 		 * based upon whether the black hole driver was configured.
1528 		 * However, recent config(8) changes have made this hard to do
1529 		 * at this time.
1530 		 *
1531 		 */
1532 		isp_endcmd(isp, aep, SCSI_STATUS_BUSY, 0);
1533 		return (0);
1534 	}
1535 
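	/*
	 * A lookup with tag 0 returns a free ATIO private data slot
	 * (free slots have a zero tag; see isp_get_atpd() above).
	 */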
1536 	atp = isp_get_atpd(isp, 0);
1537 	atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios);
1538 	if (atiop == NULL || atp == NULL) {
1539 		/*
1540 		 * Because we can't autofeed sense data back with
1541 		 * a command for parallel SCSI, we can't give back
1542 		 * a CHECK CONDITION. We'll give back a QUEUE FULL status
1543 		 * instead. This works out okay because the only time we
1544 		 * should, in fact, get this, is in the case that we've
1545 		 * run out of ATIOS.
1546 		 */
1547 		xpt_print_path(tptr->owner);
1548 		isp_prt(isp, ISP_LOGWARN,
1549 		    "no %s for lun %d from initiator %d",
1550 		    (atp == NULL && atiop == NULL)? "ATIO2s *or* ATPS" :
1551 		    ((atp == NULL)? "ATPs" : "ATIO2s"), lun, aep->at_iid);
1552 		rls_lun_statep(isp, tptr);
1553 		isp_endcmd(isp, aep, SCSI_STATUS_QUEUE_FULL, 0);
1554 		return (0);
1555 	}
1556 	atp->state = ATPD_STATE_ATIO;
1557 	SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle);
1558 	tptr->atio_count--;
1559 	isp_prt(isp, ISP_LOGTDEBUG0, "Take FREE ATIO2 lun %d, count now %d",
1560 	    lun, tptr->atio_count);
1561 
1562 	if (tptr == &isp->isp_osinfo.tsdflt[0]) {
1563 		atiop->ccb_h.target_id =
1564 		    ((fcparam *)isp->isp_param)->isp_loopid;
1565 		atiop->ccb_h.target_lun = lun;
1566 	}
1567 	/*
1568 	 * We don't get 'suggested' sense data as we do with SCSI cards.
1569 	 */
1570 	atiop->sense_len = 0;
1571 
1572 	atiop->init_id = aep->at_iid;
1573 	atiop->cdb_len = ATIO2_CDBLEN;
1574 	MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cdb, ATIO2_CDBLEN);
1575 	atiop->ccb_h.status = CAM_CDB_RECVD;
1576 	atiop->tag_id = aep->at_rxid;
1577 	switch (aep->at_taskflags & ATIO2_TC_ATTR_MASK) {
1578 	case ATIO2_TC_ATTR_SIMPLEQ:
1579 		atiop->tag_action = MSG_SIMPLE_Q_TAG;
1580 		break;
1581 	case ATIO2_TC_ATTR_HEADOFQ:
1582 		atiop->tag_action = MSG_HEAD_OF_Q_TAG;
1583 		break;
1584 	case ATIO2_TC_ATTR_ORDERED:
1585 		atiop->tag_action = MSG_ORDERED_Q_TAG;
1586 		break;
1587 	case ATIO2_TC_ATTR_ACAQ:		/* ?? */
1588 	case ATIO2_TC_ATTR_UNTAGGED:
1589 	default:
1590 		atiop->tag_action = 0;
1591 		break;
1592 	}
1593 	atiop->ccb_h.flags = CAM_TAG_ACTION_VALID;
1594 
1595 	atp->tag = atiop->tag_id;
1596 	atp->lun = lun;
1597 	atp->orig_datalen = aep->at_datalen;
1598 	atp->last_xframt = 0;
1599 	atp->bytes_xfered = 0;
1600 	atp->state = ATPD_STATE_CAM;
1601 	xpt_done((union ccb*)atiop);
1602 
1603 	isp_prt(isp, ISP_LOGTDEBUG0,
1604 	    "ATIO2[%x] CDB=0x%x iid%d->lun%d tattr 0x%x datalen %u",
1605 	    aep->at_rxid, aep->at_cdb[0] & 0xff, aep->at_iid,
1606 	    lun, aep->at_taskflags, aep->at_datalen);
1607 	rls_lun_statep(isp, tptr);
1608 	return (0);
1609 }
1610 
1611 static int
1612 isp_handle_platform_ctio(struct ispsoftc *isp, void *arg)
1613 {
1614 	union ccb *ccb;
1615 	int sentstatus, ok, notify_cam, resid = 0;
1616 	u_int16_t tval;
1617 
1618 	/*
1619 	 * CTIO and CTIO2 are close enough....
1620 	 */
1621 
1622 	ccb = (union ccb *) isp_find_xs(isp, ((ct_entry_t *)arg)->ct_syshandle);
1623 	KASSERT((ccb != NULL), ("null ccb in isp_handle_platform_ctio"));
1624 	isp_destroy_handle(isp, ((ct_entry_t *)arg)->ct_syshandle);
1625 
1626 	if (IS_FC(isp)) {
1627 		ct2_entry_t *ct = arg;
1628 		atio_private_data_t *atp = isp_get_atpd(isp, ct->ct_rxid);
1629 		if (atp == NULL) {
1630 			isp_prt(isp, ISP_LOGERR,
1631 			    "cannot find adjunct for %x after I/O",
1632 			    ct->ct_rxid);
1633 			return (0);
1634 		}
1635 		sentstatus = ct->ct_flags & CT2_SENDSTATUS;
1636 		ok = (ct->ct_status & ~QLTM_SVALID) == CT_OK;
1637 		if (ok && sentstatus && (ccb->ccb_h.flags & CAM_SEND_SENSE)) {
1638 			ccb->ccb_h.status |= CAM_SENT_SENSE;
1639 		}
1640 		notify_cam = ct->ct_header.rqs_seqno & 0x1;
1641 		if ((ct->ct_flags & CT2_DATAMASK) != CT2_NO_DATA) {
1642 			resid = ct->ct_resid;
1643 			atp->bytes_xfered += (atp->last_xframt - resid);
1644 			atp->last_xframt = 0;
1645 		}
1646 		if (sentstatus || !ok) {
1647 			atp->tag = 0;
1648 		}
1649 		isp_prt(isp, ok? ISP_LOGTDEBUG0 : ISP_LOGWARN,
1650 		    "CTIO2[%x] sts 0x%x flg 0x%x sns %d resid %d %s",
1651 		    ct->ct_rxid, ct->ct_status, ct->ct_flags,
1652 		    (ccb->ccb_h.status & CAM_SENT_SENSE) != 0,
1653 		    resid, sentstatus? "FIN" : "MID");
1654 		tval = ct->ct_rxid;
1655 
1656 		/* XXX: should really come after isp_complete_ctio */
1657 		atp->state = ATPD_STATE_PDON;
1658 	} else {
1659 		ct_entry_t *ct = arg;
1660 		sentstatus = ct->ct_flags & CT_SENDSTATUS;
1661 		ok = (ct->ct_status  & ~QLTM_SVALID) == CT_OK;
1662 		/*
1663 		 * We *ought* to be able to get back to the original ATIO
1664 		 * here, but for some reason this gets lost. It's just as
1665 		 * well because it's squirrelled away as part of periph
1666 		 * private data.
1667 		 *
1668 		 * We can live without it as long as we continue to use
1669 		 * the auto-replenish feature for CTIOs.
1670 		 */
1671 		notify_cam = ct->ct_header.rqs_seqno & 0x1;
1672 		if (ct->ct_status & QLTM_SVALID) {
1673 			char *sp = (char *)ct;
1674 			sp += CTIO_SENSE_OFFSET;
1675 			ccb->csio.sense_len =
1676 			    min(sizeof (ccb->csio.sense_data), QLTM_SENSELEN);
1677 			MEMCPY(&ccb->csio.sense_data, sp, ccb->csio.sense_len);
1678 			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
1679 		}
1680 		if ((ct->ct_flags & CT_DATAMASK) != CT_NO_DATA) {
1681 			resid = ct->ct_resid;
1682 		}
1683 		isp_prt(isp, ISP_LOGTDEBUG0,
1684 		    "CTIO[%x] tag %x iid %d lun %d sts %x flg %x resid %d %s",
1685 		    ct->ct_fwhandle, ct->ct_tag_val, ct->ct_iid, ct->ct_lun,
1686 		    ct->ct_status, ct->ct_flags, resid,
1687 		    sentstatus? "FIN" : "MID");
1688 		tval = ct->ct_fwhandle;
1689 	}
1690 	ccb->csio.resid += resid;
1691 
1692 	/*
1693 	 * We're here either because intermediate data transfers are done
1694 	 * and/or the final status CTIO (which may have joined with a
1695 	 * Data Transfer) is done.
1696 	 *
1697 	 * In any case, for this platform, the upper layers figure out
1698 	 * what to do next, so all we do here is collect status and
1699 	 * pass information along. Any DMA handles have already been
1700 	 * freed.
1701 	 */
1702 	if (notify_cam == 0) {
1703 		isp_prt(isp, ISP_LOGTDEBUG0, "  INTER CTIO[0x%x] done", tval);
1704 		return (0);
1705 	}
1706 
1707 	isp_prt(isp, ISP_LOGTDEBUG0, "%s CTIO[0x%x] done",
1708 	    (sentstatus)? "  FINAL " : "MIDTERM ", tval);
1709 
1710 	if (!ok) {
1711 		isp_target_putback_atio(ccb);
1712 	} else {
1713 		isp_complete_ctio(ccb);
1714 
1715 	}
1716 	return (0);
1717 }
1718 
1719 static int
1720 isp_handle_platform_notify_scsi(struct ispsoftc *isp, in_entry_t *inp)
1721 {
1722 	return (0);	/* XXXX */
1723 }
1724 
1725 static int
1726 isp_handle_platform_notify_fc(struct ispsoftc *isp, in_fcentry_t *inp)
1727 {
1728 
1729 	switch (inp->in_status) {
1730 	case IN_PORT_LOGOUT:
1731 		isp_prt(isp, ISP_LOGWARN, "port logout of iid %d",
1732 		   inp->in_iid);
1733 		break;
1734 	case IN_PORT_CHANGED:
1735 		isp_prt(isp, ISP_LOGWARN, "port changed for iid %d",
1736 		   inp->in_iid);
1737 		break;
1738 	case IN_GLOBAL_LOGO:
1739 		isp_prt(isp, ISP_LOGINFO, "all ports logged out");
1740 		break;
1741 	case IN_ABORT_TASK:
1742 	{
1743 		atio_private_data_t *atp = isp_get_atpd(isp, inp->in_seqid);
1744 		struct ccb_immed_notify *inot = NULL;
1745 
1746 		if (atp) {
1747 			tstate_t *tptr = get_lun_statep(isp, 0, atp->lun);
1748 			if (tptr) {
1749 				inot = (struct ccb_immed_notify *)
1750 				    SLIST_FIRST(&tptr->inots);
1751 				if (inot) {
1752 					SLIST_REMOVE_HEAD(&tptr->inots,
1753 					    sim_links.sle);
1754 				}
1755 			}
1756 			isp_prt(isp, ISP_LOGWARN,
1757 			   "abort task RX_ID %x IID %d state %d",
1758 			   inp->in_seqid, inp->in_iid, atp->state);
1759 		} else {
1760 			isp_prt(isp, ISP_LOGWARN,
1761 			   "abort task RX_ID %x from iid %d, state unknown",
1762 			   inp->in_seqid, inp->in_iid);
1763 		}
1764 		if (inot) {
1765 			inot->initiator_id = inp->in_iid;
1766 			inot->sense_len = 0;
1767 			inot->message_args[0] = MSG_ABORT_TAG;
1768 			inot->message_args[1] = inp->in_seqid & 0xff;
1769 			inot->message_args[2] = (inp->in_seqid >> 8) & 0xff;
1770 			inot->ccb_h.status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN;
1771 			xpt_done((union ccb *)inot);
1772 		}
1773 		break;
1774 	}
1775 	default:
1776 		break;
1777 	}
1778 	return (0);
1779 }
1780 #endif
1781 
1782 static void
1783 isp_cam_async(void *cbarg, u_int32_t code, struct cam_path *path, void *arg)
1784 {
1785 	struct cam_sim *sim;
1786 	struct ispsoftc *isp;
1787 
1788 	sim = (struct cam_sim *)cbarg;
1789 	isp = (struct ispsoftc *) cam_sim_softc(sim);
1790 	switch (code) {
1791 	case AC_LOST_DEVICE:
1792 		if (IS_SCSI(isp)) {
1793 			u_int16_t oflags, nflags;
1794 			sdparam *sdp = isp->isp_param;
1795 			int tgt;
1796 
1797 			tgt = xpt_path_target_id(path);
1798 			if (tgt >= 0) {
1799 				sdp += cam_sim_bus(sim);
1800 				ISP_LOCK(isp);
1801 				nflags = sdp->isp_devparam[tgt].nvrm_flags;
1802 #ifndef	ISP_TARGET_MODE
1803 				nflags &= DPARM_SAFE_DFLT;
1804 				if (isp->isp_loaded_fw) {
1805 					nflags |= DPARM_NARROW | DPARM_ASYNC;
1806 				}
1807 #else
1808 				nflags = DPARM_DEFAULT;
1809 #endif
1810 				oflags = sdp->isp_devparam[tgt].goal_flags;
1811 				sdp->isp_devparam[tgt].goal_flags = nflags;
1812 				sdp->isp_devparam[tgt].dev_update = 1;
1813 				isp->isp_update |= (1 << cam_sim_bus(sim));
1814 				(void) isp_control(isp,
1815 				    ISPCTL_UPDATE_PARAMS, NULL);
1816 				sdp->isp_devparam[tgt].goal_flags = oflags;
1817 				ISP_UNLOCK(isp);
1818 			}
1819 		}
1820 		break;
1821 	default:
1822 		isp_prt(isp, ISP_LOGWARN, "isp_cam_async: Code 0x%x", code);
1823 		break;
1824 	}
1825 }
1826 
1827 static void
1828 isp_poll(struct cam_sim *sim)
1829 {
1830 	struct ispsoftc *isp = cam_sim_softc(sim);
1831 	u_int16_t isr, sema, mbox;
1832 
1833 	ISP_LOCK(isp);
1834 	if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
1835 		isp_intr(isp, isr, sema, mbox);
1836 	}
1837 	ISP_UNLOCK(isp);
1838 }
1839 
1840 
1841 static void
1842 isp_watchdog(void *arg)
1843 {
1844 	XS_T *xs = arg;
1845 	struct ispsoftc *isp = XS_ISP(xs);
1846 	u_int32_t handle;
1847 	int iok;
1848 
1849 	/*
1850 	 * We've decided this command is dead. Make sure we're not trying
1851 	 * to kill a command that's already dead by getting its handle and
1852 	 * seeing whether it's still alive.
1853 	 */
1854 	ISP_LOCK(isp);
1855 	iok = isp->isp_osinfo.intsok;
1856 	isp->isp_osinfo.intsok = 0;
1857 	handle = isp_find_handle(isp, xs);
1858 	if (handle) {
1859 		u_int16_t isr, sema, mbox;
1860 
1861 		if (XS_CMD_DONE_P(xs)) {
1862 			isp_prt(isp, ISP_LOGDEBUG1,
1863 			    "watchdog found done cmd (handle 0x%x)", handle);
1864 			ISP_UNLOCK(isp);
1865 			return;
1866 		}
1867 
1868 		if (XS_CMD_WDOG_P(xs)) {
1869 			isp_prt(isp, ISP_LOGDEBUG2,
1870 			    "recursive watchdog (handle 0x%x)", handle);
1871 			ISP_UNLOCK(isp);
1872 			return;
1873 		}
1874 
1875 		XS_CMD_S_WDOG(xs);
1876 		if (ISP_READ_ISR(isp, &isr, &sema, &mbox)) {
1877 			isp_intr(isp, isr, sema, mbox);
1878 		}
1879 		if (XS_CMD_DONE_P(xs)) {
1880 			isp_prt(isp, ISP_LOGDEBUG2,
1881 			    "watchdog cleanup for handle 0x%x", handle);
1882 			xpt_done((union ccb *) xs);
1883 		} else if (XS_CMD_GRACE_P(xs)) {
1884 			/*
1885 			 * Make sure the command is *really* dead before we
1886 			 * release the handle (and DMA resources) for reuse.
1887 			 */
1888 			(void) isp_control(isp, ISPCTL_ABORT_CMD, arg);
1889 
1890 			/*
1891 			 * After this point, the command is really dead.
1892 			 */
1893 			if (XS_XFRLEN(xs)) {
1894 				ISP_DMAFREE(isp, xs, handle);
1895 			}
1896 			isp_destroy_handle(isp, handle);
1897 			xpt_print_path(xs->ccb_h.path);
1898 			isp_prt(isp, ISP_LOGWARN,
1899 			    "watchdog timeout for handle 0x%x", handle);
1900 			XS_SETERR(xs, CAM_CMD_TIMEOUT);
1901 			XS_CMD_C_WDOG(xs);
1902 			isp_done(xs);
1903 		} else {
1904 			u_int16_t nxti, optr;
1905 			ispreq_t local, *mp = &local, *qe;
1906 
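			/*
			 * Not done yet and no grace period given: give the
			 * command one more watchdog interval of grace and
			 * push a SYNC_ALL marker through the request queue,
			 * presumably to flush out any completion the
			 * firmware may be sitting on.
			 */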
1907 			XS_CMD_C_WDOG(xs);
1908 			xs->ccb_h.timeout_ch = timeout(isp_watchdog, xs, hz);
1909 			if (isp_getrqentry(isp, &nxti, &optr, (void **) &qe)) {
1910 				ISP_UNLOCK(isp);
1911 				return;
1912 			}
1913 			XS_CMD_S_GRACE(xs);
1914 			MEMZERO((void *) mp, sizeof (*mp));
1915 			mp->req_header.rqs_entry_count = 1;
1916 			mp->req_header.rqs_entry_type = RQSTYPE_MARKER;
1917 			mp->req_modifier = SYNC_ALL;
1918 			mp->req_target = XS_CHANNEL(xs) << 7;
1919 			isp_put_request(isp, mp, qe);
1920 			ISP_ADD_REQUEST(isp, nxti);
1921 		}
1922 	} else {
1923 		isp_prt(isp, ISP_LOGDEBUG2, "watchdog with no command");
1924 	}
1925 	isp->isp_osinfo.intsok = iok;
1926 	ISP_UNLOCK(isp);
1927 }
1928 
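/*
 * Fibre Channel worker thread: waits for loop state to become good (or
 * for us to give up on it), then releases any SIMQ freeze that was put
 * in place for a loop-down condition, and goes back to sleep until a
 * change notification wakes it up again.
 */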
1929 static void
1930 isp_kthread(void *arg)
1931 {
1932 	struct ispsoftc *isp = arg;
1933 	int s;
1934 
1935 	s = splcam();
1936 	isp->isp_osinfo.intsok = 1;
1937 
1938 	/*
1939 	 * The first loop is for the case where we have yet to get
1940 	 * good Fibre Channel state.
1941 	 */
1942 	for (;;) {
1943 		int wasfrozen;
1944 
1945 		isp_prt(isp, ISP_LOGDEBUG0, "kthread: checking FC state");
1946 		while (isp_fc_runstate(isp, 2 * 1000000) != 0) {
1947 			isp_prt(isp, ISP_LOGDEBUG0, "kthread: FC state ungood");
1948 			if (FCPARAM(isp)->isp_fwstate != FW_READY ||
1949 			    FCPARAM(isp)->isp_loopstate < LOOP_PDB_RCVD) {
1950 				if (FCPARAM(isp)->loop_seen_once == 0 ||
1951 				    isp->isp_osinfo.ktmature == 0) {
1952 					break;
1953 				}
1954 			}
1955 			tsleep(isp_kthread, 0, "isp_fcthrd", hz);
1956 
1957 		}
1958 
1959 		/*
1960 		 * Even if we didn't get good loop state we may be
1961 		 * unfreezing the SIMQ so that we can kill off
1962 		 * commands (if we've never seen loop before, for example).
1963 		 */
1964 		isp->isp_osinfo.ktmature = 1;
1965 		wasfrozen = isp->isp_osinfo.simqfrozen & SIMQFRZ_LOOPDOWN;
1966 		isp->isp_osinfo.simqfrozen &= ~SIMQFRZ_LOOPDOWN;
1967 		if (wasfrozen && isp->isp_osinfo.simqfrozen == 0) {
1968 			isp_prt(isp, ISP_LOGDEBUG0, "kthread: releasing simq");
1969 			ISPLOCK_2_CAMLOCK(isp);
1970 			xpt_release_simq(isp->isp_sim, 1);
1971 			CAMLOCK_2_ISPLOCK(isp);
1972 		}
1973 		tsleep(&isp->isp_osinfo.kthread, 0, "isp_fc_worker", 0);
1974 		isp_prt(isp, ISP_LOGDEBUG0, "kthread: waiting until called");
1975 	}
1976 }
1977 
1978 static void
1979 isp_action(struct cam_sim *sim, union ccb *ccb)
1980 {
1981 	int bus, tgt, error;
1982 	struct ispsoftc *isp;
1983 	struct ccb_trans_settings *cts;
1984 
1985 	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("isp_action\n"));
1986 
1987 	isp = (struct ispsoftc *)cam_sim_softc(sim);
1988 	ccb->ccb_h.sim_priv.entries[0].field = 0;
1989 	ccb->ccb_h.sim_priv.entries[1].ptr = isp;
1990 	if (isp->isp_state != ISP_RUNSTATE &&
1991 	    ccb->ccb_h.func_code == XPT_SCSI_IO) {
1992 		CAMLOCK_2_ISPLOCK(isp);
1993 		isp_init(isp);
1994 		if (isp->isp_state != ISP_INITSTATE) {
1995 			ISP_UNLOCK(isp);
1996 			/*
1997 			 * Lie. Say it was a selection timeout.
1998 			 */
1999 			ccb->ccb_h.status = CAM_SEL_TIMEOUT | CAM_DEV_QFRZN;
2000 			xpt_freeze_devq(ccb->ccb_h.path, 1);
2001 			xpt_done(ccb);
2002 			return;
2003 		}
2004 		isp->isp_state = ISP_RUNSTATE;
2005 		ISPLOCK_2_CAMLOCK(isp);
2006 	}
2007 	isp_prt(isp, ISP_LOGDEBUG2, "isp_action code %x", ccb->ccb_h.func_code);
2008 
2009 
2010 	switch (ccb->ccb_h.func_code) {
2011 	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
2012 		/*
2013 		 * Do a couple of preliminary checks...
2014 		 */
2015 		if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
2016 			if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) {
2017 				ccb->ccb_h.status = CAM_REQ_INVALID;
2018 				xpt_done(ccb);
2019 				break;
2020 			}
2021 		}
2022 #ifdef	DIAGNOSTIC
2023 		if (ccb->ccb_h.target_id > (ISP_MAX_TARGETS(isp) - 1)) {
2024 			ccb->ccb_h.status = CAM_PATH_INVALID;
2025 		} else if (ccb->ccb_h.target_lun > (ISP_MAX_LUNS(isp) - 1)) {
2026 			ccb->ccb_h.status = CAM_PATH_INVALID;
2027 		}
2028 		if (ccb->ccb_h.status == CAM_PATH_INVALID) {
2029 			isp_prt(isp, ISP_LOGERR,
2030 			    "invalid tgt/lun (%d.%d) in XPT_SCSI_IO",
2031 			    ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
2032 			xpt_done(ccb);
2033 			break;
2034 		}
2035 #endif
2036 		((struct ccb_scsiio *) ccb)->scsi_status = SCSI_STATUS_OK;
2037 		CAMLOCK_2_ISPLOCK(isp);
2038 		error = isp_start((XS_T *) ccb);
2039 		switch (error) {
2040 		case CMD_QUEUED:
2041 			ccb->ccb_h.status |= CAM_SIM_QUEUED;
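			/*
			 * Arm the per-command watchdog: convert the CCB
			 * timeout (milliseconds, or the 60 second default)
			 * to ticks, pad it with a couple of seconds of
			 * slop, and clamp it so it can't overflow
			 * timeout()'s signed int argument.
			 */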
2042 			if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
2043 				u_int64_t ticks = (u_int64_t) hz;
2044 				if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT)
2045 					ticks = 60 * 1000 * ticks;
2046 				else
2047 					ticks = ccb->ccb_h.timeout * hz;
2048 				ticks = ((ticks + 999) / 1000) + hz + hz;
2049 				if (ticks >= 0x80000000) {
2050 					isp_prt(isp, ISP_LOGERR,
2051 					    "timeout overflow");
2052 					ticks = 0x7fffffff;
2053 				}
2054 				ccb->ccb_h.timeout_ch = timeout(isp_watchdog,
2055 				    (caddr_t)ccb, (int)ticks);
2056 			} else {
2057 				callout_handle_init(&ccb->ccb_h.timeout_ch);
2058 			}
2059 			ISPLOCK_2_CAMLOCK(isp);
2060 			break;
2061 		case CMD_RQLATER:
2062 			/*
2063 			 * This can only happen for Fibre Channel
2064 			 */
2065 			KASSERT((IS_FC(isp)), ("CMD_RQLATER for FC only"));
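			/*
			 * If the loop has never been seen and the worker
			 * thread has already had its chance (ktmature),
			 * fail the command as a selection timeout rather
			 * than requeueing it forever. Otherwise freeze the
			 * SIMQ for loop-down, wake the worker thread, and
			 * ask CAM to requeue the command.
			 */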
2066 			if (FCPARAM(isp)->loop_seen_once == 0 &&
2067 			    isp->isp_osinfo.ktmature) {
2068 				ISPLOCK_2_CAMLOCK(isp);
2069 				XS_SETERR(ccb, CAM_SEL_TIMEOUT);
2070 				xpt_done(ccb);
2071 				break;
2072 			}
2073 			wakeup(&isp->isp_osinfo.kthread);
2074 			isp_freeze_loopdown(isp, "isp_action(RQLATER)");
2075 			isp->isp_osinfo.simqfrozen |= SIMQFRZ_LOOPDOWN;
2076 			XS_SETERR(ccb, CAM_REQUEUE_REQ);
2077 			ISPLOCK_2_CAMLOCK(isp);
2078 			xpt_done(ccb);
2079 			break;
2080 		case CMD_EAGAIN:
2081 			XS_SETERR(ccb, CAM_REQUEUE_REQ);
2082 			ISPLOCK_2_CAMLOCK(isp);
2083 			xpt_done(ccb);
2084 			break;
2085 		case CMD_COMPLETE:
2086 			isp_done((struct ccb_scsiio *) ccb);
2087 			ISPLOCK_2_CAMLOCK(isp);
2088 			break;
2089 		default:
2090 			isp_prt(isp, ISP_LOGERR,
2091 			    "What's this? 0x%x at %d in file %s",
2092 			    error, __LINE__, __FILE__);
2093 			XS_SETERR(ccb, CAM_REQ_CMP_ERR);
2094 			xpt_done(ccb);
2095 			ISPLOCK_2_CAMLOCK(isp);
2096 		}
2097 		break;
2098 
2099 #ifdef	ISP_TARGET_MODE
2100 	case XPT_EN_LUN:		/* Enable LUN as a target */
2101 	{
2102 		int iok;
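		/*
		 * Interrupts-ok is turned off around isp_en_lun(),
		 * presumably so that lun enable completions are polled
		 * for rather than raced with by the interrupt path.
		 */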
2103 		CAMLOCK_2_ISPLOCK(isp);
2104 		iok = isp->isp_osinfo.intsok;
2105 		isp->isp_osinfo.intsok = 0;
2106 		isp_en_lun(isp, ccb);
2107 		isp->isp_osinfo.intsok = iok;
2108 		ISPLOCK_2_CAMLOCK(isp);
2109 		xpt_done(ccb);
2110 		break;
2111 	}
2112 	case XPT_NOTIFY_ACK:		/* recycle notify ack */
2113 	case XPT_IMMED_NOTIFY:		/* Add Immediate Notify Resource */
2114 	case XPT_ACCEPT_TARGET_IO:	/* Add Accept Target IO Resource */
2115 	{
2116 		tstate_t *tptr =
2117 		    get_lun_statep(isp, XS_CHANNEL(ccb), ccb->ccb_h.target_lun);
2118 		if (tptr == NULL) {
2119 			ccb->ccb_h.status = CAM_LUN_INVALID;
2120 			xpt_done(ccb);
2121 			break;
2122 		}
2123 		ccb->ccb_h.sim_priv.entries[0].field = 0;
2124 		ccb->ccb_h.sim_priv.entries[1].ptr = isp;
2125 		ccb->ccb_h.flags = 0;
2126 
2127 		CAMLOCK_2_ISPLOCK(isp);
2128 		if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
2129 			/*
2130 			 * Note that the command itself may not be done-
2131 			 * it may not even have had the first CTIO sent.
2132 			 */
2133 			tptr->atio_count++;
2134 			isp_prt(isp, ISP_LOGTDEBUG0,
2135 			    "Put FREE ATIO2, lun %d, count now %d",
2136 			    ccb->ccb_h.target_lun, tptr->atio_count);
2137 			SLIST_INSERT_HEAD(&tptr->atios, &ccb->ccb_h,
2138 			    sim_links.sle);
2139 		} else if (ccb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
2140 			SLIST_INSERT_HEAD(&tptr->inots, &ccb->ccb_h,
2141 			    sim_links.sle);
2142 		} else {
2143 			;
2144 		}
2145 		rls_lun_statep(isp, tptr);
2146 		ccb->ccb_h.status = CAM_REQ_INPROG;
2147 		ISPLOCK_2_CAMLOCK(isp);
2148 		break;
2149 	}
2150 	case XPT_CONT_TARGET_IO:
2151 	{
2152 		CAMLOCK_2_ISPLOCK(isp);
2153 		ccb->ccb_h.status = isp_target_start_ctio(isp, ccb);
2154 		if (ccb->ccb_h.status != CAM_REQ_INPROG) {
2155 			isp_prt(isp, ISP_LOGWARN,
2156 			    "XPT_CONT_TARGET_IO: status 0x%x",
2157 			    ccb->ccb_h.status);
2158 			XS_SETERR(ccb, CAM_REQUEUE_REQ);
2159 			ISPLOCK_2_CAMLOCK(isp);
2160 			xpt_done(ccb);
2161 		} else {
2162 			ISPLOCK_2_CAMLOCK(isp);
2163 			ccb->ccb_h.status |= CAM_SIM_QUEUED;
2164 		}
2165 		break;
2166 	}
2167 #endif
2168 	case XPT_RESET_DEV:		/* BDR the specified SCSI device */
2169 
2170 		bus = cam_sim_bus(xpt_path_sim(ccb->ccb_h.path));
2171 		tgt = ccb->ccb_h.target_id;
2172 		tgt |= (bus << 16);
2173 
2174 		CAMLOCK_2_ISPLOCK(isp);
2175 		error = isp_control(isp, ISPCTL_RESET_DEV, &tgt);
2176 		ISPLOCK_2_CAMLOCK(isp);
2177 		if (error) {
2178 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2179 		} else {
2180 			ccb->ccb_h.status = CAM_REQ_CMP;
2181 		}
2182 		xpt_done(ccb);
2183 		break;
2184 	case XPT_ABORT:			/* Abort the specified CCB */
2185 	{
2186 		union ccb *accb = ccb->cab.abort_ccb;
2187 		CAMLOCK_2_ISPLOCK(isp);
2188 		switch (accb->ccb_h.func_code) {
2189 #ifdef	ISP_TARGET_MODE
2190 		case XPT_ACCEPT_TARGET_IO:
2191 		case XPT_IMMED_NOTIFY:
2192 			ccb->ccb_h.status = isp_abort_tgt_ccb(isp, ccb);
2193 			break;
2194 		case XPT_CONT_TARGET_IO:
2195 			isp_prt(isp, ISP_LOGERR, "cannot abort CTIOs yet");
2196 			ccb->ccb_h.status = CAM_UA_ABORT;
2197 			break;
2198 #endif
2199 		case XPT_SCSI_IO:
2200 			error = isp_control(isp, ISPCTL_ABORT_CMD, ccb);
2201 			if (error) {
2202 				ccb->ccb_h.status = CAM_UA_ABORT;
2203 			} else {
2204 				ccb->ccb_h.status = CAM_REQ_CMP;
2205 			}
2206 			break;
2207 		default:
2208 			ccb->ccb_h.status = CAM_REQ_INVALID;
2209 			break;
2210 		}
2211 		ISPLOCK_2_CAMLOCK(isp);
2212 		xpt_done(ccb);
2213 		break;
2214 	}
2215 #define	IS_CURRENT_SETTINGS(c)	(c->flags & CCB_TRANS_CURRENT_SETTINGS)
2216 	case XPT_SET_TRAN_SETTINGS:	/* Nexus Settings */
2217 		cts = &ccb->cts;
2218 		if (!IS_CURRENT_SETTINGS(cts)) {
2219 			ccb->ccb_h.status = CAM_REQ_INVALID;
2220 			xpt_done(ccb);
2221 			break;
2222 		}
2223 		tgt = cts->ccb_h.target_id;
2224 		CAMLOCK_2_ISPLOCK(isp);
2225 		if (IS_SCSI(isp)) {
2226 			sdparam *sdp = isp->isp_param;
2227 			u_int16_t *dptr;
2228 
2229 			bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path));
2230 
2231 			sdp += bus;
2232 			/*
2233 			 * We always update (internally) from goal_flags
2234 			 * so any request to change settings just gets
2235 			 * vectored to that location.
2236 			 */
2237 			dptr = &sdp->isp_devparam[tgt].goal_flags;
2238 
2239 			/*
2240 			 * Note that these operations affect the
2241 			 * goal flags (goal_flags)- not
2242 			 * the current state flags. Then we mark
2243 			 * things so that the next operation to
2244 			 * this HBA will cause the update to occur.
2245 			 */
2246 			if (cts->valid & CCB_TRANS_DISC_VALID) {
2247 				if ((cts->flags & CCB_TRANS_DISC_ENB) != 0) {
2248 					*dptr |= DPARM_DISC;
2249 				} else {
2250 					*dptr &= ~DPARM_DISC;
2251 				}
2252 			}
2253 			if (cts->valid & CCB_TRANS_TQ_VALID) {
2254 				if ((cts->flags & CCB_TRANS_TAG_ENB) != 0) {
2255 					*dptr |= DPARM_TQING;
2256 				} else {
2257 					*dptr &= ~DPARM_TQING;
2258 				}
2259 			}
2260 			if (cts->valid & CCB_TRANS_BUS_WIDTH_VALID) {
2261 				switch (cts->bus_width) {
2262 				case MSG_EXT_WDTR_BUS_16_BIT:
2263 					*dptr |= DPARM_WIDE;
2264 					break;
2265 				default:
2266 					*dptr &= ~DPARM_WIDE;
2267 				}
2268 			}
2269 			/*
2270 			 * Any SYNC RATE of nonzero and SYNC_OFFSET
2271 			 * of nonzero will cause us to go to the
2272 			 * selected (from NVRAM) maximum value for
2273 			 * this device. At a later point, we'll
2274 			 * allow finer control.
2275 			 */
2276 			if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) &&
2277 			    (cts->valid & CCB_TRANS_SYNC_OFFSET_VALID) &&
2278 			    (cts->sync_offset > 0)) {
2279 				*dptr |= DPARM_SYNC;
2280 			} else {
2281 				*dptr &= ~DPARM_SYNC;
2282 			}
2283 			*dptr |= DPARM_SAFE_DFLT;
2284 			isp_prt(isp, ISP_LOGDEBUG0,
2285 			    "SET bus %d targ %d to flags %x off %x per %x",
2286 			    bus, tgt, sdp->isp_devparam[tgt].goal_flags,
2287 			    sdp->isp_devparam[tgt].goal_offset,
2288 			    sdp->isp_devparam[tgt].goal_period);
2289 			sdp->isp_devparam[tgt].dev_update = 1;
2290 			isp->isp_update |= (1 << bus);
2291 		}
2292 		ISPLOCK_2_CAMLOCK(isp);
2293 		ccb->ccb_h.status = CAM_REQ_CMP;
2294 		xpt_done(ccb);
2295 		break;
2296 	case XPT_GET_TRAN_SETTINGS:
2297 		cts = &ccb->cts;
2298 		tgt = cts->ccb_h.target_id;
2299 		CAMLOCK_2_ISPLOCK(isp);
2300 		if (IS_FC(isp)) {
2301 			/*
2302 			 * a lot of normal SCSI things don't make sense.
2303 			 * A lot of normal SCSI things don't make sense.
2304 			cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB;
2305 			cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
2306 			/*
2307 			 * How do you measure the width of a high
2308 			 * speed serial bus? Well, in bytes.
2309 			 *
2310 			 * Offset and period make no sense, though, so we set
2311 			 * (above) a 'base' transfer speed to be gigabit.
2312 			 */
2313 			cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2314 		} else {
2315 			sdparam *sdp = isp->isp_param;
2316 			int bus = cam_sim_bus(xpt_path_sim(cts->ccb_h.path));
2317 			u_int16_t dval, pval, oval;
2318 
2319 			sdp += bus;
2320 
2321 			if (IS_CURRENT_SETTINGS(cts)) {
2322 				sdp->isp_devparam[tgt].dev_refresh = 1;
2323 				isp->isp_update |= (1 << bus);
2324 				(void) isp_control(isp, ISPCTL_UPDATE_PARAMS,
2325 				    NULL);
2326 				dval = sdp->isp_devparam[tgt].actv_flags;
2327 				oval = sdp->isp_devparam[tgt].actv_offset;
2328 				pval = sdp->isp_devparam[tgt].actv_period;
2329 			} else {
2330 				dval = sdp->isp_devparam[tgt].nvrm_flags;
2331 				oval = sdp->isp_devparam[tgt].nvrm_offset;
2332 				pval = sdp->isp_devparam[tgt].nvrm_period;
2333 			}
2334 
2335 			cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB);
2336 
2337 			if (dval & DPARM_DISC) {
2338 				cts->flags |= CCB_TRANS_DISC_ENB;
2339 			}
2340 			if (dval & DPARM_TQING) {
2341 				cts->flags |= CCB_TRANS_TAG_ENB;
2342 			}
2343 			if (dval & DPARM_WIDE) {
2344 				cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
2345 			} else {
2346 				cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
2347 			}
2348 			cts->valid = CCB_TRANS_BUS_WIDTH_VALID |
2349 			    CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
2350 
2351 			if ((dval & DPARM_SYNC) && oval != 0) {
2352 				cts->sync_period = pval;
2353 				cts->sync_offset = oval;
2354 				cts->valid |=
2355 				    CCB_TRANS_SYNC_RATE_VALID |
2356 				    CCB_TRANS_SYNC_OFFSET_VALID;
2357 			}
2358 			isp_prt(isp, ISP_LOGDEBUG0,
2359 			    "GET %s bus %d targ %d to flags %x off %x per %x",
2360 			    IS_CURRENT_SETTINGS(cts)? "ACTIVE" : "NVRAM",
2361 			    bus, tgt, dval, oval, pval);
2362 		}
2363 		ISPLOCK_2_CAMLOCK(isp);
2364 		ccb->ccb_h.status = CAM_REQ_CMP;
2365 		xpt_done(ccb);
2366 		break;
2367 
2368 	case XPT_CALC_GEOMETRY:
2369 	{
2370 		struct ccb_calc_geometry *ccg;
2371 		u_int32_t secs_per_cylinder;
2372 		u_int32_t size_mb;
2373 
2374 		ccg = &ccb->ccg;
2375 		if (ccg->block_size == 0) {
2376 			isp_prt(isp, ISP_LOGERR,
2377 			    "%d.%d XPT_CALC_GEOMETRY block size 0?",
2378 			    ccg->ccb_h.target_id, ccg->ccb_h.target_lun);
2379 			ccb->ccb_h.status = CAM_REQ_INVALID;
2380 			xpt_done(ccb);
2381 			break;
2382 		}
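		/*
		 * Synthesize a logical geometry using the usual heuristic:
		 * 255 heads / 63 sectors per track for volumes over 1GB,
		 * 64/32 otherwise, with the cylinder count derived from
		 * the volume size.
		 */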
2383 		size_mb = ccg->volume_size / ((1024L * 1024L) / ccg->block_size);
2384 		if (size_mb > 1024) {
2385 			ccg->heads = 255;
2386 			ccg->secs_per_track = 63;
2387 		} else {
2388 			ccg->heads = 64;
2389 			ccg->secs_per_track = 32;
2390 		}
2391 		secs_per_cylinder = ccg->heads * ccg->secs_per_track;
2392 		ccg->cylinders = ccg->volume_size / secs_per_cylinder;
2393 		ccb->ccb_h.status = CAM_REQ_CMP;
2394 		xpt_done(ccb);
2395 		break;
2396 	}
2397 	case XPT_RESET_BUS:		/* Reset the specified bus */
2398 		bus = cam_sim_bus(sim);
2399 		CAMLOCK_2_ISPLOCK(isp);
2400 		error = isp_control(isp, ISPCTL_RESET_BUS, &bus);
2401 		ISPLOCK_2_CAMLOCK(isp);
2402 		if (error)
2403 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2404 		else {
2405 			if (cam_sim_bus(sim) && isp->isp_path2 != NULL)
2406 				xpt_async(AC_BUS_RESET, isp->isp_path2, NULL);
2407 			else if (isp->isp_path != NULL)
2408 				xpt_async(AC_BUS_RESET, isp->isp_path, NULL);
2409 			ccb->ccb_h.status = CAM_REQ_CMP;
2410 		}
2411 		xpt_done(ccb);
2412 		break;
2413 
2414 	case XPT_TERM_IO:		/* Terminate the I/O process */
2415 		ccb->ccb_h.status = CAM_REQ_INVALID;
2416 		xpt_done(ccb);
2417 		break;
2418 
2419 	case XPT_PATH_INQ:		/* Path routing inquiry */
2420 	{
2421 		struct ccb_pathinq *cpi = &ccb->cpi;
2422 
2423 		cpi->version_num = 1;
2424 #ifdef	ISP_TARGET_MODE
2425 		cpi->target_sprt = PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO;
2426 #else
2427 		cpi->target_sprt = 0;
2428 #endif
2429 		cpi->hba_eng_cnt = 0;
2430 		cpi->max_target = ISP_MAX_TARGETS(isp) - 1;
2431 		cpi->max_lun = ISP_MAX_LUNS(isp) - 1;
2432 		cpi->bus_id = cam_sim_bus(sim);
2433 		if (IS_FC(isp)) {
2434 			cpi->hba_misc = PIM_NOBUSRESET;
2435 			/*
2436 			 * Because our loop ID can shift from time to time,
2437 			 * make our initiator ID out of range of our bus.
2438 			 */
2439 			cpi->initiator_id = cpi->max_target + 1;
2440 
2441 			/*
2442 			 * Set base transfer capabilities for Fibre Channel.
2443 			 * Technically not correct because we don't know
2444 			 * what media we're running on top of- but we'll
2445 			 * look good if we always say 100MB/s.
2446 			 */
2447 			if (FCPARAM(isp)->isp_gbspeed == 2)
2448 				cpi->base_transfer_speed = 200000;
2449 			else
2450 				cpi->base_transfer_speed = 100000;
2451 			cpi->hba_inquiry = PI_TAG_ABLE;
2452 		} else {
2453 			sdparam *sdp = isp->isp_param;
2454 			sdp += cam_sim_bus(xpt_path_sim(cpi->ccb_h.path));
2455 			cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
2456 			cpi->hba_misc = 0;
2457 			cpi->initiator_id = sdp->isp_initiator_id;
2458 			cpi->base_transfer_speed = 3300;
2459 		}
2460 		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
2461 		strncpy(cpi->hba_vid, "Qlogic", HBA_IDLEN);
2462 		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
2463 		cpi->unit_number = cam_sim_unit(sim);
2464 		cpi->ccb_h.status = CAM_REQ_CMP;
2465 		xpt_done(ccb);
2466 		break;
2467 	}
2468 	default:
2469 		ccb->ccb_h.status = CAM_REQ_INVALID;
2470 		xpt_done(ccb);
2471 		break;
2472 	}
2473 }
2474 
2475 #define	ISPDDB	(CAM_DEBUG_INFO|CAM_DEBUG_TRACE|CAM_DEBUG_CDB)
2476 void
2477 isp_done(struct ccb_scsiio *sccb)
2478 {
2479 	struct ispsoftc *isp = XS_ISP(sccb);
2480 
2481 	if (XS_NOERR(sccb))
2482 		XS_SETERR(sccb, CAM_REQ_CMP);
2483 
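	/*
	 * If the transport says the command completed but the SCSI status
	 * is not OK, rewrite the CAM status: autosense failure for a check
	 * condition without valid sense data, SCSI status error otherwise.
	 */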
2484 	if ((sccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP &&
2485 	    (sccb->scsi_status != SCSI_STATUS_OK)) {
2486 		sccb->ccb_h.status &= ~CAM_STATUS_MASK;
2487 		if ((sccb->scsi_status == SCSI_STATUS_CHECK_COND) &&
2488 		    (sccb->ccb_h.status & CAM_AUTOSNS_VALID) == 0) {
2489 			sccb->ccb_h.status |= CAM_AUTOSENSE_FAIL;
2490 		} else {
2491 			sccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
2492 		}
2493 	}
2494 
2495 	sccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2496 	if ((sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2497 		if ((sccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
2498 			sccb->ccb_h.status |= CAM_DEV_QFRZN;
2499 			xpt_freeze_devq(sccb->ccb_h.path, 1);
2500 			isp_prt(isp, ISP_LOGDEBUG0,
2501 			    "freeze devq %d.%d cam sts %x scsi sts %x",
2502 			    sccb->ccb_h.target_id, sccb->ccb_h.target_lun,
2503 			    sccb->ccb_h.status, sccb->scsi_status);
2504 		}
2505 	}
2506 
2507 	if ((CAM_DEBUGGED(sccb->ccb_h.path, ISPDDB)) &&
2508 	    (sccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
2509 		xpt_print_path(sccb->ccb_h.path);
2510 		isp_prt(isp, ISP_LOGINFO,
2511 		    "cam completion status 0x%x", sccb->ccb_h.status);
2512 	}
2513 
2514 	XS_CMD_S_DONE(sccb);
2515 	if (XS_CMD_WDOG_P(sccb) == 0) {
2516 		untimeout(isp_watchdog, (caddr_t)sccb, sccb->ccb_h.timeout_ch);
2517 		if (XS_CMD_GRACE_P(sccb)) {
2518 			isp_prt(isp, ISP_LOGDEBUG2,
2519 			    "finished command on borrowed time");
2520 		}
2521 		XS_CMD_S_CLEAR(sccb);
2522 		ISPLOCK_2_CAMLOCK(isp);
2523 		xpt_done((union ccb *) sccb);
2524 		CAMLOCK_2_ISPLOCK(isp);
2525 	}
2526 }
2527 
2528 int
2529 isp_async(struct ispsoftc *isp, ispasync_t cmd, void *arg)
2530 {
2531 	int bus, rv = 0;
2532 	switch (cmd) {
2533 	case ISPASYNC_NEW_TGT_PARAMS:
2534 	{
2535 		int flags, tgt;
2536 		sdparam *sdp = isp->isp_param;
2537 		struct ccb_trans_settings cts;
2538 		struct cam_path *tmppath;
2539 
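		/*
		 * The core has (re)negotiated parameters for a target:
		 * repackage the active flags/period/offset into a
		 * CCB_TRANS_CURRENT_SETTINGS ccb and broadcast it to CAM
		 * as an AC_TRANSFER_NEG async event on a temporary path.
		 */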
2540 		bzero(&cts, sizeof (struct ccb_trans_settings));
2541 
2542 		tgt = *((int *)arg);
2543 		bus = (tgt >> 16) & 0xffff;
2544 		tgt &= 0xffff;
2545 		sdp += bus;
2546 		ISPLOCK_2_CAMLOCK(isp);
2547 		if (xpt_create_path(&tmppath, NULL,
2548 		    cam_sim_path(bus? isp->isp_sim2 : isp->isp_sim),
2549 		    tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2550 			CAMLOCK_2_ISPLOCK(isp);
2551 			isp_prt(isp, ISP_LOGWARN,
2552 			    "isp_async cannot make temp path for %d.%d",
2553 			    tgt, bus);
2554 			rv = -1;
2555 			break;
2556 		}
2557 		CAMLOCK_2_ISPLOCK(isp);
2558 		flags = sdp->isp_devparam[tgt].actv_flags;
2559 		cts.flags = CCB_TRANS_CURRENT_SETTINGS;
2560 		cts.valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
2561 		if (flags & DPARM_DISC) {
2562 			cts.flags |= CCB_TRANS_DISC_ENB;
2563 		}
2564 		if (flags & DPARM_TQING) {
2565 			cts.flags |= CCB_TRANS_TAG_ENB;
2566 		}
2567 		cts.valid |= CCB_TRANS_BUS_WIDTH_VALID;
2568 		cts.bus_width = (flags & DPARM_WIDE)?
2569 		    MSG_EXT_WDTR_BUS_16_BIT : MSG_EXT_WDTR_BUS_8_BIT;
2570 		cts.sync_period = sdp->isp_devparam[tgt].actv_period;
2571 		cts.sync_offset = sdp->isp_devparam[tgt].actv_offset;
2572 		if (flags & DPARM_SYNC) {
2573 			cts.valid |=
2574 			    CCB_TRANS_SYNC_RATE_VALID |
2575 			    CCB_TRANS_SYNC_OFFSET_VALID;
2576 		}
2577 		isp_prt(isp, ISP_LOGDEBUG2,
2578 		    "NEW_TGT_PARAMS bus %d tgt %d period %x offset %x flags %x",
2579 		    bus, tgt, sdp->isp_devparam[tgt].actv_period,
2580 		    sdp->isp_devparam[tgt].actv_offset, flags);
2581 		xpt_setup_ccb(&cts.ccb_h, tmppath, 1);
2582 		ISPLOCK_2_CAMLOCK(isp);
2583 		xpt_async(AC_TRANSFER_NEG, tmppath, &cts);
2584 		xpt_free_path(tmppath);
2585 		CAMLOCK_2_ISPLOCK(isp);
2586 		break;
2587 	}
2588 	case ISPASYNC_BUS_RESET:
2589 		bus = *((int *)arg);
2590 		isp_prt(isp, ISP_LOGINFO, "SCSI bus reset on bus %d detected",
2591 		    bus);
2592 		if (bus > 0 && isp->isp_path2) {
2593 			ISPLOCK_2_CAMLOCK(isp);
2594 			xpt_async(AC_BUS_RESET, isp->isp_path2, NULL);
2595 			CAMLOCK_2_ISPLOCK(isp);
2596 		} else if (isp->isp_path) {
2597 			ISPLOCK_2_CAMLOCK(isp);
2598 			xpt_async(AC_BUS_RESET, isp->isp_path, NULL);
2599 			CAMLOCK_2_ISPLOCK(isp);
2600 		}
2601 		break;
2602 	case ISPASYNC_LIP:
2603 		if (isp->isp_path) {
2604 			isp_freeze_loopdown(isp, "ISPASYNC_LIP");
2605 		}
2606 		isp_prt(isp, ISP_LOGINFO, "LIP Received");
2607 		break;
2608 	case ISPASYNC_LOOP_RESET:
2609 		if (isp->isp_path) {
2610 			isp_freeze_loopdown(isp, "ISPASYNC_LOOP_RESET");
2611 		}
2612 		isp_prt(isp, ISP_LOGINFO, "Loop Reset Received");
2613 		break;
2614 	case ISPASYNC_LOOP_DOWN:
2615 		if (isp->isp_path) {
2616 			isp_freeze_loopdown(isp, "ISPASYNC_LOOP_DOWN");
2617 		}
2618 		isp_prt(isp, ISP_LOGINFO, "Loop DOWN");
2619 		break;
2620 	case ISPASYNC_LOOP_UP:
2621 		/*
2622 		 * Now we just note that Loop has come up. We don't
2623 		 * actually do anything because we're waiting for a
2624 		 * Change Notify before activating the FC cleanup
2625 		 * thread to look at the state of the loop again.
2626 		 */
2627 		isp_prt(isp, ISP_LOGINFO, "Loop UP");
2628 		break;
2629 	case ISPASYNC_PROMENADE:
2630 	{
2631 		const char *fmt = "Target %d (Loop 0x%x) Port ID 0x%x "
2632 		    "(role %s) %s\n Port WWN 0x%08x%08x\n Node WWN 0x%08x%08x";
2633 		static const char *roles[4] = {
2634 		    "(none)", "Target", "Initiator", "Target/Initiator"
2635 		};
2636 		fcparam *fcp = isp->isp_param;
2637 		int tgt = *((int *) arg);
2638 		struct lportdb *lp = &fcp->portdb[tgt];
2639 
2640 		isp_prt(isp, ISP_LOGINFO, fmt, tgt, lp->loopid, lp->portid,
2641 		    roles[lp->roles & 0x3],
2642 		    (lp->valid)? "Arrived" : "Departed",
2643 		    (u_int32_t) (lp->port_wwn >> 32),
2644 		    (u_int32_t) (lp->port_wwn & 0xffffffffLL),
2645 		    (u_int32_t) (lp->node_wwn >> 32),
2646 		    (u_int32_t) (lp->node_wwn & 0xffffffffLL));
2647 
2648 		break;
2649 	}
2650 	case ISPASYNC_CHANGE_NOTIFY:
2651 		if (arg == ISPASYNC_CHANGE_PDB) {
2652 			isp_prt(isp, ISP_LOGINFO,
2653 			    "Port Database Changed");
2654 		} else if (arg == ISPASYNC_CHANGE_SNS) {
2655 			isp_prt(isp, ISP_LOGINFO,
2656 			    "Name Server Database Changed");
2657 		}
2658 		wakeup(&isp->isp_osinfo.kthread);
2659 		break;
2660 	case ISPASYNC_FABRIC_DEV:
2661 	{
2662 		int target, base, lim;
2663 		fcparam *fcp = isp->isp_param;
2664 		struct lportdb *lp = NULL;
2665 		struct lportdb *clp = (struct lportdb *) arg;
2666 		char *pt;
2667 
2668 		switch (clp->port_type) {
2669 		case 1:
2670 			pt = "   N_Port";
2671 			break;
2672 		case 2:
2673 			pt = "  NL_Port";
2674 			break;
2675 		case 3:
2676 			pt = "F/NL_Port";
2677 			break;
2678 		case 0x7f:
2679 			pt = "  Nx_Port";
2680 			break;
2681 		case 0x81:
2682 			pt = "  F_port";
2683 			break;
2684 		case 0x82:
2685 			pt = "  FL_Port";
2686 			break;
2687 		case 0x84:
2688 			pt = "   E_port";
2689 			break;
2690 		default:
2691 			pt = " ";
2692 			break;
2693 		}
2694 
2695 		isp_prt(isp, ISP_LOGINFO,
2696 		    "%s Fabric Device @ PortID 0x%x", pt, clp->portid);
2697 
2698 		/*
2699 		 * If we don't have an initiator role we bail.
2700 		 *
2701 		 * We just use ISPASYNC_FABRIC_DEV for announcement purposes.
2702 		 */
2703 
2704 		if ((isp->isp_role & ISP_ROLE_INITIATOR) == 0) {
2705 			break;
2706 		}
2707 
2708 		/*
2709 		 * Is this entry for us? If so, we bail.
2710 		 */
2711 
2712 		if (fcp->isp_portid == clp->portid) {
2713 			break;
2714 		}
2715 
2716 		/*
2717 		 * Else, the default policy is to find room for it in
2718 		 * our local port database. Later, when we execute
2719 		 * the call to isp_pdb_sync, either this newly arrived
2720 		 * or already logged-in device will be (re)announced.
2721 		 */
2722 
2723 		if (fcp->isp_topo == TOPO_FL_PORT)
2724 			base = FC_SNS_ID+1;
2725 		else
2726 			base = 0;
2727 
2728 		if (fcp->isp_topo == TOPO_N_PORT)
2729 			lim = 1;
2730 		else
2731 			lim = MAX_FC_TARG;
2732 
2733 		/*
2734 		 * Is it already in our list?
2735 		 */
2736 		for (target = base; target < lim; target++) {
2737 			if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
2738 				continue;
2739 			}
2740 			lp = &fcp->portdb[target];
2741 			if (lp->port_wwn == clp->port_wwn &&
2742 			    lp->node_wwn == clp->node_wwn) {
2743 				lp->fabric_dev = 1;
2744 				break;
2745 			}
2746 		}
2747 		if (target < lim) {
2748 			break;
2749 		}
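		/*
		 * Not in the database yet- find the first unused slot
		 * (port WWN of zero) in the valid target range and put
		 * it there.
		 */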
2750 		for (target = base; target < lim; target++) {
2751 			if (target >= FL_PORT_ID && target <= FC_SNS_ID) {
2752 				continue;
2753 			}
2754 			lp = &fcp->portdb[target];
2755 			if (lp->port_wwn == 0) {
2756 				break;
2757 			}
2758 		}
2759 		if (target == lim) {
2760 			isp_prt(isp, ISP_LOGWARN,
2761 			    "out of space for fabric devices");
2762 			break;
2763 		}
2764 		lp->port_type = clp->port_type;
2765 		lp->fc4_type = clp->fc4_type;
2766 		lp->node_wwn = clp->node_wwn;
2767 		lp->port_wwn = clp->port_wwn;
2768 		lp->portid = clp->portid;
2769 		lp->fabric_dev = 1;
2770 		break;
2771 	}
2772 #ifdef	ISP_TARGET_MODE
2773 	case ISPASYNC_TARGET_MESSAGE:
2774 	{
2775 		tmd_msg_t *mp = arg;
2776 		isp_prt(isp, ISP_LOGALL,
2777 		    "bus %d iid %d tgt %d lun %d ttype %x tval %x msg[0]=%x",
2778 		    mp->nt_bus, (int) mp->nt_iid, (int) mp->nt_tgt,
2779 		    (int) mp->nt_lun, mp->nt_tagtype, mp->nt_tagval,
2780 		    mp->nt_msg[0]);
2781 		break;
2782 	}
2783 	case ISPASYNC_TARGET_EVENT:
2784 	{
2785 		tmd_event_t *ep = arg;
2786 		isp_prt(isp, ISP_LOGALL,
2787 		    "bus %d event code 0x%x", ep->ev_bus, ep->ev_event);
2788 		break;
2789 	}
2790 	case ISPASYNC_TARGET_ACTION:
2791 		switch (((isphdr_t *)arg)->rqs_entry_type) {
2792 		default:
2793 			isp_prt(isp, ISP_LOGWARN,
2794 			   "event 0x%x for unhandled target action",
2795 			    ((isphdr_t *)arg)->rqs_entry_type);
2796 			break;
2797 		case RQSTYPE_NOTIFY:
2798 			if (IS_SCSI(isp)) {
2799 				rv = isp_handle_platform_notify_scsi(isp,
2800 				    (in_entry_t *) arg);
2801 			} else {
2802 				rv = isp_handle_platform_notify_fc(isp,
2803 				    (in_fcentry_t *) arg);
2804 			}
2805 			break;
2806 		case RQSTYPE_ATIO:
2807 			rv = isp_handle_platform_atio(isp, (at_entry_t *) arg);
2808 			break;
2809 		case RQSTYPE_ATIO2:
2810 			rv = isp_handle_platform_atio2(isp, (at2_entry_t *)arg);
2811 			break;
2812 		case RQSTYPE_CTIO2:
2813 		case RQSTYPE_CTIO:
2814 			rv = isp_handle_platform_ctio(isp, arg);
2815 			break;
2816 		case RQSTYPE_ENABLE_LUN:
2817 		case RQSTYPE_MODIFY_LUN:
2818 			if (IS_DUALBUS(isp)) {
2819 				bus =
2820 				    GET_BUS_VAL(((lun_entry_t *)arg)->le_rsvd);
2821 			} else {
2822 				bus = 0;
2823 			}
2824 			isp_cv_signal_rqe(isp, bus,
2825 			    ((lun_entry_t *)arg)->le_status);
2826 			break;
2827 		}
2828 		break;
2829 #endif
2830 	case ISPASYNC_FW_CRASH:
2831 	{
2832 		u_int16_t mbox1, mbox6;
2833 		mbox1 = ISP_READ(isp, OUTMAILBOX1);
2834 		if (IS_DUALBUS(isp)) {
2835 			mbox6 = ISP_READ(isp, OUTMAILBOX6);
2836 		} else {
2837 			mbox6 = 0;
2838 		}
2839 		isp_prt(isp, ISP_LOGERR,
2840 		    "Internal Firmware Error on bus %d @ RISC Address 0x%x",
2841 		    mbox6, mbox1);
2842 #ifdef	ISP_FW_CRASH_DUMP
2843 		/*
2844 		 * XXX: really need a thread to do this right.
2845 		 */
2846 		if (IS_FC(isp)) {
2847 			FCPARAM(isp)->isp_fwstate = FW_CONFIG_WAIT;
2848 			FCPARAM(isp)->isp_loopstate = LOOP_NIL;
2849 			isp_freeze_loopdown(isp, "f/w crash");
2850 			isp_fw_dump(isp);
2851 		}
2852 		isp_reinit(isp);
2853 		isp_async(isp, ISPASYNC_FW_RESTARTED, NULL);
2854 #endif
2855 		break;
2856 	}
2857 	case ISPASYNC_UNHANDLED_RESPONSE:
2858 		break;
2859 	default:
2860 		isp_prt(isp, ISP_LOGERR, "unknown isp_async event %d", cmd);
2861 		break;
2862 	}
2863 	return (rv);
2864 }
2865 
2866 
2867 /*
2868  * Locks are held before coming here.
2869  */
2870 void
2871 isp_uninit(struct ispsoftc *isp)
2872 {
2873 	ISP_WRITE(isp, HCCR, HCCR_CMD_RESET);
2874 	DISABLE_INTS(isp);
2875 }
2876 
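/*
 * Platform logging hook: honor the driver debug level mask (ISP_LOGALL
 * always prints), prefix the message with the device name, and hand the
 * rest to vprintf.
 */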
2877 void
2878 isp_prt(struct ispsoftc *isp, int level, const char *fmt, ...)
2879 {
2880 	__va_list ap;
2881 	if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) {
2882 		return;
2883 	}
2884 	printf("%s: ", device_get_nameunit(isp->isp_dev));
2885 	__va_start(ap, fmt);
2886 	vprintf(fmt, ap);
2887 	__va_end(ap);
2888 	printf("\n");
2889 }
2890