xref: /freebsd/sys/dev/isp/isp_freebsd.c (revision 61e21613)
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause
3  *
4  * Copyright (c) 2009-2020 Alexander Motin <mav@FreeBSD.org>
5  * Copyright (c) 1997-2009 by Matthew Jacob
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice immediately at the beginning of the file, without modification,
13  *    this list of conditions, and the following disclaimer.
14  * 2. The name of the author may not be used to endorse or promote products
15  *    derived from this software without specific prior written permission.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
21  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  */
29 
30 /*
31  * Platform (FreeBSD) dependent common attachment code for Qlogic adapters.
32  */
33 #include <sys/cdefs.h>
34 #include <dev/isp/isp_freebsd.h>
35 #include <sys/unistd.h>
36 #include <sys/kthread.h>
37 #include <sys/conf.h>
38 #include <sys/module.h>
39 #include <sys/ioccom.h>
40 #include <dev/isp/isp_ioctl.h>
41 #include <sys/devicestat.h>
42 #include <cam/cam_periph.h>
43 #include <cam/cam_xpt_periph.h>
44 
45 MODULE_VERSION(isp, 1);
46 MODULE_DEPEND(isp, cam, 1, 1, 1);
47 int isp_announced = 0;
48 int isp_loop_down_limit = 60;	/* default loop down limit */
49 int isp_quickboot_time = 7;	/* don't wait more than N secs for loop up */
50 int isp_gone_device_time = 30;	/* grace time before reporting device lost */
51 static const char prom3[] = "Chan %d [%u] PortID 0x%06x Departed because of %s";
52 
53 static void isp_freeze_loopdown(ispsoftc_t *, int);
54 static void isp_loop_changed(ispsoftc_t *isp, int chan);
55 static void isp_rq_check_above(ispsoftc_t *);
56 static void isp_rq_check_below(ispsoftc_t *);
57 static d_ioctl_t ispioctl;
58 static void isp_poll(struct cam_sim *);
59 static callout_func_t isp_watchdog;
60 static callout_func_t isp_gdt;
61 static task_fn_t isp_gdt_task;
62 static void isp_kthread(void *);
63 static void isp_action(struct cam_sim *, union ccb *);
64 static int isp_timer_count;
65 static void isp_timer(void *);
66 
67 static struct cdevsw isp_cdevsw = {
68 	.d_version =	D_VERSION,
69 	.d_ioctl =	ispioctl,
70 	.d_name =	"isp",
71 };
72 
73 static int
74 isp_role_sysctl(SYSCTL_HANDLER_ARGS)
75 {
76 	ispsoftc_t *isp = (ispsoftc_t *)arg1;
77 	int chan = arg2;
78 	int error, old, value;
79 
80 	value = FCPARAM(isp, chan)->role;
81 
82 	error = sysctl_handle_int(oidp, &value, 0, req);
83 	if ((error != 0) || (req->newptr == NULL))
84 		return (error);
85 
86 	if (value < ISP_ROLE_NONE || value > ISP_ROLE_BOTH)
87 		return (EINVAL);
88 
89 	ISP_LOCK(isp);
90 	old = FCPARAM(isp, chan)->role;
91 
92 	/* We don't allow target mode switch from here. */
93 	value = (old & ISP_ROLE_TARGET) | (value & ISP_ROLE_INITIATOR);
94 
95 	/* If nothing has changed -- we are done. */
96 	if (value == old) {
97 		ISP_UNLOCK(isp);
98 		return (0);
99 	}
100 
101 	/* Actually change the role. */
102 	error = isp_control(isp, ISPCTL_CHANGE_ROLE, chan, value);
103 	ISP_UNLOCK(isp);
104 	return (error);
105 }
106 
107 static int
108 isp_attach_chan(ispsoftc_t *isp, struct cam_devq *devq, int chan)
109 {
110 	fcparam *fcp = FCPARAM(isp, chan);
111 	struct isp_fc *fc = ISP_FC_PC(isp, chan);
112 	struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(isp->isp_osinfo.dev);
113 	struct sysctl_oid *tree = device_get_sysctl_tree(isp->isp_osinfo.dev);
114 	char name[16];
115 	struct cam_sim *sim;
116 	struct cam_path *path;
117 #ifdef	ISP_TARGET_MODE
118 	int i;
119 #endif
120 
121 	sim = cam_sim_alloc(isp_action, isp_poll, "isp", isp,
122 	    device_get_unit(isp->isp_dev), &isp->isp_lock,
123 	    isp->isp_maxcmds, isp->isp_maxcmds, devq);
124 	if (sim == NULL)
125 		return (ENOMEM);
126 
127 	if (xpt_bus_register(sim, isp->isp_dev, chan) != CAM_SUCCESS) {
128 		cam_sim_free(sim, FALSE);
129 		return (EIO);
130 	}
131 	if (xpt_create_path(&path, NULL, cam_sim_path(sim), CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
132 		xpt_bus_deregister(cam_sim_path(sim));
133 		cam_sim_free(sim, FALSE);
134 		return (ENXIO);
135 	}
136 
137 	ISP_LOCK(isp);
138 	fc->sim = sim;
139 	fc->path = path;
140 	fc->isp = isp;
141 	fc->ready = 1;
142 	fcp->isp_use_gft_id = 1;
143 	fcp->isp_use_gff_id = 1;
144 
145 	callout_init_mtx(&fc->gdt, &isp->isp_lock, 0);
146 	TASK_INIT(&fc->gtask, 1, isp_gdt_task, fc);
147 #ifdef	ISP_TARGET_MODE
148 	TAILQ_INIT(&fc->waitq);
149 	STAILQ_INIT(&fc->ntfree);
150 	for (i = 0; i < ATPDPSIZE; i++)
151 		STAILQ_INSERT_TAIL(&fc->ntfree, &fc->ntpool[i], next);
152 	LIST_INIT(&fc->atfree);
153 	for (i = ATPDPSIZE-1; i >= 0; i--)
154 		LIST_INSERT_HEAD(&fc->atfree, &fc->atpool[i], next);
155 	for (i = 0; i < ATPDPHASHSIZE; i++)
156 		LIST_INIT(&fc->atused[i]);
157 #endif
158 	isp_loop_changed(isp, chan);
159 	ISP_UNLOCK(isp);
160 	if (kproc_create(isp_kthread, fc, &fc->kproc, 0, 0,
161 	    "%s_%d", device_get_nameunit(isp->isp_osinfo.dev), chan)) {
162 		xpt_free_path(fc->path);
163 		xpt_bus_deregister(cam_sim_path(fc->sim));
164 		cam_sim_free(fc->sim, FALSE);
165 		return (ENOMEM);
166 	}
167 	fc->num_threads += 1;
168 	if (chan > 0) {
169 		snprintf(name, sizeof(name), "chan%d", chan);
170 		tree = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(tree),
171 		    OID_AUTO, name, CTLFLAG_RW | CTLFLAG_MPSAFE, 0,
172 		    "Virtual channel");
173 	}
174 	SYSCTL_ADD_QUAD(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
175 	    "wwnn", CTLFLAG_RD, &fcp->isp_wwnn,
176 	    "World Wide Node Name");
177 	SYSCTL_ADD_QUAD(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
178 	    "wwpn", CTLFLAG_RD, &fcp->isp_wwpn,
179 	    "World Wide Port Name");
180 	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
181 	    "loop_down_limit", CTLFLAG_RW, &fc->loop_down_limit, 0,
182 	    "Loop Down Limit");
183 	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
184 	    "gone_device_time", CTLFLAG_RW, &fc->gone_device_time, 0,
185 	    "Gone Device Time");
186 #if defined(ISP_TARGET_MODE) && defined(DEBUG)
187 	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
188 	    "inject_lost_data_frame", CTLFLAG_RW, &fc->inject_lost_data_frame, 0,
189 	    "Cause a Lost Frame on a Read");
190 #endif
191 	SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
192 	    "role", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_MPSAFE,
193 	    isp, chan, isp_role_sysctl, "I", "Current role");
194 	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
195 	    "speed", CTLFLAG_RD, &fcp->isp_gbspeed, 0,
196 	    "Connection speed in gigabits");
197 	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
198 	    "linkstate", CTLFLAG_RD, &fcp->isp_linkstate, 0,
199 	    "Link state");
200 	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
201 	    "fwstate", CTLFLAG_RD, &fcp->isp_fwstate, 0,
202 	    "Firmware state");
203 	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
204 	    "loopstate", CTLFLAG_RD, &fcp->isp_loopstate, 0,
205 	    "Loop state");
206 	SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
207 	    "topo", CTLFLAG_RD, &fcp->isp_topo, 0,
208 	    "Connection topology");
209 	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
210 	    "use_gft_id", CTLFLAG_RWTUN, &fcp->isp_use_gft_id, 0,
211 	    "Use GFT_ID during fabric scan");
212 	SYSCTL_ADD_INT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
213 	    "use_gff_id", CTLFLAG_RWTUN, &fcp->isp_use_gff_id, 0,
214 	    "Use GFF_ID during fabric scan");
215 	return (0);
216 }
217 
218 static void
219 isp_detach_chan(ispsoftc_t *isp, int chan)
220 {
221 	struct isp_fc *fc = ISP_FC_PC(isp, chan);
222 
223 	xpt_free_path(fc->path);
224 	xpt_bus_deregister(cam_sim_path(fc->sim));
225 	cam_sim_free(fc->sim, FALSE);
226 
227 	/* Wait for the channel's spawned threads to exit. */
228 	wakeup(fc);
229 	while (fc->num_threads != 0)
230 		mtx_sleep(&fc->num_threads, &isp->isp_lock, PRIBIO, "isp_reap", 0);
231 }
232 
233 int
234 isp_attach(ispsoftc_t *isp)
235 {
236 	const char *nu = device_get_nameunit(isp->isp_osinfo.dev);
237 	int du = device_get_unit(isp->isp_dev);
238 	int chan;
239 
240 	/*
241 	 * Create the device queue for our SIM(s).
242 	 */
243 	isp->isp_osinfo.devq = cam_simq_alloc(isp->isp_maxcmds);
244 	if (isp->isp_osinfo.devq == NULL) {
245 		return (EIO);
246 	}
247 
248 	for (chan = 0; chan < isp->isp_nchan; chan++) {
249 		if (isp_attach_chan(isp, isp->isp_osinfo.devq, chan)) {
250 			goto unwind;
251 		}
252 	}
253 
254 	callout_init_mtx(&isp->isp_osinfo.tmo, &isp->isp_lock, 0);
255 	isp_timer_count = hz >> 2;
256 	callout_reset(&isp->isp_osinfo.tmo, isp_timer_count, isp_timer, isp);
257 
258 	isp->isp_osinfo.cdev = make_dev(&isp_cdevsw, du, UID_ROOT, GID_OPERATOR, 0600, "%s", nu);
259 	if (isp->isp_osinfo.cdev) {
260 		isp->isp_osinfo.cdev->si_drv1 = isp;
261 	}
262 	return (0);
263 
264 unwind:
265 	ISP_LOCK(isp);
266 	isp->isp_osinfo.is_exiting = 1;
267 	while (--chan >= 0)
268 		isp_detach_chan(isp, chan);
269 	ISP_UNLOCK(isp);
270 	cam_simq_free(isp->isp_osinfo.devq);
271 	isp->isp_osinfo.devq = NULL;
272 	return (-1);
273 }
274 
275 int
276 isp_detach(ispsoftc_t *isp)
277 {
278 	int chan;
279 
280 	if (isp->isp_osinfo.cdev) {
281 		destroy_dev(isp->isp_osinfo.cdev);
282 		isp->isp_osinfo.cdev = NULL;
283 	}
284 	ISP_LOCK(isp);
285 	/* Tell spawned threads that we're exiting. */
286 	isp->isp_osinfo.is_exiting = 1;
287 	for (chan = isp->isp_nchan - 1; chan >= 0; chan -= 1)
288 		isp_detach_chan(isp, chan);
289 	ISP_UNLOCK(isp);
290 	callout_drain(&isp->isp_osinfo.tmo);
291 	cam_simq_free(isp->isp_osinfo.devq);
292 	return (0);
293 }
294 
295 static void
296 isp_freeze_loopdown(ispsoftc_t *isp, int chan)
297 {
298 	struct isp_fc *fc = ISP_FC_PC(isp, chan);
299 
300 	if (fc->sim == NULL)
301 		return;
302 	if (fc->simqfrozen == 0) {
303 		isp_prt(isp, ISP_LOGDEBUG0,
304 		    "Chan %d Freeze simq (loopdown)", chan);
305 		fc->simqfrozen = SIMQFRZ_LOOPDOWN;
306 		xpt_hold_boot();
307 		xpt_freeze_simq(fc->sim, 1);
308 	} else {
309 		isp_prt(isp, ISP_LOGDEBUG0,
310 		    "Chan %d Mark simq frozen (loopdown)", chan);
311 		fc->simqfrozen |= SIMQFRZ_LOOPDOWN;
312 	}
313 }
314 
315 static void
316 isp_unfreeze_loopdown(ispsoftc_t *isp, int chan)
317 {
318 	struct isp_fc *fc = ISP_FC_PC(isp, chan);
319 
320 	if (fc->sim == NULL)
321 		return;
322 	int wasfrozen = fc->simqfrozen & SIMQFRZ_LOOPDOWN;
323 	fc->simqfrozen &= ~SIMQFRZ_LOOPDOWN;
324 	if (wasfrozen && fc->simqfrozen == 0) {
325 		isp_prt(isp, ISP_LOGDEBUG0,
326 		    "Chan %d Release simq", chan);
327 		xpt_release_simq(fc->sim, 1);
328 		xpt_release_boot();
329 	}
330 }
331 
332 /*
333  * Functions to protect from request queue overflow by freezing SIM queue.
334  * XXX: freezing only one arbitrary SIM, since they all share the queue.
335  */
336 static void
337 isp_rq_check_above(ispsoftc_t *isp)
338 {
339 	struct isp_fc *fc = ISP_FC_PC(isp, 0);
340 
341 	if (isp->isp_rqovf || fc->sim == NULL)
342 		return;
343 	if (!isp_rqentry_avail(isp, QENTRY_MAX)) {
344 		xpt_freeze_simq(fc->sim, 1);
345 		isp->isp_rqovf = 1;
346 	}
347 }
348 
349 static void
350 isp_rq_check_below(ispsoftc_t *isp)
351 {
352 	struct isp_fc *fc = ISP_FC_PC(isp, 0);
353 
354 	if (!isp->isp_rqovf || fc->sim == NULL)
355 		return;
356 	if (isp_rqentry_avail(isp, QENTRY_MAX)) {
357 		xpt_release_simq(fc->sim, 0);
358 		isp->isp_rqovf = 0;
359 	}
360 }
361 
362 static int
363 ispioctl(struct cdev *dev, u_long c, caddr_t addr, int flags, struct thread *td)
364 {
365 	ispsoftc_t *isp;
366 	int nr, chan, retval = ENOTTY;
367 
368 	isp = dev->si_drv1;
369 
370 	switch (c) {
371 	case ISP_SDBLEV:
372 	{
373 		int olddblev = isp->isp_dblev;
374 		isp->isp_dblev = *(int *)addr;
375 		*(int *)addr = olddblev;
376 		retval = 0;
377 		break;
378 	}
379 	case ISP_GETROLE:
380 		chan = *(int *)addr;
381 		if (chan < 0 || chan >= isp->isp_nchan) {
382 			retval = -ENXIO;
383 			break;
384 		}
385 		*(int *)addr = FCPARAM(isp, chan)->role;
386 		retval = 0;
387 		break;
388 	case ISP_SETROLE:
389 		nr = *(int *)addr;
390 		chan = nr >> 8;
391 		if (chan < 0 || chan >= isp->isp_nchan) {
392 			retval = -ENXIO;
393 			break;
394 		}
395 		nr &= 0xff;
396 		if (nr & ~(ISP_ROLE_INITIATOR|ISP_ROLE_TARGET)) {
397 			retval = EINVAL;
398 			break;
399 		}
400 		ISP_LOCK(isp);
401 		*(int *)addr = FCPARAM(isp, chan)->role;
402 		retval = isp_control(isp, ISPCTL_CHANGE_ROLE, chan, nr);
403 		ISP_UNLOCK(isp);
404 		break;
405 
406 	case ISP_RESETHBA:
407 		ISP_LOCK(isp);
408 		isp_reinit(isp, 0);
409 		ISP_UNLOCK(isp);
410 		retval = 0;
411 		break;
412 
413 	case ISP_RESCAN:
414 		chan = *(intptr_t *)addr;
415 		if (chan < 0 || chan >= isp->isp_nchan) {
416 			retval = -ENXIO;
417 			break;
418 		}
419 		ISP_LOCK(isp);
420 		if (isp_fc_runstate(isp, chan, 5 * 1000000) != LOOP_READY) {
421 			retval = EIO;
422 		} else {
423 			retval = 0;
424 		}
425 		ISP_UNLOCK(isp);
426 		break;
427 
428 	case ISP_FC_LIP:
429 		chan = *(intptr_t *)addr;
430 		if (chan < 0 || chan >= isp->isp_nchan) {
431 			retval = -ENXIO;
432 			break;
433 		}
434 		ISP_LOCK(isp);
435 		if (isp_control(isp, ISPCTL_SEND_LIP, chan)) {
436 			retval = EIO;
437 		} else {
438 			retval = 0;
439 		}
440 		ISP_UNLOCK(isp);
441 		break;
442 	case ISP_FC_GETDINFO:
443 	{
444 		struct isp_fc_device *ifc = (struct isp_fc_device *) addr;
445 		fcportdb_t *lp;
446 
447 		if (ifc->loopid >= MAX_FC_TARG) {
448 			retval = EINVAL;
449 			break;
450 		}
451 		lp = &FCPARAM(isp, ifc->chan)->portdb[ifc->loopid];
452 		if (lp->state != FC_PORTDB_STATE_NIL) {
453 			ifc->role = (lp->prli_word3 & SVC3_ROLE_MASK) >> SVC3_ROLE_SHIFT;
454 			ifc->loopid = lp->handle;
455 			ifc->portid = lp->portid;
456 			ifc->node_wwn = lp->node_wwn;
457 			ifc->port_wwn = lp->port_wwn;
458 			retval = 0;
459 		} else {
460 			retval = ENODEV;
461 		}
462 		break;
463 	}
464 	case ISP_FC_GETHINFO:
465 	{
466 		struct isp_hba_device *hba = (struct isp_hba_device *) addr;
467 		int chan = hba->fc_channel;
468 
469 		if (chan < 0 || chan >= isp->isp_nchan) {
470 			retval = ENXIO;
471 			break;
472 		}
473 		hba->fc_fw_major = ISP_FW_MAJORX(isp->isp_fwrev);
474 		hba->fc_fw_minor = ISP_FW_MINORX(isp->isp_fwrev);
475 		hba->fc_fw_micro = ISP_FW_MICROX(isp->isp_fwrev);
476 		hba->fc_nchannels = isp->isp_nchan;
477 		hba->fc_nports = MAX_FC_TARG;
478 		hba->fc_speed = FCPARAM(isp, hba->fc_channel)->isp_gbspeed;
479 		hba->fc_topology = FCPARAM(isp, chan)->isp_topo + 1;
480 		hba->fc_loopid = FCPARAM(isp, chan)->isp_loopid;
481 		hba->nvram_node_wwn = FCPARAM(isp, chan)->isp_wwnn_nvram;
482 		hba->nvram_port_wwn = FCPARAM(isp, chan)->isp_wwpn_nvram;
483 		hba->active_node_wwn = FCPARAM(isp, chan)->isp_wwnn;
484 		hba->active_port_wwn = FCPARAM(isp, chan)->isp_wwpn;
485 		retval = 0;
486 		break;
487 	}
488 	case ISP_TSK_MGMT:
489 	{
490 		int needmarker;
491 		struct isp_fc_tsk_mgmt *fct = (struct isp_fc_tsk_mgmt *) addr;
492 		uint16_t nphdl;
493 		isp24xx_tmf_t tmf;
494 		isp24xx_statusreq_t sp;
495 		fcparam *fcp;
496 		fcportdb_t *lp;
497 		int i;
498 
499 		chan = fct->chan;
500 		if (chan < 0 || chan >= isp->isp_nchan) {
501 			retval = -ENXIO;
502 			break;
503 		}
504 
505 		needmarker = retval = 0;
506 		nphdl = fct->loopid;
507 		ISP_LOCK(isp);
508 		fcp = FCPARAM(isp, chan);
509 
510 		for (i = 0; i < MAX_FC_TARG; i++) {
511 			lp = &fcp->portdb[i];
512 			if (lp->handle == nphdl) {
513 				break;
514 			}
515 		}
516 		if (i == MAX_FC_TARG) {
517 			retval = ENXIO;
518 			ISP_UNLOCK(isp);
519 			break;
520 		}
521 		ISP_MEMZERO(&tmf, sizeof(tmf));
522 		tmf.tmf_header.rqs_entry_type = RQSTYPE_TSK_MGMT;
523 		tmf.tmf_header.rqs_entry_count = 1;
524 		tmf.tmf_nphdl = lp->handle;
525 		tmf.tmf_delay = 2;
526 		tmf.tmf_timeout = 4;
527 		tmf.tmf_tidlo = lp->portid;
528 		tmf.tmf_tidhi = lp->portid >> 16;
529 		tmf.tmf_vpidx = ISP_GET_VPIDX(isp, chan);
530 		tmf.tmf_lun[1] = fct->lun & 0xff;
531 		if (fct->lun >= 256) {
532 			tmf.tmf_lun[0] = 0x40 | (fct->lun >> 8);
533 		}
534 		switch (fct->action) {
535 		case IPT_CLEAR_ACA:
536 			tmf.tmf_flags = ISP24XX_TMF_CLEAR_ACA;
537 			break;
538 		case IPT_TARGET_RESET:
539 			tmf.tmf_flags = ISP24XX_TMF_TARGET_RESET;
540 			needmarker = 1;
541 			break;
542 		case IPT_LUN_RESET:
543 			tmf.tmf_flags = ISP24XX_TMF_LUN_RESET;
544 			needmarker = 1;
545 			break;
546 		case IPT_CLEAR_TASK_SET:
547 			tmf.tmf_flags = ISP24XX_TMF_CLEAR_TASK_SET;
548 			needmarker = 1;
549 			break;
550 		case IPT_ABORT_TASK_SET:
551 			tmf.tmf_flags = ISP24XX_TMF_ABORT_TASK_SET;
552 			needmarker = 1;
553 			break;
554 		default:
555 			retval = EINVAL;
556 			break;
557 		}
558 		if (retval) {
559 			ISP_UNLOCK(isp);
560 			break;
561 		}
562 
563 		retval = isp_exec_entry_queue(isp, &tmf, &sp, 5);
564 		if (retval != 0) {
565 			isp_prt(isp, ISP_LOGERR, "%s: TMF of chan %d error %d",
566 			    __func__, chan, retval);
567 			ISP_UNLOCK(isp);
568 			break;
569 		}
570 
571 		if (sp.req_completion_status != 0)
572 			retval = EIO;
573 		else if (needmarker)
574 			fcp->sendmarker = 1;
575 		ISP_UNLOCK(isp);
576 		break;
577 	}
578 	default:
579 		break;
580 	}
581 	return (retval);
582 }
583 
584 /*
585  * Local Inlines
586  */
587 
588 static ISP_INLINE int isp_get_pcmd(ispsoftc_t *, union ccb *);
589 static ISP_INLINE void isp_free_pcmd(ispsoftc_t *, union ccb *);
590 
591 static ISP_INLINE int
592 isp_get_pcmd(ispsoftc_t *isp, union ccb *ccb)
593 {
594 	ISP_PCMD(ccb) = isp->isp_osinfo.pcmd_free;
595 	if (ISP_PCMD(ccb) == NULL) {
596 		return (-1);
597 	}
598 	isp->isp_osinfo.pcmd_free = ((struct isp_pcmd *)ISP_PCMD(ccb))->next;
599 	return (0);
600 }
601 
602 static ISP_INLINE void
603 isp_free_pcmd(ispsoftc_t *isp, union ccb *ccb)
604 {
605 	if (ISP_PCMD(ccb)) {
606 #ifdef	ISP_TARGET_MODE
607 		PISP_PCMD(ccb)->datalen = 0;
608 #endif
609 		PISP_PCMD(ccb)->next = isp->isp_osinfo.pcmd_free;
610 		isp->isp_osinfo.pcmd_free = ISP_PCMD(ccb);
611 		ISP_PCMD(ccb) = NULL;
612 	}
613 }
614 
615 /*
616  * Put the target mode functions here, because some are inlines
617  */
618 #ifdef	ISP_TARGET_MODE
619 static ISP_INLINE tstate_t *get_lun_statep(ispsoftc_t *, int, lun_id_t);
620 static atio_private_data_t *isp_get_atpd(ispsoftc_t *, int, uint32_t);
621 static atio_private_data_t *isp_find_atpd(ispsoftc_t *, int, uint32_t);
622 static void isp_put_atpd(ispsoftc_t *, int, atio_private_data_t *);
623 static inot_private_data_t *isp_get_ntpd(ispsoftc_t *, int);
624 static inot_private_data_t *isp_find_ntpd(ispsoftc_t *, int, uint32_t, uint32_t);
625 static void isp_put_ntpd(ispsoftc_t *, int, inot_private_data_t *);
626 static tstate_t *create_lun_state(ispsoftc_t *, int, struct cam_path *);
627 static void destroy_lun_state(ispsoftc_t *, int, tstate_t *);
628 static void isp_enable_lun(ispsoftc_t *, union ccb *);
629 static void isp_disable_lun(ispsoftc_t *, union ccb *);
630 static callout_func_t isp_refire_notify_ack;
631 static void isp_complete_ctio(ispsoftc_t *isp, union ccb *);
632 enum Start_Ctio_How { FROM_CAM, FROM_TIMER, FROM_SRR, FROM_CTIO_DONE };
633 static void isp_target_start_ctio(ispsoftc_t *, union ccb *, enum Start_Ctio_How);
634 static void isp_handle_platform_atio7(ispsoftc_t *, at7_entry_t *);
635 static void isp_handle_platform_ctio(ispsoftc_t *, ct7_entry_t *);
636 static int isp_handle_platform_target_notify_ack(ispsoftc_t *, isp_notify_t *, uint32_t rsp);
637 static void isp_handle_platform_target_tmf(ispsoftc_t *, isp_notify_t *);
638 static void isp_target_mark_aborted_early(ispsoftc_t *, int chan, tstate_t *, uint32_t);
639 
640 static ISP_INLINE tstate_t *
641 get_lun_statep(ispsoftc_t *isp, int bus, lun_id_t lun)
642 {
643 	struct isp_fc *fc = ISP_FC_PC(isp, bus);
644 	tstate_t *tptr;
645 
646 	SLIST_FOREACH(tptr, &fc->lun_hash[LUN_HASH_FUNC(lun)], next) {
647 		if (tptr->ts_lun == lun)
648 			return (tptr);
649 	}
650 	return (NULL);
651 }
652 
653 static int
654 isp_atio_restart(ispsoftc_t *isp, int bus, tstate_t *tptr)
655 {
656 	inot_private_data_t *ntp;
657 	struct ntpdlist rq;
658 
659 	if (STAILQ_EMPTY(&tptr->restart_queue))
660 		return (0);
661 	STAILQ_INIT(&rq);
662 	STAILQ_CONCAT(&rq, &tptr->restart_queue);
663 	while ((ntp = STAILQ_FIRST(&rq)) != NULL) {
664 		STAILQ_REMOVE_HEAD(&rq, next);
665 		isp_prt(isp, ISP_LOGTDEBUG0,
666 		    "%s: restarting resrc deprived %x", __func__,
667 		    ((at7_entry_t *)ntp->data)->at_rxid);
668 		isp_handle_platform_atio7(isp, (at7_entry_t *) ntp->data);
669 		isp_put_ntpd(isp, bus, ntp);
670 		if (!STAILQ_EMPTY(&tptr->restart_queue))
671 			break;
672 	}
673 	if (!STAILQ_EMPTY(&rq)) {
674 		STAILQ_CONCAT(&rq, &tptr->restart_queue);
675 		STAILQ_CONCAT(&tptr->restart_queue, &rq);
676 	}
677 	return (!STAILQ_EMPTY(&tptr->restart_queue));
678 }
679 
680 static void
681 isp_tmcmd_restart(ispsoftc_t *isp)
682 {
683 	struct isp_fc *fc;
684 	tstate_t *tptr;
685 	union ccb *ccb;
686 	int bus, i;
687 
688 	for (bus = 0; bus < isp->isp_nchan; bus++) {
689 		fc = ISP_FC_PC(isp, bus);
690 		for (i = 0; i < LUN_HASH_SIZE; i++) {
691 			SLIST_FOREACH(tptr, &fc->lun_hash[i], next)
692 				isp_atio_restart(isp, bus, tptr);
693 		}
694 
695 		/*
696 		 * We only need to do this once per channel.
697 		 */
698 		ccb = (union ccb *)TAILQ_FIRST(&fc->waitq);
699 		if (ccb != NULL) {
700 			TAILQ_REMOVE(&fc->waitq, &ccb->ccb_h, sim_links.tqe);
701 			isp_target_start_ctio(isp, ccb, FROM_TIMER);
702 		}
703 	}
704 	isp_rq_check_above(isp);
705 	isp_rq_check_below(isp);
706 }
707 
708 static atio_private_data_t *
709 isp_get_atpd(ispsoftc_t *isp, int chan, uint32_t tag)
710 {
711 	struct isp_fc *fc = ISP_FC_PC(isp, chan);
712 	atio_private_data_t *atp;
713 
714 	atp = LIST_FIRST(&fc->atfree);
715 	if (atp) {
716 		LIST_REMOVE(atp, next);
717 		atp->tag = tag;
718 		LIST_INSERT_HEAD(&fc->atused[ATPDPHASH(tag)], atp, next);
719 	}
720 	return (atp);
721 }
722 
723 static atio_private_data_t *
724 isp_find_atpd(ispsoftc_t *isp, int chan, uint32_t tag)
725 {
726 	struct isp_fc *fc = ISP_FC_PC(isp, chan);
727 	atio_private_data_t *atp;
728 
729 	LIST_FOREACH(atp, &fc->atused[ATPDPHASH(tag)], next) {
730 		if (atp->tag == tag)
731 			return (atp);
732 	}
733 	return (NULL);
734 }
735 
736 static void
737 isp_put_atpd(ispsoftc_t *isp, int chan, atio_private_data_t *atp)
738 {
739 	struct isp_fc *fc = ISP_FC_PC(isp, chan);
740 
741 	if (atp->ests)
742 		isp_put_ecmd(isp, atp->ests);
743 	LIST_REMOVE(atp, next);
744 	memset(atp, 0, sizeof (*atp));
745 	LIST_INSERT_HEAD(&fc->atfree, atp, next);
746 }
747 
748 static void
749 isp_dump_atpd(ispsoftc_t *isp, int chan)
750 {
751 	struct isp_fc *fc = ISP_FC_PC(isp, chan);
752 	atio_private_data_t *atp;
753 	const char *states[8] = { "Free", "ATIO", "CAM", "CTIO", "LAST_CTIO", "PDON", "?6", "7" };
754 
755 	for (atp = fc->atpool; atp < &fc->atpool[ATPDPSIZE]; atp++) {
756 		if (atp->state == ATPD_STATE_FREE)
757 			continue;
758 		isp_prt(isp, ISP_LOGALL, "Chan %d ATP [0x%x] origdlen %u bytes_xfrd %u lun %jx nphdl 0x%04x s_id 0x%06x d_id 0x%06x oxid 0x%04x state %s",
759 		    chan, atp->tag, atp->orig_datalen, atp->bytes_xfered, (uintmax_t)atp->lun, atp->nphdl, atp->sid, atp->did, atp->oxid, states[atp->state & 0x7]);
760 	}
761 }
762 
763 static inot_private_data_t *
764 isp_get_ntpd(ispsoftc_t *isp, int chan)
765 {
766 	struct isp_fc *fc = ISP_FC_PC(isp, chan);
767 	inot_private_data_t *ntp;
768 
769 	ntp = STAILQ_FIRST(&fc->ntfree);
770 	if (ntp)
771 		STAILQ_REMOVE_HEAD(&fc->ntfree, next);
772 	return (ntp);
773 }
774 
775 static inot_private_data_t *
776 isp_find_ntpd(ispsoftc_t *isp, int chan, uint32_t tag_id, uint32_t seq_id)
777 {
778 	struct isp_fc *fc = ISP_FC_PC(isp, chan);
779 	inot_private_data_t *ntp;
780 
781 	for (ntp = fc->ntpool; ntp < &fc->ntpool[ATPDPSIZE]; ntp++) {
782 		if (ntp->tag_id == tag_id && ntp->seq_id == seq_id)
783 			return (ntp);
784 	}
785 	return (NULL);
786 }
787 
788 static void
789 isp_put_ntpd(ispsoftc_t *isp, int chan, inot_private_data_t *ntp)
790 {
791 	struct isp_fc *fc = ISP_FC_PC(isp, chan);
792 
793 	ntp->tag_id = ntp->seq_id = 0;
794 	STAILQ_INSERT_HEAD(&fc->ntfree, ntp, next);
795 }
796 
797 tstate_t *
798 create_lun_state(ispsoftc_t *isp, int bus, struct cam_path *path)
799 {
800 	struct isp_fc *fc = ISP_FC_PC(isp, bus);
801 	lun_id_t lun;
802 	tstate_t *tptr;
803 
804 	lun = xpt_path_lun_id(path);
805 	tptr = malloc(sizeof (tstate_t), M_DEVBUF, M_NOWAIT|M_ZERO);
806 	if (tptr == NULL)
807 		return (NULL);
808 	tptr->ts_lun = lun;
809 	SLIST_INIT(&tptr->atios);
810 	SLIST_INIT(&tptr->inots);
811 	STAILQ_INIT(&tptr->restart_queue);
812 	SLIST_INSERT_HEAD(&fc->lun_hash[LUN_HASH_FUNC(lun)], tptr, next);
813 	ISP_PATH_PRT(isp, ISP_LOGTDEBUG0, path, "created tstate\n");
814 	return (tptr);
815 }
816 
817 static void
818 destroy_lun_state(ispsoftc_t *isp, int bus, tstate_t *tptr)
819 {
820 	struct isp_fc *fc = ISP_FC_PC(isp, bus);
821 	union ccb *ccb;
822 	inot_private_data_t *ntp;
823 
824 	while ((ccb = (union ccb *)SLIST_FIRST(&tptr->atios)) != NULL) {
825 		SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle);
826 		ccb->ccb_h.status = CAM_REQ_ABORTED;
827 		xpt_done(ccb);
828 	};
829 	while ((ccb = (union ccb *)SLIST_FIRST(&tptr->inots)) != NULL) {
830 		SLIST_REMOVE_HEAD(&tptr->inots, sim_links.sle);
831 		ccb->ccb_h.status = CAM_REQ_ABORTED;
832 		xpt_done(ccb);
833 	}
834 	while ((ntp = STAILQ_FIRST(&tptr->restart_queue)) != NULL) {
835 		isp_endcmd(isp, ntp->data, NIL_HANDLE, bus, SCSI_STATUS_BUSY, 0);
836 		STAILQ_REMOVE_HEAD(&tptr->restart_queue, next);
837 		isp_put_ntpd(isp, bus, ntp);
838 	}
839 	SLIST_REMOVE(&fc->lun_hash[LUN_HASH_FUNC(tptr->ts_lun)], tptr, tstate, next);
840 	free(tptr, M_DEVBUF);
841 }
842 
843 static void
844 isp_enable_lun(ispsoftc_t *isp, union ccb *ccb)
845 {
846 	tstate_t *tptr;
847 	int bus = XS_CHANNEL(ccb);
848 	target_id_t target = ccb->ccb_h.target_id;
849 	lun_id_t lun = ccb->ccb_h.target_lun;
850 
851 	/*
852 	 * We only support either target and lun both wildcard
853 	 * or target and lun both non-wildcard.
854 	 */
855 	ISP_PATH_PRT(isp, ISP_LOGTDEBUG0|ISP_LOGCONFIG, ccb->ccb_h.path,
856 	    "enabling lun %jx\n", (uintmax_t)lun);
857 	if ((target == CAM_TARGET_WILDCARD) != (lun == CAM_LUN_WILDCARD)) {
858 		ccb->ccb_h.status = CAM_LUN_INVALID;
859 		xpt_done(ccb);
860 		return;
861 	}
862 
863 	/* Create the state pointer. It should not already exist. */
864 	tptr = get_lun_statep(isp, bus, lun);
865 	if (tptr) {
866 		ccb->ccb_h.status = CAM_LUN_ALRDY_ENA;
867 		xpt_done(ccb);
868 		return;
869 	}
870 	tptr = create_lun_state(isp, bus, ccb->ccb_h.path);
871 	if (tptr == NULL) {
872 		ccb->ccb_h.status = CAM_RESRC_UNAVAIL;
873 		xpt_done(ccb);
874 		return;
875 	}
876 
877 	ccb->ccb_h.status = CAM_REQ_CMP;
878 	xpt_done(ccb);
879 }
880 
881 static void
882 isp_disable_lun(ispsoftc_t *isp, union ccb *ccb)
883 {
884 	tstate_t *tptr;
885 	int bus = XS_CHANNEL(ccb);
886 	target_id_t target = ccb->ccb_h.target_id;
887 	lun_id_t lun = ccb->ccb_h.target_lun;
888 
889 	ISP_PATH_PRT(isp, ISP_LOGTDEBUG0|ISP_LOGCONFIG, ccb->ccb_h.path,
890 	    "disabling lun %jx\n", (uintmax_t)lun);
891 	if ((target == CAM_TARGET_WILDCARD) != (lun == CAM_LUN_WILDCARD)) {
892 		ccb->ccb_h.status = CAM_LUN_INVALID;
893 		xpt_done(ccb);
894 		return;
895 	}
896 
897 	/* Find the state pointer. */
898 	if ((tptr = get_lun_statep(isp, bus, lun)) == NULL) {
899 		ccb->ccb_h.status = CAM_PATH_INVALID;
900 		xpt_done(ccb);
901 		return;
902 	}
903 
904 	destroy_lun_state(isp, bus, tptr);
905 	ccb->ccb_h.status = CAM_REQ_CMP;
906 	xpt_done(ccb);
907 }
908 
909 static void
910 isp_target_start_ctio(ispsoftc_t *isp, union ccb *ccb, enum Start_Ctio_How how)
911 {
912 	int fctape, sendstatus, resid;
913 	fcparam *fcp;
914 	atio_private_data_t *atp;
915 	struct ccb_scsiio *cso;
916 	struct isp_ccbq *waitq;
917 	uint32_t dmaresult, handle, xfrlen, sense_length, tmp;
918 	ct7_entry_t local, *cto = &local;
919 
920 	isp_prt(isp, ISP_LOGTDEBUG0, "%s: ENTRY[0x%x] how %u xfrlen %u sendstatus %d sense_len %u", __func__, ccb->csio.tag_id, how, ccb->csio.dxfer_len,
921 	    (ccb->ccb_h.flags & CAM_SEND_STATUS) != 0, ((ccb->ccb_h.flags & CAM_SEND_SENSE)? ccb->csio.sense_len : 0));
922 
923 	waitq = &ISP_FC_PC(isp, XS_CHANNEL(ccb))->waitq;
924 	switch (how) {
925 	case FROM_CAM:
926 		/*
927 		 * Insert at the tail of the list, if any, waiting CTIO CCBs
928 		 */
929 		TAILQ_INSERT_TAIL(waitq, &ccb->ccb_h, sim_links.tqe);
930 		break;
931 	case FROM_TIMER:
932 	case FROM_SRR:
933 	case FROM_CTIO_DONE:
934 		TAILQ_INSERT_HEAD(waitq, &ccb->ccb_h, sim_links.tqe);
935 		break;
936 	}
937 
938 	while ((ccb = (union ccb *) TAILQ_FIRST(waitq)) != NULL) {
939 		TAILQ_REMOVE(waitq, &ccb->ccb_h, sim_links.tqe);
940 
941 		cso = &ccb->csio;
942 		xfrlen = cso->dxfer_len;
943 		if (xfrlen == 0) {
944 			if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) {
945 				ISP_PATH_PRT(isp, ISP_LOGERR, ccb->ccb_h.path, "a data transfer length of zero but no status to send is wrong\n");
946 				ccb->ccb_h.status = CAM_REQ_INVALID;
947 				xpt_done(ccb);
948 				continue;
949 			}
950 		}
951 
952 		atp = isp_find_atpd(isp, XS_CHANNEL(ccb), cso->tag_id);
953 		if (atp == NULL) {
954 			isp_prt(isp, ISP_LOGERR, "%s: [0x%x] cannot find private data adjunct in %s", __func__, cso->tag_id, __func__);
955 			isp_dump_atpd(isp, XS_CHANNEL(ccb));
956 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
957 			xpt_done(ccb);
958 			continue;
959 		}
960 
961 		/*
962 		 * Is this command a dead duck?
963 		 */
964 		if (atp->dead) {
965 			isp_prt(isp, ISP_LOGERR, "%s: [0x%x] not sending a CTIO for a dead command", __func__, cso->tag_id);
966 			ccb->ccb_h.status = CAM_REQ_ABORTED;
967 			xpt_done(ccb);
968 			continue;
969 		}
970 
971 		/*
972 		 * Check to make sure we're still in target mode.
973 		 */
974 		fcp = FCPARAM(isp, XS_CHANNEL(ccb));
975 		if ((fcp->role & ISP_ROLE_TARGET) == 0) {
976 			isp_prt(isp, ISP_LOGERR, "%s: [0x%x] stopping sending a CTIO because we're no longer in target mode", __func__, cso->tag_id);
977 			ccb->ccb_h.status = CAM_PROVIDE_FAIL;
978 			xpt_done(ccb);
979 			continue;
980 		}
981 
982 		/*
983 		 * We're only handling ATPD_CCB_OUTSTANDING outstanding CCB at a time (one of which
984 		 * could be split into two CTIOs to split data and status).
985 		 */
986 		if (atp->ctcnt >= ATPD_CCB_OUTSTANDING) {
987 			isp_prt(isp, ISP_LOGTINFO, "[0x%x] handling only %d CCBs at a time (flags for this ccb: 0x%x)", cso->tag_id, ATPD_CCB_OUTSTANDING, ccb->ccb_h.flags);
988 			TAILQ_INSERT_HEAD(waitq, &ccb->ccb_h, sim_links.tqe);
989 			break;
990 		}
991 
992 		/*
993 		 * Does the initiator expect FC-Tape style responses?
994 		 */
995 		if ((atp->word3 & PRLI_WD3_RETRY) && fcp->fctape_enabled) {
996 			fctape = 1;
997 		} else {
998 			fctape = 0;
999 		}
1000 
1001 		/*
1002 		 * If we already did the data xfer portion of a CTIO that sends data
1003 		 * and status, don't do it again and do the status portion now.
1004 		 */
1005 		if (atp->sendst) {
1006 			isp_prt(isp, ISP_LOGTDEBUG0, "[0x%x] now sending synthesized status orig_dl=%u xfered=%u bit=%u",
1007 			    cso->tag_id, atp->orig_datalen, atp->bytes_xfered, atp->bytes_in_transit);
1008 			xfrlen = 0;	/* we already did the data transfer */
1009 			atp->sendst = 0;
1010 		}
1011 		if (ccb->ccb_h.flags & CAM_SEND_STATUS) {
1012 			sendstatus = 1;
1013 		} else {
1014 			sendstatus = 0;
1015 		}
1016 
1017 		if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
1018 			KASSERT((sendstatus != 0), ("how can you have CAM_SEND_SENSE w/o CAM_SEND_STATUS?"));
1019 			/*
1020 			 * Sense length is not the entire sense data structure size. Periph
1021 			 * drivers don't seem to be setting sense_len to reflect the actual
1022 			 * size. We'll peek inside to get the right amount.
1023 			 */
1024 			sense_length = cso->sense_len;
1025 
1026 			/*
1027 			 * This 'cannot' happen
1028 			 */
1029 			if (sense_length > (XCMD_SIZE - MIN_FCP_RESPONSE_SIZE)) {
1030 				sense_length = XCMD_SIZE - MIN_FCP_RESPONSE_SIZE;
1031 			}
1032 		} else {
1033 			sense_length = 0;
1034 		}
1035 
1036 		/*
1037 		 * Check for overflow
1038 		 */
1039 		tmp = atp->bytes_xfered + atp->bytes_in_transit;
1040 		if (xfrlen > 0 && tmp > atp->orig_datalen) {
1041 			isp_prt(isp, ISP_LOGERR,
1042 			    "%s: [0x%x] data overflow by %u bytes", __func__,
1043 			    cso->tag_id, tmp + xfrlen - atp->orig_datalen);
1044 			ccb->ccb_h.status = CAM_DATA_RUN_ERR;
1045 			xpt_done(ccb);
1046 			continue;
1047 		}
1048 		if (xfrlen > atp->orig_datalen - tmp) {
1049 			xfrlen = atp->orig_datalen - tmp;
1050 			if (xfrlen == 0 && !sendstatus) {
1051 				cso->resid = cso->dxfer_len;
1052 				ccb->ccb_h.status = CAM_REQ_CMP;
1053 				xpt_done(ccb);
1054 				continue;
1055 			}
1056 		}
1057 
1058 		memset(cto, 0, QENTRY_LEN);
1059 		cto->ct_header.rqs_entry_type = RQSTYPE_CTIO7;
1060 		cto->ct_header.rqs_entry_count = 1;
1061 		cto->ct_header.rqs_seqno |= ATPD_SEQ_NOTIFY_CAM;
1062 		ATPD_SET_SEQNO(cto, atp);
1063 		cto->ct_nphdl = atp->nphdl;
1064 		cto->ct_rxid = atp->tag;
1065 		cto->ct_iid_lo = atp->sid;
1066 		cto->ct_iid_hi = atp->sid >> 16;
1067 		cto->ct_oxid = atp->oxid;
1068 		cto->ct_vpidx = ISP_GET_VPIDX(isp, XS_CHANNEL(ccb));
1069 		cto->ct_timeout = XS_TIME(ccb);
1070 		cto->ct_flags = atp->tattr << CT7_TASK_ATTR_SHIFT;
1071 
1072 		/*
1073 		 * Mode 1, status, no data. Only possible when we are sending status, have
1074 		 * no data to transfer, and any sense data can fit into a ct7_entry_t.
1075 		 *
1076 		 * Mode 2, status, no data. We have to use this in the case that
1077 		 * the sense data won't fit into a ct7_entry_t.
1078 		 *
1079 		 */
1080 		if (sendstatus && xfrlen == 0) {
1081 			cto->ct_flags |= CT7_SENDSTATUS | CT7_NO_DATA;
1082 			resid = atp->orig_datalen - atp->bytes_xfered - atp->bytes_in_transit;
1083 			if (sense_length <= MAXRESPLEN_24XX) {
1084 				cto->ct_flags |= CT7_FLAG_MODE1;
1085 				cto->ct_scsi_status = cso->scsi_status;
1086 				if (resid < 0) {
1087 					cto->ct_resid = -resid;
1088 					cto->ct_scsi_status |= (FCP_RESID_OVERFLOW << 8);
1089 				} else if (resid > 0) {
1090 					cto->ct_resid = resid;
1091 					cto->ct_scsi_status |= (FCP_RESID_UNDERFLOW << 8);
1092 				}
1093 				if (fctape) {
1094 					cto->ct_flags |= CT7_CONFIRM|CT7_EXPLCT_CONF;
1095 				}
1096 				if (sense_length) {
1097 					cto->ct_scsi_status |= (FCP_SNSLEN_VALID << 8);
1098 					cto->rsp.m1.ct_resplen = cto->ct_senselen = sense_length;
1099 					memcpy(cto->rsp.m1.ct_resp, &cso->sense_data, sense_length);
1100 				}
1101 			} else {
1102 				bus_addr_t addr;
1103 				fcp_rsp_iu_t rp;
1104 
1105 				if (atp->ests == NULL) {
1106 					atp->ests = isp_get_ecmd(isp);
1107 					if (atp->ests == NULL) {
1108 						TAILQ_INSERT_HEAD(waitq, &ccb->ccb_h, sim_links.tqe);
1109 						break;
1110 					}
1111 				}
1112 				memset(&rp, 0, sizeof(rp));
1113 				if (fctape) {
1114 					cto->ct_flags |= CT7_CONFIRM|CT7_EXPLCT_CONF;
1115 					rp.fcp_rsp_bits |= FCP_CONF_REQ;
1116 				}
1117 				cto->ct_flags |= CT7_FLAG_MODE2;
1118 				rp.fcp_rsp_scsi_status = cso->scsi_status;
1119 				if (resid < 0) {
1120 					rp.fcp_rsp_resid = -resid;
1121 					rp.fcp_rsp_bits |= FCP_RESID_OVERFLOW;
1122 				} else if (resid > 0) {
1123 					rp.fcp_rsp_resid = resid;
1124 					rp.fcp_rsp_bits |= FCP_RESID_UNDERFLOW;
1125 				}
1126 				if (sense_length) {
1127 					rp.fcp_rsp_snslen = sense_length;
1128 					cto->ct_senselen = sense_length;
1129 					rp.fcp_rsp_bits |= FCP_SNSLEN_VALID;
1130 					isp_put_fcp_rsp_iu(isp, &rp, atp->ests);
1131 					memcpy(((fcp_rsp_iu_t *)atp->ests)->fcp_rsp_extra, &cso->sense_data, sense_length);
1132 				} else {
1133 					isp_put_fcp_rsp_iu(isp, &rp, atp->ests);
1134 				}
1135 				if (isp->isp_dblev & ISP_LOGTDEBUG1) {
1136 					isp_print_bytes(isp, "FCP Response Frame After Swizzling", MIN_FCP_RESPONSE_SIZE + sense_length, atp->ests);
1137 				}
1138 				bus_dmamap_sync(isp->isp_osinfo.ecmd_dmat, isp->isp_osinfo.ecmd_map, BUS_DMASYNC_PREWRITE);
1139 				addr = isp->isp_osinfo.ecmd_dma;
1140 				addr += ((((isp_ecmd_t *)atp->ests) - isp->isp_osinfo.ecmd_base) * XCMD_SIZE);
1141 				isp_prt(isp, ISP_LOGTDEBUG0, "%s: ests base %p vaddr %p ecmd_dma %jx addr %jx len %u", __func__, isp->isp_osinfo.ecmd_base, atp->ests,
1142 				    (uintmax_t) isp->isp_osinfo.ecmd_dma, (uintmax_t)addr, MIN_FCP_RESPONSE_SIZE + sense_length);
1143 				cto->rsp.m2.ct_datalen = MIN_FCP_RESPONSE_SIZE + sense_length;
1144 				cto->rsp.m2.ct_fcp_rsp_iudata.ds_base = DMA_LO32(addr);
1145 				cto->rsp.m2.ct_fcp_rsp_iudata.ds_basehi = DMA_HI32(addr);
1146 				cto->rsp.m2.ct_fcp_rsp_iudata.ds_count = MIN_FCP_RESPONSE_SIZE + sense_length;
1147 			}
1148 			if (sense_length) {
1149 				isp_prt(isp, ISP_LOGTDEBUG0, "%s: CTIO7[0x%x] seq %u nc %d CDB0=%x sstatus=0x%x flags=0x%x resid=%d slen %u sense: %x %x/%x/%x", __func__,
1150 				    cto->ct_rxid, ATPD_GET_SEQNO(cto), ATPD_GET_NCAM(cto), atp->cdb0, cto->ct_scsi_status, cto->ct_flags, cto->ct_resid, sense_length,
1151 				    cso->sense_data.error_code, cso->sense_data.sense_buf[1], cso->sense_data.sense_buf[11], cso->sense_data.sense_buf[12]);
1152 			} else {
1153 				isp_prt(isp, ISP_LOGDEBUG0, "%s: CTIO7[0x%x] seq %u nc %d CDB0=%x sstatus=0x%x flags=0x%x resid=%d", __func__,
1154 				    cto->ct_rxid, ATPD_GET_SEQNO(cto), ATPD_GET_NCAM(cto), atp->cdb0, cto->ct_scsi_status, cto->ct_flags, cto->ct_resid);
1155 			}
1156 			atp->state = ATPD_STATE_LAST_CTIO;
1157 		}
1158 
1159 		/*
1160 		 * Mode 0 data transfers, *possibly* with status.
1161 		 */
1162 		if (xfrlen != 0) {
1163 			cto->ct_flags |= CT7_FLAG_MODE0;
1164 			if ((cso->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1165 				cto->ct_flags |= CT7_DATA_IN;
1166 			} else {
1167 				cto->ct_flags |= CT7_DATA_OUT;
1168 			}
1169 
1170 			cto->rsp.m0.reloff = atp->bytes_xfered + atp->bytes_in_transit;
1171 			cto->rsp.m0.ct_xfrlen = xfrlen;
1172 
1173 #ifdef	DEBUG
1174 			if (ISP_FC_PC(isp, XS_CHANNEL(ccb))->inject_lost_data_frame && xfrlen > ISP_FC_PC(isp, XS_CHANNEL(ccb))->inject_lost_data_frame) {
1175 				isp_prt(isp, ISP_LOGWARN, "%s: truncating data frame with xfrlen %d to %d", __func__, xfrlen, xfrlen - (xfrlen >> 2));
1176 				ISP_FC_PC(isp, XS_CHANNEL(ccb))->inject_lost_data_frame = 0;
1177 				cto->rsp.m0.ct_xfrlen -= xfrlen >> 2;
1178 			}
1179 #endif
1180 			if (sendstatus) {
1181 				resid = atp->orig_datalen - atp->bytes_xfered - xfrlen;
1182 				if (cso->scsi_status == SCSI_STATUS_OK && resid == 0 /* && fctape == 0 */) {
1183 					cto->ct_flags |= CT7_SENDSTATUS;
1184 					atp->state = ATPD_STATE_LAST_CTIO;
1185 					if (fctape) {
1186 						cto->ct_flags |= CT7_CONFIRM|CT7_EXPLCT_CONF;
1187 					}
1188 				} else {
1189 					atp->sendst = 1;	/* send status later */
1190 					cto->ct_header.rqs_seqno &= ~ATPD_SEQ_NOTIFY_CAM;
1191 					atp->state = ATPD_STATE_CTIO;
1192 				}
1193 			} else {
1194 				atp->state = ATPD_STATE_CTIO;
1195 			}
1196 			isp_prt(isp, ISP_LOGTDEBUG0, "%s: CTIO7[0x%x] seq %u nc %d CDB0=%x sstatus=0x%x flags=0x%x xfrlen=%u off=%u", __func__,
1197 			    cto->ct_rxid, ATPD_GET_SEQNO(cto), ATPD_GET_NCAM(cto), atp->cdb0, cto->ct_scsi_status, cto->ct_flags, xfrlen, atp->bytes_xfered);
1198 		}
1199 
1200 		if (isp_get_pcmd(isp, ccb)) {
1201 			ISP_PATH_PRT(isp, ISP_LOGWARN, ccb->ccb_h.path, "out of PCMDs\n");
1202 			TAILQ_INSERT_HEAD(waitq, &ccb->ccb_h, sim_links.tqe);
1203 			break;
1204 		}
1205 		handle = isp_allocate_handle(isp, ccb, ISP_HANDLE_TARGET);
1206 		if (handle == 0) {
1207 			ISP_PATH_PRT(isp, ISP_LOGWARN, ccb->ccb_h.path, "No XFLIST pointers for %s\n", __func__);
1208 			TAILQ_INSERT_HEAD(waitq, &ccb->ccb_h, sim_links.tqe);
1209 			isp_free_pcmd(isp, ccb);
1210 			break;
1211 		}
1212 		atp->bytes_in_transit += xfrlen;
1213 		PISP_PCMD(ccb)->datalen = xfrlen;
1214 
1215 		/*
1216 		 * Call the dma setup routines for this entry (and any subsequent
1217 		 * CTIOs) if there's data to move, and then tell the f/w it's got
1218 		 * new things to play with. As with isp_start's usage of DMA setup,
1219 		 * any swizzling is done in the machine dependent layer. Because
1220 		 * of this, we put the request onto the queue area first in native
1221 		 * format.
1222 		 */
1223 		cto->ct_syshandle = handle;
1224 		dmaresult = ISP_DMASETUP(isp, cso, cto);
1225 		if (dmaresult != 0) {
1226 			isp_destroy_handle(isp, handle);
1227 			isp_free_pcmd(isp, ccb);
1228 			if (dmaresult == CMD_EAGAIN) {
1229 				TAILQ_INSERT_HEAD(waitq, &ccb->ccb_h, sim_links.tqe);
1230 				break;
1231 			}
1232 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1233 			xpt_done(ccb);
1234 			continue;
1235 		}
1236 		ccb->ccb_h.status = CAM_REQ_INPROG | CAM_SIM_QUEUED;
1237 		if (xfrlen) {
1238 			ccb->ccb_h.spriv_field0 = atp->bytes_xfered;
1239 		} else {
1240 			ccb->ccb_h.spriv_field0 = ~0;
1241 		}
1242 		atp->ctcnt++;
1243 		atp->seqno++;
1244 	}
1245 }
1246 
1247 static void
1248 isp_refire_notify_ack(void *arg)
1249 {
1250 	isp_tna_t *tp  = arg;
1251 	ispsoftc_t *isp = tp->isp;
1252 
1253 	ISP_ASSERT_LOCKED(isp);
1254 	if (isp_notify_ack(isp, tp->not)) {
1255 		callout_schedule(&tp->timer, 5);
1256 	} else {
1257 		free(tp, M_DEVBUF);
1258 	}
1259 }
1260 
1261 
1262 static void
1263 isp_complete_ctio(ispsoftc_t *isp, union ccb *ccb)
1264 {
1265 
1266 	isp_rq_check_below(isp);
1267 	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1268 	xpt_done(ccb);
1269 }
1270 
1271 static void
1272 isp_handle_platform_atio7(ispsoftc_t *isp, at7_entry_t *aep)
1273 {
1274 	int cdbxlen;
1275 	lun_id_t lun;
1276 	uint16_t chan, nphdl = NIL_HANDLE;
1277 	uint32_t did, sid;
1278 	fcportdb_t *lp;
1279 	tstate_t *tptr;
1280 	struct ccb_accept_tio *atiop;
1281 	atio_private_data_t *atp = NULL;
1282 	atio_private_data_t *oatp;
1283 	inot_private_data_t *ntp;
1284 
1285 	did = (aep->at_hdr.d_id[0] << 16) | (aep->at_hdr.d_id[1] << 8) | aep->at_hdr.d_id[2];
1286 	sid = (aep->at_hdr.s_id[0] << 16) | (aep->at_hdr.s_id[1] << 8) | aep->at_hdr.s_id[2];
1287 	lun = CAM_EXTLUN_BYTE_SWIZZLE(be64dec(aep->at_cmnd.fcp_cmnd_lun));
1288 
1289 	if (ISP_CAP_MULTI_ID(isp) && isp->isp_nchan > 1) {
1290 		/* Channel has to be derived from D_ID */
1291 		isp_find_chan_by_did(isp, did, &chan);
1292 		if (chan == ISP_NOCHAN) {
1293 			isp_prt(isp, ISP_LOGWARN,
1294 			    "%s: [RX_ID 0x%x] D_ID %x not found on any channel",
1295 			    __func__, aep->at_rxid, did);
1296 			isp_endcmd(isp, aep, NIL_HANDLE, ISP_NOCHAN,
1297 			    ECMD_TERMINATE, 0);
1298 			return;
1299 		}
1300 	} else {
1301 		chan = 0;
1302 	}
1303 
1304 	/*
1305 	 * Find the PDB entry for this initiator
1306 	 */
1307 	if (isp_find_pdb_by_portid(isp, chan, sid, &lp) == 0) {
1308 		/*
1309 		 * If we're not in the port database terminate the exchange.
1310 		 */
1311 		isp_prt(isp, ISP_LOGTINFO, "%s: [RX_ID 0x%x] D_ID 0x%06x found on Chan %d for S_ID 0x%06x wasn't in PDB already",
1312 		    __func__, aep->at_rxid, did, chan, sid);
1313 		isp_dump_portdb(isp, chan);
1314 		isp_endcmd(isp, aep, NIL_HANDLE, chan, ECMD_TERMINATE, 0);
1315 		return;
1316 	}
1317 	nphdl = lp->handle;
1318 
1319 	/*
1320 	 * Get the tstate pointer
1321 	 */
1322 	tptr = get_lun_statep(isp, chan, lun);
1323 	if (tptr == NULL) {
1324 		tptr = get_lun_statep(isp, chan, CAM_LUN_WILDCARD);
1325 		if (tptr == NULL) {
1326 			isp_prt(isp, ISP_LOGWARN,
1327 			    "%s: [0x%x] no state pointer for lun %jx or wildcard",
1328 			    __func__, aep->at_rxid, (uintmax_t)lun);
1329 			if (lun == 0) {
1330 				isp_endcmd(isp, aep, nphdl, chan, SCSI_STATUS_BUSY, 0);
1331 			} else {
1332 				isp_endcmd(isp, aep, nphdl, chan, SCSI_STATUS_CHECK_COND | ECMD_SVALID | (0x5 << 12) | (0x25 << 16), 0);
1333 			}
1334 			return;
1335 		}
1336 	}
1337 
1338 	/*
1339 	 * Start any commands pending resources first.
1340 	 */
1341 	if (isp_atio_restart(isp, chan, tptr))
1342 		goto noresrc;
1343 
1344 	/*
1345 	 * If the f/w is out of resources, just send a BUSY status back.
1346 	 */
1347 	if (aep->at_rxid == AT7_NORESRC_RXID) {
1348 		isp_endcmd(isp, aep, nphdl, chan, SCSI_BUSY, 0);
1349 		return;
1350 	}
1351 
1352 	/*
1353 	 * If we're out of resources, just send a BUSY status back.
1354 	 */
1355 	atiop = (struct ccb_accept_tio *) SLIST_FIRST(&tptr->atios);
1356 	if (atiop == NULL) {
1357 		isp_prt(isp, ISP_LOGTDEBUG0, "[0x%x] out of atios", aep->at_rxid);
1358 		goto noresrc;
1359 	}
1360 
1361 	oatp = isp_find_atpd(isp, chan, aep->at_rxid);
1362 	if (oatp) {
1363 		isp_prt(isp, oatp->state == ATPD_STATE_LAST_CTIO ? ISP_LOGTDEBUG0 :
1364 		    ISP_LOGWARN, "[0x%x] tag wraparound (N-Port Handle "
1365 		    "0x%04x S_ID 0x%04x OX_ID 0x%04x) oatp state %d",
1366 		    aep->at_rxid, nphdl, sid, aep->at_hdr.ox_id, oatp->state);
1367 		/*
1368 		 * It's not a "no resource" condition- but we can treat it like one
1369 		 */
1370 		goto noresrc;
1371 	}
1372 	atp = isp_get_atpd(isp, chan, aep->at_rxid);
1373 	if (atp == NULL) {
1374 		isp_prt(isp, ISP_LOGTDEBUG0, "[0x%x] out of atps", aep->at_rxid);
1375 		isp_endcmd(isp, aep, nphdl, chan, SCSI_BUSY, 0);
1376 		return;
1377 	}
1378 	atp->word3 = lp->prli_word3;
1379 	atp->state = ATPD_STATE_ATIO;
1380 	SLIST_REMOVE_HEAD(&tptr->atios, sim_links.sle);
1381 	ISP_PATH_PRT(isp, ISP_LOGTDEBUG2, atiop->ccb_h.path, "Take FREE ATIO\n");
1382 	atiop->init_id = FC_PORTDB_TGT(isp, chan, lp);
1383 	atiop->ccb_h.target_id = ISP_MAX_TARGETS(isp);
1384 	atiop->ccb_h.target_lun = lun;
1385 	atiop->sense_len = 0;
1386 	cdbxlen = aep->at_cmnd.fcp_cmnd_alen_datadir >> FCP_CMND_ADDTL_CDBLEN_SHIFT;
1387 	if (cdbxlen) {
1388 		isp_prt(isp, ISP_LOGWARN, "additional CDBLEN ignored");
1389 	}
1390 	cdbxlen = sizeof (aep->at_cmnd.cdb_dl.sf.fcp_cmnd_cdb);
1391 	ISP_MEMCPY(atiop->cdb_io.cdb_bytes, aep->at_cmnd.cdb_dl.sf.fcp_cmnd_cdb, cdbxlen);
1392 	atiop->cdb_len = cdbxlen;
1393 	atiop->ccb_h.status = CAM_CDB_RECVD;
1394 	atiop->tag_id = atp->tag;
1395 	switch (aep->at_cmnd.fcp_cmnd_task_attribute & FCP_CMND_TASK_ATTR_MASK) {
1396 	case FCP_CMND_TASK_ATTR_SIMPLE:
1397 		atiop->ccb_h.flags |= CAM_TAG_ACTION_VALID;
1398 		atiop->tag_action = MSG_SIMPLE_TASK;
1399 		break;
1400 	case FCP_CMND_TASK_ATTR_HEAD:
1401 		atiop->ccb_h.flags |= CAM_TAG_ACTION_VALID;
1402 		atiop->tag_action = MSG_HEAD_OF_QUEUE_TASK;
1403 		break;
1404 	case FCP_CMND_TASK_ATTR_ORDERED:
1405 		atiop->ccb_h.flags |= CAM_TAG_ACTION_VALID;
1406 		atiop->tag_action = MSG_ORDERED_TASK;
1407 		break;
1408 	case FCP_CMND_TASK_ATTR_ACA:
1409 		atiop->ccb_h.flags |= CAM_TAG_ACTION_VALID;
1410 		atiop->tag_action = MSG_ACA_TASK;
1411 		break;
1412 	case FCP_CMND_TASK_ATTR_UNTAGGED:
1413 	default:
1414 		atiop->tag_action = 0;
1415 		break;
1416 	}
1417 	atiop->priority = (aep->at_cmnd.fcp_cmnd_task_attribute &
1418 	    FCP_CMND_PRIO_MASK) >> FCP_CMND_PRIO_SHIFT;
1419 	atp->orig_datalen = aep->at_cmnd.cdb_dl.sf.fcp_cmnd_dl;
1420 	atp->bytes_xfered = 0;
1421 	atp->lun = lun;
1422 	atp->nphdl = nphdl;
1423 	atp->sid = sid;
1424 	atp->did = did;
1425 	atp->oxid = aep->at_hdr.ox_id;
1426 	atp->rxid = aep->at_hdr.rx_id;
1427 	atp->cdb0 = atiop->cdb_io.cdb_bytes[0];
1428 	atp->tattr = aep->at_cmnd.fcp_cmnd_task_attribute & FCP_CMND_TASK_ATTR_MASK;
1429 	atp->state = ATPD_STATE_CAM;
1430 	isp_prt(isp, ISP_LOGTDEBUG0, "ATIO7[0x%x] CDB=0x%x lun %jx datalen %u",
1431 	    aep->at_rxid, atp->cdb0, (uintmax_t)lun, atp->orig_datalen);
1432 	xpt_done((union ccb *)atiop);
1433 	return;
1434 noresrc:
1435 	KASSERT(atp == NULL, ("%s: atp is not NULL on noresrc!\n", __func__));
1436 	ntp = isp_get_ntpd(isp, chan);
1437 	if (ntp == NULL) {
1438 		isp_endcmd(isp, aep, nphdl, chan, SCSI_STATUS_BUSY, 0);
1439 		return;
1440 	}
1441 	memcpy(ntp->data, aep, QENTRY_LEN);
1442 	STAILQ_INSERT_TAIL(&tptr->restart_queue, ntp, next);
1443 }
1444 
1445 
1446 /*
1447  * Handle starting an SRR (sequence retransmit request)
1448  * We get here when we've gotten the immediate notify
1449  * and the return of all outstanding CTIOs for this
1450  * transaction.
1451  */
1452 static void
1453 isp_handle_srr_start(ispsoftc_t *isp, atio_private_data_t *atp)
1454 {
1455 	in_fcentry_24xx_t *inot;
1456 	uint32_t srr_off, ccb_off, ccb_len, ccb_end;
1457 	union ccb *ccb;
1458 
1459 	inot = (in_fcentry_24xx_t *)atp->srr;
1460 	srr_off = inot->in_srr_reloff_lo | (inot->in_srr_reloff_hi << 16);
1461 	ccb = atp->srr_ccb;
1462 	atp->srr_ccb = NULL;
1463 	atp->nsrr++;
1464 	if (ccb == NULL) {
1465 		isp_prt(isp, ISP_LOGWARN, "SRR[0x%x] null ccb", atp->tag);
1466 		goto fail;
1467 	}
1468 
1469 	ccb_off = ccb->ccb_h.spriv_field0;
1470 	ccb_len = ccb->csio.dxfer_len;
1471         ccb_end = (ccb_off == ~0)? ~0 : ccb_off + ccb_len;
1472 
1473 	switch (inot->in_srr_iu) {
1474 	case R_CTL_INFO_SOLICITED_DATA:
1475 		/*
1476 		 * We have to restart a FCP_DATA data out transaction
1477 		 */
1478 		atp->sendst = 0;
1479 		atp->bytes_xfered = srr_off;
1480 		if (ccb_len == 0) {
1481 			isp_prt(isp, ISP_LOGWARN, "SRR[0x%x] SRR offset 0x%x but current CCB doesn't transfer data", atp->tag, srr_off);
1482 			goto mdp;
1483 		}
1484  		if (srr_off < ccb_off || ccb_off > srr_off + ccb_len) {
1485 			isp_prt(isp, ISP_LOGWARN, "SRR[0x%x] SRR offset 0x%x not covered by current CCB data range [0x%x..0x%x]", atp->tag, srr_off, ccb_off, ccb_end);
1486 			goto mdp;
1487 		}
1488 		isp_prt(isp, ISP_LOGWARN, "SRR[0x%x] SRR offset 0x%x covered by current CCB data range [0x%x..0x%x]", atp->tag, srr_off, ccb_off, ccb_end);
1489 		break;
1490 	case R_CTL_INFO_COMMAND_STATUS:
1491 		isp_prt(isp, ISP_LOGTINFO, "SRR[0x%x] Got an FCP RSP SRR- resending status", atp->tag);
1492 		atp->sendst = 1;
1493 		/*
1494 		 * We have to restart a FCP_RSP IU transaction
1495 		 */
1496 		break;
1497 	case R_CTL_INFO_DATA_DESCRIPTOR:
1498 		/*
1499 		 * We have to restart an FCP DATA in transaction
1500 		 */
1501 		isp_prt(isp, ISP_LOGWARN, "Got an FCP DATA IN SRR- dropping");
1502 		goto fail;
1503 
1504 	default:
1505 		isp_prt(isp, ISP_LOGWARN, "Got an unknown information (%x) SRR- dropping", inot->in_srr_iu);
1506 		goto fail;
1507 	}
1508 
1509 	/*
1510 	 * We can't do anything until this is acked, so we might as well start it now.
1511 	 * We aren't going to do the usual asynchronous ack issue because we need
1512 	 * to make sure this gets on the wire first.
1513 	 */
1514 	if (isp_notify_ack(isp, inot)) {
1515 		isp_prt(isp, ISP_LOGWARN, "could not push positive ack for SRR- you lose");
1516 		goto fail;
1517 	}
1518 	isp_target_start_ctio(isp, ccb, FROM_SRR);
1519 	return;
1520 fail:
1521 	inot->in_reserved = 1;
1522 	isp_async(isp, ISPASYNC_TARGET_NOTIFY_ACK, inot);
1523 	ccb->ccb_h.status &= ~CAM_STATUS_MASK;
1524 	ccb->ccb_h.status |= CAM_REQ_CMP_ERR;
1525 	isp_complete_ctio(isp, ccb);
1526 	return;
1527 mdp:
1528 	if (isp_notify_ack(isp, inot)) {
1529 		isp_prt(isp, ISP_LOGWARN, "could not push positive ack for SRR- you lose");
1530 		goto fail;
1531 	}
1532 	ccb->ccb_h.status &= ~CAM_STATUS_MASK;
1533 	ccb->ccb_h.status |= CAM_MESSAGE_RECV;
1534 	/*
1535 	 * This is not a strict interpretation of MDP, but it's close
1536 	 */
1537 	ccb->csio.msg_ptr = &ccb->csio.sense_data.sense_buf[SSD_FULL_SIZE - 16];
1538 	ccb->csio.msg_len = 7;
1539 	ccb->csio.msg_ptr[0] = MSG_EXTENDED;
1540 	ccb->csio.msg_ptr[1] = 5;
1541 	ccb->csio.msg_ptr[2] = 0;	/* modify data pointer */
1542 	ccb->csio.msg_ptr[3] = srr_off >> 24;
1543 	ccb->csio.msg_ptr[4] = srr_off >> 16;
1544 	ccb->csio.msg_ptr[5] = srr_off >> 8;
1545 	ccb->csio.msg_ptr[6] = srr_off;
1546 	isp_complete_ctio(isp, ccb);
1547 }
1548 
1549 
1550 static void
1551 isp_handle_platform_srr(ispsoftc_t *isp, isp_notify_t *notify)
1552 {
1553 	in_fcentry_24xx_t *inot = notify->nt_lreserved;
1554 	atio_private_data_t *atp;
1555 	uint32_t tag = notify->nt_tagval & 0xffffffff;
1556 
1557 	atp = isp_find_atpd(isp, notify->nt_channel, tag);
1558 	if (atp == NULL) {
1559 		isp_prt(isp, ISP_LOGERR, "%s: cannot find adjunct for %x in SRR Notify",
1560 		    __func__, tag);
1561 		isp_async(isp, ISPASYNC_TARGET_NOTIFY_ACK, inot);
1562 		return;
1563 	}
1564 	atp->srr_notify_rcvd = 1;
1565 	memcpy(atp->srr, inot, sizeof (atp->srr));
1566 	isp_prt(isp, ISP_LOGTINFO, "SRR[0x%x] flags 0x%x srr_iu %x reloff 0x%x",
1567 	    inot->in_rxid, inot->in_flags, inot->in_srr_iu,
1568 	    ((uint32_t)inot->in_srr_reloff_hi << 16) | inot->in_srr_reloff_lo);
1569 	if (atp->srr_ccb)
1570 		isp_handle_srr_start(isp, atp);
1571 }
1572 
1573 static void
1574 isp_handle_platform_ctio(ispsoftc_t *isp, ct7_entry_t *ct)
1575 {
1576 	union ccb *ccb;
1577 	int sentstatus = 0, ok = 0, notify_cam = 0, failure = 0;
1578 	atio_private_data_t *atp = NULL;
1579 	int bus;
1580 	uint32_t handle, data_requested, resid;
1581 
1582 	handle = ct->ct_syshandle;
1583 	ccb = isp_find_xs(isp, handle);
1584 	if (ccb == NULL) {
1585 		isp_print_bytes(isp, "null ccb in isp_handle_platform_ctio", QENTRY_LEN, ct);
1586 		return;
1587 	}
1588 	isp_destroy_handle(isp, handle);
1589 	resid = data_requested = PISP_PCMD(ccb)->datalen;
1590 	isp_free_pcmd(isp, ccb);
1591 
1592 	bus = XS_CHANNEL(ccb);
1593 	atp = isp_find_atpd(isp, bus, ct->ct_rxid);
1594 	if (atp == NULL) {
1595 		/*
1596 		 * XXX: isp_clear_commands() generates fake CTIO with zero
1597 		 * ct_rxid value, filling only ct_syshandle.  Workaround
1598 		 * that using tag_id from the CCB, pointed by ct_syshandle.
1599 		 */
1600 		atp = isp_find_atpd(isp, bus, ccb->csio.tag_id);
1601 	}
1602 	if (atp == NULL) {
1603 		isp_prt(isp, ISP_LOGERR, "%s: cannot find adjunct for %x after I/O", __func__, ccb->csio.tag_id);
1604 		return;
1605 	}
1606 	KASSERT((atp->ctcnt > 0), ("ctio count not greater than zero"));
1607 	atp->bytes_in_transit -= data_requested;
1608 	atp->ctcnt -= 1;
1609 	ccb->ccb_h.status &= ~CAM_STATUS_MASK;
1610 
1611 	if (ct->ct_nphdl == CT7_SRR) {
1612 		atp->srr_ccb = ccb;
1613 		if (atp->srr_notify_rcvd)
1614 			isp_handle_srr_start(isp, atp);
1615 		return;
1616 	}
1617 	if (ct->ct_nphdl == CT_HBA_RESET) {
1618 		sentstatus = (ccb->ccb_h.flags & CAM_SEND_STATUS) &&
1619 		    (atp->sendst == 0);
1620 		failure = CAM_UNREC_HBA_ERROR;
1621 	} else {
1622 		sentstatus = ct->ct_flags & CT7_SENDSTATUS;
1623 		ok = (ct->ct_nphdl == CT7_OK);
1624 		notify_cam = (ct->ct_header.rqs_seqno & ATPD_SEQ_NOTIFY_CAM) != 0;
1625 		if ((ct->ct_flags & CT7_DATAMASK) != CT7_NO_DATA)
1626 			resid = ct->ct_resid;
1627 	}
1628 	isp_prt(isp, ok? ISP_LOGTDEBUG0 : ISP_LOGWARN, "%s: CTIO7[%x] seq %u nc %d sts 0x%x flg 0x%x sns %d resid %d %s", __func__, ct->ct_rxid, ATPD_GET_SEQNO(ct),
1629 	   notify_cam, ct->ct_nphdl, ct->ct_flags, (ccb->ccb_h.status & CAM_SENT_SENSE) != 0, resid, sentstatus? "FIN" : "MID");
1630 	if (ok) {
1631 		if (data_requested > 0) {
1632 			atp->bytes_xfered += data_requested - resid;
1633 			ccb->csio.resid = ccb->csio.dxfer_len -
1634 			    (data_requested - resid);
1635 		}
1636 		if (sentstatus && (ccb->ccb_h.flags & CAM_SEND_SENSE))
1637 			ccb->ccb_h.status |= CAM_SENT_SENSE;
1638 		ccb->ccb_h.status |= CAM_REQ_CMP;
1639 	} else {
1640 		notify_cam = 1;
1641 		if (failure == CAM_UNREC_HBA_ERROR)
1642 			ccb->ccb_h.status |= CAM_UNREC_HBA_ERROR;
1643 		else
1644 			ccb->ccb_h.status |= CAM_REQ_CMP_ERR;
1645 	}
1646 	atp->state = ATPD_STATE_PDON;
1647 
1648 	/*
1649 	 * We never *not* notify CAM when there has been any error (ok == 0),
1650 	 * so we never need to do an ATIO putback if we're not notifying CAM.
1651 	 */
1652 	isp_prt(isp, ISP_LOGTDEBUG0, "%s CTIO[0x%x] done (ok=%d nc=%d nowsendstatus=%d ccb ss=%d)",
1653 	    (sentstatus)? "  FINAL " : "MIDTERM ", atp->tag, ok, notify_cam, atp->sendst, (ccb->ccb_h.flags & CAM_SEND_STATUS) != 0);
1654 	if (notify_cam == 0) {
1655 		if (atp->sendst) {
1656 			isp_target_start_ctio(isp, ccb, FROM_CTIO_DONE);
1657 		}
1658 		return;
1659 	}
1660 
1661 	/*
1662 	 * We are done with this ATIO if we successfully sent status.
1663 	 * In all other cases expect either another CTIO or XPT_ABORT.
1664 	 */
1665 	if (ok && sentstatus)
1666 		isp_put_atpd(isp, bus, atp);
1667 
1668 	/*
1669 	 * We're telling CAM we're done with this CTIO transaction.
1670 	 *
1671 	 * 24XX cards never need an ATIO put back.
1672 	 */
1673 	isp_complete_ctio(isp, ccb);
1674 }
1675 
1676 static int
1677 isp_handle_platform_target_notify_ack(ispsoftc_t *isp, isp_notify_t *mp, uint32_t rsp)
1678 {
1679 	ct7_entry_t local, *cto = &local;
1680 
1681 	if (isp->isp_state != ISP_RUNSTATE) {
1682 		isp_prt(isp, ISP_LOGTINFO, "Notify Code 0x%x (qevalid=%d) acked- h/w not ready (dropping)", mp->nt_ncode, mp->nt_lreserved != NULL);
1683 		return (0);
1684 	}
1685 
1686 	/*
1687 	 * This case is for a Task Management Function, which shows up as an ATIO7 entry.
1688 	 */
1689 	if (mp->nt_lreserved && ((isphdr_t *)mp->nt_lreserved)->rqs_entry_type == RQSTYPE_ATIO) {
1690 		at7_entry_t *aep = (at7_entry_t *)mp->nt_lreserved;
1691 		fcportdb_t *lp;
1692 		uint32_t sid;
1693 		uint16_t nphdl;
1694 
1695 		sid = (aep->at_hdr.s_id[0] << 16) | (aep->at_hdr.s_id[1] << 8) | aep->at_hdr.s_id[2];
1696 		if (isp_find_pdb_by_portid(isp, mp->nt_channel, sid, &lp)) {
1697 			nphdl = lp->handle;
1698 		} else {
1699 			nphdl = NIL_HANDLE;
1700 		}
1701 		ISP_MEMZERO(cto, sizeof (ct7_entry_t));
1702 		cto->ct_header.rqs_entry_type = RQSTYPE_CTIO7;
1703 		cto->ct_header.rqs_entry_count = 1;
1704 		cto->ct_nphdl = nphdl;
1705 		cto->ct_rxid = aep->at_rxid;
1706 		cto->ct_vpidx = mp->nt_channel;
1707 		cto->ct_iid_lo = sid;
1708 		cto->ct_iid_hi = sid >> 16;
1709 		cto->ct_oxid = aep->at_hdr.ox_id;
1710 		cto->ct_flags = CT7_SENDSTATUS|CT7_NOACK|CT7_NO_DATA|CT7_FLAG_MODE1;
1711 		cto->ct_flags |= (aep->at_ta_len >> 12) << CT7_TASK_ATTR_SHIFT;
1712 		if (rsp != 0) {
1713 			cto->ct_scsi_status |= (FCP_RSPLEN_VALID << 8);
1714 			cto->rsp.m1.ct_resplen = 4;
1715 			ISP_MEMZERO(cto->rsp.m1.ct_resp, sizeof (cto->rsp.m1.ct_resp));
1716 			cto->rsp.m1.ct_resp[0] = rsp & 0xff;
1717 			cto->rsp.m1.ct_resp[1] = (rsp >> 8) & 0xff;
1718 			cto->rsp.m1.ct_resp[2] = (rsp >> 16) & 0xff;
1719 			cto->rsp.m1.ct_resp[3] = (rsp >> 24) & 0xff;
1720 		}
1721 		return (isp_send_entry(isp, cto));
1722 	}
1723 
1724 	/*
1725 	 * This case is for a responding to an ABTS frame
1726 	 */
1727 	if (mp->nt_lreserved && ((isphdr_t *)mp->nt_lreserved)->rqs_entry_type == RQSTYPE_ABTS_RCVD) {
1728 
1729 		/*
1730 		 * Overload nt_need_ack here to mark whether we've terminated the associated command.
1731 		 */
1732 		if (mp->nt_need_ack) {
1733 			abts_t *abts = (abts_t *)mp->nt_lreserved;
1734 
1735 			ISP_MEMZERO(cto, sizeof (ct7_entry_t));
1736 			isp_prt(isp, ISP_LOGTDEBUG0, "%s: [%x] terminating after ABTS received", __func__, abts->abts_rxid_task);
1737 			cto->ct_header.rqs_entry_type = RQSTYPE_CTIO7;
1738 			cto->ct_header.rqs_entry_count = 1;
1739 			cto->ct_nphdl = mp->nt_nphdl;
1740 			cto->ct_rxid = abts->abts_rxid_task;
1741 			cto->ct_iid_lo = mp->nt_sid;
1742 			cto->ct_iid_hi = mp->nt_sid >> 16;
1743 			cto->ct_oxid = abts->abts_ox_id;
1744 			cto->ct_vpidx = mp->nt_channel;
1745 			cto->ct_flags = CT7_NOACK|CT7_TERMINATE;
1746 			if (isp_send_entry(isp, cto)) {
1747 				return (ENOMEM);
1748 			}
1749 			mp->nt_need_ack = 0;
1750 		}
1751 		return (isp_acknak_abts(isp, mp->nt_lreserved, 0));
1752 	}
1753 
1754 	/*
1755 	 * General purpose acknowledgement
1756 	 */
1757 	if (mp->nt_need_ack) {
1758 		isp_prt(isp, ISP_LOGTINFO, "Notify Code 0x%x (qevalid=%d) being acked", mp->nt_ncode, mp->nt_lreserved != NULL);
1759 		/*
1760 		 * Don't need to use the guaranteed send because the caller can retry
1761 		 */
1762 		return (isp_notify_ack(isp, mp->nt_lreserved));
1763 	}
1764 	return (0);
1765 }
1766 
1767 /*
1768  * Handle task management functions.
1769  *
1770  * We show up here with a notify structure filled out.
1771  *
1772  * The nt_lreserved tag points to the original queue entry
1773  */
1774 static void
1775 isp_handle_platform_target_tmf(ispsoftc_t *isp, isp_notify_t *notify)
1776 {
1777 	tstate_t *tptr;
1778 	fcportdb_t *lp;
1779 	struct ccb_immediate_notify *inot;
1780 	inot_private_data_t *ntp = NULL;
1781 	atio_private_data_t *atp;
1782 	lun_id_t lun;
1783 
1784 	isp_prt(isp, ISP_LOGTDEBUG0, "%s: code 0x%x sid  0x%x tagval 0x%016llx chan %d lun %jx", __func__, notify->nt_ncode,
1785 	    notify->nt_sid, (unsigned long long) notify->nt_tagval, notify->nt_channel, notify->nt_lun);
1786 	if (notify->nt_lun == LUN_ANY) {
1787 		if (notify->nt_tagval == TAG_ANY) {
1788 			lun = CAM_LUN_WILDCARD;
1789 		} else {
1790 			atp = isp_find_atpd(isp, notify->nt_channel,
1791 			    notify->nt_tagval & 0xffffffff);
1792 			lun = atp ? atp->lun : CAM_LUN_WILDCARD;
1793 		}
1794 	} else {
1795 		lun = notify->nt_lun;
1796 	}
1797 	tptr = get_lun_statep(isp, notify->nt_channel, lun);
1798 	if (tptr == NULL) {
1799 		tptr = get_lun_statep(isp, notify->nt_channel, CAM_LUN_WILDCARD);
1800 		if (tptr == NULL) {
1801 			isp_prt(isp, ISP_LOGWARN, "%s: no state pointer found for chan %d lun %#jx", __func__, notify->nt_channel, (uintmax_t)lun);
1802 			goto bad;
1803 		}
1804 	}
1805 	inot = (struct ccb_immediate_notify *) SLIST_FIRST(&tptr->inots);
1806 	if (inot == NULL) {
1807 		isp_prt(isp, ISP_LOGWARN, "%s: out of immediate notify structures for chan %d lun %#jx", __func__, notify->nt_channel, (uintmax_t)lun);
1808 		goto bad;
1809 	}
1810 
1811 	inot->ccb_h.target_id = ISP_MAX_TARGETS(isp);
1812 	inot->ccb_h.target_lun = lun;
1813 	if (isp_find_pdb_by_portid(isp, notify->nt_channel, notify->nt_sid, &lp) == 0 &&
1814 	    isp_find_pdb_by_handle(isp, notify->nt_channel, notify->nt_nphdl, &lp) == 0) {
1815 		inot->initiator_id = CAM_TARGET_WILDCARD;
1816 	} else {
1817 		inot->initiator_id = FC_PORTDB_TGT(isp, notify->nt_channel, lp);
1818 	}
1819 	inot->seq_id = notify->nt_tagval;
1820 	inot->tag_id = notify->nt_tagval >> 32;
1821 
1822 	switch (notify->nt_ncode) {
1823 	case NT_ABORT_TASK:
1824 		isp_target_mark_aborted_early(isp, notify->nt_channel, tptr, inot->tag_id);
1825 		inot->arg = MSG_ABORT_TASK;
1826 		break;
1827 	case NT_ABORT_TASK_SET:
1828 		isp_target_mark_aborted_early(isp, notify->nt_channel, tptr, TAG_ANY);
1829 		inot->arg = MSG_ABORT_TASK_SET;
1830 		break;
1831 	case NT_CLEAR_ACA:
1832 		inot->arg = MSG_CLEAR_ACA;
1833 		break;
1834 	case NT_CLEAR_TASK_SET:
1835 		inot->arg = MSG_CLEAR_TASK_SET;
1836 		break;
1837 	case NT_LUN_RESET:
1838 		inot->arg = MSG_LOGICAL_UNIT_RESET;
1839 		break;
1840 	case NT_TARGET_RESET:
1841 		inot->arg = MSG_TARGET_RESET;
1842 		break;
1843 	case NT_QUERY_TASK_SET:
1844 		inot->arg = MSG_QUERY_TASK_SET;
1845 		break;
1846 	case NT_QUERY_ASYNC_EVENT:
1847 		inot->arg = MSG_QUERY_ASYNC_EVENT;
1848 		break;
1849 	default:
1850 		isp_prt(isp, ISP_LOGWARN, "%s: unknown TMF code 0x%x for chan %d lun %#jx", __func__, notify->nt_ncode, notify->nt_channel, (uintmax_t)lun);
1851 		goto bad;
1852 	}
1853 
1854 	ntp = isp_get_ntpd(isp, notify->nt_channel);
1855 	if (ntp == NULL) {
1856 		isp_prt(isp, ISP_LOGWARN, "%s: out of inotify private structures", __func__);
1857 		goto bad;
1858 	}
1859 	ISP_MEMCPY(&ntp->nt, notify, sizeof (isp_notify_t));
1860 	if (notify->nt_lreserved) {
1861 		ISP_MEMCPY(&ntp->data, notify->nt_lreserved, QENTRY_LEN);
1862 		ntp->nt.nt_lreserved = &ntp->data;
1863 	}
1864 	ntp->seq_id = notify->nt_tagval;
1865 	ntp->tag_id = notify->nt_tagval >> 32;
1866 
1867 	SLIST_REMOVE_HEAD(&tptr->inots, sim_links.sle);
1868 	ISP_PATH_PRT(isp, ISP_LOGTDEBUG2, inot->ccb_h.path, "Take FREE INOT\n");
1869 	inot->ccb_h.status = CAM_MESSAGE_RECV;
1870 	xpt_done((union ccb *)inot);
1871 	return;
1872 bad:
1873 	if (notify->nt_need_ack) {
1874 		if (((isphdr_t *)notify->nt_lreserved)->rqs_entry_type == RQSTYPE_ABTS_RCVD) {
1875 			if (isp_acknak_abts(isp, notify->nt_lreserved, ENOMEM)) {
1876 				isp_prt(isp, ISP_LOGWARN, "you lose- unable to send an ACKNAK");
1877 			}
1878 		} else {
1879 			isp_async(isp, ISPASYNC_TARGET_NOTIFY_ACK, notify->nt_lreserved);
1880 		}
1881 	}
1882 }
1883 
1884 static void
1885 isp_target_mark_aborted_early(ispsoftc_t *isp, int chan, tstate_t *tptr, uint32_t tag_id)
1886 {
1887 	struct isp_fc *fc = ISP_FC_PC(isp, chan);
1888 	atio_private_data_t *atp;
1889 	inot_private_data_t *ntp, *tmp;
1890 	uint32_t this_tag_id;
1891 
1892 	/*
1893 	 * First, clean any commands pending restart
1894 	 */
1895 	STAILQ_FOREACH_SAFE(ntp, &tptr->restart_queue, next, tmp) {
1896 		this_tag_id = ((at7_entry_t *)ntp->data)->at_rxid;
1897 		if ((uint64_t)tag_id == TAG_ANY || tag_id == this_tag_id) {
1898 			isp_endcmd(isp, ntp->data, NIL_HANDLE, chan,
1899 			    ECMD_TERMINATE, 0);
1900 			isp_put_ntpd(isp, chan, ntp);
1901 			STAILQ_REMOVE(&tptr->restart_queue, ntp,
1902 			    inot_private_data, next);
1903 		}
1904 	}
1905 
1906 	/*
1907 	 * Now mark other ones dead as well.
1908 	 */
1909 	for (atp = fc->atpool; atp < &fc->atpool[ATPDPSIZE]; atp++) {
1910 		if (atp->lun != tptr->ts_lun)
1911 			continue;
1912 		if ((uint64_t)tag_id == TAG_ANY || atp->tag == tag_id)
1913 			atp->dead = 1;
1914 	}
1915 }
1916 #endif
1917 
1918 static void
1919 isp_poll(struct cam_sim *sim)
1920 {
1921 	ispsoftc_t *isp = cam_sim_softc(sim);
1922 
1923 	ISP_RUN_ISR(isp);
1924 }
1925 
1926 
1927 static void
1928 isp_watchdog(void *arg)
1929 {
1930 	struct ccb_scsiio *xs = arg;
1931 	ispsoftc_t *isp;
1932 	uint32_t ohandle = ISP_HANDLE_FREE, handle;
1933 
1934 	isp = XS_ISP(xs);
1935 
1936 	handle = isp_find_handle(isp, xs);
1937 
1938 	/*
1939 	 * Hand crank the interrupt code just to be sure the command isn't stuck somewhere.
1940 	 */
1941 	if (handle != ISP_HANDLE_FREE) {
1942 		ISP_RUN_ISR(isp);
1943 		ohandle = handle;
1944 		handle = isp_find_handle(isp, xs);
1945 	}
1946 	if (handle != ISP_HANDLE_FREE) {
1947 		/*
1948 		 * Try and make sure the command is really dead before
1949 		 * we release the handle (and DMA resources) for reuse.
1950 		 *
1951 		 * If we are successful in aborting the command then
1952 		 * we're done here because we'll get the command returned
1953 		 * back separately.
1954 		 */
1955 		if (isp_control(isp, ISPCTL_ABORT_CMD, xs) == 0) {
1956 			return;
1957 		}
1958 
1959 		/*
1960 		 * Note that after calling the above, the command may in
1961 		 * fact have been completed.
1962 		 */
1963 		xs = isp_find_xs(isp, handle);
1964 
1965 		/*
1966 		 * If the command no longer exists, then we won't
1967 		 * be able to find the xs again with this handle.
1968 		 */
1969 		if (xs == NULL) {
1970 			return;
1971 		}
1972 
1973 		/*
1974 		 * After this point, the command is really dead.
1975 		 */
1976 		ISP_DMAFREE(isp, xs);
1977 		isp_destroy_handle(isp, handle);
1978 		isp_prt(isp, ISP_LOGERR, "%s: timeout for handle 0x%x", __func__, handle);
1979 		XS_SETERR(xs, CAM_CMD_TIMEOUT);
1980 		isp_done(xs);
1981 	} else {
1982 		if (ohandle != ISP_HANDLE_FREE) {
1983 			isp_prt(isp, ISP_LOGWARN, "%s: timeout for handle 0x%x, recovered during interrupt", __func__, ohandle);
1984 		} else {
1985 			isp_prt(isp, ISP_LOGWARN, "%s: timeout for handle already free", __func__);
1986 		}
1987 	}
1988 }
1989 
1990 static void
1991 isp_make_here(ispsoftc_t *isp, fcportdb_t *fcp, int chan, int tgt)
1992 {
1993 	union ccb *ccb;
1994 	struct isp_fc *fc = ISP_FC_PC(isp, chan);
1995 
1996 	/*
1997 	 * Allocate a CCB, create a wildcard path for this target and schedule a rescan.
1998 	 */
1999 	ccb = xpt_alloc_ccb_nowait();
2000 	if (ccb == NULL) {
2001 		isp_prt(isp, ISP_LOGWARN, "Chan %d unable to alloc CCB for rescan", chan);
2002 		return;
2003 	}
2004 	if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(fc->sim),
2005 	    tgt, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2006 		isp_prt(isp, ISP_LOGWARN, "unable to create path for rescan");
2007 		xpt_free_ccb(ccb);
2008 		return;
2009 	}
2010 	xpt_rescan(ccb);
2011 }
2012 
2013 static void
2014 isp_make_gone(ispsoftc_t *isp, fcportdb_t *fcp, int chan, int tgt)
2015 {
2016 	struct cam_path *tp;
2017 	struct isp_fc *fc = ISP_FC_PC(isp, chan);
2018 
2019 	if (xpt_create_path(&tp, NULL, cam_sim_path(fc->sim), tgt, CAM_LUN_WILDCARD) == CAM_REQ_CMP) {
2020 		xpt_async(AC_LOST_DEVICE, tp, NULL);
2021 		xpt_free_path(tp);
2022 	}
2023 }
2024 
2025 /*
2026  * Gone Device Timer Function- when we have decided that a device has gone
2027  * away, we wait a specific period of time prior to telling the OS it has
2028  * gone away.
2029  *
2030  * This timer function fires once a second and then scans the port database
2031  * for devices that are marked dead but still have a virtual target assigned.
2032  * We decrement a counter for that port database entry, and when it hits zero,
2033  * we tell the OS the device has gone away.
2034  */
2035 static void
2036 isp_gdt(void *arg)
2037 {
2038 	struct isp_fc *fc = arg;
2039 	taskqueue_enqueue(taskqueue_thread, &fc->gtask);
2040 }
2041 
2042 static void
2043 isp_gdt_task(void *arg, int pending)
2044 {
2045 	struct isp_fc *fc = arg;
2046 	ispsoftc_t *isp = fc->isp;
2047 	int chan = fc - ISP_FC_PC(isp, 0);
2048 	fcportdb_t *lp;
2049 	struct ac_contract ac;
2050 	struct ac_device_changed *adc;
2051 	int dbidx, more_to_do = 0;
2052 
2053 	ISP_LOCK(isp);
2054 	isp_prt(isp, ISP_LOGDEBUG0, "Chan %d GDT timer expired", chan);
2055 	for (dbidx = 0; dbidx < MAX_FC_TARG; dbidx++) {
2056 		lp = &FCPARAM(isp, chan)->portdb[dbidx];
2057 
2058 		if (lp->state != FC_PORTDB_STATE_ZOMBIE) {
2059 			continue;
2060 		}
2061 		if (lp->gone_timer != 0) {
2062 			lp->gone_timer -= 1;
2063 			more_to_do++;
2064 			continue;
2065 		}
2066 		isp_prt(isp, ISP_LOGCONFIG, prom3, chan, dbidx, lp->portid, "Gone Device Timeout");
2067 		if (lp->is_target) {
2068 			lp->is_target = 0;
2069 			isp_make_gone(isp, lp, chan, dbidx);
2070 		}
2071 		if (lp->is_initiator) {
2072 			lp->is_initiator = 0;
2073 			ac.contract_number = AC_CONTRACT_DEV_CHG;
2074 			adc = (struct ac_device_changed *) ac.contract_data;
2075 			adc->wwpn = lp->port_wwn;
2076 			adc->port = lp->portid;
2077 			adc->target = dbidx;
2078 			adc->arrived = 0;
2079 			xpt_async(AC_CONTRACT, fc->path, &ac);
2080 		}
2081 		lp->state = FC_PORTDB_STATE_NIL;
2082 	}
2083 	if (fc->ready) {
2084 		if (more_to_do) {
2085 			callout_reset(&fc->gdt, hz, isp_gdt, fc);
2086 		} else {
2087 			callout_deactivate(&fc->gdt);
2088 			isp_prt(isp, ISP_LOG_SANCFG, "Chan %d Stopping Gone Device Timer @ %lu", chan, (unsigned long) time_uptime);
2089 		}
2090 	}
2091 	ISP_UNLOCK(isp);
2092 }
2093 
2094 /*
2095  * When loop goes down we remember the time and freeze CAM command queue.
2096  * During some time period we are trying to reprobe the loop.  But if we
2097  * fail, we tell the OS that devices have gone away and drop the freeze.
2098  *
2099  * We don't clear the devices out of our port database because, when loop
2100  * come back up, we have to do some actual cleanup with the chip at that
2101  * point (implicit PLOGO, e.g., to get the chip's port database state right).
2102  */
2103 static void
2104 isp_loop_changed(ispsoftc_t *isp, int chan)
2105 {
2106 	fcparam *fcp = FCPARAM(isp, chan);
2107 	struct isp_fc *fc = ISP_FC_PC(isp, chan);
2108 
2109 	if (fc->loop_down_time)
2110 		return;
2111 	isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0, "Chan %d Loop changed", chan);
2112 	if (fcp->role & ISP_ROLE_INITIATOR)
2113 		isp_freeze_loopdown(isp, chan);
2114 	fc->loop_down_time = time_uptime;
2115 	wakeup(fc);
2116 }
2117 
2118 static void
2119 isp_loop_up(ispsoftc_t *isp, int chan)
2120 {
2121 	struct isp_fc *fc = ISP_FC_PC(isp, chan);
2122 
2123 	isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0, "Chan %d Loop is up", chan);
2124 	fc->loop_seen_once = 1;
2125 	fc->loop_down_time = 0;
2126 	isp_unfreeze_loopdown(isp, chan);
2127 }
2128 
2129 static void
2130 isp_loop_dead(ispsoftc_t *isp, int chan)
2131 {
2132 	fcparam *fcp = FCPARAM(isp, chan);
2133 	struct isp_fc *fc = ISP_FC_PC(isp, chan);
2134 	fcportdb_t *lp;
2135 	struct ac_contract ac;
2136 	struct ac_device_changed *adc;
2137 	int dbidx, i;
2138 
2139 	isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0, "Chan %d Loop is dead", chan);
2140 
2141 	/*
2142 	 * Notify to the OS all targets who we now consider have departed.
2143 	 */
2144 	for (dbidx = 0; dbidx < MAX_FC_TARG; dbidx++) {
2145 		lp = &fcp->portdb[dbidx];
2146 
2147 		if (lp->state == FC_PORTDB_STATE_NIL)
2148 			continue;
2149 
2150 		for (i = 0; i < ISP_HANDLE_NUM(isp); i++) {
2151 			struct ccb_scsiio *xs;
2152 
2153 			if (ISP_H2HT(isp->isp_xflist[i].handle) != ISP_HANDLE_INITIATOR) {
2154 				continue;
2155 			}
2156 			if ((xs = isp->isp_xflist[i].cmd) == NULL) {
2157 				continue;
2158                         }
2159 			if (dbidx != XS_TGT(xs)) {
2160 				continue;
2161 			}
2162 			isp_prt(isp, ISP_LOGWARN, "command handle 0x%x for %d.%d.%jx orphaned by loop down timeout",
2163 			    isp->isp_xflist[i].handle, chan, XS_TGT(xs),
2164 			    (uintmax_t)XS_LUN(xs));
2165 
2166 			/*
2167 			 * Just like in isp_watchdog, abort the outstanding
2168 			 * command or immediately free its resources if it is
2169 			 * not active
2170 			 */
2171 			if (isp_control(isp, ISPCTL_ABORT_CMD, xs) == 0) {
2172 				continue;
2173 			}
2174 
2175 			ISP_DMAFREE(isp, xs);
2176 			isp_destroy_handle(isp, isp->isp_xflist[i].handle);
2177 			isp_prt(isp, ISP_LOGWARN, "command handle 0x%x for %d.%d.%jx could not be aborted and was destroyed",
2178 			    isp->isp_xflist[i].handle, chan, XS_TGT(xs),
2179 			    (uintmax_t)XS_LUN(xs));
2180 			XS_SETERR(xs, HBA_BUSRESET);
2181 			isp_done(xs);
2182 		}
2183 
2184 		isp_prt(isp, ISP_LOGCONFIG, prom3, chan, dbidx, lp->portid, "Loop Down Timeout");
2185 		if (lp->is_target) {
2186 			lp->is_target = 0;
2187 			isp_make_gone(isp, lp, chan, dbidx);
2188 		}
2189 		if (lp->is_initiator) {
2190 			lp->is_initiator = 0;
2191 			ac.contract_number = AC_CONTRACT_DEV_CHG;
2192 			adc = (struct ac_device_changed *) ac.contract_data;
2193 			adc->wwpn = lp->port_wwn;
2194 			adc->port = lp->portid;
2195 			adc->target = dbidx;
2196 			adc->arrived = 0;
2197 			xpt_async(AC_CONTRACT, fc->path, &ac);
2198 		}
2199 	}
2200 
2201 	isp_unfreeze_loopdown(isp, chan);
2202 	fc->loop_down_time = 0;
2203 }
2204 
2205 static void
2206 isp_kthread(void *arg)
2207 {
2208 	struct isp_fc *fc = arg;
2209 	ispsoftc_t *isp = fc->isp;
2210 	int chan = fc - ISP_FC_PC(isp, 0);
2211 	int slp = 0, d;
2212 	int lb, lim;
2213 
2214 	ISP_LOCK(isp);
2215 	while (isp->isp_osinfo.is_exiting == 0) {
2216 		isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0,
2217 		    "Chan %d Checking FC state", chan);
2218 		lb = isp_fc_runstate(isp, chan, 250000);
2219 		isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0,
2220 		    "Chan %d FC got to %s state", chan,
2221 		    isp_fc_loop_statename(lb));
2222 
2223 		/*
2224 		 * Our action is different based upon whether we're supporting
2225 		 * Initiator mode or not. If we are, we might freeze the simq
2226 		 * when loop is down and set all sorts of different delays to
2227 		 * check again.
2228 		 *
2229 		 * If not, we simply just wait for loop to come up.
2230 		 */
2231 		if (lb == LOOP_READY || lb < 0) {
2232 			slp = 0;
2233 		} else {
2234 			/*
2235 			 * If we've never seen loop up and we've waited longer
2236 			 * than quickboot time, or we've seen loop up but we've
2237 			 * waited longer than loop_down_limit, give up and go
2238 			 * to sleep until loop comes up.
2239 			 */
2240 			if (fc->loop_seen_once == 0)
2241 				lim = isp_quickboot_time;
2242 			else
2243 				lim = fc->loop_down_limit;
2244 			d = time_uptime - fc->loop_down_time;
2245 			if (d >= lim)
2246 				slp = 0;
2247 			else if (d < 10)
2248 				slp = 1;
2249 			else if (d < 30)
2250 				slp = 5;
2251 			else if (d < 60)
2252 				slp = 10;
2253 			else if (d < 120)
2254 				slp = 20;
2255 			else
2256 				slp = 30;
2257 		}
2258 
2259 		if (slp == 0) {
2260 			if (lb == LOOP_READY)
2261 				isp_loop_up(isp, chan);
2262 			else
2263 				isp_loop_dead(isp, chan);
2264 		}
2265 
2266 		isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0,
2267 		    "Chan %d sleep for %d seconds", chan, slp);
2268 		msleep(fc, &isp->isp_lock, PRIBIO, "ispf", slp * hz);
2269 	}
2270 	fc->num_threads -= 1;
2271 	wakeup(&fc->num_threads);
2272 	ISP_UNLOCK(isp);
2273 	kthread_exit();
2274 }
2275 
2276 #ifdef	ISP_TARGET_MODE
2277 static void
2278 isp_abort_atio(ispsoftc_t *isp, union ccb *ccb)
2279 {
2280 	atio_private_data_t *atp;
2281 	union ccb *accb = ccb->cab.abort_ccb;
2282 	struct ccb_hdr *sccb;
2283 	tstate_t *tptr;
2284 
2285 	tptr = get_lun_statep(isp, XS_CHANNEL(accb), XS_LUN(accb));
2286 	if (tptr != NULL) {
2287 		/* Search for the ATIO among queueued. */
2288 		SLIST_FOREACH(sccb, &tptr->atios, sim_links.sle) {
2289 			if (sccb != &accb->ccb_h)
2290 				continue;
2291 			SLIST_REMOVE(&tptr->atios, sccb, ccb_hdr, sim_links.sle);
2292 			ISP_PATH_PRT(isp, ISP_LOGTDEBUG2, sccb->path,
2293 			    "Abort FREE ATIO\n");
2294 			accb->ccb_h.status = CAM_REQ_ABORTED;
2295 			xpt_done(accb);
2296 			ccb->ccb_h.status = CAM_REQ_CMP;
2297 			return;
2298 		}
2299 	}
2300 
2301 	/* Search for the ATIO among running. */
2302 	atp = isp_find_atpd(isp, XS_CHANNEL(accb), accb->atio.tag_id);
2303 	if (atp != NULL) {
2304 		/* Send TERMINATE to firmware. */
2305 		if (!atp->dead) {
2306 			uint8_t storage[QENTRY_LEN];
2307 			ct7_entry_t *cto = (ct7_entry_t *) storage;
2308 
2309 			ISP_MEMZERO(cto, sizeof (ct7_entry_t));
2310 			cto->ct_header.rqs_entry_type = RQSTYPE_CTIO7;
2311 			cto->ct_header.rqs_entry_count = 1;
2312 			cto->ct_nphdl = atp->nphdl;
2313 			cto->ct_rxid = atp->tag;
2314 			cto->ct_iid_lo = atp->sid;
2315 			cto->ct_iid_hi = atp->sid >> 16;
2316 			cto->ct_oxid = atp->oxid;
2317 			cto->ct_vpidx = XS_CHANNEL(accb);
2318 			cto->ct_flags = CT7_NOACK|CT7_TERMINATE;
2319 			isp_send_entry(isp, cto);
2320 		}
2321 		isp_put_atpd(isp, XS_CHANNEL(accb), atp);
2322 		ccb->ccb_h.status = CAM_REQ_CMP;
2323 	} else {
2324 		ccb->ccb_h.status = CAM_UA_ABORT;
2325 	}
2326 }
2327 
2328 static void
2329 isp_abort_inot(ispsoftc_t *isp, union ccb *ccb)
2330 {
2331 	inot_private_data_t *ntp;
2332 	union ccb *accb = ccb->cab.abort_ccb;
2333 	struct ccb_hdr *sccb;
2334 	tstate_t *tptr;
2335 
2336 	tptr = get_lun_statep(isp, XS_CHANNEL(accb), XS_LUN(accb));
2337 	if (tptr != NULL) {
2338 		/* Search for the INOT among queueued. */
2339 		SLIST_FOREACH(sccb, &tptr->inots, sim_links.sle) {
2340 			if (sccb != &accb->ccb_h)
2341 				continue;
2342 			SLIST_REMOVE(&tptr->inots, sccb, ccb_hdr, sim_links.sle);
2343 			ISP_PATH_PRT(isp, ISP_LOGTDEBUG2, sccb->path,
2344 			    "Abort FREE INOT\n");
2345 			accb->ccb_h.status = CAM_REQ_ABORTED;
2346 			xpt_done(accb);
2347 			ccb->ccb_h.status = CAM_REQ_CMP;
2348 			return;
2349 		}
2350 	}
2351 
2352 	/* Search for the INOT among running. */
2353 	ntp = isp_find_ntpd(isp, XS_CHANNEL(accb), accb->cin1.tag_id, accb->cin1.seq_id);
2354 	if (ntp != NULL) {
2355 		if (ntp->nt.nt_need_ack) {
2356 			isp_async(isp, ISPASYNC_TARGET_NOTIFY_ACK,
2357 			    ntp->nt.nt_lreserved);
2358 		}
2359 		isp_put_ntpd(isp, XS_CHANNEL(accb), ntp);
2360 		ccb->ccb_h.status = CAM_REQ_CMP;
2361 	} else {
2362 		ccb->ccb_h.status = CAM_UA_ABORT;
2363 		return;
2364 	}
2365 }
2366 #endif
2367 
2368 static void
2369 isp_action(struct cam_sim *sim, union ccb *ccb)
2370 {
2371 	int bus, tgt, error;
2372 	ispsoftc_t *isp;
2373 	fcparam *fcp;
2374 	struct ccb_trans_settings *cts;
2375 	sbintime_t ts;
2376 
2377 	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("isp_action\n"));
2378 
2379 	isp = (ispsoftc_t *)cam_sim_softc(sim);
2380 	ISP_ASSERT_LOCKED(isp);
2381 	bus = cam_sim_bus(sim);
2382 	isp_prt(isp, ISP_LOGDEBUG2, "isp_action code %x", ccb->ccb_h.func_code);
2383 	ISP_PCMD(ccb) = NULL;
2384 
2385 	switch (ccb->ccb_h.func_code) {
2386 	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
2387 		/*
2388 		 * Do a couple of preliminary checks...
2389 		 */
2390 		if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
2391 			if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) {
2392 				ccb->ccb_h.status = CAM_REQ_INVALID;
2393 				isp_done((struct ccb_scsiio *) ccb);
2394 				break;
2395 			}
2396 		}
2397 #ifdef	DIAGNOSTIC
2398 		if (ccb->ccb_h.target_id >= ISP_MAX_TARGETS(isp)) {
2399 			xpt_print(ccb->ccb_h.path, "invalid target\n");
2400 			ccb->ccb_h.status = CAM_PATH_INVALID;
2401 		}
2402 		if (ccb->ccb_h.status == CAM_PATH_INVALID) {
2403 			xpt_done(ccb);
2404 			break;
2405 		}
2406 #endif
2407 		ccb->csio.scsi_status = SCSI_STATUS_OK;
2408 		if (isp_get_pcmd(isp, ccb)) {
2409 			isp_prt(isp, ISP_LOGWARN, "out of PCMDs");
2410 			cam_freeze_devq(ccb->ccb_h.path);
2411 			cam_release_devq(ccb->ccb_h.path, RELSIM_RELEASE_AFTER_TIMEOUT, 0, 250, 0);
2412 			ccb->ccb_h.status = CAM_REQUEUE_REQ;
2413 			xpt_done(ccb);
2414 			break;
2415 		}
2416 		error = isp_start((XS_T *) ccb);
2417 		isp_rq_check_above(isp);
2418 		switch (error) {
2419 		case 0:
2420 			ccb->ccb_h.status |= CAM_SIM_QUEUED;
2421 			if (ccb->ccb_h.timeout == CAM_TIME_INFINITY)
2422 				break;
2423 			/* Give firmware extra 10s to handle timeout. */
2424 			ts = SBT_1MS * ccb->ccb_h.timeout + 10 * SBT_1S;
2425 			callout_reset_sbt(&PISP_PCMD(ccb)->wdog, ts, 0,
2426 			    isp_watchdog, ccb, 0);
2427 			break;
2428 		case CMD_RQLATER:
2429 			isp_prt(isp, ISP_LOGDEBUG0, "%d.%jx retry later",
2430 			    XS_TGT(ccb), (uintmax_t)XS_LUN(ccb));
2431 			cam_freeze_devq(ccb->ccb_h.path);
2432 			cam_release_devq(ccb->ccb_h.path, RELSIM_RELEASE_AFTER_TIMEOUT, 0, 1000, 0);
2433 			ccb->ccb_h.status = CAM_REQUEUE_REQ;
2434 			isp_free_pcmd(isp, ccb);
2435 			xpt_done(ccb);
2436 			break;
2437 		case CMD_EAGAIN:
2438 			isp_free_pcmd(isp, ccb);
2439 			cam_freeze_devq(ccb->ccb_h.path);
2440 			cam_release_devq(ccb->ccb_h.path, RELSIM_RELEASE_AFTER_TIMEOUT, 0, 10, 0);
2441 			ccb->ccb_h.status = CAM_REQUEUE_REQ;
2442 			xpt_done(ccb);
2443 			break;
2444 		case CMD_COMPLETE:
2445 			isp_done((struct ccb_scsiio *) ccb);
2446 			break;
2447 		default:
2448 			isp_prt(isp, ISP_LOGERR, "What's this? 0x%x at %d in file %s", error, __LINE__, __FILE__);
2449 			ccb->ccb_h.status = CAM_REQUEUE_REQ;
2450 			isp_free_pcmd(isp, ccb);
2451 			xpt_done(ccb);
2452 		}
2453 		break;
2454 
2455 #ifdef	ISP_TARGET_MODE
2456 	case XPT_EN_LUN:		/* Enable/Disable LUN as a target */
2457 		if (ccb->cel.enable) {
2458 			isp_enable_lun(isp, ccb);
2459 		} else {
2460 			isp_disable_lun(isp, ccb);
2461 		}
2462 		break;
2463 	case XPT_IMMEDIATE_NOTIFY:	/* Add Immediate Notify Resource */
2464 	case XPT_ACCEPT_TARGET_IO:	/* Add Accept Target IO Resource */
2465 	{
2466 		tstate_t *tptr = get_lun_statep(isp, XS_CHANNEL(ccb), ccb->ccb_h.target_lun);
2467 		if (tptr == NULL) {
2468 			const char *str;
2469 
2470 			if (ccb->ccb_h.func_code == XPT_IMMEDIATE_NOTIFY)
2471 				str = "XPT_IMMEDIATE_NOTIFY";
2472 			else
2473 				str = "XPT_ACCEPT_TARGET_IO";
2474 			ISP_PATH_PRT(isp, ISP_LOGWARN, ccb->ccb_h.path,
2475 			    "%s: no state pointer found for %s\n",
2476 			    __func__, str);
2477 			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2478 			xpt_done(ccb);
2479 			break;
2480 		}
2481 
2482 		if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
2483 			ccb->atio.tag_id = 0;
2484 			SLIST_INSERT_HEAD(&tptr->atios, &ccb->ccb_h, sim_links.sle);
2485 			ISP_PATH_PRT(isp, ISP_LOGTDEBUG2, ccb->ccb_h.path,
2486 			    "Put FREE ATIO\n");
2487 		} else if (ccb->ccb_h.func_code == XPT_IMMEDIATE_NOTIFY) {
2488 			ccb->cin1.seq_id = ccb->cin1.tag_id = 0;
2489 			SLIST_INSERT_HEAD(&tptr->inots, &ccb->ccb_h, sim_links.sle);
2490 			ISP_PATH_PRT(isp, ISP_LOGTDEBUG2, ccb->ccb_h.path,
2491 			    "Put FREE INOT\n");
2492 		}
2493 		ccb->ccb_h.status = CAM_REQ_INPROG;
2494 		break;
2495 	}
2496 	case XPT_NOTIFY_ACKNOWLEDGE:		/* notify ack */
2497 	{
2498 		inot_private_data_t *ntp;
2499 
2500 		/*
2501 		 * XXX: Because we cannot guarantee that the path information in the notify acknowledge ccb
2502 		 * XXX: matches that for the immediate notify, we have to *search* for the notify structure
2503 		 */
2504 		/*
2505 		 * All the relevant path information is in the associated immediate notify
2506 		 */
2507 		ISP_PATH_PRT(isp, ISP_LOGTDEBUG0, ccb->ccb_h.path, "%s: [0x%x] NOTIFY ACKNOWLEDGE for 0x%x seen\n", __func__, ccb->cna2.tag_id, ccb->cna2.seq_id);
2508 		ntp = isp_find_ntpd(isp, XS_CHANNEL(ccb), ccb->cna2.tag_id, ccb->cna2.seq_id);
2509 		if (ntp == NULL) {
2510 			ISP_PATH_PRT(isp, ISP_LOGWARN, ccb->ccb_h.path, "%s: [0x%x] XPT_NOTIFY_ACKNOWLEDGE of 0x%x cannot find ntp private data\n", __func__,
2511 			     ccb->cna2.tag_id, ccb->cna2.seq_id);
2512 			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
2513 			xpt_done(ccb);
2514 			break;
2515 		}
2516 		if (isp_handle_platform_target_notify_ack(isp, &ntp->nt,
2517 		    (ccb->ccb_h.flags & CAM_SEND_STATUS) ? ccb->cna2.arg : 0)) {
2518 			cam_freeze_devq(ccb->ccb_h.path);
2519 			cam_release_devq(ccb->ccb_h.path, RELSIM_RELEASE_AFTER_TIMEOUT, 0, 10, 0);
2520 			ccb->ccb_h.status &= ~CAM_STATUS_MASK;
2521 			ccb->ccb_h.status |= CAM_REQUEUE_REQ;
2522 			break;
2523 		}
2524 		isp_put_ntpd(isp, XS_CHANNEL(ccb), ntp);
2525 		ccb->ccb_h.status = CAM_REQ_CMP;
2526 		ISP_PATH_PRT(isp, ISP_LOGTDEBUG0, ccb->ccb_h.path, "%s: [0x%x] calling xpt_done for tag 0x%x\n", __func__, ccb->cna2.tag_id, ccb->cna2.seq_id);
2527 		xpt_done(ccb);
2528 		break;
2529 	}
2530 	case XPT_CONT_TARGET_IO:
2531 		isp_target_start_ctio(isp, ccb, FROM_CAM);
2532 		isp_rq_check_above(isp);
2533 		break;
2534 #endif
2535 	case XPT_RESET_DEV:		/* BDR the specified SCSI device */
2536 		tgt = ccb->ccb_h.target_id;
2537 		tgt |= (bus << 16);
2538 
2539 		error = isp_control(isp, ISPCTL_RESET_DEV, bus, tgt);
2540 		if (error) {
2541 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2542 		} else {
2543 			/*
2544 			 * If we have a FC device, reset the Command
2545 			 * Reference Number, because the target will expect
2546 			 * that we re-start the CRN at 1 after a reset.
2547 			 */
2548 			isp_fcp_reset_crn(isp, bus, tgt, /*tgt_set*/ 1);
2549 
2550 			ccb->ccb_h.status = CAM_REQ_CMP;
2551 		}
2552 		xpt_done(ccb);
2553 		break;
2554 	case XPT_ABORT:			/* Abort the specified CCB */
2555 	{
2556 		union ccb *accb = ccb->cab.abort_ccb;
2557 		switch (accb->ccb_h.func_code) {
2558 #ifdef	ISP_TARGET_MODE
2559 		case XPT_ACCEPT_TARGET_IO:
2560 			isp_abort_atio(isp, ccb);
2561 			break;
2562 		case XPT_IMMEDIATE_NOTIFY:
2563 			isp_abort_inot(isp, ccb);
2564 			break;
2565 #endif
2566 		case XPT_SCSI_IO:
2567 			error = isp_control(isp, ISPCTL_ABORT_CMD, accb);
2568 			if (error) {
2569 				ccb->ccb_h.status = CAM_UA_ABORT;
2570 			} else {
2571 				ccb->ccb_h.status = CAM_REQ_CMP;
2572 			}
2573 			break;
2574 		default:
2575 			ccb->ccb_h.status = CAM_REQ_INVALID;
2576 			break;
2577 		}
2578 		/*
2579 		 * This is not a queued CCB, so the caller expects it to be
2580 		 * complete when control is returned.
2581 		 */
2582 		break;
2583 	}
2584 #define	IS_CURRENT_SETTINGS(c)	(c->type == CTS_TYPE_CURRENT_SETTINGS)
2585 	case XPT_SET_TRAN_SETTINGS:	/* Nexus Settings */
2586 		cts = &ccb->cts;
2587 		if (!IS_CURRENT_SETTINGS(cts)) {
2588 			ccb->ccb_h.status = CAM_REQ_INVALID;
2589 			xpt_done(ccb);
2590 			break;
2591 		}
2592 		ccb->ccb_h.status = CAM_REQ_CMP;
2593 		xpt_done(ccb);
2594 		break;
2595 	case XPT_GET_TRAN_SETTINGS:
2596 	{
2597 		struct ccb_trans_settings_scsi *scsi;
2598 		struct ccb_trans_settings_fc *fc;
2599 
2600 		cts = &ccb->cts;
2601 		scsi = &cts->proto_specific.scsi;
2602 		fc = &cts->xport_specific.fc;
2603 		tgt = cts->ccb_h.target_id;
2604 		fcp = FCPARAM(isp, bus);
2605 
2606 		cts->protocol = PROTO_SCSI;
2607 		cts->protocol_version = SCSI_REV_2;
2608 		cts->transport = XPORT_FC;
2609 		cts->transport_version = 0;
2610 
2611 		scsi->valid = CTS_SCSI_VALID_TQ;
2612 		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
2613 		fc->valid = CTS_FC_VALID_SPEED;
2614 		fc->bitrate = fcp->isp_gbspeed * 100000;
2615 		if (tgt < MAX_FC_TARG) {
2616 			fcportdb_t *lp = &fcp->portdb[tgt];
2617 			fc->wwnn = lp->node_wwn;
2618 			fc->wwpn = lp->port_wwn;
2619 			fc->port = lp->portid;
2620 			fc->valid |= CTS_FC_VALID_WWNN | CTS_FC_VALID_WWPN | CTS_FC_VALID_PORT;
2621 		}
2622 		ccb->ccb_h.status = CAM_REQ_CMP;
2623 		xpt_done(ccb);
2624 		break;
2625 	}
2626 	case XPT_CALC_GEOMETRY:
2627 		cam_calc_geometry(&ccb->ccg, 1);
2628 		xpt_done(ccb);
2629 		break;
2630 
2631 	case XPT_RESET_BUS:		/* Reset the specified bus */
2632 		error = isp_control(isp, ISPCTL_RESET_BUS, bus);
2633 		if (error) {
2634 			ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2635 			xpt_done(ccb);
2636 			break;
2637 		}
2638 		if (bootverbose) {
2639 			xpt_print(ccb->ccb_h.path, "reset bus on channel %d\n", bus);
2640 		}
2641 		xpt_async(AC_BUS_RESET, ISP_FC_PC(isp, bus)->path, 0);
2642 		ccb->ccb_h.status = CAM_REQ_CMP;
2643 		xpt_done(ccb);
2644 		break;
2645 
2646 	case XPT_TERM_IO:		/* Terminate the I/O process */
2647 		ccb->ccb_h.status = CAM_REQ_INVALID;
2648 		xpt_done(ccb);
2649 		break;
2650 
2651 	case XPT_SET_SIM_KNOB:		/* Set SIM knobs */
2652 	{
2653 		struct ccb_sim_knob *kp = &ccb->knob;
2654 		fcparam *fcp = FCPARAM(isp, bus);
2655 
2656 		if (kp->xport_specific.fc.valid & KNOB_VALID_ADDRESS) {
2657 			fcp->isp_wwnn = ISP_FC_PC(isp, bus)->def_wwnn = kp->xport_specific.fc.wwnn;
2658 			fcp->isp_wwpn = ISP_FC_PC(isp, bus)->def_wwpn = kp->xport_specific.fc.wwpn;
2659 			isp_prt(isp, ISP_LOGALL, "Setting Channel %d wwns to 0x%jx 0x%jx", bus, fcp->isp_wwnn, fcp->isp_wwpn);
2660 		}
2661 		ccb->ccb_h.status = CAM_REQ_CMP;
2662 		if (kp->xport_specific.fc.valid & KNOB_VALID_ROLE) {
2663 			int rchange = 0;
2664 			int newrole = 0;
2665 
2666 			switch (kp->xport_specific.fc.role) {
2667 			case KNOB_ROLE_NONE:
2668 				if (fcp->role != ISP_ROLE_NONE) {
2669 					rchange = 1;
2670 					newrole = ISP_ROLE_NONE;
2671 				}
2672 				break;
2673 			case KNOB_ROLE_TARGET:
2674 				if (fcp->role != ISP_ROLE_TARGET) {
2675 					rchange = 1;
2676 					newrole = ISP_ROLE_TARGET;
2677 				}
2678 				break;
2679 			case KNOB_ROLE_INITIATOR:
2680 				if (fcp->role != ISP_ROLE_INITIATOR) {
2681 					rchange = 1;
2682 					newrole = ISP_ROLE_INITIATOR;
2683 				}
2684 				break;
2685 			case KNOB_ROLE_BOTH:
2686 				if (fcp->role != ISP_ROLE_BOTH) {
2687 					rchange = 1;
2688 					newrole = ISP_ROLE_BOTH;
2689 				}
2690 				break;
2691 			}
2692 			if (rchange) {
2693 				ISP_PATH_PRT(isp, ISP_LOGCONFIG, ccb->ccb_h.path, "changing role on from %d to %d\n", fcp->role, newrole);
2694 				if (isp_control(isp, ISPCTL_CHANGE_ROLE,
2695 				    bus, newrole) != 0) {
2696 					ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2697 					xpt_done(ccb);
2698 					break;
2699 				}
2700 			}
2701 		}
2702 		xpt_done(ccb);
2703 		break;
2704 	}
2705 	case XPT_GET_SIM_KNOB_OLD:	/* Get SIM knobs -- compat value */
2706 	case XPT_GET_SIM_KNOB:		/* Get SIM knobs */
2707 	{
2708 		struct ccb_sim_knob *kp = &ccb->knob;
2709 		fcparam *fcp = FCPARAM(isp, bus);
2710 
2711 		kp->xport_specific.fc.wwnn = fcp->isp_wwnn;
2712 		kp->xport_specific.fc.wwpn = fcp->isp_wwpn;
2713 		switch (fcp->role) {
2714 		case ISP_ROLE_NONE:
2715 			kp->xport_specific.fc.role = KNOB_ROLE_NONE;
2716 			break;
2717 		case ISP_ROLE_TARGET:
2718 			kp->xport_specific.fc.role = KNOB_ROLE_TARGET;
2719 			break;
2720 		case ISP_ROLE_INITIATOR:
2721 			kp->xport_specific.fc.role = KNOB_ROLE_INITIATOR;
2722 			break;
2723 		case ISP_ROLE_BOTH:
2724 			kp->xport_specific.fc.role = KNOB_ROLE_BOTH;
2725 			break;
2726 		}
2727 		kp->xport_specific.fc.valid = KNOB_VALID_ADDRESS | KNOB_VALID_ROLE;
2728 		ccb->ccb_h.status = CAM_REQ_CMP;
2729 		xpt_done(ccb);
2730 		break;
2731 	}
2732 	case XPT_PATH_INQ:		/* Path routing inquiry */
2733 	{
2734 		struct ccb_pathinq *cpi = &ccb->cpi;
2735 
2736 		cpi->version_num = 1;
2737 #ifdef	ISP_TARGET_MODE
2738 		cpi->target_sprt = PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO;
2739 #else
2740 		cpi->target_sprt = 0;
2741 #endif
2742 		cpi->hba_eng_cnt = 0;
2743 		cpi->max_target = ISP_MAX_TARGETS(isp) - 1;
2744 		cpi->max_lun = 255;
2745 		cpi->bus_id = cam_sim_bus(sim);
2746 		cpi->maxio = (ISP_NSEG64_MAX - 1) * PAGE_SIZE;
2747 
2748 		fcp = FCPARAM(isp, bus);
2749 
2750 		cpi->hba_misc = PIM_NOBUSRESET | PIM_UNMAPPED;
2751 		cpi->hba_misc |= PIM_EXTLUNS | PIM_NOSCAN;
2752 
2753 		/*
2754 		 * Because our loop ID can shift from time to time,
2755 		 * make our initiator ID out of range of our bus.
2756 		 */
2757 		cpi->initiator_id = cpi->max_target + 1;
2758 
2759 		/*
2760 		 * Set base transfer capabilities for Fibre Channel, for this HBA.
2761 		 */
2762 		if (IS_25XX(isp))
2763 			cpi->base_transfer_speed = 8000000;
2764 		else
2765 			cpi->base_transfer_speed = 4000000;
2766 		cpi->hba_inquiry = PI_TAG_ABLE;
2767 		cpi->transport = XPORT_FC;
2768 		cpi->transport_version = 0;
2769 		cpi->xport_specific.fc.wwnn = fcp->isp_wwnn;
2770 		cpi->xport_specific.fc.wwpn = fcp->isp_wwpn;
2771 		cpi->xport_specific.fc.port = fcp->isp_portid;
2772 		cpi->xport_specific.fc.bitrate = fcp->isp_gbspeed * 1000;
2773 		cpi->protocol = PROTO_SCSI;
2774 		cpi->protocol_version = SCSI_REV_2;
2775 		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
2776 		strlcpy(cpi->hba_vid, "Qlogic", HBA_IDLEN);
2777 		strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
2778 		cpi->unit_number = cam_sim_unit(sim);
2779 		cpi->ccb_h.status = CAM_REQ_CMP;
2780 		xpt_done(ccb);
2781 		break;
2782 	}
2783 	default:
2784 		ccb->ccb_h.status = CAM_REQ_INVALID;
2785 		xpt_done(ccb);
2786 		break;
2787 	}
2788 }
2789 
2790 void
2791 isp_done(XS_T *sccb)
2792 {
2793 	ispsoftc_t *isp = XS_ISP(sccb);
2794 	uint32_t status;
2795 
2796 	if (XS_NOERR(sccb))
2797 		XS_SETERR(sccb, CAM_REQ_CMP);
2798 
2799 	if ((sccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP && (sccb->scsi_status != SCSI_STATUS_OK)) {
2800 		sccb->ccb_h.status &= ~CAM_STATUS_MASK;
2801 		if ((sccb->scsi_status == SCSI_STATUS_CHECK_COND) && (sccb->ccb_h.status & CAM_AUTOSNS_VALID) == 0) {
2802 			sccb->ccb_h.status |= CAM_AUTOSENSE_FAIL;
2803 		} else {
2804 			sccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
2805 		}
2806 	}
2807 
2808 	sccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2809 	status = sccb->ccb_h.status & CAM_STATUS_MASK;
2810 	if (status != CAM_REQ_CMP &&
2811 	    (sccb->ccb_h.status & CAM_DEV_QFRZN) == 0) {
2812 		sccb->ccb_h.status |= CAM_DEV_QFRZN;
2813 		xpt_freeze_devq(sccb->ccb_h.path, 1);
2814 	}
2815 
2816 	if (ISP_PCMD(sccb)) {
2817 		if (callout_active(&PISP_PCMD(sccb)->wdog))
2818 			callout_stop(&PISP_PCMD(sccb)->wdog);
2819 		isp_free_pcmd(isp, (union ccb *) sccb);
2820 	}
2821 	isp_rq_check_below(isp);
2822 	xpt_done((union ccb *) sccb);
2823 }
2824 
2825 void
2826 isp_async(ispsoftc_t *isp, ispasync_t cmd, ...)
2827 {
2828 	int bus;
2829 	static const char prom[] = "Chan %d [%d] WWPN 0x%16jx PortID 0x%06x handle 0x%x %s %s";
2830 	char buf[64];
2831 	char *msg = NULL;
2832 	target_id_t tgt = 0;
2833 	fcportdb_t *lp;
2834 	struct isp_fc *fc;
2835 	struct ac_contract ac;
2836 	struct ac_device_changed *adc;
2837 	va_list ap;
2838 
2839 	switch (cmd) {
2840 	case ISPASYNC_LOOP_RESET:
2841 	{
2842 		uint16_t lipp;
2843 		fcparam *fcp;
2844 		va_start(ap, cmd);
2845 		bus = va_arg(ap, int);
2846 		va_end(ap);
2847 
2848 		lipp = ISP_READ(isp, OUTMAILBOX1);
2849 		fcp = FCPARAM(isp, bus);
2850 
2851 		isp_prt(isp, ISP_LOGINFO, "Chan %d LOOP Reset, LIP primitive %x", bus, lipp);
2852 		/*
2853 		 * Per FCP-4, a Reset LIP should result in a CRN reset. Other
2854 		 * LIPs and loop up/down events should never reset the CRN. For
2855 		 * an as of yet unknown reason, 24xx series cards (and
2856 		 * potentially others) can interrupt with a LIP Reset status
2857 		 * when no LIP reset came down the wire. Additionally, the LIP
2858 		 * primitive accompanying this status would not be a valid LIP
2859 		 * Reset primitive, but some variation of an invalid AL_PA
2860 		 * LIP. As a result, we have to verify the AL_PD in the LIP
2861 		 * addresses our port before blindly resetting.
2862 		*/
2863 		if (FCP_IS_DEST_ALPD(fcp, (lipp & 0x00FF)))
2864 			isp_fcp_reset_crn(isp, bus, /*tgt*/0, /*tgt_set*/ 0);
2865 		isp_loop_changed(isp, bus);
2866 		break;
2867 	}
2868 	case ISPASYNC_LIP:
2869 		if (msg == NULL)
2870 			msg = "LIP Received";
2871 		/* FALLTHROUGH */
2872 	case ISPASYNC_LOOP_DOWN:
2873 		if (msg == NULL)
2874 			msg = "LOOP Down";
2875 		/* FALLTHROUGH */
2876 	case ISPASYNC_LOOP_UP:
2877 		if (msg == NULL)
2878 			msg = "LOOP Up";
2879 		va_start(ap, cmd);
2880 		bus = va_arg(ap, int);
2881 		va_end(ap);
2882 		isp_loop_changed(isp, bus);
2883 		isp_prt(isp, ISP_LOGINFO, "Chan %d %s", bus, msg);
2884 		break;
2885 	case ISPASYNC_DEV_ARRIVED:
2886 		va_start(ap, cmd);
2887 		bus = va_arg(ap, int);
2888 		lp = va_arg(ap, fcportdb_t *);
2889 		va_end(ap);
2890 		fc = ISP_FC_PC(isp, bus);
2891 		tgt = FC_PORTDB_TGT(isp, bus, lp);
2892 		isp_gen_role_str(buf, sizeof (buf), lp->prli_word3);
2893 		isp_prt(isp, ISP_LOGCONFIG, prom, bus, tgt, lp->port_wwn, lp->portid, lp->handle, buf, "arrived");
2894 		if ((FCPARAM(isp, bus)->role & ISP_ROLE_INITIATOR) &&
2895 		    (lp->prli_word3 & PRLI_WD3_TARGET_FUNCTION)) {
2896 			lp->is_target = 1;
2897 			isp_fcp_reset_crn(isp, bus, tgt, /*tgt_set*/ 1);
2898 			isp_make_here(isp, lp, bus, tgt);
2899 		}
2900 		if ((FCPARAM(isp, bus)->role & ISP_ROLE_TARGET) &&
2901 		    (lp->prli_word3 & PRLI_WD3_INITIATOR_FUNCTION)) {
2902 			lp->is_initiator = 1;
2903 			ac.contract_number = AC_CONTRACT_DEV_CHG;
2904 			adc = (struct ac_device_changed *) ac.contract_data;
2905 			adc->wwpn = lp->port_wwn;
2906 			adc->port = lp->portid;
2907 			adc->target = tgt;
2908 			adc->arrived = 1;
2909 			xpt_async(AC_CONTRACT, fc->path, &ac);
2910 		}
2911 		break;
2912 	case ISPASYNC_DEV_CHANGED:
2913 	case ISPASYNC_DEV_STAYED:
2914 	{
2915 		int crn_reset_done;
2916 
2917 		crn_reset_done = 0;
2918 		va_start(ap, cmd);
2919 		bus = va_arg(ap, int);
2920 		lp = va_arg(ap, fcportdb_t *);
2921 		va_end(ap);
2922 		fc = ISP_FC_PC(isp, bus);
2923 		tgt = FC_PORTDB_TGT(isp, bus, lp);
2924 		isp_gen_role_str(buf, sizeof (buf), lp->new_prli_word3);
2925 		if (cmd == ISPASYNC_DEV_CHANGED)
2926 			isp_prt(isp, ISP_LOGCONFIG, prom, bus, tgt, lp->port_wwn, lp->new_portid, lp->handle, buf, "changed");
2927 		else
2928 			isp_prt(isp, ISP_LOGCONFIG, prom, bus, tgt, lp->port_wwn, lp->portid, lp->handle, buf, "stayed");
2929 
2930 		if (lp->is_target !=
2931 		    ((FCPARAM(isp, bus)->role & ISP_ROLE_INITIATOR) &&
2932 		     (lp->new_prli_word3 & PRLI_WD3_TARGET_FUNCTION))) {
2933 			lp->is_target = !lp->is_target;
2934 			if (lp->is_target) {
2935 				if (cmd == ISPASYNC_DEV_CHANGED) {
2936 					isp_fcp_reset_crn(isp, bus, tgt, /*tgt_set*/ 1);
2937 					crn_reset_done = 1;
2938 				}
2939 				isp_make_here(isp, lp, bus, tgt);
2940 			} else {
2941 				isp_make_gone(isp, lp, bus, tgt);
2942 				if (cmd == ISPASYNC_DEV_CHANGED) {
2943 					isp_fcp_reset_crn(isp, bus, tgt, /*tgt_set*/ 1);
2944 					crn_reset_done = 1;
2945 				}
2946 			}
2947 		}
2948 		if (lp->is_initiator !=
2949 		    ((FCPARAM(isp, bus)->role & ISP_ROLE_TARGET) &&
2950 		     (lp->new_prli_word3 & PRLI_WD3_INITIATOR_FUNCTION))) {
2951 			lp->is_initiator = !lp->is_initiator;
2952 			ac.contract_number = AC_CONTRACT_DEV_CHG;
2953 			adc = (struct ac_device_changed *) ac.contract_data;
2954 			adc->wwpn = lp->port_wwn;
2955 			adc->port = lp->portid;
2956 			adc->target = tgt;
2957 			adc->arrived = lp->is_initiator;
2958 			xpt_async(AC_CONTRACT, fc->path, &ac);
2959 		}
2960 
2961 		if ((cmd == ISPASYNC_DEV_CHANGED) &&
2962 		    (crn_reset_done == 0))
2963 			isp_fcp_reset_crn(isp, bus, tgt, /*tgt_set*/ 1);
2964 
2965 		break;
2966 	}
2967 	case ISPASYNC_DEV_GONE:
2968 		va_start(ap, cmd);
2969 		bus = va_arg(ap, int);
2970 		lp = va_arg(ap, fcportdb_t *);
2971 		va_end(ap);
2972 		fc = ISP_FC_PC(isp, bus);
2973 		tgt = FC_PORTDB_TGT(isp, bus, lp);
2974 		/*
2975 		 * If this has a virtual target or initiator set the isp_gdt
2976 		 * timer running on it to delay its departure.
2977 		 */
2978 		isp_gen_role_str(buf, sizeof (buf), lp->prli_word3);
2979 		if (lp->is_target || lp->is_initiator) {
2980 			lp->state = FC_PORTDB_STATE_ZOMBIE;
2981 			lp->gone_timer = fc->gone_device_time;
2982 			isp_prt(isp, ISP_LOGCONFIG, prom, bus, tgt, lp->port_wwn, lp->portid, lp->handle, buf, "gone zombie");
2983 			if (fc->ready && !callout_active(&fc->gdt)) {
2984 				isp_prt(isp, ISP_LOG_SANCFG|ISP_LOGDEBUG0, "Chan %d Starting Gone Device Timer with %u seconds time now %lu", bus, lp->gone_timer, (unsigned long)time_uptime);
2985 				callout_reset(&fc->gdt, hz, isp_gdt, fc);
2986 			}
2987 			break;
2988 		}
2989 		isp_prt(isp, ISP_LOGCONFIG, prom, bus, tgt, lp->port_wwn, lp->portid, lp->handle, buf, "gone");
2990 		break;
2991 	case ISPASYNC_CHANGE_NOTIFY:
2992 	{
2993 		char *msg;
2994 		int evt, nphdl, nlstate, portid, reason;
2995 
2996 		va_start(ap, cmd);
2997 		bus = va_arg(ap, int);
2998 		evt = va_arg(ap, int);
2999 		if (evt == ISPASYNC_CHANGE_PDB) {
3000 			nphdl = va_arg(ap, int);
3001 			nlstate = va_arg(ap, int);
3002 			reason = va_arg(ap, int);
3003 		} else if (evt == ISPASYNC_CHANGE_SNS) {
3004 			portid = va_arg(ap, int);
3005 		} else {
3006 			nphdl = NIL_HANDLE;
3007 			nlstate = reason = 0;
3008 		}
3009 		va_end(ap);
3010 
3011 		if (evt == ISPASYNC_CHANGE_PDB) {
3012 			int tgt_set = 0;
3013 			msg = "Port Database Changed";
3014 			isp_prt(isp, ISP_LOGINFO,
3015 			    "Chan %d %s (nphdl 0x%x state 0x%x reason 0x%x)",
3016 			    bus, msg, nphdl, nlstate, reason);
3017 			/*
3018 			 * Port database syncs are not sufficient for
3019 			 * determining that logins or logouts are done on the
3020 			 * loop, but this information is directly available from
3021 			 * the reason code from the incoming mbox. We must reset
3022 			 * the fcp crn on these events according to FCP-4
3023 			 */
3024 			switch (reason) {
3025 			case PDB24XX_AE_IMPL_LOGO_1:
3026 			case PDB24XX_AE_IMPL_LOGO_2:
3027 			case PDB24XX_AE_IMPL_LOGO_3:
3028 			case PDB24XX_AE_PLOGI_RCVD:
3029 			case PDB24XX_AE_PRLI_RCVD:
3030 			case PDB24XX_AE_PRLO_RCVD:
3031 			case PDB24XX_AE_LOGO_RCVD:
3032 			case PDB24XX_AE_PLOGI_DONE:
3033 			case PDB24XX_AE_PRLI_DONE:
3034 				/*
3035 				 * If the event is not global, twiddle tgt and
3036 				 * tgt_set to nominate only the target
3037 				 * associated with the nphdl.
3038 				 */
3039 				if (nphdl != PDB24XX_AE_GLOBAL) {
3040 					/* Break if we don't yet have the pdb */
3041 					if (!isp_find_pdb_by_handle(isp, bus, nphdl, &lp))
3042 						break;
3043 					tgt = FC_PORTDB_TGT(isp, bus, lp);
3044 					tgt_set = 1;
3045 				}
3046 				isp_fcp_reset_crn(isp, bus, tgt, tgt_set);
3047 				break;
3048 			default:
3049 				break; /* NOP */
3050 			}
3051 		} else if (evt == ISPASYNC_CHANGE_SNS) {
3052 			msg = "Name Server Database Changed";
3053 			isp_prt(isp, ISP_LOGINFO, "Chan %d %s (PortID 0x%06x)",
3054 			    bus, msg, portid);
3055 		} else {
3056 			msg = "Other Change Notify";
3057 			isp_prt(isp, ISP_LOGINFO, "Chan %d %s", bus, msg);
3058 		}
3059 		isp_loop_changed(isp, bus);
3060 		break;
3061 	}
3062 #ifdef	ISP_TARGET_MODE
3063 	case ISPASYNC_TARGET_NOTIFY:
3064 	{
3065 		isp_notify_t *notify;
3066 		va_start(ap, cmd);
3067 		notify = va_arg(ap, isp_notify_t *);
3068 		va_end(ap);
3069 		switch (notify->nt_ncode) {
3070 		case NT_ABORT_TASK:
3071 		case NT_ABORT_TASK_SET:
3072 		case NT_CLEAR_ACA:
3073 		case NT_CLEAR_TASK_SET:
3074 		case NT_LUN_RESET:
3075 		case NT_TARGET_RESET:
3076 		case NT_QUERY_TASK_SET:
3077 		case NT_QUERY_ASYNC_EVENT:
3078 			/*
3079 			 * These are task management functions.
3080 			 */
3081 			isp_handle_platform_target_tmf(isp, notify);
3082 			break;
3083 		case NT_LIP_RESET:
3084 		case NT_LINK_UP:
3085 		case NT_LINK_DOWN:
3086 		case NT_HBA_RESET:
3087 			/*
3088 			 * No action need be taken here.
3089 			 */
3090 			break;
3091 		case NT_SRR:
3092 			isp_handle_platform_srr(isp, notify);
3093 			break;
3094 		default:
3095 			isp_prt(isp, ISP_LOGALL, "target notify code 0x%x", notify->nt_ncode);
3096 			isp_handle_platform_target_notify_ack(isp, notify, 0);
3097 			break;
3098 		}
3099 		break;
3100 	}
3101 	case ISPASYNC_TARGET_NOTIFY_ACK:
3102 	{
3103 		void *inot;
3104 		va_start(ap, cmd);
3105 		inot = va_arg(ap, void *);
3106 		va_end(ap);
3107 		if (isp_notify_ack(isp, inot)) {
3108 			isp_tna_t *tp = malloc(sizeof (*tp), M_DEVBUF, M_NOWAIT);
3109 			if (tp) {
3110 				tp->isp = isp;
3111 				memcpy(tp->data, inot, sizeof (tp->data));
3112 				tp->not = tp->data;
3113 				callout_init_mtx(&tp->timer, &isp->isp_lock, 0);
3114 				callout_reset(&tp->timer, 5,
3115 				    isp_refire_notify_ack, tp);
3116 			} else {
3117 				isp_prt(isp, ISP_LOGERR, "you lose- cannot allocate a notify refire");
3118 			}
3119 		}
3120 		break;
3121 	}
3122 	case ISPASYNC_TARGET_ACTION:
3123 	{
3124 		isphdr_t *hp;
3125 
3126 		va_start(ap, cmd);
3127 		hp = va_arg(ap, isphdr_t *);
3128 		va_end(ap);
3129 		switch (hp->rqs_entry_type) {
3130 		case RQSTYPE_ATIO:
3131 			isp_handle_platform_atio7(isp, (at7_entry_t *)hp);
3132 			break;
3133 		case RQSTYPE_CTIO7:
3134 			isp_handle_platform_ctio(isp, (ct7_entry_t *)hp);
3135 			break;
3136 		default:
3137 			isp_prt(isp, ISP_LOGWARN, "%s: unhandled target action 0x%x",
3138 			    __func__, hp->rqs_entry_type);
3139 			break;
3140 		}
3141 		break;
3142 	}
3143 #endif
3144 	case ISPASYNC_FW_CRASH:
3145 	{
3146 		uint16_t mbox1;
3147 		mbox1 = ISP_READ(isp, OUTMAILBOX1);
3148 		isp_prt(isp, ISP_LOGERR, "Internal Firmware Error @ RISC Address 0x%x", mbox1);
3149 #if 0
3150 		isp_reinit(isp, 1);
3151 		isp_async(isp, ISPASYNC_FW_RESTARTED, NULL);
3152 #endif
3153 		break;
3154 	}
3155 	default:
3156 		isp_prt(isp, ISP_LOGERR, "unknown isp_async event %d", cmd);
3157 		break;
3158 	}
3159 }
3160 
3161 uint64_t
3162 isp_default_wwn(ispsoftc_t * isp, int chan, int isactive, int iswwnn)
3163 {
3164 	uint64_t seed;
3165 	struct isp_fc *fc = ISP_FC_PC(isp, chan);
3166 
3167 	/* First try to use explicitly configured WWNs. */
3168 	seed = iswwnn ? fc->def_wwnn : fc->def_wwpn;
3169 	if (seed)
3170 		return (seed);
3171 
3172 	/* Otherwise try to use WWNs from NVRAM. */
3173 	if (isactive) {
3174 		seed = iswwnn ? FCPARAM(isp, chan)->isp_wwnn_nvram :
3175 		    FCPARAM(isp, chan)->isp_wwpn_nvram;
3176 		if (seed)
3177 			return (seed);
3178 	}
3179 
3180 	/* If still no WWNs, try to steal them from the first channel. */
3181 	if (chan > 0) {
3182 		seed = iswwnn ? ISP_FC_PC(isp, 0)->def_wwnn :
3183 		    ISP_FC_PC(isp, 0)->def_wwpn;
3184 		if (seed == 0) {
3185 			seed = iswwnn ? FCPARAM(isp, 0)->isp_wwnn_nvram :
3186 			    FCPARAM(isp, 0)->isp_wwpn_nvram;
3187 		}
3188 	}
3189 
3190 	/* If still nothing -- improvise. */
3191 	if (seed == 0) {
3192 		seed = 0x400000007F000000ull + device_get_unit(isp->isp_dev);
3193 		if (!iswwnn)
3194 			seed ^= 0x0100000000000000ULL;
3195 	}
3196 
3197 	/* For additional channels we have to improvise even more. */
3198 	if (!iswwnn && chan > 0) {
3199 		/*
3200 		 * We'll stick our channel number plus one first into bits
3201 		 * 57..59 and thence into bits 52..55 which allows for 8 bits
3202 		 * of channel which is enough for our maximum of 255 channels.
3203 		 */
3204 		seed ^= 0x0100000000000000ULL;
3205 		seed ^= ((uint64_t) (chan + 1) & 0xf) << 56;
3206 		seed ^= ((uint64_t) ((chan + 1) >> 4) & 0xf) << 52;
3207 	}
3208 	return (seed);
3209 }
3210 
3211 void
3212 isp_prt(ispsoftc_t *isp, int level, const char *fmt, ...)
3213 {
3214 	int loc;
3215 	char lbuf[200];
3216 	va_list ap;
3217 
3218 	if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) {
3219 		return;
3220 	}
3221 	snprintf(lbuf, sizeof (lbuf), "%s: ", device_get_nameunit(isp->isp_dev));
3222 	loc = strlen(lbuf);
3223 	va_start(ap, fmt);
3224 	vsnprintf(&lbuf[loc], sizeof (lbuf) - loc - 1, fmt, ap);
3225 	va_end(ap);
3226 	printf("%s\n", lbuf);
3227 }
3228 
3229 void
3230 isp_xs_prt(ispsoftc_t *isp, XS_T *xs, int level, const char *fmt, ...)
3231 {
3232 	va_list ap;
3233 	if (level != ISP_LOGALL && (level & isp->isp_dblev) == 0) {
3234 		return;
3235 	}
3236 	xpt_print_path(xs->ccb_h.path);
3237 	va_start(ap, fmt);
3238 	vprintf(fmt, ap);
3239 	va_end(ap);
3240 	printf("\n");
3241 }
3242 
3243 uint64_t
3244 isp_nanotime_sub(struct timespec *b, struct timespec *a)
3245 {
3246 	uint64_t elapsed;
3247 	struct timespec x;
3248 
3249 	timespecsub(b, a, &x);
3250 	elapsed = GET_NANOSEC(&x);
3251 	if (elapsed == 0)
3252 		elapsed++;
3253 	return (elapsed);
3254 }
3255 
3256 int
3257 isp_fc_scratch_acquire(ispsoftc_t *isp, int chan)
3258 {
3259 	struct isp_fc *fc = ISP_FC_PC(isp, chan);
3260 
3261 	if (fc->fcbsy)
3262 		return (-1);
3263 	fc->fcbsy = 1;
3264 	return (0);
3265 }
3266 
3267 void
3268 isp_platform_intr(void *arg)
3269 {
3270 	ispsoftc_t *isp = arg;
3271 
3272 	ISP_LOCK(isp);
3273 	ISP_RUN_ISR(isp);
3274 	ISP_UNLOCK(isp);
3275 }
3276 
3277 void
3278 isp_platform_intr_resp(void *arg)
3279 {
3280 	ispsoftc_t *isp = arg;
3281 
3282 	ISP_LOCK(isp);
3283 	isp_intr_respq(isp);
3284 	ISP_UNLOCK(isp);
3285 
3286 	/* We have handshake enabled, so explicitly complete interrupt */
3287 	ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_CLEAR_RISC_INT);
3288 }
3289 
3290 void
3291 isp_platform_intr_atio(void *arg)
3292 {
3293 	ispsoftc_t *isp = arg;
3294 
3295 	ISP_LOCK(isp);
3296 #ifdef	ISP_TARGET_MODE
3297 	isp_intr_atioq(isp);
3298 #endif
3299 	ISP_UNLOCK(isp);
3300 
3301 	/* We have handshake enabled, so explicitly complete interrupt */
3302 	ISP_WRITE(isp, BIU2400_HCCR, HCCR_2400_CMD_CLEAR_RISC_INT);
3303 }
3304 
3305 typedef struct {
3306 	ispsoftc_t		*isp;
3307 	struct ccb_scsiio	*csio;
3308 	void			*qe;
3309 	int			error;
3310 } mush_t;
3311 
3312 static void
3313 isp_dma2(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
3314 {
3315 	mush_t *mp = (mush_t *) arg;
3316 	ispsoftc_t *isp= mp->isp;
3317 	struct ccb_scsiio *csio = mp->csio;
3318 	bus_dmasync_op_t op;
3319 
3320 	if (error) {
3321 		mp->error = error;
3322 		return;
3323 	}
3324 	if ((csio->ccb_h.func_code == XPT_CONT_TARGET_IO) ^
3325 	    ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN))
3326 		op = BUS_DMASYNC_PREREAD;
3327 	else
3328 		op = BUS_DMASYNC_PREWRITE;
3329 	bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, op);
3330 
3331 	mp->error = ISP_SEND_CMD(isp, mp->qe, dm_segs, nseg);
3332 	if (mp->error)
3333 		isp_dmafree(isp, csio);
3334 }
3335 
3336 int
3337 isp_dmasetup(ispsoftc_t *isp, struct ccb_scsiio *csio, void *qe)
3338 {
3339 	mush_t mp;
3340 	int error;
3341 
3342 	if (XS_XFRLEN(csio)) {
3343 		mp.isp = isp;
3344 		mp.csio = csio;
3345 		mp.qe = qe;
3346 		mp.error = 0;
3347 		error = bus_dmamap_load_ccb(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap,
3348 		    (union ccb *)csio, isp_dma2, &mp, BUS_DMA_NOWAIT);
3349 		if (error == 0)
3350 			error = mp.error;
3351 	} else {
3352 		error = ISP_SEND_CMD(isp, qe, NULL, 0);
3353 	}
3354 	switch (error) {
3355 	case 0:
3356 	case CMD_COMPLETE:
3357 	case CMD_EAGAIN:
3358 	case CMD_RQLATER:
3359 		break;
3360 	case ENOMEM:
3361 		error = CMD_EAGAIN;
3362 		break;
3363 	case EINVAL:
3364 	case EFBIG:
3365 		csio->ccb_h.status = CAM_REQ_INVALID;
3366 		error = CMD_COMPLETE;
3367 		break;
3368 	default:
3369 		csio->ccb_h.status = CAM_UNREC_HBA_ERROR;
3370 		error = CMD_COMPLETE;
3371 		break;
3372 	}
3373 	return (error);
3374 }
3375 
3376 void
3377 isp_dmafree(ispsoftc_t *isp, struct ccb_scsiio *csio)
3378 {
3379 	bus_dmasync_op_t op;
3380 
3381 	if (XS_XFRLEN(csio) == 0)
3382 		return;
3383 
3384 	if ((csio->ccb_h.func_code == XPT_CONT_TARGET_IO) ^
3385 	    ((csio->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN))
3386 		op = BUS_DMASYNC_POSTREAD;
3387 	else
3388 		op = BUS_DMASYNC_POSTWRITE;
3389 	bus_dmamap_sync(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap, op);
3390 	bus_dmamap_unload(isp->isp_osinfo.dmat, PISP_PCMD(csio)->dmap);
3391 }
3392 
3393 /*
3394  * Reset the command reference number for all LUNs on a specific target
3395  * (needed when a target arrives again) or for all targets on a port
3396  * (needed for events like a LIP).
3397  */
3398 void
3399 isp_fcp_reset_crn(ispsoftc_t *isp, int chan, uint32_t tgt, int tgt_set)
3400 {
3401 	struct isp_fc *fc = ISP_FC_PC(isp, chan);
3402 	struct isp_nexus *nxp;
3403 	int i;
3404 
3405 	if (tgt_set == 0)
3406 		isp_prt(isp, ISP_LOGDEBUG0,
3407 		    "Chan %d resetting CRN on all targets", chan);
3408 	else
3409 		isp_prt(isp, ISP_LOGDEBUG0,
3410 		    "Chan %d resetting CRN on target %u", chan, tgt);
3411 
3412 	for (i = 0; i < NEXUS_HASH_WIDTH; i++) {
3413 		for (nxp = fc->nexus_hash[i]; nxp != NULL; nxp = nxp->next) {
3414 			if (tgt_set == 0 || tgt == nxp->tgt)
3415 				nxp->crnseed = 0;
3416 		}
3417 	}
3418 }
3419 
3420 int
3421 isp_fcp_next_crn(ispsoftc_t *isp, uint8_t *crnp, XS_T *cmd)
3422 {
3423 	lun_id_t lun;
3424 	uint32_t chan, tgt;
3425 	struct isp_fc *fc;
3426 	struct isp_nexus *nxp;
3427 	int idx;
3428 
3429 	chan = XS_CHANNEL(cmd);
3430 	tgt = XS_TGT(cmd);
3431 	lun = XS_LUN(cmd);
3432 	fc = ISP_FC_PC(isp, chan);
3433 	idx = NEXUS_HASH(tgt, lun);
3434 	nxp = fc->nexus_hash[idx];
3435 
3436 	while (nxp) {
3437 		if (nxp->tgt == tgt && nxp->lun == lun)
3438 			break;
3439 		nxp = nxp->next;
3440 	}
3441 	if (nxp == NULL) {
3442 		nxp = fc->nexus_free_list;
3443 		if (nxp == NULL) {
3444 			nxp = malloc(sizeof (struct isp_nexus), M_DEVBUF, M_ZERO|M_NOWAIT);
3445 			if (nxp == NULL) {
3446 				return (-1);
3447 			}
3448 		} else {
3449 			fc->nexus_free_list = nxp->next;
3450 		}
3451 		nxp->tgt = tgt;
3452 		nxp->lun = lun;
3453 		nxp->next = fc->nexus_hash[idx];
3454 		fc->nexus_hash[idx] = nxp;
3455 	}
3456 	if (nxp->crnseed == 0)
3457 		nxp->crnseed = 1;
3458 	*crnp = nxp->crnseed++;
3459 	return (0);
3460 }
3461 
3462 /*
3463  * We enter with the lock held
3464  */
3465 void
3466 isp_timer(void *arg)
3467 {
3468 	ispsoftc_t *isp = arg;
3469 #ifdef	ISP_TARGET_MODE
3470 	isp_tmcmd_restart(isp);
3471 #endif
3472 	callout_reset(&isp->isp_osinfo.tmo, isp_timer_count, isp_timer, isp);
3473 }
3474 
3475 #ifdef	ISP_TARGET_MODE
3476 isp_ecmd_t *
3477 isp_get_ecmd(ispsoftc_t *isp)
3478 {
3479 	isp_ecmd_t *ecmd = isp->isp_osinfo.ecmd_free;
3480 	if (ecmd) {
3481 		isp->isp_osinfo.ecmd_free = ecmd->next;
3482 	}
3483 	return (ecmd);
3484 }
3485 
3486 void
3487 isp_put_ecmd(ispsoftc_t *isp, isp_ecmd_t *ecmd)
3488 {
3489 	ecmd->next = isp->isp_osinfo.ecmd_free;
3490 	isp->isp_osinfo.ecmd_free = ecmd;
3491 }
3492 #endif
3493