xref: /dragonfly/sys/dev/disk/mpt/mpt_cam.c (revision ce7a3582)
1 /*-
2  * FreeBSD/CAM specific routines for LSI '909 FC  adapters.
3  * FreeBSD Version.
4  *
5  * Copyright (c)  2000, 2001 by Greg Ansley
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice immediately at the beginning of the file, without modification,
12  *    this list of conditions, and the following disclaimer.
13  * 2. The name of the author may not be used to endorse or promote products
14  *    derived from this software without specific prior written permission.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
20  * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 /*-
29  * Copyright (c) 2002, 2006 by Matthew Jacob
30  * All rights reserved.
31  *
32  * Redistribution and use in source and binary forms, with or without
33  * modification, are permitted provided that the following conditions are
34  * met:
35  * 1. Redistributions of source code must retain the above copyright
36  *    notice, this list of conditions and the following disclaimer.
37  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
38  *    substantially similar to the "NO WARRANTY" disclaimer below
39  *    ("Disclaimer") and any redistribution must be conditioned upon including
40  *    a substantially similar Disclaimer requirement for further binary
41  *    redistribution.
42  * 3. Neither the names of the above listed copyright holders nor the names
43  *    of any contributors may be used to endorse or promote products derived
44  *    from this software without specific prior written permission.
45  *
46  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
47  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
48  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
49  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
50  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
51  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
52  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
53  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
54  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
55  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
56  * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
57  *
58  * Support from Chris Ellsworth in order to make SAS adapters work
59  * is gratefully acknowledged.
60  *
61  * Support from LSI-Logic has also gone a great deal toward making this a
62  * workable subsystem and is gratefully acknowledged.
63  */
64 /*-
65  * Copyright (c) 2004, Avid Technology, Inc. and its contributors.
66  * Copyright (c) 2005, WHEEL Sp. z o.o.
67  * Copyright (c) 2004, 2005 Justin T. Gibbs
68  * All rights reserved.
69  *
70  * Redistribution and use in source and binary forms, with or without
71  * modification, are permitted provided that the following conditions are
72  * met:
73  * 1. Redistributions of source code must retain the above copyright
74  *    notice, this list of conditions and the following disclaimer.
75  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
76  *    substantially similar to the "NO WARRANTY" disclaimer below
77  *    ("Disclaimer") and any redistribution must be conditioned upon including
78  *    a substantially similar Disclaimer requirement for further binary
79  *    redistribution.
80  * 3. Neither the names of the above listed copyright holders nor the names
81  *    of any contributors may be used to endorse or promote products derived
82  *    from this software without specific prior written permission.
83  *
84  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
85  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
86  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
87  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
88  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
89  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
90  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
91  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
92  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
93  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
94  * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
95  *
96  * $FreeBSD: src/sys/dev/mpt/mpt_cam.c,v 1.77 2011/04/22 09:59:16 marius Exp $
97  */
98 
99 #include <dev/disk/mpt/mpt.h>
100 #include <dev/disk/mpt/mpt_cam.h>
101 #include <dev/disk/mpt/mpt_raid.h>
102 
103 #include "dev/disk/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */
104 #include "dev/disk/mpt/mpilib/mpi_init.h"
105 #include "dev/disk/mpt/mpilib/mpi_targ.h"
106 #include "dev/disk/mpt/mpilib/mpi_fc.h"
107 #include "dev/disk/mpt/mpilib/mpi_sas.h"
108 #include <sys/sysctl.h>
109 #include <sys/callout.h>
110 #include <sys/kthread.h>
111 
112 #ifndef	CAM_NEW_TRAN_CODE
113 #define	CAM_NEW_TRAN_CODE	1
114 #endif
115 
116 static void mpt_poll(struct cam_sim *);
117 static timeout_t mpt_timeout;
118 static void mpt_action(struct cam_sim *, union ccb *);
119 static int
120 mpt_get_spi_settings(struct mpt_softc *, struct ccb_trans_settings *);
121 static void mpt_setwidth(struct mpt_softc *, int, int);
122 static void mpt_setsync(struct mpt_softc *, int, int, int);
123 static int mpt_update_spi_config(struct mpt_softc *, int);
124 static void mpt_calc_geometry(struct ccb_calc_geometry *ccg, int extended);
125 
126 static mpt_reply_handler_t mpt_scsi_reply_handler;
127 static mpt_reply_handler_t mpt_scsi_tmf_reply_handler;
128 static mpt_reply_handler_t mpt_fc_els_reply_handler;
129 static int mpt_scsi_reply_frame_handler(struct mpt_softc *, request_t *,
130 					MSG_DEFAULT_REPLY *);
131 static int mpt_bus_reset(struct mpt_softc *, target_id_t, lun_id_t, int);
132 static int mpt_fc_reset_link(struct mpt_softc *, int);
133 
134 static int mpt_spawn_recovery_thread(struct mpt_softc *mpt);
135 static void mpt_terminate_recovery_thread(struct mpt_softc *mpt);
136 static void mpt_recovery_thread(void *arg);
137 static void mpt_recover_commands(struct mpt_softc *mpt);
138 
139 static int mpt_scsi_send_tmf(struct mpt_softc *, u_int, u_int, u_int,
140     u_int, u_int, u_int, int);
141 
142 static void mpt_fc_post_els(struct mpt_softc *mpt, request_t *, int);
143 static void mpt_post_target_command(struct mpt_softc *, request_t *, int);
144 static int mpt_add_els_buffers(struct mpt_softc *mpt);
145 static int mpt_add_target_commands(struct mpt_softc *mpt);
146 static int mpt_enable_lun(struct mpt_softc *, target_id_t, lun_id_t);
147 static int mpt_disable_lun(struct mpt_softc *, target_id_t, lun_id_t);
148 static void mpt_target_start_io(struct mpt_softc *, union ccb *);
149 static cam_status mpt_abort_target_ccb(struct mpt_softc *, union ccb *);
150 static int mpt_abort_target_cmd(struct mpt_softc *, request_t *);
151 static void mpt_scsi_tgt_status(struct mpt_softc *, union ccb *, request_t *,
152     uint8_t, uint8_t const *);
153 static void
154 mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *, request_t *, mpt_task_mgmt_t,
155     tgt_resource_t *, int);
156 static void mpt_tgt_dump_tgt_state(struct mpt_softc *, request_t *);
157 static void mpt_tgt_dump_req_state(struct mpt_softc *, request_t *);
158 static mpt_reply_handler_t mpt_scsi_tgt_reply_handler;
159 static mpt_reply_handler_t mpt_sata_pass_reply_handler;
160 
161 static uint32_t scsi_io_handler_id = MPT_HANDLER_ID_NONE;
162 static uint32_t scsi_tmf_handler_id = MPT_HANDLER_ID_NONE;
163 static uint32_t fc_els_handler_id = MPT_HANDLER_ID_NONE;
164 static uint32_t sata_pass_handler_id = MPT_HANDLER_ID_NONE;
165 
166 static mpt_probe_handler_t	mpt_cam_probe;
167 static mpt_attach_handler_t	mpt_cam_attach;
168 static mpt_enable_handler_t	mpt_cam_enable;
169 static mpt_ready_handler_t	mpt_cam_ready;
170 static mpt_event_handler_t	mpt_cam_event;
171 static mpt_reset_handler_t	mpt_cam_ioc_reset;
172 static mpt_detach_handler_t	mpt_cam_detach;
173 
174 static struct mpt_personality mpt_cam_personality =
175 {
176 	.name		= "mpt_cam",
177 	.probe		= mpt_cam_probe,
178 	.attach		= mpt_cam_attach,
179 	.enable		= mpt_cam_enable,
180 	.ready		= mpt_cam_ready,
181 	.event		= mpt_cam_event,
182 	.reset		= mpt_cam_ioc_reset,
183 	.detach		= mpt_cam_detach,
184 };
185 
186 DECLARE_MPT_PERSONALITY(mpt_cam, SI_ORDER_SECOND);
187 MODULE_DEPEND(mpt_cam, cam, 1, 1, 1);
188 
189 int mpt_enable_sata_wc = -1;
190 TUNABLE_INT("hw.mpt.enable_sata_wc", &mpt_enable_sata_wc);
191 
192 int
193 mpt_cam_probe(struct mpt_softc *mpt)
194 {
195 	int role;
196 
197 	/*
198 	 * Only attach to nodes that support the initiator or target role
199 	 * (or want to) or have RAID physical devices that need CAM pass-thru
200 	 * support.
201 	 */
202 	if (mpt->do_cfg_role) {
203 		role = mpt->cfg_role;
204 	} else {
205 		role = mpt->role;
206 	}
207 	if ((role & (MPT_ROLE_TARGET|MPT_ROLE_INITIATOR)) != 0 ||
208 	    (mpt->ioc_page2 != NULL && mpt->ioc_page2->MaxPhysDisks != 0)) {
209 		return (0);
210 	}
211 	return (ENODEV);
212 }
213 
214 int
215 mpt_cam_attach(struct mpt_softc *mpt)
216 {
217 	struct cam_devq *devq;
218 	mpt_handler_t	 handler;
219 	int		 maxq;
220 	int		 error;
221 
222 	MPT_LOCK(mpt);
223 	TAILQ_INIT(&mpt->request_timeout_list);
224 	maxq = (mpt->ioc_facts.GlobalCredits < MPT_MAX_REQUESTS(mpt))?
225 	    mpt->ioc_facts.GlobalCredits : MPT_MAX_REQUESTS(mpt);
226 
227 	handler.reply_handler = mpt_scsi_reply_handler;
228 	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
229 				     &scsi_io_handler_id);
230 	if (error != 0) {
231 		MPT_UNLOCK(mpt);
232 		goto cleanup;
233 	}
234 
235 	handler.reply_handler = mpt_scsi_tmf_reply_handler;
236 	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
237 				     &scsi_tmf_handler_id);
238 	if (error != 0) {
239 		MPT_UNLOCK(mpt);
240 		goto cleanup;
241 	}
242 
243 	/*
244 	 * If we're fibre channel and could support target mode, we register
245 	 * an ELS reply handler and give it resources.
246 	 */
247 	if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) {
248 		handler.reply_handler = mpt_fc_els_reply_handler;
249 		error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
250 		    &fc_els_handler_id);
251 		if (error != 0) {
252 			MPT_UNLOCK(mpt);
253 			goto cleanup;
254 		}
255 		if (mpt_add_els_buffers(mpt) == FALSE) {
256 			error = ENOMEM;
257 			MPT_UNLOCK(mpt);
258 			goto cleanup;
259 		}
260 		maxq -= mpt->els_cmds_allocated;
261 	}
262 
263 	/*
264 	 * If we support target mode, we register a reply handler for it,
265 	 * but don't add command resources until we actually enable target
266 	 * mode.
267 	 */
268 	if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) {
269 		handler.reply_handler = mpt_scsi_tgt_reply_handler;
270 		error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
271 		    &mpt->scsi_tgt_handler_id);
272 		if (error != 0) {
273 			MPT_UNLOCK(mpt);
274 			goto cleanup;
275 		}
276 	}
277 
278 	if (mpt->is_sas) {
279 		handler.reply_handler = mpt_sata_pass_reply_handler;
280 		error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
281 		    &sata_pass_handler_id);
282 		if (error != 0) {
283 			MPT_UNLOCK(mpt);
284 			goto cleanup;
285 		}
286 	}
287 
288 	/*
289 	 * We keep one request reserved for timeout TMF requests.
290 	 */
291 	mpt->tmf_req = mpt_get_request(mpt, FALSE);
292 	if (mpt->tmf_req == NULL) {
293 		mpt_prt(mpt, "Unable to allocate dedicated TMF request!\n");
294 		error = ENOMEM;
295 		MPT_UNLOCK(mpt);
296 		goto cleanup;
297 	}
298 
299 	/*
300 	 * Mark the request as free even though not on the free list.
301 	 * There is only one TMF request allowed to be outstanding at
302 	 * a time and the TMF routines perform their own allocation
303 	 * tracking using the standard state flags.
304 	 */
305 	mpt->tmf_req->state = REQ_STATE_FREE;
306 	maxq--;
307 
308 	/*
309 	 * The rest of this is CAM foo, for which we need to drop our lock
310 	 */
311 	MPT_UNLOCK(mpt);
312 
313 	if (mpt_spawn_recovery_thread(mpt) != 0) {
314 		mpt_prt(mpt, "Unable to spawn recovery thread!\n");
315 		error = ENOMEM;
316 		goto cleanup;
317 	}
318 
319 	/*
320 	 * Create the device queue for our SIM(s).
321 	 */
322 	devq = cam_simq_alloc(maxq);
323 	if (devq == NULL) {
324 		mpt_prt(mpt, "Unable to allocate CAM SIMQ!\n");
325 		error = ENOMEM;
326 		goto cleanup;
327 	}
328 
329 	/*
330 	 * Construct our SIM entry.
331 	 */
332 	mpt->sim =
333 	    mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq);
334 	if (mpt->sim == NULL) {
335 		mpt_prt(mpt, "Unable to allocate CAM SIM!\n");
336 		cam_devq_release(devq);
337 		error = ENOMEM;
338 		goto cleanup;
339 	}
340 
341 	/*
342 	 * Register exactly this bus.
343 	 */
344 	MPT_LOCK(mpt);
345 	if (mpt_xpt_bus_register(mpt->sim, mpt->dev, 0) != CAM_SUCCESS) {
346 		mpt_prt(mpt, "Bus registration Failed!\n");
347 		error = ENOMEM;
348 		MPT_UNLOCK(mpt);
349 		goto cleanup;
350 	}
351 
352 	if (xpt_create_path(&mpt->path, NULL, cam_sim_path(mpt->sim),
353 	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
354 		mpt_prt(mpt, "Unable to allocate Path!\n");
355 		error = ENOMEM;
356 		MPT_UNLOCK(mpt);
357 		goto cleanup;
358 	}
359 	MPT_UNLOCK(mpt);
360 
361 	/*
362 	 * Only register a second bus for RAID physical
363 	 * devices if the controller supports RAID.
364 	 */
365 	if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
366 		return (0);
367 	}
368 
369 	/*
370 	 * Create a "bus" to export all hidden disks to CAM.
371 	 */
372 	mpt->phydisk_sim =
373 	    mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq);
374 	if (mpt->phydisk_sim == NULL) {
375 		mpt_prt(mpt, "Unable to allocate Physical Disk CAM SIM!\n");
376 		error = ENOMEM;
377 		goto cleanup;
378 	}
379 
380 	/*
381 	 * Register this bus.
382 	 */
383 	MPT_LOCK(mpt);
384 	if (mpt_xpt_bus_register(mpt->phydisk_sim, mpt->dev, 1) !=
385 	    CAM_SUCCESS) {
386 		mpt_prt(mpt, "Physical Disk Bus registration Failed!\n");
387 		error = ENOMEM;
388 		MPT_UNLOCK(mpt);
389 		goto cleanup;
390 	}
391 
392 	if (xpt_create_path(&mpt->phydisk_path, NULL,
393 	    cam_sim_path(mpt->phydisk_sim),
394 	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
395 		mpt_prt(mpt, "Unable to allocate Physical Disk Path!\n");
396 		error = ENOMEM;
397 		MPT_UNLOCK(mpt);
398 		goto cleanup;
399 	}
400 	MPT_UNLOCK(mpt);
401 	mpt_lprt(mpt, MPT_PRT_DEBUG, "attached cam\n");
402 	return (0);
403 
404 cleanup:
405 	mpt_cam_detach(mpt);
406 	return (error);
407 }
408 
409 /*
410  * Read FC configuration information
411  */
412 static int
413 mpt_read_config_info_fc(struct mpt_softc *mpt)
414 {
415 	char *topology = NULL;
416 	int rv;
417 
418 	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 0,
419 	    0, &mpt->mpt_fcport_page0.Header, FALSE, 5000);
420 	if (rv) {
421 		return (-1);
422 	}
423 	mpt_lprt(mpt, MPT_PRT_DEBUG, "FC Port Page 0 Header: %x %x %x %x\n",
424 		 mpt->mpt_fcport_page0.Header.PageVersion,
425 		 mpt->mpt_fcport_page0.Header.PageLength,
426 		 mpt->mpt_fcport_page0.Header.PageNumber,
427 		 mpt->mpt_fcport_page0.Header.PageType);
428 
429 
430 	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_fcport_page0.Header,
431 	    sizeof(mpt->mpt_fcport_page0), FALSE, 5000);
432 	if (rv) {
433 		mpt_prt(mpt, "failed to read FC Port Page 0\n");
434 		return (-1);
435 	}
436 	mpt2host_config_page_fc_port_0(&mpt->mpt_fcport_page0);
437 
438 	mpt->mpt_fcport_speed = mpt->mpt_fcport_page0.CurrentSpeed;
439 
440 	switch (mpt->mpt_fcport_page0.Flags &
441 	    MPI_FCPORTPAGE0_FLAGS_ATTACH_TYPE_MASK) {
442 	case MPI_FCPORTPAGE0_FLAGS_ATTACH_NO_INIT:
443 		mpt->mpt_fcport_speed = 0;
444 		topology = "<NO LOOP>";
445 		break;
446 	case MPI_FCPORTPAGE0_FLAGS_ATTACH_POINT_TO_POINT:
447 		topology = "N-Port";
448 		break;
449 	case MPI_FCPORTPAGE0_FLAGS_ATTACH_PRIVATE_LOOP:
450 		topology = "NL-Port";
451 		break;
452 	case MPI_FCPORTPAGE0_FLAGS_ATTACH_FABRIC_DIRECT:
453 		topology = "F-Port";
454 		break;
455 	case MPI_FCPORTPAGE0_FLAGS_ATTACH_PUBLIC_LOOP:
456 		topology = "FL-Port";
457 		break;
458 	default:
459 		mpt->mpt_fcport_speed = 0;
460 		topology = "?";
461 		break;
462 	}
463 
464 	mpt_lprt(mpt, MPT_PRT_INFO,
465 	    "FC Port Page 0: Topology <%s> WWNN 0x%08x%08x WWPN 0x%08x%08x "
466 	    "Speed %u-Gbit\n", topology,
467 	    mpt->mpt_fcport_page0.WWNN.High,
468 	    mpt->mpt_fcport_page0.WWNN.Low,
469 	    mpt->mpt_fcport_page0.WWPN.High,
470 	    mpt->mpt_fcport_page0.WWPN.Low,
471 	    mpt->mpt_fcport_speed);
472 	MPT_UNLOCK(mpt);
473 	{
474 		ksnprintf(mpt->scinfo.fc.wwnn,
475 		    sizeof (mpt->scinfo.fc.wwnn), "0x%08x%08x",
476 		    mpt->mpt_fcport_page0.WWNN.High,
477 		    mpt->mpt_fcport_page0.WWNN.Low);
478 
479 		ksnprintf(mpt->scinfo.fc.wwpn,
480 		    sizeof (mpt->scinfo.fc.wwpn), "0x%08x%08x",
481 		    mpt->mpt_fcport_page0.WWPN.High,
482 		    mpt->mpt_fcport_page0.WWPN.Low);
483 
484 		SYSCTL_ADD_STRING(&mpt->mpt_sysctl_ctx,
485 		       SYSCTL_CHILDREN(mpt->mpt_sysctl_tree), OID_AUTO,
486 		       "wwnn", CTLFLAG_RD, mpt->scinfo.fc.wwnn, 0,
487 		       "World Wide Node Name");
488 
489 		SYSCTL_ADD_STRING(&mpt->mpt_sysctl_ctx,
490 		       SYSCTL_CHILDREN(mpt->mpt_sysctl_tree), OID_AUTO,
491 		       "wwpn", CTLFLAG_RD, mpt->scinfo.fc.wwpn, 0,
492 		       "World Wide Port Name");
493 
494 	}
495 	MPT_LOCK(mpt);
496 	return (0);
497 }
498 
499 /*
500  * Set FC configuration information.
501  */
502 static int
503 mpt_set_initial_config_fc(struct mpt_softc *mpt)
504 {
505 
506 	CONFIG_PAGE_FC_PORT_1 fc;
507 	U32 fl;
508 	int r, doit = 0;
509 	int role;
510 
511 	r = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 1, 0,
512 	    &fc.Header, FALSE, 5000);
513 	if (r) {
514 		mpt_prt(mpt, "failed to read FC page 1 header\n");
515 		return (mpt_fc_reset_link(mpt, 1));
516 	}
517 
518 	r = mpt_read_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_NVRAM, 0,
519 	    &fc.Header, sizeof (fc), FALSE, 5000);
520 	if (r) {
521 		mpt_prt(mpt, "failed to read FC page 1\n");
522 		return (mpt_fc_reset_link(mpt, 1));
523 	}
524 	mpt2host_config_page_fc_port_1(&fc);
525 
526 	/*
527 	 * Check our flags to make sure we support the role we want.
528 	 */
529 	doit = 0;
530 	role = 0;
531 	fl = fc.Flags;
532 
533 	if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT) {
534 		role |= MPT_ROLE_INITIATOR;
535 	}
536 	if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) {
537 		role |= MPT_ROLE_TARGET;
538 	}
539 
540 	fl &= ~MPI_FCPORTPAGE1_FLAGS_PROT_MASK;
541 
542 	if (mpt->do_cfg_role == 0) {
543 		role = mpt->cfg_role;
544 	} else {
545 		mpt->do_cfg_role = 0;
546 	}
547 
548 	if (role != mpt->cfg_role) {
549 		if (mpt->cfg_role & MPT_ROLE_INITIATOR) {
550 			if ((role & MPT_ROLE_INITIATOR) == 0) {
551 				mpt_prt(mpt, "adding initiator role\n");
552 				fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT;
553 				doit++;
554 			} else {
555 				mpt_prt(mpt, "keeping initiator role\n");
556 			}
557 		} else if (role & MPT_ROLE_INITIATOR) {
558 			mpt_prt(mpt, "removing initiator role\n");
559 			doit++;
560 		}
561 		if (mpt->cfg_role & MPT_ROLE_TARGET) {
562 			if ((role & MPT_ROLE_TARGET) == 0) {
563 				mpt_prt(mpt, "adding target role\n");
564 				fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG;
565 				doit++;
566 			} else {
567 				mpt_prt(mpt, "keeping target role\n");
568 			}
569 		} else if (role & MPT_ROLE_TARGET) {
570 			mpt_prt(mpt, "removing target role\n");
571 			doit++;
572 		}
573 		mpt->role = mpt->cfg_role;
574 	}
575 
576 	if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) {
577 		if ((fl & MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID) == 0) {
578 			mpt_prt(mpt, "adding OXID option\n");
579 			fl |= MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID;
580 			doit++;
581 		}
582 	}
583 
584 	if (doit) {
585 		fc.Flags = fl;
586 		host2mpt_config_page_fc_port_1(&fc);
587 		r = mpt_write_cfg_page(mpt,
588 		    MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM, 0, &fc.Header,
589 		    sizeof(fc), FALSE, 5000);
590 		if (r != 0) {
591 			mpt_prt(mpt, "failed to update NVRAM with changes\n");
592 			return (0);
593 		}
594 		mpt_prt(mpt, "NOTE: NVRAM changes will not take "
595 		    "effect until next reboot or IOC reset\n");
596 	}
597 	return (0);
598 }
599 
600 static int
601 mptsas_sas_io_unit_pg0(struct mpt_softc *mpt, struct mptsas_portinfo *portinfo)
602 {
603 	ConfigExtendedPageHeader_t hdr;
604 	struct mptsas_phyinfo *phyinfo;
605 	SasIOUnitPage0_t *buffer;
606 	int error, len, i;
607 
608 	error = mpt_read_extcfg_header(mpt, MPI_SASIOUNITPAGE0_PAGEVERSION,
609 				       0, 0, MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT,
610 				       &hdr, 0, 10000);
611 	if (error)
612 		goto out;
613 	if (hdr.ExtPageLength == 0) {
614 		error = ENXIO;
615 		goto out;
616 	}
617 
618 	len = hdr.ExtPageLength * 4;
619 	buffer = kmalloc(len, M_DEVBUF, M_NOWAIT|M_ZERO);
620 	if (buffer == NULL) {
621 		error = ENOMEM;
622 		goto out;
623 	}
624 
625 	error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
626 				     0, &hdr, buffer, len, 0, 10000);
627 	if (error) {
628 		kfree(buffer, M_DEVBUF);
629 		goto out;
630 	}
631 
632 	portinfo->num_phys = buffer->NumPhys;
633 	portinfo->phy_info = kmalloc(sizeof(*portinfo->phy_info) *
634 	    portinfo->num_phys, M_DEVBUF, M_NOWAIT|M_ZERO);
635 	if (portinfo->phy_info == NULL) {
636 		kfree(buffer, M_DEVBUF);
637 		error = ENOMEM;
638 		goto out;
639 	}
640 
641 	for (i = 0; i < portinfo->num_phys; i++) {
642 		phyinfo = &portinfo->phy_info[i];
643 		phyinfo->phy_num = i;
644 		phyinfo->port_id = buffer->PhyData[i].Port;
645 		phyinfo->negotiated_link_rate =
646 		    buffer->PhyData[i].NegotiatedLinkRate;
647 		phyinfo->handle =
648 		    le16toh(buffer->PhyData[i].ControllerDevHandle);
649 	}
650 
651 	kfree(buffer, M_DEVBUF);
652 out:
653 	return (error);
654 }
655 
656 static int
657 mptsas_sas_phy_pg0(struct mpt_softc *mpt, struct mptsas_phyinfo *phy_info,
658 	uint32_t form, uint32_t form_specific)
659 {
660 	ConfigExtendedPageHeader_t hdr;
661 	SasPhyPage0_t *buffer;
662 	int error;
663 
664 	error = mpt_read_extcfg_header(mpt, MPI_SASPHY0_PAGEVERSION, 0, 0,
665 				       MPI_CONFIG_EXTPAGETYPE_SAS_PHY, &hdr,
666 				       0, 10000);
667 	if (error)
668 		goto out;
669 	if (hdr.ExtPageLength == 0) {
670 		error = ENXIO;
671 		goto out;
672 	}
673 
674 	buffer = kmalloc(sizeof(SasPhyPage0_t), M_DEVBUF, M_NOWAIT|M_ZERO);
675 	if (buffer == NULL) {
676 		error = ENOMEM;
677 		goto out;
678 	}
679 
680 	error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
681 				     form + form_specific, &hdr, buffer,
682 				     sizeof(SasPhyPage0_t), 0, 10000);
683 	if (error) {
684 		kfree(buffer, M_DEVBUF);
685 		goto out;
686 	}
687 
688 	phy_info->hw_link_rate = buffer->HwLinkRate;
689 	phy_info->programmed_link_rate = buffer->ProgrammedLinkRate;
690 	phy_info->identify.dev_handle = le16toh(buffer->OwnerDevHandle);
691 	phy_info->attached.dev_handle = le16toh(buffer->AttachedDevHandle);
692 
693 	kfree(buffer, M_DEVBUF);
694 out:
695 	return (error);
696 }
697 
698 static int
699 mptsas_sas_device_pg0(struct mpt_softc *mpt, struct mptsas_devinfo *device_info,
700 	uint32_t form, uint32_t form_specific)
701 {
702 	ConfigExtendedPageHeader_t hdr;
703 	SasDevicePage0_t *buffer;
704 	uint64_t sas_address;
705 	int error = 0;
706 
707 	bzero(device_info, sizeof(*device_info));
708 	error = mpt_read_extcfg_header(mpt, MPI_SASDEVICE0_PAGEVERSION, 0, 0,
709 				       MPI_CONFIG_EXTPAGETYPE_SAS_DEVICE,
710 				       &hdr, 0, 10000);
711 	if (error)
712 		goto out;
713 	if (hdr.ExtPageLength == 0) {
714 		error = ENXIO;
715 		goto out;
716 	}
717 
718 	buffer = kmalloc(sizeof(SasDevicePage0_t), M_DEVBUF, M_NOWAIT|M_ZERO);
719 	if (buffer == NULL) {
720 		error = ENOMEM;
721 		goto out;
722 	}
723 
724 	error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
725 				     form + form_specific, &hdr, buffer,
726 				     sizeof(SasDevicePage0_t), 0, 10000);
727 	if (error) {
728 		kfree(buffer, M_DEVBUF);
729 		goto out;
730 	}
731 
732 	device_info->dev_handle = le16toh(buffer->DevHandle);
733 	device_info->parent_dev_handle = le16toh(buffer->ParentDevHandle);
734 	device_info->enclosure_handle = le16toh(buffer->EnclosureHandle);
735 	device_info->slot = le16toh(buffer->Slot);
736 	device_info->phy_num = buffer->PhyNum;
737 	device_info->physical_port = buffer->PhysicalPort;
738 	device_info->target_id = buffer->TargetID;
739 	device_info->bus = buffer->Bus;
740 	bcopy(&buffer->SASAddress, &sas_address, sizeof(uint64_t));
741 	device_info->sas_address = le64toh(sas_address);
742 	device_info->device_info = le32toh(buffer->DeviceInfo);
743 
744 	kfree(buffer, M_DEVBUF);
745 out:
746 	return (error);
747 }
748 
749 /*
750  * Read SAS configuration information. Nothing to do yet.
751  */
752 static int
753 mpt_read_config_info_sas(struct mpt_softc *mpt)
754 {
755 	struct mptsas_portinfo *portinfo;
756 	struct mptsas_phyinfo *phyinfo;
757 	int error, i;
758 
759 	portinfo = kmalloc(sizeof(*portinfo), M_DEVBUF, M_NOWAIT|M_ZERO);
760 	if (portinfo == NULL)
761 		return (ENOMEM);
762 
763 	error = mptsas_sas_io_unit_pg0(mpt, portinfo);
764 	if (error) {
765 		kfree(portinfo, M_DEVBUF);
766 		return (0);
767 	}
768 
769 	for (i = 0; i < portinfo->num_phys; i++) {
770 		phyinfo = &portinfo->phy_info[i];
771 		error = mptsas_sas_phy_pg0(mpt, phyinfo,
772 		    (MPI_SAS_PHY_PGAD_FORM_PHY_NUMBER <<
773 		    MPI_SAS_PHY_PGAD_FORM_SHIFT), i);
774 		if (error)
775 			break;
776 		error = mptsas_sas_device_pg0(mpt, &phyinfo->identify,
777 		    (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
778 		    MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
779 		    phyinfo->handle);
780 		if (error)
781 			break;
782 		phyinfo->identify.phy_num = phyinfo->phy_num = i;
783 		if (phyinfo->attached.dev_handle)
784 			error = mptsas_sas_device_pg0(mpt,
785 			    &phyinfo->attached,
786 			    (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
787 			    MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
788 			    phyinfo->attached.dev_handle);
789 		if (error)
790 			break;
791 	}
792 	mpt->sas_portinfo = portinfo;
793 	return (0);
794 }
795 
796 static void
797 mptsas_set_sata_wc(struct mpt_softc *mpt, struct mptsas_devinfo *devinfo,
798 	int enabled)
799 {
800 	SataPassthroughRequest_t	*pass;
801 	request_t *req;
802 	int error, status;
803 
804 	req = mpt_get_request(mpt, 0);
805 	if (req == NULL)
806 		return;
807 
808 	pass = req->req_vbuf;
809 	bzero(pass, sizeof(SataPassthroughRequest_t));
810 	pass->Function = MPI_FUNCTION_SATA_PASSTHROUGH;
811 	pass->TargetID = devinfo->target_id;
812 	pass->Bus = devinfo->bus;
813 	pass->PassthroughFlags = 0;
814 	pass->ConnectionRate = MPI_SATA_PT_REQ_CONNECT_RATE_NEGOTIATED;
815 	pass->DataLength = 0;
816 	pass->MsgContext = htole32(req->index | sata_pass_handler_id);
817 	pass->CommandFIS[0] = 0x27;
818 	pass->CommandFIS[1] = 0x80;
819 	pass->CommandFIS[2] = 0xef;
820 	pass->CommandFIS[3] = (enabled) ? 0x02 : 0x82;
821 	pass->CommandFIS[7] = 0x40;
822 	pass->CommandFIS[15] = 0x08;
823 
824 	mpt_check_doorbell(mpt);
825 	mpt_send_cmd(mpt, req);
826 	error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE, 0,
827 			     10 * 1000);
828 	if (error) {
829 		mpt_free_request(mpt, req);
830 		kprintf("error %d sending passthrough\n", error);
831 		return;
832 	}
833 
834 	status = le16toh(req->IOCStatus);
835 	if (status != MPI_IOCSTATUS_SUCCESS) {
836 		mpt_free_request(mpt, req);
837 		kprintf("IOCSTATUS %d\n", status);
838 		return;
839 	}
840 
841 	mpt_free_request(mpt, req);
842 }
843 
844 /*
845  * Set SAS configuration information. Nothing to do yet.
846  */
847 static int
848 mpt_set_initial_config_sas(struct mpt_softc *mpt)
849 {
850 	struct mptsas_phyinfo *phyinfo;
851 	int i;
852 
853 	if ((mpt_enable_sata_wc != -1) && (mpt->sas_portinfo != NULL)) {
854 		for (i = 0; i < mpt->sas_portinfo->num_phys; i++) {
855 			phyinfo = &mpt->sas_portinfo->phy_info[i];
856 			if (phyinfo->attached.dev_handle == 0)
857 				continue;
858 			if ((phyinfo->attached.device_info &
859 			    MPI_SAS_DEVICE_INFO_SATA_DEVICE) == 0)
860 				continue;
861 			if (bootverbose)
862 				device_printf(mpt->dev,
863 				    "%sabling SATA WC on phy %d\n",
864 				    (mpt_enable_sata_wc) ? "En" : "Dis", i);
865 			mptsas_set_sata_wc(mpt, &phyinfo->attached,
866 					   mpt_enable_sata_wc);
867 		}
868 	}
869 
870 	return (0);
871 }
872 
873 static int
874 mpt_sata_pass_reply_handler(struct mpt_softc *mpt, request_t *req,
875  uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
876 {
877 	if (req != NULL) {
878 
879 		if (reply_frame != NULL) {
880 			req->IOCStatus = le16toh(reply_frame->IOCStatus);
881 		}
882 		req->state &= ~REQ_STATE_QUEUED;
883 		req->state |= REQ_STATE_DONE;
884 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
885 		if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
886 			wakeup(req);
887 		} else if ((req->state & REQ_STATE_TIMEDOUT) != 0) {
888 			/*
889 			 * Whew- we can free this request (late completion)
890 			 */
891 			mpt_free_request(mpt, req);
892 		}
893 	}
894 
895 	return (TRUE);
896 }
897 
898 /*
899  * Read SCSI configuration information
900  */
901 static int
902 mpt_read_config_info_spi(struct mpt_softc *mpt)
903 {
904 	int rv, i;
905 
906 	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 0, 0,
907 	    &mpt->mpt_port_page0.Header, FALSE, 5000);
908 	if (rv) {
909 		return (-1);
910 	}
911 	mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 0 Header: %x %x %x %x\n",
912 	    mpt->mpt_port_page0.Header.PageVersion,
913 	    mpt->mpt_port_page0.Header.PageLength,
914 	    mpt->mpt_port_page0.Header.PageNumber,
915 	    mpt->mpt_port_page0.Header.PageType);
916 
917 	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 1, 0,
918 	    &mpt->mpt_port_page1.Header, FALSE, 5000);
919 	if (rv) {
920 		return (-1);
921 	}
922 	mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 1 Header: %x %x %x %x\n",
923 	    mpt->mpt_port_page1.Header.PageVersion,
924 	    mpt->mpt_port_page1.Header.PageLength,
925 	    mpt->mpt_port_page1.Header.PageNumber,
926 	    mpt->mpt_port_page1.Header.PageType);
927 
928 	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 2, 0,
929 	    &mpt->mpt_port_page2.Header, FALSE, 5000);
930 	if (rv) {
931 		return (-1);
932 	}
933 	mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 2 Header: %x %x %x %x\n",
934 	    mpt->mpt_port_page2.Header.PageVersion,
935 	    mpt->mpt_port_page2.Header.PageLength,
936 	    mpt->mpt_port_page2.Header.PageNumber,
937 	    mpt->mpt_port_page2.Header.PageType);
938 
939 	for (i = 0; i < 16; i++) {
940 		rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE,
941 		    0, i, &mpt->mpt_dev_page0[i].Header, FALSE, 5000);
942 		if (rv) {
943 			return (-1);
944 		}
945 		mpt_lprt(mpt, MPT_PRT_DEBUG,
946 		    "SPI Target %d Device Page 0 Header: %x %x %x %x\n", i,
947 		    mpt->mpt_dev_page0[i].Header.PageVersion,
948 		    mpt->mpt_dev_page0[i].Header.PageLength,
949 		    mpt->mpt_dev_page0[i].Header.PageNumber,
950 		    mpt->mpt_dev_page0[i].Header.PageType);
951 
952 		rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE,
953 		    1, i, &mpt->mpt_dev_page1[i].Header, FALSE, 5000);
954 		if (rv) {
955 			return (-1);
956 		}
957 		mpt_lprt(mpt, MPT_PRT_DEBUG,
958 		    "SPI Target %d Device Page 1 Header: %x %x %x %x\n", i,
959 		    mpt->mpt_dev_page1[i].Header.PageVersion,
960 		    mpt->mpt_dev_page1[i].Header.PageLength,
961 		    mpt->mpt_dev_page1[i].Header.PageNumber,
962 		    mpt->mpt_dev_page1[i].Header.PageType);
963 	}
964 
965 	/*
966 	 * At this point, we don't *have* to fail. As long as we have
967 	 * valid config header information, we can (barely) lurch
968 	 * along.
969 	 */
970 
971 	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page0.Header,
972 	    sizeof(mpt->mpt_port_page0), FALSE, 5000);
973 	if (rv) {
974 		mpt_prt(mpt, "failed to read SPI Port Page 0\n");
975 	} else {
976 		mpt2host_config_page_scsi_port_0(&mpt->mpt_port_page0);
977 		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
978 		    "SPI Port Page 0: Capabilities %x PhysicalInterface %x\n",
979 		    mpt->mpt_port_page0.Capabilities,
980 		    mpt->mpt_port_page0.PhysicalInterface);
981 	}
982 
983 	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page1.Header,
984 	    sizeof(mpt->mpt_port_page1), FALSE, 5000);
985 	if (rv) {
986 		mpt_prt(mpt, "failed to read SPI Port Page 1\n");
987 	} else {
988 		mpt2host_config_page_scsi_port_1(&mpt->mpt_port_page1);
989 		mpt_lprt(mpt, MPT_PRT_DEBUG,
990 		    "SPI Port Page 1: Configuration %x OnBusTimerValue %x\n",
991 		    mpt->mpt_port_page1.Configuration,
992 		    mpt->mpt_port_page1.OnBusTimerValue);
993 	}
994 
995 	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page2.Header,
996 	    sizeof(mpt->mpt_port_page2), FALSE, 5000);
997 	if (rv) {
998 		mpt_prt(mpt, "failed to read SPI Port Page 2\n");
999 	} else {
1000 		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
1001 		    "Port Page 2: Flags %x Settings %x\n",
1002 		    mpt->mpt_port_page2.PortFlags,
1003 		    mpt->mpt_port_page2.PortSettings);
1004 		mpt2host_config_page_scsi_port_2(&mpt->mpt_port_page2);
1005 		for (i = 0; i < 16; i++) {
1006 			mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
1007 			    " Port Page 2 Tgt %d: timo %x SF %x Flags %x\n",
1008 			    i, mpt->mpt_port_page2.DeviceSettings[i].Timeout,
1009 			    mpt->mpt_port_page2.DeviceSettings[i].SyncFactor,
1010 			    mpt->mpt_port_page2.DeviceSettings[i].DeviceFlags);
1011 		}
1012 	}
1013 
1014 	for (i = 0; i < 16; i++) {
1015 		rv = mpt_read_cur_cfg_page(mpt, i,
1016 		    &mpt->mpt_dev_page0[i].Header, sizeof(*mpt->mpt_dev_page0),
1017 		    FALSE, 5000);
1018 		if (rv) {
1019 			mpt_prt(mpt,
1020 			    "cannot read SPI Target %d Device Page 0\n", i);
1021 			continue;
1022 		}
1023 		mpt2host_config_page_scsi_device_0(&mpt->mpt_dev_page0[i]);
1024 		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
1025 		    "target %d page 0: Negotiated Params %x Information %x\n",
1026 		    i, mpt->mpt_dev_page0[i].NegotiatedParameters,
1027 		    mpt->mpt_dev_page0[i].Information);
1028 
1029 		rv = mpt_read_cur_cfg_page(mpt, i,
1030 		    &mpt->mpt_dev_page1[i].Header, sizeof(*mpt->mpt_dev_page1),
1031 		    FALSE, 5000);
1032 		if (rv) {
1033 			mpt_prt(mpt,
1034 			    "cannot read SPI Target %d Device Page 1\n", i);
1035 			continue;
1036 		}
1037 		mpt2host_config_page_scsi_device_1(&mpt->mpt_dev_page1[i]);
1038 		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
1039 		    "target %d page 1: Requested Params %x Configuration %x\n",
1040 		    i, mpt->mpt_dev_page1[i].RequestedParameters,
1041 		    mpt->mpt_dev_page1[i].Configuration);
1042 	}
1043 	return (0);
1044 }
1045 
1046 /*
1047  * Validate SPI configuration information.
1048  *
1049  * In particular, validate SPI Port Page 1.
1050  */
1051 static int
1052 mpt_set_initial_config_spi(struct mpt_softc *mpt)
1053 {
1054 	int error, i, pp1val;
1055 
1056 	mpt->mpt_disc_enable = 0xff;
1057 	mpt->mpt_tag_enable = 0;
1058 
1059 	pp1val = ((1 << mpt->mpt_ini_id) <<
1060 	    MPI_SCSIPORTPAGE1_CFG_SHIFT_PORT_RESPONSE_ID) | mpt->mpt_ini_id;
1061 	if (mpt->mpt_port_page1.Configuration != pp1val) {
1062 		CONFIG_PAGE_SCSI_PORT_1 tmp;
1063 
1064 		mpt_prt(mpt, "SPI Port Page 1 Config value bad (%x)- should "
1065 		    "be %x\n", mpt->mpt_port_page1.Configuration, pp1val);
1066 		tmp = mpt->mpt_port_page1;
1067 		tmp.Configuration = pp1val;
1068 		host2mpt_config_page_scsi_port_1(&tmp);
1069 		error = mpt_write_cur_cfg_page(mpt, 0,
1070 		    &tmp.Header, sizeof(tmp), FALSE, 5000);
1071 		if (error) {
1072 			return (-1);
1073 		}
1074 		error = mpt_read_cur_cfg_page(mpt, 0,
1075 		    &tmp.Header, sizeof(tmp), FALSE, 5000);
1076 		if (error) {
1077 			return (-1);
1078 		}
1079 		mpt2host_config_page_scsi_port_1(&tmp);
1080 		if (tmp.Configuration != pp1val) {
1081 			mpt_prt(mpt,
1082 			    "failed to reset SPI Port Page 1 Config value\n");
1083 			return (-1);
1084 		}
1085 		mpt->mpt_port_page1 = tmp;
1086 	}
1087 
1088 	/*
1089 	 * The purpose of this exercise is to get
1090 	 * all targets back to async/narrow.
1091 	 *
1092 	 * We skip this step if the BIOS has already negotiated
1093 	 * speeds with the targets.
1094 	 */
1095 	i = mpt->mpt_port_page2.PortSettings &
1096 	    MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS;
1097 	if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS) {
1098 		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
1099 		    "honoring BIOS transfer negotiations\n");
1100 	} else {
1101 		for (i = 0; i < 16; i++) {
1102 			mpt->mpt_dev_page1[i].RequestedParameters = 0;
1103 			mpt->mpt_dev_page1[i].Configuration = 0;
1104 			(void) mpt_update_spi_config(mpt, i);
1105 		}
1106 	}
1107 	return (0);
1108 }
1109 
1110 int
1111 mpt_cam_enable(struct mpt_softc *mpt)
1112 {
1113 	int error;
1114 
1115 	MPT_LOCK(mpt);
1116 
1117 	error = EIO;
1118 	if (mpt->is_fc) {
1119 		if (mpt_read_config_info_fc(mpt)) {
1120 			goto out;
1121 		}
1122 		if (mpt_set_initial_config_fc(mpt)) {
1123 			goto out;
1124 		}
1125 	} else if (mpt->is_sas) {
1126 		if (mpt_read_config_info_sas(mpt)) {
1127 			goto out;
1128 		}
1129 		if (mpt_set_initial_config_sas(mpt)) {
1130 			goto out;
1131 		}
1132 	} else if (mpt->is_spi) {
1133 		if (mpt_read_config_info_spi(mpt)) {
1134 			goto out;
1135 		}
1136 		if (mpt_set_initial_config_spi(mpt)) {
1137 			goto out;
1138 		}
1139 	}
1140 	error = 0;
1141 
1142 out:
1143 	MPT_UNLOCK(mpt);
1144 	return (error);
1145 }
1146 
1147 void
1148 mpt_cam_ready(struct mpt_softc *mpt)
1149 {
1150 	/*
1151 	 * If we're in target mode, hang out resources now
1152 	 * so we don't cause the world to hang talking to us.
1153 	 */
1154 	if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) {
1155 		/*
1156 		 * Try to add some target command resources
1157 		 */
1158 		MPT_LOCK(mpt);
1159 		if (mpt_add_target_commands(mpt) == FALSE) {
1160 			mpt_prt(mpt, "failed to add target commands\n");
1161 		}
1162 		MPT_UNLOCK(mpt);
1163 	}
1164 	mpt->ready = 1;
1165 }
1166 
1167 void
1168 mpt_cam_detach(struct mpt_softc *mpt)
1169 {
1170 	mpt_handler_t handler;
1171 
1172 	MPT_LOCK(mpt);
1173 	mpt->ready = 0;
1174 	mpt_terminate_recovery_thread(mpt);
1175 
1176 	handler.reply_handler = mpt_scsi_reply_handler;
1177 	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
1178 			       scsi_io_handler_id);
1179 	handler.reply_handler = mpt_scsi_tmf_reply_handler;
1180 	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
1181 			       scsi_tmf_handler_id);
1182 	handler.reply_handler = mpt_fc_els_reply_handler;
1183 	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
1184 			       fc_els_handler_id);
1185 	handler.reply_handler = mpt_scsi_tgt_reply_handler;
1186 	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
1187 			       mpt->scsi_tgt_handler_id);
1188 	handler.reply_handler = mpt_sata_pass_reply_handler;
1189 	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
1190 			       sata_pass_handler_id);
1191 
1192 	if (mpt->tmf_req != NULL) {
1193 		mpt->tmf_req->state = REQ_STATE_ALLOCATED;
1194 		mpt_free_request(mpt, mpt->tmf_req);
1195 		mpt->tmf_req = NULL;
1196 	}
1197 	if (mpt->sas_portinfo != NULL) {
1198 		kfree(mpt->sas_portinfo, M_DEVBUF);
1199 		mpt->sas_portinfo = NULL;
1200 	}
1201 
1202 	if (mpt->sim != NULL) {
1203 		xpt_free_path(mpt->path);
1204 		xpt_bus_deregister(cam_sim_path(mpt->sim));
1205 		cam_sim_free(mpt->sim);
1206 		mpt->sim = NULL;
1207 	}
1208 
1209 	if (mpt->phydisk_sim != NULL) {
1210 		xpt_free_path(mpt->phydisk_path);
1211 		xpt_bus_deregister(cam_sim_path(mpt->phydisk_sim));
1212 		cam_sim_free(mpt->phydisk_sim);
1213 		mpt->phydisk_sim = NULL;
1214 	}
1215 	MPT_UNLOCK(mpt);
1216 }
1217 
1218 /* This routine is used after a system crash to dump core onto the swap device.
1219  */
1220 static void
1221 mpt_poll(struct cam_sim *sim)
1222 {
1223 	struct mpt_softc *mpt;
1224 
1225 	mpt = (struct mpt_softc *)cam_sim_softc(sim);
1226 	mpt_intr(mpt);
1227 }
1228 
1229 /*
1230  * Watchdog timeout routine for SCSI requests.
1231  */
1232 static void
1233 mpt_timeout(void *arg)
1234 {
1235 	union ccb	 *ccb;
1236 	struct mpt_softc *mpt;
1237 	request_t	 *req;
1238 
1239 	ccb = (union ccb *)arg;
1240 	mpt = ccb->ccb_h.ccb_mpt_ptr;
1241 
1242 	MPT_LOCK(mpt);
1243 	req = ccb->ccb_h.ccb_req_ptr;
1244 	mpt_prt(mpt, "request %p:%u timed out for ccb %p (req->ccb %p)\n", req,
1245 	    req->serno, ccb, req->ccb);
1246 /* XXX: WHAT ARE WE TRYING TO DO HERE? */
1247 	if ((req->state & REQ_STATE_QUEUED) == REQ_STATE_QUEUED) {
1248 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
1249 		TAILQ_INSERT_TAIL(&mpt->request_timeout_list, req, links);
1250 		req->state |= REQ_STATE_TIMEDOUT;
1251 		mpt_wakeup_recovery_thread(mpt);
1252 	}
1253 	MPT_UNLOCK(mpt);
1254 }
1255 
1256 /*
1257  * Callback routine from "bus_dmamap_load" or, in simple cases, called directly.
1258  *
1259  * Takes a list of physical segments and builds the SGL for SCSI IO command
1260  * and forwards the commard to the IOC after one last check that CAM has not
1261  * aborted the transaction.
1262  */
1263 static void
1264 mpt_execute_req_a64(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1265 {
1266 	request_t *req, *trq;
1267 	char *mpt_off;
1268 	union ccb *ccb;
1269 	struct mpt_softc *mpt;
1270 	int seg, first_lim;
1271 	uint32_t flags, nxt_off;
1272 	void *sglp = NULL;
1273 	MSG_REQUEST_HEADER *hdrp;
1274 	SGE_SIMPLE64 *se;
1275 	SGE_CHAIN64 *ce;
1276 	int istgt = 0;
1277 
1278 	req = (request_t *)arg;
1279 	ccb = req->ccb;
1280 
1281 	mpt = ccb->ccb_h.ccb_mpt_ptr;
1282 	req = ccb->ccb_h.ccb_req_ptr;
1283 
1284 	hdrp = req->req_vbuf;
1285 	mpt_off = req->req_vbuf;
1286 
1287 	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
1288 		error = EFBIG;
1289 	}
1290 
1291 	if (error == 0) {
1292 		switch (hdrp->Function) {
1293 		case MPI_FUNCTION_SCSI_IO_REQUEST:
1294 		case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
1295 			istgt = 0;
1296 			sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL;
1297 			break;
1298 		case MPI_FUNCTION_TARGET_ASSIST:
1299 			istgt = 1;
1300 			sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL;
1301 			break;
1302 		default:
1303 			mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req_a64\n",
1304 			    hdrp->Function);
1305 			error = EINVAL;
1306 			break;
1307 		}
1308 	}
1309 
1310 	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
1311 		error = EFBIG;
1312 		mpt_prt(mpt, "segment count %d too large (max %u)\n",
1313 		    nseg, mpt->max_seg_cnt);
1314 	}
1315 
1316 bad:
1317 	if (error != 0) {
1318 		if (error != EFBIG && error != ENOMEM) {
1319 			mpt_prt(mpt, "mpt_execute_req_a64: err %d\n", error);
1320 		}
1321 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
1322 			cam_status status;
1323 			mpt_freeze_ccb(ccb);
1324 			if (error == EFBIG) {
1325 				status = CAM_REQ_TOO_BIG;
1326 			} else if (error == ENOMEM) {
1327 				if (mpt->outofbeer == 0) {
1328 					mpt->outofbeer = 1;
1329 					xpt_freeze_simq(mpt->sim, 1);
1330 					mpt_lprt(mpt, MPT_PRT_DEBUG,
1331 					    "FREEZEQ\n");
1332 				}
1333 				status = CAM_REQUEUE_REQ;
1334 			} else {
1335 				status = CAM_REQ_CMP_ERR;
1336 			}
1337 			mpt_set_ccb_status(ccb, status);
1338 		}
1339 		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1340 			request_t *cmd_req =
1341 				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1342 			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
1343 			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
1344 			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
1345 		}
1346 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1347 		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
1348 		xpt_done(ccb);
1349 		CAMLOCK_2_MPTLOCK(mpt);
1350 		mpt_free_request(mpt, req);
1351 		MPTLOCK_2_CAMLOCK(mpt);
1352 		return;
1353 	}
1354 
1355 	/*
1356 	 * No data to transfer?
1357 	 * Just make a single simple SGL with zero length.
1358 	 */
1359 
1360 	if (mpt->verbose >= MPT_PRT_DEBUG) {
1361 		int tidx = ((char *)sglp) - mpt_off;
1362 		memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx);
1363 	}
1364 
1365 	if (nseg == 0) {
1366 		SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp;
1367 		MPI_pSGE_SET_FLAGS(se1,
1368 		    (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
1369 		    MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
1370 		se1->FlagsLength = htole32(se1->FlagsLength);
1371 		goto out;
1372 	}
1373 
1374 
1375 	flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_64_BIT_ADDRESSING;
1376 	if (istgt == 0) {
1377 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
1378 			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
1379 		}
1380 	} else {
1381 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1382 			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
1383 		}
1384 	}
1385 
1386 	if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) {
1387 		bus_dmasync_op_t op;
1388 		if (istgt == 0) {
1389 			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1390 				op = BUS_DMASYNC_PREREAD;
1391 			} else {
1392 				op = BUS_DMASYNC_PREWRITE;
1393 			}
1394 		} else {
1395 			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1396 				op = BUS_DMASYNC_PREWRITE;
1397 			} else {
1398 				op = BUS_DMASYNC_PREREAD;
1399 			}
1400 		}
1401 		bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
1402 	}
1403 
1404 	/*
1405 	 * Okay, fill in what we can at the end of the command frame.
1406 	 * If we have up to MPT_NSGL_FIRST, we can fit them all into
1407 	 * the command frame.
1408 	 *
1409 	 * Otherwise, we fill up through MPT_NSGL_FIRST less one
1410 	 * SIMPLE64 pointers and start doing CHAIN64 entries after
1411 	 * that.
1412 	 */
1413 
1414 	if (nseg < MPT_NSGL_FIRST(mpt)) {
1415 		first_lim = nseg;
1416 	} else {
1417 		/*
1418 		 * Leave room for CHAIN element
1419 		 */
1420 		first_lim = MPT_NSGL_FIRST(mpt) - 1;
1421 	}
1422 
1423 	se = (SGE_SIMPLE64 *) sglp;
1424 	for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) {
1425 		uint32_t tf;
1426 
1427 		memset(se, 0, sizeof (*se));
1428 		se->Address.Low = htole32(dm_segs->ds_addr & 0xffffffff);
1429 		if (sizeof(bus_addr_t) > 4) {
1430 			se->Address.High =
1431 			    htole32(((uint64_t)dm_segs->ds_addr) >> 32);
1432 		}
1433 		MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1434 		tf = flags;
1435 		if (seg == first_lim - 1) {
1436 			tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
1437 		}
1438 		if (seg == nseg - 1) {
1439 			tf |=	MPI_SGE_FLAGS_END_OF_LIST |
1440 				MPI_SGE_FLAGS_END_OF_BUFFER;
1441 		}
1442 		MPI_pSGE_SET_FLAGS(se, tf);
1443 		se->FlagsLength = htole32(se->FlagsLength);
1444 	}
1445 
1446 	if (seg == nseg) {
1447 		goto out;
1448 	}
1449 
1450 	/*
1451 	 * Tell the IOC where to find the first chain element.
1452 	 */
1453 	hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2;
1454 	nxt_off = MPT_RQSL(mpt);
1455 	trq = req;
1456 
1457 	/*
1458 	 * Make up the rest of the data segments out of a chain element
1459 	 * (contiained in the current request frame) which points to
1460 	 * SIMPLE64 elements in the next request frame, possibly ending
1461 	 * with *another* chain element (if there's more).
1462 	 */
1463 	while (seg < nseg) {
1464 		int this_seg_lim;
1465 		uint32_t tf, cur_off;
1466 		bus_addr_t chain_list_addr;
1467 
1468 		/*
1469 		 * Point to the chain descriptor. Note that the chain
1470 		 * descriptor is at the end of the *previous* list (whether
1471 		 * chain or simple).
1472 		 */
1473 		ce = (SGE_CHAIN64 *) se;
1474 
1475 		/*
1476 		 * Before we change our current pointer, make  sure we won't
1477 		 * overflow the request area with this frame. Note that we
1478 		 * test against 'greater than' here as it's okay in this case
1479 		 * to have next offset be just outside the request area.
1480 		 */
1481 		if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) {
1482 			nxt_off = MPT_REQUEST_AREA;
1483 			goto next_chain;
1484 		}
1485 
1486 		/*
1487 		 * Set our SGE element pointer to the beginning of the chain
1488 		 * list and update our next chain list offset.
1489 		 */
1490 		se = (SGE_SIMPLE64 *) &mpt_off[nxt_off];
1491 		cur_off = nxt_off;
1492 		nxt_off += MPT_RQSL(mpt);
1493 
1494 		/*
1495 		 * Now initialized the chain descriptor.
1496 		 */
1497 		memset(ce, 0, sizeof (*ce));
1498 
1499 		/*
1500 		 * Get the physical address of the chain list.
1501 		 */
1502 		chain_list_addr = trq->req_pbuf;
1503 		chain_list_addr += cur_off;
1504 		if (sizeof (bus_addr_t) > 4) {
1505 			ce->Address.High =
1506 			    htole32(((uint64_t)chain_list_addr) >> 32);
1507 		}
1508 		ce->Address.Low = htole32(chain_list_addr & 0xffffffff);
1509 		ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT |
1510 			    MPI_SGE_FLAGS_64_BIT_ADDRESSING;
1511 
1512 		/*
1513 		 * If we have more than a frame's worth of segments left,
1514 		 * set up the chain list to have the last element be another
1515 		 * chain descriptor.
1516 		 */
1517 		if ((nseg - seg) > MPT_NSGL(mpt)) {
1518 			this_seg_lim = seg + MPT_NSGL(mpt) - 1;
1519 			/*
1520 			 * The length of the chain is the length in bytes of the
1521 			 * number of segments plus the next chain element.
1522 			 *
1523 			 * The next chain descriptor offset is the length,
1524 			 * in words, of the number of segments.
1525 			 */
1526 			ce->Length = (this_seg_lim - seg) *
1527 			    sizeof (SGE_SIMPLE64);
1528 			ce->NextChainOffset = ce->Length >> 2;
1529 			ce->Length += sizeof (SGE_CHAIN64);
1530 		} else {
1531 			this_seg_lim = nseg;
1532 			ce->Length = (this_seg_lim - seg) *
1533 			    sizeof (SGE_SIMPLE64);
1534 		}
1535 		ce->Length = htole16(ce->Length);
1536 
1537 		/*
1538 		 * Fill in the chain list SGE elements with our segment data.
1539 		 *
1540 		 * If we're the last element in this chain list, set the last
1541 		 * element flag. If we're the completely last element period,
1542 		 * set the end of list and end of buffer flags.
1543 		 */
1544 		while (seg < this_seg_lim) {
1545 			memset(se, 0, sizeof (*se));
1546 			se->Address.Low = htole32(dm_segs->ds_addr &
1547 			    0xffffffff);
1548 			if (sizeof (bus_addr_t) > 4) {
1549 				se->Address.High =
1550 				    htole32(((uint64_t)dm_segs->ds_addr) >> 32);
1551 			}
1552 			MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1553 			tf = flags;
1554 			if (seg ==  this_seg_lim - 1) {
1555 				tf |=	MPI_SGE_FLAGS_LAST_ELEMENT;
1556 			}
1557 			if (seg == nseg - 1) {
1558 				tf |=	MPI_SGE_FLAGS_END_OF_LIST |
1559 					MPI_SGE_FLAGS_END_OF_BUFFER;
1560 			}
1561 			MPI_pSGE_SET_FLAGS(se, tf);
1562 			se->FlagsLength = htole32(se->FlagsLength);
1563 			se++;
1564 			seg++;
1565 			dm_segs++;
1566 		}
1567 
1568     next_chain:
1569 		/*
1570 		 * If we have more segments to do and we've used up all of
1571 		 * the space in a request area, go allocate another one
1572 		 * and chain to that.
1573 		 */
1574 		if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) {
1575 			request_t *nrq;
1576 
1577 			CAMLOCK_2_MPTLOCK(mpt);
1578 			nrq = mpt_get_request(mpt, FALSE);
1579 			MPTLOCK_2_CAMLOCK(mpt);
1580 
1581 			if (nrq == NULL) {
1582 				error = ENOMEM;
1583 				goto bad;
1584 			}
1585 
1586 			/*
1587 			 * Append the new request area on the tail of our list.
1588 			 */
1589 			if ((trq = req->chain) == NULL) {
1590 				req->chain = nrq;
1591 			} else {
1592 				while (trq->chain != NULL) {
1593 					trq = trq->chain;
1594 				}
1595 				trq->chain = nrq;
1596 			}
1597 			trq = nrq;
1598 			mpt_off = trq->req_vbuf;
1599 			if (mpt->verbose >= MPT_PRT_DEBUG) {
1600 				memset(mpt_off, 0xff, MPT_REQUEST_AREA);
1601 			}
1602 			nxt_off = 0;
1603 		}
1604 	}
1605 out:
1606 
1607 	/*
1608 	 * Last time we need to check if this CCB needs to be aborted.
1609 	 */
1610 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
1611 		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1612 			request_t *cmd_req =
1613 				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1614 			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
1615 			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
1616 			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
1617 		}
1618 		mpt_prt(mpt,
1619 		    "mpt_execute_req_a64: I/O cancelled (status 0x%x)\n",
1620 		    ccb->ccb_h.status & CAM_STATUS_MASK);
1621 		if (nseg && (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
1622 			bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
1623 		}
1624 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1625 		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
1626 		xpt_done(ccb);
1627 		CAMLOCK_2_MPTLOCK(mpt);
1628 		mpt_free_request(mpt, req);
1629 		MPTLOCK_2_CAMLOCK(mpt);
1630 		return;
1631 	}
1632 
1633 	ccb->ccb_h.status |= CAM_SIM_QUEUED;
1634 	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
1635 		mpt_req_timeout(req, (ccb->ccb_h.timeout * hz) / 1000,
1636 		    mpt_timeout, ccb);
1637 	}
1638 	if (mpt->verbose > MPT_PRT_DEBUG) {
1639 		int nc = 0;
1640 		mpt_print_request(req->req_vbuf);
1641 		for (trq = req->chain; trq; trq = trq->chain) {
1642 			kprintf("  Additional Chain Area %d\n", nc++);
1643 			mpt_dump_sgl(trq->req_vbuf, 0);
1644 		}
1645 	}
1646 
1647 	if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1648 		request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1649 		mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
1650 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
1651 		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
1652 		    csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
1653 			tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
1654 		} else {
1655 			tgt->state = TGT_STATE_MOVING_DATA;
1656 		}
1657 #else
1658 		tgt->state = TGT_STATE_MOVING_DATA;
1659 #endif
1660 	}
1661 	CAMLOCK_2_MPTLOCK(mpt);
1662 	mpt_send_cmd(mpt, req);
1663 	MPTLOCK_2_CAMLOCK(mpt);
1664 }
1665 
1666 static void
1667 mpt_execute_req(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
1668 {
1669 	request_t *req, *trq;
1670 	char *mpt_off;
1671 	union ccb *ccb;
1672 	struct mpt_softc *mpt;
1673 	int seg, first_lim;
1674 	uint32_t flags, nxt_off;
1675 	void *sglp = NULL;
1676 	MSG_REQUEST_HEADER *hdrp;
1677 	SGE_SIMPLE32 *se;
1678 	SGE_CHAIN32 *ce;
1679 	int istgt = 0;
1680 
1681 	req = (request_t *)arg;
1682 	ccb = req->ccb;
1683 
1684 	mpt = ccb->ccb_h.ccb_mpt_ptr;
1685 	req = ccb->ccb_h.ccb_req_ptr;
1686 
1687 	hdrp = req->req_vbuf;
1688 	mpt_off = req->req_vbuf;
1689 
1690 
1691 	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
1692 		error = EFBIG;
1693 	}
1694 
1695 	if (error == 0) {
1696 		switch (hdrp->Function) {
1697 		case MPI_FUNCTION_SCSI_IO_REQUEST:
1698 		case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
1699 			sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL;
1700 			break;
1701 		case MPI_FUNCTION_TARGET_ASSIST:
1702 			istgt = 1;
1703 			sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL;
1704 			break;
1705 		default:
1706 			mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req\n",
1707 			    hdrp->Function);
1708 			error = EINVAL;
1709 			break;
1710 		}
1711 	}
1712 
1713 	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
1714 		error = EFBIG;
1715 		mpt_prt(mpt, "segment count %d too large (max %u)\n",
1716 		    nseg, mpt->max_seg_cnt);
1717 	}
1718 
1719 bad:
1720 	if (error != 0) {
1721 		if (error != EFBIG && error != ENOMEM) {
1722 			mpt_prt(mpt, "mpt_execute_req: err %d\n", error);
1723 		}
1724 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
1725 			cam_status status;
1726 			mpt_freeze_ccb(ccb);
1727 			if (error == EFBIG) {
1728 				status = CAM_REQ_TOO_BIG;
1729 			} else if (error == ENOMEM) {
1730 				if (mpt->outofbeer == 0) {
1731 					mpt->outofbeer = 1;
1732 					xpt_freeze_simq(mpt->sim, 1);
1733 					mpt_lprt(mpt, MPT_PRT_DEBUG,
1734 					    "FREEZEQ\n");
1735 				}
1736 				status = CAM_REQUEUE_REQ;
1737 			} else {
1738 				status = CAM_REQ_CMP_ERR;
1739 			}
1740 			mpt_set_ccb_status(ccb, status);
1741 		}
1742 		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
1743 			request_t *cmd_req =
1744 				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
1745 			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
1746 			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
1747 			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
1748 		}
1749 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
1750 		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
1751 		xpt_done(ccb);
1752 		CAMLOCK_2_MPTLOCK(mpt);
1753 		mpt_free_request(mpt, req);
1754 		MPTLOCK_2_CAMLOCK(mpt);
1755 		return;
1756 	}
1757 
1758 	/*
1759 	 * No data to transfer?
1760 	 * Just make a single simple SGL with zero length.
1761 	 */
1762 
1763 	if (mpt->verbose >= MPT_PRT_DEBUG) {
1764 		int tidx = ((char *)sglp) - mpt_off;
1765 		memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx);
1766 	}
1767 
1768 	if (nseg == 0) {
1769 		SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp;
1770 		MPI_pSGE_SET_FLAGS(se1,
1771 		    (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
1772 		    MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
1773 		se1->FlagsLength = htole32(se1->FlagsLength);
1774 		goto out;
1775 	}
1776 
1777 
1778 	flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
1779 	if (istgt == 0) {
1780 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
1781 			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
1782 		}
1783 	} else {
1784 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1785 			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
1786 		}
1787 	}
1788 
1789 	if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) {
1790 		bus_dmasync_op_t op;
1791 		if (istgt) {
1792 			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1793 				op = BUS_DMASYNC_PREREAD;
1794 			} else {
1795 				op = BUS_DMASYNC_PREWRITE;
1796 			}
1797 		} else {
1798 			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1799 				op = BUS_DMASYNC_PREWRITE;
1800 			} else {
1801 				op = BUS_DMASYNC_PREREAD;
1802 			}
1803 		}
1804 		bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
1805 	}
1806 
1807 	/*
1808 	 * Okay, fill in what we can at the end of the command frame.
1809 	 * If we have up to MPT_NSGL_FIRST, we can fit them all into
1810 	 * the command frame.
1811 	 *
1812 	 * Otherwise, we fill up through MPT_NSGL_FIRST less one
1813 	 * SIMPLE32 pointers and start doing CHAIN32 entries after
1814 	 * that.
1815 	 */
1816 
1817 	if (nseg < MPT_NSGL_FIRST(mpt)) {
1818 		first_lim = nseg;
1819 	} else {
1820 		/*
1821 		 * Leave room for CHAIN element
1822 		 */
1823 		first_lim = MPT_NSGL_FIRST(mpt) - 1;
1824 	}
1825 
1826 	se = (SGE_SIMPLE32 *) sglp;
1827 	for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) {
1828 		uint32_t tf;
1829 
1830 		memset(se, 0,sizeof (*se));
1831 		se->Address = htole32(dm_segs->ds_addr);
1832 
1833 
1834 
1835 		MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1836 		tf = flags;
1837 		if (seg == first_lim - 1) {
1838 			tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
1839 		}
1840 		if (seg == nseg - 1) {
1841 			tf |=	MPI_SGE_FLAGS_END_OF_LIST |
1842 				MPI_SGE_FLAGS_END_OF_BUFFER;
1843 		}
1844 		MPI_pSGE_SET_FLAGS(se, tf);
1845 		se->FlagsLength = htole32(se->FlagsLength);
1846 	}
1847 
1848 	if (seg == nseg) {
1849 		goto out;
1850 	}
1851 
1852 	/*
1853 	 * Tell the IOC where to find the first chain element.
1854 	 */
1855 	hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2;
1856 	nxt_off = MPT_RQSL(mpt);
1857 	trq = req;
1858 
1859 	/*
1860 	 * Make up the rest of the data segments out of a chain element
1861 	 * (contiained in the current request frame) which points to
1862 	 * SIMPLE32 elements in the next request frame, possibly ending
1863 	 * with *another* chain element (if there's more).
1864 	 */
1865 	while (seg < nseg) {
1866 		int this_seg_lim;
1867 		uint32_t tf, cur_off;
1868 		bus_addr_t chain_list_addr;
1869 
1870 		/*
1871 		 * Point to the chain descriptor. Note that the chain
1872 		 * descriptor is at the end of the *previous* list (whether
1873 		 * chain or simple).
1874 		 */
1875 		ce = (SGE_CHAIN32 *) se;
1876 
1877 		/*
1878 		 * Before we change our current pointer, make  sure we won't
1879 		 * overflow the request area with this frame. Note that we
1880 		 * test against 'greater than' here as it's okay in this case
1881 		 * to have next offset be just outside the request area.
1882 		 */
1883 		if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) {
1884 			nxt_off = MPT_REQUEST_AREA;
1885 			goto next_chain;
1886 		}
1887 
1888 		/*
1889 		 * Set our SGE element pointer to the beginning of the chain
1890 		 * list and update our next chain list offset.
1891 		 */
1892 		se = (SGE_SIMPLE32 *) &mpt_off[nxt_off];
1893 		cur_off = nxt_off;
1894 		nxt_off += MPT_RQSL(mpt);
1895 
1896 		/*
1897 		 * Now initialized the chain descriptor.
1898 		 */
1899 		memset(ce, 0, sizeof (*ce));
1900 
1901 		/*
1902 		 * Get the physical address of the chain list.
1903 		 */
1904 		chain_list_addr = trq->req_pbuf;
1905 		chain_list_addr += cur_off;
1906 
1907 
1908 
1909 		ce->Address = htole32(chain_list_addr);
1910 		ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;
1911 
1912 
1913 		/*
1914 		 * If we have more than a frame's worth of segments left,
1915 		 * set up the chain list to have the last element be another
1916 		 * chain descriptor.
1917 		 */
1918 		if ((nseg - seg) > MPT_NSGL(mpt)) {
1919 			this_seg_lim = seg + MPT_NSGL(mpt) - 1;
1920 			/*
1921 			 * The length of the chain is the length in bytes of the
1922 			 * number of segments plus the next chain element.
1923 			 *
1924 			 * The next chain descriptor offset is the length,
1925 			 * in words, of the number of segments.
1926 			 */
1927 			ce->Length = (this_seg_lim - seg) *
1928 			    sizeof (SGE_SIMPLE32);
1929 			ce->NextChainOffset = ce->Length >> 2;
1930 			ce->Length += sizeof (SGE_CHAIN32);
1931 		} else {
1932 			this_seg_lim = nseg;
1933 			ce->Length = (this_seg_lim - seg) *
1934 			    sizeof (SGE_SIMPLE32);
1935 		}
1936 		ce->Length = htole16(ce->Length);
1937 
1938 		/*
1939 		 * Fill in the chain list SGE elements with our segment data.
1940 		 *
1941 		 * If we're the last element in this chain list, set the last
1942 		 * element flag. If we're the completely last element period,
1943 		 * set the end of list and end of buffer flags.
1944 		 */
1945 		while (seg < this_seg_lim) {
1946 			memset(se, 0, sizeof (*se));
1947 			se->Address = htole32(dm_segs->ds_addr);
1948 
1949 
1950 
1951 
1952 			MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
1953 			tf = flags;
1954 			if (seg ==  this_seg_lim - 1) {
1955 				tf |=	MPI_SGE_FLAGS_LAST_ELEMENT;
1956 			}
1957 			if (seg == nseg - 1) {
1958 				tf |=	MPI_SGE_FLAGS_END_OF_LIST |
1959 					MPI_SGE_FLAGS_END_OF_BUFFER;
1960 			}
1961 			MPI_pSGE_SET_FLAGS(se, tf);
1962 			se->FlagsLength = htole32(se->FlagsLength);
1963 			se++;
1964 			seg++;
1965 			dm_segs++;
1966 		}
1967 
1968     next_chain:
1969 		/*
1970 		 * If we have more segments to do and we've used up all of
1971 		 * the space in a request area, go allocate another one
1972 		 * and chain to that.
1973 		 */
1974 		if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) {
1975 			request_t *nrq;
1976 
1977 			CAMLOCK_2_MPTLOCK(mpt);
1978 			nrq = mpt_get_request(mpt, FALSE);
1979 			MPTLOCK_2_CAMLOCK(mpt);
1980 
1981 			if (nrq == NULL) {
1982 				error = ENOMEM;
1983 				goto bad;
1984 			}
1985 
1986 			/*
1987 			 * Append the new request area on the tail of our list.
1988 			 */
1989 			if ((trq = req->chain) == NULL) {
1990 				req->chain = nrq;
1991 			} else {
1992 				while (trq->chain != NULL) {
1993 					trq = trq->chain;
1994 				}
1995 				trq->chain = nrq;
1996 			}
1997 			trq = nrq;
1998 			mpt_off = trq->req_vbuf;
1999 			if (mpt->verbose >= MPT_PRT_DEBUG) {
2000 				memset(mpt_off, 0xff, MPT_REQUEST_AREA);
2001 			}
2002 			nxt_off = 0;
2003 		}
2004 	}
2005 out:
2006 
2007 	/*
2008 	 * Last time we need to check if this CCB needs to be aborted.
2009 	 */
2010 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
2011 		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
2012 			request_t *cmd_req =
2013 				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
2014 			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
2015 			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
2016 			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
2017 		}
2018 		mpt_prt(mpt,
2019 		    "mpt_execute_req: I/O cancelled (status 0x%x)\n",
2020 		    ccb->ccb_h.status & CAM_STATUS_MASK);
2021 		if (nseg && (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
2022 			bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
2023 		}
2024 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2025 		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
2026 		xpt_done(ccb);
2027 		CAMLOCK_2_MPTLOCK(mpt);
2028 		mpt_free_request(mpt, req);
2029 		MPTLOCK_2_CAMLOCK(mpt);
2030 		return;
2031 	}
2032 
2033 	ccb->ccb_h.status |= CAM_SIM_QUEUED;
2034 	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
2035 		mpt_req_timeout(req, (ccb->ccb_h.timeout * hz) / 1000,
2036 		    mpt_timeout, ccb);
2037 	}
2038 	if (mpt->verbose > MPT_PRT_DEBUG) {
2039 		int nc = 0;
2040 		mpt_print_request(req->req_vbuf);
2041 		for (trq = req->chain; trq; trq = trq->chain) {
2042 			kprintf("  Additional Chain Area %d\n", nc++);
2043 			mpt_dump_sgl(trq->req_vbuf, 0);
2044 		}
2045 	}
2046 
2047 	if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
2048 		request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
2049 		mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
2050 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
2051 		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
2052 		    csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
2053 			tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
2054 		} else {
2055 			tgt->state = TGT_STATE_MOVING_DATA;
2056 		}
2057 #else
2058 		tgt->state = TGT_STATE_MOVING_DATA;
2059 #endif
2060 	}
2061 	CAMLOCK_2_MPTLOCK(mpt);
2062 	mpt_send_cmd(mpt, req);
2063 	MPTLOCK_2_CAMLOCK(mpt);
2064 }
2065 
2066 static void
2067 mpt_start(struct cam_sim *sim, union ccb *ccb)
2068 {
2069 	request_t *req;
2070 	struct mpt_softc *mpt;
2071 	MSG_SCSI_IO_REQUEST *mpt_req;
2072 	struct ccb_scsiio *csio = &ccb->csio;
2073 	struct ccb_hdr *ccbh = &ccb->ccb_h;
2074 	bus_dmamap_callback_t *cb;
2075 	target_id_t tgt;
2076 	int raid_passthru;
2077 
2078 	/* Get the pointer for the physical addapter */
2079 	mpt = ccb->ccb_h.ccb_mpt_ptr;
2080 	raid_passthru = (sim == mpt->phydisk_sim);
2081 
2082 	CAMLOCK_2_MPTLOCK(mpt);
2083 	if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
2084 		if (mpt->outofbeer == 0) {
2085 			mpt->outofbeer = 1;
2086 			xpt_freeze_simq(mpt->sim, 1);
2087 			mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
2088 		}
2089 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2090 		mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
2091 		MPTLOCK_2_CAMLOCK(mpt);
2092 		xpt_done(ccb);
2093 		return;
2094 	}
2095 #ifdef	INVARIANTS
2096 	mpt_req_not_spcl(mpt, req, "mpt_start", __LINE__);
2097 #endif
2098 	MPTLOCK_2_CAMLOCK(mpt);
2099 
2100 	if (sizeof (bus_addr_t) > 4) {
2101 		cb = mpt_execute_req_a64;
2102 	} else {
2103 		cb = mpt_execute_req;
2104 	}
2105 
2106 	/*
2107 	 * Link the ccb and the request structure so we can find
2108 	 * the other knowing either the request or the ccb
2109 	 */
2110 	req->ccb = ccb;
2111 	ccb->ccb_h.ccb_req_ptr = req;
2112 
2113 	/* Now we build the command for the IOC */
2114 	mpt_req = req->req_vbuf;
2115 	memset(mpt_req, 0, sizeof (MSG_SCSI_IO_REQUEST));
2116 
2117 	mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
2118 	if (raid_passthru) {
2119 		mpt_req->Function = MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
2120 		CAMLOCK_2_MPTLOCK(mpt);
2121 		if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) {
2122 			MPTLOCK_2_CAMLOCK(mpt);
2123 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2124 			mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE);
2125 			xpt_done(ccb);
2126 			return;
2127 		}
2128 		MPTLOCK_2_CAMLOCK(mpt);
2129 		mpt_req->Bus = 0;	/* we never set bus here */
2130 	} else {
2131 		tgt = ccb->ccb_h.target_id;
2132 		mpt_req->Bus = 0;	/* XXX */
2133 
2134 	}
2135 	mpt_req->SenseBufferLength =
2136 		(csio->sense_len < MPT_SENSE_SIZE) ?
2137 		 csio->sense_len : MPT_SENSE_SIZE;
2138 
2139 	/*
2140 	 * We use the message context to find the request structure when we
2141 	 * Get the command completion interrupt from the IOC.
2142 	 */
2143 	mpt_req->MsgContext = htole32(req->index | scsi_io_handler_id);
2144 
2145 	/* Which physical device to do the I/O on */
2146 	mpt_req->TargetID = tgt;
2147 
2148 	/* We assume a single level LUN type */
2149 	if (ccb->ccb_h.target_lun >= MPT_MAX_LUNS) {
2150 		mpt_req->LUN[0] = 0x40 | ((ccb->ccb_h.target_lun >> 8) & 0x3f);
2151 		mpt_req->LUN[1] = ccb->ccb_h.target_lun & 0xff;
2152 	} else {
2153 		mpt_req->LUN[1] = ccb->ccb_h.target_lun;
2154 	}
2155 
2156 	/* Set the direction of the transfer */
2157 	if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2158 		mpt_req->Control = MPI_SCSIIO_CONTROL_READ;
2159 	} else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
2160 		mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE;
2161 	} else {
2162 		mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER;
2163 	}
2164 
2165 	if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
2166 		switch(ccb->csio.tag_action) {
2167 		case MSG_HEAD_OF_Q_TAG:
2168 			mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ;
2169 			break;
2170 		case MSG_ACA_TASK:
2171 			mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ;
2172 			break;
2173 		case MSG_ORDERED_Q_TAG:
2174 			mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ;
2175 			break;
2176 		case MSG_SIMPLE_Q_TAG:
2177 		default:
2178 			mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
2179 			break;
2180 		}
2181 	} else {
2182 		if (mpt->is_fc || mpt->is_sas) {
2183 			mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
2184 		} else {
2185 			/* XXX No such thing for a target doing packetized. */
2186 			mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;
2187 		}
2188 	}
2189 
2190 	if (mpt->is_spi) {
2191 		if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) {
2192 			mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT;
2193 		}
2194 	}
2195 	mpt_req->Control = htole32(mpt_req->Control);
2196 
2197 	/* Copy the scsi command block into place */
2198 	if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
2199 		bcopy(csio->cdb_io.cdb_ptr, mpt_req->CDB, csio->cdb_len);
2200 	} else {
2201 		bcopy(csio->cdb_io.cdb_bytes, mpt_req->CDB, csio->cdb_len);
2202 	}
2203 
2204 	mpt_req->CDBLength = csio->cdb_len;
2205 	mpt_req->DataLength = htole32(csio->dxfer_len);
2206 	mpt_req->SenseBufferLowAddr = htole32(req->sense_pbuf);
2207 
2208 	/*
2209 	 * Do a *short* print here if we're set to MPT_PRT_DEBUG
2210 	 */
2211 	if (mpt->verbose == MPT_PRT_DEBUG) {
2212 		U32 df;
2213 		mpt_prt(mpt, "mpt_start: %s op 0x%x ",
2214 		    (mpt_req->Function == MPI_FUNCTION_SCSI_IO_REQUEST)?
2215 		    "SCSI_IO_REQUEST" : "SCSI_IO_PASSTHRU", mpt_req->CDB[0]);
2216 		df = mpt_req->Control & MPI_SCSIIO_CONTROL_DATADIRECTION_MASK;
2217 		if (df != MPI_SCSIIO_CONTROL_NODATATRANSFER) {
2218 			mpt_prtc(mpt, "(%s %u byte%s ",
2219 			    (df == MPI_SCSIIO_CONTROL_READ)?
2220 			    "read" : "write",  csio->dxfer_len,
2221 			    (csio->dxfer_len == 1)? ")" : "s)");
2222 		}
2223 		mpt_prtc(mpt, "tgt %u lun %u req %p:%u\n", tgt,
2224 		    ccb->ccb_h.target_lun, req, req->serno);
2225 	}
2226 
2227 	/*
2228 	 * If we have any data to send with this command map it into bus space.
2229 	 */
2230 	if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
2231 		if ((ccbh->flags & CAM_SCATTER_VALID) == 0) {
2232 			/*
2233 			 * We've been given a pointer to a single buffer.
2234 			 */
2235 			if ((ccbh->flags & CAM_DATA_PHYS) == 0) {
2236 				/*
2237 				 * Virtual address that needs to translated into
2238 				 * one or more physical address ranges.
2239 				 */
2240 				int error;
2241 				crit_enter();
2242 				error = bus_dmamap_load(mpt->buffer_dmat,
2243 				    req->dmap, csio->data_ptr, csio->dxfer_len,
2244 				    cb, req, 0);
2245 				crit_exit();
2246 				if (error == EINPROGRESS) {
2247 					/*
2248 					 * So as to maintain ordering,
2249 					 * freeze the controller queue
2250 					 * until our mapping is
2251 					 * returned.
2252 					 */
2253 					xpt_freeze_simq(mpt->sim, 1);
2254 					ccbh->status |= CAM_RELEASE_SIMQ;
2255 				}
2256 			} else {
2257 				/*
2258 				 * We have been given a pointer to single
2259 				 * physical buffer.
2260 				 */
2261 				struct bus_dma_segment seg;
2262 				seg.ds_addr =
2263 				    (bus_addr_t)(vm_offset_t)csio->data_ptr;
2264 				seg.ds_len = csio->dxfer_len;
2265 				(*cb)(req, &seg, 1, 0);
2266 			}
2267 		} else {
2268 			/*
2269 			 * We have been given a list of addresses.
2270 			 * This case could be easily supported but they are not
2271 			 * currently generated by the CAM subsystem so there
2272 			 * is no point in wasting the time right now.
2273 			 */
2274 			struct bus_dma_segment *segs;
2275 			if ((ccbh->flags & CAM_SG_LIST_PHYS) == 0) {
2276 				(*cb)(req, NULL, 0, EFAULT);
2277 			} else {
2278 				/* Just use the segments provided */
2279 				segs = (struct bus_dma_segment *)csio->data_ptr;
2280 				(*cb)(req, segs, csio->sglist_cnt, 0);
2281 			}
2282 		}
2283 	} else {
2284 		(*cb)(req, NULL, 0, 0);
2285 	}
2286 }
2287 
2288 static int
2289 mpt_bus_reset(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun,
2290     int sleep_ok)
2291 {
2292 	int   error;
2293 	uint16_t status;
2294 	uint8_t response;
2295 
2296 	error = mpt_scsi_send_tmf(mpt,
2297 	    (tgt != CAM_TARGET_WILDCARD || lun != CAM_LUN_WILDCARD) ?
2298 	    MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET :
2299 	    MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
2300 	    mpt->is_fc ? MPI_SCSITASKMGMT_MSGFLAGS_LIP_RESET_OPTION : 0,
2301 	    0,	/* XXX How do I get the channel ID? */
2302 	    tgt != CAM_TARGET_WILDCARD ? tgt : 0,
2303 	    lun != CAM_LUN_WILDCARD ? lun : 0,
2304 	    0, sleep_ok);
2305 
2306 	if (error != 0) {
2307 		/*
2308 		 * mpt_scsi_send_tmf hard resets on failure, so no
2309 		 * need to do so here.
2310 		 */
2311 		mpt_prt(mpt,
2312 		    "mpt_bus_reset: mpt_scsi_send_tmf returned %d\n", error);
2313 		return (EIO);
2314 	}
2315 
2316 	/* Wait for bus reset to be processed by the IOC. */
2317 	error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE,
2318 	    REQ_STATE_DONE, sleep_ok, 5000);
2319 
2320 	status = le16toh(mpt->tmf_req->IOCStatus);
2321 	response = mpt->tmf_req->ResponseCode;
2322 	mpt->tmf_req->state = REQ_STATE_FREE;
2323 
2324 	if (error) {
2325 		mpt_prt(mpt, "mpt_bus_reset: Reset timed-out. "
2326 		    "Resetting controller.\n");
2327 		mpt_reset(mpt, TRUE);
2328 		return (ETIMEDOUT);
2329 	}
2330 
2331 	if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
2332 		mpt_prt(mpt, "mpt_bus_reset: TMF IOC Status 0x%x. "
2333 		    "Resetting controller.\n", status);
2334 		mpt_reset(mpt, TRUE);
2335 		return (EIO);
2336 	}
2337 
2338 	if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED &&
2339 	    response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) {
2340 		mpt_prt(mpt, "mpt_bus_reset: TMF Response 0x%x. "
2341 		    "Resetting controller.\n", response);
2342 		mpt_reset(mpt, TRUE);
2343 		return (EIO);
2344 	}
2345 	return (0);
2346 }
2347 
2348 static int
2349 mpt_fc_reset_link(struct mpt_softc *mpt, int dowait)
2350 {
2351 	int r = 0;
2352 	request_t *req;
2353 	PTR_MSG_FC_PRIMITIVE_SEND_REQUEST fc;
2354 
2355 	req = mpt_get_request(mpt, FALSE);
2356 	if (req == NULL) {
2357 		return (ENOMEM);
2358 	}
2359 	fc = req->req_vbuf;
2360 	memset(fc, 0, sizeof(*fc));
2361 	fc->SendFlags = MPI_FC_PRIM_SEND_FLAGS_RESET_LINK;
2362 	fc->Function = MPI_FUNCTION_FC_PRIMITIVE_SEND;
2363 	fc->MsgContext = htole32(req->index | fc_els_handler_id);
2364 	mpt_send_cmd(mpt, req);
2365 	if (dowait) {
2366 		r = mpt_wait_req(mpt, req, REQ_STATE_DONE,
2367 		    REQ_STATE_DONE, FALSE, 60 * 1000);
2368 		if (r == 0) {
2369 			mpt_free_request(mpt, req);
2370 		}
2371 	}
2372 	return (r);
2373 }
2374 
2375 static void
2376 mpt_cam_rescan_callback(struct cam_periph *periph, union ccb *ccb)
2377 {
2378     xpt_free_path(ccb->ccb_h.path);
2379     kfree(ccb, M_TEMP);
2380 }
2381 
2382 static int
2383 mpt_cam_event(struct mpt_softc *mpt, request_t *req,
2384 	      MSG_EVENT_NOTIFY_REPLY *msg)
2385 {
2386 	uint32_t data0, data1;
2387 
2388 	data0 = le32toh(msg->Data[0]);
2389 	data1 = le32toh(msg->Data[1]);
2390 	switch(msg->Event & 0xFF) {
2391 	case MPI_EVENT_UNIT_ATTENTION:
2392 		mpt_prt(mpt, "UNIT ATTENTION: Bus: 0x%02x TargetID: 0x%02x\n",
2393 		    (data0 >> 8) & 0xff, data0 & 0xff);
2394 		break;
2395 
2396 	case MPI_EVENT_IOC_BUS_RESET:
2397 		/* We generated a bus reset */
2398 		mpt_prt(mpt, "IOC Generated Bus Reset Port: %d\n",
2399 		    (data0 >> 8) & 0xff);
2400 		xpt_async(AC_BUS_RESET, mpt->path, NULL);
2401 		break;
2402 
2403 	case MPI_EVENT_EXT_BUS_RESET:
2404 		/* Someone else generated a bus reset */
2405 		mpt_prt(mpt, "External Bus Reset Detected\n");
2406 		/*
2407 		 * These replies don't return EventData like the MPI
2408 		 * spec says they do
2409 		 */
2410 		xpt_async(AC_BUS_RESET, mpt->path, NULL);
2411 		break;
2412 
2413 	case MPI_EVENT_RESCAN:
2414 	{
2415 		union ccb *ccb;
2416 		uint32_t pathid;
2417 		/*
2418 		 * In general this means a device has been added to the loop.
2419 		 */
2420 		mpt_prt(mpt, "Rescan Port: %d\n", (data0 >> 8) & 0xff);
2421 		if (mpt->ready == 0) {
2422 			break;
2423 		}
2424 		if (mpt->phydisk_sim) {
2425 			pathid = cam_sim_path(mpt->phydisk_sim);
2426 		} else {
2427 			pathid = cam_sim_path(mpt->sim);
2428 		}
2429 		MPTLOCK_2_CAMLOCK(mpt);
2430 		/*
2431 		 * Allocate a CCB, create a wildcard path for this bus,
2432 		 * and schedule a rescan.
2433 		 */
2434 		ccb = kmalloc(sizeof(union ccb), M_TEMP, M_WAITOK | M_ZERO);
2435 
2436 		if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, pathid,
2437 		    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2438 			CAMLOCK_2_MPTLOCK(mpt);
2439 			mpt_prt(mpt, "unable to create path for rescan\n");
2440 			kfree(ccb, M_TEMP);
2441 			break;
2442 		}
2443 
2444 		xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, 5/*priority (low)*/);
2445 		ccb->ccb_h.func_code = XPT_SCAN_BUS;
2446 		ccb->ccb_h.cbfcnp = mpt_cam_rescan_callback;
2447 		ccb->crcn.flags = CAM_FLAG_NONE;
2448 		xpt_action(ccb);
2449 
2450 		/* scan is now in progress */
2451 
2452 		CAMLOCK_2_MPTLOCK(mpt);
2453 		break;
2454 	}
2455 	case MPI_EVENT_LINK_STATUS_CHANGE:
2456 		mpt_prt(mpt, "Port %d: LinkState: %s\n",
2457 		    (data1 >> 8) & 0xff,
2458 		    ((data0 & 0xff) == 0)?  "Failed" : "Active");
2459 		break;
2460 
2461 	case MPI_EVENT_LOOP_STATE_CHANGE:
2462 		switch ((data0 >> 16) & 0xff) {
2463 		case 0x01:
2464 			mpt_prt(mpt,
2465 			    "Port 0x%x: FC LinkEvent: LIP(%02x,%02x) "
2466 			    "(Loop Initialization)\n",
2467 			    (data1 >> 8) & 0xff,
2468 			    (data0 >> 8) & 0xff,
2469 			    (data0     ) & 0xff);
2470 			switch ((data0 >> 8) & 0xff) {
2471 			case 0xF7:
2472 				if ((data0 & 0xff) == 0xF7) {
2473 					mpt_prt(mpt, "Device needs AL_PA\n");
2474 				} else {
2475 					mpt_prt(mpt, "Device %02x doesn't like "
2476 					    "FC performance\n",
2477 					    data0 & 0xFF);
2478 				}
2479 				break;
2480 			case 0xF8:
2481 				if ((data0 & 0xff) == 0xF7) {
2482 					mpt_prt(mpt, "Device had loop failure "
2483 					    "at its receiver prior to acquiring"
2484 					    " AL_PA\n");
2485 				} else {
2486 					mpt_prt(mpt, "Device %02x detected loop"
2487 					    " failure at its receiver\n",
2488 					    data0 & 0xFF);
2489 				}
2490 				break;
2491 			default:
2492 				mpt_prt(mpt, "Device %02x requests that device "
2493 				    "%02x reset itself\n",
2494 				    data0 & 0xFF,
2495 				    (data0 >> 8) & 0xFF);
2496 				break;
2497 			}
2498 			break;
2499 		case 0x02:
2500 			mpt_prt(mpt, "Port 0x%x: FC LinkEvent: "
2501 			    "LPE(%02x,%02x) (Loop Port Enable)\n",
2502 			    (data1 >> 8) & 0xff, /* Port */
2503 			    (data0 >>  8) & 0xff, /* Character 3 */
2504 			    (data0      ) & 0xff  /* Character 4 */);
2505 			break;
2506 		case 0x03:
2507 			mpt_prt(mpt, "Port 0x%x: FC LinkEvent: "
2508 			    "LPB(%02x,%02x) (Loop Port Bypass)\n",
2509 			    (data1 >> 8) & 0xff, /* Port */
2510 			    (data0 >> 8) & 0xff, /* Character 3 */
2511 			    (data0     ) & 0xff  /* Character 4 */);
2512 			break;
2513 		default:
2514 			mpt_prt(mpt, "Port 0x%x: FC LinkEvent: Unknown "
2515 			    "FC event (%02x %02x %02x)\n",
2516 			    (data1 >> 8) & 0xff, /* Port */
2517 			    (data0 >> 16) & 0xff, /* Event */
2518 			    (data0 >>  8) & 0xff, /* Character 3 */
2519 			    (data0      ) & 0xff  /* Character 4 */);
2520 		}
2521 		break;
2522 
2523 	case MPI_EVENT_LOGOUT:
2524 		mpt_prt(mpt, "FC Logout Port: %d N_PortID: %02x\n",
2525 		    (data1 >> 8) & 0xff, data0);
2526 		break;
2527 	case MPI_EVENT_QUEUE_FULL:
2528 	{
2529 		struct cam_sim *sim;
2530 		struct cam_path *tmppath;
2531 		struct ccb_relsim crs;
2532 		PTR_EVENT_DATA_QUEUE_FULL pqf;
2533 		lun_id_t lun_id;
2534 
2535 		pqf = (PTR_EVENT_DATA_QUEUE_FULL)msg->Data;
2536 		pqf->CurrentDepth = le16toh(pqf->CurrentDepth);
2537 		mpt_prt(mpt, "QUEUE FULL EVENT: Bus 0x%02x Target 0x%02x Depth "
2538 		    "%d\n", pqf->Bus, pqf->TargetID, pqf->CurrentDepth);
2539 		if (mpt->phydisk_sim) {
2540 			sim = mpt->phydisk_sim;
2541 		} else {
2542 			sim = mpt->sim;
2543 		}
2544 		MPTLOCK_2_CAMLOCK(mpt);
2545 		for (lun_id = 0; lun_id < MPT_MAX_LUNS; lun_id++) {
2546 			if (xpt_create_path(&tmppath, NULL, cam_sim_path(sim),
2547 			    pqf->TargetID, lun_id) != CAM_REQ_CMP) {
2548 				mpt_prt(mpt, "unable to create a path to send "
2549 				    "XPT_REL_SIMQ");
2550 				CAMLOCK_2_MPTLOCK(mpt);
2551 				break;
2552 			}
2553 			xpt_setup_ccb(&crs.ccb_h, tmppath, 5);
2554 			crs.ccb_h.func_code = XPT_REL_SIMQ;
2555 			crs.ccb_h.flags = CAM_DEV_QFREEZE;
2556 			crs.release_flags = RELSIM_ADJUST_OPENINGS;
2557 			crs.openings = pqf->CurrentDepth - 1;
2558 			xpt_action((union ccb *)&crs);
2559 			if (crs.ccb_h.status != CAM_REQ_CMP) {
2560 				mpt_prt(mpt, "XPT_REL_SIMQ failed\n");
2561 			}
2562 			xpt_free_path(tmppath);
2563 		}
2564 		CAMLOCK_2_MPTLOCK(mpt);
2565 		break;
2566 	}
2567 	case MPI_EVENT_IR_RESYNC_UPDATE:
2568 		mpt_prt(mpt, "IR resync update %d completed\n",
2569 		    (data0 >> 16) & 0xff);
2570 		break;
2571 	case MPI_EVENT_EVENT_CHANGE:
2572 	case MPI_EVENT_INTEGRATED_RAID:
2573 	case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
2574 	case MPI_EVENT_SAS_SES:
2575 		break;
2576 	default:
2577 		mpt_lprt(mpt, MPT_PRT_WARN, "mpt_cam_event: 0x%x\n",
2578 		    msg->Event & 0xFF);
2579 		return (0);
2580 	}
2581 	return (1);
2582 }
2583 
2584 /*
2585  * Reply path for all SCSI I/O requests, called from our
2586  * interrupt handler by extracting our handler index from
2587  * the MsgContext field of the reply from the IOC.
2588  *
2589  * This routine is optimized for the common case of a
2590  * completion without error.  All exception handling is
2591  * offloaded to non-inlined helper routines to minimize
2592  * cache footprint.
2593  */
2594 static int
2595 mpt_scsi_reply_handler(struct mpt_softc *mpt, request_t *req,
2596     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2597 {
2598 	MSG_SCSI_IO_REQUEST *scsi_req;
2599 	union ccb *ccb;
2600 
2601 	if (req->state == REQ_STATE_FREE) {
2602 		mpt_prt(mpt, "mpt_scsi_reply_handler: req already free\n");
2603 		return (TRUE);
2604 	}
2605 
2606 	scsi_req = (MSG_SCSI_IO_REQUEST *)req->req_vbuf;
2607 	ccb = req->ccb;
2608 	if (ccb == NULL) {
2609 		mpt_prt(mpt, "mpt_scsi_reply_handler: req %p:%u with no ccb\n",
2610 		    req, req->serno);
2611 		return (TRUE);
2612 	}
2613 
2614 	mpt_req_untimeout(req, mpt_timeout, ccb);
2615 	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
2616 
2617 	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
2618 		bus_dmasync_op_t op;
2619 
2620 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
2621 			op = BUS_DMASYNC_POSTREAD;
2622 		else
2623 			op = BUS_DMASYNC_POSTWRITE;
2624 		bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
2625 		bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
2626 	}
2627 
2628 	if (reply_frame == NULL) {
2629 		/*
2630 		 * Context only reply, completion without error status.
2631 		 */
2632 		ccb->csio.resid = 0;
2633 		mpt_set_ccb_status(ccb, CAM_REQ_CMP);
2634 		ccb->csio.scsi_status = SCSI_STATUS_OK;
2635 	} else {
2636 		mpt_scsi_reply_frame_handler(mpt, req, reply_frame);
2637 	}
2638 
2639 	if (mpt->outofbeer) {
2640 		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
2641 		mpt->outofbeer = 0;
2642 		mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
2643 	}
2644 	if (scsi_req->CDB[0] == INQUIRY && (scsi_req->CDB[1] & SI_EVPD) == 0) {
2645 		struct scsi_inquiry_data *iq =
2646 		    (struct scsi_inquiry_data *)ccb->csio.data_ptr;
2647 		if (scsi_req->Function ==
2648 		    MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
2649 			/*
2650 			 * Fake out the device type so that only the
2651 			 * pass-thru device will attach.
2652 			 */
2653 			iq->device &= ~0x1F;
2654 			iq->device |= T_NODEVICE;
2655 		}
2656 	}
2657 	if (mpt->verbose == MPT_PRT_DEBUG) {
2658 		mpt_prt(mpt, "mpt_scsi_reply_handler: %p:%u complete\n",
2659 		    req, req->serno);
2660 	}
2661 	KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
2662 	MPTLOCK_2_CAMLOCK(mpt);
2663 	xpt_done(ccb);
2664 	CAMLOCK_2_MPTLOCK(mpt);
2665 	if ((req->state & REQ_STATE_TIMEDOUT) == 0) {
2666 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2667 	} else {
2668 		mpt_prt(mpt, "completing timedout/aborted req %p:%u\n",
2669 		    req, req->serno);
2670 		TAILQ_REMOVE(&mpt->request_timeout_list, req, links);
2671 	}
2672 	KASSERT((req->state & REQ_STATE_NEED_WAKEUP) == 0,
2673 	    ("CCB req needed wakeup"));
2674 #ifdef	INVARIANTS
2675 	mpt_req_not_spcl(mpt, req, "mpt_scsi_reply_handler", __LINE__);
2676 #endif
2677 	mpt_free_request(mpt, req);
2678 	return (TRUE);
2679 }
2680 
2681 static int
2682 mpt_scsi_tmf_reply_handler(struct mpt_softc *mpt, request_t *req,
2683     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2684 {
2685 	MSG_SCSI_TASK_MGMT_REPLY *tmf_reply;
2686 
2687 	KASSERT(req == mpt->tmf_req, ("TMF Reply not using mpt->tmf_req"));
2688 #ifdef	INVARIANTS
2689 	mpt_req_not_spcl(mpt, req, "mpt_scsi_tmf_reply_handler", __LINE__);
2690 #endif
2691 	tmf_reply = (MSG_SCSI_TASK_MGMT_REPLY *)reply_frame;
2692 	/* Record IOC Status and Response Code of TMF for any waiters. */
2693 	req->IOCStatus = le16toh(tmf_reply->IOCStatus);
2694 	req->ResponseCode = tmf_reply->ResponseCode;
2695 
2696 	mpt_lprt(mpt, MPT_PRT_DEBUG, "TMF complete: req %p:%u status 0x%x\n",
2697 	    req, req->serno, le16toh(tmf_reply->IOCStatus));
2698 	TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2699 	if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
2700 		req->state |= REQ_STATE_DONE;
2701 		wakeup(req);
2702 	} else {
2703 		mpt->tmf_req->state = REQ_STATE_FREE;
2704 	}
2705 	return (TRUE);
2706 }
2707 
2708 /*
2709  * XXX: Move to definitions file
2710  */
2711 #define	ELS	0x22
2712 #define	FC4LS	0x32
2713 #define	ABTS	0x81
2714 #define	BA_ACC	0x84
2715 
2716 #define	LS_RJT	0x01
2717 #define	LS_ACC	0x02
2718 #define	PLOGI	0x03
2719 #define	LOGO	0x05
2720 #define SRR	0x14
2721 #define PRLI	0x20
2722 #define PRLO	0x21
2723 #define ADISC	0x52
2724 #define RSCN	0x61
2725 
2726 static void
2727 mpt_fc_els_send_response(struct mpt_softc *mpt, request_t *req,
2728     PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp, U8 length)
2729 {
2730 	uint32_t fl;
2731 	MSG_LINK_SERVICE_RSP_REQUEST tmp;
2732 	PTR_MSG_LINK_SERVICE_RSP_REQUEST rsp;
2733 
2734 	/*
2735 	 * We are going to reuse the ELS request to send this response back.
2736 	 */
2737 	rsp = &tmp;
2738 	memset(rsp, 0, sizeof(*rsp));
2739 
2740 #ifdef	USE_IMMEDIATE_LINK_DATA
2741 	/*
2742 	 * Apparently the IMMEDIATE stuff doesn't seem to work.
2743 	 */
2744 	rsp->RspFlags = LINK_SERVICE_RSP_FLAGS_IMMEDIATE;
2745 #endif
2746 	rsp->RspLength = length;
2747 	rsp->Function = MPI_FUNCTION_FC_LINK_SRVC_RSP;
2748 	rsp->MsgContext = htole32(req->index | fc_els_handler_id);
2749 
2750 	/*
2751 	 * Copy over information from the original reply frame to
2752 	 * it's correct place in the response.
2753 	 */
2754 	memcpy((U8 *)rsp + 0x0c, (U8 *)rp + 0x1c, 24);
2755 
2756 	/*
2757 	 * And now copy back the temporary area to the original frame.
2758 	 */
2759 	memcpy(req->req_vbuf, rsp, sizeof (MSG_LINK_SERVICE_RSP_REQUEST));
2760 	rsp = req->req_vbuf;
2761 
2762 #ifdef	USE_IMMEDIATE_LINK_DATA
2763 	memcpy((U8 *)&rsp->SGL, &((U8 *)req->req_vbuf)[MPT_RQSL(mpt)], length);
2764 #else
2765 {
2766 	PTR_SGE_SIMPLE32 se = (PTR_SGE_SIMPLE32) &rsp->SGL;
2767 	bus_addr_t paddr = req->req_pbuf;
2768 	paddr += MPT_RQSL(mpt);
2769 
2770 	fl =
2771 		MPI_SGE_FLAGS_HOST_TO_IOC	|
2772 		MPI_SGE_FLAGS_SIMPLE_ELEMENT	|
2773 		MPI_SGE_FLAGS_LAST_ELEMENT	|
2774 		MPI_SGE_FLAGS_END_OF_LIST	|
2775 		MPI_SGE_FLAGS_END_OF_BUFFER;
2776 	fl <<= MPI_SGE_FLAGS_SHIFT;
2777 	fl |= (length);
2778 	se->FlagsLength = htole32(fl);
2779 	se->Address = htole32((uint32_t) paddr);
2780 }
2781 #endif
2782 
2783 	/*
2784 	 * Send it on...
2785 	 */
2786 	mpt_send_cmd(mpt, req);
2787 }
2788 
2789 static int
2790 mpt_fc_els_reply_handler(struct mpt_softc *mpt, request_t *req,
2791     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
2792 {
2793 	PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp =
2794 	    (PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY) reply_frame;
2795 	U8 rctl;
2796 	U8 type;
2797 	U8 cmd;
2798 	U16 status = le16toh(reply_frame->IOCStatus);
2799 	U32 *elsbuf;
2800 	int ioindex;
2801 	int do_refresh = TRUE;
2802 
2803 #ifdef	INVARIANTS
2804 	KASSERT(mpt_req_on_free_list(mpt, req) == 0,
2805 	    ("fc_els_reply_handler: req %p:%u for function %x on freelist!",
2806 	    req, req->serno, rp->Function));
2807 	if (rp->Function != MPI_FUNCTION_FC_PRIMITIVE_SEND) {
2808 		mpt_req_spcl(mpt, req, "fc_els_reply_handler", __LINE__);
2809 	} else {
2810 		mpt_req_not_spcl(mpt, req, "fc_els_reply_handler", __LINE__);
2811 	}
2812 #endif
2813 	mpt_lprt(mpt, MPT_PRT_DEBUG,
2814 	    "FC_ELS Complete: req %p:%u, reply %p function %x\n",
2815 	    req, req->serno, reply_frame, reply_frame->Function);
2816 
2817 	if  (status != MPI_IOCSTATUS_SUCCESS) {
2818 		mpt_prt(mpt, "ELS REPLY STATUS 0x%x for Function %x\n",
2819 		    status, reply_frame->Function);
2820 		if (status == MPI_IOCSTATUS_INVALID_STATE) {
2821 			/*
2822 			 * XXX: to get around shutdown issue
2823 			 */
2824 			mpt->disabled = 1;
2825 			return (TRUE);
2826 		}
2827 		return (TRUE);
2828 	}
2829 
2830 	/*
2831 	 * If the function of a link service response, we recycle the
2832 	 * response to be a refresh for a new link service request.
2833 	 *
2834 	 * The request pointer is bogus in this case and we have to fetch
2835 	 * it based upon the TransactionContext.
2836 	 */
2837 	if (rp->Function == MPI_FUNCTION_FC_LINK_SRVC_RSP) {
2838 		/* Freddie Uncle Charlie Katie */
2839 		/* We don't get the IOINDEX as part of the Link Svc Rsp */
2840 		for (ioindex = 0; ioindex < mpt->els_cmds_allocated; ioindex++)
2841 			if (mpt->els_cmd_ptrs[ioindex] == req) {
2842 				break;
2843 			}
2844 
2845 		KASSERT(ioindex < mpt->els_cmds_allocated,
2846 		    ("can't find my mommie!"));
2847 
2848 		/* remove from active list as we're going to re-post it */
2849 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2850 		req->state &= ~REQ_STATE_QUEUED;
2851 		req->state |= REQ_STATE_DONE;
2852 		mpt_fc_post_els(mpt, req, ioindex);
2853 		return (TRUE);
2854 	}
2855 
2856 	if (rp->Function == MPI_FUNCTION_FC_PRIMITIVE_SEND) {
2857 		/* remove from active list as we're done */
2858 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2859 		req->state &= ~REQ_STATE_QUEUED;
2860 		req->state |= REQ_STATE_DONE;
2861 		if (req->state & REQ_STATE_TIMEDOUT) {
2862 			mpt_lprt(mpt, MPT_PRT_DEBUG,
2863 			    "Sync Primitive Send Completed After Timeout\n");
2864 			mpt_free_request(mpt, req);
2865 		} else if ((req->state & REQ_STATE_NEED_WAKEUP) == 0) {
2866 			mpt_lprt(mpt, MPT_PRT_DEBUG,
2867 			    "Async Primitive Send Complete\n");
2868 			mpt_free_request(mpt, req);
2869 		} else {
2870 			mpt_lprt(mpt, MPT_PRT_DEBUG,
2871 			    "Sync Primitive Send Complete- Waking Waiter\n");
2872 			wakeup(req);
2873 		}
2874 		return (TRUE);
2875 	}
2876 
2877 	if (rp->Function != MPI_FUNCTION_FC_LINK_SRVC_BUF_POST) {
2878 		mpt_prt(mpt, "unexpected ELS_REPLY: Function 0x%x Flags %x "
2879 		    "Length %d Message Flags %x\n", rp->Function, rp->Flags,
2880 		    rp->MsgLength, rp->MsgFlags);
2881 		return (TRUE);
2882 	}
2883 
2884 	if (rp->MsgLength <= 5) {
2885 		/*
2886 		 * This is just a ack of an original ELS buffer post
2887 		 */
2888 		mpt_lprt(mpt, MPT_PRT_DEBUG,
2889 		    "RECV'd ACK of FC_ELS buf post %p:%u\n", req, req->serno);
2890 		return (TRUE);
2891 	}
2892 
2893 
2894 	rctl = (le32toh(rp->Rctl_Did) & MPI_FC_RCTL_MASK) >> MPI_FC_RCTL_SHIFT;
2895 	type = (le32toh(rp->Type_Fctl) & MPI_FC_TYPE_MASK) >> MPI_FC_TYPE_SHIFT;
2896 
2897 	elsbuf = &((U32 *)req->req_vbuf)[MPT_RQSL(mpt)/sizeof (U32)];
2898 	cmd = be32toh(elsbuf[0]) >> 24;
2899 
2900 	if (rp->Flags & MPI_LS_BUF_POST_REPLY_FLAG_NO_RSP_NEEDED) {
2901 		mpt_lprt(mpt, MPT_PRT_ALWAYS, "ELS_REPLY: response unneeded\n");
2902 		return (TRUE);
2903 	}
2904 
2905 	ioindex = le32toh(rp->TransactionContext);
2906 	req = mpt->els_cmd_ptrs[ioindex];
2907 
2908 	if (rctl == ELS && type == 1) {
2909 		switch (cmd) {
2910 		case PRLI:
2911 			/*
2912 			 * Send back a PRLI ACC
2913 			 */
2914 			mpt_prt(mpt, "PRLI from 0x%08x%08x\n",
2915 			    le32toh(rp->Wwn.PortNameHigh),
2916 			    le32toh(rp->Wwn.PortNameLow));
2917 			elsbuf[0] = htobe32(0x02100014);
2918 			elsbuf[1] |= htobe32(0x00000100);
2919 			elsbuf[4] = htobe32(0x00000002);
2920 			if (mpt->role & MPT_ROLE_TARGET)
2921 				elsbuf[4] |= htobe32(0x00000010);
2922 			if (mpt->role & MPT_ROLE_INITIATOR)
2923 				elsbuf[4] |= htobe32(0x00000020);
2924 			/* remove from active list as we're done */
2925 			TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2926 			req->state &= ~REQ_STATE_QUEUED;
2927 			req->state |= REQ_STATE_DONE;
2928 			mpt_fc_els_send_response(mpt, req, rp, 20);
2929 			do_refresh = FALSE;
2930 			break;
2931 		case PRLO:
2932 			memset(elsbuf, 0, 5 * (sizeof (U32)));
2933 			elsbuf[0] = htobe32(0x02100014);
2934 			elsbuf[1] = htobe32(0x08000100);
2935 			mpt_prt(mpt, "PRLO from 0x%08x%08x\n",
2936 			    le32toh(rp->Wwn.PortNameHigh),
2937 			    le32toh(rp->Wwn.PortNameLow));
2938 			/* remove from active list as we're done */
2939 			TAILQ_REMOVE(&mpt->request_pending_list, req, links);
2940 			req->state &= ~REQ_STATE_QUEUED;
2941 			req->state |= REQ_STATE_DONE;
2942 			mpt_fc_els_send_response(mpt, req, rp, 20);
2943 			do_refresh = FALSE;
2944 			break;
2945 		default:
2946 			mpt_prt(mpt, "ELS TYPE 1 COMMAND: %x\n", cmd);
2947 			break;
2948 		}
2949 	} else if (rctl == ABTS && type == 0) {
2950 		uint16_t rx_id = le16toh(rp->Rxid);
2951 		uint16_t ox_id = le16toh(rp->Oxid);
2952 		request_t *tgt_req = NULL;
2953 
2954 		mpt_prt(mpt,
2955 		    "ELS: ABTS OX_ID 0x%x RX_ID 0x%x from 0x%08x%08x\n",
2956 		    ox_id, rx_id, le32toh(rp->Wwn.PortNameHigh),
2957 		    le32toh(rp->Wwn.PortNameLow));
2958 		if (rx_id >= mpt->mpt_max_tgtcmds) {
2959 			mpt_prt(mpt, "Bad RX_ID 0x%x\n", rx_id);
2960 		} else if (mpt->tgt_cmd_ptrs == NULL) {
2961 			mpt_prt(mpt, "No TGT CMD PTRS\n");
2962 		} else {
2963 			tgt_req = mpt->tgt_cmd_ptrs[rx_id];
2964 		}
2965 		if (tgt_req) {
2966 			mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, tgt_req);
2967 			union ccb *ccb;
2968 			uint32_t ct_id;
2969 
2970 			/*
2971 			 * Check to make sure we have the correct command
2972 			 * The reply descriptor in the target state should
2973 			 * should contain an IoIndex that should match the
2974 			 * RX_ID.
2975 			 *
2976 			 * It'd be nice to have OX_ID to crosscheck with
2977 			 * as well.
2978 			 */
2979 			ct_id = GET_IO_INDEX(tgt->reply_desc);
2980 
2981 			if (ct_id != rx_id) {
2982 				mpt_lprt(mpt, MPT_PRT_ERROR, "ABORT Mismatch: "
2983 				    "RX_ID received=0x%x; RX_ID in cmd=0x%x\n",
2984 				    rx_id, ct_id);
2985 				goto skip;
2986 			}
2987 
2988 			ccb = tgt->ccb;
2989 			if (ccb) {
2990 				mpt_prt(mpt,
2991 				    "CCB (%p): lun %u flags %x status %x\n",
2992 				    ccb, ccb->ccb_h.target_lun,
2993 				    ccb->ccb_h.flags, ccb->ccb_h.status);
2994 			}
2995 			mpt_prt(mpt, "target state 0x%x resid %u xfrd %u rpwrd "
2996 			    "%x nxfers %x\n", tgt->state,
2997 			    tgt->resid, tgt->bytes_xfered, tgt->reply_desc,
2998 			    tgt->nxfers);
2999   skip:
3000 			if (mpt_abort_target_cmd(mpt, tgt_req)) {
3001 				mpt_prt(mpt, "unable to start TargetAbort\n");
3002 			}
3003 		} else {
3004 			mpt_prt(mpt, "no back pointer for RX_ID 0x%x\n", rx_id);
3005 		}
3006 		memset(elsbuf, 0, 5 * (sizeof (U32)));
3007 		elsbuf[0] = htobe32(0);
3008 		elsbuf[1] = htobe32((ox_id << 16) | rx_id);
3009 		elsbuf[2] = htobe32(0x000ffff);
3010 		/*
3011 		 * Dork with the reply frame so that the response to it
3012 		 * will be correct.
3013 		 */
3014 		rp->Rctl_Did += ((BA_ACC - ABTS) << MPI_FC_RCTL_SHIFT);
3015 		/* remove from active list as we're done */
3016 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
3017 		req->state &= ~REQ_STATE_QUEUED;
3018 		req->state |= REQ_STATE_DONE;
3019 		mpt_fc_els_send_response(mpt, req, rp, 12);
3020 		do_refresh = FALSE;
3021 	} else {
3022 		mpt_prt(mpt, "ELS: RCTL %x TYPE %x CMD %x\n", rctl, type, cmd);
3023 	}
3024 	if (do_refresh == TRUE) {
3025 		/* remove from active list as we're done */
3026 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
3027 		req->state &= ~REQ_STATE_QUEUED;
3028 		req->state |= REQ_STATE_DONE;
3029 		mpt_fc_post_els(mpt, req, ioindex);
3030 	}
3031 	return (TRUE);
3032 }
3033 
3034 /*
3035  * Clean up all SCSI Initiator personality state in response
3036  * to a controller reset.
3037  */
3038 static void
3039 mpt_cam_ioc_reset(struct mpt_softc *mpt, int type)
3040 {
3041 	/*
3042 	 * The pending list is already run down by
3043 	 * the generic handler.  Perform the same
3044 	 * operation on the timed out request list.
3045 	 */
3046 	mpt_complete_request_chain(mpt, &mpt->request_timeout_list,
3047 				   MPI_IOCSTATUS_INVALID_STATE);
3048 
3049 	/*
3050 	 * XXX: We need to repost ELS and Target Command Buffers?
3051 	 */
3052 
3053 	/*
3054 	 * Inform the XPT that a bus reset has occurred.
3055 	 */
3056 	xpt_async(AC_BUS_RESET, mpt->path, NULL);
3057 }
3058 
3059 /*
3060  * Parse additional completion information in the reply
3061  * frame for SCSI I/O requests.
3062  */
3063 static int
3064 mpt_scsi_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
3065 			     MSG_DEFAULT_REPLY *reply_frame)
3066 {
3067 	union ccb *ccb;
3068 	MSG_SCSI_IO_REPLY *scsi_io_reply;
3069 	u_int ioc_status;
3070 	u_int sstate;
3071 
3072 	MPT_DUMP_REPLY_FRAME(mpt, reply_frame);
3073 	KASSERT(reply_frame->Function == MPI_FUNCTION_SCSI_IO_REQUEST
3074 	     || reply_frame->Function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH,
3075 		("MPT SCSI I/O Handler called with incorrect reply type"));
3076 	KASSERT((reply_frame->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) == 0,
3077 		("MPT SCSI I/O Handler called with continuation reply"));
3078 
3079 	scsi_io_reply = (MSG_SCSI_IO_REPLY *)reply_frame;
3080 	ioc_status = le16toh(scsi_io_reply->IOCStatus);
3081 	ioc_status &= MPI_IOCSTATUS_MASK;
3082 	sstate = scsi_io_reply->SCSIState;
3083 
3084 	ccb = req->ccb;
3085 	ccb->csio.resid =
3086 	    ccb->csio.dxfer_len - le32toh(scsi_io_reply->TransferCount);
3087 
3088 	if ((sstate & MPI_SCSI_STATE_AUTOSENSE_VALID) != 0
3089 	 && (ccb->ccb_h.flags & (CAM_SENSE_PHYS | CAM_SENSE_PTR)) == 0) {
3090 		ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
3091 		ccb->csio.sense_resid =
3092 		    ccb->csio.sense_len - le32toh(scsi_io_reply->SenseCount);
3093 		bcopy(req->sense_vbuf, &ccb->csio.sense_data,
3094 		    min(ccb->csio.sense_len,
3095 		    le32toh(scsi_io_reply->SenseCount)));
3096 	}
3097 
3098 	if ((sstate & MPI_SCSI_STATE_QUEUE_TAG_REJECTED) != 0) {
3099 		/*
3100 		 * Tag messages rejected, but non-tagged retry
3101 		 * was successful.
3102 XXXX
3103 		mpt_set_tags(mpt, devinfo, MPT_QUEUE_NONE);
3104 		 */
3105 	}
3106 
3107 	switch(ioc_status) {
3108 	case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
3109 		/*
3110 		 * XXX
3111 		 * Linux driver indicates that a zero
3112 		 * transfer length with this error code
3113 		 * indicates a CRC error.
3114 		 *
3115 		 * No need to swap the bytes for checking
3116 		 * against zero.
3117 		 */
3118 		if (scsi_io_reply->TransferCount == 0) {
3119 			mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY);
3120 			break;
3121 		}
3122 		/* FALLTHROUGH */
3123 	case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:
3124 	case MPI_IOCSTATUS_SUCCESS:
3125 	case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:
3126 		if ((sstate & MPI_SCSI_STATE_NO_SCSI_STATUS) != 0) {
3127 			/*
3128 			 * Status was never returned for this transaction.
3129 			 */
3130 			mpt_set_ccb_status(ccb, CAM_UNEXP_BUSFREE);
3131 		} else if (scsi_io_reply->SCSIStatus != SCSI_STATUS_OK) {
3132 			ccb->csio.scsi_status = scsi_io_reply->SCSIStatus;
3133 			mpt_set_ccb_status(ccb, CAM_SCSI_STATUS_ERROR);
3134 			if ((sstate & MPI_SCSI_STATE_AUTOSENSE_FAILED) != 0)
3135 				mpt_set_ccb_status(ccb, CAM_AUTOSENSE_FAIL);
3136 		} else if ((sstate & MPI_SCSI_STATE_RESPONSE_INFO_VALID) != 0) {
3137 
3138 			/* XXX Handle SPI-Packet and FCP-2 response info. */
3139 			mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3140 		} else
3141 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3142 		break;
3143 	case MPI_IOCSTATUS_SCSI_DATA_OVERRUN:
3144 		mpt_set_ccb_status(ccb, CAM_DATA_RUN_ERR);
3145 		break;
3146 	case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR:
3147 		mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY);
3148 		break;
3149 	case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
3150 		/*
3151 		 * Since selection timeouts and "device really not
3152 		 * there" are grouped into this error code, report
3153 		 * selection timeout.  Selection timeouts are
3154 		 * typically retried before giving up on the device
3155 		 * whereas "device not there" errors are considered
3156 		 * unretryable.
3157 		 */
3158 		mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT);
3159 		break;
3160 	case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR:
3161 		mpt_set_ccb_status(ccb, CAM_SEQUENCE_FAIL);
3162 		break;
3163 	case MPI_IOCSTATUS_SCSI_INVALID_BUS:
3164 		mpt_set_ccb_status(ccb, CAM_PATH_INVALID);
3165 		break;
3166 	case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
3167 		mpt_set_ccb_status(ccb, CAM_TID_INVALID);
3168 		break;
3169 	case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
3170 		ccb->ccb_h.status = CAM_UA_TERMIO;
3171 		break;
3172 	case MPI_IOCSTATUS_INVALID_STATE:
3173 		/*
3174 		 * The IOC has been reset.  Emulate a bus reset.
3175 		 */
3176 		/* FALLTHROUGH */
3177 	case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:
3178 		ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
3179 		break;
3180 	case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:
3181 	case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:
3182 		/*
3183 		 * Don't clobber any timeout status that has
3184 		 * already been set for this transaction.  We
3185 		 * want the SCSI layer to be able to differentiate
3186 		 * between the command we aborted due to timeout
3187 		 * and any innocent bystanders.
3188 		 */
3189 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG)
3190 			break;
3191 		mpt_set_ccb_status(ccb, CAM_REQ_TERMIO);
3192 		break;
3193 
3194 	case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
3195 		mpt_set_ccb_status(ccb, CAM_RESRC_UNAVAIL);
3196 		break;
3197 	case MPI_IOCSTATUS_BUSY:
3198 		mpt_set_ccb_status(ccb, CAM_BUSY);
3199 		break;
3200 	case MPI_IOCSTATUS_INVALID_FUNCTION:
3201 	case MPI_IOCSTATUS_INVALID_SGL:
3202 	case MPI_IOCSTATUS_INTERNAL_ERROR:
3203 	case MPI_IOCSTATUS_INVALID_FIELD:
3204 	default:
3205 		/* XXX
3206 		 * Some of the above may need to kick
3207 		 * of a recovery action!!!!
3208 		 */
3209 		ccb->ccb_h.status = CAM_UNREC_HBA_ERROR;
3210 		break;
3211 	}
3212 
3213 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
3214 		mpt_freeze_ccb(ccb);
3215 	}
3216 
3217 	return (TRUE);
3218 }
3219 
3220 static void
3221 mpt_action(struct cam_sim *sim, union ccb *ccb)
3222 {
3223 	struct mpt_softc *mpt;
3224 	struct ccb_trans_settings *cts;
3225 	target_id_t tgt;
3226 	lun_id_t lun;
3227 	int raid_passthru;
3228 
3229 	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("mpt_action\n"));
3230 
3231 	mpt = (struct mpt_softc *)cam_sim_softc(sim);
3232 	raid_passthru = (sim == mpt->phydisk_sim);
3233 	MPT_LOCK_ASSERT(mpt);
3234 
3235 	tgt = ccb->ccb_h.target_id;
3236 	lun = ccb->ccb_h.target_lun;
3237 	if (raid_passthru &&
3238 	    ccb->ccb_h.func_code != XPT_PATH_INQ &&
3239 	    ccb->ccb_h.func_code != XPT_RESET_BUS &&
3240 	    ccb->ccb_h.func_code != XPT_RESET_DEV) {
3241 		CAMLOCK_2_MPTLOCK(mpt);
3242 		if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) {
3243 			MPTLOCK_2_CAMLOCK(mpt);
3244 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3245 			mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE);
3246 			xpt_done(ccb);
3247 			return;
3248 		}
3249 		MPTLOCK_2_CAMLOCK(mpt);
3250 	}
3251 	ccb->ccb_h.ccb_mpt_ptr = mpt;
3252 
3253 	switch (ccb->ccb_h.func_code) {
3254 	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
3255 		/*
3256 		 * Do a couple of preliminary checks...
3257 		 */
3258 		if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
3259 			if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) {
3260 				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3261 				mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3262 				break;
3263 			}
3264 		}
3265 		/* Max supported CDB length is 16 bytes */
3266 		/* XXX Unless we implement the new 32byte message type */
3267 		if (ccb->csio.cdb_len >
3268 		    sizeof (((PTR_MSG_SCSI_IO_REQUEST)0)->CDB)) {
3269 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3270 			mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3271 			break;
3272 		}
3273 #ifdef	MPT_TEST_MULTIPATH
3274 		if (mpt->failure_id == ccb->ccb_h.target_id) {
3275 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3276 			mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT);
3277 			break;
3278 		}
3279 #endif
3280 		ccb->csio.scsi_status = SCSI_STATUS_OK;
3281 		mpt_start(sim, ccb);
3282 		return;
3283 
3284 	case XPT_RESET_BUS:
3285 		if (raid_passthru) {
3286 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3287 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3288 			break;
3289 		}
3290 	case XPT_RESET_DEV:
3291 		if (ccb->ccb_h.func_code == XPT_RESET_BUS) {
3292 			if (bootverbose) {
3293 				xpt_print(ccb->ccb_h.path, "reset bus\n");
3294 			}
3295 		} else {
3296 			xpt_print(ccb->ccb_h.path, "reset device\n");
3297 		}
3298 		CAMLOCK_2_MPTLOCK(mpt);
3299 		(void) mpt_bus_reset(mpt, tgt, lun, FALSE);
3300 		MPTLOCK_2_CAMLOCK(mpt);
3301 
3302 		/*
3303 		 * mpt_bus_reset is always successful in that it
3304 		 * will fall back to a hard reset should a bus
3305 		 * reset attempt fail.
3306 		 */
3307 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3308 		mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3309 		break;
3310 
3311 	case XPT_ABORT:
3312 	{
3313 		union ccb *accb = ccb->cab.abort_ccb;
3314 		CAMLOCK_2_MPTLOCK(mpt);
3315 		switch (accb->ccb_h.func_code) {
3316 		case XPT_ACCEPT_TARGET_IO:
3317 		case XPT_IMMED_NOTIFY:
3318 			ccb->ccb_h.status = mpt_abort_target_ccb(mpt, ccb);
3319 			break;
3320 		case XPT_CONT_TARGET_IO:
3321 			mpt_prt(mpt, "cannot abort active CTIOs yet\n");
3322 			ccb->ccb_h.status = CAM_UA_ABORT;
3323 			break;
3324 		case XPT_SCSI_IO:
3325 			ccb->ccb_h.status = CAM_UA_ABORT;
3326 			break;
3327 		default:
3328 			ccb->ccb_h.status = CAM_REQ_INVALID;
3329 			break;
3330 		}
3331 		MPTLOCK_2_CAMLOCK(mpt);
3332 		break;
3333 	}
3334 
3335 #ifdef	CAM_NEW_TRAN_CODE
3336 #define	IS_CURRENT_SETTINGS(c)	((c)->type == CTS_TYPE_CURRENT_SETTINGS)
3337 #else
3338 #define	IS_CURRENT_SETTINGS(c)	((c)->flags & CCB_TRANS_CURRENT_SETTINGS)
3339 #endif
3340 #define	DP_DISC_ENABLE	0x1
3341 #define	DP_DISC_DISABL	0x2
3342 #define	DP_DISC		(DP_DISC_ENABLE|DP_DISC_DISABL)
3343 
3344 #define	DP_TQING_ENABLE	0x4
3345 #define	DP_TQING_DISABL	0x8
3346 #define	DP_TQING	(DP_TQING_ENABLE|DP_TQING_DISABL)
3347 
3348 #define	DP_WIDE		0x10
3349 #define	DP_NARROW	0x20
3350 #define	DP_WIDTH	(DP_WIDE|DP_NARROW)
3351 
3352 #define	DP_SYNC		0x40
3353 
3354 	case XPT_SET_TRAN_SETTINGS:	/* Nexus Settings */
3355 	{
3356 #ifdef	CAM_NEW_TRAN_CODE
3357 		struct ccb_trans_settings_scsi *scsi;
3358 		struct ccb_trans_settings_spi *spi;
3359 #endif
3360 		uint8_t dval;
3361 		u_int period;
3362 		u_int offset;
3363 		int i, j;
3364 
3365 		cts = &ccb->cts;
3366 
3367 		if (mpt->is_fc || mpt->is_sas) {
3368 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3369 			break;
3370 		}
3371 
3372 #ifdef	CAM_NEW_TRAN_CODE
3373 		scsi = &cts->proto_specific.scsi;
3374 		spi = &cts->xport_specific.spi;
3375 
3376 		/*
3377 		 * We can be called just to valid transport and proto versions
3378 		 */
3379 		if (scsi->valid == 0 && spi->valid == 0) {
3380 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3381 			break;
3382 		}
3383 #endif
3384 
3385 		/*
3386 		 * Skip attempting settings on RAID volume disks.
3387 		 * Other devices on the bus get the normal treatment.
3388 		 */
3389 		if (mpt->phydisk_sim && raid_passthru == 0 &&
3390 		    mpt_is_raid_volume(mpt, tgt) != 0) {
3391 			mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3392 			    "no transfer settings for RAID vols\n");
3393 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3394 			break;
3395 		}
3396 
3397 		i = mpt->mpt_port_page2.PortSettings &
3398 		    MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS;
3399 		j = mpt->mpt_port_page2.PortFlags &
3400 		    MPI_SCSIPORTPAGE2_PORT_FLAGS_DV_MASK;
3401 		if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS &&
3402 		    j == MPI_SCSIPORTPAGE2_PORT_FLAGS_OFF_DV) {
3403 			mpt_lprt(mpt, MPT_PRT_ALWAYS,
3404 			    "honoring BIOS transfer negotiations\n");
3405 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3406 			break;
3407 		}
3408 
3409 		dval = 0;
3410 		period = 0;
3411 		offset = 0;
3412 
3413 #ifndef	CAM_NEW_TRAN_CODE
3414 		if ((cts->valid & CCB_TRANS_DISC_VALID) != 0) {
3415 			dval |= (cts->flags & CCB_TRANS_DISC_ENB) ?
3416 			    DP_DISC_ENABLE : DP_DISC_DISABL;
3417 		}
3418 
3419 		if ((cts->valid & CCB_TRANS_TQ_VALID) != 0) {
3420 			dval |= (cts->flags & CCB_TRANS_TAG_ENB) ?
3421 			    DP_TQING_ENABLE : DP_TQING_DISABL;
3422 		}
3423 
3424 		if ((cts->valid & CCB_TRANS_BUS_WIDTH_VALID) != 0) {
3425 			dval |= cts->bus_width ? DP_WIDE : DP_NARROW;
3426 		}
3427 
3428 		if ((cts->valid & CCB_TRANS_SYNC_RATE_VALID) &&
3429 		    (cts->valid & CCB_TRANS_SYNC_OFFSET_VALID)) {
3430 			dval |= DP_SYNC;
3431 			period = cts->sync_period;
3432 			offset = cts->sync_offset;
3433 		}
3434 #else
3435 		if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
3436 			dval |= ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) ?
3437 			    DP_DISC_ENABLE : DP_DISC_DISABL;
3438 		}
3439 
3440 		if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
3441 			dval |= ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) ?
3442 			    DP_TQING_ENABLE : DP_TQING_DISABL;
3443 		}
3444 
3445 		if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
3446 			dval |= (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT) ?
3447 			    DP_WIDE : DP_NARROW;
3448 		}
3449 
3450 		if (spi->valid & CTS_SPI_VALID_SYNC_OFFSET) {
3451 			dval |= DP_SYNC;
3452 			offset = spi->sync_offset;
3453 		} else {
3454 			PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr =
3455 			    &mpt->mpt_dev_page1[tgt];
3456 			offset = ptr->RequestedParameters;
3457 			offset &= MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK;
3458 			offset >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET;
3459 		}
3460 		if (spi->valid & CTS_SPI_VALID_SYNC_RATE) {
3461 			dval |= DP_SYNC;
3462 			period = spi->sync_period;
3463 		} else {
3464 			PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr =
3465 			    &mpt->mpt_dev_page1[tgt];
3466 			period = ptr->RequestedParameters;
3467 			period &= MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK;
3468 			period >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD;
3469 		}
3470 #endif
3471 		CAMLOCK_2_MPTLOCK(mpt);
3472 		if (dval & DP_DISC_ENABLE) {
3473 			mpt->mpt_disc_enable |= (1 << tgt);
3474 		} else if (dval & DP_DISC_DISABL) {
3475 			mpt->mpt_disc_enable &= ~(1 << tgt);
3476 		}
3477 		if (dval & DP_TQING_ENABLE) {
3478 			mpt->mpt_tag_enable |= (1 << tgt);
3479 		} else if (dval & DP_TQING_DISABL) {
3480 			mpt->mpt_tag_enable &= ~(1 << tgt);
3481 		}
3482 		if (dval & DP_WIDTH) {
3483 			mpt_setwidth(mpt, tgt, 1);
3484 		}
3485 		if (dval & DP_SYNC) {
3486 			mpt_setsync(mpt, tgt, period, offset);
3487 		}
3488 		if (dval == 0) {
3489 			MPTLOCK_2_CAMLOCK(mpt);
3490 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3491 			break;
3492 		}
3493 		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3494 		    "set [%d]: 0x%x period 0x%x offset %d\n",
3495 		    tgt, dval, period, offset);
3496 		if (mpt_update_spi_config(mpt, tgt)) {
3497 			mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3498 		} else {
3499 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3500 		}
3501 		MPTLOCK_2_CAMLOCK(mpt);
3502 		break;
3503 	}
3504 	case XPT_GET_TRAN_SETTINGS:
3505 	{
3506 #ifdef	CAM_NEW_TRAN_CODE
3507 		struct ccb_trans_settings_scsi *scsi;
3508 		cts = &ccb->cts;
3509 		cts->protocol = PROTO_SCSI;
3510 		if (mpt->is_fc) {
3511 			struct ccb_trans_settings_fc *fc =
3512 			    &cts->xport_specific.fc;
3513 			cts->protocol_version = SCSI_REV_SPC;
3514 			cts->transport = XPORT_FC;
3515 			cts->transport_version = 0;
3516 			fc->valid = CTS_FC_VALID_SPEED;
3517 			fc->bitrate = 100000;
3518 		} else if (mpt->is_sas) {
3519 			struct ccb_trans_settings_sas *sas =
3520 			    &cts->xport_specific.sas;
3521 			cts->protocol_version = SCSI_REV_SPC2;
3522 			cts->transport = XPORT_SAS;
3523 			cts->transport_version = 0;
3524 			sas->valid = CTS_SAS_VALID_SPEED;
3525 			sas->bitrate = 300000;
3526 		} else {
3527 			cts->protocol_version = SCSI_REV_2;
3528 			cts->transport = XPORT_SPI;
3529 			cts->transport_version = 2;
3530 			if (mpt_get_spi_settings(mpt, cts) != 0) {
3531 				mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3532 				break;
3533 			}
3534 		}
3535 		scsi = &cts->proto_specific.scsi;
3536 		scsi->valid = CTS_SCSI_VALID_TQ;
3537 		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
3538 #else
3539 		cts = &ccb->cts;
3540 		if (mpt->is_fc) {
3541 			cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB;
3542 			cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
3543 			cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
3544 		} else if (mpt->is_sas) {
3545 			cts->flags = CCB_TRANS_TAG_ENB | CCB_TRANS_DISC_ENB;
3546 			cts->valid = CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
3547 			cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
3548 		} else if (mpt_get_spi_settings(mpt, cts) != 0) {
3549 			mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3550 			break;
3551 		}
3552 #endif
3553 		mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3554 		break;
3555 	}
3556 	case XPT_CALC_GEOMETRY:
3557 	{
3558 		struct ccb_calc_geometry *ccg;
3559 
3560 		ccg = &ccb->ccg;
3561 		if (ccg->block_size == 0) {
3562 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
3563 			mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3564 			break;
3565 		}
3566 		mpt_calc_geometry(ccg, /*extended*/1);
3567 		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d\n", __LINE__));
3568 		break;
3569 	}
3570 	case XPT_PATH_INQ:		/* Path routing inquiry */
3571 	{
3572 		struct ccb_pathinq *cpi = &ccb->cpi;
3573 
3574 		cpi->version_num = 1;
3575 		cpi->target_sprt = 0;
3576 		cpi->hba_eng_cnt = 0;
3577 		cpi->max_target = mpt->port_facts[0].MaxDevices - 1;
3578 #if 0 /* XXX swildner */
3579 		cpi->maxio = (mpt->max_cam_seg_cnt - 1) * PAGE_SIZE;
3580 #endif
3581 		/*
3582 		 * FC cards report MAX_DEVICES of 512, but
3583 		 * the MSG_SCSI_IO_REQUEST target id field
3584 		 * is only 8 bits. Until we fix the driver
3585 		 * to support 'channels' for bus overflow,
3586 		 * just limit it.
3587 		 */
3588 		if (cpi->max_target > 255) {
3589 			cpi->max_target = 255;
3590 		}
3591 
3592 		/*
3593 		 * VMware ESX reports > 16 devices and then dies when we probe.
3594 		 */
3595 		if (mpt->is_spi && cpi->max_target > 15) {
3596 			cpi->max_target = 15;
3597 		}
3598 		if (mpt->is_spi)
3599 			cpi->max_lun = 7;
3600 		else
3601 			cpi->max_lun = MPT_MAX_LUNS;
3602 		cpi->initiator_id = mpt->mpt_ini_id;
3603 		cpi->bus_id = cam_sim_bus(sim);
3604 
3605 		/*
3606 		 * The base speed is the speed of the underlying connection.
3607 		 */
3608 #ifdef	CAM_NEW_TRAN_CODE
3609 		cpi->protocol = PROTO_SCSI;
3610 		if (mpt->is_fc) {
3611 			cpi->hba_misc = PIM_NOBUSRESET;
3612 			cpi->base_transfer_speed = 100000;
3613 			cpi->hba_inquiry = PI_TAG_ABLE;
3614 			cpi->transport = XPORT_FC;
3615 			cpi->transport_version = 0;
3616 			cpi->protocol_version = SCSI_REV_SPC;
3617 		} else if (mpt->is_sas) {
3618 			cpi->hba_misc = PIM_NOBUSRESET;
3619 			cpi->base_transfer_speed = 300000;
3620 			cpi->hba_inquiry = PI_TAG_ABLE;
3621 			cpi->transport = XPORT_SAS;
3622 			cpi->transport_version = 0;
3623 			cpi->protocol_version = SCSI_REV_SPC2;
3624 		} else {
3625 			cpi->hba_misc = PIM_SEQSCAN;
3626 			cpi->base_transfer_speed = 3300;
3627 			cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
3628 			cpi->transport = XPORT_SPI;
3629 			cpi->transport_version = 2;
3630 			cpi->protocol_version = SCSI_REV_2;
3631 		}
3632 #else
3633 		if (mpt->is_fc) {
3634 			cpi->hba_misc = PIM_NOBUSRESET;
3635 			cpi->base_transfer_speed = 100000;
3636 			cpi->hba_inquiry = PI_TAG_ABLE;
3637 		} else if (mpt->is_sas) {
3638 			cpi->hba_misc = PIM_NOBUSRESET;
3639 			cpi->base_transfer_speed = 300000;
3640 			cpi->hba_inquiry = PI_TAG_ABLE;
3641 		} else {
3642 			cpi->hba_misc = PIM_SEQSCAN;
3643 			cpi->base_transfer_speed = 3300;
3644 			cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
3645 		}
3646 #endif
3647 
3648 		/*
3649 		 * We give our fake RAID passhtru bus a width that is MaxVolumes
3650 		 * wide and restrict it to one lun.
3651 		 */
3652 		if (raid_passthru) {
3653 			cpi->max_target = mpt->ioc_page2->MaxPhysDisks - 1;
3654 			cpi->initiator_id = cpi->max_target + 1;
3655 			cpi->max_lun = 0;
3656 		}
3657 
3658 		if ((mpt->role & MPT_ROLE_INITIATOR) == 0) {
3659 			cpi->hba_misc |= PIM_NOINITIATOR;
3660 		}
3661 		if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) {
3662 			cpi->target_sprt =
3663 			    PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO;
3664 		} else {
3665 			cpi->target_sprt = 0;
3666 		}
3667 		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
3668 		strncpy(cpi->hba_vid, "LSI", HBA_IDLEN);
3669 		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
3670 		cpi->unit_number = cam_sim_unit(sim);
3671 		cpi->ccb_h.status = CAM_REQ_CMP;
3672 		break;
3673 	}
3674 	case XPT_EN_LUN:		/* Enable LUN as a target */
3675 	{
3676 		int result;
3677 
3678 		CAMLOCK_2_MPTLOCK(mpt);
3679 		if (ccb->cel.enable)
3680 			result = mpt_enable_lun(mpt,
3681 			    ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
3682 		else
3683 			result = mpt_disable_lun(mpt,
3684 			    ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
3685 		MPTLOCK_2_CAMLOCK(mpt);
3686 		if (result == 0) {
3687 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
3688 		} else {
3689 			mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
3690 		}
3691 		break;
3692 	}
3693 	case XPT_NOTIFY_ACK:		/* recycle notify ack */
3694 	case XPT_IMMED_NOTIFY:		/* Add Immediate Notify Resource */
3695 	case XPT_ACCEPT_TARGET_IO:	/* Add Accept Target IO Resource */
3696 	{
3697 		tgt_resource_t *trtp;
3698 		lun_id_t lun = ccb->ccb_h.target_lun;
3699 		ccb->ccb_h.sim_priv.entries[0].field = 0;
3700 		ccb->ccb_h.sim_priv.entries[1].ptr = mpt;
3701 		ccb->ccb_h.flags = 0;
3702 
3703 		if (lun == CAM_LUN_WILDCARD) {
3704 			if (ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
3705 				mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3706 				break;
3707 			}
3708 			trtp = &mpt->trt_wildcard;
3709 		} else if (lun >= MPT_MAX_LUNS) {
3710 			mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
3711 			break;
3712 		} else {
3713 			trtp = &mpt->trt[lun];
3714 		}
3715 		CAMLOCK_2_MPTLOCK(mpt);
3716 		if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
3717 			mpt_lprt(mpt, MPT_PRT_DEBUG1,
3718 			    "Put FREE ATIO %p lun %d\n", ccb, lun);
3719 			STAILQ_INSERT_TAIL(&trtp->atios, &ccb->ccb_h,
3720 			    sim_links.stqe);
3721 		} else if (ccb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
3722 			mpt_lprt(mpt, MPT_PRT_DEBUG1,
3723 			    "Put FREE INOT lun %d\n", lun);
3724 			STAILQ_INSERT_TAIL(&trtp->inots, &ccb->ccb_h,
3725 			    sim_links.stqe);
3726 		} else {
3727 			mpt_lprt(mpt, MPT_PRT_ALWAYS, "Got Notify ACK\n");
3728 		}
3729 		mpt_set_ccb_status(ccb, CAM_REQ_INPROG);
3730 		MPTLOCK_2_CAMLOCK(mpt);
3731 		return;
3732 	}
3733 	case XPT_CONT_TARGET_IO:
3734 		CAMLOCK_2_MPTLOCK(mpt);
3735 		mpt_target_start_io(mpt, ccb);
3736 		MPTLOCK_2_CAMLOCK(mpt);
3737 		return;
3738 
3739 	default:
3740 		ccb->ccb_h.status = CAM_REQ_INVALID;
3741 		break;
3742 	}
3743 	xpt_done(ccb);
3744 }
3745 
3746 static int
3747 mpt_get_spi_settings(struct mpt_softc *mpt, struct ccb_trans_settings *cts)
3748 {
3749 #ifdef	CAM_NEW_TRAN_CODE
3750 	struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi;
3751 	struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi;
3752 #endif
3753 	target_id_t tgt;
3754 	uint32_t dval, pval, oval;
3755 	int rv;
3756 
3757 	if (IS_CURRENT_SETTINGS(cts) == 0) {
3758 		tgt = cts->ccb_h.target_id;
3759 	} else if (xpt_path_sim(cts->ccb_h.path) == mpt->phydisk_sim) {
3760 		if (mpt_map_physdisk(mpt, (union ccb *)cts, &tgt)) {
3761 			return (-1);
3762 		}
3763 	} else {
3764 		tgt = cts->ccb_h.target_id;
3765 	}
3766 
3767 	/*
3768 	 * We aren't looking at Port Page 2 BIOS settings here-
3769 	 * sometimes these have been known to be bogus XXX.
3770 	 *
3771 	 * For user settings, we pick the max from port page 0
3772 	 *
3773 	 * For current settings we read the current settings out from
3774 	 * device page 0 for that target.
3775 	 */
3776 	if (IS_CURRENT_SETTINGS(cts)) {
3777 		CONFIG_PAGE_SCSI_DEVICE_0 tmp;
3778 		dval = 0;
3779 
3780 		CAMLOCK_2_MPTLOCK(mpt);
3781 		tmp = mpt->mpt_dev_page0[tgt];
3782 		rv = mpt_read_cur_cfg_page(mpt, tgt, &tmp.Header,
3783 		    sizeof(tmp), FALSE, 5000);
3784 		if (rv) {
3785 			MPTLOCK_2_CAMLOCK(mpt);
3786 			mpt_prt(mpt, "can't get tgt %d config page 0\n", tgt);
3787 			return (rv);
3788 		}
3789 		mpt2host_config_page_scsi_device_0(&tmp);
3790 
3791 		MPTLOCK_2_CAMLOCK(mpt);
3792 		mpt_lprt(mpt, MPT_PRT_DEBUG,
3793 		    "mpt_get_spi_settings[%d]: current NP %x Info %x\n", tgt,
3794 		    tmp.NegotiatedParameters, tmp.Information);
3795 		dval |= (tmp.NegotiatedParameters & MPI_SCSIDEVPAGE0_NP_WIDE) ?
3796 		    DP_WIDE : DP_NARROW;
3797 		dval |= (mpt->mpt_disc_enable & (1 << tgt)) ?
3798 		    DP_DISC_ENABLE : DP_DISC_DISABL;
3799 		dval |= (mpt->mpt_tag_enable & (1 << tgt)) ?
3800 		    DP_TQING_ENABLE : DP_TQING_DISABL;
3801 		oval = tmp.NegotiatedParameters;
3802 		oval &= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_OFFSET_MASK;
3803 		oval >>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_OFFSET;
3804 		pval = tmp.NegotiatedParameters;
3805 		pval &= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_PERIOD_MASK;
3806 		pval >>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_PERIOD;
3807 		mpt->mpt_dev_page0[tgt] = tmp;
3808 	} else {
3809 		dval = DP_WIDE|DP_DISC_ENABLE|DP_TQING_ENABLE|DP_SYNC;
3810 		oval = mpt->mpt_port_page0.Capabilities;
3811 		oval = MPI_SCSIPORTPAGE0_CAP_GET_MAX_SYNC_OFFSET(oval);
3812 		pval = mpt->mpt_port_page0.Capabilities;
3813 		pval = MPI_SCSIPORTPAGE0_CAP_GET_MIN_SYNC_PERIOD(pval);
3814 	}
3815 
3816 #ifndef	CAM_NEW_TRAN_CODE
3817 	cts->flags &= ~(CCB_TRANS_DISC_ENB|CCB_TRANS_TAG_ENB);
3818 	cts->valid = 0;
3819 	cts->sync_period = pval;
3820 	cts->sync_offset = oval;
3821 	cts->valid |= CCB_TRANS_SYNC_RATE_VALID;
3822 	cts->valid |= CCB_TRANS_SYNC_OFFSET_VALID;
3823 	cts->valid |= CCB_TRANS_BUS_WIDTH_VALID;
3824 	if (dval & DP_WIDE) {
3825 		cts->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
3826 	} else {
3827 		cts->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
3828 	}
3829 	if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
3830 		cts->valid |= CCB_TRANS_DISC_VALID | CCB_TRANS_TQ_VALID;
3831 		if (dval & DP_DISC_ENABLE) {
3832 			cts->flags |= CCB_TRANS_DISC_ENB;
3833 		}
3834 		if (dval & DP_TQING_ENABLE) {
3835 			cts->flags |= CCB_TRANS_TAG_ENB;
3836 		}
3837 	}
3838 #else
3839 	spi->valid = 0;
3840 	scsi->valid = 0;
3841 	spi->flags = 0;
3842 	scsi->flags = 0;
3843 	spi->sync_offset = oval;
3844 	spi->sync_period = pval;
3845 	spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
3846 	spi->valid |= CTS_SPI_VALID_SYNC_RATE;
3847 	spi->valid |= CTS_SPI_VALID_BUS_WIDTH;
3848 	if (dval & DP_WIDE) {
3849 		spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
3850 	} else {
3851 		spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
3852 	}
3853 	if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
3854 		scsi->valid = CTS_SCSI_VALID_TQ;
3855 		if (dval & DP_TQING_ENABLE) {
3856 			scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
3857 		}
3858 		spi->valid |= CTS_SPI_VALID_DISC;
3859 		if (dval & DP_DISC_ENABLE) {
3860 			spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
3861 		}
3862 	}
3863 #endif
3864 	mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3865 	    "mpt_get_spi_settings[%d]: %s flags 0x%x per 0x%x off=%d\n", tgt,
3866 	    IS_CURRENT_SETTINGS(cts)? "ACTIVE" : "NVRAM ", dval, pval, oval);
3867 	return (0);
3868 }
3869 
3870 static void
3871 mpt_setwidth(struct mpt_softc *mpt, int tgt, int onoff)
3872 {
3873 	PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr;
3874 
3875 	ptr = &mpt->mpt_dev_page1[tgt];
3876 	if (onoff) {
3877 		ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE;
3878 	} else {
3879 		ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE;
3880 	}
3881 }
3882 
3883 static void
3884 mpt_setsync(struct mpt_softc *mpt, int tgt, int period, int offset)
3885 {
3886 	PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr;
3887 
3888 	ptr = &mpt->mpt_dev_page1[tgt];
3889 	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK;
3890 	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK;
3891 	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_DT;
3892 	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_QAS;
3893 	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_IU;
3894 	if (period == 0) {
3895 		return;
3896 	}
3897 	ptr->RequestedParameters |=
3898 	    period << MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD;
3899 	ptr->RequestedParameters |=
3900 	    offset << MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET;
3901 	if (period < 0xa) {
3902 		ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_DT;
3903 	}
3904 	if (period < 0x9) {
3905 		ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_QAS;
3906 		ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_IU;
3907 	}
3908 }
3909 
3910 static int
3911 mpt_update_spi_config(struct mpt_softc *mpt, int tgt)
3912 {
3913 	CONFIG_PAGE_SCSI_DEVICE_1 tmp;
3914 	int rv;
3915 
3916 	mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
3917 	    "mpt_update_spi_config[%d].page1: Requested Params 0x%08x\n",
3918 	    tgt, mpt->mpt_dev_page1[tgt].RequestedParameters);
3919 	tmp = mpt->mpt_dev_page1[tgt];
3920 	host2mpt_config_page_scsi_device_1(&tmp);
3921 	rv = mpt_write_cur_cfg_page(mpt, tgt,
3922 	    &tmp.Header, sizeof(tmp), FALSE, 5000);
3923 	if (rv) {
3924 		mpt_prt(mpt, "mpt_update_spi_config: write cur page failed\n");
3925 		return (-1);
3926 	}
3927 	return (0);
3928 }
3929 
3930 static void
3931 mpt_calc_geometry(struct ccb_calc_geometry *ccg, int extended)
3932 {
3933 	cam_calc_geometry(ccg, extended);
3934 	uint32_t size_mb;
3935 	uint32_t secs_per_cylinder;
3936 
3937 	if (ccg->block_size == 0) {
3938 		ccg->ccb_h.status = CAM_REQ_INVALID;
3939 		return;
3940 	}
3941 	size_mb = ccg->volume_size / ((1024L * 1024L) / ccg->block_size);
3942 	if (size_mb > 1024 && extended) {
3943 		ccg->heads = 255;
3944 		ccg->secs_per_track = 63;
3945 	} else {
3946 		ccg->heads = 64;
3947 		ccg->secs_per_track = 32;
3948 	}
3949 	secs_per_cylinder = ccg->heads * ccg->secs_per_track;
3950 	ccg->cylinders = ccg->volume_size / secs_per_cylinder;
3951 	ccg->ccb_h.status = CAM_REQ_CMP;
3952 }
3953 
3954 /****************************** Timeout Recovery ******************************/
3955 static int
3956 mpt_spawn_recovery_thread(struct mpt_softc *mpt)
3957 {
3958 	int error;
3959 
3960 	error = mpt_kthread_create(mpt_recovery_thread, mpt,
3961 	    &mpt->recovery_thread, /*flags*/0,
3962 	    /*altstack*/0, "mpt_recovery%d", mpt->unit);
3963 	return (error);
3964 }
3965 
3966 static void
3967 mpt_terminate_recovery_thread(struct mpt_softc *mpt)
3968 {
3969 	if (mpt->recovery_thread == NULL) {
3970 		return;
3971 	}
3972 	mpt->shutdwn_recovery = 1;
3973 	wakeup(mpt);
3974 	/*
3975 	 * Sleep on a slightly different location
3976 	 * for this interlock just for added safety.
3977 	 */
3978 	mpt_sleep(mpt, &mpt->recovery_thread, 0, "thtrm", 0);
3979 }
3980 
3981 static void
3982 mpt_recovery_thread(void *arg)
3983 {
3984 	struct mpt_softc *mpt;
3985 
3986 	mpt = (struct mpt_softc *)arg;
3987 	MPT_LOCK(mpt);
3988 	for (;;) {
3989 		if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
3990 			if (mpt->shutdwn_recovery == 0) {
3991 				mpt_sleep(mpt, mpt, 0, "idle", 0);
3992 			}
3993 		}
3994 		if (mpt->shutdwn_recovery != 0) {
3995 			break;
3996 		}
3997 		mpt_recover_commands(mpt);
3998 	}
3999 	mpt->recovery_thread = NULL;
4000 	wakeup(&mpt->recovery_thread);
4001 	MPT_UNLOCK(mpt);
4002 	mpt_kthread_exit(0);
4003 }
4004 
4005 static int
4006 mpt_scsi_send_tmf(struct mpt_softc *mpt, u_int type, u_int flags,
4007     u_int channel, u_int target, u_int lun, u_int abort_ctx, int sleep_ok)
4008 {
4009 	MSG_SCSI_TASK_MGMT *tmf_req;
4010 	int		    error;
4011 
4012 	/*
4013 	 * Wait for any current TMF request to complete.
4014 	 * We're only allowed to issue one TMF at a time.
4015 	 */
4016 	error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_FREE, REQ_STATE_FREE,
4017 	    sleep_ok, MPT_TMF_MAX_TIMEOUT);
4018 	if (error != 0) {
4019 		mpt_reset(mpt, TRUE);
4020 		return (ETIMEDOUT);
4021 	}
4022 
4023 	mpt_assign_serno(mpt, mpt->tmf_req);
4024 	mpt->tmf_req->state = REQ_STATE_ALLOCATED|REQ_STATE_QUEUED;
4025 
4026 	tmf_req = (MSG_SCSI_TASK_MGMT *)mpt->tmf_req->req_vbuf;
4027 	memset(tmf_req, 0, sizeof(*tmf_req));
4028 	tmf_req->TargetID = target;
4029 	tmf_req->Bus = channel;
4030 	tmf_req->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
4031 	tmf_req->TaskType = type;
4032 	tmf_req->MsgFlags = flags;
4033 	tmf_req->MsgContext =
4034 	    htole32(mpt->tmf_req->index | scsi_tmf_handler_id);
4035 	if (lun > MPT_MAX_LUNS) {
4036 		tmf_req->LUN[0] = 0x40 | ((lun >> 8) & 0x3f);
4037 		tmf_req->LUN[1] = lun & 0xff;
4038 	} else {
4039 		tmf_req->LUN[1] = lun;
4040 	}
4041 	tmf_req->TaskMsgContext = abort_ctx;
4042 
4043 	mpt_lprt(mpt, MPT_PRT_DEBUG,
4044 	    "Issuing TMF %p:%u with MsgContext of 0x%x\n", mpt->tmf_req,
4045 	    mpt->tmf_req->serno, tmf_req->MsgContext);
4046 	if (mpt->verbose > MPT_PRT_DEBUG) {
4047 		mpt_print_request(tmf_req);
4048 	}
4049 
4050 	KASSERT(mpt_req_on_pending_list(mpt, mpt->tmf_req) == 0,
4051 	    ("mpt_scsi_send_tmf: tmf_req already on pending list"));
4052 	TAILQ_INSERT_HEAD(&mpt->request_pending_list, mpt->tmf_req, links);
4053 	error = mpt_send_handshake_cmd(mpt, sizeof(*tmf_req), tmf_req);
4054 	if (error != MPT_OK) {
4055 		TAILQ_REMOVE(&mpt->request_pending_list, mpt->tmf_req, links);
4056 		mpt->tmf_req->state = REQ_STATE_FREE;
4057 		mpt_reset(mpt, TRUE);
4058 	}
4059 	return (error);
4060 }
4061 
4062 /*
4063  * When a command times out, it is placed on the requeust_timeout_list
4064  * and we wake our recovery thread.  The MPT-Fusion architecture supports
4065  * only a single TMF operation at a time, so we serially abort/bdr, etc,
4066  * the timedout transactions.  The next TMF is issued either by the
4067  * completion handler of the current TMF waking our recovery thread,
4068  * or the TMF timeout handler causing a hard reset sequence.
4069  */
4070 static void
4071 mpt_recover_commands(struct mpt_softc *mpt)
4072 {
4073 	request_t	   *req;
4074 	union ccb	   *ccb;
4075 	int		    error;
4076 
4077 	if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
4078 		/*
4079 		 * No work to do- leave.
4080 		 */
4081 		mpt_prt(mpt, "mpt_recover_commands: no requests.\n");
4082 		return;
4083 	}
4084 
4085 	/*
4086 	 * Flush any commands whose completion coincides with their timeout.
4087 	 */
4088 	mpt_intr(mpt);
4089 
4090 	if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
4091 		/*
4092 		 * The timedout commands have already
4093 		 * completed.  This typically means
4094 		 * that either the timeout value was on
4095 		 * the hairy edge of what the device
4096 		 * requires or - more likely - interrupts
4097 		 * are not happening.
4098 		 */
4099 		mpt_prt(mpt, "Timedout requests already complete. "
4100 		    "Interrupts may not be functioning.\n");
4101 		mpt_enable_ints(mpt);
4102 		return;
4103 	}
4104 
4105 	/*
4106 	 * We have no visibility into the current state of the
4107 	 * controller, so attempt to abort the commands in the
4108 	 * order they timed-out. For initiator commands, we
4109 	 * depend on the reply handler pulling requests off
4110 	 * the timeout list.
4111 	 */
4112 	while ((req = TAILQ_FIRST(&mpt->request_timeout_list)) != NULL) {
4113 		uint16_t status;
4114 		uint8_t response;
4115 		MSG_REQUEST_HEADER *hdrp = req->req_vbuf;
4116 
4117 		mpt_prt(mpt, "attempting to abort req %p:%u function %x\n",
4118 		    req, req->serno, hdrp->Function);
4119 		ccb = req->ccb;
4120 		if (ccb == NULL) {
4121 			mpt_prt(mpt, "null ccb in timed out request. "
4122 			    "Resetting Controller.\n");
4123 			mpt_reset(mpt, TRUE);
4124 			continue;
4125 		}
4126 		mpt_set_ccb_status(ccb, CAM_CMD_TIMEOUT);
4127 
4128 		/*
4129 		 * Check to see if this is not an initiator command and
4130 		 * deal with it differently if it is.
4131 		 */
4132 		switch (hdrp->Function) {
4133 		case MPI_FUNCTION_SCSI_IO_REQUEST:
4134 		case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
4135 			break;
4136 		default:
4137 			/*
4138 			 * XXX: FIX ME: need to abort target assists...
4139 			 */
4140 			mpt_prt(mpt, "just putting it back on the pend q\n");
4141 			TAILQ_REMOVE(&mpt->request_timeout_list, req, links);
4142 			TAILQ_INSERT_HEAD(&mpt->request_pending_list, req,
4143 			    links);
4144 			continue;
4145 		}
4146 
4147 		error = mpt_scsi_send_tmf(mpt,
4148 		    MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
4149 		    0, 0, ccb->ccb_h.target_id, ccb->ccb_h.target_lun,
4150 		    htole32(req->index | scsi_io_handler_id), TRUE);
4151 
4152 		if (error != 0) {
4153 			/*
4154 			 * mpt_scsi_send_tmf hard resets on failure, so no
4155 			 * need to do so here.  Our queue should be emptied
4156 			 * by the hard reset.
4157 			 */
4158 			continue;
4159 		}
4160 
4161 		error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE,
4162 		    REQ_STATE_DONE, TRUE, 500);
4163 
4164 		status = le16toh(mpt->tmf_req->IOCStatus);
4165 		response = mpt->tmf_req->ResponseCode;
4166 		mpt->tmf_req->state = REQ_STATE_FREE;
4167 
4168 		if (error != 0) {
4169 			/*
4170 			 * If we've errored out,, reset the controller.
4171 			 */
4172 			mpt_prt(mpt, "mpt_recover_commands: abort timed-out. "
4173 			    "Resetting controller\n");
4174 			mpt_reset(mpt, TRUE);
4175 			continue;
4176 		}
4177 
4178 		if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
4179 			mpt_prt(mpt, "mpt_recover_commands: IOC Status 0x%x. "
4180 			    "Resetting controller.\n", status);
4181 			mpt_reset(mpt, TRUE);
4182 			continue;
4183 		}
4184 
4185 		if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED &&
4186 		    response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) {
4187 			mpt_prt(mpt, "mpt_recover_commands: TMF Response 0x%x. "
4188 			    "Resetting controller.\n", response);
4189 			mpt_reset(mpt, TRUE);
4190 			continue;
4191 		}
4192 		mpt_prt(mpt, "abort of req %p:%u completed\n", req, req->serno);
4193 	}
4194 }
4195 
4196 /************************ Target Mode Support ****************************/
4197 static void
4198 mpt_fc_post_els(struct mpt_softc *mpt, request_t *req, int ioindex)
4199 {
4200 	MSG_LINK_SERVICE_BUFFER_POST_REQUEST *fc;
4201 	PTR_SGE_TRANSACTION32 tep;
4202 	PTR_SGE_SIMPLE32 se;
4203 	bus_addr_t paddr;
4204 	uint32_t fl;
4205 
4206 	paddr = req->req_pbuf;
4207 	paddr += MPT_RQSL(mpt);
4208 
4209 	fc = req->req_vbuf;
4210 	memset(fc, 0, MPT_REQUEST_AREA);
4211 	fc->BufferCount = 1;
4212 	fc->Function = MPI_FUNCTION_FC_LINK_SRVC_BUF_POST;
4213 	fc->MsgContext = htole32(req->index | fc_els_handler_id);
4214 
4215 	/*
4216 	 * Okay, set up ELS buffer pointers. ELS buffer pointers
4217 	 * consist of a TE SGL element (with details length of zero)
4218 	 * followed by a SIMPLE SGL element which holds the address
4219 	 * of the buffer.
4220 	 */
4221 
4222 	tep = (PTR_SGE_TRANSACTION32) &fc->SGL;
4223 
4224 	tep->ContextSize = 4;
4225 	tep->Flags = 0;
4226 	tep->TransactionContext[0] = htole32(ioindex);
4227 
4228 	se = (PTR_SGE_SIMPLE32) &tep->TransactionDetails[0];
4229 	fl =
4230 		MPI_SGE_FLAGS_HOST_TO_IOC	|
4231 		MPI_SGE_FLAGS_SIMPLE_ELEMENT	|
4232 		MPI_SGE_FLAGS_LAST_ELEMENT	|
4233 		MPI_SGE_FLAGS_END_OF_LIST	|
4234 		MPI_SGE_FLAGS_END_OF_BUFFER;
4235 	fl <<= MPI_SGE_FLAGS_SHIFT;
4236 	fl |= (MPT_NRFM(mpt) - MPT_RQSL(mpt));
4237 	se->FlagsLength = htole32(fl);
4238 	se->Address = htole32((uint32_t) paddr);
4239 	mpt_lprt(mpt, MPT_PRT_DEBUG,
4240 	    "add ELS index %d ioindex %d for %p:%u\n",
4241 	    req->index, ioindex, req, req->serno);
4242 	KASSERT(((req->state & REQ_STATE_LOCKED) != 0),
4243 	    ("mpt_fc_post_els: request not locked"));
4244 	mpt_send_cmd(mpt, req);
4245 }
4246 
4247 static void
4248 mpt_post_target_command(struct mpt_softc *mpt, request_t *req, int ioindex)
4249 {
4250 	PTR_MSG_TARGET_CMD_BUFFER_POST_REQUEST fc;
4251 	PTR_CMD_BUFFER_DESCRIPTOR cb;
4252 	bus_addr_t paddr;
4253 
4254 	paddr = req->req_pbuf;
4255 	paddr += MPT_RQSL(mpt);
4256 	memset(req->req_vbuf, 0, MPT_REQUEST_AREA);
4257 	MPT_TGT_STATE(mpt, req)->state = TGT_STATE_LOADING;
4258 
4259 	fc = req->req_vbuf;
4260 	fc->BufferCount = 1;
4261 	fc->Function = MPI_FUNCTION_TARGET_CMD_BUFFER_POST;
4262 	fc->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4263 
4264 	cb = &fc->Buffer[0];
4265 	cb->IoIndex = htole16(ioindex);
4266 	cb->u.PhysicalAddress32 = htole32((U32) paddr);
4267 
4268 	mpt_check_doorbell(mpt);
4269 	mpt_send_cmd(mpt, req);
4270 }
4271 
4272 static int
4273 mpt_add_els_buffers(struct mpt_softc *mpt)
4274 {
4275 	int i;
4276 
4277 	if (mpt->is_fc == 0) {
4278 		return (TRUE);
4279 	}
4280 
4281 	if (mpt->els_cmds_allocated) {
4282 		return (TRUE);
4283 	}
4284 
4285 	mpt->els_cmd_ptrs = kmalloc(MPT_MAX_ELS * sizeof (request_t *),
4286 	    M_DEVBUF, M_NOWAIT | M_ZERO);
4287 
4288 	if (mpt->els_cmd_ptrs == NULL) {
4289 		return (FALSE);
4290 	}
4291 
4292 	/*
4293 	 * Feed the chip some ELS buffer resources
4294 	 */
4295 	for (i = 0; i < MPT_MAX_ELS; i++) {
4296 		request_t *req = mpt_get_request(mpt, FALSE);
4297 		if (req == NULL) {
4298 			break;
4299 		}
4300 		req->state |= REQ_STATE_LOCKED;
4301 		mpt->els_cmd_ptrs[i] = req;
4302 		mpt_fc_post_els(mpt, req, i);
4303 	}
4304 
4305 	if (i == 0) {
4306 		mpt_prt(mpt, "unable to add ELS buffer resources\n");
4307 		kfree(mpt->els_cmd_ptrs, M_DEVBUF);
4308 		mpt->els_cmd_ptrs = NULL;
4309 		return (FALSE);
4310 	}
4311 	if (i != MPT_MAX_ELS) {
4312 		mpt_lprt(mpt, MPT_PRT_INFO,
4313 		    "only added %d of %d  ELS buffers\n", i, MPT_MAX_ELS);
4314 	}
4315 	mpt->els_cmds_allocated = i;
4316 	return(TRUE);
4317 }
4318 
4319 static int
4320 mpt_add_target_commands(struct mpt_softc *mpt)
4321 {
4322 	int i, max;
4323 
4324 	if (mpt->tgt_cmd_ptrs) {
4325 		return (TRUE);
4326 	}
4327 
4328 	max = MPT_MAX_REQUESTS(mpt) >> 1;
4329 	if (max > mpt->mpt_max_tgtcmds) {
4330 		max = mpt->mpt_max_tgtcmds;
4331 	}
4332 	mpt->tgt_cmd_ptrs =
4333 	    kmalloc(max * sizeof (request_t *), M_DEVBUF, M_NOWAIT | M_ZERO);
4334 	if (mpt->tgt_cmd_ptrs == NULL) {
4335 		mpt_prt(mpt,
4336 		    "mpt_add_target_commands: could not allocate cmd ptrs\n");
4337 		return (FALSE);
4338 	}
4339 
4340 	for (i = 0; i < max; i++) {
4341 		request_t *req;
4342 
4343 		req = mpt_get_request(mpt, FALSE);
4344 		if (req == NULL) {
4345 			break;
4346 		}
4347 		req->state |= REQ_STATE_LOCKED;
4348 		mpt->tgt_cmd_ptrs[i] = req;
4349 		mpt_post_target_command(mpt, req, i);
4350 	}
4351 
4352 
4353 	if (i == 0) {
4354 		mpt_lprt(mpt, MPT_PRT_ERROR, "could not add any target bufs\n");
4355 		kfree(mpt->tgt_cmd_ptrs, M_DEVBUF);
4356 		mpt->tgt_cmd_ptrs = NULL;
4357 		return (FALSE);
4358 	}
4359 
4360 	mpt->tgt_cmds_allocated = i;
4361 
4362 	if (i < max) {
4363 		mpt_lprt(mpt, MPT_PRT_INFO,
4364 		    "added %d of %d target bufs\n", i, max);
4365 	}
4366 	return (i);
4367 }
4368 
4369 static int
4370 mpt_enable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun)
4371 {
4372 	if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) {
4373 		mpt->twildcard = 1;
4374 	} else if (lun >= MPT_MAX_LUNS) {
4375 		return (EINVAL);
4376 	} else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) {
4377 		return (EINVAL);
4378 	}
4379 	if (mpt->tenabled == 0) {
4380 		if (mpt->is_fc) {
4381 			(void) mpt_fc_reset_link(mpt, 0);
4382 		}
4383 		mpt->tenabled = 1;
4384 	}
4385 	if (lun == CAM_LUN_WILDCARD) {
4386 		mpt->trt_wildcard.enabled = 1;
4387 	} else {
4388 		mpt->trt[lun].enabled = 1;
4389 	}
4390 	return (0);
4391 }
4392 
4393 static int
4394 mpt_disable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun)
4395 {
4396 	int i;
4397 	if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) {
4398 		mpt->twildcard = 0;
4399 	} else if (lun >= MPT_MAX_LUNS) {
4400 		return (EINVAL);
4401 	} else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) {
4402 		return (EINVAL);
4403 	}
4404 	if (lun == CAM_LUN_WILDCARD) {
4405 		mpt->trt_wildcard.enabled = 0;
4406 	} else {
4407 		mpt->trt[lun].enabled = 0;
4408 	}
4409 	for (i = 0; i < MPT_MAX_LUNS; i++) {
4410 		if (mpt->trt[lun].enabled) {
4411 			break;
4412 		}
4413 	}
4414 	if (i == MPT_MAX_LUNS && mpt->twildcard == 0) {
4415 		if (mpt->is_fc) {
4416 			(void) mpt_fc_reset_link(mpt, 0);
4417 		}
4418 		mpt->tenabled = 0;
4419 	}
4420 	return (0);
4421 }
4422 
4423 /*
4424  * Called with MPT lock held
4425  */
4426 static void
4427 mpt_target_start_io(struct mpt_softc *mpt, union ccb *ccb)
4428 {
4429 	struct ccb_scsiio *csio = &ccb->csio;
4430 	request_t *cmd_req = MPT_TAG_2_REQ(mpt, csio->tag_id);
4431 	mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
4432 
4433 	switch (tgt->state) {
4434 	case TGT_STATE_IN_CAM:
4435 		break;
4436 	case TGT_STATE_MOVING_DATA:
4437 		mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
4438 		xpt_freeze_simq(mpt->sim, 1);
4439 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4440 		tgt->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
4441 		MPTLOCK_2_CAMLOCK(mpt);
4442 		xpt_done(ccb);
4443 		CAMLOCK_2_MPTLOCK(mpt);
4444 		return;
4445 	default:
4446 		mpt_prt(mpt, "ccb %p flags 0x%x tag 0x%08x had bad request "
4447 		    "starting I/O\n", ccb, csio->ccb_h.flags, csio->tag_id);
4448 		mpt_tgt_dump_req_state(mpt, cmd_req);
4449 		mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
4450 		MPTLOCK_2_CAMLOCK(mpt);
4451 		xpt_done(ccb);
4452 		CAMLOCK_2_MPTLOCK(mpt);
4453 		return;
4454 	}
4455 
4456 	if (csio->dxfer_len) {
4457 		bus_dmamap_callback_t *cb;
4458 		PTR_MSG_TARGET_ASSIST_REQUEST ta;
4459 		request_t *req;
4460 
4461 		KASSERT((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE,
4462 		    ("dxfer_len %u but direction is NONE\n", csio->dxfer_len));
4463 
4464 		if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
4465 			if (mpt->outofbeer == 0) {
4466 				mpt->outofbeer = 1;
4467 				xpt_freeze_simq(mpt->sim, 1);
4468 				mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
4469 			}
4470 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4471 			mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
4472 			MPTLOCK_2_CAMLOCK(mpt);
4473 			xpt_done(ccb);
4474 			CAMLOCK_2_MPTLOCK(mpt);
4475 			return;
4476 		}
4477 		ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG;
4478 		if (sizeof (bus_addr_t) > 4) {
4479 			cb = mpt_execute_req_a64;
4480 		} else {
4481 			cb = mpt_execute_req;
4482 		}
4483 
4484 		req->ccb = ccb;
4485 		ccb->ccb_h.ccb_req_ptr = req;
4486 
4487 		/*
4488 		 * Record the currently active ccb and the
4489 		 * request for it in our target state area.
4490 		 */
4491 		tgt->ccb = ccb;
4492 		tgt->req = req;
4493 
4494 		memset(req->req_vbuf, 0, MPT_RQSL(mpt));
4495 		ta = req->req_vbuf;
4496 
4497 		if (mpt->is_sas) {
4498 			PTR_MPI_TARGET_SSP_CMD_BUFFER ssp =
4499 			     cmd_req->req_vbuf;
4500 			ta->QueueTag = ssp->InitiatorTag;
4501 		} else if (mpt->is_spi) {
4502 			PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp =
4503 			     cmd_req->req_vbuf;
4504 			ta->QueueTag = sp->Tag;
4505 		}
4506 		ta->Function = MPI_FUNCTION_TARGET_ASSIST;
4507 		ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4508 		ta->ReplyWord = htole32(tgt->reply_desc);
4509 		if (csio->ccb_h.target_lun > MPT_MAX_LUNS) {
4510 			ta->LUN[0] =
4511 			    0x40 | ((csio->ccb_h.target_lun >> 8) & 0x3f);
4512 			ta->LUN[1] = csio->ccb_h.target_lun & 0xff;
4513 		} else {
4514 			ta->LUN[1] = csio->ccb_h.target_lun;
4515 		}
4516 
4517 		ta->RelativeOffset = tgt->bytes_xfered;
4518 		ta->DataLength = ccb->csio.dxfer_len;
4519 		if (ta->DataLength > tgt->resid) {
4520 			ta->DataLength = tgt->resid;
4521 		}
4522 
4523 		/*
4524 		 * XXX Should be done after data transfer completes?
4525 		 */
4526 		tgt->resid -= csio->dxfer_len;
4527 		tgt->bytes_xfered += csio->dxfer_len;
4528 
4529 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
4530 			ta->TargetAssistFlags |=
4531 			    TARGET_ASSIST_FLAGS_DATA_DIRECTION;
4532 		}
4533 
4534 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
4535 		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
4536 		    csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
4537 			ta->TargetAssistFlags |=
4538 			    TARGET_ASSIST_FLAGS_AUTO_STATUS;
4539 		}
4540 #endif
4541 		tgt->state = TGT_STATE_SETTING_UP_FOR_DATA;
4542 
4543 		mpt_lprt(mpt, MPT_PRT_DEBUG,
4544 		    "DATA_CCB %p tag %x %u bytes %u resid flg %x req %p:%u "
4545 		    "nxtstate=%d\n", csio, csio->tag_id, csio->dxfer_len,
4546 		    tgt->resid, ccb->ccb_h.flags, req, req->serno, tgt->state);
4547 
4548 		MPTLOCK_2_CAMLOCK(mpt);
4549 		if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
4550 			if ((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0) {
4551 				int error;
4552 				crit_enter();
4553 				error = bus_dmamap_load(mpt->buffer_dmat,
4554 				    req->dmap, csio->data_ptr, csio->dxfer_len,
4555 				    cb, req, 0);
4556 				crit_exit();
4557 				if (error == EINPROGRESS) {
4558 					xpt_freeze_simq(mpt->sim, 1);
4559 					ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
4560 				}
4561 			} else {
4562 				/*
4563 				 * We have been given a pointer to single
4564 				 * physical buffer.
4565 				 */
4566 				struct bus_dma_segment seg;
4567 				seg.ds_addr = (bus_addr_t)
4568 				    (vm_offset_t)csio->data_ptr;
4569 				seg.ds_len = csio->dxfer_len;
4570 				(*cb)(req, &seg, 1, 0);
4571 			}
4572 		} else {
4573 			/*
4574 			 * We have been given a list of addresses.
4575 			 * This case could be easily supported but they are not
4576 			 * currently generated by the CAM subsystem so there
4577 			 * is no point in wasting the time right now.
4578 			 */
4579 			struct bus_dma_segment *sgs;
4580 			if ((ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
4581 				(*cb)(req, NULL, 0, EFAULT);
4582 			} else {
4583 				/* Just use the segments provided */
4584 				sgs = (struct bus_dma_segment *)csio->data_ptr;
4585 				(*cb)(req, sgs, csio->sglist_cnt, 0);
4586 			}
4587 		}
4588 		CAMLOCK_2_MPTLOCK(mpt);
4589 	} else {
4590 		uint8_t *sp = NULL, sense[MPT_SENSE_SIZE];
4591 
4592 		/*
4593 		 * XXX: I don't know why this seems to happen, but
4594 		 * XXX: completing the CCB seems to make things happy.
4595 		 * XXX: This seems to happen if the initiator requests
4596 		 * XXX: enough data that we have to do multiple CTIOs.
4597 		 */
4598 		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) {
4599 			mpt_lprt(mpt, MPT_PRT_DEBUG,
4600 			    "Meaningless STATUS CCB (%p): flags %x status %x "
4601 			    "resid %d bytes_xfered %u\n", ccb, ccb->ccb_h.flags,
4602 			    ccb->ccb_h.status, tgt->resid, tgt->bytes_xfered);
4603 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
4604 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4605 			MPTLOCK_2_CAMLOCK(mpt);
4606 			xpt_done(ccb);
4607 			CAMLOCK_2_MPTLOCK(mpt);
4608 			return;
4609 		}
4610 		if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
4611 			sp = sense;
4612 			memcpy(sp, &csio->sense_data,
4613 			   min(csio->sense_len, MPT_SENSE_SIZE));
4614 		}
4615 		mpt_scsi_tgt_status(mpt, ccb, cmd_req, csio->scsi_status, sp);
4616 	}
4617 }
4618 
4619 static void
4620 mpt_scsi_tgt_local(struct mpt_softc *mpt, request_t *cmd_req,
4621     uint32_t lun, int send, uint8_t *data, size_t length)
4622 {
4623 	mpt_tgt_state_t *tgt;
4624 	PTR_MSG_TARGET_ASSIST_REQUEST ta;
4625 	SGE_SIMPLE32 *se;
4626 	uint32_t flags;
4627 	uint8_t *dptr;
4628 	bus_addr_t pptr;
4629 	request_t *req;
4630 
4631 	/*
4632 	 * We enter with resid set to the data load for the command.
4633 	 */
4634 	tgt = MPT_TGT_STATE(mpt, cmd_req);
4635 	if (length == 0 || tgt->resid == 0) {
4636 		tgt->resid = 0;
4637 		mpt_scsi_tgt_status(mpt, NULL, cmd_req, 0, NULL);
4638 		return;
4639 	}
4640 
4641 	if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
4642 		mpt_prt(mpt, "out of resources- dropping local response\n");
4643 		return;
4644 	}
4645 	tgt->is_local = 1;
4646 
4647 
4648 	memset(req->req_vbuf, 0, MPT_RQSL(mpt));
4649 	ta = req->req_vbuf;
4650 
4651 	if (mpt->is_sas) {
4652 		PTR_MPI_TARGET_SSP_CMD_BUFFER ssp = cmd_req->req_vbuf;
4653 		ta->QueueTag = ssp->InitiatorTag;
4654 	} else if (mpt->is_spi) {
4655 		PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp = cmd_req->req_vbuf;
4656 		ta->QueueTag = sp->Tag;
4657 	}
4658 	ta->Function = MPI_FUNCTION_TARGET_ASSIST;
4659 	ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4660 	ta->ReplyWord = htole32(tgt->reply_desc);
4661 	if (lun > MPT_MAX_LUNS) {
4662 		ta->LUN[0] = 0x40 | ((lun >> 8) & 0x3f);
4663 		ta->LUN[1] = lun & 0xff;
4664 	} else {
4665 		ta->LUN[1] = lun;
4666 	}
4667 	ta->RelativeOffset = 0;
4668 	ta->DataLength = length;
4669 
4670 	dptr = req->req_vbuf;
4671 	dptr += MPT_RQSL(mpt);
4672 	pptr = req->req_pbuf;
4673 	pptr += MPT_RQSL(mpt);
4674 	memcpy(dptr, data, min(length, MPT_RQSL(mpt)));
4675 
4676 	se = (SGE_SIMPLE32 *) &ta->SGL[0];
4677 	memset(se, 0,sizeof (*se));
4678 
4679 	flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
4680 	if (send) {
4681 		ta->TargetAssistFlags |= TARGET_ASSIST_FLAGS_DATA_DIRECTION;
4682 		flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
4683 	}
4684 	se->Address = pptr;
4685 	MPI_pSGE_SET_LENGTH(se, length);
4686 	flags |= MPI_SGE_FLAGS_LAST_ELEMENT;
4687 	flags |= MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_END_OF_BUFFER;
4688 	MPI_pSGE_SET_FLAGS(se, flags);
4689 
4690 	tgt->ccb = NULL;
4691 	tgt->req = req;
4692 	tgt->resid -= length;
4693 	tgt->bytes_xfered = length;
4694 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
4695 	tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
4696 #else
4697 	tgt->state = TGT_STATE_MOVING_DATA;
4698 #endif
4699 	mpt_send_cmd(mpt, req);
4700 }
4701 
4702 /*
4703  * Abort queued up CCBs
4704  */
4705 static cam_status
4706 mpt_abort_target_ccb(struct mpt_softc *mpt, union ccb *ccb)
4707 {
4708 	struct mpt_hdr_stailq *lp;
4709 	struct ccb_hdr *srch;
4710 	int found = 0;
4711 	union ccb *accb = ccb->cab.abort_ccb;
4712 	tgt_resource_t *trtp;
4713 
4714 	mpt_lprt(mpt, MPT_PRT_DEBUG, "aborting ccb %p\n", accb);
4715 
4716 	if (ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) {
4717 		trtp = &mpt->trt_wildcard;
4718 	} else {
4719 		trtp = &mpt->trt[ccb->ccb_h.target_lun];
4720 	}
4721 
4722 	if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
4723 		lp = &trtp->atios;
4724 	} else if (accb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
4725 		lp = &trtp->inots;
4726 	} else {
4727 		return (CAM_REQ_INVALID);
4728 	}
4729 
4730 	STAILQ_FOREACH(srch, lp, sim_links.stqe) {
4731 		if (srch == &accb->ccb_h) {
4732 			found = 1;
4733 			STAILQ_REMOVE(lp, srch, ccb_hdr, sim_links.stqe);
4734 			break;
4735 		}
4736 	}
4737 	if (found) {
4738 		accb->ccb_h.status = CAM_REQ_ABORTED;
4739 		xpt_done(accb);
4740 		return (CAM_REQ_CMP);
4741 	}
4742 	mpt_prt(mpt, "mpt_abort_tgt_ccb: CCB %p not found\n", ccb);
4743 	return (CAM_PATH_INVALID);
4744 }
4745 
4746 /*
4747  * Ask the MPT to abort the current target command
4748  */
4749 static int
4750 mpt_abort_target_cmd(struct mpt_softc *mpt, request_t *cmd_req)
4751 {
4752 	int error;
4753 	request_t *req;
4754 	PTR_MSG_TARGET_MODE_ABORT abtp;
4755 
4756 	req = mpt_get_request(mpt, FALSE);
4757 	if (req == NULL) {
4758 		return (-1);
4759 	}
4760 	abtp = req->req_vbuf;
4761 	memset(abtp, 0, sizeof (*abtp));
4762 
4763 	abtp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4764 	abtp->AbortType = TARGET_MODE_ABORT_TYPE_EXACT_IO;
4765 	abtp->Function = MPI_FUNCTION_TARGET_MODE_ABORT;
4766 	abtp->ReplyWord = htole32(MPT_TGT_STATE(mpt, cmd_req)->reply_desc);
4767 	error = 0;
4768 	if (mpt->is_fc || mpt->is_sas) {
4769 		mpt_send_cmd(mpt, req);
4770 	} else {
4771 		error = mpt_send_handshake_cmd(mpt, sizeof(*req), req);
4772 	}
4773 	return (error);
4774 }
4775 
4776 /*
4777  * WE_TRUST_AUTO_GOOD_STATUS- I've found that setting
4778  * TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS leads the
4779  * FC929 to set bogus FC_RSP fields (nonzero residuals
4780  * but w/o RESID fields set). This causes QLogic initiators
4781  * to think maybe that a frame was lost.
4782  *
4783  * WE_CAN_USE_AUTO_REPOST- we can't use AUTO_REPOST because
4784  * we use allocated requests to do TARGET_ASSIST and we
4785  * need to know when to release them.
4786  */
4787 
4788 static void
4789 mpt_scsi_tgt_status(struct mpt_softc *mpt, union ccb *ccb, request_t *cmd_req,
4790     uint8_t status, uint8_t const *sense_data)
4791 {
4792 	uint8_t *cmd_vbuf;
4793 	mpt_tgt_state_t *tgt;
4794 	PTR_MSG_TARGET_STATUS_SEND_REQUEST tp;
4795 	request_t *req;
4796 	bus_addr_t paddr;
4797 	int resplen = 0;
4798 	uint32_t fl;
4799 
4800 	cmd_vbuf = cmd_req->req_vbuf;
4801 	cmd_vbuf += MPT_RQSL(mpt);
4802 	tgt = MPT_TGT_STATE(mpt, cmd_req);
4803 
4804 	if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
4805 		if (mpt->outofbeer == 0) {
4806 			mpt->outofbeer = 1;
4807 			xpt_freeze_simq(mpt->sim, 1);
4808 			mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
4809 		}
4810 		if (ccb) {
4811 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
4812 			mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
4813 			MPTLOCK_2_CAMLOCK(mpt);
4814 			xpt_done(ccb);
4815 			CAMLOCK_2_MPTLOCK(mpt);
4816 		} else {
4817 			mpt_prt(mpt,
4818 			    "could not allocate status request- dropping\n");
4819 		}
4820 		return;
4821 	}
4822 	req->ccb = ccb;
4823 	if (ccb) {
4824 		ccb->ccb_h.ccb_mpt_ptr = mpt;
4825 		ccb->ccb_h.ccb_req_ptr = req;
4826 	}
4827 
4828 	/*
4829 	 * Record the currently active ccb, if any, and the
4830 	 * request for it in our target state area.
4831 	 */
4832 	tgt->ccb = ccb;
4833 	tgt->req = req;
4834 	tgt->state = TGT_STATE_SENDING_STATUS;
4835 
4836 	tp = req->req_vbuf;
4837 	paddr = req->req_pbuf;
4838 	paddr += MPT_RQSL(mpt);
4839 
4840 	memset(tp, 0, sizeof (*tp));
4841 	tp->Function = MPI_FUNCTION_TARGET_STATUS_SEND;
4842 	if (mpt->is_fc) {
4843 		PTR_MPI_TARGET_FCP_CMD_BUFFER fc =
4844 		    (PTR_MPI_TARGET_FCP_CMD_BUFFER) cmd_vbuf;
4845 		uint8_t *sts_vbuf;
4846 		uint32_t *rsp;
4847 
4848 		sts_vbuf = req->req_vbuf;
4849 		sts_vbuf += MPT_RQSL(mpt);
4850 		rsp = (uint32_t *) sts_vbuf;
4851 		memcpy(tp->LUN, fc->FcpLun, sizeof (tp->LUN));
4852 
4853 		/*
4854 		 * The MPI_TARGET_FCP_RSP_BUFFER define is unfortunate.
4855 		 * It has to be big-endian in memory and is organized
4856 		 * in 32 bit words, which are much easier to deal with
4857 		 * as words which are swizzled as needed.
4858 		 *
4859 		 * All we're filling here is the FC_RSP payload.
4860 		 * We may just have the chip synthesize it if
4861 		 * we have no residual and an OK status.
4862 		 *
4863 		 */
4864 		memset(rsp, 0, sizeof (MPI_TARGET_FCP_RSP_BUFFER));
4865 
4866 		rsp[2] = status;
4867 		if (tgt->resid) {
4868 			rsp[2] |= 0x800;	/* XXXX NEED MNEMONIC!!!! */
4869 			rsp[3] = htobe32(tgt->resid);
4870 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
4871 			resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
4872 #endif
4873 		}
4874 		if (status == SCSI_STATUS_CHECK_COND) {
4875 			int i;
4876 
4877 			rsp[2] |= 0x200;	/* XXXX NEED MNEMONIC!!!! */
4878 			rsp[4] = htobe32(MPT_SENSE_SIZE);
4879 			if (sense_data) {
4880 				memcpy(&rsp[8], sense_data, MPT_SENSE_SIZE);
4881 			} else {
4882 				mpt_prt(mpt, "mpt_scsi_tgt_status: CHECK CONDI"
4883 				    "TION but no sense data?\n");
4884 				memset(&rsp, 0, MPT_SENSE_SIZE);
4885 			}
4886 			for (i = 8; i < (8 + (MPT_SENSE_SIZE >> 2)); i++) {
4887 				rsp[i] = htobe32(rsp[i]);
4888 			}
4889 #ifdef	WE_TRUST_AUTO_GOOD_STATUS
4890 			resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
4891 #endif
4892 		}
4893 #ifndef	WE_TRUST_AUTO_GOOD_STATUS
4894 		resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
4895 #endif
4896 		rsp[2] = htobe32(rsp[2]);
4897 	} else if (mpt->is_sas) {
4898 		PTR_MPI_TARGET_SSP_CMD_BUFFER ssp =
4899 		    (PTR_MPI_TARGET_SSP_CMD_BUFFER) cmd_vbuf;
4900 		memcpy(tp->LUN, ssp->LogicalUnitNumber, sizeof (tp->LUN));
4901 	} else {
4902 		PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp =
4903 		    (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) cmd_vbuf;
4904 		tp->StatusCode = status;
4905 		tp->QueueTag = htole16(sp->Tag);
4906 		memcpy(tp->LUN, sp->LogicalUnitNumber, sizeof (tp->LUN));
4907 	}
4908 
4909 	tp->ReplyWord = htole32(tgt->reply_desc);
4910 	tp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
4911 
4912 #ifdef	WE_CAN_USE_AUTO_REPOST
4913 	tp->MsgFlags = TARGET_STATUS_SEND_FLAGS_REPOST_CMD_BUFFER;
4914 #endif
4915 	if (status == SCSI_STATUS_OK && resplen == 0) {
4916 		tp->MsgFlags |= TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS;
4917 	} else {
4918 		tp->StatusDataSGE.u.Address32 = htole32((uint32_t) paddr);
4919 		fl =
4920 			MPI_SGE_FLAGS_HOST_TO_IOC	|
4921 			MPI_SGE_FLAGS_SIMPLE_ELEMENT	|
4922 			MPI_SGE_FLAGS_LAST_ELEMENT	|
4923 			MPI_SGE_FLAGS_END_OF_LIST	|
4924 			MPI_SGE_FLAGS_END_OF_BUFFER;
4925 		fl <<= MPI_SGE_FLAGS_SHIFT;
4926 		fl |= resplen;
4927 		tp->StatusDataSGE.FlagsLength = htole32(fl);
4928 	}
4929 
4930 	mpt_lprt(mpt, MPT_PRT_DEBUG,
4931 	    "STATUS_CCB %p (wit%s sense) tag %x req %p:%u resid %u\n",
4932 	    ccb, sense_data?"h" : "hout", ccb? ccb->csio.tag_id : -1, req,
4933 	    req->serno, tgt->resid);
4934 	if (ccb) {
4935 		ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG;
4936 		mpt_req_timeout(req, 60 * hz, mpt_timeout, ccb);
4937 	}
4938 	mpt_send_cmd(mpt, req);
4939 }
4940 
4941 static void
4942 mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *mpt, request_t *req, mpt_task_mgmt_t fc,
4943     tgt_resource_t *trtp, int init_id)
4944 {
4945 	struct ccb_immed_notify *inot;
4946 	mpt_tgt_state_t *tgt;
4947 
4948 	tgt = MPT_TGT_STATE(mpt, req);
4949 	inot = (struct ccb_immed_notify *) STAILQ_FIRST(&trtp->inots);
4950 	if (inot == NULL) {
4951 		mpt_lprt(mpt, MPT_PRT_WARN, "no INOTSs- sending back BSY\n");
4952 		mpt_scsi_tgt_status(mpt, NULL, req, SCSI_STATUS_BUSY, NULL);
4953 		return;
4954 	}
4955 	STAILQ_REMOVE_HEAD(&trtp->inots, sim_links.stqe);
4956 	mpt_lprt(mpt, MPT_PRT_DEBUG1,
4957 	    "Get FREE INOT %p lun %d\n", inot, inot->ccb_h.target_lun);
4958 
4959 	memset(&inot->sense_data, 0, sizeof (inot->sense_data));
4960 	inot->sense_len = 0;
4961 	memset(inot->message_args, 0, sizeof (inot->message_args));
4962 	inot->initiator_id = init_id;	/* XXX */
4963 
4964 	/*
4965 	 * This is a somewhat grotesque attempt to map from task management
4966 	 * to old style SCSI messages. God help us all.
4967 	 */
4968 	switch (fc) {
4969 	case MPT_ABORT_TASK_SET:
4970 		inot->message_args[0] = MSG_ABORT_TAG;
4971 		break;
4972 	case MPT_CLEAR_TASK_SET:
4973 		inot->message_args[0] = MSG_CLEAR_TASK_SET;
4974 		break;
4975 	case MPT_TARGET_RESET:
4976 		inot->message_args[0] = MSG_TARGET_RESET;
4977 		break;
4978 	case MPT_CLEAR_ACA:
4979 		inot->message_args[0] = MSG_CLEAR_ACA;
4980 		break;
4981 	case MPT_TERMINATE_TASK:
4982 		inot->message_args[0] = MSG_ABORT_TAG;
4983 		break;
4984 	default:
4985 		inot->message_args[0] = MSG_NOOP;
4986 		break;
4987 	}
4988 	tgt->ccb = (union ccb *) inot;
4989 	inot->ccb_h.status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN;
4990 	MPTLOCK_2_CAMLOCK(mpt);
4991 	xpt_done((union ccb *)inot);
4992 	CAMLOCK_2_MPTLOCK(mpt);
4993 }
4994 
4995 static void
4996 mpt_scsi_tgt_atio(struct mpt_softc *mpt, request_t *req, uint32_t reply_desc)
4997 {
4998 	static uint8_t null_iqd[SHORT_INQUIRY_LENGTH] = {
4999 	    0x7f, 0x00, 0x02, 0x02, 0x20, 0x00, 0x00, 0x32,
5000 	     'F',  'R',  'E',  'E',  'B',  'S',  'D',  ' ',
5001 	     'L',  'S',  'I',  '-',  'L',  'O',  'G',  'I',
5002 	     'C',  ' ',  'N',  'U',  'L',  'D',  'E',  'V',
5003 	     '0',  '0',  '0',  '1'
5004 	};
5005 	struct ccb_accept_tio *atiop;
5006 	lun_id_t lun;
5007 	int tag_action = 0;
5008 	mpt_tgt_state_t *tgt;
5009 	tgt_resource_t *trtp = NULL;
5010 	U8 *lunptr;
5011 	U8 *vbuf;
5012 	U16 itag;
5013 	U16 ioindex;
5014 	mpt_task_mgmt_t fct = MPT_NIL_TMT_VALUE;
5015 	uint8_t *cdbp;
5016 
5017 	/*
5018 	 * Stash info for the current command where we can get at it later.
5019 	 */
5020 	vbuf = req->req_vbuf;
5021 	vbuf += MPT_RQSL(mpt);
5022 
5023 	/*
5024 	 * Get our state pointer set up.
5025 	 */
5026 	tgt = MPT_TGT_STATE(mpt, req);
5027 	if (tgt->state != TGT_STATE_LOADED) {
5028 		mpt_tgt_dump_req_state(mpt, req);
5029 		panic("bad target state in mpt_scsi_tgt_atio");
5030 	}
5031 	memset(tgt, 0, sizeof (mpt_tgt_state_t));
5032 	tgt->state = TGT_STATE_IN_CAM;
5033 	tgt->reply_desc = reply_desc;
5034 	ioindex = GET_IO_INDEX(reply_desc);
5035 	if (mpt->verbose >= MPT_PRT_DEBUG) {
5036 		mpt_dump_data(mpt, "mpt_scsi_tgt_atio response", vbuf,
5037 		    max(sizeof (MPI_TARGET_FCP_CMD_BUFFER),
5038 		    max(sizeof (MPI_TARGET_SSP_CMD_BUFFER),
5039 		    sizeof (MPI_TARGET_SCSI_SPI_CMD_BUFFER))));
5040 	}
5041 	if (mpt->is_fc) {
5042 		PTR_MPI_TARGET_FCP_CMD_BUFFER fc;
5043 		fc = (PTR_MPI_TARGET_FCP_CMD_BUFFER) vbuf;
5044 		if (fc->FcpCntl[2]) {
5045 			/*
5046 			 * Task Management Request
5047 			 */
5048 			switch (fc->FcpCntl[2]) {
5049 			case 0x2:
5050 				fct = MPT_ABORT_TASK_SET;
5051 				break;
5052 			case 0x4:
5053 				fct = MPT_CLEAR_TASK_SET;
5054 				break;
5055 			case 0x20:
5056 				fct = MPT_TARGET_RESET;
5057 				break;
5058 			case 0x40:
5059 				fct = MPT_CLEAR_ACA;
5060 				break;
5061 			case 0x80:
5062 				fct = MPT_TERMINATE_TASK;
5063 				break;
5064 			default:
5065 				mpt_prt(mpt, "CORRUPTED TASK MGMT BITS: 0x%x\n",
5066 				    fc->FcpCntl[2]);
5067 				mpt_scsi_tgt_status(mpt, 0, req,
5068 				    SCSI_STATUS_OK, 0);
5069 				return;
5070 			}
5071 		} else {
5072 			switch (fc->FcpCntl[1]) {
5073 			case 0:
5074 				tag_action = MSG_SIMPLE_Q_TAG;
5075 				break;
5076 			case 1:
5077 				tag_action = MSG_HEAD_OF_Q_TAG;
5078 				break;
5079 			case 2:
5080 				tag_action = MSG_ORDERED_Q_TAG;
5081 				break;
5082 			default:
5083 				/*
5084 				 * Bah. Ignore Untagged Queing and ACA
5085 				 */
5086 				tag_action = MSG_SIMPLE_Q_TAG;
5087 				break;
5088 			}
5089 		}
5090 		tgt->resid = be32toh(fc->FcpDl);
5091 		cdbp = fc->FcpCdb;
5092 		lunptr = fc->FcpLun;
5093 		itag = be16toh(fc->OptionalOxid);
5094 	} else if (mpt->is_sas) {
5095 		PTR_MPI_TARGET_SSP_CMD_BUFFER ssp;
5096 		ssp = (PTR_MPI_TARGET_SSP_CMD_BUFFER) vbuf;
5097 		cdbp = ssp->CDB;
5098 		lunptr = ssp->LogicalUnitNumber;
5099 		itag = ssp->InitiatorTag;
5100 	} else {
5101 		PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp;
5102 		sp = (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) vbuf;
5103 		cdbp = sp->CDB;
5104 		lunptr = sp->LogicalUnitNumber;
5105 		itag = sp->Tag;
5106 	}
5107 
5108 	/*
5109 	 * Generate a simple lun
5110 	 */
5111 	switch (lunptr[0] & 0xc0) {
5112 	case 0x40:
5113 		lun = ((lunptr[0] & 0x3f) << 8) | lunptr[1];
5114 		break;
5115 	case 0:
5116 		lun = lunptr[1];
5117 		break;
5118 	default:
5119 		mpt_lprt(mpt, MPT_PRT_ERROR, "cannot handle this type lun\n");
5120 		lun = 0xffff;
5121 		break;
5122 	}
5123 
5124 	/*
5125 	 * Deal with non-enabled or bad luns here.
5126 	 */
5127 	if (lun >= MPT_MAX_LUNS || mpt->tenabled == 0 ||
5128 	    mpt->trt[lun].enabled == 0) {
5129 		if (mpt->twildcard) {
5130 			trtp = &mpt->trt_wildcard;
5131 		} else if (fct == MPT_NIL_TMT_VALUE) {
5132 			/*
5133 			 * In this case, we haven't got an upstream listener
5134 			 * for either a specific lun or wildcard luns. We
5135 			 * have to make some sensible response. For regular
5136 			 * inquiry, just return some NOT HERE inquiry data.
5137 			 * For VPD inquiry, report illegal field in cdb.
5138 			 * For REQUEST SENSE, just return NO SENSE data.
5139 			 * REPORT LUNS gets illegal command.
5140 			 * All other commands get 'no such device'.
5141 			 */
5142 			uint8_t *sp, cond, buf[MPT_SENSE_SIZE];
5143 			size_t len;
5144 
5145 			memset(buf, 0, MPT_SENSE_SIZE);
5146 			cond = SCSI_STATUS_CHECK_COND;
5147 			buf[0] = 0xf0;
5148 			buf[2] = 0x5;
5149 			buf[7] = 0x8;
5150 			sp = buf;
5151 			tgt->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex);
5152 
5153 			switch (cdbp[0]) {
5154 			case INQUIRY:
5155 			{
5156 				if (cdbp[1] != 0) {
5157 					buf[12] = 0x26;
5158 					buf[13] = 0x01;
5159 					break;
5160 				}
5161 				len = min(tgt->resid, cdbp[4]);
5162 				len = min(len, sizeof (null_iqd));
5163 				mpt_lprt(mpt, MPT_PRT_DEBUG,
5164 				    "local inquiry %ld bytes\n", (long) len);
5165 				mpt_scsi_tgt_local(mpt, req, lun, 1,
5166 				    null_iqd, len);
5167 				return;
5168 			}
5169 			case REQUEST_SENSE:
5170 			{
5171 				buf[2] = 0x0;
5172 				len = min(tgt->resid, cdbp[4]);
5173 				len = min(len, sizeof (buf));
5174 				mpt_lprt(mpt, MPT_PRT_DEBUG,
5175 				    "local reqsense %ld bytes\n", (long) len);
5176 				mpt_scsi_tgt_local(mpt, req, lun, 1,
5177 				    buf, len);
5178 				return;
5179 			}
5180 			case REPORT_LUNS:
5181 				mpt_lprt(mpt, MPT_PRT_DEBUG, "REPORT LUNS\n");
5182 				buf[12] = 0x26;
5183 				return;
5184 			default:
5185 				mpt_lprt(mpt, MPT_PRT_DEBUG,
5186 				    "CMD 0x%x to unmanaged lun %u\n",
5187 				    cdbp[0], lun);
5188 				buf[12] = 0x25;
5189 				break;
5190 			}
5191 			mpt_scsi_tgt_status(mpt, NULL, req, cond, sp);
5192 			return;
5193 		}
5194 		/* otherwise, leave trtp NULL */
5195 	} else {
5196 		trtp = &mpt->trt[lun];
5197 	}
5198 
5199 	/*
5200 	 * Deal with any task management
5201 	 */
5202 	if (fct != MPT_NIL_TMT_VALUE) {
5203 		if (trtp == NULL) {
5204 			mpt_prt(mpt, "task mgmt function %x but no listener\n",
5205 			    fct);
5206 			mpt_scsi_tgt_status(mpt, 0, req,
5207 			    SCSI_STATUS_OK, 0);
5208 		} else {
5209 			mpt_scsi_tgt_tsk_mgmt(mpt, req, fct, trtp,
5210 			    GET_INITIATOR_INDEX(reply_desc));
5211 		}
5212 		return;
5213 	}
5214 
5215 
5216 	atiop = (struct ccb_accept_tio *) STAILQ_FIRST(&trtp->atios);
5217 	if (atiop == NULL) {
5218 		mpt_lprt(mpt, MPT_PRT_WARN,
5219 		    "no ATIOs for lun %u- sending back %s\n", lun,
5220 		    mpt->tenabled? "QUEUE FULL" : "BUSY");
5221 		mpt_scsi_tgt_status(mpt, NULL, req,
5222 		    mpt->tenabled? SCSI_STATUS_QUEUE_FULL : SCSI_STATUS_BUSY,
5223 		    NULL);
5224 		return;
5225 	}
5226 	STAILQ_REMOVE_HEAD(&trtp->atios, sim_links.stqe);
5227 	mpt_lprt(mpt, MPT_PRT_DEBUG1,
5228 	    "Get FREE ATIO %p lun %d\n", atiop, atiop->ccb_h.target_lun);
5229 	atiop->ccb_h.ccb_mpt_ptr = mpt;
5230 	atiop->ccb_h.status = CAM_CDB_RECVD;
5231 	atiop->ccb_h.target_lun = lun;
5232 	atiop->sense_len = 0;
5233 	atiop->init_id = GET_INITIATOR_INDEX(reply_desc);
5234 	atiop->cdb_len = mpt_cdblen(cdbp[0], 16);
5235 	memcpy(atiop->cdb_io.cdb_bytes, cdbp, atiop->cdb_len);
5236 
5237 	/*
5238 	 * The tag we construct here allows us to find the
5239 	 * original request that the command came in with.
5240 	 *
5241 	 * This way we don't have to depend on anything but the
5242 	 * tag to find things when CCBs show back up from CAM.
5243 	 */
5244 	atiop->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex);
5245 	tgt->tag_id = atiop->tag_id;
5246 	if (tag_action) {
5247 		atiop->tag_action = tag_action;
5248 		atiop->ccb_h.flags = CAM_TAG_ACTION_VALID;
5249 	}
5250 	if (mpt->verbose >= MPT_PRT_DEBUG) {
5251 		int i;
5252 		mpt_prt(mpt, "START_CCB %p for lun %u CDB=<", atiop,
5253 		    atiop->ccb_h.target_lun);
5254 		for (i = 0; i < atiop->cdb_len; i++) {
5255 			mpt_prtc(mpt, "%02x%c", cdbp[i] & 0xff,
5256 			    (i == (atiop->cdb_len - 1))? '>' : ' ');
5257 		}
5258 		mpt_prtc(mpt, " itag %x tag %x rdesc %x dl=%u\n",
5259 		    itag, atiop->tag_id, tgt->reply_desc, tgt->resid);
5260 	}
5261 
5262 	MPTLOCK_2_CAMLOCK(mpt);
5263 	xpt_done((union ccb *)atiop);
5264 	CAMLOCK_2_MPTLOCK(mpt);
5265 }
5266 
5267 static void
5268 mpt_tgt_dump_tgt_state(struct mpt_softc *mpt, request_t *req)
5269 {
5270 	mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req);
5271 
5272 	mpt_prt(mpt, "req %p:%u tgt:rdesc 0x%x resid %u xfrd %u ccb %p treq %p "
5273 	    "nx %d tag 0x%08x state=%d\n", req, req->serno, tgt->reply_desc,
5274 	    tgt->resid, tgt->bytes_xfered, tgt->ccb, tgt->req, tgt->nxfers,
5275 	    tgt->tag_id, tgt->state);
5276 }
5277 
5278 static void
5279 mpt_tgt_dump_req_state(struct mpt_softc *mpt, request_t *req)
5280 {
5281 	mpt_prt(mpt, "req %p:%u index %u (%x) state %x\n", req, req->serno,
5282 	    req->index, req->index, req->state);
5283 	mpt_tgt_dump_tgt_state(mpt, req);
5284 }
5285 
5286 static int
5287 mpt_scsi_tgt_reply_handler(struct mpt_softc *mpt, request_t *req,
5288     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
5289 {
5290 	int dbg;
5291 	union ccb *ccb;
5292 	U16 status;
5293 
5294 	if (reply_frame == NULL) {
5295 		/*
5296 		 * Figure out what the state of the command is.
5297 		 */
5298 		mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req);
5299 
5300 #ifdef	INVARIANTS
5301 		mpt_req_spcl(mpt, req, "turbo scsi_tgt_reply", __LINE__);
5302 		if (tgt->req) {
5303 			mpt_req_not_spcl(mpt, tgt->req,
5304 			    "turbo scsi_tgt_reply associated req", __LINE__);
5305 		}
5306 #endif
5307 		switch(tgt->state) {
5308 		case TGT_STATE_LOADED:
5309 			/*
5310 			 * This is a new command starting.
5311 			 */
5312 			mpt_scsi_tgt_atio(mpt, req, reply_desc);
5313 			break;
5314 		case TGT_STATE_MOVING_DATA:
5315 		{
5316 			uint8_t *sp = NULL, sense[MPT_SENSE_SIZE];
5317 
5318 			ccb = tgt->ccb;
5319 			if (tgt->req == NULL) {
5320 				panic("mpt: turbo target reply with null "
5321 				    "associated request moving data");
5322 				/* NOTREACHED */
5323 			}
5324 			if (ccb == NULL) {
5325 				if (tgt->is_local == 0) {
5326 					panic("mpt: turbo target reply with "
5327 					    "null associated ccb moving data");
5328 					/* NOTREACHED */
5329 				}
5330 				mpt_lprt(mpt, MPT_PRT_DEBUG,
5331 				    "TARGET_ASSIST local done\n");
5332 				TAILQ_REMOVE(&mpt->request_pending_list,
5333 				    tgt->req, links);
5334 				mpt_free_request(mpt, tgt->req);
5335 				tgt->req = NULL;
5336 				mpt_scsi_tgt_status(mpt, NULL, req,
5337 				    0, NULL);
5338 				return (TRUE);
5339 			}
5340 			tgt->ccb = NULL;
5341 			tgt->nxfers++;
5342 			mpt_req_untimeout(req, mpt_timeout, ccb);
5343 			mpt_lprt(mpt, MPT_PRT_DEBUG,
5344 			    "TARGET_ASSIST %p (req %p:%u) done tag 0x%x\n",
5345 			    ccb, tgt->req, tgt->req->serno, ccb->csio.tag_id);
5346 			/*
5347 			 * Free the Target Assist Request
5348 			 */
5349 			KASSERT(tgt->req->ccb == ccb,
5350 			    ("tgt->req %p:%u tgt->req->ccb %p", tgt->req,
5351 			    tgt->req->serno, tgt->req->ccb));
5352 			TAILQ_REMOVE(&mpt->request_pending_list,
5353 			    tgt->req, links);
5354 			mpt_free_request(mpt, tgt->req);
5355 			tgt->req = NULL;
5356 
5357 			/*
5358 			 * Do we need to send status now? That is, are
5359 			 * we done with all our data transfers?
5360 			 */
5361 			if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) {
5362 				mpt_set_ccb_status(ccb, CAM_REQ_CMP);
5363 				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
5364 				KASSERT(ccb->ccb_h.status,
5365 				    ("zero ccb sts at %d\n", __LINE__));
5366 				tgt->state = TGT_STATE_IN_CAM;
5367 				if (mpt->outofbeer) {
5368 					ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
5369 					mpt->outofbeer = 0;
5370 					mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
5371 				}
5372 				MPTLOCK_2_CAMLOCK(mpt);
5373 				xpt_done(ccb);
5374 				CAMLOCK_2_MPTLOCK(mpt);
5375 				break;
5376 			}
5377 			/*
5378 			 * Otherwise, send status (and sense)
5379 			 */
5380 			if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
5381 				sp = sense;
5382 				memcpy(sp, &ccb->csio.sense_data,
5383 				   min(ccb->csio.sense_len, MPT_SENSE_SIZE));
5384 			}
5385 			mpt_scsi_tgt_status(mpt, ccb, req,
5386 			    ccb->csio.scsi_status, sp);
5387 			break;
5388 		}
5389 		case TGT_STATE_SENDING_STATUS:
5390 		case TGT_STATE_MOVING_DATA_AND_STATUS:
5391 		{
5392 			int ioindex;
5393 			ccb = tgt->ccb;
5394 
5395 			if (tgt->req == NULL) {
5396 				panic("mpt: turbo target reply with null "
5397 				    "associated request sending status");
5398 				/* NOTREACHED */
5399 			}
5400 
5401 			if (ccb) {
5402 				tgt->ccb = NULL;
5403 				if (tgt->state ==
5404 				    TGT_STATE_MOVING_DATA_AND_STATUS) {
5405 					tgt->nxfers++;
5406 				}
5407 				mpt_req_untimeout(req, mpt_timeout, ccb);
5408 				if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
5409 					ccb->ccb_h.status |= CAM_SENT_SENSE;
5410 				}
5411 				mpt_lprt(mpt, MPT_PRT_DEBUG,
5412 				    "TARGET_STATUS tag %x sts %x flgs %x req "
5413 				    "%p\n", ccb->csio.tag_id, ccb->ccb_h.status,
5414 				    ccb->ccb_h.flags, tgt->req);
5415 				/*
5416 				 * Free the Target Send Status Request
5417 				 */
5418 				KASSERT(tgt->req->ccb == ccb,
5419 				    ("tgt->req %p:%u tgt->req->ccb %p",
5420 				    tgt->req, tgt->req->serno, tgt->req->ccb));
5421 				/*
5422 				 * Notify CAM that we're done
5423 				 */
5424 				mpt_set_ccb_status(ccb, CAM_REQ_CMP);
5425 				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
5426 				KASSERT(ccb->ccb_h.status,
5427 				    ("ZERO ccb sts at %d\n", __LINE__));
5428 				tgt->ccb = NULL;
5429 			} else {
5430 				mpt_lprt(mpt, MPT_PRT_DEBUG,
5431 				    "TARGET_STATUS non-CAM for  req %p:%u\n",
5432 				    tgt->req, tgt->req->serno);
5433 			}
5434 			TAILQ_REMOVE(&mpt->request_pending_list,
5435 			    tgt->req, links);
5436 			mpt_free_request(mpt, tgt->req);
5437 			tgt->req = NULL;
5438 
5439 			/*
5440 			 * And re-post the Command Buffer.
5441 			 * This will reset the state.
5442 			 */
5443 			ioindex = GET_IO_INDEX(reply_desc);
5444 			TAILQ_REMOVE(&mpt->request_pending_list, req, links);
5445 			tgt->is_local = 0;
5446 			mpt_post_target_command(mpt, req, ioindex);
5447 
5448 			/*
5449 			 * And post a done for anyone who cares
5450 			 */
5451 			if (ccb) {
5452 				if (mpt->outofbeer) {
5453 					ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
5454 					mpt->outofbeer = 0;
5455 					mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
5456 				}
5457 				MPTLOCK_2_CAMLOCK(mpt);
5458 				xpt_done(ccb);
5459 				CAMLOCK_2_MPTLOCK(mpt);
5460 			}
5461 			break;
5462 		}
5463 		case TGT_STATE_NIL:	/* XXX This Never Happens XXX */
5464 			tgt->state = TGT_STATE_LOADED;
5465 			break;
5466 		default:
5467 			mpt_prt(mpt, "Unknown Target State 0x%x in Context "
5468 			    "Reply Function\n", tgt->state);
5469 		}
5470 		return (TRUE);
5471 	}
5472 
5473 	status = le16toh(reply_frame->IOCStatus);
5474 	if (status != MPI_IOCSTATUS_SUCCESS) {
5475 		dbg = MPT_PRT_ERROR;
5476 	} else {
5477 		dbg = MPT_PRT_DEBUG1;
5478 	}
5479 
5480 	mpt_lprt(mpt, dbg,
5481 	    "SCSI_TGT REPLY: req=%p:%u reply=%p func=%x IOCstatus 0x%x\n",
5482 	     req, req->serno, reply_frame, reply_frame->Function, status);
5483 
5484 	switch (reply_frame->Function) {
5485 	case MPI_FUNCTION_TARGET_CMD_BUFFER_POST:
5486 	{
5487 		mpt_tgt_state_t *tgt;
5488 #ifdef	INVARIANTS
5489 		mpt_req_spcl(mpt, req, "tgt reply BUFFER POST", __LINE__);
5490 #endif
5491 		if (status != MPI_IOCSTATUS_SUCCESS) {
5492 			/*
5493 			 * XXX What to do?
5494 			 */
5495 			break;
5496 		}
5497 		tgt = MPT_TGT_STATE(mpt, req);
5498 		KASSERT(tgt->state == TGT_STATE_LOADING,
5499 		    ("bad state 0x%x on reply to buffer post\n", tgt->state));
5500 		mpt_assign_serno(mpt, req);
5501 		tgt->state = TGT_STATE_LOADED;
5502 		break;
5503 	}
5504 	case MPI_FUNCTION_TARGET_ASSIST:
5505 #ifdef	INVARIANTS
5506 		mpt_req_not_spcl(mpt, req, "tgt reply TARGET ASSIST", __LINE__);
5507 #endif
5508 		mpt_prt(mpt, "target assist completion\n");
5509 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
5510 		mpt_free_request(mpt, req);
5511 		break;
5512 	case MPI_FUNCTION_TARGET_STATUS_SEND:
5513 #ifdef	INVARIANTS
5514 		mpt_req_not_spcl(mpt, req, "tgt reply STATUS SEND", __LINE__);
5515 #endif
5516 		mpt_prt(mpt, "status send completion\n");
5517 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
5518 		mpt_free_request(mpt, req);
5519 		break;
5520 	case MPI_FUNCTION_TARGET_MODE_ABORT:
5521 	{
5522 		PTR_MSG_TARGET_MODE_ABORT_REPLY abtrp =
5523 		    (PTR_MSG_TARGET_MODE_ABORT_REPLY) reply_frame;
5524 		PTR_MSG_TARGET_MODE_ABORT abtp =
5525 		    (PTR_MSG_TARGET_MODE_ABORT) req->req_vbuf;
5526 		uint32_t cc = GET_IO_INDEX(le32toh(abtp->ReplyWord));
5527 #ifdef	INVARIANTS
5528 		mpt_req_not_spcl(mpt, req, "tgt reply TMODE ABORT", __LINE__);
5529 #endif
5530 		mpt_prt(mpt, "ABORT RX_ID 0x%x Complete; status 0x%x cnt %u\n",
5531 		    cc, le16toh(abtrp->IOCStatus), le32toh(abtrp->AbortCount));
5532 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
5533 		mpt_free_request(mpt, req);
5534 		break;
5535 	}
5536 	default:
5537 		mpt_prt(mpt, "Unknown Target Address Reply Function code: "
5538 		    "0x%x\n", reply_frame->Function);
5539 		break;
5540 	}
5541 	return (TRUE);
5542 }
5543