xref: /dragonfly/sys/dev/disk/mpt/mpt_cam.c (revision cec957e9)
/*-
 * FreeBSD/CAM specific routines for LSI '909 FC adapters.
 * FreeBSD Version.
 *
 * Copyright (c) 2000, 2001 by Greg Ansley
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice immediately at the beginning of the file, without modification,
 *    this list of conditions, and the following disclaimer.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*-
 * Copyright (c) 2002, 2006 by Matthew Jacob
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon including
 *    a substantially similar Disclaimer requirement for further binary
 *    redistribution.
 * 3. Neither the names of the above listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Support from Chris Ellsworth in order to make SAS adapters work
 * is gratefully acknowledged.
 *
 * Support from LSI-Logic has also gone a great deal toward making this a
 * workable subsystem and is gratefully acknowledged.
 */
/*-
 * Copyright (c) 2004, Avid Technology, Inc. and its contributors.
 * Copyright (c) 2005, WHEEL Sp. z o.o.
 * Copyright (c) 2004, 2005 Justin T. Gibbs
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon including
 *    a substantially similar Disclaimer requirement for further binary
 *    redistribution.
 * 3. Neither the names of the above listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/dev/mpt/mpt_cam.c,v 1.84 2012/02/11 12:03:44 marius Exp $
 */

#include <bus/cam/cam.h>
#include <bus/cam/cam_ccb.h>
#include <bus/cam/cam_xpt.h>
#include <bus/cam/cam_xpt_periph.h>

#include <dev/disk/mpt/mpt.h>
#include <dev/disk/mpt/mpt_cam.h>
#include <dev/disk/mpt/mpt_raid.h>

#include "dev/disk/mpt/mpilib/mpi_ioc.h" /* XXX Fix Event Handling!!! */
#include "dev/disk/mpt/mpilib/mpi_init.h"
#include "dev/disk/mpt/mpilib/mpi_targ.h"
#include "dev/disk/mpt/mpilib/mpi_fc.h"
#include "dev/disk/mpt/mpilib/mpi_sas.h"
#include <sys/callout.h>
#include <sys/kthread.h>
#include <sys/sysctl.h>

static void mpt_poll(struct cam_sim *);
static timeout_t mpt_timeout;
static void mpt_action(struct cam_sim *, union ccb *);
static int
mpt_get_spi_settings(struct mpt_softc *, struct ccb_trans_settings *);
static void mpt_setwidth(struct mpt_softc *, int, int);
static void mpt_setsync(struct mpt_softc *, int, int, int);
static int mpt_update_spi_config(struct mpt_softc *, int);

static mpt_reply_handler_t mpt_scsi_reply_handler;
static mpt_reply_handler_t mpt_scsi_tmf_reply_handler;
static mpt_reply_handler_t mpt_fc_els_reply_handler;
static int mpt_scsi_reply_frame_handler(struct mpt_softc *, request_t *,
					MSG_DEFAULT_REPLY *);
static int mpt_bus_reset(struct mpt_softc *, target_id_t, lun_id_t, int);
static int mpt_fc_reset_link(struct mpt_softc *, int);

static int mpt_spawn_recovery_thread(struct mpt_softc *mpt);
static void mpt_terminate_recovery_thread(struct mpt_softc *mpt);
static void mpt_recovery_thread(void *arg);
static void mpt_recover_commands(struct mpt_softc *mpt);

static int mpt_scsi_send_tmf(struct mpt_softc *, u_int, u_int, u_int,
    u_int, u_int, u_int, int);

static void mpt_fc_post_els(struct mpt_softc *mpt, request_t *, int);
static void mpt_post_target_command(struct mpt_softc *, request_t *, int);
static int mpt_add_els_buffers(struct mpt_softc *mpt);
static int mpt_add_target_commands(struct mpt_softc *mpt);
static int mpt_enable_lun(struct mpt_softc *, target_id_t, lun_id_t);
static int mpt_disable_lun(struct mpt_softc *, target_id_t, lun_id_t);
static void mpt_target_start_io(struct mpt_softc *, union ccb *);
static cam_status mpt_abort_target_ccb(struct mpt_softc *, union ccb *);
static int mpt_abort_target_cmd(struct mpt_softc *, request_t *);
static void mpt_scsi_tgt_status(struct mpt_softc *, union ccb *, request_t *,
    uint8_t, uint8_t const *);
static void
mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *, request_t *, mpt_task_mgmt_t,
    tgt_resource_t *, int);
static void mpt_tgt_dump_tgt_state(struct mpt_softc *, request_t *);
static void mpt_tgt_dump_req_state(struct mpt_softc *, request_t *);
static mpt_reply_handler_t mpt_scsi_tgt_reply_handler;
static mpt_reply_handler_t mpt_sata_pass_reply_handler;

static uint32_t scsi_io_handler_id = MPT_HANDLER_ID_NONE;
static uint32_t scsi_tmf_handler_id = MPT_HANDLER_ID_NONE;
static uint32_t fc_els_handler_id = MPT_HANDLER_ID_NONE;
static uint32_t sata_pass_handler_id = MPT_HANDLER_ID_NONE;

static mpt_probe_handler_t	mpt_cam_probe;
static mpt_attach_handler_t	mpt_cam_attach;
static mpt_enable_handler_t	mpt_cam_enable;
static mpt_ready_handler_t	mpt_cam_ready;
static mpt_event_handler_t	mpt_cam_event;
static mpt_reset_handler_t	mpt_cam_ioc_reset;
static mpt_detach_handler_t	mpt_cam_detach;

static struct mpt_personality mpt_cam_personality =
{
	.name		= "mpt_cam",
	.probe		= mpt_cam_probe,
	.attach		= mpt_cam_attach,
	.enable		= mpt_cam_enable,
	.ready		= mpt_cam_ready,
	.event		= mpt_cam_event,
	.reset		= mpt_cam_ioc_reset,
	.detach		= mpt_cam_detach,
};

DECLARE_MPT_PERSONALITY(mpt_cam, SI_ORDER_SECOND);
MODULE_DEPEND(mpt_cam, cam, 1, 1, 1);

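/*
 * hw.mpt.enable_sata_wc tunable.  As interpreted by
 * mpt_set_initial_config_sas() below: -1 leaves the drive's write cache
 * setting alone, 0 disables it, and any other value enables it for SATA
 * devices attached to SAS controllers.
 */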
int mpt_enable_sata_wc = -1;
TUNABLE_INT("hw.mpt.enable_sata_wc", &mpt_enable_sata_wc);

static int
mpt_cam_probe(struct mpt_softc *mpt)
{
	int role;

	/*
	 * Only attach to nodes that support the initiator or target role
	 * (or want to) or have RAID physical devices that need CAM pass-thru
	 * support.
	 */
	if (mpt->do_cfg_role) {
		role = mpt->cfg_role;
	} else {
		role = mpt->role;
	}
	if ((role & (MPT_ROLE_TARGET|MPT_ROLE_INITIATOR)) != 0 ||
	    (mpt->ioc_page2 != NULL && mpt->ioc_page2->MaxPhysDisks != 0)) {
		return (0);
	}
	return (ENODEV);
}

static int
mpt_cam_attach(struct mpt_softc *mpt)
{
	struct cam_devq *devq;
	mpt_handler_t	 handler;
	int		 maxq;
	int		 error;

	MPT_LOCK(mpt);
	TAILQ_INIT(&mpt->request_timeout_list);
	maxq = (mpt->ioc_facts.GlobalCredits < MPT_MAX_REQUESTS(mpt))?
	    mpt->ioc_facts.GlobalCredits : MPT_MAX_REQUESTS(mpt);
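	/*
	 * maxq is the lesser of the IOC's credit count and our request pool
	 * size; it bounds the depth of the CAM SIMQ allocated below.
	 */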

	handler.reply_handler = mpt_scsi_reply_handler;
	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
				     &scsi_io_handler_id);
	if (error != 0) {
		MPT_UNLOCK(mpt);
		goto cleanup;
	}

	handler.reply_handler = mpt_scsi_tmf_reply_handler;
	error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
				     &scsi_tmf_handler_id);
	if (error != 0) {
		MPT_UNLOCK(mpt);
		goto cleanup;
	}

	/*
	 * If we're fibre channel and could support target mode, we register
	 * an ELS reply handler and give it resources.
	 */
	if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) {
		handler.reply_handler = mpt_fc_els_reply_handler;
		error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
		    &fc_els_handler_id);
		if (error != 0) {
			MPT_UNLOCK(mpt);
			goto cleanup;
		}
		if (mpt_add_els_buffers(mpt) == FALSE) {
			error = ENOMEM;
			MPT_UNLOCK(mpt);
			goto cleanup;
		}
		maxq -= mpt->els_cmds_allocated;
	}

	/*
	 * If we support target mode, we register a reply handler for it,
	 * but don't add command resources until we actually enable target
	 * mode.
	 */
	if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET) != 0) {
		handler.reply_handler = mpt_scsi_tgt_reply_handler;
		error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
		    &mpt->scsi_tgt_handler_id);
		if (error != 0) {
			MPT_UNLOCK(mpt);
			goto cleanup;
		}
	}

	if (mpt->is_sas) {
		handler.reply_handler = mpt_sata_pass_reply_handler;
		error = mpt_register_handler(mpt, MPT_HANDLER_REPLY, handler,
		    &sata_pass_handler_id);
		if (error != 0) {
			MPT_UNLOCK(mpt);
			goto cleanup;
		}
	}

	/*
	 * We keep one request reserved for timeout TMF requests.
	 */
	mpt->tmf_req = mpt_get_request(mpt, FALSE);
	if (mpt->tmf_req == NULL) {
		mpt_prt(mpt, "Unable to allocate dedicated TMF request!\n");
		error = ENOMEM;
		MPT_UNLOCK(mpt);
		goto cleanup;
	}

	/*
	 * Mark the request as free even though not on the free list.
	 * There is only one TMF request allowed to be outstanding at
	 * a time and the TMF routines perform their own allocation
	 * tracking using the standard state flags.
	 */
	mpt->tmf_req->state = REQ_STATE_FREE;
	maxq--;

	/*
	 * The rest of this is CAM foo, for which we need to drop our lock
	 */
	MPT_UNLOCK(mpt);

	if (mpt_spawn_recovery_thread(mpt) != 0) {
		mpt_prt(mpt, "Unable to spawn recovery thread!\n");
		error = ENOMEM;
		goto cleanup;
	}

	/*
	 * Create the device queue for our SIM(s).
	 */
	devq = cam_simq_alloc(maxq);
	if (devq == NULL) {
		mpt_prt(mpt, "Unable to allocate CAM SIMQ!\n");
		error = ENOMEM;
		goto cleanup;
	}

	/*
	 * Construct our SIM entry.
	 */
	mpt->sim =
	    mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq);
	if (mpt->sim == NULL) {
		mpt_prt(mpt, "Unable to allocate CAM SIM!\n");
		cam_devq_release(devq);
		error = ENOMEM;
		goto cleanup;
	}

	/*
	 * Register exactly this bus.
	 */
	MPT_LOCK(mpt);
	if (xpt_bus_register(mpt->sim, 0) != CAM_SUCCESS) {
		mpt_prt(mpt, "Bus registration Failed!\n");
		error = ENOMEM;
		MPT_UNLOCK(mpt);
		goto cleanup;
	}

	if (xpt_create_path(&mpt->path, NULL, cam_sim_path(mpt->sim),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		mpt_prt(mpt, "Unable to allocate Path!\n");
		error = ENOMEM;
		MPT_UNLOCK(mpt);
		goto cleanup;
	}
	MPT_UNLOCK(mpt);

	/*
	 * Only register a second bus for RAID physical
	 * devices if the controller supports RAID.
	 */
	if (mpt->ioc_page2 == NULL || mpt->ioc_page2->MaxPhysDisks == 0) {
		return (0);
	}

	/*
	 * Create a "bus" to export all hidden disks to CAM.
	 */
	mpt->phydisk_sim =
	    mpt_sim_alloc(mpt_action, mpt_poll, "mpt", mpt, 1, maxq, devq);
	if (mpt->phydisk_sim == NULL) {
		mpt_prt(mpt, "Unable to allocate Physical Disk CAM SIM!\n");
		error = ENOMEM;
		goto cleanup;
	}

	/*
	 * Register this bus.
	 */
	MPT_LOCK(mpt);
	if (xpt_bus_register(mpt->phydisk_sim, 1) !=
	    CAM_SUCCESS) {
		mpt_prt(mpt, "Physical Disk Bus registration Failed!\n");
		error = ENOMEM;
		MPT_UNLOCK(mpt);
		goto cleanup;
	}

	if (xpt_create_path(&mpt->phydisk_path, NULL,
	    cam_sim_path(mpt->phydisk_sim),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		mpt_prt(mpt, "Unable to allocate Physical Disk Path!\n");
		error = ENOMEM;
		MPT_UNLOCK(mpt);
		goto cleanup;
	}
	MPT_UNLOCK(mpt);
	mpt_lprt(mpt, MPT_PRT_DEBUG, "attached cam\n");
	return (0);

cleanup:
	mpt_cam_detach(mpt);
	return (error);
}

/*
 * Read FC configuration information
 */
static int
mpt_read_config_info_fc(struct mpt_softc *mpt)
{
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *tree;
	char *topology = NULL;
	int rv;

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 0,
	    0, &mpt->mpt_fcport_page0.Header, FALSE, 5000);
	if (rv) {
		return (-1);
	}
	mpt_lprt(mpt, MPT_PRT_DEBUG, "FC Port Page 0 Header: %x %x %x %x\n",
		 mpt->mpt_fcport_page0.Header.PageVersion,
		 mpt->mpt_fcport_page0.Header.PageLength,
		 mpt->mpt_fcport_page0.Header.PageNumber,
		 mpt->mpt_fcport_page0.Header.PageType);

	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_fcport_page0.Header,
	    sizeof(mpt->mpt_fcport_page0), FALSE, 5000);
	if (rv) {
		mpt_prt(mpt, "failed to read FC Port Page 0\n");
		return (-1);
	}
	mpt2host_config_page_fc_port_0(&mpt->mpt_fcport_page0);

	mpt->mpt_fcport_speed = mpt->mpt_fcport_page0.CurrentSpeed;

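	/*
	 * Decode the attach type reported in FC Port Page 0 into a printable
	 * topology name; unattached or unknown attach types report zero speed.
	 */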
	switch (mpt->mpt_fcport_page0.Flags &
	    MPI_FCPORTPAGE0_FLAGS_ATTACH_TYPE_MASK) {
	case MPI_FCPORTPAGE0_FLAGS_ATTACH_NO_INIT:
		mpt->mpt_fcport_speed = 0;
		topology = "<NO LOOP>";
		break;
	case MPI_FCPORTPAGE0_FLAGS_ATTACH_POINT_TO_POINT:
		topology = "N-Port";
		break;
	case MPI_FCPORTPAGE0_FLAGS_ATTACH_PRIVATE_LOOP:
		topology = "NL-Port";
		break;
	case MPI_FCPORTPAGE0_FLAGS_ATTACH_FABRIC_DIRECT:
		topology = "F-Port";
		break;
	case MPI_FCPORTPAGE0_FLAGS_ATTACH_PUBLIC_LOOP:
		topology = "FL-Port";
		break;
	default:
		mpt->mpt_fcport_speed = 0;
		topology = "?";
		break;
	}

	mpt_lprt(mpt, MPT_PRT_INFO,
	    "FC Port Page 0: Topology <%s> WWNN 0x%08x%08x WWPN 0x%08x%08x "
	    "Speed %u-Gbit\n", topology,
	    mpt->mpt_fcport_page0.WWNN.High,
	    mpt->mpt_fcport_page0.WWNN.Low,
	    mpt->mpt_fcport_page0.WWPN.High,
	    mpt->mpt_fcport_page0.WWPN.Low,
	    mpt->mpt_fcport_speed);
	MPT_UNLOCK(mpt);
	ctx = device_get_sysctl_ctx(mpt->dev);
	tree = device_get_sysctl_tree(mpt->dev);

	ksnprintf(mpt->scinfo.fc.wwnn,
	    sizeof (mpt->scinfo.fc.wwnn), "0x%08x%08x",
	    mpt->mpt_fcport_page0.WWNN.High,
	    mpt->mpt_fcport_page0.WWNN.Low);

	ksnprintf(mpt->scinfo.fc.wwpn,
	    sizeof (mpt->scinfo.fc.wwpn), "0x%08x%08x",
	    mpt->mpt_fcport_page0.WWPN.High,
	    mpt->mpt_fcport_page0.WWPN.Low);

	SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	       "wwnn", CTLFLAG_RD, mpt->scinfo.fc.wwnn, 0,
	       "World Wide Node Name");

	SYSCTL_ADD_STRING(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
	       "wwpn", CTLFLAG_RD, mpt->scinfo.fc.wwpn, 0,
	       "World Wide Port Name");

	MPT_LOCK(mpt);
	return (0);
}

/*
 * Set FC configuration information.
 */
static int
mpt_set_initial_config_fc(struct mpt_softc *mpt)
{
	CONFIG_PAGE_FC_PORT_1 fc;
	U32 fl;
	int r, doit = 0;
	int role;

	r = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_FC_PORT, 1, 0,
	    &fc.Header, FALSE, 5000);
	if (r) {
		mpt_prt(mpt, "failed to read FC page 1 header\n");
		return (mpt_fc_reset_link(mpt, 1));
	}

	r = mpt_read_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_NVRAM, 0,
	    &fc.Header, sizeof (fc), FALSE, 5000);
	if (r) {
		mpt_prt(mpt, "failed to read FC page 1\n");
		return (mpt_fc_reset_link(mpt, 1));
	}
	mpt2host_config_page_fc_port_1(&fc);

	/*
	 * Check our flags to make sure we support the role we want.
	 */
	doit = 0;
	role = 0;
	fl = fc.Flags;

	if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT) {
		role |= MPT_ROLE_INITIATOR;
	}
	if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) {
		role |= MPT_ROLE_TARGET;
	}

	fl &= ~MPI_FCPORTPAGE1_FLAGS_PROT_MASK;

	if (mpt->do_cfg_role == 0) {
		role = mpt->cfg_role;
	} else {
		mpt->do_cfg_role = 0;
	}

	if (role != mpt->cfg_role) {
		if (mpt->cfg_role & MPT_ROLE_INITIATOR) {
			if ((role & MPT_ROLE_INITIATOR) == 0) {
				mpt_prt(mpt, "adding initiator role\n");
				fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT;
				doit++;
			} else {
				mpt_prt(mpt, "keeping initiator role\n");
			}
		} else if (role & MPT_ROLE_INITIATOR) {
			mpt_prt(mpt, "removing initiator role\n");
			doit++;
		}
		if (mpt->cfg_role & MPT_ROLE_TARGET) {
			if ((role & MPT_ROLE_TARGET) == 0) {
				mpt_prt(mpt, "adding target role\n");
				fl |= MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG;
				doit++;
			} else {
				mpt_prt(mpt, "keeping target role\n");
			}
		} else if (role & MPT_ROLE_TARGET) {
			mpt_prt(mpt, "removing target role\n");
			doit++;
		}
		mpt->role = mpt->cfg_role;
	}

	if (fl & MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG) {
		if ((fl & MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID) == 0) {
			mpt_prt(mpt, "adding OXID option\n");
			fl |= MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID;
			doit++;
		}
	}

	if (doit) {
		fc.Flags = fl;
		host2mpt_config_page_fc_port_1(&fc);
		r = mpt_write_cfg_page(mpt,
		    MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM, 0, &fc.Header,
		    sizeof(fc), FALSE, 5000);
		if (r != 0) {
			mpt_prt(mpt, "failed to update NVRAM with changes\n");
			return (0);
		}
		mpt_prt(mpt, "NOTE: NVRAM changes will not take "
		    "effect until next reboot or IOC reset\n");
	}
	return (0);
}

static int
mptsas_sas_io_unit_pg0(struct mpt_softc *mpt, struct mptsas_portinfo *portinfo)
{
	ConfigExtendedPageHeader_t hdr;
	struct mptsas_phyinfo *phyinfo;
	SasIOUnitPage0_t *buffer;
	int error, len, i;

	error = mpt_read_extcfg_header(mpt, MPI_SASIOUNITPAGE0_PAGEVERSION,
				       0, 0, MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT,
				       &hdr, 0, 10000);
	if (error)
		goto out;
	if (hdr.ExtPageLength == 0) {
		error = ENXIO;
		goto out;
	}

	len = hdr.ExtPageLength * 4;
	buffer = kmalloc(len, M_DEVBUF, M_NOWAIT|M_ZERO);
	if (buffer == NULL) {
		error = ENOMEM;
		goto out;
	}

	error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
				     0, &hdr, buffer, len, 0, 10000);
	if (error) {
		kfree(buffer, M_DEVBUF);
		goto out;
	}

	portinfo->num_phys = buffer->NumPhys;
	portinfo->phy_info = kmalloc(sizeof(*portinfo->phy_info) *
	    portinfo->num_phys, M_DEVBUF, M_NOWAIT|M_ZERO);
	if (portinfo->phy_info == NULL) {
		kfree(buffer, M_DEVBUF);
		error = ENOMEM;
		goto out;
	}

	for (i = 0; i < portinfo->num_phys; i++) {
		phyinfo = &portinfo->phy_info[i];
		phyinfo->phy_num = i;
		phyinfo->port_id = buffer->PhyData[i].Port;
		phyinfo->negotiated_link_rate =
		    buffer->PhyData[i].NegotiatedLinkRate;
		phyinfo->handle =
		    le16toh(buffer->PhyData[i].ControllerDevHandle);
	}

	kfree(buffer, M_DEVBUF);
out:
	return (error);
}

static int
mptsas_sas_phy_pg0(struct mpt_softc *mpt, struct mptsas_phyinfo *phy_info,
	uint32_t form, uint32_t form_specific)
{
	ConfigExtendedPageHeader_t hdr;
	SasPhyPage0_t *buffer;
	int error;

	error = mpt_read_extcfg_header(mpt, MPI_SASPHY0_PAGEVERSION, 0, 0,
				       MPI_CONFIG_EXTPAGETYPE_SAS_PHY, &hdr,
				       0, 10000);
	if (error)
		goto out;
	if (hdr.ExtPageLength == 0) {
		error = ENXIO;
		goto out;
	}

	buffer = kmalloc(sizeof(SasPhyPage0_t), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (buffer == NULL) {
		error = ENOMEM;
		goto out;
	}

	error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
				     form + form_specific, &hdr, buffer,
				     sizeof(SasPhyPage0_t), 0, 10000);
	if (error) {
		kfree(buffer, M_DEVBUF);
		goto out;
	}

	phy_info->hw_link_rate = buffer->HwLinkRate;
	phy_info->programmed_link_rate = buffer->ProgrammedLinkRate;
	phy_info->identify.dev_handle = le16toh(buffer->OwnerDevHandle);
	phy_info->attached.dev_handle = le16toh(buffer->AttachedDevHandle);

	kfree(buffer, M_DEVBUF);
out:
	return (error);
}

static int
mptsas_sas_device_pg0(struct mpt_softc *mpt, struct mptsas_devinfo *device_info,
	uint32_t form, uint32_t form_specific)
{
	ConfigExtendedPageHeader_t hdr;
	SasDevicePage0_t *buffer;
	uint64_t sas_address;
	int error = 0;

	bzero(device_info, sizeof(*device_info));
	error = mpt_read_extcfg_header(mpt, MPI_SASDEVICE0_PAGEVERSION, 0, 0,
				       MPI_CONFIG_EXTPAGETYPE_SAS_DEVICE,
				       &hdr, 0, 10000);
	if (error)
		goto out;
	if (hdr.ExtPageLength == 0) {
		error = ENXIO;
		goto out;
	}

	buffer = kmalloc(sizeof(SasDevicePage0_t), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (buffer == NULL) {
		error = ENOMEM;
		goto out;
	}

	error = mpt_read_extcfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
				     form + form_specific, &hdr, buffer,
				     sizeof(SasDevicePage0_t), 0, 10000);
	if (error) {
		kfree(buffer, M_DEVBUF);
		goto out;
	}

	device_info->dev_handle = le16toh(buffer->DevHandle);
	device_info->parent_dev_handle = le16toh(buffer->ParentDevHandle);
	device_info->enclosure_handle = le16toh(buffer->EnclosureHandle);
	device_info->slot = le16toh(buffer->Slot);
	device_info->phy_num = buffer->PhyNum;
	device_info->physical_port = buffer->PhysicalPort;
	device_info->target_id = buffer->TargetID;
	device_info->bus = buffer->Bus;
	bcopy(&buffer->SASAddress, &sas_address, sizeof(uint64_t));
	device_info->sas_address = le64toh(sas_address);
	device_info->device_info = le32toh(buffer->DeviceInfo);

	kfree(buffer, M_DEVBUF);
out:
	return (error);
}

/*
 * Read SAS configuration information: SAS IO Unit Page 0 for the port,
 * then the phy and device pages for each phy found.
 */
static int
mpt_read_config_info_sas(struct mpt_softc *mpt)
{
	struct mptsas_portinfo *portinfo;
	struct mptsas_phyinfo *phyinfo;
	int error, i;

	portinfo = kmalloc(sizeof(*portinfo), M_DEVBUF, M_NOWAIT|M_ZERO);
	if (portinfo == NULL)
		return (ENOMEM);

	error = mptsas_sas_io_unit_pg0(mpt, portinfo);
	if (error) {
		kfree(portinfo, M_DEVBUF);
		return (0);
	}

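	/*
	 * For each phy, fetch its SAS PHY Page 0 and the SAS Device Page 0
	 * for both the phy itself and any attached device.
	 */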
	for (i = 0; i < portinfo->num_phys; i++) {
		phyinfo = &portinfo->phy_info[i];
		error = mptsas_sas_phy_pg0(mpt, phyinfo,
		    (MPI_SAS_PHY_PGAD_FORM_PHY_NUMBER <<
		    MPI_SAS_PHY_PGAD_FORM_SHIFT), i);
		if (error)
			break;
		error = mptsas_sas_device_pg0(mpt, &phyinfo->identify,
		    (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
		    MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
		    phyinfo->handle);
		if (error)
			break;
		phyinfo->identify.phy_num = phyinfo->phy_num = i;
		if (phyinfo->attached.dev_handle)
			error = mptsas_sas_device_pg0(mpt,
			    &phyinfo->attached,
			    (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
			    MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
			    phyinfo->attached.dev_handle);
		if (error)
			break;
	}
	mpt->sas_portinfo = portinfo;
	return (0);
}

static void
mptsas_set_sata_wc(struct mpt_softc *mpt, struct mptsas_devinfo *devinfo,
	int enabled)
{
	SataPassthroughRequest_t	*pass;
	request_t *req;
	int error, status;

	req = mpt_get_request(mpt, 0);
	if (req == NULL)
		return;

	pass = req->req_vbuf;
	bzero(pass, sizeof(SataPassthroughRequest_t));
	pass->Function = MPI_FUNCTION_SATA_PASSTHROUGH;
	pass->TargetID = devinfo->target_id;
	pass->Bus = devinfo->bus;
	pass->PassthroughFlags = 0;
	pass->ConnectionRate = MPI_SATA_PT_REQ_CONNECT_RATE_NEGOTIATED;
	pass->DataLength = 0;
	pass->MsgContext = htole32(req->index | sata_pass_handler_id);
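	/*
	 * The command FIS below encodes an ATA SET FEATURES command as a
	 * host-to-device register FIS: 0x27 is the H2D register FIS type,
	 * 0x80 sets the C (command update) bit, 0xef is SET FEATURES, and
	 * the features value 0x02/0x82 enables/disables the drive's
	 * volatile write cache.
	 */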
	pass->CommandFIS[0] = 0x27;
	pass->CommandFIS[1] = 0x80;
	pass->CommandFIS[2] = 0xef;
	pass->CommandFIS[3] = (enabled) ? 0x02 : 0x82;
	pass->CommandFIS[7] = 0x40;
	pass->CommandFIS[15] = 0x08;

	mpt_check_doorbell(mpt);
	mpt_send_cmd(mpt, req);
	error = mpt_wait_req(mpt, req, REQ_STATE_DONE, REQ_STATE_DONE, 0,
			     10 * 1000);
	if (error) {
		mpt_free_request(mpt, req);
		kprintf("error %d sending passthrough\n", error);
		return;
	}

	status = le16toh(req->IOCStatus);
	if (status != MPI_IOCSTATUS_SUCCESS) {
		mpt_free_request(mpt, req);
		kprintf("IOCSTATUS %d\n", status);
		return;
	}

	mpt_free_request(mpt, req);
}

/*
 * Set SAS configuration information. Currently this only means toggling
 * the write cache on attached SATA devices when the hw.mpt.enable_sata_wc
 * tunable has been set.
 */
static int
mpt_set_initial_config_sas(struct mpt_softc *mpt)
{
	struct mptsas_phyinfo *phyinfo;
	int i;

	if ((mpt_enable_sata_wc != -1) && (mpt->sas_portinfo != NULL)) {
		for (i = 0; i < mpt->sas_portinfo->num_phys; i++) {
			phyinfo = &mpt->sas_portinfo->phy_info[i];
			if (phyinfo->attached.dev_handle == 0)
				continue;
			if ((phyinfo->attached.device_info &
			    MPI_SAS_DEVICE_INFO_SATA_DEVICE) == 0)
				continue;
			if (bootverbose)
				device_printf(mpt->dev,
				    "%sabling SATA WC on phy %d\n",
				    (mpt_enable_sata_wc) ? "En" : "Dis", i);
			mptsas_set_sata_wc(mpt, &phyinfo->attached,
					   mpt_enable_sata_wc);
		}
	}

	return (0);
}

static int
mpt_sata_pass_reply_handler(struct mpt_softc *mpt, request_t *req,
 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
{

	if (req != NULL) {
		if (reply_frame != NULL) {
			req->IOCStatus = le16toh(reply_frame->IOCStatus);
		}
		req->state &= ~REQ_STATE_QUEUED;
		req->state |= REQ_STATE_DONE;
		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
		if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
			wakeup(req);
		} else if ((req->state & REQ_STATE_TIMEDOUT) != 0) {
			/*
			 * Whew- we can free this request (late completion)
			 */
			mpt_free_request(mpt, req);
		}
	}

	return (TRUE);
}

/*
 * Read SCSI configuration information
 */
static int
mpt_read_config_info_spi(struct mpt_softc *mpt)
{
	int rv, i;

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 0, 0,
	    &mpt->mpt_port_page0.Header, FALSE, 5000);
	if (rv) {
		return (-1);
	}
	mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 0 Header: %x %x %x %x\n",
	    mpt->mpt_port_page0.Header.PageVersion,
	    mpt->mpt_port_page0.Header.PageLength,
	    mpt->mpt_port_page0.Header.PageNumber,
	    mpt->mpt_port_page0.Header.PageType);

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 1, 0,
	    &mpt->mpt_port_page1.Header, FALSE, 5000);
	if (rv) {
		return (-1);
	}
	mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 1 Header: %x %x %x %x\n",
	    mpt->mpt_port_page1.Header.PageVersion,
	    mpt->mpt_port_page1.Header.PageLength,
	    mpt->mpt_port_page1.Header.PageNumber,
	    mpt->mpt_port_page1.Header.PageType);

	rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_PORT, 2, 0,
	    &mpt->mpt_port_page2.Header, FALSE, 5000);
	if (rv) {
		return (-1);
	}
	mpt_lprt(mpt, MPT_PRT_DEBUG, "SPI Port Page 2 Header: %x %x %x %x\n",
	    mpt->mpt_port_page2.Header.PageVersion,
	    mpt->mpt_port_page2.Header.PageLength,
	    mpt->mpt_port_page2.Header.PageNumber,
	    mpt->mpt_port_page2.Header.PageType);

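	/*
	 * Read the per-target device page headers for all 16 possible SPI
	 * targets on this bus.
	 */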
	for (i = 0; i < 16; i++) {
		rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE,
		    0, i, &mpt->mpt_dev_page0[i].Header, FALSE, 5000);
		if (rv) {
			return (-1);
		}
		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "SPI Target %d Device Page 0 Header: %x %x %x %x\n", i,
		    mpt->mpt_dev_page0[i].Header.PageVersion,
		    mpt->mpt_dev_page0[i].Header.PageLength,
		    mpt->mpt_dev_page0[i].Header.PageNumber,
		    mpt->mpt_dev_page0[i].Header.PageType);

		rv = mpt_read_cfg_header(mpt, MPI_CONFIG_PAGETYPE_SCSI_DEVICE,
		    1, i, &mpt->mpt_dev_page1[i].Header, FALSE, 5000);
		if (rv) {
			return (-1);
		}
		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "SPI Target %d Device Page 1 Header: %x %x %x %x\n", i,
		    mpt->mpt_dev_page1[i].Header.PageVersion,
		    mpt->mpt_dev_page1[i].Header.PageLength,
		    mpt->mpt_dev_page1[i].Header.PageNumber,
		    mpt->mpt_dev_page1[i].Header.PageType);
	}

	/*
	 * At this point, we don't *have* to fail. As long as we have
	 * valid config header information, we can (barely) lurch
	 * along.
	 */

	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page0.Header,
	    sizeof(mpt->mpt_port_page0), FALSE, 5000);
	if (rv) {
		mpt_prt(mpt, "failed to read SPI Port Page 0\n");
	} else {
		mpt2host_config_page_scsi_port_0(&mpt->mpt_port_page0);
		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
		    "SPI Port Page 0: Capabilities %x PhysicalInterface %x\n",
		    mpt->mpt_port_page0.Capabilities,
		    mpt->mpt_port_page0.PhysicalInterface);
	}

	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page1.Header,
	    sizeof(mpt->mpt_port_page1), FALSE, 5000);
	if (rv) {
		mpt_prt(mpt, "failed to read SPI Port Page 1\n");
	} else {
		mpt2host_config_page_scsi_port_1(&mpt->mpt_port_page1);
		mpt_lprt(mpt, MPT_PRT_DEBUG,
		    "SPI Port Page 1: Configuration %x OnBusTimerValue %x\n",
		    mpt->mpt_port_page1.Configuration,
		    mpt->mpt_port_page1.OnBusTimerValue);
	}

	rv = mpt_read_cur_cfg_page(mpt, 0, &mpt->mpt_port_page2.Header,
	    sizeof(mpt->mpt_port_page2), FALSE, 5000);
	if (rv) {
		mpt_prt(mpt, "failed to read SPI Port Page 2\n");
	} else {
		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
		    "Port Page 2: Flags %x Settings %x\n",
		    mpt->mpt_port_page2.PortFlags,
		    mpt->mpt_port_page2.PortSettings);
		mpt2host_config_page_scsi_port_2(&mpt->mpt_port_page2);
		for (i = 0; i < 16; i++) {
			mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
			    " Port Page 2 Tgt %d: timo %x SF %x Flags %x\n",
			    i, mpt->mpt_port_page2.DeviceSettings[i].Timeout,
			    mpt->mpt_port_page2.DeviceSettings[i].SyncFactor,
			    mpt->mpt_port_page2.DeviceSettings[i].DeviceFlags);
		}
	}

	for (i = 0; i < 16; i++) {
		rv = mpt_read_cur_cfg_page(mpt, i,
		    &mpt->mpt_dev_page0[i].Header, sizeof(*mpt->mpt_dev_page0),
		    FALSE, 5000);
		if (rv) {
			mpt_prt(mpt,
			    "cannot read SPI Target %d Device Page 0\n", i);
			continue;
		}
		mpt2host_config_page_scsi_device_0(&mpt->mpt_dev_page0[i]);
		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
		    "target %d page 0: Negotiated Params %x Information %x\n",
		    i, mpt->mpt_dev_page0[i].NegotiatedParameters,
		    mpt->mpt_dev_page0[i].Information);

		rv = mpt_read_cur_cfg_page(mpt, i,
		    &mpt->mpt_dev_page1[i].Header, sizeof(*mpt->mpt_dev_page1),
		    FALSE, 5000);
		if (rv) {
			mpt_prt(mpt,
			    "cannot read SPI Target %d Device Page 1\n", i);
			continue;
		}
		mpt2host_config_page_scsi_device_1(&mpt->mpt_dev_page1[i]);
		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
		    "target %d page 1: Requested Params %x Configuration %x\n",
		    i, mpt->mpt_dev_page1[i].RequestedParameters,
10416d259fc1SSascha Wildner 		    mpt->mpt_dev_page1[i].Configuration);
10422545bca0SMatthew Dillon 	}
10432545bca0SMatthew Dillon 	return (0);
10442545bca0SMatthew Dillon }
10452545bca0SMatthew Dillon 
10462545bca0SMatthew Dillon /*
10472545bca0SMatthew Dillon  * Validate SPI configuration information.
10482545bca0SMatthew Dillon  *
10492545bca0SMatthew Dillon  * In particular, validate SPI Port Page 1.
10502545bca0SMatthew Dillon  */
10512545bca0SMatthew Dillon static int
10522545bca0SMatthew Dillon mpt_set_initial_config_spi(struct mpt_softc *mpt)
10532545bca0SMatthew Dillon {
10546d259fc1SSascha Wildner 	int error, i, pp1val;
10552545bca0SMatthew Dillon 
10562545bca0SMatthew Dillon 	mpt->mpt_disc_enable = 0xff;
10572545bca0SMatthew Dillon 	mpt->mpt_tag_enable = 0;
10582545bca0SMatthew Dillon 
10596d259fc1SSascha Wildner 	pp1val = ((1 << mpt->mpt_ini_id) <<
10606d259fc1SSascha Wildner 	    MPI_SCSIPORTPAGE1_CFG_SHIFT_PORT_RESPONSE_ID) | mpt->mpt_ini_id;
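	/*
	 * Worked example (illustrative, assuming the usual response-ID
	 * shift of 16): with an initiator ID of 7, the expected value is
	 *
	 *	pp1val = ((1 << 7) << 16) | 7 = 0x00800007
	 *
	 * i.e. the response-ID bitmask in the upper half of Configuration
	 * and the port's own SCSI ID in the low byte.
	 */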
10612545bca0SMatthew Dillon 	if (mpt->mpt_port_page1.Configuration != pp1val) {
10622545bca0SMatthew Dillon 		CONFIG_PAGE_SCSI_PORT_1 tmp;
10632545bca0SMatthew Dillon 
10642545bca0SMatthew Dillon 		mpt_prt(mpt, "SPI Port Page 1 Config value bad (%x)- should "
10656d259fc1SSascha Wildner 		    "be %x\n", mpt->mpt_port_page1.Configuration, pp1val);
10662545bca0SMatthew Dillon 		tmp = mpt->mpt_port_page1;
10672545bca0SMatthew Dillon 		tmp.Configuration = pp1val;
10682545bca0SMatthew Dillon 		host2mpt_config_page_scsi_port_1(&tmp);
10692545bca0SMatthew Dillon 		error = mpt_write_cur_cfg_page(mpt, 0,
10702545bca0SMatthew Dillon 		    &tmp.Header, sizeof(tmp), FALSE, 5000);
10712545bca0SMatthew Dillon 		if (error) {
10722545bca0SMatthew Dillon 			return (-1);
10732545bca0SMatthew Dillon 		}
10742545bca0SMatthew Dillon 		error = mpt_read_cur_cfg_page(mpt, 0,
10752545bca0SMatthew Dillon 		    &tmp.Header, sizeof(tmp), FALSE, 5000);
10762545bca0SMatthew Dillon 		if (error) {
10772545bca0SMatthew Dillon 			return (-1);
10782545bca0SMatthew Dillon 		}
10792545bca0SMatthew Dillon 		mpt2host_config_page_scsi_port_1(&tmp);
10802545bca0SMatthew Dillon 		if (tmp.Configuration != pp1val) {
10812545bca0SMatthew Dillon 			mpt_prt(mpt,
10822545bca0SMatthew Dillon 			    "failed to reset SPI Port Page 1 Config value\n");
10832545bca0SMatthew Dillon 			return (-1);
10842545bca0SMatthew Dillon 		}
10852545bca0SMatthew Dillon 		mpt->mpt_port_page1 = tmp;
10862545bca0SMatthew Dillon 	}
10872545bca0SMatthew Dillon 
10882545bca0SMatthew Dillon 	/*
10892545bca0SMatthew Dillon 	 * The purpose of this exercise is to get
10902545bca0SMatthew Dillon 	 * all targets back to async/narrow.
10912545bca0SMatthew Dillon 	 *
10922545bca0SMatthew Dillon 	 * We skip this step if the BIOS has already negotiated
10932545bca0SMatthew Dillon 	 * speeds with the targets.
10942545bca0SMatthew Dillon 	 */
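	/*
	 * In the fall-back case below, zeroing RequestedParameters and
	 * Configuration in Device Page 1 asks each target for asynchronous,
	 * narrow transfers; mpt_update_spi_config() then propagates the
	 * change (a descriptive note, not part of the original comment).
	 */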
10952545bca0SMatthew Dillon 	i = mpt->mpt_port_page2.PortSettings &
10962545bca0SMatthew Dillon 	    MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS;
10972545bca0SMatthew Dillon 	if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS) {
10982545bca0SMatthew Dillon 		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
10992545bca0SMatthew Dillon 		    "honoring BIOS transfer negotiations\n");
11002545bca0SMatthew Dillon 	} else {
11012545bca0SMatthew Dillon 		for (i = 0; i < 16; i++) {
11022545bca0SMatthew Dillon 			mpt->mpt_dev_page1[i].RequestedParameters = 0;
11032545bca0SMatthew Dillon 			mpt->mpt_dev_page1[i].Configuration = 0;
11042545bca0SMatthew Dillon 			(void) mpt_update_spi_config(mpt, i);
11052545bca0SMatthew Dillon 		}
11062545bca0SMatthew Dillon 	}
11072545bca0SMatthew Dillon 	return (0);
11082545bca0SMatthew Dillon }
11092545bca0SMatthew Dillon 
11104c42baf4SSascha Wildner static int
11112545bca0SMatthew Dillon mpt_cam_enable(struct mpt_softc *mpt)
11122545bca0SMatthew Dillon {
11132545bca0SMatthew Dillon 	int error;
11142545bca0SMatthew Dillon 
11152545bca0SMatthew Dillon 	MPT_LOCK(mpt);
11162545bca0SMatthew Dillon 
11172545bca0SMatthew Dillon 	error = EIO;
11182545bca0SMatthew Dillon 	if (mpt->is_fc) {
11192545bca0SMatthew Dillon 		if (mpt_read_config_info_fc(mpt)) {
11202545bca0SMatthew Dillon 			goto out;
11212545bca0SMatthew Dillon 		}
11222545bca0SMatthew Dillon 		if (mpt_set_initial_config_fc(mpt)) {
11232545bca0SMatthew Dillon 			goto out;
11242545bca0SMatthew Dillon 		}
11252545bca0SMatthew Dillon 	} else if (mpt->is_sas) {
11262545bca0SMatthew Dillon 		if (mpt_read_config_info_sas(mpt)) {
11272545bca0SMatthew Dillon 			goto out;
11282545bca0SMatthew Dillon 		}
11292545bca0SMatthew Dillon 		if (mpt_set_initial_config_sas(mpt)) {
11302545bca0SMatthew Dillon 			goto out;
11312545bca0SMatthew Dillon 		}
11322545bca0SMatthew Dillon 	} else if (mpt->is_spi) {
11332545bca0SMatthew Dillon 		if (mpt_read_config_info_spi(mpt)) {
11342545bca0SMatthew Dillon 			goto out;
11352545bca0SMatthew Dillon 		}
11362545bca0SMatthew Dillon 		if (mpt_set_initial_config_spi(mpt)) {
11372545bca0SMatthew Dillon 			goto out;
11382545bca0SMatthew Dillon 		}
11392545bca0SMatthew Dillon 	}
11402545bca0SMatthew Dillon 	error = 0;
11412545bca0SMatthew Dillon 
11422545bca0SMatthew Dillon out:
11432545bca0SMatthew Dillon 	MPT_UNLOCK(mpt);
11442545bca0SMatthew Dillon 	return (error);
11452545bca0SMatthew Dillon }
11462545bca0SMatthew Dillon 
11474c42baf4SSascha Wildner static void
11482545bca0SMatthew Dillon mpt_cam_ready(struct mpt_softc *mpt)
11492545bca0SMatthew Dillon {
11504c42baf4SSascha Wildner 
11512545bca0SMatthew Dillon 	/*
11522545bca0SMatthew Dillon 	 * If we're in target mode, hang out resources now
11532545bca0SMatthew Dillon 	 * so we don't cause the world to hang talking to us.
11542545bca0SMatthew Dillon 	 */
11552545bca0SMatthew Dillon 	if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) {
11562545bca0SMatthew Dillon 		/*
11572545bca0SMatthew Dillon 		 * Try to add some target command resources
11582545bca0SMatthew Dillon 		 */
11592545bca0SMatthew Dillon 		MPT_LOCK(mpt);
11602545bca0SMatthew Dillon 		if (mpt_add_target_commands(mpt) == FALSE) {
11612545bca0SMatthew Dillon 			mpt_prt(mpt, "failed to add target commands\n");
11622545bca0SMatthew Dillon 		}
11632545bca0SMatthew Dillon 		MPT_UNLOCK(mpt);
11642545bca0SMatthew Dillon 	}
11652545bca0SMatthew Dillon 	mpt->ready = 1;
11662545bca0SMatthew Dillon }
11672545bca0SMatthew Dillon 
11684c42baf4SSascha Wildner static void
11692545bca0SMatthew Dillon mpt_cam_detach(struct mpt_softc *mpt)
11702545bca0SMatthew Dillon {
11712545bca0SMatthew Dillon 	mpt_handler_t handler;
11722545bca0SMatthew Dillon 
11732545bca0SMatthew Dillon 	MPT_LOCK(mpt);
11742545bca0SMatthew Dillon 	mpt->ready = 0;
11752545bca0SMatthew Dillon 	mpt_terminate_recovery_thread(mpt);
11762545bca0SMatthew Dillon 
11772545bca0SMatthew Dillon 	handler.reply_handler = mpt_scsi_reply_handler;
11782545bca0SMatthew Dillon 	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
11792545bca0SMatthew Dillon 			       scsi_io_handler_id);
11802545bca0SMatthew Dillon 	handler.reply_handler = mpt_scsi_tmf_reply_handler;
11812545bca0SMatthew Dillon 	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
11822545bca0SMatthew Dillon 			       scsi_tmf_handler_id);
11832545bca0SMatthew Dillon 	handler.reply_handler = mpt_fc_els_reply_handler;
11842545bca0SMatthew Dillon 	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
11852545bca0SMatthew Dillon 			       fc_els_handler_id);
11862545bca0SMatthew Dillon 	handler.reply_handler = mpt_scsi_tgt_reply_handler;
11872545bca0SMatthew Dillon 	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
11882545bca0SMatthew Dillon 			       mpt->scsi_tgt_handler_id);
11892545bca0SMatthew Dillon 	handler.reply_handler = mpt_sata_pass_reply_handler;
11902545bca0SMatthew Dillon 	mpt_deregister_handler(mpt, MPT_HANDLER_REPLY, handler,
11912545bca0SMatthew Dillon 			       sata_pass_handler_id);
11922545bca0SMatthew Dillon 
11932545bca0SMatthew Dillon 	if (mpt->tmf_req != NULL) {
11942545bca0SMatthew Dillon 		mpt->tmf_req->state = REQ_STATE_ALLOCATED;
11952545bca0SMatthew Dillon 		mpt_free_request(mpt, mpt->tmf_req);
11962545bca0SMatthew Dillon 		mpt->tmf_req = NULL;
11972545bca0SMatthew Dillon 	}
11982545bca0SMatthew Dillon 	if (mpt->sas_portinfo != NULL) {
11992545bca0SMatthew Dillon 		kfree(mpt->sas_portinfo, M_DEVBUF);
12002545bca0SMatthew Dillon 		mpt->sas_portinfo = NULL;
12012545bca0SMatthew Dillon 	}
12022545bca0SMatthew Dillon 
12032545bca0SMatthew Dillon 	if (mpt->sim != NULL) {
12042545bca0SMatthew Dillon 		xpt_free_path(mpt->path);
12052545bca0SMatthew Dillon 		xpt_bus_deregister(cam_sim_path(mpt->sim));
12062545bca0SMatthew Dillon 		cam_sim_free(mpt->sim);
12072545bca0SMatthew Dillon 		mpt->sim = NULL;
12082545bca0SMatthew Dillon 	}
12092545bca0SMatthew Dillon 
12102545bca0SMatthew Dillon 	if (mpt->phydisk_sim != NULL) {
12112545bca0SMatthew Dillon 		xpt_free_path(mpt->phydisk_path);
12122545bca0SMatthew Dillon 		xpt_bus_deregister(cam_sim_path(mpt->phydisk_sim));
12132545bca0SMatthew Dillon 		cam_sim_free(mpt->phydisk_sim);
12142545bca0SMatthew Dillon 		mpt->phydisk_sim = NULL;
12152545bca0SMatthew Dillon 	}
12166d259fc1SSascha Wildner 	MPT_UNLOCK(mpt);
12172545bca0SMatthew Dillon }
12182545bca0SMatthew Dillon 
12192545bca0SMatthew Dillon /* This routine is used after a system crash to dump core onto the swap device.
12202545bca0SMatthew Dillon  */
12212545bca0SMatthew Dillon static void
12222545bca0SMatthew Dillon mpt_poll(struct cam_sim *sim)
12232545bca0SMatthew Dillon {
12242545bca0SMatthew Dillon 	struct mpt_softc *mpt;
12252545bca0SMatthew Dillon 
12262545bca0SMatthew Dillon 	mpt = (struct mpt_softc *)cam_sim_softc(sim);
12272545bca0SMatthew Dillon 	mpt_intr(mpt);
12282545bca0SMatthew Dillon }
12292545bca0SMatthew Dillon 
12302545bca0SMatthew Dillon /*
12312545bca0SMatthew Dillon  * Watchdog timeout routine for SCSI requests.
12322545bca0SMatthew Dillon  */
12332545bca0SMatthew Dillon static void
12342545bca0SMatthew Dillon mpt_timeout(void *arg)
12352545bca0SMatthew Dillon {
12362545bca0SMatthew Dillon 	union ccb	 *ccb;
12372545bca0SMatthew Dillon 	struct mpt_softc *mpt;
12382545bca0SMatthew Dillon 	request_t	 *req;
12392545bca0SMatthew Dillon 
12402545bca0SMatthew Dillon 	ccb = (union ccb *)arg;
12412545bca0SMatthew Dillon 	mpt = ccb->ccb_h.ccb_mpt_ptr;
12422545bca0SMatthew Dillon 
12432be58998SSascha Wildner 	MPT_LOCK(mpt);
12442545bca0SMatthew Dillon 	req = ccb->ccb_h.ccb_req_ptr;
12452545bca0SMatthew Dillon 	mpt_prt(mpt, "request %p:%u timed out for ccb %p (req->ccb %p)\n", req,
12462545bca0SMatthew Dillon 	    req->serno, ccb, req->ccb);
12472545bca0SMatthew Dillon /* XXX: WHAT ARE WE TRYING TO DO HERE? */
12482545bca0SMatthew Dillon 	if ((req->state & REQ_STATE_QUEUED) == REQ_STATE_QUEUED) {
12492545bca0SMatthew Dillon 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
12502545bca0SMatthew Dillon 		TAILQ_INSERT_TAIL(&mpt->request_timeout_list, req, links);
12512545bca0SMatthew Dillon 		req->state |= REQ_STATE_TIMEDOUT;
12522545bca0SMatthew Dillon 		mpt_wakeup_recovery_thread(mpt);
12532545bca0SMatthew Dillon 	}
12542be58998SSascha Wildner 	MPT_UNLOCK(mpt);
12552545bca0SMatthew Dillon }
12562545bca0SMatthew Dillon 
12572545bca0SMatthew Dillon /*
12582545bca0SMatthew Dillon  * Callback routine from "bus_dmamap_load" or, in simple cases, called directly.
12592545bca0SMatthew Dillon  *
12602545bca0SMatthew Dillon  * Takes a list of physical segments and builds the SGL for SCSI IO command
12612545bca0SMatthew Dillon  * and forwards the command to the IOC after one last check that CAM has not
12622545bca0SMatthew Dillon  * aborted the transaction.
12632545bca0SMatthew Dillon  */
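/*
 * Rough sketch of how this callback is normally reached (illustrative
 * only; the real dispatch is in mpt_start() below):
 *
 *	error = bus_dmamap_load(mpt->buffer_dmat, req->dmap,
 *	    csio->data_ptr, csio->dxfer_len, cb, req, 0);
 *
 * with 'cb' set to mpt_execute_req_a64 or mpt_execute_req depending on
 * the width of bus_addr_t; for requests with no data, or with physical
 * addresses already supplied by CAM, the callback is invoked directly.
 */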
12642545bca0SMatthew Dillon static void
12652545bca0SMatthew Dillon mpt_execute_req_a64(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
12662545bca0SMatthew Dillon {
12672545bca0SMatthew Dillon 	request_t *req, *trq;
12682545bca0SMatthew Dillon 	char *mpt_off;
12692545bca0SMatthew Dillon 	union ccb *ccb;
12702545bca0SMatthew Dillon 	struct mpt_softc *mpt;
12714c42baf4SSascha Wildner 	bus_addr_t chain_list_addr;
12724c42baf4SSascha Wildner 	int first_lim, seg, this_seg_lim;
12734c42baf4SSascha Wildner 	uint32_t addr, cur_off, flags, nxt_off, tf;
12742545bca0SMatthew Dillon 	void *sglp = NULL;
12752545bca0SMatthew Dillon 	MSG_REQUEST_HEADER *hdrp;
12762545bca0SMatthew Dillon 	SGE_SIMPLE64 *se;
12772545bca0SMatthew Dillon 	SGE_CHAIN64 *ce;
12782545bca0SMatthew Dillon 	int istgt = 0;
12792545bca0SMatthew Dillon 
12802545bca0SMatthew Dillon 	req = (request_t *)arg;
12812545bca0SMatthew Dillon 	ccb = req->ccb;
12822545bca0SMatthew Dillon 
12832545bca0SMatthew Dillon 	mpt = ccb->ccb_h.ccb_mpt_ptr;
12842545bca0SMatthew Dillon 	req = ccb->ccb_h.ccb_req_ptr;
12852545bca0SMatthew Dillon 
12862545bca0SMatthew Dillon 	hdrp = req->req_vbuf;
12872545bca0SMatthew Dillon 	mpt_off = req->req_vbuf;
12882545bca0SMatthew Dillon 
12892545bca0SMatthew Dillon 	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
12902545bca0SMatthew Dillon 		error = EFBIG;
12912545bca0SMatthew Dillon 	}
12922545bca0SMatthew Dillon 
12932545bca0SMatthew Dillon 	if (error == 0) {
12942545bca0SMatthew Dillon 		switch (hdrp->Function) {
12952545bca0SMatthew Dillon 		case MPI_FUNCTION_SCSI_IO_REQUEST:
12962545bca0SMatthew Dillon 		case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
12972545bca0SMatthew Dillon 			istgt = 0;
12982545bca0SMatthew Dillon 			sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL;
12992545bca0SMatthew Dillon 			break;
13002545bca0SMatthew Dillon 		case MPI_FUNCTION_TARGET_ASSIST:
13012545bca0SMatthew Dillon 			istgt = 1;
13022545bca0SMatthew Dillon 			sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL;
13032545bca0SMatthew Dillon 			break;
13042545bca0SMatthew Dillon 		default:
13052545bca0SMatthew Dillon 			mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req_a64\n",
13062545bca0SMatthew Dillon 			    hdrp->Function);
13072545bca0SMatthew Dillon 			error = EINVAL;
13082545bca0SMatthew Dillon 			break;
13092545bca0SMatthew Dillon 		}
13102545bca0SMatthew Dillon 	}
13112545bca0SMatthew Dillon 
13122545bca0SMatthew Dillon 	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
13132545bca0SMatthew Dillon 		error = EFBIG;
13142545bca0SMatthew Dillon 		mpt_prt(mpt, "segment count %d too large (max %u)\n",
13152545bca0SMatthew Dillon 		    nseg, mpt->max_seg_cnt);
13162545bca0SMatthew Dillon 	}
13172545bca0SMatthew Dillon 
13182545bca0SMatthew Dillon bad:
13192545bca0SMatthew Dillon 	if (error != 0) {
13202545bca0SMatthew Dillon 		if (error != EFBIG && error != ENOMEM) {
13212545bca0SMatthew Dillon 			mpt_prt(mpt, "mpt_execute_req_a64: err %d\n", error);
13222545bca0SMatthew Dillon 		}
13232545bca0SMatthew Dillon 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
13242545bca0SMatthew Dillon 			cam_status status;
13252545bca0SMatthew Dillon 			mpt_freeze_ccb(ccb);
13262545bca0SMatthew Dillon 			if (error == EFBIG) {
13272545bca0SMatthew Dillon 				status = CAM_REQ_TOO_BIG;
13282545bca0SMatthew Dillon 			} else if (error == ENOMEM) {
13292545bca0SMatthew Dillon 				if (mpt->outofbeer == 0) {
13302545bca0SMatthew Dillon 					mpt->outofbeer = 1;
13312545bca0SMatthew Dillon 					xpt_freeze_simq(mpt->sim, 1);
13322545bca0SMatthew Dillon 					mpt_lprt(mpt, MPT_PRT_DEBUG,
13332545bca0SMatthew Dillon 					    "FREEZEQ\n");
13342545bca0SMatthew Dillon 				}
13352545bca0SMatthew Dillon 				status = CAM_REQUEUE_REQ;
13362545bca0SMatthew Dillon 			} else {
13372545bca0SMatthew Dillon 				status = CAM_REQ_CMP_ERR;
13382545bca0SMatthew Dillon 			}
13392545bca0SMatthew Dillon 			mpt_set_ccb_status(ccb, status);
13402545bca0SMatthew Dillon 		}
13412545bca0SMatthew Dillon 		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
13422545bca0SMatthew Dillon 			request_t *cmd_req =
13432545bca0SMatthew Dillon 				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
13442545bca0SMatthew Dillon 			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
13452545bca0SMatthew Dillon 			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
13462545bca0SMatthew Dillon 			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
13472545bca0SMatthew Dillon 		}
13482545bca0SMatthew Dillon 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
13494c42baf4SSascha Wildner 		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
13502545bca0SMatthew Dillon 		xpt_done(ccb);
13512545bca0SMatthew Dillon 		mpt_free_request(mpt, req);
13522545bca0SMatthew Dillon 		return;
13532545bca0SMatthew Dillon 	}
13542545bca0SMatthew Dillon 
13552545bca0SMatthew Dillon 	/*
13562545bca0SMatthew Dillon 	 * No data to transfer?
13572545bca0SMatthew Dillon 	 * Just make a single simple SGL with zero length.
13582545bca0SMatthew Dillon 	 */
13592545bca0SMatthew Dillon 
13602545bca0SMatthew Dillon 	if (mpt->verbose >= MPT_PRT_DEBUG) {
13612545bca0SMatthew Dillon 		int tidx = ((char *)sglp) - mpt_off;
13622545bca0SMatthew Dillon 		memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx);
13632545bca0SMatthew Dillon 	}
13642545bca0SMatthew Dillon 
13652545bca0SMatthew Dillon 	if (nseg == 0) {
13662545bca0SMatthew Dillon 		SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp;
13672545bca0SMatthew Dillon 		MPI_pSGE_SET_FLAGS(se1,
13682545bca0SMatthew Dillon 		    (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
13692545bca0SMatthew Dillon 		    MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
13702545bca0SMatthew Dillon 		se1->FlagsLength = htole32(se1->FlagsLength);
13712545bca0SMatthew Dillon 		goto out;
13722545bca0SMatthew Dillon 	}
13732545bca0SMatthew Dillon 
13742545bca0SMatthew Dillon 
13752545bca0SMatthew Dillon 	flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_64_BIT_ADDRESSING;
13762545bca0SMatthew Dillon 	if (istgt == 0) {
13772545bca0SMatthew Dillon 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
13782545bca0SMatthew Dillon 			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
13792545bca0SMatthew Dillon 		}
13802545bca0SMatthew Dillon 	} else {
13812545bca0SMatthew Dillon 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
13822545bca0SMatthew Dillon 			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
13832545bca0SMatthew Dillon 		}
13842545bca0SMatthew Dillon 	}
13852545bca0SMatthew Dillon 
13862545bca0SMatthew Dillon 	if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) {
13872545bca0SMatthew Dillon 		bus_dmasync_op_t op;
13882545bca0SMatthew Dillon 		if (istgt == 0) {
13892545bca0SMatthew Dillon 			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
13902545bca0SMatthew Dillon 				op = BUS_DMASYNC_PREREAD;
13912545bca0SMatthew Dillon 			} else {
13922545bca0SMatthew Dillon 				op = BUS_DMASYNC_PREWRITE;
13932545bca0SMatthew Dillon 			}
13942545bca0SMatthew Dillon 		} else {
13952545bca0SMatthew Dillon 			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
13962545bca0SMatthew Dillon 				op = BUS_DMASYNC_PREWRITE;
13972545bca0SMatthew Dillon 			} else {
13982545bca0SMatthew Dillon 				op = BUS_DMASYNC_PREREAD;
13992545bca0SMatthew Dillon 			}
14002545bca0SMatthew Dillon 		}
14012545bca0SMatthew Dillon 		bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
14022545bca0SMatthew Dillon 	}
14032545bca0SMatthew Dillon 
14042545bca0SMatthew Dillon 	/*
14052545bca0SMatthew Dillon 	 * Okay, fill in what we can at the end of the command frame.
14062545bca0SMatthew Dillon 	 * If we have up to MPT_NSGL_FIRST, we can fit them all into
14072545bca0SMatthew Dillon 	 * the command frame.
14082545bca0SMatthew Dillon 	 *
14092545bca0SMatthew Dillon 	 * Otherwise, we fill up through MPT_NSGL_FIRST less one
14102545bca0SMatthew Dillon 	 * SIMPLE64 pointers and start doing CHAIN64 entries after
14112545bca0SMatthew Dillon 	 * that.
14122545bca0SMatthew Dillon 	 */
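	/*
	 * Illustrative note (not part of the original comment):
	 * MPT_NSGL_FIRST() is the number of simple SGEs that fit in the
	 * first request frame, so if it evaluates to N and a request has
	 * N + 5 segments, only N - 1 of them are placed here and the Nth
	 * slot is reserved for the chain element that points at the
	 * remaining 6.
	 */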
14132545bca0SMatthew Dillon 
14142545bca0SMatthew Dillon 	if (nseg < MPT_NSGL_FIRST(mpt)) {
14152545bca0SMatthew Dillon 		first_lim = nseg;
14162545bca0SMatthew Dillon 	} else {
14172545bca0SMatthew Dillon 		/*
14182545bca0SMatthew Dillon 		 * Leave room for CHAIN element
14192545bca0SMatthew Dillon 		 */
14202545bca0SMatthew Dillon 		first_lim = MPT_NSGL_FIRST(mpt) - 1;
14212545bca0SMatthew Dillon 	}
14222545bca0SMatthew Dillon 
14232545bca0SMatthew Dillon 	se = (SGE_SIMPLE64 *) sglp;
14242545bca0SMatthew Dillon 	for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) {
14254c42baf4SSascha Wildner 		tf = flags;
14262545bca0SMatthew Dillon 		memset(se, 0, sizeof (*se));
14274c42baf4SSascha Wildner 		MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
14282545bca0SMatthew Dillon 		se->Address.Low = htole32(dm_segs->ds_addr & 0xffffffff);
14292545bca0SMatthew Dillon 		if (sizeof(bus_addr_t) > 4) {
14304c42baf4SSascha Wildner 			addr = ((uint64_t)dm_segs->ds_addr) >> 32;
14314c42baf4SSascha Wildner 			/* SAS1078 36GB limitation WAR */
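			/*
			 * The test below matches segments whose upper 32
			 * address bits equal 9, i.e. buffers in the
			 * 36GB-40GB window; those are handed to the 1078
			 * as "local" addresses with bit 31 set, which is
			 * the driver's workaround for that chip's 36GB
			 * DMA limitation.
			 */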
14324c42baf4SSascha Wildner 			if (mpt->is_1078 && (((uint64_t)dm_segs->ds_addr +
14334c42baf4SSascha Wildner 			    MPI_SGE_LENGTH(se->FlagsLength)) >> 32) == 9) {
1434f582582cSSascha Wildner 				addr |= (1U << 31);
14354c42baf4SSascha Wildner 				tf |= MPI_SGE_FLAGS_LOCAL_ADDRESS;
14362545bca0SMatthew Dillon 			}
14374c42baf4SSascha Wildner 			se->Address.High = htole32(addr);
14384c42baf4SSascha Wildner 		}
14392545bca0SMatthew Dillon 		if (seg == first_lim - 1) {
14402545bca0SMatthew Dillon 			tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
14412545bca0SMatthew Dillon 		}
14422545bca0SMatthew Dillon 		if (seg == nseg - 1) {
14432545bca0SMatthew Dillon 			tf |=	MPI_SGE_FLAGS_END_OF_LIST |
14442545bca0SMatthew Dillon 				MPI_SGE_FLAGS_END_OF_BUFFER;
14452545bca0SMatthew Dillon 		}
14462545bca0SMatthew Dillon 		MPI_pSGE_SET_FLAGS(se, tf);
14472545bca0SMatthew Dillon 		se->FlagsLength = htole32(se->FlagsLength);
14482545bca0SMatthew Dillon 	}
14492545bca0SMatthew Dillon 
14502545bca0SMatthew Dillon 	if (seg == nseg) {
14512545bca0SMatthew Dillon 		goto out;
14522545bca0SMatthew Dillon 	}
14532545bca0SMatthew Dillon 
14542545bca0SMatthew Dillon 	/*
14552545bca0SMatthew Dillon 	 * Tell the IOC where to find the first chain element.
14562545bca0SMatthew Dillon 	 */
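	/*
	 * Note: ChainOffset is expressed in 32-bit words, hence the ">> 2"
	 * below; e.g. a chain element that begins 96 bytes into the frame
	 * yields a ChainOffset of 24.
	 */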
14572545bca0SMatthew Dillon 	hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2;
14582545bca0SMatthew Dillon 	nxt_off = MPT_RQSL(mpt);
14592545bca0SMatthew Dillon 	trq = req;
14602545bca0SMatthew Dillon 
14612545bca0SMatthew Dillon 	/*
14622545bca0SMatthew Dillon 	 * Make up the rest of the data segments out of a chain element
14634c42baf4SSascha Wildner 	 * (contained in the current request frame) which points to
14642545bca0SMatthew Dillon 	 * SIMPLE64 elements in the next request frame, possibly ending
14652545bca0SMatthew Dillon 	 * with *another* chain element (if there's more).
14662545bca0SMatthew Dillon 	 */
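	/*
	 * Shape of the resulting SGL (a sketch, assuming several chained
	 * frames are needed):
	 *
	 *   frame 0: [ SCSI IO / TGT ASSIST hdr | S S ... S | C ]
	 *   frame 1: [ S S ... S | C ]
	 *   frame n: [ S S ... S ]      <- last S carries EOL/EOB flags
	 *
	 * where each S is a SGE_SIMPLE64 and each C is a SGE_CHAIN64
	 * pointing at the start of the next frame's list.
	 */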
14672545bca0SMatthew Dillon 	while (seg < nseg) {
14682545bca0SMatthew Dillon 		/*
14692545bca0SMatthew Dillon 		 * Point to the chain descriptor. Note that the chain
14702545bca0SMatthew Dillon 		 * descriptor is at the end of the *previous* list (whether
14712545bca0SMatthew Dillon 		 * chain or simple).
14722545bca0SMatthew Dillon 		 */
14732545bca0SMatthew Dillon 		ce = (SGE_CHAIN64 *) se;
14742545bca0SMatthew Dillon 
14752545bca0SMatthew Dillon 		/*
14762545bca0SMatthew Dillon 		 * Before we change our current pointer, make sure we won't
14772545bca0SMatthew Dillon 		 * overflow the request area with this frame. Note that we
14782545bca0SMatthew Dillon 		 * test against 'greater than' here as it's okay in this case
14792545bca0SMatthew Dillon 		 * to have next offset be just outside the request area.
14802545bca0SMatthew Dillon 		 */
14812545bca0SMatthew Dillon 		if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) {
14822545bca0SMatthew Dillon 			nxt_off = MPT_REQUEST_AREA;
14832545bca0SMatthew Dillon 			goto next_chain;
14842545bca0SMatthew Dillon 		}
14852545bca0SMatthew Dillon 
14862545bca0SMatthew Dillon 		/*
14872545bca0SMatthew Dillon 		 * Set our SGE element pointer to the beginning of the chain
14882545bca0SMatthew Dillon 		 * list and update our next chain list offset.
14892545bca0SMatthew Dillon 		 */
14902545bca0SMatthew Dillon 		se = (SGE_SIMPLE64 *) &mpt_off[nxt_off];
14912545bca0SMatthew Dillon 		cur_off = nxt_off;
14922545bca0SMatthew Dillon 		nxt_off += MPT_RQSL(mpt);
14932545bca0SMatthew Dillon 
14942545bca0SMatthew Dillon 		/*
14954c42baf4SSascha Wildner 		 * Now initialize the chain descriptor.
14962545bca0SMatthew Dillon 		 */
14972545bca0SMatthew Dillon 		memset(ce, 0, sizeof (*ce));
14982545bca0SMatthew Dillon 
14992545bca0SMatthew Dillon 		/*
15002545bca0SMatthew Dillon 		 * Get the physical address of the chain list.
15012545bca0SMatthew Dillon 		 */
15022545bca0SMatthew Dillon 		chain_list_addr = trq->req_pbuf;
15032545bca0SMatthew Dillon 		chain_list_addr += cur_off;
15042545bca0SMatthew Dillon 		if (sizeof (bus_addr_t) > 4) {
15052545bca0SMatthew Dillon 			ce->Address.High =
15062545bca0SMatthew Dillon 			    htole32(((uint64_t)chain_list_addr) >> 32);
15072545bca0SMatthew Dillon 		}
15082545bca0SMatthew Dillon 		ce->Address.Low = htole32(chain_list_addr & 0xffffffff);
15092545bca0SMatthew Dillon 		ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT |
15102545bca0SMatthew Dillon 			    MPI_SGE_FLAGS_64_BIT_ADDRESSING;
15112545bca0SMatthew Dillon 
15122545bca0SMatthew Dillon 		/*
15132545bca0SMatthew Dillon 		 * If we have more than a frame's worth of segments left,
15142545bca0SMatthew Dillon 		 * set up the chain list to have the last element be another
15152545bca0SMatthew Dillon 		 * chain descriptor.
15162545bca0SMatthew Dillon 		 */
15172545bca0SMatthew Dillon 		if ((nseg - seg) > MPT_NSGL(mpt)) {
15182545bca0SMatthew Dillon 			this_seg_lim = seg + MPT_NSGL(mpt) - 1;
15192545bca0SMatthew Dillon 			/*
15202545bca0SMatthew Dillon 			 * The chain length is the number of bytes occupied by
15212545bca0SMatthew Dillon 			 * this frame's SIMPLE64 segments plus the trailing
15222545bca0SMatthew Dillon 			 * chain element.
15232545bca0SMatthew Dillon 			 *
15242545bca0SMatthew Dillon 			 * The next chain descriptor offset is the size of
15242545bca0SMatthew Dillon 			 * those segments expressed in 32-bit words.
15252545bca0SMatthew Dillon 			 */
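			/*
			 * Worked example (hedged; sizes come from the MPI
			 * headers): with MPT_NSGL(mpt) == 8 and plenty of
			 * segments left, 7 SIMPLE64 entries land in this
			 * frame, so ce->Length covers 7 * sizeof(SGE_SIMPLE64)
			 * bytes plus one SGE_CHAIN64, while NextChainOffset
			 * is (7 * sizeof(SGE_SIMPLE64)) >> 2 words.
			 */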
15262545bca0SMatthew Dillon 			ce->Length = (this_seg_lim - seg) *
15272545bca0SMatthew Dillon 			    sizeof (SGE_SIMPLE64);
15282545bca0SMatthew Dillon 			ce->NextChainOffset = ce->Length >> 2;
15292545bca0SMatthew Dillon 			ce->Length += sizeof (SGE_CHAIN64);
15302545bca0SMatthew Dillon 		} else {
15312545bca0SMatthew Dillon 			this_seg_lim = nseg;
15322545bca0SMatthew Dillon 			ce->Length = (this_seg_lim - seg) *
15332545bca0SMatthew Dillon 			    sizeof (SGE_SIMPLE64);
15342545bca0SMatthew Dillon 		}
15352545bca0SMatthew Dillon 		ce->Length = htole16(ce->Length);
15362545bca0SMatthew Dillon 
15372545bca0SMatthew Dillon 		/*
15382545bca0SMatthew Dillon 		 * Fill in the chain list SGE elements with our segment data.
15392545bca0SMatthew Dillon 		 *
15402545bca0SMatthew Dillon 		 * If we're the last element in this chain list, set the last
15412545bca0SMatthew Dillon 		 * element flag. If we're the completely last element period,
15422545bca0SMatthew Dillon 		 * set the end of list and end of buffer flags.
15432545bca0SMatthew Dillon 		 */
15442545bca0SMatthew Dillon 		while (seg < this_seg_lim) {
15454c42baf4SSascha Wildner 			tf = flags;
15462545bca0SMatthew Dillon 			memset(se, 0, sizeof (*se));
15474c42baf4SSascha Wildner 			MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
15482545bca0SMatthew Dillon 			se->Address.Low = htole32(dm_segs->ds_addr &
15492545bca0SMatthew Dillon 			    0xffffffff);
15502545bca0SMatthew Dillon 			if (sizeof (bus_addr_t) > 4) {
15514c42baf4SSascha Wildner 				addr = ((uint64_t)dm_segs->ds_addr) >> 32;
15524c42baf4SSascha Wildner 				/* SAS1078 36GB limitation WAR */
15534c42baf4SSascha Wildner 				if (mpt->is_1078 &&
15544c42baf4SSascha Wildner 				    (((uint64_t)dm_segs->ds_addr +
15554c42baf4SSascha Wildner 				    MPI_SGE_LENGTH(se->FlagsLength)) >>
15564c42baf4SSascha Wildner 				    32) == 9) {
1557f582582cSSascha Wildner 					addr |= (1U << 31);
15584c42baf4SSascha Wildner 					tf |= MPI_SGE_FLAGS_LOCAL_ADDRESS;
15592545bca0SMatthew Dillon 				}
15604c42baf4SSascha Wildner 				se->Address.High = htole32(addr);
15614c42baf4SSascha Wildner 			}
15622545bca0SMatthew Dillon 			if (seg == this_seg_lim - 1) {
15632545bca0SMatthew Dillon 				tf |=	MPI_SGE_FLAGS_LAST_ELEMENT;
15642545bca0SMatthew Dillon 			}
15652545bca0SMatthew Dillon 			if (seg == nseg - 1) {
15662545bca0SMatthew Dillon 				tf |=	MPI_SGE_FLAGS_END_OF_LIST |
15672545bca0SMatthew Dillon 					MPI_SGE_FLAGS_END_OF_BUFFER;
15682545bca0SMatthew Dillon 			}
15692545bca0SMatthew Dillon 			MPI_pSGE_SET_FLAGS(se, tf);
15702545bca0SMatthew Dillon 			se->FlagsLength = htole32(se->FlagsLength);
15712545bca0SMatthew Dillon 			se++;
15722545bca0SMatthew Dillon 			seg++;
15732545bca0SMatthew Dillon 			dm_segs++;
15742545bca0SMatthew Dillon 		}
15752545bca0SMatthew Dillon 
15762545bca0SMatthew Dillon     next_chain:
15772545bca0SMatthew Dillon 		/*
15782545bca0SMatthew Dillon 		 * If we have more segments to do and we've used up all of
15792545bca0SMatthew Dillon 		 * the space in a request area, go allocate another one
15802545bca0SMatthew Dillon 		 * and chain to that.
15812545bca0SMatthew Dillon 		 */
15822545bca0SMatthew Dillon 		if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) {
15832545bca0SMatthew Dillon 			request_t *nrq;
15842545bca0SMatthew Dillon 
15852545bca0SMatthew Dillon 			nrq = mpt_get_request(mpt, FALSE);
15862545bca0SMatthew Dillon 
15872545bca0SMatthew Dillon 			if (nrq == NULL) {
15882545bca0SMatthew Dillon 				error = ENOMEM;
15892545bca0SMatthew Dillon 				goto bad;
15902545bca0SMatthew Dillon 			}
15912545bca0SMatthew Dillon 
15922545bca0SMatthew Dillon 			/*
15932545bca0SMatthew Dillon 			 * Append the new request area on the tail of our list.
15942545bca0SMatthew Dillon 			 */
15952545bca0SMatthew Dillon 			if ((trq = req->chain) == NULL) {
15962545bca0SMatthew Dillon 				req->chain = nrq;
15972545bca0SMatthew Dillon 			} else {
15982545bca0SMatthew Dillon 				while (trq->chain != NULL) {
15992545bca0SMatthew Dillon 					trq = trq->chain;
16002545bca0SMatthew Dillon 				}
16012545bca0SMatthew Dillon 				trq->chain = nrq;
16022545bca0SMatthew Dillon 			}
16032545bca0SMatthew Dillon 			trq = nrq;
16042545bca0SMatthew Dillon 			mpt_off = trq->req_vbuf;
16052545bca0SMatthew Dillon 			if (mpt->verbose >= MPT_PRT_DEBUG) {
16062545bca0SMatthew Dillon 				memset(mpt_off, 0xff, MPT_REQUEST_AREA);
16072545bca0SMatthew Dillon 			}
16082545bca0SMatthew Dillon 			nxt_off = 0;
16092545bca0SMatthew Dillon 		}
16102545bca0SMatthew Dillon 	}
16112545bca0SMatthew Dillon out:
16122545bca0SMatthew Dillon 
16132545bca0SMatthew Dillon 	/*
16142545bca0SMatthew Dillon 	 * Last time we need to check if this CCB needs to be aborted.
16152545bca0SMatthew Dillon 	 */
16162545bca0SMatthew Dillon 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
16172545bca0SMatthew Dillon 		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
16182545bca0SMatthew Dillon 			request_t *cmd_req =
16192545bca0SMatthew Dillon 				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
16202545bca0SMatthew Dillon 			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
16212545bca0SMatthew Dillon 			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
16222545bca0SMatthew Dillon 			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
16232545bca0SMatthew Dillon 		}
16242545bca0SMatthew Dillon 		mpt_prt(mpt,
16252545bca0SMatthew Dillon 		    "mpt_execute_req_a64: I/O cancelled (status 0x%x)\n",
16262545bca0SMatthew Dillon 		    ccb->ccb_h.status & CAM_STATUS_MASK);
16272545bca0SMatthew Dillon 		if (nseg && (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
16282545bca0SMatthew Dillon 			bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
16292545bca0SMatthew Dillon 		}
16302545bca0SMatthew Dillon 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
16314c42baf4SSascha Wildner 		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
16322545bca0SMatthew Dillon 		xpt_done(ccb);
16332545bca0SMatthew Dillon 		mpt_free_request(mpt, req);
16342545bca0SMatthew Dillon 		return;
16352545bca0SMatthew Dillon 	}
16362545bca0SMatthew Dillon 
16372545bca0SMatthew Dillon 	ccb->ccb_h.status |= CAM_SIM_QUEUED;
16382545bca0SMatthew Dillon 	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
16392545bca0SMatthew Dillon 		mpt_req_timeout(req, (ccb->ccb_h.timeout * hz) / 1000,
16402545bca0SMatthew Dillon 		    mpt_timeout, ccb);
16412545bca0SMatthew Dillon 	}
16422545bca0SMatthew Dillon 	if (mpt->verbose > MPT_PRT_DEBUG) {
16432545bca0SMatthew Dillon 		int nc = 0;
16442545bca0SMatthew Dillon 		mpt_print_request(req->req_vbuf);
16452545bca0SMatthew Dillon 		for (trq = req->chain; trq; trq = trq->chain) {
16462545bca0SMatthew Dillon 			kprintf("  Additional Chain Area %d\n", nc++);
16472545bca0SMatthew Dillon 			mpt_dump_sgl(trq->req_vbuf, 0);
16482545bca0SMatthew Dillon 		}
16492545bca0SMatthew Dillon 	}
16502545bca0SMatthew Dillon 
16512545bca0SMatthew Dillon 	if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
16522545bca0SMatthew Dillon 		request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
16532545bca0SMatthew Dillon 		mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
16542545bca0SMatthew Dillon #ifdef	WE_TRUST_AUTO_GOOD_STATUS
16552545bca0SMatthew Dillon 		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
16562545bca0SMatthew Dillon 		    csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
16572545bca0SMatthew Dillon 			tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
16582545bca0SMatthew Dillon 		} else {
16592545bca0SMatthew Dillon 			tgt->state = TGT_STATE_MOVING_DATA;
16602545bca0SMatthew Dillon 		}
16612545bca0SMatthew Dillon #else
16622545bca0SMatthew Dillon 		tgt->state = TGT_STATE_MOVING_DATA;
16632545bca0SMatthew Dillon #endif
16642545bca0SMatthew Dillon 	}
16652545bca0SMatthew Dillon 	mpt_send_cmd(mpt, req);
16662545bca0SMatthew Dillon }
16672545bca0SMatthew Dillon 
16682545bca0SMatthew Dillon static void
16692545bca0SMatthew Dillon mpt_execute_req(void *arg, bus_dma_segment_t *dm_segs, int nseg, int error)
16702545bca0SMatthew Dillon {
16712545bca0SMatthew Dillon 	request_t *req, *trq;
16722545bca0SMatthew Dillon 	char *mpt_off;
16732545bca0SMatthew Dillon 	union ccb *ccb;
16742545bca0SMatthew Dillon 	struct mpt_softc *mpt;
16752545bca0SMatthew Dillon 	int seg, first_lim;
16762545bca0SMatthew Dillon 	uint32_t flags, nxt_off;
16772545bca0SMatthew Dillon 	void *sglp = NULL;
16782545bca0SMatthew Dillon 	MSG_REQUEST_HEADER *hdrp;
16792545bca0SMatthew Dillon 	SGE_SIMPLE32 *se;
16802545bca0SMatthew Dillon 	SGE_CHAIN32 *ce;
16812545bca0SMatthew Dillon 	int istgt = 0;
16822545bca0SMatthew Dillon 
16832545bca0SMatthew Dillon 	req = (request_t *)arg;
16842545bca0SMatthew Dillon 	ccb = req->ccb;
16852545bca0SMatthew Dillon 
16862545bca0SMatthew Dillon 	mpt = ccb->ccb_h.ccb_mpt_ptr;
16872545bca0SMatthew Dillon 	req = ccb->ccb_h.ccb_req_ptr;
16882545bca0SMatthew Dillon 
16892545bca0SMatthew Dillon 	hdrp = req->req_vbuf;
16902545bca0SMatthew Dillon 	mpt_off = req->req_vbuf;
16912545bca0SMatthew Dillon 
16922545bca0SMatthew Dillon 
16932545bca0SMatthew Dillon 	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
16942545bca0SMatthew Dillon 		error = EFBIG;
16952545bca0SMatthew Dillon 	}
16962545bca0SMatthew Dillon 
16972545bca0SMatthew Dillon 	if (error == 0) {
16982545bca0SMatthew Dillon 		switch (hdrp->Function) {
16992545bca0SMatthew Dillon 		case MPI_FUNCTION_SCSI_IO_REQUEST:
17002545bca0SMatthew Dillon 		case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
17012545bca0SMatthew Dillon 			sglp = &((PTR_MSG_SCSI_IO_REQUEST)hdrp)->SGL;
17022545bca0SMatthew Dillon 			break;
17032545bca0SMatthew Dillon 		case MPI_FUNCTION_TARGET_ASSIST:
17042545bca0SMatthew Dillon 			istgt = 1;
17052545bca0SMatthew Dillon 			sglp = &((PTR_MSG_TARGET_ASSIST_REQUEST)hdrp)->SGL;
17062545bca0SMatthew Dillon 			break;
17072545bca0SMatthew Dillon 		default:
17082545bca0SMatthew Dillon 			mpt_prt(mpt, "bad fct 0x%x in mpt_execute_req\n",
17092545bca0SMatthew Dillon 			    hdrp->Function);
17102545bca0SMatthew Dillon 			error = EINVAL;
17112545bca0SMatthew Dillon 			break;
17122545bca0SMatthew Dillon 		}
17132545bca0SMatthew Dillon 	}
17142545bca0SMatthew Dillon 
17152545bca0SMatthew Dillon 	if (error == 0 && ((uint32_t)nseg) >= mpt->max_seg_cnt) {
17162545bca0SMatthew Dillon 		error = EFBIG;
17172545bca0SMatthew Dillon 		mpt_prt(mpt, "segment count %d too large (max %u)\n",
17182545bca0SMatthew Dillon 		    nseg, mpt->max_seg_cnt);
17192545bca0SMatthew Dillon 	}
17202545bca0SMatthew Dillon 
17212545bca0SMatthew Dillon bad:
17222545bca0SMatthew Dillon 	if (error != 0) {
17232545bca0SMatthew Dillon 		if (error != EFBIG && error != ENOMEM) {
17242545bca0SMatthew Dillon 			mpt_prt(mpt, "mpt_execute_req: err %d\n", error);
17252545bca0SMatthew Dillon 		}
17262545bca0SMatthew Dillon 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
17272545bca0SMatthew Dillon 			cam_status status;
17282545bca0SMatthew Dillon 			mpt_freeze_ccb(ccb);
17292545bca0SMatthew Dillon 			if (error == EFBIG) {
17302545bca0SMatthew Dillon 				status = CAM_REQ_TOO_BIG;
17312545bca0SMatthew Dillon 			} else if (error == ENOMEM) {
17322545bca0SMatthew Dillon 				if (mpt->outofbeer == 0) {
17332545bca0SMatthew Dillon 					mpt->outofbeer = 1;
17342545bca0SMatthew Dillon 					xpt_freeze_simq(mpt->sim, 1);
17352545bca0SMatthew Dillon 					mpt_lprt(mpt, MPT_PRT_DEBUG,
17362545bca0SMatthew Dillon 					    "FREEZEQ\n");
17372545bca0SMatthew Dillon 				}
17382545bca0SMatthew Dillon 				status = CAM_REQUEUE_REQ;
17392545bca0SMatthew Dillon 			} else {
17402545bca0SMatthew Dillon 				status = CAM_REQ_CMP_ERR;
17412545bca0SMatthew Dillon 			}
17422545bca0SMatthew Dillon 			mpt_set_ccb_status(ccb, status);
17432545bca0SMatthew Dillon 		}
17442545bca0SMatthew Dillon 		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
17452545bca0SMatthew Dillon 			request_t *cmd_req =
17462545bca0SMatthew Dillon 				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
17472545bca0SMatthew Dillon 			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
17482545bca0SMatthew Dillon 			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
17492545bca0SMatthew Dillon 			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
17502545bca0SMatthew Dillon 		}
17512545bca0SMatthew Dillon 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
17524c42baf4SSascha Wildner 		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
17532545bca0SMatthew Dillon 		xpt_done(ccb);
17542545bca0SMatthew Dillon 		mpt_free_request(mpt, req);
17552545bca0SMatthew Dillon 		return;
17562545bca0SMatthew Dillon 	}
17572545bca0SMatthew Dillon 
17582545bca0SMatthew Dillon 	/*
17592545bca0SMatthew Dillon 	 * No data to transfer?
17602545bca0SMatthew Dillon 	 * Just make a single simple SGL with zero length.
17612545bca0SMatthew Dillon 	 */
17622545bca0SMatthew Dillon 
17632545bca0SMatthew Dillon 	if (mpt->verbose >= MPT_PRT_DEBUG) {
17642545bca0SMatthew Dillon 		int tidx = ((char *)sglp) - mpt_off;
17652545bca0SMatthew Dillon 		memset(&mpt_off[tidx], 0xff, MPT_REQUEST_AREA - tidx);
17662545bca0SMatthew Dillon 	}
17672545bca0SMatthew Dillon 
17682545bca0SMatthew Dillon 	if (nseg == 0) {
17692545bca0SMatthew Dillon 		SGE_SIMPLE32 *se1 = (SGE_SIMPLE32 *) sglp;
17702545bca0SMatthew Dillon 		MPI_pSGE_SET_FLAGS(se1,
17712545bca0SMatthew Dillon 		    (MPI_SGE_FLAGS_LAST_ELEMENT | MPI_SGE_FLAGS_END_OF_BUFFER |
17722545bca0SMatthew Dillon 		    MPI_SGE_FLAGS_SIMPLE_ELEMENT | MPI_SGE_FLAGS_END_OF_LIST));
17732545bca0SMatthew Dillon 		se1->FlagsLength = htole32(se1->FlagsLength);
17742545bca0SMatthew Dillon 		goto out;
17752545bca0SMatthew Dillon 	}
17762545bca0SMatthew Dillon 
17772545bca0SMatthew Dillon 
17782545bca0SMatthew Dillon 	flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
17792545bca0SMatthew Dillon 	if (istgt == 0) {
17802545bca0SMatthew Dillon 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
17812545bca0SMatthew Dillon 			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
17822545bca0SMatthew Dillon 		}
17832545bca0SMatthew Dillon 	} else {
17842545bca0SMatthew Dillon 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
17852545bca0SMatthew Dillon 			flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
17862545bca0SMatthew Dillon 		}
17872545bca0SMatthew Dillon 	}
17882545bca0SMatthew Dillon 
17892545bca0SMatthew Dillon 	if (!(ccb->ccb_h.flags & (CAM_SG_LIST_PHYS|CAM_DATA_PHYS))) {
17902545bca0SMatthew Dillon 		bus_dmasync_op_t op;
17912545bca0SMatthew Dillon 		if (istgt) {
17922545bca0SMatthew Dillon 			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
17932545bca0SMatthew Dillon 				op = BUS_DMASYNC_PREREAD;
17942545bca0SMatthew Dillon 			} else {
17952545bca0SMatthew Dillon 				op = BUS_DMASYNC_PREWRITE;
17962545bca0SMatthew Dillon 			}
17972545bca0SMatthew Dillon 		} else {
17982545bca0SMatthew Dillon 			if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
17992545bca0SMatthew Dillon 				op = BUS_DMASYNC_PREWRITE;
18002545bca0SMatthew Dillon 			} else {
18012545bca0SMatthew Dillon 				op = BUS_DMASYNC_PREREAD;
18022545bca0SMatthew Dillon 			}
18032545bca0SMatthew Dillon 		}
18042545bca0SMatthew Dillon 		bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
18052545bca0SMatthew Dillon 	}
18062545bca0SMatthew Dillon 
18072545bca0SMatthew Dillon 	/*
18082545bca0SMatthew Dillon 	 * Okay, fill in what we can at the end of the command frame.
18092545bca0SMatthew Dillon 	 * If we have up to MPT_NSGL_FIRST, we can fit them all into
18102545bca0SMatthew Dillon 	 * the command frame.
18112545bca0SMatthew Dillon 	 *
18122545bca0SMatthew Dillon 	 * Otherwise, we fill up through MPT_NSGL_FIRST less one
18132545bca0SMatthew Dillon 	 * SIMPLE32 pointers and start doing CHAIN32 entries after
18142545bca0SMatthew Dillon 	 * that.
18152545bca0SMatthew Dillon 	 */
18162545bca0SMatthew Dillon 
18172545bca0SMatthew Dillon 	if (nseg < MPT_NSGL_FIRST(mpt)) {
18182545bca0SMatthew Dillon 		first_lim = nseg;
18192545bca0SMatthew Dillon 	} else {
18202545bca0SMatthew Dillon 		/*
18212545bca0SMatthew Dillon 		 * Leave room for CHAIN element
18222545bca0SMatthew Dillon 		 */
18232545bca0SMatthew Dillon 		first_lim = MPT_NSGL_FIRST(mpt) - 1;
18242545bca0SMatthew Dillon 	}
18252545bca0SMatthew Dillon 
18262545bca0SMatthew Dillon 	se = (SGE_SIMPLE32 *) sglp;
18272545bca0SMatthew Dillon 	for (seg = 0; seg < first_lim; seg++, se++, dm_segs++) {
18282545bca0SMatthew Dillon 		uint32_t tf;
18292545bca0SMatthew Dillon 
18302545bca0SMatthew Dillon 		memset(se, 0,sizeof (*se));
18312545bca0SMatthew Dillon 		se->Address = htole32(dm_segs->ds_addr);
18322545bca0SMatthew Dillon 
18332545bca0SMatthew Dillon 		MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
18342545bca0SMatthew Dillon 		tf = flags;
18352545bca0SMatthew Dillon 		if (seg == first_lim - 1) {
18362545bca0SMatthew Dillon 			tf |= MPI_SGE_FLAGS_LAST_ELEMENT;
18372545bca0SMatthew Dillon 		}
18382545bca0SMatthew Dillon 		if (seg == nseg - 1) {
18392545bca0SMatthew Dillon 			tf |=	MPI_SGE_FLAGS_END_OF_LIST |
18402545bca0SMatthew Dillon 				MPI_SGE_FLAGS_END_OF_BUFFER;
18412545bca0SMatthew Dillon 		}
18422545bca0SMatthew Dillon 		MPI_pSGE_SET_FLAGS(se, tf);
18432545bca0SMatthew Dillon 		se->FlagsLength = htole32(se->FlagsLength);
18442545bca0SMatthew Dillon 	}
18452545bca0SMatthew Dillon 
18462545bca0SMatthew Dillon 	if (seg == nseg) {
18472545bca0SMatthew Dillon 		goto out;
18482545bca0SMatthew Dillon 	}
18492545bca0SMatthew Dillon 
18502545bca0SMatthew Dillon 	/*
18512545bca0SMatthew Dillon 	 * Tell the IOC where to find the first chain element.
18522545bca0SMatthew Dillon 	 */
18532545bca0SMatthew Dillon 	hdrp->ChainOffset = ((char *)se - (char *)hdrp) >> 2;
18542545bca0SMatthew Dillon 	nxt_off = MPT_RQSL(mpt);
18552545bca0SMatthew Dillon 	trq = req;
18562545bca0SMatthew Dillon 
18572545bca0SMatthew Dillon 	/*
18582545bca0SMatthew Dillon 	 * Make up the rest of the data segments out of a chain element
18594c42baf4SSascha Wildner 	 * (contained in the current request frame) which points to
18602545bca0SMatthew Dillon 	 * SIMPLE32 elements in the next request frame, possibly ending
18612545bca0SMatthew Dillon 	 * with *another* chain element (if there's more).
18622545bca0SMatthew Dillon 	 */
18632545bca0SMatthew Dillon 	while (seg < nseg) {
18642545bca0SMatthew Dillon 		int this_seg_lim;
18652545bca0SMatthew Dillon 		uint32_t tf, cur_off;
18662545bca0SMatthew Dillon 		bus_addr_t chain_list_addr;
18672545bca0SMatthew Dillon 
18682545bca0SMatthew Dillon 		/*
18692545bca0SMatthew Dillon 		 * Point to the chain descriptor. Note that the chain
18702545bca0SMatthew Dillon 		 * descriptor is at the end of the *previous* list (whether
18712545bca0SMatthew Dillon 		 * chain or simple).
18722545bca0SMatthew Dillon 		 */
18732545bca0SMatthew Dillon 		ce = (SGE_CHAIN32 *) se;
18742545bca0SMatthew Dillon 
18752545bca0SMatthew Dillon 		/*
18762545bca0SMatthew Dillon 		 * Before we change our current pointer, make sure we won't
18772545bca0SMatthew Dillon 		 * overflow the request area with this frame. Note that we
18782545bca0SMatthew Dillon 		 * test against 'greater than' here as it's okay in this case
18792545bca0SMatthew Dillon 		 * to have next offset be just outside the request area.
18802545bca0SMatthew Dillon 		 */
18812545bca0SMatthew Dillon 		if ((nxt_off + MPT_RQSL(mpt)) > MPT_REQUEST_AREA) {
18822545bca0SMatthew Dillon 			nxt_off = MPT_REQUEST_AREA;
18832545bca0SMatthew Dillon 			goto next_chain;
18842545bca0SMatthew Dillon 		}
18852545bca0SMatthew Dillon 
18862545bca0SMatthew Dillon 		/*
18872545bca0SMatthew Dillon 		 * Set our SGE element pointer to the beginning of the chain
18882545bca0SMatthew Dillon 		 * list and update our next chain list offset.
18892545bca0SMatthew Dillon 		 */
18902545bca0SMatthew Dillon 		se = (SGE_SIMPLE32 *) &mpt_off[nxt_off];
18912545bca0SMatthew Dillon 		cur_off = nxt_off;
18922545bca0SMatthew Dillon 		nxt_off += MPT_RQSL(mpt);
18932545bca0SMatthew Dillon 
18942545bca0SMatthew Dillon 		/*
18954c42baf4SSascha Wildner 		 * Now initialize the chain descriptor.
18962545bca0SMatthew Dillon 		 */
18972545bca0SMatthew Dillon 		memset(ce, 0, sizeof (*ce));
18982545bca0SMatthew Dillon 
18992545bca0SMatthew Dillon 		/*
19002545bca0SMatthew Dillon 		 * Get the physical address of the chain list.
19012545bca0SMatthew Dillon 		 */
19022545bca0SMatthew Dillon 		chain_list_addr = trq->req_pbuf;
19032545bca0SMatthew Dillon 		chain_list_addr += cur_off;
19042545bca0SMatthew Dillon 
19052545bca0SMatthew Dillon 
19062545bca0SMatthew Dillon 
19072545bca0SMatthew Dillon 		ce->Address = htole32(chain_list_addr);
19082545bca0SMatthew Dillon 		ce->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;
19092545bca0SMatthew Dillon 
19102545bca0SMatthew Dillon 
19112545bca0SMatthew Dillon 		/*
19122545bca0SMatthew Dillon 		 * If we have more than a frame's worth of segments left,
19132545bca0SMatthew Dillon 		 * set up the chain list to have the last element be another
19142545bca0SMatthew Dillon 		 * chain descriptor.
19152545bca0SMatthew Dillon 		 */
19162545bca0SMatthew Dillon 		if ((nseg - seg) > MPT_NSGL(mpt)) {
19172545bca0SMatthew Dillon 			this_seg_lim = seg + MPT_NSGL(mpt) - 1;
19182545bca0SMatthew Dillon 			/*
19192545bca0SMatthew Dillon 			 * The chain length is the number of bytes occupied by
19202545bca0SMatthew Dillon 			 * this frame's SIMPLE32 segments plus the trailing
19212545bca0SMatthew Dillon 			 * chain element.
19222545bca0SMatthew Dillon 			 *
19232545bca0SMatthew Dillon 			 * The next chain descriptor offset is the size of
19232545bca0SMatthew Dillon 			 * those segments expressed in 32-bit words.
19242545bca0SMatthew Dillon 			 */
19252545bca0SMatthew Dillon 			ce->Length = (this_seg_lim - seg) *
19262545bca0SMatthew Dillon 			    sizeof (SGE_SIMPLE32);
19272545bca0SMatthew Dillon 			ce->NextChainOffset = ce->Length >> 2;
19282545bca0SMatthew Dillon 			ce->Length += sizeof (SGE_CHAIN32);
19292545bca0SMatthew Dillon 		} else {
19302545bca0SMatthew Dillon 			this_seg_lim = nseg;
19312545bca0SMatthew Dillon 			ce->Length = (this_seg_lim - seg) *
19322545bca0SMatthew Dillon 			    sizeof (SGE_SIMPLE32);
19332545bca0SMatthew Dillon 		}
19342545bca0SMatthew Dillon 		ce->Length = htole16(ce->Length);
19352545bca0SMatthew Dillon 
19362545bca0SMatthew Dillon 		/*
19372545bca0SMatthew Dillon 		 * Fill in the chain list SGE elements with our segment data.
19382545bca0SMatthew Dillon 		 *
19392545bca0SMatthew Dillon 		 * If we're the last element in this chain list, set the last
19402545bca0SMatthew Dillon 		 * element flag. If we're the completely last element period,
19412545bca0SMatthew Dillon 		 * set the end of list and end of buffer flags.
19422545bca0SMatthew Dillon 		 */
19432545bca0SMatthew Dillon 		while (seg < this_seg_lim) {
19442545bca0SMatthew Dillon 			memset(se, 0, sizeof (*se));
19452545bca0SMatthew Dillon 			se->Address = htole32(dm_segs->ds_addr);
19462545bca0SMatthew Dillon 
19472545bca0SMatthew Dillon 			MPI_pSGE_SET_LENGTH(se, dm_segs->ds_len);
19482545bca0SMatthew Dillon 			tf = flags;
19492545bca0SMatthew Dillon 			if (seg == this_seg_lim - 1) {
19502545bca0SMatthew Dillon 				tf |=	MPI_SGE_FLAGS_LAST_ELEMENT;
19512545bca0SMatthew Dillon 			}
19522545bca0SMatthew Dillon 			if (seg == nseg - 1) {
19532545bca0SMatthew Dillon 				tf |=	MPI_SGE_FLAGS_END_OF_LIST |
19542545bca0SMatthew Dillon 					MPI_SGE_FLAGS_END_OF_BUFFER;
19552545bca0SMatthew Dillon 			}
19562545bca0SMatthew Dillon 			MPI_pSGE_SET_FLAGS(se, tf);
19572545bca0SMatthew Dillon 			se->FlagsLength = htole32(se->FlagsLength);
19582545bca0SMatthew Dillon 			se++;
19592545bca0SMatthew Dillon 			seg++;
19602545bca0SMatthew Dillon 			dm_segs++;
19612545bca0SMatthew Dillon 		}
19622545bca0SMatthew Dillon 
19632545bca0SMatthew Dillon     next_chain:
19642545bca0SMatthew Dillon 		/*
19652545bca0SMatthew Dillon 		 * If we have more segments to do and we've used up all of
19662545bca0SMatthew Dillon 		 * the space in a request area, go allocate another one
19672545bca0SMatthew Dillon 		 * and chain to that.
19682545bca0SMatthew Dillon 		 */
19692545bca0SMatthew Dillon 		if (seg < nseg && nxt_off >= MPT_REQUEST_AREA) {
19702545bca0SMatthew Dillon 			request_t *nrq;
19712545bca0SMatthew Dillon 
19722545bca0SMatthew Dillon 			nrq = mpt_get_request(mpt, FALSE);
19732545bca0SMatthew Dillon 
19742545bca0SMatthew Dillon 			if (nrq == NULL) {
19752545bca0SMatthew Dillon 				error = ENOMEM;
19762545bca0SMatthew Dillon 				goto bad;
19772545bca0SMatthew Dillon 			}
19782545bca0SMatthew Dillon 
19792545bca0SMatthew Dillon 			/*
19802545bca0SMatthew Dillon 			 * Append the new request area on the tail of our list.
19812545bca0SMatthew Dillon 			 */
19822545bca0SMatthew Dillon 			if ((trq = req->chain) == NULL) {
19832545bca0SMatthew Dillon 				req->chain = nrq;
19842545bca0SMatthew Dillon 			} else {
19852545bca0SMatthew Dillon 				while (trq->chain != NULL) {
19862545bca0SMatthew Dillon 					trq = trq->chain;
19872545bca0SMatthew Dillon 				}
19882545bca0SMatthew Dillon 				trq->chain = nrq;
19892545bca0SMatthew Dillon 			}
19902545bca0SMatthew Dillon 			trq = nrq;
19912545bca0SMatthew Dillon 			mpt_off = trq->req_vbuf;
19922545bca0SMatthew Dillon 			if (mpt->verbose >= MPT_PRT_DEBUG) {
19932545bca0SMatthew Dillon 				memset(mpt_off, 0xff, MPT_REQUEST_AREA);
19942545bca0SMatthew Dillon 			}
19952545bca0SMatthew Dillon 			nxt_off = 0;
19962545bca0SMatthew Dillon 		}
19972545bca0SMatthew Dillon 	}
19982545bca0SMatthew Dillon out:
19992545bca0SMatthew Dillon 
20002545bca0SMatthew Dillon 	/*
20012545bca0SMatthew Dillon 	 * Last time we need to check if this CCB needs to be aborted.
20022545bca0SMatthew Dillon 	 */
20032545bca0SMatthew Dillon 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
20042545bca0SMatthew Dillon 		if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
20052545bca0SMatthew Dillon 			request_t *cmd_req =
20062545bca0SMatthew Dillon 				MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
20072545bca0SMatthew Dillon 			MPT_TGT_STATE(mpt, cmd_req)->state = TGT_STATE_IN_CAM;
20082545bca0SMatthew Dillon 			MPT_TGT_STATE(mpt, cmd_req)->ccb = NULL;
20092545bca0SMatthew Dillon 			MPT_TGT_STATE(mpt, cmd_req)->req = NULL;
20102545bca0SMatthew Dillon 		}
20112545bca0SMatthew Dillon 		mpt_prt(mpt,
20122545bca0SMatthew Dillon 		    "mpt_execute_req: I/O cancelled (status 0x%x)\n",
20132545bca0SMatthew Dillon 		    ccb->ccb_h.status & CAM_STATUS_MASK);
20142545bca0SMatthew Dillon 		if (nseg && (ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
20152545bca0SMatthew Dillon 			bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
20162545bca0SMatthew Dillon 		}
20172545bca0SMatthew Dillon 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
20184c42baf4SSascha Wildner 		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
20192545bca0SMatthew Dillon 		xpt_done(ccb);
20202545bca0SMatthew Dillon 		mpt_free_request(mpt, req);
20212545bca0SMatthew Dillon 		return;
20222545bca0SMatthew Dillon 	}
20232545bca0SMatthew Dillon 
20242545bca0SMatthew Dillon 	ccb->ccb_h.status |= CAM_SIM_QUEUED;
20252545bca0SMatthew Dillon 	if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
20262545bca0SMatthew Dillon 		mpt_req_timeout(req, (ccb->ccb_h.timeout * hz) / 1000,
20272545bca0SMatthew Dillon 		    mpt_timeout, ccb);
20282545bca0SMatthew Dillon 	}
20292545bca0SMatthew Dillon 	if (mpt->verbose > MPT_PRT_DEBUG) {
20302545bca0SMatthew Dillon 		int nc = 0;
20312545bca0SMatthew Dillon 		mpt_print_request(req->req_vbuf);
20322545bca0SMatthew Dillon 		for (trq = req->chain; trq; trq = trq->chain) {
20332545bca0SMatthew Dillon 			kprintf("  Additional Chain Area %d\n", nc++);
20342545bca0SMatthew Dillon 			mpt_dump_sgl(trq->req_vbuf, 0);
20352545bca0SMatthew Dillon 		}
20362545bca0SMatthew Dillon 	}
20372545bca0SMatthew Dillon 
20382545bca0SMatthew Dillon 	if (hdrp->Function == MPI_FUNCTION_TARGET_ASSIST) {
20392545bca0SMatthew Dillon 		request_t *cmd_req = MPT_TAG_2_REQ(mpt, ccb->csio.tag_id);
20402545bca0SMatthew Dillon 		mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
20412545bca0SMatthew Dillon #ifdef	WE_TRUST_AUTO_GOOD_STATUS
20422545bca0SMatthew Dillon 		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
20432545bca0SMatthew Dillon 		    csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
20442545bca0SMatthew Dillon 			tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
20452545bca0SMatthew Dillon 		} else {
20462545bca0SMatthew Dillon 			tgt->state = TGT_STATE_MOVING_DATA;
20472545bca0SMatthew Dillon 		}
20482545bca0SMatthew Dillon #else
20492545bca0SMatthew Dillon 		tgt->state = TGT_STATE_MOVING_DATA;
20502545bca0SMatthew Dillon #endif
20512545bca0SMatthew Dillon 	}
20522545bca0SMatthew Dillon 	mpt_send_cmd(mpt, req);
20532545bca0SMatthew Dillon }
20542545bca0SMatthew Dillon 
20552545bca0SMatthew Dillon static void
mpt_start(struct cam_sim * sim,union ccb * ccb)20562545bca0SMatthew Dillon mpt_start(struct cam_sim *sim, union ccb *ccb)
20572545bca0SMatthew Dillon {
20582545bca0SMatthew Dillon 	request_t *req;
20592545bca0SMatthew Dillon 	struct mpt_softc *mpt;
20602545bca0SMatthew Dillon 	MSG_SCSI_IO_REQUEST *mpt_req;
20612545bca0SMatthew Dillon 	struct ccb_scsiio *csio = &ccb->csio;
20622545bca0SMatthew Dillon 	struct ccb_hdr *ccbh = &ccb->ccb_h;
20632545bca0SMatthew Dillon 	bus_dmamap_callback_t *cb;
20642545bca0SMatthew Dillon 	target_id_t tgt;
20652545bca0SMatthew Dillon 	int raid_passthru;
20662545bca0SMatthew Dillon 
20672545bca0SMatthew Dillon 	/* Get the pointer for the physical adapter */
20682545bca0SMatthew Dillon 	mpt = ccb->ccb_h.ccb_mpt_ptr;
20692545bca0SMatthew Dillon 	raid_passthru = (sim == mpt->phydisk_sim);
20702545bca0SMatthew Dillon 
20712545bca0SMatthew Dillon 	if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
20722545bca0SMatthew Dillon 		if (mpt->outofbeer == 0) {
20732545bca0SMatthew Dillon 			mpt->outofbeer = 1;
20742545bca0SMatthew Dillon 			xpt_freeze_simq(mpt->sim, 1);
20752545bca0SMatthew Dillon 			mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
20762545bca0SMatthew Dillon 		}
20772545bca0SMatthew Dillon 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
20782545bca0SMatthew Dillon 		mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
20792545bca0SMatthew Dillon 		xpt_done(ccb);
20802545bca0SMatthew Dillon 		return;
20812545bca0SMatthew Dillon 	}
20822545bca0SMatthew Dillon #ifdef	INVARIANTS
20832545bca0SMatthew Dillon 	mpt_req_not_spcl(mpt, req, "mpt_start", __LINE__);
20842545bca0SMatthew Dillon #endif
20852545bca0SMatthew Dillon 
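	/*
	 * Pick the bus_dma callback based on the width of bus_addr_t;
	 * the a64 variant handles 64-bit segment addresses.
	 */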
20862545bca0SMatthew Dillon 	if (sizeof (bus_addr_t) > 4) {
20872545bca0SMatthew Dillon 		cb = mpt_execute_req_a64;
20882545bca0SMatthew Dillon 	} else {
20892545bca0SMatthew Dillon 		cb = mpt_execute_req;
20902545bca0SMatthew Dillon 	}
20912545bca0SMatthew Dillon 
20922545bca0SMatthew Dillon 	/*
20932545bca0SMatthew Dillon 	 * Link the ccb and the request structure so we can find
20942545bca0SMatthew Dillon 	 * either one from the other.
20952545bca0SMatthew Dillon 	 */
20962545bca0SMatthew Dillon 	req->ccb = ccb;
20972545bca0SMatthew Dillon 	ccb->ccb_h.ccb_req_ptr = req;
20982545bca0SMatthew Dillon 
20992545bca0SMatthew Dillon 	/* Now we build the command for the IOC */
21002545bca0SMatthew Dillon 	mpt_req = req->req_vbuf;
21012545bca0SMatthew Dillon 	memset(mpt_req, 0, sizeof (MSG_SCSI_IO_REQUEST));
21022545bca0SMatthew Dillon 
21032545bca0SMatthew Dillon 	mpt_req->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
21042545bca0SMatthew Dillon 	if (raid_passthru) {
21052545bca0SMatthew Dillon 		mpt_req->Function = MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
21062545bca0SMatthew Dillon 		if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) {
21072545bca0SMatthew Dillon 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
21082545bca0SMatthew Dillon 			mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE);
21092545bca0SMatthew Dillon 			xpt_done(ccb);
21102545bca0SMatthew Dillon 			return;
21112545bca0SMatthew Dillon 		}
21122545bca0SMatthew Dillon 		mpt_req->Bus = 0;	/* we never set bus here */
21132545bca0SMatthew Dillon 	} else {
21142545bca0SMatthew Dillon 		tgt = ccb->ccb_h.target_id;
21152545bca0SMatthew Dillon 		mpt_req->Bus = 0;	/* XXX */
21162545bca0SMatthew Dillon 
21172545bca0SMatthew Dillon 	}
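	/*
	 * Clamp the requested sense length to what fits in the
	 * per-request sense buffer.
	 */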
21182545bca0SMatthew Dillon 	mpt_req->SenseBufferLength =
21192545bca0SMatthew Dillon 		(csio->sense_len < MPT_SENSE_SIZE) ?
21202545bca0SMatthew Dillon 		 csio->sense_len : MPT_SENSE_SIZE;
21212545bca0SMatthew Dillon 
21222545bca0SMatthew Dillon 	/*
21232545bca0SMatthew Dillon 	 * We use the message context to find the request structure when we
21242545bca0SMatthew Dillon 	 * get the command completion interrupt from the IOC.
21252545bca0SMatthew Dillon 	 */
21262545bca0SMatthew Dillon 	mpt_req->MsgContext = htole32(req->index | scsi_io_handler_id);
21272545bca0SMatthew Dillon 
21282545bca0SMatthew Dillon 	/* Which physical device to do the I/O on */
21292545bca0SMatthew Dillon 	mpt_req->TargetID = tgt;
21302545bca0SMatthew Dillon 
21312545bca0SMatthew Dillon 	/* We assume a single level LUN type */
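	/*
	 * LUNs at or above MPT_MAX_LUNS are encoded with the two-byte
	 * flat addressing format (0x40 | high bits, then the low byte).
	 */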
21322545bca0SMatthew Dillon 	if (ccb->ccb_h.target_lun >= MPT_MAX_LUNS) {
21332545bca0SMatthew Dillon 		mpt_req->LUN[0] = 0x40 | ((ccb->ccb_h.target_lun >> 8) & 0x3f);
21342545bca0SMatthew Dillon 		mpt_req->LUN[1] = ccb->ccb_h.target_lun & 0xff;
21352545bca0SMatthew Dillon 	} else {
21362545bca0SMatthew Dillon 		mpt_req->LUN[1] = ccb->ccb_h.target_lun;
21372545bca0SMatthew Dillon 	}
21382545bca0SMatthew Dillon 
21392545bca0SMatthew Dillon 	/* Set the direction of the transfer */
21402545bca0SMatthew Dillon 	if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
21412545bca0SMatthew Dillon 		mpt_req->Control = MPI_SCSIIO_CONTROL_READ;
21422545bca0SMatthew Dillon 	} else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
21432545bca0SMatthew Dillon 		mpt_req->Control = MPI_SCSIIO_CONTROL_WRITE;
21442545bca0SMatthew Dillon 	} else {
21452545bca0SMatthew Dillon 		mpt_req->Control = MPI_SCSIIO_CONTROL_NODATATRANSFER;
21462545bca0SMatthew Dillon 	}
21472545bca0SMatthew Dillon 
21482545bca0SMatthew Dillon 	if ((ccb->ccb_h.flags & CAM_TAG_ACTION_VALID) != 0) {
21492545bca0SMatthew Dillon 		switch(ccb->csio.tag_action) {
21502545bca0SMatthew Dillon 		case MSG_HEAD_OF_Q_TAG:
21512545bca0SMatthew Dillon 			mpt_req->Control |= MPI_SCSIIO_CONTROL_HEADOFQ;
21522545bca0SMatthew Dillon 			break;
21532545bca0SMatthew Dillon 		case MSG_ACA_TASK:
21542545bca0SMatthew Dillon 			mpt_req->Control |= MPI_SCSIIO_CONTROL_ACAQ;
21552545bca0SMatthew Dillon 			break;
21562545bca0SMatthew Dillon 		case MSG_ORDERED_Q_TAG:
21572545bca0SMatthew Dillon 			mpt_req->Control |= MPI_SCSIIO_CONTROL_ORDEREDQ;
21582545bca0SMatthew Dillon 			break;
21592545bca0SMatthew Dillon 		case MSG_SIMPLE_Q_TAG:
21602545bca0SMatthew Dillon 		default:
21612545bca0SMatthew Dillon 			mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
21622545bca0SMatthew Dillon 			break;
21632545bca0SMatthew Dillon 		}
21642545bca0SMatthew Dillon 	} else {
21652545bca0SMatthew Dillon 		if (mpt->is_fc || mpt->is_sas) {
21662545bca0SMatthew Dillon 			mpt_req->Control |= MPI_SCSIIO_CONTROL_SIMPLEQ;
21672545bca0SMatthew Dillon 		} else {
21682545bca0SMatthew Dillon 			/* XXX No such thing for a target doing packetized. */
21692545bca0SMatthew Dillon 			mpt_req->Control |= MPI_SCSIIO_CONTROL_UNTAGGED;
21702545bca0SMatthew Dillon 		}
21712545bca0SMatthew Dillon 	}
21722545bca0SMatthew Dillon 
21732545bca0SMatthew Dillon 	if (mpt->is_spi) {
21742545bca0SMatthew Dillon 		if (ccb->ccb_h.flags & CAM_DIS_DISCONNECT) {
21752545bca0SMatthew Dillon 			mpt_req->Control |= MPI_SCSIIO_CONTROL_NO_DISCONNECT;
21762545bca0SMatthew Dillon 		}
21772545bca0SMatthew Dillon 	}
21782545bca0SMatthew Dillon 	mpt_req->Control = htole32(mpt_req->Control);
21792545bca0SMatthew Dillon 
21802545bca0SMatthew Dillon 	/* Copy the scsi command block into place */
21812545bca0SMatthew Dillon 	if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
21822545bca0SMatthew Dillon 		bcopy(csio->cdb_io.cdb_ptr, mpt_req->CDB, csio->cdb_len);
21832545bca0SMatthew Dillon 	} else {
21842545bca0SMatthew Dillon 		bcopy(csio->cdb_io.cdb_bytes, mpt_req->CDB, csio->cdb_len);
21852545bca0SMatthew Dillon 	}
21862545bca0SMatthew Dillon 
21872545bca0SMatthew Dillon 	mpt_req->CDBLength = csio->cdb_len;
21882545bca0SMatthew Dillon 	mpt_req->DataLength = htole32(csio->dxfer_len);
21892545bca0SMatthew Dillon 	mpt_req->SenseBufferLowAddr = htole32(req->sense_pbuf);
21902545bca0SMatthew Dillon 
21912545bca0SMatthew Dillon 	/*
21922545bca0SMatthew Dillon 	 * Do a *short* print here if we're set to MPT_PRT_DEBUG
21932545bca0SMatthew Dillon 	 */
21942545bca0SMatthew Dillon 	if (mpt->verbose == MPT_PRT_DEBUG) {
21952545bca0SMatthew Dillon 		U32 df;
21962545bca0SMatthew Dillon 		mpt_prt(mpt, "mpt_start: %s op 0x%x ",
21972545bca0SMatthew Dillon 		    (mpt_req->Function == MPI_FUNCTION_SCSI_IO_REQUEST)?
21982545bca0SMatthew Dillon 		    "SCSI_IO_REQUEST" : "SCSI_IO_PASSTHRU", mpt_req->CDB[0]);
21992545bca0SMatthew Dillon 		df = mpt_req->Control & MPI_SCSIIO_CONTROL_DATADIRECTION_MASK;
22002545bca0SMatthew Dillon 		if (df != MPI_SCSIIO_CONTROL_NODATATRANSFER) {
22012545bca0SMatthew Dillon 			mpt_prtc(mpt, "(%s %u byte%s ",
22022545bca0SMatthew Dillon 			    (df == MPI_SCSIIO_CONTROL_READ)?
22032545bca0SMatthew Dillon 			    "read" : "write",  csio->dxfer_len,
22042545bca0SMatthew Dillon 			    (csio->dxfer_len == 1)? ")" : "s)");
22052545bca0SMatthew Dillon 		}
22062545bca0SMatthew Dillon 		mpt_prtc(mpt, "tgt %u lun %u req %p:%u\n", tgt,
22072545bca0SMatthew Dillon 		    ccb->ccb_h.target_lun, req, req->serno);
22082545bca0SMatthew Dillon 	}
22092545bca0SMatthew Dillon 
22102545bca0SMatthew Dillon 	/*
22112545bca0SMatthew Dillon 	 * If we have any data to send with this command, map it into bus space.
22122545bca0SMatthew Dillon 	 */
22132545bca0SMatthew Dillon 	if ((ccbh->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
22142545bca0SMatthew Dillon 		if ((ccbh->flags & CAM_SCATTER_VALID) == 0) {
22152545bca0SMatthew Dillon 			/*
22162545bca0SMatthew Dillon 			 * We've been given a pointer to a single buffer.
22172545bca0SMatthew Dillon 			 */
22182545bca0SMatthew Dillon 			if ((ccbh->flags & CAM_DATA_PHYS) == 0) {
22192545bca0SMatthew Dillon 				/*
22202545bca0SMatthew Dillon 				 * Virtual address that needs to be translated into
22212545bca0SMatthew Dillon 				 * one or more physical address ranges.
22222545bca0SMatthew Dillon 				 */
22232545bca0SMatthew Dillon 				int error;
22246d259fc1SSascha Wildner 				crit_enter();
22252545bca0SMatthew Dillon 				error = bus_dmamap_load(mpt->buffer_dmat,
22262545bca0SMatthew Dillon 				    req->dmap, csio->data_ptr, csio->dxfer_len,
22272545bca0SMatthew Dillon 				    cb, req, 0);
22286d259fc1SSascha Wildner 				crit_exit();
22292545bca0SMatthew Dillon 				if (error == EINPROGRESS) {
22302545bca0SMatthew Dillon 					/*
22312545bca0SMatthew Dillon 					 * So as to maintain ordering,
22322545bca0SMatthew Dillon 					 * freeze the controller queue
22332545bca0SMatthew Dillon 					 * until our mapping is
22342545bca0SMatthew Dillon 					 * returned.
22352545bca0SMatthew Dillon 					 */
22362545bca0SMatthew Dillon 					xpt_freeze_simq(mpt->sim, 1);
22372545bca0SMatthew Dillon 					ccbh->status |= CAM_RELEASE_SIMQ;
22382545bca0SMatthew Dillon 				}
22392545bca0SMatthew Dillon 			} else {
22402545bca0SMatthew Dillon 				/*
22412545bca0SMatthew Dillon 				 * We have been given a pointer to a single
22422545bca0SMatthew Dillon 				 * physical buffer.
22432545bca0SMatthew Dillon 				 */
22442545bca0SMatthew Dillon 				struct bus_dma_segment seg;
22452545bca0SMatthew Dillon 				seg.ds_addr =
22462545bca0SMatthew Dillon 				    (bus_addr_t)(vm_offset_t)csio->data_ptr;
22472545bca0SMatthew Dillon 				seg.ds_len = csio->dxfer_len;
22482545bca0SMatthew Dillon 				(*cb)(req, &seg, 1, 0);
22492545bca0SMatthew Dillon 			}
22502545bca0SMatthew Dillon 		} else {
22512545bca0SMatthew Dillon 			/*
22522545bca0SMatthew Dillon 			 * We have been given a list of addresses.
22532545bca0SMatthew Dillon 			 * This case could easily be supported, but such lists
22542545bca0SMatthew Dillon 			 * are not currently generated by the CAM subsystem, so
22552545bca0SMatthew Dillon 			 * there is no point in handling them right now.
22562545bca0SMatthew Dillon 			 */
22572545bca0SMatthew Dillon 			struct bus_dma_segment *segs;
22582545bca0SMatthew Dillon 			if ((ccbh->flags & CAM_SG_LIST_PHYS) == 0) {
22592545bca0SMatthew Dillon 				(*cb)(req, NULL, 0, EFAULT);
22602545bca0SMatthew Dillon 			} else {
22612545bca0SMatthew Dillon 				/* Just use the segments provided */
22622545bca0SMatthew Dillon 				segs = (struct bus_dma_segment *)csio->data_ptr;
22632545bca0SMatthew Dillon 				(*cb)(req, segs, csio->sglist_cnt, 0);
22642545bca0SMatthew Dillon 			}
22652545bca0SMatthew Dillon 		}
22662545bca0SMatthew Dillon 	} else {
22672545bca0SMatthew Dillon 		(*cb)(req, NULL, 0, 0);
22682545bca0SMatthew Dillon 	}
22692545bca0SMatthew Dillon }
22702545bca0SMatthew Dillon 
22712545bca0SMatthew Dillon static int
mpt_bus_reset(struct mpt_softc * mpt,target_id_t tgt,lun_id_t lun,int sleep_ok)22722545bca0SMatthew Dillon mpt_bus_reset(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun,
22732545bca0SMatthew Dillon     int sleep_ok)
22742545bca0SMatthew Dillon {
22752545bca0SMatthew Dillon 	int   error;
22762545bca0SMatthew Dillon 	uint16_t status;
22772545bca0SMatthew Dillon 	uint8_t response;
22782545bca0SMatthew Dillon 
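	/*
	 * A wildcard target and LUN selects a full bus reset; anything
	 * else becomes a target reset.  Fibre Channel ports also request
	 * the LIP reset option.
	 */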
22792545bca0SMatthew Dillon 	error = mpt_scsi_send_tmf(mpt,
22802545bca0SMatthew Dillon 	    (tgt != CAM_TARGET_WILDCARD || lun != CAM_LUN_WILDCARD) ?
22812545bca0SMatthew Dillon 	    MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET :
22822545bca0SMatthew Dillon 	    MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
22832545bca0SMatthew Dillon 	    mpt->is_fc ? MPI_SCSITASKMGMT_MSGFLAGS_LIP_RESET_OPTION : 0,
22842545bca0SMatthew Dillon 	    0,	/* XXX How do I get the channel ID? */
22852545bca0SMatthew Dillon 	    tgt != CAM_TARGET_WILDCARD ? tgt : 0,
22862545bca0SMatthew Dillon 	    lun != CAM_LUN_WILDCARD ? lun : 0,
22872545bca0SMatthew Dillon 	    0, sleep_ok);
22882545bca0SMatthew Dillon 
22892545bca0SMatthew Dillon 	if (error != 0) {
22902545bca0SMatthew Dillon 		/*
22912545bca0SMatthew Dillon 		 * mpt_scsi_send_tmf hard resets on failure, so no
22922545bca0SMatthew Dillon 		 * need to do so here.
22932545bca0SMatthew Dillon 		 */
22942545bca0SMatthew Dillon 		mpt_prt(mpt,
22952545bca0SMatthew Dillon 		    "mpt_bus_reset: mpt_scsi_send_tmf returned %d\n", error);
22962545bca0SMatthew Dillon 		return (EIO);
22972545bca0SMatthew Dillon 	}
22982545bca0SMatthew Dillon 
22992545bca0SMatthew Dillon 	/* Wait for bus reset to be processed by the IOC. */
23002545bca0SMatthew Dillon 	error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE,
23012545bca0SMatthew Dillon 	    REQ_STATE_DONE, sleep_ok, 5000);
23022545bca0SMatthew Dillon 
23032545bca0SMatthew Dillon 	status = le16toh(mpt->tmf_req->IOCStatus);
23042545bca0SMatthew Dillon 	response = mpt->tmf_req->ResponseCode;
23052545bca0SMatthew Dillon 	mpt->tmf_req->state = REQ_STATE_FREE;
23062545bca0SMatthew Dillon 
23072545bca0SMatthew Dillon 	if (error) {
23082545bca0SMatthew Dillon 		mpt_prt(mpt, "mpt_bus_reset: Reset timed-out. "
23092545bca0SMatthew Dillon 		    "Resetting controller.\n");
23102545bca0SMatthew Dillon 		mpt_reset(mpt, TRUE);
23112545bca0SMatthew Dillon 		return (ETIMEDOUT);
23122545bca0SMatthew Dillon 	}
23132545bca0SMatthew Dillon 
23142545bca0SMatthew Dillon 	if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
23152545bca0SMatthew Dillon 		mpt_prt(mpt, "mpt_bus_reset: TMF IOC Status 0x%x. "
23162545bca0SMatthew Dillon 		    "Resetting controller.\n", status);
23172545bca0SMatthew Dillon 		mpt_reset(mpt, TRUE);
23182545bca0SMatthew Dillon 		return (EIO);
23192545bca0SMatthew Dillon 	}
23202545bca0SMatthew Dillon 
23212545bca0SMatthew Dillon 	if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED &&
23222545bca0SMatthew Dillon 	    response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) {
23232545bca0SMatthew Dillon 		mpt_prt(mpt, "mpt_bus_reset: TMF Response 0x%x. "
23242545bca0SMatthew Dillon 		    "Resetting controller.\n", response);
23252545bca0SMatthew Dillon 		mpt_reset(mpt, TRUE);
23262545bca0SMatthew Dillon 		return (EIO);
23272545bca0SMatthew Dillon 	}
23282545bca0SMatthew Dillon 	return (0);
23292545bca0SMatthew Dillon }
23302545bca0SMatthew Dillon 
23312545bca0SMatthew Dillon static int
mpt_fc_reset_link(struct mpt_softc * mpt,int dowait)23322545bca0SMatthew Dillon mpt_fc_reset_link(struct mpt_softc *mpt, int dowait)
23332545bca0SMatthew Dillon {
23342545bca0SMatthew Dillon 	int r = 0;
23352545bca0SMatthew Dillon 	request_t *req;
23362545bca0SMatthew Dillon 	PTR_MSG_FC_PRIMITIVE_SEND_REQUEST fc;
23372545bca0SMatthew Dillon 
23382545bca0SMatthew Dillon 	req = mpt_get_request(mpt, FALSE);
23392545bca0SMatthew Dillon 	if (req == NULL) {
23402545bca0SMatthew Dillon 		return (ENOMEM);
23412545bca0SMatthew Dillon 	}
23422545bca0SMatthew Dillon 	fc = req->req_vbuf;
23432545bca0SMatthew Dillon 	memset(fc, 0, sizeof(*fc));
23442545bca0SMatthew Dillon 	fc->SendFlags = MPI_FC_PRIM_SEND_FLAGS_RESET_LINK;
23452545bca0SMatthew Dillon 	fc->Function = MPI_FUNCTION_FC_PRIMITIVE_SEND;
23462545bca0SMatthew Dillon 	fc->MsgContext = htole32(req->index | fc_els_handler_id);
23472545bca0SMatthew Dillon 	mpt_send_cmd(mpt, req);
23482545bca0SMatthew Dillon 	if (dowait) {
23492545bca0SMatthew Dillon 		r = mpt_wait_req(mpt, req, REQ_STATE_DONE,
23502545bca0SMatthew Dillon 		    REQ_STATE_DONE, FALSE, 60 * 1000);
23512545bca0SMatthew Dillon 		if (r == 0) {
23522545bca0SMatthew Dillon 			mpt_free_request(mpt, req);
23532545bca0SMatthew Dillon 		}
23542545bca0SMatthew Dillon 	}
23552545bca0SMatthew Dillon 	return (r);
23562545bca0SMatthew Dillon }
23572545bca0SMatthew Dillon 
23586d259fc1SSascha Wildner static void
mpt_cam_rescan_callback(struct cam_periph * periph,union ccb * ccb)23596d259fc1SSascha Wildner mpt_cam_rescan_callback(struct cam_periph *periph, union ccb *ccb)
23606d259fc1SSascha Wildner {
23616d259fc1SSascha Wildner     xpt_free_path(ccb->ccb_h.path);
2362*cec957e9SMatthew Dillon     xpt_free_ccb(&ccb->ccb_h);
23636d259fc1SSascha Wildner }
23646d259fc1SSascha Wildner 
23652545bca0SMatthew Dillon static int
mpt_cam_event(struct mpt_softc * mpt,request_t * req,MSG_EVENT_NOTIFY_REPLY * msg)23662545bca0SMatthew Dillon mpt_cam_event(struct mpt_softc *mpt, request_t *req,
23672545bca0SMatthew Dillon 	      MSG_EVENT_NOTIFY_REPLY *msg)
23682545bca0SMatthew Dillon {
23692545bca0SMatthew Dillon 	uint32_t data0, data1;
23702545bca0SMatthew Dillon 
23712545bca0SMatthew Dillon 	data0 = le32toh(msg->Data[0]);
23722545bca0SMatthew Dillon 	data1 = le32toh(msg->Data[1]);
23732545bca0SMatthew Dillon 	switch(msg->Event & 0xFF) {
23742545bca0SMatthew Dillon 	case MPI_EVENT_UNIT_ATTENTION:
23752545bca0SMatthew Dillon 		mpt_prt(mpt, "UNIT ATTENTION: Bus: 0x%02x TargetID: 0x%02x\n",
23762545bca0SMatthew Dillon 		    (data0 >> 8) & 0xff, data0 & 0xff);
23772545bca0SMatthew Dillon 		break;
23782545bca0SMatthew Dillon 
23792545bca0SMatthew Dillon 	case MPI_EVENT_IOC_BUS_RESET:
23802545bca0SMatthew Dillon 		/* We generated a bus reset */
23812545bca0SMatthew Dillon 		mpt_prt(mpt, "IOC Generated Bus Reset Port: %d\n",
23822545bca0SMatthew Dillon 		    (data0 >> 8) & 0xff);
23832545bca0SMatthew Dillon 		xpt_async(AC_BUS_RESET, mpt->path, NULL);
23842545bca0SMatthew Dillon 		break;
23852545bca0SMatthew Dillon 
23862545bca0SMatthew Dillon 	case MPI_EVENT_EXT_BUS_RESET:
23872545bca0SMatthew Dillon 		/* Someone else generated a bus reset */
23882545bca0SMatthew Dillon 		mpt_prt(mpt, "External Bus Reset Detected\n");
23892545bca0SMatthew Dillon 		/*
23902545bca0SMatthew Dillon 		 * These replies don't return EventData like the MPI
23912545bca0SMatthew Dillon 		 * spec says they do
23922545bca0SMatthew Dillon 		 */
23932545bca0SMatthew Dillon 		xpt_async(AC_BUS_RESET, mpt->path, NULL);
23942545bca0SMatthew Dillon 		break;
23952545bca0SMatthew Dillon 
23962545bca0SMatthew Dillon 	case MPI_EVENT_RESCAN:
23972545bca0SMatthew Dillon 	{
23982545bca0SMatthew Dillon 		union ccb *ccb;
23992545bca0SMatthew Dillon 		uint32_t pathid;
24002545bca0SMatthew Dillon 		/*
24012545bca0SMatthew Dillon 		 * In general this means a device has been added to the loop.
24022545bca0SMatthew Dillon 		 */
24032545bca0SMatthew Dillon 		mpt_prt(mpt, "Rescan Port: %d\n", (data0 >> 8) & 0xff);
24042545bca0SMatthew Dillon 		if (mpt->ready == 0) {
24052545bca0SMatthew Dillon 			break;
24062545bca0SMatthew Dillon 		}
24072545bca0SMatthew Dillon 		if (mpt->phydisk_sim) {
24082545bca0SMatthew Dillon 			pathid = cam_sim_path(mpt->phydisk_sim);
24092545bca0SMatthew Dillon 		} else {
24102545bca0SMatthew Dillon 			pathid = cam_sim_path(mpt->sim);
24112545bca0SMatthew Dillon 		}
24122545bca0SMatthew Dillon 		/*
24132545bca0SMatthew Dillon 		 * Allocate a CCB, create a wildcard path for this bus,
24142545bca0SMatthew Dillon 		 * and schedule a rescan.
24152545bca0SMatthew Dillon 		 */
2416*cec957e9SMatthew Dillon 		ccb = xpt_alloc_ccb();
24172545bca0SMatthew Dillon 
24182545bca0SMatthew Dillon 		if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, pathid,
24192545bca0SMatthew Dillon 		    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
24202545bca0SMatthew Dillon 			mpt_prt(mpt, "unable to create path for rescan\n");
2421*cec957e9SMatthew Dillon 			xpt_free_ccb(&ccb->ccb_h);
24222545bca0SMatthew Dillon 			break;
24232545bca0SMatthew Dillon 		}
24246d259fc1SSascha Wildner 
2425*cec957e9SMatthew Dillon 		xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, /*lowpri*/5);
24266d259fc1SSascha Wildner 		ccb->ccb_h.func_code = XPT_SCAN_BUS;
24276d259fc1SSascha Wildner 		ccb->ccb_h.cbfcnp = mpt_cam_rescan_callback;
24286d259fc1SSascha Wildner 		ccb->crcn.flags = CAM_FLAG_NONE;
24296d259fc1SSascha Wildner 		xpt_action(ccb);
24306d259fc1SSascha Wildner 		/* scan is now in progress */
24316d259fc1SSascha Wildner 
24322545bca0SMatthew Dillon 		break;
24332545bca0SMatthew Dillon 	}
24342545bca0SMatthew Dillon 	case MPI_EVENT_LINK_STATUS_CHANGE:
24352545bca0SMatthew Dillon 		mpt_prt(mpt, "Port %d: LinkState: %s\n",
24362545bca0SMatthew Dillon 		    (data1 >> 8) & 0xff,
24372545bca0SMatthew Dillon 		    ((data0 & 0xff) == 0)?  "Failed" : "Active");
24382545bca0SMatthew Dillon 		break;
24392545bca0SMatthew Dillon 
24402545bca0SMatthew Dillon 	case MPI_EVENT_LOOP_STATE_CHANGE:
24412545bca0SMatthew Dillon 		switch ((data0 >> 16) & 0xff) {
24422545bca0SMatthew Dillon 		case 0x01:
24432545bca0SMatthew Dillon 			mpt_prt(mpt,
24442545bca0SMatthew Dillon 			    "Port 0x%x: FC LinkEvent: LIP(%02x,%02x) "
24452545bca0SMatthew Dillon 			    "(Loop Initialization)\n",
24462545bca0SMatthew Dillon 			    (data1 >> 8) & 0xff,
24472545bca0SMatthew Dillon 			    (data0 >> 8) & 0xff,
24482545bca0SMatthew Dillon 			    (data0     ) & 0xff);
24492545bca0SMatthew Dillon 			switch ((data0 >> 8) & 0xff) {
24502545bca0SMatthew Dillon 			case 0xF7:
24512545bca0SMatthew Dillon 				if ((data0 & 0xff) == 0xF7) {
24522545bca0SMatthew Dillon 					mpt_prt(mpt, "Device needs AL_PA\n");
24532545bca0SMatthew Dillon 				} else {
24542545bca0SMatthew Dillon 					mpt_prt(mpt, "Device %02x doesn't like "
24552545bca0SMatthew Dillon 					    "FC performance\n",
24562545bca0SMatthew Dillon 					    data0 & 0xFF);
24572545bca0SMatthew Dillon 				}
24582545bca0SMatthew Dillon 				break;
24592545bca0SMatthew Dillon 			case 0xF8:
24602545bca0SMatthew Dillon 				if ((data0 & 0xff) == 0xF7) {
24612545bca0SMatthew Dillon 					mpt_prt(mpt, "Device had loop failure "
24622545bca0SMatthew Dillon 					    "at its receiver prior to acquiring"
24632545bca0SMatthew Dillon 					    " AL_PA\n");
24642545bca0SMatthew Dillon 				} else {
24652545bca0SMatthew Dillon 					mpt_prt(mpt, "Device %02x detected loop"
24662545bca0SMatthew Dillon 					    " failure at its receiver\n",
24672545bca0SMatthew Dillon 					    data0 & 0xFF);
24682545bca0SMatthew Dillon 				}
24692545bca0SMatthew Dillon 				break;
24702545bca0SMatthew Dillon 			default:
24712545bca0SMatthew Dillon 				mpt_prt(mpt, "Device %02x requests that device "
24722545bca0SMatthew Dillon 				    "%02x reset itself\n",
24732545bca0SMatthew Dillon 				    data0 & 0xFF,
24742545bca0SMatthew Dillon 				    (data0 >> 8) & 0xFF);
24752545bca0SMatthew Dillon 				break;
24762545bca0SMatthew Dillon 			}
24772545bca0SMatthew Dillon 			break;
24782545bca0SMatthew Dillon 		case 0x02:
24792545bca0SMatthew Dillon 			mpt_prt(mpt, "Port 0x%x: FC LinkEvent: "
24802545bca0SMatthew Dillon 			    "LPE(%02x,%02x) (Loop Port Enable)\n",
24812545bca0SMatthew Dillon 			    (data1 >> 8) & 0xff, /* Port */
24822545bca0SMatthew Dillon 			    (data0 >>  8) & 0xff, /* Character 3 */
24832545bca0SMatthew Dillon 			    (data0      ) & 0xff  /* Character 4 */);
24842545bca0SMatthew Dillon 			break;
24852545bca0SMatthew Dillon 		case 0x03:
24862545bca0SMatthew Dillon 			mpt_prt(mpt, "Port 0x%x: FC LinkEvent: "
24872545bca0SMatthew Dillon 			    "LPB(%02x,%02x) (Loop Port Bypass)\n",
24882545bca0SMatthew Dillon 			    (data1 >> 8) & 0xff, /* Port */
24892545bca0SMatthew Dillon 			    (data0 >> 8) & 0xff, /* Character 3 */
24902545bca0SMatthew Dillon 			    (data0     ) & 0xff  /* Character 4 */);
24912545bca0SMatthew Dillon 			break;
24922545bca0SMatthew Dillon 		default:
24932545bca0SMatthew Dillon 			mpt_prt(mpt, "Port 0x%x: FC LinkEvent: Unknown "
24942545bca0SMatthew Dillon 			    "FC event (%02x %02x %02x)\n",
24952545bca0SMatthew Dillon 			    (data1 >> 8) & 0xff, /* Port */
24962545bca0SMatthew Dillon 			    (data0 >> 16) & 0xff, /* Event */
24972545bca0SMatthew Dillon 			    (data0 >>  8) & 0xff, /* Character 3 */
24982545bca0SMatthew Dillon 			    (data0      ) & 0xff  /* Character 4 */);
24992545bca0SMatthew Dillon 		}
25002545bca0SMatthew Dillon 		break;
25012545bca0SMatthew Dillon 
25022545bca0SMatthew Dillon 	case MPI_EVENT_LOGOUT:
25032545bca0SMatthew Dillon 		mpt_prt(mpt, "FC Logout Port: %d N_PortID: %02x\n",
25042545bca0SMatthew Dillon 		    (data1 >> 8) & 0xff, data0);
25052545bca0SMatthew Dillon 		break;
25062545bca0SMatthew Dillon 	case MPI_EVENT_QUEUE_FULL:
25072545bca0SMatthew Dillon 	{
25082545bca0SMatthew Dillon 		struct cam_sim *sim;
25092545bca0SMatthew Dillon 		struct cam_path *tmppath;
2510*cec957e9SMatthew Dillon 		struct ccb_relsim *crs;
25112545bca0SMatthew Dillon 		PTR_EVENT_DATA_QUEUE_FULL pqf;
25122545bca0SMatthew Dillon 		lun_id_t lun_id;
25132545bca0SMatthew Dillon 
25142545bca0SMatthew Dillon 		pqf = (PTR_EVENT_DATA_QUEUE_FULL)msg->Data;
25152545bca0SMatthew Dillon 		pqf->CurrentDepth = le16toh(pqf->CurrentDepth);
25162545bca0SMatthew Dillon 		mpt_prt(mpt, "QUEUE FULL EVENT: Bus 0x%02x Target 0x%02x Depth "
25172545bca0SMatthew Dillon 		    "%d\n", pqf->Bus, pqf->TargetID, pqf->CurrentDepth);
25184c42baf4SSascha Wildner 		if (mpt->phydisk_sim && mpt_is_raid_member(mpt,
25194c42baf4SSascha Wildner 		    pqf->TargetID) != 0) {
25202545bca0SMatthew Dillon 			sim = mpt->phydisk_sim;
25212545bca0SMatthew Dillon 		} else {
25222545bca0SMatthew Dillon 			sim = mpt->sim;
25232545bca0SMatthew Dillon 		}
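		/*
		 * Walk every LUN on the target and use XPT_REL_SIMQ with
		 * RELSIM_ADJUST_OPENINGS to trim the opening count to one
		 * below the depth reported in the queue-full event.
		 */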
25242545bca0SMatthew Dillon 		for (lun_id = 0; lun_id < MPT_MAX_LUNS; lun_id++) {
25252545bca0SMatthew Dillon 			if (xpt_create_path(&tmppath, NULL, cam_sim_path(sim),
25262545bca0SMatthew Dillon 			    pqf->TargetID, lun_id) != CAM_REQ_CMP) {
25272545bca0SMatthew Dillon 				mpt_prt(mpt, "unable to create a path to send "
25282545bca0SMatthew Dillon 				    "XPT_REL_SIMQ");
25292545bca0SMatthew Dillon 				break;
25302545bca0SMatthew Dillon 			}
2531*cec957e9SMatthew Dillon 			crs = &xpt_alloc_ccb()->crs;
2532*cec957e9SMatthew Dillon 			xpt_setup_ccb(&crs->ccb_h, tmppath, 5);
2533*cec957e9SMatthew Dillon 			crs->ccb_h.func_code = XPT_REL_SIMQ;
2534*cec957e9SMatthew Dillon 			crs->ccb_h.flags = CAM_DEV_QFREEZE;
2535*cec957e9SMatthew Dillon 			crs->release_flags = RELSIM_ADJUST_OPENINGS;
2536*cec957e9SMatthew Dillon 			crs->openings = pqf->CurrentDepth - 1;
2537*cec957e9SMatthew Dillon 			xpt_action((union ccb *)crs);
2538*cec957e9SMatthew Dillon 			if (crs->ccb_h.status != CAM_REQ_CMP) {
25392545bca0SMatthew Dillon 				mpt_prt(mpt, "XPT_REL_SIMQ failed\n");
25402545bca0SMatthew Dillon 			}
25412545bca0SMatthew Dillon 			xpt_free_path(tmppath);
2542*cec957e9SMatthew Dillon 			xpt_free_ccb(&crs->ccb_h);
25432545bca0SMatthew Dillon 		}
25442545bca0SMatthew Dillon 		break;
25452545bca0SMatthew Dillon 	}
25466d259fc1SSascha Wildner 	case MPI_EVENT_IR_RESYNC_UPDATE:
25476d259fc1SSascha Wildner 		mpt_prt(mpt, "IR resync update %d completed\n",
25486d259fc1SSascha Wildner 		    (data0 >> 16) & 0xff);
25496d259fc1SSascha Wildner 		break;
25504c42baf4SSascha Wildner 	case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
25514c42baf4SSascha Wildner 	{
25524c42baf4SSascha Wildner 		union ccb *ccb;
25534c42baf4SSascha Wildner 		struct cam_sim *sim;
25544c42baf4SSascha Wildner 		struct cam_path *tmppath;
25554c42baf4SSascha Wildner 		PTR_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE psdsc;
25564c42baf4SSascha Wildner 
25574c42baf4SSascha Wildner 		psdsc = (PTR_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE)msg->Data;
25584c42baf4SSascha Wildner 		if (mpt->phydisk_sim && mpt_is_raid_member(mpt,
25594c42baf4SSascha Wildner 		    psdsc->TargetID) != 0)
25604c42baf4SSascha Wildner 			sim = mpt->phydisk_sim;
25614c42baf4SSascha Wildner 		else
25624c42baf4SSascha Wildner 			sim = mpt->sim;
25634c42baf4SSascha Wildner 		switch(psdsc->ReasonCode) {
25644c42baf4SSascha Wildner 		case MPI_EVENT_SAS_DEV_STAT_RC_ADDED:
2565*cec957e9SMatthew Dillon 			ccb = xpt_alloc_ccb();
25664c42baf4SSascha Wildner 			if (xpt_create_path(&ccb->ccb_h.path, xpt_periph,
25674c42baf4SSascha Wildner 			    cam_sim_path(sim), psdsc->TargetID,
25684c42baf4SSascha Wildner 			    CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
25694c42baf4SSascha Wildner 				mpt_prt(mpt,
25704c42baf4SSascha Wildner 				    "unable to create path for rescan\n");
2571*cec957e9SMatthew Dillon 				xpt_free_ccb(&ccb->ccb_h);
25724c42baf4SSascha Wildner 				break;
25734c42baf4SSascha Wildner 			}
2574*cec957e9SMatthew Dillon 			xpt_setup_ccb(&ccb->ccb_h, ccb->ccb_h.path, /*lopri*/5);
25754c42baf4SSascha Wildner 			ccb->ccb_h.func_code = XPT_SCAN_BUS;
25764c42baf4SSascha Wildner 			ccb->ccb_h.cbfcnp = mpt_cam_rescan_callback;
25774c42baf4SSascha Wildner 			ccb->crcn.flags = CAM_FLAG_NONE;
25784c42baf4SSascha Wildner 			xpt_action(ccb);
2579*cec957e9SMatthew Dillon 			/* scan now in progress */
25804c42baf4SSascha Wildner 			break;
25814c42baf4SSascha Wildner 		case MPI_EVENT_SAS_DEV_STAT_RC_NOT_RESPONDING:
25824c42baf4SSascha Wildner 			if (xpt_create_path(&tmppath, NULL, cam_sim_path(sim),
25834c42baf4SSascha Wildner 			    psdsc->TargetID, CAM_LUN_WILDCARD) !=
25844c42baf4SSascha Wildner 			    CAM_REQ_CMP) {
25854c42baf4SSascha Wildner 				mpt_prt(mpt,
25864c42baf4SSascha Wildner 				    "unable to create path for async event");
25874c42baf4SSascha Wildner 				break;
25884c42baf4SSascha Wildner 			}
25894c42baf4SSascha Wildner 			xpt_async(AC_LOST_DEVICE, tmppath, NULL);
25904c42baf4SSascha Wildner 			xpt_free_path(tmppath);
25914c42baf4SSascha Wildner 			break;
25924c42baf4SSascha Wildner 		case MPI_EVENT_SAS_DEV_STAT_RC_CMPL_INTERNAL_DEV_RESET:
25934c42baf4SSascha Wildner 		case MPI_EVENT_SAS_DEV_STAT_RC_CMPL_TASK_ABORT_INTERNAL:
25944c42baf4SSascha Wildner 		case MPI_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
25954c42baf4SSascha Wildner 			break;
25964c42baf4SSascha Wildner 		default:
25974c42baf4SSascha Wildner 			mpt_lprt(mpt, MPT_PRT_WARN,
25984c42baf4SSascha Wildner 			    "SAS device status change: Bus: 0x%02x TargetID: "
25994c42baf4SSascha Wildner 			    "0x%02x ReasonCode: 0x%02x\n", psdsc->Bus,
26004c42baf4SSascha Wildner 			    psdsc->TargetID, psdsc->ReasonCode);
26014c42baf4SSascha Wildner 			break;
26024c42baf4SSascha Wildner 		}
26034c42baf4SSascha Wildner 		break;
26044c42baf4SSascha Wildner 	}
26054c42baf4SSascha Wildner 	case MPI_EVENT_SAS_DISCOVERY_ERROR:
26064c42baf4SSascha Wildner 	{
26074c42baf4SSascha Wildner 		PTR_EVENT_DATA_DISCOVERY_ERROR pde;
26084c42baf4SSascha Wildner 
26094c42baf4SSascha Wildner 		pde = (PTR_EVENT_DATA_DISCOVERY_ERROR)msg->Data;
26104c42baf4SSascha Wildner 		pde->DiscoveryStatus = le32toh(pde->DiscoveryStatus);
26114c42baf4SSascha Wildner 		mpt_lprt(mpt, MPT_PRT_WARN,
26124c42baf4SSascha Wildner 		    "SAS discovery error: Port: 0x%02x Status: 0x%08x\n",
26134c42baf4SSascha Wildner 		    pde->Port, pde->DiscoveryStatus);
26144c42baf4SSascha Wildner 		break;
26154c42baf4SSascha Wildner 	}
26162545bca0SMatthew Dillon 	case MPI_EVENT_EVENT_CHANGE:
26172545bca0SMatthew Dillon 	case MPI_EVENT_INTEGRATED_RAID:
26184c42baf4SSascha Wildner 	case MPI_EVENT_IR2:
26194c42baf4SSascha Wildner 	case MPI_EVENT_LOG_ENTRY_ADDED:
26204c42baf4SSascha Wildner 	case MPI_EVENT_SAS_DISCOVERY:
26214c42baf4SSascha Wildner 	case MPI_EVENT_SAS_PHY_LINK_STATUS:
26222545bca0SMatthew Dillon 	case MPI_EVENT_SAS_SES:
26232545bca0SMatthew Dillon 		break;
26242545bca0SMatthew Dillon 	default:
26252545bca0SMatthew Dillon 		mpt_lprt(mpt, MPT_PRT_WARN, "mpt_cam_event: 0x%x\n",
26266d259fc1SSascha Wildner 		    msg->Event & 0xFF);
26272545bca0SMatthew Dillon 		return (0);
26282545bca0SMatthew Dillon 	}
26292545bca0SMatthew Dillon 	return (1);
26302545bca0SMatthew Dillon }
26312545bca0SMatthew Dillon 
26322545bca0SMatthew Dillon /*
26332545bca0SMatthew Dillon  * Reply path for all SCSI I/O requests, called from our
26342545bca0SMatthew Dillon  * interrupt handler by extracting our handler index from
26352545bca0SMatthew Dillon  * the MsgContext field of the reply from the IOC.
26362545bca0SMatthew Dillon  *
26372545bca0SMatthew Dillon  * This routine is optimized for the common case of a
26382545bca0SMatthew Dillon  * completion without error.  All exception handling is
26392545bca0SMatthew Dillon  * offloaded to non-inlined helper routines to minimize
26402545bca0SMatthew Dillon  * cache footprint.
26412545bca0SMatthew Dillon  */
26422545bca0SMatthew Dillon static int
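/*
 * The MsgContext of each request was set on submission as
 * (req->index | scsi_io_handler_id), so the interrupt path can split the
 * value it reads back into a handler id and a request index without
 * touching the request itself.
 */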
mpt_scsi_reply_handler(struct mpt_softc * mpt,request_t * req,uint32_t reply_desc,MSG_DEFAULT_REPLY * reply_frame)26432545bca0SMatthew Dillon mpt_scsi_reply_handler(struct mpt_softc *mpt, request_t *req,
26442545bca0SMatthew Dillon     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
26452545bca0SMatthew Dillon {
26462545bca0SMatthew Dillon 	MSG_SCSI_IO_REQUEST *scsi_req;
26472545bca0SMatthew Dillon 	union ccb *ccb;
26482545bca0SMatthew Dillon 
26492545bca0SMatthew Dillon 	if (req->state == REQ_STATE_FREE) {
26502545bca0SMatthew Dillon 		mpt_prt(mpt, "mpt_scsi_reply_handler: req already free\n");
26512545bca0SMatthew Dillon 		return (TRUE);
26522545bca0SMatthew Dillon 	}
26532545bca0SMatthew Dillon 
26542545bca0SMatthew Dillon 	scsi_req = (MSG_SCSI_IO_REQUEST *)req->req_vbuf;
26552545bca0SMatthew Dillon 	ccb = req->ccb;
26562545bca0SMatthew Dillon 	if (ccb == NULL) {
26572545bca0SMatthew Dillon 		mpt_prt(mpt, "mpt_scsi_reply_handler: req %p:%u with no ccb\n",
26582545bca0SMatthew Dillon 		    req, req->serno);
26592545bca0SMatthew Dillon 		return (TRUE);
26602545bca0SMatthew Dillon 	}
26612545bca0SMatthew Dillon 
26622545bca0SMatthew Dillon 	mpt_req_untimeout(req, mpt_timeout, ccb);
26632545bca0SMatthew Dillon 	ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
26642545bca0SMatthew Dillon 
26652545bca0SMatthew Dillon 	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
26662545bca0SMatthew Dillon 		bus_dmasync_op_t op;
26672545bca0SMatthew Dillon 
26682545bca0SMatthew Dillon 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
26692545bca0SMatthew Dillon 			op = BUS_DMASYNC_POSTREAD;
26702545bca0SMatthew Dillon 		else
26712545bca0SMatthew Dillon 			op = BUS_DMASYNC_POSTWRITE;
26722545bca0SMatthew Dillon 		bus_dmamap_sync(mpt->buffer_dmat, req->dmap, op);
26732545bca0SMatthew Dillon 		bus_dmamap_unload(mpt->buffer_dmat, req->dmap);
26742545bca0SMatthew Dillon 	}
26752545bca0SMatthew Dillon 
26762545bca0SMatthew Dillon 	if (reply_frame == NULL) {
26772545bca0SMatthew Dillon 		/*
26782545bca0SMatthew Dillon 		 * Context only reply, completion without error status.
26792545bca0SMatthew Dillon 		 */
26802545bca0SMatthew Dillon 		ccb->csio.resid = 0;
26812545bca0SMatthew Dillon 		mpt_set_ccb_status(ccb, CAM_REQ_CMP);
26822545bca0SMatthew Dillon 		ccb->csio.scsi_status = SCSI_STATUS_OK;
26832545bca0SMatthew Dillon 	} else {
26842545bca0SMatthew Dillon 		mpt_scsi_reply_frame_handler(mpt, req, reply_frame);
26852545bca0SMatthew Dillon 	}
26862545bca0SMatthew Dillon 
26872545bca0SMatthew Dillon 	if (mpt->outofbeer) {
26882545bca0SMatthew Dillon 		ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
26892545bca0SMatthew Dillon 		mpt->outofbeer = 0;
26902545bca0SMatthew Dillon 		mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
26912545bca0SMatthew Dillon 	}
26922545bca0SMatthew Dillon 	if (scsi_req->CDB[0] == INQUIRY && (scsi_req->CDB[1] & SI_EVPD) == 0) {
26932545bca0SMatthew Dillon 		struct scsi_inquiry_data *iq =
26942545bca0SMatthew Dillon 		    (struct scsi_inquiry_data *)ccb->csio.data_ptr;
26952545bca0SMatthew Dillon 		if (scsi_req->Function ==
26962545bca0SMatthew Dillon 		    MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH) {
26972545bca0SMatthew Dillon 			/*
26982545bca0SMatthew Dillon 			 * Fake out the device type so that only the
26992545bca0SMatthew Dillon 			 * pass-thru device will attach.
27002545bca0SMatthew Dillon 			 */
27012545bca0SMatthew Dillon 			iq->device &= ~0x1F;
27022545bca0SMatthew Dillon 			iq->device |= T_NODEVICE;
27032545bca0SMatthew Dillon 		}
27042545bca0SMatthew Dillon 	}
27052545bca0SMatthew Dillon 	if (mpt->verbose == MPT_PRT_DEBUG) {
27062545bca0SMatthew Dillon 		mpt_prt(mpt, "mpt_scsi_reply_handler: %p:%u complete\n",
27072545bca0SMatthew Dillon 		    req, req->serno);
27082545bca0SMatthew Dillon 	}
27094c42baf4SSascha Wildner 	KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
27102545bca0SMatthew Dillon 	xpt_done(ccb);
27112545bca0SMatthew Dillon 	if ((req->state & REQ_STATE_TIMEDOUT) == 0) {
27122545bca0SMatthew Dillon 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
27132545bca0SMatthew Dillon 	} else {
27142545bca0SMatthew Dillon 		mpt_prt(mpt, "completing timedout/aborted req %p:%u\n",
27152545bca0SMatthew Dillon 		    req, req->serno);
27162545bca0SMatthew Dillon 		TAILQ_REMOVE(&mpt->request_timeout_list, req, links);
27172545bca0SMatthew Dillon 	}
27182545bca0SMatthew Dillon 	KASSERT((req->state & REQ_STATE_NEED_WAKEUP) == 0,
27192545bca0SMatthew Dillon 	    ("CCB req needed wakeup"));
27202545bca0SMatthew Dillon #ifdef	INVARIANTS
27212545bca0SMatthew Dillon 	mpt_req_not_spcl(mpt, req, "mpt_scsi_reply_handler", __LINE__);
27222545bca0SMatthew Dillon #endif
27232545bca0SMatthew Dillon 	mpt_free_request(mpt, req);
27242545bca0SMatthew Dillon 	return (TRUE);
27252545bca0SMatthew Dillon }
27262545bca0SMatthew Dillon 
27272545bca0SMatthew Dillon static int
mpt_scsi_tmf_reply_handler(struct mpt_softc * mpt,request_t * req,uint32_t reply_desc,MSG_DEFAULT_REPLY * reply_frame)27282545bca0SMatthew Dillon mpt_scsi_tmf_reply_handler(struct mpt_softc *mpt, request_t *req,
27292545bca0SMatthew Dillon     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
27302545bca0SMatthew Dillon {
27312545bca0SMatthew Dillon 	MSG_SCSI_TASK_MGMT_REPLY *tmf_reply;
27322545bca0SMatthew Dillon 
27332545bca0SMatthew Dillon 	KASSERT(req == mpt->tmf_req, ("TMF Reply not using mpt->tmf_req"));
27342545bca0SMatthew Dillon #ifdef	INVARIANTS
27352545bca0SMatthew Dillon 	mpt_req_not_spcl(mpt, req, "mpt_scsi_tmf_reply_handler", __LINE__);
27362545bca0SMatthew Dillon #endif
27372545bca0SMatthew Dillon 	tmf_reply = (MSG_SCSI_TASK_MGMT_REPLY *)reply_frame;
27382545bca0SMatthew Dillon 	/* Record IOC Status and Response Code of TMF for any waiters. */
27392545bca0SMatthew Dillon 	req->IOCStatus = le16toh(tmf_reply->IOCStatus);
27402545bca0SMatthew Dillon 	req->ResponseCode = tmf_reply->ResponseCode;
27412545bca0SMatthew Dillon 
27422545bca0SMatthew Dillon 	mpt_lprt(mpt, MPT_PRT_DEBUG, "TMF complete: req %p:%u status 0x%x\n",
27432545bca0SMatthew Dillon 	    req, req->serno, le16toh(tmf_reply->IOCStatus));
27442545bca0SMatthew Dillon 	TAILQ_REMOVE(&mpt->request_pending_list, req, links);
27452545bca0SMatthew Dillon 	if ((req->state & REQ_STATE_NEED_WAKEUP) != 0) {
27462545bca0SMatthew Dillon 		req->state |= REQ_STATE_DONE;
27472545bca0SMatthew Dillon 		wakeup(req);
27482545bca0SMatthew Dillon 	} else {
27492545bca0SMatthew Dillon 		mpt->tmf_req->state = REQ_STATE_FREE;
27502545bca0SMatthew Dillon 	}
27512545bca0SMatthew Dillon 	return (TRUE);
27522545bca0SMatthew Dillon }
27532545bca0SMatthew Dillon 
27542545bca0SMatthew Dillon /*
27552545bca0SMatthew Dillon  * XXX: Move to definitions file
27562545bca0SMatthew Dillon  */
27572545bca0SMatthew Dillon #define	ELS	0x22
27582545bca0SMatthew Dillon #define	FC4LS	0x32
27592545bca0SMatthew Dillon #define	ABTS	0x81
27602545bca0SMatthew Dillon #define	BA_ACC	0x84
27612545bca0SMatthew Dillon 
27622545bca0SMatthew Dillon #define	LS_RJT	0x01
27632545bca0SMatthew Dillon #define	LS_ACC	0x02
27642545bca0SMatthew Dillon #define	PLOGI	0x03
27652545bca0SMatthew Dillon #define	LOGO	0x05
27662545bca0SMatthew Dillon #define SRR	0x14
27672545bca0SMatthew Dillon #define PRLI	0x20
27682545bca0SMatthew Dillon #define PRLO	0x21
27692545bca0SMatthew Dillon #define ADISC	0x52
27702545bca0SMatthew Dillon #define RSCN	0x61
27712545bca0SMatthew Dillon 
27722545bca0SMatthew Dillon static void
mpt_fc_els_send_response(struct mpt_softc * mpt,request_t * req,PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp,U8 length)27732545bca0SMatthew Dillon mpt_fc_els_send_response(struct mpt_softc *mpt, request_t *req,
27742545bca0SMatthew Dillon     PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp, U8 length)
27752545bca0SMatthew Dillon {
27762545bca0SMatthew Dillon 	uint32_t fl;
27772545bca0SMatthew Dillon 	MSG_LINK_SERVICE_RSP_REQUEST tmp;
27782545bca0SMatthew Dillon 	PTR_MSG_LINK_SERVICE_RSP_REQUEST rsp;
27792545bca0SMatthew Dillon 
27802545bca0SMatthew Dillon 	/*
27812545bca0SMatthew Dillon 	 * We are going to reuse the ELS request to send this response back.
27822545bca0SMatthew Dillon 	 */
27832545bca0SMatthew Dillon 	rsp = &tmp;
27842545bca0SMatthew Dillon 	memset(rsp, 0, sizeof(*rsp));
27852545bca0SMatthew Dillon 
27862545bca0SMatthew Dillon #ifdef	USE_IMMEDIATE_LINK_DATA
27872545bca0SMatthew Dillon 	/*
27882545bca0SMatthew Dillon 	 * The IMMEDIATE stuff doesn't seem to work.
27892545bca0SMatthew Dillon 	 */
27902545bca0SMatthew Dillon 	rsp->RspFlags = LINK_SERVICE_RSP_FLAGS_IMMEDIATE;
27912545bca0SMatthew Dillon #endif
27922545bca0SMatthew Dillon 	rsp->RspLength = length;
27932545bca0SMatthew Dillon 	rsp->Function = MPI_FUNCTION_FC_LINK_SRVC_RSP;
27942545bca0SMatthew Dillon 	rsp->MsgContext = htole32(req->index | fc_els_handler_id);
27952545bca0SMatthew Dillon 
27962545bca0SMatthew Dillon 	/*
27972545bca0SMatthew Dillon 	 * Copy over information from the original reply frame to
27982545bca0SMatthew Dillon 	 * its correct place in the response.
27992545bca0SMatthew Dillon 	 */
28002545bca0SMatthew Dillon 	memcpy((U8 *)rsp + 0x0c, (U8 *)rp + 0x1c, 24);
28012545bca0SMatthew Dillon 
28022545bca0SMatthew Dillon 	/*
28032545bca0SMatthew Dillon 	 * And now copy back the temporary area to the original frame.
28042545bca0SMatthew Dillon 	 */
28052545bca0SMatthew Dillon 	memcpy(req->req_vbuf, rsp, sizeof (MSG_LINK_SERVICE_RSP_REQUEST));
28062545bca0SMatthew Dillon 	rsp = req->req_vbuf;
28072545bca0SMatthew Dillon 
28082545bca0SMatthew Dillon #ifdef	USE_IMMEDIATE_LINK_DATA
28092545bca0SMatthew Dillon 	memcpy((U8 *)&rsp->SGL, &((U8 *)req->req_vbuf)[MPT_RQSL(mpt)], length);
28102545bca0SMatthew Dillon #else
28112545bca0SMatthew Dillon {
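	/*
	 * Build a single 32-bit simple SGE pointing just past the request
	 * header (req_pbuf + MPT_RQSL), where the response payload was
	 * staged, and flag it as the last element of the list.
	 */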
28122545bca0SMatthew Dillon 	PTR_SGE_SIMPLE32 se = (PTR_SGE_SIMPLE32) &rsp->SGL;
28132545bca0SMatthew Dillon 	bus_addr_t paddr = req->req_pbuf;
28142545bca0SMatthew Dillon 	paddr += MPT_RQSL(mpt);
28152545bca0SMatthew Dillon 
28162545bca0SMatthew Dillon 	fl =
28172545bca0SMatthew Dillon 		MPI_SGE_FLAGS_HOST_TO_IOC	|
28182545bca0SMatthew Dillon 		MPI_SGE_FLAGS_SIMPLE_ELEMENT	|
28192545bca0SMatthew Dillon 		MPI_SGE_FLAGS_LAST_ELEMENT	|
28202545bca0SMatthew Dillon 		MPI_SGE_FLAGS_END_OF_LIST	|
28212545bca0SMatthew Dillon 		MPI_SGE_FLAGS_END_OF_BUFFER;
28222545bca0SMatthew Dillon 	fl <<= MPI_SGE_FLAGS_SHIFT;
28232545bca0SMatthew Dillon 	fl |= (length);
28242545bca0SMatthew Dillon 	se->FlagsLength = htole32(fl);
28252545bca0SMatthew Dillon 	se->Address = htole32((uint32_t) paddr);
28262545bca0SMatthew Dillon }
28272545bca0SMatthew Dillon #endif
28282545bca0SMatthew Dillon 
28292545bca0SMatthew Dillon 	/*
28302545bca0SMatthew Dillon 	 * Send it on...
28312545bca0SMatthew Dillon 	 */
28322545bca0SMatthew Dillon 	mpt_send_cmd(mpt, req);
28332545bca0SMatthew Dillon }
28342545bca0SMatthew Dillon 
28352545bca0SMatthew Dillon static int
mpt_fc_els_reply_handler(struct mpt_softc * mpt,request_t * req,uint32_t reply_desc,MSG_DEFAULT_REPLY * reply_frame)28362545bca0SMatthew Dillon mpt_fc_els_reply_handler(struct mpt_softc *mpt, request_t *req,
28372545bca0SMatthew Dillon     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
28382545bca0SMatthew Dillon {
28392545bca0SMatthew Dillon 	PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY rp =
28402545bca0SMatthew Dillon 	    (PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY) reply_frame;
28412545bca0SMatthew Dillon 	U8 rctl;
28422545bca0SMatthew Dillon 	U8 type;
28432545bca0SMatthew Dillon 	U8 cmd;
28442545bca0SMatthew Dillon 	U16 status = le16toh(reply_frame->IOCStatus);
28452545bca0SMatthew Dillon 	U32 *elsbuf;
28462545bca0SMatthew Dillon 	int ioindex;
28472545bca0SMatthew Dillon 	int do_refresh = TRUE;
28482545bca0SMatthew Dillon 
28492545bca0SMatthew Dillon #ifdef	INVARIANTS
28502545bca0SMatthew Dillon 	KASSERT(mpt_req_on_free_list(mpt, req) == 0,
28512545bca0SMatthew Dillon 	    ("fc_els_reply_handler: req %p:%u for function %x on freelist!",
28522545bca0SMatthew Dillon 	    req, req->serno, rp->Function));
28532545bca0SMatthew Dillon 	if (rp->Function != MPI_FUNCTION_FC_PRIMITIVE_SEND) {
28542545bca0SMatthew Dillon 		mpt_req_spcl(mpt, req, "fc_els_reply_handler", __LINE__);
28552545bca0SMatthew Dillon 	} else {
28562545bca0SMatthew Dillon 		mpt_req_not_spcl(mpt, req, "fc_els_reply_handler", __LINE__);
28572545bca0SMatthew Dillon 	}
28582545bca0SMatthew Dillon #endif
28592545bca0SMatthew Dillon 	mpt_lprt(mpt, MPT_PRT_DEBUG,
28602545bca0SMatthew Dillon 	    "FC_ELS Complete: req %p:%u, reply %p function %x\n",
28612545bca0SMatthew Dillon 	    req, req->serno, reply_frame, reply_frame->Function);
28622545bca0SMatthew Dillon 
28632545bca0SMatthew Dillon 	if  (status != MPI_IOCSTATUS_SUCCESS) {
28642545bca0SMatthew Dillon 		mpt_prt(mpt, "ELS REPLY STATUS 0x%x for Function %x\n",
28652545bca0SMatthew Dillon 		    status, reply_frame->Function);
28662545bca0SMatthew Dillon 		if (status == MPI_IOCSTATUS_INVALID_STATE) {
28672545bca0SMatthew Dillon 			/*
28682545bca0SMatthew Dillon 			 * XXX: to get around shutdown issue
28692545bca0SMatthew Dillon 			 */
28702545bca0SMatthew Dillon 			mpt->disabled = 1;
28712545bca0SMatthew Dillon 			return (TRUE);
28722545bca0SMatthew Dillon 		}
28732545bca0SMatthew Dillon 		return (TRUE);
28742545bca0SMatthew Dillon 	}
28752545bca0SMatthew Dillon 
28762545bca0SMatthew Dillon 	/*
28772545bca0SMatthew Dillon 	 * If the function is that of a link service response, we recycle the
28782545bca0SMatthew Dillon 	 * response to be a refresh for a new link service request.
28792545bca0SMatthew Dillon 	 *
28802545bca0SMatthew Dillon 	 * The request pointer is bogus in this case and we have to fetch
28812545bca0SMatthew Dillon 	 * it based upon the TransactionContext.
28822545bca0SMatthew Dillon 	 */
28832545bca0SMatthew Dillon 	if (rp->Function == MPI_FUNCTION_FC_LINK_SRVC_RSP) {
28842545bca0SMatthew Dillon 		/* Freddie Uncle Charlie Katie */
28852545bca0SMatthew Dillon 		/* We don't get the IOINDEX as part of the Link Svc Rsp */
28862545bca0SMatthew Dillon 		for (ioindex = 0; ioindex < mpt->els_cmds_allocated; ioindex++)
28872545bca0SMatthew Dillon 			if (mpt->els_cmd_ptrs[ioindex] == req) {
28882545bca0SMatthew Dillon 				break;
28892545bca0SMatthew Dillon 			}
28902545bca0SMatthew Dillon 
28912545bca0SMatthew Dillon 		KASSERT(ioindex < mpt->els_cmds_allocated,
28922545bca0SMatthew Dillon 		    ("can't find my mommie!"));
28932545bca0SMatthew Dillon 
28942545bca0SMatthew Dillon 		/* remove from active list as we're going to re-post it */
28952545bca0SMatthew Dillon 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
28962545bca0SMatthew Dillon 		req->state &= ~REQ_STATE_QUEUED;
28972545bca0SMatthew Dillon 		req->state |= REQ_STATE_DONE;
28982545bca0SMatthew Dillon 		mpt_fc_post_els(mpt, req, ioindex);
28992545bca0SMatthew Dillon 		return (TRUE);
29002545bca0SMatthew Dillon 	}
29012545bca0SMatthew Dillon 
29022545bca0SMatthew Dillon 	if (rp->Function == MPI_FUNCTION_FC_PRIMITIVE_SEND) {
29032545bca0SMatthew Dillon 		/* remove from active list as we're done */
29042545bca0SMatthew Dillon 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
29052545bca0SMatthew Dillon 		req->state &= ~REQ_STATE_QUEUED;
29062545bca0SMatthew Dillon 		req->state |= REQ_STATE_DONE;
29072545bca0SMatthew Dillon 		if (req->state & REQ_STATE_TIMEDOUT) {
29082545bca0SMatthew Dillon 			mpt_lprt(mpt, MPT_PRT_DEBUG,
29092545bca0SMatthew Dillon 			    "Sync Primitive Send Completed After Timeout\n");
29102545bca0SMatthew Dillon 			mpt_free_request(mpt, req);
29112545bca0SMatthew Dillon 		} else if ((req->state & REQ_STATE_NEED_WAKEUP) == 0) {
29122545bca0SMatthew Dillon 			mpt_lprt(mpt, MPT_PRT_DEBUG,
29132545bca0SMatthew Dillon 			    "Async Primitive Send Complete\n");
29142545bca0SMatthew Dillon 			mpt_free_request(mpt, req);
29152545bca0SMatthew Dillon 		} else {
29162545bca0SMatthew Dillon 			mpt_lprt(mpt, MPT_PRT_DEBUG,
29172545bca0SMatthew Dillon 			    "Sync Primitive Send Complete- Waking Waiter\n");
29182545bca0SMatthew Dillon 			wakeup(req);
29192545bca0SMatthew Dillon 		}
29202545bca0SMatthew Dillon 		return (TRUE);
29212545bca0SMatthew Dillon 	}
29222545bca0SMatthew Dillon 
29232545bca0SMatthew Dillon 	if (rp->Function != MPI_FUNCTION_FC_LINK_SRVC_BUF_POST) {
29242545bca0SMatthew Dillon 		mpt_prt(mpt, "unexpected ELS_REPLY: Function 0x%x Flags %x "
29252545bca0SMatthew Dillon 		    "Length %d Message Flags %x\n", rp->Function, rp->Flags,
29262545bca0SMatthew Dillon 		    rp->MsgLength, rp->MsgFlags);
29272545bca0SMatthew Dillon 		return (TRUE);
29282545bca0SMatthew Dillon 	}
29292545bca0SMatthew Dillon 
29302545bca0SMatthew Dillon 	if (rp->MsgLength <= 5) {
29312545bca0SMatthew Dillon 		/*
29322545bca0SMatthew Dillon 		 * This is just an ack of an original ELS buffer post
29332545bca0SMatthew Dillon 		 */
29342545bca0SMatthew Dillon 		mpt_lprt(mpt, MPT_PRT_DEBUG,
29352545bca0SMatthew Dillon 		    "RECV'd ACK of FC_ELS buf post %p:%u\n", req, req->serno);
29362545bca0SMatthew Dillon 		return (TRUE);
29372545bca0SMatthew Dillon 	}
29382545bca0SMatthew Dillon 
29392545bca0SMatthew Dillon 
29402545bca0SMatthew Dillon 	rctl = (le32toh(rp->Rctl_Did) & MPI_FC_RCTL_MASK) >> MPI_FC_RCTL_SHIFT;
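	/*
	 * Decode the R_CTL and TYPE fields from the buffer-post reply and
	 * the ELS command code from the first word of the payload, which
	 * sits just past the request header in the posted buffer.
	 */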
29412545bca0SMatthew Dillon 	type = (le32toh(rp->Type_Fctl) & MPI_FC_TYPE_MASK) >> MPI_FC_TYPE_SHIFT;
29422545bca0SMatthew Dillon 
29432545bca0SMatthew Dillon 	elsbuf = &((U32 *)req->req_vbuf)[MPT_RQSL(mpt)/sizeof (U32)];
29442545bca0SMatthew Dillon 	cmd = be32toh(elsbuf[0]) >> 24;
29452545bca0SMatthew Dillon 
29462545bca0SMatthew Dillon 	if (rp->Flags & MPI_LS_BUF_POST_REPLY_FLAG_NO_RSP_NEEDED) {
29472545bca0SMatthew Dillon 		mpt_lprt(mpt, MPT_PRT_ALWAYS, "ELS_REPLY: response unneeded\n");
29482545bca0SMatthew Dillon 		return (TRUE);
29492545bca0SMatthew Dillon 	}
29502545bca0SMatthew Dillon 
29512545bca0SMatthew Dillon 	ioindex = le32toh(rp->TransactionContext);
29522545bca0SMatthew Dillon 	req = mpt->els_cmd_ptrs[ioindex];
29532545bca0SMatthew Dillon 
29542545bca0SMatthew Dillon 	if (rctl == ELS && type == 1) {
29552545bca0SMatthew Dillon 		switch (cmd) {
29562545bca0SMatthew Dillon 		case PRLI:
29572545bca0SMatthew Dillon 			/*
29582545bca0SMatthew Dillon 			 * Send back a PRLI ACC
29592545bca0SMatthew Dillon 			 */
29602545bca0SMatthew Dillon 			mpt_prt(mpt, "PRLI from 0x%08x%08x\n",
29612545bca0SMatthew Dillon 			    le32toh(rp->Wwn.PortNameHigh),
29622545bca0SMatthew Dillon 			    le32toh(rp->Wwn.PortNameLow));
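			/*
			 * Build the PRLI accept payload; word 4 advertises
			 * target (0x10) and/or initiator (0x20) function
			 * according to our configured role.
			 */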
29632545bca0SMatthew Dillon 			elsbuf[0] = htobe32(0x02100014);
29642545bca0SMatthew Dillon 			elsbuf[1] |= htobe32(0x00000100);
29652545bca0SMatthew Dillon 			elsbuf[4] = htobe32(0x00000002);
29662545bca0SMatthew Dillon 			if (mpt->role & MPT_ROLE_TARGET)
29672545bca0SMatthew Dillon 				elsbuf[4] |= htobe32(0x00000010);
29682545bca0SMatthew Dillon 			if (mpt->role & MPT_ROLE_INITIATOR)
29692545bca0SMatthew Dillon 				elsbuf[4] |= htobe32(0x00000020);
29702545bca0SMatthew Dillon 			/* remove from active list as we're done */
29712545bca0SMatthew Dillon 			TAILQ_REMOVE(&mpt->request_pending_list, req, links);
29722545bca0SMatthew Dillon 			req->state &= ~REQ_STATE_QUEUED;
29732545bca0SMatthew Dillon 			req->state |= REQ_STATE_DONE;
29742545bca0SMatthew Dillon 			mpt_fc_els_send_response(mpt, req, rp, 20);
29752545bca0SMatthew Dillon 			do_refresh = FALSE;
29762545bca0SMatthew Dillon 			break;
29772545bca0SMatthew Dillon 		case PRLO:
29782545bca0SMatthew Dillon 			memset(elsbuf, 0, 5 * (sizeof (U32)));
29792545bca0SMatthew Dillon 			elsbuf[0] = htobe32(0x02100014);
29802545bca0SMatthew Dillon 			elsbuf[1] = htobe32(0x08000100);
29812545bca0SMatthew Dillon 			mpt_prt(mpt, "PRLO from 0x%08x%08x\n",
29822545bca0SMatthew Dillon 			    le32toh(rp->Wwn.PortNameHigh),
29832545bca0SMatthew Dillon 			    le32toh(rp->Wwn.PortNameLow));
29842545bca0SMatthew Dillon 			/* remove from active list as we're done */
29852545bca0SMatthew Dillon 			TAILQ_REMOVE(&mpt->request_pending_list, req, links);
29862545bca0SMatthew Dillon 			req->state &= ~REQ_STATE_QUEUED;
29872545bca0SMatthew Dillon 			req->state |= REQ_STATE_DONE;
29882545bca0SMatthew Dillon 			mpt_fc_els_send_response(mpt, req, rp, 20);
29892545bca0SMatthew Dillon 			do_refresh = FALSE;
29902545bca0SMatthew Dillon 			break;
29912545bca0SMatthew Dillon 		default:
29922545bca0SMatthew Dillon 			mpt_prt(mpt, "ELS TYPE 1 COMMAND: %x\n", cmd);
29932545bca0SMatthew Dillon 			break;
29942545bca0SMatthew Dillon 		}
29952545bca0SMatthew Dillon 	} else if (rctl == ABTS && type == 0) {
29962545bca0SMatthew Dillon 		uint16_t rx_id = le16toh(rp->Rxid);
29972545bca0SMatthew Dillon 		uint16_t ox_id = le16toh(rp->Oxid);
29982545bca0SMatthew Dillon 		request_t *tgt_req = NULL;
29992545bca0SMatthew Dillon 
30002545bca0SMatthew Dillon 		mpt_prt(mpt,
30012545bca0SMatthew Dillon 		    "ELS: ABTS OX_ID 0x%x RX_ID 0x%x from 0x%08x%08x\n",
30022545bca0SMatthew Dillon 		    ox_id, rx_id, le32toh(rp->Wwn.PortNameHigh),
30032545bca0SMatthew Dillon 		    le32toh(rp->Wwn.PortNameLow));
30042545bca0SMatthew Dillon 		if (rx_id >= mpt->mpt_max_tgtcmds) {
30052545bca0SMatthew Dillon 			mpt_prt(mpt, "Bad RX_ID 0x%x\n", rx_id);
30062545bca0SMatthew Dillon 		} else if (mpt->tgt_cmd_ptrs == NULL) {
30072545bca0SMatthew Dillon 			mpt_prt(mpt, "No TGT CMD PTRS\n");
30082545bca0SMatthew Dillon 		} else {
30092545bca0SMatthew Dillon 			tgt_req = mpt->tgt_cmd_ptrs[rx_id];
30102545bca0SMatthew Dillon 		}
30112545bca0SMatthew Dillon 		if (tgt_req) {
30122545bca0SMatthew Dillon 			mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, tgt_req);
30136d259fc1SSascha Wildner 			union ccb *ccb;
30142545bca0SMatthew Dillon 			uint32_t ct_id;
30152545bca0SMatthew Dillon 
30162545bca0SMatthew Dillon 			/*
30172545bca0SMatthew Dillon 			 * Check to make sure we have the correct command.
30182545bca0SMatthew Dillon 			 * The reply descriptor in the target state should
30192545bca0SMatthew Dillon 			 * contain an IoIndex that matches the RX_ID.
30212545bca0SMatthew Dillon 			 *
30222545bca0SMatthew Dillon 			 * It'd be nice to have OX_ID to crosscheck with
30232545bca0SMatthew Dillon 			 * as well.
30242545bca0SMatthew Dillon 			 */
30252545bca0SMatthew Dillon 			ct_id = GET_IO_INDEX(tgt->reply_desc);
30262545bca0SMatthew Dillon 
30272545bca0SMatthew Dillon 			if (ct_id != rx_id) {
30282545bca0SMatthew Dillon 				mpt_lprt(mpt, MPT_PRT_ERROR, "ABORT Mismatch: "
30292545bca0SMatthew Dillon 				    "RX_ID received=0x%x; RX_ID in cmd=0x%x\n",
30302545bca0SMatthew Dillon 				    rx_id, ct_id);
30312545bca0SMatthew Dillon 				goto skip;
30322545bca0SMatthew Dillon 			}
30332545bca0SMatthew Dillon 
30342545bca0SMatthew Dillon 			ccb = tgt->ccb;
30352545bca0SMatthew Dillon 			if (ccb) {
30362545bca0SMatthew Dillon 				mpt_prt(mpt,
30372545bca0SMatthew Dillon 				    "CCB (%p): lun %u flags %x status %x\n",
30382545bca0SMatthew Dillon 				    ccb, ccb->ccb_h.target_lun,
30392545bca0SMatthew Dillon 				    ccb->ccb_h.flags, ccb->ccb_h.status);
30402545bca0SMatthew Dillon 			}
30412545bca0SMatthew Dillon 			mpt_prt(mpt, "target state 0x%x resid %u xfrd %u rpwrd "
30422545bca0SMatthew Dillon 			    "%x nxfers %x\n", tgt->state,
30432545bca0SMatthew Dillon 			    tgt->resid, tgt->bytes_xfered, tgt->reply_desc,
30442545bca0SMatthew Dillon 			    tgt->nxfers);
30452545bca0SMatthew Dillon   skip:
30462545bca0SMatthew Dillon 			if (mpt_abort_target_cmd(mpt, tgt_req)) {
30472545bca0SMatthew Dillon 				mpt_prt(mpt, "unable to start TargetAbort\n");
30482545bca0SMatthew Dillon 			}
30492545bca0SMatthew Dillon 		} else {
30502545bca0SMatthew Dillon 			mpt_prt(mpt, "no back pointer for RX_ID 0x%x\n", rx_id);
30512545bca0SMatthew Dillon 		}
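		/*
		 * Whether or not we found the command, acknowledge the ABTS.
		 * This fills in what appears to be a BA_ACC payload: word 1
		 * echoes the OX_ID/RX_ID pair and word 2 presumably carries
		 * the accepted SEQ_CNT range.
		 */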
30522545bca0SMatthew Dillon 		memset(elsbuf, 0, 5 * (sizeof (U32)));
30532545bca0SMatthew Dillon 		elsbuf[0] = htobe32(0);
30542545bca0SMatthew Dillon 		elsbuf[1] = htobe32((ox_id << 16) | rx_id);
30552545bca0SMatthew Dillon 		elsbuf[2] = htobe32(0x000ffff);
30562545bca0SMatthew Dillon 		/*
30576d259fc1SSascha Wildner 		 * Dork with the reply frame so that the response to it
30582545bca0SMatthew Dillon 		 * will be correct.
30592545bca0SMatthew Dillon 		 */
30602545bca0SMatthew Dillon 		rp->Rctl_Did += ((BA_ACC - ABTS) << MPI_FC_RCTL_SHIFT);
30612545bca0SMatthew Dillon 		/* remove from active list as we're done */
30622545bca0SMatthew Dillon 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
30632545bca0SMatthew Dillon 		req->state &= ~REQ_STATE_QUEUED;
30642545bca0SMatthew Dillon 		req->state |= REQ_STATE_DONE;
30652545bca0SMatthew Dillon 		mpt_fc_els_send_response(mpt, req, rp, 12);
30662545bca0SMatthew Dillon 		do_refresh = FALSE;
30672545bca0SMatthew Dillon 	} else {
30682545bca0SMatthew Dillon 		mpt_prt(mpt, "ELS: RCTL %x TYPE %x CMD %x\n", rctl, type, cmd);
30692545bca0SMatthew Dillon 	}
30702545bca0SMatthew Dillon 	if (do_refresh == TRUE) {
30712545bca0SMatthew Dillon 		/* remove from active list as we're done */
30722545bca0SMatthew Dillon 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
30732545bca0SMatthew Dillon 		req->state &= ~REQ_STATE_QUEUED;
30742545bca0SMatthew Dillon 		req->state |= REQ_STATE_DONE;
30752545bca0SMatthew Dillon 		mpt_fc_post_els(mpt, req, ioindex);
30762545bca0SMatthew Dillon 	}
30772545bca0SMatthew Dillon 	return (TRUE);
30782545bca0SMatthew Dillon }
30792545bca0SMatthew Dillon 
30802545bca0SMatthew Dillon /*
30812545bca0SMatthew Dillon  * Clean up all SCSI Initiator personality state in response
30822545bca0SMatthew Dillon  * to a controller reset.
30832545bca0SMatthew Dillon  */
30842545bca0SMatthew Dillon static void
30852545bca0SMatthew Dillon mpt_cam_ioc_reset(struct mpt_softc *mpt, int type)
30862545bca0SMatthew Dillon {
30874c42baf4SSascha Wildner 
30882545bca0SMatthew Dillon 	/*
30892545bca0SMatthew Dillon 	 * The pending list is already run down by
30902545bca0SMatthew Dillon 	 * the generic handler.  Perform the same
30912545bca0SMatthew Dillon 	 * operation on the timed out request list.
30922545bca0SMatthew Dillon 	 */
30932545bca0SMatthew Dillon 	mpt_complete_request_chain(mpt, &mpt->request_timeout_list,
30942545bca0SMatthew Dillon 				   MPI_IOCSTATUS_INVALID_STATE);
30952545bca0SMatthew Dillon 
30962545bca0SMatthew Dillon 	/*
30972545bca0SMatthew Dillon 	 * XXX: We need to repost ELS and Target Command Buffers?
30982545bca0SMatthew Dillon 	 */
30992545bca0SMatthew Dillon 
31002545bca0SMatthew Dillon 	/*
31012545bca0SMatthew Dillon 	 * Inform the XPT that a bus reset has occurred.
31022545bca0SMatthew Dillon 	 */
31032545bca0SMatthew Dillon 	xpt_async(AC_BUS_RESET, mpt->path, NULL);
31042545bca0SMatthew Dillon }
31052545bca0SMatthew Dillon 
31062545bca0SMatthew Dillon /*
31072545bca0SMatthew Dillon  * Parse additional completion information in the reply
31082545bca0SMatthew Dillon  * frame for SCSI I/O requests.
31092545bca0SMatthew Dillon  */
31102545bca0SMatthew Dillon static int
31112545bca0SMatthew Dillon mpt_scsi_reply_frame_handler(struct mpt_softc *mpt, request_t *req,
31122545bca0SMatthew Dillon 			     MSG_DEFAULT_REPLY *reply_frame)
31132545bca0SMatthew Dillon {
31142545bca0SMatthew Dillon 	union ccb *ccb;
31152545bca0SMatthew Dillon 	MSG_SCSI_IO_REPLY *scsi_io_reply;
31162545bca0SMatthew Dillon 	u_int ioc_status;
31172545bca0SMatthew Dillon 	u_int sstate;
31182545bca0SMatthew Dillon 
31192545bca0SMatthew Dillon 	MPT_DUMP_REPLY_FRAME(mpt, reply_frame);
31202545bca0SMatthew Dillon 	KASSERT(reply_frame->Function == MPI_FUNCTION_SCSI_IO_REQUEST
31212545bca0SMatthew Dillon 	     || reply_frame->Function == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH,
31222545bca0SMatthew Dillon 		("MPT SCSI I/O Handler called with incorrect reply type"));
31232545bca0SMatthew Dillon 	KASSERT((reply_frame->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) == 0,
31242545bca0SMatthew Dillon 		("MPT SCSI I/O Handler called with continuation reply"));
31252545bca0SMatthew Dillon 
31262545bca0SMatthew Dillon 	scsi_io_reply = (MSG_SCSI_IO_REPLY *)reply_frame;
31272545bca0SMatthew Dillon 	ioc_status = le16toh(scsi_io_reply->IOCStatus);
31282545bca0SMatthew Dillon 	ioc_status &= MPI_IOCSTATUS_MASK;
31292545bca0SMatthew Dillon 	sstate = scsi_io_reply->SCSIState;
31302545bca0SMatthew Dillon 
31312545bca0SMatthew Dillon 	ccb = req->ccb;
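	/*
	 * The IOC reports how many bytes actually moved in TransferCount;
	 * the CAM residual is whatever remains of the requested transfer.
	 */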
31322545bca0SMatthew Dillon 	ccb->csio.resid =
31332545bca0SMatthew Dillon 	    ccb->csio.dxfer_len - le32toh(scsi_io_reply->TransferCount);
31342545bca0SMatthew Dillon 
31352545bca0SMatthew Dillon 	if ((sstate & MPI_SCSI_STATE_AUTOSENSE_VALID) != 0
31362545bca0SMatthew Dillon 	 && (ccb->ccb_h.flags & (CAM_SENSE_PHYS | CAM_SENSE_PTR)) == 0) {
31374c42baf4SSascha Wildner 		uint32_t sense_returned;
31384c42baf4SSascha Wildner 
31392545bca0SMatthew Dillon 		ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
31404c42baf4SSascha Wildner 
31414c42baf4SSascha Wildner 		sense_returned = le32toh(scsi_io_reply->SenseCount);
31424c42baf4SSascha Wildner 		if (sense_returned < ccb->csio.sense_len)
31434c42baf4SSascha Wildner 			ccb->csio.sense_resid = ccb->csio.sense_len -
31444c42baf4SSascha Wildner 						sense_returned;
31454c42baf4SSascha Wildner 		else
31464c42baf4SSascha Wildner 			ccb->csio.sense_resid = 0;
31474c42baf4SSascha Wildner 
3148bc14747bSSascha Wildner 		bzero(&ccb->csio.sense_data, sizeof(ccb->csio.sense_data));
31492545bca0SMatthew Dillon 		bcopy(req->sense_vbuf, &ccb->csio.sense_data,
31504c42baf4SSascha Wildner 		    min(ccb->csio.sense_len, sense_returned));
31512545bca0SMatthew Dillon 	}
31522545bca0SMatthew Dillon 
31532545bca0SMatthew Dillon 	if ((sstate & MPI_SCSI_STATE_QUEUE_TAG_REJECTED) != 0) {
31542545bca0SMatthew Dillon 		/*
31552545bca0SMatthew Dillon 		 * Tag messages rejected, but non-tagged retry
31562545bca0SMatthew Dillon 		 * was successful.
31572545bca0SMatthew Dillon XXXX
31582545bca0SMatthew Dillon 		mpt_set_tags(mpt, devinfo, MPT_QUEUE_NONE);
31592545bca0SMatthew Dillon 		 */
31602545bca0SMatthew Dillon 	}
31612545bca0SMatthew Dillon 
31622545bca0SMatthew Dillon 	switch(ioc_status) {
31632545bca0SMatthew Dillon 	case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
31642545bca0SMatthew Dillon 		/*
31652545bca0SMatthew Dillon 		 * XXX
31662545bca0SMatthew Dillon 		 * Linux driver indicates that a zero
31672545bca0SMatthew Dillon 		 * transfer length with this error code
31682545bca0SMatthew Dillon 		 * indicates a CRC error.
31692545bca0SMatthew Dillon 		 *
31702545bca0SMatthew Dillon 		 * No need to swap the bytes for checking
31712545bca0SMatthew Dillon 		 * against zero.
31722545bca0SMatthew Dillon 		 */
31732545bca0SMatthew Dillon 		if (scsi_io_reply->TransferCount == 0) {
31742545bca0SMatthew Dillon 			mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY);
31752545bca0SMatthew Dillon 			break;
31762545bca0SMatthew Dillon 		}
31772545bca0SMatthew Dillon 		/* FALLTHROUGH */
31782545bca0SMatthew Dillon 	case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:
31792545bca0SMatthew Dillon 	case MPI_IOCSTATUS_SUCCESS:
31802545bca0SMatthew Dillon 	case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:
31812545bca0SMatthew Dillon 		if ((sstate & MPI_SCSI_STATE_NO_SCSI_STATUS) != 0) {
31822545bca0SMatthew Dillon 			/*
31832545bca0SMatthew Dillon 			 * Status was never returned for this transaction.
31842545bca0SMatthew Dillon 			 */
31852545bca0SMatthew Dillon 			mpt_set_ccb_status(ccb, CAM_UNEXP_BUSFREE);
31862545bca0SMatthew Dillon 		} else if (scsi_io_reply->SCSIStatus != SCSI_STATUS_OK) {
31872545bca0SMatthew Dillon 			ccb->csio.scsi_status = scsi_io_reply->SCSIStatus;
31882545bca0SMatthew Dillon 			mpt_set_ccb_status(ccb, CAM_SCSI_STATUS_ERROR);
31892545bca0SMatthew Dillon 			if ((sstate & MPI_SCSI_STATE_AUTOSENSE_FAILED) != 0)
31902545bca0SMatthew Dillon 				mpt_set_ccb_status(ccb, CAM_AUTOSENSE_FAIL);
31912545bca0SMatthew Dillon 		} else if ((sstate & MPI_SCSI_STATE_RESPONSE_INFO_VALID) != 0) {
31922545bca0SMatthew Dillon 
31936d259fc1SSascha Wildner 			/* XXX Handle SPI-Packet and FCP-2 response info. */
31942545bca0SMatthew Dillon 			mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
31952545bca0SMatthew Dillon 		} else
31962545bca0SMatthew Dillon 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
31972545bca0SMatthew Dillon 		break;
31982545bca0SMatthew Dillon 	case MPI_IOCSTATUS_SCSI_DATA_OVERRUN:
31992545bca0SMatthew Dillon 		mpt_set_ccb_status(ccb, CAM_DATA_RUN_ERR);
32002545bca0SMatthew Dillon 		break;
32012545bca0SMatthew Dillon 	case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR:
32022545bca0SMatthew Dillon 		mpt_set_ccb_status(ccb, CAM_UNCOR_PARITY);
32032545bca0SMatthew Dillon 		break;
32042545bca0SMatthew Dillon 	case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
32052545bca0SMatthew Dillon 		/*
32062545bca0SMatthew Dillon 		 * Since selection timeouts and "device really not
32072545bca0SMatthew Dillon 		 * there" are grouped into this error code, report
32082545bca0SMatthew Dillon 		 * selection timeout.  Selection timeouts are
32092545bca0SMatthew Dillon 		 * typically retried before giving up on the device
32102545bca0SMatthew Dillon 		 * whereas "device not there" errors are considered
32112545bca0SMatthew Dillon 		 * unretryable.
32122545bca0SMatthew Dillon 		 */
32132545bca0SMatthew Dillon 		mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT);
32142545bca0SMatthew Dillon 		break;
32152545bca0SMatthew Dillon 	case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR:
32162545bca0SMatthew Dillon 		mpt_set_ccb_status(ccb, CAM_SEQUENCE_FAIL);
32172545bca0SMatthew Dillon 		break;
32182545bca0SMatthew Dillon 	case MPI_IOCSTATUS_SCSI_INVALID_BUS:
32192545bca0SMatthew Dillon 		mpt_set_ccb_status(ccb, CAM_PATH_INVALID);
32202545bca0SMatthew Dillon 		break;
32212545bca0SMatthew Dillon 	case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:
32222545bca0SMatthew Dillon 		mpt_set_ccb_status(ccb, CAM_TID_INVALID);
32232545bca0SMatthew Dillon 		break;
32242545bca0SMatthew Dillon 	case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
32252545bca0SMatthew Dillon 		ccb->ccb_h.status = CAM_UA_TERMIO;
32262545bca0SMatthew Dillon 		break;
32272545bca0SMatthew Dillon 	case MPI_IOCSTATUS_INVALID_STATE:
32282545bca0SMatthew Dillon 		/*
32292545bca0SMatthew Dillon 		 * The IOC has been reset.  Emulate a bus reset.
32302545bca0SMatthew Dillon 		 */
32312545bca0SMatthew Dillon 		/* FALLTHROUGH */
32322545bca0SMatthew Dillon 	case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:
32332545bca0SMatthew Dillon 		ccb->ccb_h.status = CAM_SCSI_BUS_RESET;
32342545bca0SMatthew Dillon 		break;
32352545bca0SMatthew Dillon 	case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:
32362545bca0SMatthew Dillon 	case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:
32372545bca0SMatthew Dillon 		/*
32382545bca0SMatthew Dillon 		 * Don't clobber any timeout status that has
32392545bca0SMatthew Dillon 		 * already been set for this transaction.  We
32402545bca0SMatthew Dillon 		 * want the SCSI layer to be able to differentiate
32412545bca0SMatthew Dillon 		 * between the command we aborted due to timeout
32422545bca0SMatthew Dillon 		 * and any innocent bystanders.
32432545bca0SMatthew Dillon 		 */
32442545bca0SMatthew Dillon 		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG)
32452545bca0SMatthew Dillon 			break;
32462545bca0SMatthew Dillon 		mpt_set_ccb_status(ccb, CAM_REQ_TERMIO);
32472545bca0SMatthew Dillon 		break;
32482545bca0SMatthew Dillon 
32492545bca0SMatthew Dillon 	case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
32502545bca0SMatthew Dillon 		mpt_set_ccb_status(ccb, CAM_RESRC_UNAVAIL);
32512545bca0SMatthew Dillon 		break;
32522545bca0SMatthew Dillon 	case MPI_IOCSTATUS_BUSY:
32532545bca0SMatthew Dillon 		mpt_set_ccb_status(ccb, CAM_BUSY);
32542545bca0SMatthew Dillon 		break;
32552545bca0SMatthew Dillon 	case MPI_IOCSTATUS_INVALID_FUNCTION:
32562545bca0SMatthew Dillon 	case MPI_IOCSTATUS_INVALID_SGL:
32572545bca0SMatthew Dillon 	case MPI_IOCSTATUS_INTERNAL_ERROR:
32582545bca0SMatthew Dillon 	case MPI_IOCSTATUS_INVALID_FIELD:
32592545bca0SMatthew Dillon 	default:
32602545bca0SMatthew Dillon 		/* XXX
32612545bca0SMatthew Dillon 		 * Some of the above may need to kick
32622545bca0SMatthew Dillon 		 * off a recovery action!!!!
32632545bca0SMatthew Dillon 		 */
32642545bca0SMatthew Dillon 		ccb->ccb_h.status = CAM_UNREC_HBA_ERROR;
32652545bca0SMatthew Dillon 		break;
32662545bca0SMatthew Dillon 	}
32672545bca0SMatthew Dillon 
32682545bca0SMatthew Dillon 	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
32692545bca0SMatthew Dillon 		mpt_freeze_ccb(ccb);
32702545bca0SMatthew Dillon 	}
32712545bca0SMatthew Dillon 
32722545bca0SMatthew Dillon 	return (TRUE);
32732545bca0SMatthew Dillon }
32742545bca0SMatthew Dillon 
32752545bca0SMatthew Dillon static void
32762545bca0SMatthew Dillon mpt_action(struct cam_sim *sim, union ccb *ccb)
32772545bca0SMatthew Dillon {
32782545bca0SMatthew Dillon 	struct mpt_softc *mpt;
32792545bca0SMatthew Dillon 	struct ccb_trans_settings *cts;
32802545bca0SMatthew Dillon 	target_id_t tgt;
32812545bca0SMatthew Dillon 	lun_id_t lun;
32822545bca0SMatthew Dillon 	int raid_passthru;
32832545bca0SMatthew Dillon 
32842545bca0SMatthew Dillon 	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("mpt_action\n"));
32852545bca0SMatthew Dillon 
32862545bca0SMatthew Dillon 	mpt = (struct mpt_softc *)cam_sim_softc(sim);
32872545bca0SMatthew Dillon 	raid_passthru = (sim == mpt->phydisk_sim);
32882545bca0SMatthew Dillon 	MPT_LOCK_ASSERT(mpt);
32892545bca0SMatthew Dillon 
32902545bca0SMatthew Dillon 	tgt = ccb->ccb_h.target_id;
32912545bca0SMatthew Dillon 	lun = ccb->ccb_h.target_lun;
32922545bca0SMatthew Dillon 	if (raid_passthru &&
32932545bca0SMatthew Dillon 	    ccb->ccb_h.func_code != XPT_PATH_INQ &&
32942545bca0SMatthew Dillon 	    ccb->ccb_h.func_code != XPT_RESET_BUS &&
32952545bca0SMatthew Dillon 	    ccb->ccb_h.func_code != XPT_RESET_DEV) {
32962545bca0SMatthew Dillon 		if (mpt_map_physdisk(mpt, ccb, &tgt) != 0) {
32972545bca0SMatthew Dillon 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
32982545bca0SMatthew Dillon 			mpt_set_ccb_status(ccb, CAM_DEV_NOT_THERE);
32992545bca0SMatthew Dillon 			xpt_done(ccb);
33002545bca0SMatthew Dillon 			return;
33012545bca0SMatthew Dillon 		}
33022545bca0SMatthew Dillon 	}
33032545bca0SMatthew Dillon 	ccb->ccb_h.ccb_mpt_ptr = mpt;
33042545bca0SMatthew Dillon 
33052545bca0SMatthew Dillon 	switch (ccb->ccb_h.func_code) {
33062545bca0SMatthew Dillon 	case XPT_SCSI_IO:	/* Execute the requested I/O operation */
33072545bca0SMatthew Dillon 		/*
33082545bca0SMatthew Dillon 		 * Do a couple of preliminary checks...
33092545bca0SMatthew Dillon 		 */
33102545bca0SMatthew Dillon 		if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0) {
33112545bca0SMatthew Dillon 			if ((ccb->ccb_h.flags & CAM_CDB_PHYS) != 0) {
33122545bca0SMatthew Dillon 				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
33132545bca0SMatthew Dillon 				mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
33142545bca0SMatthew Dillon 				break;
33152545bca0SMatthew Dillon 			}
33162545bca0SMatthew Dillon 		}
33172545bca0SMatthew Dillon 		/* Max supported CDB length is 16 bytes */
33182545bca0SMatthew Dillon 		/* XXX Unless we implement the new 32byte message type */
33192545bca0SMatthew Dillon 		if (ccb->csio.cdb_len >
33202545bca0SMatthew Dillon 		    sizeof (((PTR_MSG_SCSI_IO_REQUEST)0)->CDB)) {
33212545bca0SMatthew Dillon 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
33222545bca0SMatthew Dillon 			mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
33232545bca0SMatthew Dillon 			break;
33242545bca0SMatthew Dillon 		}
33252545bca0SMatthew Dillon #ifdef	MPT_TEST_MULTIPATH
33262545bca0SMatthew Dillon 		if (mpt->failure_id == ccb->ccb_h.target_id) {
33272545bca0SMatthew Dillon 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
33282545bca0SMatthew Dillon 			mpt_set_ccb_status(ccb, CAM_SEL_TIMEOUT);
33292545bca0SMatthew Dillon 			break;
33302545bca0SMatthew Dillon 		}
33312545bca0SMatthew Dillon #endif
33322545bca0SMatthew Dillon 		ccb->csio.scsi_status = SCSI_STATUS_OK;
33332545bca0SMatthew Dillon 		mpt_start(sim, ccb);
33342545bca0SMatthew Dillon 		return;
33352545bca0SMatthew Dillon 
33362545bca0SMatthew Dillon 	case XPT_RESET_BUS:
33372545bca0SMatthew Dillon 		if (raid_passthru) {
33382545bca0SMatthew Dillon 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
33392545bca0SMatthew Dillon 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
33402545bca0SMatthew Dillon 			break;
33412545bca0SMatthew Dillon 		}
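		/* FALLTHROUGH */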
33422545bca0SMatthew Dillon 	case XPT_RESET_DEV:
33432545bca0SMatthew Dillon 		if (ccb->ccb_h.func_code == XPT_RESET_BUS) {
33442545bca0SMatthew Dillon 			if (bootverbose) {
33452545bca0SMatthew Dillon 				xpt_print(ccb->ccb_h.path, "reset bus\n");
33462545bca0SMatthew Dillon 			}
33472545bca0SMatthew Dillon 		} else {
33482545bca0SMatthew Dillon 			xpt_print(ccb->ccb_h.path, "reset device\n");
33492545bca0SMatthew Dillon 		}
33502545bca0SMatthew Dillon 		(void) mpt_bus_reset(mpt, tgt, lun, FALSE);
33512545bca0SMatthew Dillon 
33522545bca0SMatthew Dillon 		/*
33532545bca0SMatthew Dillon 		 * mpt_bus_reset is always successful in that it
33542545bca0SMatthew Dillon 		 * will fall back to a hard reset should a bus
33552545bca0SMatthew Dillon 		 * reset attempt fail.
33562545bca0SMatthew Dillon 		 */
33572545bca0SMatthew Dillon 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
33582545bca0SMatthew Dillon 		mpt_set_ccb_status(ccb, CAM_REQ_CMP);
33592545bca0SMatthew Dillon 		break;
33602545bca0SMatthew Dillon 
33612545bca0SMatthew Dillon 	case XPT_ABORT:
33622545bca0SMatthew Dillon 	{
33632545bca0SMatthew Dillon 		union ccb *accb = ccb->cab.abort_ccb;
33642545bca0SMatthew Dillon 		switch (accb->ccb_h.func_code) {
33652545bca0SMatthew Dillon 		case XPT_ACCEPT_TARGET_IO:
33662545bca0SMatthew Dillon 		case XPT_IMMED_NOTIFY:
33672545bca0SMatthew Dillon 			ccb->ccb_h.status = mpt_abort_target_ccb(mpt, ccb);
33682545bca0SMatthew Dillon 			break;
33692545bca0SMatthew Dillon 		case XPT_CONT_TARGET_IO:
33702545bca0SMatthew Dillon 			mpt_prt(mpt, "cannot abort active CTIOs yet\n");
33712545bca0SMatthew Dillon 			ccb->ccb_h.status = CAM_UA_ABORT;
33722545bca0SMatthew Dillon 			break;
33732545bca0SMatthew Dillon 		case XPT_SCSI_IO:
33742545bca0SMatthew Dillon 			ccb->ccb_h.status = CAM_UA_ABORT;
33752545bca0SMatthew Dillon 			break;
33762545bca0SMatthew Dillon 		default:
33772545bca0SMatthew Dillon 			ccb->ccb_h.status = CAM_REQ_INVALID;
33782545bca0SMatthew Dillon 			break;
33792545bca0SMatthew Dillon 		}
33802545bca0SMatthew Dillon 		break;
33812545bca0SMatthew Dillon 	}
33822545bca0SMatthew Dillon 
33832545bca0SMatthew Dillon #define	IS_CURRENT_SETTINGS(c)	((c)->type == CTS_TYPE_CURRENT_SETTINGS)
33842545bca0SMatthew Dillon #define	DP_DISC_ENABLE	0x1
33852545bca0SMatthew Dillon #define	DP_DISC_DISABL	0x2
33862545bca0SMatthew Dillon #define	DP_DISC		(DP_DISC_ENABLE|DP_DISC_DISABL)
33872545bca0SMatthew Dillon 
33882545bca0SMatthew Dillon #define	DP_TQING_ENABLE	0x4
33892545bca0SMatthew Dillon #define	DP_TQING_DISABL	0x8
33902545bca0SMatthew Dillon #define	DP_TQING	(DP_TQING_ENABLE|DP_TQING_DISABL)
33912545bca0SMatthew Dillon 
33922545bca0SMatthew Dillon #define	DP_WIDE		0x10
33932545bca0SMatthew Dillon #define	DP_NARROW	0x20
33942545bca0SMatthew Dillon #define	DP_WIDTH	(DP_WIDE|DP_NARROW)
33952545bca0SMatthew Dillon 
33962545bca0SMatthew Dillon #define	DP_SYNC		0x40
33972545bca0SMatthew Dillon 
33982545bca0SMatthew Dillon 	case XPT_SET_TRAN_SETTINGS:	/* Nexus Settings */
33992545bca0SMatthew Dillon 	{
34002545bca0SMatthew Dillon 		struct ccb_trans_settings_scsi *scsi;
34012545bca0SMatthew Dillon 		struct ccb_trans_settings_spi *spi;
34022545bca0SMatthew Dillon 		uint8_t dval;
34032545bca0SMatthew Dillon 		u_int period;
34042545bca0SMatthew Dillon 		u_int offset;
34052545bca0SMatthew Dillon 		int i, j;
34062545bca0SMatthew Dillon 
34072545bca0SMatthew Dillon 		cts = &ccb->cts;
34082545bca0SMatthew Dillon 
34092545bca0SMatthew Dillon 		if (mpt->is_fc || mpt->is_sas) {
34102545bca0SMatthew Dillon 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
34112545bca0SMatthew Dillon 			break;
34122545bca0SMatthew Dillon 		}
34132545bca0SMatthew Dillon 
34142545bca0SMatthew Dillon 		scsi = &cts->proto_specific.scsi;
34152545bca0SMatthew Dillon 		spi = &cts->xport_specific.spi;
34162545bca0SMatthew Dillon 
34172545bca0SMatthew Dillon 		/*
34182545bca0SMatthew Dillon 		 * We can be called just to valid transport and proto versions
34182545bca0SMatthew Dillon 		 * We can be called just to validate transport and proto versions.
34202545bca0SMatthew Dillon 		if (scsi->valid == 0 && spi->valid == 0) {
34212545bca0SMatthew Dillon 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
34222545bca0SMatthew Dillon 			break;
34232545bca0SMatthew Dillon 		}
34242545bca0SMatthew Dillon 
34252545bca0SMatthew Dillon 		/*
34262545bca0SMatthew Dillon 		 * Skip attempting settings on RAID volume disks.
34272545bca0SMatthew Dillon 		 * Other devices on the bus get the normal treatment.
34282545bca0SMatthew Dillon 		 */
34292545bca0SMatthew Dillon 		if (mpt->phydisk_sim && raid_passthru == 0 &&
34302545bca0SMatthew Dillon 		    mpt_is_raid_volume(mpt, tgt) != 0) {
34312545bca0SMatthew Dillon 			mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
34322545bca0SMatthew Dillon 			    "no transfer settings for RAID vols\n");
34332545bca0SMatthew Dillon 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
34342545bca0SMatthew Dillon 			break;
34352545bca0SMatthew Dillon 		}
34362545bca0SMatthew Dillon 
34372545bca0SMatthew Dillon 		i = mpt->mpt_port_page2.PortSettings &
34382545bca0SMatthew Dillon 		    MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS;
34392545bca0SMatthew Dillon 		j = mpt->mpt_port_page2.PortFlags &
34402545bca0SMatthew Dillon 		    MPI_SCSIPORTPAGE2_PORT_FLAGS_DV_MASK;
34412545bca0SMatthew Dillon 		if (i == MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS &&
34422545bca0SMatthew Dillon 		    j == MPI_SCSIPORTPAGE2_PORT_FLAGS_OFF_DV) {
34432545bca0SMatthew Dillon 			mpt_lprt(mpt, MPT_PRT_ALWAYS,
34442545bca0SMatthew Dillon 			    "honoring BIOS transfer negotiations\n");
34452545bca0SMatthew Dillon 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
34462545bca0SMatthew Dillon 			break;
34472545bca0SMatthew Dillon 		}
34482545bca0SMatthew Dillon 
34492545bca0SMatthew Dillon 		dval = 0;
34502545bca0SMatthew Dillon 		period = 0;
34512545bca0SMatthew Dillon 		offset = 0;
34522545bca0SMatthew Dillon 
34532545bca0SMatthew Dillon 		if ((spi->valid & CTS_SPI_VALID_DISC) != 0) {
34542545bca0SMatthew Dillon 			dval |= ((spi->flags & CTS_SPI_FLAGS_DISC_ENB) != 0) ?
34552545bca0SMatthew Dillon 			    DP_DISC_ENABLE : DP_DISC_DISABL;
34562545bca0SMatthew Dillon 		}
34572545bca0SMatthew Dillon 
34582545bca0SMatthew Dillon 		if ((scsi->valid & CTS_SCSI_VALID_TQ) != 0) {
34592545bca0SMatthew Dillon 			dval |= ((scsi->flags & CTS_SCSI_FLAGS_TAG_ENB) != 0) ?
34602545bca0SMatthew Dillon 			    DP_TQING_ENABLE : DP_TQING_DISABL;
34612545bca0SMatthew Dillon 		}
34622545bca0SMatthew Dillon 
34632545bca0SMatthew Dillon 		if ((spi->valid & CTS_SPI_VALID_BUS_WIDTH) != 0) {
34642545bca0SMatthew Dillon 			dval |= (spi->bus_width == MSG_EXT_WDTR_BUS_16_BIT) ?
34652545bca0SMatthew Dillon 			    DP_WIDE : DP_NARROW;
34662545bca0SMatthew Dillon 		}
34672545bca0SMatthew Dillon 
34682545bca0SMatthew Dillon 		if (spi->valid & CTS_SPI_VALID_SYNC_OFFSET) {
34692545bca0SMatthew Dillon 			dval |= DP_SYNC;
34702545bca0SMatthew Dillon 			offset = spi->sync_offset;
34712545bca0SMatthew Dillon 		} else {
34722545bca0SMatthew Dillon 			PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr =
34732545bca0SMatthew Dillon 			    &mpt->mpt_dev_page1[tgt];
34742545bca0SMatthew Dillon 			offset = ptr->RequestedParameters;
34752545bca0SMatthew Dillon 			offset &= MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK;
34762545bca0SMatthew Dillon 			offset >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET;
34772545bca0SMatthew Dillon 		}
34782545bca0SMatthew Dillon 		if (spi->valid & CTS_SPI_VALID_SYNC_RATE) {
34792545bca0SMatthew Dillon 			dval |= DP_SYNC;
34802545bca0SMatthew Dillon 			period = spi->sync_period;
34812545bca0SMatthew Dillon 		} else {
34822545bca0SMatthew Dillon 			PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr =
34832545bca0SMatthew Dillon 			    &mpt->mpt_dev_page1[tgt];
34842545bca0SMatthew Dillon 			period = ptr->RequestedParameters;
34852545bca0SMatthew Dillon 			period &= MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK;
34862545bca0SMatthew Dillon 			period >>= MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD;
34872545bca0SMatthew Dillon 		}
34882545bca0SMatthew Dillon 		if (dval & DP_DISC_ENABLE) {
34892545bca0SMatthew Dillon 			mpt->mpt_disc_enable |= (1 << tgt);
34902545bca0SMatthew Dillon 		} else if (dval & DP_DISC_DISABL) {
34912545bca0SMatthew Dillon 			mpt->mpt_disc_enable &= ~(1 << tgt);
34922545bca0SMatthew Dillon 		}
34932545bca0SMatthew Dillon 		if (dval & DP_TQING_ENABLE) {
34942545bca0SMatthew Dillon 			mpt->mpt_tag_enable |= (1 << tgt);
34952545bca0SMatthew Dillon 		} else if (dval & DP_TQING_DISABL) {
34962545bca0SMatthew Dillon 			mpt->mpt_tag_enable &= ~(1 << tgt);
34972545bca0SMatthew Dillon 		}
34982545bca0SMatthew Dillon 		if (dval & DP_WIDTH) {
34992545bca0SMatthew Dillon 			mpt_setwidth(mpt, tgt, 1);
35002545bca0SMatthew Dillon 		}
35012545bca0SMatthew Dillon 		if (dval & DP_SYNC) {
35022545bca0SMatthew Dillon 			mpt_setsync(mpt, tgt, period, offset);
35032545bca0SMatthew Dillon 		}
35042545bca0SMatthew Dillon 		if (dval == 0) {
35052545bca0SMatthew Dillon 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
35062545bca0SMatthew Dillon 			break;
35072545bca0SMatthew Dillon 		}
35082545bca0SMatthew Dillon 		mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
35092545bca0SMatthew Dillon 		    "set [%d]: 0x%x period 0x%x offset %d\n",
35102545bca0SMatthew Dillon 		    tgt, dval, period, offset);
35112545bca0SMatthew Dillon 		if (mpt_update_spi_config(mpt, tgt)) {
35122545bca0SMatthew Dillon 			mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
35132545bca0SMatthew Dillon 		} else {
35142545bca0SMatthew Dillon 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
35152545bca0SMatthew Dillon 		}
35162545bca0SMatthew Dillon 		break;
35172545bca0SMatthew Dillon 	}
35182545bca0SMatthew Dillon 	case XPT_GET_TRAN_SETTINGS:
35192545bca0SMatthew Dillon 	{
35202545bca0SMatthew Dillon 		struct ccb_trans_settings_scsi *scsi;
35212545bca0SMatthew Dillon 		cts = &ccb->cts;
35222545bca0SMatthew Dillon 		cts->protocol = PROTO_SCSI;
35232545bca0SMatthew Dillon 		if (mpt->is_fc) {
35242545bca0SMatthew Dillon 			struct ccb_trans_settings_fc *fc =
35252545bca0SMatthew Dillon 			    &cts->xport_specific.fc;
35262545bca0SMatthew Dillon 			cts->protocol_version = SCSI_REV_SPC;
35272545bca0SMatthew Dillon 			cts->transport = XPORT_FC;
35282545bca0SMatthew Dillon 			cts->transport_version = 0;
35292545bca0SMatthew Dillon 			fc->valid = CTS_FC_VALID_SPEED;
35302545bca0SMatthew Dillon 			fc->bitrate = 100000;
35312545bca0SMatthew Dillon 		} else if (mpt->is_sas) {
35322545bca0SMatthew Dillon 			struct ccb_trans_settings_sas *sas =
35332545bca0SMatthew Dillon 			    &cts->xport_specific.sas;
35342545bca0SMatthew Dillon 			cts->protocol_version = SCSI_REV_SPC2;
35352545bca0SMatthew Dillon 			cts->transport = XPORT_SAS;
35362545bca0SMatthew Dillon 			cts->transport_version = 0;
35372545bca0SMatthew Dillon 			sas->valid = CTS_SAS_VALID_SPEED;
35382545bca0SMatthew Dillon 			sas->bitrate = 300000;
35392545bca0SMatthew Dillon 		} else {
35402545bca0SMatthew Dillon 			cts->protocol_version = SCSI_REV_2;
35412545bca0SMatthew Dillon 			cts->transport = XPORT_SPI;
35422545bca0SMatthew Dillon 			cts->transport_version = 2;
35432545bca0SMatthew Dillon 			if (mpt_get_spi_settings(mpt, cts) != 0) {
35442545bca0SMatthew Dillon 				mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
35452545bca0SMatthew Dillon 				break;
35462545bca0SMatthew Dillon 			}
35472545bca0SMatthew Dillon 		}
35482545bca0SMatthew Dillon 		scsi = &cts->proto_specific.scsi;
35492545bca0SMatthew Dillon 		scsi->valid = CTS_SCSI_VALID_TQ;
35502545bca0SMatthew Dillon 		scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
35512545bca0SMatthew Dillon 		mpt_set_ccb_status(ccb, CAM_REQ_CMP);
35522545bca0SMatthew Dillon 		break;
35532545bca0SMatthew Dillon 	}
35542545bca0SMatthew Dillon 	case XPT_CALC_GEOMETRY:
35552545bca0SMatthew Dillon 	{
35562545bca0SMatthew Dillon 		struct ccb_calc_geometry *ccg;
35572545bca0SMatthew Dillon 
35582545bca0SMatthew Dillon 		ccg = &ccb->ccg;
35592545bca0SMatthew Dillon 		if (ccg->block_size == 0) {
35602545bca0SMatthew Dillon 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
35612545bca0SMatthew Dillon 			mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
35622545bca0SMatthew Dillon 			break;
35632545bca0SMatthew Dillon 		}
356452001f09SSascha Wildner 		cam_calc_geometry(ccg, /*extended*/1);
35654c42baf4SSascha Wildner 		KASSERT(ccb->ccb_h.status, ("zero ccb sts at %d", __LINE__));
35662545bca0SMatthew Dillon 		break;
35672545bca0SMatthew Dillon 	}
35682545bca0SMatthew Dillon 	case XPT_PATH_INQ:		/* Path routing inquiry */
35692545bca0SMatthew Dillon 	{
35702545bca0SMatthew Dillon 		struct ccb_pathinq *cpi = &ccb->cpi;
35712545bca0SMatthew Dillon 
35722545bca0SMatthew Dillon 		cpi->version_num = 1;
35732545bca0SMatthew Dillon 		cpi->target_sprt = 0;
35742545bca0SMatthew Dillon 		cpi->hba_eng_cnt = 0;
35752545bca0SMatthew Dillon 		cpi->max_target = mpt->port_facts[0].MaxDevices - 1;
35766d259fc1SSascha Wildner #if 0 /* XXX swildner */
35776d259fc1SSascha Wildner 		cpi->maxio = (mpt->max_cam_seg_cnt - 1) * PAGE_SIZE;
35786d259fc1SSascha Wildner #endif
35792545bca0SMatthew Dillon 		/*
35802545bca0SMatthew Dillon 		 * FC cards report MAX_DEVICES of 512, but
35812545bca0SMatthew Dillon 		 * the MSG_SCSI_IO_REQUEST target id field
35822545bca0SMatthew Dillon 		 * is only 8 bits. Until we fix the driver
35832545bca0SMatthew Dillon 		 * to support 'channels' for bus overflow,
35842545bca0SMatthew Dillon 		 * just limit it.
35852545bca0SMatthew Dillon 		 */
35862545bca0SMatthew Dillon 		if (cpi->max_target > 255) {
35872545bca0SMatthew Dillon 			cpi->max_target = 255;
35882545bca0SMatthew Dillon 		}
35892545bca0SMatthew Dillon 
35902545bca0SMatthew Dillon 		/*
35912545bca0SMatthew Dillon 		 * VMware ESX reports > 16 devices and then dies when we probe.
35922545bca0SMatthew Dillon 		 */
35932545bca0SMatthew Dillon 		if (mpt->is_spi && cpi->max_target > 15) {
35942545bca0SMatthew Dillon 			cpi->max_target = 15;
35952545bca0SMatthew Dillon 		}
35962545bca0SMatthew Dillon 		if (mpt->is_spi)
35972545bca0SMatthew Dillon 			cpi->max_lun = 7;
35982545bca0SMatthew Dillon 		else
35992545bca0SMatthew Dillon 			cpi->max_lun = MPT_MAX_LUNS;
36002545bca0SMatthew Dillon 		cpi->initiator_id = mpt->mpt_ini_id;
36012545bca0SMatthew Dillon 		cpi->bus_id = cam_sim_bus(sim);
36022545bca0SMatthew Dillon 
36032545bca0SMatthew Dillon 		/*
36042545bca0SMatthew Dillon 		 * The base speed is the speed of the underlying connection.
36052545bca0SMatthew Dillon 		 */
36062545bca0SMatthew Dillon 		cpi->protocol = PROTO_SCSI;
36072545bca0SMatthew Dillon 		if (mpt->is_fc) {
36082545bca0SMatthew Dillon 			cpi->hba_misc = PIM_NOBUSRESET;
36092545bca0SMatthew Dillon 			cpi->base_transfer_speed = 100000;
36102545bca0SMatthew Dillon 			cpi->hba_inquiry = PI_TAG_ABLE;
36112545bca0SMatthew Dillon 			cpi->transport = XPORT_FC;
36122545bca0SMatthew Dillon 			cpi->transport_version = 0;
36132545bca0SMatthew Dillon 			cpi->protocol_version = SCSI_REV_SPC;
36142545bca0SMatthew Dillon 		} else if (mpt->is_sas) {
36152545bca0SMatthew Dillon 			cpi->hba_misc = PIM_NOBUSRESET;
36162545bca0SMatthew Dillon 			cpi->base_transfer_speed = 300000;
36172545bca0SMatthew Dillon 			cpi->hba_inquiry = PI_TAG_ABLE;
36182545bca0SMatthew Dillon 			cpi->transport = XPORT_SAS;
36192545bca0SMatthew Dillon 			cpi->transport_version = 0;
36202545bca0SMatthew Dillon 			cpi->protocol_version = SCSI_REV_SPC2;
36212545bca0SMatthew Dillon 		} else {
36222545bca0SMatthew Dillon 			cpi->hba_misc = PIM_SEQSCAN;
36232545bca0SMatthew Dillon 			cpi->base_transfer_speed = 3300;
36242545bca0SMatthew Dillon 			cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
36252545bca0SMatthew Dillon 			cpi->transport = XPORT_SPI;
36262545bca0SMatthew Dillon 			cpi->transport_version = 2;
36272545bca0SMatthew Dillon 			cpi->protocol_version = SCSI_REV_2;
36282545bca0SMatthew Dillon 		}
36292545bca0SMatthew Dillon 
36302545bca0SMatthew Dillon 		/*
36312545bca0SMatthew Dillon 		 * We give our fake RAID passthru bus a width that is MaxVolumes
36322545bca0SMatthew Dillon 		 * wide and restrict it to one lun.
36332545bca0SMatthew Dillon 		 */
36342545bca0SMatthew Dillon 		if (raid_passthru) {
36352545bca0SMatthew Dillon 			cpi->max_target = mpt->ioc_page2->MaxPhysDisks - 1;
36362545bca0SMatthew Dillon 			cpi->initiator_id = cpi->max_target + 1;
36372545bca0SMatthew Dillon 			cpi->max_lun = 0;
36382545bca0SMatthew Dillon 		}
36392545bca0SMatthew Dillon 
36402545bca0SMatthew Dillon 		if ((mpt->role & MPT_ROLE_INITIATOR) == 0) {
36412545bca0SMatthew Dillon 			cpi->hba_misc |= PIM_NOINITIATOR;
36422545bca0SMatthew Dillon 		}
36432545bca0SMatthew Dillon 		if (mpt->is_fc && (mpt->role & MPT_ROLE_TARGET)) {
36442545bca0SMatthew Dillon 			cpi->target_sprt =
36452545bca0SMatthew Dillon 			    PIT_PROCESSOR | PIT_DISCONNECT | PIT_TERM_IO;
36462545bca0SMatthew Dillon 		} else {
36472545bca0SMatthew Dillon 			cpi->target_sprt = 0;
36482545bca0SMatthew Dillon 		}
36492545bca0SMatthew Dillon 		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
36502545bca0SMatthew Dillon 		strncpy(cpi->hba_vid, "LSI", HBA_IDLEN);
36512545bca0SMatthew Dillon 		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
36522545bca0SMatthew Dillon 		cpi->unit_number = cam_sim_unit(sim);
36532545bca0SMatthew Dillon 		cpi->ccb_h.status = CAM_REQ_CMP;
36542545bca0SMatthew Dillon 		break;
36552545bca0SMatthew Dillon 	}
36562545bca0SMatthew Dillon 	case XPT_EN_LUN:		/* Enable LUN as a target */
36572545bca0SMatthew Dillon 	{
36582545bca0SMatthew Dillon 		int result;
36592545bca0SMatthew Dillon 
36602545bca0SMatthew Dillon 		if (ccb->cel.enable)
36612545bca0SMatthew Dillon 			result = mpt_enable_lun(mpt,
36622545bca0SMatthew Dillon 			    ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
36632545bca0SMatthew Dillon 		else
36642545bca0SMatthew Dillon 			result = mpt_disable_lun(mpt,
36652545bca0SMatthew Dillon 			    ccb->ccb_h.target_id, ccb->ccb_h.target_lun);
36662545bca0SMatthew Dillon 		if (result == 0) {
36672545bca0SMatthew Dillon 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
36682545bca0SMatthew Dillon 		} else {
36692545bca0SMatthew Dillon 			mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
36702545bca0SMatthew Dillon 		}
36712545bca0SMatthew Dillon 		break;
36722545bca0SMatthew Dillon 	}
36732545bca0SMatthew Dillon 	case XPT_NOTIFY_ACK:		/* recycle notify ack */
36742545bca0SMatthew Dillon 	case XPT_IMMED_NOTIFY:		/* Add Immediate Notify Resource */
36752545bca0SMatthew Dillon 	case XPT_ACCEPT_TARGET_IO:	/* Add Accept Target IO Resource */
36762545bca0SMatthew Dillon 	{
36772545bca0SMatthew Dillon 		tgt_resource_t *trtp;
36782545bca0SMatthew Dillon 		lun_id_t lun = ccb->ccb_h.target_lun;
36792545bca0SMatthew Dillon 		ccb->ccb_h.sim_priv.entries[0].field = 0;
36802545bca0SMatthew Dillon 		ccb->ccb_h.sim_priv.entries[1].ptr = mpt;
36812545bca0SMatthew Dillon 		ccb->ccb_h.flags = 0;
36822545bca0SMatthew Dillon 
36832545bca0SMatthew Dillon 		if (lun == CAM_LUN_WILDCARD) {
36842545bca0SMatthew Dillon 			if (ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) {
36852545bca0SMatthew Dillon 				mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
36862545bca0SMatthew Dillon 				break;
36872545bca0SMatthew Dillon 			}
36882545bca0SMatthew Dillon 			trtp = &mpt->trt_wildcard;
36892545bca0SMatthew Dillon 		} else if (lun >= MPT_MAX_LUNS) {
36902545bca0SMatthew Dillon 			mpt_set_ccb_status(ccb, CAM_REQ_INVALID);
36912545bca0SMatthew Dillon 			break;
36922545bca0SMatthew Dillon 		} else {
36932545bca0SMatthew Dillon 			trtp = &mpt->trt[lun];
36942545bca0SMatthew Dillon 		}
36952545bca0SMatthew Dillon 		if (ccb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
36962545bca0SMatthew Dillon 			mpt_lprt(mpt, MPT_PRT_DEBUG1,
36972545bca0SMatthew Dillon 			    "Put FREE ATIO %p lun %d\n", ccb, lun);
36982545bca0SMatthew Dillon 			STAILQ_INSERT_TAIL(&trtp->atios, &ccb->ccb_h,
36992545bca0SMatthew Dillon 			    sim_links.stqe);
37002545bca0SMatthew Dillon 		} else if (ccb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
37012545bca0SMatthew Dillon 			mpt_lprt(mpt, MPT_PRT_DEBUG1,
37022545bca0SMatthew Dillon 			    "Put FREE INOT lun %d\n", lun);
37032545bca0SMatthew Dillon 			STAILQ_INSERT_TAIL(&trtp->inots, &ccb->ccb_h,
37042545bca0SMatthew Dillon 			    sim_links.stqe);
37052545bca0SMatthew Dillon 		} else {
37062545bca0SMatthew Dillon 			mpt_lprt(mpt, MPT_PRT_ALWAYS, "Got Notify ACK\n");
37072545bca0SMatthew Dillon 		}
37082545bca0SMatthew Dillon 		mpt_set_ccb_status(ccb, CAM_REQ_INPROG);
37092545bca0SMatthew Dillon 		return;
37102545bca0SMatthew Dillon 	}
37112545bca0SMatthew Dillon 	case XPT_CONT_TARGET_IO:
37122545bca0SMatthew Dillon 		mpt_target_start_io(mpt, ccb);
37132545bca0SMatthew Dillon 		return;
37142545bca0SMatthew Dillon 
37152545bca0SMatthew Dillon 	default:
37162545bca0SMatthew Dillon 		ccb->ccb_h.status = CAM_REQ_INVALID;
37172545bca0SMatthew Dillon 		break;
37182545bca0SMatthew Dillon 	}
37192545bca0SMatthew Dillon 	xpt_done(ccb);
37202545bca0SMatthew Dillon }
37212545bca0SMatthew Dillon 
37222545bca0SMatthew Dillon static int
37232545bca0SMatthew Dillon mpt_get_spi_settings(struct mpt_softc *mpt, struct ccb_trans_settings *cts)
37242545bca0SMatthew Dillon {
37252545bca0SMatthew Dillon 	struct ccb_trans_settings_scsi *scsi = &cts->proto_specific.scsi;
37262545bca0SMatthew Dillon 	struct ccb_trans_settings_spi *spi = &cts->xport_specific.spi;
37272545bca0SMatthew Dillon 	target_id_t tgt;
37282545bca0SMatthew Dillon 	uint32_t dval, pval, oval;
37292545bca0SMatthew Dillon 	int rv;
37302545bca0SMatthew Dillon 
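	/*
	 * Figure out which target the settings refer to.  For current
	 * settings on the RAID passthrough SIM, the CAM target id is
	 * first mapped to the underlying physical disk id.
	 */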
37312545bca0SMatthew Dillon 	if (IS_CURRENT_SETTINGS(cts) == 0) {
37322545bca0SMatthew Dillon 		tgt = cts->ccb_h.target_id;
37332545bca0SMatthew Dillon 	} else if (xpt_path_sim(cts->ccb_h.path) == mpt->phydisk_sim) {
37342545bca0SMatthew Dillon 		if (mpt_map_physdisk(mpt, (union ccb *)cts, &tgt)) {
37352545bca0SMatthew Dillon 			return (-1);
37362545bca0SMatthew Dillon 		}
37372545bca0SMatthew Dillon 	} else {
37382545bca0SMatthew Dillon 		tgt = cts->ccb_h.target_id;
37392545bca0SMatthew Dillon 	}
37402545bca0SMatthew Dillon 
37412545bca0SMatthew Dillon 	/*
37422545bca0SMatthew Dillon 	 * We aren't looking at Port Page 2 BIOS settings here;
37432545bca0SMatthew Dillon 	 * sometimes these have been known to be bogus XXX.
37442545bca0SMatthew Dillon 	 *
37452545bca0SMatthew Dillon 	 * For user settings, we pick the max from port page 0
37462545bca0SMatthew Dillon 	 *
37472545bca0SMatthew Dillon 	 * For current settings we read the current settings out from
37482545bca0SMatthew Dillon 	 * device page 0 for that target.
37492545bca0SMatthew Dillon 	 */
37502545bca0SMatthew Dillon 	if (IS_CURRENT_SETTINGS(cts)) {
37512545bca0SMatthew Dillon 		CONFIG_PAGE_SCSI_DEVICE_0 tmp;
37522545bca0SMatthew Dillon 		dval = 0;
37532545bca0SMatthew Dillon 
37542545bca0SMatthew Dillon 		tmp = mpt->mpt_dev_page0[tgt];
37552545bca0SMatthew Dillon 		rv = mpt_read_cur_cfg_page(mpt, tgt, &tmp.Header,
37562545bca0SMatthew Dillon 		    sizeof(tmp), FALSE, 5000);
37572545bca0SMatthew Dillon 		if (rv) {
37582545bca0SMatthew Dillon 			mpt_prt(mpt, "can't get tgt %d config page 0\n", tgt);
37592545bca0SMatthew Dillon 			return (rv);
37602545bca0SMatthew Dillon 		}
37612545bca0SMatthew Dillon 		mpt2host_config_page_scsi_device_0(&tmp);
37622545bca0SMatthew Dillon 
37632545bca0SMatthew Dillon 		mpt_lprt(mpt, MPT_PRT_DEBUG,
37646d259fc1SSascha Wildner 		    "mpt_get_spi_settings[%d]: current NP %x Info %x\n", tgt,
37656d259fc1SSascha Wildner 		    tmp.NegotiatedParameters, tmp.Information);
37662545bca0SMatthew Dillon 		dval |= (tmp.NegotiatedParameters & MPI_SCSIDEVPAGE0_NP_WIDE) ?
37672545bca0SMatthew Dillon 		    DP_WIDE : DP_NARROW;
37682545bca0SMatthew Dillon 		dval |= (mpt->mpt_disc_enable & (1 << tgt)) ?
37692545bca0SMatthew Dillon 		    DP_DISC_ENABLE : DP_DISC_DISABL;
37702545bca0SMatthew Dillon 		dval |= (mpt->mpt_tag_enable & (1 << tgt)) ?
37712545bca0SMatthew Dillon 		    DP_TQING_ENABLE : DP_TQING_DISABL;
37722545bca0SMatthew Dillon 		oval = tmp.NegotiatedParameters;
37732545bca0SMatthew Dillon 		oval &= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_OFFSET_MASK;
37742545bca0SMatthew Dillon 		oval >>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_OFFSET;
37752545bca0SMatthew Dillon 		pval = tmp.NegotiatedParameters;
37762545bca0SMatthew Dillon 		pval &= MPI_SCSIDEVPAGE0_NP_NEG_SYNC_PERIOD_MASK;
37772545bca0SMatthew Dillon 		pval >>= MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_PERIOD;
37782545bca0SMatthew Dillon 		mpt->mpt_dev_page0[tgt] = tmp;
37792545bca0SMatthew Dillon 	} else {
37802545bca0SMatthew Dillon 		dval = DP_WIDE|DP_DISC_ENABLE|DP_TQING_ENABLE|DP_SYNC;
37812545bca0SMatthew Dillon 		oval = mpt->mpt_port_page0.Capabilities;
37822545bca0SMatthew Dillon 		oval = MPI_SCSIPORTPAGE0_CAP_GET_MAX_SYNC_OFFSET(oval);
37832545bca0SMatthew Dillon 		pval = mpt->mpt_port_page0.Capabilities;
37842545bca0SMatthew Dillon 		pval = MPI_SCSIPORTPAGE0_CAP_GET_MIN_SYNC_PERIOD(pval);
37852545bca0SMatthew Dillon 	}
37862545bca0SMatthew Dillon 
37872545bca0SMatthew Dillon 	spi->valid = 0;
37882545bca0SMatthew Dillon 	scsi->valid = 0;
37892545bca0SMatthew Dillon 	spi->flags = 0;
37902545bca0SMatthew Dillon 	scsi->flags = 0;
37912545bca0SMatthew Dillon 	spi->sync_offset = oval;
37922545bca0SMatthew Dillon 	spi->sync_period = pval;
37932545bca0SMatthew Dillon 	spi->valid |= CTS_SPI_VALID_SYNC_OFFSET;
37942545bca0SMatthew Dillon 	spi->valid |= CTS_SPI_VALID_SYNC_RATE;
37952545bca0SMatthew Dillon 	spi->valid |= CTS_SPI_VALID_BUS_WIDTH;
37962545bca0SMatthew Dillon 	if (dval & DP_WIDE) {
37972545bca0SMatthew Dillon 		spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
37982545bca0SMatthew Dillon 	} else {
37992545bca0SMatthew Dillon 		spi->bus_width = MSG_EXT_WDTR_BUS_8_BIT;
38002545bca0SMatthew Dillon 	}
38012545bca0SMatthew Dillon 	if (cts->ccb_h.target_lun != CAM_LUN_WILDCARD) {
38022545bca0SMatthew Dillon 		scsi->valid = CTS_SCSI_VALID_TQ;
38032545bca0SMatthew Dillon 		if (dval & DP_TQING_ENABLE) {
38042545bca0SMatthew Dillon 			scsi->flags |= CTS_SCSI_FLAGS_TAG_ENB;
38052545bca0SMatthew Dillon 		}
38062545bca0SMatthew Dillon 		spi->valid |= CTS_SPI_VALID_DISC;
38072545bca0SMatthew Dillon 		if (dval & DP_DISC_ENABLE) {
38082545bca0SMatthew Dillon 			spi->flags |= CTS_SPI_FLAGS_DISC_ENB;
38092545bca0SMatthew Dillon 		}
38102545bca0SMatthew Dillon 	}
38112545bca0SMatthew Dillon 	mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
38122545bca0SMatthew Dillon 	    "mpt_get_spi_settings[%d]: %s flags 0x%x per 0x%x off=%d\n", tgt,
38132545bca0SMatthew Dillon 	    IS_CURRENT_SETTINGS(cts) ? "ACTIVE" : "NVRAM ", dval, pval, oval);
38142545bca0SMatthew Dillon 	return (0);
38152545bca0SMatthew Dillon }
38162545bca0SMatthew Dillon 
38172545bca0SMatthew Dillon static void
38182545bca0SMatthew Dillon mpt_setwidth(struct mpt_softc *mpt, int tgt, int onoff)
38192545bca0SMatthew Dillon {
38202545bca0SMatthew Dillon 	PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr;
38212545bca0SMatthew Dillon 
38222545bca0SMatthew Dillon 	ptr = &mpt->mpt_dev_page1[tgt];
38232545bca0SMatthew Dillon 	if (onoff) {
38242545bca0SMatthew Dillon 		ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_WIDE;
38252545bca0SMatthew Dillon 	} else {
38262545bca0SMatthew Dillon 		ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_WIDE;
38272545bca0SMatthew Dillon 	}
38282545bca0SMatthew Dillon }
38292545bca0SMatthew Dillon 
38302545bca0SMatthew Dillon static void
38312545bca0SMatthew Dillon mpt_setsync(struct mpt_softc *mpt, int tgt, int period, int offset)
38322545bca0SMatthew Dillon {
38332545bca0SMatthew Dillon 	PTR_CONFIG_PAGE_SCSI_DEVICE_1 ptr;
38342545bca0SMatthew Dillon 
38352545bca0SMatthew Dillon 	ptr = &mpt->mpt_dev_page1[tgt];
38362545bca0SMatthew Dillon 	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK;
38372545bca0SMatthew Dillon 	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK;
38382545bca0SMatthew Dillon 	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_DT;
38392545bca0SMatthew Dillon 	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_QAS;
38402545bca0SMatthew Dillon 	ptr->RequestedParameters &= ~MPI_SCSIDEVPAGE1_RP_IU;
38412545bca0SMatthew Dillon 	if (period == 0) {
38422545bca0SMatthew Dillon 		return;
38432545bca0SMatthew Dillon 	}
38442545bca0SMatthew Dillon 	ptr->RequestedParameters |=
38452545bca0SMatthew Dillon 	    period << MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD;
38462545bca0SMatthew Dillon 	ptr->RequestedParameters |=
38472545bca0SMatthew Dillon 	    offset << MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET;
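	/*
	 * Faster sync period factors get additional features turned on:
	 * below 0xa request DT clocking, and below 0x9 also request QAS
	 * and IU (packetized) operation.
	 */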
38482545bca0SMatthew Dillon 	if (period < 0xa) {
38492545bca0SMatthew Dillon 		ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_DT;
38502545bca0SMatthew Dillon 	}
38512545bca0SMatthew Dillon 	if (period < 0x9) {
38522545bca0SMatthew Dillon 		ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_QAS;
38532545bca0SMatthew Dillon 		ptr->RequestedParameters |= MPI_SCSIDEVPAGE1_RP_IU;
38542545bca0SMatthew Dillon 	}
38552545bca0SMatthew Dillon }
38562545bca0SMatthew Dillon 
38572545bca0SMatthew Dillon static int
38582545bca0SMatthew Dillon mpt_update_spi_config(struct mpt_softc *mpt, int tgt)
38592545bca0SMatthew Dillon {
38602545bca0SMatthew Dillon 	CONFIG_PAGE_SCSI_DEVICE_1 tmp;
38612545bca0SMatthew Dillon 	int rv;
38622545bca0SMatthew Dillon 
38632545bca0SMatthew Dillon 	mpt_lprt(mpt, MPT_PRT_NEGOTIATION,
38642545bca0SMatthew Dillon 	    "mpt_update_spi_config[%d].page1: Requested Params 0x%08x\n",
38656d259fc1SSascha Wildner 	    tgt, mpt->mpt_dev_page1[tgt].RequestedParameters);
38662545bca0SMatthew Dillon 	tmp = mpt->mpt_dev_page1[tgt];
38672545bca0SMatthew Dillon 	host2mpt_config_page_scsi_device_1(&tmp);
38682545bca0SMatthew Dillon 	rv = mpt_write_cur_cfg_page(mpt, tgt,
38692545bca0SMatthew Dillon 	    &tmp.Header, sizeof(tmp), FALSE, 5000);
38702545bca0SMatthew Dillon 	if (rv) {
38712545bca0SMatthew Dillon 		mpt_prt(mpt, "mpt_update_spi_config: write cur page failed\n");
38722545bca0SMatthew Dillon 		return (-1);
38732545bca0SMatthew Dillon 	}
38742545bca0SMatthew Dillon 	return (0);
38752545bca0SMatthew Dillon }
38762545bca0SMatthew Dillon 
38772545bca0SMatthew Dillon /****************************** Timeout Recovery ******************************/
38782545bca0SMatthew Dillon static int
38792545bca0SMatthew Dillon mpt_spawn_recovery_thread(struct mpt_softc *mpt)
38802545bca0SMatthew Dillon {
38812545bca0SMatthew Dillon 	int error;
38822545bca0SMatthew Dillon 
3883f582582cSSascha Wildner 	error = kthread_create(mpt_recovery_thread, mpt,
3884f582582cSSascha Wildner 	    &mpt->recovery_thread, "mpt_recovery%d", mpt->unit);
38852545bca0SMatthew Dillon 	return (error);
38862545bca0SMatthew Dillon }
38872545bca0SMatthew Dillon 
38882545bca0SMatthew Dillon static void
38892545bca0SMatthew Dillon mpt_terminate_recovery_thread(struct mpt_softc *mpt)
38902545bca0SMatthew Dillon {
38914c42baf4SSascha Wildner 
38922545bca0SMatthew Dillon 	if (mpt->recovery_thread == NULL) {
38932545bca0SMatthew Dillon 		return;
38942545bca0SMatthew Dillon 	}
38952545bca0SMatthew Dillon 	mpt->shutdwn_recovery = 1;
38962545bca0SMatthew Dillon 	wakeup(mpt);
38972545bca0SMatthew Dillon 	/*
38982545bca0SMatthew Dillon 	 * Sleep on a slightly different location
38992545bca0SMatthew Dillon 	 * for this interlock just for added safety.
39002545bca0SMatthew Dillon 	 */
39016d259fc1SSascha Wildner 	mpt_sleep(mpt, &mpt->recovery_thread, 0, "thtrm", 0);
39022545bca0SMatthew Dillon }
39032545bca0SMatthew Dillon 
39042545bca0SMatthew Dillon static void
39052545bca0SMatthew Dillon mpt_recovery_thread(void *arg)
39062545bca0SMatthew Dillon {
39072545bca0SMatthew Dillon 	struct mpt_softc *mpt;
39082545bca0SMatthew Dillon 
39092545bca0SMatthew Dillon 	mpt = (struct mpt_softc *)arg;
39102545bca0SMatthew Dillon 	MPT_LOCK(mpt);
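	/*
	 * Sleep until timed-out requests appear on the timeout list or a
	 * shutdown is requested, then run a recovery pass over them.
	 */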
39112545bca0SMatthew Dillon 	for (;;) {
39122545bca0SMatthew Dillon 		if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
39132545bca0SMatthew Dillon 			if (mpt->shutdwn_recovery == 0) {
39146d259fc1SSascha Wildner 				mpt_sleep(mpt, mpt, 0, "idle", 0);
39152545bca0SMatthew Dillon 			}
39162545bca0SMatthew Dillon 		}
39172545bca0SMatthew Dillon 		if (mpt->shutdwn_recovery != 0) {
39182545bca0SMatthew Dillon 			break;
39192545bca0SMatthew Dillon 		}
39202545bca0SMatthew Dillon 		mpt_recover_commands(mpt);
39212545bca0SMatthew Dillon 	}
39222545bca0SMatthew Dillon 	mpt->recovery_thread = NULL;
39232545bca0SMatthew Dillon 	wakeup(&mpt->recovery_thread);
39242545bca0SMatthew Dillon 	MPT_UNLOCK(mpt);
3925f582582cSSascha Wildner 	kthread_exit();
39262545bca0SMatthew Dillon }
39272545bca0SMatthew Dillon 
39282545bca0SMatthew Dillon static int
39292545bca0SMatthew Dillon mpt_scsi_send_tmf(struct mpt_softc *mpt, u_int type, u_int flags,
39302545bca0SMatthew Dillon     u_int channel, u_int target, u_int lun, u_int abort_ctx, int sleep_ok)
39312545bca0SMatthew Dillon {
39322545bca0SMatthew Dillon 	MSG_SCSI_TASK_MGMT *tmf_req;
39332545bca0SMatthew Dillon 	int		    error;
39342545bca0SMatthew Dillon 
39352545bca0SMatthew Dillon 	/*
39362545bca0SMatthew Dillon 	 * Wait for any current TMF request to complete.
39372545bca0SMatthew Dillon 	 * We're only allowed to issue one TMF at a time.
39382545bca0SMatthew Dillon 	 */
39392545bca0SMatthew Dillon 	error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_FREE, REQ_STATE_FREE,
39402545bca0SMatthew Dillon 	    sleep_ok, MPT_TMF_MAX_TIMEOUT);
39412545bca0SMatthew Dillon 	if (error != 0) {
39422545bca0SMatthew Dillon 		mpt_reset(mpt, TRUE);
39432545bca0SMatthew Dillon 		return (ETIMEDOUT);
39442545bca0SMatthew Dillon 	}
39452545bca0SMatthew Dillon 
39462545bca0SMatthew Dillon 	mpt_assign_serno(mpt, mpt->tmf_req);
39472545bca0SMatthew Dillon 	mpt->tmf_req->state = REQ_STATE_ALLOCATED|REQ_STATE_QUEUED;
39482545bca0SMatthew Dillon 
39492545bca0SMatthew Dillon 	tmf_req = (MSG_SCSI_TASK_MGMT *)mpt->tmf_req->req_vbuf;
39502545bca0SMatthew Dillon 	memset(tmf_req, 0, sizeof(*tmf_req));
39512545bca0SMatthew Dillon 	tmf_req->TargetID = target;
39522545bca0SMatthew Dillon 	tmf_req->Bus = channel;
39532545bca0SMatthew Dillon 	tmf_req->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
39542545bca0SMatthew Dillon 	tmf_req->TaskType = type;
39552545bca0SMatthew Dillon 	tmf_req->MsgFlags = flags;
39562545bca0SMatthew Dillon 	tmf_req->MsgContext =
39572545bca0SMatthew Dillon 	    htole32(mpt->tmf_req->index | scsi_tmf_handler_id);
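	/*
	 * Encode the LUN; larger LUNs use what appears to be the SAM
	 * flat-space addressing format (0x40 in the first level byte),
	 * smaller ones the simple single-byte form.
	 */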
39582545bca0SMatthew Dillon 	if (lun > MPT_MAX_LUNS) {
39592545bca0SMatthew Dillon 		tmf_req->LUN[0] = 0x40 | ((lun >> 8) & 0x3f);
39602545bca0SMatthew Dillon 		tmf_req->LUN[1] = lun & 0xff;
39612545bca0SMatthew Dillon 	} else {
39622545bca0SMatthew Dillon 		tmf_req->LUN[1] = lun;
39632545bca0SMatthew Dillon 	}
39642545bca0SMatthew Dillon 	tmf_req->TaskMsgContext = abort_ctx;
39652545bca0SMatthew Dillon 
39662545bca0SMatthew Dillon 	mpt_lprt(mpt, MPT_PRT_DEBUG,
39676d259fc1SSascha Wildner 	    "Issuing TMF %p:%u with MsgContext of 0x%x\n", mpt->tmf_req,
39686d259fc1SSascha Wildner 	    mpt->tmf_req->serno, tmf_req->MsgContext);
39692545bca0SMatthew Dillon 	if (mpt->verbose > MPT_PRT_DEBUG) {
39702545bca0SMatthew Dillon 		mpt_print_request(tmf_req);
39712545bca0SMatthew Dillon 	}
39722545bca0SMatthew Dillon 
39732545bca0SMatthew Dillon 	KASSERT(mpt_req_on_pending_list(mpt, mpt->tmf_req) == 0,
39742545bca0SMatthew Dillon 	    ("mpt_scsi_send_tmf: tmf_req already on pending list"));
39752545bca0SMatthew Dillon 	TAILQ_INSERT_HEAD(&mpt->request_pending_list, mpt->tmf_req, links);
39762545bca0SMatthew Dillon 	error = mpt_send_handshake_cmd(mpt, sizeof(*tmf_req), tmf_req);
39772545bca0SMatthew Dillon 	if (error != MPT_OK) {
39782545bca0SMatthew Dillon 		TAILQ_REMOVE(&mpt->request_pending_list, mpt->tmf_req, links);
39792545bca0SMatthew Dillon 		mpt->tmf_req->state = REQ_STATE_FREE;
39802545bca0SMatthew Dillon 		mpt_reset(mpt, TRUE);
39812545bca0SMatthew Dillon 	}
39822545bca0SMatthew Dillon 	return (error);
39832545bca0SMatthew Dillon }
39842545bca0SMatthew Dillon 
39852545bca0SMatthew Dillon /*
39862545bca0SMatthew Dillon  * When a command times out, it is placed on the request_timeout_list
39872545bca0SMatthew Dillon  * and we wake our recovery thread.  The MPT-Fusion architecture supports
39882545bca0SMatthew Dillon  * only a single TMF operation at a time, so we serially abort/bdr, etc,
39892545bca0SMatthew Dillon  * the timedout transactions.  The next TMF is issued either by the
39902545bca0SMatthew Dillon  * completion handler of the current TMF waking our recovery thread,
39912545bca0SMatthew Dillon  * or the TMF timeout handler causing a hard reset sequence.
39922545bca0SMatthew Dillon  */
39932545bca0SMatthew Dillon static void
39942545bca0SMatthew Dillon mpt_recover_commands(struct mpt_softc *mpt)
39952545bca0SMatthew Dillon {
39962545bca0SMatthew Dillon 	request_t	   *req;
39972545bca0SMatthew Dillon 	union ccb	   *ccb;
39982545bca0SMatthew Dillon 	int		    error;
39992545bca0SMatthew Dillon 
40002545bca0SMatthew Dillon 	if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
40012545bca0SMatthew Dillon 		/*
40022545bca0SMatthew Dillon 		 * No work to do- leave.
40032545bca0SMatthew Dillon 		 */
40042545bca0SMatthew Dillon 		mpt_prt(mpt, "mpt_recover_commands: no requests.\n");
40052545bca0SMatthew Dillon 		return;
40062545bca0SMatthew Dillon 	}
40072545bca0SMatthew Dillon 
40082545bca0SMatthew Dillon 	/*
40092545bca0SMatthew Dillon 	 * Flush any commands whose completion coincides with their timeout.
40102545bca0SMatthew Dillon 	 */
40112545bca0SMatthew Dillon 	mpt_intr(mpt);
40122545bca0SMatthew Dillon 
40132545bca0SMatthew Dillon 	if (TAILQ_EMPTY(&mpt->request_timeout_list) != 0) {
40142545bca0SMatthew Dillon 		/*
40152545bca0SMatthew Dillon 		 * The timedout commands have already
40162545bca0SMatthew Dillon 		 * completed.  This typically means
40172545bca0SMatthew Dillon 		 * that either the timeout value was on
40182545bca0SMatthew Dillon 		 * the hairy edge of what the device
40192545bca0SMatthew Dillon 		 * requires or - more likely - interrupts
40202545bca0SMatthew Dillon 		 * are not happening.
40212545bca0SMatthew Dillon 		 */
40222545bca0SMatthew Dillon 		mpt_prt(mpt, "Timedout requests already complete. "
40232545bca0SMatthew Dillon 		    "Interrupts may not be functioning.\n");
40242545bca0SMatthew Dillon 		mpt_enable_ints(mpt);
40252545bca0SMatthew Dillon 		return;
40262545bca0SMatthew Dillon 	}
40272545bca0SMatthew Dillon 
40282545bca0SMatthew Dillon 	/*
40292545bca0SMatthew Dillon 	 * We have no visibility into the current state of the
40302545bca0SMatthew Dillon 	 * controller, so attempt to abort the commands in the
40312545bca0SMatthew Dillon 	 * order they timed-out. For initiator commands, we
40322545bca0SMatthew Dillon 	 * depend on the reply handler pulling requests off
40332545bca0SMatthew Dillon 	 * the timeout list.
40342545bca0SMatthew Dillon 	 */
40352545bca0SMatthew Dillon 	while ((req = TAILQ_FIRST(&mpt->request_timeout_list)) != NULL) {
40362545bca0SMatthew Dillon 		uint16_t status;
40372545bca0SMatthew Dillon 		uint8_t response;
40382545bca0SMatthew Dillon 		MSG_REQUEST_HEADER *hdrp = req->req_vbuf;
40392545bca0SMatthew Dillon 
40402545bca0SMatthew Dillon 		mpt_prt(mpt, "attempting to abort req %p:%u function %x\n",
40412545bca0SMatthew Dillon 		    req, req->serno, hdrp->Function);
40422545bca0SMatthew Dillon 		ccb = req->ccb;
40432545bca0SMatthew Dillon 		if (ccb == NULL) {
40442545bca0SMatthew Dillon 			mpt_prt(mpt, "null ccb in timed out request. "
40452545bca0SMatthew Dillon 			    "Resetting Controller.\n");
40462545bca0SMatthew Dillon 			mpt_reset(mpt, TRUE);
40472545bca0SMatthew Dillon 			continue;
40482545bca0SMatthew Dillon 		}
40492545bca0SMatthew Dillon 		mpt_set_ccb_status(ccb, CAM_CMD_TIMEOUT);
40502545bca0SMatthew Dillon 
40512545bca0SMatthew Dillon 		/*
40522545bca0SMatthew Dillon 		 * Check to see whether this is an initiator command and,
40532545bca0SMatthew Dillon 		 * if it is not, deal with it differently.
40542545bca0SMatthew Dillon 		 */
40552545bca0SMatthew Dillon 		switch (hdrp->Function) {
40562545bca0SMatthew Dillon 		case MPI_FUNCTION_SCSI_IO_REQUEST:
40572545bca0SMatthew Dillon 		case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
40582545bca0SMatthew Dillon 			break;
40592545bca0SMatthew Dillon 		default:
40602545bca0SMatthew Dillon 			/*
40612545bca0SMatthew Dillon 			 * XXX: FIX ME: need to abort target assists...
40622545bca0SMatthew Dillon 			 */
40632545bca0SMatthew Dillon 			mpt_prt(mpt, "just putting it back on the pend q\n");
40642545bca0SMatthew Dillon 			TAILQ_REMOVE(&mpt->request_timeout_list, req, links);
40652545bca0SMatthew Dillon 			TAILQ_INSERT_HEAD(&mpt->request_pending_list, req,
40662545bca0SMatthew Dillon 			    links);
40672545bca0SMatthew Dillon 			continue;
40682545bca0SMatthew Dillon 		}
40692545bca0SMatthew Dillon 
40702545bca0SMatthew Dillon 		error = mpt_scsi_send_tmf(mpt,
40712545bca0SMatthew Dillon 		    MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
40722545bca0SMatthew Dillon 		    0, 0, ccb->ccb_h.target_id, ccb->ccb_h.target_lun,
40732545bca0SMatthew Dillon 		    htole32(req->index | scsi_io_handler_id), TRUE);
40742545bca0SMatthew Dillon 
40752545bca0SMatthew Dillon 		if (error != 0) {
40762545bca0SMatthew Dillon 			/*
40772545bca0SMatthew Dillon 			 * mpt_scsi_send_tmf hard resets on failure, so no
40782545bca0SMatthew Dillon 			 * need to do so here.  Our queue should be emptied
40792545bca0SMatthew Dillon 			 * by the hard reset.
40802545bca0SMatthew Dillon 			 */
40812545bca0SMatthew Dillon 			continue;
40822545bca0SMatthew Dillon 		}
40832545bca0SMatthew Dillon 
40842545bca0SMatthew Dillon 		error = mpt_wait_req(mpt, mpt->tmf_req, REQ_STATE_DONE,
40852545bca0SMatthew Dillon 		    REQ_STATE_DONE, TRUE, 500);
40862545bca0SMatthew Dillon 
40872545bca0SMatthew Dillon 		status = le16toh(mpt->tmf_req->IOCStatus);
40882545bca0SMatthew Dillon 		response = mpt->tmf_req->ResponseCode;
40892545bca0SMatthew Dillon 		mpt->tmf_req->state = REQ_STATE_FREE;
40902545bca0SMatthew Dillon 
40912545bca0SMatthew Dillon 		if (error != 0) {
40922545bca0SMatthew Dillon 			/*
40932545bca0SMatthew Dillon 			 * If we've errored out, reset the controller.
40942545bca0SMatthew Dillon 			 */
40952545bca0SMatthew Dillon 			mpt_prt(mpt, "mpt_recover_commands: abort timed-out. "
40962545bca0SMatthew Dillon 			    "Resetting controller\n");
40972545bca0SMatthew Dillon 			mpt_reset(mpt, TRUE);
40982545bca0SMatthew Dillon 			continue;
40992545bca0SMatthew Dillon 		}
41002545bca0SMatthew Dillon 
41012545bca0SMatthew Dillon 		if ((status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
41022545bca0SMatthew Dillon 			mpt_prt(mpt, "mpt_recover_commands: IOC Status 0x%x. "
41032545bca0SMatthew Dillon 			    "Resetting controller.\n", status);
41042545bca0SMatthew Dillon 			mpt_reset(mpt, TRUE);
41052545bca0SMatthew Dillon 			continue;
41062545bca0SMatthew Dillon 		}
41072545bca0SMatthew Dillon 
41082545bca0SMatthew Dillon 		if (response != MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED &&
41092545bca0SMatthew Dillon 		    response != MPI_SCSITASKMGMT_RSP_TM_COMPLETE) {
41102545bca0SMatthew Dillon 			mpt_prt(mpt, "mpt_recover_commands: TMF Response 0x%x. "
41112545bca0SMatthew Dillon 			    "Resetting controller.\n", response);
41122545bca0SMatthew Dillon 			mpt_reset(mpt, TRUE);
41132545bca0SMatthew Dillon 			continue;
41142545bca0SMatthew Dillon 		}
41152545bca0SMatthew Dillon 		mpt_prt(mpt, "abort of req %p:%u completed\n", req, req->serno);
41162545bca0SMatthew Dillon 	}
41172545bca0SMatthew Dillon }
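
/*
 * Illustrative sketch (not part of the driver): how the recovery loop in
 * mpt_recover_commands() above classifies a completed task-management
 * request.  Only constants already used by that loop appear here; the
 * helper itself is hypothetical.
 */
static int __unused
example_tmf_succeeded(uint16_t ioc_status, uint8_t response)
{

	if ((ioc_status & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
		/* The IOC rejected or failed the task management request. */
		return (0);
	}
	/* The IOC accepted it; check the TMF-specific response code. */
	return (response == MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED ||
	    response == MPI_SCSITASKMGMT_RSP_TM_COMPLETE);
}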
41182545bca0SMatthew Dillon 
41192545bca0SMatthew Dillon /************************ Target Mode Support ****************************/
41202545bca0SMatthew Dillon static void
41212545bca0SMatthew Dillon mpt_fc_post_els(struct mpt_softc *mpt, request_t *req, int ioindex)
41222545bca0SMatthew Dillon {
41232545bca0SMatthew Dillon 	MSG_LINK_SERVICE_BUFFER_POST_REQUEST *fc;
41242545bca0SMatthew Dillon 	PTR_SGE_TRANSACTION32 tep;
41252545bca0SMatthew Dillon 	PTR_SGE_SIMPLE32 se;
41262545bca0SMatthew Dillon 	bus_addr_t paddr;
41272545bca0SMatthew Dillon 	uint32_t fl;
41282545bca0SMatthew Dillon 
41292545bca0SMatthew Dillon 	paddr = req->req_pbuf;
41302545bca0SMatthew Dillon 	paddr += MPT_RQSL(mpt);
41312545bca0SMatthew Dillon 
41322545bca0SMatthew Dillon 	fc = req->req_vbuf;
41332545bca0SMatthew Dillon 	memset(fc, 0, MPT_REQUEST_AREA);
41342545bca0SMatthew Dillon 	fc->BufferCount = 1;
41352545bca0SMatthew Dillon 	fc->Function = MPI_FUNCTION_FC_LINK_SRVC_BUF_POST;
41362545bca0SMatthew Dillon 	fc->MsgContext = htole32(req->index | fc_els_handler_id);
41372545bca0SMatthew Dillon 
41382545bca0SMatthew Dillon 	/*
41392545bca0SMatthew Dillon 	 * Okay, set up ELS buffer pointers. ELS buffer pointers
41402545bca0SMatthew Dillon 	 * consist of a TE SGL element (with details length of zero)
41416d259fc1SSascha Wildner 	 * followed by a SIMPLE SGL element which holds the address
41422545bca0SMatthew Dillon 	 * of the buffer.
41432545bca0SMatthew Dillon 	 */
41442545bca0SMatthew Dillon 
41452545bca0SMatthew Dillon 	tep = (PTR_SGE_TRANSACTION32) &fc->SGL;
41462545bca0SMatthew Dillon 
41472545bca0SMatthew Dillon 	tep->ContextSize = 4;
41482545bca0SMatthew Dillon 	tep->Flags = 0;
41492545bca0SMatthew Dillon 	tep->TransactionContext[0] = htole32(ioindex);
41502545bca0SMatthew Dillon 
41512545bca0SMatthew Dillon 	se = (PTR_SGE_SIMPLE32) &tep->TransactionDetails[0];
41522545bca0SMatthew Dillon 	fl =
41532545bca0SMatthew Dillon 		MPI_SGE_FLAGS_HOST_TO_IOC	|
41542545bca0SMatthew Dillon 		MPI_SGE_FLAGS_SIMPLE_ELEMENT	|
41552545bca0SMatthew Dillon 		MPI_SGE_FLAGS_LAST_ELEMENT	|
41562545bca0SMatthew Dillon 		MPI_SGE_FLAGS_END_OF_LIST	|
41572545bca0SMatthew Dillon 		MPI_SGE_FLAGS_END_OF_BUFFER;
41582545bca0SMatthew Dillon 	fl <<= MPI_SGE_FLAGS_SHIFT;
41592545bca0SMatthew Dillon 	fl |= (MPT_NRFM(mpt) - MPT_RQSL(mpt));
41602545bca0SMatthew Dillon 	se->FlagsLength = htole32(fl);
41612545bca0SMatthew Dillon 	se->Address = htole32((uint32_t) paddr);
41622545bca0SMatthew Dillon 	mpt_lprt(mpt, MPT_PRT_DEBUG,
41632545bca0SMatthew Dillon 	    "add ELS index %d ioindex %d for %p:%u\n",
41642545bca0SMatthew Dillon 	    req->index, ioindex, req, req->serno);
41652545bca0SMatthew Dillon 	KASSERT(((req->state & REQ_STATE_LOCKED) != 0),
41662545bca0SMatthew Dillon 	    ("mpt_fc_post_els: request not locked"));
41672545bca0SMatthew Dillon 	mpt_send_cmd(mpt, req);
41682545bca0SMatthew Dillon }
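
/*
 * Illustrative sketch (not part of the driver): how the SIMPLE SGL
 * element above packs its flags and length into a single 32-bit
 * FlagsLength word -- the flag bits live above MPI_SGE_FLAGS_SHIFT, the
 * buffer length sits in the low bits, and the result is stored
 * little-endian.  The helper name and its addr/len parameters are
 * assumptions made for the example.
 */
static void __unused
example_fill_simple32(PTR_SGE_SIMPLE32 se, uint32_t addr, uint32_t len)
{
	uint32_t fl;

	fl = MPI_SGE_FLAGS_HOST_TO_IOC	|
	     MPI_SGE_FLAGS_SIMPLE_ELEMENT	|
	     MPI_SGE_FLAGS_LAST_ELEMENT	|
	     MPI_SGE_FLAGS_END_OF_LIST	|
	     MPI_SGE_FLAGS_END_OF_BUFFER;
	fl <<= MPI_SGE_FLAGS_SHIFT;	/* move the flags above the length */
	fl |= len;			/* low bits carry the buffer length */
	se->FlagsLength = htole32(fl);
	se->Address = htole32(addr);
}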
41692545bca0SMatthew Dillon 
41702545bca0SMatthew Dillon static void
41712545bca0SMatthew Dillon mpt_post_target_command(struct mpt_softc *mpt, request_t *req, int ioindex)
41722545bca0SMatthew Dillon {
41732545bca0SMatthew Dillon 	PTR_MSG_TARGET_CMD_BUFFER_POST_REQUEST fc;
41742545bca0SMatthew Dillon 	PTR_CMD_BUFFER_DESCRIPTOR cb;
41752545bca0SMatthew Dillon 	bus_addr_t paddr;
41762545bca0SMatthew Dillon 
41772545bca0SMatthew Dillon 	paddr = req->req_pbuf;
41782545bca0SMatthew Dillon 	paddr += MPT_RQSL(mpt);
41792545bca0SMatthew Dillon 	memset(req->req_vbuf, 0, MPT_REQUEST_AREA);
41802545bca0SMatthew Dillon 	MPT_TGT_STATE(mpt, req)->state = TGT_STATE_LOADING;
41812545bca0SMatthew Dillon 
41822545bca0SMatthew Dillon 	fc = req->req_vbuf;
41832545bca0SMatthew Dillon 	fc->BufferCount = 1;
41842545bca0SMatthew Dillon 	fc->Function = MPI_FUNCTION_TARGET_CMD_BUFFER_POST;
41852545bca0SMatthew Dillon 	fc->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
41862545bca0SMatthew Dillon 
41872545bca0SMatthew Dillon 	cb = &fc->Buffer[0];
41882545bca0SMatthew Dillon 	cb->IoIndex = htole16(ioindex);
41892545bca0SMatthew Dillon 	cb->u.PhysicalAddress32 = htole32((U32) paddr);
41902545bca0SMatthew Dillon 
41912545bca0SMatthew Dillon 	mpt_check_doorbell(mpt);
41922545bca0SMatthew Dillon 	mpt_send_cmd(mpt, req);
41932545bca0SMatthew Dillon }
41942545bca0SMatthew Dillon 
41952545bca0SMatthew Dillon static int
41962545bca0SMatthew Dillon mpt_add_els_buffers(struct mpt_softc *mpt)
41972545bca0SMatthew Dillon {
41982545bca0SMatthew Dillon 	int i;
41992545bca0SMatthew Dillon 
42002545bca0SMatthew Dillon 	if (mpt->is_fc == 0) {
42012545bca0SMatthew Dillon 		return (TRUE);
42022545bca0SMatthew Dillon 	}
42032545bca0SMatthew Dillon 
42042545bca0SMatthew Dillon 	if (mpt->els_cmds_allocated) {
42052545bca0SMatthew Dillon 		return (TRUE);
42062545bca0SMatthew Dillon 	}
42072545bca0SMatthew Dillon 
42082545bca0SMatthew Dillon 	mpt->els_cmd_ptrs = kmalloc(MPT_MAX_ELS * sizeof (request_t *),
42092545bca0SMatthew Dillon 	    M_DEVBUF, M_NOWAIT | M_ZERO);
42102545bca0SMatthew Dillon 
42112545bca0SMatthew Dillon 	if (mpt->els_cmd_ptrs == NULL) {
42122545bca0SMatthew Dillon 		return (FALSE);
42132545bca0SMatthew Dillon 	}
42142545bca0SMatthew Dillon 
42152545bca0SMatthew Dillon 	/*
42162545bca0SMatthew Dillon 	 * Feed the chip some ELS buffer resources
42172545bca0SMatthew Dillon 	 */
42182545bca0SMatthew Dillon 	for (i = 0; i < MPT_MAX_ELS; i++) {
42192545bca0SMatthew Dillon 		request_t *req = mpt_get_request(mpt, FALSE);
42202545bca0SMatthew Dillon 		if (req == NULL) {
42212545bca0SMatthew Dillon 			break;
42222545bca0SMatthew Dillon 		}
42232545bca0SMatthew Dillon 		req->state |= REQ_STATE_LOCKED;
42242545bca0SMatthew Dillon 		mpt->els_cmd_ptrs[i] = req;
42252545bca0SMatthew Dillon 		mpt_fc_post_els(mpt, req, i);
42262545bca0SMatthew Dillon 	}
42272545bca0SMatthew Dillon 
42282545bca0SMatthew Dillon 	if (i == 0) {
42292545bca0SMatthew Dillon 		mpt_prt(mpt, "unable to add ELS buffer resources\n");
42302545bca0SMatthew Dillon 		kfree(mpt->els_cmd_ptrs, M_DEVBUF);
42312545bca0SMatthew Dillon 		mpt->els_cmd_ptrs = NULL;
42322545bca0SMatthew Dillon 		return (FALSE);
42332545bca0SMatthew Dillon 	}
42342545bca0SMatthew Dillon 	if (i != MPT_MAX_ELS) {
42352545bca0SMatthew Dillon 		mpt_lprt(mpt, MPT_PRT_INFO,
42362545bca0SMatthew Dillon 		    "only added %d of %d  ELS buffers\n", i, MPT_MAX_ELS);
42372545bca0SMatthew Dillon 	}
42382545bca0SMatthew Dillon 	mpt->els_cmds_allocated = i;
42392545bca0SMatthew Dillon 	return(TRUE);
42402545bca0SMatthew Dillon }
42412545bca0SMatthew Dillon 
42422545bca0SMatthew Dillon static int
42432545bca0SMatthew Dillon mpt_add_target_commands(struct mpt_softc *mpt)
42442545bca0SMatthew Dillon {
42452545bca0SMatthew Dillon 	int i, max;
42462545bca0SMatthew Dillon 
42472545bca0SMatthew Dillon 	if (mpt->tgt_cmd_ptrs) {
42482545bca0SMatthew Dillon 		return (TRUE);
42492545bca0SMatthew Dillon 	}
42502545bca0SMatthew Dillon 
42512545bca0SMatthew Dillon 	max = MPT_MAX_REQUESTS(mpt) >> 1;
42522545bca0SMatthew Dillon 	if (max > mpt->mpt_max_tgtcmds) {
42532545bca0SMatthew Dillon 		max = mpt->mpt_max_tgtcmds;
42542545bca0SMatthew Dillon 	}
42552545bca0SMatthew Dillon 	mpt->tgt_cmd_ptrs =
42562545bca0SMatthew Dillon 	    kmalloc(max * sizeof (request_t *), M_DEVBUF, M_NOWAIT | M_ZERO);
42572545bca0SMatthew Dillon 	if (mpt->tgt_cmd_ptrs == NULL) {
42582545bca0SMatthew Dillon 		mpt_prt(mpt,
42592545bca0SMatthew Dillon 		    "mpt_add_target_commands: could not allocate cmd ptrs\n");
42602545bca0SMatthew Dillon 		return (FALSE);
42612545bca0SMatthew Dillon 	}
42622545bca0SMatthew Dillon 
42632545bca0SMatthew Dillon 	for (i = 0; i < max; i++) {
42642545bca0SMatthew Dillon 		request_t *req;
42652545bca0SMatthew Dillon 
42662545bca0SMatthew Dillon 		req = mpt_get_request(mpt, FALSE);
42672545bca0SMatthew Dillon 		if (req == NULL) {
42682545bca0SMatthew Dillon 			break;
42692545bca0SMatthew Dillon 		}
42702545bca0SMatthew Dillon 		req->state |= REQ_STATE_LOCKED;
42712545bca0SMatthew Dillon 		mpt->tgt_cmd_ptrs[i] = req;
42722545bca0SMatthew Dillon 		mpt_post_target_command(mpt, req, i);
42732545bca0SMatthew Dillon 	}
42742545bca0SMatthew Dillon 
42752545bca0SMatthew Dillon 
42762545bca0SMatthew Dillon 	if (i == 0) {
42772545bca0SMatthew Dillon 		mpt_lprt(mpt, MPT_PRT_ERROR, "could not add any target bufs\n");
42782545bca0SMatthew Dillon 		kfree(mpt->tgt_cmd_ptrs, M_DEVBUF);
42792545bca0SMatthew Dillon 		mpt->tgt_cmd_ptrs = NULL;
42802545bca0SMatthew Dillon 		return (FALSE);
42812545bca0SMatthew Dillon 	}
42822545bca0SMatthew Dillon 
42832545bca0SMatthew Dillon 	mpt->tgt_cmds_allocated = i;
42842545bca0SMatthew Dillon 
42852545bca0SMatthew Dillon 	if (i < max) {
42862545bca0SMatthew Dillon 		mpt_lprt(mpt, MPT_PRT_INFO,
42872545bca0SMatthew Dillon 		    "added %d of %d target bufs\n", i, max);
42882545bca0SMatthew Dillon 	}
42892545bca0SMatthew Dillon 	return (i);
42902545bca0SMatthew Dillon }
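
/*
 * Illustrative sketch (not part of the driver): the buffer-feeding
 * pattern shared by mpt_add_els_buffers() and mpt_add_target_commands()
 * above.  'post' is a hypothetical callback standing in for
 * mpt_fc_post_els() or mpt_post_target_command(); everything else uses
 * names from the surrounding code.
 */
static int __unused
example_feed_buffers(struct mpt_softc *mpt, request_t **ptrs, int max,
    void (*post)(struct mpt_softc *, request_t *, int))
{
	int i;

	for (i = 0; i < max; i++) {
		request_t *req = mpt_get_request(mpt, FALSE);
		if (req == NULL) {
			break;
		}
		req->state |= REQ_STATE_LOCKED;	/* keep the request allocated */
		ptrs[i] = req;
		(*post)(mpt, req, i);
	}
	return (i);	/* how many buffers actually reached the chip */
}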
42912545bca0SMatthew Dillon 
42922545bca0SMatthew Dillon static int
42932545bca0SMatthew Dillon mpt_enable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun)
42942545bca0SMatthew Dillon {
42954c42baf4SSascha Wildner 
42962545bca0SMatthew Dillon 	if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) {
42972545bca0SMatthew Dillon 		mpt->twildcard = 1;
42982545bca0SMatthew Dillon 	} else if (lun >= MPT_MAX_LUNS) {
42992545bca0SMatthew Dillon 		return (EINVAL);
43002545bca0SMatthew Dillon 	} else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) {
43012545bca0SMatthew Dillon 		return (EINVAL);
43022545bca0SMatthew Dillon 	}
43032545bca0SMatthew Dillon 	if (mpt->tenabled == 0) {
43042545bca0SMatthew Dillon 		if (mpt->is_fc) {
43052545bca0SMatthew Dillon 			(void) mpt_fc_reset_link(mpt, 0);
43062545bca0SMatthew Dillon 		}
43072545bca0SMatthew Dillon 		mpt->tenabled = 1;
43082545bca0SMatthew Dillon 	}
43092545bca0SMatthew Dillon 	if (lun == CAM_LUN_WILDCARD) {
43102545bca0SMatthew Dillon 		mpt->trt_wildcard.enabled = 1;
43112545bca0SMatthew Dillon 	} else {
43122545bca0SMatthew Dillon 		mpt->trt[lun].enabled = 1;
43132545bca0SMatthew Dillon 	}
43142545bca0SMatthew Dillon 	return (0);
43152545bca0SMatthew Dillon }
43162545bca0SMatthew Dillon 
43172545bca0SMatthew Dillon static int
43182545bca0SMatthew Dillon mpt_disable_lun(struct mpt_softc *mpt, target_id_t tgt, lun_id_t lun)
43192545bca0SMatthew Dillon {
43202545bca0SMatthew Dillon 	int i;
43214c42baf4SSascha Wildner 
43222545bca0SMatthew Dillon 	if (tgt == CAM_TARGET_WILDCARD && lun == CAM_LUN_WILDCARD) {
43232545bca0SMatthew Dillon 		mpt->twildcard = 0;
43242545bca0SMatthew Dillon 	} else if (lun >= MPT_MAX_LUNS) {
43252545bca0SMatthew Dillon 		return (EINVAL);
43262545bca0SMatthew Dillon 	} else if (tgt != CAM_TARGET_WILDCARD && tgt != 0) {
43272545bca0SMatthew Dillon 		return (EINVAL);
43282545bca0SMatthew Dillon 	}
43292545bca0SMatthew Dillon 	if (lun == CAM_LUN_WILDCARD) {
43302545bca0SMatthew Dillon 		mpt->trt_wildcard.enabled = 0;
43312545bca0SMatthew Dillon 	} else {
43322545bca0SMatthew Dillon 		mpt->trt[lun].enabled = 0;
43332545bca0SMatthew Dillon 	}
43342545bca0SMatthew Dillon 	for (i = 0; i < MPT_MAX_LUNS; i++) {
43352545bca0SMatthew Dillon 		if (mpt->trt[i].enabled) {
43362545bca0SMatthew Dillon 			break;
43372545bca0SMatthew Dillon 		}
43382545bca0SMatthew Dillon 	}
43392545bca0SMatthew Dillon 	if (i == MPT_MAX_LUNS && mpt->twildcard == 0) {
43402545bca0SMatthew Dillon 		if (mpt->is_fc) {
43412545bca0SMatthew Dillon 			(void) mpt_fc_reset_link(mpt, 0);
43422545bca0SMatthew Dillon 		}
43432545bca0SMatthew Dillon 		mpt->tenabled = 0;
43442545bca0SMatthew Dillon 	}
43452545bca0SMatthew Dillon 	return (0);
43462545bca0SMatthew Dillon }
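
/*
 * Illustrative sketch (not part of the driver): the "is any lun still
 * enabled?" scan that mpt_disable_lun() above performs before turning
 * target mode off.  The helper itself is hypothetical.
 */
static int __unused
example_any_lun_enabled(struct mpt_softc *mpt)
{
	int i;

	for (i = 0; i < MPT_MAX_LUNS; i++) {
		if (mpt->trt[i].enabled) {
			return (1);
		}
	}
	/* No per-lun resource is enabled; only the wildcard can remain. */
	return (mpt->twildcard != 0);
}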
43472545bca0SMatthew Dillon 
43482545bca0SMatthew Dillon /*
43492545bca0SMatthew Dillon  * Called with MPT lock held
43502545bca0SMatthew Dillon  */
43512545bca0SMatthew Dillon static void
43522545bca0SMatthew Dillon mpt_target_start_io(struct mpt_softc *mpt, union ccb *ccb)
43532545bca0SMatthew Dillon {
43542545bca0SMatthew Dillon 	struct ccb_scsiio *csio = &ccb->csio;
43552545bca0SMatthew Dillon 	request_t *cmd_req = MPT_TAG_2_REQ(mpt, csio->tag_id);
43562545bca0SMatthew Dillon 	mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, cmd_req);
43572545bca0SMatthew Dillon 
43582545bca0SMatthew Dillon 	switch (tgt->state) {
43592545bca0SMatthew Dillon 	case TGT_STATE_IN_CAM:
43602545bca0SMatthew Dillon 		break;
43612545bca0SMatthew Dillon 	case TGT_STATE_MOVING_DATA:
43622545bca0SMatthew Dillon 		mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
43632545bca0SMatthew Dillon 		xpt_freeze_simq(mpt->sim, 1);
43642545bca0SMatthew Dillon 		ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
43652545bca0SMatthew Dillon 		tgt->ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
43662545bca0SMatthew Dillon 		xpt_done(ccb);
43672545bca0SMatthew Dillon 		return;
43682545bca0SMatthew Dillon 	default:
43692545bca0SMatthew Dillon 		mpt_prt(mpt, "ccb %p flags 0x%x tag 0x%08x had bad request "
43702545bca0SMatthew Dillon 		    "starting I/O\n", ccb, csio->ccb_h.flags, csio->tag_id);
43712545bca0SMatthew Dillon 		mpt_tgt_dump_req_state(mpt, cmd_req);
43722545bca0SMatthew Dillon 		mpt_set_ccb_status(ccb, CAM_REQ_CMP_ERR);
43732545bca0SMatthew Dillon 		xpt_done(ccb);
43742545bca0SMatthew Dillon 		return;
43752545bca0SMatthew Dillon 	}
43762545bca0SMatthew Dillon 
43772545bca0SMatthew Dillon 	if (csio->dxfer_len) {
43782545bca0SMatthew Dillon 		bus_dmamap_callback_t *cb;
43792545bca0SMatthew Dillon 		PTR_MSG_TARGET_ASSIST_REQUEST ta;
43802545bca0SMatthew Dillon 		request_t *req;
43812545bca0SMatthew Dillon 
43822545bca0SMatthew Dillon 		KASSERT((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE,
43834c42baf4SSascha Wildner 		    ("dxfer_len %u but direction is NONE", csio->dxfer_len));
43842545bca0SMatthew Dillon 
43852545bca0SMatthew Dillon 		if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
43862545bca0SMatthew Dillon 			if (mpt->outofbeer == 0) {
43872545bca0SMatthew Dillon 				mpt->outofbeer = 1;
43882545bca0SMatthew Dillon 				xpt_freeze_simq(mpt->sim, 1);
43892545bca0SMatthew Dillon 				mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
43902545bca0SMatthew Dillon 			}
43912545bca0SMatthew Dillon 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
43922545bca0SMatthew Dillon 			mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
43932545bca0SMatthew Dillon 			xpt_done(ccb);
43942545bca0SMatthew Dillon 			return;
43952545bca0SMatthew Dillon 		}
43962545bca0SMatthew Dillon 		ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG;
43972545bca0SMatthew Dillon 		if (sizeof (bus_addr_t) > 4) {
43982545bca0SMatthew Dillon 			cb = mpt_execute_req_a64;
43992545bca0SMatthew Dillon 		} else {
44002545bca0SMatthew Dillon 			cb = mpt_execute_req;
44012545bca0SMatthew Dillon 		}
44022545bca0SMatthew Dillon 
44032545bca0SMatthew Dillon 		req->ccb = ccb;
44042545bca0SMatthew Dillon 		ccb->ccb_h.ccb_req_ptr = req;
44052545bca0SMatthew Dillon 
44062545bca0SMatthew Dillon 		/*
44072545bca0SMatthew Dillon 		 * Record the currently active ccb and the
44082545bca0SMatthew Dillon 		 * request for it in our target state area.
44092545bca0SMatthew Dillon 		 */
44102545bca0SMatthew Dillon 		tgt->ccb = ccb;
44112545bca0SMatthew Dillon 		tgt->req = req;
44122545bca0SMatthew Dillon 
44132545bca0SMatthew Dillon 		memset(req->req_vbuf, 0, MPT_RQSL(mpt));
44142545bca0SMatthew Dillon 		ta = req->req_vbuf;
44152545bca0SMatthew Dillon 
44162545bca0SMatthew Dillon 		if (mpt->is_sas) {
44172545bca0SMatthew Dillon 			PTR_MPI_TARGET_SSP_CMD_BUFFER ssp =
44182545bca0SMatthew Dillon 			     cmd_req->req_vbuf;
44192545bca0SMatthew Dillon 			ta->QueueTag = ssp->InitiatorTag;
44202545bca0SMatthew Dillon 		} else if (mpt->is_spi) {
44212545bca0SMatthew Dillon 			PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp =
44222545bca0SMatthew Dillon 			     cmd_req->req_vbuf;
44232545bca0SMatthew Dillon 			ta->QueueTag = sp->Tag;
44242545bca0SMatthew Dillon 		}
44252545bca0SMatthew Dillon 		ta->Function = MPI_FUNCTION_TARGET_ASSIST;
44262545bca0SMatthew Dillon 		ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
44272545bca0SMatthew Dillon 		ta->ReplyWord = htole32(tgt->reply_desc);
44282545bca0SMatthew Dillon 		if (csio->ccb_h.target_lun > MPT_MAX_LUNS) {
44292545bca0SMatthew Dillon 			ta->LUN[0] =
44302545bca0SMatthew Dillon 			    0x40 | ((csio->ccb_h.target_lun >> 8) & 0x3f);
44312545bca0SMatthew Dillon 			ta->LUN[1] = csio->ccb_h.target_lun & 0xff;
44322545bca0SMatthew Dillon 		} else {
44332545bca0SMatthew Dillon 			ta->LUN[1] = csio->ccb_h.target_lun;
44342545bca0SMatthew Dillon 		}
44352545bca0SMatthew Dillon 
44362545bca0SMatthew Dillon 		ta->RelativeOffset = tgt->bytes_xfered;
44372545bca0SMatthew Dillon 		ta->DataLength = ccb->csio.dxfer_len;
44382545bca0SMatthew Dillon 		if (ta->DataLength > tgt->resid) {
44392545bca0SMatthew Dillon 			ta->DataLength = tgt->resid;
44402545bca0SMatthew Dillon 		}
44412545bca0SMatthew Dillon 
44422545bca0SMatthew Dillon 		/*
44432545bca0SMatthew Dillon 		 * XXX Should be done after data transfer completes?
44442545bca0SMatthew Dillon 		 */
44452545bca0SMatthew Dillon 		tgt->resid -= csio->dxfer_len;
44462545bca0SMatthew Dillon 		tgt->bytes_xfered += csio->dxfer_len;
44472545bca0SMatthew Dillon 
44482545bca0SMatthew Dillon 		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
44492545bca0SMatthew Dillon 			ta->TargetAssistFlags |=
44502545bca0SMatthew Dillon 			    TARGET_ASSIST_FLAGS_DATA_DIRECTION;
44512545bca0SMatthew Dillon 		}
44522545bca0SMatthew Dillon 
44532545bca0SMatthew Dillon #ifdef	WE_TRUST_AUTO_GOOD_STATUS
44542545bca0SMatthew Dillon 		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) &&
44552545bca0SMatthew Dillon 		    csio->scsi_status == SCSI_STATUS_OK && tgt->resid == 0) {
44562545bca0SMatthew Dillon 			ta->TargetAssistFlags |=
44572545bca0SMatthew Dillon 			    TARGET_ASSIST_FLAGS_AUTO_STATUS;
44582545bca0SMatthew Dillon 		}
44592545bca0SMatthew Dillon #endif
44602545bca0SMatthew Dillon 		tgt->state = TGT_STATE_SETTING_UP_FOR_DATA;
44612545bca0SMatthew Dillon 
44622545bca0SMatthew Dillon 		mpt_lprt(mpt, MPT_PRT_DEBUG,
44632545bca0SMatthew Dillon 		    "DATA_CCB %p tag %x %u bytes %u resid flg %x req %p:%u "
44642545bca0SMatthew Dillon 		    "nxtstate=%d\n", csio, csio->tag_id, csio->dxfer_len,
44652545bca0SMatthew Dillon 		    tgt->resid, ccb->ccb_h.flags, req, req->serno, tgt->state);
44662545bca0SMatthew Dillon 
44672545bca0SMatthew Dillon 		if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
44682545bca0SMatthew Dillon 			if ((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0) {
44692545bca0SMatthew Dillon 				int error;
44706d259fc1SSascha Wildner 				crit_enter();
44712545bca0SMatthew Dillon 				error = bus_dmamap_load(mpt->buffer_dmat,
44722545bca0SMatthew Dillon 				    req->dmap, csio->data_ptr, csio->dxfer_len,
44732545bca0SMatthew Dillon 				    cb, req, 0);
44746d259fc1SSascha Wildner 				crit_exit();
44752545bca0SMatthew Dillon 				if (error == EINPROGRESS) {
44762545bca0SMatthew Dillon 					xpt_freeze_simq(mpt->sim, 1);
44772545bca0SMatthew Dillon 					ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
44782545bca0SMatthew Dillon 				}
44792545bca0SMatthew Dillon 			} else {
44802545bca0SMatthew Dillon 				/*
44812545bca0SMatthew Dillon 				 * We have been given a pointer to single
44822545bca0SMatthew Dillon 				 * physical buffer.
44832545bca0SMatthew Dillon 				 */
44842545bca0SMatthew Dillon 				struct bus_dma_segment seg;
44852545bca0SMatthew Dillon 				seg.ds_addr = (bus_addr_t)
44862545bca0SMatthew Dillon 				    (vm_offset_t)csio->data_ptr;
44872545bca0SMatthew Dillon 				seg.ds_len = csio->dxfer_len;
44882545bca0SMatthew Dillon 				(*cb)(req, &seg, 1, 0);
44892545bca0SMatthew Dillon 			}
44902545bca0SMatthew Dillon 		} else {
44912545bca0SMatthew Dillon 			/*
44922545bca0SMatthew Dillon 			 * We have been given a list of addresses.
44932545bca0SMatthew Dillon 			 * This case could be easily supported but they are not
44942545bca0SMatthew Dillon 			 * currently generated by the CAM subsystem so there
44952545bca0SMatthew Dillon 			 * is no point in wasting the time right now.
44962545bca0SMatthew Dillon 			 */
44972545bca0SMatthew Dillon 			struct bus_dma_segment *sgs;
44982545bca0SMatthew Dillon 			if ((ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0) {
44992545bca0SMatthew Dillon 				(*cb)(req, NULL, 0, EFAULT);
45002545bca0SMatthew Dillon 			} else {
45012545bca0SMatthew Dillon 				/* Just use the segments provided */
45022545bca0SMatthew Dillon 				sgs = (struct bus_dma_segment *)csio->data_ptr;
45032545bca0SMatthew Dillon 				(*cb)(req, sgs, csio->sglist_cnt, 0);
45042545bca0SMatthew Dillon 			}
45052545bca0SMatthew Dillon 		}
45062545bca0SMatthew Dillon 	} else {
45072545bca0SMatthew Dillon 		uint8_t *sp = NULL, sense[MPT_SENSE_SIZE];
45082545bca0SMatthew Dillon 
45092545bca0SMatthew Dillon 		/*
45102545bca0SMatthew Dillon 		 * XXX: I don't know why this seems to happen, but
45112545bca0SMatthew Dillon 		 * XXX: completing the CCB seems to make things happy.
45122545bca0SMatthew Dillon 		 * XXX: This seems to happen if the initiator requests
45132545bca0SMatthew Dillon 		 * XXX: enough data that we have to do multiple CTIOs.
45142545bca0SMatthew Dillon 		 */
45152545bca0SMatthew Dillon 		if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) {
45162545bca0SMatthew Dillon 			mpt_lprt(mpt, MPT_PRT_DEBUG,
45172545bca0SMatthew Dillon 			    "Meaningless STATUS CCB (%p): flags %x status %x "
45182545bca0SMatthew Dillon 			    "resid %d bytes_xfered %u\n", ccb, ccb->ccb_h.flags,
45192545bca0SMatthew Dillon 			    ccb->ccb_h.status, tgt->resid, tgt->bytes_xfered);
45202545bca0SMatthew Dillon 			mpt_set_ccb_status(ccb, CAM_REQ_CMP);
45212545bca0SMatthew Dillon 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
45222545bca0SMatthew Dillon 			xpt_done(ccb);
45232545bca0SMatthew Dillon 			return;
45242545bca0SMatthew Dillon 		}
45252545bca0SMatthew Dillon 		if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
45262545bca0SMatthew Dillon 			sp = sense;
45272545bca0SMatthew Dillon 			memcpy(sp, &csio->sense_data,
45282545bca0SMatthew Dillon 			   min(csio->sense_len, MPT_SENSE_SIZE));
45292545bca0SMatthew Dillon 		}
45302545bca0SMatthew Dillon 		mpt_scsi_tgt_status(mpt, ccb, cmd_req, csio->scsi_status, sp);
45312545bca0SMatthew Dillon 	}
45322545bca0SMatthew Dillon }
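
/*
 * Illustrative sketch (not part of the driver): the single-level LUN
 * encoding used when filling ta->LUN[] above.  LUNs that do not fit the
 * simple one-byte form are written in the flat-space format, with 0x40
 * in the top bits of byte 0.  The helper is hypothetical and assumes
 * the destination bytes were already zeroed, as the surrounding code
 * does with memset().
 */
static void __unused
example_encode_lun(uint8_t *lunbytes, lun_id_t lun)
{

	if (lun > MPT_MAX_LUNS) {
		lunbytes[0] = 0x40 | ((lun >> 8) & 0x3f);
		lunbytes[1] = lun & 0xff;
	} else {
		lunbytes[1] = lun;
	}
}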
45332545bca0SMatthew Dillon 
45342545bca0SMatthew Dillon static void
45352545bca0SMatthew Dillon mpt_scsi_tgt_local(struct mpt_softc *mpt, request_t *cmd_req,
45362545bca0SMatthew Dillon     uint32_t lun, int send, uint8_t *data, size_t length)
45372545bca0SMatthew Dillon {
45382545bca0SMatthew Dillon 	mpt_tgt_state_t *tgt;
45392545bca0SMatthew Dillon 	PTR_MSG_TARGET_ASSIST_REQUEST ta;
45402545bca0SMatthew Dillon 	SGE_SIMPLE32 *se;
45412545bca0SMatthew Dillon 	uint32_t flags;
45422545bca0SMatthew Dillon 	uint8_t *dptr;
45432545bca0SMatthew Dillon 	bus_addr_t pptr;
45442545bca0SMatthew Dillon 	request_t *req;
45452545bca0SMatthew Dillon 
45462545bca0SMatthew Dillon 	/*
45472545bca0SMatthew Dillon 	 * We enter with resid set to the data load for the command.
45482545bca0SMatthew Dillon 	 */
45492545bca0SMatthew Dillon 	tgt = MPT_TGT_STATE(mpt, cmd_req);
45502545bca0SMatthew Dillon 	if (length == 0 || tgt->resid == 0) {
45512545bca0SMatthew Dillon 		tgt->resid = 0;
45522545bca0SMatthew Dillon 		mpt_scsi_tgt_status(mpt, NULL, cmd_req, 0, NULL);
45532545bca0SMatthew Dillon 		return;
45542545bca0SMatthew Dillon 	}
45552545bca0SMatthew Dillon 
45562545bca0SMatthew Dillon 	if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
45572545bca0SMatthew Dillon 		mpt_prt(mpt, "out of resources- dropping local response\n");
45582545bca0SMatthew Dillon 		return;
45592545bca0SMatthew Dillon 	}
45602545bca0SMatthew Dillon 	tgt->is_local = 1;
45612545bca0SMatthew Dillon 
45622545bca0SMatthew Dillon 
45632545bca0SMatthew Dillon 	memset(req->req_vbuf, 0, MPT_RQSL(mpt));
45642545bca0SMatthew Dillon 	ta = req->req_vbuf;
45652545bca0SMatthew Dillon 
45662545bca0SMatthew Dillon 	if (mpt->is_sas) {
45672545bca0SMatthew Dillon 		PTR_MPI_TARGET_SSP_CMD_BUFFER ssp = cmd_req->req_vbuf;
45682545bca0SMatthew Dillon 		ta->QueueTag = ssp->InitiatorTag;
45692545bca0SMatthew Dillon 	} else if (mpt->is_spi) {
45702545bca0SMatthew Dillon 		PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp = cmd_req->req_vbuf;
45712545bca0SMatthew Dillon 		ta->QueueTag = sp->Tag;
45722545bca0SMatthew Dillon 	}
45732545bca0SMatthew Dillon 	ta->Function = MPI_FUNCTION_TARGET_ASSIST;
45742545bca0SMatthew Dillon 	ta->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
45752545bca0SMatthew Dillon 	ta->ReplyWord = htole32(tgt->reply_desc);
45762545bca0SMatthew Dillon 	if (lun > MPT_MAX_LUNS) {
45772545bca0SMatthew Dillon 		ta->LUN[0] = 0x40 | ((lun >> 8) & 0x3f);
45782545bca0SMatthew Dillon 		ta->LUN[1] = lun & 0xff;
45792545bca0SMatthew Dillon 	} else {
45802545bca0SMatthew Dillon 		ta->LUN[1] = lun;
45812545bca0SMatthew Dillon 	}
45822545bca0SMatthew Dillon 	ta->RelativeOffset = 0;
45832545bca0SMatthew Dillon 	ta->DataLength = length;
45842545bca0SMatthew Dillon 
45852545bca0SMatthew Dillon 	dptr = req->req_vbuf;
45862545bca0SMatthew Dillon 	dptr += MPT_RQSL(mpt);
45872545bca0SMatthew Dillon 	pptr = req->req_pbuf;
45882545bca0SMatthew Dillon 	pptr += MPT_RQSL(mpt);
45892545bca0SMatthew Dillon 	memcpy(dptr, data, min(length, MPT_RQSL(mpt)));
45902545bca0SMatthew Dillon 
45912545bca0SMatthew Dillon 	se = (SGE_SIMPLE32 *) &ta->SGL[0];
45922545bca0SMatthew Dillon 	memset(se, 0, sizeof (*se));
45932545bca0SMatthew Dillon 
45942545bca0SMatthew Dillon 	flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
45952545bca0SMatthew Dillon 	if (send) {
45962545bca0SMatthew Dillon 		ta->TargetAssistFlags |= TARGET_ASSIST_FLAGS_DATA_DIRECTION;
45972545bca0SMatthew Dillon 		flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
45982545bca0SMatthew Dillon 	}
45992545bca0SMatthew Dillon 	se->Address = pptr;
46002545bca0SMatthew Dillon 	MPI_pSGE_SET_LENGTH(se, length);
46012545bca0SMatthew Dillon 	flags |= MPI_SGE_FLAGS_LAST_ELEMENT;
46022545bca0SMatthew Dillon 	flags |= MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_END_OF_BUFFER;
46032545bca0SMatthew Dillon 	MPI_pSGE_SET_FLAGS(se, flags);
46042545bca0SMatthew Dillon 
46052545bca0SMatthew Dillon 	tgt->ccb = NULL;
46062545bca0SMatthew Dillon 	tgt->req = req;
46072545bca0SMatthew Dillon 	tgt->resid -= length;
46082545bca0SMatthew Dillon 	tgt->bytes_xfered = length;
46092545bca0SMatthew Dillon #ifdef	WE_TRUST_AUTO_GOOD_STATUS
46102545bca0SMatthew Dillon 	tgt->state = TGT_STATE_MOVING_DATA_AND_STATUS;
46112545bca0SMatthew Dillon #else
46122545bca0SMatthew Dillon 	tgt->state = TGT_STATE_MOVING_DATA;
46132545bca0SMatthew Dillon #endif
46142545bca0SMatthew Dillon 	mpt_send_cmd(mpt, req);
46152545bca0SMatthew Dillon }
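
/*
 * Illustrative sketch (not part of the driver): the direction handling
 * used by mpt_scsi_tgt_local() above.  A "send" (target to initiator)
 * transfer sets the TARGET_ASSIST direction flag and marks the SGE as
 * host-to-IOC; a fetch leaves both clear.  The hypothetical helper
 * returns the flag bits that the code above hands to
 * MPI_pSGE_SET_FLAGS().
 */
static uint32_t __unused
example_local_sge_flags(PTR_MSG_TARGET_ASSIST_REQUEST ta, int send)
{
	uint32_t flags;

	flags = MPI_SGE_FLAGS_SIMPLE_ELEMENT;
	if (send) {
		ta->TargetAssistFlags |= TARGET_ASSIST_FLAGS_DATA_DIRECTION;
		flags |= MPI_SGE_FLAGS_HOST_TO_IOC;
	}
	flags |= MPI_SGE_FLAGS_LAST_ELEMENT;
	flags |= MPI_SGE_FLAGS_END_OF_LIST | MPI_SGE_FLAGS_END_OF_BUFFER;
	return (flags);
}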
46162545bca0SMatthew Dillon 
46172545bca0SMatthew Dillon /*
46182545bca0SMatthew Dillon  * Abort queued up CCBs
46192545bca0SMatthew Dillon  */
46202545bca0SMatthew Dillon static cam_status
46212545bca0SMatthew Dillon mpt_abort_target_ccb(struct mpt_softc *mpt, union ccb *ccb)
46222545bca0SMatthew Dillon {
46232545bca0SMatthew Dillon 	struct mpt_hdr_stailq *lp;
46242545bca0SMatthew Dillon 	struct ccb_hdr *srch;
46252545bca0SMatthew Dillon 	int found = 0;
46262545bca0SMatthew Dillon 	union ccb *accb = ccb->cab.abort_ccb;
46272545bca0SMatthew Dillon 	tgt_resource_t *trtp;
46282545bca0SMatthew Dillon 
46292545bca0SMatthew Dillon 	mpt_lprt(mpt, MPT_PRT_DEBUG, "aborting ccb %p\n", accb);
46302545bca0SMatthew Dillon 
46312545bca0SMatthew Dillon 	if (ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) {
46322545bca0SMatthew Dillon 		trtp = &mpt->trt_wildcard;
46332545bca0SMatthew Dillon 	} else {
46342545bca0SMatthew Dillon 		trtp = &mpt->trt[ccb->ccb_h.target_lun];
46352545bca0SMatthew Dillon 	}
46362545bca0SMatthew Dillon 
46372545bca0SMatthew Dillon 	if (accb->ccb_h.func_code == XPT_ACCEPT_TARGET_IO) {
46382545bca0SMatthew Dillon 		lp = &trtp->atios;
46392545bca0SMatthew Dillon 	} else if (accb->ccb_h.func_code == XPT_IMMED_NOTIFY) {
46402545bca0SMatthew Dillon 		lp = &trtp->inots;
46412545bca0SMatthew Dillon 	} else {
46422545bca0SMatthew Dillon 		return (CAM_REQ_INVALID);
46432545bca0SMatthew Dillon 	}
46442545bca0SMatthew Dillon 
46452545bca0SMatthew Dillon 	STAILQ_FOREACH(srch, lp, sim_links.stqe) {
46462545bca0SMatthew Dillon 		if (srch == &accb->ccb_h) {
46472545bca0SMatthew Dillon 			found = 1;
46482545bca0SMatthew Dillon 			STAILQ_REMOVE(lp, srch, ccb_hdr, sim_links.stqe);
46492545bca0SMatthew Dillon 			break;
46502545bca0SMatthew Dillon 		}
46512545bca0SMatthew Dillon 	}
46522545bca0SMatthew Dillon 	if (found) {
46532545bca0SMatthew Dillon 		accb->ccb_h.status = CAM_REQ_ABORTED;
46542545bca0SMatthew Dillon 		xpt_done(accb);
46552545bca0SMatthew Dillon 		return (CAM_REQ_CMP);
46562545bca0SMatthew Dillon 	}
46572545bca0SMatthew Dillon 	mpt_prt(mpt, "mpt_abort_tgt_ccb: CCB %p not found\n", ccb);
46582545bca0SMatthew Dillon 	return (CAM_PATH_INVALID);
46592545bca0SMatthew Dillon }
46602545bca0SMatthew Dillon 
46612545bca0SMatthew Dillon /*
46622545bca0SMatthew Dillon  * Ask the MPT to abort the current target command
46632545bca0SMatthew Dillon  */
46642545bca0SMatthew Dillon static int
46652545bca0SMatthew Dillon mpt_abort_target_cmd(struct mpt_softc *mpt, request_t *cmd_req)
46662545bca0SMatthew Dillon {
46672545bca0SMatthew Dillon 	int error;
46682545bca0SMatthew Dillon 	request_t *req;
46692545bca0SMatthew Dillon 	PTR_MSG_TARGET_MODE_ABORT abtp;
46702545bca0SMatthew Dillon 
46712545bca0SMatthew Dillon 	req = mpt_get_request(mpt, FALSE);
46722545bca0SMatthew Dillon 	if (req == NULL) {
46732545bca0SMatthew Dillon 		return (-1);
46742545bca0SMatthew Dillon 	}
46752545bca0SMatthew Dillon 	abtp = req->req_vbuf;
46762545bca0SMatthew Dillon 	memset(abtp, 0, sizeof (*abtp));
46772545bca0SMatthew Dillon 
46782545bca0SMatthew Dillon 	abtp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
46792545bca0SMatthew Dillon 	abtp->AbortType = TARGET_MODE_ABORT_TYPE_EXACT_IO;
46802545bca0SMatthew Dillon 	abtp->Function = MPI_FUNCTION_TARGET_MODE_ABORT;
46812545bca0SMatthew Dillon 	abtp->ReplyWord = htole32(MPT_TGT_STATE(mpt, cmd_req)->reply_desc);
46822545bca0SMatthew Dillon 	error = 0;
46832545bca0SMatthew Dillon 	if (mpt->is_fc || mpt->is_sas) {
46842545bca0SMatthew Dillon 		mpt_send_cmd(mpt, req);
46852545bca0SMatthew Dillon 	} else {
46862545bca0SMatthew Dillon 		error = mpt_send_handshake_cmd(mpt, sizeof(*req), req);
46872545bca0SMatthew Dillon 	}
46882545bca0SMatthew Dillon 	return (error);
46892545bca0SMatthew Dillon }
46902545bca0SMatthew Dillon 
46912545bca0SMatthew Dillon /*
46922545bca0SMatthew Dillon  * WE_TRUST_AUTO_GOOD_STATUS- I've found that setting
46932545bca0SMatthew Dillon  * TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS leads the
46942545bca0SMatthew Dillon  * FC929 to set bogus FC_RSP fields (nonzero residuals
46952545bca0SMatthew Dillon  * but w/o RESID fields set). This causes QLogic initiators
46962545bca0SMatthew Dillon  * to think maybe that a frame was lost.
46972545bca0SMatthew Dillon  *
46982545bca0SMatthew Dillon  * WE_CAN_USE_AUTO_REPOST- we can't use AUTO_REPOST because
46992545bca0SMatthew Dillon  * we use allocated requests to do TARGET_ASSIST and we
47002545bca0SMatthew Dillon  * need to know when to release them.
47012545bca0SMatthew Dillon  */
47022545bca0SMatthew Dillon 
47032545bca0SMatthew Dillon static void
47042545bca0SMatthew Dillon mpt_scsi_tgt_status(struct mpt_softc *mpt, union ccb *ccb, request_t *cmd_req,
47052545bca0SMatthew Dillon     uint8_t status, uint8_t const *sense_data)
47062545bca0SMatthew Dillon {
47072545bca0SMatthew Dillon 	uint8_t *cmd_vbuf;
47082545bca0SMatthew Dillon 	mpt_tgt_state_t *tgt;
47092545bca0SMatthew Dillon 	PTR_MSG_TARGET_STATUS_SEND_REQUEST tp;
47102545bca0SMatthew Dillon 	request_t *req;
47112545bca0SMatthew Dillon 	bus_addr_t paddr;
47122545bca0SMatthew Dillon 	int resplen = 0;
47132545bca0SMatthew Dillon 	uint32_t fl;
47142545bca0SMatthew Dillon 
47152545bca0SMatthew Dillon 	cmd_vbuf = cmd_req->req_vbuf;
47162545bca0SMatthew Dillon 	cmd_vbuf += MPT_RQSL(mpt);
47172545bca0SMatthew Dillon 	tgt = MPT_TGT_STATE(mpt, cmd_req);
47182545bca0SMatthew Dillon 
47192545bca0SMatthew Dillon 	if ((req = mpt_get_request(mpt, FALSE)) == NULL) {
47202545bca0SMatthew Dillon 		if (mpt->outofbeer == 0) {
47212545bca0SMatthew Dillon 			mpt->outofbeer = 1;
47222545bca0SMatthew Dillon 			xpt_freeze_simq(mpt->sim, 1);
47232545bca0SMatthew Dillon 			mpt_lprt(mpt, MPT_PRT_DEBUG, "FREEZEQ\n");
47242545bca0SMatthew Dillon 		}
47252545bca0SMatthew Dillon 		if (ccb) {
47262545bca0SMatthew Dillon 			ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
47272545bca0SMatthew Dillon 			mpt_set_ccb_status(ccb, CAM_REQUEUE_REQ);
47282545bca0SMatthew Dillon 			xpt_done(ccb);
47292545bca0SMatthew Dillon 		} else {
47302545bca0SMatthew Dillon 			mpt_prt(mpt,
47312545bca0SMatthew Dillon 			    "could not allocate status request- dropping\n");
47322545bca0SMatthew Dillon 		}
47332545bca0SMatthew Dillon 		return;
47342545bca0SMatthew Dillon 	}
47352545bca0SMatthew Dillon 	req->ccb = ccb;
47362545bca0SMatthew Dillon 	if (ccb) {
47372545bca0SMatthew Dillon 		ccb->ccb_h.ccb_mpt_ptr = mpt;
47382545bca0SMatthew Dillon 		ccb->ccb_h.ccb_req_ptr = req;
47392545bca0SMatthew Dillon 	}
47402545bca0SMatthew Dillon 
47412545bca0SMatthew Dillon 	/*
47422545bca0SMatthew Dillon 	 * Record the currently active ccb, if any, and the
47432545bca0SMatthew Dillon 	 * request for it in our target state area.
47442545bca0SMatthew Dillon 	 */
47452545bca0SMatthew Dillon 	tgt->ccb = ccb;
47462545bca0SMatthew Dillon 	tgt->req = req;
47472545bca0SMatthew Dillon 	tgt->state = TGT_STATE_SENDING_STATUS;
47482545bca0SMatthew Dillon 
47492545bca0SMatthew Dillon 	tp = req->req_vbuf;
47502545bca0SMatthew Dillon 	paddr = req->req_pbuf;
47512545bca0SMatthew Dillon 	paddr += MPT_RQSL(mpt);
47522545bca0SMatthew Dillon 
47532545bca0SMatthew Dillon 	memset(tp, 0, sizeof (*tp));
47542545bca0SMatthew Dillon 	tp->Function = MPI_FUNCTION_TARGET_STATUS_SEND;
47552545bca0SMatthew Dillon 	if (mpt->is_fc) {
47562545bca0SMatthew Dillon 		PTR_MPI_TARGET_FCP_CMD_BUFFER fc =
47572545bca0SMatthew Dillon 		    (PTR_MPI_TARGET_FCP_CMD_BUFFER) cmd_vbuf;
47582545bca0SMatthew Dillon 		uint8_t *sts_vbuf;
47592545bca0SMatthew Dillon 		uint32_t *rsp;
47602545bca0SMatthew Dillon 
47612545bca0SMatthew Dillon 		sts_vbuf = req->req_vbuf;
47622545bca0SMatthew Dillon 		sts_vbuf += MPT_RQSL(mpt);
47632545bca0SMatthew Dillon 		rsp = (uint32_t *) sts_vbuf;
47642545bca0SMatthew Dillon 		memcpy(tp->LUN, fc->FcpLun, sizeof (tp->LUN));
47652545bca0SMatthew Dillon 
47662545bca0SMatthew Dillon 		/*
47672545bca0SMatthew Dillon 		 * The MPI_TARGET_FCP_RSP_BUFFER define is unfortunate.
47682545bca0SMatthew Dillon 		 * It has to be big-endian in memory and is organized
47692545bca0SMatthew Dillon 		 * in 32 bit words, which are much easier to deal with
47702545bca0SMatthew Dillon 		 * as words which are swizzled as needed.
47712545bca0SMatthew Dillon 		 *
47722545bca0SMatthew Dillon 		 * All we're filling here is the FC_RSP payload.
47732545bca0SMatthew Dillon 		 * We may just have the chip synthesize it if
47742545bca0SMatthew Dillon 		 * we have no residual and an OK status.
47752545bca0SMatthew Dillon 		 *
47762545bca0SMatthew Dillon 		 */
47772545bca0SMatthew Dillon 		memset(rsp, 0, sizeof (MPI_TARGET_FCP_RSP_BUFFER));
47782545bca0SMatthew Dillon 
47792545bca0SMatthew Dillon 		rsp[2] = status;
47802545bca0SMatthew Dillon 		if (tgt->resid) {
47812545bca0SMatthew Dillon 			rsp[2] |= 0x800;	/* XXXX NEED MNEMONIC!!!! */
47822545bca0SMatthew Dillon 			rsp[3] = htobe32(tgt->resid);
47832545bca0SMatthew Dillon #ifdef	WE_TRUST_AUTO_GOOD_STATUS
47842545bca0SMatthew Dillon 			resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
47852545bca0SMatthew Dillon #endif
47862545bca0SMatthew Dillon 		}
47872545bca0SMatthew Dillon 		if (status == SCSI_STATUS_CHECK_COND) {
47882545bca0SMatthew Dillon 			int i;
47892545bca0SMatthew Dillon 
47902545bca0SMatthew Dillon 			rsp[2] |= 0x200;	/* XXXX NEED MNEMONIC!!!! */
47912545bca0SMatthew Dillon 			rsp[4] = htobe32(MPT_SENSE_SIZE);
47922545bca0SMatthew Dillon 			if (sense_data) {
47932545bca0SMatthew Dillon 				memcpy(&rsp[8], sense_data, MPT_SENSE_SIZE);
47942545bca0SMatthew Dillon 			} else {
47952545bca0SMatthew Dillon 				mpt_prt(mpt, "mpt_scsi_tgt_status: CHECK CONDI"
47962545bca0SMatthew Dillon 				    "TION but no sense data?\n");
4797bc238f04SMatthew Dillon 				memset(&rsp[8], 0, MPT_SENSE_SIZE);
47982545bca0SMatthew Dillon 			}
47992545bca0SMatthew Dillon 			for (i = 8; i < (8 + (MPT_SENSE_SIZE >> 2)); i++) {
48002545bca0SMatthew Dillon 				rsp[i] = htobe32(rsp[i]);
48012545bca0SMatthew Dillon 			}
48022545bca0SMatthew Dillon #ifdef	WE_TRUST_AUTO_GOOD_STATUS
48032545bca0SMatthew Dillon 			resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
48042545bca0SMatthew Dillon #endif
48052545bca0SMatthew Dillon 		}
48062545bca0SMatthew Dillon #ifndef	WE_TRUST_AUTO_GOOD_STATUS
48072545bca0SMatthew Dillon 		resplen = sizeof (MPI_TARGET_FCP_RSP_BUFFER);
48082545bca0SMatthew Dillon #endif
48092545bca0SMatthew Dillon 		rsp[2] = htobe32(rsp[2]);
48102545bca0SMatthew Dillon 	} else if (mpt->is_sas) {
48112545bca0SMatthew Dillon 		PTR_MPI_TARGET_SSP_CMD_BUFFER ssp =
48122545bca0SMatthew Dillon 		    (PTR_MPI_TARGET_SSP_CMD_BUFFER) cmd_vbuf;
48132545bca0SMatthew Dillon 		memcpy(tp->LUN, ssp->LogicalUnitNumber, sizeof (tp->LUN));
48142545bca0SMatthew Dillon 	} else {
48152545bca0SMatthew Dillon 		PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp =
48162545bca0SMatthew Dillon 		    (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) cmd_vbuf;
48172545bca0SMatthew Dillon 		tp->StatusCode = status;
48182545bca0SMatthew Dillon 		tp->QueueTag = htole16(sp->Tag);
48192545bca0SMatthew Dillon 		memcpy(tp->LUN, sp->LogicalUnitNumber, sizeof (tp->LUN));
48202545bca0SMatthew Dillon 	}
48212545bca0SMatthew Dillon 
48222545bca0SMatthew Dillon 	tp->ReplyWord = htole32(tgt->reply_desc);
48232545bca0SMatthew Dillon 	tp->MsgContext = htole32(req->index | mpt->scsi_tgt_handler_id);
48242545bca0SMatthew Dillon 
48252545bca0SMatthew Dillon #ifdef	WE_CAN_USE_AUTO_REPOST
48262545bca0SMatthew Dillon 	tp->MsgFlags = TARGET_STATUS_SEND_FLAGS_REPOST_CMD_BUFFER;
48272545bca0SMatthew Dillon #endif
48282545bca0SMatthew Dillon 	if (status == SCSI_STATUS_OK && resplen == 0) {
48292545bca0SMatthew Dillon 		tp->MsgFlags |= TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS;
48302545bca0SMatthew Dillon 	} else {
48312545bca0SMatthew Dillon 		tp->StatusDataSGE.u.Address32 = htole32((uint32_t) paddr);
48322545bca0SMatthew Dillon 		fl =
48332545bca0SMatthew Dillon 			MPI_SGE_FLAGS_HOST_TO_IOC	|
48342545bca0SMatthew Dillon 			MPI_SGE_FLAGS_SIMPLE_ELEMENT	|
48352545bca0SMatthew Dillon 			MPI_SGE_FLAGS_LAST_ELEMENT	|
48362545bca0SMatthew Dillon 			MPI_SGE_FLAGS_END_OF_LIST	|
48372545bca0SMatthew Dillon 			MPI_SGE_FLAGS_END_OF_BUFFER;
48382545bca0SMatthew Dillon 		fl <<= MPI_SGE_FLAGS_SHIFT;
48392545bca0SMatthew Dillon 		fl |= resplen;
48402545bca0SMatthew Dillon 		tp->StatusDataSGE.FlagsLength = htole32(fl);
48412545bca0SMatthew Dillon 	}
48422545bca0SMatthew Dillon 
48432545bca0SMatthew Dillon 	mpt_lprt(mpt, MPT_PRT_DEBUG,
48442545bca0SMatthew Dillon 	    "STATUS_CCB %p (wit%s sense) tag %x req %p:%u resid %u\n",
48452545bca0SMatthew Dillon 	    ccb, sense_data?"h" : "hout", ccb? ccb->csio.tag_id : -1, req,
48462545bca0SMatthew Dillon 	    req->serno, tgt->resid);
48472545bca0SMatthew Dillon 	if (ccb) {
48482545bca0SMatthew Dillon 		ccb->ccb_h.status = CAM_SIM_QUEUED | CAM_REQ_INPROG;
48492545bca0SMatthew Dillon 		mpt_req_timeout(req, 60 * hz, mpt_timeout, ccb);
48502545bca0SMatthew Dillon 	}
48512545bca0SMatthew Dillon 	mpt_send_cmd(mpt, req);
48522545bca0SMatthew Dillon }
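
/*
 * Illustrative sketch (not part of the driver): how the FC status path
 * in mpt_scsi_tgt_status() above builds the FCP_RSP words in host order
 * and byte-swaps them to big-endian at the end.  The 0x800 and 0x200
 * bits appear to land in the FCP_FLAGS byte after the swap (residual
 * valid and sense-length valid, respectively) -- that reading is an
 * inference from the code, which itself only marks them "NEED MNEMONIC".
 */
static void __unused
example_fill_fcp_rsp(uint32_t *rsp, uint8_t scsi_status, uint32_t resid)
{

	memset(rsp, 0, sizeof (MPI_TARGET_FCP_RSP_BUFFER));
	rsp[2] = scsi_status;
	if (resid != 0) {
		rsp[2] |= 0x800;	/* residual field is valid */
		rsp[3] = htobe32(resid);
	}
	rsp[2] = htobe32(rsp[2]);
}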
48532545bca0SMatthew Dillon 
48542545bca0SMatthew Dillon static void
48552545bca0SMatthew Dillon mpt_scsi_tgt_tsk_mgmt(struct mpt_softc *mpt, request_t *req, mpt_task_mgmt_t fc,
48562545bca0SMatthew Dillon     tgt_resource_t *trtp, int init_id)
48572545bca0SMatthew Dillon {
48582545bca0SMatthew Dillon 	struct ccb_immed_notify *inot;
48592545bca0SMatthew Dillon 	mpt_tgt_state_t *tgt;
48602545bca0SMatthew Dillon 
48612545bca0SMatthew Dillon 	tgt = MPT_TGT_STATE(mpt, req);
48622545bca0SMatthew Dillon 	inot = (struct ccb_immed_notify *) STAILQ_FIRST(&trtp->inots);
48632545bca0SMatthew Dillon 	if (inot == NULL) {
48642545bca0SMatthew Dillon 		mpt_lprt(mpt, MPT_PRT_WARN, "no INOTs- sending back BSY\n");
48652545bca0SMatthew Dillon 		mpt_scsi_tgt_status(mpt, NULL, req, SCSI_STATUS_BUSY, NULL);
48662545bca0SMatthew Dillon 		return;
48672545bca0SMatthew Dillon 	}
48682545bca0SMatthew Dillon 	STAILQ_REMOVE_HEAD(&trtp->inots, sim_links.stqe);
48692545bca0SMatthew Dillon 	mpt_lprt(mpt, MPT_PRT_DEBUG1,
48702545bca0SMatthew Dillon 	    "Get FREE INOT %p lun %d\n", inot, inot->ccb_h.target_lun);
48712545bca0SMatthew Dillon 
48722545bca0SMatthew Dillon 	memset(&inot->sense_data, 0, sizeof (inot->sense_data));
48732545bca0SMatthew Dillon 	inot->sense_len = 0;
48742545bca0SMatthew Dillon 	memset(inot->message_args, 0, sizeof (inot->message_args));
48752545bca0SMatthew Dillon 	inot->initiator_id = init_id;	/* XXX */
48762545bca0SMatthew Dillon 
48772545bca0SMatthew Dillon 	/*
48782545bca0SMatthew Dillon 	 * This is a somewhat grotesque attempt to map from task management
48792545bca0SMatthew Dillon 	 * to old style SCSI messages. God help us all.
48802545bca0SMatthew Dillon 	 */
48812545bca0SMatthew Dillon 	switch (fc) {
48822545bca0SMatthew Dillon 	case MPT_ABORT_TASK_SET:
48832545bca0SMatthew Dillon 		inot->message_args[0] = MSG_ABORT_TAG;
48842545bca0SMatthew Dillon 		break;
48852545bca0SMatthew Dillon 	case MPT_CLEAR_TASK_SET:
48862545bca0SMatthew Dillon 		inot->message_args[0] = MSG_CLEAR_TASK_SET;
48872545bca0SMatthew Dillon 		break;
48882545bca0SMatthew Dillon 	case MPT_TARGET_RESET:
48892545bca0SMatthew Dillon 		inot->message_args[0] = MSG_TARGET_RESET;
48902545bca0SMatthew Dillon 		break;
48912545bca0SMatthew Dillon 	case MPT_CLEAR_ACA:
48922545bca0SMatthew Dillon 		inot->message_args[0] = MSG_CLEAR_ACA;
48932545bca0SMatthew Dillon 		break;
48942545bca0SMatthew Dillon 	case MPT_TERMINATE_TASK:
48952545bca0SMatthew Dillon 		inot->message_args[0] = MSG_ABORT_TAG;
48962545bca0SMatthew Dillon 		break;
48972545bca0SMatthew Dillon 	default:
48982545bca0SMatthew Dillon 		inot->message_args[0] = MSG_NOOP;
48992545bca0SMatthew Dillon 		break;
49002545bca0SMatthew Dillon 	}
49012545bca0SMatthew Dillon 	tgt->ccb = (union ccb *) inot;
49022545bca0SMatthew Dillon 	inot->ccb_h.status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN;
49032545bca0SMatthew Dillon 	xpt_done((union ccb *)inot);
49042545bca0SMatthew Dillon }
49052545bca0SMatthew Dillon 
49062545bca0SMatthew Dillon static void
49072545bca0SMatthew Dillon mpt_scsi_tgt_atio(struct mpt_softc *mpt, request_t *req, uint32_t reply_desc)
49082545bca0SMatthew Dillon {
49092545bca0SMatthew Dillon 	static uint8_t null_iqd[SHORT_INQUIRY_LENGTH] = {
49102545bca0SMatthew Dillon 	    0x7f, 0x00, 0x02, 0x02, 0x20, 0x00, 0x00, 0x32,
49112545bca0SMatthew Dillon 	     'F',  'R',  'E',  'E',  'B',  'S',  'D',  ' ',
49122545bca0SMatthew Dillon 	     'L',  'S',  'I',  '-',  'L',  'O',  'G',  'I',
49132545bca0SMatthew Dillon 	     'C',  ' ',  'N',  'U',  'L',  'D',  'E',  'V',
49142545bca0SMatthew Dillon 	     '0',  '0',  '0',  '1'
49152545bca0SMatthew Dillon 	};
49162545bca0SMatthew Dillon 	struct ccb_accept_tio *atiop;
49172545bca0SMatthew Dillon 	lun_id_t lun;
49182545bca0SMatthew Dillon 	int tag_action = 0;
49192545bca0SMatthew Dillon 	mpt_tgt_state_t *tgt;
49202545bca0SMatthew Dillon 	tgt_resource_t *trtp = NULL;
49212545bca0SMatthew Dillon 	U8 *lunptr;
49222545bca0SMatthew Dillon 	U8 *vbuf;
49232545bca0SMatthew Dillon 	U16 itag;
49242545bca0SMatthew Dillon 	U16 ioindex;
49252545bca0SMatthew Dillon 	mpt_task_mgmt_t fct = MPT_NIL_TMT_VALUE;
49262545bca0SMatthew Dillon 	uint8_t *cdbp;
49272545bca0SMatthew Dillon 
49282545bca0SMatthew Dillon 	/*
49292545bca0SMatthew Dillon 	 * Stash info for the current command where we can get at it later.
49302545bca0SMatthew Dillon 	 */
49312545bca0SMatthew Dillon 	vbuf = req->req_vbuf;
49322545bca0SMatthew Dillon 	vbuf += MPT_RQSL(mpt);
49332545bca0SMatthew Dillon 
49342545bca0SMatthew Dillon 	/*
49352545bca0SMatthew Dillon 	 * Get our state pointer set up.
49362545bca0SMatthew Dillon 	 */
49372545bca0SMatthew Dillon 	tgt = MPT_TGT_STATE(mpt, req);
49382545bca0SMatthew Dillon 	if (tgt->state != TGT_STATE_LOADED) {
49392545bca0SMatthew Dillon 		mpt_tgt_dump_req_state(mpt, req);
49402545bca0SMatthew Dillon 		panic("bad target state in mpt_scsi_tgt_atio");
49412545bca0SMatthew Dillon 	}
49422545bca0SMatthew Dillon 	memset(tgt, 0, sizeof (mpt_tgt_state_t));
49432545bca0SMatthew Dillon 	tgt->state = TGT_STATE_IN_CAM;
49442545bca0SMatthew Dillon 	tgt->reply_desc = reply_desc;
49452545bca0SMatthew Dillon 	ioindex = GET_IO_INDEX(reply_desc);
49462545bca0SMatthew Dillon 	if (mpt->verbose >= MPT_PRT_DEBUG) {
49472545bca0SMatthew Dillon 		mpt_dump_data(mpt, "mpt_scsi_tgt_atio response", vbuf,
49482545bca0SMatthew Dillon 		    max(sizeof (MPI_TARGET_FCP_CMD_BUFFER),
49492545bca0SMatthew Dillon 		    max(sizeof (MPI_TARGET_SSP_CMD_BUFFER),
49502545bca0SMatthew Dillon 		    sizeof (MPI_TARGET_SCSI_SPI_CMD_BUFFER))));
49512545bca0SMatthew Dillon 	}
49522545bca0SMatthew Dillon 	if (mpt->is_fc) {
49532545bca0SMatthew Dillon 		PTR_MPI_TARGET_FCP_CMD_BUFFER fc;
49542545bca0SMatthew Dillon 		fc = (PTR_MPI_TARGET_FCP_CMD_BUFFER) vbuf;
49552545bca0SMatthew Dillon 		if (fc->FcpCntl[2]) {
49562545bca0SMatthew Dillon 			/*
49572545bca0SMatthew Dillon 			 * Task Management Request
49582545bca0SMatthew Dillon 			 */
49592545bca0SMatthew Dillon 			switch (fc->FcpCntl[2]) {
49602545bca0SMatthew Dillon 			case 0x2:
49612545bca0SMatthew Dillon 				fct = MPT_ABORT_TASK_SET;
49622545bca0SMatthew Dillon 				break;
49632545bca0SMatthew Dillon 			case 0x4:
49642545bca0SMatthew Dillon 				fct = MPT_CLEAR_TASK_SET;
49652545bca0SMatthew Dillon 				break;
49662545bca0SMatthew Dillon 			case 0x20:
49672545bca0SMatthew Dillon 				fct = MPT_TARGET_RESET;
49682545bca0SMatthew Dillon 				break;
49692545bca0SMatthew Dillon 			case 0x40:
49702545bca0SMatthew Dillon 				fct = MPT_CLEAR_ACA;
49712545bca0SMatthew Dillon 				break;
49722545bca0SMatthew Dillon 			case 0x80:
49732545bca0SMatthew Dillon 				fct = MPT_TERMINATE_TASK;
49742545bca0SMatthew Dillon 				break;
49752545bca0SMatthew Dillon 			default:
49762545bca0SMatthew Dillon 				mpt_prt(mpt, "CORRUPTED TASK MGMT BITS: 0x%x\n",
49772545bca0SMatthew Dillon 				    fc->FcpCntl[2]);
49782545bca0SMatthew Dillon 				mpt_scsi_tgt_status(mpt, 0, req,
49792545bca0SMatthew Dillon 				    SCSI_STATUS_OK, 0);
49802545bca0SMatthew Dillon 				return;
49812545bca0SMatthew Dillon 			}
49822545bca0SMatthew Dillon 		} else {
49832545bca0SMatthew Dillon 			switch (fc->FcpCntl[1]) {
49842545bca0SMatthew Dillon 			case 0:
49852545bca0SMatthew Dillon 				tag_action = MSG_SIMPLE_Q_TAG;
49862545bca0SMatthew Dillon 				break;
49872545bca0SMatthew Dillon 			case 1:
49882545bca0SMatthew Dillon 				tag_action = MSG_HEAD_OF_Q_TAG;
49892545bca0SMatthew Dillon 				break;
49902545bca0SMatthew Dillon 			case 2:
49912545bca0SMatthew Dillon 				tag_action = MSG_ORDERED_Q_TAG;
49922545bca0SMatthew Dillon 				break;
49932545bca0SMatthew Dillon 			default:
49942545bca0SMatthew Dillon 				/*
49952545bca0SMatthew Dillon 				 * Bah. Ignore Untagged Queueing and ACA
49962545bca0SMatthew Dillon 				 */
49972545bca0SMatthew Dillon 				tag_action = MSG_SIMPLE_Q_TAG;
49982545bca0SMatthew Dillon 				break;
49992545bca0SMatthew Dillon 			}
50002545bca0SMatthew Dillon 		}
50012545bca0SMatthew Dillon 		tgt->resid = be32toh(fc->FcpDl);
50022545bca0SMatthew Dillon 		cdbp = fc->FcpCdb;
50032545bca0SMatthew Dillon 		lunptr = fc->FcpLun;
50042545bca0SMatthew Dillon 		itag = be16toh(fc->OptionalOxid);
50052545bca0SMatthew Dillon 	} else if (mpt->is_sas) {
50062545bca0SMatthew Dillon 		PTR_MPI_TARGET_SSP_CMD_BUFFER ssp;
50072545bca0SMatthew Dillon 		ssp = (PTR_MPI_TARGET_SSP_CMD_BUFFER) vbuf;
50082545bca0SMatthew Dillon 		cdbp = ssp->CDB;
50092545bca0SMatthew Dillon 		lunptr = ssp->LogicalUnitNumber;
50102545bca0SMatthew Dillon 		itag = ssp->InitiatorTag;
50112545bca0SMatthew Dillon 	} else {
50122545bca0SMatthew Dillon 		PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER sp;
50132545bca0SMatthew Dillon 		sp = (PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER) vbuf;
50142545bca0SMatthew Dillon 		cdbp = sp->CDB;
50152545bca0SMatthew Dillon 		lunptr = sp->LogicalUnitNumber;
50162545bca0SMatthew Dillon 		itag = sp->Tag;
50172545bca0SMatthew Dillon 	}
50182545bca0SMatthew Dillon 
50192545bca0SMatthew Dillon 	/*
50202545bca0SMatthew Dillon 	 * Generate a simple lun
50212545bca0SMatthew Dillon 	 */
50222545bca0SMatthew Dillon 	switch (lunptr[0] & 0xc0) {
50232545bca0SMatthew Dillon 	case 0x40:
50242545bca0SMatthew Dillon 		lun = ((lunptr[0] & 0x3f) << 8) | lunptr[1];
50252545bca0SMatthew Dillon 		break;
50262545bca0SMatthew Dillon 	case 0:
50272545bca0SMatthew Dillon 		lun = lunptr[1];
50282545bca0SMatthew Dillon 		break;
50292545bca0SMatthew Dillon 	default:
50302545bca0SMatthew Dillon 		mpt_lprt(mpt, MPT_PRT_ERROR, "cannot handle this type lun\n");
50312545bca0SMatthew Dillon 		lun = 0xffff;
50322545bca0SMatthew Dillon 		break;
50332545bca0SMatthew Dillon 	}
50342545bca0SMatthew Dillon 
50352545bca0SMatthew Dillon 	/*
50362545bca0SMatthew Dillon 	 * Deal with non-enabled or bad luns here.
50372545bca0SMatthew Dillon 	 */
50382545bca0SMatthew Dillon 	if (lun >= MPT_MAX_LUNS || mpt->tenabled == 0 ||
50392545bca0SMatthew Dillon 	    mpt->trt[lun].enabled == 0) {
50402545bca0SMatthew Dillon 		if (mpt->twildcard) {
50412545bca0SMatthew Dillon 			trtp = &mpt->trt_wildcard;
50422545bca0SMatthew Dillon 		} else if (fct == MPT_NIL_TMT_VALUE) {
50432545bca0SMatthew Dillon 			/*
50442545bca0SMatthew Dillon 			 * In this case, we haven't got an upstream listener
50452545bca0SMatthew Dillon 			 * for either a specific lun or wildcard luns. We
50462545bca0SMatthew Dillon 			 * have to make some sensible response. For regular
50472545bca0SMatthew Dillon 			 * inquiry, just return some NOT HERE inquiry data.
50482545bca0SMatthew Dillon 			 * For VPD inquiry, report illegal field in cdb.
50492545bca0SMatthew Dillon 			 * For REQUEST SENSE, just return NO SENSE data.
50502545bca0SMatthew Dillon 			 * REPORT LUNS gets illegal command.
50512545bca0SMatthew Dillon 			 * All other commands get 'no such device'.
50522545bca0SMatthew Dillon 			 */
50532545bca0SMatthew Dillon 			uint8_t *sp, cond, buf[MPT_SENSE_SIZE];
50542545bca0SMatthew Dillon 			size_t len;
50552545bca0SMatthew Dillon 
50562545bca0SMatthew Dillon 			memset(buf, 0, MPT_SENSE_SIZE);
50572545bca0SMatthew Dillon 			cond = SCSI_STATUS_CHECK_COND;
50582545bca0SMatthew Dillon 			buf[0] = 0xf0;
50592545bca0SMatthew Dillon 			buf[2] = 0x5;
50602545bca0SMatthew Dillon 			buf[7] = 0x8;
50612545bca0SMatthew Dillon 			sp = buf;
50622545bca0SMatthew Dillon 			tgt->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex);
50632545bca0SMatthew Dillon 
50642545bca0SMatthew Dillon 			switch (cdbp[0]) {
50652545bca0SMatthew Dillon 			case INQUIRY:
50662545bca0SMatthew Dillon 			{
50672545bca0SMatthew Dillon 				if (cdbp[1] != 0) {
50682545bca0SMatthew Dillon 					buf[12] = 0x26;
50692545bca0SMatthew Dillon 					buf[13] = 0x01;
50702545bca0SMatthew Dillon 					break;
50712545bca0SMatthew Dillon 				}
50722545bca0SMatthew Dillon 				len = min(tgt->resid, cdbp[4]);
50732545bca0SMatthew Dillon 				len = min(len, sizeof (null_iqd));
50742545bca0SMatthew Dillon 				mpt_lprt(mpt, MPT_PRT_DEBUG,
50752545bca0SMatthew Dillon 				    "local inquiry %ld bytes\n", (long) len);
50762545bca0SMatthew Dillon 				mpt_scsi_tgt_local(mpt, req, lun, 1,
50772545bca0SMatthew Dillon 				    null_iqd, len);
50782545bca0SMatthew Dillon 				return;
50792545bca0SMatthew Dillon 			}
50802545bca0SMatthew Dillon 			case REQUEST_SENSE:
50812545bca0SMatthew Dillon 			{
50822545bca0SMatthew Dillon 				buf[2] = 0x0;
50832545bca0SMatthew Dillon 				len = min(tgt->resid, cdbp[4]);
50842545bca0SMatthew Dillon 				len = min(len, sizeof (buf));
50852545bca0SMatthew Dillon 				mpt_lprt(mpt, MPT_PRT_DEBUG,
50862545bca0SMatthew Dillon 				    "local reqsense %ld bytes\n", (long) len);
50872545bca0SMatthew Dillon 				mpt_scsi_tgt_local(mpt, req, lun, 1,
50882545bca0SMatthew Dillon 				    buf, len);
50892545bca0SMatthew Dillon 				return;
50902545bca0SMatthew Dillon 			}
50912545bca0SMatthew Dillon 			case REPORT_LUNS:
50922545bca0SMatthew Dillon 				mpt_lprt(mpt, MPT_PRT_DEBUG, "REPORT LUNS\n");
50932545bca0SMatthew Dillon 				buf[12] = 0x26;
50942545bca0SMatthew Dillon 				return;
50952545bca0SMatthew Dillon 			default:
50962545bca0SMatthew Dillon 				mpt_lprt(mpt, MPT_PRT_DEBUG,
50972545bca0SMatthew Dillon 				    "CMD 0x%x to unmanaged lun %u\n",
50982545bca0SMatthew Dillon 				    cdbp[0], lun);
50992545bca0SMatthew Dillon 				buf[12] = 0x25;
51002545bca0SMatthew Dillon 				break;
51012545bca0SMatthew Dillon 			}
51022545bca0SMatthew Dillon 			mpt_scsi_tgt_status(mpt, NULL, req, cond, sp);
51032545bca0SMatthew Dillon 			return;
51042545bca0SMatthew Dillon 		}
51052545bca0SMatthew Dillon 		/* otherwise, leave trtp NULL */
51062545bca0SMatthew Dillon 	} else {
51072545bca0SMatthew Dillon 		trtp = &mpt->trt[lun];
51082545bca0SMatthew Dillon 	}
51092545bca0SMatthew Dillon 
51102545bca0SMatthew Dillon 	/*
51112545bca0SMatthew Dillon 	 * Deal with any task management
51122545bca0SMatthew Dillon 	 */
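	/*
	 * A non-NIL fct means the frame carried a task management
	 * function rather than a CDB.  It is forwarded to the
	 * registered listener, or completed with GOOD status if
	 * nobody is listening.
	 */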
51132545bca0SMatthew Dillon 	if (fct != MPT_NIL_TMT_VALUE) {
51142545bca0SMatthew Dillon 		if (trtp == NULL) {
51152545bca0SMatthew Dillon 			mpt_prt(mpt, "task mgmt function %x but no listener\n",
51162545bca0SMatthew Dillon 			    fct);
51172545bca0SMatthew Dillon 			mpt_scsi_tgt_status(mpt, 0, req,
51182545bca0SMatthew Dillon 			    SCSI_STATUS_OK, 0);
51192545bca0SMatthew Dillon 		} else {
51202545bca0SMatthew Dillon 			mpt_scsi_tgt_tsk_mgmt(mpt, req, fct, trtp,
51212545bca0SMatthew Dillon 			    GET_INITIATOR_INDEX(reply_desc));
51222545bca0SMatthew Dillon 		}
51232545bca0SMatthew Dillon 		return;
51242545bca0SMatthew Dillon 	}
51252545bca0SMatthew Dillon 
51262545bca0SMatthew Dillon 
51272545bca0SMatthew Dillon 	atiop = (struct ccb_accept_tio *) STAILQ_FIRST(&trtp->atios);
51282545bca0SMatthew Dillon 	if (atiop == NULL) {
51292545bca0SMatthew Dillon 		mpt_lprt(mpt, MPT_PRT_WARN,
51302545bca0SMatthew Dillon 		    "no ATIOs for lun %u - sending back %s\n", lun,
51312545bca0SMatthew Dillon 		    mpt->tenabled? "QUEUE FULL" : "BUSY");
51322545bca0SMatthew Dillon 		mpt_scsi_tgt_status(mpt, NULL, req,
51332545bca0SMatthew Dillon 		    mpt->tenabled? SCSI_STATUS_QUEUE_FULL : SCSI_STATUS_BUSY,
51342545bca0SMatthew Dillon 		    NULL);
51352545bca0SMatthew Dillon 		return;
51362545bca0SMatthew Dillon 	}
51372545bca0SMatthew Dillon 	STAILQ_REMOVE_HEAD(&trtp->atios, sim_links.stqe);
51382545bca0SMatthew Dillon 	mpt_lprt(mpt, MPT_PRT_DEBUG1,
51392545bca0SMatthew Dillon 	    "Get FREE ATIO %p lun %d\n", atiop, atiop->ccb_h.target_lun);
51402545bca0SMatthew Dillon 	atiop->ccb_h.ccb_mpt_ptr = mpt;
51412545bca0SMatthew Dillon 	atiop->ccb_h.status = CAM_CDB_RECVD;
51422545bca0SMatthew Dillon 	atiop->ccb_h.target_lun = lun;
51432545bca0SMatthew Dillon 	atiop->sense_len = 0;
51442545bca0SMatthew Dillon 	atiop->init_id = GET_INITIATOR_INDEX(reply_desc);
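	/*
	 * The CDB length follows from the opcode's SCSI group code
	 * (6, 10, 12 or 16 bytes); 16 is the most the transport's
	 * command buffer can hold.
	 */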
51452545bca0SMatthew Dillon 	atiop->cdb_len = mpt_cdblen(cdbp[0], 16);
51462545bca0SMatthew Dillon 	memcpy(atiop->cdb_io.cdb_bytes, cdbp, atiop->cdb_len);
51472545bca0SMatthew Dillon 
51482545bca0SMatthew Dillon 	/*
51492545bca0SMatthew Dillon 	 * The tag we construct here allows us to find the
51502545bca0SMatthew Dillon 	 * original request that the command came in with.
51512545bca0SMatthew Dillon 	 *
51522545bca0SMatthew Dillon 	 * This way we don't have to depend on anything but the
51532545bca0SMatthew Dillon 	 * tag to find things when CCBs show back up from CAM.
51542545bca0SMatthew Dillon 	 */
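	/*
	 * The same tag is mirrored into tgt->tag_id so CTIOs coming
	 * back down from CAM can be matched to this command buffer
	 * without any additional bookkeeping.
	 */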
51552545bca0SMatthew Dillon 	atiop->tag_id = MPT_MAKE_TAGID(mpt, req, ioindex);
51562545bca0SMatthew Dillon 	tgt->tag_id = atiop->tag_id;
51572545bca0SMatthew Dillon 	if (tag_action) {
51582545bca0SMatthew Dillon 		atiop->tag_action = tag_action;
51592545bca0SMatthew Dillon 		atiop->ccb_h.flags = CAM_TAG_ACTION_VALID;
51602545bca0SMatthew Dillon 	}
51612545bca0SMatthew Dillon 	if (mpt->verbose >= MPT_PRT_DEBUG) {
51622545bca0SMatthew Dillon 		int i;
51632545bca0SMatthew Dillon 		mpt_prt(mpt, "START_CCB %p for lun %u CDB=<", atiop,
51642545bca0SMatthew Dillon 		    atiop->ccb_h.target_lun);
51652545bca0SMatthew Dillon 		for (i = 0; i < atiop->cdb_len; i++) {
51662545bca0SMatthew Dillon 			mpt_prtc(mpt, "%02x%c", cdbp[i] & 0xff,
51672545bca0SMatthew Dillon 			    (i == (atiop->cdb_len - 1))? '>' : ' ');
51682545bca0SMatthew Dillon 		}
51692545bca0SMatthew Dillon 		mpt_prtc(mpt, " itag %x tag %x rdesc %x dl=%u\n",
51702545bca0SMatthew Dillon 		    itag, atiop->tag_id, tgt->reply_desc, tgt->resid);
51712545bca0SMatthew Dillon 	}
51722545bca0SMatthew Dillon 
51732545bca0SMatthew Dillon 	xpt_done((union ccb *)atiop);
51742545bca0SMatthew Dillon }
51752545bca0SMatthew Dillon 
51762545bca0SMatthew Dillon static void
51772545bca0SMatthew Dillon mpt_tgt_dump_tgt_state(struct mpt_softc *mpt, request_t *req)
51782545bca0SMatthew Dillon {
51792545bca0SMatthew Dillon 	mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req);
51802545bca0SMatthew Dillon 
51812545bca0SMatthew Dillon 	mpt_prt(mpt, "req %p:%u tgt:rdesc 0x%x resid %u xfrd %u ccb %p treq %p "
51822545bca0SMatthew Dillon 	    "nx %d tag 0x%08x state=%d\n", req, req->serno, tgt->reply_desc,
51832545bca0SMatthew Dillon 	    tgt->resid, tgt->bytes_xfered, tgt->ccb, tgt->req, tgt->nxfers,
51842545bca0SMatthew Dillon 	    tgt->tag_id, tgt->state);
51852545bca0SMatthew Dillon }
51862545bca0SMatthew Dillon 
51872545bca0SMatthew Dillon static void
51882545bca0SMatthew Dillon mpt_tgt_dump_req_state(struct mpt_softc *mpt, request_t *req)
51892545bca0SMatthew Dillon {
51904c42baf4SSascha Wildner 
51912545bca0SMatthew Dillon 	mpt_prt(mpt, "req %p:%u index %u (%x) state %x\n", req, req->serno,
51922545bca0SMatthew Dillon 	    req->index, req->index, req->state);
51932545bca0SMatthew Dillon 	mpt_tgt_dump_tgt_state(mpt, req);
51942545bca0SMatthew Dillon }
51952545bca0SMatthew Dillon 
51962545bca0SMatthew Dillon static int
51972545bca0SMatthew Dillon mpt_scsi_tgt_reply_handler(struct mpt_softc *mpt, request_t *req,
51982545bca0SMatthew Dillon     uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame)
51992545bca0SMatthew Dillon {
52002545bca0SMatthew Dillon 	int dbg;
52012545bca0SMatthew Dillon 	union ccb *ccb;
52022545bca0SMatthew Dillon 	U16 status;
52032545bca0SMatthew Dillon 
52042545bca0SMatthew Dillon 	if (reply_frame == NULL) {
52052545bca0SMatthew Dillon 		/*
52062545bca0SMatthew Dillon 		 * Figure out what the state of the command is.
52072545bca0SMatthew Dillon 		 */
52082545bca0SMatthew Dillon 		mpt_tgt_state_t *tgt = MPT_TGT_STATE(mpt, req);
52092545bca0SMatthew Dillon 
52102545bca0SMatthew Dillon #ifdef	INVARIANTS
52112545bca0SMatthew Dillon 		mpt_req_spcl(mpt, req, "turbo scsi_tgt_reply", __LINE__);
52122545bca0SMatthew Dillon 		if (tgt->req) {
52132545bca0SMatthew Dillon 			mpt_req_not_spcl(mpt, tgt->req,
52142545bca0SMatthew Dillon 			    "turbo scsi_tgt_reply associated req", __LINE__);
52152545bca0SMatthew Dillon 		}
52162545bca0SMatthew Dillon #endif
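		/*
		 * A turbo (context only) reply carries no reply frame,
		 * so the per-request target state tells us what just
		 * completed: LOADED means a new command has arrived,
		 * MOVING_DATA means a TARGET_ASSIST finished, and the
		 * two status states mean the final TARGET_STATUS went
		 * out and the command buffer can be reposted.
		 */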
52172545bca0SMatthew Dillon 		switch(tgt->state) {
52182545bca0SMatthew Dillon 		case TGT_STATE_LOADED:
52192545bca0SMatthew Dillon 			/*
52202545bca0SMatthew Dillon 			 * This is a new command starting.
52212545bca0SMatthew Dillon 			 */
52222545bca0SMatthew Dillon 			mpt_scsi_tgt_atio(mpt, req, reply_desc);
52232545bca0SMatthew Dillon 			break;
52242545bca0SMatthew Dillon 		case TGT_STATE_MOVING_DATA:
52252545bca0SMatthew Dillon 		{
52262545bca0SMatthew Dillon 			uint8_t *sp = NULL, sense[MPT_SENSE_SIZE];
52272545bca0SMatthew Dillon 
52282545bca0SMatthew Dillon 			ccb = tgt->ccb;
52292545bca0SMatthew Dillon 			if (tgt->req == NULL) {
52302545bca0SMatthew Dillon 				panic("mpt: turbo target reply with null "
52312545bca0SMatthew Dillon 				    "associated request moving data");
52322545bca0SMatthew Dillon 				/* NOTREACHED */
52332545bca0SMatthew Dillon 			}
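			/*
			 * No CCB here means this was a "local" transfer,
			 * i.e. data generated internally for the emulated
			 * INQUIRY/REQUEST SENSE handling above, so just
			 * free the assist request and send status.
			 */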
52342545bca0SMatthew Dillon 			if (ccb == NULL) {
52352545bca0SMatthew Dillon 				if (tgt->is_local == 0) {
52362545bca0SMatthew Dillon 					panic("mpt: turbo target reply with "
52372545bca0SMatthew Dillon 					    "null associated ccb moving data");
52382545bca0SMatthew Dillon 					/* NOTREACHED */
52392545bca0SMatthew Dillon 				}
52402545bca0SMatthew Dillon 				mpt_lprt(mpt, MPT_PRT_DEBUG,
52412545bca0SMatthew Dillon 				    "TARGET_ASSIST local done\n");
52422545bca0SMatthew Dillon 				TAILQ_REMOVE(&mpt->request_pending_list,
52432545bca0SMatthew Dillon 				    tgt->req, links);
52442545bca0SMatthew Dillon 				mpt_free_request(mpt, tgt->req);
52452545bca0SMatthew Dillon 				tgt->req = NULL;
52462545bca0SMatthew Dillon 				mpt_scsi_tgt_status(mpt, NULL, req,
52472545bca0SMatthew Dillon 				    0, NULL);
52482545bca0SMatthew Dillon 				return (TRUE);
52492545bca0SMatthew Dillon 			}
52502545bca0SMatthew Dillon 			tgt->ccb = NULL;
52512545bca0SMatthew Dillon 			tgt->nxfers++;
52522545bca0SMatthew Dillon 			mpt_req_untimeout(req, mpt_timeout, ccb);
52532545bca0SMatthew Dillon 			mpt_lprt(mpt, MPT_PRT_DEBUG,
52542545bca0SMatthew Dillon 			    "TARGET_ASSIST %p (req %p:%u) done tag 0x%x\n",
52552545bca0SMatthew Dillon 			    ccb, tgt->req, tgt->req->serno, ccb->csio.tag_id);
52562545bca0SMatthew Dillon 			/*
52572545bca0SMatthew Dillon 			 * Free the Target Assist Request
52582545bca0SMatthew Dillon 			 */
52592545bca0SMatthew Dillon 			KASSERT(tgt->req->ccb == ccb,
52602545bca0SMatthew Dillon 			    ("tgt->req %p:%u tgt->req->ccb %p", tgt->req,
52612545bca0SMatthew Dillon 			    tgt->req->serno, tgt->req->ccb));
52622545bca0SMatthew Dillon 			TAILQ_REMOVE(&mpt->request_pending_list,
52632545bca0SMatthew Dillon 			    tgt->req, links);
52642545bca0SMatthew Dillon 			mpt_free_request(mpt, tgt->req);
52652545bca0SMatthew Dillon 			tgt->req = NULL;
52662545bca0SMatthew Dillon 
52672545bca0SMatthew Dillon 			/*
52682545bca0SMatthew Dillon 			 * Do we need to send status now? That is, are
52692545bca0SMatthew Dillon 			 * we done with all our data transfers?
52702545bca0SMatthew Dillon 			 */
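			/*
			 * If CAM did not ask for status with this CTIO,
			 * complete the CCB and stay in TGT_STATE_IN_CAM
			 * waiting for a further CTIO; otherwise ship
			 * status (and any sense data) now.
			 */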
52712545bca0SMatthew Dillon 			if ((ccb->ccb_h.flags & CAM_SEND_STATUS) == 0) {
52722545bca0SMatthew Dillon 				mpt_set_ccb_status(ccb, CAM_REQ_CMP);
52732545bca0SMatthew Dillon 				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
52742545bca0SMatthew Dillon 				KASSERT(ccb->ccb_h.status,
52754c42baf4SSascha Wildner 				    ("zero ccb sts at %d", __LINE__));
52762545bca0SMatthew Dillon 				tgt->state = TGT_STATE_IN_CAM;
52772545bca0SMatthew Dillon 				if (mpt->outofbeer) {
52782545bca0SMatthew Dillon 					ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
52792545bca0SMatthew Dillon 					mpt->outofbeer = 0;
52802545bca0SMatthew Dillon 					mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
52812545bca0SMatthew Dillon 				}
52822545bca0SMatthew Dillon 				xpt_done(ccb);
52832545bca0SMatthew Dillon 				break;
52842545bca0SMatthew Dillon 			}
52852545bca0SMatthew Dillon 			/*
52862545bca0SMatthew Dillon 			 * Otherwise, send status (and sense)
52872545bca0SMatthew Dillon 			 */
52882545bca0SMatthew Dillon 			if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
52892545bca0SMatthew Dillon 				sp = sense;
52902545bca0SMatthew Dillon 				memcpy(sp, &ccb->csio.sense_data,
52912545bca0SMatthew Dillon 				   min(ccb->csio.sense_len, MPT_SENSE_SIZE));
52922545bca0SMatthew Dillon 			}
52932545bca0SMatthew Dillon 			mpt_scsi_tgt_status(mpt, ccb, req,
52942545bca0SMatthew Dillon 			    ccb->csio.scsi_status, sp);
52952545bca0SMatthew Dillon 			break;
52962545bca0SMatthew Dillon 		}
52972545bca0SMatthew Dillon 		case TGT_STATE_SENDING_STATUS:
52982545bca0SMatthew Dillon 		case TGT_STATE_MOVING_DATA_AND_STATUS:
52992545bca0SMatthew Dillon 		{
53002545bca0SMatthew Dillon 			int ioindex;
53012545bca0SMatthew Dillon 			ccb = tgt->ccb;
53022545bca0SMatthew Dillon 
53032545bca0SMatthew Dillon 			if (tgt->req == NULL) {
53042545bca0SMatthew Dillon 				panic("mpt: turbo target reply with null "
53052545bca0SMatthew Dillon 				    "associated request sending status");
53062545bca0SMatthew Dillon 				/* NOTREACHED */
53072545bca0SMatthew Dillon 			}
53082545bca0SMatthew Dillon 
53092545bca0SMatthew Dillon 			if (ccb) {
53102545bca0SMatthew Dillon 				tgt->ccb = NULL;
53112545bca0SMatthew Dillon 				if (tgt->state ==
53122545bca0SMatthew Dillon 				    TGT_STATE_MOVING_DATA_AND_STATUS) {
53132545bca0SMatthew Dillon 					tgt->nxfers++;
53142545bca0SMatthew Dillon 				}
53152545bca0SMatthew Dillon 				mpt_req_untimeout(req, mpt_timeout, ccb);
53162545bca0SMatthew Dillon 				if (ccb->ccb_h.flags & CAM_SEND_SENSE) {
53172545bca0SMatthew Dillon 					ccb->ccb_h.status |= CAM_SENT_SENSE;
53182545bca0SMatthew Dillon 				}
53192545bca0SMatthew Dillon 				mpt_lprt(mpt, MPT_PRT_DEBUG,
53202545bca0SMatthew Dillon 				    "TARGET_STATUS tag %x sts %x flgs %x req "
53212545bca0SMatthew Dillon 				    "%p\n", ccb->csio.tag_id, ccb->ccb_h.status,
53222545bca0SMatthew Dillon 				    ccb->ccb_h.flags, tgt->req);
53232545bca0SMatthew Dillon 				/*
53242545bca0SMatthew Dillon 				 * Free the Target Send Status Request
53252545bca0SMatthew Dillon 				 */
53262545bca0SMatthew Dillon 				KASSERT(tgt->req->ccb == ccb,
53272545bca0SMatthew Dillon 				    ("tgt->req %p:%u tgt->req->ccb %p",
53282545bca0SMatthew Dillon 				    tgt->req, tgt->req->serno, tgt->req->ccb));
53292545bca0SMatthew Dillon 				/*
53302545bca0SMatthew Dillon 				 * Notify CAM that we're done
53312545bca0SMatthew Dillon 				 */
53322545bca0SMatthew Dillon 				mpt_set_ccb_status(ccb, CAM_REQ_CMP);
53332545bca0SMatthew Dillon 				ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
53342545bca0SMatthew Dillon 				KASSERT(ccb->ccb_h.status,
53354c42baf4SSascha Wildner 				    ("ZERO ccb sts at %d", __LINE__));
53362545bca0SMatthew Dillon 				tgt->ccb = NULL;
53372545bca0SMatthew Dillon 			} else {
53382545bca0SMatthew Dillon 				mpt_lprt(mpt, MPT_PRT_DEBUG,
53392545bca0SMatthew Dillon 				    "TARGET_STATUS non-CAM for req %p:%u\n",
53402545bca0SMatthew Dillon 				    tgt->req, tgt->req->serno);
53412545bca0SMatthew Dillon 			}
53422545bca0SMatthew Dillon 			TAILQ_REMOVE(&mpt->request_pending_list,
53432545bca0SMatthew Dillon 			    tgt->req, links);
53442545bca0SMatthew Dillon 			mpt_free_request(mpt, tgt->req);
53452545bca0SMatthew Dillon 			tgt->req = NULL;
53462545bca0SMatthew Dillon 
53472545bca0SMatthew Dillon 			/*
53482545bca0SMatthew Dillon 			 * And re-post the Command Buffer.
53492545bca0SMatthew Dillon 			 * This will reset the state.
53502545bca0SMatthew Dillon 			 */
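			/*
			 * The IO index from the reply descriptor names the
			 * command buffer slot being recycled; reposting it
			 * is what resets the per-request target state.
			 */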
53512545bca0SMatthew Dillon 			ioindex = GET_IO_INDEX(reply_desc);
53522545bca0SMatthew Dillon 			TAILQ_REMOVE(&mpt->request_pending_list, req, links);
53532545bca0SMatthew Dillon 			tgt->is_local = 0;
53542545bca0SMatthew Dillon 			mpt_post_target_command(mpt, req, ioindex);
53552545bca0SMatthew Dillon 
53562545bca0SMatthew Dillon 			/*
53572545bca0SMatthew Dillon 			 * And post a done for anyone who cares
53582545bca0SMatthew Dillon 			 */
53592545bca0SMatthew Dillon 			if (ccb) {
53602545bca0SMatthew Dillon 				if (mpt->outofbeer) {
53612545bca0SMatthew Dillon 					ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
53622545bca0SMatthew Dillon 					mpt->outofbeer = 0;
53632545bca0SMatthew Dillon 					mpt_lprt(mpt, MPT_PRT_DEBUG, "THAWQ\n");
53642545bca0SMatthew Dillon 				}
53652545bca0SMatthew Dillon 				xpt_done(ccb);
53662545bca0SMatthew Dillon 			}
53672545bca0SMatthew Dillon 			break;
53682545bca0SMatthew Dillon 		}
53692545bca0SMatthew Dillon 		case TGT_STATE_NIL:	/* XXX This Never Happens XXX */
53702545bca0SMatthew Dillon 			tgt->state = TGT_STATE_LOADED;
53712545bca0SMatthew Dillon 			break;
53722545bca0SMatthew Dillon 		default:
53732545bca0SMatthew Dillon 			mpt_prt(mpt, "Unknown Target State 0x%x in Context "
53742545bca0SMatthew Dillon 			    "Reply Function\n", tgt->state);
53752545bca0SMatthew Dillon 		}
53762545bca0SMatthew Dillon 		return (TRUE);
53772545bca0SMatthew Dillon 	}
53782545bca0SMatthew Dillon 
53792545bca0SMatthew Dillon 	status = le16toh(reply_frame->IOCStatus);
53802545bca0SMatthew Dillon 	if (status != MPI_IOCSTATUS_SUCCESS) {
53812545bca0SMatthew Dillon 		dbg = MPT_PRT_ERROR;
53822545bca0SMatthew Dillon 	} else {
53832545bca0SMatthew Dillon 		dbg = MPT_PRT_DEBUG1;
53842545bca0SMatthew Dillon 	}
53852545bca0SMatthew Dillon 
53862545bca0SMatthew Dillon 	mpt_lprt(mpt, dbg,
53872545bca0SMatthew Dillon 	    "SCSI_TGT REPLY: req=%p:%u reply=%p func=%x IOCstatus 0x%x\n",
53882545bca0SMatthew Dillon 	     req, req->serno, reply_frame, reply_frame->Function, status);
53892545bca0SMatthew Dillon 
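	/*
	 * Non-turbo replies arrive with a full reply frame; dispatch
	 * on the function of the original request.  A completed buffer
	 * post advances the target state machine, while completed
	 * TARGET_ASSIST, TARGET_STATUS_SEND and TARGET_MODE_ABORT
	 * requests are simply logged and freed.
	 */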
53902545bca0SMatthew Dillon 	switch (reply_frame->Function) {
53912545bca0SMatthew Dillon 	case MPI_FUNCTION_TARGET_CMD_BUFFER_POST:
53922545bca0SMatthew Dillon 	{
53932545bca0SMatthew Dillon 		mpt_tgt_state_t *tgt;
53942545bca0SMatthew Dillon #ifdef	INVARIANTS
53952545bca0SMatthew Dillon 		mpt_req_spcl(mpt, req, "tgt reply BUFFER POST", __LINE__);
53962545bca0SMatthew Dillon #endif
53972545bca0SMatthew Dillon 		if (status != MPI_IOCSTATUS_SUCCESS) {
53982545bca0SMatthew Dillon 			/*
53992545bca0SMatthew Dillon 			 * XXX What to do?
54002545bca0SMatthew Dillon 			 */
54012545bca0SMatthew Dillon 			break;
54022545bca0SMatthew Dillon 		}
54032545bca0SMatthew Dillon 		tgt = MPT_TGT_STATE(mpt, req);
54042545bca0SMatthew Dillon 		KASSERT(tgt->state == TGT_STATE_LOADING,
54054c42baf4SSascha Wildner 		    ("bad state 0x%x on reply to buffer post", tgt->state));
54062545bca0SMatthew Dillon 		mpt_assign_serno(mpt, req);
54072545bca0SMatthew Dillon 		tgt->state = TGT_STATE_LOADED;
54082545bca0SMatthew Dillon 		break;
54092545bca0SMatthew Dillon 	}
54102545bca0SMatthew Dillon 	case MPI_FUNCTION_TARGET_ASSIST:
54112545bca0SMatthew Dillon #ifdef	INVARIANTS
54122545bca0SMatthew Dillon 		mpt_req_not_spcl(mpt, req, "tgt reply TARGET ASSIST", __LINE__);
54132545bca0SMatthew Dillon #endif
54142545bca0SMatthew Dillon 		mpt_prt(mpt, "target assist completion\n");
54152545bca0SMatthew Dillon 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
54162545bca0SMatthew Dillon 		mpt_free_request(mpt, req);
54172545bca0SMatthew Dillon 		break;
54182545bca0SMatthew Dillon 	case MPI_FUNCTION_TARGET_STATUS_SEND:
54192545bca0SMatthew Dillon #ifdef	INVARIANTS
54202545bca0SMatthew Dillon 		mpt_req_not_spcl(mpt, req, "tgt reply STATUS SEND", __LINE__);
54212545bca0SMatthew Dillon #endif
54222545bca0SMatthew Dillon 		mpt_prt(mpt, "status send completion\n");
54232545bca0SMatthew Dillon 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
54242545bca0SMatthew Dillon 		mpt_free_request(mpt, req);
54252545bca0SMatthew Dillon 		break;
54262545bca0SMatthew Dillon 	case MPI_FUNCTION_TARGET_MODE_ABORT:
54272545bca0SMatthew Dillon 	{
54282545bca0SMatthew Dillon 		PTR_MSG_TARGET_MODE_ABORT_REPLY abtrp =
54292545bca0SMatthew Dillon 		    (PTR_MSG_TARGET_MODE_ABORT_REPLY) reply_frame;
54302545bca0SMatthew Dillon 		PTR_MSG_TARGET_MODE_ABORT abtp =
54312545bca0SMatthew Dillon 		    (PTR_MSG_TARGET_MODE_ABORT) req->req_vbuf;
54322545bca0SMatthew Dillon 		uint32_t cc = GET_IO_INDEX(le32toh(abtp->ReplyWord));
54332545bca0SMatthew Dillon #ifdef	INVARIANTS
54342545bca0SMatthew Dillon 		mpt_req_not_spcl(mpt, req, "tgt reply TMODE ABORT", __LINE__);
54352545bca0SMatthew Dillon #endif
54362545bca0SMatthew Dillon 		mpt_prt(mpt, "ABORT RX_ID 0x%x Complete; status 0x%x cnt %u\n",
54372545bca0SMatthew Dillon 		    cc, le16toh(abtrp->IOCStatus), le32toh(abtrp->AbortCount));
54382545bca0SMatthew Dillon 		TAILQ_REMOVE(&mpt->request_pending_list, req, links);
54392545bca0SMatthew Dillon 		mpt_free_request(mpt, req);
54402545bca0SMatthew Dillon 		break;
54412545bca0SMatthew Dillon 	}
54422545bca0SMatthew Dillon 	default:
54432545bca0SMatthew Dillon 		mpt_prt(mpt, "Unknown Target Address Reply Function code: "
54442545bca0SMatthew Dillon 		    "0x%x\n", reply_frame->Function);
54452545bca0SMatthew Dillon 		break;
54462545bca0SMatthew Dillon 	}
54472545bca0SMatthew Dillon 	return (TRUE);
54482545bca0SMatthew Dillon }
5449