1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  *
21  *
22  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 /*
 * SCSI (SCSA) midlayer interface for PMC driver.
27  */
28 
29 #include <sys/scsi/adapters/pmcs/pmcs.h>
30 
31 extern scsi_lun_t scsi_lun64_to_lun(scsi_lun64_t lun64);
32 
33 static int pmcs_scsa_tran_tgt_init(dev_info_t *, dev_info_t *,
34     scsi_hba_tran_t *, struct scsi_device *);
35 static void pmcs_scsa_tran_tgt_free(dev_info_t *, dev_info_t *,
36     scsi_hba_tran_t *, struct scsi_device *);
37 static int pmcs_scsa_start(struct scsi_address *, struct scsi_pkt *);
38 static int pmcs_scsa_abort(struct scsi_address *, struct scsi_pkt *);
39 static int pmcs_scsa_reset(struct scsi_address *, int);
40 static int pmcs_scsi_reset_notify(struct scsi_address *, int,
41     void (*)(caddr_t), caddr_t);
42 static int pmcs_scsa_getcap(struct scsi_address *, char *, int);
43 static int pmcs_scsa_setcap(struct scsi_address *, char *, int, int);
44 static int pmcs_scsa_setup_pkt(struct scsi_pkt *, int (*)(caddr_t), caddr_t);
45 static void pmcs_scsa_teardown_pkt(struct scsi_pkt *);
46 static int pmcs_smp_start(struct smp_pkt *);
47 static int pmcs_smp_getcap(struct sas_addr *, char *);
48 static int pmcs_smp_init(dev_info_t *, dev_info_t *, sas_hba_tran_t *,
49     smp_device_t *);
50 static void pmcs_smp_free(dev_info_t *, dev_info_t *, sas_hba_tran_t *,
51     smp_device_t *);
52 
53 static int pmcs_scsi_quiesce(dev_info_t *);
54 static int pmcs_scsi_unquiesce(dev_info_t *);
55 
56 static int pmcs_cap(struct scsi_address *, char *, int, int, int);
57 static pmcs_xscsi_t *
58     pmcs_addr2xp(struct scsi_address *, uint64_t *, pmcs_cmd_t *);
59 static int pmcs_SAS_run(pmcs_cmd_t *, pmcwork_t *);
60 static void pmcs_SAS_done(pmcs_hw_t *, pmcwork_t *, uint32_t *);
61 
62 static int pmcs_SATA_run(pmcs_cmd_t *, pmcwork_t *);
63 static void pmcs_SATA_done(pmcs_hw_t *, pmcwork_t *, uint32_t *);
64 static uint8_t pmcs_SATA_rwparm(uint8_t *, uint32_t *, uint64_t *, uint64_t);
65 
66 static void pmcs_ioerror(pmcs_hw_t *, pmcs_dtype_t pmcs_dtype,
67     pmcwork_t *, uint32_t *);
68 
69 
/*
 * Set up and register both the SCSA transport and the SMP (SAS) transport
 * for this HBA instance.  Called during attach.
 *
 * Returns DDI_SUCCESS, or DDI_FAILURE if either transport could not be
 * allocated or attached (all partially-set-up state is torn down).
 */
int
pmcs_scsa_init(pmcs_hw_t *pwp, const ddi_dma_attr_t *ap)
{
	scsi_hba_tran_t *tran;
	ddi_dma_attr_t pmcs_scsa_dattr;
	int flags;

	/*
	 * Start from the caller's DMA attributes, then widen the S/G list
	 * length to the chip's full chained-SGL capacity and request
	 * relaxed ordering plus DMA error flagging.
	 */
	(void) memcpy(&pmcs_scsa_dattr, ap, sizeof (ddi_dma_attr_t));
	pmcs_scsa_dattr.dma_attr_sgllen =
	    ((PMCS_SGL_NCHUNKS - 1) * (PMCS_MAX_CHUNKS - 1)) + PMCS_SGL_NCHUNKS;
	pmcs_scsa_dattr.dma_attr_flags = DDI_DMA_RELAXED_ORDERING;
	pmcs_scsa_dattr.dma_attr_flags |= DDI_DMA_FLAGERR;

	/*
	 * Allocate a transport structure
	 */
	tran = scsi_hba_tran_alloc(pwp->dip, SCSI_HBA_CANSLEEP);
	if (tran == NULL) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, "scsi_hba_tran_alloc failed");
		return (DDI_FAILURE);
	}

	/* Wire up all SCSA entry points for this instance */
	tran->tran_hba_private		= pwp;
	tran->tran_tgt_init		= pmcs_scsa_tran_tgt_init;
	tran->tran_tgt_free		= pmcs_scsa_tran_tgt_free;
	tran->tran_start		= pmcs_scsa_start;
	tran->tran_abort		= pmcs_scsa_abort;
	tran->tran_reset		= pmcs_scsa_reset;
	tran->tran_reset_notify		= pmcs_scsi_reset_notify;
	tran->tran_getcap		= pmcs_scsa_getcap;
	tran->tran_setcap		= pmcs_scsa_setcap;
	tran->tran_setup_pkt		= pmcs_scsa_setup_pkt;
	tran->tran_teardown_pkt		= pmcs_scsa_teardown_pkt;
	tran->tran_quiesce		= pmcs_scsi_quiesce;
	tran->tran_unquiesce		= pmcs_scsi_unquiesce;
	tran->tran_interconnect_type	= INTERCONNECT_SAS;
	tran->tran_hba_len		= sizeof (pmcs_cmd_t);

	/*
	 * Attach this instance of the hba
	 */

	flags = SCSI_HBA_TRAN_SCB | SCSI_HBA_TRAN_CDB | SCSI_HBA_ADDR_COMPLEX |
	    SCSI_HBA_TRAN_PHCI | SCSI_HBA_HBA;

	if (scsi_hba_attach_setup(pwp->dip, &pmcs_scsa_dattr, tran, flags)) {
		scsi_hba_tran_free(tran);
		pmcs_prt(pwp, PMCS_PRT_DEBUG, "scsi_hba_attach failed");
		return (DDI_FAILURE);
	}
	pwp->tran = tran;

	/*
	 * Attach the SMP part of this hba
	 */
	pwp->smp_tran = sas_hba_tran_alloc(pwp->dip);
	ASSERT(pwp->smp_tran != NULL);
	pwp->smp_tran->tran_hba_private = pwp;
	pwp->smp_tran->tran_smp_start = pmcs_smp_start;
	pwp->smp_tran->tran_sas_getcap = pmcs_smp_getcap;
	pwp->smp_tran->tran_smp_init = pmcs_smp_init;
	pwp->smp_tran->tran_smp_free = pmcs_smp_free;

	if (sas_hba_attach_setup(pwp->dip, pwp->smp_tran) != DDI_SUCCESS) {
		/* Undo the SMP and SCSA allocations on failure */
		pmcs_prt(pwp, PMCS_PRT_DEBUG, "sas_hba_attach failed");
		sas_hba_tran_free(pwp->smp_tran);
		pwp->smp_tran = NULL;
		scsi_hba_tran_free(tran);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}
143 
144 /*
145  * SCSA entry points
146  */
147 
/*
 * tran_tgt_init(9E) entry point: called when a new scsi_device (LU) is
 * being initialized under one of our iports.  Looks up the target
 * softstate for the unit address, allocates per-LU softstate, assigns
 * the target a slot in pwp->targets[] on the first LU, and registers
 * the device with the hardware via pmcs_assign_device().
 *
 * Returns DDI_SUCCESS or DDI_FAILURE.  On failure, all partially
 * acquired resources (scratch, LU softstate, PHY refcount, target
 * softstate) are released via the tgt_init_fail path.
 */
static int
pmcs_scsa_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *tran, struct scsi_device *sd)
{
	pmcs_hw_t	*pwp = NULL;
	int		rval;
	char		*variant_prop = "sata";
	char		*tgt_port = NULL, *ua = NULL;
	pmcs_xscsi_t	*tgt = NULL;
	pmcs_iport_t	*iport;
	pmcs_lun_t	*lun = NULL;
	pmcs_phy_t	*phyp = NULL;
	uint64_t	lun_num;
	boolean_t	got_scratch = B_FALSE;

	/*
	 * First, make sure we're an iport and get the pointer to the HBA
	 * node's softstate
	 */
	if (scsi_hba_iport_unit_address(hba_dip) == NULL) {
		pmcs_prt(TRAN2PMC(tran), PMCS_PRT_DEBUG_CONFIG,
		    "%s: We don't enumerate devices on the HBA node", __func__);
		goto tgt_init_fail;
	}

	pwp = ITRAN2PMC(tran);
	iport = ITRAN2IPORT(tran);

	/*
	 * Get the target address
	 */
	rval = scsi_device_prop_lookup_string(sd, SCSI_DEVICE_PROP_PATH,
	    SCSI_ADDR_PROP_TARGET_PORT, &tgt_port);
	if (rval != DDI_PROP_SUCCESS) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, "Couldn't get target UA");
		/*
		 * NULL pwp so the failure path skips
		 * mutex_exit(&pwp->lock) — we never took it.
		 */
		pwp = NULL;
		goto tgt_init_fail;
	}
	pmcs_prt(pwp, PMCS_PRT_DEBUG3, "got tgt_port '%s'", tgt_port);

	/*
	 * Validate that this tran_tgt_init is for an active iport.
	 */
	if (iport->ua_state == UA_INACTIVE) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG,
		    "%s: Got tran_tgt_init on inactive iport for '%s'",
		    __func__, tgt_port);
		/* Again, we don't hold pwp->lock here */
		pwp = NULL;
		goto tgt_init_fail;
	}

	/*
	 * Since we're going to wait for scratch, be sure to acquire it while
	 * we're not holding any other locks
	 */
	(void) pmcs_acquire_scratch(pwp, B_TRUE);
	got_scratch = B_TRUE;

	mutex_enter(&pwp->lock);

	/*
	 * See if there's already a target softstate.  If not, allocate one.
	 */
	tgt = pmcs_get_target(iport, tgt_port);

	if (tgt == NULL) {
		goto tgt_init_fail;
	}

	/*
	 * tgt->phy is returned locked by pmcs_get_target; take a reference
	 * on non-root PHYs so the PHY can't be reaped under us.
	 */
	phyp = tgt->phy;
	if (!IS_ROOT_PHY(phyp)) {
		pmcs_inc_phy_ref_count(phyp);
	}
	ASSERT(mutex_owned(&phyp->phy_lock));

	pmcs_prt(pwp, PMCS_PRT_DEBUG2, "tgt = 0x%p, dip = 0x%p",
	    (void *)tgt, (void *)tgt_dip);

	/*
	 * Now get the full "w<WWN>,LUN" unit-address (including LU).
	 */
	ua = scsi_device_unit_address(sd);
	if (ua == NULL) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
		    "Couldn't get LU unit address");
		goto tgt_init_fail;
	}
	pmcs_prt(pwp, PMCS_PRT_DEBUG2, "got lun ua '%s'", ua);

	lun_num = scsi_device_prop_get_int64(sd, SCSI_DEVICE_PROP_PATH,
	    SCSI_ADDR_PROP_LUN64, SCSI_LUN64_ILLEGAL);
	if (lun_num == SCSI_LUN64_ILLEGAL) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, "No LUN for tgt %p",
		    (void *)tgt);
		goto tgt_init_fail;
	}

	pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, "%s: @%s tgt 0x%p phy 0x%p (%s)",
	    __func__, ua, (void *)tgt, (void *)phyp, phyp->path);

	mutex_enter(&tgt->statlock);
	/* Only SAS and SATA end devices get LUs; anything else is stale */
	tgt->dtype = phyp->dtype;
	if (tgt->dtype != SAS && tgt->dtype != SATA) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, "PHY 0x%p went away?",
		    (void *)phyp);
		goto tgt_init_fail;
	}

	/* We don't support SATA devices at LUN > 0. */
	if ((tgt->dtype == SATA) && (lun_num > 0)) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
		    "%s: No support for SATA devices at LUN > 0 "
		    "(target = 0x%p)", __func__, (void *)tgt);
		goto tgt_init_fail;
	}

	/*
	 * Allocate LU soft state. We use ddi_soft_state_bystr_zalloc instead
	 * of kmem_alloc because ddi_soft_state_bystr_zalloc allows us to
	 * verify that the framework never tries to initialize two scsi_device
	 * structures with the same unit-address at the same time.
	 */
	if (ddi_soft_state_bystr_zalloc(tgt->lun_sstate, ua) != DDI_SUCCESS) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG2,
		    "Couldn't allocate LU soft state");
		goto tgt_init_fail;
	}

	lun = ddi_soft_state_bystr_get(tgt->lun_sstate, ua);
	if (lun == NULL) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG2, "Couldn't get LU soft state");
		goto tgt_init_fail;
	}
	scsi_device_hba_private_set(sd, lun);
	lun->lun_num = lun_num;

	/* convert the scsi_lun64_t value to SCSI standard form */
	lun->scsi_lun = scsi_lun64_to_lun(lun_num);

	ASSERT(strlen(ua) < (PMCS_MAX_UA_SIZE - 1));
	bcopy(ua, lun->unit_address, strnlen(ua, PMCS_MAX_UA_SIZE - 1));

	lun->target = tgt;

	/*
	 * If this is the first tran_tgt_init, add this target to our list
	 */
	if (tgt->target_num == PMCS_INVALID_TARGET_NUM) {
		int target;
		/* Find the first free slot in the target array */
		for (target = 0; target < pwp->max_dev; target++) {
			if (pwp->targets[target] != NULL) {
				continue;
			}

			pwp->targets[target] = tgt;
			tgt->target_num = (uint16_t)target;
			break;
		}

		if (target == pwp->max_dev) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
			    "Target list full.");
			goto tgt_init_fail;
		}
	}

	tgt->dip = sd->sd_dev;

	if (!pmcs_assign_device(pwp, tgt)) {
		/* Hardware registration failed; undo the slot assignment */
		pmcs_release_scratch(pwp);
		pwp->targets[tgt->target_num] = NULL;
		tgt->target_num = PMCS_INVALID_TARGET_NUM;
		tgt->phy = NULL;
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
		    "%s: pmcs_assign_device failed for target 0x%p",
		    __func__, (void *)tgt);
		goto tgt_init_fail;
	}

	pmcs_release_scratch(pwp);
	tgt->ref_count++;

	(void) scsi_device_prop_update_int(sd, SCSI_DEVICE_PROP_PATH,
	    SCSI_ADDR_PROP_TARGET, (uint32_t)(tgt->target_num));

	/* SM-HBA */
	if (tgt->dtype == SATA) {
		/* TCR in PSARC/1997/281 opinion */
		(void) scsi_device_prop_update_string(sd,
		    SCSI_DEVICE_PROP_PATH, "variant", variant_prop);
	}

	tgt->phy_addressable = PMCS_PHY_ADDRESSABLE(phyp);

	if (tgt->phy_addressable) {
		(void) scsi_device_prop_update_int(sd, SCSI_DEVICE_PROP_PATH,
		    SCSI_ADDR_PROP_SATA_PHY, phyp->phynum);
	}

	/* SM-HBA */
	(void) pmcs_smhba_set_scsi_device_props(pwp, phyp, sd);

	/* Success: drop locks in acquisition-reverse order and free the UA */
	mutex_exit(&tgt->statlock);
	pmcs_unlock_phy(phyp);
	mutex_exit(&pwp->lock);
	scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, tgt_port);
	return (DDI_SUCCESS);

tgt_init_fail:
	/* Unwind only what was actually acquired, in reverse order */
	if (got_scratch) {
		pmcs_release_scratch(pwp);
	}
	if (lun) {
		ddi_soft_state_bystr_free(tgt->lun_sstate, ua);
	}
	if (phyp) {
		mutex_exit(&tgt->statlock);
		pmcs_unlock_phy(phyp);
		/*
		 * phyp's ref count was incremented in pmcs_new_tport.
		 * We're failing configuration, we now need to decrement it.
		 */
		if (!IS_ROOT_PHY(phyp)) {
			pmcs_dec_phy_ref_count(phyp);
		}
		phyp->target = NULL;
	}
	if (tgt && tgt->ref_count == 0) {
		ddi_soft_state_bystr_free(iport->tgt_sstate, tgt_port);
	}
	if (pwp) {
		mutex_exit(&pwp->lock);
	}
	if (tgt_port) {
		scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, tgt_port);
	}
	return (DDI_FAILURE);
}
386 
/*
 * tran_tgt_free(9E) entry point: tear down the per-LU state created by
 * pmcs_scsa_tran_tgt_init().  Frees the LU softstate, drops the PHY
 * reference taken at init time, and — when the last LU reference goes
 * away — removes the target from pwp->targets[] and destroys it.
 */
static void
pmcs_scsa_tran_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
    scsi_hba_tran_t *tran, struct scsi_device *sd)
{
	_NOTE(ARGUNUSED(hba_dip, tgt_dip));
	pmcs_hw_t	*pwp;
	pmcs_lun_t	*lun;
	pmcs_xscsi_t	*target;
	char		*unit_address;
	pmcs_phy_t	*phyp;

	if (scsi_hba_iport_unit_address(hba_dip) == NULL) {
		pwp = TRAN2PMC(tran);
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
		    "%s: We don't enumerate devices on the HBA node", __func__);
		return;
	}

	lun = (pmcs_lun_t *)scsi_device_hba_private_get(sd);

	ASSERT((lun != NULL) && (lun->target != NULL));
	ASSERT(lun->target->ref_count > 0);

	target = lun->target;

	/* Free the per-LU softstate allocated at tgt_init time */
	unit_address = lun->unit_address;
	ddi_soft_state_bystr_free(lun->target->lun_sstate, unit_address);

	pwp = ITRAN2PMC(tran);
	mutex_enter(&pwp->lock);
	mutex_enter(&target->statlock);
	ASSERT(target->phy);
	phyp = target->phy;

	/*
	 * If this target still has a PHY pointer and that PHY's target pointer
	 * has been cleared, then that PHY has been reaped. In that case, there
	 * would be no need to decrement the reference count
	 */
	if (phyp && !IS_ROOT_PHY(phyp) && phyp->target) {
		pmcs_dec_phy_ref_count(phyp);
	}

	if (--target->ref_count == 0) {
		/*
		 * Remove this target from our list.  The target soft
		 * state will remain, and the device will remain registered
		 * with the hardware unless/until we're told the device
		 * physically went away.
		 */
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
		    "%s: Free target 0x%p (vtgt %d)", __func__, (void *)target,
		    target->target_num);
		pwp->targets[target->target_num] = NULL;
		target->target_num = PMCS_INVALID_TARGET_NUM;
		/*
		 * If the target still has a PHY pointer, break the linkage
		 */
		if (phyp) {
			phyp->target = NULL;
		}
		target->phy = NULL;
		/*
		 * NOTE(review): pmcs_destroy_target appears to consume the
		 * statlock (no mutex_exit on this path) — confirm against
		 * its definition.
		 */
		pmcs_destroy_target(target);
	} else {
		mutex_exit(&target->statlock);
	}

	mutex_exit(&pwp->lock);
}
456 
/*
 * tran_start(9E) entry point.  Validates HBA and target state, then
 * queues the command on the target's wait queue and runs the queue.
 * Commands for dead or missing targets are completed immediately with
 * CMD_DEV_GONE.  Returns TRAN_ACCEPT, TRAN_BADPKT (polled I/O is not
 * supported) or TRAN_FATAL_ERROR (HBA not running).
 */
static int
pmcs_scsa_start(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	pmcs_cmd_t *sp = PKT2CMD(pkt);
	pmcs_hw_t *pwp = ADDR2PMC(ap);
	pmcs_xscsi_t *xp;
	boolean_t blocked;
	uint32_t hba_state;

	pmcs_prt(pwp, PMCS_PRT_DEBUG2, "%s: pkt %p sd %p cdb0=0x%02x dl=%lu",
	    __func__, (void *)pkt,
	    (void *)scsi_address_device(&pkt->pkt_address),
	    pkt->pkt_cdbp[0] & 0xff, pkt->pkt_dma_len);

	/* Polled (FLAG_NOINTR) packets are not supported */
	if (pkt->pkt_flags & FLAG_NOINTR) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG3, "%s: nointr pkt", __func__);
		return (TRAN_BADPKT);
	}

	/* Reset per-command state for this (possibly retried) packet */
	sp->cmd_tag = 0;
	pkt->pkt_state = pkt->pkt_statistics = 0;
	pkt->pkt_reason = CMD_INCOMPLETE;

	/* Snapshot HBA state under the lock, then drop it */
	mutex_enter(&pwp->lock);
	hba_state = pwp->state;
	blocked = pwp->blocked;
	mutex_exit(&pwp->lock);

	if (hba_state != STATE_RUNNING) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: hba dead", __func__);
		return (TRAN_FATAL_ERROR);
	}

	/* Resolve the target; returns with xp->statlock held on success */
	xp = pmcs_addr2xp(ap, NULL, sp);
	if (xp == NULL) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG2,
		    "%s: dropping due to null target", __func__);
		goto dead_target;
	}
	ASSERT(mutex_owned(&xp->statlock));

	/*
	 * First, check to see if the device is gone.
	 */
	if (xp->dev_gone) {
		mutex_exit(&xp->statlock);
		pmcs_prt(pwp, PMCS_PRT_DEBUG3,
		    "%s: dropping due to dead target 0x%p",
		    __func__, (void *)xp);
		goto dead_target;
	}

	/*
	 * If we're blocked (quiesced) just return.
	 */
	if (blocked) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: hba blocked", __func__);
		mutex_exit(&xp->statlock);
		mutex_enter(&xp->wqlock);
		STAILQ_INSERT_TAIL(&xp->wq, sp, cmd_next);
		mutex_exit(&xp->wqlock);
		return (TRAN_ACCEPT);
	}

	/*
	 * If we're draining or resetting, queue and return.
	 */
	if (xp->draining || xp->resetting || xp->recover_wait) {
		mutex_exit(&xp->statlock);
		mutex_enter(&xp->wqlock);
		STAILQ_INSERT_TAIL(&xp->wq, sp, cmd_next);
		mutex_exit(&xp->wqlock);
		pmcs_prt(pwp, PMCS_PRT_DEBUG1,
		    "%s: draining/resetting/recovering (cnt %u)",
		    __func__, xp->actv_cnt);
		/*
		 * By the time we get here, draining or
		 * resetting may have come and gone, not
		 * yet noticing that we had put something
		 * on the wait queue, so schedule a worker
		 * to look at this later.
		 */
		SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES);
		return (TRAN_ACCEPT);
	}
	mutex_exit(&xp->statlock);

	/*
	 * Queue this command to the tail of the wait queue.
	 * This keeps us from getting commands out of order.
	 */
	mutex_enter(&xp->wqlock);
	STAILQ_INSERT_TAIL(&xp->wq, sp, cmd_next);
	mutex_exit(&xp->wqlock);

	/*
	 * Now run the queue for this device.
	 */
	(void) pmcs_scsa_wq_run_one(pwp, xp);

	return (TRAN_ACCEPT);

dead_target:
	/* Complete the command immediately as CMD_DEV_GONE */
	pkt->pkt_state = STATE_GOT_BUS;
	pkt->pkt_reason = CMD_DEV_GONE;
	mutex_enter(&pwp->cq_lock);
	STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next);
	PMCS_CQ_RUN_LOCKED(pwp);
	mutex_exit(&pwp->cq_lock);
	return (TRAN_ACCEPT);
}
568 
/*
 * tran_abort(9E) entry point.  Attempt to abort a single packet.  If the
 * command is active on the chip, issue a task-management abort (SSP) or
 * NCQ abort (SATA); otherwise pull it off the target's wait queue and
 * complete it as CMD_ABORTED.  Returns 1 if the abort was initiated or
 * completed, 0 otherwise.
 */
static int
pmcs_scsa_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	pmcs_hw_t *pwp = ADDR2PMC(ap);
	pmcs_cmd_t *sp = PKT2CMD(pkt);
	pmcs_xscsi_t *xp = sp->cmd_target;
	pmcs_phy_t *pptr;
	uint32_t tag;
	uint64_t lun;
	pmcwork_t *pwrk;

	mutex_enter(&pwp->lock);
	if (pwp->state != STATE_RUNNING) {
		mutex_exit(&pwp->lock);
		pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: hba dead", __func__);
		return (0);
	}
	mutex_exit(&pwp->lock);

	/* Default to LUN 0 if the command has no LU linkage */
	if (sp->cmd_lun) {
		lun = sp->cmd_lun->lun_num;
	} else {
		lun = 0;
	}
	if (xp == NULL) {
		return (0);
	}

	/*
	 * See if we have a real work structure associated with this cmd.
	 */
	pwrk = pmcs_tag2wp(pwp, sp->cmd_tag);
	if (pwrk && pwrk->arg == sp) {
		/* Command is active on the chip: abort it there */
		tag = pwrk->htag;
		pptr = pwrk->phy;
		pwrk->timer = 0;	/* we don't time this here */
		ASSERT(pwrk->state == PMCS_WORK_STATE_ONCHIP);
		mutex_exit(&pwrk->lock);
		pmcs_lock_phy(pptr);
		if (pptr->dtype == SAS) {
			/* A nonzero return here means the TMF failed */
			if (pmcs_ssp_tmf(pwp, pptr, SAS_ABORT_TASK, tag, lun,
			    NULL)) {
				pptr->abort_pending = 1;
				pmcs_unlock_phy(pptr);
				SCHEDULE_WORK(pwp, PMCS_WORK_ABORT_HANDLE);
				return (0);
			}
		} else {
			/*
			 * XXX: Was the command that was active an
			 * NCQ I/O command?
			 */
			pptr->need_rl_ext = 1;
			if (pmcs_sata_abort_ncq(pwp, pptr)) {
				pptr->abort_pending = 1;
				pmcs_unlock_phy(pptr);
				SCHEDULE_WORK(pwp, PMCS_WORK_ABORT_HANDLE);
				return (0);
			}
		}
		/* Abort issued; let the abort handler finish the cleanup */
		pptr->abort_pending = 1;
		pmcs_unlock_phy(pptr);
		SCHEDULE_WORK(pwp, PMCS_WORK_ABORT_HANDLE);
		return (1);
	}
	if (pwrk) {
		/* Work structure exists but belongs to someone else */
		mutex_exit(&pwrk->lock);
	}
	/*
	 * Okay, those weren't the droids we were looking for.
	 * See if the command is on any of the wait queues.
	 */
	mutex_enter(&xp->wqlock);
	sp = NULL;
	STAILQ_FOREACH(sp, &xp->wq, cmd_next) {
		if (sp == PKT2CMD(pkt)) {
			STAILQ_REMOVE(&xp->wq, sp, pmcs_cmd, cmd_next);
			break;
		}
	}
	mutex_exit(&xp->wqlock);
	if (sp) {
		/* Found it queued but not started: complete it as aborted */
		pkt->pkt_reason = CMD_ABORTED;
		pkt->pkt_statistics |= STAT_ABORTED;
		mutex_enter(&pwp->cq_lock);
		STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next);
		PMCS_CQ_RUN_LOCKED(pwp);
		mutex_exit(&pwp->cq_lock);
		return (1);
	}
	return (0);
}
661 
662 /*
663  * SCSA reset functions
664  */
/*
 * tran_reset(9E) entry point.  Supports RESET_LUN and RESET_TARGET by
 * issuing a device reset through pmcs_reset_dev(); RESET_ALL is not
 * supported.  Returns 1 on success, 0 on failure or unsupported level.
 */
static int
pmcs_scsa_reset(struct scsi_address *ap, int level)
{
	pmcs_hw_t *pwp = ADDR2PMC(ap);
	pmcs_phy_t *pptr;
	pmcs_xscsi_t *xp;
	uint64_t lun = (uint64_t)-1, *lp = NULL;
	int rval;

	mutex_enter(&pwp->lock);
	if (pwp->state != STATE_RUNNING) {
		mutex_exit(&pwp->lock);
		pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: hba dead", __func__);
		return (0);
	}
	mutex_exit(&pwp->lock);

	switch (level)  {
	case RESET_ALL:
		/* Full bus reset not supported */
		rval = 0;
		break;
	case RESET_LUN:
		/*
		 * Point lp at lun so that pmcs_addr2xp
		 * will fill out the 64 bit lun number.
		 */
		lp = &lun;
		/* FALLTHROUGH */
	case RESET_TARGET:
		/* Returns with xp->statlock held on success */
		xp = pmcs_addr2xp(ap, lp, NULL);
		if (xp == NULL) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG,
			    "%s: no xp found for this scsi address", __func__);
			return (0);
		}

		if (xp->dev_gone) {
			mutex_exit(&xp->statlock);
			pmcs_prt(pwp, PMCS_PRT_DEBUG,
			    "%s: Target 0x%p has gone away", __func__,
			    (void *)xp);
			return (0);
		}

		/*
		 * If we're already performing this action, or if device
		 * state recovery is already running, just return failure.
		 */
		if (xp->resetting || xp->recover_wait) {
			mutex_exit(&xp->statlock);
			return (0);
		}
		xp->reset_wait = 0;
		xp->reset_success = 0;
		xp->resetting = 1;
		pptr = xp->phy;
		mutex_exit(&xp->statlock);

		/* pmcs_reset_dev returns nonzero on FAILURE */
		if (pmcs_reset_dev(pwp, pptr, lun)) {
			rval = 0;
		} else {
			rval = 1;
		}

		mutex_enter(&xp->statlock);
		if (rval == 1) {
			xp->reset_success = 1;
		}
		/* Wake anyone waiting on this reset to complete */
		if (xp->reset_wait) {
			xp->reset_wait = 0;
			cv_signal(&xp->reset_cv);
		}
		xp->resetting = 0;
		mutex_exit(&xp->statlock);
		SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES);
		break;
	default:
		rval = 0;
		break;
	}

	return (rval);
}
748 
749 static int
750 pmcs_scsi_reset_notify(struct scsi_address *ap, int flag,
751     void (*callback)(caddr_t), caddr_t arg)
752 {
753 	pmcs_hw_t *pwp = ADDR2PMC(ap);
754 	return (scsi_hba_reset_notify_setup(ap, flag, callback, arg,
755 	    &pwp->lock, &pwp->reset_notify_listf));
756 }
757 
758 
759 static int
760 pmcs_cap(struct scsi_address *ap, char *cap, int val, int tonly, int set)
761 {
762 	_NOTE(ARGUNUSED(val, tonly));
763 	int cidx, rval = 0;
764 	pmcs_xscsi_t *xp;
765 
766 	cidx = scsi_hba_lookup_capstr(cap);
767 	if (cidx == -1) {
768 		return (-1);
769 	}
770 
771 	xp = pmcs_addr2xp(ap, NULL, NULL);
772 	if (xp == NULL) {
773 		return (-1);
774 	}
775 
776 	switch (cidx) {
777 	case SCSI_CAP_DMA_MAX:
778 	case SCSI_CAP_INITIATOR_ID:
779 		if (set == 0) {
780 			rval = INT_MAX;	/* argh */
781 		}
782 		break;
783 	case SCSI_CAP_DISCONNECT:
784 	case SCSI_CAP_SYNCHRONOUS:
785 	case SCSI_CAP_WIDE_XFER:
786 	case SCSI_CAP_PARITY:
787 	case SCSI_CAP_ARQ:
788 	case SCSI_CAP_UNTAGGED_QING:
789 		if (set == 0) {
790 			rval = 1;
791 		}
792 		break;
793 
794 	case SCSI_CAP_TAGGED_QING:
795 		rval = 1;
796 		break;
797 
798 	case SCSI_CAP_MSG_OUT:
799 	case SCSI_CAP_RESET_NOTIFICATION:
800 	case SCSI_CAP_QFULL_RETRIES:
801 	case SCSI_CAP_QFULL_RETRY_INTERVAL:
802 		break;
803 	case SCSI_CAP_SCSI_VERSION:
804 		if (set == 0) {
805 			rval = SCSI_VERSION_3;
806 		}
807 		break;
808 	case SCSI_CAP_INTERCONNECT_TYPE:
809 		if (set) {
810 			break;
811 		}
812 		if (xp->phy_addressable) {
813 			rval = INTERCONNECT_SATA;
814 		} else {
815 			rval = INTERCONNECT_SAS;
816 		}
817 		break;
818 	case SCSI_CAP_CDB_LEN:
819 		if (set == 0) {
820 			rval = 16;
821 		}
822 		break;
823 	case SCSI_CAP_LUN_RESET:
824 		if (set) {
825 			break;
826 		}
827 		if (xp->dtype == SATA) {
828 			rval = 0;
829 		} else {
830 			rval = 1;
831 		}
832 		break;
833 	default:
834 		rval = -1;
835 		break;
836 	}
837 	mutex_exit(&xp->statlock);
838 	pmcs_prt(ADDR2PMC(ap), PMCS_PRT_DEBUG3,
839 	    "%s: cap %s val %d set %d rval %d",
840 	    __func__, cap, val, set, rval);
841 	return (rval);
842 }
843 
844 /*
845  * Returns with statlock held if the xp is found.
846  * Fills in pmcs_cmd_t with values if pmcs_cmd_t pointer non-NULL.
847  */
848 static pmcs_xscsi_t *
849 pmcs_addr2xp(struct scsi_address *ap, uint64_t *lp, pmcs_cmd_t *sp)
850 {
851 	pmcs_xscsi_t *xp;
852 	pmcs_lun_t *lun = (pmcs_lun_t *)
853 	    scsi_device_hba_private_get(scsi_address_device(ap));
854 
855 	if ((lun == NULL) || (lun->target == NULL)) {
856 		return (NULL);
857 	}
858 	xp = lun->target;
859 	mutex_enter(&xp->statlock);
860 
861 	if (xp->dev_gone || (xp->phy == NULL)) {
862 		mutex_exit(&xp->statlock);
863 		return (NULL);
864 	}
865 
866 	if (sp != NULL) {
867 		sp->cmd_target = xp;
868 		sp->cmd_lun = lun;
869 	}
870 	if (lp) {
871 		*lp = lun->lun_num;
872 	}
873 	return (xp);
874 }
875 
876 static int
877 pmcs_scsa_getcap(struct scsi_address *ap, char *cap, int whom)
878 {
879 	int r;
880 	if (cap == NULL) {
881 		return (-1);
882 	}
883 	r = pmcs_cap(ap, cap, 0, whom, 0);
884 	return (r);
885 }
886 
887 static int
888 pmcs_scsa_setcap(struct scsi_address *ap, char *cap, int value, int whom)
889 {
890 	int r;
891 	if (cap == NULL) {
892 		return (-1);
893 	}
894 	r = pmcs_cap(ap, cap, value, whom, 1);
895 	return (r);
896 }
897 
898 static int
899 pmcs_scsa_setup_pkt(struct scsi_pkt *pkt, int (*callback)(caddr_t),
900     caddr_t cbarg)
901 {
902 	_NOTE(ARGUNUSED(callback, cbarg));
903 	pmcs_cmd_t *sp = pkt->pkt_ha_private;
904 
905 	bzero(sp, sizeof (pmcs_cmd_t));
906 	sp->cmd_pkt = pkt;
907 	return (0);
908 }
909 
910 static void
911 pmcs_scsa_teardown_pkt(struct scsi_pkt *pkt)
912 {
913 	pmcs_cmd_t *sp = pkt->pkt_ha_private;
914 	sp->cmd_target = NULL;
915 	sp->cmd_lun = NULL;
916 }
917 
918 static int
919 pmcs_smp_getcap(struct sas_addr *ap, char *cap)
920 {
921 	_NOTE(ARGUNUSED(ap));
922 	int ckey = -1;
923 	int ret = EINVAL;
924 
925 	ckey = sas_hba_lookup_capstr(cap);
926 	if (ckey == -1)
927 		return (EINVAL);
928 
929 	switch (ckey) {
930 	case SAS_CAP_SMP_CRC:
931 		ret = 0;
932 		break;
933 	default:
934 		ret = EINVAL;
935 		break;
936 	}
937 	return (ret);
938 }
939 
/*
 * tran_smp_start entry point: run a single SMP request/response exchange
 * through an expander.  The request and response are staged through the
 * HBA's DMA scratch area (request at offset 0, response at rdoff), a
 * PMCIN_SMP_REQUEST message is queued, and we block waiting for the
 * completion.  Returns DDI_SUCCESS or DDI_FAILURE, with pkt_reason set
 * to an errno value on failure.
 */
static int
pmcs_smp_start(struct smp_pkt *pktp)
{
	struct pmcwork *pwrk;
	/* Response is staged in scratch just past the request area */
	const uint_t rdoff = SAS_SMP_MAX_PAYLOAD;
	uint32_t msg[PMCS_MSG_SIZE], *ptr, htag, status;
	uint64_t wwn;
	pmcs_hw_t *pwp = pktp->pkt_address->a_hba_tran->tran_hba_private;
	pmcs_phy_t *pptr;
	pmcs_xscsi_t *xp;
	uint_t reqsz, rspsz, will_retry;
	int result;

	bcopy(pktp->pkt_address->a_wwn, &wwn, SAS_WWN_BYTE_SIZE);

	pmcs_prt(pwp, PMCS_PRT_DEBUG1, "%s: starting for wwn 0x%" PRIx64,
	    __func__, wwn);

	will_retry = pktp->pkt_will_retry;

	/* Blocks until scratch is available; released on every exit path */
	(void) pmcs_acquire_scratch(pwp, B_TRUE);
	reqsz = pktp->pkt_reqsize;
	if (reqsz > SAS_SMP_MAX_PAYLOAD) {
		reqsz = SAS_SMP_MAX_PAYLOAD;
	}
	(void) memcpy(pwp->scratch, pktp->pkt_req, reqsz);

	rspsz = pktp->pkt_rspsize;
	if (rspsz > SAS_SMP_MAX_PAYLOAD) {
		rspsz = SAS_SMP_MAX_PAYLOAD;
	}

	/*
	 * The request size from the SMP driver always includes 4 bytes
	 * for the CRC. The PMCS chip, however, doesn't want to see those
	 * counts as part of the transfer size.
	 */
	reqsz -= 4;

	pptr = pmcs_find_phy_by_wwn(pwp, wwn);
	/* PHY is now locked */
	if (pptr == NULL || pptr->dtype != EXPANDER) {
		if (pptr) {
			pmcs_unlock_phy(pptr);
		}
		pmcs_release_scratch(pwp);
		pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: could not find phy",
		    __func__);
		pktp->pkt_reason = ENXIO;
		return (DDI_FAILURE);
	}

	pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_WAIT, pptr);
	if (pwrk == NULL) {
		pmcs_unlock_phy(pptr);
		pmcs_release_scratch(pwp);
		pmcs_prt(pwp, PMCS_PRT_DEBUG,
		    "%s: could not get work structure", __func__);
		pktp->pkt_reason = will_retry ? EAGAIN :EBUSY;
		return (DDI_FAILURE);
	}

	/* Completion copies the outbound message into msg[] for us */
	pwrk->arg = msg;
	pwrk->dtype = EXPANDER;
	mutex_enter(&pwp->iqp_lock[PMCS_IQ_OTHER]);
	ptr = GET_IQ_ENTRY(pwp, PMCS_IQ_OTHER);
	if (ptr == NULL) {
		pmcs_pwork(pwp, pwrk);
		mutex_exit(&pwp->iqp_lock[PMCS_IQ_OTHER]);
		pmcs_unlock_phy(pptr);
		pmcs_release_scratch(pwp);
		pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: could not get IQ entry",
		    __func__);
		pktp->pkt_reason = will_retry ? EAGAIN :EBUSY;
		return (DDI_FAILURE);
	}
	/*
	 * Build the SMP_REQUEST message: indirect request (scratch+0) and
	 * indirect response (scratch+rdoff), both described by DMA address
	 * pairs and lengths.
	 */
	msg[0] = LE_32(PMCS_HIPRI(pwp, PMCS_OQ_GENERAL, PMCIN_SMP_REQUEST));
	msg[1] = LE_32(pwrk->htag);
	msg[2] = LE_32(pptr->device_id);
	msg[3] = LE_32(SMP_INDIRECT_RESPONSE | SMP_INDIRECT_REQUEST);
	msg[8] = LE_32(DWORD0(pwp->scratch_dma));
	msg[9] = LE_32(DWORD1(pwp->scratch_dma));
	msg[10] = LE_32(reqsz);
	msg[11] = 0;
	msg[12] = LE_32(DWORD0(pwp->scratch_dma+rdoff));
	msg[13] = LE_32(DWORD1(pwp->scratch_dma+rdoff));
	msg[14] = LE_32(rspsz);
	msg[15] = 0;

	COPY_MESSAGE(ptr, msg, PMCS_MSG_SIZE);
	pwrk->state = PMCS_WORK_STATE_ONCHIP;
	htag = pwrk->htag;
	INC_IQ_ENTRY(pwp, PMCS_IQ_OTHER);

	/* Drop the PHY lock while we sleep on the completion */
	pmcs_unlock_phy(pptr);
	WAIT_FOR(pwrk, pktp->pkt_timeout * 1000, result);
	pmcs_pwork(pwp, pwrk);
	pmcs_lock_phy(pptr);

	if (result) {
		/* Timed out: try to abort the command on the chip */
		pmcs_timed_out(pwp, htag, __func__);
		if (pmcs_abort(pwp, pptr, htag, 0, 0)) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
			    "%s: Unable to issue SMP ABORT for htag 0x%08x",
			    __func__, htag);
		} else {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
			    "%s: Issuing SMP ABORT for htag 0x%08x",
			    __func__, htag);
		}
		pmcs_unlock_phy(pptr);
		pmcs_release_scratch(pwp);
		pktp->pkt_reason = ETIMEDOUT;
		return (DDI_FAILURE);
	}
	/* Completion status is in the second word of the copied message */
	status = LE_32(msg[2]);
	if (status == PMCOUT_STATUS_OVERFLOW) {
		/* Treat overflow as OK for the copy-out, but fail the pkt */
		status = PMCOUT_STATUS_OK;
		pktp->pkt_reason = EOVERFLOW;
	}
	if (status != PMCOUT_STATUS_OK) {
		const char *emsg = pmcs_status_str(status);
		if (emsg == NULL) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
			    "SMP operation failed (0x%x)", status);
		} else {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
			    "SMP operation failed (%s)", emsg);
		}

		if ((status == PMCOUT_STATUS_ERROR_HW_TIMEOUT) ||
		    (status == PMCOUT_STATUS_IO_XFER_OPEN_RETRY_TIMEOUT)) {
			pktp->pkt_reason = will_retry ? EAGAIN : ETIMEDOUT;
			result = DDI_FAILURE;
		} else if (status ==
		    PMCOUT_STATUS_OPEN_CNX_ERROR_IT_NEXUS_LOSS) {
			/*
			 * I-T nexus loss: mark the device non-operational
			 * and abort anything outstanding to it.
			 */
			xp = pptr->target;
			if (xp == NULL) {
				pktp->pkt_reason = EIO;
				result = DDI_FAILURE;
				goto out;
			}
			if (xp->dev_state !=
			    PMCS_DEVICE_STATE_NON_OPERATIONAL) {
				xp->dev_state =
				    PMCS_DEVICE_STATE_NON_OPERATIONAL;
				pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
				    "%s: Got _IT_NEXUS_LOSS SMP status. "
				    "Tgt(0x%p) dev_state set to "
				    "_NON_OPERATIONAL", __func__,
				    (void *)xp);
			}
			/* ABORT any pending commands related to this device */
			if (pmcs_abort(pwp, pptr, pptr->device_id, 1, 1) != 0) {
				pptr->abort_pending = 1;
				pktp->pkt_reason = EIO;
				result = DDI_FAILURE;
			}
		} else {
			pktp->pkt_reason = will_retry ? EAGAIN : EIO;
			result = DDI_FAILURE;
		}
	} else {
		/* Success: copy the staged response back to the caller */
		(void) memcpy(pktp->pkt_rsp,
		    &((uint8_t *)pwp->scratch)[rdoff], rspsz);
		if (pktp->pkt_reason == EOVERFLOW) {
			result = DDI_FAILURE;
		} else {
			result = DDI_SUCCESS;
		}
	}
out:
	pmcs_unlock_phy(pptr);
	pmcs_release_scratch(pwp);
	return (result);
}
1116 
/*
 * SMP (SAS expander) target init entry point for the SMP transport.
 *
 * Looks up the iport and target softstate for the child devinfo node,
 * takes a reference on the underlying expander PHY (unless the expander
 * is attached directly to a root PHY of the HBA), assigns the target a
 * slot in the HBA's target list on first init, and publishes the
 * "attached-port" property (the parent port WWN) on the child node.
 *
 * Returns DDI_SUCCESS or DDI_FAILURE.  A failure to look up the
 * "target-port" property is deliberately NOT treated as fatal.
 */
static int
pmcs_smp_init(dev_info_t *self, dev_info_t *child,
    sas_hba_tran_t *tran, smp_device_t *smp)
{
	_NOTE(ARGUNUSED(tran, smp));
	pmcs_iport_t *iport;
	pmcs_hw_t *pwp;
	pmcs_xscsi_t *tgt;
	pmcs_phy_t *phy, *pphy;
	uint64_t wwn;
	char *addr, *tgt_port;
	int ua_form = 1;	/* unit-address form for scsi_wwn_to_wwnstr() */

	iport = ddi_get_soft_state(pmcs_iport_softstate,
	    ddi_get_instance(self));
	ASSERT(iport);
	if (iport == NULL)
		return (DDI_FAILURE);
	pwp = iport->pwp;
	ASSERT(pwp);
	if (pwp == NULL)
		return (DDI_FAILURE);
	pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, "%s: %s", __func__,
	    ddi_get_name(child));

	/* Get "target-port" prop from devinfo node */
	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, child,
	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
	    SCSI_ADDR_PROP_TARGET_PORT, &tgt_port) != DDI_SUCCESS) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: Failed to lookup prop ("
		    SCSI_ADDR_PROP_TARGET_PORT")", __func__);
		/* Don't fail _smp_init() because we couldn't get/set a prop */
		return (DDI_SUCCESS);
	}

	/*
	 * Validate that this tran_tgt_init is for an active iport.
	 */
	if (iport->ua_state == UA_INACTIVE) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG,
		    "%s: Init on inactive iport for '%s'",
		    __func__, tgt_port);
		ddi_prop_free(tgt_port);
		return (DDI_FAILURE);
	}

	mutex_enter(&pwp->lock);

	/* Retrieve softstate using unit-address */
	tgt = pmcs_get_target(iport, tgt_port);
	if (tgt == NULL) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: tgt softstate not found",
		    __func__);
		ddi_prop_free(tgt_port);
		mutex_exit(&pwp->lock);
		return (DDI_FAILURE);
	}

	/* pmcs_get_target() returns with the target's PHY lock held. */
	phy = tgt->phy;
	ASSERT(mutex_owned(&phy->phy_lock));

	if (IS_ROOT_PHY(phy)) {
		/* Expander attached to HBA - don't ref_count it */
		wwn = pwp->sas_wwns[0];
	} else {
		pmcs_inc_phy_ref_count(phy);

		/*
		 * Parent (in topology) is also an expander
		 * Now that we've increased the ref count on phy, it's OK
		 * to drop the lock so we can acquire the parent's lock.
		 */

		pphy = phy->parent;
		pmcs_unlock_phy(phy);
		pmcs_lock_phy(pphy);
		wwn = pmcs_barray2wwn(pphy->sas_address);
		pmcs_unlock_phy(pphy);
		pmcs_lock_phy(phy);
	}

	/*
	 * If this is the 1st smp_init, add this to our list.
	 */
	if (tgt->target_num == PMCS_INVALID_TARGET_NUM) {
		int target;
		/* Claim the first free slot in the HBA's target array */
		for (target = 0; target < pwp->max_dev; target++) {
			if (pwp->targets[target] != NULL) {
				continue;
			}

			pwp->targets[target] = tgt;
			tgt->target_num = (uint16_t)target;
			tgt->assigned = 1;
			tgt->dev_state = PMCS_DEVICE_STATE_OPERATIONAL;
			break;
		}

		if (target == pwp->max_dev) {
			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
			    "Target list full.");
			goto smp_init_fail;
		}
	}

	if (!pmcs_assign_device(pwp, tgt)) {
		/* Undo the slot assignment made above before bailing */
		pwp->targets[tgt->target_num] = NULL;
		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
		    "%s: pmcs_assign_device failed for target 0x%p",
		    __func__, (void *)tgt);
		goto smp_init_fail;
	}

	pmcs_unlock_phy(phy);
	mutex_exit(&pwp->lock);

	/*
	 * NOTE(review): ref_count is bumped and phy->dtype is read after
	 * both the PHY lock and pwp->lock have been dropped — confirm this
	 * is safe against concurrent teardown of the target/PHY.
	 */
	tgt->ref_count++;
	tgt->dtype = phy->dtype;

	addr = scsi_wwn_to_wwnstr(wwn, ua_form, NULL);
	/* XXX: Update smp devinfo node using ndi_xxx */
	if (ndi_prop_update_string(DDI_DEV_T_NONE, child,
	    SCSI_ADDR_PROP_ATTACHED_PORT, addr) != DDI_SUCCESS) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: Failed to set prop ("
		    SCSI_ADDR_PROP_ATTACHED_PORT")", __func__);
	}
	(void) scsi_free_wwnstr(addr);
	ddi_prop_free(tgt_port);
	return (DDI_SUCCESS);

smp_init_fail:
	/* Unwind: detach target from PHY, drop any PHY ref, free softstate */
	tgt->phy = NULL;
	tgt->target_num = PMCS_INVALID_TARGET_NUM;
	phy->target = NULL;
	if (!IS_ROOT_PHY(phy)) {
		pmcs_dec_phy_ref_count(phy);
	}
	pmcs_unlock_phy(phy);
	mutex_exit(&pwp->lock);
	ddi_soft_state_bystr_free(iport->tgt_sstate, tgt->unit_address);
	ddi_prop_free(tgt_port);
	return (DDI_FAILURE);
}
1260 
1261 static void
1262 pmcs_smp_free(dev_info_t *self, dev_info_t *child,
1263     sas_hba_tran_t *tran, smp_device_t *smp)
1264 {
1265 	_NOTE(ARGUNUSED(tran, smp));
1266 	pmcs_iport_t *iport;
1267 	pmcs_hw_t *pwp;
1268 	pmcs_xscsi_t *tgt;
1269 	char *tgt_port;
1270 
1271 	iport = ddi_get_soft_state(pmcs_iport_softstate,
1272 	    ddi_get_instance(self));
1273 	ASSERT(iport);
1274 	if (iport == NULL)
1275 		return;
1276 
1277 	pwp = iport->pwp;
1278 	if (pwp == NULL)
1279 		return;
1280 	ASSERT(pwp);
1281 	pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG, "%s: %s", __func__,
1282 	    ddi_get_name(child));
1283 
1284 	/* Get "target-port" prop from devinfo node */
1285 	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, child,
1286 	    DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
1287 	    SCSI_ADDR_PROP_TARGET_PORT, &tgt_port) != DDI_SUCCESS) {
1288 		pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: Failed to lookup prop ("
1289 		    SCSI_ADDR_PROP_TARGET_PORT")", __func__);
1290 		return;
1291 	}
1292 	/* Retrieve softstate using unit-address */
1293 	tgt = ddi_soft_state_bystr_get(iport->tgt_sstate, tgt_port);
1294 	ddi_prop_free(tgt_port);
1295 
1296 	if (tgt == NULL) {
1297 		pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: tgt softstate not found",
1298 		    __func__);
1299 		return;
1300 	}
1301 
1302 	mutex_enter(&pwp->lock);
1303 	mutex_enter(&tgt->statlock);
1304 	if (tgt->phy) {
1305 		if (!IS_ROOT_PHY(tgt->phy)) {
1306 			pmcs_dec_phy_ref_count(tgt->phy);
1307 		}
1308 	}
1309 
1310 	if (--tgt->ref_count == 0) {
1311 		/*
1312 		 * Remove this target from our list. The softstate
1313 		 * will remain, and the device will remain registered
1314 		 * with the hardware unless/until we're told that the
1315 		 * device physically went away.
1316 		 */
1317 		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
1318 		    "Removing target 0x%p (vtgt %d) from target list",
1319 		    (void *)tgt, tgt->target_num);
1320 		pwp->targets[tgt->target_num] = NULL;
1321 		tgt->target_num = PMCS_INVALID_TARGET_NUM;
1322 		tgt->phy->target = NULL;
1323 		tgt->phy = NULL;
1324 	}
1325 
1326 	mutex_exit(&tgt->statlock);
1327 	mutex_exit(&pwp->lock);
1328 }
1329 
/*
 * SCSA quiesce entry point.
 *
 * For iport nodes this is a no-op (returns 0).  For the HBA node, it
 * blocks new command submission (pwp->blocked) and waits on drain_cv
 * until every target's active command count drains to zero, then clears
 * the per-target "draining" flags.  Returns 0 on success, -1 if the
 * softstate is missing or the controller is not in STATE_RUNNING.
 */
static int
pmcs_scsi_quiesce(dev_info_t *dip)
{
	pmcs_hw_t *pwp;
	int totactive = -1;	/* -1 forces at least one pass of the loop */
	pmcs_xscsi_t *xp;
	uint16_t target;

	if (ddi_get_soft_state(pmcs_iport_softstate, ddi_get_instance(dip)))
		return (0);		/* iport */

	pwp  = ddi_get_soft_state(pmcs_softc_state, ddi_get_instance(dip));
	if (pwp == NULL) {
		return (-1);
	}
	mutex_enter(&pwp->lock);
	if (pwp->state != STATE_RUNNING) {
		mutex_exit(&pwp->lock);
		return (-1);
	}

	pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s called", __func__);
	pwp->blocked = 1;
	while (totactive) {
		/* Sum active commands across all targets; mark them draining */
		totactive = 0;
		for (target = 0; target < pwp->max_dev; target++) {
			xp = pwp->targets[target];
			if (xp == NULL) {
				continue;
			}
			mutex_enter(&xp->statlock);
			if (xp->actv_cnt) {
				totactive += xp->actv_cnt;
				xp->draining = 1;
			}
			mutex_exit(&xp->statlock);
		}
		if (totactive) {
			/* Drops pwp->lock while waiting for completions */
			cv_wait(&pwp->drain_cv, &pwp->lock);
		}
		/*
		 * The pwp->blocked may have been reset. e.g a SCSI bus reset
		 */
		pwp->blocked = 1;
	}

	/* Drained: clear the draining flag on every target */
	for (target = 0; target < pwp->max_dev; target++) {
		xp = pwp->targets[target];
		if (xp == NULL) {
			continue;
		}
		mutex_enter(&xp->statlock);
		xp->draining = 0;
		mutex_exit(&xp->statlock);
	}

	mutex_exit(&pwp->lock);
	if (totactive == 0) {
		pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s drain complete", __func__);
	}
	return (0);
}
1392 
/*
 * SCSA unquiesce entry point.
 *
 * For iport nodes this is a no-op (returns 0).  For the HBA node, it
 * clears the "blocked" flag set by pmcs_scsi_quiesce(), restarts all
 * waiting work queues, and runs the completion queue.  Returns 0 on
 * success, -1 if the softstate is missing or the controller is not in
 * STATE_RUNNING.
 */
static int
pmcs_scsi_unquiesce(dev_info_t *dip)
{
	pmcs_hw_t *pwp;

	if (ddi_get_soft_state(pmcs_iport_softstate, ddi_get_instance(dip)))
		return (0);		/* iport */

	pwp  = ddi_get_soft_state(pmcs_softc_state, ddi_get_instance(dip));
	if (pwp == NULL) {
		return (-1);
	}
	mutex_enter(&pwp->lock);
	if (pwp->state != STATE_RUNNING) {
		mutex_exit(&pwp->lock);
		return (-1);
	}
	pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s called", __func__);
	pwp->blocked = 0;
	mutex_exit(&pwp->lock);

	/*
	 * Run all pending commands.
	 */
	pmcs_scsa_wq_run(pwp);

	/*
	 * Complete all completed commands.
	 * This also unlocks us.
	 */
	PMCS_CQ_RUN(pwp);
	return (0);
}
1426 
1427 /*
1428  * Start commands for a particular device
1429  * If the actual start of a command fails, return B_FALSE.  Any other result
1430  * is a B_TRUE return.
1431  */
1432 boolean_t
1433 pmcs_scsa_wq_run_one(pmcs_hw_t *pwp, pmcs_xscsi_t *xp)
1434 {
1435 	pmcs_cmd_t *sp;
1436 	pmcs_phy_t *phyp;
1437 	pmcwork_t *pwrk;
1438 	boolean_t run_one, blocked;
1439 	int rval;
1440 
1441 	/*
1442 	 * First, check to see if we're blocked or resource limited
1443 	 */
1444 	mutex_enter(&pwp->lock);
1445 	blocked = pwp->blocked;
1446 	/*
1447 	 * If resource_limited is set, we're resource constrained and
1448 	 * we will run only one work request for this target.
1449 	 */
1450 	run_one = pwp->resource_limited;
1451 	mutex_exit(&pwp->lock);
1452 
1453 	if (blocked) {
1454 		/* Queues will get restarted when we get unblocked */
1455 		return (B_TRUE);
1456 	}
1457 
1458 	/*
1459 	 * Might as well verify the queue is not empty before moving on
1460 	 */
1461 	mutex_enter(&xp->wqlock);
1462 	if (STAILQ_EMPTY(&xp->wq)) {
1463 		mutex_exit(&xp->wqlock);
1464 		return (B_TRUE);
1465 	}
1466 	mutex_exit(&xp->wqlock);
1467 
1468 	/*
1469 	 * If we're draining or resetting, just reschedule work queue and bail.
1470 	 */
1471 	mutex_enter(&xp->statlock);
1472 	if (xp->draining || xp->resetting || xp->special_running ||
1473 	    xp->special_needed) {
1474 		mutex_exit(&xp->statlock);
1475 		SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES);
1476 		return (B_TRUE);
1477 	}
1478 
1479 	/*
1480 	 * Next, check to see if the target is gone.
1481 	 */
1482 	if (xp->dev_gone) {
1483 		pmcs_prt(pwp, PMCS_PRT_DEBUG,
1484 		    "%s: Flushing wait queue for dead tgt 0x%p", __func__,
1485 		    (void *)xp);
1486 		pmcs_flush_target_queues(pwp, xp, PMCS_TGT_WAIT_QUEUE);
1487 		mutex_exit(&xp->statlock);
1488 		return (B_TRUE);
1489 	}
1490 
1491 	/*
1492 	 * Increment the PHY's ref_count now so we know it won't go away
1493 	 * after we drop the target lock.  Drop it before returning.  If the
1494 	 * PHY dies, the commands we attempt to send will fail, but at least
1495 	 * we know we have a real PHY pointer.
1496 	 */
1497 	phyp = xp->phy;
1498 	pmcs_inc_phy_ref_count(phyp);
1499 	mutex_exit(&xp->statlock);
1500 
1501 	mutex_enter(&xp->wqlock);
1502 	while ((sp = STAILQ_FIRST(&xp->wq)) != NULL) {
1503 		pwrk = pmcs_gwork(pwp, PMCS_TAG_TYPE_CBACK, phyp);
1504 		if (pwrk == NULL) {
1505 			pmcs_prt(pwp, PMCS_PRT_DEBUG,
1506 			    "%s: out of work structures", __func__);
1507 			SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES);
1508 			break;
1509 		}
1510 		STAILQ_REMOVE_HEAD(&xp->wq, cmd_next);
1511 		mutex_exit(&xp->wqlock);
1512 
1513 		pwrk->xp = xp;
1514 		pwrk->arg = sp;
1515 		sp->cmd_tag = pwrk->htag;
1516 		pwrk->timer = US2WT(CMD2PKT(sp)->pkt_time * 1000000);
1517 		if (pwrk->timer == 0) {
1518 			pwrk->timer = US2WT(1000000);
1519 		}
1520 
1521 		pwrk->dtype = xp->dtype;
1522 
1523 		if (xp->dtype == SAS) {
1524 			pwrk->ptr = (void *) pmcs_SAS_done;
1525 			if ((rval = pmcs_SAS_run(sp, pwrk)) != 0) {
1526 				sp->cmd_tag = NULL;
1527 				pmcs_dec_phy_ref_count(phyp);
1528 				pmcs_pwork(pwp, pwrk);
1529 				SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES);
1530 				if (rval == PMCS_WQ_RUN_FAIL_RES) {
1531 					return (B_FALSE);
1532 				} else {
1533 					return (B_TRUE);
1534 				}
1535 			}
1536 		} else {
1537 			ASSERT(xp->dtype == SATA);
1538 			pwrk->ptr = (void *) pmcs_SATA_done;
1539 			if ((rval = pmcs_SATA_run(sp, pwrk)) != 0) {
1540 				sp->cmd_tag = NULL;
1541 				pmcs_dec_phy_ref_count(phyp);
1542 				pmcs_pwork(pwp, pwrk);
1543 				SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES);
1544 				if (rval == PMCS_WQ_RUN_FAIL_RES) {
1545 					return (B_FALSE);
1546 				} else {
1547 					return (B_TRUE);
1548 				}
1549 			}
1550 		}
1551 
1552 		if (run_one) {
1553 			goto wq_out;
1554 		}
1555 		mutex_enter(&xp->wqlock);
1556 	}
1557 
1558 	mutex_exit(&xp->wqlock);
1559 
1560 wq_out:
1561 	pmcs_dec_phy_ref_count(phyp);
1562 	return (B_TRUE);
1563 }
1564 
1565 /*
1566  * Start commands for all devices.
1567  */
1568 void
1569 pmcs_scsa_wq_run(pmcs_hw_t *pwp)
1570 {
1571 	pmcs_xscsi_t *xp;
1572 	uint16_t target_start, target;
1573 	boolean_t	rval = B_TRUE;
1574 
1575 	mutex_enter(&pwp->lock);
1576 	target_start = pwp->last_wq_dev;
1577 	target = target_start;
1578 
1579 	do {
1580 		xp = pwp->targets[target];
1581 		if (xp == NULL) {
1582 			if (++target == pwp->max_dev) {
1583 				target = 0;
1584 			}
1585 			continue;
1586 		}
1587 
1588 		mutex_exit(&pwp->lock);
1589 		rval = pmcs_scsa_wq_run_one(pwp, xp);
1590 		if (rval == B_FALSE) {
1591 			mutex_enter(&pwp->lock);
1592 			break;
1593 		}
1594 		mutex_enter(&pwp->lock);
1595 		if (++target == pwp->max_dev) {
1596 			target = 0;
1597 		}
1598 	} while (target != target_start);
1599 
1600 	if (rval) {
1601 		pwp->resource_limited = 0; /* Not resource-constrained */
1602 	} else {
1603 		pwp->resource_limited = 1; /* Give others a chance */
1604 	}
1605 
1606 	pwp->last_wq_dev = target;
1607 	mutex_exit(&pwp->lock);
1608 }
1609 
1610 /*
1611  * Pull the completion queue, drop the lock and complete all elements.
1612  */
1613 
1614 void
1615 pmcs_scsa_cq_run(void *arg)
1616 {
1617 	pmcs_cq_thr_info_t *cqti = (pmcs_cq_thr_info_t *)arg;
1618 	pmcs_hw_t *pwp = cqti->cq_pwp;
1619 	pmcs_cmd_t *sp, *nxt;
1620 	struct scsi_pkt *pkt;
1621 	pmcs_iocomp_cb_t *ioccb, *ioccb_next;
1622 	pmcs_cb_t callback;
1623 	uint32_t niodone;
1624 
1625 	DTRACE_PROBE1(pmcs__scsa__cq__run__start, pmcs_cq_thr_info_t *, cqti);
1626 
1627 	mutex_enter(&pwp->cq_lock);
1628 
1629 	while (!pwp->cq_info.cq_stop) {
1630 		/*
1631 		 * First, check the I/O completion callback queue.
1632 		 */
1633 
1634 		ioccb = pwp->iocomp_cb_head;
1635 		pwp->iocomp_cb_head = NULL;
1636 		pwp->iocomp_cb_tail = NULL;
1637 		mutex_exit(&pwp->cq_lock);
1638 
1639 		niodone = 0;
1640 
1641 		while (ioccb) {
1642 			niodone++;
1643 			/*
1644 			 * Grab the lock on the work structure. The callback
1645 			 * routine is responsible for clearing it.
1646 			 */
1647 			mutex_enter(&ioccb->pwrk->lock);
1648 			ioccb_next = ioccb->next;
1649 			callback = (pmcs_cb_t)ioccb->pwrk->ptr;
1650 			(*callback)(pwp, ioccb->pwrk,
1651 			    (uint32_t *)((void *)ioccb->iomb));
1652 			kmem_cache_free(pwp->iocomp_cb_cache, ioccb);
1653 			ioccb = ioccb_next;
1654 		}
1655 
1656 		/*
1657 		 * Next, run the completion queue
1658 		 */
1659 
1660 		mutex_enter(&pwp->cq_lock);
1661 		sp = STAILQ_FIRST(&pwp->cq);
1662 		STAILQ_INIT(&pwp->cq);
1663 		mutex_exit(&pwp->cq_lock);
1664 
1665 		DTRACE_PROBE1(pmcs__scsa__cq__run__start__loop,
1666 		    pmcs_cq_thr_info_t *, cqti);
1667 
1668 		if (sp && pmcs_check_acc_dma_handle(pwp)) {
1669 			ddi_fm_service_impact(pwp->dip, DDI_SERVICE_UNAFFECTED);
1670 		}
1671 
1672 		while (sp) {
1673 			nxt = STAILQ_NEXT(sp, cmd_next);
1674 			pkt = CMD2PKT(sp);
1675 			pmcs_prt(pwp, PMCS_PRT_DEBUG3,
1676 			    "%s: calling completion on %p for tgt %p", __func__,
1677 			    (void *)sp, (void *)sp->cmd_target);
1678 			scsi_hba_pkt_comp(pkt);
1679 			sp = nxt;
1680 		}
1681 
1682 		DTRACE_PROBE1(pmcs__scsa__cq__run__end__loop,
1683 		    pmcs_cq_thr_info_t *, cqti);
1684 
1685 		mutex_enter(&cqti->cq_thr_lock);
1686 		cv_wait(&cqti->cq_cv, &cqti->cq_thr_lock);
1687 		mutex_exit(&cqti->cq_thr_lock);
1688 
1689 		mutex_enter(&pwp->cq_lock);
1690 	}
1691 
1692 	mutex_exit(&pwp->cq_lock);
1693 	DTRACE_PROBE1(pmcs__scsa__cq__run__stop, pmcs_cq_thr_info_t *, cqti);
1694 	thread_exit();
1695 }
1696 
1697 /*
1698  * Run a SAS command.  Called with pwrk->lock held, returns unlocked.
1699  */
1700 static int
1701 pmcs_SAS_run(pmcs_cmd_t *sp, pmcwork_t *pwrk)
1702 {
1703 	pmcs_hw_t *pwp = CMD2PMC(sp);
1704 	struct scsi_pkt *pkt = CMD2PKT(sp);
1705 	pmcs_xscsi_t *xp = pwrk->xp;
1706 	uint32_t iq, *ptr;
1707 	sas_ssp_cmd_iu_t sc;
1708 
1709 	mutex_enter(&xp->statlock);
1710 	if (!xp->assigned) {
1711 		mutex_exit(&xp->statlock);
1712 		return (PMCS_WQ_RUN_FAIL_OTHER);
1713 	}
1714 	if ((xp->actv_cnt >= xp->qdepth) || xp->recover_wait) {
1715 		mutex_exit(&xp->statlock);
1716 		mutex_enter(&xp->wqlock);
1717 		STAILQ_INSERT_HEAD(&xp->wq, sp, cmd_next);
1718 		mutex_exit(&xp->wqlock);
1719 		return (PMCS_WQ_RUN_FAIL_OTHER);
1720 	}
1721 	GET_IO_IQ_ENTRY(pwp, ptr, pwrk->phy->device_id, iq);
1722 	if (ptr == NULL) {
1723 		mutex_exit(&xp->statlock);
1724 		/*
1725 		 * This is a temporary failure not likely to unblocked by
1726 		 * commands completing as the test for scheduling the
1727 		 * restart of work is a per-device test.
1728 		 */
1729 		mutex_enter(&xp->wqlock);
1730 		STAILQ_INSERT_HEAD(&xp->wq, sp, cmd_next);
1731 		mutex_exit(&xp->wqlock);
1732 		pmcs_prt(pwp, PMCS_PRT_DEBUG,
1733 		    "%s: Failed to get IO IQ entry for tgt %d",
1734 		    __func__, xp->target_num);
1735 		return (PMCS_WQ_RUN_FAIL_RES);
1736 
1737 	}
1738 
1739 	ptr[0] =
1740 	    LE_32(PMCS_IOMB_IN_SAS(PMCS_OQ_IODONE, PMCIN_SSP_INI_IO_START));
1741 	ptr[1] = LE_32(pwrk->htag);
1742 	ptr[2] = LE_32(pwrk->phy->device_id);
1743 	ptr[3] = LE_32(pkt->pkt_dma_len);
1744 	if (ptr[3]) {
1745 		ASSERT(pkt->pkt_numcookies);
1746 		if (pkt->pkt_dma_flags & DDI_DMA_READ) {
1747 			ptr[4] = LE_32(PMCIN_DATADIR_2_INI);
1748 		} else {
1749 			ptr[4] = LE_32(PMCIN_DATADIR_2_DEV);
1750 		}
1751 		if (pmcs_dma_load(pwp, sp, ptr)) {
1752 			mutex_exit(&pwp->iqp_lock[iq]);
1753 			mutex_exit(&xp->statlock);
1754 			mutex_enter(&xp->wqlock);
1755 			if (STAILQ_EMPTY(&xp->wq)) {
1756 				STAILQ_INSERT_HEAD(&xp->wq, sp, cmd_next);
1757 				mutex_exit(&xp->wqlock);
1758 			} else {
1759 				mutex_exit(&xp->wqlock);
1760 				CMD2PKT(sp)->pkt_scbp[0] = STATUS_QFULL;
1761 				CMD2PKT(sp)->pkt_reason = CMD_CMPLT;
1762 				CMD2PKT(sp)->pkt_state |= STATE_GOT_BUS |
1763 				    STATE_GOT_TARGET | STATE_SENT_CMD |
1764 				    STATE_GOT_STATUS;
1765 				mutex_enter(&pwp->cq_lock);
1766 				STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next);
1767 				mutex_exit(&pwp->cq_lock);
1768 				pmcs_prt(pwp, PMCS_PRT_DEBUG,
1769 				    "%s: Failed to dma_load for tgt %d (QF)",
1770 				    __func__, xp->target_num);
1771 			}
1772 			return (PMCS_WQ_RUN_FAIL_RES);
1773 		}
1774 	} else {
1775 		ptr[4] = LE_32(PMCIN_DATADIR_NONE);
1776 		CLEAN_MESSAGE(ptr, 12);
1777 	}
1778 	xp->actv_cnt++;
1779 	if (xp->actv_cnt > xp->maxdepth) {
1780 		xp->maxdepth = xp->actv_cnt;
1781 		pmcs_prt(pwp, PMCS_PRT_DEBUG2, "%s: max depth now %u",
1782 		    pwrk->phy->path, xp->maxdepth);
1783 	}
1784 	mutex_exit(&xp->statlock);
1785 
1786 
1787 #ifdef	DEBUG
1788 	/*
1789 	 * Generate a PMCOUT_STATUS_XFER_CMD_FRAME_ISSUED
1790 	 * event when this goes out on the wire.
1791 	 */
1792 	ptr[4] |= PMCIN_MESSAGE_REPORT;
1793 #endif
1794 	/*
1795 	 * Fill in the SSP IU
1796 	 */
1797 
1798 	bzero(&sc, sizeof (sas_ssp_cmd_iu_t));
1799 	bcopy((uint8_t *)&sp->cmd_lun->scsi_lun, sc.lun, sizeof (scsi_lun_t));
1800 
1801 	switch (pkt->pkt_flags & FLAG_TAGMASK) {
1802 	case FLAG_HTAG:
1803 		sc.task_attribute = SAS_CMD_TASK_ATTR_HEAD;
1804 		break;
1805 	case FLAG_OTAG:
1806 		sc.task_attribute = SAS_CMD_TASK_ATTR_ORDERED;
1807 		break;
1808 	case FLAG_STAG:
1809 	default:
1810 		sc.task_attribute = SAS_CMD_TASK_ATTR_SIMPLE;
1811 		break;
1812 	}
1813 	(void) memcpy(sc.cdb, pkt->pkt_cdbp,
1814 	    min(SCSA_CDBLEN(sp), sizeof (sc.cdb)));
1815 	(void) memcpy(&ptr[5], &sc, sizeof (sas_ssp_cmd_iu_t));
1816 	pwrk->state = PMCS_WORK_STATE_ONCHIP;
1817 	mutex_exit(&pwrk->lock);
1818 	pmcs_prt(pwp, PMCS_PRT_DEBUG2,
1819 	    "%s: giving pkt %p (tag %x) to the hardware", __func__,
1820 	    (void *)pkt, pwrk->htag);
1821 #ifdef DEBUG
1822 	pmcs_print_entry(pwp, PMCS_PRT_DEBUG3, "SAS INI Message", ptr);
1823 #endif
1824 	mutex_enter(&xp->aqlock);
1825 	STAILQ_INSERT_TAIL(&xp->aq, sp, cmd_next);
1826 	mutex_exit(&xp->aqlock);
1827 	INC_IQ_ENTRY(pwp, iq);
1828 
1829 	/*
1830 	 * If we just submitted the last command queued from device state
1831 	 * recovery, clear the wq_recovery_tail pointer.
1832 	 */
1833 	mutex_enter(&xp->wqlock);
1834 	if (xp->wq_recovery_tail == sp) {
1835 		xp->wq_recovery_tail = NULL;
1836 	}
1837 	mutex_exit(&xp->wqlock);
1838 
1839 	return (PMCS_WQ_RUN_SUCCESS);
1840 }
1841 
1842 /*
1843  * Complete a SAS command
1844  *
1845  * Called with pwrk lock held.
1846  * The free of pwrk releases the lock.
1847  */
1848 
1849 static void
1850 pmcs_SAS_done(pmcs_hw_t *pwp, pmcwork_t *pwrk, uint32_t *msg)
1851 {
1852 	pmcs_cmd_t *sp = pwrk->arg;
1853 	pmcs_phy_t *pptr = pwrk->phy;
1854 	pmcs_xscsi_t *xp = pwrk->xp;
1855 	struct scsi_pkt *pkt = CMD2PKT(sp);
1856 	int dead;
1857 	uint32_t sts;
1858 	boolean_t aborted = B_FALSE;
1859 	boolean_t do_ds_recovery = B_FALSE;
1860 
1861 	ASSERT(xp != NULL);
1862 	ASSERT(sp != NULL);
1863 	ASSERT(pptr != NULL);
1864 
1865 	DTRACE_PROBE4(pmcs__io__done, uint64_t, pkt->pkt_dma_len, int,
1866 	    (pkt->pkt_dma_flags & DDI_DMA_READ) != 0, hrtime_t, pwrk->start,
1867 	    hrtime_t, gethrtime());
1868 
1869 	dead = pwrk->dead;
1870 
1871 	if (msg) {
1872 		sts = LE_32(msg[2]);
1873 	} else {
1874 		sts = 0;
1875 	}
1876 
1877 	if (dead != 0) {
1878 		pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: dead cmd tag 0x%x for %s",
1879 		    __func__, pwrk->htag, pptr->path);
1880 		goto out;
1881 	}
1882 
1883 	if (sts == PMCOUT_STATUS_ABORTED) {
1884 		aborted = B_TRUE;
1885 	}
1886 
1887 	if (pwrk->state == PMCS_WORK_STATE_TIMED_OUT) {
1888 		pmcs_prt(pwp, PMCS_PRT_DEBUG,
1889 		    "%s: cmd 0x%p (tag 0x%x) timed out for %s",
1890 		    __func__, (void *)sp, pwrk->htag, pptr->path);
1891 		do_ds_recovery = B_TRUE;
1892 		goto out;
1893 	}
1894 
1895 	/*
1896 	 * If the status isn't okay but not underflow,
1897 	 * step to the side and parse the (possible) error.
1898 	 */
1899 #ifdef DEBUG
1900 	if (msg) {
1901 		pmcs_print_entry(pwp, PMCS_PRT_DEBUG3, "Outbound Message", msg);
1902 	}
1903 #endif
1904 	if (!msg) {
1905 		goto out;
1906 	}
1907 
1908 	switch (sts) {
1909 	case PMCOUT_STATUS_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
1910 	case PMCOUT_STATUS_IO_DS_NON_OPERATIONAL:
1911 	case PMCOUT_STATUS_IO_DS_IN_RECOVERY:
1912 		pmcs_prt(pwp, PMCS_PRT_DEBUG,
1913 		    "%s: PHY %s requires device state recovery (status=%d)",
1914 		    __func__, pptr->path, sts);
1915 		do_ds_recovery = B_TRUE;
1916 		break;
1917 	case PMCOUT_STATUS_UNDERFLOW:
1918 		(void) pmcs_set_resid(pkt, pkt->pkt_dma_len, LE_32(msg[3]));
1919 		pmcs_prt(pwp, PMCS_PRT_DEBUG_UNDERFLOW,
1920 		    "%s: underflow %u for cdb 0x%x",
1921 		    __func__, LE_32(msg[3]), pkt->pkt_cdbp[0] & 0xff);
1922 		sts = PMCOUT_STATUS_OK;
1923 		msg[3] = 0;
1924 		break;
1925 	case PMCOUT_STATUS_OK:
1926 		pkt->pkt_resid = 0;
1927 		break;
1928 	}
1929 
1930 	if (sts != PMCOUT_STATUS_OK) {
1931 		pmcs_ioerror(pwp, SAS, pwrk, msg);
1932 	} else {
1933 		if (msg[3]) {
1934 			uint8_t local[PMCS_QENTRY_SIZE << 1], *xd;
1935 			sas_ssp_rsp_iu_t *rptr = (void *)local;
1936 			const int lim =
1937 			    (PMCS_QENTRY_SIZE << 1) - SAS_RSP_HDR_SIZE;
1938 			static const uint8_t ssp_rsp_evec[] = {
1939 				0x58, 0x61, 0x56, 0x72, 0x00
1940 			};
1941 
1942 			/*
1943 			 * Transform the the first part of the response
1944 			 * to host canonical form. This gives us enough
1945 			 * information to figure out what to do with the
1946 			 * rest (which remains unchanged in the incoming
1947 			 * message which can be up to two queue entries
1948 			 * in length).
1949 			 */
1950 			pmcs_endian_transform(pwp, local, &msg[5],
1951 			    ssp_rsp_evec);
1952 			xd = (uint8_t *)(&msg[5]);
1953 			xd += SAS_RSP_HDR_SIZE;
1954 
1955 			if (rptr->datapres == SAS_RSP_DATAPRES_RESPONSE_DATA) {
1956 				if (rptr->response_data_length != 4) {
1957 					pmcs_print_entry(pwp, PMCS_PRT_DEBUG,
1958 					    "Bad SAS RESPONSE DATA LENGTH",
1959 					    msg);
1960 					pkt->pkt_reason = CMD_TRAN_ERR;
1961 					goto out;
1962 				}
1963 				(void) memcpy(&sts, xd, sizeof (uint32_t));
1964 				sts = BE_32(sts);
1965 				/*
1966 				 * The only response code we should legally get
1967 				 * here is an INVALID FRAME response code.
1968 				 */
1969 				if (sts == SAS_RSP_INVALID_FRAME) {
1970 					pmcs_prt(pwp, PMCS_PRT_DEBUG,
1971 					    "%s: pkt %p tgt %u path %s "
1972 					    "completed: INVALID FRAME response",
1973 					    __func__, (void *)pkt,
1974 					    xp->target_num, pptr->path);
1975 				} else {
1976 					pmcs_prt(pwp, PMCS_PRT_DEBUG,
1977 					    "%s: pkt %p tgt %u path %s "
1978 					    "completed: illegal response 0x%x",
1979 					    __func__, (void *)pkt,
1980 					    xp->target_num, pptr->path, sts);
1981 				}
1982 				pkt->pkt_reason = CMD_TRAN_ERR;
1983 				goto out;
1984 			}
1985 			if (rptr->datapres == SAS_RSP_DATAPRES_SENSE_DATA) {
1986 				uint32_t slen;
1987 				slen = rptr->sense_data_length;
1988 				if (slen > lim) {
1989 					slen = lim;
1990 				}
1991 				pmcs_latch_status(pwp, sp, rptr->status, xd,
1992 				    slen, pptr->path);
1993 			} else if (rptr->datapres == SAS_RSP_DATAPRES_NO_DATA) {
1994 				/*
1995 				 * This is the case for a plain SCSI status.
1996 				 */
1997 				pmcs_latch_status(pwp, sp, rptr->status, NULL,
1998 				    0, pptr->path);
1999 			} else {
2000 				pmcs_print_entry(pwp, PMCS_PRT_DEBUG,
2001 				    "illegal SAS response", msg);
2002 				pkt->pkt_reason = CMD_TRAN_ERR;
2003 				goto out;
2004 			}
2005 		} else {
2006 			pmcs_latch_status(pwp, sp, STATUS_GOOD, NULL, 0,
2007 			    pptr->path);
2008 		}
2009 		if (pkt->pkt_dma_len) {
2010 			pkt->pkt_state |= STATE_XFERRED_DATA;
2011 		}
2012 	}
2013 	pmcs_prt(pwp, PMCS_PRT_DEBUG2,
2014 	    "%s: pkt %p tgt %u done reason=%x state=%x resid=%ld status=%x",
2015 	    __func__, (void *)pkt, xp->target_num, pkt->pkt_reason,
2016 	    pkt->pkt_state, pkt->pkt_resid, pkt->pkt_scbp[0]);
2017 
2018 	if (pwrk->state == PMCS_WORK_STATE_ABORTED) {
2019 		pmcs_prt(pwp, PMCS_PRT_DEBUG,
2020 		    "%s: scsi_pkt 0x%p aborted for PHY %s; work = 0x%p",
2021 		    __func__, (void *)pkt, pptr->path, (void *)pwrk);
2022 		aborted = B_TRUE;
2023 	}
2024 
2025 out:
2026 	pmcs_pwork(pwp, pwrk);
2027 	pmcs_dma_unload(pwp, sp);
2028 
2029 	mutex_enter(&xp->statlock);
2030 	if (xp->dev_gone) {
2031 		mutex_exit(&xp->statlock);
2032 		mutex_enter(&pwp->cq_lock);
2033 		STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next);
2034 		mutex_exit(&pwp->cq_lock);
2035 		pmcs_prt(pwp, PMCS_PRT_DEBUG2,
2036 		    "%s: Completing command for dead target 0x%p", __func__,
2037 		    (void *)xp);
2038 		return;
2039 	}
2040 
2041 	ASSERT(xp->actv_cnt > 0);
2042 	if (--(xp->actv_cnt) == 0) {
2043 		if (xp->draining) {
2044 			pmcs_prt(pwp, PMCS_PRT_DEBUG1,
2045 			    "%s: waking up drain waiters", __func__);
2046 			cv_signal(&pwp->drain_cv);
2047 		}
2048 	}
2049 	mutex_exit(&xp->statlock);
2050 	if (dead == 0) {
2051 #ifdef	DEBUG
2052 		pmcs_cmd_t *wp;
2053 		mutex_enter(&xp->aqlock);
2054 		STAILQ_FOREACH(wp, &xp->aq, cmd_next) {
2055 			if (wp == sp) {
2056 				break;
2057 			}
2058 		}
2059 		ASSERT(wp != NULL);
2060 #else
2061 		mutex_enter(&xp->aqlock);
2062 #endif
2063 		STAILQ_REMOVE(&xp->aq, sp, pmcs_cmd, cmd_next);
2064 		if (aborted) {
2065 			pmcs_prt(pwp, PMCS_PRT_DEBUG,
2066 			    "%s: Aborted cmd for tgt 0x%p, signaling waiters",
2067 			    __func__, (void *)xp);
2068 			cv_signal(&xp->abort_cv);
2069 		}
2070 		mutex_exit(&xp->aqlock);
2071 	}
2072 
2073 	/*
2074 	 * If do_ds_recovery is set, we need to initiate device state
2075 	 * recovery.  In this case, we put this I/O back on the head of
2076 	 * the wait queue to run again after recovery is complete
2077 	 */
2078 	if (do_ds_recovery) {
2079 		mutex_enter(&xp->statlock);
2080 		pmcs_start_dev_state_recovery(xp, pptr);
2081 		mutex_exit(&xp->statlock);
2082 		pmcs_prt(pwp, PMCS_PRT_DEBUG1, "%s: Putting cmd 0x%p back on "
2083 		    "wq during recovery for tgt 0x%p", __func__, (void *)sp,
2084 		    (void *)xp);
2085 		mutex_enter(&xp->wqlock);
2086 		if (xp->wq_recovery_tail == NULL) {
2087 			STAILQ_INSERT_HEAD(&xp->wq, sp, cmd_next);
2088 		} else {
2089 			/*
2090 			 * If there are other I/Os waiting at the head due to
2091 			 * device state recovery, add this one in the right spot
2092 			 * to maintain proper order.
2093 			 */
2094 			STAILQ_INSERT_AFTER(&xp->wq, xp->wq_recovery_tail, sp,
2095 			    cmd_next);
2096 		}
2097 		xp->wq_recovery_tail = sp;
2098 		mutex_exit(&xp->wqlock);
2099 	} else {
2100 		/*
2101 		 * If we're not initiating device state recovery and this
2102 		 * command was not "dead", put it on the completion queue
2103 		 */
2104 		if (!dead) {
2105 			mutex_enter(&pwp->cq_lock);
2106 			STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next);
2107 			mutex_exit(&pwp->cq_lock);
2108 		}
2109 	}
2110 }
2111 
2112 /*
2113  * Run a SATA command (normal reads and writes),
2114  * or block and schedule a SATL interpretation
2115  * of the command.
2116  *
2117  * Called with pwrk lock held, returns unlocked.
2118  */
2119 
2120 static int
2121 pmcs_SATA_run(pmcs_cmd_t *sp, pmcwork_t *pwrk)
2122 {
2123 	pmcs_hw_t *pwp = CMD2PMC(sp);
2124 	struct scsi_pkt *pkt = CMD2PKT(sp);
2125 	pmcs_xscsi_t *xp;
2126 	uint8_t cdb_base, asc, tag;
2127 	uint32_t *ptr, iq, nblk, i, mtype;
2128 	fis_t fis;
2129 	size_t amt;
2130 	uint64_t lba;
2131 
2132 	xp = pwrk->xp;
2133 
2134 	/*
2135 	 * First, see if this is just a plain read/write command.
2136 	 * If not, we have to queue it up for processing, block
2137 	 * any additional commands from coming in, and wake up
2138 	 * the thread that will process this command.
2139 	 */
2140 	cdb_base = pkt->pkt_cdbp[0] & 0x1f;
2141 	if (cdb_base != SCMD_READ && cdb_base != SCMD_WRITE) {
2142 		pmcs_prt(pwp, PMCS_PRT_DEBUG1, "%s: special SATA cmd %p",
2143 		    __func__, (void *)sp);
2144 
2145 		ASSERT(xp->phy != NULL);
2146 		pmcs_pwork(pwp, pwrk);
2147 		pmcs_lock_phy(xp->phy);
2148 		mutex_enter(&xp->statlock);
2149 		xp->special_needed = 1; /* Set the special_needed flag */
2150 		STAILQ_INSERT_TAIL(&xp->sq, sp, cmd_next);
2151 		if (pmcs_run_sata_special(pwp, xp)) {
2152 			SCHEDULE_WORK(pwp, PMCS_WORK_SATA_RUN);
2153 		}
2154 		mutex_exit(&xp->statlock);
2155 		pmcs_unlock_phy(xp->phy);
2156 
2157 		return (PMCS_WQ_RUN_SUCCESS);
2158 	}
2159 
2160 	pmcs_prt(pwp, PMCS_PRT_DEBUG2, "%s: regular cmd", __func__);
2161 
2162 	mutex_enter(&xp->statlock);
2163 	if (!xp->assigned) {
2164 		mutex_exit(&xp->statlock);
2165 		return (PMCS_WQ_RUN_FAIL_OTHER);
2166 	}
2167 	if (xp->special_running || xp->special_needed || xp->recover_wait) {
2168 		mutex_exit(&xp->statlock);
2169 		mutex_enter(&xp->wqlock);
2170 		STAILQ_INSERT_HEAD(&xp->wq, sp, cmd_next);
2171 		mutex_exit(&xp->wqlock);
2172 		/*
2173 		 * By the time we get here the special
2174 		 * commands running or waiting to be run
2175 		 * may have come and gone, so kick our
2176 		 * worker to run the waiting queues
2177 		 * just in case.
2178 		 */
2179 		SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES);
2180 		return (PMCS_WQ_RUN_FAIL_OTHER);
2181 	}
2182 	lba = xp->capacity;
2183 	mutex_exit(&xp->statlock);
2184 
2185 	/*
2186 	 * Extract data length and lba parameters out of the command. The
2187 	 * function pmcs_SATA_rwparm returns a non-zero ASC value if the CDB
2188 	 * values are considered illegal.
2189 	 */
2190 	asc = pmcs_SATA_rwparm(pkt->pkt_cdbp, &nblk, &lba, lba);
2191 	if (asc) {
2192 		uint8_t sns[18];
2193 		bzero(sns, sizeof (sns));
2194 		sns[0] = 0xf0;
2195 		sns[2] = 0x5;
2196 		sns[12] = asc;
2197 		pmcs_latch_status(pwp, sp, STATUS_CHECK, sns, sizeof (sns),
2198 		    pwrk->phy->path);
2199 		pmcs_pwork(pwp, pwrk);
2200 		mutex_enter(&pwp->cq_lock);
2201 		STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next);
2202 		PMCS_CQ_RUN_LOCKED(pwp);
2203 		mutex_exit(&pwp->cq_lock);
2204 		return (PMCS_WQ_RUN_SUCCESS);
2205 	}
2206 
2207 	/*
2208 	 * If the command decodes as not moving any data, complete it here.
2209 	 */
2210 	amt = nblk;
2211 	amt <<= 9;
2212 	amt = pmcs_set_resid(pkt, amt, nblk << 9);
2213 	if (amt == 0) {
2214 		pmcs_latch_status(pwp, sp, STATUS_GOOD, NULL, 0,
2215 		    pwrk->phy->path);
2216 		pmcs_pwork(pwp, pwrk);
2217 		mutex_enter(&pwp->cq_lock);
2218 		STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next);
2219 		PMCS_CQ_RUN_LOCKED(pwp);
2220 		mutex_exit(&pwp->cq_lock);
2221 		return (PMCS_WQ_RUN_SUCCESS);
2222 	}
2223 
2224 	/*
2225 	 * Get an inbound queue entry for this I/O
2226 	 */
2227 	GET_IO_IQ_ENTRY(pwp, ptr, xp->phy->device_id, iq);
2228 	if (ptr == NULL) {
2229 		/*
2230 		 * This is a temporary failure not likely to unblocked by
2231 		 * commands completing as the test for scheduling the
2232 		 * restart of work is a per-device test.
2233 		 */
2234 		mutex_enter(&xp->wqlock);
2235 		STAILQ_INSERT_HEAD(&xp->wq, sp, cmd_next);
2236 		mutex_exit(&xp->wqlock);
2237 		pmcs_dma_unload(pwp, sp);
2238 		SCHEDULE_WORK(pwp, PMCS_WORK_RUN_QUEUES);
2239 		pmcs_prt(pwp, PMCS_PRT_DEBUG,
2240 		    "%s: Failed to get IO IQ entry for tgt %d",
2241 		    __func__, xp->target_num);
2242 		return (PMCS_WQ_RUN_FAIL_RES);
2243 	}
2244 
2245 	/*
2246 	 * Get a tag.  At this point, hold statlock until the tagmap is
2247 	 * updated (just prior to sending the cmd to the hardware).
2248 	 */
2249 	mutex_enter(&xp->statlock);
2250 	for (tag = 0; tag < xp->qdepth; tag++) {
2251 		if ((xp->tagmap & (1 << tag)) == 0) {
2252 			break;
2253 		}
2254 	}
2255 
2256 	if (tag == xp->qdepth) {
2257 		mutex_exit(&xp->statlock);
2258 		mutex_exit(&pwp->iqp_lock[iq]);
2259 		mutex_enter(&xp->wqlock);
2260 		STAILQ_INSERT_HEAD(&xp->wq, sp, cmd_next);
2261 		mutex_exit(&xp->wqlock);
2262 		return (PMCS_WQ_RUN_FAIL_OTHER);
2263 	}
2264 
2265 	sp->cmd_satltag = (uint8_t)tag;
2266 
2267 	/*
2268 	 * Set up the command
2269 	 */
2270 	bzero(fis, sizeof (fis));
2271 	ptr[0] =
2272 	    LE_32(PMCS_IOMB_IN_SAS(PMCS_OQ_IODONE, PMCIN_SATA_HOST_IO_START));
2273 	ptr[1] = LE_32(pwrk->htag);
2274 	ptr[2] = LE_32(pwrk->phy->device_id);
2275 	ptr[3] = LE_32(amt);
2276 
2277 	if (xp->ncq) {
2278 		mtype = SATA_PROTOCOL_FPDMA | (tag << 16);
2279 		fis[0] = ((nblk & 0xff) << 24) | (C_BIT << 8) | FIS_REG_H2DEV;
2280 		if (cdb_base == SCMD_READ) {
2281 			fis[0] |= (READ_FPDMA_QUEUED << 16);
2282 		} else {
2283 			fis[0] |= (WRITE_FPDMA_QUEUED << 16);
2284 		}
2285 		fis[1] = (FEATURE_LBA << 24) | (lba & 0xffffff);
2286 		fis[2] = ((nblk & 0xff00) << 16) | ((lba >> 24) & 0xffffff);
2287 		fis[3] = tag << 3;
2288 	} else {
2289 		int op;
2290 		fis[0] = (C_BIT << 8) | FIS_REG_H2DEV;
2291 		if (xp->pio) {
2292 			mtype = SATA_PROTOCOL_PIO;
2293 			if (cdb_base == SCMD_READ) {
2294 				op = READ_SECTORS_EXT;
2295 			} else {
2296 				op = WRITE_SECTORS_EXT;
2297 			}
2298 		} else {
2299 			mtype = SATA_PROTOCOL_DMA;
2300 			if (cdb_base == SCMD_READ) {
2301 				op = READ_DMA_EXT;
2302 			} else {
2303 				op = WRITE_DMA_EXT;
2304 			}
2305 		}
2306 		fis[0] |= (op << 16);
2307 		fis[1] = (FEATURE_LBA << 24) | (lba & 0xffffff);
2308 		fis[2] = (lba >> 24) & 0xffffff;
2309 		fis[3] = nblk;
2310 	}
2311 
2312 	if (cdb_base == SCMD_READ) {
2313 		ptr[4] = LE_32(mtype | PMCIN_DATADIR_2_INI);
2314 	} else {
2315 		ptr[4] = LE_32(mtype | PMCIN_DATADIR_2_DEV);
2316 	}
2317 #ifdef	DEBUG
2318 	/*
2319 	 * Generate a PMCOUT_STATUS_XFER_CMD_FRAME_ISSUED
2320 	 * event when this goes out on the wire.
2321 	 */
2322 	ptr[4] |= PMCIN_MESSAGE_REPORT;
2323 #endif
2324 	for (i = 0; i < (sizeof (fis_t))/(sizeof (uint32_t)); i++) {
2325 		ptr[i+5] = LE_32(fis[i]);
2326 	}
2327 	if (pmcs_dma_load(pwp, sp, ptr)) {
2328 		mutex_exit(&xp->statlock);
2329 		mutex_exit(&pwp->iqp_lock[iq]);
2330 		mutex_enter(&xp->wqlock);
2331 		STAILQ_INSERT_HEAD(&xp->wq, sp, cmd_next);
2332 		mutex_exit(&xp->wqlock);
2333 		pmcs_prt(pwp, PMCS_PRT_DEBUG,
2334 		    "%s: Failed to dma_load for tgt %d",
2335 		    __func__, xp->target_num);
2336 		return (PMCS_WQ_RUN_FAIL_RES);
2337 
2338 	}
2339 
2340 	pwrk->state = PMCS_WORK_STATE_ONCHIP;
2341 	mutex_exit(&pwrk->lock);
2342 	xp->tagmap |= (1 << tag);
2343 	xp->actv_cnt++;
2344 	if (xp->actv_cnt > xp->maxdepth) {
2345 		xp->maxdepth = xp->actv_cnt;
2346 		pmcs_prt(pwp, PMCS_PRT_DEBUG2, "%s: max depth now %u",
2347 		    pwrk->phy->path, xp->maxdepth);
2348 	}
2349 	mutex_exit(&xp->statlock);
2350 	mutex_enter(&xp->aqlock);
2351 	STAILQ_INSERT_TAIL(&xp->aq, sp, cmd_next);
2352 	mutex_exit(&xp->aqlock);
2353 	pmcs_prt(pwp, PMCS_PRT_DEBUG2, "%s: giving pkt %p to hardware",
2354 	    __func__, (void *)pkt);
2355 #ifdef DEBUG
2356 	pmcs_print_entry(pwp, PMCS_PRT_DEBUG3, "SATA INI Message", ptr);
2357 #endif
2358 	INC_IQ_ENTRY(pwp, iq);
2359 
2360 	return (PMCS_WQ_RUN_SUCCESS);
2361 }
2362 
2363 /*
2364  * Complete a SATA command.  Called with pwrk lock held.
2365  */
2366 void
2367 pmcs_SATA_done(pmcs_hw_t *pwp, pmcwork_t *pwrk, uint32_t *msg)
2368 {
2369 	pmcs_cmd_t *sp = pwrk->arg;
2370 	struct scsi_pkt *pkt = CMD2PKT(sp);
2371 	pmcs_phy_t *pptr = pwrk->phy;
2372 	int dead;
2373 	uint32_t sts;
2374 	pmcs_xscsi_t *xp;
2375 	boolean_t aborted = B_FALSE;
2376 
2377 	xp = pwrk->xp;
2378 	ASSERT(xp != NULL);
2379 
2380 	DTRACE_PROBE4(pmcs__io__done, uint64_t, pkt->pkt_dma_len, int,
2381 	    (pkt->pkt_dma_flags & DDI_DMA_READ) != 0, hrtime_t, pwrk->start,
2382 	    hrtime_t, gethrtime());
2383 
2384 	dead = pwrk->dead;
2385 
2386 	if (msg) {
2387 		sts = LE_32(msg[2]);
2388 	} else {
2389 		sts = 0;
2390 	}
2391 
2392 	if (dead != 0) {
2393 		pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: dead cmd tag 0x%x for %s",
2394 		    __func__, pwrk->htag, pptr->path);
2395 		goto out;
2396 	}
2397 	if ((pwrk->state == PMCS_WORK_STATE_TIMED_OUT) &&
2398 	    (sts != PMCOUT_STATUS_ABORTED)) {
2399 		pmcs_prt(pwp, PMCS_PRT_DEBUG,
2400 		    "%s: cmd 0x%p (tag 0x%x) timed out for %s",
2401 		    __func__, (void *)sp, pwrk->htag, pptr->path);
2402 		CMD2PKT(sp)->pkt_scbp[0] = STATUS_GOOD;
2403 		/* pkt_reason already set to CMD_TIMEOUT */
2404 		ASSERT(CMD2PKT(sp)->pkt_reason == CMD_TIMEOUT);
2405 		CMD2PKT(sp)->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET |
2406 		    STATE_SENT_CMD;
2407 		CMD2PKT(sp)->pkt_statistics |= STAT_TIMEOUT;
2408 		goto out;
2409 	}
2410 
2411 	pmcs_prt(pwp, PMCS_PRT_DEBUG2, "%s: pkt %p tgt %u done",
2412 	    __func__, (void *)pkt, xp->target_num);
2413 
2414 	/*
2415 	 * If the status isn't okay but not underflow,
2416 	 * step to the side and parse the (possible) error.
2417 	 */
2418 #ifdef DEBUG
2419 	if (msg) {
2420 		pmcs_print_entry(pwp, PMCS_PRT_DEBUG3, "Outbound Message", msg);
2421 	}
2422 #endif
2423 	if (!msg) {
2424 		goto out;
2425 	}
2426 
2427 	/*
2428 	 * If the status isn't okay or we got a FIS response of some kind,
2429 	 * step to the side and parse the (possible) error.
2430 	 */
2431 	if ((sts != PMCOUT_STATUS_OK) || (LE_32(msg[3]) != 0)) {
2432 		if (sts == PMCOUT_STATUS_IO_DS_NON_OPERATIONAL) {
2433 			mutex_exit(&pwrk->lock);
2434 			pmcs_lock_phy(pptr);
2435 			mutex_enter(&xp->statlock);
2436 			if ((xp->resetting == 0) && (xp->reset_success != 0) &&
2437 			    (xp->reset_wait == 0)) {
2438 				mutex_exit(&xp->statlock);
2439 				if (pmcs_reset_phy(pwp, pptr,
2440 				    PMCS_PHYOP_LINK_RESET) != 0) {
2441 					pmcs_prt(pwp, PMCS_PRT_DEBUG, "%s: PHY "
2442 					    "(%s) Local Control/Link Reset "
2443 					    "FAILED as part of error recovery",
2444 					    __func__, pptr->path);
2445 				}
2446 				mutex_enter(&xp->statlock);
2447 			}
2448 			mutex_exit(&xp->statlock);
2449 			pmcs_unlock_phy(pptr);
2450 			mutex_enter(&pwrk->lock);
2451 		}
2452 		pmcs_ioerror(pwp, SATA, pwrk, msg);
2453 	} else {
2454 		pmcs_latch_status(pwp, sp, STATUS_GOOD, NULL, 0,
2455 		    pwrk->phy->path);
2456 		pkt->pkt_state |= STATE_XFERRED_DATA;
2457 		pkt->pkt_resid = 0;
2458 	}
2459 
2460 	pmcs_prt(pwp, PMCS_PRT_DEBUG2,
2461 	    "%s: pkt %p tgt %u done reason=%x state=%x resid=%ld status=%x",
2462 	    __func__, (void *)pkt, xp->target_num, pkt->pkt_reason,
2463 	    pkt->pkt_state, pkt->pkt_resid, pkt->pkt_scbp[0]);
2464 
2465 	if (pwrk->state == PMCS_WORK_STATE_ABORTED) {
2466 		pmcs_prt(pwp, PMCS_PRT_DEBUG,
2467 		    "%s: scsi_pkt 0x%p aborted for PHY %s; work = 0x%p",
2468 		    __func__, (void *)pkt, pptr->path, (void *)pwrk);
2469 		aborted = B_TRUE;
2470 	}
2471 
2472 out:
2473 	pmcs_pwork(pwp, pwrk);
2474 	pmcs_dma_unload(pwp, sp);
2475 
2476 	mutex_enter(&xp->statlock);
2477 	xp->tagmap &= ~(1 << sp->cmd_satltag);
2478 
2479 	if (xp->dev_gone) {
2480 		mutex_exit(&xp->statlock);
2481 		mutex_enter(&pwp->cq_lock);
2482 		STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next);
2483 		mutex_exit(&pwp->cq_lock);
2484 		pmcs_prt(pwp, PMCS_PRT_DEBUG2,
2485 		    "%s: Completing command for dead target 0x%p", __func__,
2486 		    (void *)xp);
2487 		return;
2488 	}
2489 
2490 	ASSERT(xp->actv_cnt > 0);
2491 	if (--(xp->actv_cnt) == 0) {
2492 		if (xp->draining) {
2493 			pmcs_prt(pwp, PMCS_PRT_DEBUG1,
2494 			    "%s: waking up drain waiters", __func__);
2495 			cv_signal(&pwp->drain_cv);
2496 		} else if (xp->special_needed) {
2497 			SCHEDULE_WORK(pwp, PMCS_WORK_SATA_RUN);
2498 		}
2499 	}
2500 	mutex_exit(&xp->statlock);
2501 
2502 	if (dead == 0) {
2503 #ifdef	DEBUG
2504 		pmcs_cmd_t *wp;
2505 		mutex_enter(&xp->aqlock);
2506 		STAILQ_FOREACH(wp, &xp->aq, cmd_next) {
2507 			if (wp == sp) {
2508 				break;
2509 			}
2510 		}
2511 		ASSERT(wp != NULL);
2512 #else
2513 		mutex_enter(&xp->aqlock);
2514 #endif
2515 		STAILQ_REMOVE(&xp->aq, sp, pmcs_cmd, cmd_next);
2516 		if (aborted) {
2517 			pmcs_prt(pwp, PMCS_PRT_DEBUG,
2518 			    "%s: Aborted cmd for tgt 0x%p, signaling waiters",
2519 			    __func__, (void *)xp);
2520 			cv_signal(&xp->abort_cv);
2521 		}
2522 		mutex_exit(&xp->aqlock);
2523 		mutex_enter(&pwp->cq_lock);
2524 		STAILQ_INSERT_TAIL(&pwp->cq, sp, cmd_next);
2525 		mutex_exit(&pwp->cq_lock);
2526 	}
2527 }
2528 
2529 static uint8_t
2530 pmcs_SATA_rwparm(uint8_t *cdb, uint32_t *xfr, uint64_t *lba, uint64_t lbamax)
2531 {
2532 	uint8_t asc = 0;
2533 	switch (cdb[0]) {
2534 	case SCMD_READ_G5:
2535 	case SCMD_WRITE_G5:
2536 		*xfr =
2537 		    (((uint32_t)cdb[10]) <<  24) |
2538 		    (((uint32_t)cdb[11]) <<  16) |
2539 		    (((uint32_t)cdb[12]) <<   8) |
2540 		    ((uint32_t)cdb[13]);
2541 		*lba =
2542 		    (((uint64_t)cdb[2]) << 56) |
2543 		    (((uint64_t)cdb[3]) << 48) |
2544 		    (((uint64_t)cdb[4]) << 40) |
2545 		    (((uint64_t)cdb[5]) << 32) |
2546 		    (((uint64_t)cdb[6]) << 24) |
2547 		    (((uint64_t)cdb[7]) << 16) |
2548 		    (((uint64_t)cdb[8]) <<  8) |
2549 		    ((uint64_t)cdb[9]);
2550 		/* Check for illegal bits */
2551 		if (cdb[15]) {
2552 			asc = 0x24;	/* invalid field in cdb */
2553 		}
2554 		break;
2555 	case SCMD_READ_G4:
2556 	case SCMD_WRITE_G4:
2557 		*xfr =
2558 		    (((uint32_t)cdb[6]) <<  16) |
2559 		    (((uint32_t)cdb[7]) <<   8) |
2560 		    ((uint32_t)cdb[8]);
2561 		*lba =
2562 		    (((uint32_t)cdb[2]) << 24) |
2563 		    (((uint32_t)cdb[3]) << 16) |
2564 		    (((uint32_t)cdb[4]) <<  8) |
2565 		    ((uint32_t)cdb[5]);
2566 		/* Check for illegal bits */
2567 		if (cdb[11]) {
2568 			asc = 0x24;	/* invalid field in cdb */
2569 		}
2570 		break;
2571 	case SCMD_READ_G1:
2572 	case SCMD_WRITE_G1:
2573 		*xfr = (((uint32_t)cdb[7]) <<  8) | ((uint32_t)cdb[8]);
2574 		*lba =
2575 		    (((uint32_t)cdb[2]) << 24) |
2576 		    (((uint32_t)cdb[3]) << 16) |
2577 		    (((uint32_t)cdb[4]) <<  8) |
2578 		    ((uint32_t)cdb[5]);
2579 		/* Check for illegal bits */
2580 		if (cdb[9]) {
2581 			asc = 0x24;	/* invalid field in cdb */
2582 		}
2583 		break;
2584 	case SCMD_READ:
2585 	case SCMD_WRITE:
2586 		*xfr = cdb[4];
2587 		if (*xfr == 0) {
2588 			*xfr = 256;
2589 		}
2590 		*lba =
2591 		    (((uint32_t)cdb[1] & 0x1f) << 16) |
2592 		    (((uint32_t)cdb[2]) << 8) |
2593 		    ((uint32_t)cdb[3]);
2594 		/* Check for illegal bits */
2595 		if (cdb[5]) {
2596 			asc = 0x24;	/* invalid field in cdb */
2597 		}
2598 		break;
2599 	}
2600 
2601 	if (asc == 0) {
2602 		if ((*lba + *xfr) > lbamax) {
2603 			asc = 0x21;	/* logical block out of range */
2604 		}
2605 	}
2606 	return (asc);
2607 }
2608 
2609 /*
2610  * Called with pwrk lock held.
2611  */
2612 static void
2613 pmcs_ioerror(pmcs_hw_t *pwp, pmcs_dtype_t t, pmcwork_t *pwrk, uint32_t *w)
2614 {
2615 	static uint8_t por[] = {
2616 	    0xf0, 0x0, 0x6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x28
2617 	};
2618 	static uint8_t parity[] = {
2619 	    0xf0, 0x0, 0xb, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x47, 5
2620 	};
2621 	const char *msg;
2622 	char buf[20];
2623 	pmcs_cmd_t *sp = pwrk->arg;
2624 	pmcs_phy_t *phyp = pwrk->phy;
2625 	struct scsi_pkt *pkt = CMD2PKT(sp);
2626 	uint32_t status;
2627 	uint32_t resid;
2628 
2629 	ASSERT(w != NULL);
2630 	status = LE_32(w[2]);
2631 	resid = LE_32(w[3]);
2632 
2633 	msg = pmcs_status_str(status);
2634 	if (msg == NULL) {
2635 		(void) snprintf(buf, sizeof (buf), "Error 0x%x", status);
2636 		msg = buf;
2637 	}
2638 
2639 	if (status != PMCOUT_STATUS_OK) {
2640 		pmcs_prt(pwp, PMCS_PRT_DEBUG2,
2641 		    "%s: device %s tag 0x%x status %s @ %llu", __func__,
2642 		    phyp->path, pwrk->htag, msg,
2643 		    (unsigned long long)gethrtime());
2644 	}
2645 
2646 	pkt->pkt_reason = CMD_CMPLT;		/* default reason */
2647 
2648 	switch (status) {
2649 	case PMCOUT_STATUS_OK:
2650 		if (t == SATA) {
2651 			int i;
2652 			fis_t fis;
2653 			for (i = 0; i < sizeof (fis) / sizeof (fis[0]); i++) {
2654 				fis[i] = LE_32(w[4+i]);
2655 			}
2656 			if ((fis[0] & 0xff) != FIS_REG_D2H) {
2657 				pmcs_prt(pwp, PMCS_PRT_DEBUG,
2658 				    "unexpected fis code 0x%x", fis[0] & 0xff);
2659 			} else {
2660 				pmcs_prt(pwp, PMCS_PRT_DEBUG, "FIS ERROR");
2661 				pmcs_fis_dump(pwp, fis);
2662 			}
2663 			pkt->pkt_reason = CMD_TRAN_ERR;
2664 			break;
2665 		}
2666 		pmcs_latch_status(pwp, sp, STATUS_GOOD, NULL, 0, phyp->path);
2667 		break;
2668 
2669 	case PMCOUT_STATUS_ABORTED:
2670 		/*
2671 		 * Command successfully aborted.
2672 		 */
2673 		if (phyp->dead) {
2674 			pkt->pkt_reason = CMD_DEV_GONE;
2675 			pkt->pkt_state = STATE_GOT_BUS;
2676 		} else if (pwrk->ssp_event != 0) {
2677 			pkt->pkt_reason = CMD_TRAN_ERR;
2678 			pkt->pkt_state = STATE_GOT_BUS;
2679 		} else if (pwrk->state == PMCS_WORK_STATE_TIMED_OUT) {
2680 			pkt->pkt_reason = CMD_TIMEOUT;
2681 			pkt->pkt_statistics |= STAT_TIMEOUT;
2682 			pkt->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
2683 			    STATE_SENT_CMD;
2684 		} else {
2685 			pkt->pkt_reason = CMD_ABORTED;
2686 			pkt->pkt_statistics |= STAT_ABORTED;
2687 			pkt->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
2688 			    STATE_SENT_CMD;
2689 		}
2690 
2691 		/*
2692 		 * PMCS_WORK_STATE_TIMED_OUT doesn't need to be preserved past
2693 		 * this point, so go ahead and mark it as aborted.
2694 		 */
2695 		pwrk->state = PMCS_WORK_STATE_ABORTED;
2696 		break;
2697 
2698 	case PMCOUT_STATUS_UNDERFLOW:
2699 		/*
2700 		 * This will only get called for SATA
2701 		 */
2702 		pkt->pkt_resid = resid;
2703 		if (pkt->pkt_dma_len < pkt->pkt_resid) {
2704 			(void) pmcs_set_resid(pkt, pkt->pkt_dma_len, resid);
2705 		}
2706 		pmcs_latch_status(pwp, sp, STATUS_GOOD, NULL, 0, phyp->path);
2707 		break;
2708 
2709 	case PMCOUT_STATUS_NO_DEVICE:
2710 	case PMCOUT_STATUS_XFER_ERROR_SATA_LINK_TIMEOUT:
2711 		pkt->pkt_reason = CMD_DEV_GONE;
2712 		break;
2713 
2714 	case PMCOUT_STATUS_OPEN_CNX_ERROR_WRONG_DESTINATION:
2715 		/*
2716 		 * Need to do rediscovery. We probably have
2717 		 * the wrong device (disk swap), so kill
2718 		 * this one.
2719 		 */
2720 	case PMCOUT_STATUS_OPEN_CNX_PROTOCOL_NOT_SUPPORTED:
2721 	case PMCOUT_STATUS_OPEN_CNX_ERROR_ZONE_VIOLATION:
2722 	case PMCOUT_STATUS_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
2723 	case PMCOUT_STATUS_OPEN_CNX_ERROR_UNKNOWN_EROOR:
2724 		/*
2725 		 * Need to do rediscovery.
2726 		 */
2727 		if (!phyp->dead) {
2728 			mutex_exit(&pwrk->lock);
2729 			pmcs_lock_phy(pwrk->phy);
2730 			pmcs_kill_changed(pwp, pwrk->phy, 0);
2731 			pmcs_unlock_phy(pwrk->phy);
2732 			mutex_enter(&pwrk->lock);
2733 			pkt->pkt_reason = CMD_INCOMPLETE;
2734 			pkt->pkt_state = STATE_GOT_BUS;
2735 		} else {
2736 			pkt->pkt_reason = CMD_DEV_GONE;
2737 		}
2738 		break;
2739 
2740 	case PMCOUT_STATUS_OPEN_CNX_ERROR_BREAK:
2741 	case PMCOUT_STATUS_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
2742 	case PMCOUT_STATUS_OPENCNX_ERROR_BAD_DESTINATION:
2743 	case PMCOUT_STATUS_IO_XFER_ERROR_NAK_RECEIVED:
2744 		/* cmd is pending on the target */
2745 	case PMCOUT_STATUS_XFER_ERROR_OFFSET_MISMATCH:
2746 	case PMCOUT_STATUS_XFER_ERROR_REJECTED_NCQ_MODE:
2747 		/* transitory - commands sent while in NCQ failure mode */
2748 	case PMCOUT_STATUS_XFER_ERROR_ABORTED_NCQ_MODE:
2749 		/* NCQ failure */
2750 	case PMCOUT_STATUS_IO_PORT_IN_RESET:
2751 	case PMCOUT_STATUS_XFER_ERR_BREAK:
2752 	case PMCOUT_STATUS_XFER_ERR_PHY_NOT_READY:
2753 		pkt->pkt_reason = CMD_INCOMPLETE;
2754 		pkt->pkt_state = STATE_GOT_BUS;
2755 		break;
2756 
2757 	case PMCOUT_STATUS_IO_XFER_OPEN_RETRY_TIMEOUT:
2758 		pmcs_latch_status(pwp, sp, STATUS_BUSY, NULL, 0, phyp->path);
2759 		break;
2760 
2761 	case PMCOUT_STATUS_OPEN_CNX_ERROR_STP_RESOURCES_BUSY:
2762 		/* synthesize a RESERVATION CONFLICT */
2763 		pmcs_latch_status(pwp, sp, STATUS_RESERVATION_CONFLICT, NULL,
2764 		    0, phyp->path);
2765 		break;
2766 
2767 	case PMCOUT_STATUS_XFER_ERROR_ABORTED_DUE_TO_SRST:
2768 		/* synthesize a power-on/reset */
2769 		pmcs_latch_status(pwp, sp, STATUS_CHECK, por, sizeof (por),
2770 		    phyp->path);
2771 		break;
2772 
2773 	case PMCOUT_STATUS_XFER_ERROR_UNEXPECTED_PHASE:
2774 	case PMCOUT_STATUS_XFER_ERROR_RDY_OVERRUN:
2775 	case PMCOUT_STATUS_XFER_ERROR_RDY_NOT_EXPECTED:
2776 	case PMCOUT_STATUS_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT:
2777 	case PMCOUT_STATUS_XFER_ERROR_CMD_ISSUE_BREAK_BEFORE_ACK_NACK:
2778 	case PMCOUT_STATUS_XFER_ERROR_CMD_ISSUE_PHY_DOWN_BEFORE_ACK_NAK:
2779 		/* synthesize a PARITY ERROR */
2780 		pmcs_latch_status(pwp, sp, STATUS_CHECK, parity,
2781 		    sizeof (parity), phyp->path);
2782 		break;
2783 
2784 	case PMCOUT_STATUS_IO_XFER_ERROR_DMA:
2785 	case PMCOUT_STATUS_IO_NOT_VALID:
2786 	case PMCOUT_STATUS_PROG_ERROR:
2787 	case PMCOUT_STATUS_XFER_ERROR_PEER_ABORTED:
2788 	case PMCOUT_STATUS_XFER_ERROR_SATA: /* non-NCQ failure */
2789 	default:
2790 		pkt->pkt_reason = CMD_TRAN_ERR;
2791 		break;
2792 	}
2793 }
2794 
2795 /*
2796  * Latch up SCSI status
2797  */
2798 
2799 void
2800 pmcs_latch_status(pmcs_hw_t *pwp, pmcs_cmd_t *sp, uint8_t status,
2801     uint8_t *snsp, size_t snslen, char *path)
2802 {
2803 	static const char c1[] =
2804 	    "%s: Status Byte 0x%02x for CDB0=0x%02x (%02x %02x %02x) "
2805 	    "HTAG 0x%x @ %llu";
2806 	static const char c2[] =
2807 	    "%s: Status Byte 0x%02x for CDB0=0x%02x HTAG 0x%x @ %llu";
2808 
2809 	CMD2PKT(sp)->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET |
2810 	    STATE_SENT_CMD | STATE_GOT_STATUS;
2811 	CMD2PKT(sp)->pkt_scbp[0] = status;
2812 
2813 	if (status == STATUS_CHECK && snsp &&
2814 	    (size_t)SCSA_STSLEN(sp) >= sizeof (struct scsi_arq_status)) {
2815 		struct scsi_arq_status *aqp =
2816 		    (void *) CMD2PKT(sp)->pkt_scbp;
2817 		size_t amt = sizeof (struct scsi_extended_sense);
2818 		uint8_t key = scsi_sense_key(snsp);
2819 		uint8_t asc = scsi_sense_asc(snsp);
2820 		uint8_t ascq = scsi_sense_ascq(snsp);
2821 		if (amt > snslen) {
2822 			amt = snslen;
2823 		}
2824 		pmcs_prt(pwp, PMCS_PRT_DEBUG_SCSI_STATUS, c1, path, status,
2825 		    CMD2PKT(sp)->pkt_cdbp[0] & 0xff, key, asc, ascq,
2826 		    sp->cmd_tag, (unsigned long long)gethrtime());
2827 		CMD2PKT(sp)->pkt_state |= STATE_ARQ_DONE;
2828 		(*(uint8_t *)&aqp->sts_rqpkt_status) = STATUS_GOOD;
2829 		aqp->sts_rqpkt_statistics = 0;
2830 		aqp->sts_rqpkt_reason = CMD_CMPLT;
2831 		aqp->sts_rqpkt_state = STATE_GOT_BUS |
2832 		    STATE_GOT_TARGET | STATE_SENT_CMD |
2833 		    STATE_XFERRED_DATA | STATE_GOT_STATUS;
2834 		(void) memcpy(&aqp->sts_sensedata, snsp, amt);
2835 		if (aqp->sts_sensedata.es_class != CLASS_EXTENDED_SENSE) {
2836 			aqp->sts_rqpkt_reason = CMD_TRAN_ERR;
2837 			aqp->sts_rqpkt_state = 0;
2838 			aqp->sts_rqpkt_resid =
2839 			    sizeof (struct scsi_extended_sense);
2840 		} else {
2841 			aqp->sts_rqpkt_resid =
2842 			    sizeof (struct scsi_extended_sense) - amt;
2843 		}
2844 	} else if (status) {
2845 		pmcs_prt(pwp, PMCS_PRT_DEBUG_SCSI_STATUS, c2,
2846 		    path, status, CMD2PKT(sp)->pkt_cdbp[0] & 0xff,
2847 		    sp->cmd_tag, (unsigned long long)gethrtime());
2848 	}
2849 
2850 	CMD2PKT(sp)->pkt_reason = CMD_CMPLT;
2851 }
2852 
2853 /*
2854  * Calculate and set packet residual and return the amount
2855  * left over after applying various filters.
2856  */
2857 size_t
2858 pmcs_set_resid(struct scsi_pkt *pkt, size_t amt, uint32_t cdbamt)
2859 {
2860 	pkt->pkt_resid = cdbamt;
2861 	if (amt > pkt->pkt_resid) {
2862 		amt = pkt->pkt_resid;
2863 	}
2864 	if (amt > pkt->pkt_dma_len) {
2865 		amt = pkt->pkt_dma_len;
2866 	}
2867 	return (amt);
2868 }
2869 
2870 /*
2871  * Return the existing target softstate if there is one.  If there is,
2872  * the PHY is locked as well and that lock must be freed by the caller
2873  * after the target/PHY linkage is established.
2874  */
2875 pmcs_xscsi_t *
2876 pmcs_get_target(pmcs_iport_t *iport, char *tgt_port)
2877 {
2878 	pmcs_hw_t *pwp = iport->pwp;
2879 	pmcs_phy_t *phyp;
2880 	pmcs_xscsi_t *tgt;
2881 	uint64_t wwn;
2882 	char unit_address[PMCS_MAX_UA_SIZE];
2883 	int ua_form = 1;
2884 
2885 	/*
2886 	 * Find the PHY for this target
2887 	 */
2888 	phyp = pmcs_find_phy_by_sas_address(pwp, iport, NULL, tgt_port);
2889 	if (phyp == NULL) {
2890 		pmcs_prt(pwp, PMCS_PRT_DEBUG3, "%s: No PHY for target @ %s",
2891 		    __func__, tgt_port);
2892 		return (NULL);
2893 	}
2894 
2895 	tgt = ddi_soft_state_bystr_get(iport->tgt_sstate, tgt_port);
2896 
2897 	if (tgt) {
2898 		/*
2899 		 * There's already a target.  Check its PHY pointer to see
2900 		 * if we need to clear the old linkages
2901 		 */
2902 		if (tgt->phy && (tgt->phy != phyp)) {
2903 			pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
2904 			    "%s: Target PHY updated from %p to %p", __func__,
2905 			    (void *)tgt->phy, (void *)phyp);
2906 			if (!IS_ROOT_PHY(tgt->phy)) {
2907 				pmcs_dec_phy_ref_count(tgt->phy);
2908 				pmcs_inc_phy_ref_count(phyp);
2909 			}
2910 			tgt->phy->target = NULL;
2911 		}
2912 
2913 		tgt->phy = phyp;
2914 		phyp->target = tgt;
2915 		return (tgt);
2916 	}
2917 
2918 	/*
2919 	 * Make sure the PHY we found is on the correct iport
2920 	 */
2921 	if (phyp->iport != iport) {
2922 		pmcs_prt(pwp, PMCS_PRT_DEBUG,
2923 		    "%s: No target at %s on this iport", __func__, tgt_port);
2924 		pmcs_unlock_phy(phyp);
2925 		return (NULL);
2926 	}
2927 
2928 	/*
2929 	 * Allocate the new softstate
2930 	 */
2931 	wwn = pmcs_barray2wwn(phyp->sas_address);
2932 	(void) scsi_wwn_to_wwnstr(wwn, ua_form, unit_address);
2933 
2934 	if (ddi_soft_state_bystr_zalloc(iport->tgt_sstate, unit_address) !=
2935 	    DDI_SUCCESS) {
2936 		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
2937 		    "%s: Couldn't alloc softstate for device at %s",
2938 		    __func__, unit_address);
2939 		pmcs_unlock_phy(phyp);
2940 		return (NULL);
2941 	}
2942 
2943 	tgt = ddi_soft_state_bystr_get(iport->tgt_sstate, unit_address);
2944 	STAILQ_INIT(&tgt->wq);
2945 	STAILQ_INIT(&tgt->aq);
2946 	STAILQ_INIT(&tgt->sq);
2947 	mutex_init(&tgt->statlock, NULL, MUTEX_DRIVER,
2948 	    DDI_INTR_PRI(pwp->intr_pri));
2949 	mutex_init(&tgt->wqlock, NULL, MUTEX_DRIVER,
2950 	    DDI_INTR_PRI(pwp->intr_pri));
2951 	mutex_init(&tgt->aqlock, NULL, MUTEX_DRIVER,
2952 	    DDI_INTR_PRI(pwp->intr_pri));
2953 	cv_init(&tgt->reset_cv, NULL, CV_DRIVER, NULL);
2954 	cv_init(&tgt->abort_cv, NULL, CV_DRIVER, NULL);
2955 	tgt->qdepth = 1;
2956 	tgt->target_num = PMCS_INVALID_TARGET_NUM;
2957 	bcopy(unit_address, tgt->unit_address, PMCS_MAX_UA_SIZE);
2958 	tgt->pwp = pwp;
2959 	tgt->ua = strdup(iport->ua);
2960 	tgt->phy = phyp;
2961 	ASSERT((phyp->target == NULL) || (phyp->target == tgt));
2962 	if (phyp->target == NULL) {
2963 		phyp->target = tgt;
2964 	}
2965 
2966 	/*
2967 	 * Don't allocate LUN softstate for SMP targets
2968 	 */
2969 	if (phyp->dtype == EXPANDER) {
2970 		return (tgt);
2971 	}
2972 
2973 	if (ddi_soft_state_bystr_init(&tgt->lun_sstate,
2974 	    sizeof (pmcs_lun_t), PMCS_LUN_SSTATE_SZ) != 0) {
2975 		pmcs_prt(pwp, PMCS_PRT_DEBUG_CONFIG,
2976 		    "%s: LUN soft_state_bystr_init failed", __func__);
2977 		ddi_soft_state_bystr_free(iport->tgt_sstate, tgt_port);
2978 		pmcs_unlock_phy(phyp);
2979 		return (NULL);
2980 	}
2981 
2982 	return (tgt);
2983 }
2984