1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 /*
30  *
31  * nv_sata is a combo SATA HBA driver for ck804/mcp55 based chipsets.
32  *
33  * NCQ
34  * ---
35  *
36  * A portion of the NCQ support is in place, but it is incomplete.  NCQ
37  * is disabled and is likely to be revisited in the future.
38  *
39  *
40  * Power Management
41  * ----------------
42  *
43  * Normally power management would be responsible for ensuring the device
44  * is quiescent and then changing power states to the device, such as
45  * powering down parts or all of the device.  mcp55/ck804 is unique in
46  * that it is only available as part of a larger southbridge chipset, so
47  * removing power from the device isn't possible.  Switches to control
48  * power management states D0/D3 in the PCI configuration space appear
49  * to be supported, but changes to these states are apparently ignored.
50  * The only further PM that the driver _could_ do is shut down the PHY,
51  * but in order to deliver the first rev of the driver sooner rather
52  * than later, that will be deferred until some future phase.
53  *
54  * Since the driver currently will not directly change any power state of
55  * the device, no power() entry point will be required.  However, it is
56  * possible that in ACPI power state S3, aka suspend to RAM, power
57  * can be removed from the device, and the driver cannot rely on the BIOS
58  * to have reset any state.  For the time being, there are no known
59  * non-default configurations that need to be programmed.  This judgment
60  * is based on the port of the legacy ata driver not having any such
61  * functionality and on conversations with the PM team.  If such a
62  * restoration is later deemed necessary, it can be incorporated into the
63  * DDI_RESUME processing.
64  *
65  */
66 
67 #include <sys/scsi/scsi.h>
68 #include <sys/pci.h>
69 #include <sys/byteorder.h>
70 #include <sys/sata/sata_hba.h>
71 #include <sys/sata/adapters/nv_sata/nv_sata.h>
72 #include <sys/disp.h>
73 #include <sys/note.h>
74 #include <sys/promif.h>
75 
76 
77 /*
78  * Function prototypes for driver entry points
79  */
80 static int nv_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
81 static int nv_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
82 static int nv_getinfo(dev_info_t *dip, ddi_info_cmd_t infocmd,
83     void *arg, void **result);
84 
85 /*
86  * Function prototypes for entry points from sata service module
87  * These functions are distinguished from other local functions
88  * by the prefix "nv_sata_"
89  */
90 static int nv_sata_start(dev_info_t *dip, sata_pkt_t *spkt);
91 static int nv_sata_abort(dev_info_t *dip, sata_pkt_t *spkt, int);
92 static int nv_sata_reset(dev_info_t *dip, sata_device_t *sd);
93 static int nv_sata_activate(dev_info_t *dip, sata_device_t *sd);
94 static int nv_sata_deactivate(dev_info_t *dip, sata_device_t *sd);
95 
96 /*
97  * Local function prototypes
98  */
99 static uint_t mcp55_intr(caddr_t arg1, caddr_t arg2);
100 static uint_t mcp04_intr(caddr_t arg1, caddr_t arg2);
101 static int nv_add_legacy_intrs(nv_ctl_t *nvc);
102 #ifdef NV_MSI_SUPPORTED
103 static int nv_add_msi_intrs(nv_ctl_t *nvc);
104 #endif
105 static void nv_rem_intrs(nv_ctl_t *nvc);
106 static int nv_start_common(nv_port_t *nvp, sata_pkt_t *spkt);
107 static int nv_start_nodata(nv_port_t *nvp, int slot);
108 static void nv_intr_nodata(nv_port_t *nvp, nv_slot_t *spkt);
109 static int nv_start_pio_in(nv_port_t *nvp, int slot);
110 static int nv_start_pio_out(nv_port_t *nvp, int slot);
111 static void nv_intr_pio_in(nv_port_t *nvp, nv_slot_t *spkt);
112 static void nv_intr_pio_out(nv_port_t *nvp, nv_slot_t *spkt);
113 static int nv_start_dma(nv_port_t *nvp, int slot);
114 static void nv_intr_dma(nv_port_t *nvp, struct nv_slot *spkt);
115 static void nv_log(uint_t flag, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, ...);
116 static void nv_uninit_ctl(nv_ctl_t *nvc);
117 static void mcp55_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle);
118 static void mcp04_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle);
119 static void nv_uninit_port(nv_port_t *nvp);
120 static int nv_init_port(nv_port_t *nvp);
121 static int nv_init_ctl(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle);
122 static int mcp55_packet_complete_intr(nv_ctl_t *nvc, nv_port_t *nvp);
123 #ifdef NCQ
124 static int mcp55_dma_setup_intr(nv_ctl_t *nvc, nv_port_t *nvp);
125 #endif
126 static void nv_start_dma_engine(nv_port_t *nvp, int slot);
127 static void nv_port_state_change(nv_port_t *nvp, int event, uint8_t addr_type,
128     int state);
129 static boolean_t nv_check_link(uint32_t sstatus);
130 static void nv_common_reg_init(nv_ctl_t *nvc);
131 static void mcp04_intr_process(nv_ctl_t *nvc, uint8_t intr_status);
132 static void nv_reset(nv_port_t *nvp);
133 static void nv_complete_io(nv_port_t *nvp,  sata_pkt_t *spkt, int slot);
134 static void nv_timeout(void *);
135 static int nv_poll_wait(nv_port_t *nvp, sata_pkt_t *spkt);
136 static void nv_cmn_err(int ce, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, ...);
137 static void nv_read_signature(nv_port_t *nvp);
138 static void mcp55_set_intr(nv_port_t *nvp, int flag);
139 static void mcp04_set_intr(nv_port_t *nvp, int flag);
140 static void nv_resume(nv_port_t *nvp);
141 static int nv_start_sync(nv_port_t *nvp, sata_pkt_t *spkt);
142 static int nv_abort_active(nv_port_t *nvp, sata_pkt_t *spkt, int abort_reason);
143 static void nv_copy_registers(nv_port_t *nvp, sata_device_t *sd,
144     sata_pkt_t *spkt);
145 static void nv_report_add_remove(nv_port_t *nvp, int flags);
146 static int nv_start_async(nv_port_t *nvp, sata_pkt_t *spkt);
147 static int nv_wait3(nv_port_t *nvp, uchar_t onbits1, uchar_t offbits1,
148     uchar_t failure_onbits2, uchar_t failure_offbits2,
149     uchar_t failure_onbits3, uchar_t failure_offbits3,
150     uint_t timeout_usec, int type_wait);
151 static int nv_wait(nv_port_t *nvp, uchar_t onbits, uchar_t offbits,
152     uint_t timeout_usec, int type_wait);
153 
154 
155 /*
156  * DMA attributes for the data buffer for x86.  dma_attr_burstsizes is
157  * unused.  Verify whether it is needed if ported to another ISA.
158  */
159 static ddi_dma_attr_t buffer_dma_attr = {
160 	DMA_ATTR_V0,		/* dma_attr_version */
161 	0,			/* dma_attr_addr_lo: lowest bus address */
162 	0xffffffffull,		/* dma_attr_addr_hi: */
163 	NV_BM_64K_BOUNDARY - 1,	/* dma_attr_count_max i.e for one cookie */
164 	4,			/* dma_attr_align */
165 	1,			/* dma_attr_burstsizes. */
166 	1,			/* dma_attr_minxfer */
167 	0xffffffffull,		/* dma_attr_maxxfer including all cookies */
168 	0xffffffffull,		/* dma_attr_seg */
169 	NV_DMA_NSEGS,		/* dma_attr_sgllen */
170 	512,			/* dma_attr_granular */
171 	0,			/* dma_attr_flags */
172 };
173 
174 
175 /*
176  * DMA attributes for PRD tables
177  */
178 ddi_dma_attr_t nv_prd_dma_attr = {
179 	DMA_ATTR_V0,		/* dma_attr_version */
180 	0,			/* dma_attr_addr_lo */
181 	0xffffffffull,		/* dma_attr_addr_hi */
182 	NV_BM_64K_BOUNDARY - 1,	/* dma_attr_count_max */
183 	4,			/* dma_attr_align */
184 	1,			/* dma_attr_burstsizes */
185 	1,			/* dma_attr_minxfer */
186 	NV_BM_64K_BOUNDARY,	/* dma_attr_maxxfer */
187 	NV_BM_64K_BOUNDARY - 1,	/* dma_attr_seg */
188 	1,			/* dma_attr_sgllen */
189 	1,			/* dma_attr_granular */
190 	0			/* dma_attr_flags */
191 };
192 
193 /*
194  * Device access attributes
195  */
196 static ddi_device_acc_attr_t accattr = {
197     DDI_DEVICE_ATTR_V0,
198     DDI_STRUCTURE_LE_ACC,
199     DDI_STRICTORDER_ACC
200 };
201 
202 
203 static struct dev_ops nv_dev_ops = {
204 	DEVO_REV,		/* devo_rev */
205 	0,			/* refcnt  */
206 	nv_getinfo,		/* info */
207 	nulldev,		/* identify */
208 	nulldev,		/* probe */
209 	nv_attach,		/* attach */
210 	nv_detach,		/* detach */
211 	nodev,			/* no reset */
212 	(struct cb_ops *)0,	/* driver operations */
213 	NULL,			/* bus operations */
214 	NULL			/* power */
215 };
216 
217 static sata_tran_hotplug_ops_t nv_hotplug_ops;
218 
219 extern struct mod_ops mod_driverops;
220 
221 static  struct modldrv modldrv = {
222 	&mod_driverops,	/* driverops */
223 	"Nvidia ck804/mcp55 HBA v%I%",
224 	&nv_dev_ops,	/* driver ops */
225 };
226 
227 static  struct modlinkage modlinkage = {
228 	MODREV_1,
229 	&modldrv,
230 	NULL
231 };
232 
233 
234 /*
235  * wait between checks of reg status
236  */
237 int nv_usec_delay = NV_WAIT_REG_CHECK;
238 
239 /*
240  * The following is needed for nv_vcmn_err()
241  */
242 static kmutex_t nv_log_mutex; /* protects nv_log_buf */
243 static char nv_log_buf[NV_STRING_512];
244 int nv_debug_flags = NVDBG_ALWAYS;
245 int nv_log_to_console = B_FALSE;
246 
247 int nv_log_delay = 0;
248 int nv_prom_print = B_FALSE;
249 
250 /*
251  * for debugging
252  */
253 #ifdef DEBUG
254 int ncq_commands = 0;
255 int non_ncq_commands = 0;
256 #endif
257 
258 /*
259  * Opaque state pointer to be initialized by ddi_soft_state_init()
260  */
261 static void *nv_statep	= NULL;
262 
263 
264 static sata_tran_hotplug_ops_t nv_hotplug_ops = {
265 	SATA_TRAN_HOTPLUG_OPS_REV_1,	/* structure version */
266 	nv_sata_activate,	/* activate port. cfgadm -c connect */
267 	nv_sata_deactivate	/* deactivate port. cfgadm -c disconnect */
268 };
269 
270 
271 /*
272  *  nv module initialization
273  */
274 int
275 _init(void)
276 {
277 	int	error;
278 
279 	error = ddi_soft_state_init(&nv_statep, sizeof (nv_ctl_t), 0);
280 
281 	if (error != 0) {
282 
283 		return (error);
284 	}
285 
286 	mutex_init(&nv_log_mutex, NULL, MUTEX_DRIVER, NULL);
287 
288 	if ((error = sata_hba_init(&modlinkage)) != 0) {
289 		ddi_soft_state_fini(&nv_statep);
290 		mutex_destroy(&nv_log_mutex);
291 
292 		return (error);
293 	}
294 
295 	error = mod_install(&modlinkage);
296 	if (error != 0) {
297 		sata_hba_fini(&modlinkage);
298 		ddi_soft_state_fini(&nv_statep);
299 		mutex_destroy(&nv_log_mutex);
300 
301 		return (error);
302 	}
303 
304 	return (error);
305 }
306 
307 
308 /*
309  * nv module uninitialize
310  */
311 int
312 _fini(void)
313 {
314 	int	error;
315 
316 	error = mod_remove(&modlinkage);
317 
318 	if (error != 0) {
319 		return (error);
320 	}
321 
322 	/*
323 	 * remove the resources allocated in _init()
324 	 */
325 	mutex_destroy(&nv_log_mutex);
326 	sata_hba_fini(&modlinkage);
327 	ddi_soft_state_fini(&nv_statep);
328 
329 	return (error);
330 }
331 
332 
333 /*
334  * nv _info entry point
335  */
336 int
337 _info(struct modinfo *modinfop)
338 {
339 	return (mod_info(&modlinkage, modinfop));
340 }
341 
342 
343 /*
344  * these wrappers for ddi_{get,put}{8,16,32} are for observability
345  * with dtrace
346  */
347 #ifdef DEBUG
348 
349 static void
350 nv_put8(ddi_acc_handle_t handle, uint8_t *dev_addr, uint8_t value)
351 {
352 	ddi_put8(handle, dev_addr, value);
353 }
354 
355 static void
356 nv_put32(ddi_acc_handle_t handle, uint32_t *dev_addr, uint32_t value)
357 {
358 	ddi_put32(handle, dev_addr, value);
359 }
360 
361 static uint32_t
362 nv_get32(ddi_acc_handle_t handle, uint32_t *dev_addr)
363 {
364 	return (ddi_get32(handle, dev_addr));
365 }
366 
367 static void
368 nv_put16(ddi_acc_handle_t handle, uint16_t *dev_addr, uint16_t value)
369 {
370 	ddi_put16(handle, dev_addr, value);
371 }
372 
373 static uint16_t
374 nv_get16(ddi_acc_handle_t handle, uint16_t *dev_addr)
375 {
376 	return (ddi_get16(handle, dev_addr));
377 }
378 
379 static uint8_t
380 nv_get8(ddi_acc_handle_t handle, uint8_t *dev_addr)
381 {
382 	return (ddi_get8(handle, dev_addr));
383 }
384 
385 #else
386 
387 #define	nv_put8 ddi_put8
388 #define	nv_put32 ddi_put32
389 #define	nv_get32 ddi_get32
390 #define	nv_put16 ddi_put16
391 #define	nv_get16 ddi_get16
392 #define	nv_get8 ddi_get8
393 
394 #endif
395 
396 
397 /*
398  * Driver attach
399  */
400 static int
401 nv_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
402 {
403 	int status, attach_state, intr_types, bar, i;
404 	int inst = ddi_get_instance(dip);
405 	ddi_acc_handle_t pci_conf_handle;
406 	nv_ctl_t *nvc;
407 	uint8_t subclass;
408 
409 	switch (cmd) {
410 
411 	case DDI_ATTACH:
412 
413 		NVLOG((NVDBG_INIT, NULL, NULL,
414 		    "nv_attach(): DDI_ATTACH inst %d", inst));
415 
416 		attach_state = ATTACH_PROGRESS_NONE;
417 
418 		status = ddi_soft_state_zalloc(nv_statep, inst);
419 
420 		if (status != DDI_SUCCESS) {
421 			break;
422 		}
423 
424 		nvc = ddi_get_soft_state(nv_statep, inst);
425 
426 		nvc->nvc_dip = dip;
427 
428 		attach_state |= ATTACH_PROGRESS_STATEP_ALLOC;
429 
430 		if (pci_config_setup(dip, &pci_conf_handle) == DDI_SUCCESS) {
431 			nvc->nvc_revid = pci_config_get8(pci_conf_handle,
432 			    PCI_CONF_REVID);
433 			NVLOG((NVDBG_INIT, NULL, NULL,
434 			    "inst %d: silicon revid is %x nv_debug_flags=%x",
435 			    inst, nvc->nvc_revid, nv_debug_flags));
436 		} else {
437 			break;
438 		}
439 
440 		attach_state |= ATTACH_PROGRESS_CONF_HANDLE;
441 
442 		subclass = pci_config_get8(pci_conf_handle, PCI_CONF_SUBCLASS);
443 
444 		if (subclass & PCI_MASS_RAID) {
445 			cmn_err(CE_WARN,
446 			    "attach failed: RAID mode not supported");
447 			break;
448 		}
449 
450 		/*
451 		 * the 6 bars of the controller are:
452 		 * 0: port 0 task file
453 		 * 1: port 0 status
454 		 * 2: port 1 task file
455 		 * 3: port 1 status
456 		 * 4: bus master for both ports
457 		 * 5: extended registers for SATA features
458 		 */
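		/*
		 * Note: the reg set index passed to ddi_regs_map_setup() is
		 * bar + 1 because the first entry of the PCI "reg" property
		 * describes config space rather than a BAR.
		 */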
459 		for (bar = 0; bar < 6; bar++) {
460 			status = ddi_regs_map_setup(dip, bar + 1,
461 			    (caddr_t *)&nvc->nvc_bar_addr[bar], 0, 0, &accattr,
462 			    &nvc->nvc_bar_hdl[bar]);
463 
464 			if (status != DDI_SUCCESS) {
465 				NVLOG((NVDBG_INIT, nvc, NULL,
466 				    "ddi_regs_map_setup failure for bar"
467 				    " %d status = %d", bar, status));
468 				break;
469 			}
470 		}
471 
472 		attach_state |= ATTACH_PROGRESS_BARS;
473 
474 		/*
475 		 * initialize controller and driver core
476 		 */
477 		status = nv_init_ctl(nvc, pci_conf_handle);
478 
479 		if (status == NV_FAILURE) {
480 			NVLOG((NVDBG_INIT, nvc, NULL, "nv_init_ctl failed"));
481 
482 			break;
483 		}
484 
485 		attach_state |= ATTACH_PROGRESS_CTL_SETUP;
486 
487 		/*
488 		 * initialize mutexes
489 		 */
490 		mutex_init(&nvc->nvc_mutex, NULL, MUTEX_DRIVER,
491 		    DDI_INTR_PRI(nvc->nvc_intr_pri));
492 
493 		attach_state |= ATTACH_PROGRESS_MUTEX_INIT;
494 
495 		/*
496 		 * get supported interrupt types
497 		 */
498 		if (ddi_intr_get_supported_types(dip, &intr_types) !=
499 		    DDI_SUCCESS) {
500 			nv_cmn_err(CE_WARN, nvc, NULL,
501 			    "!ddi_intr_get_supported_types failed");
502 			NVLOG((NVDBG_INIT, nvc, NULL,
503 			    "interrupt supported types failed"));
504 
505 			break;
506 		}
507 
508 		NVLOG((NVDBG_INIT, nvc, NULL,
509 		    "ddi_intr_get_supported_types() returned: 0x%x",
510 		    intr_types));
511 
512 #ifdef NV_MSI_SUPPORTED
513 		if (intr_types & DDI_INTR_TYPE_MSI) {
514 			NVLOG((NVDBG_INIT, nvc, NULL,
515 			    "using MSI interrupt type"));
516 
517 			/*
518 			 * Try MSI first, but fall back to legacy if MSI
519 			 * attach fails
520 			 */
521 			if (nv_add_msi_intrs(nvc) == DDI_SUCCESS) {
522 				nvc->nvc_intr_type = DDI_INTR_TYPE_MSI;
523 				attach_state |= ATTACH_PROGRESS_INTR_ADDED;
524 				NVLOG((NVDBG_INIT, nvc, NULL,
525 				    "MSI interrupt setup done"));
526 			} else {
527 				nv_cmn_err(CE_CONT, nvc, NULL,
528 				    "!MSI registration failed "
529 				    "will try Legacy interrupts");
530 			}
531 		}
532 #endif
533 
534 		/*
535 		 * Either the MSI interrupt setup has failed or only
536 		 * the fixed interrupts are available on the system.
537 		 */
538 		if (!(attach_state & ATTACH_PROGRESS_INTR_ADDED) &&
539 		    (intr_types & DDI_INTR_TYPE_FIXED)) {
540 
541 			NVLOG((NVDBG_INIT, nvc, NULL,
542 			    "using Legacy interrupt type"));
543 
544 			if (nv_add_legacy_intrs(nvc) == DDI_SUCCESS) {
545 				nvc->nvc_intr_type = DDI_INTR_TYPE_FIXED;
546 				attach_state |= ATTACH_PROGRESS_INTR_ADDED;
547 				NVLOG((NVDBG_INIT, nvc, NULL,
548 				    "Legacy interrupt setup done"));
549 			} else {
550 				nv_cmn_err(CE_WARN, nvc, NULL,
551 				    "!legacy interrupt setup failed");
552 				NVLOG((NVDBG_INIT, nvc, NULL,
553 				    "legacy interrupt setup failed"));
554 				break;
555 			}
556 		}
557 
558 		if (!(attach_state & ATTACH_PROGRESS_INTR_ADDED)) {
559 			NVLOG((NVDBG_INIT, nvc, NULL,
560 			    "no interrupts registered"));
561 			break;
562 		}
563 
564 		/*
565 		 * attach to sata module
566 		 */
567 		if (sata_hba_attach(nvc->nvc_dip,
568 		    &nvc->nvc_sata_hba_tran,
569 		    DDI_ATTACH) != DDI_SUCCESS) {
570 			attach_state |= ATTACH_PROGRESS_SATA_MODULE;
571 
572 			break;
573 		}
574 
575 		pci_config_teardown(&pci_conf_handle);
576 
577 		NVLOG((NVDBG_INIT, nvc, NULL, "nv_attach DDI_SUCCESS"));
578 
579 		return (DDI_SUCCESS);
580 
581 	case DDI_RESUME:
582 
583 		nvc = ddi_get_soft_state(nv_statep, inst);
584 
585 		NVLOG((NVDBG_INIT, nvc, NULL,
586 		    "nv_attach(): DDI_RESUME inst %d", inst));
587 
588 
589 		nvc->nvc_state &= ~NV_CTRL_SUSPEND;
590 
591 		for (i = 0; i < NV_MAX_PORTS(nvc); i++) {
592 			nv_resume(&(nvc->nvc_port[i]));
593 		}
594 
595 		return (DDI_SUCCESS);
596 
597 	default:
598 		return (DDI_FAILURE);
599 	}
600 
601 
602 	/*
603 	 * DDI_ATTACH failure path starts here
604 	 */
605 
606 	if (attach_state & ATTACH_PROGRESS_INTR_ADDED) {
607 		nv_rem_intrs(nvc);
608 	}
609 
610 	if (attach_state & ATTACH_PROGRESS_SATA_MODULE) {
611 		/*
612 		 * Remove timers
613 		 */
614 		int port = 0;
615 		nv_port_t *nvp;
616 
617 		for (; port < NV_MAX_PORTS(nvc); port++) {
618 			nvp = &(nvc->nvc_port[port]);
619 			if (nvp->nvp_timeout_id != 0) {
620 				(void) untimeout(nvp->nvp_timeout_id);
621 			}
622 		}
623 	}
624 
625 	if (attach_state & ATTACH_PROGRESS_MUTEX_INIT) {
626 		mutex_destroy(&nvc->nvc_mutex);
627 	}
628 
629 	if (attach_state & ATTACH_PROGRESS_CTL_SETUP) {
630 		nv_uninit_ctl(nvc);
631 	}
632 
633 	if (attach_state & ATTACH_PROGRESS_BARS) {
634 		while (--bar >= 0) {
635 			ddi_regs_map_free(&nvc->nvc_bar_hdl[bar]);
636 		}
637 	}
638 
639 	if (attach_state & ATTACH_PROGRESS_STATEP_ALLOC) {
640 		ddi_soft_state_free(nv_statep, inst);
641 	}
642 
643 	if (attach_state & ATTACH_PROGRESS_CONF_HANDLE) {
644 		pci_config_teardown(&pci_conf_handle);
645 	}
646 
647 	cmn_err(CE_WARN, "nv_sata%d attach failed", inst);
648 
649 	return (DDI_FAILURE);
650 }
651 
652 
653 static int
654 nv_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
655 {
656 	int i, port, inst = ddi_get_instance(dip);
657 	nv_ctl_t *nvc;
658 	nv_port_t *nvp;
659 
660 	nvc = ddi_get_soft_state(nv_statep, inst);
661 
662 	switch (cmd) {
663 
664 	case DDI_DETACH:
665 
666 		NVLOG((NVDBG_INIT, nvc, NULL, "nv_detach: DDI_DETACH"));
667 
668 		/*
669 		 * Remove interrupts
670 		 */
671 		nv_rem_intrs(nvc);
672 
673 		/*
674 		 * Remove timers
675 		 */
676 		for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
677 			nvp = &(nvc->nvc_port[port]);
678 			if (nvp->nvp_timeout_id != 0) {
679 				(void) untimeout(nvp->nvp_timeout_id);
680 			}
681 		}
682 
683 		/*
684 		 * Remove maps
685 		 */
686 		for (i = 0; i < 6; i++) {
687 			ddi_regs_map_free(&nvc->nvc_bar_hdl[i]);
688 		}
689 
690 		/*
691 		 * Destroy mutexes
692 		 */
693 		mutex_destroy(&nvc->nvc_mutex);
694 
695 		/*
696 		 * Uninitialize the controller
697 		 */
698 		nv_uninit_ctl(nvc);
699 
700 		/*
701 		 * unregister from the sata module
702 		 */
703 		(void) sata_hba_detach(nvc->nvc_dip, DDI_DETACH);
704 
705 		/*
706 		 * Free soft state
707 		 */
708 		ddi_soft_state_free(nv_statep, inst);
709 
710 		return (DDI_SUCCESS);
711 
712 	case DDI_SUSPEND:
713 		/*
714 		 * The PM functions for suspend and resume are incomplete
715 		 * and need additional work.  It may or may not work in
716 		 * the current state.
717 		 */
718 		NVLOG((NVDBG_INIT, nvc, NULL, "nv_detach: DDI_SUSPEND"));
719 		nvc->nvc_state |= NV_CTRL_SUSPEND;
720 
721 		return (DDI_SUCCESS);
722 
723 	default:
724 		return (DDI_FAILURE);
725 	}
726 }
727 
728 
729 /*ARGSUSED*/
730 static int
731 nv_getinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
732 {
733 	nv_ctl_t *nvc;
734 	int instance;
735 	dev_t dev;
736 
737 	dev = (dev_t)arg;
738 	instance = getminor(dev);
739 
740 	switch (infocmd) {
741 	case DDI_INFO_DEVT2DEVINFO:
742 		nvc = ddi_get_soft_state(nv_statep,  instance);
743 		if (nvc != NULL) {
744 			*result = nvc->nvc_dip;
745 			return (DDI_SUCCESS);
746 		} else {
747 			*result = NULL;
748 			return (DDI_FAILURE);
749 		}
750 	case DDI_INFO_DEVT2INSTANCE:
751 		*(int *)result = instance;
752 		break;
753 	default:
754 		break;
755 	}
756 	return (DDI_SUCCESS);
757 }
758 
759 
760 /*
761  * Called by sata module to probe a port.  Port and device state
762  * are not changed here... only reported back to the sata module.
763  *
764  * If probe confirms a device is present for the first time, it will
765  * initiate a device reset, then probe will be called again and the
766  * signature will be checked.  If the signature is valid, data structures
767  * will be initialized.
768  */
769 static int
770 nv_sata_probe(dev_info_t *dip, sata_device_t *sd)
771 {
772 	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
773 	uint8_t cport = sd->satadev_addr.cport;
774 	uint8_t pmport = sd->satadev_addr.pmport;
775 	uint8_t qual = sd->satadev_addr.qual;
776 	clock_t nv_lbolt = ddi_get_lbolt();
777 	nv_port_t *nvp;
778 
779 	if (cport >= NV_MAX_PORTS(nvc)) {
780 		sd->satadev_type = SATA_DTYPE_NONE;
781 		sd->satadev_state = SATA_STATE_PROBED;
782 
783 		return (SATA_FAILURE);
784 	}
785 
786 	ASSERT(nvc->nvc_port != NULL);
787 	nvp = &(nvc->nvc_port[cport]);
788 	ASSERT(nvp != NULL);
789 
790 	NVLOG((NVDBG_PROBE, nvc, nvp,
791 	    "nv_sata_probe: enter cport: 0x%x, pmport: 0x%x, "
792 	    "qual: 0x%x", cport, pmport, qual));
793 
794 	mutex_enter(&nvp->nvp_mutex);
795 
796 	/*
797 	 * This check seems to be done in the SATA module.
798 	 * It may not be required here
799 	 */
800 	if (nvp->nvp_state & NV_PORT_INACTIVE) {
801 		nv_cmn_err(CE_WARN, nvc, nvp,
802 		    "port inactive.  Use cfgadm to activate");
803 		sd->satadev_type = SATA_DTYPE_UNKNOWN;
804 		sd->satadev_state = SATA_PSTATE_SHUTDOWN;
805 		mutex_exit(&nvp->nvp_mutex);
806 
807 		return (SATA_FAILURE);
808 	}
809 
810 	if (qual == SATA_ADDR_PMPORT) {
811 		sd->satadev_type = SATA_DTYPE_NONE;
812 		sd->satadev_state = SATA_STATE_PROBED;
813 		mutex_exit(&nvp->nvp_mutex);
814 		nv_cmn_err(CE_WARN, nvc, nvp,
815 		    "controller does not support port multiplier");
816 
817 		return (SATA_FAILURE);
818 	}
819 
820 	sd->satadev_state = SATA_PSTATE_PWRON;
821 
822 	nv_copy_registers(nvp, sd, NULL);
823 
824 	/*
825 	 * determine link status
826 	 */
827 	if (nv_check_link(sd->satadev_scr.sstatus) == B_FALSE) {
828 		uint8_t det;
829 
830 		/*
831 		 * Reset will cause the link to go down for a short period of
832 		 * time.  If link is lost for less than 2 seconds ignore it
833 		 * so that the reset can progress.
834 		 */
835 		if (nvp->nvp_state & NV_PORT_RESET_PROBE) {
836 
837 			if (nvp->nvp_link_lost_time == 0) {
838 				nvp->nvp_link_lost_time = nv_lbolt;
839 			}
840 
841 			if (TICK_TO_SEC(nv_lbolt -
842 			    nvp->nvp_link_lost_time) < NV_LINK_LOST_OK) {
843 				NVLOG((NVDBG_ALWAYS, nvp->nvp_ctlp, nvp,
844 				    "probe: intermittent link lost while"
845 				    " resetting"));
846 				/*
847 				 * fake status of link so that probe continues
848 				 */
849 				SSTATUS_SET_IPM(sd->satadev_scr.sstatus,
850 				    SSTATUS_IPM_ACTIVE);
851 				SSTATUS_SET_DET(sd->satadev_scr.sstatus,
852 				    SSTATUS_DET_DEVPRE_PHYCOM);
853 				sd->satadev_type = SATA_DTYPE_UNKNOWN;
854 				mutex_exit(&nvp->nvp_mutex);
855 
856 				return (SATA_SUCCESS);
857 			} else {
858 				nvp->nvp_state &=
859 				    ~(NV_PORT_RESET_PROBE|NV_PORT_RESET);
860 			}
861 		}
862 
863 		/*
864 		 * no link, so tear down port and abort all active packets
865 		 */
866 
867 		det = (sd->satadev_scr.sstatus & SSTATUS_DET) >>
868 		    SSTATUS_DET_SHIFT;
869 
870 		switch (det) {
871 		case SSTATUS_DET_NODEV:
872 		case SSTATUS_DET_PHYOFFLINE:
873 			sd->satadev_type = SATA_DTYPE_NONE;
874 			break;
875 		default:
876 			sd->satadev_type = SATA_DTYPE_UNKNOWN;
877 			break;
878 		}
879 
880 		NVLOG((NVDBG_PROBE, nvp->nvp_ctlp, nvp,
881 		    "probe: link lost invoking nv_abort_active"));
882 
883 		(void) nv_abort_active(nvp, NULL, SATA_PKT_TIMEOUT);
884 		nv_uninit_port(nvp);
885 
886 		mutex_exit(&nvp->nvp_mutex);
887 
888 		return (SATA_SUCCESS);
889 	} else {
890 		nvp->nvp_link_lost_time = 0;
891 	}
892 
893 	/*
894 	 * A device is present so clear hotremoved flag
895 	 */
896 	nvp->nvp_state &= ~NV_PORT_HOTREMOVED;
897 
898 	/*
899 	 * If the signature was acquired previously there is no need to
900 	 * do it again.
901 	 */
902 	if (nvp->nvp_signature != 0) {
903 		NVLOG((NVDBG_PROBE, nvp->nvp_ctlp, nvp,
904 		    "probe: signature acquired previously"));
905 		sd->satadev_type = nvp->nvp_type;
906 		mutex_exit(&nvp->nvp_mutex);
907 
908 		return (SATA_SUCCESS);
909 	}
910 
911 	/*
912 	 * If NV_PORT_RESET is not set, this is the first time through
913 	 * so perform reset and return.
914 	 */
915 	if ((nvp->nvp_state & NV_PORT_RESET) == 0) {
916 		NVLOG((NVDBG_PROBE, nvp->nvp_ctlp, nvp,
917 		    "probe: first reset to get sig"));
918 		nvp->nvp_state |= NV_PORT_RESET_PROBE;
919 		nv_reset(nvp);
920 		sd->satadev_type = nvp->nvp_type = SATA_DTYPE_UNKNOWN;
921 		nvp->nvp_probe_time = nv_lbolt;
922 		mutex_exit(&nvp->nvp_mutex);
923 
924 		return (SATA_SUCCESS);
925 	}
926 
927 	/*
928 	 * Reset was done previously.  see if the signature is
929 	 * available.
930 	 */
931 	nv_read_signature(nvp);
932 	sd->satadev_type = nvp->nvp_type;
933 
934 	/*
935 	 * Some drives may require additional resets to get a
936 	 * valid signature.  If a drive was not just powered up, the signature
937 	 * should arrive within half a second of reset.  Therefore if more
938 	 * than 5 seconds has elapsed while waiting for a signature, reset
939 	 * again.  These extra resets do not appear to create problems when
940 	 * the drive is spinning up for more than this reset period.
941 	 */
942 	if (nvp->nvp_signature == 0) {
943 		if (TICK_TO_SEC(nv_lbolt - nvp->nvp_reset_time) > 5) {
944 			NVLOG((NVDBG_PROBE, nvc, nvp, "additional reset"
945 			    " during signature acquisition"));
946 			nv_reset(nvp);
947 		}
948 
949 		mutex_exit(&nvp->nvp_mutex);
950 
951 		return (SATA_SUCCESS);
952 	}
953 
954 	NVLOG((NVDBG_PROBE, nvc, nvp, "signature acquired after %d ms",
955 	    TICK_TO_MSEC(nv_lbolt - nvp->nvp_probe_time)));
956 
957 	/*
958 	 * nv_sata only deals with ATA disks so far.  If it is
959 	 * not an ATA disk, then just return.
960 	 */
961 	if (nvp->nvp_type != SATA_DTYPE_ATADISK) {
962 		nv_cmn_err(CE_WARN, nvc, nvp, "Driver currently handles only"
963 		    " disks.  Signature acquired was %X", nvp->nvp_signature);
964 		mutex_exit(&nvp->nvp_mutex);
965 
966 		return (SATA_SUCCESS);
967 	}
968 
969 	/*
970 	 * make sure structures are initialized
971 	 */
972 	if (nv_init_port(nvp) == NV_SUCCESS) {
973 		NVLOG((NVDBG_PROBE, nvc, nvp,
974 		    "device detected and set up at port %d", cport));
975 		mutex_exit(&nvp->nvp_mutex);
976 
977 		return (SATA_SUCCESS);
978 	} else {
979 		nv_cmn_err(CE_WARN, nvc, nvp, "failed to set up data "
980 		    "structures for port %d", cport);
981 		mutex_exit(&nvp->nvp_mutex);
982 
983 		return (SATA_FAILURE);
984 	}
985 	/*NOTREACHED*/
986 }
987 
988 
989 /*
990  * Called by sata module to start a new command.
991  */
992 static int
993 nv_sata_start(dev_info_t *dip, sata_pkt_t *spkt)
994 {
995 	int cport = spkt->satapkt_device.satadev_addr.cport;
996 	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
997 	nv_port_t *nvp = &(nvc->nvc_port[cport]);
998 	int ret;
999 
1000 	NVLOG((NVDBG_ENTRY, nvc, nvp, "nv_sata_start: opmode: 0x%x cmd=%x",
1001 	    spkt->satapkt_op_mode, spkt->satapkt_cmd.satacmd_cmd_reg));
1002 
1003 	mutex_enter(&nvp->nvp_mutex);
1004 
1005 	/*
1006 	 * hotremoved is an intermediate state where the link was lost,
1007 	 * but the hotplug event has not yet been processed by the sata
1008 	 * module.  Fail the request.
1009 	 */
1010 	if (nvp->nvp_state & NV_PORT_HOTREMOVED) {
1011 		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
1012 		spkt->satapkt_device.satadev_state = SATA_STATE_UNKNOWN;
1013 		NVLOG((NVDBG_ERRS, nvc, nvp,
1014 		    "nv_sata_start: NV_PORT_HOTREMOVED"));
1015 		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
1016 		mutex_exit(&nvp->nvp_mutex);
1017 
1018 		return (SATA_TRAN_PORT_ERROR);
1019 	}
1020 
1021 	if (nvp->nvp_state & NV_PORT_RESET) {
1022 		NVLOG((NVDBG_ERRS, nvc, nvp,
1023 		    "still waiting for reset completion"));
1024 		spkt->satapkt_reason = SATA_PKT_BUSY;
1025 		mutex_exit(&nvp->nvp_mutex);
1026 
1027 		/*
1028 		 * If in panic, timeouts do not occur, so fake one
1029 		 * so that the signature can be acquired to complete
1030 		 * the reset handling.
1031 		 */
1032 		if (ddi_in_panic()) {
1033 			nv_timeout(nvp);
1034 		}
1035 
1036 		return (SATA_TRAN_BUSY);
1037 	}
1038 
1039 	if (nvp->nvp_type == SATA_DTYPE_NONE) {
1040 		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
1041 		NVLOG((NVDBG_ERRS, nvc, nvp,
1042 		    "nv_sata_start: SATA_DTYPE_NONE"));
1043 		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
1044 		mutex_exit(&nvp->nvp_mutex);
1045 
1046 		return (SATA_TRAN_PORT_ERROR);
1047 	}
1048 
1049 	if (spkt->satapkt_device.satadev_type == SATA_DTYPE_ATAPICD) {
1050 		ASSERT(nvp->nvp_type == SATA_DTYPE_ATAPICD);
1051 		nv_cmn_err(CE_WARN, nvc, nvp,
1052 		    "optical devices not supported");
1053 		spkt->satapkt_reason = SATA_PKT_CMD_UNSUPPORTED;
1054 		mutex_exit(&nvp->nvp_mutex);
1055 
1056 		return (SATA_TRAN_CMD_UNSUPPORTED);
1057 	}
1058 
1059 	if (spkt->satapkt_device.satadev_type == SATA_DTYPE_PMULT) {
1060 		ASSERT(nvp->nvp_type == SATA_DTYPE_PMULT);
1061 		nv_cmn_err(CE_WARN, nvc, nvp,
1062 		    "port multipliers not supported by controller");
1063 		spkt->satapkt_reason = SATA_PKT_CMD_UNSUPPORTED;
1064 		mutex_exit(&nvp->nvp_mutex);
1065 
1066 		return (SATA_TRAN_CMD_UNSUPPORTED);
1067 	}
1068 
1069 	if ((nvp->nvp_state & NV_PORT_INIT) == 0) {
1070 		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
1071 		NVLOG((NVDBG_ERRS, nvc, nvp,
1072 		    "nv_sata_start: port not yet initialized"));
1073 		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
1074 		mutex_exit(&nvp->nvp_mutex);
1075 
1076 		return (SATA_TRAN_PORT_ERROR);
1077 	}
1078 
1079 	if (nvp->nvp_state & NV_PORT_INACTIVE) {
1080 		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
1081 		NVLOG((NVDBG_ERRS, nvc, nvp,
1082 		    "nv_sata_start: NV_PORT_INACTIVE"));
1083 		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
1084 		mutex_exit(&nvp->nvp_mutex);
1085 
1086 		return (SATA_TRAN_PORT_ERROR);
1087 	}
1088 
1089 	if (nvp->nvp_state & NV_PORT_FAILED) {
1090 		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
1091 		NVLOG((NVDBG_ERRS, nvc, nvp,
1092 		    "nv_sata_start: NV_PORT_FAILED state"));
1093 		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
1094 		mutex_exit(&nvp->nvp_mutex);
1095 
1096 		return (SATA_TRAN_PORT_ERROR);
1097 	}
1098 
1099 	/*
1100 	 * After a device reset, once the sata module's restore processing
1101 	 * is complete, the sata module will set sata_clear_dev_reset, which
1102 	 * indicates that restore processing has completed and normal
1103 	 * non-restore related commands should be processed.
1104 	 */
1105 	if (spkt->satapkt_cmd.satacmd_flags.sata_clear_dev_reset) {
1106 		nvp->nvp_state &= ~NV_PORT_RESTORE;
1107 		NVLOG((NVDBG_ENTRY, nvc, nvp,
1108 		    "nv_sata_start: clearing NV_PORT_RESTORE"));
1109 	}
1110 
1111 	/*
1112 	 * if the device was recently reset as indicated by NV_PORT_RESTORE,
1113 	 * only allow commands which restore device state.  The sata module
1114 	 * marks such commands with sata_ignore_dev_reset.
1115 	 *
1116 	 * during coredump, nv_reset is called but then the restore
1117 	 * doesn't happen.  For now, work around this by ignoring the wait
1118 	 * for restore if the system is panicking.
1119 	 */
1120 	if ((nvp->nvp_state & NV_PORT_RESTORE) &&
1121 	    !(spkt->satapkt_cmd.satacmd_flags.sata_ignore_dev_reset) &&
1122 	    (ddi_in_panic() == 0)) {
1123 		spkt->satapkt_reason = SATA_PKT_BUSY;
1124 		NVLOG((NVDBG_ENTRY, nvc, nvp,
1125 		    "nv_sata_start: waiting for restore "));
1126 		mutex_exit(&nvp->nvp_mutex);
1127 
1128 		return (SATA_TRAN_BUSY);
1129 	}
1130 
1131 	if (nvp->nvp_state & NV_PORT_ABORTING) {
1132 		spkt->satapkt_reason = SATA_PKT_BUSY;
1133 		NVLOG((NVDBG_ERRS, nvc, nvp,
1134 		    "nv_sata_start: NV_PORT_ABORTING"));
1135 		mutex_exit(&nvp->nvp_mutex);
1136 
1137 		return (SATA_TRAN_BUSY);
1138 	}
1139 
1140 	if (spkt->satapkt_op_mode &
1141 	    (SATA_OPMODE_POLLING|SATA_OPMODE_SYNCH)) {
1142 
1143 		ret = nv_start_sync(nvp, spkt);
1144 
1145 		mutex_exit(&nvp->nvp_mutex);
1146 
1147 		return (ret);
1148 	}
1149 
1150 	/*
1151 	 * start the command asynchronously
1152 	 */
1153 	ret = nv_start_async(nvp, spkt);
1154 
1155 	mutex_exit(&nvp->nvp_mutex);
1156 
1157 	return (ret);
1158 }
1159 
1160 
1161 /*
1162  * SATA_OPMODE_POLLING implies the driver is in a
1163  * synchronous mode, and SATA_OPMODE_SYNCH is also set.
1164  * If only SATA_OPMODE_SYNCH is set, the driver can use
1165  * interrupts and sleep wait on a cv.
1166  *
1167  * If SATA_OPMODE_POLLING is set, the driver can't use
1168  * interrupts and must busy wait and simulate the
1169  * interrupts by waiting for BSY to be cleared.
1170  *
1171  * Synchronous mode has to return BUSY if there are
1172  * any other commands already on the drive.
1173  */
1174 static int
1175 nv_start_sync(nv_port_t *nvp, sata_pkt_t *spkt)
1176 {
1177 	nv_ctl_t *nvc = nvp->nvp_ctlp;
1178 	int ret;
1179 
1180 	NVLOG((NVDBG_SYNC, nvp->nvp_ctlp, nvp, "nv_sata_satapkt_sync: entry"));
1181 
1182 	if (nvp->nvp_ncq_run != 0 || nvp->nvp_non_ncq_run != 0) {
1183 		spkt->satapkt_reason = SATA_PKT_BUSY;
1184 		NVLOG((NVDBG_SYNC, nvp->nvp_ctlp, nvp,
1185 		    "nv_sata_satapkt_sync: device is busy, sync cmd rejected"
1186 		    "ncq_run: %d non_ncq_run: %d  spkt: %p",
1187 		    nvp->nvp_ncq_run, nvp->nvp_non_ncq_run,
1188 		    (&(nvp->nvp_slot[0]))->nvslot_spkt));
1189 
1190 		return (SATA_TRAN_BUSY);
1191 	}
1192 
1193 	/*
1194 	 * if SYNC but not POLL, verify that this is not on interrupt thread.
1195 	 */
1196 	if (!(spkt->satapkt_op_mode & SATA_OPMODE_POLLING) &&
1197 	    servicing_interrupt()) {
1198 		spkt->satapkt_reason = SATA_PKT_BUSY;
1199 		nv_cmn_err(CE_WARN, nvp->nvp_ctlp, nvp,
1200 		    "SYNC mode not allowed during interrupt");
1201 
1202 		return (SATA_TRAN_BUSY);
1203 
1204 	}
1205 
1206 	/*
1207 	 * disable interrupt generation if in polled mode
1208 	 */
1209 	if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) {
1210 		(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
1211 	}
1212 
1213 	if ((ret = nv_start_common(nvp, spkt)) != SATA_TRAN_ACCEPTED) {
1214 		if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) {
1215 			(*(nvc->nvc_set_intr))(nvp, NV_INTR_ENABLE);
1216 		}
1217 
1218 		return (ret);
1219 	}
1220 
1221 	if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) {
1222 		mutex_exit(&nvp->nvp_mutex);
1223 		ret = nv_poll_wait(nvp, spkt);
1224 		mutex_enter(&nvp->nvp_mutex);
1225 
1226 		(*(nvc->nvc_set_intr))(nvp, NV_INTR_ENABLE);
1227 
1228 		NVLOG((NVDBG_SYNC, nvp->nvp_ctlp, nvp, "nv_sata_satapkt_sync:"
1229 			" done % reason %d", ret));
1230 
1231 		return (ret);
1232 	}
1233 
1234 	/*
1235 	 * non-polling synchronous mode handling.  The interrupt will signal
1236 	 * when the IO is completed.
1237 	 */
1238 	cv_wait(&nvp->nvp_poll_cv, &nvp->nvp_mutex);
1239 
1240 	if (spkt->satapkt_reason != SATA_PKT_COMPLETED) {
1241 
1242 		spkt->satapkt_reason = SATA_PKT_TIMEOUT;
1243 	}
1244 
1245 	NVLOG((NVDBG_SYNC, nvp->nvp_ctlp, nvp, "nv_sata_satapkt_sync:"
1246 	    " done % reason %d", spkt->satapkt_reason));
1247 
1248 	return (SATA_TRAN_ACCEPTED);
1249 }
1250 
1251 
1252 static int
1253 nv_poll_wait(nv_port_t *nvp, sata_pkt_t *spkt)
1254 {
1255 	int ret;
1256 	nv_ctl_t *nvc = nvp->nvp_ctlp;
1257 #if ! defined(__lock_lint)
1258 	nv_slot_t *nv_slotp = &(nvp->nvp_slot[0]); /* not NCQ aware */
1259 #endif
1260 
1261 	NVLOG((NVDBG_SYNC, nvc, nvp, "nv_poll_wait: enter"));
1262 
1263 	for (;;) {
1264 
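		/*
		 * delay briefly before sampling status; ATA requires roughly
		 * 400ns after a command is issued before the status register
		 * is valid.
		 */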
1265 		NV_DELAY_NSEC(400);
1266 
1267 		NVLOG((NVDBG_SYNC, nvc, nvp, "nv_poll_wait: before nv_wait"));
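		/*
		 * wait for BSY to clear: no bits are required to be on and
		 * SATA_STATUS_BSY is required to be off.
		 */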
1268 		if (nv_wait(nvp, 0, SATA_STATUS_BSY,
1269 		    NV_SEC2USEC(spkt->satapkt_time), NV_NOSLEEP) == B_FALSE) {
1270 			mutex_enter(&nvp->nvp_mutex);
1271 			spkt->satapkt_reason = SATA_PKT_TIMEOUT;
1272 			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
1273 			nv_reset(nvp);
1274 			nv_complete_io(nvp, spkt, 0);
1275 			mutex_exit(&nvp->nvp_mutex);
1276 			NVLOG((NVDBG_SYNC, nvc, nvp, "nv_poll_wait: "
1277 			    "SATA_STATUS_BSY"));
1278 
1279 			return (SATA_TRAN_ACCEPTED);
1280 		}
1281 
1282 		NVLOG((NVDBG_SYNC, nvc, nvp, "nv_poll_wait: before nvc_intr"));
1283 
1284 		/*
1285 		 * Simulate interrupt.
1286 		 */
1287 		ret = (*(nvc->nvc_interrupt))((caddr_t)nvc, NULL);
1288 		NVLOG((NVDBG_SYNC, nvc, nvp, "nv_poll_wait: after nvc_intr"));
1289 
1290 		if (ret != DDI_INTR_CLAIMED) {
1291 			NVLOG((NVDBG_SYNC, nvc, nvp, "nv_poll_wait:"
1292 			    " unclaimed -- resetting"));
1293 			mutex_enter(&nvp->nvp_mutex);
1294 			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
1295 			nv_reset(nvp);
1296 			spkt->satapkt_reason = SATA_PKT_TIMEOUT;
1297 			nv_complete_io(nvp, spkt, 0);
1298 			mutex_exit(&nvp->nvp_mutex);
1299 
1300 			return (SATA_TRAN_ACCEPTED);
1301 		}
1302 
1303 #if ! defined(__lock_lint)
1304 		if (nv_slotp->nvslot_flags == NVSLOT_COMPLETE) {
1305 			/*
1306 			 * packet is complete
1307 			 */
1308 			return (SATA_TRAN_ACCEPTED);
1309 		}
1310 #endif
1311 	}
1312 	/*NOTREACHED*/
1313 }
1314 
1315 
1316 /*
1317  * Called by sata module to abort outstanding packets.
1318  */
1319 /*ARGSUSED*/
1320 static int
1321 nv_sata_abort(dev_info_t *dip, sata_pkt_t *spkt, int flag)
1322 {
1323 	int cport = spkt->satapkt_device.satadev_addr.cport;
1324 	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1325 	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1326 	int c_a, ret;
1327 
1328 	ASSERT(cport < NV_MAX_PORTS(nvc));
1329 	NVLOG((NVDBG_ENTRY, nvc, nvp, "nv_sata_abort %d %p", flag, spkt));
1330 
1331 	mutex_enter(&nvp->nvp_mutex);
1332 
1333 	if (nvp->nvp_state & NV_PORT_INACTIVE) {
1334 		mutex_exit(&nvp->nvp_mutex);
1335 		nv_cmn_err(CE_WARN, nvc, nvp,
1336 		    "abort request failed: port inactive");
1337 
1338 		return (SATA_FAILURE);
1339 	}
1340 
1341 	/*
1342 	 * if spkt == NULL, abort all commands
1343 	 */
1344 	c_a = nv_abort_active(nvp, spkt, SATA_PKT_ABORTED);
1345 
1346 	if (c_a) {
1347 		NVLOG((NVDBG_ENTRY, nvc, nvp,
1348 		    "packets aborted running=%d", c_a));
1349 		ret = SATA_SUCCESS;
1350 	} else {
1351 		if (spkt == NULL) {
1352 			NVLOG((NVDBG_ENTRY, nvc, nvp, "no spkts to abort"));
1353 		} else {
1354 			NVLOG((NVDBG_ENTRY, nvc, nvp,
1355 			    "can't find spkt to abort"));
1356 		}
1357 		ret = SATA_FAILURE;
1358 	}
1359 
1360 	mutex_exit(&nvp->nvp_mutex);
1361 
1362 	return (ret);
1363 }
1364 
1365 
1366 /*
1367  * if spkt == NULL abort all packets running, otherwise
1368  * abort the requested packet.  Must be called with nvp_mutex
1369  * held and returns with it held.  Not NCQ aware.
1370  */
1371 static int
1372 nv_abort_active(nv_port_t *nvp, sata_pkt_t *spkt, int abort_reason)
1373 {
1374 	int aborted = 0, i, reset_once = B_FALSE;
1375 	struct nv_slot *nv_slotp;
1376 	sata_pkt_t *spkt_slot;
1377 
1378 	ASSERT(MUTEX_HELD(&nvp->nvp_mutex));
1379 
1380 	/*
1381 	 * return if the port is not configured
1382 	 */
1383 	if (nvp->nvp_slot == NULL) {
1384 		NVLOG((NVDBG_ENTRY, nvp->nvp_ctlp, nvp,
1385 		    "nv_abort_active: not configured so returning"));
1386 
1387 		return (0);
1388 	}
1389 
1390 	NVLOG((NVDBG_ENTRY, nvp->nvp_ctlp, nvp, "nv_abort_active"));
1391 
1392 	nvp->nvp_state |= NV_PORT_ABORTING;
1393 
1394 	for (i = 0; i < nvp->nvp_queue_depth; i++) {
1395 
1396 		nv_slotp = &(nvp->nvp_slot[i]);
1397 		spkt_slot = nv_slotp->nvslot_spkt;
1398 
1399 		/*
1400 		 * skip if not active command in slot
1401 		 */
1402 		if (spkt_slot == NULL) {
1403 			continue;
1404 		}
1405 
1406 		/*
1407 		 * if a specific packet was requested, skip if
1408 		 * this is not a match
1409 		 */
1410 		if ((spkt != NULL) && (spkt != spkt_slot)) {
1411 			continue;
1412 		}
1413 
1414 		/*
1415 		 * stop the hardware.  This could need reworking
1416 		 * when NCQ is enabled in the driver.
1417 		 */
1418 		if (reset_once == B_FALSE) {
1419 			ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
1420 
1421 			/*
1422 			 * stop DMA engine
1423 			 */
1424 			nv_put8(bmhdl, nvp->nvp_bmicx,  0);
1425 
1426 			nv_reset(nvp);
1427 			reset_once = B_TRUE;
1428 		}
1429 
1430 		spkt_slot->satapkt_reason = abort_reason;
1431 		nv_complete_io(nvp, spkt_slot, i);
1432 		aborted++;
1433 	}
1434 
1435 	nvp->nvp_state &= ~NV_PORT_ABORTING;
1436 
1437 	return (aborted);
1438 }
1439 
1440 
1441 /*
1442  * Called by sata module to reset a port, device, or the controller.
1443  */
1444 static int
1445 nv_sata_reset(dev_info_t *dip, sata_device_t *sd)
1446 {
1447 	int cport = sd->satadev_addr.cport;
1448 	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1449 	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1450 	int ret = SATA_SUCCESS;
1451 
1452 	ASSERT(cport < NV_MAX_PORTS(nvc));
1453 
1454 	NVLOG((NVDBG_ENTRY, nvc, nvp, "nv_sata_reset"));
1455 
1456 	mutex_enter(&nvp->nvp_mutex);
1457 
1458 	switch (sd->satadev_addr.qual) {
1459 
1460 	case SATA_ADDR_CPORT:
1461 		/*FALLTHROUGH*/
1462 	case SATA_ADDR_DCPORT:
1463 		nv_reset(nvp);
1464 		(void) nv_abort_active(nvp, NULL, SATA_PKT_RESET);
1465 
1466 		break;
1467 	case SATA_ADDR_CNTRL:
1468 		NVLOG((NVDBG_ENTRY, nvc, nvp,
1469 		    "nv_sata_reset: constroller reset not supported"));
1470 
1471 		break;
1472 	case SATA_ADDR_PMPORT:
1473 	case SATA_ADDR_DPMPORT:
1474 		NVLOG((NVDBG_ENTRY, nvc, nvp,
1475 		    "nv_sata_reset: port multipliers not supported"));
1476 		/*FALLTHROUGH*/
1477 	default:
1478 		/*
1479 		 * unsupported case
1480 		 */
1481 		ret = SATA_FAILURE;
1482 		break;
1483 	}
1484 
1485 	if (ret == SATA_SUCCESS) {
1486 		/*
1487 		 * If the port is inactive, do a quiet reset and don't attempt
1488 		 * to wait for reset completion or do any post reset processing
1489 		 */
1490 		if (nvp->nvp_state & NV_PORT_INACTIVE) {
1491 			nvp->nvp_state &= ~NV_PORT_RESET;
1492 			nvp->nvp_reset_time = 0;
1493 		}
1494 
1495 		/*
1496 		 * clear the port failed flag
1497 		 */
1498 		nvp->nvp_state &= ~NV_PORT_FAILED;
1499 	}
1500 
1501 	mutex_exit(&nvp->nvp_mutex);
1502 
1503 	return (ret);
1504 }
1505 
1506 
1507 /*
1508  * Sata entry point to handle port activation.  cfgadm -c connect
1509  */
1510 static int
1511 nv_sata_activate(dev_info_t *dip, sata_device_t *sd)
1512 {
1513 	int cport = sd->satadev_addr.cport;
1514 	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1515 	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1516 
1517 	ASSERT(cport < NV_MAX_PORTS(nvc));
1518 	NVLOG((NVDBG_ENTRY, nvc, nvp, "nv_sata_activate"));
1519 
1520 	mutex_enter(&nvp->nvp_mutex);
1521 
1522 	sd->satadev_state = SATA_STATE_READY;
1523 
1524 	nv_copy_registers(nvp, sd, NULL);
1525 
1526 	(*(nvc->nvc_set_intr))(nvp, NV_INTR_ENABLE);
1527 
1528 	nvp->nvp_state = 0;
1529 
1530 	mutex_exit(&nvp->nvp_mutex);
1531 
1532 	return (SATA_SUCCESS);
1533 }
1534 
1535 
1536 /*
1537  * Sata entry point to handle port deactivation.  cfgadm -c disconnect
1538  */
1539 static int
1540 nv_sata_deactivate(dev_info_t *dip, sata_device_t *sd)
1541 {
1542 	int cport = sd->satadev_addr.cport;
1543 	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1544 	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1545 
1546 	ASSERT(cport < NV_MAX_PORTS(nvc));
1547 	NVLOG((NVDBG_ENTRY, nvc, nvp, "nv_sata_deactivate"));
1548 
1549 	mutex_enter(&nvp->nvp_mutex);
1550 
1551 	(void) nv_abort_active(nvp, NULL, SATA_PKT_RESET);
1552 
1553 	/*
1554 	 * mark the device as inaccessible
1555 	 */
1556 	nvp->nvp_state |= NV_PORT_INACTIVE;
1557 
1558 	/*
1559 	 * disable the interrupts on port
1560 	 */
1561 	(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
1562 
1563 	nv_uninit_port(nvp);
1564 
1565 	sd->satadev_state = SATA_PSTATE_SHUTDOWN;
1566 	nv_copy_registers(nvp, sd, NULL);
1567 
1568 	mutex_exit(&nvp->nvp_mutex);
1569 
1570 	return (SATA_SUCCESS);
1571 }
1572 
1573 
1574 /*
1575  * find an empty slot in the driver's queue, increment counters,
1576  * and then invoke the appropriate PIO or DMA start routine.
1577  */
1578 static int
1579 nv_start_common(nv_port_t *nvp, sata_pkt_t *spkt)
1580 {
1581 	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
1582 	int on_bit = 0x01, slot, sactive, ret, ncq = 0;
1583 	uint8_t cmd = spkt->satapkt_cmd.satacmd_cmd_reg;
1584 	int direction = sata_cmdp->satacmd_flags.sata_data_direction;
1585 	nv_ctl_t *nvc = nvp->nvp_ctlp;
1586 	nv_slot_t *nv_slotp;
1587 	boolean_t dma_cmd;
1588 
1589 	NVLOG((NVDBG_DELIVER, nvc, nvp, "nv_start_common  entered: cmd: 0x%x",
1590 	    sata_cmdp->satacmd_cmd_reg));
1591 
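	/*
	 * READ/WRITE FPDMA QUEUED are the NCQ commands.
	 */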
1592 	if ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
1593 	    (cmd == SATAC_READ_FPDMA_QUEUED)) {
1594 		nvp->nvp_ncq_run++;
1595 		/*
1596 		 * search for an empty NCQ slot.  By this time, the caller
1597 		 * has already determined that there is room on the
1598 		 * queue.
1599 		 */
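		/*
		 * Each slot maps to a bit in SActive: slot n corresponds to
		 * bit n, so on_bit starts at 0x01 and shifts left once per
		 * slot examined.
		 */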
1600 		for (slot = 0; slot < nvp->nvp_queue_depth; slot++,
1601 		    on_bit <<= 1) {
1602 			if ((nvp->nvp_sactive_cache & on_bit) == 0) {
1603 				break;
1604 			}
1605 		}
1606 
1607 		/*
1608 		 * the first empty slot found should not exceed the queue
1609 		 * depth of the drive.  If it does, it's an error.
1610 		 */
1611 		ASSERT(slot != nvp->nvp_queue_depth);
1612 
1613 		sactive = nv_get32(nvc->nvc_bar_hdl[5],
1614 		    nvp->nvp_sactive);
1615 		ASSERT((sactive & on_bit) == 0);
1616 		nv_put32(nvc->nvc_bar_hdl[5], nvp->nvp_sactive, on_bit);
1617 		NVLOG((NVDBG_INIT, nvc, nvp, "setting SACTIVE onbit: %X",
1618 		    on_bit));
1619 		nvp->nvp_sactive_cache |= on_bit;
1620 
1621 		ncq = NVSLOT_NCQ;
1622 
1623 	} else {
1624 		nvp->nvp_non_ncq_run++;
1625 		slot = 0;
1626 	}
1627 
1628 	nv_slotp = (nv_slot_t *)&nvp->nvp_slot[slot];
1629 
1630 	ASSERT(nv_slotp->nvslot_spkt == NULL);
1631 
1632 	nv_slotp->nvslot_spkt = spkt;
1633 	nv_slotp->nvslot_flags = ncq;
1634 
1635 	/*
1636 	 * the sata module doesn't indicate which commands utilize the
1637 	 * DMA engine, so find out using this switch table.
1638 	 */
1639 	switch (spkt->satapkt_cmd.satacmd_cmd_reg) {
1640 	case SATAC_READ_DMA_EXT:
1641 	case SATAC_WRITE_DMA_EXT:
1642 	case SATAC_WRITE_DMA:
1643 	case SATAC_READ_DMA:
1644 	case SATAC_READ_DMA_QUEUED:
1645 	case SATAC_READ_DMA_QUEUED_EXT:
1646 	case SATAC_WRITE_DMA_QUEUED:
1647 	case SATAC_WRITE_DMA_QUEUED_EXT:
1648 	case SATAC_READ_FPDMA_QUEUED:
1649 	case SATAC_WRITE_FPDMA_QUEUED:
1650 		dma_cmd = B_TRUE;
1651 		break;
1652 	default:
1653 		dma_cmd = B_FALSE;
1654 	}
1655 
1656 	if (sata_cmdp->satacmd_num_dma_cookies != 0 && dma_cmd == B_TRUE) {
1657 		NVLOG((NVDBG_DELIVER, nvc,  nvp, "DMA command"));
1658 		nv_slotp->nvslot_start = nv_start_dma;
1659 		nv_slotp->nvslot_intr = nv_intr_dma;
1660 	} else if (direction == SATA_DIR_NODATA_XFER) {
1661 		NVLOG((NVDBG_DELIVER, nvc, nvp, "non-data command"));
1662 		nv_slotp->nvslot_start = nv_start_nodata;
1663 		nv_slotp->nvslot_intr = nv_intr_nodata;
1664 	} else if (direction == SATA_DIR_READ) {
1665 		NVLOG((NVDBG_DELIVER, nvc, nvp, "pio in command"));
1666 		nv_slotp->nvslot_start = nv_start_pio_in;
1667 		nv_slotp->nvslot_intr = nv_intr_pio_in;
1668 		nv_slotp->nvslot_byte_count =
1669 		    spkt->satapkt_cmd.satacmd_bp->b_bcount;
1670 		nv_slotp->nvslot_v_addr =
1671 		    spkt->satapkt_cmd.satacmd_bp->b_un.b_addr;
1672 	} else if (direction == SATA_DIR_WRITE) {
1673 		NVLOG((NVDBG_DELIVER, nvc, nvp, "pio out command"));
1674 		nv_slotp->nvslot_start = nv_start_pio_out;
1675 		nv_slotp->nvslot_intr = nv_intr_pio_out;
1676 		nv_slotp->nvslot_byte_count =
1677 		    spkt->satapkt_cmd.satacmd_bp->b_bcount;
1678 		nv_slotp->nvslot_v_addr =
1679 		    spkt->satapkt_cmd.satacmd_bp->b_un.b_addr;
1680 	} else {
1681 		nv_cmn_err(CE_WARN, nvc, nvp, "malformed command: direction"
1682 		    " %d cookies %d cmd %x",
1683 		    sata_cmdp->satacmd_flags.sata_data_direction,
1684 		    sata_cmdp->satacmd_num_dma_cookies,  cmd);
1685 		spkt->satapkt_reason = SATA_PKT_CMD_UNSUPPORTED;
1686 		ret = SATA_TRAN_CMD_UNSUPPORTED;
1687 
1688 		goto fail;
1689 	}
1690 
1691 	if ((ret = (*nv_slotp->nvslot_start)(nvp, slot)) ==
1692 	    SATA_TRAN_ACCEPTED) {
1693 		nv_slotp->nvslot_stime = ddi_get_lbolt();
1694 
1695 		/*
1696 		 * start timer if it's not already running and this packet
1697 		 * is not requesting polled mode.
1698 		 */
1699 		if ((nvp->nvp_timeout_id == 0) &&
1700 		    ((spkt->satapkt_op_mode & SATA_OPMODE_POLLING) == 0)) {
1701 			nvp->nvp_timeout_id = timeout(nv_timeout, (void *)nvp,
1702 			    drv_usectohz(NV_ONE_SEC));
1703 		}
1704 
1705 		return (SATA_TRAN_ACCEPTED);
1706 	}
1707 
1708 	fail:
1709 
1710 	spkt->satapkt_reason = SATA_TRAN_PORT_ERROR;
1711 
1712 	if (ncq == NVSLOT_NCQ) {
1713 		nvp->nvp_ncq_run--;
1714 		nvp->nvp_sactive_cache &= ~on_bit;
1715 	} else {
1716 		nvp->nvp_non_ncq_run--;
1717 	}
1718 	nv_slotp->nvslot_spkt = NULL;
1719 	nv_slotp->nvslot_flags = 0;
1720 
1721 	return (ret);
1722 }
1723 
1724 
1725 /*
1726  * Check if the signature is ready and if non-zero translate
1727  * it into a solaris sata defined type.
1728  */
1729 static void
1730 nv_read_signature(nv_port_t *nvp)
1731 {
1732 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
1733 
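	/*
	 * The signature is delivered in the task file registers following
	 * reset: sector count, sector number, cylinder low and cylinder
	 * high are assembled here into a single 32-bit value (for example,
	 * an ATA disk presents 0x01, 0x01, 0x00, 0x00, which assembles to
	 * 0x00000101).
	 */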
1734 	nvp->nvp_signature = nv_get8(cmdhdl, nvp->nvp_count);
1735 	nvp->nvp_signature |= (nv_get8(cmdhdl, nvp->nvp_sect) << 8);
1736 	nvp->nvp_signature |= (nv_get8(cmdhdl, nvp->nvp_lcyl) << 16);
1737 	nvp->nvp_signature |= (nv_get8(cmdhdl, nvp->nvp_hcyl) << 24);
1738 
1739 	switch (nvp->nvp_signature) {
1740 
1741 	case NV_SIG_DISK:
1742 		NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp, "drive is a disk"));
1743 		nvp->nvp_type = SATA_DTYPE_ATADISK;
1744 		break;
1745 	case NV_SIG_ATAPI:
1746 		NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp,
1747 		    "drive is an optical device"));
1748 		nvp->nvp_type = SATA_DTYPE_ATAPICD;
1749 		break;
1750 	case NV_SIG_PM:
1751 		NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp,
1752 		    "device is a port multiplier"));
1753 		nvp->nvp_type = SATA_DTYPE_PMULT;
1754 		break;
1755 	case NV_SIG_NOTREADY:
1756 		NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp,
1757 		    "signature not ready"));
1758 		nvp->nvp_type = SATA_DTYPE_UNKNOWN;
1759 		break;
1760 	default:
1761 		nv_cmn_err(CE_WARN, nvp->nvp_ctlp, nvp, "signature %X not"
1762 		    " recognized", nvp->nvp_signature);
1763 		nvp->nvp_type = SATA_DTYPE_UNKNOWN;
1764 		break;
1765 	}
1766 
1767 	if (nvp->nvp_signature) {
1768 		nvp->nvp_state &= ~(NV_PORT_RESET_PROBE|NV_PORT_RESET);
1769 	}
1770 }
1771 
1772 
1773 /*
1774  * Reset the port
1775  */
1776 static void
1777 nv_reset(nv_port_t *nvp)
1778 {
1779 	ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
1780 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
1781 	nv_ctl_t *nvc = nvp->nvp_ctlp;
1782 	uint32_t sctrl;
1783 
1784 	NVLOG((NVDBG_ENTRY, nvc, nvp, "nv_reset()"));
1785 
1786 	ASSERT(mutex_owned(&nvp->nvp_mutex));
1787 
1788 	/*
1789 	 * clear signature registers
1790 	 */
1791 	nv_put8(cmdhdl, nvp->nvp_sect, 0);
1792 	nv_put8(cmdhdl, nvp->nvp_lcyl, 0);
1793 	nv_put8(cmdhdl, nvp->nvp_hcyl, 0);
1794 	nv_put8(cmdhdl, nvp->nvp_count, 0);
1795 
1796 	nvp->nvp_signature = 0;
1797 	nvp->nvp_type = 0;
1798 	nvp->nvp_state |= NV_PORT_RESET;
1799 	nvp->nvp_reset_time = ddi_get_lbolt();
1800 	nvp->nvp_link_lost_time = 0;
1801 
1802 	/*
1803 	 * assert reset in PHY by writing a 1 to bit 0 scontrol
1804 	 */
1805 	sctrl = nv_get32(bar5_hdl, nvp->nvp_sctrl);
1806 
1807 	nv_put32(bar5_hdl, nvp->nvp_sctrl, sctrl | SCONTROL_DET_COMRESET);
1808 
1809 	/*
1810 	 * wait 1ms
1811 	 */
1812 	drv_usecwait(1000);
1813 
1814 	/*
1815 	 * de-assert reset in PHY
1816 	 */
1817 	nv_put32(bar5_hdl, nvp->nvp_sctrl, sctrl);
1818 
1819 	/*
1820 	 * make sure timer is running
1821 	 */
1822 	if (nvp->nvp_timeout_id == 0) {
1823 		nvp->nvp_timeout_id = timeout(nv_timeout, (void *)nvp,
1824 		    drv_usectohz(NV_ONE_SEC));
1825 	}
1826 }
1827 
1828 
1829 /*
1830  * Initialize register handling specific to mcp55
1831  */
1832 /* ARGSUSED */
1833 static void
1834 mcp55_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle)
1835 {
1836 	nv_port_t *nvp;
1837 	uchar_t *bar5  = nvc->nvc_bar_addr[5];
1838 	uint8_t off, port;
#ifdef NCQ
	uint32_t flags;
#endif
1839 
1840 	nvc->nvc_mcp55_ctl = (uint32_t *)(bar5 + MCP55_CTL);
1841 	nvc->nvc_mcp55_ncq = (uint32_t *)(bar5 + MCP55_NCQ);
1842 
1843 	for (port = 0, off = 0; port < NV_MAX_PORTS(nvc); port++, off += 2) {
1844 		nvp = &(nvc->nvc_port[port]);
1845 		nvp->nvp_mcp55_int_status =
1846 		    (uint16_t *)(bar5 + MCP55_INT_STATUS + off);
1847 		nvp->nvp_mcp55_int_ctl =
1848 		    (uint16_t *)(bar5 + MCP55_INT_CTL + off);
1849 
1850 		/*
1851 		 * clear any previous interrupts asserted
1852 		 */
1853 		nv_put16(nvc->nvc_bar_hdl[5], nvp->nvp_mcp55_int_status,
1854 		    MCP55_INT_CLEAR);
1855 
1856 		/*
1857 		 * These are the interrupts to accept for now.  The spec
1858 		 * says these are enable bits, but nvidia has indicated
1859 		 * these are masking bits.  Even though they may be masked
1860 		 * out to prevent asserting the main interrupt, they can
1861 		 * still be asserted while reading the interrupt status
1862 		 * register, so that needs to be considered in the interrupt
1863 		 * handler.
1864 		 */
1865 		nv_put16(nvc->nvc_bar_hdl[5], nvp->nvp_mcp55_int_ctl,
1866 		    ~(MCP55_INT_IGNORE));
1867 	}
1868 
1869 	/*
1870 	 * Allow the driver to program the BM on the first command instead
1871 	 * of waiting for an interrupt.
1872 	 */
1873 #ifdef NCQ
1874 	flags = MCP_SATA_AE_NCQ_PDEV_FIRST_CMD | MCP_SATA_AE_NCQ_SDEV_FIRST_CMD;
1875 	nv_put32(nvc->nvc_bar_hdl[5], nvc->nvc_mcp55_ncq, flags);
1876 	flags = MCP_SATA_AE_CTL_PRI_SWNCQ | MCP_SATA_AE_CTL_SEC_SWNCQ;
1877 	nv_put32(nvc->nvc_bar_hdl[5], nvc->nvc_mcp55_ctl, flags);
1878 #endif
1879 
1880 
1881 #if 0
1882 	/*
1883 	 * This caused problems on some but not all mcp55 based systems.
1884 	 * DMA writes would never complete.  This happens even on small
1885 	 * mem systems, and only setting NV_40BIT_PRD below and not
1886 	 * buffer_dma_attr.dma_attr_addr_hi, so it seems to be a hardware
1887 	 * issue that needs further investigation.
1888 	 */
1889 
1890 	/*
1891 	 * mcp55 rev A03 and above supports 40-bit physical addressing.
1892 	 * Enable DMA to take advantage of that.
1893 	 *
1894 	 */
1895 	if (nvc->nvc_revid >= 0xa3) {
1896 		uint32_t reg32;
1897 		NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp, "rev id is %X and"
1898 		    " is capable of 40-bit addressing", nvc->nvc_revid));
1899 		buffer_dma_attr.dma_attr_addr_hi = 0xffffffffffull;
1900 		reg32 = pci_config_get32(pci_conf_handle, NV_SATA_CFG_20);
1901 		pci_config_put32(pci_conf_handle, NV_SATA_CFG_20,
1902 		    reg32 |NV_40BIT_PRD);
1903 	} else {
1904 		NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp, "rev is %X and is "
1905 		    "not capable of 40-bit addressing", nvc->nvc_revid));
1906 	}
1907 #endif
1908 
1909 }
1910 
1911 
1912 /*
1913  * Initialize register handling specific to mcp04
1914  */
1915 static void
1916 mcp04_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle)
1917 {
1918 	uchar_t *bar5  = nvc->nvc_bar_addr[5];
1919 	uint32_t reg32;
1920 	uint16_t reg16;
1921 	nv_port_t *nvp;
1922 	int j;
1923 
1924 	/*
1925 	 * delay hotplug interrupts until PHYRDY.
1926 	 */
1927 	reg32 = pci_config_get32(pci_conf_handle, NV_SATA_CFG_42);
1928 	pci_config_put32(pci_conf_handle, NV_SATA_CFG_42,
1929 	    reg32 | MCP04_CFG_DELAY_HOTPLUG_INTR);
1930 
1931 	/*
1932 	 * enable hot plug interrupts for channel x and y
1933 	 */
1934 	reg16 = nv_get16(nvc->nvc_bar_hdl[5],
1935 	    (uint16_t *)(bar5 + NV_ADMACTL_X));
1936 	nv_put16(nvc->nvc_bar_hdl[5], (uint16_t *)(bar5 + NV_ADMACTL_X),
1937 	    NV_HIRQ_EN | reg16);
1938 
1939 
1940 	reg16 = nv_get16(nvc->nvc_bar_hdl[5],
1941 	    (uint16_t *)(bar5 + NV_ADMACTL_Y));
1942 	nv_put16(nvc->nvc_bar_hdl[5], (uint16_t *)(bar5 + NV_ADMACTL_Y),
1943 	    NV_HIRQ_EN | reg16);
1944 
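	/*
	 * cache the location of the controller-wide interrupt status
	 * register in bar5
	 */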
1945 	nvc->nvc_mcp04_int_status = (uint8_t *)(bar5 + MCP04_SATA_INT_STATUS);
1946 
1947 	/*
1948 	 * clear any existing interrupt pending then enable
1949 	 */
1950 	for (j = 0; j < NV_MAX_PORTS(nvc); j++) {
1951 		nvp = &(nvc->nvc_port[j]);
1952 		mutex_enter(&nvp->nvp_mutex);
1953 		(*(nvp->nvp_ctlp->nvc_set_intr))(nvp,
1954 		    NV_INTR_CLEAR_ALL|NV_INTR_ENABLE);
1955 		mutex_exit(&nvp->nvp_mutex);
1956 	}
1957 }
1958 
1959 
1960 /*
1961  * Initialize the controller and set up driver data structures.
 * Determine whether the controller is ck804 or mcp55 class.
1963  */
1964 static int
1965 nv_init_ctl(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle)
1966 {
1967 	struct sata_hba_tran stran;
1968 	nv_port_t *nvp;
1969 	int j, ck804 = B_TRUE;
1970 	uchar_t *cmd_addr, *ctl_addr, *bm_addr;
1971 	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
1972 	uchar_t *bar5  = nvc->nvc_bar_addr[5];
1973 	uint32_t reg32;
1974 	uint8_t reg8, reg8_save;
1975 
1976 	NVLOG((NVDBG_INIT, nvc, NULL, "nv_init_ctl entered"));
1977 
1978 	/*
1979 	 * Need to set bit 2 to 1 at config offset 0x50
1980 	 * to enable access to the bar5 registers.
1981 	 */
1982 	reg32 = pci_config_get32(pci_conf_handle, NV_SATA_CFG_20);
1983 	pci_config_put32(pci_conf_handle, NV_SATA_CFG_20,
1984 	    reg32 | NV_BAR5_SPACE_EN);
1985 
	/*
	 * Determine if this is ck804 or mcp55.  ck804 maps the task file
	 * registers into bar5 while mcp55 does not, so reads of that
	 * offset in mcp55's space do not reflect what was written.  Check
	 * one of the task file registers to see if it is writable and
	 * reads back what was written: ck804 will return the value
	 * written, whereas mcp55 will not.
	 */
1994 	reg8_save = nv_get8(bar5_hdl,
1995 	    (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X));
1996 
1997 
1998 	for (j = 1; j < 3; j++) {
1999 
2000 		nv_put8(bar5_hdl, (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X), j);
2001 		reg8 = nv_get8(bar5_hdl,
2002 		    (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X));
2003 
2004 		if (reg8 != j) {
2005 			ck804 = B_FALSE;
2006 			break;
2007 		}
2008 	}
2009 
2010 	nv_put8(bar5_hdl, (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X), reg8_save);
2011 
2012 	if (ck804 == B_TRUE) {
2013 		NVLOG((NVDBG_INIT, nvc, NULL, "controller is CK804"));
2014 		nvc->nvc_interrupt = mcp04_intr;
2015 		nvc->nvc_reg_init = mcp04_reg_init;
2016 		nvc->nvc_set_intr = mcp04_set_intr;
2017 	} else {
2018 		NVLOG((NVDBG_INIT, nvc, NULL, "controller is MCP55"));
2019 		nvc->nvc_interrupt = mcp55_intr;
2020 		nvc->nvc_reg_init = mcp55_reg_init;
2021 		nvc->nvc_set_intr = mcp55_set_intr;
2022 	}
2023 
2024 
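	/*
	 * fill in the sata_hba_tran structure describing this HBA's
	 * entry points and capabilities for the sata module
	 */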
2025 	stran.sata_tran_hba_rev = SATA_TRAN_HBA_REV;
2026 	stran.sata_tran_hba_dip = nvc->nvc_dip;
2027 	stran.sata_tran_hba_dma_attr = &buffer_dma_attr;
2028 	stran.sata_tran_hba_num_cports = NV_NUM_CPORTS;
2029 	stran.sata_tran_hba_features_support =
2030 	    SATA_CTLF_HOTPLUG | SATA_CTLF_ASN;
2031 	stran.sata_tran_hba_qdepth = NV_QUEUE_SLOTS;
2032 	stran.sata_tran_probe_port = nv_sata_probe;
2033 	stran.sata_tran_start = nv_sata_start;
2034 	stran.sata_tran_abort = nv_sata_abort;
2035 	stran.sata_tran_reset_dport = nv_sata_reset;
2036 	stran.sata_tran_selftest = NULL;
2037 	stran.sata_tran_hotplug_ops = &nv_hotplug_ops;
2038 	stran.sata_tran_pwrmgt_ops = NULL;
2039 	stran.sata_tran_ioctl = NULL;
2040 	nvc->nvc_sata_hba_tran = stran;
2041 
2042 	nvc->nvc_port = kmem_zalloc(sizeof (nv_port_t) * NV_MAX_PORTS(nvc),
2043 	    KM_SLEEP);
2044 
2045 	/*
2046 	 * initialize registers common to all chipsets
2047 	 */
2048 	nv_common_reg_init(nvc);
2049 
2050 	for (j = 0; j < NV_MAX_PORTS(nvc); j++) {
2051 		nvp = &(nvc->nvc_port[j]);
2052 
2053 		cmd_addr = nvp->nvp_cmd_addr;
2054 		ctl_addr = nvp->nvp_ctl_addr;
2055 		bm_addr = nvp->nvp_bm_addr;
2056 
2057 		mutex_init(&nvp->nvp_mutex, NULL, MUTEX_DRIVER,
2058 		    DDI_INTR_PRI(nvc->nvc_intr_pri));
2059 
2060 		cv_init(&nvp->nvp_poll_cv, NULL, CV_DRIVER, NULL);
2061 
2062 		nvp->nvp_data	= cmd_addr + NV_DATA;
2063 		nvp->nvp_error	= cmd_addr + NV_ERROR;
2064 		nvp->nvp_feature = cmd_addr + NV_FEATURE;
2065 		nvp->nvp_count	= cmd_addr + NV_COUNT;
2066 		nvp->nvp_sect	= cmd_addr + NV_SECT;
2067 		nvp->nvp_lcyl	= cmd_addr + NV_LCYL;
2068 		nvp->nvp_hcyl	= cmd_addr + NV_HCYL;
2069 		nvp->nvp_drvhd	= cmd_addr + NV_DRVHD;
2070 		nvp->nvp_status	= cmd_addr + NV_STATUS;
2071 		nvp->nvp_cmd	= cmd_addr + NV_CMD;
2072 		nvp->nvp_altstatus = ctl_addr + NV_ALTSTATUS;
2073 		nvp->nvp_devctl	= ctl_addr + NV_DEVCTL;
2074 
2075 		nvp->nvp_bmicx	= bm_addr + BMICX_REG;
2076 		nvp->nvp_bmisx	= bm_addr + BMISX_REG;
2077 		nvp->nvp_bmidtpx = (uint32_t *)(bm_addr + BMIDTPX_REG);
2078 
2079 		nvp->nvp_state = 0;
2080 	}
2081 
2082 	/*
	 * initialize registers by calling chip-specific reg initialization
2084 	 */
2085 	(*(nvc->nvc_reg_init))(nvc, pci_conf_handle);
2086 
2087 	return (NV_SUCCESS);
2088 }
2089 
2090 
2091 /*
2092  * Initialize data structures with enough slots to handle queuing, if
2093  * enabled.  NV_QUEUE_SLOTS will be set to 1 or 32, depending on whether
2094  * NCQ support is built into the driver and enabled.  It might have been
2095  * better to derive the true size from the drive itself, but the sata
2096  * module only sends down that information on the first NCQ command,
2097  * which means possibly re-sizing the structures on an interrupt stack,
 * making error handling messier.  The easy way is to just allocate
2099  * all 32 slots, which is what most drives support anyway.
2100  */
2101 static int
2102 nv_init_port(nv_port_t *nvp)
2103 {
2104 	nv_ctl_t *nvc = nvp->nvp_ctlp;
2105 	size_t	prd_size = sizeof (prde_t) * NV_DMA_NSEGS;
2106 	dev_info_t *dip = nvc->nvc_dip;
2107 	ddi_device_acc_attr_t dev_attr;
2108 	size_t buf_size;
2109 	ddi_dma_cookie_t cookie;
2110 	uint_t count;
2111 	int rc, i;
2112 
2113 	dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
2114 	dev_attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
2115 	dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
2116 
2117 	if (nvp->nvp_state & NV_PORT_INIT) {
2118 		NVLOG((NVDBG_INIT, nvc, nvp,
2119 		    "nv_init_port previously initialized"));
2120 
2121 		return (NV_SUCCESS);
2122 	} else {
2123 		NVLOG((NVDBG_INIT, nvc, nvp, "nv_init_port initializing"));
2124 	}
2125 
2126 	nvp->nvp_sg_dma_hdl = kmem_zalloc(sizeof (ddi_dma_handle_t) *
2127 	    NV_QUEUE_SLOTS, KM_SLEEP);
2128 
2129 	nvp->nvp_sg_acc_hdl = kmem_zalloc(sizeof (ddi_acc_handle_t) *
2130 	    NV_QUEUE_SLOTS, KM_SLEEP);
2131 
2132 	nvp->nvp_sg_addr = kmem_zalloc(sizeof (caddr_t) *
2133 	    NV_QUEUE_SLOTS, KM_SLEEP);
2134 
2135 	nvp->nvp_sg_paddr = kmem_zalloc(sizeof (uint32_t) *
2136 	    NV_QUEUE_SLOTS, KM_SLEEP);
2137 
2138 	nvp->nvp_slot = kmem_zalloc(sizeof (nv_slot_t) * NV_QUEUE_SLOTS,
2139 	    KM_SLEEP);
2140 
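	/*
	 * For each slot, allocate a DMA handle and DMA-able memory for the
	 * PRD table, then bind it to obtain the physical address that will
	 * be programmed into the bus master engine.
	 */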
2141 	for (i = 0; i < NV_QUEUE_SLOTS; i++) {
2142 
2143 		rc = ddi_dma_alloc_handle(dip, &nv_prd_dma_attr,
2144 		    DDI_DMA_SLEEP, NULL, &(nvp->nvp_sg_dma_hdl[i]));
2145 
2146 		if (rc != DDI_SUCCESS) {
2147 			nv_uninit_port(nvp);
2148 
2149 			return (NV_FAILURE);
2150 		}
2151 
2152 		rc = ddi_dma_mem_alloc(nvp->nvp_sg_dma_hdl[i], prd_size,
2153 		    &dev_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
2154 		    NULL, &(nvp->nvp_sg_addr[i]), &buf_size,
2155 		    &(nvp->nvp_sg_acc_hdl[i]));
2156 
2157 		if (rc != DDI_SUCCESS) {
2158 			nv_uninit_port(nvp);
2159 
2160 			return (NV_FAILURE);
2161 		}
2162 
2163 		rc = ddi_dma_addr_bind_handle(nvp->nvp_sg_dma_hdl[i], NULL,
2164 		    nvp->nvp_sg_addr[i], buf_size,
2165 		    DDI_DMA_WRITE | DDI_DMA_CONSISTENT,
2166 		    DDI_DMA_SLEEP, NULL, &cookie, &count);
2167 
2168 		if (rc != DDI_DMA_MAPPED) {
2169 			nv_uninit_port(nvp);
2170 
2171 			return (NV_FAILURE);
2172 		}
2173 
2174 		ASSERT(count == 1);
2175 		ASSERT((cookie.dmac_address & (sizeof (int) - 1)) == 0);
2176 
2177 		ASSERT(cookie.dmac_laddress <= UINT32_MAX);
2178 
2179 		nvp->nvp_sg_paddr[i] = cookie.dmac_address;
2180 	}
2181 
2182 	/*
2183 	 * nvp_queue_depth represents the actual drive queue depth, not the
2184 	 * number of slots allocated in the structures (which may be more).
2185 	 * Actual queue depth is only learned after the first NCQ command, so
2186 	 * initialize it to 1 for now.
2187 	 */
2188 	nvp->nvp_queue_depth = 1;
2189 
2190 	nvp->nvp_state |= NV_PORT_INIT;
2191 
2192 	return (NV_SUCCESS);
2193 }
2194 
2195 
2196 /*
2197  * Free dynamically allocated structures for port.
2198  */
2199 static void
2200 nv_uninit_port(nv_port_t *nvp)
2201 {
2202 	int i;
2203 
2204 	/*
2205 	 * It is possible to reach here before a port has been initialized or
2206 	 * after it has already been uninitialized.  Just return in that case.
2207 	 */
2208 	if (nvp->nvp_slot == NULL) {
2209 
2210 		return;
2211 	}
2212 
2213 	NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp,
2214 	    "nv_uninit_port uninitializing"));
2215 
2216 	nvp->nvp_type = SATA_DTYPE_NONE;
2217 
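	/*
	 * unbind, free the memory, and free the handle for each slot's
	 * PRD table, in the reverse order of allocation
	 */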
2218 	for (i = 0; i < NV_QUEUE_SLOTS; i++) {
2219 		if (nvp->nvp_sg_paddr[i]) {
2220 			(void) ddi_dma_unbind_handle(nvp->nvp_sg_dma_hdl[i]);
2221 		}
2222 
2223 		if (nvp->nvp_sg_acc_hdl[i] != NULL) {
2224 			ddi_dma_mem_free(&(nvp->nvp_sg_acc_hdl[i]));
2225 		}
2226 
2227 		if (nvp->nvp_sg_dma_hdl[i] != NULL) {
2228 			ddi_dma_free_handle(&(nvp->nvp_sg_dma_hdl[i]));
2229 		}
2230 	}
2231 
2232 	kmem_free(nvp->nvp_slot, sizeof (nv_slot_t) * NV_QUEUE_SLOTS);
2233 	nvp->nvp_slot = NULL;
2234 
2235 	kmem_free(nvp->nvp_sg_dma_hdl,
2236 	    sizeof (ddi_dma_handle_t) * NV_QUEUE_SLOTS);
2237 	nvp->nvp_sg_dma_hdl = NULL;
2238 
2239 	kmem_free(nvp->nvp_sg_acc_hdl,
2240 	    sizeof (ddi_acc_handle_t) * NV_QUEUE_SLOTS);
2241 	nvp->nvp_sg_acc_hdl = NULL;
2242 
2243 	kmem_free(nvp->nvp_sg_addr, sizeof (caddr_t) * NV_QUEUE_SLOTS);
2244 	nvp->nvp_sg_addr = NULL;
2245 
2246 	kmem_free(nvp->nvp_sg_paddr, sizeof (uint32_t) * NV_QUEUE_SLOTS);
2247 	nvp->nvp_sg_paddr = NULL;
2248 
2249 	nvp->nvp_state &= ~NV_PORT_INIT;
2250 	nvp->nvp_signature = 0;
2251 }
2252 
2253 
2254 /*
2255  * Cache register offsets and access handles to frequently accessed registers
 * which are common to both chipsets.
2257  */
2258 static void
2259 nv_common_reg_init(nv_ctl_t *nvc)
2260 {
2261 	uchar_t *bar5_addr = nvc->nvc_bar_addr[5];
2262 	uchar_t *bm_addr_offset, *sreg_offset;
2263 	uint8_t bar, port;
2264 	nv_port_t *nvp;
2265 
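	/*
	 * Port 0's task file registers are mapped through BAR 0/1 and
	 * port 1's through BAR 2/3.  The bus master registers for both
	 * ports share BAR 4, eight bytes apart, and the SATA status
	 * registers live at per-channel offsets within bar5.
	 */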
2266 	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
2267 		if (port == 0) {
2268 			bar = NV_BAR_0;
2269 			bm_addr_offset = 0;
2270 			sreg_offset = (uchar_t *)(CH0_SREG_OFFSET + bar5_addr);
2271 		} else {
2272 			bar = NV_BAR_2;
2273 			bm_addr_offset = (uchar_t *)8;
2274 			sreg_offset = (uchar_t *)(CH1_SREG_OFFSET + bar5_addr);
2275 		}
2276 
2277 		nvp = &(nvc->nvc_port[port]);
2278 		nvp->nvp_ctlp = nvc;
2279 		nvp->nvp_port_num = port;
2280 		NVLOG((NVDBG_INIT, nvc, nvp, "setting up port mappings"));
2281 
2282 		nvp->nvp_cmd_hdl = nvc->nvc_bar_hdl[bar];
2283 		nvp->nvp_cmd_addr = nvc->nvc_bar_addr[bar];
2284 		nvp->nvp_ctl_hdl = nvc->nvc_bar_hdl[bar + 1];
2285 		nvp->nvp_ctl_addr = nvc->nvc_bar_addr[bar + 1];
2286 		nvp->nvp_bm_hdl = nvc->nvc_bar_hdl[NV_BAR_4];
2287 		nvp->nvp_bm_addr = nvc->nvc_bar_addr[NV_BAR_4] +
2288 		    (long)bm_addr_offset;
2289 
2290 		nvp->nvp_sstatus = (uint32_t *)(sreg_offset + NV_SSTATUS);
2291 		nvp->nvp_serror = (uint32_t *)(sreg_offset + NV_SERROR);
2292 		nvp->nvp_sactive = (uint32_t *)(sreg_offset + NV_SACTIVE);
2293 		nvp->nvp_sctrl = (uint32_t *)(sreg_offset + NV_SCTRL);
2294 	}
2295 }
2296 
2297 
2298 static void
2299 nv_uninit_ctl(nv_ctl_t *nvc)
2300 {
2301 	int port;
2302 	nv_port_t *nvp;
2303 
2304 	NVLOG((NVDBG_INIT, nvc, NULL, "nv_uninit_ctl entered"));
2305 
2306 	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
2307 		nvp = &(nvc->nvc_port[port]);
2308 		mutex_enter(&nvp->nvp_mutex);
2309 		NVLOG((NVDBG_INIT, nvc, nvp, "uninitializing port"));
2310 		nv_uninit_port(nvp);
2311 		mutex_exit(&nvp->nvp_mutex);
2312 		mutex_destroy(&nvp->nvp_mutex);
2313 		cv_destroy(&nvp->nvp_poll_cv);
2314 	}
2315 
2316 	kmem_free(nvc->nvc_port, NV_MAX_PORTS(nvc) * sizeof (nv_port_t));
2317 	nvc->nvc_port = NULL;
2318 }
2319 
2320 
2321 /*
2322  * mcp04 interrupt.  This is a wrapper around mcp04_intr_process so
2323  * that interrupts from other devices can be disregarded while dtracing.
2324  */
2325 /* ARGSUSED */
2326 static uint_t
2327 mcp04_intr(caddr_t arg1, caddr_t arg2)
2328 {
2329 	nv_ctl_t *nvc = (nv_ctl_t *)arg1;
2330 	uint8_t intr_status;
2331 	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
2332 
2333 	intr_status = ddi_get8(bar5_hdl, nvc->nvc_mcp04_int_status);
2334 
2335 	if (intr_status == 0) {
2336 
2337 		return (DDI_INTR_UNCLAIMED);
2338 	}
2339 
2340 	mcp04_intr_process(nvc, intr_status);
2341 
2342 	return (DDI_INTR_CLAIMED);
2343 }
2344 
2345 
2346 /*
 * Main interrupt handler for ck804.  Handles normal device
 * interrupts as well as port hot plug and remove interrupts.
 */
2351 static void
2352 mcp04_intr_process(nv_ctl_t *nvc, uint8_t intr_status)
2353 {
2354 
2355 	int port, i;
2356 	nv_port_t *nvp;
2357 	nv_slot_t *nv_slotp;
2358 	uchar_t	status;
2359 	sata_pkt_t *spkt;
2360 	uint8_t bmstatus, clear_bits;
2361 	ddi_acc_handle_t bmhdl;
2362 	int nvcleared = 0;
2363 	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
2364 	uint32_t sstatus;
2365 	int port_mask_hot[] = {
2366 		MCP04_INT_PDEV_HOT, MCP04_INT_SDEV_HOT,
2367 	};
2368 	int port_mask_pm[] = {
2369 		MCP04_INT_PDEV_PM, MCP04_INT_SDEV_PM,
2370 	};
2371 
2372 	NVLOG((NVDBG_INTR, nvc, NULL,
2373 	    "mcp04_intr_process entered intr_status=%x", intr_status));
2374 
	/*
	 * For the command completion interrupt, an explicit clear is not
	 * required; however, for the error cases an explicit clear is
	 * performed.
	 */
2379 	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
2380 
2381 		int port_mask[] = {MCP04_INT_PDEV_INT, MCP04_INT_SDEV_INT};
2382 
2383 		if ((port_mask[port] & intr_status) == 0) {
2384 			continue;
2385 		}
2386 
2387 		NVLOG((NVDBG_INTR, nvc, NULL,
2388 		    "mcp04_intr_process interrupt on port %d", port));
2389 
2390 		nvp = &(nvc->nvc_port[port]);
2391 
2392 		mutex_enter(&nvp->nvp_mutex);
2393 
		/*
		 * A corner case was found where an interrupt arrived
		 * before nvp_slot was set.  The cause of that should be
		 * tracked down and eliminated, at which point this check
		 * can be removed.
		 */
2401 		if (nvp->nvp_slot == NULL) {
2402 			status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_status);
2403 			NVLOG((NVDBG_ALWAYS, nvc, nvp, "spurious interrupt "
2404 			    "received before initialization "
2405 			    "completed status=%x", status));
2406 			mutex_exit(&nvp->nvp_mutex);
2407 
2408 			/*
2409 			 * clear interrupt bits
2410 			 */
2411 			nv_put8(bar5_hdl, nvc->nvc_mcp04_int_status,
2412 			    port_mask[port]);
2413 
2414 			continue;
2415 		}
2416 
2417 		if ((&(nvp->nvp_slot[0]))->nvslot_spkt == NULL)  {
2418 			status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_status);
2419 			NVLOG((NVDBG_ALWAYS, nvc, nvp, "spurious interrupt "
			    "no command in progress status=%x", status));
2421 			mutex_exit(&nvp->nvp_mutex);
2422 
2423 			/*
2424 			 * clear interrupt bits
2425 			 */
2426 			nv_put8(bar5_hdl, nvc->nvc_mcp04_int_status,
2427 			    port_mask[port]);
2428 
2429 			continue;
2430 		}
2431 
2432 		bmhdl = nvp->nvp_bm_hdl;
2433 		bmstatus = nv_get8(bmhdl, nvp->nvp_bmisx);
2434 
2435 		if (!(bmstatus & BMISX_IDEINTS)) {
2436 			mutex_exit(&nvp->nvp_mutex);
2437 
2438 			continue;
2439 		}
2440 
2441 		status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_altstatus);
2442 
2443 		if (status & SATA_STATUS_BSY) {
2444 			mutex_exit(&nvp->nvp_mutex);
2445 
2446 			continue;
2447 		}
2448 
2449 		nv_slotp = &(nvp->nvp_slot[0]);
2450 
2451 		ASSERT(nv_slotp);
2452 
2453 		spkt = nv_slotp->nvslot_spkt;
2454 
2455 		if (spkt == NULL) {
2456 			mutex_exit(&nvp->nvp_mutex);
2457 
2458 			continue;
2459 		}
2460 
2461 		(*nv_slotp->nvslot_intr)(nvp, nv_slotp);
2462 
2463 		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
2464 
2465 		/*
		 * If there is no link, the completion of the packet cannot
		 * be trusted, so abort it.
2468 		 */
2469 		if (nv_check_link((&spkt->satapkt_device)->
2470 		    satadev_scr.sstatus) == B_FALSE) {
2471 
2472 			(void) nv_abort_active(nvp, NULL, SATA_PKT_PORT_ERROR);
2473 
2474 		} else if (nv_slotp->nvslot_flags == NVSLOT_COMPLETE) {
2475 
2476 			nv_complete_io(nvp, spkt, 0);
2477 		}
2478 
2479 		mutex_exit(&nvp->nvp_mutex);
2480 	}
2481 
2482 	/*
2483 	 * mcp04 often doesn't correctly distinguish hot add/remove
2484 	 * interrupts.  Frequently both the ADD and the REMOVE bits
2485 	 * are asserted, whether it was a remove or add.  Use sstatus
2486 	 * to distinguish hot add from hot remove.
2487 	 */
2488 
2489 	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
2490 		clear_bits = 0;
2491 
2492 		nvp = &(nvc->nvc_port[port]);
2493 		mutex_enter(&nvp->nvp_mutex);
2494 
2495 		if ((port_mask_pm[port] & intr_status) != 0) {
2496 			clear_bits = port_mask_pm[port];
2497 			NVLOG((NVDBG_HOT, nvc, nvp,
2498 			    "clearing PM interrupt bit: %x",
2499 			    intr_status & port_mask_pm[port]));
2500 		}
2501 
2502 		if ((port_mask_hot[port] & intr_status) == 0) {
2503 			if (clear_bits != 0) {
2504 				goto clear;
2505 			} else {
2506 				mutex_exit(&nvp->nvp_mutex);
2507 				continue;
2508 			}
2509 		}
2510 
2511 		/*
2512 		 * reaching here means there was a hot add or remove.
2513 		 */
2514 		clear_bits |= port_mask_hot[port];
2515 
2516 		ASSERT(nvc->nvc_port[port].nvp_sstatus);
2517 
2518 		sstatus = nv_get32(bar5_hdl,
2519 		    nvc->nvc_port[port].nvp_sstatus);
2520 
2521 		if ((sstatus & SSTATUS_DET_DEVPRE_PHYCOM) ==
2522 		    SSTATUS_DET_DEVPRE_PHYCOM) {
2523 			nv_report_add_remove(nvp, 0);
2524 		} else {
2525 			nv_report_add_remove(nvp, NV_PORT_HOTREMOVED);
2526 		}
2527 	clear:
2528 		/*
2529 		 * clear interrupt bits.  explicit interrupt clear is
2530 		 * required for hotplug interrupts.
2531 		 */
2532 		nv_put8(bar5_hdl, nvc->nvc_mcp04_int_status, clear_bits);
2533 
2534 		/*
		 * make sure it's flushed and cleared.  If not, try
2536 		 * again.  Sometimes it has been observed to not clear
2537 		 * on the first try.
2538 		 */
2539 		intr_status = nv_get8(bar5_hdl, nvc->nvc_mcp04_int_status);
2540 
2541 		/*
2542 		 * make 10 additional attempts to clear the interrupt
2543 		 */
2544 		for (i = 0; (intr_status & clear_bits) && (i < 10); i++) {
			NVLOG((NVDBG_ALWAYS, nvc, nvp, "intr_status=%x "
2546 			    "still not clear try=%d", intr_status,
2547 			    ++nvcleared));
2548 			nv_put8(bar5_hdl, nvc->nvc_mcp04_int_status,
2549 			    clear_bits);
2550 			intr_status = nv_get8(bar5_hdl,
2551 			    nvc->nvc_mcp04_int_status);
2552 		}
2553 
		/*
		 * If still not clear, log a message and disable the
		 * port.  It is highly unlikely that this path is taken,
		 * but it gives protection against a wedged interrupt.
		 */
2559 		if (intr_status & clear_bits) {
2560 			(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
2561 			nv_port_state_change(nvp, SATA_EVNT_PORT_FAILED,
2562 			    SATA_ADDR_CPORT, SATA_PSTATE_FAILED);
2563 			nvp->nvp_state |= NV_PORT_FAILED;
2564 			(void) nv_abort_active(nvp, NULL, SATA_PKT_DEV_ERROR);
2565 			nv_cmn_err(CE_WARN, nvc, nvp, "unable to clear "
2566 			    "interrupt.  disabling port intr_status=%X",
2567 			    intr_status);
2568 		}
2569 
2570 		mutex_exit(&nvp->nvp_mutex);
2571 	}
2572 }
2573 
2574 
2575 /*
2576  * Interrupt handler for mcp55.  It is invoked by the wrapper for each port
2577  * on the controller, to handle completion and hot plug and remove events.
2578  *
2579  */
2580 static uint_t
2581 mcp55_intr_port(nv_port_t *nvp)
2582 {
2583 	nv_ctl_t *nvc = nvp->nvp_ctlp;
2584 	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
2585 	uint8_t clear = 0, intr_cycles = 0;
2586 	int ret = DDI_INTR_UNCLAIMED;
2587 	uint16_t int_status;
2588 
2589 	NVLOG((NVDBG_INTR, nvc, nvp, "mcp55_intr_port entered"));
2590 
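	/*
	 * Loop, processing and clearing interrupt causes, until the port's
	 * interrupt status register reads clear (ignoring masked bits).
	 */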
2591 	for (;;) {
2592 		/*
2593 		 * read current interrupt status
2594 		 */
2595 		int_status = nv_get16(bar5_hdl, nvp->nvp_mcp55_int_status);
2596 
2597 		NVLOG((NVDBG_INTR, nvc, nvp, "int_status = %x", int_status));
2598 
2599 		/*
		 * MCP55_INT_IGNORE interrupts will show up in the status,
		 * but are masked so they do not generate an interrupt to
		 * the processor.  Ignore them here by masking them out.
2603 		 */
2604 		int_status &= ~(MCP55_INT_IGNORE);
2605 
2606 		/*
2607 		 * exit the loop when no more interrupts to process
2608 		 */
2609 		if (int_status == 0) {
2610 
2611 			break;
2612 		}
2613 
2614 		if (int_status & MCP55_INT_COMPLETE) {
2615 			NVLOG((NVDBG_INTR, nvc, nvp,
2616 			    "mcp55_packet_complete_intr"));
2617 			/*
2618 			 * since int_status was set, return DDI_INTR_CLAIMED
2619 			 * from the DDI's perspective even though the packet
2620 			 * completion may not have succeeded.  If it fails,
			 * the interrupt needs to be cleared manually;
			 * otherwise clearing is implicit.
2623 			 */
2624 			ret = DDI_INTR_CLAIMED;
2625 			if (mcp55_packet_complete_intr(nvc, nvp) ==
2626 			    NV_FAILURE) {
2627 				clear = MCP55_INT_COMPLETE;
2628 			}
2629 		}
2630 
2631 		if (int_status & MCP55_INT_DMA_SETUP) {
2632 			NVLOG((NVDBG_INTR, nvc, nvp, "mcp55_dma_setup_intr"));
2633 
2634 			/*
			 * The DMA setup interrupt needs to be cleared before
			 * starting the BM, so do it now.  Make sure this is
			 * still working.
2637 			 */
2638 			nv_put16(bar5_hdl, nvp->nvp_mcp55_int_status,
2639 			    MCP55_INT_DMA_SETUP);
2640 #ifdef NCQ
2641 			ret = mcp55_dma_setup_intr(nvc, nvp);
2642 #endif
2643 		}
2644 
2645 		if (int_status & MCP55_INT_REM) {
2646 			NVLOG((NVDBG_INTR, nvc, nvp, "mcp55 device removed"));
2647 			clear = MCP55_INT_REM;
2648 			ret = DDI_INTR_CLAIMED;
2649 
2650 			mutex_enter(&nvp->nvp_mutex);
2651 			nv_report_add_remove(nvp, NV_PORT_HOTREMOVED);
2652 			mutex_exit(&nvp->nvp_mutex);
2653 
2654 		} else if (int_status & MCP55_INT_ADD) {
2655 			NVLOG((NVDBG_HOT, nvc, nvp, "mcp55 device added"));
2656 			clear = MCP55_INT_ADD;
2657 			ret = DDI_INTR_CLAIMED;
2658 
2659 			mutex_enter(&nvp->nvp_mutex);
2660 			nv_report_add_remove(nvp, 0);
2661 			mutex_exit(&nvp->nvp_mutex);
2662 		}
2663 
2664 		if (clear) {
2665 			nv_put16(bar5_hdl, nvp->nvp_mcp55_int_status, clear);
2666 			clear = 0;
2667 		}
2668 
2669 		if (intr_cycles++ == NV_MAX_INTR_LOOP) {
2670 			nv_cmn_err(CE_WARN, nvc, nvp, "excessive interrupt "
2671 			    "processing.  Disabling port int_status=%X"
2672 			    " clear=%X", int_status, clear);
2673 			mutex_enter(&nvp->nvp_mutex);
2674 			(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
2675 			nv_port_state_change(nvp, SATA_EVNT_PORT_FAILED,
2676 			    SATA_ADDR_CPORT, SATA_PSTATE_FAILED);
2677 			nvp->nvp_state |= NV_PORT_FAILED;
2678 			(void) nv_abort_active(nvp, NULL, SATA_PKT_DEV_ERROR);
2679 			mutex_exit(&nvp->nvp_mutex);
2680 		}
2681 	}
2682 
2683 	NVLOG((NVDBG_INTR, nvc, nvp, "mcp55_intr_port: finished ret=%d", ret));
2684 
2685 	return (ret);
2686 }
2687 
2688 
2689 /* ARGSUSED */
2690 static uint_t
2691 mcp55_intr(caddr_t arg1, caddr_t arg2)
2692 {
2693 	nv_ctl_t *nvc = (nv_ctl_t *)arg1;
2694 	int ret;
2695 
2696 	ret = mcp55_intr_port(&(nvc->nvc_port[0]));
2697 	ret |= mcp55_intr_port(&(nvc->nvc_port[1]));
2698 
2699 	return (ret);
2700 }
2701 
2702 
2703 #ifdef NCQ
2704 /*
 * With software driven NCQ on mcp55, an interrupt occurs right
2706  * before the drive is ready to do a DMA transfer.  At this point,
2707  * the PRD table needs to be programmed and the DMA engine enabled
2708  * and ready to go.
2709  *
2710  * -- MCP_SATA_AE_INT_STATUS_SDEV_DMA_SETUP indicates the interrupt
2711  * -- MCP_SATA_AE_NCQ_PDEV_DMA_SETUP_TAG shows which command is ready
2712  * -- clear bit 0 of master command reg
2713  * -- program PRD
2714  * -- clear the interrupt status bit for the DMA Setup FIS
2715  * -- set bit 0 of the bus master command register
2716  */
2717 static int
2718 mcp55_dma_setup_intr(nv_ctl_t *nvc, nv_port_t *nvp)
2719 {
2720 	int slot;
2721 	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
2722 	uint8_t bmicx;
2723 	int port = nvp->nvp_port_num;
2724 	uint8_t tag_shift[] = {MCP_SATA_AE_NCQ_PDEV_DMA_SETUP_TAG_SHIFT,
2725 	    MCP_SATA_AE_NCQ_SDEV_DMA_SETUP_TAG_SHIFT};
2726 
	nv_cmn_err(CE_PANIC, nvc, nvp,
	    "this should not be executed at all until NCQ is enabled");
2729 
2730 	mutex_enter(&nvp->nvp_mutex);
2731 
2732 	slot = nv_get32(nvc->nvc_bar_hdl[5], nvc->nvc_mcp55_ncq);
2733 
2734 	slot = (slot >> tag_shift[port]) & MCP_SATA_AE_NCQ_DMA_SETUP_TAG_MASK;
2735 
2736 	NVLOG((NVDBG_INTR, nvc, nvp, "mcp55_dma_setup_intr slot %d"
2737 	    " nvp_slot_sactive %X", slot, nvp->nvp_sactive_cache));
2738 
2739 	/*
2740 	 * halt the DMA engine.  This step is necessary according to
2741 	 * the mcp55 spec, probably since there may have been a "first" packet
2742 	 * that already programmed the DMA engine, but may not turn out to
2743 	 * be the first one processed.
2744 	 */
2745 	bmicx = nv_get8(bmhdl, nvp->nvp_bmicx);
2746 
2747 #if 0
2748 	if (bmicx & BMICX_SSBM) {
2749 		NVLOG((NVDBG_INTR, nvc, nvp, "BM was already enabled for "
2750 		    "another packet.  Cancelling and reprogramming"));
2751 		nv_put8(bmhdl, nvp->nvp_bmicx,  bmicx & ~BMICX_SSBM);
2752 	}
2753 #endif
2754 	nv_put8(bmhdl, nvp->nvp_bmicx,  bmicx & ~BMICX_SSBM);
2755 
2756 	nv_start_dma_engine(nvp, slot);
2757 
2758 	mutex_exit(&nvp->nvp_mutex);
2759 
2760 	return (DDI_INTR_CLAIMED);
2761 }
2762 #endif /* NCQ */
2763 
2764 
2765 /*
2766  * packet completion interrupt.  If the packet is complete, invoke
2767  * the packet completion callback.
2768  */
2769 static int
2770 mcp55_packet_complete_intr(nv_ctl_t *nvc, nv_port_t *nvp)
2771 {
2772 	uint8_t status, bmstatus;
2773 	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
2774 	int sactive;
2775 	int active_pkt_bit = 0, active_pkt = 0, ncq_command = B_FALSE;
2776 	sata_pkt_t *spkt;
2777 	nv_slot_t *nv_slotp;
2778 
2779 	mutex_enter(&nvp->nvp_mutex);
2780 
2781 	bmstatus = nv_get8(bmhdl, nvp->nvp_bmisx);
2782 
2783 	if (!(bmstatus & BMISX_IDEINTS)) {
2784 		NVLOG((NVDBG_INTR, nvc, nvp, "BMISX_IDEINTS not set"));
2785 		mutex_exit(&nvp->nvp_mutex);
2786 
2787 		return (NV_FAILURE);
2788 	}
2789 
2790 	/*
2791 	 * If the just completed item is a non-ncq command, the busy
2792 	 * bit should not be set
2793 	 */
2794 	if (nvp->nvp_non_ncq_run) {
2795 		status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_altstatus);
2796 		if (status & SATA_STATUS_BSY) {
2797 			nv_cmn_err(CE_WARN, nvc, nvp,
2798 			    "unexpected SATA_STATUS_BSY set");
2799 			mutex_exit(&nvp->nvp_mutex);
2800 			/*
2801 			 * calling function will clear interrupt.  then
2802 			 * the real interrupt will either arrive or the
2803 			 * packet timeout handling will take over and
2804 			 * reset.
2805 			 */
2806 			return (NV_FAILURE);
2807 		}
2808 
2809 	} else {
2810 		/*
		 * NCQ: check for BSY here and wait if still busy before
		 * continuing.  Rather than waiting for it to be cleared
		 * when starting a packet and wasting CPU time, the starting
		 * thread can exit immediately, but might then have to spin
		 * here for a while.  Needs more work and experimentation.
2816 		 */
2817 		ASSERT(nvp->nvp_ncq_run);
2818 	}
2819 
2820 
2821 	if (nvp->nvp_ncq_run) {
2822 		ncq_command = B_TRUE;
2823 		ASSERT(nvp->nvp_non_ncq_run == 0);
2824 	} else {
2825 		ASSERT(nvp->nvp_non_ncq_run != 0);
2826 	}
2827 
2828 	/*
2829 	 * active_pkt_bit will represent the bitmap of the single completed
2830 	 * packet.  Because of the nature of sw assisted NCQ, only one
2831 	 * command will complete per interrupt.
2832 	 */
2833 
2834 	if (ncq_command == B_FALSE) {
2835 		active_pkt = 0;
2836 	} else {
2837 		/*
2838 		 * NCQ: determine which command just completed, by examining
2839 		 * which bit cleared in the register since last written.
2840 		 */
2841 		sactive = nv_get32(nvc->nvc_bar_hdl[5], nvp->nvp_sactive);
2842 
2843 		active_pkt_bit = ~sactive & nvp->nvp_sactive_cache;
2844 
2845 		ASSERT(active_pkt_bit);
2846 
2847 
2848 		/*
2849 		 * this failure path needs more work to handle the
2850 		 * error condition and recovery.
2851 		 */
2852 		if (active_pkt_bit == 0) {
2853 			ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
2854 
2855 			nv_cmn_err(CE_CONT, nvc, nvp, "ERROR sactive = %X  "
2856 			    "nvp->nvp_sactive %X", sactive,
2857 			    nvp->nvp_sactive_cache);
2858 
2859 			(void) nv_get8(cmdhdl, nvp->nvp_status);
2860 
2861 			mutex_exit(&nvp->nvp_mutex);
2862 
2863 			return (NV_FAILURE);
2864 		}
2865 
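		/*
		 * convert the single set bit into a slot index by shifting
		 * until that bit reaches position 0
		 */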
2866 		for (active_pkt = 0; (active_pkt_bit & 0x1) != 0x1;
2867 		    active_pkt++, active_pkt_bit >>= 1) {
2868 		}
2869 
2870 		/*
2871 		 * make sure only one bit is ever turned on
2872 		 */
2873 		ASSERT(active_pkt_bit == 1);
2874 
2875 		nvp->nvp_sactive_cache &= ~(0x01 << active_pkt);
2876 	}
2877 
2878 	nv_slotp = &(nvp->nvp_slot[active_pkt]);
2879 
2880 	spkt = nv_slotp->nvslot_spkt;
2881 
2882 	ASSERT(spkt != NULL);
2883 
2884 	(*nv_slotp->nvslot_intr)(nvp, nv_slotp);
2885 
2886 	nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
2887 
2888 	/*
	 * If there is no link, the completion of the packet cannot be
	 * trusted, so abort it.
2891 	 */
2892 	if (nv_check_link((&spkt->satapkt_device)->
2893 	    satadev_scr.sstatus) == B_FALSE) {
2894 		(void) nv_abort_active(nvp, NULL, SATA_PKT_PORT_ERROR);
2895 
2896 	} else if (nv_slotp->nvslot_flags == NVSLOT_COMPLETE) {
2897 
2898 		nv_complete_io(nvp, spkt, active_pkt);
2899 	}
2900 
2901 	mutex_exit(&nvp->nvp_mutex);
2902 
2903 	return (NV_SUCCESS);
2904 }
2905 
2906 
2907 static void
2908 nv_complete_io(nv_port_t *nvp, sata_pkt_t *spkt, int slot)
2909 {
2910 
2911 	ASSERT(MUTEX_HELD(&nvp->nvp_mutex));
2912 
2913 	if ((&(nvp->nvp_slot[slot]))->nvslot_flags & NVSLOT_NCQ) {
2914 		nvp->nvp_ncq_run--;
2915 	} else {
2916 		nvp->nvp_non_ncq_run--;
2917 	}
2918 
2919 	/*
	 * mark the packet slot idle so it can be reused.  Do this before
	 * calling satapkt_comp so the slot is available for reuse by the
	 * time the completion callback runs.
2922 	 */
2923 	(&(nvp->nvp_slot[slot]))->nvslot_spkt = NULL;
2924 
2925 	if (spkt->satapkt_op_mode & SATA_OPMODE_SYNCH) {
2926 		/*
		 * If this is not a timed polled mode cmd, which has an
		 * active thread monitoring for completion, then the
		 * sleeping thread needs to be signaled that the cmd is
		 * complete.
2930 		 */
2931 		if ((spkt->satapkt_op_mode & SATA_OPMODE_POLLING) == 0) {
2932 			cv_signal(&nvp->nvp_poll_cv);
2933 		}
2934 
2935 		return;
2936 	}
2937 
2938 	if (spkt->satapkt_comp != NULL) {
2939 		mutex_exit(&nvp->nvp_mutex);
2940 		(*spkt->satapkt_comp)(spkt);
2941 		mutex_enter(&nvp->nvp_mutex);
2942 	}
2943 }
2944 
2945 
2946 /*
 * Check whether the packet is an ncq command or not.  For an ncq command,
 * start it if there is still room on the queue.  For a non-ncq command,
 * only start it if no other command is running.
2950  */
2951 static int
2952 nv_start_async(nv_port_t *nvp, sata_pkt_t *spkt)
2953 {
2954 	uint8_t cmd, ncq;
2955 
2956 	NVLOG((NVDBG_ENTRY, nvp->nvp_ctlp, nvp, "nv_start_async: entry"));
2957 
2958 	cmd = spkt->satapkt_cmd.satacmd_cmd_reg;
2959 
2960 	ncq = ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
2961 	    (cmd == SATAC_READ_FPDMA_QUEUED));
2962 
2963 	if (ncq == B_FALSE) {
2964 
2965 		if ((nvp->nvp_non_ncq_run == 1) ||
2966 		    (nvp->nvp_ncq_run > 0)) {
2967 			/*
2968 			 * next command is non-ncq which can't run
2969 			 * concurrently.  exit and return queue full.
2970 			 */
2971 			spkt->satapkt_reason = SATA_PKT_QUEUE_FULL;
2972 
2973 			return (SATA_TRAN_QUEUE_FULL);
2974 		}
2975 
2976 		return (nv_start_common(nvp, spkt));
2977 	}
2978 
2979 	/*
2980 	 * ncq == B_TRUE
2981 	 */
2982 	if (nvp->nvp_non_ncq_run == 1) {
2983 		/*
2984 		 * cannot start any NCQ commands when there
2985 		 * is a non-NCQ command running.
2986 		 */
2987 		spkt->satapkt_reason = SATA_PKT_QUEUE_FULL;
2988 
2989 		return (SATA_TRAN_QUEUE_FULL);
2990 	}
2991 
2992 #ifdef NCQ
2993 	/*
2994 	 * this is not compiled for now as satapkt_device.satadev_qdepth
2995 	 * is being pulled out until NCQ support is later addressed
2996 	 *
2997 	 * nvp_queue_depth is initialized by the first NCQ command
2998 	 * received.
2999 	 */
3000 	if (nvp->nvp_queue_depth == 1) {
3001 		nvp->nvp_queue_depth =
3002 		    spkt->satapkt_device.satadev_qdepth;
3003 
3004 		ASSERT(nvp->nvp_queue_depth > 1);
3005 
3006 		NVLOG((NVDBG_ENTRY, nvp->nvp_ctlp, nvp,
3007 		    "nv_process_queue: nvp_queue_depth set to %d",
3008 		    nvp->nvp_queue_depth));
3009 	}
3010 #endif
3011 
3012 	if (nvp->nvp_ncq_run >= nvp->nvp_queue_depth) {
3013 		/*
3014 		 * max number of NCQ commands already active
3015 		 */
3016 		spkt->satapkt_reason = SATA_PKT_QUEUE_FULL;
3017 
3018 		return (SATA_TRAN_QUEUE_FULL);
3019 	}
3020 
3021 	return (nv_start_common(nvp, spkt));
3022 }
3023 
3024 
3025 /*
 * configure INTx (legacy) interrupts
3027  */
3028 static int
3029 nv_add_legacy_intrs(nv_ctl_t *nvc)
3030 {
3031 	dev_info_t	*devinfo = nvc->nvc_dip;
3032 	int		actual, count = 0;
3033 	int		x, y, rc, inum = 0;
3034 
3035 	NVLOG((NVDBG_ENTRY, nvc, NULL, "nv_add_legacy_intrs"));
3036 
3037 	/*
3038 	 * get number of interrupts
3039 	 */
3040 	rc = ddi_intr_get_nintrs(devinfo, DDI_INTR_TYPE_FIXED, &count);
3041 	if ((rc != DDI_SUCCESS) || (count == 0)) {
3042 		NVLOG((NVDBG_INTR, nvc, NULL,
3043 		    "ddi_intr_get_nintrs() failed, "
3044 		    "rc %d count %d", rc, count));
3045 
3046 		return (DDI_FAILURE);
3047 	}
3048 
3049 	/*
3050 	 * allocate an array of interrupt handles
3051 	 */
3052 	nvc->nvc_intr_size = count * sizeof (ddi_intr_handle_t);
3053 	nvc->nvc_htable = kmem_zalloc(nvc->nvc_intr_size, KM_SLEEP);
3054 
3055 	/*
3056 	 * call ddi_intr_alloc()
3057 	 */
3058 	rc = ddi_intr_alloc(devinfo, nvc->nvc_htable, DDI_INTR_TYPE_FIXED,
3059 	    inum, count, &actual, DDI_INTR_ALLOC_STRICT);
3060 
3061 	if ((rc != DDI_SUCCESS) || (actual == 0)) {
3062 		nv_cmn_err(CE_WARN, nvc, NULL,
3063 		    "ddi_intr_alloc() failed, rc %d", rc);
3064 		kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
3065 
3066 		return (DDI_FAILURE);
3067 	}
3068 
3069 	if (actual < count) {
3070 		nv_cmn_err(CE_WARN, nvc, NULL,
3071 		    "ddi_intr_alloc: requested: %d, received: %d",
3072 		    count, actual);
3073 
3074 		goto failure;
3075 	}
3076 
3077 	nvc->nvc_intr_cnt = actual;
3078 
3079 	/*
3080 	 * get intr priority
3081 	 */
3082 	if (ddi_intr_get_pri(nvc->nvc_htable[0], &nvc->nvc_intr_pri) !=
3083 	    DDI_SUCCESS) {
3084 		nv_cmn_err(CE_WARN, nvc, NULL, "ddi_intr_get_pri() failed");
3085 
3086 		goto failure;
3087 	}
3088 
3089 	/*
3090 	 * Test for high level mutex
3091 	 */
3092 	if (nvc->nvc_intr_pri >= ddi_intr_get_hilevel_pri()) {
3093 		nv_cmn_err(CE_WARN, nvc, NULL,
3094 		    "nv_add_legacy_intrs: high level intr not supported");
3095 
3096 		goto failure;
3097 	}
3098 
3099 	for (x = 0; x < actual; x++) {
3100 		if (ddi_intr_add_handler(nvc->nvc_htable[x],
3101 		    nvc->nvc_interrupt, (caddr_t)nvc, NULL) != DDI_SUCCESS) {
3102 			nv_cmn_err(CE_WARN, nvc, NULL,
3103 			    "ddi_intr_add_handler() failed");
3104 
3105 			goto failure;
3106 		}
3107 	}
3108 
3109 	/*
3110 	 * call ddi_intr_enable() for legacy interrupts
3111 	 */
3112 	for (x = 0; x < nvc->nvc_intr_cnt; x++) {
3113 		(void) ddi_intr_enable(nvc->nvc_htable[x]);
3114 	}
3115 
3116 	return (DDI_SUCCESS);
3117 
3118 	failure:
3119 	/*
3120 	 * free allocated intr and nvc_htable
3121 	 */
3122 	for (y = 0; y < actual; y++) {
3123 		(void) ddi_intr_free(nvc->nvc_htable[y]);
3124 	}
3125 
3126 	kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
3127 
3128 	return (DDI_FAILURE);
3129 }
3130 
3131 #ifdef	NV_MSI_SUPPORTED
3132 /*
3133  * configure MSI interrupts
3134  */
3135 static int
3136 nv_add_msi_intrs(nv_ctl_t *nvc)
3137 {
3138 	dev_info_t	*devinfo = nvc->nvc_dip;
3139 	int		count, avail, actual;
3140 	int		x, y, rc, inum = 0;
3141 
3142 	NVLOG((NVDBG_ENTRY, nvc, NULL, "nv_add_msi_intrs"));
3143 
3144 	/*
3145 	 * get number of interrupts
3146 	 */
3147 	rc = ddi_intr_get_nintrs(devinfo, DDI_INTR_TYPE_MSI, &count);
3148 	if ((rc != DDI_SUCCESS) || (count == 0)) {
3149 		nv_cmn_err(CE_WARN, nvc, NULL,
3150 		    "ddi_intr_get_nintrs() failed, "
3151 		    "rc %d count %d", rc, count);
3152 
3153 		return (DDI_FAILURE);
3154 	}
3155 
3156 	/*
3157 	 * get number of available interrupts
3158 	 */
3159 	rc = ddi_intr_get_navail(devinfo, DDI_INTR_TYPE_MSI, &avail);
3160 	if ((rc != DDI_SUCCESS) || (avail == 0)) {
3161 		nv_cmn_err(CE_WARN, nvc, NULL,
3162 		    "ddi_intr_get_navail() failed, "
3163 		    "rc %d avail %d", rc, avail);
3164 
3165 		return (DDI_FAILURE);
3166 	}
3167 
3168 	if (avail < count) {
3169 		nv_cmn_err(CE_WARN, nvc, NULL,
		    "ddi_intr_get_navail returned %d ddi_intr_get_nintrs: %d",
3171 		    avail, count);
3172 	}
3173 
3174 	/*
3175 	 * allocate an array of interrupt handles
3176 	 */
3177 	nvc->nvc_intr_size = count * sizeof (ddi_intr_handle_t);
3178 	nvc->nvc_htable = kmem_alloc(nvc->nvc_intr_size, KM_SLEEP);
3179 
3180 	rc = ddi_intr_alloc(devinfo, nvc->nvc_htable, DDI_INTR_TYPE_MSI,
3181 	    inum, count, &actual, DDI_INTR_ALLOC_NORMAL);
3182 
3183 	if ((rc != DDI_SUCCESS) || (actual == 0)) {
3184 		nv_cmn_err(CE_WARN, nvc, NULL,
3185 		    "ddi_intr_alloc() failed, rc %d", rc);
3186 		kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
3187 
3188 		return (DDI_FAILURE);
3189 	}
3190 
3191 	/*
3192 	 * Use interrupt count returned or abort?
3193 	 */
3194 	if (actual < count) {
3195 		NVLOG((NVDBG_INIT, nvc, NULL,
3196 		    "Requested: %d, Received: %d", count, actual));
3197 	}
3198 
3199 	nvc->nvc_intr_cnt = actual;
3200 
3201 	/*
3202 	 * get priority for first msi, assume remaining are all the same
3203 	 */
3204 	if (ddi_intr_get_pri(nvc->nvc_htable[0], &nvc->nvc_intr_pri) !=
3205 	    DDI_SUCCESS) {
3206 		nv_cmn_err(CE_WARN, nvc, NULL, "ddi_intr_get_pri() failed");
3207 
3208 		goto failure;
3209 	}
3210 
3211 	/*
3212 	 * test for high level mutex
3213 	 */
3214 	if (nvc->nvc_intr_pri >= ddi_intr_get_hilevel_pri()) {
3215 		nv_cmn_err(CE_WARN, nvc, NULL,
3216 		    "nv_add_msi_intrs: high level intr not supported");
3217 
3218 		goto failure;
3219 	}
3220 
3221 	/*
3222 	 * Call ddi_intr_add_handler()
3223 	 */
3224 	for (x = 0; x < actual; x++) {
3225 		if (ddi_intr_add_handler(nvc->nvc_htable[x],
3226 		    nvc->nvc_interrupt, (caddr_t)nvc, NULL) != DDI_SUCCESS) {
3227 			nv_cmn_err(CE_WARN, nvc, NULL,
3228 			    "ddi_intr_add_handler() failed");
3229 
3230 			goto failure;
3231 		}
3232 	}
3233 
3234 	(void) ddi_intr_get_cap(nvc->nvc_htable[0], &nvc->nvc_intr_cap);
3235 
3236 	if (nvc->nvc_intr_cap & DDI_INTR_FLAG_BLOCK) {
3237 		(void) ddi_intr_block_enable(nvc->nvc_htable,
3238 		    nvc->nvc_intr_cnt);
3239 	} else {
3240 		/*
3241 		 * Call ddi_intr_enable() for MSI non block enable
3242 		 */
3243 		for (x = 0; x < nvc->nvc_intr_cnt; x++) {
3244 			(void) ddi_intr_enable(nvc->nvc_htable[x]);
3245 		}
3246 	}
3247 
3248 	return (DDI_SUCCESS);
3249 
3250 	failure:
3251 	/*
3252 	 * free allocated intr and nvc_htable
3253 	 */
3254 	for (y = 0; y < actual; y++) {
3255 		(void) ddi_intr_free(nvc->nvc_htable[y]);
3256 	}
3257 
3258 	kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
3259 
3260 	return (DDI_FAILURE);
3261 }
3262 #endif
3263 
3264 
3265 static void
3266 nv_rem_intrs(nv_ctl_t *nvc)
3267 {
3268 	int x, i;
3269 	nv_port_t *nvp;
3270 
3271 	NVLOG((NVDBG_ENTRY, nvc, NULL, "nv_rem_intrs"));
3272 
3273 	/*
3274 	 * prevent controller from generating interrupts by
3275 	 * masking them out.  This is an extra precaution.
3276 	 */
3277 	for (i = 0; i < NV_MAX_PORTS(nvc); i++) {
3278 		nvp = (&nvc->nvc_port[i]);
3279 		mutex_enter(&nvp->nvp_mutex);
3280 		(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
3281 		mutex_exit(&nvp->nvp_mutex);
3282 	}
3283 
3284 	/*
3285 	 * disable all interrupts
3286 	 */
3287 	if ((nvc->nvc_intr_type == DDI_INTR_TYPE_MSI) &&
3288 	    (nvc->nvc_intr_cap & DDI_INTR_FLAG_BLOCK)) {
3289 		(void) ddi_intr_block_disable(nvc->nvc_htable,
3290 		    nvc->nvc_intr_cnt);
3291 	} else {
3292 		for (x = 0; x < nvc->nvc_intr_cnt; x++) {
3293 			(void) ddi_intr_disable(nvc->nvc_htable[x]);
3294 		}
3295 	}
3296 
3297 	for (x = 0; x < nvc->nvc_intr_cnt; x++) {
3298 		(void) ddi_intr_remove_handler(nvc->nvc_htable[x]);
3299 		(void) ddi_intr_free(nvc->nvc_htable[x]);
3300 	}
3301 
3302 	kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
3303 }
3304 
3305 
3306 /*
 * variable argument wrapper for cmn_err.  Prefixes the instance and port
 * number when possible.
3309  */
3310 static void
3311 nv_vcmn_err(int ce, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, va_list ap)
3312 {
3313 	char port[NV_STRING_10];
3314 	char inst[NV_STRING_10];
3315 
3316 	mutex_enter(&nv_log_mutex);
3317 
3318 	if (nvc) {
3319 		(void) snprintf(inst, NV_STRING_10, "inst %d",
3320 		    ddi_get_instance(nvc->nvc_dip));
3321 	} else {
3322 		inst[0] = '\0';
3323 	}
3324 
3325 	if (nvp) {
3326 		(void) sprintf(port, " port %d", nvp->nvp_port_num);
3327 	} else {
3328 		port[0] = '\0';
3329 	}
3330 
3331 	(void) sprintf(nv_log_buf, "nv_sata %s%s%s", inst, port,
	    (inst[0] | port[0] ? ": " : ""));
3333 
3334 	(void) vsnprintf(&nv_log_buf[strlen(nv_log_buf)],
3335 	    NV_STRING_512 - strlen(nv_log_buf), fmt, ap);
3336 
3337 	/*
3338 	 * normally set to log to console but in some debug situations it
3339 	 * may be useful to log only to a file.
3340 	 */
3341 	if (nv_log_to_console) {
3342 		if (nv_prom_print) {
3343 			prom_printf("%s\n", nv_log_buf);
3344 		} else {
3345 			cmn_err(ce, "%s", nv_log_buf);
3346 		}
3347 
3348 
3349 	} else {
3350 		cmn_err(ce, "!%s", nv_log_buf);
3351 	}
3352 
3353 	mutex_exit(&nv_log_mutex);
3354 }
3355 
3356 
3357 /*
3358  * wrapper for cmn_err
3359  */
3360 static void
3361 nv_cmn_err(int ce, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, ...)
3362 {
3363 	va_list ap;
3364 
3365 	va_start(ap, fmt);
3366 	nv_vcmn_err(ce, nvc, nvp, fmt, ap);
3367 	va_end(ap);
3368 }
3369 
3370 
3371 #if defined(DEBUG)
3372 /*
 * prefixes the debug message with the instance and port number when possible
3374  */
3375 static void
3376 nv_log(uint_t flag, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, ...)
3377 {
3378 	va_list ap;
3379 
3380 	if ((nv_debug_flags & flag) == 0) {
3381 		return;
3382 	}
3383 
3384 	va_start(ap, fmt);
3385 	nv_vcmn_err(CE_NOTE, nvc, nvp, fmt, ap);
3386 	va_end(ap);
3387 
3388 	/*
3389 	 * useful for some debugging situations
3390 	 */
3391 	if (nv_log_delay) {
3392 		drv_usecwait(nv_log_delay);
3393 	}
3394 
3395 }
3396 #endif /* DEBUG */
3397 
3398 
3399 /*
3400  * program registers which are common to all commands
3401  */
3402 static void
3403 nv_program_taskfile_regs(nv_port_t *nvp, int slot)
3404 {
3405 	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
3406 	sata_pkt_t *spkt;
3407 	sata_cmd_t *satacmd;
3408 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
3409 	uint8_t cmd, ncq = B_FALSE;
3410 
3411 	spkt = nv_slotp->nvslot_spkt;
3412 	satacmd = &spkt->satapkt_cmd;
3413 	cmd = satacmd->satacmd_cmd_reg;
3414 
3415 	ASSERT(nvp->nvp_slot);
3416 
3417 	if ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
3418 	    (cmd == SATAC_READ_FPDMA_QUEUED)) {
3419 		ncq = B_TRUE;
3420 	}
3421 
3422 	/*
3423 	 * select the drive
3424 	 */
3425 	nv_put8(cmdhdl, nvp->nvp_drvhd, satacmd->satacmd_device_reg);
3426 
3427 	/*
	 * make certain the drive is selected
3429 	 */
3430 	if (nv_wait(nvp, SATA_STATUS_DRDY, SATA_STATUS_BSY,
3431 	    NV_SEC2USEC(5), 0) == B_FALSE) {
3432 
3433 		return;
3434 	}
3435 
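	/*
	 * program the address and count registers according to the
	 * addressing mode of the command
	 */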
3436 	switch (spkt->satapkt_cmd.satacmd_addr_type) {
3437 
3438 	case ATA_ADDR_LBA:
3439 		NVLOG((NVDBG_DELIVER, nvp->nvp_ctlp, nvp, "ATA_ADDR_LBA mode"));
3440 
3441 		nv_put8(cmdhdl, nvp->nvp_count, satacmd->satacmd_sec_count_lsb);
3442 		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
3443 		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
3444 		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
3445 
3446 		break;
3447 
3448 	case ATA_ADDR_LBA28:
3449 		NVLOG((NVDBG_DELIVER, nvp->nvp_ctlp, nvp,
3450 		    "ATA_ADDR_LBA28 mode"));
3451 		/*
3452 		 * NCQ only uses 48-bit addressing
3453 		 */
3454 		ASSERT(ncq != B_TRUE);
3455 
3456 		nv_put8(cmdhdl, nvp->nvp_count, satacmd->satacmd_sec_count_lsb);
3457 		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
3458 		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
3459 		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
3460 
3461 		break;
3462 
3463 	case ATA_ADDR_LBA48:
3464 		NVLOG((NVDBG_DELIVER, nvp->nvp_ctlp, nvp,
3465 		    "ATA_ADDR_LBA48 mode"));
3466 
3467 		/*
		 * For NCQ, the tag goes into the count register and the
		 * real sector count into the features register.  The sata
		 * module does the translation in the satacmd.
3471 		 */
3472 		if (ncq == B_TRUE) {
3473 			nv_put8(cmdhdl, nvp->nvp_count, slot << 3);
3474 			nv_put8(cmdhdl, nvp->nvp_feature,
3475 			    satacmd->satacmd_features_reg_ext);
3476 			nv_put8(cmdhdl, nvp->nvp_feature,
3477 			    satacmd->satacmd_features_reg);
3478 		} else {
3479 			nv_put8(cmdhdl, nvp->nvp_count,
3480 			    satacmd->satacmd_sec_count_msb);
3481 			nv_put8(cmdhdl, nvp->nvp_count,
3482 			    satacmd->satacmd_sec_count_lsb);
3483 		}
3484 
3485 		/*
3486 		 * send the high-order half first
3487 		 */
3488 		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_msb);
3489 		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_msb);
3490 		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_msb);
3491 		/*
3492 		 * Send the low-order half
3493 		 */
3494 		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
3495 		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
3496 		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
3497 
3498 		break;
3499 
3500 	case 0:
3501 		/*
3502 		 * non-media access commands such as identify and features
3503 		 * take this path.
3504 		 */
3505 		nv_put8(cmdhdl, nvp->nvp_count, satacmd->satacmd_sec_count_lsb);
3506 		nv_put8(cmdhdl, nvp->nvp_feature,
3507 		    satacmd->satacmd_features_reg);
3508 		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
3509 		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
3510 		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
3511 
3512 		break;
3513 
3514 	default:
3515 		break;
3516 	}
3517 
3518 	ASSERT(nvp->nvp_slot);
3519 }
3520 
3521 
3522 /*
3523  * start a command that involves no media access
3524  */
3525 static int
3526 nv_start_nodata(nv_port_t *nvp, int slot)
3527 {
3528 	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
3529 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
3530 	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
3531 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
3532 
3533 	nv_program_taskfile_regs(nvp, slot);
3534 
3535 	/*
3536 	 * This next one sets the controller in motion
3537 	 */
3538 	nv_put8(cmdhdl, nvp->nvp_cmd, sata_cmdp->satacmd_cmd_reg);
3539 
3540 	return (SATA_TRAN_ACCEPTED);
3541 }
3542 
3543 
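/*
 * Read the bus master status register and clear the interrupt and error
 * latches, returning the value that was read.
 */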
3544 int
3545 nv_bm_status_clear(nv_port_t *nvp)
3546 {
3547 	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
3548 	uchar_t	status, ret;
3549 
3550 	/*
3551 	 * Get the current BM status
3552 	 */
3553 	ret = status = nv_get8(bmhdl, nvp->nvp_bmisx);
3554 
3555 	status = (status & BMISX_MASK) | BMISX_IDERR | BMISX_IDEINTS;
3556 
3557 	/*
3558 	 * Clear the latches (and preserve the other bits)
3559 	 */
3560 	nv_put8(bmhdl, nvp->nvp_bmisx, status);
3561 
3562 	return (ret);
3563 }
3564 
3565 
3566 /*
3567  * program the bus master DMA engine with the PRD address for
3568  * the active slot command, and start the DMA engine.
3569  */
3570 static void
3571 nv_start_dma_engine(nv_port_t *nvp, int slot)
3572 {
3573 	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
3574 	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
3575 	uchar_t direction;
3576 
3577 	ASSERT(nv_slotp->nvslot_spkt != NULL);
3578 
3579 	if (nv_slotp->nvslot_spkt->satapkt_cmd.satacmd_flags.sata_data_direction
3580 	    == SATA_DIR_READ) {
3581 		direction = BMICX_RWCON_WRITE_TO_MEMORY;
3582 	} else {
3583 		direction = BMICX_RWCON_READ_FROM_MEMORY;
3584 	}
3585 
3586 	NVLOG((NVDBG_DELIVER, nvp->nvp_ctlp, nvp,
3587 	    "nv_start_dma_engine entered"));
3588 
3589 	/*
3590 	 * reset the controller's interrupt and error status bits
3591 	 */
3592 	(void) nv_bm_status_clear(nvp);
3593 
3594 	/*
3595 	 * program the PRD table physical start address
3596 	 */
3597 	nv_put32(bmhdl, nvp->nvp_bmidtpx, nvp->nvp_sg_paddr[slot]);
3598 
3599 	/*
3600 	 * set the direction control and start the DMA controller
3601 	 */
3602 	nv_put8(bmhdl, nvp->nvp_bmicx, direction | BMICX_SSBM);
3603 }
3604 
3605 /*
3606  * start dma command, either in or out
3607  */
3608 static int
3609 nv_start_dma(nv_port_t *nvp, int slot)
3610 {
3611 	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
3612 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
3613 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
3614 	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
3615 	uint8_t cmd = sata_cmdp->satacmd_cmd_reg;
3616 #ifdef NCQ
3617 	uint8_t ncq = B_FALSE;
3618 #endif
3619 	ddi_acc_handle_t sghdl = nvp->nvp_sg_acc_hdl[slot];
3620 	uint_t *dstp = (uint_t *)nvp->nvp_sg_addr[slot];
3621 	int sg_count = sata_cmdp->satacmd_num_dma_cookies, idx;
3622 	ddi_dma_cookie_t  *srcp = sata_cmdp->satacmd_dma_cookie_list;
3623 
3624 	ASSERT(sg_count != 0);
3625 
3626 	if (sata_cmdp->satacmd_num_dma_cookies > NV_DMA_NSEGS) {
3627 		nv_cmn_err(CE_WARN, nvp->nvp_ctlp, nvp, "NV_DMA_NSEGS=%d <"
3628 		    " satacmd_num_dma_cookies=%d", NV_DMA_NSEGS,
3629 		    sata_cmdp->satacmd_num_dma_cookies);
3630 
3631 		return (NV_FAILURE);
3632 	}
3633 
3634 	nv_program_taskfile_regs(nvp, slot);
3635 
3636 	/*
3637 	 * start the drive in motion
3638 	 */
3639 	nv_put8(cmdhdl, nvp->nvp_cmd, cmd);
3640 
3641 	/*
3642 	 * the drive starts processing the transaction when the cmd register
3643 	 * is written.  This is done here before programming the DMA engine to
3644 	 * parallelize and save some time.  In the event that the drive is ready
3645 	 * before DMA, it will wait.
3646 	 */
3647 #ifdef NCQ
3648 	if ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
3649 	    (cmd == SATAC_READ_FPDMA_QUEUED)) {
3650 		ncq = B_TRUE;
3651 	}
3652 #endif
3653 
3654 	/*
3655 	 * copy the PRD list to PRD table in DMA accessible memory
3656 	 * so that the controller can access it.
3657 	 */
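	/*
	 * Each PRD entry is a pair of 32-bit words: the buffer physical
	 * address followed by the byte count and flags, with PRDE_EOT set
	 * in the final entry to mark the end of the table.
	 */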
3658 	for (idx = 0; idx < sg_count; idx++, srcp++) {
3659 		uint32_t size;
3660 
3661 		ASSERT(srcp->dmac_size <= UINT16_MAX);
3662 
3663 		nv_put32(sghdl, dstp++, srcp->dmac_address);
3664 
3665 		size = srcp->dmac_size;
3666 
3667 		/*
		 * If this is a 40-bit address, copy bits 32-39 of the
		 * physical address into bits 16-23 of the PRD count.
3670 		 */
3671 		if (srcp->dmac_laddress > UINT32_MAX) {
3672 			size |= ((srcp->dmac_laddress & 0xff00000000) >> 16);
3673 		}
3674 
3675 		/*
3676 		 * set the end of table flag for the last entry
3677 		 */
3678 		if (idx == (sg_count - 1)) {
3679 			size |= PRDE_EOT;
3680 		}
3681 
3682 		nv_put32(sghdl, dstp++, size);
3683 	}
3684 
3685 	(void) ddi_dma_sync(nvp->nvp_sg_dma_hdl[slot], 0,
3686 	    sizeof (prde_t) * NV_DMA_NSEGS, DDI_DMA_SYNC_FORDEV);
3687 
3688 	nv_start_dma_engine(nvp, slot);
3689 
3690 #ifdef NCQ
3691 	/*
3692 	 * optimization:  for SWNCQ, start DMA engine if this is the only
3693 	 * command running.  Preliminary NCQ efforts indicated this needs
3694 	 * more debugging.
3695 	 *
3696 	 * if (nvp->nvp_ncq_run <= 1)
3697 	 */
3698 
3699 	if (ncq == B_FALSE) {
3700 		NVLOG((NVDBG_DELIVER, nvp->nvp_ctlp, nvp,
3701 		    "NOT NCQ so starting DMA NOW non_ncq_commands=%d"
3702 		    " cmd = %X", non_ncq_commands++, cmd));
3703 		nv_start_dma_engine(nvp, slot);
3704 	} else {
3705 		NVLOG((NVDBG_DELIVER, nvp->nvp_ctlp, nvp, "?NCQ, so program "
3706 		    "DMA later ncq_commands=%d cmd = %X", ncq_commands++, cmd));
3707 	}
3708 #endif /* NCQ */
3709 
3710 	return (SATA_TRAN_ACCEPTED);
3711 }
3712 
3713 
3714 /*
3715  * start a PIO data-in ATA command
3716  */
3717 static int
3718 nv_start_pio_in(nv_port_t *nvp, int slot)
3719 {
3720 
3721 	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
3722 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
3723 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
3724 
3725 	nv_program_taskfile_regs(nvp, slot);
3726 
3727 	/*
3728 	 * This next one sets the drive in motion
3729 	 */
3730 	nv_put8(cmdhdl, nvp->nvp_cmd, spkt->satapkt_cmd.satacmd_cmd_reg);
3731 
3732 	return (SATA_TRAN_ACCEPTED);
3733 }
3734 
3735 
3736 /*
3737  * start a PIO data-out ATA command
3738  */
3739 static int
3740 nv_start_pio_out(nv_port_t *nvp, int slot)
3741 {
3742 	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
3743 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
3744 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
3745 
3746 	nv_program_taskfile_regs(nvp, slot);
3747 
3748 	/*
3749 	 * this next one sets the drive in motion
3750 	 */
3751 	nv_put8(cmdhdl, nvp->nvp_cmd, spkt->satapkt_cmd.satacmd_cmd_reg);
3752 
3753 	/*
3754 	 * wait for the busy bit to settle
3755 	 */
3756 	NV_DELAY_NSEC(400);
3757 
3758 	/*
3759 	 * wait for the drive to assert DRQ to send the first chunk
3760 	 * of data. Have to busy wait because there's no interrupt for
3761 	 * the first chunk. This is bad... uses a lot of cycles if the
3762 	 * drive responds too slowly or if the wait loop granularity
3763 	 * is too large. It's even worse if the drive is defective and
3764 	 * the loop times out.
3765 	 */
3766 	if (nv_wait3(nvp, SATA_STATUS_DRQ, SATA_STATUS_BSY, /* okay */
3767 	    SATA_STATUS_ERR, SATA_STATUS_BSY, /* cmd failed */
3768 	    SATA_STATUS_DF, SATA_STATUS_BSY, /* drive failed */
3769 	    4000000, 0) == B_FALSE) {
3770 		spkt->satapkt_reason = SATA_PKT_TIMEOUT;
3771 
3772 		goto error;
3773 	}
3774 
3775 	/*
3776 	 * send the first block.
3777 	 */
3778 	nv_intr_pio_out(nvp, nv_slotp);
3779 
3780 	/*
3781 	 * If nvslot_flags is not set to COMPLETE yet, then processing
3782 	 * is OK so far, so return.  Otherwise, fall into error handling
3783 	 * below.
3784 	 */
3785 	if (nv_slotp->nvslot_flags != NVSLOT_COMPLETE) {
3786 
3787 		return (SATA_TRAN_ACCEPTED);
3788 	}
3789 
3790 	error:
3791 	/*
3792 	 * there was an error so reset the device and complete the packet.
3793 	 */
3794 	nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
3795 	nv_complete_io(nvp, spkt, 0);
3796 	nv_reset(nvp);
3797 
3798 	return (SATA_TRAN_PORT_ERROR);
3799 }
3800 
3801 
3802 /*
3803  * Interrupt processing for a non-data ATA command.
3804  */
3805 static void
3806 nv_intr_nodata(nv_port_t *nvp, nv_slot_t *nv_slotp)
3807 {
3808 	uchar_t status;
3809 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
3810 	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
3811 	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
3812 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
3813 
3814 	NVLOG((NVDBG_INTR, nvp->nvp_ctlp, nvp, "nv_intr_nodata entered"));
3815 
3816 	status = nv_get8(cmdhdl, nvp->nvp_status);
3817 
3818 	/*
3819 	 * check for errors
3820 	 */
3821 	if (status & (SATA_STATUS_DF | SATA_STATUS_ERR)) {
3822 		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
3823 		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
3824 		    nvp->nvp_altstatus);
3825 		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
3826 	} else {
3827 		spkt->satapkt_reason = SATA_PKT_COMPLETED;
3828 	}
3829 
3830 	nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
3831 }
3832 
3833 
3834 /*
3835  * ATA command, PIO data in
3836  */
3837 static void
3838 nv_intr_pio_in(nv_port_t *nvp, nv_slot_t *nv_slotp)
3839 {
3840 	uchar_t	status;
3841 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
3842 	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
3843 	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
3844 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
3845 	int count;
3846 
3847 	status = nv_get8(cmdhdl, nvp->nvp_status);
3848 
3849 	if (status & SATA_STATUS_BSY) {
3850 		spkt->satapkt_reason = SATA_PKT_TIMEOUT;
3851 		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
3852 		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
3853 		    nvp->nvp_altstatus);
3854 		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
3855 		nv_reset(nvp);
3856 
3857 		return;
3858 	}
3859 
3860 	/*
3861 	 * check for errors
3862 	 */
3863 	if ((status & (SATA_STATUS_DRQ | SATA_STATUS_DF |
3864 	    SATA_STATUS_ERR)) != SATA_STATUS_DRQ) {
3865 		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
3866 		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
3867 		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
3868 
3869 		return;
3870 	}
3871 
3872 	/*
3873 	 * read the next chunk of data (if any)
3874 	 */
3875 	count = min(nv_slotp->nvslot_byte_count, NV_BYTES_PER_SEC);
3876 
3877 	/*
3878 	 * read count bytes
3879 	 */
3880 	ASSERT(count != 0);
3881 
3882 	ddi_rep_get16(cmdhdl, (ushort_t *)nv_slotp->nvslot_v_addr,
3883 	    (ushort_t *)nvp->nvp_data, (count >> 1), DDI_DEV_NO_AUTOINCR);
3884 
3885 	nv_slotp->nvslot_v_addr += count;
3886 	nv_slotp->nvslot_byte_count -= count;
3887 
3888 
3889 	if (nv_slotp->nvslot_byte_count != 0) {
3890 		/*
3891 		 * more to transfer.  Wait for next interrupt.
3892 		 */
3893 		return;
3894 	}
3895 
3896 	/*
3897 	 * transfer is complete. wait for the busy bit to settle.
3898 	 */
3899 	NV_DELAY_NSEC(400);
3900 
3901 	spkt->satapkt_reason = SATA_PKT_COMPLETED;
3902 	nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
3903 }
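

/*
 * A short worked example of the PIO chunking used by nv_intr_pio_in()
 * above and nv_intr_pio_out() below, assuming NV_BYTES_PER_SEC is the
 * 512-byte ATA sector size: a 4096-byte transfer moves as 8 chunks of
 * 512 bytes (for data-out the first chunk is pushed from
 * nv_start_pio_out(), the rest from the interrupt handler), and each
 * chunk is transferred as 256 16-bit accesses of the data register
 * (count >> 1 == 256).
 */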
3904 
3905 
3906 /*
3907  * ATA command PIO data out
3908  */
3909 static void
3910 nv_intr_pio_out(nv_port_t *nvp, nv_slot_t *nv_slotp)
3911 {
3912 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
3913 	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
3914 	uchar_t status;
3915 	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
3916 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
3917 	int count;
3918 
3919 	/*
3920 	 * clear the IRQ
3921 	 */
3922 	status = nv_get8(cmdhdl, nvp->nvp_status);
3923 
3924 	if (status & SATA_STATUS_BSY) {
3925 		/*
3926 		 * this should not happen
3927 		 */
3928 		spkt->satapkt_reason = SATA_PKT_TIMEOUT;
3929 		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
3930 		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
3931 		    nvp->nvp_altstatus);
3932 		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
3933 
3934 		return;
3935 	}
3936 
3937 	/*
3938 	 * check for errors
3939 	 */
3940 	if (status & (SATA_STATUS_DF | SATA_STATUS_ERR)) {
3941 		nv_copy_registers(nvp,  &spkt->satapkt_device, spkt);
3942 		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
3943 		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
3944 
3945 		return;
3946 	}
3947 
3948 	/*
3949 	 * DRQ deasserted signals that the drive is no longer ready to
3950 	 * transfer.  The transfer most likely completed successfully,
3951 	 * but verify that byte_count has reached zero before declaring
3952 	 * success.
3953 	 */
3954 	if ((status & SATA_STATUS_DRQ) == 0) {
3955 
3956 		if (nv_slotp->nvslot_byte_count == 0) {
3957 			/*
3958 			 * complete; successful transfer
3959 			 */
3960 			spkt->satapkt_reason = SATA_PKT_COMPLETED;
3961 		} else {
3962 			/*
3963 			 * error condition, incomplete transfer
3964 			 */
3965 			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
3966 			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
3967 		}
3968 		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
3969 
3970 		return;
3971 	}
3972 
3973 	/*
3974 	 * write the next chunk of data
3975 	 */
3976 	count = min(nv_slotp->nvslot_byte_count, NV_BYTES_PER_SEC);
3977 
3978 	/*
3979 	 * write count bytes
3980 	 */
3981 
3982 	ASSERT(count != 0);
3983 
3984 	ddi_rep_put16(cmdhdl, (ushort_t *)nv_slotp->nvslot_v_addr,
3985 	    (ushort_t *)nvp->nvp_data, (count >> 1), DDI_DEV_NO_AUTOINCR);
3986 
3987 	nv_slotp->nvslot_v_addr += count;
3988 	nv_slotp->nvslot_byte_count -= count;
3989 }
3990 
3991 
3992 /*
3993  * ATA command, DMA data in/out
3994  */
3995 static void
3996 nv_intr_dma(nv_port_t *nvp, struct nv_slot *nv_slotp)
3997 {
3998 	uchar_t status;
3999 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4000 	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4001 	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4002 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4003 	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
4004 	uchar_t	bmicx;
4005 	uchar_t bm_status;
4006 
4007 	nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4008 
4009 	/*
4010 	 * stop DMA engine.
4011 	 */
4012 	bmicx = nv_get8(bmhdl, nvp->nvp_bmicx);
4013 	nv_put8(bmhdl, nvp->nvp_bmicx,  bmicx & ~BMICX_SSBM);
4014 
4015 	/*
4016 	 * get the status and clear the IRQ, and check for DMA error
4017 	 */
4018 	status = nv_get8(cmdhdl, nvp->nvp_status);
4019 
4020 	/*
4021 	 * check for drive errors
4022 	 */
4023 	if (status & (SATA_STATUS_DF | SATA_STATUS_ERR)) {
4024 		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4025 		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4026 		(void) nv_bm_status_clear(nvp);
4027 
4028 		return;
4029 	}
4030 
4031 	bm_status = nv_bm_status_clear(nvp);
4032 
4033 	/*
4034 	 * check for bus master errors
4035 	 */
4036 	if (bm_status & BMISX_IDERR) {
4037 		spkt->satapkt_reason = SATA_PKT_RESET;
4038 		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
4039 		    nvp->nvp_altstatus);
4040 		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
4041 		nv_reset(nvp);
4042 
4043 		return;
4044 	}
4045 
4046 	spkt->satapkt_reason = SATA_PKT_COMPLETED;
4047 }
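

/*
 * The DMA completion sequence above roughly follows the standard
 * SFF-8038i bus-master IDE convention: clear the start/stop bit
 * (BMICX_SSBM) to halt the DMA engine, read the ATA status register
 * (which also deasserts the interrupt), then read and clear the
 * bus-master status register, treating BMISX_IDERR as a bus-master
 * error serious enough to warrant a reset.
 */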
4048 
4049 
4050 /*
4051  * Wait for a register of a controller to achieve a specific state.
4052  * To return normally, all the bits in the first sub-mask must be ON,
4053  * all the bits in the second sub-mask must be OFF.
4054  * If timeout_usec microseconds pass without the controller achieving
4055  * the desired bit configuration, return FALSE, otherwise return TRUE.
4056  *
4057  * hybrid waiting algorithm: if not in interrupt context, busy looping will
4058  * occur for the first 250 us, then switch over to a sleeping wait.
4059  *
4060  */
4061 int
4062 nv_wait(nv_port_t *nvp, uchar_t onbits, uchar_t offbits, uint_t timeout_usec,
4063     int type_wait)
4064 {
4065 	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4066 	hrtime_t end, cur, start_sleep, start;
4067 	int first_time = B_TRUE;
4068 	ushort_t val;
4069 
4070 	for (;;) {
4071 		val = nv_get8(ctlhdl, nvp->nvp_altstatus);
4072 
4073 		if ((val & onbits) == onbits && (val & offbits) == 0) {
4074 
4075 			return (B_TRUE);
4076 		}
4077 
4078 		cur = gethrtime();
4079 
4080 		/*
4081 		 * store the start time and calculate the end
4082 		 * time.  also calculate "start_sleep" which is
4083 		 * the point after which the driver will stop busy
4084 		 * waiting and change to sleep waiting.
4085 		 */
4086 		if (first_time) {
4087 			first_time = B_FALSE;
4088 			/*
4089 			 * start and end are in nanoseconds
4090 			 */
4091 			start = cur;
4092 			end = start + timeout_usec * 1000;
4093 			/*
4094 			 * the busy wait cutoff is 250 us after start
4095 			 */
4096 			start_sleep =  start + 250000;
4097 
4098 			if (servicing_interrupt()) {
4099 				type_wait = NV_NOSLEEP;
4100 			}
4101 		}
4102 
4103 		if (cur > end) {
4104 
4105 			break;
4106 		}
4107 
4108 		if ((type_wait != NV_NOSLEEP) && (cur > start_sleep)) {
4109 #if ! defined(__lock_lint)
4110 			delay(1);
4111 #endif
4112 		} else {
4113 			drv_usecwait(nv_usec_delay);
4114 		}
4115 	}
4116 
4117 	return (B_FALSE);
4118 }
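

/*
 * An illustrative (hypothetical) use of nv_wait(): busy wait up to one
 * second, without sleeping, for the drive to clear BSY:
 *
 *	if (nv_wait(nvp, 0, SATA_STATUS_BSY, 1000000,
 *	    NV_NOSLEEP) == B_FALSE) {
 *		... the drive never cleared BSY; handle the timeout ...
 *	}
 */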
4119 
4120 
4121 /*
4122  * This is a slightly more complicated version of nv_wait() that also
4123  * checks for error conditions and bails out early rather than looping
4124  * until the timeout is exceeded.
4125  *
4126  * hybrid waiting algorithm: if not in interrupt context, busy looping will
4127  * occur for the first 250 us, then switch over to a sleeping wait.
4128  */
4129 int
4130 nv_wait3(
4131 	nv_port_t	*nvp,
4132 	uchar_t		onbits1,
4133 	uchar_t		offbits1,
4134 	uchar_t		failure_onbits2,
4135 	uchar_t		failure_offbits2,
4136 	uchar_t		failure_onbits3,
4137 	uchar_t		failure_offbits3,
4138 	uint_t		timeout_usec,
4139 	int		type_wait)
4140 {
4141 	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4142 	hrtime_t end, cur, start_sleep, start;
4143 	int first_time = B_TRUE;
4144 	ushort_t val;
4145 
4146 	for (;;) {
4147 		val = nv_get8(ctlhdl, nvp->nvp_altstatus);
4148 
4149 		/*
4150 		 * check for expected condition
4151 		 */
4152 		if ((val & onbits1) == onbits1 && (val & offbits1) == 0) {
4153 
4154 			return (B_TRUE);
4155 		}
4156 
4157 		/*
4158 		 * check for error conditions
4159 		 */
4160 		if ((val & failure_onbits2) == failure_onbits2 &&
4161 		    (val & failure_offbits2) == 0) {
4162 
4163 			return (B_FALSE);
4164 		}
4165 
4166 		if ((val & failure_onbits3) == failure_onbits3 &&
4167 		    (val & failure_offbits3) == 0) {
4168 
4169 			return (B_FALSE);
4170 		}
4171 
4172 		/*
4173 		 * store the start time and calculate the end
4174 		 * time.  also calculate "start_sleep" which is
4175 		 * the point after which the driver will stop busy
4176 		 * waiting and change to sleep waiting.
4177 		 */
4178 		if (first_time) {
4179 			first_time = B_FALSE;
4180 			/*
4181 			 * start and end are in nanoseconds
4182 			 */
4183 			cur = start = gethrtime();
4184 			end = start + timeout_usec * 1000;
4185 			/*
4186 			 * the busy wait cutoff is 250 us after start
4187 			 */
4188 			start_sleep =  start + 250000;
4189 
4190 			if (servicing_interrupt()) {
4191 				type_wait = NV_NOSLEEP;
4192 			}
4193 		} else {
4194 			cur = gethrtime();
4195 		}
4196 
4197 		if (cur > end) {
4198 
4199 			break;
4200 		}
4201 
4202 		if ((type_wait != NV_NOSLEEP) && (cur > start_sleep)) {
4203 #if ! defined(__lock_lint)
4204 			delay(1);
4205 #endif
4206 		} else {
4207 			drv_usecwait(nv_usec_delay);
4208 		}
4209 	}
4210 
4211 	return (B_FALSE);
4212 }
4213 
4214 
4215 /*
4216  * nv_check_link() checks whether the link is active, i.e. a device is
4217  * present and communicating on the port.
4218  */
4219 static boolean_t
4220 nv_check_link(uint32_t sstatus)
4221 {
4222 	uint8_t det;
4223 
4224 	det = (sstatus & SSTATUS_DET) >> SSTATUS_DET_SHIFT;
4225 
4226 	return (det == SSTATUS_DET_DEVPRE_PHYCOM);
4227 }
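

/*
 * For example (per the SATA SStatus register layout, where the DET
 * field occupies the low four bits): an sstatus value of 0x113 yields
 * DET == 0x3 (SSTATUS_DET_DEVPRE_PHYCOM, device present and PHY
 * communication established), so nv_check_link() returns B_TRUE, while
 * an sstatus of 0x0 (no device detected) returns B_FALSE.
 */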
4228 
4229 
4230 /*
4231  * nv_port_state_change() reports the state of the port to the
4232  * sata module by calling sata_hba_event_notify().  This
4233  * function is called any time the state of the port is changed.
4234  */
4235 static void
4236 nv_port_state_change(nv_port_t *nvp, int event, uint8_t addr_type, int state)
4237 {
4238 	sata_device_t sd;
4239 
4240 	bzero((void *)&sd, sizeof (sata_device_t));
4241 	sd.satadev_rev = SATA_DEVICE_REV;
4242 	nv_copy_registers(nvp, &sd, NULL);
4243 
4244 	/*
4245 	 * When NCQ is implemented, the sactive and snotific fields will
4246 	 * need to be updated here as well.
4247 	 */
4248 	sd.satadev_addr.cport = nvp->nvp_port_num;
4249 	sd.satadev_addr.qual = addr_type;
4250 	sd.satadev_state = state;
4251 
4252 	sata_hba_event_notify(nvp->nvp_ctlp->nvc_dip, &sd, event);
4253 }
4254 
4255 
4256 /*
4257  * timeout processing:
4258  *
4259  * Check if any packets have crossed a timeout threshold.  If so, then
4260  * abort the packet.  This function is not NCQ aware.
4261  *
4262  * If reset was invoked in any other place than nv_sata_probe(), then
4263  * monitor for reset completion here.
4264  *
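 * As a rough example of the packet check below: a packet whose
 * satapkt_time is 5 seconds is aborted once
 * TICK_TO_SEC(ddi_get_lbolt() - nvslot_stime) exceeds 5, i.e. a little
 * more than 5 seconds after the packet was started, since this handler
 * is re-armed roughly once per second.
 *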
4265  */
4266 static void
4267 nv_timeout(void *arg)
4268 {
4269 	nv_port_t *nvp = arg;
4270 	nv_slot_t *nv_slotp;
4271 	int restart_timeout = B_FALSE;
4272 
4273 	mutex_enter(&nvp->nvp_mutex);
4274 
4275 	/*
4276 	 * If the probe entry point is driving the reset and signature
4277 	 * acquisition, just return.
4278 	 */
4279 	if (nvp->nvp_state & NV_PORT_RESET_PROBE) {
4280 		goto finished;
4281 	}
4282 
4283 	/*
4284 	 * If the port is not in the init state, it likely
4285 	 * means the link was lost while a timeout was active.
4286 	 */
4287 	if ((nvp->nvp_state & NV_PORT_INIT) == 0) {
4288 		NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
4289 		    "nv_timeout: port uninitialized"));
4290 
4291 		goto finished;
4292 	}
4293 
4294 	if (nvp->nvp_state & NV_PORT_RESET) {
4295 		ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
4296 		uint32_t sstatus;
4297 
4298 		NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
4299 		    "nv_timeout(): port waiting for signature"));
4300 
4301 		sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
4302 
4303 		/*
4304 		 * check for link presence.  If the link remains
4305 		 * missing for more than 2 seconds, send a remove
4306 		 * event and abort signature acquisition.
4307 		 */
4308 		if (nv_check_link(sstatus) == B_FALSE) {
4309 			clock_t e_link_lost = ddi_get_lbolt();
4310 
4311 			if (nvp->nvp_link_lost_time == 0) {
4312 				nvp->nvp_link_lost_time = e_link_lost;
4313 			}
4314 			if (TICK_TO_SEC(e_link_lost -
4315 			    nvp->nvp_link_lost_time) < NV_LINK_LOST_OK) {
4316 				NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
4317 				    "probe: intermittent link lost while"
4318 				    " resetting"));
4319 				restart_timeout = B_TRUE;
4320 			} else {
4321 				NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
4322 				    "link lost during signature acquisition."
4323 				    "  Giving up"));
4324 				nv_port_state_change(nvp,
4325 				    SATA_EVNT_DEVICE_DETACHED|
4326 				    SATA_EVNT_LINK_LOST,
4327 				    SATA_ADDR_CPORT, 0);
4328 				nvp->nvp_state |= NV_PORT_HOTREMOVED;
4329 				nvp->nvp_state &= ~NV_PORT_RESET;
4330 			}
4331 
4332 			goto finished;
4333 		} else {
4334 
4335 			nvp->nvp_link_lost_time = 0;
4336 		}
4337 
4338 		nv_read_signature(nvp);
4339 
4340 		if (nvp->nvp_signature != 0) {
4341 			if (nvp->nvp_type == SATA_DTYPE_ATADISK) {
4342 				nvp->nvp_state |= NV_PORT_RESTORE;
4343 				nv_port_state_change(nvp,
4344 				    SATA_EVNT_DEVICE_RESET,
4345 				    SATA_ADDR_DCPORT,
4346 				    SATA_DSTATE_RESET|SATA_DSTATE_PWR_ACTIVE);
4347 			}
4348 
4349 			goto finished;
4350 		}
4351 
4352 		/*
4353 		 * Reset if more than 5 seconds has passed without
4354 		 * acquiring a signature.
4355 		 */
4356 		if (TICK_TO_SEC(ddi_get_lbolt() - nvp->nvp_reset_time) > 5) {
4357 			nv_reset(nvp);
4358 		}
4359 
4360 		restart_timeout = B_TRUE;
4361 		goto finished;
4362 	}
4363 
4364 
4365 	/*
4366 	 * not yet NCQ aware
4367 	 */
4368 	nv_slotp = &(nvp->nvp_slot[0]);
4369 
4370 	/*
4371 	 * this happens early on, before the slot array has been set up,
4372 	 * or when a device was unexpectedly removed while a packet was
4373 	 * active.
4374 	 */
4375 	if (nv_slotp == NULL) {
4376 		NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
4377 		    "nv_timeout: nv_slotp == NULL"));
4378 
4379 		goto finished;
4380 	}
4381 
4382 	/*
4383 	 * perform timeout checking and processing only if there is an
4384 	 * active packet on the port
4385 	 */
4386 	if (nv_slotp->nvslot_spkt != NULL)  {
4387 		sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4388 		sata_cmd_t *satacmd = &spkt->satapkt_cmd;
4389 		uint8_t cmd = satacmd->satacmd_cmd_reg;
4390 		uint64_t lba;
4391 
4392 #if ! defined(__lock_lint) && defined(DEBUG)
4393 
4394 		lba = (uint64_t)satacmd->satacmd_lba_low_lsb |
4395 		    ((uint64_t)satacmd->satacmd_lba_mid_lsb << 8) |
4396 		    ((uint64_t)satacmd->satacmd_lba_high_lsb << 16) |
4397 		    ((uint64_t)satacmd->satacmd_lba_low_msb << 24) |
4398 		    ((uint64_t)satacmd->satacmd_lba_mid_msb << 32) |
4399 		    ((uint64_t)satacmd->satacmd_lba_high_msb << 40);
4400 #endif
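
		/*
		 * The 48-bit LBA above is assembled from the six
		 * byte-wide taskfile values: the low/mid/high "lsb"
		 * registers supply bits 0-23 and the low/mid/high
		 * "msb" registers supply bits 24-47.
		 */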
4401 
4402 		/*
4403 		 * timeout not needed if there is a polling thread
4404 		 */
4405 		if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) {
4406 
4407 			goto finished;
4408 		}
4409 
4410 		if (TICK_TO_SEC(ddi_get_lbolt() - nv_slotp->nvslot_stime) >
4411 		    spkt->satapkt_time) {
4412 			NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
4413 			    "abort timeout: "
4414 			    "nvslot_stime: %ld max ticks till timeout: "
4415 			    "%ld cur_time: %ld cmd=%x lba=%llu",
4416 			    nv_slotp->nvslot_stime, drv_usectohz(MICROSEC *
4417 			    spkt->satapkt_time), ddi_get_lbolt(), cmd, lba));
4418 
4419 			(void) nv_abort_active(nvp, spkt, SATA_PKT_TIMEOUT);
4420 
4421 		} else {
4422 			NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp, "nv_timeout:"
4423 			    " still in use so restarting timeout"));
4424 		}
4425 		restart_timeout = B_TRUE;
4426 
4427 	} else {
4428 		/*
4429 		 * there was no active packet, so do not re-enable timeout
4430 		 */
4431 		NVLOG((NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
4432 		    "nv_timeout: no active packet so not re-arming timeout"));
4433 	}
4434 
4435 	finished:
4436 
4437 	if (restart_timeout == B_TRUE) {
4438 		nvp->nvp_timeout_id = timeout(nv_timeout, (void *)nvp,
4439 		    drv_usectohz(NV_ONE_SEC));
4440 	} else {
4441 		nvp->nvp_timeout_id = 0;
4442 	}
4443 	mutex_exit(&nvp->nvp_mutex);
4444 }
4445 
4446 
4447 /*
4448  * enable or disable the 3 interrupt types the driver is
4449  * interested in: completion, add and remove.
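 *
 * Callers pass a combination of NV_INTR_CLEAR_ALL, NV_INTR_ENABLE and
 * NV_INTR_DISABLE; for example, nv_resume() re-arms a port with
 * NV_INTR_CLEAR_ALL|NV_INTR_ENABLE via the nvc_set_intr function
 * pointer.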
4450  */
4451 static void
4452 mcp04_set_intr(nv_port_t *nvp, int flag)
4453 {
4454 	nv_ctl_t *nvc = nvp->nvp_ctlp;
4455 	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
4456 	uchar_t *bar5  = nvc->nvc_bar_addr[5];
4457 	uint8_t intr_bits[] = { MCP04_INT_PDEV_HOT|MCP04_INT_PDEV_INT,
4458 	    MCP04_INT_SDEV_HOT|MCP04_INT_SDEV_INT };
4459 	uint8_t clear_all_bits[] = { MCP04_INT_PDEV_ALL, MCP04_INT_SDEV_ALL };
4460 	uint8_t int_en, port = nvp->nvp_port_num, intr_status;
4461 
4462 	ASSERT(mutex_owned(&nvp->nvp_mutex));
4463 
4464 	/*
4465 	 * a controller level lock is also required since access to an
4466 	 * 8-bit interrupt register is shared between both channels.
4467 	 */
4468 	mutex_enter(&nvc->nvc_mutex);
4469 
4470 	if (flag & NV_INTR_CLEAR_ALL) {
4471 		NVLOG((NVDBG_INTR, nvc, nvp,
4472 		    "mcp04_set_intr: NV_INTR_CLEAR_ALL"));
4473 
4474 		intr_status = nv_get8(nvc->nvc_bar_hdl[5],
4475 		    (uint8_t *)(nvc->nvc_mcp04_int_status));
4476 
4477 		if (intr_status & clear_all_bits[port]) {
4478 
4479 			nv_put8(nvc->nvc_bar_hdl[5],
4480 			    (uint8_t *)(nvc->nvc_mcp04_int_status),
4481 			    clear_all_bits[port]);
4482 
4483 			NVLOG((NVDBG_INTR, nvc, nvp,
4484 			    "interrupt bits cleared %x",
4485 			    intr_status & clear_all_bits[port]));
4486 		}
4487 	}
4488 
4489 	if (flag & NV_INTR_DISABLE) {
4490 		NVLOG((NVDBG_INTR, nvc, nvp,
4491 		    "mcp04_set_intr: NV_INTR_DISABLE"));
4492 		int_en = nv_get8(bar5_hdl,
4493 		    (uint8_t *)(bar5 + MCP04_SATA_INT_EN));
4494 		int_en &= ~intr_bits[port];
4495 		nv_put8(bar5_hdl, (uint8_t *)(bar5 + MCP04_SATA_INT_EN),
4496 		    int_en);
4497 	}
4498 
4499 	if (flag & NV_INTR_ENABLE) {
4500 		NVLOG((NVDBG_INTR, nvc, nvp, "mcp04_set_intr: NV_INTR_ENABLE"));
4501 		int_en = nv_get8(bar5_hdl,
4502 		    (uint8_t *)(bar5 + MCP04_SATA_INT_EN));
4503 		int_en |= intr_bits[port];
4504 		nv_put8(bar5_hdl, (uint8_t *)(bar5 + MCP04_SATA_INT_EN),
4505 		    int_en);
4506 	}
4507 
4508 	mutex_exit(&nvc->nvc_mutex);
4509 }
4510 
4511 
4512 /*
4513  * enable or disable the 3 interrupts the driver is interested in:
4514  * completion interrupt, hot add, and hot remove interrupt.
4515  */
4516 static void
4517 mcp55_set_intr(nv_port_t *nvp, int flag)
4518 {
4519 	nv_ctl_t *nvc = nvp->nvp_ctlp;
4520 	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
4521 	uint16_t intr_bits =
4522 	    MCP55_INT_ADD|MCP55_INT_REM|MCP55_INT_COMPLETE;
4523 	uint16_t int_en;
4524 
4525 	ASSERT(mutex_owned(&nvp->nvp_mutex));
4526 
4527 	NVLOG((NVDBG_HOT, nvc, nvp, "mcp55_set_intr: enter flag: %d", flag));
4528 
4529 	if (flag & NV_INTR_CLEAR_ALL) {
4530 		NVLOG((NVDBG_INTR, nvc, nvp,
4531 		    "mcp55_set_intr: NV_INTR_CLEAR_ALL"));
4532 		nv_put16(bar5_hdl, nvp->nvp_mcp55_int_status, MCP55_INT_CLEAR);
4533 	}
4534 
4535 	if (flag & NV_INTR_ENABLE) {
4536 		NVLOG((NVDBG_INTR, nvc, nvp, "mcp55_set_intr: NV_INTR_ENABLE"));
4537 		int_en = nv_get16(bar5_hdl, nvp->nvp_mcp55_int_ctl);
4538 		int_en |= intr_bits;
4539 		nv_put16(bar5_hdl, nvp->nvp_mcp55_int_ctl, int_en);
4540 	}
4541 
4542 	if (flag & NV_INTR_DISABLE) {
4543 		NVLOG((NVDBG_INTR, nvc, nvp,
4544 		    "mcp55_set_intr: NV_INTR_DISABLE"));
4545 		int_en = nv_get16(bar5_hdl, nvp->nvp_mcp55_int_ctl);
4546 		int_en &= ~intr_bits;
4547 		nv_put16(bar5_hdl, nvp->nvp_mcp55_int_ctl, int_en);
4548 	}
4549 }
4550 
4551 
4552 /*
4553  * The PM functions for suspend and resume are incomplete and need additional
4554  * work.  It may or may not work in the current state.
4555  */
4556 static void
4557 nv_resume(nv_port_t *nvp)
4558 {
4559 	NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp, "nv_resume()"));
4560 
4561 	mutex_enter(&nvp->nvp_mutex);
4562 
4563 	if (nvp->nvp_state & NV_PORT_INACTIVE) {
4564 		mutex_exit(&nvp->nvp_mutex);
4565 
4566 		return;
4567 	}
4568 
4569 	(*(nvp->nvp_ctlp->nvc_set_intr))(nvp, NV_INTR_CLEAR_ALL|NV_INTR_ENABLE);
4570 
4571 	/*
4572 	 * power may have been removed from the port and the drive,
4573 	 * and/or a drive may have been added or removed.  Forcing a
4574 	 * reset here would trigger a probe and re-establish any state
4575 	 * needed on the drive, but that reset is currently disabled
4576 	 * pending completion of the suspend/resume work: nv_reset(nvp);
4577 	 */
4578 
4579 	mutex_exit(&nvp->nvp_mutex);
4580 }
4581 
4582 
4583 static void
4584 nv_copy_registers(nv_port_t *nvp, sata_device_t *sd, sata_pkt_t *spkt)
4585 {
4586 	ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
4587 	sata_cmd_t *scmd = &spkt->satapkt_cmd;
4588 	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4589 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4590 	uchar_t status;
4591 	struct sata_cmd_flags flags;
4592 
4593 	NVLOG((NVDBG_INIT, nvp->nvp_ctlp, nvp, "nv_copy_registers()"));
4594 
4595 	sd->satadev_scr.sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
4596 	sd->satadev_scr.serror = nv_get32(bar5_hdl, nvp->nvp_serror);
4597 	sd->satadev_scr.scontrol = nv_get32(bar5_hdl, nvp->nvp_sctrl);
4598 
4599 	if (spkt == NULL) {
4600 
4601 		return;
4602 	}
4603 
4604 	/*
4605 	 * in the error case, implicitly request that the registers needed
4606 	 * for error handling be copied back.
4607 	 */
4608 	status = scmd->satacmd_status_reg = nv_get8(ctlhdl,
4609 	    nvp->nvp_altstatus);
4610 
4611 	flags = scmd->satacmd_flags;
4612 
4613 	if (status & SATA_STATUS_ERR) {
4614 		flags.sata_copy_out_lba_low_msb = B_TRUE;
4615 		flags.sata_copy_out_lba_mid_msb = B_TRUE;
4616 		flags.sata_copy_out_lba_high_msb = B_TRUE;
4617 		flags.sata_copy_out_lba_low_lsb = B_TRUE;
4618 		flags.sata_copy_out_lba_mid_lsb = B_TRUE;
4619 		flags.sata_copy_out_lba_high_lsb = B_TRUE;
4620 		flags.sata_copy_out_error_reg = B_TRUE;
4621 		flags.sata_copy_out_sec_count_msb = B_TRUE;
4622 		flags.sata_copy_out_sec_count_lsb = B_TRUE;
4623 		scmd->satacmd_status_reg = status;
4624 	}
4625 
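	/*
	 * For LBA48 commands the sector count and LBA taskfile
	 * registers are two bytes deep.  Per the ATA/ATAPI
	 * specification, setting the HOB (high order byte) bit in the
	 * device control register causes reads of those registers to
	 * return the previously written high-order byte, while
	 * clearing HOB returns the most recently written low-order
	 * byte; that is what the HOB manipulation below relies on.
	 */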
4626 	if (scmd->satacmd_addr_type & ATA_ADDR_LBA48) {
4627 
4628 		/*
4629 		 * set HOB so that high byte will be read
4630 		 */
4631 		nv_put8(ctlhdl, nvp->nvp_devctl, ATDC_HOB|ATDC_D3);
4632 
4633 		/*
4634 		 * get the requested high bytes
4635 		 */
4636 		if (flags.sata_copy_out_sec_count_msb) {
4637 			scmd->satacmd_sec_count_msb =
4638 			    nv_get8(cmdhdl, nvp->nvp_count);
4639 		}
4640 
4641 		if (flags.sata_copy_out_lba_low_msb) {
4642 			scmd->satacmd_lba_low_msb =
4643 			    nv_get8(cmdhdl, nvp->nvp_sect);
4644 		}
4645 
4646 		if (flags.sata_copy_out_lba_mid_msb) {
4647 			scmd->satacmd_lba_mid_msb =
4648 			    nv_get8(cmdhdl, nvp->nvp_lcyl);
4649 		}
4650 
4651 		if (flags.sata_copy_out_lba_high_msb) {
4652 			scmd->satacmd_lba_high_msb =
4653 			    nv_get8(cmdhdl, nvp->nvp_hcyl);
4654 		}
4655 	}
4656 
4657 	/*
4658 	 * disable HOB so that low byte is read
4659 	 */
4660 	nv_put8(ctlhdl, nvp->nvp_devctl, ATDC_D3);
4661 
4662 	/*
4663 	 * get the requested low bytes
4664 	 */
4665 	if (flags.sata_copy_out_sec_count_lsb) {
4666 		scmd->satacmd_sec_count_lsb = nv_get8(cmdhdl, nvp->nvp_count);
4667 	}
4668 
4669 	if (flags.sata_copy_out_lba_low_lsb) {
4670 		scmd->satacmd_lba_low_lsb = nv_get8(cmdhdl, nvp->nvp_sect);
4671 	}
4672 
4673 	if (flags.sata_copy_out_lba_mid_lsb) {
4674 		scmd->satacmd_lba_mid_lsb = nv_get8(cmdhdl, nvp->nvp_lcyl);
4675 	}
4676 
4677 	if (flags.sata_copy_out_lba_high_lsb) {
4678 		scmd->satacmd_lba_high_lsb = nv_get8(cmdhdl, nvp->nvp_hcyl);
4679 	}
4680 
4681 	/*
4682 	 * get the device register if requested
4683 	 */
4684 	if (flags.sata_copy_out_device_reg) {
4685 		scmd->satacmd_device_reg =  nv_get8(cmdhdl, nvp->nvp_drvhd);
4686 	}
4687 
4688 	/*
4689 	 * get the error register if requested
4690 	 */
4691 	if (flags.sata_copy_out_error_reg) {
4692 		scmd->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
4693 	}
4694 }
4695 
4696 
4697 /*
4698  * Hot plug and remove interrupts can occur when the device is reset.  Just
4699  * masking the interrupt doesn't always work well because if a
4700  * different interrupt arrives on the other port, the driver can still
4701  * end up checking the state of the other port and discover the hot
4702  * interrupt flag is set even though it was masked.  Checking for recent
4703  * reset activity and ignoring the interrupt in that case is the easiest way.
4704  */
4705 static void
4706 nv_report_add_remove(nv_port_t *nvp, int flags)
4707 {
4708 	ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
4709 	clock_t time_diff = ddi_get_lbolt() - nvp->nvp_reset_time;
4710 	uint32_t sstatus;
4711 	int i;
4712 
4713 	/*
4714 	 * If a reset occurred within the last second, ignore the
4715 	 * interrupt.  This somewhat heavy-handed clamping should be
4716 	 * reworked and improved at some point.
4717 	 */
4718 	if (time_diff < drv_usectohz(NV_ONE_SEC)) {
4719 		NVLOG((NVDBG_HOT, nvp->nvp_ctlp, nvp, "nv_report_add_remove()"
4720 		    " ignoring plug interrupt; reset was %dms ago",
4721 		    TICK_TO_MSEC(time_diff)));
4722 
4723 		return;
4724 	}
4725 
4726 	/*
4727 	 * wait up to 1ms for sstatus to settle and reflect the true
4728 	 * status of the port.  Failure to do so can create confusion
4729 	 * in probe, where the incorrect sstatus value can still
4730 	 * persist.
4731 	 */
4732 	for (i = 0; i < 1000; i++) {
4733 		sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
4734 
4735 		if ((flags == NV_PORT_HOTREMOVED) &&
4736 		    ((sstatus & SSTATUS_DET_DEVPRE_PHYCOM) !=
4737 		    SSTATUS_DET_DEVPRE_PHYCOM)) {
4738 			break;
4739 		}
4740 
4741 		if ((flags != NV_PORT_HOTREMOVED) &&
4742 		    ((sstatus & SSTATUS_DET_DEVPRE_PHYCOM) ==
4743 		    SSTATUS_DET_DEVPRE_PHYCOM)) {
4744 			break;
4745 		}
4746 		drv_usecwait(1);
4747 	}
4748 
4749 	NVLOG((NVDBG_HOT, nvp->nvp_ctlp, nvp,
4750 	    "sstatus took %i us for DEVPRE_PHYCOM to settle", i));
4751 
4752 	if (flags == NV_PORT_HOTREMOVED) {
4753 		NVLOG((NVDBG_HOT, nvp->nvp_ctlp, nvp,
4754 		    "nv_report_add_remove() hot removed"));
4755 		nv_port_state_change(nvp,
4756 		    SATA_EVNT_DEVICE_DETACHED,
4757 		    SATA_ADDR_CPORT, 0);
4758 
4759 		nvp->nvp_state |= NV_PORT_HOTREMOVED;
4760 	} else {
4761 		NVLOG((NVDBG_HOT, nvp->nvp_ctlp, nvp,
4762 		    "nv_report_add_remove() hot plugged"));
4763 		nv_port_state_change(nvp, SATA_EVNT_DEVICE_ATTACHED,
4764 		    SATA_ADDR_CPORT, 0);
4765 	}
4766 }
4767