1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24  */
25 
26 /*
27  *
28  * nv_sata is a combo SATA HBA driver for ck804/mcp5x (mcp5x = mcp55/mcp51)
29  * based chipsets.
30  *
31  * NCQ
32  * ---
33  *
34  * A portion of the NCQ support is in place, but it is incomplete.  NCQ is
35  * disabled and is likely to be revisited in the future.
36  *
37  *
38  * Power Management
39  * ----------------
40  *
41  * Normally power management would be responsible for ensuring the device
42  * is quiescent and then changing the power state of the device, such as
43  * powering down parts or all of the device.  mcp5x/ck804 is unique in
44  * that it is only available as part of a larger southbridge chipset, so
45  * removing power to the device isn't possible.  Switches to control
46  * power management states D0/D3 in the PCI configuration space appear to
47  * be supported but changes to these states are apparently ignored.
48  * The only further PM that the driver _could_ do is shut down the PHY,
49  * but in order to deliver the first rev of the driver sooner rather than
50  * later, that will be deferred until some future phase.
51  *
52  * Since the driver currently will not directly change any power state to
53  * the device, no power() entry point will be required.  However, it is
54  * possible that in ACPI power state S3, aka suspend to RAM, power can be
55  * removed from the device, and the driver cannot rely on BIOS to have
56  * reset any state.  For the time being, there are no known
57  * non-default configurations that need to be programmed.  This judgement
58  * is based on the port of the legacy ata driver not having any such
59  * functionality and based on conversations with the PM team.  If such a
60  * restoration is later deemed necessary it can be incorporated into the
61  * DDI_RESUME processing.
62  *
63  */
64 
65 #include <sys/scsi/scsi.h>
66 #include <sys/pci.h>
67 #include <sys/byteorder.h>
68 #include <sys/sunddi.h>
69 #include <sys/sata/sata_hba.h>
70 #ifdef SGPIO_SUPPORT
71 #include <sys/sata/adapters/nv_sata/nv_sgpio.h>
72 #include <sys/devctl.h>
73 #include <sys/sdt.h>
74 #endif
75 #include <sys/sata/adapters/nv_sata/nv_sata.h>
76 #include <sys/disp.h>
77 #include <sys/note.h>
78 #include <sys/promif.h>
79 
80 
81 /*
82  * Function prototypes for driver entry points
83  */
84 static int nv_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
85 static int nv_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
86 static int nv_quiesce(dev_info_t *dip);
87 static int nv_getinfo(dev_info_t *dip, ddi_info_cmd_t infocmd,
88     void *arg, void **result);
89 
90 /*
91  * Function prototypes for entry points from sata service module
92  * These functions are distinguished from other local functions
93  * by the prefix "nv_sata_"
94  */
95 static int nv_sata_start(dev_info_t *dip, sata_pkt_t *spkt);
96 static int nv_sata_abort(dev_info_t *dip, sata_pkt_t *spkt, int);
97 static int nv_sata_reset(dev_info_t *dip, sata_device_t *sd);
98 static int nv_sata_activate(dev_info_t *dip, sata_device_t *sd);
99 static int nv_sata_deactivate(dev_info_t *dip, sata_device_t *sd);
100 
101 /*
102  * Local function prototypes
103  */
104 static uint_t mcp5x_intr(caddr_t arg1, caddr_t arg2);
105 static uint_t ck804_intr(caddr_t arg1, caddr_t arg2);
106 static int nv_add_legacy_intrs(nv_ctl_t *nvc);
107 #ifdef NV_MSI_SUPPORTED
108 static int nv_add_msi_intrs(nv_ctl_t *nvc);
109 #endif
110 static void nv_rem_intrs(nv_ctl_t *nvc);
111 static int nv_start_common(nv_port_t *nvp, sata_pkt_t *spkt);
112 static int nv_start_nodata(nv_port_t *nvp, int slot);
113 static void nv_intr_nodata(nv_port_t *nvp, nv_slot_t *spkt);
114 static int nv_start_pio_in(nv_port_t *nvp, int slot);
115 static int nv_start_pio_out(nv_port_t *nvp, int slot);
116 static void nv_intr_pio_in(nv_port_t *nvp, nv_slot_t *spkt);
117 static void nv_intr_pio_out(nv_port_t *nvp, nv_slot_t *spkt);
118 static int nv_start_pkt_pio(nv_port_t *nvp, int slot);
119 static void nv_intr_pkt_pio(nv_port_t *nvp, nv_slot_t *nv_slotp);
120 static int nv_start_dma(nv_port_t *nvp, int slot);
121 static void nv_intr_dma(nv_port_t *nvp, struct nv_slot *spkt);
122 static void nv_uninit_ctl(nv_ctl_t *nvc);
123 static void mcp5x_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle);
124 static void ck804_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle);
125 static void nv_uninit_port(nv_port_t *nvp);
126 static int nv_init_port(nv_port_t *nvp);
127 static int nv_init_ctl(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle);
128 static int mcp5x_packet_complete_intr(nv_ctl_t *nvc, nv_port_t *nvp);
129 #ifdef NCQ
130 static int mcp5x_dma_setup_intr(nv_ctl_t *nvc, nv_port_t *nvp);
131 #endif
132 static void nv_start_dma_engine(nv_port_t *nvp, int slot);
133 static void nv_port_state_change(nv_port_t *nvp, int event, uint8_t addr_type,
134     int state);
135 static void nv_common_reg_init(nv_ctl_t *nvc);
136 static void ck804_intr_process(nv_ctl_t *nvc, uint8_t intr_status);
137 static void nv_reset(nv_port_t *nvp, char *reason);
138 static void nv_complete_io(nv_port_t *nvp,  sata_pkt_t *spkt, int slot);
139 static void nv_timeout(void *);
140 static int nv_poll_wait(nv_port_t *nvp, sata_pkt_t *spkt);
141 static void nv_cmn_err(int ce, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, ...);
142 static void nv_read_signature(nv_port_t *nvp);
143 static void mcp5x_set_intr(nv_port_t *nvp, int flag);
144 static void ck804_set_intr(nv_port_t *nvp, int flag);
145 static void nv_resume(nv_port_t *nvp);
146 static void nv_suspend(nv_port_t *nvp);
147 static int nv_start_sync(nv_port_t *nvp, sata_pkt_t *spkt);
148 static int nv_abort_active(nv_port_t *nvp, sata_pkt_t *spkt, int abort_reason,
149     int flag);
150 static void nv_copy_registers(nv_port_t *nvp, sata_device_t *sd,
151     sata_pkt_t *spkt);
152 static void nv_report_add_remove(nv_port_t *nvp, int flags);
153 static int nv_start_async(nv_port_t *nvp, sata_pkt_t *spkt);
154 static int nv_wait3(nv_port_t *nvp, uchar_t onbits1, uchar_t offbits1,
155     uchar_t failure_onbits2, uchar_t failure_offbits2,
156     uchar_t failure_onbits3, uchar_t failure_offbits3,
157     uint_t timeout_usec, int type_wait);
158 static int nv_wait(nv_port_t *nvp, uchar_t onbits, uchar_t offbits,
159     uint_t timeout_usec, int type_wait);
160 static int nv_start_rqsense_pio(nv_port_t *nvp, nv_slot_t *nv_slotp);
161 static void nv_init_port_link_processing(nv_ctl_t *nvc);
162 static void nv_setup_timeout(nv_port_t *nvp, int time);
163 static void nv_monitor_reset(nv_port_t *nvp);
164 static int nv_bm_status_clear(nv_port_t *nvp);
165 static void nv_log(nv_ctl_t *nvc, nv_port_t *nvp, const char *fmt, ...);
166 
167 #ifdef SGPIO_SUPPORT
168 static int nv_open(dev_t *devp, int flag, int otyp, cred_t *credp);
169 static int nv_close(dev_t dev, int flag, int otyp, cred_t *credp);
170 static int nv_ioctl(dev_t dev, int cmd, intptr_t arg, int mode,
171     cred_t *credp, int *rvalp);
172 
173 static void nv_sgp_led_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle);
174 static int nv_sgp_detect(ddi_acc_handle_t pci_conf_handle, uint16_t *csrpp,
175     uint32_t *cbpp);
176 static int nv_sgp_init(nv_ctl_t *nvc);
177 static int nv_sgp_check_set_cmn(nv_ctl_t *nvc);
178 static int nv_sgp_csr_read(nv_ctl_t *nvc);
179 static void nv_sgp_csr_write(nv_ctl_t *nvc, uint32_t val);
180 static int nv_sgp_write_data(nv_ctl_t *nvc);
181 static void nv_sgp_activity_led_ctl(void *arg);
182 static void nv_sgp_drive_connect(nv_ctl_t *nvc, int drive);
183 static void nv_sgp_drive_disconnect(nv_ctl_t *nvc, int drive);
184 static void nv_sgp_drive_active(nv_ctl_t *nvc, int drive);
185 static void nv_sgp_locate(nv_ctl_t *nvc, int drive, int value);
186 static void nv_sgp_error(nv_ctl_t *nvc, int drive, int value);
187 static void nv_sgp_cleanup(nv_ctl_t *nvc);
188 #endif
189 
190 
191 /*
192  * DMA attributes for the data buffer for x86.  dma_attr_burstsizes is unused.
193  * Verify whether this is needed if ported to another ISA.
194  */
195 static ddi_dma_attr_t buffer_dma_attr = {
196 	DMA_ATTR_V0,		/* dma_attr_version */
197 	0,			/* dma_attr_addr_lo: lowest bus address */
198 	0xffffffffull,		/* dma_attr_addr_hi: */
199 	NV_BM_64K_BOUNDARY - 1,	/* dma_attr_count_max i.e. for one cookie */
200 	4,			/* dma_attr_align */
201 	1,			/* dma_attr_burstsizes. */
202 	1,			/* dma_attr_minxfer */
203 	0xffffffffull,		/* dma_attr_maxxfer including all cookies */
204 	0xffffffffull,		/* dma_attr_seg */
205 	NV_DMA_NSEGS,		/* dma_attr_sgllen */
206 	512,			/* dma_attr_granular */
207 	0,			/* dma_attr_flags */
208 };
209 static ddi_dma_attr_t buffer_dma_40bit_attr = {
210 	DMA_ATTR_V0,		/* dma_attr_version */
211 	0,			/* dma_attr_addr_lo: lowest bus address */
212 	0xffffffffffull,	/* dma_attr_addr_hi: */
213 	NV_BM_64K_BOUNDARY - 1,	/* dma_attr_count_max i.e. for one cookie */
214 	4,			/* dma_attr_align */
215 	1,			/* dma_attr_burstsizes. */
216 	1,			/* dma_attr_minxfer */
217 	0xffffffffull,		/* dma_attr_maxxfer including all cookies */
218 	0xffffffffull,		/* dma_attr_seg */
219 	NV_DMA_NSEGS,		/* dma_attr_sgllen */
220 	512,			/* dma_attr_granular */
221 	0,			/* dma_attr_flags */
222 };
223 
224 
225 /*
226  * DMA attributes for PRD tables
227  */
228 ddi_dma_attr_t nv_prd_dma_attr = {
229 	DMA_ATTR_V0,		/* dma_attr_version */
230 	0,			/* dma_attr_addr_lo */
231 	0xffffffffull,		/* dma_attr_addr_hi */
232 	NV_BM_64K_BOUNDARY - 1,	/* dma_attr_count_max */
233 	4,			/* dma_attr_align */
234 	1,			/* dma_attr_burstsizes */
235 	1,			/* dma_attr_minxfer */
236 	NV_BM_64K_BOUNDARY,	/* dma_attr_maxxfer */
237 	NV_BM_64K_BOUNDARY - 1,	/* dma_attr_seg */
238 	1,			/* dma_attr_sgllen */
239 	1,			/* dma_attr_granular */
240 	0			/* dma_attr_flags */
241 };
242 
243 /*
244  * Device access attributes
245  */
246 static ddi_device_acc_attr_t accattr = {
247     DDI_DEVICE_ATTR_V0,
248     DDI_STRUCTURE_LE_ACC,
249     DDI_STRICTORDER_ACC
250 };
251 
252 
253 #ifdef SGPIO_SUPPORT
254 static struct cb_ops nv_cb_ops = {
255 	nv_open,		/* open */
256 	nv_close,		/* close */
257 	nodev,			/* strategy (block) */
258 	nodev,			/* print (block) */
259 	nodev,			/* dump (block) */
260 	nodev,			/* read */
261 	nodev,			/* write */
262 	nv_ioctl,		/* ioctl */
263 	nodev,			/* devmap */
264 	nodev,			/* mmap */
265 	nodev,			/* segmap */
266 	nochpoll,		/* chpoll */
267 	ddi_prop_op,		/* prop_op */
268 	NULL,			/* streams */
269 	D_NEW | D_MP |
270 	D_64BIT | D_HOTPLUG,	/* flags */
271 	CB_REV			/* rev */
272 };
273 #endif  /* SGPIO_SUPPORT */
274 
275 
276 static struct dev_ops nv_dev_ops = {
277 	DEVO_REV,		/* devo_rev */
278 	0,			/* refcnt  */
279 	nv_getinfo,		/* info */
280 	nulldev,		/* identify */
281 	nulldev,		/* probe */
282 	nv_attach,		/* attach */
283 	nv_detach,		/* detach */
284 	nodev,			/* no reset */
285 #ifdef SGPIO_SUPPORT
286 	&nv_cb_ops,		/* driver operations */
287 #else
288 	(struct cb_ops *)0,	/* driver operations */
289 #endif
290 	NULL,			/* bus operations */
291 	NULL,			/* power */
292 	nv_quiesce		/* quiesce */
293 };
294 
295 
296 /*
297  * Request Sense CDB for ATAPI
298  */
299 static const uint8_t nv_rqsense_cdb[16] = {
300 	SCMD_REQUEST_SENSE,
301 	0,
302 	0,
303 	0,
304 	SATA_ATAPI_MIN_RQSENSE_LEN,
305 	0,
306 	0, 0, 0, 0, 0, 0, 0, 0, 0, 0	/* pad out to max CDB length */
307 };
308 
309 
310 static sata_tran_hotplug_ops_t nv_hotplug_ops;
311 
312 extern struct mod_ops mod_driverops;
313 
314 static  struct modldrv modldrv = {
315 	&mod_driverops,	/* driverops */
316 	"Nvidia ck804/mcp51/mcp55 HBA",
317 	&nv_dev_ops,	/* driver ops */
318 };
319 
320 static  struct modlinkage modlinkage = {
321 	MODREV_1,
322 	&modldrv,
323 	NULL
324 };
325 
326 
327 /*
328  * Wait for a signature.
329  * If this variable is non-zero, the driver will wait for a device signature
330  * before reporting a device reset to the sata module.
331  * Some (most?) drives will not process commands sent to them before the D2H
332  * FIS is sent to the host.
333  */
334 int nv_wait_for_signature = 1;
335 
336 /*
337  * Check for signature availability.
338  * If this variable is non-zero, the driver will check the task file error
339  * register for an indication of signature availability before reading the
340  * signature.  Task file error register bit 0 set to 1 indicates that the
341  * drive is ready and has sent the D2H FIS with a signature.
342  * This behavior of the error register is not reliable in the mcp5x controller.
343  */
344 int nv_check_tfr_error = 0;
345 
346 /*
347  * Max signature acquisition time, in milliseconds.
348  * The driver will try to acquire a device signature within the specified
349  * time and give up if the signature was not acquired.
350  */
351 long nv_sig_acquisition_time = NV_SIG_ACQUISITION_TIME;
352 
353 /*
354  * If this variable is non-zero, the driver will wait for a signature in the
355  * nv_monitor_reset function without any time limit.
356  * Used for debugging and drive evaluation.
357  */
358 int nv_wait_here_forever = 0;
359 
360 /*
361  * Reset after hotplug.
362  * If this variable is non-zero, the driver will reset the device after a
363  * hotplug (device attached) interrupt.
364  * If the variable is zero, the driver will not reset the new device nor will
365  * it try to read the device signature.
366  * The chipset generates a hotplug (device attached) interrupt with a delay, so
367  * the device should have already sent the D2H FIS with the signature.
368  */
369 int nv_reset_after_hotplug = 1;
370 
371 /*
372  * Delay after device hotplug.
373  * It specifies the time between detecting a hotplugged device and sending
374  * a notification to the SATA module.
375  * It is used when the device is not reset after hotplugging and acquiring
376  * the signature may be unreliable.  The delay should be long enough for a
377  * device to become ready to accept commands.
378  */
379 int nv_hotplug_delay = NV_HOTPLUG_DELAY;
380 
381 
382 /*
383  * Maximum number of consecutive interrupts processed in the loop during a
384  * single invocation of the port interrupt routine.
385  */
386 int nv_max_intr_loops = NV_MAX_INTR_PER_DEV;
387 
388 
389 
390 /*
391  * Delay, in microseconds, between checks of register status.
392  */
393 int nv_usec_delay = NV_WAIT_REG_CHECK;
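
/*
 * The tunables above are plain global variables; like other driver tunables
 * they can normally be overridden at boot time via /etc/system (assuming the
 * module is delivered as "nv_sata"), for example:
 *
 *	set nv_sata:nv_reset_after_hotplug = 0
 */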
394 
395 /*
396  * The following is needed for nv_vcmn_err()
397  */
398 static kmutex_t nv_log_mutex; /* protects nv_log_buf */
399 static char nv_log_buf[NV_LOGBUF_LEN];
400 int nv_debug_flags =
401     NVDBG_HOT|NVDBG_RESET|NVDBG_ALWAYS|NVDBG_TIMEOUT|NVDBG_EVENT;
402 int nv_log_to_console = B_FALSE;
403 
404 int nv_prom_print = B_FALSE;
405 
406 /*
407  * for debugging
408  */
409 #ifdef DEBUG
410 int ncq_commands = 0;
411 int non_ncq_commands = 0;
412 #endif
413 
414 /*
415  * Opaque state pointer to be initialized by ddi_soft_state_init()
416  */
417 static void *nv_statep	= NULL;
418 
419 /*
420  * Map from CBP to shared space
421  *
422  * When an MCP55/IO55 part supports SGPIO, there is a single CBP (SGPIO
423  * Control Block Pointer as well as the corresponding Control Block) that
424  * is shared across all driver instances associated with that part.  The
425  * Control Block is used to update and query the LED state for the devices
426  * on the controllers associated with those instances.  There is also some
427  * driver state (called the 'common' area here) associated with each SGPIO
428  * Control Block.  The nv_sgp_cbp2cmn array is used to map a given CBP to
429  * its common area.
430  *
431  * The driver can also use this mapping array to determine whether the
432  * common area for a given CBP has been initialized, and, if it isn't
433  * initialized, initialize it.
434  *
435  * When a driver instance with a CBP value that is already in the array is
436  * initialized, it will use the pointer to the previously initialized common
437  * area associated with that SGPIO CBP value, rather than initialize it
438  * itself.
439  *
440  * nv_sgp_c2c_mutex is used to synchronize access to this mapping array.
441  */
442 #ifdef SGPIO_SUPPORT
443 static kmutex_t nv_sgp_c2c_mutex;
444 static struct nv_sgp_cbp2cmn nv_sgp_cbp2cmn[NV_MAX_CBPS];
445 #endif
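
/*
 * A minimal sketch (not compiled in) of the lookup described above: a driver
 * instance holding CBP value 'cbp' scans nv_sgp_cbp2cmn under
 * nv_sgp_c2c_mutex, reusing an already-initialized common area if one exists
 * or claiming the first empty slot otherwise.  The real lookup is performed
 * during SGPIO initialization; the local names here are illustrative only.
 *
 *	int i;
 *
 *	mutex_enter(&nv_sgp_c2c_mutex);
 *	for (i = 0; i < NV_MAX_CBPS; i++) {
 *		if (nv_sgp_cbp2cmn[i].c2cm_cbp == cbp)
 *			break;	-- reuse nv_sgp_cbp2cmn[i].c2cm_cmn
 *		if (nv_sgp_cbp2cmn[i].c2cm_cbp == 0) {
 *			nv_sgp_cbp2cmn[i].c2cm_cbp = cbp;
 *			break;	-- claim this slot; set up a new common area
 *		}
 *	}
 *	mutex_exit(&nv_sgp_c2c_mutex);
 */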
446 
447 /* 40-bit DMA is enabled by default; set to B_FALSE to disable it */
448 int nv_sata_40bit_dma = B_TRUE;
449 
450 static sata_tran_hotplug_ops_t nv_hotplug_ops = {
451 	SATA_TRAN_HOTPLUG_OPS_REV_1,	/* structure version */
452 	nv_sata_activate,	/* activate port. cfgadm -c connect */
453 	nv_sata_deactivate	/* deactivate port. cfgadm -c disconnect */
454 };
455 
456 
457 /*
458  *  nv module initialization
459  */
460 int
461 _init(void)
462 {
463 	int	error;
464 #ifdef SGPIO_SUPPORT
465 	int	i;
466 #endif
467 
468 	error = ddi_soft_state_init(&nv_statep, sizeof (nv_ctl_t), 0);
469 
470 	if (error != 0) {
471 
472 		return (error);
473 	}
474 
475 	mutex_init(&nv_log_mutex, NULL, MUTEX_DRIVER, NULL);
476 #ifdef SGPIO_SUPPORT
477 	mutex_init(&nv_sgp_c2c_mutex, NULL, MUTEX_DRIVER, NULL);
478 
479 	for (i = 0; i < NV_MAX_CBPS; i++) {
480 		nv_sgp_cbp2cmn[i].c2cm_cbp = 0;
481 		nv_sgp_cbp2cmn[i].c2cm_cmn = NULL;
482 	}
483 #endif
484 
485 	if ((error = sata_hba_init(&modlinkage)) != 0) {
486 		ddi_soft_state_fini(&nv_statep);
487 		mutex_destroy(&nv_log_mutex);
488 
489 		return (error);
490 	}
491 
492 	error = mod_install(&modlinkage);
493 	if (error != 0) {
494 		sata_hba_fini(&modlinkage);
495 		ddi_soft_state_fini(&nv_statep);
496 		mutex_destroy(&nv_log_mutex);
497 
498 		return (error);
499 	}
500 
501 	return (error);
502 }
503 
504 
505 /*
506  * nv module uninitialize
507  */
508 int
509 _fini(void)
510 {
511 	int	error;
512 
513 	error = mod_remove(&modlinkage);
514 
515 	if (error != 0) {
516 		return (error);
517 	}
518 
519 	/*
520 	 * remove the resources allocated in _init()
521 	 */
522 	mutex_destroy(&nv_log_mutex);
523 #ifdef SGPIO_SUPPORT
524 	mutex_destroy(&nv_sgp_c2c_mutex);
525 #endif
526 	sata_hba_fini(&modlinkage);
527 	ddi_soft_state_fini(&nv_statep);
528 
529 	return (error);
530 }
531 
532 
533 /*
534  * nv _info entry point
535  */
536 int
537 _info(struct modinfo *modinfop)
538 {
539 	return (mod_info(&modlinkage, modinfop));
540 }
541 
542 
543 /*
544  * these wrappers for ddi_{get,put}{8,16,32} are for observability
545  * with dtrace
546  */
547 #ifdef DEBUG
548 
549 static void
550 nv_put8(ddi_acc_handle_t handle, uint8_t *dev_addr, uint8_t value)
551 {
552 	ddi_put8(handle, dev_addr, value);
553 }
554 
555 static void
556 nv_put32(ddi_acc_handle_t handle, uint32_t *dev_addr, uint32_t value)
557 {
558 	ddi_put32(handle, dev_addr, value);
559 }
560 
561 static uint32_t
562 nv_get32(ddi_acc_handle_t handle, uint32_t *dev_addr)
563 {
564 	return (ddi_get32(handle, dev_addr));
565 }
566 
567 static void
568 nv_put16(ddi_acc_handle_t handle, uint16_t *dev_addr, uint16_t value)
569 {
570 	ddi_put16(handle, dev_addr, value);
571 }
572 
573 static uint16_t
574 nv_get16(ddi_acc_handle_t handle, uint16_t *dev_addr)
575 {
576 	return (ddi_get16(handle, dev_addr));
577 }
578 
579 static uint8_t
580 nv_get8(ddi_acc_handle_t handle, uint8_t *dev_addr)
581 {
582 	return (ddi_get8(handle, dev_addr));
583 }
584 
585 #else
586 
587 #define	nv_put8 ddi_put8
588 #define	nv_put32 ddi_put32
589 #define	nv_get32 ddi_get32
590 #define	nv_put16 ddi_put16
591 #define	nv_get16 ddi_get16
592 #define	nv_get8 ddi_get8
593 
594 #endif
595 
596 
597 /*
598  * Driver attach
599  */
600 static int
601 nv_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
602 {
603 	int status, attach_state, intr_types, bar, i, command;
604 	int inst = ddi_get_instance(dip);
605 	ddi_acc_handle_t pci_conf_handle;
606 	nv_ctl_t *nvc;
607 	uint8_t subclass;
608 	uint32_t reg32;
609 #ifdef SGPIO_SUPPORT
610 	pci_regspec_t *regs;
611 	int rlen;
612 #endif
613 
614 	switch (cmd) {
615 
616 	case DDI_ATTACH:
617 
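		/*
		 * attach_state accumulates ATTACH_PROGRESS_* bits as each
		 * setup step completes, so the failure path at the bottom
		 * of this function can unwind only what was actually done.
		 */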
618 		attach_state = ATTACH_PROGRESS_NONE;
619 
620 		status = ddi_soft_state_zalloc(nv_statep, inst);
621 
622 		if (status != DDI_SUCCESS) {
623 			break;
624 		}
625 
626 		nvc = ddi_get_soft_state(nv_statep, inst);
627 
628 		nvc->nvc_dip = dip;
629 
630 		NVLOG(NVDBG_INIT, nvc, NULL, "nv_attach(): DDI_ATTACH", NULL);
631 
632 		attach_state |= ATTACH_PROGRESS_STATEP_ALLOC;
633 
634 		if (pci_config_setup(dip, &pci_conf_handle) == DDI_SUCCESS) {
635 			nvc->nvc_revid = pci_config_get8(pci_conf_handle,
636 			    PCI_CONF_REVID);
637 			NVLOG(NVDBG_INIT, nvc, NULL,
638 			    "inst %d: silicon revid is %x nv_debug_flags=%x",
639 			    inst, nvc->nvc_revid, nv_debug_flags);
640 		} else {
641 			break;
642 		}
643 
644 		attach_state |= ATTACH_PROGRESS_CONF_HANDLE;
645 
646 		/*
647 		 * Set the PCI command register: enable IO/MEM/Master.
648 		 */
649 		command = pci_config_get16(pci_conf_handle, PCI_CONF_COMM);
650 		pci_config_put16(pci_conf_handle, PCI_CONF_COMM,
651 		    command|PCI_COMM_IO|PCI_COMM_MAE|PCI_COMM_ME);
652 
653 		subclass = pci_config_get8(pci_conf_handle, PCI_CONF_SUBCLASS);
654 
655 		if (subclass & PCI_MASS_RAID) {
656 			cmn_err(CE_WARN,
657 			    "attach failed: RAID mode not supported");
658 
659 			break;
660 		}
661 
662 		/*
663 		 * the 6 bars of the controller are:
664 		 * 0: port 0 task file
665 		 * 1: port 0 status
666 		 * 2: port 1 task file
667 		 * 3: port 1 status
668 		 * 4: bus master for both ports
669 		 * 5: extended registers for SATA features
670 		 */
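		/*
		 * Register set 0 in the PCI "reg" property is configuration
		 * space, so BAR n corresponds to register number n + 1 in
		 * ddi_regs_map_setup() below.
		 */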
671 		for (bar = 0; bar < 6; bar++) {
672 			status = ddi_regs_map_setup(dip, bar + 1,
673 			    (caddr_t *)&nvc->nvc_bar_addr[bar], 0, 0, &accattr,
674 			    &nvc->nvc_bar_hdl[bar]);
675 
676 			if (status != DDI_SUCCESS) {
677 				NVLOG(NVDBG_INIT, nvc, NULL,
678 				    "ddi_regs_map_setup failure for bar"
679 				    " %d status = %d", bar, status);
680 				break;
681 			}
682 		}
683 
684 		attach_state |= ATTACH_PROGRESS_BARS;
685 
686 		/*
687 		 * initialize controller structures
688 		 */
689 		status = nv_init_ctl(nvc, pci_conf_handle);
690 
691 		if (status == NV_FAILURE) {
692 			NVLOG(NVDBG_INIT, nvc, NULL, "nv_init_ctl failed",
693 			    NULL);
694 
695 			break;
696 		}
697 
698 		attach_state |= ATTACH_PROGRESS_CTL_SETUP;
699 
700 		/*
701 		 * initialize mutexes
702 		 */
703 		mutex_init(&nvc->nvc_mutex, NULL, MUTEX_DRIVER,
704 		    DDI_INTR_PRI(nvc->nvc_intr_pri));
705 
706 		attach_state |= ATTACH_PROGRESS_MUTEX_INIT;
707 
708 		/*
709 		 * get supported interrupt types
710 		 */
711 		if (ddi_intr_get_supported_types(dip, &intr_types) !=
712 		    DDI_SUCCESS) {
713 			nv_cmn_err(CE_WARN, nvc, NULL,
714 			    "ddi_intr_get_supported_types failed");
715 
716 			break;
717 		}
718 
719 		NVLOG(NVDBG_INIT, nvc, NULL,
720 		    "ddi_intr_get_supported_types() returned: 0x%x",
721 		    intr_types);
722 
723 #ifdef NV_MSI_SUPPORTED
724 		if (intr_types & DDI_INTR_TYPE_MSI) {
725 			NVLOG(NVDBG_INIT, nvc, NULL,
726 			    "using MSI interrupt type", NULL);
727 
728 			/*
729 			 * Try MSI first, but fall back to legacy if MSI
730 			 * attach fails
731 			 */
732 			if (nv_add_msi_intrs(nvc) == DDI_SUCCESS) {
733 				nvc->nvc_intr_type = DDI_INTR_TYPE_MSI;
734 				attach_state |= ATTACH_PROGRESS_INTR_ADDED;
735 				NVLOG(NVDBG_INIT, nvc, NULL,
736 				    "MSI interrupt setup done", NULL);
737 			} else {
738 				nv_cmn_err(CE_CONT, nvc, NULL,
739 				    "MSI registration failed "
740 				    "will try Legacy interrupts");
741 			}
742 		}
743 #endif
744 
745 		/*
746 		 * Either the MSI interrupt setup has failed or only
747 		 * the fixed interrupts are available on the system.
748 		 */
749 		if (!(attach_state & ATTACH_PROGRESS_INTR_ADDED) &&
750 		    (intr_types & DDI_INTR_TYPE_FIXED)) {
751 
752 			NVLOG(NVDBG_INIT, nvc, NULL,
753 			    "using Legacy interrupt type", NULL);
754 
755 			if (nv_add_legacy_intrs(nvc) == DDI_SUCCESS) {
756 				nvc->nvc_intr_type = DDI_INTR_TYPE_FIXED;
757 				attach_state |= ATTACH_PROGRESS_INTR_ADDED;
758 				NVLOG(NVDBG_INIT, nvc, NULL,
759 				    "Legacy interrupt setup done", NULL);
760 			} else {
761 				nv_cmn_err(CE_WARN, nvc, NULL,
762 				    "legacy interrupt setup failed");
763 				NVLOG(NVDBG_INIT, nvc, NULL,
764 				    "legacy interrupt setup failed", NULL);
765 				break;
766 			}
767 		}
768 
769 		if (!(attach_state & ATTACH_PROGRESS_INTR_ADDED)) {
770 			NVLOG(NVDBG_INIT, nvc, NULL,
771 			    "no interrupts registered", NULL);
772 			break;
773 		}
774 
775 #ifdef SGPIO_SUPPORT
776 		/*
777 		 * save off the controller number
778 		 */
779 		(void) ddi_getlongprop(DDI_DEV_T_NONE, dip, DDI_PROP_DONTPASS,
780 		    "reg", (caddr_t)&regs, &rlen);
781 		nvc->nvc_ctlr_num = PCI_REG_FUNC_G(regs->pci_phys_hi);
782 		kmem_free(regs, rlen);
783 
784 		/*
785 		 * initialize SGPIO
786 		 */
787 		nv_sgp_led_init(nvc, pci_conf_handle);
788 #endif	/* SGPIO_SUPPORT */
789 
790 		/*
791 		 * Initiate link processing and device identification
792 		 */
793 		nv_init_port_link_processing(nvc);
794 		/*
795 		 * attach to sata module
796 		 */
797 		if (sata_hba_attach(nvc->nvc_dip,
798 		    &nvc->nvc_sata_hba_tran,
799 		    DDI_ATTACH) != DDI_SUCCESS) {
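			/*
			 * The flag is set even though sata_hba_attach()
			 * failed so that the failure path below cancels any
			 * port timers that may already be running.
			 */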
800 			attach_state |= ATTACH_PROGRESS_SATA_MODULE;
801 
802 			break;
803 		}
804 
805 		pci_config_teardown(&pci_conf_handle);
806 
807 		NVLOG(NVDBG_INIT, nvc, NULL, "nv_attach DDI_SUCCESS", NULL);
808 
809 		return (DDI_SUCCESS);
810 
811 	case DDI_RESUME:
812 
813 		nvc = ddi_get_soft_state(nv_statep, inst);
814 
815 		NVLOG(NVDBG_INIT, nvc, NULL,
816 		    "nv_attach(): DDI_RESUME inst %d", inst);
817 
818 		if (pci_config_setup(dip, &pci_conf_handle) != DDI_SUCCESS) {
819 			return (DDI_FAILURE);
820 		}
821 
822 		/*
823 		 * Set the PCI command register: enable IO/MEM/Master.
824 		 */
825 		command = pci_config_get16(pci_conf_handle, PCI_CONF_COMM);
826 		pci_config_put16(pci_conf_handle, PCI_CONF_COMM,
827 		    command|PCI_COMM_IO|PCI_COMM_MAE|PCI_COMM_ME);
828 
829 		/*
830 		 * Need to set bit 2 to 1 at config offset 0x50
831 		 * to enable access to the bar5 registers.
832 		 */
833 		reg32 = pci_config_get32(pci_conf_handle, NV_SATA_CFG_20);
834 
835 		if ((reg32 & NV_BAR5_SPACE_EN) != NV_BAR5_SPACE_EN) {
836 			pci_config_put32(pci_conf_handle, NV_SATA_CFG_20,
837 			    reg32 | NV_BAR5_SPACE_EN);
838 		}
839 
840 		nvc->nvc_state &= ~NV_CTRL_SUSPEND;
841 
842 		for (i = 0; i < NV_MAX_PORTS(nvc); i++) {
843 			nv_resume(&(nvc->nvc_port[i]));
844 		}
845 
846 		pci_config_teardown(&pci_conf_handle);
847 
848 		return (DDI_SUCCESS);
849 
850 	default:
851 		return (DDI_FAILURE);
852 	}
853 
854 
855 	/*
856 	 * DDI_ATTACH failure path starts here
857 	 */
858 
859 	if (attach_state & ATTACH_PROGRESS_INTR_ADDED) {
860 		nv_rem_intrs(nvc);
861 	}
862 
863 	if (attach_state & ATTACH_PROGRESS_SATA_MODULE) {
864 		/*
865 		 * Remove timers
866 		 */
867 		int port = 0;
868 		nv_port_t *nvp;
869 
870 		for (; port < NV_MAX_PORTS(nvc); port++) {
871 			nvp = &(nvc->nvc_port[port]);
872 			if (nvp->nvp_timeout_id != 0) {
873 				(void) untimeout(nvp->nvp_timeout_id);
874 			}
875 		}
876 	}
877 
878 	if (attach_state & ATTACH_PROGRESS_MUTEX_INIT) {
879 		mutex_destroy(&nvc->nvc_mutex);
880 	}
881 
882 	if (attach_state & ATTACH_PROGRESS_CTL_SETUP) {
883 		nv_uninit_ctl(nvc);
884 	}
885 
886 	if (attach_state & ATTACH_PROGRESS_BARS) {
887 		while (--bar >= 0) {
888 			ddi_regs_map_free(&nvc->nvc_bar_hdl[bar]);
889 		}
890 	}
891 
892 	if (attach_state & ATTACH_PROGRESS_STATEP_ALLOC) {
893 		ddi_soft_state_free(nv_statep, inst);
894 	}
895 
896 	if (attach_state & ATTACH_PROGRESS_CONF_HANDLE) {
897 		pci_config_teardown(&pci_conf_handle);
898 	}
899 
900 	cmn_err(CE_WARN, "nv_sata%d attach failed", inst);
901 
902 	return (DDI_FAILURE);
903 }
904 
905 
906 static int
907 nv_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
908 {
909 	int i, port, inst = ddi_get_instance(dip);
910 	nv_ctl_t *nvc;
911 	nv_port_t *nvp;
912 
913 	nvc = ddi_get_soft_state(nv_statep, inst);
914 
915 	switch (cmd) {
916 
917 	case DDI_DETACH:
918 
919 		NVLOG(NVDBG_INIT, nvc, NULL, "nv_detach: DDI_DETACH", NULL);
920 
921 		/*
922 		 * Remove interrupts
923 		 */
924 		nv_rem_intrs(nvc);
925 
926 		/*
927 		 * Remove timers
928 		 */
929 		for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
930 			nvp = &(nvc->nvc_port[port]);
931 			if (nvp->nvp_timeout_id != 0) {
932 				(void) untimeout(nvp->nvp_timeout_id);
933 			}
934 		}
935 
936 		/*
937 		 * Remove maps
938 		 */
939 		for (i = 0; i < 6; i++) {
940 			ddi_regs_map_free(&nvc->nvc_bar_hdl[i]);
941 		}
942 
943 		/*
944 		 * Destroy mutexes
945 		 */
946 		mutex_destroy(&nvc->nvc_mutex);
947 
948 		/*
949 		 * Uninitialize the controller structures
950 		 */
951 		nv_uninit_ctl(nvc);
952 
953 #ifdef SGPIO_SUPPORT
954 		/*
955 		 * release SGPIO resources
956 		 */
957 		nv_sgp_cleanup(nvc);
958 #endif
959 
960 		/*
961 		 * unregister from the sata module
962 		 */
963 		(void) sata_hba_detach(nvc->nvc_dip, DDI_DETACH);
964 
965 		/*
966 		 * Free soft state
967 		 */
968 		ddi_soft_state_free(nv_statep, inst);
969 
970 		return (DDI_SUCCESS);
971 
972 	case DDI_SUSPEND:
973 
974 		NVLOG(NVDBG_INIT, nvc, NULL, "nv_detach: DDI_SUSPEND", NULL);
975 
976 		for (i = 0; i < NV_MAX_PORTS(nvc); i++) {
977 			nv_suspend(&(nvc->nvc_port[i]));
978 		}
979 
980 		nvc->nvc_state |= NV_CTRL_SUSPEND;
981 
982 		return (DDI_SUCCESS);
983 
984 	default:
985 		return (DDI_FAILURE);
986 	}
987 }
988 
989 
990 /*ARGSUSED*/
991 static int
992 nv_getinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
993 {
994 	nv_ctl_t *nvc;
995 	int instance;
996 	dev_t dev;
997 
998 	dev = (dev_t)arg;
999 	instance = getminor(dev);
1000 
1001 	switch (infocmd) {
1002 	case DDI_INFO_DEVT2DEVINFO:
1003 		nvc = ddi_get_soft_state(nv_statep,  instance);
1004 		if (nvc != NULL) {
1005 			*result = nvc->nvc_dip;
1006 			return (DDI_SUCCESS);
1007 		} else {
1008 			*result = NULL;
1009 			return (DDI_FAILURE);
1010 		}
1011 	case DDI_INFO_DEVT2INSTANCE:
1012 		*(int *)result = instance;
1013 		break;
1014 	default:
1015 		break;
1016 	}
1017 	return (DDI_SUCCESS);
1018 }
1019 
1020 
1021 #ifdef SGPIO_SUPPORT
1022 /* ARGSUSED */
1023 static int
1024 nv_open(dev_t *devp, int flag, int otyp, cred_t *credp)
1025 {
1026 	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, getminor(*devp));
1027 
1028 	if (nvc == NULL) {
1029 		return (ENXIO);
1030 	}
1031 
1032 	return (0);
1033 }
1034 
1035 
1036 /* ARGSUSED */
1037 static int
1038 nv_close(dev_t dev, int flag, int otyp, cred_t *credp)
1039 {
1040 	return (0);
1041 }
1042 
1043 
1044 /* ARGSUSED */
1045 static int
1046 nv_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp, int *rvalp)
1047 {
1048 	nv_ctl_t *nvc;
1049 	int inst;
1050 	int status;
1051 	int ctlr, port;
1052 	int drive;
1053 	uint8_t curr_led;
1054 	struct dc_led_ctl led;
1055 
1056 	inst = getminor(dev);
1057 	if (inst == -1) {
1058 		return (EBADF);
1059 	}
1060 
1061 	nvc = ddi_get_soft_state(nv_statep, inst);
1062 	if (nvc == NULL) {
1063 		return (EBADF);
1064 	}
1065 
1066 	if ((nvc->nvc_sgp_cbp == NULL) || (nvc->nvc_sgp_cmn == NULL)) {
1067 		return (EIO);
1068 	}
1069 
1070 	switch (cmd) {
1071 	case DEVCTL_SET_LED:
1072 		status = ddi_copyin((void *)arg, &led,
1073 		    sizeof (struct dc_led_ctl), mode);
1074 		if (status != 0)
1075 			return (EFAULT);
1076 
1077 		/*
1078 		 * Since only the first two controllers currently support
1079 		 * SGPIO (as per NVIDIA docs), this code will as well.
1080 		 * Note that this validates the port value within led_state
1081 		 * as well.
1082 		 */
1083 
1084 		ctlr = SGP_DRV_TO_CTLR(led.led_number);
1085 		if ((ctlr != 0) && (ctlr != 1))
1086 			return (ENXIO);
1087 
1088 		if ((led.led_state & DCL_STATE_FAST_BLNK) ||
1089 		    (led.led_state & DCL_STATE_SLOW_BLNK)) {
1090 			return (EINVAL);
1091 		}
1092 
1093 		drive = led.led_number;
1094 
1095 		if ((led.led_ctl_active == DCL_CNTRL_OFF) ||
1096 		    (led.led_state == DCL_STATE_OFF)) {
1097 
1098 			if (led.led_type == DCL_TYPE_DEVICE_FAIL) {
1099 				nv_sgp_error(nvc, drive, TR_ERROR_DISABLE);
1100 			} else if (led.led_type == DCL_TYPE_DEVICE_OK2RM) {
1101 				nv_sgp_locate(nvc, drive, TR_LOCATE_DISABLE);
1102 			} else {
1103 				return (ENXIO);
1104 			}
1105 
1106 			port = SGP_DRV_TO_PORT(led.led_number);
1107 			nvc->nvc_port[port].nvp_sgp_ioctl_mod |= led.led_type;
1108 		}
1109 
1110 		if (led.led_ctl_active == DCL_CNTRL_ON) {
1111 			if (led.led_type == DCL_TYPE_DEVICE_FAIL) {
1112 				nv_sgp_error(nvc, drive, TR_ERROR_ENABLE);
1113 			} else if (led.led_type == DCL_TYPE_DEVICE_OK2RM) {
1114 				nv_sgp_locate(nvc, drive, TR_LOCATE_ENABLE);
1115 			} else {
1116 				return (ENXIO);
1117 			}
1118 
1119 			port = SGP_DRV_TO_PORT(led.led_number);
1120 			nvc->nvc_port[port].nvp_sgp_ioctl_mod |= led.led_type;
1121 		}
1122 
1123 		break;
1124 
1125 	case DEVCTL_GET_LED:
1126 		status = ddi_copyin((void *)arg, &led,
1127 		    sizeof (struct dc_led_ctl), mode);
1128 		if (status != 0)
1129 			return (EFAULT);
1130 
1131 		/*
1132 		 * Since only the first two controllers currently support
1133 		 * SGPIO (as per NVIDIA docs), this code will as well.
1134 		 * Note that this validates the port value within led_state
1135 		 * as well.
1136 		 */
1137 
1138 		ctlr = SGP_DRV_TO_CTLR(led.led_number);
1139 		if ((ctlr != 0) && (ctlr != 1))
1140 			return (ENXIO);
1141 
1142 		curr_led = SGPIO0_TR_DRV(nvc->nvc_sgp_cbp->sgpio0_tr,
1143 		    led.led_number);
1144 
1145 		port = SGP_DRV_TO_PORT(led.led_number);
1146 		if (nvc->nvc_port[port].nvp_sgp_ioctl_mod & led.led_type) {
1147 			led.led_ctl_active = DCL_CNTRL_ON;
1148 
1149 			if (led.led_type == DCL_TYPE_DEVICE_FAIL) {
1150 				if (TR_ERROR(curr_led) == TR_ERROR_DISABLE)
1151 					led.led_state = DCL_STATE_OFF;
1152 				else
1153 					led.led_state = DCL_STATE_ON;
1154 			} else if (led.led_type == DCL_TYPE_DEVICE_OK2RM) {
1155 				if (TR_LOCATE(curr_led) == TR_LOCATE_DISABLE)
1156 					led.led_state = DCL_STATE_OFF;
1157 				else
1158 					led.led_state = DCL_STATE_ON;
1159 			} else {
1160 				return (ENXIO);
1161 			}
1162 		} else {
1163 			led.led_ctl_active = DCL_CNTRL_OFF;
1164 			/*
1165 			 * Not really off, but never set and no constant for
1166 			 * tri-state
1167 			 */
1168 			led.led_state = DCL_STATE_OFF;
1169 		}
1170 
1171 		status = ddi_copyout(&led, (void *)arg,
1172 		    sizeof (struct dc_led_ctl), mode);
1173 		if (status != 0)
1174 			return (EFAULT);
1175 
1176 		break;
1177 
1178 	case DEVCTL_NUM_LEDS:
1179 		led.led_number = SGPIO_DRV_CNT_VALUE;
1180 		led.led_ctl_active = 1;
1181 		led.led_type = 3;
1182 
1183 		/*
1184 		 * According to documentation, NVIDIA SGPIO is supposed to
1185 		 * support blinking, but it does not seem to work in practice.
1186 		 */
1187 		led.led_state = DCL_STATE_ON;
1188 
1189 		status = ddi_copyout(&led, (void *)arg,
1190 		    sizeof (struct dc_led_ctl), mode);
1191 		if (status != 0)
1192 			return (EFAULT);
1193 
1194 		break;
1195 
1196 	default:
1197 		return (EINVAL);
1198 	}
1199 
1200 	return (0);
1201 }
1202 #endif	/* SGPIO_SUPPORT */
1203 
1204 
1205 /*
1206  * Called by sata module to probe a port.  Port and device state
1207  * are not changed here... only reported back to the sata module.
1208  *
1209  */
1210 static int
1211 nv_sata_probe(dev_info_t *dip, sata_device_t *sd)
1212 {
1213 	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1214 	uint8_t cport = sd->satadev_addr.cport;
1215 	uint8_t pmport = sd->satadev_addr.pmport;
1216 	uint8_t qual = sd->satadev_addr.qual;
1217 	nv_port_t *nvp;
1218 
1219 	if (cport >= NV_MAX_PORTS(nvc)) {
1220 		sd->satadev_type = SATA_DTYPE_NONE;
1221 		sd->satadev_state = SATA_STATE_UNKNOWN;
1222 
1223 		return (SATA_FAILURE);
1224 	}
1225 
1226 	ASSERT(nvc->nvc_port != NULL);
1227 	nvp = &(nvc->nvc_port[cport]);
1228 	ASSERT(nvp != NULL);
1229 
1230 	NVLOG(NVDBG_ENTRY, nvc, nvp,
1231 	    "nv_sata_probe: enter cport: 0x%x, pmport: 0x%x, "
1232 	    "qual: 0x%x", cport, pmport, qual);
1233 
1234 	mutex_enter(&nvp->nvp_mutex);
1235 
1236 	/*
1237 	 * This check seems to be done in the SATA module.
1238 	 * It may not be required here
1239 	 */
1240 	if (nvp->nvp_state & NV_PORT_INACTIVE) {
1241 		nv_cmn_err(CE_WARN, nvc, nvp,
1242 		    "port inactive.  Use cfgadm to activate");
1243 		sd->satadev_type = SATA_DTYPE_UNKNOWN;
1244 		sd->satadev_state = SATA_PSTATE_SHUTDOWN;
1245 		mutex_exit(&nvp->nvp_mutex);
1246 
1247 		return (SATA_SUCCESS);
1248 	}
1249 
1250 	if (nvp->nvp_state & NV_PORT_FAILED) {
1251 		NVLOG(NVDBG_RESET, nvp->nvp_ctlp, nvp,
1252 		    "probe: port failed", NULL);
1253 		sd->satadev_type = SATA_DTYPE_NONE;
1254 		sd->satadev_state = SATA_PSTATE_FAILED;
1255 		mutex_exit(&nvp->nvp_mutex);
1256 
1257 		return (SATA_SUCCESS);
1258 	}
1259 
1260 	if (qual == SATA_ADDR_PMPORT) {
1261 		sd->satadev_type = SATA_DTYPE_NONE;
1262 		sd->satadev_state = SATA_STATE_UNKNOWN;
1263 		mutex_exit(&nvp->nvp_mutex);
1264 		nv_cmn_err(CE_WARN, nvc, nvp,
1265 		    "controller does not support port multiplier");
1266 
1267 		return (SATA_SUCCESS);
1268 	}
1269 
1270 	sd->satadev_state = SATA_PSTATE_PWRON;
1271 
1272 	nv_copy_registers(nvp, sd, NULL);
1273 
1274 	if (nvp->nvp_state & (NV_PORT_RESET | NV_PORT_RESET_RETRY)) {
1275 		/*
1276 		 * We are waiting for reset to complete and to fetch
1277 		 * a signature.
1278 		 * Reset will cause the link to go down for a short period of
1279 		 * time.  If reset processing continues for less than
1280 		 * NV_LINK_DOWN_TIMEOUT, fake the status of the link so that
1281 		 * we will not report intermittent link down.
1282 		 * Maybe we should report previous link state?
1283 		 */
1284 		if (TICK_TO_MSEC(ddi_get_lbolt() - nvp->nvp_reset_time) <
1285 		    NV_LINK_DOWN_TIMEOUT) {
1286 			SSTATUS_SET_IPM(sd->satadev_scr.sstatus,
1287 			    SSTATUS_IPM_ACTIVE);
1288 			SSTATUS_SET_DET(sd->satadev_scr.sstatus,
1289 			    SSTATUS_DET_DEVPRE_PHYCOM);
1290 			sd->satadev_type = nvp->nvp_type;
1291 			mutex_exit(&nvp->nvp_mutex);
1292 
1293 			return (SATA_SUCCESS);
1294 		}
1295 	}
1296 	/*
1297 	 * Just report the current port state
1298 	 */
1299 	sd->satadev_type = nvp->nvp_type;
1300 	sd->satadev_state = nvp->nvp_state | SATA_PSTATE_PWRON;
1301 	mutex_exit(&nvp->nvp_mutex);
1302 
1303 #ifdef SGPIO_SUPPORT
1304 	if (nvp->nvp_type == SATA_DTYPE_ATADISK) {
1305 		nv_sgp_drive_connect(nvp->nvp_ctlp, SGP_CTLR_PORT_TO_DRV(
1306 		    nvp->nvp_ctlp->nvc_ctlr_num, nvp->nvp_port_num));
1307 	} else {
1308 		nv_sgp_drive_disconnect(nvp->nvp_ctlp, SGP_CTLR_PORT_TO_DRV(
1309 		    nvp->nvp_ctlp->nvc_ctlr_num, nvp->nvp_port_num));
1310 	}
1311 #endif
1312 
1313 	return (SATA_SUCCESS);
1314 }
1315 
1316 
1317 /*
1318  * Called by sata module to start a new command.
1319  */
1320 static int
1321 nv_sata_start(dev_info_t *dip, sata_pkt_t *spkt)
1322 {
1323 	int cport = spkt->satapkt_device.satadev_addr.cport;
1324 	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1325 	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1326 	int ret;
1327 
1328 	NVLOG(NVDBG_ENTRY, nvc, nvp, "nv_sata_start: opmode: 0x%x cmd=%x",
1329 	    spkt->satapkt_op_mode, spkt->satapkt_cmd.satacmd_cmd_reg);
1330 
1331 	mutex_enter(&nvp->nvp_mutex);
1332 
1333 	/*
1334 	 * record number of commands for debugging
1335 	 */
1336 	nvp->nvp_seq++;
1337 
1338 	if ((nvp->nvp_state & NV_PORT_INIT) == 0) {
1339 		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
1340 		NVLOG(NVDBG_ERRS, nvc, nvp,
1341 		    "nv_sata_start: port not yet initialized", NULL);
1342 		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
1343 		mutex_exit(&nvp->nvp_mutex);
1344 
1345 		return (SATA_TRAN_PORT_ERROR);
1346 	}
1347 
1348 	if (nvp->nvp_state & NV_PORT_INACTIVE) {
1349 		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
1350 		NVLOG(NVDBG_ERRS, nvc, nvp,
1351 		    "nv_sata_start: NV_PORT_INACTIVE", NULL);
1352 		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
1353 		mutex_exit(&nvp->nvp_mutex);
1354 
1355 		return (SATA_TRAN_PORT_ERROR);
1356 	}
1357 
1358 	if (nvp->nvp_state & NV_PORT_FAILED) {
1359 		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
1360 		NVLOG(NVDBG_ERRS, nvc, nvp,
1361 		    "nv_sata_start: NV_PORT_FAILED state", NULL);
1362 		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
1363 		mutex_exit(&nvp->nvp_mutex);
1364 
1365 		return (SATA_TRAN_PORT_ERROR);
1366 	}
1367 
1368 	if (nvp->nvp_state & NV_PORT_RESET) {
1369 		NVLOG(NVDBG_ERRS, nvc, nvp,
1370 		    "still waiting for reset completion", NULL);
1371 		spkt->satapkt_reason = SATA_PKT_BUSY;
1372 		mutex_exit(&nvp->nvp_mutex);
1373 
1374 		/*
1375 		 * If in panic, timeouts do not occur, so fake one
1376 		 * so that the signature can be acquired to complete
1377 		 * the reset handling.
1378 		 */
1379 		if (ddi_in_panic()) {
1380 			nv_timeout(nvp);
1381 		}
1382 
1383 		return (SATA_TRAN_BUSY);
1384 	}
1385 
1386 	if (nvp->nvp_type == SATA_DTYPE_NONE) {
1387 		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
1388 		NVLOG(NVDBG_ERRS, nvc, nvp,
1389 		    "nv_sata_start: SATA_DTYPE_NONE", NULL);
1390 		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
1391 		mutex_exit(&nvp->nvp_mutex);
1392 
1393 		return (SATA_TRAN_PORT_ERROR);
1394 	}
1395 
1396 	if (spkt->satapkt_device.satadev_type == SATA_DTYPE_PMULT) {
1397 		ASSERT(nvp->nvp_type == SATA_DTYPE_PMULT);
1398 		nv_cmn_err(CE_WARN, nvc, nvp,
1399 		    "port multipliers not supported by controller");
1400 		spkt->satapkt_reason = SATA_PKT_CMD_UNSUPPORTED;
1401 		mutex_exit(&nvp->nvp_mutex);
1402 
1403 		return (SATA_TRAN_CMD_UNSUPPORTED);
1404 	}
1405 
1406 	/*
1407 	 * after a device reset, and then when sata module restore processing
1408 	 * is complete, the sata module will set sata_clear_dev_reset which
1409 	 * indicates that restore processing has completed and normal
1410 	 * non-restore related commands should be processed.
1411 	 */
1412 	if (spkt->satapkt_cmd.satacmd_flags.sata_clear_dev_reset) {
1413 		nvp->nvp_state &= ~NV_PORT_RESTORE;
1414 		NVLOG(NVDBG_RESET, nvc, nvp,
1415 		    "nv_sata_start: clearing NV_PORT_RESTORE", NULL);
1416 	}
1417 
1418 	/*
1419 	 * if the device was recently reset as indicated by NV_PORT_RESTORE,
1420 	 * only allow commands which restore device state.  The sata module
1421 	 * marks such commands with sata_ignore_dev_reset.
1422 	 *
1423 	 * during coredump, nv_reset is called but then the restore
1424 	 * doesn't happen.  For now, work around this by ignoring the wait for
1425 	 * restore if the system is panicking.
1426 	 */
1427 	if ((nvp->nvp_state & NV_PORT_RESTORE) &&
1428 	    !(spkt->satapkt_cmd.satacmd_flags.sata_ignore_dev_reset) &&
1429 	    (ddi_in_panic() == 0)) {
1430 		spkt->satapkt_reason = SATA_PKT_BUSY;
1431 		NVLOG(NVDBG_RESET, nvc, nvp,
1432 		    "nv_sata_start: waiting for restore ", NULL);
1433 		mutex_exit(&nvp->nvp_mutex);
1434 
1435 		return (SATA_TRAN_BUSY);
1436 	}
1437 
1438 	if (nvp->nvp_state & NV_PORT_ABORTING) {
1439 		spkt->satapkt_reason = SATA_PKT_BUSY;
1440 		NVLOG(NVDBG_ERRS, nvc, nvp,
1441 		    "nv_sata_start: NV_PORT_ABORTING", NULL);
1442 		mutex_exit(&nvp->nvp_mutex);
1443 
1444 		return (SATA_TRAN_BUSY);
1445 	}
1446 
1447 	/* Clear SError to be able to check errors after the command failure */
1448 	nv_put32(nvp->nvp_ctlp->nvc_bar_hdl[5], nvp->nvp_serror, 0xffffffff);
1449 
1450 	if (spkt->satapkt_op_mode &
1451 	    (SATA_OPMODE_POLLING|SATA_OPMODE_SYNCH)) {
1452 
1453 		ret = nv_start_sync(nvp, spkt);
1454 
1455 		mutex_exit(&nvp->nvp_mutex);
1456 
1457 		return (ret);
1458 	}
1459 
1460 	/*
1461 	 * start the command asynchronously
1462 	 */
1463 	ret = nv_start_async(nvp, spkt);
1464 
1465 	mutex_exit(&nvp->nvp_mutex);
1466 
1467 	return (ret);
1468 }
1469 
1470 
1471 /*
1472  * SATA_OPMODE_POLLING implies the driver is in a
1473  * synchronous mode, and SATA_OPMODE_SYNCH is also set.
1474  * If only SATA_OPMODE_SYNCH is set, the driver can use
1475  * interrupts and sleep wait on a cv.
1476  *
1477  * If SATA_OPMODE_POLLING is set, the driver can't use
1478  * interrupts and must busy wait and simulate the
1479  * interrupts by waiting for BSY to be cleared.
1480  *
1481  * Synchronous mode has to return BUSY if there are
1482  * any other commands already on the drive.
1483  */
1484 static int
1485 nv_start_sync(nv_port_t *nvp, sata_pkt_t *spkt)
1486 {
1487 	nv_ctl_t *nvc = nvp->nvp_ctlp;
1488 	int ret;
1489 
1490 	NVLOG(NVDBG_SYNC, nvp->nvp_ctlp, nvp, "nv_sata_satapkt_sync: entry",
1491 	    NULL);
1492 
1493 	if (nvp->nvp_ncq_run != 0 || nvp->nvp_non_ncq_run != 0) {
1494 		spkt->satapkt_reason = SATA_PKT_BUSY;
1495 		NVLOG(NVDBG_SYNC, nvp->nvp_ctlp, nvp,
1496 		    "nv_sata_satapkt_sync: device is busy, sync cmd rejected "
1497 		    "ncq_run: %d non_ncq_run: %d  spkt: %p",
1498 		    nvp->nvp_ncq_run, nvp->nvp_non_ncq_run,
1499 		    (&(nvp->nvp_slot[0]))->nvslot_spkt);
1500 
1501 		return (SATA_TRAN_BUSY);
1502 	}
1503 
1504 	/*
1505 	 * if SYNC but not POLL, verify that this is not on interrupt thread.
1506 	 */
1507 	if (!(spkt->satapkt_op_mode & SATA_OPMODE_POLLING) &&
1508 	    servicing_interrupt()) {
1509 		spkt->satapkt_reason = SATA_PKT_BUSY;
1510 		NVLOG(NVDBG_SYNC, nvp->nvp_ctlp, nvp,
1511 		    "SYNC mode not allowed during interrupt", NULL);
1512 
1513 		return (SATA_TRAN_BUSY);
1514 
1515 	}
1516 
1517 	/*
1518 	 * disable interrupt generation if in polled mode
1519 	 */
1520 	if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) {
1521 		(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
1522 	}
1523 
1524 	if ((ret = nv_start_common(nvp, spkt)) != SATA_TRAN_ACCEPTED) {
1525 		if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) {
1526 			(*(nvc->nvc_set_intr))(nvp, NV_INTR_ENABLE);
1527 		}
1528 
1529 		return (ret);
1530 	}
1531 
1532 	if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) {
1533 		mutex_exit(&nvp->nvp_mutex);
1534 		ret = nv_poll_wait(nvp, spkt);
1535 		mutex_enter(&nvp->nvp_mutex);
1536 
1537 		(*(nvc->nvc_set_intr))(nvp, NV_INTR_ENABLE);
1538 
1539 		NVLOG(NVDBG_SYNC, nvp->nvp_ctlp, nvp, "nv_sata_satapkt_sync:"
1540 		    " done, reason %d", ret);
1541 
1542 		return (ret);
1543 	}
1544 
1545 	/*
1546 	 * non-polling synchronous mode handling.  The interrupt will signal
1547 	 * when the IO is completed.
1548 	 */
1549 	cv_wait(&nvp->nvp_poll_cv, &nvp->nvp_mutex);
1550 
1551 	if (spkt->satapkt_reason != SATA_PKT_COMPLETED) {
1552 
1553 		spkt->satapkt_reason = SATA_PKT_TIMEOUT;
1554 	}
1555 
1556 	NVLOG(NVDBG_SYNC, nvp->nvp_ctlp, nvp, "nv_sata_satapkt_sync:"
1557 	    " done, reason %d", spkt->satapkt_reason);
1558 
1559 	return (SATA_TRAN_ACCEPTED);
1560 }
1561 
1562 
1563 static int
1564 nv_poll_wait(nv_port_t *nvp, sata_pkt_t *spkt)
1565 {
1566 	int ret;
1567 	nv_ctl_t *nvc = nvp->nvp_ctlp;
1568 #if ! defined(__lock_lint)
1569 	nv_slot_t *nv_slotp = &(nvp->nvp_slot[0]); /* not NCQ aware */
1570 #endif
1571 
1572 	NVLOG(NVDBG_SYNC, nvc, nvp, "nv_poll_wait: enter", NULL);
1573 
1574 	for (;;) {
1575 
1576 		NV_DELAY_NSEC(400);
1577 
1578 		NVLOG(NVDBG_SYNC, nvc, nvp, "nv_poll_wait: before nv_wait",
1579 		    NULL);
1580 		if (nv_wait(nvp, 0, SATA_STATUS_BSY,
1581 		    NV_SEC2USEC(spkt->satapkt_time), NV_NOSLEEP) == B_FALSE) {
1582 			mutex_enter(&nvp->nvp_mutex);
1583 			spkt->satapkt_reason = SATA_PKT_TIMEOUT;
1584 			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
1585 			nvp->nvp_state |= NV_PORT_RESET;
1586 			nvp->nvp_state &= ~(NV_PORT_RESTORE |
1587 			    NV_PORT_RESET_RETRY);
1588 			nv_reset(nvp, "poll_wait");
1589 			nv_complete_io(nvp, spkt, 0);
1590 			mutex_exit(&nvp->nvp_mutex);
1591 			NVLOG(NVDBG_SYNC, nvc, nvp, "nv_poll_wait: "
1592 			    "SATA_STATUS_BSY", NULL);
1593 
1594 			return (SATA_TRAN_ACCEPTED);
1595 		}
1596 
1597 		NVLOG(NVDBG_SYNC, nvc, nvp, "nv_poll_wait: before nvc_intr",
1598 		    NULL);
1599 
1600 		/*
1601 		 * Simulate interrupt.
1602 		 */
1603 		ret = (*(nvc->nvc_interrupt))((caddr_t)nvc, NULL);
1604 		NVLOG(NVDBG_SYNC, nvc, nvp, "nv_poll_wait: after nvc_intr",
1605 		    NULL);
1606 
1607 		if (ret != DDI_INTR_CLAIMED) {
1608 			NVLOG(NVDBG_SYNC, nvc, nvp, "nv_poll_wait:"
1609 			    " unclaimed -- resetting", NULL);
1610 			mutex_enter(&nvp->nvp_mutex);
1611 			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
1612 			nvp->nvp_state |= NV_PORT_RESET;
1613 			nvp->nvp_state &= ~(NV_PORT_RESTORE |
1614 			    NV_PORT_RESET_RETRY);
1615 			nv_reset(nvp, "poll_wait intr not claimed");
1616 			spkt->satapkt_reason = SATA_PKT_TIMEOUT;
1617 			nv_complete_io(nvp, spkt, 0);
1618 			mutex_exit(&nvp->nvp_mutex);
1619 
1620 			return (SATA_TRAN_ACCEPTED);
1621 		}
1622 
1623 #if ! defined(__lock_lint)
1624 		if (nv_slotp->nvslot_flags == NVSLOT_COMPLETE) {
1625 			/*
1626 			 * packet is complete
1627 			 */
1628 			return (SATA_TRAN_ACCEPTED);
1629 		}
1630 #endif
1631 	}
1632 	/*NOTREACHED*/
1633 }
1634 
1635 
1636 /*
1637  * Called by sata module to abort outstanding packets.
1638  */
1639 /*ARGSUSED*/
1640 static int
1641 nv_sata_abort(dev_info_t *dip, sata_pkt_t *spkt, int flag)
1642 {
1643 	int cport = spkt->satapkt_device.satadev_addr.cport;
1644 	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1645 	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1646 	int c_a, ret;
1647 
1648 	ASSERT(cport < NV_MAX_PORTS(nvc));
1649 	NVLOG(NVDBG_ENTRY, nvc, nvp, "nv_sata_abort %d %p", flag, spkt);
1650 
1651 	mutex_enter(&nvp->nvp_mutex);
1652 
1653 	if (nvp->nvp_state & NV_PORT_INACTIVE) {
1654 		mutex_exit(&nvp->nvp_mutex);
1655 		nv_cmn_err(CE_WARN, nvc, nvp,
1656 		    "abort request failed: port inactive");
1657 
1658 		return (SATA_FAILURE);
1659 	}
1660 
1661 	/*
1662 	 * if spkt == NULL, then abort all commands
1663 	 */
1664 	c_a = nv_abort_active(nvp, spkt, SATA_PKT_ABORTED, B_TRUE);
1665 
1666 	if (c_a) {
1667 		NVLOG(NVDBG_ENTRY, nvc, nvp,
1668 		    "packets aborted running=%d", c_a);
1669 		ret = SATA_SUCCESS;
1670 	} else {
1671 		if (spkt == NULL) {
1672 			NVLOG(NVDBG_ENTRY, nvc, nvp, "no spkts to abort", NULL);
1673 		} else {
1674 			NVLOG(NVDBG_ENTRY, nvc, nvp,
1675 			    "can't find spkt to abort", NULL);
1676 		}
1677 		ret = SATA_FAILURE;
1678 	}
1679 
1680 	mutex_exit(&nvp->nvp_mutex);
1681 
1682 	return (ret);
1683 }
1684 
1685 
1686 /*
1687  * if spkt == NULL abort all pkts running, otherwise
1688  * abort the requested packet.  must be called with nvp_mutex
1689  * held and returns with it held.  Not NCQ aware.
1690  */
1691 static int
1692 nv_abort_active(nv_port_t *nvp, sata_pkt_t *spkt, int abort_reason, int flag)
1693 {
1694 	int aborted = 0, i, reset_once = B_FALSE;
1695 	struct nv_slot *nv_slotp;
1696 	sata_pkt_t *spkt_slot;
1697 
1698 	ASSERT(MUTEX_HELD(&nvp->nvp_mutex));
1699 
1700 	/*
1701 	 * return if the port is not configured
1702 	 */
1703 	if (nvp->nvp_slot == NULL) {
1704 		NVLOG(NVDBG_ENTRY, nvp->nvp_ctlp, nvp,
1705 		    "nv_abort_active: not configured so returning", NULL);
1706 
1707 		return (0);
1708 	}
1709 
1710 	NVLOG(NVDBG_ENTRY, nvp->nvp_ctlp, nvp, "nv_abort_active", NULL);
1711 
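	/*
	 * Flag the port as aborting; nv_sata_start() rejects new commands
	 * with SATA_TRAN_BUSY while this flag is set.  It is cleared at the
	 * bottom of this function.
	 */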
1712 	nvp->nvp_state |= NV_PORT_ABORTING;
1713 
1714 	for (i = 0; i < nvp->nvp_queue_depth; i++) {
1715 
1716 		nv_slotp = &(nvp->nvp_slot[i]);
1717 		spkt_slot = nv_slotp->nvslot_spkt;
1718 
1719 		/*
1720 		 * skip if there is no active command in the slot
1721 		 */
1722 		if (spkt_slot == NULL) {
1723 			continue;
1724 		}
1725 
1726 		/*
1727 		 * if a specific packet was requested, skip if
1728 		 * this is not a match
1729 		 */
1730 		if ((spkt != NULL) && (spkt != spkt_slot)) {
1731 			continue;
1732 		}
1733 
1734 		/*
1735 		 * stop the hardware.  This could need reworking
1736 		 * when NCQ is enabled in the driver.
1737 		 */
1738 		if (reset_once == B_FALSE) {
1739 			ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
1740 
1741 			/*
1742 			 * stop DMA engine
1743 			 */
1744 			nv_put8(bmhdl, nvp->nvp_bmicx,  0);
1745 
1746 			/*
1747 			 * Reset only if explicitly specified by the arg flag
1748 			 */
1749 			if (flag == B_TRUE) {
1750 				reset_once = B_TRUE;
1751 				nvp->nvp_state |= NV_PORT_RESET;
1752 				nvp->nvp_state &= ~(NV_PORT_RESTORE |
1753 				    NV_PORT_RESET_RETRY);
1754 				nv_reset(nvp, "abort_active");
1755 			}
1756 		}
1757 
1758 		spkt_slot->satapkt_reason = abort_reason;
1759 		nv_complete_io(nvp, spkt_slot, i);
1760 		aborted++;
1761 	}
1762 
1763 	nvp->nvp_state &= ~NV_PORT_ABORTING;
1764 
1765 	return (aborted);
1766 }
1767 
1768 
1769 /*
1770  * Called by sata module to reset a port, device, or the controller.
1771  */
1772 static int
1773 nv_sata_reset(dev_info_t *dip, sata_device_t *sd)
1774 {
1775 	int cport = sd->satadev_addr.cport;
1776 	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1777 	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1778 	int ret = SATA_SUCCESS;
1779 
1780 	ASSERT(cport < NV_MAX_PORTS(nvc));
1781 
1782 	NVLOG(NVDBG_ENTRY, nvc, nvp, "nv_sata_reset", NULL);
1783 
1784 	mutex_enter(&nvp->nvp_mutex);
1785 
1786 	switch (sd->satadev_addr.qual) {
1787 
1788 	case SATA_ADDR_CPORT:
1789 		/*FALLTHROUGH*/
1790 	case SATA_ADDR_DCPORT:
1791 		nvp->nvp_state |= NV_PORT_RESET;
1792 		nvp->nvp_state &= ~NV_PORT_RESTORE;
1793 		nv_reset(nvp, "sata_reset");
1794 		(void) nv_abort_active(nvp, NULL, SATA_PKT_RESET, B_FALSE);
1795 
1796 		break;
1797 	case SATA_ADDR_CNTRL:
1798 		NVLOG(NVDBG_ENTRY, nvc, nvp,
1799 		    "nv_sata_reset: controller reset not supported", NULL);
1800 
1801 		break;
1802 	case SATA_ADDR_PMPORT:
1803 	case SATA_ADDR_DPMPORT:
1804 		NVLOG(NVDBG_ENTRY, nvc, nvp,
1805 		    "nv_sata_reset: port multipliers not supported", NULL);
1806 		/*FALLTHROUGH*/
1807 	default:
1808 		/*
1809 		 * unsupported case
1810 		 */
1811 		ret = SATA_FAILURE;
1812 		break;
1813 	}
1814 
1815 	if (ret == SATA_SUCCESS) {
1816 		/*
1817 		 * If the port is inactive, do a quiet reset and don't attempt
1818 		 * to wait for reset completion or do any post reset processing
1819 		 */
1820 		if (nvp->nvp_state & NV_PORT_INACTIVE) {
1821 			nvp->nvp_state &= ~NV_PORT_RESET;
1822 			nvp->nvp_reset_time = 0;
1823 		}
1824 
1825 		/*
1826 		 * clear the port failed flag
1827 		 */
1828 		nvp->nvp_state &= ~NV_PORT_FAILED;
1829 	}
1830 
1831 	mutex_exit(&nvp->nvp_mutex);
1832 
1833 	return (ret);
1834 }
1835 
1836 
1837 /*
1838  * Sata entry point to handle port activation.  cfgadm -c connect
1839  */
1840 static int
1841 nv_sata_activate(dev_info_t *dip, sata_device_t *sd)
1842 {
1843 	int cport = sd->satadev_addr.cport;
1844 	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1845 	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1846 
1847 	ASSERT(cport < NV_MAX_PORTS(nvc));
1848 	NVLOG(NVDBG_ENTRY, nvc, nvp, "nv_sata_activate", NULL);
1849 
1850 	mutex_enter(&nvp->nvp_mutex);
1851 
1852 	sd->satadev_state = SATA_STATE_READY;
1853 
1854 	nv_copy_registers(nvp, sd, NULL);
1855 
1856 	(*(nvc->nvc_set_intr))(nvp, NV_INTR_ENABLE);
1857 
1858 	nvp->nvp_state &= ~NV_PORT_INACTIVE;
1859 	/* Initiate link probing and device signature acquisition */
1860 	nvp->nvp_type = SATA_DTYPE_NONE;
1861 	nvp->nvp_signature = 0;
1862 	nvp->nvp_state |= NV_PORT_RESET; /* | NV_PORT_PROBE; */
1863 	nvp->nvp_state &= ~(NV_PORT_RESTORE | NV_PORT_RESET_RETRY);
1864 	nv_reset(nvp, "sata_activate");
1865 
1866 	mutex_exit(&nvp->nvp_mutex);
1867 
1868 	return (SATA_SUCCESS);
1869 }
1870 
1871 
1872 /*
1873  * Sata entry point to handle port deactivation.  cfgadm -c disconnect
1874  */
1875 static int
1876 nv_sata_deactivate(dev_info_t *dip, sata_device_t *sd)
1877 {
1878 	int cport = sd->satadev_addr.cport;
1879 	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1880 	nv_port_t *nvp = &(nvc->nvc_port[cport]);
1881 
1882 	ASSERT(cport < NV_MAX_PORTS(nvc));
1883 	NVLOG(NVDBG_ENTRY, nvc, nvp, "nv_sata_deactivate", NULL);
1884 
1885 	mutex_enter(&nvp->nvp_mutex);
1886 
1887 	(void) nv_abort_active(nvp, NULL, SATA_PKT_ABORTED, B_FALSE);
1888 
1889 	/*
1890 	 * make the device inaccessible
1891 	 */
1892 	nvp->nvp_state |= NV_PORT_INACTIVE;
1893 
1894 	/*
1895 	 * disable the interrupts on port
1896 	 */
1897 	(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
1898 
1899 	sd->satadev_state = SATA_PSTATE_SHUTDOWN;
1900 	nv_copy_registers(nvp, sd, NULL);
1901 
1902 	mutex_exit(&nvp->nvp_mutex);
1903 
1904 	return (SATA_SUCCESS);
1905 }
1906 
1907 
1908 /*
1909  * find an empty slot in the driver's queue, increment counters,
1910  * and then invoke the appropriate PIO or DMA start routine.
1911  */
1912 static int
1913 nv_start_common(nv_port_t *nvp, sata_pkt_t *spkt)
1914 {
1915 	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
1916 	int on_bit = 0x01, slot, sactive, ret, ncq = 0;
1917 	uint8_t cmd = spkt->satapkt_cmd.satacmd_cmd_reg;
1918 	int direction = sata_cmdp->satacmd_flags.sata_data_direction;
1919 	nv_ctl_t *nvc = nvp->nvp_ctlp;
1920 	nv_slot_t *nv_slotp;
1921 	boolean_t dma_cmd;
1922 
1923 	NVLOG(NVDBG_DELIVER, nvc, nvp, "nv_start_common  entered: cmd: 0x%x",
1924 	    sata_cmdp->satacmd_cmd_reg);
1925 
1926 	if ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
1927 	    (cmd == SATAC_READ_FPDMA_QUEUED)) {
1928 		nvp->nvp_ncq_run++;
1929 		/*
1930 		 * search for an empty NCQ slot.  by the time, it's already
1931 		 * been determined by the caller that there is room on the
1932 		 * queue.
1933 		 */
1934 		for (slot = 0; slot < nvp->nvp_queue_depth; slot++,
1935 		    on_bit <<= 1) {
1936 			if ((nvp->nvp_sactive_cache & on_bit) == 0) {
1937 				break;
1938 			}
1939 		}
1940 
1941 		/*
1942 		 * the first empty slot found, should not exceed the queue
1943 		 * depth of the drive.  if it does it's an error.
1944 		 */
1945 		ASSERT(slot != nvp->nvp_queue_depth);
1946 
1947 		sactive = nv_get32(nvc->nvc_bar_hdl[5],
1948 		    nvp->nvp_sactive);
1949 		ASSERT((sactive & on_bit) == 0);
1950 		nv_put32(nvc->nvc_bar_hdl[5], nvp->nvp_sactive, on_bit);
1951 		NVLOG(NVDBG_DELIVER, nvc, nvp, "setting SACTIVE onbit: %X",
1952 		    on_bit);
1953 		nvp->nvp_sactive_cache |= on_bit;
1954 
1955 		ncq = NVSLOT_NCQ;
1956 
1957 	} else {
1958 		nvp->nvp_non_ncq_run++;
1959 		slot = 0;
1960 	}
1961 
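	/*
	 * claim the chosen slot and associate the packet with it
	 */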
1962 	nv_slotp = (nv_slot_t *)&nvp->nvp_slot[slot];
1963 
1964 	ASSERT(nv_slotp->nvslot_spkt == NULL);
1965 
1966 	nv_slotp->nvslot_spkt = spkt;
1967 	nv_slotp->nvslot_flags = ncq;
1968 
1969 	/*
1970 	 * the sata module doesn't indicate which commands utilize the
1971 	 * DMA engine, so find out using this switch table.
1972 	 */
1973 	switch (spkt->satapkt_cmd.satacmd_cmd_reg) {
1974 	case SATAC_READ_DMA_EXT:
1975 	case SATAC_WRITE_DMA_EXT:
1976 	case SATAC_WRITE_DMA:
1977 	case SATAC_READ_DMA:
1978 	case SATAC_READ_DMA_QUEUED:
1979 	case SATAC_READ_DMA_QUEUED_EXT:
1980 	case SATAC_WRITE_DMA_QUEUED:
1981 	case SATAC_WRITE_DMA_QUEUED_EXT:
1982 	case SATAC_READ_FPDMA_QUEUED:
1983 	case SATAC_WRITE_FPDMA_QUEUED:
1984 	case SATAC_DSM:
1985 		dma_cmd = B_TRUE;
1986 		break;
1987 	default:
1988 		dma_cmd = B_FALSE;
1989 	}
1990 
1991 	if (sata_cmdp->satacmd_num_dma_cookies != 0 && dma_cmd == B_TRUE) {
1992 		NVLOG(NVDBG_DELIVER, nvc,  nvp, "DMA command", NULL);
1993 		nv_slotp->nvslot_start = nv_start_dma;
1994 		nv_slotp->nvslot_intr = nv_intr_dma;
1995 	} else if (spkt->satapkt_cmd.satacmd_cmd_reg == SATAC_PACKET) {
1996 		NVLOG(NVDBG_DELIVER, nvc,  nvp, "packet command", NULL);
1997 		nv_slotp->nvslot_start = nv_start_pkt_pio;
1998 		nv_slotp->nvslot_intr = nv_intr_pkt_pio;
1999 		if ((direction == SATA_DIR_READ) ||
2000 		    (direction == SATA_DIR_WRITE)) {
2001 			nv_slotp->nvslot_byte_count =
2002 			    spkt->satapkt_cmd.satacmd_bp->b_bcount;
2003 			nv_slotp->nvslot_v_addr =
2004 			    spkt->satapkt_cmd.satacmd_bp->b_un.b_addr;
2005 			/*
2006 			 * Freeing DMA resources allocated by the framework
2007 			 * now to avoid buffer overwrite (dma sync) problems
2008 			 * when the buffer is released at command completion.
2009 			 * Primarily an issue on systems with more than
2010 			 * 4GB of memory.
2011 			 */
2012 			sata_free_dma_resources(spkt);
2013 		}
2014 	} else if (direction == SATA_DIR_NODATA_XFER) {
2015 		NVLOG(NVDBG_DELIVER, nvc, nvp, "non-data command", NULL);
2016 		nv_slotp->nvslot_start = nv_start_nodata;
2017 		nv_slotp->nvslot_intr = nv_intr_nodata;
2018 	} else if (direction == SATA_DIR_READ) {
2019 		NVLOG(NVDBG_DELIVER, nvc, nvp, "pio in command", NULL);
2020 		nv_slotp->nvslot_start = nv_start_pio_in;
2021 		nv_slotp->nvslot_intr = nv_intr_pio_in;
2022 		nv_slotp->nvslot_byte_count =
2023 		    spkt->satapkt_cmd.satacmd_bp->b_bcount;
2024 		nv_slotp->nvslot_v_addr =
2025 		    spkt->satapkt_cmd.satacmd_bp->b_un.b_addr;
2026 		/*
2027 		 * Freeing DMA resources allocated by the framework now to
2028 		 * avoid buffer overwrite (dma sync) problems when the buffer
2029 		 * is released at command completion.  This is not an issue
2030 		 * for write because write does not update the buffer.
2031 		 * Primarily an issue on systems with more than 4GB of memory.
2032 		 */
2033 		sata_free_dma_resources(spkt);
2034 	} else if (direction == SATA_DIR_WRITE) {
2035 		NVLOG(NVDBG_DELIVER, nvc, nvp, "pio out command", NULL);
2036 		nv_slotp->nvslot_start = nv_start_pio_out;
2037 		nv_slotp->nvslot_intr = nv_intr_pio_out;
2038 		nv_slotp->nvslot_byte_count =
2039 		    spkt->satapkt_cmd.satacmd_bp->b_bcount;
2040 		nv_slotp->nvslot_v_addr =
2041 		    spkt->satapkt_cmd.satacmd_bp->b_un.b_addr;
2042 	} else {
2043 		nv_cmn_err(CE_WARN, nvc, nvp, "malformed command: direction"
2044 		    " %d cookies %d cmd %x",
2045 		    sata_cmdp->satacmd_flags.sata_data_direction,
2046 		    sata_cmdp->satacmd_num_dma_cookies,  cmd);
2047 		spkt->satapkt_reason = SATA_PKT_CMD_UNSUPPORTED;
2048 		ret = SATA_TRAN_CMD_UNSUPPORTED;
2049 
2050 		goto fail;
2051 	}
2052 
2053 	if ((ret = (*nv_slotp->nvslot_start)(nvp, slot)) ==
2054 	    SATA_TRAN_ACCEPTED) {
2055 #ifdef SGPIO_SUPPORT
2056 		nv_sgp_drive_active(nvp->nvp_ctlp,
2057 		    (nvp->nvp_ctlp->nvc_ctlr_num * 2) + nvp->nvp_port_num);
2058 #endif
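		/*
		 * record the time the command was started
		 */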
2059 		nv_slotp->nvslot_stime = ddi_get_lbolt();
2060 
2061 		/*
2062 		 * start timer if it's not already running and this packet
2063 		 * is not requesting polled mode.
2064 		 */
2065 		if ((nvp->nvp_timeout_id == 0) &&
2066 		    ((spkt->satapkt_op_mode & SATA_OPMODE_POLLING) == 0)) {
2067 			nv_setup_timeout(nvp, NV_ONE_SEC);
2068 		}
2069 
2070 		nvp->nvp_previous_cmd = nvp->nvp_last_cmd;
2071 		nvp->nvp_last_cmd = spkt->satapkt_cmd.satacmd_cmd_reg;
2072 
2073 		return (SATA_TRAN_ACCEPTED);
2074 	}
2075 
2076 	fail:
2077 
2078 	spkt->satapkt_reason = SATA_TRAN_PORT_ERROR;
2079 
2080 	if (ncq == NVSLOT_NCQ) {
2081 		nvp->nvp_ncq_run--;
2082 		nvp->nvp_sactive_cache &= ~on_bit;
2083 	} else {
2084 		nvp->nvp_non_ncq_run--;
2085 	}
2086 	nv_slotp->nvslot_spkt = NULL;
2087 	nv_slotp->nvslot_flags = 0;
2088 
2089 	return (ret);
2090 }
2091 
2092 
2093 /*
2094  * Check if the signature is ready and if non-zero translate
2095  * it into a solaris sata defined type.
2096  */
2097 static void
2098 nv_read_signature(nv_port_t *nvp)
2099 {
2100 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
2101 	int retry_once = 0;
2102 
2103 	retry:
2104 	/*
2105 	 * Task file error register bit 0 set to 1 indicate that drive
2106 	 * is ready and have sent D2H FIS with a signature.
2107 	 */
2108 	if (nv_check_tfr_error != 0) {
2109 		uint8_t tfr_error = nv_get8(cmdhdl, nvp->nvp_error);
2110 		if (!(tfr_error & SATA_ERROR_ILI)) {
2111 			NVLOG(NVDBG_VERBOSE, nvp->nvp_ctlp, nvp,
2112 			    "nv_read_signature: signature not ready", NULL);
2113 
2114 			return;
2115 		}
2116 	}
2117 
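	/*
	 * assemble the 32-bit device signature from the count and LBA
	 * task file registers
	 */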
2118 	nvp->nvp_signature = nv_get8(cmdhdl, nvp->nvp_count);
2119 	nvp->nvp_signature |= (nv_get8(cmdhdl, nvp->nvp_sect) << 8);
2120 	nvp->nvp_signature |= (nv_get8(cmdhdl, nvp->nvp_lcyl) << 16);
2121 	nvp->nvp_signature |= (nv_get8(cmdhdl, nvp->nvp_hcyl) << 24);
2122 
2123 	NVLOG(NVDBG_VERBOSE, nvp->nvp_ctlp, nvp,
2124 	    "nv_read_signature: 0x%x ", nvp->nvp_signature);
2125 
2126 	switch (nvp->nvp_signature) {
2127 
2128 	case NV_SIG_DISK:
2129 		NVLOG(NVDBG_RESET, nvp->nvp_ctlp, nvp, "drive is a disk", NULL);
2130 		nvp->nvp_type = SATA_DTYPE_ATADISK;
2131 		break;
2132 	case NV_SIG_ATAPI:
2133 		NVLOG(NVDBG_RESET, nvp->nvp_ctlp, nvp,
2134 		    "drive is an optical device", NULL);
2135 		nvp->nvp_type = SATA_DTYPE_ATAPICD;
2136 		break;
2137 	case NV_SIG_PM:
2138 		NVLOG(NVDBG_RESET, nvp->nvp_ctlp, nvp,
2139 		    "device is a port multiplier", NULL);
2140 		nvp->nvp_type = SATA_DTYPE_PMULT;
2141 		break;
2142 	case NV_SIG_NOTREADY:
2143 		NVLOG(NVDBG_VERBOSE, nvp->nvp_ctlp, nvp,
2144 		    "signature not ready", NULL);
2145 		nvp->nvp_type = SATA_DTYPE_UNKNOWN;
2146 		break;
2147 	default:
2148 		if (retry_once++ == 0) {
2149 			/*
2150 			 * this is a rare corner case where the controller
2151 			 * was in the middle of updating the registers as the
2152 			 * driver is reading them.  If this happens, wait a
2153 			 * bit and retry, but just once.
2154 			 */
2155 			NV_DELAY_NSEC(1000000);
2156 
2157 			goto retry;
2158 		}
2159 
2160 		nv_cmn_err(CE_WARN, nvp->nvp_ctlp, nvp, "signature %X not"
2161 		    " recognized", nvp->nvp_signature);
2162 		nvp->nvp_type = SATA_DTYPE_UNKNOWN;
2163 		break;
2164 	}
2165 
2166 	if (nvp->nvp_signature) {
2167 		nvp->nvp_state &= ~(NV_PORT_RESET_RETRY | NV_PORT_RESET);
2168 	}
2169 
2170 #ifdef SGPIO_SUPPORT
2171 	if (nvp->nvp_signature == NV_SIG_DISK) {
2172 		nv_sgp_drive_connect(nvp->nvp_ctlp, SGP_CTLR_PORT_TO_DRV(
2173 		    nvp->nvp_ctlp->nvc_ctlr_num, nvp->nvp_port_num));
2174 	} else {
2175 		nv_sgp_drive_disconnect(nvp->nvp_ctlp, SGP_CTLR_PORT_TO_DRV(
2176 		    nvp->nvp_ctlp->nvc_ctlr_num, nvp->nvp_port_num));
2177 	}
2178 #endif
2179 }
2180 
2181 
2182 /*
2183  * Set up a new timeout or complete a timeout.
2184  * Timeout value has to be specified in microseconds. If time is zero, no new
2185  * timeout is scheduled.
2186  * Must be called at the end of the timeout routine.
2187  */
2188 static void
2189 nv_setup_timeout(nv_port_t *nvp, int time)
2190 {
2191 	clock_t old_duration = nvp->nvp_timeout_duration;
2192 
	if (time == 0) {

		return;
	}
2194 
2195 	if (nvp->nvp_timeout_id != 0 && nvp->nvp_timeout_duration == 0) {
2196 		/*
2197 		 * Since we are dropping the mutex for untimeout,
2198 		 * the timeout may be executed while we are trying to
2199 		 * untimeout and setting up a new timeout.
2200 		 * If nvp_timeout_duration is 0, then this function
2201 		 * was re-entered. Just exit.
2202 		 */
		cmn_err(CE_WARN, "nv_setup_timeout re-entered");
2204 		return;
2205 	}
2206 	nvp->nvp_timeout_duration = 0;
2207 	if (nvp->nvp_timeout_id == 0) {
2208 		/* Start new timer */
2209 		nvp->nvp_timeout_id = timeout(nv_timeout, (void *)nvp,
2210 		    drv_usectohz(time));
2211 	} else {
2212 		/*
2213 		 * If the currently running timeout is due later than the
2214 		 * requested one, restart it with a new expiration.
		 * Our timeouts do not need to be accurate - we are just
		 * checking that the specified time was exceeded.
2217 		 */
2218 		if (old_duration > time) {
2219 			mutex_exit(&nvp->nvp_mutex);
2220 			(void) untimeout(nvp->nvp_timeout_id);
2221 			mutex_enter(&nvp->nvp_mutex);
2222 			nvp->nvp_timeout_id = timeout(nv_timeout, (void *)nvp,
2223 			    drv_usectohz(time));
2224 		}
2225 	}
2226 	nvp->nvp_timeout_duration = time;
2227 }
2228 
2229 
2230 
2231 int nv_reset_length = NV_RESET_LENGTH;
2232 
2233 /*
2234  * Reset the port
2235  *
2236  * Entered with nvp mutex held
2237  */
2238 static void
2239 nv_reset(nv_port_t *nvp, char *reason)
2240 {
2241 	ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
2242 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
2243 	nv_ctl_t *nvc = nvp->nvp_ctlp;
2244 	uint32_t sctrl, serr, sstatus;
2245 	uint8_t bmicx;
2246 	int i, j, reset = 0;
2247 
2248 	ASSERT(mutex_owned(&nvp->nvp_mutex));
2249 
2250 	serr = nv_get32(bar5_hdl, nvp->nvp_serror);
2251 
2252 	/*
2253 	 * stop DMA engine.
2254 	 */
2255 	bmicx = nv_get8(nvp->nvp_bm_hdl, nvp->nvp_bmicx);
2256 	nv_put8(nvp->nvp_bm_hdl, nvp->nvp_bmicx,  bmicx & ~BMICX_SSBM);
2257 
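	/*
	 * mark the port as being reset and record the time and count
	 */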
2258 	nvp->nvp_state |= NV_PORT_RESET;
2259 	nvp->nvp_reset_time = ddi_get_lbolt();
2260 	nvp->nvp_reset_count++;
2261 
2262 	if (strcmp(reason, "attach") != 0) {
2263 		nv_cmn_err(CE_NOTE, nvc, nvp, "nv_reset: reason: %s serr 0x%x",
2264 		    reason, serr);
2265 		/*
2266 		 * keep a record of why the first reset occurred, for debugging
2267 		 */
2268 		if (nvp->nvp_first_reset_reason[0] == '\0') {
2269 			(void) strncpy(nvp->nvp_first_reset_reason,
2270 			    reason, NV_REASON_LEN);
2271 			nvp->nvp_first_reset_reason[NV_REASON_LEN - 1] = '\0';
2272 		}
2273 	}
2274 
2275 	NVLOG(NVDBG_RESET, nvc, nvp, "nv_reset_count: %d",
2276 	    nvp->nvp_reset_count);
2277 
2278 	(void) strncpy(nvp->nvp_reset_reason, reason, NV_REASON_LEN);
2279 
2280 	/*
	 * ensure the reason string is null-terminated
2282 	 */
2283 	nvp->nvp_reset_reason[NV_REASON_LEN - 1] = '\0';
2284 
2285 	/*
2286 	 * Issue hardware reset; retry if necessary.
2287 	 */
2288 	for (i = 0; i < NV_RESET_ATTEMPTS; i++) {
2289 		/*
2290 		 * Clear signature registers
2291 		 */
2292 		nv_put8(cmdhdl, nvp->nvp_sect, 0);
2293 		nv_put8(cmdhdl, nvp->nvp_lcyl, 0);
2294 		nv_put8(cmdhdl, nvp->nvp_hcyl, 0);
2295 		nv_put8(cmdhdl, nvp->nvp_count, 0);
2296 
2297 		/* Clear task file error register */
2298 		nv_put8(nvp->nvp_cmd_hdl, nvp->nvp_error, 0);
2299 
2300 		/*
2301 		 * assert reset in PHY by writing a 1 to bit 0 scontrol
2302 		 */
2303 		sctrl = nv_get32(bar5_hdl, nvp->nvp_sctrl);
2304 		nv_put32(bar5_hdl, nvp->nvp_sctrl,
2305 		    sctrl | SCONTROL_DET_COMRESET);
2306 
2307 		/* Wait at least 1ms, as required by the spec */
2308 		drv_usecwait(nv_reset_length);
2309 
2310 		/* Reset all accumulated error bits */
2311 		nv_put32(bar5_hdl, nvp->nvp_serror, 0xffffffff);
2312 
2313 		sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
2314 		sctrl = nv_get32(bar5_hdl, nvp->nvp_sctrl);
2315 		NVLOG(NVDBG_RESET, nvc, nvp, "nv_reset: applied (%d); "
2316 		    "sctrl 0x%x, sstatus 0x%x", i, sctrl, sstatus);
2317 
2318 		/* de-assert reset in PHY */
2319 		nv_put32(bar5_hdl, nvp->nvp_sctrl,
2320 		    sctrl & ~SCONTROL_DET_COMRESET);
2321 
2322 		/*
2323 		 * Wait up to 10ms for COMINIT to arrive, indicating that
2324 		 * the device recognized COMRESET.
2325 		 */
2326 		for (j = 0; j < 10; j++) {
2327 			drv_usecwait(NV_ONE_MSEC);
2328 			sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
2329 			if ((SSTATUS_GET_IPM(sstatus) == SSTATUS_IPM_ACTIVE) &&
2330 			    (SSTATUS_GET_DET(sstatus) ==
2331 			    SSTATUS_DET_DEVPRE_PHYCOM)) {
2332 				reset = 1;
2333 				break;
2334 			}
2335 		}
2336 		if (reset == 1)
2337 			break;
2338 	}
2339 	serr = nv_get32(bar5_hdl, nvp->nvp_serror);
2340 	if (reset == 0) {
2341 		NVLOG(NVDBG_RESET, nvc, nvp, "nv_reset not succeeded "
2342 		    "(serr 0x%x) after %d attempts", serr, i);
2343 	} else {
2344 		NVLOG(NVDBG_RESET, nvc, nvp, "nv_reset succeeded (serr 0x%x)"
2345 		    "after %dms", serr, TICK_TO_MSEC(ddi_get_lbolt() -
2346 		    nvp->nvp_reset_time));
2347 	}
2348 	nvp->nvp_reset_time = ddi_get_lbolt();
2349 
2350 	if (servicing_interrupt()) {
2351 		nv_setup_timeout(nvp, NV_ONE_MSEC);
2352 	} else if (!(nvp->nvp_state & NV_PORT_RESET_RETRY)) {
2353 		nv_monitor_reset(nvp);
2354 	}
2355 }
2356 
2357 
2358 /*
2359  * Initialize register handling specific to mcp51/mcp55
2360  */
2361 /* ARGSUSED */
2362 static void
2363 mcp5x_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle)
2364 {
2365 	nv_port_t *nvp;
2366 	uchar_t *bar5  = nvc->nvc_bar_addr[5];
2367 	uint8_t off, port;
2368 
2369 	nvc->nvc_mcp5x_ctl = (uint32_t *)(bar5 + MCP5X_CTL);
2370 	nvc->nvc_mcp5x_ncq = (uint32_t *)(bar5 + MCP5X_NCQ);
2371 
2372 	for (port = 0, off = 0; port < NV_MAX_PORTS(nvc); port++, off += 2) {
2373 		nvp = &(nvc->nvc_port[port]);
2374 		nvp->nvp_mcp5x_int_status =
2375 		    (uint16_t *)(bar5 + MCP5X_INT_STATUS + off);
2376 		nvp->nvp_mcp5x_int_ctl =
2377 		    (uint16_t *)(bar5 + MCP5X_INT_CTL + off);
2378 
2379 		/*
2380 		 * clear any previous interrupts asserted
2381 		 */
2382 		nv_put16(nvc->nvc_bar_hdl[5], nvp->nvp_mcp5x_int_status,
2383 		    MCP5X_INT_CLEAR);
2384 
2385 		/*
2386 		 * These are the interrupts to accept for now.  The spec
2387 		 * says these are enable bits, but nvidia has indicated
2388 		 * these are masking bits.  Even though they may be masked
2389 		 * out to prevent asserting the main interrupt, they can
2390 		 * still be asserted while reading the interrupt status
2391 		 * register, so that needs to be considered in the interrupt
2392 		 * handler.
2393 		 */
2394 		nv_put16(nvc->nvc_bar_hdl[5], nvp->nvp_mcp5x_int_ctl,
2395 		    ~(MCP5X_INT_IGNORE));
2396 	}
2397 
2398 	/*
2399 	 * Allow the driver to program the BM on the first command instead
2400 	 * of waiting for an interrupt.
2401 	 */
#ifdef NCQ
	{
		uint32_t flags;

		flags = MCP_SATA_AE_NCQ_PDEV_FIRST_CMD |
		    MCP_SATA_AE_NCQ_SDEV_FIRST_CMD;
		nv_put32(nvc->nvc_bar_hdl[5], nvc->nvc_mcp5x_ncq, flags);
		flags = MCP_SATA_AE_CTL_PRI_SWNCQ | MCP_SATA_AE_CTL_SEC_SWNCQ;
		nv_put32(nvc->nvc_bar_hdl[5], nvc->nvc_mcp5x_ctl, flags);
	}
#endif
2408 
2409 	/*
2410 	 * mcp55 rev A03 and above supports 40-bit physical addressing.
2411 	 * Enable DMA to take advantage of that.
2412 	 *
2413 	 */
2414 	if (nvc->nvc_revid >= 0xa3) {
2415 		if (nv_sata_40bit_dma == B_TRUE) {
2416 			uint32_t reg32;
2417 			NVLOG(NVDBG_INIT, nvp->nvp_ctlp, nvp,
2418 			    "rev id is %X.  40-bit DMA addressing"
2419 			    " enabled", nvc->nvc_revid);
2420 			nvc->dma_40bit = B_TRUE;
2421 
2422 			reg32 = pci_config_get32(pci_conf_handle,
2423 			    NV_SATA_CFG_20);
2424 			pci_config_put32(pci_conf_handle, NV_SATA_CFG_20,
2425 			    reg32 | NV_40BIT_PRD);
2426 
2427 			/*
2428 			 * CFG_23 bits 0-7 contain the top 8 bits (of 40
2429 			 * bits) for the primary PRD table, and bits 8-15
2430 			 * contain the top 8 bits for the secondary.  Set
2431 			 * to zero because the DMA attribute table for PRD
2432 			 * allocation forces it into 32 bit address space
2433 			 * anyway.
2434 			 */
2435 			reg32 = pci_config_get32(pci_conf_handle,
2436 			    NV_SATA_CFG_23);
2437 			pci_config_put32(pci_conf_handle, NV_SATA_CFG_23,
2438 			    reg32 & 0xffff0000);
2439 		} else {
2440 			NVLOG(NVDBG_INIT, nvp->nvp_ctlp, nvp,
2441 			    "40-bit DMA disabled by nv_sata_40bit_dma", NULL);
2442 		}
2443 	} else {
2444 		nv_cmn_err(CE_NOTE, nvp->nvp_ctlp, nvp, "rev id is %X and is "
2445 		    "not capable of 40-bit DMA addressing", nvc->nvc_revid);
2446 	}
2447 }
2448 
2449 
2450 /*
2451  * Initialize register handling specific to ck804
2452  */
2453 static void
2454 ck804_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle)
2455 {
2456 	uchar_t *bar5  = nvc->nvc_bar_addr[5];
2457 	uint32_t reg32;
2458 	uint16_t reg16;
2459 	nv_port_t *nvp;
2460 	int j;
2461 
2462 	/*
2463 	 * delay hotplug interrupts until PHYRDY.
2464 	 */
2465 	reg32 = pci_config_get32(pci_conf_handle, NV_SATA_CFG_42);
2466 	pci_config_put32(pci_conf_handle, NV_SATA_CFG_42,
2467 	    reg32 | CK804_CFG_DELAY_HOTPLUG_INTR);
2468 
2469 	/*
2470 	 * enable hot plug interrupts for channel x and y
2471 	 */
2472 	reg16 = nv_get16(nvc->nvc_bar_hdl[5],
2473 	    (uint16_t *)(bar5 + NV_ADMACTL_X));
2474 	nv_put16(nvc->nvc_bar_hdl[5], (uint16_t *)(bar5 + NV_ADMACTL_X),
2475 	    NV_HIRQ_EN | reg16);
2476 
2477 
2478 	reg16 = nv_get16(nvc->nvc_bar_hdl[5],
2479 	    (uint16_t *)(bar5 + NV_ADMACTL_Y));
2480 	nv_put16(nvc->nvc_bar_hdl[5], (uint16_t *)(bar5 + NV_ADMACTL_Y),
2481 	    NV_HIRQ_EN | reg16);
2482 
2483 	nvc->nvc_ck804_int_status = (uint8_t *)(bar5 + CK804_SATA_INT_STATUS);
2484 
2485 	/*
2486 	 * clear any existing interrupt pending then enable
2487 	 */
2488 	for (j = 0; j < NV_MAX_PORTS(nvc); j++) {
2489 		nvp = &(nvc->nvc_port[j]);
2490 		mutex_enter(&nvp->nvp_mutex);
2491 		(*(nvp->nvp_ctlp->nvc_set_intr))(nvp,
2492 		    NV_INTR_CLEAR_ALL|NV_INTR_ENABLE);
2493 		mutex_exit(&nvp->nvp_mutex);
2494 	}
2495 }
2496 
2497 
2498 /*
2499  * Initialize the controller and set up driver data structures.
 * Determine whether it is ck804 or mcp5x class.
2501  */
2502 static int
2503 nv_init_ctl(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle)
2504 {
2505 	struct sata_hba_tran stran;
2506 	nv_port_t *nvp;
2507 	int j, ck804;
2508 	uchar_t *cmd_addr, *ctl_addr, *bm_addr;
2509 	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
2510 	uchar_t *bar5  = nvc->nvc_bar_addr[5];
2511 	uint32_t reg32;
2512 	uint8_t reg8, reg8_save;
2513 
2514 	NVLOG(NVDBG_INIT, nvc, NULL, "nv_init_ctl entered", NULL);
2515 
2516 	ck804 = B_TRUE;
2517 #ifdef SGPIO_SUPPORT
2518 	nvc->nvc_mcp5x_flag = B_FALSE;
2519 #endif
2520 
2521 	/*
2522 	 * Need to set bit 2 to 1 at config offset 0x50
2523 	 * to enable access to the bar5 registers.
2524 	 */
2525 	reg32 = pci_config_get32(pci_conf_handle, NV_SATA_CFG_20);
2526 	if (!(reg32 & NV_BAR5_SPACE_EN)) {
2527 		pci_config_put32(pci_conf_handle, NV_SATA_CFG_20,
2528 		    reg32 | NV_BAR5_SPACE_EN);
2529 	}
2530 
2531 	/*
2532 	 * Determine if this is ck804 or mcp5x.  ck804 will map in the
2533 	 * task file registers into bar5 while mcp5x won't.  The offset of
2534 	 * the task file registers in mcp5x's space is unused, so it will
2535 	 * return zero.  So check one of the task file registers to see if it is
2536 	 * writable and reads back what was written.  If it's mcp5x it will
2537 	 * return back 0xff whereas ck804 will return the value written.
2538 	 */
2539 	reg8_save = nv_get8(bar5_hdl,
2540 	    (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X));
2541 
2542 
2543 	for (j = 1; j < 3; j++) {
2544 
2545 		nv_put8(bar5_hdl, (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X), j);
2546 		reg8 = nv_get8(bar5_hdl,
2547 		    (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X));
2548 
2549 		if (reg8 != j) {
2550 			ck804 = B_FALSE;
2551 			nvc->nvc_mcp5x_flag = B_TRUE;
2552 			break;
2553 		}
2554 	}
2555 
2556 	nv_put8(bar5_hdl, (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X), reg8_save);
2557 
2558 	if (ck804 == B_TRUE) {
2559 		NVLOG(NVDBG_INIT, nvc, NULL, "controller is CK804", NULL);
2560 		nvc->nvc_interrupt = ck804_intr;
2561 		nvc->nvc_reg_init = ck804_reg_init;
2562 		nvc->nvc_set_intr = ck804_set_intr;
2563 	} else {
2564 		NVLOG(NVDBG_INIT, nvc, NULL, "controller is MCP51/MCP55", NULL);
2565 		nvc->nvc_interrupt = mcp5x_intr;
2566 		nvc->nvc_reg_init = mcp5x_reg_init;
2567 		nvc->nvc_set_intr = mcp5x_set_intr;
2568 	}
2569 
2570 
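	/*
	 * fill in the sata module transport structure for this HBA
	 */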
2571 	stran.sata_tran_hba_rev = SATA_TRAN_HBA_REV;
2572 	stran.sata_tran_hba_dip = nvc->nvc_dip;
2573 	stran.sata_tran_hba_num_cports = NV_NUM_CPORTS;
2574 	stran.sata_tran_hba_features_support =
2575 	    SATA_CTLF_HOTPLUG | SATA_CTLF_ASN | SATA_CTLF_ATAPI;
2576 	stran.sata_tran_hba_qdepth = NV_QUEUE_SLOTS;
2577 	stran.sata_tran_probe_port = nv_sata_probe;
2578 	stran.sata_tran_start = nv_sata_start;
2579 	stran.sata_tran_abort = nv_sata_abort;
2580 	stran.sata_tran_reset_dport = nv_sata_reset;
2581 	stran.sata_tran_selftest = NULL;
2582 	stran.sata_tran_hotplug_ops = &nv_hotplug_ops;
2583 	stran.sata_tran_pwrmgt_ops = NULL;
2584 	stran.sata_tran_ioctl = NULL;
2585 	nvc->nvc_sata_hba_tran = stran;
2586 
2587 	nvc->nvc_port = kmem_zalloc(sizeof (nv_port_t) * NV_MAX_PORTS(nvc),
2588 	    KM_SLEEP);
2589 
2590 	/*
2591 	 * initialize registers common to all chipsets
2592 	 */
2593 	nv_common_reg_init(nvc);
2594 
2595 	for (j = 0; j < NV_MAX_PORTS(nvc); j++) {
2596 		nvp = &(nvc->nvc_port[j]);
2597 
2598 		cmd_addr = nvp->nvp_cmd_addr;
2599 		ctl_addr = nvp->nvp_ctl_addr;
2600 		bm_addr = nvp->nvp_bm_addr;
2601 
2602 		mutex_init(&nvp->nvp_mutex, NULL, MUTEX_DRIVER,
2603 		    DDI_INTR_PRI(nvc->nvc_intr_pri));
2604 
2605 		cv_init(&nvp->nvp_poll_cv, NULL, CV_DRIVER, NULL);
2606 
2607 		nvp->nvp_data	= cmd_addr + NV_DATA;
2608 		nvp->nvp_error	= cmd_addr + NV_ERROR;
2609 		nvp->nvp_feature = cmd_addr + NV_FEATURE;
2610 		nvp->nvp_count	= cmd_addr + NV_COUNT;
2611 		nvp->nvp_sect	= cmd_addr + NV_SECT;
2612 		nvp->nvp_lcyl	= cmd_addr + NV_LCYL;
2613 		nvp->nvp_hcyl	= cmd_addr + NV_HCYL;
2614 		nvp->nvp_drvhd	= cmd_addr + NV_DRVHD;
2615 		nvp->nvp_status	= cmd_addr + NV_STATUS;
2616 		nvp->nvp_cmd	= cmd_addr + NV_CMD;
2617 		nvp->nvp_altstatus = ctl_addr + NV_ALTSTATUS;
2618 		nvp->nvp_devctl	= ctl_addr + NV_DEVCTL;
2619 
2620 		nvp->nvp_bmicx	= bm_addr + BMICX_REG;
2621 		nvp->nvp_bmisx	= bm_addr + BMISX_REG;
2622 		nvp->nvp_bmidtpx = (uint32_t *)(bm_addr + BMIDTPX_REG);
2623 
2624 		nvp->nvp_state = 0;
2625 
2626 		/*
2627 		 * Initialize dma handles, etc.
2628 		 * If it fails, the port is in inactive state.
2629 		 */
2630 		(void) nv_init_port(nvp);
2631 	}
2632 
2633 	/*
2634 	 * initialize register by calling chip specific reg initialization
2635 	 */
2636 	(*(nvc->nvc_reg_init))(nvc, pci_conf_handle);
2637 
2638 	/* initialize the hba dma attribute */
2639 	if (nvc->dma_40bit == B_TRUE)
2640 		nvc->nvc_sata_hba_tran.sata_tran_hba_dma_attr =
2641 		    &buffer_dma_40bit_attr;
2642 	else
2643 		nvc->nvc_sata_hba_tran.sata_tran_hba_dma_attr =
2644 		    &buffer_dma_attr;
2645 
2646 	return (NV_SUCCESS);
2647 }
2648 
2649 
2650 /*
2651  * Initialize data structures with enough slots to handle queuing, if
2652  * enabled.  NV_QUEUE_SLOTS will be set to 1 or 32, depending on whether
2653  * NCQ support is built into the driver and enabled.  It might have been
2654  * better to derive the true size from the drive itself, but the sata
2655  * module only sends down that information on the first NCQ command,
2656  * which means possibly re-sizing the structures on an interrupt stack,
2657  * making error handling more messy.  The easy way is to just allocate
2658  * all 32 slots, which is what most drives support anyway.
2659  */
2660 static int
2661 nv_init_port(nv_port_t *nvp)
2662 {
2663 	nv_ctl_t *nvc = nvp->nvp_ctlp;
2664 	size_t	prd_size = sizeof (prde_t) * NV_DMA_NSEGS;
2665 	dev_info_t *dip = nvc->nvc_dip;
2666 	ddi_device_acc_attr_t dev_attr;
2667 	size_t buf_size;
2668 	ddi_dma_cookie_t cookie;
2669 	uint_t count;
2670 	int rc, i;
2671 
2672 	dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
2673 	dev_attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
2674 	dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
2675 
2676 	if (nvp->nvp_state & NV_PORT_INIT) {
2677 		NVLOG(NVDBG_INIT, nvc, nvp,
2678 		    "nv_init_port previously initialized", NULL);
2679 
2680 		return (NV_SUCCESS);
2681 	} else {
2682 		NVLOG(NVDBG_INIT, nvc, nvp, "nv_init_port initializing", NULL);
2683 	}
2684 
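	/*
	 * allocate the per-slot arrays for the PRD (scatter/gather) DMA
	 * handles, access handles, virtual addresses and physical addresses
	 */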
2685 	nvp->nvp_sg_dma_hdl = kmem_zalloc(sizeof (ddi_dma_handle_t) *
2686 	    NV_QUEUE_SLOTS, KM_SLEEP);
2687 
2688 	nvp->nvp_sg_acc_hdl = kmem_zalloc(sizeof (ddi_acc_handle_t) *
2689 	    NV_QUEUE_SLOTS, KM_SLEEP);
2690 
2691 	nvp->nvp_sg_addr = kmem_zalloc(sizeof (caddr_t) *
2692 	    NV_QUEUE_SLOTS, KM_SLEEP);
2693 
2694 	nvp->nvp_sg_paddr = kmem_zalloc(sizeof (uint32_t) *
2695 	    NV_QUEUE_SLOTS, KM_SLEEP);
2696 
2697 	nvp->nvp_slot = kmem_zalloc(sizeof (nv_slot_t) * NV_QUEUE_SLOTS,
2698 	    KM_SLEEP);
2699 
2700 	for (i = 0; i < NV_QUEUE_SLOTS; i++) {
2701 
2702 		rc = ddi_dma_alloc_handle(dip, &nv_prd_dma_attr,
2703 		    DDI_DMA_SLEEP, NULL, &(nvp->nvp_sg_dma_hdl[i]));
2704 
2705 		if (rc != DDI_SUCCESS) {
2706 			nv_uninit_port(nvp);
2707 
2708 			return (NV_FAILURE);
2709 		}
2710 
2711 		rc = ddi_dma_mem_alloc(nvp->nvp_sg_dma_hdl[i], prd_size,
2712 		    &dev_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
2713 		    NULL, &(nvp->nvp_sg_addr[i]), &buf_size,
2714 		    &(nvp->nvp_sg_acc_hdl[i]));
2715 
2716 		if (rc != DDI_SUCCESS) {
2717 			nv_uninit_port(nvp);
2718 
2719 			return (NV_FAILURE);
2720 		}
2721 
2722 		rc = ddi_dma_addr_bind_handle(nvp->nvp_sg_dma_hdl[i], NULL,
2723 		    nvp->nvp_sg_addr[i], buf_size,
2724 		    DDI_DMA_WRITE | DDI_DMA_CONSISTENT,
2725 		    DDI_DMA_SLEEP, NULL, &cookie, &count);
2726 
2727 		if (rc != DDI_DMA_MAPPED) {
2728 			nv_uninit_port(nvp);
2729 
2730 			return (NV_FAILURE);
2731 		}
2732 
2733 		ASSERT(count == 1);
2734 		ASSERT((cookie.dmac_address & (sizeof (int) - 1)) == 0);
2735 
2736 		ASSERT(cookie.dmac_laddress <= UINT32_MAX);
2737 
2738 		nvp->nvp_sg_paddr[i] = cookie.dmac_address;
2739 	}
2740 
2741 	/*
2742 	 * nvp_queue_depth represents the actual drive queue depth, not the
2743 	 * number of slots allocated in the structures (which may be more).
2744 	 * Actual queue depth is only learned after the first NCQ command, so
2745 	 * initialize it to 1 for now.
2746 	 */
2747 	nvp->nvp_queue_depth = 1;
2748 
2749 	/*
2750 	 * Port is initialized whether the device is attached or not.
2751 	 * Link processing and device identification will be started later,
2752 	 * after interrupts are initialized.
2753 	 */
2754 	nvp->nvp_type = SATA_DTYPE_NONE;
2755 	nvp->nvp_signature = 0;
2756 
2757 	nvp->nvp_state |= NV_PORT_INIT;
2758 
2759 	return (NV_SUCCESS);
2760 }
2761 
2762 
2763 /*
2764  * Establish initial link & device type
2765  * Called only from nv_attach
2766  * Loops up to approximately 210ms; can exit earlier.
2767  * The time includes wait for the link up and completion of the initial
2768  * signature gathering operation.
2769  */
2770 static void
2771 nv_init_port_link_processing(nv_ctl_t *nvc)
2772 {
2773 	ddi_acc_handle_t bar5_hdl;
2774 	nv_port_t *nvp;
2775 	volatile uint32_t sstatus;
2776 	int port, links_up, ready_ports, i;
2777 
2778 
2779 	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
2780 		nvp = &(nvc->nvc_port[port]);
2781 		if (nvp != NULL && (nvp->nvp_state & NV_PORT_INIT)) {
2782 			/*
2783 			 * Initiate device identification, if any is attached
2784 			 * and reset was not already applied by hot-plug
2785 			 * event processing.
2786 			 */
2787 			mutex_enter(&nvp->nvp_mutex);
2788 			if (!(nvp->nvp_state & NV_PORT_RESET)) {
2789 				nvp->nvp_state |= NV_PORT_RESET | NV_PORT_PROBE;
2790 				nv_reset(nvp, "attach");
2791 			}
2792 			mutex_exit(&nvp->nvp_mutex);
2793 		}
2794 	}
2795 	/*
2796 	 * Wait up to 10ms for links up.
2797 	 * Spec says that link should be up in 1ms.
2798 	 */
2799 	for (i = 0; i < 10; i++) {
2800 		drv_usecwait(NV_ONE_MSEC);
2801 		links_up = 0;
2802 		for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
2803 			nvp = &(nvc->nvc_port[port]);
2804 			mutex_enter(&nvp->nvp_mutex);
2805 			bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
2806 			sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
2807 			if ((SSTATUS_GET_IPM(sstatus) == SSTATUS_IPM_ACTIVE) &&
2808 			    (SSTATUS_GET_DET(sstatus) ==
2809 			    SSTATUS_DET_DEVPRE_PHYCOM)) {
2810 				if ((nvp->nvp_state & NV_PORT_RESET) &&
2811 				    nvp->nvp_type == SATA_DTYPE_NONE) {
2812 					nvp->nvp_type = SATA_DTYPE_UNKNOWN;
2813 				}
2814 				NVLOG(NVDBG_INIT, nvc, nvp,
2815 				    "nv_init_port_link_processing()"
2816 				    "link up; time from reset %dms",
2817 				    TICK_TO_MSEC(ddi_get_lbolt() -
2818 				    nvp->nvp_reset_time));
2819 				links_up++;
2820 			}
2821 			mutex_exit(&nvp->nvp_mutex);
2822 		}
2823 		if (links_up == NV_MAX_PORTS(nvc)) {
2824 			break;
2825 		}
2826 	}
2827 	NVLOG(NVDBG_RESET, nvc, nvp, "nv_init_port_link_processing():"
2828 	    "%d links up", links_up);
2829 	/*
2830 	 * At this point, if any device is attached, the link is established.
2831 	 * Wait till devices are ready to be accessed, no more than 200ms.
2832 	 * 200ms is empirical time in which a signature should be available.
2833 	 */
2834 	for (i = 0; i < 200; i++) {
2835 		ready_ports = 0;
2836 		for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
2837 			nvp = &(nvc->nvc_port[port]);
2838 			mutex_enter(&nvp->nvp_mutex);
2839 			bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
2840 			sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
2841 			if ((SSTATUS_GET_IPM(sstatus) == SSTATUS_IPM_ACTIVE) &&
2842 			    (SSTATUS_GET_DET(sstatus) ==
2843 			    SSTATUS_DET_DEVPRE_PHYCOM) &&
2844 			    !(nvp->nvp_state & (NV_PORT_RESET |
2845 			    NV_PORT_RESET_RETRY))) {
2846 				/*
2847 				 * Reset already processed
2848 				 */
2849 				NVLOG(NVDBG_RESET, nvc, nvp,
2850 				    "nv_init_port_link_processing()"
2851 				    "device ready; port state %x; "
2852 				    "time from reset %dms", nvp->nvp_state,
2853 				    TICK_TO_MSEC(ddi_get_lbolt() -
2854 				    nvp->nvp_reset_time));
2855 
2856 				ready_ports++;
2857 			}
2858 			mutex_exit(&nvp->nvp_mutex);
2859 		}
2860 		if (ready_ports == links_up) {
2861 			break;
2862 		}
2863 		drv_usecwait(NV_ONE_MSEC);
2864 	}
2865 	NVLOG(NVDBG_RESET, nvc, nvp, "nv_init_port_link_processing():"
2866 	    "%d devices ready", ready_ports);
2867 }
2868 
2869 /*
2870  * Free dynamically allocated structures for port.
2871  */
2872 static void
2873 nv_uninit_port(nv_port_t *nvp)
2874 {
2875 	int i;
2876 
2877 	/*
2878 	 * It is possible to reach here before a port has been initialized or
2879 	 * after it has already been uninitialized.  Just return in that case.
2880 	 */
2881 	if (nvp->nvp_slot == NULL) {
2882 
2883 		return;
2884 	}
2885 	/*
2886 	 * Mark port unusable now.
2887 	 */
2888 	nvp->nvp_state &= ~NV_PORT_INIT;
2889 
2890 	NVLOG(NVDBG_INIT, nvp->nvp_ctlp, nvp,
2891 	    "nv_uninit_port uninitializing", NULL);
2892 
2893 #ifdef SGPIO_SUPPORT
2894 	if (nvp->nvp_type == SATA_DTYPE_ATADISK) {
2895 		nv_sgp_drive_disconnect(nvp->nvp_ctlp, SGP_CTLR_PORT_TO_DRV(
2896 		    nvp->nvp_ctlp->nvc_ctlr_num, nvp->nvp_port_num));
2897 	}
2898 #endif
2899 
2900 	nvp->nvp_type = SATA_DTYPE_NONE;
2901 
2902 	for (i = 0; i < NV_QUEUE_SLOTS; i++) {
2903 		if (nvp->nvp_sg_paddr[i]) {
2904 			(void) ddi_dma_unbind_handle(nvp->nvp_sg_dma_hdl[i]);
2905 		}
2906 
2907 		if (nvp->nvp_sg_acc_hdl[i] != NULL) {
2908 			ddi_dma_mem_free(&(nvp->nvp_sg_acc_hdl[i]));
2909 		}
2910 
2911 		if (nvp->nvp_sg_dma_hdl[i] != NULL) {
2912 			ddi_dma_free_handle(&(nvp->nvp_sg_dma_hdl[i]));
2913 		}
2914 	}
2915 
2916 	kmem_free(nvp->nvp_slot, sizeof (nv_slot_t) * NV_QUEUE_SLOTS);
2917 	nvp->nvp_slot = NULL;
2918 
2919 	kmem_free(nvp->nvp_sg_dma_hdl,
2920 	    sizeof (ddi_dma_handle_t) * NV_QUEUE_SLOTS);
2921 	nvp->nvp_sg_dma_hdl = NULL;
2922 
2923 	kmem_free(nvp->nvp_sg_acc_hdl,
2924 	    sizeof (ddi_acc_handle_t) * NV_QUEUE_SLOTS);
2925 	nvp->nvp_sg_acc_hdl = NULL;
2926 
2927 	kmem_free(nvp->nvp_sg_addr, sizeof (caddr_t) * NV_QUEUE_SLOTS);
2928 	nvp->nvp_sg_addr = NULL;
2929 
2930 	kmem_free(nvp->nvp_sg_paddr, sizeof (uint32_t) * NV_QUEUE_SLOTS);
2931 	nvp->nvp_sg_paddr = NULL;
2932 }
2933 
2934 
2935 /*
2936  * Cache register offsets and access handles to frequently accessed registers
2937  * which are common to either chipset.
2938  */
2939 static void
2940 nv_common_reg_init(nv_ctl_t *nvc)
2941 {
2942 	uchar_t *bar5_addr = nvc->nvc_bar_addr[5];
2943 	uchar_t *bm_addr_offset, *sreg_offset;
2944 	uint8_t bar, port;
2945 	nv_port_t *nvp;
2946 
2947 	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
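		/*
		 * each port has its own command/control BAR pair, an offset
		 * into the shared bus master BAR, and its own SATA
		 * status/control register block within BAR 5
		 */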
2948 		if (port == 0) {
2949 			bar = NV_BAR_0;
2950 			bm_addr_offset = 0;
2951 			sreg_offset = (uchar_t *)(CH0_SREG_OFFSET + bar5_addr);
2952 		} else {
2953 			bar = NV_BAR_2;
2954 			bm_addr_offset = (uchar_t *)8;
2955 			sreg_offset = (uchar_t *)(CH1_SREG_OFFSET + bar5_addr);
2956 		}
2957 
2958 		nvp = &(nvc->nvc_port[port]);
2959 		nvp->nvp_ctlp = nvc;
2960 		nvp->nvp_port_num = port;
2961 		NVLOG(NVDBG_INIT, nvc, nvp, "setting up port mappings", NULL);
2962 
2963 		nvp->nvp_cmd_hdl = nvc->nvc_bar_hdl[bar];
2964 		nvp->nvp_cmd_addr = nvc->nvc_bar_addr[bar];
2965 		nvp->nvp_ctl_hdl = nvc->nvc_bar_hdl[bar + 1];
2966 		nvp->nvp_ctl_addr = nvc->nvc_bar_addr[bar + 1];
2967 		nvp->nvp_bm_hdl = nvc->nvc_bar_hdl[NV_BAR_4];
2968 		nvp->nvp_bm_addr = nvc->nvc_bar_addr[NV_BAR_4] +
2969 		    (long)bm_addr_offset;
2970 
2971 		nvp->nvp_sstatus = (uint32_t *)(sreg_offset + NV_SSTATUS);
2972 		nvp->nvp_serror = (uint32_t *)(sreg_offset + NV_SERROR);
2973 		nvp->nvp_sactive = (uint32_t *)(sreg_offset + NV_SACTIVE);
2974 		nvp->nvp_sctrl = (uint32_t *)(sreg_offset + NV_SCTRL);
2975 	}
2976 }
2977 
2978 
2979 static void
2980 nv_uninit_ctl(nv_ctl_t *nvc)
2981 {
2982 	int port;
2983 	nv_port_t *nvp;
2984 
2985 	NVLOG(NVDBG_INIT, nvc, NULL, "nv_uninit_ctl entered", NULL);
2986 
2987 	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
2988 		nvp = &(nvc->nvc_port[port]);
2989 		mutex_enter(&nvp->nvp_mutex);
2990 		NVLOG(NVDBG_INIT, nvc, nvp, "uninitializing port", NULL);
2991 		nv_uninit_port(nvp);
2992 		mutex_exit(&nvp->nvp_mutex);
2993 		mutex_destroy(&nvp->nvp_mutex);
2994 		cv_destroy(&nvp->nvp_poll_cv);
2995 	}
2996 
2997 	kmem_free(nvc->nvc_port, NV_MAX_PORTS(nvc) * sizeof (nv_port_t));
2998 	nvc->nvc_port = NULL;
2999 }
3000 
3001 
3002 /*
3003  * ck804 interrupt.  This is a wrapper around ck804_intr_process so
3004  * that interrupts from other devices can be disregarded while dtracing.
3005  */
3006 /* ARGSUSED */
3007 static uint_t
3008 ck804_intr(caddr_t arg1, caddr_t arg2)
3009 {
3010 	nv_ctl_t *nvc = (nv_ctl_t *)arg1;
3011 	uint8_t intr_status;
3012 	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
3013 
3014 	if (nvc->nvc_state & NV_CTRL_SUSPEND)
3015 		return (DDI_INTR_UNCLAIMED);
3016 
3017 	intr_status = ddi_get8(bar5_hdl, nvc->nvc_ck804_int_status);
3018 
3019 	if (intr_status == 0) {
3020 
3021 		return (DDI_INTR_UNCLAIMED);
3022 	}
3023 
3024 	ck804_intr_process(nvc, intr_status);
3025 
3026 	return (DDI_INTR_CLAIMED);
3027 }
3028 
3029 
3030 /*
 * Main interrupt handler for ck804.  Handles normal device
3032  * interrupts as well as port hot plug and remove interrupts.
3033  *
3034  */
3035 static void
3036 ck804_intr_process(nv_ctl_t *nvc, uint8_t intr_status)
3037 {
3038 
3039 	int port, i;
3040 	nv_port_t *nvp;
3041 	nv_slot_t *nv_slotp;
3042 	uchar_t	status;
3043 	sata_pkt_t *spkt;
3044 	uint8_t bmstatus, clear_bits;
3045 	ddi_acc_handle_t bmhdl;
3046 	int nvcleared = 0;
3047 	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
3048 	uint32_t sstatus;
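	/*
	 * per-port hot plug and power management interrupt status bits
	 */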
3049 	int port_mask_hot[] = {
3050 		CK804_INT_PDEV_HOT, CK804_INT_SDEV_HOT,
3051 	};
3052 	int port_mask_pm[] = {
3053 		CK804_INT_PDEV_PM, CK804_INT_SDEV_PM,
3054 	};
3055 
3056 	NVLOG(NVDBG_INTR, nvc, NULL,
3057 	    "ck804_intr_process entered intr_status=%x", intr_status);
3058 
3059 	/*
	 * For command completion interrupts, an explicit clear is not
	 * required.  However, for the error cases an explicit clear is
	 * performed.
3062 	 */
3063 	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
3064 
3065 		int port_mask[] = {CK804_INT_PDEV_INT, CK804_INT_SDEV_INT};
3066 
3067 		if ((port_mask[port] & intr_status) == 0) {
3068 			continue;
3069 		}
3070 
3071 		NVLOG(NVDBG_INTR, nvc, NULL,
3072 		    "ck804_intr_process interrupt on port %d", port);
3073 
3074 		nvp = &(nvc->nvc_port[port]);
3075 
3076 		mutex_enter(&nvp->nvp_mutex);
3077 
3078 		/*
		 * A corner case was found where an interrupt arrived
		 * before nvp_slot was set.  The source of that should be
		 * tracked down and eliminated so that this check can
		 * eventually be removed.
3084 		 */
3085 		if (nvp->nvp_slot == NULL) {
3086 			status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_status);
3087 			NVLOG(NVDBG_ALWAYS, nvc, nvp, "spurious interrupt "
3088 			    "received before initialization "
3089 			    "completed status=%x", status);
3090 			mutex_exit(&nvp->nvp_mutex);
3091 
3092 			/*
3093 			 * clear interrupt bits
3094 			 */
3095 			nv_put8(bar5_hdl, nvc->nvc_ck804_int_status,
3096 			    port_mask[port]);
3097 
3098 			continue;
3099 		}
3100 
3101 		if ((&(nvp->nvp_slot[0]))->nvslot_spkt == NULL)  {
3102 			status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_status);
3103 			NVLOG(NVDBG_ALWAYS, nvc, nvp, "spurious interrupt "
3104 			    " no command in progress status=%x", status);
3105 			mutex_exit(&nvp->nvp_mutex);
3106 
3107 			/*
3108 			 * clear interrupt bits
3109 			 */
3110 			nv_put8(bar5_hdl, nvc->nvc_ck804_int_status,
3111 			    port_mask[port]);
3112 
3113 			continue;
3114 		}
3115 
3116 		bmhdl = nvp->nvp_bm_hdl;
3117 		bmstatus = nv_get8(bmhdl, nvp->nvp_bmisx);
3118 
3119 		if (!(bmstatus & BMISX_IDEINTS)) {
3120 			mutex_exit(&nvp->nvp_mutex);
3121 
3122 			continue;
3123 		}
3124 
3125 		status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_altstatus);
3126 
3127 		if (status & SATA_STATUS_BSY) {
3128 			mutex_exit(&nvp->nvp_mutex);
3129 
3130 			continue;
3131 		}
3132 
3133 		nv_slotp = &(nvp->nvp_slot[0]);
3134 
3135 		ASSERT(nv_slotp);
3136 
3137 		spkt = nv_slotp->nvslot_spkt;
3138 
3139 		if (spkt == NULL) {
3140 			mutex_exit(&nvp->nvp_mutex);
3141 
3142 			continue;
3143 		}
3144 
3145 		(*nv_slotp->nvslot_intr)(nvp, nv_slotp);
3146 
3147 		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
3148 
3149 		if (nv_slotp->nvslot_flags == NVSLOT_COMPLETE) {
3150 
3151 			nv_complete_io(nvp, spkt, 0);
3152 		}
3153 
3154 		mutex_exit(&nvp->nvp_mutex);
3155 	}
3156 
3157 	/*
3158 	 * ck804 often doesn't correctly distinguish hot add/remove
3159 	 * interrupts.  Frequently both the ADD and the REMOVE bits
3160 	 * are asserted, whether it was a remove or add.  Use sstatus
3161 	 * to distinguish hot add from hot remove.
3162 	 */
3163 
3164 	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
3165 		clear_bits = 0;
3166 
3167 		nvp = &(nvc->nvc_port[port]);
3168 		mutex_enter(&nvp->nvp_mutex);
3169 
3170 		if ((port_mask_pm[port] & intr_status) != 0) {
3171 			clear_bits = port_mask_pm[port];
3172 			NVLOG(NVDBG_HOT, nvc, nvp,
3173 			    "clearing PM interrupt bit: %x",
3174 			    intr_status & port_mask_pm[port]);
3175 		}
3176 
3177 		if ((port_mask_hot[port] & intr_status) == 0) {
3178 			if (clear_bits != 0) {
3179 				goto clear;
3180 			} else {
3181 				mutex_exit(&nvp->nvp_mutex);
3182 				continue;
3183 			}
3184 		}
3185 
3186 		/*
3187 		 * reaching here means there was a hot add or remove.
3188 		 */
3189 		clear_bits |= port_mask_hot[port];
3190 
3191 		ASSERT(nvc->nvc_port[port].nvp_sstatus);
3192 
3193 		sstatus = nv_get32(bar5_hdl,
3194 		    nvc->nvc_port[port].nvp_sstatus);
3195 
3196 		if ((sstatus & SSTATUS_DET_DEVPRE_PHYCOM) ==
3197 		    SSTATUS_DET_DEVPRE_PHYCOM) {
3198 			nv_report_add_remove(nvp, 0);
3199 		} else {
3200 			nv_report_add_remove(nvp, NV_PORT_HOTREMOVED);
3201 		}
3202 	clear:
3203 		/*
3204 		 * clear interrupt bits.  explicit interrupt clear is
3205 		 * required for hotplug interrupts.
3206 		 */
3207 		nv_put8(bar5_hdl, nvc->nvc_ck804_int_status, clear_bits);
3208 
3209 		/*
3210 		 * make sure it's flushed and cleared.  If not try
3211 		 * again.  Sometimes it has been observed to not clear
3212 		 * on the first try.
3213 		 */
3214 		intr_status = nv_get8(bar5_hdl, nvc->nvc_ck804_int_status);
3215 
3216 		/*
3217 		 * make 10 additional attempts to clear the interrupt
3218 		 */
3219 		for (i = 0; (intr_status & clear_bits) && (i < 10); i++) {
3220 			NVLOG(NVDBG_ALWAYS, nvc, nvp, "inst_status=%x "
3221 			    "still not clear try=%d", intr_status,
3222 			    ++nvcleared);
3223 			nv_put8(bar5_hdl, nvc->nvc_ck804_int_status,
3224 			    clear_bits);
3225 			intr_status = nv_get8(bar5_hdl,
3226 			    nvc->nvc_ck804_int_status);
3227 		}
3228 
3229 		/*
3230 		 * if still not clear, log a message and disable the
3231 		 * port. highly unlikely that this path is taken, but it
3232 		 * gives protection against a wedged interrupt.
3233 		 */
3234 		if (intr_status & clear_bits) {
3235 			(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
3236 			nv_port_state_change(nvp, SATA_EVNT_PORT_FAILED,
3237 			    SATA_ADDR_CPORT, SATA_PSTATE_FAILED);
3238 			nvp->nvp_state |= NV_PORT_FAILED;
3239 			(void) nv_abort_active(nvp, NULL, SATA_PKT_DEV_ERROR,
3240 			    B_TRUE);
3241 			nv_cmn_err(CE_WARN, nvc, nvp, "unable to clear "
3242 			    "interrupt.  disabling port intr_status=%X",
3243 			    intr_status);
3244 		}
3245 
3246 		mutex_exit(&nvp->nvp_mutex);
3247 	}
3248 }
3249 
3250 
3251 /*
3252  * Interrupt handler for mcp5x.  It is invoked by the wrapper for each port
3253  * on the controller, to handle completion and hot plug and remove events.
3254  *
3255  */
3256 static uint_t
3257 mcp5x_intr_port(nv_port_t *nvp)
3258 {
3259 	nv_ctl_t *nvc = nvp->nvp_ctlp;
3260 	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
3261 	uint8_t clear = 0, intr_cycles = 0;
3262 	int ret = DDI_INTR_UNCLAIMED;
3263 	uint16_t int_status;
3264 	clock_t intr_time;
3265 	int loop_cnt = 0;
3266 
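	/*
	 * note when interrupt processing started; the longest time spent
	 * in this routine is tracked in intr_duration (see below)
	 */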
3267 	nvp->intr_start_time = ddi_get_lbolt();
3268 
3269 	NVLOG(NVDBG_INTR, nvc, nvp, "mcp55_intr_port entered", NULL);
3270 
3271 	do {
3272 		/*
3273 		 * read current interrupt status
3274 		 */
3275 		int_status = nv_get16(bar5_hdl, nvp->nvp_mcp5x_int_status);
3276 
3277 		NVLOG(NVDBG_INTR, nvc, nvp, "int_status = %x", int_status);
3278 
3279 		/*
3280 		 * MCP5X_INT_IGNORE interrupts will show up in the status,
3281 		 * but are masked out from causing an interrupt to be generated
3282 		 * to the processor.  Ignore them here by masking them out.
3283 		 */
3284 		int_status &= ~(MCP5X_INT_IGNORE);
3285 
3286 		/*
3287 		 * exit the loop when no more interrupts to process
3288 		 */
3289 		if (int_status == 0) {
3290 
3291 			break;
3292 		}
3293 
3294 		if (int_status & MCP5X_INT_COMPLETE) {
3295 			NVLOG(NVDBG_INTR, nvc, nvp,
3296 			    "mcp5x_packet_complete_intr", NULL);
3297 			/*
3298 			 * since int_status was set, return DDI_INTR_CLAIMED
3299 			 * from the DDI's perspective even though the packet
3300 			 * completion may not have succeeded.  If it fails,
3301 			 * need to manually clear the interrupt, otherwise
3302 			 * clearing is implicit.
3303 			 */
3304 			ret = DDI_INTR_CLAIMED;
3305 			if (mcp5x_packet_complete_intr(nvc, nvp) ==
3306 			    NV_FAILURE) {
3307 				clear |= MCP5X_INT_COMPLETE;
3308 			} else {
3309 				intr_cycles = 0;
3310 			}
3311 		}
3312 
3313 		if (int_status & MCP5X_INT_DMA_SETUP) {
3314 			NVLOG(NVDBG_INTR, nvc, nvp, "mcp5x_dma_setup_intr",
3315 			    NULL);
3316 
3317 			/*
3318 			 * Needs to be cleared before starting the BM, so do it
3319 			 * now.  make sure this is still working.
3320 			 */
3321 			nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_status,
3322 			    MCP5X_INT_DMA_SETUP);
3323 #ifdef NCQ
3324 			ret = mcp5x_dma_setup_intr(nvc, nvp);
3325 #endif
3326 		}
3327 
3328 		if (int_status & MCP5X_INT_REM) {
3329 			clear |= MCP5X_INT_REM;
3330 			ret = DDI_INTR_CLAIMED;
3331 
3332 			mutex_enter(&nvp->nvp_mutex);
3333 			nv_report_add_remove(nvp, NV_PORT_HOTREMOVED);
3334 			mutex_exit(&nvp->nvp_mutex);
3335 
3336 		} else if (int_status & MCP5X_INT_ADD) {
3337 			clear |= MCP5X_INT_ADD;
3338 			ret = DDI_INTR_CLAIMED;
3339 
3340 			mutex_enter(&nvp->nvp_mutex);
3341 			nv_report_add_remove(nvp, 0);
3342 			mutex_exit(&nvp->nvp_mutex);
3343 		}
3344 		if (clear) {
3345 			nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_status, clear);
3346 			clear = 0;
3347 		}
3348 		/* Protect against a stuck interrupt */
3349 		if (intr_cycles++ == NV_MAX_INTR_LOOP) {
3350 			nv_cmn_err(CE_WARN, nvc, nvp, "excessive interrupt "
3351 			    "processing.  Disabling port int_status=%X"
3352 			    " clear=%X", int_status, clear);
3353 			mutex_enter(&nvp->nvp_mutex);
3354 			(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
3355 			nv_port_state_change(nvp, SATA_EVNT_PORT_FAILED,
3356 			    SATA_ADDR_CPORT, SATA_PSTATE_FAILED);
3357 			nvp->nvp_state |= NV_PORT_FAILED;
3358 			(void) nv_abort_active(nvp, NULL, SATA_PKT_DEV_ERROR,
3359 			    B_TRUE);
3360 			mutex_exit(&nvp->nvp_mutex);
3361 		}
3362 
3363 	} while (loop_cnt++ < nv_max_intr_loops);
3364 
3365 	if (loop_cnt > nvp->intr_loop_cnt) {
3366 		NVLOG(NVDBG_INTR, nvp->nvp_ctlp, nvp,
3367 		    "Exiting with multiple intr loop count %d", loop_cnt);
3368 		nvp->intr_loop_cnt = loop_cnt;
3369 	}
3370 
3371 	if ((nv_debug_flags & (NVDBG_INTR | NVDBG_VERBOSE)) ==
3372 	    (NVDBG_INTR | NVDBG_VERBOSE)) {
3373 		uint8_t status, bmstatus;
3374 		uint16_t int_status2;
3375 
3376 		if (int_status & MCP5X_INT_COMPLETE) {
3377 			status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_altstatus);
3378 			bmstatus = nv_get8(nvp->nvp_bm_hdl, nvp->nvp_bmisx);
3379 			int_status2 = nv_get16(nvp->nvp_ctlp->nvc_bar_hdl[5],
3380 			    nvp->nvp_mcp5x_int_status);
3381 			NVLOG(NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
3382 			    "mcp55_intr_port: Exiting with altstatus %x, "
3383 			    "bmicx %x, int_status2 %X, int_status %X, ret %x,"
3384 			    " loop_cnt %d ", status, bmstatus, int_status2,
3385 			    int_status, ret, loop_cnt);
3386 		}
3387 	}
3388 
3389 	NVLOG(NVDBG_INTR, nvc, nvp, "mcp55_intr_port: finished ret=%d", ret);
3390 
3391 	/*
3392 	 * To facilitate debugging, keep track of the length of time spent in
3393 	 * the port interrupt routine.
3394 	 */
3395 	intr_time = ddi_get_lbolt() - nvp->intr_start_time;
3396 	if (intr_time > nvp->intr_duration)
3397 		nvp->intr_duration = intr_time;
3398 
3399 	return (ret);
3400 }
3401 
3402 
3403 /* ARGSUSED */
3404 static uint_t
3405 mcp5x_intr(caddr_t arg1, caddr_t arg2)
3406 {
3407 	nv_ctl_t *nvc = (nv_ctl_t *)arg1;
3408 	int ret;
3409 
3410 	if (nvc->nvc_state & NV_CTRL_SUSPEND)
3411 		return (DDI_INTR_UNCLAIMED);
3412 
3413 	ret = mcp5x_intr_port(&(nvc->nvc_port[0]));
3414 	ret |= mcp5x_intr_port(&(nvc->nvc_port[1]));
3415 
3416 	return (ret);
3417 }
3418 
3419 
3420 #ifdef NCQ
3421 /*
3422  * with software driven NCQ on mcp5x, an interrupt occurs right
3423  * before the drive is ready to do a DMA transfer.  At this point,
3424  * the PRD table needs to be programmed and the DMA engine enabled
3425  * and ready to go.
3426  *
3427  * -- MCP_SATA_AE_INT_STATUS_SDEV_DMA_SETUP indicates the interrupt
3428  * -- MCP_SATA_AE_NCQ_PDEV_DMA_SETUP_TAG shows which command is ready
3429  * -- clear bit 0 of master command reg
3430  * -- program PRD
3431  * -- clear the interrupt status bit for the DMA Setup FIS
3432  * -- set bit 0 of the bus master command register
3433  */
3434 static int
3435 mcp5x_dma_setup_intr(nv_ctl_t *nvc, nv_port_t *nvp)
3436 {
3437 	int slot;
3438 	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
3439 	uint8_t bmicx;
3440 	int port = nvp->nvp_port_num;
3441 	uint8_t tag_shift[] = {MCP_SATA_AE_NCQ_PDEV_DMA_SETUP_TAG_SHIFT,
3442 	    MCP_SATA_AE_NCQ_SDEV_DMA_SETUP_TAG_SHIFT};
3443 
3444 	nv_cmn_err(CE_PANIC, nvc, nvp,
3445 	    "this is should not be executed at all until NCQ");
3446 
3447 	mutex_enter(&nvp->nvp_mutex);
3448 
3449 	slot = nv_get32(nvc->nvc_bar_hdl[5], nvc->nvc_mcp5x_ncq);
3450 
3451 	slot = (slot >> tag_shift[port]) & MCP_SATA_AE_NCQ_DMA_SETUP_TAG_MASK;
3452 
3453 	NVLOG(NVDBG_INTR, nvc, nvp, "mcp5x_dma_setup_intr slot %d"
3454 	    " nvp_slot_sactive %X", slot, nvp->nvp_sactive_cache);
3455 
3456 	/*
3457 	 * halt the DMA engine.  This step is necessary according to
3458 	 * the mcp5x spec, probably since there may have been a "first" packet
3459 	 * that already programmed the DMA engine, but may not turn out to
3460 	 * be the first one processed.
3461 	 */
3462 	bmicx = nv_get8(bmhdl, nvp->nvp_bmicx);
3463 
3464 	if (bmicx & BMICX_SSBM) {
3465 		NVLOG(NVDBG_INTR, nvc, nvp, "BM was already enabled for "
3466 		    "another packet.  Cancelling and reprogramming", NULL);
3467 		nv_put8(bmhdl, nvp->nvp_bmicx,  bmicx & ~BMICX_SSBM);
3468 	}
3469 	nv_put8(bmhdl, nvp->nvp_bmicx,  bmicx & ~BMICX_SSBM);
3470 
3471 	nv_start_dma_engine(nvp, slot);
3472 
3473 	mutex_exit(&nvp->nvp_mutex);
3474 
3475 	return (DDI_INTR_CLAIMED);
3476 }
3477 #endif /* NCQ */
3478 
3479 
3480 /*
3481  * packet completion interrupt.  If the packet is complete, invoke
3482  * the packet completion callback.
3483  */
3484 static int
3485 mcp5x_packet_complete_intr(nv_ctl_t *nvc, nv_port_t *nvp)
3486 {
3487 	uint8_t status, bmstatus;
3488 	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
3489 	int sactive;
3490 	int active_pkt_bit = 0, active_pkt = 0, ncq_command = B_FALSE;
3491 	sata_pkt_t *spkt;
3492 	nv_slot_t *nv_slotp;
3493 
3494 	mutex_enter(&nvp->nvp_mutex);
3495 
3496 	bmstatus = nv_get8(bmhdl, nvp->nvp_bmisx);
3497 
3498 	if (!(bmstatus & (BMISX_IDEINTS | BMISX_IDERR))) {
3499 		NVLOG(NVDBG_INTR, nvc, nvp, "BMISX_IDEINTS not set", NULL);
3500 		mutex_exit(&nvp->nvp_mutex);
3501 
3502 		return (NV_FAILURE);
3503 	}
3504 
3505 	/*
3506 	 * Commands may have been processed by abort or timeout before
3507 	 * interrupt processing acquired the mutex. So we may be processing
3508 	 * an interrupt for packets that were already removed.
	 * For functioning NCQ processing all slots may be checked, but
3510 	 * with NCQ disabled (current code), relying on *_run flags is OK.
3511 	 */
3512 	if (nvp->nvp_non_ncq_run) {
3513 		/*
3514 		 * If the just completed item is a non-ncq command, the busy
3515 		 * bit should not be set
3516 		 */
3517 		status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_altstatus);
3518 		if (status & SATA_STATUS_BSY) {
3519 			nv_cmn_err(CE_WARN, nvc, nvp,
3520 			    "unexpected SATA_STATUS_BSY set");
3521 			mutex_exit(&nvp->nvp_mutex);
3522 			/*
3523 			 * calling function will clear interrupt.  then
3524 			 * the real interrupt will either arrive or the
3525 			 * packet timeout handling will take over and
3526 			 * reset.
3527 			 */
3528 			return (NV_FAILURE);
3529 		}
3530 		ASSERT(nvp->nvp_ncq_run == 0);
3531 	} else {
3532 		ASSERT(nvp->nvp_non_ncq_run == 0);
3533 		/*
3534 		 * Pre-NCQ code!
3535 		 * Nothing to do. The packet for the command that just
3536 		 * completed is already gone. Just clear the interrupt.
3537 		 */
3538 		(void) nv_bm_status_clear(nvp);
3539 		(void) nv_get8(nvp->nvp_cmd_hdl, nvp->nvp_status);
3540 		mutex_exit(&nvp->nvp_mutex);
3541 		return (NV_SUCCESS);
3542 
3543 		/*
3544 		 * NCQ check for BSY here and wait if still bsy before
3545 		 * continuing. Rather than wait for it to be cleared
3546 		 * when starting a packet and wasting CPU time, the starting
3547 		 * thread can exit immediate, but might have to spin here
3548 		 * for a bit possibly.  Needs more work and experimentation.
3549 		 *
3550 		 */
3551 	}
3552 
3553 	/*
3554 	 * active_pkt_bit will represent the bitmap of the single completed
3555 	 * packet.  Because of the nature of sw assisted NCQ, only one
3556 	 * command will complete per interrupt.
3557 	 */
3558 
3559 	if (ncq_command == B_FALSE) {
3560 		active_pkt = 0;
3561 	} else {
3562 		/*
3563 		 * NCQ: determine which command just completed, by examining
3564 		 * which bit cleared in the register since last written.
3565 		 */
3566 		sactive = nv_get32(nvc->nvc_bar_hdl[5], nvp->nvp_sactive);
3567 
3568 		active_pkt_bit = ~sactive & nvp->nvp_sactive_cache;
3569 
3570 		ASSERT(active_pkt_bit);
3571 
3572 
3573 		/*
3574 		 * this failure path needs more work to handle the
3575 		 * error condition and recovery.
3576 		 */
3577 		if (active_pkt_bit == 0) {
3578 			ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
3579 
3580 			nv_cmn_err(CE_CONT, nvc, nvp, "ERROR sactive = %X  "
3581 			    "nvp->nvp_sactive %X", sactive,
3582 			    nvp->nvp_sactive_cache);
3583 
3584 			(void) nv_get8(cmdhdl, nvp->nvp_status);
3585 
3586 			mutex_exit(&nvp->nvp_mutex);
3587 
3588 			return (NV_FAILURE);
3589 		}
3590 
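		/*
		 * convert the single set bit into its slot index
		 */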
3591 		for (active_pkt = 0; (active_pkt_bit & 0x1) != 0x1;
3592 		    active_pkt++, active_pkt_bit >>= 1) {
3593 		}
3594 
3595 		/*
3596 		 * make sure only one bit is ever turned on
3597 		 */
3598 		ASSERT(active_pkt_bit == 1);
3599 
3600 		nvp->nvp_sactive_cache &= ~(0x01 << active_pkt);
3601 	}
3602 
3603 	nv_slotp = &(nvp->nvp_slot[active_pkt]);
3604 
3605 	spkt = nv_slotp->nvslot_spkt;
3606 
3607 	ASSERT(spkt != NULL);
3608 
3609 	(*nv_slotp->nvslot_intr)(nvp, nv_slotp);
3610 
3611 	nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
3612 
3613 	if (nv_slotp->nvslot_flags == NVSLOT_COMPLETE) {
3614 
3615 		nv_complete_io(nvp, spkt, active_pkt);
3616 	}
3617 
3618 	mutex_exit(&nvp->nvp_mutex);
3619 
3620 	return (NV_SUCCESS);
3621 }
3622 
3623 
3624 static void
3625 nv_complete_io(nv_port_t *nvp, sata_pkt_t *spkt, int slot)
3626 {
3627 
3628 	ASSERT(MUTEX_HELD(&nvp->nvp_mutex));
3629 
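	/*
	 * decrement the count of outstanding NCQ or non-NCQ commands
	 */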
3630 	if ((&(nvp->nvp_slot[slot]))->nvslot_flags & NVSLOT_NCQ) {
3631 		nvp->nvp_ncq_run--;
3632 	} else {
3633 		nvp->nvp_non_ncq_run--;
3634 	}
3635 
3636 	/*
3637 	 * mark the packet slot idle so it can be reused.  Do this before
3638 	 * calling satapkt_comp so the slot can be reused.
3639 	 */
3640 	(&(nvp->nvp_slot[slot]))->nvslot_spkt = NULL;
3641 
3642 	if (spkt->satapkt_op_mode & SATA_OPMODE_SYNCH) {
3643 		/*
3644 		 * If this is not timed polled mode cmd, which has an
3645 		 * active thread monitoring for completion, then need
3646 		 * to signal the sleeping thread that the cmd is complete.
3647 		 */
3648 		if ((spkt->satapkt_op_mode & SATA_OPMODE_POLLING) == 0) {
3649 			cv_signal(&nvp->nvp_poll_cv);
3650 		}
3651 
3652 		return;
3653 	}
3654 
3655 	if (spkt->satapkt_comp != NULL) {
3656 		mutex_exit(&nvp->nvp_mutex);
3657 		(*spkt->satapkt_comp)(spkt);
3658 		mutex_enter(&nvp->nvp_mutex);
3659 	}
3660 }
3661 
3662 
3663 /*
3664  * check whether packet is ncq command or not.  for ncq command,
3665  * start it if there is still room on queue.  for non-ncq command only
3666  * start if no other command is running.
3667  */
3668 static int
3669 nv_start_async(nv_port_t *nvp, sata_pkt_t *spkt)
3670 {
3671 	uint8_t cmd, ncq;
3672 
3673 	NVLOG(NVDBG_ENTRY, nvp->nvp_ctlp, nvp, "nv_start_async: entry", NULL);
3674 
3675 	cmd = spkt->satapkt_cmd.satacmd_cmd_reg;
3676 
3677 	ncq = ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
3678 	    (cmd == SATAC_READ_FPDMA_QUEUED));
3679 
3680 	if (ncq == B_FALSE) {
3681 
3682 		if ((nvp->nvp_non_ncq_run == 1) ||
3683 		    (nvp->nvp_ncq_run > 0)) {
3684 			/*
			 * The next command is non-NCQ, which cannot run
			 * concurrently with any other command.  Exit and
			 * return queue full.
3687 			 */
3688 			spkt->satapkt_reason = SATA_PKT_QUEUE_FULL;
3689 
3690 			return (SATA_TRAN_QUEUE_FULL);
3691 		}
3692 
3693 		return (nv_start_common(nvp, spkt));
3694 	}
3695 
3696 	/*
3697 	 * ncq == B_TRUE
3698 	 */
3699 	if (nvp->nvp_non_ncq_run == 1) {
3700 		/*
3701 		 * cannot start any NCQ commands when there
3702 		 * is a non-NCQ command running.
3703 		 */
3704 		spkt->satapkt_reason = SATA_PKT_QUEUE_FULL;
3705 
3706 		return (SATA_TRAN_QUEUE_FULL);
3707 	}
3708 
3709 #ifdef NCQ
3710 	/*
3711 	 * this is not compiled for now as satapkt_device.satadev_qdepth
3712 	 * is being pulled out until NCQ support is later addressed
3713 	 *
3714 	 * nvp_queue_depth is initialized by the first NCQ command
3715 	 * received.
3716 	 */
3717 	if (nvp->nvp_queue_depth == 1) {
3718 		nvp->nvp_queue_depth =
3719 		    spkt->satapkt_device.satadev_qdepth;
3720 
3721 		ASSERT(nvp->nvp_queue_depth > 1);
3722 
3723 		NVLOG(NVDBG_ENTRY, nvp->nvp_ctlp, nvp,
3724 		    "nv_process_queue: nvp_queue_depth set to %d",
3725 		    nvp->nvp_queue_depth);
3726 	}
3727 #endif
3728 
3729 	if (nvp->nvp_ncq_run >= nvp->nvp_queue_depth) {
3730 		/*
3731 		 * max number of NCQ commands already active
3732 		 */
3733 		spkt->satapkt_reason = SATA_PKT_QUEUE_FULL;
3734 
3735 		return (SATA_TRAN_QUEUE_FULL);
3736 	}
3737 
3738 	return (nv_start_common(nvp, spkt));
3739 }
3740 
3741 
3742 /*
 * configure fixed (INTx/legacy) interrupts
3744  */
3745 static int
3746 nv_add_legacy_intrs(nv_ctl_t *nvc)
3747 {
3748 	dev_info_t	*devinfo = nvc->nvc_dip;
3749 	int		actual, count = 0;
3750 	int		x, y, rc, inum = 0;
3751 
3752 	NVLOG(NVDBG_INIT, nvc, NULL, "nv_add_legacy_intrs", NULL);
3753 
3754 	/*
3755 	 * get number of interrupts
3756 	 */
3757 	rc = ddi_intr_get_nintrs(devinfo, DDI_INTR_TYPE_FIXED, &count);
3758 	if ((rc != DDI_SUCCESS) || (count == 0)) {
3759 		NVLOG(NVDBG_INIT, nvc, NULL,
3760 		    "ddi_intr_get_nintrs() failed, "
3761 		    "rc %d count %d", rc, count);
3762 
3763 		return (DDI_FAILURE);
3764 	}
3765 
3766 	/*
3767 	 * allocate an array of interrupt handles
3768 	 */
3769 	nvc->nvc_intr_size = count * sizeof (ddi_intr_handle_t);
3770 	nvc->nvc_htable = kmem_zalloc(nvc->nvc_intr_size, KM_SLEEP);
3771 
3772 	/*
3773 	 * call ddi_intr_alloc()
3774 	 */
3775 	rc = ddi_intr_alloc(devinfo, nvc->nvc_htable, DDI_INTR_TYPE_FIXED,
3776 	    inum, count, &actual, DDI_INTR_ALLOC_STRICT);
3777 
3778 	if ((rc != DDI_SUCCESS) || (actual == 0)) {
3779 		nv_cmn_err(CE_WARN, nvc, NULL,
3780 		    "ddi_intr_alloc() failed, rc %d", rc);
3781 		kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
3782 
3783 		return (DDI_FAILURE);
3784 	}
3785 
3786 	if (actual < count) {
3787 		nv_cmn_err(CE_WARN, nvc, NULL,
3788 		    "ddi_intr_alloc: requested: %d, received: %d",
3789 		    count, actual);
3790 
3791 		goto failure;
3792 	}
3793 
3794 	nvc->nvc_intr_cnt = actual;
3795 
3796 	/*
3797 	 * get intr priority
3798 	 */
3799 	if (ddi_intr_get_pri(nvc->nvc_htable[0], &nvc->nvc_intr_pri) !=
3800 	    DDI_SUCCESS) {
3801 		nv_cmn_err(CE_WARN, nvc, NULL, "ddi_intr_get_pri() failed");
3802 
3803 		goto failure;
3804 	}
3805 
3806 	/*
3807 	 * Test for high level mutex
3808 	 */
3809 	if (nvc->nvc_intr_pri >= ddi_intr_get_hilevel_pri()) {
3810 		nv_cmn_err(CE_WARN, nvc, NULL,
3811 		    "nv_add_legacy_intrs: high level intr not supported");
3812 
3813 		goto failure;
3814 	}
3815 
3816 	for (x = 0; x < actual; x++) {
3817 		if (ddi_intr_add_handler(nvc->nvc_htable[x],
3818 		    nvc->nvc_interrupt, (caddr_t)nvc, NULL) != DDI_SUCCESS) {
3819 			nv_cmn_err(CE_WARN, nvc, NULL,
3820 			    "ddi_intr_add_handler() failed");
3821 
3822 			goto failure;
3823 		}
3824 	}
3825 
3826 	/*
3827 	 * call ddi_intr_enable() for legacy interrupts
3828 	 */
3829 	for (x = 0; x < nvc->nvc_intr_cnt; x++) {
3830 		(void) ddi_intr_enable(nvc->nvc_htable[x]);
3831 	}
3832 
3833 	return (DDI_SUCCESS);
3834 
3835 	failure:
3836 	/*
3837 	 * free allocated intr and nvc_htable
3838 	 */
3839 	for (y = 0; y < actual; y++) {
3840 		(void) ddi_intr_free(nvc->nvc_htable[y]);
3841 	}
3842 
3843 	kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
3844 
3845 	return (DDI_FAILURE);
3846 }
3847 
3848 #ifdef	NV_MSI_SUPPORTED
3849 /*
3850  * configure MSI interrupts
3851  */
3852 static int
3853 nv_add_msi_intrs(nv_ctl_t *nvc)
3854 {
3855 	dev_info_t	*devinfo = nvc->nvc_dip;
3856 	int		count, avail, actual;
3857 	int		x, y, rc, inum = 0;
3858 
3859 	NVLOG(NVDBG_INIT, nvc, NULL, "nv_add_msi_intrs", NULL);
3860 
3861 	/*
3862 	 * get number of interrupts
3863 	 */
3864 	rc = ddi_intr_get_nintrs(devinfo, DDI_INTR_TYPE_MSI, &count);
3865 	if ((rc != DDI_SUCCESS) || (count == 0)) {
3866 		nv_cmn_err(CE_WARN, nvc, NULL,
3867 		    "ddi_intr_get_nintrs() failed, "
3868 		    "rc %d count %d", rc, count);
3869 
3870 		return (DDI_FAILURE);
3871 	}
3872 
3873 	/*
3874 	 * get number of available interrupts
3875 	 */
3876 	rc = ddi_intr_get_navail(devinfo, DDI_INTR_TYPE_MSI, &avail);
3877 	if ((rc != DDI_SUCCESS) || (avail == 0)) {
3878 		nv_cmn_err(CE_WARN, nvc, NULL,
3879 		    "ddi_intr_get_navail() failed, "
3880 		    "rc %d avail %d", rc, avail);
3881 
3882 		return (DDI_FAILURE);
3883 	}
3884 
3885 	if (avail < count) {
3886 		nv_cmn_err(CE_WARN, nvc, NULL,
3887 		    "ddi_intr_get_nvail returned %d ddi_intr_get_nintrs: %d",
3888 		    avail, count);
3889 	}
3890 
3891 	/*
3892 	 * allocate an array of interrupt handles
3893 	 */
3894 	nvc->nvc_intr_size = count * sizeof (ddi_intr_handle_t);
3895 	nvc->nvc_htable = kmem_alloc(nvc->nvc_intr_size, KM_SLEEP);
3896 
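	/*
	 * DDI_INTR_ALLOC_NORMAL allows a partial allocation; the driver
	 * uses however many MSI vectors were actually granted.
	 */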
3897 	rc = ddi_intr_alloc(devinfo, nvc->nvc_htable, DDI_INTR_TYPE_MSI,
3898 	    inum, count, &actual, DDI_INTR_ALLOC_NORMAL);
3899 
3900 	if ((rc != DDI_SUCCESS) || (actual == 0)) {
3901 		nv_cmn_err(CE_WARN, nvc, NULL,
3902 		    "ddi_intr_alloc() failed, rc %d", rc);
3903 		kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
3904 
3905 		return (DDI_FAILURE);
3906 	}
3907 
3908 	/*
3909 	 * Use interrupt count returned or abort?
3910 	 */
3911 	if (actual < count) {
3912 		NVLOG(NVDBG_INIT, nvc, NULL,
3913 		    "Requested: %d, Received: %d", count, actual);
3914 	}
3915 
3916 	nvc->nvc_intr_cnt = actual;
3917 
3918 	/*
3919 	 * get priority for first msi, assume remaining are all the same
3920 	 */
3921 	if (ddi_intr_get_pri(nvc->nvc_htable[0], &nvc->nvc_intr_pri) !=
3922 	    DDI_SUCCESS) {
3923 		nv_cmn_err(CE_WARN, nvc, NULL, "ddi_intr_get_pri() failed");
3924 
3925 		goto failure;
3926 	}
3927 
3928 	/*
3929 	 * test for high level mutex
3930 	 */
3931 	if (nvc->nvc_intr_pri >= ddi_intr_get_hilevel_pri()) {
3932 		nv_cmn_err(CE_WARN, nvc, NULL,
3933 		    "nv_add_msi_intrs: high level intr not supported");
3934 
3935 		goto failure;
3936 	}
3937 
3938 	/*
3939 	 * Call ddi_intr_add_handler()
3940 	 */
3941 	for (x = 0; x < actual; x++) {
3942 		if (ddi_intr_add_handler(nvc->nvc_htable[x],
3943 		    nvc->nvc_interrupt, (caddr_t)nvc, NULL) != DDI_SUCCESS) {
3944 			nv_cmn_err(CE_WARN, nvc, NULL,
3945 			    "ddi_intr_add_handler() failed");
3946 
3947 			goto failure;
3948 		}
3949 	}
3950 
3951 	(void) ddi_intr_get_cap(nvc->nvc_htable[0], &nvc->nvc_intr_cap);
3952 
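	/*
	 * DDI_INTR_FLAG_BLOCK indicates the MSI vectors can only be
	 * enabled and disabled as a group, so use the block interfaces
	 * in that case.
	 */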
3953 	if (nvc->nvc_intr_cap & DDI_INTR_FLAG_BLOCK) {
3954 		(void) ddi_intr_block_enable(nvc->nvc_htable,
3955 		    nvc->nvc_intr_cnt);
3956 	} else {
3957 		/*
3958 		 * Call ddi_intr_enable() for MSI non block enable
3959 		 */
3960 		for (x = 0; x < nvc->nvc_intr_cnt; x++) {
3961 			(void) ddi_intr_enable(nvc->nvc_htable[x]);
3962 		}
3963 	}
3964 
3965 	return (DDI_SUCCESS);
3966 
3967 	failure:
3968 	/*
3969 	 * free allocated intr and nvc_htable
3970 	 */
3971 	for (y = 0; y < actual; y++) {
3972 		(void) ddi_intr_free(nvc->nvc_htable[y]);
3973 	}
3974 
3975 	kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
3976 
3977 	return (DDI_FAILURE);
3978 }
3979 #endif
3980 
3981 
3982 static void
3983 nv_rem_intrs(nv_ctl_t *nvc)
3984 {
3985 	int x, i;
3986 	nv_port_t *nvp;
3987 
3988 	NVLOG(NVDBG_INIT, nvc, NULL, "nv_rem_intrs", NULL);
3989 
3990 	/*
3991 	 * prevent controller from generating interrupts by
3992 	 * masking them out.  This is an extra precaution.
3993 	 */
3994 	for (i = 0; i < NV_MAX_PORTS(nvc); i++) {
3995 		nvp = (&nvc->nvc_port[i]);
3996 		mutex_enter(&nvp->nvp_mutex);
3997 		(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
3998 		mutex_exit(&nvp->nvp_mutex);
3999 	}
4000 
4001 	/*
4002 	 * disable all interrupts
4003 	 */
4004 	if ((nvc->nvc_intr_type == DDI_INTR_TYPE_MSI) &&
4005 	    (nvc->nvc_intr_cap & DDI_INTR_FLAG_BLOCK)) {
4006 		(void) ddi_intr_block_disable(nvc->nvc_htable,
4007 		    nvc->nvc_intr_cnt);
4008 	} else {
4009 		for (x = 0; x < nvc->nvc_intr_cnt; x++) {
4010 			(void) ddi_intr_disable(nvc->nvc_htable[x]);
4011 		}
4012 	}
4013 
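	/*
	 * remove the handlers, then free the interrupt handles and the
	 * handle table
	 */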
4014 	for (x = 0; x < nvc->nvc_intr_cnt; x++) {
4015 		(void) ddi_intr_remove_handler(nvc->nvc_htable[x]);
4016 		(void) ddi_intr_free(nvc->nvc_htable[x]);
4017 	}
4018 
4019 	kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
4020 }
4021 
4022 
4023 /*
 * Variable-argument wrapper for cmn_err().  Prefixes the instance and
 * port number when available.
4026  */
4027 static void
4028 nv_vcmn_err(int ce, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, va_list ap)
4029 {
4030 	char port[NV_STR_LEN];
4031 	char inst[NV_STR_LEN];
	dev_info_t *dip = NULL;	/* stays NULL if neither nvc nor nvp given */
4033 
4034 	if (nvc) {
4035 		(void) snprintf(inst, NV_STR_LEN, "inst %d",
4036 		    ddi_get_instance(nvc->nvc_dip));
4037 		dip = nvc->nvc_dip;
4038 	} else {
4039 		inst[0] = '\0';
4040 	}
4041 
4042 	if (nvp) {
		(void) snprintf(port, NV_STR_LEN, "port%d", nvp->nvp_port_num);
4044 		dip = nvp->nvp_ctlp->nvc_dip;
4045 	} else {
4046 		port[0] = '\0';
4047 	}
4048 
4049 	mutex_enter(&nv_log_mutex);
4050 
4051 	(void) sprintf(nv_log_buf, "nv_sata %s %s%s", inst, port,
4052 	    (inst[0]|port[0] ? ": " :""));
4053 
4054 	(void) vsnprintf(&nv_log_buf[strlen(nv_log_buf)],
4055 	    NV_LOGBUF_LEN - strlen(nv_log_buf), fmt, ap);
4056 
4057 	/*
	 * Log to the console or only to the system log, depending on
	 * the nv_log_to_console setting.
4060 	 */
4061 	if (nv_log_to_console) {
4062 		if (nv_prom_print) {
4063 			prom_printf("%s\n", nv_log_buf);
4064 		} else {
4065 			cmn_err(ce, "%s", nv_log_buf);
4066 		}
4067 
4068 
4069 	} else {
4070 		cmn_err(ce, "!%s", nv_log_buf);
4071 	}
4072 
4073 
4074 	(void) sprintf(nv_log_buf, "%s%s", port, (port[0] ? ": " :""));
4075 
4076 	(void) vsnprintf(&nv_log_buf[strlen(nv_log_buf)],
4077 	    NV_LOGBUF_LEN - strlen(nv_log_buf), fmt, ap);
4078 
4079 	sata_trace_debug(dip, nv_log_buf);
4080 
4081 
4082 	mutex_exit(&nv_log_mutex);
4083 }
4084 
4085 
4086 /*
4087  * wrapper for cmn_err
4088  */
4089 static void
4090 nv_cmn_err(int ce, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, ...)
4091 {
4092 	va_list ap;
4093 
4094 	va_start(ap, fmt);
4095 	nv_vcmn_err(ce, nvc, nvp, fmt, ap);
4096 	va_end(ap);
4097 }
4098 
4099 
4100 static void
4101 nv_log(nv_ctl_t *nvc, nv_port_t *nvp, const char *fmt, ...)
4102 {
4103 	va_list ap;
4104 
4105 	va_start(ap, fmt);
4106 
4107 	if (nvp == NULL && nvc == NULL) {
4108 		sata_vtrace_debug(NULL, fmt, ap);
4109 		va_end(ap);
4110 
4111 		return;
4112 	}
4113 
4114 	if (nvp == NULL && nvc != NULL) {
4115 		sata_vtrace_debug(nvc->nvc_dip, fmt, ap);
4116 		va_end(ap);
4117 
4118 		return;
4119 	}
4120 
4121 	/*
4122 	 * nvp is not NULL, but nvc might be.  Reference nvp for both
4123 	 * port and dip.
4124 	 */
4125 	mutex_enter(&nv_log_mutex);
4126 
4127 	(void) snprintf(nv_log_buf, NV_LOGBUF_LEN, "port%d: %s",
4128 	    nvp->nvp_port_num, fmt);
4129 
4130 	sata_vtrace_debug(nvp->nvp_ctlp->nvc_dip, nv_log_buf, ap);
4131 
4132 	mutex_exit(&nv_log_mutex);
4133 
4134 	va_end(ap);
4135 }
4136 
4137 
4138 /*
4139  * program registers which are common to all commands
4140  */
4141 static void
4142 nv_program_taskfile_regs(nv_port_t *nvp, int slot)
4143 {
4144 	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
4145 	sata_pkt_t *spkt;
4146 	sata_cmd_t *satacmd;
4147 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4148 	uint8_t cmd, ncq = B_FALSE;
4149 
4150 	spkt = nv_slotp->nvslot_spkt;
4151 	satacmd = &spkt->satapkt_cmd;
4152 	cmd = satacmd->satacmd_cmd_reg;
4153 
4154 	ASSERT(nvp->nvp_slot);
4155 
4156 	if ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
4157 	    (cmd == SATAC_READ_FPDMA_QUEUED)) {
4158 		ncq = B_TRUE;
4159 	}
4160 
4161 	/*
4162 	 * select the drive
4163 	 */
4164 	nv_put8(cmdhdl, nvp->nvp_drvhd, satacmd->satacmd_device_reg);
4165 
4166 	/*
	 * make certain the drive is selected
4168 	 */
4169 	if (nv_wait(nvp, SATA_STATUS_DRDY, SATA_STATUS_BSY,
4170 	    NV_SEC2USEC(5), 0) == B_FALSE) {
4171 
4172 		return;
4173 	}
4174 
4175 	switch (spkt->satapkt_cmd.satacmd_addr_type) {
4176 
4177 	case ATA_ADDR_LBA:
4178 		NVLOG(NVDBG_DELIVER, nvp->nvp_ctlp, nvp, "ATA_ADDR_LBA mode",
4179 		    NULL);
4180 
4181 		nv_put8(cmdhdl, nvp->nvp_count, satacmd->satacmd_sec_count_lsb);
4182 		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
4183 		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
4184 		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
4185 		nv_put8(cmdhdl, nvp->nvp_feature,
4186 		    satacmd->satacmd_features_reg);
4187 
4188 		break;
4189 
4190 	case ATA_ADDR_LBA28:
4191 		NVLOG(NVDBG_DELIVER, nvp->nvp_ctlp, nvp,
4192 		    "ATA_ADDR_LBA28 mode", NULL);
4193 		/*
4194 		 * NCQ only uses 48-bit addressing
4195 		 */
4196 		ASSERT(ncq != B_TRUE);
4197 
4198 		nv_put8(cmdhdl, nvp->nvp_count, satacmd->satacmd_sec_count_lsb);
4199 		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
4200 		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
4201 		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
4202 		nv_put8(cmdhdl, nvp->nvp_feature,
4203 		    satacmd->satacmd_features_reg);
4204 
4205 		break;
4206 
4207 	case ATA_ADDR_LBA48:
4208 		NVLOG(NVDBG_DELIVER, nvp->nvp_ctlp, nvp,
4209 		    "ATA_ADDR_LBA48 mode", NULL);
4210 
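		/*
		 * For 48-bit addressing, each taskfile register is written
		 * twice: the first write lands in the "previous content"
		 * latch and the second in the "current content" register,
		 * per the ATA/ATAPI-6 48-bit address feature set.
		 */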
4211 		/*
4212 		 * for NCQ, tag goes into count register and real sector count
4213 		 * into features register.  The sata module does the translation
4214 		 * in the satacmd.
4215 		 */
4216 		if (ncq == B_TRUE) {
4217 			nv_put8(cmdhdl, nvp->nvp_count, slot << 3);
4218 		} else {
4219 			nv_put8(cmdhdl, nvp->nvp_count,
4220 			    satacmd->satacmd_sec_count_msb);
4221 			nv_put8(cmdhdl, nvp->nvp_count,
4222 			    satacmd->satacmd_sec_count_lsb);
4223 		}
4224 		nv_put8(cmdhdl, nvp->nvp_feature,
4225 		    satacmd->satacmd_features_reg_ext);
4226 		nv_put8(cmdhdl, nvp->nvp_feature,
4227 		    satacmd->satacmd_features_reg);
4228 
4229 		/*
4230 		 * send the high-order half first
4231 		 */
4232 		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_msb);
4233 		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_msb);
4234 		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_msb);
4235 		/*
		 * send the low-order half
4237 		 */
4238 		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
4239 		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
4240 		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
4241 
4242 		break;
4243 
4244 	case 0:
4245 		/*
4246 		 * non-media access commands such as identify and features
4247 		 * take this path.
4248 		 */
4249 		nv_put8(cmdhdl, nvp->nvp_count, satacmd->satacmd_sec_count_lsb);
4250 		nv_put8(cmdhdl, nvp->nvp_feature,
4251 		    satacmd->satacmd_features_reg);
4252 		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
4253 		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
4254 		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
4255 
4256 		break;
4257 
4258 	default:
4259 		break;
4260 	}
4261 
4262 	ASSERT(nvp->nvp_slot);
4263 }
4264 
4265 
4266 /*
4267  * start a command that involves no media access
4268  */
4269 static int
4270 nv_start_nodata(nv_port_t *nvp, int slot)
4271 {
4272 	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
4273 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4274 	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4275 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4276 
4277 	nv_program_taskfile_regs(nvp, slot);
4278 
4279 	/*
4280 	 * This next one sets the controller in motion
4281 	 */
4282 	nv_put8(cmdhdl, nvp->nvp_cmd, sata_cmdp->satacmd_cmd_reg);
4283 
4284 	return (SATA_TRAN_ACCEPTED);
4285 }
4286 
4287 
4288 static int
4289 nv_bm_status_clear(nv_port_t *nvp)
4290 {
4291 	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
4292 	uchar_t	status, ret;
4293 
4294 	/*
4295 	 * Get the current BM status
4296 	 */
4297 	ret = status = nv_get8(bmhdl, nvp->nvp_bmisx);
4298 
4299 	status = (status & BMISX_MASK) | BMISX_IDERR | BMISX_IDEINTS;
4300 
4301 	/*
4302 	 * Clear the latches (and preserve the other bits)
4303 	 */
4304 	nv_put8(bmhdl, nvp->nvp_bmisx, status);
4305 
4306 	return (ret);
4307 }
4308 
4309 
4310 /*
4311  * program the bus master DMA engine with the PRD address for
4312  * the active slot command, and start the DMA engine.
4313  */
4314 static void
4315 nv_start_dma_engine(nv_port_t *nvp, int slot)
4316 {
4317 	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
4318 	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
4319 	uchar_t direction;
4320 
4321 	ASSERT(nv_slotp->nvslot_spkt != NULL);
4322 
4323 	if (nv_slotp->nvslot_spkt->satapkt_cmd.satacmd_flags.sata_data_direction
4324 	    == SATA_DIR_READ) {
4325 		direction = BMICX_RWCON_WRITE_TO_MEMORY;
4326 	} else {
4327 		direction = BMICX_RWCON_READ_FROM_MEMORY;
4328 	}
4329 
4330 	NVLOG(NVDBG_DELIVER, nvp->nvp_ctlp, nvp,
4331 	    "nv_start_dma_engine entered", NULL);
4332 
4333 #if NOT_USED
4334 	/*
4335 	 * NOT NEEDED. Left here of historical reason.
4336 	 * Reset the controller's interrupt and error status bits.
4337 	 */
4338 	(void) nv_bm_status_clear(nvp);
4339 #endif
4340 	/*
4341 	 * program the PRD table physical start address
4342 	 */
4343 	nv_put32(bmhdl, nvp->nvp_bmidtpx, nvp->nvp_sg_paddr[slot]);
4344 
4345 	/*
4346 	 * set the direction control and start the DMA controller
4347 	 */
4348 	nv_put8(bmhdl, nvp->nvp_bmicx, direction | BMICX_SSBM);
4349 }
4350 
4351 /*
4352  * start dma command, either in or out
4353  */
4354 static int
4355 nv_start_dma(nv_port_t *nvp, int slot)
4356 {
4357 	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
4358 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4359 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4360 	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4361 	uint8_t cmd = sata_cmdp->satacmd_cmd_reg;
4362 #ifdef NCQ
4363 	uint8_t ncq = B_FALSE;
4364 #endif
4365 	ddi_acc_handle_t sghdl = nvp->nvp_sg_acc_hdl[slot];
4366 	uint_t *dstp = (uint_t *)nvp->nvp_sg_addr[slot];
4367 	int sg_count = sata_cmdp->satacmd_num_dma_cookies, idx;
4368 	ddi_dma_cookie_t  *srcp = sata_cmdp->satacmd_dma_cookie_list;
4369 
4370 	ASSERT(sg_count != 0);
4371 
4372 	if (sata_cmdp->satacmd_num_dma_cookies > NV_DMA_NSEGS) {
4373 		nv_cmn_err(CE_WARN, nvp->nvp_ctlp, nvp, "NV_DMA_NSEGS=%d <"
4374 		    " satacmd_num_dma_cookies=%d", NV_DMA_NSEGS,
4375 		    sata_cmdp->satacmd_num_dma_cookies);
4376 
4377 		return (NV_FAILURE);
4378 	}
4379 
4380 	nv_program_taskfile_regs(nvp, slot);
4381 
4382 	/*
4383 	 * start the drive in motion
4384 	 */
4385 	nv_put8(cmdhdl, nvp->nvp_cmd, cmd);
4386 
4387 	/*
4388 	 * the drive starts processing the transaction when the cmd register
4389 	 * is written.  This is done here before programming the DMA engine to
4390 	 * parallelize and save some time.  In the event that the drive is ready
4391 	 * before DMA, it will wait.
4392 	 */
4393 #ifdef NCQ
4394 	if ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
4395 	    (cmd == SATAC_READ_FPDMA_QUEUED)) {
4396 		ncq = B_TRUE;
4397 	}
4398 #endif
4399 
4400 	/*
4401 	 * copy the PRD list to PRD table in DMA accessible memory
4402 	 * so that the controller can access it.
4403 	 */
4404 	for (idx = 0; idx < sg_count; idx++, srcp++) {
4405 		uint32_t size;
4406 
4407 		nv_put32(sghdl, dstp++, srcp->dmac_address);
4408 
4409 		/* Set the number of bytes to transfer, 0 implies 64KB */
4410 		size = srcp->dmac_size;
4411 		if (size == 0x10000)
4412 			size = 0;
4413 
4414 		/*
		 * If the physical address is above 4GB, copy bits 32-39 of
		 * the address into bits 16-23 of the PRD count field.
4417 		 */
4418 		if (srcp->dmac_laddress > UINT32_MAX) {
4419 			size |= ((srcp->dmac_laddress & 0xff00000000) >> 16);
4420 		}
4421 
4422 		/*
4423 		 * set the end of table flag for the last entry
4424 		 */
4425 		if (idx == (sg_count - 1)) {
4426 			size |= PRDE_EOT;
4427 		}
4428 
4429 		nv_put32(sghdl, dstp++, size);
4430 	}
4431 
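	/*
	 * Sync the PRD table for the device so the controller sees the
	 * entries just written.
	 */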
4432 	(void) ddi_dma_sync(nvp->nvp_sg_dma_hdl[slot], 0,
4433 	    sizeof (prde_t) * NV_DMA_NSEGS, DDI_DMA_SYNC_FORDEV);
4434 
4435 	nv_start_dma_engine(nvp, slot);
4436 
4437 #ifdef NCQ
4438 	/*
4439 	 * optimization:  for SWNCQ, start DMA engine if this is the only
4440 	 * command running.  Preliminary NCQ efforts indicated this needs
4441 	 * more debugging.
4442 	 *
4443 	 * if (nvp->nvp_ncq_run <= 1)
4444 	 */
4445 
4446 	if (ncq == B_FALSE) {
4447 		NVLOG(NVDBG_DELIVER, nvp->nvp_ctlp, nvp,
4448 		    "NOT NCQ so starting DMA NOW non_ncq_commands=%d"
4449 		    " cmd = %X", non_ncq_commands++, cmd);
4450 		nv_start_dma_engine(nvp, slot);
4451 	} else {
4452 		NVLOG(NVDBG_DELIVER, nvp->nvp_ctlp, nvp, "NCQ, so program "
4453 		    "DMA later ncq_commands=%d cmd = %X", ncq_commands++, cmd);
4454 	}
4455 #endif /* NCQ */
4456 
4457 	return (SATA_TRAN_ACCEPTED);
4458 }
4459 
4460 
4461 /*
4462  * start a PIO data-in ATA command
4463  */
4464 static int
4465 nv_start_pio_in(nv_port_t *nvp, int slot)
4466 {
4467 
4468 	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
4469 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4470 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4471 
4472 	nv_program_taskfile_regs(nvp, slot);
4473 
4474 	/*
4475 	 * This next one sets the drive in motion
4476 	 */
4477 	nv_put8(cmdhdl, nvp->nvp_cmd, spkt->satapkt_cmd.satacmd_cmd_reg);
4478 
4479 	return (SATA_TRAN_ACCEPTED);
4480 }
4481 
4482 
4483 /*
4484  * start a PIO data-out ATA command
4485  */
4486 static int
4487 nv_start_pio_out(nv_port_t *nvp, int slot)
4488 {
4489 	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
4490 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4491 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4492 
4493 	nv_program_taskfile_regs(nvp, slot);
4494 
4495 	/*
4496 	 * this next one sets the drive in motion
4497 	 */
4498 	nv_put8(cmdhdl, nvp->nvp_cmd, spkt->satapkt_cmd.satacmd_cmd_reg);
4499 
4500 	/*
4501 	 * wait for the busy bit to settle
4502 	 */
4503 	NV_DELAY_NSEC(400);
4504 
4505 	/*
4506 	 * wait for the drive to assert DRQ to send the first chunk
4507 	 * of data. Have to busy wait because there's no interrupt for
4508 	 * the first chunk. This is bad... uses a lot of cycles if the
4509 	 * drive responds too slowly or if the wait loop granularity
4510 	 * is too large. It's even worse if the drive is defective and
4511 	 * the loop times out.
4512 	 */
4513 	if (nv_wait3(nvp, SATA_STATUS_DRQ, SATA_STATUS_BSY, /* okay */
4514 	    SATA_STATUS_ERR, SATA_STATUS_BSY, /* cmd failed */
4515 	    SATA_STATUS_DF, SATA_STATUS_BSY, /* drive failed */
4516 	    4000000, 0) == B_FALSE) {
4517 		spkt->satapkt_reason = SATA_PKT_TIMEOUT;
4518 
4519 		goto error;
4520 	}
4521 
4522 	/*
4523 	 * send the first block.
4524 	 */
4525 	nv_intr_pio_out(nvp, nv_slotp);
4526 
4527 	/*
4528 	 * If nvslot_flags is not set to COMPLETE yet, then processing
4529 	 * is OK so far, so return.  Otherwise, fall into error handling
4530 	 * below.
4531 	 */
4532 	if (nv_slotp->nvslot_flags != NVSLOT_COMPLETE) {
4533 
4534 		return (SATA_TRAN_ACCEPTED);
4535 	}
4536 
4537 	error:
4538 	/*
4539 	 * there was an error so reset the device and complete the packet.
4540 	 */
4541 	nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4542 	nv_complete_io(nvp, spkt, 0);
4543 	nvp->nvp_state |= NV_PORT_RESET;
4544 	nvp->nvp_state &= ~(NV_PORT_RESTORE | NV_PORT_RESET_RETRY);
4545 	nv_reset(nvp, "pio_out");
4546 
4547 	return (SATA_TRAN_PORT_ERROR);
4548 }
4549 
4550 
4551 /*
4552  * start a ATAPI Packet command (PIO data in or out)
4553  */
4554 static int
4555 nv_start_pkt_pio(nv_port_t *nvp, int slot)
4556 {
4557 	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
4558 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4559 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4560 	sata_cmd_t *satacmd = &spkt->satapkt_cmd;
4561 
4562 	NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4563 	    "nv_start_pkt_pio: start", NULL);
4564 
4565 	/*
4566 	 * Write the PACKET command to the command register.  Normally
4567 	 * this would be done through nv_program_taskfile_regs().  It
4568 	 * is done here because some values need to be overridden.
4569 	 */
4570 
4571 	/* select the drive */
4572 	nv_put8(cmdhdl, nvp->nvp_drvhd, satacmd->satacmd_device_reg);
4573 
	/* make certain the drive is selected */
4575 	if (nv_wait(nvp, SATA_STATUS_DRDY, SATA_STATUS_BSY,
4576 	    NV_SEC2USEC(5), 0) == B_FALSE) {
4577 		NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4578 		    "nv_start_pkt_pio: drive select failed", NULL);
4579 		return (SATA_TRAN_PORT_ERROR);
4580 	}
4581 
4582 	/*
4583 	 * The command is always sent via PIO, despite whatever the SATA
4584 	 * framework sets in the command.  Overwrite the DMA bit to do this.
4585 	 * Also, overwrite the overlay bit to be safe (it shouldn't be set).
4586 	 */
4587 	nv_put8(cmdhdl, nvp->nvp_feature, 0);	/* deassert DMA and OVL */
4588 
4589 	/* set appropriately by the sata framework */
4590 	nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
4591 	nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
4592 	nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
4593 	nv_put8(cmdhdl, nvp->nvp_count, satacmd->satacmd_sec_count_lsb);
4594 
4595 	/* initiate the command by writing the command register last */
4596 	nv_put8(cmdhdl, nvp->nvp_cmd, spkt->satapkt_cmd.satacmd_cmd_reg);
4597 
4598 	/* Give the host controller time to do its thing */
4599 	NV_DELAY_NSEC(400);
4600 
4601 	/*
4602 	 * Wait for the device to indicate that it is ready for the command
4603 	 * ATAPI protocol state - HP0: Check_Status_A
4604 	 */
4605 
4606 	if (nv_wait3(nvp, SATA_STATUS_DRQ, SATA_STATUS_BSY, /* okay */
4607 	    SATA_STATUS_ERR, SATA_STATUS_BSY, /* cmd failed */
4608 	    SATA_STATUS_DF, SATA_STATUS_BSY, /* drive failed */
4609 	    4000000, 0) == B_FALSE) {
4610 		/*
4611 		 * Either an error or device fault occurred or the wait
4612 		 * timed out.  According to the ATAPI protocol, command
4613 		 * completion is also possible.  Other implementations of
4614 		 * this protocol don't handle this last case, so neither
4615 		 * does this code.
4616 		 */
4617 
4618 		if (nv_get8(cmdhdl, nvp->nvp_status) &
4619 		    (SATA_STATUS_ERR | SATA_STATUS_DF)) {
4620 			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4621 
4622 			NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4623 			    "nv_start_pkt_pio: device error (HP0)", NULL);
4624 		} else {
4625 			spkt->satapkt_reason = SATA_PKT_TIMEOUT;
4626 
4627 			NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4628 			    "nv_start_pkt_pio: timeout (HP0)", NULL);
4629 		}
4630 
4631 		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4632 		nv_complete_io(nvp, spkt, 0);
4633 		nvp->nvp_state |= NV_PORT_RESET;
4634 		nvp->nvp_state &= ~(NV_PORT_RESTORE | NV_PORT_RESET_RETRY);
4635 		nv_reset(nvp, "start_pkt_pio");
4636 
4637 		return (SATA_TRAN_PORT_ERROR);
4638 	}
4639 
4640 	/*
4641 	 * Put the ATAPI command in the data register
4642 	 * ATAPI protocol state - HP1: Send_Packet
4643 	 */
4644 
4645 	ddi_rep_put16(cmdhdl, (ushort_t *)spkt->satapkt_cmd.satacmd_acdb,
4646 	    (ushort_t *)nvp->nvp_data,
4647 	    (spkt->satapkt_cmd.satacmd_acdb_len >> 1), DDI_DEV_NO_AUTOINCR);
4648 
4649 	/*
4650 	 * See you in nv_intr_pkt_pio.
4651 	 * ATAPI protocol state - HP3: INTRQ_wait
4652 	 */
4653 
4654 	NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4655 	    "nv_start_pkt_pio: exiting into HP3", NULL);
4656 
4657 	return (SATA_TRAN_ACCEPTED);
4658 }
4659 
4660 
4661 /*
4662  * Interrupt processing for a non-data ATA command.
4663  */
4664 static void
4665 nv_intr_nodata(nv_port_t *nvp, nv_slot_t *nv_slotp)
4666 {
4667 	uchar_t status;
4668 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4669 	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4670 	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4671 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4672 
4673 	NVLOG(NVDBG_INTR, nvp->nvp_ctlp, nvp, "nv_intr_nodata entered", NULL);
4674 
4675 	status = nv_get8(cmdhdl, nvp->nvp_status);
4676 
4677 	/*
4678 	 * check for errors
4679 	 */
4680 	if (status & (SATA_STATUS_DF | SATA_STATUS_ERR)) {
4681 		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4682 		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
4683 		    nvp->nvp_altstatus);
4684 		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
4685 	} else {
4686 		spkt->satapkt_reason = SATA_PKT_COMPLETED;
4687 	}
4688 
4689 	nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4690 }
4691 
4692 
4693 /*
4694  * ATA command, PIO data in
4695  */
4696 static void
4697 nv_intr_pio_in(nv_port_t *nvp, nv_slot_t *nv_slotp)
4698 {
4699 	uchar_t	status;
4700 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4701 	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4702 	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4703 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4704 	int count;
4705 
4706 	status = nv_get8(cmdhdl, nvp->nvp_status);
4707 
4708 	if (status & SATA_STATUS_BSY) {
4709 		spkt->satapkt_reason = SATA_PKT_TIMEOUT;
4710 		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4711 		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
4712 		    nvp->nvp_altstatus);
4713 		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
4714 		nvp->nvp_state |= NV_PORT_RESET;
4715 		nvp->nvp_state &= ~(NV_PORT_RESTORE | NV_PORT_RESET_RETRY);
4716 		nv_reset(nvp, "intr_pio_in");
4717 
4718 		return;
4719 	}
4720 
4721 	/*
4722 	 * check for errors
4723 	 */
4724 	if ((status & (SATA_STATUS_DRQ | SATA_STATUS_DF |
4725 	    SATA_STATUS_ERR)) != SATA_STATUS_DRQ) {
4726 		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4727 		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4728 		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4729 
4730 		return;
4731 	}
4732 
4733 	/*
4734 	 * read the next chunk of data (if any)
4735 	 */
4736 	count = min(nv_slotp->nvslot_byte_count, NV_BYTES_PER_SEC);
4737 
4738 	/*
4739 	 * read count bytes
4740 	 */
4741 	ASSERT(count != 0);
4742 
4743 	ddi_rep_get16(cmdhdl, (ushort_t *)nv_slotp->nvslot_v_addr,
4744 	    (ushort_t *)nvp->nvp_data, (count >> 1), DDI_DEV_NO_AUTOINCR);
4745 
4746 	nv_slotp->nvslot_v_addr += count;
4747 	nv_slotp->nvslot_byte_count -= count;
4748 
4749 
4750 	if (nv_slotp->nvslot_byte_count != 0) {
4751 		/*
4752 		 * more to transfer.  Wait for next interrupt.
4753 		 */
4754 		return;
4755 	}
4756 
4757 	/*
4758 	 * transfer is complete. wait for the busy bit to settle.
4759 	 */
4760 	NV_DELAY_NSEC(400);
4761 
4762 	spkt->satapkt_reason = SATA_PKT_COMPLETED;
4763 	nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4764 }
4765 
4766 
4767 /*
4768  * ATA command PIO data out
4769  */
4770 static void
4771 nv_intr_pio_out(nv_port_t *nvp, nv_slot_t *nv_slotp)
4772 {
4773 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4774 	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4775 	uchar_t status;
4776 	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4777 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4778 	int count;
4779 
4780 	/*
4781 	 * clear the IRQ
4782 	 */
4783 	status = nv_get8(cmdhdl, nvp->nvp_status);
4784 
4785 	if (status & SATA_STATUS_BSY) {
4786 		/*
4787 		 * this should not happen
4788 		 */
4789 		spkt->satapkt_reason = SATA_PKT_TIMEOUT;
4790 		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4791 		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
4792 		    nvp->nvp_altstatus);
4793 		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
4794 
4795 		return;
4796 	}
4797 
4798 	/*
4799 	 * check for errors
4800 	 */
4801 	if (status & (SATA_STATUS_DF | SATA_STATUS_ERR)) {
4802 		nv_copy_registers(nvp,  &spkt->satapkt_device, spkt);
4803 		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4804 		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4805 
4806 		return;
4807 	}
4808 
4809 	/*
	 * DRQ deasserted signals that the drive is no longer ready to
	 * transfer.  The transfer most likely completed successfully,
	 * but verify that byte_count is zero.
4814 	 */
4815 	if ((status & SATA_STATUS_DRQ) == 0) {
4816 
4817 		if (nv_slotp->nvslot_byte_count == 0) {
4818 			/*
4819 			 * complete; successful transfer
4820 			 */
4821 			spkt->satapkt_reason = SATA_PKT_COMPLETED;
4822 		} else {
4823 			/*
4824 			 * error condition, incomplete transfer
4825 			 */
4826 			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4827 			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4828 		}
4829 		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4830 
4831 		return;
4832 	}
4833 
4834 	/*
4835 	 * write the next chunk of data
4836 	 */
4837 	count = min(nv_slotp->nvslot_byte_count, NV_BYTES_PER_SEC);
4838 
4839 	/*
	 * write count bytes
4841 	 */
4842 
4843 	ASSERT(count != 0);
4844 
4845 	ddi_rep_put16(cmdhdl, (ushort_t *)nv_slotp->nvslot_v_addr,
4846 	    (ushort_t *)nvp->nvp_data, (count >> 1), DDI_DEV_NO_AUTOINCR);
4847 
4848 	nv_slotp->nvslot_v_addr += count;
4849 	nv_slotp->nvslot_byte_count -= count;
4850 }
4851 
4852 
4853 /*
4854  * ATAPI PACKET command, PIO in/out interrupt
4855  *
4856  * Under normal circumstances, one of four different interrupt scenarios
4857  * will result in this function being called:
4858  *
4859  * 1. Packet command data transfer
4860  * 2. Packet command completion
4861  * 3. Request sense data transfer
4862  * 4. Request sense command completion
4863  */
4864 static void
4865 nv_intr_pkt_pio(nv_port_t *nvp, nv_slot_t *nv_slotp)
4866 {
4867 	uchar_t	status;
4868 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4869 	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4870 	int direction = sata_cmdp->satacmd_flags.sata_data_direction;
4871 	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4872 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4873 	uint16_t ctlr_count;
4874 	int count;
4875 
4876 	/* ATAPI protocol state - HP2: Check_Status_B */
4877 
4878 	status = nv_get8(cmdhdl, nvp->nvp_status);
4879 	NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4880 	    "nv_intr_pkt_pio: status 0x%x", status);
4881 
4882 	if (status & SATA_STATUS_BSY) {
4883 		if ((nv_slotp->nvslot_flags & NVSLOT_RQSENSE) != 0) {
4884 			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4885 			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4886 		} else {
4887 			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4888 			spkt->satapkt_reason = SATA_PKT_TIMEOUT;
4889 			nvp->nvp_state |= NV_PORT_RESET;
4890 			nvp->nvp_state &= ~(NV_PORT_RESTORE |
4891 			    NV_PORT_RESET_RETRY);
4892 			nv_reset(nvp, "intr_pkt_pio");
4893 		}
4894 
4895 		NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4896 		    "nv_intr_pkt_pio: busy - status 0x%x", status);
4897 
4898 		return;
4899 	}
4900 
4901 	if ((status & SATA_STATUS_DF) != 0) {
4902 		/*
4903 		 * On device fault, just clean up and bail.  Request sense
4904 		 * will just default to its NO SENSE initialized value.
4905 		 */
4906 
4907 		if ((nv_slotp->nvslot_flags & NVSLOT_RQSENSE) == 0) {
4908 			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4909 		}
4910 
4911 		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4912 		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4913 
4914 		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
4915 		    nvp->nvp_altstatus);
4916 		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl,
4917 		    nvp->nvp_error);
4918 
4919 		NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4920 		    "nv_intr_pkt_pio: device fault", NULL);
4921 
4922 		return;
4923 	}
4924 
4925 	if ((status & SATA_STATUS_ERR) != 0) {
4926 		/*
4927 		 * On command error, figure out whether we are processing a
4928 		 * request sense.  If so, clean up and bail.  Otherwise,
4929 		 * do a REQUEST SENSE.
4930 		 */
4931 
4932 		if ((nv_slotp->nvslot_flags & NVSLOT_RQSENSE) == 0) {
4933 			nv_slotp->nvslot_flags |= NVSLOT_RQSENSE;
4934 			if (nv_start_rqsense_pio(nvp, nv_slotp) ==
4935 			    NV_FAILURE) {
4936 				nv_copy_registers(nvp, &spkt->satapkt_device,
4937 				    spkt);
4938 				nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4939 				spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4940 			}
4941 
4942 			sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
4943 			    nvp->nvp_altstatus);
4944 			sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl,
4945 			    nvp->nvp_error);
4946 		} else {
4947 			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4948 			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4949 
4950 			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4951 		}
4952 
4953 		NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4954 		    "nv_intr_pkt_pio: error (status 0x%x)", status);
4955 
4956 		return;
4957 	}
4958 
4959 	if ((nv_slotp->nvslot_flags & NVSLOT_RQSENSE) != 0) {
4960 		/*
4961 		 * REQUEST SENSE command processing
4962 		 */
4963 
4964 		if ((status & (SATA_STATUS_DRQ)) != 0) {
4965 			/* ATAPI state - HP4: Transfer_Data */
4966 
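			/*
			 * In the ATAPI PACKET protocol the cylinder low/high
			 * registers hold the byte count the device is ready
			 * to transfer for this DRQ assertion.
			 */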
4967 			/* read the byte count from the controller */
4968 			ctlr_count =
4969 			    (uint16_t)nv_get8(cmdhdl, nvp->nvp_hcyl) << 8;
4970 			ctlr_count |= nv_get8(cmdhdl, nvp->nvp_lcyl);
4971 
4972 			NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4973 			    "nv_intr_pkt_pio: ctlr byte count - %d",
4974 			    ctlr_count);
4975 
4976 			if (ctlr_count == 0) {
4977 				/* no data to transfer - some devices do this */
4978 
4979 				spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4980 				nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4981 
4982 				NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
4983 				    "nv_intr_pkt_pio: done (no data)", NULL);
4984 
4985 				return;
4986 			}
4987 
4988 			count = min(ctlr_count, SATA_ATAPI_RQSENSE_LEN);
4989 
4990 			/* transfer the data */
4991 			ddi_rep_get16(cmdhdl,
4992 			    (ushort_t *)nv_slotp->nvslot_rqsense_buff,
4993 			    (ushort_t *)nvp->nvp_data, (count >> 1),
4994 			    DDI_DEV_NO_AUTOINCR);
4995 
4996 			/* consume residual bytes */
4997 			ctlr_count -= count;
4998 
4999 			if (ctlr_count > 0) {
5000 				for (; ctlr_count > 0; ctlr_count -= 2)
5001 					(void) ddi_get16(cmdhdl,
5002 					    (ushort_t *)nvp->nvp_data);
5003 			}
5004 
5005 			NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
5006 			    "nv_intr_pkt_pio: transition to HP2", NULL);
5007 		} else {
5008 			/* still in ATAPI state - HP2 */
5009 
5010 			/*
5011 			 * In order to avoid clobbering the rqsense data
5012 			 * set by the SATA framework, the sense data read
5013 			 * from the device is put in a separate buffer and
5014 			 * copied into the packet after the request sense
5015 			 * command successfully completes.
5016 			 */
5017 			bcopy(nv_slotp->nvslot_rqsense_buff,
5018 			    spkt->satapkt_cmd.satacmd_rqsense,
5019 			    SATA_ATAPI_RQSENSE_LEN);
5020 
5021 			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
5022 			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
5023 
5024 			NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
5025 			    "nv_intr_pkt_pio: request sense done", NULL);
5026 		}
5027 
5028 		return;
5029 	}
5030 
5031 	/*
5032 	 * Normal command processing
5033 	 */
5034 
5035 	if ((status & (SATA_STATUS_DRQ)) != 0) {
5036 		/* ATAPI protocol state - HP4: Transfer_Data */
5037 
5038 		/* read the byte count from the controller */
5039 		ctlr_count = (uint16_t)nv_get8(cmdhdl, nvp->nvp_hcyl) << 8;
5040 		ctlr_count |= nv_get8(cmdhdl, nvp->nvp_lcyl);
5041 
5042 		if (ctlr_count == 0) {
5043 			/* no data to transfer - some devices do this */
5044 
5045 			spkt->satapkt_reason = SATA_PKT_COMPLETED;
5046 			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
5047 
5048 			NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
5049 			    "nv_intr_pkt_pio: done (no data)", NULL);
5050 
5051 			return;
5052 		}
5053 
5054 		count = min(ctlr_count, nv_slotp->nvslot_byte_count);
5055 
5056 		NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
5057 		    "nv_intr_pkt_pio: drive_bytes 0x%x", ctlr_count);
5058 
5059 		NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
5060 		    "nv_intr_pkt_pio: byte_count 0x%x",
5061 		    nv_slotp->nvslot_byte_count);
5062 
5063 		/* transfer the data */
5064 
5065 		if (direction == SATA_DIR_READ) {
5066 			ddi_rep_get16(cmdhdl,
5067 			    (ushort_t *)nv_slotp->nvslot_v_addr,
5068 			    (ushort_t *)nvp->nvp_data, (count >> 1),
5069 			    DDI_DEV_NO_AUTOINCR);
5070 
5071 			ctlr_count -= count;
5072 
5073 			if (ctlr_count > 0) {
				/* consume remaining bytes */
5075 
5076 				for (; ctlr_count > 0;
5077 				    ctlr_count -= 2)
5078 					(void) ddi_get16(cmdhdl,
5079 					    (ushort_t *)nvp->nvp_data);
5080 
5081 				NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
5082 				    "nv_intr_pkt_pio: bytes remained", NULL);
5083 			}
5084 		} else {
5085 			ddi_rep_put16(cmdhdl,
5086 			    (ushort_t *)nv_slotp->nvslot_v_addr,
5087 			    (ushort_t *)nvp->nvp_data, (count >> 1),
5088 			    DDI_DEV_NO_AUTOINCR);
5089 		}
5090 
5091 		nv_slotp->nvslot_v_addr += count;
5092 		nv_slotp->nvslot_byte_count -= count;
5093 
5094 		NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
5095 		    "nv_intr_pkt_pio: transition to HP2", NULL);
5096 	} else {
5097 		/* still in ATAPI state - HP2 */
5098 
5099 		spkt->satapkt_reason = SATA_PKT_COMPLETED;
5100 		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
5101 
5102 		NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
5103 		    "nv_intr_pkt_pio: done", NULL);
5104 	}
5105 }
5106 
5107 /*
5108  * ATA command, DMA data in/out
5109  */
5110 static void
5111 nv_intr_dma(nv_port_t *nvp, struct nv_slot *nv_slotp)
5112 {
5113 	uchar_t status;
5114 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
5115 	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
5116 	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
5117 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
5118 	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
5119 	uchar_t	bmicx;
5120 	uchar_t bm_status;
5121 
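	/*
	 * A DMA command completes with a single interrupt, so the slot
	 * can be marked complete up front.
	 */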
5122 	nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
5123 
5124 	/*
5125 	 * stop DMA engine.
5126 	 */
5127 	bmicx = nv_get8(bmhdl, nvp->nvp_bmicx);
5128 	nv_put8(bmhdl, nvp->nvp_bmicx,  bmicx & ~BMICX_SSBM);
5129 
5130 	/*
5131 	 * get the status and clear the IRQ, and check for DMA error
5132 	 */
5133 	status = nv_get8(cmdhdl, nvp->nvp_status);
5134 
5135 	/*
5136 	 * check for drive errors
5137 	 */
5138 	if (status & (SATA_STATUS_DF | SATA_STATUS_ERR)) {
5139 		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
5140 		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
5141 		(void) nv_bm_status_clear(nvp);
5142 
5143 		return;
5144 	}
5145 
5146 	bm_status = nv_bm_status_clear(nvp);
5147 
5148 	/*
5149 	 * check for bus master errors
5150 	 */
5151 	if (bm_status & BMISX_IDERR) {
5152 		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
5153 		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
5154 		    nvp->nvp_altstatus);
5155 		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
5156 		nvp->nvp_state |= NV_PORT_RESET;
5157 		nvp->nvp_state &= ~(NV_PORT_RESTORE | NV_PORT_RESET_RETRY);
5158 		nv_reset(nvp, "intr_dma");
5159 
5160 		return;
5161 	}
5162 
5163 	spkt->satapkt_reason = SATA_PKT_COMPLETED;
5164 }
5165 
5166 
5167 /*
5168  * Wait for a register of a controller to achieve a specific state.
5169  * To return normally, all the bits in the first sub-mask must be ON,
5170  * all the bits in the second sub-mask must be OFF.
5171  * If timeout_usec microseconds pass without the controller achieving
 * the desired bit configuration, B_FALSE is returned; otherwise
 * B_TRUE is returned.
5173  *
5174  * hybrid waiting algorithm: if not in interrupt context, busy looping will
5175  * occur for the first 250 us, then switch over to a sleeping wait.
5176  *
5177  */
5178 int
5179 nv_wait(nv_port_t *nvp, uchar_t onbits, uchar_t offbits, uint_t timeout_usec,
5180     int type_wait)
5181 {
5182 	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
5183 	hrtime_t end, cur, start_sleep, start;
5184 	int first_time = B_TRUE;
5185 	ushort_t val;
5186 
5187 	for (;;) {
5188 		val = nv_get8(ctlhdl, nvp->nvp_altstatus);
5189 
5190 		if ((val & onbits) == onbits && (val & offbits) == 0) {
5191 
5192 			return (B_TRUE);
5193 		}
5194 
5195 		cur = gethrtime();
5196 
5197 		/*
5198 		 * store the start time and calculate the end
5199 		 * time.  also calculate "start_sleep" which is
5200 		 * the point after which the driver will stop busy
5201 		 * waiting and change to sleep waiting.
5202 		 */
5203 		if (first_time) {
5204 			first_time = B_FALSE;
5205 			/*
5206 			 * start and end are in nanoseconds
5207 			 */
5208 			start = cur;
5209 			end = start + timeout_usec * 1000;
5210 			/*
			 * add 250 us to start
5212 			 */
5213 			start_sleep =  start + 250000;
5214 
5215 			if (servicing_interrupt()) {
5216 				type_wait = NV_NOSLEEP;
5217 			}
5218 		}
5219 
5220 		if (cur > end) {
5221 
5222 			break;
5223 		}
5224 
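		/*
		 * Past the busy-wait window and allowed to sleep: yield for
		 * one clock tick.  Otherwise busy wait for nv_usec_delay
		 * microseconds before checking again.
		 */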
5225 		if ((type_wait != NV_NOSLEEP) && (cur > start_sleep)) {
5226 #if ! defined(__lock_lint)
5227 			delay(1);
5228 #endif
5229 		} else {
5230 			drv_usecwait(nv_usec_delay);
5231 		}
5232 	}
5233 
5234 	return (B_FALSE);
5235 }
5236 
5237 
5238 /*
5239  * This is a slightly more complicated version that checks
 * for error conditions and bails out rather than looping
5241  * until the timeout is exceeded.
5242  *
5243  * hybrid waiting algorithm: if not in interrupt context, busy looping will
5244  * occur for the first 250 us, then switch over to a sleeping wait.
5245  */
5246 int
5247 nv_wait3(
5248 	nv_port_t	*nvp,
5249 	uchar_t		onbits1,
5250 	uchar_t		offbits1,
5251 	uchar_t		failure_onbits2,
5252 	uchar_t		failure_offbits2,
5253 	uchar_t		failure_onbits3,
5254 	uchar_t		failure_offbits3,
5255 	uint_t		timeout_usec,
5256 	int		type_wait)
5257 {
5258 	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
5259 	hrtime_t end, cur, start_sleep, start;
5260 	int first_time = B_TRUE;
5261 	ushort_t val;
5262 
5263 	for (;;) {
5264 		val = nv_get8(ctlhdl, nvp->nvp_altstatus);
5265 
5266 		/*
5267 		 * check for expected condition
5268 		 */
5269 		if ((val & onbits1) == onbits1 && (val & offbits1) == 0) {
5270 
5271 			return (B_TRUE);
5272 		}
5273 
5274 		/*
5275 		 * check for error conditions
5276 		 */
5277 		if ((val & failure_onbits2) == failure_onbits2 &&
5278 		    (val & failure_offbits2) == 0) {
5279 
5280 			return (B_FALSE);
5281 		}
5282 
5283 		if ((val & failure_onbits3) == failure_onbits3 &&
5284 		    (val & failure_offbits3) == 0) {
5285 
5286 			return (B_FALSE);
5287 		}
5288 
5289 		/*
5290 		 * store the start time and calculate the end
5291 		 * time.  also calculate "start_sleep" which is
5292 		 * the point after which the driver will stop busy
5293 		 * waiting and change to sleep waiting.
5294 		 */
5295 		if (first_time) {
5296 			first_time = B_FALSE;
5297 			/*
5298 			 * start and end are in nanoseconds
5299 			 */
5300 			cur = start = gethrtime();
5301 			end = start + timeout_usec * 1000;
5302 			/*
			 * add 250 us to start
5304 			 */
5305 			start_sleep =  start + 250000;
5306 
5307 			if (servicing_interrupt()) {
5308 				type_wait = NV_NOSLEEP;
5309 			}
5310 		} else {
5311 			cur = gethrtime();
5312 		}
5313 
5314 		if (cur > end) {
5315 
5316 			break;
5317 		}
5318 
5319 		if ((type_wait != NV_NOSLEEP) && (cur > start_sleep)) {
5320 #if ! defined(__lock_lint)
5321 			delay(1);
5322 #endif
5323 		} else {
5324 			drv_usecwait(nv_usec_delay);
5325 		}
5326 	}
5327 
5328 	return (B_FALSE);
5329 }
5330 
5331 
5332 /*
5333  * nv_port_state_change() reports the state of the port to the
5334  * sata module by calling sata_hba_event_notify().  This
 * function is called any time the state of the port changes.
5336  */
5337 static void
5338 nv_port_state_change(nv_port_t *nvp, int event, uint8_t addr_type, int state)
5339 {
5340 	sata_device_t sd;
5341 
5342 	NVLOG(NVDBG_EVENT, nvp->nvp_ctlp, nvp,
5343 	    "nv_port_state_change: event 0x%x type 0x%x state 0x%x "
5344 	    "time %ld (ticks)", event, addr_type, state, ddi_get_lbolt());
5345 
5346 	bzero((void *)&sd, sizeof (sata_device_t));
5347 	sd.satadev_rev = SATA_DEVICE_REV;
5348 	nv_copy_registers(nvp, &sd, NULL);
5349 
5350 	/*
	 * When NCQ is implemented, the sactive and snotific fields will
	 * need to be updated.
5353 	 */
5354 	sd.satadev_addr.cport = nvp->nvp_port_num;
5355 	sd.satadev_addr.qual = addr_type;
5356 	sd.satadev_state = state;
5357 
5358 	sata_hba_event_notify(nvp->nvp_ctlp->nvc_dip, &sd, event);
5359 }
5360 
5361 
5362 
5363 /*
5364  * Monitor reset progress and signature gathering.
5365  * This function may loop, so it should not be called from interrupt
5366  * context.
5367  *
5368  * Entered with nvp mutex held.
5369  */
5370 static void
5371 nv_monitor_reset(nv_port_t *nvp)
5372 {
5373 	ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
5374 	uint32_t sstatus;
5375 	int send_notification = B_FALSE;
5376 	uint8_t dev_type;
5377 
5378 	sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
5379 
5380 	/*
	 * The reason for the port reset is not known here.
	 * Check the link status.  The link needs to be active before
	 * the device's presence and signature can be checked.
5384 	 */
5385 	if ((SSTATUS_GET_IPM(sstatus) != SSTATUS_IPM_ACTIVE) ||
5386 	    (SSTATUS_GET_DET(sstatus) != SSTATUS_DET_DEVPRE_PHYCOM)) {
5387 		/*
		 * Either the link is not active or there is no device.
		 * If the link remains down for more than NV_LINK_DOWN_TIMEOUT
		 * (milliseconds), abort signature acquisition and complete
		 * reset processing.
		 * The link goes down when COMRESET is sent by nv_reset(), so
		 * the down time is effectively measured from nvp_reset_time.
5394 		 */
5395 
5396 		if (TICK_TO_MSEC(ddi_get_lbolt() - nvp->nvp_reset_time) >=
5397 		    NV_LINK_DOWN_TIMEOUT) {
5398 			NVLOG(NVDBG_RESET, nvp->nvp_ctlp, nvp,
5399 			    "nv_monitor_reset: no link - ending signature "
5400 			    "acquisition; time after reset %ldms",
5401 			    TICK_TO_MSEC(ddi_get_lbolt() -
5402 			    nvp->nvp_reset_time));
5403 		}
5404 		nvp->nvp_state &= ~(NV_PORT_RESET | NV_PORT_RESET_RETRY |
5405 		    NV_PORT_PROBE | NV_PORT_HOTPLUG_DELAY);
5406 		/*
5407 		 * Else, if the link was lost (i.e. was present before)
5408 		 * the controller should generate a 'remove' interrupt
5409 		 * that will cause the appropriate event notification.
5410 		 */
5411 		return;
5412 	}
5413 
5414 	NVLOG(NVDBG_RESET, nvp->nvp_ctlp, nvp,
5415 	    "nv_monitor_reset: link up after reset; time %ldms",
5416 	    TICK_TO_MSEC(ddi_get_lbolt() - nvp->nvp_reset_time));
5417 
5418 sig_read:
5419 	if (nvp->nvp_signature != 0) {
5420 		/*
5421 		 * The link is up. The signature was acquired before (device
5422 		 * was present).
5423 		 * But we may need to wait for the signature (D2H FIS) before
5424 		 * accessing the drive.
5425 		 */
5426 		if (nv_wait_for_signature != 0) {
5427 			uint32_t old_signature;
5428 			uint8_t old_type;
5429 
5430 			old_signature = nvp->nvp_signature;
5431 			old_type = nvp->nvp_type;
5432 			nvp->nvp_signature = 0;
5433 			nv_read_signature(nvp);
5434 			if (nvp->nvp_signature == 0) {
5435 				nvp->nvp_signature = old_signature;
5436 				nvp->nvp_type = old_type;
5437 
5438 #ifdef NV_DEBUG
5439 				/* FOR DEBUGGING */
5440 				if (nv_wait_here_forever) {
5441 					drv_usecwait(1000);
5442 					goto sig_read;
5443 				}
5444 #endif
5445 				/*
5446 				 * Wait, but not endlessly.
5447 				 */
5448 				if (TICK_TO_MSEC(ddi_get_lbolt() -
5449 				    nvp->nvp_reset_time) <
5450 				    nv_sig_acquisition_time) {
5451 					drv_usecwait(1000);
5452 					goto sig_read;
5453 				} else if (!(nvp->nvp_state &
5454 				    NV_PORT_RESET_RETRY)) {
5455 					/*
5456 					 * Retry reset.
5457 					 */
5458 					NVLOG(NVDBG_RESET, nvp->nvp_ctlp, nvp,
5459 					    "nv_monitor_reset: retrying reset "
5460 					    "time after first reset: %ldms",
5461 					    TICK_TO_MSEC(ddi_get_lbolt() -
5462 					    nvp->nvp_reset_time));
5463 					nvp->nvp_state |= NV_PORT_RESET_RETRY;
5464 					nv_reset(nvp, "monitor_reset 1");
5465 					goto sig_read;
5466 				}
5467 
5468 				NVLOG(NVDBG_RESET, nvp->nvp_ctlp, nvp,
5469 				    "nv_monitor_reset: terminating signature "
5470 				    "acquisition (1); time after reset: %ldms",
5471 				    TICK_TO_MSEC(ddi_get_lbolt() -
5472 				    nvp->nvp_reset_time));
5473 			} else {
5474 				NVLOG(NVDBG_RESET, nvp->nvp_ctlp, nvp,
5475 				    "nv_monitor_reset: signature acquired; "
5476 				    "time after reset: %ldms",
5477 				    TICK_TO_MSEC(ddi_get_lbolt() -
5478 				    nvp->nvp_reset_time));
5479 			}
5480 		}
5481 		/*
5482 		 * Clear reset state, set device reset recovery state
5483 		 */
5484 		nvp->nvp_state &= ~(NV_PORT_RESET | NV_PORT_RESET_RETRY |
5485 		    NV_PORT_PROBE);
5486 		nvp->nvp_state |= NV_PORT_RESTORE;
5487 
5488 		/*
5489 		 * Need to send reset event notification
5490 		 */
5491 		send_notification = B_TRUE;
5492 	} else {
5493 		/*
5494 		 * The link is up. The signature was not acquired before.
5495 		 * We can try to fetch a device signature.
5496 		 */
5497 		dev_type = nvp->nvp_type;
5498 
5499 acquire_signature:
5500 		nv_read_signature(nvp);
5501 		if (nvp->nvp_signature != 0) {
5502 			/*
5503 			 * Got device signature.
5504 			 */
5505 			NVLOG(NVDBG_RESET, nvp->nvp_ctlp, nvp,
5506 			    "nv_monitor_reset: signature acquired; "
5507 			    "time after reset: %ldms",
5508 			    TICK_TO_MSEC(ddi_get_lbolt() -
5509 			    nvp->nvp_reset_time));
5510 
5511 			/* Clear internal reset state */
5512 			nvp->nvp_state &=
5513 			    ~(NV_PORT_RESET | NV_PORT_RESET_RETRY);
5514 
5515 			if (dev_type != SATA_DTYPE_NONE) {
5516 				/*
5517 				 * We acquired the signature for a
5518 				 * pre-existing device that was not identified
				 * before and was reset.
5520 				 * Need to enter the device reset recovery
5521 				 * state and to send the reset notification.
5522 				 */
5523 				nvp->nvp_state |= NV_PORT_RESTORE;
5524 				send_notification = B_TRUE;
5525 			} else {
5526 				/*
				 * Otherwise, the signature was acquired
				 * because a new device was attached (at
				 * driver attach or via hot-plug).  There is
				 * no need to enter the device reset recovery
				 * state or to send the reset notification,
				 * but a device attached notification may need
				 * to be sent.
5533 				 */
5534 				if (nvp->nvp_state & NV_PORT_PROBE) {
5535 					nv_port_state_change(nvp,
5536 					    SATA_EVNT_DEVICE_ATTACHED,
5537 					    SATA_ADDR_CPORT, 0);
5538 					nvp->nvp_state &= ~NV_PORT_PROBE;
5539 				}
5540 			}
5541 		} else {
5542 			if (TICK_TO_MSEC(ddi_get_lbolt() -
5543 			    nvp->nvp_reset_time) < nv_sig_acquisition_time) {
5544 				drv_usecwait(1000);
5545 				goto acquire_signature;
5546 			} else if (!(nvp->nvp_state & NV_PORT_RESET_RETRY)) {
5547 				/*
5548 				 * Some drives may require additional
5549 				 * reset(s) to get a valid signature
5550 				 * (indicating that the drive is ready).
5551 				 * If a drive was not just powered
5552 				 * up, the signature should be available
				 * within a few hundred milliseconds
5554 				 * after reset.  Therefore, if more than
5555 				 * NV_SIG_ACQUISITION_TIME has elapsed
5556 				 * while waiting for a signature, reset
5557 				 * device again.
5558 				 */
5559 				NVLOG(NVDBG_RESET, nvp->nvp_ctlp, nvp,
5560 				    "nv_monitor_reset: retrying reset "
5561 				    "time after first reset: %ldms",
5562 				    TICK_TO_MSEC(ddi_get_lbolt() -
5563 				    nvp->nvp_reset_time));
5564 				nvp->nvp_state |= NV_PORT_RESET_RETRY;
5565 				nv_reset(nvp, "monitor_reset 2");
5566 				drv_usecwait(1000);
5567 				goto acquire_signature;
5568 			}
5569 			/*
5570 			 * Terminating signature acquisition.
5571 			 * Hopefully, the drive is ready.
5572 			 * The SATA module can deal with this as long as it
5573 			 * knows that some device is attached and a device
5574 			 * responds to commands.
5575 			 */
5576 			if (!(nvp->nvp_state & NV_PORT_PROBE)) {
5577 				send_notification = B_TRUE;
5578 			}
5579 			nvp->nvp_state &= ~(NV_PORT_RESET |
5580 			    NV_PORT_RESET_RETRY);
5581 			nvp->nvp_type = SATA_DTYPE_UNKNOWN;
5582 			if (nvp->nvp_state & NV_PORT_PROBE) {
5583 				nv_port_state_change(nvp,
5584 				    SATA_EVNT_DEVICE_ATTACHED,
5585 				    SATA_ADDR_CPORT, 0);
5586 				nvp->nvp_state &= ~NV_PORT_PROBE;
5587 			}
5588 			nvp->nvp_type = dev_type;
5589 			NVLOG(NVDBG_RESET, nvp->nvp_ctlp, nvp,
5590 			    "nv_monitor_reset: terminating signature "
5591 			    "acquisition (2); time after reset: %ldms",
5592 			    TICK_TO_MSEC(ddi_get_lbolt() -
5593 			    nvp->nvp_reset_time));
5594 		}
5595 	}
5596 
5597 	if (send_notification) {
5598 		nv_port_state_change(nvp, SATA_EVNT_DEVICE_RESET,
5599 		    SATA_ADDR_DCPORT,
5600 		    SATA_DSTATE_RESET | SATA_DSTATE_PWR_ACTIVE);
5601 	}
5602 
5603 #ifdef SGPIO_SUPPORT
5604 	if (nvp->nvp_type == SATA_DTYPE_ATADISK) {
5605 		nv_sgp_drive_connect(nvp->nvp_ctlp, SGP_CTLR_PORT_TO_DRV(
5606 		    nvp->nvp_ctlp->nvc_ctlr_num, nvp->nvp_port_num));
5607 	} else {
5608 		nv_sgp_drive_disconnect(nvp->nvp_ctlp, SGP_CTLR_PORT_TO_DRV(
5609 		    nvp->nvp_ctlp->nvc_ctlr_num, nvp->nvp_port_num));
5610 	}
5611 #endif
5612 }
5613 
5614 
5615 /*
5616  * Send a hotplug (add device) notification at the appropriate time after
5617  * hotplug detection.
5618  * Relies on nvp_reset_time set at a hotplug detection time.
5619  * Called only from nv_timeout when NV_PORT_HOTPLUG_DELAY flag is set in
5620  * the nvp_state.
5621  */
5622 static void
5623 nv_delay_hotplug_notification(nv_port_t *nvp)
5624 {
5625 
5626 	if (TICK_TO_MSEC(ddi_get_lbolt() - nvp->nvp_reset_time) >=
5627 	    nv_hotplug_delay) {
5628 		NVLOG(NVDBG_RESET, nvp->nvp_ctlp, nvp,
5629 		    "nv_delay_hotplug_notification: notifying framework after "
5630 		    "%dms delay", TICK_TO_MSEC(ddi_get_lbolt() -
5631 		    nvp->nvp_reset_time));
5632 		nvp->nvp_state &= ~NV_PORT_HOTPLUG_DELAY;
5633 		nv_port_state_change(nvp, SATA_EVNT_DEVICE_ATTACHED,
5634 		    SATA_ADDR_CPORT, 0);
5635 	}
5636 }
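
/*
 * To illustrate the flow (a sketch based only on the code in this file):
 * nv_report_add_remove() records nvp_reset_time and sets
 * NV_PORT_HOTPLUG_DELAY when a hotplug interrupt arrives with
 * nv_reset_after_hotplug == 0, then arms the timer:
 *
 *	nvp->nvp_reset_time = ddi_get_lbolt();
 *	nvp->nvp_state |= NV_PORT_HOTPLUG_DELAY;
 *	nv_setup_timeout(nvp, NV_ONE_MSEC);
 *
 * nv_timeout() then calls nv_delay_hotplug_notification() roughly every
 * millisecond until nv_hotplug_delay milliseconds have elapsed, at which
 * point the SATA_EVNT_DEVICE_ATTACHED event is finally delivered.
 */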
5637 
5638 /*
5639  * timeout processing:
5640  *
5641  * Check if any packets have crossed a timeout threshold.  If so,
5642  * abort the packet.  This function is not NCQ-aware.
5643  *
5644  * If reset was invoked, call reset monitoring function.
5645  *
5646  * The timeout interval is longer for packet timeout checking (1s)
5647  * and shorter for reset and hotplug monitoring (1ms).
5648  *
5649  */
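
/*
 * Implementation note (descriptive only): the timer is a one-shot
 * timeout(9F) callback that re-arms itself via nv_setup_timeout() before
 * returning, so the effective polling interval is whatever next_timeout
 * value the current port state selects:
 *
 *	NV_ONE_MSEC	while NV_PORT_RESET/NV_PORT_RESET_RETRY or
 *			NV_PORT_HOTPLUG_DELAY is set
 *	NV_ONE_SEC	while a non-polled packet is outstanding
 *	0		nothing to monitor, so the timer is not re-armed
 */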
5650 static void
5651 nv_timeout(void *arg)
5652 {
5653 	nv_port_t *nvp = arg;
5654 	nv_slot_t *nv_slotp;
5655 	int next_timeout = NV_ONE_SEC;	/* Default */
5656 	uint16_t int_status;
5657 	uint8_t status, bmstatus;
5658 	static int intr_warn_once = 0;
5659 
5660 	ASSERT(nvp != NULL);
5661 
5662 	mutex_enter(&nvp->nvp_mutex);
5663 	nvp->nvp_timeout_id = 0;
5664 
5665 	/*
5666 	 * If the port is not in the init state, ignore it.
5667 	 */
5668 	if ((nvp->nvp_state & NV_PORT_INIT) == 0) {
5669 		NVLOG(NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
5670 		    "nv_timeout: port uninitialized", NULL);
5671 		next_timeout = 0;
5672 
5673 		goto finished;
5674 	}
5675 
5676 	if (nvp->nvp_state & (NV_PORT_RESET | NV_PORT_RESET_RETRY)) {
5677 		nv_monitor_reset(nvp);
5678 		next_timeout = NV_ONE_MSEC;	/* at least 1ms */
5679 
5680 		goto finished;
5681 	}
5682 
5683 	if ((nvp->nvp_state & NV_PORT_HOTPLUG_DELAY) != 0) {
5684 		nv_delay_hotplug_notification(nvp);
5685 		next_timeout = NV_ONE_MSEC;	/* at least 1ms */
5686 
5687 		goto finished;
5688 	}
5689 
5690 	/*
5691 	 * Not yet NCQ-aware - there is only one command active.
5692 	 */
5693 	nv_slotp = &(nvp->nvp_slot[0]);
5694 
5695 	/*
5696 	 * perform timeout checking and processing only if there is an
5697 	 * active packet on the port
5698 	 */
5699 	if (nv_slotp != NULL && nv_slotp->nvslot_spkt != NULL)  {
5700 		sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
5701 		sata_cmd_t *satacmd = &spkt->satapkt_cmd;
5702 		uint8_t cmd = satacmd->satacmd_cmd_reg;
5703 		uint64_t lba = 0;	/* only computed on DEBUG builds */
5704 
5705 #if ! defined(__lock_lint) && defined(DEBUG)
5706 
5707 		lba = (uint64_t)satacmd->satacmd_lba_low_lsb |
5708 		    ((uint64_t)satacmd->satacmd_lba_mid_lsb << 8) |
5709 		    ((uint64_t)satacmd->satacmd_lba_high_lsb << 16) |
5710 		    ((uint64_t)satacmd->satacmd_lba_low_msb << 24) |
5711 		    ((uint64_t)satacmd->satacmd_lba_mid_msb << 32) |
5712 		    ((uint64_t)satacmd->satacmd_lba_high_msb << 40);
5713 #endif
5714 
5715 		/*
5716 		 * timeout not needed if there is a polling thread
5717 		 */
5718 		if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) {
5719 			next_timeout = 0;
5720 
5721 			goto finished;
5722 		}
5723 
5724 		if (TICK_TO_SEC(ddi_get_lbolt() - nv_slotp->nvslot_stime) >
5725 		    spkt->satapkt_time) {
5726 
5727 			uint32_t serr = nv_get32(nvp->nvp_ctlp->nvc_bar_hdl[5],
5728 			    nvp->nvp_serror);
5729 
5730 			nv_cmn_err(CE_NOTE, nvp->nvp_ctlp, nvp,
5731 			    "nv_timeout: aborting: "
5732 			    "nvslot_stime: %ld max ticks till timeout: "
5733 			    "%ld cur_time: %ld cmd=%x lba=%llx seq=%d",
5734 			    nv_slotp->nvslot_stime,
5735 			    drv_usectohz(MICROSEC *
5736 			    spkt->satapkt_time), ddi_get_lbolt(),
5737 			    cmd, (unsigned long long)lba, nvp->nvp_seq);
5738 
5739 			NVLOG(NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
5740 			    "nv_timeout: SError at timeout: 0x%x", serr);
5741 
5742 			NVLOG(NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
5743 			    "nv_timeout: previous cmd=%x",
5744 			    nvp->nvp_previous_cmd);
5745 
5746 			if (nvp->nvp_mcp5x_int_status != NULL) {
5747 				status = nv_get8(nvp->nvp_ctl_hdl,
5748 				    nvp->nvp_altstatus);
5749 				bmstatus = nv_get8(nvp->nvp_bm_hdl,
5750 				    nvp->nvp_bmisx);
5751 				int_status = nv_get16(
5752 				    nvp->nvp_ctlp->nvc_bar_hdl[5],
5753 				    nvp->nvp_mcp5x_int_status);
5754 				NVLOG(NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
5755 				    "nv_timeout: altstatus %x, bmicx %x, "
5756 				    "int_status %X", status, bmstatus,
5757 				    int_status);
5758 
5759 				if (int_status & MCP5X_INT_COMPLETE) {
5760 					/*
5761 					 * Completion interrupt was missed!
5762 					 * Issue warning message once
5763 					 */
5764 					if (!intr_warn_once) {
5765 						nv_cmn_err(CE_WARN,
5766 						    nvp->nvp_ctlp,
5767 						    nvp,
5768 						    "nv_sata: missing command "
5769 						    "completion interrupt(s)!");
5770 						intr_warn_once = 1;
5771 					}
5772 					NVLOG(NVDBG_TIMEOUT, nvp->nvp_ctlp,
5773 					    nvp, "timeout detected with "
5774 					    "interrupt ready - calling "
5775 					    "int directly", NULL);
5776 					mutex_exit(&nvp->nvp_mutex);
5777 					(void) mcp5x_intr_port(nvp);
5778 					mutex_enter(&nvp->nvp_mutex);
5779 				} else {
5780 					/*
5781 					 * True timeout and not a missing
5782 					 * interrupt.
5783 					 */
5784 					(void) nv_abort_active(nvp, spkt,
5785 					    SATA_PKT_TIMEOUT, B_TRUE);
5786 				}
5787 			} else {
5788 				(void) nv_abort_active(nvp, spkt,
5789 				    SATA_PKT_TIMEOUT, B_TRUE);
5790 			}
5791 
5792 		} else {
5793 			NVLOG(NVDBG_VERBOSE, nvp->nvp_ctlp, nvp,
5794 			    "nv_timeout:"
5795 			    " still in use so restarting timeout",
5796 			    NULL);
5797 
5798 			next_timeout = NV_ONE_SEC;
5799 		}
5800 	} else {
5801 		/*
5802 		 * there was no active packet, so do not re-enable timeout
5803 		 */
5804 		next_timeout = 0;
5805 		NVLOG(NVDBG_VERBOSE, nvp->nvp_ctlp, nvp,
5806 		    "nv_timeout: no active packet so not re-arming "
5807 		    "timeout", NULL);
5808 	}
5809 
5810 finished:
5811 	if (next_timeout != 0) {
5812 		nv_setup_timeout(nvp, next_timeout);
5813 	}
5814 	mutex_exit(&nvp->nvp_mutex);
5815 }
5816 
5817 
5818 /*
5819  * enable or disable the 3 interrupt types the driver is
5820  * interested in: completion, add and remove.
5821  */
5822 static void
5823 ck804_set_intr(nv_port_t *nvp, int flag)
5824 {
5825 	nv_ctl_t *nvc = nvp->nvp_ctlp;
5826 	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
5827 	uchar_t *bar5  = nvc->nvc_bar_addr[5];
5828 	uint8_t intr_bits[] = { CK804_INT_PDEV_HOT|CK804_INT_PDEV_INT,
5829 	    CK804_INT_SDEV_HOT|CK804_INT_SDEV_INT };
5830 	uint8_t clear_all_bits[] = { CK804_INT_PDEV_ALL, CK804_INT_SDEV_ALL };
5831 	uint8_t int_en, port = nvp->nvp_port_num, intr_status;
5832 
5833 	if (flag & NV_INTR_DISABLE_NON_BLOCKING) {
5834 		int_en = nv_get8(bar5_hdl,
5835 		    (uint8_t *)(bar5 + CK804_SATA_INT_EN));
5836 		int_en &= ~intr_bits[port];
5837 		nv_put8(bar5_hdl, (uint8_t *)(bar5 + CK804_SATA_INT_EN),
5838 		    int_en);
5839 		return;
5840 	}
5841 
5842 	ASSERT(mutex_owned(&nvp->nvp_mutex));
5843 
5844 	/*
5845 	 * controller level lock also required since access to an 8-bit
5846 	 * interrupt register is shared between both channels.
5847 	 */
5848 	mutex_enter(&nvc->nvc_mutex);
5849 
5850 	if (flag & NV_INTR_CLEAR_ALL) {
5851 		NVLOG(NVDBG_INTR, nvc, nvp,
5852 		    "ck804_set_intr: NV_INTR_CLEAR_ALL", NULL);
5853 
5854 		intr_status = nv_get8(nvc->nvc_bar_hdl[5],
5855 		    (uint8_t *)(nvc->nvc_ck804_int_status));
5856 
5857 		if (intr_status & clear_all_bits[port]) {
5858 
5859 			nv_put8(nvc->nvc_bar_hdl[5],
5860 			    (uint8_t *)(nvc->nvc_ck804_int_status),
5861 			    clear_all_bits[port]);
5862 
5863 			NVLOG(NVDBG_INTR, nvc, nvp,
5864 			    "interrupt bits cleared %x",
5865 			    intr_status & clear_all_bits[port]);
5866 		}
5867 	}
5868 
5869 	if (flag & NV_INTR_DISABLE) {
5870 		NVLOG(NVDBG_INTR, nvc, nvp,
5871 		    "ck804_set_intr: NV_INTR_DISABLE", NULL);
5872 		int_en = nv_get8(bar5_hdl,
5873 		    (uint8_t *)(bar5 + CK804_SATA_INT_EN));
5874 		int_en &= ~intr_bits[port];
5875 		nv_put8(bar5_hdl, (uint8_t *)(bar5 + CK804_SATA_INT_EN),
5876 		    int_en);
5877 	}
5878 
5879 	if (flag & NV_INTR_ENABLE) {
5880 		NVLOG(NVDBG_INTR, nvc, nvp, "ck804_set_intr: NV_INTR_ENABLE",
5881 		    NULL);
5882 		int_en = nv_get8(bar5_hdl,
5883 		    (uint8_t *)(bar5 + CK804_SATA_INT_EN));
5884 		int_en |= intr_bits[port];
5885 		nv_put8(bar5_hdl, (uint8_t *)(bar5 + CK804_SATA_INT_EN),
5886 		    int_en);
5887 	}
5888 
5889 	mutex_exit(&nvc->nvc_mutex);
5890 }
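
/*
 * Illustrative usage (mirroring callers elsewhere in this driver, not a
 * new interface): the normal enable/disable path goes through the
 * indirect nvc_set_intr pointer with the port mutex held, for example on
 * resume:
 *
 *	mutex_enter(&nvp->nvp_mutex);
 *	(*(nvp->nvp_ctlp->nvc_set_intr))(nvp,
 *	    NV_INTR_CLEAR_ALL|NV_INTR_ENABLE);
 *	mutex_exit(&nvp->nvp_mutex);
 *
 * NV_INTR_DISABLE_NON_BLOCKING exists for quiesce(9E), which must not
 * block; that path returns before any mutex is taken and only clears the
 * enable bits.
 */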
5891 
5892 
5893 /*
5894  * enable or disable the 3 interrupts the driver is interested in:
5895  * completion interrupt, hot add, and hot remove interrupt.
5896  */
5897 static void
5898 mcp5x_set_intr(nv_port_t *nvp, int flag)
5899 {
5900 	nv_ctl_t *nvc = nvp->nvp_ctlp;
5901 	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
5902 	uint16_t intr_bits =
5903 	    MCP5X_INT_ADD|MCP5X_INT_REM|MCP5X_INT_COMPLETE;
5904 	uint16_t int_en;
5905 
5906 	if (flag & NV_INTR_DISABLE_NON_BLOCKING) {
5907 		int_en = nv_get16(bar5_hdl, nvp->nvp_mcp5x_int_ctl);
5908 		int_en &= ~intr_bits;
5909 		nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_ctl, int_en);
5910 		return;
5911 	}
5912 
5913 	ASSERT(mutex_owned(&nvp->nvp_mutex));
5914 
5915 	NVLOG(NVDBG_INTR, nvc, nvp, "mcp5x_set_intr: enter flag: %d", flag);
5916 
5917 	if (flag & NV_INTR_CLEAR_ALL) {
5918 		NVLOG(NVDBG_INTR, nvc, nvp,
5919 		    "mcp5x_set_intr: NV_INTR_CLEAR_ALL", NULL);
5920 		nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_status, MCP5X_INT_CLEAR);
5921 	}
5922 
5923 	if (flag & NV_INTR_ENABLE) {
5924 		NVLOG(NVDBG_INTR, nvc, nvp, "mcp5x_set_intr: NV_INTR_ENABLE",
5925 		    NULL);
5926 		int_en = nv_get16(bar5_hdl, nvp->nvp_mcp5x_int_ctl);
5927 		int_en |= intr_bits;
5928 		nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_ctl, int_en);
5929 	}
5930 
5931 	if (flag & NV_INTR_DISABLE) {
5932 		NVLOG(NVDBG_INTR, nvc, nvp,
5933 		    "mcp5x_set_intr: NV_INTR_DISABLE", NULL);
5934 		int_en = nv_get16(bar5_hdl, nvp->nvp_mcp5x_int_ctl);
5935 		int_en &= ~intr_bits;
5936 		nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_ctl, int_en);
5937 	}
5938 }
5939 
5940 
5941 static void
5942 nv_resume(nv_port_t *nvp)
5943 {
5944 	NVLOG(NVDBG_INIT, nvp->nvp_ctlp, nvp, "nv_resume()", NULL);
5945 
5946 	mutex_enter(&nvp->nvp_mutex);
5947 
5948 	if (nvp->nvp_state & NV_PORT_INACTIVE) {
5949 		mutex_exit(&nvp->nvp_mutex);
5950 
5951 		return;
5952 	}
5953 
5954 	/* Enable interrupt */
5955 	(*(nvp->nvp_ctlp->nvc_set_intr))(nvp, NV_INTR_CLEAR_ALL|NV_INTR_ENABLE);
5956 
5957 	/*
5958 	 * Power may have been removed to the port and the
5959 	 * drive, and/or a drive may have been added or removed.
5960 	 * Force a reset which will cause a probe and re-establish
5961 	 * any state needed on the drive.
5962 	 */
5963 	nvp->nvp_state |= NV_PORT_RESET;
5964 	nvp->nvp_state &= ~(NV_PORT_RESTORE | NV_PORT_RESET_RETRY);
5965 	nv_reset(nvp, "resume");
5966 
5967 	mutex_exit(&nvp->nvp_mutex);
5968 }
5969 
5970 
5971 static void
5972 nv_suspend(nv_port_t *nvp)
5973 {
5974 	NVLOG(NVDBG_INIT, nvp->nvp_ctlp, nvp, "nv_suspend()", NULL);
5975 
5976 	mutex_enter(&nvp->nvp_mutex);
5977 
5978 #ifdef SGPIO_SUPPORT
5979 	if (nvp->nvp_type == SATA_DTYPE_ATADISK) {
5980 		nv_sgp_drive_disconnect(nvp->nvp_ctlp, SGP_CTLR_PORT_TO_DRV(
5981 		    nvp->nvp_ctlp->nvc_ctlr_num, nvp->nvp_port_num));
5982 	}
5983 #endif
5984 
5985 	if (nvp->nvp_state & NV_PORT_INACTIVE) {
5986 		mutex_exit(&nvp->nvp_mutex);
5987 
5988 		return;
5989 	}
5990 
5991 	/*
5992 	 * Stop the timeout handler.
5993 	 * (It will be restarted in nv_reset() during nv_resume().)
5994 	 */
5995 	if (nvp->nvp_timeout_id) {
5996 		(void) untimeout(nvp->nvp_timeout_id);
5997 		nvp->nvp_timeout_id = 0;
5998 	}
5999 
6000 	/* Disable interrupt */
6001 	(*(nvp->nvp_ctlp->nvc_set_intr))(nvp,
6002 	    NV_INTR_CLEAR_ALL|NV_INTR_DISABLE);
6003 
6004 	mutex_exit(&nvp->nvp_mutex);
6005 }
6006 
6007 
6008 static void
6009 nv_copy_registers(nv_port_t *nvp, sata_device_t *sd, sata_pkt_t *spkt)
6010 {
6011 	ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
6012 	sata_cmd_t *scmd = &spkt->satapkt_cmd;
6013 	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
6014 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
6015 	uchar_t status;
6016 	struct sata_cmd_flags flags;
6017 
6018 	sd->satadev_scr.sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
6019 	sd->satadev_scr.serror = nv_get32(bar5_hdl, nvp->nvp_serror);
6020 	sd->satadev_scr.scontrol = nv_get32(bar5_hdl, nvp->nvp_sctrl);
6021 
6022 	if (spkt == NULL) {
6023 
6024 		return;
6025 	}
6026 
6027 	/*
6028 	 * in the error case, implicitly set the return of regs needed
6029 	 * for error handling.
6030 	 */
6031 	status = scmd->satacmd_status_reg = nv_get8(ctlhdl,
6032 	    nvp->nvp_altstatus);
6033 
6034 	flags = scmd->satacmd_flags;
6035 
6036 	if (status & SATA_STATUS_ERR) {
6037 		flags.sata_copy_out_lba_low_msb = B_TRUE;
6038 		flags.sata_copy_out_lba_mid_msb = B_TRUE;
6039 		flags.sata_copy_out_lba_high_msb = B_TRUE;
6040 		flags.sata_copy_out_lba_low_lsb = B_TRUE;
6041 		flags.sata_copy_out_lba_mid_lsb = B_TRUE;
6042 		flags.sata_copy_out_lba_high_lsb = B_TRUE;
6043 		flags.sata_copy_out_error_reg = B_TRUE;
6044 		flags.sata_copy_out_sec_count_msb = B_TRUE;
6045 		flags.sata_copy_out_sec_count_lsb = B_TRUE;
6046 		scmd->satacmd_status_reg = status;
6047 	}
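
	/*
	 * Background on the 48-bit LBA register access below (standard
	 * ATA/ATAPI behavior, nothing driver specific): the Sector Count
	 * and LBA taskfile registers are two deep for 48-bit commands.
	 * With the HOB bit set in the Device Control register a read
	 * returns the "previous" (high-order) byte; with HOB clear it
	 * returns the most recently written (low-order) byte.  Hence the
	 * pattern used below, sketched with illustrative local names:
	 *
	 *	nv_put8(ctlhdl, nvp->nvp_devctl, ATDC_HOB|ATDC_D3);
	 *	msb = nv_get8(cmdhdl, nvp->nvp_count);
	 *	nv_put8(ctlhdl, nvp->nvp_devctl, ATDC_D3);
	 *	lsb = nv_get8(cmdhdl, nvp->nvp_count);
	 */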
6048 
6049 	if (scmd->satacmd_addr_type & ATA_ADDR_LBA48) {
6050 
6051 		/*
6052 		 * set HOB so that high byte will be read
6053 		 */
6054 		nv_put8(ctlhdl, nvp->nvp_devctl, ATDC_HOB|ATDC_D3);
6055 
6056 		/*
6057 		 * get the requested high bytes
6058 		 */
6059 		if (flags.sata_copy_out_sec_count_msb) {
6060 			scmd->satacmd_sec_count_msb =
6061 			    nv_get8(cmdhdl, nvp->nvp_count);
6062 		}
6063 
6064 		if (flags.sata_copy_out_lba_low_msb) {
6065 			scmd->satacmd_lba_low_msb =
6066 			    nv_get8(cmdhdl, nvp->nvp_sect);
6067 		}
6068 
6069 		if (flags.sata_copy_out_lba_mid_msb) {
6070 			scmd->satacmd_lba_mid_msb =
6071 			    nv_get8(cmdhdl, nvp->nvp_lcyl);
6072 		}
6073 
6074 		if (flags.sata_copy_out_lba_high_msb) {
6075 			scmd->satacmd_lba_high_msb =
6076 			    nv_get8(cmdhdl, nvp->nvp_hcyl);
6077 		}
6078 	}
6079 
6080 	/*
6081 	 * disable HOB so that low byte is read
6082 	 */
6083 	nv_put8(ctlhdl, nvp->nvp_devctl, ATDC_D3);
6084 
6085 	/*
6086 	 * get the requested low bytes
6087 	 */
6088 	if (flags.sata_copy_out_sec_count_lsb) {
6089 		scmd->satacmd_sec_count_lsb = nv_get8(cmdhdl, nvp->nvp_count);
6090 	}
6091 
6092 	if (flags.sata_copy_out_lba_low_lsb) {
6093 		scmd->satacmd_lba_low_lsb = nv_get8(cmdhdl, nvp->nvp_sect);
6094 	}
6095 
6096 	if (flags.sata_copy_out_lba_mid_lsb) {
6097 		scmd->satacmd_lba_mid_lsb = nv_get8(cmdhdl, nvp->nvp_lcyl);
6098 	}
6099 
6100 	if (flags.sata_copy_out_lba_high_lsb) {
6101 		scmd->satacmd_lba_high_lsb = nv_get8(cmdhdl, nvp->nvp_hcyl);
6102 	}
6103 
6104 	/*
6105 	 * get the device register if requested
6106 	 */
6107 	if (flags.sata_copy_out_device_reg) {
6108 		scmd->satacmd_device_reg =  nv_get8(cmdhdl, nvp->nvp_drvhd);
6109 	}
6110 
6111 	/*
6112 	 * get the error register if requested
6113 	 */
6114 	if (flags.sata_copy_out_error_reg) {
6115 		scmd->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
6116 	}
6117 }
6118 
6119 
6120 /*
6121  * Hot plug and remove interrupts can occur when the device is reset.  Just
6122  * masking the interrupt doesn't always work well because if a
6123  * different interrupt arrives on the other port, the driver can still
6124  * end up checking the state of both ports and discover that the hot
6125  * interrupt flag is set even though it was masked.  Checking for recent
6126  * reset activity and then ignoring the interrupt is the easiest approach.
6127  *
6128  * Entered with nvp mutex held.
6129  */
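
/*
 * Rough example of the case being filtered out: a recent nv_reset() leaves
 * NV_PORT_RESET set and nvp_reset_time records when it happened; if the
 * PHY transition caused by that reset latches a hotplug "add" interrupt
 * shortly afterwards, the code below sees NV_PORT_RESET/NV_PORT_RESET_RETRY
 * still set for this port and treats the event as a recovered link rather
 * than a genuine device attach.
 */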
6130 static void
6131 nv_report_add_remove(nv_port_t *nvp, int flags)
6132 {
6133 	ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
6134 	uint32_t sstatus;
6135 	int i;
6136 	clock_t nv_lbolt = ddi_get_lbolt();
6137 
6138 
6139 	NVLOG(NVDBG_HOT, nvp->nvp_ctlp, nvp, "nv_report_add_remove() - "
6140 	    "time (ticks) %d flags %x", nv_lbolt, flags);
6141 
6142 	/*
6143 	 * wait up to 1ms for sstatus to settle and reflect the true
6144 	 * status of the port.  Failure to do so can create confusion
6145 	 * in probe, where the incorrect sstatus value can still
6146 	 * persist.
6147 	 */
6148 	for (i = 0; i < 1000; i++) {
6149 		sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
6150 
6151 		if ((flags == NV_PORT_HOTREMOVED) &&
6152 		    ((sstatus & SSTATUS_DET_DEVPRE_PHYCOM) !=
6153 		    SSTATUS_DET_DEVPRE_PHYCOM)) {
6154 			break;
6155 		}
6156 
6157 		if ((flags != NV_PORT_HOTREMOVED) &&
6158 		    ((sstatus & SSTATUS_DET_DEVPRE_PHYCOM) ==
6159 		    SSTATUS_DET_DEVPRE_PHYCOM)) {
6160 			break;
6161 		}
6162 		drv_usecwait(1);
6163 	}
6164 
6165 	NVLOG(NVDBG_HOT, nvp->nvp_ctlp, nvp,
6166 	    "sstatus took %d us for DEVPRE_PHYCOM to settle", i);
6167 
6168 	if (flags == NV_PORT_HOTREMOVED) {
6169 
6170 		(void) nv_abort_active(nvp, NULL, SATA_PKT_PORT_ERROR,
6171 		    B_FALSE);
6172 
6173 		/*
6174 		 * No device, so no point in bothering with a device reset
6175 		 */
6176 		nvp->nvp_type = SATA_DTYPE_NONE;
6177 		nvp->nvp_signature = 0;
6178 		nvp->nvp_state &= ~(NV_PORT_RESET | NV_PORT_RESET_RETRY |
6179 		    NV_PORT_RESTORE);
6180 		NVLOG(NVDBG_HOT, nvp->nvp_ctlp, nvp,
6181 		    "nv_report_add_remove() hot removed", NULL);
6182 		nv_port_state_change(nvp,
6183 		    SATA_EVNT_DEVICE_DETACHED,
6184 		    SATA_ADDR_CPORT, 0);
6185 
6186 #ifdef SGPIO_SUPPORT
6187 		nv_sgp_drive_disconnect(nvp->nvp_ctlp, SGP_CTLR_PORT_TO_DRV(
6188 		    nvp->nvp_ctlp->nvc_ctlr_num, nvp->nvp_port_num));
6189 #endif
6190 	} else {
6191 		/*
6192 		 * This is a hot plug or link up indication
6193 		 * Now, re-check the link state - no link, no device
6194 		 */
6195 		if ((SSTATUS_GET_IPM(sstatus) == SSTATUS_IPM_ACTIVE) &&
6196 		    (SSTATUS_GET_DET(sstatus) == SSTATUS_DET_DEVPRE_PHYCOM)) {
6197 
6198 			if (nvp->nvp_type == SATA_DTYPE_NONE) {
6199 				/*
6200 				 * Real device attach - there was no device
6201 				 * attached to this port before this report
6202 				 */
6203 				NVLOG(NVDBG_HOT, nvp->nvp_ctlp, nvp,
6204 				    "nv_report_add_remove() new device hot"
6205 				    "plugged", NULL);
6206 				nvp->nvp_reset_time = ddi_get_lbolt();
6207 				if (!(nvp->nvp_state &
6208 				    (NV_PORT_RESET_RETRY | NV_PORT_RESET))) {
6209 
6210 					nvp->nvp_signature = 0;
6211 					if (nv_reset_after_hotplug != 0) {
6212 
6213 						/*
6214 						 * Send reset to obtain a device
6215 						 * signature
6216 						 */
6217 						nvp->nvp_state |=
6218 						    NV_PORT_RESET |
6219 						    NV_PORT_PROBE;
6220 						nv_reset(nvp,
6221 						    "report_add_remove");
6222 					} else {
6223 						nvp->nvp_type =
6224 						    SATA_DTYPE_UNKNOWN;
6225 					}
6226 				}
6227 
6228 				if (!(nvp->nvp_state & NV_PORT_PROBE)) {
6229 					if (nv_reset_after_hotplug == 0) {
6230 						/*
6231 						 * In case a hotplug interrupt
6232 						 * is generated right after a
6233 						 * link is up, delay reporting
6234 						 * a hotplug event to let the
6235 						 * drive initialize and
6236 						 * send a D2H FIS with a
6237 						 * signature.
6238 						 * The timeout will issue an
6239 						 * event notification after
6240 						 * the NV_HOTPLUG_DELAY
6241 						 * milliseconds delay.
6242 						 */
6243 						nvp->nvp_state |=
6244 						    NV_PORT_HOTPLUG_DELAY;
6245 						nvp->nvp_type =
6246 						    SATA_DTYPE_UNKNOWN;
6247 						/*
6248 						 * Make sure timer is running.
6249 						 */
6250 						nv_setup_timeout(nvp,
6251 						    NV_ONE_MSEC);
6252 					} else {
6253 						nv_port_state_change(nvp,
6254 						    SATA_EVNT_DEVICE_ATTACHED,
6255 						    SATA_ADDR_CPORT, 0);
6256 					}
6257 				}
6258 				return;
6259 			}
6260 			/*
6261 			 * Otherwise it is a bogus attach, indicating recovered
6262 			 * link loss. No real need to report it after-the-fact.
6263 			 * But we may keep some statistics, or notify the
6264 			 * sata module by reporting LINK_LOST/LINK_ESTABLISHED
6265 			 * events to keep track of such occurrences.
6266 			 * Anyhow, we may want to terminate signature
6267 			 * acquisition.
6268 			 */
6269 			NVLOG(NVDBG_HOT, nvp->nvp_ctlp, nvp,
6270 			    "nv_report_add_remove() ignoring plug interrupt "
6271 			    "- recovered link?", NULL);
6272 
6273 			if (nvp->nvp_state &
6274 			    (NV_PORT_RESET_RETRY | NV_PORT_RESET)) {
6275 				NVLOG(NVDBG_HOT, nvp->nvp_ctlp, nvp,
6276 				    "nv_report_add_remove() - "
6277 				    "time since last reset %dms",
6278 				    TICK_TO_MSEC(ddi_get_lbolt() -
6279 				    nvp->nvp_reset_time));
6280 				/*
6281 				 * If the driver does not have to wait for
6282 				 * a signature, then terminate reset processing
6283 				 * now.
6284 				 */
6285 				if (nv_wait_for_signature == 0) {
6286 					NVLOG(NVDBG_RESET, nvp->nvp_ctlp,
6287 					    nvp, "nv_report_add_remove() - "
6288 					    "terminating signature acquisition"
6289 					    ", time after reset: %dms",
6290 					    TICK_TO_MSEC(ddi_get_lbolt() -
6291 					    nvp->nvp_reset_time));
6292 
6293 					nvp->nvp_state &= ~(NV_PORT_RESET |
6294 					    NV_PORT_RESET_RETRY);
6295 
6296 					if (!(nvp->nvp_state & NV_PORT_PROBE)) {
6297 						nvp->nvp_state |=
6298 						    NV_PORT_RESTORE;
6299 						nvp->nvp_state &=
6300 						    ~NV_PORT_PROBE;
6301 
6302 						/*
6303 						 * It is not the initial device
6304 						 * probing, so notify sata
6305 						 * module that device was
6306 						 * reset
6307 						 */
6308 						nv_port_state_change(nvp,
6309 						    SATA_EVNT_DEVICE_RESET,
6310 						    SATA_ADDR_DCPORT,
6311 						    SATA_DSTATE_RESET |
6312 						    SATA_DSTATE_PWR_ACTIVE);
6313 					}
6314 
6315 				}
6316 			}
6317 			return;
6318 		}
6319 		NVLOG(NVDBG_HOT, nvp->nvp_ctlp, nvp, "nv_report_add_remove() "
6320 		    "ignoring add dev interrupt - "
6321 		    "link is down or no device!", NULL);
6322 	}
6323 
6324 }
6325 
6326 /*
6327  * Get request sense data and stuff it into the command's sense buffer.
6328  * Start a request sense command in order to get sense data to insert
6329  * in the sata packet's rqsense buffer.  The command completion
6330  * processing is in nv_intr_pkt_pio.
6331  *
6332  * The sata framework provides a function to allocate and set-up a
6333  * request sense packet command. The reasons it is not being used here are:
6334  * a) it cannot be called in an interrupt context and this function is
6335  *    called in an interrupt context.
6336  * b) it allocates DMA resources that are not used here because this is
6337  *    implemented using PIO.
6338  *
6339  * If, in the future, this is changed to use DMA, the sata framework should
6340  * be used to allocate and set-up the error retrieval (request sense)
6341  * command.
6342  */
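
/*
 * Sketch of the PIO flow implemented here (see the ATAPI HP0/HP1 state
 * names in the code): select the drive, write the taskfile for a PACKET
 * command, wait for DRQ, then push the CDB held in nv_rqsense_cdb (a
 * REQUEST SENSE CDB, SCSI opcode 0x03, defined elsewhere in this driver)
 * into the data register 16 bits at a time.  The sense data itself is
 * collected later by nv_intr_pkt_pio() into nvslot_rqsense_buff.
 */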
6343 static int
6344 nv_start_rqsense_pio(nv_port_t *nvp, nv_slot_t *nv_slotp)
6345 {
6346 	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
6347 	sata_cmd_t *satacmd = &spkt->satapkt_cmd;
6348 	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
6349 	int cdb_len = spkt->satapkt_cmd.satacmd_acdb_len;
6350 
6351 	NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
6352 	    "nv_start_rqsense_pio: start", NULL);
6353 
6354 	/* clear the local request sense buffer before starting the command */
6355 	bzero(nv_slotp->nvslot_rqsense_buff, SATA_ATAPI_RQSENSE_LEN);
6356 
6357 	/* Write the request sense PACKET command */
6358 
6359 	/* select the drive */
6360 	nv_put8(cmdhdl, nvp->nvp_drvhd, satacmd->satacmd_device_reg);
6361 
6362 	/* make certain the drive is selected */
6363 	if (nv_wait(nvp, SATA_STATUS_DRDY, SATA_STATUS_BSY,
6364 	    NV_SEC2USEC(5), 0) == B_FALSE) {
6365 		NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
6366 		    "nv_start_rqsense_pio: drive select failed", NULL);
6367 		return (NV_FAILURE);
6368 	}
6369 
6370 	/* set up the command */
6371 	nv_put8(cmdhdl, nvp->nvp_feature, 0);	/* deassert DMA and OVL */
6372 	nv_put8(cmdhdl, nvp->nvp_hcyl, SATA_ATAPI_MAX_BYTES_PER_DRQ >> 8);
6373 	nv_put8(cmdhdl, nvp->nvp_lcyl, SATA_ATAPI_MAX_BYTES_PER_DRQ & 0xff);
6374 	nv_put8(cmdhdl, nvp->nvp_sect, 0);
6375 	nv_put8(cmdhdl, nvp->nvp_count, 0);	/* no tag */
6376 
6377 	/* initiate the command by writing the command register last */
6378 	nv_put8(cmdhdl, nvp->nvp_cmd, SATAC_PACKET);
6379 
6380 	/* Give the host ctlr time to do its thing, according to ATA/ATAPI */
6381 	NV_DELAY_NSEC(400);
6382 
6383 	/*
6384 	 * Wait for the device to indicate that it is ready for the command
6385 	 * ATAPI protocol state - HP0: Check_Status_A
6386 	 */
6387 
6388 	if (nv_wait3(nvp, SATA_STATUS_DRQ, SATA_STATUS_BSY, /* okay */
6389 	    SATA_STATUS_ERR, SATA_STATUS_BSY, /* cmd failed */
6390 	    SATA_STATUS_DF, SATA_STATUS_BSY, /* drive failed */
6391 	    4000000, 0) == B_FALSE) {
6392 		if (nv_get8(cmdhdl, nvp->nvp_status) &
6393 		    (SATA_STATUS_ERR | SATA_STATUS_DF)) {
6394 			NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
6395 			    "nv_start_rqsense_pio: rqsense dev error (HP0)",
6396 			    NULL);
6397 		} else {
6398 			NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
6399 			    "nv_start_rqsense_pio: rqsense timeout (HP0)",
6400 			    NULL);
6401 		}
6402 
6403 		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
6404 		nv_complete_io(nvp, spkt, 0);
6405 		nvp->nvp_state |= NV_PORT_RESET;
6406 		nvp->nvp_state &= ~(NV_PORT_RESTORE | NV_PORT_RESET_RETRY);
6407 		nv_reset(nvp, "rqsense_pio");
6408 
6409 		return (NV_FAILURE);
6410 	}
6411 
6412 	/*
6413 	 * Put the ATAPI command in the data register
6414 	 * ATAPI protocol state - HP1: Send_Packet
6415 	 */
6416 
6417 	ddi_rep_put16(cmdhdl, (ushort_t *)nv_rqsense_cdb,
6418 	    (ushort_t *)nvp->nvp_data,
6419 	    (cdb_len >> 1), DDI_DEV_NO_AUTOINCR);
6420 
6421 	NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
6422 	    "nv_start_rqsense_pio: exiting into HP3", NULL);
6423 
6424 	return (NV_SUCCESS);
6425 }
6426 
6427 /*
6428  * quiesce(9E) entry point.
6429  *
6430  * This function is called when the system is single-threaded at high
6431  * PIL with preemption disabled. Therefore, this function must not be
6432  * blocked.
6433  *
6434  * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
6435  * DDI_FAILURE indicates an error condition and should almost never happen.
6436  */
6437 static int
6438 nv_quiesce(dev_info_t *dip)
6439 {
6440 	int port, instance = ddi_get_instance(dip);
6441 	nv_ctl_t *nvc;
6442 
6443 	if ((nvc = (nv_ctl_t *)ddi_get_soft_state(nv_statep, instance)) == NULL)
6444 		return (DDI_FAILURE);
6445 
6446 	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
6447 		nv_port_t *nvp = &(nvc->nvc_port[port]);
6448 		ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
6449 		ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
6450 		uint32_t sctrl;
6451 
6452 		/*
6453 		 * Stop the controllers from generating interrupts.
6454 		 */
6455 		(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE_NON_BLOCKING);
6456 
6457 		/*
6458 		 * clear signature registers
6459 		 */
6460 		nv_put8(cmdhdl, nvp->nvp_sect, 0);
6461 		nv_put8(cmdhdl, nvp->nvp_lcyl, 0);
6462 		nv_put8(cmdhdl, nvp->nvp_hcyl, 0);
6463 		nv_put8(cmdhdl, nvp->nvp_count, 0);
6464 
6465 		nvp->nvp_signature = 0;
6466 		nvp->nvp_type = 0;
6467 		nvp->nvp_state |= NV_PORT_RESET;
6468 		nvp->nvp_reset_time = ddi_get_lbolt();
6469 
6470 		/*
6471 		 * assert PHY reset by setting SControl DET to 1 (COMRESET)
6472 		 */
6473 		sctrl = nv_get32(bar5_hdl, nvp->nvp_sctrl);
6474 
6475 		nv_put32(bar5_hdl, nvp->nvp_sctrl,
6476 		    sctrl | SCONTROL_DET_COMRESET);
6477 
6478 		/*
6479 		 * wait 1ms
6480 		 */
6481 		drv_usecwait(1000);
6482 
6483 		/*
6484 		 * de-assert reset in PHY
6485 		 */
6486 		nv_put32(bar5_hdl, nvp->nvp_sctrl, sctrl);
6487 	}
6488 
6489 	return (DDI_SUCCESS);
6490 }
6491 
6492 
6493 #ifdef SGPIO_SUPPORT
6494 /*
6495  * NVIDIA specific SGPIO LED support
6496  * Please refer to the NVIDIA documentation for additional details
6497  */
6498 
6499 /*
6500  * nv_sgp_led_init
6501  * Detect SGPIO support.  If present, initialize.
6502  */
6503 static void
6504 nv_sgp_led_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle)
6505 {
6506 	uint16_t csrp;		/* SGPIO_CSRP from PCI config space */
6507 	uint32_t cbp;		/* SGPIO_CBP from PCI config space */
6508 	nv_sgp_cmn_t *cmn;	/* shared data structure */
6509 	int i;
6510 	char tqname[SGPIO_TQ_NAME_LEN];
6511 	extern caddr_t psm_map_phys_new(paddr_t, size_t, int);
6512 
6513 	/*
6514 	 * Initialize with appropriately invalid values in case this function
6515 	 * exits without initializing SGPIO (for example, there is no SGPIO
6516 	 * support).
6517 	 */
6518 	nvc->nvc_sgp_csr = 0;
6519 	nvc->nvc_sgp_cbp = NULL;
6520 	nvc->nvc_sgp_cmn = NULL;
6521 
6522 	/*
6523 	 * Only try to initialize SGPIO LED support if this property
6524 	 * indicates it should be.
6525 	 */
6526 	if (ddi_getprop(DDI_DEV_T_ANY, nvc->nvc_dip, DDI_PROP_DONTPASS,
6527 	    "enable-sgpio-leds", 0) != 1)
6528 		return;
6529 
6530 	/*
6531 	 * CK804 can pass the sgpio_detect test even though it does not support
6532 	 * SGPIO, so don't even look at a CK804.
6533 	 */
6534 	if (nvc->nvc_mcp5x_flag != B_TRUE)
6535 		return;
6536 
6537 	/*
6538 	 * The NVIDIA SGPIO support can nominally handle 6 drives.
6539 	 * However, the current implementation only supports 4 drives.
6540 	 * With two drives per controller, that means only look at the
6541 	 * first two controllers.
6542 	 */
6543 	if ((nvc->nvc_ctlr_num != 0) && (nvc->nvc_ctlr_num != 1))
6544 		return;
6545 
6546 	/* confirm that the SGPIO registers are there */
6547 	if (nv_sgp_detect(pci_conf_handle, &csrp, &cbp) != NV_SUCCESS) {
6548 		NVLOG(NVDBG_INIT, nvc, NULL,
6549 		    "SGPIO registers not detected", NULL);
6550 		return;
6551 	}
6552 
6553 	/* save off the SGPIO_CSR I/O address */
6554 	nvc->nvc_sgp_csr = csrp;
6555 
6556 	/* map in Control Block */
6557 	nvc->nvc_sgp_cbp = (nv_sgp_cb_t *)psm_map_phys_new(cbp,
6558 	    sizeof (nv_sgp_cb_t), PROT_READ | PROT_WRITE);
6559 
6560 	/* initialize the SGPIO h/w */
6561 	if (nv_sgp_init(nvc) == NV_FAILURE) {
6562 		nv_cmn_err(CE_WARN, nvc, NULL,
6563 		    "Unable to initialize SGPIO");
6564 	}
6565 
6566 	/*
6567 	 * Initialize the shared space for this instance.  This could
6568 	 * involve allocating the space, saving a pointer to the space
6569 	 * and starting the taskq that actually turns the LEDs on and off.
6570 	 * Or, it could involve just getting the pointer to the already
6571 	 * allocated space.
6572 	 */
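
	/*
	 * Concretely (a descriptive example, not additional logic):
	 * controllers that share a Control Block report the same SGPIO_CBP
	 * value, so the first one to reach this point allocates the
	 * nv_sgp_cmn_t, starts the LED taskq and records the cbp in
	 * nv_sgp_cbp2cmn[]; a later controller finds the matching cbp,
	 * stores the shared pointer in nvc_sgp_cmn and just sets its bit
	 * in nvs_in_use.
	 */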
6573 
6574 	mutex_enter(&nv_sgp_c2c_mutex);
6575 
6576 	/* try and find our CBP in the mapping table */
6577 	cmn = NULL;
6578 	for (i = 0; i < NV_MAX_CBPS; i++) {
6579 		if (nv_sgp_cbp2cmn[i].c2cm_cbp == cbp) {
6580 			cmn = nv_sgp_cbp2cmn[i].c2cm_cmn;
6581 			break;
6582 		}
6583 
6584 		if (nv_sgp_cbp2cmn[i].c2cm_cbp == 0)
6585 			break;
6586 	}
6587 
6588 	if (i >= NV_MAX_CBPS) {
6589 		/*
6590 		 * CBP to shared space mapping table is full
6591 		 */
6592 		nvc->nvc_sgp_cmn = NULL;
6593 		nv_cmn_err(CE_WARN, nvc, NULL,
6594 		    "LED handling not initialized - too many controllers");
6595 	} else if (cmn == NULL) {
6596 		/*
6597 		 * Allocate the shared space, point the SGPIO scratch register
6598 		 * at it and start the led update taskq.
6599 		 */
6600 
6601 		/* allocate shared space */
6602 		cmn = (nv_sgp_cmn_t *)kmem_zalloc(sizeof (nv_sgp_cmn_t),
6603 		    KM_SLEEP);
6604 		if (cmn == NULL) {
6605 			nv_cmn_err(CE_WARN, nvc, NULL,
6606 			    "Failed to allocate shared data");
6607 			return;
6608 		}
6609 
6610 		nvc->nvc_sgp_cmn = cmn;
6611 
6612 		/* initialize the shared data structure */
6613 		cmn->nvs_in_use = (1 << nvc->nvc_ctlr_num);
6614 		cmn->nvs_connected = 0;
6615 		cmn->nvs_activity = 0;
6616 		cmn->nvs_cbp = cbp;
6617 
6618 		mutex_init(&cmn->nvs_slock, NULL, MUTEX_DRIVER, NULL);
6619 		mutex_init(&cmn->nvs_tlock, NULL, MUTEX_DRIVER, NULL);
6620 		cv_init(&cmn->nvs_cv, NULL, CV_DRIVER, NULL);
6621 
6622 		/* put the address in the SGPIO scratch register */
6623 #if defined(__amd64)
6624 		nvc->nvc_sgp_cbp->sgpio_sr = (uint64_t)cmn;
6625 #else
6626 		nvc->nvc_sgp_cbp->sgpio_sr = (uint32_t)cmn;
6627 #endif
6628 
6629 		/* add an entry to the cbp to cmn mapping table */
6630 
6631 		/* i should be the next available table position */
6632 		nv_sgp_cbp2cmn[i].c2cm_cbp = cbp;
6633 		nv_sgp_cbp2cmn[i].c2cm_cmn = cmn;
6634 
6635 		/* start the activity LED taskq */
6636 
6637 		/*
6638 		 * The taskq name should be unique; fold in the lbolt time
6639 		 */
6640 		(void) snprintf(tqname, SGPIO_TQ_NAME_LEN,
6641 		    "nvSataLed%x", (short)(ddi_get_lbolt() & 0xffff));
6642 		cmn->nvs_taskq = ddi_taskq_create(nvc->nvc_dip, tqname, 1,
6643 		    TASKQ_DEFAULTPRI, 0);
6644 		if (cmn->nvs_taskq == NULL) {
6645 			cmn->nvs_taskq_delay = 0;
6646 			nv_cmn_err(CE_WARN, nvc, NULL,
6647 			    "Failed to start activity LED taskq");
6648 		} else {
6649 			cmn->nvs_taskq_delay = SGPIO_LOOP_WAIT_USECS;
6650 			(void) ddi_taskq_dispatch(cmn->nvs_taskq,
6651 			    nv_sgp_activity_led_ctl, nvc, DDI_SLEEP);
6652 		}
6653 	} else {
6654 		nvc->nvc_sgp_cmn = cmn;
6655 		cmn->nvs_in_use |= (1 << nvc->nvc_ctlr_num);
6656 	}
6657 
6658 	mutex_exit(&nv_sgp_c2c_mutex);
6659 }
6660 
6661 /*
6662  * nv_sgp_detect
6663  * Read the SGPIO_CSR and SGPIO_CBP values from PCI config space and
6664  * report back whether both were readable.
6665  */
6666 static int
6667 nv_sgp_detect(ddi_acc_handle_t pci_conf_handle, uint16_t *csrpp,
6668     uint32_t *cbpp)
6669 {
6670 	/* get the SGPIO_CSRP */
6671 	*csrpp = pci_config_get16(pci_conf_handle, SGPIO_CSRP);
6672 	if (*csrpp == 0) {
6673 		return (NV_FAILURE);
6674 	}
6675 
6676 	/* SGPIO_CSRP is good, get the SGPIO_CBP */
6677 	*cbpp = pci_config_get32(pci_conf_handle, SGPIO_CBP);
6678 	if (*cbpp == 0) {
6679 		return (NV_FAILURE);
6680 	}
6681 
6682 	/* SGPIO_CBP is good, so we must support SGPIO */
6683 	return (NV_SUCCESS);
6684 }
6685 
6686 /*
6687  * nv_sgp_init
6688  * Initialize SGPIO.
6689  * The initialization process is described by NVIDIA, but the hardware does
6690  * not always behave as documented, so several steps have been changed and/or
6691  * omitted.
6692  */
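
/*
 * The command/status handshake used here and in nv_sgp_write_data() is,
 * roughly: write a command to the CSR, e.g.
 *
 *	nv_sgp_csr_write(nvc, SGPIO_CSR_CMD_SET(SGPIO_CMD_READ_PARAMS));
 *
 * then poll nv_sgp_csr_read() until SGPIO_CSR_CSTAT() reports either
 * SGPIO_CMD_OK or SGPIO_CMD_ERROR, waiting 400ns between polls and giving
 * up once NV_SGP_CMD_TIMEOUT has elapsed.
 */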
6693 static int
6694 nv_sgp_init(nv_ctl_t *nvc)
6695 {
6696 	int seq;
6697 	int rval = NV_SUCCESS;
6698 	hrtime_t start, end;
6699 	uint32_t cmd;
6700 	uint32_t status;
6701 	int drive_count;
6702 
6703 	status = nv_sgp_csr_read(nvc);
6704 	if (SGPIO_CSR_SSTAT(status) == SGPIO_STATE_RESET) {
6705 		/* SGPIO logic is in reset state and requires initialization */
6706 
6707 		/* noting the Sequence field value */
6708 		seq = SGPIO_CSR_SEQ(status);
6709 
6710 		/* issue SGPIO_CMD_READ_PARAMS command */
6711 		cmd = SGPIO_CSR_CMD_SET(SGPIO_CMD_READ_PARAMS);
6712 		nv_sgp_csr_write(nvc, cmd);
6713 
6714 		DTRACE_PROBE2(sgpio__cmd, int, cmd, int, status);
6715 
6716 		/* poll for command completion */
6717 		start = gethrtime();
6718 		end = start + NV_SGP_CMD_TIMEOUT;
6719 		for (;;) {
6720 			status = nv_sgp_csr_read(nvc);
6721 
6722 			/* break on error */
6723 			if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_ERROR) {
6724 				NVLOG(NVDBG_VERBOSE, nvc, NULL,
6725 				    "Command error during initialization",
6726 				    NULL);
6727 				rval = NV_FAILURE;
6728 				break;
6729 			}
6730 
6731 			/* command processing is taking place */
6732 			if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_OK) {
6733 				if (SGPIO_CSR_SEQ(status) != seq) {
6734 					NVLOG(NVDBG_VERBOSE, nvc, NULL,
6735 					    "Sequence number change error",
6736 					    NULL);
6737 				}
6738 
6739 				break;
6740 			}
6741 
6742 			/* if completion not detected in 2000ms ... */
6743 
6744 			if (gethrtime() > end)
6745 				break;
6746 
6747 			/* wait 400 ns before checking again */
6748 			NV_DELAY_NSEC(400);
6749 		}
6750 	}
6751 
6752 	if (rval == NV_FAILURE)
6753 		return (rval);
6754 
6755 	if (SGPIO_CSR_SSTAT(status) != SGPIO_STATE_OPERATIONAL) {
6756 		NVLOG(NVDBG_VERBOSE, nvc, NULL,
6757 		    "SGPIO logic not operational after init - state %d",
6758 		    SGPIO_CSR_SSTAT(status));
6759 		/*
6760 		 * Should return (NV_FAILURE) but the hardware can be
6761 		 * operational even if the SGPIO Status does not indicate
6762 		 * this.
6763 		 */
6764 	}
6765 
6766 	/*
6767 	 * NVIDIA recommends reading the supported drive count even
6768 	 * though they also indicate that it is always 4 at this time.
6769 	 */
6770 	drive_count = SGP_CR0_DRV_CNT(nvc->nvc_sgp_cbp->sgpio_cr0);
6771 	if (drive_count != SGPIO_DRV_CNT_VALUE) {
6772 		NVLOG(NVDBG_INIT, nvc, NULL,
6773 		    "SGPIO reported undocumented drive count - %d",
6774 		    drive_count);
6775 	}
6776 
6777 	NVLOG(NVDBG_INIT, nvc, NULL,
6778 	    "initialized ctlr: %d csr: 0x%08x",
6779 	    nvc->nvc_ctlr_num, nvc->nvc_sgp_csr);
6780 
6781 	return (rval);
6782 }
6783 
6784 static int
6785 nv_sgp_check_set_cmn(nv_ctl_t *nvc)
6786 {
6787 	nv_sgp_cmn_t *cmn = nvc->nvc_sgp_cmn;
6788 
6789 	if (cmn == NULL)
6790 		return (NV_FAILURE);
6791 
6792 	mutex_enter(&cmn->nvs_slock);
6793 	cmn->nvs_in_use |= (1 << nvc->nvc_ctlr_num);
6794 	mutex_exit(&cmn->nvs_slock);
6795 
6796 	return (NV_SUCCESS);
6797 }
6798 
6799 /*
6800  * nv_sgp_csr_read
6801  * This is just a 32-bit port read from the value that was obtained from the
6802  * PCI config space.
6803  *
6804  * XXX It was advised to use the in[bwl] functions for this, even though they
6805  * are obsolete interfaces.
6806  */
6807 static int
6808 nv_sgp_csr_read(nv_ctl_t *nvc)
6809 {
6810 	return (inl(nvc->nvc_sgp_csr));
6811 }
6812 
6813 /*
6814  * nv_sgp_csr_write
6815  * This is just a 32-bit I/O port write.  The port number was obtained from
6816  * the PCI config space.
6817  *
6818  * XXX It was advised to use the out[bwl] functions for this, even though they
6819  * are obsolete interfaces.
6820  */
6821 static void
6822 nv_sgp_csr_write(nv_ctl_t *nvc, uint32_t val)
6823 {
6824 	outl(nvc->nvc_sgp_csr, val);
6825 }
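
/*
 * Note: the CSR value read from SGPIO_CSRP is an I/O-port address rather
 * than a memory-mapped register, which is presumably why the raw
 * inl()/outl() port accesses above are used instead of a DDI access
 * handle.
 */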
6826 
6827 /*
6828  * nv_sgp_write_data
6829  * Cause SGPIO to send Control Block data
6830  */
6831 static int
6832 nv_sgp_write_data(nv_ctl_t *nvc)
6833 {
6834 	hrtime_t start, end;
6835 	uint32_t status;
6836 	uint32_t cmd;
6837 
6838 	/* issue command */
6839 	cmd = SGPIO_CSR_CMD_SET(SGPIO_CMD_WRITE_DATA);
6840 	nv_sgp_csr_write(nvc, cmd);
6841 
6842 	/* poll for completion */
6843 	start = gethrtime();
6844 	end = start + NV_SGP_CMD_TIMEOUT;
6845 	for (;;) {
6846 		status = nv_sgp_csr_read(nvc);
6847 
6848 		/* break on error completion */
6849 		if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_ERROR)
6850 			break;
6851 
6852 		/* break on successful completion */
6853 		if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_OK)
6854 			break;
6855 
6856 		/* Wait 400 ns and try again */
6857 		NV_DELAY_NSEC(400);
6858 
6859 		if (gethrtime() > end)
6860 			break;
6861 	}
6862 
6863 	if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_OK)
6864 		return (NV_SUCCESS);
6865 
6866 	return (NV_FAILURE);
6867 }
6868 
6869 /*
6870  * nv_sgp_activity_led_ctl
6871  * This is run as a taskq.  It wakes up at a fixed interval and checks to
6872  * see if any of the activity LEDs need to be changed.
6873  */
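
/*
 * The resulting behavior, as an example: a connected but idle drive gets
 * TR_ACTIVE_ENABLE on every pass (LED presumably solid on); a drive whose
 * nvs_activity bit keeps being set by nv_sgp_drive_active() has its
 * active bit toggled on each pass, so its LED blinks at the taskq
 * interval (SGPIO_LOOP_WAIT_USECS, about 1/16 second per the in-loop
 * comment below); a disconnected drive is forced to TR_ACTIVE_DISABLE
 * (LED off).
 */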
6874 static void
6875 nv_sgp_activity_led_ctl(void *arg)
6876 {
6877 	nv_ctl_t *nvc = (nv_ctl_t *)arg;
6878 	nv_sgp_cmn_t *cmn;
6879 	volatile nv_sgp_cb_t *cbp;
6880 	clock_t ticks;
6881 	uint8_t drv_leds;
6882 	uint32_t old_leds;
6883 	uint32_t new_led_state;
6884 	int i;
6885 
6886 	cmn = nvc->nvc_sgp_cmn;
6887 	cbp = nvc->nvc_sgp_cbp;
6888 
6889 	do {
6890 		/* save off the old state of all of the LEDs */
6891 		old_leds = cbp->sgpio0_tr;
6892 
6893 		DTRACE_PROBE3(sgpio__activity__state,
6894 		    int, cmn->nvs_connected, int, cmn->nvs_activity,
6895 		    int, old_leds);
6896 
6897 		new_led_state = 0;
6898 
6899 		/* for each drive */
6900 		for (i = 0; i < SGPIO_DRV_CNT_VALUE; i++) {
6901 
6902 			/* get the current state of the LEDs for the drive */
6903 			drv_leds = SGPIO0_TR_DRV(old_leds, i);
6904 
6905 			if ((cmn->nvs_connected & (1 << i)) == 0) {
6906 				/* if not connected, turn off activity */
6907 				drv_leds &= ~TR_ACTIVE_MASK;
6908 				drv_leds |= TR_ACTIVE_SET(TR_ACTIVE_DISABLE);
6909 
6910 				new_led_state &= SGPIO0_TR_DRV_CLR(i);
6911 				new_led_state |=
6912 				    SGPIO0_TR_DRV_SET(drv_leds, i);
6913 
6914 				continue;
6915 			}
6916 
6917 			if ((cmn->nvs_activity & (1 << i)) == 0) {
6918 				/* connected, but not active */
6919 				drv_leds &= ~TR_ACTIVE_MASK;
6920 				drv_leds |= TR_ACTIVE_SET(TR_ACTIVE_ENABLE);
6921 
6922 				new_led_state &= SGPIO0_TR_DRV_CLR(i);
6923 				new_led_state |=
6924 				    SGPIO0_TR_DRV_SET(drv_leds, i);
6925 
6926 				continue;
6927 			}
6928 
6929 			/* connected and active */
6930 			if (TR_ACTIVE(drv_leds) == TR_ACTIVE_ENABLE) {
6931 				/* was enabled, so disable */
6932 				drv_leds &= ~TR_ACTIVE_MASK;
6933 				drv_leds |=
6934 				    TR_ACTIVE_SET(TR_ACTIVE_DISABLE);
6935 
6936 				new_led_state &= SGPIO0_TR_DRV_CLR(i);
6937 				new_led_state |=
6938 				    SGPIO0_TR_DRV_SET(drv_leds, i);
6939 			} else {
6940 				/* was disabled, so enable */
6941 				drv_leds &= ~TR_ACTIVE_MASK;
6942 				drv_leds |= TR_ACTIVE_SET(TR_ACTIVE_ENABLE);
6943 
6944 				new_led_state &= SGPIO0_TR_DRV_CLR(i);
6945 				new_led_state |=
6946 				    SGPIO0_TR_DRV_SET(drv_leds, i);
6947 			}
6948 
6949 			/*
6950 			 * clear the activity bit
6951 			 * if there is drive activity again within the
6952 			 * loop interval (now 1/16 second), nvs_activity
6953 			 * will be reset and the "connected and active"
6954 			 * condition above will cause the LED to blink
6955 			 * off and on at the loop interval rate.  The
6956 			 * rate may be increased (interval shortened) as
6957 			 * long as it is not more than 1/30 second.
6958 			 */
6959 			mutex_enter(&cmn->nvs_slock);
6960 			cmn->nvs_activity &= ~(1 << i);
6961 			mutex_exit(&cmn->nvs_slock);
6962 		}
6963 
6964 		DTRACE_PROBE1(sgpio__new__led__state, int, new_led_state);
6965 
6966 		/* write out LED values */
6967 
6968 		mutex_enter(&cmn->nvs_slock);
6969 		cbp->sgpio0_tr &= ~TR_ACTIVE_MASK_ALL;
6970 		cbp->sgpio0_tr |= new_led_state;
6971 		cbp->sgpio_cr0 = SGP_CR0_ENABLE_MASK;
6972 		mutex_exit(&cmn->nvs_slock);
6973 
6974 		if (nv_sgp_write_data(nvc) == NV_FAILURE) {
6975 			NVLOG(NVDBG_VERBOSE, nvc, NULL,
6976 			    "nv_sgp_write_data failure updating active LED",
6977 			    NULL);
6978 		}
6979 
6980 		/* now rest for the interval */
6981 		mutex_enter(&cmn->nvs_tlock);
6982 		ticks = drv_usectohz(cmn->nvs_taskq_delay);
6983 		if (ticks > 0)
6984 			(void) cv_reltimedwait(&cmn->nvs_cv, &cmn->nvs_tlock,
6985 			    ticks, TR_CLOCK_TICK);
6986 		mutex_exit(&cmn->nvs_tlock);
6987 	} while (ticks > 0);
6988 }
6989 
6990 /*
6991  * nv_sgp_drive_connect
6992  * Set the flag used to indicate that the drive is attached to the HBA.
6993  * Used to let the taskq know that it should turn the Activity LED on.
6994  */
6995 static void
6996 nv_sgp_drive_connect(nv_ctl_t *nvc, int drive)
6997 {
6998 	nv_sgp_cmn_t *cmn;
6999 
7000 	if (nv_sgp_check_set_cmn(nvc) == NV_FAILURE)
7001 		return;
7002 	cmn = nvc->nvc_sgp_cmn;
7003 
7004 	mutex_enter(&cmn->nvs_slock);
7005 	cmn->nvs_connected |= (1 << drive);
7006 	mutex_exit(&cmn->nvs_slock);
7007 }
7008 
7009 /*
7010  * nv_sgp_drive_disconnect
7011  * Clears the flag used to indicate that the drive is attached to the
7012  * HBA.  Used to let the taskq know that it should turn the
7013  * Activity LED off.  The drive's activity flag (nvs_activity bit) is
7014  * also cleared.
7015  */
7016 static void
7017 nv_sgp_drive_disconnect(nv_ctl_t *nvc, int drive)
7018 {
7019 	nv_sgp_cmn_t *cmn;
7020 
7021 	if (nv_sgp_check_set_cmn(nvc) == NV_FAILURE)
7022 		return;
7023 	cmn = nvc->nvc_sgp_cmn;
7024 
7025 	mutex_enter(&cmn->nvs_slock);
7026 	cmn->nvs_connected &= ~(1 << drive);
7027 	cmn->nvs_activity &= ~(1 << drive);
7028 	mutex_exit(&cmn->nvs_slock);
7029 }
7030 
7031 /*
7032  * nv_sgp_drive_active
7033  * Sets the flag used to indicate that the drive has been accessed and the
7034  * LED should be flicked off, then on.  It is cleared at a fixed time
7035  * interval by the LED taskq and set by the sata command start.
7036  */
7037 static void
7038 nv_sgp_drive_active(nv_ctl_t *nvc, int drive)
7039 {
7040 	nv_sgp_cmn_t *cmn;
7041 
7042 	if (nv_sgp_check_set_cmn(nvc) == NV_FAILURE)
7043 		return;
7044 	cmn = nvc->nvc_sgp_cmn;
7045 
7046 	DTRACE_PROBE1(sgpio__active, int, drive);
7047 
7048 	mutex_enter(&cmn->nvs_slock);
7049 	cmn->nvs_activity |= (1 << drive);
7050 	mutex_exit(&cmn->nvs_slock);
7051 }
7052 
7053 
7054 /*
7055  * nv_sgp_locate
7056  * Turns the Locate/OK2RM LED off or on for a particular drive.  State is
7057  * maintained in the SGPIO Control Block.
7058  */
7059 static void
7060 nv_sgp_locate(nv_ctl_t *nvc, int drive, int value)
7061 {
7062 	uint8_t leds;
7063 	volatile nv_sgp_cb_t *cb = nvc->nvc_sgp_cbp;
7064 	nv_sgp_cmn_t *cmn;
7065 
7066 	if (nv_sgp_check_set_cmn(nvc) == NV_FAILURE)
7067 		return;
7068 	cmn = nvc->nvc_sgp_cmn;
7069 
7070 	if ((drive < 0) || (drive >= SGPIO_DRV_CNT_VALUE))
7071 		return;
7072 
7073 	DTRACE_PROBE2(sgpio__locate, int, drive, int, value);
7074 
7075 	mutex_enter(&cmn->nvs_slock);
7076 
7077 	leds = SGPIO0_TR_DRV(cb->sgpio0_tr, drive);
7078 
7079 	leds &= ~TR_LOCATE_MASK;
7080 	leds |= TR_LOCATE_SET(value);
7081 
7082 	cb->sgpio0_tr &= SGPIO0_TR_DRV_CLR(drive);
7083 	cb->sgpio0_tr |= SGPIO0_TR_DRV_SET(leds, drive);
7084 
7085 	cb->sgpio_cr0 = SGP_CR0_ENABLE_MASK;
7086 
7087 	mutex_exit(&cmn->nvs_slock);
7088 
7089 	if (nv_sgp_write_data(nvc) == NV_FAILURE) {
7090 		nv_cmn_err(CE_WARN, nvc, NULL,
7091 		    "nv_sgp_write_data failure updating OK2RM/Locate LED");
7092 	}
7093 }
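
/*
 * nv_sgp_locate() above and nv_sgp_error() below follow the same
 * read-modify-write pattern on the per-drive field in sgpio0_tr, sketched
 * here with the existing macros (TR_<field> stands for TR_LOCATE or
 * TR_ERROR):
 *
 *	leds = SGPIO0_TR_DRV(cb->sgpio0_tr, drive);
 *	leds = (leds & ~TR_<field>_MASK) | TR_<field>_SET(value);
 *	cb->sgpio0_tr &= SGPIO0_TR_DRV_CLR(drive);
 *	cb->sgpio0_tr |= SGPIO0_TR_DRV_SET(leds, drive);
 *
 * followed by re-asserting SGP_CR0_ENABLE_MASK in sgpio_cr0 and pushing
 * the Control Block to the hardware with nv_sgp_write_data().
 */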
7094 
7095 /*
7096  * nv_sgp_error
7097  * Turns the Error/Failure LED off or on for a particular drive.  State is
7098  * maintained in the SGPIO Control Block.
7099  */
7100 static void
7101 nv_sgp_error(nv_ctl_t *nvc, int drive, int value)
7102 {
7103 	uint8_t leds;
7104 	volatile nv_sgp_cb_t *cb = nvc->nvc_sgp_cbp;
7105 	nv_sgp_cmn_t *cmn;
7106 
7107 	if (nv_sgp_check_set_cmn(nvc) == NV_FAILURE)
7108 		return;
7109 	cmn = nvc->nvc_sgp_cmn;
7110 
7111 	if ((drive < 0) || (drive >= SGPIO_DRV_CNT_VALUE))
7112 		return;
7113 
7114 	DTRACE_PROBE2(sgpio__error, int, drive, int, value);
7115 
7116 	mutex_enter(&cmn->nvs_slock);
7117 
7118 	leds = SGPIO0_TR_DRV(cb->sgpio0_tr, drive);
7119 
7120 	leds &= ~TR_ERROR_MASK;
7121 	leds |= TR_ERROR_SET(value);
7122 
7123 	cb->sgpio0_tr &= SGPIO0_TR_DRV_CLR(drive);
7124 	cb->sgpio0_tr |= SGPIO0_TR_DRV_SET(leds, drive);
7125 
7126 	cb->sgpio_cr0 = SGP_CR0_ENABLE_MASK;
7127 
7128 	mutex_exit(&cmn->nvs_slock);
7129 
7130 	if (nv_sgp_write_data(nvc) == NV_FAILURE) {
7131 		nv_cmn_err(CE_WARN, nvc, NULL,
7132 		    "nv_sgp_write_data failure updating Fail/Error LED");
7133 	}
7134 }
7135 
7136 static void
7137 nv_sgp_cleanup(nv_ctl_t *nvc)
7138 {
7139 	int drive, i;
7140 	uint8_t drv_leds;
7141 	uint32_t led_state;
7142 	volatile nv_sgp_cb_t *cb = nvc->nvc_sgp_cbp;
7143 	nv_sgp_cmn_t *cmn = nvc->nvc_sgp_cmn;
7144 	extern void psm_unmap_phys(caddr_t, size_t);
7145 
7146 	/*
7147 	 * If the SGPIO Control Block isn't mapped or the shared data
7148 	 * structure isn't present in this instance, there isn't much that
7149 	 * can be cleaned up.
7150 	 */
7151 	if ((cb == NULL) || (cmn == NULL))
7152 		return;
7153 
7154 	/* turn off activity LEDs for this controller */
7155 	drv_leds = TR_ACTIVE_SET(TR_ACTIVE_DISABLE);
7156 
7157 	/* get the existing LED state */
7158 	led_state = cb->sgpio0_tr;
7159 
7160 	/* turn off port 0 */
7161 	drive = SGP_CTLR_PORT_TO_DRV(nvc->nvc_ctlr_num, 0);
7162 	led_state &= SGPIO0_TR_DRV_CLR(drive);
7163 	led_state |= SGPIO0_TR_DRV_SET(drv_leds, drive);
7164 
7165 	/* turn off port 1 */
7166 	drive = SGP_CTLR_PORT_TO_DRV(nvc->nvc_ctlr_num, 1);
7167 	led_state &= SGPIO0_TR_DRV_CLR(drive);
7168 	led_state |= SGPIO0_TR_DRV_SET(drv_leds, drive);
7169 
7170 	/* set the new led state, which should turn off this ctrl's LEDs */
7171 	cb->sgpio_cr0 = SGP_CR0_ENABLE_MASK;
7172 	(void) nv_sgp_write_data(nvc);
7173 
7174 	/* clear the controller's in use bit */
7175 	mutex_enter(&cmn->nvs_slock);
7176 	cmn->nvs_in_use &= ~(1 << nvc->nvc_ctlr_num);
7177 	mutex_exit(&cmn->nvs_slock);
7178 
7179 	if (cmn->nvs_in_use == 0) {
7180 		/* if all "in use" bits cleared, take everything down */
7181 
7182 		if (cmn->nvs_taskq != NULL) {
7183 			/* allow activity taskq to exit */
7184 			cmn->nvs_taskq_delay = 0;
7185 			cv_broadcast(&cmn->nvs_cv);
7186 
7187 			/* then destroy it */
7188 			ddi_taskq_destroy(cmn->nvs_taskq);
7189 		}
7190 
7191 		/* turn off all of the LEDs */
7192 		cb->sgpio0_tr = 0;
7193 		cb->sgpio_cr0 = SGP_CR0_ENABLE_MASK;
7194 		(void) nv_sgp_write_data(nvc);
7195 
7196 		cb->sgpio_sr = NULL;
7197 
7198 		/* zero out the CBP to cmn mapping */
7199 		for (i = 0; i < NV_MAX_CBPS; i++) {
7200 			if (nv_sgp_cbp2cmn[i].c2cm_cbp == cmn->nvs_cbp) {
7201 				nv_sgp_cbp2cmn[i].c2cm_cmn = NULL;
7202 				break;
7203 			}
7204 
7205 			if (nv_sgp_cbp2cmn[i].c2cm_cbp == 0)
7206 				break;
7207 		}
7208 
7209 		/* free resources */
7210 		cv_destroy(&cmn->nvs_cv);
7211 		mutex_destroy(&cmn->nvs_tlock);
7212 		mutex_destroy(&cmn->nvs_slock);
7213 
7214 		kmem_free(nvc->nvc_sgp_cmn, sizeof (nv_sgp_cmn_t));
7215 	}
7216 
7217 	nvc->nvc_sgp_cmn = NULL;
7218 
7219 	/* unmap the SGPIO Control Block */
7220 	psm_unmap_phys((caddr_t)nvc->nvc_sgp_cbp, sizeof (nv_sgp_cb_t));
7221 }
7222 #endif	/* SGPIO_SUPPORT */
7223