1 /* 2 * (MPSAFE) 3 * 4 * Copyright (c) 2009 The DragonFly Project. All rights reserved. 5 * 6 * This code is derived from software contributed to The DragonFly Project 7 * by Matthew Dillon <dillon@backplane.com> 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in 17 * the documentation and/or other materials provided with the 18 * distribution. 19 * 3. Neither the name of The DragonFly Project nor the names of its 20 * contributors may be used to endorse or promote products derived 21 * from this software without specific, prior written permission. 22 * 23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 24 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 25 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS 26 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE 27 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, 28 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING, 29 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; 30 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED 31 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, 32 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT 33 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 34 * SUCH DAMAGE. 
35 * 36 * 37 * Copyright (c) 2006 David Gwynne <dlg@openbsd.org> 38 * 39 * Permission to use, copy, modify, and distribute this software for any 40 * purpose with or without fee is hereby granted, provided that the above 41 * copyright notice and this permission notice appear in all copies. 42 * 43 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 44 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 45 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 46 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 47 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 48 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 49 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 50 * 51 * 52 * 53 * $OpenBSD: sili.c,v 1.147 2009/02/16 21:19:07 miod Exp $ 54 */ 55 56 #include "sili.h" 57 58 void sili_port_interrupt_enable(struct sili_port *ap); 59 void sili_port_interrupt_redisable(struct sili_port *ap); 60 void sili_port_interrupt_reenable(struct sili_port *ap); 61 62 int sili_load_prb(struct sili_ccb *); 63 void sili_unload_prb(struct sili_ccb *); 64 static void sili_load_prb_callback(void *info, bus_dma_segment_t *segs, 65 int nsegs, int error); 66 void sili_start(struct sili_ccb *); 67 static void sili_port_reinit(struct sili_port *ap); 68 int sili_port_softreset(struct sili_port *ap); 69 int sili_port_hardreset(struct sili_port *ap); 70 void sili_port_hardstop(struct sili_port *ap); 71 void sili_port_listen(struct sili_port *ap); 72 73 static void sili_ata_cmd_timeout_unserialized(void *); 74 static int sili_core_timeout(struct sili_ccb *ccb, int really_error); 75 void sili_check_active_timeouts(struct sili_port *ap); 76 77 void sili_issue_pending_commands(struct sili_port *ap, struct sili_ccb *ccb); 78 79 void sili_port_read_ncq_error(struct sili_port *, int); 80 81 struct sili_dmamem *sili_dmamem_alloc(struct sili_softc *, 
			bus_dma_tag_t tag);
void	sili_dmamem_free(struct sili_softc *, struct sili_dmamem *);
static void sili_dmamem_saveseg(void *info, bus_dma_segment_t *segs,
			int nsegs, int error);

static void sili_dummy_done(struct ata_xfer *xa);
static void sili_empty_done(struct sili_ccb *ccb);
static void sili_ata_cmd_done(struct sili_ccb *ccb);

/*
 * Initialize the global SILI hardware.  This code does not set up any of
 * its ports.
 *
 * Performs a full global reset (GRESET) of the chip, which also resets
 * every port.  Always returns 0.
 */
int
sili_init(struct sili_softc *sc)
{
	DPRINTF(SILI_D_VERBOSE, " GHC 0x%b",
		sili_read(sc, SILI_REG_GHC), SILI_FMT_GHC);

	/*
	 * Reset the entire chip.  This also resets all ports.
	 *
	 * The spec doesn't say anything about how long we have to
	 * wait, so wait 10ms.
	 */
	sili_write(sc, SILI_REG_GCTL, SILI_REG_GCTL_GRESET);
	sili_os_sleep(10);
	sili_write(sc, SILI_REG_GCTL, 0);
	sili_os_sleep(10);

	return (0);
}

/*
 * Allocate and initialize an SILI port.
 *
 * Allocates the per-port structure, the target (ata_port) array, the
 * PRB DMA memory and one CCB per command slot, then parks the port in
 * reset and starts the per-port helper thread.  On any failure the
 * partially-constructed port is torn down via sili_port_free() and
 * ENOMEM is returned.
 */
int
sili_port_alloc(struct sili_softc *sc, u_int port)
{
	struct sili_port	*ap;
	struct ata_port		*at;
	struct sili_prb		*prb;
	struct sili_ccb		*ccb;
	int	rc = ENOMEM;		/* returned on any failure path */
	int	error;
	int	i;

	ap = kmalloc(sizeof(*ap), M_DEVBUF, M_WAITOK | M_ZERO);
	/* 512-byte scratch sector used during error (READ LOG) processing */
	ap->ap_err_scratch = kmalloc(512, M_DEVBUF, M_WAITOK | M_ZERO);

	ksnprintf(ap->ap_name, sizeof(ap->ap_name), "%s%d.%d",
		  device_get_name(sc->sc_dev),
		  device_get_unit(sc->sc_dev),
		  port);
	sc->sc_ports[port] = ap;

	/*
	 * Allocate enough so we never have to reallocate, it makes
	 * it easier.
	 *
	 * ap_pmcount will be reduced by the scan if we encounter the
	 * port multiplier port prior to target 15.
	 */
	if (ap->ap_ata == NULL) {
		ap->ap_ata = kmalloc(sizeof(*ap->ap_ata) * SILI_MAX_PMPORTS,
				     M_DEVBUF, M_INTWAIT | M_ZERO);
		for (i = 0; i < SILI_MAX_PMPORTS; ++i) {
			at = &ap->ap_ata[i];
			at->at_sili_port = ap;
			at->at_target = i;
			at->at_probe = ATA_PROBE_NEED_INIT;
			at->at_features |= ATA_PORT_F_RESCAN;
			ksnprintf(at->at_name, sizeof(at->at_name),
				  "%s.%d", ap->ap_name, i);
		}
	}
	if (bus_space_subregion(sc->sc_piot, sc->sc_pioh,
	    SILI_PORT_REGION(port), SILI_PORT_SIZE, &ap->ap_ioh) != 0) {
		device_printf(sc->sc_dev,
			      "unable to create register window for port %d\n",
			      port);
		goto freeport;
	}

	ap->ap_sc = sc;
	ap->ap_num = port;
	ap->ap_probe = ATA_PROBE_NEED_INIT;
	TAILQ_INIT(&ap->ap_ccb_free);
	TAILQ_INIT(&ap->ap_ccb_pending);
	lockinit(&ap->ap_ccb_lock, "silipo", 0, 0);

	/* Disable port interrupts */
	sili_pwrite(ap, SILI_PREG_INT_DISABLE, SILI_PREG_INT_MASK);

	/*
	 * Reset the port.  This is similar to a Device Reset but far
	 * more invasive.  We use Device Reset in our hardreset function.
	 * This function also does the same OOB initialization sequence
	 * that Device Reset does.
	 *
	 * NOTE: SILI_PREG_STATUS_READY will not be asserted unless and until
	 *	 a device is connected to the port, so we can't use it to
	 *	 verify that the port exists.
	 */
	sili_pwrite(ap, SILI_PREG_CTL_SET, SILI_PREG_CTL_RESET);
	if (sili_pread(ap, SILI_PREG_STATUS) & SILI_PREG_STATUS_READY) {
		device_printf(sc->sc_dev,
			      "Port %d will not go into reset\n", port);
		goto freeport;
	}
	sili_os_sleep(10);
	sili_pwrite(ap, SILI_PREG_CTL_CLR, SILI_PREG_CTL_RESET);

	/*
	 * Allocate the SGE Table
	 */
	ap->ap_dmamem_prbs = sili_dmamem_alloc(sc, sc->sc_tag_prbs);
	if (ap->ap_dmamem_prbs == NULL) {
		kprintf("%s: NOSGET\n", PORTNAME(ap));
		goto freeport;
	}

	/*
	 * Set up the SGE table base address
	 */
	ap->ap_prbs = (struct sili_prb *)SILI_DMA_KVA(ap->ap_dmamem_prbs);

	/*
	 * Allocate a CCB for each command slot
	 *
	 * NOTE(review): M_WAITOK kmalloc never returns NULL, so the NULL
	 * check below is effectively dead code (kept for safety).
	 */
	ap->ap_ccbs = kmalloc(sizeof(struct sili_ccb) * sc->sc_ncmds, M_DEVBUF,
			      M_WAITOK | M_ZERO);
	if (ap->ap_ccbs == NULL) {
		device_printf(sc->sc_dev,
			      "unable to allocate command list for port %d\n",
			      port);
		goto freeport;
	}

	/*
	 * Most structures are in the port BAR.  Assign convenient
	 * pointers in the CCBs
	 */
	for (i = 0; i < sc->sc_ncmds; i++) {
		ccb = &ap->ap_ccbs[i];

		error = bus_dmamap_create(sc->sc_tag_data, BUS_DMA_ALLOCNOW,
					  &ccb->ccb_dmamap);
		if (error) {
			device_printf(sc->sc_dev,
				      "unable to create dmamap for port %d "
				      "ccb %d\n", port, i);
			goto freeport;
		}

		/*
		 * WARNING!!!  Access to the rfis is only allowed under very
		 *	       carefully controlled circumstances because it
		 *	       is located in the LRAM and reading from the
		 *	       LRAM has hardware issues which can blow the
		 *	       port up.  I kid you not (from Linux, and
		 *	       verified by testing here).
		 */
		callout_init(&ccb->ccb_timeout);
		ccb->ccb_slot = i;
		ccb->ccb_port = ap;
		ccb->ccb_prb = &ap->ap_prbs[i];
		ccb->ccb_prb_paddr = SILI_DMA_DVA(ap->ap_dmamem_prbs) +
				     sizeof(*ccb->ccb_prb) * i;
		ccb->ccb_xa.fis = &ccb->ccb_prb->prb_h2d;
		prb = bus_space_kva(ap->ap_sc->sc_iot, ap->ap_ioh,
				    SILI_PREG_LRAM_SLOT(i));
		ccb->ccb_prb_lram = prb;
		/*
		 * Point our rfis to host-memory instead of the LRAM PRB.
		 * It will be copied back if ATA_F_AUTOSENSE is set.  The
		 * LRAM PRB is buggy.
		 */
		/*ccb->ccb_xa.rfis = &prb->prb_d2h;*/
		ccb->ccb_xa.rfis = (void *)ccb->ccb_xa.fis;

		ccb->ccb_xa.packetcmd = prb_packet(ccb->ccb_prb);
		ccb->ccb_xa.tag = i;

		ccb->ccb_xa.state = ATA_S_COMPLETE;

		/*
		 * Reserve CCB[1] as the error CCB.  It doesn't matter
		 * which one we use for the Sili controllers.
		 */
		if (i == 1)
			ap->ap_err_ccb = ccb;
		else
			sili_put_ccb(ccb);
	}
	/*
	 * Do not call sili_port_init() here, the helper thread will
	 * call it for the parallel probe
	 */
	sili_os_start_port(ap);
	return(0);
freeport:
	sili_port_free(sc, port);
	return (rc);
}

/*
 * This is called once by the low level attach (from the helper thread)
 * to get the port state machine rolling, and typically only called again
 * on a hot-plug insertion event.
 *
 * This is called for PM attachments and hot-plug insertion events, and
 * typically not called again until after an unplug/replug sequence.
 *
 * Returns 0 if a device is successfully detected.
 */
int
sili_port_init(struct sili_port *ap)
{
	/*
	 * Do a very hard reset of the port
	 */
	sili_pwrite(ap, SILI_PREG_CTL_SET, SILI_PREG_CTL_RESET);
	sili_os_sleep(10);
	sili_pwrite(ap, SILI_PREG_CTL_CLR, SILI_PREG_CTL_RESET);

	/*
	 * Register initialization: program the FIFO thresholds, drop the
	 * 32-bit-DMA-only and port-multiplier bits, and disable automatic
	 * command completion.  Clear any stale SNotification bits if the
	 * chip supports SSNTF.
	 */
	sili_pwrite(ap, SILI_PREG_FIFO_CTL,
		    SILI_PREG_FIFO_CTL_ENCODE(1024, 1024));
	sili_pwrite(ap, SILI_PREG_CTL_CLR, SILI_PREG_CTL_32BITDMA |
					   SILI_PREG_CTL_PMA);
	sili_pwrite(ap, SILI_PREG_CTL_SET, SILI_PREG_CTL_NOAUTOCC);
	if (ap->ap_sc->sc_flags & SILI_F_SSNTF)
		sili_pwrite(ap, SILI_PREG_SNTF, -1);
	ap->ap_probe = ATA_PROBE_NEED_HARD_RESET;
	ap->ap_pmcount = 0;
	sili_port_interrupt_enable(ap);
	return (0);
}

/*
 * Handle an errored port.  This routine is called when the only
 * commands left on the queue are expired, meaning we can safely
 * go through a port init to clear its state.
 *
 * We complete the expired CCBs and then restart the queue.
 */
static
void
sili_port_reinit(struct sili_port *ap)
{
	struct sili_ccb	*ccb;
	struct ata_port	*at;
	int slot;
	int target;
	u_int32_t data;

	/*
	 * NOTE(review): the "|| 1" forces this message regardless of
	 * bootverbose — looks like a debugging leftover; confirm before
	 * removing.
	 */
	if (bootverbose || 1) {
		kprintf("%s: reiniting port after error reent=%d "
			"expired=%08x\n",
			PORTNAME(ap),
			(ap->ap_flags & AP_F_REINIT_ACTIVE),
			ap->ap_expired);
	}

	/*
	 * Clear port resume, clear bits 16:13 in the port device status
	 * register.  This is from the data sheet.
	 *
	 * Data sheet does not specify a delay but it seems prudent.
	 */
	sili_pwrite(ap, SILI_PREG_CTL_CLR, SILI_PREG_CTL_RESUME);
	sili_os_sleep(10);
	for (target = 0; target < SILI_MAX_PMPORTS; ++target) {
		data = sili_pread(ap, SILI_PREG_PM_STATUS(target));
		data &= ~(SILI_PREG_PM_STATUS_SERVICE |
			  SILI_PREG_PM_STATUS_LEGACY |
			  SILI_PREG_PM_STATUS_NATIVE |
			  SILI_PREG_PM_STATUS_VBSY);
		sili_pwrite(ap, SILI_PREG_PM_STATUS(target), data);
		sili_pwrite(ap, SILI_PREG_PM_QACTIVE(target), 0);
	}

	/*
	 * Issue a Port Initialize and wait for it to clear.  This flushes
	 * commands but does not reset the port.  Then wait for port ready.
	 *
	 * NOTE(review): the INIT bit is polled in the STATUS register —
	 * presumably STATUS mirrors the CTL bits; verify against the
	 * SiI3124/3132 data sheet.
	 */
	sili_pwrite(ap, SILI_PREG_CTL_SET, SILI_PREG_CTL_INIT);
	if (sili_pwait_clr_to(ap, 5000, SILI_PREG_STATUS, SILI_PREG_CTL_INIT)) {
		kprintf("%s: Unable to reinit, port failed\n",
			PORTNAME(ap));
	}
	if (sili_pwait_set(ap, SILI_PREG_STATUS, SILI_PREG_STATUS_READY)) {
		kprintf("%s: Unable to reinit, port will not come ready\n",
			PORTNAME(ap));
	}

	/*
	 * If reentrant, stop here.  Otherwise the state for the original
	 * sili_port_reinit() will get ripped out from under it.
	 */
	if (ap->ap_flags & AP_F_REINIT_ACTIVE)
		return;
	ap->ap_flags |= AP_F_REINIT_ACTIVE;

	/*
	 * Read the LOG ERROR page for targets that returned a specific
	 * D2H FIS with ERR set.
	 *
	 * Don't bother if we are already using the error CCB.
	 */
	if ((ap->ap_flags & AP_F_ERR_CCB_RESERVED) == 0) {
		for (target = 0; target < SILI_MAX_PMPORTS; ++target) {
			at = &ap->ap_ata[target];
			if (at->at_features & ATA_PORT_F_READLOG) {
				at->at_features &= ~ATA_PORT_F_READLOG;
				sili_port_read_ncq_error(ap, target);
			}
		}
	}

	/*
	 * Finally clean out the expired commands, we've probed the error
	 * status (or hopefully probed the error status).  Well, ok,
	 * we probably didn't XXX.
	 */
	while (ap->ap_expired) {
		slot = ffs(ap->ap_expired) - 1;
		ap->ap_expired &= ~(1 << slot);
		KKASSERT(ap->ap_active & (1 << slot));
		ap->ap_active &= ~(1 << slot);
		--ap->ap_active_cnt;
		ccb = &ap->ap_ccbs[slot];
		ccb->ccb_xa.state = ATA_S_TIMEOUT;
		ccb->ccb_done(ccb);
		ccb->ccb_xa.complete(&ccb->ccb_xa);
	}
	ap->ap_flags &= ~AP_F_REINIT_ACTIVE;

	/*
	 * Wow.  All done.  We can get the port moving again.
	 */
	if (ap->ap_probe == ATA_PROBE_FAILED) {
		kprintf("%s: reinit failed, port is dead\n", PORTNAME(ap));
		while ((ccb = TAILQ_FIRST(&ap->ap_ccb_pending)) != NULL) {
			TAILQ_REMOVE(&ap->ap_ccb_pending, ccb, ccb_entry);
			ccb->ccb_xa.flags &= ~ATA_F_TIMEOUT_DESIRED;
			ccb->ccb_xa.state = ATA_S_TIMEOUT;
			ccb->ccb_done(ccb);
			ccb->ccb_xa.complete(&ccb->ccb_xa);
		}
	} else {
		sili_issue_pending_commands(ap, NULL);
	}
}

/*
 * Enable or re-enable interrupts on a port.
 *
 * This routine is called from the port initialization code or from the
 * helper thread as the real interrupt may be forced to turn off certain
 * interrupt sources.
 */
void
sili_port_interrupt_enable(struct sili_port *ap)
{
	u_int32_t data;

	data = SILI_PREG_INT_CCOMPLETE | SILI_PREG_INT_CERROR |
	       SILI_PREG_INT_PHYRDYCHG | SILI_PREG_INT_DEVEXCHG |
	       SILI_PREG_INT_DECODE | SILI_PREG_INT_CRC |
	       SILI_PREG_INT_HANDSHK | SILI_PREG_INT_PMCHANGE;
	if (ap->ap_sc->sc_flags & SILI_F_SSNTF)
		data |= SILI_PREG_INT_SDB;
	sili_pwrite(ap, SILI_PREG_INT_ENABLE, data);
}

/*
 * Mask this port's bit in the global interrupt control register,
 * leaving only the per-port interrupt enables intact.
 */
void
sili_port_interrupt_redisable(struct sili_port *ap)
{
	u_int32_t data;

	data = sili_read(ap->ap_sc, SILI_REG_GCTL);
	data &= SILI_REG_GINT_PORTMASK;
	data &= ~(1 << ap->ap_num);
	sili_write(ap->ap_sc, SILI_REG_GCTL, data);
}

/*
 * Re-set this port's bit in the global interrupt control register,
 * undoing sili_port_interrupt_redisable().
 */
void
sili_port_interrupt_reenable(struct sili_port *ap)
{
	u_int32_t data;

	data = sili_read(ap->ap_sc, SILI_REG_GCTL);
	data &= SILI_REG_GINT_PORTMASK;
	data |= (1 << ap->ap_num);
	sili_write(ap->ap_sc, SILI_REG_GCTL, data);
}

/*
 * Run the port / target state machine from a main context.
 *
 * The state machine for the port is always run.
 *
 * If atx is non-NULL run the state machine for a particular target.
 * If atx is NULL run the state machine for all targets.
 */
void
sili_port_state_machine(struct sili_port *ap, int initial)
{
	struct ata_port *at;
	u_int32_t data;
	int target;
	int didsleep;
	int loop;

	/*
	 * State machine for port.  Note that CAM is not yet associated
	 * during the initial parallel probe and the port's probe state
	 * will not get past ATA_PROBE_NEED_IDENT.
	 */
	{
		if (initial == 0 && ap->ap_probe <= ATA_PROBE_NEED_HARD_RESET) {
			kprintf("%s: Waiting 7 seconds on insertion\n",
				PORTNAME(ap));
			sili_os_sleep(7000);
			initial = 1;
		}
		if (ap->ap_probe == ATA_PROBE_NEED_INIT)
			sili_port_init(ap);
		if (ap->ap_probe == ATA_PROBE_NEED_HARD_RESET)
			sili_port_reset(ap, NULL, 1);
		if (ap->ap_probe == ATA_PROBE_NEED_SOFT_RESET)
			sili_port_reset(ap, NULL, 0);
		if (ap->ap_probe == ATA_PROBE_NEED_IDENT)
			sili_cam_probe(ap, NULL);
	}
	/* Non-PM ports: just report attach/detach to CAM and return */
	if (ap->ap_type != ATA_PORT_T_PM) {
		if (ap->ap_probe == ATA_PROBE_FAILED) {
			sili_cam_changed(ap, NULL, 0);
		} else if (ap->ap_probe >= ATA_PROBE_NEED_IDENT) {
			sili_cam_changed(ap, NULL, 1);
		}
		return;
	}

	/*
	 * Port Multiplier state machine.
	 *
	 * Get a mask of changed targets and combine with any runnable
	 * states already present.
	 */
	for (loop = 0; ;++loop) {
		if (sili_pm_read(ap, 15, SATA_PMREG_EINFO, &data)) {
			kprintf("%s: PM unable to read hot-plug bitmap\n",
				PORTNAME(ap));
			break;
		}

		/*
		 * Do at least one loop, then stop if no more state changes
		 * have occured.  The PM might not generate a new
		 * notification until we clear the entire bitmap.
		 */
		if (loop && data == 0)
			break;

		/*
		 * New devices showing up in the bitmap require some spin-up
		 * time before we start probing them.  Reset didsleep.  The
		 * first new device we detect will sleep before probing.
		 *
		 * This only applies to devices whos change bit is set in
		 * the data, and does not apply to the initial boot-time
		 * probe.
		 */
		didsleep = 0;

		for (target = 0; target < ap->ap_pmcount; ++target) {
			at = &ap->ap_ata[target];

			/*
			 * Check the target state for targets behind the PM
			 * which have changed state.  This will adjust
			 * at_probe and set ATA_PORT_F_RESCAN
			 *
			 * We want to wait at least 10 seconds before probing
			 * a newly inserted device.  If the check status
			 * indicates a device is present and in need of a
			 * hard reset, we make sure we have slept before
			 * continuing.
			 *
			 * We also need to wait at least 1 second for the
			 * PHY state to change after insertion, if we
			 * haven't already waited the 10 seconds.
			 *
			 * NOTE: When pm_check_good finds a good port it
			 *	 typically starts us in probe state
			 *	 NEED_HARD_RESET rather than INIT.
			 */
			if (data & (1 << target)) {
				if (initial == 0 && didsleep == 0)
					sili_os_sleep(1000);
				sili_pm_check_good(ap, target);
				if (initial == 0 && didsleep == 0 &&
				    at->at_probe <= ATA_PROBE_NEED_HARD_RESET
				) {
					didsleep = 1;
					kprintf("%s: Waiting 10 seconds on insertion\n", PORTNAME(ap));
					sili_os_sleep(10000);
				}
			}

			/*
			 * Report hot-plug events before the probe state
			 * really gets hot.  Only actual events are reported
			 * here to reduce spew.
			 */
			if (data & (1 << target)) {
				kprintf("%s: HOTPLUG (PM) - ",
					ATANAME(ap, at));
				switch(at->at_probe) {
				case ATA_PROBE_NEED_INIT:
				case ATA_PROBE_NEED_HARD_RESET:
					kprintf("Device inserted\n");
					break;
				case ATA_PROBE_FAILED:
					kprintf("Device removed\n");
					break;
				default:
					kprintf("Device probe in progress\n");
					break;
				}
			}

			/*
			 * Run through the state machine as necessary if
			 * the port is not marked failed.
			 *
			 * The state machine may stop at NEED_IDENT if
			 * CAM is not yet attached.
			 *
			 * Acquire exclusive access to the port while we
			 * are doing this.  This prevents command-completion
			 * from queueing commands for non-polled targets
			 * inbetween our probe steps.  We need to do this
			 * because the reset probes can generate severe PHY
			 * and protocol errors and soft-brick the port.
			 */
			if (at->at_probe != ATA_PROBE_FAILED &&
			    at->at_probe != ATA_PROBE_GOOD) {
				if (at->at_probe == ATA_PROBE_NEED_INIT)
					sili_pm_port_init(ap, at);
				if (at->at_probe == ATA_PROBE_NEED_HARD_RESET)
					sili_port_reset(ap, at, 1);
				if (at->at_probe == ATA_PROBE_NEED_SOFT_RESET)
					sili_port_reset(ap, at, 0);
				if (at->at_probe == ATA_PROBE_NEED_IDENT)
					sili_cam_probe(ap, at);
			}

			/*
			 * Add or remove from CAM
			 */
			if (at->at_features & ATA_PORT_F_RESCAN) {
				at->at_features &= ~ATA_PORT_F_RESCAN;
				if (at->at_probe == ATA_PROBE_FAILED) {
					sili_cam_changed(ap, at, 0);
				} else if (at->at_probe >= ATA_PROBE_NEED_IDENT) {
					sili_cam_changed(ap, at, 1);
				}
			}
			data &= ~(1 << target);
		}
		if (data) {
			kprintf("%s: WARNING (PM): extra bits set in "
				"EINFO: %08x\n", PORTNAME(ap), data);
			while (target < SILI_MAX_PMPORTS) {
				sili_pm_check_good(ap, target);
				++target;
			}
		}
	}
}

/*
 * De-initialize and detach a port.
 */
void
sili_port_free(struct sili_softc *sc, u_int port)
{
	struct sili_port *ap = sc->sc_ports[port];
	struct sili_ccb *ccb;

	/*
	 * Ensure port is disabled and its interrupts are all flushed.
	 */
	if (ap->ap_sc) {
		sili_os_stop_port(ap);
		sili_pwrite(ap, SILI_PREG_INT_DISABLE, SILI_PREG_INT_MASK);
		sili_pwrite(ap, SILI_PREG_CTL_SET, SILI_PREG_CTL_RESET);
		sili_write(ap->ap_sc, SILI_REG_GCTL,
			   sili_read(ap->ap_sc, SILI_REG_GCTL) &
			   ~SILI_REG_GINT_PORTST(ap->ap_num));
	}

	if (ap->ap_ccbs) {
		/* Free slots come off the free queue; the reserved error
		 * CCB (ap_err_ccb) is never on that queue and is handled
		 * separately below. */
		while ((ccb = sili_get_ccb(ap)) != NULL) {
			if (ccb->ccb_dmamap) {
				bus_dmamap_destroy(sc->sc_tag_data,
						   ccb->ccb_dmamap);
				ccb->ccb_dmamap = NULL;
			}
		}
		if ((ccb = ap->ap_err_ccb) != NULL) {
			if (ccb->ccb_dmamap) {
				bus_dmamap_destroy(sc->sc_tag_data,
						   ccb->ccb_dmamap);
				ccb->ccb_dmamap = NULL;
			}
			ap->ap_err_ccb = NULL;
		}
		kfree(ap->ap_ccbs, M_DEVBUF);
		ap->ap_ccbs = NULL;
	}

	if (ap->ap_dmamem_prbs) {
		sili_dmamem_free(sc, ap->ap_dmamem_prbs);
		ap->ap_dmamem_prbs = NULL;
	}
	if (ap->ap_ata) {
		kfree(ap->ap_ata, M_DEVBUF);
		ap->ap_ata = NULL;
	}
	if (ap->ap_err_scratch) {
		kfree(ap->ap_err_scratch, M_DEVBUF);
		ap->ap_err_scratch = NULL;
	}

	/* bus_space(9) says we dont free the subregions handle */

	kfree(ap, M_DEVBUF);
	sc->sc_ports[port] = NULL;
}

/*
 * Reset a port.
 *
 * If hard is 0 perform a softreset of the port.
 * If hard is 1 perform a hard reset of the port.
 * If hard is 2 perform a hard reset of the port and cycle the phy.
 *
 * If at is non-NULL an indirect port via a port-multiplier is being
 * reset, otherwise a direct port is being reset.
 *
 * NOTE: Indirect ports can only be soft-reset.
 */
int
sili_port_reset(struct sili_port *ap, struct ata_port *at, int hard)
{
	int rc;

	if (hard) {
		if (at)
			rc = sili_pm_hardreset(ap, at->at_target, hard);
		else
			rc = sili_port_hardreset(ap);
	} else {
		if (at)
			rc = sili_pm_softreset(ap, at->at_target);
		else
			rc = sili_port_softreset(ap);
	}
	return(rc);
}

/*
 * SILI soft reset, Section 10.4.1
 *
 * (at) will be NULL when soft-resetting a directly-attached device, and
 * non-NULL when soft-resetting a device through a port multiplier.
 *
 * This function keeps port communications intact and attempts to generate
 * a reset to the connected device using device commands.
 */
int
sili_port_softreset(struct sili_port *ap)
{
	struct sili_ccb	*ccb = NULL;
	struct sili_prb	*prb;
	int		error;
	u_int32_t	sig;

	error = EIO;

	if (bootverbose)
		kprintf("%s: START SOFTRESET\n", PORTNAME(ap));

	crit_enter();
	ap->ap_state = AP_S_NORMAL;

	/*
	 * Prep the special soft-reset SII command.
	 */
	ccb = sili_get_err_ccb(ap);
	ccb->ccb_done = sili_empty_done;
	ccb->ccb_xa.flags = ATA_F_POLL | ATA_F_AUTOSENSE | ATA_F_EXCLUSIVE;
	ccb->ccb_xa.complete = sili_dummy_done;
	ccb->ccb_xa.at = NULL;

	prb = ccb->ccb_prb;
	bzero(&prb->prb_h2d, sizeof(prb->prb_h2d));
	prb->prb_h2d.flags = 0;
	prb->prb_control = SILI_PRB_CTRL_SOFTRESET;
	prb->prb_override = 0;
	prb->prb_xfer_count = 0;

	ccb->ccb_xa.state = ATA_S_PENDING;

	/*
	 * NOTE: Must use sili_quick_timeout() because we hold the err_ccb
	 */
	if (sili_poll(ccb, 8000, sili_quick_timeout) != ATA_S_COMPLETE) {
		kprintf("%s: First FIS failed\n", PORTNAME(ap));
		goto err;
	}

	/* Assemble the ATA signature from the returned D2H register FIS */
	sig = (prb->prb_d2h.lba_high << 24) |
	      (prb->prb_d2h.lba_mid << 16) |
	      (prb->prb_d2h.lba_low << 8) |
	      (prb->prb_d2h.sector_count);
	if (bootverbose)
		kprintf("%s: SOFTRESET SIGNATURE %08x\n", PORTNAME(ap), sig);

	/*
	 * If the softreset is trying to clear a BSY condition after a
	 * normal portreset we assign the port type.
	 *
	 * If the softreset is being run first as part of the ccb error
	 * processing code then report if the device signature changed
	 * unexpectedly.
	 *
	 * NOTE(review): the unconditional "error = 0" below overwrites the
	 * EBUSY assigned on a signature mismatch, so that path currently
	 * reports success — confirm whether this is intentional (the XXX
	 * suggests a known wart).
	 */
	if (ap->ap_type == ATA_PORT_T_NONE) {
		ap->ap_type = sili_port_signature(ap, NULL, sig);
	} else {
		if (sili_port_signature(ap, NULL, sig) != ap->ap_type) {
			kprintf("%s: device signature unexpectedly "
				"changed\n", PORTNAME(ap));
			error = EBUSY; /* XXX */
		}
	}
	error = 0;
err:
	if (ccb != NULL) {
		sili_put_err_ccb(ccb);
	}

	/*
	 * If we failed to softreset make the port quiescent, otherwise
	 * make sure the port's start/stop state matches what it was on
	 * entry.
	 *
	 * Don't kill the port if the softreset is on a port multiplier
	 * target, that would kill all the targets!
	 */
	if (bootverbose) {
		kprintf("%s: END SOFTRESET %d prob=%d state=%d\n",
			PORTNAME(ap), error, ap->ap_probe, ap->ap_state);
	}
	if (error) {
		sili_port_hardstop(ap);
		/* ap_probe set to failed */
	} else {
		ap->ap_probe = ATA_PROBE_NEED_IDENT;
		ap->ap_pmcount = 1;
	}
	crit_exit();

	sili_pwrite(ap, SILI_PREG_SERR, -1);
	if (bootverbose)
		kprintf("%s: END SOFTRESET\n", PORTNAME(ap));

	return (error);
}

/*
 * This function does a hard reset of the port.  Note that the device
 * connected to the port could still end-up hung.  Phy detection is
 * used to short-cut longer operations.
 */
int
sili_port_hardreset(struct sili_port *ap)
{
	u_int32_t data;
	int	error;
	int	loop;

	if (bootverbose)
		kprintf("%s: START HARDRESET\n", PORTNAME(ap));

	ap->ap_state = AP_S_NORMAL;

	/*
	 * Set SCTL up for any speed restrictions before issuing the
	 * device reset.   This may also take us out of an INIT state
	 * (if we were previously in a continuous reset state from
	 * sili_port_listen()).
	 */
	data = SILI_PREG_SCTL_SPM_NONE |
	       SILI_PREG_SCTL_IPM_NONE |
	       SILI_PREG_SCTL_SPD_NONE |
	       SILI_PREG_SCTL_DET_NONE;
	if (SiliForceGen1 & (1 << ap->ap_num)) {
		data &= ~SILI_PREG_SCTL_SPD_NONE;
		data |= SILI_PREG_SCTL_SPD_GEN1;
	}
	sili_pwrite(ap, SILI_PREG_SCTL, data);

	/*
	 * The transition from a continuous COMRESET state from
	 * sili_port_listen() back to device detect can take a
	 * few seconds.  It's quite non-deterministic.  Most of
	 * the time it takes far less.  Use a polling loop to
	 * wait.
	 */
	loop = 4000;
	while (loop > 0) {
		data = sili_pread(ap, SILI_PREG_SSTS);
		if (data & SILI_PREG_SSTS_DET)
			break;
		loop -= sili_os_softsleep();
	}
	sili_os_sleep(100);

	/*
	 * Issue Device Reset, give the phy a little time to settle down.
	 *
	 * NOTE: Unlike Port Reset, the port ready signal will not
	 *	 go active unless a device is established to be on
	 *	 the port.
	 */
	sili_pwrite(ap, SILI_PREG_CTL_CLR, SILI_PREG_CTL_PMA);
	sili_pwrite(ap, SILI_PREG_CTL_CLR, SILI_PREG_CTL_RESUME);
	sili_pwrite(ap, SILI_PREG_CTL_SET, SILI_PREG_CTL_DEVRESET);
	if (sili_pwait_clr(ap, SILI_PREG_CTL_SET, SILI_PREG_CTL_DEVRESET)) {
		kprintf("%s: hardreset failed to clear\n", PORTNAME(ap));
	}
	sili_os_sleep(20);

	/*
	 * Try to determine if there is a device on the port.
	 *
	 * Give the device 3/10 second to at least be detected.
	 */
	loop = 300;
	while (loop > 0) {
		data = sili_pread(ap, SILI_PREG_SSTS);
		if (data & SILI_PREG_SSTS_DET)
			break;
		loop -= sili_os_softsleep();
	}
	if (loop <= 0) {
		if (bootverbose) {
			kprintf("%s: Port appears to be unplugged\n",
				PORTNAME(ap));
		}
		error = ENODEV;
		goto done;
	}

	/*
	 * There is something on the port.  Give the device 3 seconds
	 * to detect.
	 */
	if (sili_pwait_eq(ap, 3000, SILI_PREG_SSTS,
			  SILI_PREG_SSTS_DET, SILI_PREG_SSTS_DET_DEV)) {
		if (bootverbose) {
			kprintf("%s: Device may be powered down\n",
				PORTNAME(ap));
		}
		error = ENODEV;
		goto pmdetect;
	}

	/*
	 * We got something that definitely looks like a device.  Give
	 * the device time to send us its first D2H FIS.
	 *
	 * This effectively waits for BSY to clear.
	 */
	if (sili_pwait_set_to(ap, 3000, SILI_PREG_STATUS,
			      SILI_PREG_STATUS_READY)) {
		error = EBUSY;
	} else {
		error = 0;
	}

pmdetect:
	/*
	 * Do the PM port probe regardless of how things turned out above.
	 *
	 * If the PM port probe fails it will return the original error
	 * from above.
	 */
	if (ap->ap_sc->sc_flags & SILI_F_SPM) {
		error = sili_pm_port_probe(ap, error);
	}

done:
	/*
	 * Finish up
	 */
	switch(error) {
	case 0:
		if (ap->ap_type == ATA_PORT_T_PM)
			ap->ap_probe = ATA_PROBE_GOOD;
		else
			ap->ap_probe = ATA_PROBE_NEED_SOFT_RESET;
		break;
	case ENODEV:
		/*
		 * No device detected.
		 *
		 * NOTE(review): the DET field is masked with
		 * SATA_PM_SSTS_DET here rather than SILI_PREG_SSTS_DET —
		 * presumably the two masks are identical; confirm.
		 */
		data = sili_pread(ap, SILI_PREG_SSTS);

		switch(data & SATA_PM_SSTS_DET) {
		case SILI_PREG_SSTS_DET_DEV_NE:
			kprintf("%s: Device not communicating\n",
				PORTNAME(ap));
			break;
		case SILI_PREG_SSTS_DET_OFFLINE:
			kprintf("%s: PHY offline\n",
				PORTNAME(ap));
			break;
		default:
			kprintf("%s: No device detected\n",
				PORTNAME(ap));
			break;
		}
		sili_port_hardstop(ap);
		break;
	default:
		/*
		 * (EBUSY)
		 */
		kprintf("%s: Device on port is bricked\n",
			PORTNAME(ap));
		sili_port_hardstop(ap);
		break;
	}
	sili_pwrite(ap, SILI_PREG_SERR, -1);

	if (bootverbose)
		kprintf("%s: END HARDRESET %d\n", PORTNAME(ap), error);
	return (error);
}

/*
 * Hard-stop on hot-swap device removal.  See 10.10.1
 *
 * Place the port in a mode that will allow it to detect hot-swap
 * insertions.  This is a bit imprecise because just setting-up SCTL
 * to DET_INIT doesn't seem to do the job.
 */
void
sili_port_hardstop(struct sili_port *ap)
{
	struct sili_ccb *ccb;
	struct ata_port *at;
	int i;
	int slot;
	int serial;

	ap->ap_state = AP_S_FATAL_ERROR;
	ap->ap_probe = ATA_PROBE_FAILED;
	ap->ap_type = ATA_PORT_T_NONE;

	/*
	 * Clean up AT sub-ports on SATA port.
	 */
	for (i = 0; ap->ap_ata && i < SILI_MAX_PMPORTS; ++i) {
		at = &ap->ap_ata[i];
		at->at_type = ATA_PORT_T_NONE;
		at->at_probe = ATA_PROBE_FAILED;
		at->at_features &= ~ATA_PORT_F_READLOG;
	}

	/*
	 * Kill the port.  Don't bother waiting for it to transition
	 * back up.
	 */
	sili_pwrite(ap, SILI_PREG_CTL_SET, SILI_PREG_CTL_RESET);
	if (sili_pread(ap, SILI_PREG_STATUS) & SILI_PREG_STATUS_READY) {
		kprintf("%s: Port will not go into reset\n",
			PORTNAME(ap));
	}
	sili_os_sleep(10);
	sili_pwrite(ap, SILI_PREG_CTL_CLR, SILI_PREG_CTL_RESET);

	/*
	 * Turn off port-multiplier control bit
	 */
	sili_pwrite(ap, SILI_PREG_CTL_CLR, SILI_PREG_CTL_PMA);

	/*
	 * Clean up the command list.
	 *
	 * The serial number check detects a timeout callout racing our
	 * callout_stop_sync(); when it fires we restart the scan from
	 * scratch.
	 */
restart:
	while (ap->ap_active) {
		slot = ffs(ap->ap_active) - 1;
		ap->ap_active &= ~(1 << slot);
		ap->ap_expired &= ~(1 << slot);
		--ap->ap_active_cnt;
		ccb = &ap->ap_ccbs[slot];
		if (ccb->ccb_xa.flags & ATA_F_TIMEOUT_RUNNING) {
			serial = ccb->ccb_xa.serial;
			callout_stop_sync(&ccb->ccb_timeout);
			if (serial != ccb->ccb_xa.serial) {
				kprintf("%s: Warning: timeout race ccb %p\n",
					PORTNAME(ap), ccb);
				goto restart;
			}
			ccb->ccb_xa.flags &= ~ATA_F_TIMEOUT_RUNNING;
		}
		ccb->ccb_xa.flags &= ~(ATA_F_TIMEOUT_DESIRED |
				       ATA_F_TIMEOUT_EXPIRED);
		ccb->ccb_xa.state = ATA_S_TIMEOUT;
		ccb->ccb_done(ccb);
		ccb->ccb_xa.complete(&ccb->ccb_xa);
	}
	while ((ccb = TAILQ_FIRST(&ap->ap_ccb_pending)) != NULL) {
		TAILQ_REMOVE(&ap->ap_ccb_pending, ccb, ccb_entry);
		ccb->ccb_xa.state = ATA_S_TIMEOUT;
		ccb->ccb_xa.flags &= ~ATA_F_TIMEOUT_DESIRED;
		ccb->ccb_done(ccb);
		ccb->ccb_xa.complete(&ccb->ccb_xa);
	}
	KKASSERT(ap->ap_active_cnt == 0);

	/*
	 * Put the port into a listen mode, we want to get insertion/removal
	 * events.
	 */
	sili_port_listen(ap);
}

/*
 * Place port into a listen mode for hotplug events only.  The port has
 * already been reset and the command processor may not be ready due
 * to the lack of a device.
 */
void
sili_port_listen(struct sili_port *ap)
{
	u_int32_t data;

#if 1
	/*
	 * NOTE(review): data is computed here but never written to SCTL —
	 * the sili_pwrite(ap, SILI_PREG_SCTL, data) appears to be missing
	 * (or intentionally disabled); confirm against the driver history.
	 */
	data = SILI_PREG_SCTL_SPM_NONE |
	       SILI_PREG_SCTL_IPM_NONE |
	       SILI_PREG_SCTL_SPD_NONE |
	       SILI_PREG_SCTL_DET_INIT;
	if (SiliForceGen1 & (1 << ap->ap_num)) {
		data &= ~SILI_PREG_SCTL_SPD_NONE;
		data |= SILI_PREG_SCTL_SPD_GEN1;
	}
#endif
	sili_os_sleep(20);
	sili_pwrite(ap, SILI_PREG_SERR, -1);
	sili_pwrite(ap, SILI_PREG_INT_ENABLE, SILI_PREG_INT_PHYRDYCHG |
					      SILI_PREG_INT_DEVEXCHG);
}

/*
 * Figure out what type of device is connected to the port, ATAPI or
 * DISK.
 */
int
sili_port_signature(struct sili_port *ap, struct ata_port *at, u_int32_t sig)
{
	if (bootverbose)
		kprintf("%s: sig %08x\n", ATANAME(ap, at), sig);
	/* Only the high 16 bits of the signature distinguish the types */
	if ((sig & 0xffff0000) == (SATA_SIGNATURE_ATAPI & 0xffff0000)) {
		return(ATA_PORT_T_ATAPI);
	} else if ((sig & 0xffff0000) ==
		 (SATA_SIGNATURE_PORT_MULTIPLIER & 0xffff0000)) {
		return(ATA_PORT_T_PM);
	} else {
		return(ATA_PORT_T_DISK);
	}
}

/*
 * Load the DMA descriptor table for a CCB's buffer.
 *
 * NOTE: ATA_F_PIO is auto-selected by sili part.
 */
int
sili_load_prb(struct sili_ccb *ccb)
{
	struct sili_port		*ap = ccb->ccb_port;
	struct sili_softc		*sc = ap->ap_sc;
	struct ata_xfer			*xa = &ccb->ccb_xa;
	struct sili_prb			*prb = ccb->ccb_prb;
	struct sili_sge			*sge;
	bus_dmamap_t			dmap = ccb->ccb_dmamap;
	int				error;

	/*
	 * Set up the PRB.  The PRB contains 2 SGE's (1 if it is an ATAPI
	 * command).  The SGE must be set up to link to the rest of our
	 * SGE array, in blocks of four SGEs (a SGE table) starting at
	 */
	prb->prb_xfer_count = 0;
	prb->prb_control = 0;
	prb->prb_override = 0;
	sge = (ccb->ccb_xa.flags & ATA_F_PACKET) ?
		&prb->prb_sge_packet : &prb->prb_sge_normal;
	if (xa->datalen == 0) {
		/* No data: single terminating, data-discard SGE */
		sge->sge_flags = SILI_SGE_FLAGS_TRM | SILI_SGE_FLAGS_DRD;
		sge->sge_count = 0;
		return (0);
	}

	if (ccb->ccb_xa.flags & ATA_F_READ)
		prb->prb_control |= SILI_PRB_CTRL_READ;
	if (ccb->ccb_xa.flags & ATA_F_WRITE)
		prb->prb_control |= SILI_PRB_CTRL_WRITE;
	/* First SGE links to the external SGE array in the PRB */
	sge->sge_flags = SILI_SGE_FLAGS_LNK;
	sge->sge_count = 0;
	sge->sge_paddr = ccb->ccb_prb_paddr +
			 offsetof(struct sili_prb, prb_sge[0]);

	/*
	 * Load our sge array.
	 */
	error = bus_dmamap_load(sc->sc_tag_data, dmap,
				xa->data, xa->datalen,
				sili_load_prb_callback,
				ccb,
				((xa->flags & ATA_F_NOWAIT) ?
				    BUS_DMA_NOWAIT : BUS_DMA_WAITOK));
	if (error != 0) {
		kprintf("%s: error %d loading dmamap\n", PORTNAME(ap), error);
		return (1);
	}

	bus_dmamap_sync(sc->sc_tag_data, dmap,
			(xa->flags & ATA_F_READ) ?
			    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);

	return (0);
}

/*
 * Callback from BUSDMA system to load the segment list.
 *
 * The scatter/gather table is loaded by the sili chip in blocks of
 * four SGE's.  If a continuance is required the last entry in each
 * block must point to the next block.
1241 */ 1242 static 1243 void 1244 sili_load_prb_callback(void *info, bus_dma_segment_t *segs, int nsegs, 1245 int error) 1246 { 1247 struct sili_ccb *ccb = info; 1248 struct sili_sge *sge; 1249 int sgi; 1250 1251 KKASSERT(nsegs <= SILI_MAX_SGET); 1252 1253 sgi = 0; 1254 sge = &ccb->ccb_prb->prb_sge[0]; 1255 while (nsegs) { 1256 if ((sgi & 3) == 3) { 1257 sge->sge_paddr = htole64(ccb->ccb_prb_paddr + 1258 offsetof(struct sili_prb, 1259 prb_sge[sgi + 1])); 1260 sge->sge_count = 0; 1261 sge->sge_flags = SILI_SGE_FLAGS_LNK; 1262 } else { 1263 sge->sge_paddr = htole64(segs->ds_addr); 1264 sge->sge_count = htole32(segs->ds_len); 1265 sge->sge_flags = 0; 1266 --nsegs; 1267 ++segs; 1268 } 1269 ++sge; 1270 ++sgi; 1271 } 1272 --sge; 1273 sge->sge_flags |= SILI_SGE_FLAGS_TRM; 1274 } 1275 1276 void 1277 sili_unload_prb(struct sili_ccb *ccb) 1278 { 1279 struct sili_port *ap = ccb->ccb_port; 1280 struct sili_softc *sc = ap->ap_sc; 1281 struct ata_xfer *xa = &ccb->ccb_xa; 1282 bus_dmamap_t dmap = ccb->ccb_dmamap; 1283 1284 if (xa->datalen != 0) { 1285 bus_dmamap_sync(sc->sc_tag_data, dmap, 1286 (xa->flags & ATA_F_READ) ? 1287 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE); 1288 1289 bus_dmamap_unload(sc->sc_tag_data, dmap); 1290 1291 if (ccb->ccb_xa.flags & ATA_F_NCQ) 1292 xa->resid = 0; 1293 else 1294 xa->resid = xa->datalen - 1295 le32toh(ccb->ccb_prb->prb_xfer_count); 1296 } 1297 } 1298 1299 /* 1300 * Start a command and poll for completion. 1301 * 1302 * timeout is in ms and only counts once the command gets on-chip. 1303 * 1304 * Returns ATA_S_* state, compare against ATA_S_COMPLETE to determine 1305 * that no error occured. 1306 * 1307 * NOTE: If the caller specifies a NULL timeout function the caller is 1308 * responsible for clearing hardware state on failure, but we will 1309 * deal with removing the ccb from any pending queue. 1310 * 1311 * NOTE: NCQ should never be used with this function. 
1312 * 1313 * NOTE: If the port is in a failed state and stopped we do not try 1314 * to activate the ccb. 1315 */ 1316 int 1317 sili_poll(struct sili_ccb *ccb, int timeout, 1318 void (*timeout_fn)(struct sili_ccb *)) 1319 { 1320 struct sili_port *ap = ccb->ccb_port; 1321 1322 if (ccb->ccb_port->ap_state == AP_S_FATAL_ERROR) { 1323 ccb->ccb_xa.state = ATA_S_ERROR; 1324 return(ccb->ccb_xa.state); 1325 } 1326 1327 KKASSERT((ap->ap_expired & (1 << ccb->ccb_slot)) == 0); 1328 sili_start(ccb); 1329 1330 do { 1331 sili_port_intr(ap, 1); 1332 switch(ccb->ccb_xa.state) { 1333 case ATA_S_ONCHIP: 1334 timeout -= sili_os_softsleep(); 1335 break; 1336 case ATA_S_PENDING: 1337 /* 1338 * The packet can get stuck on the pending queue 1339 * if the port refuses to come ready. XXX 1340 */ 1341 #if 0 1342 if (xxx AP_F_EXCLUSIVE_ACCESS) 1343 timeout -= sili_os_softsleep(); 1344 else 1345 #endif 1346 sili_os_softsleep(); 1347 sili_check_active_timeouts(ap); 1348 break; 1349 default: 1350 return (ccb->ccb_xa.state); 1351 } 1352 } while (timeout > 0); 1353 1354 /* 1355 * Don't spew if this is a probe during hard reset 1356 */ 1357 if (ap->ap_probe != ATA_PROBE_NEED_HARD_RESET) { 1358 kprintf("%s: Poll timeout slot %d\n", 1359 ATANAME(ap, ccb->ccb_xa.at), 1360 ccb->ccb_slot); 1361 } 1362 1363 timeout_fn(ccb); 1364 1365 return(ccb->ccb_xa.state); 1366 } 1367 1368 /* 1369 * When polling we have to check if the currently active CCB(s) 1370 * have timed out as the callout will be deadlocked while we 1371 * hold the port lock. 
1372 */ 1373 void 1374 sili_check_active_timeouts(struct sili_port *ap) 1375 { 1376 struct sili_ccb *ccb; 1377 u_int32_t mask; 1378 int tag; 1379 1380 mask = ap->ap_active; 1381 while (mask) { 1382 tag = ffs(mask) - 1; 1383 mask &= ~(1 << tag); 1384 ccb = &ap->ap_ccbs[tag]; 1385 if (ccb->ccb_xa.flags & ATA_F_TIMEOUT_EXPIRED) { 1386 sili_core_timeout(ccb, 0); 1387 } 1388 } 1389 } 1390 1391 static 1392 __inline 1393 void 1394 sili_start_timeout(struct sili_ccb *ccb) 1395 { 1396 if (ccb->ccb_xa.flags & ATA_F_TIMEOUT_DESIRED) { 1397 ccb->ccb_xa.flags |= ATA_F_TIMEOUT_RUNNING; 1398 callout_reset(&ccb->ccb_timeout, 1399 (ccb->ccb_xa.timeout * hz + 999) / 1000, 1400 sili_ata_cmd_timeout_unserialized, ccb); 1401 } 1402 } 1403 1404 void 1405 sili_start(struct sili_ccb *ccb) 1406 { 1407 struct sili_port *ap = ccb->ccb_port; 1408 #if 0 1409 struct sili_softc *sc = ap->ap_sc; 1410 #endif 1411 1412 KKASSERT(ccb->ccb_xa.state == ATA_S_PENDING); 1413 1414 /* 1415 * Sync our SGE table and PRB 1416 */ 1417 bus_dmamap_sync(ap->ap_dmamem_prbs->adm_tag, 1418 ap->ap_dmamem_prbs->adm_map, 1419 BUS_DMASYNC_PREWRITE); 1420 1421 /* 1422 * XXX dmamap for PRB XXX BUS_DMASYNC_PREWRITE 1423 */ 1424 1425 /* 1426 * Controller will update shared memory! 1427 * XXX bus_dmamap_sync ... BUS_DMASYNC_PREREAD ... 1428 */ 1429 /* Prepare RFIS area for write by controller */ 1430 1431 /* 1432 * There's no point trying to optimize this, it only shaves a few 1433 * nanoseconds so just queue the command and call our generic issue. 1434 */ 1435 sili_issue_pending_commands(ap, ccb); 1436 } 1437 1438 /* 1439 * Wait for all commands to complete processing. We hold the lock so no 1440 * new commands will be queued. 1441 */ 1442 void 1443 sili_exclusive_access(struct sili_port *ap) 1444 { 1445 while (ap->ap_active) { 1446 sili_port_intr(ap, 1); 1447 sili_os_softsleep(); 1448 } 1449 } 1450 1451 /* 1452 * If ccb is not NULL enqueue and/or issue it. 1453 * 1454 * If ccb is NULL issue whatever we can from the queue. 
However, nothing 1455 * new is issued if the exclusive access flag is set or expired ccb's are 1456 * present. 1457 * 1458 * If existing commands are still active (ap_active) we can only 1459 * issue matching new commands. 1460 */ 1461 void 1462 sili_issue_pending_commands(struct sili_port *ap, struct sili_ccb *ccb) 1463 { 1464 /* 1465 * Enqueue the ccb. 1466 * 1467 * If just running the queue and in exclusive access mode we 1468 * just return. Also in this case if there are any expired ccb's 1469 * we want to clear the queue so the port can be safely stopped. 1470 * 1471 * XXX sili chip - expiration needs to be per-target if PM supports 1472 * FBSS? 1473 */ 1474 if (ccb) { 1475 TAILQ_INSERT_TAIL(&ap->ap_ccb_pending, ccb, ccb_entry); 1476 } else if (ap->ap_expired) { 1477 return; 1478 } 1479 1480 /* 1481 * Pull the next ccb off the queue and run it if possible. 1482 * If the port is not ready to accept commands enable the 1483 * ready interrupt instead of starting a new command. 1484 * 1485 * XXX limit ncqdepth for attached devices behind PM 1486 */ 1487 while ((ccb = TAILQ_FIRST(&ap->ap_ccb_pending)) != NULL) { 1488 /* 1489 * Port may be wedged. 1490 */ 1491 if ((sili_pread(ap, SILI_PREG_STATUS) & 1492 SILI_PREG_STATUS_READY) == 0) { 1493 kprintf("%s: slot %d NOT READY\n", 1494 ATANAME(ap, ccb->ccb_xa.at), ccb->ccb_slot); 1495 sili_pwrite(ap, SILI_PREG_INT_ENABLE, 1496 SILI_PREG_INT_READY); 1497 break; 1498 } 1499 1500 /* 1501 * Handle exclusivity requirements. ATA_F_EXCLUSIVE is used 1502 * when we may have to access the rfis which is stored in 1503 * the LRAM PRB. Unfortunately reading the LRAM PRB is 1504 * highly problematic, so requests (like PM requests) which 1505 * need to access the rfis use exclusive mode and then 1506 * access the copy made by the port interrupt code back in 1507 * host memory. 
1508 */ 1509 if (ap->ap_active & ~ap->ap_expired) { 1510 /* 1511 * There may be multiple ccb's already running, 1512 * if any are running and ap_run_flags sets 1513 * one of these flags then we know only one is 1514 * running. 1515 * 1516 * XXX Current AUTOSENSE code forces exclusivity 1517 * to simplify the code. 1518 */ 1519 if (ap->ap_run_flags & 1520 (ATA_F_EXCLUSIVE | ATA_F_AUTOSENSE)) { 1521 break; 1522 } 1523 1524 /* 1525 * If the ccb we want to run is exclusive and ccb's 1526 * are still active on the port, we can't queue it 1527 * yet. 1528 * 1529 * XXX Current AUTOSENSE code forces exclusivity 1530 * to simplify the code. 1531 */ 1532 if (ccb->ccb_xa.flags & 1533 (ATA_F_EXCLUSIVE | ATA_F_AUTOSENSE)) { 1534 break; 1535 } 1536 } 1537 1538 TAILQ_REMOVE(&ap->ap_ccb_pending, ccb, ccb_entry); 1539 ccb->ccb_xa.state = ATA_S_ONCHIP; 1540 ap->ap_active |= 1 << ccb->ccb_slot; 1541 ap->ap_active_cnt++; 1542 ap->ap_run_flags = ccb->ccb_xa.flags; 1543 1544 /* 1545 * We can't use the CMD_FIFO method because it requires us 1546 * building the PRB in the LRAM, and the LRAM is buggy. So 1547 * we use host memory for the PRB. 1548 */ 1549 sili_pwrite(ap, SILI_PREG_CMDACT(ccb->ccb_slot), 1550 (u_int32_t)ccb->ccb_prb_paddr); 1551 sili_pwrite(ap, SILI_PREG_CMDACT(ccb->ccb_slot) + 4, 1552 (u_int32_t)(ccb->ccb_prb_paddr >> 32)); 1553 /* sili_pwrite(ap, SILI_PREG_CMD_FIFO, ccb->ccb_slot); */ 1554 sili_start_timeout(ccb); 1555 } 1556 } 1557 1558 void 1559 sili_intr(void *arg) 1560 { 1561 struct sili_softc *sc = arg; 1562 struct sili_port *ap; 1563 u_int32_t gint; 1564 int port; 1565 1566 /* 1567 * Check if the master enable is up, and whether any interrupts are 1568 * pending. 1569 * 1570 * Clear the ints we got. 
1571 */ 1572 if ((sc->sc_flags & SILI_F_INT_GOOD) == 0) 1573 return; 1574 gint = sili_read(sc, SILI_REG_GINT); 1575 if (gint == 0 || gint == 0xffffffff) 1576 return; 1577 sili_write(sc, SILI_REG_GINT, gint); 1578 1579 /* 1580 * Process interrupts for each port in a non-blocking fashion. 1581 */ 1582 while (gint & SILI_REG_GINT_PORTMASK) { 1583 port = ffs(gint) - 1; 1584 ap = sc->sc_ports[port]; 1585 if (ap) { 1586 if (sili_os_lock_port_nb(ap) == 0) { 1587 sili_port_intr(ap, 0); 1588 sili_os_unlock_port(ap); 1589 } else { 1590 sili_port_interrupt_redisable(ap); 1591 sili_os_signal_port_thread(ap, AP_SIGF_PORTINT); 1592 } 1593 } 1594 gint &= ~(1 << port); 1595 } 1596 } 1597 1598 /* 1599 * Core called from helper thread. 1600 */ 1601 void 1602 sili_port_thread_core(struct sili_port *ap, int mask) 1603 { 1604 /* 1605 * Process any expired timedouts. 1606 */ 1607 sili_os_lock_port(ap); 1608 if (mask & AP_SIGF_TIMEOUT) { 1609 sili_check_active_timeouts(ap); 1610 } 1611 1612 /* 1613 * Process port interrupts which require a higher level of 1614 * intervention. 1615 */ 1616 if (mask & AP_SIGF_PORTINT) { 1617 sili_port_intr(ap, 1); 1618 sili_port_interrupt_reenable(ap); 1619 sili_os_unlock_port(ap); 1620 } else { 1621 sili_os_unlock_port(ap); 1622 } 1623 } 1624 1625 /* 1626 * Core per-port interrupt handler. 1627 * 1628 * If blockable is 0 we cannot call sili_os_sleep() at all and we can only 1629 * deal with normal command completions which do not require blocking. 
1630 */ 1631 void 1632 sili_port_intr(struct sili_port *ap, int blockable) 1633 { 1634 struct sili_softc *sc = ap->ap_sc; 1635 u_int32_t is; 1636 int slot; 1637 struct sili_ccb *ccb = NULL; 1638 struct ata_port *ccb_at = NULL; 1639 u_int32_t active; 1640 u_int32_t finished; 1641 const u_int32_t blockable_mask = SILI_PREG_IST_PHYRDYCHG | 1642 SILI_PREG_IST_DEVEXCHG | 1643 SILI_PREG_IST_CERROR | 1644 SILI_PREG_IST_DECODE | 1645 SILI_PREG_IST_CRC | 1646 SILI_PREG_IST_HANDSHK; 1647 const u_int32_t fatal_mask = SILI_PREG_IST_PHYRDYCHG | 1648 SILI_PREG_IST_DEVEXCHG | 1649 SILI_PREG_IST_DECODE | 1650 SILI_PREG_IST_CRC | 1651 SILI_PREG_IST_HANDSHK; 1652 1653 enum { NEED_NOTHING, NEED_HOTPLUG_INSERT, 1654 NEED_HOTPLUG_REMOVE } need = NEED_NOTHING; 1655 1656 /* 1657 * NOTE: CCOMPLETE was automatically cleared when we read INT_STATUS. 1658 */ 1659 is = sili_pread(ap, SILI_PREG_INT_STATUS); 1660 is &= SILI_PREG_IST_MASK; 1661 if (is & SILI_PREG_IST_CCOMPLETE) 1662 sili_pwrite(ap, SILI_PREG_INT_STATUS, SILI_PREG_IST_CCOMPLETE); 1663 1664 /* 1665 * If we can't block then we can't handle these here. Disable 1666 * the interrupts in question so we don't live-lock, the helper 1667 * thread will re-enable them. 1668 * 1669 * If the port is in a completely failed state we do not want 1670 * to drop through to failed-command-processing if blockable is 0, 1671 * just let the thread deal with it all. 1672 * 1673 * Otherwise we fall through and still handle DHRS and any commands 1674 * which completed normally. Even if we are errored we haven't 1675 * stopped the port yet so CI/SACT are still good. 
1676 */ 1677 if (blockable == 0) { 1678 if (ap->ap_state == AP_S_FATAL_ERROR) { 1679 sili_port_interrupt_redisable(ap); 1680 sili_os_signal_port_thread(ap, AP_SIGF_PORTINT); 1681 /*is &= ~blockable_mask;*/ 1682 return; 1683 } 1684 if (is & blockable_mask) { 1685 sili_port_interrupt_redisable(ap); 1686 sili_os_signal_port_thread(ap, AP_SIGF_PORTINT); 1687 /*is &= ~blockable_mask;*/ 1688 return; 1689 } 1690 } 1691 1692 if (is & SILI_PREG_IST_CERROR) { 1693 /* 1694 * Command failed (blockable). 1695 * 1696 * This stops command processing. We can extract the PM 1697 * target from the PMP field in SILI_PREG_CONTEXT. The 1698 * tag is not necessarily valid so don't use that. 1699 * 1700 * We must then expire all CCB's for that target and resume 1701 * processing if any other targets have active commands. 1702 * Particular error codes can be recovered by reading the LOG 1703 * page. 1704 * 1705 * The expire handling code will do the rest, which is 1706 * basically to reset the port once the only active 1707 * commands remaining are all expired. 
1708 */ 1709 u_int32_t error; 1710 int target; 1711 int resume = 1; 1712 1713 target = (sili_pread(ap, SILI_PREG_CONTEXT) >> 1714 SILI_PREG_CONTEXT_PMPORT_SHIFT) & 1715 SILI_PREG_CONTEXT_PMPORT_MASK; 1716 sili_pwrite(ap, SILI_PREG_INT_STATUS, SILI_PREG_IST_CERROR); 1717 active = ap->ap_active & ~ap->ap_expired; 1718 error = sili_pread(ap, SILI_PREG_CERROR); 1719 kprintf("%s.%d target error %d active=%08x hactive=%08x " 1720 "SERR=%b\n", 1721 PORTNAME(ap), target, error, 1722 active, sili_pread(ap, SILI_PREG_SLOTST), 1723 sili_pread(ap, SILI_PREG_SERR), SILI_PFMT_SERR); 1724 1725 while (active) { 1726 slot = ffs(active) - 1; 1727 ccb = &ap->ap_ccbs[slot]; 1728 if ((ccb_at = ccb->ccb_xa.at) == NULL) 1729 ccb_at = &ap->ap_ata[0]; 1730 if (target == ccb_at->at_target) { 1731 if ((ccb->ccb_xa.flags & ATA_F_NCQ) && 1732 (error == SILI_PREG_CERROR_DEVICE || 1733 error == SILI_PREG_CERROR_SDBERROR)) { 1734 ccb_at->at_features |= ATA_PORT_F_READLOG; 1735 } 1736 if (sili_core_timeout(ccb, 1) == 0) 1737 resume = 0; 1738 } 1739 active &= ~(1 << slot); 1740 } 1741 1742 /* 1743 * Resume will be 0 if the timeout reinited and restarted 1744 * the port. Otherwise we resume the port to allow other 1745 * commands to complete. 1746 */ 1747 if (resume) 1748 sili_pwrite(ap, SILI_PREG_CTL_SET, SILI_PREG_CTL_RESUME); 1749 } 1750 1751 /* 1752 * Device notification to us (non-blocking) 1753 * 1754 * This is interrupt status SILIPREG_IST_SDB 1755 * 1756 * NOTE! On some parts notification bits can get set without 1757 * generating an interrupt. It is unclear whether this is 1758 * a bug in the PM (sending a DTOH device setbits with 'N' set 1759 * and 'I' not set), or a bug in the host controller. 1760 * 1761 * It only seems to occur under load. 
1762 */ 1763 if (sc->sc_flags & SILI_F_SSNTF) { 1764 u_int32_t data; 1765 const char *xstr; 1766 1767 data = sili_pread(ap, SILI_PREG_SNTF); 1768 if (is & SILI_PREG_IST_SDB) { 1769 sili_pwrite(ap, SILI_PREG_INT_STATUS, 1770 SILI_PREG_IST_SDB); 1771 is &= ~SILI_PREG_IST_SDB; 1772 xstr = " (no SDBS!)"; 1773 } else { 1774 xstr = ""; 1775 } 1776 if (data) { 1777 kprintf("%s: NOTIFY %08x%s\n", 1778 PORTNAME(ap), data, xstr); 1779 sili_pwrite(ap, SILI_PREG_SNTF, data); 1780 sili_cam_changed(ap, NULL, -1); 1781 } 1782 } 1783 1784 /* 1785 * Port change (hot-plug) (blockable). 1786 * 1787 * A PCS interrupt will occur on hot-plug once communication is 1788 * established. 1789 * 1790 * A PRCS interrupt will occur on hot-unplug (and possibly also 1791 * on hot-plug). 1792 * 1793 * XXX We can then check the CPS (Cold Presence State) bit, if 1794 * supported, to determine if a device is plugged in or not and do 1795 * the right thing. 1796 * 1797 * WARNING: A PCS interrupt is cleared by clearing DIAG_X, and 1798 * can also occur if an unsolicited COMINIT is received. 1799 * If this occurs command processing is automatically 1800 * stopped (CR goes inactive) and the port must be stopped 1801 * and restarted. 
1802 */ 1803 if (is & (SILI_PREG_IST_PHYRDYCHG | SILI_PREG_IST_DEVEXCHG)) { 1804 /* XXX */ 1805 sili_pwrite(ap, SILI_PREG_SERR, 1806 (SILI_PREG_SERR_DIAG_N | SILI_PREG_SERR_DIAG_X)); 1807 sili_pwrite(ap, SILI_PREG_INT_STATUS, 1808 is & (SILI_PREG_IST_PHYRDYCHG | SILI_PREG_IST_DEVEXCHG)); 1809 1810 is &= ~(SILI_PREG_IST_PHYRDYCHG | SILI_PREG_IST_DEVEXCHG); 1811 kprintf("%s: Port change\n", PORTNAME(ap)); 1812 1813 switch (sili_pread(ap, SILI_PREG_SSTS) & SILI_PREG_SSTS_DET) { 1814 case SILI_PREG_SSTS_DET_DEV: 1815 if (ap->ap_type == ATA_PORT_T_NONE && 1816 ap->ap_probe == ATA_PROBE_FAILED) { 1817 need = NEED_HOTPLUG_INSERT; 1818 goto fatal; 1819 } 1820 break; 1821 default: 1822 kprintf("%s: Device lost\n", PORTNAME(ap)); 1823 if (ap->ap_type != ATA_PORT_T_NONE) { 1824 need = NEED_HOTPLUG_REMOVE; 1825 goto fatal; 1826 } 1827 break; 1828 } 1829 } 1830 1831 /* 1832 * Check for remaining errors - they are fatal. (blockable) 1833 */ 1834 if (is & fatal_mask) { 1835 u_int32_t serr; 1836 1837 sili_pwrite(ap, SILI_PREG_INT_STATUS, is & fatal_mask); 1838 1839 serr = sili_pread(ap, SILI_PREG_SERR); 1840 kprintf("%s: Unrecoverable errors (IS: %b, SERR: %b), " 1841 "disabling port.\n", 1842 PORTNAME(ap), 1843 is, SILI_PFMT_INT_STATUS, 1844 serr, SILI_PFMT_SERR 1845 ); 1846 is &= ~fatal_mask; 1847 /* XXX try recovery first */ 1848 goto fatal; 1849 } 1850 1851 /* 1852 * Fail all outstanding commands if we know the port won't recover. 1853 * 1854 * We may have a ccb_at if the failed command is known and was 1855 * being sent to a device over a port multiplier (PM). In this 1856 * case if the port itself has not completely failed we fail just 1857 * the commands related to that target. 1858 */ 1859 if (ap->ap_state == AP_S_FATAL_ERROR && 1860 (ap->ap_active & ~ap->ap_expired)) { 1861 kprintf("%s: Fatal port error, expiring %08x\n", 1862 PORTNAME(ap), ap->ap_active & ~ap->ap_expired); 1863 fatal: 1864 ap->ap_state = AP_S_FATAL_ERROR; 1865 1866 /* 1867 * Error all the active slots. 
If running across a PM 1868 * try to error out just the slots related to the target. 1869 */ 1870 active = ap->ap_active & ~ap->ap_expired; 1871 1872 while (active) { 1873 slot = ffs(active) - 1; 1874 active &= ~(1 << slot); 1875 ccb = &ap->ap_ccbs[slot]; 1876 sili_core_timeout(ccb, 1); 1877 } 1878 } 1879 1880 /* 1881 * CCB completion (non blocking). 1882 * 1883 * CCB completion is detected by noticing the slot bit in 1884 * the port slot status register has cleared while the bit 1885 * is still set in our ap_active variable. 1886 * 1887 * When completing expired events we must remember to reinit 1888 * the port once everything is clear. 1889 * 1890 * Due to a single-level recursion when reading the log page, 1891 * it is possible for the slot to already have been cleared 1892 * for some expired tags, do not include expired tags in 1893 * the list. 1894 */ 1895 active = ap->ap_active & ~sili_pread(ap, SILI_PREG_SLOTST); 1896 active &= ~ap->ap_expired; 1897 1898 finished = active; 1899 while (active) { 1900 slot = ffs(active) - 1; 1901 ccb = &ap->ap_ccbs[slot]; 1902 1903 DPRINTF(SILI_D_INTR, "%s: slot %d is complete%s\n", 1904 PORTNAME(ap), slot, ccb->ccb_xa.state == ATA_S_ERROR ? 1905 " (error)" : ""); 1906 1907 active &= ~(1 << slot); 1908 1909 /* 1910 * XXX sync POSTREAD for return data? 1911 */ 1912 ap->ap_active &= ~(1 << ccb->ccb_slot); 1913 --ap->ap_active_cnt; 1914 1915 /* 1916 * Complete the ccb. If the ccb was marked expired it 1917 * may or may not have been cleared from the port, 1918 * make sure we mark it as having timed out. 1919 * 1920 * In a normal completion if AUTOSENSE is set we copy 1921 * the PRB LRAM rfis back to the rfis in host-memory. 1922 * 1923 * XXX Currently AUTOSENSE also forces exclusivity so we 1924 * can safely work around a hardware bug when reading 1925 * the LRAM. 
1926 */ 1927 if (ap->ap_expired & (1 << ccb->ccb_slot)) { 1928 ap->ap_expired &= ~(1 << ccb->ccb_slot); 1929 ccb->ccb_xa.state = ATA_S_TIMEOUT; 1930 ccb->ccb_done(ccb); 1931 ccb->ccb_xa.complete(&ccb->ccb_xa); 1932 } else { 1933 if (ccb->ccb_xa.state == ATA_S_ONCHIP) { 1934 ccb->ccb_xa.state = ATA_S_COMPLETE; 1935 if (ccb->ccb_xa.flags & ATA_F_AUTOSENSE) { 1936 memcpy(ccb->ccb_xa.rfis, 1937 &ccb->ccb_prb_lram->prb_d2h, 1938 sizeof(ccb->ccb_prb_lram->prb_d2h)); 1939 if (ccb->ccb_xa.state == ATA_S_TIMEOUT) 1940 ccb->ccb_xa.state = ATA_S_ERROR; 1941 } 1942 } 1943 ccb->ccb_done(ccb); 1944 } 1945 } 1946 if (is & SILI_PREG_IST_READY) { 1947 is &= ~SILI_PREG_IST_READY; 1948 sili_pwrite(ap, SILI_PREG_INT_DISABLE, SILI_PREG_INT_READY); 1949 sili_pwrite(ap, SILI_PREG_INT_STATUS, SILI_PREG_IST_READY); 1950 } 1951 1952 /* 1953 * If we had expired commands and were waiting for 1954 * remaining commands to complete, and they have now 1955 * completed, we can reinit the port. 1956 * 1957 * This will also clean out the expired commands. 1958 * The timeout code also calls sili_port_reinit() if 1959 * the only commands remaining after a timeout are all 1960 * now expired commands. 1961 * 1962 * Otherwise just reissue. 1963 */ 1964 if (ap->ap_expired && ap->ap_active == ap->ap_expired) { 1965 if (finished) 1966 sili_port_reinit(ap); 1967 } else { 1968 sili_issue_pending_commands(ap, NULL); 1969 } 1970 1971 /* 1972 * Cleanup. Will not be set if non-blocking. 1973 */ 1974 switch(need) { 1975 case NEED_HOTPLUG_INSERT: 1976 /* 1977 * A hot-plug insertion event has occured and all 1978 * outstanding commands have already been revoked. 1979 * 1980 * Don't recurse if this occurs while we are 1981 * resetting the port. 1982 * 1983 * Place the port in a continuous COMRESET state 1984 * until the INIT code gets to it. 
1985 */ 1986 kprintf("%s: HOTPLUG - Device inserted\n", 1987 PORTNAME(ap)); 1988 ap->ap_probe = ATA_PROBE_NEED_INIT; 1989 sili_cam_changed(ap, NULL, -1); 1990 break; 1991 case NEED_HOTPLUG_REMOVE: 1992 /* 1993 * A hot-plug removal event has occured and all 1994 * outstanding commands have already been revoked. 1995 * 1996 * Don't recurse if this occurs while we are 1997 * resetting the port. 1998 */ 1999 kprintf("%s: HOTPLUG - Device removed\n", 2000 PORTNAME(ap)); 2001 sili_port_hardstop(ap); 2002 /* ap_probe set to failed */ 2003 sili_cam_changed(ap, NULL, -1); 2004 break; 2005 default: 2006 break; 2007 } 2008 } 2009 2010 struct sili_ccb * 2011 sili_get_ccb(struct sili_port *ap) 2012 { 2013 struct sili_ccb *ccb; 2014 2015 lockmgr(&ap->ap_ccb_lock, LK_EXCLUSIVE); 2016 ccb = TAILQ_FIRST(&ap->ap_ccb_free); 2017 if (ccb != NULL) { 2018 KKASSERT(ccb->ccb_xa.state == ATA_S_PUT); 2019 TAILQ_REMOVE(&ap->ap_ccb_free, ccb, ccb_entry); 2020 ccb->ccb_xa.state = ATA_S_SETUP; 2021 ccb->ccb_xa.at = NULL; 2022 } 2023 lockmgr(&ap->ap_ccb_lock, LK_RELEASE); 2024 2025 return (ccb); 2026 } 2027 2028 void 2029 sili_put_ccb(struct sili_ccb *ccb) 2030 { 2031 struct sili_port *ap = ccb->ccb_port; 2032 2033 lockmgr(&ap->ap_ccb_lock, LK_EXCLUSIVE); 2034 ccb->ccb_xa.state = ATA_S_PUT; 2035 ++ccb->ccb_xa.serial; 2036 TAILQ_INSERT_TAIL(&ap->ap_ccb_free, ccb, ccb_entry); 2037 lockmgr(&ap->ap_ccb_lock, LK_RELEASE); 2038 } 2039 2040 struct sili_ccb * 2041 sili_get_err_ccb(struct sili_port *ap) 2042 { 2043 struct sili_ccb *err_ccb; 2044 2045 KKASSERT((ap->ap_flags & AP_F_ERR_CCB_RESERVED) == 0); 2046 ap->ap_flags |= AP_F_ERR_CCB_RESERVED; 2047 2048 /* 2049 * Grab a CCB to use for error recovery. This should never fail, as 2050 * we ask atascsi to reserve one for us at init time. 
2051 */ 2052 err_ccb = ap->ap_err_ccb; 2053 KKASSERT(err_ccb != NULL); 2054 err_ccb->ccb_xa.flags = 0; 2055 err_ccb->ccb_done = sili_empty_done; 2056 2057 return err_ccb; 2058 } 2059 2060 void 2061 sili_put_err_ccb(struct sili_ccb *ccb) 2062 { 2063 struct sili_port *ap = ccb->ccb_port; 2064 2065 KKASSERT((ap->ap_flags & AP_F_ERR_CCB_RESERVED) != 0); 2066 2067 KKASSERT(ccb == ap->ap_err_ccb); 2068 2069 ap->ap_flags &= ~AP_F_ERR_CCB_RESERVED; 2070 } 2071 2072 /* 2073 * Read log page to get NCQ error. 2074 * 2075 * Return 0 on success 2076 */ 2077 void 2078 sili_port_read_ncq_error(struct sili_port *ap, int target) 2079 { 2080 struct sili_ccb *ccb; 2081 struct ata_fis_h2d *fis; 2082 int status; 2083 2084 DPRINTF(SILI_D_VERBOSE, "%s: read log page\n", PORTNAME(ap)); 2085 2086 /* Prep error CCB for READ LOG EXT, page 10h, 1 sector. */ 2087 ccb = sili_get_err_ccb(ap); 2088 ccb->ccb_done = sili_empty_done; 2089 ccb->ccb_xa.flags = ATA_F_NOWAIT | ATA_F_READ | ATA_F_POLL; 2090 ccb->ccb_xa.data = ap->ap_err_scratch; 2091 ccb->ccb_xa.datalen = 512; 2092 ccb->ccb_xa.complete = sili_dummy_done; 2093 ccb->ccb_xa.at = &ap->ap_ata[target]; 2094 fis = &ccb->ccb_prb->prb_h2d; 2095 bzero(fis, sizeof(*fis)); 2096 2097 fis->type = ATA_FIS_TYPE_H2D; 2098 fis->flags = ATA_H2D_FLAGS_CMD | target; 2099 fis->command = ATA_C_READ_LOG_EXT; 2100 fis->lba_low = 0x10; /* queued error log page (10h) */ 2101 fis->sector_count = 1; /* number of sectors (1) */ 2102 fis->sector_count_exp = 0; 2103 fis->lba_mid = 0; /* starting offset */ 2104 fis->lba_mid_exp = 0; 2105 fis->device = 0; 2106 2107 /* 2108 * NOTE: Must use sili_quick_timeout() because we hold the err_ccb 2109 */ 2110 if (sili_load_prb(ccb) != 0) { 2111 status = ATA_S_ERROR; 2112 } else { 2113 ccb->ccb_xa.state = ATA_S_PENDING; 2114 status = sili_poll(ccb, 1000, sili_quick_timeout); 2115 } 2116 2117 /* 2118 * Just spew if it fails, there isn't much we can do at this point. 
2119 */ 2120 if (status != ATA_S_COMPLETE) { 2121 kprintf("%s: log page read failed, slot %d was still active.\n", 2122 ATANAME(ap, ccb->ccb_xa.at), ccb->ccb_slot); 2123 } 2124 2125 /* Done with the error CCB now. */ 2126 sili_unload_prb(ccb); 2127 sili_put_err_ccb(ccb); 2128 2129 /* Extract failed register set and tags from the scratch space. */ 2130 if (status == ATA_S_COMPLETE) { 2131 struct ata_log_page_10h *log; 2132 int err_slot; 2133 2134 log = (struct ata_log_page_10h *)ap->ap_err_scratch; 2135 if (log->err_regs.type & ATA_LOG_10H_TYPE_NOTQUEUED) { 2136 /* 2137 * Not queued bit was set - wasn't an NCQ error? 2138 * 2139 * XXX This bit seems to be set a lot even for NCQ 2140 * errors? 2141 */ 2142 } else { 2143 /* 2144 * Copy back the log record as a D2H register FIS. 2145 */ 2146 err_slot = log->err_regs.type & 2147 ATA_LOG_10H_TYPE_TAG_MASK; 2148 ccb = &ap->ap_ccbs[err_slot]; 2149 if (ap->ap_expired & (1 << ccb->ccb_slot)) { 2150 kprintf("%s: read NCQ error page slot=%d\n", 2151 ATANAME(ap, ccb->ccb_xa.at), err_slot 2152 ); 2153 memcpy(&ccb->ccb_prb->prb_d2h, &log->err_regs, 2154 sizeof(struct ata_fis_d2h)); 2155 ccb->ccb_prb->prb_d2h.type = ATA_FIS_TYPE_D2H; 2156 ccb->ccb_prb->prb_d2h.flags = 0; 2157 if (ccb->ccb_xa.state == ATA_S_TIMEOUT) 2158 ccb->ccb_xa.state = ATA_S_ERROR; 2159 } else { 2160 kprintf("%s: read NCQ error page slot=%d, " 2161 "slot does not match any cmds\n", 2162 ATANAME(ccb->ccb_port, ccb->ccb_xa.at), 2163 err_slot 2164 ); 2165 } 2166 } 2167 } 2168 } 2169 2170 /* 2171 * Allocate memory for various structures DMAd by hardware. The maximum 2172 * number of segments for these tags is 1 so the DMA memory will have a 2173 * single physical base address. 
2174 */ 2175 struct sili_dmamem * 2176 sili_dmamem_alloc(struct sili_softc *sc, bus_dma_tag_t tag) 2177 { 2178 struct sili_dmamem *adm; 2179 int error; 2180 2181 adm = kmalloc(sizeof(*adm), M_DEVBUF, M_INTWAIT | M_ZERO); 2182 2183 error = bus_dmamem_alloc(tag, (void **)&adm->adm_kva, 2184 BUS_DMA_ZERO, &adm->adm_map); 2185 if (error == 0) { 2186 adm->adm_tag = tag; 2187 error = bus_dmamap_load(tag, adm->adm_map, 2188 adm->adm_kva, 2189 bus_dma_tag_getmaxsize(tag), 2190 sili_dmamem_saveseg, &adm->adm_busaddr, 2191 0); 2192 } 2193 if (error) { 2194 if (adm->adm_map) { 2195 bus_dmamap_destroy(tag, adm->adm_map); 2196 adm->adm_map = NULL; 2197 adm->adm_tag = NULL; 2198 adm->adm_kva = NULL; 2199 } 2200 kfree(adm, M_DEVBUF); 2201 adm = NULL; 2202 } 2203 return (adm); 2204 } 2205 2206 static 2207 void 2208 sili_dmamem_saveseg(void *info, bus_dma_segment_t *segs, int nsegs, int error) 2209 { 2210 KKASSERT(error == 0); 2211 KKASSERT(nsegs == 1); 2212 *(bus_addr_t *)info = segs->ds_addr; 2213 } 2214 2215 2216 void 2217 sili_dmamem_free(struct sili_softc *sc, struct sili_dmamem *adm) 2218 { 2219 if (adm->adm_map) { 2220 bus_dmamap_unload(adm->adm_tag, adm->adm_map); 2221 bus_dmamap_destroy(adm->adm_tag, adm->adm_map); 2222 adm->adm_map = NULL; 2223 adm->adm_tag = NULL; 2224 adm->adm_kva = NULL; 2225 } 2226 kfree(adm, M_DEVBUF); 2227 } 2228 2229 u_int32_t 2230 sili_read(struct sili_softc *sc, bus_size_t r) 2231 { 2232 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4, 2233 BUS_SPACE_BARRIER_READ); 2234 return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, r)); 2235 } 2236 2237 void 2238 sili_write(struct sili_softc *sc, bus_size_t r, u_int32_t v) 2239 { 2240 bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v); 2241 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4, 2242 BUS_SPACE_BARRIER_WRITE); 2243 } 2244 2245 u_int32_t 2246 sili_pread(struct sili_port *ap, bus_size_t r) 2247 { 2248 bus_space_barrier(ap->ap_sc->sc_iot, ap->ap_ioh, r, 4, 2249 BUS_SPACE_BARRIER_READ); 2250 return 
(bus_space_read_4(ap->ap_sc->sc_iot, ap->ap_ioh, r)); 2251 } 2252 2253 void 2254 sili_pwrite(struct sili_port *ap, bus_size_t r, u_int32_t v) 2255 { 2256 bus_space_write_4(ap->ap_sc->sc_iot, ap->ap_ioh, r, v); 2257 bus_space_barrier(ap->ap_sc->sc_iot, ap->ap_ioh, r, 4, 2258 BUS_SPACE_BARRIER_WRITE); 2259 } 2260 2261 /* 2262 * Wait up to (timeout) milliseconds for the masked port register to 2263 * match the target. 2264 * 2265 * Timeout is in milliseconds. 2266 */ 2267 int 2268 sili_pwait_eq(struct sili_port *ap, int timeout, 2269 bus_size_t r, u_int32_t mask, u_int32_t target) 2270 { 2271 int t; 2272 2273 /* 2274 * Loop hard up to 100uS 2275 */ 2276 for (t = 0; t < 100; ++t) { 2277 if ((sili_pread(ap, r) & mask) == target) 2278 return (0); 2279 sili_os_hardsleep(1); /* us */ 2280 } 2281 2282 do { 2283 timeout -= sili_os_softsleep(); 2284 if ((sili_pread(ap, r) & mask) == target) 2285 return (0); 2286 } while (timeout > 0); 2287 return (1); 2288 } 2289 2290 int 2291 sili_wait_ne(struct sili_softc *sc, bus_size_t r, u_int32_t mask, 2292 u_int32_t target) 2293 { 2294 int t; 2295 2296 /* 2297 * Loop hard up to 100uS 2298 */ 2299 for (t = 0; t < 100; ++t) { 2300 if ((sili_read(sc, r) & mask) != target) 2301 return (0); 2302 sili_os_hardsleep(1); /* us */ 2303 } 2304 2305 /* 2306 * And one millisecond the slow way 2307 */ 2308 t = 1000; 2309 do { 2310 t -= sili_os_softsleep(); 2311 if ((sili_read(sc, r) & mask) != target) 2312 return (0); 2313 } while (t > 0); 2314 2315 return (1); 2316 } 2317 2318 2319 /* 2320 * Acquire an ata transfer. 2321 * 2322 * Pass a NULL at for direct-attached transfers, and a non-NULL at for 2323 * targets that go through the port multiplier. 
2324 */ 2325 struct ata_xfer * 2326 sili_ata_get_xfer(struct sili_port *ap, struct ata_port *at) 2327 { 2328 struct sili_ccb *ccb; 2329 2330 ccb = sili_get_ccb(ap); 2331 if (ccb == NULL) { 2332 DPRINTF(SILI_D_XFER, "%s: sili_ata_get_xfer: NULL ccb\n", 2333 PORTNAME(ap)); 2334 return (NULL); 2335 } 2336 2337 DPRINTF(SILI_D_XFER, "%s: sili_ata_get_xfer got slot %d\n", 2338 PORTNAME(ap), ccb->ccb_slot); 2339 2340 bzero(ccb->ccb_xa.fis, sizeof(*ccb->ccb_xa.fis)); 2341 ccb->ccb_xa.at = at; 2342 ccb->ccb_xa.fis->type = ATA_FIS_TYPE_H2D; 2343 2344 return (&ccb->ccb_xa); 2345 } 2346 2347 void 2348 sili_ata_put_xfer(struct ata_xfer *xa) 2349 { 2350 struct sili_ccb *ccb = (struct sili_ccb *)xa; 2351 2352 DPRINTF(SILI_D_XFER, "sili_ata_put_xfer slot %d\n", ccb->ccb_slot); 2353 2354 sili_put_ccb(ccb); 2355 } 2356 2357 int 2358 sili_ata_cmd(struct ata_xfer *xa) 2359 { 2360 struct sili_ccb *ccb = (struct sili_ccb *)xa; 2361 2362 KKASSERT(xa->state == ATA_S_SETUP); 2363 2364 if (ccb->ccb_port->ap_state == AP_S_FATAL_ERROR) 2365 goto failcmd; 2366 #if 0 2367 kprintf("%s: started std command %b ccb %d ccb_at %p %d\n", 2368 ATANAME(ccb->ccb_port, ccb->ccb_xa.at), 2369 sili_pread(ccb->ccb_port, SILI_PREG_CMD), SILI_PFMT_CMD, 2370 ccb->ccb_slot, 2371 ccb->ccb_xa.at, 2372 ccb->ccb_xa.at ? 
ccb->ccb_xa.at->at_target : -1); 2373 #endif 2374 2375 ccb->ccb_done = sili_ata_cmd_done; 2376 2377 if (sili_load_prb(ccb) != 0) 2378 goto failcmd; 2379 2380 xa->state = ATA_S_PENDING; 2381 2382 if (xa->flags & ATA_F_POLL) 2383 return (sili_poll(ccb, xa->timeout, sili_ata_cmd_timeout)); 2384 2385 crit_enter(); 2386 KKASSERT((xa->flags & ATA_F_TIMEOUT_EXPIRED) == 0); 2387 xa->flags |= ATA_F_TIMEOUT_DESIRED; 2388 sili_start(ccb); 2389 crit_exit(); 2390 return (xa->state); 2391 2392 failcmd: 2393 crit_enter(); 2394 xa->state = ATA_S_ERROR; 2395 xa->complete(xa); 2396 crit_exit(); 2397 return (ATA_S_ERROR); 2398 } 2399 2400 static void 2401 sili_ata_cmd_done(struct sili_ccb *ccb) 2402 { 2403 struct ata_xfer *xa = &ccb->ccb_xa; 2404 int serial; 2405 2406 /* 2407 * NOTE: callout does not lock port and may race us modifying 2408 * the flags, so make sure its stopped. 2409 */ 2410 if (xa->flags & ATA_F_TIMEOUT_RUNNING) { 2411 serial = ccb->ccb_xa.serial; 2412 callout_stop_sync(&ccb->ccb_timeout); 2413 if (serial != ccb->ccb_xa.serial) { 2414 kprintf("%s: Warning: timeout race ccb %p\n", 2415 PORTNAME(ccb->ccb_port), ccb); 2416 return; 2417 } 2418 xa->flags &= ~ATA_F_TIMEOUT_RUNNING; 2419 } 2420 xa->flags &= ~(ATA_F_TIMEOUT_DESIRED | ATA_F_TIMEOUT_EXPIRED); 2421 2422 KKASSERT(xa->state != ATA_S_ONCHIP); 2423 sili_unload_prb(ccb); 2424 2425 if (xa->state != ATA_S_TIMEOUT) 2426 xa->complete(xa); 2427 } 2428 2429 /* 2430 * Timeout from callout, MPSAFE - nothing can mess with the CCB's flags 2431 * while the callout is runing. 2432 * 2433 * We can't safely get the port lock here or delay, we could block 2434 * the callout thread. 
2435 */ 2436 static void 2437 sili_ata_cmd_timeout_unserialized(void *arg) 2438 { 2439 struct sili_ccb *ccb = arg; 2440 struct sili_port *ap = ccb->ccb_port; 2441 2442 ccb->ccb_xa.flags &= ~ATA_F_TIMEOUT_RUNNING; 2443 ccb->ccb_xa.flags |= ATA_F_TIMEOUT_EXPIRED; 2444 sili_os_signal_port_thread(ap, AP_SIGF_TIMEOUT); 2445 } 2446 2447 void 2448 sili_ata_cmd_timeout(struct sili_ccb *ccb) 2449 { 2450 sili_core_timeout(ccb, 0); 2451 } 2452 2453 /* 2454 * Timeout code, typically called when the port command processor is running. 2455 * 2456 * Returns 0 if all timeout processing completed, non-zero if it is still 2457 * in progress. 2458 */ 2459 static 2460 int 2461 sili_core_timeout(struct sili_ccb *ccb, int really_error) 2462 { 2463 struct ata_xfer *xa = &ccb->ccb_xa; 2464 struct sili_port *ap = ccb->ccb_port; 2465 struct ata_port *at; 2466 2467 at = ccb->ccb_xa.at; 2468 2469 kprintf("%s: CMD %s state=%d slot=%d\n" 2470 "\t active=%08x\n" 2471 "\texpired=%08x\n" 2472 "\thactive=%08x\n", 2473 ATANAME(ap, at), 2474 (really_error ? "ERROR" : "TIMEOUT"), 2475 ccb->ccb_xa.state, ccb->ccb_slot, 2476 ap->ap_active, 2477 ap->ap_expired, 2478 sili_pread(ap, SILI_PREG_SLOTST) 2479 ); 2480 2481 /* 2482 * NOTE: Timeout will not be running if the command was polled. 2483 * If we got here at least one of these flags should be set. 2484 * 2485 * However, it might be running if we are called from the 2486 * interrupt error handling code. 
2487 */ 2488 KKASSERT(xa->flags & (ATA_F_POLL | ATA_F_TIMEOUT_DESIRED | 2489 ATA_F_TIMEOUT_RUNNING)); 2490 if (xa->flags & ATA_F_TIMEOUT_RUNNING) { 2491 callout_stop(&ccb->ccb_timeout); 2492 xa->flags &= ~ATA_F_TIMEOUT_RUNNING; 2493 } 2494 xa->flags &= ~ATA_F_TIMEOUT_EXPIRED; 2495 2496 if (ccb->ccb_xa.state == ATA_S_PENDING) { 2497 TAILQ_REMOVE(&ap->ap_ccb_pending, ccb, ccb_entry); 2498 ccb->ccb_xa.state = ATA_S_TIMEOUT; 2499 ccb->ccb_done(ccb); 2500 xa->complete(xa); 2501 sili_issue_pending_commands(ap, NULL); 2502 return(1); 2503 } 2504 if (ccb->ccb_xa.state != ATA_S_ONCHIP) { 2505 kprintf("%s: Unexpected state during timeout: %d\n", 2506 ATANAME(ap, at), ccb->ccb_xa.state); 2507 return(1); 2508 } 2509 2510 /* 2511 * We can't process timeouts while other commands are running. 2512 */ 2513 ap->ap_expired |= 1 << ccb->ccb_slot; 2514 2515 if (ap->ap_active != ap->ap_expired) { 2516 kprintf("%s: Deferred timeout until its safe, slot %d\n", 2517 ATANAME(ap, at), ccb->ccb_slot); 2518 return(1); 2519 } 2520 2521 /* 2522 * We have to issue a Port reinit. We don't read an error log 2523 * page for timeouts. Reiniting the port will clear all pending 2524 * commands. 2525 */ 2526 sili_port_reinit(ap); 2527 return(0); 2528 } 2529 2530 /* 2531 * Used by the softreset, pm_port_probe, and read_ncq_error only, in very 2532 * specialized, controlled circumstances. 
2533 */ 2534 void 2535 sili_quick_timeout(struct sili_ccb *ccb) 2536 { 2537 struct sili_port *ap = ccb->ccb_port; 2538 2539 switch (ccb->ccb_xa.state) { 2540 case ATA_S_PENDING: 2541 TAILQ_REMOVE(&ap->ap_ccb_pending, ccb, ccb_entry); 2542 ccb->ccb_xa.state = ATA_S_TIMEOUT; 2543 break; 2544 case ATA_S_ONCHIP: 2545 KKASSERT((ap->ap_active & ~ap->ap_expired) == 2546 (1 << ccb->ccb_slot)); 2547 ccb->ccb_xa.state = ATA_S_TIMEOUT; 2548 ap->ap_active &= ~(1 << ccb->ccb_slot); 2549 KKASSERT(ap->ap_active_cnt > 0); 2550 --ap->ap_active_cnt; 2551 sili_port_reinit(ap); 2552 break; 2553 default: 2554 panic("%s: sili_quick_timeout: ccb in bad state %d", 2555 ATANAME(ap, ccb->ccb_xa.at), ccb->ccb_xa.state); 2556 } 2557 } 2558 2559 static void 2560 sili_dummy_done(struct ata_xfer *xa) 2561 { 2562 } 2563 2564 static void 2565 sili_empty_done(struct sili_ccb *ccb) 2566 { 2567 } 2568