/*
 * (MPSAFE)
 *
 * Copyright (c) 2009 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Matthew Dillon <dillon@backplane.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *
 * Copyright (c) 2006 David Gwynne <dlg@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 *
 *
 *
 * $OpenBSD: sili.c,v 1.147 2009/02/16 21:19:07 miod Exp $
 */

#include "sili.h"

void	sili_port_interrupt_enable(struct sili_port *ap);
void	sili_port_interrupt_redisable(struct sili_port *ap);
void	sili_port_interrupt_reenable(struct sili_port *ap);

int	sili_load_prb(struct sili_ccb *);
void	sili_unload_prb(struct sili_ccb *);
static void sili_load_prb_callback(void *info, bus_dma_segment_t *segs,
		    int nsegs, int error);
void	sili_start(struct sili_ccb *);
static void sili_port_reinit(struct sili_port *ap);
int	sili_port_softreset(struct sili_port *ap);
int	sili_port_hardreset(struct sili_port *ap);
void	sili_port_hardstop(struct sili_port *ap);
void	sili_port_listen(struct sili_port *ap);

static void sili_ata_cmd_timeout_unserialized(void *);
static int sili_core_timeout(struct sili_ccb *ccb, int really_error);
void	sili_check_active_timeouts(struct sili_port *ap);

void	sili_issue_pending_commands(struct sili_port *ap, struct sili_ccb *ccb);

void	sili_port_read_ncq_error(struct sili_port *, int);

struct sili_dmamem *sili_dmamem_alloc(struct sili_softc *, bus_dma_tag_t tag);
void	sili_dmamem_free(struct sili_softc *, struct sili_dmamem *);
static void sili_dmamem_saveseg(void *info, bus_dma_segment_t *segs,
		    int nsegs, int error);

static void sili_dummy_done(struct ata_xfer *xa);
static void sili_empty_done(struct sili_ccb *ccb);
static void sili_ata_cmd_done(struct sili_ccb *ccb);

/*
 * Initialize the global SILI hardware.  This code does not set up any of
 * its ports.
 */
int
sili_init(struct sili_softc *sc)
{
	DPRINTF(SILI_D_VERBOSE, " GHC 0x%b",
		sili_read(sc, SILI_REG_GHC), SILI_FMT_GHC);

	/*
	 * Reset the entire chip.  This also resets all ports.
	 *
	 * The spec doesn't say anything about how long we have to
	 * wait, so wait 10ms.
	 */
	sili_write(sc, SILI_REG_GCTL, SILI_REG_GCTL_GRESET);
	sili_os_sleep(10);
	sili_write(sc, SILI_REG_GCTL, 0);
	sili_os_sleep(10);

	return (0);
}

/*
 * Allocate and initialize an SILI port.
 */
int
sili_port_alloc(struct sili_softc *sc, u_int port)
{
	struct sili_port	*ap;
	struct ata_port		*at;
	struct sili_prb		*prb;
	struct sili_ccb		*ccb;
	int	rc = ENOMEM;
	int	error;
	int	i;

	ap = kmalloc(sizeof(*ap), M_DEVBUF, M_WAITOK | M_ZERO);
	ap->ap_err_scratch = kmalloc(512, M_DEVBUF, M_WAITOK | M_ZERO);

	ksnprintf(ap->ap_name, sizeof(ap->ap_name), "%s%d.%d",
		  device_get_name(sc->sc_dev),
		  device_get_unit(sc->sc_dev),
		  port);
	sc->sc_ports[port] = ap;

	/*
	 * Allocate enough so we never have to reallocate, it makes
	 * it easier.
	 *
	 * ap_pmcount will be reduced by the scan if we encounter the
	 * port multiplier port prior to target 15.
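	 *
	 * The ap_ata[] array is sized to SILI_MAX_PMPORTS up front so that
	 * a directly-attached device (target 0) and any devices sitting
	 * behind a port multiplier can all be tracked in the same array.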
	 */
	if (ap->ap_ata == NULL) {
		ap->ap_ata = kmalloc(sizeof(*ap->ap_ata) * SILI_MAX_PMPORTS,
				     M_DEVBUF, M_INTWAIT | M_ZERO);
		for (i = 0; i < SILI_MAX_PMPORTS; ++i) {
			at = &ap->ap_ata[i];
			at->at_sili_port = ap;
			at->at_target = i;
			at->at_probe = ATA_PROBE_NEED_INIT;
			at->at_features |= ATA_PORT_F_RESCAN;
			ksnprintf(at->at_name, sizeof(at->at_name),
				  "%s.%d", ap->ap_name, i);
		}
	}
	if (bus_space_subregion(sc->sc_piot, sc->sc_pioh,
				SILI_PORT_REGION(port), SILI_PORT_SIZE,
				&ap->ap_ioh) != 0) {
		device_printf(sc->sc_dev,
			      "unable to create register window for port %d\n",
			      port);
		goto freeport;
	}

	ap->ap_sc = sc;
	ap->ap_num = port;
	ap->ap_probe = ATA_PROBE_NEED_INIT;
	TAILQ_INIT(&ap->ap_ccb_free);
	TAILQ_INIT(&ap->ap_ccb_pending);
	lockinit(&ap->ap_ccb_lock, "silipo", 0, 0);

	/* Disable port interrupts */
	sili_pwrite(ap, SILI_PREG_INT_DISABLE, SILI_PREG_INT_MASK);

	/*
	 * Reset the port.  This is similar to a Device Reset but far
	 * more invasive.  We use Device Reset in our hardreset function.
	 * This function also does the same OOB initialization sequence
	 * that Device Reset does.
	 *
	 * NOTE: SILI_PREG_STATUS_READY will not be asserted unless and until
	 *	 a device is connected to the port, so we can't use it to
	 *	 verify that the port exists.
	 */
	sili_pwrite(ap, SILI_PREG_CTL_SET, SILI_PREG_CTL_RESET);
	if (sili_pread(ap, SILI_PREG_STATUS) & SILI_PREG_STATUS_READY) {
		device_printf(sc->sc_dev,
			      "Port %d will not go into reset\n", port);
		goto freeport;
	}
	sili_os_sleep(10);
	sili_pwrite(ap, SILI_PREG_CTL_CLR, SILI_PREG_CTL_RESET);

	/*
	 * Allocate the SGE Table
	 */
	ap->ap_dmamem_prbs = sili_dmamem_alloc(sc, sc->sc_tag_prbs);
	if (ap->ap_dmamem_prbs == NULL) {
		kprintf("%s: NOSGET\n", PORTNAME(ap));
		goto freeport;
	}

	/*
	 * Set up the SGE table base address
	 */
	ap->ap_prbs = (struct sili_prb *)SILI_DMA_KVA(ap->ap_dmamem_prbs);

	/*
	 * Allocate a CCB for each command slot
	 */
	ap->ap_ccbs = kmalloc(sizeof(struct sili_ccb) * sc->sc_ncmds, M_DEVBUF,
			      M_WAITOK | M_ZERO);

	/*
	 * Most structures are in the port BAR.  Assign convenient
	 * pointers in the CCBs
	 */
	for (i = 0; i < sc->sc_ncmds; i++) {
		ccb = &ap->ap_ccbs[i];

		error = bus_dmamap_create(sc->sc_tag_data, BUS_DMA_ALLOCNOW,
					  &ccb->ccb_dmamap);
		if (error) {
			device_printf(sc->sc_dev,
				      "unable to create dmamap for port %d "
				      "ccb %d\n", port, i);
			goto freeport;
		}

		/*
		 * WARNING!!!  Access to the rfis is only allowed under very
		 *	       carefully controlled circumstances because it
		 *	       is located in the LRAM and reading from the
		 *	       LRAM has hardware issues which can blow the
		 *	       port up.  I kid you not (from Linux, and
		 *	       verified by testing here).
		 */
		callout_init(&ccb->ccb_timeout);
		ccb->ccb_slot = i;
		ccb->ccb_port = ap;
		ccb->ccb_prb = &ap->ap_prbs[i];
		ccb->ccb_prb_paddr = SILI_DMA_DVA(ap->ap_dmamem_prbs) +
				     sizeof(*ccb->ccb_prb) * i;
		ccb->ccb_xa.fis = &ccb->ccb_prb->prb_h2d;
		prb = bus_space_kva(ap->ap_sc->sc_iot, ap->ap_ioh,
				    SILI_PREG_LRAM_SLOT(i));
		ccb->ccb_prb_lram = prb;
		/*
		 * Point our rfis to host-memory instead of the LRAM PRB.
		 * It will be copied back if ATA_F_AUTOSENSE is set.  The
		 * LRAM PRB is buggy.
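		 *
		 * (The copy back from the LRAM PRB into this host-memory
		 * rfis is done by sili_port_intr() on normal completion of
		 * a command with ATA_F_AUTOSENSE set.)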
		 */
		/*ccb->ccb_xa.rfis = &prb->prb_d2h;*/
		ccb->ccb_xa.rfis = (void *)ccb->ccb_xa.fis;

		ccb->ccb_xa.packetcmd = prb_packet(ccb->ccb_prb);
		ccb->ccb_xa.tag = i;

		ccb->ccb_xa.state = ATA_S_COMPLETE;

		/*
		 * Reserve CCB[1] as the error CCB.  It doesn't matter
		 * which one we use for the Sili controllers.
		 */
		if (i == 1)
			ap->ap_err_ccb = ccb;
		else
			sili_put_ccb(ccb);
	}
	/*
	 * Do not call sili_port_init() here, the helper thread will
	 * call it for the parallel probe
	 */
	sili_os_start_port(ap);
	return(0);
freeport:
	sili_port_free(sc, port);
	return (rc);
}

/*
 * This is called once by the low level attach (from the helper thread)
 * to get the port state machine rolling, and typically only called again
 * on a hot-plug insertion event.
 *
 * This is called for PM attachments and hot-plug insertion events, and
 * typically not called again until after an unplug/replug sequence.
 *
 * Returns 0 if a device is successfully detected.
 */
int
sili_port_init(struct sili_port *ap)
{
	/*
	 * Do a very hard reset of the port
	 */
	sili_pwrite(ap, SILI_PREG_CTL_SET, SILI_PREG_CTL_RESET);
	sili_os_sleep(10);
	sili_pwrite(ap, SILI_PREG_CTL_CLR, SILI_PREG_CTL_RESET);

	/*
	 * Register initialization
	 */
	sili_pwrite(ap, SILI_PREG_FIFO_CTL,
		    SILI_PREG_FIFO_CTL_ENCODE(1024, 1024));
	sili_pwrite(ap, SILI_PREG_CTL_CLR, SILI_PREG_CTL_32BITDMA |
					   SILI_PREG_CTL_PMA);
	sili_pwrite(ap, SILI_PREG_CTL_SET, SILI_PREG_CTL_NOAUTOCC);
	if (ap->ap_sc->sc_flags & SILI_F_SSNTF)
		sili_pwrite(ap, SILI_PREG_SNTF, -1);
	ap->ap_probe = ATA_PROBE_NEED_HARD_RESET;
	ap->ap_pmcount = 0;
	sili_port_interrupt_enable(ap);
	return (0);
}

/*
 * Handle an errored port.  This routine is called when the only
 * commands left on the queue are expired, meaning we can safely
 * go through a port init to clear its state.
 *
 * We complete the expired CCBs and then restart the queue.
 */
static
void
sili_port_reinit(struct sili_port *ap)
{
	struct sili_ccb	*ccb;
	struct ata_port	*at;
	int slot;
	int target;
	u_int32_t data;

	if (bootverbose || 1) {
		kprintf("%s: reiniting port after error reent=%d "
			"expired=%08x\n",
			PORTNAME(ap),
			(ap->ap_flags & AP_F_REINIT_ACTIVE),
			ap->ap_expired);
	}

	/*
	 * Clear port resume, clear bits 16:13 in the port device status
	 * register.  This is from the data sheet.
	 *
	 * Data sheet does not specify a delay but it seems prudent.
	 */
	sili_pwrite(ap, SILI_PREG_CTL_CLR, SILI_PREG_CTL_RESUME);
	sili_os_sleep(10);
	for (target = 0; target < SILI_MAX_PMPORTS; ++target) {
		data = sili_pread(ap, SILI_PREG_PM_STATUS(target));
		data &= ~(SILI_PREG_PM_STATUS_SERVICE |
			  SILI_PREG_PM_STATUS_LEGACY |
			  SILI_PREG_PM_STATUS_NATIVE |
			  SILI_PREG_PM_STATUS_VBSY);
		sili_pwrite(ap, SILI_PREG_PM_STATUS(target), data);
		sili_pwrite(ap, SILI_PREG_PM_QACTIVE(target), 0);
	}

	/*
	 * Issue a Port Initialize and wait for it to clear.  This flushes
	 * commands but does not reset the port.  Then wait for port ready.
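	 *
	 * NOTE: Port Initialize is the light-weight recovery path; the
	 *	 full port reset (SILI_PREG_CTL_RESET) used by
	 *	 sili_port_hardstop() would take the whole port down.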
	 */
	sili_pwrite(ap, SILI_PREG_CTL_SET, SILI_PREG_CTL_INIT);
	if (sili_pwait_clr_to(ap, 5000, SILI_PREG_STATUS, SILI_PREG_CTL_INIT)) {
		kprintf("%s: Unable to reinit, port failed\n",
			PORTNAME(ap));
	}
	if (sili_pwait_set(ap, SILI_PREG_STATUS, SILI_PREG_STATUS_READY)) {
		kprintf("%s: Unable to reinit, port will not come ready\n",
			PORTNAME(ap));
	}

	/*
	 * If reentrant, stop here.  Otherwise the state for the original
	 * sili_port_reinit() will get ripped out from under it.
	 */
	if (ap->ap_flags & AP_F_REINIT_ACTIVE)
		return;
	ap->ap_flags |= AP_F_REINIT_ACTIVE;

	/*
	 * Read the LOG ERROR page for targets that returned a specific
	 * D2H FIS with ERR set.
	 *
	 * Don't bother if we are already using the error CCB.
	 */
	if ((ap->ap_flags & AP_F_ERR_CCB_RESERVED) == 0) {
		for (target = 0; target < SILI_MAX_PMPORTS; ++target) {
			at = &ap->ap_ata[target];
			if (at->at_features & ATA_PORT_F_READLOG) {
				at->at_features &= ~ATA_PORT_F_READLOG;
				sili_port_read_ncq_error(ap, target);
			}
		}
	}

	/*
	 * Finally clean out the expired commands, we've probed the error
	 * status (or hopefully probed the error status).  Well, ok,
	 * we probably didn't XXX.
	 */
	while (ap->ap_expired) {
		slot = ffs(ap->ap_expired) - 1;
		ap->ap_expired &= ~(1 << slot);
		KKASSERT(ap->ap_active & (1 << slot));
		ap->ap_active &= ~(1 << slot);
		--ap->ap_active_cnt;
		ccb = &ap->ap_ccbs[slot];
		ccb->ccb_xa.state = ATA_S_TIMEOUT;
		ccb->ccb_done(ccb);
		ccb->ccb_xa.complete(&ccb->ccb_xa);
	}
	ap->ap_flags &= ~AP_F_REINIT_ACTIVE;

	/*
	 * Wow.  All done.  We can get the port moving again.
	 */
	if (ap->ap_probe == ATA_PROBE_FAILED) {
		kprintf("%s: reinit failed, port is dead\n", PORTNAME(ap));
		while ((ccb = TAILQ_FIRST(&ap->ap_ccb_pending)) != NULL) {
			TAILQ_REMOVE(&ap->ap_ccb_pending, ccb, ccb_entry);
			ccb->ccb_xa.flags &= ~ATA_F_TIMEOUT_DESIRED;
			ccb->ccb_xa.state = ATA_S_TIMEOUT;
			ccb->ccb_done(ccb);
			ccb->ccb_xa.complete(&ccb->ccb_xa);
		}
	} else {
		sili_issue_pending_commands(ap, NULL);
	}
}

/*
 * Enable or re-enable interrupts on a port.
 *
 * This routine is called from the port initialization code or from the
 * helper thread as the real interrupt may be forced to turn off certain
 * interrupt sources.
 */
void
sili_port_interrupt_enable(struct sili_port *ap)
{
	u_int32_t data;

	data = SILI_PREG_INT_CCOMPLETE | SILI_PREG_INT_CERROR |
	       SILI_PREG_INT_PHYRDYCHG | SILI_PREG_INT_DEVEXCHG |
	       SILI_PREG_INT_DECODE | SILI_PREG_INT_CRC |
	       SILI_PREG_INT_HANDSHK | SILI_PREG_INT_PMCHANGE;
	if (ap->ap_sc->sc_flags & SILI_F_SSNTF)
		data |= SILI_PREG_INT_SDB;
	sili_pwrite(ap, SILI_PREG_INT_ENABLE, data);
}

void
sili_port_interrupt_redisable(struct sili_port *ap)
{
	u_int32_t data;

	data = sili_read(ap->ap_sc, SILI_REG_GCTL);
	data &= SILI_REG_GINT_PORTMASK;
	data &= ~(1 << ap->ap_num);
	sili_write(ap->ap_sc, SILI_REG_GCTL, data);
}

void
sili_port_interrupt_reenable(struct sili_port *ap)
{
	u_int32_t data;

	data = sili_read(ap->ap_sc, SILI_REG_GCTL);
	data &= SILI_REG_GINT_PORTMASK;
	data |= (1 << ap->ap_num);
	sili_write(ap->ap_sc, SILI_REG_GCTL, data);
}

/*
 * Run the port / target state machine from a main context.
 *
 * The state machine for the port is always run.
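 *
 * The port probe sequence is driven by ap_probe and normally advances
 * NEED_INIT -> NEED_HARD_RESET -> NEED_SOFT_RESET -> NEED_IDENT, with
 * each pass below running whichever step the current state calls for.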
 *
 * If atx is non-NULL run the state machine for a particular target.
 * If atx is NULL run the state machine for all targets.
 */
void
sili_port_state_machine(struct sili_port *ap, int initial)
{
	struct ata_port *at;
	u_int32_t data;
	int target;
	int didsleep;
	int loop;

	/*
	 * State machine for port.  Note that CAM is not yet associated
	 * during the initial parallel probe and the port's probe state
	 * will not get past ATA_PROBE_NEED_IDENT.
	 */
	{
		if (initial == 0 && ap->ap_probe <= ATA_PROBE_NEED_HARD_RESET) {
			kprintf("%s: Waiting 7 seconds on insertion\n",
				PORTNAME(ap));
			sili_os_sleep(7000);
			initial = 1;
		}
		if (ap->ap_probe == ATA_PROBE_NEED_INIT)
			sili_port_init(ap);
		if (ap->ap_probe == ATA_PROBE_NEED_HARD_RESET)
			sili_port_reset(ap, NULL, 1);
		if (ap->ap_probe == ATA_PROBE_NEED_SOFT_RESET)
			sili_port_reset(ap, NULL, 0);
		if (ap->ap_probe == ATA_PROBE_NEED_IDENT)
			sili_cam_probe(ap, NULL);
	}
	if (ap->ap_type != ATA_PORT_T_PM) {
		if (ap->ap_probe == ATA_PROBE_FAILED) {
			sili_cam_changed(ap, NULL, 0);
		} else if (ap->ap_probe >= ATA_PROBE_NEED_IDENT) {
			sili_cam_changed(ap, NULL, 1);
		}
		return;
	}

	/*
	 * Port Multiplier state machine.
	 *
	 * Get a mask of changed targets and combine with any runnable
	 * states already present.
	 */
	for (loop = 0; ;++loop) {
		if (sili_pm_read(ap, 15, SATA_PMREG_EINFO, &data)) {
			kprintf("%s: PM unable to read hot-plug bitmap\n",
				PORTNAME(ap));
			break;
		}

		/*
		 * Do at least one loop, then stop if no more state changes
		 * have occurred.  The PM might not generate a new
		 * notification until we clear the entire bitmap.
		 */
		if (loop && data == 0)
			break;

		/*
		 * New devices showing up in the bitmap require some spin-up
		 * time before we start probing them.  Reset didsleep.  The
		 * first new device we detect will sleep before probing.
		 *
		 * This only applies to devices whose change bit is set in
		 * the data, and does not apply to the initial boot-time
		 * probe.
		 */
		didsleep = 0;

		for (target = 0; target < ap->ap_pmcount; ++target) {
			at = &ap->ap_ata[target];

			/*
			 * Check the target state for targets behind the PM
			 * which have changed state.  This will adjust
			 * at_probe and set ATA_PORT_F_RESCAN
			 *
			 * We want to wait at least 10 seconds before probing
			 * a newly inserted device.  If the check status
			 * indicates a device is present and in need of a
			 * hard reset, we make sure we have slept before
			 * continuing.
			 *
			 * We also need to wait at least 1 second for the
			 * PHY state to change after insertion, if we
			 * haven't already waited the 10 seconds.
			 *
			 * NOTE: When pm_check_good finds a good port it
			 *	 typically starts us in probe state
			 *	 NEED_HARD_RESET rather than INIT.
			 */
			if (data & (1 << target)) {
				if (initial == 0 && didsleep == 0)
					sili_os_sleep(1000);
				sili_pm_check_good(ap, target);
				if (initial == 0 && didsleep == 0 &&
				    at->at_probe <= ATA_PROBE_NEED_HARD_RESET
				) {
					didsleep = 1;
					kprintf("%s: Waiting 10 seconds on insertion\n", PORTNAME(ap));
					sili_os_sleep(10000);
				}
			}

			/*
			 * Report hot-plug events before the probe state
			 * really gets hot.  Only actual events are reported
			 * here to reduce spew.
			 */
			if (data & (1 << target)) {
				kprintf("%s: HOTPLUG (PM) - ", ATANAME(ap, at));
				switch(at->at_probe) {
				case ATA_PROBE_NEED_INIT:
				case ATA_PROBE_NEED_HARD_RESET:
					kprintf("Device inserted\n");
					break;
				case ATA_PROBE_FAILED:
					kprintf("Device removed\n");
					break;
				default:
					kprintf("Device probe in progress\n");
					break;
				}
			}

			/*
			 * Run through the state machine as necessary if
			 * the port is not marked failed.
			 *
			 * The state machine may stop at NEED_IDENT if
			 * CAM is not yet attached.
			 *
			 * Acquire exclusive access to the port while we
			 * are doing this.  This prevents command-completion
			 * from queueing commands for non-polled targets
			 * in between our probe steps.  We need to do this
			 * because the reset probes can generate severe PHY
			 * and protocol errors and soft-brick the port.
			 */
			if (at->at_probe != ATA_PROBE_FAILED &&
			    at->at_probe != ATA_PROBE_GOOD) {
				if (at->at_probe == ATA_PROBE_NEED_INIT)
					sili_pm_port_init(ap, at);
				if (at->at_probe == ATA_PROBE_NEED_HARD_RESET)
					sili_port_reset(ap, at, 1);
				if (at->at_probe == ATA_PROBE_NEED_SOFT_RESET)
					sili_port_reset(ap, at, 0);
				if (at->at_probe == ATA_PROBE_NEED_IDENT)
					sili_cam_probe(ap, at);
			}

			/*
			 * Add or remove from CAM
			 */
			if (at->at_features & ATA_PORT_F_RESCAN) {
				at->at_features &= ~ATA_PORT_F_RESCAN;
				if (at->at_probe == ATA_PROBE_FAILED) {
					sili_cam_changed(ap, at, 0);
				} else if (at->at_probe >= ATA_PROBE_NEED_IDENT) {
					sili_cam_changed(ap, at, 1);
				}
			}
			data &= ~(1 << target);
		}
		if (data) {
			kprintf("%s: WARNING (PM): extra bits set in "
				"EINFO: %08x\n", PORTNAME(ap), data);
			while (target < SILI_MAX_PMPORTS) {
				sili_pm_check_good(ap, target);
				++target;
			}
		}
	}
}

/*
 * De-initialize and detach a port.
 */
void
sili_port_free(struct sili_softc *sc, u_int port)
{
	struct sili_port *ap = sc->sc_ports[port];
	struct sili_ccb *ccb;

	/*
	 * Ensure port is disabled and its interrupts are all flushed.
	 */
	if (ap->ap_sc) {
		sili_os_stop_port(ap);
		sili_pwrite(ap, SILI_PREG_INT_DISABLE, SILI_PREG_INT_MASK);
		sili_pwrite(ap, SILI_PREG_CTL_SET, SILI_PREG_CTL_RESET);
		sili_write(ap->ap_sc, SILI_REG_GCTL,
			   sili_read(ap->ap_sc, SILI_REG_GCTL) &
			   ~SILI_REG_GINT_PORTST(ap->ap_num));
	}

	if (ap->ap_ccbs) {
		while ((ccb = sili_get_ccb(ap)) != NULL) {
			if (ccb->ccb_dmamap) {
				bus_dmamap_destroy(sc->sc_tag_data,
						   ccb->ccb_dmamap);
				ccb->ccb_dmamap = NULL;
			}
		}
		if ((ccb = ap->ap_err_ccb) != NULL) {
			if (ccb->ccb_dmamap) {
				bus_dmamap_destroy(sc->sc_tag_data,
						   ccb->ccb_dmamap);
				ccb->ccb_dmamap = NULL;
			}
			ap->ap_err_ccb = NULL;
		}
		kfree(ap->ap_ccbs, M_DEVBUF);
		ap->ap_ccbs = NULL;
	}

	if (ap->ap_dmamem_prbs) {
		sili_dmamem_free(sc, ap->ap_dmamem_prbs);
		ap->ap_dmamem_prbs = NULL;
	}
	if (ap->ap_ata) {
		kfree(ap->ap_ata, M_DEVBUF);
		ap->ap_ata = NULL;
	}
	if (ap->ap_err_scratch) {
		kfree(ap->ap_err_scratch, M_DEVBUF);
		ap->ap_err_scratch = NULL;
	}

	/* bus_space(9) says we don't free the subregion handle */

	kfree(ap, M_DEVBUF);
	sc->sc_ports[port] = NULL;
}

/*
 * Reset a port.
 *
 * If hard is 0 perform a softreset of the port.
 * If hard is 1 perform a hard reset of the port.
 * If hard is 2 perform a hard reset of the port and cycle the phy.
 *
 * If at is non-NULL an indirect port via a port-multiplier is being
 * reset, otherwise a direct port is being reset.
 *
 * NOTE: Indirect ports can only be soft-reset.
 */
int
sili_port_reset(struct sili_port *ap, struct ata_port *at, int hard)
{
	int rc;

	if (hard) {
		if (at)
			rc = sili_pm_hardreset(ap, at->at_target, hard);
		else
			rc = sili_port_hardreset(ap);
	} else {
		if (at)
			rc = sili_pm_softreset(ap, at->at_target);
		else
			rc = sili_port_softreset(ap);
	}
	return(rc);
}

/*
 * SILI soft reset, Section 10.4.1
 *
 * (at) will be NULL when soft-resetting a directly-attached device, and
 * non-NULL when soft-resetting a device through a port multiplier.
 *
 * This function keeps port communications intact and attempts to generate
 * a reset to the connected device using device commands.
 */
int
sili_port_softreset(struct sili_port *ap)
{
	struct sili_ccb	*ccb = NULL;
	struct sili_prb	*prb;
	int		error;
	u_int32_t	sig;

	error = EIO;

	if (bootverbose)
		kprintf("%s: START SOFTRESET\n", PORTNAME(ap));

	crit_enter();
	ap->ap_state = AP_S_NORMAL;

	/*
	 * Prep the special soft-reset SII command.
	 */
	ccb = sili_get_err_ccb(ap);
	ccb->ccb_done = sili_empty_done;
	ccb->ccb_xa.flags = ATA_F_POLL | ATA_F_AUTOSENSE | ATA_F_EXCLUSIVE;
	ccb->ccb_xa.complete = sili_dummy_done;
	ccb->ccb_xa.at = NULL;

	prb = ccb->ccb_prb;
	bzero(&prb->prb_h2d, sizeof(prb->prb_h2d));
	prb->prb_h2d.flags = 0;
	prb->prb_control = SILI_PRB_CTRL_SOFTRESET;
	prb->prb_override = 0;
	prb->prb_xfer_count = 0;

	ccb->ccb_xa.state = ATA_S_PENDING;

	/*
	 * NOTE: Must use sili_quick_timeout() because we hold the err_ccb
	 */
	if (sili_poll(ccb, 8000, sili_quick_timeout) != ATA_S_COMPLETE) {
		kprintf("%s: First FIS failed\n", PORTNAME(ap));
		goto err;
	}

	sig = (prb->prb_d2h.lba_high << 24) |
	      (prb->prb_d2h.lba_mid << 16) |
	      (prb->prb_d2h.lba_low << 8) |
	      (prb->prb_d2h.sector_count);
	if (bootverbose)
		kprintf("%s: SOFTRESET SIGNATURE %08x\n", PORTNAME(ap), sig);

	/*
	 * If the softreset is trying to clear a BSY condition after a
	 * normal portreset we assign the port type.
	 *
	 * If the softreset is being run first as part of the ccb error
	 * processing code then report if the device signature changed
	 * unexpectedly.
	 */
	if (ap->ap_type == ATA_PORT_T_NONE) {
		ap->ap_type = sili_port_signature(ap, NULL, sig);
	} else {
		if (sili_port_signature(ap, NULL, sig) != ap->ap_type) {
			kprintf("%s: device signature unexpectedly "
				"changed\n", PORTNAME(ap));
			error = EBUSY; /* XXX */
		}
	}
	error = 0;
err:
	if (ccb != NULL) {
		sili_put_err_ccb(ccb);
	}

	/*
	 * If we failed to softreset make the port quiescent, otherwise
	 * make sure the port's start/stop state matches what it was on
	 * entry.
	 *
	 * Don't kill the port if the softreset is on a port multiplier
	 * target, that would kill all the targets!
	 */
	if (bootverbose) {
		kprintf("%s: END SOFTRESET %d prob=%d state=%d\n",
			PORTNAME(ap), error, ap->ap_probe, ap->ap_state);
	}
	if (error) {
		sili_port_hardstop(ap);
		/* ap_probe set to failed */
	} else {
		ap->ap_probe = ATA_PROBE_NEED_IDENT;
		ap->ap_pmcount = 1;
	}
	crit_exit();

	sili_pwrite(ap, SILI_PREG_SERR, -1);
	if (bootverbose)
		kprintf("%s: END SOFTRESET\n", PORTNAME(ap));

	return (error);
}

/*
 * This function does a hard reset of the port.  Note that the device
 * connected to the port could still end up hung.  Phy detection is
 * used to short-cut longer operations.
 */
int
sili_port_hardreset(struct sili_port *ap)
{
	u_int32_t data;
	int	error;
	int	loop;

	if (bootverbose)
		kprintf("%s: START HARDRESET\n", PORTNAME(ap));

	ap->ap_state = AP_S_NORMAL;

	/*
	 * Set SCTL up for any speed restrictions before issuing the
	 * device reset.   This may also take us out of an INIT state
	 * (if we were previously in a continuous reset state from
	 * sili_port_listen()).
	 */
	data = SILI_PREG_SCTL_SPM_NONE |
	       SILI_PREG_SCTL_IPM_NONE |
	       SILI_PREG_SCTL_SPD_NONE |
	       SILI_PREG_SCTL_DET_NONE;
	if (SiliForceGen1 & (1 << ap->ap_num)) {
		data &= ~SILI_PREG_SCTL_SPD_NONE;
		data |= SILI_PREG_SCTL_SPD_GEN1;
	}
	sili_pwrite(ap, SILI_PREG_SCTL, data);

	/*
	 * The transition from a continuous COMRESET state from
	 * sili_port_listen() back to device detect can take a
	 * few seconds.  It's quite non-deterministic.  Most of
	 * the time it takes far less.  Use a polling loop to
	 * wait.
	 */
	loop = 4000;
	while (loop > 0) {
		data = sili_pread(ap, SILI_PREG_SSTS);
		if (data & SILI_PREG_SSTS_DET)
			break;
		loop -= sili_os_softsleep();
	}
	sili_os_sleep(100);

	/*
	 * Issue Device Reset, give the phy a little time to settle down.
	 *
	 * NOTE: Unlike Port Reset, the port ready signal will not
	 *	 go active unless a device is established to be on
	 *	 the port.
	 */
	sili_pwrite(ap, SILI_PREG_CTL_CLR, SILI_PREG_CTL_PMA);
	sili_pwrite(ap, SILI_PREG_CTL_CLR, SILI_PREG_CTL_RESUME);
	sili_pwrite(ap, SILI_PREG_CTL_SET, SILI_PREG_CTL_DEVRESET);
	if (sili_pwait_clr(ap, SILI_PREG_CTL_SET, SILI_PREG_CTL_DEVRESET)) {
		kprintf("%s: hardreset failed to clear\n", PORTNAME(ap));
	}
	sili_os_sleep(20);

	/*
	 * Try to determine if there is a device on the port.
	 *
	 * Give the device 3/10 second to at least be detected.
	 */
	loop = 300;
	while (loop > 0) {
		data = sili_pread(ap, SILI_PREG_SSTS);
		if (data & SILI_PREG_SSTS_DET)
			break;
		loop -= sili_os_softsleep();
	}
	if (loop <= 0) {
		if (bootverbose) {
			kprintf("%s: Port appears to be unplugged\n",
				PORTNAME(ap));
		}
		error = ENODEV;
		goto done;
	}

	/*
	 * There is something on the port.  Give the device 3 seconds
	 * to detect.
	 */
	if (sili_pwait_eq(ap, 3000, SILI_PREG_SSTS,
			  SILI_PREG_SSTS_DET, SILI_PREG_SSTS_DET_DEV)) {
		if (bootverbose) {
			kprintf("%s: Device may be powered down\n",
				PORTNAME(ap));
		}
		error = ENODEV;
		goto pmdetect;
	}

	/*
	 * We got something that definitely looks like a device.  Give
	 * the device time to send us its first D2H FIS.
	 *
	 * This effectively waits for BSY to clear.
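	 *
	 * If READY never asserts the result is EBUSY, which (unless the
	 * PM probe below succeeds) is reported at the end of this
	 * function as a bricked device.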
	 */
	if (sili_pwait_set_to(ap, 3000, SILI_PREG_STATUS,
			      SILI_PREG_STATUS_READY)) {
		error = EBUSY;
	} else {
		error = 0;
	}

pmdetect:
	/*
	 * Do the PM port probe regardless of how things turned out above.
	 *
	 * If the PM port probe fails it will return the original error
	 * from above.
	 */
	if (ap->ap_sc->sc_flags & SILI_F_SPM) {
		error = sili_pm_port_probe(ap, error);
	}

done:
	/*
	 * Finish up
	 */
	switch(error) {
	case 0:
		if (ap->ap_type == ATA_PORT_T_PM)
			ap->ap_probe = ATA_PROBE_GOOD;
		else
			ap->ap_probe = ATA_PROBE_NEED_SOFT_RESET;
		break;
	case ENODEV:
		/*
		 * No device detected.
		 */
		data = sili_pread(ap, SILI_PREG_SSTS);

		switch(data & SATA_PM_SSTS_DET) {
		case SILI_PREG_SSTS_DET_DEV_NE:
			kprintf("%s: Device not communicating\n",
				PORTNAME(ap));
			break;
		case SILI_PREG_SSTS_DET_OFFLINE:
			kprintf("%s: PHY offline\n",
				PORTNAME(ap));
			break;
		default:
			kprintf("%s: No device detected\n",
				PORTNAME(ap));
			break;
		}
		sili_port_hardstop(ap);
		break;
	default:
		/*
		 * (EBUSY)
		 */
		kprintf("%s: Device on port is bricked\n",
			PORTNAME(ap));
		sili_port_hardstop(ap);
		break;
	}
	sili_pwrite(ap, SILI_PREG_SERR, -1);

	if (bootverbose)
		kprintf("%s: END HARDRESET %d\n", PORTNAME(ap), error);
	return (error);
}

/*
 * Hard-stop on hot-swap device removal.  See 10.10.1
 *
 * Place the port in a mode that will allow it to detect hot-swap insertions.
 * This is a bit imprecise because just setting up SCTL to DET_INIT doesn't
 * seem to do the job.
 */
void
sili_port_hardstop(struct sili_port *ap)
{
	struct sili_ccb *ccb;
	struct ata_port *at;
	int i;
	int slot;
	int serial;

	ap->ap_state = AP_S_FATAL_ERROR;
	ap->ap_probe = ATA_PROBE_FAILED;
	ap->ap_type = ATA_PORT_T_NONE;

	/*
	 * Clean up AT sub-ports on SATA port.
	 */
	for (i = 0; ap->ap_ata && i < SILI_MAX_PMPORTS; ++i) {
		at = &ap->ap_ata[i];
		at->at_type = ATA_PORT_T_NONE;
		at->at_probe = ATA_PROBE_FAILED;
		at->at_features &= ~ATA_PORT_F_READLOG;
	}

	/*
	 * Kill the port.  Don't bother waiting for it to transition
	 * back up.
	 */
	sili_pwrite(ap, SILI_PREG_CTL_SET, SILI_PREG_CTL_RESET);
	if (sili_pread(ap, SILI_PREG_STATUS) & SILI_PREG_STATUS_READY) {
		kprintf("%s: Port will not go into reset\n",
			PORTNAME(ap));
	}
	sili_os_sleep(10);
	sili_pwrite(ap, SILI_PREG_CTL_CLR, SILI_PREG_CTL_RESET);

	/*
	 * Turn off port-multiplier control bit
	 */
	sili_pwrite(ap, SILI_PREG_CTL_CLR, SILI_PREG_CTL_PMA);

	/*
	 * Clean up the command list.
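	 *
	 * Active slots are completed first, synchronizing against any
	 * running timeout callout via the ccb serial number, then anything
	 * still sitting on the pending queue.  Everything is completed
	 * with ATA_S_TIMEOUT so callers see the failure.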
	 */
restart:
	while (ap->ap_active) {
		slot = ffs(ap->ap_active) - 1;
		ap->ap_active &= ~(1 << slot);
		ap->ap_expired &= ~(1 << slot);
		--ap->ap_active_cnt;
		ccb = &ap->ap_ccbs[slot];
		if (ccb->ccb_xa.flags & ATA_F_TIMEOUT_RUNNING) {
			serial = ccb->ccb_xa.serial;
			callout_stop_sync(&ccb->ccb_timeout);
			if (serial != ccb->ccb_xa.serial) {
				kprintf("%s: Warning: timeout race ccb %p\n",
					PORTNAME(ap), ccb);
				goto restart;
			}
			ccb->ccb_xa.flags &= ~ATA_F_TIMEOUT_RUNNING;
		}
		ccb->ccb_xa.flags &= ~(ATA_F_TIMEOUT_DESIRED |
				       ATA_F_TIMEOUT_EXPIRED);
		ccb->ccb_xa.state = ATA_S_TIMEOUT;
		ccb->ccb_done(ccb);
		ccb->ccb_xa.complete(&ccb->ccb_xa);
	}
	while ((ccb = TAILQ_FIRST(&ap->ap_ccb_pending)) != NULL) {
		TAILQ_REMOVE(&ap->ap_ccb_pending, ccb, ccb_entry);
		ccb->ccb_xa.state = ATA_S_TIMEOUT;
		ccb->ccb_xa.flags &= ~ATA_F_TIMEOUT_DESIRED;
		ccb->ccb_done(ccb);
		ccb->ccb_xa.complete(&ccb->ccb_xa);
	}
	KKASSERT(ap->ap_active_cnt == 0);

	/*
	 * Put the port into a listen mode, we want to get insertion/removal
	 * events.
	 */
	sili_port_listen(ap);
}

/*
 * Place port into a listen mode for hotplug events only.  The port has
 * already been reset and the command processor may not be ready due
 * to the lack of a device.
 */
void
sili_port_listen(struct sili_port *ap)
{
	u_int32_t data;

#if 1
	data = SILI_PREG_SCTL_SPM_NONE |
	       SILI_PREG_SCTL_IPM_NONE |
	       SILI_PREG_SCTL_SPD_NONE |
	       SILI_PREG_SCTL_DET_INIT;
	if (SiliForceGen1 & (1 << ap->ap_num)) {
		data &= ~SILI_PREG_SCTL_SPD_NONE;
		data |= SILI_PREG_SCTL_SPD_GEN1;
	}
#endif
	sili_os_sleep(20);
	sili_pwrite(ap, SILI_PREG_SERR, -1);
	sili_pwrite(ap, SILI_PREG_INT_ENABLE, SILI_PREG_INT_PHYRDYCHG |
					      SILI_PREG_INT_DEVEXCHG);
}

/*
 * Figure out what type of device is connected to the port, ATAPI or
 * DISK.
 */
int
sili_port_signature(struct sili_port *ap, struct ata_port *at, u_int32_t sig)
{
	if (bootverbose)
		kprintf("%s: sig %08x\n", ATANAME(ap, at), sig);
	if ((sig & 0xffff0000) == (SATA_SIGNATURE_ATAPI & 0xffff0000)) {
		return(ATA_PORT_T_ATAPI);
	} else if ((sig & 0xffff0000) ==
		   (SATA_SIGNATURE_PORT_MULTIPLIER & 0xffff0000)) {
		return(ATA_PORT_T_PM);
	} else {
		return(ATA_PORT_T_DISK);
	}
}
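
/*
 * For reference, the standard SATA signature values checked above are
 * 0xEB140101 (ATAPI), 0x96690101 (port multiplier) and 0x00000101
 * (ordinary disk); only the upper 16 bits are compared.
 */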

/*
 * Load the DMA descriptor table for a CCB's buffer.
 *
 * NOTE: ATA_F_PIO is auto-selected by sili part.
 */
int
sili_load_prb(struct sili_ccb *ccb)
{
	struct sili_port	*ap = ccb->ccb_port;
	struct sili_softc	*sc = ap->ap_sc;
	struct ata_xfer		*xa = &ccb->ccb_xa;
	struct sili_prb		*prb = ccb->ccb_prb;
	struct sili_sge		*sge;
	bus_dmamap_t		dmap = ccb->ccb_dmamap;
	int			error;

	/*
	 * Set up the PRB.  The PRB contains 2 SGE's (1 if it is an ATAPI
	 * command).  The SGE must be set up to link to the rest of our
	 * SGE array, in blocks of four SGEs (a SGE table) starting at
	 * prb_sge[0].
	 */
	prb->prb_xfer_count = 0;
	prb->prb_control = 0;
	prb->prb_override = 0;
	sge = (ccb->ccb_xa.flags & ATA_F_PACKET) ?
	      &prb->prb_sge_packet : &prb->prb_sge_normal;
	if (xa->datalen == 0) {
		sge->sge_flags = SILI_SGE_FLAGS_TRM | SILI_SGE_FLAGS_DRD;
		sge->sge_count = 0;
		return (0);
	}

	if (ccb->ccb_xa.flags & ATA_F_READ)
		prb->prb_control |= SILI_PRB_CTRL_READ;
	if (ccb->ccb_xa.flags & ATA_F_WRITE)
		prb->prb_control |= SILI_PRB_CTRL_WRITE;
	sge->sge_flags = SILI_SGE_FLAGS_LNK;
	sge->sge_count = 0;
	sge->sge_paddr = ccb->ccb_prb_paddr +
			 offsetof(struct sili_prb, prb_sge[0]);

	/*
	 * Load our sge array.
	 */
	error = bus_dmamap_load(sc->sc_tag_data, dmap,
				xa->data, xa->datalen,
				sili_load_prb_callback,
				ccb,
				((xa->flags & ATA_F_NOWAIT) ?
				 BUS_DMA_NOWAIT : BUS_DMA_WAITOK));
	if (error != 0) {
		kprintf("%s: error %d loading dmamap\n", PORTNAME(ap), error);
		return (1);
	}

	bus_dmamap_sync(sc->sc_tag_data, dmap,
			(xa->flags & ATA_F_READ) ?
			BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);

	return (0);
}

/*
 * Callback from BUSDMA system to load the segment list.
 *
 * The scatter/gather table is loaded by the sili chip in blocks of
 * four SGE's.  If a continuance is required the last entry in each
 * block must point to the next block.
 */
static
void
sili_load_prb_callback(void *info, bus_dma_segment_t *segs, int nsegs,
		       int error)
{
	struct sili_ccb *ccb = info;
	struct sili_sge *sge;
	int sgi;

	KKASSERT(nsegs <= SILI_MAX_SGET);

	sgi = 0;
	sge = &ccb->ccb_prb->prb_sge[0];
	while (nsegs) {
		if ((sgi & 3) == 3) {
			sge->sge_paddr = htole64(ccb->ccb_prb_paddr +
						 offsetof(struct sili_prb,
							  prb_sge[sgi + 1]));
			sge->sge_count = 0;
			sge->sge_flags = SILI_SGE_FLAGS_LNK;
		} else {
			sge->sge_paddr = htole64(segs->ds_addr);
			sge->sge_count = htole32(segs->ds_len);
			sge->sge_flags = 0;
			--nsegs;
			++segs;
		}
		++sge;
		++sgi;
	}
	--sge;
	sge->sge_flags |= SILI_SGE_FLAGS_TRM;
}
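
/*
 * Illustration of the table built above, assuming six DMA segments:
 * prb_sge[0..2] carry segments 0-2, prb_sge[3] is a link entry pointing
 * at prb_sge[4], prb_sge[4..6] carry segments 3-5, and the last data
 * entry (prb_sge[6]) is flagged with SILI_SGE_FLAGS_TRM.
 */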

void
sili_unload_prb(struct sili_ccb *ccb)
{
	struct sili_port	*ap = ccb->ccb_port;
	struct sili_softc	*sc = ap->ap_sc;
	struct ata_xfer		*xa = &ccb->ccb_xa;
	bus_dmamap_t		dmap = ccb->ccb_dmamap;

	if (xa->datalen != 0) {
		bus_dmamap_sync(sc->sc_tag_data, dmap,
				(xa->flags & ATA_F_READ) ?
				BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);

		bus_dmamap_unload(sc->sc_tag_data, dmap);

		if (ccb->ccb_xa.flags & ATA_F_NCQ)
			xa->resid = 0;
		else
			xa->resid = xa->datalen -
				    le32toh(ccb->ccb_prb->prb_xfer_count);
	}
}

/*
 * Start a command and poll for completion.
 *
 * timeout is in ms and only counts once the command gets on-chip.
 *
 * Returns ATA_S_* state, compare against ATA_S_COMPLETE to determine
 * that no error occurred.
 *
 * NOTE: If the caller specifies a NULL timeout function the caller is
 *	 responsible for clearing hardware state on failure, but we will
 *	 deal with removing the ccb from any pending queue.
 *
 * NOTE: NCQ should never be used with this function.
 *
 * NOTE: If the port is in a failed state and stopped we do not try
 *	 to activate the ccb.
 */
int
sili_poll(struct sili_ccb *ccb, int timeout,
	  void (*timeout_fn)(struct sili_ccb *))
{
	struct sili_port *ap = ccb->ccb_port;

	if (ccb->ccb_port->ap_state == AP_S_FATAL_ERROR) {
		ccb->ccb_xa.state = ATA_S_ERROR;
		return(ccb->ccb_xa.state);
	}

	KKASSERT((ap->ap_expired & (1 << ccb->ccb_slot)) == 0);
	sili_start(ccb);

	do {
		sili_port_intr(ap, 1);
		switch(ccb->ccb_xa.state) {
		case ATA_S_ONCHIP:
			timeout -= sili_os_softsleep();
			break;
		case ATA_S_PENDING:
			/*
			 * The packet can get stuck on the pending queue
			 * if the port refuses to come ready.  XXX
			 */
#if 0
			if (xxx AP_F_EXCLUSIVE_ACCESS)
				timeout -= sili_os_softsleep();
			else
#endif
				sili_os_softsleep();
			sili_check_active_timeouts(ap);
			break;
		default:
			return (ccb->ccb_xa.state);
		}
	} while (timeout > 0);

	/*
	 * Don't spew if this is a probe during hard reset
	 */
	if (ap->ap_probe != ATA_PROBE_NEED_HARD_RESET) {
		kprintf("%s: Poll timeout slot %d\n",
			ATANAME(ap, ccb->ccb_xa.at),
			ccb->ccb_slot);
	}

	timeout_fn(ccb);

	return(ccb->ccb_xa.state);
}

/*
 * When polling we have to check if the currently active CCB(s)
 * have timed out as the callout will be deadlocked while we
 * hold the port lock.
 */
void
sili_check_active_timeouts(struct sili_port *ap)
{
	struct sili_ccb *ccb;
	u_int32_t mask;
	int tag;

	mask = ap->ap_active;
	while (mask) {
		tag = ffs(mask) - 1;
		mask &= ~(1 << tag);
		ccb = &ap->ap_ccbs[tag];
		if (ccb->ccb_xa.flags & ATA_F_TIMEOUT_EXPIRED) {
			sili_core_timeout(ccb, 0);
		}
	}
}

static
__inline
void
sili_start_timeout(struct sili_ccb *ccb)
{
	if (ccb->ccb_xa.flags & ATA_F_TIMEOUT_DESIRED) {
		ccb->ccb_xa.flags |= ATA_F_TIMEOUT_RUNNING;
		callout_reset(&ccb->ccb_timeout,
			      (ccb->ccb_xa.timeout * hz + 999) / 1000,
			      sili_ata_cmd_timeout_unserialized, ccb);
	}
}

void
sili_start(struct sili_ccb *ccb)
{
	struct sili_port *ap = ccb->ccb_port;
#if 0
	struct sili_softc *sc = ap->ap_sc;
#endif

	KKASSERT(ccb->ccb_xa.state == ATA_S_PENDING);

	/*
	 * Sync our SGE table and PRB
	 */
	bus_dmamap_sync(ap->ap_dmamem_prbs->adm_tag,
			ap->ap_dmamem_prbs->adm_map,
			BUS_DMASYNC_PREWRITE);

	/*
	 * XXX dmamap for PRB XXX  BUS_DMASYNC_PREWRITE
	 */

	/*
	 * Controller will update shared memory!
	 * XXX bus_dmamap_sync ... BUS_DMASYNC_PREREAD ...
	 */
	/* Prepare RFIS area for write by controller */

	/*
	 * There's no point trying to optimize this, it only shaves a few
	 * nanoseconds so just queue the command and call our generic issue.
	 */
	sili_issue_pending_commands(ap, ccb);
}

/*
 * Wait for all commands to complete processing.  We hold the lock so no
 * new commands will be queued.
 */
void
sili_exclusive_access(struct sili_port *ap)
{
	while (ap->ap_active) {
		sili_port_intr(ap, 1);
		sili_os_softsleep();
	}
}

/*
 * If ccb is not NULL enqueue and/or issue it.
 *
 * If ccb is NULL issue whatever we can from the queue.  However, nothing
 * new is issued if the exclusive access flag is set or expired ccb's are
 * present.
 *
 * If existing commands are still active (ap_active) we can only
 * issue matching new commands.
 */
void
sili_issue_pending_commands(struct sili_port *ap, struct sili_ccb *ccb)
{
	/*
	 * Enqueue the ccb.
	 *
	 * If just running the queue and in exclusive access mode we
	 * just return.  Also in this case if there are any expired ccb's
	 * we want to clear the queue so the port can be safely stopped.
	 *
	 * XXX sili chip - expiration needs to be per-target if PM supports
	 *     FBSS?
	 */
	if (ccb) {
		TAILQ_INSERT_TAIL(&ap->ap_ccb_pending, ccb, ccb_entry);
	} else if (ap->ap_expired) {
		return;
	}

	/*
	 * Pull the next ccb off the queue and run it if possible.
	 * If the port is not ready to accept commands enable the
	 * ready interrupt instead of starting a new command.
	 *
	 * XXX limit ncqdepth for attached devices behind PM
	 */
	while ((ccb = TAILQ_FIRST(&ap->ap_ccb_pending)) != NULL) {
		/*
		 * Port may be wedged.
		 */
		if ((sili_pread(ap, SILI_PREG_STATUS) &
		    SILI_PREG_STATUS_READY) == 0) {
			kprintf("%s: slot %d NOT READY\n",
				ATANAME(ap, ccb->ccb_xa.at), ccb->ccb_slot);
			sili_pwrite(ap, SILI_PREG_INT_ENABLE,
				    SILI_PREG_INT_READY);
			break;
		}

		/*
		 * Handle exclusivity requirements.  ATA_F_EXCLUSIVE is used
		 * when we may have to access the rfis which is stored in
		 * the LRAM PRB.  Unfortunately reading the LRAM PRB is
		 * highly problematic, so requests (like PM requests) which
		 * need to access the rfis use exclusive mode and then
		 * access the copy made by the port interrupt code back in
		 * host memory.
		 */
		if (ap->ap_active & ~ap->ap_expired) {
			/*
			 * There may be multiple ccb's already running,
			 * if any are running and ap_run_flags sets
			 * one of these flags then we know only one is
			 * running.
			 *
			 * XXX Current AUTOSENSE code forces exclusivity
			 *     to simplify the code.
			 */
			if (ap->ap_run_flags &
			    (ATA_F_EXCLUSIVE | ATA_F_AUTOSENSE)) {
				break;
			}

			/*
			 * If the ccb we want to run is exclusive and ccb's
			 * are still active on the port, we can't queue it
			 * yet.
			 *
			 * XXX Current AUTOSENSE code forces exclusivity
			 *     to simplify the code.
			 */
			if (ccb->ccb_xa.flags &
			    (ATA_F_EXCLUSIVE | ATA_F_AUTOSENSE)) {
				break;
			}
		}

		TAILQ_REMOVE(&ap->ap_ccb_pending, ccb, ccb_entry);
		ccb->ccb_xa.state = ATA_S_ONCHIP;
		ap->ap_active |= 1 << ccb->ccb_slot;
		ap->ap_active_cnt++;
		ap->ap_run_flags = ccb->ccb_xa.flags;

		/*
		 * We can't use the CMD_FIFO method because it requires us
		 * building the PRB in the LRAM, and the LRAM is buggy.  So
		 * we use host memory for the PRB.
		 */
		sili_pwrite(ap, SILI_PREG_CMDACT(ccb->ccb_slot),
			    (u_int32_t)ccb->ccb_prb_paddr);
		sili_pwrite(ap, SILI_PREG_CMDACT(ccb->ccb_slot) + 4,
			    (u_int32_t)(ccb->ccb_prb_paddr >> 32));
		/* sili_pwrite(ap, SILI_PREG_CMD_FIFO, ccb->ccb_slot); */
		sili_start_timeout(ccb);
	}
}

void
sili_intr(void *arg)
{
	struct sili_softc	*sc = arg;
	struct sili_port	*ap;
	u_int32_t		gint;
	int			port;

	/*
	 * Check if the master enable is up, and whether any interrupts are
	 * pending.
	 *
	 * Clear the ints we got.
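	 *
	 * Ports whose lock cannot be acquired without blocking are not
	 * processed here; their interrupt is re-disabled and the port
	 * thread is signalled to do the work (see sili_port_thread_core()).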
	 */
	if ((sc->sc_flags & SILI_F_INT_GOOD) == 0)
		return;
	gint = sili_read(sc, SILI_REG_GINT);
	if (gint == 0 || gint == 0xffffffff)
		return;
	sili_write(sc, SILI_REG_GINT, gint);

	/*
	 * Process interrupts for each port in a non-blocking fashion.
	 */
	while (gint & SILI_REG_GINT_PORTMASK) {
		port = ffs(gint) - 1;
		ap = sc->sc_ports[port];
		if (ap) {
			if (sili_os_lock_port_nb(ap) == 0) {
				sili_port_intr(ap, 0);
				sili_os_unlock_port(ap);
			} else {
				sili_port_interrupt_redisable(ap);
				sili_os_signal_port_thread(ap, AP_SIGF_PORTINT);
			}
		}
		gint &= ~(1 << port);
	}
}

/*
 * Core called from helper thread.
 */
void
sili_port_thread_core(struct sili_port *ap, int mask)
{
	/*
	 * Process any expired timeouts.
	 */
	sili_os_lock_port(ap);
	if (mask & AP_SIGF_TIMEOUT) {
		sili_check_active_timeouts(ap);
	}

	/*
	 * Process port interrupts which require a higher level of
	 * intervention.
	 */
	if (mask & AP_SIGF_PORTINT) {
		sili_port_intr(ap, 1);
		sili_port_interrupt_reenable(ap);
	}
	sili_os_unlock_port(ap);
}

/*
 * Core per-port interrupt handler.
 *
 * If blockable is 0 we cannot call sili_os_sleep() at all and we can only
 * deal with normal command completions which do not require blocking.
 */
void
sili_port_intr(struct sili_port *ap, int blockable)
{
	struct sili_softc	*sc = ap->ap_sc;
	u_int32_t		is;
	int			slot;
	struct sili_ccb		*ccb = NULL;
	struct ata_port		*ccb_at = NULL;
	u_int32_t		active;
	u_int32_t		finished;
	const u_int32_t		blockable_mask = SILI_PREG_IST_PHYRDYCHG |
						 SILI_PREG_IST_DEVEXCHG |
						 SILI_PREG_IST_CERROR |
						 SILI_PREG_IST_DECODE |
						 SILI_PREG_IST_CRC |
						 SILI_PREG_IST_HANDSHK;
	const u_int32_t		fatal_mask = SILI_PREG_IST_PHYRDYCHG |
					     SILI_PREG_IST_DEVEXCHG |
					     SILI_PREG_IST_DECODE |
					     SILI_PREG_IST_CRC |
					     SILI_PREG_IST_HANDSHK;

	enum { NEED_NOTHING, NEED_HOTPLUG_INSERT,
	       NEED_HOTPLUG_REMOVE } need = NEED_NOTHING;

	/*
	 * NOTE: CCOMPLETE was automatically cleared when we read INT_STATUS.
	 */
	is = sili_pread(ap, SILI_PREG_INT_STATUS);
	is &= SILI_PREG_IST_MASK;
	if (is & SILI_PREG_IST_CCOMPLETE)
		sili_pwrite(ap, SILI_PREG_INT_STATUS, SILI_PREG_IST_CCOMPLETE);

	/*
	 * If we can't block then we can't handle these here.  Disable
	 * the interrupts in question so we don't live-lock, the helper
	 * thread will re-enable them.
	 *
	 * If the port is in a completely failed state we do not want
	 * to drop through to failed-command-processing if blockable is 0,
	 * just let the thread deal with it all.
	 *
	 * Otherwise we fall through and still handle DHRS and any commands
	 * which completed normally.  Even if we are errored we haven't
	 * stopped the port yet so CI/SACT are still good.
	 */
	if (blockable == 0) {
		if (ap->ap_state == AP_S_FATAL_ERROR) {
			sili_port_interrupt_redisable(ap);
			sili_os_signal_port_thread(ap, AP_SIGF_PORTINT);
			/*is &= ~blockable_mask;*/
			return;
		}
		if (is & blockable_mask) {
			sili_port_interrupt_redisable(ap);
			sili_os_signal_port_thread(ap, AP_SIGF_PORTINT);
			/*is &= ~blockable_mask;*/
			return;
		}
	}

	if (is & SILI_PREG_IST_CERROR) {
		/*
		 * Command failed (blockable).
		 *
		 * This stops command processing.  We can extract the PM
		 * target from the PMP field in SILI_PREG_CONTEXT.  The
		 * tag is not necessarily valid so don't use that.
		 *
		 * We must then expire all CCB's for that target and resume
		 * processing if any other targets have active commands.
		 * Particular error codes can be recovered by reading the LOG
		 * page.
		 *
		 * The expire handling code will do the rest, which is
		 * basically to reset the port once the only active
		 * commands remaining are all expired.
		 */
		u_int32_t error;
		int target;
		int resume = 1;

		target = (sili_pread(ap, SILI_PREG_CONTEXT) >>
			  SILI_PREG_CONTEXT_PMPORT_SHIFT) &
			 SILI_PREG_CONTEXT_PMPORT_MASK;
		sili_pwrite(ap, SILI_PREG_INT_STATUS, SILI_PREG_IST_CERROR);
		active = ap->ap_active & ~ap->ap_expired;
		error = sili_pread(ap, SILI_PREG_CERROR);
		kprintf("%s.%d target error %d active=%08x hactive=%08x "
			"SERR=%b\n",
			PORTNAME(ap), target, error,
			active, sili_pread(ap, SILI_PREG_SLOTST),
			sili_pread(ap, SILI_PREG_SERR), SILI_PFMT_SERR);

		while (active) {
			slot = ffs(active) - 1;
			ccb = &ap->ap_ccbs[slot];
			if ((ccb_at = ccb->ccb_xa.at) == NULL)
				ccb_at = &ap->ap_ata[0];
			if (target == ccb_at->at_target) {
				if ((ccb->ccb_xa.flags & ATA_F_NCQ) &&
				    (error == SILI_PREG_CERROR_DEVICE ||
				     error == SILI_PREG_CERROR_SDBERROR)) {
					ccb_at->at_features |= ATA_PORT_F_READLOG;
				}
				if (sili_core_timeout(ccb, 1) == 0)
					resume = 0;
			}
			active &= ~(1 << slot);
		}

		/*
		 * Resume will be 0 if the timeout reinited and restarted
		 * the port.  Otherwise we resume the port to allow other
		 * commands to complete.
		 */
		if (resume)
			sili_pwrite(ap, SILI_PREG_CTL_SET, SILI_PREG_CTL_RESUME);
	}

	/*
	 * Device notification to us (non-blocking)
	 *
	 * This is interrupt status SILI_PREG_IST_SDB
	 *
	 * NOTE!  On some parts notification bits can get set without
	 *	  generating an interrupt.  It is unclear whether this is
	 *	  a bug in the PM (sending a DTOH device setbits with 'N' set
	 *	  and 'I' not set), or a bug in the host controller.
	 *
	 *	  It only seems to occur under load.
	 */
	if (sc->sc_flags & SILI_F_SSNTF) {
		u_int32_t data;
		const char *xstr;

		data = sili_pread(ap, SILI_PREG_SNTF);
		if (is & SILI_PREG_IST_SDB) {
			sili_pwrite(ap, SILI_PREG_INT_STATUS,
				    SILI_PREG_IST_SDB);
			is &= ~SILI_PREG_IST_SDB;
			xstr = " (no SDBS!)";
		} else {
			xstr = "";
		}
		if (data) {
			kprintf("%s: NOTIFY %08x%s\n",
				PORTNAME(ap), data, xstr);
			sili_pwrite(ap, SILI_PREG_SNTF, data);
			sili_cam_changed(ap, NULL, -1);
		}
	}

	/*
	 * Port change (hot-plug) (blockable).
	 *
	 * A PCS interrupt will occur on hot-plug once communication is
	 * established.
	 *
	 * A PRCS interrupt will occur on hot-unplug (and possibly also
	 * on hot-plug).
	 *
	 * XXX We can then check the CPS (Cold Presence State) bit, if
	 * supported, to determine if a device is plugged in or not and do
	 * the right thing.
	 *
	 * WARNING:  A PCS interrupt is cleared by clearing DIAG_X, and
	 *	     can also occur if an unsolicited COMINIT is received.
	 *	     If this occurs command processing is automatically
	 *	     stopped (CR goes inactive) and the port must be stopped
	 *	     and restarted.
	 */
	if (is & (SILI_PREG_IST_PHYRDYCHG | SILI_PREG_IST_DEVEXCHG)) {
		/* XXX */
		sili_pwrite(ap, SILI_PREG_SERR,
			    (SILI_PREG_SERR_DIAG_N | SILI_PREG_SERR_DIAG_X));
		sili_pwrite(ap, SILI_PREG_INT_STATUS,
			    is & (SILI_PREG_IST_PHYRDYCHG | SILI_PREG_IST_DEVEXCHG));

		is &= ~(SILI_PREG_IST_PHYRDYCHG | SILI_PREG_IST_DEVEXCHG);
		kprintf("%s: Port change\n", PORTNAME(ap));

		switch (sili_pread(ap, SILI_PREG_SSTS) & SILI_PREG_SSTS_DET) {
		case SILI_PREG_SSTS_DET_DEV:
			if (ap->ap_type == ATA_PORT_T_NONE &&
			    ap->ap_probe == ATA_PROBE_FAILED) {
				need = NEED_HOTPLUG_INSERT;
				goto fatal;
			}
			break;
		default:
			kprintf("%s: Device lost\n", PORTNAME(ap));
			if (ap->ap_type != ATA_PORT_T_NONE) {
				need = NEED_HOTPLUG_REMOVE;
				goto fatal;
			}
			break;
		}
	}

	/*
	 * Check for remaining errors - they are fatal. (blockable)
	 */
	if (is & fatal_mask) {
		u_int32_t serr;

		sili_pwrite(ap, SILI_PREG_INT_STATUS, is & fatal_mask);

		serr = sili_pread(ap, SILI_PREG_SERR);
		kprintf("%s: Unrecoverable errors (IS: %b, SERR: %b), "
			"disabling port.\n",
			PORTNAME(ap),
			is, SILI_PFMT_INT_STATUS,
			serr, SILI_PFMT_SERR
		);
		is &= ~fatal_mask;
		/* XXX try recovery first */
		goto fatal;
	}

	/*
	 * Fail all outstanding commands if we know the port won't recover.
	 *
	 * We may have a ccb_at if the failed command is known and was
	 * being sent to a device over a port multiplier (PM).  In this
	 * case if the port itself has not completely failed we fail just
	 * the commands related to that target.
	 */
	if (ap->ap_state == AP_S_FATAL_ERROR &&
	    (ap->ap_active & ~ap->ap_expired)) {
		kprintf("%s: Fatal port error, expiring %08x\n",
			PORTNAME(ap), ap->ap_active & ~ap->ap_expired);
fatal:
		ap->ap_state = AP_S_FATAL_ERROR;

		/*
		 * Error all the active slots.  If running across a PM
		 * try to error out just the slots related to the target.
		 */
		active = ap->ap_active & ~ap->ap_expired;

		while (active) {
			slot = ffs(active) - 1;
			active &= ~(1 << slot);
			ccb = &ap->ap_ccbs[slot];
			sili_core_timeout(ccb, 1);
		}
	}

	/*
	 * CCB completion (non blocking).
	 *
	 * CCB completion is detected by noticing the slot bit in
	 * the port slot status register has cleared while the bit
	 * is still set in our ap_active variable.
	 *
	 * When completing expired events we must remember to reinit
	 * the port once everything is clear.
	 *
	 * Due to a single-level recursion when reading the log page,
	 * it is possible for the slot to already have been cleared
	 * for some expired tags, do not include expired tags in
	 * the list.
	 */
	active = ap->ap_active & ~sili_pread(ap, SILI_PREG_SLOTST);
	active &= ~ap->ap_expired;

	finished = active;
	while (active) {
		slot = ffs(active) - 1;
		ccb = &ap->ap_ccbs[slot];

		DPRINTF(SILI_D_INTR, "%s: slot %d is complete%s\n",
			PORTNAME(ap), slot, ccb->ccb_xa.state == ATA_S_ERROR ?
			" (error)" : "");

		active &= ~(1 << slot);

		/*
		 * XXX sync POSTREAD for return data?
		 */
		ap->ap_active &= ~(1 << ccb->ccb_slot);
		--ap->ap_active_cnt;

		/*
		 * Complete the ccb.  If the ccb was marked expired it
		 * may or may not have been cleared from the port,
		 * make sure we mark it as having timed out.
		 *
		 * In a normal completion if AUTOSENSE is set we copy
		 * the PRB LRAM rfis back to the rfis in host-memory.
		 *
		 * XXX Currently AUTOSENSE also forces exclusivity so we
		 *     can safely work around a hardware bug when reading
		 *     the LRAM.
		 */
		if (ap->ap_expired & (1 << ccb->ccb_slot)) {
			ap->ap_expired &= ~(1 << ccb->ccb_slot);
			ccb->ccb_xa.state = ATA_S_TIMEOUT;
			ccb->ccb_done(ccb);
			ccb->ccb_xa.complete(&ccb->ccb_xa);
		} else {
			if (ccb->ccb_xa.state == ATA_S_ONCHIP) {
				ccb->ccb_xa.state = ATA_S_COMPLETE;
				if (ccb->ccb_xa.flags & ATA_F_AUTOSENSE) {
					memcpy(ccb->ccb_xa.rfis,
					       &ccb->ccb_prb_lram->prb_d2h,
					       sizeof(ccb->ccb_prb_lram->prb_d2h));
					if (ccb->ccb_xa.state == ATA_S_TIMEOUT)
						ccb->ccb_xa.state = ATA_S_ERROR;
				}
			}
			ccb->ccb_done(ccb);
		}
	}
	if (is & SILI_PREG_IST_READY) {
		is &= ~SILI_PREG_IST_READY;
		sili_pwrite(ap, SILI_PREG_INT_DISABLE, SILI_PREG_INT_READY);
		sili_pwrite(ap, SILI_PREG_INT_STATUS, SILI_PREG_IST_READY);
	}

	/*
	 * If we had expired commands and were waiting for
	 * remaining commands to complete, and they have now
	 * completed, we can reinit the port.
	 *
	 * This will also clean out the expired commands.
	 * The timeout code also calls sili_port_reinit() if
	 * the only commands remaining after a timeout are all
	 * now expired commands.
	 *
	 * Otherwise just reissue.
	 */
	if (ap->ap_expired && ap->ap_active == ap->ap_expired) {
		if (finished)
			sili_port_reinit(ap);
	} else {
		sili_issue_pending_commands(ap, NULL);
	}

	/*
	 * Cleanup.  Will not be set if non-blocking.
	 */
	switch(need) {
	case NEED_HOTPLUG_INSERT:
		/*
		 * A hot-plug insertion event has occurred and all
		 * outstanding commands have already been revoked.
		 *
		 * Don't recurse if this occurs while we are
		 * resetting the port.
		 *
		 * Place the port in a continuous COMRESET state
		 * until the INIT code gets to it.
		 */
		kprintf("%s: HOTPLUG - Device inserted\n",
			PORTNAME(ap));
		ap->ap_probe = ATA_PROBE_NEED_INIT;
		sili_cam_changed(ap, NULL, -1);
		break;
	case NEED_HOTPLUG_REMOVE:
		/*
		 * A hot-plug removal event has occurred and all
		 * outstanding commands have already been revoked.
		 *
		 * Don't recurse if this occurs while we are
		 * resetting the port.
		 */
		kprintf("%s: HOTPLUG - Device removed\n",
			PORTNAME(ap));
		sili_port_hardstop(ap);
		/* ap_probe set to failed */
		sili_cam_changed(ap, NULL, -1);
		break;
	default:
		break;
	}
}

struct sili_ccb *
sili_get_ccb(struct sili_port *ap)
{
	struct sili_ccb *ccb;

	lockmgr(&ap->ap_ccb_lock, LK_EXCLUSIVE);
	ccb = TAILQ_FIRST(&ap->ap_ccb_free);
	if (ccb != NULL) {
		KKASSERT(ccb->ccb_xa.state == ATA_S_PUT);
		TAILQ_REMOVE(&ap->ap_ccb_free, ccb, ccb_entry);
		ccb->ccb_xa.state = ATA_S_SETUP;
		ccb->ccb_xa.at = NULL;
	}
	lockmgr(&ap->ap_ccb_lock, LK_RELEASE);

	return (ccb);
}

void
sili_put_ccb(struct sili_ccb *ccb)
{
	struct sili_port *ap = ccb->ccb_port;

	lockmgr(&ap->ap_ccb_lock, LK_EXCLUSIVE);
	ccb->ccb_xa.state = ATA_S_PUT;
	++ccb->ccb_xa.serial;
	TAILQ_INSERT_TAIL(&ap->ap_ccb_free, ccb, ccb_entry);
	lockmgr(&ap->ap_ccb_lock, LK_RELEASE);
}

struct sili_ccb *
sili_get_err_ccb(struct sili_port *ap)
{
	struct sili_ccb *err_ccb;

	KKASSERT((ap->ap_flags & AP_F_ERR_CCB_RESERVED) == 0);
	ap->ap_flags |= AP_F_ERR_CCB_RESERVED;

	/*
	 * Grab a CCB to use for error recovery.  This should never fail,
	 * as we ask atascsi to reserve one for us at init time.
	 */
	err_ccb = ap->ap_err_ccb;
	KKASSERT(err_ccb != NULL);
	err_ccb->ccb_xa.flags = 0;
	err_ccb->ccb_done = sili_empty_done;

	return err_ccb;
}

void
sili_put_err_ccb(struct sili_ccb *ccb)
{
	struct sili_port *ap = ccb->ccb_port;

	KKASSERT((ap->ap_flags & AP_F_ERR_CCB_RESERVED) != 0);
	KKASSERT(ccb == ap->ap_err_ccb);

	ap->ap_flags &= ~AP_F_ERR_CCB_RESERVED;
}

/*
 * Read the log page to get the NCQ error.
 *
 * There is no return value.  On success the failed command's rfis is
 * reconstructed from the log page and its state is updated in place.
 */
void
sili_port_read_ncq_error(struct sili_port *ap, int target)
{
	struct sili_ccb *ccb;
	struct ata_fis_h2d *fis;
	int status;

	DPRINTF(SILI_D_VERBOSE, "%s: read log page\n", PORTNAME(ap));

	/* Prep error CCB for READ LOG EXT, page 10h, 1 sector. */
	ccb = sili_get_err_ccb(ap);
	ccb->ccb_done = sili_empty_done;
	ccb->ccb_xa.flags = ATA_F_NOWAIT | ATA_F_READ | ATA_F_POLL;
	ccb->ccb_xa.data = ap->ap_err_scratch;
	ccb->ccb_xa.datalen = 512;
	ccb->ccb_xa.complete = sili_dummy_done;
	ccb->ccb_xa.at = &ap->ap_ata[target];
	fis = &ccb->ccb_prb->prb_h2d;
	bzero(fis, sizeof(*fis));

	fis->type = ATA_FIS_TYPE_H2D;
	fis->flags = ATA_H2D_FLAGS_CMD | target;
	fis->command = ATA_C_READ_LOG_EXT;
	fis->lba_low = 0x10;		/* queued error log page (10h) */
	fis->sector_count = 1;		/* number of sectors (1) */
	fis->sector_count_exp = 0;
	fis->lba_mid = 0;		/* starting offset */
	fis->lba_mid_exp = 0;
	fis->device = 0;

	/*
	 * NOTE: Must use sili_quick_timeout() because we hold the err_ccb.
	 */
	if (sili_load_prb(ccb) != 0) {
		status = ATA_S_ERROR;
	} else {
		ccb->ccb_xa.state = ATA_S_PENDING;
		status = sili_poll(ccb, 1000, sili_quick_timeout);
	}

	/*
	 * Just spew if it fails, there isn't much we can do at this point.
	 */
	if (status != ATA_S_COMPLETE) {
		kprintf("%s: log page read failed, slot %d was still active.\n",
			ATANAME(ap, ccb->ccb_xa.at), ccb->ccb_slot);
	}

	/* Done with the error CCB now. */
	sili_unload_prb(ccb);
	sili_put_err_ccb(ccb);

	/* Extract failed register set and tags from the scratch space. */
	if (status == ATA_S_COMPLETE) {
		struct ata_log_page_10h *log;
		int err_slot;

		log = (struct ata_log_page_10h *)ap->ap_err_scratch;
		if (log->err_regs.type & ATA_LOG_10H_TYPE_NOTQUEUED) {
			/*
			 * Not queued bit was set - wasn't an NCQ error?
			 *
			 * XXX This bit seems to be set a lot even for NCQ
			 *     errors?
			 */
		} else {
			/*
			 * Copy back the log record as a D2H register FIS.
			 */
			err_slot = log->err_regs.type &
				   ATA_LOG_10H_TYPE_TAG_MASK;
			ccb = &ap->ap_ccbs[err_slot];
			if (ap->ap_expired & (1 << ccb->ccb_slot)) {
				kprintf("%s: read NCQ error page slot=%d\n",
					ATANAME(ap, ccb->ccb_xa.at),
					err_slot);
				memcpy(&ccb->ccb_prb->prb_d2h, &log->err_regs,
				       sizeof(struct ata_fis_d2h));
				ccb->ccb_prb->prb_d2h.type = ATA_FIS_TYPE_D2H;
				ccb->ccb_prb->prb_d2h.flags = 0;
				if (ccb->ccb_xa.state == ATA_S_TIMEOUT)
					ccb->ccb_xa.state = ATA_S_ERROR;
			} else {
				kprintf("%s: read NCQ error page slot=%d, "
					"slot does not match any cmds\n",
					ATANAME(ccb->ccb_port, ccb->ccb_xa.at),
					err_slot);
			}
		}
	}
}

/*
 * Allocate memory for various structures DMAd by hardware.  The maximum
 * number of segments for these tags is 1 so the DMA memory will have a
 * single physical base address.
 */
struct sili_dmamem *
sili_dmamem_alloc(struct sili_softc *sc, bus_dma_tag_t tag)
{
	struct sili_dmamem *adm;
	int error;

	adm = kmalloc(sizeof(*adm), M_DEVBUF, M_INTWAIT | M_ZERO);

	error = bus_dmamem_alloc(tag, (void **)&adm->adm_kva,
				 BUS_DMA_ZERO, &adm->adm_map);
	if (error == 0) {
		adm->adm_tag = tag;
		error = bus_dmamap_load(tag, adm->adm_map,
					adm->adm_kva,
					bus_dma_tag_getmaxsize(tag),
					sili_dmamem_saveseg, &adm->adm_busaddr,
					0);
	}
	if (error) {
		if (adm->adm_map) {
			bus_dmamap_destroy(tag, adm->adm_map);
			adm->adm_map = NULL;
			adm->adm_tag = NULL;
			adm->adm_kva = NULL;
		}
		kfree(adm, M_DEVBUF);
		adm = NULL;
	}
	return (adm);
}

static void
sili_dmamem_saveseg(void *info, bus_dma_segment_t *segs, int nsegs, int error)
{
	KKASSERT(error == 0);
	KKASSERT(nsegs == 1);
	*(bus_addr_t *)info = segs->ds_addr;
}

void
sili_dmamem_free(struct sili_softc *sc, struct sili_dmamem *adm)
{
	if (adm->adm_map) {
		bus_dmamap_unload(adm->adm_tag, adm->adm_map);
		bus_dmamap_destroy(adm->adm_tag, adm->adm_map);
		adm->adm_map = NULL;
		adm->adm_tag = NULL;
		adm->adm_kva = NULL;
	}
	kfree(adm, M_DEVBUF);
}

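/*
 * Illustrative sketch (not part of the driver): because the tags passed to
 * sili_dmamem_alloc() allow only one segment, a consumer can treat
 * adm_busaddr as the single physical base address of the whole allocation
 * and adm_kva as its CPU mapping.  The "tag" parameter below stands in for
 * whatever single-segment tag the attach code created; the name is not
 * taken from this file.
 */
#if 0
static int
sili_dmamem_usage_sketch(struct sili_softc *sc, bus_dma_tag_t tag)
{
	struct sili_dmamem *adm;
	bus_addr_t paddr;

	adm = sili_dmamem_alloc(sc, tag);
	if (adm == NULL)
		return (ENOMEM);

	/* CPU view and device view of the same single-segment buffer */
	paddr = adm->adm_busaddr;
	bzero(adm->adm_kva, bus_dma_tag_getmaxsize(tag));

	sili_dmamem_free(sc, adm);
	return (0);
}
#endif
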
u_int32_t
sili_read(struct sili_softc *sc, bus_size_t r)
{
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
			  BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, r));
}

void
sili_write(struct sili_softc *sc, bus_size_t r, u_int32_t v)
{
	bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v);
	bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
			  BUS_SPACE_BARRIER_WRITE);
}

u_int32_t
sili_pread(struct sili_port *ap, bus_size_t r)
{
	bus_space_barrier(ap->ap_sc->sc_iot, ap->ap_ioh, r, 4,
			  BUS_SPACE_BARRIER_READ);
	return (bus_space_read_4(ap->ap_sc->sc_iot, ap->ap_ioh, r));
}

void
sili_pwrite(struct sili_port *ap, bus_size_t r, u_int32_t v)
{
	bus_space_write_4(ap->ap_sc->sc_iot, ap->ap_ioh, r, v);
	bus_space_barrier(ap->ap_sc->sc_iot, ap->ap_ioh, r, 4,
			  BUS_SPACE_BARRIER_WRITE);
}

/*
 * Wait up to (timeout) milliseconds for the masked port register to
 * match the target.
 *
 * Timeout is in milliseconds.
 */
int
sili_pwait_eq(struct sili_port *ap, int timeout,
	      bus_size_t r, u_int32_t mask, u_int32_t target)
{
	int t;

	/*
	 * Loop hard up to 100uS
	 */
	for (t = 0; t < 100; ++t) {
		if ((sili_pread(ap, r) & mask) == target)
			return (0);
		sili_os_hardsleep(1);	/* us */
	}

	do {
		timeout -= sili_os_softsleep();
		if ((sili_pread(ap, r) & mask) == target)
			return (0);
	} while (timeout > 0);
	return (1);
}

int
sili_wait_ne(struct sili_softc *sc, bus_size_t r, u_int32_t mask,
	     u_int32_t target)
{
	int t;

	/*
	 * Loop hard up to 100uS
	 */
	for (t = 0; t < 100; ++t) {
		if ((sili_read(sc, r) & mask) != target)
			return (0);
		sili_os_hardsleep(1);	/* us */
	}

	/*
	 * And one millisecond the slow way
	 */
	t = 1000;
	do {
		t -= sili_os_softsleep();
		if ((sili_read(sc, r) & mask) != target)
			return (0);
	} while (t > 0);

	return (1);
}

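/*
 * Illustrative sketch (not part of the driver): a typical use of
 * sili_pwait_eq() is to poll a port register until a masked field reaches
 * a desired value.  Here we wait up to one second for SStatus to report an
 * attached device; the particular register, mask, and timeout are examples
 * only.
 */
#if 0
static int
sili_pwait_eq_usage_sketch(struct sili_port *ap)
{
	if (sili_pwait_eq(ap, 1000, SILI_PREG_SSTS,
			  SILI_PREG_SSTS_DET, SILI_PREG_SSTS_DET_DEV)) {
		kprintf("%s: no device detected\n", PORTNAME(ap));
		return (1);		/* timed out */
	}
	return (0);			/* masked field matched the target */
}
#endif
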
/*
 * Acquire an ata transfer.
 *
 * Pass a NULL at for direct-attached transfers, and a non-NULL at for
 * targets that go through the port multiplier.
 */
struct ata_xfer *
sili_ata_get_xfer(struct sili_port *ap, struct ata_port *at)
{
	struct sili_ccb *ccb;

	ccb = sili_get_ccb(ap);
	if (ccb == NULL) {
		DPRINTF(SILI_D_XFER, "%s: sili_ata_get_xfer: NULL ccb\n",
			PORTNAME(ap));
		return (NULL);
	}

	DPRINTF(SILI_D_XFER, "%s: sili_ata_get_xfer got slot %d\n",
		PORTNAME(ap), ccb->ccb_slot);

	bzero(ccb->ccb_xa.fis, sizeof(*ccb->ccb_xa.fis));
	ccb->ccb_xa.at = at;
	ccb->ccb_xa.fis->type = ATA_FIS_TYPE_H2D;

	return (&ccb->ccb_xa);
}

void
sili_ata_put_xfer(struct ata_xfer *xa)
{
	struct sili_ccb *ccb = (struct sili_ccb *)xa;

	DPRINTF(SILI_D_XFER, "sili_ata_put_xfer slot %d\n", ccb->ccb_slot);

	sili_put_ccb(ccb);
}

int
sili_ata_cmd(struct ata_xfer *xa)
{
	struct sili_ccb *ccb = (struct sili_ccb *)xa;

	KKASSERT(xa->state == ATA_S_SETUP);

	if (ccb->ccb_port->ap_state == AP_S_FATAL_ERROR)
		goto failcmd;
#if 0
	kprintf("%s: started std command %b ccb %d ccb_at %p %d\n",
		ATANAME(ccb->ccb_port, ccb->ccb_xa.at),
		sili_pread(ccb->ccb_port, SILI_PREG_CMD), SILI_PFMT_CMD,
		ccb->ccb_slot,
		ccb->ccb_xa.at,
		ccb->ccb_xa.at ? ccb->ccb_xa.at->at_target : -1);
#endif

	ccb->ccb_done = sili_ata_cmd_done;

	if (sili_load_prb(ccb) != 0)
		goto failcmd;

	xa->state = ATA_S_PENDING;

	if (xa->flags & ATA_F_POLL)
		return (sili_poll(ccb, xa->timeout, sili_ata_cmd_timeout));

	crit_enter();
	KKASSERT((xa->flags & ATA_F_TIMEOUT_EXPIRED) == 0);
	xa->flags |= ATA_F_TIMEOUT_DESIRED;
	sili_start(ccb);
	crit_exit();
	return (xa->state);

failcmd:
	crit_enter();
	xa->state = ATA_S_ERROR;
	xa->complete(xa);
	crit_exit();
	return (ATA_S_ERROR);
}

static void
sili_ata_cmd_done(struct sili_ccb *ccb)
{
	struct ata_xfer *xa = &ccb->ccb_xa;
	int serial;

	/*
	 * NOTE: The callout does not lock the port and may race us
	 *	 modifying the flags, so make sure it is stopped.
	 */
	if (xa->flags & ATA_F_TIMEOUT_RUNNING) {
		serial = ccb->ccb_xa.serial;
		callout_stop_sync(&ccb->ccb_timeout);
		if (serial != ccb->ccb_xa.serial) {
			kprintf("%s: Warning: timeout race ccb %p\n",
				PORTNAME(ccb->ccb_port), ccb);
			return;
		}
		xa->flags &= ~ATA_F_TIMEOUT_RUNNING;
	}
	xa->flags &= ~(ATA_F_TIMEOUT_DESIRED | ATA_F_TIMEOUT_EXPIRED);

	KKASSERT(xa->state != ATA_S_ONCHIP);
	sili_unload_prb(ccb);

	if (xa->state != ATA_S_TIMEOUT)
		xa->complete(xa);
}

/*
 * Timeout from the callout, MPSAFE - nothing can mess with the CCB's
 * flags while the callout is running.
 *
 * We can't safely get the port lock here or delay; doing so could block
 * the callout thread.
 */
static void
sili_ata_cmd_timeout_unserialized(void *arg)
{
	struct sili_ccb *ccb = arg;
	struct sili_port *ap = ccb->ccb_port;

	ccb->ccb_xa.flags &= ~ATA_F_TIMEOUT_RUNNING;
	ccb->ccb_xa.flags |= ATA_F_TIMEOUT_EXPIRED;
	sili_os_signal_port_thread(ap, AP_SIGF_TIMEOUT);
}

void
sili_ata_cmd_timeout(struct sili_ccb *ccb)
{
	sili_core_timeout(ccb, 0);
}

/*
 * Timeout code, typically called when the port command processor is running.
 *
 * Returns 0 if all timeout processing completed, non-zero if it is still
 * in progress.
 */
static int
sili_core_timeout(struct sili_ccb *ccb, int really_error)
{
	struct ata_xfer *xa = &ccb->ccb_xa;
	struct sili_port *ap = ccb->ccb_port;
	struct ata_port *at;

	at = ccb->ccb_xa.at;

	kprintf("%s: CMD %s state=%d slot=%d\n"
		"\t active=%08x\n"
		"\texpired=%08x\n"
		"\thactive=%08x\n",
		ATANAME(ap, at),
		(really_error ? "ERROR" : "TIMEOUT"),
		ccb->ccb_xa.state, ccb->ccb_slot,
		ap->ap_active,
		ap->ap_expired,
		sili_pread(ap, SILI_PREG_SLOTST));

	/*
	 * NOTE: The timeout will not be running if the command was polled.
	 *	 If we got here at least one of these flags should be set.
	 *
	 *	 However, it might be running if we are called from the
	 *	 interrupt error handling code.
	 */
	KKASSERT(xa->flags & (ATA_F_POLL | ATA_F_TIMEOUT_DESIRED |
			      ATA_F_TIMEOUT_RUNNING));
	if (xa->flags & ATA_F_TIMEOUT_RUNNING) {
		callout_stop(&ccb->ccb_timeout);
		xa->flags &= ~ATA_F_TIMEOUT_RUNNING;
	}
	xa->flags &= ~ATA_F_TIMEOUT_EXPIRED;

	if (ccb->ccb_xa.state == ATA_S_PENDING) {
		TAILQ_REMOVE(&ap->ap_ccb_pending, ccb, ccb_entry);
		ccb->ccb_xa.state = ATA_S_TIMEOUT;
		ccb->ccb_done(ccb);
		xa->complete(xa);
		sili_issue_pending_commands(ap, NULL);
		return(1);
	}
	if (ccb->ccb_xa.state != ATA_S_ONCHIP) {
		kprintf("%s: Unexpected state during timeout: %d\n",
			ATANAME(ap, at), ccb->ccb_xa.state);
		return(1);
	}

	/*
	 * We can't process timeouts while other commands are running.
	 */
	ap->ap_expired |= 1 << ccb->ccb_slot;

	if (ap->ap_active != ap->ap_expired) {
		kprintf("%s: Deferred timeout until it is safe, slot %d\n",
			ATANAME(ap, at), ccb->ccb_slot);
		return(1);
	}

	/*
	 * We have to issue a port reinit.  We don't read an error log
	 * page for timeouts.  Reiniting the port will clear all pending
	 * commands.
	 */
	sili_port_reinit(ap);
	return(0);
}

/*
 * Used by the softreset, pm_port_probe, and read_ncq_error code only,
 * in very specialized, controlled circumstances.
 */
void
sili_quick_timeout(struct sili_ccb *ccb)
{
	struct sili_port *ap = ccb->ccb_port;

	switch (ccb->ccb_xa.state) {
	case ATA_S_PENDING:
		TAILQ_REMOVE(&ap->ap_ccb_pending, ccb, ccb_entry);
		ccb->ccb_xa.state = ATA_S_TIMEOUT;
		break;
	case ATA_S_ONCHIP:
		KKASSERT((ap->ap_active & ~ap->ap_expired) ==
			 (1 << ccb->ccb_slot));
		ccb->ccb_xa.state = ATA_S_TIMEOUT;
		ap->ap_active &= ~(1 << ccb->ccb_slot);
		KKASSERT(ap->ap_active_cnt > 0);
		--ap->ap_active_cnt;
		sili_port_reinit(ap);
		break;
	default:
		panic("%s: sili_quick_timeout: ccb in bad state %d",
		      ATANAME(ap, ccb->ccb_xa.at), ccb->ccb_xa.state);
	}
}

static void
sili_dummy_done(struct ata_xfer *xa)
{
}

static void
sili_empty_done(struct sili_ccb *ccb)
{
}
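
/*
 * Illustrative sketch (not part of the driver): the normal, non-polled path
 * through the xfer interface above.  A caller acquires an xfer, fills in the
 * H2D FIS and transfer parameters, supplies a completion callback, and
 * submits it with sili_ata_cmd().  The command used here (READ LOG EXT into
 * a caller-supplied 512-byte buffer) and the point at which the xfer is
 * returned are examples only; real consumers follow whatever completion
 * protocol the upper layer expects.
 */
#if 0
static void
sili_xfer_sketch_done(struct ata_xfer *xa)
{
	/* called from sili_ata_cmd_done() when the slot completes */
	if (xa->state != ATA_S_COMPLETE)
		kprintf("sketch command failed, state %d\n", xa->state);
	sili_ata_put_xfer(xa);		/* return the ccb to the free list */
}

static int
sili_xfer_usage_sketch(struct sili_port *ap, void *buf)
{
	struct ata_xfer *xa;

	xa = sili_ata_get_xfer(ap, NULL);	/* NULL at: direct-attached */
	if (xa == NULL)
		return (ENOMEM);

	/* sili_ata_get_xfer() already set fis->type to ATA_FIS_TYPE_H2D */
	xa->fis->flags = ATA_H2D_FLAGS_CMD;
	xa->fis->command = ATA_C_READ_LOG_EXT;
	xa->fis->lba_low = 0x10;
	xa->fis->sector_count = 1;

	xa->flags = ATA_F_READ;			/* queued, not polled */
	xa->data = buf;
	xa->datalen = 512;
	xa->timeout = 1000;			/* milliseconds */
	xa->complete = sili_xfer_sketch_done;

	return (sili_ata_cmd(xa));
}
#endif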