1 /* $OpenBSD: pciide.c,v 1.137 2003/07/30 19:59:17 grange Exp $ */ 2 /* $NetBSD: pciide.c,v 1.127 2001/08/03 01:31:08 tsutsui Exp $ */ 3 4 /* 5 * Copyright (c) 1999, 2000, 2001 Manuel Bouyer. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. All advertising materials mentioning features or use of this software 16 * must display the following acknowledgement: 17 * This product includes software developed by Manuel Bouyer. 18 * 4. Neither the name of the University nor the names of its contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 25 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 * 33 */ 34 35 /* 36 * Copyright (c) 1996, 1998 Christopher G. Demetriou. All rights reserved. 37 * 38 * Redistribution and use in source and binary forms, with or without 39 * modification, are permitted provided that the following conditions 40 * are met: 41 * 1. Redistributions of source code must retain the above copyright 42 * notice, this list of conditions and the following disclaimer. 43 * 2. Redistributions in binary form must reproduce the above copyright 44 * notice, this list of conditions and the following disclaimer in the 45 * documentation and/or other materials provided with the distribution. 46 * 3. All advertising materials mentioning features or use of this software 47 * must display the following acknowledgement: 48 * This product includes software developed by Christopher G. Demetriou 49 * for the NetBSD Project. 50 * 4. The name of the author may not be used to endorse or promote products 51 * derived from this software without specific prior written permission 52 * 53 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 54 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 55 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
56 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 57 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 58 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 59 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 60 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 61 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 62 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 63 */ 64 65 /* 66 * PCI IDE controller driver. 67 * 68 * Author: Christopher G. Demetriou, March 2, 1998 (derived from NetBSD 69 * sys/dev/pci/ppb.c, revision 1.16). 70 * 71 * See "PCI IDE Controller Specification, Revision 1.0 3/4/94" and 72 * "Programming Interface for Bus Master IDE Controller, Revision 1.0 73 * 5/16/94" from the PCI SIG. 74 * 75 */ 76 77 #define DEBUG_DMA 0x01 78 #define DEBUG_XFERS 0x02 79 #define DEBUG_FUNCS 0x08 80 #define DEBUG_PROBE 0x10 81 82 #ifdef WDCDEBUG 83 int wdcdebug_pciide_mask = 0; 84 #define WDCDEBUG_PRINT(args, level) do { \ 85 if ((wdcdebug_pciide_mask & (level)) != 0) \ 86 printf args; \ 87 } while (0) 88 #else 89 #define WDCDEBUG_PRINT(args, level) 90 #endif 91 #include <sys/param.h> 92 #include <sys/systm.h> 93 #include <sys/device.h> 94 #include <sys/malloc.h> 95 96 #include <uvm/uvm_extern.h> 97 98 #include <machine/endian.h> 99 100 #include <dev/pci/pcireg.h> 101 #include <dev/pci/pcivar.h> 102 #include <dev/pci/pcidevs.h> 103 #include <dev/pci/pciidereg.h> 104 #include <dev/pci/pciidevar.h> 105 #include <dev/pci/pciide_piix_reg.h> 106 #include <dev/pci/pciide_amd_reg.h> 107 #include <dev/pci/pciide_apollo_reg.h> 108 #include <dev/pci/pciide_cmd_reg.h> 109 #include <dev/pci/pciide_sii3112_reg.h> 110 #include <dev/pci/pciide_cy693_reg.h> 111 #include <dev/pci/pciide_sis_reg.h> 112 #include <dev/pci/pciide_acer_reg.h> 113 #include <dev/pci/pciide_pdc202xx_reg.h> 114 #include <dev/pci/pciide_opti_reg.h> 115 #include <dev/pci/pciide_hpt_reg.h> 116 #include <dev/pci/pciide_acard_reg.h> 117 #include <dev/pci/pciide_natsemi_reg.h> 118 #include <dev/pci/pciide_nforce_reg.h> 119 #include <dev/pci/pciide_i31244_reg.h> 120 #include <dev/pci/cy82c693var.h> 121 122 #include <dev/ata/atavar.h> 123 #include <dev/ic/wdcreg.h> 124 #include <dev/ic/wdcvar.h> 125 126 /* inlines for reading/writing 8-bit PCI registers */ 127 static __inline u_int8_t pciide_pci_read(pci_chipset_tag_t, pcitag_t, 128 int); 129 static __inline void pciide_pci_write(pci_chipset_tag_t, pcitag_t, 130 int, u_int8_t); 131 132 static __inline u_int8_t 133 pciide_pci_read(pc, pa, reg) 134 pci_chipset_tag_t pc; 135 pcitag_t pa; 136 int reg; 137 { 138 139 return (pci_conf_read(pc, pa, (reg & ~0x03)) >> 140 ((reg & 0x03) * 8) & 0xff); 141 } 142 143 static __inline void 144 pciide_pci_write(pc, pa, reg, val) 145 pci_chipset_tag_t pc; 146 pcitag_t pa; 147 int reg; 148 u_int8_t val; 149 { 150 pcireg_t pcival; 151 152 pcival = pci_conf_read(pc, pa, (reg & ~0x03)); 153 pcival &= ~(0xff << ((reg & 0x03) * 8)); 154 pcival |= (val << ((reg & 0x03) * 8)); 155 pci_conf_write(pc, pa, (reg & ~0x03), pcival); 156 } 157 158 struct pciide_softc { 159 struct wdc_softc sc_wdcdev; /* common wdc definitions */ 160 pci_chipset_tag_t sc_pc; /* PCI registers info */ 161 pcitag_t sc_tag; 162 void *sc_pci_ih; /* PCI interrupt handle */ 163 int sc_dma_ok; /* bus-master DMA info */ 164 bus_space_tag_t sc_dma_iot; 165 bus_space_handle_t sc_dma_ioh; 166 bus_dma_tag_t sc_dmat; 167 168 /* 169 * Some controllers 
might have DMA restrictions other than 170 * the norm. 171 */ 172 bus_size_t sc_dma_maxsegsz; 173 bus_size_t sc_dma_boundary; 174 175 /* For Cypress */ 176 const struct cy82c693_handle *sc_cy_handle; 177 int sc_cy_compatchan; 178 179 /* For SiS */ 180 u_int8_t sis_type; 181 182 /* Chip description */ 183 const struct pciide_product_desc *sc_pp; 184 /* Chip revision */ 185 int sc_rev; 186 /* common definitions */ 187 struct channel_softc *wdc_chanarray[PCIIDE_NUM_CHANNELS]; 188 /* internal bookkeeping */ 189 struct pciide_channel { /* per-channel data */ 190 struct channel_softc wdc_channel; /* generic part */ 191 char *name; 192 int hw_ok; /* hardware mapped & OK? */ 193 int compat; /* is it compat? */ 194 int dma_in_progress; 195 void *ih; /* compat or pci handle */ 196 bus_space_handle_t ctl_baseioh; /* ctrl regs blk, native mode */ 197 /* DMA tables and DMA map for xfer, for each drive */ 198 struct pciide_dma_maps { 199 bus_dmamap_t dmamap_table; 200 struct idedma_table *dma_table; 201 bus_dmamap_t dmamap_xfer; 202 int dma_flags; 203 } dma_maps[2]; 204 } pciide_channels[PCIIDE_NUM_CHANNELS]; 205 }; 206 207 void default_chip_map(struct pciide_softc*, struct pci_attach_args*); 208 209 void sata_setup_channel(struct channel_softc *); 210 211 void piix_chip_map(struct pciide_softc*, struct pci_attach_args*); 212 void piix_setup_channel(struct channel_softc*); 213 void piix3_4_setup_channel(struct channel_softc*); 214 215 static u_int32_t piix_setup_idetim_timings(u_int8_t, u_int8_t, u_int8_t); 216 static u_int32_t piix_setup_idetim_drvs(struct ata_drive_datas*); 217 static u_int32_t piix_setup_sidetim_timings(u_int8_t, u_int8_t, u_int8_t); 218 219 void amd756_chip_map(struct pciide_softc*, struct pci_attach_args*); 220 void amd756_setup_channel(struct channel_softc*); 221 222 void apollo_chip_map(struct pciide_softc*, struct pci_attach_args*); 223 void apollo_setup_channel(struct channel_softc*); 224 225 void cmd_chip_map(struct pciide_softc*, struct pci_attach_args*); 226 void cmd0643_9_chip_map(struct pciide_softc*, struct pci_attach_args*); 227 void cmd0643_9_setup_channel(struct channel_softc*); 228 void cmd680_chip_map(struct pciide_softc*, struct pci_attach_args*); 229 void cmd680_setup_channel(struct channel_softc*); 230 void cmd680_channel_map(struct pci_attach_args *, struct pciide_softc *, int); 231 void cmd_channel_map(struct pci_attach_args *, 232 struct pciide_softc *, int); 233 int cmd_pci_intr(void *); 234 void cmd646_9_irqack(struct channel_softc *); 235 236 void sii3112_chip_map(struct pciide_softc*, struct pci_attach_args*); 237 void sii3112_setup_channel(struct channel_softc*); 238 239 void cy693_chip_map(struct pciide_softc*, struct pci_attach_args*); 240 void cy693_setup_channel(struct channel_softc*); 241 242 void sis_chip_map(struct pciide_softc*, struct pci_attach_args*); 243 void sis_setup_channel(struct channel_softc*); 244 void sis96x_setup_channel(struct channel_softc *); 245 int sis_hostbr_match(struct pci_attach_args *); 246 int sis_south_match(struct pci_attach_args *); 247 248 void natsemi_chip_map(struct pciide_softc*, struct pci_attach_args*); 249 void natsemi_setup_channel(struct channel_softc*); 250 int natsemi_pci_intr(void *); 251 void natsemi_irqack(struct channel_softc *); 252 253 void acer_chip_map(struct pciide_softc*, struct pci_attach_args*); 254 void acer_setup_channel(struct channel_softc*); 255 int acer_pci_intr(void *); 256 257 void pdc202xx_chip_map(struct pciide_softc*, struct pci_attach_args*); 258 void pdc202xx_setup_channel(struct 
channel_softc*); 259 void pdc20268_setup_channel(struct channel_softc*); 260 int pdc202xx_pci_intr(void *); 261 int pdc20265_pci_intr(void *); 262 void pdc20262_dma_start(void *, int, int); 263 int pdc20262_dma_finish(void *, int, int); 264 265 void opti_chip_map(struct pciide_softc*, struct pci_attach_args*); 266 void opti_setup_channel(struct channel_softc*); 267 268 void hpt_chip_map(struct pciide_softc*, struct pci_attach_args*); 269 void hpt_setup_channel(struct channel_softc*); 270 int hpt_pci_intr(void *); 271 272 void acard_chip_map(struct pciide_softc*, struct pci_attach_args*); 273 void acard_setup_channel(struct channel_softc*); 274 int acard_pci_intr(void *); 275 276 void serverworks_chip_map(struct pciide_softc*, struct pci_attach_args*); 277 void serverworks_setup_channel(struct channel_softc*); 278 int serverworks_pci_intr(void *); 279 280 void nforce_chip_map(struct pciide_softc *, struct pci_attach_args *); 281 void nforce_setup_channel(struct channel_softc *); 282 int nforce_pci_intr(void *); 283 284 void artisea_chip_map(struct pciide_softc *, struct pci_attach_args *); 285 286 void pciide_channel_dma_setup(struct pciide_channel *); 287 int pciide_dma_table_setup(struct pciide_softc*, int, int); 288 int pciide_dma_init(void *, int, int, void *, size_t, int); 289 void pciide_dma_start(void *, int, int); 290 int pciide_dma_finish(void *, int, int); 291 void pciide_irqack(struct channel_softc *); 292 void pciide_print_modes(struct pciide_channel *); 293 void pciide_print_channels(int, pcireg_t); 294 295 struct pciide_product_desc { 296 u_int32_t ide_product; 297 u_short ide_flags; 298 /* map and setup chip, probe drives */ 299 void (*chip_map)(struct pciide_softc*, struct pci_attach_args*); 300 }; 301 302 /* Flags for ide_flags */ 303 #define IDE_PCI_CLASS_OVERRIDE 0x0001 /* accept even if class != pciide */ 304 #define IDE_16BIT_IOSPACE 0x0002 /* I/O space BARS ignore upper word */ 305 306 /* Default product description for devices not known from this controller */ 307 const struct pciide_product_desc default_product_desc = { 308 0, /* Generic PCI IDE controller */ 309 0, 310 default_chip_map 311 }; 312 313 const struct pciide_product_desc pciide_intel_products[] = { 314 { PCI_PRODUCT_INTEL_82092AA, /* Intel 82092AA IDE */ 315 0, 316 default_chip_map 317 }, 318 { PCI_PRODUCT_INTEL_82371FB_IDE, /* Intel 82371FB IDE (PIIX) */ 319 0, 320 piix_chip_map 321 }, 322 { PCI_PRODUCT_INTEL_82371SB_IDE, /* Intel 82371SB IDE (PIIX3) */ 323 0, 324 piix_chip_map 325 }, 326 { PCI_PRODUCT_INTEL_82371AB_IDE, /* Intel 82371AB IDE (PIIX4) */ 327 0, 328 piix_chip_map 329 }, 330 { PCI_PRODUCT_INTEL_82440MX_IDE, /* Intel 82440MX IDE */ 331 0, 332 piix_chip_map 333 }, 334 { PCI_PRODUCT_INTEL_82801AA_IDE, /* Intel 82801AA IDE (ICH) */ 335 0, 336 piix_chip_map 337 }, 338 { PCI_PRODUCT_INTEL_82801AB_IDE, /* Intel 82801AB IDE (ICH0) */ 339 0, 340 piix_chip_map 341 }, 342 { PCI_PRODUCT_INTEL_82801BAM_IDE, /* Intel 82801BAM IDE (ICH2) */ 343 0, 344 piix_chip_map 345 }, 346 { PCI_PRODUCT_INTEL_82801BA_IDE, /* Intel 82801BA IDE (ICH2) */ 347 0, 348 piix_chip_map 349 }, 350 { PCI_PRODUCT_INTEL_82801CAM_IDE, /* Intel 82801CAM IDE (ICH3) */ 351 0, 352 piix_chip_map 353 }, 354 { PCI_PRODUCT_INTEL_82801CA_IDE, /* Intel 82801CA IDE (ICH3) */ 355 0, 356 piix_chip_map 357 }, 358 { PCI_PRODUCT_INTEL_82801DB_IDE, /* Intel 82801DB IDE (ICH4) */ 359 0, 360 piix_chip_map 361 }, 362 { PCI_PRODUCT_INTEL_82801DBM_IDE, /* Intel 82801DBM IDE (ICH4-M) */ 363 0, 364 piix_chip_map 365 }, 366 { PCI_PRODUCT_INTEL_82801EB_IDE, 
/* Intel 82801EB/ER (ICH5/5R) IDE */ 367 0, 368 piix_chip_map 369 }, 370 { PCI_PRODUCT_INTEL_82801EB_SATA, /* Intel 82801EB/ER (ICH5/5R) SATA */ 371 0, 372 piix_chip_map 373 }, 374 { PCI_PRODUCT_INTEL_31244, /* Intel 31244 SATA */ 375 0, 376 artisea_chip_map 377 } 378 }; 379 380 const struct pciide_product_desc pciide_amd_products[] = { 381 { PCI_PRODUCT_AMD_PBC756_IDE, /* AMD 756 */ 382 0, 383 amd756_chip_map 384 }, 385 { PCI_PRODUCT_AMD_766_IDE, /* AMD 766 */ 386 0, 387 amd756_chip_map 388 }, 389 { PCI_PRODUCT_AMD_PBC768_IDE, 390 0, 391 amd756_chip_map 392 }, 393 { PCI_PRODUCT_AMD_8111_IDE, 394 0, 395 amd756_chip_map 396 } 397 }; 398 399 #ifdef notyet 400 const struct pciide_product_desc pciide_opti_products[] = { 401 402 { PCI_PRODUCT_OPTI_82C621, 403 0, 404 opti_chip_map 405 }, 406 { PCI_PRODUCT_OPTI_82C568, 407 0, 408 opti_chip_map 409 }, 410 { PCI_PRODUCT_OPTI_82D568, 411 0, 412 opti_chip_map 413 }, 414 }; 415 #endif 416 417 const struct pciide_product_desc pciide_cmd_products[] = { 418 { PCI_PRODUCT_CMDTECH_640, /* CMD Technology PCI0640 */ 419 0, 420 cmd_chip_map 421 }, 422 { PCI_PRODUCT_CMDTECH_643, /* CMD Technology PCI0643 */ 423 0, 424 cmd0643_9_chip_map 425 }, 426 { PCI_PRODUCT_CMDTECH_646, /* CMD Technology PCI0646 */ 427 0, 428 cmd0643_9_chip_map 429 }, 430 { PCI_PRODUCT_CMDTECH_648, /* CMD Technology PCI0648 */ 431 IDE_PCI_CLASS_OVERRIDE, 432 cmd0643_9_chip_map 433 }, 434 { PCI_PRODUCT_CMDTECH_649, /* CMD Technology PCI0649 */ 435 IDE_PCI_CLASS_OVERRIDE, 436 cmd0643_9_chip_map 437 }, 438 { PCI_PRODUCT_CMDTECH_680, /* CMD Technology PCI0680 */ 439 IDE_PCI_CLASS_OVERRIDE, 440 cmd680_chip_map 441 }, 442 { PCI_PRODUCT_CMDTECH_3112, /* SiI 3112 SATA */ 443 IDE_PCI_CLASS_OVERRIDE, /* XXX: subclass RAID */ 444 sii3112_chip_map 445 } 446 }; 447 448 const struct pciide_product_desc pciide_via_products[] = { 449 { PCI_PRODUCT_VIATECH_VT82C416, /* VIA VT82C416 IDE */ 450 0, 451 apollo_chip_map 452 }, 453 { PCI_PRODUCT_VIATECH_VT82C571, /* VIA VT82C571 IDE */ 454 0, 455 apollo_chip_map 456 } 457 }; 458 459 const struct pciide_product_desc pciide_cypress_products[] = { 460 { PCI_PRODUCT_CONTAQ_82C693, /* Contaq CY82C693 IDE */ 461 IDE_16BIT_IOSPACE, 462 cy693_chip_map 463 } 464 }; 465 466 const struct pciide_product_desc pciide_sis_products[] = { 467 { PCI_PRODUCT_SIS_5513, /* SIS 5513 EIDE */ 468 0, 469 sis_chip_map 470 } 471 }; 472 473 const struct pciide_product_desc pciide_natsemi_products[] = { 474 { PCI_PRODUCT_NS_PC87415, /* National Semi PC87415 IDE */ 475 0, 476 natsemi_chip_map 477 } 478 }; 479 480 const struct pciide_product_desc pciide_acer_products[] = { 481 { PCI_PRODUCT_ALI_M5229, /* Acer Labs M5229 UDMA IDE */ 482 0, 483 acer_chip_map 484 } 485 }; 486 487 const struct pciide_product_desc pciide_triones_products[] = { 488 { PCI_PRODUCT_TRIONES_HPT366, /* Highpoint HPT36x/37x IDE */ 489 IDE_PCI_CLASS_OVERRIDE, 490 hpt_chip_map, 491 }, 492 { PCI_PRODUCT_TRIONES_HPT372A, /* Highpoint HPT372A IDE */ 493 IDE_PCI_CLASS_OVERRIDE, 494 hpt_chip_map 495 }, 496 { PCI_PRODUCT_TRIONES_HPT302, /* Highpoint HPT302 IDE */ 497 IDE_PCI_CLASS_OVERRIDE, 498 hpt_chip_map 499 }, 500 { PCI_PRODUCT_TRIONES_HPT371, /* Highpoint HPT371 IDE */ 501 IDE_PCI_CLASS_OVERRIDE, 502 hpt_chip_map 503 }, 504 { PCI_PRODUCT_TRIONES_HPT374, /* Highpoint HPT374 IDE */ 505 IDE_PCI_CLASS_OVERRIDE, 506 hpt_chip_map 507 } 508 }; 509 510 const struct pciide_product_desc pciide_promise_products[] = { 511 { PCI_PRODUCT_PROMISE_PDC20246, 512 IDE_PCI_CLASS_OVERRIDE, 513 pdc202xx_chip_map, 514 }, 515 { 
PCI_PRODUCT_PROMISE_PDC20262, 516 IDE_PCI_CLASS_OVERRIDE, 517 pdc202xx_chip_map, 518 }, 519 { PCI_PRODUCT_PROMISE_PDC20265, 520 IDE_PCI_CLASS_OVERRIDE, 521 pdc202xx_chip_map, 522 }, 523 { PCI_PRODUCT_PROMISE_PDC20267, 524 IDE_PCI_CLASS_OVERRIDE, 525 pdc202xx_chip_map, 526 }, 527 { PCI_PRODUCT_PROMISE_PDC20268, 528 IDE_PCI_CLASS_OVERRIDE, 529 pdc202xx_chip_map, 530 }, 531 { PCI_PRODUCT_PROMISE_PDC20268R, 532 IDE_PCI_CLASS_OVERRIDE, 533 pdc202xx_chip_map, 534 }, 535 { PCI_PRODUCT_PROMISE_PDC20269, 536 IDE_PCI_CLASS_OVERRIDE, 537 pdc202xx_chip_map, 538 }, 539 { PCI_PRODUCT_PROMISE_PDC20271, 540 IDE_PCI_CLASS_OVERRIDE, 541 pdc202xx_chip_map, 542 }, 543 { PCI_PRODUCT_PROMISE_PDC20275, 544 IDE_PCI_CLASS_OVERRIDE, 545 pdc202xx_chip_map, 546 }, 547 { PCI_PRODUCT_PROMISE_PDC20276, 548 IDE_PCI_CLASS_OVERRIDE, 549 pdc202xx_chip_map, 550 }, 551 { PCI_PRODUCT_PROMISE_PDC20277, 552 IDE_PCI_CLASS_OVERRIDE, 553 pdc202xx_chip_map, 554 }, 555 { PCI_PRODUCT_PROMISE_PDC20376, /* PDC20376 SATA */ 556 IDE_PCI_CLASS_OVERRIDE, /* XXX: subclass RAID */ 557 pdc202xx_chip_map, 558 } 559 }; 560 561 const struct pciide_product_desc pciide_acard_products[] = { 562 { PCI_PRODUCT_ACARD_ATP850U, /* Acard ATP850U Ultra33 Controller */ 563 IDE_PCI_CLASS_OVERRIDE, 564 acard_chip_map, 565 }, 566 { PCI_PRODUCT_ACARD_ATP860, /* Acard ATP860 Ultra66 Controller */ 567 IDE_PCI_CLASS_OVERRIDE, 568 acard_chip_map, 569 }, 570 { PCI_PRODUCT_ACARD_ATP860A, /* Acard ATP860-A Ultra66 Controller */ 571 IDE_PCI_CLASS_OVERRIDE, 572 acard_chip_map, 573 } 574 }; 575 576 const struct pciide_product_desc pciide_serverworks_products[] = { 577 { PCI_PRODUCT_RCC_OSB4_IDE, 578 0, 579 serverworks_chip_map, 580 }, 581 { PCI_PRODUCT_RCC_CSB5_IDE, 582 0, 583 serverworks_chip_map, 584 }, 585 { PCI_PRODUCT_RCC_CSB6_IDE, 586 0, 587 serverworks_chip_map, 588 }, 589 }; 590 591 const struct pciide_product_desc pciide_nvidia_products[] = { 592 { PCI_PRODUCT_NVIDIA_NFORCE_IDE, 593 0, 594 nforce_chip_map 595 }, 596 { PCI_PRODUCT_NVIDIA_NFORCE2_IDE, 597 0, 598 nforce_chip_map 599 } 600 }; 601 602 603 struct pciide_vendor_desc { 604 u_int32_t ide_vendor; 605 const struct pciide_product_desc *ide_products; 606 int ide_nproducts; 607 }; 608 609 const struct pciide_vendor_desc pciide_vendors[] = { 610 { PCI_VENDOR_INTEL, pciide_intel_products, 611 sizeof(pciide_intel_products)/sizeof(pciide_intel_products[0]) }, 612 { PCI_VENDOR_AMD, pciide_amd_products, 613 sizeof(pciide_amd_products)/sizeof(pciide_amd_products[0]) }, 614 #ifdef notyet 615 { PCI_VENDOR_OPTI, pciide_opti_products, 616 sizeof(pciide_opti_products)/sizeof(pciide_opti_products[0]) }, 617 #endif 618 { PCI_VENDOR_CMDTECH, pciide_cmd_products, 619 sizeof(pciide_cmd_products)/sizeof(pciide_cmd_products[0]) }, 620 { PCI_VENDOR_VIATECH, pciide_via_products, 621 sizeof(pciide_via_products)/sizeof(pciide_via_products[0]) }, 622 { PCI_VENDOR_CONTAQ, pciide_cypress_products, 623 sizeof(pciide_cypress_products)/sizeof(pciide_cypress_products[0]) }, 624 { PCI_VENDOR_SIS, pciide_sis_products, 625 sizeof(pciide_sis_products)/sizeof(pciide_sis_products[0]) }, 626 { PCI_VENDOR_NS, pciide_natsemi_products, 627 sizeof(pciide_natsemi_products)/sizeof(pciide_natsemi_products[0]) }, 628 { PCI_VENDOR_ALI, pciide_acer_products, 629 sizeof(pciide_acer_products)/sizeof(pciide_acer_products[0]) }, 630 { PCI_VENDOR_TRIONES, pciide_triones_products, 631 sizeof(pciide_triones_products)/sizeof(pciide_triones_products[0]) }, 632 { PCI_VENDOR_ACARD, pciide_acard_products, 633 
sizeof(pciide_acard_products)/sizeof(pciide_acard_products[0]) }, 634 { PCI_VENDOR_RCC, pciide_serverworks_products, 635 sizeof(pciide_serverworks_products)/sizeof(pciide_serverworks_products[0]) }, 636 { PCI_VENDOR_PROMISE, pciide_promise_products, 637 sizeof(pciide_promise_products)/sizeof(pciide_promise_products[0]) }, 638 { PCI_VENDOR_NVIDIA, pciide_nvidia_products, 639 sizeof(pciide_nvidia_products)/sizeof(pciide_nvidia_products[0]) } 640 }; 641 642 /* options passed via the 'flags' config keyword */ 643 #define PCIIDE_OPTIONS_DMA 0x01 644 645 #ifndef __OpenBSD__ 646 int pciide_match(struct device *, struct cfdata *, void *); 647 #else 648 int pciide_match(struct device *, void *, void *); 649 #endif 650 void pciide_attach(struct device *, struct device *, void *); 651 652 struct cfattach pciide_ca = { 653 sizeof(struct pciide_softc), pciide_match, pciide_attach 654 }; 655 656 #ifdef __OpenBSD__ 657 struct cfdriver pciide_cd = { 658 NULL, "pciide", DV_DULL 659 }; 660 #endif 661 int pciide_chipen(struct pciide_softc *, struct pci_attach_args *); 662 int pciide_mapregs_compat( struct pci_attach_args *, 663 struct pciide_channel *, int, bus_size_t *, bus_size_t*); 664 int pciide_mapregs_native(struct pci_attach_args *, 665 struct pciide_channel *, bus_size_t *, bus_size_t *, 666 int (*pci_intr)(void *)); 667 void pciide_mapreg_dma(struct pciide_softc *, 668 struct pci_attach_args *); 669 int pciide_chansetup(struct pciide_softc *, int, pcireg_t); 670 void pciide_mapchan(struct pci_attach_args *, 671 struct pciide_channel *, pcireg_t, bus_size_t *, bus_size_t *, 672 int (*pci_intr)(void *)); 673 int pciide_chan_candisable(struct pciide_channel *); 674 void pciide_map_compat_intr( struct pci_attach_args *, 675 struct pciide_channel *, int, int); 676 void pciide_unmap_compat_intr( struct pci_attach_args *, 677 struct pciide_channel *, int, int); 678 int pciide_compat_intr(void *); 679 int pciide_pci_intr(void *); 680 int pciide_intr_flag(struct pciide_channel *); 681 682 const struct pciide_product_desc* pciide_lookup_product(u_int32_t); 683 684 const struct pciide_product_desc * 685 pciide_lookup_product(id) 686 u_int32_t id; 687 { 688 const struct pciide_product_desc *pp; 689 const struct pciide_vendor_desc *vp; 690 int i; 691 692 for (i = 0, vp = pciide_vendors; 693 i < sizeof(pciide_vendors)/sizeof(pciide_vendors[0]); 694 vp++, i++) 695 if (PCI_VENDOR(id) == vp->ide_vendor) 696 break; 697 698 if (i == sizeof(pciide_vendors)/sizeof(pciide_vendors[0])) 699 return NULL; 700 701 for (pp = vp->ide_products, i = 0; i < vp->ide_nproducts; pp++, i++) 702 if (PCI_PRODUCT(id) == pp->ide_product) 703 break; 704 705 if (i == vp->ide_nproducts) 706 return NULL; 707 return pp; 708 } 709 710 int 711 pciide_match(parent, match, aux) 712 struct device *parent; 713 #ifdef __OpenBSD__ 714 void *match; 715 #else 716 struct cfdata *match; 717 #endif 718 void *aux; 719 { 720 struct pci_attach_args *pa = aux; 721 const struct pciide_product_desc *pp; 722 723 /* 724 * Some IDE controllers have severe bugs when used in PCI mode. 725 * We punt and attach them to the ISA bus instead. 726 */ 727 if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_PCTECH && 728 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_PCTECH_RZ1000) 729 return (0); 730 731 /* 732 * Check the ID register to see that it's a PCI IDE controller. 733 * If it is, we assume that we can deal with it; it _should_ 734 * work in a standardized way... 
735 */ 736 if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE && 737 PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) { 738 return (1); 739 } 740 741 /* 742 * Some controllers (e.g. promise Ultra-33) don't claim to be PCI IDE 743 * controllers. Let see if we can deal with it anyway. 744 */ 745 pp = pciide_lookup_product(pa->pa_id); 746 if (pp && (pp->ide_flags & IDE_PCI_CLASS_OVERRIDE)) { 747 return (1); 748 } 749 750 return (0); 751 } 752 753 void 754 pciide_attach(parent, self, aux) 755 struct device *parent, *self; 756 void *aux; 757 { 758 struct pci_attach_args *pa = aux; 759 pci_chipset_tag_t pc = pa->pa_pc; 760 pcitag_t tag = pa->pa_tag; 761 struct pciide_softc *sc = (struct pciide_softc *)self; 762 pcireg_t csr; 763 char devinfo[256]; 764 765 sc->sc_pp = pciide_lookup_product(pa->pa_id); 766 if (sc->sc_pp == NULL) { 767 sc->sc_pp = &default_product_desc; 768 pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo, 769 sizeof devinfo); 770 } 771 772 sc->sc_pc = pa->pa_pc; 773 sc->sc_tag = pa->pa_tag; 774 775 /* Set up DMA defaults; these might be adjusted by chip_map. */ 776 sc->sc_dma_maxsegsz = IDEDMA_BYTE_COUNT_MAX; 777 sc->sc_dma_boundary = IDEDMA_BYTE_COUNT_ALIGN; 778 779 WDCDEBUG_PRINT((" sc_pc=%p, sc_tag=%p, pa_class=0x%x\n", sc->sc_pc, 780 sc->sc_tag, pa->pa_class), DEBUG_PROBE); 781 782 sc->sc_pp->chip_map(sc, pa); 783 784 if (sc->sc_dma_ok) { 785 csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG); 786 csr |= PCI_COMMAND_MASTER_ENABLE; 787 pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, csr); 788 } 789 790 WDCDEBUG_PRINT(("pciide: command/status register=0x%x\n", 791 pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG)), DEBUG_PROBE); 792 } 793 794 /* tell whether the chip is enabled or not */ 795 int 796 pciide_chipen(sc, pa) 797 struct pciide_softc *sc; 798 struct pci_attach_args *pa; 799 { 800 pcireg_t csr; 801 802 csr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG); 803 if ((csr & PCI_COMMAND_IO_ENABLE) == 0 ) { 804 printf("\n%s: device disabled\n", 805 sc->sc_wdcdev.sc_dev.dv_xname); 806 return 0; 807 } 808 809 return 1; 810 } 811 812 int 813 pciide_mapregs_compat(pa, cp, compatchan, cmdsizep, ctlsizep) 814 struct pci_attach_args *pa; 815 struct pciide_channel *cp; 816 int compatchan; 817 bus_size_t *cmdsizep, *ctlsizep; 818 { 819 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 820 struct channel_softc *wdc_cp = &cp->wdc_channel; 821 822 cp->compat = 1; 823 *cmdsizep = PCIIDE_COMPAT_CMD_SIZE; 824 *ctlsizep = PCIIDE_COMPAT_CTL_SIZE; 825 826 wdc_cp->cmd_iot = pa->pa_iot; 827 828 if (bus_space_map(wdc_cp->cmd_iot, PCIIDE_COMPAT_CMD_BASE(compatchan), 829 PCIIDE_COMPAT_CMD_SIZE, 0, &wdc_cp->cmd_ioh) != 0) { 830 printf("%s: couldn't map %s cmd regs\n", 831 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 832 return (0); 833 } 834 835 wdc_cp->ctl_iot = pa->pa_iot; 836 837 if (bus_space_map(wdc_cp->ctl_iot, PCIIDE_COMPAT_CTL_BASE(compatchan), 838 PCIIDE_COMPAT_CTL_SIZE, 0, &wdc_cp->ctl_ioh) != 0) { 839 printf("%s: couldn't map %s ctl regs\n", 840 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 841 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, 842 PCIIDE_COMPAT_CMD_SIZE); 843 return (0); 844 } 845 846 return (1); 847 } 848 849 int 850 pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep, pci_intr) 851 struct pci_attach_args * pa; 852 struct pciide_channel *cp; 853 bus_size_t *cmdsizep, *ctlsizep; 854 int (*pci_intr)(void *); 855 { 856 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 857 struct channel_softc *wdc_cp = 
&cp->wdc_channel; 858 const char *intrstr; 859 pci_intr_handle_t intrhandle; 860 pcireg_t maptype; 861 862 cp->compat = 0; 863 864 if (sc->sc_pci_ih == NULL) { 865 if (pci_intr_map(pa, &intrhandle) != 0) { 866 printf("%s: couldn't map native-PCI interrupt\n", 867 sc->sc_wdcdev.sc_dev.dv_xname); 868 return 0; 869 } 870 intrstr = pci_intr_string(pa->pa_pc, intrhandle); 871 #ifdef __OpenBSD__ 872 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc, 873 intrhandle, IPL_BIO, pci_intr, sc, 874 sc->sc_wdcdev.sc_dev.dv_xname); 875 #else 876 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc, 877 intrhandle, IPL_BIO, pci_intr, sc); 878 #endif 879 if (sc->sc_pci_ih != NULL) { 880 printf("%s: using %s for native-PCI interrupt\n", 881 sc->sc_wdcdev.sc_dev.dv_xname, 882 intrstr ? intrstr : "unknown interrupt"); 883 } else { 884 printf("%s: couldn't establish native-PCI interrupt", 885 sc->sc_wdcdev.sc_dev.dv_xname); 886 if (intrstr != NULL) 887 printf(" at %s", intrstr); 888 printf("\n"); 889 return 0; 890 } 891 } 892 cp->ih = sc->sc_pci_ih; 893 894 maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, 895 PCIIDE_REG_CMD_BASE(wdc_cp->channel)); 896 WDCDEBUG_PRINT(("%s: %s cmd regs mapping: %s\n", 897 sc->sc_wdcdev.sc_dev.dv_xname, cp->name, 898 (maptype == PCI_MAPREG_TYPE_IO ? "I/O" : "memory")), DEBUG_PROBE); 899 if (pci_mapreg_map(pa, PCIIDE_REG_CMD_BASE(wdc_cp->channel), 900 maptype, 0, 901 &wdc_cp->cmd_iot, &wdc_cp->cmd_ioh, NULL, cmdsizep, 0) != 0) { 902 printf("%s: couldn't map %s cmd regs\n", 903 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 904 return 0; 905 } 906 907 maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, 908 PCIIDE_REG_CTL_BASE(wdc_cp->channel)); 909 WDCDEBUG_PRINT(("%s: %s ctl regs mapping: %s\n", 910 sc->sc_wdcdev.sc_dev.dv_xname, cp->name, 911 (maptype == PCI_MAPREG_TYPE_IO ? "I/O": "memory")), DEBUG_PROBE); 912 if (pci_mapreg_map(pa, PCIIDE_REG_CTL_BASE(wdc_cp->channel), 913 maptype, 0, 914 &wdc_cp->ctl_iot, &cp->ctl_baseioh, NULL, ctlsizep, 0) != 0) { 915 printf("%s: couldn't map %s ctl regs\n", 916 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 917 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep); 918 return 0; 919 } 920 /* 921 * In native mode, 4 bytes of I/O space are mapped for the control 922 * register, the control register is at offset 2. Pass the generic 923 * code a handle for only one byte at the right offset. 924 */ 925 if (bus_space_subregion(wdc_cp->ctl_iot, cp->ctl_baseioh, 2, 1, 926 &wdc_cp->ctl_ioh) != 0) { 927 printf("%s: unable to subregion %s ctl regs\n", 928 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 929 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep); 930 bus_space_unmap(wdc_cp->cmd_iot, cp->ctl_baseioh, *ctlsizep); 931 return 0; 932 } 933 return (1); 934 } 935 936 void 937 pciide_mapreg_dma(sc, pa) 938 struct pciide_softc *sc; 939 struct pci_attach_args *pa; 940 { 941 pcireg_t maptype; 942 bus_addr_t addr; 943 944 /* 945 * Map DMA registers 946 * 947 * Note that sc_dma_ok is the right variable to test to see if 948 * DMA can be done. If the interface doesn't support DMA, 949 * sc_dma_ok will never be non-zero. If the DMA regs couldn't 950 * be mapped, it'll be zero. I.e., sc_dma_ok will only be 951 * non-zero if the interface supports DMA and the registers 952 * could be mapped. 953 * 954 * XXX Note that despite the fact that the Bus Master IDE specs 955 * XXX say that "The bus master IDE function uses 16 bytes of IO 956 * XXX space," some controllers (at least the United 957 * XXX Microelectronics UM8886BF) place it in memory space. 
958 */ 959 960 maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, 961 PCIIDE_REG_BUS_MASTER_DMA); 962 963 switch (maptype) { 964 case PCI_MAPREG_TYPE_IO: 965 sc->sc_dma_ok = (pci_mapreg_info(pa->pa_pc, pa->pa_tag, 966 PCIIDE_REG_BUS_MASTER_DMA, PCI_MAPREG_TYPE_IO, 967 &addr, NULL, NULL) == 0); 968 if (sc->sc_dma_ok == 0) { 969 printf(", unused (couldn't query registers)"); 970 break; 971 } 972 if ((sc->sc_pp->ide_flags & IDE_16BIT_IOSPACE) 973 && addr >= 0x10000) { 974 sc->sc_dma_ok = 0; 975 printf(", unused (registers at unsafe address %#lx)", addr); 976 break; 977 } 978 /* FALLTHROUGH */ 979 980 case PCI_MAPREG_MEM_TYPE_32BIT: 981 sc->sc_dma_ok = (pci_mapreg_map(pa, 982 PCIIDE_REG_BUS_MASTER_DMA, maptype, 0, 983 &sc->sc_dma_iot, &sc->sc_dma_ioh, NULL, NULL, 0) == 0); 984 sc->sc_dmat = pa->pa_dmat; 985 if (sc->sc_dma_ok == 0) { 986 printf(", unused (couldn't map registers)"); 987 } else { 988 sc->sc_wdcdev.dma_arg = sc; 989 sc->sc_wdcdev.dma_init = pciide_dma_init; 990 sc->sc_wdcdev.dma_start = pciide_dma_start; 991 sc->sc_wdcdev.dma_finish = pciide_dma_finish; 992 } 993 break; 994 995 default: 996 sc->sc_dma_ok = 0; 997 printf(", (unsupported maptype 0x%x)", maptype); 998 break; 999 } 1000 } 1001 1002 int 1003 pciide_intr_flag(struct pciide_channel *cp) 1004 { 1005 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 1006 1007 if (cp->dma_in_progress) { 1008 int retry = 10; 1009 int status; 1010 1011 /* Check the status register */ 1012 for (retry = 10; retry > 0; retry--) { 1013 status = bus_space_read_1(sc->sc_dma_iot, 1014 sc->sc_dma_ioh, 1015 IDEDMA_CTL(cp->wdc_channel.channel)); 1016 if (status & IDEDMA_CTL_INTR) { 1017 break; 1018 } 1019 DELAY(5); 1020 } 1021 1022 /* Not for us. */ 1023 if (retry == 0) 1024 return (0); 1025 1026 return (1); 1027 } 1028 1029 return (-1); 1030 } 1031 1032 int 1033 pciide_compat_intr(arg) 1034 void *arg; 1035 { 1036 struct pciide_channel *cp = arg; 1037 1038 if (pciide_intr_flag(cp) == 0) 1039 return 0; 1040 1041 #ifdef DIAGNOSTIC 1042 /* should only be called for a compat channel */ 1043 if (cp->compat == 0) 1044 panic("pciide compat intr called for non-compat chan %p", cp); 1045 #endif 1046 return (wdcintr(&cp->wdc_channel)); 1047 } 1048 1049 int 1050 pciide_pci_intr(arg) 1051 void *arg; 1052 { 1053 struct pciide_softc *sc = arg; 1054 struct pciide_channel *cp; 1055 struct channel_softc *wdc_cp; 1056 int i, rv, crv; 1057 1058 rv = 0; 1059 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 1060 cp = &sc->pciide_channels[i]; 1061 wdc_cp = &cp->wdc_channel; 1062 1063 /* If a compat channel skip. 
*/ 1064 if (cp->compat) 1065 continue; 1066 /* if this channel not waiting for intr, skip */ 1067 if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0) 1068 continue; 1069 1070 if (pciide_intr_flag(cp) == 0) 1071 continue; 1072 1073 crv = wdcintr(wdc_cp); 1074 if (crv == 0) 1075 ; /* leave rv alone */ 1076 else if (crv == 1) 1077 rv = 1; /* claim the intr */ 1078 else if (rv == 0) /* crv should be -1 in this case */ 1079 rv = crv; /* if we've done no better, take it */ 1080 } 1081 return (rv); 1082 } 1083 1084 void 1085 pciide_channel_dma_setup(cp) 1086 struct pciide_channel *cp; 1087 { 1088 int drive; 1089 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 1090 struct ata_drive_datas *drvp; 1091 1092 for (drive = 0; drive < 2; drive++) { 1093 drvp = &cp->wdc_channel.ch_drive[drive]; 1094 /* If no drive, skip */ 1095 if ((drvp->drive_flags & DRIVE) == 0) 1096 continue; 1097 /* setup DMA if needed */ 1098 if (((drvp->drive_flags & DRIVE_DMA) == 0 && 1099 (drvp->drive_flags & DRIVE_UDMA) == 0) || 1100 sc->sc_dma_ok == 0) { 1101 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA); 1102 continue; 1103 } 1104 if (pciide_dma_table_setup(sc, cp->wdc_channel.channel, drive) 1105 != 0) { 1106 /* Abort DMA setup */ 1107 drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA); 1108 continue; 1109 } 1110 } 1111 } 1112 1113 int 1114 pciide_dma_table_setup(sc, channel, drive) 1115 struct pciide_softc *sc; 1116 int channel, drive; 1117 { 1118 bus_dma_segment_t seg; 1119 int error, rseg; 1120 const bus_size_t dma_table_size = 1121 sizeof(struct idedma_table) * NIDEDMA_TABLES; 1122 struct pciide_dma_maps *dma_maps = 1123 &sc->pciide_channels[channel].dma_maps[drive]; 1124 1125 /* If table was already allocated, just return */ 1126 if (dma_maps->dma_table) 1127 return 0; 1128 1129 /* Allocate memory for the DMA tables and map it */ 1130 if ((error = bus_dmamem_alloc(sc->sc_dmat, dma_table_size, 1131 IDEDMA_TBL_ALIGN, IDEDMA_TBL_ALIGN, &seg, 1, &rseg, 1132 BUS_DMA_NOWAIT)) != 0) { 1133 printf("%s:%d: unable to allocate table DMA for " 1134 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname, 1135 channel, drive, error); 1136 return error; 1137 } 1138 1139 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, 1140 dma_table_size, 1141 (caddr_t *)&dma_maps->dma_table, 1142 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) { 1143 printf("%s:%d: unable to map table DMA for" 1144 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname, 1145 channel, drive, error); 1146 return error; 1147 } 1148 1149 WDCDEBUG_PRINT(("pciide_dma_table_setup: table at %p len %ld, " 1150 "phy 0x%lx\n", dma_maps->dma_table, dma_table_size, 1151 seg.ds_addr), DEBUG_PROBE); 1152 1153 /* Create and load table DMA map for this disk */ 1154 if ((error = bus_dmamap_create(sc->sc_dmat, dma_table_size, 1155 1, dma_table_size, IDEDMA_TBL_ALIGN, BUS_DMA_NOWAIT, 1156 &dma_maps->dmamap_table)) != 0) { 1157 printf("%s:%d: unable to create table DMA map for " 1158 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname, 1159 channel, drive, error); 1160 return error; 1161 } 1162 if ((error = bus_dmamap_load(sc->sc_dmat, 1163 dma_maps->dmamap_table, 1164 dma_maps->dma_table, 1165 dma_table_size, NULL, BUS_DMA_NOWAIT)) != 0) { 1166 printf("%s:%d: unable to load table DMA map for " 1167 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname, 1168 channel, drive, error); 1169 return error; 1170 } 1171 WDCDEBUG_PRINT(("pciide_dma_table_setup: phy addr of table 0x%lx\n", 1172 dma_maps->dmamap_table->dm_segs[0].ds_addr), DEBUG_PROBE); 1173 /* Create a xfer DMA map for this 
drive */ 1174 if ((error = bus_dmamap_create(sc->sc_dmat, IDEDMA_BYTE_COUNT_MAX, 1175 NIDEDMA_TABLES, sc->sc_dma_maxsegsz, sc->sc_dma_boundary, 1176 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, 1177 &dma_maps->dmamap_xfer)) != 0) { 1178 printf("%s:%d: unable to create xfer DMA map for " 1179 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname, 1180 channel, drive, error); 1181 return error; 1182 } 1183 return 0; 1184 } 1185 1186 int 1187 pciide_dma_init(v, channel, drive, databuf, datalen, flags) 1188 void *v; 1189 int channel, drive; 1190 void *databuf; 1191 size_t datalen; 1192 int flags; 1193 { 1194 struct pciide_softc *sc = v; 1195 int error, seg; 1196 struct pciide_dma_maps *dma_maps = 1197 &sc->pciide_channels[channel].dma_maps[drive]; 1198 #ifndef BUS_DMA_RAW 1199 #define BUS_DMA_RAW 0 1200 #endif 1201 1202 error = bus_dmamap_load(sc->sc_dmat, 1203 dma_maps->dmamap_xfer, 1204 databuf, datalen, NULL, BUS_DMA_NOWAIT|BUS_DMA_RAW); 1205 if (error) { 1206 printf("%s:%d: unable to load xfer DMA map for" 1207 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname, 1208 channel, drive, error); 1209 return error; 1210 } 1211 1212 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0, 1213 dma_maps->dmamap_xfer->dm_mapsize, 1214 (flags & WDC_DMA_READ) ? 1215 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE); 1216 1217 for (seg = 0; seg < dma_maps->dmamap_xfer->dm_nsegs; seg++) { 1218 #ifdef DIAGNOSTIC 1219 /* A segment must not cross a 64k boundary */ 1220 { 1221 u_long phys = dma_maps->dmamap_xfer->dm_segs[seg].ds_addr; 1222 u_long len = dma_maps->dmamap_xfer->dm_segs[seg].ds_len; 1223 if ((phys & ~IDEDMA_BYTE_COUNT_MASK) != 1224 ((phys + len - 1) & ~IDEDMA_BYTE_COUNT_MASK)) { 1225 printf("pciide_dma: segment %d physical addr 0x%lx" 1226 " len 0x%lx not properly aligned\n", 1227 seg, phys, len); 1228 panic("pciide_dma: buf align"); 1229 } 1230 } 1231 #endif 1232 dma_maps->dma_table[seg].base_addr = 1233 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_addr); 1234 dma_maps->dma_table[seg].byte_count = 1235 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_len & 1236 IDEDMA_BYTE_COUNT_MASK); 1237 WDCDEBUG_PRINT(("\t seg %d len %d addr 0x%x\n", 1238 seg, letoh32(dma_maps->dma_table[seg].byte_count), 1239 letoh32(dma_maps->dma_table[seg].base_addr)), DEBUG_DMA); 1240 1241 } 1242 dma_maps->dma_table[dma_maps->dmamap_xfer->dm_nsegs -1].byte_count |= 1243 htole32(IDEDMA_BYTE_COUNT_EOT); 1244 1245 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_table, 0, 1246 dma_maps->dmamap_table->dm_mapsize, 1247 BUS_DMASYNC_PREWRITE); 1248 1249 /* Maps are ready. Start DMA function */ 1250 #ifdef DIAGNOSTIC 1251 if (dma_maps->dmamap_table->dm_segs[0].ds_addr & ~IDEDMA_TBL_MASK) { 1252 printf("pciide_dma_init: addr 0x%lx not properly aligned\n", 1253 dma_maps->dmamap_table->dm_segs[0].ds_addr); 1254 panic("pciide_dma_init: table align"); 1255 } 1256 #endif 1257 1258 /* Clear status bits */ 1259 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 1260 IDEDMA_CTL(channel), 1261 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 1262 IDEDMA_CTL(channel))); 1263 /* Write table addr */ 1264 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, 1265 IDEDMA_TBL(channel), 1266 dma_maps->dmamap_table->dm_segs[0].ds_addr); 1267 /* set read/write */ 1268 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 1269 IDEDMA_CMD(channel), 1270 (flags & WDC_DMA_READ) ? 
IDEDMA_CMD_WRITE: 0); 1271 /* remember flags */ 1272 dma_maps->dma_flags = flags; 1273 return 0; 1274 } 1275 1276 void 1277 pciide_dma_start(v, channel, drive) 1278 void *v; 1279 int channel, drive; 1280 { 1281 struct pciide_softc *sc = v; 1282 1283 WDCDEBUG_PRINT(("pciide_dma_start\n"),DEBUG_XFERS); 1284 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 1285 IDEDMA_CMD(channel), 1286 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 1287 IDEDMA_CMD(channel)) | IDEDMA_CMD_START); 1288 1289 sc->pciide_channels[channel].dma_in_progress = 1; 1290 } 1291 1292 int 1293 pciide_dma_finish(v, channel, drive) 1294 void *v; 1295 int channel, drive; 1296 { 1297 struct pciide_softc *sc = v; 1298 u_int8_t status; 1299 int error = 0; 1300 struct pciide_dma_maps *dma_maps = 1301 &sc->pciide_channels[channel].dma_maps[drive]; 1302 1303 sc->pciide_channels[channel].dma_in_progress = 0; 1304 1305 status = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 1306 IDEDMA_CTL(channel)); 1307 WDCDEBUG_PRINT(("pciide_dma_finish: status 0x%x\n", status), 1308 DEBUG_XFERS); 1309 1310 /* stop DMA channel */ 1311 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 1312 IDEDMA_CMD(channel), 1313 (dma_maps->dma_flags & WDC_DMA_READ) ? 1314 0x00 : IDEDMA_CMD_WRITE); 1315 1316 /* Unload the map of the data buffer */ 1317 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0, 1318 dma_maps->dmamap_xfer->dm_mapsize, 1319 (dma_maps->dma_flags & WDC_DMA_READ) ? 1320 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE); 1321 bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer); 1322 1323 /* Clear status bits */ 1324 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 1325 IDEDMA_CTL(channel), 1326 status); 1327 1328 if ((status & IDEDMA_CTL_ERR) != 0) { 1329 printf("%s:%d:%d: bus-master DMA error: status=0x%x\n", 1330 sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, status); 1331 error |= WDC_DMAST_ERR; 1332 } 1333 1334 if ((status & IDEDMA_CTL_INTR) == 0) { 1335 printf("%s:%d:%d: bus-master DMA error: missing interrupt, " 1336 "status=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, channel, 1337 drive, status); 1338 error |= WDC_DMAST_NOIRQ; 1339 } 1340 1341 if ((status & IDEDMA_CTL_ACT) != 0) { 1342 /* data underrun, may be a valid condition for ATAPI */ 1343 error |= WDC_DMAST_UNDER; 1344 } 1345 return error; 1346 } 1347 1348 void 1349 pciide_irqack(chp) 1350 struct channel_softc *chp; 1351 { 1352 struct pciide_channel *cp = (struct pciide_channel*)chp; 1353 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 1354 1355 /* clear status bits in IDE DMA registers */ 1356 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 1357 IDEDMA_CTL(chp->channel), 1358 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 1359 IDEDMA_CTL(chp->channel))); 1360 } 1361 1362 /* some common code used by several chip_map */ 1363 int 1364 pciide_chansetup(sc, channel, interface) 1365 struct pciide_softc *sc; 1366 int channel; 1367 pcireg_t interface; 1368 { 1369 struct pciide_channel *cp = &sc->pciide_channels[channel]; 1370 sc->wdc_chanarray[channel] = &cp->wdc_channel; 1371 cp->name = PCIIDE_CHANNEL_NAME(channel); 1372 cp->wdc_channel.channel = channel; 1373 cp->wdc_channel.wdc = &sc->sc_wdcdev; 1374 cp->wdc_channel.ch_queue = 1375 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT); 1376 if (cp->wdc_channel.ch_queue == NULL) { 1377 printf("%s: %s " 1378 "cannot allocate memory for command queue", 1379 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 1380 return 0; 1381 } 1382 cp->hw_ok = 1; 1383 1384 return 1; 1385 } 1386 1387 /* some common code used by 
several chip channel_map */ 1388 void 1389 pciide_mapchan(pa, cp, interface, cmdsizep, ctlsizep, pci_intr) 1390 struct pci_attach_args *pa; 1391 struct pciide_channel *cp; 1392 pcireg_t interface; 1393 bus_size_t *cmdsizep, *ctlsizep; 1394 int (*pci_intr)(void *); 1395 { 1396 struct channel_softc *wdc_cp = &cp->wdc_channel; 1397 1398 if (interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) 1399 cp->hw_ok = pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep, 1400 pci_intr); 1401 else 1402 cp->hw_ok = pciide_mapregs_compat(pa, cp, 1403 wdc_cp->channel, cmdsizep, ctlsizep); 1404 if (cp->hw_ok == 0) 1405 return; 1406 wdc_cp->data32iot = wdc_cp->cmd_iot; 1407 wdc_cp->data32ioh = wdc_cp->cmd_ioh; 1408 wdcattach(wdc_cp); 1409 } 1410 1411 /* 1412 * Generic code to call to know if a channel can be disabled. Return 1 1413 * if channel can be disabled, 0 if not 1414 */ 1415 int 1416 pciide_chan_candisable(cp) 1417 struct pciide_channel *cp; 1418 { 1419 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 1420 struct channel_softc *wdc_cp = &cp->wdc_channel; 1421 1422 if ((wdc_cp->ch_drive[0].drive_flags & DRIVE) == 0 && 1423 (wdc_cp->ch_drive[1].drive_flags & DRIVE) == 0) { 1424 printf("%s: %s disabled (no drives)\n", 1425 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 1426 cp->hw_ok = 0; 1427 return 1; 1428 } 1429 return 0; 1430 } 1431 1432 /* 1433 * generic code to map the compat intr if hw_ok=1 and it is a compat channel. 1434 * Set hw_ok=0 on failure 1435 */ 1436 void 1437 pciide_map_compat_intr(pa, cp, compatchan, interface) 1438 struct pci_attach_args *pa; 1439 struct pciide_channel *cp; 1440 int compatchan, interface; 1441 { 1442 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 1443 struct channel_softc *wdc_cp = &cp->wdc_channel; 1444 1445 if ((interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) != 0) 1446 return; 1447 1448 cp->compat = 1; 1449 cp->ih = pciide_machdep_compat_intr_establish(&sc->sc_wdcdev.sc_dev, 1450 pa, compatchan, pciide_compat_intr, cp); 1451 if (cp->ih == NULL) { 1452 printf("%s: no compatibility interrupt for use by %s\n", 1453 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 1454 cp->hw_ok = 0; 1455 } 1456 } 1457 1458 /* 1459 * generic code to unmap the compat intr if hw_ok=1 and it is a compat channel. 1460 * Set hw_ok=0 on failure 1461 */ 1462 void 1463 pciide_unmap_compat_intr(pa, cp, compatchan, interface) 1464 struct pci_attach_args *pa; 1465 struct pciide_channel *cp; 1466 int compatchan, interface; 1467 { 1468 struct channel_softc *wdc_cp = &cp->wdc_channel; 1469 1470 if ((interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) != 0) 1471 return; 1472 1473 pciide_machdep_compat_intr_disestablish(pa->pa_pc, cp->ih); 1474 } 1475 1476 void 1477 pciide_print_channels(nchannels, interface) 1478 int nchannels; 1479 pcireg_t interface; 1480 { 1481 int i; 1482 1483 for (i = 0; i < nchannels; i++) { 1484 printf(", %s %s to %s", PCIIDE_CHANNEL_NAME(i), 1485 (interface & PCIIDE_INTERFACE_SETTABLE(i)) ? 1486 "configured" : "wired", 1487 (interface & PCIIDE_INTERFACE_PCI(i)) ? 
"native-PCI" : 1488 "compatibility"); 1489 } 1490 1491 printf("\n"); 1492 } 1493 1494 void 1495 pciide_print_modes(cp) 1496 struct pciide_channel *cp; 1497 { 1498 wdc_print_current_modes(&cp->wdc_channel); 1499 } 1500 1501 void 1502 default_chip_map(sc, pa) 1503 struct pciide_softc *sc; 1504 struct pci_attach_args *pa; 1505 { 1506 struct pciide_channel *cp; 1507 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 1508 pcireg_t csr; 1509 int channel, drive; 1510 struct ata_drive_datas *drvp; 1511 u_int8_t idedma_ctl; 1512 bus_size_t cmdsize, ctlsize; 1513 char *failreason; 1514 1515 if (pciide_chipen(sc, pa) == 0) 1516 return; 1517 1518 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) { 1519 printf(": DMA"); 1520 if (sc->sc_pp == &default_product_desc && 1521 (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags & 1522 PCIIDE_OPTIONS_DMA) == 0) { 1523 printf(" (unsupported)"); 1524 sc->sc_dma_ok = 0; 1525 } else { 1526 pciide_mapreg_dma(sc, pa); 1527 if (sc->sc_dma_ok != 0) 1528 printf(", (partial support)"); 1529 } 1530 } else { 1531 printf(": no DMA"); 1532 sc->sc_dma_ok = 0; 1533 } 1534 if (sc->sc_dma_ok) { 1535 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 1536 sc->sc_wdcdev.irqack = pciide_irqack; 1537 } 1538 sc->sc_wdcdev.PIO_cap = 0; 1539 sc->sc_wdcdev.DMA_cap = 0; 1540 sc->sc_wdcdev.channels = sc->wdc_chanarray; 1541 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 1542 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16; 1543 1544 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 1545 1546 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 1547 cp = &sc->pciide_channels[channel]; 1548 if (pciide_chansetup(sc, channel, interface) == 0) 1549 continue; 1550 if (interface & PCIIDE_INTERFACE_PCI(channel)) { 1551 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, 1552 &ctlsize, pciide_pci_intr); 1553 } else { 1554 cp->hw_ok = pciide_mapregs_compat(pa, cp, 1555 channel, &cmdsize, &ctlsize); 1556 } 1557 if (cp->hw_ok == 0) 1558 continue; 1559 /* 1560 * Check to see if something appears to be there. 1561 */ 1562 failreason = NULL; 1563 pciide_map_compat_intr(pa, cp, channel, interface); 1564 if (cp->hw_ok == 0) 1565 continue; 1566 if (!wdcprobe(&cp->wdc_channel)) { 1567 failreason = "not responding; disabled or no drives?"; 1568 goto next; 1569 } 1570 /* 1571 * Now, make sure it's actually attributable to this PCI IDE 1572 * channel by trying to access the channel again while the 1573 * PCI IDE controller's I/O space is disabled. (If the 1574 * channel no longer appears to be there, it belongs to 1575 * this controller.) YUCK! 
1576 */ 1577 csr = pci_conf_read(sc->sc_pc, sc->sc_tag, 1578 PCI_COMMAND_STATUS_REG); 1579 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG, 1580 csr & ~PCI_COMMAND_IO_ENABLE); 1581 if (wdcprobe(&cp->wdc_channel)) 1582 failreason = "other hardware responding at addresses"; 1583 pci_conf_write(sc->sc_pc, sc->sc_tag, 1584 PCI_COMMAND_STATUS_REG, csr); 1585 next: 1586 if (failreason) { 1587 printf("%s: %s ignored (%s)\n", 1588 sc->sc_wdcdev.sc_dev.dv_xname, cp->name, 1589 failreason); 1590 cp->hw_ok = 0; 1591 pciide_unmap_compat_intr(pa, cp, channel, interface); 1592 bus_space_unmap(cp->wdc_channel.cmd_iot, 1593 cp->wdc_channel.cmd_ioh, cmdsize); 1594 bus_space_unmap(cp->wdc_channel.ctl_iot, 1595 cp->wdc_channel.ctl_ioh, ctlsize); 1596 } 1597 if (cp->hw_ok) { 1598 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot; 1599 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh; 1600 wdcattach(&cp->wdc_channel); 1601 } 1602 } 1603 1604 if (sc->sc_dma_ok == 0) 1605 return; 1606 1607 /* Allocate DMA maps */ 1608 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 1609 idedma_ctl = 0; 1610 cp = &sc->pciide_channels[channel]; 1611 for (drive = 0; drive < 2; drive++) { 1612 drvp = &cp->wdc_channel.ch_drive[drive]; 1613 /* If no drive, skip */ 1614 if ((drvp->drive_flags & DRIVE) == 0) 1615 continue; 1616 if ((drvp->drive_flags & DRIVE_DMA) == 0) 1617 continue; 1618 if (pciide_dma_table_setup(sc, channel, drive) != 0) { 1619 /* Abort DMA setup */ 1620 printf("%s:%d:%d: cannot allocate DMA maps, " 1621 "using PIO transfers\n", 1622 sc->sc_wdcdev.sc_dev.dv_xname, 1623 channel, drive); 1624 drvp->drive_flags &= ~DRIVE_DMA; 1625 } 1626 printf("%s:%d:%d: using DMA data transfers\n", 1627 sc->sc_wdcdev.sc_dev.dv_xname, 1628 channel, drive); 1629 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 1630 } 1631 if (idedma_ctl != 0) { 1632 /* Add software bits in status register */ 1633 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 1634 IDEDMA_CTL(channel), 1635 idedma_ctl); 1636 } 1637 } 1638 } 1639 1640 void 1641 sata_setup_channel(struct channel_softc *chp) 1642 { 1643 struct ata_drive_datas *drvp; 1644 int drive; 1645 u_int32_t idedma_ctl; 1646 struct pciide_channel *cp = (struct pciide_channel*)chp; 1647 struct pciide_softc *sc = (struct pciide_softc*)cp->wdc_channel.wdc; 1648 1649 /* setup DMA if needed */ 1650 pciide_channel_dma_setup(cp); 1651 1652 idedma_ctl = 0; 1653 1654 for (drive = 0; drive < 2; drive++) { 1655 drvp = &chp->ch_drive[drive]; 1656 /* If no drive, skip */ 1657 if ((drvp->drive_flags & DRIVE) == 0) 1658 continue; 1659 if (drvp->drive_flags & DRIVE_UDMA) { 1660 /* use Ultra/DMA */ 1661 drvp->drive_flags &= ~DRIVE_DMA; 1662 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 1663 } else if (drvp->drive_flags & DRIVE_DMA) { 1664 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 1665 } 1666 } 1667 1668 /* 1669 * Nothing to do to setup modes; it is meaningless in S-ATA 1670 * (but many S-ATA drives still want to get the SET_FEATURE 1671 * command). 
1672 */ 1673 if (idedma_ctl != 0) { 1674 /* Add software bits in status register */ 1675 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 1676 IDEDMA_CTL(chp->channel), idedma_ctl); 1677 } 1678 pciide_print_modes(cp); 1679 } 1680 1681 void 1682 piix_chip_map(sc, pa) 1683 struct pciide_softc *sc; 1684 struct pci_attach_args *pa; 1685 { 1686 struct pciide_channel *cp; 1687 int channel; 1688 u_int32_t idetim; 1689 bus_size_t cmdsize, ctlsize; 1690 1691 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 1692 1693 if (pciide_chipen(sc, pa) == 0) 1694 return; 1695 1696 printf(": DMA"); 1697 pciide_mapreg_dma(sc, pa); 1698 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 1699 WDC_CAPABILITY_MODE; 1700 if (sc->sc_dma_ok) { 1701 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 1702 sc->sc_wdcdev.irqack = pciide_irqack; 1703 switch (sc->sc_pp->ide_product) { 1704 case PCI_PRODUCT_INTEL_82371AB_IDE: 1705 case PCI_PRODUCT_INTEL_82440MX_IDE: 1706 case PCI_PRODUCT_INTEL_82801AA_IDE: 1707 case PCI_PRODUCT_INTEL_82801AB_IDE: 1708 case PCI_PRODUCT_INTEL_82801BAM_IDE: 1709 case PCI_PRODUCT_INTEL_82801BA_IDE: 1710 case PCI_PRODUCT_INTEL_82801CAM_IDE: 1711 case PCI_PRODUCT_INTEL_82801CA_IDE: 1712 case PCI_PRODUCT_INTEL_82801DB_IDE: 1713 case PCI_PRODUCT_INTEL_82801DBM_IDE: 1714 case PCI_PRODUCT_INTEL_82801EB_IDE: 1715 case PCI_PRODUCT_INTEL_82801EB_SATA: 1716 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 1717 break; 1718 } 1719 } 1720 sc->sc_wdcdev.PIO_cap = 4; 1721 sc->sc_wdcdev.DMA_cap = 2; 1722 switch (sc->sc_pp->ide_product) { 1723 case PCI_PRODUCT_INTEL_82801AA_IDE: 1724 sc->sc_wdcdev.UDMA_cap = 4; 1725 break; 1726 case PCI_PRODUCT_INTEL_82801BAM_IDE: 1727 case PCI_PRODUCT_INTEL_82801BA_IDE: 1728 case PCI_PRODUCT_INTEL_82801CAM_IDE: 1729 case PCI_PRODUCT_INTEL_82801CA_IDE: 1730 case PCI_PRODUCT_INTEL_82801DB_IDE: 1731 case PCI_PRODUCT_INTEL_82801DBM_IDE: 1732 case PCI_PRODUCT_INTEL_82801EB_IDE: 1733 case PCI_PRODUCT_INTEL_82801EB_SATA: 1734 sc->sc_wdcdev.UDMA_cap = 5; 1735 break; 1736 default: 1737 sc->sc_wdcdev.UDMA_cap = 2; 1738 break; 1739 } 1740 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801EB_SATA) 1741 sc->sc_wdcdev.set_modes = sata_setup_channel; 1742 else if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82371FB_IDE) 1743 sc->sc_wdcdev.set_modes = piix_setup_channel; 1744 else 1745 sc->sc_wdcdev.set_modes = piix3_4_setup_channel; 1746 sc->sc_wdcdev.channels = sc->wdc_chanarray; 1747 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 1748 1749 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 1750 1751 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801EB_SATA) 1752 goto chansetup; 1753 1754 WDCDEBUG_PRINT(("piix_setup_chip: old idetim=0x%x", 1755 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)), 1756 DEBUG_PROBE); 1757 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) { 1758 WDCDEBUG_PRINT((", sidetim=0x%x", 1759 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)), 1760 DEBUG_PROBE); 1761 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) { 1762 WDCDEBUG_PRINT((", udamreg 0x%x", 1763 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)), 1764 DEBUG_PROBE); 1765 } 1766 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE || 1767 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE || 1768 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE || 1769 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE || 1770 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CAM_IDE || 1771 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE || 
1772 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE || 1773 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBM_IDE || 1774 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801EB_IDE) { 1775 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x", 1776 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)), 1777 DEBUG_PROBE); 1778 } 1779 1780 } 1781 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE); 1782 1783 chansetup: 1784 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 1785 cp = &sc->pciide_channels[channel]; 1786 1787 /* SATA setup */ 1788 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801EB_SATA) { 1789 if (pciide_chansetup(sc, channel, interface) == 0) 1790 continue; 1791 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 1792 pciide_pci_intr); 1793 if (cp->hw_ok == 0) 1794 continue; 1795 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 1796 continue; 1797 } 1798 1799 /* PIIX is compat-only */ 1800 if (pciide_chansetup(sc, channel, 0) == 0) 1801 continue; 1802 idetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM); 1803 if ((PIIX_IDETIM_READ(idetim, channel) & 1804 PIIX_IDETIM_IDE) == 0) { 1805 printf("%s: %s ignored (disabled)\n", 1806 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 1807 continue; 1808 } 1809 /* PIIX are compat-only pciide devices */ 1810 pciide_map_compat_intr(pa, cp, channel, 0); 1811 if (cp->hw_ok == 0) 1812 continue; 1813 pciide_mapchan(pa, cp, 0, &cmdsize, &ctlsize, pciide_pci_intr); 1814 if (cp->hw_ok == 0) 1815 goto next; 1816 if (pciide_chan_candisable(cp)) { 1817 idetim = PIIX_IDETIM_CLEAR(idetim, PIIX_IDETIM_IDE, 1818 channel); 1819 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, 1820 idetim); 1821 } 1822 if (cp->hw_ok == 0) 1823 goto next; 1824 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 1825 next: 1826 if (cp->hw_ok == 0) 1827 pciide_unmap_compat_intr(pa, cp, channel, 0); 1828 } 1829 1830 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801EB_SATA) 1831 return; 1832 1833 WDCDEBUG_PRINT(("piix_setup_chip: idetim=0x%x", 1834 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)), 1835 DEBUG_PROBE); 1836 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE) { 1837 WDCDEBUG_PRINT((", sidetim=0x%x", 1838 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)), 1839 DEBUG_PROBE); 1840 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) { 1841 WDCDEBUG_PRINT((", udamreg 0x%x", 1842 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)), 1843 DEBUG_PROBE); 1844 } 1845 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE || 1846 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE || 1847 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE || 1848 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE || 1849 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CAM_IDE || 1850 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE || 1851 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE || 1852 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBM_IDE || 1853 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801EB_IDE) { 1854 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x", 1855 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)), 1856 DEBUG_PROBE); 1857 } 1858 } 1859 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE); 1860 } 1861 1862 void 1863 piix_setup_channel(chp) 1864 struct channel_softc *chp; 1865 { 1866 u_int8_t mode[2], drive; 1867 u_int32_t oidetim, idetim, idedma_ctl; 1868 struct pciide_channel *cp = (struct pciide_channel*)chp; 1869 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 1870 struct ata_drive_datas *drvp = cp->wdc_channel.ch_drive; 1871 1872 oidetim = 
pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM); 1873 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, chp->channel); 1874 idedma_ctl = 0; 1875 1876 /* set up new idetim: Enable IDE registers decode */ 1877 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, 1878 chp->channel); 1879 1880 /* setup DMA */ 1881 pciide_channel_dma_setup(cp); 1882 1883 /* 1884 * Here we have to mess up with drives mode: PIIX can't have 1885 * different timings for master and slave drives. 1886 * We need to find the best combination. 1887 */ 1888 1889 /* If both drives supports DMA, take the lower mode */ 1890 if ((drvp[0].drive_flags & DRIVE_DMA) && 1891 (drvp[1].drive_flags & DRIVE_DMA)) { 1892 mode[0] = mode[1] = 1893 min(drvp[0].DMA_mode, drvp[1].DMA_mode); 1894 drvp[0].DMA_mode = mode[0]; 1895 drvp[1].DMA_mode = mode[1]; 1896 goto ok; 1897 } 1898 /* 1899 * If only one drive supports DMA, use its mode, and 1900 * put the other one in PIO mode 0 if mode not compatible 1901 */ 1902 if (drvp[0].drive_flags & DRIVE_DMA) { 1903 mode[0] = drvp[0].DMA_mode; 1904 mode[1] = drvp[1].PIO_mode; 1905 if (piix_isp_pio[mode[1]] != piix_isp_dma[mode[0]] || 1906 piix_rtc_pio[mode[1]] != piix_rtc_dma[mode[0]]) 1907 mode[1] = drvp[1].PIO_mode = 0; 1908 goto ok; 1909 } 1910 if (drvp[1].drive_flags & DRIVE_DMA) { 1911 mode[1] = drvp[1].DMA_mode; 1912 mode[0] = drvp[0].PIO_mode; 1913 if (piix_isp_pio[mode[0]] != piix_isp_dma[mode[1]] || 1914 piix_rtc_pio[mode[0]] != piix_rtc_dma[mode[1]]) 1915 mode[0] = drvp[0].PIO_mode = 0; 1916 goto ok; 1917 } 1918 /* 1919 * If both drives are not DMA, takes the lower mode, unless 1920 * one of them is PIO mode < 2 1921 */ 1922 if (drvp[0].PIO_mode < 2) { 1923 mode[0] = drvp[0].PIO_mode = 0; 1924 mode[1] = drvp[1].PIO_mode; 1925 } else if (drvp[1].PIO_mode < 2) { 1926 mode[1] = drvp[1].PIO_mode = 0; 1927 mode[0] = drvp[0].PIO_mode; 1928 } else { 1929 mode[0] = mode[1] = 1930 min(drvp[1].PIO_mode, drvp[0].PIO_mode); 1931 drvp[0].PIO_mode = mode[0]; 1932 drvp[1].PIO_mode = mode[1]; 1933 } 1934 ok: /* The modes are setup */ 1935 for (drive = 0; drive < 2; drive++) { 1936 if (drvp[drive].drive_flags & DRIVE_DMA) { 1937 idetim |= piix_setup_idetim_timings( 1938 mode[drive], 1, chp->channel); 1939 goto end; 1940 } 1941 } 1942 /* If we are there, none of the drives are DMA */ 1943 if (mode[0] >= 2) 1944 idetim |= piix_setup_idetim_timings( 1945 mode[0], 0, chp->channel); 1946 else 1947 idetim |= piix_setup_idetim_timings( 1948 mode[1], 0, chp->channel); 1949 end: /* 1950 * timing mode is now set up in the controller. 
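 * A single ISP/RTC pair in IDETIM covers the whole channel (which is
 * why the two drives' modes had to be merged above); only the
 * DTE/PPE/IE/TIME bits set by piix_setup_idetim_drvs() are per-drive.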
Enable 1951 * it per-drive 1952 */ 1953 for (drive = 0; drive < 2; drive++) { 1954 /* If no drive, skip */ 1955 if ((drvp[drive].drive_flags & DRIVE) == 0) 1956 continue; 1957 idetim |= piix_setup_idetim_drvs(&drvp[drive]); 1958 if (drvp[drive].drive_flags & DRIVE_DMA) 1959 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 1960 } 1961 if (idedma_ctl != 0) { 1962 /* Add software bits in status register */ 1963 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 1964 IDEDMA_CTL(chp->channel), 1965 idedma_ctl); 1966 } 1967 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim); 1968 pciide_print_modes(cp); 1969 } 1970 1971 void 1972 piix3_4_setup_channel(chp) 1973 struct channel_softc *chp; 1974 { 1975 struct ata_drive_datas *drvp; 1976 u_int32_t oidetim, idetim, sidetim, udmareg, ideconf, idedma_ctl; 1977 struct pciide_channel *cp = (struct pciide_channel*)chp; 1978 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 1979 int drive; 1980 int channel = chp->channel; 1981 1982 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM); 1983 sidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM); 1984 udmareg = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG); 1985 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG); 1986 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, channel); 1987 sidetim &= ~(PIIX_SIDETIM_ISP_MASK(channel) | 1988 PIIX_SIDETIM_RTC_MASK(channel)); 1989 1990 idedma_ctl = 0; 1991 /* If channel disabled, no need to go further */ 1992 if ((PIIX_IDETIM_READ(oidetim, channel) & PIIX_IDETIM_IDE) == 0) 1993 return; 1994 /* set up new idetim: Enable IDE registers decode */ 1995 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, channel); 1996 1997 /* setup DMA if needed */ 1998 pciide_channel_dma_setup(cp); 1999 2000 for (drive = 0; drive < 2; drive++) { 2001 udmareg &= ~(PIIX_UDMACTL_DRV_EN(channel, drive) | 2002 PIIX_UDMATIM_SET(0x3, channel, drive)); 2003 drvp = &chp->ch_drive[drive]; 2004 /* If no drive, skip */ 2005 if ((drvp->drive_flags & DRIVE) == 0) 2006 continue; 2007 if (((drvp->drive_flags & DRIVE_DMA) == 0 && 2008 (drvp->drive_flags & DRIVE_UDMA) == 0)) 2009 goto pio; 2010 2011 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE || 2012 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE || 2013 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE || 2014 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE || 2015 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CAM_IDE || 2016 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE || 2017 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE || 2018 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBM_IDE || 2019 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801EB_IDE) { 2020 ideconf |= PIIX_CONFIG_PINGPONG; 2021 } 2022 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE || 2023 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE|| 2024 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CAM_IDE|| 2025 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE || 2026 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE || 2027 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBM_IDE || 2028 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801EB_IDE) { 2029 /* setup Ultra/100 */ 2030 if (drvp->UDMA_mode > 2 && 2031 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0) 2032 drvp->UDMA_mode = 2; 2033 if (drvp->UDMA_mode > 4) { 2034 ideconf |= PIIX_CONFIG_UDMA100(channel, drive); 2035 } else { 2036 ideconf &= ~PIIX_CONFIG_UDMA100(channel, drive); 2037 if (drvp->UDMA_mode > 2) 
{ 2038 ideconf |= PIIX_CONFIG_UDMA66(channel, 2039 drive); 2040 } else { 2041 ideconf &= ~PIIX_CONFIG_UDMA66(channel, 2042 drive); 2043 } 2044 } 2045 } 2046 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE) { 2047 /* setup Ultra/66 */ 2048 if (drvp->UDMA_mode > 2 && 2049 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0) 2050 drvp->UDMA_mode = 2; 2051 if (drvp->UDMA_mode > 2) 2052 ideconf |= PIIX_CONFIG_UDMA66(channel, drive); 2053 else 2054 ideconf &= ~PIIX_CONFIG_UDMA66(channel, drive); 2055 } 2056 2057 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) && 2058 (drvp->drive_flags & DRIVE_UDMA)) { 2059 /* use Ultra/DMA */ 2060 drvp->drive_flags &= ~DRIVE_DMA; 2061 udmareg |= PIIX_UDMACTL_DRV_EN( channel,drive); 2062 udmareg |= PIIX_UDMATIM_SET( 2063 piix4_sct_udma[drvp->UDMA_mode], channel, drive); 2064 } else { 2065 /* use Multiword DMA */ 2066 drvp->drive_flags &= ~DRIVE_UDMA; 2067 if (drive == 0) { 2068 idetim |= piix_setup_idetim_timings( 2069 drvp->DMA_mode, 1, channel); 2070 } else { 2071 sidetim |= piix_setup_sidetim_timings( 2072 drvp->DMA_mode, 1, channel); 2073 idetim =PIIX_IDETIM_SET(idetim, 2074 PIIX_IDETIM_SITRE, channel); 2075 } 2076 } 2077 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 2078 2079 pio: /* use PIO mode */ 2080 idetim |= piix_setup_idetim_drvs(drvp); 2081 if (drive == 0) { 2082 idetim |= piix_setup_idetim_timings( 2083 drvp->PIO_mode, 0, channel); 2084 } else { 2085 sidetim |= piix_setup_sidetim_timings( 2086 drvp->PIO_mode, 0, channel); 2087 idetim =PIIX_IDETIM_SET(idetim, 2088 PIIX_IDETIM_SITRE, channel); 2089 } 2090 } 2091 if (idedma_ctl != 0) { 2092 /* Add software bits in status register */ 2093 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 2094 IDEDMA_CTL(channel), 2095 idedma_ctl); 2096 } 2097 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim); 2098 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM, sidetim); 2099 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG, udmareg); 2100 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_CONFIG, ideconf); 2101 pciide_print_modes(cp); 2102 } 2103 2104 2105 /* setup ISP and RTC fields, based on mode */ 2106 static u_int32_t 2107 piix_setup_idetim_timings(mode, dma, channel) 2108 u_int8_t mode; 2109 u_int8_t dma; 2110 u_int8_t channel; 2111 { 2112 2113 if (dma) 2114 return PIIX_IDETIM_SET(0, 2115 PIIX_IDETIM_ISP_SET(piix_isp_dma[mode]) | 2116 PIIX_IDETIM_RTC_SET(piix_rtc_dma[mode]), 2117 channel); 2118 else 2119 return PIIX_IDETIM_SET(0, 2120 PIIX_IDETIM_ISP_SET(piix_isp_pio[mode]) | 2121 PIIX_IDETIM_RTC_SET(piix_rtc_pio[mode]), 2122 channel); 2123 } 2124 2125 /* setup DTE, PPE, IE and TIME field based on PIO mode */ 2126 static u_int32_t 2127 piix_setup_idetim_drvs(drvp) 2128 struct ata_drive_datas *drvp; 2129 { 2130 u_int32_t ret = 0; 2131 struct channel_softc *chp = drvp->chnl_softc; 2132 u_int8_t channel = chp->channel; 2133 u_int8_t drive = drvp->drive; 2134 2135 /* 2136 * If drive is using UDMA, timings setups are independant 2137 * So just check DMA and PIO here. 2138 */ 2139 if (drvp->drive_flags & DRIVE_DMA) { 2140 /* if mode = DMA mode 0, use compatible timings */ 2141 if ((drvp->drive_flags & DRIVE_DMA) && 2142 drvp->DMA_mode == 0) { 2143 drvp->PIO_mode = 0; 2144 return ret; 2145 } 2146 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel); 2147 /* 2148 * PIO and DMA timings are the same, use fast timings for PIO 2149 * too, else use compat timings. 
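 * (i.e. fast PIO timings are kept only when the ISP/RTC values for the
 * drive's PIO mode match those of its DMA mode; the check below
 * otherwise drops PIO back to mode 0 compatible timings)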
2150 */ 2151 if ((piix_isp_pio[drvp->PIO_mode] != 2152 piix_isp_dma[drvp->DMA_mode]) || 2153 (piix_rtc_pio[drvp->PIO_mode] != 2154 piix_rtc_dma[drvp->DMA_mode])) 2155 drvp->PIO_mode = 0; 2156 /* if PIO mode <= 2, use compat timings for PIO */ 2157 if (drvp->PIO_mode <= 2) { 2158 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_DTE(drive), 2159 channel); 2160 return ret; 2161 } 2162 } 2163 2164 /* 2165 * Now setup PIO modes. If mode < 2, use compat timings. 2166 * Else enable fast timings. Enable IORDY and prefetch/post 2167 * if PIO mode >= 3. 2168 */ 2169 2170 if (drvp->PIO_mode < 2) 2171 return ret; 2172 2173 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel); 2174 if (drvp->PIO_mode >= 3) { 2175 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_IE(drive), channel); 2176 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_PPE(drive), channel); 2177 } 2178 return ret; 2179 } 2180 2181 /* setup values in SIDETIM registers, based on mode */ 2182 static u_int32_t 2183 piix_setup_sidetim_timings(mode, dma, channel) 2184 u_int8_t mode; 2185 u_int8_t dma; 2186 u_int8_t channel; 2187 { 2188 if (dma) 2189 return PIIX_SIDETIM_ISP_SET(piix_isp_dma[mode], channel) | 2190 PIIX_SIDETIM_RTC_SET(piix_rtc_dma[mode], channel); 2191 else 2192 return PIIX_SIDETIM_ISP_SET(piix_isp_pio[mode], channel) | 2193 PIIX_SIDETIM_RTC_SET(piix_rtc_pio[mode], channel); 2194 } 2195 2196 void 2197 amd756_chip_map(sc, pa) 2198 struct pciide_softc *sc; 2199 struct pci_attach_args *pa; 2200 { 2201 struct pciide_channel *cp; 2202 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 2203 int channel; 2204 pcireg_t chanenable; 2205 bus_size_t cmdsize, ctlsize; 2206 2207 if (pciide_chipen(sc, pa) == 0) 2208 return; 2209 2210 printf(": DMA"); 2211 pciide_mapreg_dma(sc, pa); 2212 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 2213 WDC_CAPABILITY_MODE; 2214 if (sc->sc_dma_ok) { 2215 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 2216 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 2217 sc->sc_wdcdev.irqack = pciide_irqack; 2218 } 2219 sc->sc_wdcdev.PIO_cap = 4; 2220 sc->sc_wdcdev.DMA_cap = 2; 2221 switch (sc->sc_pp->ide_product) { 2222 case PCI_PRODUCT_AMD_766_IDE: 2223 case PCI_PRODUCT_AMD_PBC768_IDE: 2224 case PCI_PRODUCT_AMD_8111_IDE: 2225 sc->sc_wdcdev.UDMA_cap = 5; 2226 break; 2227 default: 2228 sc->sc_wdcdev.UDMA_cap = 4; 2229 break; 2230 } 2231 sc->sc_wdcdev.set_modes = amd756_setup_channel; 2232 sc->sc_wdcdev.channels = sc->wdc_chanarray; 2233 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 2234 chanenable = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_CHANSTATUS_EN); 2235 2236 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 2237 2238 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2239 cp = &sc->pciide_channels[channel]; 2240 if (pciide_chansetup(sc, channel, interface) == 0) 2241 continue; 2242 2243 if ((chanenable & AMD756_CHAN_EN(channel)) == 0) { 2244 printf("%s: %s ignored (disabled)\n", 2245 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 2246 continue; 2247 } 2248 pciide_map_compat_intr(pa, cp, channel, interface); 2249 if (cp->hw_ok == 0) 2250 continue; 2251 2252 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 2253 pciide_pci_intr); 2254 2255 if (pciide_chan_candisable(cp)) { 2256 chanenable &= ~AMD756_CHAN_EN(channel); 2257 } 2258 if (cp->hw_ok == 0) { 2259 pciide_unmap_compat_intr(pa, cp, channel, interface); 2260 continue; 2261 } 2262 2263 amd756_setup_channel(&cp->wdc_channel); 2264 } 2265 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_CHANSTATUS_EN, 2266 chanenable); 2267 return; 
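    /*
     * Note: AMD756_CHANSTATUS_EN is written back once here with any
     * disabled channels cleared; amd756_setup_channel() reads the same
     * register again for the per-drive 80-wire cable-detect bits
     * (AMD756_CABLE) before allowing Ultra/DMA above mode 2.
     */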
2268 } 2269 2270 void 2271 amd756_setup_channel(chp) 2272 struct channel_softc *chp; 2273 { 2274 u_int32_t udmatim_reg, datatim_reg; 2275 u_int8_t idedma_ctl; 2276 int mode, drive; 2277 struct ata_drive_datas *drvp; 2278 struct pciide_channel *cp = (struct pciide_channel*)chp; 2279 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2280 pcireg_t chanenable; 2281 #ifndef PCIIDE_AMD756_ENABLEDMA 2282 int product = PCI_PRODUCT( 2283 pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_ID_REG)); 2284 int rev = PCI_REVISION( 2285 pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG)); 2286 #endif 2287 2288 idedma_ctl = 0; 2289 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_DATATIM); 2290 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_UDMA); 2291 datatim_reg &= ~AMD756_DATATIM_MASK(chp->channel); 2292 udmatim_reg &= ~AMD756_UDMA_MASK(chp->channel); 2293 chanenable = pci_conf_read(sc->sc_pc, sc->sc_tag, 2294 AMD756_CHANSTATUS_EN); 2295 2296 /* setup DMA if needed */ 2297 pciide_channel_dma_setup(cp); 2298 2299 for (drive = 0; drive < 2; drive++) { 2300 drvp = &chp->ch_drive[drive]; 2301 /* If no drive, skip */ 2302 if ((drvp->drive_flags & DRIVE) == 0) 2303 continue; 2304 /* add timing values, setup DMA if needed */ 2305 if (((drvp->drive_flags & DRIVE_DMA) == 0 && 2306 (drvp->drive_flags & DRIVE_UDMA) == 0)) { 2307 mode = drvp->PIO_mode; 2308 goto pio; 2309 } 2310 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) && 2311 (drvp->drive_flags & DRIVE_UDMA)) { 2312 /* use Ultra/DMA */ 2313 drvp->drive_flags &= ~DRIVE_DMA; 2314 2315 /* Check cable */ 2316 if ((chanenable & AMD756_CABLE(chp->channel, 2317 drive)) == 0 && drvp->UDMA_mode > 2) { 2318 WDCDEBUG_PRINT(("%s(%s:%d:%d): 80-wire " 2319 "cable not detected\n", drvp->drive_name, 2320 sc->sc_wdcdev.sc_dev.dv_xname, 2321 chp->channel, drive), DEBUG_PROBE); 2322 drvp->UDMA_mode = 2; 2323 } 2324 2325 udmatim_reg |= AMD756_UDMA_EN(chp->channel, drive) | 2326 AMD756_UDMA_EN_MTH(chp->channel, drive) | 2327 AMD756_UDMA_TIME(chp->channel, drive, 2328 amd756_udma_tim[drvp->UDMA_mode]); 2329 /* can use PIO timings, MW DMA unused */ 2330 mode = drvp->PIO_mode; 2331 } else { 2332 /* use Multiword DMA, but only if revision is OK */ 2333 drvp->drive_flags &= ~DRIVE_UDMA; 2334 #ifndef PCIIDE_AMD756_ENABLEDMA 2335 /* 2336 * The workaround doesn't seem to be necessary 2337 * with all drives, so it can be disabled by 2338 * PCIIDE_AMD756_ENABLEDMA. It causes a hard hang if 2339 * triggered. 
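 * The workaround below simply refuses multi-word DMA on the chip
 * revisions flagged by AMD756_CHIPREV_DISABLEDMA and drops the
 * drive back to PIO instead.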
2340 */ 2341 if (AMD756_CHIPREV_DISABLEDMA(product, rev)) { 2342 printf("%s:%d:%d: multi-word DMA disabled due " 2343 "to chip revision\n", 2344 sc->sc_wdcdev.sc_dev.dv_xname, 2345 chp->channel, drive); 2346 mode = drvp->PIO_mode; 2347 drvp->drive_flags &= ~DRIVE_DMA; 2348 goto pio; 2349 } 2350 #endif 2351 /* mode = min(pio, dma+2) */ 2352 if (drvp->PIO_mode <= (drvp->DMA_mode +2)) 2353 mode = drvp->PIO_mode; 2354 else 2355 mode = drvp->DMA_mode + 2; 2356 } 2357 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 2358 2359 pio: /* setup PIO mode */ 2360 if (mode <= 2) { 2361 drvp->DMA_mode = 0; 2362 drvp->PIO_mode = 0; 2363 mode = 0; 2364 } else { 2365 drvp->PIO_mode = mode; 2366 drvp->DMA_mode = mode - 2; 2367 } 2368 datatim_reg |= 2369 AMD756_DATATIM_PULSE(chp->channel, drive, 2370 amd756_pio_set[mode]) | 2371 AMD756_DATATIM_RECOV(chp->channel, drive, 2372 amd756_pio_rec[mode]); 2373 } 2374 if (idedma_ctl != 0) { 2375 /* Add software bits in status register */ 2376 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 2377 IDEDMA_CTL(chp->channel), 2378 idedma_ctl); 2379 } 2380 pciide_print_modes(cp); 2381 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_DATATIM, datatim_reg); 2382 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_UDMA, udmatim_reg); 2383 } 2384 2385 void 2386 apollo_chip_map(sc, pa) 2387 struct pciide_softc *sc; 2388 struct pci_attach_args *pa; 2389 { 2390 struct pciide_channel *cp; 2391 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 2392 int channel; 2393 u_int32_t ideconf; 2394 bus_size_t cmdsize, ctlsize; 2395 pcitag_t pcib_tag; 2396 pcireg_t pcib_id, pcib_class; 2397 2398 if (pciide_chipen(sc, pa) == 0) 2399 return; 2400 pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0); 2401 2402 pcib_id = pci_conf_read(sc->sc_pc, pcib_tag, PCI_ID_REG); 2403 pcib_class = pci_conf_read(sc->sc_pc, pcib_tag, PCI_CLASS_REG); 2404 2405 switch (PCI_PRODUCT(pcib_id)) { 2406 case PCI_PRODUCT_VIATECH_VT82C586_ISA: 2407 if (PCI_REVISION(pcib_class) >= 0x02) { 2408 printf(": ATA33"); 2409 sc->sc_wdcdev.UDMA_cap = 2; 2410 } else { 2411 printf(": DMA"); 2412 sc->sc_wdcdev.UDMA_cap = 0; 2413 } 2414 break; 2415 case PCI_PRODUCT_VIATECH_VT82C596A: 2416 if (PCI_REVISION(pcib_class) >= 0x12) { 2417 printf(": ATA66"); 2418 sc->sc_wdcdev.UDMA_cap = 4; 2419 } else { 2420 printf(": ATA33"); 2421 sc->sc_wdcdev.UDMA_cap = 2; 2422 } 2423 break; 2424 2425 case PCI_PRODUCT_VIATECH_VT82C686A_ISA: 2426 if (PCI_REVISION(pcib_class) >= 0x40) { 2427 printf(": ATA100"); 2428 sc->sc_wdcdev.UDMA_cap = 5; 2429 } else { 2430 printf(": ATA66"); 2431 sc->sc_wdcdev.UDMA_cap = 4; 2432 } 2433 break; 2434 case PCI_PRODUCT_VIATECH_VT8231_ISA: 2435 printf(": ATA100"); 2436 sc->sc_wdcdev.UDMA_cap = 5; 2437 break; 2438 case PCI_PRODUCT_VIATECH_VT8366_ISA: 2439 printf(": ATA100"); 2440 sc->sc_wdcdev.UDMA_cap = 5; 2441 break; 2442 case PCI_PRODUCT_VIATECH_VT8233_ISA: 2443 printf(": ATA133"); 2444 sc->sc_wdcdev.UDMA_cap = 6; 2445 break; 2446 case PCI_PRODUCT_VIATECH_VT8235_ISA: 2447 printf(": ATA133"); 2448 sc->sc_wdcdev.UDMA_cap = 6; 2449 break; 2450 default: 2451 printf(": DMA"); 2452 sc->sc_wdcdev.UDMA_cap = 0; 2453 break; 2454 } 2455 2456 pciide_mapreg_dma(sc, pa); 2457 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 2458 WDC_CAPABILITY_MODE; 2459 if (sc->sc_dma_ok) { 2460 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 2461 sc->sc_wdcdev.irqack = pciide_irqack; 2462 if (sc->sc_wdcdev.UDMA_cap > 0) 2463 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 2464 } 2465 sc->sc_wdcdev.PIO_cap = 4; 2466 
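    /*
     * Multi-word DMA is capped at mode 2 on all of these chips; the UDMA
     * ceiling derived from the south-bridge id/revision above selects
     * which timing table apollo_setup_channel() uses later:
     *    UDMA_cap 6 -> apollo_udma133_tim[]
     *    UDMA_cap 5 -> apollo_udma100_tim[]
     *    UDMA_cap 4 -> apollo_udma66_tim[] (with APO_UDMA_CLK66)
     *    otherwise  -> apollo_udma33_tim[]
     */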
sc->sc_wdcdev.DMA_cap = 2; 2467 sc->sc_wdcdev.set_modes = apollo_setup_channel; 2468 sc->sc_wdcdev.channels = sc->wdc_chanarray; 2469 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 2470 2471 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 2472 2473 WDCDEBUG_PRINT(("apollo_chip_map: old APO_IDECONF=0x%x, " 2474 "APO_CTLMISC=0x%x, APO_DATATIM=0x%x, APO_UDMA=0x%x\n", 2475 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF), 2476 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_CTLMISC), 2477 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM), 2478 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), 2479 DEBUG_PROBE); 2480 2481 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2482 cp = &sc->pciide_channels[channel]; 2483 if (pciide_chansetup(sc, channel, interface) == 0) 2484 continue; 2485 2486 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF); 2487 if ((ideconf & APO_IDECONF_EN(channel)) == 0) { 2488 printf("%s: %s ignored (disabled)\n", 2489 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 2490 continue; 2491 } 2492 pciide_map_compat_intr(pa, cp, channel, interface); 2493 if (cp->hw_ok == 0) 2494 continue; 2495 2496 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 2497 pciide_pci_intr); 2498 if (cp->hw_ok == 0) { 2499 goto next; 2500 } 2501 if (pciide_chan_candisable(cp)) { 2502 ideconf &= ~APO_IDECONF_EN(channel); 2503 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_IDECONF, 2504 ideconf); 2505 } 2506 2507 if (cp->hw_ok == 0) 2508 goto next; 2509 apollo_setup_channel(&sc->pciide_channels[channel].wdc_channel); 2510 next: 2511 if (cp->hw_ok == 0) 2512 pciide_unmap_compat_intr(pa, cp, channel, interface); 2513 } 2514 WDCDEBUG_PRINT(("apollo_chip_map: APO_DATATIM=0x%x, APO_UDMA=0x%x\n", 2515 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM), 2516 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), DEBUG_PROBE); 2517 } 2518 2519 void 2520 apollo_setup_channel(chp) 2521 struct channel_softc *chp; 2522 { 2523 u_int32_t udmatim_reg, datatim_reg; 2524 u_int8_t idedma_ctl; 2525 int mode, drive; 2526 struct ata_drive_datas *drvp; 2527 struct pciide_channel *cp = (struct pciide_channel*)chp; 2528 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2529 2530 idedma_ctl = 0; 2531 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM); 2532 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA); 2533 datatim_reg &= ~APO_DATATIM_MASK(chp->channel); 2534 udmatim_reg &= ~APO_UDMA_MASK(chp->channel); 2535 2536 /* setup DMA if needed */ 2537 pciide_channel_dma_setup(cp); 2538 2539 /* 2540 * We can't mix Ultra/33 and Ultra/66 on the same channel, so 2541 * downgrade to Ultra/33 if needed 2542 */ 2543 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA) && 2544 (chp->ch_drive[1].drive_flags & DRIVE_UDMA)) { 2545 /* both drives UDMA */ 2546 if (chp->ch_drive[0].UDMA_mode > 2 && 2547 chp->ch_drive[1].UDMA_mode <= 2) { 2548 /* drive 0 Ultra/66, drive 1 Ultra/33 */ 2549 chp->ch_drive[0].UDMA_mode = 2; 2550 } else if (chp->ch_drive[1].UDMA_mode > 2 && 2551 chp->ch_drive[0].UDMA_mode <= 2) { 2552 /* drive 1 Ultra/66, drive 0 Ultra/33 */ 2553 chp->ch_drive[1].UDMA_mode = 2; 2554 } 2555 } 2556 2557 for (drive = 0; drive < 2; drive++) { 2558 drvp = &chp->ch_drive[drive]; 2559 /* If no drive, skip */ 2560 if ((drvp->drive_flags & DRIVE) == 0) 2561 continue; 2562 /* add timing values, setup DMA if needed */ 2563 if (((drvp->drive_flags & DRIVE_DMA) == 0 && 2564 (drvp->drive_flags & DRIVE_UDMA) == 0)) { 2565 mode = drvp->PIO_mode; 2566 goto pio; 2567 } 2568 if ((chp->wdc->cap & 
WDC_CAPABILITY_UDMA) && 2569 (drvp->drive_flags & DRIVE_UDMA)) { 2570 /* use Ultra/DMA */ 2571 drvp->drive_flags &= ~DRIVE_DMA; 2572 udmatim_reg |= APO_UDMA_EN(chp->channel, drive) | 2573 APO_UDMA_EN_MTH(chp->channel, drive); 2574 if (sc->sc_wdcdev.UDMA_cap == 6) { 2575 udmatim_reg |= APO_UDMA_TIME(chp->channel, 2576 drive, apollo_udma133_tim[drvp->UDMA_mode]); 2577 } else if (sc->sc_wdcdev.UDMA_cap == 5) { 2578 /* 686b */ 2579 udmatim_reg |= APO_UDMA_TIME(chp->channel, 2580 drive, apollo_udma100_tim[drvp->UDMA_mode]); 2581 } else if (sc->sc_wdcdev.UDMA_cap == 4) { 2582 /* 596b or 686a */ 2583 udmatim_reg |= APO_UDMA_CLK66(chp->channel); 2584 udmatim_reg |= APO_UDMA_TIME(chp->channel, 2585 drive, apollo_udma66_tim[drvp->UDMA_mode]); 2586 } else { 2587 /* 596a or 586b */ 2588 udmatim_reg |= APO_UDMA_TIME(chp->channel, 2589 drive, apollo_udma33_tim[drvp->UDMA_mode]); 2590 } 2591 /* can use PIO timings, MW DMA unused */ 2592 mode = drvp->PIO_mode; 2593 } else { 2594 /* use Multiword DMA */ 2595 drvp->drive_flags &= ~DRIVE_UDMA; 2596 /* mode = min(pio, dma+2) */ 2597 if (drvp->PIO_mode <= (drvp->DMA_mode +2)) 2598 mode = drvp->PIO_mode; 2599 else 2600 mode = drvp->DMA_mode + 2; 2601 } 2602 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 2603 2604 pio: /* setup PIO mode */ 2605 if (mode <= 2) { 2606 drvp->DMA_mode = 0; 2607 drvp->PIO_mode = 0; 2608 mode = 0; 2609 } else { 2610 drvp->PIO_mode = mode; 2611 drvp->DMA_mode = mode - 2; 2612 } 2613 datatim_reg |= 2614 APO_DATATIM_PULSE(chp->channel, drive, 2615 apollo_pio_set[mode]) | 2616 APO_DATATIM_RECOV(chp->channel, drive, 2617 apollo_pio_rec[mode]); 2618 } 2619 if (idedma_ctl != 0) { 2620 /* Add software bits in status register */ 2621 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 2622 IDEDMA_CTL(chp->channel), 2623 idedma_ctl); 2624 } 2625 pciide_print_modes(cp); 2626 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_DATATIM, datatim_reg); 2627 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_UDMA, udmatim_reg); 2628 } 2629 2630 void 2631 cmd_channel_map(pa, sc, channel) 2632 struct pci_attach_args *pa; 2633 struct pciide_softc *sc; 2634 int channel; 2635 { 2636 struct pciide_channel *cp = &sc->pciide_channels[channel]; 2637 bus_size_t cmdsize, ctlsize; 2638 u_int8_t ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CTRL); 2639 pcireg_t interface; 2640 2641 /* 2642 * The 0648/0649 can be told to identify as a RAID controller. 
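 * (the class code then reports RAID rather than IDE, so the
 * programming interface byte cannot be trusted).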
2643 * In this case, we have to fake interface 2644 */ 2645 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) { 2646 interface = PCIIDE_INTERFACE_SETTABLE(0) | 2647 PCIIDE_INTERFACE_SETTABLE(1); 2648 if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) & 2649 CMD_CONF_DSA1) 2650 interface |= PCIIDE_INTERFACE_PCI(0) | 2651 PCIIDE_INTERFACE_PCI(1); 2652 } else { 2653 interface = PCI_INTERFACE(pa->pa_class); 2654 } 2655 2656 sc->wdc_chanarray[channel] = &cp->wdc_channel; 2657 cp->name = PCIIDE_CHANNEL_NAME(channel); 2658 cp->wdc_channel.channel = channel; 2659 cp->wdc_channel.wdc = &sc->sc_wdcdev; 2660 2661 if (channel > 0) { 2662 cp->wdc_channel.ch_queue = 2663 sc->pciide_channels[0].wdc_channel.ch_queue; 2664 } else { 2665 cp->wdc_channel.ch_queue = 2666 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT); 2667 } 2668 if (cp->wdc_channel.ch_queue == NULL) { 2669 printf( 2670 "%s: %s cannot allocate memory for command queue", 2671 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 2672 return; 2673 } 2674 2675 /* 2676 * with a CMD PCI64x, if we get here, the first channel is enabled: 2677 * there's no way to disable the first channel without disabling 2678 * the whole device 2679 */ 2680 if (channel != 0 && (ctrl & CMD_CTRL_2PORT) == 0) { 2681 printf("%s: %s ignored (disabled)\n", 2682 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 2683 return; 2684 } 2685 cp->hw_ok = 1; 2686 pciide_map_compat_intr(pa, cp, channel, interface); 2687 if (cp->hw_ok == 0) 2688 return; 2689 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, cmd_pci_intr); 2690 if (cp->hw_ok == 0) { 2691 pciide_unmap_compat_intr(pa, cp, channel, interface); 2692 return; 2693 } 2694 if (pciide_chan_candisable(cp)) { 2695 if (channel == 1) { 2696 ctrl &= ~CMD_CTRL_2PORT; 2697 pciide_pci_write(pa->pa_pc, pa->pa_tag, 2698 CMD_CTRL, ctrl); 2699 pciide_unmap_compat_intr(pa, cp, channel, interface); 2700 } 2701 } 2702 } 2703 2704 int 2705 cmd_pci_intr(arg) 2706 void *arg; 2707 { 2708 struct pciide_softc *sc = arg; 2709 struct pciide_channel *cp; 2710 struct channel_softc *wdc_cp; 2711 int i, rv, crv; 2712 u_int32_t priirq, secirq; 2713 2714 rv = 0; 2715 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF); 2716 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23); 2717 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 2718 cp = &sc->pciide_channels[i]; 2719 wdc_cp = &cp->wdc_channel; 2720 /* If a compat channel skip. */ 2721 if (cp->compat) 2722 continue; 2723 if ((i == 0 && (priirq & CMD_CONF_DRV0_INTR)) || 2724 (i == 1 && (secirq & CMD_ARTTIM23_IRQ))) { 2725 crv = wdcintr(wdc_cp); 2726 if (crv == 0) { 2727 #if 0 2728 printf("%s:%d: bogus intr\n", 2729 sc->sc_wdcdev.sc_dev.dv_xname, i); 2730 #endif 2731 } else 2732 rv = 1; 2733 } 2734 } 2735 return rv; 2736 } 2737 2738 void 2739 cmd_chip_map(sc, pa) 2740 struct pciide_softc *sc; 2741 struct pci_attach_args *pa; 2742 { 2743 int channel; 2744 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 2745 /* 2746 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE 2747 * and base adresses registers can be disabled at 2748 * hardware level. In this case, the device is wired 2749 * in compat mode and its first channel is always enabled, 2750 * but we can't rely on PCI_COMMAND_IO_ENABLE. 2751 * In fact, it seems that the first channel of the CMD PCI0640 2752 * can't be disabled. 
2753 */ 2754 2755 #ifdef PCIIDE_CMD064x_DISABLE 2756 if (pciide_chipen(sc, pa) == 0) 2757 return; 2758 #endif 2759 2760 printf(": no DMA"); 2761 sc->sc_dma_ok = 0; 2762 2763 sc->sc_wdcdev.channels = sc->wdc_chanarray; 2764 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 2765 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16; 2766 2767 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 2768 2769 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2770 cmd_channel_map(pa, sc, channel); 2771 } 2772 } 2773 2774 void 2775 cmd0643_9_chip_map(sc, pa) 2776 struct pciide_softc *sc; 2777 struct pci_attach_args *pa; 2778 { 2779 struct pciide_channel *cp; 2780 int channel; 2781 int rev = PCI_REVISION( 2782 pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG)); 2783 pcireg_t interface; 2784 2785 /* 2786 * The 0648/0649 can be told to identify as a RAID controller. 2787 * In this case, we have to fake interface 2788 */ 2789 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) { 2790 interface = PCIIDE_INTERFACE_SETTABLE(0) | 2791 PCIIDE_INTERFACE_SETTABLE(1); 2792 if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) & 2793 CMD_CONF_DSA1) 2794 interface |= PCIIDE_INTERFACE_PCI(0) | 2795 PCIIDE_INTERFACE_PCI(1); 2796 } else { 2797 interface = PCI_INTERFACE(pa->pa_class); 2798 } 2799 2800 /* 2801 * For a CMD PCI064x, the use of PCI_COMMAND_IO_ENABLE 2802 * and base adresses registers can be disabled at 2803 * hardware level. In this case, the device is wired 2804 * in compat mode and its first channel is always enabled, 2805 * but we can't rely on PCI_COMMAND_IO_ENABLE. 2806 * In fact, it seems that the first channel of the CMD PCI0640 2807 * can't be disabled. 2808 */ 2809 2810 #ifdef PCIIDE_CMD064x_DISABLE 2811 if (pciide_chipen(sc, pa) == 0) 2812 return; 2813 #endif 2814 printf(": DMA"); 2815 pciide_mapreg_dma(sc, pa); 2816 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 2817 WDC_CAPABILITY_MODE; 2818 if (sc->sc_dma_ok) { 2819 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 2820 switch (sc->sc_pp->ide_product) { 2821 case PCI_PRODUCT_CMDTECH_649: 2822 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 2823 sc->sc_wdcdev.UDMA_cap = 5; 2824 sc->sc_wdcdev.irqack = cmd646_9_irqack; 2825 break; 2826 case PCI_PRODUCT_CMDTECH_648: 2827 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 2828 sc->sc_wdcdev.UDMA_cap = 4; 2829 sc->sc_wdcdev.irqack = cmd646_9_irqack; 2830 break; 2831 case PCI_PRODUCT_CMDTECH_646: 2832 if (rev >= CMD0646U2_REV) { 2833 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 2834 sc->sc_wdcdev.UDMA_cap = 2; 2835 } else if (rev >= CMD0646U_REV) { 2836 /* 2837 * Linux's driver claims that the 646U is broken 2838 * with UDMA. 
Only enable it if we know what we're 2839 * doing 2840 */ 2841 #ifdef PCIIDE_CMD0646U_ENABLEUDMA 2842 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 2843 sc->sc_wdcdev.UDMA_cap = 2; 2844 #endif 2845 /* explicitly disable UDMA */ 2846 pciide_pci_write(sc->sc_pc, sc->sc_tag, 2847 CMD_UDMATIM(0), 0); 2848 pciide_pci_write(sc->sc_pc, sc->sc_tag, 2849 CMD_UDMATIM(1), 0); 2850 } 2851 sc->sc_wdcdev.irqack = cmd646_9_irqack; 2852 break; 2853 default: 2854 sc->sc_wdcdev.irqack = pciide_irqack; 2855 } 2856 } 2857 2858 sc->sc_wdcdev.channels = sc->wdc_chanarray; 2859 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 2860 sc->sc_wdcdev.PIO_cap = 4; 2861 sc->sc_wdcdev.DMA_cap = 2; 2862 sc->sc_wdcdev.set_modes = cmd0643_9_setup_channel; 2863 2864 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 2865 2866 WDCDEBUG_PRINT(("cmd0643_9_chip_map: old timings reg 0x%x 0x%x\n", 2867 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54), 2868 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)), 2869 DEBUG_PROBE); 2870 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2871 cp = &sc->pciide_channels[channel]; 2872 cmd_channel_map(pa, sc, channel); 2873 if (cp->hw_ok == 0) 2874 continue; 2875 cmd0643_9_setup_channel(&cp->wdc_channel); 2876 } 2877 /* 2878 * note - this also makes sure we clear the irq disable and reset 2879 * bits 2880 */ 2881 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_DMA_MODE, CMD_DMA_MULTIPLE); 2882 WDCDEBUG_PRINT(("cmd0643_9_chip_map: timings reg now 0x%x 0x%x\n", 2883 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54), 2884 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)), 2885 DEBUG_PROBE); 2886 } 2887 2888 void 2889 cmd0643_9_setup_channel(chp) 2890 struct channel_softc *chp; 2891 { 2892 struct ata_drive_datas *drvp; 2893 u_int8_t tim; 2894 u_int32_t idedma_ctl, udma_reg; 2895 int drive; 2896 struct pciide_channel *cp = (struct pciide_channel*)chp; 2897 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2898 2899 idedma_ctl = 0; 2900 /* setup DMA if needed */ 2901 pciide_channel_dma_setup(cp); 2902 2903 for (drive = 0; drive < 2; drive++) { 2904 drvp = &chp->ch_drive[drive]; 2905 /* If no drive, skip */ 2906 if ((drvp->drive_flags & DRIVE) == 0) 2907 continue; 2908 /* add timing values, setup DMA if needed */ 2909 tim = cmd0643_9_data_tim_pio[drvp->PIO_mode]; 2910 if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA)) { 2911 if (drvp->drive_flags & DRIVE_UDMA) { 2912 /* UltraDMA on a 646U2, 0648 or 0649 */ 2913 drvp->drive_flags &= ~DRIVE_DMA; 2914 udma_reg = pciide_pci_read(sc->sc_pc, 2915 sc->sc_tag, CMD_UDMATIM(chp->channel)); 2916 if (drvp->UDMA_mode > 2 && 2917 (pciide_pci_read(sc->sc_pc, sc->sc_tag, 2918 CMD_BICSR) & 2919 CMD_BICSR_80(chp->channel)) == 0) { 2920 WDCDEBUG_PRINT(("%s(%s:%d:%d): " 2921 "80-wire cable not detected\n", 2922 drvp->drive_name, 2923 sc->sc_wdcdev.sc_dev.dv_xname, 2924 chp->channel, drive), DEBUG_PROBE); 2925 drvp->UDMA_mode = 2; 2926 } 2927 if (drvp->UDMA_mode > 2) 2928 udma_reg &= ~CMD_UDMATIM_UDMA33(drive); 2929 else if (sc->sc_wdcdev.UDMA_cap > 2) 2930 udma_reg |= CMD_UDMATIM_UDMA33(drive); 2931 udma_reg |= CMD_UDMATIM_UDMA(drive); 2932 udma_reg &= ~(CMD_UDMATIM_TIM_MASK << 2933 CMD_UDMATIM_TIM_OFF(drive)); 2934 udma_reg |= 2935 (cmd0646_9_tim_udma[drvp->UDMA_mode] << 2936 CMD_UDMATIM_TIM_OFF(drive)); 2937 pciide_pci_write(sc->sc_pc, sc->sc_tag, 2938 CMD_UDMATIM(chp->channel), udma_reg); 2939 } else { 2940 /* 2941 * use Multiword DMA. 
2942 * Timings will be used for both PIO and DMA, 2943 * so adjust DMA mode if needed 2944 * if we have a 0646U2/8/9, turn off UDMA 2945 */ 2946 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) { 2947 udma_reg = pciide_pci_read(sc->sc_pc, 2948 sc->sc_tag, 2949 CMD_UDMATIM(chp->channel)); 2950 udma_reg &= ~CMD_UDMATIM_UDMA(drive); 2951 pciide_pci_write(sc->sc_pc, sc->sc_tag, 2952 CMD_UDMATIM(chp->channel), 2953 udma_reg); 2954 } 2955 if (drvp->PIO_mode >= 3 && 2956 (drvp->DMA_mode + 2) > drvp->PIO_mode) { 2957 drvp->DMA_mode = drvp->PIO_mode - 2; 2958 } 2959 tim = cmd0643_9_data_tim_dma[drvp->DMA_mode]; 2960 } 2961 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 2962 } 2963 pciide_pci_write(sc->sc_pc, sc->sc_tag, 2964 CMD_DATA_TIM(chp->channel, drive), tim); 2965 } 2966 if (idedma_ctl != 0) { 2967 /* Add software bits in status register */ 2968 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 2969 IDEDMA_CTL(chp->channel), 2970 idedma_ctl); 2971 } 2972 pciide_print_modes(cp); 2973 #ifdef __sparc64__ 2974 /* 2975 * The Ultra 5 has a tendency to hang during reboot. This is due 2976 * to the PCI0646U asserting a PCI interrupt line when the chip 2977 * registers claim that it is not. Performing a reset at this 2978 * point appears to eliminate the symptoms. It is likely the 2979 * real cause is still lurking somewhere in the code. 2980 */ 2981 wdcreset(chp, SILENT); 2982 #endif /* __sparc64__ */ 2983 } 2984 2985 void 2986 cmd646_9_irqack(chp) 2987 struct channel_softc *chp; 2988 { 2989 u_int32_t priirq, secirq; 2990 struct pciide_channel *cp = (struct pciide_channel*)chp; 2991 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2992 2993 if (chp->channel == 0) { 2994 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF); 2995 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_CONF, priirq); 2996 } else { 2997 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23); 2998 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23, secirq); 2999 } 3000 pciide_irqack(chp); 3001 } 3002 3003 void 3004 cmd680_chip_map(sc, pa) 3005 struct pciide_softc *sc; 3006 struct pci_attach_args *pa; 3007 { 3008 struct pciide_channel *cp; 3009 int channel; 3010 3011 if (pciide_chipen(sc, pa) == 0) 3012 return; 3013 printf("\n%s: bus-master DMA support present", 3014 sc->sc_wdcdev.sc_dev.dv_xname); 3015 pciide_mapreg_dma(sc, pa); 3016 printf("\n"); 3017 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 3018 WDC_CAPABILITY_MODE; 3019 if (sc->sc_dma_ok) { 3020 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 3021 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 3022 sc->sc_wdcdev.UDMA_cap = 6; 3023 sc->sc_wdcdev.irqack = pciide_irqack; 3024 } 3025 3026 sc->sc_wdcdev.channels = sc->wdc_chanarray; 3027 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 3028 sc->sc_wdcdev.PIO_cap = 4; 3029 sc->sc_wdcdev.DMA_cap = 2; 3030 sc->sc_wdcdev.set_modes = cmd680_setup_channel; 3031 3032 pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x80, 0x00); 3033 pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x84, 0x00); 3034 pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x8a, 3035 pciide_pci_read(sc->sc_pc, sc->sc_tag, 0x8a) | 0x01); 3036 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 3037 cp = &sc->pciide_channels[channel]; 3038 cmd680_channel_map(pa, sc, channel); 3039 if (cp->hw_ok == 0) 3040 continue; 3041 cmd680_setup_channel(&cp->wdc_channel); 3042 } 3043 } 3044 3045 void 3046 cmd680_channel_map(pa, sc, channel) 3047 struct pci_attach_args *pa; 3048 struct pciide_softc *sc; 3049 int channel; 3050 { 3051 
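    /*
     * Per-channel setup for the CMD/SiI 0680: fake the interface bits
     * when the chip has been strapped to its RAID personality, allocate
     * the command queue, load the init_val[] defaults into the timing
     * registers at 0xa2 + 16 * channel (the XXX block below), then map
     * the channel and report whether it is native or compatibility.
     */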
struct pciide_channel *cp = &sc->pciide_channels[channel]; 3052 bus_size_t cmdsize, ctlsize; 3053 int interface, i, reg; 3054 static const u_int8_t init_val[] = 3055 { 0x8a, 0x32, 0x8a, 0x32, 0x8a, 0x32, 3056 0x92, 0x43, 0x92, 0x43, 0x09, 0x40, 0x09, 0x40 }; 3057 3058 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) { 3059 interface = PCIIDE_INTERFACE_SETTABLE(0) | 3060 PCIIDE_INTERFACE_SETTABLE(1); 3061 interface |= PCIIDE_INTERFACE_PCI(0) | 3062 PCIIDE_INTERFACE_PCI(1); 3063 } else { 3064 interface = PCI_INTERFACE(pa->pa_class); 3065 } 3066 3067 sc->wdc_chanarray[channel] = &cp->wdc_channel; 3068 cp->name = PCIIDE_CHANNEL_NAME(channel); 3069 cp->wdc_channel.channel = channel; 3070 cp->wdc_channel.wdc = &sc->sc_wdcdev; 3071 3072 cp->wdc_channel.ch_queue = 3073 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT); 3074 if (cp->wdc_channel.ch_queue == NULL) { 3075 printf("%s %s: " 3076 "can't allocate memory for command queue", 3077 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 3078 return; 3079 } 3080 3081 /* XXX */ 3082 reg = 0xa2 + channel * 16; 3083 for (i = 0; i < sizeof(init_val); i++) 3084 pciide_pci_write(sc->sc_pc, sc->sc_tag, reg + i, init_val[i]); 3085 3086 printf("%s: %s %s to %s mode\n", 3087 sc->sc_wdcdev.sc_dev.dv_xname, cp->name, 3088 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ? 3089 "configured" : "wired", 3090 (interface & PCIIDE_INTERFACE_PCI(channel)) ? 3091 "native-PCI" : "compatibility"); 3092 3093 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, pciide_pci_intr); 3094 if (cp->hw_ok == 0) 3095 return; 3096 pciide_map_compat_intr(pa, cp, channel, interface); 3097 } 3098 3099 void 3100 cmd680_setup_channel(chp) 3101 struct channel_softc *chp; 3102 { 3103 struct ata_drive_datas *drvp; 3104 u_int8_t mode, off, scsc; 3105 u_int16_t val; 3106 u_int32_t idedma_ctl; 3107 int drive; 3108 struct pciide_channel *cp = (struct pciide_channel*)chp; 3109 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 3110 pci_chipset_tag_t pc = sc->sc_pc; 3111 pcitag_t pa = sc->sc_tag; 3112 static const u_int8_t udma2_tbl[] = 3113 { 0x0f, 0x0b, 0x07, 0x06, 0x03, 0x02, 0x01 }; 3114 static const u_int8_t udma_tbl[] = 3115 { 0x0c, 0x07, 0x05, 0x04, 0x02, 0x01, 0x00 }; 3116 static const u_int16_t dma_tbl[] = 3117 { 0x2208, 0x10c2, 0x10c1 }; 3118 static const u_int16_t pio_tbl[] = 3119 { 0x328a, 0x2283, 0x1104, 0x10c3, 0x10c1 }; 3120 3121 idedma_ctl = 0; 3122 pciide_channel_dma_setup(cp); 3123 mode = pciide_pci_read(pc, pa, 0x80 + chp->channel * 4); 3124 3125 for (drive = 0; drive < 2; drive++) { 3126 drvp = &chp->ch_drive[drive]; 3127 /* If no drive, skip */ 3128 if ((drvp->drive_flags & DRIVE) == 0) 3129 continue; 3130 mode &= ~(0x03 << (drive * 4)); 3131 if (drvp->drive_flags & DRIVE_UDMA) { 3132 drvp->drive_flags &= ~DRIVE_DMA; 3133 off = 0xa0 + chp->channel * 16; 3134 if (drvp->UDMA_mode > 2 && 3135 (pciide_pci_read(pc, pa, off) & 0x01) == 0) 3136 drvp->UDMA_mode = 2; 3137 scsc = pciide_pci_read(pc, pa, 0x8a); 3138 if (drvp->UDMA_mode == 6 && (scsc & 0x30) == 0) { 3139 pciide_pci_write(pc, pa, 0x8a, scsc | 0x01); 3140 scsc = pciide_pci_read(pc, pa, 0x8a); 3141 if ((scsc & 0x30) == 0) 3142 drvp->UDMA_mode = 5; 3143 } 3144 mode |= 0x03 << (drive * 4); 3145 off = 0xac + chp->channel * 16 + drive * 2; 3146 val = pciide_pci_read(pc, pa, off) & ~0x3f; 3147 if (scsc & 0x30) 3148 val |= udma2_tbl[drvp->UDMA_mode]; 3149 else 3150 val |= udma_tbl[drvp->UDMA_mode]; 3151 pciide_pci_write(pc, pa, off, val); 3152 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3153 } else 
if (drvp->drive_flags & DRIVE_DMA) { 3154 mode |= 0x02 << (drive * 4); 3155 off = 0xa8 + chp->channel * 16 + drive * 2; 3156 val = dma_tbl[drvp->DMA_mode]; 3157 pciide_pci_write(pc, pa, off, val & 0xff); 3158 pciide_pci_write(pc, pa, off, val >> 8); 3159 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3160 } else { 3161 mode |= 0x01 << (drive * 4); 3162 off = 0xa4 + chp->channel * 16 + drive * 2; 3163 val = pio_tbl[drvp->PIO_mode]; 3164 pciide_pci_write(pc, pa, off, val & 0xff); 3165 pciide_pci_write(pc, pa, off, val >> 8); 3166 } 3167 } 3168 3169 pciide_pci_write(pc, pa, 0x80 + chp->channel * 4, mode); 3170 if (idedma_ctl != 0) { 3171 /* Add software bits in status register */ 3172 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3173 IDEDMA_CTL(chp->channel), 3174 idedma_ctl); 3175 } 3176 pciide_print_modes(cp); 3177 } 3178 3179 void 3180 sii3112_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 3181 { 3182 struct pciide_channel *cp; 3183 bus_size_t cmdsize, ctlsize; 3184 pcireg_t interface; 3185 int channel; 3186 3187 if (pciide_chipen(sc, pa) == 0) 3188 return; 3189 3190 printf(": DMA"); 3191 pciide_mapreg_dma(sc, pa); 3192 3193 /* 3194 * Rev. <= 0x01 of the 3112 have a bug that can cause data 3195 * corruption if DMA transfers cross an 8K boundary. This is 3196 * apparently hard to tickle, but we'll go ahead and play it 3197 * safe. 3198 */ 3199 if (PCI_REVISION(pa->pa_class) <= 0x01) { 3200 sc->sc_dma_maxsegsz = 8192; 3201 sc->sc_dma_boundary = 8192; 3202 } 3203 3204 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 3205 WDC_CAPABILITY_MODE; 3206 sc->sc_wdcdev.PIO_cap = 4; 3207 if (sc->sc_dma_ok) { 3208 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 3209 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 3210 sc->sc_wdcdev.irqack = pciide_irqack; 3211 sc->sc_wdcdev.DMA_cap = 2; 3212 sc->sc_wdcdev.UDMA_cap = 6; 3213 } 3214 sc->sc_wdcdev.set_modes = sii3112_setup_channel; 3215 3216 sc->sc_wdcdev.channels = sc->wdc_chanarray; 3217 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 3218 3219 /* 3220 * The 3112 can be told to identify as a RAID controller. 
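 * (the 3112 is a native Serial ATA controller, so when its class code
 * has been switched to RAID the interface is simply forced to
 * bus-master DMA with both channels native-PCI below).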
3221 * In this case, we have to fake interface 3222 */ 3223 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) { 3224 interface = PCI_INTERFACE(pa->pa_class); 3225 } else { 3226 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA | 3227 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1); 3228 } 3229 3230 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 3231 3232 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 3233 cp = &sc->pciide_channels[channel]; 3234 if (pciide_chansetup(sc, channel, interface) == 0) 3235 continue; 3236 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 3237 pciide_pci_intr); 3238 if (cp->hw_ok == 0) 3239 continue; 3240 pciide_map_compat_intr(pa, cp, channel, interface); 3241 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 3242 } 3243 } 3244 3245 void 3246 sii3112_setup_channel(struct channel_softc *chp) 3247 { 3248 struct ata_drive_datas *drvp; 3249 int drive; 3250 u_int32_t idedma_ctl, dtm; 3251 struct pciide_channel *cp = (struct pciide_channel*)chp; 3252 struct pciide_softc *sc = (struct pciide_softc*)cp->wdc_channel.wdc; 3253 3254 /* setup DMA if needed */ 3255 pciide_channel_dma_setup(cp); 3256 3257 idedma_ctl = 0; 3258 dtm = 0; 3259 3260 for (drive = 0; drive < 2; drive++) { 3261 drvp = &chp->ch_drive[drive]; 3262 /* If no drive, skip */ 3263 if ((drvp->drive_flags & DRIVE) == 0) 3264 continue; 3265 if (drvp->drive_flags & DRIVE_UDMA) { 3266 /* use Ultra/DMA */ 3267 drvp->drive_flags &= ~DRIVE_DMA; 3268 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3269 dtm |= DTM_IDEx_DMA; 3270 } else if (drvp->drive_flags & DRIVE_DMA) { 3271 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3272 dtm |= DTM_IDEx_DMA; 3273 } else { 3274 dtm |= DTM_IDEx_PIO; 3275 } 3276 } 3277 3278 /* 3279 * Nothing to do to setup modes; it is meaningless in S-ATA 3280 * (but many S-ATA drives still want to get the SET_FEATURE 3281 * command). 3282 */ 3283 if (idedma_ctl != 0) { 3284 /* Add software bits in status register */ 3285 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3286 IDEDMA_CTL(chp->channel), idedma_ctl); 3287 } 3288 pci_conf_write(sc->sc_pc, sc->sc_tag, 3289 chp->channel == 0 ? SII3112_DTM_IDE0 : SII3112_DTM_IDE1, dtm); 3290 pciide_print_modes(cp); 3291 } 3292 3293 void 3294 cy693_chip_map(sc, pa) 3295 struct pciide_softc *sc; 3296 struct pci_attach_args *pa; 3297 { 3298 struct pciide_channel *cp; 3299 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 3300 bus_size_t cmdsize, ctlsize; 3301 3302 if (pciide_chipen(sc, pa) == 0) 3303 return; 3304 /* 3305 * this chip has 2 PCI IDE functions, one for primary and one for 3306 * secondary. 
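 * (function 1 is wired to the primary compat channel and function 2 to
 * the secondary one; sc_cy_compatchan below records which of the two
 * this instance drives).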
So we need to call pciide_mapregs_compat() with 3307 * the real channel 3308 */ 3309 if (pa->pa_function == 1) { 3310 sc->sc_cy_compatchan = 0; 3311 } else if (pa->pa_function == 2) { 3312 sc->sc_cy_compatchan = 1; 3313 } else { 3314 printf(": unexpected PCI function %d\n", pa->pa_function); 3315 return; 3316 } 3317 3318 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) { 3319 printf(": DMA"); 3320 pciide_mapreg_dma(sc, pa); 3321 } else { 3322 printf(": no DMA"); 3323 sc->sc_dma_ok = 0; 3324 } 3325 3326 sc->sc_cy_handle = cy82c693_init(pa->pa_iot); 3327 if (sc->sc_cy_handle == NULL) { 3328 printf(", (unable to map ctl registers)"); 3329 sc->sc_dma_ok = 0; 3330 } 3331 3332 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 3333 WDC_CAPABILITY_MODE; 3334 if (sc->sc_dma_ok) { 3335 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 3336 sc->sc_wdcdev.irqack = pciide_irqack; 3337 } 3338 sc->sc_wdcdev.PIO_cap = 4; 3339 sc->sc_wdcdev.DMA_cap = 2; 3340 sc->sc_wdcdev.set_modes = cy693_setup_channel; 3341 3342 sc->sc_wdcdev.channels = sc->wdc_chanarray; 3343 sc->sc_wdcdev.nchannels = 1; 3344 3345 /* Only one channel for this chip; if we are here it's enabled */ 3346 cp = &sc->pciide_channels[0]; 3347 sc->wdc_chanarray[0] = &cp->wdc_channel; 3348 cp->name = PCIIDE_CHANNEL_NAME(0); 3349 cp->wdc_channel.channel = 0; 3350 cp->wdc_channel.wdc = &sc->sc_wdcdev; 3351 cp->wdc_channel.ch_queue = 3352 malloc(sizeof(struct channel_queue), M_DEVBUF, M_NOWAIT); 3353 if (cp->wdc_channel.ch_queue == NULL) { 3354 printf(": cannot allocate memory for command queue\n"); 3355 return; 3356 } 3357 printf(", %s %s to ", PCIIDE_CHANNEL_NAME(0), 3358 (interface & PCIIDE_INTERFACE_SETTABLE(0)) ? 3359 "configured" : "wired"); 3360 if (interface & PCIIDE_INTERFACE_PCI(0)) { 3361 printf("native-PCI\n"); 3362 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, &ctlsize, 3363 pciide_pci_intr); 3364 } else { 3365 printf("compatibility\n"); 3366 cp->hw_ok = pciide_mapregs_compat(pa, cp, sc->sc_cy_compatchan, 3367 &cmdsize, &ctlsize); 3368 } 3369 3370 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot; 3371 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh; 3372 pciide_map_compat_intr(pa, cp, sc->sc_cy_compatchan, interface); 3373 if (cp->hw_ok == 0) 3374 return; 3375 wdcattach(&cp->wdc_channel); 3376 if (pciide_chan_candisable(cp)) { 3377 pci_conf_write(sc->sc_pc, sc->sc_tag, 3378 PCI_COMMAND_STATUS_REG, 0); 3379 } 3380 if (cp->hw_ok == 0) { 3381 pciide_unmap_compat_intr(pa, cp, sc->sc_cy_compatchan, 3382 interface); 3383 return; 3384 } 3385 3386 WDCDEBUG_PRINT(("cy693_chip_map: old timings reg 0x%x\n", 3387 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)),DEBUG_PROBE); 3388 cy693_setup_channel(&cp->wdc_channel); 3389 WDCDEBUG_PRINT(("cy693_chip_map: new timings reg 0x%x\n", 3390 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE); 3391 } 3392 3393 void 3394 cy693_setup_channel(chp) 3395 struct channel_softc *chp; 3396 { 3397 struct ata_drive_datas *drvp; 3398 int drive; 3399 u_int32_t cy_cmd_ctrl; 3400 u_int32_t idedma_ctl; 3401 struct pciide_channel *cp = (struct pciide_channel*)chp; 3402 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 3403 int dma_mode = -1; 3404 3405 cy_cmd_ctrl = idedma_ctl = 0; 3406 3407 /* setup DMA if needed */ 3408 pciide_channel_dma_setup(cp); 3409 3410 for (drive = 0; drive < 2; drive++) { 3411 drvp = &chp->ch_drive[drive]; 3412 /* If no drive, skip */ 3413 if ((drvp->drive_flags & DRIVE) == 0) 3414 continue; 3415 /* add timing 
values, setup DMA if needed */ 3416 if (drvp->drive_flags & DRIVE_DMA) { 3417 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3418 /* use Multiword DMA */ 3419 if (dma_mode == -1 || dma_mode > drvp->DMA_mode) 3420 dma_mode = drvp->DMA_mode; 3421 } 3422 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] << 3423 CY_CMD_CTRL_IOW_PULSE_OFF(drive)); 3424 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] << 3425 CY_CMD_CTRL_IOW_REC_OFF(drive)); 3426 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] << 3427 CY_CMD_CTRL_IOR_PULSE_OFF(drive)); 3428 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] << 3429 CY_CMD_CTRL_IOR_REC_OFF(drive)); 3430 } 3431 pci_conf_write(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL, cy_cmd_ctrl); 3432 chp->ch_drive[0].DMA_mode = dma_mode; 3433 chp->ch_drive[1].DMA_mode = dma_mode; 3434 3435 if (dma_mode == -1) 3436 dma_mode = 0; 3437 3438 if (sc->sc_cy_handle != NULL) { 3439 /* Note: `multiple' is implied. */ 3440 cy82c693_write(sc->sc_cy_handle, 3441 (sc->sc_cy_compatchan == 0) ? 3442 CY_DMA_IDX_PRIMARY : CY_DMA_IDX_SECONDARY, dma_mode); 3443 } 3444 3445 pciide_print_modes(cp); 3446 3447 if (idedma_ctl != 0) { 3448 /* Add software bits in status register */ 3449 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3450 IDEDMA_CTL(chp->channel), idedma_ctl); 3451 } 3452 } 3453 3454 static struct sis_hostbr_type { 3455 u_int16_t id; 3456 u_int8_t rev; 3457 u_int8_t udma_mode; 3458 char *name; 3459 u_int8_t type; 3460 #define SIS_TYPE_NOUDMA 0 3461 #define SIS_TYPE_66 1 3462 #define SIS_TYPE_100OLD 2 3463 #define SIS_TYPE_100NEW 3 3464 #define SIS_TYPE_133OLD 4 3465 #define SIS_TYPE_133NEW 5 3466 #define SIS_TYPE_SOUTH 6 3467 } sis_hostbr_type[] = { 3468 /* Most infos here are from sos@freebsd.org */ 3469 {PCI_PRODUCT_SIS_530, 0x00, 4, "530", SIS_TYPE_66}, 3470 #if 0 3471 /* 3472 * controllers associated to a rev 0x2 530 Host to PCI Bridge 3473 * have problems with UDMA (info provided by Christos) 3474 */ 3475 {PCI_PRODUCT_SIS_530, 0x02, 0, "530 (buggy)", SIS_TYPE_NOUDMA}, 3476 #endif 3477 {PCI_PRODUCT_SIS_540, 0x00, 4, "540", SIS_TYPE_66}, 3478 {PCI_PRODUCT_SIS_550, 0x00, 4, "550", SIS_TYPE_66}, 3479 {PCI_PRODUCT_SIS_620, 0x00, 4, "620", SIS_TYPE_66}, 3480 {PCI_PRODUCT_SIS_630, 0x00, 4, "630", SIS_TYPE_66}, 3481 {PCI_PRODUCT_SIS_630, 0x30, 5, "630S", SIS_TYPE_100NEW}, 3482 {PCI_PRODUCT_SIS_633, 0x00, 5, "633", SIS_TYPE_100NEW}, 3483 {PCI_PRODUCT_SIS_635, 0x00, 5, "635", SIS_TYPE_100NEW}, 3484 {PCI_PRODUCT_SIS_640, 0x00, 4, "640", SIS_TYPE_SOUTH}, 3485 {PCI_PRODUCT_SIS_645, 0x00, 6, "645", SIS_TYPE_SOUTH}, 3486 {PCI_PRODUCT_SIS_646, 0x00, 6, "645DX", SIS_TYPE_SOUTH}, 3487 {PCI_PRODUCT_SIS_648, 0x00, 6, "648", SIS_TYPE_SOUTH}, 3488 {PCI_PRODUCT_SIS_650, 0x00, 6, "650", SIS_TYPE_SOUTH}, 3489 {PCI_PRODUCT_SIS_651, 0x00, 6, "651", SIS_TYPE_SOUTH}, 3490 {PCI_PRODUCT_SIS_652, 0x00, 6, "652", SIS_TYPE_SOUTH}, 3491 {PCI_PRODUCT_SIS_655, 0x00, 6, "655", SIS_TYPE_SOUTH}, 3492 {PCI_PRODUCT_SIS_658, 0x00, 6, "658", SIS_TYPE_SOUTH}, 3493 {PCI_PRODUCT_SIS_730, 0x00, 5, "730", SIS_TYPE_100OLD}, 3494 {PCI_PRODUCT_SIS_733, 0x00, 5, "733", SIS_TYPE_100NEW}, 3495 {PCI_PRODUCT_SIS_735, 0x00, 5, "735", SIS_TYPE_100NEW}, 3496 {PCI_PRODUCT_SIS_740, 0x00, 5, "740", SIS_TYPE_SOUTH}, 3497 {PCI_PRODUCT_SIS_745, 0x00, 5, "745", SIS_TYPE_100NEW}, 3498 {PCI_PRODUCT_SIS_746, 0x00, 6, "746", SIS_TYPE_SOUTH}, 3499 {PCI_PRODUCT_SIS_748, 0x00, 6, "748", SIS_TYPE_SOUTH}, 3500 {PCI_PRODUCT_SIS_750, 0x00, 6, "750", SIS_TYPE_SOUTH}, 3501 {PCI_PRODUCT_SIS_751, 0x00, 6, "751", SIS_TYPE_SOUTH}, 3502 {PCI_PRODUCT_SIS_752, 0x00, 6, "752", SIS_TYPE_SOUTH}, 3503 
{PCI_PRODUCT_SIS_755, 0x00, 6, "755", SIS_TYPE_SOUTH}, 3504 /* 3505 * From sos@freebsd.org: the 0x961 ID will never be found in real world 3506 * {PCI_PRODUCT_SIS_961, 0x00, 6, "961", SIS_TYPE_133NEW}, 3507 */ 3508 {PCI_PRODUCT_SIS_962, 0x00, 6, "962", SIS_TYPE_133NEW}, 3509 {PCI_PRODUCT_SIS_963, 0x00, 6, "963", SIS_TYPE_133NEW} 3510 }; 3511 3512 static struct sis_hostbr_type *sis_hostbr_type_match; 3513 3514 int 3515 sis_hostbr_match(struct pci_attach_args *pa) 3516 { 3517 int i; 3518 3519 if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_SIS) 3520 return (0); 3521 sis_hostbr_type_match = NULL; 3522 for (i = 0; 3523 i < sizeof(sis_hostbr_type) / sizeof(sis_hostbr_type[0]); 3524 i++) { 3525 if (PCI_PRODUCT(pa->pa_id) == sis_hostbr_type[i].id && 3526 PCI_REVISION(pa->pa_class) >= sis_hostbr_type[i].rev) 3527 sis_hostbr_type_match = &sis_hostbr_type[i]; 3528 } 3529 return (sis_hostbr_type_match != NULL); 3530 } 3531 3532 int 3533 sis_south_match(struct pci_attach_args *pa) 3534 { 3535 return(PCI_VENDOR(pa->pa_id) == PCI_VENDOR_SIS && 3536 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_85C503 && 3537 PCI_REVISION(pa->pa_class) >= 0x10); 3538 } 3539 3540 void 3541 sis_chip_map(sc, pa) 3542 struct pciide_softc *sc; 3543 struct pci_attach_args *pa; 3544 { 3545 struct pciide_channel *cp; 3546 int channel; 3547 u_int8_t sis_ctr0 = pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_CTRL0); 3548 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 3549 pcireg_t rev = PCI_REVISION(pa->pa_class); 3550 bus_size_t cmdsize, ctlsize; 3551 pcitag_t br_tag; 3552 struct pci_attach_args br_pa; 3553 3554 if (pciide_chipen(sc, pa) == 0) 3555 return; 3556 3557 /* Find PCI bridge (dev 0 func 0 on the same bus) */ 3558 br_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, 0, 0); 3559 br_pa.pa_id = pci_conf_read(sc->sc_pc, br_tag, PCI_ID_REG); 3560 br_pa.pa_class = pci_conf_read(sc->sc_pc, br_tag, PCI_CLASS_REG); 3561 WDCDEBUG_PRINT(("%s: PCI bridge pa_id=0x%x pa_class=0x%x\n", 3562 __func__, br_pa.pa_id, br_pa.pa_class), DEBUG_PROBE); 3563 3564 if (sis_hostbr_match(&br_pa)) { 3565 if (sis_hostbr_type_match->type == SIS_TYPE_SOUTH) { 3566 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_57, 3567 pciide_pci_read(sc->sc_pc, sc->sc_tag, 3568 SIS_REG_57) & 0x7f); 3569 if (PCI_PRODUCT(pci_conf_read(sc->sc_pc, sc->sc_tag, 3570 PCI_ID_REG)) == SIS_PRODUCT_5518) { 3571 sc->sis_type = SIS_TYPE_133NEW; 3572 sc->sc_wdcdev.UDMA_cap = 3573 sis_hostbr_type_match->udma_mode; 3574 } else { 3575 /* Find ISA bridge (func 0 of the same dev) */ 3576 br_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, 3577 pa->pa_device, 0); 3578 br_pa.pa_id = pci_conf_read(sc->sc_pc, 3579 br_tag, PCI_ID_REG); 3580 br_pa.pa_class = pci_conf_read(sc->sc_pc, 3581 br_tag, PCI_CLASS_REG); 3582 WDCDEBUG_PRINT(("%s: ISA bridge " 3583 "pa_id=0x%x pa_class=0x%x\n", 3584 __func__, br_pa.pa_id, br_pa.pa_class), 3585 DEBUG_PROBE); 3586 3587 if (sis_south_match(&br_pa)) { 3588 sc->sis_type = SIS_TYPE_133OLD; 3589 sc->sc_wdcdev.UDMA_cap = 3590 sis_hostbr_type_match->udma_mode; 3591 } else { 3592 sc->sis_type = SIS_TYPE_100NEW; 3593 sc->sc_wdcdev.UDMA_cap = 3594 sis_hostbr_type_match->udma_mode; 3595 } 3596 } 3597 } else { 3598 sc->sis_type = sis_hostbr_type_match->type; 3599 sc->sc_wdcdev.UDMA_cap = 3600 sis_hostbr_type_match->udma_mode; 3601 } 3602 printf(": %s", sis_hostbr_type_match->name); 3603 } else { 3604 printf(": 5597/5598"); 3605 if (rev >= 0xd0) { 3606 sc->sc_wdcdev.UDMA_cap = 2; 3607 sc->sis_type = SIS_TYPE_66; 3608 } else { 3609 sc->sc_wdcdev.UDMA_cap = 0; 3610 sc->sis_type = SIS_TYPE_NOUDMA; 
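            /*
             * Example of the classification above: a SiS 735 host
             * bridge matches the "735" entry of sis_hostbr_type[],
             * giving SIS_TYPE_100NEW and UDMA_cap 5.  For
             * SIS_TYPE_SOUTH entries the code additionally looks at
             * the IDE function's product id (5518 -> SIS_TYPE_133NEW)
             * and at the 85C503 ISA bridge revision to choose between
             * SIS_TYPE_133OLD and SIS_TYPE_100NEW.
             */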
3611 } 3612 } 3613 3614 printf(": DMA"); 3615 pciide_mapreg_dma(sc, pa); 3616 3617 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 3618 WDC_CAPABILITY_MODE; 3619 if (sc->sc_dma_ok) { 3620 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 3621 sc->sc_wdcdev.irqack = pciide_irqack; 3622 if (sc->sis_type >= SIS_TYPE_66) 3623 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 3624 } 3625 3626 sc->sc_wdcdev.PIO_cap = 4; 3627 sc->sc_wdcdev.DMA_cap = 2; 3628 3629 sc->sc_wdcdev.channels = sc->wdc_chanarray; 3630 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 3631 switch (sc->sis_type) { 3632 case SIS_TYPE_NOUDMA: 3633 case SIS_TYPE_66: 3634 case SIS_TYPE_100OLD: 3635 sc->sc_wdcdev.set_modes = sis_setup_channel; 3636 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_MISC, 3637 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_MISC) | 3638 SIS_MISC_TIM_SEL | SIS_MISC_FIFO_SIZE | SIS_MISC_GTC); 3639 break; 3640 case SIS_TYPE_100NEW: 3641 case SIS_TYPE_133OLD: 3642 sc->sc_wdcdev.set_modes = sis_setup_channel; 3643 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_49, 3644 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_49) | 0x01); 3645 break; 3646 case SIS_TYPE_133NEW: 3647 sc->sc_wdcdev.set_modes = sis96x_setup_channel; 3648 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_50, 3649 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_50) & 0xf7); 3650 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_52, 3651 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_52) & 0xf7); 3652 break; 3653 } 3654 3655 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 3656 3657 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 3658 cp = &sc->pciide_channels[channel]; 3659 if (pciide_chansetup(sc, channel, interface) == 0) 3660 continue; 3661 if ((channel == 0 && (sis_ctr0 & SIS_CTRL0_CHAN0_EN) == 0) || 3662 (channel == 1 && (sis_ctr0 & SIS_CTRL0_CHAN1_EN) == 0)) { 3663 printf("%s: %s ignored (disabled)\n", 3664 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 3665 continue; 3666 } 3667 pciide_map_compat_intr(pa, cp, channel, interface); 3668 if (cp->hw_ok == 0) 3669 continue; 3670 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 3671 pciide_pci_intr); 3672 if (cp->hw_ok == 0) { 3673 pciide_unmap_compat_intr(pa, cp, channel, interface); 3674 continue; 3675 } 3676 if (pciide_chan_candisable(cp)) { 3677 if (channel == 0) 3678 sis_ctr0 &= ~SIS_CTRL0_CHAN0_EN; 3679 else 3680 sis_ctr0 &= ~SIS_CTRL0_CHAN1_EN; 3681 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_CTRL0, 3682 sis_ctr0); 3683 } 3684 if (cp->hw_ok == 0) { 3685 pciide_unmap_compat_intr(pa, cp, channel, interface); 3686 continue; 3687 } 3688 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 3689 } 3690 } 3691 3692 void 3693 sis96x_setup_channel(struct channel_softc *chp) 3694 { 3695 struct ata_drive_datas *drvp; 3696 int drive; 3697 u_int32_t sis_tim; 3698 u_int32_t idedma_ctl; 3699 int regtim; 3700 struct pciide_channel *cp = (struct pciide_channel*)chp; 3701 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 3702 3703 sis_tim = 0; 3704 idedma_ctl = 0; 3705 /* setup DMA if needed */ 3706 pciide_channel_dma_setup(cp); 3707 3708 for (drive = 0; drive < 2; drive++) { 3709 regtim = SIS_TIM133( 3710 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_57), 3711 chp->channel, drive); 3712 drvp = &chp->ch_drive[drive]; 3713 /* If no drive, skip */ 3714 if ((drvp->drive_flags & DRIVE) == 0) 3715 continue; 3716 /* add timing values, setup DMA if needed */ 3717 if (drvp->drive_flags & DRIVE_UDMA) { 3718 /* use Ultra/DMA */ 3719 drvp->drive_flags &= 
~DRIVE_DMA; 3720 if (pciide_pci_read(sc->sc_pc, sc->sc_tag, 3721 SIS96x_REG_CBL(chp->channel)) & SIS96x_REG_CBL_33) { 3722 if (drvp->UDMA_mode > 2) 3723 drvp->UDMA_mode = 2; 3724 } 3725 sis_tim |= sis_udma133new_tim[drvp->UDMA_mode]; 3726 sis_tim |= sis_pio133new_tim[drvp->PIO_mode]; 3727 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3728 } else if (drvp->drive_flags & DRIVE_DMA) { 3729 /* 3730 * use Multiword DMA 3731 * Timings will be used for both PIO and DMA, 3732 * so adjust DMA mode if needed 3733 */ 3734 if (drvp->PIO_mode > (drvp->DMA_mode + 2)) 3735 drvp->PIO_mode = drvp->DMA_mode + 2; 3736 if (drvp->DMA_mode + 2 > (drvp->PIO_mode)) 3737 drvp->DMA_mode = (drvp->PIO_mode > 2) ? 3738 drvp->PIO_mode - 2 : 0; 3739 sis_tim |= sis_dma133new_tim[drvp->DMA_mode]; 3740 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3741 } else { 3742 sis_tim |= sis_pio133new_tim[drvp->PIO_mode]; 3743 } 3744 WDCDEBUG_PRINT(("sis96x_setup_channel: new timings reg for " 3745 "channel %d drive %d: 0x%x (reg 0x%x)\n", 3746 chp->channel, drive, sis_tim, regtim), DEBUG_PROBE); 3747 pci_conf_write(sc->sc_pc, sc->sc_tag, regtim, sis_tim); 3748 } 3749 if (idedma_ctl != 0) { 3750 /* Add software bits in status register */ 3751 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3752 IDEDMA_CTL(chp->channel), idedma_ctl); 3753 } 3754 pciide_print_modes(cp); 3755 } 3756 3757 void 3758 sis_setup_channel(chp) 3759 struct channel_softc *chp; 3760 { 3761 struct ata_drive_datas *drvp; 3762 int drive; 3763 u_int32_t sis_tim; 3764 u_int32_t idedma_ctl; 3765 struct pciide_channel *cp = (struct pciide_channel*)chp; 3766 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 3767 3768 WDCDEBUG_PRINT(("sis_setup_channel: old timings reg for " 3769 "channel %d 0x%x\n", chp->channel, 3770 pci_conf_read(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel))), 3771 DEBUG_PROBE); 3772 sis_tim = 0; 3773 idedma_ctl = 0; 3774 /* setup DMA if needed */ 3775 pciide_channel_dma_setup(cp); 3776 3777 for (drive = 0; drive < 2; drive++) { 3778 drvp = &chp->ch_drive[drive]; 3779 /* If no drive, skip */ 3780 if ((drvp->drive_flags & DRIVE) == 0) 3781 continue; 3782 /* add timing values, setup DMA if needed */ 3783 if ((drvp->drive_flags & DRIVE_DMA) == 0 && 3784 (drvp->drive_flags & DRIVE_UDMA) == 0) 3785 goto pio; 3786 3787 if (drvp->drive_flags & DRIVE_UDMA) { 3788 /* use Ultra/DMA */ 3789 drvp->drive_flags &= ~DRIVE_DMA; 3790 if (pciide_pci_read(sc->sc_pc, sc->sc_tag, 3791 SIS_REG_CBL) & SIS_REG_CBL_33(chp->channel)) { 3792 if (drvp->UDMA_mode > 2) 3793 drvp->UDMA_mode = 2; 3794 } 3795 switch (sc->sis_type) { 3796 case SIS_TYPE_66: 3797 case SIS_TYPE_100OLD: 3798 sis_tim |= sis_udma66_tim[drvp->UDMA_mode] << 3799 SIS_TIM66_UDMA_TIME_OFF(drive); 3800 break; 3801 case SIS_TYPE_100NEW: 3802 sis_tim |= 3803 sis_udma100new_tim[drvp->UDMA_mode] << 3804 SIS_TIM100_UDMA_TIME_OFF(drive); break; 3805 case SIS_TYPE_133OLD: 3806 sis_tim |= 3807 sis_udma133old_tim[drvp->UDMA_mode] << 3808 SIS_TIM100_UDMA_TIME_OFF(drive); 3809 break; 3810 default: 3811 printf("unknown SiS IDE type %d\n", 3812 sc->sis_type); 3813 } 3814 } else { 3815 /* 3816 * use Multiword DMA 3817 * Timings will be used for both PIO and DMA, 3818 * so adjust DMA mode if needed 3819 */ 3820 if (drvp->PIO_mode > (drvp->DMA_mode + 2)) 3821 drvp->PIO_mode = drvp->DMA_mode + 2; 3822 if (drvp->DMA_mode + 2 > (drvp->PIO_mode)) 3823 drvp->DMA_mode = (drvp->PIO_mode > 2) ?
3824 drvp->PIO_mode - 2 : 0; 3825 if (drvp->DMA_mode == 0) 3826 drvp->PIO_mode = 0; 3827 } 3828 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3829 pio: switch (sc->sis_type) { 3830 case SIS_TYPE_NOUDMA: 3831 case SIS_TYPE_66: 3832 case SIS_TYPE_100OLD: 3833 sis_tim |= sis_pio_act[drvp->PIO_mode] << 3834 SIS_TIM66_ACT_OFF(drive); 3835 sis_tim |= sis_pio_rec[drvp->PIO_mode] << 3836 SIS_TIM66_REC_OFF(drive); 3837 break; 3838 case SIS_TYPE_100NEW: 3839 case SIS_TYPE_133OLD: 3840 sis_tim |= sis_pio_act[drvp->PIO_mode] << 3841 SIS_TIM100_ACT_OFF(drive); 3842 sis_tim |= sis_pio_rec[drvp->PIO_mode] << 3843 SIS_TIM100_REC_OFF(drive); 3844 break; 3845 default: 3846 printf("unknown SiS IDE type %d\n", 3847 sc->sis_type); 3848 } 3849 } 3850 WDCDEBUG_PRINT(("sis_setup_channel: new timings reg for " 3851 "channel %d 0x%x\n", chp->channel, sis_tim), DEBUG_PROBE); 3852 pci_conf_write(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel), sis_tim); 3853 if (idedma_ctl != 0) { 3854 /* Add software bits in status register */ 3855 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3856 IDEDMA_CTL(chp->channel), idedma_ctl); 3857 } 3858 pciide_print_modes(cp); 3859 } 3860 3861 void 3862 natsemi_chip_map(sc, pa) 3863 struct pciide_softc *sc; 3864 struct pci_attach_args *pa; 3865 { 3866 struct pciide_channel *cp; 3867 int channel; 3868 pcireg_t interface, ctl; 3869 bus_size_t cmdsize, ctlsize; 3870 3871 if (pciide_chipen(sc, pa) == 0) 3872 return; 3873 3874 printf(": DMA"); 3875 pciide_mapreg_dma(sc, pa); 3876 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16; 3877 3878 if (sc->sc_dma_ok) { 3879 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 3880 sc->sc_wdcdev.irqack = natsemi_irqack; 3881 } 3882 3883 pciide_pci_write(sc->sc_pc, sc->sc_tag, NATSEMI_CCBT, 0xb7); 3884 3885 /* 3886 * Mask off interrupts from both channels, appropriate channel(s) 3887 * will be unmasked later. 3888 */ 3889 pciide_pci_write(sc->sc_pc, sc->sc_tag, NATSEMI_CTRL2, 3890 pciide_pci_read(sc->sc_pc, sc->sc_tag, NATSEMI_CTRL2) | 3891 NATSEMI_CHMASK(0) | NATSEMI_CHMASK(1)); 3892 3893 sc->sc_wdcdev.PIO_cap = 4; 3894 sc->sc_wdcdev.DMA_cap = 2; 3895 sc->sc_wdcdev.set_modes = natsemi_setup_channel; 3896 sc->sc_wdcdev.channels = sc->wdc_chanarray; 3897 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 3898 3899 interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag, 3900 PCI_CLASS_REG)); 3901 interface &= ~PCIIDE_CHANSTATUS_EN; /* Reserved on PC87415 */ 3902 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 3903 3904 /* If we're in PCIIDE mode, unmask INTA, otherwise mask it. 
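* (INTA is useful only when at least one channel runs in native-PCI mode; compat-only configurations are serviced through the legacy interrupts mapped below.)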
*/ 3905 ctl = pciide_pci_read(sc->sc_pc, sc->sc_tag, NATSEMI_CTRL1); 3906 if (interface & (PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1))) 3907 ctl &= ~NATSEMI_CTRL1_INTAMASK; 3908 else 3909 ctl |= NATSEMI_CTRL1_INTAMASK; 3910 pciide_pci_write(sc->sc_pc, sc->sc_tag, NATSEMI_CTRL1, ctl); 3911 3912 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 3913 cp = &sc->pciide_channels[channel]; 3914 if (pciide_chansetup(sc, channel, interface) == 0) 3915 continue; 3916 3917 pciide_map_compat_intr(pa, cp, channel, interface); 3918 if (cp->hw_ok == 0) 3919 continue; 3920 3921 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 3922 natsemi_pci_intr); 3923 if (cp->hw_ok == 0) { 3924 pciide_unmap_compat_intr(pa, cp, channel, interface); 3925 continue; 3926 } 3927 natsemi_setup_channel(&cp->wdc_channel); 3928 } 3929 } 3930 3931 void 3932 natsemi_setup_channel(chp) 3933 struct channel_softc *chp; 3934 { 3935 struct ata_drive_datas *drvp; 3936 int drive, ndrives = 0; 3937 u_int32_t idedma_ctl = 0; 3938 struct pciide_channel *cp = (struct pciide_channel*)chp; 3939 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 3940 u_int8_t tim; 3941 3942 /* setup DMA if needed */ 3943 pciide_channel_dma_setup(cp); 3944 3945 for (drive = 0; drive < 2; drive++) { 3946 drvp = &chp->ch_drive[drive]; 3947 /* If no drive, skip */ 3948 if ((drvp->drive_flags & DRIVE) == 0) 3949 continue; 3950 3951 ndrives++; 3952 /* add timing values, setup DMA if needed */ 3953 if ((drvp->drive_flags & DRIVE_DMA) == 0) { 3954 tim = natsemi_pio_pulse[drvp->PIO_mode] | 3955 (natsemi_pio_recover[drvp->PIO_mode] << 4); 3956 } else { 3957 /* 3958 * use Multiword DMA 3959 * Timings will be used for both PIO and DMA, 3960 * so adjust DMA mode if needed 3961 */ 3962 if (drvp->PIO_mode >= 3 && 3963 (drvp->DMA_mode + 2) > drvp->PIO_mode) { 3964 drvp->DMA_mode = drvp->PIO_mode - 2; 3965 } 3966 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3967 tim = natsemi_dma_pulse[drvp->DMA_mode] | 3968 (natsemi_dma_recover[drvp->DMA_mode] << 4); 3969 } 3970 3971 pciide_pci_write(sc->sc_pc, sc->sc_tag, 3972 NATSEMI_RTREG(chp->channel, drive), tim); 3973 pciide_pci_write(sc->sc_pc, sc->sc_tag, 3974 NATSEMI_WTREG(chp->channel, drive), tim); 3975 } 3976 if (idedma_ctl != 0) { 3977 /* Add software bits in status register */ 3978 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3979 IDEDMA_CTL(chp->channel), idedma_ctl); 3980 } 3981 if (ndrives > 0) { 3982 /* Unmask the channel if at least one drive is found */ 3983 pciide_pci_write(sc->sc_pc, sc->sc_tag, NATSEMI_CTRL2, 3984 pciide_pci_read(sc->sc_pc, sc->sc_tag, NATSEMI_CTRL2) & 3985 ~(NATSEMI_CHMASK(chp->channel))); 3986 } 3987 3988 pciide_print_modes(cp); 3989 3990 /* Go ahead and ack interrupts generated during probe. 
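* (Reading IDEDMA_CTL and writing the value straight back clears any pending interrupt/error bits, which are write-1-to-clear.)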
*/ 3991 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3992 IDEDMA_CTL(chp->channel), 3993 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3994 IDEDMA_CTL(chp->channel))); 3995 } 3996 3997 void 3998 natsemi_irqack(chp) 3999 struct channel_softc *chp; 4000 { 4001 struct pciide_channel *cp = (struct pciide_channel*)chp; 4002 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 4003 u_int8_t clr; 4004 4005 /* The "clear" bits are in the wrong register *sigh* */ 4006 clr = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 4007 IDEDMA_CMD(chp->channel)); 4008 clr |= bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 4009 IDEDMA_CTL(chp->channel)) & 4010 (IDEDMA_CTL_ERR | IDEDMA_CTL_INTR); 4011 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 4012 IDEDMA_CMD(chp->channel), clr); 4013 } 4014 4015 int 4016 natsemi_pci_intr(arg) 4017 void *arg; 4018 { 4019 struct pciide_softc *sc = arg; 4020 struct pciide_channel *cp; 4021 struct channel_softc *wdc_cp; 4022 int i, rv, crv; 4023 u_int8_t ide_dmactl, msk; 4024 4025 rv = 0; 4026 msk = pciide_pci_read(sc->sc_pc, sc->sc_tag, NATSEMI_CTRL2); 4027 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 4028 cp = &sc->pciide_channels[i]; 4029 wdc_cp = &cp->wdc_channel; 4030 4031 /* If a compat channel skip. */ 4032 if (cp->compat) 4033 continue; 4034 4035 /* If this channel is masked, skip it. */ 4036 if (msk & NATSEMI_CHMASK(i)) 4037 continue; 4038 4039 /* Get intr status */ 4040 ide_dmactl = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 4041 IDEDMA_CTL(i)); 4042 4043 if (ide_dmactl & IDEDMA_CTL_ERR) 4044 printf("%s:%d: error intr\n", 4045 sc->sc_wdcdev.sc_dev.dv_xname, i); 4046 4047 if (ide_dmactl & IDEDMA_CTL_INTR) { 4048 crv = wdcintr(wdc_cp); 4049 if (crv == 0) 4050 printf("%s:%d: bogus intr\n", 4051 sc->sc_wdcdev.sc_dev.dv_xname, i); 4052 else 4053 rv = 1; 4054 } 4055 } 4056 return (rv); 4057 } 4058 4059 void 4060 acer_chip_map(sc, pa) 4061 struct pciide_softc *sc; 4062 struct pci_attach_args *pa; 4063 { 4064 struct pciide_channel *cp; 4065 int channel; 4066 pcireg_t cr, interface; 4067 bus_size_t cmdsize, ctlsize; 4068 pcireg_t rev = PCI_REVISION(pa->pa_class); 4069 4070 if (pciide_chipen(sc, pa) == 0) 4071 return; 4072 4073 printf(": DMA"); 4074 pciide_mapreg_dma(sc, pa); 4075 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 4076 WDC_CAPABILITY_MODE; 4077 4078 if (sc->sc_dma_ok) { 4079 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA; 4080 if (rev >= 0x20) { 4081 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 4082 if (rev >= 0xC4) 4083 sc->sc_wdcdev.UDMA_cap = 5; 4084 else if (rev >= 0xC2) 4085 sc->sc_wdcdev.UDMA_cap = 4; 4086 else 4087 sc->sc_wdcdev.UDMA_cap = 2; 4088 } 4089 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 4090 sc->sc_wdcdev.irqack = pciide_irqack; 4091 } 4092 4093 sc->sc_wdcdev.PIO_cap = 4; 4094 sc->sc_wdcdev.DMA_cap = 2; 4095 sc->sc_wdcdev.set_modes = acer_setup_channel; 4096 sc->sc_wdcdev.channels = sc->wdc_chanarray; 4097 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 4098 4099 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CDRC, 4100 (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CDRC) | 4101 ACER_CDRC_DMA_EN) & ~ACER_CDRC_FIFO_DISABLE); 4102 4103 /* Enable "microsoft register bits" R/W. 
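* (The writes below drop the read-only protection on the channel enable and channel status fields so that PCI_CLASS_REG can be updated further down.)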
*/ 4104 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR3, 4105 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR3) | ACER_CCAR3_PI); 4106 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR1, 4107 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR1) & 4108 ~(ACER_CHANSTATUS_RO|PCIIDE_CHAN_RO(0)|PCIIDE_CHAN_RO(1))); 4109 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR2, 4110 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR2) & 4111 ~ACER_CHANSTATUSREGS_RO); 4112 cr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG); 4113 cr |= (PCIIDE_CHANSTATUS_EN << PCI_INTERFACE_SHIFT); 4114 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG, cr); 4115 /* Don't use cr, re-read the real register content instead */ 4116 interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag, 4117 PCI_CLASS_REG)); 4118 4119 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 4120 4121 /* From linux: enable "Cable Detection" */ 4122 if (rev >= 0xC2) 4123 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_0x4B, 4124 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4B) 4125 | ACER_0x4B_CDETECT); 4126 4127 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 4128 cp = &sc->pciide_channels[channel]; 4129 if (pciide_chansetup(sc, channel, interface) == 0) 4130 continue; 4131 if ((interface & PCIIDE_CHAN_EN(channel)) == 0) { 4132 printf("%s: %s ignored (disabled)\n", 4133 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 4134 continue; 4135 } 4136 pciide_map_compat_intr(pa, cp, channel, interface); 4137 if (cp->hw_ok == 0) 4138 continue; 4139 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 4140 (rev >= 0xC2) ? pciide_pci_intr : acer_pci_intr); 4141 if (cp->hw_ok == 0) { 4142 pciide_unmap_compat_intr(pa, cp, channel, interface); 4143 continue; 4144 } 4145 if (pciide_chan_candisable(cp)) { 4146 cr &= ~(PCIIDE_CHAN_EN(channel) << PCI_INTERFACE_SHIFT); 4147 pci_conf_write(sc->sc_pc, sc->sc_tag, 4148 PCI_CLASS_REG, cr); 4149 } 4150 if (cp->hw_ok == 0) { 4151 pciide_unmap_compat_intr(pa, cp, channel, interface); 4152 continue; 4153 } 4154 acer_setup_channel(&cp->wdc_channel); 4155 } 4156 } 4157 4158 void 4159 acer_setup_channel(chp) 4160 struct channel_softc *chp; 4161 { 4162 struct ata_drive_datas *drvp; 4163 int drive; 4164 u_int32_t acer_fifo_udma; 4165 u_int32_t idedma_ctl; 4166 struct pciide_channel *cp = (struct pciide_channel*)chp; 4167 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 4168 4169 idedma_ctl = 0; 4170 acer_fifo_udma = pci_conf_read(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA); 4171 WDCDEBUG_PRINT(("acer_setup_channel: old fifo/udma reg 0x%x\n", 4172 acer_fifo_udma), DEBUG_PROBE); 4173 /* setup DMA if needed */ 4174 pciide_channel_dma_setup(cp); 4175 4176 if ((chp->ch_drive[0].drive_flags | chp->ch_drive[1].drive_flags) & 4177 DRIVE_UDMA) { /* check 80 pins cable */ 4178 if (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4A) & 4179 ACER_0x4A_80PIN(chp->channel)) { 4180 WDCDEBUG_PRINT(("%s:%d: 80-wire cable not detected\n", 4181 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel), 4182 DEBUG_PROBE); 4183 if (chp->ch_drive[0].UDMA_mode > 2) 4184 chp->ch_drive[0].UDMA_mode = 2; 4185 if (chp->ch_drive[1].UDMA_mode > 2) 4186 chp->ch_drive[1].UDMA_mode = 2; 4187 } 4188 } 4189 4190 for (drive = 0; drive < 2; drive++) { 4191 drvp = &chp->ch_drive[drive]; 4192 /* If no drive, skip */ 4193 if ((drvp->drive_flags & DRIVE) == 0) 4194 continue; 4195 WDCDEBUG_PRINT(("acer_setup_channel: old timings reg for " 4196 "channel %d drive %d 0x%x\n", chp->channel, drive, 4197 pciide_pci_read(sc->sc_pc, sc->sc_tag, 
4198 ACER_IDETIM(chp->channel, drive))), DEBUG_PROBE); 4199 /* clear FIFO/DMA mode */ 4200 acer_fifo_udma &= ~(ACER_FTH_OPL(chp->channel, drive, 0x3) | 4201 ACER_UDMA_EN(chp->channel, drive) | 4202 ACER_UDMA_TIM(chp->channel, drive, 0x7)); 4203 4204 /* add timing values, setup DMA if needed */ 4205 if ((drvp->drive_flags & DRIVE_DMA) == 0 && 4206 (drvp->drive_flags & DRIVE_UDMA) == 0) { 4207 acer_fifo_udma |= 4208 ACER_FTH_OPL(chp->channel, drive, 0x1); 4209 goto pio; 4210 } 4211 4212 acer_fifo_udma |= ACER_FTH_OPL(chp->channel, drive, 0x2); 4213 if (drvp->drive_flags & DRIVE_UDMA) { 4214 /* use Ultra/DMA */ 4215 drvp->drive_flags &= ~DRIVE_DMA; 4216 acer_fifo_udma |= ACER_UDMA_EN(chp->channel, drive); 4217 acer_fifo_udma |= 4218 ACER_UDMA_TIM(chp->channel, drive, 4219 acer_udma[drvp->UDMA_mode]); 4220 /* XXX disable if one drive < UDMA3 ? */ 4221 if (drvp->UDMA_mode >= 3) { 4222 pciide_pci_write(sc->sc_pc, sc->sc_tag, 4223 ACER_0x4B, 4224 pciide_pci_read(sc->sc_pc, sc->sc_tag, 4225 ACER_0x4B) | ACER_0x4B_UDMA66); 4226 } 4227 } else { 4228 /* 4229 * use Multiword DMA 4230 * Timings will be used for both PIO and DMA, 4231 * so adjust DMA mode if needed 4232 */ 4233 if (drvp->PIO_mode > (drvp->DMA_mode + 2)) 4234 drvp->PIO_mode = drvp->DMA_mode + 2; 4235 if (drvp->DMA_mode + 2 > (drvp->PIO_mode)) 4236 drvp->DMA_mode = (drvp->PIO_mode > 2) ? 4237 drvp->PIO_mode - 2 : 0; 4238 if (drvp->DMA_mode == 0) 4239 drvp->PIO_mode = 0; 4240 } 4241 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 4242 pio: pciide_pci_write(sc->sc_pc, sc->sc_tag, 4243 ACER_IDETIM(chp->channel, drive), 4244 acer_pio[drvp->PIO_mode]); 4245 } 4246 WDCDEBUG_PRINT(("acer_setup_channel: new fifo/udma reg 0x%x\n", 4247 acer_fifo_udma), DEBUG_PROBE); 4248 pci_conf_write(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA, acer_fifo_udma); 4249 if (idedma_ctl != 0) { 4250 /* Add software bits in status register */ 4251 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 4252 IDEDMA_CTL(chp->channel), idedma_ctl); 4253 } 4254 pciide_print_modes(cp); 4255 } 4256 4257 int 4258 acer_pci_intr(arg) 4259 void *arg; 4260 { 4261 struct pciide_softc *sc = arg; 4262 struct pciide_channel *cp; 4263 struct channel_softc *wdc_cp; 4264 int i, rv, crv; 4265 u_int32_t chids; 4266 4267 rv = 0; 4268 chids = pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CHIDS); 4269 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 4270 cp = &sc->pciide_channels[i]; 4271 wdc_cp = &cp->wdc_channel; 4272 /* If a compat channel skip. */ 4273 if (cp->compat) 4274 continue; 4275 if (chids & ACER_CHIDS_INT(i)) { 4276 crv = wdcintr(wdc_cp); 4277 if (crv == 0) 4278 printf("%s:%d: bogus intr\n", 4279 sc->sc_wdcdev.sc_dev.dv_xname, i); 4280 else 4281 rv = 1; 4282 } 4283 } 4284 return rv; 4285 } 4286 4287 void 4288 hpt_chip_map(sc, pa) 4289 struct pciide_softc *sc; 4290 struct pci_attach_args *pa; 4291 { 4292 struct pciide_channel *cp; 4293 int i, compatchan, revision; 4294 pcireg_t interface; 4295 bus_size_t cmdsize, ctlsize; 4296 4297 if (pciide_chipen(sc, pa) == 0) 4298 return; 4299 revision = sc->sc_rev = PCI_REVISION(pa->pa_class); 4300 4301 /* 4302 * when the chip is in native mode it identifies itself as a 4303 * 'misc mass storage'. Fake interface in this case. 
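* (The second channel is faked only for the HPT370/370A/372 revisions of the 366 ID and for the HPT372A/302/371/374; the original HPT366 exposes one channel per PCI function instead.)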
4304 */ 4305 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) { 4306 interface = PCI_INTERFACE(pa->pa_class); 4307 } else { 4308 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA | 4309 PCIIDE_INTERFACE_PCI(0); 4310 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 && 4311 (revision == HPT370_REV || revision == HPT370A_REV || 4312 revision == HPT372_REV)) || 4313 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372A || 4314 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT302 || 4315 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT371 || 4316 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374) 4317 interface |= PCIIDE_INTERFACE_PCI(1); 4318 } 4319 4320 printf(": DMA"); 4321 pciide_mapreg_dma(sc, pa); 4322 printf("\n"); 4323 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 4324 WDC_CAPABILITY_MODE; 4325 if (sc->sc_dma_ok) { 4326 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 4327 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 4328 sc->sc_wdcdev.irqack = pciide_irqack; 4329 } 4330 sc->sc_wdcdev.PIO_cap = 4; 4331 sc->sc_wdcdev.DMA_cap = 2; 4332 4333 sc->sc_wdcdev.set_modes = hpt_setup_channel; 4334 sc->sc_wdcdev.channels = sc->wdc_chanarray; 4335 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 && 4336 revision == HPT366_REV) { 4337 sc->sc_wdcdev.UDMA_cap = 4; 4338 /* 4339 * The 366 has 2 PCI IDE functions, one for primary and one 4340 * for secondary. So we need to call pciide_mapregs_compat() 4341 * with the real channel 4342 */ 4343 if (pa->pa_function == 0) { 4344 compatchan = 0; 4345 } else if (pa->pa_function == 1) { 4346 compatchan = 1; 4347 } else { 4348 printf("%s: unexpected PCI function %d\n", 4349 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function); 4350 return; 4351 } 4352 sc->sc_wdcdev.nchannels = 1; 4353 } else { 4354 sc->sc_wdcdev.nchannels = 2; 4355 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372A || 4356 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT302 || 4357 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT371 || 4358 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374) 4359 sc->sc_wdcdev.UDMA_cap = 6; 4360 else if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366) { 4361 if (revision == HPT372_REV) 4362 sc->sc_wdcdev.UDMA_cap = 6; 4363 else 4364 sc->sc_wdcdev.UDMA_cap = 5; 4365 } 4366 } 4367 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 4368 cp = &sc->pciide_channels[i]; 4369 if (sc->sc_wdcdev.nchannels > 1) { 4370 compatchan = i; 4371 if((pciide_pci_read(sc->sc_pc, sc->sc_tag, 4372 HPT370_CTRL1(i)) & HPT370_CTRL1_EN) == 0) { 4373 printf("%s: %s ignored (disabled)\n", 4374 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 4375 continue; 4376 } 4377 } 4378 if (pciide_chansetup(sc, i, interface) == 0) 4379 continue; 4380 if (interface & PCIIDE_INTERFACE_PCI(i)) { 4381 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, 4382 &ctlsize, hpt_pci_intr); 4383 } else { 4384 cp->hw_ok = pciide_mapregs_compat(pa, cp, compatchan, 4385 &cmdsize, &ctlsize); 4386 } 4387 if (cp->hw_ok == 0) 4388 return; 4389 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot; 4390 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh; 4391 wdcattach(&cp->wdc_channel); 4392 hpt_setup_channel(&cp->wdc_channel); 4393 } 4394 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 && 4395 (revision == HPT370_REV || revision == HPT370A_REV || 4396 revision == HPT372_REV)) || 4397 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372A || 4398 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT302 || 4399 sc->sc_pp->ide_product == 
PCI_PRODUCT_TRIONES_HPT371 || 4400 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374) { 4401 /* 4402 * Turn off fast interrupts 4403 */ 4404 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT370_CTRL2(0), 4405 pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT370_CTRL2(0)) & 4406 ~(HPT370_CTRL2_FASTIRQ | HPT370_CTRL2_HIRQ)); 4407 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT370_CTRL2(1), 4408 pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT370_CTRL2(1)) & 4409 ~(HPT370_CTRL2_FASTIRQ | HPT370_CTRL2_HIRQ)); 4410 4411 /* 4412 * HPT370 and higher have a bit to disable interrupts, 4413 * make sure to clear it 4414 */ 4415 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_CSEL, 4416 pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL) & 4417 ~HPT_CSEL_IRQDIS); 4418 } 4419 /* set clocks, etc (mandatory on 372/4, optional otherwise) */ 4420 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372A || 4421 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT302 || 4422 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT371 || 4423 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374 || 4424 (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 && 4425 revision == HPT372_REV)) 4426 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_SC2, 4427 (pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_SC2) & 4428 HPT_SC2_MAEN) | HPT_SC2_OSC_EN); 4429 4430 return; 4431 } 4432 4433 void 4434 hpt_setup_channel(chp) 4435 struct channel_softc *chp; 4436 { 4437 struct ata_drive_datas *drvp; 4438 int drive; 4439 int cable; 4440 u_int32_t before, after; 4441 u_int32_t idedma_ctl; 4442 struct pciide_channel *cp = (struct pciide_channel*)chp; 4443 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 4444 int revision = sc->sc_rev; 4445 u_int32_t *tim_pio, *tim_dma, *tim_udma; 4446 4447 cable = pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL); 4448 4449 /* setup DMA if needed */ 4450 pciide_channel_dma_setup(cp); 4451 4452 idedma_ctl = 0; 4453 4454 switch (sc->sc_pp->ide_product) { 4455 case PCI_PRODUCT_TRIONES_HPT366: 4456 if (revision == HPT370_REV || 4457 revision == HPT370A_REV) { 4458 tim_pio = hpt370_pio; 4459 tim_dma = hpt370_dma; 4460 tim_udma = hpt370_udma; 4461 } else if (revision == HPT372_REV) { 4462 tim_pio = hpt372_pio; 4463 tim_dma = hpt372_dma; 4464 tim_udma = hpt372_udma; 4465 } else { 4466 tim_pio = hpt366_pio; 4467 tim_dma = hpt366_dma; 4468 tim_udma = hpt366_udma; 4469 } 4470 break; 4471 case PCI_PRODUCT_TRIONES_HPT372A: 4472 case PCI_PRODUCT_TRIONES_HPT302: 4473 case PCI_PRODUCT_TRIONES_HPT371: 4474 tim_pio = hpt372_pio; 4475 tim_dma = hpt372_dma; 4476 tim_udma = hpt372_udma; 4477 break; 4478 case PCI_PRODUCT_TRIONES_HPT374: 4479 tim_pio = hpt374_pio; 4480 tim_dma = hpt374_dma; 4481 tim_udma = hpt374_udma; 4482 break; 4483 default: 4484 printf("%s: no known timing values\n", 4485 sc->sc_wdcdev.sc_dev.dv_xname); 4486 goto end; 4487 } 4488 4489 /* Per drive settings */ 4490 for (drive = 0; drive < 2; drive++) { 4491 drvp = &chp->ch_drive[drive]; 4492 /* If no drive, skip */ 4493 if ((drvp->drive_flags & DRIVE) == 0) 4494 continue; 4495 before = pci_conf_read(sc->sc_pc, sc->sc_tag, 4496 HPT_IDETIM(chp->channel, drive)); 4497 4498 /* add timing values, setup DMA if needed */ 4499 if (drvp->drive_flags & DRIVE_UDMA) { 4500 /* use Ultra/DMA */ 4501 drvp->drive_flags &= ~DRIVE_DMA; 4502 if ((cable & HPT_CSEL_CBLID(chp->channel)) != 0 && 4503 drvp->UDMA_mode > 2) { 4504 WDCDEBUG_PRINT(("%s(%s:%d:%d): 80-wire " 4505 "cable not detected\n", drvp->drive_name, 4506 sc->sc_wdcdev.sc_dev.dv_xname, 4507 chp->channel, drive), DEBUG_PROBE); 4508
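/* no 80-wire cable detected: cap Ultra/DMA at mode 2 */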
drvp->UDMA_mode = 2; 4509 } 4510 after = tim_udma[drvp->UDMA_mode]; 4511 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 4512 } else if (drvp->drive_flags & DRIVE_DMA) { 4513 /* 4514 * use Multiword DMA. 4515 * Timings will be used for both PIO and DMA, so adjust 4516 * DMA mode if needed 4517 */ 4518 if (drvp->PIO_mode >= 3 && 4519 (drvp->DMA_mode + 2) > drvp->PIO_mode) { 4520 drvp->DMA_mode = drvp->PIO_mode - 2; 4521 } 4522 after = tim_dma[drvp->DMA_mode]; 4523 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 4524 } else { 4525 /* PIO only */ 4526 after = tim_pio[drvp->PIO_mode]; 4527 } 4528 pci_conf_write(sc->sc_pc, sc->sc_tag, 4529 HPT_IDETIM(chp->channel, drive), after); 4530 WDCDEBUG_PRINT(("%s: bus speed register set to 0x%08x " 4531 "(BIOS 0x%08x)\n", sc->sc_wdcdev.sc_dev.dv_xname, 4532 after, before), DEBUG_PROBE); 4533 } 4534 end: 4535 if (idedma_ctl != 0) { 4536 /* Add software bits in status register */ 4537 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 4538 IDEDMA_CTL(chp->channel), idedma_ctl); 4539 } 4540 pciide_print_modes(cp); 4541 } 4542 4543 int 4544 hpt_pci_intr(arg) 4545 void *arg; 4546 { 4547 struct pciide_softc *sc = arg; 4548 struct pciide_channel *cp; 4549 struct channel_softc *wdc_cp; 4550 int rv = 0; 4551 int dmastat, i, crv; 4552 4553 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 4554 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 4555 IDEDMA_CTL(i)); 4556 if((dmastat & (IDEDMA_CTL_ACT | IDEDMA_CTL_INTR)) != 4557 IDEDMA_CTL_INTR) 4558 continue; 4559 cp = &sc->pciide_channels[i]; 4560 wdc_cp = &cp->wdc_channel; 4561 crv = wdcintr(wdc_cp); 4562 if (crv == 0) { 4563 printf("%s:%d: bogus intr\n", 4564 sc->sc_wdcdev.sc_dev.dv_xname, i); 4565 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 4566 IDEDMA_CTL(i), dmastat); 4567 } else 4568 rv = 1; 4569 } 4570 return rv; 4571 } 4572 4573 /* Macros to test product */ 4574 #define PDC_IS_262(sc) \ 4575 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20262 || \ 4576 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20265 || \ 4577 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20267) 4578 #define PDC_IS_265(sc) \ 4579 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20265 || \ 4580 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20267 || \ 4581 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20268 || \ 4582 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20268R || \ 4583 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20269 || \ 4584 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20271 || \ 4585 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20275 || \ 4586 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20276 || \ 4587 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20277 || \ 4588 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20376) 4589 #define PDC_IS_268(sc) \ 4590 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20268 || \ 4591 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20268R || \ 4592 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20269 || \ 4593 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20271 || \ 4594 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20275 || \ 4595 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20276 || \ 4596 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20277 || \ 4597 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20376) 4598 #define PDC_IS_269(sc) \ 4599 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20269 || \ 4600 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20271 || \ 4601 (sc)->sc_pp->ide_product 
== PCI_PRODUCT_PROMISE_PDC20275 || \ 4602 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20276 || \ 4603 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20277 || \ 4604 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20376) 4605 4606 static __inline u_int8_t 4607 pdc268_config_read(struct channel_softc *chp, int index) 4608 { 4609 struct pciide_channel *cp = (struct pciide_channel *)chp; 4610 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 4611 int channel = chp->channel; 4612 4613 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 4614 PDC268_INDEX(channel), index); 4615 return (bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 4616 PDC268_DATA(channel))); 4617 } 4618 4619 static __inline void 4620 pdc268_config_write(struct channel_softc *chp, int index, u_int8_t value) 4621 { 4622 struct pciide_channel *cp = (struct pciide_channel *)chp; 4623 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 4624 int channel = chp->channel; 4625 4626 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 4627 PDC268_INDEX(channel), index); 4628 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 4629 PDC268_DATA(channel), value); 4630 } 4631 4632 void 4633 pdc202xx_chip_map(sc, pa) 4634 struct pciide_softc *sc; 4635 struct pci_attach_args *pa; 4636 { 4637 struct pciide_channel *cp; 4638 int channel; 4639 pcireg_t interface, st, mode; 4640 bus_size_t cmdsize, ctlsize; 4641 4642 if (!PDC_IS_268(sc)) { 4643 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE); 4644 WDCDEBUG_PRINT(("pdc202xx_setup_chip: controller state 0x%x\n", 4645 st), DEBUG_PROBE); 4646 } 4647 if (pciide_chipen(sc, pa) == 0) 4648 return; 4649 4650 /* turn off RAID mode */ 4651 if (!PDC_IS_268(sc)) 4652 st &= ~PDC2xx_STATE_IDERAID; 4653 4654 /* 4655 * can't rely on the PCI_CLASS_REG content if the chip was in raid 4656 * mode. We have to fake interface 4657 */ 4658 interface = PCIIDE_INTERFACE_SETTABLE(0) | PCIIDE_INTERFACE_SETTABLE(1); 4659 if (PDC_IS_268(sc) || (st & PDC2xx_STATE_NATIVE)) 4660 interface |= PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1); 4661 4662 printf(": DMA"); 4663 pciide_mapreg_dma(sc, pa); 4664 4665 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 4666 WDC_CAPABILITY_MODE; 4667 if (sc->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20246 || 4668 PDC_IS_262(sc)) 4669 sc->sc_wdcdev.cap |= WDC_CAPABILITY_NO_ATAPI_DMA; 4670 if (sc->sc_dma_ok) { 4671 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 4672 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 4673 sc->sc_wdcdev.irqack = pciide_irqack; 4674 } 4675 sc->sc_wdcdev.PIO_cap = 4; 4676 sc->sc_wdcdev.DMA_cap = 2; 4677 if (PDC_IS_269(sc)) 4678 sc->sc_wdcdev.UDMA_cap = 6; 4679 else if (PDC_IS_265(sc)) 4680 sc->sc_wdcdev.UDMA_cap = 5; 4681 else if (PDC_IS_262(sc)) 4682 sc->sc_wdcdev.UDMA_cap = 4; 4683 else 4684 sc->sc_wdcdev.UDMA_cap = 2; 4685 sc->sc_wdcdev.set_modes = PDC_IS_268(sc) ? 
4686 pdc20268_setup_channel : pdc202xx_setup_channel; 4687 sc->sc_wdcdev.channels = sc->wdc_chanarray; 4688 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 4689 4690 if (PDC_IS_262(sc)) { 4691 sc->sc_wdcdev.dma_start = pdc20262_dma_start; 4692 sc->sc_wdcdev.dma_finish = pdc20262_dma_finish; 4693 } 4694 4695 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 4696 if (!PDC_IS_268(sc)) { 4697 /* setup failsafe defaults */ 4698 mode = 0; 4699 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[0]); 4700 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[0]); 4701 mode = PDC2xx_TIM_SET_MB(mode, pdc2xx_dma_mb[0]); 4702 mode = PDC2xx_TIM_SET_MC(mode, pdc2xx_dma_mc[0]); 4703 for (channel = 0; 4704 channel < sc->sc_wdcdev.nchannels; 4705 channel++) { 4706 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d " 4707 "drive 0 initial timings 0x%x, now 0x%x\n", 4708 channel, pci_conf_read(sc->sc_pc, sc->sc_tag, 4709 PDC2xx_TIM(channel, 0)), mode | PDC2xx_TIM_IORDYp), 4710 DEBUG_PROBE); 4711 pci_conf_write(sc->sc_pc, sc->sc_tag, 4712 PDC2xx_TIM(channel, 0), mode | PDC2xx_TIM_IORDYp); 4713 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d " 4714 "drive 1 initial timings 0x%x, now 0x%x\n", 4715 channel, pci_conf_read(sc->sc_pc, sc->sc_tag, 4716 PDC2xx_TIM(channel, 1)), mode), DEBUG_PROBE); 4717 pci_conf_write(sc->sc_pc, sc->sc_tag, 4718 PDC2xx_TIM(channel, 1), mode); 4719 } 4720 4721 mode = PDC2xx_SCR_DMA; 4722 if (PDC_IS_262(sc)) { 4723 mode = PDC2xx_SCR_SET_GEN(mode, PDC262_SCR_GEN_LAT); 4724 } else { 4725 /* the BIOS set it up this way */ 4726 mode = PDC2xx_SCR_SET_GEN(mode, 0x1); 4727 } 4728 mode = PDC2xx_SCR_SET_I2C(mode, 0x3); /* ditto */ 4729 mode = PDC2xx_SCR_SET_POLL(mode, 0x1); /* ditto */ 4730 WDCDEBUG_PRINT(("pdc202xx_setup_chip: initial SCR 0x%x, " 4731 "now 0x%x\n", 4732 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, 4733 PDC2xx_SCR), 4734 mode), DEBUG_PROBE); 4735 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, 4736 PDC2xx_SCR, mode); 4737 4738 /* controller initial state register is OK even without BIOS */ 4739 /* Set DMA mode to IDE DMA compatibility */ 4740 mode = 4741 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM); 4742 WDCDEBUG_PRINT(("pdc202xx_setup_chip: primary mode 0x%x", mode), 4743 DEBUG_PROBE); 4744 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM, 4745 mode | 0x1); 4746 mode = 4747 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM); 4748 WDCDEBUG_PRINT((", secondary mode 0x%x\n", mode ), DEBUG_PROBE); 4749 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM, 4750 mode | 0x1); 4751 } 4752 4753 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 4754 cp = &sc->pciide_channels[channel]; 4755 if (pciide_chansetup(sc, channel, interface) == 0) 4756 continue; 4757 if (!PDC_IS_268(sc) && (st & (PDC_IS_262(sc) ? 4758 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel))) == 0) { 4759 printf("%s: %s ignored (disabled)\n", 4760 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 4761 continue; 4762 } 4763 pciide_map_compat_intr(pa, cp, channel, interface); 4764 if (cp->hw_ok == 0) 4765 continue; 4766 if (PDC_IS_265(sc)) 4767 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 4768 pdc20265_pci_intr); 4769 else 4770 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 4771 pdc202xx_pci_intr); 4772 if (cp->hw_ok == 0) { 4773 pciide_unmap_compat_intr(pa, cp, channel, interface); 4774 continue; 4775 } 4776 if (!PDC_IS_268(sc) && pciide_chan_candisable(cp)) { 4777 st &= ~(PDC_IS_262(sc) ? 
4778 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel)); 4779 pciide_unmap_compat_intr(pa, cp, channel, interface); 4780 } 4781 if (PDC_IS_268(sc)) 4782 pdc20268_setup_channel(&cp->wdc_channel); 4783 else 4784 pdc202xx_setup_channel(&cp->wdc_channel); 4785 } 4786 if (!PDC_IS_268(sc)) { 4787 WDCDEBUG_PRINT(("pdc202xx_setup_chip: new controller state " 4788 "0x%x\n", st), DEBUG_PROBE); 4789 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_STATE, st); 4790 } 4791 return; 4792 } 4793 4794 void 4795 pdc202xx_setup_channel(chp) 4796 struct channel_softc *chp; 4797 { 4798 struct ata_drive_datas *drvp; 4799 int drive; 4800 pcireg_t mode, st; 4801 u_int32_t idedma_ctl, scr, atapi; 4802 struct pciide_channel *cp = (struct pciide_channel*)chp; 4803 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 4804 int channel = chp->channel; 4805 4806 /* setup DMA if needed */ 4807 pciide_channel_dma_setup(cp); 4808 4809 idedma_ctl = 0; 4810 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s: scr 0x%x\n", 4811 sc->sc_wdcdev.sc_dev.dv_xname, 4812 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC262_U66)), 4813 DEBUG_PROBE); 4814 4815 /* Per channel settings */ 4816 if (PDC_IS_262(sc)) { 4817 scr = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 4818 PDC262_U66); 4819 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE); 4820 /* Check cable */ 4821 if ((st & PDC262_STATE_80P(channel)) != 0 && 4822 ((chp->ch_drive[0].drive_flags & DRIVE_UDMA && 4823 chp->ch_drive[0].UDMA_mode > 2) || 4824 (chp->ch_drive[1].drive_flags & DRIVE_UDMA && 4825 chp->ch_drive[1].UDMA_mode > 2))) { 4826 WDCDEBUG_PRINT(("%s:%d: 80-wire cable not detected\n", 4827 sc->sc_wdcdev.sc_dev.dv_xname, channel), 4828 DEBUG_PROBE); 4829 if (chp->ch_drive[0].UDMA_mode > 2) 4830 chp->ch_drive[0].UDMA_mode = 2; 4831 if (chp->ch_drive[1].UDMA_mode > 2) 4832 chp->ch_drive[1].UDMA_mode = 2; 4833 } 4834 /* Trim UDMA mode */ 4835 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA && 4836 chp->ch_drive[0].UDMA_mode <= 2) || 4837 (chp->ch_drive[1].drive_flags & DRIVE_UDMA && 4838 chp->ch_drive[1].UDMA_mode <= 2)) { 4839 if (chp->ch_drive[0].UDMA_mode > 2) 4840 chp->ch_drive[0].UDMA_mode = 2; 4841 if (chp->ch_drive[1].UDMA_mode > 2) 4842 chp->ch_drive[1].UDMA_mode = 2; 4843 } 4844 /* Set U66 if needed */ 4845 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA && 4846 chp->ch_drive[0].UDMA_mode > 2) || 4847 (chp->ch_drive[1].drive_flags & DRIVE_UDMA && 4848 chp->ch_drive[1].UDMA_mode > 2)) 4849 scr |= PDC262_U66_EN(channel); 4850 else 4851 scr &= ~PDC262_U66_EN(channel); 4852 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 4853 PDC262_U66, scr); 4854 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s:%d: ATAPI 0x%x\n", 4855 sc->sc_wdcdev.sc_dev.dv_xname, channel, 4856 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, 4857 PDC262_ATAPI(channel))), DEBUG_PROBE); 4858 if (chp->ch_drive[0].drive_flags & DRIVE_ATAPI || 4859 chp->ch_drive[1].drive_flags & DRIVE_ATAPI) { 4860 if (((chp->ch_drive[0].drive_flags & DRIVE_UDMA) && 4861 !(chp->ch_drive[1].drive_flags & DRIVE_UDMA) && 4862 (chp->ch_drive[1].drive_flags & DRIVE_DMA)) || 4863 ((chp->ch_drive[1].drive_flags & DRIVE_UDMA) && 4864 !(chp->ch_drive[0].drive_flags & DRIVE_UDMA) && 4865 (chp->ch_drive[0].drive_flags & DRIVE_DMA))) 4866 atapi = 0; 4867 else 4868 atapi = PDC262_ATAPI_UDMA; 4869 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, 4870 PDC262_ATAPI(channel), atapi); 4871 } 4872 } 4873 for (drive = 0; drive < 2; drive++) { 4874 drvp = &chp->ch_drive[drive]; 4875 /* If no drive, skip */ 4876 if 
((drvp->drive_flags & DRIVE) == 0) 4877 continue; 4878 mode = 0; 4879 if (drvp->drive_flags & DRIVE_UDMA) { 4880 /* use Ultra/DMA */ 4881 drvp->drive_flags &= ~DRIVE_DMA; 4882 mode = PDC2xx_TIM_SET_MB(mode, 4883 pdc2xx_udma_mb[drvp->UDMA_mode]); 4884 mode = PDC2xx_TIM_SET_MC(mode, 4885 pdc2xx_udma_mc[drvp->UDMA_mode]); 4886 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 4887 } else if (drvp->drive_flags & DRIVE_DMA) { 4888 mode = PDC2xx_TIM_SET_MB(mode, 4889 pdc2xx_dma_mb[drvp->DMA_mode]); 4890 mode = PDC2xx_TIM_SET_MC(mode, 4891 pdc2xx_dma_mc[drvp->DMA_mode]); 4892 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 4893 } else { 4894 mode = PDC2xx_TIM_SET_MB(mode, 4895 pdc2xx_dma_mb[0]); 4896 mode = PDC2xx_TIM_SET_MC(mode, 4897 pdc2xx_dma_mc[0]); 4898 } 4899 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[drvp->PIO_mode]); 4900 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[drvp->PIO_mode]); 4901 if (drvp->drive_flags & DRIVE_ATA) 4902 mode |= PDC2xx_TIM_PRE; 4903 mode |= PDC2xx_TIM_SYNC | PDC2xx_TIM_ERRDY; 4904 if (drvp->PIO_mode >= 3) { 4905 mode |= PDC2xx_TIM_IORDY; 4906 if (drive == 0) 4907 mode |= PDC2xx_TIM_IORDYp; 4908 } 4909 WDCDEBUG_PRINT(("pdc202xx_setup_channel: %s:%d:%d " 4910 "timings 0x%x\n", 4911 sc->sc_wdcdev.sc_dev.dv_xname, 4912 chp->channel, drive, mode), DEBUG_PROBE); 4913 pci_conf_write(sc->sc_pc, sc->sc_tag, 4914 PDC2xx_TIM(chp->channel, drive), mode); 4915 } 4916 if (idedma_ctl != 0) { 4917 /* Add software bits in status register */ 4918 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 4919 IDEDMA_CTL(channel), idedma_ctl); 4920 } 4921 pciide_print_modes(cp); 4922 } 4923 4924 void 4925 pdc20268_setup_channel(chp) 4926 struct channel_softc *chp; 4927 { 4928 struct ata_drive_datas *drvp; 4929 int drive, cable; 4930 u_int32_t idedma_ctl; 4931 struct pciide_channel *cp = (struct pciide_channel*)chp; 4932 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 4933 int channel = chp->channel; 4934 4935 /* check 80 pins cable */ 4936 cable = pdc268_config_read(chp, 0x0b) & PDC268_CABLE; 4937 4938 /* setup DMA if needed */ 4939 pciide_channel_dma_setup(cp); 4940 4941 idedma_ctl = 0; 4942 4943 for (drive = 0; drive < 2; drive++) { 4944 drvp = &chp->ch_drive[drive]; 4945 /* If no drive, skip */ 4946 if ((drvp->drive_flags & DRIVE) == 0) 4947 continue; 4948 if (drvp->drive_flags & DRIVE_UDMA) { 4949 /* use Ultra/DMA */ 4950 drvp->drive_flags &= ~DRIVE_DMA; 4951 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 4952 if (cable && drvp->UDMA_mode > 2) { 4953 WDCDEBUG_PRINT(("%s(%s:%d:%d): 80-wire " 4954 "cable not detected\n", drvp->drive_name, 4955 sc->sc_wdcdev.sc_dev.dv_xname, 4956 channel, drive), DEBUG_PROBE); 4957 drvp->UDMA_mode = 2; 4958 } 4959 } else if (drvp->drive_flags & DRIVE_DMA) { 4960 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 4961 } 4962 } 4963 /* nothing to do to setup modes, the controller snoop SET_FEATURE cmd */ 4964 if (idedma_ctl != 0) { 4965 /* Add software bits in status register */ 4966 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 4967 IDEDMA_CTL(channel), idedma_ctl); 4968 } 4969 pciide_print_modes(cp); 4970 } 4971 4972 int 4973 pdc202xx_pci_intr(arg) 4974 void *arg; 4975 { 4976 struct pciide_softc *sc = arg; 4977 struct pciide_channel *cp; 4978 struct channel_softc *wdc_cp; 4979 int i, rv, crv; 4980 u_int32_t scr; 4981 4982 rv = 0; 4983 scr = bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR); 4984 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 4985 cp = &sc->pciide_channels[i]; 4986 wdc_cp = &cp->wdc_channel; 4987 /* If a compat channel skip. 
*/ 4988 if (cp->compat) 4989 continue; 4990 if (scr & PDC2xx_SCR_INT(i)) { 4991 crv = wdcintr(wdc_cp); 4992 if (crv == 0) 4993 printf("%s:%d: bogus intr (reg 0x%x)\n", 4994 sc->sc_wdcdev.sc_dev.dv_xname, i, scr); 4995 else 4996 rv = 1; 4997 } 4998 } 4999 return rv; 5000 } 5001 5002 int 5003 pdc20265_pci_intr(arg) 5004 void *arg; 5005 { 5006 struct pciide_softc *sc = arg; 5007 struct pciide_channel *cp; 5008 struct channel_softc *wdc_cp; 5009 int i, rv, crv; 5010 u_int32_t dmastat; 5011 5012 rv = 0; 5013 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 5014 cp = &sc->pciide_channels[i]; 5015 wdc_cp = &cp->wdc_channel; 5016 /* If a compat channel skip. */ 5017 if (cp->compat) 5018 continue; 5019 5020 /* 5021 * In case of shared IRQ check that the interrupt 5022 * was actually generated by this channel. 5023 * Only check the channel that is enabled. 5024 */ 5025 if (cp->hw_ok && PDC_IS_268(sc)) { 5026 if ((pdc268_config_read(wdc_cp, 5027 0x0b) & PDC268_INTR) == 0) 5028 continue; 5029 } 5030 5031 /* 5032 * The Ultra/100 seems to assert PDC2xx_SCR_INT * spuriously, 5033 * however it asserts INT in IDEDMA_CTL even for non-DMA ops. 5034 * So use it instead (requires 2 reg reads instead of 1, 5035 * but we can't do it another way). 5036 */ 5037 dmastat = bus_space_read_1(sc->sc_dma_iot, 5038 sc->sc_dma_ioh, IDEDMA_CTL(i)); 5039 if ((dmastat & IDEDMA_CTL_INTR) == 0) 5040 continue; 5041 5042 crv = wdcintr(wdc_cp); 5043 if (crv == 0) 5044 printf("%s:%d: bogus intr\n", 5045 sc->sc_wdcdev.sc_dev.dv_xname, i); 5046 else 5047 rv = 1; 5048 } 5049 return rv; 5050 } 5051 5052 void 5053 pdc20262_dma_start(void *v, int channel, int drive) 5054 { 5055 struct pciide_softc *sc = v; 5056 struct pciide_dma_maps *dma_maps = 5057 &sc->pciide_channels[channel].dma_maps[drive]; 5058 u_int8_t clock; 5059 u_int32_t count; 5060 5061 if (dma_maps->dma_flags & WDC_DMA_LBA48) { 5062 clock = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5063 PDC262_U66); 5064 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5065 PDC262_U66, clock | PDC262_U66_EN(channel)); 5066 count = dma_maps->dmamap_xfer->dm_mapsize >> 1; 5067 count |= dma_maps->dma_flags & WDC_DMA_READ ? 5068 PDC262_ATAPI_LBA48_READ : PDC262_ATAPI_LBA48_WRITE; 5069 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, 5070 PDC262_ATAPI(channel), count); 5071 } 5072 5073 pciide_dma_start(v, channel, drive); 5074 } 5075 5076 int 5077 pdc20262_dma_finish(void *v, int channel, int drive) 5078 { 5079 struct pciide_softc *sc = v; 5080 struct pciide_dma_maps *dma_maps = 5081 &sc->pciide_channels[channel].dma_maps[drive]; 5082 u_int8_t clock; 5083 5084 if (dma_maps->dma_flags & WDC_DMA_LBA48) { 5085 clock = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5086 PDC262_U66); 5087 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5088 PDC262_U66, clock & ~PDC262_U66_EN(channel)); 5089 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, 5090 PDC262_ATAPI(channel), 0); 5091 } 5092 5093 return (pciide_dma_finish(v, channel, drive)); 5094 } 5095 5096 /* 5097 * Inline functions for accessing the timing registers of the 5098 * OPTi controller. 5099 * 5100 * These *MUST* disable interrupts as they need atomic access to 5101 * certain magic registers. Failure to adhere to this *will* 5102 * break things in subtle ways if the wdc registers are accessed 5103 * by an interrupt routine while this magic sequence is executing. 
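* The unlock sequence used below is: two 16-bit reads of the features register, an 8-bit write of 0x03 to the sector count register to expose the configuration registers, the access itself, then a write of 0x83 to restore normal operation.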
5104 */ 5105 static __inline__ u_int8_t 5106 opti_read_config(struct channel_softc *chp, int reg) 5107 { 5108 u_int8_t rv; 5109 int s = splhigh(); 5110 5111 /* Two consecutive 16-bit reads from register #1 (0x1f1/0x171) */ 5112 (void) bus_space_read_2(chp->cmd_iot, chp->cmd_ioh, wdr_features); 5113 (void) bus_space_read_2(chp->cmd_iot, chp->cmd_ioh, wdr_features); 5114 5115 /* Followed by an 8-bit write of 0x3 to register #2 */ 5116 bus_space_write_1(chp->cmd_iot, chp->cmd_ioh, wdr_seccnt, 0x03u); 5117 5118 /* Now we can read the required register */ 5119 rv = bus_space_read_1(chp->cmd_iot, chp->cmd_ioh, reg); 5120 5121 /* Restore the real registers */ 5122 bus_space_write_1(chp->cmd_iot, chp->cmd_ioh, wdr_seccnt, 0x83u); 5123 5124 splx(s); 5125 5126 return rv; 5127 } 5128 5129 static __inline__ void 5130 opti_write_config(struct channel_softc *chp, int reg, u_int8_t val) 5131 { 5132 int s = splhigh(); 5133 5134 /* Two consecutive 16-bit reads from register #1 (0x1f1/0x171) */ 5135 (void) bus_space_read_2(chp->cmd_iot, chp->cmd_ioh, wdr_features); 5136 (void) bus_space_read_2(chp->cmd_iot, chp->cmd_ioh, wdr_features); 5137 5138 /* Followed by an 8-bit write of 0x3 to register #2 */ 5139 bus_space_write_1(chp->cmd_iot, chp->cmd_ioh, wdr_seccnt, 0x03u); 5140 5141 /* Now we can write the required register */ 5142 bus_space_write_1(chp->cmd_iot, chp->cmd_ioh, reg, val); 5143 5144 /* Restore the real registers */ 5145 bus_space_write_1(chp->cmd_iot, chp->cmd_ioh, wdr_seccnt, 0x83u); 5146 5147 splx(s); 5148 } 5149 5150 void 5151 opti_chip_map(sc, pa) 5152 struct pciide_softc *sc; 5153 struct pci_attach_args *pa; 5154 { 5155 struct pciide_channel *cp; 5156 bus_size_t cmdsize, ctlsize; 5157 pcireg_t interface; 5158 u_int8_t init_ctrl; 5159 int channel; 5160 5161 if (pciide_chipen(sc, pa) == 0) 5162 return; 5163 printf(": DMA"); 5164 /* 5165 * XXXSCW: 5166 * There seem to be a couple of buggy revisions/implementations 5167 * of the OPTi pciide chipset. This kludge seems to fix one of 5168 * the reported problems (NetBSD PR/11644) but still fails for the 5169 * other (NetBSD PR/13151), although the latter may be due to other 5170 * issues too... 
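* Revisions 0x12 and earlier therefore run with bus-master DMA disabled below.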
5171 */ 5172 if (PCI_REVISION(pa->pa_class) <= 0x12) { 5173 printf(" (disabled)"); 5174 sc->sc_dma_ok = 0; 5175 sc->sc_wdcdev.cap = 0; 5176 } else { 5177 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA32; 5178 pciide_mapreg_dma(sc, pa); 5179 } 5180 5181 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_MODE; 5182 sc->sc_wdcdev.PIO_cap = 4; 5183 if (sc->sc_dma_ok) { 5184 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 5185 sc->sc_wdcdev.irqack = pciide_irqack; 5186 sc->sc_wdcdev.DMA_cap = 2; 5187 } 5188 sc->sc_wdcdev.set_modes = opti_setup_channel; 5189 5190 sc->sc_wdcdev.channels = sc->wdc_chanarray; 5191 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 5192 5193 init_ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, 5194 OPTI_REG_INIT_CONTROL); 5195 5196 interface = PCI_INTERFACE(pa->pa_class); 5197 5198 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 5199 5200 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 5201 cp = &sc->pciide_channels[channel]; 5202 if (pciide_chansetup(sc, channel, interface) == 0) 5203 continue; 5204 if (channel == 1 && 5205 (init_ctrl & OPTI_INIT_CONTROL_CH2_DISABLE) != 0) { 5206 printf("%s: %s ignored (disabled)\n", 5207 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 5208 continue; 5209 } 5210 pciide_map_compat_intr(pa, cp, channel, interface); 5211 if (cp->hw_ok == 0) 5212 continue; 5213 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 5214 pciide_pci_intr); 5215 if (cp->hw_ok == 0) { 5216 pciide_unmap_compat_intr(pa, cp, channel, interface); 5217 continue; 5218 } 5219 opti_setup_channel(&cp->wdc_channel); 5220 } 5221 } 5222 5223 void 5224 opti_setup_channel(chp) 5225 struct channel_softc *chp; 5226 { 5227 struct ata_drive_datas *drvp; 5228 struct pciide_channel *cp = (struct pciide_channel*)chp; 5229 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 5230 int drive,spd; 5231 int mode[2]; 5232 u_int8_t rv, mr; 5233 5234 /* 5235 * The `Delay' and `Address Setup Time' fields of the 5236 * Miscellaneous Register are always zero initially. 5237 */ 5238 mr = opti_read_config(chp, OPTI_REG_MISC) & ~OPTI_MISC_INDEX_MASK; 5239 mr &= ~(OPTI_MISC_DELAY_MASK | 5240 OPTI_MISC_ADDR_SETUP_MASK | 5241 OPTI_MISC_INDEX_MASK); 5242 5243 /* Prime the control register before setting timing values */ 5244 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_DISABLE); 5245 5246 /* Determine the clockrate of the PCIbus the chip is attached to */ 5247 spd = (int) opti_read_config(chp, OPTI_REG_STRAP); 5248 spd &= OPTI_STRAP_PCI_SPEED_MASK; 5249 5250 /* setup DMA if needed */ 5251 pciide_channel_dma_setup(cp); 5252 5253 for (drive = 0; drive < 2; drive++) { 5254 drvp = &chp->ch_drive[drive]; 5255 /* If no drive, skip */ 5256 if ((drvp->drive_flags & DRIVE) == 0) { 5257 mode[drive] = -1; 5258 continue; 5259 } 5260 5261 if ((drvp->drive_flags & DRIVE_DMA)) { 5262 /* 5263 * Timings will be used for both PIO and DMA, 5264 * so adjust DMA mode if needed 5265 */ 5266 if (drvp->PIO_mode > (drvp->DMA_mode + 2)) 5267 drvp->PIO_mode = drvp->DMA_mode + 2; 5268 if (drvp->DMA_mode + 2 > (drvp->PIO_mode)) 5269 drvp->DMA_mode = (drvp->PIO_mode > 2) ? 5270 drvp->PIO_mode - 2 : 0; 5271 if (drvp->DMA_mode == 0) 5272 drvp->PIO_mode = 0; 5273 5274 mode[drive] = drvp->DMA_mode + 5; 5275 } else 5276 mode[drive] = drvp->PIO_mode; 5277 5278 if (drive && mode[0] >= 0 && 5279 (opti_tim_as[spd][mode[0]] != opti_tim_as[spd][mode[1]])) { 5280 /* 5281 * Can't have two drives using different values 5282 * for `Address Setup Time'. 5283 * Slow down the faster drive to compensate. 5284 */ 5285 int d = (opti_tim_as[spd][mode[0]] > 5286 opti_tim_as[spd][mode[1]]) ? 0 : 1; 5287 5288 mode[d] = mode[1-d]; 5289 chp->ch_drive[d].PIO_mode = chp->ch_drive[1-d].PIO_mode; 5290 chp->ch_drive[d].DMA_mode = 0; 5291 chp->ch_drive[d].drive_flags &= ~DRIVE_DMA; 5292 } 5293 } 5294 5295 for (drive = 0; drive < 2; drive++) { 5296 int m; 5297 if ((m = mode[drive]) < 0) 5298 continue; 5299 5300 /* Set the Address Setup Time and select appropriate index */ 5301 rv = opti_tim_as[spd][m] << OPTI_MISC_ADDR_SETUP_SHIFT; 5302 rv |= OPTI_MISC_INDEX(drive); 5303 opti_write_config(chp, OPTI_REG_MISC, mr | rv); 5304 5305 /* Set the pulse width and recovery timing parameters */ 5306 rv = opti_tim_cp[spd][m] << OPTI_PULSE_WIDTH_SHIFT; 5307 rv |= opti_tim_rt[spd][m] << OPTI_RECOVERY_TIME_SHIFT; 5308 opti_write_config(chp, OPTI_REG_READ_CYCLE_TIMING, rv); 5309 opti_write_config(chp, OPTI_REG_WRITE_CYCLE_TIMING, rv); 5310 5311 /* Set the Enhanced Mode register appropriately */ 5312 rv = pciide_pci_read(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE); 5313 rv &= ~OPTI_ENH_MODE_MASK(chp->channel, drive); 5314 rv |= OPTI_ENH_MODE(chp->channel, drive, opti_tim_em[m]); 5315 pciide_pci_write(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE, rv); 5316 } 5317 5318 /* Finally, enable the timings */ 5319 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_ENABLE); 5320 5321 pciide_print_modes(cp); 5322 } 5323 5324 void 5325 serverworks_chip_map(sc, pa) 5326 struct pciide_softc *sc; 5327 struct pci_attach_args *pa; 5328 { 5329 struct pciide_channel *cp; 5330 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 5331 pcitag_t pcib_tag; 5332 int channel; 5333 bus_size_t cmdsize, ctlsize; 5334 5335 if (pciide_chipen(sc, pa) == 0) 5336 return; 5337 5338 printf(": DMA"); 5339 pciide_mapreg_dma(sc, pa); 5340 printf("\n"); 5341 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 5342 WDC_CAPABILITY_MODE; 5343 5344 if (sc->sc_dma_ok) { 5345 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 5346 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 5347 sc->sc_wdcdev.irqack = pciide_irqack; 5348 } 5349 sc->sc_wdcdev.PIO_cap = 4; 5350 sc->sc_wdcdev.DMA_cap = 2; 5351 switch (sc->sc_pp->ide_product) { 5352 case PCI_PRODUCT_RCC_OSB4_IDE: 5353 sc->sc_wdcdev.UDMA_cap = 2; 5354 break; 5355 case PCI_PRODUCT_RCC_CSB5_IDE: 5356 if (PCI_REVISION(pa->pa_class) < 0x92) 5357 sc->sc_wdcdev.UDMA_cap = 4; 5358 else 5359 sc->sc_wdcdev.UDMA_cap = 5; 5360 break; 5361 case PCI_PRODUCT_RCC_CSB6_IDE: 5362 sc->sc_wdcdev.UDMA_cap = 5; 5363 break; 5364 } 5365 5366 sc->sc_wdcdev.set_modes = serverworks_setup_channel; 5367 sc->sc_wdcdev.channels = sc->wdc_chanarray; 5368 sc->sc_wdcdev.nchannels = 2; 5369 5370 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 5371 cp = &sc->pciide_channels[channel]; 5372 if (pciide_chansetup(sc, channel, interface) == 0) 5373 continue; 5374 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 5375 serverworks_pci_intr); 5376 if (cp->hw_ok == 0) 5377 return; 5378 pciide_map_compat_intr(pa, cp, channel, interface); 5379 if (cp->hw_ok == 0) 5380 return; 5381 serverworks_setup_channel(&cp->wdc_channel); 5382 } 5383 5384 pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0); 5385 pci_conf_write(pa->pa_pc, pcib_tag, 0x64, 5386 (pci_conf_read(pa->pa_pc, pcib_tag, 0x64) & ~0x2000) | 0x4000); 5387 } 5388 5389 void 5390 serverworks_setup_channel(chp) 5391 struct channel_softc *chp; 5392 { 5393 struct ata_drive_datas *drvp; 5394 struct pciide_channel
void
serverworks_setup_channel(chp)
	struct channel_softc *chp;
{
	struct ata_drive_datas *drvp;
	struct pciide_channel *cp = (struct pciide_channel *)chp;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	int channel = chp->channel;
	int drive, unit;
	u_int32_t pio_time, dma_time, pio_mode, udma_mode;
	u_int32_t idedma_ctl;
	static const u_int8_t pio_modes[5] = {0x5d, 0x47, 0x34, 0x22, 0x20};
	static const u_int8_t dma_modes[3] = {0x77, 0x21, 0x20};

	/* setup DMA if needed */
	pciide_channel_dma_setup(cp);

	pio_time = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x40);
	dma_time = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x44);
	pio_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x48);
	udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54);

	pio_time &= ~(0xffff << (16 * channel));
	dma_time &= ~(0xffff << (16 * channel));
	pio_mode &= ~(0xff << (8 * channel + 16));
	udma_mode &= ~(0xff << (8 * channel + 16));
	udma_mode &= ~(3 << (2 * channel));

	idedma_ctl = 0;

	/* Per drive settings */
	for (drive = 0; drive < 2; drive++) {
		drvp = &chp->ch_drive[drive];
		/* If no drive, skip */
		if ((drvp->drive_flags & DRIVE) == 0)
			continue;
		unit = drive + 2 * channel;
		/* add timing values, setup DMA if needed */
		pio_time |= pio_modes[drvp->PIO_mode] << (8 * (unit^1));
		pio_mode |= drvp->PIO_mode << (4 * unit + 16);
		if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
		    (drvp->drive_flags & DRIVE_UDMA)) {
			/* use Ultra/DMA, check for 80-pin cable */
			if (drvp->UDMA_mode > 2 &&
			    (PCI_PRODUCT(pci_conf_read(sc->sc_pc, sc->sc_tag,
			    PCI_SUBSYS_ID_REG)) &
			    (1 << (14 + channel))) == 0) {
				WDCDEBUG_PRINT(("%s(%s:%d:%d): 80-wire "
				    "cable not detected\n", drvp->drive_name,
				    sc->sc_wdcdev.sc_dev.dv_xname,
				    channel, drive), DEBUG_PROBE);
				drvp->UDMA_mode = 2;
			}
			dma_time |= dma_modes[drvp->DMA_mode] << (8 * (unit^1));
			udma_mode |= drvp->UDMA_mode << (4 * unit + 16);
			udma_mode |= 1 << unit;
			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
		} else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) &&
		    (drvp->drive_flags & DRIVE_DMA)) {
			/* use Multiword DMA */
			drvp->drive_flags &= ~DRIVE_UDMA;
			dma_time |= dma_modes[drvp->DMA_mode] << (8 * (unit^1));
			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
		} else {
			/* PIO only */
			drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA);
		}
	}

	pci_conf_write(sc->sc_pc, sc->sc_tag, 0x40, pio_time);
	pci_conf_write(sc->sc_pc, sc->sc_tag, 0x44, dma_time);
	if (sc->sc_pp->ide_product != PCI_PRODUCT_RCC_OSB4_IDE)
		pci_conf_write(sc->sc_pc, sc->sc_tag, 0x48, pio_mode);
	pci_conf_write(sc->sc_pc, sc->sc_tag, 0x54, udma_mode);

	if (idedma_ctl != 0) {
		/* Add software bits in status register */
		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
		    IDEDMA_CTL(channel), idedma_ctl);
	}
	pciide_print_modes(cp);
}

int
serverworks_pci_intr(arg)
	void *arg;
{
	struct pciide_softc *sc = arg;
	struct pciide_channel *cp;
	struct channel_softc *wdc_cp;
	int rv = 0;
	int dmastat, i, crv;

	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
		dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
		    IDEDMA_CTL(i));
		if ((dmastat & (IDEDMA_CTL_ACT | IDEDMA_CTL_INTR)) !=
		    IDEDMA_CTL_INTR)
			continue;
		cp = &sc->pciide_channels[i];
		wdc_cp = &cp->wdc_channel;
		crv = wdcintr(wdc_cp);
		if (crv == 0) {
			printf("%s:%d: bogus intr\n",
			    sc->sc_wdcdev.sc_dev.dv_xname, i);
			bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
			    IDEDMA_CTL(i), dmastat);
		} else
			rv = 1;
	}
	return rv;
}
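
/*
 * ACARD ATP850U/ATP860 support.  As the register accesses below suggest,
 * the ATP850U keeps a separate IDETIME register per channel and its own
 * UDMA register layout, while the ATP860 family packs both channels into
 * single ATP860_IDETIME and ATP860_UDMA registers; ACARD_IS_850() selects
 * between the two register sets.  The ATP850U tops out at Ultra/DMA mode 2
 * and the ATP860 at mode 4 (see the UDMA_cap assignment below).
 */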
#define ACARD_IS_850(sc)	\
	((sc)->sc_pp->ide_product == PCI_PRODUCT_ACARD_ATP850U)

void
acard_chip_map(sc, pa)
	struct pciide_softc *sc;
	struct pci_attach_args *pa;
{
	struct pciide_channel *cp;
	int i;
	pcireg_t interface;
	bus_size_t cmdsize, ctlsize;

	if (pciide_chipen(sc, pa) == 0)
		return;

	/*
	 * when the chip is in native mode it identifies itself as a
	 * 'misc mass storage'. Fake interface in this case.
	 */
	if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
		interface = PCI_INTERFACE(pa->pa_class);
	} else {
		interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
		    PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
	}

	printf(": DMA");
	pciide_mapreg_dma(sc, pa);
	printf("\n");
	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
	    WDC_CAPABILITY_MODE;

	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
		sc->sc_wdcdev.irqack = pciide_irqack;
	}
	sc->sc_wdcdev.PIO_cap = 4;
	sc->sc_wdcdev.DMA_cap = 2;
	sc->sc_wdcdev.UDMA_cap = ACARD_IS_850(sc) ? 2 : 4;

	sc->sc_wdcdev.set_modes = acard_setup_channel;
	sc->sc_wdcdev.channels = sc->wdc_chanarray;
	sc->sc_wdcdev.nchannels = 2;

	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
		cp = &sc->pciide_channels[i];
		if (pciide_chansetup(sc, i, interface) == 0)
			continue;
		if (interface & PCIIDE_INTERFACE_PCI(i)) {
			cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize,
			    &ctlsize, pciide_pci_intr);
		} else {
			cp->hw_ok = pciide_mapregs_compat(pa, cp, i,
			    &cmdsize, &ctlsize);
		}
		if (cp->hw_ok == 0)
			return;
		cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot;
		cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh;
		wdcattach(&cp->wdc_channel);
		acard_setup_channel(&cp->wdc_channel);
	}
	if (!ACARD_IS_850(sc)) {
		u_int32_t reg;
		reg = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL);
		reg &= ~ATP860_CTRL_INT;
		pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL, reg);
	}
}

void
acard_setup_channel(chp)
	struct channel_softc *chp;
{
	struct ata_drive_datas *drvp;
	struct pciide_channel *cp = (struct pciide_channel *)chp;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	int channel = chp->channel;
	int drive;
	u_int32_t idetime, udma_mode;
	u_int32_t idedma_ctl;

	/* setup DMA if needed */
	pciide_channel_dma_setup(cp);

	if (ACARD_IS_850(sc)) {
		idetime = 0;
		udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP850_UDMA);
		udma_mode &= ~ATP850_UDMA_MASK(channel);
	} else {
		idetime = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_IDETIME);
		idetime &= ~ATP860_SETTIME_MASK(channel);
		udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_UDMA);
		udma_mode &= ~ATP860_UDMA_MASK(channel);
	}

	idedma_ctl = 0;

	/* Per drive settings */
	for (drive = 0; drive < 2; drive++) {
		drvp = &chp->ch_drive[drive];
		/* If no drive, skip */
		if ((drvp->drive_flags & DRIVE) == 0)
			continue;
		/* add timing values, setup DMA if needed */
		if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) &&
		    (drvp->drive_flags & DRIVE_UDMA)) {
			/* use Ultra/DMA */
			if (ACARD_IS_850(sc)) {
				idetime |= ATP850_SETTIME(drive,
				    acard_act_udma[drvp->UDMA_mode],
				    acard_rec_udma[drvp->UDMA_mode]);
				udma_mode |= ATP850_UDMA_MODE(channel, drive,
				    acard_udma_conf[drvp->UDMA_mode]);
			} else {
				idetime |= ATP860_SETTIME(channel, drive,
				    acard_act_udma[drvp->UDMA_mode],
				    acard_rec_udma[drvp->UDMA_mode]);
				udma_mode |= ATP860_UDMA_MODE(channel, drive,
				    acard_udma_conf[drvp->UDMA_mode]);
			}
			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
		} else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) &&
		    (drvp->drive_flags & DRIVE_DMA)) {
			/* use Multiword DMA */
			drvp->drive_flags &= ~DRIVE_UDMA;
			if (ACARD_IS_850(sc)) {
				idetime |= ATP850_SETTIME(drive,
				    acard_act_dma[drvp->DMA_mode],
				    acard_rec_dma[drvp->DMA_mode]);
			} else {
				idetime |= ATP860_SETTIME(channel, drive,
				    acard_act_dma[drvp->DMA_mode],
				    acard_rec_dma[drvp->DMA_mode]);
			}
			idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
		} else {
			/* PIO only */
			drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA);
			if (ACARD_IS_850(sc)) {
				idetime |= ATP850_SETTIME(drive,
				    acard_act_pio[drvp->PIO_mode],
				    acard_rec_pio[drvp->PIO_mode]);
			} else {
				idetime |= ATP860_SETTIME(channel, drive,
				    acard_act_pio[drvp->PIO_mode],
				    acard_rec_pio[drvp->PIO_mode]);
			}
			pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL,
			    pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL)
			    | ATP8x0_CTRL_EN(channel));
		}
	}

	if (idedma_ctl != 0) {
		/* Add software bits in status register */
		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
		    IDEDMA_CTL(channel), idedma_ctl);
	}
	pciide_print_modes(cp);

	if (ACARD_IS_850(sc)) {
		pci_conf_write(sc->sc_pc, sc->sc_tag,
		    ATP850_IDETIME(channel), idetime);
		pci_conf_write(sc->sc_pc, sc->sc_tag, ATP850_UDMA, udma_mode);
	} else {
		pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_IDETIME, idetime);
		pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_UDMA, udma_mode);
	}
}
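
/*
 * Interrupt handling note, inferred from the code below: when the DMA
 * status register flags an interrupt but no command is pending on the
 * channel (WDCF_IRQ_WAIT clear), the handler still calls wdcintr() and
 * then writes the status back to IDEDMA_CTL to clear what is treated as
 * a spurious interrupt, instead of reporting a "bogus intr".
 */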
int
acard_pci_intr(arg)
	void *arg;
{
	struct pciide_softc *sc = arg;
	struct pciide_channel *cp;
	struct channel_softc *wdc_cp;
	int rv = 0;
	int dmastat, i, crv;

	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
		dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
		    IDEDMA_CTL(i));
		if ((dmastat & IDEDMA_CTL_INTR) == 0)
			continue;
		cp = &sc->pciide_channels[i];
		wdc_cp = &cp->wdc_channel;
		if ((wdc_cp->ch_flags & WDCF_IRQ_WAIT) == 0) {
			(void)wdcintr(wdc_cp);
			bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
			    IDEDMA_CTL(i), dmastat);
			continue;
		}
		crv = wdcintr(wdc_cp);
		if (crv == 0)
			printf("%s:%d: bogus intr\n",
			    sc->sc_wdcdev.sc_dev.dv_xname, i);
		else if (crv == 1)
			rv = 1;
		else if (rv == 0)
			rv = crv;
	}
	return rv;
}
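
/*
 * NVIDIA nForce/nForce2 IDE.  The UDMA_cap values assigned below
 * correspond to Ultra/100 (mode 5) on the original nForce and Ultra/133
 * (mode 6) on the nForce2.  The NFORCE_CONF register carries both the
 * per-channel enable bits and the cable-detect bits used during mode
 * setup.
 */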
void
nforce_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa)
{
	struct pciide_channel *cp;
	int channel;
	pcireg_t interface = PCI_INTERFACE(pa->pa_class);
	bus_size_t cmdsize, ctlsize;
	u_int32_t conf;

	conf = pci_conf_read(sc->sc_pc, sc->sc_tag, NFORCE_CONF);
	WDCDEBUG_PRINT(("%s: conf register 0x%x\n",
	    sc->sc_wdcdev.sc_dev.dv_xname, conf), DEBUG_PROBE);

	if (pciide_chipen(sc, pa) == 0)
		return;

	printf(": DMA");
	pciide_mapreg_dma(sc, pa);

	sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
	    WDC_CAPABILITY_MODE;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
		sc->sc_wdcdev.irqack = pciide_irqack;
	}
	sc->sc_wdcdev.PIO_cap = 4;
	sc->sc_wdcdev.DMA_cap = 2;
	switch (PCI_PRODUCT(pa->pa_id)) {
	case PCI_PRODUCT_NVIDIA_NFORCE_IDE:
		sc->sc_wdcdev.UDMA_cap = 5;
		break;
	case PCI_PRODUCT_NVIDIA_NFORCE2_IDE:
		sc->sc_wdcdev.UDMA_cap = 6;
		break;
	default:
		sc->sc_wdcdev.UDMA_cap = 0;
	}
	sc->sc_wdcdev.set_modes = nforce_setup_channel;
	sc->sc_wdcdev.channels = sc->wdc_chanarray;
	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;

	pciide_print_channels(sc->sc_wdcdev.nchannels, interface);

	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
		cp = &sc->pciide_channels[channel];

		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;

		if ((conf & NFORCE_CHAN_EN(channel)) == 0) {
			printf("%s: %s ignored (disabled)\n",
			    sc->sc_wdcdev.sc_dev.dv_xname, cp->name);
			continue;
		}

		pciide_map_compat_intr(pa, cp, channel, interface);
		if (cp->hw_ok == 0)
			continue;
		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
		    nforce_pci_intr);
		if (cp->hw_ok == 0) {
			pciide_unmap_compat_intr(pa, cp, channel, interface);
			continue;
		}

		if (pciide_chan_candisable(cp)) {
			conf &= ~NFORCE_CHAN_EN(channel);
			pciide_unmap_compat_intr(pa, cp, channel, interface);
			continue;
		}

		sc->sc_wdcdev.set_modes(&cp->wdc_channel);
	}
	WDCDEBUG_PRINT(("%s: new conf register 0x%x\n",
	    sc->sc_wdcdev.sc_dev.dv_xname, conf), DEBUG_PROBE);
	pci_conf_write(sc->sc_pc, sc->sc_tag, NFORCE_CONF, conf);
}
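
/*
 * The per-drive timing entry programmed below covers both PIO and
 * multiword DMA, so the two modes are kept consistent: the index into
 * nforce_pio[] is min(PIO_mode, DMA_mode + 2), and the drive's PIO_mode
 * and DMA_mode are then adjusted to match (DMA_mode = mode - 2).  For
 * example, PIO 4 with MW-DMA 2 selects entry 4, while PIO 4 with
 * MW-DMA 1 drops to entry 3.
 */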
void
nforce_setup_channel(struct channel_softc *chp)
{
	struct ata_drive_datas *drvp;
	int drive, mode;
	u_int32_t idedma_ctl;
	struct pciide_channel *cp = (struct pciide_channel *)chp;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	int channel = chp->channel;
	u_int32_t conf, piodmatim, piotim, udmatim;

	conf = pci_conf_read(sc->sc_pc, sc->sc_tag, NFORCE_CONF);
	piodmatim = pci_conf_read(sc->sc_pc, sc->sc_tag, NFORCE_PIODMATIM);
	piotim = pci_conf_read(sc->sc_pc, sc->sc_tag, NFORCE_PIOTIM);
	udmatim = pci_conf_read(sc->sc_pc, sc->sc_tag, NFORCE_UDMATIM);
	WDCDEBUG_PRINT(("%s: %s old timing values: piodmatim=0x%x, "
	    "piotim=0x%x, udmatim=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname,
	    cp->name, piodmatim, piotim, udmatim), DEBUG_PROBE);

	/* Setup DMA if needed */
	pciide_channel_dma_setup(cp);

	/* Clear all bits for this channel */
	idedma_ctl = 0;
	piodmatim &= ~NFORCE_PIODMATIM_MASK(channel);
	udmatim &= ~NFORCE_UDMATIM_MASK(channel);

	/* Per drive settings */
	for (drive = 0; drive < 2; drive++) {
		drvp = &chp->ch_drive[drive];

		/* If no drive, skip */
		if ((drvp->drive_flags & DRIVE) == 0)
			continue;

		if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) != 0 &&
		    (drvp->drive_flags & DRIVE_UDMA) != 0) {
			/* Setup UltraDMA mode */
			drvp->drive_flags &= ~DRIVE_DMA;

			/* Check cable */
			if ((conf & NFORCE_CONF_CABLE(channel, drive)) == 0 &&
			    drvp->UDMA_mode > 2) {
				WDCDEBUG_PRINT(("%s(%s:%d:%d): 80-wire "
				    "cable not detected\n", drvp->drive_name,
				    sc->sc_wdcdev.sc_dev.dv_xname,
				    channel, drive), DEBUG_PROBE);
				drvp->UDMA_mode = 2;
			}

			udmatim |= NFORCE_UDMATIM_SET(channel, drive,
			    nforce_udma[drvp->UDMA_mode]) |
			    NFORCE_UDMA_EN(channel, drive) |
			    NFORCE_UDMA_ENM(channel, drive);

			mode = drvp->PIO_mode;
		} else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) != 0 &&
		    (drvp->drive_flags & DRIVE_DMA) != 0) {
			/* Setup multiword DMA mode */
			drvp->drive_flags &= ~DRIVE_UDMA;

			/* mode = min(pio, dma + 2) */
			if (drvp->PIO_mode <= (drvp->DMA_mode + 2))
				mode = drvp->PIO_mode;
			else
				mode = drvp->DMA_mode + 2;
		} else {
			/* PIO only */
			mode = drvp->PIO_mode;
			goto pio;
		}
		idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);

pio:
		/* Setup PIO mode */
		if (mode <= 2) {
			drvp->DMA_mode = 0;
			drvp->PIO_mode = 0;
			mode = 0;
		} else {
			drvp->PIO_mode = mode;
			drvp->DMA_mode = mode - 2;
		}
		piodmatim |= NFORCE_PIODMATIM_SET(channel, drive,
		    nforce_pio[mode]);
	}

	if (idedma_ctl != 0) {
		/* Add software bits in status register */
		bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
		    IDEDMA_CTL(channel), idedma_ctl);
	}

	WDCDEBUG_PRINT(("%s: %s new timing values: piodmatim=0x%x, "
	    "piotim=0x%x, udmatim=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname,
	    cp->name, piodmatim, piotim, udmatim), DEBUG_PROBE);
	pci_conf_write(sc->sc_pc, sc->sc_tag, NFORCE_PIODMATIM, piodmatim);
	pci_conf_write(sc->sc_pc, sc->sc_tag, NFORCE_UDMATIM, udmatim);

	pciide_print_modes(cp);
}

int
nforce_pci_intr(void *arg)
{
	struct pciide_softc *sc = arg;
	struct pciide_channel *cp;
	struct channel_softc *wdc_cp;
	int i, rv, crv;
	u_int32_t dmastat;

	rv = 0;
	for (i = 0; i < sc->sc_wdcdev.nchannels; i++) {
		cp = &sc->pciide_channels[i];
		wdc_cp = &cp->wdc_channel;

		/* Skip compat channel */
		if (cp->compat)
			continue;

		dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
		    IDEDMA_CTL(i));
		if ((dmastat & IDEDMA_CTL_INTR) == 0)
			continue;

		crv = wdcintr(wdc_cp);
		if (crv == 0)
			printf("%s:%d: bogus intr\n",
			    sc->sc_wdcdev.sc_dev.dv_xname, i);
		else
			rv = 1;
	}
	return rv;
}
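
/*
 * Intel i31244 (Artisea) serial ATA.  Channels are configured through the
 * generic sata_setup_channel() path; on revision 0 parts DMA is left
 * disabled unless the kernel is built with PCIIDE_I31244_ENABLEDMA (see
 * the conditional below).
 */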
void
artisea_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa)
{
	struct pciide_channel *cp;
	bus_size_t cmdsize, ctlsize;
	pcireg_t interface;
	int channel;

	if (pciide_chipen(sc, pa) == 0)
		return;

	printf("%s: DMA",
	    sc->sc_wdcdev.sc_dev.dv_xname);
#ifndef PCIIDE_I31244_ENABLEDMA
	if (PCI_REVISION(pa->pa_class) == 0) {
		printf(" disabled due to rev. 0");
		sc->sc_dma_ok = 0;
	} else
#endif
		pciide_mapreg_dma(sc, pa);
	printf("\n");

	/*
	 * XXX Configure LEDs to show activity.
	 */

	sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
	    WDC_CAPABILITY_MODE;
	sc->sc_wdcdev.PIO_cap = 4;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
		sc->sc_wdcdev.irqack = pciide_irqack;
		sc->sc_wdcdev.DMA_cap = 2;
		sc->sc_wdcdev.UDMA_cap = 6;
	}
	sc->sc_wdcdev.set_modes = sata_setup_channel;

	sc->sc_wdcdev.channels = sc->wdc_chanarray;
	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;

	interface = PCI_INTERFACE(pa->pa_class);

	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
		cp = &sc->pciide_channels[channel];
		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;
		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
		    pciide_pci_intr);
		if (cp->hw_ok == 0)
			continue;
		pciide_map_compat_intr(pa, cp, channel, interface);
		sata_setup_channel(&cp->wdc_channel);
	}
}