/*	$OpenBSD: pciide.c,v 1.339 2012/04/22 14:22:28 miod Exp $	*/
/*	$NetBSD: pciide.c,v 1.127 2001/08/03 01:31:08 tsutsui Exp $	*/

/*
 * Copyright (c) 1999, 2000, 2001 Manuel Bouyer.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

/*
 * Copyright (c) 1996, 1998 Christopher G. Demetriou.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Christopher G. Demetriou
 *      for the NetBSD Project.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * PCI IDE controller driver.
 *
 * Author: Christopher G. Demetriou, March 2, 1998 (derived from NetBSD
 * sys/dev/pci/ppb.c, revision 1.16).
 *
 * See "PCI IDE Controller Specification, Revision 1.0 3/4/94" and
 * "Programming Interface for Bus Master IDE Controller, Revision 1.0
 * 5/16/94" from the PCI SIG.
 *
 */

#define DEBUG_DMA	0x01
#define DEBUG_XFERS	0x02
#define DEBUG_FUNCS	0x08
#define DEBUG_PROBE	0x10

#ifdef WDCDEBUG
#ifndef WDCDEBUG_PCIIDE_MASK
#define WDCDEBUG_PCIIDE_MASK 0x00
#endif
int wdcdebug_pciide_mask = WDCDEBUG_PCIIDE_MASK;
#define WDCDEBUG_PRINT(args, level) do {		\
	if ((wdcdebug_pciide_mask & (level)) != 0)	\
		printf args;				\
} while (0)
#else
#define WDCDEBUG_PRINT(args, level)
#endif
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/malloc.h>

#include <machine/bus.h>
#include <machine/endian.h>

#include <dev/ata/atavar.h>
#include <dev/ata/satareg.h>
#include <dev/ic/wdcreg.h>
#include <dev/ic/wdcvar.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/pciidereg.h>
#include <dev/pci/pciidevar.h>
#include <dev/pci/pciide_piix_reg.h>
#include <dev/pci/pciide_amd_reg.h>
#include <dev/pci/pciide_apollo_reg.h>
#include <dev/pci/pciide_cmd_reg.h>
#include <dev/pci/pciide_sii3112_reg.h>
#include <dev/pci/pciide_cy693_reg.h>
#include <dev/pci/pciide_sis_reg.h>
#include <dev/pci/pciide_acer_reg.h>
#include <dev/pci/pciide_pdc202xx_reg.h>
#include <dev/pci/pciide_opti_reg.h>
#include <dev/pci/pciide_hpt_reg.h>
#include <dev/pci/pciide_acard_reg.h>
#include <dev/pci/pciide_natsemi_reg.h>
#include <dev/pci/pciide_nforce_reg.h>
#include <dev/pci/pciide_i31244_reg.h>
#include <dev/pci/pciide_ite_reg.h>
#include <dev/pci/pciide_ixp_reg.h>
#include <dev/pci/pciide_svwsata_reg.h>
#include <dev/pci/pciide_jmicron_reg.h>
#include <dev/pci/cy82c693var.h>

/* functions for reading/writing 8-bit PCI registers */

u_int8_t pciide_pci_read(pci_chipset_tag_t, pcitag_t,
    int);
void pciide_pci_write(pci_chipset_tag_t, pcitag_t,
    int, u_int8_t);

u_int8_t
pciide_pci_read(pci_chipset_tag_t pc, pcitag_t pa, int reg)
{
	return (pci_conf_read(pc, pa, (reg & ~0x03)) >>
	    ((reg & 0x03) * 8) & 0xff);
}

void
pciide_pci_write(pci_chipset_tag_t pc, pcitag_t pa, int reg, u_int8_t val)
{
	pcireg_t pcival;

	pcival = pci_conf_read(pc, pa, (reg & ~0x03));
	pcival &= ~(0xff << ((reg & 0x03) * 8));
	pcival |= (val << ((reg & 0x03) * 8));
	pci_conf_write(pc, pa, (reg & ~0x03), pcival);
}

void default_chip_map(struct pciide_softc *, struct pci_attach_args *);

void sata_chip_map(struct pciide_softc *, struct pci_attach_args *);
void sata_setup_channel(struct channel_softc *);

void piix_chip_map(struct pciide_softc *, struct pci_attach_args *);
void piixsata_chip_map(struct pciide_softc *, struct pci_attach_args *);
void piix_setup_channel(struct channel_softc *);
void piix3_4_setup_channel(struct channel_softc *);
void piix_timing_debug(struct pciide_softc *);

u_int32_t piix_setup_idetim_timings(u_int8_t, u_int8_t, u_int8_t);
u_int32_t piix_setup_idetim_drvs(struct ata_drive_datas *);
u_int32_t piix_setup_sidetim_timings(u_int8_t, u_int8_t, u_int8_t);

void amd756_chip_map(struct pciide_softc *, struct pci_attach_args *);
void amd756_setup_channel(struct channel_softc *);

void apollo_chip_map(struct
pciide_softc *, struct pci_attach_args *); 172 void apollo_setup_channel(struct channel_softc *); 173 174 void cmd_chip_map(struct pciide_softc *, struct pci_attach_args *); 175 void cmd0643_9_chip_map(struct pciide_softc *, struct pci_attach_args *); 176 void cmd0643_9_setup_channel(struct channel_softc *); 177 void cmd680_chip_map(struct pciide_softc *, struct pci_attach_args *); 178 void cmd680_setup_channel(struct channel_softc *); 179 void cmd680_channel_map(struct pci_attach_args *, struct pciide_softc *, int); 180 void cmd_channel_map(struct pci_attach_args *, 181 struct pciide_softc *, int); 182 int cmd_pci_intr(void *); 183 void cmd646_9_irqack(struct channel_softc *); 184 185 void sii_fixup_cacheline(struct pciide_softc *, struct pci_attach_args *); 186 void sii3112_chip_map(struct pciide_softc *, struct pci_attach_args *); 187 void sii3112_setup_channel(struct channel_softc *); 188 void sii3112_drv_probe(struct channel_softc *); 189 void sii3114_chip_map(struct pciide_softc *, struct pci_attach_args *); 190 void sii3114_mapreg_dma(struct pciide_softc *, struct pci_attach_args *); 191 int sii3114_chansetup(struct pciide_softc *, int); 192 void sii3114_mapchan(struct pciide_channel *); 193 u_int8_t sii3114_dmacmd_read(struct pciide_softc *, int); 194 void sii3114_dmacmd_write(struct pciide_softc *, int, u_int8_t); 195 u_int8_t sii3114_dmactl_read(struct pciide_softc *, int); 196 void sii3114_dmactl_write(struct pciide_softc *, int, u_int8_t); 197 void sii3114_dmatbl_write(struct pciide_softc *, int, u_int32_t); 198 199 void cy693_chip_map(struct pciide_softc *, struct pci_attach_args *); 200 void cy693_setup_channel(struct channel_softc *); 201 202 void sis_chip_map(struct pciide_softc *, struct pci_attach_args *); 203 void sis_setup_channel(struct channel_softc *); 204 void sis96x_setup_channel(struct channel_softc *); 205 int sis_hostbr_match(struct pci_attach_args *); 206 int sis_south_match(struct pci_attach_args *); 207 208 void natsemi_chip_map(struct pciide_softc *, struct pci_attach_args *); 209 void natsemi_setup_channel(struct channel_softc *); 210 int natsemi_pci_intr(void *); 211 void natsemi_irqack(struct channel_softc *); 212 void ns_scx200_chip_map(struct pciide_softc *, struct pci_attach_args *); 213 void ns_scx200_setup_channel(struct channel_softc *); 214 215 void acer_chip_map(struct pciide_softc *, struct pci_attach_args *); 216 void acer_setup_channel(struct channel_softc *); 217 int acer_pci_intr(void *); 218 int acer_dma_init(void *, int, int, void *, size_t, int); 219 220 void pdc202xx_chip_map(struct pciide_softc *, struct pci_attach_args *); 221 void pdc202xx_setup_channel(struct channel_softc *); 222 void pdc20268_setup_channel(struct channel_softc *); 223 int pdc202xx_pci_intr(void *); 224 int pdc20265_pci_intr(void *); 225 void pdc20262_dma_start(void *, int, int); 226 int pdc20262_dma_finish(void *, int, int, int); 227 228 u_int8_t pdc268_config_read(struct channel_softc *, int); 229 230 void pdcsata_chip_map(struct pciide_softc *, struct pci_attach_args *); 231 void pdc203xx_setup_channel(struct channel_softc *); 232 int pdc203xx_pci_intr(void *); 233 void pdc203xx_irqack(struct channel_softc *); 234 void pdc203xx_dma_start(void *,int ,int); 235 int pdc203xx_dma_finish(void *, int, int, int); 236 int pdc205xx_pci_intr(void *); 237 void pdc205xx_do_reset(struct channel_softc *); 238 void pdc205xx_drv_probe(struct channel_softc *); 239 240 void opti_chip_map(struct pciide_softc *, struct pci_attach_args *); 241 void opti_setup_channel(struct 
channel_softc *); 242 243 void hpt_chip_map(struct pciide_softc *, struct pci_attach_args *); 244 void hpt_setup_channel(struct channel_softc *); 245 int hpt_pci_intr(void *); 246 247 void acard_chip_map(struct pciide_softc *, struct pci_attach_args *); 248 void acard_setup_channel(struct channel_softc *); 249 250 void serverworks_chip_map(struct pciide_softc *, struct pci_attach_args *); 251 void serverworks_setup_channel(struct channel_softc *); 252 int serverworks_pci_intr(void *); 253 254 void svwsata_chip_map(struct pciide_softc *, struct pci_attach_args *); 255 void svwsata_mapreg_dma(struct pciide_softc *, struct pci_attach_args *); 256 void svwsata_mapchan(struct pciide_channel *); 257 u_int8_t svwsata_dmacmd_read(struct pciide_softc *, int); 258 void svwsata_dmacmd_write(struct pciide_softc *, int, u_int8_t); 259 u_int8_t svwsata_dmactl_read(struct pciide_softc *, int); 260 void svwsata_dmactl_write(struct pciide_softc *, int, u_int8_t); 261 void svwsata_dmatbl_write(struct pciide_softc *, int, u_int32_t); 262 void svwsata_drv_probe(struct channel_softc *); 263 264 void nforce_chip_map(struct pciide_softc *, struct pci_attach_args *); 265 void nforce_setup_channel(struct channel_softc *); 266 int nforce_pci_intr(void *); 267 268 void artisea_chip_map(struct pciide_softc *, struct pci_attach_args *); 269 270 void ite_chip_map(struct pciide_softc *, struct pci_attach_args *); 271 void ite_setup_channel(struct channel_softc *); 272 273 void ixp_chip_map(struct pciide_softc *, struct pci_attach_args *); 274 void ixp_setup_channel(struct channel_softc *); 275 276 void jmicron_chip_map(struct pciide_softc *, struct pci_attach_args *); 277 void jmicron_setup_channel(struct channel_softc *); 278 279 void phison_chip_map(struct pciide_softc *, struct pci_attach_args *); 280 void phison_setup_channel(struct channel_softc *); 281 282 void sch_chip_map(struct pciide_softc *, struct pci_attach_args *); 283 void sch_setup_channel(struct channel_softc *); 284 285 struct pciide_product_desc { 286 u_int32_t ide_product; 287 u_short ide_flags; 288 /* map and setup chip, probe drives */ 289 void (*chip_map)(struct pciide_softc *, struct pci_attach_args *); 290 }; 291 292 /* Flags for ide_flags */ 293 #define IDE_PCI_CLASS_OVERRIDE 0x0001 /* accept even if class != pciide */ 294 #define IDE_16BIT_IOSPACE 0x0002 /* I/O space BARS ignore upper word */ 295 296 /* Default product description for devices not known from this controller */ 297 const struct pciide_product_desc default_product_desc = { 298 0, /* Generic PCI IDE controller */ 299 0, 300 default_chip_map 301 }; 302 303 const struct pciide_product_desc pciide_intel_products[] = { 304 { PCI_PRODUCT_INTEL_31244, /* Intel 31244 SATA */ 305 0, 306 artisea_chip_map 307 }, 308 { PCI_PRODUCT_INTEL_82092AA, /* Intel 82092AA IDE */ 309 0, 310 default_chip_map 311 }, 312 { PCI_PRODUCT_INTEL_82371FB_IDE, /* Intel 82371FB IDE (PIIX) */ 313 0, 314 piix_chip_map 315 }, 316 { PCI_PRODUCT_INTEL_82371FB_ISA, /* Intel 82371FB IDE (PIIX) */ 317 0, 318 piix_chip_map 319 }, 320 { PCI_PRODUCT_INTEL_82372FB_IDE, /* Intel 82372FB IDE (PIIX4) */ 321 0, 322 piix_chip_map 323 }, 324 { PCI_PRODUCT_INTEL_82371SB_IDE, /* Intel 82371SB IDE (PIIX3) */ 325 0, 326 piix_chip_map 327 }, 328 { PCI_PRODUCT_INTEL_82371AB_IDE, /* Intel 82371AB IDE (PIIX4) */ 329 0, 330 piix_chip_map 331 }, 332 { PCI_PRODUCT_INTEL_82371MX, /* Intel 82371MX IDE */ 333 0, 334 piix_chip_map 335 }, 336 { PCI_PRODUCT_INTEL_82440MX_IDE, /* Intel 82440MX IDE */ 337 0, 338 piix_chip_map 339 }, 340 { 
PCI_PRODUCT_INTEL_82451NX, /* Intel 82451NX (PIIX4) IDE */ 341 0, 342 piix_chip_map 343 }, 344 { PCI_PRODUCT_INTEL_82801AA_IDE, /* Intel 82801AA IDE (ICH) */ 345 0, 346 piix_chip_map 347 }, 348 { PCI_PRODUCT_INTEL_82801AB_IDE, /* Intel 82801AB IDE (ICH0) */ 349 0, 350 piix_chip_map 351 }, 352 { PCI_PRODUCT_INTEL_82801BAM_IDE, /* Intel 82801BAM IDE (ICH2) */ 353 0, 354 piix_chip_map 355 }, 356 { PCI_PRODUCT_INTEL_82801BA_IDE, /* Intel 82801BA IDE (ICH2) */ 357 0, 358 piix_chip_map 359 }, 360 { PCI_PRODUCT_INTEL_82801CAM_IDE, /* Intel 82801CAM IDE (ICH3) */ 361 0, 362 piix_chip_map 363 }, 364 { PCI_PRODUCT_INTEL_82801CA_IDE, /* Intel 82801CA IDE (ICH3) */ 365 0, 366 piix_chip_map 367 }, 368 { PCI_PRODUCT_INTEL_82801DB_IDE, /* Intel 82801DB IDE (ICH4) */ 369 0, 370 piix_chip_map 371 }, 372 { PCI_PRODUCT_INTEL_82801DBL_IDE, /* Intel 82801DBL IDE (ICH4-L) */ 373 0, 374 piix_chip_map 375 }, 376 { PCI_PRODUCT_INTEL_82801DBM_IDE, /* Intel 82801DBM IDE (ICH4-M) */ 377 0, 378 piix_chip_map 379 }, 380 { PCI_PRODUCT_INTEL_82801EB_IDE, /* Intel 82801EB/ER (ICH5/5R) IDE */ 381 0, 382 piix_chip_map 383 }, 384 { PCI_PRODUCT_INTEL_82801EB_SATA, /* Intel 82801EB (ICH5) SATA */ 385 0, 386 piixsata_chip_map 387 }, 388 { PCI_PRODUCT_INTEL_82801ER_SATA, /* Intel 82801ER (ICH5R) SATA */ 389 0, 390 piixsata_chip_map 391 }, 392 { PCI_PRODUCT_INTEL_6300ESB_IDE, /* Intel 6300ESB IDE */ 393 0, 394 piix_chip_map 395 }, 396 { PCI_PRODUCT_INTEL_6300ESB_SATA, /* Intel 6300ESB SATA */ 397 0, 398 piixsata_chip_map 399 }, 400 { PCI_PRODUCT_INTEL_6300ESB_SATA2, /* Intel 6300ESB SATA */ 401 0, 402 piixsata_chip_map 403 }, 404 { PCI_PRODUCT_INTEL_6321ESB_IDE, /* Intel 6321ESB IDE */ 405 0, 406 piix_chip_map 407 }, 408 { PCI_PRODUCT_INTEL_82801FB_IDE, /* Intel 82801FB (ICH6) IDE */ 409 0, 410 piix_chip_map 411 }, 412 { PCI_PRODUCT_INTEL_82801FBM_SATA, /* Intel 82801FBM (ICH6M) SATA */ 413 0, 414 piixsata_chip_map 415 }, 416 { PCI_PRODUCT_INTEL_82801FB_SATA, /* Intel 82801FB (ICH6) SATA */ 417 0, 418 piixsata_chip_map 419 }, 420 { PCI_PRODUCT_INTEL_82801FR_SATA, /* Intel 82801FR (ICH6R) SATA */ 421 0, 422 piixsata_chip_map 423 }, 424 { PCI_PRODUCT_INTEL_82801GB_IDE, /* Intel 82801GB (ICH7) IDE */ 425 0, 426 piix_chip_map 427 }, 428 { PCI_PRODUCT_INTEL_82801GB_SATA, /* Intel 82801GB (ICH7) SATA */ 429 0, 430 piixsata_chip_map 431 }, 432 { PCI_PRODUCT_INTEL_82801GR_AHCI, /* Intel 82801GR (ICH7R) AHCI */ 433 0, 434 piixsata_chip_map 435 }, 436 { PCI_PRODUCT_INTEL_82801GR_RAID, /* Intel 82801GR (ICH7R) RAID */ 437 0, 438 piixsata_chip_map 439 }, 440 { PCI_PRODUCT_INTEL_82801GBM_SATA, /* Intel 82801GBM (ICH7M) SATA */ 441 0, 442 piixsata_chip_map 443 }, 444 { PCI_PRODUCT_INTEL_82801GBM_AHCI, /* Intel 82801GBM (ICH7M) AHCI */ 445 0, 446 piixsata_chip_map 447 }, 448 { PCI_PRODUCT_INTEL_82801GHM_RAID, /* Intel 82801GHM (ICH7M DH) RAID */ 449 0, 450 piixsata_chip_map 451 }, 452 { PCI_PRODUCT_INTEL_82801H_SATA_1, /* Intel 82801H (ICH8) SATA */ 453 0, 454 piixsata_chip_map 455 }, 456 { PCI_PRODUCT_INTEL_82801H_AHCI_6P, /* Intel 82801H (ICH8) AHCI */ 457 0, 458 piixsata_chip_map 459 }, 460 { PCI_PRODUCT_INTEL_82801H_RAID, /* Intel 82801H (ICH8) RAID */ 461 0, 462 piixsata_chip_map 463 }, 464 { PCI_PRODUCT_INTEL_82801H_AHCI_4P, /* Intel 82801H (ICH8) AHCI */ 465 0, 466 piixsata_chip_map 467 }, 468 { PCI_PRODUCT_INTEL_82801H_SATA_2, /* Intel 82801H (ICH8) SATA */ 469 0, 470 piixsata_chip_map 471 }, 472 { PCI_PRODUCT_INTEL_82801HBM_SATA, /* Intel 82801HBM (ICH8M) SATA */ 473 0, 474 piixsata_chip_map 475 }, 476 { 
PCI_PRODUCT_INTEL_82801HBM_AHCI, /* Intel 82801HBM (ICH8M) AHCI */ 477 0, 478 piixsata_chip_map 479 }, 480 { PCI_PRODUCT_INTEL_82801HBM_RAID, /* Intel 82801HBM (ICH8M) RAID */ 481 0, 482 piixsata_chip_map 483 }, 484 { PCI_PRODUCT_INTEL_82801HBM_IDE, /* Intel 82801HBM (ICH8M) IDE */ 485 0, 486 piix_chip_map 487 }, 488 { PCI_PRODUCT_INTEL_82801I_SATA_1, /* Intel 82801I (ICH9) SATA */ 489 0, 490 piixsata_chip_map 491 }, 492 { PCI_PRODUCT_INTEL_82801I_SATA_2, /* Intel 82801I (ICH9) SATA */ 493 0, 494 piixsata_chip_map 495 }, 496 { PCI_PRODUCT_INTEL_82801I_SATA_3, /* Intel 82801I (ICH9) SATA */ 497 0, 498 piixsata_chip_map 499 }, 500 { PCI_PRODUCT_INTEL_82801I_SATA_4, /* Intel 82801I (ICH9) SATA */ 501 0, 502 piixsata_chip_map 503 }, 504 { PCI_PRODUCT_INTEL_82801I_SATA_5, /* Intel 82801I (ICH9M) SATA */ 505 0, 506 piixsata_chip_map 507 }, 508 { PCI_PRODUCT_INTEL_82801I_SATA_6, /* Intel 82801I (ICH9M) SATA */ 509 0, 510 piixsata_chip_map 511 }, 512 { PCI_PRODUCT_INTEL_82801JD_SATA_1, /* Intel 82801JD (ICH10) SATA */ 513 0, 514 piixsata_chip_map 515 }, 516 { PCI_PRODUCT_INTEL_82801JD_SATA_2, /* Intel 82801JD (ICH10) SATA */ 517 0, 518 piixsata_chip_map 519 }, 520 { PCI_PRODUCT_INTEL_82801JI_SATA_1, /* Intel 82801JI (ICH10) SATA */ 521 0, 522 piixsata_chip_map 523 }, 524 { PCI_PRODUCT_INTEL_82801JI_SATA_2, /* Intel 82801JI (ICH10) SATA */ 525 0, 526 piixsata_chip_map 527 }, 528 { PCI_PRODUCT_INTEL_6321ESB_SATA, /* Intel 6321ESB SATA */ 529 0, 530 piixsata_chip_map 531 }, 532 { PCI_PRODUCT_INTEL_3400_SATA_1, /* Intel 3400 SATA */ 533 0, 534 piixsata_chip_map 535 }, 536 { PCI_PRODUCT_INTEL_3400_SATA_2, /* Intel 3400 SATA */ 537 0, 538 piixsata_chip_map 539 }, 540 { PCI_PRODUCT_INTEL_3400_SATA_3, /* Intel 3400 SATA */ 541 0, 542 piixsata_chip_map 543 }, 544 { PCI_PRODUCT_INTEL_3400_SATA_4, /* Intel 3400 SATA */ 545 0, 546 piixsata_chip_map 547 }, 548 { PCI_PRODUCT_INTEL_3400_SATA_5, /* Intel 3400 SATA */ 549 0, 550 piixsata_chip_map 551 }, 552 { PCI_PRODUCT_INTEL_3400_SATA_6, /* Intel 3400 SATA */ 553 0, 554 piixsata_chip_map 555 }, 556 { PCI_PRODUCT_INTEL_6SERIES_SATA_1, /* Intel 6 Series SATA */ 557 0, 558 piixsata_chip_map 559 }, 560 { PCI_PRODUCT_INTEL_6SERIES_SATA_2, /* Intel 6 Series SATA */ 561 0, 562 piixsata_chip_map 563 }, 564 { PCI_PRODUCT_INTEL_6SERIES_SATA_3, /* Intel 6 Series SATA */ 565 0, 566 piixsata_chip_map 567 }, 568 { PCI_PRODUCT_INTEL_6SERIES_SATA_4, /* Intel 6 Series SATA */ 569 0, 570 piixsata_chip_map 571 }, 572 { PCI_PRODUCT_INTEL_EP80579_SATA, /* Intel EP80579 SATA */ 573 0, 574 piixsata_chip_map 575 }, 576 { PCI_PRODUCT_INTEL_SCH_IDE, /* Intel SCH IDE */ 577 0, 578 sch_chip_map 579 } 580 }; 581 582 const struct pciide_product_desc pciide_amd_products[] = { 583 { PCI_PRODUCT_AMD_PBC756_IDE, /* AMD 756 */ 584 0, 585 amd756_chip_map 586 }, 587 { PCI_PRODUCT_AMD_766_IDE, /* AMD 766 */ 588 0, 589 amd756_chip_map 590 }, 591 { PCI_PRODUCT_AMD_PBC768_IDE, 592 0, 593 amd756_chip_map 594 }, 595 { PCI_PRODUCT_AMD_8111_IDE, 596 0, 597 amd756_chip_map 598 }, 599 { PCI_PRODUCT_AMD_CS5536_IDE, 600 0, 601 amd756_chip_map 602 }, 603 { PCI_PRODUCT_AMD_HUDSON2_IDE, 604 0, 605 ixp_chip_map 606 } 607 }; 608 609 #ifdef notyet 610 const struct pciide_product_desc pciide_opti_products[] = { 611 612 { PCI_PRODUCT_OPTI_82C621, 613 0, 614 opti_chip_map 615 }, 616 { PCI_PRODUCT_OPTI_82C568, 617 0, 618 opti_chip_map 619 }, 620 { PCI_PRODUCT_OPTI_82D568, 621 0, 622 opti_chip_map 623 } 624 }; 625 #endif 626 627 const struct pciide_product_desc pciide_cmd_products[] = { 628 { PCI_PRODUCT_CMDTECH_640, /* 
CMD Technology PCI0640 */ 629 0, 630 cmd_chip_map 631 }, 632 { PCI_PRODUCT_CMDTECH_643, /* CMD Technology PCI0643 */ 633 0, 634 cmd0643_9_chip_map 635 }, 636 { PCI_PRODUCT_CMDTECH_646, /* CMD Technology PCI0646 */ 637 0, 638 cmd0643_9_chip_map 639 }, 640 { PCI_PRODUCT_CMDTECH_648, /* CMD Technology PCI0648 */ 641 0, 642 cmd0643_9_chip_map 643 }, 644 { PCI_PRODUCT_CMDTECH_649, /* CMD Technology PCI0649 */ 645 0, 646 cmd0643_9_chip_map 647 }, 648 { PCI_PRODUCT_CMDTECH_680, /* CMD Technology PCI0680 */ 649 IDE_PCI_CLASS_OVERRIDE, 650 cmd680_chip_map 651 }, 652 { PCI_PRODUCT_CMDTECH_3112, /* SiI3112 SATA */ 653 0, 654 sii3112_chip_map 655 }, 656 { PCI_PRODUCT_CMDTECH_3512, /* SiI3512 SATA */ 657 0, 658 sii3112_chip_map 659 }, 660 { PCI_PRODUCT_CMDTECH_AAR_1210SA, /* Adaptec AAR-1210SA */ 661 0, 662 sii3112_chip_map 663 }, 664 { PCI_PRODUCT_CMDTECH_3114, /* SiI3114 SATA */ 665 0, 666 sii3114_chip_map 667 } 668 }; 669 670 const struct pciide_product_desc pciide_via_products[] = { 671 { PCI_PRODUCT_VIATECH_VT82C416, /* VIA VT82C416 IDE */ 672 0, 673 apollo_chip_map 674 }, 675 { PCI_PRODUCT_VIATECH_VT82C571, /* VIA VT82C571 IDE */ 676 0, 677 apollo_chip_map 678 }, 679 { PCI_PRODUCT_VIATECH_VT6410, /* VIA VT6410 IDE */ 680 IDE_PCI_CLASS_OVERRIDE, 681 apollo_chip_map 682 }, 683 { PCI_PRODUCT_VIATECH_VT6415, /* VIA VT6415 IDE */ 684 IDE_PCI_CLASS_OVERRIDE, 685 apollo_chip_map 686 }, 687 { PCI_PRODUCT_VIATECH_CX700_IDE, /* VIA CX700 IDE */ 688 0, 689 apollo_chip_map 690 }, 691 { PCI_PRODUCT_VIATECH_VX700_IDE, /* VIA VX700 IDE */ 692 0, 693 apollo_chip_map 694 }, 695 { PCI_PRODUCT_VIATECH_VX855_IDE, /* VIA VX855 IDE */ 696 0, 697 apollo_chip_map 698 }, 699 { PCI_PRODUCT_VIATECH_VX900_IDE, /* VIA VX900 IDE */ 700 0, 701 apollo_chip_map 702 }, 703 { PCI_PRODUCT_VIATECH_VT6420_SATA, /* VIA VT6420 SATA */ 704 0, 705 sata_chip_map 706 }, 707 { PCI_PRODUCT_VIATECH_VT6421_SATA, /* VIA VT6421 SATA */ 708 0, 709 sata_chip_map 710 }, 711 { PCI_PRODUCT_VIATECH_VT8237A_SATA, /* VIA VT8237A SATA */ 712 0, 713 sata_chip_map 714 }, 715 { PCI_PRODUCT_VIATECH_VT8237A_SATA_2, /* VIA VT8237A SATA */ 716 0, 717 sata_chip_map 718 }, 719 { PCI_PRODUCT_VIATECH_VT8237S_SATA, /* VIA VT8237S SATA */ 720 0, 721 sata_chip_map 722 }, 723 { PCI_PRODUCT_VIATECH_VT8251_SATA, /* VIA VT8251 SATA */ 724 0, 725 sata_chip_map 726 } 727 }; 728 729 const struct pciide_product_desc pciide_cypress_products[] = { 730 { PCI_PRODUCT_CONTAQ_82C693, /* Contaq CY82C693 IDE */ 731 IDE_16BIT_IOSPACE, 732 cy693_chip_map 733 } 734 }; 735 736 const struct pciide_product_desc pciide_sis_products[] = { 737 { PCI_PRODUCT_SIS_5513, /* SIS 5513 EIDE */ 738 0, 739 sis_chip_map 740 }, 741 { PCI_PRODUCT_SIS_180, /* SIS 180 SATA */ 742 0, 743 sata_chip_map 744 }, 745 { PCI_PRODUCT_SIS_181, /* SIS 181 SATA */ 746 0, 747 sata_chip_map 748 }, 749 { PCI_PRODUCT_SIS_182, /* SIS 182 SATA */ 750 0, 751 sata_chip_map 752 }, 753 { PCI_PRODUCT_SIS_1183, /* SIS 1183 SATA */ 754 0, 755 sata_chip_map 756 } 757 }; 758 759 /* 760 * The National/AMD CS5535 requires MSRs to set DMA/PIO modes so it 761 * has been banished to the MD i386 pciide_machdep 762 */ 763 const struct pciide_product_desc pciide_natsemi_products[] = { 764 #ifdef __i386__ 765 { PCI_PRODUCT_NS_CS5535_IDE, /* National/AMD CS5535 IDE */ 766 0, 767 gcsc_chip_map 768 }, 769 #endif 770 { PCI_PRODUCT_NS_PC87415, /* National Semi PC87415 IDE */ 771 0, 772 natsemi_chip_map 773 }, 774 { PCI_PRODUCT_NS_SCx200_IDE, /* National Semi SCx200 IDE */ 775 0, 776 ns_scx200_chip_map 777 } 778 }; 779 780 const struct 
pciide_product_desc pciide_acer_products[] = { 781 { PCI_PRODUCT_ALI_M5229, /* Acer Labs M5229 UDMA IDE */ 782 0, 783 acer_chip_map 784 } 785 }; 786 787 const struct pciide_product_desc pciide_triones_products[] = { 788 { PCI_PRODUCT_TRIONES_HPT366, /* Highpoint HPT36x/37x IDE */ 789 IDE_PCI_CLASS_OVERRIDE, 790 hpt_chip_map, 791 }, 792 { PCI_PRODUCT_TRIONES_HPT372A, /* Highpoint HPT372A IDE */ 793 IDE_PCI_CLASS_OVERRIDE, 794 hpt_chip_map 795 }, 796 { PCI_PRODUCT_TRIONES_HPT302, /* Highpoint HPT302 IDE */ 797 IDE_PCI_CLASS_OVERRIDE, 798 hpt_chip_map 799 }, 800 { PCI_PRODUCT_TRIONES_HPT371, /* Highpoint HPT371 IDE */ 801 IDE_PCI_CLASS_OVERRIDE, 802 hpt_chip_map 803 }, 804 { PCI_PRODUCT_TRIONES_HPT374, /* Highpoint HPT374 IDE */ 805 IDE_PCI_CLASS_OVERRIDE, 806 hpt_chip_map 807 } 808 }; 809 810 const struct pciide_product_desc pciide_promise_products[] = { 811 { PCI_PRODUCT_PROMISE_PDC20246, 812 IDE_PCI_CLASS_OVERRIDE, 813 pdc202xx_chip_map, 814 }, 815 { PCI_PRODUCT_PROMISE_PDC20262, 816 IDE_PCI_CLASS_OVERRIDE, 817 pdc202xx_chip_map, 818 }, 819 { PCI_PRODUCT_PROMISE_PDC20265, 820 IDE_PCI_CLASS_OVERRIDE, 821 pdc202xx_chip_map, 822 }, 823 { PCI_PRODUCT_PROMISE_PDC20267, 824 IDE_PCI_CLASS_OVERRIDE, 825 pdc202xx_chip_map, 826 }, 827 { PCI_PRODUCT_PROMISE_PDC20268, 828 IDE_PCI_CLASS_OVERRIDE, 829 pdc202xx_chip_map, 830 }, 831 { PCI_PRODUCT_PROMISE_PDC20268R, 832 IDE_PCI_CLASS_OVERRIDE, 833 pdc202xx_chip_map, 834 }, 835 { PCI_PRODUCT_PROMISE_PDC20269, 836 IDE_PCI_CLASS_OVERRIDE, 837 pdc202xx_chip_map, 838 }, 839 { PCI_PRODUCT_PROMISE_PDC20271, 840 IDE_PCI_CLASS_OVERRIDE, 841 pdc202xx_chip_map, 842 }, 843 { PCI_PRODUCT_PROMISE_PDC20275, 844 IDE_PCI_CLASS_OVERRIDE, 845 pdc202xx_chip_map, 846 }, 847 { PCI_PRODUCT_PROMISE_PDC20276, 848 IDE_PCI_CLASS_OVERRIDE, 849 pdc202xx_chip_map, 850 }, 851 { PCI_PRODUCT_PROMISE_PDC20277, 852 IDE_PCI_CLASS_OVERRIDE, 853 pdc202xx_chip_map, 854 }, 855 { PCI_PRODUCT_PROMISE_PDC20318, 856 IDE_PCI_CLASS_OVERRIDE, 857 pdcsata_chip_map, 858 }, 859 { PCI_PRODUCT_PROMISE_PDC20319, 860 IDE_PCI_CLASS_OVERRIDE, 861 pdcsata_chip_map, 862 }, 863 { PCI_PRODUCT_PROMISE_PDC20371, 864 IDE_PCI_CLASS_OVERRIDE, 865 pdcsata_chip_map, 866 }, 867 { PCI_PRODUCT_PROMISE_PDC20375, 868 IDE_PCI_CLASS_OVERRIDE, 869 pdcsata_chip_map, 870 }, 871 { PCI_PRODUCT_PROMISE_PDC20376, 872 IDE_PCI_CLASS_OVERRIDE, 873 pdcsata_chip_map, 874 }, 875 { PCI_PRODUCT_PROMISE_PDC20377, 876 IDE_PCI_CLASS_OVERRIDE, 877 pdcsata_chip_map, 878 }, 879 { PCI_PRODUCT_PROMISE_PDC20378, 880 IDE_PCI_CLASS_OVERRIDE, 881 pdcsata_chip_map, 882 }, 883 { PCI_PRODUCT_PROMISE_PDC20379, 884 IDE_PCI_CLASS_OVERRIDE, 885 pdcsata_chip_map, 886 }, 887 { PCI_PRODUCT_PROMISE_PDC40518, 888 IDE_PCI_CLASS_OVERRIDE, 889 pdcsata_chip_map, 890 }, 891 { PCI_PRODUCT_PROMISE_PDC40519, 892 IDE_PCI_CLASS_OVERRIDE, 893 pdcsata_chip_map, 894 }, 895 { PCI_PRODUCT_PROMISE_PDC40718, 896 IDE_PCI_CLASS_OVERRIDE, 897 pdcsata_chip_map, 898 }, 899 { PCI_PRODUCT_PROMISE_PDC40719, 900 IDE_PCI_CLASS_OVERRIDE, 901 pdcsata_chip_map, 902 }, 903 { PCI_PRODUCT_PROMISE_PDC40779, 904 IDE_PCI_CLASS_OVERRIDE, 905 pdcsata_chip_map, 906 }, 907 { PCI_PRODUCT_PROMISE_PDC20571, 908 IDE_PCI_CLASS_OVERRIDE, 909 pdcsata_chip_map, 910 }, 911 { PCI_PRODUCT_PROMISE_PDC20575, 912 IDE_PCI_CLASS_OVERRIDE, 913 pdcsata_chip_map, 914 }, 915 { PCI_PRODUCT_PROMISE_PDC20579, 916 IDE_PCI_CLASS_OVERRIDE, 917 pdcsata_chip_map, 918 }, 919 { PCI_PRODUCT_PROMISE_PDC20771, 920 IDE_PCI_CLASS_OVERRIDE, 921 pdcsata_chip_map, 922 }, 923 { PCI_PRODUCT_PROMISE_PDC20775, 924 IDE_PCI_CLASS_OVERRIDE, 925 
pdcsata_chip_map, 926 } 927 }; 928 929 const struct pciide_product_desc pciide_acard_products[] = { 930 { PCI_PRODUCT_ACARD_ATP850U, /* Acard ATP850U Ultra33 Controller */ 931 IDE_PCI_CLASS_OVERRIDE, 932 acard_chip_map, 933 }, 934 { PCI_PRODUCT_ACARD_ATP860, /* Acard ATP860 Ultra66 Controller */ 935 IDE_PCI_CLASS_OVERRIDE, 936 acard_chip_map, 937 }, 938 { PCI_PRODUCT_ACARD_ATP860A, /* Acard ATP860-A Ultra66 Controller */ 939 IDE_PCI_CLASS_OVERRIDE, 940 acard_chip_map, 941 }, 942 { PCI_PRODUCT_ACARD_ATP865A, /* Acard ATP865-A Ultra133 Controller */ 943 IDE_PCI_CLASS_OVERRIDE, 944 acard_chip_map, 945 }, 946 { PCI_PRODUCT_ACARD_ATP865R, /* Acard ATP865-R Ultra133 Controller */ 947 IDE_PCI_CLASS_OVERRIDE, 948 acard_chip_map, 949 } 950 }; 951 952 const struct pciide_product_desc pciide_serverworks_products[] = { 953 { PCI_PRODUCT_RCC_OSB4_IDE, 954 0, 955 serverworks_chip_map, 956 }, 957 { PCI_PRODUCT_RCC_CSB5_IDE, 958 0, 959 serverworks_chip_map, 960 }, 961 { PCI_PRODUCT_RCC_CSB6_IDE, 962 0, 963 serverworks_chip_map, 964 }, 965 { PCI_PRODUCT_RCC_CSB6_RAID_IDE, 966 0, 967 serverworks_chip_map, 968 }, 969 { PCI_PRODUCT_RCC_HT_1000_IDE, 970 0, 971 serverworks_chip_map, 972 }, 973 { PCI_PRODUCT_RCC_K2_SATA, 974 0, 975 svwsata_chip_map, 976 }, 977 { PCI_PRODUCT_RCC_FRODO4_SATA, 978 0, 979 svwsata_chip_map, 980 }, 981 { PCI_PRODUCT_RCC_FRODO8_SATA, 982 0, 983 svwsata_chip_map, 984 }, 985 { PCI_PRODUCT_RCC_HT_1000_SATA_1, 986 0, 987 svwsata_chip_map, 988 }, 989 { PCI_PRODUCT_RCC_HT_1000_SATA_2, 990 0, 991 svwsata_chip_map, 992 } 993 }; 994 995 const struct pciide_product_desc pciide_nvidia_products[] = { 996 { PCI_PRODUCT_NVIDIA_NFORCE_IDE, 997 0, 998 nforce_chip_map 999 }, 1000 { PCI_PRODUCT_NVIDIA_NFORCE2_IDE, 1001 0, 1002 nforce_chip_map 1003 }, 1004 { PCI_PRODUCT_NVIDIA_NFORCE2_400_IDE, 1005 0, 1006 nforce_chip_map 1007 }, 1008 { PCI_PRODUCT_NVIDIA_NFORCE3_IDE, 1009 0, 1010 nforce_chip_map 1011 }, 1012 { PCI_PRODUCT_NVIDIA_NFORCE3_250_IDE, 1013 0, 1014 nforce_chip_map 1015 }, 1016 { PCI_PRODUCT_NVIDIA_NFORCE4_ATA133, 1017 0, 1018 nforce_chip_map 1019 }, 1020 { PCI_PRODUCT_NVIDIA_MCP04_IDE, 1021 0, 1022 nforce_chip_map 1023 }, 1024 { PCI_PRODUCT_NVIDIA_MCP51_IDE, 1025 0, 1026 nforce_chip_map 1027 }, 1028 { PCI_PRODUCT_NVIDIA_MCP55_IDE, 1029 0, 1030 nforce_chip_map 1031 }, 1032 { PCI_PRODUCT_NVIDIA_MCP61_IDE, 1033 0, 1034 nforce_chip_map 1035 }, 1036 { PCI_PRODUCT_NVIDIA_MCP65_IDE, 1037 0, 1038 nforce_chip_map 1039 }, 1040 { PCI_PRODUCT_NVIDIA_MCP67_IDE, 1041 0, 1042 nforce_chip_map 1043 }, 1044 { PCI_PRODUCT_NVIDIA_MCP73_IDE, 1045 0, 1046 nforce_chip_map 1047 }, 1048 { PCI_PRODUCT_NVIDIA_MCP77_IDE, 1049 0, 1050 nforce_chip_map 1051 }, 1052 { PCI_PRODUCT_NVIDIA_NFORCE2_400_SATA, 1053 0, 1054 sata_chip_map 1055 }, 1056 { PCI_PRODUCT_NVIDIA_NFORCE3_250_SATA, 1057 0, 1058 sata_chip_map 1059 }, 1060 { PCI_PRODUCT_NVIDIA_NFORCE3_250_SATA2, 1061 0, 1062 sata_chip_map 1063 }, 1064 { PCI_PRODUCT_NVIDIA_NFORCE4_SATA1, 1065 0, 1066 sata_chip_map 1067 }, 1068 { PCI_PRODUCT_NVIDIA_NFORCE4_SATA2, 1069 0, 1070 sata_chip_map 1071 }, 1072 { PCI_PRODUCT_NVIDIA_MCP04_SATA, 1073 0, 1074 sata_chip_map 1075 }, 1076 { PCI_PRODUCT_NVIDIA_MCP04_SATA2, 1077 0, 1078 sata_chip_map 1079 }, 1080 { PCI_PRODUCT_NVIDIA_MCP51_SATA, 1081 0, 1082 sata_chip_map 1083 }, 1084 { PCI_PRODUCT_NVIDIA_MCP51_SATA2, 1085 0, 1086 sata_chip_map 1087 }, 1088 { PCI_PRODUCT_NVIDIA_MCP55_SATA, 1089 0, 1090 sata_chip_map 1091 }, 1092 { PCI_PRODUCT_NVIDIA_MCP55_SATA2, 1093 0, 1094 sata_chip_map 1095 }, 1096 { PCI_PRODUCT_NVIDIA_MCP61_SATA, 1097 0, 
1098 sata_chip_map 1099 }, 1100 { PCI_PRODUCT_NVIDIA_MCP61_SATA2, 1101 0, 1102 sata_chip_map 1103 }, 1104 { PCI_PRODUCT_NVIDIA_MCP61_SATA3, 1105 0, 1106 sata_chip_map 1107 }, 1108 { PCI_PRODUCT_NVIDIA_MCP65_SATA_1, 1109 0, 1110 sata_chip_map 1111 }, 1112 { PCI_PRODUCT_NVIDIA_MCP65_SATA_2, 1113 0, 1114 sata_chip_map 1115 }, 1116 { PCI_PRODUCT_NVIDIA_MCP65_SATA_3, 1117 0, 1118 sata_chip_map 1119 }, 1120 { PCI_PRODUCT_NVIDIA_MCP65_SATA_4, 1121 0, 1122 sata_chip_map 1123 }, 1124 { PCI_PRODUCT_NVIDIA_MCP67_SATA_1, 1125 0, 1126 sata_chip_map 1127 }, 1128 { PCI_PRODUCT_NVIDIA_MCP67_SATA_2, 1129 0, 1130 sata_chip_map 1131 }, 1132 { PCI_PRODUCT_NVIDIA_MCP67_SATA_3, 1133 0, 1134 sata_chip_map 1135 }, 1136 { PCI_PRODUCT_NVIDIA_MCP67_SATA_4, 1137 0, 1138 sata_chip_map 1139 }, 1140 { PCI_PRODUCT_NVIDIA_MCP77_SATA_1, 1141 0, 1142 sata_chip_map 1143 }, 1144 { PCI_PRODUCT_NVIDIA_MCP79_SATA_1, 1145 0, 1146 sata_chip_map 1147 }, 1148 { PCI_PRODUCT_NVIDIA_MCP79_SATA_2, 1149 0, 1150 sata_chip_map 1151 }, 1152 { PCI_PRODUCT_NVIDIA_MCP79_SATA_3, 1153 0, 1154 sata_chip_map 1155 }, 1156 { PCI_PRODUCT_NVIDIA_MCP79_SATA_4, 1157 0, 1158 sata_chip_map 1159 }, 1160 { PCI_PRODUCT_NVIDIA_MCP89_SATA_1, 1161 0, 1162 sata_chip_map 1163 }, 1164 { PCI_PRODUCT_NVIDIA_MCP89_SATA_2, 1165 0, 1166 sata_chip_map 1167 }, 1168 { PCI_PRODUCT_NVIDIA_MCP89_SATA_3, 1169 0, 1170 sata_chip_map 1171 }, 1172 { PCI_PRODUCT_NVIDIA_MCP89_SATA_4, 1173 0, 1174 sata_chip_map 1175 } 1176 }; 1177 1178 const struct pciide_product_desc pciide_ite_products[] = { 1179 { PCI_PRODUCT_ITEXPRESS_IT8211F, 1180 IDE_PCI_CLASS_OVERRIDE, 1181 ite_chip_map 1182 }, 1183 { PCI_PRODUCT_ITEXPRESS_IT8212F, 1184 IDE_PCI_CLASS_OVERRIDE, 1185 ite_chip_map 1186 } 1187 }; 1188 1189 const struct pciide_product_desc pciide_ati_products[] = { 1190 { PCI_PRODUCT_ATI_SB200_IDE, 1191 0, 1192 ixp_chip_map 1193 }, 1194 { PCI_PRODUCT_ATI_SB300_IDE, 1195 0, 1196 ixp_chip_map 1197 }, 1198 { PCI_PRODUCT_ATI_SB400_IDE, 1199 0, 1200 ixp_chip_map 1201 }, 1202 { PCI_PRODUCT_ATI_SB600_IDE, 1203 0, 1204 ixp_chip_map 1205 }, 1206 { PCI_PRODUCT_ATI_SB700_IDE, 1207 0, 1208 ixp_chip_map 1209 }, 1210 { PCI_PRODUCT_ATI_SB300_SATA, 1211 0, 1212 sii3112_chip_map 1213 }, 1214 { PCI_PRODUCT_ATI_SB400_SATA_1, 1215 0, 1216 sii3112_chip_map 1217 }, 1218 { PCI_PRODUCT_ATI_SB400_SATA_2, 1219 0, 1220 sii3112_chip_map 1221 } 1222 }; 1223 1224 const struct pciide_product_desc pciide_jmicron_products[] = { 1225 { PCI_PRODUCT_JMICRON_JMB361, 1226 0, 1227 jmicron_chip_map 1228 }, 1229 { PCI_PRODUCT_JMICRON_JMB363, 1230 0, 1231 jmicron_chip_map 1232 }, 1233 { PCI_PRODUCT_JMICRON_JMB365, 1234 0, 1235 jmicron_chip_map 1236 }, 1237 { PCI_PRODUCT_JMICRON_JMB366, 1238 0, 1239 jmicron_chip_map 1240 }, 1241 { PCI_PRODUCT_JMICRON_JMB368, 1242 0, 1243 jmicron_chip_map 1244 } 1245 }; 1246 1247 const struct pciide_product_desc pciide_phison_products[] = { 1248 { PCI_PRODUCT_PHISON_PS5000, 1249 0, 1250 phison_chip_map 1251 }, 1252 }; 1253 1254 struct pciide_vendor_desc { 1255 u_int32_t ide_vendor; 1256 const struct pciide_product_desc *ide_products; 1257 int ide_nproducts; 1258 }; 1259 1260 const struct pciide_vendor_desc pciide_vendors[] = { 1261 { PCI_VENDOR_INTEL, pciide_intel_products, 1262 nitems(pciide_intel_products) }, 1263 { PCI_VENDOR_AMD, pciide_amd_products, 1264 nitems(pciide_amd_products) }, 1265 #ifdef notyet 1266 { PCI_VENDOR_OPTI, pciide_opti_products, 1267 nitems(pciide_opti_products) }, 1268 #endif 1269 { PCI_VENDOR_CMDTECH, pciide_cmd_products, 1270 nitems(pciide_cmd_products) }, 1271 { 
PCI_VENDOR_VIATECH, pciide_via_products,
	  nitems(pciide_via_products) },
	{ PCI_VENDOR_CONTAQ, pciide_cypress_products,
	  nitems(pciide_cypress_products) },
	{ PCI_VENDOR_SIS, pciide_sis_products,
	  nitems(pciide_sis_products) },
	{ PCI_VENDOR_NS, pciide_natsemi_products,
	  nitems(pciide_natsemi_products) },
	{ PCI_VENDOR_ALI, pciide_acer_products,
	  nitems(pciide_acer_products) },
	{ PCI_VENDOR_TRIONES, pciide_triones_products,
	  nitems(pciide_triones_products) },
	{ PCI_VENDOR_ACARD, pciide_acard_products,
	  nitems(pciide_acard_products) },
	{ PCI_VENDOR_RCC, pciide_serverworks_products,
	  nitems(pciide_serverworks_products) },
	{ PCI_VENDOR_PROMISE, pciide_promise_products,
	  nitems(pciide_promise_products) },
	{ PCI_VENDOR_NVIDIA, pciide_nvidia_products,
	  nitems(pciide_nvidia_products) },
	{ PCI_VENDOR_ITEXPRESS, pciide_ite_products,
	  nitems(pciide_ite_products) },
	{ PCI_VENDOR_ATI, pciide_ati_products,
	  nitems(pciide_ati_products) },
	{ PCI_VENDOR_JMICRON, pciide_jmicron_products,
	  nitems(pciide_jmicron_products) },
	{ PCI_VENDOR_PHISON, pciide_phison_products,
	  nitems(pciide_phison_products) }
};

/* options passed via the 'flags' config keyword */
#define PCIIDE_OPTIONS_DMA	0x01

int	pciide_match(struct device *, void *, void *);
void	pciide_attach(struct device *, struct device *, void *);
int	pciide_detach(struct device *, int);
int	pciide_activate(struct device *, int);

struct cfattach pciide_pci_ca = {
	sizeof(struct pciide_softc), pciide_match, pciide_attach,
	pciide_detach, pciide_activate
};

struct cfattach pciide_jmb_ca = {
	sizeof(struct pciide_softc), pciide_match, pciide_attach,
	pciide_detach, pciide_activate
};

struct cfdriver pciide_cd = {
	NULL, "pciide", DV_DULL
};

const struct pciide_product_desc *pciide_lookup_product(u_int32_t);

const struct pciide_product_desc *
pciide_lookup_product(u_int32_t id)
{
	const struct pciide_product_desc *pp;
	const struct pciide_vendor_desc *vp;
	int i;

	for (i = 0, vp = pciide_vendors; i < nitems(pciide_vendors); vp++, i++)
		if (PCI_VENDOR(id) == vp->ide_vendor)
			break;

	if (i == nitems(pciide_vendors))
		return (NULL);

	for (pp = vp->ide_products, i = 0; i < vp->ide_nproducts; pp++, i++)
		if (PCI_PRODUCT(id) == pp->ide_product)
			break;

	if (i == vp->ide_nproducts)
		return (NULL);
	return (pp);
}

int
pciide_match(struct device *parent, void *match, void *aux)
{
	struct pci_attach_args *pa = aux;
	const struct pciide_product_desc *pp;

	/*
	 * Some IDE controllers have severe bugs when used in PCI mode.
	 * We punt and attach them to the ISA bus instead.
	 */
	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_PCTECH &&
	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_PCTECH_RZ1000)
		return (0);

	/*
	 * Some controllers (e.g. Promise Ultra-33) don't claim to be PCI IDE
	 * controllers.  Let's see if we can deal with them anyway.
	 */
	pp = pciide_lookup_product(pa->pa_id);
	if (pp && (pp->ide_flags & IDE_PCI_CLASS_OVERRIDE))
		return (1);

	/*
	 * Check the ID register to see that it's a PCI IDE controller.
	 * If it is, we assume that we can deal with it; it _should_
	 * work in a standardized way...
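	 * In practice this means the mass storage class with the IDE
	 * subclass; the SATA, RAID and misc storage subclasses are only
	 * accepted below when the device is listed in the tables above.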
1374 */ 1375 if (PCI_CLASS(pa->pa_class) == PCI_CLASS_MASS_STORAGE) { 1376 switch (PCI_SUBCLASS(pa->pa_class)) { 1377 case PCI_SUBCLASS_MASS_STORAGE_IDE: 1378 return (1); 1379 1380 /* 1381 * We only match these if we know they have 1382 * a match, as we may not support native interfaces 1383 * on them. 1384 */ 1385 case PCI_SUBCLASS_MASS_STORAGE_SATA: 1386 case PCI_SUBCLASS_MASS_STORAGE_RAID: 1387 case PCI_SUBCLASS_MASS_STORAGE_MISC: 1388 if (pp) 1389 return (1); 1390 else 1391 return (0); 1392 break; 1393 } 1394 } 1395 1396 return (0); 1397 } 1398 1399 void 1400 pciide_attach(struct device *parent, struct device *self, void *aux) 1401 { 1402 struct pciide_softc *sc = (struct pciide_softc *)self; 1403 struct pci_attach_args *pa = aux; 1404 1405 sc->sc_pp = pciide_lookup_product(pa->pa_id); 1406 if (sc->sc_pp == NULL) 1407 sc->sc_pp = &default_product_desc; 1408 sc->sc_rev = PCI_REVISION(pa->pa_class); 1409 1410 sc->sc_pc = pa->pa_pc; 1411 sc->sc_tag = pa->pa_tag; 1412 1413 /* Set up DMA defaults; these might be adjusted by chip_map. */ 1414 sc->sc_dma_maxsegsz = IDEDMA_BYTE_COUNT_MAX; 1415 sc->sc_dma_boundary = IDEDMA_BYTE_COUNT_ALIGN; 1416 1417 sc->sc_dmacmd_read = pciide_dmacmd_read; 1418 sc->sc_dmacmd_write = pciide_dmacmd_write; 1419 sc->sc_dmactl_read = pciide_dmactl_read; 1420 sc->sc_dmactl_write = pciide_dmactl_write; 1421 sc->sc_dmatbl_write = pciide_dmatbl_write; 1422 1423 WDCDEBUG_PRINT((" sc_pc=%p, sc_tag=%p, pa_class=0x%x\n", sc->sc_pc, 1424 sc->sc_tag, pa->pa_class), DEBUG_PROBE); 1425 1426 sc->sc_pp->chip_map(sc, pa); 1427 1428 WDCDEBUG_PRINT(("pciide: command/status register=0x%x\n", 1429 pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG)), 1430 DEBUG_PROBE); 1431 } 1432 1433 int 1434 pciide_detach(struct device *self, int flags) 1435 { 1436 struct pciide_softc *sc = (struct pciide_softc *)self; 1437 if (sc->chip_unmap == NULL) 1438 panic("unmap not yet implemented for this chipset"); 1439 else 1440 sc->chip_unmap(sc, flags); 1441 1442 return 0; 1443 } 1444 1445 int 1446 pciide_activate(struct device *self, int act) 1447 { 1448 int rv = 0; 1449 struct pciide_softc *sc = (struct pciide_softc *)self; 1450 int i; 1451 1452 switch (act) { 1453 case DVACT_QUIESCE: 1454 rv = config_activate_children(self, act); 1455 break; 1456 case DVACT_SUSPEND: 1457 rv = config_activate_children(self, act); 1458 1459 for (i = 0; i < nitems(sc->sc_save); i++) 1460 sc->sc_save[i] = pci_conf_read(sc->sc_pc, 1461 sc->sc_tag, PCI_MAPREG_END + 0x18 + (i * 4)); 1462 1463 if (sc->sc_pp->chip_map == sch_chip_map) { 1464 sc->sc_save2[0] = pci_conf_read(sc->sc_pc, 1465 sc->sc_tag, SCH_D0TIM); 1466 sc->sc_save2[1] = pci_conf_read(sc->sc_pc, 1467 sc->sc_tag, SCH_D1TIM); 1468 } else if (sc->sc_pp->chip_map == piixsata_chip_map) { 1469 sc->sc_save2[0] = pciide_pci_read(sc->sc_pc, 1470 sc->sc_tag, ICH5_SATA_MAP); 1471 sc->sc_save2[1] = pciide_pci_read(sc->sc_pc, 1472 sc->sc_tag, ICH5_SATA_PI); 1473 sc->sc_save2[2] = pciide_pci_read(sc->sc_pc, 1474 sc->sc_tag, ICH_SATA_PCS); 1475 } else if (sc->sc_pp->chip_map == sii3112_chip_map) { 1476 sc->sc_save[0] = pci_conf_read(sc->sc_pc, 1477 sc->sc_tag, SII3112_SCS_CMD); 1478 sc->sc_save[1] = pci_conf_read(sc->sc_pc, 1479 sc->sc_tag, SII3112_PCI_CFGCTL); 1480 } else if (sc->sc_pp->chip_map == ite_chip_map) { 1481 sc->sc_save2[0] = pci_conf_read(sc->sc_pc, 1482 sc->sc_tag, IT_TIM(0)); 1483 } else if (sc->sc_pp->chip_map == nforce_chip_map) { 1484 sc->sc_save2[0] = pci_conf_read(sc->sc_pc, 1485 sc->sc_tag, NFORCE_PIODMATIM); 1486 sc->sc_save2[1] = 
pci_conf_read(sc->sc_pc, 1487 sc->sc_tag, NFORCE_PIOTIM); 1488 sc->sc_save2[2] = pci_conf_read(sc->sc_pc, 1489 sc->sc_tag, NFORCE_UDMATIM); 1490 } 1491 break; 1492 case DVACT_RESUME: 1493 for (i = 0; i < nitems(sc->sc_save); i++) 1494 pci_conf_write(sc->sc_pc, sc->sc_tag, 1495 PCI_MAPREG_END + 0x18 + (i * 4), 1496 sc->sc_save[i]); 1497 1498 if (sc->sc_pp->chip_map == default_chip_map || 1499 sc->sc_pp->chip_map == sata_chip_map || 1500 sc->sc_pp->chip_map == piix_chip_map || 1501 sc->sc_pp->chip_map == amd756_chip_map || 1502 sc->sc_pp->chip_map == phison_chip_map || 1503 sc->sc_pp->chip_map == ixp_chip_map || 1504 sc->sc_pp->chip_map == acard_chip_map || 1505 sc->sc_pp->chip_map == default_chip_map || 1506 sc->sc_pp->chip_map == apollo_chip_map || 1507 sc->sc_pp->chip_map == sis_chip_map) { 1508 /* nothing to restore -- uses only 0x40 - 0x56 */ 1509 } else if (sc->sc_pp->chip_map == sch_chip_map) { 1510 pci_conf_write(sc->sc_pc, sc->sc_tag, 1511 SCH_D0TIM, sc->sc_save2[0]); 1512 pci_conf_write(sc->sc_pc, sc->sc_tag, 1513 SCH_D1TIM, sc->sc_save2[1]); 1514 } else if (sc->sc_pp->chip_map == piixsata_chip_map) { 1515 pciide_pci_write(sc->sc_pc, sc->sc_tag, 1516 ICH5_SATA_MAP, sc->sc_save2[0]); 1517 pciide_pci_write(sc->sc_pc, sc->sc_tag, 1518 ICH5_SATA_PI, sc->sc_save2[1]); 1519 pciide_pci_write(sc->sc_pc, sc->sc_tag, 1520 ICH_SATA_PCS, sc->sc_save2[2]); 1521 } else if (sc->sc_pp->chip_map == sii3112_chip_map) { 1522 pci_conf_write(sc->sc_pc, sc->sc_tag, 1523 SII3112_SCS_CMD, sc->sc_save[0]); 1524 delay(50 * 1000); 1525 pci_conf_write(sc->sc_pc, sc->sc_tag, 1526 SII3112_PCI_CFGCTL, sc->sc_save[1]); 1527 delay(50 * 1000); 1528 } else if (sc->sc_pp->chip_map == ite_chip_map) { 1529 pci_conf_write(sc->sc_pc, sc->sc_tag, 1530 IT_TIM(0), sc->sc_save2[0]); 1531 } else if (sc->sc_pp->chip_map == nforce_chip_map) { 1532 pci_conf_write(sc->sc_pc, sc->sc_tag, 1533 NFORCE_PIODMATIM, sc->sc_save2[0]); 1534 pci_conf_write(sc->sc_pc, sc->sc_tag, 1535 NFORCE_PIOTIM, sc->sc_save2[1]); 1536 pci_conf_write(sc->sc_pc, sc->sc_tag, 1537 NFORCE_UDMATIM, sc->sc_save2[2]); 1538 } else { 1539 printf("%s: restore for unknown chip map %x\n", 1540 sc->sc_wdcdev.sc_dev.dv_xname, 1541 sc->sc_pp->ide_product); 1542 } 1543 1544 rv = config_activate_children(self, act); 1545 break; 1546 } 1547 return (rv); 1548 } 1549 1550 int 1551 pciide_mapregs_compat(struct pci_attach_args *pa, struct pciide_channel *cp, 1552 int compatchan, bus_size_t *cmdsizep, bus_size_t *ctlsizep) 1553 { 1554 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 1555 struct channel_softc *wdc_cp = &cp->wdc_channel; 1556 pcireg_t csr; 1557 1558 cp->compat = 1; 1559 *cmdsizep = PCIIDE_COMPAT_CMD_SIZE; 1560 *ctlsizep = PCIIDE_COMPAT_CTL_SIZE; 1561 1562 csr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG); 1563 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG, 1564 csr | PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MASTER_ENABLE); 1565 1566 wdc_cp->cmd_iot = pa->pa_iot; 1567 1568 if (bus_space_map(wdc_cp->cmd_iot, PCIIDE_COMPAT_CMD_BASE(compatchan), 1569 PCIIDE_COMPAT_CMD_SIZE, 0, &wdc_cp->cmd_ioh) != 0) { 1570 printf("%s: couldn't map %s cmd regs\n", 1571 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 1572 return (0); 1573 } 1574 1575 wdc_cp->ctl_iot = pa->pa_iot; 1576 1577 if (bus_space_map(wdc_cp->ctl_iot, PCIIDE_COMPAT_CTL_BASE(compatchan), 1578 PCIIDE_COMPAT_CTL_SIZE, 0, &wdc_cp->ctl_ioh) != 0) { 1579 printf("%s: couldn't map %s ctl regs\n", 1580 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 1581 
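		/* Undo the cmd regs mapping set up above before failing. */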
bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, 1582 PCIIDE_COMPAT_CMD_SIZE); 1583 return (0); 1584 } 1585 wdc_cp->cmd_iosz = *cmdsizep; 1586 wdc_cp->ctl_iosz = *ctlsizep; 1587 1588 return (1); 1589 } 1590 1591 int 1592 pciide_unmapregs_compat(struct pciide_softc *sc, struct pciide_channel *cp) 1593 { 1594 struct channel_softc *wdc_cp = &cp->wdc_channel; 1595 1596 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, wdc_cp->cmd_iosz); 1597 bus_space_unmap(wdc_cp->ctl_iot, wdc_cp->cmd_ioh, wdc_cp->ctl_iosz); 1598 1599 if (sc->sc_pci_ih != NULL) { 1600 pciide_machdep_compat_intr_disestablish(sc->sc_pc, sc->sc_pci_ih); 1601 sc->sc_pci_ih = NULL; 1602 } 1603 1604 return (0); 1605 } 1606 1607 int 1608 pciide_mapregs_native(struct pci_attach_args *pa, struct pciide_channel *cp, 1609 bus_size_t *cmdsizep, bus_size_t *ctlsizep, int (*pci_intr)(void *)) 1610 { 1611 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 1612 struct channel_softc *wdc_cp = &cp->wdc_channel; 1613 const char *intrstr; 1614 pci_intr_handle_t intrhandle; 1615 pcireg_t maptype; 1616 1617 cp->compat = 0; 1618 1619 if (sc->sc_pci_ih == NULL) { 1620 if (pci_intr_map(pa, &intrhandle) != 0) { 1621 printf("%s: couldn't map native-PCI interrupt\n", 1622 sc->sc_wdcdev.sc_dev.dv_xname); 1623 return (0); 1624 } 1625 intrstr = pci_intr_string(pa->pa_pc, intrhandle); 1626 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc, 1627 intrhandle, IPL_BIO, pci_intr, sc, 1628 sc->sc_wdcdev.sc_dev.dv_xname); 1629 if (sc->sc_pci_ih != NULL) { 1630 printf("%s: using %s for native-PCI interrupt\n", 1631 sc->sc_wdcdev.sc_dev.dv_xname, 1632 intrstr ? intrstr : "unknown interrupt"); 1633 } else { 1634 printf("%s: couldn't establish native-PCI interrupt", 1635 sc->sc_wdcdev.sc_dev.dv_xname); 1636 if (intrstr != NULL) 1637 printf(" at %s", intrstr); 1638 printf("\n"); 1639 return (0); 1640 } 1641 } 1642 cp->ih = sc->sc_pci_ih; 1643 sc->sc_pc = pa->pa_pc; 1644 1645 maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, 1646 PCIIDE_REG_CMD_BASE(wdc_cp->channel)); 1647 WDCDEBUG_PRINT(("%s: %s cmd regs mapping: %s\n", 1648 sc->sc_wdcdev.sc_dev.dv_xname, cp->name, 1649 (maptype == PCI_MAPREG_TYPE_IO ? "I/O" : "memory")), DEBUG_PROBE); 1650 if (pci_mapreg_map(pa, PCIIDE_REG_CMD_BASE(wdc_cp->channel), 1651 maptype, 0, 1652 &wdc_cp->cmd_iot, &wdc_cp->cmd_ioh, NULL, cmdsizep, 0) != 0) { 1653 printf("%s: couldn't map %s cmd regs\n", 1654 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 1655 return (0); 1656 } 1657 1658 maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, 1659 PCIIDE_REG_CTL_BASE(wdc_cp->channel)); 1660 WDCDEBUG_PRINT(("%s: %s ctl regs mapping: %s\n", 1661 sc->sc_wdcdev.sc_dev.dv_xname, cp->name, 1662 (maptype == PCI_MAPREG_TYPE_IO ? "I/O": "memory")), DEBUG_PROBE); 1663 if (pci_mapreg_map(pa, PCIIDE_REG_CTL_BASE(wdc_cp->channel), 1664 maptype, 0, 1665 &wdc_cp->ctl_iot, &cp->ctl_baseioh, NULL, ctlsizep, 0) != 0) { 1666 printf("%s: couldn't map %s ctl regs\n", 1667 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 1668 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep); 1669 return (0); 1670 } 1671 /* 1672 * In native mode, 4 bytes of I/O space are mapped for the control 1673 * register, the control register is at offset 2. Pass the generic 1674 * code a handle for only one byte at the right offset. 
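	 * (That 4-byte block comes from the channel's control BAR mapped
	 * above; per the PCI IDE controller specification referenced at the
	 * top of this file, the alternate status/device control register sits
	 * at offset 2 of it, which is why the subregion below starts there.)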
1675 */ 1676 if (bus_space_subregion(wdc_cp->ctl_iot, cp->ctl_baseioh, 2, 1, 1677 &wdc_cp->ctl_ioh) != 0) { 1678 printf("%s: unable to subregion %s ctl regs\n", 1679 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 1680 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, *cmdsizep); 1681 bus_space_unmap(wdc_cp->cmd_iot, cp->ctl_baseioh, *ctlsizep); 1682 return (0); 1683 } 1684 wdc_cp->cmd_iosz = *cmdsizep; 1685 wdc_cp->ctl_iosz = *ctlsizep; 1686 1687 return (1); 1688 } 1689 1690 int 1691 pciide_unmapregs_native(struct pciide_softc *sc, struct pciide_channel *cp) 1692 { 1693 struct channel_softc *wdc_cp = &cp->wdc_channel; 1694 1695 bus_space_unmap(wdc_cp->cmd_iot, wdc_cp->cmd_ioh, wdc_cp->cmd_iosz); 1696 1697 /* Unmap the whole control space, not just the sub-region */ 1698 bus_space_unmap(wdc_cp->ctl_iot, cp->ctl_baseioh, wdc_cp->ctl_iosz); 1699 1700 if (sc->sc_pci_ih != NULL) { 1701 pci_intr_disestablish(sc->sc_pc, sc->sc_pci_ih); 1702 sc->sc_pci_ih = NULL; 1703 } 1704 1705 return (0); 1706 } 1707 1708 void 1709 pciide_mapreg_dma(struct pciide_softc *sc, struct pci_attach_args *pa) 1710 { 1711 pcireg_t maptype; 1712 bus_addr_t addr; 1713 1714 /* 1715 * Map DMA registers 1716 * 1717 * Note that sc_dma_ok is the right variable to test to see if 1718 * DMA can be done. If the interface doesn't support DMA, 1719 * sc_dma_ok will never be non-zero. If the DMA regs couldn't 1720 * be mapped, it'll be zero. I.e., sc_dma_ok will only be 1721 * non-zero if the interface supports DMA and the registers 1722 * could be mapped. 1723 * 1724 * XXX Note that despite the fact that the Bus Master IDE specs 1725 * XXX say that "The bus master IDE function uses 16 bytes of IO 1726 * XXX space", some controllers (at least the United 1727 * XXX Microelectronics UM8886BF) place it in memory space. 
1728 */ 1729 1730 maptype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, 1731 PCIIDE_REG_BUS_MASTER_DMA); 1732 1733 switch (maptype) { 1734 case PCI_MAPREG_TYPE_IO: 1735 sc->sc_dma_ok = (pci_mapreg_info(pa->pa_pc, pa->pa_tag, 1736 PCIIDE_REG_BUS_MASTER_DMA, PCI_MAPREG_TYPE_IO, 1737 &addr, NULL, NULL) == 0); 1738 if (sc->sc_dma_ok == 0) { 1739 printf(", unused (couldn't query registers)"); 1740 break; 1741 } 1742 if ((sc->sc_pp->ide_flags & IDE_16BIT_IOSPACE) 1743 && addr >= 0x10000) { 1744 sc->sc_dma_ok = 0; 1745 printf(", unused (registers at unsafe address %#lx)", addr); 1746 break; 1747 } 1748 /* FALLTHROUGH */ 1749 1750 case PCI_MAPREG_MEM_TYPE_32BIT: 1751 sc->sc_dma_ok = (pci_mapreg_map(pa, 1752 PCIIDE_REG_BUS_MASTER_DMA, maptype, 0, 1753 &sc->sc_dma_iot, &sc->sc_dma_ioh, NULL, &sc->sc_dma_iosz, 1754 0) == 0); 1755 sc->sc_dmat = pa->pa_dmat; 1756 if (sc->sc_dma_ok == 0) { 1757 printf(", unused (couldn't map registers)"); 1758 } else { 1759 sc->sc_wdcdev.dma_arg = sc; 1760 sc->sc_wdcdev.dma_init = pciide_dma_init; 1761 sc->sc_wdcdev.dma_start = pciide_dma_start; 1762 sc->sc_wdcdev.dma_finish = pciide_dma_finish; 1763 } 1764 break; 1765 1766 default: 1767 sc->sc_dma_ok = 0; 1768 printf(", (unsupported maptype 0x%x)", maptype); 1769 break; 1770 } 1771 } 1772 1773 void 1774 pciide_unmapreg_dma(struct pciide_softc *sc) 1775 { 1776 bus_space_unmap(sc->sc_dma_iot, sc->sc_dma_ioh, sc->sc_dma_iosz); 1777 } 1778 1779 int 1780 pciide_intr_flag(struct pciide_channel *cp) 1781 { 1782 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 1783 int chan = cp->wdc_channel.channel; 1784 1785 if (cp->dma_in_progress) { 1786 int retry = 10; 1787 int status; 1788 1789 /* Check the status register */ 1790 for (retry = 10; retry > 0; retry--) { 1791 status = PCIIDE_DMACTL_READ(sc, chan); 1792 if (status & IDEDMA_CTL_INTR) { 1793 break; 1794 } 1795 DELAY(5); 1796 } 1797 1798 /* Not for us. */ 1799 if (retry == 0) 1800 return (0); 1801 1802 return (1); 1803 } 1804 1805 return (-1); 1806 } 1807 1808 int 1809 pciide_compat_intr(void *arg) 1810 { 1811 struct pciide_channel *cp = arg; 1812 1813 if (pciide_intr_flag(cp) == 0) 1814 return (0); 1815 1816 #ifdef DIAGNOSTIC 1817 /* should only be called for a compat channel */ 1818 if (cp->compat == 0) 1819 panic("pciide compat intr called for non-compat chan %p", cp); 1820 #endif 1821 return (wdcintr(&cp->wdc_channel)); 1822 } 1823 1824 int 1825 pciide_pci_intr(void *arg) 1826 { 1827 struct pciide_softc *sc = arg; 1828 struct pciide_channel *cp; 1829 struct channel_softc *wdc_cp; 1830 int i, rv, crv; 1831 1832 rv = 0; 1833 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 1834 cp = &sc->pciide_channels[i]; 1835 wdc_cp = &cp->wdc_channel; 1836 1837 /* If a compat channel skip. 
 */
		if (cp->compat)
			continue;

		if (cp->hw_ok == 0)
			continue;

		if (pciide_intr_flag(cp) == 0)
			continue;

		crv = wdcintr(wdc_cp);
		if (crv == 0)
			;		/* leave rv alone */
		else if (crv == 1)
			rv = 1;		/* claim the intr */
		else if (rv == 0)	/* crv should be -1 in this case */
			rv = crv;	/* if we've done no better, take it */
	}
	return (rv);
}

u_int8_t
pciide_dmacmd_read(struct pciide_softc *sc, int chan)
{
	return (bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CMD(chan)));
}

void
pciide_dmacmd_write(struct pciide_softc *sc, int chan, u_int8_t val)
{
	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CMD(chan), val);
}

u_int8_t
pciide_dmactl_read(struct pciide_softc *sc, int chan)
{
	return (bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CTL(chan)));
}

void
pciide_dmactl_write(struct pciide_softc *sc, int chan, u_int8_t val)
{
	bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_CTL(chan), val);
}

void
pciide_dmatbl_write(struct pciide_softc *sc, int chan, u_int32_t val)
{
	bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh,
	    IDEDMA_TBL(chan), val);
}

void
pciide_channel_dma_setup(struct pciide_channel *cp)
{
	int drive;
	struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc;
	struct ata_drive_datas *drvp;

	for (drive = 0; drive < 2; drive++) {
		drvp = &cp->wdc_channel.ch_drive[drive];
		/* If no drive, skip */
		if ((drvp->drive_flags & DRIVE) == 0)
			continue;
		/* setup DMA if needed */
		if (((drvp->drive_flags & DRIVE_DMA) == 0 &&
		    (drvp->drive_flags & DRIVE_UDMA) == 0) ||
		    sc->sc_dma_ok == 0) {
			drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
			continue;
		}
		if (pciide_dma_table_setup(sc, cp->wdc_channel.channel, drive)
		    != 0) {
			/* Abort DMA setup */
			drvp->drive_flags &= ~(DRIVE_DMA | DRIVE_UDMA);
			continue;
		}
	}
}

int
pciide_dma_table_setup(struct pciide_softc *sc, int channel, int drive)
{
	bus_dma_segment_t seg;
	int error, rseg;
	const bus_size_t dma_table_size =
	    sizeof(struct idedma_table) * NIDEDMA_TABLES;
	struct pciide_dma_maps *dma_maps =
	    &sc->pciide_channels[channel].dma_maps[drive];

	/* If table was already allocated, just return */
	if (dma_maps->dma_table)
		return (0);

	/* Allocate memory for the DMA tables and map it */
	if ((error = bus_dmamem_alloc(sc->sc_dmat, dma_table_size,
	    IDEDMA_TBL_ALIGN, IDEDMA_TBL_ALIGN, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("%s:%d: unable to allocate table DMA for "
		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
		    channel, drive, error);
		return (error);
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    dma_table_size,
	    (caddr_t *)&dma_maps->dma_table,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		printf("%s:%d: unable to map table DMA for "
		    "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname,
		    channel, drive, error);
		return (error);
	}

	WDCDEBUG_PRINT(("pciide_dma_table_setup: table at %p len %ld, "
	    "phy 0x%lx\n", dma_maps->dma_table, dma_table_size,
	    seg.ds_addr), DEBUG_PROBE);

	/* Create and load table DMA map for this disk */
	if ((error =
bus_dmamap_create(sc->sc_dmat, dma_table_size, 1961 1, dma_table_size, IDEDMA_TBL_ALIGN, BUS_DMA_NOWAIT, 1962 &dma_maps->dmamap_table)) != 0) { 1963 printf("%s:%d: unable to create table DMA map for " 1964 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname, 1965 channel, drive, error); 1966 return (error); 1967 } 1968 if ((error = bus_dmamap_load(sc->sc_dmat, 1969 dma_maps->dmamap_table, 1970 dma_maps->dma_table, 1971 dma_table_size, NULL, BUS_DMA_NOWAIT)) != 0) { 1972 printf("%s:%d: unable to load table DMA map for " 1973 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname, 1974 channel, drive, error); 1975 return (error); 1976 } 1977 WDCDEBUG_PRINT(("pciide_dma_table_setup: phy addr of table 0x%lx\n", 1978 dma_maps->dmamap_table->dm_segs[0].ds_addr), DEBUG_PROBE); 1979 /* Create a xfer DMA map for this drive */ 1980 if ((error = bus_dmamap_create(sc->sc_dmat, IDEDMA_BYTE_COUNT_MAX, 1981 NIDEDMA_TABLES, sc->sc_dma_maxsegsz, sc->sc_dma_boundary, 1982 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, 1983 &dma_maps->dmamap_xfer)) != 0) { 1984 printf("%s:%d: unable to create xfer DMA map for " 1985 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname, 1986 channel, drive, error); 1987 return (error); 1988 } 1989 return (0); 1990 } 1991 1992 int 1993 pciide_dma_init(void *v, int channel, int drive, void *databuf, 1994 size_t datalen, int flags) 1995 { 1996 struct pciide_softc *sc = v; 1997 int error, seg; 1998 struct pciide_channel *cp = &sc->pciide_channels[channel]; 1999 struct pciide_dma_maps *dma_maps = 2000 &sc->pciide_channels[channel].dma_maps[drive]; 2001 #ifndef BUS_DMA_RAW 2002 #define BUS_DMA_RAW 0 2003 #endif 2004 2005 error = bus_dmamap_load(sc->sc_dmat, 2006 dma_maps->dmamap_xfer, 2007 databuf, datalen, NULL, BUS_DMA_NOWAIT|BUS_DMA_RAW); 2008 if (error) { 2009 printf("%s:%d: unable to load xfer DMA map for " 2010 "drive %d, error=%d\n", sc->sc_wdcdev.sc_dev.dv_xname, 2011 channel, drive, error); 2012 return (error); 2013 } 2014 2015 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0, 2016 dma_maps->dmamap_xfer->dm_mapsize, 2017 (flags & WDC_DMA_READ) ? 2018 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE); 2019 2020 for (seg = 0; seg < dma_maps->dmamap_xfer->dm_nsegs; seg++) { 2021 #ifdef DIAGNOSTIC 2022 /* A segment must not cross a 64k boundary */ 2023 { 2024 u_long phys = dma_maps->dmamap_xfer->dm_segs[seg].ds_addr; 2025 u_long len = dma_maps->dmamap_xfer->dm_segs[seg].ds_len; 2026 if ((phys & ~IDEDMA_BYTE_COUNT_MASK) != 2027 ((phys + len - 1) & ~IDEDMA_BYTE_COUNT_MASK)) { 2028 printf("pciide_dma: segment %d physical addr 0x%lx" 2029 " len 0x%lx not properly aligned\n", 2030 seg, phys, len); 2031 panic("pciide_dma: buf align"); 2032 } 2033 } 2034 #endif 2035 dma_maps->dma_table[seg].base_addr = 2036 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_addr); 2037 dma_maps->dma_table[seg].byte_count = 2038 htole32(dma_maps->dmamap_xfer->dm_segs[seg].ds_len & 2039 IDEDMA_BYTE_COUNT_MASK); 2040 WDCDEBUG_PRINT(("\t seg %d len %d addr 0x%x\n", 2041 seg, letoh32(dma_maps->dma_table[seg].byte_count), 2042 letoh32(dma_maps->dma_table[seg].base_addr)), DEBUG_DMA); 2043 2044 } 2045 dma_maps->dma_table[dma_maps->dmamap_xfer->dm_nsegs -1].byte_count |= 2046 htole32(IDEDMA_BYTE_COUNT_EOT); 2047 2048 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_table, 0, 2049 dma_maps->dmamap_table->dm_mapsize, 2050 BUS_DMASYNC_PREWRITE); 2051 2052 /* Maps are ready. 
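The PRD table now describes the whole transfer: the code below clears the status bits, loads the table pointer and sets the transfer direction; the START bit is only set later, in pciide_dma_start().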
Start DMA function */ 2053 #ifdef DIAGNOSTIC 2054 if (dma_maps->dmamap_table->dm_segs[0].ds_addr & ~IDEDMA_TBL_MASK) { 2055 printf("pciide_dma_init: addr 0x%lx not properly aligned\n", 2056 dma_maps->dmamap_table->dm_segs[0].ds_addr); 2057 panic("pciide_dma_init: table align"); 2058 } 2059 #endif 2060 2061 /* Clear status bits */ 2062 PCIIDE_DMACTL_WRITE(sc, channel, PCIIDE_DMACTL_READ(sc, channel)); 2063 /* Write table addr */ 2064 PCIIDE_DMATBL_WRITE(sc, channel, 2065 dma_maps->dmamap_table->dm_segs[0].ds_addr); 2066 /* set read/write */ 2067 PCIIDE_DMACMD_WRITE(sc, channel, 2068 ((flags & WDC_DMA_READ) ? IDEDMA_CMD_WRITE : 0) | cp->idedma_cmd); 2069 /* remember flags */ 2070 dma_maps->dma_flags = flags; 2071 return (0); 2072 } 2073 2074 void 2075 pciide_dma_start(void *v, int channel, int drive) 2076 { 2077 struct pciide_softc *sc = v; 2078 2079 WDCDEBUG_PRINT(("pciide_dma_start\n"), DEBUG_XFERS); 2080 PCIIDE_DMACMD_WRITE(sc, channel, PCIIDE_DMACMD_READ(sc, channel) | 2081 IDEDMA_CMD_START); 2082 2083 sc->pciide_channels[channel].dma_in_progress = 1; 2084 } 2085 2086 int 2087 pciide_dma_finish(void *v, int channel, int drive, int force) 2088 { 2089 struct pciide_softc *sc = v; 2090 struct pciide_channel *cp = &sc->pciide_channels[channel]; 2091 u_int8_t status; 2092 int error = 0; 2093 struct pciide_dma_maps *dma_maps = 2094 &sc->pciide_channels[channel].dma_maps[drive]; 2095 2096 status = PCIIDE_DMACTL_READ(sc, channel); 2097 WDCDEBUG_PRINT(("pciide_dma_finish: status 0x%x\n", status), 2098 DEBUG_XFERS); 2099 if (status == 0xff) 2100 return (status); 2101 2102 if (force == 0 && (status & IDEDMA_CTL_INTR) == 0) { 2103 error = WDC_DMAST_NOIRQ; 2104 goto done; 2105 } 2106 2107 /* stop DMA channel */ 2108 PCIIDE_DMACMD_WRITE(sc, channel, 2109 ((dma_maps->dma_flags & WDC_DMA_READ) ? 2110 0x00 : IDEDMA_CMD_WRITE) | cp->idedma_cmd); 2111 2112 /* Unload the map of the data buffer */ 2113 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0, 2114 dma_maps->dmamap_xfer->dm_mapsize, 2115 (dma_maps->dma_flags & WDC_DMA_READ) ? 
2116 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE); 2117 bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer); 2118 2119 /* Clear status bits */ 2120 PCIIDE_DMACTL_WRITE(sc, channel, status); 2121 2122 if ((status & IDEDMA_CTL_ERR) != 0) { 2123 printf("%s:%d:%d: bus-master DMA error: status=0x%x\n", 2124 sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, status); 2125 error |= WDC_DMAST_ERR; 2126 } 2127 2128 if ((status & IDEDMA_CTL_INTR) == 0) { 2129 printf("%s:%d:%d: bus-master DMA error: missing interrupt, " 2130 "status=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, channel, 2131 drive, status); 2132 error |= WDC_DMAST_NOIRQ; 2133 } 2134 2135 if ((status & IDEDMA_CTL_ACT) != 0) { 2136 /* data underrun, may be a valid condition for ATAPI */ 2137 error |= WDC_DMAST_UNDER; 2138 } 2139 2140 done: 2141 sc->pciide_channels[channel].dma_in_progress = 0; 2142 return (error); 2143 } 2144 2145 void 2146 pciide_irqack(struct channel_softc *chp) 2147 { 2148 struct pciide_channel *cp = (struct pciide_channel *)chp; 2149 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2150 int chan = chp->channel; 2151 2152 /* clear status bits in IDE DMA registers */ 2153 PCIIDE_DMACTL_WRITE(sc, chan, PCIIDE_DMACTL_READ(sc, chan)); 2154 } 2155 2156 /* some common code used by several chip_map */ 2157 int 2158 pciide_chansetup(struct pciide_softc *sc, int channel, pcireg_t interface) 2159 { 2160 struct pciide_channel *cp = &sc->pciide_channels[channel]; 2161 sc->wdc_chanarray[channel] = &cp->wdc_channel; 2162 cp->name = PCIIDE_CHANNEL_NAME(channel); 2163 cp->wdc_channel.channel = channel; 2164 cp->wdc_channel.wdc = &sc->sc_wdcdev; 2165 cp->wdc_channel.ch_queue = wdc_alloc_queue(); 2166 if (cp->wdc_channel.ch_queue == NULL) { 2167 printf("%s: %s " 2168 "cannot allocate channel queue", 2169 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 2170 return (0); 2171 } 2172 cp->hw_ok = 1; 2173 2174 return (1); 2175 } 2176 2177 void 2178 pciide_chanfree(struct pciide_softc *sc, int channel) 2179 { 2180 struct pciide_channel *cp = &sc->pciide_channels[channel]; 2181 if (cp->wdc_channel.ch_queue) 2182 wdc_free_queue(cp->wdc_channel.ch_queue); 2183 } 2184 2185 /* some common code used by several chip channel_map */ 2186 void 2187 pciide_mapchan(struct pci_attach_args *pa, struct pciide_channel *cp, 2188 pcireg_t interface, bus_size_t *cmdsizep, bus_size_t *ctlsizep, 2189 int (*pci_intr)(void *)) 2190 { 2191 struct channel_softc *wdc_cp = &cp->wdc_channel; 2192 2193 if (interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) 2194 cp->hw_ok = pciide_mapregs_native(pa, cp, cmdsizep, ctlsizep, 2195 pci_intr); 2196 else 2197 cp->hw_ok = pciide_mapregs_compat(pa, cp, 2198 wdc_cp->channel, cmdsizep, ctlsizep); 2199 if (cp->hw_ok == 0) 2200 return; 2201 wdc_cp->data32iot = wdc_cp->cmd_iot; 2202 wdc_cp->data32ioh = wdc_cp->cmd_ioh; 2203 wdcattach(wdc_cp); 2204 } 2205 2206 void 2207 pciide_unmap_chan(struct pciide_softc *sc, struct pciide_channel *cp, int flags) 2208 { 2209 struct channel_softc *wdc_cp = &cp->wdc_channel; 2210 2211 wdcdetach(wdc_cp, flags); 2212 2213 if (cp->compat != 0) 2214 pciide_unmapregs_compat(sc, cp); 2215 else 2216 pciide_unmapregs_native(sc, cp); 2217 } 2218 2219 /* 2220 * Generic code to call to know if a channel can be disabled. 
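* A channel qualifies only when neither of its drives was detected; in that case a message is printed, hw_ok is cleared and the caller may then turn the channel's decode off.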
Return 1 2221 * if channel can be disabled, 0 if not 2222 */ 2223 int 2224 pciide_chan_candisable(struct pciide_channel *cp) 2225 { 2226 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2227 struct channel_softc *wdc_cp = &cp->wdc_channel; 2228 2229 if ((wdc_cp->ch_drive[0].drive_flags & DRIVE) == 0 && 2230 (wdc_cp->ch_drive[1].drive_flags & DRIVE) == 0) { 2231 printf("%s: %s disabled (no drives)\n", 2232 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 2233 cp->hw_ok = 0; 2234 return (1); 2235 } 2236 return (0); 2237 } 2238 2239 /* 2240 * generic code to map the compat intr if hw_ok=1 and it is a compat channel. 2241 * Set hw_ok=0 on failure 2242 */ 2243 void 2244 pciide_map_compat_intr(struct pci_attach_args *pa, struct pciide_channel *cp, 2245 int compatchan, int interface) 2246 { 2247 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2248 struct channel_softc *wdc_cp = &cp->wdc_channel; 2249 2250 if ((interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) != 0) 2251 return; 2252 2253 cp->compat = 1; 2254 cp->ih = pciide_machdep_compat_intr_establish(&sc->sc_wdcdev.sc_dev, 2255 pa, compatchan, pciide_compat_intr, cp); 2256 if (cp->ih == NULL) { 2257 printf("%s: no compatibility interrupt for use by %s\n", 2258 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 2259 cp->hw_ok = 0; 2260 } 2261 } 2262 2263 /* 2264 * generic code to unmap the compat intr if hw_ok=1 and it is a compat channel. 2265 * Set hw_ok=0 on failure 2266 */ 2267 void 2268 pciide_unmap_compat_intr(struct pci_attach_args *pa, struct pciide_channel *cp, 2269 int compatchan, int interface) 2270 { 2271 struct channel_softc *wdc_cp = &cp->wdc_channel; 2272 2273 if ((interface & PCIIDE_INTERFACE_PCI(wdc_cp->channel)) != 0) 2274 return; 2275 2276 pciide_machdep_compat_intr_disestablish(pa->pa_pc, cp->ih); 2277 } 2278 2279 void 2280 pciide_print_channels(int nchannels, pcireg_t interface) 2281 { 2282 int i; 2283 2284 for (i = 0; i < nchannels; i++) { 2285 printf(", %s %s to %s", PCIIDE_CHANNEL_NAME(i), 2286 (interface & PCIIDE_INTERFACE_SETTABLE(i)) ? 2287 "configured" : "wired", 2288 (interface & PCIIDE_INTERFACE_PCI(i)) ? 
"native-PCI" : 2289 "compatibility"); 2290 } 2291 2292 printf("\n"); 2293 } 2294 2295 void 2296 pciide_print_modes(struct pciide_channel *cp) 2297 { 2298 wdc_print_current_modes(&cp->wdc_channel); 2299 } 2300 2301 void 2302 default_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 2303 { 2304 struct pciide_channel *cp; 2305 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 2306 pcireg_t csr; 2307 int channel, drive; 2308 struct ata_drive_datas *drvp; 2309 u_int8_t idedma_ctl; 2310 bus_size_t cmdsize, ctlsize; 2311 char *failreason; 2312 2313 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) { 2314 printf(": DMA"); 2315 if (sc->sc_pp == &default_product_desc && 2316 (sc->sc_wdcdev.sc_dev.dv_cfdata->cf_flags & 2317 PCIIDE_OPTIONS_DMA) == 0) { 2318 printf(" (unsupported)"); 2319 sc->sc_dma_ok = 0; 2320 } else { 2321 pciide_mapreg_dma(sc, pa); 2322 if (sc->sc_dma_ok != 0) 2323 printf(", (partial support)"); 2324 } 2325 } else { 2326 printf(": no DMA"); 2327 sc->sc_dma_ok = 0; 2328 } 2329 if (sc->sc_dma_ok) { 2330 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 2331 sc->sc_wdcdev.irqack = pciide_irqack; 2332 } 2333 sc->sc_wdcdev.PIO_cap = 0; 2334 sc->sc_wdcdev.DMA_cap = 0; 2335 sc->sc_wdcdev.channels = sc->wdc_chanarray; 2336 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 2337 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16; 2338 2339 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 2340 2341 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2342 cp = &sc->pciide_channels[channel]; 2343 if (pciide_chansetup(sc, channel, interface) == 0) 2344 continue; 2345 if (interface & PCIIDE_INTERFACE_PCI(channel)) { 2346 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, 2347 &ctlsize, pciide_pci_intr); 2348 } else { 2349 cp->hw_ok = pciide_mapregs_compat(pa, cp, 2350 channel, &cmdsize, &ctlsize); 2351 } 2352 if (cp->hw_ok == 0) 2353 continue; 2354 /* 2355 * Check to see if something appears to be there. 2356 */ 2357 failreason = NULL; 2358 pciide_map_compat_intr(pa, cp, channel, interface); 2359 if (cp->hw_ok == 0) 2360 continue; 2361 if (!wdcprobe(&cp->wdc_channel)) { 2362 failreason = "not responding; disabled or no drives?"; 2363 goto next; 2364 } 2365 /* 2366 * Now, make sure it's actually attributable to this PCI IDE 2367 * channel by trying to access the channel again while the 2368 * PCI IDE controller's I/O space is disabled. (If the 2369 * channel no longer appears to be there, it belongs to 2370 * this controller.) YUCK! 
2371 */ 2372 csr = pci_conf_read(sc->sc_pc, sc->sc_tag, 2373 PCI_COMMAND_STATUS_REG); 2374 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG, 2375 csr & ~PCI_COMMAND_IO_ENABLE); 2376 if (wdcprobe(&cp->wdc_channel)) 2377 failreason = "other hardware responding at addresses"; 2378 pci_conf_write(sc->sc_pc, sc->sc_tag, 2379 PCI_COMMAND_STATUS_REG, csr); 2380 next: 2381 if (failreason) { 2382 printf("%s: %s ignored (%s)\n", 2383 sc->sc_wdcdev.sc_dev.dv_xname, cp->name, 2384 failreason); 2385 cp->hw_ok = 0; 2386 pciide_unmap_compat_intr(pa, cp, channel, interface); 2387 bus_space_unmap(cp->wdc_channel.cmd_iot, 2388 cp->wdc_channel.cmd_ioh, cmdsize); 2389 if (interface & PCIIDE_INTERFACE_PCI(channel)) 2390 bus_space_unmap(cp->wdc_channel.ctl_iot, 2391 cp->ctl_baseioh, ctlsize); 2392 else 2393 bus_space_unmap(cp->wdc_channel.ctl_iot, 2394 cp->wdc_channel.ctl_ioh, ctlsize); 2395 } 2396 if (cp->hw_ok) { 2397 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot; 2398 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh; 2399 wdcattach(&cp->wdc_channel); 2400 } 2401 } 2402 2403 if (sc->sc_dma_ok == 0) 2404 return; 2405 2406 /* Allocate DMA maps */ 2407 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2408 idedma_ctl = 0; 2409 cp = &sc->pciide_channels[channel]; 2410 for (drive = 0; drive < 2; drive++) { 2411 drvp = &cp->wdc_channel.ch_drive[drive]; 2412 /* If no drive, skip */ 2413 if ((drvp->drive_flags & DRIVE) == 0) 2414 continue; 2415 if ((drvp->drive_flags & DRIVE_DMA) == 0) 2416 continue; 2417 if (pciide_dma_table_setup(sc, channel, drive) != 0) { 2418 /* Abort DMA setup */ 2419 printf("%s:%d:%d: cannot allocate DMA maps, " 2420 "using PIO transfers\n", 2421 sc->sc_wdcdev.sc_dev.dv_xname, 2422 channel, drive); 2423 drvp->drive_flags &= ~DRIVE_DMA; 2424 } 2425 printf("%s:%d:%d: using DMA data transfers\n", 2426 sc->sc_wdcdev.sc_dev.dv_xname, 2427 channel, drive); 2428 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 2429 } 2430 if (idedma_ctl != 0) { 2431 /* Add software bits in status register */ 2432 PCIIDE_DMACTL_WRITE(sc, channel, idedma_ctl); 2433 } 2434 } 2435 } 2436 2437 void 2438 default_chip_unmap(struct pciide_softc *sc, int flags) 2439 { 2440 struct pciide_channel *cp; 2441 int channel; 2442 2443 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2444 cp = &sc->pciide_channels[channel]; 2445 pciide_unmap_chan(sc, cp, flags); 2446 pciide_chanfree(sc, channel); 2447 } 2448 2449 pciide_unmapreg_dma(sc); 2450 2451 if (sc->sc_cookie) 2452 free(sc->sc_cookie, M_DEVBUF); 2453 } 2454 2455 void 2456 sata_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 2457 { 2458 struct pciide_channel *cp; 2459 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 2460 int channel; 2461 bus_size_t cmdsize, ctlsize; 2462 2463 if (interface == 0) { 2464 WDCDEBUG_PRINT(("sata_chip_map interface == 0\n"), 2465 DEBUG_PROBE); 2466 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA | 2467 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1); 2468 } 2469 2470 printf(": DMA"); 2471 pciide_mapreg_dma(sc, pa); 2472 printf("\n"); 2473 2474 if (sc->sc_dma_ok) { 2475 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA | 2476 WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 2477 sc->sc_wdcdev.irqack = pciide_irqack; 2478 } 2479 sc->sc_wdcdev.PIO_cap = 4; 2480 sc->sc_wdcdev.DMA_cap = 2; 2481 sc->sc_wdcdev.UDMA_cap = 6; 2482 2483 sc->sc_wdcdev.channels = sc->wdc_chanarray; 2484 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 2485 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 2486 
WDC_CAPABILITY_MODE | WDC_CAPABILITY_SATA; 2487 sc->sc_wdcdev.set_modes = sata_setup_channel; 2488 sc->chip_unmap = default_chip_unmap; 2489 2490 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2491 cp = &sc->pciide_channels[channel]; 2492 if (pciide_chansetup(sc, channel, interface) == 0) 2493 continue; 2494 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 2495 pciide_pci_intr); 2496 sata_setup_channel(&cp->wdc_channel); 2497 } 2498 } 2499 2500 void 2501 sata_setup_channel(struct channel_softc *chp) 2502 { 2503 struct ata_drive_datas *drvp; 2504 int drive; 2505 u_int32_t idedma_ctl; 2506 struct pciide_channel *cp = (struct pciide_channel *)chp; 2507 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2508 2509 /* setup DMA if needed */ 2510 pciide_channel_dma_setup(cp); 2511 2512 idedma_ctl = 0; 2513 2514 for (drive = 0; drive < 2; drive++) { 2515 drvp = &chp->ch_drive[drive]; 2516 /* If no drive, skip */ 2517 if ((drvp->drive_flags & DRIVE) == 0) 2518 continue; 2519 if (drvp->drive_flags & DRIVE_UDMA) { 2520 /* use Ultra/DMA */ 2521 drvp->drive_flags &= ~DRIVE_DMA; 2522 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 2523 } else if (drvp->drive_flags & DRIVE_DMA) { 2524 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 2525 } 2526 } 2527 2528 /* 2529 * Nothing to do to setup modes; it is meaningless in S-ATA 2530 * (but many S-ATA drives still want to get the SET_FEATURE 2531 * command). 2532 */ 2533 if (idedma_ctl != 0) { 2534 /* Add software bits in status register */ 2535 PCIIDE_DMACTL_WRITE(sc, chp->channel, idedma_ctl); 2536 } 2537 pciide_print_modes(cp); 2538 } 2539 2540 void 2541 piix_timing_debug(struct pciide_softc *sc) 2542 { 2543 WDCDEBUG_PRINT(("piix_setup_chip: idetim=0x%x", 2544 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM)), 2545 DEBUG_PROBE); 2546 if (sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_IDE && 2547 sc->sc_pp->ide_product != PCI_PRODUCT_INTEL_82371FB_ISA) { 2548 WDCDEBUG_PRINT((", sidetim=0x%x", 2549 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM)), 2550 DEBUG_PROBE); 2551 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) { 2552 WDCDEBUG_PRINT((", udmareg 0x%x", 2553 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG)), 2554 DEBUG_PROBE); 2555 } 2556 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_6300ESB_IDE || 2557 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_6321ESB_IDE || 2558 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE || 2559 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE || 2560 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE || 2561 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE || 2562 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CAM_IDE || 2563 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE || 2564 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE || 2565 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBL_IDE || 2566 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBM_IDE || 2567 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801EB_IDE || 2568 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801FB_IDE || 2569 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801GB_IDE || 2570 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801HBM_IDE || 2571 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82372FB_IDE) { 2572 WDCDEBUG_PRINT((", IDE_CONTROL 0x%x", 2573 pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG)), 2574 DEBUG_PROBE); 2575 } 2576 } 2577 WDCDEBUG_PRINT(("\n"), DEBUG_PROBE); 2578 } 2579 2580 void 2581 piix_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 2582 { 
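/* PIIX/ICH IDE attach: map the bus-master DMA registers, derive the UDMA ceiling from the product ID, pick the PIIX or PIIX3/4 per-channel setup routine, then attach every channel whose decode enable bit is set in PIIX_IDETIM. */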
2583 struct pciide_channel *cp; 2584 int channel; 2585 u_int32_t idetim; 2586 bus_size_t cmdsize, ctlsize; 2587 2588 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 2589 2590 printf(": DMA"); 2591 pciide_mapreg_dma(sc, pa); 2592 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 2593 WDC_CAPABILITY_MODE; 2594 if (sc->sc_dma_ok) { 2595 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 2596 sc->sc_wdcdev.irqack = pciide_irqack; 2597 switch (sc->sc_pp->ide_product) { 2598 case PCI_PRODUCT_INTEL_6300ESB_IDE: 2599 case PCI_PRODUCT_INTEL_6321ESB_IDE: 2600 case PCI_PRODUCT_INTEL_82371AB_IDE: 2601 case PCI_PRODUCT_INTEL_82372FB_IDE: 2602 case PCI_PRODUCT_INTEL_82440MX_IDE: 2603 case PCI_PRODUCT_INTEL_82451NX: 2604 case PCI_PRODUCT_INTEL_82801AA_IDE: 2605 case PCI_PRODUCT_INTEL_82801AB_IDE: 2606 case PCI_PRODUCT_INTEL_82801BAM_IDE: 2607 case PCI_PRODUCT_INTEL_82801BA_IDE: 2608 case PCI_PRODUCT_INTEL_82801CAM_IDE: 2609 case PCI_PRODUCT_INTEL_82801CA_IDE: 2610 case PCI_PRODUCT_INTEL_82801DB_IDE: 2611 case PCI_PRODUCT_INTEL_82801DBL_IDE: 2612 case PCI_PRODUCT_INTEL_82801DBM_IDE: 2613 case PCI_PRODUCT_INTEL_82801EB_IDE: 2614 case PCI_PRODUCT_INTEL_82801FB_IDE: 2615 case PCI_PRODUCT_INTEL_82801GB_IDE: 2616 case PCI_PRODUCT_INTEL_82801HBM_IDE: 2617 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 2618 break; 2619 } 2620 } 2621 sc->sc_wdcdev.PIO_cap = 4; 2622 sc->sc_wdcdev.DMA_cap = 2; 2623 switch (sc->sc_pp->ide_product) { 2624 case PCI_PRODUCT_INTEL_82801AA_IDE: 2625 case PCI_PRODUCT_INTEL_82372FB_IDE: 2626 sc->sc_wdcdev.UDMA_cap = 4; 2627 break; 2628 case PCI_PRODUCT_INTEL_6300ESB_IDE: 2629 case PCI_PRODUCT_INTEL_6321ESB_IDE: 2630 case PCI_PRODUCT_INTEL_82801BAM_IDE: 2631 case PCI_PRODUCT_INTEL_82801BA_IDE: 2632 case PCI_PRODUCT_INTEL_82801CAM_IDE: 2633 case PCI_PRODUCT_INTEL_82801CA_IDE: 2634 case PCI_PRODUCT_INTEL_82801DB_IDE: 2635 case PCI_PRODUCT_INTEL_82801DBL_IDE: 2636 case PCI_PRODUCT_INTEL_82801DBM_IDE: 2637 case PCI_PRODUCT_INTEL_82801EB_IDE: 2638 case PCI_PRODUCT_INTEL_82801FB_IDE: 2639 case PCI_PRODUCT_INTEL_82801GB_IDE: 2640 case PCI_PRODUCT_INTEL_82801HBM_IDE: 2641 sc->sc_wdcdev.UDMA_cap = 5; 2642 break; 2643 default: 2644 sc->sc_wdcdev.UDMA_cap = 2; 2645 break; 2646 } 2647 2648 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82371FB_IDE || 2649 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82371FB_ISA) { 2650 sc->sc_wdcdev.set_modes = piix_setup_channel; 2651 } else { 2652 sc->sc_wdcdev.set_modes = piix3_4_setup_channel; 2653 } 2654 sc->sc_wdcdev.channels = sc->wdc_chanarray; 2655 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 2656 2657 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 2658 2659 piix_timing_debug(sc); 2660 2661 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2662 cp = &sc->pciide_channels[channel]; 2663 2664 if (pciide_chansetup(sc, channel, interface) == 0) 2665 continue; 2666 idetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM); 2667 if ((PIIX_IDETIM_READ(idetim, channel) & 2668 PIIX_IDETIM_IDE) == 0) { 2669 printf("%s: %s ignored (disabled)\n", 2670 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 2671 continue; 2672 } 2673 pciide_map_compat_intr(pa, cp, channel, interface); 2674 if (cp->hw_ok == 0) 2675 continue; 2676 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 2677 pciide_pci_intr); 2678 if (cp->hw_ok == 0) 2679 goto next; 2680 if (pciide_chan_candisable(cp)) { 2681 idetim = PIIX_IDETIM_CLEAR(idetim, PIIX_IDETIM_IDE, 2682 channel); 2683 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, 2684 idetim); 2685 
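/* no drives on this channel: its decode enable bit has just been cleared */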
} 2686 if (cp->hw_ok == 0) 2687 goto next; 2688 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 2689 next: 2690 if (cp->hw_ok == 0) 2691 pciide_unmap_compat_intr(pa, cp, channel, interface); 2692 } 2693 2694 piix_timing_debug(sc); 2695 } 2696 2697 void 2698 piixsata_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 2699 { 2700 struct pciide_channel *cp; 2701 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 2702 int channel; 2703 bus_size_t cmdsize, ctlsize; 2704 u_int8_t reg, ich = 0; 2705 2706 printf(": DMA"); 2707 pciide_mapreg_dma(sc, pa); 2708 2709 if (sc->sc_dma_ok) { 2710 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA | 2711 WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 2712 sc->sc_wdcdev.irqack = pciide_irqack; 2713 sc->sc_wdcdev.DMA_cap = 2; 2714 sc->sc_wdcdev.UDMA_cap = 6; 2715 } 2716 sc->sc_wdcdev.PIO_cap = 4; 2717 2718 sc->sc_wdcdev.channels = sc->wdc_chanarray; 2719 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 2720 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 2721 WDC_CAPABILITY_MODE | WDC_CAPABILITY_SATA; 2722 sc->sc_wdcdev.set_modes = sata_setup_channel; 2723 2724 switch(sc->sc_pp->ide_product) { 2725 case PCI_PRODUCT_INTEL_6300ESB_SATA: 2726 case PCI_PRODUCT_INTEL_6300ESB_SATA2: 2727 case PCI_PRODUCT_INTEL_82801EB_SATA: 2728 case PCI_PRODUCT_INTEL_82801ER_SATA: 2729 ich = 5; 2730 break; 2731 case PCI_PRODUCT_INTEL_82801FB_SATA: 2732 case PCI_PRODUCT_INTEL_82801FR_SATA: 2733 case PCI_PRODUCT_INTEL_82801FBM_SATA: 2734 ich = 6; 2735 break; 2736 default: 2737 ich = 7; 2738 break; 2739 } 2740 2741 /* 2742 * Put the SATA portion of controllers that don't operate in combined 2743 * mode into native PCI modes so the maximum number of devices can be 2744 * used. Intel calls this "enhanced mode" 2745 */ 2746 if (ich == 5) { 2747 reg = pciide_pci_read(sc->sc_pc, sc->sc_tag, ICH5_SATA_MAP); 2748 if ((reg & ICH5_SATA_MAP_COMBINED) == 0) { 2749 reg = pciide_pci_read(pa->pa_pc, pa->pa_tag, 2750 ICH5_SATA_PI); 2751 reg |= ICH5_SATA_PI_PRI_NATIVE | 2752 ICH5_SATA_PI_SEC_NATIVE; 2753 pciide_pci_write(pa->pa_pc, pa->pa_tag, 2754 ICH5_SATA_PI, reg); 2755 interface |= PCIIDE_INTERFACE_PCI(0) | 2756 PCIIDE_INTERFACE_PCI(1); 2757 } 2758 } else { 2759 reg = pciide_pci_read(sc->sc_pc, sc->sc_tag, ICH5_SATA_MAP) & 2760 ICH6_SATA_MAP_CMB_MASK; 2761 if (reg != ICH6_SATA_MAP_CMB_PRI && 2762 reg != ICH6_SATA_MAP_CMB_SEC) { 2763 reg = pciide_pci_read(pa->pa_pc, pa->pa_tag, 2764 ICH5_SATA_PI); 2765 reg |= ICH5_SATA_PI_PRI_NATIVE | 2766 ICH5_SATA_PI_SEC_NATIVE; 2767 2768 pciide_pci_write(pa->pa_pc, pa->pa_tag, 2769 ICH5_SATA_PI, reg); 2770 interface |= PCIIDE_INTERFACE_PCI(0) | 2771 PCIIDE_INTERFACE_PCI(1); 2772 2773 /* 2774 * Ask for SATA IDE Mode, we don't need to do this 2775 * for the combined mode case as combined mode is 2776 * only allowed in IDE Mode 2777 */ 2778 if (ich >= 7) { 2779 reg = pciide_pci_read(sc->sc_pc, sc->sc_tag, 2780 ICH5_SATA_MAP) & ~ICH7_SATA_MAP_SMS_MASK; 2781 pciide_pci_write(pa->pa_pc, pa->pa_tag, 2782 ICH5_SATA_MAP, reg); 2783 } 2784 } 2785 } 2786 2787 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 2788 2789 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 2790 cp = &sc->pciide_channels[channel]; 2791 if (pciide_chansetup(sc, channel, interface) == 0) 2792 continue; 2793 2794 pciide_map_compat_intr(pa, cp, channel, interface); 2795 if (cp->hw_ok == 0) 2796 continue; 2797 2798 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 2799 pciide_pci_intr); 2800 if (cp->hw_ok != 0) 2801 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 
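/* if the channel could not be mapped, release the compat interrupt claimed above */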
2802 2803 if (cp->hw_ok == 0) 2804 pciide_unmap_compat_intr(pa, cp, channel, interface); 2805 } 2806 } 2807 2808 void 2809 piix_setup_channel(struct channel_softc *chp) 2810 { 2811 u_int8_t mode[2], drive; 2812 u_int32_t oidetim, idetim, idedma_ctl; 2813 struct pciide_channel *cp = (struct pciide_channel *)chp; 2814 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2815 struct ata_drive_datas *drvp = cp->wdc_channel.ch_drive; 2816 2817 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM); 2818 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, chp->channel); 2819 idedma_ctl = 0; 2820 2821 /* set up new idetim: Enable IDE registers decode */ 2822 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, 2823 chp->channel); 2824 2825 /* setup DMA */ 2826 pciide_channel_dma_setup(cp); 2827 2828 /* 2829 * Here we have to mess up with drives mode: PIIX can't have 2830 * different timings for master and slave drives. 2831 * We need to find the best combination. 2832 */ 2833 2834 /* If both drives supports DMA, take the lower mode */ 2835 if ((drvp[0].drive_flags & DRIVE_DMA) && 2836 (drvp[1].drive_flags & DRIVE_DMA)) { 2837 mode[0] = mode[1] = 2838 min(drvp[0].DMA_mode, drvp[1].DMA_mode); 2839 drvp[0].DMA_mode = mode[0]; 2840 drvp[1].DMA_mode = mode[1]; 2841 goto ok; 2842 } 2843 /* 2844 * If only one drive supports DMA, use its mode, and 2845 * put the other one in PIO mode 0 if mode not compatible 2846 */ 2847 if (drvp[0].drive_flags & DRIVE_DMA) { 2848 mode[0] = drvp[0].DMA_mode; 2849 mode[1] = drvp[1].PIO_mode; 2850 if (piix_isp_pio[mode[1]] != piix_isp_dma[mode[0]] || 2851 piix_rtc_pio[mode[1]] != piix_rtc_dma[mode[0]]) 2852 mode[1] = drvp[1].PIO_mode = 0; 2853 goto ok; 2854 } 2855 if (drvp[1].drive_flags & DRIVE_DMA) { 2856 mode[1] = drvp[1].DMA_mode; 2857 mode[0] = drvp[0].PIO_mode; 2858 if (piix_isp_pio[mode[0]] != piix_isp_dma[mode[1]] || 2859 piix_rtc_pio[mode[0]] != piix_rtc_dma[mode[1]]) 2860 mode[0] = drvp[0].PIO_mode = 0; 2861 goto ok; 2862 } 2863 /* 2864 * If both drives are not DMA, takes the lower mode, unless 2865 * one of them is PIO mode < 2 2866 */ 2867 if (drvp[0].PIO_mode < 2) { 2868 mode[0] = drvp[0].PIO_mode = 0; 2869 mode[1] = drvp[1].PIO_mode; 2870 } else if (drvp[1].PIO_mode < 2) { 2871 mode[1] = drvp[1].PIO_mode = 0; 2872 mode[0] = drvp[0].PIO_mode; 2873 } else { 2874 mode[0] = mode[1] = 2875 min(drvp[1].PIO_mode, drvp[0].PIO_mode); 2876 drvp[0].PIO_mode = mode[0]; 2877 drvp[1].PIO_mode = mode[1]; 2878 } 2879 ok: /* The modes are setup */ 2880 for (drive = 0; drive < 2; drive++) { 2881 if (drvp[drive].drive_flags & DRIVE_DMA) { 2882 idetim |= piix_setup_idetim_timings( 2883 mode[drive], 1, chp->channel); 2884 goto end; 2885 } 2886 } 2887 /* If we are there, none of the drives are DMA */ 2888 if (mode[0] >= 2) 2889 idetim |= piix_setup_idetim_timings( 2890 mode[0], 0, chp->channel); 2891 else 2892 idetim |= piix_setup_idetim_timings( 2893 mode[1], 0, chp->channel); 2894 end: /* 2895 * timing mode is now set up in the controller. 
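* On the original PIIX the ISP/RTC fields are shared by both drives of a channel, which is why a single compromise mode was chosen above.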
Enable 2896 * it per-drive 2897 */ 2898 for (drive = 0; drive < 2; drive++) { 2899 /* If no drive, skip */ 2900 if ((drvp[drive].drive_flags & DRIVE) == 0) 2901 continue; 2902 idetim |= piix_setup_idetim_drvs(&drvp[drive]); 2903 if (drvp[drive].drive_flags & DRIVE_DMA) 2904 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 2905 } 2906 if (idedma_ctl != 0) { 2907 /* Add software bits in status register */ 2908 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 2909 IDEDMA_CTL(chp->channel), 2910 idedma_ctl); 2911 } 2912 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim); 2913 pciide_print_modes(cp); 2914 } 2915 2916 void 2917 piix3_4_setup_channel(struct channel_softc *chp) 2918 { 2919 struct ata_drive_datas *drvp; 2920 u_int32_t oidetim, idetim, sidetim, udmareg, ideconf, idedma_ctl; 2921 struct pciide_channel *cp = (struct pciide_channel *)chp; 2922 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 2923 int drive; 2924 int channel = chp->channel; 2925 2926 oidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_IDETIM); 2927 sidetim = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM); 2928 udmareg = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG); 2929 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, PIIX_CONFIG); 2930 idetim = PIIX_IDETIM_CLEAR(oidetim, 0xffff, channel); 2931 sidetim &= ~(PIIX_SIDETIM_ISP_MASK(channel) | 2932 PIIX_SIDETIM_RTC_MASK(channel)); 2933 2934 idedma_ctl = 0; 2935 /* If channel disabled, no need to go further */ 2936 if ((PIIX_IDETIM_READ(oidetim, channel) & PIIX_IDETIM_IDE) == 0) 2937 return; 2938 /* set up new idetim: Enable IDE registers decode */ 2939 idetim = PIIX_IDETIM_SET(idetim, PIIX_IDETIM_IDE, channel); 2940 2941 /* setup DMA if needed */ 2942 pciide_channel_dma_setup(cp); 2943 2944 for (drive = 0; drive < 2; drive++) { 2945 udmareg &= ~(PIIX_UDMACTL_DRV_EN(channel, drive) | 2946 PIIX_UDMATIM_SET(0x3, channel, drive)); 2947 drvp = &chp->ch_drive[drive]; 2948 /* If no drive, skip */ 2949 if ((drvp->drive_flags & DRIVE) == 0) 2950 continue; 2951 if (((drvp->drive_flags & DRIVE_DMA) == 0 && 2952 (drvp->drive_flags & DRIVE_UDMA) == 0)) 2953 goto pio; 2954 2955 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_6300ESB_IDE || 2956 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_6321ESB_IDE || 2957 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE || 2958 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AB_IDE || 2959 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE || 2960 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE || 2961 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CAM_IDE || 2962 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE || 2963 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE || 2964 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBL_IDE || 2965 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBM_IDE || 2966 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801EB_IDE || 2967 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801FB_IDE || 2968 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801GB_IDE || 2969 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801HBM_IDE || 2970 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82372FB_IDE) { 2971 ideconf |= PIIX_CONFIG_PINGPONG; 2972 } 2973 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_6300ESB_IDE || 2974 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_6321ESB_IDE || 2975 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BAM_IDE || 2976 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801BA_IDE|| 2977 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CAM_IDE|| 2978 
sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801CA_IDE || 2979 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DB_IDE || 2980 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBL_IDE || 2981 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801DBM_IDE || 2982 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801EB_IDE || 2983 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801FB_IDE || 2984 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801GB_IDE || 2985 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801HBM_IDE) { 2986 /* setup Ultra/100 */ 2987 if (drvp->UDMA_mode > 2 && 2988 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0) 2989 drvp->UDMA_mode = 2; 2990 if (drvp->UDMA_mode > 4) { 2991 ideconf |= PIIX_CONFIG_UDMA100(channel, drive); 2992 } else { 2993 ideconf &= ~PIIX_CONFIG_UDMA100(channel, drive); 2994 if (drvp->UDMA_mode > 2) { 2995 ideconf |= PIIX_CONFIG_UDMA66(channel, 2996 drive); 2997 } else { 2998 ideconf &= ~PIIX_CONFIG_UDMA66(channel, 2999 drive); 3000 } 3001 } 3002 } 3003 if (sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82801AA_IDE || 3004 sc->sc_pp->ide_product == PCI_PRODUCT_INTEL_82372FB_IDE) { 3005 /* setup Ultra/66 */ 3006 if (drvp->UDMA_mode > 2 && 3007 (ideconf & PIIX_CONFIG_CR(channel, drive)) == 0) 3008 drvp->UDMA_mode = 2; 3009 if (drvp->UDMA_mode > 2) 3010 ideconf |= PIIX_CONFIG_UDMA66(channel, drive); 3011 else 3012 ideconf &= ~PIIX_CONFIG_UDMA66(channel, drive); 3013 } 3014 3015 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) && 3016 (drvp->drive_flags & DRIVE_UDMA)) { 3017 /* use Ultra/DMA */ 3018 drvp->drive_flags &= ~DRIVE_DMA; 3019 udmareg |= PIIX_UDMACTL_DRV_EN( channel, drive); 3020 udmareg |= PIIX_UDMATIM_SET( 3021 piix4_sct_udma[drvp->UDMA_mode], channel, drive); 3022 } else { 3023 /* use Multiword DMA */ 3024 drvp->drive_flags &= ~DRIVE_UDMA; 3025 if (drive == 0) { 3026 idetim |= piix_setup_idetim_timings( 3027 drvp->DMA_mode, 1, channel); 3028 } else { 3029 sidetim |= piix_setup_sidetim_timings( 3030 drvp->DMA_mode, 1, channel); 3031 idetim = PIIX_IDETIM_SET(idetim, 3032 PIIX_IDETIM_SITRE, channel); 3033 } 3034 } 3035 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3036 3037 pio: /* use PIO mode */ 3038 idetim |= piix_setup_idetim_drvs(drvp); 3039 if (drive == 0) { 3040 idetim |= piix_setup_idetim_timings( 3041 drvp->PIO_mode, 0, channel); 3042 } else { 3043 sidetim |= piix_setup_sidetim_timings( 3044 drvp->PIO_mode, 0, channel); 3045 idetim = PIIX_IDETIM_SET(idetim, 3046 PIIX_IDETIM_SITRE, channel); 3047 } 3048 } 3049 if (idedma_ctl != 0) { 3050 /* Add software bits in status register */ 3051 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3052 IDEDMA_CTL(channel), 3053 idedma_ctl); 3054 } 3055 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_IDETIM, idetim); 3056 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_SIDETIM, sidetim); 3057 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_UDMAREG, udmareg); 3058 pci_conf_write(sc->sc_pc, sc->sc_tag, PIIX_CONFIG, ideconf); 3059 pciide_print_modes(cp); 3060 } 3061 3062 3063 /* setup ISP and RTC fields, based on mode */ 3064 u_int32_t 3065 piix_setup_idetim_timings(u_int8_t mode, u_int8_t dma, u_int8_t channel) 3066 { 3067 3068 if (dma) 3069 return (PIIX_IDETIM_SET(0, 3070 PIIX_IDETIM_ISP_SET(piix_isp_dma[mode]) | 3071 PIIX_IDETIM_RTC_SET(piix_rtc_dma[mode]), 3072 channel)); 3073 else 3074 return (PIIX_IDETIM_SET(0, 3075 PIIX_IDETIM_ISP_SET(piix_isp_pio[mode]) | 3076 PIIX_IDETIM_RTC_SET(piix_rtc_pio[mode]), 3077 channel)); 3078 } 3079 3080 /* setup DTE, PPE, IE and TIME field based on PIO mode */ 3081 u_int32_t 3082 piix_setup_idetim_drvs(struct 
ata_drive_datas *drvp) 3083 { 3084 u_int32_t ret = 0; 3085 struct channel_softc *chp = drvp->chnl_softc; 3086 u_int8_t channel = chp->channel; 3087 u_int8_t drive = drvp->drive; 3088 3089 /* 3090 * If the drive is using UDMA, the timing setup is independent, 3091 * so just check DMA and PIO here. 3092 */ 3093 if (drvp->drive_flags & DRIVE_DMA) { 3094 /* if mode = DMA mode 0, use compatible timings */ 3095 if ((drvp->drive_flags & DRIVE_DMA) && 3096 drvp->DMA_mode == 0) { 3097 drvp->PIO_mode = 0; 3098 return (ret); 3099 } 3100 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel); 3101 /* 3102 * If PIO and DMA timings are the same, use fast timings for PIO 3103 * too, else fall back to compat timings. 3104 */ 3105 if ((piix_isp_pio[drvp->PIO_mode] != 3106 piix_isp_dma[drvp->DMA_mode]) || 3107 (piix_rtc_pio[drvp->PIO_mode] != 3108 piix_rtc_dma[drvp->DMA_mode])) 3109 drvp->PIO_mode = 0; 3110 /* if PIO mode <= 2, use compat timings for PIO */ 3111 if (drvp->PIO_mode <= 2) { 3112 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_DTE(drive), 3113 channel); 3114 return (ret); 3115 } 3116 } 3117 3118 /* 3119 * Now set up PIO modes. If mode < 2, use compat timings. 3120 * Else enable fast timings. Enable IORDY and prefetch/post 3121 * if PIO mode >= 3. 3122 */ 3123 3124 if (drvp->PIO_mode < 2) 3125 return (ret); 3126 3127 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_TIME(drive), channel); 3128 if (drvp->PIO_mode >= 3) { 3129 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_IE(drive), channel); 3130 ret = PIIX_IDETIM_SET(ret, PIIX_IDETIM_PPE(drive), channel); 3131 } 3132 return (ret); 3133 } 3134 3135 /* set up values in SIDETIM registers, based on mode */ 3136 u_int32_t 3137 piix_setup_sidetim_timings(u_int8_t mode, u_int8_t dma, u_int8_t channel) 3138 { 3139 if (dma) 3140 return (PIIX_SIDETIM_ISP_SET(piix_isp_dma[mode], channel) | 3141 PIIX_SIDETIM_RTC_SET(piix_rtc_dma[mode], channel)); 3142 else 3143 return (PIIX_SIDETIM_ISP_SET(piix_isp_pio[mode], channel) | 3144 PIIX_SIDETIM_RTC_SET(piix_rtc_pio[mode], channel)); 3145 } 3146 3147 void 3148 amd756_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 3149 { 3150 struct pciide_channel *cp; 3151 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 3152 int channel; 3153 pcireg_t chanenable; 3154 bus_size_t cmdsize, ctlsize; 3155 3156 printf(": DMA"); 3157 pciide_mapreg_dma(sc, pa); 3158 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 3159 WDC_CAPABILITY_MODE; 3160 if (sc->sc_dma_ok) { 3161 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 3162 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 3163 sc->sc_wdcdev.irqack = pciide_irqack; 3164 } 3165 sc->sc_wdcdev.PIO_cap = 4; 3166 sc->sc_wdcdev.DMA_cap = 2; 3167 switch (sc->sc_pp->ide_product) { 3168 case PCI_PRODUCT_AMD_8111_IDE: 3169 sc->sc_wdcdev.UDMA_cap = 6; 3170 break; 3171 case PCI_PRODUCT_AMD_766_IDE: 3172 case PCI_PRODUCT_AMD_PBC768_IDE: 3173 sc->sc_wdcdev.UDMA_cap = 5; 3174 break; 3175 default: 3176 sc->sc_wdcdev.UDMA_cap = 4; 3177 break; 3178 } 3179 sc->sc_wdcdev.set_modes = amd756_setup_channel; 3180 sc->sc_wdcdev.channels = sc->wdc_chanarray; 3181 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 3182 chanenable = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_CHANSTATUS_EN); 3183 3184 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 3185 3186 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 3187 cp = &sc->pciide_channels[channel]; 3188 if (pciide_chansetup(sc, channel, interface) == 0) 3189 continue; 3190 3191 if ((chanenable & AMD756_CHAN_EN(channel)) == 0) { 3192
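/* channel switched off in AMD756_CHANSTATUS_EN: report it and skip */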
printf("%s: %s ignored (disabled)\n", 3193 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 3194 continue; 3195 } 3196 pciide_map_compat_intr(pa, cp, channel, interface); 3197 if (cp->hw_ok == 0) 3198 continue; 3199 3200 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 3201 pciide_pci_intr); 3202 3203 if (pciide_chan_candisable(cp)) { 3204 chanenable &= ~AMD756_CHAN_EN(channel); 3205 } 3206 if (cp->hw_ok == 0) { 3207 pciide_unmap_compat_intr(pa, cp, channel, interface); 3208 continue; 3209 } 3210 3211 amd756_setup_channel(&cp->wdc_channel); 3212 } 3213 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_CHANSTATUS_EN, 3214 chanenable); 3215 return; 3216 } 3217 3218 void 3219 amd756_setup_channel(struct channel_softc *chp) 3220 { 3221 u_int32_t udmatim_reg, datatim_reg; 3222 u_int8_t idedma_ctl; 3223 int mode, drive; 3224 struct ata_drive_datas *drvp; 3225 struct pciide_channel *cp = (struct pciide_channel *)chp; 3226 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 3227 pcireg_t chanenable; 3228 #ifndef PCIIDE_AMD756_ENABLEDMA 3229 int product = sc->sc_pp->ide_product; 3230 int rev = sc->sc_rev; 3231 #endif 3232 3233 idedma_ctl = 0; 3234 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_DATATIM); 3235 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, AMD756_UDMA); 3236 datatim_reg &= ~AMD756_DATATIM_MASK(chp->channel); 3237 udmatim_reg &= ~AMD756_UDMA_MASK(chp->channel); 3238 chanenable = pci_conf_read(sc->sc_pc, sc->sc_tag, 3239 AMD756_CHANSTATUS_EN); 3240 3241 /* setup DMA if needed */ 3242 pciide_channel_dma_setup(cp); 3243 3244 for (drive = 0; drive < 2; drive++) { 3245 drvp = &chp->ch_drive[drive]; 3246 /* If no drive, skip */ 3247 if ((drvp->drive_flags & DRIVE) == 0) 3248 continue; 3249 /* add timing values, setup DMA if needed */ 3250 if (((drvp->drive_flags & DRIVE_DMA) == 0 && 3251 (drvp->drive_flags & DRIVE_UDMA) == 0)) { 3252 mode = drvp->PIO_mode; 3253 goto pio; 3254 } 3255 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) && 3256 (drvp->drive_flags & DRIVE_UDMA)) { 3257 /* use Ultra/DMA */ 3258 drvp->drive_flags &= ~DRIVE_DMA; 3259 3260 /* Check cable */ 3261 if ((chanenable & AMD756_CABLE(chp->channel, 3262 drive)) == 0 && drvp->UDMA_mode > 2) { 3263 WDCDEBUG_PRINT(("%s(%s:%d:%d): 80-wire " 3264 "cable not detected\n", drvp->drive_name, 3265 sc->sc_wdcdev.sc_dev.dv_xname, 3266 chp->channel, drive), DEBUG_PROBE); 3267 drvp->UDMA_mode = 2; 3268 } 3269 3270 udmatim_reg |= AMD756_UDMA_EN(chp->channel, drive) | 3271 AMD756_UDMA_EN_MTH(chp->channel, drive) | 3272 AMD756_UDMA_TIME(chp->channel, drive, 3273 amd756_udma_tim[drvp->UDMA_mode]); 3274 /* can use PIO timings, MW DMA unused */ 3275 mode = drvp->PIO_mode; 3276 } else { 3277 /* use Multiword DMA, but only if revision is OK */ 3278 drvp->drive_flags &= ~DRIVE_UDMA; 3279 #ifndef PCIIDE_AMD756_ENABLEDMA 3280 /* 3281 * The workaround doesn't seem to be necessary 3282 * with all drives, so it can be disabled by 3283 * PCIIDE_AMD756_ENABLEDMA. It causes a hard hang if 3284 * triggered. 
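* When the option is not defined, AMD756_CHIPREV_DISABLEDMA() below checks the product and revision and falls back to PIO instead of risking the hang.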
3285 */ 3286 if (AMD756_CHIPREV_DISABLEDMA(product, rev)) { 3287 printf("%s:%d:%d: multi-word DMA disabled due " 3288 "to chip revision\n", 3289 sc->sc_wdcdev.sc_dev.dv_xname, 3290 chp->channel, drive); 3291 mode = drvp->PIO_mode; 3292 drvp->drive_flags &= ~DRIVE_DMA; 3293 goto pio; 3294 } 3295 #endif 3296 /* mode = min(pio, dma+2) */ 3297 if (drvp->PIO_mode <= (drvp->DMA_mode +2)) 3298 mode = drvp->PIO_mode; 3299 else 3300 mode = drvp->DMA_mode + 2; 3301 } 3302 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3303 3304 pio: /* setup PIO mode */ 3305 if (mode <= 2) { 3306 drvp->DMA_mode = 0; 3307 drvp->PIO_mode = 0; 3308 mode = 0; 3309 } else { 3310 drvp->PIO_mode = mode; 3311 drvp->DMA_mode = mode - 2; 3312 } 3313 datatim_reg |= 3314 AMD756_DATATIM_PULSE(chp->channel, drive, 3315 amd756_pio_set[mode]) | 3316 AMD756_DATATIM_RECOV(chp->channel, drive, 3317 amd756_pio_rec[mode]); 3318 } 3319 if (idedma_ctl != 0) { 3320 /* Add software bits in status register */ 3321 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3322 IDEDMA_CTL(chp->channel), 3323 idedma_ctl); 3324 } 3325 pciide_print_modes(cp); 3326 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_DATATIM, datatim_reg); 3327 pci_conf_write(sc->sc_pc, sc->sc_tag, AMD756_UDMA, udmatim_reg); 3328 } 3329 3330 void 3331 apollo_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 3332 { 3333 struct pciide_channel *cp; 3334 pcireg_t interface; 3335 int no_ideconf = 0, channel; 3336 u_int32_t ideconf; 3337 bus_size_t cmdsize, ctlsize; 3338 pcitag_t tag; 3339 pcireg_t id, class; 3340 3341 /* 3342 * Fake interface since VT6410 is claimed to be a ``RAID'' device. 3343 */ 3344 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) { 3345 interface = PCI_INTERFACE(pa->pa_class); 3346 } else { 3347 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA | 3348 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1); 3349 } 3350 3351 switch (PCI_PRODUCT(pa->pa_id)) { 3352 case PCI_PRODUCT_VIATECH_VT6410: 3353 case PCI_PRODUCT_VIATECH_VT6415: 3354 no_ideconf = 1; 3355 /* FALLTHROUGH */ 3356 case PCI_PRODUCT_VIATECH_CX700_IDE: 3357 case PCI_PRODUCT_VIATECH_VX700_IDE: 3358 case PCI_PRODUCT_VIATECH_VX855_IDE: 3359 case PCI_PRODUCT_VIATECH_VX900_IDE: 3360 printf(": ATA133"); 3361 sc->sc_wdcdev.UDMA_cap = 6; 3362 break; 3363 default: 3364 /* 3365 * Determine the DMA capabilities by looking at the 3366 * ISA bridge. 3367 */ 3368 tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0); 3369 id = pci_conf_read(sc->sc_pc, tag, PCI_ID_REG); 3370 class = pci_conf_read(sc->sc_pc, tag, PCI_CLASS_REG); 3371 3372 /* 3373 * XXX On the VT8237, the ISA bridge is on a different 3374 * device. 
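* If the function at device 15 is not a bridge, the ID and class are re-read from device 17 before the product match below.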
3375 */ 3376 if (PCI_CLASS(class) != PCI_CLASS_BRIDGE && 3377 pa->pa_device == 15) { 3378 tag = pci_make_tag(pa->pa_pc, pa->pa_bus, 17, 0); 3379 id = pci_conf_read(sc->sc_pc, tag, PCI_ID_REG); 3380 class = pci_conf_read(sc->sc_pc, tag, PCI_CLASS_REG); 3381 } 3382 3383 switch (PCI_PRODUCT(id)) { 3384 case PCI_PRODUCT_VIATECH_VT82C586_ISA: 3385 if (PCI_REVISION(class) >= 0x02) { 3386 printf(": ATA33"); 3387 sc->sc_wdcdev.UDMA_cap = 2; 3388 } else { 3389 printf(": DMA"); 3390 sc->sc_wdcdev.UDMA_cap = 0; 3391 } 3392 break; 3393 case PCI_PRODUCT_VIATECH_VT82C596A: 3394 if (PCI_REVISION(class) >= 0x12) { 3395 printf(": ATA66"); 3396 sc->sc_wdcdev.UDMA_cap = 4; 3397 } else { 3398 printf(": ATA33"); 3399 sc->sc_wdcdev.UDMA_cap = 2; 3400 } 3401 break; 3402 3403 case PCI_PRODUCT_VIATECH_VT82C686A_ISA: 3404 if (PCI_REVISION(class) >= 0x40) { 3405 printf(": ATA100"); 3406 sc->sc_wdcdev.UDMA_cap = 5; 3407 } else { 3408 printf(": ATA66"); 3409 sc->sc_wdcdev.UDMA_cap = 4; 3410 } 3411 break; 3412 case PCI_PRODUCT_VIATECH_VT8231_ISA: 3413 case PCI_PRODUCT_VIATECH_VT8233_ISA: 3414 printf(": ATA100"); 3415 sc->sc_wdcdev.UDMA_cap = 5; 3416 break; 3417 case PCI_PRODUCT_VIATECH_VT8233A_ISA: 3418 case PCI_PRODUCT_VIATECH_VT8235_ISA: 3419 case PCI_PRODUCT_VIATECH_VT8237_ISA: 3420 printf(": ATA133"); 3421 sc->sc_wdcdev.UDMA_cap = 6; 3422 break; 3423 default: 3424 printf(": DMA"); 3425 sc->sc_wdcdev.UDMA_cap = 0; 3426 break; 3427 } 3428 break; 3429 } 3430 3431 pciide_mapreg_dma(sc, pa); 3432 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 3433 WDC_CAPABILITY_MODE; 3434 if (sc->sc_dma_ok) { 3435 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 3436 sc->sc_wdcdev.irqack = pciide_irqack; 3437 if (sc->sc_wdcdev.UDMA_cap > 0) 3438 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 3439 } 3440 sc->sc_wdcdev.PIO_cap = 4; 3441 sc->sc_wdcdev.DMA_cap = 2; 3442 sc->sc_wdcdev.set_modes = apollo_setup_channel; 3443 sc->sc_wdcdev.channels = sc->wdc_chanarray; 3444 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 3445 3446 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 3447 3448 WDCDEBUG_PRINT(("apollo_chip_map: old APO_IDECONF=0x%x, " 3449 "APO_CTLMISC=0x%x, APO_DATATIM=0x%x, APO_UDMA=0x%x\n", 3450 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_IDECONF), 3451 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_CTLMISC), 3452 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM), 3453 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), 3454 DEBUG_PROBE); 3455 3456 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 3457 cp = &sc->pciide_channels[channel]; 3458 if (pciide_chansetup(sc, channel, interface) == 0) 3459 continue; 3460 3461 if (no_ideconf == 0) { 3462 ideconf = pci_conf_read(sc->sc_pc, sc->sc_tag, 3463 APO_IDECONF); 3464 if ((ideconf & APO_IDECONF_EN(channel)) == 0) { 3465 printf("%s: %s ignored (disabled)\n", 3466 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 3467 continue; 3468 } 3469 } 3470 pciide_map_compat_intr(pa, cp, channel, interface); 3471 if (cp->hw_ok == 0) 3472 continue; 3473 3474 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 3475 pciide_pci_intr); 3476 if (cp->hw_ok == 0) { 3477 goto next; 3478 } 3479 if (pciide_chan_candisable(cp)) { 3480 if (no_ideconf == 0) { 3481 ideconf &= ~APO_IDECONF_EN(channel); 3482 pci_conf_write(sc->sc_pc, sc->sc_tag, 3483 APO_IDECONF, ideconf); 3484 } 3485 } 3486 3487 if (cp->hw_ok == 0) 3488 goto next; 3489 apollo_setup_channel(&sc->pciide_channels[channel].wdc_channel); 3490 next: 3491 if (cp->hw_ok == 0) 3492 pciide_unmap_compat_intr(pa, cp, 
channel, interface); 3493 } 3494 WDCDEBUG_PRINT(("apollo_chip_map: APO_DATATIM=0x%x, APO_UDMA=0x%x\n", 3495 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM), 3496 pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA)), DEBUG_PROBE); 3497 } 3498 3499 void 3500 apollo_setup_channel(struct channel_softc *chp) 3501 { 3502 u_int32_t udmatim_reg, datatim_reg; 3503 u_int8_t idedma_ctl; 3504 int mode, drive; 3505 struct ata_drive_datas *drvp; 3506 struct pciide_channel *cp = (struct pciide_channel *)chp; 3507 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 3508 3509 idedma_ctl = 0; 3510 datatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_DATATIM); 3511 udmatim_reg = pci_conf_read(sc->sc_pc, sc->sc_tag, APO_UDMA); 3512 datatim_reg &= ~APO_DATATIM_MASK(chp->channel); 3513 udmatim_reg &= ~APO_UDMA_MASK(chp->channel); 3514 3515 /* setup DMA if needed */ 3516 pciide_channel_dma_setup(cp); 3517 3518 /* 3519 * We can't mix Ultra/33 and Ultra/66 on the same channel, so 3520 * downgrade to Ultra/33 if needed 3521 */ 3522 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA) && 3523 (chp->ch_drive[1].drive_flags & DRIVE_UDMA)) { 3524 /* both drives UDMA */ 3525 if (chp->ch_drive[0].UDMA_mode > 2 && 3526 chp->ch_drive[1].UDMA_mode <= 2) { 3527 /* drive 0 Ultra/66, drive 1 Ultra/33 */ 3528 chp->ch_drive[0].UDMA_mode = 2; 3529 } else if (chp->ch_drive[1].UDMA_mode > 2 && 3530 chp->ch_drive[0].UDMA_mode <= 2) { 3531 /* drive 1 Ultra/66, drive 0 Ultra/33 */ 3532 chp->ch_drive[1].UDMA_mode = 2; 3533 } 3534 } 3535 3536 for (drive = 0; drive < 2; drive++) { 3537 drvp = &chp->ch_drive[drive]; 3538 /* If no drive, skip */ 3539 if ((drvp->drive_flags & DRIVE) == 0) 3540 continue; 3541 /* add timing values, setup DMA if needed */ 3542 if (((drvp->drive_flags & DRIVE_DMA) == 0 && 3543 (drvp->drive_flags & DRIVE_UDMA) == 0)) { 3544 mode = drvp->PIO_mode; 3545 goto pio; 3546 } 3547 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) && 3548 (drvp->drive_flags & DRIVE_UDMA)) { 3549 /* use Ultra/DMA */ 3550 drvp->drive_flags &= ~DRIVE_DMA; 3551 udmatim_reg |= APO_UDMA_EN(chp->channel, drive) | 3552 APO_UDMA_EN_MTH(chp->channel, drive); 3553 if (sc->sc_wdcdev.UDMA_cap == 6) { 3554 udmatim_reg |= APO_UDMA_TIME(chp->channel, 3555 drive, apollo_udma133_tim[drvp->UDMA_mode]); 3556 } else if (sc->sc_wdcdev.UDMA_cap == 5) { 3557 /* 686b */ 3558 udmatim_reg |= APO_UDMA_TIME(chp->channel, 3559 drive, apollo_udma100_tim[drvp->UDMA_mode]); 3560 } else if (sc->sc_wdcdev.UDMA_cap == 4) { 3561 /* 596b or 686a */ 3562 udmatim_reg |= APO_UDMA_CLK66(chp->channel); 3563 udmatim_reg |= APO_UDMA_TIME(chp->channel, 3564 drive, apollo_udma66_tim[drvp->UDMA_mode]); 3565 } else { 3566 /* 596a or 586b */ 3567 udmatim_reg |= APO_UDMA_TIME(chp->channel, 3568 drive, apollo_udma33_tim[drvp->UDMA_mode]); 3569 } 3570 /* can use PIO timings, MW DMA unused */ 3571 mode = drvp->PIO_mode; 3572 } else { 3573 /* use Multiword DMA */ 3574 drvp->drive_flags &= ~DRIVE_UDMA; 3575 /* mode = min(pio, dma+2) */ 3576 if (drvp->PIO_mode <= (drvp->DMA_mode +2)) 3577 mode = drvp->PIO_mode; 3578 else 3579 mode = drvp->DMA_mode + 2; 3580 } 3581 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3582 3583 pio: /* setup PIO mode */ 3584 if (mode <= 2) { 3585 drvp->DMA_mode = 0; 3586 drvp->PIO_mode = 0; 3587 mode = 0; 3588 } else { 3589 drvp->PIO_mode = mode; 3590 drvp->DMA_mode = mode - 2; 3591 } 3592 datatim_reg |= 3593 APO_DATATIM_PULSE(chp->channel, drive, 3594 apollo_pio_set[mode]) | 3595 APO_DATATIM_RECOV(chp->channel, drive, 3596 apollo_pio_rec[mode]); 3597 } 3598 if 
(idedma_ctl != 0) { 3599 /* Add software bits in status register */ 3600 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3601 IDEDMA_CTL(chp->channel), 3602 idedma_ctl); 3603 } 3604 pciide_print_modes(cp); 3605 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_DATATIM, datatim_reg); 3606 pci_conf_write(sc->sc_pc, sc->sc_tag, APO_UDMA, udmatim_reg); 3607 } 3608 3609 void 3610 cmd_channel_map(struct pci_attach_args *pa, struct pciide_softc *sc, 3611 int channel) 3612 { 3613 struct pciide_channel *cp = &sc->pciide_channels[channel]; 3614 bus_size_t cmdsize, ctlsize; 3615 u_int8_t ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CTRL); 3616 pcireg_t interface; 3617 int one_channel; 3618 3619 /* 3620 * The 0648/0649 can be told to identify as a RAID controller. 3621 * In this case, we have to fake interface 3622 */ 3623 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) { 3624 interface = PCIIDE_INTERFACE_SETTABLE(0) | 3625 PCIIDE_INTERFACE_SETTABLE(1); 3626 if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) & 3627 CMD_CONF_DSA1) 3628 interface |= PCIIDE_INTERFACE_PCI(0) | 3629 PCIIDE_INTERFACE_PCI(1); 3630 } else { 3631 interface = PCI_INTERFACE(pa->pa_class); 3632 } 3633 3634 sc->wdc_chanarray[channel] = &cp->wdc_channel; 3635 cp->name = PCIIDE_CHANNEL_NAME(channel); 3636 cp->wdc_channel.channel = channel; 3637 cp->wdc_channel.wdc = &sc->sc_wdcdev; 3638 3639 /* 3640 * Older CMD64X doesn't have independant channels 3641 */ 3642 switch (sc->sc_pp->ide_product) { 3643 case PCI_PRODUCT_CMDTECH_649: 3644 one_channel = 0; 3645 break; 3646 default: 3647 one_channel = 1; 3648 break; 3649 } 3650 3651 if (channel > 0 && one_channel) { 3652 cp->wdc_channel.ch_queue = 3653 sc->pciide_channels[0].wdc_channel.ch_queue; 3654 } else { 3655 cp->wdc_channel.ch_queue = wdc_alloc_queue(); 3656 } 3657 if (cp->wdc_channel.ch_queue == NULL) { 3658 printf( 3659 "%s: %s cannot allocate channel queue", 3660 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 3661 return; 3662 } 3663 3664 /* 3665 * with a CMD PCI64x, if we get here, the first channel is enabled: 3666 * there's no way to disable the first channel without disabling 3667 * the whole device 3668 */ 3669 if (channel != 0 && (ctrl & CMD_CTRL_2PORT) == 0) { 3670 printf("%s: %s ignored (disabled)\n", 3671 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 3672 return; 3673 } 3674 cp->hw_ok = 1; 3675 pciide_map_compat_intr(pa, cp, channel, interface); 3676 if (cp->hw_ok == 0) 3677 return; 3678 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, cmd_pci_intr); 3679 if (cp->hw_ok == 0) { 3680 pciide_unmap_compat_intr(pa, cp, channel, interface); 3681 return; 3682 } 3683 if (pciide_chan_candisable(cp)) { 3684 if (channel == 1) { 3685 ctrl &= ~CMD_CTRL_2PORT; 3686 pciide_pci_write(pa->pa_pc, pa->pa_tag, 3687 CMD_CTRL, ctrl); 3688 pciide_unmap_compat_intr(pa, cp, channel, interface); 3689 } 3690 } 3691 } 3692 3693 int 3694 cmd_pci_intr(void *arg) 3695 { 3696 struct pciide_softc *sc = arg; 3697 struct pciide_channel *cp; 3698 struct channel_softc *wdc_cp; 3699 int i, rv, crv; 3700 u_int32_t priirq, secirq; 3701 3702 rv = 0; 3703 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF); 3704 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23); 3705 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 3706 cp = &sc->pciide_channels[i]; 3707 wdc_cp = &cp->wdc_channel; 3708 /* If a compat channel skip. 
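Compat channels are serviced by the handler installed in pciide_map_compat_intr(), not by this native PCI interrupt.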
*/ 3709 if (cp->compat) 3710 continue; 3711 if ((i == 0 && (priirq & CMD_CONF_DRV0_INTR)) || 3712 (i == 1 && (secirq & CMD_ARTTIM23_IRQ))) { 3713 crv = wdcintr(wdc_cp); 3714 if (crv == 0) { 3715 #if 0 3716 printf("%s:%d: bogus intr\n", 3717 sc->sc_wdcdev.sc_dev.dv_xname, i); 3718 #endif 3719 } else 3720 rv = 1; 3721 } 3722 } 3723 return (rv); 3724 } 3725 3726 void 3727 cmd_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 3728 { 3729 int channel; 3730 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 3731 3732 printf(": no DMA"); 3733 sc->sc_dma_ok = 0; 3734 3735 sc->sc_wdcdev.channels = sc->wdc_chanarray; 3736 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 3737 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16; 3738 3739 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 3740 3741 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 3742 cmd_channel_map(pa, sc, channel); 3743 } 3744 } 3745 3746 void 3747 cmd0643_9_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 3748 { 3749 struct pciide_channel *cp; 3750 int channel; 3751 int rev = sc->sc_rev; 3752 pcireg_t interface; 3753 3754 /* 3755 * The 0648/0649 can be told to identify as a RAID controller. 3756 * In this case, we have to fake interface 3757 */ 3758 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) { 3759 interface = PCIIDE_INTERFACE_SETTABLE(0) | 3760 PCIIDE_INTERFACE_SETTABLE(1); 3761 if (pciide_pci_read(pa->pa_pc, pa->pa_tag, CMD_CONF) & 3762 CMD_CONF_DSA1) 3763 interface |= PCIIDE_INTERFACE_PCI(0) | 3764 PCIIDE_INTERFACE_PCI(1); 3765 } else { 3766 interface = PCI_INTERFACE(pa->pa_class); 3767 } 3768 3769 printf(": DMA"); 3770 pciide_mapreg_dma(sc, pa); 3771 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 3772 WDC_CAPABILITY_MODE; 3773 if (sc->sc_dma_ok) { 3774 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 3775 switch (sc->sc_pp->ide_product) { 3776 case PCI_PRODUCT_CMDTECH_649: 3777 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 3778 sc->sc_wdcdev.UDMA_cap = 5; 3779 sc->sc_wdcdev.irqack = cmd646_9_irqack; 3780 break; 3781 case PCI_PRODUCT_CMDTECH_648: 3782 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 3783 sc->sc_wdcdev.UDMA_cap = 4; 3784 sc->sc_wdcdev.irqack = cmd646_9_irqack; 3785 break; 3786 case PCI_PRODUCT_CMDTECH_646: 3787 if (rev >= CMD0646U2_REV) { 3788 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 3789 sc->sc_wdcdev.UDMA_cap = 2; 3790 } else if (rev >= CMD0646U_REV) { 3791 /* 3792 * Linux's driver claims that the 646U is broken 3793 * with UDMA. 
Only enable it if we know what we're 3794 * doing 3795 */ 3796 #ifdef PCIIDE_CMD0646U_ENABLEUDMA 3797 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 3798 sc->sc_wdcdev.UDMA_cap = 2; 3799 #endif 3800 /* explicitly disable UDMA */ 3801 pciide_pci_write(sc->sc_pc, sc->sc_tag, 3802 CMD_UDMATIM(0), 0); 3803 pciide_pci_write(sc->sc_pc, sc->sc_tag, 3804 CMD_UDMATIM(1), 0); 3805 } 3806 sc->sc_wdcdev.irqack = cmd646_9_irqack; 3807 break; 3808 default: 3809 sc->sc_wdcdev.irqack = pciide_irqack; 3810 } 3811 } 3812 3813 sc->sc_wdcdev.channels = sc->wdc_chanarray; 3814 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 3815 sc->sc_wdcdev.PIO_cap = 4; 3816 sc->sc_wdcdev.DMA_cap = 2; 3817 sc->sc_wdcdev.set_modes = cmd0643_9_setup_channel; 3818 3819 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 3820 3821 WDCDEBUG_PRINT(("cmd0643_9_chip_map: old timings reg 0x%x 0x%x\n", 3822 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54), 3823 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)), 3824 DEBUG_PROBE); 3825 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 3826 cp = &sc->pciide_channels[channel]; 3827 cmd_channel_map(pa, sc, channel); 3828 if (cp->hw_ok == 0) 3829 continue; 3830 cmd0643_9_setup_channel(&cp->wdc_channel); 3831 } 3832 /* 3833 * note - this also makes sure we clear the irq disable and reset 3834 * bits 3835 */ 3836 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_DMA_MODE, CMD_DMA_MULTIPLE); 3837 WDCDEBUG_PRINT(("cmd0643_9_chip_map: timings reg now 0x%x 0x%x\n", 3838 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54), 3839 pci_conf_read(sc->sc_pc, sc->sc_tag, 0x58)), 3840 DEBUG_PROBE); 3841 } 3842 3843 void 3844 cmd0643_9_setup_channel(struct channel_softc *chp) 3845 { 3846 struct ata_drive_datas *drvp; 3847 u_int8_t tim; 3848 u_int32_t idedma_ctl, udma_reg; 3849 int drive; 3850 struct pciide_channel *cp = (struct pciide_channel *)chp; 3851 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 3852 3853 idedma_ctl = 0; 3854 /* setup DMA if needed */ 3855 pciide_channel_dma_setup(cp); 3856 3857 for (drive = 0; drive < 2; drive++) { 3858 drvp = &chp->ch_drive[drive]; 3859 /* If no drive, skip */ 3860 if ((drvp->drive_flags & DRIVE) == 0) 3861 continue; 3862 /* add timing values, setup DMA if needed */ 3863 tim = cmd0643_9_data_tim_pio[drvp->PIO_mode]; 3864 if (drvp->drive_flags & (DRIVE_DMA | DRIVE_UDMA)) { 3865 if (drvp->drive_flags & DRIVE_UDMA) { 3866 /* UltraDMA on a 646U2, 0648 or 0649 */ 3867 drvp->drive_flags &= ~DRIVE_DMA; 3868 udma_reg = pciide_pci_read(sc->sc_pc, 3869 sc->sc_tag, CMD_UDMATIM(chp->channel)); 3870 if (drvp->UDMA_mode > 2 && 3871 (pciide_pci_read(sc->sc_pc, sc->sc_tag, 3872 CMD_BICSR) & 3873 CMD_BICSR_80(chp->channel)) == 0) { 3874 WDCDEBUG_PRINT(("%s(%s:%d:%d): " 3875 "80-wire cable not detected\n", 3876 drvp->drive_name, 3877 sc->sc_wdcdev.sc_dev.dv_xname, 3878 chp->channel, drive), DEBUG_PROBE); 3879 drvp->UDMA_mode = 2; 3880 } 3881 if (drvp->UDMA_mode > 2) 3882 udma_reg &= ~CMD_UDMATIM_UDMA33(drive); 3883 else if (sc->sc_wdcdev.UDMA_cap > 2) 3884 udma_reg |= CMD_UDMATIM_UDMA33(drive); 3885 udma_reg |= CMD_UDMATIM_UDMA(drive); 3886 udma_reg &= ~(CMD_UDMATIM_TIM_MASK << 3887 CMD_UDMATIM_TIM_OFF(drive)); 3888 udma_reg |= 3889 (cmd0646_9_tim_udma[drvp->UDMA_mode] << 3890 CMD_UDMATIM_TIM_OFF(drive)); 3891 pciide_pci_write(sc->sc_pc, sc->sc_tag, 3892 CMD_UDMATIM(chp->channel), udma_reg); 3893 } else { 3894 /* 3895 * use Multiword DMA. 
3896 * Timings will be used for both PIO and DMA, 3897 * so adjust DMA mode if needed 3898 * if we have a 0646U2/8/9, turn off UDMA 3899 */ 3900 if (sc->sc_wdcdev.cap & WDC_CAPABILITY_UDMA) { 3901 udma_reg = pciide_pci_read(sc->sc_pc, 3902 sc->sc_tag, 3903 CMD_UDMATIM(chp->channel)); 3904 udma_reg &= ~CMD_UDMATIM_UDMA(drive); 3905 pciide_pci_write(sc->sc_pc, sc->sc_tag, 3906 CMD_UDMATIM(chp->channel), 3907 udma_reg); 3908 } 3909 if (drvp->PIO_mode >= 3 && 3910 (drvp->DMA_mode + 2) > drvp->PIO_mode) { 3911 drvp->DMA_mode = drvp->PIO_mode - 2; 3912 } 3913 tim = cmd0643_9_data_tim_dma[drvp->DMA_mode]; 3914 } 3915 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 3916 } 3917 pciide_pci_write(sc->sc_pc, sc->sc_tag, 3918 CMD_DATA_TIM(chp->channel, drive), tim); 3919 } 3920 if (idedma_ctl != 0) { 3921 /* Add software bits in status register */ 3922 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 3923 IDEDMA_CTL(chp->channel), 3924 idedma_ctl); 3925 } 3926 pciide_print_modes(cp); 3927 #ifdef __sparc64__ 3928 /* 3929 * The Ultra 5 has a tendency to hang during reboot. This is due 3930 * to the PCI0646U asserting a PCI interrupt line when the chip 3931 * registers claim that it is not. Performing a reset at this 3932 * point appears to eliminate the symptoms. It is likely the 3933 * real cause is still lurking somewhere in the code. 3934 */ 3935 wdcreset(chp, SILENT); 3936 #endif /* __sparc64__ */ 3937 } 3938 3939 void 3940 cmd646_9_irqack(struct channel_softc *chp) 3941 { 3942 u_int32_t priirq, secirq; 3943 struct pciide_channel *cp = (struct pciide_channel *)chp; 3944 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 3945 3946 if (chp->channel == 0) { 3947 priirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_CONF); 3948 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_CONF, priirq); 3949 } else { 3950 secirq = pciide_pci_read(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23); 3951 pciide_pci_write(sc->sc_pc, sc->sc_tag, CMD_ARTTIM23, secirq); 3952 } 3953 pciide_irqack(chp); 3954 } 3955 3956 void 3957 cmd680_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 3958 { 3959 struct pciide_channel *cp; 3960 int channel; 3961 3962 printf("\n%s: bus-master DMA support present", 3963 sc->sc_wdcdev.sc_dev.dv_xname); 3964 pciide_mapreg_dma(sc, pa); 3965 printf("\n"); 3966 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 3967 WDC_CAPABILITY_MODE; 3968 if (sc->sc_dma_ok) { 3969 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 3970 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 3971 sc->sc_wdcdev.UDMA_cap = 6; 3972 sc->sc_wdcdev.irqack = pciide_irqack; 3973 } 3974 3975 sc->sc_wdcdev.channels = sc->wdc_chanarray; 3976 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 3977 sc->sc_wdcdev.PIO_cap = 4; 3978 sc->sc_wdcdev.DMA_cap = 2; 3979 sc->sc_wdcdev.set_modes = cmd680_setup_channel; 3980 3981 pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x80, 0x00); 3982 pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x84, 0x00); 3983 pciide_pci_write(sc->sc_pc, sc->sc_tag, 0x8a, 3984 pciide_pci_read(sc->sc_pc, sc->sc_tag, 0x8a) | 0x01); 3985 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 3986 cp = &sc->pciide_channels[channel]; 3987 cmd680_channel_map(pa, sc, channel); 3988 if (cp->hw_ok == 0) 3989 continue; 3990 cmd680_setup_channel(&cp->wdc_channel); 3991 } 3992 } 3993 3994 void 3995 cmd680_channel_map(struct pci_attach_args *pa, struct pciide_softc *sc, 3996 int channel) 3997 { 3998 struct pciide_channel *cp = &sc->pciide_channels[channel]; 3999 bus_size_t cmdsize, ctlsize; 4000 int 
interface, i, reg; 4001 static const u_int8_t init_val[] = 4002 { 0x8a, 0x32, 0x8a, 0x32, 0x8a, 0x32, 4003 0x92, 0x43, 0x92, 0x43, 0x09, 0x40, 0x09, 0x40 }; 4004 4005 if (PCI_SUBCLASS(pa->pa_class) != PCI_SUBCLASS_MASS_STORAGE_IDE) { 4006 interface = PCIIDE_INTERFACE_SETTABLE(0) | 4007 PCIIDE_INTERFACE_SETTABLE(1); 4008 interface |= PCIIDE_INTERFACE_PCI(0) | 4009 PCIIDE_INTERFACE_PCI(1); 4010 } else { 4011 interface = PCI_INTERFACE(pa->pa_class); 4012 } 4013 4014 sc->wdc_chanarray[channel] = &cp->wdc_channel; 4015 cp->name = PCIIDE_CHANNEL_NAME(channel); 4016 cp->wdc_channel.channel = channel; 4017 cp->wdc_channel.wdc = &sc->sc_wdcdev; 4018 4019 cp->wdc_channel.ch_queue = wdc_alloc_queue(); 4020 if (cp->wdc_channel.ch_queue == NULL) { 4021 printf("%s %s: " 4022 "cannot allocate channel queue", 4023 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 4024 return; 4025 } 4026 4027 /* XXX */ 4028 reg = 0xa2 + channel * 16; 4029 for (i = 0; i < sizeof(init_val); i++) 4030 pciide_pci_write(sc->sc_pc, sc->sc_tag, reg + i, init_val[i]); 4031 4032 printf("%s: %s %s to %s mode\n", 4033 sc->sc_wdcdev.sc_dev.dv_xname, cp->name, 4034 (interface & PCIIDE_INTERFACE_SETTABLE(channel)) ? 4035 "configured" : "wired", 4036 (interface & PCIIDE_INTERFACE_PCI(channel)) ? 4037 "native-PCI" : "compatibility"); 4038 4039 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, pciide_pci_intr); 4040 if (cp->hw_ok == 0) 4041 return; 4042 pciide_map_compat_intr(pa, cp, channel, interface); 4043 } 4044 4045 void 4046 cmd680_setup_channel(struct channel_softc *chp) 4047 { 4048 struct ata_drive_datas *drvp; 4049 u_int8_t mode, off, scsc; 4050 u_int16_t val; 4051 u_int32_t idedma_ctl; 4052 int drive; 4053 struct pciide_channel *cp = (struct pciide_channel *)chp; 4054 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 4055 pci_chipset_tag_t pc = sc->sc_pc; 4056 pcitag_t pa = sc->sc_tag; 4057 static const u_int8_t udma2_tbl[] = 4058 { 0x0f, 0x0b, 0x07, 0x06, 0x03, 0x02, 0x01 }; 4059 static const u_int8_t udma_tbl[] = 4060 { 0x0c, 0x07, 0x05, 0x04, 0x02, 0x01, 0x00 }; 4061 static const u_int16_t dma_tbl[] = 4062 { 0x2208, 0x10c2, 0x10c1 }; 4063 static const u_int16_t pio_tbl[] = 4064 { 0x328a, 0x2283, 0x1104, 0x10c3, 0x10c1 }; 4065 4066 idedma_ctl = 0; 4067 pciide_channel_dma_setup(cp); 4068 mode = pciide_pci_read(pc, pa, 0x80 + chp->channel * 4); 4069 4070 for (drive = 0; drive < 2; drive++) { 4071 drvp = &chp->ch_drive[drive]; 4072 /* If no drive, skip */ 4073 if ((drvp->drive_flags & DRIVE) == 0) 4074 continue; 4075 mode &= ~(0x03 << (drive * 4)); 4076 if (drvp->drive_flags & DRIVE_UDMA) { 4077 drvp->drive_flags &= ~DRIVE_DMA; 4078 off = 0xa0 + chp->channel * 16; 4079 if (drvp->UDMA_mode > 2 && 4080 (pciide_pci_read(pc, pa, off) & 0x01) == 0) 4081 drvp->UDMA_mode = 2; 4082 scsc = pciide_pci_read(pc, pa, 0x8a); 4083 if (drvp->UDMA_mode == 6 && (scsc & 0x30) == 0) { 4084 pciide_pci_write(pc, pa, 0x8a, scsc | 0x01); 4085 scsc = pciide_pci_read(pc, pa, 0x8a); 4086 if ((scsc & 0x30) == 0) 4087 drvp->UDMA_mode = 5; 4088 } 4089 mode |= 0x03 << (drive * 4); 4090 off = 0xac + chp->channel * 16 + drive * 2; 4091 val = pciide_pci_read(pc, pa, off) & ~0x3f; 4092 if (scsc & 0x30) 4093 val |= udma2_tbl[drvp->UDMA_mode]; 4094 else 4095 val |= udma_tbl[drvp->UDMA_mode]; 4096 pciide_pci_write(pc, pa, off, val); 4097 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 4098 } else if (drvp->drive_flags & DRIVE_DMA) { 4099 mode |= 0x02 << (drive * 4); 4100 off = 0xa8 + chp->channel * 16 + drive * 2; 4101 val = dma_tbl[drvp->DMA_mode]; 4102 
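/* dma_tbl/pio_tbl entries are 16-bit timing values (cf. the low/high byte pairs in init_val above); write the low byte at off and the high byte at off + 1. */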
pciide_pci_write(pc, pa, off, val & 0xff); 4103
pciide_pci_write(pc, pa, off + 1, val >> 8); 4104
idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 4105
} else { 4106
mode |= 0x01 << (drive * 4); 4107
off = 0xa4 + chp->channel * 16 + drive * 2; 4108
val = pio_tbl[drvp->PIO_mode]; 4109
pciide_pci_write(pc, pa, off, val & 0xff); 4110
pciide_pci_write(pc, pa, off + 1, val >> 8); 4111
} 4112
} 4113
4114
pciide_pci_write(pc, pa, 0x80 + chp->channel * 4, mode); 4115
if (idedma_ctl != 0) { 4116
/* Add software bits in status register */ 4117
bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 4118
IDEDMA_CTL(chp->channel), 4119
idedma_ctl); 4120
} 4121
pciide_print_modes(cp); 4122
} 4123
4124
/* 4125
* When the Silicon Image 3112 retries a PCI memory read command, 4126
* it may retry it as a memory read multiple command under some 4127
* circumstances. This can totally confuse some PCI controllers, 4128
* so ensure that it will never do this by making sure that the 4129
* Read Threshold (FIFO Read Request Control) field of the FIFO 4130
* Valid Byte Count and Control registers for both channels (BA5 4131
* offset 0x40 and 0x44) are set to be at least as large as the 4132
* cacheline size register. 4133
*/ 4134
void 4135
sii_fixup_cacheline(struct pciide_softc *sc, struct pci_attach_args *pa) 4136
{ 4137
pcireg_t cls, reg40, reg44; 4138
4139
cls = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG); 4140
cls = (cls >> PCI_CACHELINE_SHIFT) & PCI_CACHELINE_MASK; 4141
cls *= 4; 4142
if (cls > 224) { 4143
cls = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG); 4144
cls &= ~(PCI_CACHELINE_MASK << PCI_CACHELINE_SHIFT); 4145
cls |= ((224/4) << PCI_CACHELINE_SHIFT); 4146
pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG, cls); 4147
cls = 224; 4148
} 4149
if (cls < 32) 4150
cls = 32; 4151
cls = (cls + 31) / 32; 4152
reg40 = ba5_read_4(sc, 0x40); 4153
reg44 = ba5_read_4(sc, 0x44); 4154
if ((reg40 & 0x7) < cls) 4155
ba5_write_4(sc, 0x40, (reg40 & ~0x07) | cls); 4156
if ((reg44 & 0x7) < cls) 4157
ba5_write_4(sc, 0x44, (reg44 & ~0x07) | cls); 4158
} 4159
4160
void 4161
sii3112_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 4162
{ 4163
struct pciide_channel *cp; 4164
bus_size_t cmdsize, ctlsize; 4165
pcireg_t interface, scs_cmd, cfgctl; 4166
int channel; 4167
struct pciide_satalink *sl; 4168
4169
/* Allocate memory for private data */ 4170
sc->sc_cookie = malloc(sizeof(*sl), M_DEVBUF, M_NOWAIT | M_ZERO); 4171
sl = sc->sc_cookie; 4172
4173
sc->chip_unmap = default_chip_unmap; 4174
4175
#define SII3112_RESET_BITS \ 4176
(SCS_CMD_PBM_RESET | SCS_CMD_ARB_RESET | \ 4177
SCS_CMD_FF1_RESET | SCS_CMD_FF0_RESET | \ 4178
SCS_CMD_IDE1_RESET | SCS_CMD_IDE0_RESET) 4179
4180
/* 4181
* Reset everything and then unblock all of the interrupts.
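 * The second write below clears the reset bits again, keeping only the
 * BA5 enable bit; scs_cmd is then used to decide whether the BA5
 * register space can be mapped.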
4182 */ 4183 scs_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag, SII3112_SCS_CMD); 4184 pci_conf_write(pa->pa_pc, pa->pa_tag, SII3112_SCS_CMD, 4185 scs_cmd | SII3112_RESET_BITS); 4186 delay(50 * 1000); 4187 pci_conf_write(pa->pa_pc, pa->pa_tag, SII3112_SCS_CMD, 4188 scs_cmd & SCS_CMD_BA5_EN); 4189 delay(50 * 1000); 4190 4191 if (scs_cmd & SCS_CMD_BA5_EN) { 4192 if (pci_mapreg_map(pa, PCI_MAPREG_START + 0x14, 4193 PCI_MAPREG_TYPE_MEM | 4194 PCI_MAPREG_MEM_TYPE_32BIT, 0, 4195 &sl->ba5_st, &sl->ba5_sh, 4196 NULL, NULL, 0) != 0) 4197 printf(": unable to map BA5 register space\n"); 4198 else 4199 sl->ba5_en = 1; 4200 } else { 4201 cfgctl = pci_conf_read(pa->pa_pc, pa->pa_tag, 4202 SII3112_PCI_CFGCTL); 4203 pci_conf_write(pa->pa_pc, pa->pa_tag, SII3112_PCI_CFGCTL, 4204 cfgctl | CFGCTL_BA5INDEN); 4205 } 4206 4207 printf(": DMA"); 4208 pciide_mapreg_dma(sc, pa); 4209 printf("\n"); 4210 4211 /* 4212 * Rev. <= 0x01 of the 3112 have a bug that can cause data 4213 * corruption if DMA transfers cross an 8K boundary. This is 4214 * apparently hard to tickle, but we'll go ahead and play it 4215 * safe. 4216 */ 4217 if (sc->sc_rev <= 0x01) { 4218 sc->sc_dma_maxsegsz = 8192; 4219 sc->sc_dma_boundary = 8192; 4220 } 4221 4222 sii_fixup_cacheline(sc, pa); 4223 4224 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32; 4225 sc->sc_wdcdev.PIO_cap = 4; 4226 if (sc->sc_dma_ok) { 4227 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 4228 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 4229 sc->sc_wdcdev.irqack = pciide_irqack; 4230 sc->sc_wdcdev.DMA_cap = 2; 4231 sc->sc_wdcdev.UDMA_cap = 6; 4232 } 4233 sc->sc_wdcdev.set_modes = sii3112_setup_channel; 4234 4235 /* We can use SControl and SStatus to probe for drives. */ 4236 sc->sc_wdcdev.drv_probe = sii3112_drv_probe; 4237 4238 sc->sc_wdcdev.channels = sc->wdc_chanarray; 4239 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 4240 4241 /* 4242 * The 3112 either identifies itself as a RAID storage device 4243 * or a Misc storage device. Fake up the interface bits for 4244 * what our driver expects. 
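 * (i.e. native-PCI mode on both channels with bus-master DMA).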
4245 */ 4246 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) { 4247 interface = PCI_INTERFACE(pa->pa_class); 4248 } else { 4249 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA | 4250 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1); 4251 } 4252 4253 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 4254 cp = &sc->pciide_channels[channel]; 4255 if (pciide_chansetup(sc, channel, interface) == 0) 4256 continue; 4257 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 4258 pciide_pci_intr); 4259 if (cp->hw_ok == 0) 4260 continue; 4261 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 4262 } 4263 } 4264 4265 void 4266 sii3112_setup_channel(struct channel_softc *chp) 4267 { 4268 struct ata_drive_datas *drvp; 4269 int drive; 4270 u_int32_t idedma_ctl, dtm; 4271 struct pciide_channel *cp = (struct pciide_channel *)chp; 4272 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 4273 4274 /* setup DMA if needed */ 4275 pciide_channel_dma_setup(cp); 4276 4277 idedma_ctl = 0; 4278 dtm = 0; 4279 4280 for (drive = 0; drive < 2; drive++) { 4281 drvp = &chp->ch_drive[drive]; 4282 /* If no drive, skip */ 4283 if ((drvp->drive_flags & DRIVE) == 0) 4284 continue; 4285 if (drvp->drive_flags & DRIVE_UDMA) { 4286 /* use Ultra/DMA */ 4287 drvp->drive_flags &= ~DRIVE_DMA; 4288 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 4289 dtm |= DTM_IDEx_DMA; 4290 } else if (drvp->drive_flags & DRIVE_DMA) { 4291 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 4292 dtm |= DTM_IDEx_DMA; 4293 } else { 4294 dtm |= DTM_IDEx_PIO; 4295 } 4296 } 4297 4298 /* 4299 * Nothing to do to setup modes; it is meaningless in S-ATA 4300 * (but many S-ATA drives still want to get the SET_FEATURE 4301 * command). 4302 */ 4303 if (idedma_ctl != 0) { 4304 /* Add software bits in status register */ 4305 PCIIDE_DMACTL_WRITE(sc, chp->channel, idedma_ctl); 4306 } 4307 BA5_WRITE_4(sc, chp->channel, ba5_IDE_DTM, dtm); 4308 pciide_print_modes(cp); 4309 } 4310 4311 void 4312 sii3112_drv_probe(struct channel_softc *chp) 4313 { 4314 struct pciide_channel *cp = (struct pciide_channel *)chp; 4315 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 4316 uint32_t scontrol, sstatus; 4317 uint8_t scnt, sn, cl, ch; 4318 int s; 4319 4320 /* 4321 * The 3112 is a 2-port part, and only has one drive per channel 4322 * (each port emulates a master drive). 4323 * 4324 * The 3114 is similar, but has 4 channels. 4325 */ 4326 4327 /* 4328 * Request communication initialization sequence, any speed. 4329 * Performing this is the equivalent of an ATA Reset. 4330 */ 4331 scontrol = SControl_DET_INIT | SControl_SPD_ANY; 4332 4333 /* 4334 * XXX We don't yet support SATA power management; disable all 4335 * power management state transitions. 4336 */ 4337 scontrol |= SControl_IPM_NONE; 4338 4339 BA5_WRITE_4(sc, chp->channel, ba5_SControl, scontrol); 4340 delay(50 * 1000); 4341 scontrol &= ~SControl_DET_INIT; 4342 BA5_WRITE_4(sc, chp->channel, ba5_SControl, scontrol); 4343 delay(50 * 1000); 4344 4345 sstatus = BA5_READ_4(sc, chp->channel, ba5_SStatus); 4346 #if 0 4347 printf("%s: port %d: SStatus=0x%08x, SControl=0x%08x\n", 4348 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel, sstatus, 4349 BA5_READ_4(sc, chp->channel, ba5_SControl)); 4350 #endif 4351 switch (sstatus & SStatus_DET_mask) { 4352 case SStatus_DET_NODEV: 4353 /* No device; be silent. 
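An empty port is the normal case, so don't clutter the console.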
*/ 4354 break; 4355 4356 case SStatus_DET_DEV_NE: 4357 printf("%s: port %d: device connected, but " 4358 "communication not established\n", 4359 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel); 4360 break; 4361 4362 case SStatus_DET_OFFLINE: 4363 printf("%s: port %d: PHY offline\n", 4364 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel); 4365 break; 4366 4367 case SStatus_DET_DEV: 4368 /* 4369 * XXX ATAPI detection doesn't currently work. Don't 4370 * XXX know why. But, it's not like the standard method 4371 * XXX can detect an ATAPI device connected via a SATA/PATA 4372 * XXX bridge, so at least this is no worse. --thorpej 4373 */ 4374 if (chp->_vtbl != NULL) 4375 CHP_WRITE_REG(chp, wdr_sdh, WDSD_IBM | (0 << 4)); 4376 else 4377 bus_space_write_1(chp->cmd_iot, chp->cmd_ioh, 4378 wdr_sdh & _WDC_REGMASK, WDSD_IBM | (0 << 4)); 4379 delay(10); /* 400ns delay */ 4380 /* Save register contents. */ 4381 if (chp->_vtbl != NULL) { 4382 scnt = CHP_READ_REG(chp, wdr_seccnt); 4383 sn = CHP_READ_REG(chp, wdr_sector); 4384 cl = CHP_READ_REG(chp, wdr_cyl_lo); 4385 ch = CHP_READ_REG(chp, wdr_cyl_hi); 4386 } else { 4387 scnt = bus_space_read_1(chp->cmd_iot, 4388 chp->cmd_ioh, wdr_seccnt & _WDC_REGMASK); 4389 sn = bus_space_read_1(chp->cmd_iot, 4390 chp->cmd_ioh, wdr_sector & _WDC_REGMASK); 4391 cl = bus_space_read_1(chp->cmd_iot, 4392 chp->cmd_ioh, wdr_cyl_lo & _WDC_REGMASK); 4393 ch = bus_space_read_1(chp->cmd_iot, 4394 chp->cmd_ioh, wdr_cyl_hi & _WDC_REGMASK); 4395 } 4396 #if 0 4397 printf("%s: port %d: scnt=0x%x sn=0x%x cl=0x%x ch=0x%x\n", 4398 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel, 4399 scnt, sn, cl, ch); 4400 #endif 4401 /* 4402 * scnt and sn are supposed to be 0x1 for ATAPI, but in some 4403 * cases we get wrong values here, so ignore it. 4404 */ 4405 s = splbio(); 4406 if (cl == 0x14 && ch == 0xeb) 4407 chp->ch_drive[0].drive_flags |= DRIVE_ATAPI; 4408 else 4409 chp->ch_drive[0].drive_flags |= DRIVE_ATA; 4410 splx(s); 4411 4412 printf("%s: port %d: device present", 4413 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel); 4414 switch ((sstatus & SStatus_SPD_mask) >> SStatus_SPD_shift) { 4415 case 1: 4416 printf(", speed: 1.5Gb/s"); 4417 break; 4418 case 2: 4419 printf(", speed: 3.0Gb/s"); 4420 break; 4421 } 4422 printf("\n"); 4423 break; 4424 4425 default: 4426 printf("%s: port %d: unknown SStatus: 0x%08x\n", 4427 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel, sstatus); 4428 } 4429 } 4430 4431 void 4432 sii3114_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 4433 { 4434 struct pciide_channel *cp; 4435 pcireg_t scs_cmd; 4436 pci_intr_handle_t intrhandle; 4437 const char *intrstr; 4438 int channel; 4439 struct pciide_satalink *sl; 4440 4441 /* Allocate memory for private data */ 4442 sc->sc_cookie = malloc(sizeof(*sl), M_DEVBUF, M_NOWAIT | M_ZERO); 4443 sl = sc->sc_cookie; 4444 4445 #define SII3114_RESET_BITS \ 4446 (SCS_CMD_PBM_RESET | SCS_CMD_ARB_RESET | \ 4447 SCS_CMD_FF1_RESET | SCS_CMD_FF0_RESET | \ 4448 SCS_CMD_FF3_RESET | SCS_CMD_FF2_RESET | \ 4449 SCS_CMD_IDE1_RESET | SCS_CMD_IDE0_RESET | \ 4450 SCS_CMD_IDE3_RESET | SCS_CMD_IDE2_RESET) 4451 4452 /* 4453 * Reset everything and then unblock all of the interrupts. 4454 */ 4455 scs_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag, SII3112_SCS_CMD); 4456 pci_conf_write(pa->pa_pc, pa->pa_tag, SII3112_SCS_CMD, 4457 scs_cmd | SII3114_RESET_BITS); 4458 delay(50 * 1000); 4459 pci_conf_write(pa->pa_pc, pa->pa_tag, SII3112_SCS_CMD, 4460 scs_cmd & SCS_CMD_M66EN); 4461 delay(50 * 1000); 4462 4463 /* 4464 * On the 3114, the BA5 register space is always enabled. 
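 * (unlike the 3112, where its availability depends on SCS_CMD_BA5_EN).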
In 4465 * order to use the 3114 in any sane way, we must use this BA5 4466 * register space, and so we consider it an error if we cannot 4467 * map it. 4468 * 4469 * As a consequence of using BA5, our register mapping is different 4470 * from a normal PCI IDE controller's, and so we are unable to use 4471 * most of the common PCI IDE register mapping functions. 4472 */ 4473 if (pci_mapreg_map(pa, PCI_MAPREG_START + 0x14, 4474 PCI_MAPREG_TYPE_MEM | 4475 PCI_MAPREG_MEM_TYPE_32BIT, 0, 4476 &sl->ba5_st, &sl->ba5_sh, 4477 NULL, NULL, 0) != 0) { 4478 printf(": unable to map BA5 register space\n"); 4479 return; 4480 } 4481 sl->ba5_en = 1; 4482 4483 /* 4484 * Set the Interrupt Steering bit in the IDEDMA_CMD register of 4485 * channel 2. This is required at all times for proper operation 4486 * when using the BA5 register space (otherwise interrupts from 4487 * all 4 channels won't work). 4488 */ 4489 BA5_WRITE_4(sc, 2, ba5_IDEDMA_CMD, IDEDMA_CMD_INT_STEER); 4490 4491 printf(": DMA"); 4492 sii3114_mapreg_dma(sc, pa); 4493 printf("\n"); 4494 4495 sii_fixup_cacheline(sc, pa); 4496 4497 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32; 4498 sc->sc_wdcdev.PIO_cap = 4; 4499 if (sc->sc_dma_ok) { 4500 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 4501 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 4502 sc->sc_wdcdev.irqack = pciide_irqack; 4503 sc->sc_wdcdev.DMA_cap = 2; 4504 sc->sc_wdcdev.UDMA_cap = 6; 4505 } 4506 sc->sc_wdcdev.set_modes = sii3112_setup_channel; 4507 4508 /* We can use SControl and SStatus to probe for drives. */ 4509 sc->sc_wdcdev.drv_probe = sii3112_drv_probe; 4510 4511 sc->sc_wdcdev.channels = sc->wdc_chanarray; 4512 sc->sc_wdcdev.nchannels = 4; 4513 4514 /* Map and establish the interrupt handler. */ 4515 if (pci_intr_map(pa, &intrhandle) != 0) { 4516 printf("%s: couldn't map native-PCI interrupt\n", 4517 sc->sc_wdcdev.sc_dev.dv_xname); 4518 return; 4519 } 4520 intrstr = pci_intr_string(pa->pa_pc, intrhandle); 4521 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc, intrhandle, IPL_BIO, 4522 /* XXX */ 4523 pciide_pci_intr, sc, 4524 sc->sc_wdcdev.sc_dev.dv_xname); 4525 if (sc->sc_pci_ih != NULL) { 4526 printf("%s: using %s for native-PCI interrupt\n", 4527 sc->sc_wdcdev.sc_dev.dv_xname, 4528 intrstr ? intrstr : "unknown interrupt"); 4529 } else { 4530 printf("%s: couldn't establish native-PCI interrupt", 4531 sc->sc_wdcdev.sc_dev.dv_xname); 4532 if (intrstr != NULL) 4533 printf(" at %s", intrstr); 4534 printf("\n"); 4535 return; 4536 } 4537 4538 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 4539 cp = &sc->pciide_channels[channel]; 4540 if (sii3114_chansetup(sc, channel) == 0) 4541 continue; 4542 sii3114_mapchan(cp); 4543 if (cp->hw_ok == 0) 4544 continue; 4545 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 4546 } 4547 } 4548 4549 void 4550 sii3114_mapreg_dma(struct pciide_softc *sc, struct pci_attach_args *pa) 4551 { 4552 int chan, reg; 4553 bus_size_t size; 4554 struct pciide_satalink *sl = sc->sc_cookie; 4555 4556 sc->sc_wdcdev.dma_arg = sc; 4557 sc->sc_wdcdev.dma_init = pciide_dma_init; 4558 sc->sc_wdcdev.dma_start = pciide_dma_start; 4559 sc->sc_wdcdev.dma_finish = pciide_dma_finish; 4560 4561 /* 4562 * Slice off a subregion of BA5 for each of the channel's DMA 4563 * registers. 
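 * Each subregion looks like an ordinary bus-master IDE register block,
 * so the generic pciide_dma_init/start/finish hooks set above can drive it.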
4564 */ 4565 4566 sc->sc_dma_iot = sl->ba5_st; 4567 for (chan = 0; chan < 4; chan++) { 4568 for (reg = 0; reg < IDEDMA_NREGS; reg++) { 4569 size = 4; 4570 if (size > (IDEDMA_SCH_OFFSET - reg)) 4571 size = IDEDMA_SCH_OFFSET - reg; 4572 if (bus_space_subregion(sl->ba5_st, 4573 sl->ba5_sh, 4574 satalink_ba5_regmap[chan].ba5_IDEDMA_CMD + reg, 4575 size, &sl->regs[chan].dma_iohs[reg]) != 0) { 4576 sc->sc_dma_ok = 0; 4577 printf(": can't subregion offset " 4578 "%lu size %lu", 4579 (u_long) satalink_ba5_regmap[ 4580 chan].ba5_IDEDMA_CMD + reg, 4581 (u_long) size); 4582 return; 4583 } 4584 } 4585 } 4586 4587 sc->sc_dmacmd_read = sii3114_dmacmd_read; 4588 sc->sc_dmacmd_write = sii3114_dmacmd_write; 4589 sc->sc_dmactl_read = sii3114_dmactl_read; 4590 sc->sc_dmactl_write = sii3114_dmactl_write; 4591 sc->sc_dmatbl_write = sii3114_dmatbl_write; 4592 4593 /* DMA registers all set up! */ 4594 sc->sc_dmat = pa->pa_dmat; 4595 sc->sc_dma_ok = 1; 4596 } 4597 4598 int 4599 sii3114_chansetup(struct pciide_softc *sc, int channel) 4600 { 4601 static const char *channel_names[] = { 4602 "port 0", 4603 "port 1", 4604 "port 2", 4605 "port 3", 4606 }; 4607 struct pciide_channel *cp = &sc->pciide_channels[channel]; 4608 4609 sc->wdc_chanarray[channel] = &cp->wdc_channel; 4610 4611 /* 4612 * We must always keep the Interrupt Steering bit set in channel 2's 4613 * IDEDMA_CMD register. 4614 */ 4615 if (channel == 2) 4616 cp->idedma_cmd = IDEDMA_CMD_INT_STEER; 4617 4618 cp->name = channel_names[channel]; 4619 cp->wdc_channel.channel = channel; 4620 cp->wdc_channel.wdc = &sc->sc_wdcdev; 4621 cp->wdc_channel.ch_queue = wdc_alloc_queue(); 4622 if (cp->wdc_channel.ch_queue == NULL) { 4623 printf("%s %s channel: " 4624 "cannot allocate channel queue", 4625 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 4626 return (0); 4627 } 4628 return (1); 4629 } 4630 4631 void 4632 sii3114_mapchan(struct pciide_channel *cp) 4633 { 4634 struct channel_softc *wdc_cp = &cp->wdc_channel; 4635 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 4636 struct pciide_satalink *sl = sc->sc_cookie; 4637 int chan = wdc_cp->channel; 4638 int i; 4639 4640 cp->hw_ok = 0; 4641 cp->compat = 0; 4642 cp->ih = sc->sc_pci_ih; 4643 4644 sl->regs[chan].cmd_iot = sl->ba5_st; 4645 if (bus_space_subregion(sl->ba5_st, sl->ba5_sh, 4646 satalink_ba5_regmap[chan].ba5_IDE_TF0, 4647 9, &sl->regs[chan].cmd_baseioh) != 0) { 4648 printf("%s: couldn't subregion %s cmd base\n", 4649 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 4650 return; 4651 } 4652 4653 sl->regs[chan].ctl_iot = sl->ba5_st; 4654 if (bus_space_subregion(sl->ba5_st, sl->ba5_sh, 4655 satalink_ba5_regmap[chan].ba5_IDE_TF8, 4656 1, &cp->ctl_baseioh) != 0) { 4657 printf("%s: couldn't subregion %s ctl base\n", 4658 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 4659 return; 4660 } 4661 sl->regs[chan].ctl_ioh = cp->ctl_baseioh; 4662 4663 for (i = 0; i < WDC_NREG; i++) { 4664 if (bus_space_subregion(sl->regs[chan].cmd_iot, 4665 sl->regs[chan].cmd_baseioh, 4666 i, i == 0 ? 
4 : 1, 4667 &sl->regs[chan].cmd_iohs[i]) != 0) { 4668 printf("%s: couldn't subregion %s channel " 4669 "cmd regs\n", 4670 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 4671 return; 4672 } 4673 } 4674 sl->regs[chan].cmd_iohs[wdr_status & _WDC_REGMASK] = 4675 sl->regs[chan].cmd_iohs[wdr_command & _WDC_REGMASK]; 4676 sl->regs[chan].cmd_iohs[wdr_features & _WDC_REGMASK] = 4677 sl->regs[chan].cmd_iohs[wdr_error & _WDC_REGMASK]; 4678 wdc_cp->data32iot = wdc_cp->cmd_iot = sl->regs[chan].cmd_iot; 4679 wdc_cp->data32ioh = wdc_cp->cmd_ioh = sl->regs[chan].cmd_iohs[0]; 4680 wdc_cp->_vtbl = &wdc_sii3114_vtbl; 4681 wdcattach(wdc_cp); 4682 cp->hw_ok = 1; 4683 } 4684 4685 u_int8_t 4686 sii3114_read_reg(struct channel_softc *chp, enum wdc_regs reg) 4687 { 4688 struct pciide_channel *cp = (struct pciide_channel *)chp; 4689 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 4690 struct pciide_satalink *sl = sc->sc_cookie; 4691 4692 if (reg & _WDC_AUX) 4693 return (bus_space_read_1(sl->regs[chp->channel].ctl_iot, 4694 sl->regs[chp->channel].ctl_ioh, reg & _WDC_REGMASK)); 4695 else 4696 return (bus_space_read_1(sl->regs[chp->channel].cmd_iot, 4697 sl->regs[chp->channel].cmd_iohs[reg & _WDC_REGMASK], 0)); 4698 } 4699 4700 void 4701 sii3114_write_reg(struct channel_softc *chp, enum wdc_regs reg, u_int8_t val) 4702 { 4703 struct pciide_channel *cp = (struct pciide_channel *)chp; 4704 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 4705 struct pciide_satalink *sl = sc->sc_cookie; 4706 4707 if (reg & _WDC_AUX) 4708 bus_space_write_1(sl->regs[chp->channel].ctl_iot, 4709 sl->regs[chp->channel].ctl_ioh, reg & _WDC_REGMASK, val); 4710 else 4711 bus_space_write_1(sl->regs[chp->channel].cmd_iot, 4712 sl->regs[chp->channel].cmd_iohs[reg & _WDC_REGMASK], 4713 0, val); 4714 } 4715 4716 u_int8_t 4717 sii3114_dmacmd_read(struct pciide_softc *sc, int chan) 4718 { 4719 struct pciide_satalink *sl = sc->sc_cookie; 4720 4721 return (bus_space_read_1(sc->sc_dma_iot, 4722 sl->regs[chan].dma_iohs[IDEDMA_CMD(0)], 0)); 4723 } 4724 4725 void 4726 sii3114_dmacmd_write(struct pciide_softc *sc, int chan, u_int8_t val) 4727 { 4728 struct pciide_satalink *sl = sc->sc_cookie; 4729 4730 bus_space_write_1(sc->sc_dma_iot, 4731 sl->regs[chan].dma_iohs[IDEDMA_CMD(0)], 0, val); 4732 } 4733 4734 u_int8_t 4735 sii3114_dmactl_read(struct pciide_softc *sc, int chan) 4736 { 4737 struct pciide_satalink *sl = sc->sc_cookie; 4738 4739 return (bus_space_read_1(sc->sc_dma_iot, 4740 sl->regs[chan].dma_iohs[IDEDMA_CTL(0)], 0)); 4741 } 4742 4743 void 4744 sii3114_dmactl_write(struct pciide_softc *sc, int chan, u_int8_t val) 4745 { 4746 struct pciide_satalink *sl = sc->sc_cookie; 4747 4748 bus_space_write_1(sc->sc_dma_iot, 4749 sl->regs[chan].dma_iohs[IDEDMA_CTL(0)], 0, val); 4750 } 4751 4752 void 4753 sii3114_dmatbl_write(struct pciide_softc *sc, int chan, u_int32_t val) 4754 { 4755 struct pciide_satalink *sl = sc->sc_cookie; 4756 4757 bus_space_write_4(sc->sc_dma_iot, 4758 sl->regs[chan].dma_iohs[IDEDMA_TBL(0)], 0, val); 4759 } 4760 4761 void 4762 cy693_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 4763 { 4764 struct pciide_channel *cp; 4765 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 4766 bus_size_t cmdsize, ctlsize; 4767 struct pciide_cy *cy; 4768 4769 /* Allocate memory for private data */ 4770 sc->sc_cookie = malloc(sizeof(*cy), M_DEVBUF, M_NOWAIT | M_ZERO); 4771 cy = sc->sc_cookie; 4772 4773 /* 4774 * this chip has 2 PCI IDE functions, one for primary and one for 4775 * secondary. 
So we need to call pciide_mapregs_compat() with 4776 * the real channel 4777 */ 4778 if (pa->pa_function == 1) { 4779 cy->cy_compatchan = 0; 4780 } else if (pa->pa_function == 2) { 4781 cy->cy_compatchan = 1; 4782 } else { 4783 printf(": unexpected PCI function %d\n", pa->pa_function); 4784 return; 4785 } 4786 4787 if (interface & PCIIDE_INTERFACE_BUS_MASTER_DMA) { 4788 printf(": DMA"); 4789 pciide_mapreg_dma(sc, pa); 4790 } else { 4791 printf(": no DMA"); 4792 sc->sc_dma_ok = 0; 4793 } 4794 4795 cy->cy_handle = cy82c693_init(pa->pa_iot); 4796 if (cy->cy_handle == NULL) { 4797 printf(", (unable to map ctl registers)"); 4798 sc->sc_dma_ok = 0; 4799 } 4800 4801 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 4802 WDC_CAPABILITY_MODE; 4803 if (sc->sc_dma_ok) { 4804 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 4805 sc->sc_wdcdev.irqack = pciide_irqack; 4806 } 4807 sc->sc_wdcdev.PIO_cap = 4; 4808 sc->sc_wdcdev.DMA_cap = 2; 4809 sc->sc_wdcdev.set_modes = cy693_setup_channel; 4810 4811 sc->sc_wdcdev.channels = sc->wdc_chanarray; 4812 sc->sc_wdcdev.nchannels = 1; 4813 4814 /* Only one channel for this chip; if we are here it's enabled */ 4815 cp = &sc->pciide_channels[0]; 4816 sc->wdc_chanarray[0] = &cp->wdc_channel; 4817 cp->name = PCIIDE_CHANNEL_NAME(0); 4818 cp->wdc_channel.channel = 0; 4819 cp->wdc_channel.wdc = &sc->sc_wdcdev; 4820 cp->wdc_channel.ch_queue = wdc_alloc_queue(); 4821 if (cp->wdc_channel.ch_queue == NULL) { 4822 printf(": cannot allocate channel queue\n"); 4823 return; 4824 } 4825 printf(", %s %s to ", PCIIDE_CHANNEL_NAME(0), 4826 (interface & PCIIDE_INTERFACE_SETTABLE(0)) ? 4827 "configured" : "wired"); 4828 if (interface & PCIIDE_INTERFACE_PCI(0)) { 4829 printf("native-PCI\n"); 4830 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, &ctlsize, 4831 pciide_pci_intr); 4832 } else { 4833 printf("compatibility\n"); 4834 cp->hw_ok = pciide_mapregs_compat(pa, cp, cy->cy_compatchan, 4835 &cmdsize, &ctlsize); 4836 } 4837 4838 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot; 4839 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh; 4840 pciide_map_compat_intr(pa, cp, cy->cy_compatchan, interface); 4841 if (cp->hw_ok == 0) 4842 return; 4843 wdcattach(&cp->wdc_channel); 4844 if (pciide_chan_candisable(cp)) { 4845 pci_conf_write(sc->sc_pc, sc->sc_tag, 4846 PCI_COMMAND_STATUS_REG, 0); 4847 } 4848 if (cp->hw_ok == 0) { 4849 pciide_unmap_compat_intr(pa, cp, cy->cy_compatchan, 4850 interface); 4851 return; 4852 } 4853 4854 WDCDEBUG_PRINT(("cy693_chip_map: old timings reg 0x%x\n", 4855 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE); 4856 cy693_setup_channel(&cp->wdc_channel); 4857 WDCDEBUG_PRINT(("cy693_chip_map: new timings reg 0x%x\n", 4858 pci_conf_read(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL)), DEBUG_PROBE); 4859 } 4860 4861 void 4862 cy693_setup_channel(struct channel_softc *chp) 4863 { 4864 struct ata_drive_datas *drvp; 4865 int drive; 4866 u_int32_t cy_cmd_ctrl; 4867 u_int32_t idedma_ctl; 4868 struct pciide_channel *cp = (struct pciide_channel *)chp; 4869 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 4870 int dma_mode = -1; 4871 struct pciide_cy *cy = sc->sc_cookie; 4872 4873 cy_cmd_ctrl = idedma_ctl = 0; 4874 4875 /* setup DMA if needed */ 4876 pciide_channel_dma_setup(cp); 4877 4878 for (drive = 0; drive < 2; drive++) { 4879 drvp = &chp->ch_drive[drive]; 4880 /* If no drive, skip */ 4881 if ((drvp->drive_flags & DRIVE) == 0) 4882 continue; 4883 /* add timing values, setup DMA if needed */ 4884 if 
(drvp->drive_flags & DRIVE_DMA) { 4885 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 4886 /* use Multiword DMA */ 4887 if (dma_mode == -1 || dma_mode > drvp->DMA_mode) 4888 dma_mode = drvp->DMA_mode; 4889 } 4890 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] << 4891 CY_CMD_CTRL_IOW_PULSE_OFF(drive)); 4892 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] << 4893 CY_CMD_CTRL_IOW_REC_OFF(drive)); 4894 cy_cmd_ctrl |= (cy_pio_pulse[drvp->PIO_mode] << 4895 CY_CMD_CTRL_IOR_PULSE_OFF(drive)); 4896 cy_cmd_ctrl |= (cy_pio_rec[drvp->PIO_mode] << 4897 CY_CMD_CTRL_IOR_REC_OFF(drive)); 4898 } 4899 pci_conf_write(sc->sc_pc, sc->sc_tag, CY_CMD_CTRL, cy_cmd_ctrl); 4900 chp->ch_drive[0].DMA_mode = dma_mode; 4901 chp->ch_drive[1].DMA_mode = dma_mode; 4902 4903 if (dma_mode == -1) 4904 dma_mode = 0; 4905 4906 if (cy->cy_handle != NULL) { 4907 /* Note: `multiple' is implied. */ 4908 cy82c693_write(cy->cy_handle, 4909 (cy->cy_compatchan == 0) ? 4910 CY_DMA_IDX_PRIMARY : CY_DMA_IDX_SECONDARY, dma_mode); 4911 } 4912 4913 pciide_print_modes(cp); 4914 4915 if (idedma_ctl != 0) { 4916 /* Add software bits in status register */ 4917 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 4918 IDEDMA_CTL(chp->channel), idedma_ctl); 4919 } 4920 } 4921 4922 static struct sis_hostbr_type { 4923 u_int16_t id; 4924 u_int8_t rev; 4925 u_int8_t udma_mode; 4926 char *name; 4927 u_int8_t type; 4928 #define SIS_TYPE_NOUDMA 0 4929 #define SIS_TYPE_66 1 4930 #define SIS_TYPE_100OLD 2 4931 #define SIS_TYPE_100NEW 3 4932 #define SIS_TYPE_133OLD 4 4933 #define SIS_TYPE_133NEW 5 4934 #define SIS_TYPE_SOUTH 6 4935 } sis_hostbr_type[] = { 4936 /* Most infos here are from sos@freebsd.org */ 4937 {PCI_PRODUCT_SIS_530, 0x00, 4, "530", SIS_TYPE_66}, 4938 #if 0 4939 /* 4940 * controllers associated to a rev 0x2 530 Host to PCI Bridge 4941 * have problems with UDMA (info provided by Christos) 4942 */ 4943 {PCI_PRODUCT_SIS_530, 0x02, 0, "530 (buggy)", SIS_TYPE_NOUDMA}, 4944 #endif 4945 {PCI_PRODUCT_SIS_540, 0x00, 4, "540", SIS_TYPE_66}, 4946 {PCI_PRODUCT_SIS_550, 0x00, 4, "550", SIS_TYPE_66}, 4947 {PCI_PRODUCT_SIS_620, 0x00, 4, "620", SIS_TYPE_66}, 4948 {PCI_PRODUCT_SIS_630, 0x00, 4, "630", SIS_TYPE_66}, 4949 {PCI_PRODUCT_SIS_630, 0x30, 5, "630S", SIS_TYPE_100NEW}, 4950 {PCI_PRODUCT_SIS_633, 0x00, 5, "633", SIS_TYPE_100NEW}, 4951 {PCI_PRODUCT_SIS_635, 0x00, 5, "635", SIS_TYPE_100NEW}, 4952 {PCI_PRODUCT_SIS_640, 0x00, 4, "640", SIS_TYPE_SOUTH}, 4953 {PCI_PRODUCT_SIS_645, 0x00, 6, "645", SIS_TYPE_SOUTH}, 4954 {PCI_PRODUCT_SIS_646, 0x00, 6, "645DX", SIS_TYPE_SOUTH}, 4955 {PCI_PRODUCT_SIS_648, 0x00, 6, "648", SIS_TYPE_SOUTH}, 4956 {PCI_PRODUCT_SIS_650, 0x00, 6, "650", SIS_TYPE_SOUTH}, 4957 {PCI_PRODUCT_SIS_651, 0x00, 6, "651", SIS_TYPE_SOUTH}, 4958 {PCI_PRODUCT_SIS_652, 0x00, 6, "652", SIS_TYPE_SOUTH}, 4959 {PCI_PRODUCT_SIS_655, 0x00, 6, "655", SIS_TYPE_SOUTH}, 4960 {PCI_PRODUCT_SIS_658, 0x00, 6, "658", SIS_TYPE_SOUTH}, 4961 {PCI_PRODUCT_SIS_661, 0x00, 6, "661", SIS_TYPE_SOUTH}, 4962 {PCI_PRODUCT_SIS_730, 0x00, 5, "730", SIS_TYPE_100OLD}, 4963 {PCI_PRODUCT_SIS_733, 0x00, 5, "733", SIS_TYPE_100NEW}, 4964 {PCI_PRODUCT_SIS_735, 0x00, 5, "735", SIS_TYPE_100NEW}, 4965 {PCI_PRODUCT_SIS_740, 0x00, 5, "740", SIS_TYPE_SOUTH}, 4966 {PCI_PRODUCT_SIS_741, 0x00, 6, "741", SIS_TYPE_SOUTH}, 4967 {PCI_PRODUCT_SIS_745, 0x00, 5, "745", SIS_TYPE_100NEW}, 4968 {PCI_PRODUCT_SIS_746, 0x00, 6, "746", SIS_TYPE_SOUTH}, 4969 {PCI_PRODUCT_SIS_748, 0x00, 6, "748", SIS_TYPE_SOUTH}, 4970 {PCI_PRODUCT_SIS_750, 0x00, 6, "750", SIS_TYPE_SOUTH}, 4971 {PCI_PRODUCT_SIS_751, 0x00, 6, "751", 
SIS_TYPE_SOUTH}, 4972
{PCI_PRODUCT_SIS_752, 0x00, 6, "752", SIS_TYPE_SOUTH}, 4973
{PCI_PRODUCT_SIS_755, 0x00, 6, "755", SIS_TYPE_SOUTH}, 4974
{PCI_PRODUCT_SIS_760, 0x00, 6, "760", SIS_TYPE_SOUTH}, 4975
/* 4976
* From sos@freebsd.org: the 0x961 ID will never be found in real world 4977
* {PCI_PRODUCT_SIS_961, 0x00, 6, "961", SIS_TYPE_133NEW}, 4978
*/ 4979
{PCI_PRODUCT_SIS_962, 0x00, 6, "962", SIS_TYPE_133NEW}, 4980
{PCI_PRODUCT_SIS_963, 0x00, 6, "963", SIS_TYPE_133NEW}, 4981
{PCI_PRODUCT_SIS_964, 0x00, 6, "964", SIS_TYPE_133NEW}, 4982
{PCI_PRODUCT_SIS_965, 0x00, 6, "965", SIS_TYPE_133NEW}, 4983
{PCI_PRODUCT_SIS_966, 0x00, 6, "966", SIS_TYPE_133NEW}, 4984
{PCI_PRODUCT_SIS_968, 0x00, 6, "968", SIS_TYPE_133NEW} 4985
}; 4986
4987
static struct sis_hostbr_type *sis_hostbr_type_match; 4988
4989
int 4990
sis_hostbr_match(struct pci_attach_args *pa) 4991
{ 4992
int i; 4993
4994
if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_SIS) 4995
return (0); 4996
sis_hostbr_type_match = NULL; 4997
for (i = 0; 4998
i < sizeof(sis_hostbr_type) / sizeof(sis_hostbr_type[0]); 4999
i++) { 5000
if (PCI_PRODUCT(pa->pa_id) == sis_hostbr_type[i].id && 5001
PCI_REVISION(pa->pa_class) >= sis_hostbr_type[i].rev) 5002
sis_hostbr_type_match = &sis_hostbr_type[i]; 5003
} 5004
return (sis_hostbr_type_match != NULL); 5005
} 5006
5007
int 5008
sis_south_match(struct pci_attach_args *pa) 5009
{ 5010
return(PCI_VENDOR(pa->pa_id) == PCI_VENDOR_SIS && 5011
PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SIS_85C503 && 5012
PCI_REVISION(pa->pa_class) >= 0x10); 5013
} 5014
5015
void 5016
sis_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 5017
{ 5018
struct pciide_channel *cp; 5019
int channel; 5020
u_int8_t sis_ctr0 = pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_CTRL0); 5021
pcireg_t interface = PCI_INTERFACE(pa->pa_class); 5022
int rev = sc->sc_rev; 5023
bus_size_t cmdsize, ctlsize; 5024
struct pciide_sis *sis; 5025
5026
/* Allocate memory for private data */ 5027
sc->sc_cookie = malloc(sizeof(*sis), M_DEVBUF, M_NOWAIT | M_ZERO); 5028
sis = sc->sc_cookie; 5029
5030
pci_find_device(NULL, sis_hostbr_match); 5031
5032
if (sis_hostbr_type_match) { 5033
if (sis_hostbr_type_match->type == SIS_TYPE_SOUTH) { 5034
pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_57, 5035
pciide_pci_read(sc->sc_pc, sc->sc_tag, 5036
SIS_REG_57) & 0x7f); 5037
if (sc->sc_pp->ide_product == PCI_PRODUCT_SIS_5518) { 5038
sis->sis_type = SIS_TYPE_133NEW; 5039
sc->sc_wdcdev.UDMA_cap = 5040
sis_hostbr_type_match->udma_mode; 5041
} else { 5042
if (pci_find_device(NULL, sis_south_match)) { 5043
sis->sis_type = SIS_TYPE_133OLD; 5044
sc->sc_wdcdev.UDMA_cap = 5045
sis_hostbr_type_match->udma_mode; 5046
} else { 5047
sis->sis_type = SIS_TYPE_100NEW; 5048
sc->sc_wdcdev.UDMA_cap = 5049
sis_hostbr_type_match->udma_mode; 5050
} 5051
} 5052
} else { 5053
sis->sis_type = sis_hostbr_type_match->type; 5054
sc->sc_wdcdev.UDMA_cap = 5055
sis_hostbr_type_match->udma_mode; 5056
} 5057
printf(": %s", sis_hostbr_type_match->name); 5058
} else { 5059
printf(": 5597/5598"); 5060
if (rev >= 0xd0) { 5061
sc->sc_wdcdev.UDMA_cap = 2; 5062
sis->sis_type = SIS_TYPE_66; 5063
} else { 5064
sc->sc_wdcdev.UDMA_cap = 0; 5065
sis->sis_type = SIS_TYPE_NOUDMA; 5066
} 5067
} 5068
5069
printf(": DMA"); 5070
pciide_mapreg_dma(sc, pa); 5071
5072
sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 5073
WDC_CAPABILITY_MODE; 5074
if (sc->sc_dma_ok) { 5075
sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 5076
sc->sc_wdcdev.irqack = pciide_irqack; 5077
if (sis->sis_type >= SIS_TYPE_66) 5078
sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 5079 } 5080 5081 sc->sc_wdcdev.PIO_cap = 4; 5082 sc->sc_wdcdev.DMA_cap = 2; 5083 5084 sc->sc_wdcdev.channels = sc->wdc_chanarray; 5085 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 5086 switch (sis->sis_type) { 5087 case SIS_TYPE_NOUDMA: 5088 case SIS_TYPE_66: 5089 case SIS_TYPE_100OLD: 5090 sc->sc_wdcdev.set_modes = sis_setup_channel; 5091 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_MISC, 5092 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_MISC) | 5093 SIS_MISC_TIM_SEL | SIS_MISC_FIFO_SIZE | SIS_MISC_GTC); 5094 break; 5095 case SIS_TYPE_100NEW: 5096 case SIS_TYPE_133OLD: 5097 sc->sc_wdcdev.set_modes = sis_setup_channel; 5098 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_49, 5099 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_49) | 0x01); 5100 break; 5101 case SIS_TYPE_133NEW: 5102 sc->sc_wdcdev.set_modes = sis96x_setup_channel; 5103 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_50, 5104 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_50) & 0xf7); 5105 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_REG_52, 5106 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_52) & 0xf7); 5107 break; 5108 } 5109 5110 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 5111 5112 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 5113 cp = &sc->pciide_channels[channel]; 5114 if (pciide_chansetup(sc, channel, interface) == 0) 5115 continue; 5116 if ((channel == 0 && (sis_ctr0 & SIS_CTRL0_CHAN0_EN) == 0) || 5117 (channel == 1 && (sis_ctr0 & SIS_CTRL0_CHAN1_EN) == 0)) { 5118 printf("%s: %s ignored (disabled)\n", 5119 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 5120 continue; 5121 } 5122 pciide_map_compat_intr(pa, cp, channel, interface); 5123 if (cp->hw_ok == 0) 5124 continue; 5125 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 5126 pciide_pci_intr); 5127 if (cp->hw_ok == 0) { 5128 pciide_unmap_compat_intr(pa, cp, channel, interface); 5129 continue; 5130 } 5131 if (pciide_chan_candisable(cp)) { 5132 if (channel == 0) 5133 sis_ctr0 &= ~SIS_CTRL0_CHAN0_EN; 5134 else 5135 sis_ctr0 &= ~SIS_CTRL0_CHAN1_EN; 5136 pciide_pci_write(sc->sc_pc, sc->sc_tag, SIS_CTRL0, 5137 sis_ctr0); 5138 } 5139 if (cp->hw_ok == 0) { 5140 pciide_unmap_compat_intr(pa, cp, channel, interface); 5141 continue; 5142 } 5143 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 5144 } 5145 } 5146 5147 void 5148 sis96x_setup_channel(struct channel_softc *chp) 5149 { 5150 struct ata_drive_datas *drvp; 5151 int drive; 5152 u_int32_t sis_tim; 5153 u_int32_t idedma_ctl; 5154 int regtim; 5155 struct pciide_channel *cp = (struct pciide_channel *)chp; 5156 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 5157 5158 sis_tim = 0; 5159 idedma_ctl = 0; 5160 /* setup DMA if needed */ 5161 pciide_channel_dma_setup(cp); 5162 5163 for (drive = 0; drive < 2; drive++) { 5164 regtim = SIS_TIM133( 5165 pciide_pci_read(sc->sc_pc, sc->sc_tag, SIS_REG_57), 5166 chp->channel, drive); 5167 drvp = &chp->ch_drive[drive]; 5168 /* If no drive, skip */ 5169 if ((drvp->drive_flags & DRIVE) == 0) 5170 continue; 5171 /* add timing values, setup DMA if needed */ 5172 if (drvp->drive_flags & DRIVE_UDMA) { 5173 /* use Ultra/DMA */ 5174 drvp->drive_flags &= ~DRIVE_DMA; 5175 if (pciide_pci_read(sc->sc_pc, sc->sc_tag, 5176 SIS96x_REG_CBL(chp->channel)) & SIS96x_REG_CBL_33) { 5177 if (drvp->UDMA_mode > 2) 5178 drvp->UDMA_mode = 2; 5179 } 5180 sis_tim |= sis_udma133new_tim[drvp->UDMA_mode]; 5181 sis_tim |= sis_pio133new_tim[drvp->PIO_mode]; 5182 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 5183 } else if 
(drvp->drive_flags & DRIVE_DMA) { 5184 /* 5185 * use Multiword DMA 5186 * Timings will be used for both PIO and DMA, 5187 * so adjust DMA mode if needed 5188 */ 5189 if (drvp->PIO_mode > (drvp->DMA_mode + 2)) 5190 drvp->PIO_mode = drvp->DMA_mode + 2; 5191 if (drvp->DMA_mode + 2 > (drvp->PIO_mode)) 5192 drvp->DMA_mode = (drvp->PIO_mode > 2) ? 5193 drvp->PIO_mode - 2 : 0; 5194 sis_tim |= sis_dma133new_tim[drvp->DMA_mode]; 5195 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 5196 } else { 5197 sis_tim |= sis_pio133new_tim[drvp->PIO_mode]; 5198 } 5199 WDCDEBUG_PRINT(("sis96x_setup_channel: new timings reg for " 5200 "channel %d drive %d: 0x%x (reg 0x%x)\n", 5201 chp->channel, drive, sis_tim, regtim), DEBUG_PROBE); 5202 pci_conf_write(sc->sc_pc, sc->sc_tag, regtim, sis_tim); 5203 } 5204 if (idedma_ctl != 0) { 5205 /* Add software bits in status register */ 5206 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5207 IDEDMA_CTL(chp->channel), idedma_ctl); 5208 } 5209 pciide_print_modes(cp); 5210 } 5211 5212 void 5213 sis_setup_channel(struct channel_softc *chp) 5214 { 5215 struct ata_drive_datas *drvp; 5216 int drive; 5217 u_int32_t sis_tim; 5218 u_int32_t idedma_ctl; 5219 struct pciide_channel *cp = (struct pciide_channel *)chp; 5220 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 5221 struct pciide_sis *sis = sc->sc_cookie; 5222 5223 WDCDEBUG_PRINT(("sis_setup_channel: old timings reg for " 5224 "channel %d 0x%x\n", chp->channel, 5225 pci_conf_read(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel))), 5226 DEBUG_PROBE); 5227 sis_tim = 0; 5228 idedma_ctl = 0; 5229 /* setup DMA if needed */ 5230 pciide_channel_dma_setup(cp); 5231 5232 for (drive = 0; drive < 2; drive++) { 5233 drvp = &chp->ch_drive[drive]; 5234 /* If no drive, skip */ 5235 if ((drvp->drive_flags & DRIVE) == 0) 5236 continue; 5237 /* add timing values, setup DMA if needed */ 5238 if ((drvp->drive_flags & DRIVE_DMA) == 0 && 5239 (drvp->drive_flags & DRIVE_UDMA) == 0) 5240 goto pio; 5241 5242 if (drvp->drive_flags & DRIVE_UDMA) { 5243 /* use Ultra/DMA */ 5244 drvp->drive_flags &= ~DRIVE_DMA; 5245 if (pciide_pci_read(sc->sc_pc, sc->sc_tag, 5246 SIS_REG_CBL) & SIS_REG_CBL_33(chp->channel)) { 5247 if (drvp->UDMA_mode > 2) 5248 drvp->UDMA_mode = 2; 5249 } 5250 switch (sis->sis_type) { 5251 case SIS_TYPE_66: 5252 case SIS_TYPE_100OLD: 5253 sis_tim |= sis_udma66_tim[drvp->UDMA_mode] << 5254 SIS_TIM66_UDMA_TIME_OFF(drive); 5255 break; 5256 case SIS_TYPE_100NEW: 5257 sis_tim |= 5258 sis_udma100new_tim[drvp->UDMA_mode] << 5259 SIS_TIM100_UDMA_TIME_OFF(drive); 5260 break; 5261 case SIS_TYPE_133OLD: 5262 sis_tim |= 5263 sis_udma133old_tim[drvp->UDMA_mode] << 5264 SIS_TIM100_UDMA_TIME_OFF(drive); 5265 break; 5266 default: 5267 printf("unknown SiS IDE type %d\n", 5268 sis->sis_type); 5269 } 5270 } else { 5271 /* 5272 * use Multiword DMA 5273 * Timings will be used for both PIO and DMA, 5274 * so adjust DMA mode if needed 5275 */ 5276 if (drvp->PIO_mode > (drvp->DMA_mode + 2)) 5277 drvp->PIO_mode = drvp->DMA_mode + 2; 5278 if (drvp->DMA_mode + 2 > (drvp->PIO_mode)) 5279 drvp->DMA_mode = (drvp->PIO_mode > 2) ? 
5280 drvp->PIO_mode - 2 : 0; 5281 if (drvp->DMA_mode == 0) 5282 drvp->PIO_mode = 0; 5283 } 5284 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 5285 pio: switch (sis->sis_type) { 5286 case SIS_TYPE_NOUDMA: 5287 case SIS_TYPE_66: 5288 case SIS_TYPE_100OLD: 5289 sis_tim |= sis_pio_act[drvp->PIO_mode] << 5290 SIS_TIM66_ACT_OFF(drive); 5291 sis_tim |= sis_pio_rec[drvp->PIO_mode] << 5292 SIS_TIM66_REC_OFF(drive); 5293 break; 5294 case SIS_TYPE_100NEW: 5295 case SIS_TYPE_133OLD: 5296 sis_tim |= sis_pio_act[drvp->PIO_mode] << 5297 SIS_TIM100_ACT_OFF(drive); 5298 sis_tim |= sis_pio_rec[drvp->PIO_mode] << 5299 SIS_TIM100_REC_OFF(drive); 5300 break; 5301 default: 5302 printf("unknown SiS IDE type %d\n", 5303 sis->sis_type); 5304 } 5305 } 5306 WDCDEBUG_PRINT(("sis_setup_channel: new timings reg for " 5307 "channel %d 0x%x\n", chp->channel, sis_tim), DEBUG_PROBE); 5308 pci_conf_write(sc->sc_pc, sc->sc_tag, SIS_TIM(chp->channel), sis_tim); 5309 if (idedma_ctl != 0) { 5310 /* Add software bits in status register */ 5311 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5312 IDEDMA_CTL(chp->channel), idedma_ctl); 5313 } 5314 pciide_print_modes(cp); 5315 } 5316 5317 void 5318 natsemi_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 5319 { 5320 struct pciide_channel *cp; 5321 int channel; 5322 pcireg_t interface, ctl; 5323 bus_size_t cmdsize, ctlsize; 5324 5325 printf(": DMA"); 5326 pciide_mapreg_dma(sc, pa); 5327 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16; 5328 5329 if (sc->sc_dma_ok) { 5330 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 5331 sc->sc_wdcdev.irqack = natsemi_irqack; 5332 } 5333 5334 pciide_pci_write(sc->sc_pc, sc->sc_tag, NATSEMI_CCBT, 0xb7); 5335 5336 /* 5337 * Mask off interrupts from both channels, appropriate channel(s) 5338 * will be unmasked later. 5339 */ 5340 pciide_pci_write(sc->sc_pc, sc->sc_tag, NATSEMI_CTRL2, 5341 pciide_pci_read(sc->sc_pc, sc->sc_tag, NATSEMI_CTRL2) | 5342 NATSEMI_CHMASK(0) | NATSEMI_CHMASK(1)); 5343 5344 sc->sc_wdcdev.PIO_cap = 4; 5345 sc->sc_wdcdev.DMA_cap = 2; 5346 sc->sc_wdcdev.set_modes = natsemi_setup_channel; 5347 sc->sc_wdcdev.channels = sc->wdc_chanarray; 5348 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 5349 5350 interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag, 5351 PCI_CLASS_REG)); 5352 interface &= ~PCIIDE_CHANSTATUS_EN; /* Reserved on PC87415 */ 5353 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 5354 5355 /* If we're in PCIIDE mode, unmask INTA, otherwise mask it. 
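In compatibility mode the channels interrupt via the legacy ISA IRQs instead, so INTA stays masked.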
*/ 5356 ctl = pciide_pci_read(sc->sc_pc, sc->sc_tag, NATSEMI_CTRL1); 5357 if (interface & (PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1))) 5358 ctl &= ~NATSEMI_CTRL1_INTAMASK; 5359 else 5360 ctl |= NATSEMI_CTRL1_INTAMASK; 5361 pciide_pci_write(sc->sc_pc, sc->sc_tag, NATSEMI_CTRL1, ctl); 5362 5363 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 5364 cp = &sc->pciide_channels[channel]; 5365 if (pciide_chansetup(sc, channel, interface) == 0) 5366 continue; 5367 5368 pciide_map_compat_intr(pa, cp, channel, interface); 5369 if (cp->hw_ok == 0) 5370 continue; 5371 5372 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 5373 natsemi_pci_intr); 5374 if (cp->hw_ok == 0) { 5375 pciide_unmap_compat_intr(pa, cp, channel, interface); 5376 continue; 5377 } 5378 natsemi_setup_channel(&cp->wdc_channel); 5379 } 5380 } 5381 5382 void 5383 natsemi_setup_channel(struct channel_softc *chp) 5384 { 5385 struct ata_drive_datas *drvp; 5386 int drive, ndrives = 0; 5387 u_int32_t idedma_ctl = 0; 5388 struct pciide_channel *cp = (struct pciide_channel *)chp; 5389 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 5390 u_int8_t tim; 5391 5392 /* setup DMA if needed */ 5393 pciide_channel_dma_setup(cp); 5394 5395 for (drive = 0; drive < 2; drive++) { 5396 drvp = &chp->ch_drive[drive]; 5397 /* If no drive, skip */ 5398 if ((drvp->drive_flags & DRIVE) == 0) 5399 continue; 5400 5401 ndrives++; 5402 /* add timing values, setup DMA if needed */ 5403 if ((drvp->drive_flags & DRIVE_DMA) == 0) { 5404 tim = natsemi_pio_pulse[drvp->PIO_mode] | 5405 (natsemi_pio_recover[drvp->PIO_mode] << 4); 5406 } else { 5407 /* 5408 * use Multiword DMA 5409 * Timings will be used for both PIO and DMA, 5410 * so adjust DMA mode if needed 5411 */ 5412 if (drvp->PIO_mode >= 3 && 5413 (drvp->DMA_mode + 2) > drvp->PIO_mode) { 5414 drvp->DMA_mode = drvp->PIO_mode - 2; 5415 } 5416 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 5417 tim = natsemi_dma_pulse[drvp->DMA_mode] | 5418 (natsemi_dma_recover[drvp->DMA_mode] << 4); 5419 } 5420 5421 pciide_pci_write(sc->sc_pc, sc->sc_tag, 5422 NATSEMI_RTREG(chp->channel, drive), tim); 5423 pciide_pci_write(sc->sc_pc, sc->sc_tag, 5424 NATSEMI_WTREG(chp->channel, drive), tim); 5425 } 5426 if (idedma_ctl != 0) { 5427 /* Add software bits in status register */ 5428 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5429 IDEDMA_CTL(chp->channel), idedma_ctl); 5430 } 5431 if (ndrives > 0) { 5432 /* Unmask the channel if at least one drive is found */ 5433 pciide_pci_write(sc->sc_pc, sc->sc_tag, NATSEMI_CTRL2, 5434 pciide_pci_read(sc->sc_pc, sc->sc_tag, NATSEMI_CTRL2) & 5435 ~(NATSEMI_CHMASK(chp->channel))); 5436 } 5437 5438 pciide_print_modes(cp); 5439 5440 /* Go ahead and ack interrupts generated during probe. 
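Writing the IDEDMA_CTL value back to itself clears the write-1-to-clear interrupt and error bits.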
*/ 5441 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5442 IDEDMA_CTL(chp->channel), 5443 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5444 IDEDMA_CTL(chp->channel))); 5445 } 5446 5447 void 5448 natsemi_irqack(struct channel_softc *chp) 5449 { 5450 struct pciide_channel *cp = (struct pciide_channel *)chp; 5451 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 5452 u_int8_t clr; 5453 5454 /* The "clear" bits are in the wrong register *sigh* */ 5455 clr = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5456 IDEDMA_CMD(chp->channel)); 5457 clr |= bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5458 IDEDMA_CTL(chp->channel)) & 5459 (IDEDMA_CTL_ERR | IDEDMA_CTL_INTR); 5460 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5461 IDEDMA_CMD(chp->channel), clr); 5462 } 5463 5464 int 5465 natsemi_pci_intr(void *arg) 5466 { 5467 struct pciide_softc *sc = arg; 5468 struct pciide_channel *cp; 5469 struct channel_softc *wdc_cp; 5470 int i, rv, crv; 5471 u_int8_t msk; 5472 5473 rv = 0; 5474 msk = pciide_pci_read(sc->sc_pc, sc->sc_tag, NATSEMI_CTRL2); 5475 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 5476 cp = &sc->pciide_channels[i]; 5477 wdc_cp = &cp->wdc_channel; 5478 5479 /* If a compat channel skip. */ 5480 if (cp->compat) 5481 continue; 5482 5483 /* If this channel is masked, skip it. */ 5484 if (msk & NATSEMI_CHMASK(i)) 5485 continue; 5486 5487 if (pciide_intr_flag(cp) == 0) 5488 continue; 5489 5490 crv = wdcintr(wdc_cp); 5491 if (crv == 0) 5492 ; /* leave rv alone */ 5493 else if (crv == 1) 5494 rv = 1; /* claim the intr */ 5495 else if (rv == 0) /* crv should be -1 in this case */ 5496 rv = crv; /* if we've done no better, take it */ 5497 } 5498 return (rv); 5499 } 5500 5501 void 5502 ns_scx200_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 5503 { 5504 struct pciide_channel *cp; 5505 int channel; 5506 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 5507 bus_size_t cmdsize, ctlsize; 5508 5509 printf(": DMA"); 5510 pciide_mapreg_dma(sc, pa); 5511 5512 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 5513 WDC_CAPABILITY_MODE; 5514 if (sc->sc_dma_ok) { 5515 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 5516 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 5517 sc->sc_wdcdev.irqack = pciide_irqack; 5518 } 5519 sc->sc_wdcdev.PIO_cap = 4; 5520 sc->sc_wdcdev.DMA_cap = 2; 5521 sc->sc_wdcdev.UDMA_cap = 2; 5522 5523 sc->sc_wdcdev.set_modes = ns_scx200_setup_channel; 5524 sc->sc_wdcdev.channels = sc->wdc_chanarray; 5525 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 5526 5527 /* 5528 * Soekris net4801 errata 0003: 5529 * 5530 * The SC1100 built in busmaster IDE controller is pretty standard, 5531 * but have two bugs: data transfers need to be dword aligned and 5532 * it cannot do an exact 64Kbyte data transfer. 5533 * 5534 * Assume that reducing maximum segment size by one page 5535 * will be enough, and restrict boundary too for extra certainty. 5536 */ 5537 if (sc->sc_pp->ide_product == PCI_PRODUCT_NS_SCx200_IDE) { 5538 sc->sc_dma_maxsegsz = IDEDMA_BYTE_COUNT_MAX - PAGE_SIZE; 5539 sc->sc_dma_boundary = IDEDMA_BYTE_COUNT_MAX - PAGE_SIZE; 5540 } 5541 5542 /* 5543 * This chip seems to be unable to do one-sector transfers 5544 * using DMA. 
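* Flag it with WDC_QUIRK_NOSHORTDMA (set just below) so the wdc layer avoids DMA for such transfers.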
5545 */ 5546 sc->sc_wdcdev.quirks = WDC_QUIRK_NOSHORTDMA; 5547 5548 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 5549 5550 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 5551 cp = &sc->pciide_channels[channel]; 5552 if (pciide_chansetup(sc, channel, interface) == 0) 5553 continue; 5554 pciide_map_compat_intr(pa, cp, channel, interface); 5555 if (cp->hw_ok == 0) 5556 continue; 5557 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 5558 pciide_pci_intr); 5559 if (cp->hw_ok == 0) { 5560 pciide_unmap_compat_intr(pa, cp, channel, interface); 5561 continue; 5562 } 5563 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 5564 } 5565 } 5566 5567 void 5568 ns_scx200_setup_channel(struct channel_softc *chp) 5569 { 5570 struct ata_drive_datas *drvp; 5571 int drive, mode; 5572 u_int32_t idedma_ctl; 5573 struct pciide_channel *cp = (struct pciide_channel*)chp; 5574 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 5575 int channel = chp->channel; 5576 int pioformat; 5577 pcireg_t piotim, dmatim; 5578 5579 /* Setup DMA if needed */ 5580 pciide_channel_dma_setup(cp); 5581 5582 idedma_ctl = 0; 5583 5584 pioformat = (pci_conf_read(sc->sc_pc, sc->sc_tag, 5585 SCx200_TIM_DMA(0, 0)) >> SCx200_PIOFORMAT_SHIFT) & 0x01; 5586 WDCDEBUG_PRINT(("%s: pio format %d\n", __func__, pioformat), 5587 DEBUG_PROBE); 5588 5589 /* Per channel settings */ 5590 for (drive = 0; drive < 2; drive++) { 5591 drvp = &chp->ch_drive[drive]; 5592 5593 /* If no drive, skip */ 5594 if ((drvp->drive_flags & DRIVE) == 0) 5595 continue; 5596 5597 piotim = pci_conf_read(sc->sc_pc, sc->sc_tag, 5598 SCx200_TIM_PIO(channel, drive)); 5599 dmatim = pci_conf_read(sc->sc_pc, sc->sc_tag, 5600 SCx200_TIM_DMA(channel, drive)); 5601 WDCDEBUG_PRINT(("%s:%d:%d: piotim=0x%x, dmatim=0x%x\n", 5602 sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, 5603 piotim, dmatim), DEBUG_PROBE); 5604 5605 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) != 0 && 5606 (drvp->drive_flags & DRIVE_UDMA) != 0) { 5607 /* Setup UltraDMA mode */ 5608 drvp->drive_flags &= ~DRIVE_DMA; 5609 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 5610 dmatim = scx200_udma33[drvp->UDMA_mode]; 5611 mode = drvp->PIO_mode; 5612 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) != 0 && 5613 (drvp->drive_flags & DRIVE_DMA) != 0) { 5614 /* Setup multiword DMA mode */ 5615 drvp->drive_flags &= ~DRIVE_UDMA; 5616 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 5617 dmatim = scx200_dma33[drvp->DMA_mode]; 5618 5619 /* mode = min(pio, dma + 2) */ 5620 if (drvp->PIO_mode <= (drvp->DMA_mode + 2)) 5621 mode = drvp->PIO_mode; 5622 else 5623 mode = drvp->DMA_mode + 2; 5624 } else { 5625 mode = drvp->PIO_mode; 5626 } 5627 5628 /* Setup PIO mode */ 5629 drvp->PIO_mode = mode; 5630 if (mode < 2) 5631 drvp->DMA_mode = 0; 5632 else 5633 drvp->DMA_mode = mode - 2; 5634 5635 piotim = scx200_pio33[pioformat][drvp->PIO_mode]; 5636 5637 WDCDEBUG_PRINT(("%s:%d:%d: new piotim=0x%x, dmatim=0x%x\n", 5638 sc->sc_wdcdev.sc_dev.dv_xname, channel, drive, 5639 piotim, dmatim), DEBUG_PROBE); 5640 5641 pci_conf_write(sc->sc_pc, sc->sc_tag, 5642 SCx200_TIM_PIO(channel, drive), piotim); 5643 pci_conf_write(sc->sc_pc, sc->sc_tag, 5644 SCx200_TIM_DMA(channel, drive), dmatim); 5645 } 5646 5647 if (idedma_ctl != 0) { 5648 /* Add software bits in status register */ 5649 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5650 IDEDMA_CTL(channel), idedma_ctl); 5651 } 5652 5653 pciide_print_modes(cp); 5654 } 5655 5656 void 5657 acer_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 5658 { 5659 struct 
pciide_channel *cp; 5660 int channel; 5661 pcireg_t cr, interface; 5662 bus_size_t cmdsize, ctlsize; 5663 int rev = sc->sc_rev; 5664 5665 printf(": DMA"); 5666 pciide_mapreg_dma(sc, pa); 5667 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 5668 WDC_CAPABILITY_MODE; 5669 5670 if (sc->sc_dma_ok) { 5671 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA; 5672 if (rev >= 0x20) { 5673 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA; 5674 if (rev >= 0xC4) 5675 sc->sc_wdcdev.UDMA_cap = 5; 5676 else if (rev >= 0xC2) 5677 sc->sc_wdcdev.UDMA_cap = 4; 5678 else 5679 sc->sc_wdcdev.UDMA_cap = 2; 5680 } 5681 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 5682 sc->sc_wdcdev.irqack = pciide_irqack; 5683 if (rev <= 0xC4) 5684 sc->sc_wdcdev.dma_init = acer_dma_init; 5685 } 5686 5687 sc->sc_wdcdev.PIO_cap = 4; 5688 sc->sc_wdcdev.DMA_cap = 2; 5689 sc->sc_wdcdev.set_modes = acer_setup_channel; 5690 sc->sc_wdcdev.channels = sc->wdc_chanarray; 5691 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 5692 5693 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CDRC, 5694 (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CDRC) | 5695 ACER_CDRC_DMA_EN) & ~ACER_CDRC_FIFO_DISABLE); 5696 5697 /* Enable "microsoft register bits" R/W. */ 5698 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR3, 5699 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR3) | ACER_CCAR3_PI); 5700 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR1, 5701 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR1) & 5702 ~(ACER_CHANSTATUS_RO|PCIIDE_CHAN_RO(0)|PCIIDE_CHAN_RO(1))); 5703 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR2, 5704 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR2) & 5705 ~ACER_CHANSTATUSREGS_RO); 5706 cr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG); 5707 cr |= (PCIIDE_CHANSTATUS_EN << PCI_INTERFACE_SHIFT); 5708 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG, cr); 5709 /* Don't use cr, re-read the real register content instead */ 5710 interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag, 5711 PCI_CLASS_REG)); 5712 5713 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 5714 5715 /* From linux: enable "Cable Detection" */ 5716 if (rev >= 0xC2) 5717 pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_0x4B, 5718 pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4B) 5719 | ACER_0x4B_CDETECT); 5720 5721 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 5722 cp = &sc->pciide_channels[channel]; 5723 if (pciide_chansetup(sc, channel, interface) == 0) 5724 continue; 5725 if ((interface & PCIIDE_CHAN_EN(channel)) == 0) { 5726 printf("%s: %s ignored (disabled)\n", 5727 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 5728 continue; 5729 } 5730 pciide_map_compat_intr(pa, cp, channel, interface); 5731 if (cp->hw_ok == 0) 5732 continue; 5733 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 5734 (rev >= 0xC2) ? 
pciide_pci_intr : acer_pci_intr); 5735 if (cp->hw_ok == 0) { 5736 pciide_unmap_compat_intr(pa, cp, channel, interface); 5737 continue; 5738 } 5739 if (pciide_chan_candisable(cp)) { 5740 cr &= ~(PCIIDE_CHAN_EN(channel) << PCI_INTERFACE_SHIFT); 5741 pci_conf_write(sc->sc_pc, sc->sc_tag, 5742 PCI_CLASS_REG, cr); 5743 } 5744 if (cp->hw_ok == 0) { 5745 pciide_unmap_compat_intr(pa, cp, channel, interface); 5746 continue; 5747 } 5748 acer_setup_channel(&cp->wdc_channel); 5749 } 5750 } 5751 5752 void 5753 acer_setup_channel(struct channel_softc *chp) 5754 { 5755 struct ata_drive_datas *drvp; 5756 int drive; 5757 u_int32_t acer_fifo_udma; 5758 u_int32_t idedma_ctl; 5759 struct pciide_channel *cp = (struct pciide_channel *)chp; 5760 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 5761 5762 idedma_ctl = 0; 5763 acer_fifo_udma = pci_conf_read(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA); 5764 WDCDEBUG_PRINT(("acer_setup_channel: old fifo/udma reg 0x%x\n", 5765 acer_fifo_udma), DEBUG_PROBE); 5766 /* setup DMA if needed */ 5767 pciide_channel_dma_setup(cp); 5768 5769 if ((chp->ch_drive[0].drive_flags | chp->ch_drive[1].drive_flags) & 5770 DRIVE_UDMA) { /* check 80 pins cable */ 5771 if (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4A) & 5772 ACER_0x4A_80PIN(chp->channel)) { 5773 WDCDEBUG_PRINT(("%s:%d: 80-wire cable not detected\n", 5774 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel), 5775 DEBUG_PROBE); 5776 if (chp->ch_drive[0].UDMA_mode > 2) 5777 chp->ch_drive[0].UDMA_mode = 2; 5778 if (chp->ch_drive[1].UDMA_mode > 2) 5779 chp->ch_drive[1].UDMA_mode = 2; 5780 } 5781 } 5782 5783 for (drive = 0; drive < 2; drive++) { 5784 drvp = &chp->ch_drive[drive]; 5785 /* If no drive, skip */ 5786 if ((drvp->drive_flags & DRIVE) == 0) 5787 continue; 5788 WDCDEBUG_PRINT(("acer_setup_channel: old timings reg for " 5789 "channel %d drive %d 0x%x\n", chp->channel, drive, 5790 pciide_pci_read(sc->sc_pc, sc->sc_tag, 5791 ACER_IDETIM(chp->channel, drive))), DEBUG_PROBE); 5792 /* clear FIFO/DMA mode */ 5793 acer_fifo_udma &= ~(ACER_FTH_OPL(chp->channel, drive, 0x3) | 5794 ACER_UDMA_EN(chp->channel, drive) | 5795 ACER_UDMA_TIM(chp->channel, drive, 0x7)); 5796 5797 /* add timing values, setup DMA if needed */ 5798 if ((drvp->drive_flags & DRIVE_DMA) == 0 && 5799 (drvp->drive_flags & DRIVE_UDMA) == 0) { 5800 acer_fifo_udma |= 5801 ACER_FTH_OPL(chp->channel, drive, 0x1); 5802 goto pio; 5803 } 5804 5805 acer_fifo_udma |= ACER_FTH_OPL(chp->channel, drive, 0x2); 5806 if (drvp->drive_flags & DRIVE_UDMA) { 5807 /* use Ultra/DMA */ 5808 drvp->drive_flags &= ~DRIVE_DMA; 5809 acer_fifo_udma |= ACER_UDMA_EN(chp->channel, drive); 5810 acer_fifo_udma |= 5811 ACER_UDMA_TIM(chp->channel, drive, 5812 acer_udma[drvp->UDMA_mode]); 5813 /* XXX disable if one drive < UDMA3 ? */ 5814 if (drvp->UDMA_mode >= 3) { 5815 pciide_pci_write(sc->sc_pc, sc->sc_tag, 5816 ACER_0x4B, 5817 pciide_pci_read(sc->sc_pc, sc->sc_tag, 5818 ACER_0x4B) | ACER_0x4B_UDMA66); 5819 } 5820 } else { 5821 /* 5822 * use Multiword DMA 5823 * Timings will be used for both PIO and DMA, 5824 * so adjust DMA mode if needed 5825 */ 5826 if (drvp->PIO_mode > (drvp->DMA_mode + 2)) 5827 drvp->PIO_mode = drvp->DMA_mode + 2; 5828 if (drvp->DMA_mode + 2 > (drvp->PIO_mode)) 5829 drvp->DMA_mode = (drvp->PIO_mode > 2) ? 
5830 drvp->PIO_mode - 2 : 0; 5831 if (drvp->DMA_mode == 0) 5832 drvp->PIO_mode = 0; 5833 } 5834 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 5835 pio: pciide_pci_write(sc->sc_pc, sc->sc_tag, 5836 ACER_IDETIM(chp->channel, drive), 5837 acer_pio[drvp->PIO_mode]); 5838 } 5839 WDCDEBUG_PRINT(("acer_setup_channel: new fifo/udma reg 0x%x\n", 5840 acer_fifo_udma), DEBUG_PROBE); 5841 pci_conf_write(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA, acer_fifo_udma); 5842 if (idedma_ctl != 0) { 5843 /* Add software bits in status register */ 5844 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 5845 IDEDMA_CTL(chp->channel), idedma_ctl); 5846 } 5847 pciide_print_modes(cp); 5848 } 5849 5850 int 5851 acer_pci_intr(void *arg) 5852 { 5853 struct pciide_softc *sc = arg; 5854 struct pciide_channel *cp; 5855 struct channel_softc *wdc_cp; 5856 int i, rv, crv; 5857 u_int32_t chids; 5858 5859 rv = 0; 5860 chids = pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CHIDS); 5861 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 5862 cp = &sc->pciide_channels[i]; 5863 wdc_cp = &cp->wdc_channel; 5864 /* If a compat channel skip. */ 5865 if (cp->compat) 5866 continue; 5867 if (chids & ACER_CHIDS_INT(i)) { 5868 crv = wdcintr(wdc_cp); 5869 if (crv == 0) 5870 printf("%s:%d: bogus intr\n", 5871 sc->sc_wdcdev.sc_dev.dv_xname, i); 5872 else 5873 rv = 1; 5874 } 5875 } 5876 return (rv); 5877 } 5878 5879 int 5880 acer_dma_init(void *v, int channel, int drive, void *databuf, 5881 size_t datalen, int flags) 5882 { 5883 /* Use PIO for LBA48 transfers. */ 5884 if (flags & WDC_DMA_LBA48) 5885 return (EINVAL); 5886 5887 return (pciide_dma_init(v, channel, drive, databuf, datalen, flags)); 5888 } 5889 5890 void 5891 hpt_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 5892 { 5893 struct pciide_channel *cp; 5894 int i, compatchan, revision; 5895 pcireg_t interface; 5896 bus_size_t cmdsize, ctlsize; 5897 5898 revision = sc->sc_rev; 5899 5900 /* 5901 * when the chip is in native mode it identifies itself as a 5902 * 'misc mass storage'. Fake interface in this case. 5903 */ 5904 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) { 5905 interface = PCI_INTERFACE(pa->pa_class); 5906 } else { 5907 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA | 5908 PCIIDE_INTERFACE_PCI(0); 5909 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 && 5910 (revision == HPT370_REV || revision == HPT370A_REV || 5911 revision == HPT372_REV)) || 5912 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372A || 5913 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT302 || 5914 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT371 || 5915 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374) 5916 interface |= PCIIDE_INTERFACE_PCI(1); 5917 } 5918 5919 printf(": DMA"); 5920 pciide_mapreg_dma(sc, pa); 5921 printf("\n"); 5922 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 5923 WDC_CAPABILITY_MODE; 5924 if (sc->sc_dma_ok) { 5925 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 5926 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 5927 sc->sc_wdcdev.irqack = pciide_irqack; 5928 } 5929 sc->sc_wdcdev.PIO_cap = 4; 5930 sc->sc_wdcdev.DMA_cap = 2; 5931 5932 sc->sc_wdcdev.set_modes = hpt_setup_channel; 5933 sc->sc_wdcdev.channels = sc->wdc_chanarray; 5934 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 && 5935 revision == HPT366_REV) { 5936 sc->sc_wdcdev.UDMA_cap = 4; 5937 /* 5938 * The 366 has 2 PCI IDE functions, one for primary and one 5939 * for secondary. 
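* Each PCI function therefore attaches as its own pciide instance with a single wdc channel.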
So we need to call pciide_mapregs_compat() 5940 * with the real channel 5941 */ 5942 if (pa->pa_function == 0) { 5943 compatchan = 0; 5944 } else if (pa->pa_function == 1) { 5945 compatchan = 1; 5946 } else { 5947 printf("%s: unexpected PCI function %d\n", 5948 sc->sc_wdcdev.sc_dev.dv_xname, pa->pa_function); 5949 return; 5950 } 5951 sc->sc_wdcdev.nchannels = 1; 5952 } else { 5953 sc->sc_wdcdev.nchannels = 2; 5954 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372A || 5955 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT302 || 5956 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT371 || 5957 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374) 5958 sc->sc_wdcdev.UDMA_cap = 6; 5959 else if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366) { 5960 if (revision == HPT372_REV) 5961 sc->sc_wdcdev.UDMA_cap = 6; 5962 else 5963 sc->sc_wdcdev.UDMA_cap = 5; 5964 } 5965 } 5966 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 5967 cp = &sc->pciide_channels[i]; 5968 if (sc->sc_wdcdev.nchannels > 1) { 5969 compatchan = i; 5970 if((pciide_pci_read(sc->sc_pc, sc->sc_tag, 5971 HPT370_CTRL1(i)) & HPT370_CTRL1_EN) == 0) { 5972 printf("%s: %s ignored (disabled)\n", 5973 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 5974 continue; 5975 } 5976 } 5977 if (pciide_chansetup(sc, i, interface) == 0) 5978 continue; 5979 if (interface & PCIIDE_INTERFACE_PCI(i)) { 5980 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, 5981 &ctlsize, hpt_pci_intr); 5982 } else { 5983 cp->hw_ok = pciide_mapregs_compat(pa, cp, compatchan, 5984 &cmdsize, &ctlsize); 5985 } 5986 if (cp->hw_ok == 0) 5987 return; 5988 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot; 5989 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh; 5990 wdcattach(&cp->wdc_channel); 5991 hpt_setup_channel(&cp->wdc_channel); 5992 } 5993 if ((sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 && 5994 (revision == HPT370_REV || revision == HPT370A_REV || 5995 revision == HPT372_REV)) || 5996 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372A || 5997 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT302 || 5998 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT371 || 5999 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374) { 6000 /* 6001 * Turn off fast interrupts 6002 */ 6003 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT370_CTRL2(0), 6004 pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT370_CTRL2(0)) & 6005 ~(HPT370_CTRL2_FASTIRQ | HPT370_CTRL2_HIRQ)); 6006 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT370_CTRL2(1), 6007 pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT370_CTRL2(1)) & 6008 ~(HPT370_CTRL2_FASTIRQ | HPT370_CTRL2_HIRQ)); 6009 6010 /* 6011 * HPT370 and highter has a bit to disable interrupts, 6012 * make sure to clear it 6013 */ 6014 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_CSEL, 6015 pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL) & 6016 ~HPT_CSEL_IRQDIS); 6017 } 6018 /* set clocks, etc (mandatory on 372/4, optional otherwise) */ 6019 if (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT372A || 6020 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT302 || 6021 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT371 || 6022 sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT374 || 6023 (sc->sc_pp->ide_product == PCI_PRODUCT_TRIONES_HPT366 && 6024 revision == HPT372_REV)) 6025 pciide_pci_write(sc->sc_pc, sc->sc_tag, HPT_SC2, 6026 (pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_SC2) & 6027 HPT_SC2_MAEN) | HPT_SC2_OSC_EN); 6028 6029 return; 6030 } 6031 6032 void 6033 hpt_setup_channel(struct channel_softc *chp) 6034 { 6035 struct ata_drive_datas *drvp; 6036 int drive; 
6037 int cable; 6038 u_int32_t before, after; 6039 u_int32_t idedma_ctl; 6040 struct pciide_channel *cp = (struct pciide_channel *)chp; 6041 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 6042 int revision = sc->sc_rev; 6043 u_int32_t *tim_pio, *tim_dma, *tim_udma; 6044 6045 cable = pciide_pci_read(sc->sc_pc, sc->sc_tag, HPT_CSEL); 6046 6047 /* setup DMA if needed */ 6048 pciide_channel_dma_setup(cp); 6049 6050 idedma_ctl = 0; 6051 6052 switch (sc->sc_pp->ide_product) { 6053 case PCI_PRODUCT_TRIONES_HPT366: 6054 if (revision == HPT370_REV || 6055 revision == HPT370A_REV) { 6056 tim_pio = hpt370_pio; 6057 tim_dma = hpt370_dma; 6058 tim_udma = hpt370_udma; 6059 } else if (revision == HPT372_REV) { 6060 tim_pio = hpt372_pio; 6061 tim_dma = hpt372_dma; 6062 tim_udma = hpt372_udma; 6063 } else { 6064 tim_pio = hpt366_pio; 6065 tim_dma = hpt366_dma; 6066 tim_udma = hpt366_udma; 6067 } 6068 break; 6069 case PCI_PRODUCT_TRIONES_HPT372A: 6070 case PCI_PRODUCT_TRIONES_HPT302: 6071 case PCI_PRODUCT_TRIONES_HPT371: 6072 tim_pio = hpt372_pio; 6073 tim_dma = hpt372_dma; 6074 tim_udma = hpt372_udma; 6075 break; 6076 case PCI_PRODUCT_TRIONES_HPT374: 6077 tim_pio = hpt374_pio; 6078 tim_dma = hpt374_dma; 6079 tim_udma = hpt374_udma; 6080 break; 6081 default: 6082 printf("%s: no known timing values\n", 6083 sc->sc_wdcdev.sc_dev.dv_xname); 6084 goto end; 6085 } 6086 6087 /* Per drive settings */ 6088 for (drive = 0; drive < 2; drive++) { 6089 drvp = &chp->ch_drive[drive]; 6090 /* If no drive, skip */ 6091 if ((drvp->drive_flags & DRIVE) == 0) 6092 continue; 6093 before = pci_conf_read(sc->sc_pc, sc->sc_tag, 6094 HPT_IDETIM(chp->channel, drive)); 6095 6096 /* add timing values, setup DMA if needed */ 6097 if (drvp->drive_flags & DRIVE_UDMA) { 6098 /* use Ultra/DMA */ 6099 drvp->drive_flags &= ~DRIVE_DMA; 6100 if ((cable & HPT_CSEL_CBLID(chp->channel)) != 0 && 6101 drvp->UDMA_mode > 2) { 6102 WDCDEBUG_PRINT(("%s(%s:%d:%d): 80-wire " 6103 "cable not detected\n", drvp->drive_name, 6104 sc->sc_wdcdev.sc_dev.dv_xname, 6105 chp->channel, drive), DEBUG_PROBE); 6106 drvp->UDMA_mode = 2; 6107 } 6108 after = tim_udma[drvp->UDMA_mode]; 6109 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 6110 } else if (drvp->drive_flags & DRIVE_DMA) { 6111 /* 6112 * use Multiword DMA. 
6113 * Timings will be used for both PIO and DMA, so adjust 6114 * DMA mode if needed 6115 */ 6116 if (drvp->PIO_mode >= 3 && 6117 (drvp->DMA_mode + 2) > drvp->PIO_mode) { 6118 drvp->DMA_mode = drvp->PIO_mode - 2; 6119 } 6120 after = tim_dma[drvp->DMA_mode]; 6121 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 6122 } else { 6123 /* PIO only */ 6124 after = tim_pio[drvp->PIO_mode]; 6125 } 6126 pci_conf_write(sc->sc_pc, sc->sc_tag, 6127 HPT_IDETIM(chp->channel, drive), after); 6128 WDCDEBUG_PRINT(("%s: bus speed register set to 0x%08x " 6129 "(BIOS 0x%08x)\n", sc->sc_wdcdev.sc_dev.dv_xname, 6130 after, before), DEBUG_PROBE); 6131 } 6132 end: 6133 if (idedma_ctl != 0) { 6134 /* Add software bits in status register */ 6135 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6136 IDEDMA_CTL(chp->channel), idedma_ctl); 6137 } 6138 pciide_print_modes(cp); 6139 } 6140 6141 int 6142 hpt_pci_intr(void *arg) 6143 { 6144 struct pciide_softc *sc = arg; 6145 struct pciide_channel *cp; 6146 struct channel_softc *wdc_cp; 6147 int rv = 0; 6148 int dmastat, i, crv; 6149 6150 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 6151 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6152 IDEDMA_CTL(i)); 6153 if((dmastat & (IDEDMA_CTL_ACT | IDEDMA_CTL_INTR)) != 6154 IDEDMA_CTL_INTR) 6155 continue; 6156 cp = &sc->pciide_channels[i]; 6157 wdc_cp = &cp->wdc_channel; 6158 crv = wdcintr(wdc_cp); 6159 if (crv == 0) { 6160 printf("%s:%d: bogus intr\n", 6161 sc->sc_wdcdev.sc_dev.dv_xname, i); 6162 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6163 IDEDMA_CTL(i), dmastat); 6164 } else 6165 rv = 1; 6166 } 6167 return (rv); 6168 } 6169 6170 /* Macros to test product */ 6171 #define PDC_IS_262(sc) \ 6172 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20262 || \ 6173 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20265 || \ 6174 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20267) 6175 #define PDC_IS_265(sc) \ 6176 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20265 || \ 6177 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20267 || \ 6178 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20268 || \ 6179 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20268R || \ 6180 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20269 || \ 6181 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20271 || \ 6182 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20275 || \ 6183 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20276 || \ 6184 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20277) 6185 #define PDC_IS_268(sc) \ 6186 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20268 || \ 6187 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20268R || \ 6188 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20269 || \ 6189 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20271 || \ 6190 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20275 || \ 6191 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20276 || \ 6192 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20277) 6193 #define PDC_IS_269(sc) \ 6194 ((sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20269 || \ 6195 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20271 || \ 6196 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20275 || \ 6197 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20276 || \ 6198 (sc)->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20277) 6199 6200 u_int8_t 6201 pdc268_config_read(struct channel_softc *chp, int index) 6202 { 6203 struct pciide_channel *cp = (struct pciide_channel *)chp; 6204 struct 
pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 6205 int channel = chp->channel; 6206 6207 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6208 PDC268_INDEX(channel), index); 6209 return (bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6210 PDC268_DATA(channel))); 6211 } 6212 6213 void 6214 pdc202xx_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 6215 { 6216 struct pciide_channel *cp; 6217 int channel; 6218 pcireg_t interface, st, mode; 6219 bus_size_t cmdsize, ctlsize; 6220 6221 if (!PDC_IS_268(sc)) { 6222 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE); 6223 WDCDEBUG_PRINT(("pdc202xx_setup_chip: controller state 0x%x\n", 6224 st), DEBUG_PROBE); 6225 } 6226 6227 /* turn off RAID mode */ 6228 if (!PDC_IS_268(sc)) 6229 st &= ~PDC2xx_STATE_IDERAID; 6230 6231 /* 6232 * can't rely on the PCI_CLASS_REG content if the chip was in raid 6233 * mode. We have to fake interface 6234 */ 6235 interface = PCIIDE_INTERFACE_SETTABLE(0) | PCIIDE_INTERFACE_SETTABLE(1); 6236 if (PDC_IS_268(sc) || (st & PDC2xx_STATE_NATIVE)) 6237 interface |= PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1); 6238 6239 printf(": DMA"); 6240 pciide_mapreg_dma(sc, pa); 6241 6242 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 6243 WDC_CAPABILITY_MODE; 6244 if (sc->sc_pp->ide_product == PCI_PRODUCT_PROMISE_PDC20246 || 6245 PDC_IS_262(sc)) 6246 sc->sc_wdcdev.cap |= WDC_CAPABILITY_NO_ATAPI_DMA; 6247 if (sc->sc_dma_ok) { 6248 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 6249 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 6250 sc->sc_wdcdev.irqack = pciide_irqack; 6251 } 6252 sc->sc_wdcdev.PIO_cap = 4; 6253 sc->sc_wdcdev.DMA_cap = 2; 6254 if (PDC_IS_269(sc)) 6255 sc->sc_wdcdev.UDMA_cap = 6; 6256 else if (PDC_IS_265(sc)) 6257 sc->sc_wdcdev.UDMA_cap = 5; 6258 else if (PDC_IS_262(sc)) 6259 sc->sc_wdcdev.UDMA_cap = 4; 6260 else 6261 sc->sc_wdcdev.UDMA_cap = 2; 6262 sc->sc_wdcdev.set_modes = PDC_IS_268(sc) ? 
6263 pdc20268_setup_channel : pdc202xx_setup_channel; 6264 sc->sc_wdcdev.channels = sc->wdc_chanarray; 6265 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 6266 6267 if (PDC_IS_262(sc)) { 6268 sc->sc_wdcdev.dma_start = pdc20262_dma_start; 6269 sc->sc_wdcdev.dma_finish = pdc20262_dma_finish; 6270 } 6271 6272 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 6273 if (!PDC_IS_268(sc)) { 6274 /* setup failsafe defaults */ 6275 mode = 0; 6276 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[0]); 6277 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[0]); 6278 mode = PDC2xx_TIM_SET_MB(mode, pdc2xx_dma_mb[0]); 6279 mode = PDC2xx_TIM_SET_MC(mode, pdc2xx_dma_mc[0]); 6280 for (channel = 0; 6281 channel < sc->sc_wdcdev.nchannels; 6282 channel++) { 6283 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d " 6284 "drive 0 initial timings 0x%x, now 0x%x\n", 6285 channel, pci_conf_read(sc->sc_pc, sc->sc_tag, 6286 PDC2xx_TIM(channel, 0)), mode | PDC2xx_TIM_IORDYp), 6287 DEBUG_PROBE); 6288 pci_conf_write(sc->sc_pc, sc->sc_tag, 6289 PDC2xx_TIM(channel, 0), mode | PDC2xx_TIM_IORDYp); 6290 WDCDEBUG_PRINT(("pdc202xx_setup_chip: channel %d " 6291 "drive 1 initial timings 0x%x, now 0x%x\n", 6292 channel, pci_conf_read(sc->sc_pc, sc->sc_tag, 6293 PDC2xx_TIM(channel, 1)), mode), DEBUG_PROBE); 6294 pci_conf_write(sc->sc_pc, sc->sc_tag, 6295 PDC2xx_TIM(channel, 1), mode); 6296 } 6297 6298 mode = PDC2xx_SCR_DMA; 6299 if (PDC_IS_262(sc)) { 6300 mode = PDC2xx_SCR_SET_GEN(mode, PDC262_SCR_GEN_LAT); 6301 } else { 6302 /* the BIOS set it up this way */ 6303 mode = PDC2xx_SCR_SET_GEN(mode, 0x1); 6304 } 6305 mode = PDC2xx_SCR_SET_I2C(mode, 0x3); /* ditto */ 6306 mode = PDC2xx_SCR_SET_POLL(mode, 0x1); /* ditto */ 6307 WDCDEBUG_PRINT(("pdc202xx_setup_chip: initial SCR 0x%x, " 6308 "now 0x%x\n", 6309 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, 6310 PDC2xx_SCR), 6311 mode), DEBUG_PROBE); 6312 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, 6313 PDC2xx_SCR, mode); 6314 6315 /* controller initial state register is OK even without BIOS */ 6316 /* Set DMA mode to IDE DMA compatibility */ 6317 mode = 6318 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM); 6319 WDCDEBUG_PRINT(("pdc202xx_setup_chip: primary mode 0x%x", mode), 6320 DEBUG_PROBE); 6321 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_PM, 6322 mode | 0x1); 6323 mode = 6324 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM); 6325 WDCDEBUG_PRINT((", secondary mode 0x%x\n", mode ), DEBUG_PROBE); 6326 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SM, 6327 mode | 0x1); 6328 } 6329 6330 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 6331 cp = &sc->pciide_channels[channel]; 6332 if (pciide_chansetup(sc, channel, interface) == 0) 6333 continue; 6334 if (!PDC_IS_268(sc) && (st & (PDC_IS_262(sc) ? 6335 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel))) == 0) { 6336 printf("%s: %s ignored (disabled)\n", 6337 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 6338 continue; 6339 } 6340 pciide_map_compat_intr(pa, cp, channel, interface); 6341 if (cp->hw_ok == 0) 6342 continue; 6343 if (PDC_IS_265(sc)) 6344 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 6345 pdc20265_pci_intr); 6346 else 6347 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 6348 pdc202xx_pci_intr); 6349 if (cp->hw_ok == 0) { 6350 pciide_unmap_compat_intr(pa, cp, channel, interface); 6351 continue; 6352 } 6353 if (!PDC_IS_268(sc) && pciide_chan_candisable(cp)) { 6354 st &= ~(PDC_IS_262(sc) ? 
6355 PDC262_STATE_EN(channel):PDC246_STATE_EN(channel)); 6356 pciide_unmap_compat_intr(pa, cp, channel, interface); 6357 } 6358 if (PDC_IS_268(sc)) 6359 pdc20268_setup_channel(&cp->wdc_channel); 6360 else 6361 pdc202xx_setup_channel(&cp->wdc_channel); 6362 } 6363 if (!PDC_IS_268(sc)) { 6364 WDCDEBUG_PRINT(("pdc202xx_setup_chip: new controller state " 6365 "0x%x\n", st), DEBUG_PROBE); 6366 pci_conf_write(sc->sc_pc, sc->sc_tag, PDC2xx_STATE, st); 6367 } 6368 return; 6369 } 6370 6371 void 6372 pdc202xx_setup_channel(struct channel_softc *chp) 6373 { 6374 struct ata_drive_datas *drvp; 6375 int drive; 6376 pcireg_t mode, st; 6377 u_int32_t idedma_ctl, scr, atapi; 6378 struct pciide_channel *cp = (struct pciide_channel *)chp; 6379 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 6380 int channel = chp->channel; 6381 6382 /* setup DMA if needed */ 6383 pciide_channel_dma_setup(cp); 6384 6385 idedma_ctl = 0; 6386 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s: scr 0x%x\n", 6387 sc->sc_wdcdev.sc_dev.dv_xname, 6388 bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, PDC262_U66)), 6389 DEBUG_PROBE); 6390 6391 /* Per channel settings */ 6392 if (PDC_IS_262(sc)) { 6393 scr = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6394 PDC262_U66); 6395 st = pci_conf_read(sc->sc_pc, sc->sc_tag, PDC2xx_STATE); 6396 /* Check cable */ 6397 if ((st & PDC262_STATE_80P(channel)) != 0 && 6398 ((chp->ch_drive[0].drive_flags & DRIVE_UDMA && 6399 chp->ch_drive[0].UDMA_mode > 2) || 6400 (chp->ch_drive[1].drive_flags & DRIVE_UDMA && 6401 chp->ch_drive[1].UDMA_mode > 2))) { 6402 WDCDEBUG_PRINT(("%s:%d: 80-wire cable not detected\n", 6403 sc->sc_wdcdev.sc_dev.dv_xname, channel), 6404 DEBUG_PROBE); 6405 if (chp->ch_drive[0].UDMA_mode > 2) 6406 chp->ch_drive[0].UDMA_mode = 2; 6407 if (chp->ch_drive[1].UDMA_mode > 2) 6408 chp->ch_drive[1].UDMA_mode = 2; 6409 } 6410 /* Trim UDMA mode */ 6411 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA && 6412 chp->ch_drive[0].UDMA_mode <= 2) || 6413 (chp->ch_drive[1].drive_flags & DRIVE_UDMA && 6414 chp->ch_drive[1].UDMA_mode <= 2)) { 6415 if (chp->ch_drive[0].UDMA_mode > 2) 6416 chp->ch_drive[0].UDMA_mode = 2; 6417 if (chp->ch_drive[1].UDMA_mode > 2) 6418 chp->ch_drive[1].UDMA_mode = 2; 6419 } 6420 /* Set U66 if needed */ 6421 if ((chp->ch_drive[0].drive_flags & DRIVE_UDMA && 6422 chp->ch_drive[0].UDMA_mode > 2) || 6423 (chp->ch_drive[1].drive_flags & DRIVE_UDMA && 6424 chp->ch_drive[1].UDMA_mode > 2)) 6425 scr |= PDC262_U66_EN(channel); 6426 else 6427 scr &= ~PDC262_U66_EN(channel); 6428 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6429 PDC262_U66, scr); 6430 WDCDEBUG_PRINT(("pdc202xx_setup_channel %s:%d: ATAPI 0x%x\n", 6431 sc->sc_wdcdev.sc_dev.dv_xname, channel, 6432 bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, 6433 PDC262_ATAPI(channel))), DEBUG_PROBE); 6434 if (chp->ch_drive[0].drive_flags & DRIVE_ATAPI || 6435 chp->ch_drive[1].drive_flags & DRIVE_ATAPI) { 6436 if (((chp->ch_drive[0].drive_flags & DRIVE_UDMA) && 6437 !(chp->ch_drive[1].drive_flags & DRIVE_UDMA) && 6438 (chp->ch_drive[1].drive_flags & DRIVE_DMA)) || 6439 ((chp->ch_drive[1].drive_flags & DRIVE_UDMA) && 6440 !(chp->ch_drive[0].drive_flags & DRIVE_UDMA) && 6441 (chp->ch_drive[0].drive_flags & DRIVE_DMA))) 6442 atapi = 0; 6443 else 6444 atapi = PDC262_ATAPI_UDMA; 6445 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, 6446 PDC262_ATAPI(channel), atapi); 6447 } 6448 } 6449 for (drive = 0; drive < 2; drive++) { 6450 drvp = &chp->ch_drive[drive]; 6451 /* If no drive, skip */ 6452 if ((drvp->drive_flags & 
DRIVE) == 0) 6453 continue; 6454 mode = 0; 6455 if (drvp->drive_flags & DRIVE_UDMA) { 6456 /* use Ultra/DMA */ 6457 drvp->drive_flags &= ~DRIVE_DMA; 6458 mode = PDC2xx_TIM_SET_MB(mode, 6459 pdc2xx_udma_mb[drvp->UDMA_mode]); 6460 mode = PDC2xx_TIM_SET_MC(mode, 6461 pdc2xx_udma_mc[drvp->UDMA_mode]); 6462 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 6463 } else if (drvp->drive_flags & DRIVE_DMA) { 6464 mode = PDC2xx_TIM_SET_MB(mode, 6465 pdc2xx_dma_mb[drvp->DMA_mode]); 6466 mode = PDC2xx_TIM_SET_MC(mode, 6467 pdc2xx_dma_mc[drvp->DMA_mode]); 6468 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 6469 } else { 6470 mode = PDC2xx_TIM_SET_MB(mode, 6471 pdc2xx_dma_mb[0]); 6472 mode = PDC2xx_TIM_SET_MC(mode, 6473 pdc2xx_dma_mc[0]); 6474 } 6475 mode = PDC2xx_TIM_SET_PA(mode, pdc2xx_pa[drvp->PIO_mode]); 6476 mode = PDC2xx_TIM_SET_PB(mode, pdc2xx_pb[drvp->PIO_mode]); 6477 if (drvp->drive_flags & DRIVE_ATA) 6478 mode |= PDC2xx_TIM_PRE; 6479 mode |= PDC2xx_TIM_SYNC | PDC2xx_TIM_ERRDY; 6480 if (drvp->PIO_mode >= 3) { 6481 mode |= PDC2xx_TIM_IORDY; 6482 if (drive == 0) 6483 mode |= PDC2xx_TIM_IORDYp; 6484 } 6485 WDCDEBUG_PRINT(("pdc202xx_setup_channel: %s:%d:%d " 6486 "timings 0x%x\n", 6487 sc->sc_wdcdev.sc_dev.dv_xname, 6488 chp->channel, drive, mode), DEBUG_PROBE); 6489 pci_conf_write(sc->sc_pc, sc->sc_tag, 6490 PDC2xx_TIM(chp->channel, drive), mode); 6491 } 6492 if (idedma_ctl != 0) { 6493 /* Add software bits in status register */ 6494 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6495 IDEDMA_CTL(channel), idedma_ctl); 6496 } 6497 pciide_print_modes(cp); 6498 } 6499 6500 void 6501 pdc20268_setup_channel(struct channel_softc *chp) 6502 { 6503 struct ata_drive_datas *drvp; 6504 int drive, cable; 6505 u_int32_t idedma_ctl; 6506 struct pciide_channel *cp = (struct pciide_channel *)chp; 6507 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 6508 int channel = chp->channel; 6509 6510 /* check 80 pins cable */ 6511 cable = pdc268_config_read(chp, 0x0b) & PDC268_CABLE; 6512 6513 /* setup DMA if needed */ 6514 pciide_channel_dma_setup(cp); 6515 6516 idedma_ctl = 0; 6517 6518 for (drive = 0; drive < 2; drive++) { 6519 drvp = &chp->ch_drive[drive]; 6520 /* If no drive, skip */ 6521 if ((drvp->drive_flags & DRIVE) == 0) 6522 continue; 6523 if (drvp->drive_flags & DRIVE_UDMA) { 6524 /* use Ultra/DMA */ 6525 drvp->drive_flags &= ~DRIVE_DMA; 6526 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 6527 if (cable && drvp->UDMA_mode > 2) { 6528 WDCDEBUG_PRINT(("%s(%s:%d:%d): 80-wire " 6529 "cable not detected\n", drvp->drive_name, 6530 sc->sc_wdcdev.sc_dev.dv_xname, 6531 channel, drive), DEBUG_PROBE); 6532 drvp->UDMA_mode = 2; 6533 } 6534 } else if (drvp->drive_flags & DRIVE_DMA) { 6535 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 6536 } 6537 } 6538 /* nothing to do to setup modes, the controller snoop SET_FEATURE cmd */ 6539 if (idedma_ctl != 0) { 6540 /* Add software bits in status register */ 6541 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6542 IDEDMA_CTL(channel), idedma_ctl); 6543 } 6544 pciide_print_modes(cp); 6545 } 6546 6547 int 6548 pdc202xx_pci_intr(void *arg) 6549 { 6550 struct pciide_softc *sc = arg; 6551 struct pciide_channel *cp; 6552 struct channel_softc *wdc_cp; 6553 int i, rv, crv; 6554 u_int32_t scr; 6555 6556 rv = 0; 6557 scr = bus_space_read_4(sc->sc_dma_iot, sc->sc_dma_ioh, PDC2xx_SCR); 6558 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 6559 cp = &sc->pciide_channels[i]; 6560 wdc_cp = &cp->wdc_channel; 6561 /* If a compat channel skip. 
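* (a compat channel is serviced by its own legacy-IRQ handler, not by this native-PCI interrupt).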
*/ 6562 if (cp->compat) 6563 continue; 6564 if (scr & PDC2xx_SCR_INT(i)) { 6565 crv = wdcintr(wdc_cp); 6566 if (crv == 0) 6567 printf("%s:%d: bogus intr (reg 0x%x)\n", 6568 sc->sc_wdcdev.sc_dev.dv_xname, i, scr); 6569 else 6570 rv = 1; 6571 } 6572 } 6573 return (rv); 6574 } 6575 6576 int 6577 pdc20265_pci_intr(void *arg) 6578 { 6579 struct pciide_softc *sc = arg; 6580 struct pciide_channel *cp; 6581 struct channel_softc *wdc_cp; 6582 int i, rv, crv; 6583 u_int32_t dmastat; 6584 6585 rv = 0; 6586 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 6587 cp = &sc->pciide_channels[i]; 6588 wdc_cp = &cp->wdc_channel; 6589 /* If a compat channel skip. */ 6590 if (cp->compat) 6591 continue; 6592 6593 /* 6594 * In case of shared IRQ check that the interrupt 6595 * was actually generated by this channel. 6596 * Only check the channel that is enabled. 6597 */ 6598 if (cp->hw_ok && PDC_IS_268(sc)) { 6599 if ((pdc268_config_read(wdc_cp, 6600 0x0b) & PDC268_INTR) == 0) 6601 continue; 6602 } 6603 6604 /* 6605 * The Ultra/100 seems to assert PDC2xx_SCR_INT * spuriously, 6606 * however it asserts INT in IDEDMA_CTL even for non-DMA ops. 6607 * So use it instead (requires 2 reg reads instead of 1, 6608 * but we can't do it another way). 6609 */ 6610 dmastat = bus_space_read_1(sc->sc_dma_iot, 6611 sc->sc_dma_ioh, IDEDMA_CTL(i)); 6612 if ((dmastat & IDEDMA_CTL_INTR) == 0) 6613 continue; 6614 6615 crv = wdcintr(wdc_cp); 6616 if (crv == 0) 6617 printf("%s:%d: bogus intr\n", 6618 sc->sc_wdcdev.sc_dev.dv_xname, i); 6619 else 6620 rv = 1; 6621 } 6622 return (rv); 6623 } 6624 6625 void 6626 pdc20262_dma_start(void *v, int channel, int drive) 6627 { 6628 struct pciide_softc *sc = v; 6629 struct pciide_dma_maps *dma_maps = 6630 &sc->pciide_channels[channel].dma_maps[drive]; 6631 u_int8_t clock; 6632 u_int32_t count; 6633 6634 if (dma_maps->dma_flags & WDC_DMA_LBA48) { 6635 clock = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6636 PDC262_U66); 6637 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6638 PDC262_U66, clock | PDC262_U66_EN(channel)); 6639 count = dma_maps->dmamap_xfer->dm_mapsize >> 1; 6640 count |= dma_maps->dma_flags & WDC_DMA_READ ? 
6641 PDC262_ATAPI_LBA48_READ : PDC262_ATAPI_LBA48_WRITE; 6642 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, 6643 PDC262_ATAPI(channel), count); 6644 } 6645 6646 pciide_dma_start(v, channel, drive); 6647 } 6648 6649 int 6650 pdc20262_dma_finish(void *v, int channel, int drive, int force) 6651 { 6652 struct pciide_softc *sc = v; 6653 struct pciide_dma_maps *dma_maps = 6654 &sc->pciide_channels[channel].dma_maps[drive]; 6655 u_int8_t clock; 6656 6657 if (dma_maps->dma_flags & WDC_DMA_LBA48) { 6658 clock = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6659 PDC262_U66); 6660 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 6661 PDC262_U66, clock & ~PDC262_U66_EN(channel)); 6662 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, 6663 PDC262_ATAPI(channel), 0); 6664 } 6665 6666 return (pciide_dma_finish(v, channel, drive, force)); 6667 } 6668 6669 void 6670 pdcsata_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 6671 { 6672 struct pciide_channel *cp; 6673 struct channel_softc *wdc_cp; 6674 struct pciide_pdcsata *ps; 6675 int channel, i; 6676 bus_size_t dmasize; 6677 pci_intr_handle_t intrhandle; 6678 const char *intrstr; 6679 6680 /* Allocate memory for private data */ 6681 sc->sc_cookie = malloc(sizeof(*ps), M_DEVBUF, M_NOWAIT | M_ZERO); 6682 ps = sc->sc_cookie; 6683 6684 /* 6685 * Promise SATA controllers have 3 or 4 channels, 6686 * the usual IDE registers are mapped in I/O space, with offsets. 6687 */ 6688 if (pci_intr_map(pa, &intrhandle) != 0) { 6689 printf(": couldn't map interrupt\n"); 6690 return; 6691 } 6692 intrstr = pci_intr_string(pa->pa_pc, intrhandle); 6693 6694 switch (sc->sc_pp->ide_product) { 6695 case PCI_PRODUCT_PROMISE_PDC20318: 6696 case PCI_PRODUCT_PROMISE_PDC20319: 6697 case PCI_PRODUCT_PROMISE_PDC20371: 6698 case PCI_PRODUCT_PROMISE_PDC20375: 6699 case PCI_PRODUCT_PROMISE_PDC20376: 6700 case PCI_PRODUCT_PROMISE_PDC20377: 6701 case PCI_PRODUCT_PROMISE_PDC20378: 6702 case PCI_PRODUCT_PROMISE_PDC20379: 6703 default: 6704 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc, 6705 intrhandle, IPL_BIO, pdc203xx_pci_intr, sc, 6706 sc->sc_wdcdev.sc_dev.dv_xname); 6707 break; 6708 6709 case PCI_PRODUCT_PROMISE_PDC40518: 6710 case PCI_PRODUCT_PROMISE_PDC40519: 6711 case PCI_PRODUCT_PROMISE_PDC40718: 6712 case PCI_PRODUCT_PROMISE_PDC40719: 6713 case PCI_PRODUCT_PROMISE_PDC40779: 6714 case PCI_PRODUCT_PROMISE_PDC20571: 6715 case PCI_PRODUCT_PROMISE_PDC20575: 6716 case PCI_PRODUCT_PROMISE_PDC20579: 6717 case PCI_PRODUCT_PROMISE_PDC20771: 6718 case PCI_PRODUCT_PROMISE_PDC20775: 6719 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc, 6720 intrhandle, IPL_BIO, pdc205xx_pci_intr, sc, 6721 sc->sc_wdcdev.sc_dev.dv_xname); 6722 break; 6723 } 6724 6725 if (sc->sc_pci_ih == NULL) { 6726 printf(": couldn't establish native-PCI interrupt"); 6727 if (intrstr != NULL) 6728 printf(" at %s", intrstr); 6729 printf("\n"); 6730 return; 6731 } 6732 6733 sc->sc_dma_ok = (pci_mapreg_map(pa, PCIIDE_REG_BUS_MASTER_DMA, 6734 PCI_MAPREG_MEM_TYPE_32BIT, 0, &sc->sc_dma_iot, 6735 &sc->sc_dma_ioh, NULL, &dmasize, 0) == 0); 6736 if (!sc->sc_dma_ok) { 6737 printf(": couldn't map bus-master DMA registers\n"); 6738 pci_intr_disestablish(pa->pa_pc, sc->sc_pci_ih); 6739 return; 6740 } 6741 6742 sc->sc_dmat = pa->pa_dmat; 6743 6744 if (pci_mapreg_map(pa, PDC203xx_BAR_IDEREGS, 6745 PCI_MAPREG_MEM_TYPE_32BIT, 0, &ps->ba5_st, 6746 &ps->ba5_sh, NULL, NULL, 0) != 0) { 6747 printf(": couldn't map IDE registers\n"); 6748 bus_space_unmap(sc->sc_dma_iot, sc->sc_dma_ioh, dmasize); 6749 pci_intr_disestablish(pa->pa_pc, 
sc->sc_pci_ih); 6750 return; 6751 } 6752 6753 printf(": DMA\n"); 6754 6755 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16; 6756 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 6757 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 6758 sc->sc_wdcdev.irqack = pdc203xx_irqack; 6759 sc->sc_wdcdev.PIO_cap = 4; 6760 sc->sc_wdcdev.DMA_cap = 2; 6761 sc->sc_wdcdev.UDMA_cap = 6; 6762 sc->sc_wdcdev.set_modes = pdc203xx_setup_channel; 6763 sc->sc_wdcdev.channels = sc->wdc_chanarray; 6764 6765 switch (sc->sc_pp->ide_product) { 6766 case PCI_PRODUCT_PROMISE_PDC20318: 6767 case PCI_PRODUCT_PROMISE_PDC20319: 6768 case PCI_PRODUCT_PROMISE_PDC20371: 6769 case PCI_PRODUCT_PROMISE_PDC20375: 6770 case PCI_PRODUCT_PROMISE_PDC20376: 6771 case PCI_PRODUCT_PROMISE_PDC20377: 6772 case PCI_PRODUCT_PROMISE_PDC20378: 6773 case PCI_PRODUCT_PROMISE_PDC20379: 6774 default: 6775 bus_space_write_4(ps->ba5_st, ps->ba5_sh, 0x06c, 0x00ff0033); 6776 sc->sc_wdcdev.nchannels = 6777 (bus_space_read_4(ps->ba5_st, ps->ba5_sh, 0x48) & 0x02) ? 6778 PDC203xx_NCHANNELS : 3; 6779 break; 6780 6781 case PCI_PRODUCT_PROMISE_PDC40518: 6782 case PCI_PRODUCT_PROMISE_PDC40519: 6783 case PCI_PRODUCT_PROMISE_PDC40718: 6784 case PCI_PRODUCT_PROMISE_PDC40719: 6785 case PCI_PRODUCT_PROMISE_PDC40779: 6786 case PCI_PRODUCT_PROMISE_PDC20571: 6787 bus_space_write_4(ps->ba5_st, ps->ba5_sh, 0x60, 0x00ff00ff); 6788 sc->sc_wdcdev.nchannels = PDC40718_NCHANNELS; 6789 6790 sc->sc_wdcdev.reset = pdc205xx_do_reset; 6791 sc->sc_wdcdev.drv_probe = pdc205xx_drv_probe; 6792 6793 break; 6794 case PCI_PRODUCT_PROMISE_PDC20575: 6795 case PCI_PRODUCT_PROMISE_PDC20579: 6796 case PCI_PRODUCT_PROMISE_PDC20771: 6797 case PCI_PRODUCT_PROMISE_PDC20775: 6798 bus_space_write_4(ps->ba5_st, ps->ba5_sh, 0x60, 0x00ff00ff); 6799 sc->sc_wdcdev.nchannels = PDC20575_NCHANNELS; 6800 6801 sc->sc_wdcdev.reset = pdc205xx_do_reset; 6802 sc->sc_wdcdev.drv_probe = pdc205xx_drv_probe; 6803 6804 break; 6805 } 6806 6807 sc->sc_wdcdev.dma_arg = sc; 6808 sc->sc_wdcdev.dma_init = pciide_dma_init; 6809 sc->sc_wdcdev.dma_start = pdc203xx_dma_start; 6810 sc->sc_wdcdev.dma_finish = pdc203xx_dma_finish; 6811 6812 for (channel = 0; channel < sc->sc_wdcdev.nchannels; 6813 channel++) { 6814 cp = &sc->pciide_channels[channel]; 6815 sc->wdc_chanarray[channel] = &cp->wdc_channel; 6816 6817 cp->ih = sc->sc_pci_ih; 6818 cp->name = NULL; 6819 cp->wdc_channel.channel = channel; 6820 cp->wdc_channel.wdc = &sc->sc_wdcdev; 6821 cp->wdc_channel.ch_queue = wdc_alloc_queue(); 6822 if (cp->wdc_channel.ch_queue == NULL) { 6823 printf("%s: channel %d: " 6824 "cannot allocate channel queue\n", 6825 sc->sc_wdcdev.sc_dev.dv_xname, channel); 6826 continue; 6827 } 6828 wdc_cp = &cp->wdc_channel; 6829 6830 ps->regs[channel].ctl_iot = ps->ba5_st; 6831 ps->regs[channel].cmd_iot = ps->ba5_st; 6832 6833 if (bus_space_subregion(ps->ba5_st, ps->ba5_sh, 6834 0x0238 + (channel << 7), 1, 6835 &ps->regs[channel].ctl_ioh) != 0) { 6836 printf("%s: couldn't map channel %d ctl regs\n", 6837 sc->sc_wdcdev.sc_dev.dv_xname, 6838 channel); 6839 continue; 6840 } 6841 for (i = 0; i < WDC_NREG; i++) { 6842 if (bus_space_subregion(ps->ba5_st, ps->ba5_sh, 6843 0x0200 + (i << 2) + (channel << 7), i == 0 ? 
4 : 1, 6844 &ps->regs[channel].cmd_iohs[i]) != 0) { 6845 printf("%s: couldn't map channel %d cmd " 6846 "regs\n", 6847 sc->sc_wdcdev.sc_dev.dv_xname, 6848 channel); 6849 continue; 6850 } 6851 } 6852 ps->regs[channel].cmd_iohs[wdr_status & _WDC_REGMASK] = 6853 ps->regs[channel].cmd_iohs[wdr_command & _WDC_REGMASK]; 6854 ps->regs[channel].cmd_iohs[wdr_features & _WDC_REGMASK] = 6855 ps->regs[channel].cmd_iohs[wdr_error & _WDC_REGMASK]; 6856 wdc_cp->data32iot = wdc_cp->cmd_iot = 6857 ps->regs[channel].cmd_iot; 6858 wdc_cp->data32ioh = wdc_cp->cmd_ioh = 6859 ps->regs[channel].cmd_iohs[0]; 6860 wdc_cp->_vtbl = &wdc_pdc203xx_vtbl; 6861 6862 /* 6863 * Subregion the busmaster registers. They're spread all over 6864 * the controller's register space :(. They are also 4 bytes 6865 * in size, with some specific extensions in the extra bits. 6866 * It also seems that the IDEDMA_CTL register isn't available. 6867 */ 6868 if (bus_space_subregion(ps->ba5_st, ps->ba5_sh, 6869 0x260 + (channel << 7), 1, 6870 &ps->regs[channel].dma_iohs[IDEDMA_CMD(0)]) != 0) { 6871 printf("%s channel %d: can't subregion DMA " 6872 "registers\n", 6873 sc->sc_wdcdev.sc_dev.dv_xname, channel); 6874 continue; 6875 } 6876 if (bus_space_subregion(ps->ba5_st, ps->ba5_sh, 6877 0x244 + (channel << 7), 4, 6878 &ps->regs[channel].dma_iohs[IDEDMA_TBL(0)]) != 0) { 6879 printf("%s channel %d: can't subregion DMA " 6880 "registers\n", 6881 sc->sc_wdcdev.sc_dev.dv_xname, channel); 6882 continue; 6883 } 6884 6885 wdcattach(wdc_cp); 6886 bus_space_write_4(sc->sc_dma_iot, 6887 ps->regs[channel].dma_iohs[IDEDMA_CMD(0)], 0, 6888 (bus_space_read_4(sc->sc_dma_iot, 6889 ps->regs[channel].dma_iohs[IDEDMA_CMD(0)], 6890 0) & ~0x00003f9f) | (channel + 1)); 6891 bus_space_write_4(ps->ba5_st, ps->ba5_sh, 6892 (channel + 1) << 2, 0x00000001); 6893 6894 pdc203xx_setup_channel(&cp->wdc_channel); 6895 } 6896 6897 printf("%s: using %s for native-PCI interrupt\n", 6898 sc->sc_wdcdev.sc_dev.dv_xname, 6899 intrstr ?
intrstr : "unknown interrupt"); 6900 } 6901 6902 void 6903 pdc203xx_setup_channel(struct channel_softc *chp) 6904 { 6905 struct ata_drive_datas *drvp; 6906 struct pciide_channel *cp = (struct pciide_channel *)chp; 6907 int drive, s; 6908 6909 pciide_channel_dma_setup(cp); 6910 6911 for (drive = 0; drive < 2; drive++) { 6912 drvp = &chp->ch_drive[drive]; 6913 if ((drvp->drive_flags & DRIVE) == 0) 6914 continue; 6915 if (drvp->drive_flags & DRIVE_UDMA) { 6916 s = splbio(); 6917 drvp->drive_flags &= ~DRIVE_DMA; 6918 splx(s); 6919 } 6920 } 6921 pciide_print_modes(cp); 6922 } 6923 6924 int 6925 pdc203xx_pci_intr(void *arg) 6926 { 6927 struct pciide_softc *sc = arg; 6928 struct pciide_channel *cp; 6929 struct channel_softc *wdc_cp; 6930 struct pciide_pdcsata *ps = sc->sc_cookie; 6931 int i, rv, crv; 6932 u_int32_t scr; 6933 6934 rv = 0; 6935 scr = bus_space_read_4(ps->ba5_st, ps->ba5_sh, 0x00040); 6936 6937 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 6938 cp = &sc->pciide_channels[i]; 6939 wdc_cp = &cp->wdc_channel; 6940 if (scr & (1 << (i + 1))) { 6941 crv = wdcintr(wdc_cp); 6942 if (crv == 0) { 6943 printf("%s:%d: bogus intr (reg 0x%x)\n", 6944 sc->sc_wdcdev.sc_dev.dv_xname, 6945 i, scr); 6946 } else 6947 rv = 1; 6948 } 6949 } 6950 6951 return (rv); 6952 } 6953 6954 int 6955 pdc205xx_pci_intr(void *arg) 6956 { 6957 struct pciide_softc *sc = arg; 6958 struct pciide_channel *cp; 6959 struct channel_softc *wdc_cp; 6960 struct pciide_pdcsata *ps = sc->sc_cookie; 6961 int i, rv, crv; 6962 u_int32_t scr, status; 6963 6964 rv = 0; 6965 scr = bus_space_read_4(ps->ba5_st, ps->ba5_sh, 0x40); 6966 bus_space_write_4(ps->ba5_st, ps->ba5_sh, 0x40, scr & 0x0000ffff); 6967 6968 status = bus_space_read_4(ps->ba5_st, ps->ba5_sh, 0x60); 6969 bus_space_write_4(ps->ba5_st, ps->ba5_sh, 0x60, status & 0x000000ff); 6970 6971 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 6972 cp = &sc->pciide_channels[i]; 6973 wdc_cp = &cp->wdc_channel; 6974 if (scr & (1 << (i + 1))) { 6975 crv = wdcintr(wdc_cp); 6976 if (crv == 0) { 6977 printf("%s:%d: bogus intr (reg 0x%x)\n", 6978 sc->sc_wdcdev.sc_dev.dv_xname, 6979 i, scr); 6980 } else 6981 rv = 1; 6982 } 6983 } 6984 return rv; 6985 } 6986 6987 void 6988 pdc203xx_irqack(struct channel_softc *chp) 6989 { 6990 struct pciide_channel *cp = (struct pciide_channel *)chp; 6991 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 6992 struct pciide_pdcsata *ps = sc->sc_cookie; 6993 int chan = chp->channel; 6994 6995 bus_space_write_4(sc->sc_dma_iot, 6996 ps->regs[chan].dma_iohs[IDEDMA_CMD(0)], 0, 6997 (bus_space_read_4(sc->sc_dma_iot, 6998 ps->regs[chan].dma_iohs[IDEDMA_CMD(0)], 6999 0) & ~0x00003f9f) | (chan + 1)); 7000 bus_space_write_4(ps->ba5_st, ps->ba5_sh, 7001 (chan + 1) << 2, 0x00000001); 7002 } 7003 7004 void 7005 pdc203xx_dma_start(void *v, int channel, int drive) 7006 { 7007 struct pciide_softc *sc = v; 7008 struct pciide_channel *cp = &sc->pciide_channels[channel]; 7009 struct pciide_dma_maps *dma_maps = &cp->dma_maps[drive]; 7010 struct pciide_pdcsata *ps = sc->sc_cookie; 7011 7012 /* Write table address */ 7013 bus_space_write_4(sc->sc_dma_iot, 7014 ps->regs[channel].dma_iohs[IDEDMA_TBL(0)], 0, 7015 dma_maps->dmamap_table->dm_segs[0].ds_addr); 7016 7017 /* Start DMA engine */ 7018 bus_space_write_4(sc->sc_dma_iot, 7019 ps->regs[channel].dma_iohs[IDEDMA_CMD(0)], 0, 7020 (bus_space_read_4(sc->sc_dma_iot, 7021 ps->regs[channel].dma_iohs[IDEDMA_CMD(0)], 7022 0) & ~0xc0) | ((dma_maps->dma_flags & WDC_DMA_READ) ? 
0x80 : 0xc0)); 7023 } 7024 7025 int 7026 pdc203xx_dma_finish(void *v, int channel, int drive, int force) 7027 { 7028 struct pciide_softc *sc = v; 7029 struct pciide_channel *cp = &sc->pciide_channels[channel]; 7030 struct pciide_dma_maps *dma_maps = &cp->dma_maps[drive]; 7031 struct pciide_pdcsata *ps = sc->sc_cookie; 7032 7033 /* Stop DMA channel */ 7034 bus_space_write_4(sc->sc_dma_iot, 7035 ps->regs[channel].dma_iohs[IDEDMA_CMD(0)], 0, 7036 (bus_space_read_4(sc->sc_dma_iot, 7037 ps->regs[channel].dma_iohs[IDEDMA_CMD(0)], 7038 0) & ~0x80)); 7039 7040 /* Unload the map of the data buffer */ 7041 bus_dmamap_sync(sc->sc_dmat, dma_maps->dmamap_xfer, 0, 7042 dma_maps->dmamap_xfer->dm_mapsize, 7043 (dma_maps->dma_flags & WDC_DMA_READ) ? 7044 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE); 7045 bus_dmamap_unload(sc->sc_dmat, dma_maps->dmamap_xfer); 7046 7047 return (0); 7048 } 7049 7050 u_int8_t 7051 pdc203xx_read_reg(struct channel_softc *chp, enum wdc_regs reg) 7052 { 7053 struct pciide_channel *cp = (struct pciide_channel *)chp; 7054 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 7055 struct pciide_pdcsata *ps = sc->sc_cookie; 7056 u_int8_t val; 7057 7058 if (reg & _WDC_AUX) { 7059 return (bus_space_read_1(ps->regs[chp->channel].ctl_iot, 7060 ps->regs[chp->channel].ctl_ioh, reg & _WDC_REGMASK)); 7061 } else { 7062 val = bus_space_read_1(ps->regs[chp->channel].cmd_iot, 7063 ps->regs[chp->channel].cmd_iohs[reg & _WDC_REGMASK], 0); 7064 return (val); 7065 } 7066 } 7067 7068 void 7069 pdc203xx_write_reg(struct channel_softc *chp, enum wdc_regs reg, u_int8_t val) 7070 { 7071 struct pciide_channel *cp = (struct pciide_channel *)chp; 7072 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 7073 struct pciide_pdcsata *ps = sc->sc_cookie; 7074 7075 if (reg & _WDC_AUX) 7076 bus_space_write_1(ps->regs[chp->channel].ctl_iot, 7077 ps->regs[chp->channel].ctl_ioh, reg & _WDC_REGMASK, val); 7078 else 7079 bus_space_write_1(ps->regs[chp->channel].cmd_iot, 7080 ps->regs[chp->channel].cmd_iohs[reg & _WDC_REGMASK], 7081 0, val); 7082 } 7083 7084 void 7085 pdc205xx_do_reset(struct channel_softc *chp) 7086 { 7087 struct pciide_channel *cp = (struct pciide_channel *)chp; 7088 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 7089 struct pciide_pdcsata *ps = sc->sc_cookie; 7090 u_int32_t scontrol; 7091 7092 wdc_do_reset(chp); 7093 7094 /* reset SATA */ 7095 scontrol = SControl_DET_INIT | SControl_SPD_ANY | SControl_IPM_NONE; 7096 SCONTROL_WRITE(ps, chp->channel, scontrol); 7097 delay(50*1000); 7098 7099 scontrol &= ~SControl_DET_INIT; 7100 SCONTROL_WRITE(ps, chp->channel, scontrol); 7101 delay(50*1000); 7102 } 7103 7104 void 7105 pdc205xx_drv_probe(struct channel_softc *chp) 7106 { 7107 struct pciide_channel *cp = (struct pciide_channel *)chp; 7108 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 7109 struct pciide_pdcsata *ps = sc->sc_cookie; 7110 bus_space_handle_t *iohs; 7111 u_int32_t scontrol, sstatus; 7112 u_int16_t scnt, sn, cl, ch; 7113 int s; 7114 7115 SCONTROL_WRITE(ps, chp->channel, 0); 7116 delay(50*1000); 7117 7118 scontrol = SControl_DET_INIT | SControl_SPD_ANY | SControl_IPM_NONE; 7119 SCONTROL_WRITE(ps,chp->channel,scontrol); 7120 delay(50*1000); 7121 7122 scontrol &= ~SControl_DET_INIT; 7123 SCONTROL_WRITE(ps,chp->channel,scontrol); 7124 delay(50*1000); 7125 7126 sstatus = SSTATUS_READ(ps,chp->channel); 7127 7128 switch (sstatus & SStatus_DET_mask) { 7129 case SStatus_DET_NODEV: 7130 /* No Device; be silent. 
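* (the SStatus DET field reports no device detected and no PHY communication established).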
*/ 7131 break; 7132 7133 case SStatus_DET_DEV_NE: 7134 printf("%s: port %d: device connected, but " 7135 "communication not established\n", 7136 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel); 7137 break; 7138 7139 case SStatus_DET_OFFLINE: 7140 printf("%s: port %d: PHY offline\n", 7141 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel); 7142 break; 7143 7144 case SStatus_DET_DEV: 7145 iohs = ps->regs[chp->channel].cmd_iohs; 7146 bus_space_write_1(chp->cmd_iot, iohs[wdr_sdh], 0, 7147 WDSD_IBM); 7148 delay(10); /* 400ns delay */ 7149 scnt = bus_space_read_2(chp->cmd_iot, iohs[wdr_seccnt], 0); 7150 sn = bus_space_read_2(chp->cmd_iot, iohs[wdr_sector], 0); 7151 cl = bus_space_read_2(chp->cmd_iot, iohs[wdr_cyl_lo], 0); 7152 ch = bus_space_read_2(chp->cmd_iot, iohs[wdr_cyl_hi], 0); 7153 #if 0 7154 printf("%s: port %d: scnt=0x%x sn=0x%x cl=0x%x ch=0x%x\n", 7155 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel, 7156 scnt, sn, cl, ch); 7157 #endif 7158 /* 7159 * scnt and sn are supposed to be 0x1 for ATAPI, but in some 7160 * cases we get wrong values here, so ignore it. 7161 */ 7162 s = splbio(); 7163 if (cl == 0x14 && ch == 0xeb) 7164 chp->ch_drive[0].drive_flags |= DRIVE_ATAPI; 7165 else 7166 chp->ch_drive[0].drive_flags |= DRIVE_ATA; 7167 splx(s); 7168 #if 0 7169 printf("%s: port %d: device present", 7170 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel); 7171 switch ((sstatus & SStatus_SPD_mask) >> SStatus_SPD_shift) { 7172 case 1: 7173 printf(", speed: 1.5Gb/s"); 7174 break; 7175 case 2: 7176 printf(", speed: 3.0Gb/s"); 7177 break; 7178 } 7179 printf("\n"); 7180 #endif 7181 break; 7182 7183 default: 7184 printf("%s: port %d: unknown SStatus: 0x%08x\n", 7185 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel, sstatus); 7186 } 7187 } 7188 7189 #ifdef notyet 7190 /* 7191 * Inline functions for accessing the timing registers of the 7192 * OPTi controller. 7193 * 7194 * These *MUST* disable interrupts as they need atomic access to 7195 * certain magic registers. Failure to adhere to this *will* 7196 * break things in subtle ways if the wdc registers are accessed 7197 * by an interrupt routine while this magic sequence is executing. 
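* The sequence below is: two 16-bit reads of the features register, an 8-bit write of 0x03 to the sector count register to expose the configuration registers, the access itself, and finally a write of 0x83 to bring the normal registers back.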
7198 */ 7199 static __inline__ u_int8_t 7200 opti_read_config(struct channel_softc *chp, int reg) 7201 { 7202 u_int8_t rv; 7203 int s = splhigh(); 7204 7205 /* Two consecutive 16-bit reads from register #1 (0x1f1/0x171) */ 7206 (void) bus_space_read_2(chp->cmd_iot, chp->cmd_ioh, wdr_features); 7207 (void) bus_space_read_2(chp->cmd_iot, chp->cmd_ioh, wdr_features); 7208 7209 /* Followed by an 8-bit write of 0x3 to register #2 */ 7210 bus_space_write_1(chp->cmd_iot, chp->cmd_ioh, wdr_seccnt, 0x03u); 7211 7212 /* Now we can read the required register */ 7213 rv = bus_space_read_1(chp->cmd_iot, chp->cmd_ioh, reg); 7214 7215 /* Restore the real registers */ 7216 bus_space_write_1(chp->cmd_iot, chp->cmd_ioh, wdr_seccnt, 0x83u); 7217 7218 splx(s); 7219 7220 return (rv); 7221 } 7222 7223 static __inline__ void 7224 opti_write_config(struct channel_softc *chp, int reg, u_int8_t val) 7225 { 7226 int s = splhigh(); 7227 7228 /* Two consecutive 16-bit reads from register #1 (0x1f1/0x171) */ 7229 (void) bus_space_read_2(chp->cmd_iot, chp->cmd_ioh, wdr_features); 7230 (void) bus_space_read_2(chp->cmd_iot, chp->cmd_ioh, wdr_features); 7231 7232 /* Followed by an 8-bit write of 0x3 to register #2 */ 7233 bus_space_write_1(chp->cmd_iot, chp->cmd_ioh, wdr_seccnt, 0x03u); 7234 7235 /* Now we can write the required register */ 7236 bus_space_write_1(chp->cmd_iot, chp->cmd_ioh, reg, val); 7237 7238 /* Restore the real registers */ 7239 bus_space_write_1(chp->cmd_iot, chp->cmd_ioh, wdr_seccnt, 0x83u); 7240 7241 splx(s); 7242 } 7243 7244 void 7245 opti_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 7246 { 7247 struct pciide_channel *cp; 7248 bus_size_t cmdsize, ctlsize; 7249 pcireg_t interface; 7250 u_int8_t init_ctrl; 7251 int channel; 7252 7253 printf(": DMA"); 7254 /* 7255 * XXXSCW: 7256 * There seem to be a couple of buggy revisions/implementations 7257 * of the OPTi pciide chipset. This kludge seems to fix one of 7258 * the reported problems (NetBSD PR/11644) but still fails for the 7259 * other (NetBSD PR/13151), although the latter may be due to other 7260 * issues too... 
7261 */ 7262 if (sc->sc_rev <= 0x12) { 7263 printf(" (disabled)"); 7264 sc->sc_dma_ok = 0; 7265 sc->sc_wdcdev.cap = 0; 7266 } else { 7267 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA32; 7268 pciide_mapreg_dma(sc, pa); 7269 } 7270 7271 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_MODE; 7272 sc->sc_wdcdev.PIO_cap = 4; 7273 if (sc->sc_dma_ok) { 7274 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 7275 sc->sc_wdcdev.irqack = pciide_irqack; 7276 sc->sc_wdcdev.DMA_cap = 2; 7277 } 7278 sc->sc_wdcdev.set_modes = opti_setup_channel; 7279 7280 sc->sc_wdcdev.channels = sc->wdc_chanarray; 7281 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 7282 7283 init_ctrl = pciide_pci_read(sc->sc_pc, sc->sc_tag, 7284 OPTI_REG_INIT_CONTROL); 7285 7286 interface = PCI_INTERFACE(pa->pa_class); 7287 7288 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 7289 7290 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 7291 cp = &sc->pciide_channels[channel]; 7292 if (pciide_chansetup(sc, channel, interface) == 0) 7293 continue; 7294 if (channel == 1 && 7295 (init_ctrl & OPTI_INIT_CONTROL_CH2_DISABLE) != 0) { 7296 printf("%s: %s ignored (disabled)\n", 7297 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 7298 continue; 7299 } 7300 pciide_map_compat_intr(pa, cp, channel, interface); 7301 if (cp->hw_ok == 0) 7302 continue; 7303 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 7304 pciide_pci_intr); 7305 if (cp->hw_ok == 0) { 7306 pciide_unmap_compat_intr(pa, cp, channel, interface); 7307 continue; 7308 } 7309 opti_setup_channel(&cp->wdc_channel); 7310 } 7311 } 7312 7313 void 7314 opti_setup_channel(struct channel_softc *chp) 7315 { 7316 struct ata_drive_datas *drvp; 7317 struct pciide_channel *cp = (struct pciide_channel *)chp; 7318 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 7319 int drive, spd; 7320 int mode[2]; 7321 u_int8_t rv, mr; 7322 7323 /* 7324 * The `Delay' and `Address Setup Time' fields of the 7325 * Miscellaneous Register are always zero initially. 7326 */ 7327 mr = opti_read_config(chp, OPTI_REG_MISC) & ~OPTI_MISC_INDEX_MASK; 7328 mr &= ~(OPTI_MISC_DELAY_MASK | 7329 OPTI_MISC_ADDR_SETUP_MASK | 7330 OPTI_MISC_INDEX_MASK); 7331 7332 /* Prime the control register before setting timing values */ 7333 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_DISABLE); 7334 7335 /* Determine the clockrate of the PCIbus the chip is attached to */ 7336 spd = (int) opti_read_config(chp, OPTI_REG_STRAP); 7337 spd &= OPTI_STRAP_PCI_SPEED_MASK; 7338 7339 /* setup DMA if needed */ 7340 pciide_channel_dma_setup(cp); 7341 7342 for (drive = 0; drive < 2; drive++) { 7343 drvp = &chp->ch_drive[drive]; 7344 /* If no drive, skip */ 7345 if ((drvp->drive_flags & DRIVE) == 0) { 7346 mode[drive] = -1; 7347 continue; 7348 } 7349 7350 if ((drvp->drive_flags & DRIVE_DMA)) { 7351 /* 7352 * Timings will be used for both PIO and DMA, 7353 * so adjust DMA mode if needed 7354 */ 7355 if (drvp->PIO_mode > (drvp->DMA_mode + 2)) 7356 drvp->PIO_mode = drvp->DMA_mode + 2; 7357 if (drvp->DMA_mode + 2 > (drvp->PIO_mode)) 7358 drvp->DMA_mode = (drvp->PIO_mode > 2) ? 7359 drvp->PIO_mode - 2 : 0; 7360 if (drvp->DMA_mode == 0) 7361 drvp->PIO_mode = 0; 7362 7363 mode[drive] = drvp->DMA_mode + 5; 7364 } else 7365 mode[drive] = drvp->PIO_mode; 7366 7367 if (drive && mode[0] >= 0 && 7368 (opti_tim_as[spd][mode[0]] != opti_tim_as[spd][mode[1]])) { 7369 /* 7370 * Can't have two drives using different values 7371 * for `Address Setup Time'. 7372 * Slow down the faster drive to compensate. 
7373 */ 7374 int d = (opti_tim_as[spd][mode[0]] > 7375 opti_tim_as[spd][mode[1]]) ? 0 : 1; 7376 7377 mode[d] = mode[1-d]; 7378 chp->ch_drive[d].PIO_mode = chp->ch_drive[1-d].PIO_mode; 7379 chp->ch_drive[d].DMA_mode = 0; 7380 chp->ch_drive[d].drive_flags &= DRIVE_DMA; 7381 } 7382 } 7383 7384 for (drive = 0; drive < 2; drive++) { 7385 int m; 7386 if ((m = mode[drive]) < 0) 7387 continue; 7388 7389 /* Set the Address Setup Time and select appropriate index */ 7390 rv = opti_tim_as[spd][m] << OPTI_MISC_ADDR_SETUP_SHIFT; 7391 rv |= OPTI_MISC_INDEX(drive); 7392 opti_write_config(chp, OPTI_REG_MISC, mr | rv); 7393 7394 /* Set the pulse width and recovery timing parameters */ 7395 rv = opti_tim_cp[spd][m] << OPTI_PULSE_WIDTH_SHIFT; 7396 rv |= opti_tim_rt[spd][m] << OPTI_RECOVERY_TIME_SHIFT; 7397 opti_write_config(chp, OPTI_REG_READ_CYCLE_TIMING, rv); 7398 opti_write_config(chp, OPTI_REG_WRITE_CYCLE_TIMING, rv); 7399 7400 /* Set the Enhanced Mode register appropriately */ 7401 rv = pciide_pci_read(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE); 7402 rv &= ~OPTI_ENH_MODE_MASK(chp->channel, drive); 7403 rv |= OPTI_ENH_MODE(chp->channel, drive, opti_tim_em[m]); 7404 pciide_pci_write(sc->sc_pc, sc->sc_tag, OPTI_REG_ENH_MODE, rv); 7405 } 7406 7407 /* Finally, enable the timings */ 7408 opti_write_config(chp, OPTI_REG_CONTROL, OPTI_CONTROL_ENABLE); 7409 7410 pciide_print_modes(cp); 7411 } 7412 #endif 7413 7414 void 7415 serverworks_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 7416 { 7417 struct pciide_channel *cp; 7418 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 7419 pcitag_t pcib_tag; 7420 int channel; 7421 bus_size_t cmdsize, ctlsize; 7422 7423 printf(": DMA"); 7424 pciide_mapreg_dma(sc, pa); 7425 printf("\n"); 7426 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 7427 WDC_CAPABILITY_MODE; 7428 7429 if (sc->sc_dma_ok) { 7430 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 7431 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 7432 sc->sc_wdcdev.irqack = pciide_irqack; 7433 } 7434 sc->sc_wdcdev.PIO_cap = 4; 7435 sc->sc_wdcdev.DMA_cap = 2; 7436 switch (sc->sc_pp->ide_product) { 7437 case PCI_PRODUCT_RCC_OSB4_IDE: 7438 sc->sc_wdcdev.UDMA_cap = 2; 7439 break; 7440 case PCI_PRODUCT_RCC_CSB5_IDE: 7441 if (sc->sc_rev < 0x92) 7442 sc->sc_wdcdev.UDMA_cap = 4; 7443 else 7444 sc->sc_wdcdev.UDMA_cap = 5; 7445 break; 7446 case PCI_PRODUCT_RCC_CSB6_IDE: 7447 sc->sc_wdcdev.UDMA_cap = 4; 7448 break; 7449 case PCI_PRODUCT_RCC_CSB6_RAID_IDE: 7450 case PCI_PRODUCT_RCC_HT_1000_IDE: 7451 sc->sc_wdcdev.UDMA_cap = 5; 7452 break; 7453 } 7454 7455 sc->sc_wdcdev.set_modes = serverworks_setup_channel; 7456 sc->sc_wdcdev.channels = sc->wdc_chanarray; 7457 sc->sc_wdcdev.nchannels = 7458 (sc->sc_pp->ide_product == PCI_PRODUCT_RCC_CSB6_IDE ? 
1 : 2); 7459 7460 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 7461 cp = &sc->pciide_channels[channel]; 7462 if (pciide_chansetup(sc, channel, interface) == 0) 7463 continue; 7464 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 7465 serverworks_pci_intr); 7466 if (cp->hw_ok == 0) 7467 return; 7468 pciide_map_compat_intr(pa, cp, channel, interface); 7469 if (cp->hw_ok == 0) 7470 return; 7471 serverworks_setup_channel(&cp->wdc_channel); 7472 } 7473 7474 pcib_tag = pci_make_tag(pa->pa_pc, pa->pa_bus, pa->pa_device, 0); 7475 pci_conf_write(pa->pa_pc, pcib_tag, 0x64, 7476 (pci_conf_read(pa->pa_pc, pcib_tag, 0x64) & ~0x2000) | 0x4000); 7477 } 7478 7479 void 7480 serverworks_setup_channel(struct channel_softc *chp) 7481 { 7482 struct ata_drive_datas *drvp; 7483 struct pciide_channel *cp = (struct pciide_channel *)chp; 7484 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 7485 int channel = chp->channel; 7486 int drive, unit; 7487 u_int32_t pio_time, dma_time, pio_mode, udma_mode; 7488 u_int32_t idedma_ctl; 7489 static const u_int8_t pio_modes[5] = {0x5d, 0x47, 0x34, 0x22, 0x20}; 7490 static const u_int8_t dma_modes[3] = {0x77, 0x21, 0x20}; 7491 7492 /* setup DMA if needed */ 7493 pciide_channel_dma_setup(cp); 7494 7495 pio_time = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x40); 7496 dma_time = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x44); 7497 pio_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x48); 7498 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, 0x54); 7499 7500 pio_time &= ~(0xffff << (16 * channel)); 7501 dma_time &= ~(0xffff << (16 * channel)); 7502 pio_mode &= ~(0xff << (8 * channel + 16)); 7503 udma_mode &= ~(0xff << (8 * channel + 16)); 7504 udma_mode &= ~(3 << (2 * channel)); 7505 7506 idedma_ctl = 0; 7507 7508 /* Per drive settings */ 7509 for (drive = 0; drive < 2; drive++) { 7510 drvp = &chp->ch_drive[drive]; 7511 /* If no drive, skip */ 7512 if ((drvp->drive_flags & DRIVE) == 0) 7513 continue; 7514 unit = drive + 2 * channel; 7515 /* add timing values, setup DMA if needed */ 7516 pio_time |= pio_modes[drvp->PIO_mode] << (8 * (unit^1)); 7517 pio_mode |= drvp->PIO_mode << (4 * unit + 16); 7518 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) && 7519 (drvp->drive_flags & DRIVE_UDMA)) { 7520 /* use Ultra/DMA, check for 80-pin cable */ 7521 if (sc->sc_rev <= 0x92 && drvp->UDMA_mode > 2 && 7522 (PCI_PRODUCT(pci_conf_read(sc->sc_pc, sc->sc_tag, 7523 PCI_SUBSYS_ID_REG)) & 7524 (1 << (14 + channel))) == 0) { 7525 WDCDEBUG_PRINT(("%s(%s:%d:%d): 80-wire " 7526 "cable not detected\n", drvp->drive_name, 7527 sc->sc_wdcdev.sc_dev.dv_xname, 7528 channel, drive), DEBUG_PROBE); 7529 drvp->UDMA_mode = 2; 7530 } 7531 dma_time |= dma_modes[drvp->DMA_mode] << (8 * (unit^1)); 7532 udma_mode |= drvp->UDMA_mode << (4 * unit + 16); 7533 udma_mode |= 1 << unit; 7534 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 7535 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) && 7536 (drvp->drive_flags & DRIVE_DMA)) { 7537 /* use Multiword DMA */ 7538 drvp->drive_flags &= ~DRIVE_UDMA; 7539 dma_time |= dma_modes[drvp->DMA_mode] << (8 * (unit^1)); 7540 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 7541 } else { 7542 /* PIO only */ 7543 drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA); 7544 } 7545 } 7546 7547 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x40, pio_time); 7548 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x44, dma_time); 7549 if (sc->sc_pp->ide_product != PCI_PRODUCT_RCC_OSB4_IDE) 7550 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x48, pio_mode); 7551 pci_conf_write(sc->sc_pc, sc->sc_tag, 0x54, udma_mode); 
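	/*
	 * Register packing implied by the shifts above: 0x40 (PIO timing)
	 * and 0x44 (MWDMA timing) hold one timing byte per drive, with
	 * drive 0 in the high byte and drive 1 in the low byte of each
	 * channel's 16-bit half; 0x48 and 0x54 hold one mode nibble per
	 * drive starting at bit 16, and the low bits of 0x54 are the
	 * per-drive UDMA enable bits.
	 */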
7552 7553 if (idedma_ctl != 0) { 7554 /* Add software bits in status register */ 7555 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 7556 IDEDMA_CTL(channel), idedma_ctl); 7557 } 7558 pciide_print_modes(cp); 7559 } 7560 7561 int 7562 serverworks_pci_intr(void *arg) 7563 { 7564 struct pciide_softc *sc = arg; 7565 struct pciide_channel *cp; 7566 struct channel_softc *wdc_cp; 7567 int rv = 0; 7568 int dmastat, i, crv; 7569 7570 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 7571 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 7572 IDEDMA_CTL(i)); 7573 if ((dmastat & (IDEDMA_CTL_ACT | IDEDMA_CTL_INTR)) != 7574 IDEDMA_CTL_INTR) 7575 continue; 7576 cp = &sc->pciide_channels[i]; 7577 wdc_cp = &cp->wdc_channel; 7578 crv = wdcintr(wdc_cp); 7579 if (crv == 0) { 7580 printf("%s:%d: bogus intr\n", 7581 sc->sc_wdcdev.sc_dev.dv_xname, i); 7582 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 7583 IDEDMA_CTL(i), dmastat); 7584 } else 7585 rv = 1; 7586 } 7587 return (rv); 7588 } 7589 7590 void 7591 svwsata_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 7592 { 7593 struct pciide_channel *cp; 7594 pci_intr_handle_t intrhandle; 7595 const char *intrstr; 7596 int channel; 7597 struct pciide_svwsata *ss; 7598 7599 /* Allocate memory for private data */ 7600 sc->sc_cookie = malloc(sizeof(*ss), M_DEVBUF, M_NOWAIT | M_ZERO); 7601 ss = sc->sc_cookie; 7602 7603 /* The 4-port version has a dummy second function. */ 7604 if (pci_conf_read(sc->sc_pc, sc->sc_tag, 7605 PCI_MAPREG_START + 0x14) == 0) { 7606 printf("\n"); 7607 return; 7608 } 7609 7610 if (pci_mapreg_map(pa, PCI_MAPREG_START + 0x14, 7611 PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT, 0, 7612 &ss->ba5_st, &ss->ba5_sh, NULL, NULL, 0) != 0) { 7613 printf(": unable to map BA5 register space\n"); 7614 return; 7615 } 7616 7617 printf(": DMA"); 7618 svwsata_mapreg_dma(sc, pa); 7619 printf("\n"); 7620 7621 if (sc->sc_dma_ok) { 7622 sc->sc_wdcdev.cap |= WDC_CAPABILITY_UDMA | 7623 WDC_CAPABILITY_DMA | WDC_CAPABILITY_IRQACK; 7624 sc->sc_wdcdev.irqack = pciide_irqack; 7625 } 7626 sc->sc_wdcdev.PIO_cap = 4; 7627 sc->sc_wdcdev.DMA_cap = 2; 7628 sc->sc_wdcdev.UDMA_cap = 6; 7629 7630 sc->sc_wdcdev.channels = sc->wdc_chanarray; 7631 sc->sc_wdcdev.nchannels = 4; 7632 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 7633 WDC_CAPABILITY_MODE | WDC_CAPABILITY_SATA; 7634 sc->sc_wdcdev.set_modes = sata_setup_channel; 7635 7636 /* We can use SControl and SStatus to probe for drives. */ 7637 sc->sc_wdcdev.drv_probe = svwsata_drv_probe; 7638 7639 /* Map and establish the interrupt handler. */ 7640 if(pci_intr_map(pa, &intrhandle) != 0) { 7641 printf("%s: couldn't map native-PCI interrupt\n", 7642 sc->sc_wdcdev.sc_dev.dv_xname); 7643 return; 7644 } 7645 intrstr = pci_intr_string(pa->pa_pc, intrhandle); 7646 sc->sc_pci_ih = pci_intr_establish(pa->pa_pc, intrhandle, IPL_BIO, 7647 pciide_pci_intr, sc, sc->sc_wdcdev.sc_dev.dv_xname); 7648 if (sc->sc_pci_ih != NULL) { 7649 printf("%s: using %s for native-PCI interrupt\n", 7650 sc->sc_wdcdev.sc_dev.dv_xname, 7651 intrstr ? 
intrstr : "unknown interrupt"); 7652 } else { 7653 printf("%s: couldn't establish native-PCI interrupt", 7654 sc->sc_wdcdev.sc_dev.dv_xname); 7655 if (intrstr != NULL) 7656 printf(" at %s", intrstr); 7657 printf("\n"); 7658 return; 7659 } 7660 7661 switch (sc->sc_pp->ide_product) { 7662 case PCI_PRODUCT_RCC_K2_SATA: 7663 bus_space_write_4(ss->ba5_st, ss->ba5_sh, SVWSATA_SICR1, 7664 bus_space_read_4(ss->ba5_st, ss->ba5_sh, SVWSATA_SICR1) 7665 & ~0x00040000); 7666 bus_space_write_4(ss->ba5_st, ss->ba5_sh, 7667 SVWSATA_SIM, 0); 7668 break; 7669 } 7670 7671 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 7672 cp = &sc->pciide_channels[channel]; 7673 if (pciide_chansetup(sc, channel, 0) == 0) 7674 continue; 7675 svwsata_mapchan(cp); 7676 sata_setup_channel(&cp->wdc_channel); 7677 } 7678 } 7679 7680 void 7681 svwsata_mapreg_dma(struct pciide_softc *sc, struct pci_attach_args *pa) 7682 { 7683 struct pciide_svwsata *ss = sc->sc_cookie; 7684 7685 sc->sc_wdcdev.dma_arg = sc; 7686 sc->sc_wdcdev.dma_init = pciide_dma_init; 7687 sc->sc_wdcdev.dma_start = pciide_dma_start; 7688 sc->sc_wdcdev.dma_finish = pciide_dma_finish; 7689 7690 /* XXX */ 7691 sc->sc_dma_iot = ss->ba5_st; 7692 sc->sc_dma_ioh = ss->ba5_sh; 7693 7694 sc->sc_dmacmd_read = svwsata_dmacmd_read; 7695 sc->sc_dmacmd_write = svwsata_dmacmd_write; 7696 sc->sc_dmactl_read = svwsata_dmactl_read; 7697 sc->sc_dmactl_write = svwsata_dmactl_write; 7698 sc->sc_dmatbl_write = svwsata_dmatbl_write; 7699 7700 /* DMA registers all set up! */ 7701 sc->sc_dmat = pa->pa_dmat; 7702 sc->sc_dma_ok = 1; 7703 } 7704 7705 u_int8_t 7706 svwsata_dmacmd_read(struct pciide_softc *sc, int chan) 7707 { 7708 return (bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 7709 (chan << 8) + SVWSATA_DMA + IDEDMA_CMD(0))); 7710 } 7711 7712 void 7713 svwsata_dmacmd_write(struct pciide_softc *sc, int chan, u_int8_t val) 7714 { 7715 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 7716 (chan << 8) + SVWSATA_DMA + IDEDMA_CMD(0), val); 7717 } 7718 7719 u_int8_t 7720 svwsata_dmactl_read(struct pciide_softc *sc, int chan) 7721 { 7722 return (bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 7723 (chan << 8) + SVWSATA_DMA + IDEDMA_CTL(0))); 7724 } 7725 7726 void 7727 svwsata_dmactl_write(struct pciide_softc *sc, int chan, u_int8_t val) 7728 { 7729 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 7730 (chan << 8) + SVWSATA_DMA + IDEDMA_CTL(0), val); 7731 } 7732 7733 void 7734 svwsata_dmatbl_write(struct pciide_softc *sc, int chan, u_int32_t val) 7735 { 7736 bus_space_write_4(sc->sc_dma_iot, sc->sc_dma_ioh, 7737 (chan << 8) + SVWSATA_DMA + IDEDMA_TBL(0), val); 7738 } 7739 7740 void 7741 svwsata_mapchan(struct pciide_channel *cp) 7742 { 7743 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 7744 struct channel_softc *wdc_cp = &cp->wdc_channel; 7745 struct pciide_svwsata *ss = sc->sc_cookie; 7746 7747 cp->compat = 0; 7748 cp->ih = sc->sc_pci_ih; 7749 7750 if (bus_space_subregion(ss->ba5_st, ss->ba5_sh, 7751 (wdc_cp->channel << 8) + SVWSATA_TF0, 7752 SVWSATA_TF8 - SVWSATA_TF0, &wdc_cp->cmd_ioh) != 0) { 7753 printf("%s: couldn't map %s cmd regs\n", 7754 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 7755 return; 7756 } 7757 if (bus_space_subregion(ss->ba5_st, ss->ba5_sh, 7758 (wdc_cp->channel << 8) + SVWSATA_TF8, 4, 7759 &wdc_cp->ctl_ioh) != 0) { 7760 printf("%s: couldn't map %s ctl regs\n", 7761 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 7762 return; 7763 } 7764 wdc_cp->cmd_iot = wdc_cp->ctl_iot = ss->ba5_st; 7765 wdc_cp->_vtbl = &wdc_svwsata_vtbl; 7766 
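	/*
	 * Task-file accesses go through wdc_svwsata_vtbl, i.e.
	 * svwsata_read_reg()/svwsata_write_reg() below, which multiply the
	 * register index by 4: the controller exposes its task-file
	 * registers on 32-bit boundaries in BA5 memory space.
	 */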
wdc_cp->ch_flags |= WDCF_DMA_BEFORE_CMD; 7767 wdcattach(wdc_cp); 7768 } 7769 7770 void 7771 svwsata_drv_probe(struct channel_softc *chp) 7772 { 7773 struct pciide_channel *cp = (struct pciide_channel *)chp; 7774 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 7775 struct pciide_svwsata *ss = sc->sc_cookie; 7776 int channel = chp->channel; 7777 uint32_t scontrol, sstatus; 7778 uint8_t scnt, sn, cl, ch; 7779 int s; 7780 7781 /* 7782 * Request communication initialization sequence, any speed. 7783 * Performing this is the equivalent of an ATA Reset. 7784 */ 7785 scontrol = SControl_DET_INIT | SControl_SPD_ANY; 7786 7787 /* 7788 * XXX We don't yet support SATA power management; disable all 7789 * power management state transitions. 7790 */ 7791 scontrol |= SControl_IPM_NONE; 7792 7793 bus_space_write_4(ss->ba5_st, ss->ba5_sh, 7794 (channel << 8) + SVWSATA_SCONTROL, scontrol); 7795 delay(50 * 1000); 7796 scontrol &= ~SControl_DET_INIT; 7797 bus_space_write_4(ss->ba5_st, ss->ba5_sh, 7798 (channel << 8) + SVWSATA_SCONTROL, scontrol); 7799 delay(50 * 1000); 7800 7801 sstatus = bus_space_read_4(ss->ba5_st, ss->ba5_sh, 7802 (channel << 8) + SVWSATA_SSTATUS); 7803 #if 0 7804 printf("%s: port %d: SStatus=0x%08x, SControl=0x%08x\n", 7805 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel, sstatus, 7806 bus_space_read_4(ss->ba5_st, ss->ba5_sh, 7807 (channel << 8) + SVWSATA_SSTATUS)); 7808 #endif 7809 switch (sstatus & SStatus_DET_mask) { 7810 case SStatus_DET_NODEV: 7811 /* No device; be silent. */ 7812 break; 7813 7814 case SStatus_DET_DEV_NE: 7815 printf("%s: port %d: device connected, but " 7816 "communication not established\n", 7817 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel); 7818 break; 7819 7820 case SStatus_DET_OFFLINE: 7821 printf("%s: port %d: PHY offline\n", 7822 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel); 7823 break; 7824 7825 case SStatus_DET_DEV: 7826 /* 7827 * XXX ATAPI detection doesn't currently work. Don't 7828 * XXX know why. But, it's not like the standard method 7829 * XXX can detect an ATAPI device connected via a SATA/PATA 7830 * XXX bridge, so at least this is no worse. --thorpej 7831 */ 7832 if (chp->_vtbl != NULL) 7833 CHP_WRITE_REG(chp, wdr_sdh, WDSD_IBM | (0 << 4)); 7834 else 7835 bus_space_write_1(chp->cmd_iot, chp->cmd_ioh, 7836 wdr_sdh & _WDC_REGMASK, WDSD_IBM | (0 << 4)); 7837 delay(10); /* 400ns delay */ 7838 /* Save register contents. */ 7839 if (chp->_vtbl != NULL) { 7840 scnt = CHP_READ_REG(chp, wdr_seccnt); 7841 sn = CHP_READ_REG(chp, wdr_sector); 7842 cl = CHP_READ_REG(chp, wdr_cyl_lo); 7843 ch = CHP_READ_REG(chp, wdr_cyl_hi); 7844 } else { 7845 scnt = bus_space_read_1(chp->cmd_iot, 7846 chp->cmd_ioh, wdr_seccnt & _WDC_REGMASK); 7847 sn = bus_space_read_1(chp->cmd_iot, 7848 chp->cmd_ioh, wdr_sector & _WDC_REGMASK); 7849 cl = bus_space_read_1(chp->cmd_iot, 7850 chp->cmd_ioh, wdr_cyl_lo & _WDC_REGMASK); 7851 ch = bus_space_read_1(chp->cmd_iot, 7852 chp->cmd_ioh, wdr_cyl_hi & _WDC_REGMASK); 7853 } 7854 #if 0 7855 printf("%s: port %d: scnt=0x%x sn=0x%x cl=0x%x ch=0x%x\n", 7856 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel, 7857 scnt, sn, cl, ch); 7858 #endif 7859 /* 7860 * scnt and sn are supposed to be 0x1 for ATAPI, but in some 7861 * cases we get wrong values here, so ignore it. 
7862 */ 7863 s = splbio(); 7864 if (cl == 0x14 && ch == 0xeb) 7865 chp->ch_drive[0].drive_flags |= DRIVE_ATAPI; 7866 else 7867 chp->ch_drive[0].drive_flags |= DRIVE_ATA; 7868 splx(s); 7869 7870 printf("%s: port %d: device present", 7871 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel); 7872 switch ((sstatus & SStatus_SPD_mask) >> SStatus_SPD_shift) { 7873 case 1: 7874 printf(", speed: 1.5Gb/s"); 7875 break; 7876 case 2: 7877 printf(", speed: 3.0Gb/s"); 7878 break; 7879 } 7880 printf("\n"); 7881 break; 7882 7883 default: 7884 printf("%s: port %d: unknown SStatus: 0x%08x\n", 7885 sc->sc_wdcdev.sc_dev.dv_xname, chp->channel, sstatus); 7886 } 7887 } 7888 7889 u_int8_t 7890 svwsata_read_reg(struct channel_softc *chp, enum wdc_regs reg) 7891 { 7892 if (reg & _WDC_AUX) { 7893 return (bus_space_read_4(chp->ctl_iot, chp->ctl_ioh, 7894 (reg & _WDC_REGMASK) << 2)); 7895 } else { 7896 return (bus_space_read_4(chp->cmd_iot, chp->cmd_ioh, 7897 (reg & _WDC_REGMASK) << 2)); 7898 } 7899 } 7900 7901 void 7902 svwsata_write_reg(struct channel_softc *chp, enum wdc_regs reg, u_int8_t val) 7903 { 7904 if (reg & _WDC_AUX) { 7905 bus_space_write_4(chp->ctl_iot, chp->ctl_ioh, 7906 (reg & _WDC_REGMASK) << 2, val); 7907 } else { 7908 bus_space_write_4(chp->cmd_iot, chp->cmd_ioh, 7909 (reg & _WDC_REGMASK) << 2, val); 7910 } 7911 } 7912 7913 void 7914 svwsata_lba48_write_reg(struct channel_softc *chp, enum wdc_regs reg, u_int16_t val) 7915 { 7916 if (reg & _WDC_AUX) { 7917 bus_space_write_4(chp->ctl_iot, chp->ctl_ioh, 7918 (reg & _WDC_REGMASK) << 2, val); 7919 } else { 7920 bus_space_write_4(chp->cmd_iot, chp->cmd_ioh, 7921 (reg & _WDC_REGMASK) << 2, val); 7922 } 7923 } 7924 7925 #define ACARD_IS_850(sc) \ 7926 ((sc)->sc_pp->ide_product == PCI_PRODUCT_ACARD_ATP850U) 7927 7928 void 7929 acard_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 7930 { 7931 struct pciide_channel *cp; 7932 int i; 7933 pcireg_t interface; 7934 bus_size_t cmdsize, ctlsize; 7935 7936 /* 7937 * when the chip is in native mode it identifies itself as a 7938 * 'misc mass storage'. Fake interface in this case. 
7939 */ 7940 if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) { 7941 interface = PCI_INTERFACE(pa->pa_class); 7942 } else { 7943 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA | 7944 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1); 7945 } 7946 7947 printf(": DMA"); 7948 pciide_mapreg_dma(sc, pa); 7949 printf("\n"); 7950 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 7951 WDC_CAPABILITY_MODE; 7952 7953 if (sc->sc_dma_ok) { 7954 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 7955 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 7956 sc->sc_wdcdev.irqack = pciide_irqack; 7957 } 7958 sc->sc_wdcdev.PIO_cap = 4; 7959 sc->sc_wdcdev.DMA_cap = 2; 7960 switch (sc->sc_pp->ide_product) { 7961 case PCI_PRODUCT_ACARD_ATP850U: 7962 sc->sc_wdcdev.UDMA_cap = 2; 7963 break; 7964 case PCI_PRODUCT_ACARD_ATP860: 7965 case PCI_PRODUCT_ACARD_ATP860A: 7966 sc->sc_wdcdev.UDMA_cap = 4; 7967 break; 7968 case PCI_PRODUCT_ACARD_ATP865A: 7969 case PCI_PRODUCT_ACARD_ATP865R: 7970 sc->sc_wdcdev.UDMA_cap = 6; 7971 break; 7972 } 7973 7974 sc->sc_wdcdev.set_modes = acard_setup_channel; 7975 sc->sc_wdcdev.channels = sc->wdc_chanarray; 7976 sc->sc_wdcdev.nchannels = 2; 7977 7978 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 7979 cp = &sc->pciide_channels[i]; 7980 if (pciide_chansetup(sc, i, interface) == 0) 7981 continue; 7982 if (interface & PCIIDE_INTERFACE_PCI(i)) { 7983 cp->hw_ok = pciide_mapregs_native(pa, cp, &cmdsize, 7984 &ctlsize, pciide_pci_intr); 7985 } else { 7986 cp->hw_ok = pciide_mapregs_compat(pa, cp, i, 7987 &cmdsize, &ctlsize); 7988 } 7989 if (cp->hw_ok == 0) 7990 return; 7991 cp->wdc_channel.data32iot = cp->wdc_channel.cmd_iot; 7992 cp->wdc_channel.data32ioh = cp->wdc_channel.cmd_ioh; 7993 wdcattach(&cp->wdc_channel); 7994 acard_setup_channel(&cp->wdc_channel); 7995 } 7996 if (!ACARD_IS_850(sc)) { 7997 u_int32_t reg; 7998 reg = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL); 7999 reg &= ~ATP860_CTRL_INT; 8000 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL, reg); 8001 } 8002 } 8003 8004 void 8005 acard_setup_channel(struct channel_softc *chp) 8006 { 8007 struct ata_drive_datas *drvp; 8008 struct pciide_channel *cp = (struct pciide_channel *)chp; 8009 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 8010 int channel = chp->channel; 8011 int drive; 8012 u_int32_t idetime, udma_mode; 8013 u_int32_t idedma_ctl; 8014 8015 /* setup DMA if needed */ 8016 pciide_channel_dma_setup(cp); 8017 8018 if (ACARD_IS_850(sc)) { 8019 idetime = 0; 8020 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP850_UDMA); 8021 udma_mode &= ~ATP850_UDMA_MASK(channel); 8022 } else { 8023 idetime = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_IDETIME); 8024 idetime &= ~ATP860_SETTIME_MASK(channel); 8025 udma_mode = pci_conf_read(sc->sc_pc, sc->sc_tag, ATP860_UDMA); 8026 udma_mode &= ~ATP860_UDMA_MASK(channel); 8027 } 8028 8029 idedma_ctl = 0; 8030 8031 /* Per drive settings */ 8032 for (drive = 0; drive < 2; drive++) { 8033 drvp = &chp->ch_drive[drive]; 8034 /* If no drive, skip */ 8035 if ((drvp->drive_flags & DRIVE) == 0) 8036 continue; 8037 /* add timing values, setup DMA if needed */ 8038 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) && 8039 (drvp->drive_flags & DRIVE_UDMA)) { 8040 /* use Ultra/DMA */ 8041 if (ACARD_IS_850(sc)) { 8042 idetime |= ATP850_SETTIME(drive, 8043 acard_act_udma[drvp->UDMA_mode], 8044 acard_rec_udma[drvp->UDMA_mode]); 8045 udma_mode |= ATP850_UDMA_MODE(channel, drive, 8046 acard_udma_conf[drvp->UDMA_mode]); 8047 } else { 8048 idetime |= 
ATP860_SETTIME(channel, drive, 8049 acard_act_udma[drvp->UDMA_mode], 8050 acard_rec_udma[drvp->UDMA_mode]); 8051 udma_mode |= ATP860_UDMA_MODE(channel, drive, 8052 acard_udma_conf[drvp->UDMA_mode]); 8053 } 8054 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 8055 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) && 8056 (drvp->drive_flags & DRIVE_DMA)) { 8057 /* use Multiword DMA */ 8058 drvp->drive_flags &= ~DRIVE_UDMA; 8059 if (ACARD_IS_850(sc)) { 8060 idetime |= ATP850_SETTIME(drive, 8061 acard_act_dma[drvp->DMA_mode], 8062 acard_rec_dma[drvp->DMA_mode]); 8063 } else { 8064 idetime |= ATP860_SETTIME(channel, drive, 8065 acard_act_dma[drvp->DMA_mode], 8066 acard_rec_dma[drvp->DMA_mode]); 8067 } 8068 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 8069 } else { 8070 /* PIO only */ 8071 drvp->drive_flags &= ~(DRIVE_UDMA | DRIVE_DMA); 8072 if (ACARD_IS_850(sc)) { 8073 idetime |= ATP850_SETTIME(drive, 8074 acard_act_pio[drvp->PIO_mode], 8075 acard_rec_pio[drvp->PIO_mode]); 8076 } else { 8077 idetime |= ATP860_SETTIME(channel, drive, 8078 acard_act_pio[drvp->PIO_mode], 8079 acard_rec_pio[drvp->PIO_mode]); 8080 } 8081 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL, 8082 pci_conf_read(sc->sc_pc, sc->sc_tag, ATP8x0_CTRL) 8083 | ATP8x0_CTRL_EN(channel)); 8084 } 8085 } 8086 8087 if (idedma_ctl != 0) { 8088 /* Add software bits in status register */ 8089 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 8090 IDEDMA_CTL(channel), idedma_ctl); 8091 } 8092 pciide_print_modes(cp); 8093 8094 if (ACARD_IS_850(sc)) { 8095 pci_conf_write(sc->sc_pc, sc->sc_tag, 8096 ATP850_IDETIME(channel), idetime); 8097 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP850_UDMA, udma_mode); 8098 } else { 8099 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_IDETIME, idetime); 8100 pci_conf_write(sc->sc_pc, sc->sc_tag, ATP860_UDMA, udma_mode); 8101 } 8102 } 8103 8104 void 8105 nforce_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 8106 { 8107 struct pciide_channel *cp; 8108 int channel; 8109 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 8110 bus_size_t cmdsize, ctlsize; 8111 u_int32_t conf; 8112 8113 conf = pci_conf_read(sc->sc_pc, sc->sc_tag, NFORCE_CONF); 8114 WDCDEBUG_PRINT(("%s: conf register 0x%x\n", 8115 sc->sc_wdcdev.sc_dev.dv_xname, conf), DEBUG_PROBE); 8116 8117 printf(": DMA"); 8118 pciide_mapreg_dma(sc, pa); 8119 8120 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 8121 WDC_CAPABILITY_MODE; 8122 if (sc->sc_dma_ok) { 8123 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 8124 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 8125 sc->sc_wdcdev.irqack = pciide_irqack; 8126 } 8127 sc->sc_wdcdev.PIO_cap = 4; 8128 sc->sc_wdcdev.DMA_cap = 2; 8129 switch (sc->sc_pp->ide_product) { 8130 case PCI_PRODUCT_NVIDIA_NFORCE_IDE: 8131 sc->sc_wdcdev.UDMA_cap = 5; 8132 break; 8133 default: 8134 sc->sc_wdcdev.UDMA_cap = 6; 8135 } 8136 sc->sc_wdcdev.set_modes = nforce_setup_channel; 8137 sc->sc_wdcdev.channels = sc->wdc_chanarray; 8138 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 8139 8140 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 8141 8142 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 8143 cp = &sc->pciide_channels[channel]; 8144 8145 if (pciide_chansetup(sc, channel, interface) == 0) 8146 continue; 8147 8148 if ((conf & NFORCE_CHAN_EN(channel)) == 0) { 8149 printf("%s: %s ignored (disabled)\n", 8150 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 8151 continue; 8152 } 8153 8154 pciide_map_compat_intr(pa, cp, channel, interface); 8155 if (cp->hw_ok == 0) 8156 continue; 8157 
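		/*
		 * Map the channel registers (native or compat, depending on
		 * `interface'); on failure, undo the compat interrupt
		 * mapping and skip this channel.
		 */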
pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 8158 nforce_pci_intr); 8159 if (cp->hw_ok == 0) { 8160 pciide_unmap_compat_intr(pa, cp, channel, interface); 8161 continue; 8162 } 8163 8164 if (pciide_chan_candisable(cp)) { 8165 conf &= ~NFORCE_CHAN_EN(channel); 8166 pciide_unmap_compat_intr(pa, cp, channel, interface); 8167 continue; 8168 } 8169 8170 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 8171 } 8172 WDCDEBUG_PRINT(("%s: new conf register 0x%x\n", 8173 sc->sc_wdcdev.sc_dev.dv_xname, conf), DEBUG_PROBE); 8174 pci_conf_write(sc->sc_pc, sc->sc_tag, NFORCE_CONF, conf); 8175 } 8176 8177 void 8178 nforce_setup_channel(struct channel_softc *chp) 8179 { 8180 struct ata_drive_datas *drvp; 8181 int drive, mode; 8182 u_int32_t idedma_ctl; 8183 struct pciide_channel *cp = (struct pciide_channel *)chp; 8184 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 8185 int channel = chp->channel; 8186 u_int32_t conf, piodmatim, piotim, udmatim; 8187 8188 conf = pci_conf_read(sc->sc_pc, sc->sc_tag, NFORCE_CONF); 8189 piodmatim = pci_conf_read(sc->sc_pc, sc->sc_tag, NFORCE_PIODMATIM); 8190 piotim = pci_conf_read(sc->sc_pc, sc->sc_tag, NFORCE_PIOTIM); 8191 udmatim = pci_conf_read(sc->sc_pc, sc->sc_tag, NFORCE_UDMATIM); 8192 WDCDEBUG_PRINT(("%s: %s old timing values: piodmatim=0x%x, " 8193 "piotim=0x%x, udmatim=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, 8194 cp->name, piodmatim, piotim, udmatim), DEBUG_PROBE); 8195 8196 /* Setup DMA if needed */ 8197 pciide_channel_dma_setup(cp); 8198 8199 /* Clear all bits for this channel */ 8200 idedma_ctl = 0; 8201 piodmatim &= ~NFORCE_PIODMATIM_MASK(channel); 8202 udmatim &= ~NFORCE_UDMATIM_MASK(channel); 8203 8204 /* Per channel settings */ 8205 for (drive = 0; drive < 2; drive++) { 8206 drvp = &chp->ch_drive[drive]; 8207 8208 /* If no drive, skip */ 8209 if ((drvp->drive_flags & DRIVE) == 0) 8210 continue; 8211 8212 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) != 0 && 8213 (drvp->drive_flags & DRIVE_UDMA) != 0) { 8214 /* Setup UltraDMA mode */ 8215 drvp->drive_flags &= ~DRIVE_DMA; 8216 8217 udmatim |= NFORCE_UDMATIM_SET(channel, drive, 8218 nforce_udma[drvp->UDMA_mode]) | 8219 NFORCE_UDMA_EN(channel, drive) | 8220 NFORCE_UDMA_ENM(channel, drive); 8221 8222 mode = drvp->PIO_mode; 8223 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) != 0 && 8224 (drvp->drive_flags & DRIVE_DMA) != 0) { 8225 /* Setup multiword DMA mode */ 8226 drvp->drive_flags &= ~DRIVE_UDMA; 8227 8228 /* mode = min(pio, dma + 2) */ 8229 if (drvp->PIO_mode <= (drvp->DMA_mode + 2)) 8230 mode = drvp->PIO_mode; 8231 else 8232 mode = drvp->DMA_mode + 2; 8233 } else { 8234 mode = drvp->PIO_mode; 8235 goto pio; 8236 } 8237 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 8238 8239 pio: 8240 /* Setup PIO mode */ 8241 if (mode <= 2) { 8242 drvp->DMA_mode = 0; 8243 drvp->PIO_mode = 0; 8244 mode = 0; 8245 } else { 8246 drvp->PIO_mode = mode; 8247 drvp->DMA_mode = mode - 2; 8248 } 8249 piodmatim |= NFORCE_PIODMATIM_SET(channel, drive, 8250 nforce_pio[mode]); 8251 } 8252 8253 if (idedma_ctl != 0) { 8254 /* Add software bits in status register */ 8255 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 8256 IDEDMA_CTL(channel), idedma_ctl); 8257 } 8258 8259 WDCDEBUG_PRINT(("%s: %s new timing values: piodmatim=0x%x, " 8260 "piotim=0x%x, udmatim=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, 8261 cp->name, piodmatim, piotim, udmatim), DEBUG_PROBE); 8262 pci_conf_write(sc->sc_pc, sc->sc_tag, NFORCE_PIODMATIM, piodmatim); 8263 pci_conf_write(sc->sc_pc, sc->sc_tag, NFORCE_UDMATIM, udmatim); 8264 8265 
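	/*
	 * Example of the PIO/MWDMA coupling in the loop above: a drive at
	 * PIO 4 with MWDMA 2 keeps mode 4 (min(4, 2 + 2)), i.e. PIO_mode 4
	 * and DMA_mode 2, while a drive limited to PIO 2 or below falls
	 * back to the mode 0 timings.
	 */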
pciide_print_modes(cp); 8266 } 8267 8268 int 8269 nforce_pci_intr(void *arg) 8270 { 8271 struct pciide_softc *sc = arg; 8272 struct pciide_channel *cp; 8273 struct channel_softc *wdc_cp; 8274 int i, rv, crv; 8275 u_int32_t dmastat; 8276 8277 rv = 0; 8278 for (i = 0; i < sc->sc_wdcdev.nchannels; i++) { 8279 cp = &sc->pciide_channels[i]; 8280 wdc_cp = &cp->wdc_channel; 8281 8282 /* Skip compat channel */ 8283 if (cp->compat) 8284 continue; 8285 8286 dmastat = bus_space_read_1(sc->sc_dma_iot, sc->sc_dma_ioh, 8287 IDEDMA_CTL(i)); 8288 if ((dmastat & IDEDMA_CTL_INTR) == 0) 8289 continue; 8290 8291 crv = wdcintr(wdc_cp); 8292 if (crv == 0) 8293 printf("%s:%d: bogus intr\n", 8294 sc->sc_wdcdev.sc_dev.dv_xname, i); 8295 else 8296 rv = 1; 8297 } 8298 return (rv); 8299 } 8300 8301 void 8302 artisea_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 8303 { 8304 struct pciide_channel *cp; 8305 bus_size_t cmdsize, ctlsize; 8306 pcireg_t interface; 8307 int channel; 8308 8309 printf(": DMA"); 8310 #ifdef PCIIDE_I31244_DISABLEDMA 8311 if (sc->sc_rev == 0) { 8312 printf(" disabled due to rev. 0"); 8313 sc->sc_dma_ok = 0; 8314 } else 8315 #endif 8316 pciide_mapreg_dma(sc, pa); 8317 printf("\n"); 8318 8319 /* 8320 * XXX Configure LEDs to show activity. 8321 */ 8322 8323 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 8324 WDC_CAPABILITY_MODE | WDC_CAPABILITY_SATA; 8325 sc->sc_wdcdev.PIO_cap = 4; 8326 if (sc->sc_dma_ok) { 8327 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 8328 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 8329 sc->sc_wdcdev.irqack = pciide_irqack; 8330 sc->sc_wdcdev.DMA_cap = 2; 8331 sc->sc_wdcdev.UDMA_cap = 6; 8332 } 8333 sc->sc_wdcdev.set_modes = sata_setup_channel; 8334 8335 sc->sc_wdcdev.channels = sc->wdc_chanarray; 8336 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 8337 8338 interface = PCI_INTERFACE(pa->pa_class); 8339 8340 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 8341 cp = &sc->pciide_channels[channel]; 8342 if (pciide_chansetup(sc, channel, interface) == 0) 8343 continue; 8344 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 8345 pciide_pci_intr); 8346 if (cp->hw_ok == 0) 8347 continue; 8348 pciide_map_compat_intr(pa, cp, channel, interface); 8349 sata_setup_channel(&cp->wdc_channel); 8350 } 8351 } 8352 8353 void 8354 ite_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 8355 { 8356 struct pciide_channel *cp; 8357 int channel; 8358 pcireg_t interface; 8359 bus_size_t cmdsize, ctlsize; 8360 pcireg_t cfg, modectl; 8361 8362 /* 8363 * Fake interface since IT8212F is claimed to be a ``RAID'' device. 
8364 */ 8365 interface = PCIIDE_INTERFACE_BUS_MASTER_DMA | 8366 PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1); 8367 8368 cfg = pci_conf_read(sc->sc_pc, sc->sc_tag, IT_CFG); 8369 modectl = pci_conf_read(sc->sc_pc, sc->sc_tag, IT_MODE); 8370 WDCDEBUG_PRINT(("%s: cfg=0x%x, modectl=0x%x\n", 8371 sc->sc_wdcdev.sc_dev.dv_xname, cfg & IT_CFG_MASK, 8372 modectl & IT_MODE_MASK), DEBUG_PROBE); 8373 8374 printf(": DMA"); 8375 pciide_mapreg_dma(sc, pa); 8376 8377 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 8378 WDC_CAPABILITY_MODE; 8379 if (sc->sc_dma_ok) { 8380 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 8381 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 8382 sc->sc_wdcdev.irqack = pciide_irqack; 8383 } 8384 sc->sc_wdcdev.PIO_cap = 4; 8385 sc->sc_wdcdev.DMA_cap = 2; 8386 sc->sc_wdcdev.UDMA_cap = 6; 8387 8388 sc->sc_wdcdev.set_modes = ite_setup_channel; 8389 sc->sc_wdcdev.channels = sc->wdc_chanarray; 8390 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 8391 8392 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 8393 8394 /* Disable RAID */ 8395 modectl &= ~IT_MODE_RAID1; 8396 /* Disable CPU firmware mode */ 8397 modectl &= ~IT_MODE_CPU; 8398 8399 pci_conf_write(sc->sc_pc, sc->sc_tag, IT_MODE, modectl); 8400 8401 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 8402 cp = &sc->pciide_channels[channel]; 8403 8404 if (pciide_chansetup(sc, channel, interface) == 0) 8405 continue; 8406 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 8407 pciide_pci_intr); 8408 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 8409 } 8410 8411 /* Re-read configuration registers after channels setup */ 8412 cfg = pci_conf_read(sc->sc_pc, sc->sc_tag, IT_CFG); 8413 modectl = pci_conf_read(sc->sc_pc, sc->sc_tag, IT_MODE); 8414 WDCDEBUG_PRINT(("%s: cfg=0x%x, modectl=0x%x\n", 8415 sc->sc_wdcdev.sc_dev.dv_xname, cfg & IT_CFG_MASK, 8416 modectl & IT_MODE_MASK), DEBUG_PROBE); 8417 } 8418 8419 void 8420 ite_setup_channel(struct channel_softc *chp) 8421 { 8422 struct ata_drive_datas *drvp; 8423 int drive, mode; 8424 u_int32_t idedma_ctl; 8425 struct pciide_channel *cp = (struct pciide_channel *)chp; 8426 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 8427 int channel = chp->channel; 8428 pcireg_t cfg, modectl; 8429 pcireg_t tim; 8430 8431 cfg = pci_conf_read(sc->sc_pc, sc->sc_tag, IT_CFG); 8432 modectl = pci_conf_read(sc->sc_pc, sc->sc_tag, IT_MODE); 8433 tim = pci_conf_read(sc->sc_pc, sc->sc_tag, IT_TIM(channel)); 8434 WDCDEBUG_PRINT(("%s:%d: tim=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, 8435 channel, tim), DEBUG_PROBE); 8436 8437 /* Setup DMA if needed */ 8438 pciide_channel_dma_setup(cp); 8439 8440 /* Clear all bits for this channel */ 8441 idedma_ctl = 0; 8442 8443 /* Per channel settings */ 8444 for (drive = 0; drive < 2; drive++) { 8445 drvp = &chp->ch_drive[drive]; 8446 8447 /* If no drive, skip */ 8448 if ((drvp->drive_flags & DRIVE) == 0) 8449 continue; 8450 8451 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) != 0 && 8452 (drvp->drive_flags & DRIVE_UDMA) != 0) { 8453 /* Setup UltraDMA mode */ 8454 drvp->drive_flags &= ~DRIVE_DMA; 8455 modectl &= ~IT_MODE_DMA(channel, drive); 8456 8457 #if 0 8458 /* Check cable, works only in CPU firmware mode */ 8459 if (drvp->UDMA_mode > 2 && 8460 (cfg & IT_CFG_CABLE(channel, drive)) == 0) { 8461 WDCDEBUG_PRINT(("%s(%s:%d:%d): " 8462 "80-wire cable not detected\n", 8463 drvp->drive_name, 8464 sc->sc_wdcdev.sc_dev.dv_xname, 8465 channel, drive), DEBUG_PROBE); 8466 drvp->UDMA_mode = 2; 8467 } 8468 #endif 8469 
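			/*
			 * The cable check above stays disabled: ite_chip_map()
			 * clears IT_MODE_CPU, and per the comment the IT_CFG
			 * cable bits are only valid in CPU firmware mode, so
			 * the negotiated UDMA mode is used unchanged here.
			 */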
8470 if (drvp->UDMA_mode >= 5) 8471 tim |= IT_TIM_UDMA5(drive); 8472 else 8473 tim &= ~IT_TIM_UDMA5(drive); 8474 8475 mode = drvp->PIO_mode; 8476 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) != 0 && 8477 (drvp->drive_flags & DRIVE_DMA) != 0) { 8478 /* Setup multiword DMA mode */ 8479 drvp->drive_flags &= ~DRIVE_UDMA; 8480 modectl |= IT_MODE_DMA(channel, drive); 8481 8482 /* mode = min(pio, dma + 2) */ 8483 if (drvp->PIO_mode <= (drvp->DMA_mode + 2)) 8484 mode = drvp->PIO_mode; 8485 else 8486 mode = drvp->DMA_mode + 2; 8487 } else { 8488 mode = drvp->PIO_mode; 8489 goto pio; 8490 } 8491 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 8492 8493 pio: 8494 /* Setup PIO mode */ 8495 if (mode <= 2) { 8496 drvp->DMA_mode = 0; 8497 drvp->PIO_mode = 0; 8498 mode = 0; 8499 } else { 8500 drvp->PIO_mode = mode; 8501 drvp->DMA_mode = mode - 2; 8502 } 8503 8504 /* Enable IORDY if PIO mode >= 3 */ 8505 if (drvp->PIO_mode >= 3) 8506 cfg |= IT_CFG_IORDY(channel); 8507 } 8508 8509 WDCDEBUG_PRINT(("%s: tim=0x%x\n", sc->sc_wdcdev.sc_dev.dv_xname, 8510 tim), DEBUG_PROBE); 8511 8512 pci_conf_write(sc->sc_pc, sc->sc_tag, IT_CFG, cfg); 8513 pci_conf_write(sc->sc_pc, sc->sc_tag, IT_MODE, modectl); 8514 pci_conf_write(sc->sc_pc, sc->sc_tag, IT_TIM(channel), tim); 8515 8516 if (idedma_ctl != 0) { 8517 /* Add software bits in status register */ 8518 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 8519 IDEDMA_CTL(channel), idedma_ctl); 8520 } 8521 8522 pciide_print_modes(cp); 8523 } 8524 8525 void 8526 ixp_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 8527 { 8528 struct pciide_channel *cp; 8529 int channel; 8530 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 8531 bus_size_t cmdsize, ctlsize; 8532 8533 printf(": DMA"); 8534 pciide_mapreg_dma(sc, pa); 8535 8536 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 8537 WDC_CAPABILITY_MODE; 8538 if (sc->sc_dma_ok) { 8539 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 8540 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 8541 sc->sc_wdcdev.irqack = pciide_irqack; 8542 } 8543 sc->sc_wdcdev.PIO_cap = 4; 8544 sc->sc_wdcdev.DMA_cap = 2; 8545 sc->sc_wdcdev.UDMA_cap = 6; 8546 8547 sc->sc_wdcdev.set_modes = ixp_setup_channel; 8548 sc->sc_wdcdev.channels = sc->wdc_chanarray; 8549 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 8550 8551 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 8552 8553 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 8554 cp = &sc->pciide_channels[channel]; 8555 if (pciide_chansetup(sc, channel, interface) == 0) 8556 continue; 8557 pciide_map_compat_intr(pa, cp, channel, interface); 8558 if (cp->hw_ok == 0) 8559 continue; 8560 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 8561 pciide_pci_intr); 8562 if (cp->hw_ok == 0) { 8563 pciide_unmap_compat_intr(pa, cp, channel, interface); 8564 continue; 8565 } 8566 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 8567 } 8568 } 8569 8570 void 8571 ixp_setup_channel(struct channel_softc *chp) 8572 { 8573 struct ata_drive_datas *drvp; 8574 int drive, mode; 8575 u_int32_t idedma_ctl; 8576 struct pciide_channel *cp = (struct pciide_channel*)chp; 8577 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 8578 int channel = chp->channel; 8579 pcireg_t udma, mdma_timing, pio, pio_timing; 8580 8581 pio_timing = pci_conf_read(sc->sc_pc, sc->sc_tag, IXP_PIO_TIMING); 8582 pio = pci_conf_read(sc->sc_pc, sc->sc_tag, IXP_PIO_CTL); 8583 mdma_timing = pci_conf_read(sc->sc_pc, sc->sc_tag, IXP_MDMA_TIMING); 8584 udma = pci_conf_read(sc->sc_pc, 
sc->sc_tag, IXP_UDMA_CTL); 8585 8586 /* Setup DMA if needed */ 8587 pciide_channel_dma_setup(cp); 8588 8589 idedma_ctl = 0; 8590 8591 /* Per channel settings */ 8592 for (drive = 0; drive < 2; drive++) { 8593 drvp = &chp->ch_drive[drive]; 8594 8595 /* If no drive, skip */ 8596 if ((drvp->drive_flags & DRIVE) == 0) 8597 continue; 8598 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) != 0 && 8599 (drvp->drive_flags & DRIVE_UDMA) != 0) { 8600 /* Setup UltraDMA mode */ 8601 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 8602 IXP_UDMA_ENABLE(udma, chp->channel, drive); 8603 IXP_SET_MODE(udma, chp->channel, drive, 8604 drvp->UDMA_mode); 8605 mode = drvp->PIO_mode; 8606 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) != 0 && 8607 (drvp->drive_flags & DRIVE_DMA) != 0) { 8608 /* Setup multiword DMA mode */ 8609 drvp->drive_flags &= ~DRIVE_UDMA; 8610 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 8611 IXP_UDMA_DISABLE(udma, chp->channel, drive); 8612 IXP_SET_TIMING(mdma_timing, chp->channel, drive, 8613 ixp_mdma_timings[drvp->DMA_mode]); 8614 8615 /* mode = min(pio, dma + 2) */ 8616 if (drvp->PIO_mode <= (drvp->DMA_mode + 2)) 8617 mode = drvp->PIO_mode; 8618 else 8619 mode = drvp->DMA_mode + 2; 8620 } else { 8621 mode = drvp->PIO_mode; 8622 } 8623 8624 /* Setup PIO mode */ 8625 drvp->PIO_mode = mode; 8626 if (mode < 2) 8627 drvp->DMA_mode = 0; 8628 else 8629 drvp->DMA_mode = mode - 2; 8630 /* 8631 * Set PIO mode and timings 8632 * Linux driver avoids PIO mode 1, let's do it too. 8633 */ 8634 if (drvp->PIO_mode == 1) 8635 drvp->PIO_mode = 0; 8636 8637 IXP_SET_MODE(pio, chp->channel, drive, drvp->PIO_mode); 8638 IXP_SET_TIMING(pio_timing, chp->channel, drive, 8639 ixp_pio_timings[drvp->PIO_mode]); 8640 } 8641 8642 pci_conf_write(sc->sc_pc, sc->sc_tag, IXP_UDMA_CTL, udma); 8643 pci_conf_write(sc->sc_pc, sc->sc_tag, IXP_MDMA_TIMING, mdma_timing); 8644 pci_conf_write(sc->sc_pc, sc->sc_tag, IXP_PIO_CTL, pio); 8645 pci_conf_write(sc->sc_pc, sc->sc_tag, IXP_PIO_TIMING, pio_timing); 8646 8647 if (idedma_ctl != 0) { 8648 /* Add software bits in status register */ 8649 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 8650 IDEDMA_CTL(channel), idedma_ctl); 8651 } 8652 8653 pciide_print_modes(cp); 8654 } 8655 8656 void 8657 jmicron_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 8658 { 8659 struct pciide_channel *cp; 8660 int channel; 8661 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 8662 bus_size_t cmdsize, ctlsize; 8663 u_int32_t conf; 8664 8665 conf = pci_conf_read(sc->sc_pc, sc->sc_tag, JMICRON_CONF); 8666 WDCDEBUG_PRINT(("%s: conf register 0x%x\n", 8667 sc->sc_wdcdev.sc_dev.dv_xname, conf), DEBUG_PROBE); 8668 8669 printf(": DMA"); 8670 pciide_mapreg_dma(sc, pa); 8671 8672 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 8673 WDC_CAPABILITY_MODE; 8674 if (sc->sc_dma_ok) { 8675 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 8676 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 8677 sc->sc_wdcdev.irqack = pciide_irqack; 8678 } 8679 sc->sc_wdcdev.PIO_cap = 4; 8680 sc->sc_wdcdev.DMA_cap = 2; 8681 sc->sc_wdcdev.UDMA_cap = 6; 8682 sc->sc_wdcdev.set_modes = jmicron_setup_channel; 8683 sc->sc_wdcdev.channels = sc->wdc_chanarray; 8684 sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS; 8685 8686 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 8687 8688 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 8689 cp = &sc->pciide_channels[channel]; 8690 8691 if (pciide_chansetup(sc, channel, interface) == 0) 8692 continue; 8693 8694 #if 0 8695 if ((conf & 
JMICRON_CHAN_EN(channel)) == 0) { 8696 printf("%s: %s ignored (disabled)\n", 8697 sc->sc_wdcdev.sc_dev.dv_xname, cp->name); 8698 continue; 8699 } 8700 #endif 8701 8702 pciide_map_compat_intr(pa, cp, channel, interface); 8703 if (cp->hw_ok == 0) 8704 continue; 8705 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 8706 pciide_pci_intr); 8707 if (cp->hw_ok == 0) { 8708 pciide_unmap_compat_intr(pa, cp, channel, interface); 8709 continue; 8710 } 8711 8712 if (pciide_chan_candisable(cp)) { 8713 conf &= ~JMICRON_CHAN_EN(channel); 8714 pciide_unmap_compat_intr(pa, cp, channel, interface); 8715 continue; 8716 } 8717 8718 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 8719 } 8720 WDCDEBUG_PRINT(("%s: new conf register 0x%x\n", 8721 sc->sc_wdcdev.sc_dev.dv_xname, conf), DEBUG_PROBE); 8722 pci_conf_write(sc->sc_pc, sc->sc_tag, JMICRON_CONF, conf); 8723 } 8724 8725 void 8726 jmicron_setup_channel(struct channel_softc *chp) 8727 { 8728 struct ata_drive_datas *drvp; 8729 int drive, mode; 8730 u_int32_t idedma_ctl; 8731 struct pciide_channel *cp = (struct pciide_channel *)chp; 8732 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 8733 int channel = chp->channel; 8734 u_int32_t conf; 8735 8736 conf = pci_conf_read(sc->sc_pc, sc->sc_tag, JMICRON_CONF); 8737 8738 /* Setup DMA if needed */ 8739 pciide_channel_dma_setup(cp); 8740 8741 /* Clear all bits for this channel */ 8742 idedma_ctl = 0; 8743 8744 /* Per channel settings */ 8745 for (drive = 0; drive < 2; drive++) { 8746 drvp = &chp->ch_drive[drive]; 8747 8748 /* If no drive, skip */ 8749 if ((drvp->drive_flags & DRIVE) == 0) 8750 continue; 8751 8752 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) != 0 && 8753 (drvp->drive_flags & DRIVE_UDMA) != 0) { 8754 /* Setup UltraDMA mode */ 8755 drvp->drive_flags &= ~DRIVE_DMA; 8756 8757 /* see if cable is up to scratch */ 8758 if ((conf & JMICRON_CONF_40PIN) && 8759 (drvp->UDMA_mode > 2)) 8760 drvp->UDMA_mode = 2; 8761 8762 mode = drvp->PIO_mode; 8763 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) != 0 && 8764 (drvp->drive_flags & DRIVE_DMA) != 0) { 8765 /* Setup multiword DMA mode */ 8766 drvp->drive_flags &= ~DRIVE_UDMA; 8767 8768 /* mode = min(pio, dma + 2) */ 8769 if (drvp->PIO_mode <= (drvp->DMA_mode + 2)) 8770 mode = drvp->PIO_mode; 8771 else 8772 mode = drvp->DMA_mode + 2; 8773 } else { 8774 mode = drvp->PIO_mode; 8775 goto pio; 8776 } 8777 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 8778 8779 pio: 8780 /* Setup PIO mode */ 8781 if (mode <= 2) { 8782 drvp->DMA_mode = 0; 8783 drvp->PIO_mode = 0; 8784 } else { 8785 drvp->PIO_mode = mode; 8786 drvp->DMA_mode = mode - 2; 8787 } 8788 } 8789 8790 if (idedma_ctl != 0) { 8791 /* Add software bits in status register */ 8792 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 8793 IDEDMA_CTL(channel), idedma_ctl); 8794 } 8795 8796 pciide_print_modes(cp); 8797 } 8798 8799 void 8800 phison_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 8801 { 8802 struct pciide_channel *cp; 8803 int channel; 8804 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 8805 bus_size_t cmdsize, ctlsize; 8806 8807 sc->chip_unmap = default_chip_unmap; 8808 8809 printf(": DMA"); 8810 pciide_mapreg_dma(sc, pa); 8811 8812 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 8813 WDC_CAPABILITY_MODE; 8814 if (sc->sc_dma_ok) { 8815 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 8816 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 8817 sc->sc_wdcdev.irqack = pciide_irqack; 8818 } 8819 sc->sc_wdcdev.PIO_cap = 4; 8820 sc->sc_wdcdev.DMA_cap = 2; 8821 
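	/* Single IDE channel; Ultra DMA is capped at mode 5 (ATA/100). */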
sc->sc_wdcdev.UDMA_cap = 5; 8822 sc->sc_wdcdev.set_modes = phison_setup_channel; 8823 sc->sc_wdcdev.channels = sc->wdc_chanarray; 8824 sc->sc_wdcdev.nchannels = 1; 8825 8826 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 8827 8828 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 8829 cp = &sc->pciide_channels[channel]; 8830 8831 if (pciide_chansetup(sc, channel, interface) == 0) 8832 continue; 8833 8834 pciide_map_compat_intr(pa, cp, channel, interface); 8835 if (cp->hw_ok == 0) 8836 continue; 8837 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 8838 pciide_pci_intr); 8839 if (cp->hw_ok == 0) { 8840 pciide_unmap_compat_intr(pa, cp, channel, interface); 8841 continue; 8842 } 8843 8844 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 8845 } 8846 } 8847 8848 void 8849 phison_setup_channel(struct channel_softc *chp) 8850 { 8851 struct ata_drive_datas *drvp; 8852 int drive, mode; 8853 u_int32_t idedma_ctl; 8854 struct pciide_channel *cp = (struct pciide_channel *)chp; 8855 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 8856 int channel = chp->channel; 8857 8858 /* Setup DMA if needed */ 8859 pciide_channel_dma_setup(cp); 8860 8861 /* Clear all bits for this channel */ 8862 idedma_ctl = 0; 8863 8864 /* Per channel settings */ 8865 for (drive = 0; drive < 2; drive++) { 8866 drvp = &chp->ch_drive[drive]; 8867 8868 /* If no drive, skip */ 8869 if ((drvp->drive_flags & DRIVE) == 0) 8870 continue; 8871 8872 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) != 0 && 8873 (drvp->drive_flags & DRIVE_UDMA) != 0) { 8874 /* Setup UltraDMA mode */ 8875 drvp->drive_flags &= ~DRIVE_DMA; 8876 mode = drvp->PIO_mode; 8877 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) != 0 && 8878 (drvp->drive_flags & DRIVE_DMA) != 0) { 8879 /* Setup multiword DMA mode */ 8880 drvp->drive_flags &= ~DRIVE_UDMA; 8881 8882 /* mode = min(pio, dma + 2) */ 8883 if (drvp->PIO_mode <= (drvp->DMA_mode + 2)) 8884 mode = drvp->PIO_mode; 8885 else 8886 mode = drvp->DMA_mode + 2; 8887 } else { 8888 mode = drvp->PIO_mode; 8889 goto pio; 8890 } 8891 idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive); 8892 8893 pio: 8894 /* Setup PIO mode */ 8895 if (mode <= 2) { 8896 drvp->DMA_mode = 0; 8897 drvp->PIO_mode = 0; 8898 } else { 8899 drvp->PIO_mode = mode; 8900 drvp->DMA_mode = mode - 2; 8901 } 8902 } 8903 8904 if (idedma_ctl != 0) { 8905 /* Add software bits in status register */ 8906 bus_space_write_1(sc->sc_dma_iot, sc->sc_dma_ioh, 8907 IDEDMA_CTL(channel), idedma_ctl); 8908 } 8909 8910 pciide_print_modes(cp); 8911 } 8912 8913 void 8914 sch_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa) 8915 { 8916 struct pciide_channel *cp; 8917 int channel; 8918 pcireg_t interface = PCI_INTERFACE(pa->pa_class); 8919 bus_size_t cmdsize, ctlsize; 8920 8921 printf(": DMA"); 8922 pciide_mapreg_dma(sc, pa); 8923 8924 sc->sc_wdcdev.cap = WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 | 8925 WDC_CAPABILITY_MODE; 8926 if (sc->sc_dma_ok) { 8927 sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA; 8928 sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK; 8929 sc->sc_wdcdev.irqack = pciide_irqack; 8930 } 8931 sc->sc_wdcdev.PIO_cap = 4; 8932 sc->sc_wdcdev.DMA_cap = 2; 8933 sc->sc_wdcdev.UDMA_cap = 5; 8934 sc->sc_wdcdev.set_modes = sch_setup_channel; 8935 sc->sc_wdcdev.channels = sc->wdc_chanarray; 8936 sc->sc_wdcdev.nchannels = 1; 8937 8938 pciide_print_channels(sc->sc_wdcdev.nchannels, interface); 8939 8940 for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) { 8941 cp = &sc->pciide_channels[channel]; 8942 8943 
if (pciide_chansetup(sc, channel, interface) == 0) 8944 continue; 8945 8946 pciide_map_compat_intr(pa, cp, channel, interface); 8947 if (cp->hw_ok == 0) 8948 continue; 8949 pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize, 8950 pciide_pci_intr); 8951 if (cp->hw_ok == 0) { 8952 pciide_unmap_compat_intr(pa, cp, channel, interface); 8953 continue; 8954 } 8955 8956 sc->sc_wdcdev.set_modes(&cp->wdc_channel); 8957 } 8958 } 8959 8960 void 8961 sch_setup_channel(struct channel_softc *chp) 8962 { 8963 struct ata_drive_datas *drvp; 8964 int drive, mode; 8965 u_int32_t tim, timaddr; 8966 struct pciide_channel *cp = (struct pciide_channel *)chp; 8967 struct pciide_softc *sc = (struct pciide_softc *)cp->wdc_channel.wdc; 8968 8969 /* Setup DMA if needed */ 8970 pciide_channel_dma_setup(cp); 8971 8972 /* Per channel settings */ 8973 for (drive = 0; drive < 2; drive++) { 8974 drvp = &chp->ch_drive[drive]; 8975 8976 /* If no drive, skip */ 8977 if ((drvp->drive_flags & DRIVE) == 0) 8978 continue; 8979 8980 timaddr = (drive == 0) ? SCH_D0TIM : SCH_D1TIM; 8981 tim = pci_conf_read(sc->sc_pc, sc->sc_tag, timaddr); 8982 tim &= ~SCH_TIM_MASK; 8983 8984 if ((chp->wdc->cap & WDC_CAPABILITY_UDMA) != 0 && 8985 (drvp->drive_flags & DRIVE_UDMA) != 0) { 8986 /* Setup UltraDMA mode */ 8987 drvp->drive_flags &= ~DRIVE_DMA; 8988 8989 mode = drvp->PIO_mode; 8990 tim |= (drvp->UDMA_mode << 16) | SCH_TIM_SYNCDMA; 8991 } else if ((chp->wdc->cap & WDC_CAPABILITY_DMA) != 0 && 8992 (drvp->drive_flags & DRIVE_DMA) != 0) { 8993 /* Setup multiword DMA mode */ 8994 drvp->drive_flags &= ~DRIVE_UDMA; 8995 8996 tim &= ~SCH_TIM_SYNCDMA; 8997 8998 /* mode = min(pio, dma + 2) */ 8999 if (drvp->PIO_mode <= (drvp->DMA_mode + 2)) 9000 mode = drvp->PIO_mode; 9001 else 9002 mode = drvp->DMA_mode + 2; 9003 } else { 9004 mode = drvp->PIO_mode; 9005 goto pio; 9006 } 9007 9008 pio: 9009 /* Setup PIO mode */ 9010 if (mode <= 2) { 9011 drvp->DMA_mode = 0; 9012 drvp->PIO_mode = 0; 9013 } else { 9014 drvp->PIO_mode = mode; 9015 drvp->DMA_mode = mode - 2; 9016 } 9017 tim |= (drvp->DMA_mode << 8) | (drvp->PIO_mode); 9018 pci_conf_write(sc->sc_pc, sc->sc_tag, timaddr, tim); 9019 } 9020 9021 pciide_print_modes(cp); 9022 } 9023
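/*
 * SCH_D0TIM/SCH_D1TIM layout implied by sch_setup_channel() above: the PIO
 * mode sits in the low bits, the MWDMA mode at bit 8, the UDMA mode at bit
 * 16, and SCH_TIM_SYNCDMA selects synchronous (Ultra) DMA for that drive.
 */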