/*-
 * Copyright (c) 1998 - 2008 Søren Schmidt <sos@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* local prototypes */
static int ata_marvell_pata_chipinit(device_t dev);
static int ata_marvell_pata_allocate(device_t dev);
static void ata_marvell_pata_setmode(device_t dev, int mode);
static int ata_marvell_edma_chipinit(device_t dev);
static int ata_marvell_edma_allocate(device_t dev);
static int ata_marvell_edma_status(device_t dev);
static int ata_marvell_edma_begin_transaction(struct ata_request *request);
static int ata_marvell_edma_end_transaction(struct ata_request *request);
static void ata_marvell_edma_reset(device_t dev);
static void ata_marvell_edma_dmasetprd(void *xsc, bus_dma_segment_t *segs, int nsegs, int error);
static void ata_marvell_edma_dmainit(device_t dev);

/* misc defines */
#undef MV_60XX
#undef MV_7042
#define MV_50XX		50
#define MV_60XX		60
#define MV_6042		62
#define MV_7042		72
#define MV_61XX		61

#define ATA_MV_HOST_BASE(ch) \
    (((ch->unit & 3) * 0x0100) + (ch->unit > 3 ? 0x30000 : 0x20000))
#define ATA_MV_EDMA_BASE(ch) \
    (((ch->unit & 3) * 0x2000) + (ch->unit > 3 ? 0x30000 : 0x20000))
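
/*
 * Worked example for the macros above (derived from the expressions, not
 * from any datasheet): channels 0-3 sit in the first host controller
 * window at 0x20000, channels 4-7 in the second at 0x30000.  For channel
 * 5, ATA_MV_HOST_BASE(ch) yields 0x30000 + (5 & 3) * 0x0100 = 0x30100 and
 * ATA_MV_EDMA_BASE(ch) yields 0x30000 + (5 & 3) * 0x2000 = 0x32000.
 */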

struct ata_marvell_response {
    u_int16_t   tag;
    u_int8_t    edma_status;
    u_int8_t    dev_status;
    u_int32_t   timestamp;
};

struct ata_marvell_dma_prdentry {
    u_int32_t addrlo;
    u_int32_t count;
    u_int32_t addrhi;
    u_int32_t reserved;
};

/*
 * Marvell chipset support functions
 */
int
ata_marvell_ident(device_t dev)
{
    struct ata_pci_controller *ctlr = device_get_softc(dev);
    static const struct ata_chip_id ids[] =
    {{ ATA_M88SX5040, 0, 4, MV_50XX, ATA_SA150, "88SX5040" },
     { ATA_M88SX5041, 0, 4, MV_50XX, ATA_SA150, "88SX5041" },
     { ATA_M88SX5080, 0, 8, MV_50XX, ATA_SA150, "88SX5080" },
     { ATA_M88SX5081, 0, 8, MV_50XX, ATA_SA150, "88SX5081" },
     { ATA_M88SX6041, 0, 4, MV_60XX, ATA_SA300, "88SX6041" },
     { ATA_M88SX6042, 0, 4, MV_6042, ATA_SA300, "88SX6042" },
     { ATA_M88SX6081, 0, 8, MV_60XX, ATA_SA300, "88SX6081" },
     { ATA_M88SX7042, 0, 4, MV_7042, ATA_SA300, "88SX7042" },
     { ATA_M88SX6101, 0, 1, MV_61XX, ATA_UDMA6, "88SX6101" },
     { ATA_M88SX6121, 0, 1, MV_61XX, ATA_UDMA6, "88SX6121" },
     { ATA_M88SX6145, 0, 2, MV_61XX, ATA_UDMA6, "88SX6145" },
     { 0, 0, 0, 0, 0, 0}};

    if (pci_get_vendor(dev) != ATA_MARVELL_ID)
	return ENXIO;

    if (!(ctlr->chip = ata_match_chip(dev, ids)))
	return ENXIO;

    ata_set_desc(dev);

    switch (ctlr->chip->cfg2) {
    case MV_50XX:
    case MV_60XX:
    case MV_6042:
    case MV_7042:
	ctlr->chipinit = ata_marvell_edma_chipinit;
	break;
    case MV_61XX:
	ctlr->chipinit = ata_marvell_pata_chipinit;
	break;
    }
    return 0;
}

static int
ata_marvell_pata_chipinit(device_t dev)
{
    struct ata_pci_controller *ctlr = device_get_softc(dev);

    if (ata_setup_interrupt(dev, ata_generic_intr))
	return ENXIO;

    ctlr->allocate = ata_marvell_pata_allocate;
    ctlr->setmode = ata_marvell_pata_setmode;
    ctlr->channels = ctlr->chip->cfg1;
    return 0;
}

static int
ata_marvell_pata_allocate(device_t dev)
{
    struct ata_channel *ch = device_get_softc(dev);

    /* set up the usual registers, plain PCI style */
    if (ata_pci_allocate(dev))
	return ENXIO;

    /* don't use 32 bit PIO transfers */
    ch->flags |= ATA_USE_16BIT;

    return 0;
}

static void
ata_marvell_pata_setmode(device_t dev, int mode)
{
    device_t gparent = GRANDPARENT(dev);
    struct ata_pci_controller *ctlr = device_get_softc(gparent);
    struct ata_device *atadev = device_get_softc(dev);

    mode = ata_limit_mode(dev, mode, ctlr->chip->max_dma);
    mode = ata_check_80pin(dev, mode);
    if (!ata_controlcmd(dev, ATA_SETFEATURES, ATA_SF_SETXFER, 0, mode))
	atadev->mode = mode;
}
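
/*
 * Everything below drives the SATA parts (88SX50xx/60xx/6042/7042)
 * through the memory mapped EDMA engines behind PCI BAR(0); the plain
 * PATA 61xx parts handled above just reuse the generic PCI ATA code.
 * Non-DMA commands bypass the EDMA engine entirely and go through the
 * legacy ATA register block mapped in ata_marvell_edma_allocate().
 */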

static int
ata_marvell_edma_chipinit(device_t dev)
{
    struct ata_pci_controller *ctlr = device_get_softc(dev);

    if (ata_setup_interrupt(dev, ata_generic_intr))
	return ENXIO;

    ctlr->r_type1 = SYS_RES_MEMORY;
    ctlr->r_rid1 = PCIR_BAR(0);
    if (!(ctlr->r_res1 = bus_alloc_resource_any(dev, ctlr->r_type1,
						&ctlr->r_rid1, RF_ACTIVE))) {
	ata_teardown_interrupt(dev);
	return ENXIO;
    }

    /* mask all host controller interrupts */
    ATA_OUTL(ctlr->r_res1, 0x01d64, 0x00000000);

    /* mask all PCI interrupts */
    ATA_OUTL(ctlr->r_res1, 0x01d5c, 0x00000000);

    ctlr->allocate = ata_marvell_edma_allocate;
    ctlr->reset = ata_marvell_edma_reset;
    ctlr->dmainit = ata_marvell_edma_dmainit;
    ctlr->setmode = ata_sata_setmode;
    ctlr->channels = ctlr->chip->cfg1;

    /* clear host controller interrupts */
    ATA_OUTL(ctlr->r_res1, 0x20014, 0x00000000);
    if (ctlr->chip->cfg1 > 4)
	ATA_OUTL(ctlr->r_res1, 0x30014, 0x00000000);

    /* clear PCI interrupts */
    ATA_OUTL(ctlr->r_res1, 0x01d58, 0x00000000);

    /* unmask PCI interrupts we want */
    ATA_OUTL(ctlr->r_res1, 0x01d5c, 0x007fffff);

    /* unmask host controller interrupts we want */
    ATA_OUTL(ctlr->r_res1, 0x01d64, 0x000000ff/*HC0*/ | 0x0001fe00/*HC1*/ |
	     /*(1<<19) | (1<<20) | (1<<21) |*/(1<<22) | (1<<24) | (0x7f << 25));

    /* enable PCI interrupt */
    pci_write_config(dev, PCIR_COMMAND,
		     pci_read_config(dev, PCIR_COMMAND, 2) & ~0x0400, 2);
    return 0;
}

static int
ata_marvell_edma_allocate(device_t dev)
{
    struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev));
    struct ata_channel *ch = device_get_softc(dev);
    u_int64_t work;
    int i;

    work = ch->dma->work_bus;
    /* clear work area */
    bzero(ch->dma->work, 1024+256);
    bus_dmamap_sync(ch->dma->work_tag, ch->dma->work_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

    /* set legacy ATA resources */
    for (i = ATA_DATA; i <= ATA_COMMAND; i++) {
	ch->r_io[i].res = ctlr->r_res1;
	ch->r_io[i].offset = 0x02100 + (i << 2) + ATA_MV_EDMA_BASE(ch);
    }
    ch->r_io[ATA_CONTROL].res = ctlr->r_res1;
    ch->r_io[ATA_CONTROL].offset = 0x02120 + ATA_MV_EDMA_BASE(ch);
    ch->r_io[ATA_IDX_ADDR].res = ctlr->r_res1;
    ata_default_registers(dev);

    /* set SATA resources */
    switch (ctlr->chip->cfg2) {
    case MV_50XX:
	ch->r_io[ATA_SSTATUS].res = ctlr->r_res1;
	ch->r_io[ATA_SSTATUS].offset = 0x00100 + ATA_MV_HOST_BASE(ch);
	ch->r_io[ATA_SERROR].res = ctlr->r_res1;
	ch->r_io[ATA_SERROR].offset = 0x00104 + ATA_MV_HOST_BASE(ch);
	ch->r_io[ATA_SCONTROL].res = ctlr->r_res1;
	ch->r_io[ATA_SCONTROL].offset = 0x00108 + ATA_MV_HOST_BASE(ch);
	break;
    case MV_60XX:
    case MV_6042:
    case MV_7042:
	ch->r_io[ATA_SSTATUS].res = ctlr->r_res1;
	ch->r_io[ATA_SSTATUS].offset = 0x02300 + ATA_MV_EDMA_BASE(ch);
	ch->r_io[ATA_SERROR].res = ctlr->r_res1;
	ch->r_io[ATA_SERROR].offset = 0x02304 + ATA_MV_EDMA_BASE(ch);
	ch->r_io[ATA_SCONTROL].res = ctlr->r_res1;
	ch->r_io[ATA_SCONTROL].offset = 0x02308 + ATA_MV_EDMA_BASE(ch);
	ch->r_io[ATA_SACTIVE].res = ctlr->r_res1;
	ch->r_io[ATA_SACTIVE].offset = 0x02350 + ATA_MV_EDMA_BASE(ch);
	break;
    }

    ch->flags |= ATA_NO_SLAVE;
    ch->flags |= ATA_USE_16BIT;	/* XXX SOS needed ? */
    ata_generic_hw(dev);
    ch->hw.begin_transaction = ata_marvell_edma_begin_transaction;
    ch->hw.end_transaction = ata_marvell_edma_end_transaction;
    ch->hw.status = ata_marvell_edma_status;

    /* disable the EDMA machinery */
    ATA_OUTL(ctlr->r_res1, 0x02028 + ATA_MV_EDMA_BASE(ch), 0x00000002);
    DELAY(100000);	/* SOS should poll for disabled */

    /* set configuration to non-queued 128b read transfers stop on error */
    ATA_OUTL(ctlr->r_res1, 0x02000 + ATA_MV_EDMA_BASE(ch), (1<<11) | (1<<13));

    /* request queue base high */
    ATA_OUTL(ctlr->r_res1, 0x02010 + ATA_MV_EDMA_BASE(ch), work >> 32);

    /* request queue in ptr */
    ATA_OUTL(ctlr->r_res1, 0x02014 + ATA_MV_EDMA_BASE(ch), work & 0xffffffff);

    /* request queue out ptr */
    ATA_OUTL(ctlr->r_res1, 0x02018 + ATA_MV_EDMA_BASE(ch), 0x0);

    /* response queue base high */
    work += 1024;
    ATA_OUTL(ctlr->r_res1, 0x0201c + ATA_MV_EDMA_BASE(ch), work >> 32);

    /* response queue in ptr */
    ATA_OUTL(ctlr->r_res1, 0x02020 + ATA_MV_EDMA_BASE(ch), 0x0);

    /* response queue out ptr */
    ATA_OUTL(ctlr->r_res1, 0x02024 + ATA_MV_EDMA_BASE(ch), work & 0xffffffff);

    /* clear SATA error register */
    ATA_IDX_OUTL(ch, ATA_SERROR, ATA_IDX_INL(ch, ATA_SERROR));

    /* clear any outstanding error interrupts */
    ATA_OUTL(ctlr->r_res1, 0x02008 + ATA_MV_EDMA_BASE(ch), 0x0);

    /* unmask all error interrupts */
    ATA_OUTL(ctlr->r_res1, 0x0200c + ATA_MV_EDMA_BASE(ch), ~0x0);

    /* enable EDMA machinery */
    ATA_OUTL(ctlr->r_res1, 0x02028 + ATA_MV_EDMA_BASE(ch), 0x00000001);
    return 0;
}
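
/*
 * Layout of the per-channel DMA work area set up above (sizes taken from
 * the code, not from documentation): the first 1024 bytes hold the request
 * queue (32 slots of 32 bytes each, hence the "slot << 5" arithmetic used
 * below) and the following 256 bytes hold the response queue (32 slots of
 * 8 bytes each, matching struct ata_marvell_response).
 */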

static int
ata_marvell_edma_status(device_t dev)
{
    struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev));
    struct ata_channel *ch = device_get_softc(dev);
    u_int32_t cause = ATA_INL(ctlr->r_res1, 0x01d60);
    int shift = (ch->unit << 1) + (ch->unit > 3);

    if (cause & (1 << shift)) {

	/* clear interrupt(s) */
	ATA_OUTL(ctlr->r_res1, 0x02008 + ATA_MV_EDMA_BASE(ch), 0x0);

	/* do we have any PHY events ? */
	ata_sata_phy_check_events(dev);
    }

    /* do we have any device action ? */
    return (cause & (2 << shift));
}
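
/*
 * EDMA request queue handling (as implemented below, terminology may
 * differ from Marvell's documentation): the request queue "in" pointer
 * register at offset 0x2014 carries the queue base address in its upper
 * bits and the current slot index in bits 5-9, so the next free slot is
 * recovered by masking with ~0xfffffc00, shifting right by 5 and wrapping
 * at 32 entries.  Each 32 byte slot holds the scatter/gather table address
 * followed by the ATA command, encoded register by register on the older
 * parts or as a fixed-layout block on the 6042/7042.
 */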

/* must be called with ATA channel locked and state_mtx held */
static int
ata_marvell_edma_begin_transaction(struct ata_request *request)
{
    struct ata_pci_controller *ctlr=device_get_softc(GRANDPARENT(request->dev));
    struct ata_channel *ch = device_get_softc(request->parent);
    u_int32_t req_in;
    u_int8_t *bytep;
    int i, tag = 0x07;	/* XXX why 0x07 ? */
    int dummy, error, slot;

    /* only DMA R/W goes through the EDMA machine */
    if (request->u.ata.command != ATA_READ_DMA &&
	request->u.ata.command != ATA_WRITE_DMA &&
	request->u.ata.command != ATA_READ_DMA48 &&
	request->u.ata.command != ATA_WRITE_DMA48) {

	/* disable the EDMA machinery */
	if (ATA_INL(ctlr->r_res1, 0x02028 + ATA_MV_EDMA_BASE(ch)) & 0x00000001)
	    ATA_OUTL(ctlr->r_res1, 0x02028 + ATA_MV_EDMA_BASE(ch), 0x00000002);
	return ata_begin_transaction(request);
    }

    /* check for 48 bit access and convert if needed */
    ata_modify_if_48bit(request);

    /* check sanity, setup SG list and DMA engine */
    if ((error = ch->dma->load(ch->dev, request->data, request->bytecount,
			       request->flags & ATA_R_READ, ch->dma->sg,
			       &dummy))) {
	device_printf(request->dev, "setting up DMA failed\n");
	request->result = error;
	return ATA_OP_FINISHED;
    }

    /* get next free request queue slot */
    req_in = ATA_INL(ctlr->r_res1, 0x02014 + ATA_MV_EDMA_BASE(ch));
    slot = (((req_in & ~0xfffffc00) >> 5) + 0) & 0x1f;
    bytep = (u_int8_t *)(ch->dma->work);
    bytep += (slot << 5);

    /* fill in this request */
    le32enc(bytep + 0 * sizeof(u_int32_t),
	    (long)ch->dma->sg_bus & 0xffffffff);
    le32enc(bytep + 1 * sizeof(u_int32_t),
	    (u_int64_t)ch->dma->sg_bus >> 32);
    if (ctlr->chip->cfg2 != MV_6042 && ctlr->chip->cfg2 != MV_7042) {
	le16enc(bytep + 4 * sizeof(u_int16_t),
		(request->flags & ATA_R_READ ? 0x01 : 0x00) | (tag<<1));

	i = 10;
	bytep[i++] = (request->u.ata.count >> 8) & 0xff;
	bytep[i++] = 0x10 | ATA_COUNT;
	bytep[i++] = request->u.ata.count & 0xff;
	bytep[i++] = 0x10 | ATA_COUNT;

	bytep[i++] = (request->u.ata.lba >> 24) & 0xff;
	bytep[i++] = 0x10 | ATA_SECTOR;
	bytep[i++] = request->u.ata.lba & 0xff;
	bytep[i++] = 0x10 | ATA_SECTOR;

	bytep[i++] = (request->u.ata.lba >> 32) & 0xff;
	bytep[i++] = 0x10 | ATA_CYL_LSB;
	bytep[i++] = (request->u.ata.lba >> 8) & 0xff;
	bytep[i++] = 0x10 | ATA_CYL_LSB;

	bytep[i++] = (request->u.ata.lba >> 40) & 0xff;
	bytep[i++] = 0x10 | ATA_CYL_MSB;
	bytep[i++] = (request->u.ata.lba >> 16) & 0xff;
	bytep[i++] = 0x10 | ATA_CYL_MSB;

	bytep[i++] = ATA_D_LBA | ATA_D_IBM | ((request->u.ata.lba >> 24) & 0xf);
	bytep[i++] = 0x10 | ATA_DRIVE;

	bytep[i++] = request->u.ata.command;
	bytep[i++] = 0x90 | ATA_COMMAND;
    } else {
	le32enc(bytep + 2 * sizeof(u_int32_t),
		(request->flags & ATA_R_READ ? 0x01 : 0x00) | (tag<<1));

	i = 16;
	bytep[i++] = 0;
	bytep[i++] = 0;
	bytep[i++] = request->u.ata.command;
	bytep[i++] = request->u.ata.feature & 0xff;

	bytep[i++] = request->u.ata.lba & 0xff;
	bytep[i++] = (request->u.ata.lba >> 8) & 0xff;
	bytep[i++] = (request->u.ata.lba >> 16) & 0xff;
	bytep[i++] = ATA_D_LBA | ATA_D_IBM | ((request->u.ata.lba >> 24) & 0x0f);

	bytep[i++] = (request->u.ata.lba >> 24) & 0xff;
	bytep[i++] = (request->u.ata.lba >> 32) & 0xff;
	bytep[i++] = (request->u.ata.lba >> 40) & 0xff;
	bytep[i++] = (request->u.ata.feature >> 8) & 0xff;

	bytep[i++] = request->u.ata.count & 0xff;
	bytep[i++] = (request->u.ata.count >> 8) & 0xff;
	bytep[i++] = 0;
	bytep[i++] = 0;
    }

    bus_dmamap_sync(ch->dma->work_tag, ch->dma->work_map,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

    /* enable EDMA machinery if needed */
    if (!(ATA_INL(ctlr->r_res1, 0x02028 + ATA_MV_EDMA_BASE(ch)) & 0x00000001)) {
	ATA_OUTL(ctlr->r_res1, 0x02028 + ATA_MV_EDMA_BASE(ch), 0x00000001);
	while (!(ATA_INL(ctlr->r_res1,
			 0x02028 + ATA_MV_EDMA_BASE(ch)) & 0x00000001))
	    DELAY(10);
    }

    /* tell EDMA it has a new request */
    slot = (((req_in & ~0xfffffc00) >> 5) + 1) & 0x1f;
    req_in &= 0xfffffc00;
    req_in += (slot << 5);
    ATA_OUTL(ctlr->r_res1, 0x02014 + ATA_MV_EDMA_BASE(ch), req_in);

    return ATA_OP_CONTINUES;
}
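
/*
 * Completion side of the queues described above: the hardware advances the
 * response "in" pointer (offset 0x2020), whose bits 3-7 select an 8 byte
 * slot in the response area at work + 1024.  That slot carries the tag,
 * the EDMA status and the device status byte; the driver records the
 * device status in the request and acknowledges the entry by writing the
 * matching "out" pointer back at offset 0x2024.
 */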

/* must be called with ATA channel locked and state_mtx held */
static int
ata_marvell_edma_end_transaction(struct ata_request *request)
{
    struct ata_pci_controller *ctlr=device_get_softc(GRANDPARENT(request->dev));
    struct ata_channel *ch = device_get_softc(request->parent);
    int offset = (ch->unit > 3 ? 0x30014 : 0x20014);
    u_int32_t icr = ATA_INL(ctlr->r_res1, offset);
    int res;

    /* EDMA interrupt */
    if ((icr & (0x0001 << (ch->unit & 3)))) {
	struct ata_marvell_response *response;
	u_int32_t rsp_in, rsp_out;
	int slot;

	/* stop timeout */
	callout_stop_sync(&request->callout);

	/* get response ptr's */
	rsp_in = ATA_INL(ctlr->r_res1, 0x02020 + ATA_MV_EDMA_BASE(ch));
	rsp_out = ATA_INL(ctlr->r_res1, 0x02024 + ATA_MV_EDMA_BASE(ch));
	slot = (((rsp_in & ~0xffffff00) >> 3)) & 0x1f;
	rsp_out &= 0xffffff00;
	rsp_out += (slot << 3);
	bus_dmamap_sync(ch->dma->work_tag, ch->dma->work_map,
			BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	response = (struct ata_marvell_response *)
		   (ch->dma->work + 1024 + (slot << 3));

	/* record status for this request */
	request->status = response->dev_status;
	request->error = 0;

	/* ack response */
	ATA_OUTL(ctlr->r_res1, 0x02024 + ATA_MV_EDMA_BASE(ch), rsp_out);

	/* update progress */
	if (!(request->status & ATA_S_ERROR) &&
	    !(request->flags & ATA_R_TIMEOUT))
	    request->donecount = request->bytecount;

	/* unload SG list */
	ch->dma->unload(ch->dev);

	res = ATA_OP_FINISHED;
    }

    /* legacy ATA interrupt */
    else {
	res = ata_end_transaction(request);
    }

    /* ack interrupt */
    ATA_OUTL(ctlr->r_res1, offset, ~(icr & (0x0101 << (ch->unit & 3))));
    return res;
}

static void
ata_marvell_edma_reset(device_t dev)
{
    struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev));
    struct ata_channel *ch = device_get_softc(dev);

    /* disable the EDMA machinery */
    ATA_OUTL(ctlr->r_res1, 0x02028 + ATA_MV_EDMA_BASE(ch), 0x00000002);
    while ((ATA_INL(ctlr->r_res1, 0x02028 + ATA_MV_EDMA_BASE(ch)) & 0x00000001))
	DELAY(10);

    /* clear SATA error register */
    ATA_IDX_OUTL(ch, ATA_SERROR, ATA_IDX_INL(ch, ATA_SERROR));

    /* clear any outstanding error interrupts */
    ATA_OUTL(ctlr->r_res1, 0x02008 + ATA_MV_EDMA_BASE(ch), 0x0);

    /* unmask all error interrupts */
    ATA_OUTL(ctlr->r_res1, 0x0200c + ATA_MV_EDMA_BASE(ch), ~0x0);

    /* enable channel and test for devices */
    if (ata_sata_phy_reset(dev))
	ata_generic_reset(dev);

    /* enable EDMA machinery */
    ATA_OUTL(ctlr->r_res1, 0x02028 + ATA_MV_EDMA_BASE(ch), 0x00000001);
}
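
/*
 * The EDMA engine uses its own physical region descriptor format,
 * struct ata_marvell_dma_prdentry near the top of the file: a 64 bit
 * address split into low/high words plus a byte count, with ATA_DMA_EOT
 * or'ed into the count of the last entry to terminate the table.  The
 * callback below builds that table and is installed via ch->dma->setprd
 * in ata_marvell_edma_dmainit(), replacing the generic PRD builder.
 */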

static void
ata_marvell_edma_dmasetprd(void *xsc, bus_dma_segment_t *segs, int nsegs,
			   int error)
{
    struct ata_dmasetprd_args *args = xsc;
    struct ata_marvell_dma_prdentry *prd = args->dmatab;
    int i;

    if ((args->error = error))
	return;

    for (i = 0; i < nsegs; i++) {
	prd[i].addrlo = htole32(segs[i].ds_addr);
	prd[i].count = htole32(segs[i].ds_len);
	prd[i].addrhi = htole32((u_int64_t)segs[i].ds_addr >> 32);
	prd[i].reserved = 0;
    }
    prd[i - 1].count |= htole32(ATA_DMA_EOT);
    KASSERT(nsegs <= ATA_DMA_ENTRIES, ("too many DMA segment entries\n"));
    args->nsegs = nsegs;
}

static void
ata_marvell_edma_dmainit(device_t dev)
{
    struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev));
    struct ata_channel *ch = device_get_softc(dev);

    ata_dmainit(dev);
    if (ch->dma) {
	/* note start and stop are not used here */
	ch->dma->setprd = ata_marvell_edma_dmasetprd;

	if (ATA_INL(ctlr->r_res1, 0x00d00) & 0x00000004)
	    ch->dma->max_address = BUS_SPACE_MAXADDR;

	/* chip does not reliably do 64K DMA transfers */
	if (ctlr->chip->cfg2 == MV_50XX || ctlr->chip->cfg2 == MV_60XX)
	    ch->dma->max_iosize = 64 * DEV_BSIZE;
	else
	    ch->dma->max_iosize = (ATA_DMA_ENTRIES - 1) * PAGE_SIZE;
    }
}