/*-
 * Copyright (c) 2008 Alexander Motin <mav@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 * 25 * $FreeBSD: src/sys/dev/sdhci/sdhci.c,v 1.8 2009/02/17 19:12:15 mav Exp $ 26 */ 27 28 #include <sys/param.h> 29 #include <sys/systm.h> 30 #include <sys/bus.h> 31 #include <sys/callout.h> 32 #include <sys/conf.h> 33 #include <sys/kernel.h> 34 #include <sys/lock.h> 35 #include <sys/module.h> 36 #include <sys/spinlock.h> 37 #include <sys/resource.h> 38 #include <sys/rman.h> 39 #include <sys/sysctl.h> 40 #include <sys/taskqueue.h> 41 42 #include <bus/mmc/bridge.h> 43 #include <bus/mmc/mmcreg.h> 44 #include <bus/mmc/mmcbrvar.h> 45 46 #include "mmcbr_if.h" 47 #include "sdhci.h" 48 #include "sdhci_if.h" 49 50 SYSCTL_NODE(_hw, OID_AUTO, sdhci, CTLFLAG_RD, 0, "sdhci driver"); 51 52 int sdhci_debug = 0; 53 TUNABLE_INT("hw.sdhci.debug", &sdhci_debug); 54 SYSCTL_INT(_hw_sdhci, OID_AUTO, debug, CTLFLAG_RW, &sdhci_debug, 0, "Debug level"); 55 56 static int sdhci_sdma_disable = 0; 57 TUNABLE_INT("hw.sdhci.sdma_disable", &sdhci_sdma_disable); 58 59 static int sdhci_adma2_disable = 0; 60 TUNABLE_INT("hw.sdhci.adma2_disable", &sdhci_adma2_disable); 61 62 static int sdhci_adma2_test = 0; 63 TUNABLE_INT("hw.sdhci.adma2_test", &sdhci_adma2_test); 64 65 #define RD1(slot, off) SDHCI_READ_1((slot)->bus, (slot), (off)) 66 #define RD2(slot, off) SDHCI_READ_2((slot)->bus, (slot), (off)) 67 #define RD4(slot, off) SDHCI_READ_4((slot)->bus, (slot), (off)) 68 #define RD_MULTI_4(slot, off, ptr, count) \ 69 SDHCI_READ_MULTI_4((slot)->bus, (slot), (off), (ptr), (count)) 70 #define WR1(slot, off, val) SDHCI_WRITE_1((slot)->bus, (slot), (off), (val)) 71 #define WR2(slot, off, val) SDHCI_WRITE_2((slot)->bus, (slot), (off), (val)) 72 #define WR4(slot, off, val) SDHCI_WRITE_4((slot)->bus, (slot), (off), (val)) 73 #define WR_MULTI_4(slot, off, ptr, count) \ 74 SDHCI_WRITE_MULTI_4((slot)->bus, (slot), (off), (ptr), (count)) 75 76 static int slot_printf(struct sdhci_slot *, const char *, ...) 
77 __printflike(2, 3); 78 79 static void sdhci_set_clock(struct sdhci_slot *slot, uint32_t clock); 80 static void sdhci_start(struct sdhci_slot *slot); 81 static void sdhci_start_data(struct sdhci_slot *slot, struct mmc_data *data); 82 83 static void sdhci_card_task(void *, int); 84 85 static int sdhci_dma_alloc(struct sdhci_slot *slot); 86 static void sdhci_dmamem_free(bus_dmamem_t *mem); 87 static void sdhci_dma_free(struct sdhci_slot *slot); 88 static void sdhci_adma2_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs, 89 int error); 90 91 /* helper routines */ 92 #define SDHCI_LOCK(_slot) lockmgr(&(_slot)->lock, LK_EXCLUSIVE) 93 #define SDHCI_UNLOCK(_slot) lockmgr(&(_slot)->lock, LK_RELEASE) 94 #define SDHCI_LOCK_INIT(_slot) lockinit(&(_slot)->lock, "sdhci", 0, LK_CANRECURSE) 95 #define SDHCI_LOCK_DESTROY(_slot) lockuninit(&(_slot)->lock); 96 #define SDHCI_ASSERT_LOCKED(_slot) KKASSERT(lockstatus(&(_slot)->lock, curthread) != 0); 97 #define SDHCI_ASSERT_UNLOCKED(_slot) KKASSERT(lockstatus(&(_slot)->lock, curthread) == 0); 98 99 #define SDHCI_DEFAULT_MAX_FREQ 50 100 101 #define SDHCI_200_MAX_DIVIDER 256 102 #define SDHCI_300_MAX_DIVIDER 2046 103 104 /* 105 * Broadcom BCM577xx Controller Constants 106 */ 107 #define BCM577XX_DEFAULT_MAX_DIVIDER 256 /* Maximum divider supported by the default clock source. */ 108 #define BCM577XX_ALT_CLOCK_BASE 63000000 /* Alternative clock's base frequency. */ 109 110 #define BCM577XX_HOST_CONTROL 0x198 111 #define BCM577XX_CTRL_CLKSEL_MASK 0xFFFFCFFF 112 #define BCM577XX_CTRL_CLKSEL_SHIFT 12 113 #define BCM577XX_CTRL_CLKSEL_DEFAULT 0x0 114 #define BCM577XX_CTRL_CLKSEL_64MHZ 0x3 115 116 static int 117 slot_printf(struct sdhci_slot *slot, const char * fmt, ...) 
118 { 119 __va_list ap; 120 int retval; 121 122 retval = kprintf("%s-slot%d: ", 123 device_get_nameunit(slot->bus), slot->num); 124 125 __va_start(ap, fmt); 126 retval += kvprintf(fmt, ap); 127 __va_end(ap); 128 return (retval); 129 } 130 131 static void 132 sdhci_dumpregs(struct sdhci_slot *slot) 133 { 134 slot_printf(slot, 135 "============== REGISTER DUMP ==============\n"); 136 137 slot_printf(slot, "SDMA addr: 0x%08x | Version: 0x%08x\n", 138 RD4(slot, SDHCI_SDMA_ADDRESS), RD2(slot, SDHCI_HOST_VERSION)); 139 slot_printf(slot, "Blk size: 0x%08x | Blk cnt: 0x%08x\n", 140 RD2(slot, SDHCI_BLOCK_SIZE), RD2(slot, SDHCI_BLOCK_COUNT)); 141 slot_printf(slot, "Argument: 0x%08x | Trn mode: 0x%08x\n", 142 RD4(slot, SDHCI_ARGUMENT), RD2(slot, SDHCI_TRANSFER_MODE)); 143 slot_printf(slot, "Present: 0x%08x | Host ctl: 0x%08x\n", 144 RD4(slot, SDHCI_PRESENT_STATE), RD1(slot, SDHCI_HOST_CONTROL)); 145 slot_printf(slot, "Power: 0x%08x | Blk gap: 0x%08x\n", 146 RD1(slot, SDHCI_POWER_CONTROL), RD1(slot, SDHCI_BLOCK_GAP_CONTROL)); 147 slot_printf(slot, "Wake-up: 0x%08x | Clock: 0x%08x\n", 148 RD1(slot, SDHCI_WAKE_UP_CONTROL), RD2(slot, SDHCI_CLOCK_CONTROL)); 149 slot_printf(slot, "Timeout: 0x%08x | Int stat: 0x%08x\n", 150 RD1(slot, SDHCI_TIMEOUT_CONTROL), RD4(slot, SDHCI_INT_STATUS)); 151 slot_printf(slot, "Int enab: 0x%08x | Sig enab: 0x%08x\n", 152 RD4(slot, SDHCI_INT_ENABLE), RD4(slot, SDHCI_SIGNAL_ENABLE)); 153 slot_printf(slot, "AC12 err: 0x%08x | Host ctl2: 0x%08x\n", 154 RD2(slot, SDHCI_ACMD12_ERR), RD2(slot, SDHCI_HOST_CONTROL2)); 155 slot_printf(slot, "Caps: 0x%08x | Caps2: 0x%08x\n", 156 RD4(slot, SDHCI_CAPABILITIES), RD4(slot, SDHCI_CAPABILITIES2)); 157 slot_printf(slot, "Max curr: 0x%08x | ADMA err: 0x%08x\n", 158 RD4(slot, SDHCI_MAX_CURRENT), RD1(slot, SDHCI_ADMA_ERR)); 159 slot_printf(slot, "ADMA addr: 0x%08x | Slot int: 0x%08x\n", 160 RD4(slot, SDHCI_ADMA_ADDRESS_LOW), RD2(slot, SDHCI_SLOT_INT_STATUS)); 161 162 slot_printf(slot, 163 
"===========================================\n"); 164 } 165 166 static void 167 sdhci_reset(struct sdhci_slot *slot, uint8_t mask) 168 { 169 int timeout; 170 uint32_t clock; 171 172 if (slot->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) { 173 if (!SDHCI_GET_CARD_PRESENT(slot->bus, slot)) 174 return; 175 } 176 177 /* Some controllers need this kick or reset won't work. */ 178 if ((mask & SDHCI_RESET_ALL) == 0 && 179 (slot->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)) { 180 /* This is to force an update */ 181 clock = slot->clock; 182 slot->clock = 0; 183 sdhci_set_clock(slot, clock); 184 } 185 186 if (mask & SDHCI_RESET_ALL) { 187 slot->clock = 0; 188 slot->power = 0; 189 } 190 191 WR1(slot, SDHCI_SOFTWARE_RESET, mask); 192 193 if (slot->quirks & SDHCI_QUIRK_WAITFOR_RESET_ASSERTED) { 194 /* 195 * Resets on TI OMAPs and AM335x are incompatible with SDHCI 196 * specification. The reset bit has internal propagation delay, 197 * so a fast read after write returns 0 even if reset process is 198 * in progress. The workaround is to poll for 1 before polling 199 * for 0. In the worst case, if we miss seeing it asserted the 200 * time we spent waiting is enough to ensure the reset finishes. 201 */ 202 timeout = 10000; 203 while ((RD1(slot, SDHCI_SOFTWARE_RESET) & mask) != mask) { 204 if (timeout <= 0) 205 break; 206 timeout--; 207 DELAY(1); 208 } 209 } 210 211 /* Wait max 100 ms */ 212 timeout = 10000; 213 /* Controller clears the bits when it's done */ 214 while (RD1(slot, SDHCI_SOFTWARE_RESET) & mask) { 215 if (timeout <= 0) { 216 slot_printf(slot, "Reset 0x%x never completed.\n", 217 mask); 218 sdhci_dumpregs(slot); 219 return; 220 } 221 timeout--; 222 DELAY(10); 223 } 224 } 225 226 static void 227 sdhci_init(struct sdhci_slot *slot) 228 { 229 230 sdhci_reset(slot, SDHCI_RESET_ALL); 231 232 /* Enable interrupts. 
*/ 233 slot->intmask = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT | 234 SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_INDEX | 235 SDHCI_INT_END_BIT | SDHCI_INT_CRC | SDHCI_INT_TIMEOUT | 236 SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL | 237 SDHCI_INT_DMA_END | SDHCI_INT_DATA_END | SDHCI_INT_RESPONSE | 238 SDHCI_INT_ACMD12ERR | SDHCI_INT_ADMAERR; 239 if (!(slot->opt & SDHCI_SLOT_EMBEDDED)) 240 slot->intmask |= SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT; 241 WR4(slot, SDHCI_INT_ENABLE, slot->intmask); 242 WR4(slot, SDHCI_SIGNAL_ENABLE, slot->intmask); 243 } 244 245 static void 246 sdhci_set_clock(struct sdhci_slot *slot, uint32_t clock) 247 { 248 uint32_t clk_base; 249 uint32_t clk_sel; 250 uint32_t res; 251 uint16_t clk; 252 uint16_t div; 253 int timeout; 254 255 if (clock == slot->clock) 256 return; 257 slot->clock = clock; 258 259 /* Turn off the clock. */ 260 clk = RD2(slot, SDHCI_CLOCK_CONTROL); 261 WR2(slot, SDHCI_CLOCK_CONTROL, clk & ~SDHCI_CLOCK_CARD_EN); 262 /* If no clock requested - leave it so. */ 263 if (clock == 0) 264 return; 265 266 /* Determine the clock base frequency */ 267 clk_base = slot->max_clk; 268 if (slot->quirks & SDHCI_QUIRK_BCM577XX_400KHZ_CLKSRC) { 269 clk_sel = RD2(slot, BCM577XX_HOST_CONTROL) & BCM577XX_CTRL_CLKSEL_MASK; 270 271 /* Select clock source appropriate for the requested frequency. */ 272 if ((clk_base / BCM577XX_DEFAULT_MAX_DIVIDER) > clock) { 273 clk_base = BCM577XX_ALT_CLOCK_BASE; 274 clk_sel |= (BCM577XX_CTRL_CLKSEL_64MHZ << BCM577XX_CTRL_CLKSEL_SHIFT); 275 } else { 276 clk_sel |= (BCM577XX_CTRL_CLKSEL_DEFAULT << BCM577XX_CTRL_CLKSEL_SHIFT); 277 } 278 279 WR2(slot, BCM577XX_HOST_CONTROL, clk_sel); 280 } 281 282 /* Recalculate timeout clock frequency based on the new sd clock. */ 283 if (slot->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK) 284 slot->timeout_clk = slot->clock / 1000; 285 286 if (slot->version < SDHCI_SPEC_300) { 287 /* Looking for highest freq <= clock. 
*/ 288 res = clk_base; 289 for (div = 1; div < SDHCI_200_MAX_DIVIDER; div <<= 1) { 290 if (res <= clock) 291 break; 292 res >>= 1; 293 } 294 /* Divider 1:1 is 0x00, 2:1 is 0x01, 256:1 is 0x80 ... */ 295 div >>= 1; 296 } else { 297 /* Version 3.0 divisors are multiples of two up to 1023 * 2 */ 298 if (clock >= clk_base) 299 div = 0; 300 else { 301 for (div = 2; div < SDHCI_300_MAX_DIVIDER; div += 2) { 302 if ((clk_base / div) <= clock) 303 break; 304 } 305 } 306 div >>= 1; 307 } 308 309 if (bootverbose || sdhci_debug) 310 slot_printf(slot, "Divider %d for freq %d (base %d)\n", 311 div, clock, clk_base); 312 313 /* Now we have got divider, set it. */ 314 clk = (div & SDHCI_DIVIDER_MASK) << SDHCI_DIVIDER_SHIFT; 315 clk |= ((div >> SDHCI_DIVIDER_MASK_LEN) & SDHCI_DIVIDER_HI_MASK) 316 << SDHCI_DIVIDER_HI_SHIFT; 317 318 WR2(slot, SDHCI_CLOCK_CONTROL, clk); 319 /* Enable clock. */ 320 clk |= SDHCI_CLOCK_INT_EN; 321 WR2(slot, SDHCI_CLOCK_CONTROL, clk); 322 /* Wait up to 10 ms until it stabilize. */ 323 timeout = 10; 324 while (!((clk = RD2(slot, SDHCI_CLOCK_CONTROL)) 325 & SDHCI_CLOCK_INT_STABLE)) { 326 if (timeout == 0) { 327 slot_printf(slot, 328 "Internal clock never stabilised.\n"); 329 sdhci_dumpregs(slot); 330 return; 331 } 332 timeout--; 333 DELAY(1000); 334 } 335 /* Pass clock signal to the bus. */ 336 clk |= SDHCI_CLOCK_CARD_EN; 337 WR2(slot, SDHCI_CLOCK_CONTROL, clk); 338 } 339 340 static void 341 sdhci_set_power(struct sdhci_slot *slot, u_char power) 342 { 343 uint8_t pwr; 344 345 if (slot->power == power) 346 return; 347 348 slot->power = power; 349 350 /* Turn off the power. */ 351 pwr = 0; 352 WR1(slot, SDHCI_POWER_CONTROL, pwr); 353 /* If power down requested - leave it so. */ 354 if (power == 0) 355 return; 356 /* Set voltage. 
*/ 357 switch (1 << power) { 358 case MMC_OCR_LOW_VOLTAGE: 359 pwr |= SDHCI_POWER_180; 360 break; 361 case MMC_OCR_290_300: 362 case MMC_OCR_300_310: 363 pwr |= SDHCI_POWER_300; 364 break; 365 case MMC_OCR_320_330: 366 case MMC_OCR_330_340: 367 pwr |= SDHCI_POWER_330; 368 break; 369 } 370 WR1(slot, SDHCI_POWER_CONTROL, pwr); 371 /* Turn on the power. */ 372 pwr |= SDHCI_POWER_ON; 373 WR1(slot, SDHCI_POWER_CONTROL, pwr); 374 } 375 376 static void 377 sdhci_read_block_pio(struct sdhci_slot *slot) 378 { 379 uint32_t data; 380 char *buffer; 381 size_t left; 382 383 buffer = slot->curcmd->data->data; 384 buffer += slot->offset; 385 /* Transfer one block at a time. */ 386 left = min(512, slot->curcmd->data->len - slot->offset); 387 slot->offset += left; 388 389 /* If we are too fast, broken controllers return zeroes. */ 390 if (slot->quirks & SDHCI_QUIRK_BROKEN_TIMINGS) 391 DELAY(10); 392 /* Handle unaligned and aligned buffer cases. */ 393 if ((intptr_t)buffer & 3) { 394 while (left > 3) { 395 data = RD4(slot, SDHCI_BUFFER); 396 buffer[0] = data; 397 buffer[1] = (data >> 8); 398 buffer[2] = (data >> 16); 399 buffer[3] = (data >> 24); 400 buffer += 4; 401 left -= 4; 402 } 403 } else { 404 RD_MULTI_4(slot, SDHCI_BUFFER, 405 (uint32_t *)buffer, left >> 2); 406 left &= 3; 407 } 408 /* Handle uneven size case. */ 409 if (left > 0) { 410 data = RD4(slot, SDHCI_BUFFER); 411 while (left > 0) { 412 *(buffer++) = data; 413 data >>= 8; 414 left--; 415 } 416 } 417 } 418 419 static void 420 sdhci_write_block_pio(struct sdhci_slot *slot) 421 { 422 uint32_t data = 0; 423 char *buffer; 424 size_t left; 425 426 buffer = slot->curcmd->data->data; 427 buffer += slot->offset; 428 /* Transfer one block at a time. */ 429 left = min(512, slot->curcmd->data->len - slot->offset); 430 slot->offset += left; 431 432 /* Handle unaligned and aligned buffer cases. 
*/ 433 if ((intptr_t)buffer & 3) { 434 while (left > 3) { 435 data = buffer[0] + 436 (buffer[1] << 8) + 437 (buffer[2] << 16) + 438 (buffer[3] << 24); 439 left -= 4; 440 buffer += 4; 441 WR4(slot, SDHCI_BUFFER, data); 442 } 443 } else { 444 WR_MULTI_4(slot, SDHCI_BUFFER, 445 (uint32_t *)buffer, left >> 2); 446 left &= 3; 447 } 448 /* Handle uneven size case. */ 449 if (left > 0) { 450 while (left > 0) { 451 data <<= 8; 452 data += *(buffer++); 453 left--; 454 } 455 WR4(slot, SDHCI_BUFFER, data); 456 } 457 } 458 459 static void 460 sdhci_transfer_pio(struct sdhci_slot *slot) 461 { 462 463 /* Read as many blocks as possible. */ 464 if (slot->curcmd->data->flags & MMC_DATA_READ) { 465 while (RD4(slot, SDHCI_PRESENT_STATE) & 466 SDHCI_DATA_AVAILABLE) { 467 sdhci_read_block_pio(slot); 468 if (slot->offset >= slot->curcmd->data->len) 469 break; 470 } 471 } else { 472 while (RD4(slot, SDHCI_PRESENT_STATE) & 473 SDHCI_SPACE_AVAILABLE) { 474 sdhci_write_block_pio(slot); 475 if (slot->offset >= slot->curcmd->data->len) 476 break; 477 } 478 } 479 } 480 481 static void 482 sdhci_card_delay(void *arg) 483 { 484 struct sdhci_slot *slot = arg; 485 486 taskqueue_enqueue(taskqueue_swi_mp, &slot->card_task); 487 } 488 489 static void 490 sdhci_card_task(void *arg, int pending) 491 { 492 struct sdhci_slot *slot = arg; 493 494 SDHCI_LOCK(slot); 495 if (SDHCI_GET_CARD_PRESENT(slot->bus, slot)) { 496 if (slot->dev == NULL) { 497 /* If card is present - attach mmc bus. */ 498 slot->dev = device_add_child(slot->bus, "mmc", -1); 499 device_set_ivars(slot->dev, slot); 500 SDHCI_UNLOCK(slot); 501 device_probe_and_attach(slot->dev); 502 } else 503 SDHCI_UNLOCK(slot); 504 } else { 505 if (slot->dev != NULL) { 506 /* If no card present - detach mmc bus. 
*/ 507 device_t d = slot->dev; 508 slot->dev = NULL; 509 SDHCI_UNLOCK(slot); 510 device_delete_child(slot->bus, d); 511 } else 512 SDHCI_UNLOCK(slot); 513 } 514 } 515 516 static int 517 sdhci_dma_alloc(struct sdhci_slot *slot) 518 { 519 int err; 520 521 /* Allocate DMA memory for SDMA. */ 522 err = bus_dmamem_coherent(bus_get_dma_tag(slot->bus), 523 DMA_BLOCK_SIZE, 0, BUS_SPACE_MAXADDR_32BIT, 524 BUS_SPACE_MAXADDR, DMA_BLOCK_SIZE, BUS_DMA_NOWAIT, 525 &slot->sdma_mem); 526 if (err != 0) { 527 device_printf(slot->bus, "Can't alloc DMA memory for SDMA\n"); 528 goto done; 529 } 530 531 /* Allocate DMA memory for 32bit ADMA2 descriptors. */ 532 err = bus_dmamem_coherent(bus_get_dma_tag(slot->bus), 533 4, 0, BUS_SPACE_MAXADDR_32BIT, 534 BUS_SPACE_MAXADDR, SDHCI_ADMA2_DESCBUF_SIZE, BUS_DMA_NOWAIT, 535 &slot->adma2_descs); 536 if (err != 0) { 537 device_printf(slot->bus, 538 "Can't alloc DMA memory for ADMA2 descriptors\n"); 539 goto error1; 540 } 541 542 /* Allocate DMA tag for 32bit ADMA2 data buffer */ 543 err = bus_dma_tag_create(bus_get_dma_tag(slot->bus), 544 4, 0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, 545 MAXPHYS, SDHCI_ADMA2_DESC_COUNT, 546 MIN(MAXPHYS, SDHCI_ADMA2_MAX_SEGSIZE), 547 BUS_DMA_ALLOCNOW | BUS_DMA_ALLOCALL, 548 &slot->adma2_tag); 549 if (err != 0) { 550 device_printf(slot->bus, "Can't create DMA tag for ADMA2\n"); 551 goto error2; 552 } 553 554 /* Allocate DMA map for ADMA2 data buffer */ 555 err = bus_dmamap_create(slot->adma2_tag, BUS_DMA_NOWAIT, 556 &slot->adma2_map); 557 if (err != 0) { 558 device_printf(slot->bus, "Can't create DMA map for ADMA2\n"); 559 goto error3; 560 } 561 562 return (0); 563 564 error3: 565 bus_dma_tag_destroy(slot->adma2_tag); 566 error2: 567 sdhci_dmamem_free(&slot->adma2_descs); 568 error1: 569 sdhci_dmamem_free(&slot->sdma_mem); 570 done: 571 return (err); 572 } 573 574 static void 575 sdhci_dmamem_free(bus_dmamem_t *dma) 576 { 577 bus_dmamap_unload(dma->dmem_tag, dma->dmem_map); 578 
bus_dmamem_free(dma->dmem_tag, dma->dmem_addr, dma->dmem_map); 579 bus_dma_tag_destroy(dma->dmem_tag); 580 } 581 582 static void 583 sdhci_dma_free(struct sdhci_slot *slot) 584 { 585 bus_dmamap_destroy(slot->adma2_tag, slot->adma2_map); 586 bus_dma_tag_destroy(slot->adma2_tag); 587 sdhci_dmamem_free(&slot->sdma_mem); 588 sdhci_dmamem_free(&slot->adma2_descs); 589 } 590 591 static void 592 sdhci_adma2_getaddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 593 { 594 struct sdhci_slot *slot = arg; 595 bus_dmamem_t *descmem = &slot->adma2_descs; 596 struct sdhci_adma2_desc32 *descs = (void *)descmem->dmem_addr; 597 int i; 598 599 if (error != 0) { 600 /* This signals, that loading was unsuccessful */ 601 memset(&descs[0], 0, sizeof(*descs)); 602 return; 603 } 604 605 for (i = 0; i < nsegs; i++) { 606 descs[i].address = segs[i].ds_addr; 607 /* 608 * The 65536 segment length case is broken in some sdhc host 609 * controllers, so we actually use a maximum segment length 610 * of 32768 for the DMA mapping and ds_len should be at most 611 * 32768 here. 612 */ 613 if (segs[i].ds_len == 65536) 614 descs[i].length = 0; 615 else 616 descs[i].length = segs[i].ds_len; 617 descs[i].attribute = 618 SDHCI_ADMA2_ATTR_VALID | SDHCI_ADMA2_ATTR_OP_TRAN; 619 } 620 descs[nsegs-1].attribute |= SDHCI_ADMA2_ATTR_END; 621 /* If there is room left, explicitly add an invalid descriptor. */ 622 if (nsegs < SDHCI_ADMA2_DESC_COUNT) 623 memset(&descs[nsegs], 0, sizeof(*descs)); 624 } 625 626 int 627 sdhci_init_slot(device_t dev, struct sdhci_slot *slot, int num) 628 { 629 uint32_t caps, freq; 630 int err; 631 632 SDHCI_LOCK_INIT(slot); 633 slot->num = num; 634 slot->bus = dev; 635 636 err = sdhci_dma_alloc(slot); 637 if (err != 0) { 638 SDHCI_LOCK_DESTROY(slot); 639 return (err); 640 } 641 642 /* Initialize slot. 
*/ 643 sdhci_init(slot); 644 slot->version = (RD2(slot, SDHCI_HOST_VERSION) 645 >> SDHCI_SPEC_VER_SHIFT) & SDHCI_SPEC_VER_MASK; 646 if (slot->quirks & SDHCI_QUIRK_MISSING_CAPS) 647 caps = slot->caps; 648 else 649 caps = RD4(slot, SDHCI_CAPABILITIES); 650 if (slot->version >= SDHCI_SPEC_300) { 651 if ((caps & SDHCI_SLOTTYPE_MASK) != SDHCI_SLOTTYPE_REMOVABLE && 652 (caps & SDHCI_SLOTTYPE_MASK) != SDHCI_SLOTTYPE_EMBEDDED) { 653 device_printf(dev, 654 "Driver doesn't support shared bus slots\n"); 655 sdhci_dma_free(slot); 656 SDHCI_LOCK_DESTROY(slot); 657 return (1); 658 } else if ((caps & SDHCI_SLOTTYPE_MASK) == 659 SDHCI_SLOTTYPE_EMBEDDED) { 660 slot->opt |= SDHCI_SLOT_EMBEDDED; 661 } 662 } 663 /* Calculate base clock frequency. */ 664 if (slot->version >= SDHCI_SPEC_300) 665 freq = (caps & SDHCI_CLOCK_V3_BASE_MASK) >> 666 SDHCI_CLOCK_BASE_SHIFT; 667 else 668 freq = (caps & SDHCI_CLOCK_BASE_MASK) >> 669 SDHCI_CLOCK_BASE_SHIFT; 670 if (freq != 0) 671 slot->max_clk = freq * 1000000; 672 /* 673 * If the frequency wasn't in the capabilities and the hardware driver 674 * hasn't already set max_clk we're probably not going to work right 675 * with an assumption, so complain about it. 676 */ 677 if (slot->max_clk == 0) { 678 slot->max_clk = SDHCI_DEFAULT_MAX_FREQ * 1000000; 679 device_printf(dev, "Hardware doesn't specify base clock " 680 "frequency, using %dMHz as default.\n", SDHCI_DEFAULT_MAX_FREQ); 681 } 682 /* Calculate timeout clock frequency. */ 683 if (slot->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK) { 684 slot->timeout_clk = slot->max_clk / 1000; 685 } else { 686 slot->timeout_clk = 687 (caps & SDHCI_TIMEOUT_CLK_MASK) >> SDHCI_TIMEOUT_CLK_SHIFT; 688 if (caps & SDHCI_TIMEOUT_CLK_UNIT) 689 slot->timeout_clk *= 1000; 690 } 691 /* 692 * If the frequency wasn't in the capabilities and the hardware driver 693 * hasn't already set timeout_clk we'll probably work okay using the 694 * max timeout, but still mention it. 
695 */ 696 if (slot->timeout_clk == 0) { 697 device_printf(dev, "Hardware doesn't specify timeout clock " 698 "frequency, setting BROKEN_TIMEOUT quirk.\n"); 699 slot->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL; 700 } 701 702 slot->host.f_min = SDHCI_MIN_FREQ(slot->bus, slot); 703 slot->host.f_max = slot->max_clk; 704 slot->host.host_ocr = 0; 705 if (caps & SDHCI_CAN_VDD_330) 706 slot->host.host_ocr |= MMC_OCR_320_330 | MMC_OCR_330_340; 707 if (caps & SDHCI_CAN_VDD_300) 708 slot->host.host_ocr |= MMC_OCR_290_300 | MMC_OCR_300_310; 709 if (caps & SDHCI_CAN_VDD_180) 710 slot->host.host_ocr |= MMC_OCR_LOW_VOLTAGE; 711 if (slot->host.host_ocr == 0) { 712 device_printf(dev, "Hardware doesn't report any " 713 "support voltages.\n"); 714 } 715 slot->host.caps = MMC_CAP_4_BIT_DATA; 716 if (caps & SDHCI_CAN_DO_8BITBUS) 717 slot->host.caps |= MMC_CAP_8_BIT_DATA; 718 if (caps & SDHCI_CAN_DO_HISPD) 719 slot->host.caps |= MMC_CAP_HSPEED; 720 if (slot->quirks & SDHCI_QUIRK_WAIT_WHILE_BUSY) 721 slot->host.caps |= MMC_CAP_WAIT_WHILE_BUSY; 722 /* Decide if we have usable DMA. */ 723 if (caps & SDHCI_CAN_DO_DMA) 724 slot->opt |= SDHCI_HAVE_SDMA; 725 if (caps & SDHCI_CAN_DO_ADMA2) 726 slot->opt |= SDHCI_HAVE_ADMA2; 727 728 /* Use ADMA2 only on whitelisted models, or when explicitly enabled. 
*/ 729 if (sdhci_adma2_test == 0 && 730 (slot->quirks & SDHCI_QUIRK_WHITELIST_ADMA2) == 0) { 731 slot->opt &= ~SDHCI_HAVE_ADMA2; 732 } 733 734 if (slot->quirks & SDHCI_QUIRK_BROKEN_DMA) { 735 slot->opt &= ~SDHCI_HAVE_SDMA; 736 slot->opt &= ~SDHCI_HAVE_ADMA2; 737 } 738 if (slot->quirks & SDHCI_QUIRK_FORCE_SDMA) 739 slot->opt |= SDHCI_HAVE_SDMA; 740 741 if (sdhci_sdma_disable) 742 slot->opt &= ~SDHCI_HAVE_SDMA; 743 if (sdhci_adma2_disable) 744 slot->opt &= ~SDHCI_HAVE_ADMA2; 745 746 /* 747 * Use platform-provided transfer backend 748 * with PIO as a fallback mechanism 749 */ 750 if (slot->opt & SDHCI_PLATFORM_TRANSFER) { 751 slot->opt &= ~SDHCI_HAVE_SDMA; 752 slot->opt &= ~SDHCI_HAVE_ADMA2; 753 } 754 755 if (bootverbose || sdhci_debug) { 756 slot_printf(slot, "%uMHz%s %s%s%s%s %s%s\n", 757 slot->max_clk / 1000000, 758 (caps & SDHCI_CAN_DO_HISPD) ? " HS" : "", 759 (slot->host.caps & MMC_CAP_8_BIT_DATA) ? "8bits" : 760 ((slot->host.caps & MMC_CAP_4_BIT_DATA) ? "4bits" : 761 "1bit"), 762 (caps & SDHCI_CAN_VDD_330) ? " 3.3V" : "", 763 (caps & SDHCI_CAN_VDD_300) ? " 3.0V" : "", 764 (caps & SDHCI_CAN_VDD_180) ? " 1.8V" : "", 765 (slot->opt & SDHCI_HAVE_ADMA2) ? "ADMA2" : 766 (slot->opt & SDHCI_HAVE_SDMA) ? "SDMA" : "PIO", 767 (slot->version < SDHCI_SPEC_300) ? "" : 768 (slot->opt & SDHCI_SLOT_EMBEDDED) ? 
" (embedded)" : 769 " (removable)"); 770 sdhci_dumpregs(slot); 771 } 772 773 slot->timeout = 10; 774 slot->failures = 0; 775 SYSCTL_ADD_INT(device_get_sysctl_ctx(slot->bus), 776 SYSCTL_CHILDREN(device_get_sysctl_tree(slot->bus)), OID_AUTO, 777 "timeout", CTLFLAG_RW, &slot->timeout, 0, 778 "Maximum timeout for SDHCI transfers (in secs)"); 779 TASK_INIT(&slot->card_task, 0, sdhci_card_task, slot); 780 callout_init_mp(&slot->card_callout); 781 callout_init_lk(&slot->timeout_callout, &slot->lock); 782 return (0); 783 } 784 785 void 786 sdhci_start_slot(struct sdhci_slot *slot) 787 { 788 sdhci_card_task(slot, 0); 789 } 790 791 int 792 sdhci_cleanup_slot(struct sdhci_slot *slot) 793 { 794 device_t d; 795 796 callout_drain(&slot->timeout_callout); 797 callout_drain(&slot->card_callout); 798 taskqueue_drain(taskqueue_swi_mp, &slot->card_task); 799 800 SDHCI_LOCK(slot); 801 d = slot->dev; 802 slot->dev = NULL; 803 SDHCI_UNLOCK(slot); 804 if (d != NULL) 805 device_delete_child(slot->bus, d); 806 807 SDHCI_LOCK(slot); 808 sdhci_reset(slot, SDHCI_RESET_ALL); 809 SDHCI_UNLOCK(slot); 810 811 sdhci_dma_free(slot); 812 813 SDHCI_LOCK_DESTROY(slot); 814 815 return (0); 816 } 817 818 int 819 sdhci_generic_suspend(struct sdhci_slot *slot) 820 { 821 sdhci_reset(slot, SDHCI_RESET_ALL); 822 823 return (0); 824 } 825 826 int 827 sdhci_generic_resume(struct sdhci_slot *slot) 828 { 829 sdhci_init(slot); 830 831 return (0); 832 } 833 834 uint32_t 835 sdhci_generic_min_freq(device_t brdev __unused, struct sdhci_slot *slot) 836 { 837 if (slot->version >= SDHCI_SPEC_300) 838 return (slot->max_clk / SDHCI_300_MAX_DIVIDER); 839 else 840 return (slot->max_clk / SDHCI_200_MAX_DIVIDER); 841 } 842 843 boolean_t 844 sdhci_generic_get_card_present(device_t brdev __unused, struct sdhci_slot *slot) 845 { 846 if (slot->opt & SDHCI_SLOT_EMBEDDED) 847 return 1; 848 849 return (RD4(slot, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT); 850 } 851 852 int 853 sdhci_generic_update_ios(device_t brdev, device_t 
reqdev) 854 { 855 struct sdhci_slot *slot = device_get_ivars(reqdev); 856 struct mmc_ios *ios = &slot->host.ios; 857 858 SDHCI_LOCK(slot); 859 /* Do full reset on bus power down to clear from any state. */ 860 if (ios->power_mode == power_off) { 861 WR4(slot, SDHCI_SIGNAL_ENABLE, 0); 862 sdhci_init(slot); 863 } 864 /* Configure the bus. */ 865 sdhci_set_clock(slot, ios->clock); 866 sdhci_set_power(slot, (ios->power_mode == power_off) ? 0 : ios->vdd); 867 if (ios->bus_width == bus_width_8) { 868 slot->hostctrl |= SDHCI_CTRL_8BITBUS; 869 slot->hostctrl &= ~SDHCI_CTRL_4BITBUS; 870 } else if (ios->bus_width == bus_width_4) { 871 slot->hostctrl &= ~SDHCI_CTRL_8BITBUS; 872 slot->hostctrl |= SDHCI_CTRL_4BITBUS; 873 } else if (ios->bus_width == bus_width_1) { 874 slot->hostctrl &= ~SDHCI_CTRL_8BITBUS; 875 slot->hostctrl &= ~SDHCI_CTRL_4BITBUS; 876 } else { 877 panic("Invalid bus width: %d", ios->bus_width); 878 } 879 if (ios->timing == bus_timing_hs && 880 !(slot->quirks & SDHCI_QUIRK_DONT_SET_HISPD_BIT)) 881 slot->hostctrl |= SDHCI_CTRL_HISPD; 882 else 883 slot->hostctrl &= ~SDHCI_CTRL_HISPD; 884 WR1(slot, SDHCI_HOST_CONTROL, slot->hostctrl); 885 /* Some controllers like reset after bus changes. 
*/ 886 if (slot->quirks & SDHCI_QUIRK_RESET_ON_IOS) 887 sdhci_reset(slot, SDHCI_RESET_CMD | SDHCI_RESET_DATA); 888 889 SDHCI_UNLOCK(slot); 890 return (0); 891 } 892 893 static void 894 sdhci_req_done(struct sdhci_slot *slot) 895 { 896 struct mmc_request *req; 897 898 if (slot->req != NULL && slot->curcmd != NULL) { 899 callout_stop(&slot->timeout_callout); 900 if (slot->curcmd->error != MMC_ERR_TIMEOUT) 901 slot->failures = 0; 902 req = slot->req; 903 slot->req = NULL; 904 slot->curcmd = NULL; 905 req->done(req); 906 } 907 } 908 909 static void 910 sdhci_timeout(void *arg) 911 { 912 struct sdhci_slot *slot = arg; 913 914 if (slot->curcmd != NULL) { 915 slot_printf(slot, " Controller timeout\n"); 916 sdhci_dumpregs(slot); 917 sdhci_reset(slot, SDHCI_RESET_CMD|SDHCI_RESET_DATA); 918 slot->curcmd->error = MMC_ERR_TIMEOUT; 919 sdhci_req_done(slot); 920 } else { 921 slot_printf(slot, " Spurious timeout - no active command\n"); 922 } 923 } 924 925 static void 926 sdhci_set_transfer_mode(struct sdhci_slot *slot, struct mmc_data *data) 927 { 928 uint16_t mode; 929 930 if (data == NULL) 931 return; 932 933 mode = SDHCI_TRNS_BLK_CNT_EN; 934 if (data->len > 512) 935 mode |= SDHCI_TRNS_MULTI; 936 if (data->flags & MMC_DATA_READ) 937 mode |= SDHCI_TRNS_READ; 938 if (slot->req->stop) 939 mode |= SDHCI_TRNS_ACMD12; 940 if (slot->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA2)) 941 mode |= SDHCI_TRNS_DMA; 942 943 WR2(slot, SDHCI_TRANSFER_MODE, mode); 944 } 945 946 static void 947 sdhci_start_command(struct sdhci_slot *slot, struct mmc_command *cmd) 948 { 949 int flags, timeout; 950 uint32_t mask; 951 952 slot->curcmd = cmd; 953 slot->cmd_done = 0; 954 955 cmd->error = MMC_ERR_NONE; 956 957 /* This flags combination is not supported by controller. 
*/ 958 if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) { 959 slot_printf(slot, "Unsupported response type!\n"); 960 cmd->error = MMC_ERR_FAILED; 961 sdhci_req_done(slot); 962 return; 963 } 964 965 /* 966 * Do not issue command if there is no card, clock or power. 967 * Controller will not detect timeout without clock active. 968 */ 969 if (!SDHCI_GET_CARD_PRESENT(slot->bus, slot) || 970 slot->power == 0 || 971 slot->clock == 0) { 972 cmd->error = MMC_ERR_FAILED; 973 sdhci_req_done(slot); 974 return; 975 } 976 /* Always wait for free CMD bus. */ 977 mask = SDHCI_CMD_INHIBIT; 978 /* Wait for free DAT if we have data or busy signal. */ 979 if (cmd->data || (cmd->flags & MMC_RSP_BUSY)) 980 mask |= SDHCI_DAT_INHIBIT; 981 /* We shouldn't wait for DAT for stop commands. */ 982 if (cmd == slot->req->stop) 983 mask &= ~SDHCI_DAT_INHIBIT; 984 /* 985 * Wait for bus no more then 250 ms. Typically there will be no wait 986 * here at all, but when writing a crash dump we may be bypassing the 987 * host platform's interrupt handler, and in some cases that handler 988 * may be working around hardware quirks such as not respecting r1b 989 * busy indications. In those cases, this wait-loop serves the purpose 990 * of waiting for the prior command and data transfers to be done, and 991 * SD cards are allowed to take up to 250ms for write and erase ops. 992 * (It's usually more like 20-30ms in the real world.) 993 */ 994 timeout = 250; 995 while (mask & RD4(slot, SDHCI_PRESENT_STATE)) { 996 if (timeout == 0) { 997 slot_printf(slot, "Controller never released " 998 "inhibit bit(s).\n"); 999 sdhci_dumpregs(slot); 1000 cmd->error = MMC_ERR_FAILED; 1001 sdhci_req_done(slot); 1002 return; 1003 } 1004 timeout--; 1005 DELAY(1000); 1006 } 1007 1008 /* Prepare command flags. 
*/ 1009 if (!(cmd->flags & MMC_RSP_PRESENT)) 1010 flags = SDHCI_CMD_RESP_NONE; 1011 else if (cmd->flags & MMC_RSP_136) 1012 flags = SDHCI_CMD_RESP_LONG; 1013 else if (cmd->flags & MMC_RSP_BUSY) 1014 flags = SDHCI_CMD_RESP_SHORT_BUSY; 1015 else 1016 flags = SDHCI_CMD_RESP_SHORT; 1017 if (cmd->flags & MMC_RSP_CRC) 1018 flags |= SDHCI_CMD_CRC; 1019 if (cmd->flags & MMC_RSP_OPCODE) 1020 flags |= SDHCI_CMD_INDEX; 1021 if (cmd->data) 1022 flags |= SDHCI_CMD_DATA; 1023 if (cmd->opcode == MMC_STOP_TRANSMISSION) 1024 flags |= SDHCI_CMD_TYPE_ABORT; 1025 /* Prepare data. */ 1026 sdhci_start_data(slot, cmd->data); 1027 /* 1028 * Interrupt aggregation: To reduce total number of interrupts 1029 * group response interrupt with data interrupt when possible. 1030 * If there is going to be a data interrupt, mask the response one. 1031 */ 1032 if (slot->data_done == 0) { 1033 WR4(slot, SDHCI_SIGNAL_ENABLE, 1034 slot->intmask &= ~SDHCI_INT_RESPONSE); 1035 } 1036 /* Set command argument. */ 1037 WR4(slot, SDHCI_ARGUMENT, cmd->arg); 1038 /* Set data transfer mode. */ 1039 sdhci_set_transfer_mode(slot, cmd->data); 1040 /* Start command. */ 1041 WR2(slot, SDHCI_COMMAND_FLAGS, (cmd->opcode << 8) | (flags & 0xff)); 1042 1043 /* 1044 * Start timeout callout. Timeout is dropped to 2 seconds with 1045 * repeated controller timeouts. 1046 */ 1047 if (slot->failures) 1048 timeout = slot->timeout / 5; 1049 else 1050 timeout = slot->timeout; 1051 if (timeout < 2) 1052 timeout = 2; 1053 callout_reset(&slot->timeout_callout, timeout * hz, 1054 sdhci_timeout, slot); 1055 } 1056 1057 static void 1058 sdhci_finish_command(struct sdhci_slot *slot) 1059 { 1060 int i; 1061 1062 slot->cmd_done = 1; 1063 /* Interrupt aggregation: Restore command interrupt. 1064 * Main restore point for the case when command interrupt 1065 * happened first. */ 1066 WR4(slot, SDHCI_SIGNAL_ENABLE, slot->intmask |= SDHCI_INT_RESPONSE); 1067 /* In case of error - reset host and return. 
	 */
	if (slot->curcmd->error) {
		sdhci_reset(slot, SDHCI_RESET_CMD);
		sdhci_reset(slot, SDHCI_RESET_DATA);
		sdhci_start(slot);
		return;
	}
	/* If command has response - fetch it. */
	if (slot->curcmd->flags & MMC_RSP_PRESENT) {
		if (slot->curcmd->flags & MMC_RSP_136) {
			/* CRC is stripped so we need one byte shift. */
			uint8_t extra = 0;
			/* Read the four 32-bit response words, high to low. */
			for (i = 0; i < 4; i++) {
				uint32_t val = RD4(slot, SDHCI_RESPONSE + i * 4);
				if (slot->quirks & SDHCI_QUIRK_DONT_SHIFT_RESPONSE) {
					slot->curcmd->resp[3 - i] = val;
				} else {
					/* Carry the top byte into the next word. */
					slot->curcmd->resp[3 - i] =
					    (val << 8) | extra;
					extra = val >> 24;
				}
			}
		} else {
			slot->curcmd->resp[0] = RD4(slot, SDHCI_RESPONSE);
		}
	}
	/* If data ready - finish. */
	if (slot->data_done)
		sdhci_start(slot);
}

/*
 * Program the data-related registers for the current command: the data
 * timeout, the DMA engine (ADMA2 preferred, then SDMA, else PIO), and
 * the block size/count.  A NULL data with no busy flag completes
 * immediately.
 */
static void
sdhci_start_data(struct sdhci_slot *slot, struct mmc_data *data)
{
	uint32_t target_timeout, current_timeout;
	uint8_t div;

	if (data == NULL && (slot->curcmd->flags & MMC_RSP_BUSY) == 0) {
		slot->data_done = 1;
		return;
	}

	slot->data_done = 0;

	/* Calculate and set data timeout.*/
	/* XXX: We should have this from mmc layer, now assume 1 sec. */
	if (slot->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL) {
		/* Broken timeout logic: always use the maximum divider. */
		div = 0xe;
	} else {
		target_timeout = 1000000;
		div = 0;
		/* Base timeout is 2^13 timeout-clock ticks, in microseconds. */
		current_timeout = (1 << 13) * 1000 / slot->timeout_clk;
		while (current_timeout < target_timeout && div < 0xE) {
			++div;
			current_timeout <<= 1;
		}
		/* Compensate for an off-by-one error in the CaFe chip.*/
		if (div < 0xE &&
		    (slot->quirks & SDHCI_QUIRK_INCR_TIMEOUT_CONTROL)) {
			++div;
		}
	}
	WR1(slot, SDHCI_TIMEOUT_CONTROL, div);

	/* Busy-only command: timeout programmed, nothing more to set up. */
	if (data == NULL)
		return;

	/* Use DMA if possible. Prefer ADMA2 over SDMA.
	 */
	if ((slot->opt & SDHCI_HAVE_ADMA2)) {
		slot->flags |= SDHCI_USE_ADMA2;
		slot->flags &= ~SDHCI_USE_SDMA;
	} else if ((slot->opt & SDHCI_HAVE_SDMA)) {
		slot->flags |= SDHCI_USE_SDMA;
		slot->flags &= ~SDHCI_USE_ADMA2;
	}
	/* If data is small, broken DMA may return zeroes instead of data. */
	if ((slot->quirks & SDHCI_QUIRK_BROKEN_TIMINGS) &&
	    (data->len <= 512)) {
		slot->flags &= ~SDHCI_USE_SDMA;
		slot->flags &= ~SDHCI_USE_ADMA2;
	}
	/* Some controllers require even block sizes. */
	if ((slot->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE) &&
	    ((data->len) & 0x3)) {
		slot->flags &= ~SDHCI_USE_SDMA;
		slot->flags &= ~SDHCI_USE_ADMA2;
	}
	/* Load DMA buffer. */
	if (slot->flags & SDHCI_USE_ADMA2) {
		bus_dmamem_t *descmem = &slot->adma2_descs;
		struct sdhci_adma2_desc32 *descs = (void *)descmem->dmem_addr;
		int err;

		/* It shouldn't really be possible for this to fail */
		/* When dumping we must not sleep in the DMA load. */
		err = bus_dmamap_load(slot->adma2_tag, slot->adma2_map,
		    data->data, data->len, sdhci_adma2_getaddr, slot,
		    dumping ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
		if (err != 0) {
			device_printf(slot->bus,
			    "Dma load for ADMA2 fail: %d\n", err);
		} else if (descs[0].address == 0) {
			device_printf(slot->bus,
			    "Dma load for ADMA2 fail, segment constraints\n");
		}
		if (err != 0 || descs[0].address == 0) {
			/* fallback to PIO for this request */
			slot->flags &= ~SDHCI_USE_ADMA2;
			goto pio_fallback;
		}
		/* sync dma descriptors */
		bus_dmamap_sync(descmem->dmem_tag, descmem->dmem_map,
		    BUS_DMASYNC_PREWRITE);
		/* sync data buffers */
		if (data->flags & MMC_DATA_READ) {
			bus_dmamap_sync(slot->adma2_tag, slot->adma2_map,
			    BUS_DMASYNC_PREREAD);
		} else {
			bus_dmamap_sync(slot->adma2_tag, slot->adma2_map,
			    BUS_DMASYNC_PREWRITE);
		}
		/* Point the controller at the descriptor table. */
		WR4(slot, SDHCI_ADMA_ADDRESS_LOW, descmem->dmem_busaddr);
		/* Switch the host-control DMA select to ADMA2 if needed. */
		if ((slot->hostctrl & SDHCI_CTRL_DMA_MASK) !=
		    SDHCI_CTRL_ADMA2) {
			slot->hostctrl &= ~SDHCI_CTRL_DMA_MASK;
			slot->hostctrl |= SDHCI_CTRL_ADMA2;
			WR1(slot, SDHCI_HOST_CONTROL, slot->hostctrl);
		}
		/* We don't expect any DMA_END interrupts with ADMA2 */
		WR4(slot, SDHCI_SIGNAL_ENABLE,
		    slot->intmask &= ~SDHCI_INT_DMA_END);
	} else if (slot->flags & SDHCI_USE_SDMA) {
		bus_dmamem_t *sdma = &slot->sdma_mem;

		if (data->flags & MMC_DATA_READ) {
			bus_dmamap_sync(sdma->dmem_tag, sdma->dmem_map,
			    BUS_DMASYNC_PREREAD);
		} else {
			/* SDMA uses a bounce buffer of DMA_BLOCK_SIZE bytes. */
			memcpy(sdma->dmem_addr, data->data,
			    (data->len < DMA_BLOCK_SIZE) ?
			    data->len : DMA_BLOCK_SIZE);
			bus_dmamap_sync(sdma->dmem_tag, sdma->dmem_map,
			    BUS_DMASYNC_PREWRITE);
		}
		WR4(slot, SDHCI_SDMA_ADDRESS, sdma->dmem_busaddr);
		/* Switch the host-control DMA select to SDMA if needed. */
		if ((slot->hostctrl & SDHCI_CTRL_DMA_MASK) !=
		    SDHCI_CTRL_SDMA) {
			slot->hostctrl &= ~SDHCI_CTRL_DMA_MASK;
			slot->hostctrl |= SDHCI_CTRL_SDMA;
			WR1(slot, SDHCI_HOST_CONTROL, slot->hostctrl);
		}
		/* Interrupt aggregation: Mask border interrupt
		 * for the last page and unmask else. */
		if (data->len == DMA_BLOCK_SIZE)
			slot->intmask &= ~SDHCI_INT_DMA_END;
		else
			slot->intmask |= SDHCI_INT_DMA_END;
		WR4(slot, SDHCI_SIGNAL_ENABLE, slot->intmask);
	}
pio_fallback:
	/* Current data offset for both PIO and SDMA. */
	slot->offset = 0;
	/* Set block size and for SDMA request IRQ on 4K border. */
	WR2(slot, SDHCI_BLOCK_SIZE,
	    SDHCI_MAKE_BLKSZ(DMA_BOUNDARY, (data->len < 512)?data->len:512));
	/* Set block count. */
	WR2(slot, SDHCI_BLOCK_COUNT, (data->len + 511) / 512);
}

/*
 * Complete the data phase of the current request: sync/unload the DMA
 * buffers, copy out any remaining SDMA read data, and advance the state
 * machine (or reset the host on error).
 */
void
sdhci_finish_data(struct sdhci_slot *slot)
{
	struct mmc_data *data = slot->curcmd->data;

	/* Interrupt aggregation: Restore command interrupt.
	 * Auxiliary restore point for the case when data interrupt
	 * happened first. */
	if (!slot->cmd_done) {
		WR4(slot, SDHCI_SIGNAL_ENABLE,
		    slot->intmask |= SDHCI_INT_RESPONSE);
	}
	/* Unload rest of data from DMA buffer.
	 */
	if (!slot->data_done && (slot->flags & SDHCI_USE_ADMA2)) {
		bus_dmamem_t *descmem = &slot->adma2_descs;

		/* Descriptor table is done; sync it back. */
		bus_dmamap_sync(descmem->dmem_tag, descmem->dmem_map,
		    BUS_DMASYNC_POSTWRITE);
		if (data->flags & MMC_DATA_READ) {
			bus_dmamap_sync(slot->adma2_tag, slot->adma2_map,
			    BUS_DMASYNC_POSTREAD);
		} else {
			bus_dmamap_sync(slot->adma2_tag, slot->adma2_map,
			    BUS_DMASYNC_POSTWRITE);
		}
		bus_dmamap_unload(slot->adma2_tag, slot->adma2_map);
	} else if (!slot->data_done && (slot->flags & SDHCI_USE_SDMA)) {
		bus_dmamem_t *sdma = &slot->sdma_mem;

		if (data->flags & MMC_DATA_READ) {
			/* Copy the final (possibly partial) block out of the
			 * SDMA bounce buffer. */
			size_t left = data->len - slot->offset;
			bus_dmamap_sync(sdma->dmem_tag, sdma->dmem_map,
			    BUS_DMASYNC_POSTREAD);
			memcpy((u_char*)data->data + slot->offset,
			    sdma->dmem_addr,
			    (left < DMA_BLOCK_SIZE)?left:DMA_BLOCK_SIZE);
		} else {
			bus_dmamap_sync(sdma->dmem_tag, sdma->dmem_map,
			    BUS_DMASYNC_POSTWRITE);
		}
	}
	slot->data_done = 1;
	/* If there was an error - reset the host. */
	if (slot->curcmd->error) {
		sdhci_reset(slot, SDHCI_RESET_CMD);
		sdhci_reset(slot, SDHCI_RESET_DATA);
		sdhci_start(slot);
		return;
	}
	/* If we already have command response - finish.
	 */
	if (slot->cmd_done)
		sdhci_start(slot);
}

/*
 * Request state machine: issue the next step of the pending request, or
 * complete it when the command (and optional stop) has been processed.
 */
static void
sdhci_start(struct sdhci_slot *slot)
{
	struct mmc_request *req;

	req = slot->req;
	if (req == NULL)
		return;

	if (!(slot->flags & CMD_STARTED)) {
		slot->flags |= CMD_STARTED;
		sdhci_start_command(slot, req->cmd);
		return;
	}
/* We don't need this until using Auto-CMD12 feature
	if (!(slot->flags & STOP_STARTED) && req->stop) {
		slot->flags |= STOP_STARTED;
		sdhci_start_command(slot, req->stop);
		return;
	}
*/
	if (sdhci_debug > 1)
		slot_printf(slot, "result: %d\n", req->cmd->error);
	/* Quirky controllers need a reset after every successful request. */
	if (!req->cmd->error &&
	    (slot->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST)) {
		sdhci_reset(slot, SDHCI_RESET_CMD);
		sdhci_reset(slot, SDHCI_RESET_DATA);
	}

	sdhci_req_done(slot);
}

/*
 * mmcbr request method: queue a request on the slot and kick the state
 * machine.  Returns EBUSY if a request is already in flight.  When
 * dumping, polls the controller until the request completes.
 */
int
sdhci_generic_request(device_t brdev __unused, device_t reqdev,
    struct mmc_request *req)
{
	struct sdhci_slot *slot = device_get_ivars(reqdev);

	SDHCI_LOCK(slot);
	if (slot->req != NULL) {
		SDHCI_UNLOCK(slot);
		return (EBUSY);
	}
	if (sdhci_debug > 1) {
		slot_printf(slot, "CMD%u arg %#x flags %#x dlen %u dflags %#x\n",
		    req->cmd->opcode, req->cmd->arg, req->cmd->flags,
		    (req->cmd->data)?(u_int)req->cmd->data->len:0,
		    (req->cmd->data)?req->cmd->data->flags:0);
	}
	slot->req = req;
	slot->flags = 0;
	sdhci_start(slot);
	SDHCI_UNLOCK(slot);
	/* Interrupts may be unusable while dumping; poll for completion. */
	if (dumping) {
		while (slot->req != NULL) {
			sdhci_generic_intr(slot);
			DELAY(10);
		}
	}
	return (0);
}

/*
 * mmcbr get_ro method: report the write-protect switch state from the
 * PRESENT_STATE register (bit set means write enabled, hence negation).
 */
int
sdhci_generic_get_ro(device_t brdev __unused, device_t reqdev)
{
	struct sdhci_slot *slot = device_get_ivars(reqdev);
	uint32_t val;

	SDHCI_LOCK(slot);
	val = RD4(slot, SDHCI_PRESENT_STATE);
	SDHCI_UNLOCK(slot);
	return (!(val & SDHCI_WRITE_PROTECT));
}

int
sdhci_generic_acquire_host(device_t brdev __unused, device_t reqdev)
{
	struct sdhci_slot *slot = device_get_ivars(reqdev);
	int err = 0;

	SDHCI_LOCK(slot);
	/* Sleep until any other bus user releases the host. */
	while (slot->bus_busy)
		lksleep(slot, &slot->lock, 0, "sdhciah", 0);
	slot->bus_busy++;
	/* Activate led. */
	WR1(slot, SDHCI_HOST_CONTROL, slot->hostctrl |= SDHCI_CTRL_LED);
	SDHCI_UNLOCK(slot);
	return (err);
}

/* mmcbr release_host method: drop bus ownership and wake any waiters. */
int
sdhci_generic_release_host(device_t brdev __unused, device_t reqdev)
{
	struct sdhci_slot *slot = device_get_ivars(reqdev);

	SDHCI_LOCK(slot);
	/* Deactivate led. */
	WR1(slot, SDHCI_HOST_CONTROL, slot->hostctrl &= ~SDHCI_CTRL_LED);
	slot->bus_busy--;
	SDHCI_UNLOCK(slot);
	wakeup(slot);
	return (0);
}

/*
 * Handle command-phase interrupt status bits: map error bits to MMC
 * error codes and complete the command phase.
 */
static void
sdhci_cmd_irq(struct sdhci_slot *slot, uint32_t intmask)
{

	if (!slot->curcmd) {
		slot_printf(slot, "Got command interrupt 0x%08x, but "
		    "there is no active command.\n", intmask);
		sdhci_dumpregs(slot);
		return;
	}
	if (intmask & SDHCI_INT_TIMEOUT)
		slot->curcmd->error = MMC_ERR_TIMEOUT;
	else if (intmask & SDHCI_INT_CRC)
		slot->curcmd->error = MMC_ERR_BADCRC;
	else if (intmask & (SDHCI_INT_END_BIT | SDHCI_INT_INDEX))
		slot->curcmd->error = MMC_ERR_FIFO;

	sdhci_finish_command(slot);
}

/*
 * Handle data-phase interrupt status bits: classify errors, service PIO
 * and SDMA-border transfers, and complete the data phase.
 */
static void
sdhci_data_irq(struct sdhci_slot *slot, uint32_t intmask)
{

	if (!slot->curcmd) {
		slot_printf(slot, "Got data interrupt 0x%08x, but "
		    "there is no active command.\n", intmask);
		sdhci_dumpregs(slot);
		return;
	}
	/* Data interrupt with neither data nor a busy-wait command. */
	if (slot->curcmd->data == NULL &&
	    (slot->curcmd->flags & MMC_RSP_BUSY) == 0) {
		slot_printf(slot, "Got data interrupt 0x%08x, but "
		    "there is no active data operation.\n",
		    intmask);
		sdhci_dumpregs(slot);
		return;
	}
	if (intmask & SDHCI_INT_DATA_TIMEOUT)
		slot->curcmd->error = MMC_ERR_TIMEOUT;
	else if (intmask & (SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_END_BIT))
		slot->curcmd->error = MMC_ERR_BADCRC;
	/* Transfer-type interrupts are invalid for a busy-only command. */
	if (slot->curcmd->data == NULL &&
	    (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL |
	    SDHCI_INT_DMA_END))) {
		slot_printf(slot, "Got data interrupt 0x%08x, but "
		    "there is busy-only command.\n", intmask);
		sdhci_dumpregs(slot);
		slot->curcmd->error = MMC_ERR_INVALID;
	}
	if (slot->curcmd->error) {
		/* No need to continue after any error. */
		goto done;
	}

	/* Handle PIO interrupt. */
	if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL)) {
		/* The platform may take over the PIO transfer entirely. */
		if ((slot->opt & SDHCI_PLATFORM_TRANSFER) &&
		    SDHCI_PLATFORM_WILL_HANDLE(slot->bus, slot)) {
			SDHCI_PLATFORM_START_TRANSFER(slot->bus, slot, &intmask);
			slot->flags |= PLATFORM_DATA_STARTED;
		} else
			sdhci_transfer_pio(slot);
	}
	/* Handle DMA border. */
	if (intmask & SDHCI_INT_DMA_END) {
		struct mmc_data *data = slot->curcmd->data;
		bus_dmamem_t *sdma = &slot->sdma_mem;
		size_t left;

		/* Unload DMA buffer ... */
		left = data->len - slot->offset;
		if (data->flags & MMC_DATA_READ) {
			bus_dmamap_sync(sdma->dmem_tag, sdma->dmem_map,
			    BUS_DMASYNC_POSTREAD);
			memcpy((u_char*)data->data + slot->offset,
			    sdma->dmem_addr,
			    (left < DMA_BLOCK_SIZE)?left:DMA_BLOCK_SIZE);
		} else {
			bus_dmamap_sync(sdma->dmem_tag, sdma->dmem_map,
			    BUS_DMASYNC_POSTWRITE);
		}
		/* ... and reload it again.
*/ 1476 slot->offset += DMA_BLOCK_SIZE; 1477 left = data->len - slot->offset; 1478 if (data->flags & MMC_DATA_READ) { 1479 bus_dmamap_sync(sdma->dmem_tag, sdma->dmem_map, 1480 BUS_DMASYNC_PREREAD); 1481 } else { 1482 memcpy(sdma->dmem_addr, 1483 (u_char*)data->data + slot->offset, 1484 (left < DMA_BLOCK_SIZE)?left:DMA_BLOCK_SIZE); 1485 bus_dmamap_sync(sdma->dmem_tag, sdma->dmem_map, 1486 BUS_DMASYNC_PREWRITE); 1487 } 1488 /* Interrupt aggregation: Mask border interrupt 1489 * for the last page. */ 1490 if (left == DMA_BLOCK_SIZE) { 1491 slot->intmask &= ~SDHCI_INT_DMA_END; 1492 WR4(slot, SDHCI_SIGNAL_ENABLE, slot->intmask); 1493 } 1494 /* Restart DMA. */ 1495 WR4(slot, SDHCI_SDMA_ADDRESS, sdma->dmem_busaddr); 1496 } 1497 /* We have got all data. */ 1498 if (intmask & SDHCI_INT_DATA_END) { 1499 if (slot->flags & PLATFORM_DATA_STARTED) { 1500 slot->flags &= ~PLATFORM_DATA_STARTED; 1501 SDHCI_PLATFORM_FINISH_TRANSFER(slot->bus, slot); 1502 } else { 1503 sdhci_finish_data(slot); 1504 } 1505 } 1506 done: 1507 if (slot->curcmd != NULL && slot->curcmd->error != 0) { 1508 if (slot->flags & PLATFORM_DATA_STARTED) { 1509 slot->flags &= ~PLATFORM_DATA_STARTED; 1510 SDHCI_PLATFORM_FINISH_TRANSFER(slot->bus, slot); 1511 } else { 1512 sdhci_finish_data(slot); 1513 } 1514 return; 1515 } 1516 } 1517 1518 static void 1519 sdhci_acmd_irq(struct sdhci_slot *slot) 1520 { 1521 uint16_t err; 1522 1523 err = RD4(slot, SDHCI_ACMD12_ERR); 1524 if (!slot->curcmd) { 1525 slot_printf(slot, "Got AutoCMD12 error 0x%04x, but " 1526 "there is no active command.\n", err); 1527 sdhci_dumpregs(slot); 1528 return; 1529 } 1530 slot_printf(slot, "Got AutoCMD12 error 0x%04x\n", err); 1531 sdhci_reset(slot, SDHCI_RESET_CMD); 1532 } 1533 1534 static void 1535 sdhci_adma_irq(struct sdhci_slot *slot) 1536 { 1537 bus_dmamem_t *descmem = &slot->adma2_descs; 1538 struct sdhci_adma2_desc32 *desc; 1539 bus_addr_t addr = 0; 1540 uint8_t err, adma_state; 1541 1542 err = RD1(slot, SDHCI_ADMA_ERR); 1543 if 
 (slot->curcmd && (slot->flags & SDHCI_USE_ADMA2)) {
		slot_printf(slot, "Got ADMA2 error 0x%02x\n", err);
	} else {
		slot_printf(slot, "Got ADMA2 error 0x%02x, but "
		    "there is no active command.\n", err);
		sdhci_dumpregs(slot);
	}

	/* Try to print the erronous ADMA2 descriptor */
	adma_state = err & SDHCI_ADMA_ERR_STATE_MASK;
	if (adma_state == SDHCI_ADMA_ERR_STATE_STOP) {
		/* Controller stopped after the failing descriptor. */
		addr = RD4(slot, SDHCI_ADMA_ADDRESS_LOW);
		if (addr > sizeof(*desc))
			addr -= sizeof(*desc);
		else
			addr = 0;
	} else if (adma_state == SDHCI_ADMA_ERR_STATE_FDS) {
		/* Error while fetching: address points at the descriptor. */
		addr = RD4(slot, SDHCI_ADMA_ADDRESS_LOW);
	} else if (adma_state == SDHCI_ADMA_ERR_STATE_TFR) {
		/* Error during transfer: step back one descriptor. */
		addr = RD4(slot, SDHCI_ADMA_ADDRESS_LOW);
		if (addr > sizeof(*desc))
			addr -= sizeof(*desc);
		else
			addr = 0;
	} else {
		slot_printf(slot, "Invalid ADMA2 state 0x%02x\n", adma_state);
	}
	/* Only dereference addresses inside our descriptor buffer. */
	if (addr >= descmem->dmem_busaddr &&
	    addr < descmem->dmem_busaddr + SDHCI_ADMA2_DESCBUF_SIZE) {
		desc = (void *) ((char *)descmem->dmem_addr +
		    (addr - descmem->dmem_busaddr));
		slot_printf(slot,
		    "Descriptor: Addr=0x%08x Length=0x%04x Attr=0x%04x\n",
		    desc->address, desc->length, desc->attribute);
	}

	if (slot->curcmd && (slot->flags & SDHCI_USE_ADMA2)) {
		sdhci_reset(slot, SDHCI_RESET_CMD);
	}
}

/*
 * Main slot interrupt handler: read the interrupt status and dispatch
 * to the card-presence, command, data, AutoCMD12 and ADMA2 handlers.
 */
void
sdhci_generic_intr(struct sdhci_slot *slot)
{
	uint32_t intmask;

	SDHCI_LOCK(slot);
	/* Read slot interrupt status. */
	intmask = RD4(slot, SDHCI_INT_STATUS);
	/* 0xffffffff typically means the device is gone. */
	if (intmask == 0 || intmask == 0xffffffff) {
		SDHCI_UNLOCK(slot);
		return;
	}
	if (sdhci_debug > 2)
		slot_printf(slot, "Interrupt %#x\n", intmask);

	/* Handle card presence interrupts.
	 */
	if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
		/* Acknowledge only the presence bits. */
		WR4(slot, SDHCI_INT_STATUS, intmask &
		    (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE));

		if (intmask & SDHCI_INT_CARD_REMOVE) {
			if (bootverbose || sdhci_debug)
				slot_printf(slot, "Card removed\n");
			callout_stop(&slot->card_callout);
			taskqueue_enqueue(taskqueue_swi_mp, &slot->card_task);
		}
		if (intmask & SDHCI_INT_CARD_INSERT) {
			if (bootverbose || sdhci_debug)
				slot_printf(slot, "Card inserted\n");
			/* Debounce insertion for half a second. */
			callout_reset(&slot->card_callout, hz / 2,
			    sdhci_card_delay, slot);
		}
		intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE);
	}
	/* Handle command interrupts. */
	if (intmask & SDHCI_INT_CMD_MASK) {
		WR4(slot, SDHCI_INT_STATUS, intmask & SDHCI_INT_CMD_MASK);
		sdhci_cmd_irq(slot, intmask & SDHCI_INT_CMD_MASK);
	}
	/* Handle data interrupts. */
	if (intmask & SDHCI_INT_DATA_MASK) {
		WR4(slot, SDHCI_INT_STATUS, intmask & SDHCI_INT_DATA_MASK);
		/* Dont call data_irq in case of errored command */
		if ((intmask & SDHCI_INT_CMD_ERROR_MASK) == 0)
			sdhci_data_irq(slot, intmask & SDHCI_INT_DATA_MASK);
	}
	/* Handle AutoCMD12 error interrupt. */
	if (intmask & SDHCI_INT_ACMD12ERR) {
		WR4(slot, SDHCI_INT_STATUS, SDHCI_INT_ACMD12ERR);
		sdhci_acmd_irq(slot);
	}
	/* Handle ADMA2 error interrupt. */
	if (intmask & SDHCI_INT_ADMAERR) {
		WR4(slot, SDHCI_INT_STATUS, SDHCI_INT_ADMAERR);
		sdhci_adma_irq(slot);
	}
	/* Strip everything handled above; the rest is unexpected. */
	intmask &= ~(SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK);
	intmask &= ~SDHCI_INT_ACMD12ERR;
	intmask &= ~SDHCI_INT_ADMAERR;
	intmask &= ~SDHCI_INT_ERROR;
	/* Handle bus power interrupt.
	 */
	if (intmask & SDHCI_INT_BUS_POWER) {
		WR4(slot, SDHCI_INT_STATUS, SDHCI_INT_BUS_POWER);
		slot_printf(slot,
		    "Card is consuming too much power!\n");
		intmask &= ~SDHCI_INT_BUS_POWER;
	}
	/* The rest is unknown. */
	if (intmask) {
		WR4(slot, SDHCI_INT_STATUS, intmask);
		slot_printf(slot, "Unexpected interrupt 0x%08x.\n",
		    intmask);
		sdhci_dumpregs(slot);
	}

	SDHCI_UNLOCK(slot);
}

/*
 * mmcbr read_ivar method: export host/slot state to the mmc bus child.
 */
int
sdhci_generic_read_ivar(device_t bus, device_t child, int which, uintptr_t *result)
{
	struct sdhci_slot *slot = device_get_ivars(child);

	switch (which) {
	default:
		return (EINVAL);
	case MMCBR_IVAR_BUS_MODE:
		*(int *)result = slot->host.ios.bus_mode;
		break;
	case MMCBR_IVAR_BUS_WIDTH:
		*(int *)result = slot->host.ios.bus_width;
		break;
	case MMCBR_IVAR_CHIP_SELECT:
		*(int *)result = slot->host.ios.chip_select;
		break;
	case MMCBR_IVAR_CLOCK:
		*(int *)result = slot->host.ios.clock;
		break;
	case MMCBR_IVAR_F_MIN:
		*(int *)result = slot->host.f_min;
		break;
	case MMCBR_IVAR_F_MAX:
		*(int *)result = slot->host.f_max;
		break;
	case MMCBR_IVAR_HOST_OCR:
		*(int *)result = slot->host.host_ocr;
		break;
	case MMCBR_IVAR_MODE:
		*(int *)result = slot->host.mode;
		break;
	case MMCBR_IVAR_OCR:
		*(int *)result = slot->host.ocr;
		break;
	case MMCBR_IVAR_POWER_MODE:
		*(int *)result = slot->host.ios.power_mode;
		break;
	case MMCBR_IVAR_VDD:
		*(int *)result = slot->host.ios.vdd;
		break;
	case MMCBR_IVAR_CAPS:
		*(int *)result = slot->host.caps;
		break;
	case MMCBR_IVAR_TIMING:
		*(int *)result = slot->host.ios.timing;
		break;
	case MMCBR_IVAR_MAX_DATA:
		/* 16-bit block count register limits a request to 65535 blocks. */
		*(int *)result = 65535;
		break;
	case MMCBR_IVAR_MAX_BUSY_TIMEOUT:
		/*
		 * Currently, sdhci_start_data() hardcodes 1 s for all CMDs.
		 */
		*result = 1000000;
		break;
	}
	return (0);
}

/*
 * mmcbr write_ivar method: accept ios settings from the mmc bus child.
 * Read-only ivars (caps, ocr ranges, frequency limits) return EINVAL.
 */
int
sdhci_generic_write_ivar(device_t bus, device_t child, int which, uintptr_t value)
{
	struct sdhci_slot *slot = device_get_ivars(child);
	uint32_t clock, max_clock;
	int i;

	switch (which) {
	default:
		return (EINVAL);
	case MMCBR_IVAR_BUS_MODE:
		slot->host.ios.bus_mode = value;
		break;
	case MMCBR_IVAR_BUS_WIDTH:
		slot->host.ios.bus_width = value;
		break;
	case MMCBR_IVAR_CHIP_SELECT:
		slot->host.ios.chip_select = value;
		break;
	case MMCBR_IVAR_CLOCK:
		/* Compute the closest achievable frequency <= value. */
		if (value > 0) {
			max_clock = slot->max_clk;
			clock = max_clock;

			if (slot->version < SDHCI_SPEC_300) {
				/*
				 * Pre-3.0: power-of-two dividers.
				 * NOTE(review): i never advances (0 << 1 == 0);
				 * the loop terminates because clock >>= 1
				 * eventually drops to <= value, so the divider
				 * bound is never actually enforced — matches
				 * the upstream code, but worth confirming.
				 */
				for (i = 0; i < SDHCI_200_MAX_DIVIDER;
				    i <<= 1) {
					if (clock <= value)
						break;
					clock >>= 1;
				}
			} else {
				/* 3.0+: even dividers max_clock / (i + 2). */
				for (i = 0; i < SDHCI_300_MAX_DIVIDER;
				    i += 2) {
					if (clock <= value)
						break;
					clock = max_clock / (i + 2);
				}
			}

			slot->host.ios.clock = clock;
		} else
			slot->host.ios.clock = 0;
		break;
	case MMCBR_IVAR_MODE:
		slot->host.mode = value;
		break;
	case MMCBR_IVAR_OCR:
		slot->host.ocr = value;
		break;
	case MMCBR_IVAR_POWER_MODE:
		slot->host.ios.power_mode = value;
		break;
	case MMCBR_IVAR_VDD:
		slot->host.ios.vdd = value;
		break;
	case MMCBR_IVAR_TIMING:
		slot->host.ios.timing = value;
		break;
	case MMCBR_IVAR_CAPS:
	case MMCBR_IVAR_HOST_OCR:
	case MMCBR_IVAR_F_MIN:
	case MMCBR_IVAR_F_MAX:
	case MMCBR_IVAR_MAX_DATA:
		/* Read-only ivars. */
		return (EINVAL);
	}
	return (0);
}

MODULE_VERSION(sdhci, 1);