// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver
 *
 * Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
 *
 * Thanks to the following companies for their support:
 *
 * - JMicron (hardware and technical support)
 */

#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/ktime.h>
#include <linux/highmem.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/sizes.h>
#include <linux/regulator/consumer.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>

#include <linux/leds.h>

#include <linux/mmc/mmc.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/slot-gpio.h>

#include "sdhci.h"

#define DRIVER_NAME "sdhci"

#define DBG(f, x...) \
	pr_debug("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)

#define SDHCI_DUMP(f, x...) \
	pr_err("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)

#define MAX_TUNING_LOOP 40

static unsigned int debug_quirks = 0;
static unsigned int debug_quirks2;

static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);

static bool sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd);

void sdhci_dumpregs(struct sdhci_host *host)
{
	SDHCI_DUMP("============ SDHCI REGISTER DUMP ===========\n");

	SDHCI_DUMP("Sys addr: 0x%08x | Version: 0x%08x\n",
		   sdhci_readl(host, SDHCI_DMA_ADDRESS),
		   sdhci_readw(host, SDHCI_HOST_VERSION));
	SDHCI_DUMP("Blk size: 0x%08x | Blk cnt: 0x%08x\n",
		   sdhci_readw(host, SDHCI_BLOCK_SIZE),
		   sdhci_readw(host, SDHCI_BLOCK_COUNT));
	SDHCI_DUMP("Argument: 0x%08x | Trn mode: 0x%08x\n",
		   sdhci_readl(host, SDHCI_ARGUMENT),
		   sdhci_readw(host, SDHCI_TRANSFER_MODE));
	SDHCI_DUMP("Present: 0x%08x | Host ctl: 0x%08x\n",
		   sdhci_readl(host, SDHCI_PRESENT_STATE),
		   sdhci_readb(host, SDHCI_HOST_CONTROL));
	SDHCI_DUMP("Power: 0x%08x | Blk gap: 0x%08x\n",
		   sdhci_readb(host, SDHCI_POWER_CONTROL),
		   sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
	SDHCI_DUMP("Wake-up: 0x%08x | Clock: 0x%08x\n",
		   sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
		   sdhci_readw(host, SDHCI_CLOCK_CONTROL));
	SDHCI_DUMP("Timeout: 0x%08x | Int stat: 0x%08x\n",
		   sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
		   sdhci_readl(host, SDHCI_INT_STATUS));
	SDHCI_DUMP("Int enab: 0x%08x | Sig enab: 0x%08x\n",
		   sdhci_readl(host, SDHCI_INT_ENABLE),
		   sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
	SDHCI_DUMP("ACmd stat: 0x%08x | Slot int: 0x%08x\n",
		   sdhci_readw(host, SDHCI_AUTO_CMD_STATUS),
		   sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
	SDHCI_DUMP("Caps: 0x%08x | Caps_1: 0x%08x\n",
		   sdhci_readl(host, SDHCI_CAPABILITIES),
		   sdhci_readl(host, SDHCI_CAPABILITIES_1));
	SDHCI_DUMP("Cmd: 0x%08x | Max curr: 0x%08x\n",
		   sdhci_readw(host, SDHCI_COMMAND),
		   sdhci_readl(host, SDHCI_MAX_CURRENT));
	SDHCI_DUMP("Resp[0]: 0x%08x | Resp[1]: 0x%08x\n",
		   sdhci_readl(host, SDHCI_RESPONSE),
		   sdhci_readl(host, SDHCI_RESPONSE + 4));
	SDHCI_DUMP("Resp[2]: 0x%08x | Resp[3]: 0x%08x\n",
		   sdhci_readl(host, SDHCI_RESPONSE + 8),
		   sdhci_readl(host, SDHCI_RESPONSE + 12));
	SDHCI_DUMP("Host ctl2: 0x%08x\n",
		   sdhci_readw(host, SDHCI_HOST_CONTROL2));

	if (host->flags & SDHCI_USE_ADMA) {
		if (host->flags & SDHCI_USE_64_BIT_DMA) {
			SDHCI_DUMP("ADMA Err: 0x%08x | ADMA Ptr: 0x%08x%08x\n",
				   sdhci_readl(host, SDHCI_ADMA_ERROR),
				   sdhci_readl(host, SDHCI_ADMA_ADDRESS_HI),
				   sdhci_readl(host, SDHCI_ADMA_ADDRESS));
		} else {
			SDHCI_DUMP("ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n",
				   sdhci_readl(host, SDHCI_ADMA_ERROR),
				   sdhci_readl(host, SDHCI_ADMA_ADDRESS));
		}
	}

	if (host->ops->dump_vendor_regs)
		host->ops->dump_vendor_regs(host);

	SDHCI_DUMP("============================================\n");
}
EXPORT_SYMBOL_GPL(sdhci_dumpregs);

/*****************************************************************************\
 *                                                                           *
 * Low level functions                                                       *
 *                                                                           *
\*****************************************************************************/

static void sdhci_do_enable_v4_mode(struct sdhci_host *host)
{
	u16 ctrl2;

	ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
	if (ctrl2 & SDHCI_CTRL_V4_MODE)
		return;

	ctrl2 |= SDHCI_CTRL_V4_MODE;
	sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
}

/*
 * This can be called before sdhci_add_host() by a vendor's host controller
 * driver to enable v4 mode if it is supported.
 */
void sdhci_enable_v4_mode(struct sdhci_host *host)
{
	host->v4_mode = true;
	sdhci_do_enable_v4_mode(host);
}
EXPORT_SYMBOL_GPL(sdhci_enable_v4_mode);

static inline bool sdhci_data_line_cmd(struct mmc_command *cmd)
{
	return cmd->data || cmd->flags & MMC_RSP_BUSY;
}

static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
{
	u32 present;

	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
	    !mmc_card_is_removable(host->mmc) || mmc_can_gpio_cd(host->mmc))
		return;

	if (enable) {
		present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
			  SDHCI_CARD_PRESENT;

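		/*
		 * Arm only the interrupt for the opposite of the current
		 * state, so the next insert/remove transition is reported.
		 */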
		host->ier |= present ? SDHCI_INT_CARD_REMOVE :
				       SDHCI_INT_CARD_INSERT;
	} else {
		host->ier &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
	}

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_enable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, true);
}

static void sdhci_disable_card_detection(struct sdhci_host *host)
{
	sdhci_set_card_detection(host, false);
}

static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
{
	if (host->bus_on)
		return;
	host->bus_on = true;
	pm_runtime_get_noresume(mmc_dev(host->mmc));
}

static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
{
	if (!host->bus_on)
		return;
	host->bus_on = false;
	pm_runtime_put_noidle(mmc_dev(host->mmc));
}

void sdhci_reset(struct sdhci_host *host, u8 mask)
{
	ktime_t timeout;

	sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);

	if (mask & SDHCI_RESET_ALL) {
		host->clock = 0;
		/* Reset-all turns off SD Bus Power */
		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
			sdhci_runtime_pm_bus_off(host);
	}

	/* Wait max 100 ms */
	timeout = ktime_add_ms(ktime_get(), 100);

	/* hw clears the bit when it's done */
	while (1) {
		bool timedout = ktime_after(ktime_get(), timeout);

		if (!(sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask))
			break;
		if (timedout) {
			pr_err("%s: Reset 0x%x never completed.\n",
			       mmc_hostname(host->mmc), (int)mask);
			sdhci_dumpregs(host);
			return;
		}
		udelay(10);
	}
}
EXPORT_SYMBOL_GPL(sdhci_reset);

static void sdhci_do_reset(struct sdhci_host *host, u8 mask)
{
	if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
		struct mmc_host *mmc = host->mmc;

		if (!mmc->ops->get_cd(mmc))
			return;
	}

	host->ops->reset(host, mask);

	if (mask & SDHCI_RESET_ALL) {
		if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
			if (host->ops->enable_dma)
				host->ops->enable_dma(host);
		}

		/* Resetting the controller clears many settings */
		host->preset_enabled = false;
	}
}

static void sdhci_set_default_irqs(struct sdhci_host *host)
{
	host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
		    SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT |
		    SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC |
		    SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END |
		    SDHCI_INT_RESPONSE;

	if (host->tuning_mode == SDHCI_TUNING_MODE_2 ||
	    host->tuning_mode == SDHCI_TUNING_MODE_3)
		host->ier |= SDHCI_INT_RETUNE;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

static void sdhci_config_dma(struct sdhci_host *host)
{
	u8 ctrl;
	u16 ctrl2;

	if (host->version < SDHCI_SPEC_200)
		return;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);

	/*
	 * Always adjust the DMA selection as some controllers
	 * (e.g. JMicron) can't do PIO properly when the selection
	 * is ADMA.
	 */
	ctrl &= ~SDHCI_CTRL_DMA_MASK;
	if (!(host->flags & SDHCI_REQ_USE_DMA))
		goto out;

	/* Note if DMA Select is zero then SDMA is selected */
	if (host->flags & SDHCI_USE_ADMA)
		ctrl |= SDHCI_CTRL_ADMA32;

	if (host->flags & SDHCI_USE_64_BIT_DMA) {
		/*
		 * If v4 mode, all supported DMA can be 64-bit addressing if
		 * controller supports 64-bit system address, otherwise only
		 * ADMA can support 64-bit addressing.
		 */
		if (host->v4_mode) {
			ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
			ctrl2 |= SDHCI_CTRL_64BIT_ADDR;
			sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
		} else if (host->flags & SDHCI_USE_ADMA) {
			/*
			 * Don't need to undo SDHCI_CTRL_ADMA32 in order to
			 * set SDHCI_CTRL_ADMA64.
			 */
			ctrl |= SDHCI_CTRL_ADMA64;
		}
	}

out:
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}

static void sdhci_init(struct sdhci_host *host, int soft)
{
	struct mmc_host *mmc = host->mmc;
	unsigned long flags;

	if (soft)
		sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
	else
		sdhci_do_reset(host, SDHCI_RESET_ALL);

	if (host->v4_mode)
		sdhci_do_enable_v4_mode(host);

	spin_lock_irqsave(&host->lock, flags);
	sdhci_set_default_irqs(host);
	spin_unlock_irqrestore(&host->lock, flags);

	host->cqe_on = false;

	if (soft) {
		/* force clock reconfiguration */
		host->clock = 0;
		mmc->ops->set_ios(mmc, &mmc->ios);
	}
}

static void sdhci_reinit(struct sdhci_host *host)
{
	u32 cd = host->ier & (SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);

	sdhci_init(host, 0);
	sdhci_enable_card_detection(host);

	/*
	 * A change to the card detect bits indicates a change in present
	 * state, refer sdhci_set_card_detection(). A card detect interrupt
	 * might have been missed while the host controller was being reset,
	 * so trigger a rescan to check.
	 */
	if (cd != (host->ier & (SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT)))
		mmc_detect_change(host->mmc, msecs_to_jiffies(200));
}

static void __sdhci_led_activate(struct sdhci_host *host)
{
	u8 ctrl;

	if (host->quirks & SDHCI_QUIRK_NO_LED)
		return;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	ctrl |= SDHCI_CTRL_LED;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}

static void __sdhci_led_deactivate(struct sdhci_host *host)
{
	u8 ctrl;

	if (host->quirks & SDHCI_QUIRK_NO_LED)
		return;

	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
	ctrl &= ~SDHCI_CTRL_LED;
	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
}

#if IS_REACHABLE(CONFIG_LEDS_CLASS)
static void sdhci_led_control(struct led_classdev *led,
			      enum led_brightness brightness)
{
	struct sdhci_host *host = container_of(led, struct sdhci_host, led);
	unsigned long flags;

	spin_lock_irqsave(&host->lock, flags);

	if (host->runtime_suspended)
		goto out;

	if (brightness == LED_OFF)
		__sdhci_led_deactivate(host);
	else
		__sdhci_led_activate(host);
out:
	spin_unlock_irqrestore(&host->lock, flags);
}

static int sdhci_led_register(struct sdhci_host *host)
{
	struct mmc_host *mmc = host->mmc;

	if (host->quirks & SDHCI_QUIRK_NO_LED)
		return 0;

	snprintf(host->led_name, sizeof(host->led_name),
		 "%s::", mmc_hostname(mmc));

	host->led.name = host->led_name;
	host->led.brightness = LED_OFF;
	host->led.default_trigger = mmc_hostname(mmc);
	host->led.brightness_set = sdhci_led_control;

	return led_classdev_register(mmc_dev(mmc), &host->led);
}

static void sdhci_led_unregister(struct sdhci_host *host)
{
	if (host->quirks & SDHCI_QUIRK_NO_LED)
		return;

	led_classdev_unregister(&host->led);
}

static inline void sdhci_led_activate(struct sdhci_host *host)
{
}

static inline void sdhci_led_deactivate(struct sdhci_host *host)
{
}

#else

static inline int sdhci_led_register(struct sdhci_host *host)
{
	return 0;
}

static inline void sdhci_led_unregister(struct sdhci_host *host)
{
}

static inline void sdhci_led_activate(struct sdhci_host *host)
{
	__sdhci_led_activate(host);
}

static inline void sdhci_led_deactivate(struct sdhci_host *host)
{
	__sdhci_led_deactivate(host);
}

#endif

static void sdhci_mod_timer(struct sdhci_host *host, struct mmc_request *mrq,
			    unsigned long timeout)
{
	if (sdhci_data_line_cmd(mrq->cmd))
		mod_timer(&host->data_timer, timeout);
	else
		mod_timer(&host->timer, timeout);
}

static void sdhci_del_timer(struct sdhci_host *host, struct mmc_request *mrq)
{
	if (sdhci_data_line_cmd(mrq->cmd))
		del_timer(&host->data_timer);
	else
		del_timer(&host->timer);
}

static inline bool sdhci_has_requests(struct sdhci_host *host)
{
	return host->cmd || host->data_cmd;
}

/*****************************************************************************\
 *                                                                           *
 * Core functions                                                            *
 *                                                                           *
\*****************************************************************************/

static void sdhci_read_block_pio(struct sdhci_host *host)
{
	unsigned long flags;
	size_t blksize, len, chunk;
	u32 scratch;
	u8 *buf;

	DBG("PIO reading\n");

	blksize = host->data->blksz;
	chunk = 0;

	local_irq_save(flags);

	while (blksize) {
		BUG_ON(!sg_miter_next(&host->sg_miter));

		len = min(host->sg_miter.length, blksize);

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;

		while (len) {
			if (chunk == 0) {
				scratch = sdhci_readl(host, SDHCI_BUFFER);
				chunk = 4;
			}

			*buf = scratch & 0xFF;

			buf++;
			scratch >>= 8;
			chunk--;
			len--;
		}
	}

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
}

static void sdhci_write_block_pio(struct sdhci_host *host)
{
	unsigned long flags;
	size_t blksize, len, chunk;
	u32 scratch;
	u8 *buf;

	DBG("PIO writing\n");

	blksize = host->data->blksz;
	chunk = 0;
	scratch = 0;

	local_irq_save(flags);

	while (blksize) {
		BUG_ON(!sg_miter_next(&host->sg_miter));

		len = min(host->sg_miter.length, blksize);

		blksize -= len;
		host->sg_miter.consumed = len;

		buf = host->sg_miter.addr;

		while (len) {
			scratch |= (u32)*buf << (chunk * 8);

			buf++;
			chunk++;
			len--;

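			/* Flush the 32-bit word when full or at end of data */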
			if ((chunk == 4) || ((len == 0) && (blksize == 0))) {
				sdhci_writel(host, scratch, SDHCI_BUFFER);
				chunk = 0;
				scratch = 0;
			}
		}
	}

	sg_miter_stop(&host->sg_miter);

	local_irq_restore(flags);
}

static void sdhci_transfer_pio(struct sdhci_host *host)
{
	u32 mask;

	if (host->blocks == 0)
		return;

	if (host->data->flags & MMC_DATA_READ)
		mask = SDHCI_DATA_AVAILABLE;
	else
		mask = SDHCI_SPACE_AVAILABLE;

	/*
	 * Some controllers (JMicron JMB38x) mess up the buffer bits
	 * for transfers < 4 bytes. As long as it is just one block,
	 * we can ignore the bits.
	 */
	if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) &&
	    (host->data->blocks == 1))
		mask = ~0;

	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
		if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY)
			udelay(100);

		if (host->data->flags & MMC_DATA_READ)
			sdhci_read_block_pio(host);
		else
			sdhci_write_block_pio(host);

		host->blocks--;
		if (host->blocks == 0)
			break;
	}

	DBG("PIO transfer complete.\n");
}

static int sdhci_pre_dma_transfer(struct sdhci_host *host,
				  struct mmc_data *data, int cookie)
{
	int sg_count;

	/*
	 * If the data buffers are already mapped, return the previous
	 * dma_map_sg() result.
	 */
	if (data->host_cookie == COOKIE_PRE_MAPPED)
		return data->sg_count;

	/* Bounce write requests to the bounce buffer */
	if (host->bounce_buffer) {
		unsigned int length = data->blksz * data->blocks;

		if (length > host->bounce_buffer_size) {
			pr_err("%s: requested transfer of %u bytes exceeds bounce buffer size of %u bytes\n",
			       mmc_hostname(host->mmc), length,
			       host->bounce_buffer_size);
			return -EIO;
		}
		if (mmc_get_dma_dir(data) == DMA_TO_DEVICE) {
			/* Copy the data to the bounce buffer */
			if (host->ops->copy_to_bounce_buffer) {
				host->ops->copy_to_bounce_buffer(host,
								 data, length);
			} else {
				sg_copy_to_buffer(data->sg, data->sg_len,
						  host->bounce_buffer, length);
			}
		}
		/* Switch ownership to the DMA */
		dma_sync_single_for_device(mmc_dev(host->mmc),
					   host->bounce_addr,
					   host->bounce_buffer_size,
					   mmc_get_dma_dir(data));
		/* Just a dummy value */
		sg_count = 1;
	} else {
		/* Just access the data directly from memory */
		sg_count = dma_map_sg(mmc_dev(host->mmc),
				      data->sg, data->sg_len,
				      mmc_get_dma_dir(data));
	}

	if (sg_count == 0)
		return -ENOSPC;

	data->sg_count = sg_count;
	data->host_cookie = cookie;

	return sg_count;
}

static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
{
	local_irq_save(*flags);
	return kmap_atomic(sg_page(sg)) + sg->offset;
}

static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
{
	kunmap_atomic(buffer);
	local_irq_restore(*flags);
}

void sdhci_adma_write_desc(struct sdhci_host *host, void **desc,
			   dma_addr_t addr, int len, unsigned int cmd)
{
	struct sdhci_adma2_64_desc *dma_desc = *desc;

	/* 32-bit and 64-bit descriptors have these members in same position */
	dma_desc->cmd = cpu_to_le16(cmd);
	dma_desc->len = cpu_to_le16(len);
	dma_desc->addr_lo = cpu_to_le32(lower_32_bits(addr));

	if (host->flags & SDHCI_USE_64_BIT_DMA)
		dma_desc->addr_hi = cpu_to_le32(upper_32_bits(addr));

	*desc += host->desc_sz;
}
EXPORT_SYMBOL_GPL(sdhci_adma_write_desc);

static inline void __sdhci_adma_write_desc(struct sdhci_host *host,
					   void **desc, dma_addr_t addr,
					   int len, unsigned int cmd)
{
	if (host->ops->adma_write_desc)
		host->ops->adma_write_desc(host, desc, addr, len, cmd);
	else
		sdhci_adma_write_desc(host, desc, addr, len, cmd);
}

static void sdhci_adma_mark_end(void *desc)
{
	struct sdhci_adma2_64_desc *dma_desc = desc;

	/* 32-bit and 64-bit descriptors have 'cmd' in same position */
	dma_desc->cmd |= cpu_to_le16(ADMA2_END);
}

static void sdhci_adma_table_pre(struct sdhci_host *host,
				 struct mmc_data *data, int sg_count)
{
	struct scatterlist *sg;
	unsigned long flags;
	dma_addr_t addr, align_addr;
	void *desc, *align;
	char *buffer;
	int len, offset, i;

	/*
	 * The spec does not specify the endianness of the descriptor table.
	 * We currently guess that it is little-endian.
	 */

	host->sg_count = sg_count;

	desc = host->adma_table;
	align = host->align_buffer;

	align_addr = host->align_addr;

	for_each_sg(data->sg, sg, host->sg_count, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);

		/*
		 * The SDHCI specification states that ADMA addresses must
		 * be 32-bit aligned. If they aren't, then we use a bounce
		 * buffer for the (up to three) bytes that screw up the
		 * alignment.
		 */
		offset = (SDHCI_ADMA2_ALIGN - (addr & SDHCI_ADMA2_MASK)) &
			 SDHCI_ADMA2_MASK;
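		/*
		 * e.g. (illustrative): an address ending in ...2 gives
		 * offset = 2, so the first two bytes go through the
		 * align bounce buffer.
		 */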
		if (offset) {
			if (data->flags & MMC_DATA_WRITE) {
				buffer = sdhci_kmap_atomic(sg, &flags);
				memcpy(align, buffer, offset);
				sdhci_kunmap_atomic(buffer, &flags);
			}

			/* tran, valid */
			__sdhci_adma_write_desc(host, &desc, align_addr,
						offset, ADMA2_TRAN_VALID);

			BUG_ON(offset > 65536);

			align += SDHCI_ADMA2_ALIGN;
			align_addr += SDHCI_ADMA2_ALIGN;

			addr += offset;
			len -= offset;
		}

		BUG_ON(len > 65536);

		/* tran, valid */
		if (len)
			__sdhci_adma_write_desc(host, &desc, addr, len,
						ADMA2_TRAN_VALID);

		/*
		 * If this triggers then we have a calculation bug
		 * somewhere. :/
		 */
		WARN_ON((desc - host->adma_table) >= host->adma_table_sz);
	}

	if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
		/* Mark the last descriptor as the terminating descriptor */
		if (desc != host->adma_table) {
			desc -= host->desc_sz;
			sdhci_adma_mark_end(desc);
		}
	} else {
		/* Add a terminating entry - nop, end, valid */
		__sdhci_adma_write_desc(host, &desc, 0, 0, ADMA2_NOP_END_VALID);
	}
}

static void sdhci_adma_table_post(struct sdhci_host *host,
				  struct mmc_data *data)
{
	struct scatterlist *sg;
	int i, size;
	void *align;
	char *buffer;
	unsigned long flags;

	if (data->flags & MMC_DATA_READ) {
		bool has_unaligned = false;

		/* Do a quick scan of the SG list for any unaligned mappings */
		for_each_sg(data->sg, sg, host->sg_count, i)
			if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
				has_unaligned = true;
				break;
			}

		if (has_unaligned) {
			dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
					    data->sg_len, DMA_FROM_DEVICE);

			align = host->align_buffer;

			for_each_sg(data->sg, sg, host->sg_count, i) {
				if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
					size = SDHCI_ADMA2_ALIGN -
					       (sg_dma_address(sg) & SDHCI_ADMA2_MASK);

					buffer = sdhci_kmap_atomic(sg, &flags);
					memcpy(buffer, align, size);
					sdhci_kunmap_atomic(buffer, &flags);

					align += SDHCI_ADMA2_ALIGN;
				}
			}
		}
	}
}

static void sdhci_set_adma_addr(struct sdhci_host *host, dma_addr_t addr)
{
	sdhci_writel(host, lower_32_bits(addr), SDHCI_ADMA_ADDRESS);
	if (host->flags & SDHCI_USE_64_BIT_DMA)
		sdhci_writel(host, upper_32_bits(addr), SDHCI_ADMA_ADDRESS_HI);
}

static dma_addr_t sdhci_sdma_address(struct sdhci_host *host)
{
	if (host->bounce_buffer)
		return host->bounce_addr;
	else
		return sg_dma_address(host->data->sg);
}

static void sdhci_set_sdma_addr(struct sdhci_host *host, dma_addr_t addr)
{
	if (host->v4_mode)
		sdhci_set_adma_addr(host, addr);
	else
		sdhci_writel(host, addr, SDHCI_DMA_ADDRESS);
}

static unsigned int sdhci_target_timeout(struct sdhci_host *host,
					 struct mmc_command *cmd,
					 struct mmc_data *data)
{
	unsigned int target_timeout;

	/* timeout in us */
	if (!data) {
		target_timeout = cmd->busy_timeout * 1000;
	} else {
		target_timeout = DIV_ROUND_UP(data->timeout_ns, 1000);
		if (host->clock && data->timeout_clks) {
			unsigned long long val;

			/*
			 * data->timeout_clks is in units of clock cycles.
			 * host->clock is in Hz. target_timeout is in us.
			 * Hence, us = 1000000 * cycles / Hz. Round up.
			 */
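			/*
			 * e.g. (illustrative): 100 timeout_clks at 25 MHz is
			 * 1000000 * 100 / 25000000 = 4 us.
			 */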
			val = 1000000ULL * data->timeout_clks;
			if (do_div(val, host->clock))
				target_timeout++;
			target_timeout += val;
		}
	}

	return target_timeout;
}

static void sdhci_calc_sw_timeout(struct sdhci_host *host,
				  struct mmc_command *cmd)
{
	struct mmc_data *data = cmd->data;
	struct mmc_host *mmc = host->mmc;
	struct mmc_ios *ios = &mmc->ios;
	unsigned char bus_width = 1 << ios->bus_width;
	unsigned int blksz;
	unsigned int freq;
	u64 target_timeout;
	u64 transfer_time;

	target_timeout = sdhci_target_timeout(host, cmd, data);
	target_timeout *= NSEC_PER_USEC;

	if (data) {
		blksz = data->blksz;
		freq = mmc->actual_clock ? : host->clock;
		transfer_time = (u64)blksz * NSEC_PER_SEC * (8 / bus_width);
		do_div(transfer_time, freq);
		/* multiply by '2' to account for any unknowns */
		transfer_time = transfer_time * 2;
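		/*
		 * e.g. (illustrative): blksz = 512 on a 4-bit bus at 50 MHz
		 * gives 512 * 2 = 1024 bit-periods, ~20 us per block,
		 * doubled above to ~41 us.
		 */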
		/* calculate timeout for the entire data */
		host->data_timeout = data->blocks * target_timeout +
				     transfer_time;
	} else {
		host->data_timeout = target_timeout;
	}

	if (host->data_timeout)
		host->data_timeout += MMC_CMD_TRANSFER_TIME;
}

static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd,
			     bool *too_big)
{
	u8 count;
	struct mmc_data *data;
	unsigned target_timeout, current_timeout;

	*too_big = true;

	/*
	 * If the host controller provides us with an incorrect timeout
	 * value, just skip the check and use 0xE. The hardware may take
	 * longer to time out, but that's much better than having a too-short
	 * timeout value.
	 */
	if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
		return 0xE;

	/* Unspecified command, assume max */
	if (cmd == NULL)
		return 0xE;

	data = cmd->data;
	/* Unspecified timeout, assume max */
	if (!data && !cmd->busy_timeout)
		return 0xE;

	/* timeout in us */
	target_timeout = sdhci_target_timeout(host, cmd, data);

	/*
	 * Figure out needed cycles.
	 * We do this in steps in order to fit inside a 32 bit int.
	 * The first step is the minimum timeout, which will have a
	 * minimum resolution of 6 bits:
	 * (1) 2^13*1000 > 2^22,
	 * (2) host->timeout_clk < 2^16
	 * =>
	 * (1) / (2) > 2^6
	 */
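	/*
	 * Worked example (illustrative): with timeout_clk = 1000 kHz the
	 * count = 0 timeout is 2^13 cycles = 8192 us; each increment of
	 * 'count' doubles it, so count = 0xE covers 2^27 cycles.
	 */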
	count = 0;
	current_timeout = (1 << 13) * 1000 / host->timeout_clk;
	while (current_timeout < target_timeout) {
		count++;
		current_timeout <<= 1;
		if (count >= 0xF)
			break;
	}

	if (count >= 0xF) {
		if (!(host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT))
			DBG("Too large timeout 0x%x requested for CMD%d!\n",
			    count, cmd->opcode);
		count = 0xE;
	} else {
		*too_big = false;
	}

	return count;
}

static void sdhci_set_transfer_irqs(struct sdhci_host *host)
{
	u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL;
	u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;

	if (host->flags & SDHCI_REQ_USE_DMA)
		host->ier = (host->ier & ~pio_irqs) | dma_irqs;
	else
		host->ier = (host->ier & ~dma_irqs) | pio_irqs;

	if (host->flags & (SDHCI_AUTO_CMD23 | SDHCI_AUTO_CMD12))
		host->ier |= SDHCI_INT_AUTO_CMD_ERR;
	else
		host->ier &= ~SDHCI_INT_AUTO_CMD_ERR;

	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}

void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable)
{
	if (enable)
		host->ier |= SDHCI_INT_DATA_TIMEOUT;
	else
		host->ier &= ~SDHCI_INT_DATA_TIMEOUT;
	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
}
EXPORT_SYMBOL_GPL(sdhci_set_data_timeout_irq);

void __sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
	bool too_big = false;
	u8 count = sdhci_calc_timeout(host, cmd, &too_big);

	if (too_big &&
	    host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT) {
		sdhci_calc_sw_timeout(host, cmd);
		sdhci_set_data_timeout_irq(host, false);
	} else if (!(host->ier & SDHCI_INT_DATA_TIMEOUT)) {
		sdhci_set_data_timeout_irq(host, true);
	}

	sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
}
EXPORT_SYMBOL_GPL(__sdhci_set_timeout);

static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
{
	if (host->ops->set_timeout)
		host->ops->set_timeout(host, cmd);
	else
		__sdhci_set_timeout(host, cmd);
}

static void sdhci_initialize_data(struct sdhci_host *host,
				  struct mmc_data *data)
{
	WARN_ON(host->data);

	/* Sanity checks */
	BUG_ON(data->blksz * data->blocks > 524288);
	BUG_ON(data->blksz > host->mmc->max_blk_size);
	BUG_ON(data->blocks > 65535);

	host->data = data;
	host->data_early = 0;
	host->data->bytes_xfered = 0;
}

static inline void sdhci_set_block_info(struct sdhci_host *host,
					struct mmc_data *data)
{
	/* Set the DMA boundary value and block size */
	sdhci_writew(host,
		     SDHCI_MAKE_BLKSZ(host->sdma_boundary, data->blksz),
		     SDHCI_BLOCK_SIZE);
	/*
	 * For Version 4.10 onwards, if v4 mode is enabled, 32-bit Block Count
	 * can be supported, in that case 16-bit block count register must be 0.
	 */
	if (host->version >= SDHCI_SPEC_410 && host->v4_mode &&
	    (host->quirks2 & SDHCI_QUIRK2_USE_32BIT_BLK_CNT)) {
		if (sdhci_readw(host, SDHCI_BLOCK_COUNT))
			sdhci_writew(host, 0, SDHCI_BLOCK_COUNT);
		sdhci_writew(host, data->blocks, SDHCI_32BIT_BLK_CNT);
	} else {
		sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
	}
}

static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
{
	struct mmc_data *data = cmd->data;

	sdhci_initialize_data(host, data);

	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
		struct scatterlist *sg;
		unsigned int length_mask, offset_mask;
		int i;

		host->flags |= SDHCI_REQ_USE_DMA;

		/*
		 * FIXME: This doesn't account for merging when mapping the
		 * scatterlist.
		 *
		 * The assumption here is that alignment and lengths are
		 * the same after DMA mapping to device address space.
		 */
		length_mask = 0;
		offset_mask = 0;
		if (host->flags & SDHCI_USE_ADMA) {
			if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE) {
				length_mask = 3;
				/*
				 * As we use up to 3 byte chunks to work
				 * around alignment problems, we need to
				 * check the offset as well.
				 */
				offset_mask = 3;
			}
		} else {
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
				length_mask = 3;
			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
				offset_mask = 3;
		}

		if (unlikely(length_mask | offset_mask)) {
			for_each_sg(data->sg, sg, data->sg_len, i) {
				if (sg->length & length_mask) {
					DBG("Reverting to PIO because of transfer size (%d)\n",
					    sg->length);
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
				if (sg->offset & offset_mask) {
					DBG("Reverting to PIO because of bad alignment\n");
					host->flags &= ~SDHCI_REQ_USE_DMA;
					break;
				}
			}
		}
	}

	if (host->flags & SDHCI_REQ_USE_DMA) {
		int sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);

		if (sg_cnt <= 0) {
			/*
			 * This only happens when someone fed
			 * us an invalid request.
			 */
			WARN_ON(1);
			host->flags &= ~SDHCI_REQ_USE_DMA;
		} else if (host->flags & SDHCI_USE_ADMA) {
			sdhci_adma_table_pre(host, data, sg_cnt);
			sdhci_set_adma_addr(host, host->adma_addr);
		} else {
			WARN_ON(sg_cnt != 1);
			sdhci_set_sdma_addr(host, sdhci_sdma_address(host));
		}
	}

	sdhci_config_dma(host);

	if (!(host->flags & SDHCI_REQ_USE_DMA)) {
		int flags;

		flags = SG_MITER_ATOMIC;
		if (host->data->flags & MMC_DATA_READ)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;
		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
		host->blocks = data->blocks;
	}

	sdhci_set_transfer_irqs(host);

	sdhci_set_block_info(host, data);
}

#if IS_ENABLED(CONFIG_MMC_SDHCI_EXTERNAL_DMA)

static int sdhci_external_dma_init(struct sdhci_host *host)
{
	int ret = 0;
	struct mmc_host *mmc = host->mmc;

	host->tx_chan = dma_request_chan(mmc_dev(mmc), "tx");
	if (IS_ERR(host->tx_chan)) {
		ret = PTR_ERR(host->tx_chan);
		if (ret != -EPROBE_DEFER)
			pr_warn("Failed to request TX DMA channel.\n");
		host->tx_chan = NULL;
		return ret;
	}

	host->rx_chan = dma_request_chan(mmc_dev(mmc), "rx");
	if (IS_ERR(host->rx_chan)) {
		if (host->tx_chan) {
			dma_release_channel(host->tx_chan);
			host->tx_chan = NULL;
		}

		ret = PTR_ERR(host->rx_chan);
		if (ret != -EPROBE_DEFER)
			pr_warn("Failed to request RX DMA channel.\n");
		host->rx_chan = NULL;
	}

	return ret;
}

static struct dma_chan *sdhci_external_dma_channel(struct sdhci_host *host,
						   struct mmc_data *data)
{
	return data->flags & MMC_DATA_WRITE ? host->tx_chan : host->rx_chan;
}

static int sdhci_external_dma_setup(struct sdhci_host *host,
				    struct mmc_command *cmd)
{
	int ret, i;
	enum dma_transfer_direction dir;
	struct dma_async_tx_descriptor *desc;
	struct mmc_data *data = cmd->data;
	struct dma_chan *chan;
	struct dma_slave_config cfg;
	dma_cookie_t cookie;
	int sg_cnt;

	if (!host->mapbase)
		return -EINVAL;

	cfg.src_addr = host->mapbase + SDHCI_BUFFER;
	cfg.dst_addr = host->mapbase + SDHCI_BUFFER;
	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	cfg.src_maxburst = data->blksz / 4;
	cfg.dst_maxburst = data->blksz / 4;

	/* Sanity check: all the SG entries must be aligned by block size. */
	for (i = 0; i < data->sg_len; i++) {
		if ((data->sg + i)->length % data->blksz)
			return -EINVAL;
	}

	chan = sdhci_external_dma_channel(host, data);

	ret = dmaengine_slave_config(chan, &cfg);
	if (ret)
		return ret;

	sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);
	if (sg_cnt <= 0)
		return -EINVAL;

	dir = data->flags & MMC_DATA_WRITE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
	desc = dmaengine_prep_slave_sg(chan, data->sg, data->sg_len, dir,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EINVAL;

	desc->callback = NULL;
	desc->callback_param = NULL;

	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		ret = cookie;

	return ret;
}

static void sdhci_external_dma_release(struct sdhci_host *host)
{
	if (host->tx_chan) {
		dma_release_channel(host->tx_chan);
		host->tx_chan = NULL;
	}

	if (host->rx_chan) {
		dma_release_channel(host->rx_chan);
		host->rx_chan = NULL;
	}

	sdhci_switch_external_dma(host, false);
}

static void __sdhci_external_dma_prepare_data(struct sdhci_host *host,
					      struct mmc_command *cmd)
{
	struct mmc_data *data = cmd->data;

	sdhci_initialize_data(host, data);

	host->flags |= SDHCI_REQ_USE_DMA;
	sdhci_set_transfer_irqs(host);

	sdhci_set_block_info(host, data);
}

static void sdhci_external_dma_prepare_data(struct sdhci_host *host,
					    struct mmc_command *cmd)
{
	if (!sdhci_external_dma_setup(host, cmd)) {
		__sdhci_external_dma_prepare_data(host, cmd);
	} else {
		sdhci_external_dma_release(host);
		pr_err("%s: Cannot use external DMA, falling back to the standard SDHCI DMA/PIO path.\n",
		       mmc_hostname(host->mmc));
		sdhci_prepare_data(host, cmd);
	}
}

static void sdhci_external_dma_pre_transfer(struct sdhci_host *host,
					    struct mmc_command *cmd)
{
	struct dma_chan *chan;

	if (!cmd->data)
		return;

	chan = sdhci_external_dma_channel(host, cmd->data);
	if (chan)
		dma_async_issue_pending(chan);
}

#else

static inline int sdhci_external_dma_init(struct sdhci_host *host)
{
	return -EOPNOTSUPP;
}

static inline void sdhci_external_dma_release(struct sdhci_host *host)
{
}

static inline void sdhci_external_dma_prepare_data(struct sdhci_host *host,
						   struct mmc_command *cmd)
{
	/* This should never happen */
	WARN_ON_ONCE(1);
}

static inline void sdhci_external_dma_pre_transfer(struct sdhci_host *host,
						   struct mmc_command *cmd)
{
}

static inline struct dma_chan *sdhci_external_dma_channel(struct sdhci_host *host,
							  struct mmc_data *data)
{
	return NULL;
}

#endif

void sdhci_switch_external_dma(struct sdhci_host *host, bool en)
{
	host->use_external_dma = en;
}
EXPORT_SYMBOL_GPL(sdhci_switch_external_dma);

static inline bool sdhci_auto_cmd12(struct sdhci_host *host,
				    struct mmc_request *mrq)
{
	return !mrq->sbc && (host->flags & SDHCI_AUTO_CMD12) &&
	       !mrq->cap_cmd_during_tfr;
}

static inline bool sdhci_auto_cmd23(struct sdhci_host *host,
				    struct mmc_request *mrq)
{
	return mrq->sbc && (host->flags & SDHCI_AUTO_CMD23);
}

static inline bool sdhci_manual_cmd23(struct sdhci_host *host,
				      struct mmc_request *mrq)
{
	return mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23);
}

static inline void sdhci_auto_cmd_select(struct sdhci_host *host,
					 struct mmc_command *cmd,
					 u16 *mode)
{
	bool use_cmd12 = sdhci_auto_cmd12(host, cmd->mrq) &&
			 (cmd->opcode != SD_IO_RW_EXTENDED);
	bool use_cmd23 = sdhci_auto_cmd23(host, cmd->mrq);
	u16 ctrl2;

	/*
	 * In case of Version 4.10 or later, use of 'Auto CMD Auto
	 * Select' is recommended rather than use of 'Auto CMD12
	 * Enable' or 'Auto CMD23 Enable'. We require Version 4 Mode
	 * here because some controllers (e.g. sdhci-of-dwmshc) expect it.
	 */
	if (host->version >= SDHCI_SPEC_410 && host->v4_mode &&
	    (use_cmd12 || use_cmd23)) {
		*mode |= SDHCI_TRNS_AUTO_SEL;

		ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
		if (use_cmd23)
			ctrl2 |= SDHCI_CMD23_ENABLE;
		else
			ctrl2 &= ~SDHCI_CMD23_ENABLE;
		sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);

		return;
	}

	/*
	 * If we are sending CMD23, CMD12 never gets sent
	 * on successful completion (so no Auto-CMD12).
	 */
	if (use_cmd12)
		*mode |= SDHCI_TRNS_AUTO_CMD12;
	else if (use_cmd23)
		*mode |= SDHCI_TRNS_AUTO_CMD23;
}

static void sdhci_set_transfer_mode(struct sdhci_host *host,
				    struct mmc_command *cmd)
{
	u16 mode = 0;
	struct mmc_data *data = cmd->data;

	if (data == NULL) {
		if (host->quirks2 &
		    SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD) {
			/* must not clear SDHCI_TRANSFER_MODE when tuning */
			if (cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200)
				sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
		} else {
			/* clear Auto CMD settings for no data CMDs */
			mode = sdhci_readw(host, SDHCI_TRANSFER_MODE);
			sdhci_writew(host, mode & ~(SDHCI_TRNS_AUTO_CMD12 |
				     SDHCI_TRNS_AUTO_CMD23), SDHCI_TRANSFER_MODE);
		}
		return;
	}

	WARN_ON(!host->data);

	if (!(host->quirks2 & SDHCI_QUIRK2_SUPPORT_SINGLE))
		mode = SDHCI_TRNS_BLK_CNT_EN;

	if (mmc_op_multi(cmd->opcode) || data->blocks > 1) {
		mode = SDHCI_TRNS_BLK_CNT_EN | SDHCI_TRNS_MULTI;
		sdhci_auto_cmd_select(host, cmd, &mode);
		if (sdhci_auto_cmd23(host, cmd->mrq))
			sdhci_writel(host, cmd->mrq->sbc->arg, SDHCI_ARGUMENT2);
	}

	if (data->flags & MMC_DATA_READ)
		mode |= SDHCI_TRNS_READ;
	if (host->flags & SDHCI_REQ_USE_DMA)
		mode |= SDHCI_TRNS_DMA;

	sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
}

static bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq)
{
	return (!(host->flags & SDHCI_DEVICE_DEAD) &&
		((mrq->cmd && mrq->cmd->error) ||
		 (mrq->sbc && mrq->sbc->error) ||
		 (mrq->data && mrq->data->stop && mrq->data->stop->error) ||
		 (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST)));
}

static void sdhci_set_mrq_done(struct sdhci_host *host, struct mmc_request *mrq)
{
	int i;

	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
		if (host->mrqs_done[i] == mrq) {
			WARN_ON(1);
			return;
		}
	}

	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
		if (!host->mrqs_done[i]) {
			host->mrqs_done[i] = mrq;
			break;
		}
	}

	WARN_ON(i >= SDHCI_MAX_MRQS);
}

static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
{
	if (host->cmd && host->cmd->mrq == mrq)
		host->cmd = NULL;

	if (host->data_cmd && host->data_cmd->mrq == mrq)
		host->data_cmd = NULL;

	if (host->deferred_cmd && host->deferred_cmd->mrq == mrq)
		host->deferred_cmd = NULL;

	if (host->data && host->data->mrq == mrq)
		host->data = NULL;

	if (sdhci_needs_reset(host, mrq))
		host->pending_reset = true;

	sdhci_set_mrq_done(host, mrq);

	sdhci_del_timer(host, mrq);

	if (!sdhci_has_requests(host))
		sdhci_led_deactivate(host);
}

static void sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
{
	__sdhci_finish_mrq(host, mrq);

	queue_work(host->complete_wq, &host->complete_work);
}

static void __sdhci_finish_data(struct sdhci_host *host, bool sw_data_timeout)
{
	struct mmc_command *data_cmd = host->data_cmd;
	struct mmc_data *data = host->data;

	host->data = NULL;
	host->data_cmd = NULL;

	/*
	 * The controller needs a reset of internal state machines upon error
	 * conditions.
	 */
	if (data->error) {
		if (!host->cmd || host->cmd == data_cmd)
			sdhci_do_reset(host, SDHCI_RESET_CMD);
		sdhci_do_reset(host, SDHCI_RESET_DATA);
	}

	if ((host->flags & (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) ==
	    (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA))
		sdhci_adma_table_post(host, data);

	/*
	 * The specification states that the block count register must
	 * be updated, but it does not specify at what point in the
	 * data flow. That makes the register entirely useless to read
	 * back so we have to assume that nothing made it to the card
	 * in the event of an error.
	 */
	if (data->error)
		data->bytes_xfered = 0;
	else
		data->bytes_xfered = data->blksz * data->blocks;

	/*
	 * Need to send CMD12 if -
	 * a) open-ended multiblock transfer not using auto CMD12 (no CMD23)
	 * b) error in multiblock transfer
	 */
	if (data->stop &&
	    ((!data->mrq->sbc && !sdhci_auto_cmd12(host, data->mrq)) ||
	     data->error)) {
		/*
		 * A 'cap_cmd_during_tfr' request must not use the command
		 * line after mmc_command_done() has been called. It is the
		 * upper layer's responsibility to send the stop command if
		 * required.
		 */
		if (data->mrq->cap_cmd_during_tfr) {
			__sdhci_finish_mrq(host, data->mrq);
		} else {
			/* Avoid triggering warning in sdhci_send_command() */
			host->cmd = NULL;
			if (!sdhci_send_command(host, data->stop)) {
				if (sw_data_timeout) {
					/*
					 * This is anyway a sw data timeout, so
					 * give up now.
					 */
					data->stop->error = -EIO;
					__sdhci_finish_mrq(host, data->mrq);
				} else {
					WARN_ON(host->deferred_cmd);
					host->deferred_cmd = data->stop;
				}
			}
		}
	} else {
		__sdhci_finish_mrq(host, data->mrq);
	}
}

static void sdhci_finish_data(struct sdhci_host *host)
{
	__sdhci_finish_data(host, false);
}

static bool sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
{
	int flags;
	u32 mask;
	unsigned long timeout;

	WARN_ON(host->cmd);

	/* Initially, a command has no error */
	cmd->error = 0;

	if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) &&
	    cmd->opcode == MMC_STOP_TRANSMISSION)
		cmd->flags |= MMC_RSP_BUSY;

	mask = SDHCI_CMD_INHIBIT;
	if (sdhci_data_line_cmd(cmd))
		mask |= SDHCI_DATA_INHIBIT;

	/*
	 * We shouldn't wait for data inhibit for stop commands, even
	 * though they might use busy signaling.
	 */
	if (cmd->mrq->data && (cmd == cmd->mrq->data->stop))
		mask &= ~SDHCI_DATA_INHIBIT;

	if (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask)
		return false;

	host->cmd = cmd;
	host->data_timeout = 0;
	if (sdhci_data_line_cmd(cmd)) {
		WARN_ON(host->data_cmd);
		host->data_cmd = cmd;
		sdhci_set_timeout(host, cmd);
	}

	if (cmd->data) {
		if (host->use_external_dma)
			sdhci_external_dma_prepare_data(host, cmd);
		else
			sdhci_prepare_data(host, cmd);
	}

	sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);

	sdhci_set_transfer_mode(host, cmd);

	if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
		WARN_ONCE(1, "Unsupported response type!\n");
		/*
		 * This does not happen in practice because 136-bit response
		 * commands never have busy waiting, so rather than complicate
		 * the error path, just remove busy waiting and continue.
		 */
		cmd->flags &= ~MMC_RSP_BUSY;
	}

	if (!(cmd->flags & MMC_RSP_PRESENT))
		flags = SDHCI_CMD_RESP_NONE;
	else if (cmd->flags & MMC_RSP_136)
		flags = SDHCI_CMD_RESP_LONG;
	else if (cmd->flags & MMC_RSP_BUSY)
		flags = SDHCI_CMD_RESP_SHORT_BUSY;
	else
		flags = SDHCI_CMD_RESP_SHORT;

	if (cmd->flags & MMC_RSP_CRC)
		flags |= SDHCI_CMD_CRC;
	if (cmd->flags & MMC_RSP_OPCODE)
		flags |= SDHCI_CMD_INDEX;

	/* CMD19 is special in that the Data Present Select should be set */
	if (cmd->data || cmd->opcode == MMC_SEND_TUNING_BLOCK ||
	    cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)
		flags |= SDHCI_CMD_DATA;

	timeout = jiffies;
	if (host->data_timeout)
		timeout += nsecs_to_jiffies(host->data_timeout);
	else if (!cmd->data && cmd->busy_timeout > 9000)
		timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ;
	else
		timeout += 10 * HZ;
	sdhci_mod_timer(host, cmd->mrq, timeout);

	if (host->use_external_dma)
		sdhci_external_dma_pre_transfer(host, cmd);

	sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);

	return true;
}

static bool sdhci_present_error(struct sdhci_host *host,
				struct mmc_command *cmd, bool present)
{
	if (!present || host->flags & SDHCI_DEVICE_DEAD) {
		cmd->error = -ENOMEDIUM;
		return true;
	}

	return false;
}

static bool sdhci_send_command_retry(struct sdhci_host *host,
				     struct mmc_command *cmd,
				     unsigned long flags)
	__releases(host->lock)
	__acquires(host->lock)
{
	struct mmc_command *deferred_cmd = host->deferred_cmd;
	int timeout = 10; /* Approx. 10 ms */
	bool present;

	while (!sdhci_send_command(host, cmd)) {
		if (!timeout--) {
			pr_err("%s: Controller never released inhibit bit(s).\n",
			       mmc_hostname(host->mmc));
			sdhci_dumpregs(host);
			cmd->error = -EIO;
			return false;
		}

		spin_unlock_irqrestore(&host->lock, flags);

		usleep_range(1000, 1250);

		present = host->mmc->ops->get_cd(host->mmc);

		spin_lock_irqsave(&host->lock, flags);

		/* A deferred command might disappear, handle that */
		if (cmd == deferred_cmd && cmd != host->deferred_cmd)
			return true;

		if (sdhci_present_error(host, cmd, present))
			return false;
	}

	if (cmd == host->deferred_cmd)
		host->deferred_cmd = NULL;

	return true;
}

static void sdhci_read_rsp_136(struct sdhci_host *host, struct mmc_command *cmd)
{
	int i, reg;

	for (i = 0; i < 4; i++) {
		reg = SDHCI_RESPONSE + (3 - i) * 4;
		cmd->resp[i] = sdhci_readl(host, reg);
	}

	if (host->quirks2 & SDHCI_QUIRK2_RSP_136_HAS_CRC)
		return;

	/* CRC is stripped so we need to do some shifting */
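	/*
	 * Each response word is shifted left by one byte, pulling in the
	 * top byte of the next word, to compensate for the CRC byte the
	 * controller dropped from the 136-bit response.
	 */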
	for (i = 0; i < 4; i++) {
		cmd->resp[i] <<= 8;
		if (i != 3)
			cmd->resp[i] |= cmd->resp[i + 1] >> 24;
	}
}

static void sdhci_finish_command(struct sdhci_host *host)
{
	struct mmc_command *cmd = host->cmd;

	host->cmd = NULL;

	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			sdhci_read_rsp_136(host, cmd);
		} else {
			cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
		}
	}

	if (cmd->mrq->cap_cmd_during_tfr && cmd == cmd->mrq->cmd)
		mmc_command_done(host->mmc, cmd->mrq);

	/*
	 * The host can send an interrupt when the busy state has
	 * ended, allowing us to wait without wasting CPU cycles.
	 * The busy signal uses DAT0 so this is similar to waiting
	 * for data to complete.
	 *
	 * Note: The 1.0 specification is a bit ambiguous about this
	 * feature so there might be some problems with older
	 * controllers.
	 */
1783 if (cmd->flags & MMC_RSP_BUSY) {
1784 if (cmd->data) {
1785 DBG("Cannot wait for busy signal when also doing a data transfer");
1786 } else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ) &&
1787 cmd == host->data_cmd) {
1788 /* Command complete before busy is ended */
1789 return;
1790 }
1791 }
1792
1793 /* Finished CMD23, now send actual command. */
1794 if (cmd == cmd->mrq->sbc) {
1795 if (!sdhci_send_command(host, cmd->mrq->cmd)) {
1796 WARN_ON(host->deferred_cmd);
1797 host->deferred_cmd = cmd->mrq->cmd;
1798 }
1799 } else {
1800
1801 /* Processed actual command. */
1802 if (host->data && host->data_early)
1803 sdhci_finish_data(host);
1804
1805 if (!cmd->data)
1806 __sdhci_finish_mrq(host, cmd->mrq);
1807 }
1808 }
1809
sdhci_get_preset_value(struct sdhci_host * host)1810 static u16 sdhci_get_preset_value(struct sdhci_host *host)
1811 {
1812 u16 preset = 0;
1813
1814 switch (host->timing) {
1815 case MMC_TIMING_UHS_SDR12:
1816 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
1817 break;
1818 case MMC_TIMING_UHS_SDR25:
1819 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25);
1820 break;
1821 case MMC_TIMING_UHS_SDR50:
1822 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50);
1823 break;
1824 case MMC_TIMING_UHS_SDR104:
1825 case MMC_TIMING_MMC_HS200:
1826 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104);
1827 break;
1828 case MMC_TIMING_UHS_DDR50:
1829 case MMC_TIMING_MMC_DDR52:
1830 preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50);
1831 break;
1832 case MMC_TIMING_MMC_HS400:
1833 preset = sdhci_readw(host, SDHCI_PRESET_FOR_HS400);
1834 break;
1835 default:
1836 pr_warn("%s: Invalid UHS-I mode selected\n",
1837 mmc_hostname(host->mmc));
1838 preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
1839 break;
1840 }
1841 return preset;
1842 }
1843
sdhci_calc_clk(struct sdhci_host * host,unsigned int clock,unsigned int * actual_clock)1844 u16 sdhci_calc_clk(struct sdhci_host *host, unsigned int clock,
1845 unsigned int *actual_clock)
1846 {
1847 int div = 0; /* Initialized for compiler warning */
1848 int real_div = div, clk_mul = 1;
1849 u16 clk = 0;
1850 bool switch_base_clk = false;
1851
1852 if (host->version >= SDHCI_SPEC_300) {
1853 if (host->preset_enabled) {
1854 u16 pre_val;
1855
1856 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1857 pre_val = sdhci_get_preset_value(host);
1858 div = FIELD_GET(SDHCI_PRESET_SDCLK_FREQ_MASK, pre_val);
1859 if (host->clk_mul &&
1860 (pre_val & SDHCI_PRESET_CLKGEN_SEL)) {
1861 clk = SDHCI_PROG_CLOCK_MODE;
1862 real_div = div + 1;
1863 clk_mul = host->clk_mul;
1864 } else {
1865 real_div = max_t(int, 1, div << 1);
1866 }
1867 goto clock_set;
1868 }
1869
1870 /*
1871 * Check if the Host Controller supports Programmable Clock
1872 * Mode.
1873 */
1874 if (host->clk_mul) {
1875 for (div = 1; div <= 1024; div++) {
1876 if ((host->max_clk * host->clk_mul / div)
1877 <= clock)
1878 break;
1879 }
1880 if ((host->max_clk * host->clk_mul / div) <= clock) {
1881 /*
1882 * Set Programmable Clock Mode in the Clock
1883 * Control register.
1884 */
1885 clk = SDHCI_PROG_CLOCK_MODE;
1886 real_div = div;
1887 clk_mul = host->clk_mul;
1888 div--;
1889 } else {
1890 /*
1891 * The divisor may be too small to reach the requested
1892 * clock speed; in that case, fall back to the base clock.
1893 */
1894 switch_base_clk = true;
1895 }
1896 }
1897
1898 if (!host->clk_mul || switch_base_clk) {
1899 /* Version 3.00 divisors must be a multiple of 2. */
1900 if (host->max_clk <= clock)
1901 div = 1;
1902 else {
1903 for (div = 2; div < SDHCI_MAX_DIV_SPEC_300;
1904 div += 2) {
1905 if ((host->max_clk / div) <= clock)
1906 break;
1907 }
1908 }
1909 real_div = div;
1910 div >>= 1;
1911 if ((host->quirks2 & SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN)
1912 && !div && host->max_clk <= 25000000)
1913 div = 1;
1914 }
1915 } else {
1916 /* Version 2.00 divisors must be a power of 2. */
1917 for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
1918 if ((host->max_clk / div) <= clock)
1919 break;
1920 }
1921 real_div = div;
1922 div >>= 1;
1923 }
1924
1925 clock_set:
1926 if (real_div)
1927 *actual_clock = (host->max_clk * clk_mul) / real_div;
1928 clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
1929 clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
1930 << SDHCI_DIVIDER_HI_SHIFT;
1931
1932 return clk;
1933 }
1934 EXPORT_SYMBOL_GPL(sdhci_calc_clk);
1935
1936 void sdhci_enable_clk(struct sdhci_host *host, u16 clk)
1937 {
1938 ktime_t timeout;
1939
1940 clk |= SDHCI_CLOCK_INT_EN;
1941 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1942
1943 /* Wait max 150 ms */
1944 timeout = ktime_add_ms(ktime_get(), 150);
1945 while (1) {
1946 bool timedout = ktime_after(ktime_get(), timeout);
1947
1948 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1949 if (clk & SDHCI_CLOCK_INT_STABLE)
1950 break;
1951 if (timedout) {
1952 pr_err("%s: Internal clock never stabilised.\n",
1953 mmc_hostname(host->mmc));
1954 sdhci_dumpregs(host);
1955 return;
1956 }
1957 udelay(10);
1958 }
1959
1960 if (host->version >= SDHCI_SPEC_410 && host->v4_mode) {
1961 clk |= SDHCI_CLOCK_PLL_EN;
1962 clk &= ~SDHCI_CLOCK_INT_STABLE;
1963 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1964
1965 /* Wait max 150 ms */
1966 timeout = ktime_add_ms(ktime_get(), 150);
1967 while (1) {
1968 bool timedout = ktime_after(ktime_get(), timeout);
1969
1970 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1971 if (clk & SDHCI_CLOCK_INT_STABLE)
1972 break;
1973 if (timedout) {
1974 pr_err("%s: PLL clock never stabilised.\n",
1975 mmc_hostname(host->mmc));
1976 sdhci_dumpregs(host);
1977 return;
1978 }
1979 udelay(10);
1980 }
1981 }
1982
1983 clk |= SDHCI_CLOCK_CARD_EN;
1984 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1985 }
1986 EXPORT_SYMBOL_GPL(sdhci_enable_clk);
1987
1988 void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
1989 {
1990 u16 clk;
1991
1992 host->mmc->actual_clock = 0;
1993
1994 sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
1995
1996 if (clock == 0)
1997 return;
1998
1999 clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);
2000 sdhci_enable_clk(host, clk);
2001 }
2002 EXPORT_SYMBOL_GPL(sdhci_set_clock);
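
/*
 * A minimal sketch of how a vendor driver typically hooks this helper up
 * (the names below are hypothetical, not from this file):
 *
 *	static void my_sdhci_set_clock(struct sdhci_host *host,
 *				       unsigned int clock)
 *	{
 *		// reprogram any external clock source here if needed
 *		sdhci_set_clock(host, clock);
 *	}
 *
 *	static const struct sdhci_ops my_sdhci_ops = {
 *		.set_clock = my_sdhci_set_clock,
 *	};
 */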
2003
2004 static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode,
2005 unsigned short vdd)
2006 {
2007 struct mmc_host *mmc = host->mmc;
2008
2009 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
2010
2011 if (mode != MMC_POWER_OFF)
2012 sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
2013 else
2014 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
2015 }
2016
2017 void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode,
2018 unsigned short vdd)
2019 {
2020 u8 pwr = 0;
2021
2022 if (mode != MMC_POWER_OFF) {
2023 switch (1 << vdd) {
2024 case MMC_VDD_165_195:
2025 /*
2026 * Without a regulator, SDHCI does not support 2.0v
2027 * so we only get here if the driver deliberately
2028 * added the 2.0v range to ocr_avail. Map it to 1.8v
2029 * for the purpose of turning on the power.
2030 */
2031 case MMC_VDD_20_21:
2032 pwr = SDHCI_POWER_180;
2033 break;
2034 case MMC_VDD_29_30:
2035 case MMC_VDD_30_31:
2036 pwr = SDHCI_POWER_300;
2037 break;
2038 case MMC_VDD_32_33:
2039 case MMC_VDD_33_34:
2040 pwr = SDHCI_POWER_330;
2041 break;
2042 default:
2043 WARN(1, "%s: Invalid vdd %#x\n",
2044 mmc_hostname(host->mmc), vdd);
2045 break;
2046 }
2047 }
2048
2049 if (host->pwr == pwr)
2050 return;
2051
2052 host->pwr = pwr;
2053
2054 if (pwr == 0) {
2055 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
2056 if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
2057 sdhci_runtime_pm_bus_off(host);
2058 } else {
2059 /*
2060 * Spec says that we should clear the power reg before setting
2061 * a new value. Some controllers don't seem to like this though.
2062 */
2063 if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
2064 sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
2065
2066 /*
2067 * At least the Marvell CaFe chip gets confused if we set the
2068 * voltage and turn on the power at the same time, so set the
2069 * voltage first.
2070 */
2071 if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
2072 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
2073
2074 pwr |= SDHCI_POWER_ON;
2075
2076 sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
2077
2078 if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
2079 sdhci_runtime_pm_bus_on(host);
2080
2081 /*
2082 * Some controllers need an extra 10ms delay before
2083 * they can apply clock after applying power
2084 */
2085 if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
2086 mdelay(10);
2087 }
2088 }
2089 EXPORT_SYMBOL_GPL(sdhci_set_power_noreg);
2090
2091 void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
2092 unsigned short vdd)
2093 {
2094 if (IS_ERR(host->mmc->supply.vmmc))
2095 sdhci_set_power_noreg(host, mode, vdd);
2096 else
2097 sdhci_set_power_reg(host, mode, vdd);
2098 }
2099 EXPORT_SYMBOL_GPL(sdhci_set_power);
2100
2101 /*
2102 * Some controllers need to configure a valid bus voltage on their power
2103 * register regardless of whether an external regulator is taking care of power
2104 * supply. This helper function takes care of it if set as the controller's
2105 * sdhci_ops.set_power callback.
2106 */
2107 void sdhci_set_power_and_bus_voltage(struct sdhci_host *host,
2108 unsigned char mode,
2109 unsigned short vdd)
2110 {
2111 if (!IS_ERR(host->mmc->supply.vmmc)) {
2112 struct mmc_host *mmc = host->mmc;
2113
2114 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
2115 }
2116 sdhci_set_power_noreg(host, mode, vdd);
2117 }
2118 EXPORT_SYMBOL_GPL(sdhci_set_power_and_bus_voltage);
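
/*
 * Usage note (a sketch, not from this file): drivers whose controllers have
 * this requirement simply point their ops at the helper above, e.g.:
 *
 *	static const struct sdhci_ops my_ops = {
 *		.set_power = sdhci_set_power_and_bus_voltage,
 *	};
 *
 * ("my_ops" is illustrative; sdhci-of-arasan is believed to use this
 * pattern.)
 */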
2119
2120 /*****************************************************************************\
2121 * *
2122 * MMC callbacks *
2123 * *
2124 \*****************************************************************************/
2125
2126 void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
2127 {
2128 struct sdhci_host *host = mmc_priv(mmc);
2129 struct mmc_command *cmd;
2130 unsigned long flags;
2131 bool present;
2132
2133 /* Firstly check card presence */
2134 present = mmc->ops->get_cd(mmc);
2135
2136 spin_lock_irqsave(&host->lock, flags);
2137
2138 sdhci_led_activate(host);
2139
2140 if (sdhci_present_error(host, mrq->cmd, present))
2141 goto out_finish;
2142
2143 cmd = sdhci_manual_cmd23(host, mrq) ? mrq->sbc : mrq->cmd;
2144
2145 if (!sdhci_send_command_retry(host, cmd, flags))
2146 goto out_finish;
2147
2148 spin_unlock_irqrestore(&host->lock, flags);
2149
2150 return;
2151
2152 out_finish:
2153 sdhci_finish_mrq(host, mrq);
2154 spin_unlock_irqrestore(&host->lock, flags);
2155 }
2156 EXPORT_SYMBOL_GPL(sdhci_request);
2157
2158 int sdhci_request_atomic(struct mmc_host *mmc, struct mmc_request *mrq)
2159 {
2160 struct sdhci_host *host = mmc_priv(mmc);
2161 struct mmc_command *cmd;
2162 unsigned long flags;
2163 int ret = 0;
2164
2165 spin_lock_irqsave(&host->lock, flags);
2166
2167 if (sdhci_present_error(host, mrq->cmd, true)) {
2168 sdhci_finish_mrq(host, mrq);
2169 goto out_finish;
2170 }
2171
2172 cmd = sdhci_manual_cmd23(host, mrq) ? mrq->sbc : mrq->cmd;
2173
2174 /*
2175 * The HSQ may send a command in interrupt context without polling
2176 * the busy signaling, which means we should return BUSY if the
2177 * controller has not released the inhibit bits, allowing HSQ to try
2178 * sending the request again in non-atomic context. So we should not
2179 * finish this request here.
2180 */
2181 if (!sdhci_send_command(host, cmd))
2182 ret = -EBUSY;
2183 else
2184 sdhci_led_activate(host);
2185
2186 out_finish:
2187 spin_unlock_irqrestore(&host->lock, flags);
2188 return ret;
2189 }
2190 EXPORT_SYMBOL_GPL(sdhci_request_atomic);
2191
2192 void sdhci_set_bus_width(struct sdhci_host *host, int width)
2193 {
2194 u8 ctrl;
2195
2196 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
2197 if (width == MMC_BUS_WIDTH_8) {
2198 ctrl &= ~SDHCI_CTRL_4BITBUS;
2199 ctrl |= SDHCI_CTRL_8BITBUS;
2200 } else {
2201 if (host->mmc->caps & MMC_CAP_8_BIT_DATA)
2202 ctrl &= ~SDHCI_CTRL_8BITBUS;
2203 if (width == MMC_BUS_WIDTH_4)
2204 ctrl |= SDHCI_CTRL_4BITBUS;
2205 else
2206 ctrl &= ~SDHCI_CTRL_4BITBUS;
2207 }
2208 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
2209 }
2210 EXPORT_SYMBOL_GPL(sdhci_set_bus_width);
2211
2212 void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
2213 {
2214 u16 ctrl_2;
2215
2216 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2217 /* Select Bus Speed Mode for host */
2218 ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
2219 if ((timing == MMC_TIMING_MMC_HS200) ||
2220 (timing == MMC_TIMING_UHS_SDR104))
2221 ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
2222 else if (timing == MMC_TIMING_UHS_SDR12)
2223 ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
2224 else if (timing == MMC_TIMING_UHS_SDR25)
2225 ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
2226 else if (timing == MMC_TIMING_UHS_SDR50)
2227 ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
2228 else if ((timing == MMC_TIMING_UHS_DDR50) ||
2229 (timing == MMC_TIMING_MMC_DDR52))
2230 ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
2231 else if (timing == MMC_TIMING_MMC_HS400)
2232 ctrl_2 |= SDHCI_CTRL_HS400; /* Non-standard */
2233 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
2234 }
2235 EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling);
2236
2237 void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
2238 {
2239 struct sdhci_host *host = mmc_priv(mmc);
2240 u8 ctrl;
2241
2242 if (ios->power_mode == MMC_POWER_UNDEFINED)
2243 return;
2244
2245 if (host->flags & SDHCI_DEVICE_DEAD) {
2246 if (!IS_ERR(mmc->supply.vmmc) &&
2247 ios->power_mode == MMC_POWER_OFF)
2248 mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
2249 return;
2250 }
2251
2252 /*
2253 * Reset the chip on each power off.
2254 * Should clear out any weird states.
2255 */
2256 if (ios->power_mode == MMC_POWER_OFF) {
2257 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
2258 sdhci_reinit(host);
2259 }
2260
2261 if (host->version >= SDHCI_SPEC_300 &&
2262 (ios->power_mode == MMC_POWER_UP) &&
2263 !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
2264 sdhci_enable_preset_value(host, false);
2265
2266 if (!ios->clock || ios->clock != host->clock) {
2267 host->ops->set_clock(host, ios->clock);
2268 host->clock = ios->clock;
2269
2270 if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK &&
2271 host->clock) {
2272 host->timeout_clk = mmc->actual_clock ?
2273 mmc->actual_clock / 1000 :
2274 host->clock / 1000;
2275 mmc->max_busy_timeout =
2276 host->ops->get_max_timeout_count ?
2277 host->ops->get_max_timeout_count(host) :
2278 1 << 27;
2279 mmc->max_busy_timeout /= host->timeout_clk;
2280 }
2281 }
2282
2283 if (host->ops->set_power)
2284 host->ops->set_power(host, ios->power_mode, ios->vdd);
2285 else
2286 sdhci_set_power(host, ios->power_mode, ios->vdd);
2287
2288 if (host->ops->platform_send_init_74_clocks)
2289 host->ops->platform_send_init_74_clocks(host, ios->power_mode);
2290
2291 host->ops->set_bus_width(host, ios->bus_width);
2292
2293 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
2294
2295 if (!(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT)) {
2296 if (ios->timing == MMC_TIMING_SD_HS ||
2297 ios->timing == MMC_TIMING_MMC_HS ||
2298 ios->timing == MMC_TIMING_MMC_HS400 ||
2299 ios->timing == MMC_TIMING_MMC_HS200 ||
2300 ios->timing == MMC_TIMING_MMC_DDR52 ||
2301 ios->timing == MMC_TIMING_UHS_SDR50 ||
2302 ios->timing == MMC_TIMING_UHS_SDR104 ||
2303 ios->timing == MMC_TIMING_UHS_DDR50 ||
2304 ios->timing == MMC_TIMING_UHS_SDR25)
2305 ctrl |= SDHCI_CTRL_HISPD;
2306 else
2307 ctrl &= ~SDHCI_CTRL_HISPD;
2308 }
2309
2310 if (host->version >= SDHCI_SPEC_300) {
2311 u16 clk, ctrl_2;
2312
2313 if (!host->preset_enabled) {
2314 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
2315 /*
2316 * We only need to set Driver Strength if the
2317 * preset value enable is not set.
2318 */
2319 ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2320 ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK;
2321 if (ios->drv_type == MMC_SET_DRIVER_TYPE_A)
2322 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A;
2323 else if (ios->drv_type == MMC_SET_DRIVER_TYPE_B)
2324 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
2325 else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C)
2326 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C;
2327 else if (ios->drv_type == MMC_SET_DRIVER_TYPE_D)
2328 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_D;
2329 else {
2330 pr_warn("%s: invalid driver type, default to driver type B\n",
2331 mmc_hostname(mmc));
2332 ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
2333 }
2334
2335 sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
2336 } else {
2337 /*
2338 * According to SDHC Spec v3.00, if the Preset Value
2339 * Enable in the Host Control 2 register is set, we
2340 * need to reset SD Clock Enable before changing High
2341 * Speed Enable to avoid generating clock glitches.
2342 */
2343
2344 /* Reset SD Clock Enable */
2345 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
2346 clk &= ~SDHCI_CLOCK_CARD_EN;
2347 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
2348
2349 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
2350
2351 /* Re-enable SD Clock */
2352 host->ops->set_clock(host, host->clock);
2353 }
2354
2355 /* Reset SD Clock Enable */
2356 clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
2357 clk &= ~SDHCI_CLOCK_CARD_EN;
2358 sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
2359
2360 host->ops->set_uhs_signaling(host, ios->timing);
2361 host->timing = ios->timing;
2362
2363 if (!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) &&
2364 ((ios->timing == MMC_TIMING_UHS_SDR12) ||
2365 (ios->timing == MMC_TIMING_UHS_SDR25) ||
2366 (ios->timing == MMC_TIMING_UHS_SDR50) ||
2367 (ios->timing == MMC_TIMING_UHS_SDR104) ||
2368 (ios->timing == MMC_TIMING_UHS_DDR50) ||
2369 (ios->timing == MMC_TIMING_MMC_DDR52))) {
2370 u16 preset;
2371
2372 sdhci_enable_preset_value(host, true);
2373 preset = sdhci_get_preset_value(host);
2374 ios->drv_type = FIELD_GET(SDHCI_PRESET_DRV_MASK,
2375 preset);
2376 }
2377
2378 /* Re-enable SD Clock */
2379 host->ops->set_clock(host, host->clock);
2380 } else
2381 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
2382
2383 /*
2384 * Some (ENE) controllers go apeshit on some ios operation,
2385 * signalling timeout and CRC errors even on CMD0. Resetting
2386 * it on each ios seems to solve the problem.
2387 */
2388 if (host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
2389 sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
2390 }
2391 EXPORT_SYMBOL_GPL(sdhci_set_ios);
2392
2393 static int sdhci_get_cd(struct mmc_host *mmc)
2394 {
2395 struct sdhci_host *host = mmc_priv(mmc);
2396 int gpio_cd = mmc_gpio_get_cd(mmc);
2397
2398 if (host->flags & SDHCI_DEVICE_DEAD)
2399 return 0;
2400
2401 /* If nonremovable, assume that the card is always present. */
2402 if (!mmc_card_is_removable(mmc))
2403 return 1;
2404
2405 /*
2406 * Try slot GPIO detect; if defined, it takes precedence
2407 * over the built-in controller functionality.
2408 */
2409 if (gpio_cd >= 0)
2410 return !!gpio_cd;
2411
2412 /* If polling, assume that the card is always present. */
2413 if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
2414 return 1;
2415
2416 /* Host native card detect */
2417 return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
2418 }
2419
2420 static int sdhci_check_ro(struct sdhci_host *host)
2421 {
2422 unsigned long flags;
2423 int is_readonly;
2424
2425 spin_lock_irqsave(&host->lock, flags);
2426
2427 if (host->flags & SDHCI_DEVICE_DEAD)
2428 is_readonly = 0;
2429 else if (host->ops->get_ro)
2430 is_readonly = host->ops->get_ro(host);
2431 else if (mmc_can_gpio_ro(host->mmc))
2432 is_readonly = mmc_gpio_get_ro(host->mmc);
2433 else
2434 is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
2435 & SDHCI_WRITE_PROTECT);
2436
2437 spin_unlock_irqrestore(&host->lock, flags);
2438
2439 /* This quirk needs to be replaced by a callback function later */
2440 return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
2441 !is_readonly : is_readonly;
2442 }
2443
2444 #define SAMPLE_COUNT 5
2445
2446 static int sdhci_get_ro(struct mmc_host *mmc)
2447 {
2448 struct sdhci_host *host = mmc_priv(mmc);
2449 int i, ro_count;
2450
2451 if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT))
2452 return sdhci_check_ro(host);
2453
2454 ro_count = 0;
2455 for (i = 0; i < SAMPLE_COUNT; i++) {
2456 if (sdhci_check_ro(host)) {
2457 if (++ro_count > SAMPLE_COUNT / 2)
2458 return 1;
2459 }
2460 msleep(30);
2461 }
2462 return 0;
2463 }
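
/*
 * Descriptive note: with SAMPLE_COUNT == 5, the unstable-RO path above is
 * a majority vote: the write-protect state is sampled every 30 ms, and the
 * slot is reported read-only only once more than half of the samples (i.e.
 * at least 3) read back as protected.
 */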
2464
2465 static void sdhci_hw_reset(struct mmc_host *mmc)
2466 {
2467 struct sdhci_host *host = mmc_priv(mmc);
2468
2469 if (host->ops && host->ops->hw_reset)
2470 host->ops->hw_reset(host);
2471 }
2472
2473 static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable)
2474 {
2475 if (!(host->flags & SDHCI_DEVICE_DEAD)) {
2476 if (enable)
2477 host->ier |= SDHCI_INT_CARD_INT;
2478 else
2479 host->ier &= ~SDHCI_INT_CARD_INT;
2480
2481 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
2482 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
2483 }
2484 }
2485
2486 void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
2487 {
2488 struct sdhci_host *host = mmc_priv(mmc);
2489 unsigned long flags;
2490
2491 if (enable)
2492 pm_runtime_get_noresume(mmc_dev(mmc));
2493
2494 spin_lock_irqsave(&host->lock, flags);
2495 sdhci_enable_sdio_irq_nolock(host, enable);
2496 spin_unlock_irqrestore(&host->lock, flags);
2497
2498 if (!enable)
2499 pm_runtime_put_noidle(mmc_dev(mmc));
2500 }
2501 EXPORT_SYMBOL_GPL(sdhci_enable_sdio_irq);
2502
2503 static void sdhci_ack_sdio_irq(struct mmc_host *mmc)
2504 {
2505 struct sdhci_host *host = mmc_priv(mmc);
2506 unsigned long flags;
2507
2508 spin_lock_irqsave(&host->lock, flags);
2509 sdhci_enable_sdio_irq_nolock(host, true);
2510 spin_unlock_irqrestore(&host->lock, flags);
2511 }
2512
2513 int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
2514 struct mmc_ios *ios)
2515 {
2516 struct sdhci_host *host = mmc_priv(mmc);
2517 u16 ctrl;
2518 int ret;
2519
2520 /*
2521 * Signal Voltage Switching is only applicable for Host Controllers
2522 * v3.00 and above.
2523 */
2524 if (host->version < SDHCI_SPEC_300)
2525 return 0;
2526
2527 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2528
2529 switch (ios->signal_voltage) {
2530 case MMC_SIGNAL_VOLTAGE_330:
2531 if (!(host->flags & SDHCI_SIGNALING_330))
2532 return -EINVAL;
2533 /* Set 1.8V Signal Enable in the Host Control2 register to 0 */
2534 ctrl &= ~SDHCI_CTRL_VDD_180;
2535 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2536
2537 if (!IS_ERR(mmc->supply.vqmmc)) {
2538 ret = mmc_regulator_set_vqmmc(mmc, ios);
2539 if (ret < 0) {
2540 pr_warn("%s: Switching to 3.3V signalling voltage failed\n",
2541 mmc_hostname(mmc));
2542 return -EIO;
2543 }
2544 }
2545 /* Wait for 5ms */
2546 usleep_range(5000, 5500);
2547
2548 /* 3.3V regulator output should be stable within 5 ms */
2549 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2550 if (!(ctrl & SDHCI_CTRL_VDD_180))
2551 return 0;
2552
2553 pr_warn("%s: 3.3V regulator output did not become stable\n",
2554 mmc_hostname(mmc));
2555
2556 return -EAGAIN;
2557 case MMC_SIGNAL_VOLTAGE_180:
2558 if (!(host->flags & SDHCI_SIGNALING_180))
2559 return -EINVAL;
2560 if (!IS_ERR(mmc->supply.vqmmc)) {
2561 ret = mmc_regulator_set_vqmmc(mmc, ios);
2562 if (ret < 0) {
2563 pr_warn("%s: Switching to 1.8V signalling voltage failed\n",
2564 mmc_hostname(mmc));
2565 return -EIO;
2566 }
2567 }
2568
2569 /*
2570 * Enable 1.8V Signal Enable in the Host Control2
2571 * register
2572 */
2573 ctrl |= SDHCI_CTRL_VDD_180;
2574 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2575
2576 /* Some controllers need to do more when switching */
2577 if (host->ops->voltage_switch)
2578 host->ops->voltage_switch(host);
2579
2580 /* 1.8V regulator output should be stable within 5 ms */
2581 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2582 if (ctrl & SDHCI_CTRL_VDD_180)
2583 return 0;
2584
2585 pr_warn("%s: 1.8V regulator output did not become stable\n",
2586 mmc_hostname(mmc));
2587
2588 return -EAGAIN;
2589 case MMC_SIGNAL_VOLTAGE_120:
2590 if (!(host->flags & SDHCI_SIGNALING_120))
2591 return -EINVAL;
2592 if (!IS_ERR(mmc->supply.vqmmc)) {
2593 ret = mmc_regulator_set_vqmmc(mmc, ios);
2594 if (ret < 0) {
2595 pr_warn("%s: Switching to 1.2V signalling voltage failed\n",
2596 mmc_hostname(mmc));
2597 return -EIO;
2598 }
2599 }
2600 return 0;
2601 default:
2602 /* No signal voltage switch required */
2603 return 0;
2604 }
2605 }
2606 EXPORT_SYMBOL_GPL(sdhci_start_signal_voltage_switch);
2607
2608 static int sdhci_card_busy(struct mmc_host *mmc)
2609 {
2610 struct sdhci_host *host = mmc_priv(mmc);
2611 u32 present_state;
2612
2613 /* Check whether DAT[0] is 0 */
2614 present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
2615
2616 return !(present_state & SDHCI_DATA_0_LVL_MASK);
2617 }
2618
2619 static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
2620 {
2621 struct sdhci_host *host = mmc_priv(mmc);
2622 unsigned long flags;
2623
2624 spin_lock_irqsave(&host->lock, flags);
2625 host->flags |= SDHCI_HS400_TUNING;
2626 spin_unlock_irqrestore(&host->lock, flags);
2627
2628 return 0;
2629 }
2630
2631 void sdhci_start_tuning(struct sdhci_host *host)
2632 {
2633 u16 ctrl;
2634
2635 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2636 ctrl |= SDHCI_CTRL_EXEC_TUNING;
2637 if (host->quirks2 & SDHCI_QUIRK2_TUNING_WORK_AROUND)
2638 ctrl |= SDHCI_CTRL_TUNED_CLK;
2639 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2640
2641 /*
2642 * As per the Host Controller spec v3.00, tuning command
2643 * generates Buffer Read Ready interrupt, so enable that.
2644 *
2645 * Note: The spec clearly says that when tuning sequence
2646 * is being performed, the controller does not generate
2647 * interrupts other than Buffer Read Ready interrupt. But
2648 * to make sure we don't hit a controller bug, we _only_
2649 * enable Buffer Read Ready interrupt here.
2650 */
2651 sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE);
2652 sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE);
2653 }
2654 EXPORT_SYMBOL_GPL(sdhci_start_tuning);
2655
2656 void sdhci_end_tuning(struct sdhci_host *host)
2657 {
2658 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
2659 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
2660 }
2661 EXPORT_SYMBOL_GPL(sdhci_end_tuning);
2662
2663 void sdhci_reset_tuning(struct sdhci_host *host)
2664 {
2665 u16 ctrl;
2666
2667 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2668 ctrl &= ~SDHCI_CTRL_TUNED_CLK;
2669 ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
2670 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2671 }
2672 EXPORT_SYMBOL_GPL(sdhci_reset_tuning);
2673
2674 void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode)
2675 {
2676 sdhci_reset_tuning(host);
2677
2678 sdhci_do_reset(host, SDHCI_RESET_CMD);
2679 sdhci_do_reset(host, SDHCI_RESET_DATA);
2680
2681 sdhci_end_tuning(host);
2682
2683 mmc_abort_tuning(host->mmc, opcode);
2684 }
2685 EXPORT_SYMBOL_GPL(sdhci_abort_tuning);
2686
2687 /*
2688 * We use sdhci_send_tuning() because mmc_send_tuning() is not a good fit. SDHCI
2689 * tuning command does not have a data payload (or rather the hardware does it
2690 * automatically) so mmc_send_tuning() will return -EIO. Also the tuning command
2691 * interrupt setup is different to other commands and there is no timeout
2692 * interrupt so special handling is needed.
2693 */
2694 void sdhci_send_tuning(struct sdhci_host *host, u32 opcode)
2695 {
2696 struct mmc_host *mmc = host->mmc;
2697 struct mmc_command cmd = {};
2698 struct mmc_request mrq = {};
2699 unsigned long flags;
2700 u32 b = host->sdma_boundary;
2701
2702 spin_lock_irqsave(&host->lock, flags);
2703
2704 cmd.opcode = opcode;
2705 cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
2706 cmd.mrq = &mrq;
2707
2708 mrq.cmd = &cmd;
2709 /*
2710 * In response to CMD19, the card sends a 64-byte tuning block to
2711 * the Host Controller, so the block size is set to 64 below; for
2712 * CMD21 in HS200 mode on an 8-bit bus it is 128 bytes instead.
2713 */
2714 if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200 &&
2715 mmc->ios.bus_width == MMC_BUS_WIDTH_8)
2716 sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 128), SDHCI_BLOCK_SIZE);
2717 else
2718 sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 64), SDHCI_BLOCK_SIZE);
2719
2720 /*
2721 * The tuning block is sent by the card to the host controller.
2722 * So we set the TRNS_READ bit in the Transfer Mode register.
2723 * This also takes care of setting DMA Enable and Multi Block
2724 * Select in the same register to 0.
2725 */
2726 sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);
2727
2728 if (!sdhci_send_command_retry(host, &cmd, flags)) {
2729 spin_unlock_irqrestore(&host->lock, flags);
2730 host->tuning_done = 0;
2731 return;
2732 }
2733
2734 host->cmd = NULL;
2735
2736 sdhci_del_timer(host, &mrq);
2737
2738 host->tuning_done = 0;
2739
2740 spin_unlock_irqrestore(&host->lock, flags);
2741
2742 /* Wait for Buffer Read Ready interrupt */
2743 wait_event_timeout(host->buf_ready_int, (host->tuning_done == 1),
2744 msecs_to_jiffies(50));
2745
2746 }
2747 EXPORT_SYMBOL_GPL(sdhci_send_tuning);
2748
2749 static int __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
2750 {
2751 int i;
2752
2753 /*
2754 * Issue the opcode repeatedly until Execute Tuning is cleared to 0 or
2755 * the number of loops reaches the tuning loop count.
2756 */
2757 for (i = 0; i < host->tuning_loop_count; i++) {
2758 u16 ctrl;
2759
2760 sdhci_send_tuning(host, opcode);
2761
2762 if (!host->tuning_done) {
2763 pr_debug("%s: Tuning timeout, falling back to fixed sampling clock\n",
2764 mmc_hostname(host->mmc));
2765 sdhci_abort_tuning(host, opcode);
2766 return -ETIMEDOUT;
2767 }
2768
2769 /* Spec does not require a delay between tuning cycles */
2770 if (host->tuning_delay > 0)
2771 mdelay(host->tuning_delay);
2772
2773 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2774 if (!(ctrl & SDHCI_CTRL_EXEC_TUNING)) {
2775 if (ctrl & SDHCI_CTRL_TUNED_CLK)
2776 return 0; /* Success! */
2777 break;
2778 }
2779
2780 }
2781
2782 pr_info("%s: Tuning failed, falling back to fixed sampling clock\n",
2783 mmc_hostname(host->mmc));
2784 sdhci_reset_tuning(host);
2785 return -EAGAIN;
2786 }
2787
2788 int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
2789 {
2790 struct sdhci_host *host = mmc_priv(mmc);
2791 int err = 0;
2792 unsigned int tuning_count = 0;
2793 bool hs400_tuning;
2794
2795 hs400_tuning = host->flags & SDHCI_HS400_TUNING;
2796
2797 if (host->tuning_mode == SDHCI_TUNING_MODE_1)
2798 tuning_count = host->tuning_count;
2799
2800 /*
2801 * The Host Controller needs tuning in case of SDR104 and DDR50
2802 * mode, and for SDR50 mode when Use Tuning for SDR50 is set in
2803 * the Capabilities register.
2804 * If the Host Controller supports the HS200 mode then the
2805 * tuning function has to be executed.
2806 */
2807 switch (host->timing) {
2808 /* HS400 tuning is done in HS200 mode */
2809 case MMC_TIMING_MMC_HS400:
2810 err = -EINVAL;
2811 goto out;
2812
2813 case MMC_TIMING_MMC_HS200:
2814 /*
2815 * Periodic re-tuning for HS400 is not expected to be needed, so
2816 * disable it here.
2817 */
2818 if (hs400_tuning)
2819 tuning_count = 0;
2820 break;
2821
2822 case MMC_TIMING_UHS_SDR104:
2823 case MMC_TIMING_UHS_DDR50:
2824 break;
2825
2826 case MMC_TIMING_UHS_SDR50:
2827 if (host->flags & SDHCI_SDR50_NEEDS_TUNING)
2828 break;
2829 fallthrough;
2830
2831 default:
2832 goto out;
2833 }
2834
2835 if (host->ops->platform_execute_tuning) {
2836 err = host->ops->platform_execute_tuning(host, opcode);
2837 goto out;
2838 }
2839
2840 mmc->retune_period = tuning_count;
2841
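	/*
	 * Descriptive note: a negative tuning_delay means "not set by the
	 * driver"; default to 1 ms between cycles for SD CMD19 tuning and
	 * to no delay otherwise.
	 */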
2842 if (host->tuning_delay < 0)
2843 host->tuning_delay = opcode == MMC_SEND_TUNING_BLOCK;
2844
2845 sdhci_start_tuning(host);
2846
2847 host->tuning_err = __sdhci_execute_tuning(host, opcode);
2848
2849 sdhci_end_tuning(host);
2850 out:
2851 host->flags &= ~SDHCI_HS400_TUNING;
2852
2853 return err;
2854 }
2855 EXPORT_SYMBOL_GPL(sdhci_execute_tuning);
2856
2857 static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable)
2858 {
2859 /* Host Controller v3.00 defines preset value registers */
2860 if (host->version < SDHCI_SPEC_300)
2861 return;
2862
2863 /*
2864 * Only change the Preset Value Enable state when the requested state
2865 * differs from the current one; otherwise, bail out.
2866 */
2867 if (host->preset_enabled != enable) {
2868 u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2869
2870 if (enable)
2871 ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE;
2872 else
2873 ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;
2874
2875 sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2876
2877 if (enable)
2878 host->flags |= SDHCI_PV_ENABLED;
2879 else
2880 host->flags &= ~SDHCI_PV_ENABLED;
2881
2882 host->preset_enabled = enable;
2883 }
2884 }
2885
2886 static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
2887 int err)
2888 {
2889 struct mmc_data *data = mrq->data;
2890
2891 if (data->host_cookie != COOKIE_UNMAPPED)
2892 dma_unmap_sg(mmc_dev(mmc), data->sg, data->sg_len,
2893 mmc_get_dma_dir(data));
2894
2895 data->host_cookie = COOKIE_UNMAPPED;
2896 }
2897
2898 static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq)
2899 {
2900 struct sdhci_host *host = mmc_priv(mmc);
2901
2902 mrq->data->host_cookie = COOKIE_UNMAPPED;
2903
2904 /*
2905 * No pre-mapping in the pre hook if we're using the bounce buffer,
2906 * for that we would need two bounce buffers since one buffer is
2907 * in flight when this is getting called.
2908 */
2909 if (host->flags & SDHCI_REQ_USE_DMA && !host->bounce_buffer)
2910 sdhci_pre_dma_transfer(host, mrq->data, COOKIE_PRE_MAPPED);
2911 }
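
/*
 * Descriptive note on the cookie life cycle: data->host_cookie starts as
 * COOKIE_UNMAPPED; sdhci_pre_req() may advance it to COOKIE_PRE_MAPPED so
 * that the DMA mapping survives until sdhci_post_req(), whereas mappings
 * created while the request is processed are COOKIE_MAPPED and are torn
 * down in sdhci_request_done().
 */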
2912
2913 static void sdhci_error_out_mrqs(struct sdhci_host *host, int err)
2914 {
2915 if (host->data_cmd) {
2916 host->data_cmd->error = err;
2917 sdhci_finish_mrq(host, host->data_cmd->mrq);
2918 }
2919
2920 if (host->cmd) {
2921 host->cmd->error = err;
2922 sdhci_finish_mrq(host, host->cmd->mrq);
2923 }
2924 }
2925
2926 static void sdhci_card_event(struct mmc_host *mmc)
2927 {
2928 struct sdhci_host *host = mmc_priv(mmc);
2929 unsigned long flags;
2930 int present;
2931
2932 /* First check if the client has provided its own card event */
2933 if (host->ops->card_event)
2934 host->ops->card_event(host);
2935
2936 present = mmc->ops->get_cd(mmc);
2937
2938 spin_lock_irqsave(&host->lock, flags);
2939
2940 /* Check sdhci_has_requests() first in case we are runtime suspended */
2941 if (sdhci_has_requests(host) && !present) {
2942 pr_err("%s: Card removed during transfer!\n",
2943 mmc_hostname(mmc));
2944 pr_err("%s: Resetting controller.\n",
2945 mmc_hostname(mmc));
2946
2947 sdhci_do_reset(host, SDHCI_RESET_CMD);
2948 sdhci_do_reset(host, SDHCI_RESET_DATA);
2949
2950 sdhci_error_out_mrqs(host, -ENOMEDIUM);
2951 }
2952
2953 spin_unlock_irqrestore(&host->lock, flags);
2954 }
2955
2956 static const struct mmc_host_ops sdhci_ops = {
2957 .request = sdhci_request,
2958 .post_req = sdhci_post_req,
2959 .pre_req = sdhci_pre_req,
2960 .set_ios = sdhci_set_ios,
2961 .get_cd = sdhci_get_cd,
2962 .get_ro = sdhci_get_ro,
2963 .hw_reset = sdhci_hw_reset,
2964 .enable_sdio_irq = sdhci_enable_sdio_irq,
2965 .ack_sdio_irq = sdhci_ack_sdio_irq,
2966 .start_signal_voltage_switch = sdhci_start_signal_voltage_switch,
2967 .prepare_hs400_tuning = sdhci_prepare_hs400_tuning,
2968 .execute_tuning = sdhci_execute_tuning,
2969 .card_event = sdhci_card_event,
2970 .card_busy = sdhci_card_busy,
2971 };
2972
2973 /*****************************************************************************\
2974 * *
2975 * Request done *
2976 * *
2977 \*****************************************************************************/
2978
2979 static bool sdhci_request_done(struct sdhci_host *host)
2980 {
2981 unsigned long flags;
2982 struct mmc_request *mrq;
2983 int i;
2984
2985 spin_lock_irqsave(&host->lock, flags);
2986
2987 for (i = 0; i < SDHCI_MAX_MRQS; i++) {
2988 mrq = host->mrqs_done[i];
2989 if (mrq)
2990 break;
2991 }
2992
2993 if (!mrq) {
2994 spin_unlock_irqrestore(&host->lock, flags);
2995 return true;
2996 }
2997
2998 /*
2999 * The controller needs a reset of internal state machines
3000 * upon error conditions.
3001 */
3002 if (sdhci_needs_reset(host, mrq)) {
3003 /*
3004 * Do not finish until command and data lines are available for
3005 * reset. Note there can only be one other mrq, so it cannot
3006 * also be in mrqs_done, otherwise host->cmd and host->data_cmd
3007 * would both be null.
3008 */
3009 if (host->cmd || host->data_cmd) {
3010 spin_unlock_irqrestore(&host->lock, flags);
3011 return true;
3012 }
3013
3014 /* Some controllers need this kick or reset won't work here */
3015 if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
3016 /* This is to force an update */
3017 host->ops->set_clock(host, host->clock);
3018
3019 /*
3020 * Spec says we should do both at the same time, but Ricoh
3021 * controllers do not like that.
3022 */
3023 sdhci_do_reset(host, SDHCI_RESET_CMD);
3024 sdhci_do_reset(host, SDHCI_RESET_DATA);
3025
3026 host->pending_reset = false;
3027 }
3028
3029 /*
3030 * Always unmap the data buffers if they were mapped by
3031 * sdhci_prepare_data() whenever we finish with a request.
3032 * This avoids leaking DMA mappings on error.
3033 */
3034 if (host->flags & SDHCI_REQ_USE_DMA) {
3035 struct mmc_data *data = mrq->data;
3036
3037 if (host->use_external_dma && data &&
3038 (mrq->cmd->error || data->error)) {
3039 struct dma_chan *chan = sdhci_external_dma_channel(host, data);
3040
3041 host->mrqs_done[i] = NULL;
3042 spin_unlock_irqrestore(&host->lock, flags);
3043 dmaengine_terminate_sync(chan);
3044 spin_lock_irqsave(&host->lock, flags);
3045 sdhci_set_mrq_done(host, mrq);
3046 }
3047
3048 if (data && data->host_cookie == COOKIE_MAPPED) {
3049 if (host->bounce_buffer) {
3050 /*
3051 * On reads, copy the bounced data into the
3052 * sglist
3053 */
3054 if (mmc_get_dma_dir(data) == DMA_FROM_DEVICE) {
3055 unsigned int length = data->bytes_xfered;
3056
3057 if (length > host->bounce_buffer_size) {
3058 pr_err("%s: bounce buffer is %u bytes but DMA claims to have transferred %u bytes\n",
3059 mmc_hostname(host->mmc),
3060 host->bounce_buffer_size,
3061 data->bytes_xfered);
3062 /* Cap it down and continue */
3063 length = host->bounce_buffer_size;
3064 }
3065 dma_sync_single_for_cpu(
3066 mmc_dev(host->mmc),
3067 host->bounce_addr,
3068 host->bounce_buffer_size,
3069 DMA_FROM_DEVICE);
3070 sg_copy_from_buffer(data->sg,
3071 data->sg_len,
3072 host->bounce_buffer,
3073 length);
3074 } else {
3075 /* No copying, just switch ownership */
3076 dma_sync_single_for_cpu(
3077 mmc_dev(host->mmc),
3078 host->bounce_addr,
3079 host->bounce_buffer_size,
3080 mmc_get_dma_dir(data));
3081 }
3082 } else {
3083 /* Unmap the raw data */
3084 dma_unmap_sg(mmc_dev(host->mmc), data->sg,
3085 data->sg_len,
3086 mmc_get_dma_dir(data));
3087 }
3088 data->host_cookie = COOKIE_UNMAPPED;
3089 }
3090 }
3091
3092 host->mrqs_done[i] = NULL;
3093
3094 spin_unlock_irqrestore(&host->lock, flags);
3095
3096 if (host->ops->request_done)
3097 host->ops->request_done(host, mrq);
3098 else
3099 mmc_request_done(host->mmc, mrq);
3100
3101 return false;
3102 }
3103
3104 static void sdhci_complete_work(struct work_struct *work)
3105 {
3106 struct sdhci_host *host = container_of(work, struct sdhci_host,
3107 complete_work);
3108
3109 while (!sdhci_request_done(host))
3110 ;
3111 }
3112
3113 static void sdhci_timeout_timer(struct timer_list *t)
3114 {
3115 struct sdhci_host *host;
3116 unsigned long flags;
3117
3118 host = from_timer(host, t, timer);
3119
3120 spin_lock_irqsave(&host->lock, flags);
3121
3122 if (host->cmd && !sdhci_data_line_cmd(host->cmd)) {
3123 pr_err("%s: Timeout waiting for hardware cmd interrupt.\n",
3124 mmc_hostname(host->mmc));
3125 sdhci_dumpregs(host);
3126
3127 host->cmd->error = -ETIMEDOUT;
3128 sdhci_finish_mrq(host, host->cmd->mrq);
3129 }
3130
3131 spin_unlock_irqrestore(&host->lock, flags);
3132 }
3133
3134 static void sdhci_timeout_data_timer(struct timer_list *t)
3135 {
3136 struct sdhci_host *host;
3137 unsigned long flags;
3138
3139 host = from_timer(host, t, data_timer);
3140
3141 spin_lock_irqsave(&host->lock, flags);
3142
3143 if (host->data || host->data_cmd ||
3144 (host->cmd && sdhci_data_line_cmd(host->cmd))) {
3145 pr_err("%s: Timeout waiting for hardware interrupt.\n",
3146 mmc_hostname(host->mmc));
3147 sdhci_dumpregs(host);
3148
3149 if (host->data) {
3150 host->data->error = -ETIMEDOUT;
3151 __sdhci_finish_data(host, true);
3152 queue_work(host->complete_wq, &host->complete_work);
3153 } else if (host->data_cmd) {
3154 host->data_cmd->error = -ETIMEDOUT;
3155 sdhci_finish_mrq(host, host->data_cmd->mrq);
3156 } else {
3157 host->cmd->error = -ETIMEDOUT;
3158 sdhci_finish_mrq(host, host->cmd->mrq);
3159 }
3160 }
3161
3162 spin_unlock_irqrestore(&host->lock, flags);
3163 }
3164
3165 /*****************************************************************************\
3166 * *
3167 * Interrupt handling *
3168 * *
3169 \*****************************************************************************/
3170
3171 static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *intmask_p)
3172 {
3173 /* Handle auto-CMD12 error */
3174 if (intmask & SDHCI_INT_AUTO_CMD_ERR && host->data_cmd) {
3175 struct mmc_request *mrq = host->data_cmd->mrq;
3176 u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS);
3177 int data_err_bit = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ?
3178 SDHCI_INT_DATA_TIMEOUT :
3179 SDHCI_INT_DATA_CRC;
3180
3181 /* Treat auto-CMD12 error the same as data error */
3182 if (!mrq->sbc && (host->flags & SDHCI_AUTO_CMD12)) {
3183 *intmask_p |= data_err_bit;
3184 return;
3185 }
3186 }
3187
3188 if (!host->cmd) {
3189 /*
3190 * SDHCI recovers from errors by resetting the cmd and data
3191 * circuits. Until that is done, there very well might be more
3192 * interrupts, so ignore them in that case.
3193 */
3194 if (host->pending_reset)
3195 return;
3196 pr_err("%s: Got command interrupt 0x%08x even though no command operation was in progress.\n",
3197 mmc_hostname(host->mmc), (unsigned)intmask);
3198 sdhci_dumpregs(host);
3199 return;
3200 }
3201
3202 if (intmask & (SDHCI_INT_TIMEOUT | SDHCI_INT_CRC |
3203 SDHCI_INT_END_BIT | SDHCI_INT_INDEX)) {
3204 if (intmask & SDHCI_INT_TIMEOUT)
3205 host->cmd->error = -ETIMEDOUT;
3206 else
3207 host->cmd->error = -EILSEQ;
3208
3209 /* Treat data command CRC error the same as data CRC error */
3210 if (host->cmd->data &&
3211 (intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) ==
3212 SDHCI_INT_CRC) {
3213 host->cmd = NULL;
3214 *intmask_p |= SDHCI_INT_DATA_CRC;
3215 return;
3216 }
3217
3218 __sdhci_finish_mrq(host, host->cmd->mrq);
3219 return;
3220 }
3221
3222 /* Handle auto-CMD23 error */
3223 if (intmask & SDHCI_INT_AUTO_CMD_ERR) {
3224 struct mmc_request *mrq = host->cmd->mrq;
3225 u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS);
3226 int err = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ?
3227 -ETIMEDOUT :
3228 -EILSEQ;
3229
3230 if (mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) {
3231 mrq->sbc->error = err;
3232 __sdhci_finish_mrq(host, mrq);
3233 return;
3234 }
3235 }
3236
3237 if (intmask & SDHCI_INT_RESPONSE)
3238 sdhci_finish_command(host);
3239 }
3240
3241 static void sdhci_adma_show_error(struct sdhci_host *host)
3242 {
3243 void *desc = host->adma_table;
3244 dma_addr_t dma = host->adma_addr;
3245
3246 sdhci_dumpregs(host);
3247
3248 while (true) {
3249 struct sdhci_adma2_64_desc *dma_desc = desc;
3250
3251 if (host->flags & SDHCI_USE_64_BIT_DMA)
3252 SDHCI_DUMP("%08llx: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n",
3253 (unsigned long long)dma,
3254 le32_to_cpu(dma_desc->addr_hi),
3255 le32_to_cpu(dma_desc->addr_lo),
3256 le16_to_cpu(dma_desc->len),
3257 le16_to_cpu(dma_desc->cmd));
3258 else
3259 SDHCI_DUMP("%08llx: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
3260 (unsigned long long)dma,
3261 le32_to_cpu(dma_desc->addr_lo),
3262 le16_to_cpu(dma_desc->len),
3263 le16_to_cpu(dma_desc->cmd));
3264
3265 desc += host->desc_sz;
3266 dma += host->desc_sz;
3267
3268 if (dma_desc->cmd & cpu_to_le16(ADMA2_END))
3269 break;
3270 }
3271 }
3272
3273 static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
3274 {
3275 u32 command;
3276
3277 /* CMD19 generates _only_ Buffer Read Ready interrupt */
3278 if (intmask & SDHCI_INT_DATA_AVAIL) {
3279 command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND));
3280 if (command == MMC_SEND_TUNING_BLOCK ||
3281 command == MMC_SEND_TUNING_BLOCK_HS200) {
3282 host->tuning_done = 1;
3283 wake_up(&host->buf_ready_int);
3284 return;
3285 }
3286 }
3287
3288 if (!host->data) {
3289 struct mmc_command *data_cmd = host->data_cmd;
3290
3291 /*
3292 * The "data complete" interrupt is also used to
3293 * indicate that a busy state has ended. See comment
3294 * above in sdhci_cmd_irq().
3295 */
3296 if (data_cmd && (data_cmd->flags & MMC_RSP_BUSY)) {
3297 if (intmask & SDHCI_INT_DATA_TIMEOUT) {
3298 host->data_cmd = NULL;
3299 data_cmd->error = -ETIMEDOUT;
3300 __sdhci_finish_mrq(host, data_cmd->mrq);
3301 return;
3302 }
3303 if (intmask & SDHCI_INT_DATA_END) {
3304 host->data_cmd = NULL;
3305 /*
3306 * Some cards handle busy-end interrupt
3307 * before the command completed, so make
3308 * sure we do things in the proper order.
3309 */
3310 if (host->cmd == data_cmd)
3311 return;
3312
3313 __sdhci_finish_mrq(host, data_cmd->mrq);
3314 return;
3315 }
3316 }
3317
3318 /*
3319 * SDHCI recovers from errors by resetting the cmd and data
3320 * circuits. Until that is done, there very well might be more
3321 * interrupts, so ignore them in that case.
3322 */
3323 if (host->pending_reset)
3324 return;
3325
3326 pr_err("%s: Got data interrupt 0x%08x even though no data operation was in progress.\n",
3327 mmc_hostname(host->mmc), (unsigned)intmask);
3328 sdhci_dumpregs(host);
3329
3330 return;
3331 }
3332
3333 if (intmask & SDHCI_INT_DATA_TIMEOUT)
3334 host->data->error = -ETIMEDOUT;
3335 else if (intmask & SDHCI_INT_DATA_END_BIT)
3336 host->data->error = -EILSEQ;
3337 else if ((intmask & SDHCI_INT_DATA_CRC) &&
3338 SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
3339 != MMC_BUS_TEST_R)
3340 host->data->error = -EILSEQ;
3341 else if (intmask & SDHCI_INT_ADMA_ERROR) {
3342 pr_err("%s: ADMA error: 0x%08x\n", mmc_hostname(host->mmc),
3343 intmask);
3344 sdhci_adma_show_error(host);
3345 host->data->error = -EIO;
3346 if (host->ops->adma_workaround)
3347 host->ops->adma_workaround(host, intmask);
3348 }
3349
3350 if (host->data->error)
3351 sdhci_finish_data(host);
3352 else {
3353 if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
3354 sdhci_transfer_pio(host);
3355
3356 /*
3357 * We currently don't do anything fancy with DMA
3358 * boundaries, but as we can't disable the feature
3359 * we need to at least restart the transfer.
3360 *
3361 * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS)
3362 * should return a valid address to continue from, but as
3363 * some controllers are faulty, don't trust them.
3364 */
3365 if (intmask & SDHCI_INT_DMA_END) {
3366 dma_addr_t dmastart, dmanow;
3367
3368 dmastart = sdhci_sdma_address(host);
3369 dmanow = dmastart + host->data->bytes_xfered;
3370 /*
3371 * Force update to the next DMA block boundary. (E.g. with the default
3372 * 512 KiB boundary, 0x01072000 rounds up to 0x01080000.) */
3373 dmanow = (dmanow &
3374 ~((dma_addr_t)SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
3375 SDHCI_DEFAULT_BOUNDARY_SIZE;
3376 host->data->bytes_xfered = dmanow - dmastart;
3377 DBG("DMA base %pad, transferred 0x%06x bytes, next %pad\n",
3378 &dmastart, host->data->bytes_xfered, &dmanow);
3379 sdhci_set_sdma_addr(host, dmanow);
3380 }
3381
3382 if (intmask & SDHCI_INT_DATA_END) {
3383 if (host->cmd == host->data_cmd) {
3384 /*
3385 * Data managed to finish before the
3386 * command completed. Make sure we do
3387 * things in the proper order.
3388 */
3389 host->data_early = 1;
3390 } else {
3391 sdhci_finish_data(host);
3392 }
3393 }
3394 }
3395 }
3396
3397 static inline bool sdhci_defer_done(struct sdhci_host *host,
3398 struct mmc_request *mrq)
3399 {
3400 struct mmc_data *data = mrq->data;
3401
3402 return host->pending_reset || host->always_defer_done ||
3403 ((host->flags & SDHCI_REQ_USE_DMA) && data &&
3404 data->host_cookie == COOKIE_MAPPED);
3405 }
3406
3407 static irqreturn_t sdhci_irq(int irq, void *dev_id)
3408 {
3409 struct mmc_request *mrqs_done[SDHCI_MAX_MRQS] = {0};
3410 irqreturn_t result = IRQ_NONE;
3411 struct sdhci_host *host = dev_id;
3412 u32 intmask, mask, unexpected = 0;
3413 int max_loops = 16;
3414 int i;
3415
3416 spin_lock(&host->lock);
3417
3418 if (host->runtime_suspended) {
3419 spin_unlock(&host->lock);
3420 return IRQ_NONE;
3421 }
3422
3423 intmask = sdhci_readl(host, SDHCI_INT_STATUS);
3424 if (!intmask || intmask == 0xffffffff) {
3425 result = IRQ_NONE;
3426 goto out;
3427 }
3428
3429 do {
3430 DBG("IRQ status 0x%08x\n", intmask);
3431
3432 if (host->ops->irq) {
3433 intmask = host->ops->irq(host, intmask);
3434 if (!intmask)
3435 goto cont;
3436 }
3437
3438 /* Clear selected interrupts. */
3439 mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
3440 SDHCI_INT_BUS_POWER);
3441 sdhci_writel(host, mask, SDHCI_INT_STATUS);
3442
3443 if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
3444 u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
3445 SDHCI_CARD_PRESENT;
3446
3447 /*
3448 * There is an observation on i.MX eSDHC: the INSERT
3449 * bit will be set again immediately after it gets
3450 * cleared if a card is inserted. We have to mask
3451 * the irq to prevent an interrupt storm which would
3452 * freeze the system. The REMOVE bit suffers from
3453 * the same situation.
3454 *
3455 * More testing is needed here to ensure it works
3456 * for other platforms though.
3457 */
3458 host->ier &= ~(SDHCI_INT_CARD_INSERT |
3459 SDHCI_INT_CARD_REMOVE);
3460 host->ier |= present ? SDHCI_INT_CARD_REMOVE :
3461 SDHCI_INT_CARD_INSERT;
3462 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
3463 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
3464
3465 sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
3466 SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);
3467
3468 host->thread_isr |= intmask & (SDHCI_INT_CARD_INSERT |
3469 SDHCI_INT_CARD_REMOVE);
3470 result = IRQ_WAKE_THREAD;
3471 }
3472
3473 if (intmask & SDHCI_INT_CMD_MASK)
3474 sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK, &intmask);
3475
3476 if (intmask & SDHCI_INT_DATA_MASK)
3477 sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
3478
3479 if (intmask & SDHCI_INT_BUS_POWER)
3480 pr_err("%s: Card is consuming too much power!\n",
3481 mmc_hostname(host->mmc));
3482
3483 if (intmask & SDHCI_INT_RETUNE)
3484 mmc_retune_needed(host->mmc);
3485
3486 if ((intmask & SDHCI_INT_CARD_INT) &&
3487 (host->ier & SDHCI_INT_CARD_INT)) {
3488 sdhci_enable_sdio_irq_nolock(host, false);
3489 sdio_signal_irq(host->mmc);
3490 }
3491
3492 intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
3493 SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
3494 SDHCI_INT_ERROR | SDHCI_INT_BUS_POWER |
3495 SDHCI_INT_RETUNE | SDHCI_INT_CARD_INT);
3496
3497 if (intmask) {
3498 unexpected |= intmask;
3499 sdhci_writel(host, intmask, SDHCI_INT_STATUS);
3500 }
3501 cont:
3502 if (result == IRQ_NONE)
3503 result = IRQ_HANDLED;
3504
3505 intmask = sdhci_readl(host, SDHCI_INT_STATUS);
3506 } while (intmask && --max_loops);
3507
3508 /* Determine if mrqs can be completed immediately */
3509 for (i = 0; i < SDHCI_MAX_MRQS; i++) {
3510 struct mmc_request *mrq = host->mrqs_done[i];
3511
3512 if (!mrq)
3513 continue;
3514
3515 if (sdhci_defer_done(host, mrq)) {
3516 result = IRQ_WAKE_THREAD;
3517 } else {
3518 mrqs_done[i] = mrq;
3519 host->mrqs_done[i] = NULL;
3520 }
3521 }
3522 out:
3523 if (host->deferred_cmd)
3524 result = IRQ_WAKE_THREAD;
3525
3526 spin_unlock(&host->lock);
3527
3528 /* Process mrqs ready for immediate completion */
3529 for (i = 0; i < SDHCI_MAX_MRQS; i++) {
3530 if (!mrqs_done[i])
3531 continue;
3532
3533 if (host->ops->request_done)
3534 host->ops->request_done(host, mrqs_done[i]);
3535 else
3536 mmc_request_done(host->mmc, mrqs_done[i]);
3537 }
3538
3539 if (unexpected) {
3540 pr_err("%s: Unexpected interrupt 0x%08x.\n",
3541 mmc_hostname(host->mmc), unexpected);
3542 sdhci_dumpregs(host);
3543 }
3544
3545 return result;
3546 }
3547
3548 static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
3549 {
3550 struct sdhci_host *host = dev_id;
3551 struct mmc_command *cmd;
3552 unsigned long flags;
3553 u32 isr;
3554
3555 while (!sdhci_request_done(host))
3556 ;
3557
3558 spin_lock_irqsave(&host->lock, flags);
3559
3560 isr = host->thread_isr;
3561 host->thread_isr = 0;
3562
3563 cmd = host->deferred_cmd;
3564 if (cmd && !sdhci_send_command_retry(host, cmd, flags))
3565 sdhci_finish_mrq(host, cmd->mrq);
3566
3567 spin_unlock_irqrestore(&host->lock, flags);
3568
3569 if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
3570 struct mmc_host *mmc = host->mmc;
3571
3572 mmc->ops->card_event(mmc);
3573 mmc_detect_change(mmc, msecs_to_jiffies(200));
3574 }
3575
3576 return IRQ_HANDLED;
3577 }
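
/*
 * The two handlers above are registered as a pair, as done in
 * sdhci_resume_host() below:
 *
 *	request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq,
 *			     IRQF_SHARED, mmc_hostname(mmc), host);
 *
 * sdhci_irq() runs in hard-irq context and hands slow work (card-detect
 * events, deferred commands, requests that need DMA unmapping) to
 * sdhci_thread_irq() by returning IRQ_WAKE_THREAD.
 */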
3578
3579 /*****************************************************************************\
3580 * *
3581 * Suspend/resume *
3582 * *
3583 \*****************************************************************************/
3584
3585 #ifdef CONFIG_PM
3586
3587 static bool sdhci_cd_irq_can_wakeup(struct sdhci_host *host)
3588 {
3589 return mmc_card_is_removable(host->mmc) &&
3590 !(host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
3591 !mmc_can_gpio_cd(host->mmc);
3592 }
3593
3594 /*
3595 * To enable wakeup events, the corresponding events have to be enabled in
3596 * the Interrupt Status Enable register too. See 'Table 1-6: Wakeup Signal
3597 * Table' in the SD Host Controller Standard Specification.
3598 * It is useless to restore SDHCI_INT_ENABLE state in
3599 * sdhci_disable_irq_wakeups() since it will be set by
3600 * sdhci_enable_card_detection() or sdhci_init().
3601 */
3602 static bool sdhci_enable_irq_wakeups(struct sdhci_host *host)
3603 {
3604 u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE |
3605 SDHCI_WAKE_ON_INT;
3606 u32 irq_val = 0;
3607 u8 wake_val = 0;
3608 u8 val;
3609
3610 if (sdhci_cd_irq_can_wakeup(host)) {
3611 wake_val |= SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE;
3612 irq_val |= SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE;
3613 }
3614
3615 if (mmc_card_wake_sdio_irq(host->mmc)) {
3616 wake_val |= SDHCI_WAKE_ON_INT;
3617 irq_val |= SDHCI_INT_CARD_INT;
3618 }
3619
3620 if (!irq_val)
3621 return false;
3622
3623 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
3624 val &= ~mask;
3625 val |= wake_val;
3626 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
3627
3628 sdhci_writel(host, irq_val, SDHCI_INT_ENABLE);
3629
3630 host->irq_wake_enabled = !enable_irq_wake(host->irq);
3631
3632 return host->irq_wake_enabled;
3633 }
3634
3635 static void sdhci_disable_irq_wakeups(struct sdhci_host *host)
3636 {
3637 u8 val;
3638 u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
3639 | SDHCI_WAKE_ON_INT;
3640
3641 val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
3642 val &= ~mask;
3643 sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
3644
3645 disable_irq_wake(host->irq);
3646
3647 host->irq_wake_enabled = false;
3648 }
3649
3650 int sdhci_suspend_host(struct sdhci_host *host)
3651 {
3652 sdhci_disable_card_detection(host);
3653
3654 mmc_retune_timer_stop(host->mmc);
3655
3656 if (!device_may_wakeup(mmc_dev(host->mmc)) ||
3657 !sdhci_enable_irq_wakeups(host)) {
3658 host->ier = 0;
3659 sdhci_writel(host, 0, SDHCI_INT_ENABLE);
3660 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
3661 free_irq(host->irq, host);
3662 }
3663
3664 return 0;
3665 }
3666
3667 EXPORT_SYMBOL_GPL(sdhci_suspend_host);
3668
3669 int sdhci_resume_host(struct sdhci_host *host)
3670 {
3671 struct mmc_host *mmc = host->mmc;
3672 int ret = 0;
3673
3674 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
3675 if (host->ops->enable_dma)
3676 host->ops->enable_dma(host);
3677 }
3678
3679 if ((mmc->pm_flags & MMC_PM_KEEP_POWER) &&
3680 (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) {
3681 /* Card keeps power but host controller does not */
3682 sdhci_init(host, 0);
3683 host->pwr = 0;
3684 host->clock = 0;
3685 mmc->ops->set_ios(mmc, &mmc->ios);
3686 } else {
3687 sdhci_init(host, (mmc->pm_flags & MMC_PM_KEEP_POWER));
3688 }
3689
3690 if (host->irq_wake_enabled) {
3691 sdhci_disable_irq_wakeups(host);
3692 } else {
3693 ret = request_threaded_irq(host->irq, sdhci_irq,
3694 sdhci_thread_irq, IRQF_SHARED,
3695 mmc_hostname(mmc), host);
3696 if (ret)
3697 return ret;
3698 }
3699
3700 sdhci_enable_card_detection(host);
3701
3702 return ret;
3703 }
3704
3705 EXPORT_SYMBOL_GPL(sdhci_resume_host);
3706
3707 int sdhci_runtime_suspend_host(struct sdhci_host *host)
3708 {
3709 unsigned long flags;
3710
3711 mmc_retune_timer_stop(host->mmc);
3712
3713 spin_lock_irqsave(&host->lock, flags);
3714 host->ier &= SDHCI_INT_CARD_INT;
3715 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
3716 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
3717 spin_unlock_irqrestore(&host->lock, flags);
3718
3719 synchronize_hardirq(host->irq);
3720
3721 spin_lock_irqsave(&host->lock, flags);
3722 host->runtime_suspended = true;
3723 spin_unlock_irqrestore(&host->lock, flags);
3724
3725 return 0;
3726 }
3727 EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host);
3728
3729 int sdhci_runtime_resume_host(struct sdhci_host *host, int soft_reset)
3730 {
3731 struct mmc_host *mmc = host->mmc;
3732 unsigned long flags;
3733 int host_flags = host->flags;
3734
3735 if (host_flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
3736 if (host->ops->enable_dma)
3737 host->ops->enable_dma(host);
3738 }
3739
3740 sdhci_init(host, soft_reset);
3741
3742 if (mmc->ios.power_mode != MMC_POWER_UNDEFINED &&
3743 mmc->ios.power_mode != MMC_POWER_OFF) {
3744 /* Force clock and power re-program */
3745 host->pwr = 0;
3746 host->clock = 0;
3747 mmc->ops->start_signal_voltage_switch(mmc, &mmc->ios);
3748 mmc->ops->set_ios(mmc, &mmc->ios);
3749
3750 if ((host_flags & SDHCI_PV_ENABLED) &&
3751 !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
3752 spin_lock_irqsave(&host->lock, flags);
3753 sdhci_enable_preset_value(host, true);
3754 spin_unlock_irqrestore(&host->lock, flags);
3755 }
3756
3757 if ((mmc->caps2 & MMC_CAP2_HS400_ES) &&
3758 mmc->ops->hs400_enhanced_strobe)
3759 mmc->ops->hs400_enhanced_strobe(mmc, &mmc->ios);
3760 }
3761
3762 spin_lock_irqsave(&host->lock, flags);
3763
3764 host->runtime_suspended = false;
3765
3766 /* Enable SDIO IRQ */
3767 if (sdio_irq_claimed(mmc))
3768 sdhci_enable_sdio_irq_nolock(host, true);
3769
3770 /* Enable Card Detection */
3771 sdhci_enable_card_detection(host);
3772
3773 spin_unlock_irqrestore(&host->lock, flags);
3774
3775 return 0;
3776 }
3777 EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host);
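
/*
 * Illustrative sketch (not part of this file): runtime PM callbacks in a
 * glue driver usually wrap the two helpers above, gating controller clocks
 * in between. The example_* names are hypothetical; a real driver would
 * reference these via SET_RUNTIME_PM_OPS in its dev_pm_ops.
 */
static int __maybe_unused example_sdhci_runtime_suspend(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);
	int ret;

	ret = sdhci_runtime_suspend_host(host);
	if (ret)
		return ret;

	/* a real driver would typically gate controller clocks here */
	return 0;
}

static int __maybe_unused example_sdhci_runtime_resume(struct device *dev)
{
	struct sdhci_host *host = dev_get_drvdata(dev);

	/* ... and ungate them here before touching registers */

	/* soft_reset == 0 requests a full controller re-init */
	return sdhci_runtime_resume_host(host, 0);
}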
3778
3779 #endif /* CONFIG_PM */
3780
3781 /*****************************************************************************\
3782 * *
3783 * Command Queue Engine (CQE) helpers *
3784 * *
3785 \*****************************************************************************/
3786
3787 void sdhci_cqe_enable(struct mmc_host *mmc)
3788 {
3789 struct sdhci_host *host = mmc_priv(mmc);
3790 unsigned long flags;
3791 u8 ctrl;
3792
3793 spin_lock_irqsave(&host->lock, flags);
3794
3795 ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
3796 ctrl &= ~SDHCI_CTRL_DMA_MASK;
3797 /*
3798 * Hosts from v4.10 support the ADMA3 DMA type.
3799 * ADMA3 uses integrated descriptors, which suit command queuing
3800 * better since command and transfer descriptors are fetched together.
3801 */
3802 if (host->v4_mode && (host->caps1 & SDHCI_CAN_DO_ADMA3))
3803 ctrl |= SDHCI_CTRL_ADMA3;
3804 else if (host->flags & SDHCI_USE_64_BIT_DMA)
3805 ctrl |= SDHCI_CTRL_ADMA64;
3806 else
3807 ctrl |= SDHCI_CTRL_ADMA32;
3808 sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
3809
3810 sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, 512),
3811 SDHCI_BLOCK_SIZE);
3812
3813 /* Set maximum timeout */
3814 sdhci_set_timeout(host, NULL);
3815
3816 host->ier = host->cqe_ier;
3817
3818 sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
3819 sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
3820
3821 host->cqe_on = true;
3822
3823 pr_debug("%s: sdhci: CQE on, IRQ mask %#x, IRQ status %#x\n",
3824 mmc_hostname(mmc), host->ier,
3825 sdhci_readl(host, SDHCI_INT_STATUS));
3826
3827 spin_unlock_irqrestore(&host->lock, flags);
3828 }
3829 EXPORT_SYMBOL_GPL(sdhci_cqe_enable);
3830
3831 void sdhci_cqe_disable(struct mmc_host *mmc, bool recovery)
3832 {
3833 struct sdhci_host *host = mmc_priv(mmc);
3834 unsigned long flags;
3835
3836 spin_lock_irqsave(&host->lock, flags);
3837
3838 sdhci_set_default_irqs(host);
3839
3840 host->cqe_on = false;
3841
3842 if (recovery) {
3843 sdhci_do_reset(host, SDHCI_RESET_CMD);
3844 sdhci_do_reset(host, SDHCI_RESET_DATA);
3845 }
3846
3847 pr_debug("%s: sdhci: CQE off, IRQ mask %#x, IRQ status %#x\n",
3848 mmc_hostname(mmc), host->ier,
3849 sdhci_readl(host, SDHCI_INT_STATUS));
3850
3851 spin_unlock_irqrestore(&host->lock, flags);
3852 }
3853 EXPORT_SYMBOL_GPL(sdhci_cqe_disable);
3854
3855 bool sdhci_cqe_irq(struct sdhci_host *host, u32 intmask, int *cmd_error,
3856 int *data_error)
3857 {
3858 u32 mask;
3859
3860 if (!host->cqe_on)
3861 return false;
3862
3863 if (intmask & (SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC))
3864 *cmd_error = -EILSEQ;
3865 else if (intmask & SDHCI_INT_TIMEOUT)
3866 *cmd_error = -ETIMEDOUT;
3867 else
3868 *cmd_error = 0;
3869
3870 if (intmask & (SDHCI_INT_DATA_END_BIT | SDHCI_INT_DATA_CRC))
3871 *data_error = -EILSEQ;
3872 else if (intmask & SDHCI_INT_DATA_TIMEOUT)
3873 *data_error = -ETIMEDOUT;
3874 else if (intmask & SDHCI_INT_ADMA_ERROR)
3875 *data_error = -EIO;
3876 else
3877 *data_error = 0;
3878
3879 /* Clear selected interrupts. */
3880 mask = intmask & host->cqe_ier;
3881 sdhci_writel(host, mask, SDHCI_INT_STATUS);
3882
3883 if (intmask & SDHCI_INT_BUS_POWER)
3884 pr_err("%s: Card is consuming too much power!\n",
3885 mmc_hostname(host->mmc));
3886
3887 intmask &= ~(host->cqe_ier | SDHCI_INT_ERROR);
3888 if (intmask) {
3889 sdhci_writel(host, intmask, SDHCI_INT_STATUS);
3890 pr_err("%s: CQE: Unexpected interrupt 0x%08x.\n",
3891 mmc_hostname(host->mmc), intmask);
3892 sdhci_dumpregs(host);
3893 }
3894
3895 return true;
3896 }
3897 EXPORT_SYMBOL_GPL(sdhci_cqe_irq);
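
/*
 * Illustrative sketch (not part of this file): a vendor interrupt handler
 * with command queuing enabled would typically let sdhci_cqe_irq() filter
 * CQE interrupts first and then hand them to cqhci_irq(). The function
 * name below is hypothetical; cqhci_irq() is declared in cqhci.h.
 */
static u32 example_cqhci_irq(struct sdhci_host *host, u32 intmask)
{
	int cmd_error = 0;
	int data_error = 0;

	if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
		return intmask;	/* not a CQE interrupt, handle normally */

	cqhci_irq(host->mmc, intmask, cmd_error, data_error);

	return 0;
}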
3898
3899 /*****************************************************************************\
3900 * *
3901 * Device allocation/registration *
3902 * *
3903 \*****************************************************************************/
3904
3905 struct sdhci_host *sdhci_alloc_host(struct device *dev,
3906 size_t priv_size)
3907 {
3908 struct mmc_host *mmc;
3909 struct sdhci_host *host;
3910
3911 WARN_ON(dev == NULL);
3912
3913 mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev);
3914 if (!mmc)
3915 return ERR_PTR(-ENOMEM);
3916
3917 host = mmc_priv(mmc);
3918 host->mmc = mmc;
3919 host->mmc_host_ops = sdhci_ops;
3920 mmc->ops = &host->mmc_host_ops;
3921
3922 host->flags = SDHCI_SIGNALING_330;
3923
3924 host->cqe_ier = SDHCI_CQE_INT_MASK;
3925 host->cqe_err_ier = SDHCI_CQE_INT_ERR_MASK;
3926
3927 host->tuning_delay = -1;
3928 host->tuning_loop_count = MAX_TUNING_LOOP;
3929
3930 host->sdma_boundary = SDHCI_DEFAULT_BOUNDARY_ARG;
3931
3932 /*
3933 * The DMA table descriptor count is calculated as the maximum
3934 * number of segments times 2, to allow for an alignment
3935 * descriptor for each segment, plus 1 for a nop end descriptor.
3936 */
3937 host->adma_table_cnt = SDHCI_MAX_SEGS * 2 + 1;
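	/* e.g. with SDHCI_MAX_SEGS == 128 this yields 128 * 2 + 1 = 257 */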
3938
3939 return host;
3940 }
3941
3942 EXPORT_SYMBOL_GPL(sdhci_alloc_host);
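
/*
 * Illustrative sketch (not part of this file): priv_size reserves space
 * after struct sdhci_host for driver-private data, reachable through
 * sdhci_priv(). The structure and names below are hypothetical.
 */
struct example_sdhci_priv {
	void __iomem *extra_regs;
};

static struct sdhci_host *example_alloc(struct device *dev)
{
	struct sdhci_host *host;
	struct example_sdhci_priv *priv;

	host = sdhci_alloc_host(dev, sizeof(*priv));
	if (IS_ERR(host))
		return host;

	priv = sdhci_priv(host);	/* points at the reserved area */
	priv->extra_regs = NULL;

	return host;
}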
3943
3944 static int sdhci_set_dma_mask(struct sdhci_host *host)
3945 {
3946 struct mmc_host *mmc = host->mmc;
3947 struct device *dev = mmc_dev(mmc);
3948 int ret = -EINVAL;
3949
3950 if (host->quirks2 & SDHCI_QUIRK2_BROKEN_64_BIT_DMA)
3951 host->flags &= ~SDHCI_USE_64_BIT_DMA;
3952
3953 /* Try 64-bit mask if hardware is capable of it */
3954 if (host->flags & SDHCI_USE_64_BIT_DMA) {
3955 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
3956 if (ret) {
3957 pr_warn("%s: Failed to set 64-bit DMA mask.\n",
3958 mmc_hostname(mmc));
3959 host->flags &= ~SDHCI_USE_64_BIT_DMA;
3960 }
3961 }
3962
3963 /* 32-bit mask as default & fallback */
3964 if (ret) {
3965 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
3966 if (ret)
3967 pr_warn("%s: Failed to set 32-bit DMA mask.\n",
3968 mmc_hostname(mmc));
3969 }
3970
3971 return ret;
3972 }
3973
3974 void __sdhci_read_caps(struct sdhci_host *host, const u16 *ver,
3975 const u32 *caps, const u32 *caps1)
3976 {
3977 u16 v;
3978 u64 dt_caps_mask = 0;
3979 u64 dt_caps = 0;
3980
3981 if (host->read_caps)
3982 return;
3983
3984 host->read_caps = true;
3985
3986 if (debug_quirks)
3987 host->quirks = debug_quirks;
3988
3989 if (debug_quirks2)
3990 host->quirks2 = debug_quirks2;
3991
3992 sdhci_do_reset(host, SDHCI_RESET_ALL);
3993
3994 if (host->v4_mode)
3995 sdhci_do_enable_v4_mode(host);
3996
3997 device_property_read_u64(mmc_dev(host->mmc),
3998 "sdhci-caps-mask", &dt_caps_mask);
3999 device_property_read_u64(mmc_dev(host->mmc),
4000 "sdhci-caps", &dt_caps);
4001
4002 v = ver ? *ver : sdhci_readw(host, SDHCI_HOST_VERSION);
4003 host->version = (v & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT;
4004
4005 if (host->quirks & SDHCI_QUIRK_MISSING_CAPS)
4006 return;
4007
4008 if (caps) {
4009 host->caps = *caps;
4010 } else {
4011 host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
4012 host->caps &= ~lower_32_bits(dt_caps_mask);
4013 host->caps |= lower_32_bits(dt_caps);
4014 }
4015
4016 if (host->version < SDHCI_SPEC_300)
4017 return;
4018
4019 if (caps1) {
4020 host->caps1 = *caps1;
4021 } else {
4022 host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
4023 host->caps1 &= ~upper_32_bits(dt_caps_mask);
4024 host->caps1 |= upper_32_bits(dt_caps);
4025 }
4026 }
4027 EXPORT_SYMBOL_GPL(__sdhci_read_caps);
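
/*
 * Illustrative sketch (not part of this file): a glue driver for a
 * controller with broken capability registers can feed fixed values to
 * __sdhci_read_caps() instead of letting sdhci_setup_host() read the
 * hardware. The function name and capability choice below are hypothetical.
 */
static void example_read_caps(struct sdhci_host *host)
{
	static const u32 fixed_caps = SDHCI_CAN_DO_ADMA2 | SDHCI_CAN_VDD_330;

	__sdhci_read_caps(host, NULL, &fixed_caps, NULL);
}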
4028
4029 static void sdhci_allocate_bounce_buffer(struct sdhci_host *host)
4030 {
4031 struct mmc_host *mmc = host->mmc;
4032 unsigned int max_blocks;
4033 unsigned int bounce_size;
4034 int ret;
4035
4036 /*
4037 * Cap the bounce buffer at 64KB. Using a bigger bounce buffer
4038 * has diminishing returns, probably because SD/MMC cards are
4039 * usually optimized to handle requests of this size.
4040 */
4041 bounce_size = SZ_64K;
4042 /*
4043 * Shrink the bounce buffer to the maximum request size if that
4044 * is smaller; otherwise the maximum request size is later
4045 * clamped down to the bounce buffer size.
4046 */
4047 if (mmc->max_req_size < bounce_size)
4048 bounce_size = mmc->max_req_size;
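	/* e.g. a full 64 KiB buffer covers 65536 / 512 = 128 blocks */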
4049 max_blocks = bounce_size / 512;
4050
4051 /*
4052 * When only one segment is supported, a bounce buffer that
4053 * groups scattered reads/writes together can yield significant
4054 * speedups.
4055 */
4056 host->bounce_buffer = devm_kmalloc(mmc_dev(mmc),
4057 bounce_size,
4058 GFP_KERNEL);
4059 if (!host->bounce_buffer) {
4060 pr_err("%s: failed to allocate %u bytes for bounce buffer, falling back to single segments\n",
4061 mmc_hostname(mmc),
4062 bounce_size);
4063 /*
4064 * Returning early here makes sure we proceed with
4065 * mmc->max_segs == 1.
4066 */
4067 return;
4068 }
4069
4070 host->bounce_addr = dma_map_single(mmc_dev(mmc),
4071 host->bounce_buffer,
4072 bounce_size,
4073 DMA_BIDIRECTIONAL);
4074 ret = dma_mapping_error(mmc_dev(mmc), host->bounce_addr);
4075 if (ret)
4076 /* Again fall back to max_segs == 1 */
4077 return;
4078 host->bounce_buffer_size = bounce_size;
4079
4080 /* Lie about this since we're bouncing */
4081 mmc->max_segs = max_blocks;
4082 mmc->max_seg_size = bounce_size;
4083 mmc->max_req_size = bounce_size;
4084
4085 pr_info("%s bounce up to %u segments into one, max segment size %u bytes\n",
4086 mmc_hostname(mmc), max_blocks, bounce_size);
4087 }
4088
4089 static inline bool sdhci_can_64bit_dma(struct sdhci_host *host)
4090 {
4091 /*
4092 * According to SD Host Controller spec v4.10, bit[27] added from
4093 * version 4.10 in Capabilities Register is used as 64-bit System
4094 * Address support for V4 mode.
4095 */
4096 if (host->version >= SDHCI_SPEC_410 && host->v4_mode)
4097 return host->caps & SDHCI_CAN_64BIT_V4;
4098
4099 return host->caps & SDHCI_CAN_64BIT;
4100 }
4101
4102 int sdhci_setup_host(struct sdhci_host *host)
4103 {
4104 struct mmc_host *mmc;
4105 u32 max_current_caps;
4106 unsigned int ocr_avail;
4107 unsigned int override_timeout_clk;
4108 u32 max_clk;
4109 int ret = 0;
4110 bool enable_vqmmc = false;
4111
4112 WARN_ON(host == NULL);
4113 if (host == NULL)
4114 return -EINVAL;
4115
4116 mmc = host->mmc;
4117
4118 /*
4119 * If there are external regulators, get them. Note this must be done
4120 * early before resetting the host and reading the capabilities so that
4121 * the host can take the appropriate action if regulators are not
4122 * available.
4123 */
4124 if (!mmc->supply.vqmmc) {
4125 ret = mmc_regulator_get_supply(mmc);
4126 if (ret)
4127 return ret;
4128 enable_vqmmc = true;
4129 }
4130
4131 DBG("Version: 0x%08x | Present: 0x%08x\n",
4132 sdhci_readw(host, SDHCI_HOST_VERSION),
4133 sdhci_readl(host, SDHCI_PRESENT_STATE));
4134 DBG("Caps: 0x%08x | Caps_1: 0x%08x\n",
4135 sdhci_readl(host, SDHCI_CAPABILITIES),
4136 sdhci_readl(host, SDHCI_CAPABILITIES_1));
4137
4138 sdhci_read_caps(host);
4139
4140 override_timeout_clk = host->timeout_clk;
4141
4142 if (host->version > SDHCI_SPEC_420) {
4143 pr_err("%s: Unknown controller version (%d). You may experience problems.\n",
4144 mmc_hostname(mmc), host->version);
4145 }
4146
4147 if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
4148 host->flags |= SDHCI_USE_SDMA;
4149 else if (!(host->caps & SDHCI_CAN_DO_SDMA))
4150 DBG("Controller doesn't have SDMA capability\n");
4151 else
4152 host->flags |= SDHCI_USE_SDMA;
4153
4154 if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) &&
4155 (host->flags & SDHCI_USE_SDMA)) {
4156 DBG("Disabling DMA as it is marked broken\n");
4157 host->flags &= ~SDHCI_USE_SDMA;
4158 }
4159
4160 if ((host->version >= SDHCI_SPEC_200) &&
4161 (host->caps & SDHCI_CAN_DO_ADMA2))
4162 host->flags |= SDHCI_USE_ADMA;
4163
4164 if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
4165 (host->flags & SDHCI_USE_ADMA)) {
4166 DBG("Disabling ADMA as it is marked broken\n");
4167 host->flags &= ~SDHCI_USE_ADMA;
4168 }
4169
4170 if (sdhci_can_64bit_dma(host))
4171 host->flags |= SDHCI_USE_64_BIT_DMA;
4172
4173 if (host->use_external_dma) {
4174 ret = sdhci_external_dma_init(host);
4175 if (ret == -EPROBE_DEFER)
4176 goto unreg;
4177 /*
4178 * Fall back to use the DMA/PIO integrated in standard SDHCI
4179 * instead of external DMA devices.
4180 */
4181 else if (ret)
4182 sdhci_switch_external_dma(host, false);
4183 /* Disable internal DMA sources */
4184 else
4185 host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
4186 }
4187
4188 if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
4189 if (host->ops->set_dma_mask)
4190 ret = host->ops->set_dma_mask(host);
4191 else
4192 ret = sdhci_set_dma_mask(host);
4193
4194 if (!ret && host->ops->enable_dma)
4195 ret = host->ops->enable_dma(host);
4196
4197 if (ret) {
4198 pr_warn("%s: No suitable DMA available - falling back to PIO\n",
4199 mmc_hostname(mmc));
4200 host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
4201
4202 ret = 0;
4203 }
4204 }
4205
4206 /* SDMA does not support 64-bit DMA if v4 mode not set */
4207 if ((host->flags & SDHCI_USE_64_BIT_DMA) && !host->v4_mode)
4208 host->flags &= ~SDHCI_USE_SDMA;
4209
4210 if (host->flags & SDHCI_USE_ADMA) {
4211 dma_addr_t dma;
4212 void *buf;
4213
4214 if (!(host->flags & SDHCI_USE_64_BIT_DMA))
4215 host->alloc_desc_sz = SDHCI_ADMA2_32_DESC_SZ;
4216 else if (!host->alloc_desc_sz)
4217 host->alloc_desc_sz = SDHCI_ADMA2_64_DESC_SZ(host);
4218
4219 host->desc_sz = host->alloc_desc_sz;
4220 host->adma_table_sz = host->adma_table_cnt * host->desc_sz;
4221
4222 host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN;
4223 /*
4224 * dma_alloc_coherent() returns zeroed memory, so the reserved
4225 * high 32 bits of 128-bit descriptors never need to be written.
4226 */
4227 buf = dma_alloc_coherent(mmc_dev(mmc),
4228 host->align_buffer_sz + host->adma_table_sz,
4229 &dma, GFP_KERNEL);
4230 if (!buf) {
4231 pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
4232 mmc_hostname(mmc));
4233 host->flags &= ~SDHCI_USE_ADMA;
4234 } else if ((dma + host->align_buffer_sz) &
4235 (SDHCI_ADMA2_DESC_ALIGN - 1)) {
4236 pr_warn("%s: unable to allocate aligned ADMA descriptor\n",
4237 mmc_hostname(mmc));
4238 host->flags &= ~SDHCI_USE_ADMA;
4239 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
4240 host->adma_table_sz, buf, dma);
4241 } else {
4242 host->align_buffer = buf;
4243 host->align_addr = dma;
4244
4245 host->adma_table = buf + host->align_buffer_sz;
4246 host->adma_addr = dma + host->align_buffer_sz;
4247 }
4248 }
4249
4250 /*
4251 * If we use DMA, then it's up to the caller to set the DMA
4252 * mask, but PIO does not need the hw shim so we set a new
4253 * mask here in that case.
4254 */
4255 if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) {
4256 host->dma_mask = DMA_BIT_MASK(64);
4257 mmc_dev(mmc)->dma_mask = &host->dma_mask;
4258 }
4259
4260 if (host->version >= SDHCI_SPEC_300)
4261 host->max_clk = FIELD_GET(SDHCI_CLOCK_V3_BASE_MASK, host->caps);
4262 else
4263 host->max_clk = FIELD_GET(SDHCI_CLOCK_BASE_MASK, host->caps);
4264
4265 host->max_clk *= 1000000;
4266 if (host->max_clk == 0 || host->quirks &
4267 SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) {
4268 if (!host->ops->get_max_clock) {
4269 pr_err("%s: Hardware doesn't specify base clock frequency.\n",
4270 mmc_hostname(mmc));
4271 ret = -ENODEV;
4272 goto undma;
4273 }
4274 host->max_clk = host->ops->get_max_clock(host);
4275 }
4276
4277 /*
4278 * In case of Host Controller v3.00, find out whether clock
4279 * multiplier is supported.
4280 */
4281 host->clk_mul = FIELD_GET(SDHCI_CLOCK_MUL_MASK, host->caps1);
4282
4283 /*
4284 * In case the value in Clock Multiplier is 0, then programmable
4285 * clock mode is not supported, otherwise the actual clock
4286 * multiplier is one more than the value of Clock Multiplier
4287 * in the Capabilities Register.
4288 */
4289 if (host->clk_mul)
4290 host->clk_mul += 1;
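	/* e.g. a Clock Multiplier field of 9 yields an effective multiplier of 10 */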
4291
4292 /*
4293 * Set host parameters.
4294 */
4295 max_clk = host->max_clk;
4296
4297 if (host->ops->get_min_clock)
4298 mmc->f_min = host->ops->get_min_clock(host);
4299 else if (host->version >= SDHCI_SPEC_300) {
4300 if (host->clk_mul)
4301 max_clk = host->max_clk * host->clk_mul;
4302 /*
4303 * Divided Clock Mode minimum clock rate is always less than
4304 * Programmable Clock Mode minimum clock rate.
4305 */
4306 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
4307 } else
4308 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
4309
4310 if (!mmc->f_max || mmc->f_max > max_clk)
4311 mmc->f_max = max_clk;
4312
4313 if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
4314 host->timeout_clk = FIELD_GET(SDHCI_TIMEOUT_CLK_MASK, host->caps);
4315
4316 if (host->caps & SDHCI_TIMEOUT_CLK_UNIT)
4317 host->timeout_clk *= 1000;
4318
4319 if (host->timeout_clk == 0) {
4320 if (!host->ops->get_timeout_clock) {
4321 pr_err("%s: Hardware doesn't specify timeout clock frequency.\n",
4322 mmc_hostname(mmc));
4323 ret = -ENODEV;
4324 goto undma;
4325 }
4326
4327 host->timeout_clk =
4328 DIV_ROUND_UP(host->ops->get_timeout_clock(host),
4329 1000);
4330 }
4331
4332 if (override_timeout_clk)
4333 host->timeout_clk = override_timeout_clk;
4334
4335 mmc->max_busy_timeout = host->ops->get_max_timeout_count ?
4336 host->ops->get_max_timeout_count(host) : 1 << 27;
4337 mmc->max_busy_timeout /= host->timeout_clk;
4338 }
4339
4340 if (host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT &&
4341 !host->ops->get_max_timeout_count)
4342 mmc->max_busy_timeout = 0;
4343
4344 mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_CMD23;
4345 mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
4346
4347 if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
4348 host->flags |= SDHCI_AUTO_CMD12;
4349
4350 /*
4351 * For v3 mode, Auto-CMD23 only works in ADMA or PIO mode.
4352 * For v4 mode, SDMA may use Auto-CMD23 as well.
4353 */
4354 if ((host->version >= SDHCI_SPEC_300) &&
4355 ((host->flags & SDHCI_USE_ADMA) ||
4356 !(host->flags & SDHCI_USE_SDMA) || host->v4_mode) &&
4357 !(host->quirks2 & SDHCI_QUIRK2_ACMD23_BROKEN)) {
4358 host->flags |= SDHCI_AUTO_CMD23;
4359 DBG("Auto-CMD23 available\n");
4360 } else {
4361 DBG("Auto-CMD23 unavailable\n");
4362 }
4363
4364 /*
4365 * A controller may support 8-bit width, but the board itself
4366 * might not have the pins brought out. Boards that support
4367 * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in
4368 * their platform code before calling sdhci_add_host(), and we
4369 * won't assume 8-bit width for hosts without that CAP.
4370 */
4371 if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
4372 mmc->caps |= MMC_CAP_4_BIT_DATA;
4373
4374 if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23)
4375 mmc->caps &= ~MMC_CAP_CMD23;
4376
4377 if (host->caps & SDHCI_CAN_DO_HISPD)
4378 mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
4379
4380 if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
4381 mmc_card_is_removable(mmc) &&
4382 mmc_gpio_get_cd(mmc) < 0)
4383 mmc->caps |= MMC_CAP_NEEDS_POLL;
4384
4385 if (!IS_ERR(mmc->supply.vqmmc)) {
4386 if (enable_vqmmc) {
4387 ret = regulator_enable(mmc->supply.vqmmc);
4388 host->sdhci_core_to_disable_vqmmc = !ret;
4389 }
4390
4391 /* If vqmmc provides no 1.8V signalling, then there's no UHS */
4392 if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 1700000,
4393 1950000))
4394 host->caps1 &= ~(SDHCI_SUPPORT_SDR104 |
4395 SDHCI_SUPPORT_SDR50 |
4396 SDHCI_SUPPORT_DDR50);
4397
4398 /* In eMMC case vqmmc might be a fixed 1.8V regulator */
4399 if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 2700000,
4400 3600000))
4401 host->flags &= ~SDHCI_SIGNALING_330;
4402
4403 if (ret) {
4404 pr_warn("%s: Failed to enable vqmmc regulator: %d\n",
4405 mmc_hostname(mmc), ret);
4406 mmc->supply.vqmmc = ERR_PTR(-EINVAL);
4407 }
4408
4409 }
4410
4411 if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V) {
4412 host->caps1 &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
4413 SDHCI_SUPPORT_DDR50);
4414 /*
4415 * The SDHCI controller in a SoC might support HS200/HS400
4416 * (indicated using mmc-hs200-1_8v/mmc-hs400-1_8v dt property),
4417 * but if the board is modeled such that the IO lines are not
4418 * connected to 1.8v then HS200/HS400 cannot be supported.
4419 * Disable HS200/HS400 if the board does not have 1.8v connected
4420 * to the IO lines. (The same applies to the other 1.8v modes.)
4421 */
4422 mmc->caps2 &= ~(MMC_CAP2_HSX00_1_8V | MMC_CAP2_HS400_ES);
4423 mmc->caps &= ~(MMC_CAP_1_8V_DDR | MMC_CAP_UHS);
4424 }
4425
4426 /* Any UHS-I mode in caps implies SDR12 and SDR25 support. */
4427 if (host->caps1 & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
4428 SDHCI_SUPPORT_DDR50))
4429 mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;
4430
4431 /* SDR104 support also implies SDR50 support */
4432 if (host->caps1 & SDHCI_SUPPORT_SDR104) {
4433 mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
4434 /* SD3.0: SDR104 is supported so (for eMMC) the caps2
4435 * field can be promoted to support HS200.
4436 */
4437 if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200))
4438 mmc->caps2 |= MMC_CAP2_HS200;
4439 } else if (host->caps1 & SDHCI_SUPPORT_SDR50) {
4440 mmc->caps |= MMC_CAP_UHS_SDR50;
4441 }
4442
4443 if (host->quirks2 & SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 &&
4444 (host->caps1 & SDHCI_SUPPORT_HS400))
4445 mmc->caps2 |= MMC_CAP2_HS400;
4446
4447 if ((mmc->caps2 & MMC_CAP2_HSX00_1_2V) &&
4448 (IS_ERR(mmc->supply.vqmmc) ||
4449 !regulator_is_supported_voltage(mmc->supply.vqmmc, 1100000,
4450 1300000)))
4451 mmc->caps2 &= ~MMC_CAP2_HSX00_1_2V;
4452
4453 if ((host->caps1 & SDHCI_SUPPORT_DDR50) &&
4454 !(host->quirks2 & SDHCI_QUIRK2_BROKEN_DDR50))
4455 mmc->caps |= MMC_CAP_UHS_DDR50;
4456
4457 /* Does the host need tuning for SDR50? */
4458 if (host->caps1 & SDHCI_USE_SDR50_TUNING)
4459 host->flags |= SDHCI_SDR50_NEEDS_TUNING;
4460
4461 /* Driver Type(s) (A, C, D) supported by the host */
4462 if (host->caps1 & SDHCI_DRIVER_TYPE_A)
4463 mmc->caps |= MMC_CAP_DRIVER_TYPE_A;
4464 if (host->caps1 & SDHCI_DRIVER_TYPE_C)
4465 mmc->caps |= MMC_CAP_DRIVER_TYPE_C;
4466 if (host->caps1 & SDHCI_DRIVER_TYPE_D)
4467 mmc->caps |= MMC_CAP_DRIVER_TYPE_D;
4468
4469 /* Initial value for re-tuning timer count */
4470 host->tuning_count = FIELD_GET(SDHCI_RETUNING_TIMER_COUNT_MASK,
4471 host->caps1);
4472
4473 /*
4474 * In case Re-tuning Timer is not disabled, the actual value of
4475 * re-tuning timer will be 2 ^ (n - 1).
4476 */
4477 if (host->tuning_count)
4478 host->tuning_count = 1 << (host->tuning_count - 1);
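	/* e.g. a raw count of 4 yields a 2^(4-1) = 8 second re-tuning interval */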
4479
4480 /* Re-tuning mode supported by the Host Controller */
4481 host->tuning_mode = FIELD_GET(SDHCI_RETUNING_MODE_MASK, host->caps1);
4482
4483 ocr_avail = 0;
4484
4485 /*
4486 * According to SD Host Controller spec v3.00, if the Host System
4487 * can afford more than 150mA, Host Driver should set XPC to 1. Also
4488 * the value is meaningful only if Voltage Support in the Capabilities
4489 * register is set. The actual current value is 4 times the register
4490 * value.
4491 */
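	/* e.g. a register field value of 25 encodes 25 * 4 = 100 mA */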
4492 max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT);
4493 if (!max_current_caps && !IS_ERR(mmc->supply.vmmc)) {
4494 int curr = regulator_get_current_limit(mmc->supply.vmmc);
4495 if (curr > 0) {
4496
4497 /* convert to SDHCI_MAX_CURRENT format */
4498 curr = curr/1000; /* convert to mA */
4499 curr = curr/SDHCI_MAX_CURRENT_MULTIPLIER;
4500
4501 curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT);
4502 max_current_caps =
4503 FIELD_PREP(SDHCI_MAX_CURRENT_330_MASK, curr) |
4504 FIELD_PREP(SDHCI_MAX_CURRENT_300_MASK, curr) |
4505 FIELD_PREP(SDHCI_MAX_CURRENT_180_MASK, curr);
4506 }
4507 }
4508
4509 if (host->caps & SDHCI_CAN_VDD_330) {
4510 ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;
4511
4512 mmc->max_current_330 = FIELD_GET(SDHCI_MAX_CURRENT_330_MASK,
4513 max_current_caps) *
4514 SDHCI_MAX_CURRENT_MULTIPLIER;
4515 }
4516 if (host->caps & SDHCI_CAN_VDD_300) {
4517 ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;
4518
4519 mmc->max_current_300 = FIELD_GET(SDHCI_MAX_CURRENT_300_MASK,
4520 max_current_caps) *
4521 SDHCI_MAX_CURRENT_MULTIPLIER;
4522 }
4523 if (host->caps & SDHCI_CAN_VDD_180) {
4524 ocr_avail |= MMC_VDD_165_195;
4525
4526 mmc->max_current_180 = FIELD_GET(SDHCI_MAX_CURRENT_180_MASK,
4527 max_current_caps) *
4528 SDHCI_MAX_CURRENT_MULTIPLIER;
4529 }
4530
4531 /* If OCR set by host, use it instead. */
4532 if (host->ocr_mask)
4533 ocr_avail = host->ocr_mask;
4534
4535 /* If OCR set by external regulators, give it highest prio. */
4536 if (mmc->ocr_avail)
4537 ocr_avail = mmc->ocr_avail;
4538
4539 mmc->ocr_avail = ocr_avail;
4540 mmc->ocr_avail_sdio = ocr_avail;
4541 if (host->ocr_avail_sdio)
4542 mmc->ocr_avail_sdio &= host->ocr_avail_sdio;
4543 mmc->ocr_avail_sd = ocr_avail;
4544 if (host->ocr_avail_sd)
4545 mmc->ocr_avail_sd &= host->ocr_avail_sd;
4546 else /* normal SD controllers don't support 1.8V */
4547 mmc->ocr_avail_sd &= ~MMC_VDD_165_195;
4548 mmc->ocr_avail_mmc = ocr_avail;
4549 if (host->ocr_avail_mmc)
4550 mmc->ocr_avail_mmc &= host->ocr_avail_mmc;
4551
4552 if (mmc->ocr_avail == 0) {
4553 pr_err("%s: Hardware doesn't report any support voltages.\n",
4554 mmc_hostname(mmc));
4555 ret = -ENODEV;
4556 goto unreg;
4557 }
4558
4559 if ((mmc->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
4560 MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
4561 MMC_CAP_UHS_DDR50 | MMC_CAP_1_8V_DDR)) ||
4562 (mmc->caps2 & (MMC_CAP2_HS200_1_8V_SDR | MMC_CAP2_HS400_1_8V)))
4563 host->flags |= SDHCI_SIGNALING_180;
4564
4565 if (mmc->caps2 & MMC_CAP2_HSX00_1_2V)
4566 host->flags |= SDHCI_SIGNALING_120;
4567
4568 spin_lock_init(&host->lock);
4569
4570 /*
4571 * Maximum number of sectors in one transfer. Limited by SDMA boundary
4572 * size (512KiB). Note some tuning modes impose a 4MiB limit, but this
4573 * is less anyway.
4574 */
4575 mmc->max_req_size = 524288;
4576
4577 /*
4578 * Maximum number of segments. Depends on if the hardware
4579 * can do scatter/gather or not.
4580 */
4581 if (host->flags & SDHCI_USE_ADMA) {
4582 mmc->max_segs = SDHCI_MAX_SEGS;
4583 } else if (host->flags & SDHCI_USE_SDMA) {
4584 mmc->max_segs = 1;
4585 mmc->max_req_size = min_t(size_t, mmc->max_req_size,
4586 dma_max_mapping_size(mmc_dev(mmc)));
4587 } else { /* PIO */
4588 mmc->max_segs = SDHCI_MAX_SEGS;
4589 }
4590
4591 /*
4592 * Maximum segment size. Could be one segment with the maximum number
4593 * of bytes. When doing hardware scatter/gather, each entry cannot
4594 * be larger than 64 KiB though.
4595 */
4596 if (host->flags & SDHCI_USE_ADMA) {
4597 if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC)
4598 mmc->max_seg_size = 65535;
4599 else
4600 mmc->max_seg_size = 65536;
4601 } else {
4602 mmc->max_seg_size = mmc->max_req_size;
4603 }
4604
4605 /*
4606 * Maximum block size. This varies from controller to controller and
4607 * is specified in the capabilities register.
4608 */
4609 if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) {
4610 mmc->max_blk_size = 2;
4611 } else {
4612 mmc->max_blk_size = (host->caps & SDHCI_MAX_BLOCK_MASK) >>
4613 SDHCI_MAX_BLOCK_SHIFT;
4614 if (mmc->max_blk_size >= 3) {
4615 pr_warn("%s: Invalid maximum block size, assuming 512 bytes\n",
4616 mmc_hostname(mmc));
4617 mmc->max_blk_size = 0;
4618 }
4619 }
4620
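	/* the encoded value maps 0 => 512, 1 => 1024, 2 => 2048 bytes */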
4621 mmc->max_blk_size = 512 << mmc->max_blk_size;
4622
4623 /*
4624 * Maximum block count.
4625 */
4626 mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;
4627
4628 if (mmc->max_segs == 1)
4629 /* This may alter mmc->*_blk_* parameters */
4630 sdhci_allocate_bounce_buffer(host);
4631
4632 return 0;
4633
4634 unreg:
4635 if (host->sdhci_core_to_disable_vqmmc)
4636 regulator_disable(mmc->supply.vqmmc);
4637 undma:
4638 if (host->align_buffer)
4639 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
4640 host->adma_table_sz, host->align_buffer,
4641 host->align_addr);
4642 host->adma_table = NULL;
4643 host->align_buffer = NULL;
4644
4645 return ret;
4646 }
4647 EXPORT_SYMBOL_GPL(sdhci_setup_host);
4648
4649 void sdhci_cleanup_host(struct sdhci_host *host)
4650 {
4651 struct mmc_host *mmc = host->mmc;
4652
4653 if (host->sdhci_core_to_disable_vqmmc)
4654 regulator_disable(mmc->supply.vqmmc);
4655
4656 if (host->align_buffer)
4657 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
4658 host->adma_table_sz, host->align_buffer,
4659 host->align_addr);
4660
4661 if (host->use_external_dma)
4662 sdhci_external_dma_release(host);
4663
4664 host->adma_table = NULL;
4665 host->align_buffer = NULL;
4666 }
4667 EXPORT_SYMBOL_GPL(sdhci_cleanup_host);
4668
4669 int __sdhci_add_host(struct sdhci_host *host)
4670 {
4671 unsigned int flags = WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI;
4672 struct mmc_host *mmc = host->mmc;
4673 int ret;
4674
4675 if ((mmc->caps2 & MMC_CAP2_CQE) &&
4676 (host->quirks & SDHCI_QUIRK_BROKEN_CQE)) {
4677 mmc->caps2 &= ~MMC_CAP2_CQE;
4678 mmc->cqe_ops = NULL;
4679 }
4680
4681 host->complete_wq = alloc_workqueue("sdhci", flags, 0);
4682 if (!host->complete_wq)
4683 return -ENOMEM;
4684
4685 INIT_WORK(&host->complete_work, sdhci_complete_work);
4686
4687 timer_setup(&host->timer, sdhci_timeout_timer, 0);
4688 timer_setup(&host->data_timer, sdhci_timeout_data_timer, 0);
4689
4690 init_waitqueue_head(&host->buf_ready_int);
4691
4692 sdhci_init(host, 0);
4693
4694 ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq,
4695 IRQF_SHARED, mmc_hostname(mmc), host);
4696 if (ret) {
4697 pr_err("%s: Failed to request IRQ %d: %d\n",
4698 mmc_hostname(mmc), host->irq, ret);
4699 goto unwq;
4700 }
4701
4702 ret = sdhci_led_register(host);
4703 if (ret) {
4704 pr_err("%s: Failed to register LED device: %d\n",
4705 mmc_hostname(mmc), ret);
4706 goto unirq;
4707 }
4708
4709 ret = mmc_add_host(mmc);
4710 if (ret)
4711 goto unled;
4712
4713 pr_info("%s: SDHCI controller on %s [%s] using %s\n",
4714 mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
4715 host->use_external_dma ? "External DMA" :
4716 (host->flags & SDHCI_USE_ADMA) ?
4717 (host->flags & SDHCI_USE_64_BIT_DMA) ? "ADMA 64-bit" : "ADMA" :
4718 (host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");
4719
4720 sdhci_enable_card_detection(host);
4721
4722 return 0;
4723
4724 unled:
4725 sdhci_led_unregister(host);
4726 unirq:
4727 sdhci_do_reset(host, SDHCI_RESET_ALL);
4728 sdhci_writel(host, 0, SDHCI_INT_ENABLE);
4729 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
4730 free_irq(host->irq, host);
4731 unwq:
4732 destroy_workqueue(host->complete_wq);
4733
4734 return ret;
4735 }
4736 EXPORT_SYMBOL_GPL(__sdhci_add_host);
4737
4738 int sdhci_add_host(struct sdhci_host *host)
4739 {
4740 int ret;
4741
4742 ret = sdhci_setup_host(host);
4743 if (ret)
4744 return ret;
4745
4746 ret = __sdhci_add_host(host);
4747 if (ret)
4748 goto cleanup;
4749
4750 return 0;
4751
4752 cleanup:
4753 sdhci_cleanup_host(host);
4754
4755 return ret;
4756 }
4757 EXPORT_SYMBOL_GPL(sdhci_add_host);
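
/*
 * Illustrative sketch (not part of this file): drivers that need to tweak
 * mmc/host parameters between capability parsing and registration use the
 * sdhci_setup_host()/__sdhci_add_host()/sdhci_cleanup_host() split rather
 * than plain sdhci_add_host(). The function name below is hypothetical.
 */
static int example_add_host(struct sdhci_host *host)
{
	int ret;

	ret = sdhci_setup_host(host);
	if (ret)
		return ret;

	/* ... adjust host->mmc->caps, max_segs, etc. here ... */

	ret = __sdhci_add_host(host);
	if (ret)
		sdhci_cleanup_host(host);

	return ret;
}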
4758
4759 void sdhci_remove_host(struct sdhci_host *host, int dead)
4760 {
4761 struct mmc_host *mmc = host->mmc;
4762 unsigned long flags;
4763
4764 if (dead) {
4765 spin_lock_irqsave(&host->lock, flags);
4766
4767 host->flags |= SDHCI_DEVICE_DEAD;
4768
4769 if (sdhci_has_requests(host)) {
4770 pr_err("%s: Controller removed during "
4771 " transfer!\n", mmc_hostname(mmc));
4772 sdhci_error_out_mrqs(host, -ENOMEDIUM);
4773 }
4774
4775 spin_unlock_irqrestore(&host->lock, flags);
4776 }
4777
4778 sdhci_disable_card_detection(host);
4779
4780 mmc_remove_host(mmc);
4781
4782 sdhci_led_unregister(host);
4783
4784 if (!dead)
4785 sdhci_do_reset(host, SDHCI_RESET_ALL);
4786
4787 sdhci_writel(host, 0, SDHCI_INT_ENABLE);
4788 sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
4789 free_irq(host->irq, host);
4790
4791 del_timer_sync(&host->timer);
4792 del_timer_sync(&host->data_timer);
4793
4794 destroy_workqueue(host->complete_wq);
4795
4796 if (host->sdhci_core_to_disable_vqmmc)
4797 regulator_disable(mmc->supply.vqmmc);
4798
4799 if (host->align_buffer)
4800 dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
4801 host->adma_table_sz, host->align_buffer,
4802 host->align_addr);
4803
4804 if (host->use_external_dma)
4805 sdhci_external_dma_release(host);
4806
4807 host->adma_table = NULL;
4808 host->align_buffer = NULL;
4809 }
4810
4811 EXPORT_SYMBOL_GPL(sdhci_remove_host);
4812
4813 void sdhci_free_host(struct sdhci_host *host)
4814 {
4815 mmc_free_host(host->mmc);
4816 }
4817
4818 EXPORT_SYMBOL_GPL(sdhci_free_host);
4819
4820 /*****************************************************************************\
4821 * *
4822 * Driver init/exit *
4823 * *
4824 \*****************************************************************************/
4825
4826 static int __init sdhci_drv_init(void)
4827 {
4828 pr_info(DRIVER_NAME
4829 ": Secure Digital Host Controller Interface driver\n");
4830 pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n");
4831
4832 return 0;
4833 }
4834
4835 static void __exit sdhci_drv_exit(void)
4836 {
4837 }
4838
4839 module_init(sdhci_drv_init);
4840 module_exit(sdhci_drv_exit);
4841
4842 module_param(debug_quirks, uint, 0444);
4843 module_param(debug_quirks2, uint, 0444);
4844
4845 MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
4846 MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver");
4847 MODULE_LICENSE("GPL");
4848
4849 MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");
4850 MODULE_PARM_DESC(debug_quirks2, "Force certain other quirks.");
4851