1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *  linux/drivers/mmc/host/sdhci.c - Secure Digital Host Controller Interface driver
4  *
5  *  Copyright (C) 2005-2008 Pierre Ossman, All Rights Reserved.
6  *
7  * Thanks to the following companies for their support:
8  *
9  *     - JMicron (hardware and technical support)
10  */
11 
12 #include <linux/delay.h>
13 #include <linux/dmaengine.h>
14 #include <linux/ktime.h>
15 #include <linux/highmem.h>
16 #include <linux/io.h>
17 #include <linux/module.h>
18 #include <linux/dma-mapping.h>
19 #include <linux/slab.h>
20 #include <linux/scatterlist.h>
21 #include <linux/sizes.h>
22 #include <linux/swiotlb.h>
23 #include <linux/regulator/consumer.h>
24 #include <linux/pm_runtime.h>
25 #include <linux/of.h>
26 
27 #include <linux/leds.h>
28 
29 #include <linux/mmc/mmc.h>
30 #include <linux/mmc/host.h>
31 #include <linux/mmc/card.h>
32 #include <linux/mmc/sdio.h>
33 #include <linux/mmc/slot-gpio.h>
34 
35 #include "sdhci.h"
36 
37 #define DRIVER_NAME "sdhci"
38 
39 #define DBG(f, x...) \
40 	pr_debug("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)
41 
42 #define SDHCI_DUMP(f, x...) \
43 	pr_err("%s: " DRIVER_NAME ": " f, mmc_hostname(host->mmc), ## x)
44 
45 #define MAX_TUNING_LOOP 40
46 
47 static unsigned int debug_quirks;
48 static unsigned int debug_quirks2;
49 
50 static void sdhci_finish_data(struct sdhci_host *);
51 
52 static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable);
53 
54 void sdhci_dumpregs(struct sdhci_host *host)
55 {
56 	SDHCI_DUMP("============ SDHCI REGISTER DUMP ===========\n");
57 
58 	SDHCI_DUMP("Sys addr:  0x%08x | Version:  0x%08x\n",
59 		   sdhci_readl(host, SDHCI_DMA_ADDRESS),
60 		   sdhci_readw(host, SDHCI_HOST_VERSION));
61 	SDHCI_DUMP("Blk size:  0x%08x | Blk cnt:  0x%08x\n",
62 		   sdhci_readw(host, SDHCI_BLOCK_SIZE),
63 		   sdhci_readw(host, SDHCI_BLOCK_COUNT));
64 	SDHCI_DUMP("Argument:  0x%08x | Trn mode: 0x%08x\n",
65 		   sdhci_readl(host, SDHCI_ARGUMENT),
66 		   sdhci_readw(host, SDHCI_TRANSFER_MODE));
67 	SDHCI_DUMP("Present:   0x%08x | Host ctl: 0x%08x\n",
68 		   sdhci_readl(host, SDHCI_PRESENT_STATE),
69 		   sdhci_readb(host, SDHCI_HOST_CONTROL));
70 	SDHCI_DUMP("Power:     0x%08x | Blk gap:  0x%08x\n",
71 		   sdhci_readb(host, SDHCI_POWER_CONTROL),
72 		   sdhci_readb(host, SDHCI_BLOCK_GAP_CONTROL));
73 	SDHCI_DUMP("Wake-up:   0x%08x | Clock:    0x%08x\n",
74 		   sdhci_readb(host, SDHCI_WAKE_UP_CONTROL),
75 		   sdhci_readw(host, SDHCI_CLOCK_CONTROL));
76 	SDHCI_DUMP("Timeout:   0x%08x | Int stat: 0x%08x\n",
77 		   sdhci_readb(host, SDHCI_TIMEOUT_CONTROL),
78 		   sdhci_readl(host, SDHCI_INT_STATUS));
79 	SDHCI_DUMP("Int enab:  0x%08x | Sig enab: 0x%08x\n",
80 		   sdhci_readl(host, SDHCI_INT_ENABLE),
81 		   sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
82 	SDHCI_DUMP("ACmd stat: 0x%08x | Slot int: 0x%08x\n",
83 		   sdhci_readw(host, SDHCI_AUTO_CMD_STATUS),
84 		   sdhci_readw(host, SDHCI_SLOT_INT_STATUS));
85 	SDHCI_DUMP("Caps:      0x%08x | Caps_1:   0x%08x\n",
86 		   sdhci_readl(host, SDHCI_CAPABILITIES),
87 		   sdhci_readl(host, SDHCI_CAPABILITIES_1));
88 	SDHCI_DUMP("Cmd:       0x%08x | Max curr: 0x%08x\n",
89 		   sdhci_readw(host, SDHCI_COMMAND),
90 		   sdhci_readl(host, SDHCI_MAX_CURRENT));
91 	SDHCI_DUMP("Resp[0]:   0x%08x | Resp[1]:  0x%08x\n",
92 		   sdhci_readl(host, SDHCI_RESPONSE),
93 		   sdhci_readl(host, SDHCI_RESPONSE + 4));
94 	SDHCI_DUMP("Resp[2]:   0x%08x | Resp[3]:  0x%08x\n",
95 		   sdhci_readl(host, SDHCI_RESPONSE + 8),
96 		   sdhci_readl(host, SDHCI_RESPONSE + 12));
97 	SDHCI_DUMP("Host ctl2: 0x%08x\n",
98 		   sdhci_readw(host, SDHCI_HOST_CONTROL2));
99 
100 	if (host->flags & SDHCI_USE_ADMA) {
101 		if (host->flags & SDHCI_USE_64_BIT_DMA) {
102 			SDHCI_DUMP("ADMA Err:  0x%08x | ADMA Ptr: 0x%08x%08x\n",
103 				   sdhci_readl(host, SDHCI_ADMA_ERROR),
104 				   sdhci_readl(host, SDHCI_ADMA_ADDRESS_HI),
105 				   sdhci_readl(host, SDHCI_ADMA_ADDRESS));
106 		} else {
107 			SDHCI_DUMP("ADMA Err:  0x%08x | ADMA Ptr: 0x%08x\n",
108 				   sdhci_readl(host, SDHCI_ADMA_ERROR),
109 				   sdhci_readl(host, SDHCI_ADMA_ADDRESS));
110 		}
111 	}
112 
113 	SDHCI_DUMP("============================================\n");
114 }
115 EXPORT_SYMBOL_GPL(sdhci_dumpregs);
116 
117 /*****************************************************************************\
118  *                                                                           *
119  * Low level functions                                                       *
120  *                                                                           *
121 \*****************************************************************************/
122 
123 static void sdhci_do_enable_v4_mode(struct sdhci_host *host)
124 {
125 	u16 ctrl2;
126 
127 	ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
128 	if (ctrl2 & SDHCI_CTRL_V4_MODE)
129 		return;
130 
131 	ctrl2 |= SDHCI_CTRL_V4_MODE;
132 	sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
133 }
134 
135 /*
136  * This can be called before sdhci_add_host() by a vendor's host controller
137  * driver to enable v4 mode if it is supported.
138  */
139 void sdhci_enable_v4_mode(struct sdhci_host *host)
140 {
141 	host->v4_mode = true;
142 	sdhci_do_enable_v4_mode(host);
143 }
144 EXPORT_SYMBOL_GPL(sdhci_enable_v4_mode);
145 
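/* A command ties up the data lines if it carries data or signals busy (R1b) on DAT0 */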
146 static inline bool sdhci_data_line_cmd(struct mmc_command *cmd)
147 {
148 	return cmd->data || cmd->flags & MMC_RSP_BUSY;
149 }
150 
151 static void sdhci_set_card_detection(struct sdhci_host *host, bool enable)
152 {
153 	u32 present;
154 
155 	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) ||
156 	    !mmc_card_is_removable(host->mmc))
157 		return;
158 
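	/*
	 * Arm only the interrupt for the transition that can actually happen
	 * next: card removal when a card is present, insertion when the slot
	 * is empty.
	 */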
159 	if (enable) {
160 		present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
161 				      SDHCI_CARD_PRESENT;
162 
163 		host->ier |= present ? SDHCI_INT_CARD_REMOVE :
164 				       SDHCI_INT_CARD_INSERT;
165 	} else {
166 		host->ier &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
167 	}
168 
169 	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
170 	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
171 }
172 
173 static void sdhci_enable_card_detection(struct sdhci_host *host)
174 {
175 	sdhci_set_card_detection(host, true);
176 }
177 
178 static void sdhci_disable_card_detection(struct sdhci_host *host)
179 {
180 	sdhci_set_card_detection(host, false);
181 }
182 
183 static void sdhci_runtime_pm_bus_on(struct sdhci_host *host)
184 {
185 	if (host->bus_on)
186 		return;
187 	host->bus_on = true;
188 	pm_runtime_get_noresume(host->mmc->parent);
189 }
190 
191 static void sdhci_runtime_pm_bus_off(struct sdhci_host *host)
192 {
193 	if (!host->bus_on)
194 		return;
195 	host->bus_on = false;
196 	pm_runtime_put_noidle(host->mmc->parent);
197 }
198 
199 void sdhci_reset(struct sdhci_host *host, u8 mask)
200 {
201 	ktime_t timeout;
202 
203 	sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET);
204 
205 	if (mask & SDHCI_RESET_ALL) {
206 		host->clock = 0;
207 		/* Reset-all turns off SD Bus Power */
208 		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
209 			sdhci_runtime_pm_bus_off(host);
210 	}
211 
212 	/* Wait max 100 ms */
213 	timeout = ktime_add_ms(ktime_get(), 100);
214 
215 	/* hw clears the bit when it's done */
216 	while (1) {
217 		bool timedout = ktime_after(ktime_get(), timeout);
218 
219 		if (!(sdhci_readb(host, SDHCI_SOFTWARE_RESET) & mask))
220 			break;
221 		if (timedout) {
222 			pr_err("%s: Reset 0x%x never completed.\n",
223 				mmc_hostname(host->mmc), (int)mask);
224 			sdhci_dumpregs(host);
225 			return;
226 		}
227 		udelay(10);
228 	}
229 }
230 EXPORT_SYMBOL_GPL(sdhci_reset);
231 
232 static void sdhci_do_reset(struct sdhci_host *host, u8 mask)
233 {
234 	if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) {
235 		struct mmc_host *mmc = host->mmc;
236 
237 		if (!mmc->ops->get_cd(mmc))
238 			return;
239 	}
240 
241 	host->ops->reset(host, mask);
242 
243 	if (mask & SDHCI_RESET_ALL) {
244 		if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
245 			if (host->ops->enable_dma)
246 				host->ops->enable_dma(host);
247 		}
248 
249 		/* Resetting the controller clears many registers to their defaults */
250 		host->preset_enabled = false;
251 	}
252 }
253 
254 static void sdhci_set_default_irqs(struct sdhci_host *host)
255 {
256 	host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT |
257 		    SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT |
258 		    SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC |
259 		    SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END |
260 		    SDHCI_INT_RESPONSE;
261 
262 	if (host->tuning_mode == SDHCI_TUNING_MODE_2 ||
263 	    host->tuning_mode == SDHCI_TUNING_MODE_3)
264 		host->ier |= SDHCI_INT_RETUNE;
265 
266 	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
267 	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
268 }
269 
270 static void sdhci_config_dma(struct sdhci_host *host)
271 {
272 	u8 ctrl;
273 	u16 ctrl2;
274 
275 	if (host->version < SDHCI_SPEC_200)
276 		return;
277 
278 	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
279 
280 	/*
281 	 * Always adjust the DMA selection as some controllers
282 	 * (e.g. JMicron) can't do PIO properly when the selection
283 	 * is ADMA.
284 	 */
285 	ctrl &= ~SDHCI_CTRL_DMA_MASK;
286 	if (!(host->flags & SDHCI_REQ_USE_DMA))
287 		goto out;
288 
289 	/* Note: if DMA Select is zero, SDMA is selected */
290 	if (host->flags & SDHCI_USE_ADMA)
291 		ctrl |= SDHCI_CTRL_ADMA32;
292 
293 	if (host->flags & SDHCI_USE_64_BIT_DMA) {
294 		/*
295 		 * In v4 mode, all DMA types can use 64-bit addressing if the
296 		 * controller supports 64-bit system addresses; otherwise only
297 		 * ADMA supports 64-bit addressing.
298 		 */
299 		if (host->v4_mode) {
300 			ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
301 			ctrl2 |= SDHCI_CTRL_64BIT_ADDR;
302 			sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
303 		} else if (host->flags & SDHCI_USE_ADMA) {
304 			/*
305 			 * Don't need to undo SDHCI_CTRL_ADMA32 in order to
306 			 * set SDHCI_CTRL_ADMA64.
307 			 */
308 			ctrl |= SDHCI_CTRL_ADMA64;
309 		}
310 	}
311 
312 out:
313 	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
314 }
315 
316 static void sdhci_init(struct sdhci_host *host, int soft)
317 {
318 	struct mmc_host *mmc = host->mmc;
319 
320 	if (soft)
321 		sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
322 	else
323 		sdhci_do_reset(host, SDHCI_RESET_ALL);
324 
325 	if (host->v4_mode)
326 		sdhci_do_enable_v4_mode(host);
327 
328 	sdhci_set_default_irqs(host);
329 
330 	host->cqe_on = false;
331 
332 	if (soft) {
333 		/* force clock reconfiguration */
334 		host->clock = 0;
335 		mmc->ops->set_ios(mmc, &mmc->ios);
336 	}
337 }
338 
339 static void sdhci_reinit(struct sdhci_host *host)
340 {
341 	u32 cd = host->ier & (SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT);
342 
343 	sdhci_init(host, 0);
344 	sdhci_enable_card_detection(host);
345 
346 	/*
347 	 * A change to the card detect bits indicates a change in present state,
348 	 * refer sdhci_set_card_detection(). A card detect interrupt might have
349 	 * been missed while the host controller was being reset, so trigger a
350 	 * rescan to check.
351 	 */
352 	if (cd != (host->ier & (SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT)))
353 		mmc_detect_change(host->mmc, msecs_to_jiffies(200));
354 }
355 
356 static void __sdhci_led_activate(struct sdhci_host *host)
357 {
358 	u8 ctrl;
359 
360 	if (host->quirks & SDHCI_QUIRK_NO_LED)
361 		return;
362 
363 	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
364 	ctrl |= SDHCI_CTRL_LED;
365 	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
366 }
367 
368 static void __sdhci_led_deactivate(struct sdhci_host *host)
369 {
370 	u8 ctrl;
371 
372 	if (host->quirks & SDHCI_QUIRK_NO_LED)
373 		return;
374 
375 	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
376 	ctrl &= ~SDHCI_CTRL_LED;
377 	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
378 }
379 
380 #if IS_REACHABLE(CONFIG_LEDS_CLASS)
381 static void sdhci_led_control(struct led_classdev *led,
382 			      enum led_brightness brightness)
383 {
384 	struct sdhci_host *host = container_of(led, struct sdhci_host, led);
385 	unsigned long flags;
386 
387 	spin_lock_irqsave(&host->lock, flags);
388 
389 	if (host->runtime_suspended)
390 		goto out;
391 
392 	if (brightness == LED_OFF)
393 		__sdhci_led_deactivate(host);
394 	else
395 		__sdhci_led_activate(host);
396 out:
397 	spin_unlock_irqrestore(&host->lock, flags);
398 }
399 
400 static int sdhci_led_register(struct sdhci_host *host)
401 {
402 	struct mmc_host *mmc = host->mmc;
403 
404 	if (host->quirks & SDHCI_QUIRK_NO_LED)
405 		return 0;
406 
407 	snprintf(host->led_name, sizeof(host->led_name),
408 		 "%s::", mmc_hostname(mmc));
409 
410 	host->led.name = host->led_name;
411 	host->led.brightness = LED_OFF;
412 	host->led.default_trigger = mmc_hostname(mmc);
413 	host->led.brightness_set = sdhci_led_control;
414 
415 	return led_classdev_register(mmc_dev(mmc), &host->led);
416 }
417 
418 static void sdhci_led_unregister(struct sdhci_host *host)
419 {
420 	if (host->quirks & SDHCI_QUIRK_NO_LED)
421 		return;
422 
423 	led_classdev_unregister(&host->led);
424 }
425 
426 static inline void sdhci_led_activate(struct sdhci_host *host)
427 {
428 }
429 
430 static inline void sdhci_led_deactivate(struct sdhci_host *host)
431 {
432 }
433 
434 #else
435 
436 static inline int sdhci_led_register(struct sdhci_host *host)
437 {
438 	return 0;
439 }
440 
441 static inline void sdhci_led_unregister(struct sdhci_host *host)
442 {
443 }
444 
445 static inline void sdhci_led_activate(struct sdhci_host *host)
446 {
447 	__sdhci_led_activate(host);
448 }
449 
450 static inline void sdhci_led_deactivate(struct sdhci_host *host)
451 {
452 	__sdhci_led_deactivate(host);
453 }
454 
455 #endif
456 
457 static void sdhci_mod_timer(struct sdhci_host *host, struct mmc_request *mrq,
458 			    unsigned long timeout)
459 {
460 	if (sdhci_data_line_cmd(mrq->cmd))
461 		mod_timer(&host->data_timer, timeout);
462 	else
463 		mod_timer(&host->timer, timeout);
464 }
465 
466 static void sdhci_del_timer(struct sdhci_host *host, struct mmc_request *mrq)
467 {
468 	if (sdhci_data_line_cmd(mrq->cmd))
469 		del_timer(&host->data_timer);
470 	else
471 		del_timer(&host->timer);
472 }
473 
474 static inline bool sdhci_has_requests(struct sdhci_host *host)
475 {
476 	return host->cmd || host->data_cmd;
477 }
478 
479 /*****************************************************************************\
480  *                                                                           *
481  * Core functions                                                            *
482  *                                                                           *
483 \*****************************************************************************/
484 
485 static void sdhci_read_block_pio(struct sdhci_host *host)
486 {
487 	unsigned long flags;
488 	size_t blksize, len, chunk;
489 	u32 uninitialized_var(scratch);
490 	u8 *buf;
491 
492 	DBG("PIO reading\n");
493 
494 	blksize = host->data->blksz;
495 	chunk = 0;
496 
497 	local_irq_save(flags);
498 
499 	while (blksize) {
500 		BUG_ON(!sg_miter_next(&host->sg_miter));
501 
502 		len = min(host->sg_miter.length, blksize);
503 
504 		blksize -= len;
505 		host->sg_miter.consumed = len;
506 
507 		buf = host->sg_miter.addr;
508 
509 		while (len) {
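		/* Unpack each 32-bit FIFO word into bytes, LSB first */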
510 			if (chunk == 0) {
511 				scratch = sdhci_readl(host, SDHCI_BUFFER);
512 				chunk = 4;
513 			}
514 
515 			*buf = scratch & 0xFF;
516 
517 			buf++;
518 			scratch >>= 8;
519 			chunk--;
520 			len--;
521 		}
522 	}
523 
524 	sg_miter_stop(&host->sg_miter);
525 
526 	local_irq_restore(flags);
527 }
528 
529 static void sdhci_write_block_pio(struct sdhci_host *host)
530 {
531 	unsigned long flags;
532 	size_t blksize, len, chunk;
533 	u32 scratch;
534 	u8 *buf;
535 
536 	DBG("PIO writing\n");
537 
538 	blksize = host->data->blksz;
539 	chunk = 0;
540 	scratch = 0;
541 
542 	local_irq_save(flags);
543 
544 	while (blksize) {
545 		BUG_ON(!sg_miter_next(&host->sg_miter));
546 
547 		len = min(host->sg_miter.length, blksize);
548 
549 		blksize -= len;
550 		host->sg_miter.consumed = len;
551 
552 		buf = host->sg_miter.addr;
553 
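		/* Pack bytes LSB-first into 32-bit words before writing the FIFO */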
554 		while (len) {
555 			scratch |= (u32)*buf << (chunk * 8);
556 
557 			buf++;
558 			chunk++;
559 			len--;
560 
561 			if ((chunk == 4) || ((len == 0) && (blksize == 0))) {
562 				sdhci_writel(host, scratch, SDHCI_BUFFER);
563 				chunk = 0;
564 				scratch = 0;
565 			}
566 		}
567 	}
568 
569 	sg_miter_stop(&host->sg_miter);
570 
571 	local_irq_restore(flags);
572 }
573 
574 static void sdhci_transfer_pio(struct sdhci_host *host)
575 {
576 	u32 mask;
577 
578 	if (host->blocks == 0)
579 		return;
580 
581 	if (host->data->flags & MMC_DATA_READ)
582 		mask = SDHCI_DATA_AVAILABLE;
583 	else
584 		mask = SDHCI_SPACE_AVAILABLE;
585 
586 	/*
587 	 * Some controllers (JMicron JMB38x) mess up the buffer bits
588 	 * for transfers < 4 bytes. As long as it is just one block,
589 	 * we can ignore the bits.
590 	 */
591 	if ((host->quirks & SDHCI_QUIRK_BROKEN_SMALL_PIO) &&
592 		(host->data->blocks == 1))
593 		mask = ~0;
594 
595 	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
596 		if (host->quirks & SDHCI_QUIRK_PIO_NEEDS_DELAY)
597 			udelay(100);
598 
599 		if (host->data->flags & MMC_DATA_READ)
600 			sdhci_read_block_pio(host);
601 		else
602 			sdhci_write_block_pio(host);
603 
604 		host->blocks--;
605 		if (host->blocks == 0)
606 			break;
607 	}
608 
609 	DBG("PIO transfer complete.\n");
610 }
611 
612 static int sdhci_pre_dma_transfer(struct sdhci_host *host,
613 				  struct mmc_data *data, int cookie)
614 {
615 	int sg_count;
616 
617 	/*
618 	 * If the data buffers are already mapped, return the previous
619 	 * dma_map_sg() result.
620 	 */
621 	if (data->host_cookie == COOKIE_PRE_MAPPED)
622 		return data->sg_count;
623 
624 	/* Bounce write requests to the bounce buffer */
625 	if (host->bounce_buffer) {
626 		unsigned int length = data->blksz * data->blocks;
627 
628 		if (length > host->bounce_buffer_size) {
629 			pr_err("%s: asked for transfer of %u bytes exceeds bounce buffer %u bytes\n",
630 			       mmc_hostname(host->mmc), length,
631 			       host->bounce_buffer_size);
632 			return -EIO;
633 		}
634 		if (mmc_get_dma_dir(data) == DMA_TO_DEVICE) {
635 			/* Copy the data to the bounce buffer */
636 			sg_copy_to_buffer(data->sg, data->sg_len,
637 					  host->bounce_buffer,
638 					  length);
639 		}
640 		/* Switch ownership to the DMA */
641 		dma_sync_single_for_device(host->mmc->parent,
642 					   host->bounce_addr,
643 					   host->bounce_buffer_size,
644 					   mmc_get_dma_dir(data));
645 		/* Just a dummy value */
646 		sg_count = 1;
647 	} else {
648 		/* Just access the data directly from memory */
649 		sg_count = dma_map_sg(mmc_dev(host->mmc),
650 				      data->sg, data->sg_len,
651 				      mmc_get_dma_dir(data));
652 	}
653 
654 	if (sg_count == 0)
655 		return -ENOSPC;
656 
657 	data->sg_count = sg_count;
658 	data->host_cookie = cookie;
659 
660 	return sg_count;
661 }
662 
663 static char *sdhci_kmap_atomic(struct scatterlist *sg, unsigned long *flags)
664 {
665 	local_irq_save(*flags);
666 	return kmap_atomic(sg_page(sg)) + sg->offset;
667 }
668 
669 static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags)
670 {
671 	kunmap_atomic(buffer);
672 	local_irq_restore(*flags);
673 }
674 
675 void sdhci_adma_write_desc(struct sdhci_host *host, void **desc,
676 			   dma_addr_t addr, int len, unsigned int cmd)
677 {
678 	struct sdhci_adma2_64_desc *dma_desc = *desc;
679 
680 	/* 32-bit and 64-bit descriptors have these members in same position */
681 	dma_desc->cmd = cpu_to_le16(cmd);
682 	dma_desc->len = cpu_to_le16(len);
683 	dma_desc->addr_lo = cpu_to_le32(lower_32_bits(addr));
684 
685 	if (host->flags & SDHCI_USE_64_BIT_DMA)
686 		dma_desc->addr_hi = cpu_to_le32(upper_32_bits(addr));
687 
688 	*desc += host->desc_sz;
689 }
690 EXPORT_SYMBOL_GPL(sdhci_adma_write_desc);
691 
692 static inline void __sdhci_adma_write_desc(struct sdhci_host *host,
693 					   void **desc, dma_addr_t addr,
694 					   int len, unsigned int cmd)
695 {
696 	if (host->ops->adma_write_desc)
697 		host->ops->adma_write_desc(host, desc, addr, len, cmd);
698 	else
699 		sdhci_adma_write_desc(host, desc, addr, len, cmd);
700 }
701 
702 static void sdhci_adma_mark_end(void *desc)
703 {
704 	struct sdhci_adma2_64_desc *dma_desc = desc;
705 
706 	/* 32-bit and 64-bit descriptors have 'cmd' in same position */
707 	dma_desc->cmd |= cpu_to_le16(ADMA2_END);
708 }
709 
710 static void sdhci_adma_table_pre(struct sdhci_host *host,
711 	struct mmc_data *data, int sg_count)
712 {
713 	struct scatterlist *sg;
714 	unsigned long flags;
715 	dma_addr_t addr, align_addr;
716 	void *desc, *align;
717 	char *buffer;
718 	int len, offset, i;
719 
720 	/*
721 	 * The spec does not specify the endianness of the descriptor table.
722 	 * We currently guess that it is LE.
723 	 */
724 
725 	host->sg_count = sg_count;
726 
727 	desc = host->adma_table;
728 	align = host->align_buffer;
729 
730 	align_addr = host->align_addr;
731 
732 	for_each_sg(data->sg, sg, host->sg_count, i) {
733 		addr = sg_dma_address(sg);
734 		len = sg_dma_len(sg);
735 
736 		/*
737 		 * The SDHCI specification states that ADMA addresses must
738 		 * be 32-bit aligned. If they aren't, then we use a bounce
739 		 * buffer for the (up to three) bytes that screw up the
740 		 * alignment.
741 		 */
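		/*
		 * Illustrative example (values assumed): addr = 0x1001
		 * gives offset = (4 - 1) & 3 = 3, so the first 3 bytes
		 * are bounced through the align buffer and the DMA
		 * address advances to the aligned 0x1004.
		 */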
742 		offset = (SDHCI_ADMA2_ALIGN - (addr & SDHCI_ADMA2_MASK)) &
743 			 SDHCI_ADMA2_MASK;
744 		if (offset) {
745 			if (data->flags & MMC_DATA_WRITE) {
746 				buffer = sdhci_kmap_atomic(sg, &flags);
747 				memcpy(align, buffer, offset);
748 				sdhci_kunmap_atomic(buffer, &flags);
749 			}
750 
751 			/* tran, valid */
752 			__sdhci_adma_write_desc(host, &desc, align_addr,
753 						offset, ADMA2_TRAN_VALID);
754 
755 			BUG_ON(offset > 65536);
756 
757 			align += SDHCI_ADMA2_ALIGN;
758 			align_addr += SDHCI_ADMA2_ALIGN;
759 
760 			addr += offset;
761 			len -= offset;
762 		}
763 
764 		BUG_ON(len > 65536);
765 
766 		/* tran, valid */
767 		if (len)
768 			__sdhci_adma_write_desc(host, &desc, addr, len,
769 						ADMA2_TRAN_VALID);
770 
771 		/*
772 		 * If this triggers then we have a calculation bug
773 		 * somewhere. :/
774 		 */
775 		WARN_ON((desc - host->adma_table) >= host->adma_table_sz);
776 	}
777 
778 	if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) {
779 		/* Mark the last descriptor as the terminating descriptor */
780 		if (desc != host->adma_table) {
781 			desc -= host->desc_sz;
782 			sdhci_adma_mark_end(desc);
783 		}
784 	} else {
785 		/* Add a terminating entry - nop, end, valid */
786 		__sdhci_adma_write_desc(host, &desc, 0, 0, ADMA2_NOP_END_VALID);
787 	}
788 }
789 
790 static void sdhci_adma_table_post(struct sdhci_host *host,
791 	struct mmc_data *data)
792 {
793 	struct scatterlist *sg;
794 	int i, size;
795 	void *align;
796 	char *buffer;
797 	unsigned long flags;
798 
799 	if (data->flags & MMC_DATA_READ) {
800 		bool has_unaligned = false;
801 
802 		/* Do a quick scan of the SG list for any unaligned mappings */
803 		for_each_sg(data->sg, sg, host->sg_count, i)
804 			if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
805 				has_unaligned = true;
806 				break;
807 			}
808 
809 		if (has_unaligned) {
810 			dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg,
811 					    data->sg_len, DMA_FROM_DEVICE);
812 
813 			align = host->align_buffer;
814 
815 			for_each_sg(data->sg, sg, host->sg_count, i) {
816 				if (sg_dma_address(sg) & SDHCI_ADMA2_MASK) {
817 					size = SDHCI_ADMA2_ALIGN -
818 					       (sg_dma_address(sg) & SDHCI_ADMA2_MASK);
819 
820 					buffer = sdhci_kmap_atomic(sg, &flags);
821 					memcpy(buffer, align, size);
822 					sdhci_kunmap_atomic(buffer, &flags);
823 
824 					align += SDHCI_ADMA2_ALIGN;
825 				}
826 			}
827 		}
828 	}
829 }
830 
831 static void sdhci_set_adma_addr(struct sdhci_host *host, dma_addr_t addr)
832 {
833 	sdhci_writel(host, lower_32_bits(addr), SDHCI_ADMA_ADDRESS);
834 	if (host->flags & SDHCI_USE_64_BIT_DMA)
835 		sdhci_writel(host, upper_32_bits(addr), SDHCI_ADMA_ADDRESS_HI);
836 }
837 
838 static dma_addr_t sdhci_sdma_address(struct sdhci_host *host)
839 {
840 	if (host->bounce_buffer)
841 		return host->bounce_addr;
842 	else
843 		return sg_dma_address(host->data->sg);
844 }
845 
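/*
 * In v4 mode the SDMA address is programmed through the ADMA system
 * address registers; otherwise the legacy 32-bit SDMA address register
 * is used.
 */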
846 static void sdhci_set_sdma_addr(struct sdhci_host *host, dma_addr_t addr)
847 {
848 	if (host->v4_mode)
849 		sdhci_set_adma_addr(host, addr);
850 	else
851 		sdhci_writel(host, addr, SDHCI_DMA_ADDRESS);
852 }
853 
854 static unsigned int sdhci_target_timeout(struct sdhci_host *host,
855 					 struct mmc_command *cmd,
856 					 struct mmc_data *data)
857 {
858 	unsigned int target_timeout;
859 
860 	/* timeout in us */
861 	if (!data) {
862 		target_timeout = cmd->busy_timeout * 1000;
863 	} else {
864 		target_timeout = DIV_ROUND_UP(data->timeout_ns, 1000);
865 		if (host->clock && data->timeout_clks) {
866 			unsigned long long val;
867 
868 			/*
869 			 * data->timeout_clks is in units of clock cycles.
870 			 * host->clock is in Hz.  target_timeout is in us.
871 			 * Hence, us = 1000000 * cycles / Hz.  Round up.
872 			 */
873 			val = 1000000ULL * data->timeout_clks;
874 			if (do_div(val, host->clock))
875 				target_timeout++;
876 			target_timeout += val;
877 		}
878 	}
879 
880 	return target_timeout;
881 }
882 
883 static void sdhci_calc_sw_timeout(struct sdhci_host *host,
884 				  struct mmc_command *cmd)
885 {
886 	struct mmc_data *data = cmd->data;
887 	struct mmc_host *mmc = host->mmc;
888 	struct mmc_ios *ios = &mmc->ios;
889 	unsigned char bus_width = 1 << ios->bus_width;
890 	unsigned int blksz;
891 	unsigned int freq;
892 	u64 target_timeout;
893 	u64 transfer_time;
894 
895 	target_timeout = sdhci_target_timeout(host, cmd, data);
896 	target_timeout *= NSEC_PER_USEC;
897 
898 	if (data) {
899 		blksz = data->blksz;
900 		freq = host->mmc->actual_clock ? : host->clock;
901 		transfer_time = (u64)blksz * NSEC_PER_SEC * (8 / bus_width);
902 		do_div(transfer_time, freq);
903 		/* multiply by '2' to account for any unknowns */
904 		transfer_time = transfer_time * 2;
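		/*
		 * Illustrative numbers (assumed): 512-byte blocks on a
		 * 4-bit bus at 50 MHz take 512 * 8 / 4 = 1024 clocks,
		 * i.e. ~20.5 us per block, doubled to ~41 us here.
		 */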
905 		/* calculate timeout for the entire data */
906 		host->data_timeout = data->blocks * target_timeout +
907 				     transfer_time;
908 	} else {
909 		host->data_timeout = target_timeout;
910 	}
911 
912 	if (host->data_timeout)
913 		host->data_timeout += MMC_CMD_TRANSFER_TIME;
914 }
915 
916 static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd,
917 			     bool *too_big)
918 {
919 	u8 count;
920 	struct mmc_data *data;
921 	unsigned target_timeout, current_timeout;
922 
923 	*too_big = true;
924 
925 	/*
926 	 * If the host controller provides us with an incorrect timeout
927 	 * value, just skip the check and use 0xE.  The hardware may take
928 	 * longer to time out, but that's much better than having a too-short
929 	 * timeout value.
930 	 */
931 	if (host->quirks & SDHCI_QUIRK_BROKEN_TIMEOUT_VAL)
932 		return 0xE;
933 
934 	/* Unspecified command, assume max */
935 	if (cmd == NULL)
936 		return 0xE;
937 
938 	data = cmd->data;
939 	/* Unspecified timeout, assume max */
940 	if (!data && !cmd->busy_timeout)
941 		return 0xE;
942 
943 	/* timeout in us */
944 	target_timeout = sdhci_target_timeout(host, cmd, data);
945 
946 	/*
947 	 * Figure out needed cycles.
948 	 * We do this in steps in order to fit inside a 32 bit int.
949 	 * The first step is the minimum timeout, which will have a
950 	 * minimum resolution of 6 bits:
951 	 * (1) 2^13*1000 > 2^22,
952 	 * (2) host->timeout_clk < 2^16
953 	 *     =>
954 	 *     (1) / (2) > 2^6
955 	 */
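	/*
	 * Worked example (illustrative, assumed values): with
	 * timeout_clk = 48000 kHz the minimum timeout is
	 * 2^13 * 1000 / 48000 ~= 170 us, so a 100 ms target needs
	 * 170 us * 2^count >= 100000 us, i.e. count = 10
	 * (TMCLK x 2^(count + 13) ~= 175 ms at 48 MHz).
	 */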
956 	count = 0;
957 	current_timeout = (1 << 13) * 1000 / host->timeout_clk;
958 	while (current_timeout < target_timeout) {
959 		count++;
960 		current_timeout <<= 1;
961 		if (count >= 0xF)
962 			break;
963 	}
964 
965 	if (count >= 0xF) {
966 		if (!(host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT))
967 			DBG("Too large timeout 0x%x requested for CMD%d!\n",
968 			    count, cmd->opcode);
969 		count = 0xE;
970 	} else {
971 		*too_big = false;
972 	}
973 
974 	return count;
975 }
976 
977 static void sdhci_set_transfer_irqs(struct sdhci_host *host)
978 {
979 	u32 pio_irqs = SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL;
980 	u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR;
981 
982 	if (host->flags & SDHCI_REQ_USE_DMA)
983 		host->ier = (host->ier & ~pio_irqs) | dma_irqs;
984 	else
985 		host->ier = (host->ier & ~dma_irqs) | pio_irqs;
986 
987 	if (host->flags & (SDHCI_AUTO_CMD23 | SDHCI_AUTO_CMD12))
988 		host->ier |= SDHCI_INT_AUTO_CMD_ERR;
989 	else
990 		host->ier &= ~SDHCI_INT_AUTO_CMD_ERR;
991 
992 	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
993 	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
994 }
995 
996 void sdhci_set_data_timeout_irq(struct sdhci_host *host, bool enable)
997 {
998 	if (enable)
999 		host->ier |= SDHCI_INT_DATA_TIMEOUT;
1000 	else
1001 		host->ier &= ~SDHCI_INT_DATA_TIMEOUT;
1002 	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
1003 	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
1004 }
1005 EXPORT_SYMBOL_GPL(sdhci_set_data_timeout_irq);
1006 
1007 void __sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
1008 {
1009 	bool too_big = false;
1010 	u8 count = sdhci_calc_timeout(host, cmd, &too_big);
1011 
1012 	if (too_big &&
1013 	    host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT) {
1014 		sdhci_calc_sw_timeout(host, cmd);
1015 		sdhci_set_data_timeout_irq(host, false);
1016 	} else if (!(host->ier & SDHCI_INT_DATA_TIMEOUT)) {
1017 		sdhci_set_data_timeout_irq(host, true);
1018 	}
1019 
1020 	sdhci_writeb(host, count, SDHCI_TIMEOUT_CONTROL);
1021 }
1022 EXPORT_SYMBOL_GPL(__sdhci_set_timeout);
1023 
1024 static void sdhci_set_timeout(struct sdhci_host *host, struct mmc_command *cmd)
1025 {
1026 	if (host->ops->set_timeout)
1027 		host->ops->set_timeout(host, cmd);
1028 	else
1029 		__sdhci_set_timeout(host, cmd);
1030 }
1031 
1032 static void sdhci_initialize_data(struct sdhci_host *host,
1033 				  struct mmc_data *data)
1034 {
1035 	WARN_ON(host->data);
1036 
1037 	/* Sanity checks */
1038 	BUG_ON(data->blksz * data->blocks > 524288);
1039 	BUG_ON(data->blksz > host->mmc->max_blk_size);
1040 	BUG_ON(data->blocks > 65535);
1041 
1042 	host->data = data;
1043 	host->data_early = 0;
1044 	host->data->bytes_xfered = 0;
1045 }
1046 
1047 static inline void sdhci_set_block_info(struct sdhci_host *host,
1048 					struct mmc_data *data)
1049 {
1050 	/* Set the DMA boundary value and block size */
1051 	sdhci_writew(host,
1052 		     SDHCI_MAKE_BLKSZ(host->sdma_boundary, data->blksz),
1053 		     SDHCI_BLOCK_SIZE);
1054 	/*
1055 	 * From Version 4.10 onwards, when v4 mode is enabled, a 32-bit block
1056 	 * count is supported; the 16-bit block count register must then be 0.
1057 	 */
1058 	if (host->version >= SDHCI_SPEC_410 && host->v4_mode &&
1059 	    (host->quirks2 & SDHCI_QUIRK2_USE_32BIT_BLK_CNT)) {
1060 		if (sdhci_readw(host, SDHCI_BLOCK_COUNT))
1061 			sdhci_writew(host, 0, SDHCI_BLOCK_COUNT);
1062 		sdhci_writew(host, data->blocks, SDHCI_32BIT_BLK_CNT);
1063 	} else {
1064 		sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
1065 	}
1066 }
1067 
1068 static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
1069 {
1070 	struct mmc_data *data = cmd->data;
1071 
1072 	sdhci_initialize_data(host, data);
1073 
1074 	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
1075 		struct scatterlist *sg;
1076 		unsigned int length_mask, offset_mask;
1077 		int i;
1078 
1079 		host->flags |= SDHCI_REQ_USE_DMA;
1080 
1081 		/*
1082 		 * FIXME: This doesn't account for merging when mapping the
1083 		 * scatterlist.
1084 		 *
1085 		 * The assumption here being that alignment and lengths are
1086 		 * the same after DMA mapping to device address space.
1087 		 */
1088 		length_mask = 0;
1089 		offset_mask = 0;
1090 		if (host->flags & SDHCI_USE_ADMA) {
1091 			if (host->quirks & SDHCI_QUIRK_32BIT_ADMA_SIZE) {
1092 				length_mask = 3;
1093 				/*
1094 				 * As we use up to 3 byte chunks to work
1095 				 * around alignment problems, we need to
1096 				 * check the offset as well.
1097 				 */
1098 				offset_mask = 3;
1099 			}
1100 		} else {
1101 			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_SIZE)
1102 				length_mask = 3;
1103 			if (host->quirks & SDHCI_QUIRK_32BIT_DMA_ADDR)
1104 				offset_mask = 3;
1105 		}
1106 
1107 		if (unlikely(length_mask | offset_mask)) {
1108 			for_each_sg(data->sg, sg, data->sg_len, i) {
1109 				if (sg->length & length_mask) {
1110 					DBG("Reverting to PIO because of transfer size (%d)\n",
1111 					    sg->length);
1112 					host->flags &= ~SDHCI_REQ_USE_DMA;
1113 					break;
1114 				}
1115 				if (sg->offset & offset_mask) {
1116 					DBG("Reverting to PIO because of bad alignment\n");
1117 					host->flags &= ~SDHCI_REQ_USE_DMA;
1118 					break;
1119 				}
1120 			}
1121 		}
1122 	}
1123 
1124 	if (host->flags & SDHCI_REQ_USE_DMA) {
1125 		int sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);
1126 
1127 		if (sg_cnt <= 0) {
1128 			/*
1129 			 * This only happens when someone fed
1130 			 * us an invalid request.
1131 			 */
1132 			WARN_ON(1);
1133 			host->flags &= ~SDHCI_REQ_USE_DMA;
1134 		} else if (host->flags & SDHCI_USE_ADMA) {
1135 			sdhci_adma_table_pre(host, data, sg_cnt);
1136 			sdhci_set_adma_addr(host, host->adma_addr);
1137 		} else {
1138 			WARN_ON(sg_cnt != 1);
1139 			sdhci_set_sdma_addr(host, sdhci_sdma_address(host));
1140 		}
1141 	}
1142 
1143 	sdhci_config_dma(host);
1144 
1145 	if (!(host->flags & SDHCI_REQ_USE_DMA)) {
1146 		int flags;
1147 
1148 		flags = SG_MITER_ATOMIC;
1149 		if (host->data->flags & MMC_DATA_READ)
1150 			flags |= SG_MITER_TO_SG;
1151 		else
1152 			flags |= SG_MITER_FROM_SG;
1153 		sg_miter_start(&host->sg_miter, data->sg, data->sg_len, flags);
1154 		host->blocks = data->blocks;
1155 	}
1156 
1157 	sdhci_set_transfer_irqs(host);
1158 
1159 	sdhci_set_block_info(host, data);
1160 }
1161 
1162 #if IS_ENABLED(CONFIG_MMC_SDHCI_EXTERNAL_DMA)
1163 
1164 static int sdhci_external_dma_init(struct sdhci_host *host)
1165 {
1166 	int ret = 0;
1167 	struct mmc_host *mmc = host->mmc;
1168 
1169 	host->tx_chan = dma_request_chan(mmc->parent, "tx");
1170 	if (IS_ERR(host->tx_chan)) {
1171 		ret = PTR_ERR(host->tx_chan);
1172 		if (ret != -EPROBE_DEFER)
1173 			pr_warn("Failed to request TX DMA channel.\n");
1174 		host->tx_chan = NULL;
1175 		return ret;
1176 	}
1177 
1178 	host->rx_chan = dma_request_chan(mmc->parent, "rx");
1179 	if (IS_ERR(host->rx_chan)) {
1180 		if (host->tx_chan) {
1181 			dma_release_channel(host->tx_chan);
1182 			host->tx_chan = NULL;
1183 		}
1184 
1185 		ret = PTR_ERR(host->rx_chan);
1186 		if (ret != -EPROBE_DEFER)
1187 			pr_warn("Failed to request RX DMA channel.\n");
1188 		host->rx_chan = NULL;
1189 	}
1190 
1191 	return ret;
1192 }
1193 
1194 static struct dma_chan *sdhci_external_dma_channel(struct sdhci_host *host,
1195 						   struct mmc_data *data)
1196 {
1197 	return data->flags & MMC_DATA_WRITE ? host->tx_chan : host->rx_chan;
1198 }
1199 
1200 static int sdhci_external_dma_setup(struct sdhci_host *host,
1201 				    struct mmc_command *cmd)
1202 {
1203 	int ret, i;
1204 	enum dma_transfer_direction dir;
1205 	struct dma_async_tx_descriptor *desc;
1206 	struct mmc_data *data = cmd->data;
1207 	struct dma_chan *chan;
1208 	struct dma_slave_config cfg;
1209 	dma_cookie_t cookie;
1210 	int sg_cnt;
1211 
1212 	if (!host->mapbase)
1213 		return -EINVAL;
1214 
1215 	cfg.src_addr = host->mapbase + SDHCI_BUFFER;
1216 	cfg.dst_addr = host->mapbase + SDHCI_BUFFER;
1217 	cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1218 	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
1219 	cfg.src_maxburst = data->blksz / 4;
1220 	cfg.dst_maxburst = data->blksz / 4;
1221 
1222 	/* Sanity check: all the SG entries must be aligned by block size. */
1223 	for (i = 0; i < data->sg_len; i++) {
1224 		if ((data->sg + i)->length % data->blksz)
1225 			return -EINVAL;
1226 	}
1227 
1228 	chan = sdhci_external_dma_channel(host, data);
1229 
1230 	ret = dmaengine_slave_config(chan, &cfg);
1231 	if (ret)
1232 		return ret;
1233 
1234 	sg_cnt = sdhci_pre_dma_transfer(host, data, COOKIE_MAPPED);
1235 	if (sg_cnt <= 0)
1236 		return -EINVAL;
1237 
1238 	dir = data->flags & MMC_DATA_WRITE ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
1239 	desc = dmaengine_prep_slave_sg(chan, data->sg, data->sg_len, dir,
1240 				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
1241 	if (!desc)
1242 		return -EINVAL;
1243 
1244 	desc->callback = NULL;
1245 	desc->callback_param = NULL;
1246 
1247 	cookie = dmaengine_submit(desc);
1248 	if (dma_submit_error(cookie))
1249 		ret = cookie;
1250 
1251 	return ret;
1252 }
1253 
1254 static void sdhci_external_dma_release(struct sdhci_host *host)
1255 {
1256 	if (host->tx_chan) {
1257 		dma_release_channel(host->tx_chan);
1258 		host->tx_chan = NULL;
1259 	}
1260 
1261 	if (host->rx_chan) {
1262 		dma_release_channel(host->rx_chan);
1263 		host->rx_chan = NULL;
1264 	}
1265 
1266 	sdhci_switch_external_dma(host, false);
1267 }
1268 
1269 static void __sdhci_external_dma_prepare_data(struct sdhci_host *host,
1270 					      struct mmc_command *cmd)
1271 {
1272 	struct mmc_data *data = cmd->data;
1273 
1274 	sdhci_initialize_data(host, data);
1275 
1276 	host->flags |= SDHCI_REQ_USE_DMA;
1277 	sdhci_set_transfer_irqs(host);
1278 
1279 	sdhci_set_block_info(host, data);
1280 }
1281 
1282 static void sdhci_external_dma_prepare_data(struct sdhci_host *host,
1283 					    struct mmc_command *cmd)
1284 {
1285 	if (!sdhci_external_dma_setup(host, cmd)) {
1286 		__sdhci_external_dma_prepare_data(host, cmd);
1287 	} else {
1288 		sdhci_external_dma_release(host);
1289 		pr_err("%s: Cannot use external DMA; falling back to standard SDHCI DMA/PIO.\n",
1290 		       mmc_hostname(host->mmc));
1291 		sdhci_prepare_data(host, cmd);
1292 	}
1293 }
1294 
1295 static void sdhci_external_dma_pre_transfer(struct sdhci_host *host,
1296 					    struct mmc_command *cmd)
1297 {
1298 	struct dma_chan *chan;
1299 
1300 	if (!cmd->data)
1301 		return;
1302 
1303 	chan = sdhci_external_dma_channel(host, cmd->data);
1304 	if (chan)
1305 		dma_async_issue_pending(chan);
1306 }
1307 
1308 #else
1309 
1310 static inline int sdhci_external_dma_init(struct sdhci_host *host)
1311 {
1312 	return -EOPNOTSUPP;
1313 }
1314 
1315 static inline void sdhci_external_dma_release(struct sdhci_host *host)
1316 {
1317 }
1318 
1319 static inline void sdhci_external_dma_prepare_data(struct sdhci_host *host,
1320 						   struct mmc_command *cmd)
1321 {
1322 	/* This should never happen */
1323 	WARN_ON_ONCE(1);
1324 }
1325 
1326 static inline void sdhci_external_dma_pre_transfer(struct sdhci_host *host,
1327 						   struct mmc_command *cmd)
1328 {
1329 }
1330 
1331 static inline struct dma_chan *sdhci_external_dma_channel(struct sdhci_host *host,
1332 							  struct mmc_data *data)
1333 {
1334 	return NULL;
1335 }
1336 
1337 #endif
1338 
1339 void sdhci_switch_external_dma(struct sdhci_host *host, bool en)
1340 {
1341 	host->use_external_dma = en;
1342 }
1343 EXPORT_SYMBOL_GPL(sdhci_switch_external_dma);
1344 
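/* Auto-CMD12 is used only for open-ended transfers: no CMD23 (sbc) and no cmd-during-transfer */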
1345 static inline bool sdhci_auto_cmd12(struct sdhci_host *host,
1346 				    struct mmc_request *mrq)
1347 {
1348 	return !mrq->sbc && (host->flags & SDHCI_AUTO_CMD12) &&
1349 	       !mrq->cap_cmd_during_tfr;
1350 }
1351 
1352 static inline void sdhci_auto_cmd_select(struct sdhci_host *host,
1353 					 struct mmc_command *cmd,
1354 					 u16 *mode)
1355 {
1356 	bool use_cmd12 = sdhci_auto_cmd12(host, cmd->mrq) &&
1357 			 (cmd->opcode != SD_IO_RW_EXTENDED);
1358 	bool use_cmd23 = cmd->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23);
1359 	u16 ctrl2;
1360 
1361 	/*
1362 	 * For Version 4.10 or later, use of 'Auto CMD Auto Select' is
1363 	 * recommended rather than 'Auto CMD12 Enable' or
1364 	 * 'Auto CMD23 Enable'.
1365 	 */
1366 	if (host->version >= SDHCI_SPEC_410 && (use_cmd12 || use_cmd23)) {
1367 		*mode |= SDHCI_TRNS_AUTO_SEL;
1368 
1369 		ctrl2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
1370 		if (use_cmd23)
1371 			ctrl2 |= SDHCI_CMD23_ENABLE;
1372 		else
1373 			ctrl2 &= ~SDHCI_CMD23_ENABLE;
1374 		sdhci_writew(host, ctrl2, SDHCI_HOST_CONTROL2);
1375 
1376 		return;
1377 	}
1378 
1379 	/*
1380 	 * If we are sending CMD23, CMD12 never gets sent
1381 	 * on successful completion (so no Auto-CMD12).
1382 	 */
1383 	if (use_cmd12)
1384 		*mode |= SDHCI_TRNS_AUTO_CMD12;
1385 	else if (use_cmd23)
1386 		*mode |= SDHCI_TRNS_AUTO_CMD23;
1387 }
1388 
1389 static void sdhci_set_transfer_mode(struct sdhci_host *host,
1390 	struct mmc_command *cmd)
1391 {
1392 	u16 mode = 0;
1393 	struct mmc_data *data = cmd->data;
1394 
1395 	if (data == NULL) {
1396 		if (host->quirks2 &
1397 			SDHCI_QUIRK2_CLEAR_TRANSFERMODE_REG_BEFORE_CMD) {
1398 			/* must not clear SDHCI_TRANSFER_MODE when tuning */
1399 			if (cmd->opcode != MMC_SEND_TUNING_BLOCK_HS200)
1400 				sdhci_writew(host, 0x0, SDHCI_TRANSFER_MODE);
1401 		} else {
1402 			/* clear Auto CMD settings for no data CMDs */
1403 			mode = sdhci_readw(host, SDHCI_TRANSFER_MODE);
1404 			sdhci_writew(host, mode & ~(SDHCI_TRNS_AUTO_CMD12 |
1405 				SDHCI_TRNS_AUTO_CMD23), SDHCI_TRANSFER_MODE);
1406 		}
1407 		return;
1408 	}
1409 
1410 	WARN_ON(!host->data);
1411 
1412 	if (!(host->quirks2 & SDHCI_QUIRK2_SUPPORT_SINGLE))
1413 		mode = SDHCI_TRNS_BLK_CNT_EN;
1414 
1415 	if (mmc_op_multi(cmd->opcode) || data->blocks > 1) {
1416 		mode = SDHCI_TRNS_BLK_CNT_EN | SDHCI_TRNS_MULTI;
1417 		sdhci_auto_cmd_select(host, cmd, &mode);
1418 		if (cmd->mrq->sbc && (host->flags & SDHCI_AUTO_CMD23))
1419 			sdhci_writel(host, cmd->mrq->sbc->arg, SDHCI_ARGUMENT2);
1420 	}
1421 
1422 	if (data->flags & MMC_DATA_READ)
1423 		mode |= SDHCI_TRNS_READ;
1424 	if (host->flags & SDHCI_REQ_USE_DMA)
1425 		mode |= SDHCI_TRNS_DMA;
1426 
1427 	sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
1428 }
1429 
1430 static bool sdhci_needs_reset(struct sdhci_host *host, struct mmc_request *mrq)
1431 {
1432 	return (!(host->flags & SDHCI_DEVICE_DEAD) &&
1433 		((mrq->cmd && mrq->cmd->error) ||
1434 		 (mrq->sbc && mrq->sbc->error) ||
1435 		 (mrq->data && mrq->data->stop && mrq->data->stop->error) ||
1436 		 (host->quirks & SDHCI_QUIRK_RESET_AFTER_REQUEST)));
1437 }
1438 
1439 static void sdhci_set_mrq_done(struct sdhci_host *host, struct mmc_request *mrq)
1440 {
1441 	int i;
1442 
1443 	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
1444 		if (host->mrqs_done[i] == mrq) {
1445 			WARN_ON(1);
1446 			return;
1447 		}
1448 	}
1449 
1450 	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
1451 		if (!host->mrqs_done[i]) {
1452 			host->mrqs_done[i] = mrq;
1453 			break;
1454 		}
1455 	}
1456 
1457 	WARN_ON(i >= SDHCI_MAX_MRQS);
1458 }
1459 
1460 static void __sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
1461 {
1462 	if (host->cmd && host->cmd->mrq == mrq)
1463 		host->cmd = NULL;
1464 
1465 	if (host->data_cmd && host->data_cmd->mrq == mrq)
1466 		host->data_cmd = NULL;
1467 
1468 	if (host->data && host->data->mrq == mrq)
1469 		host->data = NULL;
1470 
1471 	if (sdhci_needs_reset(host, mrq))
1472 		host->pending_reset = true;
1473 
1474 	sdhci_set_mrq_done(host, mrq);
1475 
1476 	sdhci_del_timer(host, mrq);
1477 
1478 	if (!sdhci_has_requests(host))
1479 		sdhci_led_deactivate(host);
1480 }
1481 
1482 static void sdhci_finish_mrq(struct sdhci_host *host, struct mmc_request *mrq)
1483 {
1484 	__sdhci_finish_mrq(host, mrq);
1485 
1486 	queue_work(host->complete_wq, &host->complete_work);
1487 }
1488 
1489 static void sdhci_finish_data(struct sdhci_host *host)
1490 {
1491 	struct mmc_command *data_cmd = host->data_cmd;
1492 	struct mmc_data *data = host->data;
1493 
1494 	host->data = NULL;
1495 	host->data_cmd = NULL;
1496 
1497 	/*
1498 	 * The controller needs a reset of internal state machines upon error
1499 	 * conditions.
1500 	 */
1501 	if (data->error) {
1502 		if (!host->cmd || host->cmd == data_cmd)
1503 			sdhci_do_reset(host, SDHCI_RESET_CMD);
1504 		sdhci_do_reset(host, SDHCI_RESET_DATA);
1505 	}
1506 
1507 	if ((host->flags & (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA)) ==
1508 	    (SDHCI_REQ_USE_DMA | SDHCI_USE_ADMA))
1509 		sdhci_adma_table_post(host, data);
1510 
1511 	/*
1512 	 * The specification states that the block count register must
1513 	 * be updated, but it does not specify at what point in the
1514 	 * data flow. That makes the register entirely useless to read
1515 	 * back so we have to assume that nothing made it to the card
1516 	 * in the event of an error.
1517 	 */
1518 	if (data->error)
1519 		data->bytes_xfered = 0;
1520 	else
1521 		data->bytes_xfered = data->blksz * data->blocks;
1522 
1523 	/*
1524 	 * Need to send CMD12 if -
1525 	 * a) open-ended multiblock transfer not using auto CMD12 (no CMD23)
1526 	 * b) error in multiblock transfer
1527 	 */
1528 	if (data->stop &&
1529 	    ((!data->mrq->sbc && !sdhci_auto_cmd12(host, data->mrq)) ||
1530 	     data->error)) {
1531 		/*
1532 		 * 'cap_cmd_during_tfr' request must not use the command line
1533 		 * after mmc_command_done() has been called. It is upper layer's
1534 		 * responsibility to send the stop command if required.
1535 		 */
1536 		if (data->mrq->cap_cmd_during_tfr) {
1537 			__sdhci_finish_mrq(host, data->mrq);
1538 		} else {
1539 			/* Avoid triggering warning in sdhci_send_command() */
1540 			host->cmd = NULL;
1541 			sdhci_send_command(host, data->stop);
1542 		}
1543 	} else {
1544 		__sdhci_finish_mrq(host, data->mrq);
1545 	}
1546 }
1547 
1548 void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
1549 {
1550 	int flags;
1551 	u32 mask;
1552 	unsigned long timeout;
1553 
1554 	WARN_ON(host->cmd);
1555 
1556 	/* Initially, a command has no error */
1557 	cmd->error = 0;
1558 
1559 	if ((host->quirks2 & SDHCI_QUIRK2_STOP_WITH_TC) &&
1560 	    cmd->opcode == MMC_STOP_TRANSMISSION)
1561 		cmd->flags |= MMC_RSP_BUSY;
1562 
1563 	/* Wait max 10 ms */
1564 	timeout = 10;
1565 
1566 	mask = SDHCI_CMD_INHIBIT;
1567 	if (sdhci_data_line_cmd(cmd))
1568 		mask |= SDHCI_DATA_INHIBIT;
1569 
1570 	/* We shouldn't wait for data inhibit for stop commands, even
1571 	   though they might use busy signaling */
1572 	if (cmd->mrq->data && (cmd == cmd->mrq->data->stop))
1573 		mask &= ~SDHCI_DATA_INHIBIT;
1574 
1575 	while (sdhci_readl(host, SDHCI_PRESENT_STATE) & mask) {
1576 		if (timeout == 0) {
1577 			pr_err("%s: Controller never released inhibit bit(s).\n",
1578 			       mmc_hostname(host->mmc));
1579 			sdhci_dumpregs(host);
1580 			cmd->error = -EIO;
1581 			sdhci_finish_mrq(host, cmd->mrq);
1582 			return;
1583 		}
1584 		timeout--;
1585 		mdelay(1);
1586 	}
1587 
1588 	host->cmd = cmd;
1589 	host->data_timeout = 0;
1590 	if (sdhci_data_line_cmd(cmd)) {
1591 		WARN_ON(host->data_cmd);
1592 		host->data_cmd = cmd;
1593 		sdhci_set_timeout(host, cmd);
1594 	}
1595 
1596 	if (cmd->data) {
1597 		if (host->use_external_dma)
1598 			sdhci_external_dma_prepare_data(host, cmd);
1599 		else
1600 			sdhci_prepare_data(host, cmd);
1601 	}
1602 
1603 	sdhci_writel(host, cmd->arg, SDHCI_ARGUMENT);
1604 
1605 	sdhci_set_transfer_mode(host, cmd);
1606 
1607 	if ((cmd->flags & MMC_RSP_136) && (cmd->flags & MMC_RSP_BUSY)) {
1608 		pr_err("%s: Unsupported response type!\n",
1609 			mmc_hostname(host->mmc));
1610 		cmd->error = -EINVAL;
1611 		sdhci_finish_mrq(host, cmd->mrq);
1612 		return;
1613 	}
1614 
1615 	if (!(cmd->flags & MMC_RSP_PRESENT))
1616 		flags = SDHCI_CMD_RESP_NONE;
1617 	else if (cmd->flags & MMC_RSP_136)
1618 		flags = SDHCI_CMD_RESP_LONG;
1619 	else if (cmd->flags & MMC_RSP_BUSY)
1620 		flags = SDHCI_CMD_RESP_SHORT_BUSY;
1621 	else
1622 		flags = SDHCI_CMD_RESP_SHORT;
1623 
1624 	if (cmd->flags & MMC_RSP_CRC)
1625 		flags |= SDHCI_CMD_CRC;
1626 	if (cmd->flags & MMC_RSP_OPCODE)
1627 		flags |= SDHCI_CMD_INDEX;
1628 
1629 	/* CMD19 is special in that the Data Present Select should be set */
1630 	if (cmd->data || cmd->opcode == MMC_SEND_TUNING_BLOCK ||
1631 	    cmd->opcode == MMC_SEND_TUNING_BLOCK_HS200)
1632 		flags |= SDHCI_CMD_DATA;
1633 
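	/* Software watchdog: use the data timeout if known, scale long busy waits, else default to 10 s */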
1634 	timeout = jiffies;
1635 	if (host->data_timeout)
1636 		timeout += nsecs_to_jiffies(host->data_timeout);
1637 	else if (!cmd->data && cmd->busy_timeout > 9000)
1638 		timeout += DIV_ROUND_UP(cmd->busy_timeout, 1000) * HZ + HZ;
1639 	else
1640 		timeout += 10 * HZ;
1641 	sdhci_mod_timer(host, cmd->mrq, timeout);
1642 
1643 	if (host->use_external_dma)
1644 		sdhci_external_dma_pre_transfer(host, cmd);
1645 
1646 	sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
1647 }
1648 EXPORT_SYMBOL_GPL(sdhci_send_command);
1649 
1650 static void sdhci_read_rsp_136(struct sdhci_host *host, struct mmc_command *cmd)
1651 {
1652 	int i, reg;
1653 
1654 	for (i = 0; i < 4; i++) {
1655 		reg = SDHCI_RESPONSE + (3 - i) * 4;
1656 		cmd->resp[i] = sdhci_readl(host, reg);
1657 	}
1658 
1659 	if (host->quirks2 & SDHCI_QUIRK2_RSP_136_HAS_CRC)
1660 		return;
1661 
1662 	/* CRC is stripped so we need to do some shifting */
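	/*
	 * The controller strips the CRC byte, so the 120-bit payload
	 * sits 8 bits lower than the core expects: shift each word
	 * left by 8 and pull in the top byte of the following word.
	 */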
1663 	for (i = 0; i < 4; i++) {
1664 		cmd->resp[i] <<= 8;
1665 		if (i != 3)
1666 			cmd->resp[i] |= cmd->resp[i + 1] >> 24;
1667 	}
1668 }
1669 
1670 static void sdhci_finish_command(struct sdhci_host *host)
1671 {
1672 	struct mmc_command *cmd = host->cmd;
1673 
1674 	host->cmd = NULL;
1675 
1676 	if (cmd->flags & MMC_RSP_PRESENT) {
1677 		if (cmd->flags & MMC_RSP_136) {
1678 			sdhci_read_rsp_136(host, cmd);
1679 		} else {
1680 			cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
1681 		}
1682 	}
1683 
1684 	if (cmd->mrq->cap_cmd_during_tfr && cmd == cmd->mrq->cmd)
1685 		mmc_command_done(host->mmc, cmd->mrq);
1686 
1687 	/*
1688 	 * The host can send an interrupt when the busy state has
1689 	 * ended, allowing us to wait without wasting CPU cycles.
1690 	 * The busy signal uses DAT0 so this is similar to waiting
1691 	 * for data to complete.
1692 	 *
1693 	 * Note: The 1.0 specification is a bit ambiguous about this
1694 	 *       feature so there might be some problems with older
1695 	 *       controllers.
1696 	 */
1697 	if (cmd->flags & MMC_RSP_BUSY) {
1698 		if (cmd->data) {
1699 			DBG("Cannot wait for busy signal when also doing a data transfer");
1700 		} else if (!(host->quirks & SDHCI_QUIRK_NO_BUSY_IRQ) &&
1701 			   cmd == host->data_cmd) {
1702 			/* Command complete before busy is ended */
1703 			return;
1704 		}
1705 	}
1706 
1707 	/* Finished CMD23, now send actual command. */
1708 	if (cmd == cmd->mrq->sbc) {
1709 		sdhci_send_command(host, cmd->mrq->cmd);
1710 	} else {
1711 
1712 		/* Processed actual command. */
1713 		if (host->data && host->data_early)
1714 			sdhci_finish_data(host);
1715 
1716 		if (!cmd->data)
1717 			__sdhci_finish_mrq(host, cmd->mrq);
1718 	}
1719 }
1720 
1721 static u16 sdhci_get_preset_value(struct sdhci_host *host)
1722 {
1723 	u16 preset = 0;
1724 
1725 	switch (host->timing) {
1726 	case MMC_TIMING_UHS_SDR12:
1727 		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
1728 		break;
1729 	case MMC_TIMING_UHS_SDR25:
1730 		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25);
1731 		break;
1732 	case MMC_TIMING_UHS_SDR50:
1733 		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50);
1734 		break;
1735 	case MMC_TIMING_UHS_SDR104:
1736 	case MMC_TIMING_MMC_HS200:
1737 		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104);
1738 		break;
1739 	case MMC_TIMING_UHS_DDR50:
1740 	case MMC_TIMING_MMC_DDR52:
1741 		preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50);
1742 		break;
1743 	case MMC_TIMING_MMC_HS400:
1744 		preset = sdhci_readw(host, SDHCI_PRESET_FOR_HS400);
1745 		break;
1746 	default:
1747 		pr_warn("%s: Invalid UHS-I mode selected\n",
1748 			mmc_hostname(host->mmc));
1749 		preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12);
1750 		break;
1751 	}
1752 	return preset;
1753 }
1754 
1755 u16 sdhci_calc_clk(struct sdhci_host *host, unsigned int clock,
1756 		   unsigned int *actual_clock)
1757 {
1758 	int div = 0; /* Initialized for compiler warning */
1759 	int real_div = div, clk_mul = 1;
1760 	u16 clk = 0;
1761 	bool switch_base_clk = false;
1762 
1763 	if (host->version >= SDHCI_SPEC_300) {
1764 		if (host->preset_enabled) {
1765 			u16 pre_val;
1766 
1767 			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1768 			pre_val = sdhci_get_preset_value(host);
1769 			div = (pre_val & SDHCI_PRESET_SDCLK_FREQ_MASK)
1770 				>> SDHCI_PRESET_SDCLK_FREQ_SHIFT;
1771 			if (host->clk_mul &&
1772 				(pre_val & SDHCI_PRESET_CLKGEN_SEL_MASK)) {
1773 				clk = SDHCI_PROG_CLOCK_MODE;
1774 				real_div = div + 1;
1775 				clk_mul = host->clk_mul;
1776 			} else {
1777 				real_div = max_t(int, 1, div << 1);
1778 			}
1779 			goto clock_set;
1780 		}
1781 
1782 		/*
1783 		 * Check if the Host Controller supports Programmable Clock
1784 		 * Mode.
1785 		 */
1786 		if (host->clk_mul) {
1787 			for (div = 1; div <= 1024; div++) {
1788 				if ((host->max_clk * host->clk_mul / div)
1789 					<= clock)
1790 					break;
1791 			}
1792 			if ((host->max_clk * host->clk_mul / div) <= clock) {
1793 				/*
1794 				 * Set Programmable Clock Mode in the Clock
1795 				 * Control register.
1796 				 */
1797 				clk = SDHCI_PROG_CLOCK_MODE;
1798 				real_div = div;
1799 				clk_mul = host->clk_mul;
1800 				div--;
1801 			} else {
1802 				/*
1803 				 * Even the largest divisor cannot reach the
1804 				 * requested clock speed, so use the base clock.
1805 				 */
1806 				switch_base_clk = true;
1807 			}
1808 		}
1809 
1810 		if (!host->clk_mul || switch_base_clk) {
1811 			/* Version 3.00 divisors must be a multiple of 2. */
1812 			if (host->max_clk <= clock)
1813 				div = 1;
1814 			else {
1815 				for (div = 2; div < SDHCI_MAX_DIV_SPEC_300;
1816 				     div += 2) {
1817 					if ((host->max_clk / div) <= clock)
1818 						break;
1819 				}
1820 			}
1821 			real_div = div;
1822 			div >>= 1;
1823 			if ((host->quirks2 & SDHCI_QUIRK2_CLOCK_DIV_ZERO_BROKEN)
1824 				&& !div && host->max_clk <= 25000000)
1825 				div = 1;
1826 		}
1827 	} else {
1828 		/* Version 2.00 divisors must be a power of 2. */
1829 		for (div = 1; div < SDHCI_MAX_DIV_SPEC_200; div *= 2) {
1830 			if ((host->max_clk / div) <= clock)
1831 				break;
1832 		}
1833 		real_div = div;
1834 		div >>= 1;
1835 	}
1836 
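	/*
	 * Worked example (illustrative, assumed values): on a v3.00
	 * host with max_clk = 200 MHz, a 50 MHz request picks div = 4
	 * above, so real_div = 4 (actual clock 50 MHz) and div >>= 1
	 * programs a divider field of 2, i.e. base_clk / (2 * 2).
	 */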
1837 clock_set:
1838 	if (real_div)
1839 		*actual_clock = (host->max_clk * clk_mul) / real_div;
1840 	clk |= (div & SDHCI_DIV_MASK) << SDHCI_DIVIDER_SHIFT;
1841 	clk |= ((div & SDHCI_DIV_HI_MASK) >> SDHCI_DIV_MASK_LEN)
1842 		<< SDHCI_DIVIDER_HI_SHIFT;
1843 
1844 	return clk;
1845 }
1846 EXPORT_SYMBOL_GPL(sdhci_calc_clk);
1847 
1848 void sdhci_enable_clk(struct sdhci_host *host, u16 clk)
1849 {
1850 	ktime_t timeout;
1851 
1852 	clk |= SDHCI_CLOCK_INT_EN;
1853 	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1854 
1855 	/* Wait max 150 ms */
1856 	timeout = ktime_add_ms(ktime_get(), 150);
1857 	while (1) {
1858 		bool timedout = ktime_after(ktime_get(), timeout);
1859 
1860 		clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1861 		if (clk & SDHCI_CLOCK_INT_STABLE)
1862 			break;
1863 		if (timedout) {
1864 			pr_err("%s: Internal clock never stabilised.\n",
1865 			       mmc_hostname(host->mmc));
1866 			sdhci_dumpregs(host);
1867 			return;
1868 		}
1869 		udelay(10);
1870 	}
1871 
1872 	if (host->version >= SDHCI_SPEC_410 && host->v4_mode) {
1873 		clk |= SDHCI_CLOCK_PLL_EN;
1874 		clk &= ~SDHCI_CLOCK_INT_STABLE;
1875 		sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1876 
1877 		/* Wait max 150 ms */
1878 		timeout = ktime_add_ms(ktime_get(), 150);
1879 		while (1) {
1880 			bool timedout = ktime_after(ktime_get(), timeout);
1881 
1882 			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
1883 			if (clk & SDHCI_CLOCK_INT_STABLE)
1884 				break;
1885 			if (timedout) {
1886 				pr_err("%s: PLL clock never stabilised.\n",
1887 				       mmc_hostname(host->mmc));
1888 				sdhci_dumpregs(host);
1889 				return;
1890 			}
1891 			udelay(10);
1892 		}
1893 	}
1894 
1895 	clk |= SDHCI_CLOCK_CARD_EN;
1896 	sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
1897 }
1898 EXPORT_SYMBOL_GPL(sdhci_enable_clk);
1899 
1900 void sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
1901 {
1902 	u16 clk;
1903 
1904 	host->mmc->actual_clock = 0;
1905 
1906 	sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL);
1907 
1908 	if (clock == 0)
1909 		return;
1910 
1911 	clk = sdhci_calc_clk(host, clock, &host->mmc->actual_clock);
1912 	sdhci_enable_clk(host, clk);
1913 }
1914 EXPORT_SYMBOL_GPL(sdhci_set_clock);
1915 
1916 static void sdhci_set_power_reg(struct sdhci_host *host, unsigned char mode,
1917 				unsigned short vdd)
1918 {
1919 	struct mmc_host *mmc = host->mmc;
1920 
1921 	mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd);
1922 
1923 	if (mode != MMC_POWER_OFF)
1924 		sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL);
1925 	else
1926 		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
1927 }
1928 
1929 void sdhci_set_power_noreg(struct sdhci_host *host, unsigned char mode,
1930 			   unsigned short vdd)
1931 {
1932 	u8 pwr = 0;
1933 
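	/*
	 * vdd is the bit number of the selected voltage range in the OCR
	 * mask, so (1 << vdd) reconstructs the MMC_VDD_* bit to match on.
	 */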
1934 	if (mode != MMC_POWER_OFF) {
1935 		switch (1 << vdd) {
1936 		case MMC_VDD_165_195:
1937 		/*
1938 		 * Without a regulator, SDHCI does not support 2.0v
1939 		 * so we only get here if the driver deliberately
1940 		 * added the 2.0v range to ocr_avail. Map it to 1.8v
1941 		 * for the purpose of turning on the power.
1942 		 */
1943 		case MMC_VDD_20_21:
1944 			pwr = SDHCI_POWER_180;
1945 			break;
1946 		case MMC_VDD_29_30:
1947 		case MMC_VDD_30_31:
1948 			pwr = SDHCI_POWER_300;
1949 			break;
1950 		case MMC_VDD_32_33:
1951 		case MMC_VDD_33_34:
1952 			pwr = SDHCI_POWER_330;
1953 			break;
1954 		default:
1955 			WARN(1, "%s: Invalid vdd %#x\n",
1956 			     mmc_hostname(host->mmc), vdd);
1957 			break;
1958 		}
1959 	}
1960 
1961 	if (host->pwr == pwr)
1962 		return;
1963 
1964 	host->pwr = pwr;
1965 
1966 	if (pwr == 0) {
1967 		sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
1968 		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
1969 			sdhci_runtime_pm_bus_off(host);
1970 	} else {
1971 		/*
1972 		 * Spec says that we should clear the power reg before setting
1973 		 * a new value. Some controllers don't seem to like this though.
1974 		 */
1975 		if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE))
1976 			sdhci_writeb(host, 0, SDHCI_POWER_CONTROL);
1977 
1978 		/*
1979 		 * At least the Marvell CaFe chip gets confused if we set the
1980 		 * voltage and set turn on power at the same time, so set the
1981 		 * voltage first.
1982 		 */
1983 		if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER)
1984 			sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
1985 
1986 		pwr |= SDHCI_POWER_ON;
1987 
1988 		sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL);
1989 
1990 		if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON)
1991 			sdhci_runtime_pm_bus_on(host);
1992 
1993 		/*
1994 		 * Some controllers need an extra 10 ms delay before they
1995 		 * can apply clock after applying power
1996 		 */
1997 		if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER)
1998 			mdelay(10);
1999 	}
2000 }
2001 EXPORT_SYMBOL_GPL(sdhci_set_power_noreg);
2002 
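/*
 * With a usable vmmc regulator, the voltage is set through the regulator
 * framework and the power register only gates bus power; otherwise the
 * voltage select bits in SDHCI_POWER_CONTROL are programmed directly.
 */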
2003 void sdhci_set_power(struct sdhci_host *host, unsigned char mode,
2004 		     unsigned short vdd)
2005 {
2006 	if (IS_ERR(host->mmc->supply.vmmc))
2007 		sdhci_set_power_noreg(host, mode, vdd);
2008 	else
2009 		sdhci_set_power_reg(host, mode, vdd);
2010 }
2011 EXPORT_SYMBOL_GPL(sdhci_set_power);
2012 
2013 /*****************************************************************************\
2014  *                                                                           *
2015  * MMC callbacks                                                             *
2016  *                                                                           *
2017 \*****************************************************************************/
2018 
2019 void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
2020 {
2021 	struct sdhci_host *host;
2022 	int present;
2023 	unsigned long flags;
2024 
2025 	host = mmc_priv(mmc);
2026 
2027 	/* Check card presence first; GPIO card-detect reads can sleep */
2028 	present = mmc->ops->get_cd(mmc);
2029 
2030 	spin_lock_irqsave(&host->lock, flags);
2031 
2032 	sdhci_led_activate(host);
2033 
2034 	if (!present || host->flags & SDHCI_DEVICE_DEAD) {
2035 		mrq->cmd->error = -ENOMEDIUM;
2036 		sdhci_finish_mrq(host, mrq);
2037 	} else {
2038 		if (mrq->sbc && !(host->flags & SDHCI_AUTO_CMD23))
2039 			sdhci_send_command(host, mrq->sbc);
2040 		else
2041 			sdhci_send_command(host, mrq->cmd);
2042 	}
2043 
2044 	spin_unlock_irqrestore(&host->lock, flags);
2045 }
2046 EXPORT_SYMBOL_GPL(sdhci_request);
2047 
2048 void sdhci_set_bus_width(struct sdhci_host *host, int width)
2049 {
2050 	u8 ctrl;
2051 
2052 	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
2053 	if (width == MMC_BUS_WIDTH_8) {
2054 		ctrl &= ~SDHCI_CTRL_4BITBUS;
2055 		ctrl |= SDHCI_CTRL_8BITBUS;
2056 	} else {
2057 		if (host->mmc->caps & MMC_CAP_8_BIT_DATA)
2058 			ctrl &= ~SDHCI_CTRL_8BITBUS;
2059 		if (width == MMC_BUS_WIDTH_4)
2060 			ctrl |= SDHCI_CTRL_4BITBUS;
2061 		else
2062 			ctrl &= ~SDHCI_CTRL_4BITBUS;
2063 	}
2064 	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
2065 }
2066 EXPORT_SYMBOL_GPL(sdhci_set_bus_width);
2067 
2068 void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing)
2069 {
2070 	u16 ctrl_2;
2071 
2072 	ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2073 	/* Select Bus Speed Mode for host */
2074 	ctrl_2 &= ~SDHCI_CTRL_UHS_MASK;
2075 	if ((timing == MMC_TIMING_MMC_HS200) ||
2076 	    (timing == MMC_TIMING_UHS_SDR104))
2077 		ctrl_2 |= SDHCI_CTRL_UHS_SDR104;
2078 	else if (timing == MMC_TIMING_UHS_SDR12)
2079 		ctrl_2 |= SDHCI_CTRL_UHS_SDR12;
2080 	else if (timing == MMC_TIMING_UHS_SDR25)
2081 		ctrl_2 |= SDHCI_CTRL_UHS_SDR25;
2082 	else if (timing == MMC_TIMING_UHS_SDR50)
2083 		ctrl_2 |= SDHCI_CTRL_UHS_SDR50;
2084 	else if ((timing == MMC_TIMING_UHS_DDR50) ||
2085 		 (timing == MMC_TIMING_MMC_DDR52))
2086 		ctrl_2 |= SDHCI_CTRL_UHS_DDR50;
2087 	else if (timing == MMC_TIMING_MMC_HS400)
2088 		ctrl_2 |= SDHCI_CTRL_HS400; /* Non-standard */
2089 	sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
2090 }
2091 EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling);
2092 
2093 void sdhci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
2094 {
2095 	struct sdhci_host *host = mmc_priv(mmc);
2096 	u8 ctrl;
2097 
2098 	if (ios->power_mode == MMC_POWER_UNDEFINED)
2099 		return;
2100 
2101 	if (host->flags & SDHCI_DEVICE_DEAD) {
2102 		if (!IS_ERR(mmc->supply.vmmc) &&
2103 		    ios->power_mode == MMC_POWER_OFF)
2104 			mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, 0);
2105 		return;
2106 	}
2107 
2108 	/*
2109 	 * Reset the chip on each power off.
2110 	 * Should clear out any weird states.
2111 	 */
2112 	if (ios->power_mode == MMC_POWER_OFF) {
2113 		sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
2114 		sdhci_reinit(host);
2115 	}
2116 
2117 	if (host->version >= SDHCI_SPEC_300 &&
2118 		(ios->power_mode == MMC_POWER_UP) &&
2119 		!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN))
2120 		sdhci_enable_preset_value(host, false);
2121 
2122 	if (!ios->clock || ios->clock != host->clock) {
2123 		host->ops->set_clock(host, ios->clock);
2124 		host->clock = ios->clock;
2125 
2126 		if (host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK &&
2127 		    host->clock) {
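			/*
			 * With this quirk the data timeout counter runs off
			 * SDCLK, so refresh timeout_clk (in kHz) and the
			 * resulting maximum busy timeout (in ms) whenever the
			 * bus clock changes.
			 */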
2128 			host->timeout_clk = host->mmc->actual_clock ?
2129 						host->mmc->actual_clock / 1000 :
2130 						host->clock / 1000;
2131 			host->mmc->max_busy_timeout =
2132 				host->ops->get_max_timeout_count ?
2133 				host->ops->get_max_timeout_count(host) :
2134 				1 << 27;
2135 			host->mmc->max_busy_timeout /= host->timeout_clk;
2136 		}
2137 	}
2138 
2139 	if (host->ops->set_power)
2140 		host->ops->set_power(host, ios->power_mode, ios->vdd);
2141 	else
2142 		sdhci_set_power(host, ios->power_mode, ios->vdd);
2143 
2144 	if (host->ops->platform_send_init_74_clocks)
2145 		host->ops->platform_send_init_74_clocks(host, ios->power_mode);
2146 
2147 	host->ops->set_bus_width(host, ios->bus_width);
2148 
2149 	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
2150 
2151 	if (!(host->quirks & SDHCI_QUIRK_NO_HISPD_BIT)) {
2152 		if (ios->timing == MMC_TIMING_SD_HS ||
2153 		     ios->timing == MMC_TIMING_MMC_HS ||
2154 		     ios->timing == MMC_TIMING_MMC_HS400 ||
2155 		     ios->timing == MMC_TIMING_MMC_HS200 ||
2156 		     ios->timing == MMC_TIMING_MMC_DDR52 ||
2157 		     ios->timing == MMC_TIMING_UHS_SDR50 ||
2158 		     ios->timing == MMC_TIMING_UHS_SDR104 ||
2159 		     ios->timing == MMC_TIMING_UHS_DDR50 ||
2160 		     ios->timing == MMC_TIMING_UHS_SDR25)
2161 			ctrl |= SDHCI_CTRL_HISPD;
2162 		else
2163 			ctrl &= ~SDHCI_CTRL_HISPD;
2164 	}
2165 
2166 	if (host->version >= SDHCI_SPEC_300) {
2167 		u16 clk, ctrl_2;
2168 
2169 		if (!host->preset_enabled) {
2170 			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
2171 			/*
2172 			 * We only need to set Driver Strength if the
2173 			 * preset value enable is not set.
2174 			 */
2175 			ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2176 			ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK;
2177 			if (ios->drv_type == MMC_SET_DRIVER_TYPE_A)
2178 				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A;
2179 			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_B)
2180 				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
2181 			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_C)
2182 				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_C;
2183 			else if (ios->drv_type == MMC_SET_DRIVER_TYPE_D)
2184 				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_D;
2185 			else {
2186 				pr_warn("%s: invalid driver type, default to driver type B\n",
2187 					mmc_hostname(mmc));
2188 				ctrl_2 |= SDHCI_CTRL_DRV_TYPE_B;
2189 			}
2190 
2191 			sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2);
2192 		} else {
2193 			/*
2194 			 * According to SDHC Spec v3.00, if the Preset Value
2195 			 * Enable in the Host Control 2 register is set, we
2196 			 * need to reset SD Clock Enable before changing High
2197 			 * Speed Enable to avoid generating clock glitches.
2198 			 */
2199 
2200 			/* Reset SD Clock Enable */
2201 			clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
2202 			clk &= ~SDHCI_CLOCK_CARD_EN;
2203 			sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
2204 
2205 			sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
2206 
2207 			/* Re-enable SD Clock */
2208 			host->ops->set_clock(host, host->clock);
2209 		}
2210 
2211 		/* Reset SD Clock Enable */
2212 		clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL);
2213 		clk &= ~SDHCI_CLOCK_CARD_EN;
2214 		sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL);
2215 
2216 		host->ops->set_uhs_signaling(host, ios->timing);
2217 		host->timing = ios->timing;
2218 
2219 		if (!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) &&
2220 				((ios->timing == MMC_TIMING_UHS_SDR12) ||
2221 				 (ios->timing == MMC_TIMING_UHS_SDR25) ||
2222 				 (ios->timing == MMC_TIMING_UHS_SDR50) ||
2223 				 (ios->timing == MMC_TIMING_UHS_SDR104) ||
2224 				 (ios->timing == MMC_TIMING_UHS_DDR50) ||
2225 				 (ios->timing == MMC_TIMING_MMC_DDR52))) {
2226 			u16 preset;
2227 
2228 			sdhci_enable_preset_value(host, true);
2229 			preset = sdhci_get_preset_value(host);
2230 			ios->drv_type = (preset & SDHCI_PRESET_DRV_MASK)
2231 				>> SDHCI_PRESET_DRV_SHIFT;
2232 		}
2233 
2234 		/* Re-enable SD Clock */
2235 		host->ops->set_clock(host, host->clock);
2236 	} else
2237 		sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
2238 
2239 	/*
2240 	 * Some (ENE) controllers misbehave on certain ios operations,
2241 	 * signalling timeout and CRC errors even on CMD0. Resetting
2242 	 * them on each ios seems to solve the problem.
2243 	 */
2244 	if (host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS)
2245 		sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA);
2246 }
2247 EXPORT_SYMBOL_GPL(sdhci_set_ios);
2248 
2249 static int sdhci_get_cd(struct mmc_host *mmc)
2250 {
2251 	struct sdhci_host *host = mmc_priv(mmc);
2252 	int gpio_cd = mmc_gpio_get_cd(mmc);
2253 
2254 	if (host->flags & SDHCI_DEVICE_DEAD)
2255 		return 0;
2256 
2257 	/* If nonremovable, assume that the card is always present. */
2258 	if (!mmc_card_is_removable(host->mmc))
2259 		return 1;
2260 
2261 	/*
2262 	 * Try slot GPIO detect; if defined, it takes precedence
2263 	 * over the built-in controller functionality.
2264 	 */
2265 	if (gpio_cd >= 0)
2266 		return !!gpio_cd;
2267 
2268 	/* If polling, assume that the card is always present. */
2269 	if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)
2270 		return 1;
2271 
2272 	/* Host native card detect */
2273 	return !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
2274 }
2275 
2276 static int sdhci_check_ro(struct sdhci_host *host)
2277 {
2278 	unsigned long flags;
2279 	int is_readonly;
2280 
2281 	spin_lock_irqsave(&host->lock, flags);
2282 
2283 	if (host->flags & SDHCI_DEVICE_DEAD)
2284 		is_readonly = 0;
2285 	else if (host->ops->get_ro)
2286 		is_readonly = host->ops->get_ro(host);
2287 	else if (mmc_can_gpio_ro(host->mmc))
2288 		is_readonly = mmc_gpio_get_ro(host->mmc);
2289 	else
2290 		is_readonly = !(sdhci_readl(host, SDHCI_PRESENT_STATE)
2291 				& SDHCI_WRITE_PROTECT);
2292 
2293 	spin_unlock_irqrestore(&host->lock, flags);
2294 
2295 	/* This quirk needs to be replaced by a callback function later */
2296 	return host->quirks & SDHCI_QUIRK_INVERTED_WRITE_PROTECT ?
2297 		!is_readonly : is_readonly;
2298 }
2299 
2300 #define SAMPLE_COUNT	5
2301 
2302 static int sdhci_get_ro(struct mmc_host *mmc)
2303 {
2304 	struct sdhci_host *host = mmc_priv(mmc);
2305 	int i, ro_count;
2306 
2307 	if (!(host->quirks & SDHCI_QUIRK_UNSTABLE_RO_DETECT))
2308 		return sdhci_check_ro(host);
2309 
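	/*
	 * Unstable RO detection: sample the write-protect state up to
	 * SAMPLE_COUNT times, 30 ms apart, and report read-only as soon as
	 * a majority of samples agree.
	 */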
2310 	ro_count = 0;
2311 	for (i = 0; i < SAMPLE_COUNT; i++) {
2312 		if (sdhci_check_ro(host)) {
2313 			if (++ro_count > SAMPLE_COUNT / 2)
2314 				return 1;
2315 		}
2316 		msleep(30);
2317 	}
2318 	return 0;
2319 }
2320 
2321 static void sdhci_hw_reset(struct mmc_host *mmc)
2322 {
2323 	struct sdhci_host *host = mmc_priv(mmc);
2324 
2325 	if (host->ops && host->ops->hw_reset)
2326 		host->ops->hw_reset(host);
2327 }
2328 
2329 static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable)
2330 {
2331 	if (!(host->flags & SDHCI_DEVICE_DEAD)) {
2332 		if (enable)
2333 			host->ier |= SDHCI_INT_CARD_INT;
2334 		else
2335 			host->ier &= ~SDHCI_INT_CARD_INT;
2336 
2337 		sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
2338 		sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
2339 	}
2340 }
2341 
2342 void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable)
2343 {
2344 	struct sdhci_host *host = mmc_priv(mmc);
2345 	unsigned long flags;
2346 
2347 	if (enable)
2348 		pm_runtime_get_noresume(host->mmc->parent);
2349 
2350 	spin_lock_irqsave(&host->lock, flags);
2351 	sdhci_enable_sdio_irq_nolock(host, enable);
2352 	spin_unlock_irqrestore(&host->lock, flags);
2353 
2354 	if (!enable)
2355 		pm_runtime_put_noidle(host->mmc->parent);
2356 }
2357 EXPORT_SYMBOL_GPL(sdhci_enable_sdio_irq);
2358 
2359 static void sdhci_ack_sdio_irq(struct mmc_host *mmc)
2360 {
2361 	struct sdhci_host *host = mmc_priv(mmc);
2362 	unsigned long flags;
2363 
2364 	spin_lock_irqsave(&host->lock, flags);
2365 	sdhci_enable_sdio_irq_nolock(host, true);
2366 	spin_unlock_irqrestore(&host->lock, flags);
2367 }
2368 
2369 int sdhci_start_signal_voltage_switch(struct mmc_host *mmc,
2370 				      struct mmc_ios *ios)
2371 {
2372 	struct sdhci_host *host = mmc_priv(mmc);
2373 	u16 ctrl;
2374 	int ret;
2375 
2376 	/*
2377 	 * Signal Voltage Switching is only applicable for Host Controllers
2378 	 * v3.00 and above.
2379 	 */
2380 	if (host->version < SDHCI_SPEC_300)
2381 		return 0;
2382 
2383 	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2384 
2385 	switch (ios->signal_voltage) {
2386 	case MMC_SIGNAL_VOLTAGE_330:
2387 		if (!(host->flags & SDHCI_SIGNALING_330))
2388 			return -EINVAL;
2389 		/* Set 1.8V Signal Enable in the Host Control2 register to 0 */
2390 		ctrl &= ~SDHCI_CTRL_VDD_180;
2391 		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2392 
2393 		if (!IS_ERR(mmc->supply.vqmmc)) {
2394 			ret = mmc_regulator_set_vqmmc(mmc, ios);
2395 			if (ret) {
2396 				pr_warn("%s: Switching to 3.3V signalling voltage failed\n",
2397 					mmc_hostname(mmc));
2398 				return -EIO;
2399 			}
2400 		}
2401 		/* Wait for 5ms */
2402 		usleep_range(5000, 5500);
2403 
2404 		/* 3.3V regulator output should be stable within 5 ms */
2405 		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2406 		if (!(ctrl & SDHCI_CTRL_VDD_180))
2407 			return 0;
2408 
2409 		pr_warn("%s: 3.3V regulator output did not become stable\n",
2410 			mmc_hostname(mmc));
2411 
2412 		return -EAGAIN;
2413 	case MMC_SIGNAL_VOLTAGE_180:
2414 		if (!(host->flags & SDHCI_SIGNALING_180))
2415 			return -EINVAL;
2416 		if (!IS_ERR(mmc->supply.vqmmc)) {
2417 			ret = mmc_regulator_set_vqmmc(mmc, ios);
2418 			if (ret) {
2419 				pr_warn("%s: Switching to 1.8V signalling voltage failed\n",
2420 					mmc_hostname(mmc));
2421 				return -EIO;
2422 			}
2423 		}
2424 
2425 		/*
2426 		 * Set the 1.8V Signal Enable bit in the Host Control2
2427 		 * register.
2428 		 */
2429 		ctrl |= SDHCI_CTRL_VDD_180;
2430 		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2431 
2432 		/* Some controllers need to do more when switching */
2433 		if (host->ops->voltage_switch)
2434 			host->ops->voltage_switch(host);
2435 
2436 		/* 1.8V regulator output should be stable within 5 ms */
2437 		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2438 		if (ctrl & SDHCI_CTRL_VDD_180)
2439 			return 0;
2440 
2441 		pr_warn("%s: 1.8V regulator output did not become stable\n",
2442 			mmc_hostname(mmc));
2443 
2444 		return -EAGAIN;
2445 	case MMC_SIGNAL_VOLTAGE_120:
2446 		if (!(host->flags & SDHCI_SIGNALING_120))
2447 			return -EINVAL;
2448 		if (!IS_ERR(mmc->supply.vqmmc)) {
2449 			ret = mmc_regulator_set_vqmmc(mmc, ios);
2450 			if (ret) {
2451 				pr_warn("%s: Switching to 1.2V signalling voltage failed\n",
2452 					mmc_hostname(mmc));
2453 				return -EIO;
2454 			}
2455 		}
2456 		return 0;
2457 	default:
2458 		/* No signal voltage switch required */
2459 		return 0;
2460 	}
2461 }
2462 EXPORT_SYMBOL_GPL(sdhci_start_signal_voltage_switch);
2463 
2464 static int sdhci_card_busy(struct mmc_host *mmc)
2465 {
2466 	struct sdhci_host *host = mmc_priv(mmc);
2467 	u32 present_state;
2468 
2469 	/* Check whether DAT[0] is 0 */
2470 	present_state = sdhci_readl(host, SDHCI_PRESENT_STATE);
2471 
2472 	return !(present_state & SDHCI_DATA_0_LVL_MASK);
2473 }
2474 
2475 static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios)
2476 {
2477 	struct sdhci_host *host = mmc_priv(mmc);
2478 	unsigned long flags;
2479 
2480 	spin_lock_irqsave(&host->lock, flags);
2481 	host->flags |= SDHCI_HS400_TUNING;
2482 	spin_unlock_irqrestore(&host->lock, flags);
2483 
2484 	return 0;
2485 }
2486 
2487 void sdhci_start_tuning(struct sdhci_host *host)
2488 {
2489 	u16 ctrl;
2490 
2491 	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2492 	ctrl |= SDHCI_CTRL_EXEC_TUNING;
2493 	if (host->quirks2 & SDHCI_QUIRK2_TUNING_WORK_AROUND)
2494 		ctrl |= SDHCI_CTRL_TUNED_CLK;
2495 	sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2496 
2497 	/*
2498 	 * As per the Host Controller spec v3.00, tuning command
2499 	 * generates Buffer Read Ready interrupt, so enable that.
2500 	 *
2501 	 * Note: The spec clearly says that when tuning sequence
2502 	 * is being performed, the controller does not generate
2503 	 * interrupts other than Buffer Read Ready interrupt. But
2504 	 * to make sure we don't hit a controller bug, we _only_
2505 	 * enable Buffer Read Ready interrupt here.
2506 	 */
2507 	sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE);
2508 	sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE);
2509 }
2510 EXPORT_SYMBOL_GPL(sdhci_start_tuning);
2511 
2512 void sdhci_end_tuning(struct sdhci_host *host)
2513 {
2514 	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
2515 	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
2516 }
2517 EXPORT_SYMBOL_GPL(sdhci_end_tuning);
2518 
2519 void sdhci_reset_tuning(struct sdhci_host *host)
2520 {
2521 	u16 ctrl;
2522 
2523 	ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2524 	ctrl &= ~SDHCI_CTRL_TUNED_CLK;
2525 	ctrl &= ~SDHCI_CTRL_EXEC_TUNING;
2526 	sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2527 }
2528 EXPORT_SYMBOL_GPL(sdhci_reset_tuning);
2529 
2530 void sdhci_abort_tuning(struct sdhci_host *host, u32 opcode)
2531 {
2532 	sdhci_reset_tuning(host);
2533 
2534 	sdhci_do_reset(host, SDHCI_RESET_CMD);
2535 	sdhci_do_reset(host, SDHCI_RESET_DATA);
2536 
2537 	sdhci_end_tuning(host);
2538 
2539 	mmc_abort_tuning(host->mmc, opcode);
2540 }
2541 EXPORT_SYMBOL_GPL(sdhci_abort_tuning);
2542 
2543 /*
2544  * We use sdhci_send_tuning() because mmc_send_tuning() is not a good fit. SDHCI
2545  * tuning command does not have a data payload (or rather the hardware does it
2546  * automatically) so mmc_send_tuning() will return -EIO. Also the tuning command
2547  * interrupt setup is different from other commands and there is no timeout
2548  * interrupt so special handling is needed.
2549  */
2550 void sdhci_send_tuning(struct sdhci_host *host, u32 opcode)
2551 {
2552 	struct mmc_host *mmc = host->mmc;
2553 	struct mmc_command cmd = {};
2554 	struct mmc_request mrq = {};
2555 	unsigned long flags;
2556 	u32 b = host->sdma_boundary;
2557 
2558 	spin_lock_irqsave(&host->lock, flags);
2559 
2560 	cmd.opcode = opcode;
2561 	cmd.flags = MMC_RSP_R1 | MMC_CMD_ADTC;
2562 	cmd.mrq = &mrq;
2563 
2564 	mrq.cmd = &cmd;
2565 	/*
2566 	 * In response to CMD19, the card sends a 64-byte tuning
2567 	 * block to the Host Controller, so set the block size to 64.
2568 	 * CMD21 on an 8-bit bus uses a 128-byte tuning block instead.
2569 	 */
2570 	if (cmd.opcode == MMC_SEND_TUNING_BLOCK_HS200 &&
2571 	    mmc->ios.bus_width == MMC_BUS_WIDTH_8)
2572 		sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 128), SDHCI_BLOCK_SIZE);
2573 	else
2574 		sdhci_writew(host, SDHCI_MAKE_BLKSZ(b, 64), SDHCI_BLOCK_SIZE);
2575 
2576 	/*
2577 	 * The tuning block is sent by the card to the host controller.
2578 	 * So we set the TRNS_READ bit in the Transfer Mode register.
2579 	 * This also takes care of setting DMA Enable and Multi Block
2580 	 * Select in the same register to 0.
2581 	 */
2582 	sdhci_writew(host, SDHCI_TRNS_READ, SDHCI_TRANSFER_MODE);
2583 
2584 	sdhci_send_command(host, &cmd);
2585 
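	/*
	 * The tuning command is not completed through the normal command
	 * path: the Buffer Read Ready handler wakes buf_ready_int instead,
	 * so drop the command pointer and its timer here.
	 */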
2586 	host->cmd = NULL;
2587 
2588 	sdhci_del_timer(host, &mrq);
2589 
2590 	host->tuning_done = 0;
2591 
2592 	spin_unlock_irqrestore(&host->lock, flags);
2593 
2594 	/* Wait for Buffer Read Ready interrupt */
2595 	wait_event_timeout(host->buf_ready_int, (host->tuning_done == 1),
2596 			   msecs_to_jiffies(50));
2597 
2598 }
2599 EXPORT_SYMBOL_GPL(sdhci_send_tuning);
2600 
2601 static int __sdhci_execute_tuning(struct sdhci_host *host, u32 opcode)
2602 {
2603 	int i;
2604 
2605 	/*
2606 	 * Issue the tuning command repeatedly until Execute Tuning is
2607 	 * cleared or the number of loops reaches the tuning loop count.
2608 	 */
2609 	for (i = 0; i < host->tuning_loop_count; i++) {
2610 		u16 ctrl;
2611 
2612 		sdhci_send_tuning(host, opcode);
2613 
2614 		if (!host->tuning_done) {
2615 			pr_debug("%s: Tuning timeout, falling back to fixed sampling clock\n",
2616 				 mmc_hostname(host->mmc));
2617 			sdhci_abort_tuning(host, opcode);
2618 			return -ETIMEDOUT;
2619 		}
2620 
2621 		/* Spec does not require a delay between tuning cycles */
2622 		if (host->tuning_delay > 0)
2623 			mdelay(host->tuning_delay);
2624 
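		/*
		 * The controller clears Execute Tuning when the sequence
		 * ends; if Tuned Clock is still set at that point the tuning
		 * succeeded, otherwise the controller gave up.
		 */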
2625 		ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2626 		if (!(ctrl & SDHCI_CTRL_EXEC_TUNING)) {
2627 			if (ctrl & SDHCI_CTRL_TUNED_CLK)
2628 				return 0; /* Success! */
2629 			break;
2630 		}
2631 
2632 	}
2633 
2634 	pr_info("%s: Tuning failed, falling back to fixed sampling clock\n",
2635 		mmc_hostname(host->mmc));
2636 	sdhci_reset_tuning(host);
2637 	return -EAGAIN;
2638 }
2639 
2640 int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode)
2641 {
2642 	struct sdhci_host *host = mmc_priv(mmc);
2643 	int err = 0;
2644 	unsigned int tuning_count = 0;
2645 	bool hs400_tuning;
2646 
2647 	hs400_tuning = host->flags & SDHCI_HS400_TUNING;
2648 
2649 	if (host->tuning_mode == SDHCI_TUNING_MODE_1)
2650 		tuning_count = host->tuning_count;
2651 
2652 	/*
2653 	 * The Host Controller needs tuning for SDR104 and DDR50 modes,
2654 	 * and for SDR50 mode when Use Tuning for SDR50 is set in
2655 	 * the Capabilities register.
2656 	 * If the Host Controller supports the HS200 mode then the
2657 	 * tuning function has to be executed.
2658 	 */
2659 	switch (host->timing) {
2660 	/* HS400 tuning is done in HS200 mode */
2661 	case MMC_TIMING_MMC_HS400:
2662 		err = -EINVAL;
2663 		goto out;
2664 
2665 	case MMC_TIMING_MMC_HS200:
2666 		/*
2667 		 * Periodic re-tuning for HS400 is not expected to be needed, so
2668 		 * disable it here.
2669 		 */
2670 		if (hs400_tuning)
2671 			tuning_count = 0;
2672 		break;
2673 
2674 	case MMC_TIMING_UHS_SDR104:
2675 	case MMC_TIMING_UHS_DDR50:
2676 		break;
2677 
2678 	case MMC_TIMING_UHS_SDR50:
2679 		if (host->flags & SDHCI_SDR50_NEEDS_TUNING)
2680 			break;
2681 		/* FALLTHROUGH */
2682 
2683 	default:
2684 		goto out;
2685 	}
2686 
2687 	if (host->ops->platform_execute_tuning) {
2688 		err = host->ops->platform_execute_tuning(host, opcode);
2689 		goto out;
2690 	}
2691 
2692 	host->mmc->retune_period = tuning_count;
2693 
2694 	if (host->tuning_delay < 0)
2695 		host->tuning_delay = opcode == MMC_SEND_TUNING_BLOCK;
2696 
2697 	sdhci_start_tuning(host);
2698 
2699 	host->tuning_err = __sdhci_execute_tuning(host, opcode);
2700 
2701 	sdhci_end_tuning(host);
2702 out:
2703 	host->flags &= ~SDHCI_HS400_TUNING;
2704 
2705 	return err;
2706 }
2707 EXPORT_SYMBOL_GPL(sdhci_execute_tuning);
2708 
2709 static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable)
2710 {
2711 	/* Host Controller v3.00 defines preset value registers */
2712 	if (host->version < SDHCI_SPEC_300)
2713 		return;
2714 
2715 	/*
2716 	 * Only touch Preset Value Enable when the requested state differs
2717 	 * from the current one; otherwise there is nothing to do.
2718 	 */
2719 	if (host->preset_enabled != enable) {
2720 		u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2);
2721 
2722 		if (enable)
2723 			ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE;
2724 		else
2725 			ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE;
2726 
2727 		sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2);
2728 
2729 		if (enable)
2730 			host->flags |= SDHCI_PV_ENABLED;
2731 		else
2732 			host->flags &= ~SDHCI_PV_ENABLED;
2733 
2734 		host->preset_enabled = enable;
2735 	}
2736 }
2737 
2738 static void sdhci_post_req(struct mmc_host *mmc, struct mmc_request *mrq,
2739 				int err)
2740 {
2741 	struct sdhci_host *host = mmc_priv(mmc);
2742 	struct mmc_data *data = mrq->data;
2743 
2744 	if (data->host_cookie != COOKIE_UNMAPPED)
2745 		dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
2746 			     mmc_get_dma_dir(data));
2747 
2748 	data->host_cookie = COOKIE_UNMAPPED;
2749 }
2750 
2751 static void sdhci_pre_req(struct mmc_host *mmc, struct mmc_request *mrq)
2752 {
2753 	struct sdhci_host *host = mmc_priv(mmc);
2754 
2755 	mrq->data->host_cookie = COOKIE_UNMAPPED;
2756 
2757 	/*
2758 	 * No pre-mapping in the pre hook if we're using the bounce buffer,
2759 	 * for that we would need two bounce buffers, since one buffer may
2760 	 * already be in flight when this is called.
2761 	 */
2762 	if (host->flags & SDHCI_REQ_USE_DMA && !host->bounce_buffer)
2763 		sdhci_pre_dma_transfer(host, mrq->data, COOKIE_PRE_MAPPED);
2764 }
2765 
2766 static void sdhci_error_out_mrqs(struct sdhci_host *host, int err)
2767 {
2768 	if (host->data_cmd) {
2769 		host->data_cmd->error = err;
2770 		sdhci_finish_mrq(host, host->data_cmd->mrq);
2771 	}
2772 
2773 	if (host->cmd) {
2774 		host->cmd->error = err;
2775 		sdhci_finish_mrq(host, host->cmd->mrq);
2776 	}
2777 }
2778 
2779 static void sdhci_card_event(struct mmc_host *mmc)
2780 {
2781 	struct sdhci_host *host = mmc_priv(mmc);
2782 	unsigned long flags;
2783 	int present;
2784 
2785 	/* First check if client has provided their own card event */
2786 	if (host->ops->card_event)
2787 		host->ops->card_event(host);
2788 
2789 	present = mmc->ops->get_cd(mmc);
2790 
2791 	spin_lock_irqsave(&host->lock, flags);
2792 
2793 	/* Check sdhci_has_requests() first in case we are runtime suspended */
2794 	if (sdhci_has_requests(host) && !present) {
2795 		pr_err("%s: Card removed during transfer!\n",
2796 			mmc_hostname(host->mmc));
2797 		pr_err("%s: Resetting controller.\n",
2798 			mmc_hostname(host->mmc));
2799 
2800 		sdhci_do_reset(host, SDHCI_RESET_CMD);
2801 		sdhci_do_reset(host, SDHCI_RESET_DATA);
2802 
2803 		sdhci_error_out_mrqs(host, -ENOMEDIUM);
2804 	}
2805 
2806 	spin_unlock_irqrestore(&host->lock, flags);
2807 }
2808 
2809 static const struct mmc_host_ops sdhci_ops = {
2810 	.request	= sdhci_request,
2811 	.post_req	= sdhci_post_req,
2812 	.pre_req	= sdhci_pre_req,
2813 	.set_ios	= sdhci_set_ios,
2814 	.get_cd		= sdhci_get_cd,
2815 	.get_ro		= sdhci_get_ro,
2816 	.hw_reset	= sdhci_hw_reset,
2817 	.enable_sdio_irq = sdhci_enable_sdio_irq,
2818 	.ack_sdio_irq    = sdhci_ack_sdio_irq,
2819 	.start_signal_voltage_switch	= sdhci_start_signal_voltage_switch,
2820 	.prepare_hs400_tuning		= sdhci_prepare_hs400_tuning,
2821 	.execute_tuning			= sdhci_execute_tuning,
2822 	.card_event			= sdhci_card_event,
2823 	.card_busy	= sdhci_card_busy,
2824 };
2825 
2826 /*****************************************************************************\
2827  *                                                                           *
2828  * Request done                                                              *
2829  *                                                                           *
2830 \*****************************************************************************/
2831 
2832 static bool sdhci_request_done(struct sdhci_host *host)
2833 {
2834 	unsigned long flags;
2835 	struct mmc_request *mrq;
2836 	int i;
2837 
2838 	spin_lock_irqsave(&host->lock, flags);
2839 
2840 	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
2841 		mrq = host->mrqs_done[i];
2842 		if (mrq)
2843 			break;
2844 	}
2845 
2846 	if (!mrq) {
2847 		spin_unlock_irqrestore(&host->lock, flags);
2848 		return true;
2849 	}
2850 
2851 	/*
2852 	 * Always unmap the data buffers if they were mapped by
2853 	 * sdhci_prepare_data() whenever we finish with a request.
2854 	 * This avoids leaking DMA mappings on error.
2855 	 */
2856 	if (host->flags & SDHCI_REQ_USE_DMA) {
2857 		struct mmc_data *data = mrq->data;
2858 
2859 		if (host->use_external_dma && data &&
2860 		    (mrq->cmd->error || data->error)) {
2861 			struct dma_chan *chan = sdhci_external_dma_channel(host, data);
2862 
2863 			host->mrqs_done[i] = NULL;
2864 			spin_unlock_irqrestore(&host->lock, flags);
2865 			dmaengine_terminate_sync(chan);
2866 			spin_lock_irqsave(&host->lock, flags);
2867 			sdhci_set_mrq_done(host, mrq);
2868 		}
2869 
2870 		if (data && data->host_cookie == COOKIE_MAPPED) {
2871 			if (host->bounce_buffer) {
2872 				/*
2873 				 * On reads, copy the bounced data into the
2874 				 * sglist
2875 				 */
2876 				if (mmc_get_dma_dir(data) == DMA_FROM_DEVICE) {
2877 					unsigned int length = data->bytes_xfered;
2878 
2879 					if (length > host->bounce_buffer_size) {
2880 						pr_err("%s: bounce buffer is %u bytes but DMA claims to have transferred %u bytes\n",
2881 						       mmc_hostname(host->mmc),
2882 						       host->bounce_buffer_size,
2883 						       data->bytes_xfered);
2884 						/* Cap it down and continue */
2885 						length = host->bounce_buffer_size;
2886 					}
2887 					dma_sync_single_for_cpu(
2888 						host->mmc->parent,
2889 						host->bounce_addr,
2890 						host->bounce_buffer_size,
2891 						DMA_FROM_DEVICE);
2892 					sg_copy_from_buffer(data->sg,
2893 						data->sg_len,
2894 						host->bounce_buffer,
2895 						length);
2896 				} else {
2897 					/* No copying, just switch ownership */
2898 					dma_sync_single_for_cpu(
2899 						host->mmc->parent,
2900 						host->bounce_addr,
2901 						host->bounce_buffer_size,
2902 						mmc_get_dma_dir(data));
2903 				}
2904 			} else {
2905 				/* Unmap the raw data */
2906 				dma_unmap_sg(mmc_dev(host->mmc), data->sg,
2907 					     data->sg_len,
2908 					     mmc_get_dma_dir(data));
2909 			}
2910 			data->host_cookie = COOKIE_UNMAPPED;
2911 		}
2912 	}
2913 
2914 	/*
2915 	 * The controller needs a reset of internal state machines
2916 	 * upon error conditions.
2917 	 */
2918 	if (sdhci_needs_reset(host, mrq)) {
2919 		/*
2920 		 * Do not finish until command and data lines are available for
2921 		 * reset. Note there can only be one other mrq, so it cannot
2922 		 * also be in mrqs_done, otherwise host->cmd and host->data_cmd
2923 		 * would both be null.
2924 		 */
2925 		if (host->cmd || host->data_cmd) {
2926 			spin_unlock_irqrestore(&host->lock, flags);
2927 			return true;
2928 		}
2929 
2930 		/* Some controllers need this kick or reset won't work here */
2931 		if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET)
2932 			/* This is to force an update */
2933 			host->ops->set_clock(host, host->clock);
2934 
2935 		/* Spec says we should do both at the same time, but Ricoh
2936 		   controllers do not like that. */
2937 		sdhci_do_reset(host, SDHCI_RESET_CMD);
2938 		sdhci_do_reset(host, SDHCI_RESET_DATA);
2939 
2940 		host->pending_reset = false;
2941 	}
2942 
2943 	host->mrqs_done[i] = NULL;
2944 
2945 	spin_unlock_irqrestore(&host->lock, flags);
2946 
2947 	mmc_request_done(host->mmc, mrq);
2948 
2949 	return false;
2950 }
2951 
2952 static void sdhci_complete_work(struct work_struct *work)
2953 {
2954 	struct sdhci_host *host = container_of(work, struct sdhci_host,
2955 					       complete_work);
2956 
2957 	while (!sdhci_request_done(host))
2958 		;
2959 }
2960 
2961 static void sdhci_timeout_timer(struct timer_list *t)
2962 {
2963 	struct sdhci_host *host;
2964 	unsigned long flags;
2965 
2966 	host = from_timer(host, t, timer);
2967 
2968 	spin_lock_irqsave(&host->lock, flags);
2969 
2970 	if (host->cmd && !sdhci_data_line_cmd(host->cmd)) {
2971 		pr_err("%s: Timeout waiting for hardware cmd interrupt.\n",
2972 		       mmc_hostname(host->mmc));
2973 		sdhci_dumpregs(host);
2974 
2975 		host->cmd->error = -ETIMEDOUT;
2976 		sdhci_finish_mrq(host, host->cmd->mrq);
2977 	}
2978 
2979 	spin_unlock_irqrestore(&host->lock, flags);
2980 }
2981 
2982 static void sdhci_timeout_data_timer(struct timer_list *t)
2983 {
2984 	struct sdhci_host *host;
2985 	unsigned long flags;
2986 
2987 	host = from_timer(host, t, data_timer);
2988 
2989 	spin_lock_irqsave(&host->lock, flags);
2990 
2991 	if (host->data || host->data_cmd ||
2992 	    (host->cmd && sdhci_data_line_cmd(host->cmd))) {
2993 		pr_err("%s: Timeout waiting for hardware interrupt.\n",
2994 		       mmc_hostname(host->mmc));
2995 		sdhci_dumpregs(host);
2996 
2997 		if (host->data) {
2998 			host->data->error = -ETIMEDOUT;
2999 			sdhci_finish_data(host);
3000 			queue_work(host->complete_wq, &host->complete_work);
3001 		} else if (host->data_cmd) {
3002 			host->data_cmd->error = -ETIMEDOUT;
3003 			sdhci_finish_mrq(host, host->data_cmd->mrq);
3004 		} else {
3005 			host->cmd->error = -ETIMEDOUT;
3006 			sdhci_finish_mrq(host, host->cmd->mrq);
3007 		}
3008 	}
3009 
3010 	spin_unlock_irqrestore(&host->lock, flags);
3011 }
3012 
3013 /*****************************************************************************\
3014  *                                                                           *
3015  * Interrupt handling                                                        *
3016  *                                                                           *
3017 \*****************************************************************************/
3018 
3019 static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *intmask_p)
3020 {
3021 	/* Handle auto-CMD12 error */
3022 	if (intmask & SDHCI_INT_AUTO_CMD_ERR && host->data_cmd) {
3023 		struct mmc_request *mrq = host->data_cmd->mrq;
3024 		u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS);
3025 		int data_err_bit = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ?
3026 				   SDHCI_INT_DATA_TIMEOUT :
3027 				   SDHCI_INT_DATA_CRC;
3028 
3029 		/* Treat auto-CMD12 error the same as data error */
3030 		if (!mrq->sbc && (host->flags & SDHCI_AUTO_CMD12)) {
3031 			*intmask_p |= data_err_bit;
3032 			return;
3033 		}
3034 	}
3035 
3036 	if (!host->cmd) {
3037 		/*
3038 		 * SDHCI recovers from errors by resetting the cmd and data
3039 		 * circuits.  Until that is done, there very well might be more
3040 		 * interrupts, so ignore them in that case.
3041 		 */
3042 		if (host->pending_reset)
3043 			return;
3044 		pr_err("%s: Got command interrupt 0x%08x even though no command operation was in progress.\n",
3045 		       mmc_hostname(host->mmc), (unsigned)intmask);
3046 		sdhci_dumpregs(host);
3047 		return;
3048 	}
3049 
3050 	if (intmask & (SDHCI_INT_TIMEOUT | SDHCI_INT_CRC |
3051 		       SDHCI_INT_END_BIT | SDHCI_INT_INDEX)) {
3052 		if (intmask & SDHCI_INT_TIMEOUT)
3053 			host->cmd->error = -ETIMEDOUT;
3054 		else
3055 			host->cmd->error = -EILSEQ;
3056 
3057 		/* Treat data command CRC error the same as data CRC error */
3058 		if (host->cmd->data &&
3059 		    (intmask & (SDHCI_INT_CRC | SDHCI_INT_TIMEOUT)) ==
3060 		     SDHCI_INT_CRC) {
3061 			host->cmd = NULL;
3062 			*intmask_p |= SDHCI_INT_DATA_CRC;
3063 			return;
3064 		}
3065 
3066 		__sdhci_finish_mrq(host, host->cmd->mrq);
3067 		return;
3068 	}
3069 
3070 	/* Handle auto-CMD23 error */
3071 	if (intmask & SDHCI_INT_AUTO_CMD_ERR) {
3072 		struct mmc_request *mrq = host->cmd->mrq;
3073 		u16 auto_cmd_status = sdhci_readw(host, SDHCI_AUTO_CMD_STATUS);
3074 		int err = (auto_cmd_status & SDHCI_AUTO_CMD_TIMEOUT) ?
3075 			  -ETIMEDOUT :
3076 			  -EILSEQ;
3077 
3078 		if (mrq->sbc && (host->flags & SDHCI_AUTO_CMD23)) {
3079 			mrq->sbc->error = err;
3080 			__sdhci_finish_mrq(host, mrq);
3081 			return;
3082 		}
3083 	}
3084 
3085 	if (intmask & SDHCI_INT_RESPONSE)
3086 		sdhci_finish_command(host);
3087 }
3088 
3089 static void sdhci_adma_show_error(struct sdhci_host *host)
3090 {
3091 	void *desc = host->adma_table;
3092 	dma_addr_t dma = host->adma_addr;
3093 
3094 	sdhci_dumpregs(host);
3095 
3096 	while (true) {
3097 		struct sdhci_adma2_64_desc *dma_desc = desc;
3098 
3099 		if (host->flags & SDHCI_USE_64_BIT_DMA)
3100 			SDHCI_DUMP("%08llx: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n",
3101 			    (unsigned long long)dma,
3102 			    le32_to_cpu(dma_desc->addr_hi),
3103 			    le32_to_cpu(dma_desc->addr_lo),
3104 			    le16_to_cpu(dma_desc->len),
3105 			    le16_to_cpu(dma_desc->cmd));
3106 		else
3107 			SDHCI_DUMP("%08llx: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n",
3108 			    (unsigned long long)dma,
3109 			    le32_to_cpu(dma_desc->addr_lo),
3110 			    le16_to_cpu(dma_desc->len),
3111 			    le16_to_cpu(dma_desc->cmd));
3112 
3113 		desc += host->desc_sz;
3114 		dma += host->desc_sz;
3115 
3116 		if (dma_desc->cmd & cpu_to_le16(ADMA2_END))
3117 			break;
3118 	}
3119 }
3120 
3121 static void sdhci_data_irq(struct sdhci_host *host, u32 intmask)
3122 {
3123 	u32 command;
3124 
3125 	/* CMD19 generates _only_ Buffer Read Ready interrupt */
3126 	if (intmask & SDHCI_INT_DATA_AVAIL) {
3127 		command = SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND));
3128 		if (command == MMC_SEND_TUNING_BLOCK ||
3129 		    command == MMC_SEND_TUNING_BLOCK_HS200) {
3130 			host->tuning_done = 1;
3131 			wake_up(&host->buf_ready_int);
3132 			return;
3133 		}
3134 	}
3135 
3136 	if (!host->data) {
3137 		struct mmc_command *data_cmd = host->data_cmd;
3138 
3139 		/*
3140 		 * The "data complete" interrupt is also used to
3141 		 * indicate that a busy state has ended. See comment
3142 		 * above in sdhci_cmd_irq().
3143 		 */
3144 		if (data_cmd && (data_cmd->flags & MMC_RSP_BUSY)) {
3145 			if (intmask & SDHCI_INT_DATA_TIMEOUT) {
3146 				host->data_cmd = NULL;
3147 				data_cmd->error = -ETIMEDOUT;
3148 				__sdhci_finish_mrq(host, data_cmd->mrq);
3149 				return;
3150 			}
3151 			if (intmask & SDHCI_INT_DATA_END) {
3152 				host->data_cmd = NULL;
3153 				/*
3154 				 * Some cards signal busy-end before the
3155 				 * command has completed, so make
3156 				 * sure we do things in the proper order.
3157 				 */
3158 				if (host->cmd == data_cmd)
3159 					return;
3160 
3161 				__sdhci_finish_mrq(host, data_cmd->mrq);
3162 				return;
3163 			}
3164 		}
3165 
3166 		/*
3167 		 * SDHCI recovers from errors by resetting the cmd and data
3168 		 * circuits. Until that is done, there very well might be more
3169 		 * interrupts, so ignore them in that case.
3170 		 */
3171 		if (host->pending_reset)
3172 			return;
3173 
3174 		pr_err("%s: Got data interrupt 0x%08x even though no data operation was in progress.\n",
3175 		       mmc_hostname(host->mmc), (unsigned)intmask);
3176 		sdhci_dumpregs(host);
3177 
3178 		return;
3179 	}
3180 
3181 	if (intmask & SDHCI_INT_DATA_TIMEOUT)
3182 		host->data->error = -ETIMEDOUT;
3183 	else if (intmask & SDHCI_INT_DATA_END_BIT)
3184 		host->data->error = -EILSEQ;
3185 	else if ((intmask & SDHCI_INT_DATA_CRC) &&
3186 		SDHCI_GET_CMD(sdhci_readw(host, SDHCI_COMMAND))
3187 			!= MMC_BUS_TEST_R)
3188 		host->data->error = -EILSEQ;
3189 	else if (intmask & SDHCI_INT_ADMA_ERROR) {
3190 		pr_err("%s: ADMA error: 0x%08x\n", mmc_hostname(host->mmc),
3191 		       intmask);
3192 		sdhci_adma_show_error(host);
3193 		host->data->error = -EIO;
3194 		if (host->ops->adma_workaround)
3195 			host->ops->adma_workaround(host, intmask);
3196 	}
3197 
3198 	if (host->data->error)
3199 		sdhci_finish_data(host);
3200 	else {
3201 		if (intmask & (SDHCI_INT_DATA_AVAIL | SDHCI_INT_SPACE_AVAIL))
3202 			sdhci_transfer_pio(host);
3203 
3204 		/*
3205 		 * We currently don't do anything fancy with DMA
3206 		 * boundaries, but as we can't disable the feature
3207 		 * we need to at least restart the transfer.
3208 		 *
3209 		 * According to the spec sdhci_readl(host, SDHCI_DMA_ADDRESS)
3210 		 * should return a valid address to continue from, but as
3211 		 * some controllers are faulty, don't trust them.
3212 		 */
3213 		if (intmask & SDHCI_INT_DMA_END) {
3214 			dma_addr_t dmastart, dmanow;
3215 
3216 			dmastart = sdhci_sdma_address(host);
3217 			dmanow = dmastart + host->data->bytes_xfered;
3218 			/*
3219 			 * Force update to the next DMA block boundary.
3220 			 */
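			/*
			 * For example, assuming the default 512 KiB boundary,
			 * a stop at 0x92a00 rounds down to 0x80000 and the
			 * transfer resumes from 0x100000.
			 */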
3221 			dmanow = (dmanow &
3222 				~((dma_addr_t)SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) +
3223 				SDHCI_DEFAULT_BOUNDARY_SIZE;
3224 			host->data->bytes_xfered = dmanow - dmastart;
3225 			DBG("DMA base %pad, transferred 0x%06x bytes, next %pad\n",
3226 			    &dmastart, host->data->bytes_xfered, &dmanow);
3227 			sdhci_set_sdma_addr(host, dmanow);
3228 		}
3229 
3230 		if (intmask & SDHCI_INT_DATA_END) {
3231 			if (host->cmd == host->data_cmd) {
3232 				/*
3233 				 * Data managed to finish before the
3234 				 * command completed. Make sure we do
3235 				 * things in the proper order.
3236 				 */
3237 				host->data_early = 1;
3238 			} else {
3239 				sdhci_finish_data(host);
3240 			}
3241 		}
3242 	}
3243 }
3244 
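/*
 * Completion is deferred to the IRQ thread when a reset is still pending or
 * when DMA mappings remain to be torn down, keeping the hard IRQ handler
 * short.
 */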
3245 static inline bool sdhci_defer_done(struct sdhci_host *host,
3246 				    struct mmc_request *mrq)
3247 {
3248 	struct mmc_data *data = mrq->data;
3249 
3250 	return host->pending_reset ||
3251 	       ((host->flags & SDHCI_REQ_USE_DMA) && data &&
3252 		data->host_cookie == COOKIE_MAPPED);
3253 }
3254 
3255 static irqreturn_t sdhci_irq(int irq, void *dev_id)
3256 {
3257 	struct mmc_request *mrqs_done[SDHCI_MAX_MRQS] = {0};
3258 	irqreturn_t result = IRQ_NONE;
3259 	struct sdhci_host *host = dev_id;
3260 	u32 intmask, mask, unexpected = 0;
3261 	int max_loops = 16;
3262 	int i;
3263 
3264 	spin_lock(&host->lock);
3265 
3266 	if (host->runtime_suspended) {
3267 		spin_unlock(&host->lock);
3268 		return IRQ_NONE;
3269 	}
3270 
3271 	intmask = sdhci_readl(host, SDHCI_INT_STATUS);
3272 	if (!intmask || intmask == 0xffffffff) {
3273 		result = IRQ_NONE;
3274 		goto out;
3275 	}
3276 
3277 	do {
3278 		DBG("IRQ status 0x%08x\n", intmask);
3279 
3280 		if (host->ops->irq) {
3281 			intmask = host->ops->irq(host, intmask);
3282 			if (!intmask)
3283 				goto cont;
3284 		}
3285 
3286 		/* Clear selected interrupts. */
3287 		mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
3288 				  SDHCI_INT_BUS_POWER);
3289 		sdhci_writel(host, mask, SDHCI_INT_STATUS);
3290 
3291 		if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
3292 			u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) &
3293 				      SDHCI_CARD_PRESENT;
3294 
3295 			/*
3296 			 * As observed on i.MX eSDHC, the INSERT bit is
3297 			 * immediately set again when cleared while a card
3298 			 * is inserted, so the IRQ must be masked to prevent
3299 			 * an interrupt storm that would freeze the system.
3300 			 * The REMOVE bit behaves the same way.
3301 			 *
3302 			 * More testing is needed to ensure this works on
3303 			 * other platforms as well.
3305 			 */
3306 			host->ier &= ~(SDHCI_INT_CARD_INSERT |
3307 				       SDHCI_INT_CARD_REMOVE);
3308 			host->ier |= present ? SDHCI_INT_CARD_REMOVE :
3309 					       SDHCI_INT_CARD_INSERT;
3310 			sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
3311 			sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
3312 
3313 			sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT |
3314 				     SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS);
3315 
3316 			host->thread_isr |= intmask & (SDHCI_INT_CARD_INSERT |
3317 						       SDHCI_INT_CARD_REMOVE);
3318 			result = IRQ_WAKE_THREAD;
3319 		}
3320 
3321 		if (intmask & SDHCI_INT_CMD_MASK)
3322 			sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK, &intmask);
3323 
3324 		if (intmask & SDHCI_INT_DATA_MASK)
3325 			sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK);
3326 
3327 		if (intmask & SDHCI_INT_BUS_POWER)
3328 			pr_err("%s: Card is consuming too much power!\n",
3329 				mmc_hostname(host->mmc));
3330 
3331 		if (intmask & SDHCI_INT_RETUNE)
3332 			mmc_retune_needed(host->mmc);
3333 
3334 		if ((intmask & SDHCI_INT_CARD_INT) &&
3335 		    (host->ier & SDHCI_INT_CARD_INT)) {
3336 			sdhci_enable_sdio_irq_nolock(host, false);
3337 			sdio_signal_irq(host->mmc);
3338 		}
3339 
3340 		intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE |
3341 			     SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK |
3342 			     SDHCI_INT_ERROR | SDHCI_INT_BUS_POWER |
3343 			     SDHCI_INT_RETUNE | SDHCI_INT_CARD_INT);
3344 
3345 		if (intmask) {
3346 			unexpected |= intmask;
3347 			sdhci_writel(host, intmask, SDHCI_INT_STATUS);
3348 		}
3349 cont:
3350 		if (result == IRQ_NONE)
3351 			result = IRQ_HANDLED;
3352 
3353 		intmask = sdhci_readl(host, SDHCI_INT_STATUS);
3354 	} while (intmask && --max_loops);
3355 
3356 	/* Determine if mrqs can be completed immediately */
3357 	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
3358 		struct mmc_request *mrq = host->mrqs_done[i];
3359 
3360 		if (!mrq)
3361 			continue;
3362 
3363 		if (sdhci_defer_done(host, mrq)) {
3364 			result = IRQ_WAKE_THREAD;
3365 		} else {
3366 			mrqs_done[i] = mrq;
3367 			host->mrqs_done[i] = NULL;
3368 		}
3369 	}
3370 out:
3371 	spin_unlock(&host->lock);
3372 
3373 	/* Process mrqs ready for immediate completion */
3374 	for (i = 0; i < SDHCI_MAX_MRQS; i++) {
3375 		if (mrqs_done[i])
3376 			mmc_request_done(host->mmc, mrqs_done[i]);
3377 	}
3378 
3379 	if (unexpected) {
3380 		pr_err("%s: Unexpected interrupt 0x%08x.\n",
3381 			   mmc_hostname(host->mmc), unexpected);
3382 		sdhci_dumpregs(host);
3383 	}
3384 
3385 	return result;
3386 }
3387 
3388 static irqreturn_t sdhci_thread_irq(int irq, void *dev_id)
3389 {
3390 	struct sdhci_host *host = dev_id;
3391 	unsigned long flags;
3392 	u32 isr;
3393 
3394 	while (!sdhci_request_done(host))
3395 		;
3396 
3397 	spin_lock_irqsave(&host->lock, flags);
3398 	isr = host->thread_isr;
3399 	host->thread_isr = 0;
3400 	spin_unlock_irqrestore(&host->lock, flags);
3401 
3402 	if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) {
3403 		struct mmc_host *mmc = host->mmc;
3404 
3405 		mmc->ops->card_event(mmc);
3406 		mmc_detect_change(mmc, msecs_to_jiffies(200));
3407 	}
3408 
3409 	return IRQ_HANDLED;
3410 }
3411 
3412 /*****************************************************************************\
3413  *                                                                           *
3414  * Suspend/resume                                                            *
3415  *                                                                           *
3416 \*****************************************************************************/
3417 
3418 #ifdef CONFIG_PM
3419 
3420 static bool sdhci_cd_irq_can_wakeup(struct sdhci_host *host)
3421 {
3422 	return mmc_card_is_removable(host->mmc) &&
3423 	       !(host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
3424 	       !mmc_can_gpio_cd(host->mmc);
3425 }
3426 
3427 /*
3428  * To enable wakeup events, the corresponding events have to be enabled in
3429  * the Interrupt Status Enable register too. See 'Table 1-6: Wakeup Signal
3430  * Table' in the SD Host Controller Standard Specification.
3431  * It is useless to restore SDHCI_INT_ENABLE state in
3432  * sdhci_disable_irq_wakeups() since it will be set by
3433  * sdhci_enable_card_detection() or sdhci_init().
3434  */
3435 static bool sdhci_enable_irq_wakeups(struct sdhci_host *host)
3436 {
3437 	u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE |
3438 		  SDHCI_WAKE_ON_INT;
3439 	u32 irq_val = 0;
3440 	u8 wake_val = 0;
3441 	u8 val;
3442 
3443 	if (sdhci_cd_irq_can_wakeup(host)) {
3444 		wake_val |= SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE;
3445 		irq_val |= SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE;
3446 	}
3447 
3448 	if (mmc_card_wake_sdio_irq(host->mmc)) {
3449 		wake_val |= SDHCI_WAKE_ON_INT;
3450 		irq_val |= SDHCI_INT_CARD_INT;
3451 	}
3452 
3453 	if (!irq_val)
3454 		return false;
3455 
3456 	val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
3457 	val &= ~mask;
3458 	val |= wake_val;
3459 	sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
3460 
3461 	sdhci_writel(host, irq_val, SDHCI_INT_ENABLE);
3462 
3463 	host->irq_wake_enabled = !enable_irq_wake(host->irq);
3464 
3465 	return host->irq_wake_enabled;
3466 }
3467 
3468 static void sdhci_disable_irq_wakeups(struct sdhci_host *host)
3469 {
3470 	u8 val;
3471 	u8 mask = SDHCI_WAKE_ON_INSERT | SDHCI_WAKE_ON_REMOVE
3472 			| SDHCI_WAKE_ON_INT;
3473 
3474 	val = sdhci_readb(host, SDHCI_WAKE_UP_CONTROL);
3475 	val &= ~mask;
3476 	sdhci_writeb(host, val, SDHCI_WAKE_UP_CONTROL);
3477 
3478 	disable_irq_wake(host->irq);
3479 
3480 	host->irq_wake_enabled = false;
3481 }
3482 
3483 int sdhci_suspend_host(struct sdhci_host *host)
3484 {
3485 	sdhci_disable_card_detection(host);
3486 
3487 	mmc_retune_timer_stop(host->mmc);
3488 
3489 	if (!device_may_wakeup(mmc_dev(host->mmc)) ||
3490 	    !sdhci_enable_irq_wakeups(host)) {
3491 		host->ier = 0;
3492 		sdhci_writel(host, 0, SDHCI_INT_ENABLE);
3493 		sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
3494 		free_irq(host->irq, host);
3495 	}
3496 
3497 	return 0;
3498 }
3499 
3500 EXPORT_SYMBOL_GPL(sdhci_suspend_host);
3501 
3502 int sdhci_resume_host(struct sdhci_host *host)
3503 {
3504 	struct mmc_host *mmc = host->mmc;
3505 	int ret = 0;
3506 
3507 	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
3508 		if (host->ops->enable_dma)
3509 			host->ops->enable_dma(host);
3510 	}
3511 
3512 	if ((host->mmc->pm_flags & MMC_PM_KEEP_POWER) &&
3513 	    (host->quirks2 & SDHCI_QUIRK2_HOST_OFF_CARD_ON)) {
3514 		/* Card keeps power but host controller does not */
3515 		sdhci_init(host, 0);
3516 		host->pwr = 0;
3517 		host->clock = 0;
3518 		mmc->ops->set_ios(mmc, &mmc->ios);
3519 	} else {
3520 		sdhci_init(host, (host->mmc->pm_flags & MMC_PM_KEEP_POWER));
3521 	}
3522 
3523 	if (host->irq_wake_enabled) {
3524 		sdhci_disable_irq_wakeups(host);
3525 	} else {
3526 		ret = request_threaded_irq(host->irq, sdhci_irq,
3527 					   sdhci_thread_irq, IRQF_SHARED,
3528 					   mmc_hostname(host->mmc), host);
3529 		if (ret)
3530 			return ret;
3531 	}
3532 
3533 	sdhci_enable_card_detection(host);
3534 
3535 	return ret;
3536 }
3537 
3538 EXPORT_SYMBOL_GPL(sdhci_resume_host);
3539 
3540 int sdhci_runtime_suspend_host(struct sdhci_host *host)
3541 {
3542 	unsigned long flags;
3543 
3544 	mmc_retune_timer_stop(host->mmc);
3545 
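	/* Mask everything except the SDIO card interrupt, if it is enabled */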
3546 	spin_lock_irqsave(&host->lock, flags);
3547 	host->ier &= SDHCI_INT_CARD_INT;
3548 	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
3549 	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
3550 	spin_unlock_irqrestore(&host->lock, flags);
3551 
3552 	synchronize_hardirq(host->irq);
3553 
3554 	spin_lock_irqsave(&host->lock, flags);
3555 	host->runtime_suspended = true;
3556 	spin_unlock_irqrestore(&host->lock, flags);
3557 
3558 	return 0;
3559 }
3560 EXPORT_SYMBOL_GPL(sdhci_runtime_suspend_host);
3561 
3562 int sdhci_runtime_resume_host(struct sdhci_host *host, int soft_reset)
3563 {
3564 	struct mmc_host *mmc = host->mmc;
3565 	unsigned long flags;
3566 	int host_flags = host->flags;
3567 
3568 	if (host_flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
3569 		if (host->ops->enable_dma)
3570 			host->ops->enable_dma(host);
3571 	}
3572 
3573 	sdhci_init(host, soft_reset);
3574 
3575 	if (mmc->ios.power_mode != MMC_POWER_UNDEFINED &&
3576 	    mmc->ios.power_mode != MMC_POWER_OFF) {
3577 		/* Force clock and power re-program */
3578 		host->pwr = 0;
3579 		host->clock = 0;
3580 		mmc->ops->start_signal_voltage_switch(mmc, &mmc->ios);
3581 		mmc->ops->set_ios(mmc, &mmc->ios);
3582 
3583 		if ((host_flags & SDHCI_PV_ENABLED) &&
3584 		    !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) {
3585 			spin_lock_irqsave(&host->lock, flags);
3586 			sdhci_enable_preset_value(host, true);
3587 			spin_unlock_irqrestore(&host->lock, flags);
3588 		}
3589 
3590 		if ((mmc->caps2 & MMC_CAP2_HS400_ES) &&
3591 		    mmc->ops->hs400_enhanced_strobe)
3592 			mmc->ops->hs400_enhanced_strobe(mmc, &mmc->ios);
3593 	}
3594 
3595 	spin_lock_irqsave(&host->lock, flags);
3596 
3597 	host->runtime_suspended = false;
3598 
3599 	/* Enable SDIO IRQ */
3600 	if (sdio_irq_claimed(mmc))
3601 		sdhci_enable_sdio_irq_nolock(host, true);
3602 
3603 	/* Enable Card Detection */
3604 	sdhci_enable_card_detection(host);
3605 
3606 	spin_unlock_irqrestore(&host->lock, flags);
3607 
3608 	return 0;
3609 }
3610 EXPORT_SYMBOL_GPL(sdhci_runtime_resume_host);
3611 
3612 #endif /* CONFIG_PM */
3613 
3614 /*****************************************************************************\
3615  *                                                                           *
3616  * Command Queue Engine (CQE) helpers                                        *
3617  *                                                                           *
3618 \*****************************************************************************/
3619 
3620 void sdhci_cqe_enable(struct mmc_host *mmc)
3621 {
3622 	struct sdhci_host *host = mmc_priv(mmc);
3623 	unsigned long flags;
3624 	u8 ctrl;
3625 
3626 	spin_lock_irqsave(&host->lock, flags);
3627 
3628 	ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL);
3629 	ctrl &= ~SDHCI_CTRL_DMA_MASK;
3630 	/*
3631 	 * Hosts from v4.10 onwards support the ADMA3 DMA type.
3632 	 * ADMA3 uses integrated descriptors that carry both command
3633 	 * and transfer descriptors, which suits command queuing.
3634 	 */
3635 	if (host->v4_mode && (host->caps1 & SDHCI_CAN_DO_ADMA3))
3636 		ctrl |= SDHCI_CTRL_ADMA3;
3637 	else if (host->flags & SDHCI_USE_64_BIT_DMA)
3638 		ctrl |= SDHCI_CTRL_ADMA64;
3639 	else
3640 		ctrl |= SDHCI_CTRL_ADMA32;
3641 	sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL);
3642 
3643 	sdhci_writew(host, SDHCI_MAKE_BLKSZ(host->sdma_boundary, 512),
3644 		     SDHCI_BLOCK_SIZE);
3645 
3646 	/* Set maximum timeout */
3647 	sdhci_set_timeout(host, NULL);
3648 
3649 	host->ier = host->cqe_ier;
3650 
3651 	sdhci_writel(host, host->ier, SDHCI_INT_ENABLE);
3652 	sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE);
3653 
3654 	host->cqe_on = true;
3655 
3656 	pr_debug("%s: sdhci: CQE on, IRQ mask %#x, IRQ status %#x\n",
3657 		 mmc_hostname(mmc), host->ier,
3658 		 sdhci_readl(host, SDHCI_INT_STATUS));
3659 
3660 	spin_unlock_irqrestore(&host->lock, flags);
3661 }
3662 EXPORT_SYMBOL_GPL(sdhci_cqe_enable);
3663 
3664 void sdhci_cqe_disable(struct mmc_host *mmc, bool recovery)
3665 {
3666 	struct sdhci_host *host = mmc_priv(mmc);
3667 	unsigned long flags;
3668 
3669 	spin_lock_irqsave(&host->lock, flags);
3670 
3671 	sdhci_set_default_irqs(host);
3672 
3673 	host->cqe_on = false;
3674 
3675 	if (recovery) {
3676 		sdhci_do_reset(host, SDHCI_RESET_CMD);
3677 		sdhci_do_reset(host, SDHCI_RESET_DATA);
3678 	}
3679 
3680 	pr_debug("%s: sdhci: CQE off, IRQ mask %#x, IRQ status %#x\n",
3681 		 mmc_hostname(mmc), host->ier,
3682 		 sdhci_readl(host, SDHCI_INT_STATUS));
3683 
3684 	spin_unlock_irqrestore(&host->lock, flags);
3685 }
3686 EXPORT_SYMBOL_GPL(sdhci_cqe_disable);
3687 
3688 bool sdhci_cqe_irq(struct sdhci_host *host, u32 intmask, int *cmd_error,
3689 		   int *data_error)
3690 {
3691 	u32 mask;
3692 
3693 	if (!host->cqe_on)
3694 		return false;
3695 
3696 	if (intmask & (SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC))
3697 		*cmd_error = -EILSEQ;
3698 	else if (intmask & SDHCI_INT_TIMEOUT)
3699 		*cmd_error = -ETIMEDOUT;
3700 	else
3701 		*cmd_error = 0;
3702 
3703 	if (intmask & (SDHCI_INT_DATA_END_BIT | SDHCI_INT_DATA_CRC))
3704 		*data_error = -EILSEQ;
3705 	else if (intmask & SDHCI_INT_DATA_TIMEOUT)
3706 		*data_error = -ETIMEDOUT;
3707 	else if (intmask & SDHCI_INT_ADMA_ERROR)
3708 		*data_error = -EIO;
3709 	else
3710 		*data_error = 0;
3711 
3712 	/* Clear selected interrupts. */
3713 	mask = intmask & host->cqe_ier;
3714 	sdhci_writel(host, mask, SDHCI_INT_STATUS);
3715 
3716 	if (intmask & SDHCI_INT_BUS_POWER)
3717 		pr_err("%s: Card is consuming too much power!\n",
3718 		       mmc_hostname(host->mmc));
3719 
3720 	intmask &= ~(host->cqe_ier | SDHCI_INT_ERROR);
3721 	if (intmask) {
3722 		sdhci_writel(host, intmask, SDHCI_INT_STATUS);
3723 		pr_err("%s: CQE: Unexpected interrupt 0x%08x.\n",
3724 		       mmc_hostname(host->mmc), intmask);
3725 		sdhci_dumpregs(host);
3726 	}
3727 
3728 	return true;
3729 }
3730 EXPORT_SYMBOL_GPL(sdhci_cqe_irq);
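
/*
 * Example (illustrative sketch): a CQE-capable glue driver typically hooks
 * sdhci_cqe_enable()/sdhci_cqe_disable() into its struct cqhci_host_ops
 * (.enable/.disable) and forwards interrupts to cqhci from its sdhci_ops
 * .irq hook; my_cqe_irq() is a hypothetical name for such a forwarder.
 *
 *	static u32 my_cqe_irq(struct sdhci_host *host, u32 intmask)
 *	{
 *		int cmd_error = 0;
 *		int data_error = 0;
 *
 *		if (!sdhci_cqe_irq(host, intmask, &cmd_error, &data_error))
 *			return intmask;
 *
 *		cqhci_irq(host->mmc, intmask, cmd_error, data_error);
 *
 *		return 0;
 *	}
 */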
3731 
3732 /*****************************************************************************\
3733  *                                                                           *
3734  * Device allocation/registration                                            *
3735  *                                                                           *
3736 \*****************************************************************************/
3737 
3738 struct sdhci_host *sdhci_alloc_host(struct device *dev,
3739 	size_t priv_size)
3740 {
3741 	struct mmc_host *mmc;
3742 	struct sdhci_host *host;
3743 
3744 	WARN_ON(dev == NULL);
3745 
3746 	mmc = mmc_alloc_host(sizeof(struct sdhci_host) + priv_size, dev);
3747 	if (!mmc)
3748 		return ERR_PTR(-ENOMEM);
3749 
3750 	host = mmc_priv(mmc);
3751 	host->mmc = mmc;
3752 	host->mmc_host_ops = sdhci_ops;
3753 	mmc->ops = &host->mmc_host_ops;
3754 
3755 	host->flags = SDHCI_SIGNALING_330;
3756 
3757 	host->cqe_ier     = SDHCI_CQE_INT_MASK;
3758 	host->cqe_err_ier = SDHCI_CQE_INT_ERR_MASK;
3759 
3760 	host->tuning_delay = -1;
3761 	host->tuning_loop_count = MAX_TUNING_LOOP;
3762 
3763 	host->sdma_boundary = SDHCI_DEFAULT_BOUNDARY_ARG;
3764 
3765 	/*
3766 	 * The DMA table descriptor count is calculated as the maximum
3767 	 * number of segments times 2, to allow for an alignment
3768 	 * descriptor for each segment, plus 1 for a nop end descriptor.
3769 	 */
3770 	host->adma_table_cnt = SDHCI_MAX_SEGS * 2 + 1;
3771 
3772 	return host;
3773 }
3775 EXPORT_SYMBOL_GPL(sdhci_alloc_host);
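
/*
 * Example (illustrative sketch): a glue driver allocates the host together
 * with its own private data and retrieves that data via sdhci_priv();
 * struct my_priv is hypothetical.
 *
 *	struct my_priv *priv;
 *	struct sdhci_host *host;
 *
 *	host = sdhci_alloc_host(&pdev->dev, sizeof(*priv));
 *	if (IS_ERR(host))
 *		return PTR_ERR(host);
 *	priv = sdhci_priv(host);
 */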
3776 
3777 static int sdhci_set_dma_mask(struct sdhci_host *host)
3778 {
3779 	struct mmc_host *mmc = host->mmc;
3780 	struct device *dev = mmc_dev(mmc);
3781 	int ret = -EINVAL;
3782 
3783 	if (host->quirks2 & SDHCI_QUIRK2_BROKEN_64_BIT_DMA)
3784 		host->flags &= ~SDHCI_USE_64_BIT_DMA;
3785 
3786 	/* Try a 64-bit mask if the hardware is capable of it */
3787 	if (host->flags & SDHCI_USE_64_BIT_DMA) {
3788 		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
3789 		if (ret) {
3790 			pr_warn("%s: Failed to set 64-bit DMA mask.\n",
3791 				mmc_hostname(mmc));
3792 			host->flags &= ~SDHCI_USE_64_BIT_DMA;
3793 		}
3794 	}
3795 
3796 	/* 32-bit mask as default & fallback */
3797 	if (ret) {
3798 		ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
3799 		if (ret)
3800 			pr_warn("%s: Failed to set 32-bit DMA mask.\n",
3801 				mmc_hostname(mmc));
3802 	}
3803 
3804 	return ret;
3805 }
3806 
3807 void __sdhci_read_caps(struct sdhci_host *host, const u16 *ver,
3808 		       const u32 *caps, const u32 *caps1)
3809 {
3810 	u16 v;
3811 	u64 dt_caps_mask = 0;
3812 	u64 dt_caps = 0;
3813 
3814 	if (host->read_caps)
3815 		return;
3816 
3817 	host->read_caps = true;
3818 
3819 	if (debug_quirks)
3820 		host->quirks = debug_quirks;
3821 
3822 	if (debug_quirks2)
3823 		host->quirks2 = debug_quirks2;
3824 
3825 	sdhci_do_reset(host, SDHCI_RESET_ALL);
3826 
3827 	if (host->v4_mode)
3828 		sdhci_do_enable_v4_mode(host);
3829 
3830 	of_property_read_u64(mmc_dev(host->mmc)->of_node,
3831 			     "sdhci-caps-mask", &dt_caps_mask);
3832 	of_property_read_u64(mmc_dev(host->mmc)->of_node,
3833 			     "sdhci-caps", &dt_caps);
3834 
3835 	v = ver ? *ver : sdhci_readw(host, SDHCI_HOST_VERSION);
3836 	host->version = (v & SDHCI_SPEC_VER_MASK) >> SDHCI_SPEC_VER_SHIFT;
3837 
3838 	if (host->quirks & SDHCI_QUIRK_MISSING_CAPS)
3839 		return;
3840 
3841 	if (caps) {
3842 		host->caps = *caps;
3843 	} else {
3844 		host->caps = sdhci_readl(host, SDHCI_CAPABILITIES);
3845 		host->caps &= ~lower_32_bits(dt_caps_mask);
3846 		host->caps |= lower_32_bits(dt_caps);
3847 	}
3848 
3849 	if (host->version < SDHCI_SPEC_300)
3850 		return;
3851 
3852 	if (caps1) {
3853 		host->caps1 = *caps1;
3854 	} else {
3855 		host->caps1 = sdhci_readl(host, SDHCI_CAPABILITIES_1);
3856 		host->caps1 &= ~upper_32_bits(dt_caps_mask);
3857 		host->caps1 |= upper_32_bits(dt_caps);
3858 	}
3859 }
3860 EXPORT_SYMBOL_GPL(__sdhci_read_caps);
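
/*
 * Example (illustrative sketch, assuming the standard "sdhci-caps-mask" /
 * "sdhci-caps" DT bindings, where bits 31:0 map to the Capabilities
 * register and bits 63:32 to Capabilities_1): a board that needs to hide
 * a broken high-speed capability (SDHCI_CAN_DO_HISPD, bit 21) could set
 *
 *	sdhci-caps-mask = <0x0 0x00200000>;
 *
 * so that the "host->caps &= ~lower_32_bits(dt_caps_mask)" step above
 * clears that bit.
 */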
3861 
3862 static void sdhci_allocate_bounce_buffer(struct sdhci_host *host)
3863 {
3864 	struct mmc_host *mmc = host->mmc;
3865 	unsigned int max_blocks;
3866 	unsigned int bounce_size;
3867 	int ret;
3868 
3869 	/*
3870 	 * Cap the bounce buffer at 64KB. Using a bigger bounce buffer
3871 	 * Cap the bounce buffer at 64KB. Using a bigger bounce buffer
3872 	 * has diminishing returns, probably because SD/MMC cards are
3873 	 * usually optimized to handle requests of this size.
3874 	bounce_size = SZ_64K;
3875 	/*
3876 	 * Shrink the bounce buffer to the maximum request size if that
3877 	 * is smaller; otherwise the maximum request size is clamped
3878 	 * down to the bounce buffer size below.
3879 	 */
3880 	if (mmc->max_req_size < bounce_size)
3881 		bounce_size = mmc->max_req_size;
3882 	max_blocks = bounce_size / 512;
3883 
3884 	/*
3885 	 * When only one segment is supported, we can get significant
3886 	 * speedups with the help of a bounce buffer that groups
3887 	 * scattered reads/writes together.
3888 	 */
3889 	host->bounce_buffer = devm_kmalloc(mmc->parent,
3890 					   bounce_size,
3891 					   GFP_KERNEL);
3892 	if (!host->bounce_buffer) {
3893 		pr_err("%s: failed to allocate %u bytes for bounce buffer, falling back to single segments\n",
3894 		       mmc_hostname(mmc),
3895 		       bounce_size);
3896 		/*
3897 		 * Exiting with zero here makes sure we proceed with
3898 		 * mmc->max_segs == 1.
3899 		 */
3900 		return;
3901 	}
3902 
3903 	host->bounce_addr = dma_map_single(mmc->parent,
3904 					   host->bounce_buffer,
3905 					   bounce_size,
3906 					   DMA_BIDIRECTIONAL);
3907 	ret = dma_mapping_error(mmc->parent, host->bounce_addr);
3908 	if (ret)
3909 		/* Again fall back to max_segs == 1 */
3910 		return;
3911 	host->bounce_buffer_size = bounce_size;
3912 
3913 	/* Lie about this since we're bouncing */
3914 	mmc->max_segs = max_blocks;
3915 	mmc->max_seg_size = bounce_size;
3916 	mmc->max_req_size = bounce_size;
3917 
3918 	pr_info("%s: bounce up to %u segments into one, max segment size %u bytes\n",
3919 		mmc_hostname(mmc), max_blocks, bounce_size);
3920 }
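
/*
 * Worked example for the sizing above: with mmc->max_req_size of at least
 * 64 KiB, bounce_size is 64 KiB and max_blocks is 128, so up to 128
 * scattered segments can be coalesced into one 64 KiB bounce buffer per
 * request.
 */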
3921 
3922 static inline bool sdhci_can_64bit_dma(struct sdhci_host *host)
3923 {
3924 	/*
3925 	 * According to the SD Host Controller spec v4.10, bit[27] of the
3926 	 * Capabilities Register (added in version 4.10) indicates 64-bit
3927 	 * System Address support for V4 mode.
3928 	 */
3929 	if (host->version >= SDHCI_SPEC_410 && host->v4_mode)
3930 		return host->caps & SDHCI_CAN_64BIT_V4;
3931 
3932 	return host->caps & SDHCI_CAN_64BIT;
3933 }
3934 
3935 int sdhci_setup_host(struct sdhci_host *host)
3936 {
3937 	struct mmc_host *mmc;
3938 	u32 max_current_caps;
3939 	unsigned int ocr_avail;
3940 	unsigned int override_timeout_clk;
3941 	u32 max_clk;
3942 	int ret;
3943 
3944 	WARN_ON(host == NULL);
3945 	if (host == NULL)
3946 		return -EINVAL;
3947 
3948 	mmc = host->mmc;
3949 
3950 	/*
3951 	 * If there are external regulators, get them. Note this must be done
3952 	 * early before resetting the host and reading the capabilities so that
3953 	 * the host can take the appropriate action if regulators are not
3954 	 * available.
3955 	 */
3956 	ret = mmc_regulator_get_supply(mmc);
3957 	if (ret)
3958 		return ret;
3959 
3960 	DBG("Version:   0x%08x | Present:  0x%08x\n",
3961 	    sdhci_readw(host, SDHCI_HOST_VERSION),
3962 	    sdhci_readl(host, SDHCI_PRESENT_STATE));
3963 	DBG("Caps:      0x%08x | Caps_1:   0x%08x\n",
3964 	    sdhci_readl(host, SDHCI_CAPABILITIES),
3965 	    sdhci_readl(host, SDHCI_CAPABILITIES_1));
3966 
3967 	sdhci_read_caps(host);
3968 
3969 	override_timeout_clk = host->timeout_clk;
3970 
3971 	if (host->version > SDHCI_SPEC_420) {
3972 		pr_err("%s: Unknown controller version (%d). You may experience problems.\n",
3973 		       mmc_hostname(mmc), host->version);
3974 	}
3975 
3976 	if (host->quirks & SDHCI_QUIRK_BROKEN_CQE)
3977 		mmc->caps2 &= ~MMC_CAP2_CQE;
3978 
3979 	if (host->quirks & SDHCI_QUIRK_FORCE_DMA)
3980 		host->flags |= SDHCI_USE_SDMA;
3981 	else if (!(host->caps & SDHCI_CAN_DO_SDMA))
3982 		DBG("Controller doesn't have SDMA capability\n");
3983 	else
3984 		host->flags |= SDHCI_USE_SDMA;
3985 
3986 	if ((host->quirks & SDHCI_QUIRK_BROKEN_DMA) &&
3987 		(host->flags & SDHCI_USE_SDMA)) {
3988 		DBG("Disabling DMA as it is marked broken\n");
3989 		host->flags &= ~SDHCI_USE_SDMA;
3990 	}
3991 
3992 	if ((host->version >= SDHCI_SPEC_200) &&
3993 		(host->caps & SDHCI_CAN_DO_ADMA2))
3994 		host->flags |= SDHCI_USE_ADMA;
3995 
3996 	if ((host->quirks & SDHCI_QUIRK_BROKEN_ADMA) &&
3997 		(host->flags & SDHCI_USE_ADMA)) {
3998 		DBG("Disabling ADMA as it is marked broken\n");
3999 		host->flags &= ~SDHCI_USE_ADMA;
4000 	}
4001 
4002 	if (sdhci_can_64bit_dma(host))
4003 		host->flags |= SDHCI_USE_64_BIT_DMA;
4004 
4005 	if (host->use_external_dma) {
4006 		ret = sdhci_external_dma_init(host);
4007 		if (ret == -EPROBE_DEFER)
4008 			goto unreg;
4009 		/*
4010 		 * Fall back to use the DMA/PIO integrated in standard SDHCI
4011 		 * instead of external DMA devices.
4012 		 */
4013 		else if (ret)
4014 			sdhci_switch_external_dma(host, false);
4015 		/* Disable internal DMA sources */
4016 		else
4017 			host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
4018 	}
4019 
4020 	if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) {
4021 		if (host->ops->set_dma_mask)
4022 			ret = host->ops->set_dma_mask(host);
4023 		else
4024 			ret = sdhci_set_dma_mask(host);
4025 
4026 		if (!ret && host->ops->enable_dma)
4027 			ret = host->ops->enable_dma(host);
4028 
4029 		if (ret) {
4030 			pr_warn("%s: No suitable DMA available - falling back to PIO\n",
4031 				mmc_hostname(mmc));
4032 			host->flags &= ~(SDHCI_USE_SDMA | SDHCI_USE_ADMA);
4033 
4034 			ret = 0;
4035 		}
4036 	}
4037 
4038 	/* SDMA does not support 64-bit DMA unless v4 mode is set */
4039 	if ((host->flags & SDHCI_USE_64_BIT_DMA) && !host->v4_mode)
4040 		host->flags &= ~SDHCI_USE_SDMA;
4041 
4042 	if (host->flags & SDHCI_USE_ADMA) {
4043 		dma_addr_t dma;
4044 		void *buf;
4045 
4046 		if (!(host->flags & SDHCI_USE_64_BIT_DMA))
4047 			host->alloc_desc_sz = SDHCI_ADMA2_32_DESC_SZ;
4048 		else if (!host->alloc_desc_sz)
4049 			host->alloc_desc_sz = SDHCI_ADMA2_64_DESC_SZ(host);
4050 
4051 		host->desc_sz = host->alloc_desc_sz;
4052 		host->adma_table_sz = host->adma_table_cnt * host->desc_sz;
4053 
4054 		host->align_buffer_sz = SDHCI_MAX_SEGS * SDHCI_ADMA2_ALIGN;
4055 		/*
4056 		 * Use zalloc to zero the reserved high 32-bits of 128-bit
4057 		 * descriptors so that they never need to be written.
4058 		 */
4059 		buf = dma_alloc_coherent(mmc_dev(mmc),
4060 					 host->align_buffer_sz + host->adma_table_sz,
4061 					 &dma, GFP_KERNEL);
4062 		if (!buf) {
4063 			pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
4064 				mmc_hostname(mmc));
4065 			host->flags &= ~SDHCI_USE_ADMA;
4066 		} else if ((dma + host->align_buffer_sz) &
4067 			   (SDHCI_ADMA2_DESC_ALIGN - 1)) {
4068 			pr_warn("%s: unable to allocate aligned ADMA descriptor\n",
4069 				mmc_hostname(mmc));
4070 			host->flags &= ~SDHCI_USE_ADMA;
4071 			dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
4072 					  host->adma_table_sz, buf, dma);
4073 		} else {
4074 			host->align_buffer = buf;
4075 			host->align_addr = dma;
4076 
4077 			host->adma_table = buf + host->align_buffer_sz;
4078 			host->adma_addr = dma + host->align_buffer_sz;
4079 		}
4080 	}
4081 
4082 	/*
4083 	 * If we use DMA, then it's up to the caller to set the DMA
4084 	 * mask, but PIO does not need the hw shim, so we set a new
4085 	 * mask here in that case.
4086 	 */
4087 	if (!(host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA))) {
4088 		host->dma_mask = DMA_BIT_MASK(64);
4089 		mmc_dev(mmc)->dma_mask = &host->dma_mask;
4090 	}
4091 
4092 	if (host->version >= SDHCI_SPEC_300)
4093 		host->max_clk = (host->caps & SDHCI_CLOCK_V3_BASE_MASK)
4094 			>> SDHCI_CLOCK_BASE_SHIFT;
4095 	else
4096 		host->max_clk = (host->caps & SDHCI_CLOCK_BASE_MASK)
4097 			>> SDHCI_CLOCK_BASE_SHIFT;
4098 
4099 	host->max_clk *= 1000000;
4100 	if (host->max_clk == 0 || host->quirks &
4101 			SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN) {
4102 		if (!host->ops->get_max_clock) {
4103 			pr_err("%s: Hardware doesn't specify base clock frequency.\n",
4104 			       mmc_hostname(mmc));
4105 			ret = -ENODEV;
4106 			goto undma;
4107 		}
4108 		host->max_clk = host->ops->get_max_clock(host);
4109 	}
4110 
4111 	/*
4112 	 * For Host Controller v3.00, find out whether the clock
4113 	 * multiplier is supported.
4114 	 */
4115 	host->clk_mul = (host->caps1 & SDHCI_CLOCK_MUL_MASK) >>
4116 			SDHCI_CLOCK_MUL_SHIFT;
4117 
4118 	/*
4119 	 * If the value in Clock Multiplier is 0, programmable
4120 	 * clock mode is not supported; otherwise the actual clock
4121 	 * multiplier is one more than the value of Clock Multiplier
4122 	 * in the Capabilities Register.
4123 	 */
4124 	if (host->clk_mul)
4125 		host->clk_mul += 1;
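	/*
	 * Worked example: a Clock Multiplier field of 9 yields clk_mul = 10,
	 * so a 100 MHz base clock can source up to 1 GHz in programmable
	 * clock mode (see the f_max calculation below).
	 */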
4126 
4127 	/*
4128 	 * Set host parameters.
4129 	 */
4130 	max_clk = host->max_clk;
4131 
4132 	if (host->ops->get_min_clock)
4133 		mmc->f_min = host->ops->get_min_clock(host);
4134 	else if (host->version >= SDHCI_SPEC_300) {
4135 		if (host->clk_mul)
4136 			max_clk = host->max_clk * host->clk_mul;
4137 		/*
4138 		 * Divided Clock Mode minimum clock rate is always less than
4139 		 * Programmable Clock Mode minimum clock rate.
4140 		 */
4141 		mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
4142 	} else
4143 		mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
4144 
4145 	if (!mmc->f_max || mmc->f_max > max_clk)
4146 		mmc->f_max = max_clk;
4147 
4148 	if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
4149 		host->timeout_clk = (host->caps & SDHCI_TIMEOUT_CLK_MASK) >>
4150 					SDHCI_TIMEOUT_CLK_SHIFT;
4151 
4152 		if (host->caps & SDHCI_TIMEOUT_CLK_UNIT)
4153 			host->timeout_clk *= 1000;
4154 
4155 		if (host->timeout_clk == 0) {
4156 			if (!host->ops->get_timeout_clock) {
4157 				pr_err("%s: Hardware doesn't specify timeout clock frequency.\n",
4158 					mmc_hostname(mmc));
4159 				ret = -ENODEV;
4160 				goto undma;
4161 			}
4162 
4163 			host->timeout_clk =
4164 				DIV_ROUND_UP(host->ops->get_timeout_clock(host),
4165 					     1000);
4166 		}
4167 
4168 		if (override_timeout_clk)
4169 			host->timeout_clk = override_timeout_clk;
4170 
4171 		mmc->max_busy_timeout = host->ops->get_max_timeout_count ?
4172 			host->ops->get_max_timeout_count(host) : 1 << 27;
4173 		mmc->max_busy_timeout /= host->timeout_clk;
4174 	}
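
	/*
	 * Note: host->timeout_clk is kept in kHz and mmc->max_busy_timeout
	 * in ms. For example, a 1 MHz timeout clock (1000 kHz) with the
	 * default maximum count of 1 << 27 gives a maximum busy timeout of
	 * roughly 134 seconds.
	 */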
4175 
4176 	if (host->quirks2 & SDHCI_QUIRK2_DISABLE_HW_TIMEOUT &&
4177 	    !host->ops->get_max_timeout_count)
4178 		mmc->max_busy_timeout = 0;
4179 
4180 	mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23;
4181 	mmc->caps2 |= MMC_CAP2_SDIO_IRQ_NOTHREAD;
4182 
4183 	if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12)
4184 		host->flags |= SDHCI_AUTO_CMD12;
4185 
4186 	/*
4187 	 * In v3 mode, Auto-CMD23 only works with ADMA or PIO.
4188 	 * For v4 mode, SDMA may use Auto-CMD23 as well.
4189 	 */
4190 	if ((host->version >= SDHCI_SPEC_300) &&
4191 	    ((host->flags & SDHCI_USE_ADMA) ||
4192 	     !(host->flags & SDHCI_USE_SDMA) || host->v4_mode) &&
4193 	     !(host->quirks2 & SDHCI_QUIRK2_ACMD23_BROKEN)) {
4194 		host->flags |= SDHCI_AUTO_CMD23;
4195 		DBG("Auto-CMD23 available\n");
4196 	} else {
4197 		DBG("Auto-CMD23 unavailable\n");
4198 	}
4199 
4200 	/*
4201 	 * A controller may support 8-bit width, but the board itself
4202 	 * might not have the pins brought out.  Boards that support
4203 	 * 8-bit width must set "mmc->caps |= MMC_CAP_8_BIT_DATA;" in
4204 	 * their platform code before calling sdhci_add_host(), and we
4205 	 * won't assume 8-bit width for hosts without that CAP.
4206 	 */
4207 	if (!(host->quirks & SDHCI_QUIRK_FORCE_1_BIT_DATA))
4208 		mmc->caps |= MMC_CAP_4_BIT_DATA;
4209 
4210 	if (host->quirks2 & SDHCI_QUIRK2_HOST_NO_CMD23)
4211 		mmc->caps &= ~MMC_CAP_CMD23;
4212 
4213 	if (host->caps & SDHCI_CAN_DO_HISPD)
4214 		mmc->caps |= MMC_CAP_SD_HIGHSPEED | MMC_CAP_MMC_HIGHSPEED;
4215 
4216 	if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) &&
4217 	    mmc_card_is_removable(mmc) &&
4218 	    mmc_gpio_get_cd(host->mmc) < 0)
4219 		mmc->caps |= MMC_CAP_NEEDS_POLL;
4220 
4221 	if (!IS_ERR(mmc->supply.vqmmc)) {
4222 		ret = regulator_enable(mmc->supply.vqmmc);
4223 
4224 		/* If vqmmc provides no 1.8V signalling, then there's no UHS */
4225 		if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 1700000,
4226 						    1950000))
4227 			host->caps1 &= ~(SDHCI_SUPPORT_SDR104 |
4228 					 SDHCI_SUPPORT_SDR50 |
4229 					 SDHCI_SUPPORT_DDR50);
4230 
4231 		/* In the eMMC case, vqmmc might be a fixed 1.8V regulator */
4232 		if (!regulator_is_supported_voltage(mmc->supply.vqmmc, 2700000,
4233 						    3600000))
4234 			host->flags &= ~SDHCI_SIGNALING_330;
4235 
4236 		if (ret) {
4237 			pr_warn("%s: Failed to enable vqmmc regulator: %d\n",
4238 				mmc_hostname(mmc), ret);
4239 			mmc->supply.vqmmc = ERR_PTR(-EINVAL);
4240 		}
4241 	}
4242 
4243 	if (host->quirks2 & SDHCI_QUIRK2_NO_1_8_V) {
4244 		host->caps1 &= ~(SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
4245 				 SDHCI_SUPPORT_DDR50);
4246 		/*
4247 		 * The SDHCI controller in a SoC might support HS200/HS400
4248 		 * (indicated using mmc-hs200-1_8v/mmc-hs400-1_8v dt property),
4249 		 * but if the board is designed such that the IO lines are not
4250 		 * connected to 1.8V then HS200/HS400 cannot be supported.
4251 		 * Disable HS200/HS400 if the board does not have 1.8V connected
4252 		 * to the IO lines. (The same applies to other 1.8V modes.)
4253 		 */
4254 		mmc->caps2 &= ~(MMC_CAP2_HSX00_1_8V | MMC_CAP2_HS400_ES);
4255 		mmc->caps &= ~(MMC_CAP_1_8V_DDR | MMC_CAP_UHS);
4256 	}
4257 
4258 	/* Any UHS-I mode in caps implies SDR12 and SDR25 support. */
4259 	if (host->caps1 & (SDHCI_SUPPORT_SDR104 | SDHCI_SUPPORT_SDR50 |
4260 			   SDHCI_SUPPORT_DDR50))
4261 		mmc->caps |= MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25;
4262 
4263 	/* SDR104 support also implies SDR50 support */
4264 	if (host->caps1 & SDHCI_SUPPORT_SDR104) {
4265 		mmc->caps |= MMC_CAP_UHS_SDR104 | MMC_CAP_UHS_SDR50;
4266 		/* SD3.0: SDR104 is supported so (for eMMC) the caps2
4267 		 * field can be promoted to support HS200.
4268 		 */
4269 		if (!(host->quirks2 & SDHCI_QUIRK2_BROKEN_HS200))
4270 			mmc->caps2 |= MMC_CAP2_HS200;
4271 	} else if (host->caps1 & SDHCI_SUPPORT_SDR50) {
4272 		mmc->caps |= MMC_CAP_UHS_SDR50;
4273 	}
4274 
4275 	if (host->quirks2 & SDHCI_QUIRK2_CAPS_BIT63_FOR_HS400 &&
4276 	    (host->caps1 & SDHCI_SUPPORT_HS400))
4277 		mmc->caps2 |= MMC_CAP2_HS400;
4278 
4279 	if ((mmc->caps2 & MMC_CAP2_HSX00_1_2V) &&
4280 	    (IS_ERR(mmc->supply.vqmmc) ||
4281 	     !regulator_is_supported_voltage(mmc->supply.vqmmc, 1100000,
4282 					     1300000)))
4283 		mmc->caps2 &= ~MMC_CAP2_HSX00_1_2V;
4284 
4285 	if ((host->caps1 & SDHCI_SUPPORT_DDR50) &&
4286 	    !(host->quirks2 & SDHCI_QUIRK2_BROKEN_DDR50))
4287 		mmc->caps |= MMC_CAP_UHS_DDR50;
4288 
4289 	/* Does the host need tuning for SDR50? */
4290 	if (host->caps1 & SDHCI_USE_SDR50_TUNING)
4291 		host->flags |= SDHCI_SDR50_NEEDS_TUNING;
4292 
4293 	/* Driver Type(s) (A, C, D) supported by the host */
4294 	if (host->caps1 & SDHCI_DRIVER_TYPE_A)
4295 		mmc->caps |= MMC_CAP_DRIVER_TYPE_A;
4296 	if (host->caps1 & SDHCI_DRIVER_TYPE_C)
4297 		mmc->caps |= MMC_CAP_DRIVER_TYPE_C;
4298 	if (host->caps1 & SDHCI_DRIVER_TYPE_D)
4299 		mmc->caps |= MMC_CAP_DRIVER_TYPE_D;
4300 
4301 	/* Initial value for re-tuning timer count */
4302 	host->tuning_count = (host->caps1 & SDHCI_RETUNING_TIMER_COUNT_MASK) >>
4303 			     SDHCI_RETUNING_TIMER_COUNT_SHIFT;
4304 
4305 	/*
4306 	 * If the Re-tuning Timer is not disabled, the actual re-tuning
4307 	 * timer value is 2 ^ (n - 1) seconds.
4308 	 */
4309 	if (host->tuning_count)
4310 		host->tuning_count = 1 << (host->tuning_count - 1);
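	/*
	 * Worked example (per the spec's re-tuning timer encoding): a
	 * Re-tuning Timer Count field of 3 yields 1 << (3 - 1), i.e. a
	 * 4 second re-tuning interval.
	 */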
4311 
4312 	/* Re-tuning mode supported by the Host Controller */
4313 	host->tuning_mode = (host->caps1 & SDHCI_RETUNING_MODE_MASK) >>
4314 			     SDHCI_RETUNING_MODE_SHIFT;
4315 
4316 	ocr_avail = 0;
4317 
4318 	/*
4319 	 * According to the SD Host Controller spec v3.00, if the Host System
4320 	 * can afford more than 150mA, the Host Driver should set XPC to 1. Also
4321 	 * the value is meaningful only if Voltage Support in the Capabilities
4322 	 * register is set. The actual current value is 4 times the register
4323 	 * value.
4324 	 */
4325 	max_current_caps = sdhci_readl(host, SDHCI_MAX_CURRENT);
4326 	if (!max_current_caps && !IS_ERR(mmc->supply.vmmc)) {
4327 		int curr = regulator_get_current_limit(mmc->supply.vmmc);
4328 		if (curr > 0) {
4330 			/* convert to SDHCI_MAX_CURRENT format */
4331 			curr = curr / 1000; /* convert to mA */
4332 			curr = curr / SDHCI_MAX_CURRENT_MULTIPLIER;
4333 
4334 			curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT);
4335 			max_current_caps =
4336 				(curr << SDHCI_MAX_CURRENT_330_SHIFT) |
4337 				(curr << SDHCI_MAX_CURRENT_300_SHIFT) |
4338 				(curr << SDHCI_MAX_CURRENT_180_SHIFT);
4339 		}
4340 	}
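
	/*
	 * Worked example (hypothetical regulator limit): vmmc limited to
	 * 1000000 uA gives curr = 1000 mA, then 1000 / 4 = 250, which is
	 * below SDHCI_MAX_CURRENT_LIMIT and encodes back to 250 * 4 =
	 * 1000 mA in each voltage field.
	 */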
4341 
4342 	if (host->caps & SDHCI_CAN_VDD_330) {
4343 		ocr_avail |= MMC_VDD_32_33 | MMC_VDD_33_34;
4344 
4345 		mmc->max_current_330 = ((max_current_caps &
4346 				   SDHCI_MAX_CURRENT_330_MASK) >>
4347 				   SDHCI_MAX_CURRENT_330_SHIFT) *
4348 				   SDHCI_MAX_CURRENT_MULTIPLIER;
4349 	}
4350 	if (host->caps & SDHCI_CAN_VDD_300) {
4351 		ocr_avail |= MMC_VDD_29_30 | MMC_VDD_30_31;
4352 
4353 		mmc->max_current_300 = ((max_current_caps &
4354 				   SDHCI_MAX_CURRENT_300_MASK) >>
4355 				   SDHCI_MAX_CURRENT_300_SHIFT) *
4356 				   SDHCI_MAX_CURRENT_MULTIPLIER;
4357 	}
4358 	if (host->caps & SDHCI_CAN_VDD_180) {
4359 		ocr_avail |= MMC_VDD_165_195;
4360 
4361 		mmc->max_current_180 = ((max_current_caps &
4362 				   SDHCI_MAX_CURRENT_180_MASK) >>
4363 				   SDHCI_MAX_CURRENT_180_SHIFT) *
4364 				   SDHCI_MAX_CURRENT_MULTIPLIER;
4365 	}
4366 
4367 	/* If the OCR is set by the host, use it instead. */
4368 	if (host->ocr_mask)
4369 		ocr_avail = host->ocr_mask;
4370 
4371 	/* If the OCR is set by external regulators, give it the highest priority. */
4372 	if (mmc->ocr_avail)
4373 		ocr_avail = mmc->ocr_avail;
4374 
4375 	mmc->ocr_avail = ocr_avail;
4376 	mmc->ocr_avail_sdio = ocr_avail;
4377 	if (host->ocr_avail_sdio)
4378 		mmc->ocr_avail_sdio &= host->ocr_avail_sdio;
4379 	mmc->ocr_avail_sd = ocr_avail;
4380 	if (host->ocr_avail_sd)
4381 		mmc->ocr_avail_sd &= host->ocr_avail_sd;
4382 	else /* normal SD controllers don't support 1.8V */
4383 		mmc->ocr_avail_sd &= ~MMC_VDD_165_195;
4384 	mmc->ocr_avail_mmc = ocr_avail;
4385 	if (host->ocr_avail_mmc)
4386 		mmc->ocr_avail_mmc &= host->ocr_avail_mmc;
4387 
4388 	if (mmc->ocr_avail == 0) {
4389 		pr_err("%s: Hardware doesn't report any supported voltages.\n",
4390 		       mmc_hostname(mmc));
4391 		ret = -ENODEV;
4392 		goto unreg;
4393 	}
4394 
4395 	if ((mmc->caps & (MMC_CAP_UHS_SDR12 | MMC_CAP_UHS_SDR25 |
4396 			  MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 |
4397 			  MMC_CAP_UHS_DDR50 | MMC_CAP_1_8V_DDR)) ||
4398 	    (mmc->caps2 & (MMC_CAP2_HS200_1_8V_SDR | MMC_CAP2_HS400_1_8V)))
4399 		host->flags |= SDHCI_SIGNALING_180;
4400 
4401 	if (mmc->caps2 & MMC_CAP2_HSX00_1_2V)
4402 		host->flags |= SDHCI_SIGNALING_120;
4403 
4404 	spin_lock_init(&host->lock);
4405 
4406 	/*
4407 	 * Maximum request size in one transfer. Limited by the SDMA boundary
4408 	 * size (512KiB). Note some tuning modes impose a 4MiB limit, but
4409 	 * 512KiB is less anyway.
4410 	 */
4411 	mmc->max_req_size = 524288;
4412 
4413 	/*
4414 	 * Maximum number of segments. Depends on whether the hardware
4415 	 * can do scatter/gather.
4416 	 */
4417 	if (host->flags & SDHCI_USE_ADMA) {
4418 		mmc->max_segs = SDHCI_MAX_SEGS;
4419 	} else if (host->flags & SDHCI_USE_SDMA) {
4420 		mmc->max_segs = 1;
4421 		if (swiotlb_max_segment()) {
4422 			unsigned int max_req_size = (1 << IO_TLB_SHIFT) *
4423 						IO_TLB_SEGSIZE;
4424 			mmc->max_req_size = min(mmc->max_req_size,
4425 						max_req_size);
4426 		}
4427 	} else { /* PIO */
4428 		mmc->max_segs = SDHCI_MAX_SEGS;
4429 	}
4430 
4431 	/*
4432 	 * Maximum segment size: a single segment may be as large as the
4433 	 * whole request. When doing hardware scatter/gather, though, each
4434 	 * entry cannot be larger than 64 KiB.
4435 	 */
4436 	if (host->flags & SDHCI_USE_ADMA) {
4437 		if (host->quirks & SDHCI_QUIRK_BROKEN_ADMA_ZEROLEN_DESC)
4438 			mmc->max_seg_size = 65535;
4439 		else
4440 			mmc->max_seg_size = 65536;
4441 	} else {
4442 		mmc->max_seg_size = mmc->max_req_size;
4443 	}
4444 
4445 	/*
4446 	 * Maximum block size. This varies from controller to controller and
4447 	 * is specified in the capabilities register.
4448 	 */
4449 	if (host->quirks & SDHCI_QUIRK_FORCE_BLK_SZ_2048) {
4450 		mmc->max_blk_size = 2;
4451 	} else {
4452 		mmc->max_blk_size = (host->caps & SDHCI_MAX_BLOCK_MASK) >>
4453 				SDHCI_MAX_BLOCK_SHIFT;
4454 		if (mmc->max_blk_size >= 3) {
4455 			pr_warn("%s: Invalid maximum block size, assuming 512 bytes\n",
4456 				mmc_hostname(mmc));
4457 			mmc->max_blk_size = 0;
4458 		}
4459 	}
4460 
4461 	mmc->max_blk_size = 512 << mmc->max_blk_size;
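	/*
	 * Worked example: a Max Block Length capability field of 2 yields
	 * 512 << 2 = 2048-byte blocks, the same value that the
	 * SDHCI_QUIRK_FORCE_BLK_SZ_2048 quirk forces above.
	 */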
4462 
4463 	/*
4464 	 * Maximum block count.
4465 	 */
4466 	mmc->max_blk_count = (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535;
4467 
4468 	if (mmc->max_segs == 1)
4469 		/* This may alter mmc->*_blk_* parameters */
4470 		sdhci_allocate_bounce_buffer(host);
4471 
4472 	return 0;
4473 
4474 unreg:
4475 	if (!IS_ERR(mmc->supply.vqmmc))
4476 		regulator_disable(mmc->supply.vqmmc);
4477 undma:
4478 	if (host->align_buffer)
4479 		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
4480 				  host->adma_table_sz, host->align_buffer,
4481 				  host->align_addr);
4482 	host->adma_table = NULL;
4483 	host->align_buffer = NULL;
4484 
4485 	return ret;
4486 }
4487 EXPORT_SYMBOL_GPL(sdhci_setup_host);
4488 
4489 void sdhci_cleanup_host(struct sdhci_host *host)
4490 {
4491 	struct mmc_host *mmc = host->mmc;
4492 
4493 	if (!IS_ERR(mmc->supply.vqmmc))
4494 		regulator_disable(mmc->supply.vqmmc);
4495 
4496 	if (host->align_buffer)
4497 		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
4498 				  host->adma_table_sz, host->align_buffer,
4499 				  host->align_addr);
4500 
4501 	if (host->use_external_dma)
4502 		sdhci_external_dma_release(host);
4503 
4504 	host->adma_table = NULL;
4505 	host->align_buffer = NULL;
4506 }
4507 EXPORT_SYMBOL_GPL(sdhci_cleanup_host);
4508 
4509 int __sdhci_add_host(struct sdhci_host *host)
4510 {
4511 	unsigned int flags = WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI;
4512 	struct mmc_host *mmc = host->mmc;
4513 	int ret;
4514 
4515 	host->complete_wq = alloc_workqueue("sdhci", flags, 0);
4516 	if (!host->complete_wq)
4517 		return -ENOMEM;
4518 
4519 	INIT_WORK(&host->complete_work, sdhci_complete_work);
4520 
4521 	timer_setup(&host->timer, sdhci_timeout_timer, 0);
4522 	timer_setup(&host->data_timer, sdhci_timeout_data_timer, 0);
4523 
4524 	init_waitqueue_head(&host->buf_ready_int);
4525 
4526 	sdhci_init(host, 0);
4527 
4528 	ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq,
4529 				   IRQF_SHARED, mmc_hostname(mmc), host);
4530 	if (ret) {
4531 		pr_err("%s: Failed to request IRQ %d: %d\n",
4532 		       mmc_hostname(mmc), host->irq, ret);
4533 		goto unwq;
4534 	}
4535 
4536 	ret = sdhci_led_register(host);
4537 	if (ret) {
4538 		pr_err("%s: Failed to register LED device: %d\n",
4539 		       mmc_hostname(mmc), ret);
4540 		goto unirq;
4541 	}
4542 
4543 	ret = mmc_add_host(mmc);
4544 	if (ret)
4545 		goto unled;
4546 
4547 	pr_info("%s: SDHCI controller on %s [%s] using %s\n",
4548 		mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)),
4549 		host->use_external_dma ? "External DMA" :
4550 		(host->flags & SDHCI_USE_ADMA) ?
4551 		(host->flags & SDHCI_USE_64_BIT_DMA) ? "ADMA 64-bit" : "ADMA" :
4552 		(host->flags & SDHCI_USE_SDMA) ? "DMA" : "PIO");
4553 
4554 	sdhci_enable_card_detection(host);
4555 
4556 	return 0;
4557 
4558 unled:
4559 	sdhci_led_unregister(host);
4560 unirq:
4561 	sdhci_do_reset(host, SDHCI_RESET_ALL);
4562 	sdhci_writel(host, 0, SDHCI_INT_ENABLE);
4563 	sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
4564 	free_irq(host->irq, host);
4565 unwq:
4566 	destroy_workqueue(host->complete_wq);
4567 
4568 	return ret;
4569 }
4570 EXPORT_SYMBOL_GPL(__sdhci_add_host);
4571 
4572 int sdhci_add_host(struct sdhci_host *host)
4573 {
4574 	int ret;
4575 
4576 	ret = sdhci_setup_host(host);
4577 	if (ret)
4578 		return ret;
4579 
4580 	ret = __sdhci_add_host(host);
4581 	if (ret)
4582 		goto cleanup;
4583 
4584 	return 0;
4585 
4586 cleanup:
4587 	sdhci_cleanup_host(host);
4588 
4589 	return ret;
4590 }
4591 EXPORT_SYMBOL_GPL(sdhci_add_host);
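
/*
 * Example (illustrative sketch): drivers that must adjust mmc or host
 * parameters between capability parsing and registration use the split
 * API, as sdhci_add_host() itself does above; my_fixup() is hypothetical.
 *
 *	ret = sdhci_setup_host(host);
 *	if (ret)
 *		return ret;
 *
 *	my_fixup(host);	// e.g. adjust mmc->max_segs or host->flags
 *
 *	ret = __sdhci_add_host(host);
 *	if (ret)
 *		sdhci_cleanup_host(host);
 *
 *	return ret;
 */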
4592 
4593 void sdhci_remove_host(struct sdhci_host *host, int dead)
4594 {
4595 	struct mmc_host *mmc = host->mmc;
4596 	unsigned long flags;
4597 
4598 	if (dead) {
4599 		spin_lock_irqsave(&host->lock, flags);
4600 
4601 		host->flags |= SDHCI_DEVICE_DEAD;
4602 
4603 		if (sdhci_has_requests(host)) {
4604 			pr_err("%s: Controller removed during transfer!\n",
4605 			       mmc_hostname(mmc));
4606 			sdhci_error_out_mrqs(host, -ENOMEDIUM);
4607 		}
4608 
4609 		spin_unlock_irqrestore(&host->lock, flags);
4610 	}
4611 
4612 	sdhci_disable_card_detection(host);
4613 
4614 	mmc_remove_host(mmc);
4615 
4616 	sdhci_led_unregister(host);
4617 
4618 	if (!dead)
4619 		sdhci_do_reset(host, SDHCI_RESET_ALL);
4620 
4621 	sdhci_writel(host, 0, SDHCI_INT_ENABLE);
4622 	sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE);
4623 	free_irq(host->irq, host);
4624 
4625 	del_timer_sync(&host->timer);
4626 	del_timer_sync(&host->data_timer);
4627 
4628 	destroy_workqueue(host->complete_wq);
4629 
4630 	if (!IS_ERR(mmc->supply.vqmmc))
4631 		regulator_disable(mmc->supply.vqmmc);
4632 
4633 	if (host->align_buffer)
4634 		dma_free_coherent(mmc_dev(mmc), host->align_buffer_sz +
4635 				  host->adma_table_sz, host->align_buffer,
4636 				  host->align_addr);
4637 
4638 	if (host->use_external_dma)
4639 		sdhci_external_dma_release(host);
4640 
4641 	host->adma_table = NULL;
4642 	host->align_buffer = NULL;
4643 }
4645 EXPORT_SYMBOL_GPL(sdhci_remove_host);
4646 
4647 void sdhci_free_host(struct sdhci_host *host)
4648 {
4649 	mmc_free_host(host->mmc);
4650 }
4651 
4653 EXPORT_SYMBOL_GPL(sdhci_free_host);
4654 /*****************************************************************************\
4655  *                                                                           *
4656  * Driver init/exit                                                          *
4657  *                                                                           *
4658 \*****************************************************************************/
4659 
4660 static int __init sdhci_drv_init(void)
4661 {
4662 	pr_info(DRIVER_NAME
4663 		": Secure Digital Host Controller Interface driver\n");
4664 	pr_info(DRIVER_NAME ": Copyright(c) Pierre Ossman\n");
4665 
4666 	return 0;
4667 }
4668 
4669 static void __exit sdhci_drv_exit(void)
4670 {
4671 }
4672 
4673 module_init(sdhci_drv_init);
4674 module_exit(sdhci_drv_exit);
4675 
4676 module_param(debug_quirks, uint, 0444);
4677 module_param(debug_quirks2, uint, 0444);
4678 
4679 MODULE_AUTHOR("Pierre Ossman <pierre@ossman.eu>");
4680 MODULE_DESCRIPTION("Secure Digital Host Controller Interface core driver");
4681 MODULE_LICENSE("GPL");
4682 
4683 MODULE_PARM_DESC(debug_quirks, "Force certain quirks.");
4684 MODULE_PARM_DESC(debug_quirks2, "Force certain other quirks.");
4685