xref: /freebsd/sys/dev/mmc/host/dwmmc.c (revision 1323ec57)
1 /*-
2  * Copyright (c) 2014-2019 Ruslan Bukin <br@bsdpad.com>
3  * All rights reserved.
4  *
5  * This software was developed by SRI International and the University of
6  * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237)
7  * ("CTSRD"), as part of the DARPA CRASH research programme.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  */
30 
31 /*
32  * Synopsys DesignWare Mobile Storage Host Controller
33  * Chapter 14, Altera Cyclone V Device Handbook (CV-5V2 2014.07.22)
34  */
35 
36 #include <sys/cdefs.h>
37 __FBSDID("$FreeBSD$");
38 
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/conf.h>
42 #include <sys/bus.h>
43 #include <sys/kernel.h>
44 #include <sys/lock.h>
45 #include <sys/module.h>
46 #include <sys/malloc.h>
47 #include <sys/mutex.h>
48 #include <sys/proc.h>
49 #include <sys/rman.h>
50 #include <sys/queue.h>
51 #include <sys/taskqueue.h>
52 
53 #include <dev/mmc/bridge.h>
54 #include <dev/mmc/mmcbrvar.h>
55 #include <dev/mmc/mmc_fdt_helpers.h>
56 
57 #include <dev/fdt/fdt_common.h>
58 #include <dev/ofw/openfirm.h>
59 #include <dev/ofw/ofw_bus.h>
60 #include <dev/ofw/ofw_bus_subr.h>
61 
62 #include <machine/bus.h>
63 #include <machine/cpu.h>
64 #include <machine/intr.h>
65 
66 #include <dev/extres/clk/clk.h>
67 
68 #include <dev/mmc/host/dwmmc_reg.h>
69 #include <dev/mmc/host/dwmmc_var.h>
70 
71 #include "opt_mmccam.h"
72 
73 #ifdef MMCCAM
74 #include <cam/cam.h>
75 #include <cam/cam_ccb.h>
76 #include <cam/cam_debug.h>
77 #include <cam/cam_sim.h>
78 #include <cam/cam_xpt_sim.h>
79 
80 #include "mmc_sim_if.h"
81 #endif
82 
83 #include "mmcbr_if.h"
84 
85 #ifdef DEBUG
86 #define dprintf(fmt, args...) printf(fmt, ##args)
87 #else
88 #define dprintf(fmt, args...)
89 #endif
90 
91 #define	READ4(_sc, _reg) \
92 	bus_read_4((_sc)->res[0], _reg)
93 #define	WRITE4(_sc, _reg, _val) \
94 	bus_write_4((_sc)->res[0], _reg, _val)
95 
96 #define	DIV_ROUND_UP(n, d)		howmany(n, d)
97 
98 #define	DWMMC_LOCK(_sc)			mtx_lock(&(_sc)->sc_mtx)
99 #define	DWMMC_UNLOCK(_sc)		mtx_unlock(&(_sc)->sc_mtx)
100 #define	DWMMC_LOCK_INIT(_sc) \
101 	mtx_init(&_sc->sc_mtx, device_get_nameunit(_sc->dev), \
102 	    "dwmmc", MTX_DEF)
103 #define	DWMMC_LOCK_DESTROY(_sc)		mtx_destroy(&_sc->sc_mtx);
104 #define	DWMMC_ASSERT_LOCKED(_sc)	mtx_assert(&_sc->sc_mtx, MA_OWNED);
105 #define	DWMMC_ASSERT_UNLOCKED(_sc)	mtx_assert(&_sc->sc_mtx, MA_NOTOWNED);
106 
107 #define	PENDING_CMD	0x01
108 #define	PENDING_STOP	0x02
109 #define	CARD_INIT_DONE	0x04
110 
111 #define	DWMMC_DATA_ERR_FLAGS	(SDMMC_INTMASK_DRT | SDMMC_INTMASK_DCRC \
112 				|SDMMC_INTMASK_SBE | SDMMC_INTMASK_EBE)
113 #define	DWMMC_CMD_ERR_FLAGS	(SDMMC_INTMASK_RTO | SDMMC_INTMASK_RCRC \
114 				|SDMMC_INTMASK_RE)
115 #define	DWMMC_ERR_FLAGS		(DWMMC_DATA_ERR_FLAGS | DWMMC_CMD_ERR_FLAGS \
116 				|SDMMC_INTMASK_HLE)
117 
118 #define	DES0_DIC	(1 << 1)	/* Disable Interrupt on Completion */
119 #define	DES0_LD		(1 << 2)	/* Last Descriptor */
120 #define	DES0_FS		(1 << 3)	/* First Descriptor */
121 #define	DES0_CH		(1 << 4)	/* second address CHained */
122 #define	DES0_ER		(1 << 5)	/* End of Ring */
123 #define	DES0_CES	(1 << 30)	/* Card Error Summary */
124 #define	DES0_OWN	(1 << 31)	/* OWN */
125 
126 #define	DES1_BS1_MASK	0x1fff
127 
128 struct idmac_desc {
129 	uint32_t	des0;	/* control */
130 	uint32_t	des1;	/* bufsize */
131 	uint32_t	des2;	/* buf1 phys addr */
132 	uint32_t	des3;	/* buf2 phys addr or next descr */
133 };
134 
135 #define	IDMAC_DESC_SEGS	(PAGE_SIZE / (sizeof(struct idmac_desc)))
136 #define	IDMAC_DESC_SIZE	(sizeof(struct idmac_desc) * IDMAC_DESC_SEGS)
137 #define	DEF_MSIZE	0x2	/* Burst size of multiple transaction */
138 /*
139  * Size field in DMA descriptor is 13 bits long (up to 4095 bytes),
140  * but must be a multiple of the data bus size. Additionally, we must
141  * ensure that bus_dmamap_load() does not fragment the buffer further
142  * (it is processed with page-size granularity). Thus, limit the
143  * fragment size to half a page.
144  * XXX switch descriptor format to array and use second buffer pointer for
145  * second half of page
146  */
147 #define	IDMAC_MAX_SIZE	2048
148 /*
149  * Busdma may bounce buffers, so we must reserve 2 descriptors
150  * (on start and on end) for bounced fragments.
151  */
152 #define DWMMC_MAX_DATA	((IDMAC_MAX_SIZE * (IDMAC_DESC_SEGS - 2)) / MMC_SECTOR_SIZE)
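/*
 * Worked example, assuming 4 kB pages: sizeof(struct idmac_desc) is 16
 * bytes, so IDMAC_DESC_SEGS = 4096 / 16 = 256 descriptors and
 * DWMMC_MAX_DATA = (2048 * 254) / 512 = 1016 sectors per request.
 */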
153 
154 static void dwmmc_next_operation(struct dwmmc_softc *);
155 static int dwmmc_setup_bus(struct dwmmc_softc *, int);
156 static int dma_done(struct dwmmc_softc *, struct mmc_command *);
157 static int dma_stop(struct dwmmc_softc *);
158 static void pio_read(struct dwmmc_softc *, struct mmc_command *);
159 static void pio_write(struct dwmmc_softc *, struct mmc_command *);
160 static void dwmmc_handle_card_present(struct dwmmc_softc *sc, bool is_present);
161 
162 static struct resource_spec dwmmc_spec[] = {
163 	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },
164 	{ SYS_RES_IRQ,		0,	RF_ACTIVE },
165 	{ -1, 0 }
166 };
167 
168 #define	HWTYPE_MASK		(0x0000ffff)
169 #define	HWFLAG_MASK		(0xffff << 16)
170 
171 static void
172 dwmmc_get1paddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
173 {
174 
175 	if (nsegs != 1)
176 		panic("%s: nsegs != 1 (%d)\n", __func__, nsegs);
177 	if (error != 0)
178 		panic("%s: error != 0 (%d)\n", __func__, error);
179 
180 	*(bus_addr_t *)arg = segs[0].ds_addr;
181 }
182 
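/*
 * bus_dmamap_load() callback: turn the returned scatter/gather list into
 * a chain of IDMAC descriptors.  The first segment is marked with
 * DES0_FS, the last one with DES0_LD (and with its completion interrupt
 * enabled); OWN is set only after a write barrier so the controller
 * never sees a half-initialized descriptor.
 */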
183 static void
184 dwmmc_ring_setup(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
185 {
186 	struct dwmmc_softc *sc;
187 	int idx;
188 
189 	sc = arg;
190 	dprintf("nsegs %d seg0len %lu\n", nsegs, segs[0].ds_len);
191 	if (error != 0)
192 		panic("%s: error != 0 (%d)\n", __func__, error);
193 
194 	for (idx = 0; idx < nsegs; idx++) {
195 		sc->desc_ring[idx].des0 = DES0_DIC | DES0_CH;
196 		sc->desc_ring[idx].des1 = segs[idx].ds_len & DES1_BS1_MASK;
197 		sc->desc_ring[idx].des2 = segs[idx].ds_addr;
198 
199 		if (idx == 0)
200 			sc->desc_ring[idx].des0 |= DES0_FS;
201 
202 		if (idx == (nsegs - 1)) {
203 			sc->desc_ring[idx].des0 &= ~(DES0_DIC | DES0_CH);
204 			sc->desc_ring[idx].des0 |= DES0_LD;
205 		}
206 		wmb();
207 		sc->desc_ring[idx].des0 |= DES0_OWN;
208 	}
209 }
210 
211 static int
212 dwmmc_ctrl_reset(struct dwmmc_softc *sc, int reset_bits)
213 {
214 	int reg;
215 	int i;
216 
217 	reg = READ4(sc, SDMMC_CTRL);
218 	reg |= (reset_bits);
219 	WRITE4(sc, SDMMC_CTRL, reg);
220 
221 	/* Wait for the reset to complete. */
222 	for (i = 0; i < 100; i++) {
223 		if (!(READ4(sc, SDMMC_CTRL) & reset_bits))
224 			return (0);
225 		DELAY(10);
226 	}
227 
228 	device_printf(sc->dev, "Reset failed\n");
229 
230 	return (1);
231 }
232 
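/*
 * Allocate the page-sized IDMAC descriptor ring and create the buffer
 * DMA tag/map that dma_prepare() loads for every data transfer.
 */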
233 static int
234 dma_setup(struct dwmmc_softc *sc)
235 {
236 	int error;
237 	int nidx;
238 	int idx;
239 
240 	/*
241 	 * Set up the descriptor ring, the descriptors, and the DMA maps.
242 	 */
243 	error = bus_dma_tag_create(
244 	    bus_get_dma_tag(sc->dev),	/* Parent tag. */
245 	    4096, 0,			/* alignment, boundary */
246 	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
247 	    BUS_SPACE_MAXADDR,		/* highaddr */
248 	    NULL, NULL,			/* filter, filterarg */
249 	    IDMAC_DESC_SIZE, 1,		/* maxsize, nsegments */
250 	    IDMAC_DESC_SIZE,		/* maxsegsize */
251 	    0,				/* flags */
252 	    NULL, NULL,			/* lockfunc, lockarg */
253 	    &sc->desc_tag);
254 	if (error != 0) {
255 		device_printf(sc->dev,
256 		    "could not create ring DMA tag.\n");
257 		return (1);
258 	}
259 
260 	error = bus_dmamem_alloc(sc->desc_tag, (void**)&sc->desc_ring,
261 	    BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO,
262 	    &sc->desc_map);
263 	if (error != 0) {
264 		device_printf(sc->dev,
265 		    "could not allocate descriptor ring.\n");
266 		return (1);
267 	}
268 
269 	error = bus_dmamap_load(sc->desc_tag, sc->desc_map,
270 	    sc->desc_ring, IDMAC_DESC_SIZE, dwmmc_get1paddr,
271 	    &sc->desc_ring_paddr, 0);
272 	if (error != 0) {
273 		device_printf(sc->dev,
274 		    "could not load descriptor ring map.\n");
275 		return (1);
276 	}
277 
278 	for (idx = 0; idx < IDMAC_DESC_SEGS; idx++) {
279 		sc->desc_ring[idx].des0 = DES0_CH;
280 		sc->desc_ring[idx].des1 = 0;
281 		nidx = (idx + 1) % IDMAC_DESC_SEGS;
282 		sc->desc_ring[idx].des3 = sc->desc_ring_paddr + \
283 		    (nidx * sizeof(struct idmac_desc));
284 	}
285 	sc->desc_ring[idx - 1].des3 = sc->desc_ring_paddr;
286 	sc->desc_ring[idx - 1].des0 |= DES0_ER;
287 
288 	error = bus_dma_tag_create(
289 	    bus_get_dma_tag(sc->dev),	/* Parent tag. */
290 	    8, 0,			/* alignment, boundary */
291 	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
292 	    BUS_SPACE_MAXADDR,		/* highaddr */
293 	    NULL, NULL,			/* filter, filterarg */
294 	    IDMAC_MAX_SIZE * IDMAC_DESC_SEGS,	/* maxsize */
295 	    IDMAC_DESC_SEGS,		/* nsegments */
296 	    IDMAC_MAX_SIZE,		/* maxsegsize */
297 	    0,				/* flags */
298 	    NULL, NULL,			/* lockfunc, lockarg */
299 	    &sc->buf_tag);
300 	if (error != 0) {
301 		device_printf(sc->dev,
302 		    "could not create ring DMA tag.\n");
303 		return (1);
304 	}
305 
306 	error = bus_dmamap_create(sc->buf_tag, 0,
307 	    &sc->buf_map);
308 	if (error != 0) {
309 		device_printf(sc->dev,
310 		    "could not create TX buffer DMA map.\n");
311 		return (1);
312 	}
313 
314 	return (0);
315 }
316 
317 static void
318 dwmmc_cmd_done(struct dwmmc_softc *sc)
319 {
320 	struct mmc_command *cmd;
321 #ifdef MMCCAM
322 	union ccb *ccb;
323 #endif
324 
325 #ifdef MMCCAM
326 	ccb = sc->ccb;
327 	if (ccb == NULL)
328 		return;
329 	cmd = &ccb->mmcio.cmd;
330 #else
331 	cmd = sc->curcmd;
332 #endif
333 	if (cmd == NULL)
334 		return;
335 
336 	if (cmd->flags & MMC_RSP_PRESENT) {
337 		if (cmd->flags & MMC_RSP_136) {
338 			cmd->resp[3] = READ4(sc, SDMMC_RESP0);
339 			cmd->resp[2] = READ4(sc, SDMMC_RESP1);
340 			cmd->resp[1] = READ4(sc, SDMMC_RESP2);
341 			cmd->resp[0] = READ4(sc, SDMMC_RESP3);
342 		} else {
343 			cmd->resp[3] = 0;
344 			cmd->resp[2] = 0;
345 			cmd->resp[1] = 0;
346 			cmd->resp[0] = READ4(sc, SDMMC_RESP0);
347 		}
348 	}
349 }
350 
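/*
 * Decide whether the current command has fully completed: a command with
 * data must also have seen Data Transfer Over (and Auto Command Done when
 * auto-stop is in use) before the next operation is started.
 */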
351 static void
352 dwmmc_tasklet(struct dwmmc_softc *sc)
353 {
354 	struct mmc_command *cmd;
355 
356 	cmd = sc->curcmd;
357 	if (cmd == NULL)
358 		return;
359 
360 	if (!sc->cmd_done)
361 		return;
362 
363 	if (cmd->error != MMC_ERR_NONE || !cmd->data) {
364 		dwmmc_next_operation(sc);
365 	} else if (cmd->data && sc->dto_rcvd) {
366 		if ((cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK ||
367 		     cmd->opcode == MMC_READ_MULTIPLE_BLOCK) &&
368 		     sc->use_auto_stop) {
369 			if (sc->acd_rcvd)
370 				dwmmc_next_operation(sc);
371 		} else {
372 			dwmmc_next_operation(sc);
373 		}
374 	}
375 }
376 
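/*
 * Interrupt handler: latch command/data completion and error state from
 * MINTSTS (and IDSTS in DMA mode) into the softc, service PIO FIFO
 * events, then let dwmmc_tasklet() advance the current request.
 */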
377 static void
378 dwmmc_intr(void *arg)
379 {
380 	struct mmc_command *cmd;
381 	struct dwmmc_softc *sc;
382 	uint32_t reg;
383 
384 	sc = arg;
385 
386 	DWMMC_LOCK(sc);
387 
388 	cmd = sc->curcmd;
389 
390 	/* First handle SDMMC controller interrupts */
391 	reg = READ4(sc, SDMMC_MINTSTS);
392 	if (reg) {
393 		dprintf("%s 0x%08x\n", __func__, reg);
394 
395 		if (reg & DWMMC_CMD_ERR_FLAGS) {
396 			dprintf("cmd err 0x%08x cmd 0x%08x\n",
397 				reg, cmd->opcode);
398 			cmd->error = MMC_ERR_TIMEOUT;
399 		}
400 
401 		if (reg & DWMMC_DATA_ERR_FLAGS) {
402 			dprintf("data err 0x%08x cmd 0x%08x\n",
403 				reg, cmd->opcode);
404 			cmd->error = MMC_ERR_FAILED;
405 			if (!sc->use_pio) {
406 				dma_done(sc, cmd);
407 				dma_stop(sc);
408 			}
409 		}
410 
411 		if (reg & SDMMC_INTMASK_CMD_DONE) {
412 			dwmmc_cmd_done(sc);
413 			sc->cmd_done = 1;
414 		}
415 
416 		if (reg & SDMMC_INTMASK_ACD)
417 			sc->acd_rcvd = 1;
418 
419 		if (reg & SDMMC_INTMASK_DTO)
420 			sc->dto_rcvd = 1;
421 
422 		if (reg & SDMMC_INTMASK_CD) {
423 			dwmmc_handle_card_present(sc,
424 			    READ4(sc, SDMMC_CDETECT) == 0);
425 		}
426 	}
427 
428 	/* Ack interrupts */
429 	WRITE4(sc, SDMMC_RINTSTS, reg);
430 
431 	if (sc->use_pio) {
432 		if (reg & (SDMMC_INTMASK_RXDR|SDMMC_INTMASK_DTO)) {
433 			pio_read(sc, cmd);
434 		}
435 		if (reg & (SDMMC_INTMASK_TXDR|SDMMC_INTMASK_DTO)) {
436 			pio_write(sc, cmd);
437 		}
438 	} else {
439 		/* Now handle DMA interrupts */
440 		reg = READ4(sc, SDMMC_IDSTS);
441 		if (reg) {
442 			dprintf("dma intr 0x%08x\n", reg);
443 			if (reg & (SDMMC_IDINTEN_TI | SDMMC_IDINTEN_RI)) {
444 				WRITE4(sc, SDMMC_IDSTS, (SDMMC_IDINTEN_TI |
445 							 SDMMC_IDINTEN_RI));
446 				WRITE4(sc, SDMMC_IDSTS, SDMMC_IDINTEN_NI);
447 				dma_done(sc, cmd);
448 			}
449 		}
450 	}
451 
452 	dwmmc_tasklet(sc);
453 
454 	DWMMC_UNLOCK(sc);
455 }
456 
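/*
 * Card-detect transition: an insertion is reported through the delayed
 * task roughly half a second later, while a removal triggers the card
 * task immediately.
 */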
457 static void
458 dwmmc_handle_card_present(struct dwmmc_softc *sc, bool is_present)
459 {
460 	bool was_present;
461 
462 	if (dumping || SCHEDULER_STOPPED())
463 		return;
464 
465 	was_present = sc->child != NULL;
466 
467 	if (!was_present && is_present) {
468 		taskqueue_enqueue_timeout(taskqueue_swi_giant,
469 		  &sc->card_delayed_task, -(hz / 2));
470 	} else if (was_present && !is_present) {
471 		taskqueue_enqueue(taskqueue_swi_giant, &sc->card_task);
472 	}
473 }
474 
475 static void
476 dwmmc_card_task(void *arg, int pending __unused)
477 {
478 	struct dwmmc_softc *sc = arg;
479 
480 #ifdef MMCCAM
481 	mmc_cam_sim_discover(&sc->mmc_sim);
482 #else
483 	DWMMC_LOCK(sc);
484 
485 	if (READ4(sc, SDMMC_CDETECT) == 0 ||
486 	    (sc->mmc_helper.props & MMC_PROP_BROKEN_CD)) {
487 		if (sc->child == NULL) {
488 			if (bootverbose)
489 				device_printf(sc->dev, "Card inserted\n");
490 
491 			sc->child = device_add_child(sc->dev, "mmc", -1);
492 			DWMMC_UNLOCK(sc);
493 			if (sc->child) {
494 				device_set_ivars(sc->child, sc);
495 				(void)device_probe_and_attach(sc->child);
496 			}
497 		} else
498 			DWMMC_UNLOCK(sc);
499 	} else {
500 		/* Card isn't present, detach if necessary */
501 		if (sc->child != NULL) {
502 			if (bootverbose)
503 				device_printf(sc->dev, "Card removed\n");
504 
505 			DWMMC_UNLOCK(sc);
506 			device_delete_child(sc->dev, sc->child);
507 			sc->child = NULL;
508 		} else
509 			DWMMC_UNLOCK(sc);
510 	}
511 #endif /* MMCCAM */
512 }
513 
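/*
 * Collect the generic MMC properties plus the dwmmc-specific ones
 * (fifo-depth, clock-frequency, optional reset, vmmc/vqmmc regulators
 * and the biu/ciu clocks) from the device tree node.
 */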
514 static int
515 parse_fdt(struct dwmmc_softc *sc)
516 {
517 	pcell_t dts_value[3];
518 	phandle_t node;
519 	uint32_t bus_hz = 0;
520 	int len;
521 	int error;
522 
523 	if ((node = ofw_bus_get_node(sc->dev)) == -1)
524 		return (ENXIO);
525 
526 	/* Set some defaults for freq and supported mode */
527 	sc->host.f_min = 400000;
528 	sc->host.f_max = 200000000;
529 	sc->host.host_ocr = MMC_OCR_320_330 | MMC_OCR_330_340;
530 	sc->host.caps = MMC_CAP_HSPEED | MMC_CAP_SIGNALING_330;
531 	mmc_fdt_parse(sc->dev, node, &sc->mmc_helper, &sc->host);
532 
533 	/* fifo-depth */
534 	if ((len = OF_getproplen(node, "fifo-depth")) > 0) {
535 		OF_getencprop(node, "fifo-depth", dts_value, len);
536 		sc->fifo_depth = dts_value[0];
537 	}
538 
539 	/* num-slots (Deprecated) */
540 	sc->num_slots = 1;
541 	if ((len = OF_getproplen(node, "num-slots")) > 0) {
542 		device_printf(sc->dev, "num-slots property is deprecated\n");
543 		OF_getencprop(node, "num-slots", dts_value, len);
544 		sc->num_slots = dts_value[0];
545 	}
546 
547 	/* clock-frequency */
548 	if ((len = OF_getproplen(node, "clock-frequency")) > 0) {
549 		OF_getencprop(node, "clock-frequency", dts_value, len);
550 		bus_hz = dts_value[0];
551 	}
552 
553 	/* IP block reset is optional */
554 	error = hwreset_get_by_ofw_name(sc->dev, 0, "reset", &sc->hwreset);
555 	if (error != 0 &&
556 	    error != ENOENT &&
557 	    error != ENODEV) {
558 		device_printf(sc->dev, "Cannot get reset\n");
559 		goto fail;
560 	}
561 
562 	/* vmmc regulator is optional */
563 	error = regulator_get_by_ofw_property(sc->dev, 0, "vmmc-supply",
564 	     &sc->vmmc);
565 	if (error != 0 &&
566 	    error != ENOENT &&
567 	    error != ENODEV) {
568 		device_printf(sc->dev, "Cannot get regulator 'vmmc-supply'\n");
569 		goto fail;
570 	}
571 
572 	/* vqmmc regulator is optional */
573 	error = regulator_get_by_ofw_property(sc->dev, 0, "vqmmc-supply",
574 	     &sc->vqmmc);
575 	if (error != 0 &&
576 	    error != ENOENT &&
577 	    error != ENODEV) {
578 		device_printf(sc->dev, "Cannot get regulator 'vqmmc-supply'\n");
579 		goto fail;
580 	}
581 
582 	/* Assert reset first */
583 	if (sc->hwreset != NULL) {
584 		error = hwreset_assert(sc->hwreset);
585 		if (error != 0) {
586 			device_printf(sc->dev, "Cannot assert reset\n");
587 			goto fail;
588 		}
589 	}
590 
591 	/* BIU (Bus Interface Unit clock) is optional */
592 	error = clk_get_by_ofw_name(sc->dev, 0, "biu", &sc->biu);
593 	if (error != 0 &&
594 	    error != ENOENT &&
595 	    error != ENODEV) {
596 		device_printf(sc->dev, "Cannot get 'biu' clock\n");
597 		goto fail;
598 	}
599 
600 	if (sc->biu) {
601 		error = clk_enable(sc->biu);
602 		if (error != 0) {
603 			device_printf(sc->dev, "cannot enable biu clock\n");
604 			goto fail;
605 		}
606 	}
607 
608 	/*
609 	 * CIU (Controller Interface Unit clock) is mandatory
610 	 * if no clock-frequency property is given
611 	 */
612 	error = clk_get_by_ofw_name(sc->dev, 0, "ciu", &sc->ciu);
613 	if (error != 0 &&
614 	    error != ENOENT &&
615 	    error != ENODEV) {
616 		device_printf(sc->dev, "Cannot get 'ciu' clock\n");
617 		goto fail;
618 	}
619 
620 	if (sc->ciu) {
621 		if (bus_hz != 0) {
622 			error = clk_set_freq(sc->ciu, bus_hz, 0);
623 			if (error != 0)
624 				device_printf(sc->dev,
625 				    "cannot set ciu clock to %u\n", bus_hz);
626 		}
627 		error = clk_enable(sc->ciu);
628 		if (error != 0) {
629 			device_printf(sc->dev, "cannot enable ciu clock\n");
630 			goto fail;
631 		}
632 		clk_get_freq(sc->ciu, &sc->bus_hz);
633 	}
634 
635 	/* Enable regulators */
636 	if (sc->vmmc != NULL) {
637 		error = regulator_enable(sc->vmmc);
638 		if (error != 0) {
639 			device_printf(sc->dev, "Cannot enable vmmc regulator\n");
640 			goto fail;
641 		}
642 	}
643 	if (sc->vqmmc != NULL) {
644 		error = regulator_enable(sc->vqmmc);
645 		if (error != 0) {
646 			device_printf(sc->dev, "Cannot enable vqmmc regulator\n");
647 			goto fail;
648 		}
649 	}
650 
651 	/* Take dwmmc out of reset */
652 	if (sc->hwreset != NULL) {
653 		error = hwreset_deassert(sc->hwreset);
654 		if (error != 0) {
655 			device_printf(sc->dev, "Cannot deassert reset\n");
656 			goto fail;
657 		}
658 	}
659 
660 	if (sc->bus_hz == 0) {
661 		device_printf(sc->dev, "No bus speed provided\n");
662 		goto fail;
663 	}
664 
665 	return (0);
666 
667 fail:
668 	return (ENXIO);
669 }
670 
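/*
 * Generic attach path used by the SoC-specific front ends: parse the FDT
 * properties, map resources, reset the controller, set up DMA (unless
 * PIO is forced) and unmask the interrupts we care about.
 */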
671 int
672 dwmmc_attach(device_t dev)
673 {
674 	struct dwmmc_softc *sc;
675 	int error;
676 
677 	sc = device_get_softc(dev);
678 
679 	sc->dev = dev;
680 
681 	/* Use Auto Stop: it saves hundreds of interrupts per second. */
682 	sc->use_auto_stop = 1;
683 
684 	error = parse_fdt(sc);
685 	if (error != 0) {
686 		device_printf(dev, "Can't get FDT property.\n");
687 		return (ENXIO);
688 	}
689 
690 	DWMMC_LOCK_INIT(sc);
691 
692 	if (bus_alloc_resources(dev, dwmmc_spec, sc->res)) {
693 		device_printf(dev, "could not allocate resources\n");
694 		return (ENXIO);
695 	}
696 
697 	/* Setup interrupt handler. */
698 	error = bus_setup_intr(dev, sc->res[1], INTR_TYPE_NET | INTR_MPSAFE,
699 	    NULL, dwmmc_intr, sc, &sc->intr_cookie);
700 	if (error != 0) {
701 		device_printf(dev, "could not setup interrupt handler.\n");
702 		return (ENXIO);
703 	}
704 
705 	device_printf(dev, "Hardware version ID is %04x\n",
706 		READ4(sc, SDMMC_VERID) & 0xffff);
707 
708 	/* Reset all */
709 	if (dwmmc_ctrl_reset(sc, (SDMMC_CTRL_RESET |
710 				  SDMMC_CTRL_FIFO_RESET |
711 				  SDMMC_CTRL_DMA_RESET)))
712 		return (ENXIO);
713 
714 	dwmmc_setup_bus(sc, sc->host.f_min);
715 
716 	if (sc->fifo_depth == 0) {
717 		sc->fifo_depth = 1 +
718 		    ((READ4(sc, SDMMC_FIFOTH) >> SDMMC_FIFOTH_RXWMARK_S) & 0xfff);
719 		device_printf(dev, "No fifo-depth, using FIFOTH %x\n",
720 		    sc->fifo_depth);
721 	}
722 
723 	if (!sc->use_pio) {
724 		dma_stop(sc);
725 		if (dma_setup(sc))
726 			return (ENXIO);
727 
728 		/* Install desc base */
729 		WRITE4(sc, SDMMC_DBADDR, sc->desc_ring_paddr);
730 
731 		/* Enable DMA interrupts */
732 		WRITE4(sc, SDMMC_IDSTS, SDMMC_IDINTEN_MASK);
733 		WRITE4(sc, SDMMC_IDINTEN, (SDMMC_IDINTEN_NI |
734 					   SDMMC_IDINTEN_RI |
735 					   SDMMC_IDINTEN_TI));
736 	}
737 
738 	/* Clear and disable interrupts for a while. */
739 	WRITE4(sc, SDMMC_RINTSTS, 0xffffffff);
740 	WRITE4(sc, SDMMC_INTMASK, 0);
741 
742 	/* Maximum timeout */
743 	WRITE4(sc, SDMMC_TMOUT, 0xffffffff);
744 
745 	/* Enable interrupts */
746 	WRITE4(sc, SDMMC_RINTSTS, 0xffffffff);
747 	WRITE4(sc, SDMMC_INTMASK, (SDMMC_INTMASK_CMD_DONE |
748 				   SDMMC_INTMASK_DTO |
749 				   SDMMC_INTMASK_ACD |
750 				   SDMMC_INTMASK_TXDR |
751 				   SDMMC_INTMASK_RXDR |
752 				   DWMMC_ERR_FLAGS |
753 				   SDMMC_INTMASK_CD));
754 	WRITE4(sc, SDMMC_CTRL, SDMMC_CTRL_INT_ENABLE);
755 
756 	TASK_INIT(&sc->card_task, 0, dwmmc_card_task, sc);
757 	TIMEOUT_TASK_INIT(taskqueue_swi_giant, &sc->card_delayed_task, 0,
758 		dwmmc_card_task, sc);
759 
760 #ifdef MMCCAM
761 	sc->ccb = NULL;
762 	if (mmc_cam_sim_alloc(dev, "dw_mmc", &sc->mmc_sim) != 0) {
763 		device_printf(dev, "cannot alloc cam sim\n");
764 		dwmmc_detach(dev);
765 		return (ENXIO);
766 	}
767 #endif
768 	/*
769 	 * Schedule a card detection as we won't get an interrupt
770 	 * if the card is inserted when we attach
771 	 */
772 	dwmmc_card_task(sc, 0);
773 	return (0);
774 }
775 
776 int
777 dwmmc_detach(device_t dev)
778 {
779 	struct dwmmc_softc *sc;
780 	int ret;
781 
782 	sc = device_get_softc(dev);
783 
784 	ret = device_delete_children(dev);
785 	if (ret != 0)
786 		return (ret);
787 
788 	taskqueue_drain(taskqueue_swi_giant, &sc->card_task);
789 	taskqueue_drain_timeout(taskqueue_swi_giant, &sc->card_delayed_task);
790 
791 	if (sc->intr_cookie != NULL) {
792 		ret = bus_teardown_intr(dev, sc->res[1], sc->intr_cookie);
793 		if (ret != 0)
794 			return (ret);
795 	}
796 	bus_release_resources(dev, dwmmc_spec, sc->res);
797 
798 	DWMMC_LOCK_DESTROY(sc);
799 
800 	if (sc->hwreset != NULL && hwreset_deassert(sc->hwreset) != 0)
801 		device_printf(sc->dev, "cannot deassert reset\n");
802 	if (sc->biu != NULL && clk_disable(sc->biu) != 0)
803 		device_printf(sc->dev, "cannot disable biu clock\n");
804 	if (sc->ciu != NULL && clk_disable(sc->ciu) != 0)
805 			device_printf(sc->dev, "cannot disable ciu clock\n");
806 
807 	if (sc->vmmc && regulator_disable(sc->vmmc) != 0)
808 		device_printf(sc->dev, "Cannot disable vmmc regulator\n");
809 	if (sc->vqmmc && regulator_disable(sc->vqmmc) != 0)
810 		device_printf(sc->dev, "Cannot disable vqmmc regulator\n");
811 
812 #ifdef MMCCAM
813 	mmc_cam_sim_free(&sc->mmc_sim);
814 #endif
815 
816 	return (0);
817 }
818 
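/*
 * (Re)program the card clock.  Every CLKDIV/CLKENA change has to be
 * latched into the card clock domain with an "update clock only"
 * command, which is what the polling loops below wait for.
 */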
819 static int
820 dwmmc_setup_bus(struct dwmmc_softc *sc, int freq)
821 {
822 	int tout;
823 	int div;
824 
825 	if (freq == 0) {
826 		WRITE4(sc, SDMMC_CLKENA, 0);
827 		WRITE4(sc, SDMMC_CMD, (SDMMC_CMD_WAIT_PRVDATA |
828 			SDMMC_CMD_UPD_CLK_ONLY | SDMMC_CMD_START));
829 
830 		tout = 1000;
831 		do {
832 			if (tout-- < 0) {
833 				device_printf(sc->dev, "Failed update clk\n");
834 				return (1);
835 			}
836 		} while (READ4(sc, SDMMC_CMD) & SDMMC_CMD_START);
837 
838 		return (0);
839 	}
840 
841 	WRITE4(sc, SDMMC_CLKENA, 0);
842 	WRITE4(sc, SDMMC_CLKSRC, 0);
843 
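	/*
	 * CLKDIV divides the CIU clock by 2 * n (n == 0 means bypass).
	 * Rounding up keeps the card clock at or below the requested
	 * frequency, e.g. bus_hz = 50 MHz and freq = 400 kHz gives
	 * div = 63, i.e. 50 MHz / 126 ~= 397 kHz.
	 */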
844 	div = (sc->bus_hz != freq) ? DIV_ROUND_UP(sc->bus_hz, 2 * freq) : 0;
845 
846 	WRITE4(sc, SDMMC_CLKDIV, div);
847 	WRITE4(sc, SDMMC_CMD, (SDMMC_CMD_WAIT_PRVDATA |
848 			SDMMC_CMD_UPD_CLK_ONLY | SDMMC_CMD_START));
849 
850 	tout = 1000;
851 	do {
852 		if (tout-- < 0) {
853 			device_printf(sc->dev, "Failed to update clk\n");
854 			return (1);
855 		}
856 	} while (READ4(sc, SDMMC_CMD) & SDMMC_CMD_START);
857 
858 	WRITE4(sc, SDMMC_CLKENA, (SDMMC_CLKENA_CCLK_EN | SDMMC_CLKENA_LP));
859 	WRITE4(sc, SDMMC_CMD, SDMMC_CMD_WAIT_PRVDATA |
860 			SDMMC_CMD_UPD_CLK_ONLY | SDMMC_CMD_START);
861 
862 	tout = 1000;
863 	do {
864 		if (tout-- < 0) {
865 			device_printf(sc->dev, "Failed to enable clk\n");
866 			return (1);
867 		}
868 	} while (READ4(sc, SDMMC_CMD) & SDMMC_CMD_START);
869 
870 	return (0);
871 }
872 
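/*
 * mmcbr update_ios: apply power mode, bus width and DDR timing, give the
 * SoC-specific update_ios hook a chance to run, then reprogram the bus
 * clock via dwmmc_setup_bus().
 */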
873 static int
874 dwmmc_update_ios(device_t brdev, device_t reqdev)
875 {
876 	struct dwmmc_softc *sc;
877 	struct mmc_ios *ios;
878 	uint32_t reg;
879 	int ret = 0;
880 
881 	sc = device_get_softc(brdev);
882 	ios = &sc->host.ios;
883 
884 	dprintf("Setting up clk %u bus_width %d, timming: %d\n",
885 		ios->clock, ios->bus_width, ios->timing);
886 
887 	switch (ios->power_mode) {
888 	case power_on:
889 		break;
890 	case power_off:
891 		WRITE4(sc, SDMMC_PWREN, 0);
892 		break;
893 	case power_up:
894 		WRITE4(sc, SDMMC_PWREN, 1);
895 		break;
896 	}
897 
898 	mmc_fdt_set_power(&sc->mmc_helper, ios->power_mode);
899 
900 	if (ios->bus_width == bus_width_8)
901 		WRITE4(sc, SDMMC_CTYPE, SDMMC_CTYPE_8BIT);
902 	else if (ios->bus_width == bus_width_4)
903 		WRITE4(sc, SDMMC_CTYPE, SDMMC_CTYPE_4BIT);
904 	else
905 		WRITE4(sc, SDMMC_CTYPE, 0);
906 
907 	if ((sc->hwtype & HWTYPE_MASK) == HWTYPE_EXYNOS) {
908 		/* XXX: take care about DDR or SDR use here */
909 		WRITE4(sc, SDMMC_CLKSEL, sc->sdr_timing);
910 	}
911 
912 	/* Set DDR mode */
913 	reg = READ4(sc, SDMMC_UHS_REG);
914 	if (ios->timing == bus_timing_uhs_ddr50 ||
915 	    ios->timing == bus_timing_mmc_ddr52 ||
916 	    ios->timing == bus_timing_mmc_hs400)
917 		reg |= (SDMMC_UHS_REG_DDR);
918 	else
919 		reg &= ~(SDMMC_UHS_REG_DDR);
920 	WRITE4(sc, SDMMC_UHS_REG, reg);
921 
922 	if (sc->update_ios)
923 		ret = sc->update_ios(sc, ios);
924 
925 	dwmmc_setup_bus(sc, ios->clock);
926 
927 	return (ret);
928 }
929 
930 static int
931 dma_done(struct dwmmc_softc *sc, struct mmc_command *cmd)
932 {
933 	struct mmc_data *data;
934 
935 	data = cmd->data;
936 
937 	if (data->flags & MMC_DATA_WRITE)
938 		bus_dmamap_sync(sc->buf_tag, sc->buf_map,
939 			BUS_DMASYNC_POSTWRITE);
940 	else
941 		bus_dmamap_sync(sc->buf_tag, sc->buf_map,
942 			BUS_DMASYNC_POSTREAD);
943 
944 	bus_dmamap_sync(sc->desc_tag, sc->desc_map,
945 	    BUS_DMASYNC_POSTWRITE);
946 
947 	bus_dmamap_unload(sc->buf_tag, sc->buf_map);
948 
949 	return (0);
950 }
951 
952 static int
953 dma_stop(struct dwmmc_softc *sc)
954 {
955 	int reg;
956 
957 	reg = READ4(sc, SDMMC_CTRL);
958 	reg &= ~(SDMMC_CTRL_USE_IDMAC);
959 	reg |= (SDMMC_CTRL_DMA_RESET);
960 	WRITE4(sc, SDMMC_CTRL, reg);
961 
962 	reg = READ4(sc, SDMMC_BMOD);
963 	reg &= ~(SDMMC_BMOD_DE | SDMMC_BMOD_FB);
964 	reg |= (SDMMC_BMOD_SWR);
965 	WRITE4(sc, SDMMC_BMOD, reg);
966 
967 	return (0);
968 }
969 
970 static int
971 dma_prepare(struct dwmmc_softc *sc, struct mmc_command *cmd)
972 {
973 	struct mmc_data *data;
974 	int err;
975 	int reg;
976 
977 	data = cmd->data;
978 
979 	reg = READ4(sc, SDMMC_INTMASK);
980 	reg &= ~(SDMMC_INTMASK_TXDR | SDMMC_INTMASK_RXDR);
981 	WRITE4(sc, SDMMC_INTMASK, reg);
982 	dprintf("%s: bus_dmamap_load size: %zu\n", __func__, data->len);
983 	err = bus_dmamap_load(sc->buf_tag, sc->buf_map,
984 		data->data, data->len, dwmmc_ring_setup,
985 		sc, BUS_DMA_NOWAIT);
986 	if (err != 0)
987 		panic("dmamap_load failed\n");
988 
989 	/* Ensure the device can see the desc */
990 	bus_dmamap_sync(sc->desc_tag, sc->desc_map,
991 	    BUS_DMASYNC_PREWRITE);
992 
993 	if (data->flags & MMC_DATA_WRITE)
994 		bus_dmamap_sync(sc->buf_tag, sc->buf_map,
995 			BUS_DMASYNC_PREWRITE);
996 	else
997 		bus_dmamap_sync(sc->buf_tag, sc->buf_map,
998 			BUS_DMASYNC_PREREAD);
999 
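	/*
	 * Program the FIFO watermarks at roughly half the FIFO depth and
	 * the burst (multiple transaction) size used by the internal DMA
	 * controller, then hand the descriptor chain to the IDMAC.
	 */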
1000 	reg = (DEF_MSIZE << SDMMC_FIFOTH_MSIZE_S);
1001 	reg |= ((sc->fifo_depth / 2) - 1) << SDMMC_FIFOTH_RXWMARK_S;
1002 	reg |= (sc->fifo_depth / 2) << SDMMC_FIFOTH_TXWMARK_S;
1003 
1004 	WRITE4(sc, SDMMC_FIFOTH, reg);
1005 	wmb();
1006 
1007 	reg = READ4(sc, SDMMC_CTRL);
1008 	reg |= (SDMMC_CTRL_USE_IDMAC | SDMMC_CTRL_DMA_ENABLE);
1009 	WRITE4(sc, SDMMC_CTRL, reg);
1010 	wmb();
1011 
1012 	reg = READ4(sc, SDMMC_BMOD);
1013 	reg |= (SDMMC_BMOD_DE | SDMMC_BMOD_FB);
1014 	WRITE4(sc, SDMMC_BMOD, reg);
1015 
1016 	/* Start */
1017 	WRITE4(sc, SDMMC_PLDMND, 1);
1018 
1019 	return (0);
1020 }
1021 
1022 static int
1023 pio_prepare(struct dwmmc_softc *sc, struct mmc_command *cmd)
1024 {
1025 	struct mmc_data *data;
1026 	int reg;
1027 
1028 	data = cmd->data;
1029 	data->xfer_len = 0;
1030 
1031 	reg = (DEF_MSIZE << SDMMC_FIFOTH_MSIZE_S);
1032 	reg |= ((sc->fifo_depth / 2) - 1) << SDMMC_FIFOTH_RXWMARK_S;
1033 	reg |= (sc->fifo_depth / 2) << SDMMC_FIFOTH_TXWMARK_S;
1034 
1035 	WRITE4(sc, SDMMC_FIFOTH, reg);
1036 	wmb();
1037 
1038 	return (0);
1039 }
1040 
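/*
 * Drain the receive FIFO one 32-bit word at a time until it is empty or
 * the transfer is complete; pio_write() below is the mirror image for
 * the transmit FIFO.
 */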
1041 static void
1042 pio_read(struct dwmmc_softc *sc, struct mmc_command *cmd)
1043 {
1044 	struct mmc_data *data;
1045 	uint32_t *p, status;
1046 
1047 	if (cmd == NULL || cmd->data == NULL)
1048 		return;
1049 
1050 	data = cmd->data;
1051 	if ((data->flags & MMC_DATA_READ) == 0)
1052 		return;
1053 
1054 	KASSERT((data->xfer_len & 3) == 0, ("xfer_len not aligned"));
1055 	p = (uint32_t *)data->data + (data->xfer_len >> 2);
1056 
1057 	while (data->xfer_len < data->len) {
1058 		status = READ4(sc, SDMMC_STATUS);
1059 		if (status & SDMMC_STATUS_FIFO_EMPTY)
1060 			break;
1061 		*p++ = READ4(sc, SDMMC_DATA);
1062 		data->xfer_len += 4;
1063 	}
1064 
1065 	WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_RXDR);
1066 }
1067 
1068 static void
1069 pio_write(struct dwmmc_softc *sc, struct mmc_command *cmd)
1070 {
1071 	struct mmc_data *data;
1072 	uint32_t *p, status;
1073 
1074 	if (cmd == NULL || cmd->data == NULL)
1075 		return;
1076 
1077 	data = cmd->data;
1078 	if ((data->flags & MMC_DATA_WRITE) == 0)
1079 		return;
1080 
1081 	KASSERT((data->xfer_len & 3) == 0, ("xfer_len not aligned"));
1082 	p = (uint32_t *)data->data + (data->xfer_len >> 2);
1083 
1084 	while (data->xfer_len < data->len) {
1085 		status = READ4(sc, SDMMC_STATUS);
1086 		if (status & SDMMC_STATUS_FIFO_FULL)
1087 			break;
1088 		WRITE4(sc, SDMMC_DATA, *p++);
1089 		data->xfer_len += 4;
1090 	}
1091 
1092 	WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_TXDR);
1093 }
1094 
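/*
 * Translate an mmc_command into a CMD register value, program the byte
 * count, block size and the data path (PIO or IDMAC), then start the
 * command with SDMMC_CMD_START.
 */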
1095 static void
1096 dwmmc_start_cmd(struct dwmmc_softc *sc, struct mmc_command *cmd)
1097 {
1098 	struct mmc_data *data;
1099 	uint32_t blksz;
1100 	uint32_t cmdr;
1101 
1102 	dprintf("%s\n", __func__);
1103 	sc->curcmd = cmd;
1104 	data = cmd->data;
1105 
1106 #ifndef MMCCAM
1107 	/* XXX Upper layers don't always set this */
1108 	cmd->mrq = sc->req;
1109 #endif
1110 	/* Begin setting up command register. */
1111 
1112 	cmdr = cmd->opcode;
1113 
1114 	dprintf("cmd->opcode 0x%08x\n", cmd->opcode);
1115 
1116 	if (cmd->opcode == MMC_STOP_TRANSMISSION ||
1117 	    cmd->opcode == MMC_GO_IDLE_STATE ||
1118 	    cmd->opcode == MMC_GO_INACTIVE_STATE)
1119 		cmdr |= SDMMC_CMD_STOP_ABORT;
1120 	else if (cmd->opcode != MMC_SEND_STATUS && data)
1121 		cmdr |= SDMMC_CMD_WAIT_PRVDATA;
1122 
1123 	/* Set up response handling. */
1124 	if (MMC_RSP(cmd->flags) != MMC_RSP_NONE) {
1125 		cmdr |= SDMMC_CMD_RESP_EXP;
1126 		if (cmd->flags & MMC_RSP_136)
1127 			cmdr |= SDMMC_CMD_RESP_LONG;
1128 	}
1129 
1130 	if (cmd->flags & MMC_RSP_CRC)
1131 		cmdr |= SDMMC_CMD_RESP_CRC;
1132 
1133 	/*
1134 	 * XXX: Not all platforms want this.
1135 	 */
1136 	cmdr |= SDMMC_CMD_USE_HOLD_REG;
1137 
1138 	if ((sc->flags & CARD_INIT_DONE) == 0) {
1139 		sc->flags |= (CARD_INIT_DONE);
1140 		cmdr |= SDMMC_CMD_SEND_INIT;
1141 	}
1142 
1143 	if (data) {
1144 		if ((cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK ||
1145 		     cmd->opcode == MMC_READ_MULTIPLE_BLOCK) &&
1146 		     sc->use_auto_stop)
1147 			cmdr |= SDMMC_CMD_SEND_ASTOP;
1148 
1149 		cmdr |= SDMMC_CMD_DATA_EXP;
1150 		if (data->flags & MMC_DATA_STREAM)
1151 			cmdr |= SDMMC_CMD_MODE_STREAM;
1152 		if (data->flags & MMC_DATA_WRITE)
1153 			cmdr |= SDMMC_CMD_DATA_WRITE;
1154 
1155 		WRITE4(sc, SDMMC_TMOUT, 0xffffffff);
1156 #ifdef MMCCAM
1157 		if (cmd->data->flags & MMC_DATA_BLOCK_SIZE) {
1158 			WRITE4(sc, SDMMC_BLKSIZ, cmd->data->block_size);
1159 			WRITE4(sc, SDMMC_BYTCNT, cmd->data->len);
1160 		} else
1161 #endif
1162 		{
1163 			WRITE4(sc, SDMMC_BYTCNT, data->len);
1164 			blksz = (data->len < MMC_SECTOR_SIZE) ? \
1165 				data->len : MMC_SECTOR_SIZE;
1166 			WRITE4(sc, SDMMC_BLKSIZ, blksz);
1167 		}
1168 
1169 		if (sc->use_pio) {
1170 			pio_prepare(sc, cmd);
1171 		} else {
1172 			dma_prepare(sc, cmd);
1173 		}
1174 		wmb();
1175 	}
1176 
1177 	dprintf("cmdr 0x%08x\n", cmdr);
1178 
1179 	WRITE4(sc, SDMMC_CMDARG, cmd->arg);
1180 	wmb();
1181 	WRITE4(sc, SDMMC_CMD, cmdr | SDMMC_CMD_START);
1182 }
1183 
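/*
 * Advance the request state machine: wait for the card to go idle, issue
 * the pending command if there is one, otherwise complete the current
 * request (or CCB in the MMCCAM case).
 */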
1184 static void
1185 dwmmc_next_operation(struct dwmmc_softc *sc)
1186 {
1187 	struct mmc_command *cmd;
1188 	dprintf("%s\n", __func__);
1189 #ifdef MMCCAM
1190 	union ccb *ccb;
1191 
1192 	ccb = sc->ccb;
1193 	if (ccb == NULL)
1194 		return;
1195 	cmd = &ccb->mmcio.cmd;
1196 #else
1197 	struct mmc_request *req;
1198 
1199 	req = sc->req;
1200 	if (req == NULL)
1201 		return;
1202 	cmd = req->cmd;
1203 #endif
1204 
1205 	sc->acd_rcvd = 0;
1206 	sc->dto_rcvd = 0;
1207 	sc->cmd_done = 0;
1208 
1209 	/*
1210 	 * XXX: Wait while the card is still busy.
1211 	 * We need this to prevent data timeouts,
1212 	 * mostly caused by a multi-block write command
1213 	 * followed by a single read.
1214 	 */
1215 	while (READ4(sc, SDMMC_STATUS) & (SDMMC_STATUS_DATA_BUSY))
1216 		continue;
1217 
1218 	if (sc->flags & PENDING_CMD) {
1219 		sc->flags &= ~PENDING_CMD;
1220 		dwmmc_start_cmd(sc, cmd);
1221 		return;
1222 	} else if (sc->flags & PENDING_STOP && !sc->use_auto_stop) {
1223 		sc->flags &= ~PENDING_STOP;
1224 		/// XXX: What to do with this?
1225 		//dwmmc_start_cmd(sc, req->stop);
1226 		return;
1227 	}
1228 
1229 #ifdef MMCCAM
1230 	sc->ccb = NULL;
1231 	sc->curcmd = NULL;
1232 	ccb->ccb_h.status =
1233 		(ccb->mmcio.cmd.error == 0 ? CAM_REQ_CMP : CAM_REQ_CMP_ERR);
1234 	xpt_done(ccb);
1235 #else
1236 	sc->req = NULL;
1237 	sc->curcmd = NULL;
1238 	req->done(req);
1239 #endif
1240 }
1241 
1242 static int
1243 dwmmc_request(device_t brdev, device_t reqdev, struct mmc_request *req)
1244 {
1245 	struct dwmmc_softc *sc;
1246 
1247 	sc = device_get_softc(brdev);
1248 
1249 	dprintf("%s\n", __func__);
1250 
1251 	DWMMC_LOCK(sc);
1252 
1253 #ifdef MMCCAM
1254 	sc->flags |= PENDING_CMD;
1255 #else
1256 	if (sc->req != NULL) {
1257 		DWMMC_UNLOCK(sc);
1258 		return (EBUSY);
1259 	}
1260 
1261 	sc->req = req;
1262 	sc->flags |= PENDING_CMD;
1263 	if (sc->req->stop)
1264 		sc->flags |= PENDING_STOP;
1265 #endif
1266 	dwmmc_next_operation(sc);
1267 
1268 	DWMMC_UNLOCK(sc);
1269 	return (0);
1270 }
1271 
1272 #ifndef MMCCAM
1273 static int
1274 dwmmc_get_ro(device_t brdev, device_t reqdev)
1275 {
1276 
1277 	dprintf("%s\n", __func__);
1278 
1279 	return (0);
1280 }
1281 
1282 static int
1283 dwmmc_acquire_host(device_t brdev, device_t reqdev)
1284 {
1285 	struct dwmmc_softc *sc;
1286 
1287 	sc = device_get_softc(brdev);
1288 
1289 	DWMMC_LOCK(sc);
1290 	while (sc->bus_busy)
1291 		msleep(sc, &sc->sc_mtx, PZERO, "dwmmcah", hz / 5);
1292 	sc->bus_busy++;
1293 	DWMMC_UNLOCK(sc);
1294 	return (0);
1295 }
1296 
1297 static int
1298 dwmmc_release_host(device_t brdev, device_t reqdev)
1299 {
1300 	struct dwmmc_softc *sc;
1301 
1302 	sc = device_get_softc(brdev);
1303 
1304 	DWMMC_LOCK(sc);
1305 	sc->bus_busy--;
1306 	wakeup(sc);
1307 	DWMMC_UNLOCK(sc);
1308 	return (0);
1309 }
1310 #endif	/* !MMCCAM */
1311 
1312 static int
1313 dwmmc_read_ivar(device_t bus, device_t child, int which, uintptr_t *result)
1314 {
1315 	struct dwmmc_softc *sc;
1316 
1317 	sc = device_get_softc(bus);
1318 
1319 	switch (which) {
1320 	default:
1321 		return (EINVAL);
1322 	case MMCBR_IVAR_BUS_MODE:
1323 		*(int *)result = sc->host.ios.bus_mode;
1324 		break;
1325 	case MMCBR_IVAR_BUS_WIDTH:
1326 		*(int *)result = sc->host.ios.bus_width;
1327 		break;
1328 	case MMCBR_IVAR_CHIP_SELECT:
1329 		*(int *)result = sc->host.ios.chip_select;
1330 		break;
1331 	case MMCBR_IVAR_CLOCK:
1332 		*(int *)result = sc->host.ios.clock;
1333 		break;
1334 	case MMCBR_IVAR_F_MIN:
1335 		*(int *)result = sc->host.f_min;
1336 		break;
1337 	case MMCBR_IVAR_F_MAX:
1338 		*(int *)result = sc->host.f_max;
1339 		break;
1340 	case MMCBR_IVAR_HOST_OCR:
1341 		*(int *)result = sc->host.host_ocr;
1342 		break;
1343 	case MMCBR_IVAR_MODE:
1344 		*(int *)result = sc->host.mode;
1345 		break;
1346 	case MMCBR_IVAR_OCR:
1347 		*(int *)result = sc->host.ocr;
1348 		break;
1349 	case MMCBR_IVAR_POWER_MODE:
1350 		*(int *)result = sc->host.ios.power_mode;
1351 		break;
1352 	case MMCBR_IVAR_VDD:
1353 		*(int *)result = sc->host.ios.vdd;
1354 		break;
1355 	case MMCBR_IVAR_VCCQ:
1356 		*(int *)result = sc->host.ios.vccq;
1357 		break;
1358 	case MMCBR_IVAR_CAPS:
1359 		*(int *)result = sc->host.caps;
1360 		break;
1361 	case MMCBR_IVAR_MAX_DATA:
1362 		*(int *)result = DWMMC_MAX_DATA;
1363 		break;
1364 	case MMCBR_IVAR_TIMING:
1365 		*(int *)result = sc->host.ios.timing;
1366 		break;
1367 	}
1368 	return (0);
1369 }
1370 
1371 static int
1372 dwmmc_write_ivar(device_t bus, device_t child, int which, uintptr_t value)
1373 {
1374 	struct dwmmc_softc *sc;
1375 
1376 	sc = device_get_softc(bus);
1377 
1378 	switch (which) {
1379 	default:
1380 		return (EINVAL);
1381 	case MMCBR_IVAR_BUS_MODE:
1382 		sc->host.ios.bus_mode = value;
1383 		break;
1384 	case MMCBR_IVAR_BUS_WIDTH:
1385 		sc->host.ios.bus_width = value;
1386 		break;
1387 	case MMCBR_IVAR_CHIP_SELECT:
1388 		sc->host.ios.chip_select = value;
1389 		break;
1390 	case MMCBR_IVAR_CLOCK:
1391 		sc->host.ios.clock = value;
1392 		break;
1393 	case MMCBR_IVAR_MODE:
1394 		sc->host.mode = value;
1395 		break;
1396 	case MMCBR_IVAR_OCR:
1397 		sc->host.ocr = value;
1398 		break;
1399 	case MMCBR_IVAR_POWER_MODE:
1400 		sc->host.ios.power_mode = value;
1401 		break;
1402 	case MMCBR_IVAR_VDD:
1403 		sc->host.ios.vdd = value;
1404 		break;
1405 	case MMCBR_IVAR_TIMING:
1406 		sc->host.ios.timing = value;
1407 		break;
1408 	case MMCBR_IVAR_VCCQ:
1409 		sc->host.ios.vccq = value;
1410 		break;
1411 	/* These are read-only */
1412 	case MMCBR_IVAR_CAPS:
1413 	case MMCBR_IVAR_HOST_OCR:
1414 	case MMCBR_IVAR_F_MIN:
1415 	case MMCBR_IVAR_F_MAX:
1416 	case MMCBR_IVAR_MAX_DATA:
1417 		return (EINVAL);
1418 	}
1419 	return (0);
1420 }
1421 
1422 #ifdef MMCCAM
1423 /* Note: this function likely belongs to the specific driver impl */
1424 static int
1425 dwmmc_switch_vccq(device_t dev, device_t child)
1426 {
1427 	device_printf(dev, "This is a default impl of switch_vccq() that always fails\n");
1428 	return (EINVAL);
1429 }
1430 
1431 static int
1432 dwmmc_get_tran_settings(device_t dev, struct ccb_trans_settings_mmc *cts)
1433 {
1434 	struct dwmmc_softc *sc;
1435 
1436 	sc = device_get_softc(dev);
1437 
1438 	cts->host_ocr = sc->host.host_ocr;
1439 	cts->host_f_min = sc->host.f_min;
1440 	cts->host_f_max = sc->host.f_max;
1441 	cts->host_caps = sc->host.caps;
1442 	cts->host_max_data = DWMMC_MAX_DATA;
1443 	memcpy(&cts->ios, &sc->host.ios, sizeof(struct mmc_ios));
1444 
1445 	return (0);
1446 }
1447 
1448 static int
1449 dwmmc_set_tran_settings(device_t dev, struct ccb_trans_settings_mmc *cts)
1450 {
1451 	struct dwmmc_softc *sc;
1452 	struct mmc_ios *ios;
1453 	struct mmc_ios *new_ios;
1454 	int res;
1455 
1456 	sc = device_get_softc(dev);
1457 	ios = &sc->host.ios;
1458 
1459 	new_ios = &cts->ios;
1460 
1461 	/* Update only requested fields */
1462 	if (cts->ios_valid & MMC_CLK) {
1463 		ios->clock = new_ios->clock;
1464 		if (bootverbose)
1465 			device_printf(sc->dev, "Clock => %d\n", ios->clock);
1466 	}
1467 	if (cts->ios_valid & MMC_VDD) {
1468 		ios->vdd = new_ios->vdd;
1469 		if (bootverbose)
1470 			device_printf(sc->dev, "VDD => %d\n", ios->vdd);
1471 	}
1472 	if (cts->ios_valid & MMC_CS) {
1473 		ios->chip_select = new_ios->chip_select;
1474 		if (bootverbose)
1475 			device_printf(sc->dev, "CS => %d\n", ios->chip_select);
1476 	}
1477 	if (cts->ios_valid & MMC_BW) {
1478 		ios->bus_width = new_ios->bus_width;
1479 		if (bootverbose)
1480 			device_printf(sc->dev, "Bus width => %d\n", ios->bus_width);
1481 	}
1482 	if (cts->ios_valid & MMC_PM) {
1483 		ios->power_mode = new_ios->power_mode;
1484 		if (bootverbose)
1485 			device_printf(sc->dev, "Power mode => %d\n", ios->power_mode);
1486 	}
1487 	if (cts->ios_valid & MMC_BT) {
1488 		ios->timing = new_ios->timing;
1489 		if (bootverbose)
1490 			device_printf(sc->dev, "Timing => %d\n", ios->timing);
1491 	}
1492 	if (cts->ios_valid & MMC_BM) {
1493 		ios->bus_mode = new_ios->bus_mode;
1494 		if (bootverbose)
1495 			device_printf(sc->dev, "Bus mode => %d\n", ios->bus_mode);
1496 	}
1497 	if (cts->ios_valid & MMC_VCCQ) {
1498 		ios->vccq = new_ios->vccq;
1499 		if (bootverbose)
1500 			device_printf(sc->dev, "VCCQ => %d\n", ios->vccq);
1501 		res = dwmmc_switch_vccq(sc->dev, NULL);
1502 		device_printf(sc->dev, "VCCQ switch result: %d\n", res);
1503 	}
1504 
1505 	return (dwmmc_update_ios(sc->dev, NULL));
1506 }
1507 
1508 static int
1509 dwmmc_cam_request(device_t dev, union ccb *ccb)
1510 {
1511 	struct dwmmc_softc *sc;
1512 	struct ccb_mmcio *mmcio;
1513 
1514 	sc = device_get_softc(dev);
1515 	mmcio = &ccb->mmcio;
1516 
1517 	DWMMC_LOCK(sc);
1518 
1519 #ifdef DEBUG
1520 	if (__predict_false(bootverbose)) {
1521 		device_printf(sc->dev, "CMD%u arg %#x flags %#x dlen %u dflags %#x\n",
1522 			    mmcio->cmd.opcode, mmcio->cmd.arg, mmcio->cmd.flags,
1523 			    mmcio->cmd.data != NULL ? (unsigned int) mmcio->cmd.data->len : 0,
1524 			    mmcio->cmd.data != NULL ? mmcio->cmd.data->flags: 0);
1525 	}
1526 #endif
1527 	if (mmcio->cmd.data != NULL) {
1528 		if (mmcio->cmd.data->len == 0 || mmcio->cmd.data->flags == 0)
1529 			panic("data->len = %d, data->flags = %d -- something is b0rked",
1530 			      (int)mmcio->cmd.data->len, mmcio->cmd.data->flags);
1531 	}
1532 	if (sc->ccb != NULL) {
1533 		device_printf(sc->dev, "Controller still has an active command\n");
 		DWMMC_UNLOCK(sc);
1534 		return (EBUSY);
1535 	}
1536 	sc->ccb = ccb;
1537 	DWMMC_UNLOCK(sc);
1538 	dwmmc_request(sc->dev, NULL, NULL);
1539 
1540 	return (0);
1541 }
1542 
1543 static void
1544 dwmmc_cam_poll(device_t dev)
1545 {
1546 	struct dwmmc_softc *sc;
1547 
1548 	sc = device_get_softc(dev);
1549 	dwmmc_intr(sc);
1550 }
1551 #endif /* MMCCAM */
1552 
1553 static device_method_t dwmmc_methods[] = {
1554 	/* Bus interface */
1555 	DEVMETHOD(bus_read_ivar,	dwmmc_read_ivar),
1556 	DEVMETHOD(bus_write_ivar,	dwmmc_write_ivar),
1557 
1558 #ifndef MMCCAM
1559 	/* mmcbr_if */
1560 	DEVMETHOD(mmcbr_update_ios,	dwmmc_update_ios),
1561 	DEVMETHOD(mmcbr_request,	dwmmc_request),
1562 	DEVMETHOD(mmcbr_get_ro,		dwmmc_get_ro),
1563 	DEVMETHOD(mmcbr_acquire_host,	dwmmc_acquire_host),
1564 	DEVMETHOD(mmcbr_release_host,	dwmmc_release_host),
1565 #endif
1566 
1567 #ifdef MMCCAM
1568 	/* MMCCAM interface */
1569 	DEVMETHOD(mmc_sim_get_tran_settings,	dwmmc_get_tran_settings),
1570 	DEVMETHOD(mmc_sim_set_tran_settings,	dwmmc_set_tran_settings),
1571 	DEVMETHOD(mmc_sim_cam_request,		dwmmc_cam_request),
1572 	DEVMETHOD(mmc_sim_cam_poll,		dwmmc_cam_poll),
1573 
1574 	DEVMETHOD(bus_add_child,		bus_generic_add_child),
1575 #endif
1576 
1577 	DEVMETHOD_END
1578 };
1579 
1580 DEFINE_CLASS_0(dwmmc, dwmmc_driver, dwmmc_methods,
1581     sizeof(struct dwmmc_softc));
1582