xref: /freebsd/sys/dev/mmc/host/dwmmc.c (revision e17f5b1d)
1 /*-
2  * Copyright (c) 2014-2019 Ruslan Bukin <br@bsdpad.com>
3  * All rights reserved.
4  *
5  * This software was developed by SRI International and the University of
6  * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237)
7  * ("CTSRD"), as part of the DARPA CRASH research programme.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer.
14  * 2. Redistributions in binary form must reproduce the above copyright
15  *    notice, this list of conditions and the following disclaimer in the
16  *    documentation and/or other materials provided with the distribution.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
28  * SUCH DAMAGE.
29  */
30 
31 /*
32  * Synopsys DesignWare Mobile Storage Host Controller
33  * Chapter 14, Altera Cyclone V Device Handbook (CV-5V2 2014.07.22)
34  */
35 
36 #include <sys/cdefs.h>
37 __FBSDID("$FreeBSD$");
38 
39 #include <sys/param.h>
40 #include <sys/systm.h>
41 #include <sys/bus.h>
42 #include <sys/kernel.h>
43 #include <sys/lock.h>
44 #include <sys/module.h>
45 #include <sys/malloc.h>
46 #include <sys/mutex.h>
47 #include <sys/rman.h>
48 #include <sys/queue.h>
49 #include <sys/taskqueue.h>
50 
51 #include <dev/mmc/bridge.h>
52 #include <dev/mmc/mmcbrvar.h>
53 #include <dev/mmc/mmc_fdt_helpers.h>
54 
55 #include <dev/fdt/fdt_common.h>
56 #include <dev/ofw/openfirm.h>
57 #include <dev/ofw/ofw_bus.h>
58 #include <dev/ofw/ofw_bus_subr.h>
59 
60 #include <machine/bus.h>
61 #include <machine/cpu.h>
62 #include <machine/intr.h>
63 
64 #ifdef EXT_RESOURCES
65 #include <dev/extres/clk/clk.h>
66 #endif
67 
68 #include <dev/mmc/host/dwmmc_reg.h>
69 #include <dev/mmc/host/dwmmc_var.h>
70 
71 #include "mmcbr_if.h"
72 
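/* Debug printf: expands to nothing; redefine to printf for verbose debugging. */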
73 #define dprintf(x, arg...)
74 
75 #define	READ4(_sc, _reg) \
76 	bus_read_4((_sc)->res[0], _reg)
77 #define	WRITE4(_sc, _reg, _val) \
78 	bus_write_4((_sc)->res[0], _reg, _val)
79 
80 #define	DIV_ROUND_UP(n, d)		howmany(n, d)
81 
82 #define	DWMMC_LOCK(_sc)			mtx_lock(&(_sc)->sc_mtx)
83 #define	DWMMC_UNLOCK(_sc)		mtx_unlock(&(_sc)->sc_mtx)
84 #define	DWMMC_LOCK_INIT(_sc) \
85 	mtx_init(&_sc->sc_mtx, device_get_nameunit(_sc->dev), \
86 	    "dwmmc", MTX_DEF)
87 #define	DWMMC_LOCK_DESTROY(_sc)		mtx_destroy(&_sc->sc_mtx);
88 #define	DWMMC_ASSERT_LOCKED(_sc)	mtx_assert(&_sc->sc_mtx, MA_OWNED);
89 #define	DWMMC_ASSERT_UNLOCKED(_sc)	mtx_assert(&_sc->sc_mtx, MA_NOTOWNED);
90 
91 #define	PENDING_CMD	0x01
92 #define	PENDING_STOP	0x02
93 #define	CARD_INIT_DONE	0x04
94 
95 #define	DWMMC_DATA_ERR_FLAGS	(SDMMC_INTMASK_DRT | SDMMC_INTMASK_DCRC \
96 				|SDMMC_INTMASK_HTO | SDMMC_INTMASK_SBE \
97 				|SDMMC_INTMASK_EBE)
98 #define	DWMMC_CMD_ERR_FLAGS	(SDMMC_INTMASK_RTO | SDMMC_INTMASK_RCRC \
99 				|SDMMC_INTMASK_RE)
100 #define	DWMMC_ERR_FLAGS		(DWMMC_DATA_ERR_FLAGS | DWMMC_CMD_ERR_FLAGS \
101 				|SDMMC_INTMASK_HLE)
102 
103 #define	DES0_DIC	(1 << 1)	/* Disable Interrupt on Completion */
104 #define	DES0_LD		(1 << 2)	/* Last Descriptor */
105 #define	DES0_FS		(1 << 3)	/* First Descriptor */
106 #define	DES0_CH		(1 << 4)	/* second address CHained */
107 #define	DES0_ER		(1 << 5)	/* End of Ring */
108 #define	DES0_CES	(1 << 30)	/* Card Error Summary */
109 #define	DES0_OWN	(1 << 31)	/* OWN */
110 
111 #define	DES1_BS1_MASK	0x1fff
112 
113 struct idmac_desc {
114 	uint32_t	des0;	/* control */
115 	uint32_t	des1;	/* bufsize */
116 	uint32_t	des2;	/* buf1 phys addr */
117 	uint32_t	des3;	/* buf2 phys addr or next descr */
118 };
119 
120 #define	IDMAC_DESC_SEGS	(PAGE_SIZE / (sizeof(struct idmac_desc)))
121 #define	IDMAC_DESC_SIZE	(sizeof(struct idmac_desc) * IDMAC_DESC_SEGS)
122 #define	DEF_MSIZE	0x2	/* Burst size of multiple transaction */
123 #define	IDMAC_MAX_SIZE	4096
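/*
 * The IDMAC descriptor ring occupies a single page: PAGE_SIZE /
 * sizeof(struct idmac_desc) entries, each mapping at most IDMAC_MAX_SIZE
 * bytes.  This bounds the largest transfer advertised via
 * MMCBR_IVAR_MAX_DATA below.
 */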
124 
125 static void dwmmc_next_operation(struct dwmmc_softc *);
126 static int dwmmc_setup_bus(struct dwmmc_softc *, int);
127 static int dma_done(struct dwmmc_softc *, struct mmc_command *);
128 static int dma_stop(struct dwmmc_softc *);
129 static void pio_read(struct dwmmc_softc *, struct mmc_command *);
130 static void pio_write(struct dwmmc_softc *, struct mmc_command *);
131 static void dwmmc_handle_card_present(struct dwmmc_softc *sc, bool is_present);
132 
133 static struct resource_spec dwmmc_spec[] = {
134 	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },
135 	{ SYS_RES_IRQ,		0,	RF_ACTIVE },
136 	{ -1, 0 }
137 };
138 
139 #define	HWTYPE_MASK		(0x0000ffff)
140 #define	HWFLAG_MASK		(0xffff << 16)
141 
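/*
 * bus_dmamap_load() callback: record the physical address of the single
 * descriptor ring segment.
 */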
142 static void
143 dwmmc_get1paddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
144 {
145 
146 	if (error != 0)
147 		return;
148 	*(bus_addr_t *)arg = segs[0].ds_addr;
149 }
150 
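/*
 * bus_dmamap_load() callback: fill the IDMAC descriptor ring for one
 * transfer.  Every segment is marked owned by the controller (OWN) with
 * interrupt-on-completion disabled (DIC) and chained addressing (CH);
 * the first segment gets FS, and the last gets LD with DIC/CH cleared so
 * that only the final descriptor raises a completion interrupt.
 */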
151 static void
152 dwmmc_ring_setup(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
153 {
154 	struct dwmmc_softc *sc;
155 	int idx;
156 
157 	if (error != 0)
158 		return;
159 
160 	sc = arg;
161 
162 	dprintf("nsegs %d seg0len %lu\n", nsegs, segs[0].ds_len);
163 
164 	for (idx = 0; idx < nsegs; idx++) {
165 		sc->desc_ring[idx].des0 = (DES0_OWN | DES0_DIC | DES0_CH);
166 		sc->desc_ring[idx].des1 = segs[idx].ds_len & DES1_BS1_MASK;
167 		sc->desc_ring[idx].des2 = segs[idx].ds_addr;
168 
169 		if (idx == 0)
170 			sc->desc_ring[idx].des0 |= DES0_FS;
171 
172 		if (idx == (nsegs - 1)) {
173 			sc->desc_ring[idx].des0 &= ~(DES0_DIC | DES0_CH);
174 			sc->desc_ring[idx].des0 |= DES0_LD;
175 		}
176 	}
177 }
178 
179 static int
180 dwmmc_ctrl_reset(struct dwmmc_softc *sc, int reset_bits)
181 {
182 	int reg;
183 	int i;
184 
185 	reg = READ4(sc, SDMMC_CTRL);
186 	reg |= (reset_bits);
187 	WRITE4(sc, SDMMC_CTRL, reg);
188 
189 	/* Wait until the reset completes. */
190 	for (i = 0; i < 100; i++) {
191 		if (!(READ4(sc, SDMMC_CTRL) & reset_bits))
192 			return (0);
193 		DELAY(10);
194 	}
195 
196 	device_printf(sc->dev, "Reset failed\n");
197 
198 	return (1);
199 }
200 
201 static int
202 dma_setup(struct dwmmc_softc *sc)
203 {
204 	int error;
205 	int nidx;
206 	int idx;
207 
208 	/*
209 	 * Set up the descriptor ring, descriptors, and DMA maps.
210 	 */
211 	error = bus_dma_tag_create(
212 	    bus_get_dma_tag(sc->dev),	/* Parent tag. */
213 	    4096, 0,			/* alignment, boundary */
214 	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
215 	    BUS_SPACE_MAXADDR,		/* highaddr */
216 	    NULL, NULL,			/* filter, filterarg */
217 	    IDMAC_DESC_SIZE, 1,		/* maxsize, nsegments */
218 	    IDMAC_DESC_SIZE,		/* maxsegsize */
219 	    0,				/* flags */
220 	    NULL, NULL,			/* lockfunc, lockarg */
221 	    &sc->desc_tag);
222 	if (error != 0) {
223 		device_printf(sc->dev,
224 		    "could not create ring DMA tag.\n");
225 		return (1);
226 	}
227 
228 	error = bus_dmamem_alloc(sc->desc_tag, (void**)&sc->desc_ring,
229 	    BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO,
230 	    &sc->desc_map);
231 	if (error != 0) {
232 		device_printf(sc->dev,
233 		    "could not allocate descriptor ring.\n");
234 		return (1);
235 	}
236 
237 	error = bus_dmamap_load(sc->desc_tag, sc->desc_map,
238 	    sc->desc_ring, IDMAC_DESC_SIZE, dwmmc_get1paddr,
239 	    &sc->desc_ring_paddr, 0);
240 	if (error != 0) {
241 		device_printf(sc->dev,
242 		    "could not load descriptor ring map.\n");
243 		return (1);
244 	}
245 
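	/*
	 * Pre-link the descriptors into a circular chain: des3 of each
	 * entry holds the physical address of the next descriptor, and the
	 * last entry points back to the head and is marked End-of-Ring.
	 */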
246 	for (idx = 0; idx < IDMAC_DESC_SEGS; idx++) {
247 		sc->desc_ring[idx].des0 = DES0_CH;
248 		sc->desc_ring[idx].des1 = 0;
249 		nidx = (idx + 1) % IDMAC_DESC_SEGS;
250 		sc->desc_ring[idx].des3 = sc->desc_ring_paddr + \
251 		    (nidx * sizeof(struct idmac_desc));
252 	}
253 	sc->desc_ring[idx - 1].des3 = sc->desc_ring_paddr;
254 	sc->desc_ring[idx - 1].des0 |= DES0_ER;
255 
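	/*
	 * Create the tag and map used for data buffers: up to
	 * IDMAC_DESC_SEGS segments of at most IDMAC_MAX_SIZE bytes each,
	 * cache-line aligned and below 4GB, since the descriptors hold
	 * 32-bit buffer addresses.
	 */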
256 	error = bus_dma_tag_create(
257 	    bus_get_dma_tag(sc->dev),	/* Parent tag. */
258 	    CACHE_LINE_SIZE, 0,		/* alignment, boundary */
259 	    BUS_SPACE_MAXADDR_32BIT,	/* lowaddr */
260 	    BUS_SPACE_MAXADDR,		/* highaddr */
261 	    NULL, NULL,			/* filter, filterarg */
262 	    IDMAC_MAX_SIZE * IDMAC_DESC_SEGS,	/* maxsize */
263 	    IDMAC_DESC_SEGS,		/* nsegments */
264 	    IDMAC_MAX_SIZE,		/* maxsegsize */
265 	    0,				/* flags */
266 	    NULL, NULL,			/* lockfunc, lockarg */
267 	    &sc->buf_tag);
268 	if (error != 0) {
269 		device_printf(sc->dev,
270 		    "could not create data buffer DMA tag.\n");
271 		return (1);
272 	}
273 
274 	error = bus_dmamap_create(sc->buf_tag, 0,
275 	    &sc->buf_map);
276 	if (error != 0) {
277 		device_printf(sc->dev,
278 		    "could not create data buffer DMA map.\n");
279 		return (1);
280 	}
281 
282 	return (0);
283 }
284 
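/*
 * Latch the command response.  For 136-bit responses RESP0 holds the
 * least significant word, so the four registers map to resp[3..0].
 */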
285 static void
286 dwmmc_cmd_done(struct dwmmc_softc *sc)
287 {
288 	struct mmc_command *cmd;
289 
290 	cmd = sc->curcmd;
291 	if (cmd == NULL)
292 		return;
293 
294 	if (cmd->flags & MMC_RSP_PRESENT) {
295 		if (cmd->flags & MMC_RSP_136) {
296 			cmd->resp[3] = READ4(sc, SDMMC_RESP0);
297 			cmd->resp[2] = READ4(sc, SDMMC_RESP1);
298 			cmd->resp[1] = READ4(sc, SDMMC_RESP2);
299 			cmd->resp[0] = READ4(sc, SDMMC_RESP3);
300 		} else {
301 			cmd->resp[3] = 0;
302 			cmd->resp[2] = 0;
303 			cmd->resp[1] = 0;
304 			cmd->resp[0] = READ4(sc, SDMMC_RESP0);
305 		}
306 	}
307 }
308 
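/*
 * Decide whether the current request can advance.  The command phase must
 * have completed; commands without data (or with an error) advance
 * immediately, while data commands also wait for the data-transfer-over
 * (DTO) interrupt and, for multi-block transfers using auto-stop, the
 * auto-command-done (ACD) interrupt.
 */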
309 static void
310 dwmmc_tasklet(struct dwmmc_softc *sc)
311 {
312 	struct mmc_command *cmd;
313 
314 	cmd = sc->curcmd;
315 	if (cmd == NULL)
316 		return;
317 
318 	if (!sc->cmd_done)
319 		return;
320 
321 	if (cmd->error != MMC_ERR_NONE || !cmd->data) {
322 		dwmmc_next_operation(sc);
323 	} else if (cmd->data && sc->dto_rcvd) {
324 		if ((cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK ||
325 		     cmd->opcode == MMC_READ_MULTIPLE_BLOCK) &&
326 		     sc->use_auto_stop) {
327 			if (sc->acd_rcvd)
328 				dwmmc_next_operation(sc);
329 		} else {
330 			dwmmc_next_operation(sc);
331 		}
332 	}
333 }
334 
335 static void
336 dwmmc_intr(void *arg)
337 {
338 	struct mmc_command *cmd;
339 	struct dwmmc_softc *sc;
340 	uint32_t reg;
341 
342 	sc = arg;
343 
344 	DWMMC_LOCK(sc);
345 
346 	cmd = sc->curcmd;
347 
348 	/* First handle SDMMC controller interrupts */
349 	reg = READ4(sc, SDMMC_MINTSTS);
350 	if (reg) {
351 		dprintf("%s 0x%08x\n", __func__, reg);
352 
353 		if (reg & DWMMC_CMD_ERR_FLAGS) {
354 			dprintf("cmd err 0x%08x cmd 0x%08x\n",
355 				reg, cmd->opcode);
356 			cmd->error = MMC_ERR_TIMEOUT;
357 		}
358 
359 		if (reg & DWMMC_DATA_ERR_FLAGS) {
360 			dprintf("data err 0x%08x cmd 0x%08x\n",
361 				reg, cmd->opcode);
362 			cmd->error = MMC_ERR_FAILED;
363 			if (!sc->use_pio) {
364 				dma_done(sc, cmd);
365 				dma_stop(sc);
366 			}
367 		}
368 
369 		if (reg & SDMMC_INTMASK_CMD_DONE) {
370 			dwmmc_cmd_done(sc);
371 			sc->cmd_done = 1;
372 		}
373 
374 		if (reg & SDMMC_INTMASK_ACD)
375 			sc->acd_rcvd = 1;
376 
377 		if (reg & SDMMC_INTMASK_DTO)
378 			sc->dto_rcvd = 1;
379 
380 		if (reg & SDMMC_INTMASK_CD) {
381 			dwmmc_handle_card_present(sc,
382 			    READ4(sc, SDMMC_CDETECT) == 0);
383 		}
384 	}
385 
386 	/* Ack interrupts */
387 	WRITE4(sc, SDMMC_RINTSTS, reg);
388 
389 	if (sc->use_pio) {
390 		if (reg & (SDMMC_INTMASK_RXDR|SDMMC_INTMASK_DTO)) {
391 			pio_read(sc, cmd);
392 		}
393 		if (reg & (SDMMC_INTMASK_TXDR|SDMMC_INTMASK_DTO)) {
394 			pio_write(sc, cmd);
395 		}
396 	} else {
397 		/* Now handle DMA interrupts */
398 		reg = READ4(sc, SDMMC_IDSTS);
399 		if (reg) {
400 			dprintf("dma intr 0x%08x\n", reg);
401 			if (reg & (SDMMC_IDINTEN_TI | SDMMC_IDINTEN_RI)) {
402 				WRITE4(sc, SDMMC_IDSTS, (SDMMC_IDINTEN_TI |
403 							 SDMMC_IDINTEN_RI));
404 				WRITE4(sc, SDMMC_IDSTS, SDMMC_IDINTEN_NI);
405 				dma_done(sc, cmd);
406 			}
407 		}
408 	}
409 
410 	dwmmc_tasklet(sc);
411 
412 	DWMMC_UNLOCK(sc);
413 }
414 
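/*
 * React to a card-detect change: insertion schedules the card task with a
 * delay of about half a second, while removal runs it immediately.
 */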
415 static void
416 dwmmc_handle_card_present(struct dwmmc_softc *sc, bool is_present)
417 {
418 	bool was_present;
419 
420 	was_present = sc->child != NULL;
421 
422 	if (!was_present && is_present) {
423 		taskqueue_enqueue_timeout(taskqueue_swi_giant,
424 		  &sc->card_delayed_task, -(hz / 2));
425 	} else if (was_present && !is_present) {
426 		taskqueue_enqueue(taskqueue_swi_giant, &sc->card_task);
427 	}
428 }
429 
430 static void
431 dwmmc_card_task(void *arg, int pending __unused)
432 {
433 	struct dwmmc_softc *sc = arg;
434 
435 	DWMMC_LOCK(sc);
436 
437 	if (READ4(sc, SDMMC_CDETECT) == 0) {
438 		if (sc->child == NULL) {
439 			if (bootverbose)
440 				device_printf(sc->dev, "Card inserted\n");
441 
442 			sc->child = device_add_child(sc->dev, "mmc", -1);
443 			DWMMC_UNLOCK(sc);
444 			if (sc->child) {
445 				device_set_ivars(sc->child, sc);
446 				(void)device_probe_and_attach(sc->child);
447 			}
448 		} else
449 			DWMMC_UNLOCK(sc);
450 	} else {
451 		/* Card isn't present, detach if necessary */
452 		if (sc->child != NULL) {
453 			if (bootverbose)
454 				device_printf(sc->dev, "Card removed\n");
455 
456 			DWMMC_UNLOCK(sc);
457 			device_delete_child(sc->dev, sc->child);
458 			sc->child = NULL;
459 		} else
460 			DWMMC_UNLOCK(sc);
461 	}
462 }
463 
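/*
 * Pull configuration out of the FDT node: defaults for the MMC host,
 * fifo-depth, the deprecated num-slots, and clock-frequency, plus (with
 * EXT_RESOURCES) the optional reset, vmmc/vqmmc regulators and biu/ciu
 * clocks.  Fails if no bus clock frequency could be determined.
 */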
464 static int
465 parse_fdt(struct dwmmc_softc *sc)
466 {
467 	pcell_t dts_value[3];
468 	phandle_t node;
469 	uint32_t bus_hz = 0;
470 	int len;
471 #ifdef EXT_RESOURCES
472 	int error;
473 #endif
474 
475 	if ((node = ofw_bus_get_node(sc->dev)) == -1)
476 		return (ENXIO);
477 
478 	/* Set some defaults for freq and supported mode */
479 	sc->host.f_min = 400000;
480 	sc->host.f_max = 200000000;
481 	sc->host.host_ocr = MMC_OCR_320_330 | MMC_OCR_330_340;
482 	sc->host.caps = MMC_CAP_HSPEED | MMC_CAP_SIGNALING_330;
483 	mmc_fdt_parse(sc->dev, node, &sc->mmc_helper, &sc->host);
484 
485 	/* fifo-depth */
486 	if ((len = OF_getproplen(node, "fifo-depth")) > 0) {
487 		OF_getencprop(node, "fifo-depth", dts_value, len);
488 		sc->fifo_depth = dts_value[0];
489 	}
490 
491 	/* num-slots (Deprecated) */
492 	sc->num_slots = 1;
493 	if ((len = OF_getproplen(node, "num-slots")) > 0) {
494 		device_printf(sc->dev, "num-slots property is deprecated\n");
495 		OF_getencprop(node, "num-slots", dts_value, len);
496 		sc->num_slots = dts_value[0];
497 	}
498 
499 	/* clock-frequency */
500 	if ((len = OF_getproplen(node, "clock-frequency")) > 0) {
501 		OF_getencprop(node, "clock-frequency", dts_value, len);
502 		bus_hz = dts_value[0];
503 	}
504 
505 #ifdef EXT_RESOURCES
506 
507 	/* IP block reset is optional */
508 	error = hwreset_get_by_ofw_name(sc->dev, 0, "reset", &sc->hwreset);
509 	if (error != 0 &&
510 	    error != ENOENT &&
511 	    error != ENODEV) {
512 		device_printf(sc->dev, "Cannot get reset\n");
513 		goto fail;
514 	}
515 
516 	/* vmmc regulator is optional */
517 	error = regulator_get_by_ofw_property(sc->dev, 0, "vmmc-supply",
518 	     &sc->vmmc);
519 	if (error != 0 &&
520 	    error != ENOENT &&
521 	    error != ENODEV) {
522 		device_printf(sc->dev, "Cannot get regulator 'vmmc-supply'\n");
523 		goto fail;
524 	}
525 
526 	/* vqmmc regulator is optional */
527 	error = regulator_get_by_ofw_property(sc->dev, 0, "vqmmc-supply",
528 	     &sc->vqmmc);
529 	if (error != 0 &&
530 	    error != ENOENT &&
531 	    error != ENODEV) {
532 		device_printf(sc->dev, "Cannot get regulator 'vqmmc-supply'\n");
533 		goto fail;
534 	}
535 
536 	/* Assert reset first */
537 	if (sc->hwreset != NULL) {
538 		error = hwreset_assert(sc->hwreset);
539 		if (error != 0) {
540 			device_printf(sc->dev, "Cannot assert reset\n");
541 			goto fail;
542 		}
543 	}
544 
545 	/* BIU (Bus Interface Unit clock) is optional */
546 	error = clk_get_by_ofw_name(sc->dev, 0, "biu", &sc->biu);
547 	if (error != 0 &&
548 	    error != ENOENT &&
549 	    error != ENODEV) {
550 		device_printf(sc->dev, "Cannot get 'biu' clock\n");
551 		goto fail;
552 	}
553 
554 	if (sc->biu) {
555 		error = clk_enable(sc->biu);
556 		if (error != 0) {
557 			device_printf(sc->dev, "cannot enable biu clock\n");
558 			goto fail;
559 		}
560 	}
561 
562 	/*
563 	 * CIU (Controller Interface Unit clock) is mandatory
564 	 * if no clock-frequency property is given
565 	 */
566 	error = clk_get_by_ofw_name(sc->dev, 0, "ciu", &sc->ciu);
567 	if (error != 0 &&
568 	    error != ENOENT &&
569 	    error != ENODEV) {
570 		device_printf(sc->dev, "Cannot get 'ciu' clock\n");
571 		goto fail;
572 	}
573 
574 	if (sc->ciu) {
575 		if (bus_hz != 0) {
576 			error = clk_set_freq(sc->ciu, bus_hz, 0);
577 			if (error != 0)
578 				device_printf(sc->dev,
579 				    "cannot set ciu clock to %u\n", bus_hz);
580 		}
581 		error = clk_enable(sc->ciu);
582 		if (error != 0) {
583 			device_printf(sc->dev, "cannot enable ciu clock\n");
584 			goto fail;
585 		}
586 		clk_get_freq(sc->ciu, &sc->bus_hz);
587 	}
588 
589 	/* Enable regulators */
590 	if (sc->vmmc != NULL) {
591 		error = regulator_enable(sc->vmmc);
592 		if (error != 0) {
593 			device_printf(sc->dev, "Cannot enable vmmc regulator\n");
594 			goto fail;
595 		}
596 	}
597 	if (sc->vqmmc != NULL) {
598 		error = regulator_enable(sc->vqmmc);
599 		if (error != 0) {
600 			device_printf(sc->dev, "Cannot enable vqmmc regulator\n");
601 			goto fail;
602 		}
603 	}
604 
605 	/* Take dwmmc out of reset */
606 	if (sc->hwreset != NULL) {
607 		error = hwreset_deassert(sc->hwreset);
608 		if (error != 0) {
609 			device_printf(sc->dev, "Cannot deassert reset\n");
610 			goto fail;
611 		}
612 	}
613 #endif /* EXT_RESOURCES */
614 
615 	if (sc->bus_hz == 0) {
616 		device_printf(sc->dev, "No bus speed provided\n");
617 		goto fail;
618 	}
619 
620 	return (0);
621 
622 fail:
623 	return (ENXIO);
624 }
625 
626 int
627 dwmmc_attach(device_t dev)
628 {
629 	struct dwmmc_softc *sc;
630 	int error;
631 	int slot;
632 
633 	sc = device_get_softc(dev);
634 
635 	sc->dev = dev;
636 
637 	/* Use Auto Stop: it saves hundreds of interrupts per second. */
638 	sc->use_auto_stop = 1;
639 
640 	error = parse_fdt(sc);
641 	if (error != 0) {
642 		device_printf(dev, "Can't get FDT property.\n");
643 		return (ENXIO);
644 	}
645 
646 	DWMMC_LOCK_INIT(sc);
647 
648 	if (bus_alloc_resources(dev, dwmmc_spec, sc->res)) {
649 		device_printf(dev, "could not allocate resources\n");
650 		return (ENXIO);
651 	}
652 
653 	/* Set up the interrupt handler. */
654 	error = bus_setup_intr(dev, sc->res[1], INTR_TYPE_NET | INTR_MPSAFE,
655 	    NULL, dwmmc_intr, sc, &sc->intr_cookie);
656 	if (error != 0) {
657 		device_printf(dev, "could not setup interrupt handler.\n");
658 		return (ENXIO);
659 	}
660 
661 	device_printf(dev, "Hardware version ID is %04x\n",
662 		READ4(sc, SDMMC_VERID) & 0xffff);
663 
664 	/* XXX: we support operation for slot index 0 only */
665 	slot = 0;
666 	if (sc->pwren_inverted) {
667 		WRITE4(sc, SDMMC_PWREN, (0 << slot));
668 	} else {
669 		WRITE4(sc, SDMMC_PWREN, (1 << slot));
670 	}
671 
672 	/* Reset all */
673 	if (dwmmc_ctrl_reset(sc, (SDMMC_CTRL_RESET |
674 				  SDMMC_CTRL_FIFO_RESET |
675 				  SDMMC_CTRL_DMA_RESET)))
676 		return (ENXIO);
677 
678 	dwmmc_setup_bus(sc, sc->host.f_min);
679 
680 	if (sc->fifo_depth == 0) {
681 		sc->fifo_depth = 1 +
682 		    ((READ4(sc, SDMMC_FIFOTH) >> SDMMC_FIFOTH_RXWMARK_S) & 0xfff);
683 		device_printf(dev, "No fifo-depth, using FIFOTH %x\n",
684 		    sc->fifo_depth);
685 	}
686 
687 	if (!sc->use_pio) {
688 		dma_stop(sc);
689 		if (dma_setup(sc))
690 			return (ENXIO);
691 
692 		/* Install the descriptor ring base address */
693 		WRITE4(sc, SDMMC_DBADDR, sc->desc_ring_paddr);
694 
695 		/* Enable DMA interrupts */
696 		WRITE4(sc, SDMMC_IDSTS, SDMMC_IDINTEN_MASK);
697 		WRITE4(sc, SDMMC_IDINTEN, (SDMMC_IDINTEN_NI |
698 					   SDMMC_IDINTEN_RI |
699 					   SDMMC_IDINTEN_TI));
700 	}
701 
702 	/* Clear and disable interrupts for now */
703 	WRITE4(sc, SDMMC_RINTSTS, 0xffffffff);
704 	WRITE4(sc, SDMMC_INTMASK, 0);
705 
706 	/* Maximum timeout */
707 	WRITE4(sc, SDMMC_TMOUT, 0xffffffff);
708 
709 	/* Enable interrupts */
710 	WRITE4(sc, SDMMC_RINTSTS, 0xffffffff);
711 	WRITE4(sc, SDMMC_INTMASK, (SDMMC_INTMASK_CMD_DONE |
712 				   SDMMC_INTMASK_DTO |
713 				   SDMMC_INTMASK_ACD |
714 				   SDMMC_INTMASK_TXDR |
715 				   SDMMC_INTMASK_RXDR |
716 				   DWMMC_ERR_FLAGS |
717 				   SDMMC_INTMASK_CD));
718 	WRITE4(sc, SDMMC_CTRL, SDMMC_CTRL_INT_ENABLE);
719 
720 	TASK_INIT(&sc->card_task, 0, dwmmc_card_task, sc);
721 	TIMEOUT_TASK_INIT(taskqueue_swi_giant, &sc->card_delayed_task, 0,
722 		dwmmc_card_task, sc);
723 
724 	/*
725 	 * Schedule a card detection pass, as we will not get an interrupt
726 	 * if the card is already inserted when we attach.
727 	 */
728 	dwmmc_card_task(sc, 0);
729 
730 	return (0);
731 }
732 
733 int
734 dwmmc_detach(device_t dev)
735 {
736 	struct dwmmc_softc *sc;
737 	int ret;
738 
739 	sc = device_get_softc(dev);
740 
741 	ret = device_delete_children(dev);
742 	if (ret != 0)
743 		return (ret);
744 
745 	taskqueue_drain(taskqueue_swi_giant, &sc->card_task);
746 	taskqueue_drain_timeout(taskqueue_swi_giant, &sc->card_delayed_task);
747 
748 	if (sc->intr_cookie != NULL) {
749 		ret = bus_teardown_intr(dev, sc->res[1], sc->intr_cookie);
750 		if (ret != 0)
751 			return (ret);
752 	}
753 	bus_release_resources(dev, dwmmc_spec, sc->res);
754 
755 	DWMMC_LOCK_DESTROY(sc);
756 
757 #ifdef EXT_RESOURCES
758 	if (sc->hwreset != NULL && hwreset_deassert(sc->hwreset) != 0)
759 		device_printf(sc->dev, "cannot deassert reset\n");
760 	if (sc->biu != NULL && clk_disable(sc->biu) != 0)
761 		device_printf(sc->dev, "cannot disable biu clock\n");
762 	if (sc->ciu != NULL && clk_disable(sc->ciu) != 0)
763 		device_printf(sc->dev, "cannot disable ciu clock\n");
764 
765 	if (sc->vmmc && regulator_disable(sc->vmmc) != 0)
766 		device_printf(sc->dev, "Cannot disable vmmc regulator\n");
767 	if (sc->vqmmc && regulator_disable(sc->vqmmc) != 0)
768 		device_printf(sc->dev, "Cannot disable vqmmc regulator\n");
769 #endif
770 
771 	return (0);
772 }
773 
774 static int
775 dwmmc_setup_bus(struct dwmmc_softc *sc, int freq)
776 {
777 	int tout;
778 	int div;
779 
780 	if (freq == 0) {
781 		WRITE4(sc, SDMMC_CLKENA, 0);
782 		WRITE4(sc, SDMMC_CMD, (SDMMC_CMD_WAIT_PRVDATA |
783 			SDMMC_CMD_UPD_CLK_ONLY | SDMMC_CMD_START));
784 
785 		tout = 1000;
786 		do {
787 				device_printf(sc->dev, "Failed to update clk\n");
788 				device_printf(sc->dev, "Failed update clk\n");
789 				return (1);
790 			}
791 		} while (READ4(sc, SDMMC_CMD) & SDMMC_CMD_START);
792 
793 		return (0);
794 	}
795 
796 	WRITE4(sc, SDMMC_CLKENA, 0);
797 	WRITE4(sc, SDMMC_CLKSRC, 0);
798 
799 	div = (sc->bus_hz != freq) ? DIV_ROUND_UP(sc->bus_hz, 2 * freq) : 0;
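	/*
	 * The card clock is bus_hz / (2 * CLKDIV); a CLKDIV of 0 bypasses
	 * the divider.  Round up so the result never exceeds the requested
	 * frequency, e.g. bus_hz 50 MHz and freq 400 kHz gives div 63
	 * (~397 kHz).
	 */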
800 
801 	WRITE4(sc, SDMMC_CLKDIV, div);
802 	WRITE4(sc, SDMMC_CMD, (SDMMC_CMD_WAIT_PRVDATA |
803 			SDMMC_CMD_UPD_CLK_ONLY | SDMMC_CMD_START));
804 
805 	tout = 1000;
806 	do {
807 		if (tout-- < 0) {
808 			device_printf(sc->dev, "Failed to update clk\n");
809 			return (1);
810 		}
811 	} while (READ4(sc, SDMMC_CMD) & SDMMC_CMD_START);
812 
813 	WRITE4(sc, SDMMC_CLKENA, (SDMMC_CLKENA_CCLK_EN | SDMMC_CLKENA_LP));
814 	WRITE4(sc, SDMMC_CMD, SDMMC_CMD_WAIT_PRVDATA |
815 			SDMMC_CMD_UPD_CLK_ONLY | SDMMC_CMD_START);
816 
817 	tout = 1000;
818 	do {
819 		if (tout-- < 0) {
820 			device_printf(sc->dev, "Failed to enable clk\n");
821 			return (1);
822 		}
823 	} while (READ4(sc, SDMMC_CMD) & SDMMC_CMD_START);
824 
825 	return (0);
826 }
827 
828 static int
829 dwmmc_update_ios(device_t brdev, device_t reqdev)
830 {
831 	struct dwmmc_softc *sc;
832 	struct mmc_ios *ios;
833 	uint32_t reg;
834 	int ret = 0;
835 
836 	sc = device_get_softc(brdev);
837 	ios = &sc->host.ios;
838 
839 	dprintf("Setting up clk %u bus_width %d\n",
840 		ios->clock, ios->bus_width);
841 
842 	if (ios->bus_width == bus_width_8)
843 		WRITE4(sc, SDMMC_CTYPE, SDMMC_CTYPE_8BIT);
844 	else if (ios->bus_width == bus_width_4)
845 		WRITE4(sc, SDMMC_CTYPE, SDMMC_CTYPE_4BIT);
846 	else
847 		WRITE4(sc, SDMMC_CTYPE, 0);
848 
849 	if ((sc->hwtype & HWTYPE_MASK) == HWTYPE_EXYNOS) {
850 		/* XXX: take DDR vs. SDR operation into account here */
851 		WRITE4(sc, SDMMC_CLKSEL, sc->sdr_timing);
852 	}
853 
854 	/* Set DDR mode */
855 	reg = READ4(sc, SDMMC_UHS_REG);
856 	if (ios->timing == bus_timing_uhs_ddr50 ||
857 	    ios->timing == bus_timing_mmc_ddr52 ||
858 	    ios->timing == bus_timing_mmc_hs400)
859 		reg |= (SDMMC_UHS_REG_DDR);
860 	else
861 		reg &= ~(SDMMC_UHS_REG_DDR);
862 	WRITE4(sc, SDMMC_UHS_REG, reg);
863 
864 	if (sc->update_ios)
865 		ret = sc->update_ios(sc, ios);
866 
867 	dwmmc_setup_bus(sc, ios->clock);
868 
869 	return (ret);
870 }
871 
872 static int
873 dma_done(struct dwmmc_softc *sc, struct mmc_command *cmd)
874 {
875 	struct mmc_data *data;
876 
877 	data = cmd->data;
878 
879 	if (data->flags & MMC_DATA_WRITE)
880 		bus_dmamap_sync(sc->buf_tag, sc->buf_map,
881 			BUS_DMASYNC_POSTWRITE);
882 	else
883 		bus_dmamap_sync(sc->buf_tag, sc->buf_map,
884 			BUS_DMASYNC_POSTREAD);
885 
886 	bus_dmamap_sync(sc->desc_tag, sc->desc_map,
887 	    BUS_DMASYNC_POSTWRITE);
888 
889 	bus_dmamap_unload(sc->buf_tag, sc->buf_map);
890 
891 	return (0);
892 }
893 
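/*
 * Disable the internal DMA controller: clear USE_IDMAC and reset the DMA
 * interface in CTRL, then soft-reset the IDMAC state machine via BMOD.
 */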
894 static int
895 dma_stop(struct dwmmc_softc *sc)
896 {
897 	int reg;
898 
899 	reg = READ4(sc, SDMMC_CTRL);
900 	reg &= ~(SDMMC_CTRL_USE_IDMAC);
901 	reg |= (SDMMC_CTRL_DMA_RESET);
902 	WRITE4(sc, SDMMC_CTRL, reg);
903 
904 	reg = READ4(sc, SDMMC_BMOD);
905 	reg &= ~(SDMMC_BMOD_DE | SDMMC_BMOD_FB);
906 	reg |= (SDMMC_BMOD_SWR);
907 	WRITE4(sc, SDMMC_BMOD, reg);
908 
909 	return (0);
910 }
911 
912 static int
913 dma_prepare(struct dwmmc_softc *sc, struct mmc_command *cmd)
914 {
915 	struct mmc_data *data;
916 	int err;
917 	int reg;
918 
919 	data = cmd->data;
920 
921 	reg = READ4(sc, SDMMC_INTMASK);
922 	reg &= ~(SDMMC_INTMASK_TXDR | SDMMC_INTMASK_RXDR);
923 	WRITE4(sc, SDMMC_INTMASK, reg);
924 
925 	err = bus_dmamap_load(sc->buf_tag, sc->buf_map,
926 		data->data, data->len, dwmmc_ring_setup,
927 		sc, BUS_DMA_NOWAIT);
928 	if (err != 0)
929 		panic("dmamap_load failed\n");
930 
931 	/* Ensure the device sees the updated descriptors */
932 	bus_dmamap_sync(sc->desc_tag, sc->desc_map,
933 	    BUS_DMASYNC_PREWRITE);
934 
935 	if (data->flags & MMC_DATA_WRITE)
936 		bus_dmamap_sync(sc->buf_tag, sc->buf_map,
937 			BUS_DMASYNC_PREWRITE);
938 	else
939 		bus_dmamap_sync(sc->buf_tag, sc->buf_map,
940 			BUS_DMASYNC_PREREAD);
941 
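	/*
	 * Program the FIFO thresholds: DMA burst size DEF_MSIZE, RX
	 * watermark at half the FIFO depth minus one, TX watermark at half
	 * the FIFO depth.
	 */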
942 	reg = (DEF_MSIZE << SDMMC_FIFOTH_MSIZE_S);
943 	reg |= ((sc->fifo_depth / 2) - 1) << SDMMC_FIFOTH_RXWMARK_S;
944 	reg |= (sc->fifo_depth / 2) << SDMMC_FIFOTH_TXWMARK_S;
945 
946 	WRITE4(sc, SDMMC_FIFOTH, reg);
947 	wmb();
948 
949 	reg = READ4(sc, SDMMC_CTRL);
950 	reg |= (SDMMC_CTRL_USE_IDMAC | SDMMC_CTRL_DMA_ENABLE);
951 	WRITE4(sc, SDMMC_CTRL, reg);
952 	wmb();
953 
954 	reg = READ4(sc, SDMMC_BMOD);
955 	reg |= (SDMMC_BMOD_DE | SDMMC_BMOD_FB);
956 	WRITE4(sc, SDMMC_BMOD, reg);
957 
958 	/* Start */
959 	WRITE4(sc, SDMMC_PLDMND, 1);
960 
961 	return (0);
962 }
963 
964 static int
965 pio_prepare(struct dwmmc_softc *sc, struct mmc_command *cmd)
966 {
967 	struct mmc_data *data;
968 	int reg;
969 
970 	data = cmd->data;
971 	data->xfer_len = 0;
972 
973 	reg = (DEF_MSIZE << SDMMC_FIFOTH_MSIZE_S);
974 	reg |= ((sc->fifo_depth / 2) - 1) << SDMMC_FIFOTH_RXWMARK_S;
975 	reg |= (sc->fifo_depth / 2) << SDMMC_FIFOTH_TXWMARK_S;
976 
977 	WRITE4(sc, SDMMC_FIFOTH, reg);
978 	wmb();
979 
980 	return (0);
981 }
982 
983 static void
984 pio_read(struct dwmmc_softc *sc, struct mmc_command *cmd)
985 {
986 	struct mmc_data *data;
987 	uint32_t *p, status;
988 
989 	if (cmd == NULL || cmd->data == NULL)
990 		return;
991 
992 	data = cmd->data;
993 	if ((data->flags & MMC_DATA_READ) == 0)
994 		return;
995 
996 	KASSERT((data->xfer_len & 3) == 0, ("xfer_len not aligned"));
997 	p = (uint32_t *)data->data + (data->xfer_len >> 2);
998 
999 	while (data->xfer_len < data->len) {
1000 		status = READ4(sc, SDMMC_STATUS);
1001 		if (status & SDMMC_STATUS_FIFO_EMPTY)
1002 			break;
1003 		*p++ = READ4(sc, SDMMC_DATA);
1004 		data->xfer_len += 4;
1005 	}
1006 
1007 	WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_RXDR);
1008 }
1009 
1010 static void
1011 pio_write(struct dwmmc_softc *sc, struct mmc_command *cmd)
1012 {
1013 	struct mmc_data *data;
1014 	uint32_t *p, status;
1015 
1016 	if (cmd == NULL || cmd->data == NULL)
1017 		return;
1018 
1019 	data = cmd->data;
1020 	if ((data->flags & MMC_DATA_WRITE) == 0)
1021 		return;
1022 
1023 	KASSERT((data->xfer_len & 3) == 0, ("xfer_len not aligned"));
1024 	p = (uint32_t *)data->data + (data->xfer_len >> 2);
1025 
1026 	while (data->xfer_len < data->len) {
1027 		status = READ4(sc, SDMMC_STATUS);
1028 		if (status & SDMMC_STATUS_FIFO_FULL)
1029 			break;
1030 		WRITE4(sc, SDMMC_DATA, *p++);
1031 		data->xfer_len += 4;
1032 	}
1033 
1034 	WRITE4(sc, SDMMC_RINTSTS, SDMMC_INTMASK_TXDR);
1035 }
1036 
1037 static void
1038 dwmmc_start_cmd(struct dwmmc_softc *sc, struct mmc_command *cmd)
1039 {
1040 	struct mmc_data *data;
1041 	uint32_t blksz;
1042 	uint32_t cmdr;
1043 
1044 	sc->curcmd = cmd;
1045 	data = cmd->data;
1046 
1047 	if ((sc->hwtype & HWTYPE_MASK) == HWTYPE_ROCKCHIP)
1048 		dwmmc_setup_bus(sc, sc->host.ios.clock);
1049 
1050 	/* XXX Upper layers don't always set this */
1051 	cmd->mrq = sc->req;
1052 
1053 	/* Begin setting up command register. */
1054 
1055 	cmdr = cmd->opcode;
1056 
1057 	dprintf("cmd->opcode 0x%08x\n", cmd->opcode);
1058 
1059 	if (cmd->opcode == MMC_STOP_TRANSMISSION ||
1060 	    cmd->opcode == MMC_GO_IDLE_STATE ||
1061 	    cmd->opcode == MMC_GO_INACTIVE_STATE)
1062 		cmdr |= SDMMC_CMD_STOP_ABORT;
1063 	else if (cmd->opcode != MMC_SEND_STATUS && data)
1064 		cmdr |= SDMMC_CMD_WAIT_PRVDATA;
1065 
1066 	/* Set up response handling. */
1067 	if (MMC_RSP(cmd->flags) != MMC_RSP_NONE) {
1068 		cmdr |= SDMMC_CMD_RESP_EXP;
1069 		if (cmd->flags & MMC_RSP_136)
1070 			cmdr |= SDMMC_CMD_RESP_LONG;
1071 	}
1072 
1073 	if (cmd->flags & MMC_RSP_CRC)
1074 		cmdr |= SDMMC_CMD_RESP_CRC;
1075 
1076 	/*
1077 	 * XXX: Not all platforms want this.
1078 	 */
1079 	cmdr |= SDMMC_CMD_USE_HOLD_REG;
1080 
1081 	if ((sc->flags & CARD_INIT_DONE) == 0) {
1082 		sc->flags |= (CARD_INIT_DONE);
1083 		cmdr |= SDMMC_CMD_SEND_INIT;
1084 	}
1085 
1086 	if (data) {
1087 		if ((cmd->opcode == MMC_WRITE_MULTIPLE_BLOCK ||
1088 		     cmd->opcode == MMC_READ_MULTIPLE_BLOCK) &&
1089 		     sc->use_auto_stop)
1090 			cmdr |= SDMMC_CMD_SEND_ASTOP;
1091 
1092 		cmdr |= SDMMC_CMD_DATA_EXP;
1093 		if (data->flags & MMC_DATA_STREAM)
1094 			cmdr |= SDMMC_CMD_MODE_STREAM;
1095 		if (data->flags & MMC_DATA_WRITE)
1096 			cmdr |= SDMMC_CMD_DATA_WRITE;
1097 
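		/*
		 * BYTCNT is the total transfer length; BLKSIZ is capped at
		 * one sector (MMC_SECTOR_SIZE).
		 */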
1098 		WRITE4(sc, SDMMC_TMOUT, 0xffffffff);
1099 		WRITE4(sc, SDMMC_BYTCNT, data->len);
1100 		blksz = (data->len < MMC_SECTOR_SIZE) ? \
1101 			 data->len : MMC_SECTOR_SIZE;
1102 		WRITE4(sc, SDMMC_BLKSIZ, blksz);
1103 
1104 		if (sc->use_pio) {
1105 			pio_prepare(sc, cmd);
1106 		} else {
1107 			dma_prepare(sc, cmd);
1108 		}
1109 		wmb();
1110 	}
1111 
1112 	dprintf("cmdr 0x%08x\n", cmdr);
1113 
1114 	WRITE4(sc, SDMMC_CMDARG, cmd->arg);
1115 	wmb();
1116 	WRITE4(sc, SDMMC_CMD, cmdr | SDMMC_CMD_START);
1117 }
1118 
1119 static void
1120 dwmmc_next_operation(struct dwmmc_softc *sc)
1121 {
1122 	struct mmc_request *req;
1123 
1124 	req = sc->req;
1125 	if (req == NULL)
1126 		return;
1127 
1128 	sc->acd_rcvd = 0;
1129 	sc->dto_rcvd = 0;
1130 	sc->cmd_done = 0;
1131 
1132 	/*
1133 	 * XXX: Wait while the card is still busy.
1134 	 * We need this to prevent data timeouts,
1135 	 * mostly caused by a multi-block write command
1136 	 * followed by a single-block read.
1137 	 */
1138 	while (READ4(sc, SDMMC_STATUS) & (SDMMC_STATUS_DATA_BUSY))
1139 		continue;
1140 
1141 	if (sc->flags & PENDING_CMD) {
1142 		sc->flags &= ~PENDING_CMD;
1143 		dwmmc_start_cmd(sc, req->cmd);
1144 		return;
1145 	} else if (sc->flags & PENDING_STOP && !sc->use_auto_stop) {
1146 		sc->flags &= ~PENDING_STOP;
1147 		dwmmc_start_cmd(sc, req->stop);
1148 		return;
1149 	}
1150 
1151 	sc->req = NULL;
1152 	sc->curcmd = NULL;
1153 	req->done(req);
1154 }
1155 
1156 static int
1157 dwmmc_request(device_t brdev, device_t reqdev, struct mmc_request *req)
1158 {
1159 	struct dwmmc_softc *sc;
1160 
1161 	sc = device_get_softc(brdev);
1162 
1163 	dprintf("%s\n", __func__);
1164 
1165 	DWMMC_LOCK(sc);
1166 
1167 	if (sc->req != NULL) {
1168 		DWMMC_UNLOCK(sc);
1169 		return (EBUSY);
1170 	}
1171 
1172 	sc->req = req;
1173 	sc->flags |= PENDING_CMD;
1174 	if (sc->req->stop)
1175 		sc->flags |= PENDING_STOP;
1176 	dwmmc_next_operation(sc);
1177 
1178 	DWMMC_UNLOCK(sc);
1179 	return (0);
1180 }
1181 
1182 static int
1183 dwmmc_get_ro(device_t brdev, device_t reqdev)
1184 {
1185 
1186 	dprintf("%s\n", __func__);
1187 
1188 	return (0);
1189 }
1190 
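/*
 * Serialize bus access: sleep until no other client holds the host, then
 * mark it busy.  dwmmc_release_host() wakes up any waiters.
 */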
1191 static int
1192 dwmmc_acquire_host(device_t brdev, device_t reqdev)
1193 {
1194 	struct dwmmc_softc *sc;
1195 
1196 	sc = device_get_softc(brdev);
1197 
1198 	DWMMC_LOCK(sc);
1199 	while (sc->bus_busy)
1200 		msleep(sc, &sc->sc_mtx, PZERO, "dwmmcah", hz / 5);
1201 	sc->bus_busy++;
1202 	DWMMC_UNLOCK(sc);
1203 	return (0);
1204 }
1205 
1206 static int
1207 dwmmc_release_host(device_t brdev, device_t reqdev)
1208 {
1209 	struct dwmmc_softc *sc;
1210 
1211 	sc = device_get_softc(brdev);
1212 
1213 	DWMMC_LOCK(sc);
1214 	sc->bus_busy--;
1215 	wakeup(sc);
1216 	DWMMC_UNLOCK(sc);
1217 	return (0);
1218 }
1219 
1220 static int
1221 dwmmc_read_ivar(device_t bus, device_t child, int which, uintptr_t *result)
1222 {
1223 	struct dwmmc_softc *sc;
1224 
1225 	sc = device_get_softc(bus);
1226 
1227 	switch (which) {
1228 	default:
1229 		return (EINVAL);
1230 	case MMCBR_IVAR_BUS_MODE:
1231 		*(int *)result = sc->host.ios.bus_mode;
1232 		break;
1233 	case MMCBR_IVAR_BUS_WIDTH:
1234 		*(int *)result = sc->host.ios.bus_width;
1235 		break;
1236 	case MMCBR_IVAR_CHIP_SELECT:
1237 		*(int *)result = sc->host.ios.chip_select;
1238 		break;
1239 	case MMCBR_IVAR_CLOCK:
1240 		*(int *)result = sc->host.ios.clock;
1241 		break;
1242 	case MMCBR_IVAR_F_MIN:
1243 		*(int *)result = sc->host.f_min;
1244 		break;
1245 	case MMCBR_IVAR_F_MAX:
1246 		*(int *)result = sc->host.f_max;
1247 		break;
1248 	case MMCBR_IVAR_HOST_OCR:
1249 		*(int *)result = sc->host.host_ocr;
1250 		break;
1251 	case MMCBR_IVAR_MODE:
1252 		*(int *)result = sc->host.mode;
1253 		break;
1254 	case MMCBR_IVAR_OCR:
1255 		*(int *)result = sc->host.ocr;
1256 		break;
1257 	case MMCBR_IVAR_POWER_MODE:
1258 		*(int *)result = sc->host.ios.power_mode;
1259 		break;
1260 	case MMCBR_IVAR_VDD:
1261 		*(int *)result = sc->host.ios.vdd;
1262 		break;
1263 	case MMCBR_IVAR_VCCQ:
1264 		*(int *)result = sc->host.ios.vccq;
1265 		break;
1266 	case MMCBR_IVAR_CAPS:
1267 		*(int *)result = sc->host.caps;
1268 		break;
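	/*
	 * The largest request we can map through the IDMAC descriptor
	 * ring, expressed in 512-byte sectors.
	 */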
1269 	case MMCBR_IVAR_MAX_DATA:
1270 		*(int *)result = (IDMAC_MAX_SIZE * IDMAC_DESC_SEGS) / MMC_SECTOR_SIZE;
1271 		break;
1272 	case MMCBR_IVAR_TIMING:
1273 		*(int *)result = sc->host.ios.timing;
1274 		break;
1275 	}
1276 	return (0);
1277 }
1278 
1279 static int
1280 dwmmc_write_ivar(device_t bus, device_t child, int which, uintptr_t value)
1281 {
1282 	struct dwmmc_softc *sc;
1283 
1284 	sc = device_get_softc(bus);
1285 
1286 	switch (which) {
1287 	default:
1288 		return (EINVAL);
1289 	case MMCBR_IVAR_BUS_MODE:
1290 		sc->host.ios.bus_mode = value;
1291 		break;
1292 	case MMCBR_IVAR_BUS_WIDTH:
1293 		sc->host.ios.bus_width = value;
1294 		break;
1295 	case MMCBR_IVAR_CHIP_SELECT:
1296 		sc->host.ios.chip_select = value;
1297 		break;
1298 	case MMCBR_IVAR_CLOCK:
1299 		sc->host.ios.clock = value;
1300 		break;
1301 	case MMCBR_IVAR_MODE:
1302 		sc->host.mode = value;
1303 		break;
1304 	case MMCBR_IVAR_OCR:
1305 		sc->host.ocr = value;
1306 		break;
1307 	case MMCBR_IVAR_POWER_MODE:
1308 		sc->host.ios.power_mode = value;
1309 		break;
1310 	case MMCBR_IVAR_VDD:
1311 		sc->host.ios.vdd = value;
1312 		break;
1313 	case MMCBR_IVAR_TIMING:
1314 		sc->host.ios.timing = value;
1315 		break;
1316 	case MMCBR_IVAR_VCCQ:
1317 		sc->host.ios.vccq = value;
1318 		break;
1319 	/* These are read-only */
1320 	case MMCBR_IVAR_CAPS:
1321 	case MMCBR_IVAR_HOST_OCR:
1322 	case MMCBR_IVAR_F_MIN:
1323 	case MMCBR_IVAR_F_MAX:
1324 	case MMCBR_IVAR_MAX_DATA:
1325 		return (EINVAL);
1326 	}
1327 	return (0);
1328 }
1329 
1330 static device_method_t dwmmc_methods[] = {
1331 	/* Bus interface */
1332 	DEVMETHOD(bus_read_ivar,	dwmmc_read_ivar),
1333 	DEVMETHOD(bus_write_ivar,	dwmmc_write_ivar),
1334 
1335 	/* mmcbr_if */
1336 	DEVMETHOD(mmcbr_update_ios,	dwmmc_update_ios),
1337 	DEVMETHOD(mmcbr_request,	dwmmc_request),
1338 	DEVMETHOD(mmcbr_get_ro,		dwmmc_get_ro),
1339 	DEVMETHOD(mmcbr_acquire_host,	dwmmc_acquire_host),
1340 	DEVMETHOD(mmcbr_release_host,	dwmmc_release_host),
1341 
1342 	DEVMETHOD_END
1343 };
1344 
1345 DEFINE_CLASS_0(dwmmc, dwmmc_driver, dwmmc_methods,
1346     sizeof(struct dwmmc_softc));
1347