xref: /freebsd/sys/arm/freescale/imx/imx6_sdma.c (revision 315ee00f)
/*-
 * Copyright (c) 2015 Ruslan Bukin <br@bsdpad.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * i.MX6 Smart Direct Memory Access Controller (sDMA)
 * Chapter 41, i.MX 6Dual/6Quad Applications Processor Reference Manual,
 * Rev. 1, 04/2013
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/endian.h>
#include <sys/rman.h>
#include <sys/timeet.h>
#include <sys/timetc.h>
#include <sys/firmware.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>

#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#include <machine/bus.h>
#include <machine/cpu.h>
#include <machine/intr.h>

#include <arm/freescale/imx/imx6_sdma.h>

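/*
 * Number of buffer descriptors that fit in the single page allocated per
 * channel by sdma_alloc().
 */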
#define	MAX_BD	(PAGE_SIZE / sizeof(struct sdma_buffer_descriptor))

#define	READ4(_sc, _reg)	\
	bus_space_read_4(_sc->bst, _sc->bsh, _reg)
#define	WRITE4(_sc, _reg, _val)	\
	bus_space_write_4(_sc->bst, _sc->bsh, _reg, _val)

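/*
 * There is a single sDMA controller; the exported sdma_*() functions find
 * their softc through this global rather than taking a device_t argument.
 */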
struct sdma_softc *sdma_sc;

static struct resource_spec sdma_spec[] = {
	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },
	{ SYS_RES_IRQ,		0,	RF_ACTIVE },
	{ -1, 0 }
};

/*
 * This will get set to true if we can't load firmware while attaching, to
 * prevent multiple attempts to re-attach the device on each bus pass.
 */
static bool firmware_unavailable;

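/*
 * Interrupt handler: read and acknowledge the per-channel interrupt status
 * (SDMAARM_INTR), then for every channel that signalled completion hand its
 * buffer descriptors back to the engine (BD_DONE), report any descriptor
 * error (BD_RROR), invoke the client's callback and restart the channel.
 */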
static void
sdma_intr(void *arg)
{
	struct sdma_buffer_descriptor *bd;
	struct sdma_channel *channel;
	struct sdma_conf *conf;
	struct sdma_softc *sc;
	int pending;
	int i;
	int j;

	sc = arg;

	pending = READ4(sc, SDMAARM_INTR);

	/* Ack intr */
	WRITE4(sc, SDMAARM_INTR, pending);

	for (i = 0; i < SDMA_N_CHANNELS; i++) {
		if ((pending & (1 << i)) == 0)
			continue;
		channel = &sc->channel[i];
		conf = channel->conf;
		if (!conf)
			continue;
		for (j = 0; j < conf->num_bd; j++) {
			bd = &channel->bd[j];
			bd->mode.status |= BD_DONE;
			if (bd->mode.status & BD_RROR)
				printf("sDMA error\n");
		}

		conf->ih(conf->ih_user, 1);

		WRITE4(sc, SDMAARM_HSTART, (1 << i));
	}
}

static int
sdma_probe(device_t dev)
{

	if (!ofw_bus_status_okay(dev) || firmware_unavailable)
		return (ENXIO);

	if (!ofw_bus_is_compatible(dev, "fsl,imx6q-sdma"))
		return (ENXIO);

	device_set_desc(dev, "i.MX6 Smart Direct Memory Access Controller");
	return (BUS_PROBE_DEFAULT);
}

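/*
 * Exported channel API.  A rough sketch of the intended call sequence for a
 * client driver, inferred from this file (illustrative only; the field
 * values are placeholders):
 *
 *	chn = sdma_alloc();
 *	conf->ih = handler;		(called from sdma_intr())
 *	conf->ih_user = softc;
 *	conf->saddr = ...;		(source address)
 *	conf->daddr = ...;		(destination, e.g. a Tx FIFO)
 *	conf->event = ...;		(sDMA DMA request/event number)
 *	conf->period = ...;		(bytes per descriptor; saddr stride)
 *	conf->num_bd = ...;		(descriptors in the ring, <= MAX_BD)
 *	conf->command = ...;		(copied to each descriptor's command)
 *	conf->word_length = ...;	(loaded into the watermark register)
 *	sdma_configure(chn, conf);
 *	sdma_start(chn);
 *	...
 *	sdma_stop(chn);
 *	sdma_free(chn);
 *
 * sdma_start() sets the channel's host-enable bit through HSTART;
 * sdma_stop() clears it through STOP_STAT.
 */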
int
sdma_start(int chn)
{
	struct sdma_softc *sc;

	sc = sdma_sc;

	WRITE4(sc, SDMAARM_HSTART, (1 << chn));

	return (0);
}

int
sdma_stop(int chn)
{
	struct sdma_softc *sc;

	sc = sdma_sc;

	WRITE4(sc, SDMAARM_STOP_STAT, (1 << chn));

	return (0);
}

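/*
 * Reserve a free channel and allocate an uncacheable page for its buffer
 * descriptor ring.  Channel 0 is never handed out: it is reserved for
 * booting the firmware and loading channel contexts.  Returns the channel
 * number, or -1 if no channel is available.
 */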
int
sdma_alloc(void)
{
	struct sdma_channel *channel;
	struct sdma_softc *sc;
	int found;
	int chn;
	int i;

	sc = sdma_sc;
	found = 0;

	/* Channel 0 can't be used */
	for (i = 1; i < SDMA_N_CHANNELS; i++) {
		channel = &sc->channel[i];
		if (channel->in_use == 0) {
			channel->in_use = 1;
			found = 1;
			break;
		}
	}

	if (!found)
		return (-1);

	chn = i;

	/* Allocate area for buffer descriptors */
	channel->bd = kmem_alloc_contig(PAGE_SIZE, M_ZERO, 0, ~0,
	    PAGE_SIZE, 0, VM_MEMATTR_UNCACHEABLE);

	return (chn);
}

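/*
 * Release a channel obtained from sdma_alloc() and free its buffer
 * descriptor page.
 */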
int
sdma_free(int chn)
{
	struct sdma_channel *channel;
	struct sdma_softc *sc;

	sc = sdma_sc;

	channel = &sc->channel[chn];
	channel->in_use = 0;

	kmem_free(channel->bd, PAGE_SIZE);

	return (0);
}

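/*
 * Program the per-channel override bits.  Setting a bit in EVTOVR, HOSTOVR
 * or DSPOVR makes the corresponding start condition (DMA event pending,
 * host enable, DSP enable) unconditionally true for that channel; per the
 * reference manual a channel only runs once all three conditions are met.
 * boot_firmware() uses (evt = 1, host = 0, dsp = 0) so channel 0 is started
 * by HSTART alone, while sdma_configure() uses (0, 0, 0) so data channels
 * require both their DMA event and a host start.
 */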
static int
sdma_overrides(struct sdma_softc *sc, int chn,
		int evt, int host, int dsp)
{
	int reg;

	/* Ignore sDMA requests */
	reg = READ4(sc, SDMAARM_EVTOVR);
	if (evt)
		reg |= (1 << chn);
	else
		reg &= ~(1 << chn);
	WRITE4(sc, SDMAARM_EVTOVR, reg);

	/* Ignore enable bit (HE) */
	reg = READ4(sc, SDMAARM_HOSTOVR);
	if (host)
		reg |= (1 << chn);
	else
		reg &= ~(1 << chn);
	WRITE4(sc, SDMAARM_HOSTOVR, reg);

	/* Prevent sDMA channel from starting */
	reg = READ4(sc, SDMAARM_DSPOVR);
	if (!dsp)
		reg |= (1 << chn);
	else
		reg &= ~(1 << chn);
	WRITE4(sc, SDMAARM_DSPOVR, reg);

	return (0);
}

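/*
 * Configure an allocated channel: stop it, give it a non-zero priority,
 * route its DMA request (conf->event) to it through CHNENBL, build the
 * cyclic buffer descriptor ring described by conf, and finally load the
 * channel context (pointing at the firmware's mcu_2_app script) into the
 * engine using channel 0.  Returns 0 on success or -1 on error.
 */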
int
sdma_configure(int chn, struct sdma_conf *conf)
{
	struct sdma_buffer_descriptor *bd0;
	struct sdma_buffer_descriptor *bd;
	struct sdma_context_data *context;
	struct sdma_channel *channel;
	struct sdma_softc *sc;
#if 0
	int timeout;
	int ret;
#endif
	int i;

	sc = sdma_sc;

	channel = &sc->channel[chn];
	channel->conf = conf;

	/* Ensure operation has stopped */
	sdma_stop(chn);

	/* Set priority and enable the channel */
	WRITE4(sc, SDMAARM_SDMA_CHNPRI(chn), 1);
	WRITE4(sc, SDMAARM_CHNENBL(conf->event), (1 << chn));

	sdma_overrides(sc, chn, 0, 0, 0);

	if (conf->num_bd > MAX_BD) {
		device_printf(sc->dev, "Error: too many buffer"
		    " descriptors requested\n");
		return (-1);
	}

	for (i = 0; i < conf->num_bd; i++) {
		bd = &channel->bd[i];
		bd->mode.command = conf->command;
		bd->mode.status = BD_DONE | BD_EXTD | BD_CONT | BD_INTR;
		if (i == (conf->num_bd - 1))
			bd->mode.status |= BD_WRAP;
		bd->mode.count = conf->period;
		bd->buffer_addr = conf->saddr + (conf->period * i);
		bd->ext_buffer_addr = 0;
	}

	sc->ccb[chn].base_bd_ptr = vtophys(channel->bd);
	sc->ccb[chn].current_bd_ptr = vtophys(channel->bd);

	/*
	 * Load context.
	 *
	 * i.MX6 Reference Manual: Appendix A SDMA Scripts
	 * A.3.1.7.1 (mcu_2_app)
	 */

	/*
	 * TODO: allow using other scripts
	 */
	context = sc->context;
	memset(context, 0, sizeof(*context));
	context->channel_state.pc = sc->fw_scripts->mcu_2_app_addr;

	/*
	 * Tx FIFO 0 address (r6)
	 * Event_mask (r1)
	 * Event2_mask (r0)
	 * Watermark level (r7)
	 */

	/* Events 0-31 are reported in r1, events 32 and up in r0. */
	if (conf->event >= 32) {
		context->gReg[0] = (1 << (conf->event % 32));
		context->gReg[1] = 0;
	} else {
		context->gReg[0] = 0;
		context->gReg[1] = (1 << conf->event);
	}

	context->gReg[6] = conf->daddr;
	context->gReg[7] = conf->word_length;

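	/*
	 * Copy the context into the engine's context RAM using channel 0
	 * and a C0_SETDM command: the destination is given as a word
	 * address, and the context area for channel n starts at word 2048
	 * plus n times the context size in words.
	 */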
	bd0 = sc->bd0;
	bd0->mode.command = C0_SETDM;
	bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD;
	bd0->mode.count = sizeof(*context) / 4;
	bd0->buffer_addr = sc->context_phys;
	bd0->ext_buffer_addr = 2048 + (sizeof(*context) / 4) * chn;

	WRITE4(sc, SDMAARM_HSTART, 1);

#if 0
	/* Debug purposes */

	timeout = 1000;
	while (!(ret = READ4(sc, SDMAARM_INTR) & 1)) {
		if (timeout-- <= 0)
			break;
		DELAY(10);
	}

	if (!ret) {
		device_printf(sc->dev, "Failed to load context.\n");
		return (-1);
	}

	WRITE4(sc, SDMAARM_INTR, ret);

	device_printf(sc->dev, "Context loaded successfully.\n");
#endif

	return (0);
}

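/*
 * Fetch the "sdma-imx6q" image through firmware(9), verify its header magic
 * and remember where the script address table starts.  The image has to be
 * available at attach time; if it is not, the attach fails and is not
 * retried (see firmware_unavailable).
 */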
static int
load_firmware(struct sdma_softc *sc)
{
	const struct sdma_firmware_header *header;
	const struct firmware *fp;

	fp = firmware_get("sdma-imx6q");
	if (fp == NULL) {
		device_printf(sc->dev, "Can't get firmware.\n");
		return (-1);
	}

	header = fp->data;
	if (header->magic != FW_HEADER_MAGIC) {
		device_printf(sc->dev, "Can't use firmware.\n");
		/* Drop the reference on the unusable image. */
		firmware_put(fp, FIRMWARE_UNLOAD);
		return (-1);
	}

	sc->fw_header = header;
	sc->fw_scripts = (const void *)((const char *)header +
				header->script_addrs_start);

	return (0);
}

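/*
 * Bring the engine up: allocate the channel control block (CCB) array with
 * a context buffer appended, point MC0PTR at it, clear all channel enables
 * and priorities, and then use channel 0 with a C0_SETPM buffer descriptor
 * to copy the firmware RAM code into sDMA program memory, polling for the
 * channel 0 interrupt that signals completion.
 */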
static int
boot_firmware(struct sdma_softc *sc)
{
	struct sdma_buffer_descriptor *bd0;
	const uint32_t *ram_code;
	int timeout;
	int ret;
	int chn;
	int sz;
	int i;

	ram_code = (const void *)((const char *)sc->fw_header +
			sc->fw_header->ram_code_start);

	/* Make sure SDMA has not started yet */
	WRITE4(sc, SDMAARM_MC0PTR, 0);

	sz = SDMA_N_CHANNELS * sizeof(struct sdma_channel_control) +
	    sizeof(struct sdma_context_data);
	sc->ccb = kmem_alloc_contig(sz, M_ZERO, 0, ~0, PAGE_SIZE, 0,
	    VM_MEMATTR_UNCACHEABLE);
	sc->ccb_phys = vtophys(sc->ccb);

	sc->context = (void *)((char *)sc->ccb +
	    SDMA_N_CHANNELS * sizeof(struct sdma_channel_control));
	sc->context_phys = vtophys(sc->context);

	/* Disable all the channels */
	for (i = 0; i < SDMA_N_EVENTS; i++)
		WRITE4(sc, SDMAARM_CHNENBL(i), 0);

	/* All channels have priority 0 */
	for (i = 0; i < SDMA_N_CHANNELS; i++)
		WRITE4(sc, SDMAARM_SDMA_CHNPRI(i), 0);

	/* Channel 0 is used for booting firmware */
	chn = 0;

	sc->bd0 = kmem_alloc_contig(PAGE_SIZE, M_ZERO, 0, ~0, PAGE_SIZE,
	    0, VM_MEMATTR_UNCACHEABLE);
	bd0 = sc->bd0;
	sc->ccb[chn].base_bd_ptr = vtophys(bd0);
	sc->ccb[chn].current_bd_ptr = vtophys(bd0);

	WRITE4(sc, SDMAARM_SDMA_CHNPRI(chn), 1);

	sdma_overrides(sc, chn, 1, 0, 0);

	/*
	 * Channel 0 boot address.  0x4050 appears to be the reset value of
	 * the boot address field (0x0050) with the SMSZ bit set to select
	 * the larger 32-word context size; see the CHN0ADDR register
	 * description in the reference manual.
	 */
	WRITE4(sc, SDMAARM_CHN0ADDR, 0x4050);

	WRITE4(sc, SDMAARM_CONFIG, 0);
	WRITE4(sc, SDMAARM_MC0PTR, sc->ccb_phys);
	WRITE4(sc, SDMAARM_CONFIG, CONFIG_CSM);
	WRITE4(sc, SDMAARM_SDMA_CHNPRI(chn), 1);

	bd0->mode.command = C0_SETPM;
	bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD;
	bd0->mode.count = sc->fw_header->ram_code_size / 2;
	bd0->buffer_addr = vtophys(ram_code);
	bd0->ext_buffer_addr = sc->fw_scripts->ram_code_start_addr;

	WRITE4(sc, SDMAARM_HSTART, 1);

	timeout = 100;
	while (!(ret = READ4(sc, SDMAARM_INTR) & 1)) {
		if (timeout-- <= 0)
			break;
		DELAY(10);
	}

	if (ret == 0) {
		device_printf(sc->dev, "SDMA failed to boot\n");
		return (-1);
	}

	WRITE4(sc, SDMAARM_INTR, ret);

#if 0
	device_printf(sc->dev, "SDMA booted successfully.\n");
#endif

	/* Debug is disabled */
	WRITE4(sc, SDMAARM_ONCE_ENB, 0);

	return (0);
}

static int
sdma_attach(device_t dev)
{
	struct sdma_softc *sc;
	int err;

	sc = device_get_softc(dev);
	sc->dev = dev;

	if (load_firmware(sc) == -1) {
		firmware_unavailable = true;
		return (ENXIO);
	}

	if (bus_alloc_resources(dev, sdma_spec, sc->res)) {
		device_printf(dev, "could not allocate resources\n");
		return (ENXIO);
	}

	/* Memory interface */
	sc->bst = rman_get_bustag(sc->res[0]);
	sc->bsh = rman_get_bushandle(sc->res[0]);

	sdma_sc = sc;

	/* Setup interrupt handler */
	err = bus_setup_intr(dev, sc->res[1], INTR_TYPE_MISC | INTR_MPSAFE,
	    NULL, sdma_intr, sc, &sc->ih);
	if (err) {
		device_printf(dev, "could not setup interrupt handler\n");
		return (ENXIO);
	}

	if (boot_firmware(sc) == -1)
		return (ENXIO);

	return (0);
}

static device_method_t sdma_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		sdma_probe),
	DEVMETHOD(device_attach,	sdma_attach),
	{ 0, 0 }
};

static driver_t sdma_driver = {
	"sdma",
	sdma_methods,
	sizeof(struct sdma_softc),
};

/* We want to attach after all interrupt controllers, before anything else. */
EARLY_DRIVER_MODULE(sdma, simplebus, sdma_driver, 0, 0,
    BUS_PASS_INTERRUPT + BUS_PASS_ORDER_LAST);