xref: /freebsd/sys/arm/freescale/imx/imx6_sdma.c (revision 4b9d6057)
/*-
 * Copyright (c) 2015 Ruslan Bukin <br@bsdpad.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * i.MX6 Smart Direct Memory Access Controller (sDMA)
 * Chapter 41, i.MX 6Dual/6Quad Applications Processor Reference Manual,
 * Rev. 1, 04/2013
 */
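
/*
 * A peripheral driver (an audio FIFO driver, for example) uses the channel
 * API exported by this file roughly as follows.  This is only a sketch: the
 * sdma_conf fields are declared in imx6_sdma.h and their values are
 * peripheral-specific.
 *
 *	chn = sdma_alloc();
 *	conf->command = <per-descriptor transfer command/width>;
 *	conf->saddr = <physical address of the memory buffer>;
 *	conf->daddr = <physical address of the peripheral FIFO>;
 *	conf->event = <SDMA DMA request (event) number of the peripheral>;
 *	conf->num_bd = <number of periods>;
 *	conf->period = <bytes per period>;
 *	conf->ih = <completion callback>;
 *	conf->ih_user = <callback argument>;
 *	sdma_configure(chn, conf);
 *	sdma_start(chn);
 *	...
 *	sdma_stop(chn);
 *	sdma_free(chn);
 */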

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/endian.h>
#include <sys/rman.h>
#include <sys/timeet.h>
#include <sys/timetc.h>
#include <sys/firmware.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_kern.h>
#include <vm/pmap.h>

#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#include <machine/bus.h>
#include <machine/cpu.h>
#include <machine/intr.h>

#include <arm/freescale/imx/imx6_sdma.h>

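/*
 * A channel's ring of buffer descriptors is allocated as a single page (see
 * sdma_alloc()), so this is the largest number of descriptors per channel.
 */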
#define	MAX_BD	(PAGE_SIZE / sizeof(struct sdma_buffer_descriptor))

#define	READ4(_sc, _reg)	\
	bus_space_read_4(_sc->bst, _sc->bsh, _reg)
#define	WRITE4(_sc, _reg, _val)	\
	bus_space_write_4(_sc->bst, _sc->bsh, _reg, _val)

struct sdma_softc *sdma_sc;

static struct resource_spec sdma_spec[] = {
	{ SYS_RES_MEMORY,	0,	RF_ACTIVE },
	{ SYS_RES_IRQ,		0,	RF_ACTIVE },
	{ -1, 0 }
};

/*
 * This will get set to true if we can't load firmware while attaching, to
 * prevent multiple attempts to re-attach the device on each bus pass.
 */
static bool firmware_unavailable;

static void
sdma_intr(void *arg)
{
	struct sdma_buffer_descriptor *bd;
	struct sdma_channel *channel;
	struct sdma_conf *conf;
	struct sdma_softc *sc;
	int pending;
	int i;
	int j;

	sc = arg;

	pending = READ4(sc, SDMAARM_INTR);

	/* Ack intr */
	WRITE4(sc, SDMAARM_INTR, pending);

	for (i = 0; i < SDMA_N_CHANNELS; i++) {
		if ((pending & (1 << i)) == 0)
			continue;
		channel = &sc->channel[i];
		conf = channel->conf;
		if (!conf)
			continue;
		for (j = 0; j < conf->num_bd; j++) {
			bd = &channel->bd[j];
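			/*
			 * Hand the descriptor back to the engine (the SDMA
			 * clears BD_DONE when it finishes a descriptor) so a
			 * cyclic transfer keeps running, and report any error
			 * the engine flagged.
			 */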
			bd->mode.status |= BD_DONE;
			if (bd->mode.status & BD_RROR)
				printf("sDMA error\n");
		}

		conf->ih(conf->ih_user, 1);

		WRITE4(sc, SDMAARM_HSTART, (1 << i));
	}
}

static int
sdma_probe(device_t dev)
{

	if (!ofw_bus_status_okay(dev) || firmware_unavailable)
		return (ENXIO);

	if (!ofw_bus_is_compatible(dev, "fsl,imx6q-sdma"))
		return (ENXIO);

	device_set_desc(dev, "i.MX6 Smart Direct Memory Access Controller");
	return (BUS_PROBE_DEFAULT);
}

int
sdma_start(int chn)
{
	struct sdma_softc *sc;

	sc = sdma_sc;

	WRITE4(sc, SDMAARM_HSTART, (1 << chn));

	return (0);
}

int
sdma_stop(int chn)
{
	struct sdma_softc *sc;

	sc = sdma_sc;

	WRITE4(sc, SDMAARM_STOP_STAT, (1 << chn));

	return (0);
}

int
sdma_alloc(void)
{
	struct sdma_channel *channel;
	struct sdma_softc *sc;
	int found;
	int chn;
	int i;

	sc = sdma_sc;
	found = 0;

	/* Channel 0 can't be used */
	for (i = 1; i < SDMA_N_CHANNELS; i++) {
		channel = &sc->channel[i];
		if (channel->in_use == 0) {
			channel->in_use = 1;
			found = 1;
			break;
		}
	}

	if (!found)
		return (-1);

	chn = i;

	/* Allocate area for buffer descriptors */
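	/*
	 * The descriptor ring must be physically contiguous and uncached:
	 * the SDMA engine fetches it by physical address (see the vtophys()
	 * calls in sdma_configure()) and is not coherent with the CPU caches.
	 */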
	channel->bd = kmem_alloc_contig(PAGE_SIZE, M_ZERO, 0, ~0,
	    PAGE_SIZE, 0, VM_MEMATTR_UNCACHEABLE);

	return (chn);
}

int
sdma_free(int chn)
{
	struct sdma_channel *channel;
	struct sdma_softc *sc;

	sc = sdma_sc;

	channel = &sc->channel[chn];
	channel->in_use = 0;

	kmem_free(channel->bd, PAGE_SIZE);

	return (0);
}

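/*
 * Program the per-channel override bits.  A channel only becomes runnable
 * when each of its start conditions (DMA event pending, host enable, and the
 * unused DSP enable) is either asserted or overridden, so these registers
 * select which conditions are ignored for the given channel.
 */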
static int
sdma_overrides(struct sdma_softc *sc, int chn,
		int evt, int host, int dsp)
{
	int reg;

	/* Ignore sDMA requests */
	reg = READ4(sc, SDMAARM_EVTOVR);
	if (evt)
		reg |= (1 << chn);
	else
		reg &= ~(1 << chn);
	WRITE4(sc, SDMAARM_EVTOVR, reg);

	/* Ignore enable bit (HE) */
	reg = READ4(sc, SDMAARM_HOSTOVR);
	if (host)
		reg |= (1 << chn);
	else
		reg &= ~(1 << chn);
	WRITE4(sc, SDMAARM_HOSTOVR, reg);

	/* Prevent sDMA channel from starting */
	reg = READ4(sc, SDMAARM_DSPOVR);
	if (!dsp)
		reg |= (1 << chn);
	else
		reg &= ~(1 << chn);
	WRITE4(sc, SDMAARM_DSPOVR, reg);

	return (0);
}

int
sdma_configure(int chn, struct sdma_conf *conf)
{
	struct sdma_buffer_descriptor *bd0;
	struct sdma_buffer_descriptor *bd;
	struct sdma_context_data *context;
	struct sdma_channel *channel;
	struct sdma_softc *sc;
#if 0
	int timeout;
	int ret;
#endif
	int i;

	sc = sdma_sc;

	channel = &sc->channel[chn];
	channel->conf = conf;

	/* Ensure operation has stopped */
	sdma_stop(chn);

	/* Set priority and enable the channel */
	WRITE4(sc, SDMAARM_SDMA_CHNPRI(chn), 1);
	WRITE4(sc, SDMAARM_CHNENBL(conf->event), (1 << chn));

	sdma_overrides(sc, chn, 0, 0, 0);

	if (conf->num_bd > MAX_BD) {
		device_printf(sc->dev,
		    "Error: too many buffer descriptors requested\n");
		return (-1);
	}

	for (i = 0; i < conf->num_bd; i++) {
		bd = &channel->bd[i];
		bd->mode.command = conf->command;
		bd->mode.status = BD_DONE | BD_EXTD | BD_CONT | BD_INTR;
		if (i == (conf->num_bd - 1))
			bd->mode.status |= BD_WRAP;
		bd->mode.count = conf->period;
		bd->buffer_addr = conf->saddr + (conf->period * i);
		bd->ext_buffer_addr = 0;
	}

	sc->ccb[chn].base_bd_ptr = vtophys(channel->bd);
	sc->ccb[chn].current_bd_ptr = vtophys(channel->bd);

	/*
	 * Load context.
	 *
	 * i.MX6 Reference Manual: Appendix A SDMA Scripts
	 * A.3.1.7.1 (mcu_2_app)
	 */
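	/*
	 * The context is the channel's saved register state (program counter
	 * and general registers); the SDMA core reloads it every time it
	 * switches to this channel.
	 */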

	/*
	 * TODO: allow using other scripts
	 */
	context = sc->context;
	memset(context, 0, sizeof(*context));
	context->channel_state.pc = sc->fw_scripts->mcu_2_app_addr;

	/*
	 * Tx FIFO 0 address (r6)
	 * Event_mask (r1)
	 * Event2_mask (r0)
	 * Watermark level (r7)
	 */

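	/*
	 * Events 0-31 select a bit in Event_mask (r1); higher-numbered
	 * events select a bit in Event2_mask (r0).
	 */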
	if (conf->event >= 32) {
		context->gReg[0] = (1 << (conf->event % 32));
		context->gReg[1] = 0;
	} else {
		context->gReg[0] = 0;
		context->gReg[1] = (1 << conf->event);
	}

	context->gReg[6] = conf->daddr;
	context->gReg[7] = conf->word_length;

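	/*
	 * Use the channel 0 C0_SETDM command to copy the context built above
	 * from ARM memory into SDMA internal RAM: contexts are stored there
	 * starting at word offset 2048, one sizeof(*context) / 4 word slot
	 * per channel.
	 */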
	bd0 = sc->bd0;
	bd0->mode.command = C0_SETDM;
	bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD;
	bd0->mode.count = sizeof(*context) / 4;
	bd0->buffer_addr = sc->context_phys;
	bd0->ext_buffer_addr = 2048 + (sizeof(*context) / 4) * chn;

	WRITE4(sc, SDMAARM_HSTART, 1);

#if 0
	/* Debug purposes */

	timeout = 1000;
	while (!(ret = READ4(sc, SDMAARM_INTR) & 1)) {
		if (timeout-- <= 0)
			break;
		DELAY(10);
	}

	if (!ret) {
		device_printf(sc->dev, "Failed to load context.\n");
		return (-1);
	}

	WRITE4(sc, SDMAARM_INTR, ret);

	device_printf(sc->dev, "Context loaded successfully.\n");
#endif

	return (0);
}

static int
load_firmware(struct sdma_softc *sc)
{
	const struct sdma_firmware_header *header;
	const struct firmware *fp;

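	/*
	 * The SDMA microcode must already be registered with firmware(9)
	 * under the name "sdma-imx6q", typically by a firmware module
	 * loaded before this driver attaches.
	 */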
	fp = firmware_get("sdma-imx6q");
	if (fp == NULL) {
		device_printf(sc->dev, "Can't get firmware.\n");
		return (-1);
	}

	header = fp->data;
	if (header->magic != FW_HEADER_MAGIC) {
		device_printf(sc->dev, "Can't use firmware.\n");
		return (-1);
	}

	sc->fw_header = header;
	sc->fw_scripts = (const void *)((const char *)header +
				header->script_addrs_start);

	return (0);
}

static int
boot_firmware(struct sdma_softc *sc)
{
	struct sdma_buffer_descriptor *bd0;
	const uint32_t *ram_code;
	int timeout;
	int ret;
	int chn;
	int sz;
	int i;

	ram_code = (const void *)((const char *)sc->fw_header +
			sc->fw_header->ram_code_start);

	/* Make sure SDMA has not started yet */
	WRITE4(sc, SDMAARM_MC0PTR, 0);

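	/*
	 * Allocate one control block per channel plus a single context
	 * buffer (used by sdma_configure() when loading per-channel contexts
	 * through channel 0), all in one contiguous uncached region.
	 */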
	sz = SDMA_N_CHANNELS * sizeof(struct sdma_channel_control) +
	    sizeof(struct sdma_context_data);
	sc->ccb = kmem_alloc_contig(sz, M_ZERO, 0, ~0, PAGE_SIZE, 0,
	    VM_MEMATTR_UNCACHEABLE);
	sc->ccb_phys = vtophys(sc->ccb);

	sc->context = (void *)((char *)sc->ccb +
	    SDMA_N_CHANNELS * sizeof(struct sdma_channel_control));
	sc->context_phys = vtophys(sc->context);

	/* Disable all the channels */
	for (i = 0; i < SDMA_N_EVENTS; i++)
		WRITE4(sc, SDMAARM_CHNENBL(i), 0);

	/* All channels have priority 0 */
	for (i = 0; i < SDMA_N_CHANNELS; i++)
		WRITE4(sc, SDMAARM_SDMA_CHNPRI(i), 0);

	/* Channel 0 is used for booting firmware */
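	/*
	 * It is the only channel that runs the C0_* bootload commands
	 * (C0_SETPM / C0_SETDM) issued on behalf of the ARM core.
	 */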
	chn = 0;

	sc->bd0 = kmem_alloc_contig(PAGE_SIZE, M_ZERO, 0, ~0, PAGE_SIZE,
	    0, VM_MEMATTR_UNCACHEABLE);
	bd0 = sc->bd0;
	sc->ccb[chn].base_bd_ptr = vtophys(bd0);
	sc->ccb[chn].current_bd_ptr = vtophys(bd0);

	WRITE4(sc, SDMAARM_SDMA_CHNPRI(chn), 1);

	sdma_overrides(sc, chn, 1, 0, 0);

	/* XXX: not sure what this is */
	WRITE4(sc, SDMAARM_CHN0ADDR, 0x4050);

	WRITE4(sc, SDMAARM_CONFIG, 0);
	WRITE4(sc, SDMAARM_MC0PTR, sc->ccb_phys);
	WRITE4(sc, SDMAARM_CONFIG, CONFIG_CSM);
	WRITE4(sc, SDMAARM_SDMA_CHNPRI(chn), 1);

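	/*
	 * Copy the RAM-resident scripts into SDMA program memory with the
	 * C0_SETPM command; program memory is 16 bits wide, so the count
	 * is in halfwords.
	 */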
	bd0->mode.command = C0_SETPM;
	bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD;
	bd0->mode.count = sc->fw_header->ram_code_size / 2;
	bd0->buffer_addr = vtophys(ram_code);
	bd0->ext_buffer_addr = sc->fw_scripts->ram_code_start_addr;

	WRITE4(sc, SDMAARM_HSTART, 1);

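	/*
	 * Wait up to ~1 ms for the channel 0 interrupt that signals
	 * command completion.
	 */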
	timeout = 100;
	while (!(ret = READ4(sc, SDMAARM_INTR) & 1)) {
		if (timeout-- <= 0)
			break;
		DELAY(10);
	}

	if (ret == 0) {
		device_printf(sc->dev, "SDMA failed to boot\n");
		return (-1);
	}

	WRITE4(sc, SDMAARM_INTR, ret);

#if 0
	device_printf(sc->dev, "SDMA booted successfully.\n");
#endif

	/* Debug is disabled */
	WRITE4(sc, SDMAARM_ONCE_ENB, 0);

	return (0);
}

static int
sdma_attach(device_t dev)
{
	struct sdma_softc *sc;
	int err;

	sc = device_get_softc(dev);
	sc->dev = dev;

	if (load_firmware(sc) == -1) {
		firmware_unavailable = true;
		return (ENXIO);
	}

	if (bus_alloc_resources(dev, sdma_spec, sc->res)) {
		device_printf(dev, "could not allocate resources\n");
		return (ENXIO);
	}

	/* Memory interface */
	sc->bst = rman_get_bustag(sc->res[0]);
	sc->bsh = rman_get_bushandle(sc->res[0]);

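	/*
	 * There is a single SDMA controller on i.MX6; publish the softc for
	 * the sdma_alloc()/sdma_configure()/sdma_start() consumers.
	 */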
	sdma_sc = sc;

	/* Set up the interrupt handler */
	err = bus_setup_intr(dev, sc->res[1], INTR_TYPE_MISC | INTR_MPSAFE,
	    NULL, sdma_intr, sc, &sc->ih);
	if (err) {
		device_printf(dev, "Unable to set up interrupt handler.\n");
		return (ENXIO);
	}

	if (boot_firmware(sc) == -1)
		return (ENXIO);

	return (0);
}

static device_method_t sdma_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		sdma_probe),
	DEVMETHOD(device_attach,	sdma_attach),
	{ 0, 0 }
};

static driver_t sdma_driver = {
	"sdma",
	sdma_methods,
	sizeof(struct sdma_softc),
};

/* We want to attach after all interrupt controllers, before anything else. */
EARLY_DRIVER_MODULE(sdma, simplebus, sdma_driver, 0, 0,
    BUS_PASS_INTERRUPT + BUS_PASS_ORDER_LAST);