/*-
 * Copyright (c) 2011
 *	Ben Gray <ben.r.gray@gmail.com>.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/interrupt.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>
#include <sys/timetc.h>
#include <machine/bus.h>
#include <machine/intr.h>

#include <dev/fdt/fdt_common.h>
#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#include <arm/ti/ti_cpuid.h>
#include <arm/ti/ti_prcm.h>
#include <arm/ti/ti_sdma.h>
#include <arm/ti/ti_sdmareg.h>

/**
 *	Kernel functions for using the DMA controller
 *
 *	DMA TRANSFERS:
 *	A DMA transfer block consists of a number of frames (FN). Each frame
 *	consists of a number of elements, and each element can have a size of 8, 16,
 *	or 32 bits.
 *
 *	OMAP44xx and newer chips support linked list (aka scatter gather) transfers,
 *	where a linked list of source/destination pairs can be placed in memory
 *	for the H/W to process.  Earlier chips only allowed you to chain multiple
 *	channels together.  However, this linked list feature is currently not
 *	supported by the driver.
 *
 */
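
/*
 * Illustrative usage sketch (not compiled in): a minimal memory-to-memory
 * copy built on the API below.  The callback, buffer addresses and length
 * are hypothetical placeholders, and the DMA_* constant names follow the
 * comments in this file; check ti_sdma.h for the exact definitions.
 */
#if 0
static void
example_xfer_done(unsigned int ch, uint32_t status, void *data)
{
	/* Wake up the thread waiting in example_copy() below */
	wakeup(data);
}

static int
example_copy(uint32_t src_paddr, uint32_t dst_paddr, size_t len)
{
	unsigned int ch;
	int err;

	/* Claim a free channel and register a completion callback */
	err = ti_sdma_activate_channel(&ch, example_xfer_done, &ch);
	if (err != 0)
		return (err);

	/* 32-bit elements, post-incrementing source and destination */
	ti_sdma_set_xfer_data_type(ch, DMA_DATA_32BITS_SCALAR);
	ti_sdma_set_addr_mode(ch, DMA_ADDR_POST_INCREMENT,
	    DMA_ADDR_POST_INCREMENT);
	ti_sdma_enable_channel_irq(ch, DMA_IRQ_FLAG_BLOCK_COMPL);

	/* One frame of (len / 4) 32-bit elements */
	err = ti_sdma_start_xfer(ch, src_paddr, dst_paddr, 1, len / 4);
	if (err == 0)
		err = tsleep(&ch, 0, "sdmaex", hz);

	ti_sdma_deactivate_channel(ch);
	return (err);
}
#endif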

/**
 *	Data structure per DMA channel.
 *
 *
 */
struct ti_sdma_channel {

	/*
	 * The configuration registers for the given channel, these are modified
	 * by the set functions and only written to the actual registers when a
	 * transaction is started.
	 */
	uint32_t		reg_csdp;
	uint32_t		reg_ccr;
	uint32_t		reg_cicr;

	/* Set when one of the configuration registers above changes */
	uint32_t		need_reg_write;

	/* Callback function used when an interrupt is tripped on the given channel */
	void (*callback)(unsigned int ch, uint32_t ch_status, void *data);

	/* Data passed to the callback function */
	void*			callback_data;

};

/**
 *	DMA driver context, allocated and stored globally; this driver is not
 *	intended to ever be unloaded (see ti_sdma_sc).
 *
 */
struct ti_sdma_softc {
	device_t		sc_dev;
	struct resource*	sc_irq_res;
	struct resource*	sc_mem_res;

	/*
	 * In theory we should have a mutex per DMA channel for register
	 * modifications, but since we know we are never going to be run on an SMP
	 * system, we can use just the single lock for all channels.
	 */
	struct mtx		sc_mtx;

	/* Stores the H/W revision read from the registers */
	uint32_t		sc_hw_rev;

	/*
	 * Bits in the sc_active_channels field indicate whether the corresponding
	 * channel has been activated.
	 */
	uint32_t		sc_active_channels;

	struct ti_sdma_channel sc_channel[NUM_DMA_CHANNELS];

};

static struct ti_sdma_softc *ti_sdma_sc = NULL;

/**
 *	Macros for driver mutex locking
 */
#define TI_SDMA_LOCK(_sc)             mtx_lock_spin(&(_sc)->sc_mtx)
#define TI_SDMA_UNLOCK(_sc)           mtx_unlock_spin(&(_sc)->sc_mtx)
#define TI_SDMA_LOCK_INIT(_sc) \
	mtx_init(&_sc->sc_mtx, device_get_nameunit(_sc->sc_dev), \
	         "ti_sdma", MTX_SPIN)
#define TI_SDMA_LOCK_DESTROY(_sc)     mtx_destroy(&_sc->sc_mtx);
#define TI_SDMA_ASSERT_LOCKED(_sc)    mtx_assert(&_sc->sc_mtx, MA_OWNED);
#define TI_SDMA_ASSERT_UNLOCKED(_sc)  mtx_assert(&_sc->sc_mtx, MA_NOTOWNED);

/**
 *	Function prototypes
 *
 */
static void ti_sdma_intr(void *);

/**
 *	ti_sdma_read_4 - reads a 32-bit value from one of the DMA registers
 *	@sc: DMA device context
 *	@off: The offset of a register from the DMA register address range
 *
 *
 *	RETURNS:
 *	32-bit value read from the register.
 */
static inline uint32_t
ti_sdma_read_4(struct ti_sdma_softc *sc, bus_size_t off)
{
	return bus_read_4(sc->sc_mem_res, off);
}

/**
 *	ti_sdma_write_4 - writes a 32-bit value to one of the DMA registers
 *	@sc: DMA device context
 *	@off: The offset of a register from the DMA register address range
 *	@val: The value to write into the register
 *
 *	RETURNS:
 *	nothing
 */
static inline void
ti_sdma_write_4(struct ti_sdma_softc *sc, bus_size_t off, uint32_t val)
{
	bus_write_4(sc->sc_mem_res, off, val);
}

/**
 *	ti_sdma_is_omap3_rev - returns true if H/W is from OMAP3 series
 *	@sc: DMA device context
 *
 */
static inline int
ti_sdma_is_omap3_rev(struct ti_sdma_softc *sc)
{
	return (sc->sc_hw_rev == DMA4_OMAP3_REV);
}

/**
 *	ti_sdma_is_omap4_rev - returns true if H/W is from OMAP4 series
 *	@sc: DMA device context
 *
 */
static inline int
ti_sdma_is_omap4_rev(struct ti_sdma_softc *sc)
{
	return (sc->sc_hw_rev == DMA4_OMAP4_REV);
}

/**
 *	ti_sdma_intr - interrupt handler for all 4 DMA IRQs
 *	@arg: ignored
 *
 *	Called when any of the four DMA IRQs are triggered.
 *
 *	LOCKING:
 *	DMA registers protected by internal mutex
 *
 *	RETURNS:
 *	nothing
 */
static void
ti_sdma_intr(void *arg)
{
	struct ti_sdma_softc *sc = ti_sdma_sc;
	uint32_t intr;
	uint32_t csr;
	unsigned int ch, j;
	struct ti_sdma_channel* channel;

	TI_SDMA_LOCK(sc);

	for (j = 0; j < NUM_DMA_IRQS; j++) {

		/* Get the flag interrupts (enabled) */
		intr = ti_sdma_read_4(sc, DMA4_IRQSTATUS_L(j));
		intr &= ti_sdma_read_4(sc, DMA4_IRQENABLE_L(j));
		if (intr == 0x00000000)
			continue;

		/* Loop through checking the status bits */
		for (ch = 0; ch < NUM_DMA_CHANNELS; ch++) {
			if (intr & (1 << ch)) {
				channel = &sc->sc_channel[ch];

				/* Read the CSR register and verify we don't have a spurious IRQ */
				csr = ti_sdma_read_4(sc, DMA4_CSR(ch));
				if (csr == 0) {
					device_printf(sc->sc_dev, "Spurious DMA IRQ for channel "
					              "%d\n", ch);
					continue;
				}

				/* Sanity check this channel is active */
				if ((sc->sc_active_channels & (1 << ch)) == 0) {
					device_printf(sc->sc_dev, "IRQ %d for a non-activated "
					              "channel %d\n", j, ch);
					continue;
				}

				/* Check the status error codes */
				if (csr & DMA4_CSR_DROP)
					device_printf(sc->sc_dev, "Synchronization event drop "
					              "occurred during the transfer on channel %u\n",
					              ch);
				if (csr & DMA4_CSR_SECURE_ERR)
					device_printf(sc->sc_dev, "Secure transaction error event "
					              "on channel %u\n", ch);
				if (csr & DMA4_CSR_MISALIGNED_ADRS_ERR)
					device_printf(sc->sc_dev, "Misaligned address error event "
					              "on channel %u\n", ch);
				if (csr & DMA4_CSR_TRANS_ERR) {
					device_printf(sc->sc_dev, "Transaction error event on "
					              "channel %u\n", ch);
					/*
					 * According to the Linux code there is an erratum that
					 * says the channel is not disabled upon this error, and
					 * Linux explicitly disables the channel here.  Since I
					 * haven't seen the erratum, I'm going to ignore it for now.
					 */
				}

				/* Clear the status flags for the IRQ */
				ti_sdma_write_4(sc, DMA4_CSR(ch), DMA4_CSR_CLEAR_MASK);
				ti_sdma_write_4(sc, DMA4_IRQSTATUS_L(j), (1 << ch));

				/* Call the callback for the given channel */
				if (channel->callback)
					channel->callback(ch, csr, channel->callback_data);
			}
		}
	}

	TI_SDMA_UNLOCK(sc);

	return;
}

/**
 *	ti_sdma_activate_channel - activates a DMA channel
 *	@ch: upon return contains the channel allocated
 *	@callback: a callback function to associate with the channel
 *	@data: optional data supplied when the callback is called
 *
 *	Simply activates a channel by enabling and writing default values to the
 *	channel's register set.  It doesn't start a transaction, just populates the
 *	internal data structures and sets defaults.
 *
 *	Note this function doesn't enable interrupts; for that you need to call
 *	ti_sdma_enable_channel_irq(). If not using an IRQ to detect the end of the
 *	transfer, you can use ti_sdma_get_channel_status() to poll for a change in
 *	the status.
 *
 *	A channel must be activated before any of the other DMA functions can be
 *	called on it.
 *
 *	LOCKING:
 *	DMA registers protected by internal mutex
 *
 *	RETURNS:
 *	0 on success, otherwise an error code
 */
int
ti_sdma_activate_channel(unsigned int *ch,
                          void (*callback)(unsigned int ch, uint32_t status, void *data),
                          void *data)
{
	struct ti_sdma_softc *sc = ti_sdma_sc;
	struct ti_sdma_channel *channel = NULL;
	uint32_t addr;
	unsigned int i;

	/* Sanity check */
	if (sc == NULL)
		return (ENOMEM);

	if (ch == NULL)
		return (EINVAL);

	TI_SDMA_LOCK(sc);

	/* Check to see if all channels are in use */
	if (sc->sc_active_channels == 0xffffffff) {
		TI_SDMA_UNLOCK(sc);
		return (ENOMEM);
	}

	/* Find the first non-active channel */
	for (i = 0; i < NUM_DMA_CHANNELS; i++) {
		if (!(sc->sc_active_channels & (0x1 << i))) {
			sc->sc_active_channels |= (0x1 << i);
			*ch = i;
			break;
		}
	}

	/* Get the channel struct and populate the fields */
	channel = &sc->sc_channel[*ch];

	channel->callback = callback;
	channel->callback_data = data;

	channel->need_reg_write = 1;

	/* Set the default configuration for the DMA channel */
	channel->reg_csdp = DMA4_CSDP_DATA_TYPE(0x2)
		| DMA4_CSDP_SRC_BURST_MODE(0)
		| DMA4_CSDP_DST_BURST_MODE(0)
		| DMA4_CSDP_SRC_ENDIANISM(0)
		| DMA4_CSDP_DST_ENDIANISM(0)
		| DMA4_CSDP_WRITE_MODE(0)
		| DMA4_CSDP_SRC_PACKED(0)
		| DMA4_CSDP_DST_PACKED(0);

	channel->reg_ccr = DMA4_CCR_DST_ADDRESS_MODE(1)
		| DMA4_CCR_SRC_ADDRESS_MODE(1)
		| DMA4_CCR_READ_PRIORITY(0)
		| DMA4_CCR_WRITE_PRIORITY(0)
		| DMA4_CCR_SYNC_TRIGGER(0)
		| DMA4_CCR_FRAME_SYNC(0)
		| DMA4_CCR_BLOCK_SYNC(0);

	channel->reg_cicr = DMA4_CICR_TRANS_ERR_IE
		| DMA4_CICR_SECURE_ERR_IE
		| DMA4_CICR_SUPERVISOR_ERR_IE
		| DMA4_CICR_MISALIGNED_ADRS_ERR_IE;

	/* Clear all the channel registers, this should abort any transaction */
	for (addr = DMA4_CCR(*ch); addr <= DMA4_COLOR(*ch); addr += 4)
		ti_sdma_write_4(sc, addr, 0x00000000);

	TI_SDMA_UNLOCK(sc);

	return 0;
}

/**
 *	ti_sdma_deactivate_channel - deactivates a channel
 *	@ch: the channel to deactivate
 *
 *
 *
 *	LOCKING:
 *	DMA registers protected by internal mutex
 *
 *	RETURNS:
 *	0 on success, otherwise an error code
 */
int
ti_sdma_deactivate_channel(unsigned int ch)
{
	struct ti_sdma_softc *sc = ti_sdma_sc;
	unsigned int j;
	unsigned int addr;

	/* Sanity check */
	if (sc == NULL)
		return (ENOMEM);

	TI_SDMA_LOCK(sc);

	/* First check if the channel is currently active */
	if ((sc->sc_active_channels & (1 << ch)) == 0) {
		TI_SDMA_UNLOCK(sc);
		return (EBUSY);
	}

	/* Mark the channel as inactive */
	sc->sc_active_channels &= ~(1 << ch);

	/* Disable all DMA interrupts for the channel. */
	ti_sdma_write_4(sc, DMA4_CICR(ch), 0);

	/* Make sure the DMA transfer is stopped. */
	ti_sdma_write_4(sc, DMA4_CCR(ch), 0);

	/* Clear the CSR register and IRQ status register */
	ti_sdma_write_4(sc, DMA4_CSR(ch), DMA4_CSR_CLEAR_MASK);
	for (j = 0; j < NUM_DMA_IRQS; j++) {
		ti_sdma_write_4(sc, DMA4_IRQSTATUS_L(j), (1 << ch));
	}

	/* Clear all the channel registers, this should abort any transaction */
	for (addr = DMA4_CCR(ch); addr <= DMA4_COLOR(ch); addr += 4)
		ti_sdma_write_4(sc, addr, 0x00000000);

	TI_SDMA_UNLOCK(sc);

	return 0;
}

/**
 *	ti_sdma_disable_channel_irq - disables IRQs on the given channel
 *	@ch: the channel to disable IRQs on
 *
 *	Disable interrupt generation for the given channel.
 *
 *	LOCKING:
 *	DMA registers protected by internal mutex
 *
 *	RETURNS:
 *	0 on success, otherwise an error code
 */
int
ti_sdma_disable_channel_irq(unsigned int ch)
{
	struct ti_sdma_softc *sc = ti_sdma_sc;
	uint32_t irq_enable;
	unsigned int j;

	/* Sanity check */
	if (sc == NULL)
		return (ENOMEM);

	TI_SDMA_LOCK(sc);

	if ((sc->sc_active_channels & (1 << ch)) == 0) {
		TI_SDMA_UNLOCK(sc);
		return (EINVAL);
	}

	/* Disable all the individual error conditions */
	sc->sc_channel[ch].reg_cicr = 0x0000;
	ti_sdma_write_4(sc, DMA4_CICR(ch), 0x0000);

	/* Disable the channel interrupt enable */
	for (j = 0; j < NUM_DMA_IRQS; j++) {
		irq_enable = ti_sdma_read_4(sc, DMA4_IRQENABLE_L(j));
		irq_enable &= ~(1 << ch);

		ti_sdma_write_4(sc, DMA4_IRQENABLE_L(j), irq_enable);
	}

	/* Indicate the registers need to be rewritten on the next transaction */
	sc->sc_channel[ch].need_reg_write = 1;

	TI_SDMA_UNLOCK(sc);

	return (0);
}

/**
 *	ti_sdma_enable_channel_irq - enables IRQs on the given channel
 *	@ch: the channel to enable IRQs on
 *	@flags: bitmask of interrupt types to enable
 *
 *	Flags can be a bitmask of the following options:
 *		DMA_IRQ_FLAG_DROP
 *		DMA_IRQ_FLAG_HALF_FRAME_COMPL
 *		DMA_IRQ_FLAG_FRAME_COMPL
 *		DMA_IRQ_FLAG_START_LAST_FRAME
 *		DMA_IRQ_FLAG_BLOCK_COMPL
 *		DMA_IRQ_FLAG_ENDOF_PKT
 *		DMA_IRQ_FLAG_DRAIN
 *
 *
 *	LOCKING:
 *	DMA registers protected by internal mutex
 *
 *	RETURNS:
 *	0 on success, otherwise an error code
 */
int
ti_sdma_enable_channel_irq(unsigned int ch, uint32_t flags)
{
	struct ti_sdma_softc *sc = ti_sdma_sc;
	uint32_t irq_enable;

	/* Sanity check */
	if (sc == NULL)
		return (ENOMEM);

	TI_SDMA_LOCK(sc);

	if ((sc->sc_active_channels & (1 << ch)) == 0) {
		TI_SDMA_UNLOCK(sc);
		return (EINVAL);
	}

	/* Always enable the error interrupts if we have interrupts enabled */
	flags |= DMA4_CICR_TRANS_ERR_IE | DMA4_CICR_SECURE_ERR_IE |
	         DMA4_CICR_SUPERVISOR_ERR_IE | DMA4_CICR_MISALIGNED_ADRS_ERR_IE;

	sc->sc_channel[ch].reg_cicr = flags;

	/* Write the values to the register */
	ti_sdma_write_4(sc, DMA4_CICR(ch), flags);

	/* Enable the channel interrupt enable */
	irq_enable = ti_sdma_read_4(sc, DMA4_IRQENABLE_L(0));
	irq_enable |= (1 << ch);

	ti_sdma_write_4(sc, DMA4_IRQENABLE_L(0), irq_enable);

	/* Indicate the registers need to be rewritten on the next transaction */
	sc->sc_channel[ch].need_reg_write = 1;

	TI_SDMA_UNLOCK(sc);

	return (0);
}
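
/*
 * Illustrative fragment (not compiled in; would live in a consumer's own
 * setup code): enable completion and drop-event reporting on an already
 * activated channel.  The flag names follow the comment above; the exact
 * spelling of the constants lives in ti_sdma.h.
 */
#if 0
	/* Interrupt on block completion and on dropped synchronization events */
	ti_sdma_enable_channel_irq(ch,
	    DMA_IRQ_FLAG_BLOCK_COMPL | DMA_IRQ_FLAG_DROP);
#endif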

/**
 *	ti_sdma_get_channel_status - returns the status of a given channel
 *	@ch: the channel number to get the status of
 *	@status: upon return will contain the status bitmask, see below for possible
 *	         values.
 *
 *	      DMA_STATUS_DROP
 *	      DMA_STATUS_HALF
 *	      DMA_STATUS_FRAME
 *	      DMA_STATUS_LAST
 *	      DMA_STATUS_BLOCK
 *	      DMA_STATUS_SYNC
 *	      DMA_STATUS_PKT
 *	      DMA_STATUS_TRANS_ERR
 *	      DMA_STATUS_SECURE_ERR
 *	      DMA_STATUS_SUPERVISOR_ERR
 *	      DMA_STATUS_MISALIGNED_ADRS_ERR
 *	      DMA_STATUS_DRAIN_END
 *
 *
 *	LOCKING:
 *	DMA registers protected by internal mutex
 *
 *	RETURNS:
 *	0 on success, otherwise an error code
 */
int
ti_sdma_get_channel_status(unsigned int ch, uint32_t *status)
{
	struct ti_sdma_softc *sc = ti_sdma_sc;
	uint32_t csr;

	/* Sanity check */
	if (sc == NULL)
		return (ENOMEM);

	TI_SDMA_LOCK(sc);

	if ((sc->sc_active_channels & (1 << ch)) == 0) {
		TI_SDMA_UNLOCK(sc);
		return (EINVAL);
	}

	/* Read the status register while the channel is still held active */
	csr = ti_sdma_read_4(sc, DMA4_CSR(ch));

	TI_SDMA_UNLOCK(sc);

	if (status != NULL)
		*status = csr;

	return (0);
}
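
/*
 * Illustrative fragment (not compiled in): poll for block completion instead
 * of using an interrupt.  DMA_STATUS_BLOCK follows the list above; the
 * timeout handling is a hypothetical placeholder.
 */
#if 0
	uint32_t status;
	int timeout = 1000;

	do {
		ti_sdma_get_channel_status(ch, &status);
		if (status & DMA_STATUS_BLOCK)
			break;			/* whole block has been transferred */
		DELAY(10);
	} while (--timeout > 0);
#endif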

/**
 *	ti_sdma_start_xfer - starts a DMA transfer
 *	@ch: the channel number to start the transfer on
 *	@src_paddr: the source physical address
 *	@dst_paddr: the destination physical address
 *	@frmcnt: the number of frames per block
 *	@elmcnt: the number of elements in a frame, an element is either an 8, 16
 *           or 32-bit value as defined by ti_sdma_set_xfer_data_type()
 *
 *
 *	LOCKING:
 *	DMA registers protected by internal mutex
 *
 *	RETURNS:
 *	0 on success, otherwise an error code
 */
int
ti_sdma_start_xfer(unsigned int ch, unsigned int src_paddr,
                    unsigned long dst_paddr,
                    unsigned int frmcnt, unsigned int elmcnt)
{
	struct ti_sdma_softc *sc = ti_sdma_sc;
	struct ti_sdma_channel *channel;
	uint32_t ccr;

	/* Sanity check */
	if (sc == NULL)
		return (ENOMEM);

	TI_SDMA_LOCK(sc);

	if ((sc->sc_active_channels & (1 << ch)) == 0) {
		TI_SDMA_UNLOCK(sc);
		return (EINVAL);
	}

	channel = &sc->sc_channel[ch];

	/* a) Write the CSDP register */
	ti_sdma_write_4(sc, DMA4_CSDP(ch),
	    channel->reg_csdp | DMA4_CSDP_WRITE_MODE(1));

	/* b) Set the number of elements per frame CEN[23:0] */
	ti_sdma_write_4(sc, DMA4_CEN(ch), elmcnt);

	/* c) Set the number of frames per block CFN[15:0] */
	ti_sdma_write_4(sc, DMA4_CFN(ch), frmcnt);

	/* d) Set the Source/dest start address index CSSA[31:0]/CDSA[31:0] */
	ti_sdma_write_4(sc, DMA4_CSSA(ch), src_paddr);
	ti_sdma_write_4(sc, DMA4_CDSA(ch), dst_paddr);

	/* e) Write the CCR register */
	ti_sdma_write_4(sc, DMA4_CCR(ch), channel->reg_ccr);

	/* f)  - Set the source element index increment CSEI[15:0] */
	ti_sdma_write_4(sc, DMA4_CSE(ch), 0x0001);

	/*     - Set the source frame index increment CSFI[15:0] */
	ti_sdma_write_4(sc, DMA4_CSF(ch), 0x0001);

	/*     - Set the destination element index increment CDEI[15:0] */
	ti_sdma_write_4(sc, DMA4_CDE(ch), 0x0001);

	/*     - Set the destination frame index increment CDFI[31:0] */
	ti_sdma_write_4(sc, DMA4_CDF(ch), 0x0001);

	/* Clear the status register */
	ti_sdma_write_4(sc, DMA4_CSR(ch), 0x1FFE);

	/* Write the start-bit and away we go */
	ccr = ti_sdma_read_4(sc, DMA4_CCR(ch));
	ccr |= (1 << 7);
	ti_sdma_write_4(sc, DMA4_CCR(ch), ccr);

	/* Clear the reg write flag */
	channel->need_reg_write = 0;

	TI_SDMA_UNLOCK(sc);

	return (0);
}
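
/*
 * Illustrative fragment (not compiled in): frame/element arithmetic for the
 * call above.  Moving 64 KiB as 16-bit elements could, for example, be
 * described as 32 frames of 1024 elements; the channel and addresses are
 * hypothetical placeholders.
 */
#if 0
	ti_sdma_set_xfer_data_type(ch, DMA_DATA_16BITS_SCALAR);
	ti_sdma_start_xfer(ch, src_paddr, dst_paddr, 32, 1024);
#endif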

/**
 *	ti_sdma_start_xfer_packet - starts a packet DMA transfer
 *	@ch: the channel number to use for the transfer
 *	@src_paddr: the source physical address
 *	@dst_paddr: the destination physical address
 *	@frmcnt: the number of frames to transfer
 *	@elmcnt: the number of elements in a frame, an element is either an 8, 16
 *           or 32-bit value as defined by ti_sdma_set_xfer_data_type()
 *	@pktsize: the number of elements in each transfer packet
 *
 *	The @frmcnt and @elmcnt define the overall number of bytes to transfer,
 *	typically @frmcnt is 1 and @elmcnt contains the total number of elements.
 *	@pktsize is the size of each individual packet, there might be multiple
 *	packets per transfer.  i.e. for the following with element size of 32-bits
 *
 *		frmcnt = 1, elmcnt = 512, pktsize = 128
 *
 *	       Total transfer size = 1 * 512 = 512 elements or 2048 bytes
 *	       Packets transferred = 512 / 128 = 4
 *
 *
 *	LOCKING:
 *	DMA registers protected by internal mutex
 *
 *	RETURNS:
 *	0 on success, otherwise an error code
 */
int
ti_sdma_start_xfer_packet(unsigned int ch, unsigned int src_paddr,
                           unsigned long dst_paddr, unsigned int frmcnt,
                           unsigned int elmcnt, unsigned int pktsize)
{
	struct ti_sdma_softc *sc = ti_sdma_sc;
	struct ti_sdma_channel *channel;
	uint32_t ccr;

	/* Sanity check */
	if (sc == NULL)
		return (ENOMEM);

	TI_SDMA_LOCK(sc);

	if ((sc->sc_active_channels & (1 << ch)) == 0) {
		TI_SDMA_UNLOCK(sc);
		return (EINVAL);
	}

	channel = &sc->sc_channel[ch];

	/* a) Write the CSDP register */
	if (channel->need_reg_write)
		ti_sdma_write_4(sc, DMA4_CSDP(ch),
		    channel->reg_csdp | DMA4_CSDP_WRITE_MODE(1));

	/* b) Set the number of elements to transfer CEN[23:0] */
	ti_sdma_write_4(sc, DMA4_CEN(ch), elmcnt);

	/* c) Set the number of frames to transfer CFN[15:0] */
	ti_sdma_write_4(sc, DMA4_CFN(ch), frmcnt);

	/* d) Set the Source/dest start address index CSSA[31:0]/CDSA[31:0] */
	ti_sdma_write_4(sc, DMA4_CSSA(ch), src_paddr);
	ti_sdma_write_4(sc, DMA4_CDSA(ch), dst_paddr);

	/* e) Write the CCR register */
	ti_sdma_write_4(sc, DMA4_CCR(ch),
	    channel->reg_ccr | DMA4_CCR_PACKET_TRANS);

	/* f)  - Set the source element index increment CSEI[15:0] */
	ti_sdma_write_4(sc, DMA4_CSE(ch), 0x0001);

	/*     - Set the packet size, this is dependent on the sync source */
	if (channel->reg_ccr & DMA4_CCR_SEL_SRC_DST_SYNC(1))
		ti_sdma_write_4(sc, DMA4_CSF(ch), pktsize);
	else
		ti_sdma_write_4(sc, DMA4_CDF(ch), pktsize);

	/*     - Set the destination element index increment CDEI[15:0] */
	ti_sdma_write_4(sc, DMA4_CDE(ch), 0x0001);

	/* Clear the status register */
	ti_sdma_write_4(sc, DMA4_CSR(ch), 0x1FFE);

	/* Write the start-bit and away we go */
	ccr = ti_sdma_read_4(sc, DMA4_CCR(ch));
	ccr |= (1 << 7);
	ti_sdma_write_4(sc, DMA4_CCR(ch), ccr);

	/* Clear the reg write flag */
	channel->need_reg_write = 0;

	TI_SDMA_UNLOCK(sc);

	return (0);
}
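
/*
 * Illustrative fragment (not compiled in): the worked example from the
 * comment above, one frame of 512 32-bit elements moved as four packets of
 * 128 elements each.  The channel and addresses are hypothetical placeholders.
 */
#if 0
	ti_sdma_set_xfer_data_type(ch, DMA_DATA_32BITS_SCALAR);
	ti_sdma_start_xfer_packet(ch, src_paddr, dst_paddr, 1, 512, 128);
#endif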

/**
 *	ti_sdma_stop_xfer - stops any currently active transfers
 *	@ch: the channel number to stop the transfer on
 *
 *	This function call is effectively a NOP if no transaction is in progress.
 *
 *	LOCKING:
 *	DMA registers protected by internal mutex
 *
 *	RETURNS:
 *	0 on success, otherwise an error code
 */
int
ti_sdma_stop_xfer(unsigned int ch)
{
	struct ti_sdma_softc *sc = ti_sdma_sc;
	unsigned int j;

	/* Sanity check */
	if (sc == NULL)
		return (ENOMEM);

	TI_SDMA_LOCK(sc);

	if ((sc->sc_active_channels & (1 << ch)) == 0) {
		TI_SDMA_UNLOCK(sc);
		return (EINVAL);
	}

	/* Disable all DMA interrupts for the channel. */
	ti_sdma_write_4(sc, DMA4_CICR(ch), 0);

	/* Make sure the DMA transfer is stopped. */
	ti_sdma_write_4(sc, DMA4_CCR(ch), 0);

	/* Clear the CSR register and IRQ status register */
	ti_sdma_write_4(sc, DMA4_CSR(ch), DMA4_CSR_CLEAR_MASK);
	for (j = 0; j < NUM_DMA_IRQS; j++) {
		ti_sdma_write_4(sc, DMA4_IRQSTATUS_L(j), (1 << ch));
	}

	/* Configuration registers need to be re-written on the next xfer */
	sc->sc_channel[ch].need_reg_write = 1;

	TI_SDMA_UNLOCK(sc);

	return (0);
}

/**
 *	ti_sdma_set_xfer_endianess - sets the endianness of subsequent transfers
 *	@ch: the channel number to set the endianness of
 *	@src: the source endianness (either DMA_ENDIAN_LITTLE or DMA_ENDIAN_BIG)
 *	@dst: the destination endianness (either DMA_ENDIAN_LITTLE or DMA_ENDIAN_BIG)
 *
 *
 *	LOCKING:
 *	DMA registers protected by internal mutex
 *
 *	RETURNS:
 *	0 on success, otherwise an error code
 */
int
ti_sdma_set_xfer_endianess(unsigned int ch, unsigned int src, unsigned int dst)
{
	struct ti_sdma_softc *sc = ti_sdma_sc;

	/* Sanity check */
	if (sc == NULL)
		return (ENOMEM);

	TI_SDMA_LOCK(sc);

	if ((sc->sc_active_channels & (1 << ch)) == 0) {
		TI_SDMA_UNLOCK(sc);
		return (EINVAL);
	}

	sc->sc_channel[ch].reg_csdp &= ~DMA4_CSDP_SRC_ENDIANISM(1);
	sc->sc_channel[ch].reg_csdp |= DMA4_CSDP_SRC_ENDIANISM(src);

	sc->sc_channel[ch].reg_csdp &= ~DMA4_CSDP_DST_ENDIANISM(1);
	sc->sc_channel[ch].reg_csdp |= DMA4_CSDP_DST_ENDIANISM(dst);

	sc->sc_channel[ch].need_reg_write = 1;

	TI_SDMA_UNLOCK(sc);

	return 0;
}

/**
 *	ti_sdma_set_xfer_burst - sets the source and destination burst size
 *	@ch: the channel number to set the burst settings of
 *	@src: the source burst size (either DMA_BURST_NONE, DMA_BURST_16, DMA_BURST_32
 *	      or DMA_BURST_64)
 *	@dst: the destination burst size (either DMA_BURST_NONE, DMA_BURST_16,
 *	      DMA_BURST_32 or DMA_BURST_64)
 *
 *	This function sets the burst size used for all subsequent transfers.
 *
 *	LOCKING:
 *	DMA registers protected by internal mutex
 *
 *	RETURNS:
 *	0 on success, otherwise an error code
 */
int
ti_sdma_set_xfer_burst(unsigned int ch, unsigned int src, unsigned int dst)
{
	struct ti_sdma_softc *sc = ti_sdma_sc;

	/* Sanity check */
	if (sc == NULL)
		return (ENOMEM);

	TI_SDMA_LOCK(sc);

	if ((sc->sc_active_channels & (1 << ch)) == 0) {
		TI_SDMA_UNLOCK(sc);
		return (EINVAL);
	}

	sc->sc_channel[ch].reg_csdp &= ~DMA4_CSDP_SRC_BURST_MODE(0x3);
	sc->sc_channel[ch].reg_csdp |= DMA4_CSDP_SRC_BURST_MODE(src);

	sc->sc_channel[ch].reg_csdp &= ~DMA4_CSDP_DST_BURST_MODE(0x3);
	sc->sc_channel[ch].reg_csdp |= DMA4_CSDP_DST_BURST_MODE(dst);

	sc->sc_channel[ch].need_reg_write = 1;

	TI_SDMA_UNLOCK(sc);

	return 0;
}

/**
 *	ti_sdma_set_xfer_data_type - sets the element data type for transfers
 *	@ch: the channel number to set the data type of
 *	@type: the xfer data type (either DMA_DATA_8BITS_SCALAR, DMA_DATA_16BITS_SCALAR
 *	       or DMA_DATA_32BITS_SCALAR)
 *
 *
 *	LOCKING:
 *	DMA registers protected by internal mutex
 *
 *	RETURNS:
 *	0 on success, otherwise an error code
 */
int
ti_sdma_set_xfer_data_type(unsigned int ch, unsigned int type)
{
	struct ti_sdma_softc *sc = ti_sdma_sc;

	/* Sanity check */
	if (sc == NULL)
		return (ENOMEM);

	TI_SDMA_LOCK(sc);

	if ((sc->sc_active_channels & (1 << ch)) == 0) {
		TI_SDMA_UNLOCK(sc);
		return (EINVAL);
	}

	sc->sc_channel[ch].reg_csdp &= ~DMA4_CSDP_DATA_TYPE(0x3);
	sc->sc_channel[ch].reg_csdp |= DMA4_CSDP_DATA_TYPE(type);

	sc->sc_channel[ch].need_reg_write = 1;

	TI_SDMA_UNLOCK(sc);

	return 0;
}

/**
 *	ti_sdma_set_callback - sets the callback for the given channel
 *	@ch: the channel number to set the callback on
 *	@callback: callback function invoked when an interrupt is tripped on the channel
 *	@data: optional data passed to the callback
 *
 *	LOCKING:
 *	DMA registers protected by internal mutex
 *
 *	RETURNS:
 *	0 on success, otherwise an error code
 */
int
ti_sdma_set_callback(unsigned int ch,
                      void (*callback)(unsigned int ch, uint32_t status, void *data),
                      void *data)
{
	struct ti_sdma_softc *sc = ti_sdma_sc;

	/* Sanity check */
	if (sc == NULL)
		return (ENOMEM);

	TI_SDMA_LOCK(sc);

	if ((sc->sc_active_channels & (1 << ch)) == 0) {
		TI_SDMA_UNLOCK(sc);
		return (EINVAL);
	}

	sc->sc_channel[ch].callback = callback;
	sc->sc_channel[ch].callback_data = data;

	sc->sc_channel[ch].need_reg_write = 1;

	TI_SDMA_UNLOCK(sc);

	return 0;
}

/**
 *	ti_sdma_sync_params - sets channel sync settings
 *	@ch: the channel number to set the sync on
 *	@trigger: the number of the sync trigger, this depends on what other H/W
 *	          module is triggering/receiving the DMA transactions
 *	@mode: flags describing the sync mode to use, it may have one or more of
 *	          the following bits set; TI_SDMA_SYNC_FRAME,
 *	          TI_SDMA_SYNC_BLOCK, TI_SDMA_SYNC_TRIG_ON_SRC.
 *
 *
 *
 *	LOCKING:
 *	DMA registers protected by internal mutex
 *
 *	RETURNS:
 *	0 on success, otherwise an error code
 */
int
ti_sdma_sync_params(unsigned int ch, unsigned int trigger, unsigned int mode)
{
	struct ti_sdma_softc *sc = ti_sdma_sc;
	uint32_t ccr;

	/* Sanity check */
	if (sc == NULL)
		return (ENOMEM);

	TI_SDMA_LOCK(sc);

	if ((sc->sc_active_channels & (1 << ch)) == 0) {
		TI_SDMA_UNLOCK(sc);
		return (EINVAL);
	}

	ccr = sc->sc_channel[ch].reg_ccr;

	ccr &= ~DMA4_CCR_SYNC_TRIGGER(0x7F);
	ccr |= DMA4_CCR_SYNC_TRIGGER(trigger + 1);

	if (mode & TI_SDMA_SYNC_FRAME)
		ccr |= DMA4_CCR_FRAME_SYNC(1);
	else
		ccr &= ~DMA4_CCR_FRAME_SYNC(1);

	if (mode & TI_SDMA_SYNC_BLOCK)
		ccr |= DMA4_CCR_BLOCK_SYNC(1);
	else
		ccr &= ~DMA4_CCR_BLOCK_SYNC(1);

	if (mode & TI_SDMA_SYNC_TRIG_ON_SRC)
		ccr |= DMA4_CCR_SEL_SRC_DST_SYNC(1);
	else
		ccr &= ~DMA4_CCR_SEL_SRC_DST_SYNC(1);

	sc->sc_channel[ch].reg_ccr = ccr;

	sc->sc_channel[ch].need_reg_write = 1;

	TI_SDMA_UNLOCK(sc);

	return 0;
}
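
/*
 * Illustrative fragment (not compiled in): configure a channel for a device
 * synchronised, frame-per-event transfer.  EXAMPLE_TRIGGER is a hypothetical
 * placeholder for the peripheral's sDMA request line number, which comes
 * from the TRM for the peripheral being serviced.
 */
#if 0
	ti_sdma_sync_params(ch, EXAMPLE_TRIGGER,
	    TI_SDMA_SYNC_FRAME | TI_SDMA_SYNC_TRIG_ON_SRC);
#endif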

/**
 *	ti_sdma_set_addr_mode - sets the addressing mode for source and destination
 *	@ch: the channel number to set the addressing mode of
 *	@src_mode: the xfer source addressing mode (either DMA_ADDR_CONSTANT,
 *	          DMA_ADDR_POST_INCREMENT, DMA_ADDR_SINGLE_INDEX or
 *	          DMA_ADDR_DOUBLE_INDEX)
 *	@dst_mode: the xfer destination addressing mode (either DMA_ADDR_CONSTANT,
 *	          DMA_ADDR_POST_INCREMENT, DMA_ADDR_SINGLE_INDEX or
 *	          DMA_ADDR_DOUBLE_INDEX)
 *
 *
 *	LOCKING:
 *	DMA registers protected by internal mutex
 *
 *	RETURNS:
 *	0 on success, otherwise an error code
 */
int
ti_sdma_set_addr_mode(unsigned int ch, unsigned int src_mode,
                       unsigned int dst_mode)
{
	struct ti_sdma_softc *sc = ti_sdma_sc;
	uint32_t ccr;

	/* Sanity check */
	if (sc == NULL)
		return (ENOMEM);

	TI_SDMA_LOCK(sc);

	if ((sc->sc_active_channels & (1 << ch)) == 0) {
		TI_SDMA_UNLOCK(sc);
		return (EINVAL);
	}

	ccr = sc->sc_channel[ch].reg_ccr;

	ccr &= ~DMA4_CCR_SRC_ADDRESS_MODE(0x3);
	ccr |= DMA4_CCR_SRC_ADDRESS_MODE(src_mode);

	ccr &= ~DMA4_CCR_DST_ADDRESS_MODE(0x3);
	ccr |= DMA4_CCR_DST_ADDRESS_MODE(dst_mode);

	sc->sc_channel[ch].reg_ccr = ccr;

	sc->sc_channel[ch].need_reg_write = 1;

	TI_SDMA_UNLOCK(sc);

	return 0;
}
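
/*
 * Illustrative fragment (not compiled in): feed a peripheral FIFO, where the
 * source walks through memory but the destination register address must
 * stay fixed.
 */
#if 0
	ti_sdma_set_addr_mode(ch, DMA_ADDR_POST_INCREMENT, DMA_ADDR_CONSTANT);
#endif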

/**
 *	ti_sdma_probe - driver probe function
 *	@dev: dma device handle
 *
 *
 *
 *	RETURNS:
 *	0 on a successful match, ENXIO otherwise.
 */
static int
ti_sdma_probe(device_t dev)
{

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	if (!ofw_bus_is_compatible(dev, "ti,omap4430-sdma"))
		return (ENXIO);

	device_set_desc(dev, "TI sDMA Controller");
	return (0);
}

/**
 *	ti_sdma_attach - driver attach function
 *	@dev: dma device handle
 *
 *	Initialises memory mapping/pointers to the DMA register set and requests
 *	IRQs. This is effectively the setup function for the driver.
 *
 *	RETURNS:
 *	0 on success or an error code on failure.
 */
static int
ti_sdma_attach(device_t dev)
{
	struct ti_sdma_softc *sc = device_get_softc(dev);
	unsigned int timeout;
	unsigned int i;
	int      rid;
	void    *ihl;
	int      err;

	/* Setup the basics */
	sc->sc_dev = dev;

	/* No channels active at the moment */
	sc->sc_active_channels = 0x00000000;

	/* Mutex to protect the shared data structures */
	TI_SDMA_LOCK_INIT(sc);

	/* Get the memory resource for the register mapping */
	rid = 0;
	sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (sc->sc_mem_res == NULL)
		panic("%s: Cannot map registers", device_get_name(dev));

	/* Enable the interface and functional clocks */
	ti_prcm_clk_enable(SDMA_CLK);

	/* Read the sDMA revision register and sanity check it's a known revision */
	sc->sc_hw_rev = ti_sdma_read_4(sc, DMA4_REVISION);
	device_printf(dev, "sDMA revision %08x\n", sc->sc_hw_rev);

	if (!ti_sdma_is_omap4_rev(sc) && !ti_sdma_is_omap3_rev(sc)) {
		device_printf(sc->sc_dev, "error - unknown sDMA H/W revision\n");
		return (EINVAL);
	}

	/* Disable all interrupts */
	for (i = 0; i < NUM_DMA_IRQS; i++) {
		ti_sdma_write_4(sc, DMA4_IRQENABLE_L(i), 0x00000000);
	}

	/* Soft-reset is only supported on pre-OMAP44xx devices */
	if (ti_sdma_is_omap3_rev(sc)) {

		/* Soft-reset */
		ti_sdma_write_4(sc, DMA4_OCP_SYSCONFIG, 0x0002);

		/* Set the timeout to 100ms */
		timeout = (hz < 10) ? 1 : ((100 * hz) / 1000);

		/* Wait for the DMA reset to complete */
		while ((ti_sdma_read_4(sc, DMA4_SYSSTATUS) & 0x1) == 0x0) {

			/* Sleep for a tick */
			pause("DMARESET", 1);

			if (timeout-- == 0) {
				device_printf(sc->sc_dev, "sDMA reset operation timed out\n");
				return (EINVAL);
			}
		}
	}

	/*
	 * Install interrupt handlers for the four possible interrupts. Any
	 * channel can trip one of the four IRQs.
	 */
	rid = 0;
	sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_ACTIVE | RF_SHAREABLE);
	if (sc->sc_irq_res == NULL)
		panic("Unable to setup the dma irq handler.\n");

	err = bus_setup_intr(dev, sc->sc_irq_res, INTR_TYPE_MISC | INTR_MPSAFE,
	    NULL, ti_sdma_intr, NULL, &ihl);
	if (err)
		panic("%s: Cannot register IRQ", device_get_name(dev));

	/* Store the DMA structure globally ... this driver should never be unloaded */
	ti_sdma_sc = sc;

	return (0);
}

static device_method_t ti_sdma_methods[] = {
	DEVMETHOD(device_probe, ti_sdma_probe),
	DEVMETHOD(device_attach, ti_sdma_attach),
	{0, 0},
};

static driver_t ti_sdma_driver = {
	"ti_sdma",
	ti_sdma_methods,
	sizeof(struct ti_sdma_softc),
};
static devclass_t ti_sdma_devclass;

DRIVER_MODULE(ti_sdma, simplebus, ti_sdma_driver, ti_sdma_devclass, 0, 0);
MODULE_DEPEND(ti_sdma, ti_prcm, 1, 1, 1);