xref: /original-bsd/sys/hp300/dev/dma.c (revision e21485a6)
1 /*
2  * Copyright (c) 1982, 1990 The Regents of the University of California.
3  * All rights reserved.
4  *
5  * %sccs.include.redist.c%
6  *
7  *	@(#)dma.c	7.6 (Berkeley) 06/05/92
8  */
9 
10 /*
11  * DMA driver
12  */
13 
14 #include "param.h"
15 #include "systm.h"
16 #include "time.h"
17 #include "kernel.h"
18 #include "proc.h"
19 
20 #include "dmareg.h"
21 #include "dmavar.h"
22 #include "hp/dev/device.h"
23 
24 #include "../include/cpu.h"
25 #include "../hp300/isr.h"
26 
27 extern void isrlink();
28 extern void _insque();
29 extern void _remque();
30 extern void timeout();
31 extern u_int kvtop();
32 extern void PCIA();
33 
34 /*
35  * The largest single request will be MAXPHYS bytes which will require
36  * at most MAXPHYS/NBPG+1 chain elements to describe, i.e. if none of
37  * the buffer pages are physically contiguous (MAXPHYS/NBPG) and the
38  * buffer is not page aligned (+1).
39  */
40 #define	DMAMAXIO	(MAXPHYS/NBPG+1)
41 
/*
 * One segment of a DMA scatter/gather chain: a physically contiguous
 * run of memory.  dc_count is in transfer units (bytes, words, or
 * longwords depending on the DMAGO_* mode of the current transfer).
 */
struct	dma_chain {
	int	dc_count;	/* remaining transfer count for this segment */
	char	*dc_addr;	/* physical start address of this segment */
};

/*
 * Per-channel software state.
 */
struct	dma_softc {
	struct	dmadevice *sc_hwaddr;	/* channel registers (98620C layout) */
	struct	dmaBdevice *sc_Bhwaddr;	/* channel registers (98620B layout) */
	char	sc_type;	/* card revision: DMA_B or DMA_C */
	char	sc_flags;	/* DMAF_* flags, see below */
	u_short	sc_cmd;		/* command word used when arming the channel */
	struct	dma_chain *sc_cur;	/* segment currently being transferred */
	struct	dma_chain *sc_last;	/* final segment of the chain */
	struct	dma_chain sc_chain[DMAMAXIO];	/* the scatter/gather chain */
} dma_softc[NDMA];
57 
/* card types (sc_type); the B card only supports 16-bit counts/transfers */
#define	DMA_B	0
#define DMA_C	1

/* flags (sc_flags) */
#define DMAF_PCFLUSH	0x01	/* flush physical cache when DMA completes */
#define DMAF_VCFLUSH	0x02	/* flush virtual cache (VAC) when DMA completes */
#define DMAF_NOINTR	0x04	/* suppress completion intr on last segment */

/*
 * Per-channel queues of devices using that channel; dmachan[NDMA] is
 * the overflow queue of requests waiting for any acceptable channel.
 */
struct	devqueue dmachan[NDMA + 1];
int	dmaintr();

#ifdef DEBUG
int	dmadebug = 0;
#define DDB_WORD	0x01	/* same as DMAGO_WORD */
#define DDB_LWORD	0x02	/* same as DMAGO_LWORD */
#define	DDB_FOLLOW	0x04	/* trace function entry */
#define DDB_IO		0x08	/* dump chain setup / per-segment interrupts */

void	dmatimeout();
int	dmatimo[NDMA];		/* nonzero while a transfer is outstanding */

long	dmahits[NDMA];		/* segments merged into their predecessor */
long	dmamisses[NDMA];	/* segments that could not be merged */
long	dmabyte[NDMA];		/* byte-mode transfers started */
long	dmaword[NDMA];		/* word-mode transfers started */
long	dmalword[NDMA];		/* longword-mode transfers started */
#endif
86 
87 void
88 dmainit()
89 {
90 	register struct dmareg *dma = (struct dmareg *)DMA_BASE;
91 	register struct dma_softc *dc;
92 	register int i;
93 	char rev;
94 
95 	/*
96 	 * Determine the DMA type.
97 	 * Don't know how to easily differentiate the A and B cards,
98 	 * so we just hope nobody has an A card (A cards will work if
99 	 * DMAINTLVL is set to 3).
100 	 */
101 	if (!badbaddr((char *)&dma->dma_id[2]))
102 		rev = dma->dma_id[2];
103 	else {
104 		rev = 'B';
105 #if !defined(HP320)
106 		panic("dmainit: DMA card requires hp320 support");
107 #endif
108 	}
109 
110 	dc = &dma_softc[0];
111 	for (i = 0; i < NDMA; i++) {
112 		dc->sc_hwaddr = (i & 1) ? &dma->dma_chan1 : &dma->dma_chan0;
113 		dc->sc_Bhwaddr = (i & 1) ? &dma->dma_Bchan1 : &dma->dma_Bchan0;
114 		dc->sc_type = rev == 'B' ? DMA_B : DMA_C;
115 		dc++;
116 		dmachan[i].dq_forw = dmachan[i].dq_back = &dmachan[i];
117 	}
118 	dmachan[i].dq_forw = dmachan[i].dq_back = &dmachan[i];
119 #ifdef DEBUG
120 	/* make sure timeout is really not needed */
121 	timeout(dmatimeout, 0, 30 * hz);
122 #endif
123 
124 	printf("dma: 98620%c with 2 channels, %d bit DMA\n",
125 	       rev, rev == 'B' ? 16 : 32);
126 }
127 
128 int
129 dmareq(dq)
130 	register struct devqueue *dq;
131 {
132 	register int i;
133 	register int chan;
134 	register int s = splbio();
135 
136 	chan = dq->dq_ctlr;
137 	i = NDMA;
138 	while (--i >= 0) {
139 		if ((chan & (1 << i)) == 0)
140 			continue;
141 		if (dmachan[i].dq_forw != &dmachan[i])
142 			continue;
143 		insque(dq, &dmachan[i]);
144 		dq->dq_ctlr = i;
145 		splx(s);
146 		return(1);
147 	}
148 	insque(dq, dmachan[NDMA].dq_back);
149 	splx(s);
150 	return(0);
151 }
152 
/*
 * Release the DMA channel held by dq: clear the channel hardware,
 * perform any cache flushes deferred from dmastop(), dequeue dq, and
 * hand the channel to the first waiter (if any) that can use it.
 */
void
dmafree(dq)
	register struct devqueue *dq;
{
	int unit = dq->dq_ctlr;
	register struct dma_softc *dc = &dma_softc[unit];
	register struct devqueue *dn;
	register int chan, s;

	s = splbio();
#ifdef DEBUG
	dmatimo[unit] = 0;	/* transfer done, disarm watchdog */
#endif
	DMA_CLEAR(dc);
#if defined(HP360) || defined(HP370) || defined(HP380)
	/*
	 * XXX we may not always go thru the flush code in dmastop()
	 */
	if (dc->sc_flags & DMAF_PCFLUSH) {
		PCIA();
		dc->sc_flags &= ~DMAF_PCFLUSH;
	}
#endif
#if defined(HP320) || defined(HP350)
	if (dc->sc_flags & DMAF_VCFLUSH) {
		/*
		 * 320/350s have VACs that may also need flushing.
		 * In our case we only flush the supervisor side
		 * because we know that if we are DMAing to user
		 * space, the physical pages will also be mapped
		 * in kernel space (via vmapbuf) and hence cache-
		 * inhibited by the pmap module due to the multiple
		 * mapping.
		 */
		DCIS();
		dc->sc_flags &= ~DMAF_VCFLUSH;
	}
#endif
	remque(dq);
	chan = 1 << unit;
	/* scan the overflow queue for a waiter that accepts this channel */
	for (dn = dmachan[NDMA].dq_forw;
	     dn != &dmachan[NDMA]; dn = dn->dq_forw) {
		if (dn->dq_ctlr & chan) {
			remque((caddr_t)dn);
			/*
			 * NOTE(review): inserts the waiter at dq's old queue
			 * position via dq->dq_back — assumes remque() leaves
			 * the removed element's links intact; verify against
			 * the _remque implementation.
			 */
			insque((caddr_t)dn, (caddr_t)dq->dq_back);
			splx(s);
			dn->dq_ctlr = dq->dq_ctlr;
			(dn->dq_driver->d_start)(dn->dq_unit);
			return;
		}
	}
	splx(s);
}
206 
/*
 * Start a DMA transfer of count bytes at kernel virtual address addr
 * on the given channel: build a physical scatter/gather chain, set up
 * the command word from flags, and arm the channel.  DMAGO_READ means
 * the device writes memory; count must not exceed MAXPHYS.
 */
void
dmago(unit, addr, count, flags)
	int unit;
	register char *addr;
	register int count;
	register int flags;
{
	register struct dma_softc *dc = &dma_softc[unit];
	register struct dma_chain *dcp;
	register char *dmaend = NULL;	/* phys addr just past previous segment */
	register int tcount;

	if (count > MAXPHYS)
		panic("dmago: count > MAXPHYS");
#if defined(HP320)
	if (dc->sc_type == DMA_B && (flags & DMAGO_LWORD))
		panic("dmago: no can do 32-bit DMA");
#endif
#ifdef DEBUG
	if (dmadebug & DDB_FOLLOW)
		printf("dmago(%d, %x, %x, %x)\n",
		       unit, addr, count, flags);
	if (flags & DMAGO_LWORD)
		dmalword[unit]++;
	else if (flags & DMAGO_WORD)
		dmaword[unit]++;
	else
		dmabyte[unit]++;
#endif
	/*
	 * Build the DMA chain, one page at a time, merging physically
	 * contiguous pages into a single segment.
	 */
	for (dcp = dc->sc_chain; count > 0; dcp++) {
		dcp->dc_addr = (char *) kvtop(addr);
#if defined(HP380)
		/*
		 * Push back dirty cache lines
		 */
		if (mmutype == MMU_68040)
			DCFP(dcp->dc_addr);
#endif
		/* tcount = bytes from addr to the end of its page */
		if (count < (tcount = NBPG - ((int)addr & PGOFSET)))
			tcount = count;
		dcp->dc_count = tcount;
		addr += tcount;
		count -= tcount;
		/* convert byte count to transfer units for the chain */
		if (flags & DMAGO_LWORD)
			tcount >>= 2;
		else if (flags & DMAGO_WORD)
			tcount >>= 1;
		/*
		 * If this page follows the previous segment physically,
		 * extend that segment rather than starting a new one.
		 */
		if (dcp->dc_addr == dmaend
#if defined(HP320)
		    /* only 16-bit count on 98620B */
		    && (dc->sc_type != DMA_B ||
			(dcp-1)->dc_count + tcount <= 65536)
#endif
		) {
#ifdef DEBUG
			dmahits[unit]++;
#endif
			/* advance dmaend by bytes, the segment by units */
			dmaend += dcp->dc_count;
			(--dcp)->dc_count += tcount;
		} else {
#ifdef DEBUG
			dmamisses[unit]++;
#endif
			dmaend = dcp->dc_addr + dcp->dc_count;
			dcp->dc_count = tcount;
		}
	}
	dc->sc_cur = dc->sc_chain;
	dc->sc_last = --dcp;
	dc->sc_flags = 0;
	/*
	 * Set up the command word based on flags
	 */
	dc->sc_cmd = DMA_ENAB | DMA_IPL(DMAINTLVL) | DMA_START;
	if ((flags & DMAGO_READ) == 0)
		dc->sc_cmd |= DMA_WRT;
	if (flags & DMAGO_LWORD)
		dc->sc_cmd |= DMA_LWORD;
	else if (flags & DMAGO_WORD)
		dc->sc_cmd |= DMA_WORD;
	if (flags & DMAGO_PRI)
		dc->sc_cmd |= DMA_PRI;
#if defined(HP380)
	/*
	 * On the 68040 we need to flush (push) the data cache before a
	 * DMA (already done above) and flush again after DMA completes.
	 * In theory we should only need to flush prior to a write DMA
	 * and purge after a read DMA but if the entire page is not
	 * involved in the DMA we might purge some valid data.
	 */
	if (mmutype == MMU_68040 && (flags & DMAGO_READ))
		dc->sc_flags |= DMAF_PCFLUSH;
#endif
#if defined(HP360) || defined(HP370)
	/*
	 * Remember if we need to flush external physical cache when
	 * DMA is done.  We only do this if we are reading (writing memory).
	 */
	if (ectype == EC_PHYS && (flags & DMAGO_READ))
		dc->sc_flags |= DMAF_PCFLUSH;
#endif
#if defined(HP320) || defined(HP350)
	if (ectype == EC_VIRT && (flags & DMAGO_READ))
		dc->sc_flags |= DMAF_VCFLUSH;
#endif
	/*
	 * Remember if we can skip the dma completion interrupt on
	 * the last segment in the chain.
	 */
	if (flags & DMAGO_NOINT) {
		if (dc->sc_cur == dc->sc_last)
			dc->sc_cmd &= ~DMA_ENAB;
		else
			dc->sc_flags |= DMAF_NOINTR;
	}
#ifdef DEBUG
	if (dmadebug & DDB_IO)
		if ((dmadebug&DDB_WORD) && (dc->sc_cmd&DMA_WORD) ||
		    (dmadebug&DDB_LWORD) && (dc->sc_cmd&DMA_LWORD)) {
			printf("dmago: cmd %x, flags %x\n",
			       dc->sc_cmd, dc->sc_flags);
			for (dcp = dc->sc_chain; dcp <= dc->sc_last; dcp++)
				printf("  %d: %d@%x\n", dcp-dc->sc_chain,
				       dcp->dc_count, dcp->dc_addr);
		}
	dmatimo[unit] = 1;	/* arm the debug watchdog */
#endif
	DMA_ARM(dc);
}
339 
/*
 * Finish a transfer on the given channel: clear the channel hardware,
 * do any cache flushing recorded in sc_flags, and notify the owning
 * device via its "done" routine.  Called from dmaintr() when the
 * chain is exhausted.
 */
void
dmastop(unit)
	register int unit;
{
	register struct dma_softc *dc = &dma_softc[unit];
	register struct devqueue *dq;

#ifdef DEBUG
	if (dmadebug & DDB_FOLLOW)
		printf("dmastop(%d)\n", unit);
	dmatimo[unit] = 0;	/* transfer done, disarm watchdog */
#endif
	DMA_CLEAR(dc);
#if defined(HP360) || defined(HP370) || defined(HP380)
	if (dc->sc_flags & DMAF_PCFLUSH) {
		PCIA();
		dc->sc_flags &= ~DMAF_PCFLUSH;
	}
#endif
#if defined(HP320) || defined(HP350)
	if (dc->sc_flags & DMAF_VCFLUSH) {
		/*
		 * 320/350s have VACs that may also need flushing.
		 * In our case we only flush the supervisor side
		 * because we know that if we are DMAing to user
		 * space, the physical pages will also be mapped
		 * in kernel space (via vmapbuf) and hence cache-
		 * inhibited by the pmap module due to the multiple
		 * mapping.
		 */
		DCIS();
		dc->sc_flags &= ~DMAF_VCFLUSH;
	}
#endif
	/*
	 * We may get this interrupt after a device service routine
	 * has freed the dma channel.  So, ignore the intr if there's
	 * nothing on the queue.
	 */
	dq = dmachan[unit].dq_forw;
	if (dq != &dmachan[unit])
		(dq->dq_driver->d_done)(dq->dq_unit);
}
383 
/*
 * DMA completion interrupt.  For each channel showing an interrupt,
 * advance to the next chain segment and re-arm, or call dmastop()
 * when the chain is exhausted.  Returns nonzero if any channel
 * claimed the interrupt.
 */
int
dmaintr()
{
	register struct dma_softc *dc;
	register int i, stat;
	int found = 0;

#ifdef DEBUG
	if (dmadebug & DDB_FOLLOW)
		printf("dmaintr\n");
#endif
	for (i = 0, dc = dma_softc; i < NDMA; i++, dc++) {
		stat = DMA_STAT(dc);
		if ((stat & DMA_INTR) == 0)
			continue;
		found++;
#ifdef DEBUG
		if (dmadebug & DDB_IO) {
			if ((dmadebug&DDB_WORD) && (dc->sc_cmd&DMA_WORD) ||
			    (dmadebug&DDB_LWORD) && (dc->sc_cmd&DMA_LWORD))
				printf("dmaintr: unit %d stat %x next %d\n",
				       i, stat, (dc->sc_cur-dc->sc_chain)+1);
		}
		if (stat & DMA_ARMED)
			printf("dma%d: intr when armed\n", i);
#endif
		/* more segments remain?  arm the next one */
		if (++dc->sc_cur <= dc->sc_last) {
#ifdef DEBUG
			dmatimo[i] = 1;	/* re-arm the debug watchdog */
#endif
			/*
			 * About to arm the final segment: suppress its
			 * completion interrupt if the caller asked for
			 * DMAGO_NOINT.
			 */
			if (dc->sc_cur == dc->sc_last &&
			    (dc->sc_flags & DMAF_NOINTR))
				dc->sc_cmd &= ~DMA_ENAB;
			DMA_CLEAR(dc);
			DMA_ARM(dc);
		} else
			dmastop(i);
	}
	return(found);
}
427 
428 #ifdef DEBUG
429 void
430 dmatimeout()
431 {
432 	register int i, s;
433 
434 	for (i = 0; i < NDMA; i++) {
435 		s = splbio();
436 		if (dmatimo[i]) {
437 			if (dmatimo[i] > 1)
438 				printf("dma%d: timeout #%d\n",
439 				       i, dmatimo[i]-1);
440 			dmatimo[i]++;
441 		}
442 		splx(s);
443 	}
444 	timeout(dmatimeout, (caddr_t)0, 30 * hz);
445 }
446 #endif
447