1 /*
2 * Copyright (c) 1982, 1990, 1993
3 * The Regents of the University of California. All rights reserved.
4 *
5 * %sccs.include.redist.c%
6 *
7 * @(#)dma.c 8.1 (Berkeley) 06/10/93
8 */
9
10 /*
11 * DMA driver
12 */
13
14 #include <sys/param.h>
15 #include <sys/systm.h>
16 #include <sys/time.h>
17 #include <sys/kernel.h>
18 #include <sys/proc.h>
19
20 #include <hp300/dev/dmareg.h>
21 #include <hp300/dev/dmavar.h>
22
23 #include <hp/dev/device.h>
24
25 #include <machine/cpu.h>
26 #include <hp300/hp300/isr.h>
27
28 extern void isrlink();
29 extern void _insque();
30 extern void _remque();
31 extern void timeout();
32 extern u_int kvtop();
33 extern void PCIA();
34
35 /*
36 * The largest single request will be MAXPHYS bytes which will require
37 * at most MAXPHYS/NBPG+1 chain elements to describe, i.e. if none of
38 * the buffer pages are physically contiguous (MAXPHYS/NBPG) and the
39 * buffer is not page aligned (+1).
40 */
41 #define DMAMAXIO (MAXPHYS/NBPG+1)
42
/*
 * One scatter/gather segment of a DMA transfer.
 */
struct dma_chain {
	int dc_count;	/* transfer count (in bytes, words, or longwords) */
	char *dc_addr;	/* physical start address of segment */
};
47
/*
 * Per-channel software state.
 */
struct dma_softc {
	struct dmadevice *sc_hwaddr;	/* 98620C-style channel registers */
	struct dmaBdevice *sc_Bhwaddr;	/* 98620B-style channel registers */
	char sc_type;			/* DMA_B or DMA_C (see below) */
	char sc_flags;			/* DMAF_* completion flags (see below) */
	u_short sc_cmd;			/* command word to load on (re)arm */
	struct dma_chain *sc_cur;	/* segment currently being transferred */
	struct dma_chain *sc_last;	/* last segment of the chain */
	struct dma_chain sc_chain[DMAMAXIO];	/* scatter/gather chain */
} dma_softc[NDMA];
58
59 /* types */
60 #define DMA_B 0
61 #define DMA_C 1
62
63 /* flags */
64 #define DMAF_PCFLUSH 0x01
65 #define DMAF_VCFLUSH 0x02
66 #define DMAF_NOINTR 0x04
67
68 struct devqueue dmachan[NDMA + 1];
69 int dmaintr();
70
71 #ifdef DEBUG
72 int dmadebug = 0;
73 #define DDB_WORD 0x01 /* same as DMAGO_WORD */
74 #define DDB_LWORD 0x02 /* same as DMAGO_LWORD */
75 #define DDB_FOLLOW 0x04
76 #define DDB_IO 0x08
77
78 void dmatimeout();
79 int dmatimo[NDMA];
80
81 long dmahits[NDMA];
82 long dmamisses[NDMA];
83 long dmabyte[NDMA];
84 long dmaword[NDMA];
85 long dmalword[NDMA];
86 #endif
87
88 void
dmainit()89 dmainit()
90 {
91 register struct dmareg *dma = (struct dmareg *)DMA_BASE;
92 register struct dma_softc *dc;
93 register int i;
94 char rev;
95
96 /*
97 * Determine the DMA type.
98 * Don't know how to easily differentiate the A and B cards,
99 * so we just hope nobody has an A card (A cards will work if
100 * DMAINTLVL is set to 3).
101 */
102 if (!badbaddr((char *)&dma->dma_id[2]))
103 rev = dma->dma_id[2];
104 else {
105 rev = 'B';
106 #if !defined(HP320)
107 panic("dmainit: DMA card requires hp320 support");
108 #endif
109 }
110
111 dc = &dma_softc[0];
112 for (i = 0; i < NDMA; i++) {
113 dc->sc_hwaddr = (i & 1) ? &dma->dma_chan1 : &dma->dma_chan0;
114 dc->sc_Bhwaddr = (i & 1) ? &dma->dma_Bchan1 : &dma->dma_Bchan0;
115 dc->sc_type = rev == 'B' ? DMA_B : DMA_C;
116 dc++;
117 dmachan[i].dq_forw = dmachan[i].dq_back = &dmachan[i];
118 }
119 dmachan[i].dq_forw = dmachan[i].dq_back = &dmachan[i];
120 #ifdef DEBUG
121 /* make sure timeout is really not needed */
122 timeout(dmatimeout, 0, 30 * hz);
123 #endif
124
125 printf("dma: 98620%c with 2 channels, %d bit DMA\n",
126 rev, rev == 'B' ? 16 : 32);
127 }
128
129 int
dmareq(dq)130 dmareq(dq)
131 register struct devqueue *dq;
132 {
133 register int i;
134 register int chan;
135 register int s = splbio();
136
137 chan = dq->dq_ctlr;
138 i = NDMA;
139 while (--i >= 0) {
140 if ((chan & (1 << i)) == 0)
141 continue;
142 if (dmachan[i].dq_forw != &dmachan[i])
143 continue;
144 insque(dq, &dmachan[i]);
145 dq->dq_ctlr = i;
146 splx(s);
147 return(1);
148 }
149 insque(dq, dmachan[NDMA].dq_back);
150 splx(s);
151 return(0);
152 }
153
/*
 * Release the DMA channel held by request `dq': clear the hardware,
 * perform any deferred cache flushing, dequeue the request, and hand
 * the channel to the first compatible waiter (if any) by starting its
 * driver.
 */
void
dmafree(dq)
	register struct devqueue *dq;
{
	int unit = dq->dq_ctlr;
	register struct dma_softc *dc = &dma_softc[unit];
	register struct devqueue *dn;
	register int chan, s;

	s = splbio();
#ifdef DEBUG
	/* disarm the watchdog for this channel */
	dmatimo[unit] = 0;
#endif
	DMA_CLEAR(dc);
#if defined(HP360) || defined(HP370) || defined(HP380)
	/*
	 * XXX we may not always go thru the flush code in dmastop()
	 */
	if (dc->sc_flags & DMAF_PCFLUSH) {
		PCIA();
		dc->sc_flags &= ~DMAF_PCFLUSH;
	}
#endif
#if defined(HP320) || defined(HP350)
	if (dc->sc_flags & DMAF_VCFLUSH) {
		/*
		 * 320/350s have VACs that may also need flushing.
		 * In our case we only flush the supervisor side
		 * because we know that if we are DMAing to user
		 * space, the physical pages will also be mapped
		 * in kernel space (via vmapbuf) and hence cache-
		 * inhibited by the pmap module due to the multiple
		 * mapping.
		 */
		DCIS();
		dc->sc_flags &= ~DMAF_VCFLUSH;
	}
#endif
	remque(dq);
	chan = 1 << unit;
	/*
	 * Walk the shared wait queue (dmachan[NDMA]) looking for the first
	 * request whose channel mask accepts the channel just freed; move
	 * it onto this channel's queue and kick its driver's start routine.
	 */
	for (dn = dmachan[NDMA].dq_forw;
	     dn != &dmachan[NDMA]; dn = dn->dq_forw) {
		if (dn->dq_ctlr & chan) {
			remque((caddr_t)dn);
			insque((caddr_t)dn, (caddr_t)dq->dq_back);
			splx(s);
			/* grant the waiter the channel we just released */
			dn->dq_ctlr = dq->dq_ctlr;
			(dn->dq_driver->d_start)(dn->dq_unit);
			return;
		}
	}
	splx(s);
}
207
/*
 * Start a DMA transfer of `count' bytes at kernel virtual address `addr'
 * on channel `unit'.  `flags' (DMAGO_*) select transfer width, direction,
 * priority, and whether the final-segment interrupt may be suppressed.
 * Builds the scatter/gather chain, records any cache flushing needed at
 * completion, and arms the hardware.
 */
void
dmago(unit, addr, count, flags)
	int unit;
	register char *addr;
	register int count;
	register int flags;
{
	register struct dma_softc *dc = &dma_softc[unit];
	register struct dma_chain *dcp;
	register char *dmaend = NULL;
	register int tcount;

	if (count > MAXPHYS)
		panic("dmago: count > MAXPHYS");
#if defined(HP320)
	/* the 98620B has no 32-bit mode */
	if (dc->sc_type == DMA_B && (flags & DMAGO_LWORD))
		panic("dmago: no can do 32-bit DMA");
#endif
#ifdef DEBUG
	if (dmadebug & DDB_FOLLOW)
		printf("dmago(%d, %x, %x, %x)\n",
		       unit, addr, count, flags);
	if (flags & DMAGO_LWORD)
		dmalword[unit]++;
	else if (flags & DMAGO_WORD)
		dmaword[unit]++;
	else
		dmabyte[unit]++;
#endif
	/*
	 * Build the DMA chain: one segment per page of the buffer, with
	 * physically contiguous pages merged into the previous segment
	 * (a "hit").  Note that dc_count ends up in transfer units
	 * (bytes, words, or longwords depending on flags); the dmaend
	 * contiguity check works in bytes, using the byte count that is
	 * temporarily stored in dc_count before tcount is shifted.
	 */
	for (dcp = dc->sc_chain; count > 0; dcp++) {
		dcp->dc_addr = (char *) kvtop(addr);
#if defined(HP380)
		/*
		 * Push back dirty cache lines
		 */
		if (mmutype == MMU_68040)
			DCFP(dcp->dc_addr);
#endif
		/* byte count for this segment: to end of page or end of buffer */
		if (count < (tcount = NBPG - ((int)addr & PGOFSET)))
			tcount = count;
		dcp->dc_count = tcount;
		addr += tcount;
		count -= tcount;
		/* convert byte count to transfer units */
		if (flags & DMAGO_LWORD)
			tcount >>= 2;
		else if (flags & DMAGO_WORD)
			tcount >>= 1;
		if (dcp->dc_addr == dmaend
#if defined(HP320)
		    /* only 16-bit count on 98620B */
		    && (dc->sc_type != DMA_B ||
			(dcp-1)->dc_count + tcount <= 65536)
#endif
		    ) {
#ifdef DEBUG
			dmahits[unit]++;
#endif
			/* contiguous: fold this page into the previous segment */
			dmaend += dcp->dc_count;
			(--dcp)->dc_count += tcount;
		} else {
#ifdef DEBUG
			dmamisses[unit]++;
#endif
			/* discontiguous: start a new segment */
			dmaend = dcp->dc_addr + dcp->dc_count;
			dcp->dc_count = tcount;
		}
	}
	dc->sc_cur = dc->sc_chain;
	dc->sc_last = --dcp;
	dc->sc_flags = 0;
	/*
	 * Set up the command word based on flags
	 */
	dc->sc_cmd = DMA_ENAB | DMA_IPL(DMAINTLVL) | DMA_START;
	if ((flags & DMAGO_READ) == 0)
		dc->sc_cmd |= DMA_WRT;
	if (flags & DMAGO_LWORD)
		dc->sc_cmd |= DMA_LWORD;
	else if (flags & DMAGO_WORD)
		dc->sc_cmd |= DMA_WORD;
	if (flags & DMAGO_PRI)
		dc->sc_cmd |= DMA_PRI;
#if defined(HP380)
	/*
	 * On the 68040 we need to flush (push) the data cache before a
	 * DMA (already done above) and flush again after DMA completes.
	 * In theory we should only need to flush prior to a write DMA
	 * and purge after a read DMA but if the entire page is not
	 * involved in the DMA we might purge some valid data.
	 */
	if (mmutype == MMU_68040 && (flags & DMAGO_READ))
		dc->sc_flags |= DMAF_PCFLUSH;
#endif
#if defined(HP360) || defined(HP370)
	/*
	 * Remember if we need to flush external physical cache when
	 * DMA is done. We only do this if we are reading (writing memory).
	 */
	if (ectype == EC_PHYS && (flags & DMAGO_READ))
		dc->sc_flags |= DMAF_PCFLUSH;
#endif
#if defined(HP320) || defined(HP350)
	if (ectype == EC_VIRT && (flags & DMAGO_READ))
		dc->sc_flags |= DMAF_VCFLUSH;
#endif
	/*
	 * Remember if we can skip the dma completion interrupt on
	 * the last segment in the chain.
	 */
	if (flags & DMAGO_NOINT) {
		if (dc->sc_cur == dc->sc_last)
			dc->sc_cmd &= ~DMA_ENAB;
		else
			dc->sc_flags |= DMAF_NOINTR;
	}
#ifdef DEBUG
	if (dmadebug & DDB_IO)
		if ((dmadebug&DDB_WORD) && (dc->sc_cmd&DMA_WORD) ||
		    (dmadebug&DDB_LWORD) && (dc->sc_cmd&DMA_LWORD)) {
			printf("dmago: cmd %x, flags %x\n",
			       dc->sc_cmd, dc->sc_flags);
			for (dcp = dc->sc_chain; dcp <= dc->sc_last; dcp++)
				printf("  %d: %d@%x\n", dcp-dc->sc_chain,
				       dcp->dc_count, dcp->dc_addr);
		}
	dmatimo[unit] = 1;
#endif
	DMA_ARM(dc);
}
340
/*
 * End-of-transfer handling for channel `unit': clear the hardware,
 * perform any cache flushing recorded by dmago(), and notify the
 * owning device via its done routine.  Called from dmaintr() when the
 * last chain segment completes.
 */
void
dmastop(unit)
	register int unit;
{
	register struct dma_softc *dc = &dma_softc[unit];
	register struct devqueue *dq;

#ifdef DEBUG
	if (dmadebug & DDB_FOLLOW)
		printf("dmastop(%d)\n", unit);
	/* transfer complete: disarm the watchdog */
	dmatimo[unit] = 0;
#endif
	DMA_CLEAR(dc);
#if defined(HP360) || defined(HP370) || defined(HP380)
	if (dc->sc_flags & DMAF_PCFLUSH) {
		PCIA();
		dc->sc_flags &= ~DMAF_PCFLUSH;
	}
#endif
#if defined(HP320) || defined(HP350)
	if (dc->sc_flags & DMAF_VCFLUSH) {
		/*
		 * 320/350s have VACs that may also need flushing.
		 * In our case we only flush the supervisor side
		 * because we know that if we are DMAing to user
		 * space, the physical pages will also be mapped
		 * in kernel space (via vmapbuf) and hence cache-
		 * inhibited by the pmap module due to the multiple
		 * mapping.
		 */
		DCIS();
		dc->sc_flags &= ~DMAF_VCFLUSH;
	}
#endif
	/*
	 * We may get this interrupt after a device service routine
	 * has freed the dma channel. So, ignore the intr if there's
	 * nothing on the queue.
	 */
	dq = dmachan[unit].dq_forw;
	if (dq != &dmachan[unit])
		(dq->dq_driver->d_done)(dq->dq_unit);
}
384
/*
 * DMA interrupt service routine.  Polls every channel for a pending
 * interrupt; for each, either arms the next segment of the chain or,
 * when the chain is exhausted, finishes the transfer via dmastop().
 * Returns the number of channels serviced (0 means the interrupt was
 * not ours).
 */
int
dmaintr()
{
	register struct dma_softc *dc;
	register int i, stat;
	int found = 0;

#ifdef DEBUG
	if (dmadebug & DDB_FOLLOW)
		printf("dmaintr\n");
#endif
	for (i = 0, dc = dma_softc; i < NDMA; i++, dc++) {
		stat = DMA_STAT(dc);
		if ((stat & DMA_INTR) == 0)
			continue;
		found++;
#ifdef DEBUG
		if (dmadebug & DDB_IO) {
			if ((dmadebug&DDB_WORD) && (dc->sc_cmd&DMA_WORD) ||
			    (dmadebug&DDB_LWORD) && (dc->sc_cmd&DMA_LWORD))
				printf("dmaintr: unit %d stat %x next %d\n",
				       i, stat, (dc->sc_cur-dc->sc_chain)+1);
		}
		if (stat & DMA_ARMED)
			printf("dma%d: intr when armed\n", i);
#endif
		/* advance to the next chain segment, if any remain */
		if (++dc->sc_cur <= dc->sc_last) {
#ifdef DEBUG
			/* re-arm the watchdog for the next segment */
			dmatimo[i] = 1;
#endif
			/*
			 * Last chain segment, disable DMA interrupt.
			 */
			if (dc->sc_cur == dc->sc_last &&
			    (dc->sc_flags & DMAF_NOINTR))
				dc->sc_cmd &= ~DMA_ENAB;
			DMA_CLEAR(dc);
			DMA_ARM(dc);
		} else
			dmastop(i);
	}
	return(found);
}
428
429 #ifdef DEBUG
430 void
dmatimeout()431 dmatimeout()
432 {
433 register int i, s;
434
435 for (i = 0; i < NDMA; i++) {
436 s = splbio();
437 if (dmatimo[i]) {
438 if (dmatimo[i] > 1)
439 printf("dma%d: timeout #%d\n",
440 i, dmatimo[i]-1);
441 dmatimo[i]++;
442 }
443 splx(s);
444 }
445 timeout(dmatimeout, (caddr_t)0, 30 * hz);
446 }
447 #endif
448