xref: /netbsd/sys/arch/sun3/dev/si_sebuf.c (revision bf9ec67e)
1 /*	$NetBSD: si_sebuf.c,v 1.14 2001/08/20 12:00:51 wiz Exp $	*/
2 
3 /*-
4  * Copyright (c) 1996 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Gordon W. Ross.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. All advertising materials mentioning features or use of this software
19  *    must display the following acknowledgement:
20  *        This product includes software developed by the NetBSD
21  *        Foundation, Inc. and its contributors.
22  * 4. Neither the name of The NetBSD Foundation nor the names of its
23  *    contributors may be used to endorse or promote products derived
24  *    from this software without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36  * POSSIBILITY OF SUCH DAMAGE.
37  */
38 
39 /*
40  * Sun3/E SCSI driver (machine-dependent portion).
41  * The machine-independent parts are in ncr5380sbc.c
42  *
43  * XXX - Mostly from the si driver.  Merge?
44  */
45 
46 #include <sys/param.h>
47 #include <sys/systm.h>
48 #include <sys/errno.h>
49 #include <sys/kernel.h>
50 #include <sys/malloc.h>
51 #include <sys/device.h>
52 #include <sys/buf.h>
53 #include <sys/proc.h>
54 #include <sys/user.h>
55 
56 #include <dev/scsipi/scsi_all.h>
57 #include <dev/scsipi/scsipi_all.h>
58 #include <dev/scsipi/scsipi_debug.h>
59 #include <dev/scsipi/scsiconf.h>
60 
61 #include <machine/autoconf.h>
62 
63 /* #define DEBUG XXX */
64 
65 #include <dev/ic/ncr5380reg.h>
66 #include <dev/ic/ncr5380var.h>
67 
68 #include "sereg.h"
69 #include "sevar.h"
70 
71 /*
72  * Transfers smaller than this are done using PIO
73  * (on assumption they're not worth DMA overhead)
74  */
75 #define	MIN_DMA_LEN 128
76 
77 /*
 78  * Transfers larger than 65535 bytes need to be split up.
79  * (Some of the FIFO logic has only 16 bits counters.)
80  * Make the size an integer multiple of the page size
81  * to avoid buf/cluster remap problems.  (paranoid?)
82  */
83 #define	MAX_DMA_LEN 0xE000
84 
85 /*
86  * This structure is used to keep track of mapped DMA requests.
87  */
struct se_dma_handle {
	int 		dh_flags;	/* SIDH_* state bits below */
#define	SIDH_BUSY	1		/* This DH is in use */
#define	SIDH_OUT	2		/* DMA does data out (write) */
	u_char *	dh_addr;	/* KVA of start of buffer */
	int 		dh_maplen;	/* Length of KVA mapping. */
	long		dh_dma; 	/* Offset in DMA buffer. */
};
96 
97 /*
98  * The first structure member has to be the ncr5380_softc
 99  * so we can just cast to go back and forth between them.
100  */
struct se_softc {
	struct ncr5380_softc	ncr_sc;		/* MI softc; must stay first (see cast note above) */
	volatile struct se_regs	*sc_regs;	/* mapped board registers */
	int		sc_adapter_type;	/* bus type from attach args */
	int		sc_adapter_iv;		/* int. vec */
	int 	sc_options;			/* options for this instance */
	int 	sc_reqlen;  		/* requested transfer length */
	struct se_dma_handle *sc_dma;	/* array of SCI_OPENINGS DMA handles */
	/* DMA command block for the OBIO controller. */
	void *sc_dmacmd;
};
112 
113 /* Options for disconnect/reselect, DMA, and interrupts. */
114 #define SE_NO_DISCONNECT    0xff
115 #define SE_NO_PARITY_CHK  0xff00
116 #define SE_FORCE_POLLING 0x10000
117 #define SE_DISABLE_DMA   0x20000
118 
119 void se_dma_alloc __P((struct ncr5380_softc *));
120 void se_dma_free __P((struct ncr5380_softc *));
121 void se_dma_poll __P((struct ncr5380_softc *));
122 
123 void se_dma_setup __P((struct ncr5380_softc *));
124 void se_dma_start __P((struct ncr5380_softc *));
125 void se_dma_eop __P((struct ncr5380_softc *));
126 void se_dma_stop __P((struct ncr5380_softc *));
127 
128 void se_intr_on  __P((struct ncr5380_softc *));
129 void se_intr_off __P((struct ncr5380_softc *));
130 
131 static int  se_intr __P((void *));
132 static void se_reset __P((struct ncr5380_softc *));
133 
134 /*
135  * New-style autoconfig attachment
136  */
137 
138 static int	se_match __P((struct device *, struct cfdata *, void *));
139 static void	se_attach __P((struct device *, struct device *, void *));
140 
/* Autoconfiguration glue: softc size plus match/attach entry points. */
struct cfattach si_sebuf_ca = {
	sizeof(struct se_softc), se_match, se_attach
};

static void	se_minphys __P((struct buf *));

/* Options for disconnect/reselect, DMA, and interrupts. */
int se_options = SE_DISABLE_DMA | SE_FORCE_POLLING | 0xff;

/* How long to wait for DMA before declaring an error. */
int se_dma_intr_timo = 500;	/* ticks (sec. X 100) */

int se_debug = 0;	/* debug printfs; bit 2 traces DMA setup/start/stop */
154 
155 static int
156 se_match(parent, cf, args)
157 	struct device	*parent;
158 	struct cfdata *cf;
159 	void *args;
160 {
161 	struct sebuf_attach_args *aa = args;
162 
163 	/* Match by name. */
164 	if (strcmp(aa->name, "se"))
165 		return (0);
166 
167 	/* Anyting else to check? */
168 
169 	return (1);
170 }
171 
/*
 * Attach: record per-instance options and bus parameters, point the
 * MI ncr5380 code at our register set, hook the vectored interrupt,
 * reset the board, allocate DMA handles, and finish with the common
 * MI attachment.  (The DMA entry points are still disabled - XXX.)
 */
static void
se_attach(parent, self, args)
	struct device	*parent, *self;
	void		*args;
{
	struct se_softc *sc = (struct se_softc *) self;
	struct ncr5380_softc *ncr_sc = &sc->ncr_sc;
	struct cfdata *cf = self->dv_cfdata;
	struct sebuf_attach_args *aa = args;
	volatile struct se_regs *regs;
	int i;

	/* Get options from config flags if specified. */
	if (cf->cf_flags)
		sc->sc_options = cf->cf_flags;
	else
		sc->sc_options = se_options;

	printf(": options=0x%x\n", sc->sc_options);

	sc->sc_adapter_type = aa->ca.ca_bustype;
	sc->sc_adapter_iv = aa->ca.ca_intvec;
	sc->sc_regs = regs = aa->regs;

	/*
	 * MD function pointers used by the MI code.
	 */
	ncr_sc->sc_pio_out = ncr5380_pio_out;
	ncr_sc->sc_pio_in =  ncr5380_pio_in;

#if 0	/* XXX - not yet... */
	ncr_sc->sc_dma_alloc = se_dma_alloc;
	ncr_sc->sc_dma_free  = se_dma_free;
	ncr_sc->sc_dma_setup = se_dma_setup;
	ncr_sc->sc_dma_start = se_dma_start;
	ncr_sc->sc_dma_poll  = se_dma_poll;
	ncr_sc->sc_dma_eop   = se_dma_eop;
	ncr_sc->sc_dma_stop  = se_dma_stop;
	ncr_sc->sc_intr_on   = se_intr_on;
	ncr_sc->sc_intr_off  = se_intr_off;
#endif	/* XXX */

	/* Attach interrupt handler. */
	isr_add_vectored(se_intr, (void *)sc,
		aa->ca.ca_intpri, aa->ca.ca_intvec);

	/* Reset the hardware. */
	se_reset(ncr_sc);

	/* Do the common attach stuff. */

	/*
	 * Support the "options" (config file flags).
	 * Disconnect/reselect is a per-target mask.
	 * Interrupts and DMA are per-controller.
	 */
	ncr_sc->sc_no_disconnect =
		(sc->sc_options & SE_NO_DISCONNECT);
	ncr_sc->sc_parity_disable =
		(sc->sc_options & SE_NO_PARITY_CHK) >> 8;
	if (sc->sc_options & SE_FORCE_POLLING)
		ncr_sc->sc_flags |= NCR5380_FORCE_POLLING;

#if 1	/* XXX - Temporary */
	/* XXX - In case we think DMA is completely broken... */
	if (sc->sc_options & SE_DISABLE_DMA) {
		/* Override this function pointer. */
		ncr_sc->sc_dma_alloc = NULL;
	}
#endif
	ncr_sc->sc_min_dma_len = MIN_DMA_LEN;

	/*
	 * Initialize fields used by the MI code
	 */
	ncr_sc->sci_r0 = &regs->ncrregs[0];
	ncr_sc->sci_r1 = &regs->ncrregs[1];
	ncr_sc->sci_r2 = &regs->ncrregs[2];
	ncr_sc->sci_r3 = &regs->ncrregs[3];
	ncr_sc->sci_r4 = &regs->ncrregs[4];
	ncr_sc->sci_r5 = &regs->ncrregs[5];
	ncr_sc->sci_r6 = &regs->ncrregs[6];
	ncr_sc->sci_r7 = &regs->ncrregs[7];

	ncr_sc->sc_rev = NCR_VARIANT_NCR5380;

	/*
	 * Allocate DMA handles.
	 * M_WAITOK means malloc() should not fail; the NULL check
	 * below is belt-and-suspenders.
	 */
	i = SCI_OPENINGS * sizeof(struct se_dma_handle);
	sc->sc_dma = (struct se_dma_handle *)
		malloc(i, M_DEVBUF, M_WAITOK);
	if (sc->sc_dma == NULL)
		panic("se: dma_malloc failed\n");
	for (i = 0; i < SCI_OPENINGS; i++)
		sc->sc_dma[i].dh_flags = 0;

	ncr_sc->sc_channel.chan_id = 7;	/* host adapter SCSI ID */
	ncr_sc->sc_adapter.adapt_minphys = se_minphys;

	/*
	 *  Initialize se board itself.
	 */
	ncr5380_attach(ncr_sc);
}
277 
/*
 * Pulse the (active-low) reset bits in the CSR, then make sure the
 * DMA engine is idle and reload our interrupt vector register.
 */
static void
se_reset(struct ncr5380_softc *ncr_sc)
{
	struct se_softc *sc = (struct se_softc *)ncr_sc;
	volatile struct se_regs *se = sc->sc_regs;

#ifdef	DEBUG
	if (se_debug) {
		printf("se_reset\n");
	}
#endif

	/* The reset bits in the CSR are active low. */
	se->se_csr = 0;
	delay(10);
	se->se_csr = SE_CSR_SCSI_RES /* | SE_CSR_INTR_EN */ ;
	delay(10);

	/* Make sure the DMA engine is stopped. */
	se->dma_addr = 0;
	se->dma_cntr = 0;
	se->se_ivec = sc->sc_adapter_iv;	/* reload our interrupt vector */
}
301 
302 /*
303  * This is called when the bus is going idle,
304  * so we want to enable the SBC interrupts.
305  * That is controlled by the DMA enable!
306  * Who would have guessed!
307  * What a NASTY trick!
308  */
void
se_intr_on(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{
	struct se_softc *sc = (struct se_softc *)ncr_sc;
	volatile struct se_regs *se = sc->sc_regs;

	/* receive mode should be safer */
	se->se_csr &= ~SE_CSR_SEND;

	/* Clear the count so nothing happens. */
	se->dma_cntr = 0;

	/* Clear the start address too. (paranoid?) */
	se->dma_addr = 0;

	/* Finally, enable the DMA engine (which also gates SBC interrupts). */
	se->se_csr |= SE_CSR_INTR_EN;
}
328 
329 /*
330  * This is called when the bus is idle and we are
331  * about to start playing with the SBC chip.
332  */
333 void
334 se_intr_off(ncr_sc)
335 	struct ncr5380_softc *ncr_sc;
336 {
337 	struct se_softc *sc = (struct se_softc *)ncr_sc;
338 	volatile struct se_regs *se = sc->sc_regs;
339 
340 	se->se_csr &= ~SE_CSR_INTR_EN;
341 }
342 
343 /*
344  * This function is called during the COMMAND or MSG_IN phase
345  * that precedes a DATA_IN or DATA_OUT phase, in case we need
346  * to setup the DMA engine before the bus enters a DATA phase.
347  *
 348  * On the VME version, set up the start address, but clear the
349  * count (to make sure it stays idle) and set that later.
350  * XXX: The VME adapter appears to suppress SBC interrupts
351  * when the FIFO is not empty or the FIFO count is non-zero!
352  * XXX: Need to copy data into the DMA buffer...
353  */
void
se_dma_setup(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{
	struct se_softc *sc = (struct se_softc *)ncr_sc;
	struct sci_req *sr = ncr_sc->sc_current;
	struct se_dma_handle *dh = sr->sr_dma_hand;
	volatile struct se_regs *se = sc->sc_regs;
	long data_pa;
	int xlen;

	/*
	 * Get the DMA mapping for this segment.
	 * XXX - Should separate allocation and mapin.
	 * NOTE(review): data_pa is currently a stub (always the offset
	 * within the buffer) until se_dma_kvtopa() exists.
	 */
	data_pa = 0; /* XXX se_dma_kvtopa(dh->dh_dma); */
	data_pa += (ncr_sc->sc_dataptr - dh->dh_addr);
	if (data_pa & 1)
		panic("se_dma_start: bad pa=0x%lx", data_pa);
	/* Round the length down to an even byte count. */
	xlen = ncr_sc->sc_datalen;
	xlen &= ~1;				/* XXX: necessary? */
	sc->sc_reqlen = xlen; 	/* XXX: or less? (read back by se_dma_start/stop) */

#ifdef	DEBUG
	if (se_debug & 2) {
		printf("se_dma_setup: dh=%p, pa=0x%lx, xlen=0x%x\n",
			   dh, data_pa, xlen);
	}
#endif

	/* Set direction (send/recv) */
	if (dh->dh_flags & SIDH_OUT) {
		se->se_csr |= SE_CSR_SEND;
	} else {
		se->se_csr &= ~SE_CSR_SEND;
	}

	/* Load the start address. */
	se->dma_addr = (ushort)(data_pa & 0xFFFF);

	/*
	 * Keep the count zero or it may start early!
	 */
	se->dma_cntr = 0;
}
399 
400 
/*
 * Load the DMA byte count (set up earlier by se_dma_setup), put the
 * SBC into DMA mode for the proper direction, and fire off the
 * transfer.  Runs at splhigh() because the register sequence may be
 * time critical (not sure).
 */
void
se_dma_start(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{
	struct se_softc *sc = (struct se_softc *)ncr_sc;
	struct sci_req *sr = ncr_sc->sc_current;
	struct se_dma_handle *dh = sr->sr_dma_hand;
	volatile struct se_regs *se = sc->sc_regs;
	int s, xlen;

	xlen = sc->sc_reqlen;

	/* This MAY be time critical (not sure). */
	s = splhigh();

	se->dma_cntr = (ushort)(xlen & 0xFFFF);

	/*
	 * Acknowledge the phase change.  (After DMA setup!)
	 * Put the SBIC into DMA mode, and start the transfer.
	 */
	if (dh->dh_flags & SIDH_OUT) {
		*ncr_sc->sci_tcmd = PHASE_DATA_OUT;
		SCI_CLR_INTR(ncr_sc);
		*ncr_sc->sci_icmd = SCI_ICMD_DATA;
		*ncr_sc->sci_mode |= (SCI_MODE_DMA | SCI_MODE_DMA_IE);
		*ncr_sc->sci_dma_send = 0;	/* start it */
	} else {
		*ncr_sc->sci_tcmd = PHASE_DATA_IN;
		SCI_CLR_INTR(ncr_sc);
		*ncr_sc->sci_icmd = 0;
		*ncr_sc->sci_mode |= (SCI_MODE_DMA | SCI_MODE_DMA_IE);
		*ncr_sc->sci_irecv = 0;	/* start it */
	}

	/* Let'er rip! */
	se->se_csr |= SE_CSR_INTR_EN;

	splx(s);
	ncr_sc->sc_state |= NCR_DOINGDMA;

#ifdef	DEBUG
	if (se_debug & 2) {
		printf("se_dma_start: started, flags=0x%x\n",
			   ncr_sc->sc_state);
	}
#endif
}
449 
450 
void
se_dma_eop(struct ncr5380_softc *ncr_sc)
{

	/* Nothing to do here: DMA is halted before sci_csr is examined. */
}
458 
459 
/*
 * Halt the DMA engine, figure out how many bytes actually moved
 * (from the residual left in the count register), and adjust the
 * MI data pointer/length accordingly.  Always leaves the SBIC back
 * in PIO mode.
 */
void
se_dma_stop(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{
	struct se_softc *sc = (struct se_softc *)ncr_sc;
	struct sci_req *sr = ncr_sc->sc_current;
	struct se_dma_handle *dh = sr->sr_dma_hand;
	volatile struct se_regs *se = sc->sc_regs;
	int resid, ntrans;

	if ((ncr_sc->sc_state & NCR_DOINGDMA) == 0) {
#ifdef	DEBUG
		printf("se_dma_stop: dma not running\n");
#endif
		return;
	}
	ncr_sc->sc_state &= ~NCR_DOINGDMA;

	/* First, halt the DMA engine. */
	se->se_csr &= ~SE_CSR_INTR_EN;	/* VME only */

	/* Set an impossible phase to prevent data movement? */
	*ncr_sc->sci_tcmd = PHASE_INVALID;

	/* Note that timeout may have set the error flag. */
	if (ncr_sc->sc_state & NCR_ABORTING)
		goto out;

	/* XXX: Wait for DMA to actually finish? */

	/*
	 * Now try to figure out how much actually transferred
	 *
	 * NOTE(review): for output, a partial residual is bumped by
	 * one - presumably to account for a byte still in the FIFO;
	 * hardware detail, not verified here.
	 */
	resid = se->dma_cntr & 0xFFFF;
	if (dh->dh_flags & SIDH_OUT)
		if ((resid > 0) && (resid < sc->sc_reqlen))
			resid++;
	ntrans = sc->sc_reqlen - resid;

#ifdef	DEBUG
	if (se_debug & 2) {
		printf("se_dma_stop: resid=0x%x ntrans=0x%x\n",
		       resid, ntrans);
	}
#endif

	/* A transfer shorter than the PIO cutoff is treated as a failure. */
	if (ntrans < MIN_DMA_LEN) {
		printf("se: fifo count: 0x%x\n", resid);
		ncr_sc->sc_state |= NCR_ABORTING;
		goto out;
	}
	if (ntrans > ncr_sc->sc_datalen)
		panic("se_dma_stop: excess transfer");

	/* Adjust data pointer */
	ncr_sc->sc_dataptr += ntrans;
	ncr_sc->sc_datalen -= ntrans;

out:
	se->dma_addr = 0;
	se->dma_cntr = 0;

	/* Put SBIC back in PIO mode. */
	*ncr_sc->sci_mode &= ~(SCI_MODE_DMA | SCI_MODE_DMA_IE);
	*ncr_sc->sci_icmd = 0;
}
526 
527 /*****************************************************************/
528 
529 static void
530 se_minphys(struct buf *bp)
531 {
532 
533 	if (bp->b_bcount > MAX_DMA_LEN)
534 		bp->b_bcount = MAX_DMA_LEN;
535 
536 	return (minphys(bp));
537 }
538 
539 
540 int
541 se_intr(void *arg)
542 {
543 	struct se_softc *sc = arg;
544 	volatile struct se_regs *se = sc->sc_regs;
545 	int dma_error, claimed;
546 	u_short csr;
547 
548 	claimed = 0;
549 	dma_error = 0;
550 
551 	/* SBC interrupt? DMA interrupt? */
552 	csr = se->se_csr;
553 	NCR_TRACE("se_intr: csr=0x%x\n", csr);
554 
555 	if (csr & SE_CSR_SBC_IP) {
556 		claimed = ncr5380_intr(&sc->ncr_sc);
557 #ifdef	DEBUG
558 		if (!claimed) {
559 			printf("se_intr: spurious from SBC\n");
560 		}
561 #endif
562 		/* Yes, we DID cause this interrupt. */
563 		claimed = 1;
564 	}
565 
566 	return (claimed);
567 }
568 
569 
570 /*****************************************************************
571  * Common functions for DMA
572  ****************************************************************/
573 
574 /*
575  * Allocate a DMA handle and put it in sc->sc_dma.  Prepare
576  * for DMA transfer.  On the Sun3/E, this means we have to
577  * allocate space in the DMA buffer for this transfer.
578  */
void
se_dma_alloc(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{
	struct se_softc *sc = (struct se_softc *)ncr_sc;
	struct sci_req *sr = ncr_sc->sc_current;
	struct scsipi_xfer *xs = sr->sr_xs;
	struct se_dma_handle *dh;
	int i, xlen;
	u_long addr;

#ifdef	DIAGNOSTIC
	if (sr->sr_dma_hand != NULL)
		panic("se_dma_alloc: already have DMA handle");
#endif

	addr = (u_long) ncr_sc->sc_dataptr;
	xlen = ncr_sc->sc_datalen;

	/* If the DMA start addr is misaligned then do PIO */
	if ((addr & 1) || (xlen & 1)) {
		printf("se_dma_alloc: misaligned.\n");
		return;
	}

	/* Make sure our caller checked sc_min_dma_len. */
	if (xlen < MIN_DMA_LEN)
		panic("se_dma_alloc: xlen=0x%x\n", xlen);

	/*
	 * Never attempt single transfers of more than MAX_DMA_LEN
	 * (0xE000 = 56k) because our count register may be only 16
	 * bits (an OBIO adapter).
	 * This should never happen since already bounded by minphys().
	 * XXX - Should just segment these...
	 */
	if (xlen > MAX_DMA_LEN) {
		printf("se_dma_alloc: excessive xlen=0x%x\n", xlen);
		ncr_sc->sc_datalen = xlen = MAX_DMA_LEN;
	}

	/* Find free DMA handle.  Guaranteed to find one since we have
	   as many DMA handles as the driver has processes. */
	for (i = 0; i < SCI_OPENINGS; i++) {
		if ((sc->sc_dma[i].dh_flags & SIDH_BUSY) == 0)
			goto found;
	}
	panic("se: no free DMA handles.");
found:

	dh = &sc->sc_dma[i];
	dh->dh_flags = SIDH_BUSY;

	/* Copy the "write" flag for convenience. */
	if (xs->xs_control & XS_CTL_DATA_OUT)
		dh->dh_flags |= SIDH_OUT;

	dh->dh_addr = (u_char*) addr;
	dh->dh_maplen  = xlen;
	dh->dh_dma = 0;	/* XXX - Allocate space in DMA buffer. */
	/* XXX: dh->dh_dma = alloc(xlen) */
	/*
	 * NOTE(review): dh_dma is still a stub (always 0), so this
	 * test always fails and every request falls back to PIO -
	 * consistent with the DMA hooks being #if 0'd in se_attach().
	 */
	if (!dh->dh_dma) {
		/* Can't remap segment */
		printf("se_dma_alloc: can't remap %p/0x%x\n",
			dh->dh_addr, dh->dh_maplen);
		dh->dh_flags = 0;
		return;
	}

	/* success */
	sr->sr_dma_hand = dh;

	return;
}
652 
653 
654 void
655 se_dma_free(ncr_sc)
656 	struct ncr5380_softc *ncr_sc;
657 {
658 	struct sci_req *sr = ncr_sc->sc_current;
659 	struct se_dma_handle *dh = sr->sr_dma_hand;
660 
661 #ifdef	DIAGNOSTIC
662 	if (dh == NULL)
663 		panic("se_dma_free: no DMA handle");
664 #endif
665 
666 	if (ncr_sc->sc_state & NCR_DOINGDMA)
667 		panic("se_dma_free: free while in progress");
668 
669 	if (dh->dh_flags & SIDH_BUSY) {
670 		/* XXX: Should separate allocation and mapping. */
671 		/* XXX: Give back the DMA space. */
672 		/* XXX: free((caddr_t)dh->dh_dma, dh->dh_maplen); */
673 		dh->dh_dma = 0;
674 		dh->dh_flags = 0;
675 	}
676 	sr->sr_dma_hand = NULL;
677 }
678 
679 
680 #define	CSR_MASK SE_CSR_SBC_IP
681 #define	POLL_TIMO	50000	/* X100 = 5 sec. */
682 
683 /*
684  * Poll (spin-wait) for DMA completion.
685  * Called right after xx_dma_start(), and
686  * xx_dma_stop() will be called next.
687  * Same for either VME or OBIO.
688  */
void
se_dma_poll(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{
	struct se_softc *sc = (struct se_softc *)ncr_sc;
	struct sci_req *sr = ncr_sc->sc_current;
	volatile struct se_regs *se = sc->sc_regs;
	int tmo;

	/* Make sure DMA started successfully. */
	if (ncr_sc->sc_state & NCR_ABORTING)
		return;

	/*
	 * XXX: The Sun driver waits for ~SE_CSR_DMA_ACTIVE here
	 * XXX: (on obio) or even worse (on vme) a 10mS. delay!
	 * XXX: I really doubt that is necessary...
	 */

	/*
	 * Wait for any "dma complete" or error bits (CSR_MASK),
	 * spinning in 100us steps up to POLL_TIMO iterations
	 * (about 5 seconds total).
	 */
	tmo = POLL_TIMO;
	for (;;) {
		if (se->se_csr & CSR_MASK)
			break;
		if (--tmo <= 0) {
			printf("se: DMA timeout (while polling)\n");
			/* Indicate timeout as MI code would. */
			sr->sr_flags |= SR_OVERDUE;
			break;
		}
		delay(100);
	}
	NCR_TRACE("se_dma_poll: waited %d\n",
			  POLL_TIMO - tmo);

#ifdef	DEBUG
	if (se_debug & 2) {
		printf("se_dma_poll: done, csr=0x%x\n", se->se_csr);
	}
#endif
}
730 
731