/*	$NetBSD: si_sebuf.c,v 1.18 2002/10/02 16:02:27 thorpej Exp $	*/

/*-
 * Copyright (c) 1996 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Gordon W. Ross.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Sun3/E SCSI driver (machine-dependent portion).
 * The machine-independent parts are in ncr5380sbc.c
 *
 * XXX - Mostly from the si driver.  Merge?
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/device.h>
#include <sys/buf.h>
#include <sys/proc.h>
#include <sys/user.h>

#include <dev/scsipi/scsi_all.h>
#include <dev/scsipi/scsipi_all.h>
#include <dev/scsipi/scsipi_debug.h>
#include <dev/scsipi/scsiconf.h>

#include <machine/autoconf.h>

/* #define DEBUG XXX */

#include <dev/ic/ncr5380reg.h>
#include <dev/ic/ncr5380var.h>

#include "sereg.h"
#include "sevar.h"

/*
 * Transfers smaller than this are done using PIO
 * (on the assumption they're not worth the DMA overhead).
 */
#define	MIN_DMA_LEN 128

/*
 * Transfers larger than 65535 bytes need to be split up.
 * (Some of the FIFO logic has only 16-bit counters.)
 * Make the size an integer multiple of the page size
 * to avoid buf/cluster remap problems.  (paranoid?)
 */
#define	MAX_DMA_LEN 0xE000

/*
 * This structure is used to keep track of mapped DMA requests.
 */
struct se_dma_handle {
	int 		dh_flags;
#define	SIDH_BUSY	1		/* This DH is in use */
#define	SIDH_OUT	2		/* DMA does data out (write) */
	u_char *	dh_addr;	/* KVA of start of buffer */
	int 		dh_maplen;	/* Length of KVA mapping. */
	long		dh_dma; 	/* Offset in DMA buffer. */
};

/*
 * The first structure member has to be the ncr5380_softc
 * so we can just cast to go back and forth between them.
 */
struct se_softc {
	struct ncr5380_softc	ncr_sc;
	volatile struct se_regs	*sc_regs;
	int		sc_adapter_type;
	int		sc_adapter_iv;		/* int. vec */
	int 	sc_options;			/* options for this instance */
	int 	sc_reqlen;  		/* requested transfer length */
	struct se_dma_handle *sc_dma;
	/* DMA command block for the OBIO controller. */
	void *sc_dmacmd;
};

/* Options for disconnect/reselect, DMA, and interrupts. */
#define SE_NO_DISCONNECT    0xff
#define SE_NO_PARITY_CHK  0xff00
#define SE_FORCE_POLLING 0x10000
#define SE_DISABLE_DMA   0x20000
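/*
 * The low byte is a per-target "no disconnect" mask and the next byte
 * a per-target "no parity check" mask (shifted down by 8 in se_attach);
 * the remaining bits are controller-wide flags.
 */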

void se_dma_alloc __P((struct ncr5380_softc *));
void se_dma_free __P((struct ncr5380_softc *));
void se_dma_poll __P((struct ncr5380_softc *));

void se_dma_setup __P((struct ncr5380_softc *));
void se_dma_start __P((struct ncr5380_softc *));
void se_dma_eop __P((struct ncr5380_softc *));
void se_dma_stop __P((struct ncr5380_softc *));

void se_intr_on  __P((struct ncr5380_softc *));
void se_intr_off __P((struct ncr5380_softc *));

static int  se_intr __P((void *));
static void se_reset __P((struct ncr5380_softc *));

/*
 * New-style autoconfig attachment
 */

static int	se_match __P((struct device *, struct cfdata *, void *));
static void	se_attach __P((struct device *, struct device *, void *));

CFATTACH_DECL(si_sebuf, sizeof(struct se_softc),
    se_match, se_attach, NULL, NULL);

static void	se_minphys __P((struct buf *));

/* Options for disconnect/reselect, DMA, and interrupts. */
int se_options = SE_DISABLE_DMA | SE_FORCE_POLLING | 0xff;
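/* i.e. DMA disabled, polled operation, and disconnect disabled for all targets. */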

/* How long to wait for DMA before declaring an error. */
int se_dma_intr_timo = 500;	/* ticks (sec. X 100) */

int se_debug = 0;

static int
se_match(parent, cf, args)
	struct device	*parent;
	struct cfdata *cf;
	void *args;
{
	struct sebuf_attach_args *aa = args;

	/* Match by name. */
	if (strcmp(aa->name, "se"))
		return (0);

	/* Anything else to check? */

	return (1);
}

static void
se_attach(parent, self, args)
	struct device	*parent, *self;
	void		*args;
{
	struct se_softc *sc = (struct se_softc *) self;
	struct ncr5380_softc *ncr_sc = &sc->ncr_sc;
	struct cfdata *cf = self->dv_cfdata;
	struct sebuf_attach_args *aa = args;
	volatile struct se_regs *regs;
	int i;

	/* Get options from config flags if specified. */
	if (cf->cf_flags)
		sc->sc_options = cf->cf_flags;
	else
		sc->sc_options = se_options;

	printf(": options=0x%x\n", sc->sc_options);

	sc->sc_adapter_type = aa->ca.ca_bustype;
	sc->sc_adapter_iv = aa->ca.ca_intvec;
	sc->sc_regs = regs = aa->regs;

	/*
	 * MD function pointers used by the MI code.
	 */
	ncr_sc->sc_pio_out = ncr5380_pio_out;
	ncr_sc->sc_pio_in =  ncr5380_pio_in;

#if 0	/* XXX - not yet... */
	ncr_sc->sc_dma_alloc = se_dma_alloc;
	ncr_sc->sc_dma_free  = se_dma_free;
	ncr_sc->sc_dma_setup = se_dma_setup;
	ncr_sc->sc_dma_start = se_dma_start;
	ncr_sc->sc_dma_poll  = se_dma_poll;
	ncr_sc->sc_dma_eop   = se_dma_eop;
	ncr_sc->sc_dma_stop  = se_dma_stop;
	ncr_sc->sc_intr_on   = se_intr_on;
	ncr_sc->sc_intr_off  = se_intr_off;
#endif	/* XXX */
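	/*
	 * With sc_dma_alloc left NULL, the MI code should never
	 * attempt DMA, so all transfers go through PIO for now.
	 */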

	/* Attach interrupt handler. */
	isr_add_vectored(se_intr, (void *)sc,
		aa->ca.ca_intpri, aa->ca.ca_intvec);

	/* Reset the hardware. */
	se_reset(ncr_sc);

	/* Do the common attach stuff. */

	/*
	 * Support the "options" (config file flags).
	 * Disconnect/reselect is a per-target mask.
	 * Interrupts and DMA are per-controller.
	 */
	ncr_sc->sc_no_disconnect =
		(sc->sc_options & SE_NO_DISCONNECT);
	ncr_sc->sc_parity_disable =
		(sc->sc_options & SE_NO_PARITY_CHK) >> 8;
	if (sc->sc_options & SE_FORCE_POLLING)
		ncr_sc->sc_flags |= NCR5380_FORCE_POLLING;

#if 1	/* XXX - Temporary */
	/* XXX - In case we think DMA is completely broken... */
	if (sc->sc_options & SE_DISABLE_DMA) {
		/* Override this function pointer. */
		ncr_sc->sc_dma_alloc = NULL;
	}
#endif
	ncr_sc->sc_min_dma_len = MIN_DMA_LEN;

	/*
	 * Initialize fields used by the MI code
	 */
	ncr_sc->sci_r0 = &regs->ncrregs[0];
	ncr_sc->sci_r1 = &regs->ncrregs[1];
	ncr_sc->sci_r2 = &regs->ncrregs[2];
	ncr_sc->sci_r3 = &regs->ncrregs[3];
	ncr_sc->sci_r4 = &regs->ncrregs[4];
	ncr_sc->sci_r5 = &regs->ncrregs[5];
	ncr_sc->sci_r6 = &regs->ncrregs[6];
	ncr_sc->sci_r7 = &regs->ncrregs[7];

	ncr_sc->sc_rev = NCR_VARIANT_NCR5380;

	/*
	 * Allocate DMA handles.
	 */
	i = SCI_OPENINGS * sizeof(struct se_dma_handle);
	sc->sc_dma = (struct se_dma_handle *)
		malloc(i, M_DEVBUF, M_WAITOK);
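	/* (Paranoia: with M_WAITOK, malloc(9) should not return NULL.) */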
	if (sc->sc_dma == NULL)
		panic("se: dma_malloc failed");
	for (i = 0; i < SCI_OPENINGS; i++)
		sc->sc_dma[i].dh_flags = 0;

	ncr_sc->sc_channel.chan_id = 7;
	ncr_sc->sc_adapter.adapt_minphys = se_minphys;

	/*
	 *  Initialize the se board itself.
	 */
	ncr5380_attach(ncr_sc);
}

static void
se_reset(struct ncr5380_softc *ncr_sc)
{
	struct se_softc *sc = (struct se_softc *)ncr_sc;
	volatile struct se_regs *se = sc->sc_regs;

#ifdef	DEBUG
	if (se_debug) {
		printf("se_reset\n");
	}
#endif

	/* The reset bits in the CSR are active low. */
	se->se_csr = 0;
	delay(10);
	se->se_csr = SE_CSR_SCSI_RES /* | SE_CSR_INTR_EN */ ;
	delay(10);

	/* Make sure the DMA engine is stopped. */
	se->dma_addr = 0;
	se->dma_cntr = 0;
	se->se_ivec = sc->sc_adapter_iv;
}

/*
 * This is called when the bus is going idle,
 * so we want to enable the SBC interrupts.
 * That is controlled by the DMA enable!
 * Who would have guessed!
 * What a NASTY trick!
 */
void
se_intr_on(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{
	struct se_softc *sc = (struct se_softc *)ncr_sc;
	volatile struct se_regs *se = sc->sc_regs;

	/* receive mode should be safer */
	se->se_csr &= ~SE_CSR_SEND;

	/* Clear the count so nothing happens. */
	se->dma_cntr = 0;

	/* Clear the start address too. (paranoid?) */
	se->dma_addr = 0;

	/* Finally, enable the DMA engine. */
	se->se_csr |= SE_CSR_INTR_EN;
}

/*
 * This is called when the bus is idle and we are
 * about to start playing with the SBC chip.
 */
void
se_intr_off(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{
	struct se_softc *sc = (struct se_softc *)ncr_sc;
	volatile struct se_regs *se = sc->sc_regs;

	se->se_csr &= ~SE_CSR_INTR_EN;
}

/*
 * This function is called during the COMMAND or MSG_IN phase
 * that precedes a DATA_IN or DATA_OUT phase, in case we need
 * to set up the DMA engine before the bus enters a DATA phase.
 *
 * On the VME version, set up the start address, but clear the
 * count (to make sure it stays idle) and set that later.
 * XXX: The VME adapter appears to suppress SBC interrupts
 * when the FIFO is not empty or the FIFO count is non-zero!
 * XXX: Need to copy data into the DMA buffer...
 */
void
se_dma_setup(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{
	struct se_softc *sc = (struct se_softc *)ncr_sc;
	struct sci_req *sr = ncr_sc->sc_current;
	struct se_dma_handle *dh = sr->sr_dma_hand;
	volatile struct se_regs *se = sc->sc_regs;
	long data_pa;
	int xlen;

	/*
	 * Get the DMA mapping for this segment.
	 * XXX - Should separate allocation and mapin.
	 */
	data_pa = 0; /* XXX se_dma_kvtopa(dh->dh_dma); */
	data_pa += (ncr_sc->sc_dataptr - dh->dh_addr);
	if (data_pa & 1)
		panic("se_dma_setup: bad pa=0x%lx", data_pa);
	xlen = ncr_sc->sc_datalen;
	xlen &= ~1;				/* XXX: necessary? */
	sc->sc_reqlen = xlen; 	/* XXX: or less? */

#ifdef	DEBUG
	if (se_debug & 2) {
		printf("se_dma_setup: dh=%p, pa=0x%lx, xlen=0x%x\n",
			   dh, data_pa, xlen);
	}
#endif

	/* Set direction (send/recv) */
	if (dh->dh_flags & SIDH_OUT) {
		se->se_csr |= SE_CSR_SEND;
	} else {
		se->se_csr &= ~SE_CSR_SEND;
	}

	/* Load the start address. */
	se->dma_addr = (ushort)(data_pa & 0xFFFF);

	/*
	 * Keep the count zero or it may start early!
	 */
	se->dma_cntr = 0;
}


void
se_dma_start(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{
	struct se_softc *sc = (struct se_softc *)ncr_sc;
	struct sci_req *sr = ncr_sc->sc_current;
	struct se_dma_handle *dh = sr->sr_dma_hand;
	volatile struct se_regs *se = sc->sc_regs;
	int s, xlen;

	xlen = sc->sc_reqlen;

	/* This MAY be time critical (not sure). */
	s = splhigh();

	se->dma_cntr = (ushort)(xlen & 0xFFFF);

	/*
	 * Acknowledge the phase change.  (After DMA setup!)
	 * Put the SBIC into DMA mode, and start the transfer.
	 */
	if (dh->dh_flags & SIDH_OUT) {
		*ncr_sc->sci_tcmd = PHASE_DATA_OUT;
		SCI_CLR_INTR(ncr_sc);
		*ncr_sc->sci_icmd = SCI_ICMD_DATA;
		*ncr_sc->sci_mode |= (SCI_MODE_DMA | SCI_MODE_DMA_IE);
		*ncr_sc->sci_dma_send = 0;	/* start it */
	} else {
		*ncr_sc->sci_tcmd = PHASE_DATA_IN;
		SCI_CLR_INTR(ncr_sc);
		*ncr_sc->sci_icmd = 0;
		*ncr_sc->sci_mode |= (SCI_MODE_DMA | SCI_MODE_DMA_IE);
		*ncr_sc->sci_irecv = 0;	/* start it */
	}

	/* Let'er rip! */
	se->se_csr |= SE_CSR_INTR_EN;

	splx(s);
	ncr_sc->sc_state |= NCR_DOINGDMA;

#ifdef	DEBUG
	if (se_debug & 2) {
		printf("se_dma_start: started, flags=0x%x\n",
			   ncr_sc->sc_state);
	}
#endif
}


void
se_dma_eop(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{

	/* Not needed - DMA was stopped prior to examining sci_csr */
}


void
se_dma_stop(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{
	struct se_softc *sc = (struct se_softc *)ncr_sc;
	struct sci_req *sr = ncr_sc->sc_current;
	struct se_dma_handle *dh = sr->sr_dma_hand;
	volatile struct se_regs *se = sc->sc_regs;
	int resid, ntrans;

	if ((ncr_sc->sc_state & NCR_DOINGDMA) == 0) {
#ifdef	DEBUG
		printf("se_dma_stop: dma not running\n");
#endif
		return;
	}
	ncr_sc->sc_state &= ~NCR_DOINGDMA;

	/* First, halt the DMA engine. */
	se->se_csr &= ~SE_CSR_INTR_EN;	/* VME only */

	/* Set an impossible phase to prevent data movement? */
	*ncr_sc->sci_tcmd = PHASE_INVALID;

	/* Note that timeout may have set the error flag. */
	if (ncr_sc->sc_state & NCR_ABORTING)
		goto out;

	/* XXX: Wait for DMA to actually finish? */

	/*
	 * Now try to figure out how much was actually transferred
	 */
	resid = se->dma_cntr & 0xFFFF;
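	/*
	 * On output, a byte can apparently be left in the FIFO even
	 * though the counter has advanced, so count it as not sent.
	 */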
	if (dh->dh_flags & SIDH_OUT)
		if ((resid > 0) && (resid < sc->sc_reqlen))
			resid++;
	ntrans = sc->sc_reqlen - resid;

#ifdef	DEBUG
	if (se_debug & 2) {
		printf("se_dma_stop: resid=0x%x ntrans=0x%x\n",
		       resid, ntrans);
	}
#endif

	if (ntrans < MIN_DMA_LEN) {
		printf("se: fifo count: 0x%x\n", resid);
		ncr_sc->sc_state |= NCR_ABORTING;
		goto out;
	}
	if (ntrans > ncr_sc->sc_datalen)
		panic("se_dma_stop: excess transfer");

	/* Adjust data pointer */
	ncr_sc->sc_dataptr += ntrans;
	ncr_sc->sc_datalen -= ntrans;

out:
	se->dma_addr = 0;
	se->dma_cntr = 0;

	/* Put SBIC back in PIO mode. */
	*ncr_sc->sci_mode &= ~(SCI_MODE_DMA | SCI_MODE_DMA_IE);
	*ncr_sc->sci_icmd = 0;
}

/*****************************************************************/

static void
se_minphys(struct buf *bp)
{

	if (bp->b_bcount > MAX_DMA_LEN)
		bp->b_bcount = MAX_DMA_LEN;

	minphys(bp);
}


int
se_intr(void *arg)
{
	struct se_softc *sc = arg;
	volatile struct se_regs *se = sc->sc_regs;
	int dma_error, claimed;
	u_short csr;

	claimed = 0;
	dma_error = 0;

	/* SBC interrupt? DMA interrupt? */
	csr = se->se_csr;
	NCR_TRACE("se_intr: csr=0x%x\n", csr);

	if (csr & SE_CSR_SBC_IP) {
		claimed = ncr5380_intr(&sc->ncr_sc);
#ifdef	DEBUG
		if (!claimed) {
			printf("se_intr: spurious from SBC\n");
		}
#endif
		/* Yes, we DID cause this interrupt. */
		claimed = 1;
	}

	return (claimed);
}


/*****************************************************************
 * Common functions for DMA
 ****************************************************************/

/*
 * Allocate a DMA handle and put it in sc->sc_dma.  Prepare
 * for DMA transfer.  On the Sun3/E, this means we have to
 * allocate space in the DMA buffer for this transfer.
 */
void
se_dma_alloc(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{
	struct se_softc *sc = (struct se_softc *)ncr_sc;
	struct sci_req *sr = ncr_sc->sc_current;
	struct scsipi_xfer *xs = sr->sr_xs;
	struct se_dma_handle *dh;
	int i, xlen;
	u_long addr;

#ifdef	DIAGNOSTIC
	if (sr->sr_dma_hand != NULL)
		panic("se_dma_alloc: already have DMA handle");
#endif

	addr = (u_long) ncr_sc->sc_dataptr;
	xlen = ncr_sc->sc_datalen;

	/* If the DMA start addr is misaligned then do PIO */
	if ((addr & 1) || (xlen & 1)) {
		printf("se_dma_alloc: misaligned.\n");
		return;
	}

	/* Make sure our caller checked sc_min_dma_len. */
	if (xlen < MIN_DMA_LEN)
		panic("se_dma_alloc: xlen=0x%x", xlen);

	/*
	 * Never attempt single transfers of more than 63k, because
	 * our count register may be only 16 bits (an OBIO adapter).
	 * This should never happen, since transfers are already
	 * bounded by minphys().
	 * XXX - Should just segment these...
	 */
	if (xlen > MAX_DMA_LEN) {
		printf("se_dma_alloc: excessive xlen=0x%x\n", xlen);
		ncr_sc->sc_datalen = xlen = MAX_DMA_LEN;
	}

	/* Find free DMA handle.  Guaranteed to find one since we have
	   as many DMA handles as the driver has processes. */
	for (i = 0; i < SCI_OPENINGS; i++) {
		if ((sc->sc_dma[i].dh_flags & SIDH_BUSY) == 0)
			goto found;
	}
	panic("se: no free DMA handles.");
found:

	dh = &sc->sc_dma[i];
	dh->dh_flags = SIDH_BUSY;

	/* Copy the "write" flag for convenience. */
	if (xs->xs_control & XS_CTL_DATA_OUT)
		dh->dh_flags |= SIDH_OUT;

	dh->dh_addr = (u_char*) addr;
	dh->dh_maplen  = xlen;
	dh->dh_dma = 0;	/* XXX - Allocate space in DMA buffer. */
	/* XXX: dh->dh_dma = alloc(xlen) */
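	/*
	 * Until that allocation is implemented, dh_dma stays zero and
	 * we always take the error path below (i.e. fall back to PIO).
	 */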
	if (!dh->dh_dma) {
		/* Can't remap segment */
		printf("se_dma_alloc: can't remap %p/0x%x\n",
			dh->dh_addr, dh->dh_maplen);
		dh->dh_flags = 0;
		return;
	}

	/* success */
	sr->sr_dma_hand = dh;

	return;
}


void
se_dma_free(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{
	struct sci_req *sr = ncr_sc->sc_current;
	struct se_dma_handle *dh = sr->sr_dma_hand;

#ifdef	DIAGNOSTIC
	if (dh == NULL)
		panic("se_dma_free: no DMA handle");
#endif

	if (ncr_sc->sc_state & NCR_DOINGDMA)
		panic("se_dma_free: free while in progress");

	if (dh->dh_flags & SIDH_BUSY) {
		/* XXX: Should separate allocation and mapping. */
		/* XXX: Give back the DMA space. */
		/* XXX: free((caddr_t)dh->dh_dma, dh->dh_maplen); */
		dh->dh_dma = 0;
		dh->dh_flags = 0;
	}
	sr->sr_dma_hand = NULL;
}


#define	CSR_MASK SE_CSR_SBC_IP
#define	POLL_TIMO	50000	/* X100 = 5 sec. */

/*
 * Poll (spin-wait) for DMA completion.
 * Called right after xx_dma_start(), and
 * xx_dma_stop() will be called next.
 * Same for either VME or OBIO.
 */
void
se_dma_poll(ncr_sc)
	struct ncr5380_softc *ncr_sc;
{
	struct se_softc *sc = (struct se_softc *)ncr_sc;
	struct sci_req *sr = ncr_sc->sc_current;
	volatile struct se_regs *se = sc->sc_regs;
	int tmo;

	/* Make sure DMA started successfully. */
	if (ncr_sc->sc_state & NCR_ABORTING)
		return;

	/*
	 * XXX: The Sun driver waits for ~SE_CSR_DMA_ACTIVE here
	 * XXX: (on obio) or even worse (on vme) a 10mS. delay!
	 * XXX: I really doubt that is necessary...
	 */

	/* Wait for any "dma complete" or error bits. */
	tmo = POLL_TIMO;
	for (;;) {
		if (se->se_csr & CSR_MASK)
			break;
		if (--tmo <= 0) {
			printf("se: DMA timeout (while polling)\n");
			/* Indicate timeout as MI code would. */
			sr->sr_flags |= SR_OVERDUE;
			break;
		}
		delay(100);
	}
	NCR_TRACE("se_dma_poll: waited %d\n",
			  POLL_TIMO - tmo);

#ifdef	DEBUG
	if (se_debug & 2) {
		printf("se_dma_poll: done, csr=0x%x\n", se->se_csr);
	}
#endif
}