/* $NetBSD: btvmei.c,v 1.26 2009/11/26 15:17:08 njoly Exp $ */

/*
 * Copyright (c) 1999
 *	Matthias Drochner.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: btvmei.c,v 1.26 2009/11/26 15:17:08 njoly Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/device.h>
#include <sys/proc.h>
#include <sys/malloc.h>

#include <sys/bus.h>
#include <sys/extent.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/vme/vmereg.h>
#include <dev/vme/vmevar.h>

#include <dev/pci/btvmeireg.h>
#include <dev/pci/btvmeivar.h>

static int b3_617_match(device_t, cfdata_t, void *);
static void b3_617_attach(device_t, device_t, void *);
#ifdef notyet
static int b3_617_detach(device_t);
#endif
void b3_617_slaveconfig(device_t, struct vme_attach_args *);

static void b3_617_vmeintr(struct b3_617_softc *, unsigned char);

/*
 * mapping resources, needed for deallocation
 */
struct b3_617_vmeresc {
	bus_space_handle_t handle;
	bus_size_t len;
	int firstpage, maplen;
};

CFATTACH_DECL(btvmei, sizeof(struct b3_617_softc),
    b3_617_match, b3_617_attach, NULL, NULL);

static int
b3_617_match(device_t parent, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa = aux;

	if ((PCI_VENDOR(pa->pa_id) != PCI_VENDOR_BIT3)
	    || (PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_BIT3_PCIVME617))
		return (0);
	return (1);
}

static void
b3_617_attach(device_t parent, device_t self, void *aux)
{
	struct b3_617_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;

	int rev;

	pci_intr_handle_t ih;
	const char *intrstr;
	struct vmebus_attach_args vaa;

	aprint_naive(": VME bus adapter\n");

	sc->sc_pc = pc;
	sc->sc_dmat = pa->pa_dmat;

	rev = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	aprint_normal(": BIT3 PCI-VME 617 rev %d\n", rev);

	/*
	 * Map CSR and mapping table spaces.
	 * Don't map the VME window; parts are mapped as needed to
	 * save kernel virtual memory space.
	 */
	if (pci_mapreg_map(pa, 0x14,
			   PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT,
			   0, &sc->csrt, &sc->csrh, NULL, NULL) &&
	    pci_mapreg_map(pa, 0x10,
			   PCI_MAPREG_TYPE_IO,
			   0, &sc->csrt, &sc->csrh, NULL, NULL)) {
		aprint_error_dev(self, "can't map CSR space\n");
		return;
	}

	if (pci_mapreg_map(pa, 0x18,
			   PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT,
			   0, &sc->mapt, &sc->maph, NULL, NULL)) {
		aprint_error_dev(self, "can't map map space\n");
		return;
	}

	if (pci_mapreg_info(pc, pa->pa_tag, 0x1c,
			    PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT,
			    &sc->vmepbase, 0, 0)) {
		aprint_error_dev(self, "can't get VME range\n");
		return;
	}
	sc->sc_vmet = pa->pa_memt; /* XXX needed for VME mappings */

	/* Map and establish the interrupt. */
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(&sc->sc_dev, "couldn't map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	/*
	 * Use a low interrupt level (the lowest?).
	 * We will raise before calling a subdevice's handler.
	 */
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_BIO, b3_617_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(&sc->sc_dev, "couldn't establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(&sc->sc_dev, "interrupting at %s\n", intrstr);

	if (b3_617_init(sc))
		return;

	/*
	 * set up all the tags for use by VME devices
	 */
	sc->sc_vct.cookie = self;
	sc->sc_vct.vct_probe = b3_617_vme_probe;
	sc->sc_vct.vct_map = b3_617_map_vme;
	sc->sc_vct.vct_unmap = b3_617_unmap_vme;
	sc->sc_vct.vct_int_map = b3_617_map_vmeint;
	sc->sc_vct.vct_int_establish = b3_617_establish_vmeint;
	sc->sc_vct.vct_int_disestablish = b3_617_disestablish_vmeint;
	sc->sc_vct.vct_dmamap_create = b3_617_dmamap_create;
	sc->sc_vct.vct_dmamap_destroy = b3_617_dmamap_destroy;
	sc->sc_vct.vct_dmamem_alloc = b3_617_dmamem_alloc;
	sc->sc_vct.vct_dmamem_free = b3_617_dmamem_free;

	vaa.va_vct = &(sc->sc_vct);
	vaa.va_bdt = pa->pa_dmat;
	vaa.va_slaveconfig = b3_617_slaveconfig;

	sc->csrwindow.offset = -1;
	sc->dmawindow24.offset = -1;
	sc->dmawindow32.offset = -1;
	config_found(self, &vaa, 0);
}

#ifdef notyet
static int
b3_617_detach(device_t dev)
{
	struct b3_617_softc *sc = device_private(dev);

	b3_617_halt(sc);

	if (sc->sc_ih)
		pci_intr_disestablish(sc->sc_pc, sc->sc_ih);

	bus_space_unmap(sc->sc_bc, sc->csrbase, 32);
	bus_space_unmap(sc->sc_bc, sc->mapbase, 64*1024);

	return(0);
}
#endif

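/*
 * Set up (va != NULL) or tear down (va == NULL) the slave windows:
 * the A16 CSR window and the A24/A32 windows used for DMA. The ranges
 * presumably come from the kernel configuration; each is allocated in
 * the adapter's VME address space and remembered in the softc so it
 * can be freed again later.
 */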
void
b3_617_slaveconfig(device_t dev, struct vme_attach_args *va)
{
	struct b3_617_softc *sc = device_private(dev);
	vme_chipset_tag_t vmect;
	int i, res;
	const char *name = 0; /* XXX gcc! */

	vmect = &sc->sc_vct;
	if (!va)
		goto freeit;

#ifdef DIAGNOSTIC
	if (vmect != va->va_vct)
		panic("pcivme_slaveconfig: chipset tag?");
#endif

	for (i = 0; i < va->numcfranges; i++) {
		res = vme_space_alloc(vmect, va->r[i].offset,
				      va->r[i].size, va->r[i].am);
		if (res)
			panic("%s: can't alloc slave window %x/%x/%x",
			       device_xname(dev), va->r[i].offset,
			       va->r[i].size, va->r[i].am);

		switch (va->r[i].am & VME_AM_ADRSIZEMASK) {
			/* structure assignments! */
		    case VME_AM_A16:
			sc->csrwindow = va->r[i];
			name = "VME CSR";
			break;
		    case VME_AM_A24:
			sc->dmawindow24 = va->r[i];
			name = "A24 DMA";
			break;
		    case VME_AM_A32:
			sc->dmawindow32 = va->r[i];
			name = "A32 DMA";
			break;
		}
		printf("%s: %s window: %x-%x\n", device_xname(dev),
		       name, va->r[i].offset,
		       va->r[i].offset + va->r[i].size - 1);
	}
	return;

freeit:
	if (sc->csrwindow.offset != -1)
		vme_space_free(vmect, sc->csrwindow.offset,
			       sc->csrwindow.size, sc->csrwindow.am);
	if (sc->dmawindow32.offset != -1)
		vme_space_free(vmect, sc->dmawindow32.offset,
			       sc->dmawindow32.size, sc->dmawindow32.am);
	if (sc->dmawindow24.offset != -1)
		vme_space_free(vmect, sc->dmawindow24.offset,
			       sc->dmawindow24.size, sc->dmawindow24.am);
}

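/*
 * Reset the interface following the sequence in ch 5.2 of the adapter
 * manual: check that the remote (VME) side is connected, clear any
 * latched error state, and fail if an interface error is still flagged.
 */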
int
b3_617_reset(struct b3_617_softc *sc)
{
	unsigned char status;

	/* reset sequence, ch 5.2 */
	status = read_csr_byte(sc, LOC_STATUS);
	if (status & LSR_NO_CONNECT) {
		printf("%s: not connected\n", device_xname(&sc->sc_dev));
		return (-1);
	}
	status = read_csr_byte(sc, REM_STATUS); /* discard */
	write_csr_byte(sc, LOC_CMD1, LC1_CLR_ERROR);
	status = read_csr_byte(sc, LOC_STATUS);
	if (status & LSR_CERROR_MASK) {
		char sbuf[sizeof(BIT3_LSR_BITS) + 64];

		snprintb(sbuf, sizeof(sbuf), BIT3_LSR_BITS, status);
		printf("%s: interface error, lsr=%s\n", device_xname(&sc->sc_dev),
		       sbuf);
		return (-1);
	}
	return (0);
}

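/*
 * One-time initialization: reset the interface, invalidate all mapping
 * RAM entries (PCI->VME, VME->PCI and DMA), create the extent map used
 * to allocate scatter pages, and enable interrupt forwarding if a PCI
 * interrupt could be established.
 */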
int
b3_617_init(struct b3_617_softc *sc)
{
	unsigned int i;

	if (b3_617_reset(sc))
		return (-1);

	/* all maps invalid */
	for (i = MR_PCI_VME; i < MR_PCI_VME + MR_PCI_VME_SIZE; i += 4)
		write_mapmem(sc, i, MR_RAM_INVALID);
	for (i = MR_VME_PCI; i < MR_VME_PCI + MR_VME_PCI_SIZE; i += 4)
		write_mapmem(sc, i, MR_RAM_INVALID);
	for (i = MR_DMA_PCI; i < MR_DMA_PCI + MR_DMA_PCI_SIZE; i += 4)
		write_mapmem(sc, i, MR_RAM_INVALID);

	/*
	 * set up scatter page allocation control
	 */
	sc->vmeext = extent_create("pcivme", MR_PCI_VME,
				   MR_PCI_VME + MR_PCI_VME_SIZE - 1, M_DEVBUF,
				   sc->vmemap, sizeof(sc->vmemap),
				   EX_NOCOALESCE);
#if 0
	sc->pciext = extent_create("vmepci", MR_VME_PCI,
				   MR_VME_PCI + MR_VME_PCI_SIZE - 1, M_DEVBUF,
				   sc->pcimap, sizeof(sc->pcimap),
				   EX_NOCOALESCE);
	sc->dmaext = extent_create("dmapci", MR_DMA_PCI,
				   MR_DMA_PCI + MR_DMA_PCI_SIZE - 1, M_DEVBUF,
				   sc->dmamap, sizeof(sc->dmamap),
				   EX_NOCOALESCE);
#endif

	/*
	 * init int handler queue,
	 * enable interrupts if PCI interrupt available
	 */
	TAILQ_INIT(&(sc->intrhdls));
	sc->strayintrs = 0;

	if (sc->sc_ih)
		write_csr_byte(sc, LOC_INT_CTRL, LIC_INT_ENABLE);
	/* no error ints */
	write_csr_byte(sc, REM_CMD2, 0); /* enables VME IRQ */

	return (0);
}

#ifdef notyet /* for detach */
void
b3_617_halt(struct b3_617_softc *sc)
{
	/*
	 * Because the detach code checks for the existence of children,
	 * all resources (mappings, VME IRQs, DMA requests)
	 * should be deallocated at this point.
	 */

	/* disable IRQ */
	write_csr_byte(sc, LOC_INT_CTRL, 0);
}
#endif

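/*
 * Dispatch a VME backplane interrupt. For each level flagged in the
 * local interrupt status, a cycle through the remote command/IACK
 * registers (presumably an IACK cycle on the VME bus) fetches the
 * vector; every registered handler matching that level (and vector,
 * unless it registered with vector -1) is then called. Unclaimed
 * interrupts are counted as stray.
 */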
static void
b3_617_vmeintr(struct b3_617_softc *sc, unsigned char lstat)
{
	int level;

	for (level = 7; level >= 1; level--) {
		unsigned char vector;
		struct b3_617_vmeintrhand *ih;
		int found;

		if (!(lstat & (1 << level)))
			continue;

		write_csr_byte(sc, REM_CMD1, level);
		vector = read_csr_byte(sc, REM_IACK);

		found = 0;

		for (ih = sc->intrhdls.tqh_first; ih;
		     ih = ih->ih_next.tqe_next) {
			if ((ih->ih_level == level) &&
			    ((ih->ih_vector == -1) ||
			     (ih->ih_vector == vector))) {
				int s, res;
				/*
				 * We should raise the interrupt level
				 * to ih->ih_prior here. How to do this
				 * machine-independently?
				 * To be safe, raise to the maximum.
				 */
				s = splhigh();
				found |= (res = (*(ih->ih_fun))(ih->ih_arg));
				splx(s);
				if (res)
					ih->ih_count++;
				if (res == 1)
					break;
			}
		}
		if (!found)
			sc->strayintrs++;
	}
}

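/*
 * The vme_chipset_tag_t methods below all receive the softc as an
 * opaque "void *vsc" cookie; this macro saves an explicit cast in
 * every function. It is #undef'ed at the end of the file.
 */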
#define sc ((struct b3_617_softc*)vsc)

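/*
 * Map a VME address range into PCI (and thus kernel virtual) space:
 * allocate a run of consecutive mapping-RAM entries from the extent
 * map, program each entry with the address modifier, swap mode and the
 * VME page address, and map the matching part of the adapter's PCI
 * window with bus_space_map(). The bookkeeping needed for
 * b3_617_unmap_vme() is kept in a malloc'ed b3_617_vmeresc record.
 */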
int
b3_617_map_vme(void *vsc, vme_addr_t vmeaddr, vme_size_t len,
    vme_am_t am, vme_datasize_t datasizes, vme_swap_t swap,
    bus_space_tag_t *tag, bus_space_handle_t *handle, vme_mapresc_t *resc)
{
	vme_addr_t vmebase, vmeend, va;
	unsigned long maplen, first, i;
	u_int32_t mapreg;
	bus_addr_t pcibase;
	int res;
	struct b3_617_vmeresc *r;

	/* first mapped address */
	vmebase = vmeaddr & ~(VME_PAGESIZE - 1);
	/* base of last mapped page */
	vmeend = (vmeaddr + len - 1) & ~(VME_PAGESIZE - 1);
	/* bytes in scatter table required */
	maplen = ((vmeend - vmebase) / VME_PAGESIZE + 1) * 4;

	if (extent_alloc(sc->vmeext, maplen, 4, 0, EX_FAST, &first))
		return (ENOMEM);

	/*
	 * set up adapter mapping registers
	 */
	mapreg = (am << MR_AMOD_SHIFT) | MR_FC_RRAM | swap;

	for (i = first, va = vmebase;
	     i < first + maplen;
	     i += 4, va += VME_PAGESIZE) {
		write_mapmem(sc, i, mapreg | va);
#ifdef BIT3DEBUG
		printf("mapreg@%lx=%x\n", i, read_mapmem(sc, i));
#endif
	}

#ifdef DIAGNOSTIC
	if (va != vmeend + VME_PAGESIZE)
		panic("b3_617_map_vme: botch");
#endif
	/*
	 * map needed range in PCI space
	 */
	pcibase = sc->vmepbase + (first - MR_PCI_VME) / 4 * VME_PAGESIZE
	    + (vmeaddr & (VME_PAGESIZE - 1));

	if ((res = bus_space_map(sc->sc_vmet, pcibase, len, 0, handle))) {
		for (i = first; i < first + maplen; i += 4)
			write_mapmem(sc, i, MR_RAM_INVALID);
		extent_free(sc->vmeext, first, maplen, 0);
		return (res);
	}

	*tag = sc->sc_vmet;

	/*
	 * save all data needed for later unmapping
	 */
	r = malloc(sizeof(*r), M_DEVBUF, M_NOWAIT);
	if (r == NULL) {
		/* undo the mapping set up above */
		bus_space_unmap(sc->sc_vmet, *handle, len);
		for (i = first; i < first + maplen; i += 4)
			write_mapmem(sc, i, MR_RAM_INVALID);
		extent_free(sc->vmeext, first, maplen, 0);
		return (ENOMEM);
	}
	r->handle = *handle;
	r->len = len;
	r->firstpage = first;
	r->maplen = maplen;
	*resc = r;
	return (0);
}

void
b3_617_unmap_vme(void *vsc, vme_mapresc_t resc)
{
	unsigned long i;
	struct b3_617_vmeresc *r = resc;

	/* unmap PCI window */
	bus_space_unmap(sc->sc_vmet, r->handle, r->len);

	for (i = r->firstpage; i < r->firstpage + r->maplen; i += 4)
		write_mapmem(sc, i, MR_RAM_INVALID);

	extent_free(sc->vmeext, r->firstpage, r->maplen, 0);

	free(r, M_DEVBUF);
}

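/*
 * Probe for a device on the VME bus: temporarily map the range, access
 * it (via the supplied callback, or by dummy reads of the requested
 * width), and use the error bits in the local status register to
 * detect a VME bus error. Returns 0 if the access went through, EIO on
 * a bus error, or the error from the mapping attempt.
 */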
int
b3_617_vme_probe(void *vsc, vme_addr_t addr, vme_size_t len, vme_am_t am,
    vme_datasize_t datasize,
    int (*callback)(void *, bus_space_tag_t, bus_space_handle_t), void *cbarg)
{
	bus_space_tag_t tag;
	bus_space_handle_t handle;
	vme_mapresc_t resc;
	int res, i;
	volatile u_int32_t dummy;
	int status;

	res = b3_617_map_vme(vsc, addr, len, am, 0, 0,
			     &tag, &handle, &resc);
	if (res)
		return (res);

	if (read_csr_byte(sc, LOC_STATUS) & LSR_ERROR_MASK) {
		printf("b3_617_vme_probe: error bit not clean - resetting\n");
		write_csr_byte(sc, LOC_CMD1, LC1_CLR_ERROR);
	}

	if (callback)
		res = (*callback)(cbarg, tag, handle);
	else {
		for (i = 0; i < len;) {
			switch (datasize) {
			    case VME_D8:
				dummy = bus_space_read_1(tag, handle, i);
				i++;
				break;
			    case VME_D16:
				dummy = bus_space_read_2(tag, handle, i);
				i += 2;
				break;
			    case VME_D32:
				dummy = bus_space_read_4(tag, handle, i);
				i += 4;
				break;
			    default:
				panic("b3_617_vme_probe: invalid datasize %x",
				      datasize);
			}
		}
	}

	if ((status = read_csr_byte(sc, LOC_STATUS)) & LSR_ERROR_MASK) {
#ifdef BIT3DEBUG
		printf("b3_617_vme_probe: caught error %x\n", status);
#endif
		write_csr_byte(sc, LOC_CMD1, LC1_CLR_ERROR);
		res = EIO;
	}

	b3_617_unmap_vme(vsc, resc);
	return (res);
}

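/*
 * VME interrupt support. An interrupt "handle" is simply the level and
 * vector packed into an opaque cookie; establish/disestablish just
 * maintain the handler list that b3_617_vmeintr() walks.
 */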
int
b3_617_map_vmeint(void *vsc, int level, int vector, vme_intr_handle_t *handlep)
{
	if (!sc->sc_ih) {
		printf("%s: b3_617_map_vmeint: no IRQ\n",
		       device_xname(&sc->sc_dev));
		return (ENXIO);
	}
	/*
	 * We should check whether the interface can pass this interrupt
	 * level at all, but we don't know much about the jumper setting.
	 */
	*handlep = (void *)(long)((level << 8) | vector); /* XXX */
	return (0);
}

void *
b3_617_establish_vmeint(void *vsc, vme_intr_handle_t handle, int prior,
    int (*func)(void *), void *arg)
{
	struct b3_617_vmeintrhand *ih;
	long lv;
	int s;

	/* no point in sleeping unless someone can free memory. */
	ih = malloc(sizeof *ih, M_DEVBUF, cold ? M_NOWAIT : M_WAITOK);
	if (ih == NULL)
		panic("b3_617_establish_vmeint: can't malloc handler info");

	lv = (long)handle; /* XXX */

	ih->ih_fun = func;
	ih->ih_arg = arg;
	ih->ih_level = lv >> 8;
	ih->ih_vector = lv & 0xff;
	ih->ih_prior = prior;
	ih->ih_count = 0;

	s = splhigh();
	TAILQ_INSERT_TAIL(&(sc->intrhdls), ih, ih_next);
	splx(s);

	return (ih);
}

void
b3_617_disestablish_vmeint(void *vsc, void *cookie)
{
	struct b3_617_vmeintrhand *ih = cookie;
	int s;

	if (!ih) {
		printf("b3_617_disestablish_vmeint: NULL arg\n");
		return;
	}

	s = splhigh();
	TAILQ_REMOVE(&(sc->intrhdls), ih, ih_next);
	splx(s);

	free(ih, M_DEVBUF);
}

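/*
 * PCI interrupt handler. Loops while the local interrupt controller
 * reports something pending and checks, in turn, for DMA completion,
 * VME backplane interrupts, and the PR/PT interrupt flags in the local
 * and remote status registers, per ch. 5.5.5 of the adapter manual.
 */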
int
b3_617_intr(void *vsc)
{
	int handled = 0;

	/* follows ch. 5.5.5 (reordered for speed) */
	while (read_csr_byte(sc, LOC_INT_CTRL) & LIC_INT_PENDING) {
		unsigned char lstat;

		handled = 1;

		/* no error interrupts! */

		lstat = read_csr_byte(sc, LDMA_CMD);
		if ((lstat & LDC_DMA_DONE) && (lstat & LDC_DMA_INT_ENABLE)) {
			/* DMA done indicator flag */
			write_csr_byte(sc, LDMA_CMD, lstat & (~LDC_DMA_DONE));
#if 0
			b3_617_cntlrdma_done(sc);
#endif
			continue;
		}

		lstat = read_csr_byte(sc, LOC_INT_STATUS);
		if (lstat & LIS_CINT_MASK) {
			/* VME backplane interrupt, ch. 5.5.3 */
			b3_617_vmeintr(sc, lstat);
		}

		/* for now, ignore "mailbox interrupts" */

		lstat = read_csr_byte(sc, LOC_STATUS);
		if (lstat & LSR_PR_STATUS) {
			/* PR interrupt received from REMOTE */
			write_csr_byte(sc, LOC_CMD1, LC1_CLR_PR_INT);
			continue;
		}

		lstat = read_csr_byte(sc, REM_STATUS);
		if (lstat & RSR_PT_STATUS) {
			/* PT interrupt is set */
			write_csr_byte(sc, REM_CMD1, RC1_CLR_PT_INT);
			continue;
		}
	}
	return (handled);
}

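/*
 * DMA map/memory operations are not implemented for this adapter yet;
 * the stubs below just return EINVAL (or do nothing) so VME device
 * drivers can fail gracefully.
 */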
int
b3_617_dmamap_create(void *vsc, vme_size_t len, vme_am_t am,
    vme_datasize_t datasize, vme_swap_t swap, int nsegs, vme_size_t segsz,
    vme_addr_t bound, int flags, bus_dmamap_t *mapp)
{
	return (EINVAL);
}

void
b3_617_dmamap_destroy(void *vsc, bus_dmamap_t map)
{
}

int
b3_617_dmamem_alloc(void *vsc, vme_size_t len, vme_am_t am,
    vme_datasize_t datasizes, vme_swap_t swap, bus_dma_segment_t *segs,
    int nsegs, int *rsegs, int flags)
{
	return (EINVAL);
}

void
b3_617_dmamem_free(void *vsc, bus_dma_segment_t *segs, int nsegs)
{
}

#undef sc