/*	$NetBSD: mvmebus.c,v 1.17 2009/03/16 23:11:16 dsl Exp $	*/

/*-
 * Copyright (c) 2000, 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Steve C. Woodford.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: mvmebus.c,v 1.17 2009/03/16 23:11:16 dsl Exp $");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/malloc.h>
#include <sys/kcore.h>

#include <sys/cpu.h>
#include <sys/bus.h>

#include <dev/vme/vmereg.h>
#include <dev/vme/vmevar.h>

#include <dev/mvme/mvmebus.h>

#ifdef DIAGNOSTIC
int	mvmebus_dummy_dmamap_create(bus_dma_tag_t, bus_size_t, int, bus_size_t,
	    bus_size_t, int, bus_dmamap_t *);
void	mvmebus_dummy_dmamap_destroy(bus_dma_tag_t, bus_dmamap_t);
int	mvmebus_dummy_dmamem_alloc(bus_dma_tag_t, bus_size_t, bus_size_t,
	    bus_size_t, bus_dma_segment_t *, int, int *, int);
void	mvmebus_dummy_dmamem_free(bus_dma_tag_t, bus_dma_segment_t *, int);
#endif

#ifdef DEBUG
static const char *mvmebus_mod_string(vme_addr_t, vme_size_t,
	    vme_am_t, vme_datasize_t);
#endif

static void mvmebus_offboard_ram(struct mvmebus_softc *);
static int mvmebus_dmamap_load_common(struct mvmebus_softc *, bus_dmamap_t);

vme_am_t	_mvmebus_am_cap[] = {
	MVMEBUS_AM_CAP_BLKD64 | MVMEBUS_AM_CAP_USER,
	MVMEBUS_AM_CAP_DATA   | MVMEBUS_AM_CAP_USER,
	MVMEBUS_AM_CAP_PROG   | MVMEBUS_AM_CAP_USER,
	MVMEBUS_AM_CAP_BLK    | MVMEBUS_AM_CAP_USER,
	MVMEBUS_AM_CAP_BLKD64 | MVMEBUS_AM_CAP_SUPER,
	MVMEBUS_AM_CAP_DATA   | MVMEBUS_AM_CAP_SUPER,
	MVMEBUS_AM_CAP_PROG   | MVMEBUS_AM_CAP_SUPER,
	MVMEBUS_AM_CAP_BLK    | MVMEBUS_AM_CAP_SUPER
};

const char *mvmebus_irq_name[] = {
	"vmeirq0", "vmeirq1", "vmeirq2", "vmeirq3",
	"vmeirq4", "vmeirq5", "vmeirq6", "vmeirq7"
};

extern phys_ram_seg_t mem_clusters[];
extern int mem_cluster_cnt;


static void
mvmebus_offboard_ram(struct mvmebus_softc *sc)
{
	struct mvmebus_range *svr, *mvr;
	vme_addr_t start, end, size;
	int i;

	/*
	 * If we have any offboard RAM (i.e. a VMEbus RAM board) then
	 * we need to record its details since it's effectively another
	 * VMEbus slave image as far as we're concerned.
	 * The chip-specific backend will have reserved sc->sc_slaves[0]
	 * for exactly this purpose.
	 */
	svr = sc->sc_slaves;
	if (mem_cluster_cnt < 2) {
		svr->vr_am = MVMEBUS_AM_DISABLED;
		return;
	}

	start = mem_clusters[1].start;
	size = mem_clusters[1].size - 1;
	end = start + size;

	/*
	 * Figure out which VMEbus master image the RAM is
	 * visible through. This will tell us the address
	 * modifier and datasizes it uses, as well as allowing
	 * us to calculate its `real' VMEbus address.
	 *
	 * XXX FIXME: This is broken if the RAM is mapped through
	 * a translated address space. For example, on mvme167 it's
	 * perfectly legal to set up the following A32 mapping:
	 *
	 *  vr_locaddr  == 0x80000000
	 *  vr_vmestart == 0x10000000
	 *  vr_vmeend   == 0x10ffffff
	 *
	 * In this case, RAM at VMEbus address 0x10800000 will appear at local
	 * address 0x80800000, but we need to set the slave vr_vmestart to
	 * 0x10800000.
	 */
	for (i = 0, mvr = sc->sc_masters; i < sc->sc_nmasters; i++, mvr++) {
		vme_addr_t vstart = mvr->vr_locstart + mvr->vr_vmestart;

		if (start >= vstart &&
		    end <= vstart + (mvr->vr_vmeend - mvr->vr_vmestart))
			break;
	}
	if (i == sc->sc_nmasters) {
		svr->vr_am = MVMEBUS_AM_DISABLED;
#ifdef DEBUG
		printf("%s: No VMEbus master mapping for offboard RAM!\n",
		    device_xname(&sc->sc_dev));
#endif
		return;
	}

	svr->vr_locstart = start;
	svr->vr_vmestart = start & mvr->vr_mask;
	svr->vr_vmeend = svr->vr_vmestart + size;
	svr->vr_datasize = mvr->vr_datasize;
	svr->vr_mask = mvr->vr_mask;
	svr->vr_am = mvr->vr_am & VME_AM_ADRSIZEMASK;
	svr->vr_am |= MVMEBUS_AM_CAP_DATA  | MVMEBUS_AM_CAP_PROG |
		      MVMEBUS_AM_CAP_SUPER | MVMEBUS_AM_CAP_USER;
}

void
mvmebus_attach(struct mvmebus_softc *sc)
{
	struct vmebus_attach_args vaa;
	int i;

	/* Zap the IRQ reference counts */
	for (i = 0; i < 8; i++)
		sc->sc_irqref[i] = 0;

	/* If there's offboard RAM, get its VMEbus slave attributes */
	mvmebus_offboard_ram(sc);

#ifdef DEBUG
	for (i = 0; i < sc->sc_nmasters; i++) {
		struct mvmebus_range *vr = &sc->sc_masters[i];
		if (vr->vr_am == MVMEBUS_AM_DISABLED) {
			printf("%s: Master#%d: disabled\n",
			    device_xname(&sc->sc_dev), i);
			continue;
		}
		printf("%s: Master#%d: 0x%08lx -> %s\n",
		    device_xname(&sc->sc_dev), i,
		    vr->vr_locstart + (vr->vr_vmestart & vr->vr_mask),
		    mvmebus_mod_string(vr->vr_vmestart,
			(vr->vr_vmeend - vr->vr_vmestart) + 1,
			vr->vr_am, vr->vr_datasize));
	}

	for (i = 0; i < sc->sc_nslaves; i++) {
		struct mvmebus_range *vr = &sc->sc_slaves[i];
		if (vr->vr_am == MVMEBUS_AM_DISABLED) {
			printf("%s:  Slave#%d: disabled\n",
			    device_xname(&sc->sc_dev), i);
			continue;
		}
		printf("%s:  Slave#%d: 0x%08lx -> %s\n",
		    device_xname(&sc->sc_dev), i, vr->vr_locstart,
		    mvmebus_mod_string(vr->vr_vmestart,
			(vr->vr_vmeend - vr->vr_vmestart) + 1,
			vr->vr_am, vr->vr_datasize));
	}
#endif

	sc->sc_vct.cookie = sc;
	sc->sc_vct.vct_probe = mvmebus_probe;
	sc->sc_vct.vct_map = mvmebus_map;
	sc->sc_vct.vct_unmap = mvmebus_unmap;
	sc->sc_vct.vct_int_map = mvmebus_intmap;
	sc->sc_vct.vct_int_evcnt = mvmebus_intr_evcnt;
	sc->sc_vct.vct_int_establish = mvmebus_intr_establish;
	sc->sc_vct.vct_int_disestablish = mvmebus_intr_disestablish;
	sc->sc_vct.vct_dmamap_create = mvmebus_dmamap_create;
	sc->sc_vct.vct_dmamap_destroy = mvmebus_dmamap_destroy;
	sc->sc_vct.vct_dmamem_alloc = mvmebus_dmamem_alloc;
	sc->sc_vct.vct_dmamem_free = mvmebus_dmamem_free;

	sc->sc_mvmedmat._cookie = sc;
	sc->sc_mvmedmat._dmamap_load = mvmebus_dmamap_load;
	sc->sc_mvmedmat._dmamap_load_mbuf = mvmebus_dmamap_load_mbuf;
	sc->sc_mvmedmat._dmamap_load_uio = mvmebus_dmamap_load_uio;
	sc->sc_mvmedmat._dmamap_load_raw = mvmebus_dmamap_load_raw;
	sc->sc_mvmedmat._dmamap_unload = mvmebus_dmamap_unload;
	sc->sc_mvmedmat._dmamap_sync = mvmebus_dmamap_sync;
	sc->sc_mvmedmat._dmamem_map = mvmebus_dmamem_map;
	sc->sc_mvmedmat._dmamem_unmap = mvmebus_dmamem_unmap;
	sc->sc_mvmedmat._dmamem_mmap = mvmebus_dmamem_mmap;

#ifdef DIAGNOSTIC
	sc->sc_mvmedmat._dmamap_create = mvmebus_dummy_dmamap_create;
	sc->sc_mvmedmat._dmamap_destroy = mvmebus_dummy_dmamap_destroy;
	sc->sc_mvmedmat._dmamem_alloc = mvmebus_dummy_dmamem_alloc;
	sc->sc_mvmedmat._dmamem_free = mvmebus_dummy_dmamem_free;
#else
	sc->sc_mvmedmat._dmamap_create = NULL;
	sc->sc_mvmedmat._dmamap_destroy = NULL;
	sc->sc_mvmedmat._dmamem_alloc = NULL;
	sc->sc_mvmedmat._dmamem_free = NULL;
#endif

	vaa.va_vct = &sc->sc_vct;
	vaa.va_bdt = &sc->sc_mvmedmat;
	vaa.va_slaveconfig = NULL;

	config_found(&sc->sc_dev, &vaa, 0);
}
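
/*
 * Illustrative sketch (not part of the driver, not compiled): a minimal,
 * hypothetical chip-specific backend attach routine showing which softc
 * fields this common code expects to be filled in before mvmebus_attach()
 * is called.  The "myvme" names and range sources are invented for
 * illustration only; real backends derive them from their bus bridge
 * registers.
 */
#if 0
static void
myvme_chip_attach(struct myvme_softc *csc)
{
	struct mvmebus_softc *sc = &csc->sc_mvmebus;

	/* Bus/DMA tags and per-chip interrupt hooks used by the common code */
	sc->sc_bust = csc->sc_bust;
	sc->sc_dmat = csc->sc_dmat;
	sc->sc_chip = csc;
	sc->sc_intr_establish = myvme_chip_intr_establish;
	sc->sc_intr_disestablish = myvme_chip_intr_disestablish;

	/*
	 * Decoded VMEbus master and slave windows; slave #0 is reserved
	 * for offboard RAM (see mvmebus_offboard_ram() above).
	 */
	sc->sc_masters = csc->sc_master_ranges;
	sc->sc_nmasters = csc->sc_nmaster_ranges;
	sc->sc_slaves = csc->sc_slave_ranges;
	sc->sc_nslaves = csc->sc_nslave_ranges;

	mvmebus_attach(sc);
}
#endif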

int
mvmebus_map(void *vsc, vme_addr_t vmeaddr, vme_size_t len, vme_am_t am, vme_datasize_t datasize, vme_swap_t swap, bus_space_tag_t *tag, bus_space_handle_t *handle, vme_mapresc_t *resc)
{
	struct mvmebus_softc *sc;
	struct mvmebus_mapresc *mr;
	struct mvmebus_range *vr;
	vme_addr_t end;
	vme_am_t cap, as;
	paddr_t paddr;
	int rv, i;

	sc = vsc;
	end = (vmeaddr + len) - 1;
	paddr = 0;
	vr = sc->sc_masters;
	cap = MVMEBUS_AM2CAP(am);
	as = am & VME_AM_ADRSIZEMASK;

	for (i = 0; i < sc->sc_nmasters && paddr == 0; i++, vr++) {
		if (vr->vr_am == MVMEBUS_AM_DISABLED)
			continue;

		if (cap == (vr->vr_am & cap) &&
		    as == (vr->vr_am & VME_AM_ADRSIZEMASK) &&
		    datasize <= vr->vr_datasize &&
		    vmeaddr >= vr->vr_vmestart && end < vr->vr_vmeend)
			paddr = vr->vr_locstart + (vmeaddr & vr->vr_mask);
	}
	if (paddr == 0)
		return (ENOMEM);

	rv = bus_space_map(sc->sc_bust, paddr, len, 0, handle);
	if (rv != 0)
		return (rv);

	/* Allocate space for the resource tag */
	if ((mr = malloc(sizeof(*mr), M_DEVBUF, M_NOWAIT)) == NULL) {
		bus_space_unmap(sc->sc_bust, *handle, len);
		return (ENOMEM);
	}

	/* Record the range's details */
	mr->mr_am = am;
	mr->mr_datasize = datasize;
	mr->mr_addr = vmeaddr;
	mr->mr_size = len;
	mr->mr_handle = *handle;
	mr->mr_range = i;

	*tag = sc->sc_bust;
	*resc = (vme_mapresc_t *) mr;

	return (0);
}
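
/*
 * Illustrative sketch (not compiled): how a child VME driver would obtain
 * a mapping through the vct_map entry point above.  It is written against
 * the vme_space_map()/vme_space_unmap() wrappers as declared in
 * <dev/vme/vmevar.h>; treat those names and exact signatures as
 * assumptions here and check the header.  The 256-byte size and the
 * register-access comment are placeholders.
 */
#if 0
static int
example_map_card(vme_chipset_tag_t vct, vme_addr_t addr, vme_am_t am)
{
	bus_space_tag_t bust;
	bus_space_handle_t bush;
	vme_mapresc_t resc;
	int error;

	/* Map 256 bytes of the card's register space for D16 accesses */
	error = vme_space_map(vct, addr, 0x100, am, VME_D16, 0,
	    &bust, &bush, &resc);
	if (error)
		return (error);

	/* ... access registers with bus_space_read_2()/write_2() ... */

	vme_space_unmap(vct, resc);
	return (0);
}
#endif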

/* ARGSUSED */
void
mvmebus_unmap(void *vsc, vme_mapresc_t resc)
{
	struct mvmebus_softc *sc = vsc;
	struct mvmebus_mapresc *mr = (struct mvmebus_mapresc *) resc;

	bus_space_unmap(sc->sc_bust, mr->mr_handle, mr->mr_size);

	free(mr, M_DEVBUF);
}

int
mvmebus_probe(void *vsc, vme_addr_t vmeaddr, vme_size_t len, vme_am_t am, vme_datasize_t datasize, int (*callback)(void *, bus_space_tag_t, bus_space_handle_t), void *arg)
{
	bus_space_tag_t tag;
	bus_space_handle_t handle;
	vme_mapresc_t resc;
	vme_size_t offs;
	int rv;

	/* Get a temporary mapping to the VMEbus range */
	rv = mvmebus_map(vsc, vmeaddr, len, am, datasize, 0,
	    &tag, &handle, &resc);
	if (rv)
		return (rv);

	if (callback)
		rv = (*callback) (arg, tag, handle);
	else
		for (offs = 0; offs < len && rv == 0;) {
			switch (datasize) {
			case VME_D8:
				rv = bus_space_peek_1(tag, handle, offs, NULL);
				offs += 1;
				break;

			case VME_D16:
				rv = bus_space_peek_2(tag, handle, offs, NULL);
				offs += 2;
				break;

			case VME_D32:
				rv = bus_space_peek_4(tag, handle, offs, NULL);
				offs += 4;
				break;
			}
		}

	mvmebus_unmap(vsc, resc);

	return (rv);
}

/* ARGSUSED */
int
mvmebus_intmap(void *vsc, int level, int vector, vme_intr_handle_t *handlep)
{

	if (level < 1 || level > 7 || vector < 0x80 || vector > 0xff)
		return (EINVAL);

	/* This is rather gross */
	*handlep = (void *) (int) ((level << 8) | vector);
	return (0);
}

/* ARGSUSED */
const struct evcnt *
mvmebus_intr_evcnt(void *vsc, vme_intr_handle_t handle)
{
	struct mvmebus_softc *sc = vsc;

	return (&sc->sc_evcnt[(((int) handle) >> 8) - 1]);
}

void *
mvmebus_intr_establish(void *vsc, vme_intr_handle_t handle, int prior, int (*func)(void *), void *arg)
{
	struct mvmebus_softc *sc;
	int level, vector, first;

	sc = vsc;

	/* Extract the interrupt's level and vector */
	level = ((int) handle) >> 8;
	vector = ((int) handle) & 0xff;

#ifdef DIAGNOSTIC
	if (vector < 0 || vector > 0xff) {
		printf("%s: Illegal vector offset: 0x%x\n",
		    device_xname(&sc->sc_dev), vector);
		panic("mvmebus_intr_establish");
	}
	if (level < 1 || level > 7) {
		printf("%s: Illegal interrupt level: %d\n",
		    device_xname(&sc->sc_dev), level);
		panic("mvmebus_intr_establish");
	}
#endif

	first = (sc->sc_irqref[level]++ == 0);

	(*sc->sc_intr_establish)(sc->sc_chip, prior, level, vector, first,
	    func, arg, &sc->sc_evcnt[level - 1]);

	return ((void *) handle);
}

void
mvmebus_intr_disestablish(void *vsc, vme_intr_handle_t handle)
{
	struct mvmebus_softc *sc;
	int level, vector, last;

	sc = vsc;

	/* Extract the interrupt's level and vector */
	level = ((int) handle) >> 8;
	vector = ((int) handle) & 0xff;

#ifdef DIAGNOSTIC
	if (vector < 0 || vector > 0xff) {
		printf("%s: Illegal vector offset: 0x%x\n",
		    device_xname(&sc->sc_dev), vector);
		panic("mvmebus_intr_disestablish");
	}
	if (level < 1 || level > 7) {
		printf("%s: Illegal interrupt level: %d\n",
		    device_xname(&sc->sc_dev), level);
		panic("mvmebus_intr_disestablish");
	}
	if (sc->sc_irqref[level] == 0) {
		printf("%s: VMEirq#%d: Reference count already zero!\n",
		    device_xname(&sc->sc_dev), level);
		panic("mvmebus_intr_disestablish");
	}
#endif

	last = (--(sc->sc_irqref[level]) == 0);

	(*sc->sc_intr_disestablish)(sc->sc_chip, level, vector, last,
	    &sc->sc_evcnt[level - 1]);
}
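
/*
 * Illustrative sketch (not compiled): how a child driver would hook a
 * VMEbus interrupt through the vct_int_map/vct_int_establish entry points
 * above, using the vme_intr_map()/vme_intr_establish() wrappers from
 * <dev/vme/vmevar.h>; treat those wrapper names as assumptions and check
 * the header.  The level/vector values and the handler are hypothetical;
 * note that mvmebus_intmap() only accepts levels 1-7 and vectors
 * 0x80-0xff.
 */
#if 0
static int
example_intr(void *arg)
{

	/* ... service the board; return nonzero if the interrupt was ours ... */
	return (1);
}

static int
example_establish_intr(vme_chipset_tag_t vct, void *softc)
{
	vme_intr_handle_t ih;
	int error;

	/* Level 3, vector 0x90: both are board-specific choices */
	error = vme_intr_map(vct, 3, 0x90, &ih);
	if (error)
		return (error);

	if (vme_intr_establish(vct, ih, IPL_BIO, example_intr, softc) == NULL)
		return (EIO);

	return (0);
}
#endif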

#ifdef DIAGNOSTIC
/* ARGSUSED */
int
mvmebus_dummy_dmamap_create(bus_dma_tag_t t, bus_size_t size, int nsegs, bus_size_t maxsegsz, bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
{

	panic("Must use vme_dmamap_create() in place of bus_dmamap_create()");
	return (0);	/* Shutup the compiler */
}

/* ARGSUSED */
void
mvmebus_dummy_dmamap_destroy(bus_dma_tag_t t, bus_dmamap_t map)
{

	panic("Must use vme_dmamap_destroy() in place of bus_dmamap_destroy()");
}
#endif

/* ARGSUSED */
int
mvmebus_dmamap_create(void *vsc, vme_size_t len, vme_am_t am, vme_datasize_t datasize, vme_swap_t swap, int nsegs, vme_size_t segsz, vme_addr_t bound, int flags, bus_dmamap_t *mapp)
{
	struct mvmebus_softc *sc = vsc;
	struct mvmebus_dmamap *vmap;
	struct mvmebus_range *vr;
	vme_am_t cap, as;
	int i, rv;

	cap = MVMEBUS_AM2CAP(am);
	as = am & VME_AM_ADRSIZEMASK;

	/*
	 * Verify that we even stand a chance of satisfying
	 * the VMEbus address space and datasize requested.
	 */
	for (i = 0, vr = sc->sc_slaves; i < sc->sc_nslaves; i++, vr++) {
		if (vr->vr_am == MVMEBUS_AM_DISABLED)
			continue;

		if (as == (vr->vr_am & VME_AM_ADRSIZEMASK) &&
		    cap == (vr->vr_am & cap) && datasize <= vr->vr_datasize &&
		    len <= (vr->vr_vmeend - vr->vr_vmestart))
			break;
	}

	if (i == sc->sc_nslaves)
		return (EINVAL);

	if ((vmap = malloc(sizeof(*vmap), M_DMAMAP,
	    (flags & BUS_DMA_NOWAIT) ? M_NOWAIT : M_WAITOK)) == NULL)
		return (ENOMEM);


	rv = bus_dmamap_create(sc->sc_dmat, len, nsegs, segsz,
	    bound, flags, mapp);
	if (rv != 0) {
		free(vmap, M_DMAMAP);
		return (rv);
	}

	vmap->vm_am = am;
	vmap->vm_datasize = datasize;
	vmap->vm_swap = swap;
	vmap->vm_slave = vr;

	(*mapp)->_dm_cookie = vmap;

	return (0);
}

void
mvmebus_dmamap_destroy(void *vsc, bus_dmamap_t map)
{
	struct mvmebus_softc *sc = vsc;

	free(map->_dm_cookie, M_DMAMAP);
	bus_dmamap_destroy(sc->sc_dmat, map);
}

static int
mvmebus_dmamap_load_common(struct mvmebus_softc *sc, bus_dmamap_t map)
{
	struct mvmebus_dmamap *vmap = map->_dm_cookie;
	struct mvmebus_range *vr = vmap->vm_slave;
	bus_dma_segment_t *ds;
	vme_am_t cap, am;
	int i;

	cap = MVMEBUS_AM2CAP(vmap->vm_am);
	am = vmap->vm_am & VME_AM_ADRSIZEMASK;

	/*
	 * Traverse the list of segments which make up this map, and
	 * convert the CPU-relative addresses therein to VMEbus addresses.
	 */
	for (ds = &map->dm_segs[0]; ds < &map->dm_segs[map->dm_nsegs]; ds++) {
		/*
		 * First, see if this map's slave image can access the
		 * segment, otherwise we have to waste time scanning all
		 * the slave images.
		 */
		vr = vmap->vm_slave;
		if (am == (vr->vr_am & VME_AM_ADRSIZEMASK) &&
		    cap == (vr->vr_am & cap) &&
		    vmap->vm_datasize <= vr->vr_datasize &&
		    ds->_ds_cpuaddr >= vr->vr_locstart &&
		    ds->ds_len <= (vr->vr_vmeend - vr->vr_vmestart))
			goto found;

		for (i = 0, vr = sc->sc_slaves; i < sc->sc_nslaves; i++, vr++) {
			if (vr->vr_am == MVMEBUS_AM_DISABLED)
				continue;

			/*
			 * Filter out any slave images which don't have the
			 * same VMEbus address modifier and datasize as
			 * this DMA map, and those which don't cover the
			 * physical address region containing the segment.
			 */
			if (vr != vmap->vm_slave &&
			    am == (vr->vr_am & VME_AM_ADRSIZEMASK) &&
			    cap == (vr->vr_am & cap) &&
			    vmap->vm_datasize <= vr->vr_datasize &&
			    ds->_ds_cpuaddr >= vr->vr_locstart &&
			    ds->ds_len <= (vr->vr_vmeend - vr->vr_vmestart))
				break;
		}

		/*
		 * Did we find an applicable slave image which covers this
		 * segment?
		 */
		if (i == sc->sc_nslaves) {
			/*
			 * XXX TODO:
			 *
			 * Bounce this segment via a bounce buffer allocated
			 * from this DMA map.
			 */
			printf("mvmebus_dmamap_load_common: bounce needed!\n");
			return (EINVAL);
		}

found:
		/*
		 * Generate the VMEbus address of this segment
		 */
		ds->ds_addr = (ds->_ds_cpuaddr - vr->vr_locstart) +
		    vr->vr_vmestart;
	}

	return (0);
}

int
mvmebus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf, bus_size_t buflen, struct proc *p, int flags)
{
	struct mvmebus_softc *sc = t->_cookie;
	int rv;

	rv = bus_dmamap_load(sc->sc_dmat, map, buf, buflen, p, flags);
	if (rv != 0)
		return rv;

	return mvmebus_dmamap_load_common(sc, map);
}

int
mvmebus_dmamap_load_mbuf(bus_dma_tag_t t, bus_dmamap_t map, struct mbuf *chain, int flags)
{
	struct mvmebus_softc *sc = t->_cookie;
	int rv;

	rv = bus_dmamap_load_mbuf(sc->sc_dmat, map, chain, flags);
	if (rv != 0)
		return rv;

	return mvmebus_dmamap_load_common(sc, map);
}

int
mvmebus_dmamap_load_uio(bus_dma_tag_t t, bus_dmamap_t map, struct uio *uio, int flags)
{
	struct mvmebus_softc *sc = t->_cookie;
	int rv;

	rv = bus_dmamap_load_uio(sc->sc_dmat, map, uio, flags);
	if (rv != 0)
		return rv;

	return mvmebus_dmamap_load_common(sc, map);
}

int
mvmebus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map, bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{
	struct mvmebus_softc *sc = t->_cookie;
	int rv;

	/*
	 * mvmebus_dmamem_alloc() will ensure that the physical memory
	 * backing these segments is 100% accessible in at least one
	 * of the board's VMEbus slave images.
	 */
	rv = bus_dmamap_load_raw(sc->sc_dmat, map, segs, nsegs, size, flags);
	if (rv != 0)
		return rv;

	return mvmebus_dmamap_load_common(sc, map);
}

void
mvmebus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{
	struct mvmebus_softc *sc = t->_cookie;

	/* XXX Deal with bounce buffers */

	bus_dmamap_unload(sc->sc_dmat, map);
}

void
mvmebus_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map, bus_addr_t offset, bus_size_t len, int ops)
{
	struct mvmebus_softc *sc = t->_cookie;

	/* XXX Bounce buffers */

	bus_dmamap_sync(sc->sc_dmat, map, offset, len, ops);
}

#ifdef DIAGNOSTIC
/* ARGSUSED */
int
mvmebus_dummy_dmamem_alloc(bus_dma_tag_t t, bus_size_t size, bus_size_t align, bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs, int flags)
{

	panic("Must use vme_dmamem_alloc() in place of bus_dmamem_alloc()");
}

/* ARGSUSED */
void
mvmebus_dummy_dmamem_free(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs)
{

	panic("Must use vme_dmamem_free() in place of bus_dmamem_free()");
}
#endif

/* ARGSUSED */
int
mvmebus_dmamem_alloc(void *vsc, vme_size_t len, vme_am_t am, vme_datasize_t datasize, vme_swap_t swap, bus_dma_segment_t *segs, int nsegs, int *rsegs, int flags)
{
	extern paddr_t avail_start;
	struct mvmebus_softc *sc = vsc;
	struct mvmebus_range *vr;
	bus_addr_t low, high;
	bus_size_t bound;
	vme_am_t cap;
	int i;

	cap = MVMEBUS_AM2CAP(am);
	am &= VME_AM_ADRSIZEMASK;

	/*
	 * Find a slave mapping in the requested VMEbus address space.
	 */
	for (i = 0, vr = sc->sc_slaves; i < sc->sc_nslaves; i++, vr++) {
		if (vr->vr_am == MVMEBUS_AM_DISABLED)
			continue;

		if (i == 0 && (flags & BUS_DMA_ONBOARD_RAM) != 0)
			continue;

		if (am == (vr->vr_am & VME_AM_ADRSIZEMASK) &&
		    cap == (vr->vr_am & cap) && datasize <= vr->vr_datasize &&
		    len <= (vr->vr_vmeend - vr->vr_vmestart))
			break;
	}
	if (i == sc->sc_nslaves)
		return (EINVAL);

	/*
	 * Set up the constraints so we can allocate physical memory which
	 * is visible in the requested address space
	 */
	low = max(vr->vr_locstart, avail_start);
	high = vr->vr_locstart + (vr->vr_vmeend - vr->vr_vmestart) + 1;
	bound = (bus_size_t) vr->vr_mask + 1;

	/*
	 * Allocate physical memory.
	 *
	 * Note: This fills in the segments with CPU-relative physical
	 * addresses. A further call to bus_dmamap_load_raw() (with a
	 * DMA map which specifies the same VMEbus address space and
	 * constraints as the call to here) must be made. The segments
	 * of the DMA map will then contain VMEbus-relative physical
	 * addresses of the memory allocated here.
	 */
	return _bus_dmamem_alloc_common(sc->sc_dmat, low, high,
	    len, 0, bound, segs, nsegs, rsegs, flags);
}
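
/*
 * Illustrative sketch (not compiled): the allocate-then-load sequence the
 * note above describes, as a hypothetical slave-side driver would issue it
 * through the vme_dmamem_alloc()/vme_dmamap_create() wrappers (the entry
 * points named in the DIAGNOSTIC panics above).  The 4KB size, the
 * "vmedmat" tag (the bus_dma tag handed out as vaa.va_bdt), and the
 * simplified error handling are placeholders.
 */
#if 0
static int
example_alloc_dma_ring(vme_chipset_tag_t vct, bus_dma_tag_t vmedmat,
    vme_am_t am, bus_dmamap_t *mapp, bus_dma_segment_t *seg)
{
	int rseg, error;

	/* Physical memory visible through a matching VMEbus slave image */
	error = vme_dmamem_alloc(vct, 0x1000, am, VME_D32, 0,
	    seg, 1, &rseg, BUS_DMA_NOWAIT);
	if (error)
		return (error);

	/* A DMA map with the same address space/datasize constraints */
	error = vme_dmamap_create(vct, 0x1000, am, VME_D32, 0, 1,
	    0x1000, 0, BUS_DMA_NOWAIT, mapp);
	if (error)
		return (error);

	/* Rewrites the map's segments with VMEbus-relative addresses */
	return (bus_dmamap_load_raw(vmedmat, *mapp, seg, rseg, 0x1000,
	    BUS_DMA_NOWAIT));
}
#endif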

void
mvmebus_dmamem_free(void *vsc, bus_dma_segment_t *segs, int nsegs)
{
	struct mvmebus_softc *sc = vsc;

	bus_dmamem_free(sc->sc_dmat, segs, nsegs);
}

int
mvmebus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs, size_t size, void **kvap, int flags)
{
	struct mvmebus_softc *sc = t->_cookie;

	return bus_dmamem_map(sc->sc_dmat, segs, nsegs, size, kvap, flags);
}

void
mvmebus_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
{
	struct mvmebus_softc *sc = t->_cookie;

	bus_dmamem_unmap(sc->sc_dmat, kva, size);
}

paddr_t
mvmebus_dmamem_mmap(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs, off_t offset, int prot, int flags)
{
	struct mvmebus_softc *sc = t->_cookie;

	return bus_dmamem_mmap(sc->sc_dmat, segs, nsegs, offset, prot, flags);
}

#ifdef DEBUG
static const char *
mvmebus_mod_string(vme_addr_t addr, vme_size_t len, vme_am_t am, vme_datasize_t ds)
{
	static const char *mode[] = {"BLT64)", "DATA)", "PROG)", "BLT32)"};
	static const char *dsiz[] = {"(", "(D8,", "(D16,", "(D16-D8,",
	"(D32,", "(D32,D8,", "(D32-D16,", "(D32-D8,"};
	static const char *adrfmt[] = { "A32:%08x-%08x ", "USR:%08x-%08x ",
	    "A16:%04x-%04x ", "A24:%06x-%06x " };
	static char mstring[40];

	snprintf(mstring, sizeof(mstring),
	    adrfmt[(am & VME_AM_ADRSIZEMASK) >> VME_AM_ADRSIZESHIFT],
	    addr, addr + len - 1);
	strlcat(mstring, dsiz[ds & 0x7], sizeof(mstring));

	if (MVMEBUS_AM_HAS_CAP(am)) {
		if (am & MVMEBUS_AM_CAP_DATA)
			strlcat(mstring, "D", sizeof(mstring));
		if (am & MVMEBUS_AM_CAP_PROG)
			strlcat(mstring, "P", sizeof(mstring));
		if (am & MVMEBUS_AM_CAP_USER)
			strlcat(mstring, "U", sizeof(mstring));
		if (am & MVMEBUS_AM_CAP_SUPER)
			strlcat(mstring, "S", sizeof(mstring));
		if (am & MVMEBUS_AM_CAP_BLK)
			strlcat(mstring, "B", sizeof(mstring));
		if (am & MVMEBUS_AM_CAP_BLKD64)
			strlcat(mstring, "6", sizeof(mstring));
		strlcat(mstring, ")", sizeof(mstring));
	} else {
		strlcat(mstring, ((am & VME_AM_PRIVMASK) == VME_AM_USER) ?
		    "USER," : "SUPER,", sizeof(mstring));
		strlcat(mstring, mode[am & VME_AM_MODEMASK], sizeof(mstring));
	}

	return (mstring);
}
#endif