1 /* $NetBSD: vme_machdep.c,v 1.76 2022/01/21 19:22:56 thorpej Exp $ */
2
3 /*-
4 * Copyright (c) 1997, 1998 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Paul Kranenburg.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 *
19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
29 * POSSIBILITY OF SUCH DAMAGE.
30 */
31
32 #include <sys/cdefs.h>
33 __KERNEL_RCSID(0, "$NetBSD: vme_machdep.c,v 1.76 2022/01/21 19:22:56 thorpej Exp $");
34
35 #include <sys/param.h>
36 #include <sys/extent.h>
37 #include <sys/systm.h>
38 #include <sys/device.h>
39 #include <sys/kmem.h>
40 #include <sys/errno.h>
41
42 #include <sys/proc.h>
43 #include <sys/syslog.h>
44
45 #include <uvm/uvm_extern.h>
46
47 #define _SPARC_BUS_DMA_PRIVATE
48 #include <sys/bus.h>
49 #include <sparc/sparc/iommuvar.h>
50 #include <machine/autoconf.h>
51 #include <machine/oldmon.h>
52 #include <machine/cpu.h>
53 #include <machine/ctlreg.h>
54 #include <machine/pcb.h>
55
56 #include <dev/vme/vmereg.h>
57 #include <dev/vme/vmevar.h>
58
59 #include <sparc/sparc/asm.h>
60 #include <sparc/sparc/vaddrs.h>
61 #include <sparc/sparc/cpuvar.h>
62 #include <sparc/dev/vmereg.h>
63
64 struct sparcvme_softc {
65 bus_space_tag_t sc_bustag;
66 bus_dma_tag_t sc_dmatag;
67 struct vmebusreg *sc_reg; /* VME control registers */
68 struct vmebusvec *sc_vec; /* VME interrupt vector */
69 struct rom_range *sc_range; /* ROM range property */
70 int sc_nrange;
71 volatile uint32_t *sc_ioctags; /* VME IO-cache tag registers */
72 volatile uint32_t *sc_iocflush;/* VME IO-cache flush registers */
73 int (*sc_vmeintr)(void *);
74 };
75 struct sparcvme_softc *sparcvme_sc;/*XXX*/
76
77 /* autoconfiguration driver */
78 static int vmematch_iommu(device_t, cfdata_t, void *);
79 static void vmeattach_iommu(device_t, device_t, void *);
80 static int vmematch_mainbus(device_t, cfdata_t, void *);
81 static void vmeattach_mainbus(device_t, device_t, void *);
82 #if defined(SUN4)
83 int vmeintr4(void *);
84 #endif
85 #if defined(SUN4M)
86 int vmeintr4m(void *);
87 static int sparc_vme_error(void);
88 #endif
89
90
91 static int sparc_vme_probe(void *, vme_addr_t, vme_size_t,
92 vme_am_t, vme_datasize_t,
93 int (*)(void *,
94 bus_space_tag_t, bus_space_handle_t),
95 void *);
96 static int sparc_vme_map(void *, vme_addr_t, vme_size_t, vme_am_t,
97 vme_datasize_t, vme_swap_t,
98 bus_space_tag_t *, bus_space_handle_t *,
99 vme_mapresc_t *);
100 static void sparc_vme_unmap(void *, vme_mapresc_t);
101 static int sparc_vme_intr_map(void *, int, int, vme_intr_handle_t *);
102 static const struct evcnt *sparc_vme_intr_evcnt(void *, vme_intr_handle_t);
103 static void * sparc_vme_intr_establish(void *, vme_intr_handle_t, int,
104 int (*)(void *), void *);
105 static void sparc_vme_intr_disestablish(void *, void *);
106
107 static int vmebus_translate(struct sparcvme_softc *, vme_am_t,
108 vme_addr_t, bus_addr_t *);
109 #ifdef notyet
110 #if defined(SUN4M)
111 static void sparc_vme_iommu_barrier(bus_space_tag_t, bus_space_handle_t,
112 bus_size_t, bus_size_t, int);
113
114 #endif /* SUN4M */
115 #endif
116
117 /*
118 * DMA functions.
119 */
120 #if defined(SUN4) || defined(SUN4M)
121 static void sparc_vct_dmamap_destroy(void *, bus_dmamap_t);
122 #endif
123
124 #if defined(SUN4)
125 static int sparc_vct4_dmamap_create(void *, vme_size_t, vme_am_t,
126 vme_datasize_t, vme_swap_t, int, vme_size_t, vme_addr_t,
127 int, bus_dmamap_t *);
128 static int sparc_vme4_dmamap_load(bus_dma_tag_t, bus_dmamap_t, void *,
129 bus_size_t, struct proc *, int);
130 static void sparc_vme4_dmamap_unload(bus_dma_tag_t, bus_dmamap_t);
131 static void sparc_vme4_dmamap_sync(bus_dma_tag_t, bus_dmamap_t,
132 bus_addr_t, bus_size_t, int);
133 #endif /* SUN4 */
134
135 #if defined(SUN4M)
136 static int sparc_vct_iommu_dmamap_create(void *, vme_size_t, vme_am_t,
137 vme_datasize_t, vme_swap_t, int, vme_size_t, vme_addr_t,
138 int, bus_dmamap_t *);
139 static int sparc_vme_iommu_dmamap_create(bus_dma_tag_t, bus_size_t,
140 int, bus_size_t, bus_size_t, int, bus_dmamap_t *);
141
142 static int sparc_vme_iommu_dmamap_load(bus_dma_tag_t, bus_dmamap_t,
143 void *, bus_size_t, struct proc *, int);
144 static void sparc_vme_iommu_dmamap_unload(bus_dma_tag_t, bus_dmamap_t);
145 static void sparc_vme_iommu_dmamap_sync(bus_dma_tag_t, bus_dmamap_t,
146 bus_addr_t, bus_size_t, int);
147 #endif /* SUN4M */
148
149 #if defined(SUN4) || defined(SUN4M)
150 static int sparc_vme_dmamem_map(bus_dma_tag_t, bus_dma_segment_t *,
151 int, size_t, void **, int);
152 #endif
153
154 #if 0
155 static void sparc_vme_dmamap_destroy(bus_dma_tag_t, bus_dmamap_t);
156 static void sparc_vme_dmamem_unmap(bus_dma_tag_t, void *, size_t);
157 static paddr_t sparc_vme_dmamem_mmap(bus_dma_tag_t,
158 bus_dma_segment_t *, int, off_t, int, int);
159 #endif
160
161 int sparc_vme_mmap_cookie(vme_addr_t, vme_am_t, bus_space_handle_t *);
162
163 CFATTACH_DECL_NEW(vme_mainbus, sizeof(struct sparcvme_softc),
164 vmematch_mainbus, vmeattach_mainbus, NULL, NULL);
165
166 CFATTACH_DECL_NEW(vme_iommu, sizeof(struct sparcvme_softc),
167 vmematch_iommu, vmeattach_iommu, NULL, NULL);
168
169 static int vme_attached;
170
171 extern int (*vmeerr_handler)(void);
172
173 #define VMEMOD_D32 0x40 /* ??? */
174
175 /* If the PROM does not provide the `ranges' property, we make up our own */
176 struct rom_range vmebus_translations[] = {
177 #define _DS (VME_AM_MBO | VME_AM_SUPER | VME_AM_DATA)
178 { VME_AM_A16|_DS, 0, PMAP_VME16, 0xffff0000, 0 },
179 { VME_AM_A24|_DS, 0, PMAP_VME16, 0xff000000, 0 },
180 { VME_AM_A32|_DS, 0, PMAP_VME16, 0x00000000, 0 },
181 { VME_AM_A16|VMEMOD_D32|_DS, 0, PMAP_VME32, 0xffff0000, 0 },
182 { VME_AM_A24|VMEMOD_D32|_DS, 0, PMAP_VME32, 0xff000000, 0 },
183 { VME_AM_A32|VMEMOD_D32|_DS, 0, PMAP_VME32, 0x00000000, 0 }
184 #undef _DS
185 };
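/*
 * Worked example (illustrative only): with the fallback table above, a
 * supervisor-data access to VME A16/D16 address 0x8000 matches the first
 * entry, and vmebus_translate() below yields
 * BUS_ADDR(PMAP_VME16, 0xffff0000 + 0x8000); the A32 entries pass the
 * VME address through unchanged apart from the PMAP_VME16/PMAP_VME32
 * space tag.
 */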
186
187 /*
188 * The VME bus logic on sun4 machines maps DMA requests in the first MB
189 * of VME space to the last MB of DVMA space. `vme_dvmamap' is used
190 * for DVMA space allocations. The DMA addresses returned by
191 * bus_dmamap_load*() must be relocated by -VME4_DVMA_BASE.
192 */
193 struct extent *vme_dvmamap;
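/*
 * Address-arithmetic sketch (assumes the standard sun4 DVMA window): if
 * extent_alloc() on `vme_dvmamap' returns a kernel DVMA address `dva'
 * within [VME4_DVMA_BASE, VME4_DVMA_END), sparc_vme4_dmamap_load() below
 * publishes `dva + voff - VME4_DVMA_BASE' in ds_addr, i.e. the offset
 * within the first MB of VME space that the sun4 VME logic maps back
 * onto that DVMA window.
 */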
194
195 /*
196 * The VME hardware on the sun4m IOMMU maps the first 8MB of 32-bit
197 * VME space to the last 8MB of DVMA space and the first 1MB of
198 * 24-bit VME space to the first 1MB of the last 8MB of DVMA space
199 * (thus 24-bit VME space overlaps the first 1MB of 32-bit space).
200 * The following constants define subregions in the IOMMU DVMA map
201 * for VME DVMA allocations. The DMA addresses returned by
202 * bus_dmamap_load*() must be relocated by -VME_IOMMU_DVMA_BASE.
203 */
204 #define VME_IOMMU_DVMA_BASE 0xff800000
205 #define VME_IOMMU_DVMA_AM24_BASE VME_IOMMU_DVMA_BASE
206 #define VME_IOMMU_DVMA_AM24_END 0xff900000
207 #define VME_IOMMU_DVMA_AM32_BASE VME_IOMMU_DVMA_BASE
208 #define VME_IOMMU_DVMA_AM32_END IOMMU_DVMA_END
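/*
 * Worked example (illustrative): a buffer double-mapped at DVMA address
 * 0xff804000 is advertised to VME masters as
 * 0xff804000 - VME_IOMMU_DVMA_BASE == 0x00004000, which falls in the low
 * 1MB usable by A24 masters and in the low 8MB usable by A32 masters,
 * matching the extent ranges chosen in sparc_vct_iommu_dmamap_create().
 */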
209
210 struct vme_chipset_tag sparc_vme_chipset_tag = {
211 NULL,
212 sparc_vme_map,
213 sparc_vme_unmap,
214 sparc_vme_probe,
215 sparc_vme_intr_map,
216 sparc_vme_intr_evcnt,
217 sparc_vme_intr_establish,
218 sparc_vme_intr_disestablish,
219 0, 0, 0 /* bus specific DMA stuff */
220 };
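/*
 * Consumer sketch (illustrative, not compiled): a VME device driver is
 * normally handed this tag by the MI vme(4) layer and reaches the
 * sparc_vme_* methods above through the wrapper macros in
 * <dev/vme/vmevar.h>; the wrapper names and `example_intr' below are
 * assumptions, only the methods themselves are defined in this file.
 */
#if 0
static int example_intr(void *);	/* hypothetical device handler */

static int
example_use_chipset_tag(vme_chipset_tag_t ct)
{
	bus_space_tag_t bt;
	bus_space_handle_t bh;
	vme_mapresc_t resc;
	vme_intr_handle_t ih;
	int error;

	/* Map 256 bytes of A16/D16 space; this ends up in sparc_vme_map(). */
	error = vme_space_map(ct, 0x1000, 0x100,
	    VME_AM_A16 | VME_AM_SUPER | VME_AM_DATA, VME_D16, 0,
	    &bt, &bh, &resc);
	if (error != 0)
		return (error);

	/* VME level 3, vector 0x40; dispatched via vmeintr4()/vmeintr4m(). */
	if (vme_intr_map(ct, 3, 0x40, &ih) == 0)
		(void)vme_intr_establish(ct, ih, vme_ipl_to_pil[3],
		    example_intr, NULL);
	return (0);
}
#endif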
221
222
223 #if defined(SUN4)
224 struct sparc_bus_dma_tag sparc_vme4_dma_tag = {
225 NULL, /* cookie */
226 _bus_dmamap_create,
227 _bus_dmamap_destroy,
228 sparc_vme4_dmamap_load,
229 _bus_dmamap_load_mbuf,
230 _bus_dmamap_load_uio,
231 _bus_dmamap_load_raw,
232 sparc_vme4_dmamap_unload,
233 sparc_vme4_dmamap_sync,
234
235 _bus_dmamem_alloc,
236 _bus_dmamem_free,
237 sparc_vme_dmamem_map,
238 _bus_dmamem_unmap,
239 _bus_dmamem_mmap
240 };
241 #endif
242
243 #if defined(SUN4M)
244 struct sparc_bus_dma_tag sparc_vme_iommu_dma_tag = {
245 NULL, /* cookie */
246 sparc_vme_iommu_dmamap_create,
247 _bus_dmamap_destroy,
248 sparc_vme_iommu_dmamap_load,
249 _bus_dmamap_load_mbuf,
250 _bus_dmamap_load_uio,
251 _bus_dmamap_load_raw,
252 sparc_vme_iommu_dmamap_unload,
253 sparc_vme_iommu_dmamap_sync,
254
255 _bus_dmamem_alloc,
256 _bus_dmamem_free,
257 sparc_vme_dmamem_map,
258 _bus_dmamem_unmap,
259 _bus_dmamem_mmap
260 };
261 #endif
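/*
 * DMA usage sketch (illustrative, not compiled): maps are created
 * through the chipset tag so the bus-specific vct_dmamap_create hook
 * runs, and then loaded through the bus_dma tag above, after which
 * dm_segs[0].ds_addr already holds the VME-relative DVMA address
 * described in the comments earlier in this file.  The wrapper macro
 * names `vme_dmamap_create'/`vme_dmamap_destroy' are assumed from
 * <dev/vme/vmevar.h>.
 */
#if 0
static int
example_vme_dma(vme_chipset_tag_t ct, bus_dma_tag_t bdt,
		void *buf, bus_size_t len)
{
	bus_dmamap_t map;
	int error;

	error = vme_dmamap_create(ct, len, VME_AM_A24, VME_D16, 0,
	    1, len, 0, BUS_DMA_NOWAIT, &map);
	if (error != 0)
		return (error);

	error = bus_dmamap_load(bdt, map, buf, len, NULL, BUS_DMA_NOWAIT);
	if (error != 0)
		vme_dmamap_destroy(ct, map);
	return (error);
}
#endif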
262
263
264 static int
265 vmematch_mainbus(device_t parent, cfdata_t cf, void *aux)
266 {
267 struct mainbus_attach_args *ma = aux;
268
269 if (!CPU_ISSUN4 || vme_attached)
270 return (0);
271
272 return (strcmp("vme", ma->ma_name) == 0);
273 }
274
275 static int
276 vmematch_iommu(device_t parent, cfdata_t cf, void *aux)
277 {
278 struct iommu_attach_args *ia = aux;
279
280 if (vme_attached)
281 return 0;
282
283 return (strcmp("vme", ia->iom_name) == 0);
284 }
285
286
287 static void
288 vmeattach_mainbus(device_t parent, device_t self, void *aux)
289 {
290 #if defined(SUN4)
291 struct mainbus_attach_args *ma = aux;
292 struct sparcvme_softc *sc = device_private(self);
293 struct vmebus_attach_args vba;
294
295 vme_attached = 1;
296
297 sc->sc_bustag = ma->ma_bustag;
298 sc->sc_dmatag = ma->ma_dmatag;
299
300 /* VME interrupt entry point */
301 sc->sc_vmeintr = vmeintr4;
302
303 /*XXX*/ sparc_vme_chipset_tag.cookie = sc;
304 /*XXX*/ sparc_vme_chipset_tag.vct_dmamap_create = sparc_vct4_dmamap_create;
305 /*XXX*/ sparc_vme_chipset_tag.vct_dmamap_destroy = sparc_vct_dmamap_destroy;
306 /*XXX*/ sparc_vme4_dma_tag._cookie = sc;
307
308 vba.va_vct = &sparc_vme_chipset_tag;
309 vba.va_bdt = &sparc_vme4_dma_tag;
310 vba.va_slaveconfig = 0;
311
312 /* Fall back to our own `range' construction */
313 sc->sc_range = vmebus_translations;
314 sc->sc_nrange =
315 sizeof(vmebus_translations)/sizeof(vmebus_translations[0]);
316
317 vme_dvmamap = extent_create("vmedvma", VME4_DVMA_BASE, VME4_DVMA_END,
318 0, 0, EX_WAITOK);
319
320 printf("\n");
321 (void)config_found(self, &vba, 0, CFARGS_NONE);
322
323 #endif /* SUN4 */
324 return;
325 }
326
327 /* sun4m vmebus */
328 static void
329 vmeattach_iommu(device_t parent, device_t self, void *aux)
330 {
331 #if defined(SUN4M)
332 struct sparcvme_softc *sc = device_private(self);
333 struct iommu_attach_args *ia = aux;
334 struct vmebus_attach_args vba;
335 bus_space_handle_t bh;
336 int node;
337 int cline;
338
339 sc->sc_bustag = ia->iom_bustag;
340 sc->sc_dmatag = ia->iom_dmatag;
341
342 /* VME interrupt entry point */
343 sc->sc_vmeintr = vmeintr4m;
344
345 /*XXX*/ sparc_vme_chipset_tag.cookie = sc;
346 /*XXX*/ sparc_vme_chipset_tag.vct_dmamap_create = sparc_vct_iommu_dmamap_create;
347 /*XXX*/ sparc_vme_chipset_tag.vct_dmamap_destroy = sparc_vct_dmamap_destroy;
348 /*XXX*/ sparc_vme_iommu_dma_tag._cookie = sc;
349
350 vba.va_vct = &sparc_vme_chipset_tag;
351 vba.va_bdt = &sparc_vme_iommu_dma_tag;
352 vba.va_slaveconfig = 0;
353
354 node = ia->iom_node;
355
356 /*
357 * Map VME control space
358 */
359 if (ia->iom_nreg < 2) {
360 printf("%s: only %d register sets\n", device_xname(self),
361 ia->iom_nreg);
362 return;
363 }
364
365 if (bus_space_map(ia->iom_bustag,
366 (bus_addr_t) BUS_ADDR(ia->iom_reg[0].oa_space,
367 ia->iom_reg[0].oa_base),
368 (bus_size_t)ia->iom_reg[0].oa_size,
369 BUS_SPACE_MAP_LINEAR,
370 &bh) != 0) {
371 panic("%s: can't map vmebusreg", device_xname(self));
372 }
373 sc->sc_reg = (struct vmebusreg *)bh;
374
375 if (bus_space_map(ia->iom_bustag,
376 (bus_addr_t) BUS_ADDR(ia->iom_reg[1].oa_space,
377 ia->iom_reg[1].oa_base),
378 (bus_size_t)ia->iom_reg[1].oa_size,
379 BUS_SPACE_MAP_LINEAR,
380 &bh) != 0) {
381 panic("%s: can't map vmebusvec", device_xname(self));
382 }
383 sc->sc_vec = (struct vmebusvec *)bh;
384
385 /*
386 * Map VME IO cache tags and flush control.
387 */
388 if (bus_space_map(ia->iom_bustag,
389 (bus_addr_t) BUS_ADDR(
390 ia->iom_reg[1].oa_space,
391 ia->iom_reg[1].oa_base + VME_IOC_TAGOFFSET),
392 VME_IOC_SIZE,
393 BUS_SPACE_MAP_LINEAR,
394 &bh) != 0) {
395 panic("%s: can't map IOC tags", device_xname(self));
396 }
397 sc->sc_ioctags = (uint32_t *)bh;
398
399 if (bus_space_map(ia->iom_bustag,
400 (bus_addr_t) BUS_ADDR(
401 ia->iom_reg[1].oa_space,
402 ia->iom_reg[1].oa_base + VME_IOC_FLUSHOFFSET),
403 VME_IOC_SIZE,
404 BUS_SPACE_MAP_LINEAR,
405 &bh) != 0) {
406 panic("%s: can't map IOC flush registers", device_xname(self));
407 }
408 sc->sc_iocflush = (uint32_t *)bh;
409
410 /*
411 * Get "ranges" property.
412 */
413 if (prom_getprop(node, "ranges", sizeof(struct rom_range),
414 &sc->sc_nrange, &sc->sc_range) != 0) {
415 panic("%s: can't get ranges property", device_xname(self));
416 }
417
418 sparcvme_sc = sc;
419 vmeerr_handler = sparc_vme_error;
420
421 /*
422 * Invalidate all IO-cache entries.
423 */
424 for (cline = VME_IOC_SIZE/VME_IOC_LINESZ; cline > 0;) {
425 sc->sc_ioctags[--cline] = 0;
426 }
427
428 /* Enable IO-cache */
429 sc->sc_reg->vmebus_cr |= VMEBUS_CR_C;
430
431 printf(": version 0x%x\n",
432 sc->sc_reg->vmebus_cr & VMEBUS_CR_IMPL);
433
434 (void)config_found(self, &vba, 0,
435 CFARGS(.devhandle = device_handle(self)));
436 #endif /* SUN4M */
437 }
438
439 #if defined(SUN4M)
440 static int
441 sparc_vme_error(void)
442 {
443 struct sparcvme_softc *sc = sparcvme_sc;
444 uint32_t afsr, afpa;
445 char bits[64];
446
447 afsr = sc->sc_reg->vmebus_afsr;
448 afpa = sc->sc_reg->vmebus_afar;
449 snprintb(bits, sizeof(bits), VMEBUS_AFSR_BITS, afsr);
450 printf("VME error:\n\tAFSR %s\n", bits);
451 printf("\taddress: 0x%x%x\n", afsr, afpa);
452 return (0);
453 }
454 #endif
455
456 static int
457 vmebus_translate(struct sparcvme_softc *sc, vme_am_t mod, vme_addr_t addr,
458 bus_addr_t *bap)
459 {
460 int i;
461
462 for (i = 0; i < sc->sc_nrange; i++) {
463 struct rom_range *rp = &sc->sc_range[i];
464
465 if (rp->cspace != mod)
466 continue;
467
468 /* We've found the connection to the parent bus */
469 *bap = BUS_ADDR(rp->pspace, rp->poffset + addr);
470 return (0);
471 }
472 return (ENOENT);
473 }
474
475 struct vmeprobe_myarg {
476 int (*cb)(void *, bus_space_tag_t, bus_space_handle_t);
477 void *cbarg;
478 bus_space_tag_t tag;
479 int res; /* backwards */
480 };
481
482 static int vmeprobe_mycb(void *, void *);
483
484 static int
485 vmeprobe_mycb(void *bh, void *arg)
486 {
487 struct vmeprobe_myarg *a = arg;
488
489 a->res = (*a->cb)(a->cbarg, a->tag, (bus_space_handle_t)bh);
490 return (!a->res);
491 }
492
493 static int
494 sparc_vme_probe(void *cookie, vme_addr_t addr, vme_size_t len, vme_am_t mod,
495 vme_datasize_t datasize,
496 int (*callback)(void *, bus_space_tag_t, bus_space_handle_t),
497 void *arg)
498 {
499 struct sparcvme_softc *sc = cookie;
500 bus_addr_t paddr;
501 bus_size_t size;
502 struct vmeprobe_myarg myarg;
503 int res, i;
504
505 if (vmebus_translate(sc, mod, addr, &paddr) != 0)
506 return (EINVAL);
507
508 size = (datasize == VME_D8 ? 1 : (datasize == VME_D16 ? 2 : 4));
509
510 if (callback) {
511 myarg.cb = callback;
512 myarg.cbarg = arg;
513 myarg.tag = sc->sc_bustag;
514 myarg.res = 0;
515 res = bus_space_probe(sc->sc_bustag, paddr, size, 0,
516 0, vmeprobe_mycb, &myarg);
517 return (res ? 0 : (myarg.res ? myarg.res : EIO));
518 }
519
520 for (i = 0; i < len / size; i++) {
521 myarg.res = 0;
522 res = bus_space_probe(sc->sc_bustag, paddr, size, 0,
523 0, 0, 0);
524 if (res == 0)
525 return (EIO);
526 paddr += size;
527 }
528 return (0);
529 }
530
531 static int
532 sparc_vme_map(void *cookie, vme_addr_t addr, vme_size_t size, vme_am_t mod,
533 vme_datasize_t datasize, vme_swap_t swap,
534 bus_space_tag_t *tp, bus_space_handle_t *hp, vme_mapresc_t *rp)
535 {
536 struct sparcvme_softc *sc = cookie;
537 bus_addr_t paddr;
538 int error;
539
540 error = vmebus_translate(sc, mod, addr, &paddr);
541 if (error != 0)
542 return (error);
543
544 *tp = sc->sc_bustag;
545 return (bus_space_map(sc->sc_bustag, paddr, size, 0, hp));
546 }
547
548 int
549 sparc_vme_mmap_cookie(vme_addr_t addr, vme_am_t mod, bus_space_handle_t *hp)
550 {
551 struct sparcvme_softc *sc = sparcvme_sc;
552 bus_addr_t paddr;
553 int error;
554
555 error = vmebus_translate(sc, mod, addr, &paddr);
556 if (error != 0)
557 return (error);
558
559 return (bus_space_mmap(sc->sc_bustag, paddr, 0,
560 0/*prot is ignored*/, 0));
561 }
562
563 #ifdef notyet
564 #if defined(SUN4M)
565 static void
566 sparc_vme_iommu_barrier(bus_space_tag_t t, bus_space_handle_t h,
567 bus_size_t offset, bus_size_t size,
568 int flags)
569 {
570 struct vmebusreg *vbp = t->cookie;
571
572 /* Read async fault status to flush write-buffers */
573 (*(volatile int *)&vbp->vmebus_afsr);
574 }
575 #endif /* SUN4M */
576 #endif
577
578
579
580 /*
581 * VME Interrupt Priority Level to sparc Processor Interrupt Level.
582 */
583 static int vme_ipl_to_pil[] = {
584 0,
585 2,
586 3,
587 5,
588 7,
589 9,
590 11,
591 13
592 };
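/*
 * Example (illustrative): a board interrupting at VME level 3 is
 * serviced at processor interrupt level vme_ipl_to_pil[3] == 5, and its
 * vector is fetched with the `(pri << 1) | 1' index used by
 * vmeintr4()/vmeintr4m() below.
 */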
593
594
595 /*
596 * All VME device interrupts go through vmeintr(). This function reads
597 * the VME vector from the bus, then dispatches the device interrupt
598 * handler. All handlers for devices that map to the same Processor
599 * Interrupt Level (according to the table above) are on a linked list
600 * of `sparc_vme_intr_handle' structures, the head of which is passed
601 * down as the argument to `vmeintr(void *arg)'.
602 */
603 struct sparc_vme_intr_handle {
604 struct intrhand ih;
605 struct sparc_vme_intr_handle *next;
606 int vec; /* VME interrupt vector */
607 int pri; /* VME interrupt priority */
608 struct sparcvme_softc *sc;/*XXX*/
609 };
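/*
 * Dispatch example (illustrative): two boards at VME level 3 with
 * vectors 0x40 and 0x48 both hang off the single intrhand installed at
 * PIL 5; vmeintr4()/vmeintr4m() read the vector from the bus and call
 * only the handler whose `vec' matches.
 */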
610
611 #if defined(SUN4)
612 int
613 vmeintr4(void *arg)
614 {
615 struct sparc_vme_intr_handle *ihp = (vme_intr_handle_t)arg;
616 int level, vec;
617 int rv = 0;
618
619 level = (ihp->pri << 1) | 1;
620
621 vec = ldcontrolb((void *)(AC_VMEINTVEC | level));
622
623 if (vec == -1) {
624 #ifdef DEBUG
625 /*
626 * This seems to happen only with the i82586 based
627 * `ie1' boards.
628 */
629 printf("vme: spurious interrupt at VME level %d\n", ihp->pri);
630 #endif
631 return (1); /* XXX - pretend we handled it, for now */
632 }
633
634 for (; ihp; ihp = ihp->next)
635 if (ihp->vec == vec && ihp->ih.ih_fun) {
636 splx(ihp->ih.ih_classipl);
637 rv |= (ihp->ih.ih_fun)(ihp->ih.ih_arg);
638 }
639
640 return (rv);
641 }
642 #endif
643
644 #if defined(SUN4M)
645 int
646 vmeintr4m(void *arg)
647 {
648 struct sparc_vme_intr_handle *ihp = (vme_intr_handle_t)arg;
649 int level, vec;
650 int rv = 0;
651
652 level = (ihp->pri << 1) | 1;
653
654 #if 0
655 int pending;
656
657 /* Flush VME <=> Sbus write buffers */
658 (*(volatile int *)&ihp->sc->sc_reg->vmebus_afsr);
659
660 pending = *((int*)ICR_SI_PEND);
661 if ((pending & SINTR_VME(ihp->pri)) == 0) {
662 printf("vmeintr: not pending at pri %x (p 0x%x)\n",
663 ihp->pri, pending);
664 return (0);
665 }
666 #endif
667 #if 0
668 /* Why gives this a bus timeout sometimes? */
669 vec = ihp->sc->sc_vec->vmebusvec[level];
670 #else
671 /* so, arrange to catch the fault... */
672 {
673 extern int fkbyte(volatile char *, struct pcb *);
674 volatile char *addr = &ihp->sc->sc_vec->vmebusvec[level];
675 struct pcb *xpcb;
676 void *saveonfault;
677 int s;
678
679 s = splhigh();
680
681 xpcb = lwp_getpcb(curlwp);
682 saveonfault = xpcb->pcb_onfault;
683 vec = fkbyte(addr, xpcb);
684 xpcb->pcb_onfault = saveonfault;
685
686 splx(s);
687 }
688 #endif
689
690 if (vec == -1) {
691 #ifdef DEBUG
692 /*
693 * This seems to happen only with the i82586 based
694 * `ie1' boards.
695 */
696 printf("vme: spurious interrupt at VME level %d\n", ihp->pri);
697 printf(" ICR_SI_PEND=0x%x; VME AFSR=0x%x; VME AFAR=0x%x\n",
698 *((int*)ICR_SI_PEND),
699 ihp->sc->sc_reg->vmebus_afsr,
700 ihp->sc->sc_reg->vmebus_afar);
701 #endif
702 return (1); /* XXX - pretend we handled it, for now */
703 }
704
705 for (; ihp; ihp = ihp->next)
706 if (ihp->vec == vec && ihp->ih.ih_fun) {
707 splx(ihp->ih.ih_classipl);
708 rv |= (ihp->ih.ih_fun)(ihp->ih.ih_arg);
709 }
710
711 return (rv);
712 }
713 #endif /* SUN4M */
714
715 static int
716 sparc_vme_intr_map(void *cookie, int level, int vec,
717 vme_intr_handle_t *ihp)
718 {
719 struct sparc_vme_intr_handle *ih;
720
721 ih = kmem_alloc(sizeof(*ih), KM_SLEEP);
722 ih->pri = level;
723 ih->vec = vec;
724 ih->sc = cookie;/*XXX*/
725 *ihp = ih;
726 return (0);
727 }
728
729 static const struct evcnt *
730 sparc_vme_intr_evcnt(void *cookie, vme_intr_handle_t vih)
731 {
732
733 /* XXX for now, no evcnt parent reported */
734 return NULL;
735 }
736
737 static void *
738 sparc_vme_intr_establish(void *cookie, vme_intr_handle_t vih, int level,
739 int (*func)(void *), void *arg)
740 {
741 struct sparcvme_softc *sc = cookie;
742 struct sparc_vme_intr_handle *svih =
743 (struct sparc_vme_intr_handle *)vih;
744 struct intrhand *ih;
745 int pil;
746
747 /* Translate VME priority to processor IPL */
748 pil = vme_ipl_to_pil[svih->pri];
749
750 if (level < pil)
751 panic("vme_intr_establish: class lvl (%d) < pil (%d)\n",
752 level, pil);
753
754 svih->ih.ih_fun = func;
755 svih->ih.ih_arg = arg;
756 svih->ih.ih_classipl = level; /* note: used slightly differently
757 than in intr.c (no shift) */
758 svih->next = NULL;
759
760 /* ensure the interrupt subsystem will call us at this level */
761 for (ih = intrhand[pil]; ih != NULL; ih = ih->ih_next)
762 if (ih->ih_fun == sc->sc_vmeintr)
763 break;
764
765 if (ih == NULL) {
766 ih = kmem_zalloc(sizeof(*ih), KM_SLEEP);
767 ih->ih_fun = sc->sc_vmeintr;
768 ih->ih_arg = vih;
769 intr_establish(pil, 0, ih, NULL, false);
770 } else {
771 svih->next = (vme_intr_handle_t)ih->ih_arg;
772 ih->ih_arg = vih;
773 }
774 return (NULL);
775 }
776
777 static void
778 sparc_vme_unmap(void *cookie, vme_mapresc_t resc)
779 {
780
781 /* Not implemented */
782 panic("sparc_vme_unmap");
783 }
784
785 static void
786 sparc_vme_intr_disestablish(void *cookie, void *a)
787 {
788
789 /* Not implemented */
790 panic("sparc_vme_intr_disestablish");
791 }
792
793
794
795 /*
796 * VME DMA functions.
797 */
798
799 #if defined(SUN4) || defined(SUN4M)
800 static void
801 sparc_vct_dmamap_destroy(void *cookie, bus_dmamap_t map)
802 {
803 struct sparcvme_softc *sc = cookie;
804
805 bus_dmamap_destroy(sc->sc_dmatag, map);
806 }
807 #endif
808
809 #if defined(SUN4)
810 static int
811 sparc_vct4_dmamap_create(void *cookie, vme_size_t size, vme_am_t am,
812 vme_datasize_t datasize, vme_swap_t swap,
813 int nsegments, vme_size_t maxsegsz,
814 vme_addr_t boundary, int flags,
815 bus_dmamap_t *dmamp)
816 {
817 struct sparcvme_softc *sc = cookie;
818
819 /* Allocate a base map through parent bus ops */
820 return (bus_dmamap_create(sc->sc_dmatag, size, nsegments, maxsegsz,
821 boundary, flags, dmamp));
822 }
823
824 static int
825 sparc_vme4_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map,
826 void *buf, bus_size_t buflen,
827 struct proc *p, int flags)
828 {
829 bus_addr_t dva;
830 bus_size_t sgsize;
831 u_long ldva;
832 vaddr_t va, voff;
833 pmap_t pmap;
834 int pagesz = PAGE_SIZE;
835 int error;
836
837 cache_flush(buf, buflen); /* XXX - move to bus_dma_sync */
838
839 va = (vaddr_t)buf;
840 voff = va & (pagesz - 1);
841 va &= -pagesz;
842
843 /*
844 * Allocate an integral number of pages from DVMA space
845 * covering the passed buffer.
846 */
847 sgsize = (buflen + voff + pagesz - 1) & -pagesz;
848 error = extent_alloc(vme_dvmamap, sgsize, pagesz,
849 map->_dm_boundary,
850 (flags & BUS_DMA_NOWAIT) == 0
851 ? EX_WAITOK
852 : EX_NOWAIT,
853 &ldva);
854 if (error != 0)
855 return (error);
856 dva = (bus_addr_t)ldva;
857
858 map->dm_mapsize = buflen;
859 map->dm_nsegs = 1;
860 /* Adjust DVMA address to VME view */
861 map->dm_segs[0].ds_addr = dva + voff - VME4_DVMA_BASE;
862 map->dm_segs[0].ds_len = buflen;
863 map->dm_segs[0]._ds_sgsize = sgsize;
864
865 pmap = (p == NULL) ? pmap_kernel() : p->p_vmspace->vm_map.pmap;
866
867 for (; sgsize != 0; ) {
868 paddr_t pa;
869 /*
870 * Get the physical address for this page.
871 */
872 (void) pmap_extract(pmap, va, &pa);
873
874 #ifdef notyet
875 if (have_iocache)
876 pa |= PG_IOC;
877 #endif
878 pmap_enter(pmap_kernel(), dva,
879 pa | PMAP_NC,
880 VM_PROT_READ|VM_PROT_WRITE, PMAP_WIRED);
881
882 dva += pagesz;
883 va += pagesz;
884 sgsize -= pagesz;
885 }
886 pmap_update(pmap_kernel());
887
888 return (0);
889 }
890
891 static void
892 sparc_vme4_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
893 {
894 bus_dma_segment_t *segs = map->dm_segs;
895 int nsegs = map->dm_nsegs;
896 bus_addr_t dva;
897 bus_size_t len;
898 int i, s, error;
899
900 for (i = 0; i < nsegs; i++) {
901 /* Go from VME to CPU view */
902 dva = segs[i].ds_addr + VME4_DVMA_BASE;
903 dva &= -PAGE_SIZE;
904 len = segs[i]._ds_sgsize;
905
906 /* Remove double-mapping in DVMA space */
907 pmap_remove(pmap_kernel(), dva, dva + len);
908
909 /* Release DVMA space */
910 s = splhigh();
911 error = extent_free(vme_dvmamap, dva, len, EX_NOWAIT);
912 splx(s);
913 if (error != 0)
914 printf("warning: %ld of DVMA space lost\n", len);
915 }
916 pmap_update(pmap_kernel());
917
918 /* Mark the mappings as invalid. */
919 map->dm_mapsize = 0;
920 map->dm_nsegs = 0;
921 }
922
923 static void
924 sparc_vme4_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map,
925 bus_addr_t offset, bus_size_t len, int ops)
926 {
927
928 /*
929 * XXX Should perform cache flushes as necessary (e.g. 4/200 W/B).
930 * Currently the cache is flushed in bus_dmamap_load()...
931 */
932 }
933 #endif /* SUN4 */
934
935 #if defined(SUN4M)
936 static int
937 sparc_vme_iommu_dmamap_create(bus_dma_tag_t t, bus_size_t size,
938 int nsegments, bus_size_t maxsegsz,
939 bus_size_t boundary, int flags,
940 bus_dmamap_t *dmamp)
941 {
942
943 printf("sparc_vme_dmamap_create: please use `vme_dmamap_create'\n");
944 return (EINVAL);
945 }
946
947 static int
948 sparc_vct_iommu_dmamap_create(void *cookie, vme_size_t size, vme_am_t am,
949 vme_datasize_t datasize, vme_swap_t swap,
950 int nsegments, vme_size_t maxsegsz,
951 vme_addr_t boundary, int flags,
952 bus_dmamap_t *dmamp)
953 {
954 struct sparcvme_softc *sc = cookie;
955 bus_dmamap_t map;
956 int error;
957
958 /* Allocate a base map through parent bus ops */
959 error = bus_dmamap_create(sc->sc_dmatag, size, nsegments, maxsegsz,
960 boundary, flags, &map);
961 if (error != 0)
962 return (error);
963
964 /*
965 * Each I/O cache line maps to an 8K section of VME DVMA space, so
966 * we must ensure that DVMA allocations are always 8K aligned.
967 */
968 map->_dm_align = VME_IOC_PAGESZ;
969
970 /* Set map region based on Address Modifier */
971 switch ((am & VME_AM_ADRSIZEMASK)) {
972 case VME_AM_A16:
973 case VME_AM_A24:
974 /* 1 MB of DVMA space */
975 map->_dm_ex_start = VME_IOMMU_DVMA_AM24_BASE;
976 map->_dm_ex_end = VME_IOMMU_DVMA_AM24_END;
977 break;
978 case VME_AM_A32:
979 /* 8 MB of DVMA space */
980 map->_dm_ex_start = VME_IOMMU_DVMA_AM32_BASE;
981 map->_dm_ex_end = VME_IOMMU_DVMA_AM32_END;
982 break;
983 }
984
985 *dmamp = map;
986 return (0);
987 }
988
989 static int
990 sparc_vme_iommu_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map,
991 void *buf, bus_size_t buflen,
992 struct proc *p, int flags)
993 {
994 struct sparcvme_softc *sc = t->_cookie;
995 volatile uint32_t *ioctags;
996 int error;
997
998 /* Round request to a multiple of the I/O cache size */
999 buflen = (buflen + VME_IOC_PAGESZ - 1) & -VME_IOC_PAGESZ;
1000 error = bus_dmamap_load(sc->sc_dmatag, map, buf, buflen, p, flags);
1001 if (error != 0)
1002 return (error);
1003
1004 /* Allocate I/O cache entries for this range */
1005 ioctags = sc->sc_ioctags + VME_IOC_LINE(map->dm_segs[0].ds_addr);
1006 while (buflen > 0) {
1007 *ioctags = VME_IOC_IC | VME_IOC_W;
1008 ioctags += VME_IOC_LINESZ/sizeof(*ioctags);
1009 buflen -= VME_IOC_PAGESZ;
1010 }
1011
1012 /*
1013 * Adjust DVMA address to VME view.
1014 * Note: the DVMA base address is the same for all
1015 * VME address spaces.
1016 */
1017 map->dm_segs[0].ds_addr -= VME_IOMMU_DVMA_BASE;
1018 return (0);
1019 }
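/*
 * Worked example (illustrative): loading a 10000-byte buffer rounds
 * buflen up to 2 * VME_IOC_PAGESZ (16KB, given the 8KB I/O-cache line
 * granularity noted above), so the loop marks two consecutive tags
 * starting at VME_IOC_LINE(ds_addr); the unload path below walks the
 * same range through sc_iocflush before tearing down the mapping.
 */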
1020
1021
1022 static void
1023 sparc_vme_iommu_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
1024 {
1025 struct sparcvme_softc *sc = t->_cookie;
1026 volatile uint32_t *flushregs;
1027 int len;
1028
1029 /* Go from VME to CPU view */
1030 map->dm_segs[0].ds_addr += VME_IOMMU_DVMA_BASE;
1031
1032 /* Flush VME I/O cache */
1033 len = map->dm_segs[0]._ds_sgsize;
1034 flushregs = sc->sc_iocflush + VME_IOC_LINE(map->dm_segs[0].ds_addr);
1035 while (len > 0) {
1036 *flushregs = 0;
1037 flushregs += VME_IOC_LINESZ/sizeof(*flushregs);
1038 len -= VME_IOC_PAGESZ;
1039 }
1040
1041 /*
1042 * Start a read from `tag space' which will not complete until
1043 * all cache flushes have finished
1044 */
1045 (*sc->sc_ioctags);
1046
1047 bus_dmamap_unload(sc->sc_dmatag, map);
1048 }
1049
1050 static void
1051 sparc_vme_iommu_dmamap_sync(bus_dma_tag_t t, bus_dmamap_t map,
1052 bus_addr_t offset, bus_size_t len, int ops)
1053 {
1054
1055 /*
1056 * XXX Should perform cache flushes as necessary.
1057 */
1058 }
1059 #endif /* SUN4M */
1060
1061 #if defined(SUN4) || defined(SUN4M)
1062 static int
1063 sparc_vme_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
1064 size_t size, void **kvap, int flags)
1065 {
1066 struct sparcvme_softc *sc = t->_cookie;
1067
1068 return (bus_dmamem_map(sc->sc_dmatag, segs, nsegs, size, kvap, flags));
1069 }
1070 #endif /* SUN4 || SUN4M */
1071