/*	$NetBSD: mainbus.c,v 1.11 2022/10/11 22:03:37 andvar Exp $	*/

/*-
 * Copyright (c) 2001, 2002 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Matthew Fredette.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*	$OpenBSD: mainbus.c,v 1.74 2009/04/20 00:42:06 oga Exp $	*/

/*
 * Copyright (c) 1998-2004 Michael Shalayeff
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR OR HIS RELATIVES BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF MIND, USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: mainbus.c,v 1.11 2022/10/11 22:03:37 andvar Exp $");

#include "locators.h"
#include "power.h"
#include "lcd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/reboot.h>
#include <sys/extent.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/kmem.h>

#include <uvm/uvm_page.h>
#include <uvm/uvm.h>

#include <machine/pdc.h>
#include <machine/iomod.h>
#include <machine/autoconf.h>

#include <hppa/hppa/machdep.h>
#include <hppa/dev/cpudevs.h>

#if NLCD > 0
static struct pdc_chassis_info pdc_chassis_info;
#endif

#ifdef MBUSDEBUG

#define	DPRINTF(s)	do {	\
	if (mbusdebug)		\
		printf s;	\
} while(0)

int mbusdebug = 1;
#else
#define	DPRINTF(s)	/* */
#endif

struct mainbus_softc {
	device_t sc_dv;
};

int	mbmatch(device_t, cfdata_t, void *);
void	mbattach(device_t, device_t, void *);

CFATTACH_DECL_NEW(mainbus, sizeof(struct mainbus_softc),
    mbmatch, mbattach, NULL, NULL);

extern struct cfdriver mainbus_cd;

static int mb_attached;

/* from machdep.c */
extern struct extent *hppa_io_extent;

uint8_t mbus_r1(void *, bus_space_handle_t, bus_size_t);
uint16_t mbus_r2(void *, bus_space_handle_t, bus_size_t);
uint32_t mbus_r4(void *, bus_space_handle_t, bus_size_t);
uint64_t mbus_r8(void *, bus_space_handle_t, bus_size_t);
void mbus_w1(void *, bus_space_handle_t, bus_size_t, uint8_t);
void mbus_w2(void *, bus_space_handle_t, bus_size_t, uint16_t);
void mbus_w4(void *, bus_space_handle_t, bus_size_t, uint32_t);
void mbus_w8(void *, bus_space_handle_t, bus_size_t, uint64_t);
void mbus_rm_1(void *, bus_space_handle_t, bus_size_t, uint8_t *, bus_size_t);
void mbus_rm_2(void *, bus_space_handle_t, bus_size_t, uint16_t *, bus_size_t);
void mbus_rm_4(void *, bus_space_handle_t, bus_size_t, uint32_t *, bus_size_t);
void mbus_rm_8(void *, bus_space_handle_t, bus_size_t, uint64_t *, bus_size_t);
void mbus_wm_1(void *, bus_space_handle_t, bus_size_t, const uint8_t *, bus_size_t);
void mbus_wm_2(void *, bus_space_handle_t, bus_size_t, const uint16_t *, bus_size_t);
void mbus_wm_4(void *, bus_space_handle_t, bus_size_t, const uint32_t *, bus_size_t);
void mbus_wm_8(void *, bus_space_handle_t, bus_size_t, const uint64_t *, bus_size_t);
void mbus_rr_1(void *, bus_space_handle_t, bus_size_t, uint8_t *, bus_size_t);
void mbus_rr_2(void *, bus_space_handle_t, bus_size_t, uint16_t *, bus_size_t);
void mbus_rr_4(void *, bus_space_handle_t, bus_size_t, uint32_t *, bus_size_t);
void mbus_rr_8(void *, bus_space_handle_t, bus_size_t, uint64_t *, bus_size_t);
void mbus_wr_1(void *, bus_space_handle_t, bus_size_t, const uint8_t *, bus_size_t);
void mbus_wr_2(void *, bus_space_handle_t, bus_size_t, const uint16_t *, bus_size_t);
void mbus_wr_4(void *, bus_space_handle_t, bus_size_t, const uint32_t *, bus_size_t);
void mbus_wr_8(void *, bus_space_handle_t, bus_size_t, const uint64_t *, bus_size_t);
void mbus_sm_1(void *, bus_space_handle_t, bus_size_t, uint8_t, bus_size_t);
void mbus_sm_2(void *, bus_space_handle_t, bus_size_t, uint16_t, bus_size_t);
void mbus_sm_4(void *, bus_space_handle_t, bus_size_t, uint32_t, bus_size_t);
void mbus_sm_8(void *, bus_space_handle_t, bus_size_t, uint64_t, bus_size_t);
void mbus_sr_1(void *, bus_space_handle_t, bus_size_t, uint8_t, bus_size_t);
void mbus_sr_2(void *, bus_space_handle_t, bus_size_t, uint16_t, bus_size_t);
void mbus_sr_4(void *, bus_space_handle_t, bus_size_t, uint32_t, bus_size_t);
void mbus_sr_8(void *, bus_space_handle_t, bus_size_t, uint64_t, bus_size_t);
void mbus_cp_1(void *, bus_space_handle_t, bus_size_t, bus_space_handle_t, bus_size_t, bus_size_t);
void mbus_cp_2(void *, bus_space_handle_t, bus_size_t, bus_space_handle_t, bus_size_t, bus_size_t);
void mbus_cp_4(void *, bus_space_handle_t, bus_size_t, bus_space_handle_t, bus_size_t, bus_size_t);
void mbus_cp_8(void *, bus_space_handle_t, bus_size_t, bus_space_handle_t, bus_size_t, bus_size_t);

int mbus_add_mapping(bus_addr_t, bus_size_t, int, bus_space_handle_t *);
int mbus_map(void *, bus_addr_t, bus_size_t, int, bus_space_handle_t *);
void mbus_unmap(void *, bus_space_handle_t, bus_size_t);
int mbus_alloc(void *, bus_addr_t, bus_addr_t, bus_size_t, bus_size_t, bus_size_t, int, bus_addr_t *, bus_space_handle_t *);
void mbus_free(void *, bus_space_handle_t, bus_size_t);
int mbus_subregion(void *, bus_space_handle_t, bus_size_t, bus_size_t, bus_space_handle_t *);
void mbus_barrier(void *, bus_space_handle_t, bus_size_t, bus_size_t, int);
void *mbus_vaddr(void *, bus_space_handle_t);
paddr_t mbus_mmap(void *, bus_addr_t, off_t, int, int);

int mbus_dmamap_create(void *, bus_size_t, int, bus_size_t, bus_size_t, int, bus_dmamap_t *);
void mbus_dmamap_destroy(void *, bus_dmamap_t);
int mbus_dmamap_load(void *, bus_dmamap_t, void *, bus_size_t, struct proc *, int);
int mbus_dmamap_load_mbuf(void *, bus_dmamap_t, struct mbuf *, int);
int mbus_dmamap_load_uio(void *, bus_dmamap_t, struct uio *, int);
int mbus_dmamap_load_raw(void *, bus_dmamap_t, bus_dma_segment_t *, int, bus_size_t, int);
void mbus_dmamap_unload(void *, bus_dmamap_t);
void mbus_dmamap_sync(void *, bus_dmamap_t, bus_addr_t, bus_size_t, int);
int mbus_dmamem_alloc(void *, bus_size_t, bus_size_t, bus_size_t, bus_dma_segment_t *, int, int *, int);
void mbus_dmamem_free(void *, bus_dma_segment_t *, int);
int mbus_dmamem_map(void *, bus_dma_segment_t *, int, size_t, void **, int);
void mbus_dmamem_unmap(void *, void *, size_t);
paddr_t mbus_dmamem_mmap(void *, bus_dma_segment_t *, int, off_t, int, int);
int _bus_dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct vmspace *vm, int flags, paddr_t *lastaddrp,
    int *segp, int first);

extern struct pdc_btlb pdc_btlb;
static uint32_t bmm[HPPA_FLEX_COUNT/32];

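/*
 * Establish the kernel mappings backing a region of bus (I/O) space.
 * The bmm[] bitmap records which HPPA_FLEX_SIZE-sized chunks of I/O
 * space have already been entered into the kernel pmap, so mapping the
 * same chunk again is a no-op.  I/O space is mapped one-to-one
 * (VA == PA), so the returned handle is simply the bus address.
 */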
int
mbus_add_mapping(bus_addr_t bpa, bus_size_t size, int flags,
    bus_space_handle_t *bshp)
{
	vaddr_t pa, spa, epa;
	int flex;

	DPRINTF(("\n%s(%lx,%lx,%scachable,%p)\n", __func__,
	    bpa, size, flags? "" : "non", bshp));

	KASSERT(bpa >= HPPA_IOSPACE);
	KASSERT(!(flags & BUS_SPACE_MAP_CACHEABLE));

	/*
	 * Mappings are established in HPPA_FLEX_SIZE units,
	 * either with BTLB, or regular mappings of the whole area.
	 */
	for (pa = bpa ; size != 0; pa = epa) {
		flex = HPPA_FLEX(pa);
		spa = pa & HPPA_FLEX_MASK;
		epa = spa + HPPA_FLEX_SIZE; /* may wrap to 0... */

		size -= uimin(size, HPPA_FLEX_SIZE - (pa - spa));

		/* do we need a new mapping? */
		if (bmm[flex / 32] & (1 << (flex % 32))) {
			DPRINTF(("%s: already mapped flex=%x, mask=%x\n",
			    __func__, flex, bmm[flex / 32]));
			continue;
		}

		DPRINTF(("%s: adding flex=%x %lx-%lx, ", __func__, flex, spa,
		    epa - 1));

		bmm[flex / 32] |= (1 << (flex % 32));

		while (spa != epa) {
			DPRINTF(("%s: kenter 0x%lx-0x%lx", __func__, spa,
			    epa));
			for (; spa != epa; spa += PAGE_SIZE)
				pmap_kenter_pa(spa, spa,
				    VM_PROT_READ | VM_PROT_WRITE, 0);
		}
	}

	*bshp = bpa;

	/* Success. */
	return 0;
}

int
mbus_map(void *v, bus_addr_t bpa, bus_size_t size, int flags,
    bus_space_handle_t *bshp)
{
	int error;

	/*
	 * We must only be called with addresses in I/O space.
	 */
	KASSERT(bpa >= HPPA_IOSPACE);

	/*
	 * Allocate the region of I/O space.
	 */
	error = extent_alloc_region(hppa_io_extent, bpa, size, EX_NOWAIT);
	if (error)
		return error;

	/*
	 * Map the region of I/O space.
	 */
	error = mbus_add_mapping(bpa, size, flags, bshp);
	if (error) {
		DPRINTF(("bus_space_map: pa 0x%lx, size 0x%lx failed\n",
		    bpa, size));
		if (extent_free(hppa_io_extent, bpa, size, EX_NOWAIT)) {
			printf("bus_space_map: can't free region\n");
		}
	}

	return error;
}

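/*
 * Release a region obtained with mbus_map(): the hppa_io_extent
 * reservation is freed (the one-to-one kernel mappings themselves are
 * left in place and simply reused the next time that chunk is mapped).
 *
 * A sketch of typical use, through the bus space tag declared at the end
 * of this file (hypothetical driver code, not from this file):
 *
 *	bus_space_handle_t ioh;
 *	if (bus_space_map(&hppa_bustag, ca->ca_hpa, size, 0, &ioh) == 0) {
 *		val = bus_space_read_4(&hppa_bustag, ioh, offset);
 *		bus_space_unmap(&hppa_bustag, ioh, size);
 *	}
 */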
void
mbus_unmap(void *v, bus_space_handle_t bsh, bus_size_t size)
{
	bus_addr_t bpa = bsh;
	int error;

	/*
	 * Free the region of I/O space.
	 */
	error = extent_free(hppa_io_extent, bpa, size, EX_NOWAIT);
	if (error) {
		DPRINTF(("bus_space_unmap: pa 0x%lx, size 0x%lx\n",
		    bpa, size));
		panic("bus_space_unmap: can't free region (%d)", error);
	}
}

int
mbus_alloc(void *v, bus_addr_t rstart, bus_addr_t rend, bus_size_t size,
    bus_size_t align, bus_size_t boundary, int flags, bus_addr_t *addrp,
    bus_space_handle_t *bshp)
{
	bus_addr_t bpa;
	int error;

	if (rstart < hppa_io_extent->ex_start ||
	    rend > hppa_io_extent->ex_end)
		panic("bus_space_alloc: bad region start/end");

	/*
	 * Allocate the region of I/O space.
	 */
	error = extent_alloc_subregion1(hppa_io_extent, rstart, rend, size,
	    align, 0, boundary, EX_NOWAIT, &bpa);
	if (error)
		return error;

	/*
	 * Map the region of I/O space.
	 */
	error = mbus_add_mapping(bpa, size, flags, bshp);
	if (error) {
		DPRINTF(("bus_space_alloc: pa 0x%lx, size 0x%lx failed\n",
		    bpa, size));
		if (extent_free(hppa_io_extent, bpa, size, EX_NOWAIT)) {
			printf("bus_space_alloc: can't free region\n");
		}
	}

	*addrp = bpa;

	return error;
}

void
mbus_free(void *v, bus_space_handle_t h, bus_size_t size)
{
	/* bus_space_unmap() does all that we need to do. */
	mbus_unmap(v, h, size);
}

int
mbus_subregion(void *v, bus_space_handle_t bsh, bus_size_t offset,
    bus_size_t size, bus_space_handle_t *nbshp)
{
	*nbshp = bsh + offset;
	return(0);
}

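/*
 * Bus space barrier: the handle, offset, length and operation arguments
 * are ignored; all caches are flushed and synchronized unconditionally.
 */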
void
mbus_barrier(void *v, bus_space_handle_t h, bus_size_t o, bus_size_t l, int op)
{
	sync_caches();
}

void*
mbus_vaddr(void *v, bus_space_handle_t h)
{
	/*
	 * We must only be called with addresses in I/O space.
	 */
	KASSERT(h >= HPPA_IOSPACE);
	return (void*)h;
}

paddr_t
mbus_mmap(void *v, bus_addr_t addr, off_t off, int prot, int flags)
{
	return btop(addr + off);
}

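/*
 * Single-datum read/write methods.  Since bus space is mapped one-to-one
 * (the handle is the bus address, see mbus_add_mapping above), each of
 * these simply dereferences (h + o) through a volatile pointer of the
 * appropriate width.
 */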
uint8_t
mbus_r1(void *v, bus_space_handle_t h, bus_size_t o)
{
	return *((volatile uint8_t *)(h + o));
}

uint16_t
mbus_r2(void *v, bus_space_handle_t h, bus_size_t o)
{
	return *((volatile uint16_t *)(h + o));
}

uint32_t
mbus_r4(void *v, bus_space_handle_t h, bus_size_t o)
{
	return *((volatile uint32_t *)(h + o));
}

uint64_t
mbus_r8(void *v, bus_space_handle_t h, bus_size_t o)
{
	return *((volatile uint64_t *)(h + o));
}

void
mbus_w1(void *v, bus_space_handle_t h, bus_size_t o, uint8_t vv)
{
	*((volatile uint8_t *)(h + o)) = vv;
}

void
mbus_w2(void *v, bus_space_handle_t h, bus_size_t o, uint16_t vv)
{
	*((volatile uint16_t *)(h + o)) = vv;
}

void
mbus_w4(void *v, bus_space_handle_t h, bus_size_t o, uint32_t vv)
{
	*((volatile uint32_t *)(h + o)) = vv;
}

void
mbus_w8(void *v, bus_space_handle_t h, bus_size_t o, uint64_t vv)
{
	*((volatile uint64_t *)(h + o)) = vv;
}


void
mbus_rm_1(void *v, bus_space_handle_t h, bus_size_t o, uint8_t *a, bus_size_t c)
{
	h += o;
	while (c--)
		*(a++) = *(volatile uint8_t *)h;
}

void
mbus_rm_2(void *v, bus_space_handle_t h, bus_size_t o, uint16_t *a, bus_size_t c)
{
	h += o;
	while (c--)
		*(a++) = *(volatile uint16_t *)h;
}

void
mbus_rm_4(void *v, bus_space_handle_t h, bus_size_t o, uint32_t *a, bus_size_t c)
{
	h += o;
	while (c--)
		*(a++) = *(volatile uint32_t *)h;
}

void
mbus_rm_8(void *v, bus_space_handle_t h, bus_size_t o, uint64_t *a, bus_size_t c)
{
	h += o;
	while (c--)
		*(a++) = *(volatile uint64_t *)h;
}

void
mbus_wm_1(void *v, bus_space_handle_t h, bus_size_t o, const uint8_t *a, bus_size_t c)
{
	h += o;
	while (c--)
		*(volatile uint8_t *)h = *(a++);
}

void
mbus_wm_2(void *v, bus_space_handle_t h, bus_size_t o, const uint16_t *a, bus_size_t c)
{
	h += o;
	while (c--)
		*(volatile uint16_t *)h = *(a++);
}

void
mbus_wm_4(void *v, bus_space_handle_t h, bus_size_t o, const uint32_t *a, bus_size_t c)
{
	h += o;
	while (c--)
		*(volatile uint32_t *)h = *(a++);
}

void
mbus_wm_8(void *v, bus_space_handle_t h, bus_size_t o, const uint64_t *a, bus_size_t c)
{
	h += o;
	while (c--)
		*(volatile uint64_t *)h = *(a++);
}

void
mbus_sm_1(void *v, bus_space_handle_t h, bus_size_t o, uint8_t vv, bus_size_t c)
{
	h += o;
	while (c--)
		*(volatile uint8_t *)h = vv;
}

void
mbus_sm_2(void *v, bus_space_handle_t h, bus_size_t o, uint16_t vv, bus_size_t c)
{
	h += o;
	while (c--)
		*(volatile uint16_t *)h = vv;
}

void
mbus_sm_4(void *v, bus_space_handle_t h, bus_size_t o, uint32_t vv, bus_size_t c)
{
	h += o;
	while (c--)
		*(volatile uint32_t *)h = vv;
}

void
mbus_sm_8(void *v, bus_space_handle_t h, bus_size_t o, uint64_t vv, bus_size_t c)
{
	h += o;
	while (c--)
		*(volatile uint64_t *)h = vv;
}

void mbus_rrm_2(void *v, bus_space_handle_t h, bus_size_t o, uint16_t*a, bus_size_t c);
void mbus_rrm_4(void *v, bus_space_handle_t h, bus_size_t o, uint32_t*a, bus_size_t c);
void mbus_rrm_8(void *v, bus_space_handle_t h, bus_size_t o, uint64_t*a, bus_size_t c);

void mbus_wrm_2(void *v, bus_space_handle_t h, bus_size_t o, const uint16_t *a, bus_size_t c);
void mbus_wrm_4(void *v, bus_space_handle_t h, bus_size_t o, const uint32_t *a, bus_size_t c);
void mbus_wrm_8(void *v, bus_space_handle_t h, bus_size_t o, const uint64_t *a, bus_size_t c);

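/*
 * Region read/write/set/copy methods follow.  Note that the stream
 * variants prototyped above (mbus_rrm_*/mbus_wrm_*) and below
 * (mbus_rrr_*/mbus_wrr_*) are not defined in this file; the bus space
 * tag at the bottom reuses the non-stream handlers for the stream
 * methods.
 */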
void
mbus_rr_1(void *v, bus_space_handle_t h, bus_size_t o, uint8_t *a, bus_size_t c)
{
	volatile uint8_t *p;

	h += o;
	p = (void *)h;
	while (c--)
		*a++ = *p++;
}

void
mbus_rr_2(void *v, bus_space_handle_t h, bus_size_t o, uint16_t *a, bus_size_t c)
{
	volatile uint16_t *p;

	h += o;
	p = (void *)h;
	while (c--)
		*a++ = *p++;
}

void
mbus_rr_4(void *v, bus_space_handle_t h, bus_size_t o, uint32_t *a, bus_size_t c)
{
	volatile uint32_t *p;

	h += o;
	p = (void *)h;
	while (c--)
		*a++ = *p++;
}

void
mbus_rr_8(void *v, bus_space_handle_t h, bus_size_t o, uint64_t *a, bus_size_t c)
{
	volatile uint64_t *p;

	h += o;
	p = (void *)h;
	while (c--)
		*a++ = *p++;
}

void
mbus_wr_1(void *v, bus_space_handle_t h, bus_size_t o, const uint8_t *a, bus_size_t c)
{
	volatile uint8_t *p;

	h += o;
	p = (void *)h;
	while (c--)
		*p++ = *a++;
}

void
mbus_wr_2(void *v, bus_space_handle_t h, bus_size_t o, const uint16_t *a, bus_size_t c)
{
	volatile uint16_t *p;

	h += o;
	p = (void *)h;
	while (c--)
		*p++ = *a++;
}

void
mbus_wr_4(void *v, bus_space_handle_t h, bus_size_t o, const uint32_t *a, bus_size_t c)
{
	volatile uint32_t *p;

	h += o;
	p = (void *)h;
	while (c--)
		*p++ = *a++;
}

void
mbus_wr_8(void *v, bus_space_handle_t h, bus_size_t o, const uint64_t *a, bus_size_t c)
{
	volatile uint64_t *p;

	h += o;
	p = (void *)h;
	while (c--)
		*p++ = *a++;
}

void mbus_rrr_2(void *, bus_space_handle_t, bus_size_t, uint16_t *, bus_size_t);
void mbus_rrr_4(void *, bus_space_handle_t, bus_size_t, uint32_t *, bus_size_t);
void mbus_rrr_8(void *, bus_space_handle_t, bus_size_t, uint64_t *, bus_size_t);

void mbus_wrr_2(void *, bus_space_handle_t, bus_size_t, const uint16_t *, bus_size_t);
void mbus_wrr_4(void *, bus_space_handle_t, bus_size_t, const uint32_t *, bus_size_t);
void mbus_wrr_8(void *, bus_space_handle_t, bus_size_t, const uint64_t *, bus_size_t);

void
mbus_sr_1(void *v, bus_space_handle_t h, bus_size_t o, uint8_t vv, bus_size_t c)
{
	volatile uint8_t *p;

	h += o;
	p = (void *)h;
	while (c--)
		*p++ = vv;
}

void
mbus_sr_2(void *v, bus_space_handle_t h, bus_size_t o, uint16_t vv, bus_size_t c)
{
	volatile uint16_t *p;

	h += o;
	p = (void *)h;
	while (c--)
		*p++ = vv;
}

void
mbus_sr_4(void *v, bus_space_handle_t h, bus_size_t o, uint32_t vv, bus_size_t c)
{
	volatile uint32_t *p;

	h += o;
	p = (void *)h;
	while (c--)
		*p++ = vv;
}

void
mbus_sr_8(void *v, bus_space_handle_t h, bus_size_t o, uint64_t vv, bus_size_t c)
{
	volatile uint64_t *p;

	h += o;
	p = (void *)h;
	while (c--)
		*p++ = vv;
}

void
mbus_cp_1(void *v, bus_space_handle_t h1, bus_size_t o1,
	  bus_space_handle_t h2, bus_size_t o2, bus_size_t c)
{
	volatile uint8_t *p1, *p2;

	h1 += o1;
	h2 += o2;
	p1 = (void *)h1;
	p2 = (void *)h2;
	while (c--)
		*p1++ = *p2++;
}

void
mbus_cp_2(void *v, bus_space_handle_t h1, bus_size_t o1,
	  bus_space_handle_t h2, bus_size_t o2, bus_size_t c)
{
	volatile uint16_t *p1, *p2;

	h1 += o1;
	h2 += o2;
	p1 = (void *)h1;
	p2 = (void *)h2;
	while (c--)
		*p1++ = *p2++;
}

void
mbus_cp_4(void *v, bus_space_handle_t h1, bus_size_t o1,
	  bus_space_handle_t h2, bus_size_t o2, bus_size_t c)
{
	volatile uint32_t *p1, *p2;

	h1 += o1;
	h2 += o2;
	p1 = (void *)h1;
	p2 = (void *)h2;
	while (c--)
		*p1++ = *p2++;
}

void
mbus_cp_8(void *v, bus_space_handle_t h1, bus_size_t o1,
	  bus_space_handle_t h2, bus_size_t o2, bus_size_t c)
{
	volatile uint64_t *p1, *p2;

	h1 += o1;
	h2 += o2;
	p1 = (void *)h1;
	p2 = (void *)h2;
	while (c--)
		*p1++ = *p2++;
}


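/*
 * The bus space tag handed to child devices (see mbattach below).  The
 * initializers must follow the member order of struct
 * hppa_bus_space_tag; as noted in the comments, the stream methods
 * reuse the non-stream handlers.
 */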
const struct hppa_bus_space_tag hppa_bustag = {
	NULL,

	mbus_map, mbus_unmap, mbus_subregion, mbus_alloc, mbus_free,
	mbus_barrier, mbus_vaddr, mbus_mmap,
	mbus_r1,    mbus_r2,   mbus_r4,   mbus_r8,
	mbus_w1,    mbus_w2,   mbus_w4,   mbus_w8,
	mbus_rm_1,  mbus_rm_2, mbus_rm_4, mbus_rm_8,
	mbus_wm_1,  mbus_wm_2, mbus_wm_4, mbus_wm_8,
	mbus_sm_1,  mbus_sm_2, mbus_sm_4, mbus_sm_8,
	/* *_stream_* are the same as non-stream for native busses */
		    mbus_rm_2, mbus_rm_4, mbus_rm_8,
		    mbus_wm_2, mbus_wm_4, mbus_wm_8,
	mbus_rr_1,  mbus_rr_2, mbus_rr_4, mbus_rr_8,
	mbus_wr_1,  mbus_wr_2, mbus_wr_4, mbus_wr_8,
	/* *_stream_* are the same as non-stream for native busses */
		    mbus_rr_2, mbus_rr_4, mbus_rr_8,
		    mbus_wr_2, mbus_wr_4, mbus_wr_8,
	mbus_sr_1,  mbus_sr_2, mbus_sr_4, mbus_sr_8,
	mbus_cp_1,  mbus_cp_2, mbus_cp_4, mbus_cp_8
};

static size_t
_bus_dmamap_mapsize(int const nsegments)
{
	KASSERT(nsegments > 0);
	return sizeof(struct hppa_bus_dmamap) +
	    (sizeof(bus_dma_segment_t) * (nsegments - 1));
}

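/*
 * For example, _bus_dmamap_mapsize(1) is exactly
 * sizeof(struct hppa_bus_dmamap): the map structure already embeds one
 * bus_dma_segment_t, hence the (nsegments - 1) above.
 */
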
/*
 * Common function for DMA map creation.  May be called by bus-specific DMA map
 * creation functions.
 */
int
mbus_dmamap_create(void *v, bus_size_t size, int nsegments, bus_size_t maxsegsz,
    bus_size_t boundary, int flags, bus_dmamap_t *dmamp)
{
	struct hppa_bus_dmamap *map;

	/*
	 * Allocate and initialize the DMA map.  The end of the map is a
	 * variable-sized array of segments, so we allocate enough room for
	 * them in one shot.
	 *
	 * Note we don't preserve the WAITOK or NOWAIT flags.  Preservation of
	 * ALLOCNOW notifies others that we've reserved these resources, and
	 * they are not to be freed.
	 *
	 * The bus_dmamap_t includes one bus_dma_segment_t, hence the
	 * (nsegments - 1).
	 */
	map = kmem_zalloc(_bus_dmamap_mapsize(nsegments),
	    (flags & BUS_DMA_NOWAIT) ? KM_NOSLEEP : KM_SLEEP);
	if (!map)
		return ENOMEM;

	map->_dm_size = size;
	map->_dm_segcnt = nsegments;
	map->_dm_maxsegsz = maxsegsz;
	map->_dm_boundary = boundary;
	map->_dm_flags = flags & ~(BUS_DMA_WAITOK|BUS_DMA_NOWAIT);
	map->dm_mapsize = 0;		/* no valid mappings */
	map->dm_nsegs = 0;

	*dmamp = map;
	return 0;
}

/*
 * Common function for DMA map destruction.  May be called by bus-specific DMA
 * map destruction functions.
 */
void
mbus_dmamap_destroy(void *v, bus_dmamap_t map)
{

	/*
	 * If the handle contains a valid mapping, unload it.
	 */
	if (map->dm_mapsize != 0)
		mbus_dmamap_unload(v, map);

	kmem_free(map, _bus_dmamap_mapsize(map->_dm_segcnt));
}

/*
 * load DMA map with a linear buffer.
 */
int
mbus_dmamap_load(void *v, bus_dmamap_t map, void *buf, bus_size_t buflen,
    struct proc *p, int flags)
{
	vaddr_t lastaddr;
	int seg, error;
	struct vmspace *vm;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	if (buflen > map->_dm_size)
		return EINVAL;

	if (p != NULL) {
		vm = p->p_vmspace;
	} else {
		vm = vmspace_kernel();
	}

	seg = 0;
	error = _bus_dmamap_load_buffer(NULL, map, buf, buflen, vm, flags,
	    &lastaddr, &seg, 1);
	if (error == 0) {
		map->dm_mapsize = buflen;
		map->dm_nsegs = seg + 1;
	}
	return error;
}

/*
 * Like bus_dmamap_load(), but for mbufs.
 */
int
mbus_dmamap_load_mbuf(void *v, bus_dmamap_t map, struct mbuf *m0,
    int flags)
{
	vaddr_t lastaddr;
	int seg, error, first;
	struct mbuf *m;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	KASSERT(m0->m_flags & M_PKTHDR);

	if (m0->m_pkthdr.len > map->_dm_size)
		return EINVAL;

	first = 1;
	seg = 0;
	error = 0;
	for (m = m0; m != NULL && error == 0; m = m->m_next) {
		if (m->m_len == 0)
			continue;
		error = _bus_dmamap_load_buffer(NULL, map, m->m_data, m->m_len,
		    vmspace_kernel(), flags, &lastaddr, &seg, first);
		first = 0;
	}
	if (error == 0) {
		map->dm_mapsize = m0->m_pkthdr.len;
		map->dm_nsegs = seg + 1;
	}
	return error;
}

/*
 * Like bus_dmamap_load(), but for uios.
 */
int
mbus_dmamap_load_uio(void *v, bus_dmamap_t map, struct uio *uio,
    int flags)
{
	vaddr_t lastaddr;
	int seg, i, error, first;
	bus_size_t minlen, resid;
	struct iovec *iov;
	void *addr;

	/*
	 * Make sure that on error condition we return "no valid mappings."
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;

	resid = uio->uio_resid;
	iov = uio->uio_iov;

	first = 1;
	seg = 0;
	error = 0;
	for (i = 0; i < uio->uio_iovcnt && resid != 0 && error == 0; i++) {
		/*
		 * Now at the first iovec to load.  Load each iovec
		 * until we have exhausted the residual count.
		 */
		minlen = MIN(resid, iov[i].iov_len);
		addr = (void *)iov[i].iov_base;

		error = _bus_dmamap_load_buffer(NULL, map, addr, minlen,
		    uio->uio_vmspace, flags, &lastaddr, &seg, first);
		first = 0;

		resid -= minlen;
	}
	if (error == 0) {
		map->dm_mapsize = uio->uio_resid;
		map->dm_nsegs = seg + 1;
	}
	return error;
}

/*
 * Like bus_dmamap_load(), but for raw memory allocated with
 * bus_dmamem_alloc().
 */
int
mbus_dmamap_load_raw(void *v, bus_dmamap_t map, bus_dma_segment_t *segs,
    int nsegs, bus_size_t size, int flags)
{
	struct pglist *mlist;
	struct vm_page *m;
	paddr_t pa, pa_next;
	bus_size_t mapsize;
	bus_size_t pagesz = PAGE_SIZE;
	int seg;

	/*
	 * Make sure that on error condition we return "no valid mappings".
	 */
	map->dm_nsegs = 0;
	map->dm_mapsize = 0;

	/* Load the allocated pages. */
	mlist = segs[0]._ds_mlist;
	pa_next = 0;
	seg = -1;
	mapsize = size;
	for (m = TAILQ_FIRST(mlist); m != NULL; m = TAILQ_NEXT(m,pageq.queue)) {

		if (size == 0)
			panic("mbus_dmamap_load_raw: size botch");

		pa = VM_PAGE_TO_PHYS(m);
		if (pa != pa_next) {
			if (++seg >= map->_dm_segcnt)
				panic("mbus_dmamap_load_raw: nsegs botch");
			map->dm_segs[seg].ds_addr = pa;
			map->dm_segs[seg].ds_len = 0;
		}
		pa_next = pa + PAGE_SIZE;
		if (size < pagesz)
			pagesz = size;
		map->dm_segs[seg].ds_len += pagesz;
		size -= pagesz;
	}

	/* Make the map truly valid. */
	map->dm_nsegs = seg + 1;
	map->dm_mapsize = mapsize;

	return 0;
}

/*
 * unload a DMA map.
 */
void
mbus_dmamap_unload(void *v, bus_dmamap_t map)
{
	/*
	 * If this map was loaded with mbus_dmamap_load, we don't need to do
	 * anything.  If this map was loaded with mbus_dmamap_load_raw, we also
	 * don't need to do anything.
	 */

	/* Mark the mappings as invalid. */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
}

void
mbus_dmamap_sync(void *v, bus_dmamap_t map, bus_addr_t offset, bus_size_t len,
    int ops)
{
	int i;

	/*
	 * Mixing of PRE and POST operations is not allowed.
	 */
	if ((ops & (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE)) != 0 &&
	    (ops & (BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE)) != 0)
		panic("mbus_dmamap_sync: mix PRE and POST");

#ifdef DIAGNOSTIC
	if (offset >= map->dm_mapsize)
		panic("mbus_dmamap_sync: bad offset %lu (map size is %lu)",
		    offset, map->dm_mapsize);
	if ((offset + len) > map->dm_mapsize)
		panic("mbus_dmamap_sync: bad length");
#endif

	/*
	 * For a virtually-indexed write-back cache, we need to do the
	 * following things:
	 *
	 *	PREREAD -- Invalidate the D-cache.  We do this here in case a
	 *	write-back is required by the back-end.
	 *
	 *	PREWRITE -- Write-back the D-cache.  Note that if we are doing
	 *	a PREREAD|PREWRITE, we can collapse the whole thing into a
	 *	single Wb-Inv.
	 *
	 *	POSTREAD -- Nothing.
	 *
	 *	POSTWRITE -- Nothing.
	 */

	ops &= (BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	if (len == 0 || ops == 0)
		return;

	for (i = 0; len != 0 && i < map->dm_nsegs; i++) {
		if (offset >= map->dm_segs[i].ds_len)
			offset -= map->dm_segs[i].ds_len;
		else {
			bus_size_t l = map->dm_segs[i].ds_len - offset;

			if (l > len)
				l = len;

			fdcache(HPPA_SID_KERNEL, map->dm_segs[i]._ds_va +
			    offset, l);
			len -= l;
			offset = 0;
		}
	}
	/* For either operation, drain and synchronize any outstanding DMA. */
	__asm __volatile ("sync\n\tsyncdma\n\tsync\n\t"
	    "nop\n\tnop\n\tnop\n\tnop\n\tnop\n\tnop\n\tnop" ::: "memory");
}

/*
 * Common function for DMA-safe memory allocation.  May be called by bus-
 * specific DMA memory allocation functions.
 */
int
mbus_dmamem_alloc(void *v, bus_size_t size, bus_size_t alignment,
    bus_size_t boundary, bus_dma_segment_t *segs, int nsegs, int *rsegs,
    int flags)
{
	paddr_t low, high;
	struct pglist *mlist;
	struct vm_page *m;
	paddr_t pa, pa_next;
	int seg;
	int error;

	DPRINTF(("%s: size 0x%lx align 0x%lx bdry %0lx segs %p nsegs %d\n",
	    __func__, size, alignment, boundary, segs, nsegs));

	/* Always round the size. */
	size = round_page(size);

	/* Decide where we can allocate pages. */
	low = 0;
	high = ((flags & BUS_DMA_24BIT) ? (1 << 24) : 0) - 1;

	if ((mlist = kmem_alloc(sizeof(*mlist),
	    (flags & BUS_DMA_NOWAIT) ? KM_NOSLEEP : KM_SLEEP)) == NULL)
		return ENOMEM;

	/*
	 * Allocate physical pages from the VM system.
	 */
	TAILQ_INIT(mlist);
	error = uvm_pglistalloc(size, low, high, 0, 0, mlist, nsegs,
	    (flags & BUS_DMA_NOWAIT) == 0);

	/* If we don't have the pages. */
	if (error) {
		DPRINTF(("%s: uvm_pglistalloc(%lx, %lx, %lx, 0, 0, %p, %d, %0x)"
		    " failed", __func__, size, low, high, mlist, nsegs,
		    (flags & BUS_DMA_NOWAIT) == 0));
		kmem_free(mlist, sizeof(*mlist));
		return error;
	}

	pa_next = 0;
	seg = -1;

	TAILQ_FOREACH(m, mlist, pageq.queue) {
		pa = VM_PAGE_TO_PHYS(m);
		if (pa != pa_next) {
			if (++seg >= nsegs) {
				uvm_pglistfree(mlist);
				kmem_free(mlist, sizeof(*mlist));
				return ENOMEM;
			}
			segs[seg].ds_addr = pa;
			segs[seg].ds_len = PAGE_SIZE;
			segs[seg]._ds_mlist = NULL;
			segs[seg]._ds_va = 0;
		} else
			segs[seg].ds_len += PAGE_SIZE;
		pa_next = pa + PAGE_SIZE;
	}
	*rsegs = seg + 1;

	/*
	 * Simply keep a pointer around to the linked list, so
	 * bus_dmamap_free() can return it.
	 *
	 * Nobody should touch the pageq.queue fields while these pages are in
	 * our custody.
	 */
	segs[0]._ds_mlist = mlist;

	/*
	 * We now have physical pages, but no kernel virtual addresses yet.
	 * These may be allocated in bus_dmamap_map.
	 */
	return 0;
}

void
mbus_dmamem_free(void *v, bus_dma_segment_t *segs, int nsegs)
{
	struct pglist *mlist;
	/*
	 * Return the list of physical pages back to the VM system.
	 */
	mlist = segs[0]._ds_mlist;
	if (mlist == NULL)
		return;

	uvm_pglistfree(mlist);
	kmem_free(mlist, sizeof(*mlist));
}

/*
 * Common function for mapping DMA-safe memory.  May be called by bus-specific
 * DMA memory map functions.
 */
int
mbus_dmamem_map(void *v, bus_dma_segment_t *segs, int nsegs, size_t size,
    void **kvap, int flags)
{
	bus_addr_t addr;
	vaddr_t va;
	int curseg;
	u_int pmflags =
	    hppa_cpu_hastlbu_p() ? PMAP_NOCACHE : 0;
	const uvm_flag_t kmflags =
	    (flags & BUS_DMA_NOWAIT) != 0 ? UVM_KMF_NOWAIT : 0;

	size = round_page(size);

	/* Get a chunk of kernel virtual space. */
	va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY | kmflags);
	if (__predict_false(va == 0))
		return ENOMEM;

	*kvap = (void *)va;

	for (curseg = 0; curseg < nsegs; curseg++) {
		segs[curseg]._ds_va = va;
		for (addr = segs[curseg].ds_addr;
		     addr < (segs[curseg].ds_addr + segs[curseg].ds_len); ) {
			KASSERT(size != 0);

			pmap_kenter_pa(va, addr, VM_PROT_READ | VM_PROT_WRITE,
			   pmflags);

			addr += PAGE_SIZE;
			va += PAGE_SIZE;
			size -= PAGE_SIZE;
		}
	}
	pmap_update(pmap_kernel());
	return 0;
}

/*
 * Common function for unmapping DMA-safe memory.  May be called by bus-
 * specific DMA memory unmapping functions.
 */
void
mbus_dmamem_unmap(void *v, void *kva, size_t size)
{

	KASSERT(((vaddr_t)kva & PAGE_MASK) == 0);

	size = round_page(size);
	pmap_kremove((vaddr_t)kva, size);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, (vaddr_t)kva, size, UVM_KMF_VAONLY);
}

/*
 * Common function for mmap(2)'ing DMA-safe memory.  May be called by bus-
 * specific DMA mmap(2)'ing functions.
 */
paddr_t
mbus_dmamem_mmap(void *v, bus_dma_segment_t *segs, int nsegs,
	off_t off, int prot, int flags)
{
	int i;

	for (i = 0; i < nsegs; i++) {
		KASSERT((off & PGOFSET) == 0);
		KASSERT((segs[i].ds_addr & PGOFSET) == 0);
		KASSERT((segs[i].ds_len & PGOFSET) == 0);

		if (off >= segs[i].ds_len) {
			off -= segs[i].ds_len;
			continue;
		}

		return btop((u_long)segs[i].ds_addr + off);
	}

	/* Page not found. */
	return -1;
}

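/*
 * Utility function that does the actual work of loading a linear buffer
 * into a DMA map.  *lastaddrp, *segp and the "first" flag carry state
 * between calls so the mbuf and uio loaders above can build one map from
 * several buffers, coalescing physically contiguous chunks into a single
 * segment when maxsegsz and the boundary constraint allow.
 */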
int
_bus_dmamap_load_buffer(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct vmspace *vm, int flags, paddr_t *lastaddrp,
    int *segp, int first)
{
	bus_size_t sgsize;
	bus_addr_t curaddr, lastaddr, baddr, bmask;
	vaddr_t vaddr = (vaddr_t)buf;
	int seg;
	pmap_t pmap;

	pmap = vm_map_pmap(&vm->vm_map);

	lastaddr = *lastaddrp;
	bmask = ~(map->_dm_boundary - 1);

	for (seg = *segp; buflen > 0; ) {
		bool ok __diagused;
		/*
		 * Get the physical address for this segment.
		 */
		ok = pmap_extract(pmap, vaddr, &curaddr);
		KASSERT(ok == true);

		/*
		 * Compute the segment size, and adjust counts.
		 */
		sgsize = PAGE_SIZE - ((u_long)vaddr & PGOFSET);
		if (buflen < sgsize)
			sgsize = buflen;

		/*
		 * Make sure we don't cross any boundaries.
		 */
		if (map->_dm_boundary > 0) {
			baddr = (curaddr + map->_dm_boundary) & bmask;
			if (sgsize > (baddr - curaddr))
				sgsize = (baddr - curaddr);
		}

		/*
		 * Insert chunk into a segment, coalescing with previous
		 * segment if possible.
		 */
		if (first) {
			map->dm_segs[seg].ds_addr = curaddr;
			map->dm_segs[seg].ds_len = sgsize;
			map->dm_segs[seg]._ds_va = vaddr;
			first = 0;
		} else {
			if (curaddr == lastaddr &&
			    (map->dm_segs[seg].ds_len + sgsize) <=
			     map->_dm_maxsegsz &&
			    (map->_dm_boundary == 0 ||
			     (map->dm_segs[seg].ds_addr & bmask) ==
			     (curaddr & bmask)))
				map->dm_segs[seg].ds_len += sgsize;
			else {
				if (++seg >= map->_dm_segcnt)
					break;
				map->dm_segs[seg].ds_addr = curaddr;
				map->dm_segs[seg].ds_len = sgsize;
				map->dm_segs[seg]._ds_va = vaddr;
			}
		}

		lastaddr = curaddr + sgsize;
		vaddr += sgsize;
		buflen -= sgsize;
	}

	*segp = seg;
	*lastaddrp = lastaddr;

	/*
	 * Did we fit?
	 */
	if (buflen != 0)
		return EFBIG;		/* XXX better return value here? */
	return 0;
}

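/*
 * The bus DMA tag handed to child devices (see mbattach below); the
 * initializers follow the member order of struct hppa_bus_dma_tag.
 */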
const struct hppa_bus_dma_tag hppa_dmatag = {
	NULL,
	mbus_dmamap_create, mbus_dmamap_destroy,
	mbus_dmamap_load, mbus_dmamap_load_mbuf,
	mbus_dmamap_load_uio, mbus_dmamap_load_raw,
	mbus_dmamap_unload, mbus_dmamap_sync,

	mbus_dmamem_alloc, mbus_dmamem_free, mbus_dmamem_map,
	mbus_dmamem_unmap, mbus_dmamem_mmap
};

int
mbmatch(device_t parent, cfdata_t cf, void *aux)
{

	/* there will be only one */
	if (mb_attached)
		return 0;

	return 1;
}

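/*
 * Callbacks for pdc_scanbus().  mbattach() walks the bus twice: first
 * attaching processors and memory controllers, then everything else;
 * these filters select the module types for each pass.
 */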
static device_t
mb_module_callback(device_t self, struct confargs *ca)
{
	if (ca->ca_type.iodc_type == HPPA_TYPE_NPROC ||
	    ca->ca_type.iodc_type == HPPA_TYPE_MEMORY)
		return NULL;

	return config_found(self, ca, mbprint,
	    CFARGS(.submatch = mbsubmatch));
}

static device_t
mb_cpu_mem_callback(device_t self, struct confargs *ca)
{
	if ((ca->ca_type.iodc_type != HPPA_TYPE_NPROC &&
	     ca->ca_type.iodc_type != HPPA_TYPE_MEMORY))
		return NULL;

	return config_found(self, ca, mbprint,
	    CFARGS(.submatch = mbsubmatch));
}

void
mbattach(device_t parent, device_t self, void *aux)
{
	struct mainbus_softc *sc = device_private(self);
	struct confargs nca;
	bus_space_handle_t ioh;
#if NLCD > 0
	int err;
#endif

	sc->sc_dv = self;
	mb_attached = 1;

	/*
	 * Map all of Fixed Physical, Local Broadcast, and Global Broadcast
	 * space.  These spaces are adjacent and in that order and run to the
	 * end of the address space.
	 */
	/*
	 * XXX fredette - this may be a copout, or it may be a great idea.  I'm
	 * not sure which yet.
	 */

	/* map all the way till the end of the memory */
	if (bus_space_map(&hppa_bustag, hppa_mcpuhpa, (~0LU - hppa_mcpuhpa + 1),
	    0, &ioh))
		panic("%s: cannot map mainbus IO space", __func__);

	/*
	 * Local-Broadcast the HPA to all modules on the bus
	 */
	((struct iomod *)(hppa_mcpuhpa & HPPA_FLEX_MASK))[FPA_IOMOD].io_flex =
		(void *)((hppa_mcpuhpa & HPPA_FLEX_MASK) | DMA_ENABLE);

	aprint_normal(" [flex %lx]\n", hppa_mcpuhpa & HPPA_FLEX_MASK);

	/* PDC first */
	memset(&nca, 0, sizeof(nca));
	nca.ca_name = "pdc";
	nca.ca_hpa = 0;
	nca.ca_iot = &hppa_bustag;
	nca.ca_dmatag = &hppa_dmatag;
	config_found(self, &nca, mbprint, CFARGS_NONE);

#if NPOWER > 0
	/* get some power */
	memset(&nca, 0, sizeof(nca));
	nca.ca_name = "power";
	nca.ca_irq = HPPACF_IRQ_UNDEF;
	nca.ca_iot = &hppa_bustag;
	config_found(self, &nca, mbprint, CFARGS_NONE);
#endif

#if NLCD > 0
	memset(&nca, 0, sizeof(nca));
	err = pdcproc_chassis_info(&pdc_chassis_info, &nca.ca_pcl);
	if (!err) {
		if (nca.ca_pcl.enabled) {
			nca.ca_name = "lcd";
			nca.ca_dp.dp_bc[0] = nca.ca_dp.dp_bc[1] = nca.ca_dp.dp_bc[2] =
			nca.ca_dp.dp_bc[3] = nca.ca_dp.dp_bc[4] = nca.ca_dp.dp_bc[5] = -1;
			nca.ca_dp.dp_mod = -1;
			nca.ca_irq = HPPACF_IRQ_UNDEF;
			nca.ca_iot = &hppa_bustag;
			nca.ca_hpa = nca.ca_pcl.cmd_addr;

			config_found(self, &nca, mbprint, CFARGS_NONE);
		} else if (nca.ca_pcl.model == 2) {
			bus_space_map(&hppa_bustag, nca.ca_pcl.cmd_addr,
			    4, 0, (bus_space_handle_t *)&machine_ledaddr);
			machine_ledword = 1;
		}
	}
#endif

	hppa_modules_scan();

	/* Search and attach all CPUs and memory controllers. */
	memset(&nca, 0, sizeof(nca));
	nca.ca_name = "mainbus";
	nca.ca_hpa = 0;
	nca.ca_hpabase = HPPA_FPA;	/* Central bus */
	nca.ca_nmodules = MAXMODBUS;
	nca.ca_irq = HPPACF_IRQ_UNDEF;
	nca.ca_iot = &hppa_bustag;
	nca.ca_dmatag = &hppa_dmatag;
	nca.ca_dp.dp_bc[0] = nca.ca_dp.dp_bc[1] = nca.ca_dp.dp_bc[2] =
	nca.ca_dp.dp_bc[3] = nca.ca_dp.dp_bc[4] = nca.ca_dp.dp_bc[5] = -1;
	nca.ca_dp.dp_mod = -1;
	pdc_scanbus(self, &nca, mb_cpu_mem_callback);

	/* Search for IO hardware. */
	memset(&nca, 0, sizeof(nca));
	nca.ca_name = "mainbus";
	nca.ca_hpa = 0;
	nca.ca_hpabase = 0;		/* Central bus already walked above */
	nca.ca_nmodules = MAXMODBUS;
	nca.ca_irq = HPPACF_IRQ_UNDEF;
	nca.ca_iot = &hppa_bustag;
	nca.ca_dmatag = &hppa_dmatag;
	nca.ca_dp.dp_bc[0] = nca.ca_dp.dp_bc[1] = nca.ca_dp.dp_bc[2] =
	nca.ca_dp.dp_bc[3] = nca.ca_dp.dp_bc[4] = nca.ca_dp.dp_bc[5] = -1;
	nca.ca_dp.dp_mod = -1;
	pdc_scanbus(self, &nca, mb_module_callback);

	hppa_modules_done();
}

int
mbprint(void *aux, const char *pnp)
{
	int n;
	struct confargs *ca = aux;

	if (pnp)
		aprint_normal("\"%s\" at %s (type 0x%x, sv 0x%x)", ca->ca_name,
		    pnp, ca->ca_type.iodc_type, ca->ca_type.iodc_sv_model);
	if (ca->ca_hpa) {
		aprint_normal(" hpa 0x%lx", ca->ca_hpa);
		if (ca->ca_dp.dp_mod >= 0) {
			aprint_normal(" path ");
			for (n = 0; n < 6; n++) {
				if (ca->ca_dp.dp_bc[n] >= 0)
					aprint_normal("%d/", ca->ca_dp.dp_bc[n]);
			}
			aprint_normal("%d", ca->ca_dp.dp_mod);
		}
		if (!pnp && ca->ca_irq >= 0) {
			aprint_normal(" irq %d", ca->ca_irq);
		}
	}

	return UNCONF;
}

int
mbsubmatch(device_t parent, cfdata_t cf, const int *ldesc, void *aux)
{
	struct confargs *ca = aux;
	int ret;
	int saved_irq;

	saved_irq = ca->ca_irq;
	if (cf->hppacf_irq != HPPACF_IRQ_UNDEF)
		ca->ca_irq = cf->hppacf_irq;
	if (!(ret = config_match(parent, cf, aux)))
		ca->ca_irq = saved_irq;
	return ret;
}