/*	$NetBSD: sun3x.c,v 1.3 2002/05/23 03:50:37 nathanw Exp $	*/

/*-
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Jeremy Cooper and Gordon Ross
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *        This product includes software developed by the NetBSD
 *        Foundation, Inc. and its contributors.
 * 4. Neither the name of The NetBSD Foundation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Standalone functions specific to the Sun3X.
 */

#define _SUN3X_ XXX

/* Avoid conflicts on these: */
#define get_pte sun3x_get_pte
#define set_pte sun3x_set_pte

/*
 * We need to get the sun3x NBSG definition, even if we're
 * building this with a different sun68k target.
 */
#include <arch/sun3/include/param3x.h>

#include <sys/param.h>
#include <machine/mon.h>

#include <stand.h>

#include "libsa.h"
#include "dvma.h"
#include "saio.h"	/* enum MAPTYPES */

#include <arch/sun3/include/pte3x.h>
#include <arch/sun3/sun3x/iommu.h>
#include <arch/sun3/sun3x/vme.h>

/* Names, names... */
#define	MON_LOMEM_BASE	0
#define	MON_LOMEM_SIZE	0x400000
#define MON_LOMEM_END	(MON_LOMEM_BASE+MON_LOMEM_SIZE)
#define MON_KDB_BASE	SUN3X_MON_KDB_BASE
#define MON_KDB_SIZE	SUN3X_MON_KDB_SIZE
#define MON_KDB_END 	(MON_KDB_BASE+MON_KDB_SIZE)
#define MON_DVMA_BASE	SUN3X_MON_DVMA_BASE
#define MON_DVMA_SIZE	SUN3X_MON_DVMA_SIZE

void mmu_atc_flush (u_int va);
void set_iommupte(u_int va, u_int pa);

u_int	get_pte __P((vaddr_t va));
void	set_pte __P((vaddr_t va, u_int pte));
char *	dvma3x_alloc __P((int len));
void	dvma3x_free __P((char *dvma, int len));
char *	dvma3x_mapin __P((char *pkt, int len));
void	dvma3x_mapout __P((char *dmabuf, int len));
char *	dev3x_mapin __P((int type, u_long addr, int len));

struct mapinfo {
	int maptype;
	u_int base;
	u_int mask;
};

struct mapinfo
sun3x_mapinfo[MAP__NTYPES] = {
	/* On-board memory, I/O */
	{ MAP_MAINMEM,   0, ~0 },
	{ MAP_OBIO,      0, ~0 },
	/* Multibus adapter (A24,A16) */
	{ MAP_MBMEM,     VME24D16_BASE, VME24_MASK },
	{ MAP_MBIO,      VME16D16_BASE, VME16_MASK },
	/* VME A16 */
	{ MAP_VME16A16D, VME16D16_BASE, VME16_MASK },
	{ MAP_VME16A32D, VME16D32_BASE, VME16_MASK },
	/* VME A24 */
	{ MAP_VME24A16D, VME24D16_BASE, VME24_MASK },
	{ MAP_VME24A32D, VME24D32_BASE, VME24_MASK },
	/* VME A32 */
	{ MAP_VME32A16D, VME32D16_BASE, VME32_MASK },
	{ MAP_VME32A32D, VME32D32_BASE, VME32_MASK },
};

/* The virtual address we will use for PROM device mappings. */
u_int sun3x_devmap = MON_KDB_BASE;

char *
dev3x_mapin(maptype, physaddr, length)
	int maptype;
	u_long physaddr;
	int length;
{
	u_int i, pa, pte, pgva, va;

	if ((sun3x_devmap + length) > (MON_KDB_BASE + MON_KDB_SIZE))
		panic("dev3x_mapin: length=%d\n", length);

	for (i = 0; i < MAP__NTYPES; i++)
		if (sun3x_mapinfo[i].maptype == maptype)
			goto found;
	panic("dev3x_mapin: bad maptype");
found:

	if (physaddr & ~(sun3x_mapinfo[i].mask))
		panic("dev3x_mapin: bad address");
	pa = sun3x_mapinfo[i].base + physaddr;

	pte = pa | MMU_DT_PAGE | MMU_SHORT_PTE_CI;

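	/*
	 * Map the region one page at a time (cache-inhibited) and
	 * advance the shared device-map cursor past it.
	 */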
	va = pgva = sun3x_devmap;
	do {
		set_pte(pgva, pte);
		pgva += NBPG;
		pte += NBPG;
		length -= NBPG;
	} while (length > 0);
	sun3x_devmap = pgva;
	va += (physaddr & PGOFSET);

#ifdef	DEBUG_PROM
	if (debug)
		printf("dev3x_mapin: va=0x%x pte=0x%x\n",
			   va, get_pte(va));
#endif
	return ((char*)va);
}

/*****************************************************************
 * DVMA support
 */

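/*
 * Window of local memory that dvma3x_init() below makes
 * accessible through the PROM DVMA region.
 */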
#define SA_MIN_VA	0x200000
#define SA_MAX_VA	(SA_MIN_VA + MON_DVMA_SIZE - (8 * NBPG))

#define	MON_DVMA_MAPLEN	(MON_DVMA_SIZE - NBPG)

/* This points to the end of the free DVMA space. */
u_int dvma3x_end = MON_DVMA_BASE + MON_DVMA_MAPLEN;

void
dvma3x_init()
{
	u_int va, pa;

	pa = SA_MIN_VA;
	va = MON_DVMA_BASE;

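	/*
	 * Double-map each page: into the CPU MMU at MON_DVMA_BASE
	 * and into the I/O MMU, both cache-inhibited.
	 */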
	while (pa < SA_MAX_VA) {
		set_pte(va, pa | MMU_DT_PAGE | MMU_SHORT_PTE_CI);
		set_iommupte(va, pa | IOMMU_PDE_DT_VALID | IOMMU_PDE_CI);
		va += NBPG;
		pa += NBPG;
	}
}

/* Convert a local address to a DVMA address. */
char *
dvma3x_mapin(char *addr, int len)
{
	int va = (int)addr;

	/* Make sure the address is in the DVMA map. */
	if ((va < SA_MIN_VA) || (va >= SA_MAX_VA))
		panic("dvma3x_mapin");

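	/* Relocate the address from the local window into the DVMA window. */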
	va -= SA_MIN_VA;
	va += MON_DVMA_BASE;

	return ((char *) va);
}

/* Release a DVMA mapping.  Nothing to undo; just sanity-check the address. */
void
dvma3x_mapout(char *addr, int len)
{
	int va = (int)addr;

	/* Make sure the address is in the DVMA map. */
	if ((va < MON_DVMA_BASE) ||
		(va >= (MON_DVMA_BASE + MON_DVMA_MAPLEN)))
		panic("dvma3x_mapout");
}

char *
dvma3x_alloc(int len)
{
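	/* Carve the request off the top of the DVMA area, in whole pages. */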
	len = m68k_round_page(len);
	dvma3x_end -= len;
	return((char*)dvma3x_end);
}

void
dvma3x_free(char *dvma, int len)
{
	/* not worth the trouble */
}

/*****************************************************************
 * MMU (and I/O MMU) support
 */

u_int
get_pte(va)
	vaddr_t va;	/* virt. address */
{
	u_int	pn;
	mmu_short_pte_t *tbl;

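	/* Select the PROM page table that covers this VA and rebase the VA. */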
	if (va >= MON_LOMEM_BASE && va < MON_LOMEM_END) {
		tbl = (mmu_short_pte_t *) *romVectorPtr->lomemptaddr;
	} else if (va >= MON_KDB_BASE && va < MON_KDB_END) {
		va -= MON_KDB_BASE;
		tbl = (mmu_short_pte_t *) *romVectorPtr->monptaddr;
	} else if (va >= MON_DVMA_BASE) {
		va -= MON_DVMA_BASE;
		tbl = (mmu_short_pte_t *) *romVectorPtr->shadowpteaddr;
	} else {
		return 0;
	}

	/* Calculate the page number within the selected table. */
	pn = (va >> MMU_PAGE_SHIFT);
	/* Extract the PTE from the table. */
	return tbl[pn].attr.raw;
}

void
set_pte(va, pa)
	vaddr_t va;	/* virt. address */
	u_int pa;	/* phys. address */
{
	u_int	pn;
	mmu_short_pte_t *tbl;

	if (va >= MON_LOMEM_BASE && va < (MON_LOMEM_BASE + MON_LOMEM_SIZE)) {
		/*
		 * Main memory range.
		 */
		tbl = (mmu_short_pte_t *) *romVectorPtr->lomemptaddr;
	} else if (va >= MON_KDB_BASE && va < (MON_KDB_BASE + MON_KDB_SIZE)) {
		/*
		 * Kernel Debugger range.
		 */
		va -= MON_KDB_BASE;
		tbl = (mmu_short_pte_t *) *romVectorPtr->monptaddr;
	} else if (va >= MON_DVMA_BASE) {
		/*
		 * DVMA range.
		 */
		va -= MON_DVMA_BASE;
		tbl = (mmu_short_pte_t *) *romVectorPtr->shadowpteaddr;
	} else {
		/* invalid range */
		return;
	}

	/* Calculate the page number within the selected table. */
	pn = (va >> MMU_PAGE_SHIFT);
	/* Enter the PTE into the table. */
	tbl[pn].attr.raw = pa;
	/* Flush the ATC of any cached entries for the va. */
	mmu_atc_flush(va);
}

void
mmu_atc_flush(va)
	u_int va;
{

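	/* Invalidate any cached ATC translation for this virtual address. */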
	__asm __volatile ("pflush	#0,#0,%0@" : : "a" (va));
}

void
set_iommupte(va, pa)
	u_int va;	/* virt. address */
	u_int pa;	/* phys. address */
{
	iommu_pde_t *iommu_va;
	int pn;

	iommu_va = (iommu_pde_t *) *romVectorPtr->dvmaptaddr;

	/* Adjust the virtual address into an offset within the DVMA map. */
	va -= MON_DVMA_BASE;

	/* Convert the slave address into a page index. */
	pn = IOMMU_BTOP(va);

	iommu_va[pn].addr.raw = pa;
}

/*****************************************************************
 * Init our function pointers, etc.
 */

void
sun3x_init()
{

	/* Set the function pointers. */
	dev_mapin_p   = dev3x_mapin;
	dvma_alloc_p  = dvma3x_alloc;
	dvma_free_p   = dvma3x_free;
	dvma_mapin_p  = dvma3x_mapin;
	dvma_mapout_p = dvma3x_mapout;

	dvma3x_init();
}