xref: /netbsd/sys/arch/sun68k/stand/libsa/sun2.c (revision c4a72b64)
1 /*	$NetBSD: sun2.c,v 1.5 2002/09/27 15:36:58 provos Exp $	*/
2 
3 /*-
4  * Copyright (c) 1998 The NetBSD Foundation, Inc.
5  * All rights reserved.
6  *
7  * This code is derived from software contributed to The NetBSD Foundation
8  * by Gordon W. Ross and Matthew Fredette.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. All advertising materials mentioning features or use of this software
19  *    must display the following acknowledgement:
20  *        This product includes software developed by the NetBSD
21  *        Foundation, Inc. and its contributors.
22  * 4. Neither the name of The NetBSD Foundation nor the names of its
23  *    contributors may be used to endorse or promote products derived
24  *    from this software without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36  * POSSIBILITY OF SUCH DAMAGE.
37  */
38 
39 /*
40  * Standalone functions specific to the Sun2.
41  */
42 
43 /* Need to avoid conflicts on these: */
44 #define get_pte sun2_get_pte
45 #define set_pte sun2_set_pte
46 #define get_segmap sun2_get_segmap
47 #define set_segmap sun2_set_segmap
48 
49 /*
50  * We need to get the sun2 NBSG definition, even if we're
51  * building this with a different sun68k target.
52  */
53 #include <arch/sun2/include/param.h>
54 
55 #include <sys/param.h>
56 #include <machine/idprom.h>
57 #include <machine/mon.h>
58 
59 #include <arch/sun2/include/pte.h>
60 #include <arch/sun2/sun2/control.h>
61 #ifdef notyet
62 #include <arch/sun3/sun3/vme.h>
63 #else
64 #define VME16_BASE MBIO_BASE
65 #define VME16_MASK MBIO_MASK
66 #endif
67 #include <arch/sun2/sun2/mbmem.h>
68 #include <arch/sun2/sun2/mbio.h>
69 
70 #include <stand.h>
71 
72 #include "libsa.h"
73 #include "dvma.h"
74 #include "saio.h"	/* enum MAPTYPES */
75 
76 #define OBIO_MASK 0xFFFFFF
77 
78 u_int	get_pte __P((vaddr_t va));
79 void	set_pte __P((vaddr_t va, u_int pte));
80 char *	dvma2_alloc  __P((int len));
81 void	dvma2_free  __P((char *dvma, int len));
82 char *	dvma2_mapin  __P((char *pkt, int len));
83 void	dvma2_mapout  __P((char *dmabuf, int len));
84 char *	dev2_mapin  __P((int type, u_long addr, int len));
85 
/*
 * Describes how one device map type (enum MAPTYPES from saio.h)
 * is translated into a sun2 page type and physical address.
 */
struct mapinfo {
	int maptype;	/* MAP_* code this entry applies to */
	int pgtype;	/* PGT_* page-type bits to set in the PTE */
	u_int base;	/* physical base added to the device address */
	u_int mask;	/* valid address bits; (addr & ~mask) must be 0 */
};
92 
#ifdef	notyet
/*
 * Translation table from MAP_* codes to sun2 page types and bus
 * address windows.  Entries with a zero pgtype/base/mask are map
 * types the sun2 hardware cannot provide (e.g. most VME spaces).
 * Only compiled when dev2_mapin is enabled (see "notyet" below).
 */
struct mapinfo
sun2_mapinfo[MAP__NTYPES] = {
	/* On-board memory, I/O */
	{ MAP_MAINMEM,   PGT_OBMEM,   0,          ~0 },
	{ MAP_OBIO,      PGT_OBIO,    0,          OBIO_MASK },
	/* Multibus memory, I/O */
	{ MAP_MBMEM,     PGT_MBMEM, MBMEM_BASE, MBMEM_MASK },
	{ MAP_MBIO,      PGT_MBIO,  MBIO_BASE, MBIO_MASK },
	/* VME A16 */
	{ MAP_VME16A16D, PGT_VME_D16, VME16_BASE, VME16_MASK },
	{ MAP_VME16A32D, 0, 0, 0 },
	/* VME A24 */
	{ MAP_VME24A16D, 0, 0, 0 },
	{ MAP_VME24A32D, 0, 0, 0 },
	/* VME A32 */
	{ MAP_VME32A16D, 0, 0, 0 },
	{ MAP_VME32A32D, 0, 0, 0 },
};
#endif
113 
114 /* The virtual address we will use for PROM device mappings. */
115 int sun2_devmap = SUN3_MONSHORTSEG;
116 
117 char *
118 dev2_mapin(maptype, physaddr, length)
119 	int maptype;
120 	u_long physaddr;
121 	int length;
122 {
123 #ifdef	notyet
124 	u_int i, pa, pte, pgva, va;
125 
126 	if ((sun2_devmap + length) > SUN3_MONSHORTPAGE)
127 		panic("dev2_mapin: length=%d", length);
128 
129 	for (i = 0; i < MAP__NTYPES; i++)
130 		if (sun2_mapinfo[i].maptype == maptype)
131 			goto found;
132 	panic("dev2_mapin: bad maptype");
133 found:
134 
135 	if (physaddr & ~(sun2_mapinfo[i].mask))
136 		panic("dev2_mapin: bad address");
137 	pa = sun2_mapinfo[i].base += physaddr;
138 
139 	pte = PA_PGNUM(pa) | PG_PERM |
140 		sun2_mapinfo[i].pgtype;
141 
142 	va = pgva = sun2_devmap;
143 	do {
144 		set_pte(pgva, pte);
145 		pgva += NBPG;
146 		pte += 1;
147 		length -= NBPG;
148 	} while (length > 0);
149 	sun2_devmap = pgva;
150 	va += (physaddr & PGOFSET);
151 
152 #ifdef	DEBUG_PROM
153 	if (debug)
154 		printf("dev2_mapin: va=0x%x pte=0x%x\n",
155 			   va, get_pte(va));
156 #endif
157 	return ((char*)va);
158 #else
159 	panic("dev2_mapin");
160 	return(NULL);
161 #endif
162 }
163 
164 /*****************************************************************
165  * DVMA support
166  */
167 
168 /*
169  * The easiest way to deal with the need for DVMA mappings is to
170  * create a DVMA alias mapping of the entire address range used by
171  * the boot program.  That way, dvma_mapin can just compute the
172  * DVMA alias address, and dvma_mapout does nothing.
173  *
174  * Note that this assumes that standalone programs will do I/O
175  * operations only within range (SA_MIN_VA .. SA_MAX_VA) checked.
176  */
177 
178 #define DVMA_BASE 0x00f00000
179 #define DVMA_MAPLEN  0x38000	/* 256K - 32K (save MONSHORTSEG) */
180 
181 #define SA_MIN_VA	0x220000
182 #define SA_MAX_VA	(SA_MIN_VA + DVMA_MAPLEN)
183 
184 /* This points to the end of the free DVMA space. */
185 u_int dvma2_end = DVMA_BASE + DVMA_MAPLEN;
186 
187 void
188 dvma2_init()
189 {
190 	int segva, dmava, sme;
191 
192 	segva = SA_MIN_VA;
193 	dmava = DVMA_BASE;
194 
195 	while (segva < SA_MAX_VA) {
196 		sme = get_segmap(segva);
197 		set_segmap(dmava, sme);
198 		segva += NBSG;
199 		dmava += NBSG;
200 	}
201 }
202 
203 /* Convert a local address to a DVMA address. */
204 char *
205 dvma2_mapin(char *addr, int len)
206 {
207 	int va = (int)addr;
208 
209 	/* Make sure the address is in the DVMA map. */
210 	if ((va < SA_MIN_VA) || (va >= SA_MAX_VA))
211 		panic("dvma2_mapin: 0x%x outside 0x%x..0x%x",
212 		    va, SA_MIN_VA, SA_MAX_VA);
213 
214 	va -= SA_MIN_VA;
215 	va += DVMA_BASE;
216 
217 	return ((char *) va);
218 }
219 
220 /* Destroy a DVMA address alias. */
221 void
222 dvma2_mapout(char *addr, int len)
223 {
224 	int va = (int)addr;
225 
226 	/* Make sure the address is in the DVMA map. */
227 	if ((va < DVMA_BASE) || (va >= (DVMA_BASE + DVMA_MAPLEN)))
228 		panic("dvma2_mapout");
229 }
230 
231 char *
232 dvma2_alloc(int len)
233 {
234 	len = m68k_round_page(len);
235 	dvma2_end -= len;
236 	return((char*)dvma2_end);
237 }
238 
/*
 * Release DVMA space.  Allocations are never reclaimed in the
 * standalone environment -- not worth the trouble.
 */
void
dvma2_free(char *dvma, int len)
{
}
244 
245 /*****************************************************************
246  * Control space stuff...
247  */
248 
249 u_int
250 get_pte(va)
251 	vaddr_t va;
252 {
253 	u_int pte;
254 
255 	pte = get_control_word(CONTROL_ADDR_BUILD(PGMAP_BASE, va));
256 	if (pte & PG_VALID) {
257 		/*
258 		 * This clears bit 30 (the kernel readable bit, which
259 		 * should always be set), bit 28 (which should always
260 		 * be set) and bit 26 (the user writable bit, which we
261 		 * always have tracking the kernel writable bit).  In
262 		 * the protection, this leaves bit 29 (the kernel
263 		 * writable bit) and bit 27 (the user readable bit).
264 		 * See pte2.h for more about this hack.
265 		 */
266 		pte &= ~(0x54000000);
267 		/*
268 		 * Flip bit 27 (the user readable bit) to become bit
269 		 * 27 (the PG_SYSTEM bit).
270 		 */
271 		pte ^= (PG_SYSTEM);
272 	}
273 	return (pte);
274 }
275 
/*
 * Install a PTE into the sun2 hardware page map (via control
 * space), converting from the sun3-like software format used by
 * the rest of this code back to the hardware layout.  This is the
 * inverse of get_pte() above; see pte2.h for the full story.
 */
void
set_pte(va, pte)
	vaddr_t va;
	u_int pte;
{
	if (pte & PG_VALID) {
		/* Clear bit 26 (the user writable bit).  */
		pte &= (~0x04000000);
		/*
		 * Flip bit 27 (the PG_SYSTEM bit) to become bit 27
		 * (the user readable bit).
		 */
		pte ^= (PG_SYSTEM);
		/*
		 * Always set bits 30 (the kernel readable bit) and
		 * bit 28, and set bit 26 (the user writable bit) iff
		 * bit 29 (the kernel writable bit) is set *and* bit
		 * 27 (the user readable bit) is set.  This latter bit
		 * of logic is expressed in the bizarre second term
		 * below, chosen because it needs no branches.
		 */
#if (PG_WRITE >> 2) != PG_SYSTEM
#error	"PG_WRITE and PG_SYSTEM definitions don't match!"
#endif
		pte |= 0x50000000
		    | ((((pte & PG_WRITE) >> 2) & pte) >> 1);
	}
	set_control_word(CONTROL_ADDR_BUILD(PGMAP_BASE, va), pte);
}
305 
306 int
307 get_segmap(va)
308 	vaddr_t va;
309 {
310 	va = CONTROL_ADDR_BUILD(SEGMAP_BASE, va);
311 	return (get_control_byte(va));
312 }
313 
314 void
315 set_segmap(va, sme)
316 	vaddr_t va;
317 	int sme;
318 {
319 	va = CONTROL_ADDR_BUILD(SEGMAP_BASE, va);
320 	set_control_byte(va, sme);
321 }
322 
323 /*
324  * Copy the IDPROM contents into the passed buffer.
325  * The caller (idprom.c) will do the checksum.
326  */
327 void
328 sun2_getidprom(u_char *dst)
329 {
330 	vaddr_t src;	/* control space address */
331 	int len, x;
332 
333 	src = IDPROM_BASE;
334 	len = sizeof(struct idprom);
335 	do {
336 		x = get_control_byte(src);
337 		src += NBPG;
338 		*dst++ = x;
339 	} while (--len > 0);
340 }
341 
342 /*****************************************************************
343  * Init our function pointers, etc.
344  */
345 
346 /*
347  * For booting, the PROM in fredette's Sun 2/120 doesn't map
348  * much main memory, and what is mapped is mapped strangely.
349  * Low virtual memory is mapped like:
350  *
351  * 0x000000 - 0x0bffff virtual -> 0x000000 - 0x0bffff physical
352  * 0x0c0000 - 0x0fffff virtual -> invalid
353  * 0x100000 - 0x13ffff virtual -> 0x0c0000 - 0x0fffff physical
354  * 0x200800 - 0x3fffff virtual -> 0x200800 - 0x3fffff physical
355  *
356  * I think the SunOS authors wanted to load kernels starting at
357  * physical zero, and assumed that kernels would be less
358  * than 768K (0x0c0000) long.  Also, the PROM maps physical
359  * 0x0c0000 - 0x0fffff into DVMA space, so we can't take the
360  * easy road and just add more mappings to use that physical
361  * memory while loading (the PROM might do DMA there).
362  *
363  * What we do, then, is assume a 4MB machine (you'll really
364  * need that to run NetBSD at all anyways), and we map two
365  * chunks of physical and virtual space:
366  *
367  * 0x400000 - 0x4bffff virtual -> 0x000000 - 0x0bffff physical
368  * 0x4c0000 - 0x600000 virtual -> 0x2c0000 - 0x3fffff physical
369  *
370  * And then we load starting at virtual 0x400000.  We will do
371  * all of this mapping just by copying PMEGs.
372  *
373  * After the load is done, but before we enter the kernel, we're
374  * done with the PROM, so we copy the part of the kernel that
375  * got loaded at physical 0x2c0000 down to physical 0x0c0000.
376  * This can't just be a PMEG copy; we've actually got to move
377  * bytes in physical memory.
378  *
379  * These two chunks of physical and virtual space are defined
380  * in macros below.  Some of the macros are only for completeness:
381  */
382 #define MEM_CHUNK0_SIZE			(0x0c0000)
383 #define MEM_CHUNK0_LOAD_PHYS		(0x000000)
384 #define MEM_CHUNK0_LOAD_VIRT		(0x400000)
385 #define MEM_CHUNK0_LOAD_VIRT_PROM	MEM_CHUNK0_LOAD_PHYS
386 #define MEM_CHUNK0_COPY_PHYS		MEM_CHUNK0_LOAD_PHYS
387 #define MEM_CHUNK0_COPY_VIRT		MEM_CHUNK0_COPY_PHYS
388 
389 #define MEM_CHUNK1_SIZE			(0x140000)
390 #define MEM_CHUNK1_LOAD_PHYS		(0x2c0000)
391 #define MEM_CHUNK1_LOAD_VIRT		(MEM_CHUNK0_LOAD_VIRT + MEM_CHUNK0_SIZE)
392 #define MEM_CHUNK1_LOAD_VIRT_PROM	MEM_CHUNK1_LOAD_PHYS
393 #define MEM_CHUNK1_COPY_PHYS		(MEM_CHUNK0_LOAD_PHYS + MEM_CHUNK0_SIZE)
394 #define MEM_CHUNK1_COPY_VIRT		MEM_CHUNK1_COPY_PHYS
395 
396 /* Maps memory for loading. */
397 u_long
398 sun2_map_mem_load()
399 {
400 	vaddr_t off;
401 
402 	/* Map chunk zero for loading. */
403 	for(off = 0; off < MEM_CHUNK0_SIZE; off += NBSG)
404 		set_segmap(MEM_CHUNK0_LOAD_VIRT + off,
405 			   get_segmap(MEM_CHUNK0_LOAD_VIRT_PROM + off));
406 
407 	/* Map chunk one for loading. */
408 	for(off = 0; off < MEM_CHUNK1_SIZE; off += NBSG)
409 		set_segmap(MEM_CHUNK1_LOAD_VIRT + off,
410 			   get_segmap(MEM_CHUNK1_LOAD_VIRT_PROM + off));
411 
412 	/* Tell our caller where in virtual space to load. */
413 	return MEM_CHUNK0_LOAD_VIRT;
414 }
415 
/*
 * Remaps memory for running: moves the part of the kernel that was
 * loaded high (chunk one, physical 0x2c0000) down to its final
 * physical home (0x0c0000), segment by segment, reusing PMEGs from
 * the PROM mapping as we go.  Returns the (virtual) kernel entry
 * point adjusted for the final mapping.
 */
void *
sun2_map_mem_run(entry)
	void *entry;
{
	vaddr_t off, off_end;
	int sme;
	u_int pte;

	/* Chunk zero is already mapped and copied. */

	/* Chunk one needs to be mapped and copied. */
	/* Protection/type bits template, taken from an existing PTE. */
	pte = (get_pte(0) & ~PG_FRAME);
	for(off = 0; off < MEM_CHUNK1_SIZE; ) {

		/*
		 * We use the PMEG immediately before the
		 * segment we're copying in the PROM virtual
		 * mapping of the chunk.  If this is the first
		 * segment, this is the PMEG the PROM used to
		 * map 0x2b8000 virtual to 0x2b8000 physical,
		 * which I'll assume is unused.  For the second
		 * and subsequent segments, this will be the
		 * PMEG used to map the previous segment, which
		 * is now (since we already copied it) unused.
		 */
		sme = get_segmap((MEM_CHUNK1_LOAD_VIRT_PROM + off) - NBSG);
		set_segmap(MEM_CHUNK1_COPY_VIRT + off, sme);

		/* Set the PTEs in this new PMEG. */
		/* Note: this inner loop advances off by one full segment. */
		for(off_end = off + NBSG; off < off_end; off += NBPG)
			set_pte(MEM_CHUNK1_COPY_VIRT + off,
				pte | PA_PGNUM(MEM_CHUNK1_COPY_PHYS + off));

		/* Copy this segment. */
		/* (off - NBSG) is the start of the segment just mapped. */
		bcopy((caddr_t)(MEM_CHUNK1_LOAD_VIRT + (off - NBSG)),
		      (caddr_t)(MEM_CHUNK1_COPY_VIRT + (off - NBSG)),
		      NBSG);
	}

	/* Tell our caller where in virtual space to enter. */
	return ((caddr_t)entry) - MEM_CHUNK0_LOAD_VIRT;
}
459 
460 void
461 sun2_init()
462 {
463 	/* Set the function pointers. */
464 	dev_mapin_p   = dev2_mapin;
465 	dvma_alloc_p  = dvma2_alloc;
466 	dvma_free_p   = dvma2_free;
467 	dvma_mapin_p  = dvma2_mapin;
468 	dvma_mapout_p = dvma2_mapout;
469 
470 	/* Prepare DVMA segment. */
471 	dvma2_init();
472 }
473