/*	$NetBSD: sun2.c,v 1.13 2020/06/20 18:45:06 riastradh Exp $	*/

/*-
 * Copyright (c) 1998 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Gordon W. Ross and Matthew Fredette.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Standalone functions specific to the Sun2.
 */

/*
 * We need to get the sun2 NBSG definition, even if we're
 * building this with a different sun68k target.
 */
#include <arch/sun2/include/pmap.h>

#include <sys/param.h>
#include <machine/idprom.h>
#include <machine/mon.h>

#include <arch/sun2/include/pte.h>
#include <arch/sun2/sun2/control.h>
#ifdef notyet
#include <arch/sun3/sun3/vme.h>
#else
#define VME16_BASE MBIO_BASE
#define VME16_MASK MBIO_MASK
#endif
#include <arch/sun2/sun2/mbmem.h>
#include <arch/sun2/sun2/mbio.h>

#include <stand.h>

#include "libsa.h"
#include "dvma.h"
#include "saio.h"	/* enum MAPTYPES */

#define OBIO_MASK 0xFFFFFF

static u_int	sun2_get_pte(vaddr_t);
static void	sun2_set_pte(vaddr_t, u_int);
static void	dvma2_init(void);
static char *	dvma2_alloc(int);
static void	dvma2_free(char *, int);
static char *	dvma2_mapin(char *, int);
static void	dvma2_mapout(char *, int);
static char *	dev2_mapin(int, u_long, int);
static int	sun2_get_segmap(vaddr_t);
static void	sun2_set_segmap(vaddr_t, int);

struct mapinfo {
	int maptype;
	int pgtype;
	u_int base;
	u_int mask;
};

#ifdef	notyet
struct mapinfo
sun2_mapinfo[MAP__NTYPES] = {
	/* On-board memory, I/O */
	{ MAP_MAINMEM,   PGT_OBMEM,   0,          ~0 },
	{ MAP_OBIO,      PGT_OBIO,    0,          OBIO_MASK },
	/* Multibus memory, I/O */
	{ MAP_MBMEM,     PGT_MBMEM, MBMEM_BASE, MBMEM_MASK },
	{ MAP_MBIO,      PGT_MBIO,  MBIO_BASE, MBIO_MASK },
	/* VME A16 */
	{ MAP_VME16A16D, PGT_VME_D16, VME16_BASE, VME16_MASK },
	{ MAP_VME16A32D, 0, 0, 0 },
	/* VME A24 */
	{ MAP_VME24A16D, 0, 0, 0 },
	{ MAP_VME24A32D, 0, 0, 0 },
	/* VME A32 */
	{ MAP_VME32A16D, 0, 0, 0 },
	{ MAP_VME32A32D, 0, 0, 0 },
};
#endif
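
/*
 * When the "notyet" code is enabled, dev2_mapin() below looks up the
 * requested maptype in this table to find the page type and the
 * physical base/mask used to build the PROM device mapping.
 */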

/* The virtual address we will use for PROM device mappings. */
int sun2_devmap = SUN3_MONSHORTSEG;

static char *
dev2_mapin(int maptype, u_long physaddr, int length)
{
#ifdef	notyet
	u_int i, pa, pte, pgva, va;

	if ((sun2_devmap + length) > SUN3_MONSHORTPAGE)
		panic("dev2_mapin: length=%d", length);

	for (i = 0; i < MAP__NTYPES; i++)
		if (sun2_mapinfo[i].maptype == maptype)
			goto found;
	panic("dev2_mapin: bad maptype");
found:

	if (physaddr & ~(sun2_mapinfo[i].mask))
		panic("dev2_mapin: bad address");
	pa = sun2_mapinfo[i].base + physaddr;

	pte = PA_PGNUM(pa) | PG_PERM |
		sun2_mapinfo[i].pgtype;

	va = pgva = sun2_devmap;
	do {
		sun2_set_pte(pgva, pte);
		pgva += NBPG;
		pte += 1;
		length -= NBPG;
	} while (length > 0);
	sun2_devmap = pgva;
	va += (physaddr & PGOFSET);

#ifdef	DEBUG_PROM
	if (debug)
		printf("dev2_mapin: va=0x%x pte=0x%x\n",
			   va, sun2_get_pte(va));
#endif
	return ((char*)va);
#else
	panic("dev2_mapin");
	return(NULL);
#endif
}

/*****************************************************************
 * DVMA support
 */

/*
 * The easiest way to deal with the need for DVMA mappings is to
 * create a DVMA alias mapping of the entire address range used by
 * the boot program.  That way, dvma_mapin can just compute the
 * DVMA alias address, and dvma_mapout does nothing.
 *
 * Note that this assumes that standalone programs will do I/O
 * operations only within the range (SA_MIN_VA .. SA_MAX_VA), which
 * dvma2_mapin() checks.
 */

#define DVMA_BASE 0x00f00000
#define DVMA_MAPLEN  0x38000	/* 256K - 32K (save MONSHORTSEG) */

#define SA_MIN_VA	0x220000
#define SA_MAX_VA	(SA_MIN_VA + DVMA_MAPLEN)
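
/*
 * For example (illustrative only): a buffer at local VA 0x230000 lies
 * 0x10000 bytes above SA_MIN_VA, so dvma2_mapin() below returns the
 * alias DVMA_BASE + 0x10000 = 0x00f10000, and dvma2_mapout() has
 * nothing to undo.
 */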

/* This points to the end of the free DVMA space. */
u_int dvma2_end = DVMA_BASE + DVMA_MAPLEN;

static void
dvma2_init(void)
{
	int segva, dmava, sme;

	segva = SA_MIN_VA;
	dmava = DVMA_BASE;

	while (segva < SA_MAX_VA) {
		sme = sun2_get_segmap(segva);
		sun2_set_segmap(dmava, sme);
		segva += NBSG;
		dmava += NBSG;
	}
}

/* Convert a local address to a DVMA address. */
static char *
dvma2_mapin(char *addr, int len)
{
	int va = (int)addr;

	/* Make sure the address is in the DVMA map. */
	if ((va < SA_MIN_VA) || (va >= SA_MAX_VA))
		panic("dvma2_mapin: 0x%x outside 0x%x..0x%x",
		    va, SA_MIN_VA, SA_MAX_VA);

	va -= SA_MIN_VA;
	va += DVMA_BASE;

	return ((char *) va);
}

/* Destroy a DVMA address alias. */
void
dvma2_mapout(char *addr, int len)
{
	int va = (int)addr;

	/* Make sure the address is in the DVMA map. */
	if ((va < DVMA_BASE) || (va >= (DVMA_BASE + DVMA_MAPLEN)))
		panic("dvma2_mapout");
}

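/*
 * Allocation is a simple bump-down from the top of the DVMA range, and
 * dvma2_free() is deliberately a no-op, so space is never recycled.
 * Illustrative use by a hypothetical caller (via the pointers set up
 * in sun2_init()):
 *
 *	char *buf = dvma_alloc_p(512);	// rounded up to a whole page
 *	...
 *	dvma_free_p(buf, 512);		// no-op
 */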
static char *
dvma2_alloc(int len)
{
	len = m68k_round_page(len);
	dvma2_end -= len;
	return((char*)dvma2_end);
}

void
dvma2_free(char *dvma, int len)
{
	/* not worth the trouble */
}

/*****************************************************************
 * Control space stuff...
 */
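
/*
 * These helpers poke the Sun2 MMU directly through control space:
 * sun2_get_pte()/sun2_set_pte() access the hardware page map a word at
 * a time, and sun2_get_segmap()/sun2_set_segmap() access the segment
 * map a byte at a time (each segment map entry is a PMEG number).  The
 * bit shuffling below converts between the hardware PTE layout and the
 * software view described in pte2.h.
 */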

static u_int
sun2_get_pte(vaddr_t va)
{
	u_int pte;

	pte = get_control_word(CONTROL_ADDR_BUILD(PGMAP_BASE, va));
	if (pte & PG_VALID) {
		/*
		 * This clears bit 30 (the kernel readable bit, which
		 * should always be set), bit 28 (which should always
		 * be set) and bit 26 (the user writable bit, which we
		 * always have tracking the kernel writable bit).  In
		 * the protection, this leaves bit 29 (the kernel
		 * writable bit) and bit 27 (the user readable bit).
		 * See pte2.h for more about this hack.
		 */
		pte &= ~(0x54000000);
		/*
		 * Flip bit 27 (the user readable bit) to become bit
		 * 27 (the PG_SYSTEM bit).
		 */
		pte ^= (PG_SYSTEM);
	}
	return (pte);
}

static void
sun2_set_pte(vaddr_t va, u_int pte)
{
	if (pte & PG_VALID) {
		/* Clear bit 26 (the user writable bit).  */
		pte &= (~0x04000000);
		/*
		 * Flip bit 27 (the PG_SYSTEM bit) to become bit 27
		 * (the user readable bit).
		 */
		pte ^= (PG_SYSTEM);
		/*
		 * Always set bits 30 (the kernel readable bit) and
		 * bit 28, and set bit 26 (the user writable bit) iff
		 * bit 29 (the kernel writable bit) is set *and* bit
		 * 27 (the user readable bit) is set.  This latter bit
		 * of logic is expressed in the bizarre second term
		 * below, chosen because it needs no branches.
		 */
#if (PG_WRITE >> 2) != PG_SYSTEM
#error	"PG_WRITE and PG_SYSTEM definitions don't match!"
#endif
		pte |= 0x50000000
		    | ((((pte & PG_WRITE) >> 2) & pte) >> 1);
	}
	set_control_word(CONTROL_ADDR_BUILD(PGMAP_BASE, va), pte);
}
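
/*
 * Worked example (illustrative, assuming the pte2.h values PG_VALID =
 * bit 31, PG_WRITE = bit 29, PG_SYSTEM = bit 27): a software PTE of
 * PG_VALID|PG_WRITE with PG_SYSTEM clear, e.g. 0xa0000000, is written
 * to the hardware as 0xfc000000 (valid, kernel r/w, user r/w), while
 * PG_VALID|PG_WRITE|PG_SYSTEM, e.g. 0xa8000000, is written as
 * 0xf0000000 (valid, kernel r/w, no user access).
 */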

static int
sun2_get_segmap(vaddr_t va)
{
	va = CONTROL_ADDR_BUILD(SEGMAP_BASE, va);
	return (get_control_byte(va));
}

static void
sun2_set_segmap(vaddr_t va, int sme)
{
	va = CONTROL_ADDR_BUILD(SEGMAP_BASE, va);
	set_control_byte(va, sme);
}

/*
 * Copy the IDPROM contents into the passed buffer.
 * The caller (idprom.c) will do the checksum.
 */
void
sun2_getidprom(u_char *dst)
{
	vaddr_t src;	/* control space address */
	int len, x;

	src = IDPROM_BASE;
	len = sizeof(struct idprom);
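	/*
	 * The IDPROM appears in control space one byte per page, which
	 * is why src advances by NBPG for each byte copied.
	 */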
	do {
		x = get_control_byte(src);
		src += NBPG;
		*dst++ = x;
	} while (--len > 0);
}

/*****************************************************************
 * Init our function pointers, etc.
 */

/*
 * For booting, the PROM in fredette's Sun 2/120 doesn't map
 * much main memory, and what is mapped is mapped strangely.
 * Low virtual memory is mapped like:
 *
 * 0x000000 - 0x0bffff virtual -> 0x000000 - 0x0bffff physical
 * 0x0c0000 - 0x0fffff virtual -> invalid
 * 0x100000 - 0x13ffff virtual -> 0x0c0000 - 0x0fffff physical
 * 0x200800 - 0x3fffff virtual -> 0x200800 - 0x3fffff physical
 *
 * I think the SunOS authors wanted to load kernels starting at
 * physical zero, and assumed that kernels would be less
 * than 768K (0x0c0000) long.  Also, the PROM maps physical
 * 0x0c0000 - 0x0fffff into DVMA space, so we can't take the
 * easy road and just add more mappings to use that physical
 * memory while loading (the PROM might do DMA there).
 *
 * What we do, then, is assume a 4MB machine (you'll really
 * need that to run NetBSD at all anyways), and we map two
 * chunks of physical and virtual space:
 *
 * 0x400000 - 0x4bffff virtual -> 0x000000 - 0x0bffff physical
 * 0x4c0000 - 0x5fffff virtual -> 0x2c0000 - 0x3fffff physical
 *
 * And then we load starting at virtual 0x400000.  We will do
 * all of this mapping just by copying PMEGs.
 *
 * After the load is done, but before we enter the kernel, we're
 * done with the PROM, so we copy the part of the kernel that
 * got loaded at physical 0x2c0000 down to physical 0x0c0000.
 * This can't just be a PMEG copy; we've actually got to move
 * bytes in physical memory.
 *
 * These two chunks of physical and virtual space are defined
 * in macros below.  Some of the macros are only for completeness:
 */
#define MEM_CHUNK0_SIZE			(0x0c0000)
#define MEM_CHUNK0_LOAD_PHYS		(0x000000)
#define MEM_CHUNK0_LOAD_VIRT		(0x400000)
#define MEM_CHUNK0_LOAD_VIRT_PROM	MEM_CHUNK0_LOAD_PHYS
#define MEM_CHUNK0_COPY_PHYS		MEM_CHUNK0_LOAD_PHYS
#define MEM_CHUNK0_COPY_VIRT		MEM_CHUNK0_COPY_PHYS

#define MEM_CHUNK1_SIZE			(0x140000)
#define MEM_CHUNK1_LOAD_PHYS		(0x2c0000)
#define MEM_CHUNK1_LOAD_VIRT		(MEM_CHUNK0_LOAD_VIRT + MEM_CHUNK0_SIZE)
#define MEM_CHUNK1_LOAD_VIRT_PROM	MEM_CHUNK1_LOAD_PHYS
#define MEM_CHUNK1_COPY_PHYS		(MEM_CHUNK0_LOAD_PHYS + MEM_CHUNK0_SIZE)
#define MEM_CHUNK1_COPY_VIRT		MEM_CHUNK1_COPY_PHYS
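
/*
 * Sanity arithmetic: MEM_CHUNK0_SIZE + MEM_CHUNK1_SIZE = 0x200000, so
 * once sun2_map_mem_run() has copied chunk one down, the loaded kernel
 * occupies contiguous physical memory 0x000000 - 0x1fffff, the first
 * 2MB of the assumed 4MB machine.
 */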

/* Maps memory for loading. */
u_long
sun2_map_mem_load(void)
{
	vaddr_t off;

	/* Map chunk zero for loading. */
	for(off = 0; off < MEM_CHUNK0_SIZE; off += NBSG)
		sun2_set_segmap(MEM_CHUNK0_LOAD_VIRT + off,
			   sun2_get_segmap(MEM_CHUNK0_LOAD_VIRT_PROM + off));

	/* Map chunk one for loading. */
	for(off = 0; off < MEM_CHUNK1_SIZE; off += NBSG)
		sun2_set_segmap(MEM_CHUNK1_LOAD_VIRT + off,
			   sun2_get_segmap(MEM_CHUNK1_LOAD_VIRT_PROM + off));

	/* Tell our caller where in virtual space to load. */
	return MEM_CHUNK0_LOAD_VIRT;
}

/* Remaps memory for running. */
void *
sun2_map_mem_run(void *entry)
{
	vaddr_t off, off_end;
	int sme;
	u_int pte;

	/* Chunk zero is already mapped and copied. */

	/* Chunk one needs to be mapped and copied. */
	pte = (sun2_get_pte(0) & ~PG_FRAME);
	for(off = 0; off < MEM_CHUNK1_SIZE; ) {

		/*
		 * We use the PMEG immediately before the
		 * segment we're copying in the PROM virtual
		 * mapping of the chunk.  If this is the first
		 * segment, this is the PMEG the PROM used to
		 * map 0x2b8000 virtual to 0x2b8000 physical,
		 * which I'll assume is unused.  For the second
		 * and subsequent segments, this will be the
		 * PMEG used to map the previous segment, which
		 * is now (since we already copied it) unused.
		 */
		sme = sun2_get_segmap((MEM_CHUNK1_LOAD_VIRT_PROM + off) - NBSG);
		sun2_set_segmap(MEM_CHUNK1_COPY_VIRT + off, sme);

		/* Set the PTEs in this new PMEG. */
		for(off_end = off + NBSG; off < off_end; off += NBPG)
			sun2_set_pte(MEM_CHUNK1_COPY_VIRT + off,
				pte | PA_PGNUM(MEM_CHUNK1_COPY_PHYS + off));

		/* Copy this segment. */
		memcpy((void *)(MEM_CHUNK1_COPY_VIRT + (off - NBSG)),
		       (void *)(MEM_CHUNK1_LOAD_VIRT + (off - NBSG)),
		       NBSG);
	}

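	/*
	 * The entry pointer we were handed refers to the load window at
	 * MEM_CHUNK0_LOAD_VIRT, which maps physical zero, so rebasing it
	 * by MEM_CHUNK0_LOAD_VIRT yields the low address at which the
	 * kernel will actually be entered.
	 */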
	/* Tell our caller where in virtual space to enter. */
	return ((char *)entry) - MEM_CHUNK0_LOAD_VIRT;
}

void
sun2_init(void)
{
	/* Set the function pointers. */
	dev_mapin_p   = dev2_mapin;
	dvma_alloc_p  = dvma2_alloc;
	dvma_free_p   = dvma2_free;
	dvma_mapin_p  = dvma2_mapin;
	dvma_mapout_p = dvma2_mapout;

	/* Prepare DVMA segment. */
	dvma2_init();
}