/*
 * Copyright (c) 1992 OMRON Corporation.
 * Copyright (c) 1991 Regents of the University of California.
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * %sccs.include.redist.c%
 *
 *	OMRON: $Id: pmap_bootstrap.c,v 1.2 92/06/14 18:11:27 moti Exp $
 *
 * from: hp300/hp300/pmap_bootstrap.c	7.1 (Berkeley) 6/5/92
 *
 *	@(#)pmap_bootstrap.c	7.2 (Berkeley) 10/11/92
 */

#include <sys/param.h>
#include <luna68k/luna68k/pte.h>
#include <machine/vmparam.h>
#include <machine/cpu.h>

#include <vm/vm.h>

/*
 * Allocate various and sundry SYSMAPs used in the days of old VM
 * and not yet converted.  XXX.
 */
#define BSDVM_COMPAT	1

extern char *etext;
extern int Sysptsize;

extern char *proc0paddr;
extern struct ste *Sysseg;
extern struct pte *Sysptmap, *Sysmap;
extern vm_offset_t Umap;

extern int maxmem, physmem;
extern vm_offset_t avail_start, avail_end, virtual_avail, virtual_end;
extern vm_size_t mem_size;
extern int protection_codes[];
#if defined(DYNPGSIZE)
extern int lunapagesperpage;
#endif

#if BSDVM_COMPAT
#include <sys/msgbuf.h>

/*
 * All those kernel PT submaps that BSD is so fond of
 */
struct pte	*CMAP1, *CMAP2, *mmap;
caddr_t		CADDR1, CADDR2, vmmap;
struct pte	*msgbufmap;
struct msgbuf	*msgbufp;
#endif

/*
 * LUNA H/W information.
 */
struct physmap io_physmap[] =
{
	{0x40000000,0x00100000,1},	/* debugger */
	{0x41000000,0x00020000,1},	/* PROM */
	{0x45000000,0x00000800,0},	/* calendar clock */
	{0x49000000,0x00000004,0},	/* pio-0 */
	{0x4D000000,0x00000004,0},	/* pio-1 */
	{0x51000000,0x00000008,0},	/* sio */
	{0x61000000,0x00000001,0},	/* TAS register */
	{0x63000000,0x00000001,0},	/* SYSINT flag */
	{0x6B000000,0x00000001,0},	/* internal FPP enable/disable */
	{0x6F000000,0x00000001,0},	/* external FPP enable/disable */
	{0x71000000,0x00020000,0},	/* 3 port RAM */
	{0,0,0}				/* terminate */
};
#define	IO_DBG_OFF	0		/* debugger offset in io_physmap[] */
#define	IOPTPAGE	((sizeof(io_physmap)/sizeof(struct physmap))-1)
int	ioptpage = IOPTPAGE;		/* for locore */
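
/*
 * One PT page is set aside below for each io_physmap[] entry, so
 * IOPTPAGE is the entry count minus the terminator: with the twelve
 * initializers above it evaluates to 11.
 */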

/*
 * Bootstrap the VM system.
 *
 * Called with MMU off so we must relocate all global references by `firstpa'
 * (don't call any functions here!)  `nextpa' is the first available physical
 * memory address.  Returns an updated first PA reflecting the memory we
 * have allocated.  MMU is still off when we return.
 *
 * XXX assumes sizeof(u_int) == sizeof(struct pte)
 * XXX a PIC compiler would make this much easier.
 */
void
pmap_bootstrap(nextpa, firstpa)
	vm_offset_t nextpa;
	register vm_offset_t firstpa;
{
	vm_offset_t kstpa, kptpa, iopa, kptmpa, ukptpa, p0upa;
	u_int nptpages, kstsize;
	register u_int protoste, protopte, *ste, *pte, *epte;

	/*
	 * Calculate important physical addresses:
	 *
	 *	kstpa		kernel segment table	1 page (!040)
	 *						N pages (040)
	 *
	 *	kptpa		statically allocated
	 *			kernel PT pages		Sysptsize+ pages
	 *
	 *	kptmpa		kernel PT map		1 page
	 *
	 *	ukptpa		Uarea kernel PT page	1 page
	 *
	 *	iopa		IO and debugger space
	 *			PT pages		IOPTPAGE pages
	 *
	 *	p0upa		proc 0 u-area		UPAGES pages
	 *
	 * The KVA corresponding to any of these PAs is:
	 *	(PA - firstpa + KERNBASE).
	 */
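	/*
	 * For illustration only (NBPG and Sysptsize values are hypothetical
	 * here): with NBPG = 0x1000, Sysptsize = 2 and IOPTPAGE = 11, the
	 * allocations below yield kstpa = nextpa, kptpa = +0x1000,
	 * kptmpa = +0x3000, ukptpa = +0x4000, iopa = +0x5000 and
	 * p0upa = +0x10000 (offsets from the incoming nextpa).
	 */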
	kstsize = 1;
	kstpa = nextpa;
	nextpa += kstsize * NBPG;
	kptpa = nextpa;
	nptpages = Sysptsize;
	nextpa += nptpages * NBPG;
	kptmpa = nextpa;
	nextpa += NBPG;
	ukptpa = nextpa;
	nextpa += NBPG;
	iopa = nextpa;
	nextpa += IOPTPAGE * NBPG;
	p0upa = nextpa;
	nextpa += UPAGES * NBPG;

	/*
	 * Initialize segment table and kernel page table map.
	 *
	 * On 68030s and earlier MMUs the two are identical except for
	 * the valid bits so both are initialized with essentially the
	 * same values.
	 * LUNA: UPAGES at 0x3FF00000 are used for mapping the current
	 * process u-area (u + kernel stack).
	 */

	/*
	 * Map the page table pages in both the HW segment table
	 * and the software Sysptmap.  Note that Sysptmap is also
	 * considered a PT page hence the +1.
	 */
	ste = (u_int *)kstpa;
	pte = (u_int *)kptmpa;
	epte = &pte[nptpages+1];
	protoste = kptpa | SG_RW | SG_V;
	protopte = kptpa | PG_RW | PG_CI | PG_V;
	while (pte < epte) {
		*ste++ = protoste;
		*pte++ = protopte;
		protoste += NBPG;
		protopte += NBPG;
	}
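	/*
	 * Since kptmpa physically follows the nptpages allocated at kptpa,
	 * the final iteration above (the `+1') maps Sysptmap over itself:
	 * kernel PT map entry `nptpages' points back at the PT map page.
	 */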
	/*
	 * Invalidate all remaining entries in both.
	 */
	epte = &((u_int *)kptmpa)[NPTEPG];
	while (pte < epte) {
		*ste++ = SG_NV;
		*pte++ = PG_NV;
	}
	/* LUNA: Uarea pt map */
	ste = (u_int *)kstpa;
	pte = (u_int *)kptmpa;
	ste[KERNELSTACK>>SG_ISHIFT] = ukptpa | SG_RW | SG_V;
	pte[KERNELSTACK>>SG_ISHIFT] = ukptpa | PG_RW | PG_CI | PG_V;
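	/*
	 * KERNELSTACK>>SG_ISHIFT selects the segment containing the u-area
	 * virtual address, so the two entries above route that segment
	 * through the dedicated ukptpa PT page (in both the HW segment
	 * table and Sysptmap).
	 */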

	/*
	 * Invalidate all entries in the u-area kernel PT page
	 * (the u-area PTEs will be validated later).
	 */
	pte = (u_int *)ukptpa;
	epte = &pte[NPTEPG];
	while (pte < epte)
		*pte++ = PG_NV;
	/*
	 * Initialize kernel page table.
	 * Start by invalidating the `nptpages' pages we have allocated.
	 */
	pte = (u_int *)kptpa;
	epte = &pte[nptpages * NPTEPG];
	while (pte < epte)
		*pte++ = PG_NV;
	/*
	 * Validate PTEs for kernel text (RO)
	 */
	pte = &((u_int *)kptpa)[luna_btop(KERNBASE)];
	epte = &pte[luna_btop(luna_trunc_page(&etext))];
#ifdef KGDB
	protopte = firstpa | PG_RW | PG_V;	/* XXX RW for now */
#else
	protopte = firstpa | PG_RO | PG_V;
#endif
	while (pte < epte) {
		*pte++ = protopte;
		protopte += NBPG;
	}
	/*
	 * Validate PTEs for kernel data/bss, dynamic data allocated
	 * by us so far (nextpa - firstpa bytes), and pages for proc0
	 * u-area and page table allocated below (RW).
	 */
	epte = &((u_int *)kptpa)[luna_btop(nextpa - firstpa)];
	protopte = (protopte & ~PG_PROT) | PG_RW;
	while (pte < epte) {
		*pte++ = protopte;
		protopte += NBPG;
	}

	/*
	 * Initialize the IO space PT pages: invalidate all IO PTEs.
	 */
	pte = (u_int *)iopa;
	epte = &pte[IOPTPAGE * NPTEPG];
	while (pte < epte)
		*pte++ = PG_NV;
	/*
	 * Validate the STEs and kernel PT map PTEs for IO space.
	 */
	{
		int index;

		protoste = iopa | SG_RW | SG_V;
		protopte = iopa | PG_RW | PG_CI | PG_V;
		for (index = 0; io_physmap[index].pm_phys; index++) {
			ste = &((u_int *)kstpa)[io_physmap[index].pm_phys/NBSEG];
			pte = &((u_int *)kptmpa)[io_physmap[index].pm_phys/NBSEG];
			*ste = protoste;
			*pte = protopte;
			protoste += NBPG;
			protopte += NBPG;
		}
		/*
		 * Finally, validate the IO (and debugger) space PTEs.
		 */
		for (index = 0; io_physmap[index].pm_phys; index++) {
			pte = (u_int *)iopa + index*NPTEPG;
			epte = &pte[luna_round_page(io_physmap[index].pm_size) >> PG_SHIFT];
			/*
			 * The first entry (index == IO_DBG_OFF) is special:
			 * the debugger is mapped at the fixed address
			 * 0x40000000 and is always loaded starting at
			 * physical page (maxmem+1).
			 */
			protopte = (index == IO_DBG_OFF ?
				    ((maxmem+1)<<PG_SHIFT) : io_physmap[index].pm_phys) |
				PG_RW | (io_physmap[index].pm_cache == 0 ? PG_CI : 0) | PG_V;

			/* physical page setup loop */
			while (pte < epte) {
				*pte++ = protopte;
				protopte += NBPG;
			}
		}
	}
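	/*
	 * For example, the sio entry {0x51000000,0x00000008,0} gets segment
	 * 0x51000000/NBSEG pointed at its own IO PT page, whose first PTE
	 * maps physical 0x51000000 cache-inhibited (pm_cache == 0); its
	 * 8-byte region rounds up to a single valid PTE.
	 */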
	/*
	 * Calculate important exported kernel virtual addresses
	 */
	/*
	 * Sysseg: base of kernel segment table
	 */
	Sysseg = (struct ste *)(kstpa - firstpa);
	/*
	 * Sysptmap: base of kernel page table map
	 */
	Sysptmap = (struct pte *)(kptmpa - firstpa);
	/*
	 * Sysmap: kernel page table (as mapped through Sysptmap)
	 * Immediately follows `nptpages' of static kernel page table.
	 */
	Sysmap = (struct pte *)luna_ptob(nptpages * NPTEPG);
	/*
	 * Umap: first of UPAGES PTEs (in Sysmap) for fixed-address u-area.
	 * HIGHPAGES PTEs from the end of Sysmap.
	 * LUNA: User stack address = 0x3ff00000.
	 */
	Umap = (vm_offset_t)Sysmap + (LUNA_MAX_PTSIZE/4 - HIGHPAGES * sizeof(struct pte));
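	/*
	 * (LUNA_MAX_PTSIZE/4 presumably spans the PTEs for the 1GB of
	 * user address space below 0x40000000, so Umap ends up at the
	 * PTEs for the u-area just under that boundary.)
	 */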
	/*
	 * Setup u-area for process 0.
	 */
	/*
	 * Validate PTEs in Sysmap corresponding to the u-area (Umap)
	 * which are HIGHPAGES from the end of the last kernel PT page
	 * allocated earlier.
	 */
	pte = &((u_int *)ukptpa)[NPTEPG - HIGHPAGES];
	epte = &pte[UPAGES];
	protopte = p0upa | PG_RW | PG_V;
	while (pte < epte) {
		*pte++ = protopte;
		protopte += NBPG;
	}
	/*
	 * Zero the u-area.
	 * NOTE: `pte' and `epte' aren't PTEs here.
	 */
	pte = (u_int *)p0upa;
	epte = (u_int *)(p0upa + UPAGES*NBPG);
	while (pte < epte)
		*pte++ = 0;
	/*
	 * Remember the u-area address so it can be loaded in the
	 * proc struct p_addr field later.
	 */
	proc0paddr = (char *)(p0upa - firstpa);

	/*
	 * VM data structures are now initialized, set up data for
	 * the pmap module.
	 */
	avail_start = nextpa;
	avail_end = luna_ptob(maxmem)
#if BSDVM_COMPAT
			/* XXX allow for msgbuf */
			- luna_round_page(sizeof(struct msgbuf))
#endif
				;
	mem_size = luna_ptob(physmem);
	virtual_avail = VM_MIN_KERNEL_ADDRESS + (nextpa - firstpa);
	virtual_end = VM_MAX_KERNEL_ADDRESS;
#if defined(DYNPGSIZE)
	lunapagesperpage = 1;		/* XXX */
#endif
	/*
	 * Initialize protection array.
	 */
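	/*
	 * The switch below enumerates all eight VM_PROT combinations
	 * (READ is 1, WRITE is 2, EXECUTE is 4): any writable combination
	 * gets PG_RW, any merely readable or executable one PG_RO.
	 */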
	{
		register int *kp, prot;

		kp = protection_codes;
		for (prot = 0; prot < 8; prot++) {
			switch (prot) {
			case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_NONE:
				*kp++ = 0;
				break;
			case VM_PROT_READ | VM_PROT_NONE | VM_PROT_NONE:
			case VM_PROT_READ | VM_PROT_NONE | VM_PROT_EXECUTE:
			case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_EXECUTE:
				*kp++ = PG_RO;
				break;
			case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE:
			case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_EXECUTE:
			case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_NONE:
			case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE:
				*kp++ = PG_RW;
				break;
			}
		}
	}

	/*
	 * Kernel page/segment table allocated in locore,
	 * just initialize pointers.
	 */
	{
		struct pmap *kpm = &kernel_pmap_store;

		kpm->pm_stab = Sysseg;
		kpm->pm_ptab = Sysmap;
		simple_lock_init(&kpm->pm_lock);
		kpm->pm_count = 1;
		kpm->pm_stpa = (struct ste *)kstpa;
	}

#if BSDVM_COMPAT
#define	SYSMAP(c, p, v, n) \
	v = (c)va; va += ((n)*LUNA_PAGE_SIZE); \
	p = (struct pte *)pte; pte += (n);
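
	/*
	 * For instance, SYSMAP(caddr_t, CMAP1, CADDR1, 1) expands to
	 *	CADDR1 = (caddr_t)va; va += (1*LUNA_PAGE_SIZE);
	 *	CMAP1 = (struct pte *)pte; pte += 1;
	 * i.e. it hands out one page of KVA and records the PTE mapping it.
	 */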

	/*
	 * Allocate all the submaps we need
	 */
	{
		vm_offset_t va = virtual_avail;

		pte = &Sysmap[luna_btop(va)];

		SYSMAP(caddr_t		,CMAP1		,CADDR1	   ,1	)
		SYSMAP(caddr_t		,CMAP2		,CADDR2	   ,1	)
		SYSMAP(caddr_t		,mmap		,vmmap	   ,1	)
		SYSMAP(struct msgbuf *	,msgbufmap	,msgbufp   ,1	)

		virtual_avail = va;
	}
#undef	SYSMAP
#endif
}

pmap_showstuff()
{
	int i;
	printf("CADDR1=%x pte at CMAP1=%x\n", CADDR1, CMAP1);
	printf("CADDR2=%x pte at CMAP2=%x\n", CADDR2, CMAP2);
	printf("vmmap=%x pte at mmap=%x\n", vmmap, mmap);
	printf("msgbufp=%x pte at msgbufmap=%x\n", msgbufp, msgbufmap);
	printf("virtual_avail=%x, virtual_end=%x\n", virtual_avail, virtual_end);
	for (i = 0; i < 8; i++)
		printf("%x ", protection_codes[i]);
	printf("\n");
}

#ifdef BOOTDEBUG
/*
 *	Bootstrap the system enough to run with virtual memory.
 *	Map the kernel's code and data, and allocate the system page table.
 *
 *	On the HP this is called after mapping has already been enabled
 *	and just syncs the pmap module with what has already been done.
 *	[We can't call it easily with mapping off since the kernel is not
 *	mapped with PA == VA, hence we would have to relocate every address
 *	from the linked base (virtual) address 0 to the actual (physical)
 *	address of 0xFFxxxxxx.]
 */
void
Opmap_bootstrap(firstaddr, loadaddr)
	vm_offset_t firstaddr;
	vm_offset_t loadaddr;
{
#if BSDVM_COMPAT
	vm_offset_t va;
	struct pte *pte;
#endif

	avail_start = firstaddr;
	avail_end = maxmem << PGSHIFT;

#if BSDVM_COMPAT
	/* XXX: allow for msgbuf */
	avail_end -= luna_round_page(sizeof(struct msgbuf));
#endif

	mem_size = physmem << PGSHIFT;
	virtual_avail = VM_MIN_KERNEL_ADDRESS + (firstaddr - loadaddr);
	virtual_end = VM_MAX_KERNEL_ADDRESS;
#if defined(DYNPGSIZE)
	lunapagesperpage = PAGE_SIZE / LUNA_PAGE_SIZE;
#endif
	/*
	 * Initialize protection array.
	 */
	{
		register int *kp, prot;

		kp = protection_codes;
		for (prot = 0; prot < 8; prot++) {
			switch (prot) {
			case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_NONE:
				*kp++ = 0;
				break;
			case VM_PROT_READ | VM_PROT_NONE | VM_PROT_NONE:
			case VM_PROT_READ | VM_PROT_NONE | VM_PROT_EXECUTE:
			case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_EXECUTE:
				*kp++ = PG_RO;
				break;
			case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE:
			case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_EXECUTE:
			case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_NONE:
			case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE:
				*kp++ = PG_RW;
				break;
			}
		}
	}
	/*
	 * Kernel page/segment table allocated in locore,
	 * just initialize pointers.
	 */
	kernel_pmap->pm_stab = Sysseg;
	kernel_pmap->pm_ptab = Sysmap;

	simple_lock_init(&kernel_pmap->pm_lock);
	kernel_pmap->pm_count = 1;

#if BSDVM_COMPAT
	/*
	 * Allocate all the submaps we need
	 */
#define	SYSMAP(c, p, v, n)	\
	v = (c)va; va += ((n)*LUNA_PAGE_SIZE); p = pte; pte += (n);

	va = virtual_avail;
	pte = &Sysmap[luna_btop(va)];

	SYSMAP(caddr_t		,CMAP1		,CADDR1	   ,1		)
	SYSMAP(caddr_t		,CMAP2		,CADDR2	   ,1		)
	SYSMAP(caddr_t		,mmap		,vmmap	   ,1		)
	SYSMAP(struct msgbuf *	,msgbufmap	,msgbufp   ,1		)
	virtual_avail = va;
#undef SYSMAP
#endif
}
#endif