/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * %sccs.include.redist.c%
 *
 *	@(#)pmap_bootstrap.c	8.1 (Berkeley) 06/10/93
 */

#include <sys/param.h>
#include <sys/msgbuf.h>
#include <hp300/hp300/pte.h>
#include <hp300/hp300/clockreg.h>
#include <machine/vmparam.h>
#include <machine/cpu.h>

#include <vm/vm.h>

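/*
 * pmap_bootstrap() below runs with the MMU off, while all globals are
 * linked at their eventual virtual addresses.  RELOC(v, t) therefore
 * accesses global `v' of type `t' through its current physical address,
 * i.e. its link address offset by `firstpa'.  For example,
 * RELOC(Sysptsize, int) reads the int Sysptsize from physical memory.
 */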
#define RELOC(v, t)	*((t*)((u_int)&(v) + firstpa))

extern char *etext;
extern int Sysptsize;
extern char *extiobase, *proc0paddr;
extern struct ste *Sysseg;
extern struct pte *Sysptmap, *Sysmap;
extern vm_offset_t Umap, CLKbase, MMUbase;

extern int maxmem, physmem;
extern vm_offset_t avail_start, avail_end, virtual_avail, virtual_end;
extern vm_size_t mem_size;
extern int protection_codes[];
#ifdef HAVEVAC
extern int pmap_aliasmask;
#endif

/*
 * Special purpose kernel virtual addresses, used for mapping
 * physical pages for a variety of temporary or permanent purposes:
 *
 *	CADDR1, CADDR2:	pmap zero/copy operations
 *	vmmap:		/dev/mem, crash dumps, parity error checking
 *	ledbase:	SPU LEDs
 *	msgbufp:	kernel message buffer
 */
caddr_t		CADDR1, CADDR2, vmmap, ledbase;
struct msgbuf	*msgbufp;

/*
 * Bootstrap the VM system.
 *
 * Called with MMU off so we must relocate all global references by `firstpa'
 * (don't call any functions here!)  `nextpa' is the first available physical
 * memory address.  The updated first available PA, reflecting the memory
 * we have allocated, is exported through avail_start.  MMU is still off
 * when we return.
 *
 * XXX assumes sizeof(u_int) == sizeof(struct pte)
 * XXX a PIC compiler would make this much easier.
 */
void
pmap_bootstrap(nextpa, firstpa)
	vm_offset_t nextpa;
	register vm_offset_t firstpa;
{
	vm_offset_t kstpa, kptpa, iiopa, eiopa, kptmpa, lkptpa, p0upa;
	u_int nptpages, kstsize;
	register u_int protoste, protopte, *ste, *pte, *epte;

	/*
	 * Calculate important physical addresses:
	 *
	 *	kstpa		kernel segment table	1 page (!040)
	 *						N pages (040)
	 *
	 *	kptpa		statically allocated
	 *			kernel PT pages		Sysptsize+ pages
	 *
	 *	iiopa		internal IO space
	 *			PT pages		IIOMAPSIZE pages
	 *
	 *	eiopa		external IO space
	 *			PT pages		EIOMAPSIZE pages
	 *
	 * [ Sysptsize is the number of pages of PT, IIOMAPSIZE and
	 *   EIOMAPSIZE are the number of PTEs, hence we need to round
	 *   the total to a page boundary with IO maps at the end. ]
	 *
	 *	kptmpa		kernel PT map		1 page
	 *
	 *	lkptpa		last kernel PT page	1 page
	 *
	 *	p0upa		proc 0 u-area		UPAGES pages
	 *
	 * The KVA corresponding to any of these PAs is:
	 *	(PA - firstpa + KERNBASE).
	 */
	if (RELOC(mmutype, int) == MMU_68040)
		kstsize = MAXKL2SIZE / (NPTEPG/SG4_LEV2SIZE);
	else
		kstsize = 1;
	kstpa = nextpa;
	nextpa += kstsize * NBPG;
	kptpa = nextpa;
	nptpages = RELOC(Sysptsize, int) +
		(IIOMAPSIZE + EIOMAPSIZE + NPTEPG - 1) / NPTEPG;
	nextpa += nptpages * NBPG;
	eiopa = nextpa - EIOMAPSIZE * sizeof(struct pte);
	iiopa = eiopa - IIOMAPSIZE * sizeof(struct pte);
	kptmpa = nextpa;
	nextpa += NBPG;
	lkptpa = nextpa;
	nextpa += NBPG;
	p0upa = nextpa;
	nextpa += UPAGES * NBPG;
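	/*
	 * At this point the allocations above yield the following
	 * physical layout, low address to high (the IO map PTEs sit
	 * in the tail of the static kernel PT pages):
	 *
	 *	kstpa	segment table		kstsize pages
	 *	kptpa	kernel PT		nptpages pages
	 *			(iiopa, then eiopa, at the end)
	 *	kptmpa	kernel PT map		1 page
	 *	lkptpa	last kernel PT page	1 page
	 *	p0upa	proc 0 u-area		UPAGES pages
	 *	nextpa	first unallocated PA
	 */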

	/*
	 * Initialize segment table and kernel page table map.
	 *
	 * On 68030s and earlier MMUs the two are identical except for
	 * the valid bits so both are initialized with essentially the
	 * same values.  On the 68040, which has a mandatory 3-level
	 * structure, the segment table holds the level 1 table and part
	 * (or all) of the level 2 table and hence is considerably
	 * different.  Here the first level consists of 128 descriptors
	 * (512 bytes) each mapping 32mb of address space.  Each of these
	 * points to blocks of 128 second level descriptors (512 bytes)
	 * each mapping 256kb.  Note that there may be additional "segment
	 * table" pages depending on how large MAXKL2SIZE is.
	 *
	 * Portions of the last segment of KVA space (0xFFF00000 -
	 * 0xFFFFFFFF) are mapped for a couple of purposes.  0xFFF00000
	 * for UPAGES is used for mapping the current process u-area
	 * (u + kernel stack).  The very last page (0xFFFFF000) is mapped
	 * to the last physical page of RAM to give us a region in which
	 * PA == VA.  We use the first part of this page for enabling
	 * and disabling mapping.  The last part of this page also contains
	 * info left by the boot ROM.
	 *
	 * XXX cramming two levels of mapping into the single "segment"
	 * table on the 68040 is intended as a temporary hack to get things
	 * working.  The 224mb of address space that this allows will most
	 * likely be insufficient in the future (at least for the kernel).
	 */
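	/*
	 * (In other words, assuming 4k pages, a 040 kernel VA breaks
	 * down as a 7 bit level 1 index, 7 bit level 2 index, 6 bit
	 * level 3 index and a 12 bit page offset.)
	 */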
	if (RELOC(mmutype, int) == MMU_68040) {
		register int num;

		/*
		 * First invalidate all of the "segment table" pages
		 * (levels 1 and 2 have the same "invalid" value).
		 */
		pte = (u_int *)kstpa;
		epte = &pte[kstsize * NPTEPG];
		while (pte < epte)
			*pte++ = SG_NV;
		/*
		 * Initialize level 2 descriptors (which immediately
		 * follow the level 1 table).  We need:
		 *	NPTEPG / SG4_LEV3SIZE
		 * level 2 descriptors to map each of the nptpages+1
		 * pages of PTEs.  Note that we set the "used" bit
		 * now to save the HW the expense of doing it.
		 */
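		/*
		 * (For example, with 4k pages NPTEPG is 1024 and
		 * SG4_LEV3SIZE is 64, giving 16 level 2 descriptors
		 * per PT page.)
		 */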
		num = (nptpages + 1) * (NPTEPG / SG4_LEV3SIZE);
		pte = &((u_int *)kstpa)[SG4_LEV1SIZE];
		epte = &pte[num];
		protoste = kptpa | SG_U | SG_RW | SG_V;
		while (pte < epte) {
			*pte++ = protoste;
			protoste += (SG4_LEV3SIZE * sizeof(struct ste));
		}
		/*
		 * Initialize level 1 descriptors.  We need:
		 *	roundup(num, SG4_LEV2SIZE) / SG4_LEV2SIZE
		 * level 1 descriptors to map the `num' level 2's.
		 */
		pte = (u_int *)kstpa;
		epte = &pte[roundup(num, SG4_LEV2SIZE) / SG4_LEV2SIZE];
		protoste = (u_int)&pte[SG4_LEV1SIZE] | SG_U | SG_RW | SG_V;
		while (pte < epte) {
			*pte++ = protoste;
			protoste += (SG4_LEV2SIZE * sizeof(struct ste));
		}
		/*
		 * Initialize the final level 1 descriptor to map the last
		 * block of level 2 descriptors.
		 */
		ste = &((u_int *)kstpa)[SG4_LEV1SIZE-1];
		pte = &((u_int *)kstpa)[kstsize*NPTEPG - SG4_LEV2SIZE];
		*ste = (u_int)pte | SG_U | SG_RW | SG_V;
		/*
		 * Now initialize the final portion of that block of
		 * descriptors to map the "last PT page".
		 */
		pte = &((u_int *)kstpa)[kstsize*NPTEPG - NPTEPG/SG4_LEV3SIZE];
		epte = &pte[NPTEPG/SG4_LEV3SIZE];
		protoste = lkptpa | SG_U | SG_RW | SG_V;
		while (pte < epte) {
			*pte++ = protoste;
			protoste += (SG4_LEV3SIZE * sizeof(struct ste));
		}
		/*
		 * Initialize Sysptmap
		 */
		pte = (u_int *)kptmpa;
		epte = &pte[nptpages+1];
		protopte = kptpa | PG_RW | PG_CI | PG_V;
		while (pte < epte) {
			*pte++ = protopte;
			protopte += NBPG;
		}
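		/*
		 * The final entry of Sysptmap maps the last kernel PT
		 * page, covering the top segment of KVA (the u-area
		 * and the PA == VA page described above).
		 */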
		pte = &((u_int *)kptmpa)[NPTEPG-1];
		*pte = lkptpa | PG_RW | PG_CI | PG_V;
	} else {
		/*
		 * Map the page table pages in both the HW segment table
		 * and the software Sysptmap.  Note that Sysptmap is also
		 * considered a PT page hence the +1.
		 */
		ste = (u_int *)kstpa;
		pte = (u_int *)kptmpa;
		epte = &pte[nptpages+1];
		protoste = kptpa | SG_RW | SG_V;
		protopte = kptpa | PG_RW | PG_CI | PG_V;
		while (pte < epte) {
			*ste++ = protoste;
			*pte++ = protopte;
			protoste += NBPG;
			protopte += NBPG;
		}
		/*
		 * Invalidate all but the last remaining entries in both.
		 */
		epte = &((u_int *)kptmpa)[NPTEPG-1];
		while (pte < epte) {
			*ste++ = SG_NV;
			*pte++ = PG_NV;
		}
		/*
		 * Initialize the last entry in each to point to the page
		 * table page allocated earlier.
		 */
		*ste = lkptpa | SG_RW | SG_V;
		*pte = lkptpa | PG_RW | PG_CI | PG_V;
	}
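	/*
	 * Either way, Sysptmap now holds PTEs for the nptpages static
	 * PT pages (plus itself) and, in its final slot, the last
	 * kernel PT page, so the software view is the same on all
	 * MMU types.
	 */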
	/*
	 * Invalidate all but the final entry in the last kernel PT page
	 * (u-area PTEs will be validated later).  The final entry maps
	 * the last page of physical memory.
	 */
	pte = (u_int *)lkptpa;
	epte = &pte[NPTEPG-1];
	while (pte < epte)
		*pte++ = PG_NV;
	*pte = MAXADDR | PG_RW | PG_CI | PG_V;
	/*
	 * Initialize kernel page table.
	 * Start by invalidating the `nptpages' that we have allocated.
	 */
	pte = (u_int *)kptpa;
	epte = &pte[nptpages * NPTEPG];
	while (pte < epte)
		*pte++ = PG_NV;
	/*
	 * Validate PTEs for kernel text (RO)
	 */
	pte = &((u_int *)kptpa)[hp300_btop(KERNBASE)];
	epte = &pte[hp300_btop(hp300_trunc_page(&etext))];
#ifdef KGDB
	protopte = firstpa | PG_RW | PG_V;	/* XXX RW for now */
#else
	protopte = firstpa | PG_RO | PG_V;
#endif
	while (pte < epte) {
		*pte++ = protopte;
		protopte += NBPG;
	}
	/*
	 * Validate PTEs for kernel data/bss, dynamic data allocated
	 * by us so far (nextpa - firstpa bytes), and pages for proc0
	 * u-area and page table allocated below (RW).
	 */
	epte = &((u_int *)kptpa)[hp300_btop(nextpa - firstpa)];
	protopte = (protopte & ~PG_PROT) | PG_RW;
	/*
	 * Enable copy-back caching of data pages
	 */
	if (RELOC(mmutype, int) == MMU_68040)
		protopte |= PG_CCB;
	while (pte < epte) {
		*pte++ = protopte;
		protopte += NBPG;
	}
	/*
	 * Finally, validate the internal IO space PTEs (RW+CI).
	 * We do this here since the 320/350 MMU registers (also
	 * used, but to a lesser extent, on other models) are mapped
	 * in this range and it would be nice to be able to access
	 * them after the MMU is turned on.
	 */
	pte = (u_int *)iiopa;
	epte = (u_int *)eiopa;
	protopte = INTIOBASE | PG_RW | PG_CI | PG_V;
	while (pte < epte) {
		*pte++ = protopte;
		protopte += NBPG;
	}

	/*
	 * Calculate important exported kernel virtual addresses
	 */
	/*
	 * Sysseg: base of kernel segment table
	 */
	RELOC(Sysseg, struct ste *) =
		(struct ste *)(kstpa - firstpa);
	/*
	 * Sysptmap: base of kernel page table map
	 */
	RELOC(Sysptmap, struct pte *) =
		(struct pte *)(kptmpa - firstpa);
	/*
	 * Sysmap: kernel page table (as mapped through Sysptmap)
	 * Immediately follows `nptpages' of static kernel page table.
	 */
	RELOC(Sysmap, struct pte *) =
		(struct pte *)hp300_ptob(nptpages * NPTEPG);
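	/*
	 * Note the self-map at work here: since kptmpa physically
	 * follows the static PT pages, the loops above installed
	 * Sysptmap itself as PT page number `nptpages', and its PTEs
	 * map the PT pages (and Sysptmap) beginning at the VA
	 * hp300_ptob(nptpages * NPTEPG) that Sysmap now points to.
	 */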
	/*
	 * Umap: first of UPAGES PTEs (in Sysmap) for fixed-address u-area.
	 * HIGHPAGES PTEs from the end of Sysmap.
	 */
	RELOC(Umap, vm_offset_t) =
		(vm_offset_t)RELOC(Sysmap, struct pte *) +
			(HP_MAX_PTSIZE - HIGHPAGES * sizeof(struct pte));
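	/*
	 * These PTEs live in the last kernel PT page (lkptpa) and are
	 * validated in the u-area setup further below.
	 */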
	/*
	 * intiobase, intiolimit: base and end of internal (DIO) IO space.
	 * IIOMAPSIZE pages prior to external IO space at end of static
	 * kernel page table.
	 */
	RELOC(intiobase, char *) =
		(char *)hp300_ptob(nptpages*NPTEPG - (IIOMAPSIZE+EIOMAPSIZE));
	RELOC(intiolimit, char *) =
		(char *)hp300_ptob(nptpages*NPTEPG - EIOMAPSIZE);
	/*
	 * extiobase: base of external (DIO-II) IO space.
	 * EIOMAPSIZE pages at the end of the static kernel page table.
	 */
	RELOC(extiobase, char *) =
		(char *)hp300_ptob(nptpages*NPTEPG - EIOMAPSIZE);
	/*
	 * CLKbase, MMUbase: important registers in internal IO space
	 * accessed from assembly language.
	 */
	RELOC(CLKbase, vm_offset_t) =
		(vm_offset_t)RELOC(intiobase, char *) + CLKBASE;
	RELOC(MMUbase, vm_offset_t) =
		(vm_offset_t)RELOC(intiobase, char *) + MMUBASE;

	/*
	 * Setup u-area for process 0.
	 */
	/*
	 * Validate PTEs in Sysmap corresponding to the u-area (Umap)
	 * which are HIGHPAGES from the end of the last kernel PT page
	 * allocated earlier.
	 */
	pte = &((u_int *)lkptpa)[NPTEPG - HIGHPAGES];
	epte = &pte[UPAGES];
	protopte = p0upa | PG_RW | PG_V;
	while (pte < epte) {
		*pte++ = protopte;
		protopte += NBPG;
	}
	/*
	 * Zero the u-area.
	 * NOTE: `pte' and `epte' aren't PTEs here.
	 */
	pte = (u_int *)p0upa;
	epte = (u_int *)(p0upa + UPAGES*NBPG);
	while (pte < epte)
		*pte++ = 0;
	/*
	 * Remember the u-area address so it can be loaded in the
	 * proc struct p_addr field later.
	 */
	RELOC(proc0paddr, char *) = (char *)(p0upa - firstpa);

	/*
	 * VM data structures are now initialized, set up data for
	 * the pmap module.
	 */
	RELOC(avail_start, vm_offset_t) = nextpa;
	RELOC(avail_end, vm_offset_t) =
		hp300_ptob(RELOC(maxmem, int))
			/* XXX allow for msgbuf */
			- hp300_round_page(sizeof(struct msgbuf));
	RELOC(mem_size, vm_size_t) = hp300_ptob(RELOC(physmem, int));
	RELOC(virtual_avail, vm_offset_t) =
		VM_MIN_KERNEL_ADDRESS + (nextpa - firstpa);
	RELOC(virtual_end, vm_offset_t) = VM_MAX_KERNEL_ADDRESS;

#ifdef HAVEVAC
	/*
	 * Determine VA aliasing distance if any
	 */
	if (RELOC(ectype, int) == EC_VIRT)
		if (RELOC(machineid, int) == HP_320)
			RELOC(pmap_aliasmask, int) = 0x3fff;	/* 16k */
		else if (RELOC(machineid, int) == HP_350)
			RELOC(pmap_aliasmask, int) = 0x7fff;	/* 32k */
#endif
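	/*
	 * (pmap_aliasmask spans the virtually-addressed external cache,
	 * 16k on the 320 and 32k on the 350; the pmap module uses it
	 * to detect VAs that could alias the same physical page in the
	 * cache and to flush or cache-inhibit such mappings.)
	 */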

	/*
	 * Initialize protection array.
	 * XXX don't use a switch statement, it might produce an
	 * absolute "jmp" table.
	 */
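	/*
	 * VM_PROT_READ, VM_PROT_WRITE and VM_PROT_EXECUTE are single
	 * bit flags, so the ORs below simply index the 8 entry array
	 * by protection mask; the VM_PROT_NONE terms are zero and are
	 * kept only for readability.
	 */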
	{
		register int *kp;

		kp = &RELOC(protection_codes, int);
		kp[VM_PROT_NONE|VM_PROT_NONE|VM_PROT_NONE] = 0;
		kp[VM_PROT_READ|VM_PROT_NONE|VM_PROT_NONE] = PG_RO;
		kp[VM_PROT_READ|VM_PROT_NONE|VM_PROT_EXECUTE] = PG_RO;
		kp[VM_PROT_NONE|VM_PROT_NONE|VM_PROT_EXECUTE] = PG_RO;
		kp[VM_PROT_NONE|VM_PROT_WRITE|VM_PROT_NONE] = PG_RW;
		kp[VM_PROT_NONE|VM_PROT_WRITE|VM_PROT_EXECUTE] = PG_RW;
		kp[VM_PROT_READ|VM_PROT_WRITE|VM_PROT_NONE] = PG_RW;
		kp[VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE] = PG_RW;
	}

	/*
	 * Kernel page/segment table allocated in locore,
	 * just initialize pointers.
	 */
	{
		struct pmap *kpm = &RELOC(kernel_pmap_store, struct pmap);

		kpm->pm_stab = RELOC(Sysseg, struct ste *);
		kpm->pm_ptab = RELOC(Sysmap, struct pte *);
		simple_lock_init(&kpm->pm_lock);
		kpm->pm_count = 1;
		kpm->pm_stpa = (struct ste *)kstpa;
		/*
		 * For the 040 we also initialize the free level 2
		 * descriptor mask noting that we have used:
		 *	0:		level 1 table
		 *	1 to `num':	map page tables
		 *	MAXKL2SIZE-1:	maps last-page page table
		 */
		if (RELOC(mmutype, int) == MMU_68040) {
			register int num;

			kpm->pm_stfree = ~l2tobm(0);
			num = roundup((nptpages + 1) * (NPTEPG / SG4_LEV3SIZE),
				      SG4_LEV2SIZE) / SG4_LEV2SIZE;
			while (num)
				kpm->pm_stfree &= ~l2tobm(num--);
			kpm->pm_stfree &= ~l2tobm(MAXKL2SIZE-1);
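			/*
			 * Level 2 blocks at or above MAXKL2SIZE do not
			 * exist in the pages allocated, so mark them
			 * in-use as well.
			 */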
			for (num = MAXKL2SIZE;
			     num < sizeof(kpm->pm_stfree)*NBBY;
			     num++)
				kpm->pm_stfree &= ~l2tobm(num);
		}
	}

	/*
	 * Allocate some fixed, special purpose kernel virtual addresses
	 */
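	/*
	 * Only the virtual addresses are reserved here; PTEs for these
	 * pages are installed later by their users (pmap zero/copy
	 * routines, /dev/mem, etc.) as needed.
	 */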
	{
		vm_offset_t va = RELOC(virtual_avail, vm_offset_t);

		RELOC(CADDR1, caddr_t) = (caddr_t)va;
		va += HP_PAGE_SIZE;
		RELOC(CADDR2, caddr_t) = (caddr_t)va;
		va += HP_PAGE_SIZE;
		RELOC(vmmap, caddr_t) = (caddr_t)va;
		va += HP_PAGE_SIZE;
		RELOC(ledbase, caddr_t) = (caddr_t)va;
		va += HP_PAGE_SIZE;
		RELOC(msgbufp, struct msgbuf *) = (struct msgbuf *)va;
		va += HP_PAGE_SIZE;
		RELOC(virtual_avail, vm_offset_t) = va;
	}
}