1 /* 2 * Copyright (c) 1991 Regents of the University of California. 3 * All rights reserved. 4 * 5 * This code is derived from software contributed to Berkeley by 6 * the Systems Programming Group of the University of Utah Computer 7 * Science Department. 8 * 9 * %sccs.include.redist.c% 10 * 11 * @(#)pmap_bootstrap.c 7.2 (Berkeley) 06/15/92 12 */ 13 14 #include "param.h" 15 #include "pte.h" 16 #include "clockreg.h" 17 #include "../include/vmparam.h" 18 #include "../include/cpu.h" 19 20 #include "vm/vm.h" 21 22 /* 23 * Allocate various and sundry SYSMAPs used in the days of old VM 24 * and not yet converted. XXX. 25 */ 26 #define BSDVM_COMPAT 1 27 28 #define RELOC(v, t) *((t*)((u_int)&(v) + firstpa)) 29 30 extern char *etext; 31 extern int Sysptsize; 32 extern char *extiobase, *proc0paddr; 33 extern struct ste *Sysseg; 34 extern struct pte *Sysptmap, *Sysmap; 35 extern vm_offset_t Umap, CLKbase, MMUbase; 36 37 extern int maxmem, physmem; 38 extern vm_offset_t avail_start, avail_end, virtual_avail, virtual_end; 39 extern vm_size_t mem_size; 40 extern int pmap_aliasmask, protection_codes[]; 41 #if defined(DYNPGSIZE) 42 extern int hppagesperpage; 43 #endif 44 45 #if BSDVM_COMPAT 46 #include "msgbuf.h" 47 48 /* 49 * All those kernel PT submaps that BSD is so fond of 50 */ 51 struct pte *CMAP1, *CMAP2, *mmap; 52 caddr_t CADDR1, CADDR2, vmmap; 53 struct pte *msgbufmap; 54 struct msgbuf *msgbufp; 55 #endif 56 57 /* 58 * Bootstrap the VM system. 59 * 60 * Called with MMU off so we must relocate all global references by `firstpa' 61 * (don't call any functions here!) `nextpa' is the first available physical 62 * memory address. Returns an updated first PA reflecting the memory we 63 * have allocated. MMU is still off when we return. 64 * 65 * XXX assumes sizeof(u_int) == sizeof(struct pte) 66 * XXX a PIC compiler would make this much easier. 
 */
void
pmap_bootstrap(nextpa, firstpa)
	vm_offset_t nextpa;
	register vm_offset_t firstpa;
{
	vm_offset_t kstpa, kptpa, iiopa, eiopa, kptmpa, lkptpa, p0upa;
	u_int nptpages, kstsize;
	register u_int protoste, protopte, *ste, *pte, *epte;

	/*
	 * Calculate important physical addresses:
	 *
	 *	kstpa		kernel segment table	1 page (!040)
	 *						N pages (040)
	 *
	 *	kptpa		statically allocated
	 *			kernel PT pages		Sysptsize+ pages
	 *
	 *	iiopa		internal IO space
	 *			PT pages		IIOMAPSIZE pages
	 *
	 *	eiopa		external IO space
	 *			PT pages		EIOMAPSIZE pages
	 *
	 * [ Sysptsize is the number of pages of PT, IIOMAPSIZE and
	 *   EIOMAPSIZE are the number of PTEs, hence we need to round
	 *   the total to a page boundary with IO maps at the end. ]
	 *
	 *	kptmpa		kernel PT map		1 page
	 *
	 *	lkptpa		last kernel PT page	1 page
	 *
	 *	p0upa		proc 0 u-area		UPAGES pages
	 *
	 * The KVA corresponding to any of these PAs is:
	 *	(PA - firstpa + KERNBASE).
	 */
	if (RELOC(mmutype, int) == MMU_68040)
		/* 040: room for the level 1 table plus MAXKL2SIZE level 2 tables */
		kstsize = MAXKL2SIZE / (NPTEPG/SG4_LEV2SIZE);
	else
		kstsize = 1;
	kstpa = nextpa;
	nextpa += kstsize * NBPG;
	kptpa = nextpa;
	nptpages = RELOC(Sysptsize, int) +
		(IIOMAPSIZE + EIOMAPSIZE + NPTEPG - 1) / NPTEPG;
	nextpa += nptpages * NBPG;
	/* IO map PTEs sit at the very end of the static kernel PT pages */
	eiopa = nextpa - EIOMAPSIZE * sizeof(struct pte);
	iiopa = eiopa - IIOMAPSIZE * sizeof(struct pte);
	kptmpa = nextpa;
	nextpa += NBPG;
	lkptpa = nextpa;
	nextpa += NBPG;
	p0upa = nextpa;
	nextpa += UPAGES * NBPG;

	/*
	 * Initialize segment table and kernel page table map.
	 *
	 * On 68030s and earlier MMUs the two are identical except for
	 * the valid bits so both are initialized with essentially the
	 * same values.  On the 68040, which has a mandatory 3-level
	 * structure, the segment table holds the level 1 table and part
	 * (or all) of the level 2 table and hence is considerably
	 * different.  Here the first level consists of 128 descriptors
	 * (512 bytes) each mapping 32mb of address space.  Each of these
	 * points to blocks of 128 second level descriptors (512 bytes)
	 * each mapping 256kb.  Note that there may be additional "segment
	 * table" pages depending on how large MAXKL2SIZE is.
	 *
	 * Portions of the last segment of KVA space (0xFFF00000 -
	 * 0xFFFFFFFF) are mapped for a couple of purposes.  0xFFF00000
	 * for UPAGES is used for mapping the current process u-area
	 * (u + kernel stack).  The very last page (0xFFFFF000) is mapped
	 * to the last physical page of RAM to give us a region in which
	 * PA == VA.  We use the first part of this page for enabling
	 * and disabling mapping.  The last part of this page also contains
	 * info left by the boot ROM.
	 *
	 * XXX cramming two levels of mapping into the single "segment"
	 * table on the 68040 is intended as a temporary hack to get things
	 * working.  The 224mb of address space that this allows will most
	 * likely be insufficient in the future (at least for the kernel).
	 */
	if (RELOC(mmutype, int) == MMU_68040) {
		register int num;

		/*
		 * First invalidate the entire "segment table" pages
		 * (levels 1 and 2 have the same "invalid" value).
		 */
		pte = (u_int *)kstpa;
		epte = &pte[kstsize * NPTEPG];
		while (pte < epte)
			*pte++ = SG_NV;
		/*
		 * Initialize level 2 descriptors (which immediately
		 * follow the level 1 table).  We need:
		 *	NPTEPG / SG4_LEV3SIZE
		 * level 2 descriptors to map each of the nptpages+1
		 * pages of PTEs.  Note that we set the "used" bit
		 * now to save the HW the expense of doing it.
		 */
		num = (nptpages + 1) * (NPTEPG / SG4_LEV3SIZE);
		pte = &((u_int *)kstpa)[SG4_LEV1SIZE];
		epte = &pte[num];
		protoste = kptpa | SG_U | SG_RW | SG_V;
		while (pte < epte) {
			*pte++ = protoste;
			protoste += (SG4_LEV3SIZE * sizeof(struct ste));
		}
		/*
		 * Initialize level 1 descriptors.  We need:
		 *	roundup(num, SG4_LEV2SIZE) / SG4_LEV2SIZE
		 * level 1 descriptors to map the `num' level 2's.
		 */
		pte = (u_int *)kstpa;
		epte = &pte[roundup(num, SG4_LEV2SIZE) / SG4_LEV2SIZE];
		protoste = (u_int)&pte[SG4_LEV1SIZE] | SG_U | SG_RW | SG_V;
		while (pte < epte) {
			*pte++ = protoste;
			protoste += (SG4_LEV2SIZE * sizeof(struct ste));
		}
		/*
		 * Initialize the final level 1 descriptor to map the last
		 * block of level 2 descriptors.
		 */
		ste = &((u_int *)kstpa)[SG4_LEV1SIZE-1];
		pte = &((u_int *)kstpa)[kstsize*NPTEPG - SG4_LEV2SIZE];
		*ste = (u_int)pte | SG_U | SG_RW | SG_V;
		/*
		 * Now initialize the final portion of that block of
		 * descriptors to map the "last PT page".
		 */
		pte = &((u_int *)kstpa)[kstsize*NPTEPG - NPTEPG/SG4_LEV3SIZE];
		epte = &pte[NPTEPG/SG4_LEV3SIZE];
		protoste = lkptpa | SG_U | SG_RW | SG_V;
		while (pte < epte) {
			*pte++ = protoste;
			protoste += (SG4_LEV3SIZE * sizeof(struct ste));
		}
		/*
		 * Initialize Sysptmap
		 */
		pte = (u_int *)kptmpa;
		epte = &pte[nptpages+1];
		protopte = kptpa | PG_RW | PG_CI | PG_V;
		while (pte < epte) {
			*pte++ = protopte;
			protopte += NBPG;
		}
		/* map the last Sysptmap slot to the last kernel PT page */
		pte = &((u_int *)kptmpa)[NPTEPG-1];
		*pte = lkptpa | PG_RW | PG_CI | PG_V;
	} else {
		/*
		 * Map the page table pages in both the HW segment table
		 * and the software Sysptmap.  Note that Sysptmap is also
		 * considered a PT page hence the +1.
		 */
		ste = (u_int *)kstpa;
		pte = (u_int *)kptmpa;
		epte = &pte[nptpages+1];
		protoste = kptpa | SG_RW | SG_V;
		protopte = kptpa | PG_RW | PG_CI | PG_V;
		while (pte < epte) {
			*ste++ = protoste;
			*pte++ = protopte;
			protoste += NBPG;
			protopte += NBPG;
		}
		/*
		 * Invalidate all but the last remaining entries in both.
		 * (ste and pte advance in lockstep, so bounding on pte
		 * bounds both.)
		 */
		epte = &((u_int *)kptmpa)[NPTEPG-1];
		while (pte < epte) {
			*ste++ = SG_NV;
			*pte++ = PG_NV;
		}
		/*
		 * Initialize the last to point to the page
		 * table page allocated earlier.
		 */
		*ste = lkptpa | SG_RW | SG_V;
		*pte = lkptpa | PG_RW | PG_CI | PG_V;
	}
	/*
	 * Invalidate all but the final entry in the last kernel PT page
	 * (u-area PTEs will be validated later).  The final entry maps
	 * the last page of physical memory.
	 */
	pte = (u_int *)lkptpa;
	epte = &pte[NPTEPG-1];
	while (pte < epte)
		*pte++ = PG_NV;
	*pte = MAXADDR | PG_RW | PG_CI | PG_V;
	/*
	 * Initialize kernel page table.
	 * Start by invalidating the `nptpages' that we have allocated.
	 */
	pte = (u_int *)kptpa;
	epte = &pte[nptpages * NPTEPG];
	while (pte < epte)
		*pte++ = PG_NV;
	/*
	 * Validate PTEs for kernel text (RO)
	 */
	pte = &((u_int *)kptpa)[hp300_btop(KERNBASE)];
	epte = &pte[hp300_btop(hp300_trunc_page(&etext))];
#ifdef KGDB
	protopte = firstpa | PG_RW | PG_V;	/* XXX RW for now */
#else
	protopte = firstpa | PG_RO | PG_V;
#endif
	while (pte < epte) {
		*pte++ = protopte;
		protopte += NBPG;
	}
	/*
	 * Validate PTEs for kernel data/bss, dynamic data allocated
	 * by us so far (nextpa - firstpa bytes), and pages for proc0
	 * u-area and page table allocated below (RW).
	 */
	epte = &((u_int *)kptpa)[hp300_btop(nextpa - firstpa)];
	protopte = (protopte & ~PG_PROT) | PG_RW;
	/*
	 * Enable copy-back caching of data pages
	 */
	if (RELOC(mmutype, int) == MMU_68040)
		protopte |= PG_CCB;
	while (pte < epte) {
		*pte++ = protopte;
		protopte += NBPG;
	}
	/*
	 * Finally, validate the internal IO space PTEs (RW+CI).
	 * We do this here since the 320/350 MMU registers (also
	 * used, but to a lesser extent, on other models) are mapped
	 * in this range and it would be nice to be able to access
	 * them after the MMU is turned on.
	 */
	pte = (u_int *)iiopa;
	epte = (u_int *)eiopa;
	protopte = INTIOBASE | PG_RW | PG_CI | PG_V;
	while (pte < epte) {
		*pte++ = protopte;
		protopte += NBPG;
	}

	/*
	 * Calculate important exported kernel virtual addresses
	 */
	/*
	 * Sysseg: base of kernel segment table
	 */
	RELOC(Sysseg, struct ste *) =
		(struct ste *)(kstpa - firstpa);
	/*
	 * Sysptmap: base of kernel page table map
	 */
	RELOC(Sysptmap, struct pte *) =
		(struct pte *)(kptmpa - firstpa);
	/*
	 * Sysmap: kernel page table (as mapped through Sysptmap)
	 * Immediately follows `nptpages' of static kernel page table.
	 */
	RELOC(Sysmap, struct pte *) =
		(struct pte *)hp300_ptob(nptpages * NPTEPG);
	/*
	 * Umap: first of UPAGES PTEs (in Sysmap) for fixed-address u-area.
	 * HIGHPAGES PTEs from the end of Sysmap.
	 */
	RELOC(Umap, vm_offset_t) =
		(vm_offset_t)RELOC(Sysmap, struct pte *) +
		(HP_MAX_PTSIZE - HIGHPAGES * sizeof(struct pte));
	/*
	 * intiobase, intiolimit: base and end of internal (DIO) IO space.
	 * IIOMAPSIZE pages prior to external IO space at end of static
	 * kernel page table.
	 */
	RELOC(intiobase, char *) =
		(char *)hp300_ptob(nptpages*NPTEPG - (IIOMAPSIZE+EIOMAPSIZE));
	RELOC(intiolimit, char *) =
		(char *)hp300_ptob(nptpages*NPTEPG - EIOMAPSIZE);
	/*
	 * extiobase: base of external (DIO-II) IO space.
	 * EIOMAPSIZE pages at the end of the static kernel page table.
	 */
	RELOC(extiobase, char *) =
		(char *)hp300_ptob(nptpages*NPTEPG - EIOMAPSIZE);
	/*
	 * CLKbase, MMUbase: important registers in internal IO space
	 * accessed from assembly language.
	 */
	RELOC(CLKbase, vm_offset_t) =
		(vm_offset_t)RELOC(intiobase, char *) + CLKBASE;
	RELOC(MMUbase, vm_offset_t) =
		(vm_offset_t)RELOC(intiobase, char *) + MMUBASE;

	/*
	 * Setup u-area for process 0.
	 */
	/*
	 * Validate PTEs in Sysmap corresponding to the u-area (Umap)
	 * which are HIGHPAGES from the end of the last kernel PT page
	 * allocated earlier.
	 */
	pte = &((u_int *)lkptpa)[NPTEPG - HIGHPAGES];
	epte = &pte[UPAGES];
	protopte = p0upa | PG_RW | PG_V;
	while (pte < epte) {
		*pte++ = protopte;
		protopte += NBPG;
	}
	/*
	 * Zero the u-area.
	 * NOTE: `pte' and `epte' aren't PTEs here.
	 */
	pte = (u_int *)p0upa;
	epte = (u_int *)(p0upa + UPAGES*NBPG);
	while (pte < epte)
		*pte++ = 0;
	/*
	 * Remember the u-area address so it can be loaded in the
	 * proc struct p_addr field later.
	 */
	RELOC(proc0paddr, char *) = (char *)(p0upa - firstpa);

	/*
	 * VM data structures are now initialized, set up data for
	 * the pmap module.
	 */
	RELOC(avail_start, vm_offset_t) = nextpa;
	RELOC(avail_end, vm_offset_t) =
		hp300_ptob(RELOC(maxmem, int))
#if BSDVM_COMPAT
		/* XXX allow for msgbuf */
		- hp300_round_page(sizeof(struct msgbuf))
#endif
		;
	RELOC(mem_size, vm_size_t) = hp300_ptob(RELOC(physmem, int));
	RELOC(virtual_avail, vm_offset_t) =
		VM_MIN_KERNEL_ADDRESS + (nextpa - firstpa);
	RELOC(virtual_end, vm_offset_t) = VM_MAX_KERNEL_ADDRESS;
#if defined(DYNPGSIZE)
	RELOC(hppagesperpage, int) = 1;		/* XXX */
#endif

	/*
	 * Determine VA aliasing distance if any
	 * (virtually-indexed external caches on the 320/350 alias
	 * within the cache size).
	 */
	if (RELOC(ectype, int) == EC_VIRT)
		switch (RELOC(machineid, int)) {
		case HP_320:
			RELOC(pmap_aliasmask, int) = 0x3fff;	/* 16k */
			break;
		case HP_350:
			RELOC(pmap_aliasmask, int) = 0x7fff;	/* 32k */
			break;
		}

	/*
	 * Initialize protection array.
	 * The switch covers all 8 VM_PROT combinations: anything
	 * readable-only maps to PG_RO, anything writable to PG_RW.
	 */
	{
		register int *kp, prot;

		kp = &RELOC(protection_codes, int);
		for (prot = 0; prot < 8; prot++) {
			switch (prot) {
			case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_NONE:
				*kp++ = 0;
				break;
			case VM_PROT_READ | VM_PROT_NONE | VM_PROT_NONE:
			case VM_PROT_READ | VM_PROT_NONE | VM_PROT_EXECUTE:
			case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_EXECUTE:
				*kp++ = PG_RO;
				break;
			case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE:
			case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_EXECUTE:
			case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_NONE:
			case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE:
				*kp++ = PG_RW;
				break;
			}
		}
	}

	/*
	 * Kernel page/segment table allocated in locore,
	 * just initialize pointers.
	 */
	{
		struct pmap *kpm = &RELOC(kernel_pmap_store, struct pmap);

		kpm->pm_stab = RELOC(Sysseg, struct ste *);
		kpm->pm_ptab = RELOC(Sysmap, struct pte *);
		simple_lock_init(&kpm->pm_lock);
		kpm->pm_count = 1;
		kpm->pm_stpa = (struct ste *)kstpa;
		/*
		 * For the 040 we also initialize the free level 2
		 * descriptor mask noting that we have used:
		 *	0:		level 1 table
		 *	1 to `num':	map page tables
		 *	MAXKL2SIZE-1:	maps last-page page table
		 */
		if (RELOC(mmutype, int) == MMU_68040) {
			register int num;

			kpm->pm_stfree = ~l2tobm(0);
			num = roundup((nptpages + 1) * (NPTEPG / SG4_LEV3SIZE),
				      SG4_LEV2SIZE) / SG4_LEV2SIZE;
			while (num)
				kpm->pm_stfree &= ~l2tobm(num--);
			kpm->pm_stfree &= ~l2tobm(MAXKL2SIZE-1);
			/* bits beyond MAXKL2SIZE don't exist; mark them used */
			for (num = MAXKL2SIZE;
			     num < sizeof(kpm->pm_stfree)*NBBY;
			     num++)
				kpm->pm_stfree &= ~l2tobm(num);
		}
	}

#if BSDVM_COMPAT
	/*
	 * SYSMAP carves `n' pages of KVA out of `va' and `n' PTEs out of
	 * `pte', storing the (relocated) results in `v' and `p'.
	 */
#define	SYSMAP(c, p, v, n) \
	RELOC(v, c) = (c)va; va += ((n)*HP_PAGE_SIZE); \
	RELOC(p, struct pte *) = (struct pte *)pte; pte += (n);

	/*
	 * Allocate all the submaps we need
	 */
	{
		vm_offset_t va = RELOC(virtual_avail, vm_offset_t);

		pte = &((u_int *)RELOC(Sysmap, struct pte *))[hp300_btop(va)];

		SYSMAP(caddr_t		,CMAP1		,CADDR1	   ,1	)
		SYSMAP(caddr_t		,CMAP2		,CADDR2	   ,1	)
		SYSMAP(caddr_t		,mmap		,vmmap	   ,1	)
		SYSMAP(struct msgbuf *	,msgbufmap	,msgbufp   ,1	)

		RELOC(virtual_avail, vm_offset_t) = va;
	}
#undef	SYSMAP
#endif
}

/*
 * Debugging aid: print the BSDVM_COMPAT submap VAs and PTE pointers
 * plus the exported virtual address range and protection_codes array.
 * NOTE(review): %x is used for pointers here (historic K&R style);
 * presumably fine on this 32-bit platform.
 */
pmap_showstuff()
{
	int i;
	printf("CADDR1=%x pte at CMAP1=%x\n", CADDR1, CMAP1);
	printf("CADDR2=%x pte at CMAP2=%x\n", CADDR2, CMAP2);
	printf("vmmap=%x pte at mmap=%x\n", vmmap, mmap);
	printf("msgbufp=%x pte at msgbufmap=%x\n", msgbufp, msgbufmap);
	printf("virtual_avail=%x, virtual_end=%x\n", virtual_avail, virtual_end);
	for (i = 0; i < 8; i++)
		printf("%x ", protection_codes[i]);
	printf("\n");
}