1 /* 2 * Copyright (c) 1991 Regents of the University of California. 3 * All rights reserved. 4 * 5 * This code is derived from software contributed to Berkeley by 6 * the Systems Programming Group of the University of Utah Computer 7 * Science Department. 8 * 9 * %sccs.include.redist.c% 10 * 11 * @(#)pmap_bootstrap.c 7.3 (Berkeley) 10/11/92 12 */ 13 14 #include <sys/param.h> 15 16 #include <hp300/hp300/pte.h> 17 #include <hp300/hp300/clockreg.h> 18 19 #include <machine/vmparam.h> 20 #include <machine/cpu.h> 21 22 #include <vm/vm.h> 23 24 /* 25 * Allocate various and sundry SYSMAPs used in the days of old VM 26 * and not yet converted. XXX. 27 */ 28 #define BSDVM_COMPAT 1 29 30 #define RELOC(v, t) *((t*)((u_int)&(v) + firstpa)) 31 32 extern char *etext; 33 extern int Sysptsize; 34 extern char *extiobase, *proc0paddr; 35 extern struct ste *Sysseg; 36 extern struct pte *Sysptmap, *Sysmap; 37 extern vm_offset_t Umap, CLKbase, MMUbase; 38 39 extern int maxmem, physmem; 40 extern vm_offset_t avail_start, avail_end, virtual_avail, virtual_end; 41 extern vm_size_t mem_size; 42 extern int pmap_aliasmask, protection_codes[]; 43 #if defined(DYNPGSIZE) 44 extern int hppagesperpage; 45 #endif 46 47 #if BSDVM_COMPAT 48 #include <sys/msgbuf.h> 49 50 /* 51 * All those kernel PT submaps that BSD is so fond of 52 */ 53 struct pte *CMAP1, *CMAP2, *mmap; 54 caddr_t CADDR1, CADDR2, vmmap; 55 struct pte *msgbufmap; 56 struct msgbuf *msgbufp; 57 #endif 58 59 /* 60 * Bootstrap the VM system. 61 * 62 * Called with MMU off so we must relocate all global references by `firstpa' 63 * (don't call any functions here!) `nextpa' is the first available physical 64 * memory address. Returns an updated first PA reflecting the memory we 65 * have allocated. MMU is still off when we return. 66 * 67 * XXX assumes sizeof(u_int) == sizeof(struct pte) 68 * XXX a PIC compiler would make this much easier. 
 */
void
pmap_bootstrap(nextpa, firstpa)
	vm_offset_t nextpa;
	register vm_offset_t firstpa;
{
	vm_offset_t kstpa, kptpa, iiopa, eiopa, kptmpa, lkptpa, p0upa;
	u_int nptpages, kstsize;
	register u_int protoste, protopte, *ste, *pte, *epte;

	/*
	 * Calculate important physical addresses:
	 *
	 *	kstpa		kernel segment table	1 page (!040)
	 *						N pages (040)
	 *
	 *	kptpa		statically allocated
	 *			kernel PT pages		Sysptsize+ pages
	 *
	 *	iiopa		internal IO space
	 *			PT pages		IIOMAPSIZE pages
	 *
	 *	eiopa		external IO space
	 *			PT pages		EIOMAPSIZE pages
	 *
	 * [ Sysptsize is the number of pages of PT, IIOMAPSIZE and
	 *   EIOMAPSIZE are the number of PTEs, hence we need to round
	 *   the total to a page boundary with IO maps at the end. ]
	 *
	 *	kptmpa		kernel PT map		1 page
	 *
	 *	lkptpa		last kernel PT page	1 page
	 *
	 *	p0upa		proc 0 u-area		UPAGES pages
	 *
	 * The KVA corresponding to any of these PAs is:
	 *	(PA - firstpa + KERNBASE).
	 */
	/*
	 * On the 68040 the segment table is larger: it must hold the
	 * level 1 table plus MAXKL2SIZE worth of level 2 descriptors
	 * (see the 3-level discussion below).
	 */
	if (RELOC(mmutype, int) == MMU_68040)
		kstsize = MAXKL2SIZE / (NPTEPG/SG4_LEV2SIZE);
	else
		kstsize = 1;
	kstpa = nextpa;
	nextpa += kstsize * NBPG;
	kptpa = nextpa;
	nptpages = RELOC(Sysptsize, int) +
		(IIOMAPSIZE + EIOMAPSIZE + NPTEPG - 1) / NPTEPG;
	nextpa += nptpages * NBPG;
	/* IO PTEs live at the very end of the static PT pages */
	eiopa = nextpa - EIOMAPSIZE * sizeof(struct pte);
	iiopa = eiopa - IIOMAPSIZE * sizeof(struct pte);
	kptmpa = nextpa;
	nextpa += NBPG;
	lkptpa = nextpa;
	nextpa += NBPG;
	p0upa = nextpa;
	nextpa += UPAGES * NBPG;

	/*
	 * Initialize segment table and kernel page table map.
	 *
	 * On 68030s and earlier MMUs the two are identical except for
	 * the valid bits so both are initialized with essentially the
	 * same values.  On the 68040, which has a mandatory 3-level
	 * structure, the segment table holds the level 1 table and part
	 * (or all) of the level 2 table and hence is considerably
	 * different.  Here the first level consists of 128 descriptors
	 * (512 bytes) each mapping 32mb of address space.  Each of these
	 * points to blocks of 128 second level descriptors (512 bytes)
	 * each mapping 256kb.  Note that there may be additional "segment
	 * table" pages depending on how large MAXKL2SIZE is.
	 *
	 * Portions of the last segment of KVA space (0xFFF00000 -
	 * 0xFFFFFFFF) are mapped for a couple of purposes.  0xFFF00000
	 * for UPAGES is used for mapping the current process u-area
	 * (u + kernel stack).  The very last page (0xFFFFF000) is mapped
	 * to the last physical page of RAM to give us a region in which
	 * PA == VA.  We use the first part of this page for enabling
	 * and disabling mapping.  The last part of this page also contains
	 * info left by the boot ROM.
	 *
	 * XXX cramming two levels of mapping into the single "segment"
	 * table on the 68040 is intended as a temporary hack to get things
	 * working.  The 224mb of address space that this allows will most
	 * likely be insufficient in the future (at least for the kernel).
	 */
	if (RELOC(mmutype, int) == MMU_68040) {
		register int num;

		/*
		 * First invalidate the entire "segment table" pages
		 * (levels 1 and 2 have the same "invalid" value).
		 */
		pte = (u_int *)kstpa;
		epte = &pte[kstsize * NPTEPG];
		while (pte < epte)
			*pte++ = SG_NV;
		/*
		 * Initialize level 2 descriptors (which immediately
		 * follow the level 1 table).  We need:
		 *	NPTEPG / SG4_LEV3SIZE
		 * level 2 descriptors to map each of the nptpages+1
		 * pages of PTEs (the +1 covers Sysptmap).  Note that
		 * we set the "used" bit now to save the HW the expense
		 * of doing it.
		 */
		num = (nptpages + 1) * (NPTEPG / SG4_LEV3SIZE);
		pte = &((u_int *)kstpa)[SG4_LEV1SIZE];
		epte = &pte[num];
		protoste = kptpa | SG_U | SG_RW | SG_V;
		while (pte < epte) {
			*pte++ = protoste;
			protoste += (SG4_LEV3SIZE * sizeof(struct ste));
		}
		/*
		 * Initialize level 1 descriptors.  We need:
		 *	roundup(num, SG4_LEV2SIZE) / SG4_LEV2SIZE
		 * level 1 descriptors to map the `num' level 2's.
		 */
		pte = (u_int *)kstpa;
		epte = &pte[roundup(num, SG4_LEV2SIZE) / SG4_LEV2SIZE];
		protoste = (u_int)&pte[SG4_LEV1SIZE] | SG_U | SG_RW | SG_V;
		while (pte < epte) {
			*pte++ = protoste;
			protoste += (SG4_LEV2SIZE * sizeof(struct ste));
		}
		/*
		 * Initialize the final level 1 descriptor to map the last
		 * block of level 2 descriptors (for the 0xFFFxxxxx range).
		 */
		ste = &((u_int *)kstpa)[SG4_LEV1SIZE-1];
		pte = &((u_int *)kstpa)[kstsize*NPTEPG - SG4_LEV2SIZE];
		*ste = (u_int)pte | SG_U | SG_RW | SG_V;
		/*
		 * Now initialize the final portion of that block of
		 * descriptors to map the "last PT page".
		 */
		pte = &((u_int *)kstpa)[kstsize*NPTEPG - NPTEPG/SG4_LEV3SIZE];
		epte = &pte[NPTEPG/SG4_LEV3SIZE];
		protoste = lkptpa | SG_U | SG_RW | SG_V;
		while (pte < epte) {
			*pte++ = protoste;
			protoste += (SG4_LEV3SIZE * sizeof(struct ste));
		}
		/*
		 * Initialize Sysptmap: one CI PTE per static PT page,
		 * with the last slot pointing at the "last PT page".
		 */
		pte = (u_int *)kptmpa;
		epte = &pte[nptpages+1];
		protopte = kptpa | PG_RW | PG_CI | PG_V;
		while (pte < epte) {
			*pte++ = protopte;
			protopte += NBPG;
		}
		pte = &((u_int *)kptmpa)[NPTEPG-1];
		*pte = lkptpa | PG_RW | PG_CI | PG_V;
	} else {
		/*
		 * Map the page table pages in both the HW segment table
		 * and the software Sysptmap.  Note that Sysptmap is also
		 * considered a PT page hence the +1.
		 */
		ste = (u_int *)kstpa;
		pte = (u_int *)kptmpa;
		epte = &pte[nptpages+1];
		protoste = kptpa | SG_RW | SG_V;
		protopte = kptpa | PG_RW | PG_CI | PG_V;
		while (pte < epte) {
			*ste++ = protoste;
			*pte++ = protopte;
			protoste += NBPG;
			protopte += NBPG;
		}
		/*
		 * Invalidate all but the last remaining entries in both.
		 */
		epte = &((u_int *)kptmpa)[NPTEPG-1];
		while (pte < epte) {
			*ste++ = SG_NV;
			*pte++ = PG_NV;
		}
		/*
		 * Initialize the last entry to point to the last kernel
		 * page table page allocated earlier (lkptpa).
		 */
		*ste = lkptpa | SG_RW | SG_V;
		*pte = lkptpa | PG_RW | PG_CI | PG_V;
	}
	/*
	 * Invalidate all but the final entry in the last kernel PT page
	 * (u-area PTEs will be validated later).  The final entry maps
	 * the last page of physical memory to give a PA == VA region
	 * (used when turning the MMU on and off).
	 */
	pte = (u_int *)lkptpa;
	epte = &pte[NPTEPG-1];
	while (pte < epte)
		*pte++ = PG_NV;
	*pte = MAXADDR | PG_RW | PG_CI | PG_V;
	/*
	 * Initialize kernel page table.
	 * Start by invalidating the `nptpages' that we have allocated.
	 */
	pte = (u_int *)kptpa;
	epte = &pte[nptpages * NPTEPG];
	while (pte < epte)
		*pte++ = PG_NV;
	/*
	 * Validate PTEs for kernel text (RO)
	 */
	pte = &((u_int *)kptpa)[hp300_btop(KERNBASE)];
	epte = &pte[hp300_btop(hp300_trunc_page(&etext))];
#ifdef KGDB
	protopte = firstpa | PG_RW | PG_V;	/* XXX RW for now (breakpoints) */
#else
	protopte = firstpa | PG_RO | PG_V;
#endif
	while (pte < epte) {
		*pte++ = protopte;
		protopte += NBPG;
	}
	/*
	 * Validate PTEs for kernel data/bss, dynamic data allocated
	 * by us so far (nextpa - firstpa bytes), and pages for proc0
	 * u-area and page table allocated below (RW).
	 * `pte' continues from where the text mapping left off.
	 */
	epte = &((u_int *)kptpa)[hp300_btop(nextpa - firstpa)];
	protopte = (protopte & ~PG_PROT) | PG_RW;
	/*
	 * Enable copy-back caching of data pages on the 68040
	 */
	if (RELOC(mmutype, int) == MMU_68040)
		protopte |= PG_CCB;
	while (pte < epte) {
		*pte++ = protopte;
		protopte += NBPG;
	}
	/*
	 * Finally, validate the internal IO space PTEs (RW+CI).
	 * We do this here since the 320/350 MMU registers (also
	 * used, but to a lesser extent, on other models) are mapped
	 * in this range and it would be nice to be able to access
	 * them after the MMU is turned on.
	 */
	pte = (u_int *)iiopa;
	epte = (u_int *)eiopa;
	protopte = INTIOBASE | PG_RW | PG_CI | PG_V;
	while (pte < epte) {
		*pte++ = protopte;
		protopte += NBPG;
	}

	/*
	 * Calculate important exported kernel virtual addresses.
	 * Note: all stores below go through RELOC since the MMU is
	 * still off and globals live at PA, not their linked VA.
	 */
	/*
	 * Sysseg: base of kernel segment table
	 */
	RELOC(Sysseg, struct ste *) =
		(struct ste *)(kstpa - firstpa);
	/*
	 * Sysptmap: base of kernel page table map
	 */
	RELOC(Sysptmap, struct pte *) =
		(struct pte *)(kptmpa - firstpa);
	/*
	 * Sysmap: kernel page table (as mapped through Sysptmap)
	 * Immediately follows `nptpages' of static kernel page table.
	 */
	RELOC(Sysmap, struct pte *) =
		(struct pte *)hp300_ptob(nptpages * NPTEPG);
	/*
	 * Umap: first of UPAGES PTEs (in Sysmap) for fixed-address u-area.
	 * HIGHPAGES PTEs from the end of Sysmap.
	 */
	RELOC(Umap, vm_offset_t) =
		(vm_offset_t)RELOC(Sysmap, struct pte *) +
			(HP_MAX_PTSIZE - HIGHPAGES * sizeof(struct pte));
	/*
	 * intiobase, intiolimit: base and end of internal (DIO) IO space.
	 * IIOMAPSIZE pages prior to external IO space at end of static
	 * kernel page table.
	 */
	RELOC(intiobase, char *) =
		(char *)hp300_ptob(nptpages*NPTEPG - (IIOMAPSIZE+EIOMAPSIZE));
	RELOC(intiolimit, char *) =
		(char *)hp300_ptob(nptpages*NPTEPG - EIOMAPSIZE);
	/*
	 * extiobase: base of external (DIO-II) IO space.
	 * EIOMAPSIZE pages at the end of the static kernel page table.
	 */
	RELOC(extiobase, char *) =
		(char *)hp300_ptob(nptpages*NPTEPG - EIOMAPSIZE);
	/*
	 * CLKbase, MMUbase: important registers in internal IO space
	 * accessed from assembly language.
	 */
	RELOC(CLKbase, vm_offset_t) =
		(vm_offset_t)RELOC(intiobase, char *) + CLKBASE;
	RELOC(MMUbase, vm_offset_t) =
		(vm_offset_t)RELOC(intiobase, char *) + MMUBASE;

	/*
	 * Setup u-area for process 0.
	 */
	/*
	 * Validate PTEs in Sysmap corresponding to the u-area (Umap)
	 * which are HIGHPAGES from the end of the last kernel PT page
	 * allocated earlier.
	 */
	pte = &((u_int *)lkptpa)[NPTEPG - HIGHPAGES];
	epte = &pte[UPAGES];
	protopte = p0upa | PG_RW | PG_V;
	while (pte < epte) {
		*pte++ = protopte;
		protopte += NBPG;
	}
	/*
	 * Zero the u-area.
	 * NOTE: `pte' and `epte' aren't PTEs here, just convenient
	 * word pointers for the clearing loop.
	 */
	pte = (u_int *)p0upa;
	epte = (u_int *)(p0upa + UPAGES*NBPG);
	while (pte < epte)
		*pte++ = 0;
	/*
	 * Remember the u-area address so it can be loaded in the
	 * proc struct p_addr field later.
	 */
	RELOC(proc0paddr, char *) = (char *)(p0upa - firstpa);

	/*
	 * VM data structures are now initialized, set up data for
	 * the pmap module.
	 */
	RELOC(avail_start, vm_offset_t) = nextpa;
	RELOC(avail_end, vm_offset_t) =
		hp300_ptob(RELOC(maxmem, int))
#if BSDVM_COMPAT
			/* XXX allow for msgbuf */
			- hp300_round_page(sizeof(struct msgbuf))
#endif
			;
	RELOC(mem_size, vm_size_t) = hp300_ptob(RELOC(physmem, int));
	RELOC(virtual_avail, vm_offset_t) =
		VM_MIN_KERNEL_ADDRESS + (nextpa - firstpa);
	RELOC(virtual_end, vm_offset_t) = VM_MAX_KERNEL_ADDRESS;
#if defined(DYNPGSIZE)
	RELOC(hppagesperpage, int) = 1;		/* XXX */
#endif

	/*
	 * Determine VA aliasing distance if any.
	 * Only models with virtually-indexed external caches need this.
	 */
	if (RELOC(ectype, int) == EC_VIRT)
		switch (RELOC(machineid, int)) {
		case HP_320:
			RELOC(pmap_aliasmask, int) = 0x3fff;	/* 16k */
			break;
		case HP_350:
			RELOC(pmap_aliasmask, int) = 0x7fff;	/* 32k */
			break;
		}

	/*
	 * Initialize protection array: map each of the 8 VM_PROT
	 * combinations to the HW PTE protection bits.  Anything
	 * writable is RW, anything readable/executable but not
	 * writable is RO, no access is 0.
	 */
	{
		register int *kp, prot;

		kp = &RELOC(protection_codes, int);
		for (prot = 0; prot < 8; prot++) {
			switch (prot) {
			case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_NONE:
				*kp++ = 0;
				break;
			case VM_PROT_READ | VM_PROT_NONE | VM_PROT_NONE:
			case VM_PROT_READ | VM_PROT_NONE | VM_PROT_EXECUTE:
			case VM_PROT_NONE | VM_PROT_NONE | VM_PROT_EXECUTE:
				*kp++ = PG_RO;
				break;
			case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_NONE:
			case VM_PROT_NONE | VM_PROT_WRITE | VM_PROT_EXECUTE:
			case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_NONE:
			case VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE:
				*kp++ = PG_RW;
				break;
			}
		}
	}

	/*
	 * Kernel page/segment table allocated in locore,
	 * just initialize pointers.
	 */
	{
		struct pmap *kpm = &RELOC(kernel_pmap_store, struct pmap);

		kpm->pm_stab = RELOC(Sysseg, struct ste *);
		kpm->pm_ptab = RELOC(Sysmap, struct pte *);
		simple_lock_init(&kpm->pm_lock);
		kpm->pm_count = 1;
		kpm->pm_stpa = (struct ste *)kstpa;
		/*
		 * For the 040 we also initialize the free level 2
		 * descriptor mask noting that we have used:
		 *	0:		level 1 table
		 *	1 to `num':	map page tables
		 *	MAXKL2SIZE-1:	maps last-page page table
		 * Slots beyond MAXKL2SIZE are also marked in-use so
		 * they are never handed out.
		 */
		if (RELOC(mmutype, int) == MMU_68040) {
			register int num;

			kpm->pm_stfree = ~l2tobm(0);
			num = roundup((nptpages + 1) * (NPTEPG / SG4_LEV3SIZE),
				      SG4_LEV2SIZE) / SG4_LEV2SIZE;
			while (num)
				kpm->pm_stfree &= ~l2tobm(num--);
			kpm->pm_stfree &= ~l2tobm(MAXKL2SIZE-1);
			for (num = MAXKL2SIZE;
			     num < sizeof(kpm->pm_stfree)*NBBY;
			     num++)
				kpm->pm_stfree &= ~l2tobm(num);
		}
	}

#if BSDVM_COMPAT
	/*
	 * SYSMAP carves `n' pages of KVA (advancing `va') and records
	 * both the VA and the address of its first Sysmap PTE
	 * (advancing `pte').
	 */
#define	SYSMAP(c, p, v, n)	\
	RELOC(v, c) = (c)va; va += ((n)*HP_PAGE_SIZE); \
	RELOC(p, struct pte *) = (struct pte *)pte; pte += (n);

	/*
	 * Allocate all the submaps we need
	 */
	{
		vm_offset_t va = RELOC(virtual_avail, vm_offset_t);

		pte = &((u_int *)RELOC(Sysmap, struct pte *))[hp300_btop(va)];

		SYSMAP(caddr_t		,CMAP1		,CADDR1	   ,1	)
		SYSMAP(caddr_t		,CMAP2		,CADDR2	   ,1	)
		SYSMAP(caddr_t		,mmap		,vmmap	   ,1	)
		SYSMAP(struct msgbuf *	,msgbufmap	,msgbufp   ,1	)

		RELOC(virtual_avail, vm_offset_t) = va;
	}
#undef	SYSMAP
#endif
}

/*
 * Debug helper: dump the BSDVM_COMPAT submap VAs/PTE pointers, the
 * kernel VA range, and the protection code table.  Implicit int
 * return and %x for pointers are pre-ANSI style, kept as-is.
 */
pmap_showstuff()
{
	int i;
	printf("CADDR1=%x pte at CMAP1=%x\n", CADDR1, CMAP1);
	printf("CADDR2=%x pte at CMAP2=%x\n", CADDR2, CMAP2);
	printf("vmmap=%x pte at mmap=%x\n", vmmap, mmap);
	printf("msgbufp=%x pte at msgbufmap=%x\n", msgbufp, msgbufmap);
	printf("virtual_avail=%x, virtual_end=%x\n", virtual_avail, virtual_end);
	for (i = 0; i < 8; i++)
		printf("%x ", protection_codes[i]);
	printf("\n");
}