/*	$NetBSD: pmap_bootstrap.c,v 1.11 2002/11/05 07:41:40 chs Exp $	*/

/*
 * This file was taken from mvme68k/mvme68k/pmap_bootstrap.c
 * should probably be re-synced when needed.
 * cvs id of source for the most recent syncing:
 *	NetBSD: pmap_bootstrap.c,v 1.15 2000/11/20 19:35:30 scw Exp
 *	NetBSD: pmap_bootstrap.c,v 1.17 2001/11/08 21:53:44 scw Exp
 */


/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap_bootstrap.c	8.1 (Berkeley) 6/10/93
 */

#include <sys/param.h>
#include <sys/kcore.h>
#include <machine/kcore.h>
#include <machine/pte.h>
#include <machine/vmparam.h>
#include <machine/bus.h>
#include <machine/cpu.h>

#include <next68k/next68k/seglist.h>

#include <next68k/dev/intiovar.h>

#include <uvm/uvm_extern.h>

/*
 * Access a kernel global while the MMU is still off: offset the symbol's
 * link-time (virtual) address by `firstpa', the physical address the
 * kernel was loaded at.  Only usable inside pmap_bootstrap(), where
 * `firstpa' is in scope.
 */
#define RELOC(v, t)	*((t*)((u_int)&(v) + firstpa))

/* Symbols and pmap globals defined elsewhere (locore.s / pmap.c). */
extern char *etext;
extern int Sysptsize;
extern char *proc0paddr;
extern st_entry_t *Sysseg;
extern pt_entry_t *Sysptmap, *Sysmap;

/* Memory accounting globals filled in by pmap_bootstrap() below. */
extern int maxmem, physmem;
extern paddr_t avail_start, avail_end;
extern vaddr_t virtual_avail, virtual_end;
extern vsize_t mem_size;
extern phys_ram_seg_t mem_clusters[];
extern int mem_cluster_cnt;
extern paddr_t msgbufpa;
extern int protection_codes[];
#ifdef M68K_MMU_HP
extern int pmap_aliasmask;
#endif

void	pmap_bootstrap __P((paddr_t, paddr_t));


/*
 * Special purpose kernel virtual addresses, used for mapping
 * physical pages for a variety of temporary or permanent purposes:
 *
 *	CADDR1, CADDR2:	pmap zero/copy operations
 *	vmmap:		/dev/mem, crash dumps, parity error checking
 *	msgbufaddr:	kernel message buffer
 */
caddr_t		CADDR1, CADDR2, vmmap;
extern caddr_t	msgbufaddr;

/*
 * Bootstrap the VM system.
 *
 * Called with MMU off so we must relocate all global references by `firstpa'
 * (don't call any functions here!) `nextpa' is the first available physical
 * memory address.  Returns an updated first PA reflecting the memory we
 * have allocated.  MMU is still off when we return.
 *
 *	XXX assumes sizeof(u_int) == sizeof(pt_entry_t)
 *	XXX a PIC compiler would make this much easier.
 */
void
pmap_bootstrap(nextpa, firstpa)
	paddr_t nextpa;
	paddr_t firstpa;
{
	paddr_t kstpa, kptpa, eiiopa, iiopa, kptmpa, lkptpa, p0upa;
	paddr_t emonopa, monopa;
	paddr_t ecolorpa, colorpa;
	u_int nptpages, kstsize;
	st_entry_t protoste, *ste;
	pt_entry_t protopte, *pte, *epte;
	psize_t size;
	int i;

	/*
	 * Calculate important physical addresses:
	 *
	 *	kstpa		kernel segment table	1 page (!040)
	 *						N pages (040)
	 *
	 *	kptpa		statically allocated
	 *			kernel PT pages		Sysptsize+ pages
	 *
	 *	iiopa		internal IO space
	 *			PT pages		IIOMAPSIZE pages
	 *
	 *	eiiopa		page following
	 *			internal IO space
	 *
	 *	monopa		mono fb PT pages	MONOSIZE pages
	 *
	 *	emonopa		page following
	 *			mono fb pages
	 *
	 *	colorpa		color fb PT pages	COLORSIZE pages
	 *
	 *	ecolorpa	page following
	 *			color fb pages
	 *
	 * [ Sysptsize is the number of pages of PT, and IIOMAPSIZE
	 *   is the number of PTEs, hence we need to round
	 *   the total to a page boundary with IO maps at the end. ]
	 *
	 *	kptmpa		kernel PT map		1 page
	 *
	 *	lkptpa		last kernel PT page	1 page
	 *
	 *	p0upa		proc 0 u-area		UPAGES pages
	 *
	 * The KVA corresponding to any of these PAs is:
	 *	(PA - firstpa + KERNBASE).
	 */
#if defined(M68040) || defined(M68060)
	if (RELOC(mmutype, int) == MMU_68040)
		kstsize = MAXKL2SIZE / (NPTEPG/SG4_LEV2SIZE);
	else
#endif
		kstsize = 1;
	kstpa = nextpa;
	nextpa += kstsize * NBPG;
	kptpa = nextpa;
	nptpages = RELOC(Sysptsize, int) +
		(IIOMAPSIZE + MONOMAPSIZE + COLORMAPSIZE + NPTEPG - 1) / NPTEPG;
	nextpa += nptpages * NBPG;
	eiiopa = nextpa;		/* just a reference for later */
	iiopa = nextpa - IIOMAPSIZE * sizeof(pt_entry_t);

	/*
	 * The mono and color fb PTEs sit immediately below the internal
	 * IO PTEs at the tail of the static kernel PT: by construction
	 * emonopa == iiopa and ecolorpa == monopa, i.e. the three ranges
	 * are contiguous (color, then mono, then internal IO).
	 */
	emonopa = nextpa - IIOMAPSIZE * sizeof(pt_entry_t);
	monopa = emonopa - MONOMAPSIZE * sizeof(pt_entry_t);

	ecolorpa = emonopa - MONOMAPSIZE * sizeof(pt_entry_t);
	colorpa = ecolorpa - COLORMAPSIZE * sizeof(pt_entry_t);

	kptmpa = nextpa;
	nextpa += NBPG;
	lkptpa = nextpa;
	nextpa += NBPG;
	p0upa = nextpa;
	nextpa += USPACE;

	/*
	 * Clear all PTEs to zero
	 */
	for (pte = (pt_entry_t *)kstpa; pte < (pt_entry_t *)nextpa; pte++)
		*pte = 0;

	/*
	 * Initialize segment table and kernel page table map.
	 *
	 * On 68030s and earlier MMUs the two are identical except for
	 * the valid bits so both are initialized with essentially the
	 * same values.  On the 68040, which has a mandatory 3-level
	 * structure, the segment table holds the level 1 table and part
	 * (or all) of the level 2 table and hence is considerably
	 * different.  Here the first level consists of 128 descriptors
	 * (512 bytes) each mapping 32mb of address space.  Each of these
	 * points to blocks of 128 second level descriptors (512 bytes)
	 * each mapping 256kb.  Note that there may be additional "segment
	 * table" pages depending on how large MAXKL2SIZE is.
	 *
	 * Portions of the last segment of KVA space (0xFFF00000 -
	 * 0xFFFFFFFF) are mapped for a couple of purposes.  0xFFF00000
	 * for UPAGES is used for mapping the current process u-area
	 * (u + kernel stack).  The very last page (0xFFFFF000) is mapped
	 * to the last physical page of RAM to give us a region in which
	 * PA == VA.  We use the first part of this page for enabling
	 * and disabling mapping.  The last part of this page also contains
	 * info left by the boot ROM.
	 *
	 * XXX cramming two levels of mapping into the single "segment"
	 * table on the 68040 is intended as a temporary hack to get things
	 * working.  The 224mb of address space that this allows will most
	 * likely be insufficient in the future (at least for the kernel).
	 */
#if defined(M68040) || defined(M68060)
	if (RELOC(mmutype, int) == MMU_68040) {
		int num;

		/*
		 * First invalidate the entire "segment table" pages
		 * (levels 1 and 2 have the same "invalid" value).
		 */
		pte = (u_int *)kstpa;
		epte = &pte[kstsize * NPTEPG];
		while (pte < epte)
			*pte++ = SG_NV;
		/*
		 * Initialize level 2 descriptors (which immediately
		 * follow the level 1 table).  We need:
		 *	NPTEPG / SG4_LEV3SIZE
		 * level 2 descriptors to map each of the nptpages+1
		 * pages of PTEs.  Note that we set the "used" bit
		 * now to save the HW the expense of doing it.
		 */
		num = (nptpages + 1) * (NPTEPG / SG4_LEV3SIZE);
		pte = &((u_int *)kstpa)[SG4_LEV1SIZE];
		epte = &pte[num];
		protoste = kptpa | SG_U | SG_RW | SG_V;
		while (pte < epte) {
			*pte++ = protoste;
			protoste += (SG4_LEV3SIZE * sizeof(st_entry_t));
		}
		/*
		 * Initialize level 1 descriptors.  We need:
		 *	roundup(num, SG4_LEV2SIZE) / SG4_LEV2SIZE
		 * level 1 descriptors to map the `num' level 2's.
		 */
		pte = (u_int *)kstpa;
		epte = &pte[roundup(num, SG4_LEV2SIZE) / SG4_LEV2SIZE];
		protoste = (u_int)&pte[SG4_LEV1SIZE] | SG_U | SG_RW | SG_V;
		while (pte < epte) {
			*pte++ = protoste;
			protoste += (SG4_LEV2SIZE * sizeof(st_entry_t));
		}
		/*
		 * Initialize the final level 1 descriptor to map the last
		 * block of level 2 descriptors.
		 */
		ste = &((u_int *)kstpa)[SG4_LEV1SIZE-1];
		pte = &((u_int *)kstpa)[kstsize*NPTEPG - SG4_LEV2SIZE];
		*ste = (u_int)pte | SG_U | SG_RW | SG_V;
		/*
		 * Now initialize the final portion of that block of
		 * descriptors to map the "last PT page".
		 */
		pte = &((u_int *)kstpa)[kstsize*NPTEPG - NPTEPG/SG4_LEV3SIZE];
		epte = &pte[NPTEPG/SG4_LEV3SIZE];
		protoste = lkptpa | SG_U | SG_RW | SG_V;
		while (pte < epte) {
			*pte++ = protoste;
			protoste += (SG4_LEV3SIZE * sizeof(st_entry_t));
		}
		/*
		 * Initialize Sysptmap
		 */
		pte = (u_int *)kptmpa;
		epte = &pte[nptpages+1];
		protopte = kptpa | PG_RW | PG_CI | PG_U | PG_V;
		while (pte < epte) {
			*pte++ = protopte;
			protopte += NBPG;
		}
		/*
		 * Invalidate all but the last remaining entry.
		 * (`pte' continues from the loop above.)
		 */
		epte = &((u_int *)kptmpa)[NPTEPG-1];
		while (pte < epte) {
			*pte++ = PG_NV;
		}
		/*
		 * Initialize the last to point to the page
		 * table page allocated earlier.
		 */
		*pte = lkptpa | PG_RW | PG_CI | PG_U | PG_V;
	} else
#endif /* M68040 || M68060 */
	{
		/*
		 * Map the page table pages in both the HW segment table
		 * and the software Sysptmap.  Note that Sysptmap is also
		 * considered a PT page hence the +1.
		 */
		ste = (u_int *)kstpa;
		pte = (u_int *)kptmpa;
		epte = &pte[nptpages+1];
		protoste = kptpa | SG_RW | SG_V;
		protopte = kptpa | PG_RW | PG_CI | PG_V;
		while (pte < epte) {
			*ste++ = protoste;
			*pte++ = protopte;
			protoste += NBPG;
			protopte += NBPG;
		}
		/*
		 * Invalidate all but the last remaining entries in both.
		 */
		epte = &((u_int *)kptmpa)[NPTEPG-1];
		while (pte < epte) {
			*ste++ = SG_NV;
			*pte++ = PG_NV;
		}
		/*
		 * Initialize the last to point to the page
		 * table page allocated earlier.
		 */
		*ste = lkptpa | SG_RW | SG_V;
		*pte = lkptpa | PG_RW | PG_CI | PG_V;
	}
	/*
	 * Invalidate all but the final entry in the last kernel PT page
	 * (u-area PTEs will be validated later).  The final entry maps
	 * the last page of physical memory.
	 */
	pte = (u_int *)lkptpa;
	epte = &pte[NPTEPG-1];
	while (pte < epte)
		*pte++ = PG_NV;
#ifdef MAXADDR
	/* tmp double-map for cpu's with physmem at the end of memory */
	*pte = MAXADDR | PG_RW | PG_CI | PG_U | PG_V;
#endif
	/*
	 * NOTE(review): when MAXADDR is undefined the final entry keeps
	 * the zero written by the clear-all loop above -- confirm that
	 * PG_NV is 0 so the entry is indeed invalid in that case.
	 */
	/*
	 * Initialize kernel page table.
	 * Start by invalidating the `nptpages' that we have allocated.
	 */
	pte = (u_int *)kptpa;
	epte = &pte[nptpages * NPTEPG];
	while (pte < epte)
		*pte++ = PG_NV;
	/*
	 * Validate PTEs for kernel text (RO).  The first page
	 * of kernel text remains invalid; see locore.s
	 */
	pte = &((u_int *)kptpa)[m68k_btop(KERNBASE + NBPG)];
	epte = &pte[m68k_btop(m68k_trunc_page(&etext))];
	protopte = (firstpa + NBPG) | PG_RO | PG_U | PG_V;
	while (pte < epte) {
		*pte++ = protopte;
		protopte += NBPG;
	}
	/*
	 * Validate PTEs for kernel data/bss, dynamic data allocated
	 * by us so far (kstpa - firstpa bytes), and pages for proc0
	 * u-area and page table allocated below (RW).
	 */
	epte = &((u_int *)kptpa)[m68k_btop(kstpa - firstpa)];
	protopte = (protopte & ~PG_PROT) | PG_RW;
	/*
	 * Enable copy-back caching of data pages
	 */
	if (RELOC(mmutype, int) == MMU_68040)
		protopte |= PG_CCB;
	while (pte < epte) {
		*pte++ = protopte;
		protopte += NBPG;
	}
	/*
	 * map the kernel segment table cache invalidated for
	 * these machines (for the 68040 not strictly necessary, but
	 * recommended by Motorola; for the 68060 mandatory)
	 */
	epte = &((u_int *)kptpa)[m68k_btop(nextpa - firstpa)];
	protopte = (protopte & ~PG_PROT) | PG_RW;
	if (RELOC(mmutype, int) == MMU_68040) {
		protopte &= ~PG_CMASK;
		protopte |= PG_CI;
	}
	while (pte < epte) {
		*pte++ = protopte;
		protopte += NBPG;
	}
	/*
	 * Finally, validate the internal IO space PTEs (RW+CI).
	 * We do this here since the 320/350 MMU registers (also
	 * used, but to a lesser extent, on other models) are mapped
	 * in this range and it would be nice to be able to access
	 * them after the MMU is turned on.
	 */
	pte = (u_int *)iiopa;
	epte = (u_int *)eiiopa;
	protopte = INTIOBASE | PG_RW | PG_CI | PG_U | PG_V;
	while (pte < epte) {
		*pte++ = protopte;
		protopte += NBPG;
	}

	/* validate the mono fb space PTEs */
	pte = (u_int *)monopa;
	epte = (u_int *)emonopa;
	protopte = MONOBASE | PG_RW | PG_CI | PG_U | PG_V;
	while (pte < epte) {
		*pte++ = protopte;
		protopte += NBPG;
	}

	/* validate the color fb space PTEs */
	pte = (u_int *)colorpa;
	epte = (u_int *)ecolorpa;
	protopte = COLORBASE | PG_RW | PG_CI | PG_U | PG_V;
	while (pte < epte) {
		*pte++ = protopte;
		protopte += NBPG;
	}

	/*
	 * Calculate important exported kernel virtual addresses
	 */
	/*
	 * Sysseg: base of kernel segment table
	 */
	RELOC(Sysseg, st_entry_t *) =
		(st_entry_t *)(kstpa - firstpa);
	/*
	 * Sysptmap: base of kernel page table map
	 */
	RELOC(Sysptmap, pt_entry_t *) =
		(pt_entry_t *)(kptmpa - firstpa);
	/*
	 * Sysmap: kernel page table (as mapped through Sysptmap)
	 * Immediately follows `nptpages' of static kernel page table.
	 */
	RELOC(Sysmap, pt_entry_t *) =
		(pt_entry_t *)m68k_ptob(nptpages * NPTEPG);

	/*
	 * colorbase, colorlimit: base and end of color fb space.
	 * COLORMAPSIZE pages prior to external IO space at end of static
	 * kernel page table.
	 */
	RELOC(colorbase, char *) =
		(char *)m68k_ptob(nptpages*NPTEPG - IIOMAPSIZE - MONOMAPSIZE - COLORMAPSIZE);
	RELOC(colorlimit, char *) =
		(char *)m68k_ptob(nptpages*NPTEPG - IIOMAPSIZE - MONOMAPSIZE);

	/*
	 * monobase, monolimit: base and end of mono fb space.
	 * MONOMAPSIZE pages prior to external IO space at end of static
	 * kernel page table.
	 */
	RELOC(monobase, char *) =
		(char *)m68k_ptob(nptpages*NPTEPG - IIOMAPSIZE - MONOMAPSIZE);
	RELOC(monolimit, char *) =
		(char *)m68k_ptob(nptpages*NPTEPG - IIOMAPSIZE);

	/*
	 * intiobase, intiolimit: base and end of internal IO space.
	 * IIOMAPSIZE pages prior to external IO space at end of static
	 * kernel page table.
	 */
	RELOC(intiobase, char *) =
		(char *)m68k_ptob(nptpages*NPTEPG - IIOMAPSIZE);
	RELOC(intiolimit, char *) =
		(char *)m68k_ptob(nptpages*NPTEPG);

	/*
	 * Setup u-area for process 0.
	 */
	/*
	 * Zero the u-area.
	 * NOTE: `pte' and `epte' aren't PTEs here.
	 */
	pte = (u_int *)p0upa;
	epte = (u_int *)(p0upa + USPACE);
	while (pte < epte)
		*pte++ = 0;
	/*
	 * Remember the u-area address so it can be loaded in the
	 * proc struct p_addr field later.
	 */
	RELOC(proc0paddr, char *) = (char *)(p0upa - firstpa);

	/*
	 * Initialize the mem_clusters[] array for the crash dump
	 * code.  While we're at it, compute the total amount of
	 * physical memory in the system.
	 */
	for (i = 0; i < VM_PHYSSEG_MAX; i++) {
		if (RELOC(phys_seg_list[i].ps_start, paddr_t) ==
		    RELOC(phys_seg_list[i].ps_end, paddr_t)) {
			/*
			 * No more memory.
			 */
			break;
		}

		/*
		 * Make sure these are properly rounded.
		 */
		RELOC(phys_seg_list[i].ps_start, paddr_t) =
		    m68k_round_page(RELOC(phys_seg_list[i].ps_start,
					  paddr_t));
		RELOC(phys_seg_list[i].ps_end, paddr_t) =
		    m68k_trunc_page(RELOC(phys_seg_list[i].ps_end,
					  paddr_t));

		size = RELOC(phys_seg_list[i].ps_end, paddr_t) -
		    RELOC(phys_seg_list[i].ps_start, paddr_t);

		RELOC(mem_clusters[i].start, u_quad_t) =
		    RELOC(phys_seg_list[i].ps_start, paddr_t);
		RELOC(mem_clusters[i].size, u_quad_t) = size;

		RELOC(physmem, int) += size >> PGSHIFT;

		RELOC(mem_cluster_cnt, int) += 1;
	}

	/*
	 * Scoot the start of available on-board RAM forward to
	 * account for:
	 *
	 *	(1) The bootstrap programs in low memory (so
	 *	    that we can jump back to them without
	 *	    reloading).
	 *
	 *	(2) The kernel text, data, and bss.
	 *
	 *	(3) The pages we stole above for pmap data
	 *	    structures.
	 */
	RELOC(phys_seg_list[0].ps_start, paddr_t) = nextpa;

	/*
	 * Reserve space at the end of on-board RAM for the message
	 * buffer.  We force it into on-board RAM because VME RAM
	 * gets cleared very early on in locore.s (to initialise
	 * parity on boards that need it).  This would clobber the
	 * messages from a previous running NetBSD system.
	 */
	RELOC(phys_seg_list[0].ps_end, paddr_t) -=
	    m68k_round_page(MSGBUFSIZE);
	RELOC(msgbufpa, paddr_t) =
	    RELOC(phys_seg_list[0].ps_end, paddr_t);

	/*
	 * Initialize avail_start and avail_end.
	 */
	i = RELOC(mem_cluster_cnt, int) - 1;
	RELOC(avail_start, paddr_t) =
	    RELOC(phys_seg_list[0].ps_start, paddr_t);
	RELOC(avail_end, paddr_t) =
	    RELOC(phys_seg_list[i].ps_end, paddr_t);

	RELOC(mem_size, vsize_t) = m68k_ptob(RELOC(physmem, int));

	RELOC(virtual_avail, vaddr_t) =
	    VM_MIN_KERNEL_ADDRESS + (vaddr_t)(nextpa - firstpa);
	RELOC(virtual_end, vaddr_t) = VM_MAX_KERNEL_ADDRESS;

	/*
	 * Initialize protection array.
	 * XXX don't use a switch statement, it might produce an
	 * absolute "jmp" table.
	 */
	{
		int *kp;

		/* Index is the 3-bit VM_PROT_{READ,WRITE,EXECUTE} mask. */
		kp = &RELOC(protection_codes, int);
		kp[VM_PROT_NONE|VM_PROT_NONE|VM_PROT_NONE] = 0;
		kp[VM_PROT_READ|VM_PROT_NONE|VM_PROT_NONE] = PG_RO;
		kp[VM_PROT_READ|VM_PROT_NONE|VM_PROT_EXECUTE] = PG_RO;
		kp[VM_PROT_NONE|VM_PROT_NONE|VM_PROT_EXECUTE] = PG_RO;
		kp[VM_PROT_NONE|VM_PROT_WRITE|VM_PROT_NONE] = PG_RW;
		kp[VM_PROT_NONE|VM_PROT_WRITE|VM_PROT_EXECUTE] = PG_RW;
		kp[VM_PROT_READ|VM_PROT_WRITE|VM_PROT_NONE] = PG_RW;
		kp[VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE] = PG_RW;
	}

	/*
	 * Kernel page/segment table allocated above,
	 * just initialize pointers.
	 */
	{
		struct pmap *kpm = &RELOC(kernel_pmap_store, struct pmap);

		kpm->pm_stab = RELOC(Sysseg, st_entry_t *);
		kpm->pm_ptab = RELOC(Sysmap, pt_entry_t *);
		simple_lock_init(&kpm->pm_lock);
		kpm->pm_count = 1;
		kpm->pm_stpa = (st_entry_t *)kstpa;
#if defined(M68040) || defined(M68060)
		/*
		 * For the 040 we also initialize the free level 2
		 * descriptor mask noting that we have used:
		 *	0:		level 1 table
		 *	1 to `num':	map page tables
		 *	MAXKL2SIZE-1:	maps last-page page table
		 */
		if (RELOC(mmutype, int) == MMU_68040) {
			int num;

			kpm->pm_stfree = ~l2tobm(0);
			num = roundup((nptpages + 1) * (NPTEPG / SG4_LEV3SIZE),
				      SG4_LEV2SIZE) / SG4_LEV2SIZE;
			while (num)
				kpm->pm_stfree &= ~l2tobm(num--);
			kpm->pm_stfree &= ~l2tobm(MAXKL2SIZE-1);
			for (num = MAXKL2SIZE;
			     num < sizeof(kpm->pm_stfree)*NBBY;
			     num++)
				kpm->pm_stfree &= ~l2tobm(num);
		}
#endif
	}

	/*
	 * Allocate some fixed, special purpose kernel virtual addresses
	 */
	{
		vaddr_t va = RELOC(virtual_avail, vaddr_t);

		RELOC(CADDR1, caddr_t) = (caddr_t)va;
		va += NBPG;
		RELOC(CADDR2, caddr_t) = (caddr_t)va;
		va += NBPG;
		RELOC(vmmap, caddr_t) = (caddr_t)va;
		va += NBPG;
		RELOC(msgbufaddr, caddr_t) = (caddr_t)va;
		va += m68k_round_page(MSGBUFSIZE);
		RELOC(virtual_avail, vaddr_t) = va;
	}
}

/*
 * Reserve the fixed internal-IO and frame-buffer virtual ranges
 * (mapped above during bootstrap) in kernel_map, so UVM will never
 * hand them out for other allocations.  Panics if any reservation
 * fails, since the bootstrap mappings would then be unprotected.
 */
void
pmap_init_md(void)
{
	vaddr_t addr;

	addr = (vaddr_t) intiobase;
	if (uvm_map(kernel_map, &addr, m68k_ptob(IIOMAPSIZE),
		    NULL, UVM_UNKNOWN_OFFSET, 0,
		    UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE,
				UVM_INH_NONE, UVM_ADV_RANDOM,
				UVM_FLAG_FIXED)) != 0)
		goto failed;
	addr = (vaddr_t) monobase;
	if (uvm_map(kernel_map, &addr, m68k_ptob(MONOMAPSIZE),
		    NULL, UVM_UNKNOWN_OFFSET, 0,
		    UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE,
				UVM_INH_NONE, UVM_ADV_RANDOM,
				UVM_FLAG_FIXED)) != 0)
		goto failed;
	addr = (vaddr_t) colorbase;
	if (uvm_map(kernel_map, &addr, m68k_ptob(COLORMAPSIZE),
		    NULL, UVM_UNKNOWN_OFFSET, 0,
		    UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE,
				UVM_INH_NONE, UVM_ADV_RANDOM,
				UVM_FLAG_FIXED)) != 0) {
 failed:
		panic("pmap_init_md: uvm_map failed");
	}
}