/*	$NetBSD: pmap_bootstrap.c,v 1.23 2002/11/05 07:41:44 chs Exp $	*/

/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * the Systems Programming Group of the University of Utah Computer
 * Science Department.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)pmap_bootstrap.c	8.1 (Berkeley) 6/10/93
 */

#include "opt_m680x0.h"

#include <sys/param.h>
#include <uvm/uvm_extern.h>
#include <machine/pte.h>
#include <machine/vmparam.h>
#include <machine/cpu.h>
#include <arch/x68k/x68k/iodevice.h>

#define RELOC(v, t)	*((t*)((caddr_t)&(v) + firstpa))
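
/*
 * RELOC() relocates a reference to the global `v' by `firstpa', yielding
 * the address to use while the MMU is still off.  For example,
 * RELOC(Sysptsize, int) expands to
 *
 *	*((int *)((caddr_t)&Sysptsize + firstpa))
 *
 * Every global touched in pmap_bootstrap() below must be accessed this way.
 */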

extern char *etext;
extern int Sysptsize;
extern char *proc0paddr;
extern st_entry_t *Sysseg;
extern pt_entry_t *Sysptmap, *Sysmap;

extern int maxmem, physmem;
extern paddr_t avail_start, avail_end;
extern vaddr_t virtual_avail, virtual_end;
extern psize_t mem_size;
extern int protection_codes[];

u_int8_t *intiobase = (u_int8_t *) PHYS_IODEV;

void pmap_bootstrap __P((paddr_t, paddr_t));

/*
 * Special purpose kernel virtual addresses, used for mapping
 * physical pages for a variety of temporary or permanent purposes:
 *
 *	CADDR1, CADDR2:	pmap zero/copy operations
 *	vmmap:		/dev/mem, crash dumps, parity error checking
 *	msgbufaddr:	kernel message buffer
 */
caddr_t CADDR1, CADDR2, vmmap;
extern caddr_t msgbufaddr;

/*
 * Bootstrap the VM system.
 *
 * Called with MMU off so we must relocate all global references by `firstpa'
 * (don't call any functions here!)  `nextpa' is the first available physical
 * memory address.  Returns an updated first PA reflecting the memory we
 * have allocated.  MMU is still off when we return.
 *
 * XXX assumes sizeof(u_int) == sizeof(pt_entry_t)
 * XXX a PIC compiler would make this much easier.
 */
void
pmap_bootstrap(nextpa, firstpa)
        paddr_t nextpa;
        paddr_t firstpa;
{
        paddr_t kstpa, kptpa, iiopa, eiiopa, kptmpa, p0upa;
        u_int nptpages, kstsize;
        st_entry_t protoste, *ste;
        pt_entry_t protopte, *pte, *epte;

        /*
         * Calculate important physical addresses:
         *
         *	kstpa		kernel segment table	1 page (!040)
         *						N pages (040)
         *
         *	kptpa		statically allocated
         *			kernel PT pages		Sysptsize+ pages
         *
         *	iiopa		internal IO space
         *			PT pages		IIOMAPSIZE pages
         *
         *	[ Sysptsize is the number of pages of PT, and IIOMAPSIZE
         *	  is the number of PTEs, hence we need to round
         *	  the total to a page boundary with IO maps at the end. ]
         *
         *	kptmpa		kernel PT map		1 page
         *
         *	p0upa		proc 0 u-area		UPAGES pages
         *
         * The KVA corresponding to any of these PAs is:
         *	(PA - firstpa + KERNBASE).
         */
        if (RELOC(mmutype, int) == MMU_68040)
                kstsize = MAXKL2SIZE / (NPTEPG/SG4_LEV2SIZE);
        else
                kstsize = 1;
        kstpa = nextpa;
        nextpa += kstsize * NBPG;
        kptpa = nextpa;
        nptpages = RELOC(Sysptsize, int) +
            (IIOMAPSIZE + NPTEPG - 1) / NPTEPG;
        nextpa += nptpages * NBPG;
        eiiopa = nextpa;		/* just a reference for later */
        iiopa = nextpa - IIOMAPSIZE * sizeof(pt_entry_t);
        kptmpa = nextpa;
        nextpa += NBPG;
        p0upa = nextpa;
        nextpa += USPACE;

        /*
         * Clear all PTEs to zero
         */
        for (pte = (pt_entry_t *)kstpa; pte < (pt_entry_t *)p0upa; pte++)
                *pte = 0;
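
        /*
         * The loop above zeroes everything from kstpa up to (but not
         * including) p0upa: the segment table, the static kernel PT pages
         * and the PT map page.  The proc 0 u-area is zeroed separately
         * further below.
         */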

        /*
         * Initialize segment table and kernel page table map.
         *
         * On 68030s and earlier MMUs the two are identical except for
         * the valid bits so both are initialized with essentially the
         * same values.  On the 68040, which has a mandatory 3-level
         * structure, the segment table holds the level 1 table and part
         * (or all) of the level 2 table and hence is considerably
         * different.  Here the first level consists of 128 descriptors
         * (512 bytes) each mapping 32mb of address space.  Each of these
         * points to blocks of 128 second level descriptors (512 bytes)
         * each mapping 256kb.  Note that there may be additional "segment
         * table" pages depending on how large MAXKL2SIZE is.
         *
         * XXX cramming two levels of mapping into the single "segment"
         * table on the 68040 is intended as a temporary hack to get things
         * working.  The 224mb of address space that this allows will most
         * likely be insufficient in the future (at least for the kernel).
         */
#if defined(M68040) || defined(M68060)
        if (RELOC(mmutype, int) == MMU_68040) {
                int num;

                /*
                 * First invalidate the entire "segment table" pages
                 * (levels 1 and 2 have the same "invalid" value).
                 */
                pte = (u_int *)kstpa;
                epte = &pte[kstsize * NPTEPG];
                while (pte < epte)
                        *pte++ = SG_NV;
                /*
                 * Initialize level 2 descriptors (which immediately
                 * follow the level 1 table).  We need:
                 *	NPTEPG / SG4_LEV3SIZE
                 * level 2 descriptors to map each of the nptpages+1
                 * pages of PTEs.  Note that we set the "used" bit
                 * now to save the HW the expense of doing it.
                 */
                num = (nptpages + 1) * (NPTEPG / SG4_LEV3SIZE);
                pte = &((u_int *)kstpa)[SG4_LEV1SIZE];
                epte = &pte[num];
                protoste = kptpa | SG_U | SG_RW | SG_V;
                while (pte < epte) {
                        *pte++ = protoste;
                        protoste += (SG4_LEV3SIZE * sizeof(st_entry_t));
                }
                /*
                 * Initialize level 1 descriptors.  We need:
                 *	roundup(num, SG4_LEV2SIZE) / SG4_LEV2SIZE
                 * level 1 descriptors to map the `num' level 2's.
                 */
                pte = (u_int *)kstpa;
                epte = &pte[roundup(num, SG4_LEV2SIZE) / SG4_LEV2SIZE];
                protoste = (u_int)&pte[SG4_LEV1SIZE] | SG_U | SG_RW | SG_V;
                while (pte < epte) {
                        *pte++ = protoste;
                        protoste += (SG4_LEV2SIZE * sizeof(st_entry_t));
                }
                /*
                 * Initialize Sysptmap
                 */
                pte = (u_int *)kptmpa;
                epte = &pte[nptpages+1];
                protopte = kptpa | PG_RW | PG_CI | PG_V;
                while (pte < epte) {
                        *pte++ = protopte;
                        protopte += NBPG;
                }
                /*
                 * Invalidate the remaining entries.
                 */
                epte = &((u_int *)kptmpa)[NPTEPG];
                while (pte < epte) {
                        *pte++ = PG_NV;
                }
        } else
#endif /* M68040 || M68060 */
        {
                /*
                 * Map the page table pages in both the HW segment table
                 * and the software Sysptmap.  Note that Sysptmap is also
                 * considered a PT page hence the +1.
                 */
                ste = (u_int *)kstpa;
                pte = (u_int *)kptmpa;
                epte = &pte[nptpages+1];
                protoste = kptpa | SG_RW | SG_V;
                protopte = kptpa | PG_RW | PG_CI | PG_V;
                while (pte < epte) {
                        *ste++ = protoste;
                        *pte++ = protopte;
                        protoste += NBPG;
                        protopte += NBPG;
                }
                /*
                 * Invalidate the remaining entries in both.
                 */
                epte = &((u_int *)kptmpa)[NPTEPG];
                while (pte < epte) {
                        *ste++ = SG_NV;
                        *pte++ = PG_NV;
                }
        }

        /*
         * Initialize kernel page table.
         * Start by invalidating the `nptpages' that we have allocated.
         */
        pte = (u_int *)kptpa;
        epte = &pte[nptpages * NPTEPG];
        while (pte < epte)
                *pte++ = PG_NV;
        /*
         * Validate PTEs for kernel text (RO)
         */
        pte = &((u_int *)kptpa)[m68k_btop(KERNBASE)];
        epte = &pte[m68k_btop(m68k_trunc_page(&etext))];
        protopte = firstpa | PG_RO | PG_V;
        while (pte < epte) {
                *pte++ = protopte;
                protopte += NBPG;
        }
        /*
         * Validate PTEs for kernel data/bss and the dynamic data we have
         * allocated so far, i.e. everything up to kstpa (RW).
         */
        epte = &((u_int *)kptpa)[m68k_btop(kstpa - firstpa)];
        protopte = (protopte & ~PG_PROT) | PG_RW;
        /*
         * Enable copy-back caching of data pages
         */
        if (RELOC(mmutype, int) == MMU_68040)
                protopte |= PG_CCB;
        while (pte < epte) {
                *pte++ = protopte;
                protopte += NBPG;
        }
        /*
         * Map the kernel segment table and the rest of the bootstrap
         * allocations RW, cache-inhibited on the 68040/68060 (for the
         * 68040 not strictly necessary, but recommended by Motorola;
         * for the 68060 mandatory).
         */
        epte = &((u_int *)kptpa)[m68k_btop(nextpa - firstpa)];
        protopte = (protopte & ~PG_PROT) | PG_RW;
        if (RELOC(mmutype, int) == MMU_68040) {
                protopte &= ~PG_CCB;
                protopte |= PG_CIN;
        }
        while (pte < epte) {
                *pte++ = protopte;
                protopte += NBPG;
        }
        /*
         * Finally, validate the internal IO space PTEs (RW+CI).
         */
        pte = (u_int *)iiopa;
        epte = (u_int *)eiiopa;
        protopte = INTIOBASE | PG_RW | PG_CI | PG_V;
        while (pte < epte) {
                *pte++ = protopte;
                protopte += NBPG;
        }

        /*
         * Calculate important exported kernel virtual addresses
         */
        /*
         * Sysseg: base of kernel segment table
         */
        RELOC(Sysseg, st_entry_t *) =
            (st_entry_t *)(kstpa - firstpa);
        /*
         * Sysptmap: base of kernel page table map
         */
        RELOC(Sysptmap, pt_entry_t *) =
            (pt_entry_t *)(kptmpa - firstpa);
        /*
         * Sysmap: kernel page table (as mapped through Sysptmap)
         * Immediately follows `nptpages' of static kernel page table.
         */
        RELOC(Sysmap, pt_entry_t *) =
            (pt_entry_t *)m68k_ptob(nptpages * NPTEPG);
        /*
         * IODEVbase, intiolimit: base and end of internal (DIO) IO space.
         * Mapped by the last IIOMAPSIZE PTEs of the static kernel page
         * table.
         */
        RELOC(IODEVbase, char *) =
            (char *)m68k_ptob(nptpages*NPTEPG - IIOMAPSIZE);
        RELOC(intiobase, u_int8_t *) = RELOC(IODEVbase, u_int8_t *); /* XXX */
        RELOC(intiolimit, char *) =
            (char *)m68k_ptob(nptpages*NPTEPG);

        /*
         * Setup u-area for process 0.
         */
        /*
         * Zero the u-area.
         * NOTE: `pte' and `epte' aren't PTEs here.
         */
        pte = (u_int *)p0upa;
        epte = (u_int *)(p0upa + USPACE);
        while (pte < epte)
                *pte++ = 0;
        /*
         * Remember the u-area address so it can be loaded in the
         * proc struct p_addr field later.
         */
        RELOC(proc0paddr, char *) = (char *)(p0upa - firstpa);

        /*
         * VM data structures are now initialized, set up data for
         * the pmap module.
         */
        RELOC(avail_start, paddr_t) = nextpa;
        RELOC(avail_end, paddr_t) =
            m68k_ptob(RELOC(maxmem, int))
            /* XXX allow for msgbuf */
            - m68k_round_page(MSGBUFSIZE);
        RELOC(mem_size, psize_t) = m68k_ptob(RELOC(physmem, int));
        RELOC(virtual_avail, vaddr_t) =
            VM_MIN_KERNEL_ADDRESS + (nextpa - firstpa);
        RELOC(virtual_end, vaddr_t) = VM_MAX_KERNEL_ADDRESS;

        /*
         * Initialize protection array.
         * XXX don't use a switch statement, it might produce an
         * absolute "jmp" table.
         */
        {
                int *kp;

                kp = &RELOC(protection_codes, int);
                kp[VM_PROT_NONE|VM_PROT_NONE|VM_PROT_NONE] = 0;
                kp[VM_PROT_READ|VM_PROT_NONE|VM_PROT_NONE] = PG_RO;
                kp[VM_PROT_READ|VM_PROT_NONE|VM_PROT_EXECUTE] = PG_RO;
                kp[VM_PROT_NONE|VM_PROT_NONE|VM_PROT_EXECUTE] = PG_RO;
                kp[VM_PROT_NONE|VM_PROT_WRITE|VM_PROT_NONE] = PG_RW;
                kp[VM_PROT_NONE|VM_PROT_WRITE|VM_PROT_EXECUTE] = PG_RW;
                kp[VM_PROT_READ|VM_PROT_WRITE|VM_PROT_NONE] = PG_RW;
                kp[VM_PROT_READ|VM_PROT_WRITE|VM_PROT_EXECUTE] = PG_RW;
        }
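
        /*
         * The array above is indexed by a vm_prot_t value, i.e. by the
         * 3-bit combination of VM_PROT_READ, VM_PROT_WRITE and
         * VM_PROT_EXECUTE, which is why all eight combinations are
         * spelled out explicitly.
         */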

        /*
         * Kernel page/segment table allocated above,
         * just initialize pointers.
         */
        {
                struct pmap *kpm = &RELOC(kernel_pmap_store, struct pmap);

                kpm->pm_stab = RELOC(Sysseg, st_entry_t *);
                kpm->pm_ptab = RELOC(Sysmap, pt_entry_t *);
                simple_lock_init(&kpm->pm_lock);
                kpm->pm_count = 1;
                kpm->pm_stpa = (st_entry_t *)kstpa;
#if defined(M68040) || defined(M68060)
                /*
                 * For the 040 we also initialize the free level 2
                 * descriptor mask noting that we have used:
                 *	0:		level 1 table
                 *	1 to `num':	map page tables
                 */
                if (RELOC(mmutype, int) == MMU_68040) {
                        int num;

                        kpm->pm_stfree = ~l2tobm(0);
                        num = roundup((nptpages + 1) * (NPTEPG / SG4_LEV3SIZE),
                            SG4_LEV2SIZE) / SG4_LEV2SIZE;
                        while (num)
                                kpm->pm_stfree &= ~l2tobm(num--);
                        for (num = MAXKL2SIZE;
                             num < sizeof(kpm->pm_stfree)*NBBY;
                             num++)
                                kpm->pm_stfree &= ~l2tobm(num);
                }
#endif
        }

        /*
         * Allocate some fixed, special purpose kernel virtual addresses
         */
        {
                vaddr_t va = RELOC(virtual_avail, vaddr_t);

                RELOC(CADDR1, caddr_t) = (caddr_t)va;
                va += NBPG;
                RELOC(CADDR2, caddr_t) = (caddr_t)va;
                va += NBPG;
                RELOC(vmmap, caddr_t) = (caddr_t)va;
                va += NBPG;
                RELOC(msgbufaddr, caddr_t) = (caddr_t)va;
                va += m68k_round_page(MSGBUFSIZE);
                RELOC(virtual_avail, vaddr_t) = va;
        }
}

/*
 * Reserve the statically mapped internal IO window (IIOMAPSIZE pages at
 * intiobase) in the kernel map so that UVM never hands out that range
 * for anything else.
 */
void
pmap_init_md(void)
{
        vaddr_t addr;

        addr = (vaddr_t) intiobase;
        if (uvm_map(kernel_map, &addr,
                    m68k_ptob(IIOMAPSIZE),
                    NULL, UVM_UNKNOWN_OFFSET, 0,
                    UVM_MAPFLAG(UVM_PROT_NONE, UVM_PROT_NONE,
                                UVM_INH_NONE, UVM_ADV_RANDOM,
                                UVM_FLAG_FIXED)) != 0)
                panic("pmap_init_md: uvm_map failed");
}