1 2 #define _SYSTEM 1 3 4 #include <minix/callnr.h> 5 #include <minix/com.h> 6 #include <minix/config.h> 7 #include <minix/const.h> 8 #include <minix/ds.h> 9 #include <minix/endpoint.h> 10 #include <minix/minlib.h> 11 #include <minix/type.h> 12 #include <minix/ipc.h> 13 #include <minix/sysutil.h> 14 #include <minix/syslib.h> 15 #include <minix/safecopies.h> 16 #include <minix/cpufeature.h> 17 #include <minix/bitmap.h> 18 #include <minix/debug.h> 19 20 #include <errno.h> 21 #include <stdlib.h> 22 #include <assert.h> 23 #include <string.h> 24 #include <env.h> 25 #include <stdio.h> 26 #include <fcntl.h> 27 #include <stdlib.h> 28 29 #include "proto.h" 30 #include "glo.h" 31 #include "util.h" 32 #include "vm.h" 33 #include "sanitycheck.h" 34 35 static int vm_self_pages; 36 37 /* PDE used to map in kernel, kernel physical address. */ 38 #define MAX_PAGEDIR_PDES 5 39 static struct pdm { 40 int pdeno; 41 u32_t val; 42 phys_bytes phys; 43 u32_t *page_directories; 44 } pagedir_mappings[MAX_PAGEDIR_PDES]; 45 46 static multiboot_module_t *kern_mb_mod = NULL; 47 static size_t kern_size = 0; 48 static int kern_start_pde = -1; 49 50 /* big page size available in hardware? */ 51 static int bigpage_ok = 1; 52 53 /* Our process table entry. */ 54 struct vmproc *vmprocess = &vmproc[VM_PROC_NR]; 55 56 /* Spare memory, ready to go after initialization, to avoid a 57 * circular dependency on allocating memory and writing it into VM's 58 * page table. 
59 */ 60 #if SANITYCHECKS 61 #define SPAREPAGES 200 62 #define STATIC_SPAREPAGES 190 63 #else 64 #ifdef __arm__ 65 # define SPAREPAGES 150 66 # define STATIC_SPAREPAGES 140 67 #else 68 # define SPAREPAGES 20 69 # define STATIC_SPAREPAGES 15 70 #endif /* __arm__ */ 71 #endif 72 73 #ifdef __i386__ 74 static u32_t global_bit = 0; 75 #endif 76 77 #define SPAREPAGEDIRS 1 78 #define STATIC_SPAREPAGEDIRS 1 79 80 int missing_sparedirs = SPAREPAGEDIRS; 81 static struct { 82 void *pagedir; 83 phys_bytes phys; 84 } sparepagedirs[SPAREPAGEDIRS]; 85 86 #define is_staticaddr(v) ((vir_bytes) (v) < VM_OWN_HEAPSTART) 87 88 #define MAX_KERNMAPPINGS 10 89 static struct { 90 phys_bytes phys_addr; /* Physical addr. */ 91 phys_bytes len; /* Length in bytes. */ 92 vir_bytes vir_addr; /* Offset in page table. */ 93 int flags; 94 } kern_mappings[MAX_KERNMAPPINGS]; 95 int kernmappings = 0; 96 97 /* Clicks must be pages, as 98 * - they must be page aligned to map them 99 * - they must be a multiple of the page size 100 * - it's inconvenient to have them bigger than pages, because we often want 101 * just one page 102 * May as well require them to be equal then. 103 */ 104 #if CLICK_SIZE != VM_PAGE_SIZE 105 #error CLICK_SIZE must be page size. 
106 #endif 107 108 static void *spare_pagequeue; 109 static char static_sparepages[VM_PAGE_SIZE*STATIC_SPAREPAGES] 110 __aligned(VM_PAGE_SIZE); 111 112 #if defined(__arm__) 113 static char static_sparepagedirs[ARCH_PAGEDIR_SIZE*STATIC_SPAREPAGEDIRS + ARCH_PAGEDIR_SIZE] __aligned(ARCH_PAGEDIR_SIZE); 114 #endif 115 116 void pt_assert(pt_t *pt) 117 { 118 char dir[4096]; 119 pt_clearmapcache(); 120 if((sys_vmctl(SELF, VMCTL_FLUSHTLB, 0)) != OK) { 121 panic("VMCTL_FLUSHTLB failed"); 122 } 123 sys_physcopy(NONE, pt->pt_dir_phys, SELF, (vir_bytes) dir, sizeof(dir), 0); 124 assert(!memcmp(dir, pt->pt_dir, sizeof(dir))); 125 } 126 127 #if SANITYCHECKS 128 /*===========================================================================* 129 * pt_sanitycheck * 130 *===========================================================================*/ 131 void pt_sanitycheck(pt_t *pt, const char *file, int line) 132 { 133 /* Basic pt sanity check. */ 134 int slot; 135 136 MYASSERT(pt); 137 MYASSERT(pt->pt_dir); 138 MYASSERT(pt->pt_dir_phys); 139 140 for(slot = 0; slot < ELEMENTS(vmproc); slot++) { 141 if(pt == &vmproc[slot].vm_pt) 142 break; 143 } 144 145 if(slot >= ELEMENTS(vmproc)) { 146 panic("pt_sanitycheck: passed pt not in any proc"); 147 } 148 149 MYASSERT(usedpages_add(pt->pt_dir_phys, VM_PAGE_SIZE) == OK); 150 } 151 #endif 152 153 /*===========================================================================* 154 * findhole * 155 *===========================================================================*/ 156 static u32_t findhole(int pages) 157 { 158 /* Find a space in the virtual address space of VM. */ 159 u32_t curv; 160 int pde = 0, try_restart; 161 static void *lastv = 0; 162 pt_t *pt = &vmprocess->vm_pt; 163 vir_bytes vmin, vmax; 164 u32_t holev = NO_MEM; 165 int holesize = -1; 166 167 vmin = VM_OWN_MMAPBASE; 168 vmax = VM_OWN_MMAPTOP; 169 170 /* Input sanity check. 
*/ 171 assert(vmin + VM_PAGE_SIZE >= vmin); 172 assert(vmax >= vmin + VM_PAGE_SIZE); 173 assert((vmin % VM_PAGE_SIZE) == 0); 174 assert((vmax % VM_PAGE_SIZE) == 0); 175 assert(pages > 0); 176 177 curv = (u32_t) lastv; 178 if(curv < vmin || curv >= vmax) 179 curv = vmin; 180 181 try_restart = 1; 182 183 /* Start looking for a free page starting at vmin. */ 184 while(curv < vmax) { 185 int pte; 186 187 assert(curv >= vmin); 188 assert(curv < vmax); 189 190 pde = ARCH_VM_PDE(curv); 191 pte = ARCH_VM_PTE(curv); 192 193 if((pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT) && 194 (pt->pt_pt[pde][pte] & ARCH_VM_PTE_PRESENT)) { 195 /* there is a page here - so keep looking for holes */ 196 holev = NO_MEM; 197 holesize = 0; 198 } else { 199 /* there is no page here - so we have a hole, a bigger 200 * one if we already had one 201 */ 202 if(holev == NO_MEM) { 203 holev = curv; 204 holesize = 1; 205 } else holesize++; 206 207 assert(holesize > 0); 208 assert(holesize <= pages); 209 210 /* if it's big enough, return it */ 211 if(holesize == pages) { 212 lastv = (void*) (curv + VM_PAGE_SIZE); 213 return holev; 214 } 215 } 216 217 curv+=VM_PAGE_SIZE; 218 219 /* if we reached the limit, start scanning from the beginning if 220 * we haven't looked there yet 221 */ 222 if(curv >= vmax && try_restart) { 223 try_restart = 0; 224 curv = vmin; 225 } 226 } 227 228 printf("VM: out of virtual address space in vm\n"); 229 230 return NO_MEM; 231 } 232 233 /*===========================================================================* 234 * vm_freepages * 235 *===========================================================================*/ 236 void vm_freepages(vir_bytes vir, int pages) 237 { 238 assert(!(vir % VM_PAGE_SIZE)); 239 240 if(is_staticaddr(vir)) { 241 printf("VM: not freeing static page\n"); 242 return; 243 } 244 245 if(pt_writemap(vmprocess, &vmprocess->vm_pt, vir, 246 MAP_NONE, pages*VM_PAGE_SIZE, 0, 247 WMF_OVERWRITE | WMF_FREE) != OK) 248 panic("vm_freepages: pt_writemap failed"); 249 250 
vm_self_pages--; 251 252 #if SANITYCHECKS 253 /* If SANITYCHECKS are on, flush tlb so accessing freed pages is 254 * always trapped, also if not in tlb. 255 */ 256 if((sys_vmctl(SELF, VMCTL_FLUSHTLB, 0)) != OK) { 257 panic("VMCTL_FLUSHTLB failed"); 258 } 259 #endif 260 } 261 262 /*===========================================================================* 263 * vm_getsparepage * 264 *===========================================================================*/ 265 static void *vm_getsparepage(phys_bytes *phys) 266 { 267 void *ptr; 268 if(reservedqueue_alloc(spare_pagequeue, phys, &ptr) != OK) { 269 return NULL; 270 } 271 assert(ptr); 272 return ptr; 273 } 274 275 /*===========================================================================* 276 * vm_getsparepagedir * 277 *===========================================================================*/ 278 static void *vm_getsparepagedir(phys_bytes *phys) 279 { 280 int s; 281 assert(missing_sparedirs >= 0 && missing_sparedirs <= SPAREPAGEDIRS); 282 for(s = 0; s < SPAREPAGEDIRS; s++) { 283 if(sparepagedirs[s].pagedir) { 284 void *sp; 285 sp = sparepagedirs[s].pagedir; 286 *phys = sparepagedirs[s].phys; 287 sparepagedirs[s].pagedir = NULL; 288 missing_sparedirs++; 289 assert(missing_sparedirs >= 0 && missing_sparedirs <= SPAREPAGEDIRS); 290 return sp; 291 } 292 } 293 return NULL; 294 } 295 296 void *vm_mappages(phys_bytes p, int pages) 297 { 298 vir_bytes loc; 299 int r; 300 pt_t *pt = &vmprocess->vm_pt; 301 302 /* Where in our virtual address space can we put it? */ 303 loc = findhole(pages); 304 if(loc == NO_MEM) { 305 printf("vm_mappages: findhole failed\n"); 306 return NULL; 307 } 308 309 /* Map this page into our address space. 
*/ 310 if((r=pt_writemap(vmprocess, pt, loc, p, VM_PAGE_SIZE*pages, 311 ARCH_VM_PTE_PRESENT | ARCH_VM_PTE_USER | ARCH_VM_PTE_RW 312 #if defined(__arm__) 313 | ARM_VM_PTE_CACHED 314 #endif 315 , 0)) != OK) { 316 printf("vm_mappages writemap failed\n"); 317 return NULL; 318 } 319 320 if((r=sys_vmctl(SELF, VMCTL_FLUSHTLB, 0)) != OK) { 321 panic("VMCTL_FLUSHTLB failed: %d", r); 322 } 323 324 assert(loc); 325 326 return (void *) loc; 327 } 328 329 static int pt_init_done; 330 331 /*===========================================================================* 332 * vm_allocpage * 333 *===========================================================================*/ 334 void *vm_allocpages(phys_bytes *phys, int reason, int pages) 335 { 336 /* Allocate a page for use by VM itself. */ 337 phys_bytes newpage; 338 static int level = 0; 339 void *ret; 340 u32_t mem_flags = 0; 341 342 assert(reason >= 0 && reason < VMP_CATEGORIES); 343 344 assert(pages > 0); 345 346 level++; 347 348 assert(level >= 1); 349 assert(level <= 2); 350 351 if((level > 1) || !pt_init_done) { 352 void *s; 353 354 if(pages == 1) s=vm_getsparepage(phys); 355 else if(pages == 4) s=vm_getsparepagedir(phys); 356 else panic("%d pages", pages); 357 358 level--; 359 if(!s) { 360 util_stacktrace(); 361 printf("VM: warning: out of spare pages\n"); 362 } 363 if(!is_staticaddr(s)) vm_self_pages++; 364 return s; 365 } 366 367 #if defined(__arm__) 368 if (reason == VMP_PAGEDIR) { 369 mem_flags |= PAF_ALIGN16K; 370 } 371 #endif 372 373 /* Allocate page of memory for use by VM. As VM 374 * is trusted, we don't have to pre-clear it. 
375 */ 376 if((newpage = alloc_mem(pages, mem_flags)) == NO_MEM) { 377 level--; 378 printf("VM: vm_allocpage: alloc_mem failed\n"); 379 return NULL; 380 } 381 382 *phys = CLICK2ABS(newpage); 383 384 if(!(ret = vm_mappages(*phys, pages))) { 385 level--; 386 printf("VM: vm_allocpage: vm_mappages failed\n"); 387 return NULL; 388 } 389 390 level--; 391 vm_self_pages++; 392 393 return ret; 394 } 395 396 void *vm_allocpage(phys_bytes *phys, int reason) 397 { 398 return vm_allocpages(phys, reason, 1); 399 } 400 401 /*===========================================================================* 402 * vm_pagelock * 403 *===========================================================================*/ 404 void vm_pagelock(void *vir, int lockflag) 405 { 406 /* Mark a page allocated by vm_allocpage() unwritable, i.e. only for VM. */ 407 vir_bytes m = (vir_bytes) vir; 408 int r; 409 u32_t flags = ARCH_VM_PTE_PRESENT | ARCH_VM_PTE_USER; 410 pt_t *pt; 411 412 pt = &vmprocess->vm_pt; 413 414 assert(!(m % VM_PAGE_SIZE)); 415 416 if(!lockflag) 417 flags |= ARCH_VM_PTE_RW; 418 #if defined(__arm__) 419 else 420 flags |= ARCH_VM_PTE_RO; 421 422 flags |= ARM_VM_PTE_CACHED ; 423 #endif 424 425 /* Update flags. 
*/ 426 if((r=pt_writemap(vmprocess, pt, m, 0, VM_PAGE_SIZE, 427 flags, WMF_OVERWRITE | WMF_WRITEFLAGSONLY)) != OK) { 428 panic("vm_lockpage: pt_writemap failed"); 429 } 430 431 if((r=sys_vmctl(SELF, VMCTL_FLUSHTLB, 0)) != OK) { 432 panic("VMCTL_FLUSHTLB failed: %d", r); 433 } 434 435 return; 436 } 437 438 /*===========================================================================* 439 * vm_addrok * 440 *===========================================================================*/ 441 int vm_addrok(void *vir, int writeflag) 442 { 443 pt_t *pt = &vmprocess->vm_pt; 444 int pde, pte; 445 vir_bytes v = (vir_bytes) vir; 446 447 pde = ARCH_VM_PDE(v); 448 pte = ARCH_VM_PTE(v); 449 450 if(!(pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT)) { 451 printf("addr not ok: missing pde %d\n", pde); 452 return 0; 453 } 454 455 #if defined(__i386__) 456 if(writeflag && 457 !(pt->pt_dir[pde] & ARCH_VM_PTE_RW)) { 458 printf("addr not ok: pde %d present but pde unwritable\n", pde); 459 return 0; 460 } 461 #elif defined(__arm__) 462 if(writeflag && 463 (pt->pt_dir[pde] & ARCH_VM_PTE_RO)) { 464 printf("addr not ok: pde %d present but pde unwritable\n", pde); 465 return 0; 466 } 467 468 #endif 469 if(!(pt->pt_pt[pde][pte] & ARCH_VM_PTE_PRESENT)) { 470 printf("addr not ok: missing pde %d / pte %d\n", 471 pde, pte); 472 return 0; 473 } 474 475 #if defined(__i386__) 476 if(writeflag && 477 !(pt->pt_pt[pde][pte] & ARCH_VM_PTE_RW)) { 478 printf("addr not ok: pde %d / pte %d present but unwritable\n", 479 pde, pte); 480 #elif defined(__arm__) 481 if(writeflag && 482 (pt->pt_pt[pde][pte] & ARCH_VM_PTE_RO)) { 483 printf("addr not ok: pde %d / pte %d present but unwritable\n", 484 pde, pte); 485 #endif 486 return 0; 487 } 488 489 return 1; 490 } 491 492 /*===========================================================================* 493 * pt_ptalloc * 494 *===========================================================================*/ 495 static int pt_ptalloc(pt_t *pt, int pde, u32_t flags) 496 { 497 /* 
Allocate a page table and write its address into the page directory. */ 498 int i; 499 phys_bytes pt_phys; 500 u32_t *p; 501 502 /* Argument must make sense. */ 503 assert(pde >= 0 && pde < ARCH_VM_DIR_ENTRIES); 504 assert(!(flags & ~(PTF_ALLFLAGS))); 505 506 /* We don't expect to overwrite page directory entry, nor 507 * storage for the page table. 508 */ 509 assert(!(pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT)); 510 assert(!pt->pt_pt[pde]); 511 512 /* Get storage for the page table. The allocation call may in fact 513 * recursively create the directory entry as a side effect. In that 514 * case, we free the newly allocated page and do nothing else. 515 */ 516 if (!(p = vm_allocpage(&pt_phys, VMP_PAGETABLE))) 517 return ENOMEM; 518 if (pt->pt_pt[pde]) { 519 vm_freepages((vir_bytes) p, 1); 520 assert(pt->pt_pt[pde]); 521 return OK; 522 } 523 pt->pt_pt[pde] = p; 524 525 for(i = 0; i < ARCH_VM_PT_ENTRIES; i++) 526 pt->pt_pt[pde][i] = 0; /* Empty entry. */ 527 528 /* Make page directory entry. 529 * The PDE is always 'present,' 'writable,' and 'user accessible,' 530 * relying on the PTE for protection. 531 */ 532 #if defined(__i386__) 533 pt->pt_dir[pde] = (pt_phys & ARCH_VM_ADDR_MASK) | flags 534 | ARCH_VM_PDE_PRESENT | ARCH_VM_PTE_USER | ARCH_VM_PTE_RW; 535 #elif defined(__arm__) 536 pt->pt_dir[pde] = (pt_phys & ARCH_VM_PDE_MASK) 537 | ARCH_VM_PDE_PRESENT | ARM_VM_PDE_DOMAIN; //LSC FIXME 538 #endif 539 540 return OK; 541 } 542 543 /*===========================================================================* 544 * pt_ptalloc_in_range * 545 *===========================================================================*/ 546 int pt_ptalloc_in_range(pt_t *pt, vir_bytes start, vir_bytes end, 547 u32_t flags, int verify) 548 { 549 /* Allocate all the page tables in the range specified. 
*/ 550 int pde, first_pde, last_pde; 551 552 first_pde = ARCH_VM_PDE(start); 553 last_pde = ARCH_VM_PDE(end-1); 554 555 assert(first_pde >= 0); 556 assert(last_pde < ARCH_VM_DIR_ENTRIES); 557 558 /* Scan all page-directory entries in the range. */ 559 for(pde = first_pde; pde <= last_pde; pde++) { 560 assert(!(pt->pt_dir[pde] & ARCH_VM_BIGPAGE)); 561 if(!(pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT)) { 562 int r; 563 if(verify) { 564 printf("pt_ptalloc_in_range: no pde %d\n", pde); 565 return EFAULT; 566 } 567 assert(!pt->pt_dir[pde]); 568 if((r=pt_ptalloc(pt, pde, flags)) != OK) { 569 /* Couldn't do (complete) mapping. 570 * Don't bother freeing any previously 571 * allocated page tables, they're 572 * still writable, don't point to nonsense, 573 * and pt_ptalloc leaves the directory 574 * and other data in a consistent state. 575 */ 576 return r; 577 } 578 assert(pt->pt_pt[pde]); 579 } 580 assert(pt->pt_pt[pde]); 581 assert(pt->pt_dir[pde]); 582 assert(pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT); 583 } 584 585 return OK; 586 } 587 588 static const char *ptestr(u32_t pte) 589 { 590 #define FLAG(constant, name) { \ 591 if(pte & (constant)) { strcat(str, name); strcat(str, " "); } \ 592 } 593 594 static char str[30]; 595 if(!(pte & ARCH_VM_PTE_PRESENT)) { 596 return "not present"; 597 } 598 str[0] = '\0'; 599 #if defined(__i386__) 600 FLAG(ARCH_VM_PTE_RW, "W"); 601 #elif defined(__arm__) 602 if(pte & ARCH_VM_PTE_RO) { 603 strcat(str, "R "); 604 } else { 605 strcat(str, "W "); 606 } 607 #endif 608 FLAG(ARCH_VM_PTE_USER, "U"); 609 #if defined(__i386__) 610 FLAG(I386_VM_PWT, "PWT"); 611 FLAG(I386_VM_PCD, "PCD"); 612 FLAG(I386_VM_ACC, "ACC"); 613 FLAG(I386_VM_DIRTY, "DIRTY"); 614 FLAG(I386_VM_PS, "PS"); 615 FLAG(I386_VM_GLOBAL, "G"); 616 FLAG(I386_VM_PTAVAIL1, "AV1"); 617 FLAG(I386_VM_PTAVAIL2, "AV2"); 618 FLAG(I386_VM_PTAVAIL3, "AV3"); 619 #elif defined(__arm__) 620 FLAG(ARM_VM_PTE_SUPER, "S"); 621 FLAG(ARM_VM_PTE_S, "SH"); 622 FLAG(ARM_VM_PTE_WB, "WB"); 623 FLAG(ARM_VM_PTE_WT, 
"WT"); 624 #endif 625 626 return str; 627 } 628 629 /*===========================================================================* 630 * pt_map_in_range * 631 *===========================================================================*/ 632 int pt_map_in_range(struct vmproc *src_vmp, struct vmproc *dst_vmp, 633 vir_bytes start, vir_bytes end) 634 { 635 /* Transfer all the mappings from the pt of the source process to the pt of 636 * the destination process in the range specified. 637 */ 638 int pde, pte; 639 vir_bytes viraddr; 640 pt_t *pt, *dst_pt; 641 642 pt = &src_vmp->vm_pt; 643 dst_pt = &dst_vmp->vm_pt; 644 645 end = end ? end : VM_DATATOP; 646 assert(start % VM_PAGE_SIZE == 0); 647 assert(end % VM_PAGE_SIZE == 0); 648 649 assert( /* ARCH_VM_PDE(start) >= 0 && */ start <= end); 650 assert(ARCH_VM_PDE(end) < ARCH_VM_DIR_ENTRIES); 651 652 #if LU_DEBUG 653 printf("VM: pt_map_in_range: src = %d, dst = %d\n", 654 src_vmp->vm_endpoint, dst_vmp->vm_endpoint); 655 printf("VM: pt_map_in_range: transferring from 0x%08x (pde %d pte %d) to 0x%08x (pde %d pte %d)\n", 656 start, ARCH_VM_PDE(start), ARCH_VM_PTE(start), 657 end, ARCH_VM_PDE(end), ARCH_VM_PTE(end)); 658 #endif 659 660 /* Scan all page-table entries in the range. */ 661 for(viraddr = start; viraddr <= end; viraddr += VM_PAGE_SIZE) { 662 pde = ARCH_VM_PDE(viraddr); 663 if(!(pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT)) { 664 if(viraddr == VM_DATATOP) break; 665 continue; 666 } 667 pte = ARCH_VM_PTE(viraddr); 668 if(!(pt->pt_pt[pde][pte] & ARCH_VM_PTE_PRESENT)) { 669 if(viraddr == VM_DATATOP) break; 670 continue; 671 } 672 673 /* Transfer the mapping. 
*/ 674 dst_pt->pt_pt[pde][pte] = pt->pt_pt[pde][pte]; 675 assert(dst_pt->pt_pt[pde]); 676 677 if(viraddr == VM_DATATOP) break; 678 } 679 680 return OK; 681 } 682 683 /*===========================================================================* 684 * pt_ptmap * 685 *===========================================================================*/ 686 int pt_ptmap(struct vmproc *src_vmp, struct vmproc *dst_vmp) 687 { 688 /* Transfer mappings to page dir and page tables from source process and 689 * destination process. 690 */ 691 int pde, r; 692 phys_bytes physaddr; 693 vir_bytes viraddr; 694 pt_t *pt; 695 696 pt = &src_vmp->vm_pt; 697 698 #if LU_DEBUG 699 printf("VM: pt_ptmap: src = %d, dst = %d\n", 700 src_vmp->vm_endpoint, dst_vmp->vm_endpoint); 701 #endif 702 703 /* Transfer mapping to the page directory. */ 704 viraddr = (vir_bytes) pt->pt_dir; 705 physaddr = pt->pt_dir_phys & ARCH_VM_ADDR_MASK; 706 #if defined(__i386__) 707 if((r=pt_writemap(dst_vmp, &dst_vmp->vm_pt, viraddr, physaddr, VM_PAGE_SIZE, 708 ARCH_VM_PTE_PRESENT | ARCH_VM_PTE_USER | ARCH_VM_PTE_RW, 709 #elif defined(__arm__) 710 if((r=pt_writemap(dst_vmp, &dst_vmp->vm_pt, viraddr, physaddr, ARCH_PAGEDIR_SIZE, 711 ARCH_VM_PTE_PRESENT | ARCH_VM_PTE_USER | 712 ARM_VM_PTE_CACHED , 713 #endif 714 WMF_OVERWRITE)) != OK) { 715 return r; 716 } 717 #if LU_DEBUG 718 printf("VM: pt_ptmap: transferred mapping to page dir: 0x%08x (0x%08x)\n", 719 viraddr, physaddr); 720 #endif 721 722 /* Scan all non-reserved page-directory entries. */ 723 for(pde=0; pde < kern_start_pde; pde++) { 724 if(!(pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT)) { 725 continue; 726 } 727 728 if(!pt->pt_pt[pde]) { panic("pde %d empty\n", pde); } 729 730 /* Transfer mapping to the page table. 
*/ 731 viraddr = (vir_bytes) pt->pt_pt[pde]; 732 #if defined(__i386__) 733 physaddr = pt->pt_dir[pde] & ARCH_VM_ADDR_MASK; 734 #elif defined(__arm__) 735 physaddr = pt->pt_dir[pde] & ARCH_VM_PDE_MASK; 736 #endif 737 assert(viraddr); 738 if((r=pt_writemap(dst_vmp, &dst_vmp->vm_pt, viraddr, physaddr, VM_PAGE_SIZE, 739 ARCH_VM_PTE_PRESENT | ARCH_VM_PTE_USER | ARCH_VM_PTE_RW 740 #ifdef __arm__ 741 | ARM_VM_PTE_CACHED 742 #endif 743 , 744 WMF_OVERWRITE)) != OK) { 745 return r; 746 } 747 } 748 749 return OK; 750 } 751 752 void pt_clearmapcache(void) 753 { 754 /* Make sure kernel will invalidate tlb when using current 755 * pagetable (i.e. vm's) to make new mappings before new cr3 756 * is loaded. 757 */ 758 if(sys_vmctl(SELF, VMCTL_CLEARMAPCACHE, 0) != OK) 759 panic("VMCTL_CLEARMAPCACHE failed"); 760 } 761 762 int pt_writable(struct vmproc *vmp, vir_bytes v) 763 { 764 u32_t entry; 765 pt_t *pt = &vmp->vm_pt; 766 assert(!(v % VM_PAGE_SIZE)); 767 int pde = ARCH_VM_PDE(v); 768 int pte = ARCH_VM_PTE(v); 769 770 assert(pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT); 771 assert(pt->pt_pt[pde]); 772 773 entry = pt->pt_pt[pde][pte]; 774 775 #if defined(__i386__) 776 return((entry & PTF_WRITE) ? 1 : 0); 777 #elif defined(__arm__) 778 return((entry & ARCH_VM_PTE_RO) ? 0 : 1); 779 #endif 780 } 781 782 /*===========================================================================* 783 * pt_writemap * 784 *===========================================================================*/ 785 int pt_writemap(struct vmproc * vmp, 786 pt_t *pt, 787 vir_bytes v, 788 phys_bytes physaddr, 789 size_t bytes, 790 u32_t flags, 791 u32_t writemapflags) 792 { 793 /* Write mapping into page table. Allocate a new page table if necessary. */ 794 /* Page directory and table entries for this virtual address. 
*/ 795 int p, pages; 796 int verify = 0; 797 int ret = OK; 798 799 #ifdef CONFIG_SMP 800 int vminhibit_clear = 0; 801 /* FIXME 802 * don't do it everytime, stop the process only on the first change and 803 * resume the execution on the last change. Do in a wrapper of this 804 * function 805 */ 806 if (vmp && vmp->vm_endpoint != NONE && vmp->vm_endpoint != VM_PROC_NR && 807 !(vmp->vm_flags & VMF_EXITING)) { 808 sys_vmctl(vmp->vm_endpoint, VMCTL_VMINHIBIT_SET, 0); 809 vminhibit_clear = 1; 810 } 811 #endif 812 813 if(writemapflags & WMF_VERIFY) 814 verify = 1; 815 816 assert(!(bytes % VM_PAGE_SIZE)); 817 assert(!(flags & ~(PTF_ALLFLAGS))); 818 819 pages = bytes / VM_PAGE_SIZE; 820 821 /* MAP_NONE means to clear the mapping. It doesn't matter 822 * what's actually written into the PTE if PRESENT 823 * isn't on, so we can just write MAP_NONE into it. 824 */ 825 assert(physaddr == MAP_NONE || (flags & ARCH_VM_PTE_PRESENT)); 826 assert(physaddr != MAP_NONE || !flags); 827 828 /* First make sure all the necessary page tables are allocated, 829 * before we start writing in any of them, because it's a pain 830 * to undo our work properly. 831 */ 832 ret = pt_ptalloc_in_range(pt, v, v + VM_PAGE_SIZE*pages, flags, verify); 833 if(ret != OK) { 834 printf("VM: writemap: pt_ptalloc_in_range failed\n"); 835 goto resume_exit; 836 } 837 838 /* Now write in them. */ 839 for(p = 0; p < pages; p++) { 840 u32_t entry; 841 int pde = ARCH_VM_PDE(v); 842 int pte = ARCH_VM_PTE(v); 843 844 assert(!(v % VM_PAGE_SIZE)); 845 assert(pte >= 0 && pte < ARCH_VM_PT_ENTRIES); 846 assert(pde >= 0 && pde < ARCH_VM_DIR_ENTRIES); 847 848 /* Page table has to be there. */ 849 assert(pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT); 850 851 /* We do not expect it to be a bigpage. */ 852 assert(!(pt->pt_dir[pde] & ARCH_VM_BIGPAGE)); 853 854 /* Make sure page directory entry for this page table 855 * is marked present and page table entry is available. 
856 */ 857 assert(pt->pt_pt[pde]); 858 859 if(writemapflags & (WMF_WRITEFLAGSONLY|WMF_FREE)) { 860 #if defined(__i386__) 861 physaddr = pt->pt_pt[pde][pte] & ARCH_VM_ADDR_MASK; 862 #elif defined(__arm__) 863 physaddr = pt->pt_pt[pde][pte] & ARM_VM_PTE_MASK; 864 #endif 865 } 866 867 if(writemapflags & WMF_FREE) { 868 free_mem(ABS2CLICK(physaddr), 1); 869 } 870 871 /* Entry we will write. */ 872 #if defined(__i386__) 873 entry = (physaddr & ARCH_VM_ADDR_MASK) | flags; 874 #elif defined(__arm__) 875 entry = (physaddr & ARM_VM_PTE_MASK) | flags; 876 #endif 877 878 if(verify) { 879 u32_t maskedentry; 880 maskedentry = pt->pt_pt[pde][pte]; 881 #if defined(__i386__) 882 maskedentry &= ~(I386_VM_ACC|I386_VM_DIRTY); 883 #endif 884 /* Verify pagetable entry. */ 885 #if defined(__i386__) 886 if(entry & ARCH_VM_PTE_RW) { 887 /* If we expect a writable page, allow a readonly page. */ 888 maskedentry |= ARCH_VM_PTE_RW; 889 } 890 #elif defined(__arm__) 891 if(!(entry & ARCH_VM_PTE_RO)) { 892 /* If we expect a writable page, allow a readonly page. */ 893 maskedentry &= ~ARCH_VM_PTE_RO; 894 } 895 maskedentry &= ~(ARM_VM_PTE_WB|ARM_VM_PTE_WT); 896 #endif 897 if(maskedentry != entry) { 898 printf("pt_writemap: mismatch: "); 899 #if defined(__i386__) 900 if((entry & ARCH_VM_ADDR_MASK) != 901 (maskedentry & ARCH_VM_ADDR_MASK)) { 902 #elif defined(__arm__) 903 if((entry & ARM_VM_PTE_MASK) != 904 (maskedentry & ARM_VM_PTE_MASK)) { 905 #endif 906 printf("pt_writemap: physaddr mismatch (0x%lx, 0x%lx); ", 907 (long)entry, (long)maskedentry); 908 } else printf("phys ok; "); 909 printf(" flags: found %s; ", 910 ptestr(pt->pt_pt[pde][pte])); 911 printf(" masked %s; ", 912 ptestr(maskedentry)); 913 printf(" expected %s\n", ptestr(entry)); 914 printf("found 0x%x, wanted 0x%x\n", 915 pt->pt_pt[pde][pte], entry); 916 ret = EFAULT; 917 goto resume_exit; 918 } 919 } else { 920 /* Write pagetable entry. 
*/ 921 pt->pt_pt[pde][pte] = entry; 922 } 923 924 physaddr += VM_PAGE_SIZE; 925 v += VM_PAGE_SIZE; 926 } 927 928 resume_exit: 929 930 #ifdef CONFIG_SMP 931 if (vminhibit_clear) { 932 assert(vmp && vmp->vm_endpoint != NONE && vmp->vm_endpoint != VM_PROC_NR && 933 !(vmp->vm_flags & VMF_EXITING)); 934 sys_vmctl(vmp->vm_endpoint, VMCTL_VMINHIBIT_CLEAR, 0); 935 } 936 #endif 937 938 return ret; 939 } 940 941 /*===========================================================================* 942 * pt_checkrange * 943 *===========================================================================*/ 944 int pt_checkrange(pt_t *pt, vir_bytes v, size_t bytes, 945 int write) 946 { 947 int p, pages; 948 949 assert(!(bytes % VM_PAGE_SIZE)); 950 951 pages = bytes / VM_PAGE_SIZE; 952 953 for(p = 0; p < pages; p++) { 954 int pde = ARCH_VM_PDE(v); 955 int pte = ARCH_VM_PTE(v); 956 957 assert(!(v % VM_PAGE_SIZE)); 958 assert(pte >= 0 && pte < ARCH_VM_PT_ENTRIES); 959 assert(pde >= 0 && pde < ARCH_VM_DIR_ENTRIES); 960 961 /* Page table has to be there. */ 962 if(!(pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT)) 963 return EFAULT; 964 965 /* Make sure page directory entry for this page table 966 * is marked present and page table entry is available. 967 */ 968 assert((pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT) && pt->pt_pt[pde]); 969 970 if(!(pt->pt_pt[pde][pte] & ARCH_VM_PTE_PRESENT)) { 971 return EFAULT; 972 } 973 974 #if defined(__i386__) 975 if(write && !(pt->pt_pt[pde][pte] & ARCH_VM_PTE_RW)) { 976 #elif defined(__arm__) 977 if(write && (pt->pt_pt[pde][pte] & ARCH_VM_PTE_RO)) { 978 #endif 979 return EFAULT; 980 } 981 982 v += VM_PAGE_SIZE; 983 } 984 985 return OK; 986 } 987 988 /*===========================================================================* 989 * pt_new * 990 *===========================================================================*/ 991 int pt_new(pt_t *pt) 992 { 993 /* Allocate a pagetable root. 
Allocate a page-aligned page directory 994 * and set them to 0 (indicating no page tables are allocated). Lookup 995 * its physical address as we'll need that in the future. Verify it's 996 * page-aligned. 997 */ 998 int i, r; 999 1000 /* Don't ever re-allocate/re-move a certain process slot's 1001 * page directory once it's been created. This is a fraction 1002 * faster, but also avoids having to invalidate the page 1003 * mappings from in-kernel page tables pointing to 1004 * the page directories (the page_directories data). 1005 */ 1006 if(!pt->pt_dir && 1007 !(pt->pt_dir = vm_allocpages((phys_bytes *)&pt->pt_dir_phys, 1008 VMP_PAGEDIR, ARCH_PAGEDIR_SIZE/VM_PAGE_SIZE))) { 1009 return ENOMEM; 1010 } 1011 1012 assert(!((u32_t)pt->pt_dir_phys % ARCH_PAGEDIR_SIZE)); 1013 1014 for(i = 0; i < ARCH_VM_DIR_ENTRIES; i++) { 1015 pt->pt_dir[i] = 0; /* invalid entry (PRESENT bit = 0) */ 1016 pt->pt_pt[i] = NULL; 1017 } 1018 1019 /* Where to start looking for free virtual address space? */ 1020 pt->pt_virtop = 0; 1021 1022 /* Map in kernel. */ 1023 if((r=pt_mapkernel(pt)) != OK) 1024 return r; 1025 1026 return OK; 1027 } 1028 1029 static int freepde(void) 1030 { 1031 int p = kernel_boot_info.freepde_start++; 1032 assert(kernel_boot_info.freepde_start < ARCH_VM_DIR_ENTRIES); 1033 return p; 1034 } 1035 1036 void pt_allocate_kernel_mapped_pagetables(void) 1037 { 1038 /* Reserve PDEs available for mapping in the page directories. */ 1039 int pd; 1040 for(pd = 0; pd < MAX_PAGEDIR_PDES; pd++) { 1041 struct pdm *pdm = &pagedir_mappings[pd]; 1042 if(!pdm->pdeno) { 1043 pdm->pdeno = freepde(); 1044 assert(pdm->pdeno); 1045 } 1046 phys_bytes ph; 1047 1048 /* Allocate us a page table in which to 1049 * remember page directory pointers. 
 */
	/* Allocate the page that will hold the per-slot page directory
	 * entries for this pagedir-mapping slot, and record both its
	 * virtual and physical address.
	 */
	if(!(pdm->page_directories =
		vm_allocpage(&ph, VMP_PAGETABLE))) {
		panic("no virt addr for vm mappings");
	}
	memset(pdm->page_directories, 0, VM_PAGE_SIZE);
	pdm->phys = ph;

	/* Precompute the PDE value that maps this page in, in the
	 * architecture's format.
	 */
#if defined(__i386__)
	pdm->val = (ph & ARCH_VM_ADDR_MASK) |
		ARCH_VM_PDE_PRESENT | ARCH_VM_PTE_RW;
#elif defined(__arm__)
	pdm->val = (ph & ARCH_VM_PDE_MASK)
		| ARCH_VM_PDE_PRESENT
		| ARM_VM_PTE_CACHED
		| ARM_VM_PDE_DOMAIN; //LSC FIXME
#endif
   }
}

/* Copy all page tables below the kernel's first PDE from src to dst.
 * Used by pt_init() to rebuild VM's own page table out of purely
 * dynamically allocated memory: fresh page tables are allocated in dst
 * and the PTE contents are copied over verbatim.  Big-page PDEs must
 * not occur below kern_start_pde (asserted).
 */
static void pt_copy(pt_t *dst, pt_t *src)
{
	int pde;
	for(pde=0; pde < kern_start_pde; pde++) {
		if(!(src->pt_dir[pde] & ARCH_VM_PDE_PRESENT)) {
			continue;
		}
		assert(!(src->pt_dir[pde] & ARCH_VM_BIGPAGE));
		/* A present, non-big PDE must have a corresponding
		 * virtual page table pointer in src.
		 */
		if(!src->pt_pt[pde]) { panic("pde %d empty\n", pde); }
		if(pt_ptalloc(dst, pde, 0) != OK)
			panic("pt_ptalloc failed");
		memcpy(dst->pt_pt[pde], src->pt_pt[pde],
			ARCH_VM_PT_ENTRIES * sizeof(*dst->pt_pt[pde]));
	}
}

/*===========================================================================*
 *				pt_init					     *
 *===========================================================================*/
/* One-time paging initialization for the VM server itself.
 *
 * Overall sequence:
 *   1. Locate the kernel boot module and derive kern_start_pde.
 *   2. Register the statically allocated spare pages (and, on ARM,
 *      spare page directories) so memory can be allocated before VM
 *      has a functional page table of its own.
 *   3. Reserve PDEs for the kernel's own requested mappings and reply
 *      to the kernel with the virtual addresses chosen for them.
 *   4. Build VM's own page table (pt_new), copy in the page tables the
 *      kernel set up for VM, and bind it.
 *   5. Once dynamic allocation works, drain the static spare pool and
 *      rebuild everything from dynamically allocated memory (so
 *      physical addresses survive live update), flushing the TLB.
 */
void pt_init(void)
{
	pt_t *newpt, newpt_dyn;
	int s, r, p;
	phys_bytes phys;
	vir_bytes sparepages_mem;
#if defined(__arm__)
	vir_bytes sparepagedirs_mem;
#endif
	/* static: ARCH_VM_DIR_ENTRIES words would be large for the stack. */
	static u32_t currentpagedir[ARCH_VM_DIR_ENTRIES];
	int m = kernel_boot_info.kern_mod;
#if defined(__i386__)
	int global_bit_ok = 0;
	u32_t mypdbr; /* Page Directory Base Register (cr3) value */
#elif defined(__arm__)
	u32_t myttbr;
#endif

	/* Find what the physical location of the kernel is.
	 */
	assert(m >= 0);
	assert(m < kernel_boot_info.mods_with_kernel);
	assert(kernel_boot_info.mods_with_kernel < MULTIBOOT_MAX_MODS);
	kern_mb_mod = &kernel_boot_info.module_list[m];
	kern_size = kern_mb_mod->mod_end - kern_mb_mod->mod_start;
	/* Kernel must be big-page aligned both physically and virtually,
	 * so it can be mapped with big pages in pt_mapkernel().
	 */
	assert(!(kern_mb_mod->mod_start % ARCH_BIG_PAGE_SIZE));
	assert(!(kernel_boot_info.vir_kern_start % ARCH_BIG_PAGE_SIZE));
	kern_start_pde = kernel_boot_info.vir_kern_start / ARCH_BIG_PAGE_SIZE;

	/* Get ourselves spare pages. */
	sparepages_mem = (vir_bytes) static_sparepages;
	assert(!(sparepages_mem % VM_PAGE_SIZE));

#if defined(__arm__)
	/* Get ourselves spare pagedirs. */
	sparepagedirs_mem = (vir_bytes) static_sparepagedirs;
	assert(!(sparepagedirs_mem % ARCH_PAGEDIR_SIZE));
#endif

	/* Spare pages are used to allocate memory before VM has its own page
	 * table that things (i.e. arbitrary physical memory) can be mapped into.
	 * We get it by pre-allocating it in our bss (allocated and mapped in by
	 * the kernel) in static_sparepages. We also need the physical addresses
	 * though; we look them up now so they are ready for use.
	 */
#if defined(__arm__)
	missing_sparedirs = 0;
	assert(STATIC_SPAREPAGEDIRS <= SPAREPAGEDIRS);
	for(s = 0; s < SPAREPAGEDIRS; s++) {
		vir_bytes v = (sparepagedirs_mem + s*ARCH_PAGEDIR_SIZE);;
		phys_bytes ph;
		if((r=sys_umap(SELF, VM_D, (vir_bytes) v,
			ARCH_PAGEDIR_SIZE, &ph)) != OK)
			panic("pt_init: sys_umap failed: %d", r);
		/* Slots beyond the static pool start out empty and are
		 * counted as missing, to be refilled dynamically later.
		 */
		if(s >= STATIC_SPAREPAGEDIRS) {
			sparepagedirs[s].pagedir = NULL;
			missing_sparedirs++;
			continue;
		}
		sparepagedirs[s].pagedir = (void *) v;
		sparepagedirs[s].phys = ph;
	}
#endif

	if(!(spare_pagequeue = reservedqueue_new(SPAREPAGES, 1, 1, 0)))
		panic("reservedqueue_new for single pages failed");

	assert(STATIC_SPAREPAGES < SPAREPAGES);
	for(s = 0; s < STATIC_SPAREPAGES; s++) {
		void *v = (void *) (sparepages_mem + s*VM_PAGE_SIZE);
		phys_bytes ph;
		/* NOTE(review): length passed is VM_PAGE_SIZE*SPAREPAGES on
		 * every iteration rather than VM_PAGE_SIZE; presumably only
		 * the returned base physical address of v is used — confirm
		 * against sys_umap semantics.
		 */
		if((r=sys_umap(SELF, VM_D, (vir_bytes) v,
	 		VM_PAGE_SIZE*SPAREPAGES, &ph)) != OK)
				panic("pt_init: sys_umap failed: %d", r);
		reservedqueue_add(spare_pagequeue, v, ph);
	}

#if defined(__i386__)
	/* global bit and 4MB pages available? */
	global_bit_ok = _cpufeature(_CPUF_I386_PGE);
	bigpage_ok = _cpufeature(_CPUF_I386_PSE);

	/* Set bit for PTE's and PDE's if available. */
	if(global_bit_ok)
		global_bit = I386_VM_GLOBAL;
#endif

	/* Now reserve another pde for kernel's own mappings.
	 */
	{
		int kernmap_pde;
		phys_bytes addr, len;
		int flags, pindex = 0;
		u32_t offset = 0;

		kernmap_pde = freepde();
		offset = kernmap_pde * ARCH_BIG_PAGE_SIZE;

		/* Ask the kernel for each mapping it wants, record it in
		 * kern_mappings[], and reply with the virtual address we
		 * assigned.  Mappings are laid out consecutively starting
		 * at the reserved PDE's address.
		 */
		while(sys_vmctl_get_mapping(pindex, &addr, &len,
			&flags) == OK) {
			int usedpde;
			vir_bytes vir;
			if(pindex >= MAX_KERNMAPPINGS)
                		panic("VM: too many kernel mappings: %d", pindex);
			kern_mappings[pindex].phys_addr = addr;
			kern_mappings[pindex].len = len;
			kern_mappings[pindex].flags = flags;
			kern_mappings[pindex].vir_addr = offset;
			/* NOTE(review): flags is assigned twice; the raw
			 * kernel flags stored just above are immediately
			 * replaced by the PTE bits built below.
			 */
			kern_mappings[pindex].flags =
				ARCH_VM_PTE_PRESENT;
			if(flags & VMMF_UNCACHED)
#if defined(__i386__)
				kern_mappings[pindex].flags |= PTF_NOCACHE;
#elif defined(__arm__)
				kern_mappings[pindex].flags |= ARM_VM_PTE_DEVICE;
			else {
				kern_mappings[pindex].flags |= ARM_VM_PTE_CACHED;
			}
#endif
			if(flags & VMMF_USER)
				kern_mappings[pindex].flags |= ARCH_VM_PTE_USER;
#if defined(__arm__)
			else
				kern_mappings[pindex].flags |= ARM_VM_PTE_SUPER;
#endif
			if(flags & VMMF_WRITE)
				kern_mappings[pindex].flags |= ARCH_VM_PTE_RW;
#if defined(__arm__)
			else
				kern_mappings[pindex].flags |= ARCH_VM_PTE_RO;
#endif

#if defined(__i386__)
			if(flags & VMMF_GLO)
				kern_mappings[pindex].flags |= I386_VM_GLOBAL;
#endif

			if(addr % VM_PAGE_SIZE)
                		panic("VM: addr unaligned: %lu", addr);
			if(len % VM_PAGE_SIZE)
                		panic("VM: len unaligned: %lu", len);
			vir = offset;
			if(sys_vmctl_reply_mapping(pindex, vir) != OK)
                		panic("VM: reply failed");
			offset += len;
			pindex++;
			kernmappings++;

			/* If the mappings spilled past the current PDE,
			 * reserve the following PDE(s); they must come out
			 * of freepde() consecutively (asserted).
			 */
			usedpde = ARCH_VM_PDE(offset);
			while(usedpde > kernmap_pde) {
				int newpde = freepde();
				assert(newpde == kernmap_pde+1);
				kernmap_pde = newpde;
			}
		}
	}

	pt_allocate_kernel_mapped_pagetables();

	/* Allright. Now. We have to make our own page directory and page tables,
	 * that the kernel has already set up, accessible to us. It's easier to
	 * understand if we just copy all the required pages (i.e. page directory
	 * and page tables), and set up the pointers as if VM had done it itself.
	 *
	 * This allocation will happen without using any page table, and just
	 * uses spare pages.
	 */
	newpt = &vmprocess->vm_pt;
	if(pt_new(newpt) != OK)
		panic("vm pt_new failed");

	/* Get our current pagedir so we can see it. */
#if defined(__i386__)
	if(sys_vmctl_get_pdbr(SELF, &mypdbr) != OK)
#elif defined(__arm__)
	if(sys_vmctl_get_pdbr(SELF, &myttbr) != OK)
#endif

		panic("VM: sys_vmctl_get_pdbr failed");
#if defined(__i386__)
	if(sys_vircopy(NONE, mypdbr, SELF,
		(vir_bytes) currentpagedir, VM_PAGE_SIZE, 0) != OK)
#elif defined(__arm__)
	if(sys_vircopy(NONE, myttbr, SELF,
		(vir_bytes) currentpagedir, ARCH_PAGEDIR_SIZE, 0) != OK)
#endif
		panic("VM: sys_vircopy failed");

	/* We have mapped in kernel ourselves; now copy mappings for VM
	 * that kernel made, including allocations for BSS. Skip identity
	 * mapping bits; just map in VM.
	 */
	for(p = 0; p < ARCH_VM_DIR_ENTRIES; p++) {
		u32_t entry = currentpagedir[p];
		phys_bytes ptaddr_kern, ptaddr_us;

		/* BIGPAGEs are kernel mapping (do ourselves) or boot
		 * identity mapping (don't want).
		 */
		if(!(entry & ARCH_VM_PDE_PRESENT)) continue;
		if((entry & ARCH_VM_BIGPAGE)) continue;

		if(pt_ptalloc(newpt, p, 0) != OK)
			panic("pt_ptalloc failed");
		assert(newpt->pt_dir[p] & ARCH_VM_PDE_PRESENT);

#if defined(__i386__)
		ptaddr_kern = entry & ARCH_VM_ADDR_MASK;
		ptaddr_us = newpt->pt_dir[p] & ARCH_VM_ADDR_MASK;
#elif defined(__arm__)
		ptaddr_kern = entry & ARCH_VM_PDE_MASK;
		ptaddr_us = newpt->pt_dir[p] & ARCH_VM_PDE_MASK;
#endif

		/* Copy kernel-initialized pagetable contents into our
		 * normally accessible pagetable.
		 */
                if(sys_abscopy(ptaddr_kern, ptaddr_us, VM_PAGE_SIZE) != OK)
			panic("pt_init: abscopy failed");
	}

	/* Inform kernel vm has a newly built page table. */
	assert(vmproc[VM_PROC_NR].vm_endpoint == VM_PROC_NR);
	pt_bind(newpt, &vmproc[VM_PROC_NR]);

	pt_init_done = 1;

	/* VM is now fully functional in that it can dynamically allocate memory
	 * for itself.
	 *
	 * We don't want to keep using the bootstrap statically allocated spare
	 * pages though, as the physical addresses will change on liveupdate. So we
	 * re-do part of the initialization now with purely dynamically allocated
	 * memory. First throw out the static pool.
	 *
	 * Then allocate the kernel-shared-pagetables and VM pagetables with dynamic
	 * memory.
	 */

	alloc_cycle();                          /* Make sure allocating works */
	while(vm_getsparepage(&phys)) ;		/* Use up all static pages */
	alloc_cycle();                          /* Refill spares with dynamic */
	pt_allocate_kernel_mapped_pagetables(); /* Reallocate in-kernel pages */
	pt_bind(newpt, &vmproc[VM_PROC_NR]);    /* Recalculate */
	pt_mapkernel(newpt);                    /* Rewrite pagetable info */

	/* Flush TLB just in case any of those mappings have been touched */
	if((sys_vmctl(SELF, VMCTL_FLUSHTLB, 0)) != OK) {
		panic("VMCTL_FLUSHTLB failed");
	}

	/* Recreate VM page table with dynamic-only allocations */
	memset(&newpt_dyn, 0, sizeof(newpt_dyn));
	pt_new(&newpt_dyn);
	pt_copy(&newpt_dyn, newpt);
	memcpy(newpt, &newpt_dyn, sizeof(*newpt));

	pt_bind(newpt, &vmproc[VM_PROC_NR]);    /* Recalculate */
	pt_mapkernel(newpt);                    /* Rewrite pagetable info */

	/* Flush TLB just in case any of those mappings have been touched */
	if((sys_vmctl(SELF, VMCTL_FLUSHTLB, 0)) != OK) {
		panic("VMCTL_FLUSHTLB failed");
	}

	/* All OK. */
	return;
}

/*===========================================================================*
 *				pt_bind					     *
 *===========================================================================*/
/* Bind page table pt to process 'who': publish pt's page directory in the
 * shared "page directory pagetable" slot derived from the process slot
 * number, and tell the kernel the new address-space root via
 * sys_vmctl_set_addrspace().  Returns the kernel call's result.
 */
int pt_bind(pt_t *pt, struct vmproc *who)
{
	int procslot, pdeslot;
	u32_t phys;
	void *pdes;
	int pagedir_pde;
	int slots_per_pde;
	int pages_per_pagedir = ARCH_PAGEDIR_SIZE/VM_PAGE_SIZE;
	struct pdm *pdm;

	slots_per_pde = ARCH_VM_PT_ENTRIES / pages_per_pagedir;

	/* Basic sanity checks.
	 */
	assert(who);
	assert(who->vm_flags & VMF_INUSE);
	assert(pt);

	/* Locate the pagedir-mapping slot for this process: which
	 * pagedir_mappings entry, and which slot within it.
	 */
	procslot = who->vm_slot;
	pdm = &pagedir_mappings[procslot/slots_per_pde];
	pdeslot = procslot%slots_per_pde;
	pagedir_pde = pdm->pdeno;
	assert(pdeslot >= 0);
	assert(procslot < ELEMENTS(vmproc));
	assert(pdeslot < ARCH_VM_PT_ENTRIES / pages_per_pagedir);
	assert(pagedir_pde >= 0);

#if defined(__i386__)
	phys = pt->pt_dir_phys & ARCH_VM_ADDR_MASK;
#elif defined(__arm__)
	phys = pt->pt_dir_phys & ARM_VM_PTE_MASK;
#endif
	/* The directory's physical address must survive masking and be
	 * pagedir-aligned.
	 */
	assert(pt->pt_dir_phys == phys);
	assert(!(pt->pt_dir_phys % ARCH_PAGEDIR_SIZE));

	/* Update "page directory pagetable." */
#if defined(__i386__)
	pdm->page_directories[pdeslot] =
		phys | ARCH_VM_PDE_PRESENT|ARCH_VM_PTE_RW;
#elif defined(__arm__)
	{
	/* ARM page directories span multiple pages; map each one. */
	int i;
	for (i = 0; i < pages_per_pagedir; i++) {
		pdm->page_directories[pdeslot*pages_per_pagedir+i] =
			(phys+i*VM_PAGE_SIZE)
			| ARCH_VM_PTE_PRESENT
			| ARCH_VM_PTE_RW
			| ARM_VM_PTE_CACHED
			| ARCH_VM_PTE_USER; //LSC FIXME
	}
	}
#endif

	/* This is where the PDE's will be visible to the kernel
	 * in its address space.
	 */
	pdes = (void *) (pagedir_pde*ARCH_BIG_PAGE_SIZE +
#if defined(__i386__)
			pdeslot * VM_PAGE_SIZE);
#elif defined(__arm__)
			pdeslot * ARCH_PAGEDIR_SIZE);
#endif

	/* Tell kernel about new page table root. */
	return sys_vmctl_set_addrspace(who->vm_endpoint, pt->pt_dir_phys , pdes);
}

/*===========================================================================*
 *				pt_free					     *
 *===========================================================================*/
/* Free memory associated with this pagetable.
 * Releases every allocated page table page; the directory itself is not
 * freed here.
 */
void pt_free(pt_t *pt)
{
	int i;

	for(i = 0; i < ARCH_VM_DIR_ENTRIES; i++)
		if(pt->pt_pt[i])
			vm_freepages((vir_bytes) pt->pt_pt[i], 1);

	return;
}

/*===========================================================================*
 *				pt_mapkernel				     *
 *===========================================================================*/
/* Write the kernel's mappings into page table pt:
 *   1. the kernel image itself, as consecutive big pages starting at
 *      kern_start_pde (pt_init() verified the required alignment);
 *   2. the PDEs of the "page directory pagetable" slots, so the kernel
 *      can see all processes' page directories;
 *   3. every mapping the kernel requested at boot (kern_mappings[]).
 * Returns OK, or the pt_writemap() error for a failed kernel mapping.
 */
int pt_mapkernel(pt_t *pt)
{
	int i;
	int kern_pde = kern_start_pde;
	phys_bytes addr, mapped = 0;

        /* Any page table needs to map in the kernel address space. */
	assert(bigpage_ok);
	assert(kern_pde >= 0);

	/* pt_init() has made sure this is ok. */
	addr = kern_mb_mod->mod_start;

	/* Actually mapping in kernel */
	while(mapped < kern_size) {
#if defined(__i386__)
		pt->pt_dir[kern_pde] = addr | ARCH_VM_PDE_PRESENT |
			ARCH_VM_BIGPAGE | ARCH_VM_PTE_RW | global_bit;
#elif defined(__arm__)
		pt->pt_dir[kern_pde] = (addr & ARM_VM_SECTION_MASK)
			| ARM_VM_SECTION
			| ARM_VM_SECTION_DOMAIN
			| ARM_VM_SECTION_CACHED
			| ARM_VM_SECTION_SUPER;
#endif
		kern_pde++;
		mapped += ARCH_BIG_PAGE_SIZE;
		addr += ARCH_BIG_PAGE_SIZE;
	}

	/* Kernel also wants to know about all page directories. */
	{
		int pd;
		for(pd = 0; pd < MAX_PAGEDIR_PDES; pd++) {
			struct pdm *pdm = &pagedir_mappings[pd];
			
			assert(pdm->pdeno > 0);
			/* Pagedir PDEs live above the kernel image. */
			assert(pdm->pdeno > kern_pde);
			pt->pt_dir[pdm->pdeno] = pdm->val;
		}
	}

	/* Kernel also wants various mappings of its own. */
	for(i = 0; i < kernmappings; i++) {
		int r;
		if((r=pt_writemap(NULL, pt,
			kern_mappings[i].vir_addr,
			kern_mappings[i].phys_addr,
			kern_mappings[i].len,
			kern_mappings[i].flags, 0)) != OK) {
			return r;
		}

	}

	return OK;
}

/* Number of pages VM has allocated for itself (maintained elsewhere). */
int get_vm_self_pages(void) { return vm_self_pages; }