#include "kernel/kernel.h"
#include "kernel/vm.h"

#include <machine/vm.h>

#include <minix/type.h>
#include <minix/syslib.h>
#include <minix/cpufeature.h>
#include <string.h>
#include <assert.h>
#include <signal.h>
#include <stdlib.h>

#include "oxpcie.h"
#include "arch_proto.h"

#ifdef USE_APIC
#include "apic.h"
#ifdef USE_WATCHDOG
#include "kernel/watchdog.h"
#endif
#endif

phys_bytes video_mem_vaddr = 0;

#define HASPT(procptr) ((procptr)->p_seg.p_cr3 != 0)
static int nfreepdes = 0;
#define MAXFREEPDES	2
static int freepdes[MAXFREEPDES];

static u32_t phys_get32(phys_bytes v);

/* Clear all mapping-cache (free PDE) slots in the currently loaded page
 * table, so that no stale window mappings can be reused.
 */
void mem_clear_mapcache(void)
{
	int i;
	for(i = 0; i < nfreepdes; i++) {
		struct proc *ptproc = get_cpulocal_var(ptproc);
		int pde = freepdes[i];
		u32_t *ptv;
		assert(ptproc);
		ptv = ptproc->p_seg.p_cr3_v;
		assert(ptv);
		ptv[pde] = 0;
	}
}

/* This function sets up a mapping from within the kernel's address
 * space to any other area of memory, either straight physical
 * memory (pr == NULL) or a process view of memory, in 4MB windows.
 * I.e., it maps in 4MB chunks of virtual (or physical) address space
 * to 4MB chunks of kernel virtual address space.
 *
 * It recognizes pr already being in memory as a special case (no
 * mapping required).
 *
 * The target (i.e. in-kernel) mapping area is one of the freepdes[]
 * slots that VM told the kernel about earlier; it is selected by the
 * 'free_pde_idx' parameter. This value can be chosen freely by the
 * caller, as long as it is in range (i.e. 0 or higher and corresponds
 * to a known freepde slot). It is up to the caller to keep track of
 * which freepde slots are in use, and to determine which ones are free.
 *
 * The logical number supplied by the caller is translated into an
 * actual pde number to be used, and a linear address into the mapped
 * window is returned for actual use by phys_copy() or phys_memset().
 */
static phys_bytes createpde(
	const struct proc *pr,	/* Requested process, NULL for physical. */
	const phys_bytes linaddr,/* Address after segment translation. */
	phys_bytes *bytes,	/* Size of chunk, function may truncate it. */
	int free_pde_idx,	/* Index of the free slot to use. */
	int *changed		/* If a mapping is made, this is set to 1. */
	)
{
	u32_t pdeval;
	phys_bytes offset;
	int pde;

	assert(free_pde_idx >= 0 && free_pde_idx < nfreepdes);
	pde = freepdes[free_pde_idx];
	assert(pde >= 0 && pde < 1024);

	if(pr && ((pr == get_cpulocal_var(ptproc)) || iskernelp(pr))) {
		/* Process memory is requested, and it's a process that is
		 * already in the current page table, or the kernel, which
		 * is always there. Therefore linaddr is valid directly,
		 * with the requested size.
		 */
		return linaddr;
	}

	if(pr) {
		/* Requested address is in a process that is not currently
		 * accessible directly. Grab the PDE entry of that process'
		 * page table that corresponds to the requested address.
		 */
		assert(pr->p_seg.p_cr3_v);
		pdeval = pr->p_seg.p_cr3_v[I386_VM_PDE(linaddr)];
	} else {
		/* Requested address is physical. Make up the PDE entry. */
		pdeval = (linaddr & I386_VM_ADDR_MASK_4MB) |
			I386_VM_BIGPAGE | I386_VM_PRESENT |
			I386_VM_WRITE | I386_VM_USER;
	}

	/* Write the pde value that we need into a pde that the kernel
	 * can access, into the currently loaded page table, so that the
	 * mapping becomes visible.
	 */
	assert(get_cpulocal_var(ptproc)->p_seg.p_cr3_v);
	if(get_cpulocal_var(ptproc)->p_seg.p_cr3_v[pde] != pdeval) {
		get_cpulocal_var(ptproc)->p_seg.p_cr3_v[pde] = pdeval;
		*changed = 1;
	}

	/* Memory is now available, but only the 4MB window of virtual
	 * address space that we have mapped; calculate how much of
	 * the requested range is visible and return that in *bytes,
	 * if that is less than the requested range.
	 */
	offset = linaddr & I386_VM_OFFSET_MASK_4MB; /* Offset in 4MB window. */
	*bytes = MIN(*bytes, I386_BIG_PAGE_SIZE - offset);

	/* Return the linear address of the start of the new mapping. */
	return I386_BIG_PAGE_SIZE*pde + offset;
}
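/* Worked example for createpde() (illustrative numbers only): a physical
 * request (pr == NULL) for linaddr 0x1234abcd is aligned down by
 * I386_VM_ADDR_MASK_4MB to the 4MB frame 0x12000000, which becomes the
 * big-page PDE value. The offset within the window is
 * 0x1234abcd & I386_VM_OFFSET_MASK_4MB = 0x34abcd, so with, say,
 * freepdes[free_pde_idx] == 1021 the returned window address is
 * 1021*4MB + 0x34abcd = 0xff74abcd, and *bytes is truncated to at most
 * 4MB - 0x34abcd = 0xb5433 bytes; the caller loops for the remainder.
 */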
/*===========================================================================*
 *				check_resumed_caller			     *
 *===========================================================================*/
static int check_resumed_caller(struct proc *caller)
{
	/* Returns the result from VM if the caller was resumed, otherwise OK. */
	if (caller && (caller->p_misc_flags & MF_KCALL_RESUME)) {
		assert(caller->p_vmrequest.vmresult != VMSUSPEND);
		return caller->p_vmrequest.vmresult;
	}

	return OK;
}

/*===========================================================================*
 *				lin_lin_copy				     *
 *===========================================================================*/
static int lin_lin_copy(struct proc *srcproc, vir_bytes srclinaddr,
	struct proc *dstproc, vir_bytes dstlinaddr, vir_bytes bytes)
{
	u32_t addr;
	proc_nr_t procslot;

	assert(get_cpulocal_var(ptproc));
	assert(get_cpulocal_var(proc_ptr));
	assert(read_cr3() == get_cpulocal_var(ptproc)->p_seg.p_cr3);

	procslot = get_cpulocal_var(ptproc)->p_nr;

	assert(procslot >= 0 && procslot < I386_VM_DIR_ENTRIES);

	if(srcproc) assert(!RTS_ISSET(srcproc, RTS_SLOT_FREE));
	if(dstproc) assert(!RTS_ISSET(dstproc, RTS_SLOT_FREE));
	assert(!RTS_ISSET(get_cpulocal_var(ptproc), RTS_SLOT_FREE));
	assert(get_cpulocal_var(ptproc)->p_seg.p_cr3_v);
	if(srcproc) assert(!RTS_ISSET(srcproc, RTS_VMINHIBIT));
	if(dstproc) assert(!RTS_ISSET(dstproc, RTS_VMINHIBIT));

	while(bytes > 0) {
		phys_bytes srcptr, dstptr;
		vir_bytes chunk = bytes;
		int changed = 0;

#ifdef CONFIG_SMP
		unsigned cpu = cpuid;

		if (srcproc && GET_BIT(srcproc->p_stale_tlb, cpu)) {
			changed = 1;
			UNSET_BIT(srcproc->p_stale_tlb, cpu);
		}
		if (dstproc && GET_BIT(dstproc->p_stale_tlb, cpu)) {
			changed = 1;
			UNSET_BIT(dstproc->p_stale_tlb, cpu);
		}
#endif

		/* Set up 4MB ranges. */
		srcptr = createpde(srcproc, srclinaddr, &chunk, 0, &changed);
		dstptr = createpde(dstproc, dstlinaddr, &chunk, 1, &changed);
		if(changed)
			reload_cr3();

		/* Check for overflow. */
		if (srcptr + chunk < srcptr) return EFAULT_SRC;
		if (dstptr + chunk < dstptr) return EFAULT_DST;

		/* Copy pages. */
		PHYS_COPY_CATCH(srcptr, dstptr, chunk, addr);

		if(addr) {
			/* If addr is nonzero, a page fault was caught. */
			if(addr >= srcptr && addr < (srcptr + chunk)) {
				return EFAULT_SRC;
			}
			if(addr >= dstptr && addr < (dstptr + chunk)) {
				return EFAULT_DST;
			}

			panic("lin_lin_copy fault out of range");

			/* Not reached. */
			return EFAULT;
		}

		/* Update counter and addresses for next iteration, if any. */
		bytes -= chunk;
		srclinaddr += chunk;
		dstlinaddr += chunk;
	}

	if(srcproc) assert(!RTS_ISSET(srcproc, RTS_SLOT_FREE));
	if(dstproc) assert(!RTS_ISSET(dstproc, RTS_SLOT_FREE));
	assert(!RTS_ISSET(get_cpulocal_var(ptproc), RTS_SLOT_FREE));
	assert(get_cpulocal_var(ptproc)->p_seg.p_cr3_v);

	return OK;
}

/* Read a 32-bit word from an arbitrary physical address, by copying it
 * into the kernel through the physical-window path of lin_lin_copy().
 */
static u32_t phys_get32(phys_bytes addr)
{
	u32_t v;
	int r;

	if((r=lin_lin_copy(NULL, addr,
		proc_addr(SYSTEM), (phys_bytes) &v, sizeof(v))) != OK) {
		panic("lin_lin_copy for phys_get32 failed: %d", r);
	}

	return v;
}
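#if 0
/* Hypothetical counterpart sketch (not compiled): writing a 32-bit word
 * to an arbitrary physical address would simply swap the window roles in
 * lin_lin_copy(), with the kernel as source and the physical window as
 * destination. There is no such caller in this file; the name phys_put32
 * is made up for illustration.
 */
static void phys_put32(phys_bytes addr, u32_t v)
{
	int r;

	if((r=lin_lin_copy(proc_addr(SYSTEM), (phys_bytes) &v,
		NULL, addr, sizeof(v))) != OK) {
		panic("lin_lin_copy for phys_put32 failed: %d", r);
	}
}
#endif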
#if 0
static char *cr0_str(u32_t e)
{
	static char str[80];
	strcpy(str, "");
#define FLAG(v) do { if(e & (v)) { strcat(str, #v " "); e &= ~v; } } while(0)
	FLAG(I386_CR0_PE);
	FLAG(I386_CR0_MP);
	FLAG(I386_CR0_EM);
	FLAG(I386_CR0_TS);
	FLAG(I386_CR0_ET);
	FLAG(I386_CR0_PG);
	FLAG(I386_CR0_WP);
	if(e) { strcat(str, " (++)"); }
	return str;
}

static char *cr4_str(u32_t e)
{
	static char str[80];
	strcpy(str, "");
	FLAG(I386_CR4_VME);
	FLAG(I386_CR4_PVI);
	FLAG(I386_CR4_TSD);
	FLAG(I386_CR4_DE);
	FLAG(I386_CR4_PSE);
	FLAG(I386_CR4_PAE);
	FLAG(I386_CR4_MCE);
	FLAG(I386_CR4_PGE);
	if(e) { strcat(str, " (++)"); }
	return str;
}
#endif

/*===========================================================================*
 *				umap_virtual				     *
 *===========================================================================*/
phys_bytes umap_virtual(
  register struct proc *rp,	/* pointer to proc table entry for process */
  int seg,			/* T, D, or S segment */
  vir_bytes vir_addr,		/* virtual address in bytes within the seg */
  vir_bytes bytes		/* # of bytes to be copied */
)
{
	phys_bytes phys = 0;

	if(vm_lookup(rp, vir_addr, &phys, NULL) != OK) {
		printf("SYSTEM:umap_virtual: vm_lookup of %s: seg 0x%x: 0x%lx failed\n",
			rp->p_name, seg, vir_addr);
		phys = 0;
	} else {
		if(phys == 0)
			panic("vm_lookup returned phys: 0x%lx", phys);
	}

	if(phys == 0) {
		printf("SYSTEM:umap_virtual: lookup failed\n");
		return 0;
	}

	/* Now make sure the addresses are contiguous in physical memory,
	 * so that the umap makes sense.
	 */
	if(bytes > 0 && vm_lookup_range(rp, vir_addr, NULL, bytes) != bytes) {
		printf("umap_virtual: %s: %lu at 0x%lx (vir 0x%lx) not contiguous\n",
			rp->p_name, bytes, vir_addr, vir_addr);
		return 0;
	}

	/* phys must be larger than 0 (or the caller will think the call
	 * failed), and the address must not cross a page boundary.
	 */
	assert(phys);

	return phys;
}

/*===========================================================================*
 *				vm_lookup				     *
 *===========================================================================*/
int vm_lookup(const struct proc *proc, const vir_bytes virtual,
	phys_bytes *physical, u32_t *ptent)
{
	u32_t *root, *pt;
	int pde, pte;
	u32_t pde_v, pte_v;

	assert(proc);
	assert(physical);
	assert(!isemptyp(proc));
	assert(HASPT(proc));

	/* Retrieve page directory entry. */
	root = (u32_t *) proc->p_seg.p_cr3;
	assert(!((u32_t) root % I386_PAGE_SIZE));
	pde = I386_VM_PDE(virtual);
	assert(pde >= 0 && pde < I386_VM_DIR_ENTRIES);
	pde_v = phys_get32((u32_t) (root + pde));

	if(!(pde_v & I386_VM_PRESENT)) {
		return EFAULT;
	}

	/* We don't expect to see 4MB pages here, but handle them anyway. */
	if(pde_v & I386_VM_BIGPAGE) {
		*physical = pde_v & I386_VM_ADDR_MASK_4MB;
		if(ptent) *ptent = pde_v;
		*physical += virtual & I386_VM_OFFSET_MASK_4MB;
	} else {
		/* Retrieve page table entry. */
		pt = (u32_t *) I386_VM_PFA(pde_v);
		assert(!((u32_t) pt % I386_PAGE_SIZE));
		pte = I386_VM_PTE(virtual);
		assert(pte >= 0 && pte < I386_VM_PT_ENTRIES);
		pte_v = phys_get32((u32_t) (pt + pte));
		if(!(pte_v & I386_VM_PRESENT)) {
			return EFAULT;
		}

		if(ptent) *ptent = pte_v;

		/* Actual address now known; retrieve it and add page offset. */
		*physical = I386_VM_PFA(pte_v);
		*physical += virtual % I386_PAGE_SIZE;
	}

	return OK;
}
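/* Worked two-level walk for vm_lookup() (illustrative numbers): for
 * virtual address 0x0804a123, I386_VM_PDE() yields directory slot 32
 * (0x0804a123 >> 22) and I386_VM_PTE() yields table slot 74
 * ((0x0804a123 >> 12) & 0x3ff). If the PTE holds frame 0x001f3000, the
 * resulting physical address is
 * 0x001f3000 + (0x0804a123 % I386_PAGE_SIZE) = 0x001f3123.
 */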
/*===========================================================================*
 *				vm_lookup_range				     *
 *===========================================================================*/
size_t vm_lookup_range(const struct proc *proc, vir_bytes vir_addr,
	phys_bytes *phys_addr, size_t bytes)
{
	/* Look up the physical address corresponding to linear virtual address
	 * 'vir_addr' for process 'proc'. Return the size of the range covered
	 * by contiguous physical memory starting from that address; this may
	 * be anywhere between 0 and 'bytes' inclusive. If the return value is
	 * nonzero, and 'phys_addr' is non-NULL, 'phys_addr' will be set to the
	 * base physical address of the range. 'vir_addr' and 'bytes' need not
	 * be page-aligned, but the caller must have verified that the given
	 * linear range is valid for the given process at all.
	 */
	phys_bytes phys, next_phys;
	size_t len;

	assert(proc);
	assert(bytes > 0);
	assert(HASPT(proc));

	/* Look up the first page. */
	if (vm_lookup(proc, vir_addr, &phys, NULL) != OK)
		return 0;

	if (phys_addr != NULL)
		*phys_addr = phys;

	len = I386_PAGE_SIZE - (vir_addr % I386_PAGE_SIZE);
	vir_addr += len;
	next_phys = phys + len;

	/* Look up any next pages and test physical contiguity. */
	while (len < bytes) {
		if (vm_lookup(proc, vir_addr, &phys, NULL) != OK)
			break;

		if (next_phys != phys)
			break;

		len += I386_PAGE_SIZE;
		vir_addr += I386_PAGE_SIZE;
		next_phys += I386_PAGE_SIZE;
	}

	/* We might now have overshot the requested length somewhat. */
	return MIN(bytes, len);
}

/*===========================================================================*
 *				vm_check_range				     *
 *===========================================================================*/
int vm_check_range(struct proc *caller, struct proc *target,
	vir_bytes vir_addr, size_t bytes, int writeflag)
{
	/* Public interface to vm_suspend(), for use by kernel calls. On behalf
	 * of 'caller', call into VM to check the linear virtual address range
	 * of process 'target', starting at 'vir_addr', for 'bytes' bytes. This
	 * function assumes that it will be called twice if VM returned an error
	 * the first time (since nothing has changed in that case), and will
	 * then return the error code resulting from the first call. Upon the
	 * first call, a non-success error code is returned as well.
	 */
	int r;

	if ((caller->p_misc_flags & MF_KCALL_RESUME) &&
			(r = caller->p_vmrequest.vmresult) != OK)
		return r;

	vm_suspend(caller, target, vir_addr, bytes, VMSTYPE_KERNELCALL,
		writeflag);

	return VMSUSPEND;
}
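/* Worked example for vm_lookup_range() (illustrative numbers): with
 * vir_addr = 0x1000456 and bytes = 0x2000, the first lookup covers
 * len = 0x1000 - 0x456 = 0xbaa bytes up to the next page boundary. If
 * the pages at 0x1001000 and 0x1002000 map to physical frames exactly
 * I386_PAGE_SIZE apart, len grows to 0x2baa and the function returns
 * MIN(0x2000, 0x2baa) = 0x2000; a physical discontiguity at the second
 * page would instead make it return just 0xbaa.
 */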
#if 0
static char *flagstr(u32_t e, const int dir)
{
	static char str[80];
	strcpy(str, "");
	FLAG(I386_VM_PRESENT);
	FLAG(I386_VM_WRITE);
	FLAG(I386_VM_USER);
	FLAG(I386_VM_PWT);
	FLAG(I386_VM_PCD);
	FLAG(I386_VM_GLOBAL);
	if(dir)
		FLAG(I386_VM_BIGPAGE);	/* Page directory entry only */
	else
		FLAG(I386_VM_DIRTY);	/* Page table entry only */
	return str;
}

static void vm_pt_print(u32_t *pagetable, const u32_t v)
{
	int pte;
	int col = 0;

	assert(!((u32_t) pagetable % I386_PAGE_SIZE));

	for(pte = 0; pte < I386_VM_PT_ENTRIES; pte++) {
		u32_t pte_v, pfa;
		pte_v = phys_get32((u32_t) (pagetable + pte));
		if(!(pte_v & I386_VM_PRESENT))
			continue;
		pfa = I386_VM_PFA(pte_v);
		printf("%4d:%08lx:%08lx %2s ",
			pte, v + I386_PAGE_SIZE*pte, pfa,
			(pte_v & I386_VM_WRITE) ? "rw":"RO");
		col++;
		if(col == 3) { printf("\n"); col = 0; }
	}
	if(col > 0) printf("\n");

	return;
}

static void vm_print(u32_t *root)
{
	int pde;

	assert(!((u32_t) root % I386_PAGE_SIZE));

	printf("page table 0x%lx:\n", (unsigned long) root);

	for(pde = 0; pde < I386_VM_DIR_ENTRIES; pde++) {
		u32_t pde_v;
		u32_t *pte_a;
		pde_v = phys_get32((u32_t) (root + pde));
		if(!(pde_v & I386_VM_PRESENT))
			continue;
		if(pde_v & I386_VM_BIGPAGE) {
			printf("%4d: 0x%lx, flags %s\n",
				pde, I386_VM_PFA(pde_v), flagstr(pde_v, 1));
		} else {
			pte_a = (u32_t *) I386_VM_PFA(pde_v);
			printf("%4d: pt %08lx %s\n",
				pde, (unsigned long) pte_a, flagstr(pde_v, 1));
			vm_pt_print(pte_a, pde * I386_VM_PT_ENTRIES * I386_PAGE_SIZE);
			printf("\n");
		}
	}

	return;
}
#endif
"rw":"RO"); 485 col++; 486 if(col == 3) { printf("\n"); col = 0; } 487 } 488 if(col > 0) printf("\n"); 489 490 return; 491 } 492 493 static void vm_print(u32_t *root) 494 { 495 int pde; 496 497 assert(!((u32_t) root % I386_PAGE_SIZE)); 498 499 printf("page table 0x%lx:\n", root); 500 501 for(pde = 0; pde < I386_VM_DIR_ENTRIES; pde++) { 502 u32_t pde_v; 503 u32_t *pte_a; 504 pde_v = phys_get32((u32_t) (root + pde)); 505 if(!(pde_v & I386_VM_PRESENT)) 506 continue; 507 if(pde_v & I386_VM_BIGPAGE) { 508 printf("%4d: 0x%lx, flags %s\n", 509 pde, I386_VM_PFA(pde_v), flagstr(pde_v, 1)); 510 } else { 511 pte_a = (u32_t *) I386_VM_PFA(pde_v); 512 printf("%4d: pt %08lx %s\n", 513 pde, pte_a, flagstr(pde_v, 1)); 514 vm_pt_print(pte_a, pde * I386_VM_PT_ENTRIES * I386_PAGE_SIZE); 515 printf("\n"); 516 } 517 } 518 519 520 return; 521 } 522 #endif 523 524 /*===========================================================================* 525 * vmmemset * 526 *===========================================================================*/ 527 int vm_memset(struct proc* caller, endpoint_t who, phys_bytes ph, int c, 528 phys_bytes count) 529 { 530 u32_t pattern; 531 struct proc *whoptr = NULL; 532 phys_bytes cur_ph = ph; 533 phys_bytes left = count; 534 phys_bytes ptr, chunk, pfa = 0; 535 int new_cr3, r = OK; 536 537 if ((r = check_resumed_caller(caller)) != OK) 538 return r; 539 540 /* NONE for physical, otherwise virtual */ 541 if (who != NONE && !(whoptr = endpoint_lookup(who))) 542 return ESRCH; 543 544 c &= 0xFF; 545 pattern = c | (c << 8) | (c << 16) | (c << 24); 546 547 assert(get_cpulocal_var(ptproc)->p_seg.p_cr3_v); 548 assert(!catch_pagefaults); 549 catch_pagefaults = 1; 550 551 /* We can memset as many bytes as we have remaining, 552 * or as many as remain in the 4MB chunk we mapped in. 553 */ 554 while (left > 0) { 555 new_cr3 = 0; 556 chunk = left; 557 ptr = createpde(whoptr, cur_ph, &chunk, 0, &new_cr3); 558 559 if (new_cr3) 560 reload_cr3(); 561 562 /* If a page fault happens, pfa is non-null */ 563 if ((pfa = phys_memset(ptr, pattern, chunk))) { 564 565 /* If a process pagefaults, VM may help out */ 566 if (whoptr) { 567 vm_suspend(caller, whoptr, ph, count, 568 VMSTYPE_KERNELCALL, 1); 569 assert(catch_pagefaults); 570 catch_pagefaults = 0; 571 return VMSUSPEND; 572 } 573 574 /* Pagefault when phys copying ?! */ 575 panic("vm_memset: pf %lx addr=%lx len=%lu\n", 576 pfa , ptr, chunk); 577 } 578 579 cur_ph += chunk; 580 left -= chunk; 581 } 582 583 assert(get_cpulocal_var(ptproc)->p_seg.p_cr3_v); 584 assert(catch_pagefaults); 585 catch_pagefaults = 0; 586 587 return OK; 588 } 589 590 /*===========================================================================* 591 * virtual_copy_f * 592 *===========================================================================*/ 593 int virtual_copy_f( 594 struct proc * caller, 595 struct vir_addr *src_addr, /* source virtual address */ 596 struct vir_addr *dst_addr, /* destination virtual address */ 597 vir_bytes bytes, /* # of bytes to copy */ 598 int vmcheck /* if nonzero, can return VMSUSPEND */ 599 ) 600 { 601 /* Copy bytes from virtual address src_addr to virtual address dst_addr. */ 602 struct vir_addr *vir_addr[2]; /* virtual source and destination address */ 603 int i, r; 604 struct proc *procs[2]; 605 606 assert((vmcheck && caller) || (!vmcheck && !caller)); 607 608 /* Check copy count. */ 609 if (bytes <= 0) return(EDOM); 610 611 /* Do some more checks and map virtual addresses to physical addresses. 
/*===========================================================================*
 *				virtual_copy_f				     *
 *===========================================================================*/
int virtual_copy_f(
  struct proc * caller,
  struct vir_addr *src_addr,	/* source virtual address */
  struct vir_addr *dst_addr,	/* destination virtual address */
  vir_bytes bytes,		/* # of bytes to copy */
  int vmcheck			/* if nonzero, can return VMSUSPEND */
)
{
	/* Copy bytes from virtual address src_addr to virtual address dst_addr. */
	struct vir_addr *vir_addr[2];	/* virtual source and destination address */
	int i, r;
	struct proc *procs[2];

	assert((vmcheck && caller) || (!vmcheck && !caller));

	/* Check copy count. */
	if (bytes <= 0) return(EDOM);

	/* Do some more checks and map virtual addresses to physical addresses. */
	vir_addr[_SRC_] = src_addr;
	vir_addr[_DST_] = dst_addr;

	for (i=_SRC_; i<=_DST_; i++) {
		endpoint_t proc_e = vir_addr[i]->proc_nr_e;
		int proc_nr;
		struct proc *p;

		if(proc_e == NONE) {
			p = NULL;
		} else {
			if(!isokendpt(proc_e, &proc_nr)) {
				printf("virtual_copy: no reasonable endpoint\n");
				return ESRCH;
			}
			p = proc_addr(proc_nr);
		}

		procs[i] = p;
	}

	if ((r = check_resumed_caller(caller)) != OK)
		return r;

	if((r=lin_lin_copy(procs[_SRC_], vir_addr[_SRC_]->offset,
		procs[_DST_], vir_addr[_DST_]->offset, bytes)) != OK) {
		int writeflag;
		struct proc *target = NULL;
		phys_bytes lin;
		if(r != EFAULT_SRC && r != EFAULT_DST)
			panic("lin_lin_copy failed: %d", r);
		if(!vmcheck || !caller) {
			return r;
		}

		if(r == EFAULT_SRC) {
			lin = vir_addr[_SRC_]->offset;
			target = procs[_SRC_];
			writeflag = 0;
		} else if(r == EFAULT_DST) {
			lin = vir_addr[_DST_]->offset;
			target = procs[_DST_];
			writeflag = 1;
		} else {
			panic("r strange: %d", r);
		}

		assert(caller);
		assert(target);

		vm_suspend(caller, target, lin, bytes, VMSTYPE_KERNELCALL,
			writeflag);
		return VMSUSPEND;
	}

	return OK;
}

/*===========================================================================*
 *				data_copy				     *
 *===========================================================================*/
int data_copy(const endpoint_t from_proc, const vir_bytes from_addr,
	const endpoint_t to_proc, const vir_bytes to_addr,
	size_t bytes)
{
	struct vir_addr src, dst;

	src.offset = from_addr;
	dst.offset = to_addr;
	src.proc_nr_e = from_proc;
	dst.proc_nr_e = to_proc;
	assert(src.proc_nr_e != NONE);
	assert(dst.proc_nr_e != NONE);

	return virtual_copy(&src, &dst, bytes);
}

/*===========================================================================*
 *				data_copy_vmcheck			     *
 *===========================================================================*/
int data_copy_vmcheck(struct proc * caller,
	const endpoint_t from_proc, const vir_bytes from_addr,
	const endpoint_t to_proc, const vir_bytes to_addr,
	size_t bytes)
{
	struct vir_addr src, dst;

	src.offset = from_addr;
	dst.offset = to_addr;
	src.proc_nr_e = from_proc;
	dst.proc_nr_e = to_proc;
	assert(src.proc_nr_e != NONE);
	assert(dst.proc_nr_e != NONE);

	return virtual_copy_vmcheck(caller, &src, &dst, bytes);
}

void memory_init(void)
{
	assert(nfreepdes == 0);

	freepdes[nfreepdes++] = kinfo.freepde_start++;
	freepdes[nfreepdes++] = kinfo.freepde_start++;

	assert(kinfo.freepde_start < I386_VM_DIR_ENTRIES);
	assert(nfreepdes == 2);
	assert(nfreepdes <= MAXFREEPDES);
}
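/* Note: memory_init() claims exactly two free PDE slots because
 * lin_lin_copy() needs at most two simultaneous 4MB windows, one for
 * the source range (createpde slot 0) and one for the destination
 * range (createpde slot 1); vm_memset() uses only slot 0.
 */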
/*===========================================================================*
 *				arch_proc_init				     *
 *===========================================================================*/
void arch_proc_init(struct proc *pr, const u32_t ip, const u32_t sp,
	const u32_t ps_str, char *name)
{
	arch_proc_reset(pr);
	strlcpy(pr->p_name, name, sizeof(pr->p_name));

	/* Set the custom state we know. */
	pr->p_reg.pc = ip;
	pr->p_reg.sp = sp;
	pr->p_reg.bx = ps_str;
}

static int oxpcie_mapping_index = -1,
	lapic_mapping_index = -1,
	ioapic_first_index = -1,
	ioapic_last_index = -1,
	video_mem_mapping_index = -1,
	usermapped_glo_index = -1,
	usermapped_index = -1, first_um_idx = -1;

extern char *video_mem;

extern char usermapped_start, usermapped_end, usermapped_nonglo_start;

int arch_phys_map(const int index,
	phys_bytes *addr,
	phys_bytes *len,
	int *flags)
{
	static int first = 1;
	int freeidx = 0;
	static char *ser_var = NULL;
	u32_t glo_len = (u32_t) &usermapped_nonglo_start -
		(u32_t) &usermapped_start;

	if(first) {
		memset(&minix_kerninfo, 0, sizeof(minix_kerninfo));
		video_mem_mapping_index = freeidx++;
		if(glo_len > 0) {
			usermapped_glo_index = freeidx++;
		}

		usermapped_index = freeidx++;
		first_um_idx = usermapped_index;
		if(usermapped_glo_index != -1)
			first_um_idx = usermapped_glo_index;

#ifdef USE_APIC
		if(lapic_addr)
			lapic_mapping_index = freeidx++;
		if (ioapic_enabled) {
			ioapic_first_index = freeidx;
			assert(nioapics > 0);
			freeidx += nioapics;
			ioapic_last_index = freeidx-1;
		}
#endif

#ifdef CONFIG_OXPCIE
		if((ser_var = env_get("oxpcie"))) {
			if(ser_var[0] != '0' || ser_var[1] != 'x') {
				printf("oxpcie address in hex please\n");
			} else {
				printf("oxpcie address is %s\n", ser_var);
				oxpcie_mapping_index = freeidx++;
			}
		}
#endif

		first = 0;
	}

	if(index == usermapped_glo_index) {
		*addr = vir2phys(&usermapped_start);
		*len = glo_len;
		*flags = VMMF_USER | VMMF_GLO;
		return OK;
	}
	else if(index == usermapped_index) {
		*addr = vir2phys(&usermapped_nonglo_start);
		*len = (u32_t) &usermapped_end -
			(u32_t) &usermapped_nonglo_start;
		*flags = VMMF_USER;
		return OK;
	}
	else if (index == video_mem_mapping_index) {
		/* Map video memory in so we can print panic messages. */
		*addr = MULTIBOOT_VIDEO_BUFFER;
		*len = I386_PAGE_SIZE;
		*flags = VMMF_WRITE;
		return OK;
	}
#ifdef USE_APIC
	else if (index == lapic_mapping_index) {
		/* Map the local APIC if enabled. */
		if (!lapic_addr)
			return EINVAL;
		*addr = lapic_addr;
		*len = 4 << 10 /* 4kB */;
		*flags = VMMF_UNCACHED | VMMF_WRITE;
		return OK;
	}
	else if (ioapic_enabled && index >= ioapic_first_index &&
			index <= ioapic_last_index) {
		int ioapic_idx = index - ioapic_first_index;
		*addr = io_apic[ioapic_idx].paddr;
		assert(*addr);
		*len = 4 << 10 /* 4kB */;
		*flags = VMMF_UNCACHED | VMMF_WRITE;
		printf("ioapic map: addr 0x%lx\n", *addr);
		return OK;
	}
#endif

#ifdef CONFIG_OXPCIE
	if(index == oxpcie_mapping_index) {
		*addr = strtoul(ser_var+2, NULL, 16);
		*len = 0x4000;
		*flags = VMMF_UNCACHED | VMMF_WRITE;
		return OK;
	}
#endif

	return EINVAL;
}
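/* The mapping indices above are handed out once, on the first call, in
 * a fixed order (video memory, user-mapped ranges, then the optional
 * APIC and serial mappings), so VM can enumerate every mapping the
 * kernel wants by calling arch_phys_map() with increasing indices until
 * it returns EINVAL.
 */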
int arch_phys_map_reply(const int index, const vir_bytes addr)
{
#ifdef USE_APIC
	/* if local APIC is enabled */
	if (index == lapic_mapping_index && lapic_addr) {
		lapic_addr_vaddr = addr;
		return OK;
	}
	else if (ioapic_enabled && index >= ioapic_first_index &&
		index <= ioapic_last_index) {
		int i = index - ioapic_first_index;
		io_apic[i].vaddr = addr;
		return OK;
	}
#endif

#ifdef CONFIG_OXPCIE
	if (index == oxpcie_mapping_index) {
		oxpcie_set_vaddr((unsigned char *) addr);
		return OK;
	}
#endif
	if(index == first_um_idx) {
		extern struct minix_ipcvecs minix_ipcvecs_sysenter,
			minix_ipcvecs_syscall,
			minix_ipcvecs_softint;
		extern u32_t usermapped_offset;
		assert(addr > (u32_t) &usermapped_start);
		usermapped_offset = addr - (u32_t) &usermapped_start;
#define FIXEDPTR(ptr) (void *) ((u32_t)ptr + usermapped_offset)
#define FIXPTR(ptr) ptr = FIXEDPTR(ptr)
#define ASSIGN(minixstruct) minix_kerninfo.minixstruct = FIXEDPTR(&minixstruct)
		ASSIGN(kinfo);
		ASSIGN(machine);
		ASSIGN(kmessages);
		ASSIGN(loadinfo);
		ASSIGN(kuserinfo);
		ASSIGN(arm_frclock);	/* eh, why not. */
		ASSIGN(kclockinfo);

		/* select the right set of IPC routines to map into processes */
		if(minix_feature_flags & MKF_I386_INTEL_SYSENTER) {
			DEBUGBASIC(("kernel: selecting intel sysenter ipc style\n"));
			minix_kerninfo.minix_ipcvecs = &minix_ipcvecs_sysenter;
		} else if(minix_feature_flags & MKF_I386_AMD_SYSCALL) {
			DEBUGBASIC(("kernel: selecting amd syscall ipc style\n"));
			minix_kerninfo.minix_ipcvecs = &minix_ipcvecs_syscall;
		} else {
			DEBUGBASIC(("kernel: selecting fallback (int) ipc style\n"));
			minix_kerninfo.minix_ipcvecs = &minix_ipcvecs_softint;
		}

		/* adjust the pointers of the functions and the struct
		 * itself to the user-accessible mapping
		 */
		FIXPTR(minix_kerninfo.minix_ipcvecs->send);
		FIXPTR(minix_kerninfo.minix_ipcvecs->receive);
		FIXPTR(minix_kerninfo.minix_ipcvecs->sendrec);
		FIXPTR(minix_kerninfo.minix_ipcvecs->senda);
		FIXPTR(minix_kerninfo.minix_ipcvecs->sendnb);
		FIXPTR(minix_kerninfo.minix_ipcvecs->notify);
		FIXPTR(minix_kerninfo.minix_ipcvecs->do_kernel_call);
		FIXPTR(minix_kerninfo.minix_ipcvecs);

		minix_kerninfo.kerninfo_magic = KERNINFO_MAGIC;
		minix_kerninfo.minix_feature_flags = minix_feature_flags;
		minix_kerninfo_user = (vir_bytes) FIXEDPTR(&minix_kerninfo);

		/* if libc_ipc is set, disable usermapped ipc functions
		 * and force binaries to use in-libc fallbacks.
		 */
		if(env_get("libc_ipc")) {
			printf("kernel: forcing in-libc fallback ipc style\n");
			minix_kerninfo.minix_ipcvecs = NULL;
		} else {
			minix_kerninfo.ki_flags |= MINIX_KIF_IPCVECS;
		}

		minix_kerninfo.ki_flags |= MINIX_KIF_USERINFO;

		return OK;
	}

	if(index == usermapped_index) return OK;

	if (index == video_mem_mapping_index) {
		video_mem_vaddr = addr;
		return OK;
	}

	return EINVAL;
}
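/* Illustrative FIXEDPTR example (made-up addresses): if the kernel
 * linked usermapped_start at 0x00400000 and VM mapped that range at
 * user-visible address 0xeff00000, then usermapped_offset becomes
 * 0xefb00000, and FIXEDPTR(&kinfo) yields the address at which user
 * processes see the kinfo struct inside the shared user-mapped pages.
 */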
int arch_enable_paging(struct proc * caller)
{
	assert(caller->p_seg.p_cr3);

	/* Load the caller's page table. */
	switch_address_space(caller);

	video_mem = (char *) video_mem_vaddr;

#ifdef USE_APIC
	/* Start using the virtual addresses. */

	/* If the local APIC is enabled. */
	if (lapic_addr) {
		lapic_addr = lapic_addr_vaddr;
		lapic_eoi_addr = LAPIC_EOI;
	}
	/* If IO APICs are enabled. */
	if (ioapic_enabled) {
		int i;

		for (i = 0; i < nioapics; i++) {
			io_apic[i].addr = io_apic[i].vaddr;
		}
	}
#ifdef CONFIG_SMP
	barrier();

	wait_for_APs_to_finish_booting();
#endif
#endif

#ifdef USE_WATCHDOG
	/*
	 * We make sure that we don't enable the watchdog until paging is
	 * turned on, as we might get an NMI while switching and we might
	 * still be using the wrong lapic address. Bad things would happen.
	 * It is unfortunate, but such is life.
	 */
	if (watchdog_enabled)
		i386_watchdog_start();
#endif

	return OK;
}

void release_address_space(struct proc *pr)
{
	pr->p_seg.p_cr3_v = NULL;
}

/* Computes a checksum of a buffer of a given length. The byte sum of a
 * valid table must be zero (mod 256).
 */
int platform_tbl_checksum_ok(void *ptr, unsigned int length)
{
	u8_t total = 0;
	unsigned int i;
	for (i = 0; i < length; i++)
		total += ((unsigned char *)ptr)[i];
	return !total;
}

/* Scan physical memory from 'start' to 'end' in steps of 'increment'
 * bytes, copying 'size' bytes at a time into 'buff' and applying the
 * caller-supplied match function 'cmp_f'. On a match, store the table's
 * physical address in *phys_addr (if non-NULL) and return 1; return 0
 * if nothing was found.
 */
int platform_tbl_ptr(phys_bytes start,
	phys_bytes end,
	unsigned increment,
	void * buff,
	unsigned size,
	phys_bytes * phys_addr,
	int (*cmp_f)(void *))
{
	phys_bytes addr;

	for (addr = start; addr < end; addr += increment) {
		phys_copy(addr, (phys_bytes) buff, size);
		if (cmp_f(buff)) {
			if (phys_addr)
				*phys_addr = addr;
			return 1;
		}
	}
	return 0;
}
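#if 0
/* Usage sketch (not compiled): how a platform driver might locate an
 * ACPI-style table with platform_tbl_ptr(). The 8-byte "RSD PTR "
 * signature and the scanned range follow the ACPI convention for the
 * RSDP in the BIOS read-only area; the helper names, the 16-byte probe
 * buffer, and the checksum span are hypothetical and for illustration
 * only.
 */
static char tbl_buf[16];

static int rsdp_sig_cmp(void *buf)
{
	/* A candidate matches if the signature matches and the byte sum
	 * of the copied bytes is zero (see platform_tbl_checksum_ok()).
	 */
	return memcmp(buf, "RSD PTR ", 8) == 0 &&
		platform_tbl_checksum_ok(buf, sizeof(tbl_buf));
}

static phys_bytes find_rsdp(void)
{
	phys_bytes pa;

	/* The RSDP is 16-byte aligned somewhere in 0xE0000-0xFFFFF. */
	if (platform_tbl_ptr(0xE0000, 0x100000, 16, tbl_buf,
			sizeof(tbl_buf), &pa, rsdp_sig_cmp))
		return pa;
	return 0;
}
#endif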