/*
 * COPYRIGHT:       See COPYING in the top level directory
 * PROJECT:         ReactOS kernel
 * FILE:            ntoskrnl/mm/i386/page.c
 * PURPOSE:         Low level memory management manipulation
 *
 * PROGRAMMERS:     David Welch (welch@cwcom.net)
 */

/* INCLUDES ***************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

#include <mm/ARM3/miarm.h>

#define ADDR_TO_PDE_OFFSET MiAddressToPdeOffset
#define ADDR_TO_PAGE_TABLE(v) (((ULONG)(v)) / (1024 * PAGE_SIZE))

/* GLOBALS *****************************************************************/

#define PA_BIT_PRESENT   (0)
#define PA_BIT_READWRITE (1)
#define PA_BIT_USER      (2)
#define PA_BIT_WT        (3)
#define PA_BIT_CD        (4)
#define PA_BIT_ACCESSED  (5)
#define PA_BIT_DIRTY     (6)
#define PA_BIT_GLOBAL    (8)

#define PA_PRESENT   (1 << PA_BIT_PRESENT)
#define PA_READWRITE (1 << PA_BIT_READWRITE)
#define PA_USER      (1 << PA_BIT_USER)
#define PA_DIRTY     (1 << PA_BIT_DIRTY)
#define PA_WT        (1 << PA_BIT_WT)
#define PA_CD        (1 << PA_BIT_CD)
#define PA_ACCESSED  (1 << PA_BIT_ACCESSED)
#define PA_GLOBAL    (1 << PA_BIT_GLOBAL)

#define IS_HYPERSPACE(v) (((ULONG)(v) >= HYPER_SPACE && (ULONG)(v) <= HYPER_SPACE_END))

#define PTE_TO_PFN(X)  ((X) >> PAGE_SHIFT)
#define PFN_TO_PTE(X)  ((X) << PAGE_SHIFT)

#define PAGE_MASK(x) ((x)&(~0xfff))
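/* The PA_* masks above mirror the i386 hardware PTE bit layout: bit 0 is
 * Present, bit 1 Read/Write, bit 2 User/Supervisor, bit 3 Write-Through,
 * bit 4 Cache-Disable, bit 5 Accessed, bit 6 Dirty and bit 8 Global.  In
 * addition, this file overloads bit 11 (0x800) of *non-present* PTEs as a
 * software marker for page file (swap) entries; see MmIsPageSwapEntry and
 * MmCreatePageFileMapping below. */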
const
ULONG
MmProtectToPteMask[32] =
{
    //
    // These are the base MM_ protection flags
    //
    0,
    PTE_READONLY | PTE_ENABLE_CACHE,
    PTE_EXECUTE | PTE_ENABLE_CACHE,
    PTE_EXECUTE_READ | PTE_ENABLE_CACHE,
    PTE_READWRITE | PTE_ENABLE_CACHE,
    PTE_WRITECOPY | PTE_ENABLE_CACHE,
    PTE_EXECUTE_READWRITE | PTE_ENABLE_CACHE,
    PTE_EXECUTE_WRITECOPY | PTE_ENABLE_CACHE,
    //
    // These OR in the MM_NOCACHE flag
    //
    0,
    PTE_READONLY | PTE_DISABLE_CACHE,
    PTE_EXECUTE | PTE_DISABLE_CACHE,
    PTE_EXECUTE_READ | PTE_DISABLE_CACHE,
    PTE_READWRITE | PTE_DISABLE_CACHE,
    PTE_WRITECOPY | PTE_DISABLE_CACHE,
    PTE_EXECUTE_READWRITE | PTE_DISABLE_CACHE,
    PTE_EXECUTE_WRITECOPY | PTE_DISABLE_CACHE,
    //
    // These OR in the MM_DECOMMIT flag, which doesn't seem supported on x86/64/ARM
    //
    0,
    PTE_READONLY | PTE_ENABLE_CACHE,
    PTE_EXECUTE | PTE_ENABLE_CACHE,
    PTE_EXECUTE_READ | PTE_ENABLE_CACHE,
    PTE_READWRITE | PTE_ENABLE_CACHE,
    PTE_WRITECOPY | PTE_ENABLE_CACHE,
    PTE_EXECUTE_READWRITE | PTE_ENABLE_CACHE,
    PTE_EXECUTE_WRITECOPY | PTE_ENABLE_CACHE,
    //
    // These OR in the MM_NOACCESS flag, which seems to enable WriteCombining?
    //
    0,
    PTE_READONLY | PTE_WRITECOMBINED_CACHE,
    PTE_EXECUTE | PTE_WRITECOMBINED_CACHE,
    PTE_EXECUTE_READ | PTE_WRITECOMBINED_CACHE,
    PTE_READWRITE | PTE_WRITECOMBINED_CACHE,
    PTE_WRITECOPY | PTE_WRITECOMBINED_CACHE,
    PTE_EXECUTE_READWRITE | PTE_WRITECOMBINED_CACHE,
    PTE_EXECUTE_WRITECOPY | PTE_WRITECOMBINED_CACHE,
};

const
ULONG MmProtectToValue[32] =
{
    PAGE_NOACCESS,
    PAGE_READONLY,
    PAGE_EXECUTE,
    PAGE_EXECUTE_READ,
    PAGE_READWRITE,
    PAGE_WRITECOPY,
    PAGE_EXECUTE_READWRITE,
    PAGE_EXECUTE_WRITECOPY,
    PAGE_NOACCESS,
    PAGE_NOCACHE | PAGE_READONLY,
    PAGE_NOCACHE | PAGE_EXECUTE,
    PAGE_NOCACHE | PAGE_EXECUTE_READ,
    PAGE_NOCACHE | PAGE_READWRITE,
    PAGE_NOCACHE | PAGE_WRITECOPY,
    PAGE_NOCACHE | PAGE_EXECUTE_READWRITE,
    PAGE_NOCACHE | PAGE_EXECUTE_WRITECOPY,
    PAGE_NOACCESS,
    PAGE_GUARD | PAGE_READONLY,
    PAGE_GUARD | PAGE_EXECUTE,
    PAGE_GUARD | PAGE_EXECUTE_READ,
    PAGE_GUARD | PAGE_READWRITE,
    PAGE_GUARD | PAGE_WRITECOPY,
    PAGE_GUARD | PAGE_EXECUTE_READWRITE,
    PAGE_GUARD | PAGE_EXECUTE_WRITECOPY,
    PAGE_NOACCESS,
    PAGE_WRITECOMBINE | PAGE_READONLY,
    PAGE_WRITECOMBINE | PAGE_EXECUTE,
    PAGE_WRITECOMBINE | PAGE_EXECUTE_READ,
    PAGE_WRITECOMBINE | PAGE_READWRITE,
    PAGE_WRITECOMBINE | PAGE_WRITECOPY,
    PAGE_WRITECOMBINE | PAGE_EXECUTE_READWRITE,
    PAGE_WRITECOMBINE | PAGE_EXECUTE_WRITECOPY
};

/* FUNCTIONS ***************************************************************/

static BOOLEAN MmUnmapPageTable(PULONG Pt);

VOID
MiFlushTlb(PULONG Pt, PVOID Address)
{
    if ((Pt && MmUnmapPageTable(Pt)) || Address >= MmSystemRangeStart)
    {
        KeInvalidateTlbEntry(Address);
    }
}

static ULONG
ProtectToPTE(ULONG flProtect)
{
    ULONG Attributes = 0;

    if (flProtect & (PAGE_NOACCESS|PAGE_GUARD))
    {
        Attributes = 0;
    }
    else if (flProtect & PAGE_IS_WRITABLE)
    {
        Attributes = PA_PRESENT | PA_READWRITE;
    }
    else if (flProtect & (PAGE_IS_READABLE | PAGE_IS_EXECUTABLE))
    {
        Attributes = PA_PRESENT;
    }
    else
    {
        DPRINT1("Unknown main protection type.\n");
        KeBugCheck(MEMORY_MANAGEMENT);
    }

    if (flProtect & PAGE_SYSTEM)
    {
    }
    else
    {
        Attributes = Attributes | PA_USER;
    }
    if (flProtect & PAGE_NOCACHE)
    {
        Attributes = Attributes | PA_CD;
    }
    if (flProtect & PAGE_WRITETHROUGH)
    {
        Attributes = Attributes | PA_WT;
    }
    return(Attributes);
}

NTSTATUS
NTAPI
MiDispatchFault(IN ULONG FaultCode,
                IN PVOID Address,
                IN PMMPTE PointerPte,
                IN PMMPTE PointerProtoPte,
                IN BOOLEAN Recursive,
                IN PEPROCESS Process,
                IN PVOID TrapInformation,
                IN PVOID Vad);

NTSTATUS
NTAPI
MiFillSystemPageDirectory(IN PVOID Base,
                          IN SIZE_T NumberOfBytes);
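/* Returns a (possibly hyperspace-mapped) pointer to the page table entry for
 * Address, or NULL if the page table does not exist and Create is FALSE.
 * Callers release the result through MmUnmapPageTable or MiFlushTlb.
 * Three cases are handled below:
 *  - another process' user address: the foreign page directory and page
 *    table are temporarily mapped through hyperspace (Create must be FALSE),
 *  - a user address in the current process: the PDE is faulted in through
 *    MiDispatchFault when Create is TRUE,
 *  - a kernel address: the PDE is synchronized from (or allocated in) the
 *    ARM3 system page directory. */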
static PULONG
MmGetPageTableForProcess(PEPROCESS Process, PVOID Address, BOOLEAN Create)
{
    PFN_NUMBER Pfn;
    PULONG Pt;
    PMMPDE PointerPde;

    if (Address < MmSystemRangeStart)
    {
        /* We should have a process for user land addresses */
        ASSERT(Process != NULL);

        if (Process != PsGetCurrentProcess())
        {
            PMMPDE PdeBase;
            ULONG PdeOffset = MiGetPdeOffset(Address);

            /* Nobody but the page fault handler should ask for creating the PDE,
             * which implies that Process is the current one */
            ASSERT(Create == FALSE);

            PdeBase = MmCreateHyperspaceMapping(PTE_TO_PFN(Process->Pcb.DirectoryTableBase[0]));
            if (PdeBase == NULL)
            {
                KeBugCheck(MEMORY_MANAGEMENT);
            }
            PointerPde = PdeBase + PdeOffset;
            if (PointerPde->u.Hard.Valid == 0)
            {
                MmDeleteHyperspaceMapping(PdeBase);
                return NULL;
            }
            else
            {
                Pfn = PointerPde->u.Hard.PageFrameNumber;
            }
            MmDeleteHyperspaceMapping(PdeBase);
            Pt = MmCreateHyperspaceMapping(Pfn);
            if (Pt == NULL)
            {
                KeBugCheck(MEMORY_MANAGEMENT);
            }
            return Pt + MiAddressToPteOffset(Address);
        }
        /* This is for our own process */
        PointerPde = MiAddressToPde(Address);
        Pt = (PULONG)MiAddressToPte(Address);
        if (PointerPde->u.Hard.Valid == 0)
        {
            NTSTATUS Status;
            if (Create == FALSE)
            {
                return NULL;
            }
            ASSERT(PointerPde->u.Long == 0);

            MI_WRITE_INVALID_PTE(PointerPde, DemandZeroPde);
            // Tiny HACK: Parameter 1 is the architecture specific FaultCode for an access violation (i.e. page is present)
            Status = MiDispatchFault(0x1,
                                     Pt,
                                     PointerPde,
                                     NULL,
                                     FALSE,
                                     PsGetCurrentProcess(),
                                     NULL,
                                     NULL);
            DBG_UNREFERENCED_LOCAL_VARIABLE(Status);
            ASSERT(KeAreAllApcsDisabled() == TRUE);
            ASSERT(PointerPde->u.Hard.Valid == 1);
        }
        return (PULONG)MiAddressToPte(Address);
    }

    /* This is for a kernel land address */
    ASSERT(Process == NULL);
    PointerPde = MiAddressToPde(Address);
    Pt = (PULONG)MiAddressToPte(Address);
    if (PointerPde->u.Hard.Valid == 0)
    {
        /* Let ARM3 synchronize the PDE */
        if (!MiSynchronizeSystemPde(PointerPde))
        {
            /* PDE (still) not valid, let ARM3 allocate one if asked */
            if (Create == FALSE)
                return NULL;
            MiFillSystemPageDirectory(Address, PAGE_SIZE);
        }
    }
    return Pt;
}

static BOOLEAN MmUnmapPageTable(PULONG Pt)
{
    if (!IS_HYPERSPACE(Pt))
    {
        return TRUE;
    }

    if (Pt)
    {
        MmDeleteHyperspaceMapping((PVOID)PAGE_ROUND_DOWN(Pt));
    }
    return FALSE;
}

static ULONG MmGetPageEntryForProcess(PEPROCESS Process, PVOID Address)
{
    ULONG Pte;
    PULONG Pt;

    Pt = MmGetPageTableForProcess(Process, Address, FALSE);
    if (Pt)
    {
        Pte = *Pt;
        MmUnmapPageTable(Pt);
        return Pte;
    }
    return 0;
}

PFN_NUMBER
NTAPI
MmGetPfnForProcess(PEPROCESS Process,
                   PVOID Address)
{
    ULONG Entry;
    Entry = MmGetPageEntryForProcess(Process, Address);
    if (!(Entry & PA_PRESENT))
    {
        return 0;
    }
    return(PTE_TO_PFN(Entry));
}
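/* The functions below distinguish three kinds of non-present PTEs:
 *  - zero: no mapping at all,
 *  - bit 11 (0x800) set: a page file (swap) entry, stored as SWAPENTRY << 1
 *    so that the hardware Present bit stays clear,
 *  - a non-zero PFN with neither the Present bit nor bit 11 set: a valid
 *    physical page that was protected to PAGE_NOACCESS (see
 *    MmIsDisabledPage). */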
VOID
NTAPI
MmDeleteVirtualMapping(PEPROCESS Process, PVOID Address,
                       BOOLEAN* WasDirty, PPFN_NUMBER Page)
/*
 * FUNCTION: Delete a virtual mapping
 */
{
    BOOLEAN WasValid = FALSE;
    PFN_NUMBER Pfn;
    ULONG Pte;
    PULONG Pt;

    DPRINT("MmDeleteVirtualMapping(%p, %p, %p, %p)\n",
           Process, Address, WasDirty, Page);

    Pt = MmGetPageTableForProcess(Process, Address, FALSE);

    if (Pt == NULL)
    {
        if (WasDirty != NULL)
        {
            *WasDirty = FALSE;
        }
        if (Page != NULL)
        {
            *Page = 0;
        }
        return;
    }

    /*
     * Atomically set the entry to zero and get the old value.
     */
    Pte = InterlockedExchangePte(Pt, 0);

    /* We count a mapping as valid if it's a present page, or it's a nonzero pfn with
     * the swap bit unset, indicating a valid page protected to PAGE_NOACCESS. */
    WasValid = (Pte & PA_PRESENT) || ((Pte >> PAGE_SHIFT) && !(Pte & 0x800));
    if (WasValid)
    {
        /* Flush the TLB since we transitioned this PTE
         * from valid to invalid so any stale translations
         * are removed from the cache */
        MiFlushTlb(Pt, Address);

        if (Address < MmSystemRangeStart)
        {
            /* Remove PDE reference */
            Process->Vm.VmWorkingSetList->UsedPageTableEntries[MiGetPdeOffset(Address)]--;
            ASSERT(Process->Vm.VmWorkingSetList->UsedPageTableEntries[MiGetPdeOffset(Address)] < PTE_PER_PAGE);
        }

        Pfn = PTE_TO_PFN(Pte);
    }
    else
    {
        MmUnmapPageTable(Pt);
        Pfn = 0;
    }

    /*
     * Return some information to the caller
     */
    if (WasDirty != NULL)
    {
        *WasDirty = ((Pte & PA_DIRTY) && (Pte & PA_PRESENT)) ? TRUE : FALSE;
    }
    if (Page != NULL)
    {
        *Page = Pfn;
    }
}

VOID
NTAPI
MmGetPageFileMapping(PEPROCESS Process, PVOID Address,
                     SWAPENTRY* SwapEntry)
/*
 * FUNCTION: Get a page file mapping
 */
{
    ULONG Entry = MmGetPageEntryForProcess(Process, Address);
    *SwapEntry = Entry >> 1;
}

VOID
NTAPI
MmDeletePageFileMapping(PEPROCESS Process, PVOID Address,
                        SWAPENTRY* SwapEntry)
/*
 * FUNCTION: Delete a page file mapping
 */
{
    ULONG Pte;
    PULONG Pt;

    Pt = MmGetPageTableForProcess(Process, Address, FALSE);

    if (Pt == NULL)
    {
        *SwapEntry = 0;
        return;
    }

    /*
     * Atomically set the entry to zero and get the old value.
     */
    Pte = InterlockedExchangePte(Pt, 0);

    if (Address < MmSystemRangeStart)
    {
        /* Remove PDE reference */
        Process->Vm.VmWorkingSetList->UsedPageTableEntries[MiGetPdeOffset(Address)]--;
        ASSERT(Process->Vm.VmWorkingSetList->UsedPageTableEntries[MiGetPdeOffset(Address)] < PTE_PER_PAGE);
    }

    /* We don't need to flush here because page file entries
     * are invalid translations, so the processor won't cache them */
    MmUnmapPageTable(Pt);

    if ((Pte & PA_PRESENT) || !(Pte & 0x800))
    {
        DPRINT1("Pte %x (expected a non-present entry with the swap bit 0x800 set)\n", Pte);
        KeBugCheck(MEMORY_MANAGEMENT);
    }

    /*
     * Return some information to the caller
     */
    *SwapEntry = Pte >> 1;
}

BOOLEAN
Mmi386MakeKernelPageTableGlobal(PVOID Address)
{
    PMMPDE PointerPde = MiAddressToPde(Address);
    PMMPTE PointerPte = MiAddressToPte(Address);

    if (PointerPde->u.Hard.Valid == 0)
    {
        if (!MiSynchronizeSystemPde(PointerPde))
            return FALSE;
        return PointerPte->u.Hard.Valid != 0;
    }
    return FALSE;
}
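/* Dirty bit accessors.  Clearing the dirty bit is followed by a TLB flush in
 * MmSetCleanPage: a cached translation that already has the dirty bit set
 * would keep the processor from setting it again in the PTE on a later
 * write.  Setting the bit needs no flush (see the comment in
 * MmSetDirtyPage). */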
BOOLEAN
NTAPI
MmIsDirtyPage(PEPROCESS Process, PVOID Address)
{
    return MmGetPageEntryForProcess(Process, Address) & PA_DIRTY ? TRUE : FALSE;
}

VOID
NTAPI
MmSetCleanPage(PEPROCESS Process, PVOID Address)
{
    PULONG Pt;
    ULONG Pte;

    if (Address < MmSystemRangeStart && Process == NULL)
    {
        DPRINT1("MmSetCleanPage is called for user space without a process.\n");
        KeBugCheck(MEMORY_MANAGEMENT);
    }

    Pt = MmGetPageTableForProcess(Process, Address, FALSE);
    if (Pt == NULL)
    {
        KeBugCheck(MEMORY_MANAGEMENT);
    }

    do
    {
        Pte = *Pt;
    } while (Pte != InterlockedCompareExchangePte(Pt, Pte & ~PA_DIRTY, Pte));

    if (!(Pte & PA_PRESENT))
    {
        KeBugCheck(MEMORY_MANAGEMENT);
    }
    else if (Pte & PA_DIRTY)
    {
        MiFlushTlb(Pt, Address);
    }
    else
    {
        MmUnmapPageTable(Pt);
    }
}

VOID
NTAPI
MmSetDirtyPage(PEPROCESS Process, PVOID Address)
{
    PULONG Pt;
    ULONG Pte;

    if (Address < MmSystemRangeStart && Process == NULL)
    {
        DPRINT1("MmSetDirtyPage is called for user space without a process.\n");
        KeBugCheck(MEMORY_MANAGEMENT);
    }

    Pt = MmGetPageTableForProcess(Process, Address, FALSE);
    if (Pt == NULL)
    {
        KeBugCheck(MEMORY_MANAGEMENT);
    }

    do
    {
        Pte = *Pt;
    } while (Pte != InterlockedCompareExchangePte(Pt, Pte | PA_DIRTY, Pte));

    if (!(Pte & PA_PRESENT))
    {
        KeBugCheck(MEMORY_MANAGEMENT);
    }
    else
    {
        /* The processor will never clear this bit itself, therefore
         * we do not need to flush the TLB here when setting it */
        MmUnmapPageTable(Pt);
    }
}

BOOLEAN
NTAPI
MmIsPagePresent(PEPROCESS Process, PVOID Address)
{
    return MmGetPageEntryForProcess(Process, Address) & PA_PRESENT;
}

BOOLEAN
NTAPI
MmIsDisabledPage(PEPROCESS Process, PVOID Address)
{
    ULONG_PTR Entry = MmGetPageEntryForProcess(Process, Address);
    return !(Entry & PA_PRESENT) && !(Entry & 0x800) && (Entry >> PAGE_SHIFT);
}

BOOLEAN
NTAPI
MmIsPageSwapEntry(PEPROCESS Process, PVOID Address)
{
    ULONG Entry;
    Entry = MmGetPageEntryForProcess(Process, Address);
    return !(Entry & PA_PRESENT) && (Entry & 0x800);
}

NTSTATUS
NTAPI
MmCreatePageFileMapping(PEPROCESS Process,
                        PVOID Address,
                        SWAPENTRY SwapEntry)
{
    PULONG Pt;
    ULONG Pte;

    if (Process == NULL && Address < MmSystemRangeStart)
    {
        DPRINT1("No process\n");
        KeBugCheck(MEMORY_MANAGEMENT);
    }
    if (Process != NULL && Address >= MmSystemRangeStart)
    {
        DPRINT1("Setting kernel address with process context\n");
        KeBugCheck(MEMORY_MANAGEMENT);
    }

    if (SwapEntry & (1 << 31))
    {
        KeBugCheck(MEMORY_MANAGEMENT);
    }

    Pt = MmGetPageTableForProcess(Process, Address, FALSE);
    if (Pt == NULL)
    {
        /* Nobody should page out an address that hasn't even been mapped */
        /* But we might place a wait entry first, requiring the page table */
        if (SwapEntry != MM_WAIT_ENTRY)
        {
            KeBugCheck(MEMORY_MANAGEMENT);
        }
        Pt = MmGetPageTableForProcess(Process, Address, TRUE);
    }
    Pte = InterlockedExchangePte(Pt, SwapEntry << 1);
    if (Pte != 0)
    {
        KeBugCheckEx(MEMORY_MANAGEMENT, SwapEntry, (ULONG_PTR)Process, (ULONG_PTR)Address, 0);
    }

    if (Address < MmSystemRangeStart)
    {
        /* Add PDE reference */
        Process->Vm.VmWorkingSetList->UsedPageTableEntries[MiGetPdeOffset(Address)]++;
        ASSERT(Process->Vm.VmWorkingSetList->UsedPageTableEntries[MiGetPdeOffset(Address)] <= PTE_PER_PAGE);
    }

    /* We don't need to flush the TLB here because it
     * only caches valid translations and a zero PTE
     * is not a valid translation */
    MmUnmapPageTable(Pt);

    return(STATUS_SUCCESS);
}
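/* Maps PageCount physical pages starting at Address with the given
 * protection, without validating the supplied PFNs ("unsafe");
 * MmCreateVirtualMapping below performs the MmIsPageInUse check first.
 * The loop only re-fetches the page table pointer when the address crosses
 * a page directory boundary, otherwise it just advances to the next PTE. */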
NTSTATUS
NTAPI
MmCreateVirtualMappingUnsafe(PEPROCESS Process,
                             PVOID Address,
                             ULONG flProtect,
                             PPFN_NUMBER Pages,
                             ULONG PageCount)
{
    ULONG Attributes;
    PVOID Addr;
    ULONG i;
    ULONG oldPdeOffset, PdeOffset;
    PULONG Pt = NULL;
    ULONG Pte;

    DPRINT("MmCreateVirtualMappingUnsafe(%p, %p, %lu, %p (%x), %lu)\n",
           Process, Address, flProtect, Pages, *Pages, PageCount);

    ASSERT(((ULONG_PTR)Address % PAGE_SIZE) == 0);

    if (Process == NULL)
    {
        if (Address < MmSystemRangeStart)
        {
            DPRINT1("NULL process given for user-mode mapping at %p -- %lu pages starting at %Ix\n", Address, PageCount, *Pages);
            KeBugCheck(MEMORY_MANAGEMENT);
        }
        if (PageCount > 0x10000 ||
            (ULONG_PTR)Address / PAGE_SIZE + PageCount > 0x100000)
        {
            DPRINT1("Page count too large for kernel-mode mapping at %p -- %lu pages starting at %Ix\n", Address, PageCount, *Pages);
            KeBugCheck(MEMORY_MANAGEMENT);
        }
    }
    else
    {
        if (Address >= MmSystemRangeStart)
        {
            DPRINT1("Process %p given for kernel-mode mapping at %p -- %lu pages starting at %Ix\n", Process, Address, PageCount, *Pages);
            KeBugCheck(MEMORY_MANAGEMENT);
        }
        if (PageCount > (ULONG_PTR)MmSystemRangeStart / PAGE_SIZE ||
            (ULONG_PTR)Address / PAGE_SIZE + PageCount >
            (ULONG_PTR)MmSystemRangeStart / PAGE_SIZE)
        {
            DPRINT1("Page count too large for process %p user-mode mapping at %p -- %lu pages starting at %Ix\n", Process, Address, PageCount, *Pages);
            KeBugCheck(MEMORY_MANAGEMENT);
        }
    }

    Attributes = ProtectToPTE(flProtect);
    Attributes &= 0xfff;
    if (Address >= MmSystemRangeStart)
    {
        Attributes &= ~PA_USER;
    }
    else
    {
        Attributes |= PA_USER;
    }

    Addr = Address;
    /* MmGetPageTableForProcess should be called on the first run, so
     * let this trigger it */
    oldPdeOffset = ADDR_TO_PDE_OFFSET(Addr) + 1;
    for (i = 0; i < PageCount; i++, Addr = (PVOID)((ULONG_PTR)Addr + PAGE_SIZE))
    {
        if (!(Attributes & PA_PRESENT) && Pages[i] != 0)
        {
            DPRINT1("Setting physical address but not allowing access at address "
                    "0x%p with attributes %x/%x.\n",
                    Addr, Attributes, flProtect);
            KeBugCheck(MEMORY_MANAGEMENT);
        }
        PdeOffset = ADDR_TO_PDE_OFFSET(Addr);
        if (oldPdeOffset != PdeOffset)
        {
            if (Pt) MmUnmapPageTable(Pt);
            Pt = MmGetPageTableForProcess(Process, Addr, TRUE);
            if (Pt == NULL)
            {
                KeBugCheck(MEMORY_MANAGEMENT);
            }
        }
        else
        {
            Pt++;
        }
        oldPdeOffset = PdeOffset;

        Pte = InterlockedExchangePte(Pt, PFN_TO_PTE(Pages[i]) | Attributes);

        /* There should not be anything valid here */
        if (Pte != 0)
        {
            DPRINT1("Bad PTE %lx at %p for %p + %lu\n", Pte, Pt, Address, i);
            KeBugCheck(MEMORY_MANAGEMENT);
        }

        /* We don't need to flush the TLB here because it only caches valid translations
         * and we're moving this PTE from invalid to valid so it can't be cached right now */

        if (Addr < MmSystemRangeStart)
        {
            /* Add PDE reference */
            Process->Vm.VmWorkingSetList->UsedPageTableEntries[MiGetPdeOffset(Addr)]++;
            ASSERT(Process->Vm.VmWorkingSetList->UsedPageTableEntries[MiGetPdeOffset(Addr)] <= PTE_PER_PAGE);
        }
    }

    ASSERT(Addr > Address);
    MmUnmapPageTable(Pt);

    return(STATUS_SUCCESS);
}
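/* MmCreateVirtualMapping below is the checked front end: it verifies each
 * supplied PFN with MmIsPageInUse before delegating to the unsafe variant
 * above.  A minimal single-page call might look like this (sketch only;
 * SomePfn and the surrounding context are hypothetical):
 *
 *     PFN_NUMBER Pfn = SomePfn;
 *     NTSTATUS Status = MmCreateVirtualMapping(Process,
 *                                              Address,
 *                                              PAGE_READWRITE,
 *                                              &Pfn,
 *                                              1);
 */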
NTSTATUS
NTAPI
MmCreateVirtualMapping(PEPROCESS Process,
                       PVOID Address,
                       ULONG flProtect,
                       PPFN_NUMBER Pages,
                       ULONG PageCount)
{
    ULONG i;

    ASSERT((ULONG_PTR)Address % PAGE_SIZE == 0);
    for (i = 0; i < PageCount; i++)
    {
        if (!MmIsPageInUse(Pages[i]))
        {
            DPRINT1("Page at address %x not in use\n", PFN_TO_PTE(Pages[i]));
            KeBugCheck(MEMORY_MANAGEMENT);
        }
    }

    return(MmCreateVirtualMappingUnsafe(Process,
                                        Address,
                                        flProtect,
                                        Pages,
                                        PageCount));
}

ULONG
NTAPI
MmGetPageProtect(PEPROCESS Process, PVOID Address)
{
    ULONG Entry;
    ULONG Protect;

    Entry = MmGetPageEntryForProcess(Process, Address);

    if (!(Entry & PA_PRESENT))
    {
        Protect = PAGE_NOACCESS;
    }
    else
    {
        if (Entry & PA_READWRITE)
        {
            Protect = PAGE_READWRITE;
        }
        else
        {
            Protect = PAGE_EXECUTE_READ;
        }
        if (Entry & PA_CD)
        {
            Protect |= PAGE_NOCACHE;
        }
        if (Entry & PA_WT)
        {
            Protect |= PAGE_WRITETHROUGH;
        }
        if (!(Entry & PA_USER))
        {
            Protect |= PAGE_SYSTEM;
        }
    }
    return(Protect);
}

VOID
NTAPI
MmSetPageProtect(PEPROCESS Process, PVOID Address, ULONG flProtect)
{
    ULONG Attributes = 0;
    PULONG Pt;
    ULONG Pte;

    DPRINT("MmSetPageProtect(Process %p Address %p flProtect %x)\n",
           Process, Address, flProtect);

    Attributes = ProtectToPTE(flProtect);

    Attributes &= 0xfff;
    if (Address >= MmSystemRangeStart)
    {
        Attributes &= ~PA_USER;
    }
    else
    {
        Attributes |= PA_USER;
    }

    Pt = MmGetPageTableForProcess(Process, Address, FALSE);
    if (Pt == NULL)
    {
        KeBugCheck(MEMORY_MANAGEMENT);
    }
    Pte = InterlockedExchangePte(Pt, PAGE_MASK(*Pt) | Attributes | (*Pt & (PA_ACCESSED|PA_DIRTY)));

    // We should be able to bring a page back from PAGE_NOACCESS
    if ((Pte & 0x800) || !(Pte >> PAGE_SHIFT))
    {
        DPRINT1("Invalid Pte %lx\n", Pte);
        KeBugCheck(MEMORY_MANAGEMENT);
    }

    if ((Pte & Attributes) != Attributes)
        MiFlushTlb(Pt, Address);
    else
        MmUnmapPageTable(Pt);
}

CODE_SEG("INIT")
VOID
NTAPI
MmInitGlobalKernelPageDirectory(VOID)
{
    /* Nothing to do here */
}

/* EOF */