/*
 * COPYRIGHT:       See COPYING in the top level directory
 * PROJECT:         ReactOS kernel
 * FILE:            ntoskrnl/mm/i386/page.c
 * PURPOSE:         Low level memory management manipulation
 *
 * PROGRAMMERS:     David Welch (welch@cwcom.net)
 */

/* INCLUDES ***************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>
#include <mm/ARM3/miarm.h>

#if defined (ALLOC_PRAGMA)
#pragma alloc_text(INIT, MmInitGlobalKernelPageDirectory)
#endif

#define ADDR_TO_PDE_OFFSET MiAddressToPdeOffset
#define ADDR_TO_PAGE_TABLE(v) (((ULONG)(v)) / (1024 * PAGE_SIZE))

/* GLOBALS *****************************************************************/

#define PA_BIT_PRESENT   (0)
#define PA_BIT_READWRITE (1)
#define PA_BIT_USER      (2)
#define PA_BIT_WT        (3)
#define PA_BIT_CD        (4)
#define PA_BIT_ACCESSED  (5)
#define PA_BIT_DIRTY     (6)
#define PA_BIT_GLOBAL    (8)

#define PA_PRESENT   (1 << PA_BIT_PRESENT)
#define PA_READWRITE (1 << PA_BIT_READWRITE)
#define PA_USER      (1 << PA_BIT_USER)
#define PA_DIRTY     (1 << PA_BIT_DIRTY)
#define PA_WT        (1 << PA_BIT_WT)
#define PA_CD        (1 << PA_BIT_CD)
#define PA_ACCESSED  (1 << PA_BIT_ACCESSED)
#define PA_GLOBAL    (1 << PA_BIT_GLOBAL)

#define IS_HYPERSPACE(v) (((ULONG)(v) >= HYPER_SPACE && (ULONG)(v) <= HYPER_SPACE_END))

#define PTE_TO_PFN(X)  ((X) >> PAGE_SHIFT)
#define PFN_TO_PTE(X)  ((X) << PAGE_SHIFT)

#define PAGE_MASK(x) ((x)&(~0xfff))

const
ULONG
MmProtectToPteMask[32] =
{
    //
    // These are the base MM_ protection flags
    //
    0,
    PTE_READONLY            | PTE_ENABLE_CACHE,
    PTE_EXECUTE             | PTE_ENABLE_CACHE,
    PTE_EXECUTE_READ        | PTE_ENABLE_CACHE,
    PTE_READWRITE           | PTE_ENABLE_CACHE,
    PTE_WRITECOPY           | PTE_ENABLE_CACHE,
    PTE_EXECUTE_READWRITE   | PTE_ENABLE_CACHE,
    PTE_EXECUTE_WRITECOPY   | PTE_ENABLE_CACHE,
    //
    // These OR in the MM_NOCACHE flag
    //
    0,
    PTE_READONLY            | PTE_DISABLE_CACHE,
    PTE_EXECUTE             | PTE_DISABLE_CACHE,
    PTE_EXECUTE_READ        | PTE_DISABLE_CACHE,
    PTE_READWRITE           | PTE_DISABLE_CACHE,
    PTE_WRITECOPY           | PTE_DISABLE_CACHE,
    PTE_EXECUTE_READWRITE   | PTE_DISABLE_CACHE,
    PTE_EXECUTE_WRITECOPY   | PTE_DISABLE_CACHE,
    //
    // These OR in the MM_DECOMMIT flag, which doesn't seem supported on x86/64/ARM
    //
    0,
    PTE_READONLY            | PTE_ENABLE_CACHE,
    PTE_EXECUTE             | PTE_ENABLE_CACHE,
    PTE_EXECUTE_READ        | PTE_ENABLE_CACHE,
    PTE_READWRITE           | PTE_ENABLE_CACHE,
    PTE_WRITECOPY           | PTE_ENABLE_CACHE,
    PTE_EXECUTE_READWRITE   | PTE_ENABLE_CACHE,
    PTE_EXECUTE_WRITECOPY   | PTE_ENABLE_CACHE,
    //
    // These OR in the MM_NOACCESS flag, which seems to enable WriteCombining?
    //
    0,
    PTE_READONLY            | PTE_WRITECOMBINED_CACHE,
    PTE_EXECUTE             | PTE_WRITECOMBINED_CACHE,
    PTE_EXECUTE_READ        | PTE_WRITECOMBINED_CACHE,
    PTE_READWRITE           | PTE_WRITECOMBINED_CACHE,
    PTE_WRITECOPY           | PTE_WRITECOMBINED_CACHE,
    PTE_EXECUTE_READWRITE   | PTE_WRITECOMBINED_CACHE,
    PTE_EXECUTE_WRITECOPY   | PTE_WRITECOMBINED_CACHE,
};

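/* Both 32-entry tables are indexed by an MM_* protection value: the low three
 * bits select one of the eight base protections and the next two bits select
 * the modifier group, so entries 8-15 carry the no-cache variants, 16-23 the
 * guard/decommit variants and 24-31 the write-combined variants. A caller
 * would, for example, look up something like
 * MmProtectToValue[MM_READWRITE | MM_NOCACHE] to recover
 * PAGE_NOCACHE | PAGE_READWRITE (illustrative only; the exact MM_* encoding
 * lives in mm/ARM3/miarm.h). */
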
const
ULONG MmProtectToValue[32] =
{
    PAGE_NOACCESS,
    PAGE_READONLY,
    PAGE_EXECUTE,
    PAGE_EXECUTE_READ,
    PAGE_READWRITE,
    PAGE_WRITECOPY,
    PAGE_EXECUTE_READWRITE,
    PAGE_EXECUTE_WRITECOPY,
    PAGE_NOACCESS,
    PAGE_NOCACHE | PAGE_READONLY,
    PAGE_NOCACHE | PAGE_EXECUTE,
    PAGE_NOCACHE | PAGE_EXECUTE_READ,
    PAGE_NOCACHE | PAGE_READWRITE,
    PAGE_NOCACHE | PAGE_WRITECOPY,
    PAGE_NOCACHE | PAGE_EXECUTE_READWRITE,
    PAGE_NOCACHE | PAGE_EXECUTE_WRITECOPY,
    PAGE_NOACCESS,
    PAGE_GUARD | PAGE_READONLY,
    PAGE_GUARD | PAGE_EXECUTE,
    PAGE_GUARD | PAGE_EXECUTE_READ,
    PAGE_GUARD | PAGE_READWRITE,
    PAGE_GUARD | PAGE_WRITECOPY,
    PAGE_GUARD | PAGE_EXECUTE_READWRITE,
    PAGE_GUARD | PAGE_EXECUTE_WRITECOPY,
    PAGE_NOACCESS,
    PAGE_WRITECOMBINE | PAGE_READONLY,
    PAGE_WRITECOMBINE | PAGE_EXECUTE,
    PAGE_WRITECOMBINE | PAGE_EXECUTE_READ,
    PAGE_WRITECOMBINE | PAGE_READWRITE,
    PAGE_WRITECOMBINE | PAGE_WRITECOPY,
    PAGE_WRITECOMBINE | PAGE_EXECUTE_READWRITE,
    PAGE_WRITECOMBINE | PAGE_EXECUTE_WRITECOPY
};

/* FUNCTIONS ***************************************************************/

static BOOLEAN MmUnmapPageTable(PULONG Pt);

VOID
MiFlushTlb(PULONG Pt, PVOID Address)
{
    if ((Pt && MmUnmapPageTable(Pt)) || Address >= MmSystemRangeStart)
    {
        KeInvalidateTlbEntry(Address);
    }
}

static ULONG
ProtectToPTE(ULONG flProtect)
{
    ULONG Attributes = 0;

    if (flProtect & (PAGE_NOACCESS|PAGE_GUARD))
    {
        Attributes = 0;
    }
    else if (flProtect & PAGE_IS_WRITABLE)
    {
        Attributes = PA_PRESENT | PA_READWRITE;
    }
    else if (flProtect & (PAGE_IS_READABLE | PAGE_IS_EXECUTABLE))
    {
        Attributes = PA_PRESENT;
    }
    else
    {
        DPRINT1("Unknown main protection type.\n");
        KeBugCheck(MEMORY_MANAGEMENT);
    }

    if (flProtect & PAGE_SYSTEM)
    {
    }
    else
    {
        Attributes = Attributes | PA_USER;
    }
    if (flProtect & PAGE_NOCACHE)
    {
        Attributes = Attributes | PA_CD;
    }
    if (flProtect & PAGE_WRITETHROUGH)
    {
        Attributes = Attributes | PA_WT;
    }
    return(Attributes);
}

NTSTATUS
NTAPI
MiDispatchFault(IN ULONG FaultCode,
                IN PVOID Address,
                IN PMMPTE PointerPte,
                IN PMMPTE PointerProtoPte,
                IN BOOLEAN Recursive,
                IN PEPROCESS Process,
                IN PVOID TrapInformation,
                IN PVOID Vad);

NTSTATUS
NTAPI
MiFillSystemPageDirectory(IN PVOID Base,
                          IN SIZE_T NumberOfBytes);

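/* Returns a pointer to the hardware PTE that maps Address, or NULL if the
 * owning page table does not exist and Create is FALSE. For the current
 * process and for kernel addresses the PTE is reached through the self-map
 * (MiAddressToPte); for another process the page directory and page table are
 * reached through a temporary hyperspace mapping, which the caller must
 * release again with MmUnmapPageTable. When Create is TRUE a missing page
 * table is allocated: by faulting in a demand-zero PDE for user addresses, or
 * via MiSynchronizeSystemPde/MiFillSystemPageDirectory for kernel addresses. */
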
static PULONG
MmGetPageTableForProcess(PEPROCESS Process, PVOID Address, BOOLEAN Create)
{
    PFN_NUMBER Pfn;
    PULONG Pt;
    PMMPDE PointerPde;

    if (Address < MmSystemRangeStart)
    {
        /* We should have a process for user land addresses */
        ASSERT(Process != NULL);

        if(Process != PsGetCurrentProcess())
        {
            PMMPDE PdeBase;
            ULONG PdeOffset = MiGetPdeOffset(Address);

            /* Nobody but the page fault handler should ask us to create the PDE,
             * which implies that Process is the current one */
            ASSERT(Create == FALSE);

            PdeBase = MmCreateHyperspaceMapping(PTE_TO_PFN(Process->Pcb.DirectoryTableBase[0]));
            if (PdeBase == NULL)
            {
                KeBugCheck(MEMORY_MANAGEMENT);
            }
            PointerPde = PdeBase + PdeOffset;
            if (PointerPde->u.Hard.Valid == 0)
            {
                MmDeleteHyperspaceMapping(PdeBase);
                return NULL;
            }
            else
            {
                Pfn = PointerPde->u.Hard.PageFrameNumber;
            }
            MmDeleteHyperspaceMapping(PdeBase);
            Pt = MmCreateHyperspaceMapping(Pfn);
            if (Pt == NULL)
            {
                KeBugCheck(MEMORY_MANAGEMENT);
            }
            return Pt + MiAddressToPteOffset(Address);
        }
        /* This is for our process */
        PointerPde = MiAddressToPde(Address);
        Pt = (PULONG)MiAddressToPte(Address);
        if (PointerPde->u.Hard.Valid == 0)
        {
            NTSTATUS Status;
            if (Create == FALSE)
            {
                return NULL;
            }
            ASSERT(PointerPde->u.Long == 0);

            MI_WRITE_INVALID_PTE(PointerPde, DemandZeroPde);
            // Tiny HACK: Parameter 1 is the architecture specific FaultCode for an access violation (i.e. page is present)
            Status = MiDispatchFault(0x1,
                                     Pt,
                                     PointerPde,
                                     NULL,
                                     FALSE,
                                     PsGetCurrentProcess(),
                                     NULL,
                                     NULL);
            DBG_UNREFERENCED_LOCAL_VARIABLE(Status);
            ASSERT(KeAreAllApcsDisabled() == TRUE);
            ASSERT(PointerPde->u.Hard.Valid == 1);
        }
        return (PULONG)MiAddressToPte(Address);
    }

    /* This is for a kernel land address */
    ASSERT(Process == NULL);
    PointerPde = MiAddressToPde(Address);
    Pt = (PULONG)MiAddressToPte(Address);
    if (PointerPde->u.Hard.Valid == 0)
    {
        /* Let ARM3 synchronize the PDE */
        if(!MiSynchronizeSystemPde(PointerPde))
        {
            /* PDE (still) not valid, let ARM3 allocate one if asked */
            if(Create == FALSE)
                return NULL;
            MiFillSystemPageDirectory(Address, PAGE_SIZE);
        }
    }
    return Pt;
}

static BOOLEAN MmUnmapPageTable(PULONG Pt)
{
    if (!IS_HYPERSPACE(Pt))
    {
        return TRUE;
    }

    if (Pt)
    {
        MmDeleteHyperspaceMapping((PVOID)PAGE_ROUND_DOWN(Pt));
    }
    return FALSE;
}

static ULONG MmGetPageEntryForProcess(PEPROCESS Process, PVOID Address)
{
    ULONG Pte;
    PULONG Pt;

    Pt = MmGetPageTableForProcess(Process, Address, FALSE);
    if (Pt)
    {
        Pte = *Pt;
        MmUnmapPageTable(Pt);
        return Pte;
    }
    return 0;
}

PFN_NUMBER
NTAPI
MmGetPfnForProcess(PEPROCESS Process,
                   PVOID Address)
{
    ULONG Entry;
    Entry = MmGetPageEntryForProcess(Process, Address);
    if (!(Entry & PA_PRESENT))
    {
        return 0;
    }
    return(PTE_TO_PFN(Entry));
}

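/* Invalid-PTE encoding used throughout this file: a PTE with PA_PRESENT clear
 * and bit 0x800 set holds a swap entry, stored shifted left by one bit so the
 * present bit stays clear (see MmCreatePageFileMapping/MmGetPageFileMapping).
 * A PTE with PA_PRESENT and bit 0x800 both clear but a nonzero PFN field is a
 * physical page that is merely protected to PAGE_NOACCESS (see MmIsDisabledPage). */
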
VOID
NTAPI
MmDeleteVirtualMapping(PEPROCESS Process, PVOID Address,
                       BOOLEAN* WasDirty, PPFN_NUMBER Page)
/*
 * FUNCTION: Delete a virtual mapping
 */
{
    BOOLEAN WasValid = FALSE;
    PFN_NUMBER Pfn;
    ULONG Pte;
    PULONG Pt;

    DPRINT("MmDeleteVirtualMapping(%p, %p, %p, %p)\n",
           Process, Address, WasDirty, Page);

    Pt = MmGetPageTableForProcess(Process, Address, FALSE);

    if (Pt == NULL)
    {
        if (WasDirty != NULL)
        {
            *WasDirty = FALSE;
        }
        if (Page != NULL)
        {
            *Page = 0;
        }
        return;
    }

    /*
     * Atomically set the entry to zero and get the old value.
     */
    Pte = InterlockedExchangePte(Pt, 0);

    /* We count a mapping as valid if it's a present page, or it's a nonzero pfn with
     * the swap bit unset, indicating a valid page protected to PAGE_NOACCESS. */
    WasValid = (Pte & PA_PRESENT) || ((Pte >> PAGE_SHIFT) && !(Pte & 0x800));
    if (WasValid)
    {
        /* Flush the TLB since we transitioned this PTE
         * from valid to invalid so any stale translations
         * are removed from the cache */
        MiFlushTlb(Pt, Address);

        if (Address < MmSystemRangeStart)
        {
            /* Remove PDE reference */
            Process->Vm.VmWorkingSetList->UsedPageTableEntries[MiGetPdeOffset(Address)]--;
            ASSERT(Process->Vm.VmWorkingSetList->UsedPageTableEntries[MiGetPdeOffset(Address)] < PTE_COUNT);
        }

        Pfn = PTE_TO_PFN(Pte);
    }
    else
    {
        MmUnmapPageTable(Pt);
        Pfn = 0;
    }

    /*
     * Return some information to the caller
     */
    if (WasDirty != NULL)
    {
        *WasDirty = ((Pte & PA_DIRTY) && (Pte & PA_PRESENT)) ? TRUE : FALSE;
    }
    if (Page != NULL)
    {
        *Page = Pfn;
    }
}

VOID
NTAPI
MmGetPageFileMapping(PEPROCESS Process, PVOID Address,
                     SWAPENTRY* SwapEntry)
/*
 * FUNCTION: Get a page file mapping
 */
{
    ULONG Entry = MmGetPageEntryForProcess(Process, Address);
    *SwapEntry = Entry >> 1;
}

VOID
NTAPI
MmDeletePageFileMapping(PEPROCESS Process, PVOID Address,
                        SWAPENTRY* SwapEntry)
/*
 * FUNCTION: Delete a page file mapping
 */
{
    ULONG Pte;
    PULONG Pt;

    Pt = MmGetPageTableForProcess(Process, Address, FALSE);

    if (Pt == NULL)
    {
        *SwapEntry = 0;
        return;
    }

    /*
     * Atomically set the entry to zero and get the old value.
     */
    Pte = InterlockedExchangePte(Pt, 0);

    if (Address < MmSystemRangeStart)
    {
        /* Remove PDE reference */
        Process->Vm.VmWorkingSetList->UsedPageTableEntries[MiGetPdeOffset(Address)]--;
        ASSERT(Process->Vm.VmWorkingSetList->UsedPageTableEntries[MiGetPdeOffset(Address)] < PTE_COUNT);
    }

    /* We don't need to flush here because page file entries
     * are invalid translations, so the processor won't cache them */
    MmUnmapPageTable(Pt);

    if ((Pte & PA_PRESENT) || !(Pte & 0x800))
    {
        DPRINT1("Pte %x (want not 1 and 0x800)\n", Pte);
        KeBugCheck(MEMORY_MANAGEMENT);
    }

    /*
     * Return some information to the caller
     */
    *SwapEntry = Pte >> 1;
}

BOOLEAN
Mmi386MakeKernelPageTableGlobal(PVOID Address)
{
    PMMPDE PointerPde = MiAddressToPde(Address);
    PMMPTE PointerPte = MiAddressToPte(Address);

    if (PointerPde->u.Hard.Valid == 0)
    {
        if(!MiSynchronizeSystemPde(PointerPde))
            return FALSE;
        return PointerPte->u.Hard.Valid != 0;
    }
    return FALSE;
}

BOOLEAN
NTAPI
MmIsDirtyPage(PEPROCESS Process, PVOID Address)
{
    return MmGetPageEntryForProcess(Process, Address) & PA_DIRTY ?
           TRUE : FALSE;
}

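/* MmSetCleanPage and MmSetDirtyPage below update the PA_DIRTY bit with a
 * compare-exchange loop so a concurrent hardware update of the PTE is not
 * lost. Only clearing the bit is followed by a TLB flush, presumably so a
 * cached translation that is already marked dirty cannot let a later write go
 * unrecorded; setting it needs no flush, as the comment in MmSetDirtyPage
 * explains. */
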
VOID
NTAPI
MmSetCleanPage(PEPROCESS Process, PVOID Address)
{
    PULONG Pt;
    ULONG Pte;

    if (Address < MmSystemRangeStart && Process == NULL)
    {
        DPRINT1("MmSetCleanPage is called for user space without a process.\n");
        KeBugCheck(MEMORY_MANAGEMENT);
    }

    Pt = MmGetPageTableForProcess(Process, Address, FALSE);
    if (Pt == NULL)
    {
        KeBugCheck(MEMORY_MANAGEMENT);
    }

    do
    {
        Pte = *Pt;
    } while (Pte != InterlockedCompareExchangePte(Pt, Pte & ~PA_DIRTY, Pte));

    if (!(Pte & PA_PRESENT))
    {
        KeBugCheck(MEMORY_MANAGEMENT);
    }
    else if (Pte & PA_DIRTY)
    {
        MiFlushTlb(Pt, Address);
    }
    else
    {
        MmUnmapPageTable(Pt);
    }
}

VOID
NTAPI
MmSetDirtyPage(PEPROCESS Process, PVOID Address)
{
    PULONG Pt;
    ULONG Pte;

    if (Address < MmSystemRangeStart && Process == NULL)
    {
        DPRINT1("MmSetDirtyPage is called for user space without a process.\n");
        KeBugCheck(MEMORY_MANAGEMENT);
    }

    Pt = MmGetPageTableForProcess(Process, Address, FALSE);
    if (Pt == NULL)
    {
        KeBugCheck(MEMORY_MANAGEMENT);
    }

    do
    {
        Pte = *Pt;
    } while (Pte != InterlockedCompareExchangePte(Pt, Pte | PA_DIRTY, Pte));

    if (!(Pte & PA_PRESENT))
    {
        KeBugCheck(MEMORY_MANAGEMENT);
    }
    else
    {
        /* The processor will never clear this bit itself, therefore
         * we do not need to flush the TLB here when setting it */
        MmUnmapPageTable(Pt);
    }
}

BOOLEAN
NTAPI
MmIsPagePresent(PEPROCESS Process, PVOID Address)
{
    return MmGetPageEntryForProcess(Process, Address) & PA_PRESENT;
}

BOOLEAN
NTAPI
MmIsDisabledPage(PEPROCESS Process, PVOID Address)
{
    ULONG_PTR Entry = MmGetPageEntryForProcess(Process, Address);
    return !(Entry & PA_PRESENT) && !(Entry & 0x800) && (Entry >> PAGE_SHIFT);
}

BOOLEAN
NTAPI
MmIsPageSwapEntry(PEPROCESS Process, PVOID Address)
{
    ULONG Entry;
    Entry = MmGetPageEntryForProcess(Process, Address);
    return !(Entry & PA_PRESENT) && (Entry & 0x800);
}

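/* Stores a swap entry (or the MM_WAIT_ENTRY placeholder) in the PTE for
 * Address. The value is written shifted left by one bit so the present bit
 * stays clear, and the previous PTE must be zero. A missing page table is
 * only created for the wait-entry case, since anything actually being paged
 * out must already have been mapped. */
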
NTSTATUS
NTAPI
MmCreatePageFileMapping(PEPROCESS Process,
                        PVOID Address,
                        SWAPENTRY SwapEntry)
{
    PULONG Pt;
    ULONG Pte;

    if (Process == NULL && Address < MmSystemRangeStart)
    {
        DPRINT1("No process\n");
        KeBugCheck(MEMORY_MANAGEMENT);
    }
    if (Process != NULL && Address >= MmSystemRangeStart)
    {
        DPRINT1("Setting kernel address with process context\n");
        KeBugCheck(MEMORY_MANAGEMENT);
    }

    if (SwapEntry & (1 << 31))
    {
        KeBugCheck(MEMORY_MANAGEMENT);
    }

    Pt = MmGetPageTableForProcess(Process, Address, FALSE);
    if (Pt == NULL)
    {
        /* Nobody should page out an address that hasn't even been mapped */
        /* But we might place a wait entry first, requiring the page table */
        if (SwapEntry != MM_WAIT_ENTRY)
        {
            KeBugCheck(MEMORY_MANAGEMENT);
        }
        Pt = MmGetPageTableForProcess(Process, Address, TRUE);
    }
    Pte = InterlockedExchangePte(Pt, SwapEntry << 1);
    if (Pte != 0)
    {
        KeBugCheckEx(MEMORY_MANAGEMENT, SwapEntry, (ULONG_PTR)Process, (ULONG_PTR)Address, 0);
    }

    if (Address < MmSystemRangeStart)
    {
        /* Add PDE reference */
        Process->Vm.VmWorkingSetList->UsedPageTableEntries[MiGetPdeOffset(Address)]++;
        ASSERT(Process->Vm.VmWorkingSetList->UsedPageTableEntries[MiGetPdeOffset(Address)] <= PTE_COUNT);
    }

    /* We don't need to flush the TLB here because it
     * only caches valid translations and a zero PTE
     * is not a valid translation */
    MmUnmapPageTable(Pt);

    return(STATUS_SUCCESS);
}

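/* Writes hardware PTEs for PageCount physical pages starting at Address using
 * the protection in flProtect. The target PTEs must currently be zero; page
 * tables are created on demand and, for user addresses, the per-PDE counters
 * in the working set list are incremented. "Unsafe" here means the physical
 * pages are not validated; MmCreateVirtualMapping further below checks each
 * PFN with MmIsPageInUse before delegating to this routine. */
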
NTSTATUS
NTAPI
MmCreateVirtualMappingUnsafe(PEPROCESS Process,
                             PVOID Address,
                             ULONG flProtect,
                             PPFN_NUMBER Pages,
                             ULONG PageCount)
{
    ULONG Attributes;
    PVOID Addr;
    ULONG i;
    ULONG oldPdeOffset, PdeOffset;
    PULONG Pt = NULL;
    ULONG Pte;
    DPRINT("MmCreateVirtualMappingUnsafe(%p, %p, %lu, %p (%x), %lu)\n",
           Process, Address, flProtect, Pages, *Pages, PageCount);

    ASSERT(((ULONG_PTR)Address % PAGE_SIZE) == 0);

    if (Process == NULL)
    {
        if (Address < MmSystemRangeStart)
        {
            DPRINT1("NULL process given for user-mode mapping at %p -- %lu pages starting at %Ix\n", Address, PageCount, *Pages);
            KeBugCheck(MEMORY_MANAGEMENT);
        }
        if (PageCount > 0x10000 ||
            (ULONG_PTR) Address / PAGE_SIZE + PageCount > 0x100000)
        {
            DPRINT1("Page count too large for kernel-mode mapping at %p -- %lu pages starting at %Ix\n", Address, PageCount, *Pages);
            KeBugCheck(MEMORY_MANAGEMENT);
        }
    }
    else
    {
        if (Address >= MmSystemRangeStart)
        {
            DPRINT1("Process %p given for kernel-mode mapping at %p -- %lu pages starting at %Ix\n", Process, Address, PageCount, *Pages);
            KeBugCheck(MEMORY_MANAGEMENT);
        }
        if (PageCount > (ULONG_PTR)MmSystemRangeStart / PAGE_SIZE ||
            (ULONG_PTR) Address / PAGE_SIZE + PageCount >
            (ULONG_PTR)MmSystemRangeStart / PAGE_SIZE)
        {
            DPRINT1("Page count too large for process %p user-mode mapping at %p -- %lu pages starting at %Ix\n", Process, Address, PageCount, *Pages);
            KeBugCheck(MEMORY_MANAGEMENT);
        }
    }

    Attributes = ProtectToPTE(flProtect);
    Attributes &= 0xfff;
    if (Address >= MmSystemRangeStart)
    {
        Attributes &= ~PA_USER;
    }
    else
    {
        Attributes |= PA_USER;
    }

    Addr = Address;
    /* Force MmGetPageTableForProcess to be called on the first iteration by
     * starting with a PDE offset that cannot match */
    oldPdeOffset = ADDR_TO_PDE_OFFSET(Addr) + 1;
    for (i = 0; i < PageCount; i++, Addr = (PVOID)((ULONG_PTR)Addr + PAGE_SIZE))
    {
        if (!(Attributes & PA_PRESENT) && Pages[i] != 0)
        {
            DPRINT1("Setting physical address but not allowing access at address "
                    "0x%p with attributes %x/%x.\n",
                    Addr, Attributes, flProtect);
            KeBugCheck(MEMORY_MANAGEMENT);
        }
        PdeOffset = ADDR_TO_PDE_OFFSET(Addr);
        if (oldPdeOffset != PdeOffset)
        {
            if(Pt) MmUnmapPageTable(Pt);
            Pt = MmGetPageTableForProcess(Process, Addr, TRUE);
            if (Pt == NULL)
            {
                KeBugCheck(MEMORY_MANAGEMENT);
            }
        }
        else
        {
            Pt++;
        }
        oldPdeOffset = PdeOffset;

        Pte = InterlockedExchangePte(Pt, PFN_TO_PTE(Pages[i]) | Attributes);

        /* There should not be anything valid here */
        if (Pte != 0)
        {
            DPRINT1("Bad PTE %lx at %p for %p + %lu\n", Pte, Pt, Address, i);
            KeBugCheck(MEMORY_MANAGEMENT);
        }

        /* We don't need to flush the TLB here because it only caches valid translations
         * and we're moving this PTE from invalid to valid so it can't be cached right now */

        if (Addr < MmSystemRangeStart)
        {
            /* Add PDE reference */
            Process->Vm.VmWorkingSetList->UsedPageTableEntries[MiGetPdeOffset(Addr)]++;
            ASSERT(Process->Vm.VmWorkingSetList->UsedPageTableEntries[MiGetPdeOffset(Addr)] <= PTE_COUNT);
        }
    }

    ASSERT(Addr > Address);
    MmUnmapPageTable(Pt);

    return(STATUS_SUCCESS);
}

NTSTATUS
NTAPI
MmCreateVirtualMapping(PEPROCESS Process,
                       PVOID Address,
                       ULONG flProtect,
                       PPFN_NUMBER Pages,
                       ULONG PageCount)
{
    ULONG i;

    ASSERT((ULONG_PTR)Address % PAGE_SIZE == 0);
    for (i = 0; i < PageCount; i++)
    {
        if (!MmIsPageInUse(Pages[i]))
        {
            DPRINT1("Page at address %x not in use\n", PFN_TO_PTE(Pages[i]));
            KeBugCheck(MEMORY_MANAGEMENT);
        }
    }

    return(MmCreateVirtualMappingUnsafe(Process,
                                        Address,
                                        flProtect,
                                        Pages,
                                        PageCount));
}

ULONG
NTAPI
MmGetPageProtect(PEPROCESS Process, PVOID Address)
{
    ULONG Entry;
    ULONG Protect;

    Entry = MmGetPageEntryForProcess(Process, Address);

    if (!(Entry & PA_PRESENT))
    {
        Protect = PAGE_NOACCESS;
    }
    else
    {
        if (Entry & PA_READWRITE)
        {
            Protect = PAGE_READWRITE;
        }
        else
        {
            Protect = PAGE_EXECUTE_READ;
        }
        if (Entry & PA_CD)
        {
            Protect |= PAGE_NOCACHE;
        }
        if (Entry & PA_WT)
        {
            Protect |= PAGE_WRITETHROUGH;
        }
        if (!(Entry & PA_USER))
        {
            Protect |= PAGE_SYSTEM;
        }
    }
    return(Protect);
}

VOID
NTAPI
MmSetPageProtect(PEPROCESS Process, PVOID Address, ULONG flProtect)
{
    ULONG Attributes = 0;
    PULONG Pt;
    ULONG Pte;

    DPRINT("MmSetPageProtect(Process %p Address %p flProtect %x)\n",
           Process, Address, flProtect);

    Attributes = ProtectToPTE(flProtect);

    Attributes &= 0xfff;
    if (Address >= MmSystemRangeStart)
    {
        Attributes &= ~PA_USER;
    }
    else
    {
        Attributes |= PA_USER;
    }

    Pt = MmGetPageTableForProcess(Process, Address, FALSE);
    if (Pt == NULL)
    {
        KeBugCheck(MEMORY_MANAGEMENT);
    }
    Pte = InterlockedExchangePte(Pt, PAGE_MASK(*Pt) | Attributes | (*Pt & (PA_ACCESSED|PA_DIRTY)));

    // We should be able to bring a page back from PAGE_NOACCESS, but the old
    // entry must still be a non-swap one with a nonzero PFN
    if ((Pte & 0x800) || !(Pte >> PAGE_SHIFT))
    {
        DPRINT1("Invalid Pte %lx\n", Pte);
        KeBugCheck(MEMORY_MANAGEMENT);
    }

    if((Pte & Attributes) != Attributes)
        MiFlushTlb(Pt, Address);
    else
        MmUnmapPageTable(Pt);
}

VOID
INIT_FUNCTION
NTAPI
MmInitGlobalKernelPageDirectory(VOID)
{
    /* Nothing to do here */
}

/* EOF */