/*
 * COPYRIGHT:       See COPYING in the top level directory
 * PROJECT:         ReactOS kernel
 * FILE:            ntoskrnl/mm/i386/page.c
 * PURPOSE:         Low level memory management manipulation
 *
 * PROGRAMMERS:     David Welch (welch@cwcom.net)
 */

/* INCLUDES ***************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

#include <mm/ARM3/miarm.h>

#if defined (ALLOC_PRAGMA)
#pragma alloc_text(INIT, MmInitGlobalKernelPageDirectory)
#endif

#define ADDR_TO_PDE_OFFSET MiAddressToPdeOffset
#define ADDR_TO_PAGE_TABLE(v) (((ULONG)(v)) / (1024 * PAGE_SIZE))

/* GLOBALS *****************************************************************/

#define PA_BIT_PRESENT   (0)
#define PA_BIT_READWRITE (1)
#define PA_BIT_USER      (2)
#define PA_BIT_WT        (3)
#define PA_BIT_CD        (4)
#define PA_BIT_ACCESSED  (5)
#define PA_BIT_DIRTY     (6)
#define PA_BIT_GLOBAL    (8)

#define PA_PRESENT   (1 << PA_BIT_PRESENT)
#define PA_READWRITE (1 << PA_BIT_READWRITE)
#define PA_USER      (1 << PA_BIT_USER)
#define PA_DIRTY     (1 << PA_BIT_DIRTY)
#define PA_WT        (1 << PA_BIT_WT)
#define PA_CD        (1 << PA_BIT_CD)
#define PA_ACCESSED  (1 << PA_BIT_ACCESSED)
#define PA_GLOBAL    (1 << PA_BIT_GLOBAL)

#define IS_HYPERSPACE(v) (((ULONG)(v) >= HYPER_SPACE && (ULONG)(v) <= HYPER_SPACE_END))

#define PTE_TO_PFN(X) ((X) >> PAGE_SHIFT)
#define PFN_TO_PTE(X) ((X) << PAGE_SHIFT)

#define PAGE_MASK(x) ((x) & (~0xfff))
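/* Informal sketch of the (non-PAE) x86 PTE layout this file relies on. The
 * exact semantics are defined by the hardware and by the checks below, so
 * treat this as a reader's aid rather than a specification:
 *
 *   bits 0-8   : PA_PRESENT, PA_READWRITE, PA_USER, PA_WT, PA_CD,
 *                PA_ACCESSED, PA_DIRTY, (bit 7 unused here), PA_GLOBAL
 *   bits 12-31 : page frame number (PFN_TO_PTE / PTE_TO_PFN)
 *
 * A hardware-valid entry is built roughly as
 *
 *     PFN_TO_PTE(Pfn) | PA_PRESENT | PA_READWRITE | PA_USER;
 *
 * For software (non-present) entries this file uses bit 11 (0x800) as a
 * marker: a non-present PTE with 0x800 set holds a swap entry stored as
 * (SwapEntry << 1), while a non-present PTE with 0x800 clear but a non-zero
 * PFN field represents a page that was protected to PAGE_NOACCESS.
 */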
const
ULONG
MmProtectToPteMask[32] =
{
    //
    // These are the base MM_ protection flags
    //
    0,
    PTE_READONLY            | PTE_ENABLE_CACHE,
    PTE_EXECUTE             | PTE_ENABLE_CACHE,
    PTE_EXECUTE_READ        | PTE_ENABLE_CACHE,
    PTE_READWRITE           | PTE_ENABLE_CACHE,
    PTE_WRITECOPY           | PTE_ENABLE_CACHE,
    PTE_EXECUTE_READWRITE   | PTE_ENABLE_CACHE,
    PTE_EXECUTE_WRITECOPY   | PTE_ENABLE_CACHE,
    //
    // These OR in the MM_NOCACHE flag
    //
    0,
    PTE_READONLY            | PTE_DISABLE_CACHE,
    PTE_EXECUTE             | PTE_DISABLE_CACHE,
    PTE_EXECUTE_READ        | PTE_DISABLE_CACHE,
    PTE_READWRITE           | PTE_DISABLE_CACHE,
    PTE_WRITECOPY           | PTE_DISABLE_CACHE,
    PTE_EXECUTE_READWRITE   | PTE_DISABLE_CACHE,
    PTE_EXECUTE_WRITECOPY   | PTE_DISABLE_CACHE,
    //
    // These OR in the MM_DECOMMIT flag, which doesn't seem supported on x86/64/ARM
    //
    0,
    PTE_READONLY            | PTE_ENABLE_CACHE,
    PTE_EXECUTE             | PTE_ENABLE_CACHE,
    PTE_EXECUTE_READ        | PTE_ENABLE_CACHE,
    PTE_READWRITE           | PTE_ENABLE_CACHE,
    PTE_WRITECOPY           | PTE_ENABLE_CACHE,
    PTE_EXECUTE_READWRITE   | PTE_ENABLE_CACHE,
    PTE_EXECUTE_WRITECOPY   | PTE_ENABLE_CACHE,
    //
    // These OR in the MM_NOACCESS flag, which seems to enable WriteCombining?
    //
    0,
    PTE_READONLY            | PTE_WRITECOMBINED_CACHE,
    PTE_EXECUTE             | PTE_WRITECOMBINED_CACHE,
    PTE_EXECUTE_READ        | PTE_WRITECOMBINED_CACHE,
    PTE_READWRITE           | PTE_WRITECOMBINED_CACHE,
    PTE_WRITECOPY           | PTE_WRITECOMBINED_CACHE,
    PTE_EXECUTE_READWRITE   | PTE_WRITECOMBINED_CACHE,
    PTE_EXECUTE_WRITECOPY   | PTE_WRITECOMBINED_CACHE,
};

const
ULONG MmProtectToValue[32] =
{
    PAGE_NOACCESS,
    PAGE_READONLY,
    PAGE_EXECUTE,
    PAGE_EXECUTE_READ,
    PAGE_READWRITE,
    PAGE_WRITECOPY,
    PAGE_EXECUTE_READWRITE,
    PAGE_EXECUTE_WRITECOPY,
    PAGE_NOACCESS,
    PAGE_NOCACHE | PAGE_READONLY,
    PAGE_NOCACHE | PAGE_EXECUTE,
    PAGE_NOCACHE | PAGE_EXECUTE_READ,
    PAGE_NOCACHE | PAGE_READWRITE,
    PAGE_NOCACHE | PAGE_WRITECOPY,
    PAGE_NOCACHE | PAGE_EXECUTE_READWRITE,
    PAGE_NOCACHE | PAGE_EXECUTE_WRITECOPY,
    PAGE_NOACCESS,
    PAGE_GUARD | PAGE_READONLY,
    PAGE_GUARD | PAGE_EXECUTE,
    PAGE_GUARD | PAGE_EXECUTE_READ,
    PAGE_GUARD | PAGE_READWRITE,
    PAGE_GUARD | PAGE_WRITECOPY,
    PAGE_GUARD | PAGE_EXECUTE_READWRITE,
    PAGE_GUARD | PAGE_EXECUTE_WRITECOPY,
    PAGE_NOACCESS,
    PAGE_WRITECOMBINE | PAGE_READONLY,
    PAGE_WRITECOMBINE | PAGE_EXECUTE,
    PAGE_WRITECOMBINE | PAGE_EXECUTE_READ,
    PAGE_WRITECOMBINE | PAGE_READWRITE,
    PAGE_WRITECOMBINE | PAGE_WRITECOPY,
    PAGE_WRITECOMBINE | PAGE_EXECUTE_READWRITE,
    PAGE_WRITECOMBINE | PAGE_EXECUTE_WRITECOPY
};

/* FUNCTIONS ***************************************************************/

static BOOLEAN MmUnmapPageTable(PULONG Pt);

VOID
MiFlushTlb(PULONG Pt, PVOID Address)
{
    if ((Pt && MmUnmapPageTable(Pt)) || Address >= MmSystemRangeStart)
    {
        KeInvalidateTlbEntry(Address);
    }
}

static ULONG
ProtectToPTE(ULONG flProtect)
{
    ULONG Attributes = 0;

    if (flProtect & (PAGE_NOACCESS | PAGE_GUARD))
    {
        Attributes = 0;
    }
    else if (flProtect & PAGE_IS_WRITABLE)
    {
        Attributes = PA_PRESENT | PA_READWRITE;
    }
    else if (flProtect & (PAGE_IS_READABLE | PAGE_IS_EXECUTABLE))
    {
        Attributes = PA_PRESENT;
    }
    else
    {
        DPRINT1("Unknown main protection type.\n");
        KeBugCheck(MEMORY_MANAGEMENT);
    }

    if (!(flProtect & PAGE_SYSTEM))
    {
        Attributes = Attributes | PA_USER;
    }
    if (flProtect & PAGE_NOCACHE)
    {
        Attributes = Attributes | PA_CD;
    }
    if (flProtect & PAGE_WRITETHROUGH)
    {
        Attributes = Attributes | PA_WT;
    }
    return(Attributes);
}
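/* A worked example of the translation above (illustrative only, derived from
 * the branches in ProtectToPTE rather than from any external reference):
 *
 *     ProtectToPTE(PAGE_READWRITE)                -> PA_PRESENT | PA_READWRITE | PA_USER
 *     ProtectToPTE(PAGE_READONLY | PAGE_SYSTEM)   -> PA_PRESENT
 *     ProtectToPTE(PAGE_READWRITE | PAGE_NOCACHE) -> PA_PRESENT | PA_READWRITE | PA_USER | PA_CD
 *     ProtectToPTE(PAGE_NOACCESS)                 -> PA_USER (PA_PRESENT stays clear, so no valid mapping)
 */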
NTSTATUS
NTAPI
MiDispatchFault(IN ULONG FaultCode,
                IN PVOID Address,
                IN PMMPTE PointerPte,
                IN PMMPTE PointerProtoPte,
                IN BOOLEAN Recursive,
                IN PEPROCESS Process,
                IN PVOID TrapInformation,
                IN PVOID Vad);

NTSTATUS
NTAPI
MiFillSystemPageDirectory(IN PVOID Base,
                          IN SIZE_T NumberOfBytes);

static PULONG
MmGetPageTableForProcess(PEPROCESS Process, PVOID Address, BOOLEAN Create)
{
    PFN_NUMBER Pfn;
    PULONG Pt;
    PMMPDE PointerPde;

    if (Address < MmSystemRangeStart)
    {
        /* We should have a process for user-land addresses */
        ASSERT(Process != NULL);

        if (Process != PsGetCurrentProcess())
        {
            PMMPDE PdeBase;
            ULONG PdeOffset = MiGetPdeOffset(Address);

            /* Nobody but the page fault handler should ask to create the PDE,
             * which implies that Process is the current one */
            ASSERT(Create == FALSE);

            /* Map the other process's page directory through hyperspace */
            PdeBase = MmCreateHyperspaceMapping(PTE_TO_PFN(Process->Pcb.DirectoryTableBase[0]));
            if (PdeBase == NULL)
            {
                KeBugCheck(MEMORY_MANAGEMENT);
            }
            PointerPde = PdeBase + PdeOffset;
            if (PointerPde->u.Hard.Valid == 0)
            {
                MmDeleteHyperspaceMapping(PdeBase);
                return NULL;
            }
            else
            {
                Pfn = PointerPde->u.Hard.PageFrameNumber;
            }
            MmDeleteHyperspaceMapping(PdeBase);

            /* Map the page table itself through hyperspace as well */
            Pt = MmCreateHyperspaceMapping(Pfn);
            if (Pt == NULL)
            {
                KeBugCheck(MEMORY_MANAGEMENT);
            }
            return Pt + MiAddressToPteOffset(Address);
        }
        /* This is for our own process */
        PointerPde = MiAddressToPde(Address);
        Pt = (PULONG)MiAddressToPte(Address);
        if (PointerPde->u.Hard.Valid == 0)
        {
            NTSTATUS Status;
            if (Create == FALSE)
            {
                return NULL;
            }
            ASSERT(PointerPde->u.Long == 0);

            MI_WRITE_INVALID_PTE(PointerPde, DemandZeroPde);

            // Tiny HACK: Parameter 1 is the architecture-specific FaultCode for an access violation (i.e. page is present)
            Status = MiDispatchFault(0x1,
                                     Pt,
                                     PointerPde,
                                     NULL,
                                     FALSE,
                                     PsGetCurrentProcess(),
                                     NULL,
                                     NULL);
            DBG_UNREFERENCED_LOCAL_VARIABLE(Status);
            ASSERT(KeAreAllApcsDisabled() == TRUE);
            ASSERT(PointerPde->u.Hard.Valid == 1);
        }
        return (PULONG)MiAddressToPte(Address);
    }

    /* This is for kernel-land addresses */
    ASSERT(Process == NULL);
    PointerPde = MiAddressToPde(Address);
    Pt = (PULONG)MiAddressToPte(Address);
    if (PointerPde->u.Hard.Valid == 0)
    {
        /* Let ARM3 synchronize the PDE */
        if (!MiSynchronizeSystemPde(PointerPde))
        {
            /* PDE (still) not valid, let ARM3 allocate one if asked */
            if (Create == FALSE)
                return NULL;
            MiFillSystemPageDirectory(Address, PAGE_SIZE);
        }
    }
    return Pt;
}

static BOOLEAN MmUnmapPageTable(PULONG Pt)
{
    if (!IS_HYPERSPACE(Pt))
    {
        return TRUE;
    }

    if (Pt)
    {
        MmDeleteHyperspaceMapping((PVOID)PAGE_ROUND_DOWN(Pt));
    }
    return FALSE;
}

static ULONG MmGetPageEntryForProcess(PEPROCESS Process, PVOID Address)
{
    ULONG Pte;
    PULONG Pt;

    Pt = MmGetPageTableForProcess(Process, Address, FALSE);
    if (Pt)
    {
        Pte = *Pt;
        MmUnmapPageTable(Pt);
        return Pte;
    }
    return 0;
}

PFN_NUMBER
NTAPI
MmGetPfnForProcess(PEPROCESS Process,
                   PVOID Address)
{
    ULONG Entry;
    Entry = MmGetPageEntryForProcess(Process, Address);
    if (!(Entry & PA_PRESENT))
    {
        return 0;
    }
    return(PTE_TO_PFN(Entry));
}
VOID
NTAPI
MmDeleteVirtualMapping(PEPROCESS Process, PVOID Address,
                       BOOLEAN* WasDirty, PPFN_NUMBER Page)
/*
 * FUNCTION: Delete a virtual mapping
 */
{
    BOOLEAN WasValid = FALSE;
    PFN_NUMBER Pfn;
    ULONG Pte;
    PULONG Pt;

    DPRINT("MmDeleteVirtualMapping(%p, %p, %p, %p)\n",
           Process, Address, WasDirty, Page);

    Pt = MmGetPageTableForProcess(Process, Address, FALSE);

    if (Pt == NULL)
    {
        if (WasDirty != NULL)
        {
            *WasDirty = FALSE;
        }
        if (Page != NULL)
        {
            *Page = 0;
        }
        return;
    }

    /*
     * Atomically set the entry to zero and get the old value.
     */
    Pte = InterlockedExchangePte(Pt, 0);

    /* We count a mapping as valid if it's a present page, or it's a nonzero pfn with
     * the swap bit unset, indicating a valid page protected to PAGE_NOACCESS. */
    WasValid = (Pte & PA_PRESENT) || ((Pte >> PAGE_SHIFT) && !(Pte & 0x800));
    if (WasValid)
    {
        /* Flush the TLB since we transitioned this PTE
         * from valid to invalid so any stale translations
         * are removed from the cache */
        MiFlushTlb(Pt, Address);

        if (Address < MmSystemRangeStart)
        {
            /* Remove PDE reference */
            Process->Vm.VmWorkingSetList->UsedPageTableEntries[MiGetPdeOffset(Address)]--;
            ASSERT(Process->Vm.VmWorkingSetList->UsedPageTableEntries[MiGetPdeOffset(Address)] < PTE_PER_PAGE);
        }

        Pfn = PTE_TO_PFN(Pte);
    }
    else
    {
        MmUnmapPageTable(Pt);
        Pfn = 0;
    }

    /*
     * Return some information to the caller
     */
    if (WasDirty != NULL)
    {
        *WasDirty = ((Pte & PA_DIRTY) && (Pte & PA_PRESENT)) ? TRUE : FALSE;
    }
    if (Page != NULL)
    {
        *Page = Pfn;
    }
}

VOID
NTAPI
MmGetPageFileMapping(PEPROCESS Process, PVOID Address,
                     SWAPENTRY* SwapEntry)
/*
 * FUNCTION: Get a page file mapping
 */
{
    ULONG Entry = MmGetPageEntryForProcess(Process, Address);
    *SwapEntry = Entry >> 1;
}

VOID
NTAPI
MmDeletePageFileMapping(PEPROCESS Process, PVOID Address,
                        SWAPENTRY* SwapEntry)
/*
 * FUNCTION: Delete a page file mapping
 */
{
    ULONG Pte;
    PULONG Pt;

    Pt = MmGetPageTableForProcess(Process, Address, FALSE);

    if (Pt == NULL)
    {
        *SwapEntry = 0;
        return;
    }

    /*
     * Atomically set the entry to zero and get the old value.
     */
    Pte = InterlockedExchangePte(Pt, 0);

    if (Address < MmSystemRangeStart)
    {
        /* Remove PDE reference */
        Process->Vm.VmWorkingSetList->UsedPageTableEntries[MiGetPdeOffset(Address)]--;
        ASSERT(Process->Vm.VmWorkingSetList->UsedPageTableEntries[MiGetPdeOffset(Address)] < PTE_PER_PAGE);
    }

    /* We don't need to flush here because page file entries
     * are invalid translations, so the processor won't cache them */
    MmUnmapPageTable(Pt);

    if ((Pte & PA_PRESENT) || !(Pte & 0x800))
    {
        DPRINT1("Pte %x (expected PA_PRESENT clear and 0x800 set)\n", Pte);
        KeBugCheck(MEMORY_MANAGEMENT);
    }

    /*
     * Return some information to the caller
     */
    *SwapEntry = Pte >> 1;
}

BOOLEAN
Mmi386MakeKernelPageTableGlobal(PVOID Address)
{
    PMMPDE PointerPde = MiAddressToPde(Address);
    PMMPTE PointerPte = MiAddressToPte(Address);

    if (PointerPde->u.Hard.Valid == 0)
    {
        if (!MiSynchronizeSystemPde(PointerPde))
            return FALSE;
        return PointerPte->u.Hard.Valid != 0;
    }
    return FALSE;
}

BOOLEAN
NTAPI
MmIsDirtyPage(PEPROCESS Process, PVOID Address)
{
    return MmGetPageEntryForProcess(Process, Address) & PA_DIRTY ? TRUE : FALSE;
}
VOID
NTAPI
MmSetCleanPage(PEPROCESS Process, PVOID Address)
{
    PULONG Pt;
    ULONG Pte;

    if (Address < MmSystemRangeStart && Process == NULL)
    {
        DPRINT1("MmSetCleanPage is called for user space without a process.\n");
        KeBugCheck(MEMORY_MANAGEMENT);
    }

    Pt = MmGetPageTableForProcess(Process, Address, FALSE);
    if (Pt == NULL)
    {
        KeBugCheck(MEMORY_MANAGEMENT);
    }

    do
    {
        Pte = *Pt;
    } while (Pte != InterlockedCompareExchangePte(Pt, Pte & ~PA_DIRTY, Pte));

    if (!(Pte & PA_PRESENT))
    {
        KeBugCheck(MEMORY_MANAGEMENT);
    }
    else if (Pte & PA_DIRTY)
    {
        MiFlushTlb(Pt, Address);
    }
    else
    {
        MmUnmapPageTable(Pt);
    }
}

VOID
NTAPI
MmSetDirtyPage(PEPROCESS Process, PVOID Address)
{
    PULONG Pt;
    ULONG Pte;

    if (Address < MmSystemRangeStart && Process == NULL)
    {
        DPRINT1("MmSetDirtyPage is called for user space without a process.\n");
        KeBugCheck(MEMORY_MANAGEMENT);
    }

    Pt = MmGetPageTableForProcess(Process, Address, FALSE);
    if (Pt == NULL)
    {
        KeBugCheck(MEMORY_MANAGEMENT);
    }

    do
    {
        Pte = *Pt;
    } while (Pte != InterlockedCompareExchangePte(Pt, Pte | PA_DIRTY, Pte));

    if (!(Pte & PA_PRESENT))
    {
        KeBugCheck(MEMORY_MANAGEMENT);
    }
    else
    {
        /* The processor will never clear this bit itself, therefore
         * we do not need to flush the TLB here when setting it */
        MmUnmapPageTable(Pt);
    }
}

BOOLEAN
NTAPI
MmIsPagePresent(PEPROCESS Process, PVOID Address)
{
    return MmGetPageEntryForProcess(Process, Address) & PA_PRESENT;
}

BOOLEAN
NTAPI
MmIsDisabledPage(PEPROCESS Process, PVOID Address)
{
    ULONG_PTR Entry = MmGetPageEntryForProcess(Process, Address);
    return !(Entry & PA_PRESENT) && !(Entry & 0x800) && (Entry >> PAGE_SHIFT);
}

BOOLEAN
NTAPI
MmIsPageSwapEntry(PEPROCESS Process, PVOID Address)
{
    ULONG Entry;
    Entry = MmGetPageEntryForProcess(Process, Address);
    return !(Entry & PA_PRESENT) && (Entry & 0x800);
}

NTSTATUS
NTAPI
MmCreatePageFileMapping(PEPROCESS Process,
                        PVOID Address,
                        SWAPENTRY SwapEntry)
{
    PULONG Pt;
    ULONG Pte;

    if (Process == NULL && Address < MmSystemRangeStart)
    {
        DPRINT1("No process\n");
        KeBugCheck(MEMORY_MANAGEMENT);
    }
    if (Process != NULL && Address >= MmSystemRangeStart)
    {
        DPRINT1("Setting kernel address with process context\n");
        KeBugCheck(MEMORY_MANAGEMENT);
    }

    /* The swap entry is stored shifted left by one, so its top bit must be free */
    if (SwapEntry & (1 << 31))
    {
        KeBugCheck(MEMORY_MANAGEMENT);
    }

    Pt = MmGetPageTableForProcess(Process, Address, FALSE);
    if (Pt == NULL)
    {
        /* Nobody should page out an address that hasn't even been mapped */
        /* But we might place a wait entry first, requiring the page table */
        if (SwapEntry != MM_WAIT_ENTRY)
        {
            KeBugCheck(MEMORY_MANAGEMENT);
        }
        Pt = MmGetPageTableForProcess(Process, Address, TRUE);
    }
    Pte = InterlockedExchangePte(Pt, SwapEntry << 1);
    if (Pte != 0)
    {
        KeBugCheckEx(MEMORY_MANAGEMENT, SwapEntry, (ULONG_PTR)Process, (ULONG_PTR)Address, 0);
    }

    if (Address < MmSystemRangeStart)
    {
        /* Add PDE reference */
        Process->Vm.VmWorkingSetList->UsedPageTableEntries[MiGetPdeOffset(Address)]++;
        ASSERT(Process->Vm.VmWorkingSetList->UsedPageTableEntries[MiGetPdeOffset(Address)] <= PTE_PER_PAGE);
    }

    /* We don't need to flush the TLB here because it
     * only caches valid translations and a zero PTE
     * is not a valid translation */
    MmUnmapPageTable(Pt);

    return(STATUS_SUCCESS);
}
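/* Illustrative pairing of the page-file mapping routines above; this is a
 * sketch of a hypothetical caller, not code taken from the pager itself, and
 * SwapEntry is assumed to come from the page file allocator:
 *
 *     MmCreatePageFileMapping(Process, Address, SwapEntry);   // page swapped out
 *     ...
 *     MmGetPageFileMapping(Process, Address, &SwapEntry);     // look the entry up again
 *     MmDeletePageFileMapping(Process, Address, &SwapEntry);  // page being swapped back in
 */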
NTSTATUS
NTAPI
MmCreateVirtualMappingUnsafe(PEPROCESS Process,
                             PVOID Address,
                             ULONG flProtect,
                             PPFN_NUMBER Pages,
                             ULONG PageCount)
{
    ULONG Attributes;
    PVOID Addr;
    ULONG i;
    ULONG oldPdeOffset, PdeOffset;
    PULONG Pt = NULL;
    ULONG Pte;

    DPRINT("MmCreateVirtualMappingUnsafe(%p, %p, %lu, %p (%x), %lu)\n",
           Process, Address, flProtect, Pages, *Pages, PageCount);

    ASSERT(((ULONG_PTR)Address % PAGE_SIZE) == 0);

    if (Process == NULL)
    {
        if (Address < MmSystemRangeStart)
        {
            DPRINT1("NULL process given for user-mode mapping at %p -- %lu pages starting at %Ix\n", Address, PageCount, *Pages);
            KeBugCheck(MEMORY_MANAGEMENT);
        }
        if (PageCount > 0x10000 ||
            (ULONG_PTR)Address / PAGE_SIZE + PageCount > 0x100000)
        {
            DPRINT1("Page count too large for kernel-mode mapping at %p -- %lu pages starting at %Ix\n", Address, PageCount, *Pages);
            KeBugCheck(MEMORY_MANAGEMENT);
        }
    }
    else
    {
        if (Address >= MmSystemRangeStart)
        {
            DPRINT1("Process %p given for kernel-mode mapping at %p -- %lu pages starting at %Ix\n", Process, Address, PageCount, *Pages);
            KeBugCheck(MEMORY_MANAGEMENT);
        }
        if (PageCount > (ULONG_PTR)MmSystemRangeStart / PAGE_SIZE ||
            (ULONG_PTR)Address / PAGE_SIZE + PageCount >
            (ULONG_PTR)MmSystemRangeStart / PAGE_SIZE)
        {
            DPRINT1("Page count too large for process %p user-mode mapping at %p -- %lu pages starting at %Ix\n", Process, Address, PageCount, *Pages);
            KeBugCheck(MEMORY_MANAGEMENT);
        }
    }

    Attributes = ProtectToPTE(flProtect);
    Attributes &= 0xfff;
    if (Address >= MmSystemRangeStart)
    {
        Attributes &= ~PA_USER;
    }
    else
    {
        Attributes |= PA_USER;
    }

    Addr = Address;
    /* MmGetPageTableForProcess should be called on the first iteration, so
     * start with an offset that can never match to trigger the call */
    oldPdeOffset = ADDR_TO_PDE_OFFSET(Addr) + 1;
    for (i = 0; i < PageCount; i++, Addr = (PVOID)((ULONG_PTR)Addr + PAGE_SIZE))
    {
        if (!(Attributes & PA_PRESENT) && Pages[i] != 0)
        {
            DPRINT1("Setting physical address but not allowing access at address "
                    "0x%p with attributes %x/%x.\n",
                    Addr, Attributes, flProtect);
            KeBugCheck(MEMORY_MANAGEMENT);
        }
        PdeOffset = ADDR_TO_PDE_OFFSET(Addr);
        if (oldPdeOffset != PdeOffset)
        {
            /* We crossed into a new page table: release the old one and map the next */
            if (Pt) MmUnmapPageTable(Pt);
            Pt = MmGetPageTableForProcess(Process, Addr, TRUE);
            if (Pt == NULL)
            {
                KeBugCheck(MEMORY_MANAGEMENT);
            }
        }
        else
        {
            Pt++;
        }
        oldPdeOffset = PdeOffset;

        Pte = InterlockedExchangePte(Pt, PFN_TO_PTE(Pages[i]) | Attributes);

        /* There should not be anything valid here */
        if (Pte != 0)
        {
            DPRINT1("Bad PTE %lx at %p for %p + %lu\n", Pte, Pt, Address, i);
            KeBugCheck(MEMORY_MANAGEMENT);
        }

        /* We don't need to flush the TLB here because it only caches valid translations
         * and we're moving this PTE from invalid to valid so it can't be cached right now */

        if (Addr < MmSystemRangeStart)
        {
            /* Add PDE reference */
            Process->Vm.VmWorkingSetList->UsedPageTableEntries[MiGetPdeOffset(Addr)]++;
            ASSERT(Process->Vm.VmWorkingSetList->UsedPageTableEntries[MiGetPdeOffset(Addr)] <= PTE_PER_PAGE);
        }
    }

    ASSERT(Addr > Address);
    MmUnmapPageTable(Pt);

    return(STATUS_SUCCESS);
}
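/* A hedged usage sketch for the mapping routines (hypothetical caller; the
 * page is assumed to have been obtained from the page allocator/balancer
 * beforehand, and Address to be page-aligned):
 *
 *     PFN_NUMBER Page;      // filled in by the page allocator
 *     BOOLEAN WasDirty;
 *
 *     MmCreateVirtualMapping(Process, Address, PAGE_READWRITE, &Page, 1);
 *     ...
 *     MmDeleteVirtualMapping(Process, Address, &WasDirty, &Page);
 */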
NTSTATUS
NTAPI
MmCreateVirtualMapping(PEPROCESS Process,
                       PVOID Address,
                       ULONG flProtect,
                       PPFN_NUMBER Pages,
                       ULONG PageCount)
{
    ULONG i;

    ASSERT((ULONG_PTR)Address % PAGE_SIZE == 0);
    for (i = 0; i < PageCount; i++)
    {
        if (!MmIsPageInUse(Pages[i]))
        {
            DPRINT1("Page at address %x not in use\n", PFN_TO_PTE(Pages[i]));
            KeBugCheck(MEMORY_MANAGEMENT);
        }
    }

    return(MmCreateVirtualMappingUnsafe(Process,
                                        Address,
                                        flProtect,
                                        Pages,
                                        PageCount));
}

ULONG
NTAPI
MmGetPageProtect(PEPROCESS Process, PVOID Address)
{
    ULONG Entry;
    ULONG Protect;

    Entry = MmGetPageEntryForProcess(Process, Address);

    if (!(Entry & PA_PRESENT))
    {
        Protect = PAGE_NOACCESS;
    }
    else
    {
        if (Entry & PA_READWRITE)
        {
            Protect = PAGE_READWRITE;
        }
        else
        {
            Protect = PAGE_EXECUTE_READ;
        }
        if (Entry & PA_CD)
        {
            Protect |= PAGE_NOCACHE;
        }
        if (Entry & PA_WT)
        {
            Protect |= PAGE_WRITETHROUGH;
        }
        if (!(Entry & PA_USER))
        {
            Protect |= PAGE_SYSTEM;
        }
    }
    return(Protect);
}

VOID
NTAPI
MmSetPageProtect(PEPROCESS Process, PVOID Address, ULONG flProtect)
{
    ULONG Attributes = 0;
    PULONG Pt;
    ULONG Pte;

    DPRINT("MmSetPageProtect(Process %p Address %p flProtect %x)\n",
           Process, Address, flProtect);

    Attributes = ProtectToPTE(flProtect);

    Attributes &= 0xfff;
    if (Address >= MmSystemRangeStart)
    {
        Attributes &= ~PA_USER;
    }
    else
    {
        Attributes |= PA_USER;
    }

    Pt = MmGetPageTableForProcess(Process, Address, FALSE);
    if (Pt == NULL)
    {
        KeBugCheck(MEMORY_MANAGEMENT);
    }
    Pte = InterlockedExchangePte(Pt, PAGE_MASK(*Pt) | Attributes | (*Pt & (PA_ACCESSED | PA_DIRTY)));

    /* We should be able to bring a page back from PAGE_NOACCESS, but the old
     * entry must not be a swap entry and must carry a page frame number */
    if ((Pte & 0x800) || !(Pte >> PAGE_SHIFT))
    {
        DPRINT1("Invalid Pte %lx\n", Pte);
        KeBugCheck(MEMORY_MANAGEMENT);
    }

    if ((Pte & Attributes) != Attributes)
        MiFlushTlb(Pt, Address);
    else
        MmUnmapPageTable(Pt);
}

VOID
INIT_FUNCTION
NTAPI
MmInitGlobalKernelPageDirectory(VOID)
{
    /* Nothing to do here */
}

/* EOF */