/*
 * COPYRIGHT:   See COPYING in the top level directory
 * PROJECT:     ReactOS kernel
 * FILE:        ntoskrnl/mm/i386/page.c
 * PURPOSE:     Low level memory management manipulation
 *
 * PROGRAMMERS: David Welch (welch@cwcom.net)
 */

/* INCLUDES ***************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

#include <mm/ARM3/miarm.h>

#define ADDR_TO_PDE_OFFSET MiAddressToPdeOffset
#define ADDR_TO_PAGE_TABLE(v) (((ULONG)(v)) / (1024 * PAGE_SIZE))

/* GLOBALS *****************************************************************/

#define PA_BIT_PRESENT   (0)
#define PA_BIT_READWRITE (1)
#define PA_BIT_USER      (2)
#define PA_BIT_WT        (3)
#define PA_BIT_CD        (4)
#define PA_BIT_ACCESSED  (5)
#define PA_BIT_DIRTY     (6)
#define PA_BIT_GLOBAL    (8)

#define PA_PRESENT   (1 << PA_BIT_PRESENT)
#define PA_READWRITE (1 << PA_BIT_READWRITE)
#define PA_USER      (1 << PA_BIT_USER)
#define PA_DIRTY     (1 << PA_BIT_DIRTY)
#define PA_WT        (1 << PA_BIT_WT)
#define PA_CD        (1 << PA_BIT_CD)
#define PA_ACCESSED  (1 << PA_BIT_ACCESSED)
#define PA_GLOBAL    (1 << PA_BIT_GLOBAL)

#define IS_HYPERSPACE(v) (((ULONG)(v) >= HYPER_SPACE && (ULONG)(v) <= HYPER_SPACE_END))

#define PTE_TO_PFN(X)  ((X) >> PAGE_SHIFT)
#define PFN_TO_PTE(X)  ((X) << PAGE_SHIFT)

#define PAGE_MASK(x) ((x)&(~0xfff))

const
ULONG
MmProtectToPteMask[32] =
{
    //
    // These are the base MM_ protection flags
    //
    0,
    PTE_READONLY            | PTE_ENABLE_CACHE,
    PTE_EXECUTE             | PTE_ENABLE_CACHE,
    PTE_EXECUTE_READ        | PTE_ENABLE_CACHE,
    PTE_READWRITE           | PTE_ENABLE_CACHE,
    PTE_WRITECOPY           | PTE_ENABLE_CACHE,
    PTE_EXECUTE_READWRITE   | PTE_ENABLE_CACHE,
    PTE_EXECUTE_WRITECOPY   | PTE_ENABLE_CACHE,
    //
    // These OR in the MM_NOCACHE flag
    //
    0,
    PTE_READONLY            | PTE_DISABLE_CACHE,
    PTE_EXECUTE             | PTE_DISABLE_CACHE,
    PTE_EXECUTE_READ        | PTE_DISABLE_CACHE,
    PTE_READWRITE           | PTE_DISABLE_CACHE,
    PTE_WRITECOPY           | PTE_DISABLE_CACHE,
    PTE_EXECUTE_READWRITE   | PTE_DISABLE_CACHE,
    PTE_EXECUTE_WRITECOPY   | PTE_DISABLE_CACHE,
    //
    // These OR in the MM_DECOMMIT flag, which doesn't seem supported on x86/64/ARM
    //
    0,
    PTE_READONLY            | PTE_ENABLE_CACHE,
    PTE_EXECUTE             | PTE_ENABLE_CACHE,
    PTE_EXECUTE_READ        | PTE_ENABLE_CACHE,
    PTE_READWRITE           | PTE_ENABLE_CACHE,
    PTE_WRITECOPY           | PTE_ENABLE_CACHE,
    PTE_EXECUTE_READWRITE   | PTE_ENABLE_CACHE,
    PTE_EXECUTE_WRITECOPY   | PTE_ENABLE_CACHE,
    //
    // These OR in the MM_NOACCESS flag, which seems to enable WriteCombining?
    //
    0,
    PTE_READONLY            | PTE_WRITECOMBINED_CACHE,
    PTE_EXECUTE             | PTE_WRITECOMBINED_CACHE,
    PTE_EXECUTE_READ        | PTE_WRITECOMBINED_CACHE,
    PTE_READWRITE           | PTE_WRITECOMBINED_CACHE,
    PTE_WRITECOPY           | PTE_WRITECOMBINED_CACHE,
    PTE_EXECUTE_READWRITE   | PTE_WRITECOMBINED_CACHE,
    PTE_EXECUTE_WRITECOPY   | PTE_WRITECOMBINED_CACHE,
};
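
/* Note (added summary): MmProtectToPteMask (above) and MmProtectToValue (below)
 * are both indexed by the same 5-bit protection value; the first yields the
 * hardware PTE attribute bits, the second the corresponding Win32 PAGE_*
 * constants. */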

const
ULONG MmProtectToValue[32] =
{
    PAGE_NOACCESS,
    PAGE_READONLY,
    PAGE_EXECUTE,
    PAGE_EXECUTE_READ,
    PAGE_READWRITE,
    PAGE_WRITECOPY,
    PAGE_EXECUTE_READWRITE,
    PAGE_EXECUTE_WRITECOPY,
    PAGE_NOACCESS,
    PAGE_NOCACHE | PAGE_READONLY,
    PAGE_NOCACHE | PAGE_EXECUTE,
    PAGE_NOCACHE | PAGE_EXECUTE_READ,
    PAGE_NOCACHE | PAGE_READWRITE,
    PAGE_NOCACHE | PAGE_WRITECOPY,
    PAGE_NOCACHE | PAGE_EXECUTE_READWRITE,
    PAGE_NOCACHE | PAGE_EXECUTE_WRITECOPY,
    PAGE_NOACCESS,
    PAGE_GUARD | PAGE_READONLY,
    PAGE_GUARD | PAGE_EXECUTE,
    PAGE_GUARD | PAGE_EXECUTE_READ,
    PAGE_GUARD | PAGE_READWRITE,
    PAGE_GUARD | PAGE_WRITECOPY,
    PAGE_GUARD | PAGE_EXECUTE_READWRITE,
    PAGE_GUARD | PAGE_EXECUTE_WRITECOPY,
    PAGE_NOACCESS,
    PAGE_WRITECOMBINE | PAGE_READONLY,
    PAGE_WRITECOMBINE | PAGE_EXECUTE,
    PAGE_WRITECOMBINE | PAGE_EXECUTE_READ,
    PAGE_WRITECOMBINE | PAGE_READWRITE,
    PAGE_WRITECOMBINE | PAGE_WRITECOPY,
    PAGE_WRITECOMBINE | PAGE_EXECUTE_READWRITE,
    PAGE_WRITECOMBINE | PAGE_EXECUTE_WRITECOPY
};

/* FUNCTIONS ***************************************************************/

static BOOLEAN MmUnmapPageTable(PULONG Pt, KIRQL OldIrql);

VOID
MiFlushTlb(PULONG Pt, PVOID Address, KIRQL OldIrql)
{
    if ((Pt && MmUnmapPageTable(Pt, OldIrql)) || Address >= MmSystemRangeStart)
    {
        KeInvalidateTlbEntry(Address);
    }
}

static ULONG
ProtectToPTE(ULONG flProtect)
{
    ULONG Attributes = 0;

    if (flProtect & (PAGE_NOACCESS|PAGE_GUARD))
    {
        Attributes = 0;
    }
    else if (flProtect & PAGE_IS_WRITABLE)
    {
        Attributes = PA_PRESENT | PA_READWRITE;
    }
    else if (flProtect & (PAGE_IS_READABLE | PAGE_IS_EXECUTABLE))
    {
        Attributes = PA_PRESENT;
    }
    else
    {
        DPRINT1("Unknown main protection type.\n");
        KeBugCheck(MEMORY_MANAGEMENT);
    }

    if (flProtect & PAGE_SYSTEM)
    {
    }
    else
    {
        Attributes = Attributes | PA_USER;
    }
    if (flProtect & PAGE_NOCACHE)
    {
        Attributes = Attributes | PA_CD;
    }
    if (flProtect & PAGE_WRITETHROUGH)
    {
        Attributes = Attributes | PA_WT;
    }
    return(Attributes);
}

NTSTATUS
NTAPI
MiDispatchFault(IN ULONG FaultCode,
                IN PVOID Address,
                IN PMMPTE PointerPte,
                IN PMMPTE PointerProtoPte,
                IN BOOLEAN Recursive,
                IN PEPROCESS Process,
                IN PVOID TrapInformation,
                IN PVOID Vad);

NTSTATUS
NTAPI
MiFillSystemPageDirectory(IN PVOID Base,
                          IN SIZE_T NumberOfBytes);
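
/* Added summary of the helper below: it returns a pointer to the PTE that maps
 * Address, mapping the owning page table if necessary. For the current process
 * and for kernel addresses this goes through the self-map; for another process
 * the page directory and page table are temporarily mapped into hyperspace,
 * and the caller must release the mapping through MmUnmapPageTable or
 * MiFlushTlb with the returned OldIrql. Returns NULL if the page table does
 * not exist and Create is FALSE. */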
static PULONG
MmGetPageTableForProcess(PEPROCESS Process, PVOID Address, BOOLEAN Create, PKIRQL OldIrql)
{
    PFN_NUMBER Pfn;
    PULONG Pt;
    PMMPDE PointerPde;

    if (Address < MmSystemRangeStart)
    {
        /* We should have a process for user land addresses */
        ASSERT(Process != NULL);

        if(Process != PsGetCurrentProcess())
        {
            PMMPDE PdeBase;
            ULONG PdeOffset = MiGetPdeOffset(Address);

            PdeBase = MiMapPageInHyperSpace(PsGetCurrentProcess(),
                                            PTE_TO_PFN(Process->Pcb.DirectoryTableBase[0]),
                                            OldIrql);
            if (PdeBase == NULL)
            {
                KeBugCheck(MEMORY_MANAGEMENT);
            }
            PointerPde = PdeBase + PdeOffset;
            if (PointerPde->u.Hard.Valid == 0)
            {
                KAPC_STATE ApcState;
                NTSTATUS Status;

                if (!Create)
                {
                    MiUnmapPageInHyperSpace(PsGetCurrentProcess(), PdeBase, *OldIrql);
                    return NULL;
                }

                KeStackAttachProcess(&Process->Pcb, &ApcState);

                Status = MiDispatchFault(0x1,
                                         MiAddressToPte(Address),
                                         MiAddressToPde(Address),
                                         NULL,
                                         FALSE,
                                         Process,
                                         NULL,
                                         NULL);

                KeUnstackDetachProcess(&ApcState);
                if (!NT_SUCCESS(Status))
                    return NULL;
            }

            Pfn = PointerPde->u.Hard.PageFrameNumber;
            MiUnmapPageInHyperSpace(PsGetCurrentProcess(), PdeBase, *OldIrql);
            Pt = MiMapPageInHyperSpace(PsGetCurrentProcess(), Pfn, OldIrql);
            if (Pt == NULL)
            {
                KeBugCheck(MEMORY_MANAGEMENT);
            }
            return Pt + MiAddressToPteOffset(Address);
        }
        /* This is for our process */
        PointerPde = MiAddressToPde(Address);
        Pt = (PULONG)MiAddressToPte(Address);
        if (PointerPde->u.Hard.Valid == 0)
        {
            NTSTATUS Status;
            if (Create == FALSE)
            {
                return NULL;
            }
            ASSERT(PointerPde->u.Long == 0);

            MI_WRITE_INVALID_PTE(PointerPde, DemandZeroPde);
            // Tiny HACK: Parameter 1 is the architecture specific FaultCode for an access violation (i.e. page is present)
            Status = MiDispatchFault(0x1,
                                     Pt,
                                     PointerPde,
                                     NULL,
                                     FALSE,
                                     PsGetCurrentProcess(),
                                     NULL,
                                     NULL);
            DBG_UNREFERENCED_LOCAL_VARIABLE(Status);
            ASSERT(KeAreAllApcsDisabled() == TRUE);
            ASSERT(PointerPde->u.Hard.Valid == 1);
        }
        return (PULONG)MiAddressToPte(Address);
    }

    /* This is for a kernel-land address */
    ASSERT(Process == NULL);
    PointerPde = MiAddressToPde(Address);
    Pt = (PULONG)MiAddressToPte(Address);
    if (PointerPde->u.Hard.Valid == 0)
    {
        /* Let ARM3 synchronize the PDE */
        if(!MiSynchronizeSystemPde(PointerPde))
        {
            /* PDE (still) not valid, let ARM3 allocate one if asked */
            if(Create == FALSE)
                return NULL;
            MiFillSystemPageDirectory(Address, PAGE_SIZE);
        }
    }
    return Pt;
}

static BOOLEAN MmUnmapPageTable(PULONG Pt, KIRQL OldIrql)
{
    if (!IS_HYPERSPACE(Pt))
    {
        return TRUE;
    }

    MiUnmapPageInHyperSpace(PsGetCurrentProcess(), Pt, OldIrql);

    return FALSE;
}

static ULONG MmGetPageEntryForProcess(PEPROCESS Process, PVOID Address)
{
    ULONG Pte;
    PULONG Pt;
    KIRQL OldIrql;

    Pt = MmGetPageTableForProcess(Process, Address, FALSE, &OldIrql);
    if (Pt)
    {
        Pte = *Pt;
        MmUnmapPageTable(Pt, OldIrql);
        return Pte;
    }
    return 0;
}

PFN_NUMBER
NTAPI
MmGetPfnForProcess(PEPROCESS Process,
                   PVOID Address)
{
    ULONG Entry;
    Entry = MmGetPageEntryForProcess(Process, Address);
    if (!(Entry & PA_PRESENT))
    {
        return 0;
    }
    return(PTE_TO_PFN(Entry));
}
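
/* Added summary of the PTE encoding shared by the routines below: a valid
 * mapping has PA_PRESENT set; a swap entry is stored as (SwapEntry << 1) and
 * is recognized by bit 0x800 being set while PA_PRESENT is clear; a
 * non-present, non-swap PTE with a nonzero PFN is a page that was protected
 * to PAGE_NOACCESS. */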
VOID
NTAPI
MmDeleteVirtualMapping(PEPROCESS Process, PVOID Address,
                       BOOLEAN* WasDirty, PPFN_NUMBER Page)
/*
 * FUNCTION: Delete a virtual mapping
 */
{
    BOOLEAN WasValid = FALSE;
    PFN_NUMBER Pfn;
    ULONG Pte;
    PULONG Pt;
    KIRQL OldIrql;

    DPRINT("MmDeleteVirtualMapping(%p, %p, %p, %p)\n",
           Process, Address, WasDirty, Page);

    Pt = MmGetPageTableForProcess(Process, Address, FALSE, &OldIrql);

    if (Pt == NULL)
    {
        if (WasDirty != NULL)
        {
            *WasDirty = FALSE;
        }
        if (Page != NULL)
        {
            *Page = 0;
        }
        return;
    }

    /*
     * Atomically set the entry to zero and get the old value.
     */
    Pte = InterlockedExchangePte(Pt, 0);

    /* We count a mapping as valid if it's a present page, or it's a nonzero pfn with
     * the swap bit unset, indicating a valid page protected to PAGE_NOACCESS. */
    WasValid = (Pte & PA_PRESENT) || ((Pte >> PAGE_SHIFT) && !(Pte & 0x800));
    if (WasValid)
    {
        /* Flush the TLB since we transitioned this PTE
         * from valid to invalid so any stale translations
         * are removed from the cache */
        MiFlushTlb(Pt, Address, OldIrql);

        if (Address < MmSystemRangeStart)
        {
            /* Remove PDE reference */
            Process->Vm.VmWorkingSetList->UsedPageTableEntries[MiGetPdeOffset(Address)]--;
            ASSERT(Process->Vm.VmWorkingSetList->UsedPageTableEntries[MiGetPdeOffset(Address)] < PTE_PER_PAGE);
        }

        Pfn = PTE_TO_PFN(Pte);
    }
    else
    {
        MmUnmapPageTable(Pt, OldIrql);
        Pfn = 0;
    }

    /*
     * Return some information to the caller
     */
    if (WasDirty != NULL)
    {
        *WasDirty = ((Pte & PA_DIRTY) && (Pte & PA_PRESENT)) ? TRUE : FALSE;
    }
    if (Page != NULL)
    {
        *Page = Pfn;
    }
}

VOID
NTAPI
MmGetPageFileMapping(PEPROCESS Process, PVOID Address,
                     SWAPENTRY* SwapEntry)
/*
 * FUNCTION: Get a page file mapping
 */
{
    ULONG Entry = MmGetPageEntryForProcess(Process, Address);
    *SwapEntry = Entry >> 1;
}

VOID
NTAPI
MmDeletePageFileMapping(PEPROCESS Process, PVOID Address,
                        SWAPENTRY* SwapEntry)
/*
 * FUNCTION: Delete a page file mapping
 */
{
    ULONG Pte;
    PULONG Pt;
    KIRQL OldIrql;

    Pt = MmGetPageTableForProcess(Process, Address, FALSE, &OldIrql);

    if (Pt == NULL)
    {
        *SwapEntry = 0;
        return;
    }

    /*
     * Atomically set the entry to zero and get the old value.
     */
    Pte = InterlockedExchangePte(Pt, 0);

    if (Address < MmSystemRangeStart)
    {
        /* Remove PDE reference */
        Process->Vm.VmWorkingSetList->UsedPageTableEntries[MiGetPdeOffset(Address)]--;
        ASSERT(Process->Vm.VmWorkingSetList->UsedPageTableEntries[MiGetPdeOffset(Address)] < PTE_PER_PAGE);
    }

    /* We don't need to flush here because page file entries
     * are invalid translations, so the processor won't cache them */
    MmUnmapPageTable(Pt, OldIrql);

    if ((Pte & PA_PRESENT) || !(Pte & 0x800))
    {
        DPRINT1("Pte %x (want not 1 and 0x800)\n", Pte);
        KeBugCheck(MEMORY_MANAGEMENT);
    }

    /*
     * Return some information to the caller
     */
    *SwapEntry = Pte >> 1;
}

BOOLEAN
Mmi386MakeKernelPageTableGlobal(PVOID Address)
{
    PMMPDE PointerPde = MiAddressToPde(Address);
    PMMPTE PointerPte = MiAddressToPte(Address);

    if (PointerPde->u.Hard.Valid == 0)
    {
        if(!MiSynchronizeSystemPde(PointerPde))
            return FALSE;
        return PointerPte->u.Hard.Valid != 0;
    }
    return FALSE;
}

BOOLEAN
NTAPI
MmIsDirtyPage(PEPROCESS Process, PVOID Address)
{
    return MmGetPageEntryForProcess(Process, Address) & PA_DIRTY ? TRUE : FALSE;
}
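
/* Added note: the accessed/dirty bit helpers below update the PTE in a
 * compare-exchange loop so concurrent hardware updates of those bits are not
 * lost, and only flush the TLB when a bit the processor may have cached is
 * being cleared. */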
VOID
NTAPI
MmSetCleanPage(PEPROCESS Process, PVOID Address)
{
    PULONG Pt;
    ULONG Pte;
    KIRQL OldIrql;

    if (Address < MmSystemRangeStart && Process == NULL)
    {
        DPRINT1("MmSetCleanPage is called for user space without a process.\n");
        KeBugCheck(MEMORY_MANAGEMENT);
    }

    Pt = MmGetPageTableForProcess(Process, Address, FALSE, &OldIrql);
    if (Pt == NULL)
    {
        KeBugCheck(MEMORY_MANAGEMENT);
    }

    do
    {
        Pte = *Pt;
    } while (Pte != InterlockedCompareExchangePte(Pt, Pte & ~PA_DIRTY, Pte));

    if (!(Pte & PA_PRESENT))
    {
        KeBugCheck(MEMORY_MANAGEMENT);
    }
    else if (Pte & PA_DIRTY)
    {
        MiFlushTlb(Pt, Address, OldIrql);
    }
    else
    {
        MmUnmapPageTable(Pt, OldIrql);
    }
}

VOID
NTAPI
MmSetDirtyPage(PEPROCESS Process, PVOID Address)
{
    PULONG Pt;
    ULONG Pte;
    KIRQL OldIrql;

    if (Address < MmSystemRangeStart && Process == NULL)
    {
        DPRINT1("MmSetDirtyPage is called for user space without a process.\n");
        KeBugCheck(MEMORY_MANAGEMENT);
    }

    Pt = MmGetPageTableForProcess(Process, Address, FALSE, &OldIrql);
    if (Pt == NULL)
    {
        KeBugCheck(MEMORY_MANAGEMENT);
    }

    do
    {
        Pte = *Pt;
    } while (Pte != InterlockedCompareExchangePte(Pt, Pte | PA_DIRTY, Pte));

    if (!(Pte & PA_PRESENT))
    {
        KeBugCheck(MEMORY_MANAGEMENT);
    }
    else
    {
        /* The processor will never clear this bit itself, therefore
         * we do not need to flush the TLB here when setting it */
        MmUnmapPageTable(Pt, OldIrql);
    }
}

VOID
NTAPI
MmClearPageAccessedBit(PEPROCESS Process, PVOID Address)
{
    PULONG Pt;
    LONG Pte;
    KIRQL OldIrql;

    if (Address < MmSystemRangeStart && Process == NULL)
    {
        DPRINT1("MmClearPageAccessedBit is called for user space without a process.\n");
        KeBugCheck(MEMORY_MANAGEMENT);
    }

    Pt = MmGetPageTableForProcess(Process, Address, FALSE, &OldIrql);
    if (Pt == NULL)
    {
        KeBugCheck(MEMORY_MANAGEMENT);
    }

    do
    {
        Pte = *Pt;
    } while (Pte != InterlockedCompareExchangePte(Pt, Pte & ~PA_ACCESSED, Pte));

    if (!(Pte & PA_PRESENT))
    {
        KeBugCheck(MEMORY_MANAGEMENT);
    }

    MiFlushTlb(Pt, Address, OldIrql);
}

BOOLEAN
NTAPI
MmIsPageAccessed(PEPROCESS Process, PVOID Address)
{
    return BooleanFlagOn(MmGetPageEntryForProcess(Process, Address), PA_ACCESSED);
}

BOOLEAN
NTAPI
MmIsPagePresent(PEPROCESS Process, PVOID Address)
{
    return MmGetPageEntryForProcess(Process, Address) & PA_PRESENT;
}

BOOLEAN
NTAPI
MmIsDisabledPage(PEPROCESS Process, PVOID Address)
{
    ULONG_PTR Entry = MmGetPageEntryForProcess(Process, Address);
    return !(Entry & PA_PRESENT) && !(Entry & 0x800) && (Entry >> PAGE_SHIFT);
}

BOOLEAN
NTAPI
MmIsPageSwapEntry(PEPROCESS Process, PVOID Address)
{
    ULONG Entry;
    Entry = MmGetPageEntryForProcess(Process, Address);
    return !(Entry & PA_PRESENT) && (Entry & 0x800);
}
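
/* Added note: the routine below stores a swap entry (shifted left by one, see
 * the encoding summary above) in the PTE for Address. The page table must
 * normally already exist; the only exception is MM_WAIT_ENTRY, for which the
 * page table is created on demand. */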
NTSTATUS
NTAPI
MmCreatePageFileMapping(PEPROCESS Process,
                        PVOID Address,
                        SWAPENTRY SwapEntry)
{
    PULONG Pt;
    ULONG Pte;
    KIRQL OldIrql;

    if (Process == NULL && Address < MmSystemRangeStart)
    {
        DPRINT1("No process\n");
        KeBugCheck(MEMORY_MANAGEMENT);
    }
    if (Process != NULL && Address >= MmSystemRangeStart)
    {
        DPRINT1("Setting kernel address with process context\n");
        KeBugCheck(MEMORY_MANAGEMENT);
    }

    if (SwapEntry & (1 << 31))
    {
        KeBugCheck(MEMORY_MANAGEMENT);
    }

    Pt = MmGetPageTableForProcess(Process, Address, FALSE, &OldIrql);
    if (Pt == NULL)
    {
        /* Nobody should page out an address that hasn't even been mapped */
        /* But we might place a wait entry first, requiring the page table */
        if (SwapEntry != MM_WAIT_ENTRY)
        {
            KeBugCheck(MEMORY_MANAGEMENT);
        }
        Pt = MmGetPageTableForProcess(Process, Address, TRUE, &OldIrql);
    }
    Pte = InterlockedExchangePte(Pt, SwapEntry << 1);
    if (Pte != 0)
    {
        KeBugCheckEx(MEMORY_MANAGEMENT, SwapEntry, (ULONG_PTR)Process, (ULONG_PTR)Address, 0);
    }

    if (Address < MmSystemRangeStart)
    {
        /* Add PDE reference */
        Process->Vm.VmWorkingSetList->UsedPageTableEntries[MiGetPdeOffset(Address)]++;
        ASSERT(Process->Vm.VmWorkingSetList->UsedPageTableEntries[MiGetPdeOffset(Address)] <= PTE_PER_PAGE);
    }

    /* We don't need to flush the TLB here because it
     * only caches valid translations and a zero PTE
     * is not a valid translation */
    MmUnmapPageTable(Pt, OldIrql);

    return(STATUS_SUCCESS);
}
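
/* Added note: the routine below maps PageCount physical pages starting at
 * Address with the requested protection. The "Unsafe" variant does not verify
 * that the supplied page frames are marked in use; MmCreateVirtualMapping
 * further below performs that check and then forwards here. */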
NTSTATUS
NTAPI
MmCreateVirtualMappingUnsafe(PEPROCESS Process,
                             PVOID Address,
                             ULONG flProtect,
                             PPFN_NUMBER Pages,
                             ULONG PageCount)
{
    ULONG Attributes;
    PVOID Addr;
    ULONG i;
    ULONG oldPdeOffset, PdeOffset;
    PULONG Pt = NULL;
    ULONG Pte;
    KIRQL OldIrql;

    DPRINT("MmCreateVirtualMappingUnsafe(%p, %p, %lu, %p (%x), %lu)\n",
           Process, Address, flProtect, Pages, *Pages, PageCount);

    ASSERT(((ULONG_PTR)Address % PAGE_SIZE) == 0);

    if (Process == NULL)
    {
        if (Address < MmSystemRangeStart)
        {
            DPRINT1("NULL process given for user-mode mapping at %p -- %lu pages starting at %Ix\n", Address, PageCount, *Pages);
            KeBugCheck(MEMORY_MANAGEMENT);
        }
        if (PageCount > 0x10000 ||
            (ULONG_PTR) Address / PAGE_SIZE + PageCount > 0x100000)
        {
            DPRINT1("Page count too large for kernel-mode mapping at %p -- %lu pages starting at %Ix\n", Address, PageCount, *Pages);
            KeBugCheck(MEMORY_MANAGEMENT);
        }
    }
    else
    {
        if (Address >= MmSystemRangeStart)
        {
            DPRINT1("Process %p given for kernel-mode mapping at %p -- %lu pages starting at %Ix\n", Process, Address, PageCount, *Pages);
            KeBugCheck(MEMORY_MANAGEMENT);
        }
        if (PageCount > (ULONG_PTR)MmSystemRangeStart / PAGE_SIZE ||
            (ULONG_PTR) Address / PAGE_SIZE + PageCount >
            (ULONG_PTR)MmSystemRangeStart / PAGE_SIZE)
        {
            DPRINT1("Page count too large for process %p user-mode mapping at %p -- %lu pages starting at %Ix\n", Process, Address, PageCount, *Pages);
            KeBugCheck(MEMORY_MANAGEMENT);
        }
    }

    Attributes = ProtectToPTE(flProtect);
    Attributes &= 0xfff;
    if (Address >= MmSystemRangeStart)
    {
        Attributes &= ~PA_USER;
    }
    else
    {
        Attributes |= PA_USER;
    }

    Addr = Address;
    /* Make sure MmGetPageTableForProcess is called on the first iteration
     * by starting with a mismatching PDE offset */
    oldPdeOffset = ADDR_TO_PDE_OFFSET(Addr) + 1;
    for (i = 0; i < PageCount; i++, Addr = (PVOID)((ULONG_PTR)Addr + PAGE_SIZE))
    {
        if (!(Attributes & PA_PRESENT) && Pages[i] != 0)
        {
            DPRINT1("Setting physical address but not allowing access at address "
                    "0x%p with attributes %x/%x.\n",
                    Addr, Attributes, flProtect);
            KeBugCheck(MEMORY_MANAGEMENT);
        }
        PdeOffset = ADDR_TO_PDE_OFFSET(Addr);
        if (oldPdeOffset != PdeOffset)
        {
            if(Pt) MmUnmapPageTable(Pt, OldIrql);
            Pt = MmGetPageTableForProcess(Process, Addr, TRUE, &OldIrql);
            if (Pt == NULL)
            {
                KeBugCheck(MEMORY_MANAGEMENT);
            }
        }
        else
        {
            Pt++;
        }
        oldPdeOffset = PdeOffset;

        Pte = InterlockedExchangePte(Pt, PFN_TO_PTE(Pages[i]) | Attributes);

        /* There should not be anything valid here */
        if (Pte != 0)
        {
            DPRINT1("Bad PTE %lx at %p for %p + %lu\n", Pte, Pt, Address, i);
            KeBugCheck(MEMORY_MANAGEMENT);
        }

        /* We don't need to flush the TLB here because it only caches valid translations
         * and we're moving this PTE from invalid to valid so it can't be cached right now */

        if (Addr < MmSystemRangeStart)
        {
            /* Add PDE reference */
            Process->Vm.VmWorkingSetList->UsedPageTableEntries[MiGetPdeOffset(Addr)]++;
            ASSERT(Process->Vm.VmWorkingSetList->UsedPageTableEntries[MiGetPdeOffset(Addr)] <= PTE_PER_PAGE);
        }
    }

    ASSERT(Addr > Address);
    MmUnmapPageTable(Pt, OldIrql);

    return(STATUS_SUCCESS);
}

NTSTATUS
NTAPI
MmCreateVirtualMapping(PEPROCESS Process,
                       PVOID Address,
                       ULONG flProtect,
                       PPFN_NUMBER Pages,
                       ULONG PageCount)
{
    ULONG i;

    ASSERT((ULONG_PTR)Address % PAGE_SIZE == 0);
    for (i = 0; i < PageCount; i++)
    {
        if (!MmIsPageInUse(Pages[i]))
        {
            DPRINT1("Page at address %x not in use\n", PFN_TO_PTE(Pages[i]));
            KeBugCheck(MEMORY_MANAGEMENT);
        }
    }

    return(MmCreateVirtualMappingUnsafe(Process,
                                        Address,
                                        flProtect,
                                        Pages,
                                        PageCount));
}

ULONG
NTAPI
MmGetPageProtect(PEPROCESS Process, PVOID Address)
{
    ULONG Entry;
    ULONG Protect;

    Entry = MmGetPageEntryForProcess(Process, Address);

    if (!(Entry & PA_PRESENT))
    {
        Protect = PAGE_NOACCESS;
    }
    else
    {
        if (Entry & PA_READWRITE)
        {
            Protect = PAGE_READWRITE;
        }
        else
        {
            Protect = PAGE_EXECUTE_READ;
        }
        if (Entry & PA_CD)
        {
            Protect |= PAGE_NOCACHE;
        }
        if (Entry & PA_WT)
        {
            Protect |= PAGE_WRITETHROUGH;
        }
        if (!(Entry & PA_USER))
        {
            Protect |= PAGE_SYSTEM;
        }
    }
    return(Protect);
}

VOID
NTAPI
MmSetPageProtect(PEPROCESS Process, PVOID Address, ULONG flProtect)
{
    ULONG Attributes = 0;
    PULONG Pt;
    ULONG Pte;
    KIRQL OldIrql;

    DPRINT("MmSetPageProtect(Process %p Address %p flProtect %x)\n",
           Process, Address, flProtect);

    Attributes = ProtectToPTE(flProtect);

    Attributes &= 0xfff;
    if (Address >= MmSystemRangeStart)
    {
        Attributes &= ~PA_USER;
    }
    else
    {
        Attributes |= PA_USER;
    }

    Pt = MmGetPageTableForProcess(Process, Address, FALSE, &OldIrql);
    if (Pt == NULL)
    {
        KeBugCheck(MEMORY_MANAGEMENT);
    }
    Pte = InterlockedExchangePte(Pt, PAGE_MASK(*Pt) | Attributes | (*Pt & (PA_ACCESSED|PA_DIRTY)));

    // We should be able to bring a page back from PAGE_NOACCESS
    if ((Pte & 0x800) || !(Pte >> PAGE_SHIFT))
    {
        DPRINT1("Invalid Pte %lx\n", Pte);
        KeBugCheck(MEMORY_MANAGEMENT);
    }

    if((Pte & Attributes) != Attributes)
        MiFlushTlb(Pt, Address, OldIrql);
    else
        MmUnmapPageTable(Pt, OldIrql);
}

CODE_SEG("INIT")
VOID
NTAPI
MmInitGlobalKernelPageDirectory(VOID)
{
    /* Nothing to do here */
}

/* EOF */