/*
 * PROJECT:     ReactOS Kernel
 * LICENSE:     BSD - See COPYING.ARM in the top level directory
 * FILE:        ntoskrnl/mm/ARM3/mdlsup.c
 * PURPOSE:     ARM Memory Manager Memory Descriptor List (MDL) Management
 * PROGRAMMERS: ReactOS Portable Systems Group
 */

/* INCLUDES *******************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

#define MODULE_INVOLVED_IN_ARM3
#include <mm/ARM3/miarm.h>

/* GLOBALS ********************************************************************/

BOOLEAN MmTrackPtes;
BOOLEAN MmTrackLockedPages;
SIZE_T MmSystemLockPagesCount;

ULONG MiCacheOverride[MiNotMapped + 1];

/* INTERNAL FUNCTIONS *********************************************************/
static
PVOID
NTAPI
MiMapLockedPagesInUserSpace(
    _In_ PMDL Mdl,
    _In_ PVOID StartVa,
    _In_ MEMORY_CACHING_TYPE CacheType,
    _In_opt_ PVOID BaseAddress)
{
    NTSTATUS Status;
    PEPROCESS Process = PsGetCurrentProcess();
    PETHREAD Thread = PsGetCurrentThread();
    TABLE_SEARCH_RESULT Result;
    MI_PFN_CACHE_ATTRIBUTE CacheAttribute;
    MI_PFN_CACHE_ATTRIBUTE EffectiveCacheAttribute;
    BOOLEAN IsIoMapping;
    KIRQL OldIrql;
    ULONG_PTR StartingVa;
    ULONG_PTR EndingVa;
    PMMADDRESS_NODE Parent;
    PMMVAD_LONG Vad;
    ULONG NumberOfPages;
    PMMPTE PointerPte;
    PMMPDE PointerPde;
    MMPTE TempPte;
    PPFN_NUMBER MdlPages;
    PMMPFN Pfn1;
    PMMPFN Pfn2;
    BOOLEAN AddressSpaceLocked = FALSE;

    PAGED_CODE();

    DPRINT("MiMapLockedPagesInUserSpace(%p, %p, 0x%x, %p)\n",
           Mdl, StartVa, CacheType, BaseAddress);

    NumberOfPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES(StartVa,
                                                   MmGetMdlByteCount(Mdl));
    MdlPages = MmGetMdlPfnArray(Mdl);

    ASSERT(CacheType <= MmWriteCombined);

    IsIoMapping = (Mdl->MdlFlags & MDL_IO_SPACE) != 0;
    CacheAttribute = MiPlatformCacheAttributes[IsIoMapping][CacheType];

    /* Large pages are always cached, make sure we're not asking for those */
    if (CacheAttribute != MiCached)
    {
        DPRINT1("FIXME: Need to check for large pages\n");
    }

    Status = PsChargeProcessNonPagedPoolQuota(Process, sizeof(MMVAD_LONG));
    if (!NT_SUCCESS(Status))
    {
        Vad = NULL;
        goto Error;
    }

    /* Allocate a VAD for our mapped region */
    Vad = ExAllocatePoolWithTag(NonPagedPool, sizeof(MMVAD_LONG), 'ldaV');
    if (Vad == NULL)
    {
        PsReturnProcessNonPagedPoolQuota(Process, sizeof(MMVAD_LONG));
        Status = STATUS_INSUFFICIENT_RESOURCES;
        goto Error;
    }

    /* Initialize PhysicalMemory VAD */
    RtlZeroMemory(Vad, sizeof(*Vad));
    Vad->u2.VadFlags2.LongVad = 1;
    Vad->u.VadFlags.VadType = VadDevicePhysicalMemory;
    Vad->u.VadFlags.Protection = MM_READWRITE;
    Vad->u.VadFlags.PrivateMemory = 1;

    /* Did the caller specify an address? */
    if (BaseAddress == NULL)
    {
        /* We get to pick the address */
        MmLockAddressSpace(&Process->Vm);
        AddressSpaceLocked = TRUE;
        if (Process->VmDeleted)
        {
            Status = STATUS_PROCESS_IS_TERMINATING;
            goto Error;
        }

        Result = MiFindEmptyAddressRangeInTree(NumberOfPages << PAGE_SHIFT,
                                               MM_VIRTMEM_GRANULARITY,
                                               &Process->VadRoot,
                                               &Parent,
                                               &StartingVa);
        if (Result == TableFoundNode)
        {
            Status = STATUS_NO_MEMORY;
            goto Error;
        }
        EndingVa = StartingVa + NumberOfPages * PAGE_SIZE - 1;
        BaseAddress = (PVOID)StartingVa;
    }
    else
    {
        /* Caller specified a base address */
        StartingVa = (ULONG_PTR)BaseAddress;
        EndingVa = StartingVa + NumberOfPages * PAGE_SIZE - 1;

        /* Make sure it's valid */
        if (BYTE_OFFSET(StartingVa) != 0 ||
            EndingVa <= StartingVa ||
            EndingVa > (ULONG_PTR)MM_HIGHEST_VAD_ADDRESS)
        {
            Status = STATUS_INVALID_ADDRESS;
            goto Error;
        }

        MmLockAddressSpace(&Process->Vm);
        AddressSpaceLocked = TRUE;
        if (Process->VmDeleted)
        {
            Status = STATUS_PROCESS_IS_TERMINATING;
            goto Error;
        }

        /* Check if it's already in use */
        Result = MiCheckForConflictingNode(StartingVa >> PAGE_SHIFT,
                                           EndingVa >> PAGE_SHIFT,
                                           &Process->VadRoot,
                                           &Parent);
        if (Result == TableFoundNode)
        {
            Status = STATUS_CONFLICTING_ADDRESSES;
            goto Error;
        }
    }

    Vad->StartingVpn = StartingVa >> PAGE_SHIFT;
    Vad->EndingVpn = EndingVa >> PAGE_SHIFT;

    MiLockProcessWorkingSetUnsafe(Process, Thread);

    ASSERT(Vad->EndingVpn >= Vad->StartingVpn);
    MiInsertVad((PMMVAD)Vad, &Process->VadRoot);

    /* Check if this is uncached */
    if (CacheAttribute != MiCached)
    {
        /* Flush all caches */
        KeFlushEntireTb(TRUE, TRUE);
        KeInvalidateAllCaches();
    }

    PointerPte = MiAddressToPte(BaseAddress);
    while (NumberOfPages != 0 &&
           *MdlPages != LIST_HEAD)
    {
        PointerPde = MiPteToPde(PointerPte);
        MiMakePdeExistAndMakeValid(PointerPde, Process, MM_NOIRQL);
        ASSERT(PointerPte->u.Hard.Valid == 0);

        /* Add a PDE reference for each page */
        MiIncrementPageTableReferences(BaseAddress);

        /* Set up our basic user PTE */
        MI_MAKE_HARDWARE_PTE_USER(&TempPte,
                                  PointerPte,
                                  MM_READWRITE,
                                  *MdlPages);

        EffectiveCacheAttribute = CacheAttribute;

        /* We need to respect the PFN's caching information in some cases */
        Pfn2 = MiGetPfnEntry(*MdlPages);
        if (Pfn2 != NULL)
        {
            ASSERT(Pfn2->u3.e2.ReferenceCount != 0);

            switch (Pfn2->u3.e1.CacheAttribute)
            {
                case MiNonCached:
                    if (CacheAttribute != MiNonCached)
                    {
                        MiCacheOverride[1]++;
                        EffectiveCacheAttribute = MiNonCached;
                    }
                    break;

                case MiCached:
                    if (CacheAttribute != MiCached)
                    {
                        MiCacheOverride[0]++;
                        EffectiveCacheAttribute = MiCached;
                    }
                    break;

                case MiWriteCombined:
                    if (CacheAttribute != MiWriteCombined)
                    {
                        MiCacheOverride[2]++;
                        EffectiveCacheAttribute = MiWriteCombined;
                    }
                    break;

                default:
                    /* We don't support AWE magic (MiNotMapped) */
                    DPRINT1("FIXME: MiNotMapped is not supported\n");
                    ASSERT(FALSE);
                    break;
            }
        }

        /* Configure caching */
        switch (EffectiveCacheAttribute)
        {
            case MiNonCached:
                MI_PAGE_DISABLE_CACHE(&TempPte);
                MI_PAGE_WRITE_THROUGH(&TempPte);
                break;
            case MiCached:
                break;
            case MiWriteCombined:
                MI_PAGE_DISABLE_CACHE(&TempPte);
                MI_PAGE_WRITE_COMBINED(&TempPte);
                break;
            default:
                ASSERT(FALSE);
                break;
        }

        /* Make the page valid */
        MI_WRITE_VALID_PTE(PointerPte, TempPte);

        /* Acquire a share count */
        Pfn1 = MI_PFN_ELEMENT(PointerPde->u.Hard.PageFrameNumber);
        DPRINT("Incrementing %p from %p\n", Pfn1, _ReturnAddress());
        OldIrql = MiAcquirePfnLock();
        Pfn1->u2.ShareCount++;
        MiReleasePfnLock(OldIrql);

        /* Next page */
        MdlPages++;
        PointerPte++;
        NumberOfPages--;
        BaseAddress = (PVOID)((ULONG_PTR)BaseAddress + PAGE_SIZE);
    }

    MiUnlockProcessWorkingSetUnsafe(Process, Thread);
    ASSERT(AddressSpaceLocked);
    MmUnlockAddressSpace(&Process->Vm);

    ASSERT(StartingVa != 0);
    return (PVOID)((ULONG_PTR)StartingVa + MmGetMdlByteOffset(Mdl));

Error:
    if (AddressSpaceLocked)
    {
        MmUnlockAddressSpace(&Process->Vm);
    }
    if (Vad != NULL)
    {
        ExFreePoolWithTag(Vad, 'ldaV');
        PsReturnProcessNonPagedPoolQuota(Process, sizeof(MMVAD_LONG));
    }
    ExRaiseStatus(Status);
}
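
/*
 * Illustrative sketch (not part of the build): the path above is reached when
 * a driver asks for a user-space mapping of an already-locked MDL. Since this
 * path raises on failure instead of returning NULL, callers wrap it in SEH:
 *
 *     PVOID UserVa = NULL;
 *     _SEH2_TRY
 *     {
 *         // UserMode requests are forwarded here by MmMapLockedPagesSpecifyCache
 *         UserVa = MmMapLockedPagesSpecifyCache(Mdl,
 *                                               UserMode,
 *                                               MmCached,
 *                                               NULL,   // let Mm pick the address
 *                                               FALSE,
 *                                               NormalPagePriority);
 *     }
 *     _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
 *     {
 *         // e.g. STATUS_NO_MEMORY or STATUS_CONFLICTING_ADDRESSES from above
 *     }
 *     _SEH2_END;
 */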

static
VOID
NTAPI
MiUnmapLockedPagesInUserSpace(
    _In_ PVOID BaseAddress,
    _In_ PMDL Mdl)
{
    PEPROCESS Process = PsGetCurrentProcess();
    PETHREAD Thread = PsGetCurrentThread();
    PMMVAD Vad;
    PMMPTE PointerPte;
    PMMPDE PointerPde;
    KIRQL OldIrql;
    ULONG NumberOfPages;
    PPFN_NUMBER MdlPages;
    PFN_NUMBER PageTablePage;

    DPRINT("MiUnmapLockedPagesInUserSpace(%p, %p)\n", BaseAddress, Mdl);

    NumberOfPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES(MmGetMdlVirtualAddress(Mdl),
                                                   MmGetMdlByteCount(Mdl));
    ASSERT(NumberOfPages != 0);
    MdlPages = MmGetMdlPfnArray(Mdl);

    /* Find the VAD */
    MmLockAddressSpace(&Process->Vm);
    Vad = MiLocateAddress(BaseAddress);
    if (!Vad ||
        Vad->u.VadFlags.VadType != VadDevicePhysicalMemory)
    {
        DPRINT1("MiUnmapLockedPagesInUserSpace invalid for %p\n", BaseAddress);
        MmUnlockAddressSpace(&Process->Vm);
        return;
    }

    MiLockProcessWorkingSetUnsafe(Process, Thread);

    /* Remove it from the process VAD tree */
    ASSERT(Process->VadRoot.NumberGenericTableElements >= 1);
    MiRemoveNode((PMMADDRESS_NODE)Vad, &Process->VadRoot);
    PsReturnProcessNonPagedPoolQuota(Process, sizeof(MMVAD_LONG));

    /* MiRemoveNode should have removed us if we were the hint */
    ASSERT(Process->VadRoot.NodeHint != Vad);

    PointerPte = MiAddressToPte(BaseAddress);
    OldIrql = MiAcquirePfnLock();
    while (NumberOfPages != 0 &&
           *MdlPages != LIST_HEAD)
    {
        ASSERT(MiAddressToPte(PointerPte)->u.Hard.Valid == 1);
        ASSERT(PointerPte->u.Hard.Valid == 1);

        /* Invalidate it */
        MI_ERASE_PTE(PointerPte);

        /* We invalidated this PTE, so dereference the PDE */
        PointerPde = MiAddressToPde(BaseAddress);
        PageTablePage = PointerPde->u.Hard.PageFrameNumber;
        MiDecrementShareCount(MiGetPfnEntry(PageTablePage), PageTablePage);

        if (MiDecrementPageTableReferences(BaseAddress) == 0)
        {
            ASSERT(MiIsPteOnPdeBoundary(PointerPte + 1) || (NumberOfPages == 1));
            MiDeletePde(PointerPde, Process);
        }

        /* Next page */
        PointerPte++;
        NumberOfPages--;
        BaseAddress = (PVOID)((ULONG_PTR)BaseAddress + PAGE_SIZE);
        MdlPages++;
    }

    KeFlushProcessTb();
    MiReleasePfnLock(OldIrql);
    MiUnlockProcessWorkingSetUnsafe(Process, Thread);
    MmUnlockAddressSpace(&Process->Vm);
    ExFreePoolWithTag(Vad, 'ldaV');
}

/* PUBLIC FUNCTIONS ***********************************************************/

/*
 * @implemented
 */
PMDL
NTAPI
MmCreateMdl(IN PMDL Mdl,
            IN PVOID Base,
            IN SIZE_T Length)
{
    SIZE_T Size;

    //
    // Check if we don't have an MDL built
    //
    if (!Mdl)
    {
        //
        // Calculate the size we'll need and allocate the MDL
        //
        Size = MmSizeOfMdl(Base, Length);
        Mdl = ExAllocatePoolWithTag(NonPagedPool, Size, TAG_MDL);
        if (!Mdl) return NULL;
    }

    //
    // Initialize it
    //
    MmInitializeMdl(Mdl, Base, Length);
    return Mdl;
}

/*
 * @implemented
 */
SIZE_T
NTAPI
MmSizeOfMdl(IN PVOID Base,
            IN SIZE_T Length)
{
    //
    // Return the MDL size
    //
    return sizeof(MDL) +
           (ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Length) * sizeof(PFN_NUMBER));
}
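
/*
 * Illustrative sketch (not part of the build): how the two routines above fit
 * together. MmCreateMdl allocates MmSizeOfMdl(Base, Length) bytes from
 * nonpaged pool, so one PFN_NUMBER slot exists per spanned page; Buffer and
 * Length are hypothetical:
 *
 *     PMDL Mdl = MmCreateMdl(NULL, Buffer, Length);
 *     if (Mdl != NULL)
 *     {
 *         // ... build/probe, use, then free the MDL storage itself ...
 *         ExFreePool(Mdl);
 *     }
 *
 * Note that new drivers are expected to use IoAllocateMdl instead; MmCreateMdl
 * is retained for compatibility.
 */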

/*
 * @implemented
 */
VOID
NTAPI
MmBuildMdlForNonPagedPool(IN PMDL Mdl)
{
    PPFN_NUMBER MdlPages, EndPage;
    PFN_NUMBER Pfn, PageCount;
    PVOID Base;
    PMMPTE PointerPte;

    //
    // Sanity checks
    //
    ASSERT(Mdl->ByteCount != 0);
    ASSERT((Mdl->MdlFlags & (MDL_PAGES_LOCKED |
                             MDL_MAPPED_TO_SYSTEM_VA |
                             MDL_SOURCE_IS_NONPAGED_POOL |
                             MDL_PARTIAL)) == 0);

    //
    // We know the MDL isn't associated to a process now
    //
    Mdl->Process = NULL;

    //
    // Get page and VA information
    //
    MdlPages = (PPFN_NUMBER)(Mdl + 1);
    Base = Mdl->StartVa;

    //
    // Set the system address and now get the page count
    //
    Mdl->MappedSystemVa = (PVOID)((ULONG_PTR)Base + Mdl->ByteOffset);
    PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Mdl->MappedSystemVa,
                                               Mdl->ByteCount);
    ASSERT(PageCount != 0);
    EndPage = MdlPages + PageCount;

    //
    // Loop the PTEs
    //
    PointerPte = MiAddressToPte(Base);
    do
    {
        //
        // Write the PFN
        //
        Pfn = PFN_FROM_PTE(PointerPte++);
        *MdlPages++ = Pfn;
    } while (MdlPages < EndPage);

    //
    // Set the nonpaged pool flag
    //
    Mdl->MdlFlags |= MDL_SOURCE_IS_NONPAGED_POOL;

    //
    // Check if this is an I/O mapping
    //
    if (!MiGetPfnEntry(Pfn)) Mdl->MdlFlags |= MDL_IO_SPACE;
}
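
/*
 * Illustrative sketch (not part of the build): describing a nonpaged buffer.
 * No probing or locking is needed because nonpaged pool is always resident;
 * Buffer and Length are hypothetical:
 *
 *     PMDL Mdl = IoAllocateMdl(Buffer, Length, FALSE, FALSE, NULL);
 *     if (Mdl != NULL)
 *     {
 *         MmBuildMdlForNonPagedPool(Mdl);  // fills the PFN array in place
 *         // ... hand the MDL to I/O, then IoFreeMdl(Mdl) ...
 *     }
 */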

/*
 * @implemented
 */
PMDL
NTAPI
MmAllocatePagesForMdl(IN PHYSICAL_ADDRESS LowAddress,
                      IN PHYSICAL_ADDRESS HighAddress,
                      IN PHYSICAL_ADDRESS SkipBytes,
                      IN SIZE_T TotalBytes)
{
    //
    // Call the internal routine
    //
    return MiAllocatePagesForMdl(LowAddress,
                                 HighAddress,
                                 SkipBytes,
                                 TotalBytes,
                                 MiNotMapped,
                                 0);
}

/*
 * @implemented
 */
PMDL
NTAPI
MmAllocatePagesForMdlEx(IN PHYSICAL_ADDRESS LowAddress,
                        IN PHYSICAL_ADDRESS HighAddress,
                        IN PHYSICAL_ADDRESS SkipBytes,
                        IN SIZE_T TotalBytes,
                        IN MEMORY_CACHING_TYPE CacheType,
                        IN ULONG Flags)
{
    MI_PFN_CACHE_ATTRIBUTE CacheAttribute;

    //
    // Check for invalid cache type
    //
    if (CacheType > MmWriteCombined)
    {
        //
        // Normalize to default
        //
        CacheAttribute = MiNotMapped;
    }
    else
    {
        //
        // Convert to internal caching attribute
        //
        CacheAttribute = MiPlatformCacheAttributes[FALSE][CacheType];
    }

    //
    // Only these flags are allowed
    //
    if (Flags & ~(MM_DONT_ZERO_ALLOCATION | MM_ALLOCATE_FROM_LOCAL_NODE_ONLY))
    {
        //
        // Silently fail
        //
        return NULL;
    }

    //
    // Call the internal routine
    //
    return MiAllocatePagesForMdl(LowAddress,
                                 HighAddress,
                                 SkipBytes,
                                 TotalBytes,
                                 CacheAttribute,
                                 Flags);
}

/*
 * @implemented
 */
VOID
NTAPI
MmFreePagesFromMdl(IN PMDL Mdl)
{
    PVOID Base;
    PPFN_NUMBER Pages;
    LONG NumberOfPages;
    PMMPFN Pfn1;
    KIRQL OldIrql;
    DPRINT("Freeing MDL: %p\n", Mdl);

    //
    // Sanity checks
    //
    ASSERT(KeGetCurrentIrql() <= APC_LEVEL);
    ASSERT((Mdl->MdlFlags & MDL_IO_SPACE) == 0);
    ASSERT(((ULONG_PTR)Mdl->StartVa & (PAGE_SIZE - 1)) == 0);

    //
    // Get address and page information
    //
    Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);
    NumberOfPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);

    //
    // Acquire PFN lock
    //
    OldIrql = MiAcquirePfnLock();

    //
    // Loop all the MDL pages
    //
    Pages = (PPFN_NUMBER)(Mdl + 1);
    do
    {
        //
        // Reached the last page
        //
        if (*Pages == LIST_HEAD) break;

        //
        // Get the page entry
        //
        Pfn1 = MiGetPfnEntry(*Pages);
        ASSERT(Pfn1);
        ASSERT(Pfn1->u2.ShareCount == 1);
        ASSERT(MI_IS_PFN_DELETED(Pfn1) == TRUE);
        if (Pfn1->u4.PteFrame != 0x1FFEDCB)
        {
            /* Corrupted PFN entry or invalid free */
            KeBugCheckEx(MEMORY_MANAGEMENT, 0x1236, (ULONG_PTR)Mdl, (ULONG_PTR)Pages, *Pages);
        }

        //
        // Clear it
        //
        Pfn1->u3.e1.StartOfAllocation = 0;
        Pfn1->u3.e1.EndOfAllocation = 0;
        Pfn1->u3.e1.PageLocation = StandbyPageList;
        Pfn1->u2.ShareCount = 0;

        //
        // Dereference it
        //
        ASSERT(Pfn1->u3.e2.ReferenceCount != 0);
        if (Pfn1->u3.e2.ReferenceCount != 1)
        {
            /* Just take off one reference */
            InterlockedDecrement16((PSHORT)&Pfn1->u3.e2.ReferenceCount);
        }
        else
        {
            /* We'll be nuking the whole page */
            MiDecrementReferenceCount(Pfn1, *Pages);
        }

        //
        // Clear this page and move on
        //
        *Pages++ = LIST_HEAD;
    } while (--NumberOfPages != 0);

    //
    // Release the lock
    //
    MiReleasePfnLock(OldIrql);

    //
    // Remove the pages locked flag
    //
    Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
}
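
/*
 * Illustrative sketch (not part of the build): the usual lifecycle for the
 * allocation routines above; SomeDriverTransfer and TotalBytes are
 * hypothetical:
 *
 *     PHYSICAL_ADDRESS Low, High, Skip;
 *     Low.QuadPart = 0;
 *     Skip.QuadPart = 0;
 *     High.QuadPart = ~0ULL;   // no upper limit
 *
 *     PMDL Mdl = MmAllocatePagesForMdl(Low, High, Skip, TotalBytes);
 *     if (Mdl != NULL)
 *     {
 *         SomeDriverTransfer(Mdl);
 *         MmFreePagesFromMdl(Mdl);  // releases the pages...
 *         ExFreePool(Mdl);          // ...but the MDL itself must still be freed
 *     }
 */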

/*
 * @implemented
 */
PVOID
NTAPI
MmMapLockedPagesSpecifyCache(IN PMDL Mdl,
                             IN KPROCESSOR_MODE AccessMode,
                             IN MEMORY_CACHING_TYPE CacheType,
                             IN PVOID BaseAddress,
                             IN ULONG BugCheckOnFailure,
                             IN ULONG Priority) // MM_PAGE_PRIORITY
{
    PVOID Base;
    PPFN_NUMBER MdlPages, LastPage;
    PFN_COUNT PageCount;
    BOOLEAN IsIoMapping;
    MI_PFN_CACHE_ATTRIBUTE CacheAttribute;
    PMMPTE PointerPte;
    MMPTE TempPte;

    //
    // Sanity check
    //
    ASSERT(Mdl->ByteCount != 0);

    //
    // Get the base
    //
    Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);

    //
    // Handle kernel case first
    //
    if (AccessMode == KernelMode)
    {
        //
        // Get the list of pages and count
        //
        MdlPages = (PPFN_NUMBER)(Mdl + 1);
        PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);
        LastPage = MdlPages + PageCount;

        //
        // Sanity checks
        //
        ASSERT((Mdl->MdlFlags & (MDL_MAPPED_TO_SYSTEM_VA |
                                 MDL_SOURCE_IS_NONPAGED_POOL |
                                 MDL_PARTIAL_HAS_BEEN_MAPPED)) == 0);
        ASSERT((Mdl->MdlFlags & (MDL_PAGES_LOCKED | MDL_PARTIAL)) != 0);

        //
        // Get the correct cache type
        //
        IsIoMapping = (Mdl->MdlFlags & MDL_IO_SPACE) != 0;
        CacheAttribute = MiPlatformCacheAttributes[IsIoMapping][CacheType];

        //
        // Reserve the PTEs
        //
        PointerPte = MiReserveSystemPtes(PageCount, SystemPteSpace);
        if (!PointerPte)
        {
            //
            // If it can fail, return NULL
            //
            if (Mdl->MdlFlags & MDL_MAPPING_CAN_FAIL) return NULL;

            //
            // Should we bugcheck?
            //
            if (!BugCheckOnFailure) return NULL;

            //
            // Yes, crash the system
            //
            KeBugCheckEx(NO_MORE_SYSTEM_PTES, 0, PageCount, 0, 0);
        }

        //
        // Get the mapped address
        //
        Base = (PVOID)((ULONG_PTR)MiPteToAddress(PointerPte) + Mdl->ByteOffset);

        //
        // Get the template
        //
        TempPte = ValidKernelPte;
        switch (CacheAttribute)
        {
            case MiNonCached:

                //
                // Disable caching
                //
                MI_PAGE_DISABLE_CACHE(&TempPte);
                MI_PAGE_WRITE_THROUGH(&TempPte);
                break;

            case MiWriteCombined:

                //
                // Enable write combining
                //
                MI_PAGE_DISABLE_CACHE(&TempPte);
                MI_PAGE_WRITE_COMBINED(&TempPte);
                break;

            default:
                //
                // Nothing to do
                //
                break;
        }

        //
        // Loop all PTEs
        //
        do
        {
            //
            // We're done here
            //
            if (*MdlPages == LIST_HEAD) break;

            //
            // Write the PTE
            //
            TempPte.u.Hard.PageFrameNumber = *MdlPages;
            MI_WRITE_VALID_PTE(PointerPte++, TempPte);
        } while (++MdlPages < LastPage);

        //
        // Mark it as mapped
        //
        ASSERT((Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA) == 0);
        Mdl->MappedSystemVa = Base;
        Mdl->MdlFlags |= MDL_MAPPED_TO_SYSTEM_VA;

        //
        // Check if it was partial
        //
        if (Mdl->MdlFlags & MDL_PARTIAL)
        {
            //
            // Write the appropriate flag here too
            //
            Mdl->MdlFlags |= MDL_PARTIAL_HAS_BEEN_MAPPED;
        }

        //
        // Return the mapped address
        //
        return Base;
    }

    return MiMapLockedPagesInUserSpace(Mdl, Base, CacheType, BaseAddress);
}
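
/*
 * Illustrative sketch (not part of the build): a kernel-mode mapping that
 * fails gracefully when system PTEs run out, matching the BugCheckOnFailure
 * handling above; Mdl is assumed to be locked already:
 *
 *     PVOID SystemVa = MmMapLockedPagesSpecifyCache(Mdl,
 *                                                   KernelMode,
 *                                                   MmNonCached,
 *                                                   NULL,
 *                                                   FALSE,  // return NULL, don't bugcheck
 *                                                   NormalPagePriority);
 *     if (SystemVa == NULL)
 *         return STATUS_INSUFFICIENT_RESOURCES;
 */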

/*
 * @implemented
 */
PVOID
NTAPI
MmMapLockedPages(IN PMDL Mdl,
                 IN KPROCESSOR_MODE AccessMode)
{
    //
    // Call the extended version
    //
    return MmMapLockedPagesSpecifyCache(Mdl,
                                        AccessMode,
                                        MmCached,
                                        NULL,
                                        TRUE,
                                        HighPagePriority);
}

/*
 * @implemented
 */
VOID
NTAPI
MmUnmapLockedPages(IN PVOID BaseAddress,
                   IN PMDL Mdl)
{
    PVOID Base;
    PFN_COUNT PageCount, ExtraPageCount;
    PPFN_NUMBER MdlPages;
    PMMPTE PointerPte;

    //
    // Sanity check
    //
    ASSERT(Mdl->ByteCount != 0);

    //
    // Check if this is a kernel request
    //
    if (BaseAddress > MM_HIGHEST_USER_ADDRESS)
    {
        //
        // Get base and count information
        //
        Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);
        PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);

        //
        // Sanity checks
        //
        ASSERT((Mdl->MdlFlags & MDL_PARENT_MAPPED_SYSTEM_VA) == 0);
        ASSERT(PageCount != 0);
        ASSERT(Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA);

        //
        // Get the PTE
        //
        PointerPte = MiAddressToPte(BaseAddress);

        //
        // This should be a resident system PTE
        //
        ASSERT(PointerPte >= MmSystemPtesStart[SystemPteSpace]);
        ASSERT(PointerPte <= MmSystemPtesEnd[SystemPteSpace]);
        ASSERT(PointerPte->u.Hard.Valid == 1);

        //
        // Check if the caller wants us to free extra pages
        //
        if (Mdl->MdlFlags & MDL_FREE_EXTRA_PTES)
        {
            //
            // Get the MDL page array
            //
            MdlPages = MmGetMdlPfnArray(Mdl);

            /* Number of extra pages stored after the PFN array */
            ExtraPageCount = (PFN_COUNT)*(MdlPages + PageCount);

            //
            // Do the math
            //
            PageCount += ExtraPageCount;
            PointerPte -= ExtraPageCount;
            ASSERT(PointerPte >= MmSystemPtesStart[SystemPteSpace]);
            ASSERT(PointerPte <= MmSystemPtesEnd[SystemPteSpace]);

            //
            // Get the new base address
            //
            BaseAddress = (PVOID)((ULONG_PTR)BaseAddress -
                                  (ExtraPageCount << PAGE_SHIFT));
        }

        //
        // Remove flags
        //
        Mdl->MdlFlags &= ~(MDL_MAPPED_TO_SYSTEM_VA |
                           MDL_PARTIAL_HAS_BEEN_MAPPED |
                           MDL_FREE_EXTRA_PTES);

        //
        // Release the system PTEs
        //
        MiReleaseSystemPtes(PointerPte, PageCount, SystemPteSpace);
    }
    else
    {
        MiUnmapLockedPagesInUserSpace(BaseAddress, Mdl);
    }
}
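
/*
 * Illustrative sketch (not part of the build): a successful kernel-mode map
 * must be balanced by MmUnmapLockedPages before the MDL's pages are unlocked
 * (MmUnlockPages, below, also unmaps a leftover system mapping on its own):
 *
 *     PVOID SystemVa = MmMapLockedPages(Mdl, KernelMode);
 *     // ... access the buffer through SystemVa ...
 *     MmUnmapLockedPages(SystemVa, Mdl);
 */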

/*
 * @implemented
 */
VOID
NTAPI
MmProbeAndLockPages(IN PMDL Mdl,
                    IN KPROCESSOR_MODE AccessMode,
                    IN LOCK_OPERATION Operation)
{
    PPFN_NUMBER MdlPages;
    PVOID Base, Address, LastAddress, StartAddress;
    ULONG LockPages, TotalPages;
    NTSTATUS Status = STATUS_SUCCESS;
    PEPROCESS CurrentProcess;
    NTSTATUS ProbeStatus;
    PMMPTE PointerPte, LastPte;
    PMMPDE PointerPde;
#if (_MI_PAGING_LEVELS >= 3)
    PMMPDE PointerPpe;
#endif
#if (_MI_PAGING_LEVELS == 4)
    PMMPDE PointerPxe;
#endif
    PFN_NUMBER PageFrameIndex;
    BOOLEAN UsePfnLock;
    KIRQL OldIrql;
    PMMPFN Pfn1;
    DPRINT("Probing MDL: %p\n", Mdl);

    //
    // Sanity checks
    //
    ASSERT(Mdl->ByteCount != 0);
    ASSERT(((ULONG)Mdl->ByteOffset & ~(PAGE_SIZE - 1)) == 0);
    ASSERT(((ULONG_PTR)Mdl->StartVa & (PAGE_SIZE - 1)) == 0);
    ASSERT((Mdl->MdlFlags & (MDL_PAGES_LOCKED |
                             MDL_MAPPED_TO_SYSTEM_VA |
                             MDL_SOURCE_IS_NONPAGED_POOL |
                             MDL_PARTIAL |
                             MDL_IO_SPACE)) == 0);

    //
    // Get page and base information
    //
    MdlPages = (PPFN_NUMBER)(Mdl + 1);
    Base = Mdl->StartVa;

    //
    // Get the addresses and how many pages we span (and need to lock)
    //
    Address = (PVOID)((ULONG_PTR)Base + Mdl->ByteOffset);
    LastAddress = (PVOID)((ULONG_PTR)Address + Mdl->ByteCount);
    LockPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Address, Mdl->ByteCount);
    ASSERT(LockPages != 0);

    /* Block invalid access */
    if ((AccessMode != KernelMode) &&
        ((LastAddress > (PVOID)MM_USER_PROBE_ADDRESS) || (Address >= LastAddress)))
    {
        /* Caller should be in SEH, raise the error */
        *MdlPages = LIST_HEAD;
        ExRaiseStatus(STATUS_ACCESS_VIOLATION);
    }

    //
    // Get the process
    //
    if (Address <= MM_HIGHEST_USER_ADDRESS)
    {
        //
        // Get the process
        //
        CurrentProcess = PsGetCurrentProcess();
    }
    else
    {
        //
        // No process
        //
        CurrentProcess = NULL;
    }

    //
    // Save the number of pages we'll have to lock, and the start address
    //
    TotalPages = LockPages;
    StartAddress = Address;

    /* Large pages not supported */
    ASSERT(!MI_IS_PHYSICAL_ADDRESS(Address));

    //
    // Now probe them
    //
    ProbeStatus = STATUS_SUCCESS;
    _SEH2_TRY
    {
        //
        // Enter probe loop
        //
        do
        {
            //
            // Assume failure
            //
            *MdlPages = LIST_HEAD;

            //
            // Read
            //
            *(volatile CHAR*)Address;

            //
            // Check if this is write access (only probe for user-mode)
            //
            if ((Operation != IoReadAccess) &&
                (Address <= MM_HIGHEST_USER_ADDRESS))
            {
                //
                // Probe for write too
                //
                ProbeForWriteChar(Address);
            }

            //
            // Next address...
            //
            Address = PAGE_ALIGN((ULONG_PTR)Address + PAGE_SIZE);

            //
            // Next page...
            //
            LockPages--;
            MdlPages++;
        } while (Address < LastAddress);

        //
        // Reset back to the original page
        //
        ASSERT(LockPages == 0);
        MdlPages = (PPFN_NUMBER)(Mdl + 1);
    }
    _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
    {
        //
        // Oops :(
        //
        ProbeStatus = _SEH2_GetExceptionCode();
    }
    _SEH2_END;

    //
    // So how did that go?
    //
    if (ProbeStatus != STATUS_SUCCESS)
    {
        //
        // Fail
        //
        DPRINT1("MDL PROBE FAILED!\n");
        Mdl->Process = NULL;
        ExRaiseStatus(ProbeStatus);
    }

    //
    // Get the PTE and PDE
    //
    PointerPte = MiAddressToPte(StartAddress);
    PointerPde = MiAddressToPde(StartAddress);
#if (_MI_PAGING_LEVELS >= 3)
    PointerPpe = MiAddressToPpe(StartAddress);
#endif
#if (_MI_PAGING_LEVELS == 4)
    PointerPxe = MiAddressToPxe(StartAddress);
#endif

    //
    // Sanity check
    //
    ASSERT(MdlPages == (PPFN_NUMBER)(Mdl + 1));

    //
    // Check what kind of operation this is
    //
    if (Operation != IoReadAccess)
    {
        //
        // Set the write flag
        //
        Mdl->MdlFlags |= MDL_WRITE_OPERATION;
    }
    else
    {
        //
        // Remove the write flag
        //
        Mdl->MdlFlags &= ~(MDL_WRITE_OPERATION);
    }

    //
    // Mark the MDL as locked *now*
    //
    Mdl->MdlFlags |= MDL_PAGES_LOCKED;

    //
    // Check if this came from kernel mode
    //
    if (Base > MM_HIGHEST_USER_ADDRESS)
    {
        //
        // We should not have a process
        //
        ASSERT(CurrentProcess == NULL);
        Mdl->Process = NULL;

        //
        // In kernel mode, we don't need to check for write access
        //
        Operation = IoReadAccess;

        //
        // Use the PFN lock
        //
        UsePfnLock = TRUE;
        OldIrql = MiAcquirePfnLock();
    }
    else
    {
        //
        // Sanity checks
        //
        ASSERT(TotalPages != 0);
        ASSERT(CurrentProcess == PsGetCurrentProcess());

        //
        // Track locked pages
        //
        InterlockedExchangeAddSizeT(&CurrentProcess->NumberOfLockedPages,
                                    TotalPages);

        //
        // Save the process
        //
        Mdl->Process = CurrentProcess;

        /* Lock the process working set */
        MiLockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
        UsePfnLock = FALSE;
        OldIrql = MM_NOIRQL;
    }

    //
    // Get the last PTE
    //
    LastPte = MiAddressToPte((PVOID)((ULONG_PTR)LastAddress - 1));

    //
    // Loop the pages
    //
    do
    {
        //
        // Assume failure and check for non-mapped pages
        //
        *MdlPages = LIST_HEAD;
        while (
#if (_MI_PAGING_LEVELS == 4)
               (PointerPxe->u.Hard.Valid == 0) ||
#endif
#if (_MI_PAGING_LEVELS >= 3)
               (PointerPpe->u.Hard.Valid == 0) ||
#endif
               (PointerPde->u.Hard.Valid == 0) ||
               (PointerPte->u.Hard.Valid == 0))
        {
            //
            // What kind of lock were we using?
            //
            if (UsePfnLock)
            {
                //
                // Release PFN lock
                //
                MiReleasePfnLock(OldIrql);
            }
            else
            {
                /* Release process working set */
                MiUnlockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
            }

            //
            // Access the page
            //
            Address = MiPteToAddress(PointerPte);

            //HACK: Pass a placeholder TrapInformation so the fault handler knows we're unlocked
            Status = MmAccessFault(FALSE, Address, KernelMode, (PVOID)(ULONG_PTR)0xBADBADA3BADBADA3ULL);
            if (!NT_SUCCESS(Status))
            {
                //
                // Fail
                //
                DPRINT1("Access fault failed\n");
                goto Cleanup;
            }

            //
            // What lock should we use?
            //
            if (UsePfnLock)
            {
                //
                // Grab the PFN lock
                //
                OldIrql = MiAcquirePfnLock();
            }
            else
            {
                /* Lock the process working set */
                MiLockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
            }
        }

        //
        // Check if this was a write or modify
        //
        if (Operation != IoReadAccess)
        {
            //
            // Check if the PTE is not writable
            //
            if (MI_IS_PAGE_WRITEABLE(PointerPte) == FALSE)
            {
                //
                // Check if it's copy on write
                //
                if (MI_IS_PAGE_COPY_ON_WRITE(PointerPte))
                {
                    //
                    // Get the base address and allow a change for user-mode
                    //
                    Address = MiPteToAddress(PointerPte);
                    if (Address <= MM_HIGHEST_USER_ADDRESS)
                    {
                        //
                        // What kind of lock were we using?
                        //
                        if (UsePfnLock)
                        {
                            //
                            // Release PFN lock
                            //
                            MiReleasePfnLock(OldIrql);
                        }
                        else
                        {
                            /* Release process working set */
                            MiUnlockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
                        }

                        //
                        // Access the page
                        //

                        //HACK: Pass a placeholder TrapInformation so the fault handler knows we're unlocked
                        Status = MmAccessFault(TRUE, Address, KernelMode, (PVOID)(ULONG_PTR)0xBADBADA3BADBADA3ULL);
                        if (!NT_SUCCESS(Status))
                        {
                            //
                            // Fail
                            //
                            DPRINT1("Access fault failed\n");
                            goto Cleanup;
                        }

                        //
                        // Re-acquire the lock
                        //
                        if (UsePfnLock)
                        {
                            //
                            // Grab the PFN lock
                            //
                            OldIrql = MiAcquirePfnLock();
                        }
                        else
                        {
                            /* Lock the process working set */
                            MiLockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
                        }

                        //
                        // Start over
                        //
                        continue;
                    }
                }

                //
                // Fail, since we won't allow this
                //
                Status = STATUS_ACCESS_VIOLATION;
                goto CleanupWithLock;
            }
        }

        //
        // Grab the PFN
        //
        PageFrameIndex = PFN_FROM_PTE(PointerPte);
        Pfn1 = MiGetPfnEntry(PageFrameIndex);
        if (Pfn1)
        {
            /* Either this is for kernel-mode, or the working set is held */
            ASSERT((CurrentProcess == NULL) || (UsePfnLock == FALSE));

            /* No Physical VADs supported yet */
            if (CurrentProcess) ASSERT(CurrentProcess->PhysicalVadRoot == NULL);

            /* This address should already exist and be fully valid */
            MiReferenceProbedPageAndBumpLockCount(Pfn1);
        }
        else
        {
            //
            // For I/O addresses, just remember this
            //
            Mdl->MdlFlags |= MDL_IO_SPACE;
        }

        //
        // Write the page and move on
        //
        *MdlPages++ = PageFrameIndex;
        PointerPte++;

        /* Check if we're on a PDE boundary */
        if (MiIsPteOnPdeBoundary(PointerPte)) PointerPde++;
#if (_MI_PAGING_LEVELS >= 3)
        if (MiIsPteOnPpeBoundary(PointerPte)) PointerPpe++;
#endif
#if (_MI_PAGING_LEVELS == 4)
        if (MiIsPteOnPxeBoundary(PointerPte)) PointerPxe++;
#endif

    } while (PointerPte <= LastPte);

    //
    // What kind of lock were we using?
    //
    if (UsePfnLock)
    {
        //
        // Release PFN lock
        //
        MiReleasePfnLock(OldIrql);
    }
    else
    {
        /* Release process working set */
        MiUnlockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
    }

    //
    // Sanity check
    //
    ASSERT((Mdl->MdlFlags & MDL_DESCRIBES_AWE) == 0);
    return;

CleanupWithLock:
    //
    // This is the failure path
    //
    ASSERT(!NT_SUCCESS(Status));

    //
    // What kind of lock were we using?
    //
    if (UsePfnLock)
    {
        //
        // Release PFN lock
        //
        MiReleasePfnLock(OldIrql);
    }
    else
    {
        /* Release process working set */
        MiUnlockProcessWorkingSet(CurrentProcess, PsGetCurrentThread());
    }
Cleanup:
    //
    // Pages must be locked so MmUnlock can work
    //
    ASSERT(Mdl->MdlFlags & MDL_PAGES_LOCKED);
    MmUnlockPages(Mdl);

    //
    // Raise the error
    //
    ExRaiseStatus(Status);
}
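
/*
 * Illustrative sketch (not part of the build): the canonical caller pattern.
 * Because MmProbeAndLockPages raises on failure rather than returning a
 * status, callers must wrap it in SEH; UserBuffer and Length are
 * hypothetical:
 *
 *     NTSTATUS Status = STATUS_SUCCESS;
 *     PMDL Mdl = IoAllocateMdl(UserBuffer, Length, FALSE, FALSE, NULL);
 *     if (Mdl == NULL) return STATUS_INSUFFICIENT_RESOURCES;
 *
 *     _SEH2_TRY
 *     {
 *         MmProbeAndLockPages(Mdl, UserMode, IoWriteAccess);
 *     }
 *     _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER)
 *     {
 *         Status = _SEH2_GetExceptionCode();
 *     }
 *     _SEH2_END;
 *
 *     if (!NT_SUCCESS(Status))
 *     {
 *         IoFreeMdl(Mdl);
 *         return Status;
 *     }
 *     // ... use the locked pages, then: MmUnlockPages(Mdl); IoFreeMdl(Mdl);
 */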

/*
 * @implemented
 */
VOID
NTAPI
MmUnlockPages(IN PMDL Mdl)
{
    PPFN_NUMBER MdlPages, LastPage;
    PEPROCESS Process;
    PVOID Base;
    ULONG Flags, PageCount;
    KIRQL OldIrql;
    PMMPFN Pfn1;
    DPRINT("Unlocking MDL: %p\n", Mdl);

    //
    // Sanity checks
    //
    ASSERT((Mdl->MdlFlags & MDL_PAGES_LOCKED) != 0);
    ASSERT((Mdl->MdlFlags & MDL_SOURCE_IS_NONPAGED_POOL) == 0);
    ASSERT((Mdl->MdlFlags & MDL_PARTIAL) == 0);
    ASSERT(Mdl->ByteCount != 0);

    //
    // Get the associated process and capture the flags, which are volatile
    //
    Process = Mdl->Process;
    Flags = Mdl->MdlFlags;

    //
    // Automagically undo any MmGetSystemAddressForMdl calls for this MDL
    //
    if (Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA)
    {
        //
        // Unmap the pages from system space
        //
        MmUnmapLockedPages(Mdl->MappedSystemVa, Mdl);
    }

    //
    // Get the page count
    //
    MdlPages = (PPFN_NUMBER)(Mdl + 1);
    Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);
    PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);
    ASSERT(PageCount != 0);

    //
    // We don't support AWE
    //
    if (Flags & MDL_DESCRIBES_AWE) ASSERT(FALSE);

    //
    // Check if the buffer is mapped I/O space
    //
    if (Flags & MDL_IO_SPACE)
    {
        //
        // Acquire PFN lock
        //
        OldIrql = MiAcquirePfnLock();

        //
        // Loop every page
        //
        LastPage = MdlPages + PageCount;
        do
        {
            //
            // Last page, break out
            //
            if (*MdlPages == LIST_HEAD) break;

            //
            // Check if this page is in the PFN database
            //
            Pfn1 = MiGetPfnEntry(*MdlPages);
            if (Pfn1) MiDereferencePfnAndDropLockCount(Pfn1);
        } while (++MdlPages < LastPage);

        //
        // Release the lock
        //
        MiReleasePfnLock(OldIrql);

        //
        // Check if we have a process
        //
        if (Process)
        {
            //
            // Handle the accounting of locked pages
            //
            ASSERT(Process->NumberOfLockedPages > 0);
            InterlockedExchangeAddSizeT(&Process->NumberOfLockedPages,
                                        -(LONG_PTR)PageCount);
        }

        //
        // We're done
        //
        Mdl->MdlFlags &= ~MDL_IO_SPACE;
        Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
        return;
    }

    //
    // Check if we have a process
    //
    if (Process)
    {
        //
        // Handle the accounting of locked pages
        //
        ASSERT(Process->NumberOfLockedPages > 0);
        InterlockedExchangeAddSizeT(&Process->NumberOfLockedPages,
                                    -(LONG_PTR)PageCount);
    }

    //
    // Loop every page
    //
    LastPage = MdlPages + PageCount;
    do
    {
        //
        // Last page reached
        //
        if (*MdlPages == LIST_HEAD)
        {
            //
            // Were there no pages at all?
            //
            if (MdlPages == (PPFN_NUMBER)(Mdl + 1))
            {
                //
                // We're already done
                //
                Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
                return;
            }

            //
            // Otherwise, stop here
            //
            LastPage = MdlPages;
            break;
        }

        /* Save the PFN entry instead for the secondary loop */
        *MdlPages = (PFN_NUMBER)MiGetPfnEntry(*MdlPages);
        ASSERT(*MdlPages != 0);
    } while (++MdlPages < LastPage);

    //
    // Reset pointer
    //
    MdlPages = (PPFN_NUMBER)(Mdl + 1);

    //
    // Now grab the PFN lock for the actual unlock and dereference
    //
    OldIrql = MiAcquirePfnLock();
    do
    {
        /* Get the current entry and reference count */
        Pfn1 = (PMMPFN)*MdlPages;
        MiDereferencePfnAndDropLockCount(Pfn1);
    } while (++MdlPages < LastPage);

    //
    // Release the lock
    //
    MiReleasePfnLock(OldIrql);

    //
    // We're done
    //
    Mdl->MdlFlags &= ~MDL_PAGES_LOCKED;
}

/*
 * @unimplemented
 */
NTSTATUS
NTAPI
MmAdvanceMdl(IN PMDL Mdl,
             IN ULONG NumberOfBytes)
{
    UNIMPLEMENTED;
    return STATUS_NOT_IMPLEMENTED;
}

/*
 * @implemented
 */
PVOID
NTAPI
MmMapLockedPagesWithReservedMapping(
    _In_ PVOID MappingAddress,
    _In_ ULONG PoolTag,
    _In_ PMDL Mdl,
    _In_ MEMORY_CACHING_TYPE CacheType)
{
    PPFN_NUMBER MdlPages, LastPage;
    PFN_COUNT PageCount;
    BOOLEAN IsIoMapping;
    MI_PFN_CACHE_ATTRIBUTE CacheAttribute;
    PMMPTE PointerPte;
    MMPTE TempPte;

    ASSERT(Mdl->ByteCount != 0);

    // Get the list of pages and count
    MdlPages = MmGetMdlPfnArray(Mdl);
    PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(MmGetMdlVirtualAddress(Mdl),
                                               Mdl->ByteCount);
    LastPage = MdlPages + PageCount;

    // Sanity checks
    ASSERT((Mdl->MdlFlags & (MDL_MAPPED_TO_SYSTEM_VA |
                             MDL_SOURCE_IS_NONPAGED_POOL |
                             MDL_PARTIAL_HAS_BEEN_MAPPED)) == 0);
    ASSERT((Mdl->MdlFlags & (MDL_PAGES_LOCKED | MDL_PARTIAL)) != 0);

    // Get the correct cache type
    IsIoMapping = (Mdl->MdlFlags & MDL_IO_SPACE) != 0;
    CacheAttribute = MiPlatformCacheAttributes[IsIoMapping][CacheType];

    // Get the first PTE we reserved
    ASSERT(MappingAddress);
    PointerPte = MiAddressToPte(MappingAddress) - 2;
    ASSERT(!PointerPte[0].u.Hard.Valid &&
           !PointerPte[1].u.Hard.Valid);

    // Verify that the pool tag matches
    TempPte.u.Long = PoolTag;
    TempPte.u.Hard.Valid = 0;
    if (PointerPte[1].u.Long != TempPte.u.Long)
    {
        KeBugCheckEx(SYSTEM_PTE_MISUSE,
                     PTE_MAPPING_ADDRESS_NOT_OWNED, /* Trying to map an address it does not own */
                     (ULONG_PTR)MappingAddress,
                     PoolTag,
                     PointerPte[1].u.Long);
    }

    // We must have a size, and our helper PTEs must be invalid
    if (PointerPte[0].u.List.NextEntry < 3)
    {
        KeBugCheckEx(SYSTEM_PTE_MISUSE,
                     PTE_MAPPING_ADDRESS_INVALID, /* Trying to map an invalid address */
                     (ULONG_PTR)MappingAddress,
                     PoolTag,
                     (ULONG_PTR)_ReturnAddress());
    }

    // If the mapping isn't big enough, fail
    if (PointerPte[0].u.List.NextEntry - 2 < PageCount)
    {
        DPRINT1("Reserved mapping too small. Need %Iu pages, have %Iu\n",
                PageCount,
                PointerPte[0].u.List.NextEntry - 2);
        return NULL;
    }

    // Skip our two helper PTEs
    PointerPte += 2;

    // Get the template
    TempPte = ValidKernelPte;
    switch (CacheAttribute)
    {
        case MiNonCached:
            // Disable caching
            MI_PAGE_DISABLE_CACHE(&TempPte);
            MI_PAGE_WRITE_THROUGH(&TempPte);
            break;

        case MiWriteCombined:
            // Enable write combining
            MI_PAGE_DISABLE_CACHE(&TempPte);
            MI_PAGE_WRITE_COMBINED(&TempPte);
            break;

        default:
            // Nothing to do
            break;
    }

    // Loop all PTEs
    for (; (MdlPages < LastPage) && (*MdlPages != LIST_HEAD); ++MdlPages)
    {
        // Write the PTE
        TempPte.u.Hard.PageFrameNumber = *MdlPages;
        MI_WRITE_VALID_PTE(PointerPte++, TempPte);
    }

    // Mark it as mapped
    ASSERT((Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA) == 0);
    Mdl->MappedSystemVa = MappingAddress;
    Mdl->MdlFlags |= MDL_MAPPED_TO_SYSTEM_VA;

    // Check if it was partial
    if (Mdl->MdlFlags & MDL_PARTIAL)
    {
        // Write the appropriate flag here too
        Mdl->MdlFlags |= MDL_PARTIAL_HAS_BEEN_MAPPED;
    }

    // Return the mapped address
    return (PVOID)((ULONG_PTR)MappingAddress + Mdl->ByteOffset);
}
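
/*
 * Illustrative sketch (not part of the build): pairing with the reserved
 * mapping APIs. The pool tag must match the one passed at reservation time,
 * or the checks above bugcheck with SYSTEM_PTE_MISUSE; Size and 'xTag' are
 * hypothetical:
 *
 *     PVOID Reserved = MmAllocateMappingAddress(Size, 'xTag');
 *     if (Reserved != NULL)
 *     {
 *         PVOID Va = MmMapLockedPagesWithReservedMapping(Reserved, 'xTag', Mdl, MmCached);
 *         if (Va != NULL)
 *         {
 *             // ... use Va ...
 *             MmUnmapReservedMapping(Reserved, 'xTag', Mdl);
 *         }
 *         MmFreeMappingAddress(Reserved, 'xTag');
 *     }
 */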

/*
 * @implemented
 */
VOID
NTAPI
MmUnmapReservedMapping(
    _In_ PVOID BaseAddress,
    _In_ ULONG PoolTag,
    _In_ PMDL Mdl)
{
    PVOID Base;
    PFN_COUNT PageCount, ExtraPageCount;
    PPFN_NUMBER MdlPages;
    PMMPTE PointerPte;
    MMPTE TempPte;

    // Sanity check
    ASSERT(Mdl->ByteCount != 0);
    ASSERT(BaseAddress > MM_HIGHEST_USER_ADDRESS);

    // Get base and count information
    Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset);
    PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount);

    // Sanity checks
    ASSERT((Mdl->MdlFlags & MDL_PARENT_MAPPED_SYSTEM_VA) == 0);
    ASSERT(PageCount != 0);
    ASSERT(Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA);

    // Get the first PTE we reserved
    PointerPte = MiAddressToPte(BaseAddress) - 2;
    ASSERT(!PointerPte[0].u.Hard.Valid &&
           !PointerPte[1].u.Hard.Valid);

    // Verify that the pool tag matches
    TempPte.u.Long = PoolTag;
    TempPte.u.Hard.Valid = 0;
    if (PointerPte[1].u.Long != TempPte.u.Long)
    {
        KeBugCheckEx(SYSTEM_PTE_MISUSE,
                     PTE_UNMAPPING_ADDRESS_NOT_OWNED, /* Trying to unmap an address it does not own */
                     (ULONG_PTR)BaseAddress,
                     PoolTag,
                     PointerPte[1].u.Long);
    }

    // We must have a size
    if (PointerPte[0].u.List.NextEntry < 3)
    {
        KeBugCheckEx(SYSTEM_PTE_MISUSE,
                     PTE_MAPPING_ADDRESS_EMPTY, /* Mapping apparently empty */
                     (ULONG_PTR)BaseAddress,
                     PoolTag,
                     (ULONG_PTR)_ReturnAddress());
    }

    // Skip our two helper PTEs
    PointerPte += 2;

    // This should be a resident system PTE
    ASSERT(PointerPte >= MmSystemPtesStart[SystemPteSpace]);
    ASSERT(PointerPte <= MmSystemPtesEnd[SystemPteSpace]);
    ASSERT(PointerPte->u.Hard.Valid == 1);

    // TODO: check that the MDL range makes sense with regard to the mapping range
    // TODO: check if any of them are already zero
    // TODO: check if any outside the MDL range are nonzero
    // TODO: find out what to do with extra PTEs

    // Check if the caller wants us to free extra pages
    if (Mdl->MdlFlags & MDL_FREE_EXTRA_PTES)
    {
        // Get the MDL page array
        MdlPages = MmGetMdlPfnArray(Mdl);

        /* Number of extra pages stored after the PFN array */
        ExtraPageCount = MdlPages[PageCount];

        // Do the math
        PageCount += ExtraPageCount;
        PointerPte -= ExtraPageCount;
        ASSERT(PointerPte >= MmSystemPtesStart[SystemPteSpace]);
        ASSERT(PointerPte <= MmSystemPtesEnd[SystemPteSpace]);

        // Get the new base address
        BaseAddress = (PVOID)((ULONG_PTR)BaseAddress -
                              (ExtraPageCount << PAGE_SHIFT));
    }

    // Zero the PTEs
    RtlZeroMemory(PointerPte, PageCount * sizeof(MMPTE));

    // Flush the TLB
    KeFlushEntireTb(TRUE, TRUE);

    // Remove flags
    Mdl->MdlFlags &= ~(MDL_MAPPED_TO_SYSTEM_VA |
                       MDL_PARTIAL_HAS_BEEN_MAPPED |
                       MDL_FREE_EXTRA_PTES);
}

/*
 * @unimplemented
 */
NTSTATUS
NTAPI
MmPrefetchPages(IN ULONG NumberOfLists,
                IN PREAD_LIST *ReadLists)
{
    UNIMPLEMENTED;
    return STATUS_NOT_IMPLEMENTED;
}

/*
 * @unimplemented
 */
NTSTATUS
NTAPI
MmProtectMdlSystemAddress(IN PMDL MemoryDescriptorList,
                          IN ULONG NewProtect)
{
    UNIMPLEMENTED;
    return STATUS_NOT_IMPLEMENTED;
}

/**
 * @brief
 * Probes and locks virtual pages in memory for the specified process.
 *
 * @param[in,out] MemoryDescriptorList
 * Memory Descriptor List (MDL) containing the buffer to be probed and locked.
 *
 * @param[in] Process
 * The process for which the buffer should be probed and locked.
 *
 * @param[in] AccessMode
 * Access mode for probing the pages. Can be KernelMode or UserMode.
 *
 * @param[in] Operation
 * The type of the probing and locking operation. Can be IoReadAccess, IoWriteAccess or IoModifyAccess.
 *
 * @return
 * Nothing.
 *
 * @see MmProbeAndLockPages
 *
 * @remarks Must be called at IRQL <= APC_LEVEL.
 */
_IRQL_requires_max_(APC_LEVEL)
VOID
NTAPI
MmProbeAndLockProcessPages(
    _Inout_ PMDL MemoryDescriptorList,
    _In_ PEPROCESS Process,
    _In_ KPROCESSOR_MODE AccessMode,
    _In_ LOCK_OPERATION Operation)
{
    KAPC_STATE ApcState;
    BOOLEAN IsAttached = FALSE;

    if (Process != PsGetCurrentProcess())
    {
        KeStackAttachProcess(&Process->Pcb, &ApcState);
        IsAttached = TRUE;
    }

    /* Protect in try/finally to ensure we detach even if MmProbeAndLockPages() raises an exception */
    _SEH2_TRY
    {
        MmProbeAndLockPages(MemoryDescriptorList, AccessMode, Operation);
    }
    _SEH2_FINALLY
    {
        if (IsAttached)
            KeUnstackDetachProcess(&ApcState);
    }
    _SEH2_END;
}

/*
 * @unimplemented
 */
VOID
NTAPI
MmProbeAndLockSelectedPages(IN OUT PMDL MemoryDescriptorList,
                            IN LARGE_INTEGER PageList[],
                            IN KPROCESSOR_MODE AccessMode,
                            IN LOCK_OPERATION Operation)
{
    UNIMPLEMENTED;
}

/*
 * @unimplemented
 */
VOID
NTAPI
MmMapMemoryDumpMdl(IN PMDL Mdl)
{
    UNIMPLEMENTED;
}

/* EOF */