1 /* 2 * PROJECT: ReactOS Kernel 3 * LICENSE: BSD - See COPYING.ARM in the top level directory 4 * FILE: ntoskrnl/mm/ARM3/mdlsup.c 5 * PURPOSE: ARM Memory Manager Memory Descriptor List (MDL) Management 6 * PROGRAMMERS: ReactOS Portable Systems Group 7 */ 8 9 /* INCLUDES *******************************************************************/ 10 11 #include <ntoskrnl.h> 12 #define NDEBUG 13 #include <debug.h> 14 15 #define MODULE_INVOLVED_IN_ARM3 16 #include <mm/ARM3/miarm.h> 17 18 /* GLOBALS ********************************************************************/ 19 20 BOOLEAN MmTrackPtes; 21 BOOLEAN MmTrackLockedPages; 22 SIZE_T MmSystemLockPagesCount; 23 24 ULONG MiCacheOverride[MiNotMapped + 1]; 25 26 /* INTERNAL FUNCTIONS *********************************************************/ 27 static 28 PVOID 29 NTAPI 30 MiMapLockedPagesInUserSpace( 31 _In_ PMDL Mdl, 32 _In_ PVOID StartVa, 33 _In_ MEMORY_CACHING_TYPE CacheType, 34 _In_opt_ PVOID BaseAddress) 35 { 36 NTSTATUS Status; 37 PEPROCESS Process = PsGetCurrentProcess(); 38 PETHREAD Thread = PsGetCurrentThread(); 39 TABLE_SEARCH_RESULT Result; 40 MI_PFN_CACHE_ATTRIBUTE CacheAttribute; 41 MI_PFN_CACHE_ATTRIBUTE EffectiveCacheAttribute; 42 BOOLEAN IsIoMapping; 43 KIRQL OldIrql; 44 ULONG_PTR StartingVa; 45 ULONG_PTR EndingVa; 46 PMMADDRESS_NODE Parent; 47 PMMVAD_LONG Vad; 48 ULONG NumberOfPages; 49 PMMPTE PointerPte; 50 PMMPDE PointerPde; 51 MMPTE TempPte; 52 PPFN_NUMBER MdlPages; 53 PMMPFN Pfn1; 54 PMMPFN Pfn2; 55 BOOLEAN AddressSpaceLocked = FALSE; 56 57 PAGED_CODE(); 58 59 DPRINT("MiMapLockedPagesInUserSpace(%p, %p, 0x%x, %p)\n", 60 Mdl, StartVa, CacheType, BaseAddress); 61 62 NumberOfPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES(StartVa, 63 MmGetMdlByteCount(Mdl)); 64 MdlPages = MmGetMdlPfnArray(Mdl); 65 66 ASSERT(CacheType <= MmWriteCombined); 67 68 IsIoMapping = (Mdl->MdlFlags & MDL_IO_SPACE) != 0; 69 CacheAttribute = MiPlatformCacheAttributes[IsIoMapping][CacheType]; 70 71 /* Large pages are always cached, make sure we're not asking for those */ 72 if (CacheAttribute != MiCached) 73 { 74 DPRINT1("FIXME: Need to check for large pages\n"); 75 } 76 77 /* Allocate a VAD for our mapped region */ 78 Vad = ExAllocatePoolWithTag(NonPagedPool, sizeof(MMVAD_LONG), 'ldaV'); 79 if (Vad == NULL) 80 { 81 Status = STATUS_INSUFFICIENT_RESOURCES; 82 goto Error; 83 } 84 85 /* Initialize PhysicalMemory VAD */ 86 RtlZeroMemory(Vad, sizeof(*Vad)); 87 Vad->u2.VadFlags2.LongVad = 1; 88 Vad->u.VadFlags.VadType = VadDevicePhysicalMemory; 89 Vad->u.VadFlags.Protection = MM_READWRITE; 90 Vad->u.VadFlags.PrivateMemory = 1; 91 92 /* Did the caller specify an address? */ 93 if (BaseAddress == NULL) 94 { 95 /* We get to pick the address */ 96 MmLockAddressSpace(&Process->Vm); 97 AddressSpaceLocked = TRUE; 98 if (Process->VmDeleted) 99 { 100 Status = STATUS_PROCESS_IS_TERMINATING; 101 goto Error; 102 } 103 104 Result = MiFindEmptyAddressRangeInTree(NumberOfPages << PAGE_SHIFT, 105 MM_VIRTMEM_GRANULARITY, 106 &Process->VadRoot, 107 &Parent, 108 &StartingVa); 109 if (Result == TableFoundNode) 110 { 111 Status = STATUS_NO_MEMORY; 112 goto Error; 113 } 114 EndingVa = StartingVa + NumberOfPages * PAGE_SIZE - 1; 115 BaseAddress = (PVOID)StartingVa; 116 } 117 else 118 { 119 /* Caller specified a base address */ 120 StartingVa = (ULONG_PTR)BaseAddress; 121 EndingVa = StartingVa + NumberOfPages * PAGE_SIZE - 1; 122 123 /* Make sure it's valid */ 124 if (BYTE_OFFSET(StartingVa) != 0 || 125 EndingVa <= StartingVa || 126 EndingVa > (ULONG_PTR)MM_HIGHEST_VAD_ADDRESS) 127 { 128 Status = STATUS_INVALID_ADDRESS; 129 goto Error; 130 } 131 132 MmLockAddressSpace(&Process->Vm); 133 AddressSpaceLocked = TRUE; 134 if (Process->VmDeleted) 135 { 136 Status = STATUS_PROCESS_IS_TERMINATING; 137 goto Error; 138 } 139 140 /* Check if it's already in use */ 141 Result = MiCheckForConflictingNode(StartingVa >> PAGE_SHIFT, 142 EndingVa >> PAGE_SHIFT, 143 &Process->VadRoot, 144 &Parent); 145 if (Result == TableFoundNode) 146 { 147 Status = STATUS_CONFLICTING_ADDRESSES; 148 goto Error; 149 } 150 } 151 152 Vad->StartingVpn = StartingVa >> PAGE_SHIFT; 153 Vad->EndingVpn = EndingVa >> PAGE_SHIFT; 154 155 MiLockProcessWorkingSetUnsafe(Process, Thread); 156 157 ASSERT(Vad->EndingVpn >= Vad->StartingVpn); 158 159 MiInsertVad((PMMVAD)Vad, &Process->VadRoot); 160 161 /* Check if this is uncached */ 162 if (CacheAttribute != MiCached) 163 { 164 /* Flush all caches */ 165 KeFlushEntireTb(TRUE, TRUE); 166 KeInvalidateAllCaches(); 167 } 168 169 PointerPte = MiAddressToPte(BaseAddress); 170 while (NumberOfPages != 0 && 171 *MdlPages != LIST_HEAD) 172 { 173 PointerPde = MiPteToPde(PointerPte); 174 MiMakePdeExistAndMakeValid(PointerPde, Process, MM_NOIRQL); 175 ASSERT(PointerPte->u.Hard.Valid == 0); 176 177 /* Add a PDE reference for each page */ 178 MiIncrementPageTableReferences(BaseAddress); 179 180 /* Set up our basic user PTE */ 181 MI_MAKE_HARDWARE_PTE_USER(&TempPte, 182 PointerPte, 183 MM_READWRITE, 184 *MdlPages); 185 186 EffectiveCacheAttribute = CacheAttribute; 187 188 /* We need to respect the PFN's caching information in some cases */ 189 Pfn2 = MiGetPfnEntry(*MdlPages); 190 if (Pfn2 != NULL) 191 { 192 ASSERT(Pfn2->u3.e2.ReferenceCount != 0); 193 194 switch (Pfn2->u3.e1.CacheAttribute) 195 { 196 case MiNonCached: 197 if (CacheAttribute != MiNonCached) 198 { 199 MiCacheOverride[1]++; 200 EffectiveCacheAttribute = MiNonCached; 201 } 202 break; 203 204 case MiCached: 205 if (CacheAttribute != MiCached) 206 { 207 MiCacheOverride[0]++; 208 EffectiveCacheAttribute = MiCached; 209 } 210 break; 211 212 case MiWriteCombined: 213 if (CacheAttribute != MiWriteCombined) 214 { 215 MiCacheOverride[2]++; 216 EffectiveCacheAttribute = MiWriteCombined; 217 } 218 break; 219 220 default: 221 /* We don't support AWE magic (MiNotMapped) */ 222 DPRINT1("FIXME: MiNotMapped is not supported\n"); 223 ASSERT(FALSE); 224 break; 225 } 226 } 227 228 /* Configure caching */ 229 switch (EffectiveCacheAttribute) 230 { 231 case MiNonCached: 232 MI_PAGE_DISABLE_CACHE(&TempPte); 233 MI_PAGE_WRITE_THROUGH(&TempPte); 234 break; 235 case MiCached: 236 break; 237 case MiWriteCombined: 238 MI_PAGE_DISABLE_CACHE(&TempPte); 239 MI_PAGE_WRITE_COMBINED(&TempPte); 240 break; 241 default: 242 ASSERT(FALSE); 243 break; 244 } 245 246 /* Make the page valid */ 247 MI_WRITE_VALID_PTE(PointerPte, TempPte); 248 249 /* Acquire a share count */ 250 Pfn1 = MI_PFN_ELEMENT(PointerPde->u.Hard.PageFrameNumber); 251 OldIrql = MiAcquirePfnLock(); 252 Pfn1->u2.ShareCount++; 253 MiReleasePfnLock(OldIrql); 254 255 /* Next page */ 256 MdlPages++; 257 PointerPte++; 258 NumberOfPages--; 259 BaseAddress = (PVOID)((ULONG_PTR)BaseAddress + PAGE_SIZE); 260 } 261 262 MiUnlockProcessWorkingSetUnsafe(Process, Thread); 263 ASSERT(AddressSpaceLocked); 264 MmUnlockAddressSpace(&Process->Vm); 265 266 ASSERT(StartingVa != 0); 267 return (PVOID)((ULONG_PTR)StartingVa + MmGetMdlByteOffset(Mdl)); 268 269 Error: 270 if (AddressSpaceLocked) 271 { 272 MmUnlockAddressSpace(&Process->Vm); 273 } 274 if (Vad != NULL) 275 { 276 ExFreePoolWithTag(Vad, 'ldaV'); 277 } 278 ExRaiseStatus(Status); 279 } 280 281 static 282 VOID 283 NTAPI 284 MiUnmapLockedPagesInUserSpace( 285 _In_ PVOID BaseAddress, 286 _In_ PMDL Mdl) 287 { 288 PEPROCESS Process = PsGetCurrentProcess(); 289 PETHREAD Thread = PsGetCurrentThread(); 290 PMMVAD Vad; 291 PMMPTE PointerPte; 292 PMMPDE PointerPde; 293 KIRQL OldIrql; 294 ULONG NumberOfPages; 295 PPFN_NUMBER MdlPages; 296 PFN_NUMBER PageTablePage; 297 298 DPRINT("MiUnmapLockedPagesInUserSpace(%p, %p)\n", BaseAddress, Mdl); 299 300 NumberOfPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES(MmGetMdlVirtualAddress(Mdl), 301 MmGetMdlByteCount(Mdl)); 302 ASSERT(NumberOfPages != 0); 303 MdlPages = MmGetMdlPfnArray(Mdl); 304 305 /* Find the VAD */ 306 MmLockAddressSpace(&Process->Vm); 307 Vad = MiLocateAddress(BaseAddress); 308 if (!Vad || 309 Vad->u.VadFlags.VadType != VadDevicePhysicalMemory) 310 { 311 DPRINT1("MiUnmapLockedPagesInUserSpace invalid for %p\n", BaseAddress); 312 MmUnlockAddressSpace(&Process->Vm); 313 return; 314 } 315 316 MiLockProcessWorkingSetUnsafe(Process, Thread); 317 318 /* Remove it from the process VAD tree */ 319 ASSERT(Process->VadRoot.NumberGenericTableElements >= 1); 320 MiRemoveNode((PMMADDRESS_NODE)Vad, &Process->VadRoot); 321 322 /* MiRemoveNode should have removed us if we were the hint */ 323 ASSERT(Process->VadRoot.NodeHint != Vad); 324 325 PointerPte = MiAddressToPte(BaseAddress); 326 OldIrql = MiAcquirePfnLock(); 327 while (NumberOfPages != 0 && 328 *MdlPages != LIST_HEAD) 329 { 330 ASSERT(MiAddressToPte(PointerPte)->u.Hard.Valid == 1); 331 ASSERT(PointerPte->u.Hard.Valid == 1); 332 333 /* Dereference the page */ 334 MiDecrementPageTableReferences(BaseAddress); 335 336 /* Invalidate it */ 337 MI_ERASE_PTE(PointerPte); 338 339 /* We invalidated this PTE, so dereference the PDE */ 340 PointerPde = MiAddressToPde(BaseAddress); 341 PageTablePage = PointerPde->u.Hard.PageFrameNumber; 342 MiDecrementShareCount(MiGetPfnEntry(PageTablePage), PageTablePage); 343 344 /* Next page */ 345 PointerPte++; 346 NumberOfPages--; 347 BaseAddress = (PVOID)((ULONG_PTR)BaseAddress + PAGE_SIZE); 348 MdlPages++; 349 350 /* Moving to a new PDE? */ 351 if (PointerPde != MiAddressToPde(BaseAddress)) 352 { 353 /* See if we should delete it */ 354 KeFlushProcessTb(); 355 PointerPde = MiPteToPde(PointerPte - 1); 356 ASSERT(PointerPde->u.Hard.Valid == 1); 357 if (MiQueryPageTableReferences(BaseAddress) == 0) 358 { 359 ASSERT(PointerPde->u.Long != 0); 360 MiDeletePte(PointerPde, 361 MiPteToAddress(PointerPde), 362 Process, 363 NULL); 364 } 365 } 366 } 367 368 KeFlushProcessTb(); 369 MiReleasePfnLock(OldIrql); 370 MiUnlockProcessWorkingSetUnsafe(Process, Thread); 371 MmUnlockAddressSpace(&Process->Vm); 372 ExFreePoolWithTag(Vad, 'ldaV'); 373 } 374 375 /* PUBLIC FUNCTIONS ***********************************************************/ 376 377 /* 378 * @implemented 379 */ 380 PMDL 381 NTAPI 382 MmCreateMdl(IN PMDL Mdl, 383 IN PVOID Base, 384 IN SIZE_T Length) 385 { 386 SIZE_T Size; 387 388 // 389 // Check if we don't have an MDL built 390 // 391 if (!Mdl) 392 { 393 // 394 // Calculate the size we'll need and allocate the MDL 395 // 396 Size = MmSizeOfMdl(Base, Length); 397 Mdl = ExAllocatePoolWithTag(NonPagedPool, Size, TAG_MDL); 398 if (!Mdl) return NULL; 399 } 400 401 // 402 // Initialize it 403 // 404 MmInitializeMdl(Mdl, Base, Length); 405 return Mdl; 406 } 407 408 /* 409 * @implemented 410 */ 411 SIZE_T 412 NTAPI 413 MmSizeOfMdl(IN PVOID Base, 414 IN SIZE_T Length) 415 { 416 // 417 // Return the MDL size 418 // 419 return sizeof(MDL) + 420 (ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Length) * sizeof(PFN_NUMBER)); 421 } 422 423 /* 424 * @implemented 425 */ 426 VOID 427 NTAPI 428 MmBuildMdlForNonPagedPool(IN PMDL Mdl) 429 { 430 PPFN_NUMBER MdlPages, EndPage; 431 PFN_NUMBER Pfn, PageCount; 432 PVOID Base; 433 PMMPTE PointerPte; 434 435 // 436 // Sanity checks 437 // 438 ASSERT(Mdl->ByteCount != 0); 439 ASSERT((Mdl->MdlFlags & (MDL_PAGES_LOCKED | 440 MDL_MAPPED_TO_SYSTEM_VA | 441 MDL_SOURCE_IS_NONPAGED_POOL | 442 MDL_PARTIAL)) == 0); 443 444 // 445 // We know the MDL isn't associated to a process now 446 // 447 Mdl->Process = NULL; 448 449 // 450 // Get page and VA information 451 // 452 MdlPages = (PPFN_NUMBER)(Mdl + 1); 453 Base = Mdl->StartVa; 454 455 // 456 // Set the system address and now get the page count 457 // 458 Mdl->MappedSystemVa = (PVOID)((ULONG_PTR)Base + Mdl->ByteOffset); 459 PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Mdl->MappedSystemVa, 460 Mdl->ByteCount); 461 ASSERT(PageCount != 0); 462 EndPage = MdlPages + PageCount; 463 464 // 465 // Loop the PTEs 466 // 467 PointerPte = MiAddressToPte(Base); 468 do 469 { 470 // 471 // Write the PFN 472 // 473 Pfn = PFN_FROM_PTE(PointerPte++); 474 *MdlPages++ = Pfn; 475 } while (MdlPages < EndPage); 476 477 // 478 // Set the nonpaged pool flag 479 // 480 Mdl->MdlFlags |= MDL_SOURCE_IS_NONPAGED_POOL; 481 482 // 483 // Check if this is an I/O mapping 484 // 485 if (!MiGetPfnEntry(Pfn)) Mdl->MdlFlags |= MDL_IO_SPACE; 486 } 487 488 /* 489 * @implemented 490 */ 491 PMDL 492 NTAPI 493 MmAllocatePagesForMdl(IN PHYSICAL_ADDRESS LowAddress, 494 IN PHYSICAL_ADDRESS HighAddress, 495 IN PHYSICAL_ADDRESS SkipBytes, 496 IN SIZE_T TotalBytes) 497 { 498 // 499 // Call the internal routine 500 // 501 return MiAllocatePagesForMdl(LowAddress, 502 HighAddress, 503 SkipBytes, 504 TotalBytes, 505 MiNotMapped, 506 0); 507 } 508 509 /* 510 * @implemented 511 */ 512 PMDL 513 NTAPI 514 MmAllocatePagesForMdlEx(IN PHYSICAL_ADDRESS LowAddress, 515 IN PHYSICAL_ADDRESS HighAddress, 516 IN PHYSICAL_ADDRESS SkipBytes, 517 IN SIZE_T TotalBytes, 518 IN MEMORY_CACHING_TYPE CacheType, 519 IN ULONG Flags) 520 { 521 MI_PFN_CACHE_ATTRIBUTE CacheAttribute; 522 523 // 524 // Check for invalid cache type 525 // 526 if (CacheType > MmWriteCombined) 527 { 528 // 529 // Normalize to default 530 // 531 CacheAttribute = MiNotMapped; 532 } 533 else 534 { 535 // 536 // Conver to internal caching attribute 537 // 538 CacheAttribute = MiPlatformCacheAttributes[FALSE][CacheType]; 539 } 540 541 // 542 // Only these flags are allowed 543 // 544 if (Flags & ~(MM_DONT_ZERO_ALLOCATION | MM_ALLOCATE_FROM_LOCAL_NODE_ONLY)) 545 { 546 // 547 // Silently fail 548 // 549 return NULL; 550 } 551 552 // 553 // Call the internal routine 554 // 555 return MiAllocatePagesForMdl(LowAddress, 556 HighAddress, 557 SkipBytes, 558 TotalBytes, 559 CacheAttribute, 560 Flags); 561 } 562 563 /* 564 * @implemented 565 */ 566 VOID 567 NTAPI 568 MmFreePagesFromMdl(IN PMDL Mdl) 569 { 570 PVOID Base; 571 PPFN_NUMBER Pages; 572 LONG NumberOfPages; 573 PMMPFN Pfn1; 574 KIRQL OldIrql; 575 DPRINT("Freeing MDL: %p\n", Mdl); 576 577 // 578 // Sanity checks 579 // 580 ASSERT(KeGetCurrentIrql() <= APC_LEVEL); 581 ASSERT((Mdl->MdlFlags & MDL_IO_SPACE) == 0); 582 ASSERT(((ULONG_PTR)Mdl->StartVa & (PAGE_SIZE - 1)) == 0); 583 584 // 585 // Get address and page information 586 // 587 Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset); 588 NumberOfPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount); 589 590 // 591 // Acquire PFN lock 592 // 593 OldIrql = MiAcquirePfnLock(); 594 595 // 596 // Loop all the MDL pages 597 // 598 Pages = (PPFN_NUMBER)(Mdl + 1); 599 do 600 { 601 // 602 // Reached the last page 603 // 604 if (*Pages == LIST_HEAD) break; 605 606 // 607 // Get the page entry 608 // 609 Pfn1 = MiGetPfnEntry(*Pages); 610 ASSERT(Pfn1); 611 ASSERT(Pfn1->u2.ShareCount == 1); 612 ASSERT(MI_IS_PFN_DELETED(Pfn1) == TRUE); 613 if (Pfn1->u4.PteFrame != 0x1FFEDCB) 614 { 615 /* Corrupted PFN entry or invalid free */ 616 KeBugCheckEx(MEMORY_MANAGEMENT, 0x1236, (ULONG_PTR)Mdl, (ULONG_PTR)Pages, *Pages); 617 } 618 619 // 620 // Clear it 621 // 622 Pfn1->u3.e1.StartOfAllocation = 0; 623 Pfn1->u3.e1.EndOfAllocation = 0; 624 Pfn1->u3.e1.PageLocation = StandbyPageList; 625 Pfn1->u2.ShareCount = 0; 626 627 // 628 // Dereference it 629 // 630 ASSERT(Pfn1->u3.e2.ReferenceCount != 0); 631 if (Pfn1->u3.e2.ReferenceCount != 1) 632 { 633 /* Just take off one reference */ 634 InterlockedDecrement16((PSHORT)&Pfn1->u3.e2.ReferenceCount); 635 } 636 else 637 { 638 /* We'll be nuking the whole page */ 639 MiDecrementReferenceCount(Pfn1, *Pages); 640 } 641 642 // 643 // Clear this page and move on 644 // 645 *Pages++ = LIST_HEAD; 646 } while (--NumberOfPages != 0); 647 648 // 649 // Release the lock 650 // 651 MiReleasePfnLock(OldIrql); 652 653 // 654 // Remove the pages locked flag 655 // 656 Mdl->MdlFlags &= ~MDL_PAGES_LOCKED; 657 } 658 659 /* 660 * @implemented 661 */ 662 PVOID 663 NTAPI 664 MmMapLockedPagesSpecifyCache(IN PMDL Mdl, 665 IN KPROCESSOR_MODE AccessMode, 666 IN MEMORY_CACHING_TYPE CacheType, 667 IN PVOID BaseAddress, 668 IN ULONG BugCheckOnFailure, 669 IN ULONG Priority) // MM_PAGE_PRIORITY 670 { 671 PVOID Base; 672 PPFN_NUMBER MdlPages, LastPage; 673 PFN_COUNT PageCount; 674 BOOLEAN IsIoMapping; 675 MI_PFN_CACHE_ATTRIBUTE CacheAttribute; 676 PMMPTE PointerPte; 677 MMPTE TempPte; 678 679 // 680 // Sanity check 681 // 682 ASSERT(Mdl->ByteCount != 0); 683 684 // 685 // Get the base 686 // 687 Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset); 688 689 // 690 // Handle kernel case first 691 // 692 if (AccessMode == KernelMode) 693 { 694 // 695 // Get the list of pages and count 696 // 697 MdlPages = (PPFN_NUMBER)(Mdl + 1); 698 PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount); 699 LastPage = MdlPages + PageCount; 700 701 // 702 // Sanity checks 703 // 704 ASSERT((Mdl->MdlFlags & (MDL_MAPPED_TO_SYSTEM_VA | 705 MDL_SOURCE_IS_NONPAGED_POOL | 706 MDL_PARTIAL_HAS_BEEN_MAPPED)) == 0); 707 ASSERT((Mdl->MdlFlags & (MDL_PAGES_LOCKED | MDL_PARTIAL)) != 0); 708 709 // 710 // Get the correct cache type 711 // 712 IsIoMapping = (Mdl->MdlFlags & MDL_IO_SPACE) != 0; 713 CacheAttribute = MiPlatformCacheAttributes[IsIoMapping][CacheType]; 714 715 // 716 // Reserve the PTEs 717 // 718 PointerPte = MiReserveSystemPtes(PageCount, SystemPteSpace); 719 if (!PointerPte) 720 { 721 // 722 // If it can fail, return NULL 723 // 724 if (Mdl->MdlFlags & MDL_MAPPING_CAN_FAIL) return NULL; 725 726 // 727 // Should we bugcheck? 728 // 729 if (!BugCheckOnFailure) return NULL; 730 731 // 732 // Yes, crash the system 733 // 734 KeBugCheckEx(NO_MORE_SYSTEM_PTES, 0, PageCount, 0, 0); 735 } 736 737 // 738 // Get the mapped address 739 // 740 Base = (PVOID)((ULONG_PTR)MiPteToAddress(PointerPte) + Mdl->ByteOffset); 741 742 // 743 // Get the template 744 // 745 TempPte = ValidKernelPte; 746 switch (CacheAttribute) 747 { 748 case MiNonCached: 749 750 // 751 // Disable caching 752 // 753 MI_PAGE_DISABLE_CACHE(&TempPte); 754 MI_PAGE_WRITE_THROUGH(&TempPte); 755 break; 756 757 case MiWriteCombined: 758 759 // 760 // Enable write combining 761 // 762 MI_PAGE_DISABLE_CACHE(&TempPte); 763 MI_PAGE_WRITE_COMBINED(&TempPte); 764 break; 765 766 default: 767 // 768 // Nothing to do 769 // 770 break; 771 } 772 773 // 774 // Loop all PTEs 775 // 776 do 777 { 778 // 779 // We're done here 780 // 781 if (*MdlPages == LIST_HEAD) break; 782 783 // 784 // Write the PTE 785 // 786 TempPte.u.Hard.PageFrameNumber = *MdlPages; 787 MI_WRITE_VALID_PTE(PointerPte++, TempPte); 788 } while (++MdlPages < LastPage); 789 790 // 791 // Mark it as mapped 792 // 793 ASSERT((Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA) == 0); 794 Mdl->MappedSystemVa = Base; 795 Mdl->MdlFlags |= MDL_MAPPED_TO_SYSTEM_VA; 796 797 // 798 // Check if it was partial 799 // 800 if (Mdl->MdlFlags & MDL_PARTIAL) 801 { 802 // 803 // Write the appropriate flag here too 804 // 805 Mdl->MdlFlags |= MDL_PARTIAL_HAS_BEEN_MAPPED; 806 } 807 808 // 809 // Return the mapped address 810 // 811 return Base; 812 } 813 814 return MiMapLockedPagesInUserSpace(Mdl, Base, CacheType, BaseAddress); 815 } 816 817 /* 818 * @implemented 819 */ 820 PVOID 821 NTAPI 822 MmMapLockedPages(IN PMDL Mdl, 823 IN KPROCESSOR_MODE AccessMode) 824 { 825 // 826 // Call the extended version 827 // 828 return MmMapLockedPagesSpecifyCache(Mdl, 829 AccessMode, 830 MmCached, 831 NULL, 832 TRUE, 833 HighPagePriority); 834 } 835 836 /* 837 * @implemented 838 */ 839 VOID 840 NTAPI 841 MmUnmapLockedPages(IN PVOID BaseAddress, 842 IN PMDL Mdl) 843 { 844 PVOID Base; 845 PFN_COUNT PageCount, ExtraPageCount; 846 PPFN_NUMBER MdlPages; 847 PMMPTE PointerPte; 848 849 // 850 // Sanity check 851 // 852 ASSERT(Mdl->ByteCount != 0); 853 854 // 855 // Check if this is a kernel request 856 // 857 if (BaseAddress > MM_HIGHEST_USER_ADDRESS) 858 { 859 // 860 // Get base and count information 861 // 862 Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset); 863 PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount); 864 865 // 866 // Sanity checks 867 // 868 ASSERT((Mdl->MdlFlags & MDL_PARENT_MAPPED_SYSTEM_VA) == 0); 869 ASSERT(PageCount != 0); 870 ASSERT(Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA); 871 872 // 873 // Get the PTE 874 // 875 PointerPte = MiAddressToPte(BaseAddress); 876 877 // 878 // This should be a resident system PTE 879 // 880 ASSERT(PointerPte >= MmSystemPtesStart[SystemPteSpace]); 881 ASSERT(PointerPte <= MmSystemPtesEnd[SystemPteSpace]); 882 ASSERT(PointerPte->u.Hard.Valid == 1); 883 884 // 885 // Check if the caller wants us to free advanced pages 886 // 887 if (Mdl->MdlFlags & MDL_FREE_EXTRA_PTES) 888 { 889 // 890 // Get the MDL page array 891 // 892 MdlPages = MmGetMdlPfnArray(Mdl); 893 894 /* Number of extra pages stored after the PFN array */ 895 ExtraPageCount = (PFN_COUNT)*(MdlPages + PageCount); 896 897 // 898 // Do the math 899 // 900 PageCount += ExtraPageCount; 901 PointerPte -= ExtraPageCount; 902 ASSERT(PointerPte >= MmSystemPtesStart[SystemPteSpace]); 903 ASSERT(PointerPte <= MmSystemPtesEnd[SystemPteSpace]); 904 905 // 906 // Get the new base address 907 // 908 BaseAddress = (PVOID)((ULONG_PTR)BaseAddress - 909 (ExtraPageCount << PAGE_SHIFT)); 910 } 911 912 // 913 // Remove flags 914 // 915 Mdl->MdlFlags &= ~(MDL_MAPPED_TO_SYSTEM_VA | 916 MDL_PARTIAL_HAS_BEEN_MAPPED | 917 MDL_FREE_EXTRA_PTES); 918 919 // 920 // Release the system PTEs 921 // 922 MiReleaseSystemPtes(PointerPte, PageCount, SystemPteSpace); 923 } 924 else 925 { 926 MiUnmapLockedPagesInUserSpace(BaseAddress, Mdl); 927 } 928 } 929 930 /* 931 * @implemented 932 */ 933 VOID 934 NTAPI 935 MmProbeAndLockPages(IN PMDL Mdl, 936 IN KPROCESSOR_MODE AccessMode, 937 IN LOCK_OPERATION Operation) 938 { 939 PPFN_NUMBER MdlPages; 940 PVOID Base, Address, LastAddress, StartAddress; 941 ULONG LockPages, TotalPages; 942 NTSTATUS Status = STATUS_SUCCESS; 943 PEPROCESS CurrentProcess; 944 NTSTATUS ProbeStatus; 945 PMMPTE PointerPte, LastPte; 946 PMMPDE PointerPde; 947 #if (_MI_PAGING_LEVELS >= 3) 948 PMMPDE PointerPpe; 949 #endif 950 #if (_MI_PAGING_LEVELS == 4) 951 PMMPDE PointerPxe; 952 #endif 953 PFN_NUMBER PageFrameIndex; 954 BOOLEAN UsePfnLock; 955 KIRQL OldIrql; 956 PMMPFN Pfn1; 957 DPRINT("Probing MDL: %p\n", Mdl); 958 959 // 960 // Sanity checks 961 // 962 ASSERT(Mdl->ByteCount != 0); 963 ASSERT(((ULONG)Mdl->ByteOffset & ~(PAGE_SIZE - 1)) == 0); 964 ASSERT(((ULONG_PTR)Mdl->StartVa & (PAGE_SIZE - 1)) == 0); 965 ASSERT((Mdl->MdlFlags & (MDL_PAGES_LOCKED | 966 MDL_MAPPED_TO_SYSTEM_VA | 967 MDL_SOURCE_IS_NONPAGED_POOL | 968 MDL_PARTIAL | 969 MDL_IO_SPACE)) == 0); 970 971 // 972 // Get page and base information 973 // 974 MdlPages = (PPFN_NUMBER)(Mdl + 1); 975 Base = Mdl->StartVa; 976 977 // 978 // Get the addresses and how many pages we span (and need to lock) 979 // 980 Address = (PVOID)((ULONG_PTR)Base + Mdl->ByteOffset); 981 LastAddress = (PVOID)((ULONG_PTR)Address + Mdl->ByteCount); 982 LockPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Address, Mdl->ByteCount); 983 ASSERT(LockPages != 0); 984 985 /* Block invalid access */ 986 if ((AccessMode != KernelMode) && 987 ((LastAddress > (PVOID)MM_USER_PROBE_ADDRESS) || (Address >= LastAddress))) 988 { 989 /* Caller should be in SEH, raise the error */ 990 *MdlPages = LIST_HEAD; 991 ExRaiseStatus(STATUS_ACCESS_VIOLATION); 992 } 993 994 // 995 // Get the process 996 // 997 if (Address <= MM_HIGHEST_USER_ADDRESS) 998 { 999 // 1000 // Get the process 1001 // 1002 CurrentProcess = PsGetCurrentProcess(); 1003 } 1004 else 1005 { 1006 // 1007 // No process 1008 // 1009 CurrentProcess = NULL; 1010 } 1011 1012 // 1013 // Save the number of pages we'll have to lock, and the start address 1014 // 1015 TotalPages = LockPages; 1016 StartAddress = Address; 1017 1018 /* Large pages not supported */ 1019 ASSERT(!MI_IS_PHYSICAL_ADDRESS(Address)); 1020 1021 // 1022 // Now probe them 1023 // 1024 ProbeStatus = STATUS_SUCCESS; 1025 _SEH2_TRY 1026 { 1027 // 1028 // Enter probe loop 1029 // 1030 do 1031 { 1032 // 1033 // Assume failure 1034 // 1035 *MdlPages = LIST_HEAD; 1036 1037 // 1038 // Read 1039 // 1040 *(volatile CHAR*)Address; 1041 1042 // 1043 // Check if this is write access (only probe for user-mode) 1044 // 1045 if ((Operation != IoReadAccess) && 1046 (Address <= MM_HIGHEST_USER_ADDRESS)) 1047 { 1048 // 1049 // Probe for write too 1050 // 1051 ProbeForWriteChar(Address); 1052 } 1053 1054 // 1055 // Next address... 1056 // 1057 Address = PAGE_ALIGN((ULONG_PTR)Address + PAGE_SIZE); 1058 1059 // 1060 // Next page... 1061 // 1062 LockPages--; 1063 MdlPages++; 1064 } while (Address < LastAddress); 1065 1066 // 1067 // Reset back to the original page 1068 // 1069 ASSERT(LockPages == 0); 1070 MdlPages = (PPFN_NUMBER)(Mdl + 1); 1071 } 1072 _SEH2_EXCEPT(EXCEPTION_EXECUTE_HANDLER) 1073 { 1074 // 1075 // Oops :( 1076 // 1077 ProbeStatus = _SEH2_GetExceptionCode(); 1078 } 1079 _SEH2_END; 1080 1081 // 1082 // So how did that go? 1083 // 1084 if (ProbeStatus != STATUS_SUCCESS) 1085 { 1086 // 1087 // Fail 1088 // 1089 DPRINT1("MDL PROBE FAILED!\n"); 1090 Mdl->Process = NULL; 1091 ExRaiseStatus(ProbeStatus); 1092 } 1093 1094 // 1095 // Get the PTE and PDE 1096 // 1097 PointerPte = MiAddressToPte(StartAddress); 1098 PointerPde = MiAddressToPde(StartAddress); 1099 #if (_MI_PAGING_LEVELS >= 3) 1100 PointerPpe = MiAddressToPpe(StartAddress); 1101 #endif 1102 #if (_MI_PAGING_LEVELS == 4) 1103 PointerPxe = MiAddressToPxe(StartAddress); 1104 #endif 1105 1106 // 1107 // Sanity check 1108 // 1109 ASSERT(MdlPages == (PPFN_NUMBER)(Mdl + 1)); 1110 1111 // 1112 // Check what kind of operation this is 1113 // 1114 if (Operation != IoReadAccess) 1115 { 1116 // 1117 // Set the write flag 1118 // 1119 Mdl->MdlFlags |= MDL_WRITE_OPERATION; 1120 } 1121 else 1122 { 1123 // 1124 // Remove the write flag 1125 // 1126 Mdl->MdlFlags &= ~(MDL_WRITE_OPERATION); 1127 } 1128 1129 // 1130 // Mark the MDL as locked *now* 1131 // 1132 Mdl->MdlFlags |= MDL_PAGES_LOCKED; 1133 1134 // 1135 // Check if this came from kernel mode 1136 // 1137 if (Base > MM_HIGHEST_USER_ADDRESS) 1138 { 1139 // 1140 // We should not have a process 1141 // 1142 ASSERT(CurrentProcess == NULL); 1143 Mdl->Process = NULL; 1144 1145 // 1146 // In kernel mode, we don't need to check for write access 1147 // 1148 Operation = IoReadAccess; 1149 1150 // 1151 // Use the PFN lock 1152 // 1153 UsePfnLock = TRUE; 1154 OldIrql = MiAcquirePfnLock(); 1155 } 1156 else 1157 { 1158 // 1159 // Sanity checks 1160 // 1161 ASSERT(TotalPages != 0); 1162 ASSERT(CurrentProcess == PsGetCurrentProcess()); 1163 1164 // 1165 // Track locked pages 1166 // 1167 InterlockedExchangeAddSizeT(&CurrentProcess->NumberOfLockedPages, 1168 TotalPages); 1169 1170 // 1171 // Save the process 1172 // 1173 Mdl->Process = CurrentProcess; 1174 1175 /* Lock the process working set */ 1176 MiLockProcessWorkingSet(CurrentProcess, PsGetCurrentThread()); 1177 UsePfnLock = FALSE; 1178 OldIrql = MM_NOIRQL; 1179 } 1180 1181 // 1182 // Get the last PTE 1183 // 1184 LastPte = MiAddressToPte((PVOID)((ULONG_PTR)LastAddress - 1)); 1185 1186 // 1187 // Loop the pages 1188 // 1189 do 1190 { 1191 // 1192 // Assume failure and check for non-mapped pages 1193 // 1194 *MdlPages = LIST_HEAD; 1195 while ( 1196 #if (_MI_PAGING_LEVELS == 4) 1197 (PointerPxe->u.Hard.Valid == 0) || 1198 #endif 1199 #if (_MI_PAGING_LEVELS >= 3) 1200 (PointerPpe->u.Hard.Valid == 0) || 1201 #endif 1202 (PointerPde->u.Hard.Valid == 0) || 1203 (PointerPte->u.Hard.Valid == 0)) 1204 { 1205 // 1206 // What kind of lock were we using? 1207 // 1208 if (UsePfnLock) 1209 { 1210 // 1211 // Release PFN lock 1212 // 1213 MiReleasePfnLock(OldIrql); 1214 } 1215 else 1216 { 1217 /* Release process working set */ 1218 MiUnlockProcessWorkingSet(CurrentProcess, PsGetCurrentThread()); 1219 } 1220 1221 // 1222 // Access the page 1223 // 1224 Address = MiPteToAddress(PointerPte); 1225 1226 //HACK: Pass a placeholder TrapInformation so the fault handler knows we're unlocked 1227 Status = MmAccessFault(FALSE, Address, KernelMode, (PVOID)(ULONG_PTR)0xBADBADA3BADBADA3ULL); 1228 if (!NT_SUCCESS(Status)) 1229 { 1230 // 1231 // Fail 1232 // 1233 DPRINT1("Access fault failed\n"); 1234 goto Cleanup; 1235 } 1236 1237 // 1238 // What lock should we use? 1239 // 1240 if (UsePfnLock) 1241 { 1242 // 1243 // Grab the PFN lock 1244 // 1245 OldIrql = MiAcquirePfnLock(); 1246 } 1247 else 1248 { 1249 /* Lock the process working set */ 1250 MiLockProcessWorkingSet(CurrentProcess, PsGetCurrentThread()); 1251 } 1252 } 1253 1254 // 1255 // Check if this was a write or modify 1256 // 1257 if (Operation != IoReadAccess) 1258 { 1259 // 1260 // Check if the PTE is not writable 1261 // 1262 if (MI_IS_PAGE_WRITEABLE(PointerPte) == FALSE) 1263 { 1264 // 1265 // Check if it's copy on write 1266 // 1267 if (MI_IS_PAGE_COPY_ON_WRITE(PointerPte)) 1268 { 1269 // 1270 // Get the base address and allow a change for user-mode 1271 // 1272 Address = MiPteToAddress(PointerPte); 1273 if (Address <= MM_HIGHEST_USER_ADDRESS) 1274 { 1275 // 1276 // What kind of lock were we using? 1277 // 1278 if (UsePfnLock) 1279 { 1280 // 1281 // Release PFN lock 1282 // 1283 MiReleasePfnLock(OldIrql); 1284 } 1285 else 1286 { 1287 /* Release process working set */ 1288 MiUnlockProcessWorkingSet(CurrentProcess, PsGetCurrentThread()); 1289 } 1290 1291 // 1292 // Access the page 1293 // 1294 1295 //HACK: Pass a placeholder TrapInformation so the fault handler knows we're unlocked 1296 Status = MmAccessFault(TRUE, Address, KernelMode, (PVOID)(ULONG_PTR)0xBADBADA3BADBADA3ULL); 1297 if (!NT_SUCCESS(Status)) 1298 { 1299 // 1300 // Fail 1301 // 1302 DPRINT1("Access fault failed\n"); 1303 goto Cleanup; 1304 } 1305 1306 // 1307 // Re-acquire the lock 1308 // 1309 if (UsePfnLock) 1310 { 1311 // 1312 // Grab the PFN lock 1313 // 1314 OldIrql = MiAcquirePfnLock(); 1315 } 1316 else 1317 { 1318 /* Lock the process working set */ 1319 MiLockProcessWorkingSet(CurrentProcess, PsGetCurrentThread()); 1320 } 1321 1322 // 1323 // Start over 1324 // 1325 continue; 1326 } 1327 } 1328 1329 // 1330 // Fail, since we won't allow this 1331 // 1332 Status = STATUS_ACCESS_VIOLATION; 1333 goto CleanupWithLock; 1334 } 1335 } 1336 1337 // 1338 // Grab the PFN 1339 // 1340 PageFrameIndex = PFN_FROM_PTE(PointerPte); 1341 Pfn1 = MiGetPfnEntry(PageFrameIndex); 1342 if (Pfn1) 1343 { 1344 /* Either this is for kernel-mode, or the working set is held */ 1345 ASSERT((CurrentProcess == NULL) || (UsePfnLock == FALSE)); 1346 1347 /* No Physical VADs supported yet */ 1348 if (CurrentProcess) ASSERT(CurrentProcess->PhysicalVadRoot == NULL); 1349 1350 /* This address should already exist and be fully valid */ 1351 MiReferenceProbedPageAndBumpLockCount(Pfn1); 1352 } 1353 else 1354 { 1355 // 1356 // For I/O addresses, just remember this 1357 // 1358 Mdl->MdlFlags |= MDL_IO_SPACE; 1359 } 1360 1361 // 1362 // Write the page and move on 1363 // 1364 *MdlPages++ = PageFrameIndex; 1365 PointerPte++; 1366 1367 /* Check if we're on a PDE boundary */ 1368 if (MiIsPteOnPdeBoundary(PointerPte)) PointerPde++; 1369 #if (_MI_PAGING_LEVELS >= 3) 1370 if (MiIsPteOnPpeBoundary(PointerPte)) PointerPpe++; 1371 #endif 1372 #if (_MI_PAGING_LEVELS == 4) 1373 if (MiIsPteOnPxeBoundary(PointerPte)) PointerPxe++; 1374 #endif 1375 1376 } while (PointerPte <= LastPte); 1377 1378 // 1379 // What kind of lock were we using? 1380 // 1381 if (UsePfnLock) 1382 { 1383 // 1384 // Release PFN lock 1385 // 1386 MiReleasePfnLock(OldIrql); 1387 } 1388 else 1389 { 1390 /* Release process working set */ 1391 MiUnlockProcessWorkingSet(CurrentProcess, PsGetCurrentThread()); 1392 } 1393 1394 // 1395 // Sanity check 1396 // 1397 ASSERT((Mdl->MdlFlags & MDL_DESCRIBES_AWE) == 0); 1398 return; 1399 1400 CleanupWithLock: 1401 // 1402 // This is the failure path 1403 // 1404 ASSERT(!NT_SUCCESS(Status)); 1405 1406 // 1407 // What kind of lock were we using? 1408 // 1409 if (UsePfnLock) 1410 { 1411 // 1412 // Release PFN lock 1413 // 1414 MiReleasePfnLock(OldIrql); 1415 } 1416 else 1417 { 1418 /* Release process working set */ 1419 MiUnlockProcessWorkingSet(CurrentProcess, PsGetCurrentThread()); 1420 } 1421 Cleanup: 1422 // 1423 // Pages must be locked so MmUnlock can work 1424 // 1425 ASSERT(Mdl->MdlFlags & MDL_PAGES_LOCKED); 1426 MmUnlockPages(Mdl); 1427 1428 // 1429 // Raise the error 1430 // 1431 ExRaiseStatus(Status); 1432 } 1433 1434 /* 1435 * @implemented 1436 */ 1437 VOID 1438 NTAPI 1439 MmUnlockPages(IN PMDL Mdl) 1440 { 1441 PPFN_NUMBER MdlPages, LastPage; 1442 PEPROCESS Process; 1443 PVOID Base; 1444 ULONG Flags, PageCount; 1445 KIRQL OldIrql; 1446 PMMPFN Pfn1; 1447 DPRINT("Unlocking MDL: %p\n", Mdl); 1448 1449 // 1450 // Sanity checks 1451 // 1452 ASSERT((Mdl->MdlFlags & MDL_PAGES_LOCKED) != 0); 1453 ASSERT((Mdl->MdlFlags & MDL_SOURCE_IS_NONPAGED_POOL) == 0); 1454 ASSERT((Mdl->MdlFlags & MDL_PARTIAL) == 0); 1455 ASSERT(Mdl->ByteCount != 0); 1456 1457 // 1458 // Get the process associated and capture the flags which are volatile 1459 // 1460 Process = Mdl->Process; 1461 Flags = Mdl->MdlFlags; 1462 1463 // 1464 // Automagically undo any calls to MmGetSystemAddressForMdl's for this MDL 1465 // 1466 if (Mdl->MdlFlags & MDL_MAPPED_TO_SYSTEM_VA) 1467 { 1468 // 1469 // Unmap the pages from system space 1470 // 1471 MmUnmapLockedPages(Mdl->MappedSystemVa, Mdl); 1472 } 1473 1474 // 1475 // Get the page count 1476 // 1477 MdlPages = (PPFN_NUMBER)(Mdl + 1); 1478 Base = (PVOID)((ULONG_PTR)Mdl->StartVa + Mdl->ByteOffset); 1479 PageCount = ADDRESS_AND_SIZE_TO_SPAN_PAGES(Base, Mdl->ByteCount); 1480 ASSERT(PageCount != 0); 1481 1482 // 1483 // We don't support AWE 1484 // 1485 if (Flags & MDL_DESCRIBES_AWE) ASSERT(FALSE); 1486 1487 // 1488 // Check if the buffer is mapped I/O space 1489 // 1490 if (Flags & MDL_IO_SPACE) 1491 { 1492 // 1493 // Acquire PFN lock 1494 // 1495 OldIrql = MiAcquirePfnLock(); 1496 1497 // 1498 // Loop every page 1499 // 1500 LastPage = MdlPages + PageCount; 1501 do 1502 { 1503 // 1504 // Last page, break out 1505 // 1506 if (*MdlPages == LIST_HEAD) break; 1507 1508 // 1509 // Check if this page is in the PFN database 1510 // 1511 Pfn1 = MiGetPfnEntry(*MdlPages); 1512 if (Pfn1) MiDereferencePfnAndDropLockCount(Pfn1); 1513 } while (++MdlPages < LastPage); 1514 1515 // 1516 // Release the lock 1517 // 1518 MiReleasePfnLock(OldIrql); 1519 1520 // 1521 // Check if we have a process 1522 // 1523 if (Process) 1524 { 1525 // 1526 // Handle the accounting of locked pages 1527 // 1528 ASSERT(Process->NumberOfLockedPages > 0); 1529 InterlockedExchangeAddSizeT(&Process->NumberOfLockedPages, 1530 -(LONG_PTR)PageCount); 1531 } 1532 1533 // 1534 // We're done 1535 // 1536 Mdl->MdlFlags &= ~MDL_IO_SPACE; 1537 Mdl->MdlFlags &= ~MDL_PAGES_LOCKED; 1538 return; 1539 } 1540 1541 // 1542 // Check if we have a process 1543 // 1544 if (Process) 1545 { 1546 // 1547 // Handle the accounting of locked pages 1548 // 1549 ASSERT(Process->NumberOfLockedPages > 0); 1550 InterlockedExchangeAddSizeT(&Process->NumberOfLockedPages, 1551 -(LONG_PTR)PageCount); 1552 } 1553 1554 // 1555 // Loop every page 1556 // 1557 LastPage = MdlPages + PageCount; 1558 do 1559 { 1560 // 1561 // Last page reached 1562 // 1563 if (*MdlPages == LIST_HEAD) 1564 { 1565 // 1566 // Were there no pages at all? 1567 // 1568 if (MdlPages == (PPFN_NUMBER)(Mdl + 1)) 1569 { 1570 // 1571 // We're already done 1572 // 1573 Mdl->MdlFlags &= ~MDL_PAGES_LOCKED; 1574 return; 1575 } 1576 1577 // 1578 // Otherwise, stop here 1579 // 1580 LastPage = MdlPages; 1581 break; 1582 } 1583 1584 /* Save the PFN entry instead for the secondary loop */ 1585 *MdlPages = (PFN_NUMBER)MiGetPfnEntry(*MdlPages); 1586 ASSERT(*MdlPages != 0); 1587 } while (++MdlPages < LastPage); 1588 1589 // 1590 // Reset pointer 1591 // 1592 MdlPages = (PPFN_NUMBER)(Mdl + 1); 1593 1594 // 1595 // Now grab the PFN lock for the actual unlock and dereference 1596 // 1597 OldIrql = MiAcquirePfnLock(); 1598 do 1599 { 1600 /* Get the current entry and reference count */ 1601 Pfn1 = (PMMPFN)*MdlPages; 1602 MiDereferencePfnAndDropLockCount(Pfn1); 1603 } while (++MdlPages < LastPage); 1604 1605 // 1606 // Release the lock 1607 // 1608 MiReleasePfnLock(OldIrql); 1609 1610 // 1611 // We're done 1612 // 1613 Mdl->MdlFlags &= ~MDL_PAGES_LOCKED; 1614 } 1615 1616 /* 1617 * @unimplemented 1618 */ 1619 NTSTATUS 1620 NTAPI 1621 MmAdvanceMdl(IN PMDL Mdl, 1622 IN ULONG NumberOfBytes) 1623 { 1624 UNIMPLEMENTED; 1625 return STATUS_NOT_IMPLEMENTED; 1626 } 1627 1628 /* 1629 * @unimplemented 1630 */ 1631 PVOID 1632 NTAPI 1633 MmMapLockedPagesWithReservedMapping(IN PVOID MappingAddress, 1634 IN ULONG PoolTag, 1635 IN PMDL MemoryDescriptorList, 1636 IN MEMORY_CACHING_TYPE CacheType) 1637 { 1638 UNIMPLEMENTED; 1639 return 0; 1640 } 1641 1642 /* 1643 * @unimplemented 1644 */ 1645 VOID 1646 NTAPI 1647 MmUnmapReservedMapping(IN PVOID BaseAddress, 1648 IN ULONG PoolTag, 1649 IN PMDL MemoryDescriptorList) 1650 { 1651 UNIMPLEMENTED; 1652 } 1653 1654 /* 1655 * @unimplemented 1656 */ 1657 NTSTATUS 1658 NTAPI 1659 MmPrefetchPages(IN ULONG NumberOfLists, 1660 IN PREAD_LIST *ReadLists) 1661 { 1662 UNIMPLEMENTED; 1663 return STATUS_NOT_IMPLEMENTED; 1664 } 1665 1666 /* 1667 * @unimplemented 1668 */ 1669 NTSTATUS 1670 NTAPI 1671 MmProtectMdlSystemAddress(IN PMDL MemoryDescriptorList, 1672 IN ULONG NewProtect) 1673 { 1674 UNIMPLEMENTED; 1675 return STATUS_NOT_IMPLEMENTED; 1676 } 1677 1678 /* 1679 * @unimplemented 1680 */ 1681 VOID 1682 NTAPI 1683 MmProbeAndLockProcessPages(IN OUT PMDL MemoryDescriptorList, 1684 IN PEPROCESS Process, 1685 IN KPROCESSOR_MODE AccessMode, 1686 IN LOCK_OPERATION Operation) 1687 { 1688 UNIMPLEMENTED; 1689 } 1690 1691 1692 /* 1693 * @unimplemented 1694 */ 1695 VOID 1696 NTAPI 1697 MmProbeAndLockSelectedPages(IN OUT PMDL MemoryDescriptorList, 1698 IN LARGE_INTEGER PageList[], 1699 IN KPROCESSOR_MODE AccessMode, 1700 IN LOCK_OPERATION Operation) 1701 { 1702 UNIMPLEMENTED; 1703 } 1704 1705 /* 1706 * @unimplemented 1707 */ 1708 VOID 1709 NTAPI 1710 MmMapMemoryDumpMdl(IN PMDL Mdl) 1711 { 1712 UNIMPLEMENTED; 1713 } 1714 1715 /* EOF */ 1716