1 /* 2 * PROJECT: ReactOS Kernel 3 * LICENSE: BSD - See COPYING.ARM in the top level directory 4 * FILE: ntoskrnl/mm/ARM3/pool.c 5 * PURPOSE: ARM Memory Manager Pool Allocator 6 * PROGRAMMERS: ReactOS Portable Systems Group 7 */ 8 9 /* INCLUDES *******************************************************************/ 10 11 #include <ntoskrnl.h> 12 #define NDEBUG 13 #include <debug.h> 14 15 #define MODULE_INVOLVED_IN_ARM3 16 #include <mm/ARM3/miarm.h> 17 18 /* GLOBALS ********************************************************************/ 19 20 LIST_ENTRY MmNonPagedPoolFreeListHead[MI_MAX_FREE_PAGE_LISTS]; 21 PFN_COUNT MmNumberOfFreeNonPagedPool, MiExpansionPoolPagesInitialCharge; 22 PVOID MmNonPagedPoolEnd0; 23 PFN_NUMBER MiStartOfInitialPoolFrame, MiEndOfInitialPoolFrame; 24 KGUARDED_MUTEX MmPagedPoolMutex; 25 MM_PAGED_POOL_INFO MmPagedPoolInfo; 26 SIZE_T MmAllocatedNonPagedPool; 27 SIZE_T MmTotalNonPagedPoolQuota; 28 SIZE_T MmTotalPagedPoolQuota; 29 ULONG MmSpecialPoolTag; 30 ULONG MmConsumedPoolPercentage; 31 BOOLEAN MmProtectFreedNonPagedPool; 32 SLIST_HEADER MiNonPagedPoolSListHead; 33 ULONG MiNonPagedPoolSListMaximum = 4; 34 SLIST_HEADER MiPagedPoolSListHead; 35 ULONG MiPagedPoolSListMaximum = 8; 36 37 /* PRIVATE FUNCTIONS **********************************************************/ 38 39 VOID 40 NTAPI 41 MiProtectFreeNonPagedPool(IN PVOID VirtualAddress, 42 IN ULONG PageCount) 43 { 44 PMMPTE PointerPte, LastPte; 45 MMPTE TempPte; 46 47 /* If pool is physical, can't protect PTEs */ 48 if (MI_IS_PHYSICAL_ADDRESS(VirtualAddress)) return; 49 50 /* Get PTE pointers and loop */ 51 PointerPte = MiAddressToPte(VirtualAddress); 52 LastPte = PointerPte + PageCount; 53 do 54 { 55 /* Capture the PTE for safety */ 56 TempPte = *PointerPte; 57 58 /* Mark it as an invalid PTE, set proto bit to recognize it as pool */ 59 TempPte.u.Hard.Valid = 0; 60 TempPte.u.Soft.Prototype = 1; 61 MI_WRITE_INVALID_PTE(PointerPte, TempPte); 62 } while (++PointerPte < LastPte); 63 64 /* Flush the TLB */ 65 KeFlushEntireTb(TRUE, TRUE); 66 } 67 68 BOOLEAN 69 NTAPI 70 MiUnProtectFreeNonPagedPool(IN PVOID VirtualAddress, 71 IN ULONG PageCount) 72 { 73 PMMPTE PointerPte; 74 MMPTE TempPte; 75 PFN_NUMBER UnprotectedPages = 0; 76 77 /* If pool is physical, can't protect PTEs */ 78 if (MI_IS_PHYSICAL_ADDRESS(VirtualAddress)) return FALSE; 79 80 /* Get, and capture the PTE */ 81 PointerPte = MiAddressToPte(VirtualAddress); 82 TempPte = *PointerPte; 83 84 /* Loop protected PTEs */ 85 while ((TempPte.u.Hard.Valid == 0) && (TempPte.u.Soft.Prototype == 1)) 86 { 87 /* Unprotect the PTE */ 88 TempPte.u.Hard.Valid = 1; 89 TempPte.u.Soft.Prototype = 0; 90 MI_WRITE_VALID_PTE(PointerPte, TempPte); 91 92 /* One more page */ 93 if (++UnprotectedPages == PageCount) break; 94 95 /* Capture next PTE */ 96 TempPte = *(++PointerPte); 97 } 98 99 /* Return if any pages were unprotected */ 100 return UnprotectedPages ? TRUE : FALSE; 101 } 102 103 FORCEINLINE 104 VOID 105 MiProtectedPoolUnProtectLinks(IN PLIST_ENTRY Links, 106 OUT PVOID* PoolFlink, 107 OUT PVOID* PoolBlink) 108 { 109 BOOLEAN Safe; 110 PVOID PoolVa; 111 112 /* Initialize variables */ 113 *PoolFlink = *PoolBlink = NULL; 114 115 /* Check if the list has entries */ 116 if (IsListEmpty(Links) == FALSE) 117 { 118 /* We are going to need to forward link to do an insert */ 119 PoolVa = Links->Flink; 120 121 /* So make it safe to access */ 122 Safe = MiUnProtectFreeNonPagedPool(PoolVa, 1); 123 if (Safe) *PoolFlink = PoolVa; 124 } 125 126 /* Are we going to need a backward link too? */ 127 if (Links != Links->Blink) 128 { 129 /* Get the head's backward link for the insert */ 130 PoolVa = Links->Blink; 131 132 /* Make it safe to access */ 133 Safe = MiUnProtectFreeNonPagedPool(PoolVa, 1); 134 if (Safe) *PoolBlink = PoolVa; 135 } 136 } 137 138 FORCEINLINE 139 VOID 140 MiProtectedPoolProtectLinks(IN PVOID PoolFlink, 141 IN PVOID PoolBlink) 142 { 143 /* Reprotect the pages, if they got unprotected earlier */ 144 if (PoolFlink) MiProtectFreeNonPagedPool(PoolFlink, 1); 145 if (PoolBlink) MiProtectFreeNonPagedPool(PoolBlink, 1); 146 } 147 148 VOID 149 NTAPI 150 MiProtectedPoolInsertList(IN PLIST_ENTRY ListHead, 151 IN PLIST_ENTRY Entry, 152 IN BOOLEAN Critical) 153 { 154 PVOID PoolFlink, PoolBlink; 155 156 /* Make the list accessible */ 157 MiProtectedPoolUnProtectLinks(ListHead, &PoolFlink, &PoolBlink); 158 159 /* Now insert in the right position */ 160 Critical ? InsertHeadList(ListHead, Entry) : InsertTailList(ListHead, Entry); 161 162 /* And reprotect the pages containing the free links */ 163 MiProtectedPoolProtectLinks(PoolFlink, PoolBlink); 164 } 165 166 VOID 167 NTAPI 168 MiProtectedPoolRemoveEntryList(IN PLIST_ENTRY Entry) 169 { 170 PVOID PoolFlink, PoolBlink; 171 172 /* Make the list accessible */ 173 MiProtectedPoolUnProtectLinks(Entry, &PoolFlink, &PoolBlink); 174 175 /* Now remove */ 176 RemoveEntryList(Entry); 177 178 /* And reprotect the pages containing the free links */ 179 if (PoolFlink) MiProtectFreeNonPagedPool(PoolFlink, 1); 180 if (PoolBlink) MiProtectFreeNonPagedPool(PoolBlink, 1); 181 } 182 183 CODE_SEG("INIT") 184 VOID 185 NTAPI 186 MiInitializeNonPagedPoolThresholds(VOID) 187 { 188 PFN_NUMBER Size = MmMaximumNonPagedPoolInPages; 189 190 /* Default low threshold of 8MB or one third of nonpaged pool */ 191 MiLowNonPagedPoolThreshold = (8 * _1MB) >> PAGE_SHIFT; 192 MiLowNonPagedPoolThreshold = min(MiLowNonPagedPoolThreshold, Size / 3); 193 194 /* Default high threshold of 20MB or 50% */ 195 MiHighNonPagedPoolThreshold = (20 * _1MB) >> PAGE_SHIFT; 196 MiHighNonPagedPoolThreshold = min(MiHighNonPagedPoolThreshold, Size / 2); 197 ASSERT(MiLowNonPagedPoolThreshold < MiHighNonPagedPoolThreshold); 198 } 199 200 CODE_SEG("INIT") 201 VOID 202 NTAPI 203 MiInitializePoolEvents(VOID) 204 { 205 KIRQL OldIrql; 206 PFN_NUMBER FreePoolInPages; 207 208 /* Lock paged pool */ 209 KeAcquireGuardedMutex(&MmPagedPoolMutex); 210 211 /* Total size of the paged pool minus the allocated size, is free */ 212 FreePoolInPages = MmSizeOfPagedPoolInPages - MmPagedPoolInfo.AllocatedPagedPool; 213 214 /* Check the initial state high state */ 215 if (FreePoolInPages >= MiHighPagedPoolThreshold) 216 { 217 /* We have plenty of pool */ 218 KeSetEvent(MiHighPagedPoolEvent, 0, FALSE); 219 } 220 else 221 { 222 /* We don't */ 223 KeClearEvent(MiHighPagedPoolEvent); 224 } 225 226 /* Check the initial low state */ 227 if (FreePoolInPages <= MiLowPagedPoolThreshold) 228 { 229 /* We're very low in free pool memory */ 230 KeSetEvent(MiLowPagedPoolEvent, 0, FALSE); 231 } 232 else 233 { 234 /* We're not */ 235 KeClearEvent(MiLowPagedPoolEvent); 236 } 237 238 /* Release the paged pool lock */ 239 KeReleaseGuardedMutex(&MmPagedPoolMutex); 240 241 /* Now it's time for the nonpaged pool lock */ 242 OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock); 243 244 /* Free pages are the maximum minus what's been allocated */ 245 FreePoolInPages = MmMaximumNonPagedPoolInPages - MmAllocatedNonPagedPool; 246 247 /* Check if we have plenty */ 248 if (FreePoolInPages >= MiHighNonPagedPoolThreshold) 249 { 250 /* We do, set the event */ 251 KeSetEvent(MiHighNonPagedPoolEvent, 0, FALSE); 252 } 253 else 254 { 255 /* We don't, clear the event */ 256 KeClearEvent(MiHighNonPagedPoolEvent); 257 } 258 259 /* Check if we have very little */ 260 if (FreePoolInPages <= MiLowNonPagedPoolThreshold) 261 { 262 /* We do, set the event */ 263 KeSetEvent(MiLowNonPagedPoolEvent, 0, FALSE); 264 } 265 else 266 { 267 /* We don't, clear it */ 268 KeClearEvent(MiLowNonPagedPoolEvent); 269 } 270 271 /* We're done, release the nonpaged pool lock */ 272 KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql); 273 } 274 275 CODE_SEG("INIT") 276 VOID 277 NTAPI 278 MiInitializeNonPagedPool(VOID) 279 { 280 ULONG i; 281 PFN_COUNT PoolPages; 282 PMMFREE_POOL_ENTRY FreeEntry, FirstEntry; 283 PMMPTE PointerPte; 284 PAGED_CODE(); 285 286 // 287 // Initialize the pool S-LISTs as well as their maximum count. In general, 288 // we'll allow 8 times the default on a 2GB system, and two times the default 289 // on a 1GB system. 290 // 291 InitializeSListHead(&MiPagedPoolSListHead); 292 InitializeSListHead(&MiNonPagedPoolSListHead); 293 if (MmNumberOfPhysicalPages >= ((2 * _1GB) /PAGE_SIZE)) 294 { 295 MiNonPagedPoolSListMaximum *= 8; 296 MiPagedPoolSListMaximum *= 8; 297 } 298 else if (MmNumberOfPhysicalPages >= (_1GB /PAGE_SIZE)) 299 { 300 MiNonPagedPoolSListMaximum *= 2; 301 MiPagedPoolSListMaximum *= 2; 302 } 303 304 // 305 // However if debugging options for the pool are enabled, turn off the S-LIST 306 // to reduce the risk of messing things up even more 307 // 308 if (MmProtectFreedNonPagedPool) 309 { 310 MiNonPagedPoolSListMaximum = 0; 311 MiPagedPoolSListMaximum = 0; 312 } 313 314 // 315 // We keep 4 lists of free pages (4 lists help avoid contention) 316 // 317 for (i = 0; i < MI_MAX_FREE_PAGE_LISTS; i++) 318 { 319 // 320 // Initialize each of them 321 // 322 InitializeListHead(&MmNonPagedPoolFreeListHead[i]); 323 } 324 325 // 326 // Calculate how many pages the initial nonpaged pool has 327 // 328 PoolPages = (PFN_COUNT)BYTES_TO_PAGES(MmSizeOfNonPagedPoolInBytes); 329 MmNumberOfFreeNonPagedPool = PoolPages; 330 331 // 332 // Initialize the first free entry 333 // 334 FreeEntry = MmNonPagedPoolStart; 335 FirstEntry = FreeEntry; 336 FreeEntry->Size = PoolPages; 337 FreeEntry->Signature = MM_FREE_POOL_SIGNATURE; 338 FreeEntry->Owner = FirstEntry; 339 340 // 341 // Insert it into the last list 342 // 343 InsertHeadList(&MmNonPagedPoolFreeListHead[MI_MAX_FREE_PAGE_LISTS - 1], 344 &FreeEntry->List); 345 346 // 347 // Now create free entries for every single other page 348 // 349 while (PoolPages-- > 1) 350 { 351 // 352 // Link them all back to the original entry 353 // 354 FreeEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)FreeEntry + PAGE_SIZE); 355 FreeEntry->Owner = FirstEntry; 356 FreeEntry->Signature = MM_FREE_POOL_SIGNATURE; 357 } 358 359 // 360 // Validate and remember first allocated pool page 361 // 362 PointerPte = MiAddressToPte(MmNonPagedPoolStart); 363 ASSERT(PointerPte->u.Hard.Valid == 1); 364 MiStartOfInitialPoolFrame = PFN_FROM_PTE(PointerPte); 365 366 // 367 // Keep track of where initial nonpaged pool ends 368 // 369 MmNonPagedPoolEnd0 = (PVOID)((ULONG_PTR)MmNonPagedPoolStart + 370 MmSizeOfNonPagedPoolInBytes); 371 372 // 373 // Validate and remember last allocated pool page 374 // 375 PointerPte = MiAddressToPte((PVOID)((ULONG_PTR)MmNonPagedPoolEnd0 - 1)); 376 ASSERT(PointerPte->u.Hard.Valid == 1); 377 MiEndOfInitialPoolFrame = PFN_FROM_PTE(PointerPte); 378 379 // 380 // Validate the first nonpaged pool expansion page (which is a guard page) 381 // 382 PointerPte = MiAddressToPte(MmNonPagedPoolExpansionStart); 383 ASSERT(PointerPte->u.Hard.Valid == 0); 384 385 // 386 // Calculate the size of the expansion region alone 387 // 388 MiExpansionPoolPagesInitialCharge = (PFN_COUNT) 389 BYTES_TO_PAGES(MmMaximumNonPagedPoolInBytes - MmSizeOfNonPagedPoolInBytes); 390 391 // 392 // Remove 2 pages, since there's a guard page on top and on the bottom 393 // 394 MiExpansionPoolPagesInitialCharge -= 2; 395 396 // 397 // Now initialize the nonpaged pool expansion PTE space. Remember there's a 398 // guard page on top so make sure to skip it. The bottom guard page will be 399 // guaranteed by the fact our size is off by one. 400 // 401 MiInitializeSystemPtes(PointerPte + 1, 402 MiExpansionPoolPagesInitialCharge, 403 NonPagedPoolExpansion); 404 } 405 406 POOL_TYPE 407 NTAPI 408 MmDeterminePoolType(IN PVOID PoolAddress) 409 { 410 // 411 // Use a simple bounds check 412 // 413 if (PoolAddress >= MmPagedPoolStart && PoolAddress <= MmPagedPoolEnd) 414 return PagedPool; 415 else if (PoolAddress >= MmNonPagedPoolStart && PoolAddress <= MmNonPagedPoolEnd) 416 return NonPagedPool; 417 KeBugCheckEx(BAD_POOL_CALLER, 0x42, (ULONG_PTR)PoolAddress, 0, 0); 418 } 419 420 PVOID 421 NTAPI 422 MiAllocatePoolPages(IN POOL_TYPE PoolType, 423 IN SIZE_T SizeInBytes) 424 { 425 PFN_NUMBER PageFrameNumber; 426 PFN_COUNT SizeInPages, PageTableCount; 427 ULONG i; 428 KIRQL OldIrql; 429 PLIST_ENTRY NextEntry, NextHead, LastHead; 430 PMMPTE PointerPte, StartPte; 431 PMMPDE PointerPde; 432 ULONG EndAllocation; 433 MMPTE TempPte; 434 MMPDE TempPde; 435 PMMPFN Pfn1; 436 PVOID BaseVa, BaseVaStart; 437 PMMFREE_POOL_ENTRY FreeEntry; 438 439 // 440 // Figure out how big the allocation is in pages 441 // 442 SizeInPages = (PFN_COUNT)BYTES_TO_PAGES(SizeInBytes); 443 444 // 445 // Check for overflow 446 // 447 if (SizeInPages == 0) 448 { 449 // 450 // Fail 451 // 452 return NULL; 453 } 454 455 // 456 // Handle paged pool 457 // 458 if ((PoolType & BASE_POOL_TYPE_MASK) == PagedPool) 459 { 460 // 461 // If only one page is being requested, try to grab it from the S-LIST 462 // 463 if ((SizeInPages == 1) && (ExQueryDepthSList(&MiPagedPoolSListHead))) 464 { 465 BaseVa = InterlockedPopEntrySList(&MiPagedPoolSListHead); 466 if (BaseVa) return BaseVa; 467 } 468 469 // 470 // Lock the paged pool mutex 471 // 472 KeAcquireGuardedMutex(&MmPagedPoolMutex); 473 474 // 475 // Find some empty allocation space 476 // 477 i = RtlFindClearBitsAndSet(MmPagedPoolInfo.PagedPoolAllocationMap, 478 SizeInPages, 479 MmPagedPoolInfo.PagedPoolHint); 480 if (i == 0xFFFFFFFF) 481 { 482 // 483 // Get the page bit count 484 // 485 i = ((SizeInPages - 1) / PTE_PER_PAGE) + 1; 486 DPRINT("Paged pool expansion: %lu %x\n", i, SizeInPages); 487 488 // 489 // Check if there is enougn paged pool expansion space left 490 // 491 if (MmPagedPoolInfo.NextPdeForPagedPoolExpansion > 492 (PMMPDE)MiAddressToPte(MmPagedPoolInfo.LastPteForPagedPool)) 493 { 494 // 495 // Out of memory! 496 // 497 DPRINT1("FAILED to allocate %Iu bytes from paged pool\n", SizeInBytes); 498 KeReleaseGuardedMutex(&MmPagedPoolMutex); 499 return NULL; 500 } 501 502 // 503 // Check if we'll have to expand past the last PTE we have available 504 // 505 if (((i - 1) + MmPagedPoolInfo.NextPdeForPagedPoolExpansion) > 506 (PMMPDE)MiAddressToPte(MmPagedPoolInfo.LastPteForPagedPool)) 507 { 508 // 509 // We can only support this much then 510 // 511 PointerPde = MiPteToPde(MmPagedPoolInfo.LastPteForPagedPool); 512 PageTableCount = (PFN_COUNT)(PointerPde + 1 - 513 MmPagedPoolInfo.NextPdeForPagedPoolExpansion); 514 ASSERT(PageTableCount < i); 515 i = PageTableCount; 516 } 517 else 518 { 519 // 520 // Otherwise, there is plenty of space left for this expansion 521 // 522 PageTableCount = i; 523 } 524 525 // 526 // Get the template PDE we'll use to expand 527 // 528 TempPde = ValidKernelPde; 529 530 // 531 // Get the first PTE in expansion space 532 // 533 PointerPde = MmPagedPoolInfo.NextPdeForPagedPoolExpansion; 534 BaseVa = MiPdeToPte(PointerPde); 535 BaseVaStart = BaseVa; 536 537 // 538 // Lock the PFN database and loop pages 539 // 540 OldIrql = MiAcquirePfnLock(); 541 do 542 { 543 // 544 // It should not already be valid 545 // 546 ASSERT(PointerPde->u.Hard.Valid == 0); 547 548 /* Request a page */ 549 MI_SET_USAGE(MI_USAGE_PAGED_POOL); 550 MI_SET_PROCESS2("Kernel"); 551 PageFrameNumber = MiRemoveAnyPage(MI_GET_NEXT_COLOR()); 552 TempPde.u.Hard.PageFrameNumber = PageFrameNumber; 553 #if (_MI_PAGING_LEVELS >= 3) 554 /* On PAE/x64 systems, there's no double-buffering */ 555 /* Initialize the PFN entry for it */ 556 MiInitializePfnForOtherProcess(PageFrameNumber, 557 (PMMPTE)PointerPde, 558 PFN_FROM_PTE(MiAddressToPte(PointerPde))); 559 560 /* Write the actual PDE now */ 561 MI_WRITE_VALID_PDE(PointerPde, TempPde); 562 #else 563 // 564 // Save it into our double-buffered system page directory 565 // 566 MmSystemPagePtes[((ULONG_PTR)PointerPde & (SYSTEM_PD_SIZE - 1)) / sizeof(MMPTE)] = TempPde; 567 568 /* Initialize the PFN */ 569 MiInitializePfnForOtherProcess(PageFrameNumber, 570 (PMMPTE)PointerPde, 571 MmSystemPageDirectory[(PointerPde - MiAddressToPde(NULL)) / PDE_PER_PAGE]); 572 #endif 573 574 // 575 // Move on to the next expansion address 576 // 577 PointerPde++; 578 BaseVa = (PVOID)((ULONG_PTR)BaseVa + PAGE_SIZE); 579 i--; 580 } while (i > 0); 581 582 // 583 // Release the PFN database lock 584 // 585 MiReleasePfnLock(OldIrql); 586 587 // 588 // These pages are now available, clear their availablity bits 589 // 590 EndAllocation = (ULONG)(MmPagedPoolInfo.NextPdeForPagedPoolExpansion - 591 (PMMPDE)MiAddressToPte(MmPagedPoolInfo.FirstPteForPagedPool)) * 592 PTE_PER_PAGE; 593 RtlClearBits(MmPagedPoolInfo.PagedPoolAllocationMap, 594 EndAllocation, 595 PageTableCount * PTE_PER_PAGE); 596 597 // 598 // Update the next expansion location 599 // 600 MmPagedPoolInfo.NextPdeForPagedPoolExpansion += PageTableCount; 601 602 // 603 // Zero out the newly available memory 604 // 605 RtlZeroMemory(BaseVaStart, PageTableCount * PAGE_SIZE); 606 607 // 608 // Now try consuming the pages again 609 // 610 i = RtlFindClearBitsAndSet(MmPagedPoolInfo.PagedPoolAllocationMap, 611 SizeInPages, 612 0); 613 if (i == 0xFFFFFFFF) 614 { 615 // 616 // Out of memory! 617 // 618 DPRINT1("FAILED to allocate %Iu bytes from paged pool\n", SizeInBytes); 619 KeReleaseGuardedMutex(&MmPagedPoolMutex); 620 return NULL; 621 } 622 } 623 624 // 625 // Update the pool hint if the request was just one page 626 // 627 if (SizeInPages == 1) MmPagedPoolInfo.PagedPoolHint = i + 1; 628 629 // 630 // Update the end bitmap so we know the bounds of this allocation when 631 // the time comes to free it 632 // 633 EndAllocation = i + SizeInPages - 1; 634 RtlSetBit(MmPagedPoolInfo.EndOfPagedPoolBitmap, EndAllocation); 635 636 // 637 // Now we can release the lock (it mainly protects the bitmap) 638 // 639 KeReleaseGuardedMutex(&MmPagedPoolMutex); 640 641 // 642 // Now figure out where this allocation starts 643 // 644 BaseVa = (PVOID)((ULONG_PTR)MmPagedPoolStart + (i << PAGE_SHIFT)); 645 646 // 647 // Flush the TLB 648 // 649 KeFlushEntireTb(TRUE, TRUE); 650 651 /* Setup a demand-zero writable PTE */ 652 MI_MAKE_SOFTWARE_PTE(&TempPte, MM_READWRITE); 653 654 // 655 // Find the first and last PTE, then loop them all 656 // 657 PointerPte = MiAddressToPte(BaseVa); 658 StartPte = PointerPte + SizeInPages; 659 do 660 { 661 // 662 // Write the demand zero PTE and keep going 663 // 664 MI_WRITE_INVALID_PTE(PointerPte, TempPte); 665 } while (++PointerPte < StartPte); 666 667 // 668 // Return the allocation address to the caller 669 // 670 return BaseVa; 671 } 672 673 // 674 // If only one page is being requested, try to grab it from the S-LIST 675 // 676 if ((SizeInPages == 1) && (ExQueryDepthSList(&MiNonPagedPoolSListHead))) 677 { 678 BaseVa = InterlockedPopEntrySList(&MiNonPagedPoolSListHead); 679 if (BaseVa) return BaseVa; 680 } 681 682 // 683 // Allocations of less than 4 pages go into their individual buckets 684 // 685 i = min(SizeInPages, MI_MAX_FREE_PAGE_LISTS) - 1; 686 687 // 688 // Loop through all the free page lists based on the page index 689 // 690 NextHead = &MmNonPagedPoolFreeListHead[i]; 691 LastHead = &MmNonPagedPoolFreeListHead[MI_MAX_FREE_PAGE_LISTS]; 692 693 // 694 // Acquire the nonpaged pool lock 695 // 696 OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock); 697 do 698 { 699 // 700 // Now loop through all the free page entries in this given list 701 // 702 NextEntry = NextHead->Flink; 703 while (NextEntry != NextHead) 704 { 705 /* Is freed non paged pool enabled */ 706 if (MmProtectFreedNonPagedPool) 707 { 708 /* We need to be able to touch this page, unprotect it */ 709 MiUnProtectFreeNonPagedPool(NextEntry, 0); 710 } 711 712 // 713 // Grab the entry and see if it can handle our allocation 714 // 715 FreeEntry = CONTAINING_RECORD(NextEntry, MMFREE_POOL_ENTRY, List); 716 ASSERT(FreeEntry->Signature == MM_FREE_POOL_SIGNATURE); 717 if (FreeEntry->Size >= SizeInPages) 718 { 719 // 720 // It does, so consume the pages from here 721 // 722 FreeEntry->Size -= SizeInPages; 723 724 // 725 // The allocation will begin in this free page area 726 // 727 BaseVa = (PVOID)((ULONG_PTR)FreeEntry + 728 (FreeEntry->Size << PAGE_SHIFT)); 729 730 /* Remove the item from the list, depending if pool is protected */ 731 if (MmProtectFreedNonPagedPool) 732 MiProtectedPoolRemoveEntryList(&FreeEntry->List); 733 else 734 RemoveEntryList(&FreeEntry->List); 735 736 // 737 // However, check if its' still got space left 738 // 739 if (FreeEntry->Size != 0) 740 { 741 /* Check which list to insert this entry into */ 742 i = min(FreeEntry->Size, MI_MAX_FREE_PAGE_LISTS) - 1; 743 744 /* Insert the entry into the free list head, check for prot. pool */ 745 if (MmProtectFreedNonPagedPool) 746 MiProtectedPoolInsertList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List, TRUE); 747 else 748 InsertTailList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List); 749 750 /* Is freed non paged pool protected? */ 751 if (MmProtectFreedNonPagedPool) 752 { 753 /* Protect the freed pool! */ 754 MiProtectFreeNonPagedPool(FreeEntry, FreeEntry->Size); 755 } 756 } 757 758 // 759 // Grab the PTE for this allocation 760 // 761 PointerPte = MiAddressToPte(BaseVa); 762 ASSERT(PointerPte->u.Hard.Valid == 1); 763 764 // 765 // Grab the PFN NextEntry and index 766 // 767 Pfn1 = MiGetPfnEntry(PFN_FROM_PTE(PointerPte)); 768 769 // 770 // Now mark it as the beginning of an allocation 771 // 772 ASSERT(Pfn1->u3.e1.StartOfAllocation == 0); 773 Pfn1->u3.e1.StartOfAllocation = 1; 774 775 /* Mark it as special pool if needed */ 776 ASSERT(Pfn1->u4.VerifierAllocation == 0); 777 if (PoolType & VERIFIER_POOL_MASK) 778 { 779 Pfn1->u4.VerifierAllocation = 1; 780 } 781 782 // 783 // Check if the allocation is larger than one page 784 // 785 if (SizeInPages != 1) 786 { 787 // 788 // Navigate to the last PFN entry and PTE 789 // 790 PointerPte += SizeInPages - 1; 791 ASSERT(PointerPte->u.Hard.Valid == 1); 792 Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber); 793 } 794 795 // 796 // Mark this PFN as the last (might be the same as the first) 797 // 798 ASSERT(Pfn1->u3.e1.EndOfAllocation == 0); 799 Pfn1->u3.e1.EndOfAllocation = 1; 800 801 // 802 // Release the nonpaged pool lock, and return the allocation 803 // 804 KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql); 805 return BaseVa; 806 } 807 808 // 809 // Try the next free page entry 810 // 811 NextEntry = FreeEntry->List.Flink; 812 813 /* Is freed non paged pool protected? */ 814 if (MmProtectFreedNonPagedPool) 815 { 816 /* Protect the freed pool! */ 817 MiProtectFreeNonPagedPool(FreeEntry, FreeEntry->Size); 818 } 819 } 820 } while (++NextHead < LastHead); 821 822 // 823 // If we got here, we're out of space. 824 // Start by releasing the lock 825 // 826 KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql); 827 828 // 829 // Allocate some system PTEs 830 // 831 StartPte = MiReserveSystemPtes(SizeInPages, NonPagedPoolExpansion); 832 PointerPte = StartPte; 833 if (StartPte == NULL) 834 { 835 // 836 // Ran out of memory 837 // 838 DPRINT("Out of NP Expansion Pool\n"); 839 return NULL; 840 } 841 842 // 843 // Acquire the pool lock now 844 // 845 OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock); 846 847 // 848 // Lock the PFN database too 849 // 850 MiAcquirePfnLockAtDpcLevel(); 851 852 /* Check that we have enough available pages for this request */ 853 if (MmAvailablePages < SizeInPages) 854 { 855 MiReleasePfnLockFromDpcLevel(); 856 KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql); 857 858 MiReleaseSystemPtes(StartPte, SizeInPages, NonPagedPoolExpansion); 859 860 DPRINT1("OUT OF AVAILABLE PAGES! Required %lu, Available %lu\n", SizeInPages, MmAvailablePages); 861 862 return NULL; 863 } 864 865 // 866 // Loop the pages 867 // 868 TempPte = ValidKernelPte; 869 do 870 { 871 /* Allocate a page */ 872 MI_SET_USAGE(MI_USAGE_PAGED_POOL); 873 MI_SET_PROCESS2("Kernel"); 874 PageFrameNumber = MiRemoveAnyPage(MI_GET_NEXT_COLOR()); 875 876 /* Get the PFN entry for it and fill it out */ 877 Pfn1 = MiGetPfnEntry(PageFrameNumber); 878 Pfn1->u3.e2.ReferenceCount = 1; 879 Pfn1->u2.ShareCount = 1; 880 Pfn1->PteAddress = PointerPte; 881 Pfn1->u3.e1.PageLocation = ActiveAndValid; 882 Pfn1->u4.VerifierAllocation = 0; 883 884 /* Write the PTE for it */ 885 TempPte.u.Hard.PageFrameNumber = PageFrameNumber; 886 MI_WRITE_VALID_PTE(PointerPte++, TempPte); 887 } while (--SizeInPages > 0); 888 889 // 890 // This is the last page 891 // 892 Pfn1->u3.e1.EndOfAllocation = 1; 893 894 // 895 // Get the first page and mark it as such 896 // 897 Pfn1 = MiGetPfnEntry(StartPte->u.Hard.PageFrameNumber); 898 Pfn1->u3.e1.StartOfAllocation = 1; 899 900 /* Mark it as a verifier allocation if needed */ 901 ASSERT(Pfn1->u4.VerifierAllocation == 0); 902 if (PoolType & VERIFIER_POOL_MASK) Pfn1->u4.VerifierAllocation = 1; 903 904 // 905 // Release the PFN and nonpaged pool lock 906 // 907 MiReleasePfnLockFromDpcLevel(); 908 KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql); 909 910 // 911 // Return the address 912 // 913 return MiPteToAddress(StartPte); 914 } 915 916 ULONG 917 NTAPI 918 MiFreePoolPages(IN PVOID StartingVa) 919 { 920 PMMPTE PointerPte, StartPte; 921 PMMPFN Pfn1, StartPfn; 922 PFN_COUNT FreePages, NumberOfPages; 923 KIRQL OldIrql; 924 PMMFREE_POOL_ENTRY FreeEntry, NextEntry, LastEntry; 925 ULONG i, End; 926 ULONG_PTR Offset; 927 928 // 929 // Handle paged pool 930 // 931 if ((StartingVa >= MmPagedPoolStart) && (StartingVa <= MmPagedPoolEnd)) 932 { 933 // 934 // Calculate the offset from the beginning of paged pool, and convert it 935 // into pages 936 // 937 Offset = (ULONG_PTR)StartingVa - (ULONG_PTR)MmPagedPoolStart; 938 i = (ULONG)(Offset >> PAGE_SHIFT); 939 End = i; 940 941 // 942 // Now use the end bitmap to scan until we find a set bit, meaning that 943 // this allocation finishes here 944 // 945 while (!RtlTestBit(MmPagedPoolInfo.EndOfPagedPoolBitmap, End)) End++; 946 947 // 948 // Now calculate the total number of pages this allocation spans. If it's 949 // only one page, add it to the S-LIST instead of freeing it 950 // 951 NumberOfPages = End - i + 1; 952 if ((NumberOfPages == 1) && 953 (ExQueryDepthSList(&MiPagedPoolSListHead) < MiPagedPoolSListMaximum)) 954 { 955 InterlockedPushEntrySList(&MiPagedPoolSListHead, StartingVa); 956 return 1; 957 } 958 959 /* Delete the actual pages */ 960 PointerPte = MmPagedPoolInfo.FirstPteForPagedPool + i; 961 FreePages = MiDeleteSystemPageableVm(PointerPte, NumberOfPages, 0, NULL); 962 ASSERT(FreePages == NumberOfPages); 963 964 // 965 // Acquire the paged pool lock 966 // 967 KeAcquireGuardedMutex(&MmPagedPoolMutex); 968 969 // 970 // Clear the allocation and free bits 971 // 972 RtlClearBit(MmPagedPoolInfo.EndOfPagedPoolBitmap, End); 973 RtlClearBits(MmPagedPoolInfo.PagedPoolAllocationMap, i, NumberOfPages); 974 975 // 976 // Update the hint if we need to 977 // 978 if (i < MmPagedPoolInfo.PagedPoolHint) MmPagedPoolInfo.PagedPoolHint = i; 979 980 // 981 // Release the lock protecting the bitmaps 982 // 983 KeReleaseGuardedMutex(&MmPagedPoolMutex); 984 985 // 986 // And finally return the number of pages freed 987 // 988 return NumberOfPages; 989 } 990 991 // 992 // Get the first PTE and its corresponding PFN entry. If this is also the 993 // last PTE, meaning that this allocation was only for one page, push it into 994 // the S-LIST instead of freeing it 995 // 996 StartPte = PointerPte = MiAddressToPte(StartingVa); 997 StartPfn = Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber); 998 if ((Pfn1->u3.e1.EndOfAllocation == 1) && 999 (ExQueryDepthSList(&MiNonPagedPoolSListHead) < MiNonPagedPoolSListMaximum)) 1000 { 1001 InterlockedPushEntrySList(&MiNonPagedPoolSListHead, StartingVa); 1002 return 1; 1003 } 1004 1005 // 1006 // Loop until we find the last PTE 1007 // 1008 while (Pfn1->u3.e1.EndOfAllocation == 0) 1009 { 1010 // 1011 // Keep going 1012 // 1013 PointerPte++; 1014 Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber); 1015 } 1016 1017 // 1018 // Now we know how many pages we have 1019 // 1020 NumberOfPages = (PFN_COUNT)(PointerPte - StartPte + 1); 1021 1022 // 1023 // Acquire the nonpaged pool lock 1024 // 1025 OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock); 1026 1027 // 1028 // Mark the first and last PTEs as not part of an allocation anymore 1029 // 1030 StartPfn->u3.e1.StartOfAllocation = 0; 1031 Pfn1->u3.e1.EndOfAllocation = 0; 1032 1033 // 1034 // Assume we will free as many pages as the allocation was 1035 // 1036 FreePages = NumberOfPages; 1037 1038 // 1039 // Peek one page past the end of the allocation 1040 // 1041 PointerPte++; 1042 1043 // 1044 // Guard against going past initial nonpaged pool 1045 // 1046 if (MiGetPfnEntryIndex(Pfn1) == MiEndOfInitialPoolFrame) 1047 { 1048 // 1049 // This page is on the outskirts of initial nonpaged pool, so ignore it 1050 // 1051 Pfn1 = NULL; 1052 } 1053 else 1054 { 1055 /* Sanity check */ 1056 ASSERT((ULONG_PTR)StartingVa + NumberOfPages <= (ULONG_PTR)MmNonPagedPoolEnd); 1057 1058 /* Check if protected pool is enabled */ 1059 if (MmProtectFreedNonPagedPool) 1060 { 1061 /* The freed block will be merged, it must be made accessible */ 1062 MiUnProtectFreeNonPagedPool(MiPteToAddress(PointerPte), 0); 1063 } 1064 1065 // 1066 // Otherwise, our entire allocation must've fit within the initial non 1067 // paged pool, or the expansion nonpaged pool, so get the PFN entry of 1068 // the next allocation 1069 // 1070 if (PointerPte->u.Hard.Valid == 1) 1071 { 1072 // 1073 // It's either expansion or initial: get the PFN entry 1074 // 1075 Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber); 1076 } 1077 else 1078 { 1079 // 1080 // This means we've reached the guard page that protects the end of 1081 // the expansion nonpaged pool 1082 // 1083 Pfn1 = NULL; 1084 } 1085 1086 } 1087 1088 // 1089 // Check if this allocation actually exists 1090 // 1091 if ((Pfn1) && (Pfn1->u3.e1.StartOfAllocation == 0)) 1092 { 1093 // 1094 // It doesn't, so we should actually locate a free entry descriptor 1095 // 1096 FreeEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)StartingVa + 1097 (NumberOfPages << PAGE_SHIFT)); 1098 ASSERT(FreeEntry->Signature == MM_FREE_POOL_SIGNATURE); 1099 ASSERT(FreeEntry->Owner == FreeEntry); 1100 1101 /* Consume this entry's pages */ 1102 FreePages += FreeEntry->Size; 1103 1104 /* Remove the item from the list, depending if pool is protected */ 1105 if (MmProtectFreedNonPagedPool) 1106 MiProtectedPoolRemoveEntryList(&FreeEntry->List); 1107 else 1108 RemoveEntryList(&FreeEntry->List); 1109 } 1110 1111 // 1112 // Now get the official free entry we'll create for the caller's allocation 1113 // 1114 FreeEntry = StartingVa; 1115 1116 // 1117 // Check if the our allocation is the very first page 1118 // 1119 if (MiGetPfnEntryIndex(StartPfn) == MiStartOfInitialPoolFrame) 1120 { 1121 // 1122 // Then we can't do anything or we'll risk underflowing 1123 // 1124 Pfn1 = NULL; 1125 } 1126 else 1127 { 1128 // 1129 // Otherwise, get the PTE for the page right before our allocation 1130 // 1131 PointerPte -= NumberOfPages + 1; 1132 1133 /* Check if protected pool is enabled */ 1134 if (MmProtectFreedNonPagedPool) 1135 { 1136 /* The freed block will be merged, it must be made accessible */ 1137 MiUnProtectFreeNonPagedPool(MiPteToAddress(PointerPte), 0); 1138 } 1139 1140 /* Check if this is valid pool, or a guard page */ 1141 if (PointerPte->u.Hard.Valid == 1) 1142 { 1143 // 1144 // It's either expansion or initial nonpaged pool, get the PFN entry 1145 // 1146 Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber); 1147 } 1148 else 1149 { 1150 // 1151 // We must've reached the guard page, so don't risk touching it 1152 // 1153 Pfn1 = NULL; 1154 } 1155 } 1156 1157 // 1158 // Check if there is a valid PFN entry for the page before the allocation 1159 // and then check if this page was actually the end of an allocation. 1160 // If it wasn't, then we know for sure it's a free page 1161 // 1162 if ((Pfn1) && (Pfn1->u3.e1.EndOfAllocation == 0)) 1163 { 1164 // 1165 // Get the free entry descriptor for that given page range 1166 // 1167 FreeEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)StartingVa - PAGE_SIZE); 1168 ASSERT(FreeEntry->Signature == MM_FREE_POOL_SIGNATURE); 1169 FreeEntry = FreeEntry->Owner; 1170 1171 /* Check if protected pool is enabled */ 1172 if (MmProtectFreedNonPagedPool) 1173 { 1174 /* The freed block will be merged, it must be made accessible */ 1175 MiUnProtectFreeNonPagedPool(FreeEntry, 0); 1176 } 1177 1178 // 1179 // Check if the entry is small enough (1-3 pages) to be indexed on a free list 1180 // If it is, we'll want to re-insert it, since we're about to 1181 // collapse our pages on top of it, which will change its count 1182 // 1183 if (FreeEntry->Size < MI_MAX_FREE_PAGE_LISTS) 1184 { 1185 /* Remove the item from the list, depending if pool is protected */ 1186 if (MmProtectFreedNonPagedPool) 1187 MiProtectedPoolRemoveEntryList(&FreeEntry->List); 1188 else 1189 RemoveEntryList(&FreeEntry->List); 1190 1191 // 1192 // Update its size 1193 // 1194 FreeEntry->Size += FreePages; 1195 1196 // 1197 // And now find the new appropriate list to place it in 1198 // 1199 i = min(FreeEntry->Size, MI_MAX_FREE_PAGE_LISTS) - 1; 1200 1201 /* Insert the entry into the free list head, check for prot. pool */ 1202 if (MmProtectFreedNonPagedPool) 1203 MiProtectedPoolInsertList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List, TRUE); 1204 else 1205 InsertTailList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List); 1206 } 1207 else 1208 { 1209 // 1210 // Otherwise, just combine our free pages into this entry 1211 // 1212 FreeEntry->Size += FreePages; 1213 } 1214 } 1215 1216 // 1217 // Check if we were unable to do any compaction, and we'll stick with this 1218 // 1219 if (FreeEntry == StartingVa) 1220 { 1221 // 1222 // Well, now we are a free entry. At worse we just have our newly freed 1223 // pages, at best we have our pages plus whatever entry came after us 1224 // 1225 FreeEntry->Size = FreePages; 1226 1227 // 1228 // Find the appropriate list we should be on 1229 // 1230 i = min(FreeEntry->Size, MI_MAX_FREE_PAGE_LISTS) - 1; 1231 1232 /* Insert the entry into the free list head, check for prot. pool */ 1233 if (MmProtectFreedNonPagedPool) 1234 MiProtectedPoolInsertList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List, TRUE); 1235 else 1236 InsertTailList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List); 1237 } 1238 1239 // 1240 // Just a sanity check 1241 // 1242 ASSERT(FreePages != 0); 1243 1244 // 1245 // Get all the pages between our allocation and its end. These will all now 1246 // become free page chunks. 1247 // 1248 NextEntry = StartingVa; 1249 LastEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)NextEntry + (FreePages << PAGE_SHIFT)); 1250 do 1251 { 1252 // 1253 // Link back to the parent free entry, and keep going 1254 // 1255 NextEntry->Owner = FreeEntry; 1256 NextEntry->Signature = MM_FREE_POOL_SIGNATURE; 1257 NextEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)NextEntry + PAGE_SIZE); 1258 } while (NextEntry != LastEntry); 1259 1260 /* Is freed non paged pool protected? */ 1261 if (MmProtectFreedNonPagedPool) 1262 { 1263 /* Protect the freed pool! */ 1264 MiProtectFreeNonPagedPool(FreeEntry, FreeEntry->Size); 1265 } 1266 1267 // 1268 // We're done, release the lock and let the caller know how much we freed 1269 // 1270 KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql); 1271 return NumberOfPages; 1272 } 1273 1274 NTSTATUS 1275 NTAPI 1276 MiInitializeSessionPool(VOID) 1277 { 1278 PMMPTE PointerPte, LastPte; 1279 PMMPDE PointerPde, LastPde; 1280 PFN_NUMBER PageFrameIndex, PdeCount; 1281 PPOOL_DESCRIPTOR PoolDescriptor; 1282 PMM_SESSION_SPACE SessionGlobal; 1283 PMM_PAGED_POOL_INFO PagedPoolInfo; 1284 NTSTATUS Status; 1285 ULONG Index, PoolSize, BitmapSize; 1286 PAGED_CODE(); 1287 1288 /* Lock session pool */ 1289 SessionGlobal = MmSessionSpace->GlobalVirtualAddress; 1290 KeInitializeGuardedMutex(&SessionGlobal->PagedPoolMutex); 1291 1292 /* Setup a valid pool descriptor */ 1293 PoolDescriptor = &MmSessionSpace->PagedPool; 1294 ExInitializePoolDescriptor(PoolDescriptor, 1295 PagedPoolSession, 1296 0, 1297 0, 1298 &SessionGlobal->PagedPoolMutex); 1299 1300 /* Setup the pool addresses */ 1301 MmSessionSpace->PagedPoolStart = (PVOID)MiSessionPoolStart; 1302 MmSessionSpace->PagedPoolEnd = (PVOID)((ULONG_PTR)MiSessionPoolEnd - 1); 1303 DPRINT1("Session Pool Start: 0x%p End: 0x%p\n", 1304 MmSessionSpace->PagedPoolStart, MmSessionSpace->PagedPoolEnd); 1305 1306 /* Reset all the counters */ 1307 PagedPoolInfo = &MmSessionSpace->PagedPoolInfo; 1308 PagedPoolInfo->PagedPoolCommit = 0; 1309 PagedPoolInfo->PagedPoolHint = 0; 1310 PagedPoolInfo->AllocatedPagedPool = 0; 1311 1312 /* Compute PDE and PTE addresses */ 1313 PointerPde = MiAddressToPde(MmSessionSpace->PagedPoolStart); 1314 PointerPte = MiAddressToPte(MmSessionSpace->PagedPoolStart); 1315 LastPde = MiAddressToPde(MmSessionSpace->PagedPoolEnd); 1316 LastPte = MiAddressToPte(MmSessionSpace->PagedPoolEnd); 1317 1318 /* Write them down */ 1319 MmSessionSpace->PagedPoolBasePde = PointerPde; 1320 PagedPoolInfo->FirstPteForPagedPool = PointerPte; 1321 PagedPoolInfo->LastPteForPagedPool = LastPte; 1322 PagedPoolInfo->NextPdeForPagedPoolExpansion = PointerPde + 1; 1323 1324 /* Zero the PDEs */ 1325 PdeCount = LastPde - PointerPde; 1326 RtlZeroMemory(PointerPde, (PdeCount + 1) * sizeof(MMPTE)); 1327 1328 /* Initialize the PFN for the PDE */ 1329 Status = MiInitializeAndChargePfn(&PageFrameIndex, 1330 PointerPde, 1331 MmSessionSpace->SessionPageDirectoryIndex, 1332 TRUE); 1333 ASSERT(NT_SUCCESS(Status) == TRUE); 1334 1335 /* Initialize the first page table */ 1336 Index = (ULONG_PTR)MmSessionSpace->PagedPoolStart - (ULONG_PTR)MmSessionBase; 1337 Index >>= 22; 1338 #ifndef _M_AMD64 // FIXME 1339 ASSERT(MmSessionSpace->PageTables[Index].u.Long == 0); 1340 MmSessionSpace->PageTables[Index] = *PointerPde; 1341 #endif 1342 1343 /* Bump up counters */ 1344 InterlockedIncrementSizeT(&MmSessionSpace->NonPageablePages); 1345 InterlockedIncrementSizeT(&MmSessionSpace->CommittedPages); 1346 1347 /* Compute the size of the pool in pages, and of the bitmap for it */ 1348 PoolSize = MmSessionPoolSize >> PAGE_SHIFT; 1349 BitmapSize = sizeof(RTL_BITMAP) + ((PoolSize + 31) / 32) * sizeof(ULONG); 1350 1351 /* Allocate and initialize the bitmap to track allocations */ 1352 PagedPoolInfo->PagedPoolAllocationMap = ExAllocatePoolWithTag(NonPagedPool, 1353 BitmapSize, 1354 TAG_MM); 1355 ASSERT(PagedPoolInfo->PagedPoolAllocationMap != NULL); 1356 RtlInitializeBitMap(PagedPoolInfo->PagedPoolAllocationMap, 1357 (PULONG)(PagedPoolInfo->PagedPoolAllocationMap + 1), 1358 PoolSize); 1359 1360 /* Set all bits, but clear the first page table's worth */ 1361 RtlSetAllBits(PagedPoolInfo->PagedPoolAllocationMap); 1362 RtlClearBits(PagedPoolInfo->PagedPoolAllocationMap, 0, PTE_PER_PAGE); 1363 1364 /* Allocate and initialize the bitmap to track free space */ 1365 PagedPoolInfo->EndOfPagedPoolBitmap = ExAllocatePoolWithTag(NonPagedPool, 1366 BitmapSize, 1367 TAG_MM); 1368 ASSERT(PagedPoolInfo->EndOfPagedPoolBitmap != NULL); 1369 RtlInitializeBitMap(PagedPoolInfo->EndOfPagedPoolBitmap, 1370 (PULONG)(PagedPoolInfo->EndOfPagedPoolBitmap + 1), 1371 PoolSize); 1372 1373 /* Clear all the bits and return success */ 1374 RtlClearAllBits(PagedPoolInfo->EndOfPagedPoolBitmap); 1375 return STATUS_SUCCESS; 1376 } 1377 1378 /** 1379 * @brief 1380 * Raises the quota limit, depending on the given 1381 * pool type of the quota in question. The routine 1382 * is used exclusively by Process Manager for 1383 * quota handling. 1384 * 1385 * @param[in] PoolType 1386 * The type of quota pool which the quota in question 1387 * has to be raised. 1388 * 1389 * @param[in] CurrentMaxQuota 1390 * The current maximum limit of quota threshold. 1391 * 1392 * @param[out] NewMaxQuota 1393 * The newly raised maximum limit of quota threshold, 1394 * returned to the caller. 1395 * 1396 * @return 1397 * Returns TRUE if quota raising procedure has succeeded 1398 * without problems, FALSE otherwise. 1399 * 1400 * @remarks 1401 * A spin lock must be held when raising the pool quota 1402 * limit to avoid race occurences. 1403 */ 1404 _Requires_lock_held_(PspQuotaLock) 1405 BOOLEAN 1406 NTAPI 1407 MmRaisePoolQuota( 1408 _In_ POOL_TYPE PoolType, 1409 _In_ SIZE_T CurrentMaxQuota, 1410 _Out_ PSIZE_T NewMaxQuota) 1411 { 1412 /* 1413 * We must be in dispatch level interrupt here 1414 * as we should be under a spin lock at this point. 1415 */ 1416 ASSERT_IRQL_EQUAL(DISPATCH_LEVEL); 1417 1418 switch (PoolType) 1419 { 1420 case NonPagedPool: 1421 { 1422 /* 1423 * When concerning with a raise (charge) of quota 1424 * in a non paged pool scenario, make sure that 1425 * we've got at least 200 pages necessary to provide. 1426 */ 1427 if (MmAvailablePages < MI_QUOTA_NON_PAGED_NEEDED_PAGES) 1428 { 1429 DPRINT1("MmRaisePoolQuota(): Not enough pages available (current pages -- %lu)\n", MmAvailablePages); 1430 return FALSE; 1431 } 1432 1433 /* 1434 * Check if there's at least some space available 1435 * in the non paged pool area. 1436 */ 1437 if (MmMaximumNonPagedPoolInPages < (MmAllocatedNonPagedPool >> PAGE_SHIFT)) 1438 { 1439 /* There's too much allocated space, bail out */ 1440 DPRINT1("MmRaisePoolQuota(): Failed to increase pool quota, not enough non paged pool space (current size -- %lu || allocated size -- %lu)\n", 1441 MmMaximumNonPagedPoolInPages, MmAllocatedNonPagedPool); 1442 return FALSE; 1443 } 1444 1445 /* Do we have enough resident pages to increase our quota? */ 1446 if (MmResidentAvailablePages < MI_NON_PAGED_QUOTA_MIN_RESIDENT_PAGES) 1447 { 1448 DPRINT1("MmRaisePoolQuota(): Failed to increase pool quota, not enough resident pages available (current available pages -- %lu)\n", 1449 MmResidentAvailablePages); 1450 return FALSE; 1451 } 1452 1453 /* 1454 * Raise the non paged pool quota indicator and set 1455 * up new maximum limit of quota for the process. 1456 */ 1457 MmTotalNonPagedPoolQuota += MI_CHARGE_NON_PAGED_POOL_QUOTA; 1458 *NewMaxQuota = CurrentMaxQuota + MI_CHARGE_NON_PAGED_POOL_QUOTA; 1459 DPRINT("MmRaisePoolQuota(): Non paged pool quota increased (before -- %lu || after -- %lu)\n", CurrentMaxQuota, NewMaxQuota); 1460 return TRUE; 1461 } 1462 1463 case PagedPool: 1464 { 1465 /* 1466 * Before raising the quota limit of a paged quota 1467 * pool, make sure we've got enough space that is available. 1468 * On Windows it seems it wants to check for at least 1 MB of space 1469 * needed so that it would be possible to raise the paged pool quota. 1470 */ 1471 if (MmSizeOfPagedPoolInPages < (MmPagedPoolInfo.AllocatedPagedPool >> PAGE_SHIFT)) 1472 { 1473 /* We haven't gotten enough space, bail out */ 1474 DPRINT1("MmRaisePoolQuota(): Failed to increase pool quota, not enough paged pool space (current size -- %lu || allocated size -- %lu)\n", 1475 MmSizeOfPagedPoolInPages, MmPagedPoolInfo.AllocatedPagedPool >> PAGE_SHIFT); 1476 return FALSE; 1477 } 1478 1479 /* 1480 * Raise the paged pool quota indicator and set 1481 * up new maximum limit of quota for the process. 1482 */ 1483 MmTotalPagedPoolQuota += MI_CHARGE_PAGED_POOL_QUOTA; 1484 *NewMaxQuota = CurrentMaxQuota + MI_CHARGE_PAGED_POOL_QUOTA; 1485 DPRINT("MmRaisePoolQuota(): Paged pool quota increased (before -- %lu || after -- %lu)\n", CurrentMaxQuota, NewMaxQuota); 1486 return TRUE; 1487 } 1488 1489 /* Only NonPagedPool and PagedPool are used */ 1490 DEFAULT_UNREACHABLE; 1491 } 1492 } 1493 1494 /** 1495 * @brief 1496 * Returns the quota, depending on the given 1497 * pool type of the quota in question. The routine 1498 * is used exclusively by Process Manager for quota 1499 * handling. 1500 * 1501 * @param[in] PoolType 1502 * The type of quota pool which the quota in question 1503 * has to be raised. 1504 * 1505 * @param[in] CurrentMaxQuota 1506 * The current maximum limit of quota threshold. 1507 * 1508 * @return 1509 * Nothing. 1510 * 1511 * @remarks 1512 * A spin lock must be held when raising the pool quota 1513 * limit to avoid race occurences. 1514 */ 1515 _Requires_lock_held_(PspQuotaLock) 1516 VOID 1517 NTAPI 1518 MmReturnPoolQuota( 1519 _In_ POOL_TYPE PoolType, 1520 _In_ SIZE_T QuotaToReturn) 1521 { 1522 /* 1523 * We must be in dispatch level interrupt here 1524 * as we should be under a spin lock at this point. 1525 */ 1526 ASSERT_IRQL_EQUAL(DISPATCH_LEVEL); 1527 1528 switch (PoolType) 1529 { 1530 case NonPagedPool: 1531 { 1532 /* This is a non paged pool type, decrease the non paged quota */ 1533 ASSERT(MmTotalNonPagedPoolQuota >= QuotaToReturn); 1534 MmTotalNonPagedPoolQuota -= QuotaToReturn; 1535 DPRINT("MmReturnPoolQuota(): Non paged pool quota returned (current size -- %lu)\n", MmTotalNonPagedPoolQuota); 1536 break; 1537 } 1538 1539 case PagedPool: 1540 { 1541 /* This is a paged pool type, decrease the paged quota */ 1542 ASSERT(MmTotalPagedPoolQuota >= QuotaToReturn); 1543 MmTotalPagedPoolQuota -= QuotaToReturn; 1544 DPRINT("MmReturnPoolQuota(): Paged pool quota returned (current size -- %lu)\n", MmTotalPagedPoolQuota); 1545 break; 1546 } 1547 1548 /* Only NonPagedPool and PagedPool are used */ 1549 DEFAULT_UNREACHABLE; 1550 } 1551 } 1552 1553 /* PUBLIC FUNCTIONS ***********************************************************/ 1554 1555 /** 1556 * @brief 1557 * Reserves the specified amount of memory in system virtual address space. 1558 * 1559 * @param[in] NumberOfBytes 1560 * Size, in bytes, of memory to reserve. 1561 * 1562 * @param[in] PoolTag 1563 * Pool Tag identifying the buffer. Usually consists from 4 characters in reversed order. 1564 * 1565 * @return 1566 * A pointer to the 1st memory block of the reserved buffer in case of success, NULL otherwise. 1567 * 1568 * @remarks Must be called at IRQL <= APC_LEVEL 1569 */ 1570 _Must_inspect_result_ 1571 _IRQL_requires_max_(APC_LEVEL) 1572 _Ret_maybenull_ 1573 PVOID 1574 NTAPI 1575 MmAllocateMappingAddress( 1576 _In_ SIZE_T NumberOfBytes, 1577 _In_ ULONG PoolTag) 1578 { 1579 PFN_NUMBER SizeInPages; 1580 PMMPTE PointerPte; 1581 MMPTE TempPte; 1582 1583 /* Fast exit if PoolTag is NULL */ 1584 if (!PoolTag) 1585 return NULL; 1586 1587 /* How many PTEs does the caller want? */ 1588 SizeInPages = BYTES_TO_PAGES(NumberOfBytes); 1589 if (SizeInPages == 0) 1590 { 1591 KeBugCheckEx(SYSTEM_PTE_MISUSE, 1592 PTE_MAPPING_NONE, /* Requested 0 mappings */ 1593 SizeInPages, 1594 PoolTag, 1595 (ULONG_PTR)_ReturnAddress()); 1596 } 1597 1598 /* We need two extra PTEs to store size and pool tag in */ 1599 SizeInPages += 2; 1600 1601 /* Reserve our PTEs */ 1602 PointerPte = MiReserveSystemPtes(SizeInPages, SystemPteSpace); 1603 if (!PointerPte) 1604 { 1605 /* Failed to reserve PTEs */ 1606 DPRINT1("Failed to reserve system PTEs\n"); 1607 return NULL; 1608 } 1609 1610 ASSERT(SizeInPages <= MM_EMPTY_PTE_LIST); 1611 TempPte.u.Long = 0; 1612 TempPte.u.List.NextEntry = SizeInPages; 1613 MI_WRITE_INVALID_PTE(&PointerPte[0], TempPte); 1614 TempPte.u.Long = PoolTag; 1615 TempPte.u.Hard.Valid = 0; 1616 MI_WRITE_INVALID_PTE(&PointerPte[1], TempPte); 1617 return MiPteToAddress(PointerPte + 2); 1618 } 1619 1620 /** 1621 * @brief 1622 * Frees previously reserved amount of memory in system virtual address space. 1623 * 1624 * @param[in] BaseAddress 1625 * A pointer to the 1st memory block of the reserved buffer. 1626 * 1627 * @param[in] PoolTag 1628 * Pool Tag identifying the buffer. Usually consists from 4 characters in reversed order. 1629 * 1630 * @return 1631 * Nothing. 1632 * 1633 * @see MmAllocateMappingAddress 1634 * 1635 * @remarks Must be called at IRQL <= APC_LEVEL 1636 */ 1637 _IRQL_requires_max_(APC_LEVEL) 1638 VOID 1639 NTAPI 1640 MmFreeMappingAddress( 1641 _In_ __drv_freesMem(Mem) _Post_invalid_ PVOID BaseAddress, 1642 _In_ ULONG PoolTag) 1643 { 1644 PMMPTE PointerPte; 1645 MMPTE TempPte; 1646 PFN_NUMBER SizeInPages; 1647 PFN_NUMBER i; 1648 1649 /* Get the first PTE we reserved */ 1650 PointerPte = MiAddressToPte(BaseAddress) - 2; 1651 1652 /* Verify that the pool tag matches */ 1653 TempPte.u.Long = PoolTag; 1654 TempPte.u.Hard.Valid = 0; 1655 if (PointerPte[1].u.Long != TempPte.u.Long) 1656 { 1657 KeBugCheckEx(SYSTEM_PTE_MISUSE, 1658 PTE_MAPPING_NOT_OWNED, /* Trying to free an address it does not own */ 1659 (ULONG_PTR)BaseAddress, 1660 PoolTag, 1661 PointerPte[1].u.Long); 1662 } 1663 1664 /* We must have a size */ 1665 SizeInPages = PointerPte[0].u.List.NextEntry; 1666 if (SizeInPages < 3) 1667 { 1668 KeBugCheckEx(SYSTEM_PTE_MISUSE, 1669 PTE_MAPPING_EMPTY, /* Mapping apparently empty */ 1670 (ULONG_PTR)BaseAddress, 1671 PoolTag, 1672 (ULONG_PTR)_ReturnAddress()); 1673 } 1674 1675 /* Enumerate all PTEs and make sure they are empty */ 1676 for (i = 2; i < SizeInPages; i++) 1677 { 1678 if (PointerPte[i].u.Long != 0) 1679 { 1680 KeBugCheckEx(SYSTEM_PTE_MISUSE, 1681 PTE_MAPPING_RESERVED, /* Mapping address still reserved */ 1682 (ULONG_PTR)PointerPte, 1683 PoolTag, 1684 SizeInPages - 2); 1685 } 1686 } 1687 1688 /* Release the PTEs */ 1689 MiReleaseSystemPtes(PointerPte, SizeInPages, SystemPteSpace); 1690 } 1691 1692 /* EOF */ 1693