1 /* 2 * PROJECT: ReactOS Kernel 3 * LICENSE: BSD - See COPYING.ARM in the top level directory 4 * FILE: ntoskrnl/mm/ARM3/pool.c 5 * PURPOSE: ARM Memory Manager Pool Allocator 6 * PROGRAMMERS: ReactOS Portable Systems Group 7 */ 8 9 /* INCLUDES *******************************************************************/ 10 11 #include <ntoskrnl.h> 12 #define NDEBUG 13 #include <debug.h> 14 15 #define MODULE_INVOLVED_IN_ARM3 16 #include <mm/ARM3/miarm.h> 17 18 /* GLOBALS ********************************************************************/ 19 20 LIST_ENTRY MmNonPagedPoolFreeListHead[MI_MAX_FREE_PAGE_LISTS]; 21 PFN_COUNT MmNumberOfFreeNonPagedPool, MiExpansionPoolPagesInitialCharge; 22 PVOID MmNonPagedPoolEnd0; 23 PFN_NUMBER MiStartOfInitialPoolFrame, MiEndOfInitialPoolFrame; 24 KGUARDED_MUTEX MmPagedPoolMutex; 25 MM_PAGED_POOL_INFO MmPagedPoolInfo; 26 SIZE_T MmAllocatedNonPagedPool; 27 ULONG MmSpecialPoolTag; 28 ULONG MmConsumedPoolPercentage; 29 BOOLEAN MmProtectFreedNonPagedPool; 30 SLIST_HEADER MiNonPagedPoolSListHead; 31 ULONG MiNonPagedPoolSListMaximum = 4; 32 SLIST_HEADER MiPagedPoolSListHead; 33 ULONG MiPagedPoolSListMaximum = 8; 34 35 /* PRIVATE FUNCTIONS **********************************************************/ 36 37 VOID 38 NTAPI 39 MiProtectFreeNonPagedPool(IN PVOID VirtualAddress, 40 IN ULONG PageCount) 41 { 42 PMMPTE PointerPte, LastPte; 43 MMPTE TempPte; 44 45 /* If pool is physical, can't protect PTEs */ 46 if (MI_IS_PHYSICAL_ADDRESS(VirtualAddress)) return; 47 48 /* Get PTE pointers and loop */ 49 PointerPte = MiAddressToPte(VirtualAddress); 50 LastPte = PointerPte + PageCount; 51 do 52 { 53 /* Capture the PTE for safety */ 54 TempPte = *PointerPte; 55 56 /* Mark it as an invalid PTE, set proto bit to recognize it as pool */ 57 TempPte.u.Hard.Valid = 0; 58 TempPte.u.Soft.Prototype = 1; 59 MI_WRITE_INVALID_PTE(PointerPte, TempPte); 60 } while (++PointerPte < LastPte); 61 62 /* Flush the TLB */ 63 KeFlushEntireTb(TRUE, TRUE); 64 } 65 66 BOOLEAN 67 NTAPI 68 MiUnProtectFreeNonPagedPool(IN PVOID VirtualAddress, 69 IN ULONG PageCount) 70 { 71 PMMPTE PointerPte; 72 MMPTE TempPte; 73 PFN_NUMBER UnprotectedPages = 0; 74 75 /* If pool is physical, can't protect PTEs */ 76 if (MI_IS_PHYSICAL_ADDRESS(VirtualAddress)) return FALSE; 77 78 /* Get, and capture the PTE */ 79 PointerPte = MiAddressToPte(VirtualAddress); 80 TempPte = *PointerPte; 81 82 /* Loop protected PTEs */ 83 while ((TempPte.u.Hard.Valid == 0) && (TempPte.u.Soft.Prototype == 1)) 84 { 85 /* Unprotect the PTE */ 86 TempPte.u.Hard.Valid = 1; 87 TempPte.u.Soft.Prototype = 0; 88 MI_WRITE_VALID_PTE(PointerPte, TempPte); 89 90 /* One more page */ 91 if (++UnprotectedPages == PageCount) break; 92 93 /* Capture next PTE */ 94 TempPte = *(++PointerPte); 95 } 96 97 /* Return if any pages were unprotected */ 98 return UnprotectedPages ? TRUE : FALSE; 99 } 100 101 FORCEINLINE 102 VOID 103 MiProtectedPoolUnProtectLinks(IN PLIST_ENTRY Links, 104 OUT PVOID* PoolFlink, 105 OUT PVOID* PoolBlink) 106 { 107 BOOLEAN Safe; 108 PVOID PoolVa; 109 110 /* Initialize variables */ 111 *PoolFlink = *PoolBlink = NULL; 112 113 /* Check if the list has entries */ 114 if (IsListEmpty(Links) == FALSE) 115 { 116 /* We are going to need to forward link to do an insert */ 117 PoolVa = Links->Flink; 118 119 /* So make it safe to access */ 120 Safe = MiUnProtectFreeNonPagedPool(PoolVa, 1); 121 if (Safe) *PoolFlink = PoolVa; 122 } 123 124 /* Are we going to need a backward link too? */ 125 if (Links != Links->Blink) 126 { 127 /* Get the head's backward link for the insert */ 128 PoolVa = Links->Blink; 129 130 /* Make it safe to access */ 131 Safe = MiUnProtectFreeNonPagedPool(PoolVa, 1); 132 if (Safe) *PoolBlink = PoolVa; 133 } 134 } 135 136 FORCEINLINE 137 VOID 138 MiProtectedPoolProtectLinks(IN PVOID PoolFlink, 139 IN PVOID PoolBlink) 140 { 141 /* Reprotect the pages, if they got unprotected earlier */ 142 if (PoolFlink) MiProtectFreeNonPagedPool(PoolFlink, 1); 143 if (PoolBlink) MiProtectFreeNonPagedPool(PoolBlink, 1); 144 } 145 146 VOID 147 NTAPI 148 MiProtectedPoolInsertList(IN PLIST_ENTRY ListHead, 149 IN PLIST_ENTRY Entry, 150 IN BOOLEAN Critical) 151 { 152 PVOID PoolFlink, PoolBlink; 153 154 /* Make the list accessible */ 155 MiProtectedPoolUnProtectLinks(ListHead, &PoolFlink, &PoolBlink); 156 157 /* Now insert in the right position */ 158 Critical ? InsertHeadList(ListHead, Entry) : InsertTailList(ListHead, Entry); 159 160 /* And reprotect the pages containing the free links */ 161 MiProtectedPoolProtectLinks(PoolFlink, PoolBlink); 162 } 163 164 VOID 165 NTAPI 166 MiProtectedPoolRemoveEntryList(IN PLIST_ENTRY Entry) 167 { 168 PVOID PoolFlink, PoolBlink; 169 170 /* Make the list accessible */ 171 MiProtectedPoolUnProtectLinks(Entry, &PoolFlink, &PoolBlink); 172 173 /* Now remove */ 174 RemoveEntryList(Entry); 175 176 /* And reprotect the pages containing the free links */ 177 if (PoolFlink) MiProtectFreeNonPagedPool(PoolFlink, 1); 178 if (PoolBlink) MiProtectFreeNonPagedPool(PoolBlink, 1); 179 } 180 181 INIT_FUNCTION 182 VOID 183 NTAPI 184 MiInitializeNonPagedPoolThresholds(VOID) 185 { 186 PFN_NUMBER Size = MmMaximumNonPagedPoolInPages; 187 188 /* Default low threshold of 8MB or one third of nonpaged pool */ 189 MiLowNonPagedPoolThreshold = (8 * _1MB) >> PAGE_SHIFT; 190 MiLowNonPagedPoolThreshold = min(MiLowNonPagedPoolThreshold, Size / 3); 191 192 /* Default high threshold of 20MB or 50% */ 193 MiHighNonPagedPoolThreshold = (20 * _1MB) >> PAGE_SHIFT; 194 MiHighNonPagedPoolThreshold = min(MiHighNonPagedPoolThreshold, Size / 2); 195 ASSERT(MiLowNonPagedPoolThreshold < MiHighNonPagedPoolThreshold); 196 } 197 198 INIT_FUNCTION 199 VOID 200 NTAPI 201 MiInitializePoolEvents(VOID) 202 { 203 KIRQL OldIrql; 204 PFN_NUMBER FreePoolInPages; 205 206 /* Lock paged pool */ 207 KeAcquireGuardedMutex(&MmPagedPoolMutex); 208 209 /* Total size of the paged pool minus the allocated size, is free */ 210 FreePoolInPages = MmSizeOfPagedPoolInPages - MmPagedPoolInfo.AllocatedPagedPool; 211 212 /* Check the initial state high state */ 213 if (FreePoolInPages >= MiHighPagedPoolThreshold) 214 { 215 /* We have plenty of pool */ 216 KeSetEvent(MiHighPagedPoolEvent, 0, FALSE); 217 } 218 else 219 { 220 /* We don't */ 221 KeClearEvent(MiHighPagedPoolEvent); 222 } 223 224 /* Check the initial low state */ 225 if (FreePoolInPages <= MiLowPagedPoolThreshold) 226 { 227 /* We're very low in free pool memory */ 228 KeSetEvent(MiLowPagedPoolEvent, 0, FALSE); 229 } 230 else 231 { 232 /* We're not */ 233 KeClearEvent(MiLowPagedPoolEvent); 234 } 235 236 /* Release the paged pool lock */ 237 KeReleaseGuardedMutex(&MmPagedPoolMutex); 238 239 /* Now it's time for the nonpaged pool lock */ 240 OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock); 241 242 /* Free pages are the maximum minus what's been allocated */ 243 FreePoolInPages = MmMaximumNonPagedPoolInPages - MmAllocatedNonPagedPool; 244 245 /* Check if we have plenty */ 246 if (FreePoolInPages >= MiHighNonPagedPoolThreshold) 247 { 248 /* We do, set the event */ 249 KeSetEvent(MiHighNonPagedPoolEvent, 0, FALSE); 250 } 251 else 252 { 253 /* We don't, clear the event */ 254 KeClearEvent(MiHighNonPagedPoolEvent); 255 } 256 257 /* Check if we have very little */ 258 if (FreePoolInPages <= MiLowNonPagedPoolThreshold) 259 { 260 /* We do, set the event */ 261 KeSetEvent(MiLowNonPagedPoolEvent, 0, FALSE); 262 } 263 else 264 { 265 /* We don't, clear it */ 266 KeClearEvent(MiLowNonPagedPoolEvent); 267 } 268 269 /* We're done, release the nonpaged pool lock */ 270 KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql); 271 } 272 273 INIT_FUNCTION 274 VOID 275 NTAPI 276 MiInitializeNonPagedPool(VOID) 277 { 278 ULONG i; 279 PFN_COUNT PoolPages; 280 PMMFREE_POOL_ENTRY FreeEntry, FirstEntry; 281 PMMPTE PointerPte; 282 PAGED_CODE(); 283 284 // 285 // Initialize the pool S-LISTs as well as their maximum count. In general, 286 // we'll allow 8 times the default on a 2GB system, and two times the default 287 // on a 1GB system. 288 // 289 InitializeSListHead(&MiPagedPoolSListHead); 290 InitializeSListHead(&MiNonPagedPoolSListHead); 291 if (MmNumberOfPhysicalPages >= ((2 * _1GB) /PAGE_SIZE)) 292 { 293 MiNonPagedPoolSListMaximum *= 8; 294 MiPagedPoolSListMaximum *= 8; 295 } 296 else if (MmNumberOfPhysicalPages >= (_1GB /PAGE_SIZE)) 297 { 298 MiNonPagedPoolSListMaximum *= 2; 299 MiPagedPoolSListMaximum *= 2; 300 } 301 302 // 303 // However if debugging options for the pool are enabled, turn off the S-LIST 304 // to reduce the risk of messing things up even more 305 // 306 if (MmProtectFreedNonPagedPool) 307 { 308 MiNonPagedPoolSListMaximum = 0; 309 MiPagedPoolSListMaximum = 0; 310 } 311 312 // 313 // We keep 4 lists of free pages (4 lists help avoid contention) 314 // 315 for (i = 0; i < MI_MAX_FREE_PAGE_LISTS; i++) 316 { 317 // 318 // Initialize each of them 319 // 320 InitializeListHead(&MmNonPagedPoolFreeListHead[i]); 321 } 322 323 // 324 // Calculate how many pages the initial nonpaged pool has 325 // 326 PoolPages = (PFN_COUNT)BYTES_TO_PAGES(MmSizeOfNonPagedPoolInBytes); 327 MmNumberOfFreeNonPagedPool = PoolPages; 328 329 // 330 // Initialize the first free entry 331 // 332 FreeEntry = MmNonPagedPoolStart; 333 FirstEntry = FreeEntry; 334 FreeEntry->Size = PoolPages; 335 FreeEntry->Signature = MM_FREE_POOL_SIGNATURE; 336 FreeEntry->Owner = FirstEntry; 337 338 // 339 // Insert it into the last list 340 // 341 InsertHeadList(&MmNonPagedPoolFreeListHead[MI_MAX_FREE_PAGE_LISTS - 1], 342 &FreeEntry->List); 343 344 // 345 // Now create free entries for every single other page 346 // 347 while (PoolPages-- > 1) 348 { 349 // 350 // Link them all back to the original entry 351 // 352 FreeEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)FreeEntry + PAGE_SIZE); 353 FreeEntry->Owner = FirstEntry; 354 FreeEntry->Signature = MM_FREE_POOL_SIGNATURE; 355 } 356 357 // 358 // Validate and remember first allocated pool page 359 // 360 PointerPte = MiAddressToPte(MmNonPagedPoolStart); 361 ASSERT(PointerPte->u.Hard.Valid == 1); 362 MiStartOfInitialPoolFrame = PFN_FROM_PTE(PointerPte); 363 364 // 365 // Keep track of where initial nonpaged pool ends 366 // 367 MmNonPagedPoolEnd0 = (PVOID)((ULONG_PTR)MmNonPagedPoolStart + 368 MmSizeOfNonPagedPoolInBytes); 369 370 // 371 // Validate and remember last allocated pool page 372 // 373 PointerPte = MiAddressToPte((PVOID)((ULONG_PTR)MmNonPagedPoolEnd0 - 1)); 374 ASSERT(PointerPte->u.Hard.Valid == 1); 375 MiEndOfInitialPoolFrame = PFN_FROM_PTE(PointerPte); 376 377 // 378 // Validate the first nonpaged pool expansion page (which is a guard page) 379 // 380 PointerPte = MiAddressToPte(MmNonPagedPoolExpansionStart); 381 ASSERT(PointerPte->u.Hard.Valid == 0); 382 383 // 384 // Calculate the size of the expansion region alone 385 // 386 MiExpansionPoolPagesInitialCharge = (PFN_COUNT) 387 BYTES_TO_PAGES(MmMaximumNonPagedPoolInBytes - MmSizeOfNonPagedPoolInBytes); 388 389 // 390 // Remove 2 pages, since there's a guard page on top and on the bottom 391 // 392 MiExpansionPoolPagesInitialCharge -= 2; 393 394 // 395 // Now initialize the nonpaged pool expansion PTE space. Remember there's a 396 // guard page on top so make sure to skip it. The bottom guard page will be 397 // guaranteed by the fact our size is off by one. 398 // 399 MiInitializeSystemPtes(PointerPte + 1, 400 MiExpansionPoolPagesInitialCharge, 401 NonPagedPoolExpansion); 402 } 403 404 POOL_TYPE 405 NTAPI 406 MmDeterminePoolType(IN PVOID PoolAddress) 407 { 408 // 409 // Use a simple bounds check 410 // 411 if (PoolAddress >= MmPagedPoolStart && PoolAddress <= MmPagedPoolEnd) 412 return PagedPool; 413 else if (PoolAddress >= MmNonPagedPoolStart && PoolAddress <= MmNonPagedPoolEnd) 414 return NonPagedPool; 415 KeBugCheckEx(BAD_POOL_CALLER, 0x42, (ULONG_PTR)PoolAddress, 0, 0); 416 } 417 418 PVOID 419 NTAPI 420 MiAllocatePoolPages(IN POOL_TYPE PoolType, 421 IN SIZE_T SizeInBytes) 422 { 423 PFN_NUMBER PageFrameNumber; 424 PFN_COUNT SizeInPages, PageTableCount; 425 ULONG i; 426 KIRQL OldIrql; 427 PLIST_ENTRY NextEntry, NextHead, LastHead; 428 PMMPTE PointerPte, StartPte; 429 PMMPDE PointerPde; 430 ULONG EndAllocation; 431 MMPTE TempPte; 432 MMPDE TempPde; 433 PMMPFN Pfn1; 434 PVOID BaseVa, BaseVaStart; 435 PMMFREE_POOL_ENTRY FreeEntry; 436 437 // 438 // Figure out how big the allocation is in pages 439 // 440 SizeInPages = (PFN_COUNT)BYTES_TO_PAGES(SizeInBytes); 441 442 // 443 // Check for overflow 444 // 445 if (SizeInPages == 0) 446 { 447 // 448 // Fail 449 // 450 return NULL; 451 } 452 453 // 454 // Handle paged pool 455 // 456 if ((PoolType & BASE_POOL_TYPE_MASK) == PagedPool) 457 { 458 // 459 // If only one page is being requested, try to grab it from the S-LIST 460 // 461 if ((SizeInPages == 1) && (ExQueryDepthSList(&MiPagedPoolSListHead))) 462 { 463 BaseVa = InterlockedPopEntrySList(&MiPagedPoolSListHead); 464 if (BaseVa) return BaseVa; 465 } 466 467 // 468 // Lock the paged pool mutex 469 // 470 KeAcquireGuardedMutex(&MmPagedPoolMutex); 471 472 // 473 // Find some empty allocation space 474 // 475 i = RtlFindClearBitsAndSet(MmPagedPoolInfo.PagedPoolAllocationMap, 476 SizeInPages, 477 MmPagedPoolInfo.PagedPoolHint); 478 if (i == 0xFFFFFFFF) 479 { 480 // 481 // Get the page bit count 482 // 483 i = ((SizeInPages - 1) / PTE_COUNT) + 1; 484 DPRINT("Paged pool expansion: %lu %x\n", i, SizeInPages); 485 486 // 487 // Check if there is enougn paged pool expansion space left 488 // 489 if (MmPagedPoolInfo.NextPdeForPagedPoolExpansion > 490 (PMMPDE)MiAddressToPte(MmPagedPoolInfo.LastPteForPagedPool)) 491 { 492 // 493 // Out of memory! 494 // 495 DPRINT1("FAILED to allocate %Iu bytes from paged pool\n", SizeInBytes); 496 KeReleaseGuardedMutex(&MmPagedPoolMutex); 497 return NULL; 498 } 499 500 // 501 // Check if we'll have to expand past the last PTE we have available 502 // 503 if (((i - 1) + MmPagedPoolInfo.NextPdeForPagedPoolExpansion) > 504 (PMMPDE)MiAddressToPte(MmPagedPoolInfo.LastPteForPagedPool)) 505 { 506 // 507 // We can only support this much then 508 // 509 PointerPde = MiPteToPde(MmPagedPoolInfo.LastPteForPagedPool); 510 PageTableCount = (PFN_COUNT)(PointerPde + 1 - 511 MmPagedPoolInfo.NextPdeForPagedPoolExpansion); 512 ASSERT(PageTableCount < i); 513 i = PageTableCount; 514 } 515 else 516 { 517 // 518 // Otherwise, there is plenty of space left for this expansion 519 // 520 PageTableCount = i; 521 } 522 523 // 524 // Get the template PDE we'll use to expand 525 // 526 TempPde = ValidKernelPde; 527 528 // 529 // Get the first PTE in expansion space 530 // 531 PointerPde = MmPagedPoolInfo.NextPdeForPagedPoolExpansion; 532 BaseVa = MiPdeToPte(PointerPde); 533 BaseVaStart = BaseVa; 534 535 // 536 // Lock the PFN database and loop pages 537 // 538 OldIrql = MiAcquirePfnLock(); 539 do 540 { 541 // 542 // It should not already be valid 543 // 544 ASSERT(PointerPde->u.Hard.Valid == 0); 545 546 /* Request a page */ 547 MI_SET_USAGE(MI_USAGE_PAGED_POOL); 548 MI_SET_PROCESS2("Kernel"); 549 PageFrameNumber = MiRemoveAnyPage(MI_GET_NEXT_COLOR()); 550 TempPde.u.Hard.PageFrameNumber = PageFrameNumber; 551 #if (_MI_PAGING_LEVELS >= 3) 552 /* On PAE/x64 systems, there's no double-buffering */ 553 /* Initialize the PFN entry for it */ 554 MiInitializePfnForOtherProcess(PageFrameNumber, 555 (PMMPTE)PointerPde, 556 PFN_FROM_PTE(MiAddressToPte(PointerPde))); 557 558 /* Write the actual PDE now */ 559 MI_WRITE_VALID_PDE(PointerPde, TempPde); 560 #else 561 // 562 // Save it into our double-buffered system page directory 563 // 564 MmSystemPagePtes[((ULONG_PTR)PointerPde & (SYSTEM_PD_SIZE - 1)) / sizeof(MMPTE)] = TempPde; 565 566 /* Initialize the PFN */ 567 MiInitializePfnForOtherProcess(PageFrameNumber, 568 (PMMPTE)PointerPde, 569 MmSystemPageDirectory[(PointerPde - MiAddressToPde(NULL)) / PDE_COUNT]); 570 #endif 571 572 // 573 // Move on to the next expansion address 574 // 575 PointerPde++; 576 BaseVa = (PVOID)((ULONG_PTR)BaseVa + PAGE_SIZE); 577 i--; 578 } while (i > 0); 579 580 // 581 // Release the PFN database lock 582 // 583 MiReleasePfnLock(OldIrql); 584 585 // 586 // These pages are now available, clear their availablity bits 587 // 588 EndAllocation = (ULONG)(MmPagedPoolInfo.NextPdeForPagedPoolExpansion - 589 (PMMPDE)MiAddressToPte(MmPagedPoolInfo.FirstPteForPagedPool)) * 590 PTE_COUNT; 591 RtlClearBits(MmPagedPoolInfo.PagedPoolAllocationMap, 592 EndAllocation, 593 PageTableCount * PTE_COUNT); 594 595 // 596 // Update the next expansion location 597 // 598 MmPagedPoolInfo.NextPdeForPagedPoolExpansion += PageTableCount; 599 600 // 601 // Zero out the newly available memory 602 // 603 RtlZeroMemory(BaseVaStart, PageTableCount * PAGE_SIZE); 604 605 // 606 // Now try consuming the pages again 607 // 608 i = RtlFindClearBitsAndSet(MmPagedPoolInfo.PagedPoolAllocationMap, 609 SizeInPages, 610 0); 611 if (i == 0xFFFFFFFF) 612 { 613 // 614 // Out of memory! 615 // 616 DPRINT1("FAILED to allocate %Iu bytes from paged pool\n", SizeInBytes); 617 KeReleaseGuardedMutex(&MmPagedPoolMutex); 618 return NULL; 619 } 620 } 621 622 // 623 // Update the pool hint if the request was just one page 624 // 625 if (SizeInPages == 1) MmPagedPoolInfo.PagedPoolHint = i + 1; 626 627 // 628 // Update the end bitmap so we know the bounds of this allocation when 629 // the time comes to free it 630 // 631 EndAllocation = i + SizeInPages - 1; 632 RtlSetBit(MmPagedPoolInfo.EndOfPagedPoolBitmap, EndAllocation); 633 634 // 635 // Now we can release the lock (it mainly protects the bitmap) 636 // 637 KeReleaseGuardedMutex(&MmPagedPoolMutex); 638 639 // 640 // Now figure out where this allocation starts 641 // 642 BaseVa = (PVOID)((ULONG_PTR)MmPagedPoolStart + (i << PAGE_SHIFT)); 643 644 // 645 // Flush the TLB 646 // 647 KeFlushEntireTb(TRUE, TRUE); 648 649 /* Setup a demand-zero writable PTE */ 650 MI_MAKE_SOFTWARE_PTE(&TempPte, MM_READWRITE); 651 652 // 653 // Find the first and last PTE, then loop them all 654 // 655 PointerPte = MiAddressToPte(BaseVa); 656 StartPte = PointerPte + SizeInPages; 657 do 658 { 659 // 660 // Write the demand zero PTE and keep going 661 // 662 MI_WRITE_INVALID_PTE(PointerPte, TempPte); 663 } while (++PointerPte < StartPte); 664 665 // 666 // Return the allocation address to the caller 667 // 668 #if DBG 669 RtlFillMemoryUlong(BaseVa, ROUND_TO_PAGES(SizeInBytes), 0xABABABAB); 670 #endif 671 return BaseVa; 672 } 673 674 // 675 // If only one page is being requested, try to grab it from the S-LIST 676 // 677 if ((SizeInPages == 1) && (ExQueryDepthSList(&MiNonPagedPoolSListHead))) 678 { 679 BaseVa = InterlockedPopEntrySList(&MiNonPagedPoolSListHead); 680 if (BaseVa) 681 { 682 #if DBG 683 RtlFillMemoryUlong(BaseVa, ROUND_TO_PAGES(SizeInBytes), 0xABABABAB); 684 #endif 685 return BaseVa; 686 } 687 } 688 689 // 690 // Allocations of less than 4 pages go into their individual buckets 691 // 692 i = SizeInPages - 1; 693 if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1; 694 695 // 696 // Loop through all the free page lists based on the page index 697 // 698 NextHead = &MmNonPagedPoolFreeListHead[i]; 699 LastHead = &MmNonPagedPoolFreeListHead[MI_MAX_FREE_PAGE_LISTS]; 700 701 // 702 // Acquire the nonpaged pool lock 703 // 704 OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock); 705 do 706 { 707 // 708 // Now loop through all the free page entries in this given list 709 // 710 NextEntry = NextHead->Flink; 711 while (NextEntry != NextHead) 712 { 713 /* Is freed non paged pool enabled */ 714 if (MmProtectFreedNonPagedPool) 715 { 716 /* We need to be able to touch this page, unprotect it */ 717 MiUnProtectFreeNonPagedPool(NextEntry, 0); 718 } 719 720 // 721 // Grab the entry and see if it can handle our allocation 722 // 723 FreeEntry = CONTAINING_RECORD(NextEntry, MMFREE_POOL_ENTRY, List); 724 ASSERT(FreeEntry->Signature == MM_FREE_POOL_SIGNATURE); 725 if (FreeEntry->Size >= SizeInPages) 726 { 727 // 728 // It does, so consume the pages from here 729 // 730 FreeEntry->Size -= SizeInPages; 731 732 // 733 // The allocation will begin in this free page area 734 // 735 BaseVa = (PVOID)((ULONG_PTR)FreeEntry + 736 (FreeEntry->Size << PAGE_SHIFT)); 737 738 /* Remove the item from the list, depending if pool is protected */ 739 if (MmProtectFreedNonPagedPool) 740 MiProtectedPoolRemoveEntryList(&FreeEntry->List); 741 else 742 RemoveEntryList(&FreeEntry->List); 743 744 // 745 // However, check if its' still got space left 746 // 747 if (FreeEntry->Size != 0) 748 { 749 /* Check which list to insert this entry into */ 750 i = FreeEntry->Size - 1; 751 if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1; 752 753 /* Insert the entry into the free list head, check for prot. pool */ 754 if (MmProtectFreedNonPagedPool) 755 MiProtectedPoolInsertList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List, TRUE); 756 else 757 InsertTailList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List); 758 759 /* Is freed non paged pool protected? */ 760 if (MmProtectFreedNonPagedPool) 761 { 762 /* Protect the freed pool! */ 763 MiProtectFreeNonPagedPool(FreeEntry, FreeEntry->Size); 764 } 765 } 766 767 // 768 // Grab the PTE for this allocation 769 // 770 PointerPte = MiAddressToPte(BaseVa); 771 ASSERT(PointerPte->u.Hard.Valid == 1); 772 773 // 774 // Grab the PFN NextEntry and index 775 // 776 Pfn1 = MiGetPfnEntry(PFN_FROM_PTE(PointerPte)); 777 778 // 779 // Now mark it as the beginning of an allocation 780 // 781 ASSERT(Pfn1->u3.e1.StartOfAllocation == 0); 782 Pfn1->u3.e1.StartOfAllocation = 1; 783 784 /* Mark it as special pool if needed */ 785 ASSERT(Pfn1->u4.VerifierAllocation == 0); 786 if (PoolType & VERIFIER_POOL_MASK) 787 { 788 Pfn1->u4.VerifierAllocation = 1; 789 } 790 791 // 792 // Check if the allocation is larger than one page 793 // 794 if (SizeInPages != 1) 795 { 796 // 797 // Navigate to the last PFN entry and PTE 798 // 799 PointerPte += SizeInPages - 1; 800 ASSERT(PointerPte->u.Hard.Valid == 1); 801 Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber); 802 } 803 804 // 805 // Mark this PFN as the last (might be the same as the first) 806 // 807 ASSERT(Pfn1->u3.e1.EndOfAllocation == 0); 808 Pfn1->u3.e1.EndOfAllocation = 1; 809 810 // 811 // Release the nonpaged pool lock, and return the allocation 812 // 813 KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql); 814 #if DBG 815 RtlFillMemoryUlong(BaseVa, ROUND_TO_PAGES(SizeInBytes), 0xABABABAB); 816 #endif 817 return BaseVa; 818 } 819 820 // 821 // Try the next free page entry 822 // 823 NextEntry = FreeEntry->List.Flink; 824 825 /* Is freed non paged pool protected? */ 826 if (MmProtectFreedNonPagedPool) 827 { 828 /* Protect the freed pool! */ 829 MiProtectFreeNonPagedPool(FreeEntry, FreeEntry->Size); 830 } 831 } 832 } while (++NextHead < LastHead); 833 834 // 835 // If we got here, we're out of space. 836 // Start by releasing the lock 837 // 838 KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql); 839 840 // 841 // Allocate some system PTEs 842 // 843 StartPte = MiReserveSystemPtes(SizeInPages, NonPagedPoolExpansion); 844 PointerPte = StartPte; 845 if (StartPte == NULL) 846 { 847 // 848 // Ran out of memory 849 // 850 DPRINT1("Out of NP Expansion Pool\n"); 851 return NULL; 852 } 853 854 // 855 // Acquire the pool lock now 856 // 857 OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock); 858 859 // 860 // Lock the PFN database too 861 // 862 MiAcquirePfnLockAtDpcLevel(); 863 864 // 865 // Loop the pages 866 // 867 TempPte = ValidKernelPte; 868 do 869 { 870 /* Allocate a page */ 871 MI_SET_USAGE(MI_USAGE_PAGED_POOL); 872 MI_SET_PROCESS2("Kernel"); 873 PageFrameNumber = MiRemoveAnyPage(MI_GET_NEXT_COLOR()); 874 875 /* Get the PFN entry for it and fill it out */ 876 Pfn1 = MiGetPfnEntry(PageFrameNumber); 877 Pfn1->u3.e2.ReferenceCount = 1; 878 Pfn1->u2.ShareCount = 1; 879 Pfn1->PteAddress = PointerPte; 880 Pfn1->u3.e1.PageLocation = ActiveAndValid; 881 Pfn1->u4.VerifierAllocation = 0; 882 883 /* Write the PTE for it */ 884 TempPte.u.Hard.PageFrameNumber = PageFrameNumber; 885 MI_WRITE_VALID_PTE(PointerPte++, TempPte); 886 } while (--SizeInPages > 0); 887 888 // 889 // This is the last page 890 // 891 Pfn1->u3.e1.EndOfAllocation = 1; 892 893 // 894 // Get the first page and mark it as such 895 // 896 Pfn1 = MiGetPfnEntry(StartPte->u.Hard.PageFrameNumber); 897 Pfn1->u3.e1.StartOfAllocation = 1; 898 899 /* Mark it as a verifier allocation if needed */ 900 ASSERT(Pfn1->u4.VerifierAllocation == 0); 901 if (PoolType & VERIFIER_POOL_MASK) Pfn1->u4.VerifierAllocation = 1; 902 903 // 904 // Release the PFN and nonpaged pool lock 905 // 906 MiReleasePfnLockFromDpcLevel(); 907 KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql); 908 909 // 910 // Return the address 911 // 912 BaseVa = MiPteToAddress(StartPte); 913 #if DBG 914 RtlFillMemoryUlong(BaseVa, ROUND_TO_PAGES(SizeInBytes), 0xABABABAB); 915 #endif 916 return BaseVa; 917 } 918 919 ULONG 920 NTAPI 921 MiFreePoolPages(IN PVOID StartingVa) 922 { 923 PMMPTE PointerPte, StartPte; 924 PMMPFN Pfn1, StartPfn; 925 PFN_COUNT FreePages, NumberOfPages; 926 KIRQL OldIrql; 927 PMMFREE_POOL_ENTRY FreeEntry, NextEntry, LastEntry; 928 ULONG i, End; 929 ULONG_PTR Offset; 930 931 // 932 // Handle paged pool 933 // 934 if ((StartingVa >= MmPagedPoolStart) && (StartingVa <= MmPagedPoolEnd)) 935 { 936 // 937 // Calculate the offset from the beginning of paged pool, and convert it 938 // into pages 939 // 940 Offset = (ULONG_PTR)StartingVa - (ULONG_PTR)MmPagedPoolStart; 941 i = (ULONG)(Offset >> PAGE_SHIFT); 942 End = i; 943 944 // 945 // Now use the end bitmap to scan until we find a set bit, meaning that 946 // this allocation finishes here 947 // 948 while (!RtlTestBit(MmPagedPoolInfo.EndOfPagedPoolBitmap, End)) End++; 949 950 // 951 // Now calculate the total number of pages this allocation spans. If it's 952 // only one page, add it to the S-LIST instead of freeing it 953 // 954 NumberOfPages = End - i + 1; 955 if ((NumberOfPages == 1) && 956 (ExQueryDepthSList(&MiPagedPoolSListHead) < MiPagedPoolSListMaximum)) 957 { 958 InterlockedPushEntrySList(&MiPagedPoolSListHead, StartingVa); 959 return 1; 960 } 961 962 /* Delete the actual pages */ 963 PointerPte = MmPagedPoolInfo.FirstPteForPagedPool + i; 964 FreePages = MiDeleteSystemPageableVm(PointerPte, NumberOfPages, 0, NULL); 965 ASSERT(FreePages == NumberOfPages); 966 967 // 968 // Acquire the paged pool lock 969 // 970 KeAcquireGuardedMutex(&MmPagedPoolMutex); 971 972 // 973 // Clear the allocation and free bits 974 // 975 RtlClearBit(MmPagedPoolInfo.EndOfPagedPoolBitmap, End); 976 RtlClearBits(MmPagedPoolInfo.PagedPoolAllocationMap, i, NumberOfPages); 977 978 // 979 // Update the hint if we need to 980 // 981 if (i < MmPagedPoolInfo.PagedPoolHint) MmPagedPoolInfo.PagedPoolHint = i; 982 983 // 984 // Release the lock protecting the bitmaps 985 // 986 KeReleaseGuardedMutex(&MmPagedPoolMutex); 987 988 // 989 // And finally return the number of pages freed 990 // 991 return NumberOfPages; 992 } 993 994 // 995 // Get the first PTE and its corresponding PFN entry. If this is also the 996 // last PTE, meaning that this allocation was only for one page, push it into 997 // the S-LIST instead of freeing it 998 // 999 StartPte = PointerPte = MiAddressToPte(StartingVa); 1000 StartPfn = Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber); 1001 if ((Pfn1->u3.e1.EndOfAllocation == 1) && 1002 (ExQueryDepthSList(&MiNonPagedPoolSListHead) < MiNonPagedPoolSListMaximum)) 1003 { 1004 InterlockedPushEntrySList(&MiNonPagedPoolSListHead, StartingVa); 1005 return 1; 1006 } 1007 1008 // 1009 // Loop until we find the last PTE 1010 // 1011 while (Pfn1->u3.e1.EndOfAllocation == 0) 1012 { 1013 // 1014 // Keep going 1015 // 1016 PointerPte++; 1017 Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber); 1018 } 1019 1020 // 1021 // Now we know how many pages we have 1022 // 1023 NumberOfPages = (PFN_COUNT)(PointerPte - StartPte + 1); 1024 1025 // 1026 // Acquire the nonpaged pool lock 1027 // 1028 OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock); 1029 1030 // 1031 // Mark the first and last PTEs as not part of an allocation anymore 1032 // 1033 StartPfn->u3.e1.StartOfAllocation = 0; 1034 Pfn1->u3.e1.EndOfAllocation = 0; 1035 1036 // 1037 // Assume we will free as many pages as the allocation was 1038 // 1039 FreePages = NumberOfPages; 1040 1041 // 1042 // Peek one page past the end of the allocation 1043 // 1044 PointerPte++; 1045 1046 // 1047 // Guard against going past initial nonpaged pool 1048 // 1049 if (MiGetPfnEntryIndex(Pfn1) == MiEndOfInitialPoolFrame) 1050 { 1051 // 1052 // This page is on the outskirts of initial nonpaged pool, so ignore it 1053 // 1054 Pfn1 = NULL; 1055 } 1056 else 1057 { 1058 /* Sanity check */ 1059 ASSERT((ULONG_PTR)StartingVa + NumberOfPages <= (ULONG_PTR)MmNonPagedPoolEnd); 1060 1061 /* Check if protected pool is enabled */ 1062 if (MmProtectFreedNonPagedPool) 1063 { 1064 /* The freed block will be merged, it must be made accessible */ 1065 MiUnProtectFreeNonPagedPool(MiPteToAddress(PointerPte), 0); 1066 } 1067 1068 // 1069 // Otherwise, our entire allocation must've fit within the initial non 1070 // paged pool, or the expansion nonpaged pool, so get the PFN entry of 1071 // the next allocation 1072 // 1073 if (PointerPte->u.Hard.Valid == 1) 1074 { 1075 // 1076 // It's either expansion or initial: get the PFN entry 1077 // 1078 Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber); 1079 } 1080 else 1081 { 1082 // 1083 // This means we've reached the guard page that protects the end of 1084 // the expansion nonpaged pool 1085 // 1086 Pfn1 = NULL; 1087 } 1088 1089 } 1090 1091 // 1092 // Check if this allocation actually exists 1093 // 1094 if ((Pfn1) && (Pfn1->u3.e1.StartOfAllocation == 0)) 1095 { 1096 // 1097 // It doesn't, so we should actually locate a free entry descriptor 1098 // 1099 FreeEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)StartingVa + 1100 (NumberOfPages << PAGE_SHIFT)); 1101 ASSERT(FreeEntry->Signature == MM_FREE_POOL_SIGNATURE); 1102 ASSERT(FreeEntry->Owner == FreeEntry); 1103 1104 /* Consume this entry's pages */ 1105 FreePages += FreeEntry->Size; 1106 1107 /* Remove the item from the list, depending if pool is protected */ 1108 if (MmProtectFreedNonPagedPool) 1109 MiProtectedPoolRemoveEntryList(&FreeEntry->List); 1110 else 1111 RemoveEntryList(&FreeEntry->List); 1112 } 1113 1114 // 1115 // Now get the official free entry we'll create for the caller's allocation 1116 // 1117 FreeEntry = StartingVa; 1118 1119 // 1120 // Check if the our allocation is the very first page 1121 // 1122 if (MiGetPfnEntryIndex(StartPfn) == MiStartOfInitialPoolFrame) 1123 { 1124 // 1125 // Then we can't do anything or we'll risk underflowing 1126 // 1127 Pfn1 = NULL; 1128 } 1129 else 1130 { 1131 // 1132 // Otherwise, get the PTE for the page right before our allocation 1133 // 1134 PointerPte -= NumberOfPages + 1; 1135 1136 /* Check if protected pool is enabled */ 1137 if (MmProtectFreedNonPagedPool) 1138 { 1139 /* The freed block will be merged, it must be made accessible */ 1140 MiUnProtectFreeNonPagedPool(MiPteToAddress(PointerPte), 0); 1141 } 1142 1143 /* Check if this is valid pool, or a guard page */ 1144 if (PointerPte->u.Hard.Valid == 1) 1145 { 1146 // 1147 // It's either expansion or initial nonpaged pool, get the PFN entry 1148 // 1149 Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber); 1150 } 1151 else 1152 { 1153 // 1154 // We must've reached the guard page, so don't risk touching it 1155 // 1156 Pfn1 = NULL; 1157 } 1158 } 1159 1160 // 1161 // Check if there is a valid PFN entry for the page before the allocation 1162 // and then check if this page was actually the end of an allocation. 1163 // If it wasn't, then we know for sure it's a free page 1164 // 1165 if ((Pfn1) && (Pfn1->u3.e1.EndOfAllocation == 0)) 1166 { 1167 // 1168 // Get the free entry descriptor for that given page range 1169 // 1170 FreeEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)StartingVa - PAGE_SIZE); 1171 ASSERT(FreeEntry->Signature == MM_FREE_POOL_SIGNATURE); 1172 FreeEntry = FreeEntry->Owner; 1173 1174 /* Check if protected pool is enabled */ 1175 if (MmProtectFreedNonPagedPool) 1176 { 1177 /* The freed block will be merged, it must be made accessible */ 1178 MiUnProtectFreeNonPagedPool(FreeEntry, 0); 1179 } 1180 1181 // 1182 // Check if the entry is small enough to be indexed on a free list 1183 // If it is, we'll want to re-insert it, since we're about to 1184 // collapse our pages on top of it, which will change its count 1185 // 1186 if (FreeEntry->Size < (MI_MAX_FREE_PAGE_LISTS - 1)) 1187 { 1188 /* Remove the item from the list, depending if pool is protected */ 1189 if (MmProtectFreedNonPagedPool) 1190 MiProtectedPoolRemoveEntryList(&FreeEntry->List); 1191 else 1192 RemoveEntryList(&FreeEntry->List); 1193 1194 // 1195 // Update its size 1196 // 1197 FreeEntry->Size += FreePages; 1198 1199 // 1200 // And now find the new appropriate list to place it in 1201 // 1202 i = (ULONG)(FreeEntry->Size - 1); 1203 if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1; 1204 1205 /* Insert the entry into the free list head, check for prot. pool */ 1206 if (MmProtectFreedNonPagedPool) 1207 MiProtectedPoolInsertList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List, TRUE); 1208 else 1209 InsertTailList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List); 1210 } 1211 else 1212 { 1213 // 1214 // Otherwise, just combine our free pages into this entry 1215 // 1216 FreeEntry->Size += FreePages; 1217 } 1218 } 1219 1220 // 1221 // Check if we were unable to do any compaction, and we'll stick with this 1222 // 1223 if (FreeEntry == StartingVa) 1224 { 1225 // 1226 // Well, now we are a free entry. At worse we just have our newly freed 1227 // pages, at best we have our pages plus whatever entry came after us 1228 // 1229 FreeEntry->Size = FreePages; 1230 1231 // 1232 // Find the appropriate list we should be on 1233 // 1234 i = FreeEntry->Size - 1; 1235 if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1; 1236 1237 /* Insert the entry into the free list head, check for prot. pool */ 1238 if (MmProtectFreedNonPagedPool) 1239 MiProtectedPoolInsertList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List, TRUE); 1240 else 1241 InsertTailList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List); 1242 } 1243 1244 // 1245 // Just a sanity check 1246 // 1247 ASSERT(FreePages != 0); 1248 1249 // 1250 // Get all the pages between our allocation and its end. These will all now 1251 // become free page chunks. 1252 // 1253 NextEntry = StartingVa; 1254 LastEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)NextEntry + (FreePages << PAGE_SHIFT)); 1255 do 1256 { 1257 // 1258 // Link back to the parent free entry, and keep going 1259 // 1260 NextEntry->Owner = FreeEntry; 1261 NextEntry->Signature = MM_FREE_POOL_SIGNATURE; 1262 NextEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)NextEntry + PAGE_SIZE); 1263 } while (NextEntry != LastEntry); 1264 1265 /* Is freed non paged pool protected? */ 1266 if (MmProtectFreedNonPagedPool) 1267 { 1268 /* Protect the freed pool! */ 1269 MiProtectFreeNonPagedPool(FreeEntry, FreeEntry->Size); 1270 } 1271 1272 // 1273 // We're done, release the lock and let the caller know how much we freed 1274 // 1275 KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql); 1276 return NumberOfPages; 1277 } 1278 1279 1280 BOOLEAN 1281 NTAPI 1282 MiRaisePoolQuota(IN POOL_TYPE PoolType, 1283 IN ULONG CurrentMaxQuota, 1284 OUT PULONG NewMaxQuota) 1285 { 1286 // 1287 // Not implemented 1288 // 1289 UNIMPLEMENTED; 1290 *NewMaxQuota = CurrentMaxQuota + 65536; 1291 return TRUE; 1292 } 1293 1294 NTSTATUS 1295 NTAPI 1296 MiInitializeSessionPool(VOID) 1297 { 1298 PMMPTE PointerPte, LastPte; 1299 PMMPDE PointerPde, LastPde; 1300 PFN_NUMBER PageFrameIndex, PdeCount; 1301 PPOOL_DESCRIPTOR PoolDescriptor; 1302 PMM_SESSION_SPACE SessionGlobal; 1303 PMM_PAGED_POOL_INFO PagedPoolInfo; 1304 NTSTATUS Status; 1305 ULONG Index, PoolSize, BitmapSize; 1306 PAGED_CODE(); 1307 1308 /* Lock session pool */ 1309 SessionGlobal = MmSessionSpace->GlobalVirtualAddress; 1310 KeInitializeGuardedMutex(&SessionGlobal->PagedPoolMutex); 1311 1312 /* Setup a valid pool descriptor */ 1313 PoolDescriptor = &MmSessionSpace->PagedPool; 1314 ExInitializePoolDescriptor(PoolDescriptor, 1315 PagedPoolSession, 1316 0, 1317 0, 1318 &SessionGlobal->PagedPoolMutex); 1319 1320 /* Setup the pool addresses */ 1321 MmSessionSpace->PagedPoolStart = (PVOID)MiSessionPoolStart; 1322 MmSessionSpace->PagedPoolEnd = (PVOID)((ULONG_PTR)MiSessionPoolEnd - 1); 1323 DPRINT1("Session Pool Start: 0x%p End: 0x%p\n", 1324 MmSessionSpace->PagedPoolStart, MmSessionSpace->PagedPoolEnd); 1325 1326 /* Reset all the counters */ 1327 PagedPoolInfo = &MmSessionSpace->PagedPoolInfo; 1328 PagedPoolInfo->PagedPoolCommit = 0; 1329 PagedPoolInfo->PagedPoolHint = 0; 1330 PagedPoolInfo->AllocatedPagedPool = 0; 1331 1332 /* Compute PDE and PTE addresses */ 1333 PointerPde = MiAddressToPde(MmSessionSpace->PagedPoolStart); 1334 PointerPte = MiAddressToPte(MmSessionSpace->PagedPoolStart); 1335 LastPde = MiAddressToPde(MmSessionSpace->PagedPoolEnd); 1336 LastPte = MiAddressToPte(MmSessionSpace->PagedPoolEnd); 1337 1338 /* Write them down */ 1339 MmSessionSpace->PagedPoolBasePde = PointerPde; 1340 PagedPoolInfo->FirstPteForPagedPool = PointerPte; 1341 PagedPoolInfo->LastPteForPagedPool = LastPte; 1342 PagedPoolInfo->NextPdeForPagedPoolExpansion = PointerPde + 1; 1343 1344 /* Zero the PDEs */ 1345 PdeCount = LastPde - PointerPde; 1346 RtlZeroMemory(PointerPde, (PdeCount + 1) * sizeof(MMPTE)); 1347 1348 /* Initialize the PFN for the PDE */ 1349 Status = MiInitializeAndChargePfn(&PageFrameIndex, 1350 PointerPde, 1351 MmSessionSpace->SessionPageDirectoryIndex, 1352 TRUE); 1353 ASSERT(NT_SUCCESS(Status) == TRUE); 1354 1355 /* Initialize the first page table */ 1356 Index = (ULONG_PTR)MmSessionSpace->PagedPoolStart - (ULONG_PTR)MmSessionBase; 1357 Index >>= 22; 1358 #ifndef _M_AMD64 // FIXME 1359 ASSERT(MmSessionSpace->PageTables[Index].u.Long == 0); 1360 MmSessionSpace->PageTables[Index] = *PointerPde; 1361 #endif 1362 1363 /* Bump up counters */ 1364 InterlockedIncrementSizeT(&MmSessionSpace->NonPageablePages); 1365 InterlockedIncrementSizeT(&MmSessionSpace->CommittedPages); 1366 1367 /* Compute the size of the pool in pages, and of the bitmap for it */ 1368 PoolSize = MmSessionPoolSize >> PAGE_SHIFT; 1369 BitmapSize = sizeof(RTL_BITMAP) + ((PoolSize + 31) / 32) * sizeof(ULONG); 1370 1371 /* Allocate and initialize the bitmap to track allocations */ 1372 PagedPoolInfo->PagedPoolAllocationMap = ExAllocatePoolWithTag(NonPagedPool, 1373 BitmapSize, 1374 TAG_MM); 1375 ASSERT(PagedPoolInfo->PagedPoolAllocationMap != NULL); 1376 RtlInitializeBitMap(PagedPoolInfo->PagedPoolAllocationMap, 1377 (PULONG)(PagedPoolInfo->PagedPoolAllocationMap + 1), 1378 PoolSize); 1379 1380 /* Set all bits, but clear the first page table's worth */ 1381 RtlSetAllBits(PagedPoolInfo->PagedPoolAllocationMap); 1382 RtlClearBits(PagedPoolInfo->PagedPoolAllocationMap, 0, PTE_PER_PAGE); 1383 1384 /* Allocate and initialize the bitmap to track free space */ 1385 PagedPoolInfo->EndOfPagedPoolBitmap = ExAllocatePoolWithTag(NonPagedPool, 1386 BitmapSize, 1387 TAG_MM); 1388 ASSERT(PagedPoolInfo->EndOfPagedPoolBitmap != NULL); 1389 RtlInitializeBitMap(PagedPoolInfo->EndOfPagedPoolBitmap, 1390 (PULONG)(PagedPoolInfo->EndOfPagedPoolBitmap + 1), 1391 PoolSize); 1392 1393 /* Clear all the bits and return success */ 1394 RtlClearAllBits(PagedPoolInfo->EndOfPagedPoolBitmap); 1395 return STATUS_SUCCESS; 1396 } 1397 1398 /* PUBLIC FUNCTIONS ***********************************************************/ 1399 1400 /* 1401 * @unimplemented 1402 */ 1403 PVOID 1404 NTAPI 1405 MmAllocateMappingAddress(IN SIZE_T NumberOfBytes, 1406 IN ULONG PoolTag) 1407 { 1408 UNIMPLEMENTED; 1409 return NULL; 1410 } 1411 1412 /* 1413 * @unimplemented 1414 */ 1415 VOID 1416 NTAPI 1417 MmFreeMappingAddress(IN PVOID BaseAddress, 1418 IN ULONG PoolTag) 1419 { 1420 UNIMPLEMENTED; 1421 } 1422 1423 /* EOF */ 1424