/*
 * PROJECT:         ReactOS Kernel
 * LICENSE:         BSD - See COPYING.ARM in the top level directory
 * FILE:            ntoskrnl/mm/ARM3/pool.c
 * PURPOSE:         ARM Memory Manager Pool Allocator
 * PROGRAMMERS:     ReactOS Portable Systems Group
 */

/* INCLUDES *******************************************************************/

#include <ntoskrnl.h>
#define NDEBUG
#include <debug.h>

#define MODULE_INVOLVED_IN_ARM3
#include <mm/ARM3/miarm.h>

/* GLOBALS ********************************************************************/

LIST_ENTRY MmNonPagedPoolFreeListHead[MI_MAX_FREE_PAGE_LISTS];
PFN_COUNT MmNumberOfFreeNonPagedPool, MiExpansionPoolPagesInitialCharge;
PVOID MmNonPagedPoolEnd0;
PFN_NUMBER MiStartOfInitialPoolFrame, MiEndOfInitialPoolFrame;
KGUARDED_MUTEX MmPagedPoolMutex;
MM_PAGED_POOL_INFO MmPagedPoolInfo;
SIZE_T MmAllocatedNonPagedPool;
ULONG MmSpecialPoolTag;
ULONG MmConsumedPoolPercentage;
BOOLEAN MmProtectFreedNonPagedPool;
SLIST_HEADER MiNonPagedPoolSListHead;
ULONG MiNonPagedPoolSListMaximum = 4;
SLIST_HEADER MiPagedPoolSListHead;
ULONG MiPagedPoolSListMaximum = 8;

/* PRIVATE FUNCTIONS **********************************************************/

VOID
NTAPI
MiProtectFreeNonPagedPool(IN PVOID VirtualAddress,
                          IN ULONG PageCount)
{
    PMMPTE PointerPte, LastPte;
    MMPTE TempPte;

    /* If pool is physical, can't protect PTEs */
    if (MI_IS_PHYSICAL_ADDRESS(VirtualAddress)) return;

    /* Get PTE pointers and loop */
    PointerPte = MiAddressToPte(VirtualAddress);
    LastPte = PointerPte + PageCount;
    do
    {
        /* Capture the PTE for safety */
        TempPte = *PointerPte;

        /* Mark it as an invalid PTE, set proto bit to recognize it as pool */
        TempPte.u.Hard.Valid = 0;
        TempPte.u.Soft.Prototype = 1;
        MI_WRITE_INVALID_PTE(PointerPte, TempPte);
    } while (++PointerPte < LastPte);

    /* Flush the TLB */
    KeFlushEntireTb(TRUE, TRUE);
}
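
//
// Illustrative sketch (not part of the build): with the protection scheme
// above, a protected free-pool page is recognizable purely from its PTE
// state. Assuming the same MMPTE layout, a hypothetical checker would be:
//
//     BOOLEAN
//     MipIsProtectedFreePoolPage(IN PVOID VirtualAddress)
//     {
//         PMMPTE PointerPte = MiAddressToPte(VirtualAddress);
//         return (PointerPte->u.Hard.Valid == 0) &&
//                (PointerPte->u.Soft.Prototype == 1);
//     }
//
// Any actual access to such a page faults on an invalid, prototype-marked
// PTE, so a use-after-free of pool is caught immediately instead of
// silently corrupting the free list.
//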

BOOLEAN
NTAPI
MiUnProtectFreeNonPagedPool(IN PVOID VirtualAddress,
                            IN ULONG PageCount)
{
    PMMPTE PointerPte;
    MMPTE TempPte;
    PFN_NUMBER UnprotectedPages = 0;

    /* If pool is physical, can't unprotect PTEs */
    if (MI_IS_PHYSICAL_ADDRESS(VirtualAddress)) return FALSE;

    /* Get, and capture the PTE */
    PointerPte = MiAddressToPte(VirtualAddress);
    TempPte = *PointerPte;

    /* Loop protected PTEs */
    while ((TempPte.u.Hard.Valid == 0) && (TempPte.u.Soft.Prototype == 1))
    {
        /* Unprotect the PTE */
        TempPte.u.Hard.Valid = 1;
        TempPte.u.Soft.Prototype = 0;
        MI_WRITE_VALID_PTE(PointerPte, TempPte);

        /* One more page */
        if (++UnprotectedPages == PageCount) break;

        /* Capture next PTE */
        TempPte = *(++PointerPte);
    }

    /* Return whether any pages were unprotected */
    return UnprotectedPages ? TRUE : FALSE;
}

FORCEINLINE
VOID
MiProtectedPoolUnProtectLinks(IN PLIST_ENTRY Links,
                              OUT PVOID* PoolFlink,
                              OUT PVOID* PoolBlink)
{
    BOOLEAN Safe;
    PVOID PoolVa;

    /* Initialize variables */
    *PoolFlink = *PoolBlink = NULL;

    /* Check if the list has entries */
    if (IsListEmpty(Links) == FALSE)
    {
        /* We are going to need the forward link to do an insert */
        PoolVa = Links->Flink;

        /* So make it safe to access */
        Safe = MiUnProtectFreeNonPagedPool(PoolVa, 1);
        if (Safe) *PoolFlink = PoolVa;
    }

    /* Are we going to need a backward link too? */
    if (Links != Links->Blink)
    {
        /* Get the head's backward link for the insert */
        PoolVa = Links->Blink;

        /* Make it safe to access */
        Safe = MiUnProtectFreeNonPagedPool(PoolVa, 1);
        if (Safe) *PoolBlink = PoolVa;
    }
}

FORCEINLINE
VOID
MiProtectedPoolProtectLinks(IN PVOID PoolFlink,
                            IN PVOID PoolBlink)
{
    /* Reprotect the pages, if they got unprotected earlier */
    if (PoolFlink) MiProtectFreeNonPagedPool(PoolFlink, 1);
    if (PoolBlink) MiProtectFreeNonPagedPool(PoolBlink, 1);
}

VOID
NTAPI
MiProtectedPoolInsertList(IN PLIST_ENTRY ListHead,
                          IN PLIST_ENTRY Entry,
                          IN BOOLEAN Critical)
{
    PVOID PoolFlink, PoolBlink;

    /* Make the list accessible */
    MiProtectedPoolUnProtectLinks(ListHead, &PoolFlink, &PoolBlink);

    /* Now insert in the right position */
    Critical ? InsertHeadList(ListHead, Entry) : InsertTailList(ListHead, Entry);

    /* And reprotect the pages containing the free links */
    MiProtectedPoolProtectLinks(PoolFlink, PoolBlink);
}

VOID
NTAPI
MiProtectedPoolRemoveEntryList(IN PLIST_ENTRY Entry)
{
    PVOID PoolFlink, PoolBlink;

    /* Make the list accessible */
    MiProtectedPoolUnProtectLinks(Entry, &PoolFlink, &PoolBlink);

    /* Now remove */
    RemoveEntryList(Entry);

    /* And reprotect the pages containing the free links */
    if (PoolFlink) MiProtectFreeNonPagedPool(PoolFlink, 1);
    if (PoolBlink) MiProtectFreeNonPagedPool(PoolBlink, 1);
}

VOID
NTAPI
INIT_FUNCTION
MiInitializeNonPagedPoolThresholds(VOID)
{
    PFN_NUMBER Size = MmMaximumNonPagedPoolInPages;

    /* Default low threshold of 8MB, or one third of nonpaged pool, whichever is smaller */
    MiLowNonPagedPoolThreshold = (8 * _1MB) >> PAGE_SHIFT;
    MiLowNonPagedPoolThreshold = min(MiLowNonPagedPoolThreshold, Size / 3);

    /* Default high threshold of 20MB, or 50% of nonpaged pool, whichever is smaller */
    MiHighNonPagedPoolThreshold = (20 * _1MB) >> PAGE_SHIFT;
    MiHighNonPagedPoolThreshold = min(MiHighNonPagedPoolThreshold, Size / 2);
    ASSERT(MiLowNonPagedPoolThreshold < MiHighNonPagedPoolThreshold);
}
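
//
// Worked example (illustrative only): on a 4KB-page machine with a maximum
// nonpaged pool of 96MB, Size = 24576 pages. The low threshold becomes
// min(8MB >> PAGE_SHIFT, 24576 / 3) = min(2048, 8192) = 2048 pages (8MB),
// and the high threshold becomes min(20MB >> PAGE_SHIFT, 24576 / 2) =
// min(5120, 12288) = 5120 pages (20MB). Only on small pools (under 24MB and
// 40MB respectively) do the Size-derived caps win instead.
//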

VOID
NTAPI
INIT_FUNCTION
MiInitializePoolEvents(VOID)
{
    KIRQL OldIrql;
    PFN_NUMBER FreePoolInPages;

    /* Lock paged pool */
    KeAcquireGuardedMutex(&MmPagedPoolMutex);

    /* The total size of paged pool, minus the allocated size, is free */
    FreePoolInPages = MmSizeOfPagedPoolInPages - MmPagedPoolInfo.AllocatedPagedPool;

    /* Check the initial high state */
    if (FreePoolInPages >= MiHighPagedPoolThreshold)
    {
        /* We have plenty of pool */
        KeSetEvent(MiHighPagedPoolEvent, 0, FALSE);
    }
    else
    {
        /* We don't */
        KeClearEvent(MiHighPagedPoolEvent);
    }

    /* Check the initial low state */
    if (FreePoolInPages <= MiLowPagedPoolThreshold)
    {
        /* We're very low on free pool memory */
        KeSetEvent(MiLowPagedPoolEvent, 0, FALSE);
    }
    else
    {
        /* We're not */
        KeClearEvent(MiLowPagedPoolEvent);
    }

    /* Release the paged pool lock */
    KeReleaseGuardedMutex(&MmPagedPoolMutex);

    /* Now it's time for the nonpaged pool lock */
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);

    /* Free pages are the maximum minus what's been allocated */
    FreePoolInPages = MmMaximumNonPagedPoolInPages - MmAllocatedNonPagedPool;

    /* Check if we have plenty */
    if (FreePoolInPages >= MiHighNonPagedPoolThreshold)
    {
        /* We do, set the event */
        KeSetEvent(MiHighNonPagedPoolEvent, 0, FALSE);
    }
    else
    {
        /* We don't, clear the event */
        KeClearEvent(MiHighNonPagedPoolEvent);
    }

    /* Check if we have very little */
    if (FreePoolInPages <= MiLowNonPagedPoolThreshold)
    {
        /* We do, set the event */
        KeSetEvent(MiLowNonPagedPoolEvent, 0, FALSE);
    }
    else
    {
        /* We don't, clear it */
        KeClearEvent(MiLowNonPagedPoolEvent);
    }

    /* We're done, release the nonpaged pool lock */
    KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);
}
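
//
// Illustrative sketch (not part of the build): these event objects back the
// documented \KernelObjects pool-condition events, which a driver can open
// and wait on. Assuming the usual ZwOpenEvent pattern, a consumer might do:
//
//     UNICODE_STRING Name;
//     OBJECT_ATTRIBUTES Attributes;
//     HANDLE EventHandle;
//     NTSTATUS Status;
//
//     RtlInitUnicodeString(&Name,
//                          L"\\KernelObjects\\LowNonPagedPoolCondition");
//     InitializeObjectAttributes(&Attributes, &Name,
//                                OBJ_CASE_INSENSITIVE | OBJ_KERNEL_HANDLE,
//                                NULL, NULL);
//     Status = ZwOpenEvent(&EventHandle, SYNCHRONIZE, &Attributes);
//
// and then wait on the handle to learn when free nonpaged pool has dropped
// below MiLowNonPagedPoolThreshold.
//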

VOID
NTAPI
INIT_FUNCTION
MiInitializeNonPagedPool(VOID)
{
    ULONG i;
    PFN_COUNT PoolPages;
    PMMFREE_POOL_ENTRY FreeEntry, FirstEntry;
    PMMPTE PointerPte;
    PAGED_CODE();

    //
    // Initialize the pool S-LISTs as well as their maximum count. In general,
    // we'll allow 8 times the default on a 2GB system, and two times the default
    // on a 1GB system.
    //
    InitializeSListHead(&MiPagedPoolSListHead);
    InitializeSListHead(&MiNonPagedPoolSListHead);
    if (MmNumberOfPhysicalPages >= ((2 * _1GB) / PAGE_SIZE))
    {
        MiNonPagedPoolSListMaximum *= 8;
        MiPagedPoolSListMaximum *= 8;
    }
    else if (MmNumberOfPhysicalPages >= (_1GB / PAGE_SIZE))
    {
        MiNonPagedPoolSListMaximum *= 2;
        MiPagedPoolSListMaximum *= 2;
    }

    //
    // However, if debugging options for the pool are enabled, turn off the S-LIST
    // to reduce the risk of messing things up even more
    //
    if (MmProtectFreedNonPagedPool)
    {
        MiNonPagedPoolSListMaximum = 0;
        MiPagedPoolSListMaximum = 0;
    }

    //
    // We keep 4 lists of free pages (4 lists help avoid contention)
    //
    for (i = 0; i < MI_MAX_FREE_PAGE_LISTS; i++)
    {
        //
        // Initialize each of them
        //
        InitializeListHead(&MmNonPagedPoolFreeListHead[i]);
    }

    //
    // Calculate how many pages the initial nonpaged pool has
    //
    PoolPages = (PFN_COUNT)BYTES_TO_PAGES(MmSizeOfNonPagedPoolInBytes);
    MmNumberOfFreeNonPagedPool = PoolPages;

    //
    // Initialize the first free entry
    //
    FreeEntry = MmNonPagedPoolStart;
    FirstEntry = FreeEntry;
    FreeEntry->Size = PoolPages;
    FreeEntry->Signature = MM_FREE_POOL_SIGNATURE;
    FreeEntry->Owner = FirstEntry;

    //
    // Insert it into the last list
    //
    InsertHeadList(&MmNonPagedPoolFreeListHead[MI_MAX_FREE_PAGE_LISTS - 1],
                   &FreeEntry->List);

    //
    // Now create free entries for every single other page
    //
    while (PoolPages-- > 1)
    {
        //
        // Link them all back to the original entry
        //
        FreeEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)FreeEntry + PAGE_SIZE);
        FreeEntry->Owner = FirstEntry;
        FreeEntry->Signature = MM_FREE_POOL_SIGNATURE;
    }

    //
    // Validate and remember the first allocated pool page
    //
    PointerPte = MiAddressToPte(MmNonPagedPoolStart);
    ASSERT(PointerPte->u.Hard.Valid == 1);
    MiStartOfInitialPoolFrame = PFN_FROM_PTE(PointerPte);

    //
    // Keep track of where the initial nonpaged pool ends
    //
    MmNonPagedPoolEnd0 = (PVOID)((ULONG_PTR)MmNonPagedPoolStart +
                                 MmSizeOfNonPagedPoolInBytes);

    //
    // Validate and remember the last allocated pool page
    //
    PointerPte = MiAddressToPte((PVOID)((ULONG_PTR)MmNonPagedPoolEnd0 - 1));
    ASSERT(PointerPte->u.Hard.Valid == 1);
    MiEndOfInitialPoolFrame = PFN_FROM_PTE(PointerPte);

    //
    // Validate the first nonpaged pool expansion page (which is a guard page)
    //
    PointerPte = MiAddressToPte(MmNonPagedPoolExpansionStart);
    ASSERT(PointerPte->u.Hard.Valid == 0);

    //
    // Calculate the size of the expansion region alone
    //
    MiExpansionPoolPagesInitialCharge = (PFN_COUNT)
    BYTES_TO_PAGES(MmMaximumNonPagedPoolInBytes - MmSizeOfNonPagedPoolInBytes);

    //
    // Remove 2 pages, since there's a guard page on top and on the bottom
    //
    MiExpansionPoolPagesInitialCharge -= 2;

    //
    // Now initialize the nonpaged pool expansion PTE space. Remember there's a
    // guard page on top, so make sure to skip it. The bottom guard page will be
    // guaranteed by the fact our size is off by one.
    //
    MiInitializeSystemPtes(PointerPte + 1,
                           MiExpansionPoolPagesInitialCharge,
                           NonPagedPoolExpansion);
}
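
//
// Layout sketch (illustrative only): after initialization, the whole initial
// nonpaged pool is one MMFREE_POOL_ENTRY at MmNonPagedPoolStart, linked into
// the last (largest-run) free list, with every page in the range stamped so
// any page can be traced back to the entry that owns it:
//
//     MmNonPagedPoolStart                            MmNonPagedPoolEnd0
//     +------------------+----------------+-----+----------------+
//     | Entry: Size = N  | Owner = first  | ... | Owner = first  |
//     | Owner = itself   | Signature      |     | Signature      |
//     +------------------+----------------+-----+----------------+
//
//     MmNonPagedPoolFreeListHead[MI_MAX_FREE_PAGE_LISTS - 1] --> first entry
//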

POOL_TYPE
NTAPI
MmDeterminePoolType(IN PVOID PoolAddress)
{
    //
    // Use a simple bounds check
    //
    if (PoolAddress >= MmPagedPoolStart && PoolAddress <= MmPagedPoolEnd)
        return PagedPool;
    else if (PoolAddress >= MmNonPagedPoolStart && PoolAddress <= MmNonPagedPoolEnd)
        return NonPagedPool;
    KeBugCheckEx(BAD_POOL_CALLER, 0x42, (ULONG_PTR)PoolAddress, 0, 0);
}
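
//
// Usage sketch (illustrative only): note that an address belonging to
// neither pool does not return; it raises a BAD_POOL_CALLER bugcheck. A
// hypothetical caller therefore only hands in known pool addresses:
//
//     POOL_TYPE Type = MmDeterminePoolType(Allocation);
//     if (Type == PagedPool)
//         ASSERT(KeGetCurrentIrql() <= APC_LEVEL);
//     else
//         ASSERT(KeGetCurrentIrql() <= DISPATCH_LEVEL);
//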

PVOID
NTAPI
MiAllocatePoolPages(IN POOL_TYPE PoolType,
                    IN SIZE_T SizeInBytes)
{
    PFN_NUMBER PageFrameNumber;
    PFN_COUNT SizeInPages, PageTableCount;
    ULONG i;
    KIRQL OldIrql;
    PLIST_ENTRY NextEntry, NextHead, LastHead;
    PMMPTE PointerPte, StartPte;
    PMMPDE PointerPde;
    ULONG EndAllocation;
    MMPTE TempPte;
    MMPDE TempPde;
    PMMPFN Pfn1;
    PVOID BaseVa, BaseVaStart;
    PMMFREE_POOL_ENTRY FreeEntry;
    PKSPIN_LOCK_QUEUE LockQueue;

    //
    // Figure out how big the allocation is in pages
    //
    SizeInPages = (PFN_COUNT)BYTES_TO_PAGES(SizeInBytes);

    //
    // Check for overflow (the page count wrapped to zero)
    //
    if (SizeInPages == 0)
    {
        //
        // Fail
        //
        return NULL;
    }
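
    //
    // Overflow example (illustrative only): PFN_COUNT is a 32-bit count, so
    // on a 64-bit build a request such as SizeInBytes = 1ULL << 44 computes
    // BYTES_TO_PAGES() = 1ULL << 32, which truncates to 0 when cast to
    // PFN_COUNT; a zero-byte request produces 0 directly. Both degenerate
    // cases are rejected by the check above.
    //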

    //
    // Handle paged pool
    //
    if ((PoolType & BASE_POOL_TYPE_MASK) == PagedPool)
    {
        //
        // If only one page is being requested, try to grab it from the S-LIST
        //
        if ((SizeInPages == 1) && (ExQueryDepthSList(&MiPagedPoolSListHead)))
        {
            BaseVa = InterlockedPopEntrySList(&MiPagedPoolSListHead);
            if (BaseVa) return BaseVa;
        }

        //
        // Lock the paged pool mutex
        //
        KeAcquireGuardedMutex(&MmPagedPoolMutex);

        //
        // Find some empty allocation space
        //
        i = RtlFindClearBitsAndSet(MmPagedPoolInfo.PagedPoolAllocationMap,
                                   SizeInPages,
                                   MmPagedPoolInfo.PagedPoolHint);
        if (i == 0xFFFFFFFF)
        {
            //
            // Compute how many page tables this request needs
            //
            i = ((SizeInPages - 1) / PTE_COUNT) + 1;
            DPRINT("Paged pool expansion: %lu %x\n", i, SizeInPages);

            //
            // Check if there is enough paged pool expansion space left
            //
            if (MmPagedPoolInfo.NextPdeForPagedPoolExpansion >
                (PMMPDE)MiAddressToPte(MmPagedPoolInfo.LastPteForPagedPool))
            {
                //
                // Out of memory!
                //
                DPRINT1("FAILED to allocate %Iu bytes from paged pool\n", SizeInBytes);
                KeReleaseGuardedMutex(&MmPagedPoolMutex);
                return NULL;
            }

            //
            // Check if we'll have to expand past the last PTE we have available
            //
            if (((i - 1) + MmPagedPoolInfo.NextPdeForPagedPoolExpansion) >
                 (PMMPDE)MiAddressToPte(MmPagedPoolInfo.LastPteForPagedPool))
            {
                //
                // We can only support this much then
                //
                PointerPde = MiPteToPde(MmPagedPoolInfo.LastPteForPagedPool);
                PageTableCount = (PFN_COUNT)(PointerPde + 1 -
                                 MmPagedPoolInfo.NextPdeForPagedPoolExpansion);
                ASSERT(PageTableCount < i);
                i = PageTableCount;
            }
            else
            {
                //
                // Otherwise, there is plenty of space left for this expansion
                //
                PageTableCount = i;
            }

            //
            // Get the template PDE we'll use to expand
            //
            TempPde = ValidKernelPde;

            //
            // Get the first PTE in expansion space
            //
            PointerPde = MmPagedPoolInfo.NextPdeForPagedPoolExpansion;
            BaseVa = MiPdeToPte(PointerPde);
            BaseVaStart = BaseVa;

            //
            // Lock the PFN database and loop pages
            //
            OldIrql = KeAcquireQueuedSpinLock(LockQueuePfnLock);
            do
            {
                //
                // It should not already be valid
                //
                ASSERT(PointerPde->u.Hard.Valid == 0);

                /* Request a page */
                MI_SET_USAGE(MI_USAGE_PAGED_POOL);
                MI_SET_PROCESS2("Kernel");
                PageFrameNumber = MiRemoveAnyPage(MI_GET_NEXT_COLOR());
                TempPde.u.Hard.PageFrameNumber = PageFrameNumber;
#if (_MI_PAGING_LEVELS >= 3)
                /* On PAE/x64 systems, there's no double-buffering */
                ASSERT(FALSE);
#else
                //
                // Save it into our double-buffered system page directory
                //
                MmSystemPagePtes[((ULONG_PTR)PointerPde & (SYSTEM_PD_SIZE - 1)) / sizeof(MMPTE)] = TempPde;

                /* Initialize the PFN */
                MiInitializePfnForOtherProcess(PageFrameNumber,
                                               (PMMPTE)PointerPde,
                                               MmSystemPageDirectory[(PointerPde - MiAddressToPde(NULL)) / PDE_COUNT]);

                /* Write the actual PDE now */
//                MI_WRITE_VALID_PDE(PointerPde, TempPde);
#endif
                //
                // Move on to the next expansion address
                //
                PointerPde++;
                BaseVa = (PVOID)((ULONG_PTR)BaseVa + PAGE_SIZE);
                i--;
            } while (i > 0);

            //
            // Release the PFN database lock
            //
            KeReleaseQueuedSpinLock(LockQueuePfnLock, OldIrql);

            //
            // These pages are now available, clear their availability bits
            //
            EndAllocation = (ULONG)(MmPagedPoolInfo.NextPdeForPagedPoolExpansion -
                             (PMMPDE)MiAddressToPte(MmPagedPoolInfo.FirstPteForPagedPool)) *
                             PTE_COUNT;
            RtlClearBits(MmPagedPoolInfo.PagedPoolAllocationMap,
                         EndAllocation,
                         PageTableCount * PTE_COUNT);

            //
            // Update the next expansion location
            //
            MmPagedPoolInfo.NextPdeForPagedPoolExpansion += PageTableCount;

            //
            // Zero out the newly available memory
            //
            RtlZeroMemory(BaseVaStart, PageTableCount * PAGE_SIZE);

            //
            // Now try consuming the pages again
            //
            i = RtlFindClearBitsAndSet(MmPagedPoolInfo.PagedPoolAllocationMap,
                                       SizeInPages,
                                       0);
            if (i == 0xFFFFFFFF)
            {
                //
                // Out of memory!
                //
                DPRINT1("FAILED to allocate %Iu bytes from paged pool\n", SizeInBytes);
                KeReleaseGuardedMutex(&MmPagedPoolMutex);
                return NULL;
            }
        }

        //
        // Update the pool hint if the request was just one page
        //
        if (SizeInPages == 1) MmPagedPoolInfo.PagedPoolHint = i + 1;

        //
        // Update the end bitmap so we know the bounds of this allocation when
        // the time comes to free it
        //
        EndAllocation = i + SizeInPages - 1;
        RtlSetBit(MmPagedPoolInfo.EndOfPagedPoolBitmap, EndAllocation);

        //
        // Now we can release the lock (it mainly protects the bitmap)
        //
        KeReleaseGuardedMutex(&MmPagedPoolMutex);

        //
        // Now figure out where this allocation starts
        //
        BaseVa = (PVOID)((ULONG_PTR)MmPagedPoolStart + (i << PAGE_SHIFT));

        //
        // Flush the TLB
        //
        KeFlushEntireTb(TRUE, TRUE);

        /* Setup a demand-zero writable PTE */
        MI_MAKE_SOFTWARE_PTE(&TempPte, MM_READWRITE);

        //
        // Get the first PTE and the one past the end, then loop writing them all
        //
        PointerPte = MiAddressToPte(BaseVa);
        StartPte = PointerPte + SizeInPages;
        do
        {
            //
            // Write the demand zero PTE and keep going
            //
            MI_WRITE_INVALID_PTE(PointerPte, TempPte);
        } while (++PointerPte < StartPte);

        //
        // Return the allocation address to the caller
        //
        return BaseVa;
    }

    //
    // If only one page is being requested, try to grab it from the S-LIST
    //
    if ((SizeInPages == 1) && (ExQueryDepthSList(&MiNonPagedPoolSListHead)))
    {
        BaseVa = InterlockedPopEntrySList(&MiNonPagedPoolSListHead);
        if (BaseVa) return BaseVa;
    }

    //
    // Allocations of fewer than 4 pages get their own buckets; anything larger
    // shares the last one
    //
    i = SizeInPages - 1;
    if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;
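
    //
    // Bucket mapping example (illustrative only, with MI_MAX_FREE_PAGE_LISTS
    // of 4): a 1-page request searches lists 0..3, a 3-page request searches
    // lists 2..3, and any request of 4 or more pages searches only list 3,
    // since free runs of 4+ pages are all kept on the last list.
    //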

    //
    // Loop through all the free page lists based on the page index
    //
    NextHead = &MmNonPagedPoolFreeListHead[i];
    LastHead = &MmNonPagedPoolFreeListHead[MI_MAX_FREE_PAGE_LISTS];

    //
    // Acquire the nonpaged pool lock
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);
    do
    {
        //
        // Now loop through all the free page entries in this given list
        //
        NextEntry = NextHead->Flink;
        while (NextEntry != NextHead)
        {
            /* Is freed nonpaged pool protection enabled? */
            if (MmProtectFreedNonPagedPool)
            {
                /* We need to be able to touch this page, unprotect it */
                MiUnProtectFreeNonPagedPool(NextEntry, 0);
            }

            //
            // Grab the entry and see if it can handle our allocation
            //
            FreeEntry = CONTAINING_RECORD(NextEntry, MMFREE_POOL_ENTRY, List);
            ASSERT(FreeEntry->Signature == MM_FREE_POOL_SIGNATURE);
            if (FreeEntry->Size >= SizeInPages)
            {
                //
                // It does, so consume the pages from here
                //
                FreeEntry->Size -= SizeInPages;

                //
                // The allocation will begin in this free page area
                //
                BaseVa = (PVOID)((ULONG_PTR)FreeEntry +
                                 (FreeEntry->Size << PAGE_SHIFT));

                /* Remove the item from the list, depending if pool is protected */
                if (MmProtectFreedNonPagedPool)
                    MiProtectedPoolRemoveEntryList(&FreeEntry->List);
                else
                    RemoveEntryList(&FreeEntry->List);

                //
                // However, check if it's still got space left
                //
                if (FreeEntry->Size != 0)
                {
                    /* Check which list to insert this entry into */
                    i = FreeEntry->Size - 1;
                    if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;

                    /* Insert the entry into the free list head, check for prot. pool */
                    if (MmProtectFreedNonPagedPool)
                        MiProtectedPoolInsertList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List, TRUE);
                    else
                        InsertTailList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List);

                    /* Is freed nonpaged pool protected? */
                    if (MmProtectFreedNonPagedPool)
                    {
                        /* Protect the freed pool! */
                        MiProtectFreeNonPagedPool(FreeEntry, FreeEntry->Size);
                    }
                }

                //
                // Grab the PTE for this allocation
                //
                PointerPte = MiAddressToPte(BaseVa);
                ASSERT(PointerPte->u.Hard.Valid == 1);

                //
                // Grab the PFN entry for the allocation's first page
                //
                Pfn1 = MiGetPfnEntry(PFN_FROM_PTE(PointerPte));

                //
                // Now mark it as the beginning of an allocation
                //
                ASSERT(Pfn1->u3.e1.StartOfAllocation == 0);
                Pfn1->u3.e1.StartOfAllocation = 1;

                /* Mark it as special pool if needed */
                ASSERT(Pfn1->u4.VerifierAllocation == 0);
                if (PoolType & VERIFIER_POOL_MASK)
                {
                    Pfn1->u4.VerifierAllocation = 1;
                }

                //
                // Check if the allocation is larger than one page
                //
                if (SizeInPages != 1)
                {
                    //
                    // Navigate to the last PFN entry and PTE
                    //
                    PointerPte += SizeInPages - 1;
                    ASSERT(PointerPte->u.Hard.Valid == 1);
                    Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
                }

                //
                // Mark this PFN as the last (might be the same as the first)
                //
                ASSERT(Pfn1->u3.e1.EndOfAllocation == 0);
                Pfn1->u3.e1.EndOfAllocation = 1;

                //
                // Release the nonpaged pool lock, and return the allocation
                //
                KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);
                return BaseVa;
            }

            //
            // Try the next free page entry
            //
            NextEntry = FreeEntry->List.Flink;

            /* Is freed nonpaged pool protected? */
            if (MmProtectFreedNonPagedPool)
            {
                /* Protect the freed pool! */
                MiProtectFreeNonPagedPool(FreeEntry, FreeEntry->Size);
            }
        }
    } while (++NextHead < LastHead);

    //
    // If we got here, we're out of space.
    // Start by releasing the lock
    //
    KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);

    //
    // Allocate some system PTEs
    //
    StartPte = MiReserveSystemPtes(SizeInPages, NonPagedPoolExpansion);
    PointerPte = StartPte;
    if (StartPte == NULL)
    {
        //
        // Ran out of memory
        //
        DPRINT1("Out of NP Expansion Pool\n");
        return NULL;
    }

    //
    // Acquire the pool lock now
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);

    //
    // Lock the PFN database too
    //
    LockQueue = &KeGetCurrentPrcb()->LockQueue[LockQueuePfnLock];
    KeAcquireQueuedSpinLockAtDpcLevel(LockQueue);

    //
    // Loop the pages
    //
    TempPte = ValidKernelPte;
    do
    {
        /* Allocate a page */
        MI_SET_USAGE(MI_USAGE_PAGED_POOL);
        MI_SET_PROCESS2("Kernel");
        PageFrameNumber = MiRemoveAnyPage(MI_GET_NEXT_COLOR());

        /* Get the PFN entry for it and fill it out */
        Pfn1 = MiGetPfnEntry(PageFrameNumber);
        Pfn1->u3.e2.ReferenceCount = 1;
        Pfn1->u2.ShareCount = 1;
        Pfn1->PteAddress = PointerPte;
        Pfn1->u3.e1.PageLocation = ActiveAndValid;
        Pfn1->u4.VerifierAllocation = 0;

        /* Write the PTE for it */
        TempPte.u.Hard.PageFrameNumber = PageFrameNumber;
        MI_WRITE_VALID_PTE(PointerPte++, TempPte);
    } while (--SizeInPages > 0);

    //
    // This is the last page
    //
    Pfn1->u3.e1.EndOfAllocation = 1;

    //
    // Get the first page and mark it as such
    //
    Pfn1 = MiGetPfnEntry(StartPte->u.Hard.PageFrameNumber);
    Pfn1->u3.e1.StartOfAllocation = 1;

    /* Mark it as a verifier allocation if needed */
    ASSERT(Pfn1->u4.VerifierAllocation == 0);
    if (PoolType & VERIFIER_POOL_MASK) Pfn1->u4.VerifierAllocation = 1;

    //
    // Release the PFN and nonpaged pool lock
    //
    KeReleaseQueuedSpinLockFromDpcLevel(LockQueue);
    KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);

    //
    // Return the address
    //
    return MiPteToAddress(StartPte);
}
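
//
// Caller sketch (illustrative only): MiAllocatePoolPages is the page-level
// backend that the executive pool allocator sits on, so a request too large
// for the small-block lists ends up here. Assuming a 4KB page size:
//
//     PVOID Buffer = ExAllocatePoolWithTag(NonPagedPool, 2 * PAGE_SIZE, 'XmpT');
//     // The executive sees a whole-page request and obtains two contiguous
//     // pool pages through MiAllocatePoolPages(NonPagedPool, ...)
//     if (Buffer) ExFreePoolWithTag(Buffer, 'XmpT');
//
// 'XmpT' is a made-up example tag, not one used by this code.
//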

ULONG
NTAPI
MiFreePoolPages(IN PVOID StartingVa)
{
    PMMPTE PointerPte, StartPte;
    PMMPFN Pfn1, StartPfn;
    PFN_COUNT FreePages, NumberOfPages;
    KIRQL OldIrql;
    PMMFREE_POOL_ENTRY FreeEntry, NextEntry, LastEntry;
    ULONG i, End;
    ULONG_PTR Offset;

    //
    // Handle paged pool
    //
    if ((StartingVa >= MmPagedPoolStart) && (StartingVa <= MmPagedPoolEnd))
    {
        //
        // Calculate the offset from the beginning of paged pool, and convert it
        // into pages
        //
        Offset = (ULONG_PTR)StartingVa - (ULONG_PTR)MmPagedPoolStart;
        i = (ULONG)(Offset >> PAGE_SHIFT);
        End = i;

        //
        // Now use the end bitmap to scan until we find a set bit, meaning that
        // this allocation finishes here
        //
        while (!RtlTestBit(MmPagedPoolInfo.EndOfPagedPoolBitmap, End)) End++;

        //
        // Now calculate the total number of pages this allocation spans. If it's
        // only one page, add it to the S-LIST instead of freeing it
        //
        NumberOfPages = End - i + 1;
        if ((NumberOfPages == 1) &&
            (ExQueryDepthSList(&MiPagedPoolSListHead) < MiPagedPoolSListMaximum))
        {
            InterlockedPushEntrySList(&MiPagedPoolSListHead, StartingVa);
            return 1;
        }

        /* Delete the actual pages */
        PointerPte = MmPagedPoolInfo.FirstPteForPagedPool + i;
        FreePages = MiDeleteSystemPageableVm(PointerPte, NumberOfPages, 0, NULL);
        ASSERT(FreePages == NumberOfPages);

        //
        // Acquire the paged pool lock
        //
        KeAcquireGuardedMutex(&MmPagedPoolMutex);

        //
        // Clear the end-of-allocation bit and the allocation bits
        //
        RtlClearBit(MmPagedPoolInfo.EndOfPagedPoolBitmap, End);
        RtlClearBits(MmPagedPoolInfo.PagedPoolAllocationMap, i, NumberOfPages);

        //
        // Update the hint if we need to
        //
        if (i < MmPagedPoolInfo.PagedPoolHint) MmPagedPoolInfo.PagedPoolHint = i;

        //
        // Release the lock protecting the bitmaps
        //
        KeReleaseGuardedMutex(&MmPagedPoolMutex);

        //
        // And finally return the number of pages freed
        //
        return NumberOfPages;
    }

    //
    // Get the first PTE and its corresponding PFN entry. If this is also the
    // last PTE, meaning that this allocation was only for one page, push it into
    // the S-LIST instead of freeing it
    //
    StartPte = PointerPte = MiAddressToPte(StartingVa);
    StartPfn = Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
    if ((Pfn1->u3.e1.EndOfAllocation == 1) &&
        (ExQueryDepthSList(&MiNonPagedPoolSListHead) < MiNonPagedPoolSListMaximum))
    {
        InterlockedPushEntrySList(&MiNonPagedPoolSListHead, StartingVa);
        return 1;
    }

    //
    // Loop until we find the last PTE
    //
    while (Pfn1->u3.e1.EndOfAllocation == 0)
    {
        //
        // Keep going
        //
        PointerPte++;
        Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
    }

    //
    // Now we know how many pages we have
    //
    NumberOfPages = (PFN_COUNT)(PointerPte - StartPte + 1);

    //
    // Acquire the nonpaged pool lock
    //
    OldIrql = KeAcquireQueuedSpinLock(LockQueueMmNonPagedPoolLock);

    //
    // Mark the first and last PTEs as not part of an allocation anymore
    //
    StartPfn->u3.e1.StartOfAllocation = 0;
    Pfn1->u3.e1.EndOfAllocation = 0;

    //
    // Assume we will free as many pages as the allocation was
    //
    FreePages = NumberOfPages;

    //
    // Peek one page past the end of the allocation
    //
    PointerPte++;

    //
    // Guard against going past initial nonpaged pool
    //
    if (MiGetPfnEntryIndex(Pfn1) == MiEndOfInitialPoolFrame)
    {
        //
        // This page is on the outskirts of initial nonpaged pool, so ignore it
        //
        Pfn1 = NULL;
    }
    else
    {
        /* Sanity check */
        ASSERT((ULONG_PTR)StartingVa + NumberOfPages <= (ULONG_PTR)MmNonPagedPoolEnd);

        /* Check if protected pool is enabled */
        if (MmProtectFreedNonPagedPool)
        {
            /* The freed block will be merged, it must be made accessible */
            MiUnProtectFreeNonPagedPool(MiPteToAddress(PointerPte), 0);
        }

        //
        // Otherwise, our entire allocation must've fit within the initial
        // nonpaged pool, or the expansion nonpaged pool, so get the PFN entry of
        // the next allocation
        //
        if (PointerPte->u.Hard.Valid == 1)
        {
            //
            // It's either expansion or initial: get the PFN entry
            //
            Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
        }
        else
        {
            //
            // This means we've reached the guard page that protects the end of
            // the expansion nonpaged pool
            //
            Pfn1 = NULL;
        }
    }

    //
    // Check if this allocation actually exists
    //
    if ((Pfn1) && (Pfn1->u3.e1.StartOfAllocation == 0))
    {
        //
        // It doesn't, so we should actually locate a free entry descriptor
        //
        FreeEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)StartingVa +
                                         (NumberOfPages << PAGE_SHIFT));
        ASSERT(FreeEntry->Signature == MM_FREE_POOL_SIGNATURE);
        ASSERT(FreeEntry->Owner == FreeEntry);

        /* Consume this entry's pages */
        FreePages += FreeEntry->Size;

        /* Remove the item from the list, depending if pool is protected */
        if (MmProtectFreedNonPagedPool)
            MiProtectedPoolRemoveEntryList(&FreeEntry->List);
        else
            RemoveEntryList(&FreeEntry->List);
    }

    //
    // Now get the official free entry we'll create for the caller's allocation
    //
    FreeEntry = StartingVa;

    //
    // Check if our allocation is the very first page
    //
    if (MiGetPfnEntryIndex(StartPfn) == MiStartOfInitialPoolFrame)
    {
        //
        // Then we can't do anything or we'll risk underflowing
        //
        Pfn1 = NULL;
    }
    else
    {
        //
        // Otherwise, get the PTE for the page right before our allocation
        //
        PointerPte -= NumberOfPages + 1;

        /* Check if protected pool is enabled */
        if (MmProtectFreedNonPagedPool)
        {
            /* The freed block will be merged, it must be made accessible */
            MiUnProtectFreeNonPagedPool(MiPteToAddress(PointerPte), 0);
        }

        /* Check if this is valid pool, or a guard page */
        if (PointerPte->u.Hard.Valid == 1)
        {
            //
            // It's either expansion or initial nonpaged pool, get the PFN entry
            //
            Pfn1 = MiGetPfnEntry(PointerPte->u.Hard.PageFrameNumber);
        }
        else
        {
            //
            // We must've reached the guard page, so don't risk touching it
            //
            Pfn1 = NULL;
        }
    }

    //
    // Check if there is a valid PFN entry for the page before the allocation
    // and then check if this page was actually the end of an allocation.
    // If it wasn't, then we know for sure it's a free page
    //
    if ((Pfn1) && (Pfn1->u3.e1.EndOfAllocation == 0))
    {
        //
        // Get the free entry descriptor for that given page range
        //
        FreeEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)StartingVa - PAGE_SIZE);
        ASSERT(FreeEntry->Signature == MM_FREE_POOL_SIGNATURE);
        FreeEntry = FreeEntry->Owner;

        /* Check if protected pool is enabled */
        if (MmProtectFreedNonPagedPool)
        {
            /* The freed block will be merged, it must be made accessible */
            MiUnProtectFreeNonPagedPool(FreeEntry, 0);
        }

        //
        // Check if the entry is small enough to be indexed on a free list.
        // If it is, we'll want to re-insert it, since we're about to
        // collapse our pages on top of it, which will change its count
        //
        if (FreeEntry->Size < (MI_MAX_FREE_PAGE_LISTS - 1))
        {
            /* Remove the item from the list, depending if pool is protected */
            if (MmProtectFreedNonPagedPool)
                MiProtectedPoolRemoveEntryList(&FreeEntry->List);
            else
                RemoveEntryList(&FreeEntry->List);

            //
            // Update its size
            //
            FreeEntry->Size += FreePages;

            //
            // And now find the new appropriate list to place it in
            //
            i = (ULONG)(FreeEntry->Size - 1);
            if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;

            /* Insert the entry into the free list head, check for prot. pool */
            if (MmProtectFreedNonPagedPool)
                MiProtectedPoolInsertList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List, TRUE);
            else
                InsertTailList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List);
        }
        else
        {
            //
            // Otherwise, just combine our free pages into this entry
            //
            FreeEntry->Size += FreePages;
        }
    }

    //
    // Check if no merge with a preceding block happened, so this entry becomes
    // the new free block descriptor
    //
    if (FreeEntry == StartingVa)
    {
        //
        // Well, now we are a free entry. At worst we just have our newly freed
        // pages, at best we have our pages plus whatever entry came after us
        //
        FreeEntry->Size = FreePages;

        //
        // Find the appropriate list we should be on
        //
        i = FreeEntry->Size - 1;
        if (i >= MI_MAX_FREE_PAGE_LISTS) i = MI_MAX_FREE_PAGE_LISTS - 1;

        /* Insert the entry into the free list head, check for prot. pool */
        if (MmProtectFreedNonPagedPool)
            MiProtectedPoolInsertList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List, TRUE);
        else
            InsertTailList(&MmNonPagedPoolFreeListHead[i], &FreeEntry->List);
    }

    //
    // Just a sanity check
    //
    ASSERT(FreePages != 0);

    //
    // Get all the pages between our allocation and its end. These will all now
    // become free page chunks.
    //
    NextEntry = StartingVa;
    LastEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)NextEntry + (FreePages << PAGE_SHIFT));
    do
    {
        //
        // Link back to the parent free entry, and keep going
        //
        NextEntry->Owner = FreeEntry;
        NextEntry->Signature = MM_FREE_POOL_SIGNATURE;
        NextEntry = (PMMFREE_POOL_ENTRY)((ULONG_PTR)NextEntry + PAGE_SIZE);
    } while (NextEntry != LastEntry);

    /* Is freed nonpaged pool protected? */
    if (MmProtectFreedNonPagedPool)
    {
        /* Protect the freed pool! */
        MiProtectFreeNonPagedPool(FreeEntry, FreeEntry->Size);
    }

    //
    // We're done, release the lock and let the caller know how much we freed
    //
    KeReleaseQueuedSpinLock(LockQueueMmNonPagedPoolLock, OldIrql);
    return NumberOfPages;
}
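
//
// Coalescing example (illustrative only): suppose a 2-page allocation at VA
// X is freed, the page right after it starts a 3-page free block, and the
// page right before it belongs to another live allocation. The forward check
// finds StartOfAllocation == 0, so FreePages becomes 2 + 3 = 5 and the old
// descriptor is unlinked; the backward check hits EndOfAllocation == 1 and
// merges nothing. FreeEntry == StartingVa then holds, so X becomes the
// descriptor of a single 5-page block on list 3, and all five pages get
// Owner = X stamped into them by the final loop.
//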


BOOLEAN
NTAPI
MiRaisePoolQuota(IN POOL_TYPE PoolType,
                 IN ULONG CurrentMaxQuota,
                 OUT PULONG NewMaxQuota)
{
    //
    // Not implemented
    //
    UNIMPLEMENTED;
    *NewMaxQuota = CurrentMaxQuota + 65536;
    return TRUE;
}

NTSTATUS
NTAPI
MiInitializeSessionPool(VOID)
{
    PMMPTE PointerPte, LastPte;
    PMMPDE PointerPde, LastPde;
    PFN_NUMBER PageFrameIndex, PdeCount;
    PPOOL_DESCRIPTOR PoolDescriptor;
    PMM_SESSION_SPACE SessionGlobal;
    PMM_PAGED_POOL_INFO PagedPoolInfo;
    NTSTATUS Status;
    ULONG Index, PoolSize, BitmapSize;
    PAGED_CODE();

    /* Lock session pool */
    SessionGlobal = MmSessionSpace->GlobalVirtualAddress;
    KeInitializeGuardedMutex(&SessionGlobal->PagedPoolMutex);

    /* Setup a valid pool descriptor */
    PoolDescriptor = &MmSessionSpace->PagedPool;
    ExInitializePoolDescriptor(PoolDescriptor,
                               PagedPoolSession,
                               0,
                               0,
                               &SessionGlobal->PagedPoolMutex);

    /* Setup the pool addresses */
    MmSessionSpace->PagedPoolStart = (PVOID)MiSessionPoolStart;
    MmSessionSpace->PagedPoolEnd = (PVOID)((ULONG_PTR)MiSessionPoolEnd - 1);
    DPRINT1("Session Pool Start: 0x%p End: 0x%p\n",
            MmSessionSpace->PagedPoolStart, MmSessionSpace->PagedPoolEnd);

    /* Reset all the counters */
    PagedPoolInfo = &MmSessionSpace->PagedPoolInfo;
    PagedPoolInfo->PagedPoolCommit = 0;
    PagedPoolInfo->PagedPoolHint = 0;
    PagedPoolInfo->AllocatedPagedPool = 0;

    /* Compute PDE and PTE addresses */
    PointerPde = MiAddressToPde(MmSessionSpace->PagedPoolStart);
    PointerPte = MiAddressToPte(MmSessionSpace->PagedPoolStart);
    LastPde = MiAddressToPde(MmSessionSpace->PagedPoolEnd);
    LastPte = MiAddressToPte(MmSessionSpace->PagedPoolEnd);

    /* Write them down */
    MmSessionSpace->PagedPoolBasePde = PointerPde;
    PagedPoolInfo->FirstPteForPagedPool = PointerPte;
    PagedPoolInfo->LastPteForPagedPool = LastPte;
    PagedPoolInfo->NextPdeForPagedPoolExpansion = PointerPde + 1;

    /* Zero the PDEs */
    PdeCount = LastPde - PointerPde;
    RtlZeroMemory(PointerPde, (PdeCount + 1) * sizeof(MMPTE));

    /* Initialize the PFN for the PDE */
    Status = MiInitializeAndChargePfn(&PageFrameIndex,
                                      PointerPde,
                                      MmSessionSpace->SessionPageDirectoryIndex,
                                      TRUE);
    ASSERT(NT_SUCCESS(Status) == TRUE);

    /* Initialize the first page table */
    Index = (ULONG_PTR)MmSessionSpace->PagedPoolStart - (ULONG_PTR)MmSessionBase;
    Index >>= 22;
#ifndef _M_AMD64 // FIXME
    ASSERT(MmSessionSpace->PageTables[Index].u.Long == 0);
    MmSessionSpace->PageTables[Index] = *PointerPde;
#endif

    /* Bump up counters */
    InterlockedIncrementSizeT(&MmSessionSpace->NonPageablePages);
    InterlockedIncrementSizeT(&MmSessionSpace->CommittedPages);

    /* Compute the size of the pool in pages, and of the bitmap for it */
    PoolSize = MmSessionPoolSize >> PAGE_SHIFT;
    BitmapSize = sizeof(RTL_BITMAP) + ((PoolSize + 31) / 32) * sizeof(ULONG);

    /* Allocate and initialize the bitmap to track allocations */
    PagedPoolInfo->PagedPoolAllocationMap = ExAllocatePoolWithTag(NonPagedPool,
                                                                  BitmapSize,
                                                                  TAG_MM);
    ASSERT(PagedPoolInfo->PagedPoolAllocationMap != NULL);
    RtlInitializeBitMap(PagedPoolInfo->PagedPoolAllocationMap,
                        (PULONG)(PagedPoolInfo->PagedPoolAllocationMap + 1),
                        PoolSize);

    /* Set all bits, but clear the first page table's worth */
    RtlSetAllBits(PagedPoolInfo->PagedPoolAllocationMap);
    RtlClearBits(PagedPoolInfo->PagedPoolAllocationMap, 0, PTE_PER_PAGE);

    /* Allocate and initialize the bitmap that tracks the ends of allocations */
    PagedPoolInfo->EndOfPagedPoolBitmap = ExAllocatePoolWithTag(NonPagedPool,
                                                                BitmapSize,
                                                                TAG_MM);
    ASSERT(PagedPoolInfo->EndOfPagedPoolBitmap != NULL);
    RtlInitializeBitMap(PagedPoolInfo->EndOfPagedPoolBitmap,
                        (PULONG)(PagedPoolInfo->EndOfPagedPoolBitmap + 1),
                        PoolSize);

    /* Clear all the bits and return success */
    RtlClearAllBits(PagedPoolInfo->EndOfPagedPoolBitmap);
    return STATUS_SUCCESS;
}
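
//
// Bitmap sizing example (illustrative only): for a 16MB session pool with
// 4KB pages, PoolSize = 4096 bits. The bit buffer then needs
// ((4096 + 31) / 32) * sizeof(ULONG) = 128 * 4 = 512 bytes on top of the
// RTL_BITMAP header, and the buffer is placed immediately after that header,
// which is what the "(PULONG)(... + 1)" expressions above point at.
//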

/* PUBLIC FUNCTIONS ***********************************************************/

/*
 * @unimplemented
 */
PVOID
NTAPI
MmAllocateMappingAddress(IN SIZE_T NumberOfBytes,
                         IN ULONG PoolTag)
{
    UNIMPLEMENTED;
    return NULL;
}

/*
 * @unimplemented
 */
VOID
NTAPI
MmFreeMappingAddress(IN PVOID BaseAddress,
                     IN ULONG PoolTag)
{
    UNIMPLEMENTED;
}

/* EOF */